diff --git a/.ci/packaging.groovy b/.ci/packaging.groovy
index e35a574c5fa..32d2d428c7f 100644
--- a/.ci/packaging.groovy
+++ b/.ci/packaging.groovy
@@ -23,6 +23,7 @@ pipeline {
   }
   triggers {
     issueCommentTrigger('(?i)^\\/packaging$')
+    upstream('Beats/beats-beats-mbp/master')
   }
   parameters {
     booleanParam(name: 'macos', defaultValue: false, description: 'Allow macOS stages.')
@@ -76,7 +77,19 @@ pipeline {
       }
       environment {
         HOME = "${env.WORKSPACE}"
-        PLATFORMS = "!darwin +linux/armv7 +linux/ppc64le +linux/s390x +linux/mips64"
+        PLATFORMS = [
+          '+all',
+          'linux/amd64',
+          'linux/386',
+          'linux/arm64',
+          'linux/armv7',
+          'linux/ppc64le',
+          'linux/mips64',
+          'linux/s390x',
+          'windows/amd64',
+          'windows/386',
+          (params.macos ? '' : 'darwin/amd64'),
+        ].join(' ')
       }
       steps {
         release()
@@ -94,7 +107,10 @@ pipeline {
       }
       environment {
         HOME = "${env.WORKSPACE}"
-        PLATFORMS = "!defaults +darwin/amd64"
+        PLATFORMS = [
+          '+all',
+          'darwin/amd64',
+        ].join(' ')
       }
       steps {
         withMacOSEnv(){
diff --git a/.ci/scripts/install-kind.sh b/.ci/scripts/install-kind.sh
new file mode 100755
index 00000000000..dc83bb4cd2a
--- /dev/null
+++ b/.ci/scripts/install-kind.sh
@@ -0,0 +1,13 @@
+#!/usr/bin/env bash
+set -exuo pipefail
+
+MSG="parameter missing."
+DEFAULT_HOME="/usr/local"
+KIND_VERSION=${KIND_VERSION:?$MSG}
+HOME=${HOME:-$DEFAULT_HOME}
+KIND_CMD="${HOME}/bin/kind"
+
+mkdir -p "${HOME}/bin"
+
+curl -sSLo "${KIND_CMD}" "https://github.com/kubernetes-sigs/kind/releases/download/${KIND_VERSION}/kind-linux-amd64"
+chmod +x "${KIND_CMD}"
diff --git a/.ci/scripts/install-kubectl.sh b/.ci/scripts/install-kubectl.sh
new file mode 100755
index 00000000000..d0b7080d188
--- /dev/null
+++ b/.ci/scripts/install-kubectl.sh
@@ -0,0 +1,14 @@
+#!/usr/bin/env bash
+set -exuo pipefail
+
+MSG="parameter missing."
+DEFAULT_HOME="/usr/local"
+K8S_VERSION=${K8S_VERSION:?$MSG}
+HOME=${HOME:-$DEFAULT_HOME}
+KUBECTL_CMD="${HOME}/bin/kubectl"
+
+mkdir -p "${HOME}/bin"
+
+curl -sSLo "${KUBECTL_CMD}" "https://storage.googleapis.com/kubernetes-release/release/${K8S_VERSION}/bin/linux/amd64/kubectl"
+chmod +x "${KUBECTL_CMD}"
+
diff --git a/.ci/scripts/kind-setup.sh b/.ci/scripts/kind-setup.sh
index 4ac8fb0f6c3..fa4f66dd6e6 100755
--- a/.ci/scripts/kind-setup.sh
+++ b/.ci/scripts/kind-setup.sh
@@ -1,18 +1,5 @@
 #!/usr/bin/env bash
 set -exuo pipefail
-MSG="parameter missing."
-K8S_VERSION=${K8S_VERSION:?$MSG} -HOME=${HOME:?$MSG} -KBC_CMD="${HOME}/bin/kubectl" - -mkdir -p "${HOME}/bin" - -curl -sSLo "${KBC_CMD}" "https://storage.googleapis.com/kubernetes-release/release/${K8S_VERSION}/bin/linux/amd64/kubectl" -chmod +x "${KBC_CMD}" - -GO111MODULE="on" go get sigs.k8s.io/kind@v0.5.1 kind create cluster --image kindest/node:${K8S_VERSION} - -export KUBECONFIG="$(kind get kubeconfig-path)" kubectl cluster-info diff --git a/.travis.yml b/.travis.yml index 7717c0366f8..b9f903b023b 100644 --- a/.travis.yml +++ b/.travis.yml @@ -111,23 +111,28 @@ jobs: # Metricbeat - os: linux before_install: .ci/scripts/travis_has_changes.sh metricbeat libbeat || travis_terminate 0 - env: TARGETS="-C metricbeat unit-tests coverage-report" + env: TARGETS="-C metricbeat unit-tests" go: $TRAVIS_GO_VERSION stage: test - os: linux - before_install: .ci/scripts/travis_has_changes.sh metricbeat libbeat vendor || travis_terminate 0 - env: TARGETS="-C metricbeat integration-tests-environment coverage-report" + before_install: .ci/scripts/travis_has_changes.sh metricbeat libbeat || travis_terminate 0 + install: + - .ci/scripts/install-kind.sh + - .ci/scripts/install-kubectl.sh + env: + - TARGETS="-C metricbeat integration-tests" + - K8S_VERSION=v1.17.2 + - KIND_VERSION=v0.7.0 go: $TRAVIS_GO_VERSION stage: test - os: linux - before_install: .ci/scripts/travis_has_changes.sh metricbeat libbeat vendor || travis_terminate 0 - env: TARGETS="-C metricbeat update system-tests-environment coverage-report" + before_install: .ci/scripts/travis_has_changes.sh metricbeat libbeat || travis_terminate 0 + env: TARGETS="-C metricbeat system-tests" go: $TRAVIS_GO_VERSION stage: test - - os: osx before_install: .ci/scripts/travis_has_changes.sh metricbeat libbeat || travis_terminate 0 - env: TARGETS="TEST_ENVIRONMENT=0 -C metricbeat testsuite" + env: TARGETS="-C metricbeat testsuite" go: $TRAVIS_GO_VERSION stage: test - os: linux @@ -137,6 +142,21 @@ jobs: stage: test - os: linux before_install: .ci/scripts/travis_has_changes.sh x-pack/metricbeat metricbeat libbeat || travis_terminate 0 + env: TARGETS="-C x-pack/metricbeat unit-tests" + go: $TRAVIS_GO_VERSION + stage: test + - os: linux + before_install: .ci/scripts/travis_has_changes.sh x-pack/metricbeat metricbeat libbeat || travis_terminate 0 + env: TARGETS="-C x-pack/metricbeat integration-tests" + go: $TRAVIS_GO_VERSION + stage: test + - os: linux + before_install: .ci/scripts/travis_has_changes.sh x-pack/metricbeat metricbeat libbeat || travis_terminate 0 + env: TARGETS="-C x-pack/metricbeat system-tests" + go: $TRAVIS_GO_VERSION + stage: test + - os: osx + before_install: .ci/scripts/travis_has_changes.sh metricbeat libbeat || travis_terminate 0 env: TARGETS="-C x-pack/metricbeat testsuite" go: $TRAVIS_GO_VERSION stage: test diff --git a/CHANGELOG.asciidoc b/CHANGELOG.asciidoc index b3fa34b8b9b..3da2b9c4fcc 100644 --- a/CHANGELOG.asciidoc +++ b/CHANGELOG.asciidoc @@ -30,6 +30,7 @@ https://github.com/elastic/beats/compare/v7.5.0...v7.5.1[View commits] - Fix docker network stats when multiple interfaces are configured. {issue}14586[14586] {pull}14825[14825] - Fix ListMetrics pagination in aws module. 
{issue}14926[14926] {pull}14942[14942] - Fix CPU count in docker/cpu in cases where no `online_cpus` are reported {pull}15070[15070] +- Add domain state to kvm module {pull}17673[17673] [[release-notes-7.5.0]] === Beats version 7.5.0 diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index b6f0c1c8e86..9fed58cb59f 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -20,7 +20,9 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d - File integrity dataset (macOS): Replace unnecessary `file.origin.raw` (type keyword) with `file.origin.text` (type `text`). {issue}12423[12423] {pull}15630[15630] *Filebeat* - +- Improve ECS field mappings in panw module. event.outcome now only contains success/failure per ECS specification. {issue}16025[16025] {pull}17910[17910] +- Improve ECS categorization field mappings for nginx module. http.request.referrer is now lowercase & http.request.referrer only populated when nginx sets a value {issue}16174[16174] {pull}17844[17844] +- Improve ECS field mappings in santa module. move hash.sha256 to process.hash.sha256 & move certificate fields to santa.certificate . {issue}16180[16180] {pull}17982[17982] *Heartbeat* @@ -75,6 +77,9 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d - Fix bug with `monitoring.cluster_uuid` setting not always being exposed via GET /state Beats API. {issue}16732[16732] {pull}17420[17420] - Fix building on FreeBSD by removing build flags from `add_cloudfoundry_metadata` processor. {pull}17486[17486] - Do not rotate log files on startup when interval is configured and rotateonstartup is disabled. {pull}17613[17613] +- Fix goroutine leak and Elasticsearch output file descriptor leak when output reloading is in use. {issue}10491[10491] {pull}17381[17381] +- Fix `setup.dashboards.index` setting not working. {pull}17749[17749] +- Fix Elasticsearch license endpoint URL referenced in error message. {issue}17880[17880] {pull}18030[18030] *Auditbeat* @@ -111,6 +116,10 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d - Fixed activemq module causing "regular expression has redundant nested repeat operator" warning in Elasticsearch. {pull}17428[17428] - Remove migrationVersion map 7.7.0 reference from Kibana dashboard file to fix backward compatibility issues. {pull}17425[17425] - Fix issue 17734 to retry on rate-limit error in the Filebeat httpjson input. {issue}17734[17734] {pull}17735[17735] +- Fixed `cloudfoundry.access` to have the correct `cloudfoundry.app.id` contents. {pull}17847[17847] +- Fixing `ingress_controller.` fields to be of type keyword instead of text. {issue}17834[17834] +- Fixed typo in log message. {pull}17897[17897] +- Fix Cisco ASA ASA 3020** and 106023 messages {pull}17964[17964] *Heartbeat* @@ -194,9 +203,12 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d - Update RPM packages contained in Beat Docker images. {issue}17035[17035] - Update supported versions of `redis` output. {pull}17198[17198] - Update documentation for system.process.memory fields to include clarification on Windows os's. {pull}17268[17268] +- Add `replace` processor for replacing string values of fields. {pull}17342[17342] - Add optional regex based cid extractor to `add_kubernetes_metadata` processor. {pull}17360[17360] - Add `urldecode` processor to for decoding URL-encoded fields. {pull}17505[17505] - Add support for AWS IAM `role_arn` in credentials config. 
{pull}17658[17658] {issue}12464[12464]
+- Add keystore support for autodiscover static configurations. {pull}16306[16306]
+- Add Kerberos support to Elasticsearch output. {pull}17927[17927]

*Auditbeat*

@@ -257,6 +269,7 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d
 - Add dashboard for Google Cloud Audit and AWS CloudTrail. {pull}17379[17379]
 - Improve ECS categorization field mappings for mysql module. {issue}16172[16172] {pull}17491[17491]
 - Release Google Cloud module as GA. {pull}17511[17511]
+- Add config option to select a different azure cloud env in the azure-eventhub input and azure module. {issue}17649[17649] {pull}17659[17659]
 - Added new Checkpoint Syslog filebeat module. {pull}17682[17682]
 - Improve ECS categorization field mappings for nats module. {issue}16173[16173] {pull}17550[17550]
 - Enhance `elasticsearch/server` fileset to handle ECS-compatible logs emitted by Elasticsearch. {issue}17715[17715] {pull}17714[17714]
@@ -265,11 +278,15 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d
 - Enhance `elasticsearch/slowlog` fileset to handle ECS-compatible logs emitted by Elasticsearch. {issue}17715[17715] {pull}17729[17729]
 - Improve ECS categorization field mappings in misp module. {issue}16026[16026] {pull}17344[17344]
 - Added Unix stream socket support as an input source and a syslog input source. {pull}17492[17492]
+- Improve ECS categorization field mappings in postgresql module. {issue}16177[16177] {pull}17914[17914]
+- Improve ECS categorization field mappings in rabbitmq module. {issue}16178[16178] {pull}17916[17916]
+- Improve ECS categorization field mappings in redis module. {issue}16179[16179] {pull}17918[17918]
+- Improve ECS categorization field mappings for zeek module. {issue}16029[16029] {pull}17738[17738]

*Heartbeat*

 - Allow a list of status codes for HTTP checks. {pull}15587[15587]
-
+- Add additional ECS compatible fields for TLS information. {pull}17687[17687]

*Journalbeat*

@@ -334,8 +351,14 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d
 - Added documentation for running Metricbeat in Cloud Foundry. {pull}17275[17275]
 - Add test for documented fields check for metricsets without a http input. {issue}17315[17315] {pull}17334[17334]
 - Add final tests and move label to GA for the azure module in metricbeat. {pull}17319[17319]
+- Refactor windows/perfmon metricset configuration options and event output. {pull}17596[17596]
 - Reference kubernetes manifests mount data directory from the host when running metricbeat as daemonset, so data persist between executions in the same node. {pull}17429[17429]
+- Add more detailed error messages, system tests and small refactoring to the service metricset in windows. {pull}17725[17725]
 - Stack Monitoring modules now auto-configure required metricsets when `xpack.enabled: true` is set. {issue}16471[16471] {pull}17609[17609]
+- Allow partial region and zone name in googlecloud module config. {pull}17913[17913]
+- Add aggregation aligner as a config parameter for googlecloud stackdriver metricset. {issue}17141[17141] {pull}17719[17719]
+- Move the perfmon metricset to GA. {issue}16608[16608] {pull}17879[17879]
+- Add static mapping for metricsets under aws module.
{pull}17614[17614] {pull}17650[17650] - Update MSSQL module to fix some SSPI authentication and add brackets to USE statements {pull}17862[17862]] *Packetbeat* diff --git a/Jenkinsfile b/Jenkinsfile index 6fceffba358..44b88922ee1 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -2,6 +2,13 @@ @Library('apm@current') _ +import groovy.transform.Field + +/** + This is required to store the stashed id with the test results to be digested with runbld +*/ +@Field def stashedTestReports = [:] + pipeline { agent { label 'ubuntu && immutable' } environment { @@ -11,6 +18,7 @@ pipeline { PIPELINE_LOG_LEVEL = "INFO" DOCKERELASTIC_SECRET = 'secret/observability-team/ci/docker-registry/prod' DOCKER_REGISTRY = 'docker.elastic.co' + RUNBLD_DISABLE_NOTIFICATIONS = 'true' } options { timeout(time: 2, unit: 'HOURS') @@ -40,13 +48,13 @@ pipeline { steps { deleteDir() gitCheckout(basedir: "${BASE_DIR}") + stash allowEmpty: true, name: 'source', useDefaultExcludes: false dir("${BASE_DIR}"){ loadConfigEnvVars() } whenTrue(params.debug){ dumpFilteredEnvironment() } - stash allowEmpty: true, name: 'source', useDefaultExcludes: false } } stage('Lint'){ @@ -68,7 +76,7 @@ pipeline { } } steps { - makeTarget("Elastic Agent x-pack Linux", "-C x-pack/elastic-agent testsuite") + mageTarget("Elastic Agent x-pack Linux", "x-pack/elastic-agent", "build test") } } @@ -82,7 +90,7 @@ pipeline { } } steps { - mageTargetWin("Elastic Agent x-pack Windows Unit test", "x-pack/elastic-agent", "unitTest") + mageTargetWin("Elastic Agent x-pack Windows Unit test", "x-pack/elastic-agent", "build unitTest") } } @@ -92,14 +100,11 @@ pipeline { when { beforeAgent true expression { - /** - * Disable macOS test until it's fixed for the Elastic Agent. - */ - return env.BUILD_ELASTIC_AGENT_XPACK != "false" && params.macosTest && true == false + return env.BUILD_ELASTIC_AGENT_XPACK != "false" && params.macosTest } } steps { - makeTarget("Elastic Agent x-pack Mac OS X", "TEST_ENVIRONMENT=0 -C x-pack/elastic-agent testsuite") + mageTarget("Elastic Agent x-pack Mac OS X", "x-pack/elastic-agent", "build unitTest") } } @@ -139,7 +144,7 @@ pipeline { } } steps { - makeTarget("Filebeat oss Mac OS X", "TEST_ENVIRONMENT=0 -C filebeat testsuite") + mageTarget("Filebeat oss Mac OS X", "filebeat", "build unitTest") } } stage('Filebeat Windows'){ @@ -152,7 +157,7 @@ pipeline { } } steps { - mageTargetWin("Filebeat oss Windows Unit test", "filebeat", "unitTest") + mageTargetWin("Filebeat oss Windows Unit test", "filebeat", "build unitTest") } } stage('Heartbeat'){ @@ -180,7 +185,7 @@ pipeline { } } steps { - makeTarget("Heartbeat oss Mac OS X", "TEST_ENVIRONMENT=0 -C heartbeat testsuite") + mageTarget("Heartbeat oss Mac OS X", "heartbeat", "build unitTest") } } stage('Heartbeat Windows'){ @@ -193,7 +198,7 @@ pipeline { } } steps { - mageTargetWin("Heartbeat oss Windows Unit test", "heartbeat", "unitTest") + mageTargetWin("Heartbeat oss Windows Unit test", "heartbeat", "build unitTest") } } } @@ -228,7 +233,7 @@ pipeline { } } steps { - makeTarget("Auditbeat oss Mac OS X", "TEST_ENVIRONMENT=0 -C auditbeat testsuite") + mageTarget("Auditbeat oss Mac OS X", "auditbeat", "build unitTest") } } stage('Auditbeat Windows'){ @@ -241,7 +246,7 @@ pipeline { } } steps { - mageTargetWin("Auditbeat Windows Unit test", "auditbeat", "unitTest") + mageTargetWin("Auditbeat Windows Unit test", "auditbeat", "build unitTest") } } } @@ -299,7 +304,7 @@ pipeline { makeTarget("Libbeat x-pack Linux", "-C x-pack/libbeat testsuite") } } - stage('Metricbeat Unit tests'){ + 
stage('Metricbeat OSS Unit tests'){ agent { label 'ubuntu && immutable' } options { skipDefaultCheckout() } when { @@ -309,10 +314,10 @@ pipeline { } } steps { - makeTarget("Metricbeat Unit tests", "-C metricbeat unit-tests coverage-report") + mageTarget("Metricbeat OSS linux/amd64 (unitTest)", "metricbeat", "build unitTest") } } - stage('Metricbeat Integration tests'){ + stage('Metricbeat OSS Integration tests'){ agent { label 'ubuntu && immutable' } options { skipDefaultCheckout() } when { @@ -322,10 +327,10 @@ pipeline { } } steps { - makeTarget("Metricbeat Integration tests", "-C metricbeat integration-tests-environment coverage-report") + mageTarget("Metricbeat OSS linux/amd64 (goIntegTest)", "metricbeat", "goIntegTest") } } - stage('Metricbeat System tests'){ + stage('Metricbeat Python integration tests'){ agent { label 'ubuntu && immutable' } options { skipDefaultCheckout() } when { @@ -335,7 +340,7 @@ pipeline { } } steps { - makeTarget("Metricbeat System tests", "-C metricbeat update system-tests-environment coverage-report") + mageTarget("Metricbeat OSS linux/amd64 (pythonIntegTest)", "metricbeat", "pythonIntegTest") } } stage('Metricbeat x-pack'){ @@ -348,7 +353,7 @@ pipeline { } } steps { - mageTarget("Metricbeat x-pack Linux", "x-pack/metricbeat", "update build test") + mageTarget("Metricbeat x-pack Linux", "x-pack/metricbeat", "build test") } } stage('Metricbeat crosscompile'){ @@ -361,7 +366,7 @@ pipeline { } } steps { - makeTarget("Metricbeat oss crosscompile", "-C metricbeat crosscompile") + makeTarget("Metricbeat OSS crosscompile", "-C metricbeat crosscompile") } } stage('Metricbeat Mac OS X'){ @@ -374,7 +379,7 @@ pipeline { } } steps { - makeTarget("Metricbeat oss Mac OS X", "TEST_ENVIRONMENT=0 -C metricbeat testsuite") + mageTarget("Metricbeat OSS Mac OS X", "metricbeat", "build unitTest") } } stage('Metricbeat Windows'){ @@ -387,7 +392,7 @@ pipeline { } } steps { - mageTargetWin("Metricbeat Windows Unit test", "metricbeat", "unitTest") + mageTargetWin("Metricbeat Windows Unit test", "metricbeat", "build unitTest") } } stage('Packetbeat'){ @@ -449,7 +454,7 @@ pipeline { } } steps { - mageTargetWin("Winlogbeat Windows Unit test", "winlogbeat", "unitTest") + mageTargetWin("Winlogbeat Windows Unit test", "winlogbeat", "build unitTest") } } } @@ -464,7 +469,7 @@ pipeline { } } steps { - mageTargetWin("Winlogbeat Windows Unit test", "x-pack/winlogbeat", "unitTest") + mageTargetWin("Winlogbeat Windows Unit test", "x-pack/winlogbeat", "build unitTest") } } stage('Functionbeat'){ @@ -495,7 +500,7 @@ pipeline { } } steps { - mageTarget("Functionbeat x-pack Mac OS X", "x-pack/functionbeat", "update build test") + mageTarget("Functionbeat x-pack Mac OS X", "x-pack/functionbeat", "build unitTest") } } stage('Functionbeat Windows'){ @@ -508,7 +513,7 @@ pipeline { } } steps { - mageTargetWin("Functionbeat Windows Unit test", "x-pack/functionbeat", "unitTest") + mageTargetWin("Functionbeat Windows Unit test", "x-pack/functionbeat", "build unitTest") } } } @@ -590,12 +595,20 @@ pipeline { } } steps { - k8sTest(["v1.16.2","v1.15.3","v1.14.6","v1.13.10","v1.12.10","v1.11.10"]) + k8sTest(["v1.18.2","v1.17.2","v1.16.4","v1.15.7","v1.14.10"]) } } } } } + post { + always { + runbld() + } + cleanup { + notifyBuildResult(prComment: true) + } + } } def makeTarget(String context, String target, boolean clean = true) { @@ -663,14 +676,8 @@ def withBeatsEnv(boolean archive, Closure body) { ]) { deleteDir() unstash 'source' - if(os == 'linux'){ + if(isDockerInstalled()){ dockerLogin(secret: 
"${DOCKERELASTIC_SECRET}", registry: "${DOCKER_REGISTRY}") - // FIXME workaround until we fix the packer cache - // Retry to avoid DDoS detection from the server - retry(3) { - sleep randomNumber(min: 2, max: 5) - sh 'docker pull docker.elastic.co/observability-ci/database-enterprise:12.2.0.1' - } } dir("${env.BASE_DIR}") { sh(label: "Install Go ${GO_VERSION}", script: ".ci/scripts/install-go.sh") @@ -686,7 +693,7 @@ def withBeatsEnv(boolean archive, Closure body) { } finally { if (archive) { catchError(buildResult: 'SUCCESS', stageResult: 'UNSTABLE') { - junit(allowEmptyResults: true, keepLongStdio: true, testResults: "**/build/TEST*.xml") + junitAndStore(allowEmptyResults: true, keepLongStdio: true, testResults: "**/build/TEST*.xml") archiveArtifacts(allowEmptyArchive: true, artifacts: '**/build/TEST*.out') } } @@ -720,7 +727,7 @@ def withBeatsEnvWin(Closure body) { } } finally { catchError(buildResult: 'SUCCESS', stageResult: 'UNSTABLE') { - junit(allowEmptyResults: true, keepLongStdio: true, testResults: "**\\build\\TEST*.xml") + junitAndStore(allowEmptyResults: true, keepLongStdio: true, testResults: "**\\build\\TEST*.xml") archiveArtifacts(allowEmptyArchive: true, artifacts: '**\\build\\TEST*.out') } } @@ -810,14 +817,14 @@ def dumpFilteredEnvironment(){ def k8sTest(versions){ versions.each{ v -> stage("k8s ${v}"){ - withEnv(["K8S_VERSION=${v}"]){ + withEnv(["K8S_VERSION=${v}", "KIND_VERSION=v0.7.0", "KUBECONFIG=${env.WORKSPACE}/kubecfg"]){ withGithubNotify(context: "K8s ${v}") { withBeatsEnv(false) { - sh(label: "Install k8s", script: """ - eval "\$(gvm use ${GO_VERSION} --format=bash)" - .ci/scripts/kind-setup.sh - """) - sh(label: "Kubernetes Kind",script: "make KUBECONFIG=\"\$(kind get kubeconfig-path)\" -C deploy/kubernetes test") + sh(label: "Install kind", script: ".ci/scripts/install-kind.sh") + sh(label: "Install kubectl", script: ".ci/scripts/install-kubectl.sh") + sh(label: "Integration tests", script: "MODULE=kubernetes make -C metricbeat integration-tests") + sh(label: "Setup kind", script: ".ci/scripts/kind-setup.sh") + sh(label: "Deploy to kubernetes",script: "make -C deploy/kubernetes test") sh(label: 'Delete cluster', script: 'kind delete cluster') } } @@ -862,7 +869,6 @@ def isChangedOSSCode(patterns) { "^\\.ci/.*", ] allPatterns.addAll(patterns) - allPatterns.addAll(getVendorPatterns('libbeat')) return isChanged(allPatterns) } @@ -877,7 +883,6 @@ def isChangedXPackCode(patterns) { "^\\.ci/.*", ] allPatterns.addAll(patterns) - allPatterns.addAll(getVendorPatterns('x-pack/libbeat')) return isChanged(allPatterns) } @@ -885,6 +890,10 @@ def loadConfigEnvVars(){ def empty = [] env.GO_VERSION = readFile(".go-version").trim() + withEnv(["HOME=${env.WORKSPACE}"]) { + sh(label: "Install Go ${env.GO_VERSION}", script: ".ci/scripts/install-go.sh") + } + // Libbeat is the core framework of Beats. It has no additional dependencies // on other projects in the Beats repository. env.BUILD_LIBBEAT = isChangedOSSCode(empty) @@ -943,17 +952,25 @@ def loadConfigEnvVars(){ // involved. 
env.BUILD_KUBERNETES = isChanged(["^deploy/kubernetes/.*"])

-  env.BUILD_GENERATOR = isChangedOSSCode(getVendorPatterns('generator'))
+  def generatorPatterns = ['^generator/.*']
+  generatorPatterns.addAll(getVendorPatterns('generator/common/beatgen'))
+  generatorPatterns.addAll(getVendorPatterns('metricbeat/beater'))
+  env.BUILD_GENERATOR = isChangedOSSCode(generatorPatterns)
 }

 /**
   This method grabs the dependencies of a Go module and transforms them into regexps
 */
 def getVendorPatterns(beatName){
+  def os = goos()
+  def goRoot = "${env.WORKSPACE}/.gvm/versions/go${GO_VERSION}.${os}.amd64"
   def output = ""
-  docker.image("golang:${GO_VERSION}").inside{
+
+  withEnv([
+    "HOME=${env.WORKSPACE}/${env.BASE_DIR}",
+    "PATH=${env.WORKSPACE}/bin:${goRoot}/bin:${env.PATH}",
+  ]) {
     output = sh(label: 'Get vendor dependency patterns', returnStdout: true, script: """
-      export HOME=${WORKSPACE}/${BASE_DIR}
       go list -mod=vendor -f '{{ .ImportPath }}{{ "\n" }}{{ join .Deps "\n" }}' ./${beatName}\
       |awk '{print \$1"/.*"}'\
       |sed -e "s#github.com/elastic/beats/v7/##g"
@@ -970,3 +987,39 @@ def setGitConfig(){
     fi
   ''')
 }
+
+def isDockerInstalled(){
+  return sh(label: 'check for Docker', script: 'command -v docker', returnStatus: true) == 0
+}
+
+def junitAndStore(Map params = [:]){
+  junit(params)
+  // STAGE_NAME env variable could be null in some cases, so let's use the current milliseconds
+  def stageName = env.STAGE_NAME ? env.STAGE_NAME.replaceAll("[\\W]|_",'-') : "uncategorized-${new java.util.Date().getTime()}"
+  stash(includes: params.testResults, allowEmpty: true, name: stageName, useDefaultExcludes: true)
+  stashedTestReports[stageName] = stageName
+}
+
+def runbld() {
+  catchError(buildResult: 'SUCCESS', message: 'runbld post build action failed.') {
+    if (stashedTestReports) {
+      dir("${env.BASE_DIR}") {
+        sh(label: 'Prepare workspace context',
+           script: 'find . -type f -name "TEST*.xml" -path "*/build/*" -delete')
+        // Unstash the test reports
+        stashedTestReports.each { k, v ->
+          dir(k) {
+            unstash v
+          }
+        }
+        sh(label: 'Process JUnit reports with runbld',
+           script: '''\
+           cat >./runbld-script < 0 {
+		return fmt.Errorf("failed modules: %s", strings.Join(failedModules, ", "))
+	}
 	return nil
 }

 // GoTest invokes "go test" and reports the results to stdout. It returns an
diff --git a/dev-tools/mage/integtest.go b/dev-tools/mage/integtest.go
index 6fc26915415..f573d3df80a 100644
--- a/dev-tools/mage/integtest.go
+++ b/dev-tools/mage/integtest.go
@@ -19,334 +19,334 @@ package mage

 import (
 	"fmt"
-	"io/ioutil"
-	"log"
 	"os"
 	"path/filepath"
-	"runtime"
 	"strconv"
-	"strings"
-	"sync"
-
-	"github.com/pkg/errors"

+	"github.com/joeshaw/multierror"
 	"github.com/magefile/mage/mg"
-	"github.com/magefile/mage/sh"
+	"github.com/pkg/errors"
 )

 const (
-	// BEATS_DOCKER_INTEGRATION_TEST_ENV is used to indicate that we are inside
-	// of the Docker integration test environment (e.g. in a container).
-	beatsDockerIntegrationTestEnvVar = "BEATS_DOCKER_INTEGRATION_TEST_ENV"
+	// BEATS_INSIDE_INTEGRATION_TEST_ENV is used to indicate that we are inside
+	// of the integration test environment.
+	insideIntegrationTestEnvVar = "BEATS_INSIDE_INTEGRATION_TEST_ENV"
 )

 var (
-	integTestUseCount     int32      // Reference count for the integ test env.
-	integTestUseCountLock sync.Mutex // Lock to guard integTestUseCount.
-
-	integTestLock sync.Mutex // Only allow one integration test at a time.
+	globalIntegrationTesters        map[string]IntegrationTester
+	globalIntegrationTestSetupSteps IntegrationTestSteps

-	integTestBuildImagesOnce sync.Once // Build images one time for all integ testing.
-)
-
-// Integration Test Configuration
-var (
-	// StackEnvironment specifies what testing environment
-	// to use (like snapshot (default), latest, 5x). Formerly known as
-	// TESTING_ENVIRONMENT.
-	StackEnvironment = EnvOr("STACK_ENVIRONMENT", "snapshot")
+	defaultPassthroughEnvVars = []string{
+		"TEST_COVERAGE",
+		"RACE_DETECTOR",
+		"TEST_TAGS",
+		"PYTHON_EXE",
+		"MODULE",
+		"KUBECONFIG",
+		"KUBE_CONFIG",
+	}
 )

-// AddIntegTestUsage increments the use count for the integration test
-// environment and prevents it from being stopped until the last call to
-// StopIntegTestEnv(). You should also pair this with
-// 'defer StopIntegTestEnv()'.
-//
-// This allows for the same environment to be reused by multiple tests (like
-// both Go and Python) without tearing it down in between runs.
-func AddIntegTestUsage() {
-	if IsInIntegTestEnv() {
-		return
+// RegisterIntegrationTester registers an integration tester.
+func RegisterIntegrationTester(tester IntegrationTester) {
+	if globalIntegrationTesters == nil {
+		globalIntegrationTesters = make(map[string]IntegrationTester)
 	}
-
-	integTestUseCountLock.Lock()
-	defer integTestUseCountLock.Unlock()
-	integTestUseCount++
+	globalIntegrationTesters[tester.Name()] = tester
 }

-// StopIntegTestEnv will stop and removing the integration test environment
-// (e.g. docker-compose rm --stop --force) when there are no more users
-// of the environment.
-func StopIntegTestEnv() error {
-	if IsInIntegTestEnv() {
-		return nil
-	}
-
-	integTestUseCountLock.Lock()
-	defer integTestUseCountLock.Unlock()
-	if integTestUseCount == 0 {
-		panic("integTestUseCount is 0. Did you call AddIntegTestUsage()?")
-	}
-
-	integTestUseCount--
-	if integTestUseCount > 0 {
-		return nil
-	}
-
-	if err := haveIntegTestEnvRequirements(); err != nil {
-		// Ignore error because it will be logged by RunIntegTest.
-		return nil
-	}
+// RegisterIntegrationTestSetupStep registers an integration test setup step.
+func RegisterIntegrationTestSetupStep(step IntegrationTestSetupStep) {
+	globalIntegrationTestSetupSteps = append(globalIntegrationTestSetupSteps, step)
+}

-	if _, skip := skipIntegTest(); skip {
-		return nil
-	}
+// IntegrationTestSetupStep is interface used by a step in the integration setup
+// chain. Example could be: Terraform -> Kind -> Kubernetes (IntegrationTester).
+type IntegrationTestSetupStep interface {
+	// Name is the name of the step.
+	Name() string
+	// Use returns true in the case that the step should be used. Not called
+	// when a step is defined as a dependency of a tester.
+	Use(dir string) (bool, error)
+	// Setup sets up the environment for the integration test.
+	Setup(env map[string]string) error
+	// Teardown brings down the environment for the integration test.
+	Teardown(env map[string]string) error
+}

-	composeEnv, err := integTestDockerComposeEnvVars()
-	if err != nil {
-		return err
-	}
+// IntegrationTestSteps wraps all the steps and completes them in the order added.
+type IntegrationTestSteps []IntegrationTestSetupStep

-	// Stop docker-compose when reference count hits 0.
-	fmt.Println(">> Stopping Docker test environment...")
+// Name is the name of the step.
+func (steps IntegrationTestSteps) Name() string {
+	return "IntegrationTestSteps"
+}

-	// Docker-compose rm is noisy. So only pass through stderr when in verbose.
-	out := ioutil.Discard
-	if mg.Verbose() {
-		out = os.Stderr
+// Setup calls Setup on each step in the order defined.
+//
+// In the case that Setup fails on a step, Teardown will be called on the previous
+// successful steps.
+func (steps IntegrationTestSteps) Setup(env map[string]string) error {
+	for i, step := range steps {
+		if mg.Verbose() {
+			fmt.Printf("Setup %s...\n", step.Name())
+		}
+		if err := step.Setup(env); err != nil {
+			prev := i - 1
+			if prev >= 0 {
+				// errors ignored
+				_ = steps.teardownFrom(prev, env)
+			}
+			return errors.Wrapf(err, "%s setup failed", step.Name())
+		}
 	}
-
-	_, err = sh.Exec(
-		composeEnv,
-		ioutil.Discard,
-		out,
-		"docker-compose",
-		"-p", dockerComposeProjectName(),
-		"rm", "--stop", "--force",
-	)
-	return err
+	return nil
 }

-// RunIntegTest executes the given target inside the integration testing
-// environment (Docker).
-// Use TEST_COVERAGE=true to enable code coverage profiling.
-// Use RACE_DETECTOR=true to enable the race detector.
-// Use STACK_ENVIRONMENT=env to specify what testing environment
-// to use (like snapshot (default), latest, 5x).
+// Teardown calls Teardown in the reverse order defined.
 //
-// Always use this with AddIntegTestUsage() and defer StopIntegTestEnv().
-func RunIntegTest(mageTarget string, test func() error, passThroughEnvVars ...string) error {
-	if reason, skip := skipIntegTest(); skip {
-		fmt.Printf(">> %v: Skipping because %v\n", mageTarget, reason)
-		return nil
+// In the case a teardown step fails the error is recorded but the
+// previous steps' teardown is still called. This guarantees that teardown
+// will always be called for each step.
+func (steps IntegrationTestSteps) Teardown(env map[string]string) error {
+	return steps.teardownFrom(len(steps)-1, env)
+}
+
+func (steps IntegrationTestSteps) teardownFrom(start int, env map[string]string) error {
+	var errs multierror.Errors
+	for i := start; i >= 0; i-- {
+		if mg.Verbose() {
+			fmt.Printf("Teardown %s...\n", steps[i].Name())
+		}
+		if err := steps[i].Teardown(env); err != nil {
+			errs = append(errs, errors.Wrapf(err, "%s teardown failed", steps[i].Name()))
+		}
 	}
+	return errs.Err()
+}

-	AddIntegTestUsage()
-	defer StopIntegTestEnv()
+// IntegrationTester is interface used by the actual test runner.
+type IntegrationTester interface {
+	// Name returns the name of the tester.
+	Name() string
+	// Use returns true in the case that the tester should be used.
+	Use(dir string) (bool, error)
+	// HasRequirements returns an error if requirements are missing.
+	HasRequirements() error
+	// Test performs executing the test inside the environment.
+	Test(dir string, mageTarget string, env map[string]string) error
+	// InsideTest performs the actual test on the inside of environment.
+	InsideTest(test func() error) error
+	// StepRequirements returns the steps this tester requires. These
+	// are always placed before other autodiscover steps.
+	StepRequirements() IntegrationTestSteps
+}

-	env := []string{
-		"TEST_COVERAGE",
-		"RACE_DETECTOR",
-		"TEST_TAGS",
-		"PYTHON_EXE",
-		"MODULE",
-	}
-	env = append(env, passThroughEnvVars...)
-	return runInIntegTestEnv(mageTarget, test, env...)
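(Review note: the runner type defined next builds on the step contract above — `Setup` in declaration order, `Teardown` in reverse, and an unwind of the already-completed steps when a later `Setup` fails. A self-contained toy mirror of that contract; all names here are illustrative and not part of the package:)

```go
// steps_contract.go — toy mirror of the IntegrationTestSteps semantics.
package main

import "fmt"

type step struct {
	name    string
	failSet bool
}

func (s step) setup() error {
	if s.failSet {
		return fmt.Errorf("%s setup failed", s.name)
	}
	fmt.Println("Setup", s.name)
	return nil
}

func (s step) teardown() { fmt.Println("Teardown", s.name) }

func run(steps []step) error {
	for i, s := range steps {
		if err := s.setup(); err != nil {
			// Unwind only the steps that completed, newest first.
			for j := i - 1; j >= 0; j-- {
				steps[j].teardown()
			}
			return err
		}
	}
	// Normal path: full teardown in reverse order.
	for i := len(steps) - 1; i >= 0; i-- {
		steps[i].teardown()
	}
	return nil
}

func main() {
	// "kind" fails to set up, so only "terraform" is torn back down.
	_ = run([]step{{name: "terraform"}, {name: "kind", failSet: true}, {name: "kubernetes"}})
}
```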
+// IntegrationRunner performs the running of the integration tests.
+type IntegrationRunner struct {
+	steps  IntegrationTestSteps
+	tester IntegrationTester
+	dir    string
+	env    map[string]string
+}

-func runInIntegTestEnv(mageTarget string, test func() error, passThroughEnvVars ...string) error {
-	if IsInIntegTestEnv() {
-		// Fix file permissions after test is done writing files as root.
-		if runtime.GOOS != "windows" {
-			defer DockerChown(".")
-		}
-		return test()
-	}
+// IntegrationRunners is an array of multiple runners.
+type IntegrationRunners []*IntegrationRunner

-	var err error
-	integTestBuildImagesOnce.Do(func() { err = dockerComposeBuildImages() })
+// NewIntegrationRunners returns the integration test runners discovered from the provided path.
+func NewIntegrationRunners(path string, passInEnv map[string]string) (IntegrationRunners, error) {
+	cwd, err := os.Getwd()
 	if err != nil {
-		return err
+		return nil, err
 	}
-
-	// Test that we actually have Docker and docker-compose.
-	if err := haveIntegTestEnvRequirements(); err != nil {
-		return errors.Wrapf(err, "failed to run %v target in integration environment", mageTarget)
+	dir := filepath.Join(cwd, path)
+
+	// Load the overall steps to use (skipped inside of test environment, as they are never run on the inside).
+	// These steps are duplicated per scenario.
+	var steps IntegrationTestSteps
+	if !IsInIntegTestEnv() {
+		for _, step := range globalIntegrationTestSetupSteps {
+			use, err := step.Use(dir)
+			if err != nil {
+				return nil, errors.Wrapf(err, "%s step failed on Use", step.Name())
+			}
+			if use {
+				steps = append(steps, step)
+			}
+		}
 	}

-	// Pre-build a mage binary to execute inside docker so that we don't need to
-	// have mage installed inside the container.
-	mg.Deps(buildMage)
+	// Create the runners (can only be multiple).
+	var runners IntegrationRunners
+	for _, t := range globalIntegrationTesters {
+		use, err := t.Use(dir)
+		if err != nil {
+			return nil, errors.Wrapf(err, "%s tester failed on Use", t.Name())
+		}
+		if use {
+			// Create the steps for the specific runner.
+			var runnerSteps IntegrationTestSteps
+			requirements := t.StepRequirements()
+			if requirements != nil {
+				runnerSteps = append(runnerSteps, requirements...)
+			}
+			runnerSteps = append(runnerSteps, steps...)
+
+			// Create the custom env for the runner.
+			env := map[string]string{}
+			for k, v := range passInEnv {
+				env[k] = v
+			}
+			env[insideIntegrationTestEnvVar] = "true"
+			passThroughEnvs(env, defaultPassthroughEnvVars...)
+			if mg.Verbose() {
+				env["MAGEFILE_VERBOSE"] = "1"
+			}
+			if UseVendor {
+				env["GOFLAGS"] = "-mod=vendor"
+			}
+
+			runner := &IntegrationRunner{
+				steps:  runnerSteps,
+				tester: t,
+				dir:    dir,
+				env:    env,
+			}
+			runners = append(runners, runner)
+		}
+	}
+	return runners, nil
+}

-	// Determine the path to use inside the container.
-	repo, err := GetProjectRepoInfo()
+// NewDockerIntegrationRunner returns an integration runner configured only for docker.
+func NewDockerIntegrationRunner(passThroughEnvVars ...string) (*IntegrationRunner, error) {
+	cwd, err := os.Getwd()
 	if err != nil {
-		return err
-	}
-	magePath := filepath.Join("/go/src", repo.CanonicalRootImportPath, repo.SubDir, "build/mage-linux-amd64")
-
-	// Build docker-compose args.
-	args := []string{"-p", dockerComposeProjectName(), "run",
-		"-e", "DOCKER_COMPOSE_PROJECT_NAME=" + dockerComposeProjectName(),
-		// Disable strict.perms because we moust host dirs inside containers
-		// and the UID/GID won't meet the strict requirements.
-		"-e", "BEAT_STRICT_PERMS=false",
-		// compose.EnsureUp needs to know the environment type.
- "-e", "STACK_ENVIRONMENT=" + StackEnvironment, - "-e", "TESTING_ENVIRONMENT=" + StackEnvironment, + return nil, err } - if UseVendor { - args = append(args, "-e", "GOFLAGS=-mod=vendor") + tester, ok := globalIntegrationTesters["docker"] + if !ok { + return nil, fmt.Errorf("docker integration test runner not registered") } - args, err = addUidGidEnvArgs(args) - if err != nil { - return err + var runnerSteps IntegrationTestSteps + requirements := tester.StepRequirements() + if requirements != nil { + runnerSteps = append(runnerSteps, requirements...) } - for _, envVar := range passThroughEnvVars { - args = append(args, "-e", envVar+"="+os.Getenv(envVar)) + + // Create the custom env for the runner. + env := map[string]string{ + insideIntegrationTestEnvVar: "true", } + passThroughEnvs(env, defaultPassthroughEnvVars...) + passThroughEnvs(env, passThroughEnvVars...) if mg.Verbose() { - args = append(args, "-e", "MAGEFILE_VERBOSE=1") + env["MAGEFILE_VERBOSE"] = "1" } - args = append(args, - "-e", beatsDockerIntegrationTestEnvVar+"=true", - "beat", // Docker compose container name. - magePath, - mageTarget, - ) - - composeEnv, err := integTestDockerComposeEnvVars() - if err != nil { - return err + if UseVendor { + env["GOFLAGS"] = "-mod=vendor" } - // Only allow one usage at a time. - integTestLock.Lock() - defer integTestLock.Unlock() - - _, err = sh.Exec( - composeEnv, - os.Stdout, - os.Stderr, - "docker-compose", - args..., - ) - return err -} - -// IsInIntegTestEnv return true if executing inside the integration test -// environment. -func IsInIntegTestEnv() bool { - _, found := os.LookupEnv(beatsDockerIntegrationTestEnvVar) - return found -} - -func haveIntegTestEnvRequirements() error { - if err := HaveDockerCompose(); err != nil { - return err - } - if err := HaveDocker(); err != nil { - return err + runner := &IntegrationRunner{ + steps: runnerSteps, + tester: tester, + dir: cwd, + env: env, } - return nil + return runner, nil } -// skipIntegTest returns true if integ tests should be skipped. -func skipIntegTest() (reason string, skip bool) { +// Test actually performs the test. +func (r *IntegrationRunner) Test(mageTarget string, test func() error) (err error) { + // Inside the testing environment just run the test. if IsInIntegTestEnv() { - return "", false + err = r.tester.InsideTest(test) + return } // Honor the TEST_ENVIRONMENT value if set. if testEnvVar, isSet := os.LookupEnv("TEST_ENVIRONMENT"); isSet { - enabled, err := strconv.ParseBool(testEnvVar) + var enabled bool + enabled, err = strconv.ParseBool(testEnvVar) if err != nil { - panic(errors.Wrap(err, "failed to parse TEST_ENVIRONMENT value")) + err = errors.Wrap(err, "failed to parse TEST_ENVIRONMENT value") + return + } + if !enabled { + err = fmt.Errorf("TEST_ENVIRONMENT=%s", testEnvVar) + return } - return "TEST_ENVIRONMENT=" + testEnvVar, !enabled - } - - // Otherwise skip if we don't have all the right dependencies. - if err := haveIntegTestEnvRequirements(); err != nil { - // Skip if we don't meet the requirements. - log.Println("Skipping integ test because:", err) - return "docker is not available", true } - return "", false -} - -// integTestDockerComposeEnvVars returns the environment variables used for -// executing docker-compose (not the variables passed into the containers). -// docker-compose uses these when evaluating docker-compose.yml files. 
-func integTestDockerComposeEnvVars() (map[string]string, error) { - esBeatsDir, err := ElasticBeatsDir() + // log missing requirements and do nothing + err = r.tester.HasRequirements() if err != nil { - return nil, err + // log error; and return (otherwise on machines without requirements it will mark the tests as failed) + fmt.Printf("skipping test run with %s due to missing requirements: %s\n", r.tester.Name(), err) + err = nil + return } - return map[string]string{ - "ES_BEATS": esBeatsDir, - "STACK_ENVIRONMENT": StackEnvironment, - // Deprecated use STACK_ENVIRONMENT instead (it's more descriptive). - "TESTING_ENVIRONMENT": StackEnvironment, - }, nil -} - -// dockerComposeProjectName returns the project name to use with docker-compose. -// It is passed to docker-compose using the `-p` flag. And is passed to our -// Go and Python testing libraries through the DOCKER_COMPOSE_PROJECT_NAME -// environment variable. -func dockerComposeProjectName() string { - commit, err := CommitHash() - if err != nil { - panic(errors.Wrap(err, "failed to construct docker compose project name")) + if err = r.steps.Setup(r.env); err != nil { + return } - version, err := BeatQualifiedVersion() - if err != nil { - panic(errors.Wrap(err, "failed to construct docker compose project name")) + // catch any panics to run teardown + inTeardown := false + defer func() { + if recoverErr := recover(); recoverErr != nil { + err = recoverErr.(error) + if !inTeardown { + // ignore errors + _ = r.steps.Teardown(r.env) + } + } + }() + + if mg.Verbose() { + fmt.Printf(">> Running testing inside of %s...\n", r.tester.Name()) } - version = strings.NewReplacer(".", "_").Replace(version) - - projectName := "{{.BeatName}}_{{.Version}}_{{.ShortCommit}}-{{.StackEnvironment}}" - projectName = MustExpand(projectName, map[string]interface{}{ - "StackEnvironment": StackEnvironment, - "ShortCommit": commit[:10], - "Version": version, - }) - return projectName -} -// dockerComposeBuildImages builds all images in the docker-compose.yml file. -func dockerComposeBuildImages() error { - fmt.Println(">> Building docker images") + err = r.tester.Test(r.dir, mageTarget, r.env) - composeEnv, err := integTestDockerComposeEnvVars() - if err != nil { - return err + if mg.Verbose() { + fmt.Printf(">> Done running testing inside of %s...\n", r.tester.Name()) } - args := []string{"-p", dockerComposeProjectName(), "build", "--force-rm"} - if _, noCache := os.LookupEnv("DOCKER_NOCACHE"); noCache { - args = append(args, "--no-cache") + inTeardown = true + if teardownErr := r.steps.Teardown(r.env); teardownErr != nil { + if err == nil { + // test didn't error, but teardown did + err = teardownErr + } } + return +} - if _, forcePull := os.LookupEnv("DOCKER_PULL"); forcePull { - args = append(args, "--pull") +// Test runs the test on each runner and collects the errors. +func (r IntegrationRunners) Test(mageTarget string, test func() error) error { + var errs multierror.Errors + for _, runner := range r { + if err := runner.Test(mageTarget, test); err != nil { + errs = append(errs, err) + } } + return errs.Err() +} - out := ioutil.Discard - if mg.Verbose() { - out = os.Stderr +func passThroughEnvs(env map[string]string, passthrough ...string) { + for _, envName := range passthrough { + val, set := os.LookupEnv(envName) + if set { + env[envName] = val + } } +} - _, err = sh.Exec( - composeEnv, - out, - os.Stderr, - "docker-compose", args..., - ) - return err +// IsInIntegTestEnv return true if executing inside the integration test environment. 
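(Review note on `IntegrationRunner.Test` above: teardown is guaranteed even when the wrapped test panics, by recovering in a deferred function and tearing down only when the normal teardown has not yet started. A minimal sketch of that shape with stand-in setup/teardown; note the diff asserts `recover()` straight to `error`, whereas `fmt.Errorf` below also tolerates panics with non-error values:)

```go
// teardown_guarantee.go — sketch of the recover/teardown contract.
package main

import "fmt"

func runTest(test func() error) (err error) {
	fmt.Println("setup")
	inTeardown := false
	defer func() {
		if r := recover(); r != nil {
			err = fmt.Errorf("panic: %v", r)
			if !inTeardown {
				// The test panicked before the normal teardown ran.
				fmt.Println("teardown (after panic)")
			}
		}
	}()
	err = test()
	inTeardown = true
	fmt.Println("teardown")
	return err
}

func main() {
	_ = runTest(func() error { panic("boom") })
}
```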
+func IsInIntegTestEnv() bool {
+	_, found := os.LookupEnv(insideIntegrationTestEnvVar)
+	return found
+}
diff --git a/dev-tools/mage/integtest_docker.go b/dev-tools/mage/integtest_docker.go
new file mode 100644
index 00000000000..6bbca1f6d64
--- /dev/null
+++ b/dev-tools/mage/integtest_docker.go
@@ -0,0 +1,233 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.

+package mage

+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"runtime"
+	"strings"
+	"sync"
+
+	"github.com/pkg/errors"
+
+	"github.com/magefile/mage/mg"
+	"github.com/magefile/mage/sh"
+)

+var (
+	// StackEnvironment specifies what testing environment
+	// to use (like snapshot (default), latest, 5x). Formerly known as
+	// TESTING_ENVIRONMENT.
+	StackEnvironment = EnvOr("STACK_ENVIRONMENT", "snapshot")
+)

+func init() {
+	RegisterIntegrationTester(&DockerIntegrationTester{})
+}

+type DockerIntegrationTester struct {
+	buildImagesOnce sync.Once
+}

+// Name returns docker name.
+func (d *DockerIntegrationTester) Name() string {
+	return "docker"
+}

+// Use determines if this tester should be used.
+func (d *DockerIntegrationTester) Use(dir string) (bool, error) {
+	dockerFile := filepath.Join(dir, "docker-compose.yml")
+	if _, err := os.Stat(dockerFile); !os.IsNotExist(err) {
+		return true, nil
+	}
+	return false, nil
+}

+// HasRequirements ensures that the required docker and docker-compose are installed.
+func (d *DockerIntegrationTester) HasRequirements() error {
+	if err := HaveDocker(); err != nil {
+		return err
+	}
+	if err := HaveDockerCompose(); err != nil {
+		return err
+	}
+	return nil
+}

+// StepRequirements returns the steps required for this tester.
+func (d *DockerIntegrationTester) StepRequirements() IntegrationTestSteps {
+	return IntegrationTestSteps{&MageIntegrationTestStep{}}
+}

+// Test performs the tests with docker-compose.
+func (d *DockerIntegrationTester) Test(_ string, mageTarget string, env map[string]string) error {
+	var err error
+	d.buildImagesOnce.Do(func() { err = dockerComposeBuildImages() })
+	if err != nil {
+		return err
+	}
+
+	// Determine the path to use inside the container.
+	repo, err := GetProjectRepoInfo()
+	if err != nil {
+		return err
+	}
+	magePath := filepath.Join("/go/src", repo.CanonicalRootImportPath, repo.SubDir, "build/mage-linux-amd64")
+
+	// Execute inside of docker-compose.
+	args := []string{"-p", dockerComposeProjectName(), "run",
+		"-e", "DOCKER_COMPOSE_PROJECT_NAME=" + dockerComposeProjectName(),
+		// Disable strict.perms because we mount host dirs inside containers
+		// and the UID/GID won't meet the strict requirements.
+		"-e", "BEAT_STRICT_PERMS=false",
+		// compose.EnsureUp needs to know the environment type.
+		"-e", "STACK_ENVIRONMENT=" + StackEnvironment,
+		"-e", "TESTING_ENVIRONMENT=" + StackEnvironment,
+	}
+	args, err = addUidGidEnvArgs(args)
+	if err != nil {
+		return err
+	}
+	for envName, envVal := range env {
+		args = append(args, "-e", fmt.Sprintf("%s=%s", envName, envVal))
+	}
+	args = append(args,
+		"beat", // Docker compose container name.
+		magePath,
+		mageTarget,
+	)
+
+	composeEnv, err := integTestDockerComposeEnvVars()
+	if err != nil {
+		return err
+	}
+
+	_, testErr := sh.Exec(
+		composeEnv,
+		os.Stdout,
+		os.Stderr,
+		"docker-compose",
+		args...,
+	)
+
+	// Docker-compose rm is noisy. So only pass through stderr when in verbose.
+	out := ioutil.Discard
+	if mg.Verbose() {
+		out = os.Stderr
+	}
+
+	_, err = sh.Exec(
+		composeEnv,
+		ioutil.Discard,
+		out,
+		"docker-compose",
+		"-p", dockerComposeProjectName(),
+		"rm", "--stop", "--force",
+	)
+	if err != nil && testErr == nil {
+		// docker-compose rm failed but the test didn't
+		return err
+	}
+	return testErr
+}

+// InsideTest performs the tests inside of environment.
+func (d *DockerIntegrationTester) InsideTest(test func() error) error {
+	// Fix file permissions after test is done writing files as root.
+	if runtime.GOOS != "windows" {
+		defer DockerChown(".")
+	}
+	return test()
+}

+// integTestDockerComposeEnvVars returns the environment variables used for
+// executing docker-compose (not the variables passed into the containers).
+// docker-compose uses these when evaluating docker-compose.yml files.
+func integTestDockerComposeEnvVars() (map[string]string, error) {
+	esBeatsDir, err := ElasticBeatsDir()
+	if err != nil {
+		return nil, err
+	}
+
+	return map[string]string{
+		"ES_BEATS":          esBeatsDir,
+		"STACK_ENVIRONMENT": StackEnvironment,
+		// Deprecated use STACK_ENVIRONMENT instead (it's more descriptive).
+		"TESTING_ENVIRONMENT": StackEnvironment,
+	}, nil
+}

+// dockerComposeProjectName returns the project name to use with docker-compose.
+// It is passed to docker-compose using the `-p` flag. And is passed to our
+// Go and Python testing libraries through the DOCKER_COMPOSE_PROJECT_NAME
+// environment variable.
+func dockerComposeProjectName() string {
+	commit, err := CommitHash()
+	if err != nil {
+		panic(errors.Wrap(err, "failed to construct docker compose project name"))
+	}
+
+	version, err := BeatQualifiedVersion()
+	if err != nil {
+		panic(errors.Wrap(err, "failed to construct docker compose project name"))
+	}
+	version = strings.NewReplacer(".", "_").Replace(version)
+
+	projectName := "{{.BeatName}}_{{.Version}}_{{.ShortCommit}}-{{.StackEnvironment}}"
+	projectName = MustExpand(projectName, map[string]interface{}{
+		"StackEnvironment": StackEnvironment,
+		"ShortCommit":      commit[:10],
+		"Version":          version,
+	})
+	return projectName
+}
+// dockerComposeBuildImages builds all images in the docker-compose.yml file.
+func dockerComposeBuildImages() error {
+	fmt.Println(">> Building docker images")
+
+	composeEnv, err := integTestDockerComposeEnvVars()
+	if err != nil {
+		return err
+	}
+
+	args := []string{"-p", dockerComposeProjectName(), "build", "--force-rm"}
+	if _, noCache := os.LookupEnv("DOCKER_NOCACHE"); noCache {
+		args = append(args, "--no-cache")
+	}
+
+	if _, forcePull := os.LookupEnv("DOCKER_PULL"); forcePull {
+		args = append(args, "--pull")
+	}
+
+	out := ioutil.Discard
+	if mg.Verbose() {
+		out = os.Stderr
+	}
+
+	_, err = sh.Exec(
+		composeEnv,
+		out,
+		os.Stderr,
+		"docker-compose", args...,
+	)
+	return err
+}
diff --git a/dev-tools/mage/integtest_mage.go b/dev-tools/mage/integtest_mage.go
new file mode 100644
index 00000000000..82dcb90fefd
--- /dev/null
+++ b/dev-tools/mage/integtest_mage.go
@@ -0,0 +1,58 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.

+package mage

+import (
+	"sync"
+
+	"github.com/magefile/mage/mg"
+)

+var (
+	buildMageOnce sync.Once
+)

+// MageIntegrationTestStep sets up mage to be run.
+type MageIntegrationTestStep struct{}

+// Name returns the mage name.
+func (m *MageIntegrationTestStep) Name() string {
+	return "mage"
+}

+// Use always returns false.
+//
+// This step should be defined in `StepRequirements` for the tester, for it
+// to be used. It cannot be autodiscovered for usage.
+func (m *MageIntegrationTestStep) Use(dir string) (bool, error) {
+	return false, nil
+}

+// Setup ensures the mage binary is built.
+//
+// Multiple uses of this step will only build the mage binary once.
+func (m *MageIntegrationTestStep) Setup(_ map[string]string) error {
+	// Pre-build a mage binary to execute.
+	buildMageOnce.Do(func() { mg.Deps(buildMage) })
+	return nil
+}

+// Teardown does nothing.
+func (m *MageIntegrationTestStep) Teardown(_ map[string]string) error {
+	return nil
+}
diff --git a/dev-tools/mage/kubectl.go b/dev-tools/mage/kubectl.go
new file mode 100644
index 00000000000..df42bc8049d
--- /dev/null
+++ b/dev-tools/mage/kubectl.go
@@ -0,0 +1,129 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.

+package mage

+import (
+	"fmt"
+	"io"
+	"os"
+	"os/exec"
+	"strings"
+
+	"github.com/magefile/mage/mg"
+	"github.com/magefile/mage/sh"
+)

+// KubectlApply applies the manifest file to the kubernetes cluster.
+//
+// KUBECONFIG must be in `env` to target a specific cluster.
+func KubectlApply(env map[string]string, stdout, stderr io.Writer, filepath string) error {
+	_, err := sh.Exec(
+		env,
+		stdout,
+		stderr,
+		"kubectl",
+		"apply",
+		"-f",
+		filepath,
+	)
+	return err
+}

+// KubectlDelete deletes the resources from the manifest file from the kubernetes cluster.
+//
+// KUBECONFIG must be in `env` to target a specific cluster.
+func KubectlDelete(env map[string]string, stdout, stderr io.Writer, filepath string) error {
+	_, err := sh.Exec(
+		env,
+		stdout,
+		stderr,
+		"kubectl",
+		"delete",
+		"-f",
+		filepath,
+	)
+	return err
+}

+// KubectlApplyInput applies the manifest string to the kubernetes cluster.
+//
+// KUBECONFIG must be in `env` to target a specific cluster.
+func KubectlApplyInput(env map[string]string, stdout, stderr io.Writer, manifest string) error {
+	return kubectlIn(env, stdout, stderr, manifest, "apply", "-f", "-")
+}

+// KubectlDeleteInput deletes the resources from the manifest string from the kubernetes cluster.
+//
+// KUBECONFIG must be in `env` to target a specific cluster.
+func KubectlDeleteInput(env map[string]string, stdout, stderr io.Writer, manifest string) error {
+	return kubectlIn(env, stdout, stderr, manifest, "delete", "-f", "-")
+}

+// KubectlWait waits for a condition to occur for a resource in the kubernetes cluster.
+//
+// KUBECONFIG must be in `env` to target a specific cluster.
+func KubectlWait(env map[string]string, stdout, stderr io.Writer, waitFor, resource string) error {
+	_, err := sh.Exec(
+		env,
+		stdout,
+		stderr,
+		"kubectl",
+		"wait",
+		"--timeout=300s",
+		fmt.Sprintf("--for=%s", waitFor),
+		resource,
+	)
+	return err
+}

+func kubectlIn(env map[string]string, stdout, stderr io.Writer, input string, args ...string) error {
+	c := exec.Command("kubectl", args...)
+	c.Env = os.Environ()
+	for k, v := range env {
+		c.Env = append(c.Env, k+"="+v)
+	}
+	c.Stdout = stdout
+	c.Stderr = stderr
+	c.Stdin = strings.NewReader(input)
+
+	if mg.Verbose() {
+		fmt.Println("exec:", "kubectl", strings.Join(args, " "))
+	}
+
+	return c.Run()
+}

+func kubectlStart(env map[string]string, stdout, stderr io.Writer, args ...string) (*exec.Cmd, error) {
+	c := exec.Command("kubectl", args...)
+	c.Env = os.Environ()
+	for k, v := range env {
+		c.Env = append(c.Env, k+"="+v)
+	}
+	c.Stdout = stdout
+	c.Stderr = stderr
+	c.Stdin = nil
+
+	if mg.Verbose() {
+		fmt.Println("exec:", "kubectl", strings.Join(args, " "))
+	}
+
+	if err := c.Start(); err != nil {
+		return nil, err
+	}
+	return c, nil
+}
diff --git a/dev-tools/mage/kuberemote.go b/dev-tools/mage/kuberemote.go
new file mode 100644
index 00000000000..078680fbf0b
--- /dev/null
+++ b/dev-tools/mage/kuberemote.go
@@ -0,0 +1,612 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package mage + +import ( + "bufio" + "context" + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "encoding/hex" + "encoding/pem" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + "net/url" + "os/exec" + "strings" + "time" + + "github.com/pkg/errors" + "golang.org/x/crypto/ssh" + + apiv1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + "k8s.io/client-go/tools/portforward" + watchtools "k8s.io/client-go/tools/watch" + "k8s.io/client-go/transport/spdy" +) + +const sshBitSize = 4096 + +var mode = int32(256) + +// KubeRemote rsyncs the passed directory to a pod and runs the command inside of that pod. +type KubeRemote struct { + cfg *rest.Config + cs *kubernetes.Clientset + namespace string + name string + workDir string + destDir string + syncDir string + + svcAccName string + secretName string + privateKey []byte + publicKey []byte +} + +// NewKubeRemote creates a new kubernetes remote runner. +func NewKubeRemote(kubeconfig string, namespace string, name string, workDir string, destDir string, syncDir string) (*KubeRemote, error) { + config, err := clientcmd.BuildConfigFromFlags("", kubeconfig) + if err != nil { + return nil, err + } + cs, err := kubernetes.NewForConfig(config) + if err != nil { + return nil, err + } + name = strings.Replace(name, "_", "-", -1) + svcAccName := fmt.Sprintf("%s-sa", name) + secretName := fmt.Sprintf("%s-ssh-key", name) + privateKey, publicKey, err := generateSSHKeyPair() + if err != nil { + return nil, err + } + return &KubeRemote{config, cs, namespace, name, workDir, destDir, syncDir, svcAccName, secretName, privateKey, publicKey}, nil +} + +// Run runs the command remotely on the kubernetes cluster. +func (r *KubeRemote) Run(env map[string]string, stdout io.Writer, stderr io.Writer, args ...string) error { + if err := r.syncSSHKey(); err != nil { + return errors.Wrap(err, "failed to sync SSH secret") + } + defer r.deleteSSHKey() + if err := r.syncServiceAccount(); err != nil { + return err + } + defer r.deleteServiceAccount() + _, err := r.createPod(env, args...) + if err != nil { + return errors.Wrap(err, "failed to create execute pod") + } + defer r.deletePod() + + // wait for SSH to be up inside the init container. + _, err = r.waitForPod(5*time.Minute, podInitReady) + if err != nil { + return errors.Wrap(err, "execute pod init container never started") + } + time.Sleep(1 * time.Second) // SSH inside of container can take a moment + + // forward the SSH port so rsync can be ran. 
+	randomPort, err := getFreePort()
+	if err != nil {
+		return errors.Wrap(err, "failed to find a free port")
+	}
+	stopChannel := make(chan struct{}, 1)
+	readyChannel := make(chan struct{}, 1)
+	f, err := r.portForward([]string{fmt.Sprintf("%d:%d", randomPort, 22)}, stopChannel, readyChannel, stderr, stderr)
+	if err != nil {
+		return err
+	}
+	go f.ForwardPorts()
+	<-readyChannel
+
+	// perform the rsync
+	if err := r.rsync(randomPort, stderr, stderr); err != nil {
+		return errors.Wrap(err, "failed to rsync into the execute pod")
+	}
+
+	// stop port forwarding
+	close(stopChannel)
+
+	// wait for exec container to be running
+	_, err = r.waitForPod(5*time.Minute, containerRunning("exec"))
+	if err != nil {
+		return errors.Wrap(err, "execute pod container never started")
+	}
+
+	// stream the logs of the container
+	err = r.streamLogs("exec", stdout)
+	if err != nil {
+		return errors.Wrap(err, "failed to stream the logs")
+	}
+
+	// wait for exec container to be completely done
+	pod, err := r.waitForPod(30*time.Second, podDone)
+	if err != nil {
+		return errors.Wrap(err, "execute pod didn't terminate after 30 seconds of log stream")
+	}
+
+	// return error on failure
+	if pod.Status.Phase == apiv1.PodFailed {
+		return fmt.Errorf("execute pod test failed")
+	}
+	return nil
+}
+
+// deleteSSHKey deletes the SSH key from the cluster.
+func (r *KubeRemote) deleteSSHKey() {
+	_ = r.cs.CoreV1().Secrets(r.namespace).Delete(r.secretName, &metav1.DeleteOptions{})
+}
+
+// syncSSHKey syncs the SSH key to the cluster.
+func (r *KubeRemote) syncSSHKey() error {
+	// delete before create
+	r.deleteSSHKey()
+	_, err := r.cs.CoreV1().Secrets(r.namespace).Create(createSecretManifest(r.secretName, r.publicKey))
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// deleteServiceAccount deletes the required service account.
+func (r *KubeRemote) deleteServiceAccount() {
+	_ = r.cs.RbacV1().ClusterRoleBindings().Delete(r.name, &metav1.DeleteOptions{})
+	_ = r.cs.RbacV1().ClusterRoles().Delete(r.name, &metav1.DeleteOptions{})
+	_ = r.cs.CoreV1().ServiceAccounts(r.namespace).Delete(r.svcAccName, &metav1.DeleteOptions{})
+}
+
+// syncServiceAccount syncs the required service account.
+func (r *KubeRemote) syncServiceAccount() error {
+	// delete before create
+	r.deleteServiceAccount()
+	_, err := r.cs.CoreV1().ServiceAccounts(r.namespace).Create(createServiceAccountManifest(r.svcAccName))
+	if err != nil {
+		return errors.Wrap(err, "failed to create service account")
+	}
+	_, err = r.cs.RbacV1().ClusterRoles().Create(createClusterRoleManifest(r.name))
+	if err != nil {
+		return errors.Wrap(err, "failed to create cluster role")
+	}
+	_, err = r.cs.RbacV1().ClusterRoleBindings().Create(createClusterRoleBindingManifest(r.name, r.namespace, r.svcAccName))
+	if err != nil {
+		return errors.Wrap(err, "failed to create cluster role binding")
+	}
+	return nil
+}
+
+// createPod creates the pod.
+func (r *KubeRemote) createPod(env map[string]string, cmd ...string) (*apiv1.Pod, error) {
+	r.deletePod() // ensure it doesn't already exist
+	return r.cs.CoreV1().Pods(r.namespace).Create(createPodManifest(r.name, "golang:1.13.9", env, cmd, r.workDir, r.destDir, r.secretName, r.svcAccName))
+}
+
+// deletePod deletes the pod.
+func (r *KubeRemote) deletePod() {
+	_ = r.cs.CoreV1().Pods(r.namespace).Delete(r.name, &metav1.DeleteOptions{})
+}
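// Illustrative caller (hypothetical values; not part of this file): sync a
// repository into a pod and run a mage target inside it, streaming output
// locally.
//
//	remote, err := NewKubeRemote(kubeconfig, "default", "beat-test", workDir, destDir, syncDir)
//	if err != nil {
//		return err
//	}
//	return remote.Run(map[string]string{"CI": "1"}, os.Stdout, os.Stderr, magePath, "goIntegTest")

+// waitForPod waits for the created pod to match the given condition.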
+func (r *KubeRemote) waitForPod(wait time.Duration, condition watchtools.ConditionFunc) (*apiv1.Pod, error) {
+	w, err := r.cs.CoreV1().Pods(r.namespace).Watch(metav1.SingleObject(metav1.ObjectMeta{Name: r.name}))
+	if err != nil {
+		return nil, err
+	}
+
+	ctx, _ := watchtools.ContextWithOptionalTimeout(context.Background(), wait)
+	ev, err := watchtools.UntilWithoutRetry(ctx, w, func(ev watch.Event) (bool, error) {
+		return condition(ev)
+	})
+	if ev != nil {
+		return ev.Object.(*apiv1.Pod), err
+	}
+	return nil, err
+}
+
+// portForward runs the port forwarding so SSH rsync can be run into the pod.
+func (r *KubeRemote) portForward(ports []string, stopChannel, readyChannel chan struct{}, stdout, stderr io.Writer) (*portforward.PortForwarder, error) {
+	roundTripper, upgrader, err := spdy.RoundTripperFor(r.cfg)
+	if err != nil {
+		return nil, err
+	}
+
+	path := fmt.Sprintf("/api/v1/namespaces/%s/pods/%s/portforward", r.namespace, r.name)
+	hostIP := strings.TrimPrefix(r.cfg.Host, "https://")
+	serverURL := url.URL{Scheme: "https", Path: path, Host: hostIP}
+	dialer := spdy.NewDialer(upgrader, &http.Client{Transport: roundTripper}, http.MethodPost, &serverURL)
+	return portforward.New(dialer, ports, stopChannel, readyChannel, stdout, stderr)
+}
+
+// rsync performs the rsync of the sync directory to the destination directory inside of the pod.
+func (r *KubeRemote) rsync(port uint16, stdout, stderr io.Writer) error {
+	privateKeyFile, err := createTempFile(r.privateKey)
+	if err != nil {
+		return err
+	}
+
+	rsh := fmt.Sprintf("/usr/bin/ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o LogLevel=ERROR -p %d -i %s", port, privateKeyFile)
+	args := []string{
+		"--rsh", rsh,
+		"-a", fmt.Sprintf("%s/", r.syncDir),
+		fmt.Sprintf("root@localhost:%s", r.destDir),
+	}
+	cmd := exec.Command("rsync", args...)
+	cmd.Stdout = stdout
+	cmd.Stderr = stderr
+	return cmd.Run()
+}
+
+// streamLogs streams the logs from the execution pod until the pod is terminated.
+func (r *KubeRemote) streamLogs(container string, stdout io.Writer) error {
+	req := r.cs.CoreV1().Pods(r.namespace).GetLogs(r.name, &apiv1.PodLogOptions{
+		Container: container,
+		Follow:    true,
+	})
+	logs, err := req.Stream()
+	if err != nil {
+		return err
+	}
+	defer logs.Close()
+
+	reader := bufio.NewReader(logs)
+	for {
+		bytes, err := reader.ReadBytes('\n')
+		if _, err := stdout.Write(bytes); err != nil {
+			return err
+		}
+		if err != nil {
+			if err != io.EOF {
+				return err
+			}
+			return nil
+		}
+	}
+}
+
+// generateSSHKeyPair generates a new SSH key pair.
+func generateSSHKeyPair() ([]byte, []byte, error) {
+	private, err := rsa.GenerateKey(rand.Reader, sshBitSize)
+	if err != nil {
+		return nil, nil, err
+	}
+	if err = private.Validate(); err != nil {
+		return nil, nil, err
+	}
+	public, err := ssh.NewPublicKey(&private.PublicKey)
+	if err != nil {
+		return nil, nil, err
+	}
+	return encodePrivateKeyToPEM(private), ssh.MarshalAuthorizedKey(public), nil
+}
+
+// encodePrivateKeyToPEM encodes the private key from RSA to PEM format.
+func encodePrivateKeyToPEM(privateKey *rsa.PrivateKey) []byte {
+	privDER := x509.MarshalPKCS1PrivateKey(privateKey)
+	privBlock := pem.Block{
+		Type:    "RSA PRIVATE KEY",
+		Headers: nil,
+		Bytes:   privDER,
+	}
+	return pem.EncodeToMemory(&privBlock)
+}
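// Sanity sketch (hypothetical, not part of this change): the generated pair
// round-trips cleanly through the x/crypto/ssh parsers — the PEM side as a
// private key, the authorized_keys side as a public key.
//
//	priv, pub, err := generateSSHKeyPair()
//	if err != nil { /* handle */ }
//	if _, err := ssh.ParsePrivateKey(priv); err != nil { /* handle */ }
//	if _, _, _, _, err := ssh.ParseAuthorizedKey(pub); err != nil { /* handle */ }

+// getFreePort finds a free port.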
+func getFreePort() (uint16, error) {
+	addr, err := net.ResolveTCPAddr("tcp", "localhost:0")
+	if err != nil {
+		return 0, err
+	}
+	l, err := net.ListenTCP("tcp", addr)
+	if err != nil {
+		return 0, err
+	}
+	defer l.Close()
+	return uint16(l.Addr().(*net.TCPAddr).Port), nil
+}
+
+// createSecretManifest creates the secret object to create in the cluster.
+//
+// This is the public key that the sshd uses as the authorized key.
+func createSecretManifest(name string, publicKey []byte) *apiv1.Secret {
+	return &apiv1.Secret{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: name,
+		},
+		StringData: map[string]string{
+			"authorized_keys": string(publicKey),
+		},
+	}
+}
+
+// createServiceAccountManifest creates the service account the pod will use.
+func createServiceAccountManifest(name string) *apiv1.ServiceAccount {
+	return &apiv1.ServiceAccount{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: name,
+		},
+	}
+}
+
+// createClusterRoleManifest creates the cluster role the pod will use.
+//
+// This gives the pod all permissions on everything!
+func createClusterRoleManifest(name string) *rbacv1.ClusterRole {
+	return &rbacv1.ClusterRole{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: name,
+		},
+		Rules: []rbacv1.PolicyRule{
+			rbacv1.PolicyRule{
+				Verbs:     []string{"*"},
+				APIGroups: []string{"*"},
+				Resources: []string{"*"},
+			},
+			rbacv1.PolicyRule{
+				Verbs:           []string{"*"},
+				NonResourceURLs: []string{"*"},
+			},
+		},
+	}
+}
+
+// createClusterRoleBindingManifest creates the cluster role binding the pod will use.
+//
+// This binds the service account to the cluster role.
+func createClusterRoleBindingManifest(name string, namespace string, svcAccName string) *rbacv1.ClusterRoleBinding {
+	return &rbacv1.ClusterRoleBinding{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: name,
+		},
+		Subjects: []rbacv1.Subject{
+			rbacv1.Subject{
+				Kind:      "ServiceAccount",
+				Name:      svcAccName,
+				Namespace: namespace,
+			},
+		},
+		RoleRef: rbacv1.RoleRef{
+			APIGroup: "rbac.authorization.k8s.io",
+			Kind:     "ClusterRole",
+			Name:     name,
+		},
+	}
+}
+
+// createPodManifest creates the pod inside of the cluster that will be used for remote execution.
+//
+// Creates a pod with an init container that runs sshd-rsync; once the first connection closes, the init container
+// exits and the exec container starts, using the rsync'd directory as its work directory.
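// In rough YAML terms the generated pod looks like this (abbreviated,
// illustrative field values only):
//
//	spec:
//	  serviceAccountName: <name>-sa
//	  restartPolicy: Never
//	  initContainers:
//	  - name: sync-init        # ernoaapa/sshd-rsync on port 22; ONE_TIME=true,
//	                           # so it exits after the first SSH session closes
//	  containers:
//	  - name: exec             # runs the requested command from <workDir>
//	  volumes:
//	  - ssh-config             # secret holding the generated authorized_keys
//	  - destdir                # emptyDir shared by both containers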
+func createPodManifest(name string, image string, env map[string]string, cmd []string, workDir string, destDir string, secretName string, svcAccName string) *apiv1.Pod { + execEnv := []apiv1.EnvVar{ + apiv1.EnvVar{ + Name: "NODE_NAME", + ValueFrom: &apiv1.EnvVarSource{ + FieldRef: &apiv1.ObjectFieldSelector{ + FieldPath: "spec.nodeName", + }, + }, + }, + } + for k, v := range env { + execEnv = append(execEnv, apiv1.EnvVar{ + Name: k, + Value: v, + }) + } + return &apiv1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + Spec: apiv1.PodSpec{ + ServiceAccountName: svcAccName, + RestartPolicy: apiv1.RestartPolicyNever, + InitContainers: []apiv1.Container{ + { + Name: "sync-init", + Image: "ernoaapa/sshd-rsync", + Ports: []apiv1.ContainerPort{ + { + Name: "ssh", + Protocol: apiv1.ProtocolTCP, + ContainerPort: 22, + }, + }, + Env: []apiv1.EnvVar{ + { + Name: "ONE_TIME", + Value: "true", + }, + }, + VolumeMounts: []apiv1.VolumeMount{ + { + Name: "ssh-config", + MountPath: "/root/.ssh/authorized_keys", + SubPath: "authorized_keys", + }, + { + Name: "destdir", + MountPath: destDir, + }, + }, + }, + }, + Containers: []apiv1.Container{ + apiv1.Container{ + Name: "exec", + Image: image, + Command: cmd, + WorkingDir: workDir, + Env: execEnv, + VolumeMounts: []apiv1.VolumeMount{ + { + Name: "destdir", + MountPath: destDir, + }, + }, + }, + }, + Volumes: []apiv1.Volume{ + { + Name: "ssh-config", + VolumeSource: apiv1.VolumeSource{ + Secret: &apiv1.SecretVolumeSource{ + SecretName: secretName, + DefaultMode: &mode, + }, + }, + }, + { + Name: "destdir", + VolumeSource: apiv1.VolumeSource{ + EmptyDir: &apiv1.EmptyDirVolumeSource{}, + }, + }, + }, + }, + } +} + +func podInitReady(event watch.Event) (bool, error) { + switch event.Type { + case watch.Deleted: + return false, k8serrors.NewNotFound(schema.GroupResource{Resource: "pods"}, "") + } + switch t := event.Object.(type) { + case *apiv1.Pod: + switch t.Status.Phase { + case apiv1.PodFailed, apiv1.PodSucceeded: + return false, nil + case apiv1.PodRunning: + return false, nil + case apiv1.PodPending: + return isInitContainersReady(t), nil + } + } + return false, nil +} + +func isInitContainersReady(pod *apiv1.Pod) bool { + if isScheduled(pod) && isInitContainersRunning(pod) { + return true + } + return false +} + +func isScheduled(pod *apiv1.Pod) bool { + if &pod.Status != nil && len(pod.Status.Conditions) > 0 { + for _, condition := range pod.Status.Conditions { + if condition.Type == apiv1.PodScheduled && + condition.Status == apiv1.ConditionTrue { + return true + } + } + } + return false +} + +func isInitContainersRunning(pod *apiv1.Pod) bool { + if &pod.Status != nil { + if len(pod.Spec.InitContainers) != len(pod.Status.InitContainerStatuses) { + return false + } + for _, status := range pod.Status.InitContainerStatuses { + if status.State.Running == nil { + return false + } + } + return true + } + return false +} + +func containerRunning(containerName string) func(watch.Event) (bool, error) { + return func(event watch.Event) (bool, error) { + switch event.Type { + case watch.Deleted: + return false, k8serrors.NewNotFound(schema.GroupResource{Resource: "pods"}, "") + } + switch t := event.Object.(type) { + case *apiv1.Pod: + switch t.Status.Phase { + case apiv1.PodFailed, apiv1.PodSucceeded: + return false, nil + case apiv1.PodRunning: + return isContainerRunning(t, containerName) + } + } + return false, nil + } +} + +func isContainerRunning(pod *apiv1.Pod, containerName string) (bool, error) { + for _, status := range 
pod.Status.ContainerStatuses {
+		if status.Name == containerName {
+			if status.State.Waiting != nil {
+				return false, nil
+			} else if status.State.Running != nil {
+				return true, nil
+			} else if status.State.Terminated != nil {
+				return false, nil
+			} else {
+				return false, fmt.Errorf("unknown container state")
+			}
+		}
+	}
+	return false, nil
+}
+
+func podDone(event watch.Event) (bool, error) {
+	switch event.Type {
+	case watch.Deleted:
+		return false, k8serrors.NewNotFound(schema.GroupResource{Resource: "pods"}, "")
+	}
+	switch t := event.Object.(type) {
+	case *apiv1.Pod:
+		switch t.Status.Phase {
+		case apiv1.PodFailed, apiv1.PodSucceeded:
+			return true, nil
+		}
+	}
+	return false, nil
+}
+
+func createTempFile(content []byte) (string, error) {
+	randBytes := make([]byte, 16)
+	if _, err := rand.Read(randBytes); err != nil {
+		return "", err
+	}
+	tmpfile, err := ioutil.TempFile("", hex.EncodeToString(randBytes))
+	if err != nil {
+		return "", err
+	}
+	defer tmpfile.Close()
+	if _, err := tmpfile.Write(content); err != nil {
+		return "", err
+	}
+	return tmpfile.Name(), nil
+}
diff --git a/dev-tools/mage/kubernetes/kind.go b/dev-tools/mage/kubernetes/kind.go
new file mode 100644
index 00000000000..c4a94649ca7
--- /dev/null
+++ b/dev-tools/mage/kubernetes/kind.go
@@ -0,0 +1,143 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package kubernetes
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+	"os/exec"
+	"path/filepath"
+
+	"github.com/magefile/mage/mg"
+	"github.com/magefile/mage/sh"
+)
+
+// KindIntegrationTestStep sets up a kind environment.
+type KindIntegrationTestStep struct{}
+
+// Name returns the kind name.
+func (m *KindIntegrationTestStep) Name() string {
+	return "kind"
+}
+
+// Use always returns false.
+//
+// This step must be listed in the tester's `StepRequirements` to be used; it
+// cannot be autodiscovered.
+func (m *KindIntegrationTestStep) Use(dir string) (bool, error) {
+	return false, nil
+}
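// Illustrative driver (hypothetical; the framework normally calls these
// methods itself): create the cluster around a test run, then tear it down.
// Setting KIND_SKIP_DELETE keeps the cluster around for debugging.
//
//	step := &KindIntegrationTestStep{}
//	env := map[string]string{}
//	if err := step.Setup(env); err != nil { /* handle */ }
//	defer step.Teardown(env)
//	// env["KUBECONFIG"] and env["KIND_CLUSTER"] now point at the new cluster.

+// Setup ensures that a kubernetes cluster is up and running.
+//
+// If `KUBECONFIG` or `KUBE_CONFIG` is already defined in the env then it does nothing.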
+func (m *KindIntegrationTestStep) Setup(env map[string]string) error { + _, exists := env["KUBECONFIG"] + if exists { + // do nothing + return nil + } + _, exists = env["KUBE_CONFIG"] + if exists { + // do nothing + return nil + } + _, err := exec.LookPath("kind") + if err != nil { + if mg.Verbose() { + fmt.Println("Skipping kind setup; kind command missing") + } + return nil + } + + clusterName := kubernetesPodName() + stdOut := ioutil.Discard + stdErr := ioutil.Discard + if mg.Verbose() { + stdOut = os.Stdout + stdErr = os.Stderr + } + + kubeCfgDir := filepath.Join("build", "kind", clusterName) + kubeCfgDir, err = filepath.Abs(kubeCfgDir) + if err != nil { + return err + } + kubeConfig := filepath.Join(kubeCfgDir, "kubecfg") + if err := os.MkdirAll(kubeCfgDir, os.ModePerm); err != nil { + return err + } + + args := []string{ + "create", + "cluster", + "--name", clusterName, + "--kubeconfig", kubeConfig, + "--wait", + "300s", + } + kubeVersion := os.Getenv("K8S_VERSION") + if kubeVersion != "" { + args = append(args, "--image", fmt.Sprintf("kindest/node:%s", kubeVersion)) + } + + _, err = sh.Exec( + map[string]string{}, + stdOut, + stdErr, + "kind", + args..., + ) + if err != nil { + return err + } + env["KUBECONFIG"] = kubeConfig + env["KIND_CLUSTER"] = clusterName + return nil +} + +// Teardown destroys the kubernetes cluster. +func (m *KindIntegrationTestStep) Teardown(env map[string]string) error { + stdOut := ioutil.Discard + stdErr := ioutil.Discard + if mg.Verbose() { + stdOut = os.Stdout + stdErr = os.Stderr + } + + name, created := env["KIND_CLUSTER"] + _, keepUp := os.LookupEnv("KIND_SKIP_DELETE") + if created && !keepUp { + _, err := sh.Exec( + env, + stdOut, + stdErr, + "kind", + "delete", + "cluster", + "--name", + name, + ) + if err != nil { + return err + } + delete(env, "KIND_CLUSTER") + } + return nil +} diff --git a/dev-tools/mage/kubernetes/kuberemote.go b/dev-tools/mage/kubernetes/kuberemote.go new file mode 100644 index 00000000000..4e98b536a10 --- /dev/null +++ b/dev-tools/mage/kubernetes/kuberemote.go @@ -0,0 +1,612 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+
+package kubernetes
+
+import (
+	"bufio"
+	"context"
+	"crypto/rand"
+	"crypto/rsa"
+	"crypto/x509"
+	"encoding/hex"
+	"encoding/pem"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net"
+	"net/http"
+	"net/url"
+	"os/exec"
+	"strings"
+	"time"
+
+	"github.com/pkg/errors"
+	"golang.org/x/crypto/ssh"
+
+	apiv1 "k8s.io/api/core/v1"
+	rbacv1 "k8s.io/api/rbac/v1"
+	k8serrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/watch"
+	"k8s.io/client-go/kubernetes"
+	"k8s.io/client-go/rest"
+	"k8s.io/client-go/tools/clientcmd"
+	"k8s.io/client-go/tools/portforward"
+	watchtools "k8s.io/client-go/tools/watch"
+	"k8s.io/client-go/transport/spdy"
+)
+
+const sshBitSize = 4096
+
+var mode = int32(256) // 0400: owner read-only, for the mounted authorized_keys
+
+// KubeRemote rsyncs the passed directory to a pod and runs the command inside of that pod.
+type KubeRemote struct {
+	cfg       *rest.Config
+	cs        *kubernetes.Clientset
+	namespace string
+	name      string
+	workDir   string
+	destDir   string
+	syncDir   string
+
+	svcAccName string
+	secretName string
+	privateKey []byte
+	publicKey  []byte
+}
+
+// New creates a new kubernetes remote runner.
+func New(kubeconfig string, namespace string, name string, workDir string, destDir string, syncDir string) (*KubeRemote, error) {
+	config, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
+	if err != nil {
+		return nil, err
+	}
+	cs, err := kubernetes.NewForConfig(config)
+	if err != nil {
+		return nil, err
+	}
+	name = strings.Replace(name, "_", "-", -1)
+	svcAccName := fmt.Sprintf("%s-sa", name)
+	secretName := fmt.Sprintf("%s-ssh-key", name)
+	privateKey, publicKey, err := generateSSHKeyPair()
+	if err != nil {
+		return nil, err
+	}
+	return &KubeRemote{config, cs, namespace, name, workDir, destDir, syncDir, svcAccName, secretName, privateKey, publicKey}, nil
+}
+
+// Run runs the command remotely on the kubernetes cluster.
+func (r *KubeRemote) Run(env map[string]string, stdout io.Writer, stderr io.Writer, args ...string) error {
+	if err := r.syncSSHKey(); err != nil {
+		return errors.Wrap(err, "failed to sync SSH secret")
+	}
+	defer r.deleteSSHKey()
+	if err := r.syncServiceAccount(); err != nil {
+		return err
+	}
+	defer r.deleteServiceAccount()
+	_, err := r.createPod(env, args...)
+	if err != nil {
+		return errors.Wrap(err, "failed to create execute pod")
+	}
+	defer r.deletePod()
+
+	// wait for SSH to be up inside the init container.
+	_, err = r.waitForPod(5*time.Minute, podInitReady)
+	if err != nil {
+		return errors.Wrap(err, "execute pod init container never started")
+	}
+	time.Sleep(1 * time.Second) // SSH inside of container can take a moment
+
+	// forward the SSH port so rsync can be run.
+	randomPort, err := getFreePort()
+	if err != nil {
+		return errors.Wrap(err, "failed to find a free port")
+	}
+	stopChannel := make(chan struct{}, 1)
+	readyChannel := make(chan struct{}, 1)
+	f, err := r.portForward([]string{fmt.Sprintf("%d:%d", randomPort, 22)}, stopChannel, readyChannel, stderr, stderr)
+	if err != nil {
+		return err
+	}
+	go f.ForwardPorts()
+	<-readyChannel
+
+	// perform the rsync
+	if err := r.rsync(randomPort, stderr, stderr); err != nil {
+		return errors.Wrap(err, "failed to rsync into the execute pod")
+	}
+
+	// stop port forwarding
+	close(stopChannel)
+
+	// wait for exec container to be running
+	_, err = r.waitForPod(5*time.Minute, containerRunning("exec"))
+	if err != nil {
+		return errors.Wrap(err, "execute pod container never started")
+	}
+
+	// stream the logs of the container
+	err = r.streamLogs("exec", stdout)
+	if err != nil {
+		return errors.Wrap(err, "failed to stream the logs")
+	}
+
+	// wait for exec container to be completely done
+	pod, err := r.waitForPod(30*time.Second, podDone)
+	if err != nil {
+		return errors.Wrap(err, "execute pod didn't terminate after 30 seconds of log stream")
+	}
+
+	// return error on failure
+	if pod.Status.Phase == apiv1.PodFailed {
+		return fmt.Errorf("execute pod test failed")
+	}
+	return nil
+}
+
+// deleteSSHKey deletes the SSH key from the cluster.
+func (r *KubeRemote) deleteSSHKey() {
+	_ = r.cs.CoreV1().Secrets(r.namespace).Delete(r.secretName, &metav1.DeleteOptions{})
+}
+
+// syncSSHKey syncs the SSH key to the cluster.
+func (r *KubeRemote) syncSSHKey() error {
+	// delete before create
+	r.deleteSSHKey()
+	_, err := r.cs.CoreV1().Secrets(r.namespace).Create(createSecretManifest(r.secretName, r.publicKey))
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// deleteServiceAccount deletes the required service account.
+func (r *KubeRemote) deleteServiceAccount() {
+	_ = r.cs.RbacV1().ClusterRoleBindings().Delete(r.name, &metav1.DeleteOptions{})
+	_ = r.cs.RbacV1().ClusterRoles().Delete(r.name, &metav1.DeleteOptions{})
+	_ = r.cs.CoreV1().ServiceAccounts(r.namespace).Delete(r.svcAccName, &metav1.DeleteOptions{})
+}
+
+// syncServiceAccount syncs the required service account.
+func (r *KubeRemote) syncServiceAccount() error {
+	// delete before create
+	r.deleteServiceAccount()
+	_, err := r.cs.CoreV1().ServiceAccounts(r.namespace).Create(createServiceAccountManifest(r.svcAccName))
+	if err != nil {
+		return errors.Wrap(err, "failed to create service account")
+	}
+	_, err = r.cs.RbacV1().ClusterRoles().Create(createClusterRoleManifest(r.name))
+	if err != nil {
+		return errors.Wrap(err, "failed to create cluster role")
+	}
+	_, err = r.cs.RbacV1().ClusterRoleBindings().Create(createClusterRoleBindingManifest(r.name, r.namespace, r.svcAccName))
+	if err != nil {
+		return errors.Wrap(err, "failed to create cluster role binding")
+	}
+	return nil
+}
+
+// createPod creates the pod.
+func (r *KubeRemote) createPod(env map[string]string, cmd ...string) (*apiv1.Pod, error) {
+	r.deletePod() // ensure it doesn't already exist
+	return r.cs.CoreV1().Pods(r.namespace).Create(createPodManifest(r.name, "golang:1.13.9", env, cmd, r.workDir, r.destDir, r.secretName, r.svcAccName))
+}
+
+// deletePod deletes the pod.
+func (r *KubeRemote) deletePod() {
+	_ = r.cs.CoreV1().Pods(r.namespace).Delete(r.name, &metav1.DeleteOptions{})
+}
+
+// waitForPod waits for the created pod to match the given condition.
+func (r *KubeRemote) waitForPod(wait time.Duration, condition watchtools.ConditionFunc) (*apiv1.Pod, error) {
+	w, err := r.cs.CoreV1().Pods(r.namespace).Watch(metav1.SingleObject(metav1.ObjectMeta{Name: r.name}))
+	if err != nil {
+		return nil, err
+	}
+
+	ctx, _ := watchtools.ContextWithOptionalTimeout(context.Background(), wait)
+	ev, err := watchtools.UntilWithoutRetry(ctx, w, func(ev watch.Event) (bool, error) {
+		return condition(ev)
+	})
+	if ev != nil {
+		return ev.Object.(*apiv1.Pod), err
+	}
+	return nil, err
+}
+
+// portForward runs the port forwarding so SSH rsync can be run into the pod.
+func (r *KubeRemote) portForward(ports []string, stopChannel, readyChannel chan struct{}, stdout, stderr io.Writer) (*portforward.PortForwarder, error) {
+	roundTripper, upgrader, err := spdy.RoundTripperFor(r.cfg)
+	if err != nil {
+		return nil, err
+	}
+
+	path := fmt.Sprintf("/api/v1/namespaces/%s/pods/%s/portforward", r.namespace, r.name)
+	hostIP := strings.TrimPrefix(r.cfg.Host, "https://")
+	serverURL := url.URL{Scheme: "https", Path: path, Host: hostIP}
+	dialer := spdy.NewDialer(upgrader, &http.Client{Transport: roundTripper}, http.MethodPost, &serverURL)
+	return portforward.New(dialer, ports, stopChannel, readyChannel, stdout, stderr)
+}
+
+// rsync performs the rsync of the sync directory to the destination directory inside of the pod.
+func (r *KubeRemote) rsync(port uint16, stdout, stderr io.Writer) error {
+	privateKeyFile, err := createTempFile(r.privateKey)
+	if err != nil {
+		return err
+	}
+
+	rsh := fmt.Sprintf("ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o LogLevel=ERROR -p %d -i %s", port, privateKeyFile)
+	args := []string{
+		"--rsh", rsh,
+		"-a", fmt.Sprintf("%s/", r.syncDir),
+		fmt.Sprintf("root@localhost:%s", r.destDir),
+	}
+	cmd := exec.Command("rsync", args...)
+	cmd.Stdout = stdout
+	cmd.Stderr = stderr
+	return cmd.Run()
+}
+
+// streamLogs streams the logs from the execution pod until the pod is terminated.
+func (r *KubeRemote) streamLogs(container string, stdout io.Writer) error {
+	req := r.cs.CoreV1().Pods(r.namespace).GetLogs(r.name, &apiv1.PodLogOptions{
+		Container: container,
+		Follow:    true,
+	})
+	logs, err := req.Stream()
+	if err != nil {
+		return err
+	}
+	defer logs.Close()
+
+	reader := bufio.NewReader(logs)
+	for {
+		bytes, err := reader.ReadBytes('\n')
+		if _, err := stdout.Write(bytes); err != nil {
+			return err
+		}
+		if err != nil {
+			if err != io.EOF {
+				return err
+			}
+			return nil
+		}
+	}
+}
+
+// generateSSHKeyPair generates a new SSH key pair.
+func generateSSHKeyPair() ([]byte, []byte, error) {
+	private, err := rsa.GenerateKey(rand.Reader, sshBitSize)
+	if err != nil {
+		return nil, nil, err
+	}
+	if err = private.Validate(); err != nil {
+		return nil, nil, err
+	}
+	public, err := ssh.NewPublicKey(&private.PublicKey)
+	if err != nil {
+		return nil, nil, err
+	}
+	return encodePrivateKeyToPEM(private), ssh.MarshalAuthorizedKey(public), nil
+}
+
+// encodePrivateKeyToPEM encodes the private key from RSA to PEM format.
+func encodePrivateKeyToPEM(privateKey *rsa.PrivateKey) []byte {
+	privDER := x509.MarshalPKCS1PrivateKey(privateKey)
+	privBlock := pem.Block{
+		Type:    "RSA PRIVATE KEY",
+		Headers: nil,
+		Bytes:   privDER,
+	}
+	return pem.EncodeToMemory(&privBlock)
+}
+
+// getFreePort finds a free port.
+func getFreePort() (uint16, error) {
+	addr, err := net.ResolveTCPAddr("tcp", "localhost:0")
+	if err != nil {
+		return 0, err
+	}
+	l, err := net.ListenTCP("tcp", addr)
+	if err != nil {
+		return 0, err
+	}
+	defer l.Close()
+	return uint16(l.Addr().(*net.TCPAddr).Port), nil
+}
+
+// createSecretManifest creates the secret object to create in the cluster.
+//
+// This is the public key that the sshd uses as the authorized key.
+func createSecretManifest(name string, publicKey []byte) *apiv1.Secret {
+	return &apiv1.Secret{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: name,
+		},
+		StringData: map[string]string{
+			"authorized_keys": string(publicKey),
+		},
+	}
+}
+
+// createServiceAccountManifest creates the service account the pod will use.
+func createServiceAccountManifest(name string) *apiv1.ServiceAccount {
+	return &apiv1.ServiceAccount{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: name,
+		},
+	}
+}
+
+// createClusterRoleManifest creates the cluster role the pod will use.
+//
+// This gives the pod all permissions on everything!
+func createClusterRoleManifest(name string) *rbacv1.ClusterRole {
+	return &rbacv1.ClusterRole{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: name,
+		},
+		Rules: []rbacv1.PolicyRule{
+			rbacv1.PolicyRule{
+				Verbs:     []string{"*"},
+				APIGroups: []string{"*"},
+				Resources: []string{"*"},
+			},
+			rbacv1.PolicyRule{
+				Verbs:           []string{"*"},
+				NonResourceURLs: []string{"*"},
+			},
+		},
+	}
+}
+
+// createClusterRoleBindingManifest creates the cluster role binding the pod will use.
+//
+// This binds the service account to the cluster role.
+func createClusterRoleBindingManifest(name string, namespace string, svcAccName string) *rbacv1.ClusterRoleBinding {
+	return &rbacv1.ClusterRoleBinding{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: name,
+		},
+		Subjects: []rbacv1.Subject{
+			rbacv1.Subject{
+				Kind:      "ServiceAccount",
+				Name:      svcAccName,
+				Namespace: namespace,
+			},
+		},
+		RoleRef: rbacv1.RoleRef{
+			APIGroup: "rbac.authorization.k8s.io",
+			Kind:     "ClusterRole",
+			Name:     name,
+		},
+	}
+}
+
+// createPodManifest creates the pod inside of the cluster that will be used for remote execution.
+//
+// Creates a pod with an init container that runs sshd-rsync; once the first connection closes, the init container
+// exits and the exec container starts, using the rsync'd directory as its work directory.
+func createPodManifest(name string, image string, env map[string]string, cmd []string, workDir string, destDir string, secretName string, svcAccName string) *apiv1.Pod { + execEnv := []apiv1.EnvVar{ + apiv1.EnvVar{ + Name: "NODE_NAME", + ValueFrom: &apiv1.EnvVarSource{ + FieldRef: &apiv1.ObjectFieldSelector{ + FieldPath: "spec.nodeName", + }, + }, + }, + } + for k, v := range env { + execEnv = append(execEnv, apiv1.EnvVar{ + Name: k, + Value: v, + }) + } + return &apiv1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + Spec: apiv1.PodSpec{ + ServiceAccountName: svcAccName, + RestartPolicy: apiv1.RestartPolicyNever, + InitContainers: []apiv1.Container{ + { + Name: "sync-init", + Image: "ernoaapa/sshd-rsync", + Ports: []apiv1.ContainerPort{ + { + Name: "ssh", + Protocol: apiv1.ProtocolTCP, + ContainerPort: 22, + }, + }, + Env: []apiv1.EnvVar{ + { + Name: "ONE_TIME", + Value: "true", + }, + }, + VolumeMounts: []apiv1.VolumeMount{ + { + Name: "ssh-config", + MountPath: "/root/.ssh/authorized_keys", + SubPath: "authorized_keys", + }, + { + Name: "destdir", + MountPath: destDir, + }, + }, + }, + }, + Containers: []apiv1.Container{ + apiv1.Container{ + Name: "exec", + Image: image, + Command: cmd, + WorkingDir: workDir, + Env: execEnv, + VolumeMounts: []apiv1.VolumeMount{ + { + Name: "destdir", + MountPath: destDir, + }, + }, + }, + }, + Volumes: []apiv1.Volume{ + { + Name: "ssh-config", + VolumeSource: apiv1.VolumeSource{ + Secret: &apiv1.SecretVolumeSource{ + SecretName: secretName, + DefaultMode: &mode, + }, + }, + }, + { + Name: "destdir", + VolumeSource: apiv1.VolumeSource{ + EmptyDir: &apiv1.EmptyDirVolumeSource{}, + }, + }, + }, + }, + } +} + +func podInitReady(event watch.Event) (bool, error) { + switch event.Type { + case watch.Deleted: + return false, k8serrors.NewNotFound(schema.GroupResource{Resource: "pods"}, "") + } + switch t := event.Object.(type) { + case *apiv1.Pod: + switch t.Status.Phase { + case apiv1.PodFailed, apiv1.PodSucceeded: + return false, nil + case apiv1.PodRunning: + return false, nil + case apiv1.PodPending: + return isInitContainersReady(t), nil + } + } + return false, nil +} + +func isInitContainersReady(pod *apiv1.Pod) bool { + if isScheduled(pod) && isInitContainersRunning(pod) { + return true + } + return false +} + +func isScheduled(pod *apiv1.Pod) bool { + if &pod.Status != nil && len(pod.Status.Conditions) > 0 { + for _, condition := range pod.Status.Conditions { + if condition.Type == apiv1.PodScheduled && + condition.Status == apiv1.ConditionTrue { + return true + } + } + } + return false +} + +func isInitContainersRunning(pod *apiv1.Pod) bool { + if &pod.Status != nil { + if len(pod.Spec.InitContainers) != len(pod.Status.InitContainerStatuses) { + return false + } + for _, status := range pod.Status.InitContainerStatuses { + if status.State.Running == nil { + return false + } + } + return true + } + return false +} + +func containerRunning(containerName string) func(watch.Event) (bool, error) { + return func(event watch.Event) (bool, error) { + switch event.Type { + case watch.Deleted: + return false, k8serrors.NewNotFound(schema.GroupResource{Resource: "pods"}, "") + } + switch t := event.Object.(type) { + case *apiv1.Pod: + switch t.Status.Phase { + case apiv1.PodFailed, apiv1.PodSucceeded: + return false, nil + case apiv1.PodRunning: + return isContainerRunning(t, containerName) + } + } + return false, nil + } +} + +func isContainerRunning(pod *apiv1.Pod, containerName string) (bool, error) { + for _, status := range 
pod.Status.ContainerStatuses {
+		if status.Name == containerName {
+			if status.State.Waiting != nil {
+				return false, nil
+			} else if status.State.Running != nil {
+				return true, nil
+			} else if status.State.Terminated != nil {
+				return false, nil
+			} else {
+				return false, fmt.Errorf("unknown container state")
+			}
+		}
+	}
+	return false, nil
+}
+
+func podDone(event watch.Event) (bool, error) {
+	switch event.Type {
+	case watch.Deleted:
+		return false, k8serrors.NewNotFound(schema.GroupResource{Resource: "pods"}, "")
+	}
+	switch t := event.Object.(type) {
+	case *apiv1.Pod:
+		switch t.Status.Phase {
+		case apiv1.PodFailed, apiv1.PodSucceeded:
+			return true, nil
+		}
+	}
+	return false, nil
+}
+
+func createTempFile(content []byte) (string, error) {
+	randBytes := make([]byte, 16)
+	if _, err := rand.Read(randBytes); err != nil {
+		return "", err
+	}
+	tmpfile, err := ioutil.TempFile("", hex.EncodeToString(randBytes))
+	if err != nil {
+		return "", err
+	}
+	defer tmpfile.Close()
+	if _, err := tmpfile.Write(content); err != nil {
+		return "", err
+	}
+	return tmpfile.Name(), nil
+}
diff --git a/dev-tools/mage/kubernetes/kubernetes.go b/dev-tools/mage/kubernetes/kubernetes.go
new file mode 100644
index 00000000000..2f929da9e16
--- /dev/null
+++ b/dev-tools/mage/kubernetes/kubernetes.go
@@ -0,0 +1,165 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package kubernetes
+
+import (
+	"fmt"
+	"io/ioutil"
+	"log"
+	"os"
+	"path/filepath"
+	"strings"
+
+	"github.com/magefile/mage/mg"
+	"github.com/pkg/errors"
+
+	"github.com/elastic/beats/v7/dev-tools/mage"
+)
+
+func init() {
+	mage.RegisterIntegrationTester(&KubernetesIntegrationTester{})
+}
+
+// KubernetesIntegrationTester runs integration tests inside a kubernetes cluster.
+type KubernetesIntegrationTester struct {
+}
+
+// Name returns kubernetes name.
+func (d *KubernetesIntegrationTester) Name() string {
+	return "kubernetes"
+}
+
+// Use determines if this tester should be used.
+func (d *KubernetesIntegrationTester) Use(dir string) (bool, error) {
+	kubernetesFile := filepath.Join(dir, "kubernetes.yml")
+	if _, err := os.Stat(kubernetesFile); !os.IsNotExist(err) {
+		return true, nil
+	}
+	return false, nil
+}
+
+// HasRequirements ensures that kubectl is installed.
+func (d *KubernetesIntegrationTester) HasRequirements() error {
+	if err := mage.HaveKubectl(); err != nil {
+		return err
+	}
+	return nil
+}
+
+// StepRequirements returns the steps required for this tester.
+func (d *KubernetesIntegrationTester) StepRequirements() mage.IntegrationTestSteps {
+	return mage.IntegrationTestSteps{&mage.MageIntegrationTestStep{}, &KindIntegrationTestStep{}}
+}
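// Illustrative check (hypothetical module path): the tester opts in only when
// the directory ships a kubernetes.yml manifest.
//
//	t := &KubernetesIntegrationTester{}
//	ok, _ := t.Use("metricbeat/module/kubernetes/_meta")
//	// ok is true only if metricbeat/module/kubernetes/_meta/kubernetes.yml exists

+// Test performs the tests with kubernetes.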
+func (d *KubernetesIntegrationTester) Test(dir string, mageTarget string, env map[string]string) error {
+	stdOut := ioutil.Discard
+	stdErr := ioutil.Discard
+	if mg.Verbose() {
+		stdOut = os.Stdout
+		stdErr = os.Stderr
+	}
+
+	manifestPath := filepath.Join(dir, "kubernetes.yml")
+	if _, err := os.Stat(manifestPath); os.IsNotExist(err) {
+		// defensive, as `Use` should cause this runner not to be used if no file.
+		return fmt.Errorf("no kubernetes.yml")
+	}
+
+	kubeConfig := env["KUBECONFIG"]
+	if kubeConfig == "" {
+		kubeConfig = env["KUBE_CONFIG"]
+	}
+	if kubeConfig == "" {
+		fmt.Println("Skipping tests inside of kubernetes: no KUBECONFIG defined.")
+		return nil
+	}
+
+	if mg.Verbose() {
+		fmt.Println(">> Applying module manifest to cluster...")
+	}
+
+	// Determine the path to use inside the pod.
+	repo, err := mage.GetProjectRepoInfo()
+	if err != nil {
+		return err
+	}
+	magePath := filepath.Join("/go/src", repo.CanonicalRootImportPath, repo.SubDir, "build/mage-linux-amd64")
+
+	// Apply the manifest from the dir. This sets up the requirements for the
+	// tests that will run inside the cluster.
+	if err := mage.KubectlApply(env, stdOut, stdErr, manifestPath); err != nil {
+		return errors.Wrapf(err, "failed to apply manifest %s", manifestPath)
+	}
+	defer func() {
+		if mg.Verbose() {
+			fmt.Println(">> Deleting module manifest from cluster...")
+		}
+		if err := mage.KubectlDelete(env, stdOut, stdErr, manifestPath); err != nil {
+			log.Printf("%s", errors.Wrapf(err, "failed to delete manifest %s", manifestPath))
+		}
+	}()
+
+	// Pass all environment variables inside the pod, except for KUBECONFIG as the test
+	// should use the environment set by kubernetes on the pod.
+	insideEnv := map[string]string{}
+	for envKey, envVal := range env {
+		if envKey != "KUBECONFIG" && envKey != "KUBE_CONFIG" {
+			insideEnv[envKey] = envVal
+		}
+	}
+
+	destDir := filepath.Join("/go/src", repo.CanonicalRootImportPath)
+	workDir := filepath.Join(destDir, repo.SubDir)
+	remote, err := mage.NewKubeRemote(kubeConfig, "default", kubernetesPodName(), workDir, destDir, repo.RootDir)
+	if err != nil {
+		return err
+	}
+	// Uses `os.Stdout` directly as its output should always be shown.
+	err = remote.Run(insideEnv, os.Stdout, stdErr, magePath, mageTarget)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// InsideTest performs the tests inside of the environment.
+func (d *KubernetesIntegrationTester) InsideTest(test func() error) error {
+	return test()
+}
+
+// kubernetesPodName returns the pod name to use with kubernetes.
+func kubernetesPodName() string { + commit, err := mage.CommitHash() + if err != nil { + panic(errors.Wrap(err, "failed to construct kind cluster name")) + } + + version, err := mage.BeatQualifiedVersion() + if err != nil { + panic(errors.Wrap(err, "failed to construct kind cluster name")) + } + version = strings.NewReplacer(".", "_").Replace(version) + + clusterName := "{{.BeatName}}_{{.Version}}_{{.ShortCommit}}-{{.StackEnvironment}}" + clusterName = mage.MustExpand(clusterName, map[string]interface{}{ + "StackEnvironment": mage.StackEnvironment, + "ShortCommit": commit[:10], + "Version": version, + }) + return clusterName +} diff --git a/dev-tools/mage/pkg_test.go b/dev-tools/mage/pkg_test.go index e360686b54a..c0b92f15de3 100644 --- a/dev-tools/mage/pkg_test.go +++ b/dev-tools/mage/pkg_test.go @@ -103,7 +103,7 @@ func TestDumpVariables(t *testing.T) { } func TestLoadSpecs(t *testing.T) { - pkgs, err := LoadSpecs("files/packages.yml") + pkgs, err := LoadSpecs("../packaging/packages.yml") if err != nil { t.Fatal(err) } diff --git a/dev-tools/mage/target/integtest/integtest.go b/dev-tools/mage/target/integtest/integtest.go index abf276bc038..62d601cea6d 100644 --- a/dev-tools/mage/target/integtest/integtest.go +++ b/dev-tools/mage/target/integtest/integtest.go @@ -53,8 +53,6 @@ func WhitelistEnvVar(key ...string) { // IntegTest executes integration tests (it uses Docker to run the tests). func IntegTest() { - devtools.AddIntegTestUsage() - defer devtools.StopIntegTestEnv() mg.SerialDeps(GoIntegTest, PythonIntegTest) } @@ -65,18 +63,30 @@ func GoIntegTest(ctx context.Context) error { if !devtools.IsInIntegTestEnv() { mg.SerialDeps(goTestDeps...) } - return devtools.RunIntegTest("goIntegTest", func() error { + runner, err := devtools.NewDockerIntegrationRunner(whitelistedEnvVars...) + if err != nil { + return err + } + return runner.Test("goIntegTest", func() error { return devtools.GoTest(ctx, devtools.DefaultGoTestIntegrationArgs()) - }, whitelistedEnvVars...) + }) } -// PythonIntegTest executes the python system tests in the integration environment (Docker). +// PythonIntegTest executes the python system tests in the integration +// environment (Docker). +// Use NOSE_TESTMATCH=pattern to only run tests matching the specified pattern. +// Use any other NOSE_* environment variable to influence the behavior of +// nosetests. func PythonIntegTest(ctx context.Context) error { if !devtools.IsInIntegTestEnv() { mg.SerialDeps(pythonTestDeps...) } - return devtools.RunIntegTest("pythonIntegTest", func() error { + runner, err := devtools.NewDockerIntegrationRunner(append(whitelistedEnvVars, devtools.ListMatchingEnvVars("NOSE_")...)...) + if err != nil { + return err + } + return runner.Test("pythonIntegTest", func() error { mg.Deps(devtools.BuildSystemTestBinary) return devtools.PythonNoseTest(devtools.DefaultPythonTestIntegrationArgs()) - }, whitelistedEnvVars...) + }) } diff --git a/dev-tools/mage/target/integtest/notests/integtest.go b/dev-tools/mage/target/integtest/notests/integtest.go index 2249faf75cb..b94cf5ddf9d 100644 --- a/dev-tools/mage/target/integtest/notests/integtest.go +++ b/dev-tools/mage/target/integtest/notests/integtest.go @@ -19,7 +19,7 @@ package notests import "fmt" -// IntegTest method fallbacks to GoIntegTest() +// IntegTest executes integration tests (it uses Docker to run the tests). 
func IntegTest() { GoIntegTest() } diff --git a/dev-tools/magefile.go b/dev-tools/magefile.go index 083e7e3479c..5bdc2479042 100644 --- a/dev-tools/magefile.go +++ b/dev-tools/magefile.go @@ -20,6 +20,23 @@ package main import ( + "context" + + devtools "github.com/elastic/beats/v7/dev-tools/mage" + // mage:import _ "github.com/elastic/beats/v7/dev-tools/mage/target/common" + // mage:import + "github.com/elastic/beats/v7/dev-tools/mage/target/test" ) + +func init() { + test.RegisterDeps(GoUnitTest) +} + +// GoUnitTest executes the Go unit tests. +// Use TEST_COVERAGE=true to enable code coverage profiling. +// Use RACE_DETECTOR=true to enable the race detector. +func GoUnitTest(ctx context.Context) { + devtools.GoTest(ctx, devtools.DefaultGoTestUnitArgs()) +} diff --git a/dev-tools/make/mage-install.mk b/dev-tools/make/mage-install.mk new file mode 100644 index 00000000000..8966ed6f474 --- /dev/null +++ b/dev-tools/make/mage-install.mk @@ -0,0 +1,13 @@ +MAGE_VERSION ?= v1.9.0 +MAGE_PRESENT := $(shell mage --version 2> /dev/null | grep $(MAGE_VERSION)) +MAGE_IMPORT_PATH ?= github.com/magefile/mage +export MAGE_IMPORT_PATH + +.PHONY: mage +mage: +ifndef MAGE_PRESENT + @echo Installing mage $(MAGE_VERSION) from vendor dir. + @go install -mod=vendor -ldflags="-X $(MAGE_IMPORT_PATH)/mage.gitTag=$(MAGE_VERSION)" ${MAGE_IMPORT_PATH}/... + @-mage -clean +endif + @true diff --git a/dev-tools/make/mage.mk b/dev-tools/make/mage.mk index 8966ed6f474..6b210832006 100644 --- a/dev-tools/make/mage.mk +++ b/dev-tools/make/mage.mk @@ -1,13 +1,77 @@ -MAGE_VERSION ?= v1.9.0 -MAGE_PRESENT := $(shell mage --version 2> /dev/null | grep $(MAGE_VERSION)) -MAGE_IMPORT_PATH ?= github.com/magefile/mage -export MAGE_IMPORT_PATH - -.PHONY: mage -mage: -ifndef MAGE_PRESENT - @echo Installing mage $(MAGE_VERSION) from vendor dir. - @go install -mod=vendor -ldflags="-X $(MAGE_IMPORT_PATH)/mage.gitTag=$(MAGE_VERSION)" ${MAGE_IMPORT_PATH}/... - @-mage -clean -endif - @true +# This is a minimal Makefile for Beats that are built with Mage. Its only +# responsibility is to provide compatibility with existing Jenkins and Travis +# setups. + +# +# Variables +# +.DEFAULT_GOAL := help +PWD := $(CURDIR) + +# +# Includes +# +include $(ES_BEATS)/dev-tools/make/mage-install.mk + +# +# Targets (alphabetically sorted). +# +.PHONY: check +check: mage + mage check + +.PHONY: clean +clean: mage + mage clean + +fix-permissions: + +.PHONY: fmt +fmt: mage + mage fmt + +# Default target. +.PHONY: help +help: + @echo Use mage rather than make. Here are the available mage targets: + @mage -l + +.PHONY: release +release: mage + mage package + +stop-environment: + +.PHONY: unit-tests +unit-tests: mage + mage unitTest + +.PHONY: integration-tests +integration-tests: mage + rm -f build/TEST-go-integration.out + mage goIntegTest || ( cat build/TEST-go-integration.out && false ) + +.PHONY: system-tests +system-tests: mage + mage pythonIntegTest + +.PHONY: testsuite +testsuite: mage + rm -f build/TEST-go-integration.out + mage update build unitTest integTest || ( cat build/TEST-go-integration.out && false ) + +.PHONY: update +update: mage + mage update + +.PHONY: crosscompile +crosscompile: mage + mage crossBuild + +.PHONY: docs +docs: + mage docs + +.PHONY: docs-preview +docs-preview: + PREVIEW=1 $(MAKE) docs diff --git a/dev-tools/make/xpack.mk b/dev-tools/make/xpack.mk deleted file mode 100644 index 54f60831108..00000000000 --- a/dev-tools/make/xpack.mk +++ /dev/null @@ -1,53 +0,0 @@ -# This is a minimal Makefile for Beats that are built with Mage. 
Its only -# responsibility is to provide compatibility with existing Jenkins and Travis -# setups. - -# -# Variables -# -.DEFAULT_GOAL := help -PWD := $(CURDIR) - -# -# Includes -# -include $(ES_BEATS)/dev-tools/make/mage.mk - -# -# Targets (alphabetically sorted). -# -.PHONY: check -check: mage - mage check - -.PHONY: clean -clean: mage - mage clean - -fix-permissions: - -.PHONY: fmt -fmt: mage - mage fmt - -# Default target. -.PHONY: help -help: - @echo Use mage rather than make. Here are the available mage targets: - @mage -l - -.PHONY: release -release: mage - mage package - -stop-environment: - -.PHONY: testsuite -testsuite: mage - rm -f build/TEST-go-integration.out - mage update build unitTest integTest || ( cat build/TEST-go-integration.out && false ) - -.PHONY: update -update: mage - mage update - diff --git a/dev-tools/packaging/templates/common/magefile.go.tmpl b/dev-tools/packaging/templates/common/magefile.go.tmpl deleted file mode 100644 index 286f9d30555..00000000000 --- a/dev-tools/packaging/templates/common/magefile.go.tmpl +++ /dev/null @@ -1,92 +0,0 @@ -// +build mage - -package main - -import ( - "context" - "fmt" - "time" - - "github.com/magefile/mage/mg" - "github.com/magefile/mage/sh" - - devtools "github.com/elastic/beats/v7/dev-tools/mage" -) - -func init() { - devtools.BeatDescription = "One sentence description of the Beat." -} - -// Build builds the Beat binary. -func Build() error { - return devtools.Build(devtools.DefaultBuildArgs()) -} - -// GolangCrossBuild build the Beat binary inside of the golang-builder. -// Do not use directly, use crossBuild instead. -func GolangCrossBuild() error { - return devtools.GolangCrossBuild(devtools.DefaultGolangCrossBuildArgs()) -} - -// BuildGoDaemon builds the go-daemon binary (use crossBuildGoDaemon). -func BuildGoDaemon() error { - return devtools.BuildGoDaemon() -} - -// CrossBuild cross-builds the beat for all target platforms. -func CrossBuild() error { - return devtools.CrossBuild() -} - -// CrossBuildGoDaemon cross-builds the go-daemon binary using Docker. -func CrossBuildGoDaemon() error { - return devtools.CrossBuildGoDaemon() -} - -// Clean cleans all generated files and build artifacts. -func Clean() error { - return devtools.Clean() -} - -// Package packages the Beat for distribution. -// Use SNAPSHOT=true to build snapshots. -// Use PLATFORMS to control the target platforms. -func Package() { - start := time.Now() - defer func() { fmt.Println("package ran for", time.Since(start)) }() - - devtools.UseCommunityBeatPackaging() - - mg.Deps(Update) - mg.Deps(CrossBuild, CrossBuildGoDaemon) - mg.SerialDeps(devtools.Package, TestPackages) -} - -// TestPackages tests the generated packages (i.e. file modes, owners, groups). -func TestPackages() error { - return devtools.TestPackages() -} - -// Update updates the generated files (aka make update). -func Update() error { - return sh.Run("make", "update") -} - -// Fields generates a fields.yml for the Beat. -func Fields() error { - return devtools.GenerateFieldsYAML() -} - -// GoTestUnit executes the Go unit tests. -// Use TEST_COVERAGE=true to enable code coverage profiling. -// Use RACE_DETECTOR=true to enable the race detector. -func GoTestUnit(ctx context.Context) error { - return devtools.GoTest(ctx, devtools.DefaultGoTestUnitArgs()) -} - -// GoTestIntegration executes the Go integration tests. -// Use TEST_COVERAGE=true to enable code coverage profiling. -// Use RACE_DETECTOR=true to enable the race detector. 
-func GoTestIntegration(ctx context.Context) error { - return devtools.GoTest(ctx, devtools.DefaultGoTestIntegrationArgs()) -} diff --git a/docs/devguide/fields-yml.asciidoc b/docs/devguide/fields-yml.asciidoc index fe2c4fad95b..caaca6624bb 100644 --- a/docs/devguide/fields-yml.asciidoc +++ b/docs/devguide/fields-yml.asciidoc @@ -1,7 +1,8 @@ [[event-fields-yml]] === Defining field mappings -Fields used by your Beat, along with their mapping details, must be defined in `_meta/fields.yml`. After editing this file, you must re-run `make update`. +You must define the fields used by your Beat, along with their mapping details, +in `_meta/fields.yml`. After editing this file, run `make update`. Define the field mappings in the `fields` array: @@ -29,27 +30,36 @@ Define the field mappings in the `fields` array: ---------------------------------------------------------------------- <1> `name`: The field name -<2> `type`: The field type. The value for the `type` key can be any of the datatypes https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-types.html[available in Elasticsearch]. If no value is specified, the field will default to being a `keyword`. +<2> `type`: The field type. The value of `type` can be any datatype {ref}/mapping-types.html[available in {es}]. If no value is specified, the default type is `keyword`. <3> `required`: Whether or not a field value is required <4> `description`: Some information about the field contents ==== Mapping parameters -Other mapping parameters can be specified for each field. Consult the https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-params.html[Elasticsearch reference] for more details on each of these parameters. + +You can specify other mapping parameters for each field. See the +{ref}/mapping-params.html[{es} Reference] for more details about each +parameter. [horizontal] `format`:: Specify a custom date format used by the field. -`multi_fields`:: For `text` or `keyword` fields, `multi_fields` can be used to define multi-field mappings. +`multi_fields`:: For `text` or `keyword` fields, use `multi_fields` to define +multi-field mappings. `enabled`:: Whether or not the field is enabled. `analyzer`:: Which analyzer to use when indexing. `search_analyzer`:: Which analyzer to use when searching. `norms`:: Applies to `text` and `keyword` fields. Default is `false`. -`dynamic`:: Dynamic field control. Can be one of `true` (default), `false` or `strict`. +`dynamic`:: Dynamic field control. Can be one of `true` (default), `false`, or +`strict`. `index`:: Whether or not the field should be indexed. -`doc_values`:: Whether or not the field should have doc values generated. See docs. +`doc_values`:: Whether or not the field should have doc values generated. `copy_to`:: Which field to copy the field value into. -`ignore_above`:: When this property value is missing or is `0`, it receives the `libbeat` default value of `1024`. If the value is `-1`, the Elasticsearch default value will be applied. Any other specified value will be applied as-is. +`ignore_above`:: {es} ignores (does not index) strings that are longer than the +specified value. When this property value is missing or `0`, the `libbeat` +default value of `1024` characters is used. If the value is `-1`, the {es} +default value is used. 
-For example, we could use the `copy_to` mapping parameter to copy the `last_name` and `first_name` fields into the `full_name` field at index time: +For example, you can use the `copy_to` mapping parameter to copy the +`last_name` and `first_name` fields into the `full_name` field at index time: [source,yaml] ---------------------------------------------------------------------- @@ -78,12 +88,20 @@ For example, we could use the `copy_to` mapping parameter to copy the `last_name <1> Copy the value of `last_name` into `full_name` <2> Copy the value of `first_name` into `full_name` -There are also some Kibana-specific properties, not detailed here. These are: `analyzed`, `count`, `searchable`, `aggregatable`, `script`. Kibana parameters can also be described using the `pattern`, `input_format`, `output_format`, `output_precision`, `label_template`, `url_template` and `open_link_in_current_tab`. +There are also some {kib}-specific properties, not detailed here. These are: +`analyzed`, `count`, `searchable`, `aggregatable`, and `script`. {kib} +parameters can also be described using `pattern`, `input_format`, +`output_format`, `output_precision`, `label_template`, `url_template`, and +`open_link_in_current_tab`. ==== Defining text multi-fields -There are various options that can be applied when using text fields. A simple text field using the default analyzer can be defined without any other options, as in the example above. -To keep the original keyword value when using `text` mappings, for instance to use in aggregations or ordering, a multi-field mapping can be used: +There are various options that you can apply when using text fields. You can +define a simple text field using the default analyzer without any other options, +as in the example shown earlier. + +To keep the original keyword value when using `text` mappings, for instance to +use in aggregations or ordering, you can use a multi-field mapping: [source,yaml] ---------------------------------------------------------------------- @@ -97,8 +115,9 @@ To keep the original keyword value when using `text` mappings, for instance to u - name: keyword <2> type: keyword <3> ---------------------------------------------------------------------- -<1> `multi_fields`: Define the `multi_fields` mapping parameter +<1> `multi_fields`: Define the `multi_fields` mapping parameter. <2> `name`: This is a conventional name for a multi-field. It can be anything (`raw` is another common option) but the convention is to use `keyword`. -<3> `type`: Specify the `keyword` type so we can use the field in aggregations or to order documents. +<3> `type`: Specify the `keyword` type to use the field in aggregations or to order documents. -Consult the https://www.elastic.co/guide/en/elasticsearch/reference/current/multi-fields.html[reference] for more information on multi-fields. +For more information, see the {ref}/multi-fields.html[{es} documentation about +multi-fields]. 
diff --git a/filebeat/autodiscover/builder/hints/logs.go b/filebeat/autodiscover/builder/hints/logs.go index 05ec4ac7b8f..e2f37caee74 100644 --- a/filebeat/autodiscover/builder/hints/logs.go +++ b/filebeat/autodiscover/builder/hints/logs.go @@ -109,7 +109,7 @@ func (l *logHints) CreateConfig(event bus.Event) []*common.Config { } logp.Debug("hints.builder", "generated config %+v", configs) // Apply information in event to the template to generate the final config - return template.ApplyConfigTemplate(event, configs) + return template.ApplyConfigTemplate(event, configs, false) } tempCfg := common.MapStr{} @@ -163,7 +163,7 @@ func (l *logHints) CreateConfig(event bus.Event) []*common.Config { logp.Debug("hints.builder", "generated config %+v", config) // Apply information in event to the template to generate the final config - return template.ApplyConfigTemplate(event, []*common.Config{config}) + return template.ApplyConfigTemplate(event, []*common.Config{config}, false) } func (l *logHints) getMultiline(hints common.MapStr) common.MapStr { diff --git a/filebeat/beater/filebeat.go b/filebeat/beater/filebeat.go index c339c0a6cc0..b16dad08895 100644 --- a/filebeat/beater/filebeat.go +++ b/filebeat/beater/filebeat.go @@ -317,6 +317,7 @@ func (fb *Filebeat) Run(b *beat.Beat) error { ), autodiscover.QueryConfig(), config.Autodiscover, + b.Keystore, ) if err != nil { return err diff --git a/filebeat/docs/fields.asciidoc b/filebeat/docs/fields.asciidoc index 5e163ab76c5..05f4677f138 100644 --- a/filebeat/docs/fields.asciidoc +++ b/filebeat/docs/fields.asciidoc @@ -1405,6 +1405,17 @@ type: boolean Fields for AWS CloudWatch logs. + +*`aws.cloudwatch.message`*:: ++ +-- +CloudWatch log message. + + +type: text + +-- + [float] === ec2 @@ -1664,6 +1675,7 @@ type: keyword -- The error reason if the executed action failed. + type: keyword -- @@ -26446,7 +26458,7 @@ format: duration The name of the upstream. -type: text +type: keyword -- @@ -26456,7 +26468,7 @@ type: text The name of the alternative upstream. -type: text +type: keyword -- @@ -26500,7 +26512,7 @@ type: long The randomly generated ID of the request -type: text +type: keyword -- @@ -28240,6 +28252,15 @@ type: keyword Palo Alto Networks name for the threat. +type: keyword + +-- + +*`panw.panos.action`*:: ++ +-- +Action taken for the session. + type: keyword -- @@ -28687,7 +28708,7 @@ The disk volume path. -- -*`certificate.common_name`*:: +*`santa.certificate.common_name`*:: + -- Common name from code signing certificate. @@ -28696,7 +28717,7 @@ type: keyword -- -*`certificate.sha256`*:: +*`santa.certificate.sha256`*:: + -- SHA256 hash of code signing certificate. diff --git a/filebeat/docs/inputs/input-common-unix-options.asciidoc b/filebeat/docs/inputs/input-common-unix-options.asciidoc index 443fe761274..f73278944a6 100644 --- a/filebeat/docs/inputs/input-common-unix-options.asciidoc +++ b/filebeat/docs/inputs/input-common-unix-options.asciidoc @@ -16,6 +16,22 @@ The maximum size of the message received over the socket. The default is `20MiB` The path to the Unix socket that will receive event streams. +[float] +[id="{beatname_lc}-input-{type}-unix-group"] +==== `group` + +The group ownership of the Unix socket that will be created by Filebeat. +The default is the primary group name for the user Filebeat is running as. +This option is ignored on Windows. + +[float] +[id="{beatname_lc}-input-{type}-unix-mode"] +==== `mode` + +The file mode of the Unix socket that will be created by Filebeat. 
This is
+expected to be a file mode as an octal string. The default value is the system
+default (generally `0755`).
+
 [float]
 [id="{beatname_lc}-input-{type}-unix-line-delimiter"]
 ==== `line_delimiter`
diff --git a/filebeat/docs/inputs/input-syslog.asciidoc b/filebeat/docs/inputs/input-syslog.asciidoc
index 0c360a03f7f..f9a24c04b81 100644
--- a/filebeat/docs/inputs/input-syslog.asciidoc
+++ b/filebeat/docs/inputs/input-syslog.asciidoc
@@ -51,6 +51,8 @@ include::../inputs/input-common-tcp-options.asciidoc[]
 
 ===== Protocol `unix`:
 
+beta[]
+
 include::../inputs/input-common-unix-options.asciidoc[]
 
 [id="{beatname_lc}-input-{type}-common-options"]
diff --git a/filebeat/docs/inputs/input-unix.asciidoc b/filebeat/docs/inputs/input-unix.asciidoc
new file mode 100644
index 00000000000..a2f445159b7
--- /dev/null
+++ b/filebeat/docs/inputs/input-unix.asciidoc
@@ -0,0 +1,35 @@
+:type: unix
+
+[id="{beatname_lc}-input-{type}"]
+=== Unix input
+
+beta[]
+
+++++
+Unix
+++++
+
+Use the `unix` input to read events over a stream-oriented Unix domain socket.
+
+Example configuration:
+
+["source","yaml",subs="attributes"]
+----
+{beatname_lc}.inputs:
+- type: unix
+  max_message_size: 10MiB
+  path: "/var/run/filebeat.sock"
+----
+
+
+==== Configuration options
+
+The `unix` input supports the following configuration options plus the
+<<{beatname_lc}-input-{type}-common-options>> described later.
+
+include::../inputs/input-common-unix-options.asciidoc[]
+
+[id="{beatname_lc}-input-{type}-common-options"]
+include::../inputs/input-common-options.asciidoc[]
+
+:type!:
diff --git a/filebeat/docs/modules/activemq.asciidoc b/filebeat/docs/modules/activemq.asciidoc
index c276cd63952..225090f80ef 100644
--- a/filebeat/docs/modules/activemq.asciidoc
+++ b/filebeat/docs/modules/activemq.asciidoc
@@ -10,8 +10,6 @@ This file is generated! See scripts/docs_collector.py
 
 == ActiveMQ module
 
-ga[]
-
 This module parses Apache ActiveMQ logs. It supports application and audit logs.
 
 include::../include/what-happens.asciidoc[]
diff --git a/filebeat/docs/modules/azure.asciidoc b/filebeat/docs/modules/azure.asciidoc
index 5d52e33beac..b194b7c320c 100644
--- a/filebeat/docs/modules/azure.asciidoc
+++ b/filebeat/docs/modules/azure.asciidoc
@@ -43,6 +43,7 @@ Will retrieve azure Active Directory audit logs. The audit logs provide traceabi
     connection_string: ""
     storage_account: ""
     storage_account_key: ""
+    resource_manager_endpoint: ""
 
   auditlogs:
     enabled: false
@@ -52,6 +53,7 @@ Will retrieve azure Active Directory audit logs. The audit logs provide traceabi
     connection_string: ""
     storage_account: ""
     storage_account_key: ""
+    resource_manager_endpoint: ""
 
   signinlogs:
     enabled: false
@@ -61,6 +63,7 @@ Will retrieve azure Active Directory audit logs. The audit logs provide traceabi
     connection_string: ""
     storage_account: ""
     storage_account_key: ""
+    resource_manager_endpoint: ""
 
 ```
 
@@ -90,6 +93,16 @@ The name of the storage account the state/offsets will be stored and updated.
 _string_
 The storage account key, this key will be used to authorize access to data in your storage account.
 
+`resource_manager_endpoint` ::
+_string_
+Optional. By default the module uses the Azure public environment. To override
+this, users can provide a specific resource manager endpoint in order to target
+a different Azure environment. Examples:
+
+* https://management.chinacloudapi.cn/ for Azure ChinaCloud
+* https://management.microsoftazure.de/ for Azure GermanCloud
+* https://management.azure.com/ for Azure PublicCloud
+* https://management.usgovcloudapi.net/ for Azure USGovernmentCloud
+
+This option can also be used with a hybrid cloud model, where users define
+their own endpoints.
+
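To make the new option concrete, here is a minimal sketch of an `activitylogs` fileset configuration targeting the ChinaCloud endpoint listed above (all other values are placeholders, mirroring the snippet shown earlier):

```
  activitylogs:
    enabled: true
    connection_string: ""
    storage_account: ""
    storage_account_key: ""
    resource_manager_endpoint: "https://management.chinacloudapi.cn/"
```

When the option is left empty, the module falls back to the Azure public environment endpoint (https://management.azure.com/).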
 include::../include/what-happens.asciidoc[]
 
 include::../include/gs-link.asciidoc[]
diff --git a/filebeat/docs/modules/cisco.asciidoc b/filebeat/docs/modules/cisco.asciidoc
index 14d571e6172..e252aacbf68 100644
--- a/filebeat/docs/modules/cisco.asciidoc
+++ b/filebeat/docs/modules/cisco.asciidoc
@@ -294,7 +294,7 @@ parameters on your Elasticsearch cluster:
 - {ref}/circuit-breaker.html#script-compilation-circuit-breaker[script.max_compilations_rate]:
   Increase to at least `100/5m`.
-- {ref}/modules-scripting-using.html#modules-scripting-using-caching[script.cache_max_size]:
+- {ref}/modules-scripting-using.html#modules-scripting-using-caching[script.cache.max_size]:
   Increase to at least `200` if using both filesets or other script-heavy modules.
 
 [float]
diff --git a/filebeat/docs/modules/mssql.asciidoc b/filebeat/docs/modules/mssql.asciidoc
index fdcc52fd567..7ecaa5e247a 100644
--- a/filebeat/docs/modules/mssql.asciidoc
+++ b/filebeat/docs/modules/mssql.asciidoc
@@ -25,7 +25,7 @@ file to override the default paths for Microsoft SQL Server logs:
 
 ["source","yaml",subs="attributes"]
 -----
 - module: mssql
-  access:
+  log:
     enabled: true
     var.paths: ["/var/opt/mssql/log/error*"]
 -----
 
@@ -35,7 +35,7 @@ To specify the same settings at the command line, you use:
 
 ["source","sh",subs="attributes"]
 -----
--M "mssql.access.var.paths=[/var/opt/mssql/log/error*]"
+-M "mssql.log.var.paths=[/var/opt/mssql/log/error*]"
 -----
 
 //set the fileset name used in the included example
diff --git a/filebeat/docs/running-on-cloudfoundry.asciidoc b/filebeat/docs/running-on-cloudfoundry.asciidoc
index 34c225ed831..ae9603dc012 100644
--- a/filebeat/docs/running-on-cloudfoundry.asciidoc
+++ b/filebeat/docs/running-on-cloudfoundry.asciidoc
@@ -1,5 +1,5 @@
 [[running-on-cloudfoundry]]
-=== Running {beatname_uc} on Cloud Foundry
+=== Run {beatname_uc} on Cloud Foundry
 
 You can use {beatname_uc} on Cloud Foundry to retrieve and ship logs.
 
@@ -14,18 +14,19 @@ endif::[]
 [float]
 ==== Cloud Foundry credentials
 
-{beatname_uc} needs credentials created with UAA so it can connect to loggregator to receive the logs. The uaac
+{beatname_uc} needs credentials created with UAA so it can connect to loggregator to receive the logs. The `uaac`
 command will create the required credentials for connecting to loggregator.
 
-["source", "sh"]
+["source","sh",subs="attributes"]
 ------------------------------------------------
 uaac client add {beatname_lc} --name {beatname_lc} --secret changeme --authorized_grant_types client_credentials,refresh_token --authorities doppler.firehose,cloud_controller.admin_read_only
 ------------------------------------------------
 
 [WARNING]
 =======================================
-*Use a unique secret:* The uaac command above is just an example and the secret should be changed and the
-`{beatname_lc}.yml` should be updated with your choosen secret.
+*Use a unique secret:* The `uaac` command shown here is an example. Remember to
+replace `changeme` with your secret, and update the +{beatname_lc}.yml+ file to
+use your chosen secret.
======================================= diff --git a/filebeat/docs/running-on-kubernetes.asciidoc b/filebeat/docs/running-on-kubernetes.asciidoc index f104a06e245..0df3c811a95 100644 --- a/filebeat/docs/running-on-kubernetes.asciidoc +++ b/filebeat/docs/running-on-kubernetes.asciidoc @@ -1,5 +1,5 @@ [[running-on-kubernetes]] -=== Running {beatname_uc} on Kubernetes +=== Run {beatname_uc} on Kubernetes You can use {beatname_uc} <> on Kubernetes to retrieve and ship container logs. diff --git a/filebeat/filebeat.reference.yml b/filebeat/filebeat.reference.yml index af2831f8848..dfa8f631360 100644 --- a/filebeat/filebeat.reference.yml +++ b/filebeat/filebeat.reference.yml @@ -1232,6 +1232,27 @@ output.elasticsearch: # The pin is a base64 encoded string of the SHA-256 fingerprint. #ssl.ca_sha256: "" + # Enable Kerberos support. Kerberos is automatically enabled if any Kerberos setting is set. + #kerberos.enabled: true + + # Authentication type to use with Kerberos. Available options: keytab, password. + #kerberos.auth_type: password + + # Path to the keytab file. It is used when auth_type is set to keytab. + #kerberos.keytab: /etc/elastic.keytab + + # Path to the Kerberos configuration. + #kerberos.config_path: /etc/krb5.conf + + # Name of the Kerberos user. + #kerberos.username: elastic + + # Password of the Kerberos user. It is used when auth_type is set to password. + #kerberos.password: changeme + + # Kerberos realm. + #kerberos.realm: ELASTIC + #----------------------------- Logstash output --------------------------------- #output.logstash: # Boolean flag to enable or disable the output module. @@ -1500,6 +1521,9 @@ output.elasticsearch: # never, once, and freely. Default is never. #ssl.renegotiation: never + # Enable Kerberos support. Kerberos is automatically enabled if any Kerberos setting is set. + #kerberos.enabled: true + # Authentication type to use with Kerberos. Available options: keytab, password. #kerberos.auth_type: password @@ -2086,6 +2110,27 @@ logging.files: # never, once, and freely. Default is never. #ssl.renegotiation: never + # Enable Kerberos support. Kerberos is automatically enabled if any Kerberos setting is set. + #kerberos.enabled: true + + # Authentication type to use with Kerberos. Available options: keytab, password. + #kerberos.auth_type: password + + # Path to the keytab file. It is used when auth_type is set to keytab. + #kerberos.keytab: /etc/elastic.keytab + + # Path to the Kerberos configuration. + #kerberos.config_path: /etc/krb5.conf + + # Name of the Kerberos user. + #kerberos.username: elastic + + # Password of the Kerberos user. It is used when auth_type is set to password. + #kerberos.password: changeme + + # Kerberos realm. 
+ #kerberos.realm: ELASTIC + #metrics.period: 10s #state.period: 1m diff --git a/filebeat/fileset/fileset_test.go b/filebeat/fileset/fileset_test.go index 59e5862eb2f..e7865074d8d 100644 --- a/filebeat/fileset/fileset_test.go +++ b/filebeat/fileset/fileset_test.go @@ -56,7 +56,7 @@ func TestLoadManifestNginx(t *testing.T) { manifest, err := fs.readManifest() assert.NoError(t, err) assert.Equal(t, manifest.ModuleVersion, "1.0") - assert.Equal(t, manifest.IngestPipeline, []string{"ingest/default.json"}) + assert.Equal(t, manifest.IngestPipeline, []string{"ingest/pipeline.yml"}) assert.Equal(t, manifest.Input, "config/nginx-access.yml") vars := manifest.Vars @@ -189,7 +189,7 @@ func TestGetInputConfigNginx(t *testing.T) { assert.True(t, cfg.HasField("pipeline")) pipelineID, err := cfg.String("pipeline", -1) assert.NoError(t, err) - assert.Equal(t, "filebeat-5.2.0-nginx-access-default", pipelineID) + assert.Equal(t, "filebeat-5.2.0-nginx-access-pipeline", pipelineID) } func TestGetInputConfigNginxOverrides(t *testing.T) { @@ -217,7 +217,7 @@ func TestGetInputConfigNginxOverrides(t *testing.T) { pipelineID, err := c.String("pipeline", -1) assert.NoError(t, err) - assert.Equal(t, "filebeat-5.2.0-nginx-access-default", pipelineID) + assert.Equal(t, "filebeat-5.2.0-nginx-access-pipeline", pipelineID) }, }, "pipeline": { @@ -276,7 +276,7 @@ func TestGetPipelineNginx(t *testing.T) { assert.Len(t, pipelines, 1) pipeline := pipelines[0] - assert.Equal(t, "filebeat-5.2.0-nginx-access-default", pipeline.id) + assert.Equal(t, "filebeat-5.2.0-nginx-access-pipeline", pipeline.id) assert.Contains(t, pipeline.contents, "description") assert.Contains(t, pipeline.contents, "processors") } diff --git a/filebeat/fileset/modules_integration_test.go b/filebeat/fileset/modules_integration_test.go index 8c5bc91bf70..5428fb1f549 100644 --- a/filebeat/fileset/modules_integration_test.go +++ b/filebeat/fileset/modules_integration_test.go @@ -115,7 +115,7 @@ func TestSetupNginx(t *testing.T) { t.Fatal(err) } - status, _, _ := client.Request("GET", "/_ingest/pipeline/filebeat-5.2.0-nginx-access-default", "", nil, nil) + status, _, _ := client.Request("GET", "/_ingest/pipeline/filebeat-5.2.0-nginx-access-pipeline", "", nil, nil) assert.Equal(t, 200, status) status, _, _ = client.Request("GET", "/_ingest/pipeline/filebeat-5.2.0-nginx-error-pipeline", "", nil, nil) assert.Equal(t, 200, status) diff --git a/filebeat/input/kafka/config.go b/filebeat/input/kafka/config.go index b132a843055..0e4888b90c3 100644 --- a/filebeat/input/kafka/config.go +++ b/filebeat/input/kafka/config.go @@ -180,7 +180,7 @@ func newSaramaConfig(config kafkaInputConfig) (*sarama.Config, error) { k.Net.TLS.Config = tls.BuildModuleConfig("") } - if config.Kerberos != nil { + if config.Kerberos.IsEnabled() { cfgwarn.Beta("Kerberos authentication for Kafka is beta.") k.Net.SASL.Enable = true diff --git a/filebeat/input/log/input.go b/filebeat/input/log/input.go index ac0d71cf53d..b3cf4049551 100644 --- a/filebeat/input/log/input.go +++ b/filebeat/input/log/input.go @@ -161,7 +161,7 @@ func NewInput( // It goes through all states coming from the registry. Only the states which match the glob patterns of // the input will be loaded and updated. All other states will not be touched. func (p *Input) loadStates(states []file.State) error { - logp.Debug("input", "exclude_files: %s. Number of stats: %d", p.config.ExcludeFiles, len(states)) + logp.Debug("input", "exclude_files: %s. 
Number of states: %d", p.config.ExcludeFiles, len(states)) for _, state := range states { // Check if state source belongs to this input. If yes, update the state. diff --git a/filebeat/input/syslog/config.go b/filebeat/input/syslog/config.go index 5b6ac1452b4..ff009bfb1dd 100644 --- a/filebeat/input/syslog/config.go +++ b/filebeat/input/syslog/config.go @@ -30,6 +30,7 @@ import ( "github.com/elastic/beats/v7/filebeat/inputsource/udp" "github.com/elastic/beats/v7/filebeat/inputsource/unix" "github.com/elastic/beats/v7/libbeat/common" + "github.com/elastic/beats/v7/libbeat/common/cfgwarn" "github.com/elastic/beats/v7/libbeat/logp" ) @@ -98,6 +99,8 @@ func factory( return tcp.New(&config.Config, factory) case unix.Name: + cfgwarn.Beta("Syslog Unix socket support is beta.") + config := defaultUnix if err := cfg.Unpack(&config); err != nil { return nil, err diff --git a/filebeat/input/unix/input.go b/filebeat/input/unix/input.go index 12c091f00da..19609cb5ab8 100644 --- a/filebeat/input/unix/input.go +++ b/filebeat/input/unix/input.go @@ -30,6 +30,7 @@ import ( "github.com/elastic/beats/v7/filebeat/inputsource/unix" "github.com/elastic/beats/v7/libbeat/beat" "github.com/elastic/beats/v7/libbeat/common" + "github.com/elastic/beats/v7/libbeat/common/cfgwarn" "github.com/elastic/beats/v7/libbeat/logp" ) @@ -56,6 +57,7 @@ func NewInput( connector channel.Connector, context input.Context, ) (input.Input, error) { + cfgwarn.Beta("Unix socket support is beta.") out, err := connector.ConnectWith(cfg, beat.ClientConfig{ Processing: beat.ProcessingConfig{ diff --git a/filebeat/inputsource/tcp/server_test.go b/filebeat/inputsource/tcp/server_test.go index 15831666206..032f7d33e29 100644 --- a/filebeat/inputsource/tcp/server_test.go +++ b/filebeat/inputsource/tcp/server_test.go @@ -213,7 +213,7 @@ func TestReceiveNewEventsConcurrently(t *testing.T) { to := func(message []byte, mt inputsource.NetworkMetadata) { ch <- &info{message: string(message), mt: mt} } - cfg, err := common.NewConfigFrom(map[string]interface{}{"host": ":0"}) + cfg, err := common.NewConfigFrom(map[string]interface{}{"host": "127.0.0.1:0"}) if !assert.NoError(t, err) { return } diff --git a/filebeat/inputsource/unix/config.go b/filebeat/inputsource/unix/config.go index 79b2a43dd08..5051ab86e75 100644 --- a/filebeat/inputsource/unix/config.go +++ b/filebeat/inputsource/unix/config.go @@ -30,6 +30,8 @@ const Name = "unix" // Config exposes the unix configuration. 
type Config struct { Path string `config:"path"` + Group *string `config:"group"` + Mode *string `config:"mode"` Timeout time.Duration `config:"timeout" validate:"nonzero,positive"` MaxMessageSize cfgtype.ByteSize `config:"max_message_size" validate:"nonzero,positive"` MaxConnections int `config:"max_connections"` diff --git a/filebeat/inputsource/unix/server.go b/filebeat/inputsource/unix/server.go index 965a3300282..ee9a0f4564d 100644 --- a/filebeat/inputsource/unix/server.go +++ b/filebeat/inputsource/unix/server.go @@ -20,10 +20,16 @@ package unix import ( "fmt" "net" + "os" + "os/user" + "runtime" + "strconv" + "github.com/pkg/errors" "golang.org/x/net/netutil" "github.com/elastic/beats/v7/filebeat/inputsource/common" + "github.com/elastic/beats/v7/libbeat/logp" ) // Server represent a unix server @@ -55,13 +61,91 @@ func New( } func (s *Server) createServer() (net.Listener, error) { + if err := s.cleanupStaleSocket(); err != nil { + return nil, err + } + l, err := net.Listen("unix", s.config.Path) if err != nil { return nil, err } + if err := s.setSocketOwnership(); err != nil { + return nil, err + } + + if err := s.setSocketMode(); err != nil { + return nil, err + } + if s.config.MaxConnections > 0 { return netutil.LimitListener(l, s.config.MaxConnections), nil } return l, nil } + +func (s *Server) cleanupStaleSocket() error { + path := s.config.Path + info, err := os.Lstat(path) + if err != nil { + // If the file does not exist, then the cleanup can be considered successful. + if os.IsNotExist(err) { + return nil + } + return errors.Wrapf(err, "cannot lstat unix socket file at location %s", path) + } + + if runtime.GOOS != "windows" { + // see https://github.com/golang/go/issues/33357 for context on Windows socket file attributes bug + if info.Mode()&os.ModeSocket == 0 { + return fmt.Errorf("refusing to remove file at location %s, it is not a socket", path) + } + } + + if err := os.Remove(path); err != nil { + return errors.Wrapf(err, "cannot remove existing unix socket file at location %s", path) + } + + return nil +} + +func (s *Server) setSocketOwnership() error { + if s.config.Group != nil { + if runtime.GOOS == "windows" { + logp.NewLogger("unix").Warn("windows does not support the 'group' configuration option, ignoring") + return nil + } + g, err := user.LookupGroup(*s.config.Group) + if err != nil { + return err + } + gid, err := strconv.Atoi(g.Gid) + if err != nil { + return err + } + return os.Chown(s.config.Path, -1, gid) + } + return nil +} + +func (s *Server) setSocketMode() error { + if s.config.Mode != nil { + mode, err := parseFileMode(*s.config.Mode) + if err != nil { + return err + } + return os.Chmod(s.config.Path, mode) + } + return nil +} + +func parseFileMode(mode string) (os.FileMode, error) { + parsed, err := strconv.ParseUint(mode, 8, 32) + if err != nil { + return 0, err + } + if parsed > 0777 { + return 0, errors.New("invalid file mode") + } + return os.FileMode(parsed), nil +} diff --git a/filebeat/inputsource/unix/server_test.go b/filebeat/inputsource/unix/server_test.go index 36e75c757e9..a9043d14a8e 100644 --- a/filebeat/inputsource/unix/server_test.go +++ b/filebeat/inputsource/unix/server_test.go @@ -23,7 +23,10 @@ import ( "math/rand" "net" "os" + "os/user" "path/filepath" + "runtime" + "strconv" "strings" "testing" "time" @@ -35,6 +38,7 @@ import ( "github.com/elastic/beats/v7/filebeat/inputsource" netcommon "github.com/elastic/beats/v7/filebeat/inputsource/common" "github.com/elastic/beats/v7/libbeat/common" + 
"github.com/elastic/beats/v7/libbeat/common/file" "github.com/elastic/beats/v7/libbeat/logp" ) @@ -207,6 +211,92 @@ func TestReceiveEventsAndMetadata(t *testing.T) { } } +func TestSocketOwnershipAndMode(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("changing socket ownership is only supported on non-windows") + return + } + + groups, err := os.Getgroups() + require.NoError(t, err) + + if len(groups) <= 1 { + t.Skip("no group that we can change to") + return + } + + group, err := user.LookupGroupId(strconv.Itoa(groups[1])) + require.NoError(t, err) + + path := filepath.Join(os.TempDir(), "test.sock") + cfg, _ := common.NewConfigFrom(map[string]interface{}{ + "path": path, + "group": group.Name, + "mode": "0740", + }) + config := defaultConfig + err = cfg.Unpack(&config) + require.NoError(t, err) + + factory := netcommon.SplitHandlerFactory(netcommon.FamilyUnix, logp.NewLogger("test"), MetadataCallback, nil, netcommon.SplitFunc([]byte("\n"))) + server, err := New(&config, factory) + require.NoError(t, err) + err = server.Start() + require.NoError(t, err) + defer server.Stop() + + info, err := file.Lstat(path) + require.NoError(t, err) + require.NotEqual(t, 0, info.Mode()&os.ModeSocket) + require.Equal(t, os.FileMode(0740), info.Mode().Perm()) + gid, err := info.GID() + require.NoError(t, err) + require.Equal(t, group.Gid, strconv.Itoa(gid)) +} + +func TestSocketCleanup(t *testing.T) { + path := filepath.Join(os.TempDir(), "test.sock") + mockStaleSocket, err := net.Listen("unix", path) + require.NoError(t, err) + defer mockStaleSocket.Close() + + cfg, _ := common.NewConfigFrom(map[string]interface{}{ + "path": path, + }) + config := defaultConfig + require.NoError(t, cfg.Unpack(&config)) + factory := netcommon.SplitHandlerFactory(netcommon.FamilyUnix, logp.NewLogger("test"), MetadataCallback, nil, netcommon.SplitFunc([]byte("\n"))) + server, err := New(&config, factory) + require.NoError(t, err) + err = server.Start() + require.NoError(t, err) + server.Stop() +} + +func TestSocketCleanupRefusal(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("skipping due to windows FileAttributes bug https://github.com/golang/go/issues/33357") + return + } + path := filepath.Join(os.TempDir(), "test.sock") + f, err := os.Create(path) + require.NoError(t, err) + require.NoError(t, f.Close()) + defer os.Remove(path) + + cfg, _ := common.NewConfigFrom(map[string]interface{}{ + "path": path, + }) + config := defaultConfig + require.NoError(t, cfg.Unpack(&config)) + factory := netcommon.SplitHandlerFactory(netcommon.FamilyUnix, logp.NewLogger("test"), MetadataCallback, nil, netcommon.SplitFunc([]byte("\n"))) + server, err := New(&config, factory) + require.NoError(t, err) + err = server.Start() + require.Error(t, err) + require.Contains(t, err.Error(), "refusing to remove file at location") +} + func TestReceiveNewEventsConcurrently(t *testing.T) { workers := 4 eventsCount := 100 diff --git a/filebeat/magefile.go b/filebeat/magefile.go index e63c6331852..6270c51f205 100644 --- a/filebeat/magefile.go +++ b/filebeat/magefile.go @@ -34,7 +34,7 @@ import ( // mage:import generate _ "github.com/elastic/beats/v7/filebeat/scripts/mage/generate" // mage:import - "github.com/elastic/beats/v7/dev-tools/mage/target/unittest" + _ "github.com/elastic/beats/v7/dev-tools/mage/target/unittest" // mage:import "github.com/elastic/beats/v7/dev-tools/mage/target/test" ) @@ -46,12 +46,6 @@ func init() { devtools.BeatDescription = "Filebeat sends log files to Logstash or directly to Elasticsearch." 
}
 
-// Aliases provides compatibility with CI while we transition all Beats
-// to having common testing targets.
-var Aliases = map[string]interface{}{
-	"goTestUnit": unittest.GoUnitTest, // dev-tools/jenkins_ci.ps1 uses this.
-}
-
 // Build builds the Beat binary.
 func Build() error {
 	return devtools.Build(devtools.DefaultBuildArgs())
 }
@@ -182,8 +176,6 @@ func ExportDashboard() error {
 
 // IntegTest executes integration tests (it uses Docker to run the tests).
 func IntegTest() {
-	devtools.AddIntegTestUsage()
-	defer devtools.StopIntegTestEnv()
 	mg.SerialDeps(GoIntegTest, PythonIntegTest)
 }
 
@@ -191,7 +183,13 @@ func IntegTest() {
 // Use TEST_COVERAGE=true to enable code coverage profiling.
 // Use RACE_DETECTOR=true to enable the race detector.
 func GoIntegTest(ctx context.Context) error {
-	return devtools.GoTest(ctx, devtools.DefaultGoTestIntegrationArgs())
+	runner, err := devtools.NewDockerIntegrationRunner()
+	if err != nil {
+		return err
+	}
+	return runner.Test("goIntegTest", func() error {
+		return devtools.GoTest(ctx, devtools.DefaultGoTestIntegrationArgs())
+	})
 }
 
 // PythonIntegTest executes the python system tests in the integration environment (Docker).
@@ -202,10 +200,14 @@ func PythonIntegTest(ctx context.Context) error {
 	if !devtools.IsInIntegTestEnv() {
 		mg.Deps(Fields)
 	}
-	return devtools.RunIntegTest("pythonIntegTest", func() error {
+	runner, err := devtools.NewDockerIntegrationRunner(append(devtools.ListMatchingEnvVars("TESTING_FILEBEAT_", "NOSE_"), "GENERATE")...)
+	if err != nil {
+		return err
+	}
+	return runner.Test("pythonIntegTest", func() error {
 		mg.Deps(devtools.BuildSystemTestBinary)
 		args := devtools.DefaultPythonTestIntegrationArgs()
 		args.Env["MODULES_PATH"] = devtools.CWD("module")
 		return devtools.PythonNoseTest(args)
-	}, "GENERATE", "TESTING_FILEBEAT_MODULES", "TESTING_FILEBEAT_FILESETS")
+	})
 }
diff --git a/filebeat/module/mysql/error/_meta/fields.epr.yml b/filebeat/module/mysql/error/_meta/fields.epr.yml
new file mode 100644
index 00000000000..33f95664d61
--- /dev/null
+++ b/filebeat/module/mysql/error/_meta/fields.epr.yml
@@ -0,0 +1,21 @@
+- name: event.code
+  type: keyword
+  description: Identification code for this event
+- name: event.provider
+  type: keyword
+  description: Source of the event (e.g. Server)
+- name: event.created
+  type: date
+  description: Date/time when the event was first read by an agent, or by your pipeline.
+- name: event.timezone
+  type: keyword
+  description: Time zone information
+- name: event.kind
+  type: keyword
+  description: Event kind (e.g. event)
+- name: event.category
+  type: keyword
+  description: Event category (e.g. database)
+- name: event.type
+  type: keyword
+  description: Event type (e.g. info, error)
diff --git a/filebeat/module/nginx/access/ingest/default.json b/filebeat/module/nginx/access/ingest/default.json
deleted file mode 100644
index 04efd885e69..00000000000
--- a/filebeat/module/nginx/access/ingest/default.json
+++ /dev/null
@@ -1,150 +0,0 @@
-{
-    "description": "Pipeline for parsing Nginx access logs. 
Requires the geoip and user_agent plugins.", - "processors": [ - { - "grok": { - "field": "message", - "patterns": [ - "(%{NGINX_HOST} )?\"?(?:%{NGINX_ADDRESS_LIST:nginx.access.remote_ip_list}|%{NOTSPACE:source.address}) - %{DATA:user.name} \\[%{HTTPDATE:nginx.access.time}\\] \"%{DATA:nginx.access.info}\" %{NUMBER:http.response.status_code:long} %{NUMBER:http.response.body.bytes:long} \"%{DATA:http.request.referrer}\" \"%{DATA:user_agent.original}\"" - ], - "pattern_definitions": { - "NGINX_HOST": "(?:%{IP:destination.ip}|%{NGINX_NOTSEPARATOR:destination.domain})(:%{NUMBER:destination.port})?", - "NGINX_NOTSEPARATOR": "[^\t ,:]+", - "NGINX_ADDRESS_LIST": "(?:%{IP}|%{WORD})(\"?,?\\s*(?:%{IP}|%{WORD}))*" - }, - "ignore_missing": true - } - }, - { - "grok": { - "field": "nginx.access.info", - "patterns": [ - "%{WORD:http.request.method} %{DATA:url.original} HTTP/%{NUMBER:http.version}", - "" - ], - "ignore_missing": true - } - }, - { - "remove": { - "field": "nginx.access.info" - } - }, - { - "split": { - "field": "nginx.access.remote_ip_list", - "separator": "\"?,?\\s+", - "ignore_missing": true - } - }, - { - "split": { - "field": "nginx.access.origin", - "separator": "\"?,?\\s+", - "ignore_missing": true - } - }, - { - "set": { - "field": "source.address", - "if": "ctx.source?.address == null", - "value": "" - } - }, - { - "script": { - "if": "ctx.nginx?.access?.remote_ip_list != null && ctx.nginx.access.remote_ip_list.length > 0", - "lang": "painless", - "source": "boolean isPrivate(def dot, def ip) { try { StringTokenizer tok = new StringTokenizer(ip, dot); int firstByte = Integer.parseInt(tok.nextToken()); int secondByte = Integer.parseInt(tok.nextToken()); if (firstByte == 10) { return true; } if (firstByte == 192 && secondByte == 168) { return true; } if (firstByte == 172 && secondByte >= 16 && secondByte <= 31) { return true; } if (firstByte == 127) { return true; } return false; } catch (Exception e) { return false; } } try { ctx.source.address = null; if (ctx.nginx.access.remote_ip_list == null) { return; } def found = false; for (def item : ctx.nginx.access.remote_ip_list) { if (!isPrivate(params.dot, item)) { ctx.source.address = item; found = true; break; } } if (!found) { ctx.source.address = ctx.nginx.access.remote_ip_list[0]; }} catch (Exception e) { ctx.source.address = null; }", - "params": { - "dot": "." 
- } - } - }, - { - "remove": { - "field": "source.address", - "if": "ctx.source.address == null" - } - }, - { - "grok": { - "field": "source.address", - "patterns": ["^%{IP:source.ip}$"], - "ignore_failure": true - } - }, - { - "remove": { - "field": "message" - } - }, - { - "rename": { - "field": "@timestamp", - "target_field": "event.created" - } - }, - { - "date": { - "field": "nginx.access.time", - "target_field": "@timestamp", - "formats": [ - "dd/MMM/yyyy:H:m:s Z" - ], - "on_failure": [{"append": {"field": "error.message", "value": "{{ _ingest.on_failure_message }}"}}] - } - }, - { - "remove": { - "field": "nginx.access.time" - } - }, - { - "user_agent": { - "field": "user_agent.original" - } - }, - { - "geoip": { - "field": "source.ip", - "target_field": "source.geo", - "ignore_missing": true - } - }, - { - "geoip": { - "database_file": "GeoLite2-ASN.mmdb", - "field": "source.ip", - "target_field": "source.as", - "properties": [ - "asn", - "organization_name" - ], - "ignore_missing": true - } - }, - { - "rename": { - "field": "source.as.asn", - "target_field": "source.as.number", - "ignore_missing": true - } - }, - { - "rename": { - "field": "source.as.organization_name", - "target_field": "source.as.organization.name", - "ignore_missing": true - } - } - ], - "on_failure": [ - { - "set": { - "field": "error.message", - "value": "{{ _ingest.on_failure_message }}" - } - } - ] -} diff --git a/filebeat/module/nginx/access/ingest/pipeline.yml b/filebeat/module/nginx/access/ingest/pipeline.yml new file mode 100644 index 00000000000..3a41265875b --- /dev/null +++ b/filebeat/module/nginx/access/ingest/pipeline.yml @@ -0,0 +1,167 @@ +description: Pipeline for parsing Nginx access logs. Requires the geoip and user_agent + plugins. +processors: +- grok: + field: message + patterns: + - (%{NGINX_HOST} )?"?(?:%{NGINX_ADDRESS_LIST:nginx.access.remote_ip_list}|%{NOTSPACE:source.address}) + - (-|%{DATA:user.name}) \[%{HTTPDATE:nginx.access.time}\] "%{DATA:nginx.access.info}" + %{NUMBER:http.response.status_code:long} %{NUMBER:http.response.body.bytes:long} + "(-|%{DATA:http.request.referrer})" "(-|%{DATA:user_agent.original})" + pattern_definitions: + NGINX_HOST: (?:%{IP:destination.ip}|%{NGINX_NOTSEPARATOR:destination.domain})(:%{NUMBER:destination.port})? 
+ NGINX_NOTSEPARATOR: "[^\t ,:]+" + NGINX_ADDRESS_LIST: (?:%{IP}|%{WORD})("?,?\s*(?:%{IP}|%{WORD}))* + ignore_missing: true +- grok: + field: nginx.access.info + patterns: + - '%{WORD:http.request.method} %{DATA:url.original} HTTP/%{NUMBER:http.version}' + - "" + ignore_missing: true +- remove: + field: nginx.access.info +- split: + field: nginx.access.remote_ip_list + separator: '"?,?\s+' + ignore_missing: true +- split: + field: nginx.access.origin + separator: '"?,?\s+' + ignore_missing: true +- set: + field: source.address + if: ctx.source?.address == null + value: "" +- script: + if: ctx.nginx?.access?.remote_ip_list != null && ctx.nginx.access.remote_ip_list.length > 0 + lang: painless + source: >- + boolean isPrivate(def dot, def ip) { + try { + StringTokenizer tok = new StringTokenizer(ip, dot); + int firstByte = Integer.parseInt(tok.nextToken()); + int secondByte = Integer.parseInt(tok.nextToken()); + if (firstByte == 10) { + return true; + } + if (firstByte == 192 && secondByte == 168) { + return true; + } + if (firstByte == 172 && secondByte >= 16 && secondByte <= 31) { + return true; + } + if (firstByte == 127) { + return true; + } + return false; + } + catch (Exception e) { + return false; + } + } + try { + ctx.source.address = null; + if (ctx.nginx.access.remote_ip_list == null) { + return; + } + def found = false; + for (def item : ctx.nginx.access.remote_ip_list) { + if (!isPrivate(params.dot, item)) { + ctx.source.address = item; + found = true; + break; + } + } + if (!found) { + ctx.source.address = ctx.nginx.access.remote_ip_list[0]; + } + } + catch (Exception e) { + ctx.source.address = null; + } + params: + dot: . +- remove: + field: source.address + if: ctx.source.address == null +- grok: + field: source.address + patterns: + - ^%{IP:source.ip}$ + ignore_failure: true +- remove: + field: message +- rename: + field: '@timestamp' + target_field: event.created +- date: + field: nginx.access.time + target_field: '@timestamp' + formats: + - dd/MMM/yyyy:H:m:s Z + on_failure: + - append: + field: error.message + value: '{{ _ingest.on_failure_message }}' +- remove: + field: nginx.access.time +- user_agent: + field: user_agent.original + ignore_missing: true +- geoip: + field: source.ip + target_field: source.geo + ignore_missing: true +- geoip: + database_file: GeoLite2-ASN.mmdb + field: source.ip + target_field: source.as + properties: + - asn + - organization_name + ignore_missing: true +- rename: + field: source.as.asn + target_field: source.as.number + ignore_missing: true +- rename: + field: source.as.organization_name + target_field: source.as.organization.name + ignore_missing: true +- set: + field: event.kind + value: event +- append: + field: event.category + value: web +- append: + field: event.type + value: access +- set: + field: event.outcome + value: success + if: "ctx?.http?.response?.status_code != null && ctx.http.response.status_code < 400" +- set: + field: event.outcome + value: failure + if: "ctx?.http?.response?.status_code != null && ctx.http.response.status_code >= 400" +- lowercase: + field: http.request.method + ignore_missing: true +- append: + field: related.ip + value: "{{source.ip}}" + if: "ctx?.source?.ip != null" +- append: + field: related.ip + value: "{{destination.ip}}" + if: "ctx?.destination?.ip != null" +- append: + field: related.user + value: "{{user.name}}" + if: "ctx?.user?.name != null" +on_failure: +- set: + field: error.message + value: '{{ _ingest.on_failure_message }}' diff --git a/filebeat/module/nginx/access/manifest.yml 
b/filebeat/module/nginx/access/manifest.yml index d3d6211dccc..8d04dd32e5c 100644 --- a/filebeat/module/nginx/access/manifest.yml +++ b/filebeat/module/nginx/access/manifest.yml @@ -9,7 +9,7 @@ var: os.windows: - c:/programdata/nginx/logs/*access.log* -ingest_pipeline: ingest/default.json +ingest_pipeline: ingest/pipeline.yml input: config/nginx-access.yml requires.processors: diff --git a/filebeat/module/nginx/access/test/access.log-expected.json b/filebeat/module/nginx/access/test/access.log-expected.json index a121dd67613..12c94f2996d 100644 --- a/filebeat/module/nginx/access/test/access.log-expected.json +++ b/filebeat/module/nginx/access/test/access.log-expected.json @@ -1,12 +1,19 @@ [ { "@timestamp": "2016-10-25T12:49:33.000Z", + "event.category": [ + "web" + ], "event.dataset": "nginx.access", + "event.kind": "event", "event.module": "nginx", + "event.outcome": "success", "event.timezone": "-02:00", + "event.type": [ + "access" + ], "fileset.name": "access", - "http.request.method": "GET", - "http.request.referrer": "-", + "http.request.method": "get", "http.response.body.bytes": 612, "http.response.status_code": 200, "http.version": "1.1", @@ -15,6 +22,9 @@ "nginx.access.remote_ip_list": [ "77.179.66.156" ], + "related.ip": [ + "77.179.66.156" + ], "service.type": "nginx", "source.address": "77.179.66.156", "source.as.number": 6805, @@ -28,7 +38,6 @@ "source.geo.region_name": "Rheinland-Pfalz", "source.ip": "77.179.66.156", "url.original": "/", - "user.name": "-", "user_agent.device.name": "Other", "user_agent.name": "Chrome", "user_agent.original": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.59 Safari/537.36", @@ -39,11 +48,19 @@ }, { "@timestamp": "2016-10-25T12:49:34.000Z", + "event.category": [ + "web" + ], "event.dataset": "nginx.access", + "event.kind": "event", "event.module": "nginx", + "event.outcome": "failure", "event.timezone": "-02:00", + "event.type": [ + "access" + ], "fileset.name": "access", - "http.request.method": "GET", + "http.request.method": "get", "http.request.referrer": "http://localhost:8080/", "http.response.body.bytes": 571, "http.response.status_code": 404, @@ -53,6 +70,9 @@ "nginx.access.remote_ip_list": [ "77.179.66.156" ], + "related.ip": [ + "77.179.66.156" + ], "service.type": "nginx", "source.address": "77.179.66.156", "source.as.number": 6805, @@ -66,7 +86,6 @@ "source.geo.region_name": "Rheinland-Pfalz", "source.ip": "77.179.66.156", "url.original": "/favicon.ico", - "user.name": "-", "user_agent.device.name": "Other", "user_agent.name": "Chrome", "user_agent.original": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.59 Safari/537.36", @@ -77,12 +96,19 @@ }, { "@timestamp": "2016-10-25T12:50:44.000Z", + "event.category": [ + "web" + ], "event.dataset": "nginx.access", + "event.kind": "event", "event.module": "nginx", + "event.outcome": "failure", "event.timezone": "-02:00", + "event.type": [ + "access" + ], "fileset.name": "access", - "http.request.method": "GET", - "http.request.referrer": "-", + "http.request.method": "get", "http.response.body.bytes": 571, "http.response.status_code": 404, "http.version": "1.1", @@ -91,6 +117,9 @@ "nginx.access.remote_ip_list": [ "77.179.66.156" ], + "related.ip": [ + "77.179.66.156" + ], "service.type": "nginx", "source.address": "77.179.66.156", "source.as.number": 6805, @@ -104,7 +133,6 @@ "source.geo.region_name": "Rheinland-Pfalz", "source.ip": "77.179.66.156", "url.original": 
"/adsasd", - "user.name": "-", "user_agent.device.name": "Other", "user_agent.name": "Chrome", "user_agent.original": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.59 Safari/537.36", @@ -115,12 +143,19 @@ }, { "@timestamp": "2016-12-07T09:34:43.000Z", + "event.category": [ + "web" + ], "event.dataset": "nginx.access", + "event.kind": "event", "event.module": "nginx", + "event.outcome": "success", "event.timezone": "-02:00", + "event.type": [ + "access" + ], "fileset.name": "access", - "http.request.method": "GET", - "http.request.referrer": "-", + "http.request.method": "get", "http.response.body.bytes": 612, "http.response.status_code": 200, "http.version": "1.1", @@ -129,6 +164,9 @@ "nginx.access.remote_ip_list": [ "77.179.66.156" ], + "related.ip": [ + "77.179.66.156" + ], "service.type": "nginx", "source.address": "77.179.66.156", "source.as.number": 6805, @@ -142,7 +180,6 @@ "source.geo.region_name": "Rheinland-Pfalz", "source.ip": "77.179.66.156", "url.original": "/", - "user.name": "-", "user_agent.device.name": "Other", "user_agent.name": "Chrome", "user_agent.original": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.98 Safari/537.36", @@ -153,11 +190,19 @@ }, { "@timestamp": "2016-12-07T09:34:43.000Z", + "event.category": [ + "web" + ], "event.dataset": "nginx.access", + "event.kind": "event", "event.module": "nginx", + "event.outcome": "failure", "event.timezone": "-02:00", + "event.type": [ + "access" + ], "fileset.name": "access", - "http.request.method": "GET", + "http.request.method": "get", "http.request.referrer": "http://localhost:8080/", "http.response.body.bytes": 571, "http.response.status_code": 404, @@ -167,6 +212,9 @@ "nginx.access.remote_ip_list": [ "77.179.66.156" ], + "related.ip": [ + "77.179.66.156" + ], "service.type": "nginx", "source.address": "77.179.66.156", "source.as.number": 6805, @@ -180,7 +228,6 @@ "source.geo.region_name": "Rheinland-Pfalz", "source.ip": "77.179.66.156", "url.original": "/favicon.ico", - "user.name": "-", "user_agent.device.name": "Other", "user_agent.name": "Chrome", "user_agent.original": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.98 Safari/537.36", @@ -191,12 +238,19 @@ }, { "@timestamp": "2016-12-07T09:43:18.000Z", + "event.category": [ + "web" + ], "event.dataset": "nginx.access", + "event.kind": "event", "event.module": "nginx", + "event.outcome": "failure", "event.timezone": "-02:00", + "event.type": [ + "access" + ], "fileset.name": "access", - "http.request.method": "GET", - "http.request.referrer": "-", + "http.request.method": "get", "http.response.body.bytes": 571, "http.response.status_code": 404, "http.version": "1.1", @@ -205,6 +259,9 @@ "nginx.access.remote_ip_list": [ "77.179.66.156" ], + "related.ip": [ + "77.179.66.156" + ], "service.type": "nginx", "source.address": "77.179.66.156", "source.as.number": 6805, @@ -218,7 +275,6 @@ "source.geo.region_name": "Rheinland-Pfalz", "source.ip": "77.179.66.156", "url.original": "/test", - "user.name": "-", "user_agent.device.name": "Other", "user_agent.name": "Chrome", "user_agent.original": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.98 Safari/537.36", @@ -229,12 +285,19 @@ }, { "@timestamp": "2016-12-07T09:43:21.000Z", + "event.category": [ + "web" + ], "event.dataset": "nginx.access", + "event.kind": "event", "event.module": "nginx", + 
"event.outcome": "failure", "event.timezone": "-02:00", + "event.type": [ + "access" + ], "fileset.name": "access", - "http.request.method": "GET", - "http.request.referrer": "-", + "http.request.method": "get", "http.response.body.bytes": 571, "http.response.status_code": 404, "http.version": "1.1", @@ -243,6 +306,9 @@ "nginx.access.remote_ip_list": [ "77.179.66.156" ], + "related.ip": [ + "77.179.66.156" + ], "service.type": "nginx", "source.address": "77.179.66.156", "source.as.number": 6805, @@ -256,7 +322,6 @@ "source.geo.region_name": "Rheinland-Pfalz", "source.ip": "77.179.66.156", "url.original": "/test", - "user.name": "-", "user_agent.device.name": "Other", "user_agent.name": "Chrome", "user_agent.original": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.98 Safari/537.36", @@ -267,12 +332,19 @@ }, { "@timestamp": "2016-12-07T09:43:23.000Z", + "event.category": [ + "web" + ], "event.dataset": "nginx.access", + "event.kind": "event", "event.module": "nginx", + "event.outcome": "failure", "event.timezone": "-02:00", + "event.type": [ + "access" + ], "fileset.name": "access", - "http.request.method": "GET", - "http.request.referrer": "-", + "http.request.method": "get", "http.response.body.bytes": 571, "http.response.status_code": 404, "http.version": "1.1", @@ -281,6 +353,9 @@ "nginx.access.remote_ip_list": [ "77.179.66.156" ], + "related.ip": [ + "77.179.66.156" + ], "service.type": "nginx", "source.address": "77.179.66.156", "source.as.number": 6805, @@ -294,7 +369,6 @@ "source.geo.region_name": "Rheinland-Pfalz", "source.ip": "77.179.66.156", "url.original": "/test1", - "user.name": "-", "user_agent.device.name": "Other", "user_agent.name": "Chrome", "user_agent.original": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.98 Safari/537.36", @@ -305,12 +379,19 @@ }, { "@timestamp": "2016-12-07T10:04:37.000Z", + "event.category": [ + "web" + ], "event.dataset": "nginx.access", + "event.kind": "event", "event.module": "nginx", + "event.outcome": "failure", "event.timezone": "-02:00", + "event.type": [ + "access" + ], "fileset.name": "access", - "http.request.method": "GET", - "http.request.referrer": "-", + "http.request.method": "get", "http.response.body.bytes": 571, "http.response.status_code": 404, "http.version": "1.1", @@ -319,11 +400,13 @@ "nginx.access.remote_ip_list": [ "127.0.0.1" ], + "related.ip": [ + "127.0.0.1" + ], "service.type": "nginx", "source.address": "127.0.0.1", "source.ip": "127.0.0.1", "url.original": "/test1", - "user.name": "-", "user_agent.device.name": "Other", "user_agent.name": "Chrome", "user_agent.original": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.98 Safari/537.36", @@ -334,12 +417,19 @@ }, { "@timestamp": "2016-12-07T10:04:58.000Z", + "event.category": [ + "web" + ], "event.dataset": "nginx.access", + "event.kind": "event", "event.module": "nginx", + "event.outcome": "success", "event.timezone": "-02:00", + "event.type": [ + "access" + ], "fileset.name": "access", - "http.request.method": "GET", - "http.request.referrer": "-", + "http.request.method": "get", "http.response.body.bytes": 0, "http.response.status_code": 304, "http.version": "1.1", @@ -348,11 +438,13 @@ "nginx.access.remote_ip_list": [ "127.0.0.1" ], + "related.ip": [ + "127.0.0.1" + ], "service.type": "nginx", "source.address": "127.0.0.1", "source.ip": "127.0.0.1", "url.original": "/", - "user.name": "-", 
"user_agent.device.name": "Other", "user_agent.name": "Firefox", "user_agent.original": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.12; rv:49.0) Gecko/20100101 Firefox/49.0", @@ -363,12 +455,19 @@ }, { "@timestamp": "2016-12-07T10:04:59.000Z", + "event.category": [ + "web" + ], "event.dataset": "nginx.access", + "event.kind": "event", "event.module": "nginx", + "event.outcome": "success", "event.timezone": "-02:00", + "event.type": [ + "access" + ], "fileset.name": "access", - "http.request.method": "GET", - "http.request.referrer": "-", + "http.request.method": "get", "http.response.body.bytes": 0, "http.response.status_code": 304, "http.version": "1.1", @@ -377,11 +476,13 @@ "nginx.access.remote_ip_list": [ "127.0.0.1" ], + "related.ip": [ + "127.0.0.1" + ], "service.type": "nginx", "source.address": "127.0.0.1", "source.ip": "127.0.0.1", "url.original": "/", - "user.name": "-", "user_agent.device.name": "Other", "user_agent.name": "Firefox", "user_agent.original": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.12; rv:49.0) Gecko/20100101 Firefox/49.0", @@ -392,12 +493,19 @@ }, { "@timestamp": "2016-12-07T10:05:07.000Z", + "event.category": [ + "web" + ], "event.dataset": "nginx.access", + "event.kind": "event", "event.module": "nginx", + "event.outcome": "failure", "event.timezone": "-02:00", + "event.type": [ + "access" + ], "fileset.name": "access", - "http.request.method": "GET", - "http.request.referrer": "-", + "http.request.method": "get", "http.response.body.bytes": 169, "http.response.status_code": 404, "http.version": "1.1", @@ -406,11 +514,13 @@ "nginx.access.remote_ip_list": [ "127.0.0.1" ], + "related.ip": [ + "127.0.0.1" + ], "service.type": "nginx", "source.address": "127.0.0.1", "source.ip": "127.0.0.1", "url.original": "/taga", - "user.name": "-", "user_agent.device.name": "Other", "user_agent.name": "Firefox", "user_agent.original": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.12; rv:49.0) Gecko/20100101 Firefox/49.0", diff --git a/filebeat/module/nginx/access/test/test-with-host.log-expected.json b/filebeat/module/nginx/access/test/test-with-host.log-expected.json index 38695946ca5..a641922d139 100644 --- a/filebeat/module/nginx/access/test/test-with-host.log-expected.json +++ b/filebeat/module/nginx/access/test/test-with-host.log-expected.json @@ -2,12 +2,19 @@ { "@timestamp": "2016-12-07T10:05:07.000Z", "destination.domain": "example.com", + "event.category": [ + "web" + ], "event.dataset": "nginx.access", + "event.kind": "event", "event.module": "nginx", + "event.outcome": "success", "event.timezone": "-02:00", + "event.type": [ + "access" + ], "fileset.name": "access", - "http.request.method": "GET", - "http.request.referrer": "-", + "http.request.method": "get", "http.response.body.bytes": 571, "http.response.status_code": 200, "http.version": "1.1", @@ -18,11 +25,13 @@ "10.0.0.1", "127.0.0.1" ], + "related.ip": [ + "10.0.0.2" + ], "service.type": "nginx", "source.address": "10.0.0.2", "source.ip": "10.0.0.2", "url.original": "/ocelot", - "user.name": "-", "user_agent.device.name": "Other", "user_agent.name": "Firefox", "user_agent.original": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.12; rv:49.0) Gecko/20100101 Firefox/49.0", @@ -34,12 +43,19 @@ { "@timestamp": "2017-05-29T19:02:48.000Z", "destination.domain": "example.com", + "event.category": [ + "web" + ], "event.dataset": "nginx.access", + "event.kind": "event", "event.module": "nginx", + "event.outcome": "failure", "event.timezone": "-02:00", + "event.type": [ + "access" + ], "fileset.name": "access", - 
"http.request.method": "GET", - "http.request.referrer": "-", + "http.request.method": "get", "http.response.body.bytes": 612, "http.response.status_code": 404, "http.version": "1.1", @@ -48,11 +64,13 @@ "nginx.access.remote_ip_list": [ "172.17.0.1" ], + "related.ip": [ + "172.17.0.1" + ], "service.type": "nginx", "source.address": "172.17.0.1", "source.ip": "172.17.0.1", "url.original": "/stringpatch", - "user.name": "-", "user_agent.device.name": "Other", "user_agent.name": "Firefox Alpha", "user_agent.original": "Mozilla/5.0 (Windows NT 6.1; rv:15.0) Gecko/20120716 Firefox/15.0a2", @@ -64,12 +82,19 @@ { "@timestamp": "2016-12-07T10:05:07.000Z", "destination.domain": "example.com", + "event.category": [ + "web" + ], "event.dataset": "nginx.access", + "event.kind": "event", "event.module": "nginx", + "event.outcome": "success", "event.timezone": "-02:00", + "event.type": [ + "access" + ], "fileset.name": "access", - "http.request.method": "GET", - "http.request.referrer": "-", + "http.request.method": "get", "http.response.body.bytes": 571, "http.response.status_code": 200, "http.version": "1.1", @@ -80,6 +105,9 @@ "10.0.0.1", "85.181.35.98" ], + "related.ip": [ + "85.181.35.98" + ], "service.type": "nginx", "source.address": "85.181.35.98", "source.as.number": 6805, @@ -93,7 +121,6 @@ "source.geo.region_name": "Land Berlin", "source.ip": "85.181.35.98", "url.original": "/ocelot", - "user.name": "-", "user_agent.device.name": "Other", "user_agent.name": "Firefox", "user_agent.original": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.12; rv:49.0) Gecko/20100101 Firefox/49.0", @@ -106,12 +133,19 @@ "@timestamp": "2016-12-07T10:05:07.000Z", "destination.domain": "example.com", "destination.port": "80", + "event.category": [ + "web" + ], "event.dataset": "nginx.access", + "event.kind": "event", "event.module": "nginx", + "event.outcome": "success", "event.timezone": "-02:00", + "event.type": [ + "access" + ], "fileset.name": "access", - "http.request.method": "GET", - "http.request.referrer": "-", + "http.request.method": "get", "http.response.body.bytes": 571, "http.response.status_code": 200, "http.version": "1.1", @@ -120,6 +154,9 @@ "nginx.access.remote_ip_list": [ "85.181.35.98" ], + "related.ip": [ + "85.181.35.98" + ], "service.type": "nginx", "source.address": "85.181.35.98", "source.as.number": 6805, @@ -133,7 +170,6 @@ "source.geo.region_name": "Land Berlin", "source.ip": "85.181.35.98", "url.original": "/ocelot", - "user.name": "-", "user_agent.device.name": "Other", "user_agent.name": "Chrome", "user_agent.original": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36", @@ -146,12 +182,19 @@ "@timestamp": "2016-01-22T13:18:29.000Z", "destination.domain": "example.com", "destination.port": "80", + "event.category": [ + "web" + ], "event.dataset": "nginx.access", + "event.kind": "event", "event.module": "nginx", + "event.outcome": "success", "event.timezone": "-02:00", + "event.type": [ + "access" + ], "fileset.name": "access", - "http.request.method": "GET", - "http.request.referrer": "-", + "http.request.method": "get", "http.response.body.bytes": 25507, "http.response.status_code": 200, "http.version": "1.1", @@ -163,6 +206,9 @@ "204.246.1.1", "10.2.1.185" ], + "related.ip": [ + "199.96.1.1" + ], "service.type": "nginx", "source.address": "199.96.1.1", "source.as.number": 19065, @@ -176,7 +222,6 @@ "source.geo.region_name": "Illinois", "source.ip": "199.96.1.1", "url.original": "/assets/xxxx?q=100", - "user.name": 
"-", "user_agent.device.name": "Other", "user_agent.name": "Other", "user_agent.original": "Amazon CloudFront" @@ -184,12 +229,19 @@ { "@timestamp": "2016-12-30T06:47:09.000Z", "destination.ip": "1.2.3.4", + "event.category": [ + "web" + ], "event.dataset": "nginx.access", + "event.kind": "event", "event.module": "nginx", + "event.outcome": "failure", "event.timezone": "-02:00", + "event.type": [ + "access" + ], "fileset.name": "access", - "http.request.method": "GET", - "http.request.referrer": "-", + "http.request.method": "get", "http.response.body.bytes": 8571, "http.response.status_code": 404, "http.version": "1.1", @@ -200,6 +252,10 @@ "10.225.192.17", "10.2.2.121" ], + "related.ip": [ + "2a03:0000:10ff:f00f:0000:0000:0:8000", + "1.2.3.4" + ], "service.type": "nginx", "source.address": "2a03:0000:10ff:f00f:0000:0000:0:8000", "source.geo.continent_name": "Europe", @@ -208,7 +264,6 @@ "source.geo.location.lon": -8.0, "source.ip": "2a03:0000:10ff:f00f:0000:0000:0:8000", "url.original": "/test.html", - "user.name": "-", "user_agent.device.name": "Spider", "user_agent.name": "Facebot", "user_agent.original": "Mozilla/5.0 (compatible; Facebot 1.0; https://developers.facebook.com/docs/sharing/webmasters/crawler)", @@ -218,11 +273,18 @@ "@timestamp": "2018-04-12T07:48:40.000Z", "destination.ip": "1.2.3.4", "destination.port": "80", + "event.category": [ + "web" + ], "event.dataset": "nginx.access", + "event.kind": "event", "event.module": "nginx", + "event.outcome": "failure", "event.timezone": "-02:00", + "event.type": [ + "access" + ], "fileset.name": "access", - "http.request.referrer": "-", "http.response.body.bytes": 0, "http.response.status_code": 400, "input.type": "log", @@ -230,43 +292,53 @@ "nginx.access.remote_ip_list": [ "127.0.0.1" ], + "related.ip": [ + "127.0.0.1", + "1.2.3.4" + ], "service.type": "nginx", "source.address": "127.0.0.1", - "source.ip": "127.0.0.1", - "user.name": "-", - "user_agent.device.name": "Other", - "user_agent.name": "Other", - "user_agent.original": "-" + "source.ip": "127.0.0.1" }, { "@timestamp": "2019-02-26T14:39:42.000Z", "destination.domain": "example.com", "destination.port": "80", + "event.category": [ + "web" + ], "event.dataset": "nginx.access", + "event.kind": "event", "event.module": "nginx", + "event.outcome": "failure", "event.timezone": "-02:00", + "event.type": [ + "access" + ], "fileset.name": "access", - "http.request.referrer": "-", "http.response.body.bytes": 173, "http.response.status_code": 400, "input.type": "log", "log.offset": 1269, "service.type": "nginx", - "source.address": "unix:", - "user.name": "-", - "user_agent.device.name": "Other", - "user_agent.name": "Other", - "user_agent.original": "-" + "source.address": "unix:" }, { "@timestamp": "2017-05-29T19:02:48.000Z", "destination.ip": "1.2.3.4", + "event.category": [ + "web" + ], "event.dataset": "nginx.access", + "event.kind": "event", "event.module": "nginx", + "event.outcome": "success", "event.timezone": "-02:00", + "event.type": [ + "access" + ], "fileset.name": "access", - "http.request.method": "GET", - "http.request.referrer": "-", + "http.request.method": "get", "http.response.body.bytes": 612, "http.response.status_code": 200, "http.version": "1.1", @@ -275,10 +347,12 @@ "nginx.access.remote_ip_list": [ "localhost" ], + "related.ip": [ + "1.2.3.4" + ], "service.type": "nginx", "source.address": "localhost", "url.original": "/test2", - "user.name": "-", "user_agent.device.name": "Other", "user_agent.name": "Firefox Alpha", "user_agent.original": "Mozilla/5.0 
(Windows NT 6.1; rv:15.0) Gecko/20120716 Firefox/15.0a2", @@ -290,12 +364,19 @@ { "@timestamp": "2017-05-29T19:02:48.000Z", "destination.domain": "example.com", + "event.category": [ + "web" + ], "event.dataset": "nginx.access", + "event.kind": "event", "event.module": "nginx", + "event.outcome": "success", "event.timezone": "-02:00", + "event.type": [ + "access" + ], "fileset.name": "access", - "http.request.method": "GET", - "http.request.referrer": "-", + "http.request.method": "get", "http.response.body.bytes": 612, "http.response.status_code": 200, "http.version": "1.1", @@ -308,7 +389,6 @@ "service.type": "nginx", "source.address": "localhost", "url.original": "/test2", - "user.name": "-", "user_agent.device.name": "Other", "user_agent.name": "Firefox Alpha", "user_agent.original": "Mozilla/5.0 (Windows NT 6.1; rv:15.0) Gecko/20120716 Firefox/15.0a2", diff --git a/filebeat/module/nginx/access/test/test.log-expected.json b/filebeat/module/nginx/access/test/test.log-expected.json index 247b7a12e21..22959d1a8be 100644 --- a/filebeat/module/nginx/access/test/test.log-expected.json +++ b/filebeat/module/nginx/access/test/test.log-expected.json @@ -1,12 +1,19 @@ [ { "@timestamp": "2016-12-07T10:05:07.000Z", + "event.category": [ + "web" + ], "event.dataset": "nginx.access", + "event.kind": "event", "event.module": "nginx", + "event.outcome": "success", "event.timezone": "-02:00", + "event.type": [ + "access" + ], "fileset.name": "access", - "http.request.method": "GET", - "http.request.referrer": "-", + "http.request.method": "get", "http.response.body.bytes": 571, "http.response.status_code": 200, "http.version": "1.1", @@ -17,11 +24,13 @@ "10.0.0.1", "127.0.0.1" ], + "related.ip": [ + "10.0.0.2" + ], "service.type": "nginx", "source.address": "10.0.0.2", "source.ip": "10.0.0.2", "url.original": "/ocelot", - "user.name": "-", "user_agent.device.name": "Other", "user_agent.name": "Firefox", "user_agent.original": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.12; rv:49.0) Gecko/20100101 Firefox/49.0", @@ -32,12 +41,19 @@ }, { "@timestamp": "2017-05-29T19:02:48.000Z", + "event.category": [ + "web" + ], "event.dataset": "nginx.access", + "event.kind": "event", "event.module": "nginx", + "event.outcome": "failure", "event.timezone": "-02:00", + "event.type": [ + "access" + ], "fileset.name": "access", - "http.request.method": "GET", - "http.request.referrer": "-", + "http.request.method": "get", "http.response.body.bytes": 612, "http.response.status_code": 404, "http.version": "1.1", @@ -46,11 +62,13 @@ "nginx.access.remote_ip_list": [ "172.17.0.1" ], + "related.ip": [ + "172.17.0.1" + ], "service.type": "nginx", "source.address": "172.17.0.1", "source.ip": "172.17.0.1", "url.original": "/stringpatch", - "user.name": "-", "user_agent.device.name": "Other", "user_agent.name": "Firefox Alpha", "user_agent.original": "Mozilla/5.0 (Windows NT 6.1; rv:15.0) Gecko/20120716 Firefox/15.0a2", @@ -61,12 +79,19 @@ }, { "@timestamp": "2016-12-07T10:05:07.000Z", + "event.category": [ + "web" + ], "event.dataset": "nginx.access", + "event.kind": "event", "event.module": "nginx", + "event.outcome": "success", "event.timezone": "-02:00", + "event.type": [ + "access" + ], "fileset.name": "access", - "http.request.method": "GET", - "http.request.referrer": "-", + "http.request.method": "get", "http.response.body.bytes": 571, "http.response.status_code": 200, "http.version": "1.1", @@ -77,6 +102,9 @@ "10.0.0.1", "85.181.35.98" ], + "related.ip": [ + "85.181.35.98" + ], "service.type": "nginx", "source.address": 
"85.181.35.98", "source.as.number": 6805, @@ -90,7 +118,6 @@ "source.geo.region_name": "Land Berlin", "source.ip": "85.181.35.98", "url.original": "/ocelot", - "user.name": "-", "user_agent.device.name": "Other", "user_agent.name": "Firefox", "user_agent.original": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.12; rv:49.0) Gecko/20100101 Firefox/49.0", @@ -101,12 +128,19 @@ }, { "@timestamp": "2016-12-07T10:05:07.000Z", + "event.category": [ + "web" + ], "event.dataset": "nginx.access", + "event.kind": "event", "event.module": "nginx", + "event.outcome": "success", "event.timezone": "-02:00", + "event.type": [ + "access" + ], "fileset.name": "access", - "http.request.method": "GET", - "http.request.referrer": "-", + "http.request.method": "get", "http.response.body.bytes": 571, "http.response.status_code": 200, "http.version": "1.1", @@ -115,6 +149,9 @@ "nginx.access.remote_ip_list": [ "85.181.35.98" ], + "related.ip": [ + "85.181.35.98" + ], "service.type": "nginx", "source.address": "85.181.35.98", "source.as.number": 6805, @@ -128,7 +165,6 @@ "source.geo.region_name": "Land Berlin", "source.ip": "85.181.35.98", "url.original": "/ocelot", - "user.name": "-", "user_agent.device.name": "Other", "user_agent.name": "Chrome", "user_agent.original": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36", @@ -139,12 +175,19 @@ }, { "@timestamp": "2016-01-22T13:18:29.000Z", + "event.category": [ + "web" + ], "event.dataset": "nginx.access", + "event.kind": "event", "event.module": "nginx", + "event.outcome": "success", "event.timezone": "-02:00", + "event.type": [ + "access" + ], "fileset.name": "access", - "http.request.method": "GET", - "http.request.referrer": "-", + "http.request.method": "get", "http.response.body.bytes": 25507, "http.response.status_code": 200, "http.version": "1.1", @@ -156,6 +199,9 @@ "204.246.1.1", "10.2.1.185" ], + "related.ip": [ + "199.96.1.1" + ], "service.type": "nginx", "source.address": "199.96.1.1", "source.as.number": 19065, @@ -169,19 +215,25 @@ "source.geo.region_name": "Illinois", "source.ip": "199.96.1.1", "url.original": "/assets/xxxx?q=100", - "user.name": "-", "user_agent.device.name": "Other", "user_agent.name": "Other", "user_agent.original": "Amazon CloudFront" }, { "@timestamp": "2016-12-30T06:47:09.000Z", + "event.category": [ + "web" + ], "event.dataset": "nginx.access", + "event.kind": "event", "event.module": "nginx", + "event.outcome": "failure", "event.timezone": "-02:00", + "event.type": [ + "access" + ], "fileset.name": "access", - "http.request.method": "GET", - "http.request.referrer": "-", + "http.request.method": "get", "http.response.body.bytes": 8571, "http.response.status_code": 404, "http.version": "1.1", @@ -192,6 +244,9 @@ "10.225.192.17", "10.2.2.121" ], + "related.ip": [ + "2a03:0000:10ff:f00f:0000:0000:0:8000" + ], "service.type": "nginx", "source.address": "2a03:0000:10ff:f00f:0000:0000:0:8000", "source.geo.continent_name": "Europe", @@ -200,7 +255,6 @@ "source.geo.location.lon": -8.0, "source.ip": "2a03:0000:10ff:f00f:0000:0000:0:8000", "url.original": "/test.html", - "user.name": "-", "user_agent.device.name": "Spider", "user_agent.name": "Facebot", "user_agent.original": "Mozilla/5.0 (compatible; Facebot 1.0; https://developers.facebook.com/docs/sharing/webmasters/crawler)", @@ -208,11 +262,18 @@ }, { "@timestamp": "2018-04-12T07:48:40.000Z", + "event.category": [ + "web" + ], "event.dataset": "nginx.access", + "event.kind": "event", "event.module": "nginx", + 
"event.outcome": "failure", "event.timezone": "-02:00", + "event.type": [ + "access" + ], "fileset.name": "access", - "http.request.referrer": "-", "http.response.body.bytes": 0, "http.response.status_code": 400, "input.type": "log", @@ -220,40 +281,49 @@ "nginx.access.remote_ip_list": [ "127.0.0.1" ], + "related.ip": [ + "127.0.0.1" + ], "service.type": "nginx", "source.address": "127.0.0.1", - "source.ip": "127.0.0.1", - "user.name": "-", - "user_agent.device.name": "Other", - "user_agent.name": "Other", - "user_agent.original": "-" + "source.ip": "127.0.0.1" }, { "@timestamp": "2019-02-26T14:39:42.000Z", + "event.category": [ + "web" + ], "event.dataset": "nginx.access", + "event.kind": "event", "event.module": "nginx", + "event.outcome": "failure", "event.timezone": "-02:00", + "event.type": [ + "access" + ], "fileset.name": "access", - "http.request.referrer": "-", "http.response.body.bytes": 173, "http.response.status_code": 400, "input.type": "log", "log.offset": 1184, "service.type": "nginx", - "source.address": "unix:", - "user.name": "-", - "user_agent.device.name": "Other", - "user_agent.name": "Other", - "user_agent.original": "-" + "source.address": "unix:" }, { "@timestamp": "2017-05-29T19:02:48.000Z", + "event.category": [ + "web" + ], "event.dataset": "nginx.access", + "event.kind": "event", "event.module": "nginx", + "event.outcome": "success", "event.timezone": "-02:00", + "event.type": [ + "access" + ], "fileset.name": "access", - "http.request.method": "GET", - "http.request.referrer": "-", + "http.request.method": "get", "http.response.body.bytes": 612, "http.response.status_code": 200, "http.version": "1.1", @@ -265,7 +335,6 @@ "service.type": "nginx", "source.address": "localhost", "url.original": "/test2", - "user.name": "-", "user_agent.device.name": "Other", "user_agent.name": "Firefox Alpha", "user_agent.original": "Mozilla/5.0 (Windows NT 6.1; rv:15.0) Gecko/20120716 Firefox/15.0a2", @@ -276,12 +345,19 @@ }, { "@timestamp": "2017-05-29T19:02:48.000Z", + "event.category": [ + "web" + ], "event.dataset": "nginx.access", + "event.kind": "event", "event.module": "nginx", + "event.outcome": "success", "event.timezone": "-02:00", + "event.type": [ + "access" + ], "fileset.name": "access", - "http.request.method": "GET", - "http.request.referrer": "-", + "http.request.method": "get", "http.response.body.bytes": 612, "http.response.status_code": 200, "http.version": "1.1", @@ -294,7 +370,6 @@ "service.type": "nginx", "source.address": "localhost", "url.original": "/test2", - "user.name": "-", "user_agent.device.name": "Other", "user_agent.name": "Firefox Alpha", "user_agent.original": "Mozilla/5.0 (Windows NT 6.1; rv:15.0) Gecko/20120716 Firefox/15.0a2", diff --git a/filebeat/module/nginx/error/ingest/pipeline.json b/filebeat/module/nginx/error/ingest/pipeline.json deleted file mode 100644 index 473fa087922..00000000000 --- a/filebeat/module/nginx/error/ingest/pipeline.json +++ /dev/null @@ -1,47 +0,0 @@ -{ - "description": "Pipeline for parsing the Nginx error logs", - "processors": [{ - "grok": { - "field": "message", - "patterns": [ - "%{DATA:nginx.error.time} \\[%{DATA:log.level}\\] %{NUMBER:process.pid:long}#%{NUMBER:process.thread.id:long}: (\\*%{NUMBER:nginx.error.connection_id:long} )?%{GREEDYMULTILINE:message}" - ], - "pattern_definitions": { - "GREEDYMULTILINE":"(.|\n|\t)*" - }, - "ignore_missing": true - } - }, { - "rename": { - "field": "@timestamp", - "target_field": "event.created" - } - }, { - "date": { - "if": "ctx.event.timezone == null", - "field": 
"nginx.error.time", - "target_field": "@timestamp", - "formats": ["yyyy/MM/dd H:m:s"], - "on_failure": [{"append": {"field": "error.message", "value": "{{ _ingest.on_failure_message }}"}}] - } - }, { - "date": { - "if": "ctx.event.timezone != null", - "field": "nginx.error.time", - "target_field": "@timestamp", - "formats": ["yyyy/MM/dd H:m:s"], - "timezone": "{{ event.timezone }}", - "on_failure": [{"append": {"field": "error.message", "value": "{{ _ingest.on_failure_message }}"}}] - } - }, { - "remove": { - "field": "nginx.error.time" - } - }], - "on_failure" : [{ - "set" : { - "field" : "error.message", - "value" : "{{ _ingest.on_failure_message }}" - } - }] -} diff --git a/filebeat/module/nginx/error/ingest/pipeline.yml b/filebeat/module/nginx/error/ingest/pipeline.yml new file mode 100644 index 00000000000..5a33c34710c --- /dev/null +++ b/filebeat/module/nginx/error/ingest/pipeline.yml @@ -0,0 +1,51 @@ +description: Pipeline for parsing the Nginx error logs +processors: +- grok: + field: message + patterns: + - '%{DATA:nginx.error.time} \[%{DATA:log.level}\] %{NUMBER:process.pid:long}#%{NUMBER:process.thread.id:long}: + (\*%{NUMBER:nginx.error.connection_id:long} )?%{GREEDYMULTILINE:message}' + pattern_definitions: + GREEDYMULTILINE: |- + (.| + | )* + ignore_missing: true +- rename: + field: '@timestamp' + target_field: event.created +- date: + if: ctx.event.timezone == null + field: nginx.error.time + target_field: '@timestamp' + formats: + - yyyy/MM/dd H:m:s + on_failure: + - append: + field: error.message + value: '{{ _ingest.on_failure_message }}' +- date: + if: ctx.event.timezone != null + field: nginx.error.time + target_field: '@timestamp' + formats: + - yyyy/MM/dd H:m:s + timezone: '{{ event.timezone }}' + on_failure: + - append: + field: error.message + value: '{{ _ingest.on_failure_message }}' +- remove: + field: nginx.error.time +- set: + field: event.kind + value: event +- append: + field: event.category + value: web +- append: + field: event.type + value: error +on_failure: +- set: + field: error.message + value: '{{ _ingest.on_failure_message }}' diff --git a/filebeat/module/nginx/error/manifest.yml b/filebeat/module/nginx/error/manifest.yml index 641ec771bbb..b83c154693d 100644 --- a/filebeat/module/nginx/error/manifest.yml +++ b/filebeat/module/nginx/error/manifest.yml @@ -9,5 +9,5 @@ var: os.windows: - c:/programdata/nginx/logs/error.log* -ingest_pipeline: ingest/pipeline.json +ingest_pipeline: ingest/pipeline.yml input: config/nginx-error.yml diff --git a/filebeat/module/nginx/error/test/error.log-expected.json b/filebeat/module/nginx/error/test/error.log-expected.json index 6252e87d66b..8896a490705 100644 --- a/filebeat/module/nginx/error/test/error.log-expected.json +++ b/filebeat/module/nginx/error/test/error.log-expected.json @@ -1,9 +1,16 @@ [ { "@timestamp": "2016-10-25T14:49:34.000-02:00", + "event.category": [ + "web" + ], "event.dataset": "nginx.error", + "event.kind": "event", "event.module": "nginx", "event.timezone": "-02:00", + "event.type": [ + "error" + ], "fileset.name": "error", "input.type": "log", "log.level": "error", @@ -16,9 +23,16 @@ }, { "@timestamp": "2016-10-25T14:50:44.000-02:00", + "event.category": [ + "web" + ], "event.dataset": "nginx.error", + "event.kind": "event", "event.module": "nginx", "event.timezone": "-02:00", + "event.type": [ + "error" + ], "fileset.name": "error", "input.type": "log", "log.level": "error", @@ -31,9 +45,16 @@ }, { "@timestamp": "2019-10-30T23:26:34.000-02:00", + "event.category": [ + "web" + ], 
"event.dataset": "nginx.error", + "event.kind": "event", "event.module": "nginx", "event.timezone": "-02:00", + "event.type": [ + "error" + ], "fileset.name": "error", "input.type": "log", "log.flags": [ @@ -49,9 +70,16 @@ }, { "@timestamp": "2019-11-05T14:50:44.000-02:00", + "event.category": [ + "web" + ], "event.dataset": "nginx.error", + "event.kind": "event", "event.module": "nginx", "event.timezone": "-02:00", + "event.type": [ + "error" + ], "fileset.name": "error", "input.type": "log", "log.level": "error", diff --git a/filebeat/module/nginx/fields.go b/filebeat/module/nginx/fields.go index df2afc9669e..2f9e50ceb60 100644 --- a/filebeat/module/nginx/fields.go +++ b/filebeat/module/nginx/fields.go @@ -32,5 +32,5 @@ func init() { // AssetNginx returns asset data. // This is the base64 encoded gzipped contents of module/nginx. func AssetNginx() string { - return "eJzsWM2O4zYMvs9TENtLC8z4AXIoUGyxwBxaFEUPvWUVi3aIlUWXlDObty9kOz/jkRM7md32YJ9m7PD7PooUJfIJvuB+Bb4k//UBIFBwuIIPv8f/PzwAWNRcqA7EfgU/PwAA/Ma2cQgFC9RGlHwJYYvQmoDjEgpyqNkDgG5ZwjpnX1C5giANPgAUhM7qqoV6Am8qPNHHJ+xrXEEp3NT9m4SG+HxqgaAQrsYExOec75zT5DmqHl+niC+Qx+cj+2DIa0/RrshJSIcf9RylpOScSxKsOOCa6rUjDa9+cpBnRMx+8OWCxPj84jsr4KJngOc/wFgrqIqawXMAUjAQSWGDuWkUgdqXOVcVewgM5HPXWHyEDSpZ1NbT3BH6oVA4g398RdXFaovGoig4+oLw+e+nTywvRiza+Nfn7A3an2gcKDeSt8JJQVADC9qo63P3JaP6zDS5uhu2+7WiD9lmH1DTy+vIDL/UJmxXsA2hzgS1Zq+YRawkTEWlmC4Sfb6/FdIoyjr+OVNCtMsSdlM4Kwxbtrf5/E+DGrIkwiR3xc11VFzGQiV5MzSdQhhlr3coSuxv8ThtOoX5kB/rnO3c6L5OMA0mNJrCmaajQBGUe+I9gjGF3pRvq8KU5F63hnNDP77HxnUMqzyMVOZzSIs7yofRuOxa0r0OJ7WRL3k5VDNiPlPLnSI4RTJTAmtWNM6lCuI8KWMI8/XcKyWdv7doGUeatRtKZBqm++0bIWcfyKMP96x4f2yXyNlVvKkLn3Pjg+zXpJyqnDdJu4o4VZzjvP3Z/aIuIE0VI1gS+3eK32WwycGjsH+vhLoANXOF3i+VrgOOSTtIQhGWb9ewtPCz+pWcvcc8tG6l75aOfTmvW/l4xASy6AMVhHLlZu9wh3PvmI7LLGU35X5Tj3g7zlYLx24we2s5hS/czBe2gsZmN7FWqGrKudfYtNW11CZfxhZxHY8CYefwm+T5c8fS5/uJa+nV/y+9+g+HTHg6i47WmFNBeR+P0Z7v2Lo49GXYTi1IBUtlwgpS7fyVcP21RehJoSOFH7vFJ1+evpDHx34pH8F4e/yyYbv/6bo/gUZmBZabjRt+OvhjG0ldE665RBUCOlMrWlDyObbZU5BEvXGJ4AUlum3safaWSK5jJ1ZrEDRV6lbdeRHw6zAvJ6x7hIvbJNIfOS4rMC6geBNoh+Pzl/vVnNFMVXbs+79r6vYp28s+aADexOqJZ+E96ARF2Q1GAuO+fNe03SJEvlgsfAD2IJgj7Q7T6aNzYz6BUVDM2VuFFwpbqMg56t5Ea3bNG1Xjro+Pb269F0UHO1SIqO8UtFdlZuSWcduGEOMtV24PJXoUE9DC868n1S3j5cUc6VjfvJ4g5nSwDStGvzgZPBeguEOJ51f7qq9z8RwyeZRvGzmv6v0Vi3z5GBFJzs5OIwiKten83uzbs9nolSJQs6QvErdlS4Qbc/fyfXqZlC+T8mVSvkzKl0n5Mim/rGeZlC+T8mVSvkzK/+NJ+b8BAAD//1+3mU8=" + return 
"eJzsWM9u48YPvucpiP1dfgUSPYAPBYotFsihRVH00Jt3rKFkIqOhSo6c+u2LkeQ/kUe25GS3PUinRDK/7+OQwxnyCV5wvwJfkv/7ASBQcLiCT7/G/z89AFjUXKgOxH4FPz4AAPzCtnEIBQvURpR8CWGL0JqA4xIKcqjZA4BuWcI6Z19QuYIgDT4AFITO6qqFegJvKjzRxyfsa1xBKdzU/ZuEhvh8aYGgEK7GBMTnnO+c0+Q5qh5fp4ivkMfnM/tgyGtP0a7ISUiHH/UcpaTknEsSrDjgmuq1Iw1vfnKQZ0TMfvDlisT4/OQ7K+CiZ4Dn38BYK6iKmsFzAFIwEElhg7lpFIHalzlXFXsIDORz11h8hA0qWdTW09wR+qFQOIN/fEPVxWqLxqIoOHpB+Prn0xeWVyMWbfzra3aB9jsaB8qN5K1wUhDUwII26vrafcmoPjNNru6G7X6t6EO22QfU9PI6MsMvtQnbFWxDqDNBrdkrZhErCVNRKaaLRJ/vl0IaRVnHP2dKiHZZwm4KZ4Vhy/Y+n/9qUEOWRJjkrri5jorLWKgkb4amUwij7PUORYn9PR6nTacwH/JjnbOdG923CabBhEZTONN0FCiC8p54j2BMoTflZVWYktzr1nBu6Mf32LiOYZWHkcp8DmlxR/kwGtddS7rX4aQ28jUvh2pGzGdqeacITpHMlMCaFY1zqYI4T8oYwnw975WSzt97tIwjzdoNJTIN0/3+jZCzD+TRh/eseH9sl8jZTbypC59z44Ps16Scqpx3SbuJOFWc47z92ftFXUGaKkawJPYfFL/rYJODR2H/UQl1BWrmCn1cKt0GHJN2kIQiLN+uYWnhZ/UrOXuPeWjdSt8tHftyXrfy+YgJZNEHKgjlxs3e4Q7n3jEdl1nKbsr9ph7xdpytFo7dYHZpOYUv3M0XtoLGZnexVqhqyrnX2LTVrdQmX8YWcR2PAmHn8Jvk+XPH0uf7iWvp1f8rvfr/DpnwdBYdrTGngvI+HqM937F1cejLsJ1akAqWyoQVpNr5G+H6Y4vQk0JHCv/vFp98efpCHh/7pXwE4+3xy4bt/ofb/gQamRVYbjZu+Ongj20kdU245RJVCOhMrWhByefYZk9BEvXGJYJXlOi2safZWyK5jp1YrUHQVKlbdefFC+5fWYYlasLSR8S4U6KCI811EcYFFG8C7XB8BPMhgs6Ypoo7dv/fNYH7xO1lHzQAb2INxbMgH3SCouwGg4FxX75r8m4RIl8sGT4AexDMkXaHGfXRuTGfwCgo5uytwiuFLVTkHHVvojW75kLVuOvjQ5x7b0fRwQ4VIuoHBe1NsRm5a9y9J8R4y5XbQ4kexQS08PzzSXhLen09R1rXi9cTxJxOuGHd6Ncng+cCFHco8SBrX/UFLx5IJo/ybSPn5b2/a5EvHyMiydkhagRBsTad35t9e0gbvVEHapb0jeK+hIlwY+5ev1gvI/NlZL6MzJeR+TIyX0bm1/UsI/NlZL6MzJeR+b88Mv8nAAD///5cnQ8=" } diff --git a/filebeat/module/nginx/ingress_controller/_meta/fields.yml b/filebeat/module/nginx/ingress_controller/_meta/fields.yml index 0c9ca13de32..2c467e3856a 100644 --- a/filebeat/module/nginx/ingress_controller/_meta/fields.yml +++ b/filebeat/module/nginx/ingress_controller/_meta/fields.yml @@ -22,11 +22,11 @@ description: > Time elapsed since the first bytes were read from the client - name: upstream.name - type: text + type: keyword description: > The name of the upstream. - name: upstream.alternative_name - type: text + type: keyword description: > The name of the alternative upstream. - name: upstream.response.length @@ -44,7 +44,7 @@ description: > The status code of the response obtained from the upstream server - name: http.request.id - type: text + type: keyword description: > The randomly generated ID of the request - name: upstream.ip diff --git a/filebeat/module/nginx/ingress_controller/ingest/pipeline.json b/filebeat/module/nginx/ingress_controller/ingest/pipeline.json deleted file mode 100644 index e660f22f022..00000000000 --- a/filebeat/module/nginx/ingress_controller/ingest/pipeline.json +++ /dev/null @@ -1,151 +0,0 @@ -{ - "description": "Pipeline for parsing Nginx ingress controller access logs. 
Requires the geoip and user_agent plugins.", - "processors": [ - { - "grok": { - "field": "message", - "patterns": [ - "(%{NGINX_HOST} )?\"?(?:%{NGINX_ADDRESS_LIST:nginx.ingress_controller.remote_ip_list}|%{NOTSPACE:source.address}) - %{DATA:user.name} \\[%{HTTPDATE:nginx.ingress_controller.time}\\] \"%{DATA:nginx.ingress_controller.info}\" %{NUMBER:http.response.status_code:long} %{NUMBER:http.response.body.bytes:long} \"%{DATA:http.request.referrer}\" \"%{DATA:user_agent.original}\" %{NUMBER:nginx.ingress_controller.http.request.length:long} %{NUMBER:nginx.ingress_controller.http.request.time:double} \\[%{DATA:nginx.ingress_controller.upstream.name}\\] \\[%{DATA:nginx.ingress_controller.upstream.alternative_name}\\] (%{UPSTREAM_ADDRESS}|-) (%{NUMBER:nginx.ingress_controller.upstream.response.length:long}|-) (%{NUMBER:nginx.ingress_controller.upstream.response.time:double}|-) (%{NUMBER:nginx.ingress_controller.upstream.response.status_code:long}|-) %{GREEDYDATA:nginx.ingress_controller.http.request.id}" - ], - "pattern_definitions": { - "NGINX_HOST": "(?:%{IP:destination.ip}|%{NGINX_NOTSEPARATOR:destination.domain})(:%{NUMBER:destination.port})?", - "NGINX_NOTSEPARATOR": "[^\t ,:]+", - "NGINX_ADDRESS_LIST": "(?:%{IP}|%{WORD})(\"?,?\\s*(?:%{IP}|%{WORD}))*", - "UPSTREAM_ADDRESS": "%{IP:nginx.ingress_controller.upstream.ip}(:%{NUMBER:nginx.ingress_controller.upstream.port})?" - }, - "ignore_missing": true - } - }, - { - "grok": { - "field": "nginx.ingress_controller.info", - "patterns": [ - "%{WORD:http.request.method} %{DATA:url.original} HTTP/%{NUMBER:http.version}", - "" - ], - "ignore_missing": true - } - }, - { - "remove": { - "field": "nginx.ingress_controller.info" - } - }, - { - "split": { - "field": "nginx.ingress_controller.remote_ip_list", - "separator": "\"?,?\\s+", - "ignore_missing": true - } - }, - { - "split": { - "field": "nginx.ingress_controller.origin", - "separator": "\"?,?\\s+", - "ignore_missing": true - } - }, - { - "set": { - "field": "source.address", - "if": "ctx.source?.address == null", - "value": "" - } - }, - { - "script": { - "if": "ctx.nginx?.access?.remote_ip_list != null && ctx.nginx.ingress_controller.remote_ip_list.length > 0", - "lang": "painless", - "source": "boolean isPrivate(def dot, def ip) { try { StringTokenizer tok = new StringTokenizer(ip, dot); int firstByte = Integer.parseInt(tok.nextToken()); int secondByte = Integer.parseInt(tok.nextToken()); if (firstByte == 10) { return true; } if (firstByte == 192 && secondByte == 168) { return true; } if (firstByte == 172 && secondByte >= 16 && secondByte <= 31) { return true; } if (firstByte == 127) { return true; } return false; } catch (Exception e) { return false; } } try { ctx.source.address = null; if (ctx.nginx.ingress_controller.remote_ip_list == null) { return; } def found = false; for (def item : ctx.nginx.ingress_controller.remote_ip_list) { if (!isPrivate(params.dot, item)) { ctx.source.address = item; found = true; break; } } if (!found) { ctx.source.address = ctx.nginx.ingress_controller.remote_ip_list[0]; }} catch (Exception e) { ctx.source.address = null; }", - "params": { - "dot": "." 
- } - } - }, - { - "remove": { - "field": "source.address", - "if": "ctx.source.address == null" - } - }, - { - "grok": { - "field": "source.address", - "patterns": ["^%{IP:source.ip}$"], - "ignore_failure": true - } - }, - { - "remove": { - "field": "message" - } - }, - { - "rename": { - "field": "@timestamp", - "target_field": "event.created" - } - }, - { - "date": { - "field": "nginx.ingress_controller.time", - "target_field": "@timestamp", - "formats": [ - "dd/MMM/yyyy:H:m:s Z" - ], - "on_failure": [{"append": {"field": "error.message", "value": "{{ _ingest.on_failure_message }}"}}] - } - }, - { - "remove": { - "field": "nginx.ingress_controller.time" - } - }, - { - "user_agent": { - "field": "user_agent.original" - } - }, - { - "geoip": { - "field": "source.ip", - "target_field": "source.geo", - "ignore_missing": true - } - }, - { - "geoip": { - "database_file": "GeoLite2-ASN.mmdb", - "field": "source.ip", - "target_field": "source.as", - "properties": [ - "asn", - "organization_name" - ], - "ignore_missing": true - } - }, - { - "rename": { - "field": "source.as.asn", - "target_field": "source.as.number", - "ignore_missing": true - } - }, - { - "rename": { - "field": "source.as.organization_name", - "target_field": "source.as.organization.name", - "ignore_missing": true - } - } - ], - "on_failure": [ - { - "set": { - "field": "error.message", - "value": "{{ _ingest.on_failure_message }}" - } - } - ] -}
diff --git a/filebeat/module/nginx/ingress_controller/ingest/pipeline.yml b/filebeat/module/nginx/ingress_controller/ingest/pipeline.yml
new file mode 100644
index 00000000000..9721be136e3
--- /dev/null
+++ b/filebeat/module/nginx/ingress_controller/ingest/pipeline.yml
@@ -0,0 +1,172 @@
+description: Pipeline for parsing Nginx ingress controller access logs. Requires the
+  geoip and user_agent plugins.
+processors:
+- grok:
+    field: message
+    patterns:
+    - (%{NGINX_HOST} )?"?(?:%{NGINX_ADDRESS_LIST:nginx.ingress_controller.remote_ip_list}|%{NOTSPACE:source.address})
+      - (-|%{DATA:user.name}) \[%{HTTPDATE:nginx.ingress_controller.time}\] "%{DATA:nginx.ingress_controller.info}"
+      %{NUMBER:http.response.status_code:long} %{NUMBER:http.response.body.bytes:long}
+      "(-|%{DATA:http.request.referrer})" "(-|%{DATA:user_agent.original})" %{NUMBER:nginx.ingress_controller.http.request.length:long}
+      %{NUMBER:nginx.ingress_controller.http.request.time:double} \[%{DATA:nginx.ingress_controller.upstream.name}\]
+      \[%{DATA:nginx.ingress_controller.upstream.alternative_name}\] (%{UPSTREAM_ADDRESS}|-)
+      (%{NUMBER:nginx.ingress_controller.upstream.response.length:long}|-) (%{NUMBER:nginx.ingress_controller.upstream.response.time:double}|-)
+      (%{NUMBER:nginx.ingress_controller.upstream.response.status_code:long}|-) %{GREEDYDATA:nginx.ingress_controller.http.request.id}
+    pattern_definitions:
+      NGINX_HOST: (?:%{IP:destination.ip}|%{NGINX_NOTSEPARATOR:destination.domain})(:%{NUMBER:destination.port})?
+      NGINX_NOTSEPARATOR: "[^\t ,:]+"
+      NGINX_ADDRESS_LIST: (?:%{IP}|%{WORD})("?,?\s*(?:%{IP}|%{WORD}))*
+      UPSTREAM_ADDRESS: '%{IP:nginx.ingress_controller.upstream.ip}(:%{NUMBER:nginx.ingress_controller.upstream.port})?'
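+    # NGINX_ADDRESS_LIST captures the client address plus any proxy chain;
+    # the script processor further down walks that list to pick the first
+    # non-private IP as source.address. UPSTREAM_ADDRESS splits the optional
+    # upstream ip:port pair.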
+    ignore_missing: true
+- grok:
+    field: nginx.ingress_controller.info
+    patterns:
+    - '%{WORD:http.request.method} %{DATA:url.original} HTTP/%{NUMBER:http.version}'
+    - ""
+    ignore_missing: true
+- remove:
+    field: nginx.ingress_controller.info
+- split:
+    field: nginx.ingress_controller.remote_ip_list
+    separator: '"?,?\s+'
+    ignore_missing: true
+- split:
+    field: nginx.ingress_controller.origin
+    separator: '"?,?\s+'
+    ignore_missing: true
+- set:
+    field: source.address
+    if: ctx.source?.address == null
+    value: ""
+- script:
+    if: ctx.nginx?.access?.remote_ip_list != null && ctx.nginx.ingress_controller.remote_ip_list.length > 0
+    lang: painless
+    source: >-
+      boolean isPrivate(def dot, def ip) {
+        try {
+          StringTokenizer tok = new StringTokenizer(ip, dot);
+          int firstByte = Integer.parseInt(tok.nextToken());
+          int secondByte = Integer.parseInt(tok.nextToken());
+          if (firstByte == 10) {
+            return true;
+          }
+          if (firstByte == 192 && secondByte == 168) {
+            return true;
+          }
+          if (firstByte == 172 && secondByte >= 16 && secondByte <= 31) {
+            return true;
+          }
+          if (firstByte == 127) {
+            return true;
+          }
+          return false;
+        }
+        catch (Exception e) {
+          return false;
+        }
+      }
+      try {
+        ctx.source.address = null;
+        if (ctx.nginx.ingress_controller.remote_ip_list == null) {
+          return;
+        }
+        def found = false;
+        for (def item : ctx.nginx.ingress_controller.remote_ip_list) {
+          if (!isPrivate(params.dot, item)) {
+            ctx.source.address = item;
+            found = true;
+            break;
+          }
+        }
+        if (!found) {
+          ctx.source.address = ctx.nginx.ingress_controller.remote_ip_list[0];
+        }
+      }
+      catch (Exception e) {
+        ctx.source.address = null;
+      }
+    params:
+      dot: .
+- remove:
+    field: source.address
+    if: ctx.source.address == null
+- grok:
+    field: source.address
+    patterns:
+    - ^%{IP:source.ip}$
+    ignore_failure: true
+- remove:
+    field: message
+- rename:
+    field: '@timestamp'
+    target_field: event.created
+- date:
+    field: nginx.ingress_controller.time
+    target_field: '@timestamp'
+    formats:
+    - dd/MMM/yyyy:H:m:s Z
+    on_failure:
+    - append:
+        field: error.message
+        value: '{{ _ingest.on_failure_message }}'
+- remove:
+    field: nginx.ingress_controller.time
+- user_agent:
+    field: user_agent.original
+    ignore_missing: true
+- geoip:
+    field: source.ip
+    target_field: source.geo
+    ignore_missing: true
+- geoip:
+    database_file: GeoLite2-ASN.mmdb
+    field: source.ip
+    target_field: source.as
+    properties:
+    - asn
+    - organization_name
+    ignore_missing: true
+- rename:
+    field: source.as.asn
+    target_field: source.as.number
+    ignore_missing: true
+- rename:
+    field: source.as.organization_name
+    target_field: source.as.organization.name
+    ignore_missing: true
+- set:
+    field: event.kind
+    value: event
+- append:
+    field: event.category
+    value: web
+- append:
+    field: event.type
+    value: info
+- set:
+    field: event.outcome
+    value: success
+    if: "ctx?.http?.response?.status_code != null && ctx.http.response.status_code < 400"
+- set:
+    field: event.outcome
+    value: failure
+    if: "ctx?.http?.response?.status_code != null && ctx.http.response.status_code >= 400"
+- lowercase:
+    field: http.request.method
+    ignore_missing: true
+- append:
+    field: related.ip
+    value: "{{source.ip}}"
+    if: "ctx?.source?.ip != null"
+- append:
+    field: related.ip
+    value: "{{destination.ip}}"
+    if: "ctx?.destination?.ip != null"
+- append:
+    field: related.user
+    value: "{{user.name}}"
+    if: "ctx?.user?.name != null"
+on_failure:
+- set:
+    field: error.message
+    value: '{{ _ingest.on_failure_message }}'
diff --git
a/filebeat/module/nginx/ingress_controller/manifest.yml b/filebeat/module/nginx/ingress_controller/manifest.yml index 0f51e4d5c04..326beb11461 100644 --- a/filebeat/module/nginx/ingress_controller/manifest.yml +++ b/filebeat/module/nginx/ingress_controller/manifest.yml @@ -9,7 +9,7 @@ var: os.windows: - c:/programdata/nginx/logs/*access.log* -ingest_pipeline: ingest/pipeline.json +ingest_pipeline: ingest/pipeline.yml input: config/ingress_controller.yml requires.processors: diff --git a/filebeat/module/nginx/ingress_controller/test/test.log-expected.json b/filebeat/module/nginx/ingress_controller/test/test.log-expected.json index 2dc9d1afbce..a2bf0f6c6e0 100644 --- a/filebeat/module/nginx/ingress_controller/test/test.log-expected.json +++ b/filebeat/module/nginx/ingress_controller/test/test.log-expected.json @@ -1,12 +1,19 @@ [ { "@timestamp": "2020-02-07T11:48:51.000Z", + "event.category": [ + "web" + ], "event.dataset": "nginx.ingress_controller", + "event.kind": "event", "event.module": "nginx", + "event.outcome": "success", "event.timezone": "-02:00", + "event.type": [ + "info" + ], "fileset.name": "ingress_controller", - "http.request.method": "POST", - "http.request.referrer": "-", + "http.request.method": "post", "http.response.body.bytes": 59, "http.response.status_code": 200, "http.version": "1.1", @@ -28,7 +35,6 @@ "service.type": "nginx", "source.address": "", "url.original": "/products", - "user.name": "-", "user_agent.device.name": "Other", "user_agent.name": "curl", "user_agent.original": "curl/7.54.0", @@ -36,12 +42,19 @@ }, { "@timestamp": "2020-02-07T11:49:15.000Z", + "event.category": [ + "web" + ], "event.dataset": "nginx.ingress_controller", + "event.kind": "event", "event.module": "nginx", + "event.outcome": "success", "event.timezone": "-02:00", + "event.type": [ + "info" + ], "fileset.name": "ingress_controller", - "http.request.method": "GET", - "http.request.referrer": "-", + "http.request.method": "get", "http.response.body.bytes": 59, "http.response.status_code": 200, "http.version": "1.1", @@ -63,7 +76,6 @@ "service.type": "nginx", "source.address": "", "url.original": "/products/42", - "user.name": "-", "user_agent.device.name": "Other", "user_agent.name": "curl", "user_agent.original": "curl/7.54.0", @@ -71,12 +83,19 @@ }, { "@timestamp": "2020-02-07T11:49:30.000Z", + "event.category": [ + "web" + ], "event.dataset": "nginx.ingress_controller", + "event.kind": "event", "event.module": "nginx", + "event.outcome": "success", "event.timezone": "-02:00", + "event.type": [ + "info" + ], "fileset.name": "ingress_controller", - "http.request.method": "DELETE", - "http.request.referrer": "-", + "http.request.method": "delete", "http.response.body.bytes": 59, "http.response.status_code": 200, "http.version": "1.1", @@ -98,7 +117,6 @@ "service.type": "nginx", "source.address": "", "url.original": "/products/42", - "user.name": "-", "user_agent.device.name": "Other", "user_agent.name": "curl", "user_agent.original": "curl/7.54.0", @@ -106,12 +124,19 @@ }, { "@timestamp": "2020-02-07T11:49:43.000Z", + "event.category": [ + "web" + ], "event.dataset": "nginx.ingress_controller", + "event.kind": "event", "event.module": "nginx", + "event.outcome": "success", "event.timezone": "-02:00", + "event.type": [ + "info" + ], "fileset.name": "ingress_controller", - "http.request.method": "PATCH", - "http.request.referrer": "-", + "http.request.method": "patch", "http.response.body.bytes": 59, "http.response.status_code": 200, "http.version": "1.1", @@ -133,7 +158,6 @@ 
"service.type": "nginx", "source.address": "", "url.original": "/products/42", - "user.name": "-", "user_agent.device.name": "Other", "user_agent.name": "curl", "user_agent.original": "curl/7.54.0", @@ -141,12 +165,19 @@ }, { "@timestamp": "2020-02-07T11:49:50.000Z", + "event.category": [ + "web" + ], "event.dataset": "nginx.ingress_controller", + "event.kind": "event", "event.module": "nginx", + "event.outcome": "failure", "event.timezone": "-02:00", + "event.type": [ + "info" + ], "fileset.name": "ingress_controller", - "http.request.method": "PATCHp", - "http.request.referrer": "-", + "http.request.method": "patchp", "http.response.body.bytes": 163, "http.response.status_code": 400, "http.version": "1.1", @@ -162,20 +193,23 @@ "nginx.ingress_controller.upstream.name": "", "service.type": "nginx", "source.address": "", - "url.original": "/products/42", - "user.name": "-", - "user_agent.device.name": "Other", - "user_agent.name": "Other", - "user_agent.original": "-" + "url.original": "/products/42" }, { "@timestamp": "2020-02-07T11:50:09.000Z", + "event.category": [ + "web" + ], "event.dataset": "nginx.ingress_controller", + "event.kind": "event", "event.module": "nginx", + "event.outcome": "failure", "event.timezone": "-02:00", + "event.type": [ + "info" + ], "fileset.name": "ingress_controller", "http.request.method": "geti", - "http.request.referrer": "-", "http.response.body.bytes": 163, "http.response.status_code": 400, "http.version": "1.1", @@ -191,20 +225,23 @@ "nginx.ingress_controller.upstream.name": "", "service.type": "nginx", "source.address": "", - "url.original": "/products/42", - "user.name": "-", - "user_agent.device.name": "Other", - "user_agent.name": "Other", - "user_agent.original": "-" + "url.original": "/products/42" }, { "@timestamp": "2020-02-07T11:55:05.000Z", + "event.category": [ + "web" + ], "event.dataset": "nginx.ingress_controller", + "event.kind": "event", "event.module": "nginx", + "event.outcome": "success", "event.timezone": "-02:00", + "event.type": [ + "info" + ], "fileset.name": "ingress_controller", - "http.request.method": "GET", - "http.request.referrer": "-", + "http.request.method": "get", "http.response.body.bytes": 59, "http.response.status_code": 200, "http.version": "1.1", @@ -226,7 +263,6 @@ "service.type": "nginx", "source.address": "", "url.original": "/products/42", - "user.name": "-", "user_agent.device.name": "Other", "user_agent.name": "Wget", "user_agent.original": "Wget/1.20.3 (darwin18.6.0)", @@ -234,12 +270,19 @@ }, { "@timestamp": "2020-02-07T11:55:57.000Z", + "event.category": [ + "web" + ], "event.dataset": "nginx.ingress_controller", + "event.kind": "event", "event.module": "nginx", + "event.outcome": "success", "event.timezone": "-02:00", + "event.type": [ + "info" + ], "fileset.name": "ingress_controller", - "http.request.method": "GET", - "http.request.referrer": "-", + "http.request.method": "get", "http.response.body.bytes": 59, "http.response.status_code": 200, "http.version": "1.1", @@ -261,7 +304,6 @@ "service.type": "nginx", "source.address": "", "url.original": "/products/42", - "user.name": "-", "user_agent.device.name": "Other", "user_agent.name": "Chrome", "user_agent.original": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36", @@ -272,11 +314,19 @@ }, { "@timestamp": "2020-02-07T11:55:57.000Z", + "event.category": [ + "web" + ], "event.dataset": "nginx.ingress_controller", + "event.kind": "event", "event.module": "nginx", + 
"event.outcome": "success", "event.timezone": "-02:00", + "event.type": [ + "info" + ], "fileset.name": "ingress_controller", - "http.request.method": "GET", + "http.request.method": "get", "http.request.referrer": "http://hello-world.info/products/42", "http.response.body.bytes": 59, "http.response.status_code": 200, @@ -299,7 +349,6 @@ "service.type": "nginx", "source.address": "", "url.original": "/favicon.ico", - "user.name": "-", "user_agent.device.name": "Other", "user_agent.name": "Chrome", "user_agent.original": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36", @@ -310,12 +359,19 @@ }, { "@timestamp": "2020-02-07T11:56:24.000Z", + "event.category": [ + "web" + ], "event.dataset": "nginx.ingress_controller", + "event.kind": "event", "event.module": "nginx", + "event.outcome": "success", "event.timezone": "-02:00", + "event.type": [ + "info" + ], "fileset.name": "ingress_controller", - "http.request.method": "GET", - "http.request.referrer": "-", + "http.request.method": "get", "http.response.body.bytes": 61, "http.response.status_code": 200, "http.version": "1.1", @@ -337,7 +393,6 @@ "service.type": "nginx", "source.address": "", "url.original": "/v2", - "user.name": "-", "user_agent.device.name": "Other", "user_agent.name": "Chrome", "user_agent.original": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36", @@ -348,11 +403,19 @@ }, { "@timestamp": "2020-02-07T11:56:24.000Z", + "event.category": [ + "web" + ], "event.dataset": "nginx.ingress_controller", + "event.kind": "event", "event.module": "nginx", + "event.outcome": "success", "event.timezone": "-02:00", + "event.type": [ + "info" + ], "fileset.name": "ingress_controller", - "http.request.method": "GET", + "http.request.method": "get", "http.request.referrer": "http://hello-world.info/v2", "http.response.body.bytes": 59, "http.response.status_code": 200, @@ -375,7 +438,6 @@ "service.type": "nginx", "source.address": "", "url.original": "/favicon.ico", - "user.name": "-", "user_agent.device.name": "Other", "user_agent.name": "Chrome", "user_agent.original": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36", @@ -386,12 +448,19 @@ }, { "@timestamp": "2020-02-07T11:56:36.000Z", + "event.category": [ + "web" + ], "event.dataset": "nginx.ingress_controller", + "event.kind": "event", "event.module": "nginx", + "event.outcome": "success", "event.timezone": "-02:00", + "event.type": [ + "info" + ], "fileset.name": "ingress_controller", - "http.request.method": "GET", - "http.request.referrer": "-", + "http.request.method": "get", "http.response.body.bytes": 59, "http.response.status_code": 200, "http.version": "1.1", @@ -413,7 +482,6 @@ "service.type": "nginx", "source.address": "", "url.original": "/products/42", - "user.name": "-", "user_agent.device.name": "Other", "user_agent.name": "Safari", "user_agent.original": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.0.5 Safari/605.1.15", @@ -424,11 +492,19 @@ }, { "@timestamp": "2020-02-07T11:56:36.000Z", + "event.category": [ + "web" + ], "event.dataset": "nginx.ingress_controller", + "event.kind": "event", "event.module": "nginx", + "event.outcome": "success", "event.timezone": "-02:00", + "event.type": [ + "info" + ], "fileset.name": "ingress_controller", - "http.request.method": "GET", + "http.request.method": 
"get", "http.request.referrer": "http://hello-world.info/products/42", "http.response.body.bytes": 59, "http.response.status_code": 200, @@ -451,7 +527,6 @@ "service.type": "nginx", "source.address": "", "url.original": "/favicon.ico", - "user.name": "-", "user_agent.device.name": "Other", "user_agent.name": "Safari", "user_agent.original": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.0.5 Safari/605.1.15", @@ -462,12 +537,19 @@ }, { "@timestamp": "2020-02-07T11:56:54.000Z", + "event.category": [ + "web" + ], "event.dataset": "nginx.ingress_controller", + "event.kind": "event", "event.module": "nginx", + "event.outcome": "success", "event.timezone": "-02:00", + "event.type": [ + "info" + ], "fileset.name": "ingress_controller", - "http.request.method": "GET", - "http.request.referrer": "-", + "http.request.method": "get", "http.response.body.bytes": 59, "http.response.status_code": 200, "http.version": "1.1", @@ -489,7 +571,6 @@ "service.type": "nginx", "source.address": "", "url.original": "/products/42", - "user.name": "-", "user_agent.device.name": "Other", "user_agent.name": "Safari", "user_agent.original": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.0.5 Safari/605.1.15", @@ -500,12 +581,19 @@ }, { "@timestamp": "2020-02-07T11:56:54.000Z", + "event.category": [ + "web" + ], "event.dataset": "nginx.ingress_controller", + "event.kind": "event", "event.module": "nginx", + "event.outcome": "success", "event.timezone": "-02:00", + "event.type": [ + "info" + ], "fileset.name": "ingress_controller", - "http.request.method": "GET", - "http.request.referrer": "-", + "http.request.method": "get", "http.response.body.bytes": 59, "http.response.status_code": 200, "http.version": "1.1", @@ -527,7 +615,6 @@ "service.type": "nginx", "source.address": "", "url.original": "/", - "user.name": "-", "user_agent.device.name": "Other", "user_agent.name": "Safari", "user_agent.original": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.0.5 Safari/605.1.15", @@ -538,11 +625,19 @@ }, { "@timestamp": "2020-02-07T11:56:54.000Z", + "event.category": [ + "web" + ], "event.dataset": "nginx.ingress_controller", + "event.kind": "event", "event.module": "nginx", + "event.outcome": "success", "event.timezone": "-02:00", + "event.type": [ + "info" + ], "fileset.name": "ingress_controller", - "http.request.method": "GET", + "http.request.method": "get", "http.request.referrer": "http://hello-world.info/", "http.response.body.bytes": 59, "http.response.status_code": 200, @@ -565,7 +660,6 @@ "service.type": "nginx", "source.address": "", "url.original": "/favicon.ico", - "user.name": "-", "user_agent.device.name": "Other", "user_agent.name": "Safari", "user_agent.original": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.0.5 Safari/605.1.15", @@ -576,12 +670,19 @@ }, { "@timestamp": "2020-02-07T11:56:56.000Z", + "event.category": [ + "web" + ], "event.dataset": "nginx.ingress_controller", + "event.kind": "event", "event.module": "nginx", + "event.outcome": "success", "event.timezone": "-02:00", + "event.type": [ + "info" + ], "fileset.name": "ingress_controller", - "http.request.method": "GET", - "http.request.referrer": "-", + "http.request.method": "get", "http.response.body.bytes": 61, "http.response.status_code": 200, "http.version": "1.1", @@ -603,7 +704,6 @@ "service.type": "nginx", "source.address": "", 
"url.original": "/v2", - "user.name": "-", "user_agent.device.name": "Other", "user_agent.name": "Safari", "user_agent.original": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.0.5 Safari/605.1.15", @@ -614,11 +714,19 @@ }, { "@timestamp": "2020-02-07T11:56:56.000Z", + "event.category": [ + "web" + ], "event.dataset": "nginx.ingress_controller", + "event.kind": "event", "event.module": "nginx", + "event.outcome": "success", "event.timezone": "-02:00", + "event.type": [ + "info" + ], "fileset.name": "ingress_controller", - "http.request.method": "GET", + "http.request.method": "get", "http.request.referrer": "http://hello-world.info/v2", "http.response.body.bytes": 59, "http.response.status_code": 200, @@ -641,7 +749,6 @@ "service.type": "nginx", "source.address": "", "url.original": "/favicon.ico", - "user.name": "-", "user_agent.device.name": "Other", "user_agent.name": "Safari", "user_agent.original": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.0.5 Safari/605.1.15", @@ -652,12 +759,19 @@ }, { "@timestamp": "2020-02-07T12:00:28.000Z", + "event.category": [ + "web" + ], "event.dataset": "nginx.ingress_controller", + "event.kind": "event", "event.module": "nginx", + "event.outcome": "success", "event.timezone": "-02:00", + "event.type": [ + "info" + ], "fileset.name": "ingress_controller", - "http.request.method": "GET", - "http.request.referrer": "-", + "http.request.method": "get", "http.response.body.bytes": 59, "http.response.status_code": 200, "http.version": "1.1", @@ -679,7 +793,6 @@ "service.type": "nginx", "source.address": "", "url.original": "/products/42?address=delhi+technological+university", - "user.name": "-", "user_agent.device.name": "Other", "user_agent.name": "Python Requests", "user_agent.original": "python-requests/2.22.0", @@ -687,12 +800,19 @@ }, { "@timestamp": "2020-02-07T12:02:38.000Z", + "event.category": [ + "web" + ], "event.dataset": "nginx.ingress_controller", + "event.kind": "event", "event.module": "nginx", + "event.outcome": "success", "event.timezone": "-02:00", + "event.type": [ + "info" + ], "fileset.name": "ingress_controller", - "http.request.method": "GET", - "http.request.referrer": "-", + "http.request.method": "get", "http.response.body.bytes": 61, "http.response.status_code": 200, "http.version": "1.1", @@ -714,7 +834,6 @@ "service.type": "nginx", "source.address": "", "url.original": "/v2", - "user.name": "-", "user_agent.device.name": "Other", "user_agent.name": "Firefox", "user_agent.original": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.14; rv:72.0) Gecko/20100101 Firefox/72.0", @@ -725,12 +844,19 @@ }, { "@timestamp": "2020-02-07T12:02:38.000Z", + "event.category": [ + "web" + ], "event.dataset": "nginx.ingress_controller", + "event.kind": "event", "event.module": "nginx", + "event.outcome": "success", "event.timezone": "-02:00", + "event.type": [ + "info" + ], "fileset.name": "ingress_controller", - "http.request.method": "GET", - "http.request.referrer": "-", + "http.request.method": "get", "http.response.body.bytes": 59, "http.response.status_code": 200, "http.version": "1.1", @@ -752,7 +878,6 @@ "service.type": "nginx", "source.address": "", "url.original": "/favicon.ico", - "user.name": "-", "user_agent.device.name": "Other", "user_agent.name": "Firefox", "user_agent.original": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.14; rv:72.0) Gecko/20100101 Firefox/72.0", @@ -763,12 +888,19 @@ }, { "@timestamp": 
"2020-02-07T12:02:42.000Z", + "event.category": [ + "web" + ], "event.dataset": "nginx.ingress_controller", + "event.kind": "event", "event.module": "nginx", + "event.outcome": "success", "event.timezone": "-02:00", + "event.type": [ + "info" + ], "fileset.name": "ingress_controller", - "http.request.method": "GET", - "http.request.referrer": "-", + "http.request.method": "get", "http.response.body.bytes": 61, "http.response.status_code": 200, "http.version": "1.1", @@ -790,7 +922,6 @@ "service.type": "nginx", "source.address": "", "url.original": "/v2/some", - "user.name": "-", "user_agent.device.name": "Other", "user_agent.name": "Firefox", "user_agent.original": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.14; rv:72.0) Gecko/20100101 Firefox/72.0", diff --git a/filebeat/module/postgresql/log/ingest/pipeline.json b/filebeat/module/postgresql/log/ingest/pipeline.json deleted file mode 100644 index 1bed827739d..00000000000 --- a/filebeat/module/postgresql/log/ingest/pipeline.json +++ /dev/null @@ -1,49 +0,0 @@ -{ - "description": "Pipeline for parsing PostgreSQL logs.", - "processors": [ - { - "grok": { - "field": "message", - "ignore_missing": true, - "patterns": [ - "^%{DATETIME:postgresql.log.timestamp} \\[%{NUMBER:process.pid:long}(-%{BASE16FLOAT:postgresql.log.core_id:long})?\\] ((\\[%{USERNAME:user.name}\\]@\\[%{POSTGRESQL_DB_NAME:postgresql.log.database}\\]|%{USERNAME:user.name}@%{POSTGRESQL_DB_NAME:postgresql.log.database}) )?%{WORD:log.level}: (?:%{NUMBER:postgresql.log.error.code:long}|%{SPACE})(duration: %{NUMBER:temp.duration:float} ms %{POSTGRESQL_QUERY_STEP}: %{GREEDYDATA:postgresql.log.query}|: %{GREEDYDATA:message}|%{GREEDYDATA:message})" - ], - "pattern_definitions": { - "DATETIME": "[-0-9]+ %{TIME} %{WORD:event.timezone}", - "GREEDYDATA": "(.|\n|\t)*", - "POSTGRESQL_DB_NAME": "[a-zA-Z0-9_]+[a-zA-Z0-9_\\$]*", - "POSTGRESQL_QUERY_STEP": "%{WORD:postgresql.log.query_step}(?: | %{WORD:postgresql.log.query_name})?" - } - } - }, - { - "date": { - "field": "postgresql.log.timestamp", - "target_field": "@timestamp", - "formats": [ - "yyyy-MM-dd HH:mm:ss.SSS zz", "yyyy-MM-dd HH:mm:ss zz" - ] - } - }, { - "script": { - "lang": "painless", - "source": "ctx.event.duration = Math.round(ctx.temp.duration * params.scale)", - "params": { "scale": 1000000 }, - "if": "ctx.temp?.duration != null" - } - }, { - "remove": { - "field": "temp.duration", - "ignore_missing": true - } - } - ], - "on_failure": [ - { - "set": { - "field": "error.message", - "value": "{{ _ingest.on_failure_message }}" - } - } - ] -} diff --git a/filebeat/module/postgresql/log/ingest/pipeline.yml b/filebeat/module/postgresql/log/ingest/pipeline.yml new file mode 100644 index 00000000000..bd7fbd69e7d --- /dev/null +++ b/filebeat/module/postgresql/log/ingest/pipeline.yml @@ -0,0 +1,57 @@ +description: Pipeline for parsing PostgreSQL logs. 
+processors:
+- grok:
+    field: message
+    ignore_missing: true
+    patterns:
+    - '^%{DATETIME:postgresql.log.timestamp} \[%{NUMBER:process.pid:long}(-%{BASE16FLOAT:postgresql.log.core_id:long})?\]
+      ((\[%{USERNAME:user.name}\]@\[%{POSTGRESQL_DB_NAME:postgresql.log.database}\]|%{USERNAME:user.name}@%{POSTGRESQL_DB_NAME:postgresql.log.database})
+      )?%{WORD:log.level}: (?:%{NUMBER:postgresql.log.error.code:long}|%{SPACE})(duration:
+      %{NUMBER:temp.duration:float} ms %{POSTGRESQL_QUERY_STEP}: %{GREEDYDATA:postgresql.log.query}|:
+      %{GREEDYDATA:message}|%{GREEDYDATA:message})'
+    pattern_definitions:
+      DATETIME: '[-0-9]+ %{TIME} %{WORD:event.timezone}'
+      GREEDYDATA: |-
+        (.|
+        |	)*
+      POSTGRESQL_DB_NAME: '[a-zA-Z0-9_]+[a-zA-Z0-9_\$]*'
+      POSTGRESQL_QUERY_STEP: '%{WORD:postgresql.log.query_step}(?: | %{WORD:postgresql.log.query_name})?'
+- date:
+    field: postgresql.log.timestamp
+    target_field: '@timestamp'
+    formats:
+    - yyyy-MM-dd HH:mm:ss.SSS zz
+    - yyyy-MM-dd HH:mm:ss zz
+- script:
+    lang: painless
+    source: ctx.event.duration = Math.round(ctx.temp.duration * params.scale)
+    params:
+      scale: 1000000
+    if: ctx.temp?.duration != null
+- remove:
+    field: temp.duration
+    ignore_missing: true
+- set:
+    field: event.kind
+    value: event
+- append:
+    field: event.category
+    value:
+    - database
+- append:
+    field: event.type
+    value:
+    - info
+- append:
+    field: event.type
+    value:
+    - error
+    if: "ctx?.postgresql?.log?.error?.code != null && ctx.postgresql.log.error.code >= 02000"
+- append:
+    field: related.user
+    value: "{{user.name}}"
+    if: "ctx?.user?.name != null"
+on_failure:
+- set:
+    field: error.message
+    value: '{{ _ingest.on_failure_message }}'
diff --git a/filebeat/module/postgresql/log/manifest.yml b/filebeat/module/postgresql/log/manifest.yml
index e5ab4a9a69c..ade6e2899de 100644
--- a/filebeat/module/postgresql/log/manifest.yml
+++ b/filebeat/module/postgresql/log/manifest.yml
@@ -9,5 +9,5 @@ var:
     os.windows:
       - "c:/Program Files/PostgreSQL/*/logs/*.log*"
 
-ingest_pipeline: ingest/pipeline.json
+ingest_pipeline: ingest/pipeline.yml
 input: config/log.yml
diff --git a/filebeat/module/postgresql/log/test/postgresql-11.4.log-expected.json b/filebeat/module/postgresql/log/test/postgresql-11.4.log-expected.json index 2c347c87c6a..2d95ce2fd0e 100644 --- a/filebeat/module/postgresql/log/test/postgresql-11.4.log-expected.json +++ b/filebeat/module/postgresql/log/test/postgresql-11.4.log-expected.json @@ -1,9 +1,16 @@ [ { "@timestamp": "2019-07-23T12:06:24.406Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "UTC", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "LOG", @@ -16,9 +23,16 @@ }, { "@timestamp": "2019-07-23T12:06:24.406Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "UTC", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "LOCATION", @@ -30,9 +44,16 @@ }, { "@timestamp": "2019-07-23T12:06:24.478Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "UTC", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "LOG", @@ -45,9 +66,16 @@ }, { "@timestamp": "2019-07-23T12:06:24.478Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event",
"event.module": "postgresql", "event.timezone": "UTC", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "LOCATION", @@ -59,9 +87,16 @@ }, { "@timestamp": "2019-07-23T12:06:24.485Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "UTC", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "LOG", @@ -74,9 +109,16 @@ }, { "@timestamp": "2019-07-23T12:06:24.485Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "UTC", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "LOCATION", @@ -88,9 +130,16 @@ }, { "@timestamp": "2019-07-23T12:06:24.485Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "UTC", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "LOG", @@ -103,9 +152,16 @@ }, { "@timestamp": "2019-07-23T12:06:24.485Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "UTC", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "LOCATION", @@ -117,9 +173,16 @@ }, { "@timestamp": "2019-07-23T12:06:24.485Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "UTC", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "LOG", @@ -132,9 +195,16 @@ }, { "@timestamp": "2019-07-23T12:06:24.485Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "UTC", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "LOCATION", @@ -146,9 +216,16 @@ }, { "@timestamp": "2019-07-23T12:06:24.507Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "UTC", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "LOG", @@ -161,9 +238,16 @@ }, { "@timestamp": "2019-07-23T12:06:24.507Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "UTC", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "LOCATION", @@ -175,9 +259,16 @@ }, { "@timestamp": "2019-07-23T12:06:30.536Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "UTC", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "LOG", @@ -190,9 +281,16 @@ }, { "@timestamp": "2019-07-23T12:06:30.536Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "UTC", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "LOCATION", @@ -204,9 +302,16 @@ }, { "@timestamp": "2019-07-23T12:06:30.537Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": 
"postgresql", "event.timezone": "UTC", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "LOG", @@ -219,9 +324,16 @@ }, { "@timestamp": "2019-07-23T12:06:30.537Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "UTC", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "LOCATION", @@ -233,9 +345,16 @@ }, { "@timestamp": "2019-07-23T12:06:33.732Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "UTC", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "LOG", @@ -248,9 +367,16 @@ }, { "@timestamp": "2019-07-23T12:06:33.732Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "UTC", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "LOCATION", @@ -262,9 +388,17 @@ }, { "@timestamp": "2019-07-23T12:06:33.732Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "UTC", + "event.type": [ + "info", + "error" + ], "fileset.name": "log", "input.type": "log", "log.level": "ERROR", @@ -277,9 +411,16 @@ }, { "@timestamp": "2019-07-23T12:06:33.732Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "UTC", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "LOCATION", @@ -291,9 +432,16 @@ }, { "@timestamp": "2019-07-23T12:06:33.732Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "UTC", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "STATEMENT", @@ -305,9 +453,16 @@ }, { "@timestamp": "2019-07-23T12:06:34.877Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "UTC", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "LOG", @@ -320,9 +475,16 @@ }, { "@timestamp": "2019-07-23T12:06:34.877Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "UTC", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "LOCATION", @@ -334,9 +496,16 @@ }, { "@timestamp": "2019-07-23T12:06:34.878Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "UTC", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "LOG", @@ -349,9 +518,16 @@ }, { "@timestamp": "2019-07-23T12:06:34.878Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "UTC", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "LOCATION", @@ -363,9 +539,16 @@ }, { "@timestamp": "2019-07-23T12:09:57.563Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": 
"postgresql", "event.timezone": "UTC", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "LOG", @@ -378,9 +561,16 @@ }, { "@timestamp": "2019-07-23T12:09:57.563Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "UTC", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "LOCATION", @@ -392,9 +582,16 @@ }, { "@timestamp": "2019-07-23T12:09:57.565Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "UTC", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "LOG", @@ -407,9 +604,16 @@ }, { "@timestamp": "2019-07-23T12:09:57.565Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "UTC", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "LOCATION", diff --git a/filebeat/module/postgresql/log/test/postgresql-9.6-debian-with-slowlog.log-expected.json b/filebeat/module/postgresql/log/test/postgresql-9.6-debian-with-slowlog.log-expected.json index 201c50cb0b7..280547f6b29 100644 --- a/filebeat/module/postgresql/log/test/postgresql-9.6-debian-with-slowlog.log-expected.json +++ b/filebeat/module/postgresql/log/test/postgresql-9.6-debian-with-slowlog.log-expected.json @@ -1,9 +1,16 @@ [ { "@timestamp": "2017-07-31T11:36:42.585Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "CEST", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "LOG", @@ -15,9 +22,16 @@ }, { "@timestamp": "2017-07-31T11:36:42.605Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "CEST", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "LOG", @@ -29,9 +43,16 @@ }, { "@timestamp": "2017-07-31T11:36:42.615Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "CEST", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "LOG", @@ -43,9 +64,16 @@ }, { "@timestamp": "2017-07-31T11:36:42.616Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "CEST", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "LOG", @@ -57,9 +85,16 @@ }, { "@timestamp": "2017-07-31T11:36:42.956Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "CEST", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "LOG", @@ -68,15 +103,25 @@ "postgresql.log.database": "unknown", "postgresql.log.timestamp": "2017-07-31 13:36:42.956 CEST", "process.pid": 4980, + "related.user": [ + "unknown" + ], "service.type": "postgresql", "user.name": "unknown" }, { "@timestamp": "2017-07-31T11:36:43.557Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", "event.duration": 37118000, + "event.kind": "event", "event.module": "postgresql", 
"event.timezone": "CEST", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.flags": [ @@ -90,15 +135,25 @@ "postgresql.log.query_step": "statement", "postgresql.log.timestamp": "2017-07-31 13:36:43.557 CEST", "process.pid": 4983, + "related.user": [ + "postgres" + ], "service.type": "postgresql", "user.name": "postgres" }, { "@timestamp": "2017-07-31T11:36:44.104Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", "event.duration": 2895000, + "event.kind": "event", "event.module": "postgresql", "event.timezone": "CEST", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.flags": [ @@ -112,15 +167,25 @@ "postgresql.log.query_step": "statement", "postgresql.log.timestamp": "2017-07-31 13:36:44.104 CEST", "process.pid": 4986, + "related.user": [ + "postgres" + ], "service.type": "postgresql", "user.name": "postgres" }, { "@timestamp": "2017-07-31T11:36:44.642Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", "event.duration": 2809000, + "event.kind": "event", "event.module": "postgresql", "event.timezone": "CEST", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.flags": [ @@ -134,14 +199,24 @@ "postgresql.log.query_step": "statement", "postgresql.log.timestamp": "2017-07-31 13:36:44.642 CEST", "process.pid": 4989, + "related.user": [ + "postgres" + ], "service.type": "postgresql", "user.name": "postgres" }, { "@timestamp": "2017-07-31T11:39:16.249Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "CEST", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "FATAL", @@ -150,14 +225,24 @@ "postgresql.log.database": "users", "postgresql.log.timestamp": "2017-07-31 13:39:16.249 CEST", "process.pid": 5407, + "related.user": [ + "postgres" + ], "service.type": "postgresql", "user.name": "postgres" }, { "@timestamp": "2017-07-31T11:39:17.945Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "CEST", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "FATAL", @@ -166,15 +251,25 @@ "postgresql.log.database": "user", "postgresql.log.timestamp": "2017-07-31 13:39:17.945 CEST", "process.pid": 5500, + "related.user": [ + "postgres" + ], "service.type": "postgresql", "user.name": "postgres" }, { "@timestamp": "2017-07-31T11:39:21.025Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", "event.duration": 37598000, + "event.kind": "event", "event.module": "postgresql", "event.timezone": "CEST", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.flags": [ @@ -188,15 +283,25 @@ "postgresql.log.query_step": "statement", "postgresql.log.timestamp": "2017-07-31 13:39:21.025 CEST", "process.pid": 5404, + "related.user": [ + "postgres" + ], "service.type": "postgresql", "user.name": "postgres" }, { "@timestamp": "2017-07-31T11:39:31.619Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", "event.duration": 9482000, + "event.kind": "event", "event.module": "postgresql", "event.timezone": "CEST", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "LOG", @@ -207,15 +312,25 @@ "postgresql.log.query_step": "statement", "postgresql.log.timestamp": "2017-07-31 13:39:31.619 CEST", 
"process.pid": 5502, + "related.user": [ + "postgres" + ], "service.type": "postgresql", "user.name": "postgres" }, { "@timestamp": "2017-07-31T11:39:40.147Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", "event.duration": 765000, + "event.kind": "event", "event.module": "postgresql", "event.timezone": "CEST", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "LOG", @@ -226,15 +341,25 @@ "postgresql.log.query_step": "statement", "postgresql.log.timestamp": "2017-07-31 13:39:40.147 CEST", "process.pid": 5502, + "related.user": [ + "postgres" + ], "service.type": "postgresql", "user.name": "postgres" }, { "@timestamp": "2017-07-31T11:40:54.310Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", - "event.duration": 26082001, + "event.duration": 26082000, + "event.kind": "event", "event.module": "postgresql", "event.timezone": "CEST", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.flags": [ @@ -248,15 +373,25 @@ "postgresql.log.query_step": "statement", "postgresql.log.timestamp": "2017-07-31 13:40:54.310 CEST", "process.pid": 5502, + "related.user": [ + "postgres" + ], "service.type": "postgresql", "user.name": "postgres" }, { "@timestamp": "2017-07-31T11:43:22.645Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", - "event.duration": 36161999, + "event.duration": 36162000, + "event.kind": "event", "event.module": "postgresql", "event.timezone": "CEST", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "LOG", @@ -267,15 +402,25 @@ "postgresql.log.query_step": "statement", "postgresql.log.timestamp": "2017-07-31 13:43:22.645 CEST", "process.pid": 5502, + "related.user": [ + "postgres" + ], "service.type": "postgresql", "user.name": "postgres" }, { "@timestamp": "2017-07-31T11:46:02.670Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", "event.duration": 10540000, + "event.kind": "event", "event.module": "postgresql", "event.timezone": "CEST", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "LOG", @@ -286,15 +431,25 @@ "postgresql.log.query_step": "statement", "postgresql.log.timestamp": "2017-07-31 13:46:02.670 CEST", "process.pid": 5502, + "related.user": [ + "postgres" + ], "service.type": "postgresql", "user.name": "postgres" }, { "@timestamp": "2017-07-31T11:46:23.016Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", "event.duration": 5156000, + "event.kind": "event", "event.module": "postgresql", "event.timezone": "CEST", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "LOG", @@ -305,15 +460,25 @@ "postgresql.log.query_step": "statement", "postgresql.log.timestamp": "2017-07-31 13:46:23.016 CEST", "process.pid": 5502, + "related.user": [ + "postgres" + ], "service.type": "postgresql", "user.name": "postgres" }, { "@timestamp": "2017-07-31T11:46:55.637Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", "event.duration": 25871000, + "event.kind": "event", "event.module": "postgresql", "event.timezone": "CEST", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "LOG", @@ -324,15 +489,25 @@ "postgresql.log.query_step": "statement", "postgresql.log.timestamp": "2017-07-31 13:46:55.637 CEST", "process.pid": 5502, + "related.user": [ + "postgres" + ], "service.type": "postgresql", "user.name": "postgres" 
}, { "@timestamp": "2019-05-06T19:00:04.511Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", "event.duration": 753000, + "event.kind": "event", "event.module": "postgresql", "event.timezone": "UTC", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.flags": [ @@ -346,6 +521,9 @@ "postgresql.log.query_step": "statement", "postgresql.log.timestamp": "2019-05-06 19:00:04.511 UTC", "process.pid": 913763, + "related.user": [ + "elastic" + ], "service.type": "postgresql", "user.name": "elastic" } diff --git a/filebeat/module/postgresql/log/test/postgresql-9.6-multi-core.log-expected.json b/filebeat/module/postgresql/log/test/postgresql-9.6-multi-core.log-expected.json index dbd1e12dd49..76f1bd2f065 100644 --- a/filebeat/module/postgresql/log/test/postgresql-9.6-multi-core.log-expected.json +++ b/filebeat/module/postgresql/log/test/postgresql-9.6-multi-core.log-expected.json @@ -1,9 +1,16 @@ [ { "@timestamp": "2017-04-03T20:32:14.322Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "CEST", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "LOG", @@ -13,14 +20,24 @@ "postgresql.log.database": "unknown", "postgresql.log.timestamp": "2017-04-03 22:32:14.322 CEST", "process.pid": 12975, + "related.user": [ + "unknown" + ], "service.type": "postgresql", "user.name": "unknown" }, { "@timestamp": "2017-04-03T20:32:14.322Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "CEST", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "FATAL", @@ -30,15 +47,25 @@ "postgresql.log.database": "user", "postgresql.log.timestamp": "2017-04-03 22:32:14.322 CEST", "process.pid": 5404, + "related.user": [ + "postgres" + ], "service.type": "postgresql", "user.name": "postgres" }, { "@timestamp": "2017-04-03T20:35:22.389Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", "event.duration": 37598000, + "event.kind": "event", "event.module": "postgresql", "event.timezone": "CEST", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.flags": [ @@ -53,14 +80,24 @@ "postgresql.log.query_step": "statement", "postgresql.log.timestamp": "2017-04-03 22:35:22.389 CEST", "process.pid": 5404, + "related.user": [ + "postgres" + ], "service.type": "postgresql", "user.name": "postgres" }, { "@timestamp": "2017-07-31T17:36:43.557Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "EST", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "LOG", @@ -73,9 +110,16 @@ }, { "@timestamp": "2017-07-31T17:36:44.227Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "EST", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "LOG", @@ -88,9 +132,16 @@ }, { "@timestamp": "2017-07-31T17:46:02.670Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "EST", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "HINT", @@ -103,9 +154,16 @@ }, { "@timestamp": 
"2017-07-31T17:46:23.016Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "EST", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "FATAL", @@ -115,14 +173,24 @@ "postgresql.log.database": "postgres", "postgresql.log.timestamp": "2017-07-31 13:46:23.016 EST", "process.pid": 768, + "related.user": [ + "postgres" + ], "service.type": "postgresql", "user.name": "postgres" }, { "@timestamp": "2017-07-31T17:46:55.637Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "EST", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "FATAL", @@ -132,6 +200,9 @@ "postgresql.log.database": "postgres", "postgresql.log.timestamp": "2017-07-31 13:46:55.637 EST", "process.pid": 771, + "related.user": [ + "postgres" + ], "service.type": "postgresql", "user.name": "postgres" } diff --git a/filebeat/module/postgresql/log/test/postgresql-9.6-new-timestamp.log-expected.json b/filebeat/module/postgresql/log/test/postgresql-9.6-new-timestamp.log-expected.json index 9737568df83..9a1d8b1b5fa 100644 --- a/filebeat/module/postgresql/log/test/postgresql-9.6-new-timestamp.log-expected.json +++ b/filebeat/module/postgresql/log/test/postgresql-9.6-new-timestamp.log-expected.json @@ -1,9 +1,16 @@ [ { "@timestamp": "2017-07-31T17:36:43.000Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "EST", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "LOG", @@ -16,9 +23,16 @@ }, { "@timestamp": "2017-07-31T17:36:44.000Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "EST", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "LOG", @@ -31,9 +45,16 @@ }, { "@timestamp": "2017-07-31T17:46:02.000Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "EST", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "HINT", @@ -46,9 +67,16 @@ }, { "@timestamp": "2017-07-31T17:46:23.000Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "EST", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "FATAL", @@ -58,14 +86,24 @@ "postgresql.log.database": "postgres", "postgresql.log.timestamp": "2017-07-31 13:46:23 EST", "process.pid": 768, + "related.user": [ + "postgres" + ], "service.type": "postgresql", "user.name": "postgres" }, { "@timestamp": "2017-07-31T17:46:55.000Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "EST", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "FATAL", @@ -75,6 +113,9 @@ "postgresql.log.database": "postgres", "postgresql.log.timestamp": "2017-07-31 13:46:55 EST", "process.pid": 771, + "related.user": [ + "postgres" + ], "service.type": "postgresql", "user.name": "postgres" } diff --git 
a/filebeat/module/postgresql/log/test/postgresql-query-steps-slowlog.log-expected.json b/filebeat/module/postgresql/log/test/postgresql-query-steps-slowlog.log-expected.json index 273499e8634..cec040589ab 100644 --- a/filebeat/module/postgresql/log/test/postgresql-query-steps-slowlog.log-expected.json +++ b/filebeat/module/postgresql/log/test/postgresql-query-steps-slowlog.log-expected.json @@ -1,10 +1,17 @@ [ { "@timestamp": "2019-09-04T13:52:38.004Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", "event.duration": 12437000, + "event.kind": "event", "event.module": "postgresql", "event.timezone": "CEST", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "LOG", @@ -15,15 +22,25 @@ "postgresql.log.query_step": "parse", "postgresql.log.timestamp": "2019-09-04 15:52:38.004 CEST", "process.pid": 31136, + "related.user": [ + "user" + ], "service.type": "postgresql", "user.name": "user" }, { "@timestamp": "2019-09-04T13:52:38.004Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", "event.duration": 12437000, + "event.kind": "event", "event.module": "postgresql", "event.timezone": "CEST", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.flags": [ @@ -38,6 +55,9 @@ "postgresql.log.query_step": "execute", "postgresql.log.timestamp": "2019-09-04 15:52:38.004 CEST", "process.pid": 31136, + "related.user": [ + "user" + ], "service.type": "postgresql", "user.name": "user" } diff --git a/filebeat/module/postgresql/log/test/postgresql-ubuntu-9.5.log-expected.json b/filebeat/module/postgresql/log/test/postgresql-ubuntu-9.5.log-expected.json index 0d1b3df95b5..f1248d53e45 100644 --- a/filebeat/module/postgresql/log/test/postgresql-ubuntu-9.5.log-expected.json +++ b/filebeat/module/postgresql/log/test/postgresql-ubuntu-9.5.log-expected.json @@ -1,9 +1,16 @@ [ { "@timestamp": "2017-04-03T20:32:14.322Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "CEST", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "LOG", @@ -12,14 +19,24 @@ "postgresql.log.database": "mydb", "postgresql.log.timestamp": "2017-04-03 22:32:14.322 CEST", "process.pid": 31225, + "related.user": [ + "postgres" + ], "service.type": "postgresql", "user.name": "postgres" }, { "@timestamp": "2017-04-03T20:32:14.322Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "CEST", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "LOG", @@ -28,14 +45,24 @@ "postgresql.log.database": "mydb", "postgresql.log.timestamp": "2017-04-03 22:32:14.322 CEST", "process.pid": 31225, + "related.user": [ + "postgres" + ], "service.type": "postgresql", "user.name": "postgres" }, { "@timestamp": "2017-04-03T20:35:22.389Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "CEST", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "LOG", @@ -44,14 +71,24 @@ "postgresql.log.database": "mydb", "postgresql.log.timestamp": "2017-04-03 22:35:22.389 CEST", "process.pid": 3474, + "related.user": [ + "postgres" + ], "service.type": "postgresql", "user.name": "postgres" }, { "@timestamp": "2017-04-03T20:36:56.464Z", + "event.category": [ + 
"database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "CEST", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "LOG", @@ -60,14 +97,24 @@ "postgresql.log.database": "mydb", "postgresql.log.timestamp": "2017-04-03 22:36:56.464 CEST", "process.pid": 3525, + "related.user": [ + "postgres" + ], "service.type": "postgresql", "user.name": "postgres" }, { "@timestamp": "2017-04-03T20:37:12.961Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "CEST", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "LOG", @@ -76,14 +123,24 @@ "postgresql.log.database": "mydb", "postgresql.log.timestamp": "2017-04-03 22:37:12.961 CEST", "process.pid": 3570, + "related.user": [ + "postgres" + ], "service.type": "postgresql", "user.name": "postgres" }, { "@timestamp": "2017-04-07T19:05:28.549Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "CEST", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "LOG", @@ -92,14 +149,24 @@ "postgresql.log.database": "mydb", "postgresql.log.timestamp": "2017-04-07 21:05:28.549 CEST", "process.pid": 21483, + "related.user": [ + "postgres" + ], "service.type": "postgresql", "user.name": "postgres" }, { "@timestamp": "2017-04-07T19:09:41.345Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "CEST", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "LOG", @@ -108,14 +175,24 @@ "postgresql.log.database": "mydb", "postgresql.log.timestamp": "2017-04-07 21:09:41.345 CEST", "process.pid": 21597, + "related.user": [ + "postgres" + ], "service.type": "postgresql", "user.name": "postgres" }, { "@timestamp": "2017-04-07T20:45:30.218Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "CEST", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "ERROR", @@ -124,14 +201,24 @@ "postgresql.log.database": "mydb", "postgresql.log.timestamp": "2017-04-07 22:45:30.218 CEST", "process.pid": 22603, + "related.user": [ + "postgres" + ], "service.type": "postgresql", "user.name": "postgres" }, { "@timestamp": "2017-04-07T20:45:30.218Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "CEST", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "HINT", @@ -140,14 +227,24 @@ "postgresql.log.database": "mydb", "postgresql.log.timestamp": "2017-04-07 22:45:30.218 CEST", "process.pid": 22603, + "related.user": [ + "postgres" + ], "service.type": "postgresql", "user.name": "postgres" }, { "@timestamp": "2017-04-07T20:45:30.218Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "CEST", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "STATEMENT", @@ -156,14 +253,24 @@ "postgresql.log.database": "mydb", "postgresql.log.timestamp": "2017-04-07 22:45:30.218 CEST", "process.pid": 22603, + 
"related.user": [ + "postgres" + ], "service.type": "postgresql", "user.name": "postgres" }, { "@timestamp": "2017-04-07T20:46:09.751Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "CEST", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "ERROR", @@ -172,14 +279,24 @@ "postgresql.log.database": "mydb", "postgresql.log.timestamp": "2017-04-07 22:46:09.751 CEST", "process.pid": 22608, + "related.user": [ + "postgres" + ], "service.type": "postgresql", "user.name": "postgres" }, { "@timestamp": "2017-04-07T20:46:09.751Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "CEST", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "STATEMENT", @@ -188,14 +305,24 @@ "postgresql.log.database": "mydb", "postgresql.log.timestamp": "2017-04-07 22:46:09.751 CEST", "process.pid": 22608, + "related.user": [ + "postgres" + ], "service.type": "postgresql", "user.name": "postgres" }, { "@timestamp": "2017-04-07T21:02:51.199Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "CEST", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "LOG", @@ -204,14 +331,24 @@ "postgresql.log.database": "mydb", "postgresql.log.timestamp": "2017-04-07 23:02:51.199 CEST", "process.pid": 24341, + "related.user": [ + "postgres" + ], "service.type": "postgresql", "user.name": "postgres" }, { "@timestamp": "2017-04-07T21:02:51.199Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "CEST", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "LOG", @@ -220,14 +357,24 @@ "postgresql.log.database": "mydb", "postgresql.log.timestamp": "2017-04-07 23:02:51.199 CEST", "process.pid": 24341, + "related.user": [ + "postgres" + ], "service.type": "postgresql", "user.name": "postgres" }, { "@timestamp": "2017-04-07T21:04:36.087Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "CEST", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "ERROR", @@ -236,14 +383,24 @@ "postgresql.log.database": "mydb", "postgresql.log.timestamp": "2017-04-07 23:04:36.087 CEST", "process.pid": 20730, + "related.user": [ + "postgres" + ], "service.type": "postgresql", "user.name": "postgres" }, { "@timestamp": "2017-04-07T21:04:36.087Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "CEST", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "STATEMENT", @@ -252,14 +409,24 @@ "postgresql.log.database": "mydb", "postgresql.log.timestamp": "2017-04-07 23:04:36.087 CEST", "process.pid": 20730, + "related.user": [ + "postgres" + ], "service.type": "postgresql", "user.name": "postgres" }, { "@timestamp": "2017-04-07T21:04:51.462Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "CEST", + "event.type": [ + "info" + ], "fileset.name": "log", 
"input.type": "log", "log.level": "ERROR", @@ -268,14 +435,24 @@ "postgresql.log.database": "mydb", "postgresql.log.timestamp": "2017-04-07 23:04:51.462 CEST", "process.pid": 20730, + "related.user": [ + "postgres" + ], "service.type": "postgresql", "user.name": "postgres" }, { "@timestamp": "2017-04-07T21:04:51.462Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "CEST", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "STATEMENT", @@ -284,14 +461,24 @@ "postgresql.log.database": "mydb", "postgresql.log.timestamp": "2017-04-07 23:04:51.462 CEST", "process.pid": 20730, + "related.user": [ + "postgres" + ], "service.type": "postgresql", "user.name": "postgres" }, { "@timestamp": "2017-04-07T21:05:06.217Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "CEST", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "ERROR", @@ -300,14 +487,24 @@ "postgresql.log.database": "mydb", "postgresql.log.timestamp": "2017-04-07 23:05:06.217 CEST", "process.pid": 20730, + "related.user": [ + "postgres" + ], "service.type": "postgresql", "user.name": "postgres" }, { "@timestamp": "2017-04-07T21:05:06.217Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "CEST", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "STATEMENT", @@ -316,14 +513,24 @@ "postgresql.log.database": "mydb", "postgresql.log.timestamp": "2017-04-07 23:05:06.217 CEST", "process.pid": 20730, + "related.user": [ + "postgres" + ], "service.type": "postgresql", "user.name": "postgres" }, { "@timestamp": "2017-04-07T21:05:18.295Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "CEST", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "ERROR", @@ -332,14 +539,24 @@ "postgresql.log.database": "mydb", "postgresql.log.timestamp": "2017-04-07 23:05:18.295 CEST", "process.pid": 20730, + "related.user": [ + "postgres" + ], "service.type": "postgresql", "user.name": "postgres" }, { "@timestamp": "2017-04-07T21:05:18.295Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "CEST", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "STATEMENT", @@ -348,14 +565,24 @@ "postgresql.log.database": "mydb", "postgresql.log.timestamp": "2017-04-07 23:05:18.295 CEST", "process.pid": 20730, + "related.user": [ + "postgres" + ], "service.type": "postgresql", "user.name": "postgres" }, { "@timestamp": "2017-04-07T21:13:47.505Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "CEST", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "LOG", @@ -364,14 +591,24 @@ "postgresql.log.database": "mydb", "postgresql.log.timestamp": "2017-04-07 23:13:47.505 CEST", "process.pid": 24489, + "related.user": [ + "postgres" + ], "service.type": "postgresql", "user.name": "postgres" }, { "@timestamp": "2017-04-07T21:13:47.505Z", + "event.category": [ + 
"database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "CEST", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "LOG", @@ -380,14 +617,24 @@ "postgresql.log.database": "mydb", "postgresql.log.timestamp": "2017-04-07 23:13:47.505 CEST", "process.pid": 24489, + "related.user": [ + "postgres" + ], "service.type": "postgresql", "user.name": "postgres" }, { "@timestamp": "2017-04-08T10:32:51.056Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "CEST", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "ERROR", @@ -396,14 +643,24 @@ "postgresql.log.database": "mydb", "postgresql.log.timestamp": "2017-04-08 12:32:51.056 CEST", "process.pid": 20730, + "related.user": [ + "postgres" + ], "service.type": "postgresql", "user.name": "postgres" }, { "@timestamp": "2017-04-08T10:32:51.056Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "CEST", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "DETAIL", @@ -412,14 +669,24 @@ "postgresql.log.database": "mydb", "postgresql.log.timestamp": "2017-04-08 12:32:51.056 CEST", "process.pid": 20730, + "related.user": [ + "postgres" + ], "service.type": "postgresql", "user.name": "postgres" }, { "@timestamp": "2017-04-08T10:32:51.056Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "CEST", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "STATEMENT", @@ -428,14 +695,24 @@ "postgresql.log.database": "mydb", "postgresql.log.timestamp": "2017-04-08 12:32:51.056 CEST", "process.pid": 20730, + "related.user": [ + "postgres" + ], "service.type": "postgresql", "user.name": "postgres" }, { "@timestamp": "2017-04-08T19:54:37.443Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "CEST", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "LOG", @@ -444,14 +721,24 @@ "postgresql.log.database": "mydb", "postgresql.log.timestamp": "2017-04-08 21:54:37.443 CEST", "process.pid": 30630, + "related.user": [ + "postgres" + ], "service.type": "postgresql", "user.name": "postgres" }, { "@timestamp": "2017-04-08T19:54:37.468Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "CEST", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "LOG", @@ -460,14 +747,24 @@ "postgresql.log.database": "mydb", "postgresql.log.timestamp": "2017-04-08 21:54:37.468 CEST", "process.pid": 30502, + "related.user": [ + "postgres" + ], "service.type": "postgresql", "user.name": "postgres" }, { "@timestamp": "2017-04-08T19:54:37.618Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "CEST", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "LOG", @@ -479,9 +776,16 @@ }, { "@timestamp": "2017-04-08T19:54:37.618Z", + "event.category": [ + "database" + ], "event.dataset": 
"postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "CEST", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "LOG", @@ -493,9 +797,16 @@ }, { "@timestamp": "2017-04-08T19:54:37.618Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "CEST", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "LOG", @@ -507,9 +818,16 @@ }, { "@timestamp": "2017-04-08T19:54:37.622Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "CEST", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "LOG", @@ -521,9 +839,16 @@ }, { "@timestamp": "2017-04-08T19:54:37.644Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "CEST", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "LOG", @@ -535,9 +860,16 @@ }, { "@timestamp": "2017-04-08T19:56:02.932Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "CEST", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "LOG", @@ -549,9 +881,16 @@ }, { "@timestamp": "2017-04-08T19:56:02.944Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "CEST", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "LOG", @@ -563,9 +902,16 @@ }, { "@timestamp": "2017-04-08T19:56:02.946Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "CEST", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "LOG", @@ -577,9 +923,16 @@ }, { "@timestamp": "2017-04-08T19:56:02.947Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "CEST", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "LOG", @@ -591,9 +944,16 @@ }, { "@timestamp": "2017-04-08T19:56:03.362Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "CEST", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "LOG", @@ -602,14 +962,24 @@ "postgresql.log.database": "unknown", "postgresql.log.timestamp": "2017-04-08 21:56:03.362 CEST", "process.pid": 891, + "related.user": [ + "unknown" + ], "service.type": "postgresql", "user.name": "unknown" }, { "@timestamp": "2017-05-27T14:07:53.007Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "UTC", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "LOG", @@ -621,9 +991,16 @@ }, { "@timestamp": "2017-05-27T14:07:53.010Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "UTC", + "event.type": [ + "info" + ], "fileset.name": 
"log", "input.type": "log", "log.level": "LOG", @@ -635,9 +1012,16 @@ }, { "@timestamp": "2017-05-27T14:07:53.015Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "UTC", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "LOG", @@ -649,9 +1033,16 @@ }, { "@timestamp": "2017-05-27T14:07:53.016Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "UTC", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "LOG", @@ -663,9 +1054,16 @@ }, { "@timestamp": "2017-05-27T14:07:53.463Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "UTC", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "LOG", @@ -674,14 +1072,24 @@ "postgresql.log.database": "unknown", "postgresql.log.timestamp": "2017-05-27 14:07:53.463 UTC", "process.pid": 32573, + "related.user": [ + "unknown" + ], "service.type": "postgresql", "user.name": "unknown" }, { "@timestamp": "2017-05-27T14:08:13.661Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "UTC", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "FATAL", @@ -690,14 +1098,24 @@ "postgresql.log.database": "mydb", "postgresql.log.timestamp": "2017-05-27 14:08:13.661 UTC", "process.pid": 1308, + "related.user": [ + "postgres" + ], "service.type": "postgresql", "user.name": "postgres" }, { "@timestamp": "2017-05-27T14:59:26.553Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "UTC", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "LOG", @@ -706,14 +1124,24 @@ "postgresql.log.database": "mydb", "postgresql.log.timestamp": "2017-05-27 14:59:26.553 UTC", "process.pid": 1994, + "related.user": [ + "postgres" + ], "service.type": "postgresql", "user.name": "postgres" }, { "@timestamp": "2017-05-27T14:59:26.555Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "UTC", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "LOG", @@ -722,14 +1150,24 @@ "postgresql.log.database": "mydb", "postgresql.log.timestamp": "2017-05-27 14:59:26.555 UTC", "process.pid": 1989, + "related.user": [ + "postgres" + ], "service.type": "postgresql", "user.name": "postgres" }, { "@timestamp": "2017-06-06T05:54:13.753Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "CEST", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "LOG", @@ -741,9 +1179,16 @@ }, { "@timestamp": "2017-06-06T05:54:13.753Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "CEST", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "LOG", @@ -755,9 +1200,16 @@ }, { "@timestamp": "2017-06-06T05:54:13.753Z", + "event.category": [ + "database" + ], 
"event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "CEST", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "LOG", @@ -769,9 +1221,16 @@ }, { "@timestamp": "2017-06-06T05:54:13.755Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "CEST", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "LOG", @@ -783,9 +1242,16 @@ }, { "@timestamp": "2017-06-06T05:54:13.816Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "CEST", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "LOG", @@ -797,9 +1263,16 @@ }, { "@timestamp": "2017-06-06T05:55:39.725Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "CEST", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "LOG", @@ -811,9 +1284,16 @@ }, { "@timestamp": "2017-06-06T05:55:39.736Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "CEST", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "LOG", @@ -825,9 +1305,16 @@ }, { "@timestamp": "2017-06-06T05:55:39.739Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "CEST", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "LOG", @@ -839,9 +1326,16 @@ }, { "@timestamp": "2017-06-06T05:55:39.739Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "CEST", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "LOG", @@ -853,9 +1347,16 @@ }, { "@timestamp": "2017-06-06T05:55:40.155Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "CEST", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "LOG", @@ -864,14 +1365,24 @@ "postgresql.log.database": "unknown", "postgresql.log.timestamp": "2017-06-06 07:55:40.155 CEST", "process.pid": 12975, + "related.user": [ + "unknown" + ], "service.type": "postgresql", "user.name": "unknown" }, { "@timestamp": "2017-06-06T05:55:40.156Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "CEST", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "LOG", @@ -880,14 +1391,24 @@ "postgresql.log.database": "unknown", "postgresql.log.timestamp": "2017-06-06 07:55:40.156 CEST", "process.pid": 12975, + "related.user": [ + "unknown" + ], "service.type": "postgresql", "user.name": "unknown" }, { "@timestamp": "2017-06-10T17:37:30.681Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "CEST", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "LOG", @@ -899,9 +1420,16 @@ }, { 
"@timestamp": "2017-06-10T17:37:30.695Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "CEST", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "LOG", @@ -913,9 +1441,16 @@ }, { "@timestamp": "2017-06-10T17:37:30.702Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "CEST", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "LOG", @@ -927,9 +1462,16 @@ }, { "@timestamp": "2017-06-10T17:37:30.702Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "CEST", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "LOG", @@ -941,9 +1483,16 @@ }, { "@timestamp": "2017-06-10T17:37:31.104Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "CEST", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "LOG", @@ -952,14 +1501,24 @@ "postgresql.log.database": "unknown", "postgresql.log.timestamp": "2017-06-10 19:37:31.104 CEST", "process.pid": 17404, + "related.user": [ + "unknown" + ], "service.type": "postgresql", "user.name": "unknown" }, { "@timestamp": "2017-06-10T18:27:55.911Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "CEST", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "LOG", @@ -971,9 +1530,16 @@ }, { "@timestamp": "2017-06-10T18:27:55.911Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "CEST", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "LOG", @@ -985,9 +1551,16 @@ }, { "@timestamp": "2017-06-10T18:27:55.911Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "CEST", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "LOG", @@ -999,9 +1572,16 @@ }, { "@timestamp": "2017-06-10T18:27:55.914Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "CEST", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "LOG", @@ -1013,9 +1593,16 @@ }, { "@timestamp": "2017-06-10T18:27:55.973Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "CEST", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "LOG", @@ -1027,9 +1614,16 @@ }, { "@timestamp": "2017-06-10T18:27:57.022Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "CEST", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "LOG", @@ -1041,9 +1635,16 @@ }, { "@timestamp": "2017-06-10T18:27:57.032Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + 
"event.kind": "event", "event.module": "postgresql", "event.timezone": "CEST", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "LOG", @@ -1055,9 +1656,16 @@ }, { "@timestamp": "2017-06-10T18:27:57.035Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "CEST", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "LOG", @@ -1069,9 +1677,16 @@ }, { "@timestamp": "2017-06-10T18:27:57.035Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "CEST", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "LOG", @@ -1083,9 +1698,16 @@ }, { "@timestamp": "2017-06-10T18:27:57.475Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "CEST", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "LOG", @@ -1094,14 +1716,24 @@ "postgresql.log.database": "unknown", "postgresql.log.timestamp": "2017-06-10 20:27:57.475 CEST", "process.pid": 24496, + "related.user": [ + "unknown" + ], "service.type": "postgresql", "user.name": "unknown" }, { "@timestamp": "2017-06-17T14:58:03.937Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "CEST", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "LOG", @@ -1113,9 +1745,16 @@ }, { "@timestamp": "2017-06-17T14:58:03.937Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "CEST", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "LOG", @@ -1127,9 +1766,16 @@ }, { "@timestamp": "2017-06-17T14:58:03.938Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "CEST", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "LOG", @@ -1141,9 +1787,16 @@ }, { "@timestamp": "2017-06-17T14:58:03.940Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "CEST", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "LOG", @@ -1155,9 +1808,16 @@ }, { "@timestamp": "2017-06-17T14:58:04.040Z", + "event.category": [ + "database" + ], "event.dataset": "postgresql.log", + "event.kind": "event", "event.module": "postgresql", "event.timezone": "CEST", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "LOG", diff --git a/filebeat/module/redis/log/ingest/pipeline.json b/filebeat/module/redis/log/ingest/pipeline.json deleted file mode 100644 index c9ec2d3371b..00000000000 --- a/filebeat/module/redis/log/ingest/pipeline.json +++ /dev/null @@ -1,85 +0,0 @@ -{ - "description": "Pipeline for parsing redis logs", - "processors": [ - { - "grok": { - "field": "message", - "patterns": [ - "(%{POSINT:process.pid:long}:%{CHAR:redis.log.role} )?(%{REDISTIMESTAMP1:redis.log.timestamp}||%{REDISTIMESTAMP2:redis.log.timestamp}) %{REDISLEVEL:log.level} %{GREEDYDATA:message}", - 
"%{POSINT:process.pid:long}:signal-handler \\(%{POSINT:redis.log.timestamp}\\) %{GREEDYDATA:message}" - ], - "pattern_definitions": { - "CHAR": "[a-zA-Z]", - "REDISLEVEL": "[.\\-*#]", - "REDISTIMESTAMP1": "%{MONTHDAY} %{MONTH} %{TIME}", - "REDISTIMESTAMP2": "%{MONTHDAY} %{MONTH} %{YEAR} %{TIME}" - } - } - }, - { - "script": { - "lang": "painless", - "source": "if (ctx.log.level == params.dot) {\n ctx.log.level = params.debug;\n } else if (ctx.log.level == params.dash) {\n ctx.log.level = params.verbose;\n } else if (ctx.log.level == params.asterisk) {\n ctx.log.level = params.notice;\n } else if (ctx.log.level == params.hash) {\n ctx.log.level = params.warning;\n }", - "params": { - "dot": ".", - "debug": "debug", - "dash": "-", - "verbose": "verbose", - "asterisk": "*", - "notice": "notice", - "hash": "#", - "warning": "warning" - } - } - }, - { - "script": { - "lang": "painless", - "source": "if (ctx.redis.log.role == params.master_abbrev) {\n ctx.redis.log.role = params.master;\n } else if (ctx.redis.log.role == params.slave_abbrev) {\n ctx.redis.log.role = params.slave;\n } else if (ctx.redis.log.role == params.child_abbrev) {\n ctx.redis.log.role = params.child;\n } else if (ctx.redis.log.role == params.sentinel_abbrev) {\n ctx.redis.log.role = params.sentinel;\n }\n ", - "params": { - "master_abbrev": "M", - "master": "master", - "slave_abbrev": "S", - "slave": "slave", - "child_abbrev": "C", - "child": "child", - "sentinel_abbrev": "X", - "sentinel": "sentinel" - } - } - }, - { - "rename": { - "field": "@timestamp", - "target_field": "event.created" - } - }, - { - "date": { - "field": "redis.log.timestamp", - "target_field": "@timestamp", - "formats": [ - "dd MMM yyyy H:m:s.SSS", - "dd MMM H:m:s.SSS", - "dd MMM H:m:s", - "UNIX" - ], - "ignore_failure": true - } - }, - { - "remove": { - "field": "redis.log.timestamp", - "ignore_failure": true - } - } - ], - "on_failure": [ - { - "set": { - "field": "error.message", - "value": "{{ _ingest.on_failure_message }}" - } - } - ] -} diff --git a/filebeat/module/redis/log/ingest/pipeline.yml b/filebeat/module/redis/log/ingest/pipeline.yml new file mode 100644 index 00000000000..d1c08cab378 --- /dev/null +++ b/filebeat/module/redis/log/ingest/pipeline.yml @@ -0,0 +1,84 @@ +description: Pipeline for parsing redis logs +processors: +- grok: + field: message + patterns: + - (%{POSINT:process.pid:long}:%{CHAR:redis.log.role} )?(%{REDISTIMESTAMP1:redis.log.timestamp}||%{REDISTIMESTAMP2:redis.log.timestamp}) + %{REDISLEVEL:log.level} %{GREEDYDATA:message} + - '%{POSINT:process.pid:long}:signal-handler \(%{POSINT:redis.log.timestamp}\) + %{GREEDYDATA:message}' + pattern_definitions: + CHAR: '[a-zA-Z]' + REDISLEVEL: '[.\-*#]' + REDISTIMESTAMP1: '%{MONTHDAY} %{MONTH} %{TIME}' + REDISTIMESTAMP2: '%{MONTHDAY} %{MONTH} %{YEAR} %{TIME}' +- script: + lang: painless + source: >- + if (ctx.log.level == params.dot) { + ctx.log.level = params.debug; + } else if (ctx.log.level == params.dash) { + ctx.log.level = params.verbose; + } else if (ctx.log.level == params.asterisk) { + ctx.log.level = params.notice; + } else if (ctx.log.level == params.hash) { + ctx.log.level = params.warning; + } + params: + dot: . 
+ debug: debug + dash: '-' + verbose: verbose + asterisk: '*' + notice: notice + hash: '#' + warning: warning +- script: + lang: painless + source: >- + if (ctx.redis.log.role == params.master_abbrev) { + ctx.redis.log.role = params.master; + } else if (ctx.redis.log.role == params.slave_abbrev) { + ctx.redis.log.role = params.slave; + } else if (ctx.redis.log.role == params.child_abbrev) { + ctx.redis.log.role = params.child; + } else if (ctx.redis.log.role == params.sentinel_abbrev) { + ctx.redis.log.role = params.sentinel; + } + params: + master_abbrev: M + master: master + slave_abbrev: S + slave: slave + child_abbrev: C + child: child + sentinel_abbrev: X + sentinel: sentinel +- rename: + field: '@timestamp' + target_field: event.created +- date: + field: redis.log.timestamp + target_field: '@timestamp' + formats: + - dd MMM yyyy H:m:s.SSS + - dd MMM H:m:s.SSS + - dd MMM H:m:s + - UNIX + ignore_failure: true +- remove: + field: redis.log.timestamp + ignore_failure: true +- set: + field: event.kind + value: event +- append: + field: event.category + value: database +- append: + field: event.type + value: info +on_failure: +- set: + field: error.message + value: '{{ _ingest.on_failure_message }}' diff --git a/filebeat/module/redis/log/manifest.yml b/filebeat/module/redis/log/manifest.yml index 3c63a894c28..728e098d4c2 100644 --- a/filebeat/module/redis/log/manifest.yml +++ b/filebeat/module/redis/log/manifest.yml @@ -10,5 +10,5 @@ var: os.windows: - "c:/program files/Redis/logs/redis.log*" -ingest_pipeline: ingest/pipeline.json +ingest_pipeline: ingest/pipeline.yml input: config/log.yml diff --git a/filebeat/module/redis/log/test/redis-5.0.3.log-expected.json b/filebeat/module/redis/log/test/redis-5.0.3.log-expected.json index 71d76c30a96..d3efc715fe3 100644 --- a/filebeat/module/redis/log/test/redis-5.0.3.log-expected.json +++ b/filebeat/module/redis/log/test/redis-5.0.3.log-expected.json @@ -1,7 +1,14 @@ [ { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "notice", diff --git a/filebeat/module/redis/log/test/redis-darwin-3.0.2.log-expected.json b/filebeat/module/redis/log/test/redis-darwin-3.0.2.log-expected.json index ff533b577ac..365ced2400b 100644 --- a/filebeat/module/redis/log/test/redis-darwin-3.0.2.log-expected.json +++ b/filebeat/module/redis/log/test/redis-darwin-3.0.2.log-expected.json @@ -1,7 +1,14 @@ [ { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "notice", @@ -12,8 +19,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "warning", @@ -24,8 +38,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "notice", @@ -36,8 +57,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "notice", @@ -48,8 
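
The set/append processors added at the end of the converted pipeline above are what populate the event.kind, event.category, and event.type values asserted throughout the redis expected files below. A minimal sketch of how to see them in action, assuming a local Elasticsearch on localhost:9200, inlining just those three processors into the ingest simulate API, and using a made-up sample log line:

  curl -s -X POST 'http://localhost:9200/_ingest/pipeline/_simulate?pretty' \
    -H 'Content-Type: application/json' -d '
  {
    "pipeline": {
      "processors": [
        { "set":    { "field": "event.kind",     "value": "event" } },
        { "append": { "field": "event.category", "value": "database" } },
        { "append": { "field": "event.type",     "value": "info" } }
      ]
    },
    "docs": [
      { "_source": { "message": "1:M 05 Feb 2020 10:00:00.000 * Ready to accept connections" } }
    ]
  }'

The response should show event.kind as a plain string and event.category/event.type as single-element arrays, which is exactly how the fixtures in this diff assert them.
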
+76,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.offset": 1478, @@ -58,8 +93,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "warning", @@ -70,8 +112,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "notice", @@ -82,8 +131,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "notice", @@ -94,8 +150,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "warning", @@ -106,8 +169,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "notice", @@ -118,8 +188,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "warning", @@ -130,8 +207,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "notice", @@ -142,8 +226,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "notice", @@ -154,8 +245,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.offset": 3273, @@ -164,8 +262,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "warning", @@ -176,8 +281,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "notice", @@ -188,8 +300,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "notice", @@ -200,8 +319,15 @@ "service.type": "redis" }, { + "event.category": [ + 
"database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "warning", diff --git a/filebeat/module/redis/log/test/redis-debian-1.2.6.log-expected.json b/filebeat/module/redis/log/test/redis-debian-1.2.6.log-expected.json index ff13e461ef4..a8f9d71736e 100644 --- a/filebeat/module/redis/log/test/redis-debian-1.2.6.log-expected.json +++ b/filebeat/module/redis/log/test/redis-debian-1.2.6.log-expected.json @@ -1,7 +1,14 @@ [ { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "verbose", @@ -10,8 +17,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "notice", @@ -20,8 +34,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "verbose", @@ -30,8 +51,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "verbose", @@ -40,8 +68,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "notice", @@ -50,8 +85,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "verbose", @@ -60,8 +102,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "verbose", @@ -70,8 +119,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "notice", @@ -80,8 +136,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "verbose", @@ -90,8 +153,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "debug", @@ -100,8 +170,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "debug", @@ -110,8 +187,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], 
"event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "debug", @@ -120,8 +204,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "debug", @@ -130,8 +221,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "debug", @@ -140,8 +238,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "debug", @@ -150,8 +255,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "debug", @@ -160,8 +272,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "debug", @@ -170,8 +289,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "debug", @@ -180,8 +306,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "debug", @@ -190,8 +323,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "debug", @@ -200,8 +340,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "debug", @@ -210,8 +357,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "debug", @@ -220,8 +374,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "debug", @@ -230,8 +391,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "debug", @@ -240,8 +408,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": 
"redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "debug", @@ -250,8 +425,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "debug", @@ -260,8 +442,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "debug", @@ -270,8 +459,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "debug", @@ -280,8 +476,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "debug", @@ -290,8 +493,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "debug", @@ -300,8 +510,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "debug", @@ -310,8 +527,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "debug", @@ -320,8 +544,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "debug", @@ -330,8 +561,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "debug", @@ -340,8 +578,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "debug", @@ -350,8 +595,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "debug", @@ -360,8 +612,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "debug", @@ -370,8 +629,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", 
"input.type": "log", "log.level": "debug", @@ -380,8 +646,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "debug", @@ -390,8 +663,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "debug", @@ -400,8 +680,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "debug", @@ -410,8 +697,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "debug", @@ -420,8 +714,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "debug", @@ -430,8 +731,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "debug", @@ -440,8 +748,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "debug", @@ -450,8 +765,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "debug", @@ -460,8 +782,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "debug", @@ -470,8 +799,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "debug", @@ -480,8 +816,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "debug", @@ -490,8 +833,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "debug", @@ -500,8 +850,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "debug", @@ -510,8 +867,15 @@ 
"service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "debug", @@ -520,8 +884,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "debug", @@ -530,8 +901,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "debug", @@ -540,8 +918,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "debug", @@ -550,8 +935,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "debug", @@ -560,8 +952,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "debug", @@ -570,8 +969,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "debug", @@ -580,8 +986,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "debug", @@ -590,8 +1003,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "debug", @@ -600,8 +1020,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "debug", @@ -610,8 +1037,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "debug", @@ -620,8 +1054,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "debug", @@ -630,8 +1071,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "debug", @@ -640,8 +1088,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" 
+ ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "debug", @@ -650,8 +1105,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "debug", @@ -660,8 +1122,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "debug", @@ -670,8 +1139,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "debug", @@ -680,8 +1156,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "debug", @@ -690,8 +1173,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "debug", @@ -700,8 +1190,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "debug", @@ -710,8 +1207,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "debug", @@ -720,8 +1224,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "debug", @@ -730,8 +1241,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "debug", @@ -740,8 +1258,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "debug", @@ -750,8 +1275,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "debug", @@ -760,8 +1292,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "debug", @@ -770,8 +1309,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": 
"event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "debug", @@ -780,8 +1326,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "debug", @@ -790,8 +1343,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "debug", @@ -800,8 +1360,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "debug", @@ -810,8 +1377,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "debug", @@ -820,8 +1394,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "debug", @@ -830,8 +1411,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "debug", @@ -840,8 +1428,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "debug", @@ -850,8 +1445,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "debug", @@ -860,8 +1462,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "debug", @@ -870,8 +1479,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "debug", @@ -880,8 +1496,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "debug", @@ -890,8 +1513,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "debug", @@ -900,8 +1530,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + 
"info" + ], "fileset.name": "log", "input.type": "log", "log.level": "debug", @@ -910,8 +1547,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "debug", @@ -920,8 +1564,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "debug", @@ -930,8 +1581,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "debug", @@ -940,8 +1598,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "debug", @@ -950,8 +1615,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "debug", @@ -960,8 +1632,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "debug", @@ -970,8 +1649,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "debug", @@ -980,8 +1666,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "debug", @@ -990,8 +1683,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "debug", diff --git a/filebeat/module/redis/log/test/redis-windows-2.4.6.log-expected.json b/filebeat/module/redis/log/test/redis-windows-2.4.6.log-expected.json index 4fb3b4e92b0..dbafda2b3df 100644 --- a/filebeat/module/redis/log/test/redis-windows-2.4.6.log-expected.json +++ b/filebeat/module/redis/log/test/redis-windows-2.4.6.log-expected.json @@ -1,7 +1,14 @@ [ { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "notice", @@ -10,8 +17,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "warning", @@ -20,8 +34,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], 
"fileset.name": "log", "input.type": "log", "log.level": "notice", @@ -30,8 +51,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "verbose", @@ -40,8 +68,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "verbose", @@ -50,8 +85,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "verbose", @@ -60,8 +102,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "verbose", @@ -70,8 +119,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "verbose", @@ -80,8 +136,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "verbose", @@ -90,8 +153,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "verbose", @@ -100,8 +170,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "verbose", @@ -110,8 +187,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "verbose", @@ -120,8 +204,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "verbose", @@ -130,8 +221,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "verbose", @@ -140,8 +238,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "verbose", @@ -150,8 +255,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", 
"log.level": "verbose", @@ -160,8 +272,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "verbose", @@ -170,8 +289,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "verbose", @@ -180,8 +306,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "verbose", @@ -190,8 +323,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "verbose", @@ -200,8 +340,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "verbose", @@ -210,8 +357,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "verbose", @@ -220,8 +374,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "verbose", @@ -230,8 +391,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "verbose", @@ -240,8 +408,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "verbose", @@ -250,8 +425,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "verbose", @@ -260,8 +442,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "verbose", @@ -270,8 +459,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "verbose", @@ -280,8 +476,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "verbose", @@ -290,8 +493,15 @@ 
"service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "verbose", @@ -300,8 +510,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "verbose", @@ -310,8 +527,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "verbose", @@ -320,8 +544,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "verbose", @@ -330,8 +561,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "verbose", diff --git a/filebeat/module/redis/log/test/test.log-expected.json b/filebeat/module/redis/log/test/test.log-expected.json index b74b64a93ed..cee22b55c3b 100644 --- a/filebeat/module/redis/log/test/test.log-expected.json +++ b/filebeat/module/redis/log/test/test.log-expected.json @@ -1,7 +1,14 @@ [ { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "notice", @@ -12,8 +19,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "debug", @@ -22,8 +36,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.level": "notice", @@ -32,8 +53,15 @@ "service.type": "redis" }, { + "event.category": [ + "database" + ], "event.dataset": "redis.log", + "event.kind": "event", "event.module": "redis", + "event.type": [ + "info" + ], "fileset.name": "log", "input.type": "log", "log.offset": 250, diff --git a/filebeat/module/santa/_meta/fields.yml b/filebeat/module/santa/_meta/fields.yml index fea0b03a78c..57255dd76c8 100644 --- a/filebeat/module/santa/_meta/fields.yml +++ b/filebeat/module/santa/_meta/fields.yml @@ -56,10 +56,10 @@ - name: mount description: The disk volume path. - - name: certificate.common_name - type: keyword - description: Common name from code signing certificate. + - name: certificate.common_name + type: keyword + description: Common name from code signing certificate. - - name: certificate.sha256 - type: keyword - description: SHA256 hash of code signing certificate. + - name: certificate.sha256 + type: keyword + description: SHA256 hash of code signing certificate. 
diff --git a/filebeat/module/santa/fields.go b/filebeat/module/santa/fields.go index 06b53e41d84..cd3f44d3647 100644 --- a/filebeat/module/santa/fields.go +++ b/filebeat/module/santa/fields.go @@ -32,5 +32,5 @@ func init() { // AssetSanta returns asset data. // This is the base64 encoded gzipped contents of module/santa. func AssetSanta() string { - return "eJyUk82O2jAQgO88xWhP7WFRl4o95FAphfRHBS0iK7W3yhtPiJXEE9lOW96+spMFk8QtcCJj+/s8npl7KPEYgWbSsBmAEabCCO4+Ex0qhNSG72YAHHWmRGMEyQg+zACgW4Mt8bbCGUAusOI6ckv3IFmNZ6r9mWODERwUtU0fmWCeMf3nmcUyu/EUfgWWePxNintx/MPqxiaR/EhWXvxCF3e0kYVjJvSNnnizefoeEq17IJiCme5BOBiicj6WK2T6NvUq2T+HzHtHg5wUmAJtZtrdZEJcE8dbtNuQ86lBxYyQB4cEyrsumVByocuR0u+OEfuT6wyXz/pr+i3e7ZJ437eFnnun/E4can9R1dZ4sTTQPBfY73JHvKv7nJdW/w9iM7T7oFFkKKMqgNKoBKuuonVbQbb1C6rQzTS3fwa8U+ks50G/v0r3MV3/6w1siYf3Pg/FbrdJIE3XkG7fLR8Wm6uMDhnQ5cMXP7lYM1qbxvd1LYXk8CYXFeqjNli77nsbzLKV5hZ6w0zRs14ZGSojcpExg/OM6prkT69GU+N2YVi5I44FuaIaMjtcWhyknTQfHtbqgi2Wj9ca0y/xYvkIBdOFHeKw728AAAD//5mnu+4=" + return "eJyUk82O2jAQgO88xWhP7WFRl4o9cKiUQvqjghaRldpb5Y0nxErsiWynLW9f2UkhJPGWcCJj+/tm7Jl7KPC0AsOUZTMAK2yJK7j7THQsERIXvpsBcDSpFpUVpFbwYQYAzRrsiNclzgAygSU3K790D4pJvFDdz54qXMFRU121kRHmBdN+XlgsdRvP4X/AAk+/SfNOHP8wWbki4h/xuhO/0kUNbWDhmAoz0RNtt0/fQ6JNCwSbM9tcCAdLVMyHco3MTFOv48NzyHzwNMhIg83RVWZ8JiNiSRynaHch51OFmlmhjh4JlDVdMqLkwhQDZbc7BuxPvjN8PZuvybdov4+jQ9sWZt451e3EvvYXlbXEq6We5jnHdpc/0km9y3mpzf8grkK3DypNllIqAyiDWrDyJlqzFVQtX1CHMjPc/enxzk/nOA/m/U26j8nmtTtwT9zP+zIU+/02hiTZQLJ7t3xYbG8yemRAl/Vv/Oxi1WBtHN++ayEUhzeZKNGcjEXpu+9tsMpa2Sn0itl8pOFT1FZkImUW5ylJSepn751CY3dlW/ujngmZJgmpGzQjjspNXVfyegomZ4vl41R78iVaLB8hZyZ3wx12/w0AAP//xDi+7g==" } diff --git a/filebeat/module/santa/log/ingest/pipeline.json b/filebeat/module/santa/log/ingest/pipeline.json deleted file mode 100644 index 4eaddc753a6..00000000000 --- a/filebeat/module/santa/log/ingest/pipeline.json +++ /dev/null @@ -1,71 +0,0 @@ -{ - "description": "Pipeline for parsing Google Santa logs.", - "processors": [ - { - "grok": { - "field": "message", - "patterns": [ - "\\[%{TIMESTAMP_ISO8601:process.start}\\] I santad: action=%{NOT_SEPARATOR:santa.action}\\|decision=%{NOT_SEPARATOR:santa.decision}\\|reason=%{NOT_SEPARATOR:santa.reason}\\|sha256=%{NOT_SEPARATOR:hash.sha256}\\|path=%{NOT_SEPARATOR:process.executable}(\\|args=%{NOT_SEPARATOR:process.args})?(\\|cert_sha256=%{NOT_SEPARATOR:certificate.sha256})?(\\|cert_cn=%{NOT_SEPARATOR:certificate.common_name})?\\|pid=%{NUMBER:process.pid:long}\\|ppid=%{NUMBER:process.ppid:long}\\|uid=%{NUMBER:user.id}\\|user=%{NOT_SEPARATOR:user.name}\\|gid=%{NUMBER:group.id}\\|group=%{NOT_SEPARATOR:group.name}\\|mode=%{WORD:santa.mode}", - "\\[%{TIMESTAMP_ISO8601:timestamp}\\] I santad: action=%{NOT_SEPARATOR:santa.action}\\|mount=%{NOT_SEPARATOR:santa.disk.mount}\\|volume=%{NOT_SEPARATOR:santa.disk.volume}\\|bsdname=%{NOT_SEPARATOR:santa.disk.bsdname}\\|fs=%{NOT_SEPARATOR:santa.disk.fs}\\|model=%{NOT_SEPARATOR:santa.disk.model}\\|serial=%{NOT_SEPARATOR:santa.disk.serial}\\|bus=%{NOT_SEPARATOR:santa.disk.bus}\\|dmgpath=%{NOT_SEPARATOR:santa.disk.dmgpath}?" 
-        ],
-        "pattern_definitions": {
-          "NOT_SEPARATOR": "[^\\|]+"
-        }
-      }
-    },
-    {
-      "rename": {
-        "field": "message",
-        "target_field": "log.original"
-      }
-    },
-    {
-      "date": {
-        "field": "process.start",
-        "target_field": "process.start",
-        "formats": [
-          "ISO8601"
-        ],
-        "ignore_failure": true
-      }
-    },
-    {
-      "set": {
-        "field": "@timestamp",
-        "value": "{{ process.start }}",
-        "ignore_failure": true
-      }
-    },
-    {
-      "split": {
-        "field": "process.args",
-        "separator": " ",
-        "ignore_failure": true
-      }
-    },
-    {
-      "date": {
-        "field": "timestamp",
-        "target_field": "@timestamp",
-        "formats": [
-          "ISO8601"
-        ],
-        "ignore_failure": true
-      }
-    },
-    {
-      "remove": {
-        "field": "timestamp",
-        "ignore_missing": true
-      }
-    }
-  ],
-  "on_failure": [
-    {
-      "set": {
-        "field": "error.message",
-        "value": "{{ _ingest.on_failure_message }}"
-      }
-    }
-  ]
-}
diff --git a/filebeat/module/santa/log/ingest/pipeline.yml b/filebeat/module/santa/log/ingest/pipeline.yml
new file mode 100644
index 00000000000..11ad4cead6c
--- /dev/null
+++ b/filebeat/module/santa/log/ingest/pipeline.yml
@@ -0,0 +1,91 @@
+description: Pipeline for parsing Google Santa logs.
+processors:
+- grok:
+    field: message
+    patterns:
+    - '\[%{TIMESTAMP_ISO8601:process.start}\] %{NOT_SEPARATOR:log.level} santad: action=%{NOT_SEPARATOR:santa.action}\|decision=%{NOT_SEPARATOR:santa.decision}\|reason=%{NOT_SEPARATOR:santa.reason}\|sha256=%{NOT_SEPARATOR:process.hash.sha256}\|path=%{NOT_SEPARATOR:process.executable}(\|args=%{NOT_SEPARATOR:santa.args})?(\|cert_sha256=%{NOT_SEPARATOR:santa.certificate.sha256})?(\|cert_cn=%{NOT_SEPARATOR:santa.certificate.common_name})?\|pid=%{NUMBER:process.pid:long}\|ppid=%{NUMBER:process.ppid:long}\|uid=%{NUMBER:user.id}\|user=%{NOT_SEPARATOR:user.name}\|gid=%{NUMBER:group.id}\|group=%{NOT_SEPARATOR:group.name}\|mode=%{WORD:santa.mode}'
+    - '\[%{TIMESTAMP_ISO8601:timestamp}\] %{NOT_SEPARATOR:log.level} santad: action=%{NOT_SEPARATOR:santa.action}\|mount=%{NOT_SEPARATOR:santa.disk.mount}\|volume=%{NOT_SEPARATOR:santa.disk.volume}\|bsdname=%{NOT_SEPARATOR:santa.disk.bsdname}\|fs=%{NOT_SEPARATOR:santa.disk.fs}\|model=%{NOT_SEPARATOR:santa.disk.model}\|serial=%{NOT_SEPARATOR:santa.disk.serial}\|bus=%{NOT_SEPARATOR:santa.disk.bus}\|dmgpath=%{NOT_SEPARATOR:santa.disk.dmgpath}?'
+    pattern_definitions:
+      NOT_SEPARATOR: '[^\|]+'
+- rename:
+    field: message
+    target_field: log.original
+- date:
+    field: process.start
+    target_field: process.start
+    formats:
+    - ISO8601
+    ignore_failure: true
+- set:
+    field: '@timestamp'
+    value: '{{ process.start }}'
+    ignore_failure: true
+- split:
+    field: santa.args
+    separator: ' '
+    ignore_failure: true
+- date:
+    field: timestamp
+    target_field: '@timestamp'
+    formats:
+    - ISO8601
+    ignore_failure: true
+- remove:
+    field: timestamp
+    ignore_missing: true
+- append:
+    field: process.args
+    value: "{{process.executable}}"
+    if: "ctx?.process?.executable != null"
+- foreach:
+    field: santa.args
+    processor:
+      append:
+        field: process.args
+        value: "{{_ingest._value}}"
+    ignore_missing: true
+- remove:
+    field: santa.args
+    ignore_missing: true
+- set:
+    field: event.kind
+    value: event
+- append:
+    field: event.category
+    value: process
+    if: "ctx?.santa?.action == 'EXEC'"
+- append:
+    field: event.type
+    value: start
+    if: "ctx?.santa?.action == 'EXEC'"
+- set:
+    field: event.outcome
+    value: success
+    if: "ctx?.santa?.decision == 'ALLOW'"
+- set:
+    field: event.outcome
+    value: failure
+    if: "ctx?.santa?.decision == 'DENY'"
+- set:
+    field: event.action
+    value: "{{santa.action}}"
+    if: "ctx?.santa?.action != null"
+- lowercase:
+    field: event.action
+    ignore_missing: true
+- append:
+    field: related.user
+    value: "{{user.name}}"
+    if: "ctx?.user?.name != null"
+- append:
+    field: related.hash
+    value: "{{santa.certificate.sha256}}"
+    if: "ctx?.santa?.certificate?.sha256 != null"
+- append:
+    field: related.hash
+    value: "{{process.hash.sha256}}"
+    if: "ctx?.process?.hash != null"
+on_failure:
+- set:
+    field: error.message
+    value: '{{ _ingest.on_failure_message }}'
diff --git a/filebeat/module/santa/log/manifest.yml b/filebeat/module/santa/log/manifest.yml
index d0369930490..43cad6e1934 100644
--- a/filebeat/module/santa/log/manifest.yml
+++ b/filebeat/module/santa/log/manifest.yml
@@ -4,8 +4,9 @@ var:
   - name: paths
     default:
       - /var/log/santa.log
+      - /var/db/santa/santa.log
   - name: input
     default: file
 
-ingest_pipeline: ingest/pipeline.json
+ingest_pipeline: ingest/pipeline.yml
 input: config/{{.input}}.yml
diff --git a/filebeat/module/santa/log/test/santa.log-expected.json b/filebeat/module/santa/log/test/santa.log-expected.json
index ab94261c13a..6c1fbe81184 100644
--- a/filebeat/module/santa/log/test/santa.log-expected.json
+++ b/filebeat/module/santa/log/test/santa.log-expected.json
@@ -1,25 +1,43 @@
 [
     {
         "@timestamp": "2018-12-10T06:45:16.802Z",
-        "certificate.common_name": "Software Signing",
-        "certificate.sha256": "2aa4b9973b7ba07add447ee4da8b5337c3ee2c3a991911e80e7282e8a751fc32",
+        "event.action": "exec",
+        "event.category": [
+            "process"
+        ],
         "event.dataset": "santa.log",
+        "event.kind": "event",
         "event.module": "santa",
+        "event.outcome": "success",
+        "event.type": [
+            "start"
+        ],
         "fileset.name": "log",
         "group.id": "0",
         "group.name": "wheel",
-        "hash.sha256": "c4bc09fd2f248534552f517acf3edb9a635aba2b02e46f49df683ea9b778e5b4",
         "input.type": "log",
+        "log.level": "I",
         "log.offset": 0,
         "log.original": "[2018-12-10T06:45:16.802Z] I santad: action=EXEC|decision=ALLOW|reason=CERT|sha256=c4bc09fd2f248534552f517acf3edb9a635aba2b02e46f49df683ea9b778e5b4|path=/usr/libexec/xpcproxy|args=/usr/sbin/newsyslog|cert_sha256=2aa4b9973b7ba07add447ee4da8b5337c3ee2c3a991911e80e7282e8a751fc32|cert_cn=Software Signing|pid=29678|ppid=1|uid=0|user=root|gid=0|group=wheel|mode=M",
         "process.args": [
+            "/usr/libexec/xpcproxy",
"/usr/sbin/newsyslog" ], "process.executable": "/usr/libexec/xpcproxy", + "process.hash.sha256": "c4bc09fd2f248534552f517acf3edb9a635aba2b02e46f49df683ea9b778e5b4", "process.pid": 29678, "process.ppid": 1, "process.start": "2018-12-10T06:45:16.802Z", + "related.hash": [ + "2aa4b9973b7ba07add447ee4da8b5337c3ee2c3a991911e80e7282e8a751fc32", + "c4bc09fd2f248534552f517acf3edb9a635aba2b02e46f49df683ea9b778e5b4" + ], + "related.user": [ + "root" + ], "santa.action": "EXEC", + "santa.certificate.common_name": "Software Signing", + "santa.certificate.sha256": "2aa4b9973b7ba07add447ee4da8b5337c3ee2c3a991911e80e7282e8a751fc32", "santa.decision": "ALLOW", "santa.mode": "M", "santa.reason": "CERT", @@ -29,26 +47,44 @@ }, { "@timestamp": "2018-12-10T06:45:16.802Z", - "certificate.common_name": "Software Signing", - "certificate.sha256": "2aa4b9973b7ba07add447ee4da8b5337c3ee2c3a991911e80e7282e8a751fc32", + "event.action": "exec", + "event.category": [ + "process" + ], "event.dataset": "santa.log", + "event.kind": "event", "event.module": "santa", + "event.outcome": "success", + "event.type": [ + "start" + ], "fileset.name": "log", "group.id": "0", "group.name": "wheel", - "hash.sha256": "c4bc09fd2f248534552f517acf3edb9a635aba2b02e46f49df683ea9b778e5b4", "input.type": "log", + "log.level": "I", "log.offset": 360, "log.original": "[2018-12-10T06:45:16.802Z] I santad: action=EXEC|decision=ALLOW|reason=CERT|sha256=c4bc09fd2f248534552f517acf3edb9a635aba2b02e46f49df683ea9b778e5b4|path=/usr/libexec/xpcproxy|args=xpcproxy com.apple.systemstats.daily|cert_sha256=2aa4b9973b7ba07add447ee4da8b5337c3ee2c3a991911e80e7282e8a751fc32|cert_cn=Software Signing|pid=29679|ppid=1|uid=0|user=root|gid=0|group=wheel|mode=M", "process.args": [ + "/usr/libexec/xpcproxy", "xpcproxy", "com.apple.systemstats.daily" ], "process.executable": "/usr/libexec/xpcproxy", + "process.hash.sha256": "c4bc09fd2f248534552f517acf3edb9a635aba2b02e46f49df683ea9b778e5b4", "process.pid": 29679, "process.ppid": 1, "process.start": "2018-12-10T06:45:16.802Z", + "related.hash": [ + "2aa4b9973b7ba07add447ee4da8b5337c3ee2c3a991911e80e7282e8a751fc32", + "c4bc09fd2f248534552f517acf3edb9a635aba2b02e46f49df683ea9b778e5b4" + ], + "related.user": [ + "root" + ], "santa.action": "EXEC", + "santa.certificate.common_name": "Software Signing", + "santa.certificate.sha256": "2aa4b9973b7ba07add447ee4da8b5337c3ee2c3a991911e80e7282e8a751fc32", "santa.decision": "ALLOW", "santa.mode": "M", "santa.reason": "CERT", @@ -58,25 +94,43 @@ }, { "@timestamp": "2018-12-10T06:45:16.851Z", - "certificate.common_name": "Software Signing", - "certificate.sha256": "2aa4b9973b7ba07add447ee4da8b5337c3ee2c3a991911e80e7282e8a751fc32", + "event.action": "exec", + "event.category": [ + "process" + ], "event.dataset": "santa.log", + "event.kind": "event", "event.module": "santa", + "event.outcome": "success", + "event.type": [ + "start" + ], "fileset.name": "log", "group.id": "0", "group.name": "wheel", - "hash.sha256": "746f0dbafb7e675d5ce67131e5544772ee612b894e8ab51d3ce2d21f7cb7332d", "input.type": "log", + "log.level": "I", "log.offset": 737, "log.original": "[2018-12-10T06:45:16.851Z] I santad: action=EXEC|decision=ALLOW|reason=CERT|sha256=746f0dbafb7e675d5ce67131e5544772ee612b894e8ab51d3ce2d21f7cb7332d|path=/usr/sbin/newsyslog|args=/usr/sbin/newsyslog|cert_sha256=2aa4b9973b7ba07add447ee4da8b5337c3ee2c3a991911e80e7282e8a751fc32|cert_cn=Software Signing|pid=29678|ppid=1|uid=0|user=root|gid=0|group=wheel|mode=M", "process.args": [ + "/usr/sbin/newsyslog", "/usr/sbin/newsyslog" ], 
"process.executable": "/usr/sbin/newsyslog", + "process.hash.sha256": "746f0dbafb7e675d5ce67131e5544772ee612b894e8ab51d3ce2d21f7cb7332d", "process.pid": 29678, "process.ppid": 1, "process.start": "2018-12-10T06:45:16.851Z", + "related.hash": [ + "2aa4b9973b7ba07add447ee4da8b5337c3ee2c3a991911e80e7282e8a751fc32", + "746f0dbafb7e675d5ce67131e5544772ee612b894e8ab51d3ce2d21f7cb7332d" + ], + "related.user": [ + "root" + ], "santa.action": "EXEC", + "santa.certificate.common_name": "Software Signing", + "santa.certificate.sha256": "2aa4b9973b7ba07add447ee4da8b5337c3ee2c3a991911e80e7282e8a751fc32", "santa.decision": "ALLOW", "santa.mode": "M", "santa.reason": "CERT", @@ -86,26 +140,44 @@ }, { "@timestamp": "2018-12-10T06:45:16.859Z", - "certificate.common_name": "Software Signing", - "certificate.sha256": "2aa4b9973b7ba07add447ee4da8b5337c3ee2c3a991911e80e7282e8a751fc32", + "event.action": "exec", + "event.category": [ + "process" + ], "event.dataset": "santa.log", + "event.kind": "event", "event.module": "santa", + "event.outcome": "success", + "event.type": [ + "start" + ], "fileset.name": "log", "group.id": "0", "group.name": "wheel", - "hash.sha256": "d6be9bfbd777ac5dcd30488014acc787a2df5ce840f1fe4d5742d323ee00392f", "input.type": "log", + "log.level": "I", "log.offset": 1095, "log.original": "[2018-12-10T06:45:16.859Z] I santad: action=EXEC|decision=ALLOW|reason=CERT|sha256=d6be9bfbd777ac5dcd30488014acc787a2df5ce840f1fe4d5742d323ee00392f|path=/usr/sbin/systemstats|args=/usr/sbin/systemstats --daily|cert_sha256=2aa4b9973b7ba07add447ee4da8b5337c3ee2c3a991911e80e7282e8a751fc32|cert_cn=Software Signing|pid=29679|ppid=1|uid=0|user=root|gid=0|group=wheel|mode=M", "process.args": [ + "/usr/sbin/systemstats", "/usr/sbin/systemstats", "--daily" ], "process.executable": "/usr/sbin/systemstats", + "process.hash.sha256": "d6be9bfbd777ac5dcd30488014acc787a2df5ce840f1fe4d5742d323ee00392f", "process.pid": 29679, "process.ppid": 1, "process.start": "2018-12-10T06:45:16.859Z", + "related.hash": [ + "2aa4b9973b7ba07add447ee4da8b5337c3ee2c3a991911e80e7282e8a751fc32", + "d6be9bfbd777ac5dcd30488014acc787a2df5ce840f1fe4d5742d323ee00392f" + ], + "related.user": [ + "root" + ], "santa.action": "EXEC", + "santa.certificate.common_name": "Software Signing", + "santa.certificate.sha256": "2aa4b9973b7ba07add447ee4da8b5337c3ee2c3a991911e80e7282e8a751fc32", "santa.decision": "ALLOW", "santa.mode": "M", "santa.reason": "CERT", @@ -115,25 +187,43 @@ }, { "@timestamp": "2018-12-10T08:45:27.810Z", - "certificate.common_name": "Software Signing", - "certificate.sha256": "2aa4b9973b7ba07add447ee4da8b5337c3ee2c3a991911e80e7282e8a751fc32", + "event.action": "exec", + "event.category": [ + "process" + ], "event.dataset": "santa.log", + "event.kind": "event", "event.module": "santa", + "event.outcome": "success", + "event.type": [ + "start" + ], "fileset.name": "log", "group.id": "0", "group.name": "wheel", - "hash.sha256": "c4bc09fd2f248534552f517acf3edb9a635aba2b02e46f49df683ea9b778e5b4", "input.type": "log", + "log.level": "I", "log.offset": 1465, "log.original": "[2018-12-10T08:45:27.810Z] I santad: action=EXEC|decision=ALLOW|reason=CERT|sha256=c4bc09fd2f248534552f517acf3edb9a635aba2b02e46f49df683ea9b778e5b4|path=/usr/libexec/xpcproxy|args=/usr/sbin/newsyslog|cert_sha256=2aa4b9973b7ba07add447ee4da8b5337c3ee2c3a991911e80e7282e8a751fc32|cert_cn=Software Signing|pid=29681|ppid=1|uid=0|user=root|gid=0|group=wheel|mode=M", "process.args": [ + "/usr/libexec/xpcproxy", "/usr/sbin/newsyslog" ], "process.executable": 
"/usr/libexec/xpcproxy", + "process.hash.sha256": "c4bc09fd2f248534552f517acf3edb9a635aba2b02e46f49df683ea9b778e5b4", "process.pid": 29681, "process.ppid": 1, "process.start": "2018-12-10T08:45:27.810Z", + "related.hash": [ + "2aa4b9973b7ba07add447ee4da8b5337c3ee2c3a991911e80e7282e8a751fc32", + "c4bc09fd2f248534552f517acf3edb9a635aba2b02e46f49df683ea9b778e5b4" + ], + "related.user": [ + "root" + ], "santa.action": "EXEC", + "santa.certificate.common_name": "Software Signing", + "santa.certificate.sha256": "2aa4b9973b7ba07add447ee4da8b5337c3ee2c3a991911e80e7282e8a751fc32", "santa.decision": "ALLOW", "santa.mode": "M", "santa.reason": "CERT", @@ -143,26 +233,44 @@ }, { "@timestamp": "2018-12-10T08:45:27.810Z", - "certificate.common_name": "Software Signing", - "certificate.sha256": "2aa4b9973b7ba07add447ee4da8b5337c3ee2c3a991911e80e7282e8a751fc32", + "event.action": "exec", + "event.category": [ + "process" + ], "event.dataset": "santa.log", + "event.kind": "event", "event.module": "santa", + "event.outcome": "success", + "event.type": [ + "start" + ], "fileset.name": "log", "group.id": "0", "group.name": "wheel", - "hash.sha256": "c4bc09fd2f248534552f517acf3edb9a635aba2b02e46f49df683ea9b778e5b4", "input.type": "log", + "log.level": "I", "log.offset": 1825, "log.original": "[2018-12-10T08:45:27.810Z] I santad: action=EXEC|decision=ALLOW|reason=CERT|sha256=c4bc09fd2f248534552f517acf3edb9a635aba2b02e46f49df683ea9b778e5b4|path=/usr/libexec/xpcproxy|args=xpcproxy com.adobe.AAM.Scheduler-1.0|cert_sha256=2aa4b9973b7ba07add447ee4da8b5337c3ee2c3a991911e80e7282e8a751fc32|cert_cn=Software Signing|pid=29680|ppid=1|uid=0|user=root|gid=0|group=wheel|mode=M", "process.args": [ + "/usr/libexec/xpcproxy", "xpcproxy", "com.adobe.AAM.Scheduler-1.0" ], "process.executable": "/usr/libexec/xpcproxy", + "process.hash.sha256": "c4bc09fd2f248534552f517acf3edb9a635aba2b02e46f49df683ea9b778e5b4", "process.pid": 29680, "process.ppid": 1, "process.start": "2018-12-10T08:45:27.810Z", + "related.hash": [ + "2aa4b9973b7ba07add447ee4da8b5337c3ee2c3a991911e80e7282e8a751fc32", + "c4bc09fd2f248534552f517acf3edb9a635aba2b02e46f49df683ea9b778e5b4" + ], + "related.user": [ + "root" + ], "santa.action": "EXEC", + "santa.certificate.common_name": "Software Signing", + "santa.certificate.sha256": "2aa4b9973b7ba07add447ee4da8b5337c3ee2c3a991911e80e7282e8a751fc32", "santa.decision": "ALLOW", "santa.mode": "M", "santa.reason": "CERT", @@ -172,24 +280,41 @@ }, { "@timestamp": "2018-12-10T21:37:27.247Z", + "event.action": "exec", + "event.category": [ + "process" + ], "event.dataset": "santa.log", + "event.kind": "event", "event.module": "santa", + "event.outcome": "success", + "event.type": [ + "start" + ], "fileset.name": "log", "group.id": "0", "group.name": "wheel", - "hash.sha256": "08bd61582657cd6d78c9e071d34d79a32bb59e7210077a44919d2c5477e988a1", "input.type": "log", + "log.level": "I", "log.offset": 2202, "log.original": "[2018-12-10T21:37:27.247Z] I santad: action=EXEC|decision=ALLOW|reason=UNKNOWN|sha256=08bd61582657cd6d78c9e071d34d79a32bb59e7210077a44919d2c5477e988a1|path=/usr/local/Cellar/osquery/3.3.0_1/bin/osqueryd|args=/usr/local/bin/osqueryd --flagfile=/private/var/osquery/osquery.flags --logger_min_stderr=1|pid=45084|ppid=1|uid=0|user=root|gid=0|group=wheel|mode=M", "process.args": [ + "/usr/local/Cellar/osquery/3.3.0_1/bin/osqueryd", "/usr/local/bin/osqueryd", "--flagfile=/private/var/osquery/osquery.flags", "--logger_min_stderr=1" ], "process.executable": "/usr/local/Cellar/osquery/3.3.0_1/bin/osqueryd", + 
"process.hash.sha256": "08bd61582657cd6d78c9e071d34d79a32bb59e7210077a44919d2c5477e988a1", "process.pid": 45084, "process.ppid": 1, "process.start": "2018-12-10T21:37:27.247Z", + "related.hash": [ + "08bd61582657cd6d78c9e071d34d79a32bb59e7210077a44919d2c5477e988a1" + ], + "related.user": [ + "root" + ], "santa.action": "EXEC", "santa.decision": "ALLOW", "santa.mode": "M", @@ -200,22 +325,42 @@ }, { "@timestamp": "2018-12-10T16:24:43.992Z", - "certificate.common_name": "Software Signing", - "certificate.sha256": "2aa4b9973b7ba07add447ee4da8b5337c3ee2c3a991911e80e7282e8a751fc32", + "event.action": "exec", + "event.category": [ + "process" + ], "event.dataset": "santa.log", + "event.kind": "event", "event.module": "santa", + "event.outcome": "success", + "event.type": [ + "start" + ], "fileset.name": "log", "group.id": "20", "group.name": "staff", - "hash.sha256": "63b6a54848d7b4adf726d68f11409a4ac05b43926cb0f2792f7d41dc0221c106", "input.type": "log", + "log.level": "I", "log.offset": 2560, "log.original": "[2018-12-10T16:24:43.992Z] I santad: action=EXEC|decision=ALLOW|reason=CERT|sha256=63b6a54848d7b4adf726d68f11409a4ac05b43926cb0f2792f7d41dc0221c106|path=/usr/bin/basename|cert_sha256=2aa4b9973b7ba07add447ee4da8b5337c3ee2c3a991911e80e7282e8a751fc32|cert_cn=Software Signing|pid=40757|ppid=40756|uid=501|user=akroh|gid=20|group=staff|mode=M", + "process.args": [ + "/usr/bin/basename" + ], "process.executable": "/usr/bin/basename", + "process.hash.sha256": "63b6a54848d7b4adf726d68f11409a4ac05b43926cb0f2792f7d41dc0221c106", "process.pid": 40757, "process.ppid": 40756, "process.start": "2018-12-10T16:24:43.992Z", + "related.hash": [ + "2aa4b9973b7ba07add447ee4da8b5337c3ee2c3a991911e80e7282e8a751fc32", + "63b6a54848d7b4adf726d68f11409a4ac05b43926cb0f2792f7d41dc0221c106" + ], + "related.user": [ + "akroh" + ], "santa.action": "EXEC", + "santa.certificate.common_name": "Software Signing", + "santa.certificate.sha256": "2aa4b9973b7ba07add447ee4da8b5337c3ee2c3a991911e80e7282e8a751fc32", "santa.decision": "ALLOW", "santa.mode": "M", "santa.reason": "CERT", @@ -225,18 +370,26 @@ }, { "@timestamp": "2018-12-14T05:35:38.313Z", - "certificate.common_name": "Developer ID Application: Google, Inc. (EQHXZ8M8AV)", - "certificate.sha256": "345a8e098bd04794aaeefda8c9ef56a0bf3d3706d67d35bc0e23f11bb3bffce5", + "event.action": "exec", + "event.category": [ + "process" + ], "event.dataset": "santa.log", + "event.kind": "event", "event.module": "santa", + "event.outcome": "success", + "event.type": [ + "start" + ], "fileset.name": "log", "group.id": "20", "group.name": "staff", - "hash.sha256": "a8defc1b24c45f6dabeb8298af5f8e1daf39e1504e16f878345f15ac94ae96d7", "input.type": "log", + "log.level": "I", "log.offset": 2899, "log.original": "[2018-12-14T05:35:38.313Z] I santad: action=EXEC|decision=ALLOW|reason=UNKNOWN|sha256=a8defc1b24c45f6dabeb8298af5f8e1daf39e1504e16f878345f15ac94ae96d7|path=/Applications/Google Chrome.app/Contents/Versions/70.0.3538.110/Google Chrome Helper.app/Contents/MacOS/Google Chrome Helper|args=/Applications/Google Chrome.app/Contents/Versions/70.0.3538.110/Google Chrome Helper.app/Contents/MacOS/Google Chrome Helper --type=utility --field-trial-handle=120122713615061869,9401617251746517350,131072 --lang=en-US --service-sandbox-type=utility --service-request-channel-token=10458143409865682077 --seatbelt-client=262|cert_sha256=345a8e098bd04794aaeefda8c9ef56a0bf3d3706d67d35bc0e23f11bb3bffce5|cert_cn=Developer ID Application: Google, Inc. 
(EQHXZ8M8AV)|pid=89238|ppid=704|uid=501|user=akroh|gid=20|group=staff|mode=M", "process.args": [ + "/Applications/Google Chrome.app/Contents/Versions/70.0.3538.110/Google Chrome Helper.app/Contents/MacOS/Google Chrome Helper", "/Applications/Google", "Chrome.app/Contents/Versions/70.0.3538.110/Google", "Chrome", @@ -251,10 +404,20 @@ "--seatbelt-client=262" ], "process.executable": "/Applications/Google Chrome.app/Contents/Versions/70.0.3538.110/Google Chrome Helper.app/Contents/MacOS/Google Chrome Helper", + "process.hash.sha256": "a8defc1b24c45f6dabeb8298af5f8e1daf39e1504e16f878345f15ac94ae96d7", "process.pid": 89238, "process.ppid": 704, "process.start": "2018-12-14T05:35:38.313Z", + "related.hash": [ + "345a8e098bd04794aaeefda8c9ef56a0bf3d3706d67d35bc0e23f11bb3bffce5", + "a8defc1b24c45f6dabeb8298af5f8e1daf39e1504e16f878345f15ac94ae96d7" + ], + "related.user": [ + "akroh" + ], "santa.action": "EXEC", + "santa.certificate.common_name": "Developer ID Application: Google, Inc. (EQHXZ8M8AV)", + "santa.certificate.sha256": "345a8e098bd04794aaeefda8c9ef56a0bf3d3706d67d35bc0e23f11bb3bffce5", "santa.decision": "ALLOW", "santa.mode": "M", "santa.reason": "UNKNOWN", @@ -264,10 +427,13 @@ }, { "@timestamp": "2018-12-17T03:03:52.337Z", + "event.action": "diskappear", "event.dataset": "santa.log", + "event.kind": "event", "event.module": "santa", "fileset.name": "log", "input.type": "log", + "log.level": "I", "log.offset": 3712, "log.original": "[2018-12-17T03:03:52.337Z] I santad: action=DISKAPPEAR|mount=/Volumes/Recovery|volume=Recovery|bsdname=disk1s3|fs=apfs|model=APPLE SSD SM0512L|serial=C026495006UHCHH1Q|bus=PCI-Express|dmgpath=", "santa.action": "DISKAPPEAR", diff --git a/filebeat/scripts/tester/main.go b/filebeat/scripts/tester/main.go index 2ae9de44388..6da063e6204 100644 --- a/filebeat/scripts/tester/main.go +++ b/filebeat/scripts/tester/main.go @@ -108,6 +108,11 @@ func main() { } for _, path := range paths { + // TODO: Add support for testing YAML pipelines. + if filepath.Ext(path) == ".yml" { + fmt.Fprintf(os.Stderr, "YAML pipelines are not supported by this tool. 
Cannot process %q.", path) + os.Exit(3) + } err = testPipeline(*esURL, path, logs, *verbose, *simulateVerbose) if err != nil { os.Stderr.WriteString(err.Error()) @@ -185,8 +190,14 @@ func getPipelinePath(path, modulesPath string) ([]string, error) { module := parts[0] fileset := parts[1] - pathToPipeline := filepath.Join(modulesPath, module, fileset, "ingest", "pipeline.json") - _, err := os.Stat(pathToPipeline) + var pathToPipeline string + for _, ext := range []string{".json", ".yml"} { + pathToPipeline = filepath.Join(modulesPath, module, fileset, "ingest", "pipeline"+ext) + _, err = os.Stat(pathToPipeline) + if err == nil { + break + } + } if err != nil { return nil, fmt.Errorf("Cannot find pipeline in %s: %v %v\n", path, err, pathToPipeline) } @@ -199,8 +210,7 @@ func getPipelinePath(path, modulesPath string) ([]string, error) { return nil, err } for _, f := range files { - isPipelineFile := strings.HasSuffix(f.Name(), ".json") - if isPipelineFile { + if isPipelineFileExtension(f.Name()) { fullPath := filepath.Join(path, f.Name()) paths = append(paths, fullPath) } @@ -211,8 +221,7 @@ func getPipelinePath(path, modulesPath string) ([]string, error) { return paths, nil } - isPipelineFile := strings.HasSuffix(path, ".json") - if isPipelineFile { + if isPipelineFileExtension(path) { return []string{path}, nil } @@ -220,6 +229,15 @@ func getPipelinePath(path, modulesPath string) ([]string, error) { } +func isPipelineFileExtension(path string) bool { + ext := filepath.Ext(path) + switch strings.ToLower(ext) { + case ".yml", ".json": + return true + } + return false +} + func testPipeline(esURL, path string, logs []string, verbose, simulateVerbose bool) error { pipeline, err := readPipeline(path) if err != nil { diff --git a/filebeat/scripts/tester/main_test.go b/filebeat/scripts/tester/main_test.go index 79f53186a1b..6284f6f2e5e 100644 --- a/filebeat/scripts/tester/main_test.go +++ b/filebeat/scripts/tester/main_test.go @@ -29,7 +29,7 @@ func TestGetPipelinePath(t *testing.T) { count int }{ { - pipelinePath: "../../module/postgresql/log/ingest/pipeline.json", + pipelinePath: "../../module/postgresql/log/ingest/pipeline.yml", count: 1, }, { diff --git a/generator/_templates/metricbeat/{beat}/magefile.go b/generator/_templates/metricbeat/{beat}/magefile.go index 22b3dcfcc76..934276e633b 100644 --- a/generator/_templates/metricbeat/{beat}/magefile.go +++ b/generator/_templates/metricbeat/{beat}/magefile.go @@ -14,9 +14,11 @@ import ( "github.com/elastic/beats/v7/dev-tools/mage/target/common" "github.com/elastic/beats/v7/dev-tools/mage/target/pkg" "github.com/elastic/beats/v7/dev-tools/mage/target/unittest" - "github.com/elastic/beats/v7/dev-tools/mage/target/update" "github.com/elastic/beats/v7/generator/common/beatgen" metricbeat "github.com/elastic/beats/v7/metricbeat/scripts/mage" + + // mage:import + _ "github.com/elastic/beats/v7/metricbeat/scripts/mage/target/metricset" ) func init() { @@ -45,7 +47,7 @@ func Package() { devtools.UseCommunityBeatPackaging() - mg.Deps(update.Update) + mg.Deps(Update) mg.Deps(build.CrossBuild, build.CrossBuildGoDaemon) mg.SerialDeps(devtools.Package, pkg.PackageTest) } @@ -99,9 +101,14 @@ func Fmt() { common.Fmt() } -// Update updates the generated files (aka make update). -func Update() error { - return update.Update() +// Update is an alias for running fields, dashboards, config, and imports. +func Update() { + mg.SerialDeps(Fields, Dashboards, Config, Imports) +} + +// Dashboards collects all the dashboards and generates index patterns. 
+func Dashboards() error { + return devtools.KibanaDashboards("module") } // Imports generates an include/list.go file containing diff --git a/generator/common/Makefile b/generator/common/Makefile index 0a7b3608dae..927da092a22 100644 --- a/generator/common/Makefile +++ b/generator/common/Makefile @@ -5,7 +5,7 @@ BEAT_PATH=${GOPATH}/src/${BEAT_NAME} ES_BEATS=${GOPATH}/src/github.com/elastic/beats PREPARE_COMMAND?= --include ${ES_BEATS}/dev-tools/make/mage.mk +-include ${ES_BEATS}/dev-tools/make/mage-install.mk # Runs test build for mock beat .PHONY: test diff --git a/go.mod b/go.mod index 9492ed8f4cd..6278b03a62f 100644 --- a/go.mod +++ b/go.mod @@ -158,7 +158,7 @@ require ( google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb google.golang.org/grpc v1.27.1 gopkg.in/inf.v0 v0.9.0 - gopkg.in/jcmturner/gokrb5.v7 v7.3.0 // indirect + gopkg.in/jcmturner/gokrb5.v7 v7.3.0 gopkg.in/mgo.v2 v2.0.0-20160818020120-3f83fa500528 gopkg.in/yaml.v2 v2.2.8 howett.net/plist v0.0.0-20181124034731-591f970eefbb diff --git a/go.sum b/go.sum index 64e954e1a44..14538961d08 100644 --- a/go.sum +++ b/go.sum @@ -204,6 +204,7 @@ github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQ github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96 h1:cenwrSVm+Z7QLSV/BsnenAOcDXdX4cMv4wP0B/5QbPg= github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= github.com/dop251/goja_nodejs v0.0.0-20171011081505-adff31b136e6 h1:RrkoB0pT3gnjXhL/t10BSP1mcr/0Ldea2uMyuBr2SWk= github.com/dop251/goja_nodejs v0.0.0-20171011081505-adff31b136e6/go.mod h1:hn7BA7c8pLvoGndExHudxTDKZ84Pyvv+90pbBjbTz0Y= @@ -254,12 +255,14 @@ github.com/elastic/gosigar v0.10.5 h1:GzPQ+78RaAb4J63unidA/JavQRKrB6s8IOzN6Ib59j github.com/elastic/gosigar v0.10.5/go.mod h1:cdorVVzy1fhmEqmtgqkoE3bYtCfSCkVyjTyCIo22xvs= github.com/elastic/sarama v0.0.0-20191122160421-355d120d0970 h1:rSo6gsz4zOanqtJ5fmZYQJvEJnA5YsVOB25casIwqUw= github.com/elastic/sarama v0.0.0-20191122160421-355d120d0970/go.mod h1:fGP8eQ6PugKEI0iUETYYtnP6d1pH/bdDMTel1X5ajsU= +github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e h1:p1yVGRW3nmb85p1Sh1ZJSDm4A4iKLS5QNbvUHMgGu/M= github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/evanphx/json-patch v0.0.0-20190203023257-5858425f7550/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v4.2.0+incompatible h1:fUDGZCv/7iAN7u0puUVhvKCcsR6vRfwrJatElLBEf0I= github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/fatih/color v1.5.0 h1:vBh+kQp8lg9XPr56u1CPrWjFXtdphMoGWVHr9/1c+A0= github.com/fatih/color v1.5.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= @@ -925,7 +928,9 @@ k8s.io/klog 
v0.3.1/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v0.3.4-0.20190719014911-6a023d6d0e09 h1:w2hB+DoJsxpuO4hxMXfs44k1riAXX5kaV40564cWMUc= k8s.io/klog v0.3.4-0.20190719014911-6a023d6d0e09/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= +k8s.io/kube-openapi v0.0.0-20190709113604-33be087ad058 h1:di3XCwddOR9cWBNpfgXaskhh6cgJuwcK54rvtwUaC10= k8s.io/kube-openapi v0.0.0-20190709113604-33be087ad058/go.mod h1:nfDlWeOsu3pUf4yWGL+ERqohP4YsZcBJXWMK+gkzOA4= +k8s.io/kubernetes v1.13.0 h1:qTfB+u5M92k2fCCCVP2iuhgwwSOv1EkAkvQY1tQODD8= k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= k8s.io/utils v0.0.0-20190221042446-c2654d5206da/go.mod h1:8k8uAuAQ0rXslZKaEWd0c3oVhZz7sSzSiPnVZayjIX0= k8s.io/utils v0.0.0-20190712204705-3dccf664f023 h1:1H4Jyzb0z2X0GfBMTwRjnt5ejffRHrGftUgJcV/ZfDc= diff --git a/heartbeat/_meta/beat.reference.yml b/heartbeat/_meta/beat.reference.yml index bd59aedd220..e84bd9e99c4 100644 --- a/heartbeat/_meta/beat.reference.yml +++ b/heartbeat/_meta/beat.reference.yml @@ -25,8 +25,11 @@ heartbeat.monitors: - type: icmp # monitor type `icmp` (requires root) uses ICMP Echo Request to ping # configured hosts - # Monitor name used for job name and document type. - #name: icmp + # ID used to uniquely identify this monitor in elasticsearch even if the config changes + id: my-monitor + + # Human readable display name for this service in Uptime UI and elsewhere + name: my-icmp-monitor # Enable/Disable monitor #enabled: true @@ -89,9 +92,11 @@ heartbeat.monitors: - type: tcp # monitor type `tcp`. Connect via TCP and optionally verify endpoint # by sending/receiving a custom payload + # ID used to uniquely identify this monitor in elasticsearch even if the config changes + id: my-monitor - # Monitor name used for job name and document type - #name: tcp + # Human readable display name for this service in Uptime UI and elsewhere + name: my-tcp-monitor # Enable/Disable monitor #enabled: true @@ -165,9 +170,11 @@ heartbeat.monitors: #keep_null: false - type: http # monitor type `http`. 
Connect via HTTP and optionally verify response + # ID used to uniquely identify this monitor in elasticsearch even if the config changes + id: my-http-monitor - # Monitor name used for job name and document type - #name: http + # Human readable display name for this service in Uptime UI and elsewhere + name: My Monitor # Enable/Disable monitor #enabled: true diff --git a/heartbeat/_meta/beat.yml b/heartbeat/_meta/beat.yml index 0e30f34f239..5459f28f989 100644 --- a/heartbeat/_meta/beat.yml +++ b/heartbeat/_meta/beat.yml @@ -22,13 +22,14 @@ heartbeat.config.monitors: # Configure monitors inline heartbeat.monitors: - type: http - + # ID used to uniquely identify this monitor in elasticsearch even if the config changes + id: my-monitor + # Human readable display name for this service in Uptime UI and elsewhere + name: My Monitor # List of URLs to query urls: ["http://localhost:9200"] - # Configure task schedule schedule: '@every 10s' - # Total test connection and data exchange timeout #timeout: 16s diff --git a/heartbeat/_meta/fields.common.yml b/heartbeat/_meta/fields.common.yml index 28a721494f9..8db94d8a56c 100644 --- a/heartbeat/_meta/fields.common.yml +++ b/heartbeat/_meta/fields.common.yml @@ -17,11 +17,19 @@ type: keyword description: > The monitor's configured name + multi_fields: + - name: text + type: text + analyzer: simple - name: id type: keyword description: > The monitor's full job ID as used by heartbeat. + multi_fields: + - name: text + type: text + analyzer: simple - name: duration type: group diff --git a/heartbeat/autodiscover/builder/hints/monitors.go b/heartbeat/autodiscover/builder/hints/monitors.go index ba89d81b456..836b5a9326c 100644 --- a/heartbeat/autodiscover/builder/hints/monitors.go +++ b/heartbeat/autodiscover/builder/hints/monitors.go @@ -91,7 +91,7 @@ func (hb *heartbeatHints) CreateConfig(event bus.Event) []*common.Config { } hb.logger.Debugf("generated config %+v", configs) // Apply information in event to the template to generate the final config - return template.ApplyConfigTemplate(event, configs) + return template.ApplyConfigTemplate(event, configs, false) } tempCfg := common.MapStr{} @@ -121,7 +121,7 @@ func (hb *heartbeatHints) CreateConfig(event bus.Event) []*common.Config { } // Apply information in event to the template to generate the final config - return template.ApplyConfigTemplate(event, configs) + return template.ApplyConfigTemplate(event, configs, false) } func (hb *heartbeatHints) getType(hints common.MapStr) common.MapStr { diff --git a/heartbeat/beater/heartbeat.go b/heartbeat/beater/heartbeat.go index dbdfe1dd740..1ad682fc496 100644 --- a/heartbeat/beater/heartbeat.go +++ b/heartbeat/beater/heartbeat.go @@ -156,7 +156,18 @@ func (bt *Heartbeat) RunReloadableMonitors(b *beat.Beat) (err error) { // makeAutodiscover creates an autodiscover object ready to be started. func (bt *Heartbeat) makeAutodiscover(b *beat.Beat) (*autodiscover.Autodiscover, error) { - return autodiscover.NewAutodiscover("heartbeat", b.Publisher, bt.dynamicFactory, autodiscover.QueryConfig(), bt.config.Autodiscover) + autodiscover, err := autodiscover.NewAutodiscover( + "heartbeat", + b.Publisher, + bt.dynamicFactory, + autodiscover.QueryConfig(), + bt.config.Autodiscover, + b.Keystore, + ) + if err != nil { + return nil, err + } + return autodiscover, nil } // Stop stops the beat. 
diff --git a/heartbeat/docker-compose.yml b/heartbeat/docker-compose.yml index 3f905081441..c7da39a8798 100644 --- a/heartbeat/docker-compose.yml +++ b/heartbeat/docker-compose.yml @@ -4,11 +4,11 @@ services: build: ${PWD}/. depends_on: - proxy_dep - env_file: - - ${PWD}/build/test.env environment: - REDIS_HOST=redis - REDIS_PORT=6379 + - ES_HOST=elasticsearch + - ES_PORT=9200 working_dir: /go/src/github.com/elastic/beats/heartbeat volumes: - ${PWD}/..:/go/src/github.com/elastic/beats/ diff --git a/heartbeat/docs/configuring-howto.asciidoc b/heartbeat/docs/configuring-howto.asciidoc index 7f2b6547d7e..525fc15baf3 100644 --- a/heartbeat/docs/configuring-howto.asciidoc +++ b/heartbeat/docs/configuring-howto.asciidoc @@ -26,6 +26,7 @@ _Beats Platform Reference_ for more about the structure of the config file. The following topics describe how to configure Heartbeat: * <> +* <> * <> * <> * <> @@ -44,6 +45,8 @@ The following topics describe how to configure Heartbeat: include::./heartbeat-options.asciidoc[] +include::./heartbeat-scheduler.asciidoc[] + include::./heartbeat-general-options.asciidoc[] include::{libbeat-dir}/shared-path-config.asciidoc[] diff --git a/heartbeat/docs/fields.asciidoc b/heartbeat/docs/fields.asciidoc index b288eec1788..05507f95dc5 100644 --- a/heartbeat/docs/fields.asciidoc +++ b/heartbeat/docs/fields.asciidoc @@ -215,6 +215,13 @@ type: keyword -- +*`monitor.name.text`*:: ++ +-- +type: text + +-- + *`monitor.id`*:: + -- @@ -225,6 +232,13 @@ type: keyword -- +*`monitor.id.text`*:: ++ +-- +type: text + +-- + [float] === duration @@ -7824,7 +7838,10 @@ TLS layer related fields. *`tls.certificate_not_valid_before`*:: + -- -Earliest time at which the connection's certificates are valid. + +deprecated:[7.8.0] + +Deprecated in favor of `tls.server.x509.not_before`. Earliest time at which the connection's certificates are valid. type: date @@ -7833,7 +7850,10 @@ type: date *`tls.certificate_not_valid_after`*:: + -- -Latest time at which the connection's certificates are valid. + +deprecated:[7.8.0] + +Deprecated in favor of `tls.server.x509.not_after`. Latest time at which the connection's certificates are valid. type: date @@ -7862,3 +7882,180 @@ type: long -- +[float] +=== server + +Detailed x509 certificate metadata + + + +*`tls.server.x509.alternative_names`*:: ++ +-- +List of subject alternative names (SAN). Name types vary by certificate authority and certificate type but commonly contain IP addresses, DNS names (and wildcards), and email addresses. + +type: keyword + +example: *.elastic.co + +-- + + +*`tls.server.x509.issuer.common_name`*:: ++ +-- +List of common name (CN) of issuing certificate authority. + +type: keyword + +example: DigiCert SHA2 High Assurance Server CA + +-- + +*`tls.server.x509.issuer.common_name.text`*:: ++ +-- +type: text + +-- + +*`tls.server.x509.issuer.distinguished_name`*:: ++ +-- +Distinguished name (DN) of issuing certificate authority. + +type: keyword + +example: C=US, O=DigiCert Inc, OU=www.digicert.com, CN=DigiCert SHA2 High Assurance Server CA + +-- + +*`tls.server.x509.not_after`*:: ++ +-- +Time at which the certificate is no longer considered valid. + +type: date + +example: 2020-07-16 03:15:39 + +-- + +*`tls.server.x509.not_before`*:: ++ +-- +Time at which the certificate is first considered valid. + +type: date + +example: 2019-08-16 01:40:25 + +-- + +*`tls.server.x509.public_key_algorithm`*:: ++ +-- +Algorithm used to generate the public key. 
+ +type: keyword + +example: RSA + +-- + +*`tls.server.x509.public_key_curve`*:: ++ +-- +The curve used by the elliptic curve public key algorithm. This is algorithm specific. + +type: keyword + +example: nistp521 + +-- + +*`tls.server.x509.public_key_exponent`*:: ++ +-- +Exponent used to derive the public key. This is algorithm specific. + +type: long + +example: 65537 + +-- + +*`tls.server.x509.public_key_size`*:: ++ +-- +The size of the public key space in bits. + +type: long + +example: 2048 + +-- + +*`tls.server.x509.serial_number`*:: ++ +-- +Unique serial number issued by the certificate authority. For consistency, if this value is alphanumeric, it should be formatted without colons and uppercase characters. + +type: keyword + +example: 55FBB9C7DEBF09809D12CCAA + +-- + +*`tls.server.x509.signature_algorithm`*:: ++ +-- +Identifier for certificate signature algorithm. Recommend using names found in Go Lang Crypto library (See https://github.com/golang/go/blob/go1.14/src/crypto/x509/x509.go#L337-L353). + +type: keyword + +example: SHA256-RSA + +-- + + +*`tls.server.x509.subject.common_name`*:: ++ +-- +List of common names (CN) of subject. + +type: keyword + +example: r2.shared.global.fastly.net + +-- + +*`tls.server.x509.subject.common_name.text`*:: ++ +-- +type: text + +-- + +*`tls.server.x509.subject.distinguished_name`*:: ++ +-- +Distinguished name (DN) of the certificate subject entity. + +type: keyword + +example: C=US, ST=California, L=San Francisco, O=Fastly, Inc., CN=r2.shared.global.fastly.net + +-- + +*`tls.server.x509.version_number`*:: ++ +-- +Version of x509 format. + +type: keyword + +example: 3 + +-- + diff --git a/heartbeat/docs/heartbeat-options.asciidoc b/heartbeat/docs/heartbeat-options.asciidoc index 4e166610a53..23f33c26a3d 100644 --- a/heartbeat/docs/heartbeat-options.asciidoc +++ b/heartbeat/docs/heartbeat-options.asciidoc @@ -81,661 +81,26 @@ monitor definitions only, e.g. what is normally under the `heartbeat.monitors` s ---------------------------------------------------------------------- [float] -[[monitor-options]] -=== Monitor options +[[monitor-types]] +=== Monitor types -You can specify the following options when defining a {beatname_uc} monitor in any location. -These options are the same for all monitors. Each monitor type has additional configuration -options that are specific to that monitor type. +You can configure {beatname_uc} to use the following monitor types: -[float] -[[monitor-type]] -==== `type` - -The type of monitor to run. One of: - -* `icmp`: Uses an ICMP (v4 and v6) Echo Request to ping the configured hosts. -Requires special permissions or root access. See <>. -* `tcp`: Connects via TCP and optionally verifies the endpoint by sending and/or -receiving a custom payload. See <>. -* `http`: Connects via HTTP and optionally verifies that the host returns the -expected response. See <>. Will use `Elastic-Heartbeat` as the user agent product. +*<>*:: Uses an ICMP (v4 and v6) Echo Request to ping the configured hosts. +Requires special permissions or root access. +*<>*:: Connects via TCP and optionally verifies the endpoint by sending and/or +receiving a custom payload. +*<>*:: Connects via HTTP and optionally verifies that the host returns the +expected response. Will use `Elastic-Heartbeat` as +the user agent product. The `tcp` and `http` monitor types both support SSL/TLS and some proxy settings. -[float] -[[monitor-id]] -==== `id` - -A unique identifier for this configuration. 
This should not change with edits to the monitor configuration -regardless of changes to any config fields. Examples: `uploader-service`, `http://example.net`, `us-west-loadbalancer`. Note that this uniqueness is only within a given beat instance. If you want to monitor the same endpoint from multiple locations it is recommended that those heartbeat instances use the same IDs so that their results can be correlated. You can use the `host.geo.name` property to disambiguate them. - -When querying against indexed monitor data this is the field you will be aggregating with. Appears in the -<> as `monitor.id`. - -If you do not set this explicitly the monitor's config will be hashed and a generated value used. This value will -change with any options change to this monitor making aggregations over time between changes impossible. For this reason -it is recommended that you set this manually. - -[float] -[[monitor-name]] -==== `name` - -Optional human readable name for this monitor. This value appears in the <> -as `monitor.name`. - -[float] -[[monitor-enabled]] -==== `enabled` - -A Boolean value that specifies whether the module is enabled. If the `enabled` -option is missing from the configuration block, the module is enabled by -default. - -[float] -[[monitor-schedule]] -==== `schedule` - -A cron-like expression that specifies the task schedule. For example: - -* `*/5 * * * * * *` runs the task every 5 seconds (for example, at 10:00:00, -10:00:05, and so on). -* `@every 5s` runs the task every 5 seconds from the time when {beatname_uc} was -started. - -The `schedule` option uses a cron-like syntax based on https://github.com/gorhill/cronexpr#implementation[this `cronexpr` implementation], -but adds the `@every` keyword. - -For stats on the execution of scheduled tasks you can enable the HTTP stats server with `http.enabled: true` in heartbeat.yml, then run `curl http://localhost:5066/stats | jq .heartbeat.scheduler` to view the scheduler's stats. Stats are provided for both jobs and tasks. Each time a monitor is scheduled is considered to be a single job, while portions of the work a job does, like DNS lookups and executing network requests are defined as tasks. The stats provided are: - -* **jobs.active:** The number of actively running jobs/monitors. -* **jobs.missed_deadline:** The number of jobs that executed after their scheduled time. This can be caused either by overlong long timeouts from the previous job or high load preventing heartbeat from keeping up with work. -* **tasks.active:** The number of tasks currently running. -* **tasks.waiting:** If the global `schedule.limit` option is set, this number will reflect the number of tasks that are ready to execute, but have not been started in order to prevent exceeding `schedule.limit`. - -[float] -[[monitor-ipv4]] -==== `ipv4` - -A Boolean value that specifies whether to ping using the ipv4 protocol if -hostnames are configured. The default is `true`. - -[float] -[[monitor-ipv6]] -==== `ipv6` - -A Boolean value that specifies whether to ping using the ipv6 protocol -if hostnames are configured. The default is `true`. - -[float] -[[monitor-mode]] -==== `mode` - -If `mode` is `any`, the monitor pings only one IP address for a hostname. If -`mode` is `all`, the monitor pings all resolvable IPs for a hostname. The -`mode: all` setting is useful if you are using a DNS-load balancer and want to -ping every IP address for the specified hostname. The default is `any`. 
- -[float] -[[monitor-timeout]] -==== `timeout` - -The total running time for each ping test. This is the total time allowed for -testing the connection and exchanging data. The default is 16 seconds (16s). - -If the timeout is exceeded, {beatname_uc} publishes a `service-down` event. If the -value specified for `timeout` is greater than `schedule`, intermediate checks -will not be executed by the scheduler. - -[float] -[[monitor-fields]] -==== `fields` - -Optional fields that you can specify to add additional information to the -output. For example, you might add fields that you can use for filtering log -data. Fields can be scalar values, arrays, dictionaries, or any nested -combination of these. By default, the fields that you specify here will be -grouped under a `fields` sub-dictionary in the output document. To store the -custom fields as top-level fields, set the `fields_under_root` option to true. -If a duplicate field is declared in the general configuration, then its value -will be overwritten by the value declared here. - -[float] -[[monitor-fields-under-root]] -==== `fields_under_root` - -If this option is set to true, the custom <> -are stored as top-level fields in the output document instead of being grouped -under a `fields` sub-dictionary. If the custom field names conflict with other -field names added by {beatname_uc}, then the custom fields overwrite the other -fields. - -[float] -[[monitor-tags]] -==== `tags` - -A list of tags that will be sent with the monitor event. This setting is optional. - -[float] -[[monitor-processors]] -==== `processors` - -A list of processors to apply to the data generated by the monitor. - -See <> for information about specifying -processors in your config. - -[float] -[[monitor-keep-null]] -==== `keep_null` - -If this option is set to true, fields with `null` values will be published in -the output document. By default, `keep_null` is set to `false`. - -[float] -[[monitor-icmp-options]] -=== ICMP options - -These options configure {beatname_uc} to use ICMP (v4 and v6) Echo Requests to check -the configured hosts. These options are valid when the <> is -`icmp`. Please note that on most platforms you must execute Heartbeat with elevated permissions -to perform ICMP pings. - -On Linux, regular users may perform pings if the right file capabilities are set. Run -`sudo setcap cap_net_raw+eip /path/to/heartbeat` to grant {beatname_uc} ping capabilities on Linux. -Alternatively, one may grant ping permissions to the user {beatname_uc} runs as. To grant ping permissions -in this way, run `sudo sysctl -w net.ipv4.ping_group_range='myuserid myuserid'`. - -Other platforms may require {beatname_uc} to run as root or administrator to execute pings. - -[float] -[[monitor-icmp-hosts]] -==== `hosts` - -A list of hosts to ping. - -[float] -[[monitor-icmp-wait]] -==== `wait` - -The duration to wait before emitting another ICMP Echo Request. The default is 1 -second (1s). - -[float] -[[monitor-tcp-options]] -=== TCP options - -These options configure {beatname_uc} to connect via TCP and optionally verify the -endpoint by sending and/or receiving a custom payload. These options are valid when -the <> is `tcp`. - -[float] -[[monitor-tcp-hosts]] -==== `hosts` - -A list of hosts to ping. The entries in the list can be: - -* A plain host name, such as `localhost`, or an IP address. If you specify this -option, you must also specify a value for <>. If the -monitor is <>, {beatname_uc} establishes an -SSL/TLS-based connection. 
Otherwise, it establishes a plain TCP connection. -* A hostname and port, such as `localhost:12345`. {beatname_uc} connects -to the port on the specified host. If the monitor is -<>, {beatname_uc} establishes an -SSL/TLS-based connection. Otherwise, it establishes a TCP connection. -* A full URL using the syntax `scheme://:[port]`, where: -** `scheme` is one of `tcp`, `plain`, `ssl` or `tls`. If `tcp` or `plain` is -specified, {beatname_uc} establishes a TCP connection even if the monitor is -configured to use SSL. If `tls` or `ssl` is specified, {beatname_uc} establishes -an SSL connection. However, if the monitor is not configured to use SSL, the -system defaults are used (currently not supported on Windows). -** `host` is the hostname. -** `port` is the port number. If `port` is missing in the URL, the -<> setting is required. - -[float] -[[monitor-tcp-ports]] -==== `ports` - -A list of ports to ping if the host specified in <> -does not contain a port number. It is generally preferable to use a single value here, -since each port will be monitored using a separate `id`, with the given `id` value, -used as a prefix in the Heartbeat data, and the configured `name` shared across events -sent via this check. - -Example configuration: - -[source,yaml] -------------------------------------------------------------------------------- -- type: tcp - id: my-host-services - name: My Host Services - hosts: ["myhost"] - ports: [80, 9200, 5044] - schedule: '@every 5s' -------------------------------------------------------------------------------- - -[float] -[[monitor-tcp-check]] -==== `check` - -An optional payload string to send to the remote host and the expected answer. -If no payload is specified, the endpoint is assumed to be available if the -connection attempt was successful. If `send` is specified without `receive`, -any response is accepted as OK. If `receive` is specified without `send`, no -payload is sent, but the client expects to receive a payload in the form of a -"hello message" or "banner" on connect. - -Example configuration: - -[source,yaml] -------------------------------------------------------------------------------- -- type: tcp - id: echo-service - name: Echo Service - hosts: ["myhost"] - ports: [7] - check.send: 'Hello World' - check.receive: 'Hello World' - schedule: '@every 5s' -------------------------------------------------------------------------------- - - -[float] -[[monitor-tcp-proxy-url]] -==== `proxy_url` - -The URL of the SOCKS5 proxy to use when connecting to the server. The value -must be a URL with a scheme of socks5://. - -If the SOCKS5 proxy server requires client authentication, then a username and -password can be embedded in the URL as shown in the example. - -[source,yaml] -------------------------------------------------------------------------------- - proxy_url: socks5://user:password@socks5-proxy:2233 -------------------------------------------------------------------------------- - -When using a proxy, hostnames are resolved on the proxy server instead of on -the client. You can change this behavior by setting the -`proxy_use_local_resolver` option. - -[float] -[[monitor-tcp-proxy-use-local-resolver]] -==== `proxy_use_local_resolver` - -A Boolean value that determines whether hostnames are resolved locally instead -of being resolved on the proxy server. The default value is false, which means -that name resolution occurs on the proxy server. - -[float] -[[monitor-tcp-tls-ssl]] -==== `ssl` - -The TLS/SSL connection settings. 
If the monitor is -<>, it will attempt an SSL -handshake. If `check` is not configured, the monitor will only check to see if -it can establish an SSL/TLS connection. This check can fail either at TCP level -or during certificate validation. - -Example configuration: - -[source,yaml] -------------------------------------------------------------------------------- -- type: tcp - id: tls-mail - name: TLS Mail - hosts: ["mail.example.net"] - ports: [465] - schedule: '@every 5s' - ssl: - certificate_authorities: ['/etc/ca.crt'] - supported_protocols: ["TLSv1.0", "TLSv1.1", "TLSv1.2"] -------------------------------------------------------------------------------- - - -Also see <> for a full description of the `ssl` options. - -[float] -[[monitor-http-options]] -=== HTTP options - -These options configure {beatname_uc} to connect via HTTP and optionally verify that -the host returns the expected response. These options are valid when the -<> is `http`. - -[float] -[[monitor-http-urls]] -==== `hosts` - -A list of URLs to ping. - -Example configuration: - -[source,yaml] -------------------------------------------------------------------------------- -- type: http - id: myhost - name: My HTTP Host - schedule: '@every 5s' - hosts: ["http://myhost:80"] -------------------------------------------------------------------------------- - -[float] -[[monitor-http-max-redirects]] -==== `max_redirects` - -The total number of redirections Heartbeat will follow. Defaults to 0, meaning heartbeat will not follow redirects, -but will report the status of the redirect. If set to a number greater than 0 heartbeat will follow that number of redirects. - -When this option is set to a value greater than zero the `monitor.ip` field will no longer be reported, as multiple -DNS requests across multiple IPs may return multiple IPs. Fine grained network timing data will also not be recorded, as with redirects -that data will span multiple requests. Specifically the fields `http.rtt.content.us`, `http.rtt.response_header.us`, -`http.rtt.total.us`, `http.rtt.validate.us`, `http.rtt.write_request.us` and `dns.rtt.us` will be omitted. - -[float] -[[monitor-http-proxy-url]] -==== `proxy_url` - -The HTTP proxy URL. This setting is optional. Example `http://proxy.mydomain.com:3128` - -[float] -[[monitor-http-username]] -==== `username` - -The username for authenticating with the server. The credentials are passed -with the request. This setting is optional. - -You need to specify credentials when your `check.response` settings require it. -For example, you can check for a 403 response (`check.response.status: [403]`) -without setting credentials. - -[float] -[[monitor-http-password]] -==== `password` - -The password for authenticating with the server. This setting is optional. - -[float] -[[monitor-http-tls-ssl]] -==== `ssl` - -The TLS/SSL connection settings for use with the HTTPS endpoint. If you don't -specify settings, the system defaults are used. - - -Example configuration: - -[source,yaml] -------------------------------------------------------------------------------- -- type: http - id: my-http-service - name: My HTTP Service - hosts: ["https://myhost:443"] - schedule: '@every 5s' - ssl: - certificate_authorities: ['/etc/ca.crt'] - supported_protocols: ["TLSv1.0", "TLSv1.1", "TLSv1.2"] -------------------------------------------------------------------------------- - -Also see <> for a full description of the `ssl` options. 
- -[float] -[[monitor-http-response]] -=== `response` - -Controls the indexing of the HTTP response body contents to the `http.response.body.contents` field. - -Set `response.include_body` to one of the options listed below. - -*`on_error`*:: Include the body if an error is encountered during the check. This is the default. -*`never`*:: Never include the body. -*`always`*:: Always include the body with checks. - -Set `response.include_body_max_bytes` to control the maximum size of the stored body contents. Defaults to 1024 bytes. - -[float] -[[monitor-http-check]] -==== `check` - -An optional `request` to send to the remote host and the expected `response`. - -Example configuration: - -[source,yaml] -------------------------------------------------------------------------------- -- type: http - id: my-http-host - name: My HTTP Service - hosts: ["http://myhost:80"] - check.request.method: HEAD - check.response.status: [200] - schedule: '@every 5s' -------------------------------------------------------------------------------- - - -Under `check.request`, specify these options: - -*`method`*:: The HTTP method to use. Valid values are `"HEAD"`, `"GET"` and -`"POST"`. -*`headers`*:: A dictionary of additional HTTP headers to send. By default heartbeat -will set the 'User-Agent' header to identify itself. -*`body`*:: Optional request body content. - -Example configuration: -This monitor POSTs an `x-www-form-urlencoded` string -to the endpoint `/demo/add` - -[source,yaml] -------------------------------------------------------------------------------- -- type: http - id: demo-service - name: Demo Service - schedule: '@every 5s' - urls: ["http://localhost:8080/demo/add"] - check.request: - method: POST - headers: - 'Content-Type': 'application/x-www-form-urlencoded' - # urlencode the body: - body: "name=first&email=someemail%40someemailprovider.com" - check.response: - status: [200] - body: - - Saved - - saved -------------------------------------------------------------------------------- - -Under `check.response`, specify these options: - -*`status`*:: A list of expected status codes. 4xx and 5xx codes are considered `down` by default. Other codes are considered `up`. -*`headers`*:: The required response headers. -*`body`*:: A list of regular expressions to match the the body output. Only a single expression needs to match. HTTP response -bodies of up to 100MiB are supported. - -Example configuration: -This monitor examines the -response body for the strings `saved` or `Saved` and expects 200 or 201 status codes - -[source,yaml] -------------------------------------------------------------------------------- -- type: http - id: demo-service - name: Demo Service - schedule: '@every 5s' - urls: ["http://localhost:8080/demo/add"] - check.request: - method: POST - headers: - 'Content-Type': 'application/x-www-form-urlencoded' - # urlencode the body: - body: "name=first&email=someemail%40someemailprovider.com" - check.response: - status: [200, 201] - body: - - Saved - - saved -------------------------------------------------------------------------------- - -*`json`*:: A list of <> expressions executed against the body when parsed as JSON. Body sizes -must be less than or equal to 100 MiB. 
- -The following configuration shows how to check the response when the body -contains JSON: - -[source,yaml] -------------------------------------------------------------------------------- -- type: http - id: demo-service - name: Demo Service - schedule: '@every 5s' - hosts: ["https://myhost:80"] - check.request: - method: GET - headers: - 'X-API-Key': '12345-mykey-67890' - check.response: - status: [200] - json: - - description: check status - condition: - equals: - status: ok -------------------------------------------------------------------------------- - -The following configuration shows how to check the response for multiple regex -patterns: - -[source,yaml] -------------------------------------------------------------------------------- -- type: http - id: demo-service - name: Demo Service - schedule: '@every 5s' - hosts: ["https://myhost:80"] - check.request: - method: GET - headers: - 'X-API-Key': '12345-mykey-67890' - check.response: - status: [200] - body: - - hello - - world -------------------------------------------------------------------------------- - -The following configuration shows how to check the response with a multiline -regex: - -[source,yaml] -------------------------------------------------------------------------------- -- type: http - id: demo-service - name: Demo Service - schedule: '@every 5s' - hosts: ["https://myhost:80"] - check.request: - method: GET - headers: - 'X-API-Key': '12345-mykey-67890' - check.response: - status: [200] - body: '(?s)first.*second.*third' -------------------------------------------------------------------------------- - - -[float] -[[monitors-scheduler]] -=== Scheduler options - -You specify options under `heartbeat.scheduler` to control the behavior of the task -scheduler. - -Example configuration: - -[source,yaml] -------------------------------------------------------------------------------- -heartbeat.scheduler: - limit: 10 - location: 'UTC-08:00' -------------------------------------------------------------------------------- - -In the example, setting `limit` to 10 guarantees that only 10 concurrent -I/O tasks will be active. An I/O task can be the actual check or resolving an -address via DNS. - -[float] -[[heartbeat-scheduler-limit]] -==== `limit` - -The number of concurrent I/O tasks that {beatname_uc} is allowed to execute. If set -to 0, there is no limit. The default is 0. - -Most operating systems set a file descriptor limit of 1024. For {beatname_uc} to -operate correctly and not accidentally block libbeat output, the value that you -specify for `limit` should be below the configured ulimit. - - -[float] -[[heartbeat-scheduler-location]] -==== `location` - -The timezone for the scheduler. By default the scheduler uses localtime. - -[float] -[[monitor-watch-poll-file]] -==== `watch.poll_file` - -deprecated:[6.5.0,Replaced by using dynamic reloading via the `heartbeat.config.monitors` option.] - -The JSON file to watch for additional monitor configurations. The JSON file can -contain multiple objects, each of which specifies a different monitor config. -{beatname_uc} checks this file periodically and starts a new monitor instance for -each new JSON object added to the file. For example, imagine that you add -10 new entries to the JSON file, each for a different hostname. When {beatname_uc} -picks up the changes in the file, it merges the original config -(`heartbeat.yml`) plus the JSON objects, and starts a monitor for each new host -that you've configured. 
If you delete an object from the JSON file and it -doesn't exist in the main config, {beatname_uc} stops the monitor instance running -for that object. - -Each monitor has a unique ID that's based on parameters like protocol, host, -and port. If two monitors have the same ID, {beatname_uc} uses the settings that -are defined in the last JSON object of the merged config. This means that -you can specify settings in the JSON file that overwrite the settings in -the main config. In this way, the configuration that you specify for the -monitor in the main {beatname_uc} config file acts like a default config that you -can live-reconfigure by specifying additional configurations in the external -JSON file. - -Example configuration: - -[source, yaml] -------------------------------------------------------------------------------- -heartbeat.monitors: -- type: tcp - id: demo-service - name: Demo Service - schedule: '*/5 * * * * * *' - hosts: ["myhost"] - watch.poll_file: - path: {path.config}/monitors/dynamic.json - interval: 5s -------------------------------------------------------------------------------- +include::monitors/monitor-common-options.asciidoc[] -*`path`*:: Specifies the path to the JSON file to check for updates. -*`interval`*:: Specifies how often {beatname_uc} checks the file for changes. +include::monitors/monitor-icmp.asciidoc[] -To reconfigure the settings specified in the example config, you could define -the following JSON objects in `dynamic.json`: +include::monitors/monitor-tcp.asciidoc[] -[source, json] -------------------------------------------------------------------------------- -{"hosts": ["myhost:1234"], "schedule": "*/15 * * * * * *"} <1> -{"hosts": ["tls://otherhost:479"], "ssl.certificate_authorities": ["path/to/ca/file.pem"]} <2> -------------------------------------------------------------------------------- -<1> Upon detecting the changes, {beatname_uc} stops the old monitor and then -restarts it with a schedule of 15 seconds between checks. -<2> {beatname_uc} starts a new monitor that uses a TLS-based connection with a -custom CA certificate. +include::monitors/monitor-http.asciidoc[] diff --git a/heartbeat/docs/heartbeat-scheduler.asciidoc b/heartbeat/docs/heartbeat-scheduler.asciidoc new file mode 100644 index 00000000000..ac2f647f6ee --- /dev/null +++ b/heartbeat/docs/heartbeat-scheduler.asciidoc @@ -0,0 +1,41 @@ +[[monitors-scheduler]] +== Configure the task scheduler + +++++ +Task scheduler +++++ + +You specify options under `heartbeat.scheduler` to control the behavior of the task +scheduler. + +Example configuration: + +[source,yaml] +------------------------------------------------------------------------------- +heartbeat.scheduler: + limit: 10 + location: 'UTC-08:00' +------------------------------------------------------------------------------- + +In the example, setting `limit` to 10 guarantees that only 10 concurrent +I/O tasks will be active. An I/O task can be the actual check or resolving an +address via DNS. + +[float] +[[heartbeat-scheduler-limit]] +==== `limit` + +The number of concurrent I/O tasks that {beatname_uc} is allowed to execute. If set +to 0, there is no limit. The default is 0. + +Most operating systems set a file descriptor limit of 1024. For {beatname_uc} to +operate correctly and not accidentally block libbeat output, the value that you +specify for `limit` should be below the configured ulimit. + + +[float] +[[heartbeat-scheduler-location]] +==== `location` + +The timezone for the scheduler. 
By default, the scheduler uses localtime. + diff --git a/heartbeat/docs/monitors/monitor-common-options.asciidoc b/heartbeat/docs/monitors/monitor-common-options.asciidoc new file mode 100644 index 00000000000..68194b28119 --- /dev/null +++ b/heartbeat/docs/monitors/monitor-common-options.asciidoc @@ -0,0 +1,143 @@ +[[monitor-options]] +=== Common monitor options + +You can specify the following options when defining a {beatname_uc} monitor in any location. +These options are the same for all monitors. Each monitor type has additional configuration +options that are specific to that monitor type. + +[float] +[[monitor-type]] +==== `type` + +The type of monitor to run. See <>. + +[float] +[[monitor-id]] +==== `id` + +A unique identifier for this configuration. This should not change with edits to the monitor configuration, +regardless of changes to any config fields. Examples: `uploader-service`, `http://example.net`, `us-west-loadbalancer`. Note that this uniqueness is only within a given beat instance. If you want to monitor the same endpoint from multiple locations, it is recommended that those heartbeat instances use the same IDs so that their results can be correlated. You can use the `host.geo.name` property to disambiguate them. + +When querying against indexed monitor data, this is the field you will be aggregating with. Appears in the +<> as `monitor.id`. + +If you do not set this explicitly, the monitor's config will be hashed and a generated value used. This value will +change with any change to this monitor's options, making aggregations over time across those changes impossible. For this reason +it is recommended that you set this manually. + +[float] +[[monitor-name]] +==== `name` + +Optional human-readable name for this monitor. This value appears in the <> +as `monitor.name`. + +[float] +[[monitor-enabled]] +==== `enabled` + +A Boolean value that specifies whether the module is enabled. If the `enabled` +option is missing from the configuration block, the module is enabled by +default. + +[float] +[[monitor-schedule]] +==== `schedule` + +A cron-like expression that specifies the task schedule. For example: + +* `*/5 * * * * * *` runs the task every 5 seconds (for example, at 10:00:00, +10:00:05, and so on). +* `@every 5s` runs the task every 5 seconds from the time when {beatname_uc} was +started. + +The `schedule` option uses a cron-like syntax based on https://github.com/gorhill/cronexpr#implementation[this `cronexpr` implementation], +but adds the `@every` keyword. + +For stats on the execution of scheduled tasks, you can enable the HTTP stats server with `http.enabled: true` in heartbeat.yml, then run `curl http://localhost:5066/stats | jq .heartbeat.scheduler` to view the scheduler's stats. Stats are provided for both jobs and tasks. Each scheduled run of a monitor is considered a single job, while portions of the work a job does, like DNS lookups and network requests, are defined as tasks. The stats provided are: + +* **jobs.active:** The number of actively running jobs/monitors. +* **jobs.missed_deadline:** The number of jobs that executed after their scheduled time. This can be caused either by overlong timeouts in the previous job or by high load preventing Heartbeat from keeping up with work. +* **tasks.active:** The number of tasks currently running. 
+* **tasks.waiting:** If the global `schedule.limit` option is set, this number will reflect the number of tasks that are ready to execute, but have not been started in order to prevent exceeding `schedule.limit`. + +Also see the <> settings. + +[float] +[[monitor-ipv4]] +==== `ipv4` + +A Boolean value that specifies whether to ping using the ipv4 protocol if +hostnames are configured. The default is `true`. + +[float] +[[monitor-ipv6]] +==== `ipv6` + +A Boolean value that specifies whether to ping using the ipv6 protocol +if hostnames are configured. The default is `true`. + +[float] +[[monitor-mode]] +==== `mode` + +If `mode` is `any`, the monitor pings only one IP address for a hostname. If +`mode` is `all`, the monitor pings all resolvable IPs for a hostname. The +`mode: all` setting is useful if you are using a DNS load balancer and want to +ping every IP address for the specified hostname. The default is `any`. + +[float] +[[monitor-timeout]] +==== `timeout` + +The total running time for each ping test. This is the total time allowed for +testing the connection and exchanging data. The default is 16 seconds (16s). + +If the timeout is exceeded, {beatname_uc} publishes a `service-down` event. If the +value specified for `timeout` is greater than `schedule`, intermediate checks +will not be executed by the scheduler. + +[float] +[[monitor-fields]] +==== `fields` + +Optional fields that you can specify to add additional information to the +output. For example, you might add fields that you can use for filtering log +data. Fields can be scalar values, arrays, dictionaries, or any nested +combination of these. By default, the fields that you specify here will be +grouped under a `fields` sub-dictionary in the output document. To store the +custom fields as top-level fields, set the `fields_under_root` option to true. +If a duplicate field is declared in the general configuration, then its value +will be overwritten by the value declared here. + +[float] +[[monitor-fields-under-root]] +==== `fields_under_root` + +If this option is set to true, the custom <> +are stored as top-level fields in the output document instead of being grouped +under a `fields` sub-dictionary. If the custom field names conflict with other +field names added by {beatname_uc}, then the custom fields overwrite the other +fields. + +[float] +[[monitor-tags]] +==== `tags` + +A list of tags that will be sent with the monitor event. This setting is optional. + +[float] +[[monitor-processors]] +==== `processors` + +A list of processors to apply to the data generated by the monitor. + +See <> for information about specifying +processors in your config. + +[float] +[[monitor-keep-null]] +==== `keep_null` + +If this option is set to true, fields with `null` values will be published in +the output document. By default, `keep_null` is set to `false`. diff --git a/heartbeat/docs/monitors/monitor-http.asciidoc b/heartbeat/docs/monitors/monitor-http.asciidoc new file mode 100644 index 00000000000..1cea32f662f --- /dev/null +++ b/heartbeat/docs/monitors/monitor-http.asciidoc @@ -0,0 +1,246 @@ +[[monitor-http-options]] +=== HTTP options + +Also see <>. + +The options described here configure {beatname_uc} to connect via HTTP and +optionally verify that the host returns the expected response. + +Example configuration: + +[source,yaml] +---- +- type: http + id: myhost + name: My HTTP Host + schedule: '@every 5s' + hosts: ["http://myhost:80"] +---- + +[float] +[[monitor-http-urls]] +==== `hosts` + +A list of URLs to ping. 
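+For example, one monitor definition can check several endpoints; the URLs,
+`id`, and `name` below are placeholders for your own services:
+
+[source,yaml]
+----
+- type: http
+  id: my-endpoints
+  name: My Endpoints
+  schedule: '@every 10s'
+  hosts: ["http://myhost:80", "https://myhost:443"]
+----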
+ +[float] +[[monitor-http-max-redirects]] +==== `max_redirects` + +The total number of redirections Heartbeat will follow. Defaults to 0, meaning Heartbeat will not follow redirects, +but will report the status of the redirect. If set to a number greater than 0, Heartbeat will follow that number of redirects. + +When this option is set to a value greater than zero, the `monitor.ip` field will no longer be reported, as multiple +DNS requests across multiple IPs may return multiple IPs. Fine-grained network timing data will also not be recorded, as with redirects +that data will span multiple requests. Specifically, the fields `http.rtt.content.us`, `http.rtt.response_header.us`, +`http.rtt.total.us`, `http.rtt.validate.us`, `http.rtt.write_request.us` and `dns.rtt.us` will be omitted. + +[float] +[[monitor-http-proxy-url]] +==== `proxy_url` + +The HTTP proxy URL. This setting is optional. Example: `http://proxy.mydomain.com:3128` + +[float] +[[monitor-http-username]] +==== `username` + +The username for authenticating with the server. The credentials are passed +with the request. This setting is optional. + +You need to specify credentials when your `check.response` settings require it. +For example, you can check for a 403 response (`check.response.status: [403]`) +without setting credentials. + +[float] +[[monitor-http-password]] +==== `password` + +The password for authenticating with the server. This setting is optional. + +[float] +[[monitor-http-tls-ssl]] +==== `ssl` + +The TLS/SSL connection settings for use with the HTTPS endpoint. If you don't +specify settings, the system defaults are used. + + +Example configuration: + +[source,yaml] +------------------------------------------------------------------------------- +- type: http + id: my-http-service + name: My HTTP Service + hosts: ["https://myhost:443"] + schedule: '@every 5s' + ssl: + certificate_authorities: ['/etc/ca.crt'] + supported_protocols: ["TLSv1.0", "TLSv1.1", "TLSv1.2"] +------------------------------------------------------------------------------- + +Also see <> for a full description of the `ssl` options. + +[float] +[[monitor-http-response]] +==== `response` + +Controls the indexing of the HTTP response body contents to the `http.response.body.contents` field. + +Set `response.include_body` to one of the options listed below. + +*`on_error`*:: Include the body if an error is encountered during the check. This is the default. +*`never`*:: Never include the body. +*`always`*:: Always include the body with checks. + +Set `response.include_body_max_bytes` to control the maximum size of the stored body contents. Defaults to 1024 bytes. + +[float] +[[monitor-http-check]] +==== `check` + +An optional `request` to send to the remote host and the expected `response`. + +Example configuration: + +[source,yaml] +------------------------------------------------------------------------------- +- type: http + id: my-http-host + name: My HTTP Service + hosts: ["http://myhost:80"] + check.request.method: HEAD + check.response.status: [200] + schedule: '@every 5s' +------------------------------------------------------------------------------- + + +Under `check.request`, specify these options: + +*`method`*:: The HTTP method to use. Valid values are `"HEAD"`, `"GET"` and +`"POST"`. +*`headers`*:: A dictionary of additional HTTP headers to send. By default, Heartbeat +will set the 'User-Agent' header to identify itself. +*`body`*:: Optional request body content. 
+
+The following example POSTs an `x-www-form-urlencoded` string
+to the endpoint `/demo/add`:
+
+[source,yaml]
+-------------------------------------------------------------------------------
+- type: http
+  id: demo-service
+  name: Demo Service
+  schedule: '@every 5s'
+  hosts: ["http://localhost:8080/demo/add"]
+  check.request:
+    method: POST
+    headers:
+      'Content-Type': 'application/x-www-form-urlencoded'
+    # urlencode the body:
+    body: "name=first&email=someemail%40someemailprovider.com"
+  check.response:
+    status: [200]
+    body:
+      - Saved
+      - saved
+-------------------------------------------------------------------------------
+
+Under `check.response`, specify these options:
+
+*`status`*:: A list of expected status codes. 4xx and 5xx codes are considered `down` by default. Other codes are considered `up`.
+*`headers`*:: The required response headers.
+*`body`*:: A list of regular expressions to match against the body output. Only a single expression needs to match. HTTP response
+bodies of up to 100 MiB are supported.
+
+The following example examines the
+response body for the strings `saved` or `Saved` and expects a 200 or 201 status code:
+
+[source,yaml]
+-------------------------------------------------------------------------------
+- type: http
+  id: demo-service
+  name: Demo Service
+  schedule: '@every 5s'
+  hosts: ["http://localhost:8080/demo/add"]
+  check.request:
+    method: POST
+    headers:
+      'Content-Type': 'application/x-www-form-urlencoded'
+    # urlencode the body:
+    body: "name=first&email=someemail%40someemailprovider.com"
+  check.response:
+    status: [200, 201]
+    body:
+      - Saved
+      - saved
+-------------------------------------------------------------------------------
+
+*`json`*:: A list of <> expressions executed against the body when parsed as JSON. Body sizes
+must be less than or equal to 100 MiB.
+
+The following configuration shows how to check the response when the body
+contains JSON:
+
+[source,yaml]
+-------------------------------------------------------------------------------
+- type: http
+  id: demo-service
+  name: Demo Service
+  schedule: '@every 5s'
+  hosts: ["https://myhost:443"]
+  check.request:
+    method: GET
+    headers:
+      'X-API-Key': '12345-mykey-67890'
+  check.response:
+    status: [200]
+    json:
+      - description: check status
+        condition:
+          equals:
+            status: ok
+-------------------------------------------------------------------------------
+
+The following configuration shows how to check the response for multiple regex
+patterns:
+
+[source,yaml]
+-------------------------------------------------------------------------------
+- type: http
+  id: demo-service
+  name: Demo Service
+  schedule: '@every 5s'
+  hosts: ["https://myhost:443"]
+  check.request:
+    method: GET
+    headers:
+      'X-API-Key': '12345-mykey-67890'
+  check.response:
+    status: [200]
+    body:
+      - hello
+      - world
+-------------------------------------------------------------------------------
+
+The following configuration shows how to check the response with a multiline
+regex:
+
+[source,yaml]
+-------------------------------------------------------------------------------
+- type: http
+  id: demo-service
+  name: Demo Service
+  schedule: '@every 5s'
+  hosts: ["https://myhost:443"]
+  check.request:
+    method: GET
+    headers:
+      'X-API-Key': '12345-mykey-67890'
+  check.response:
+    status: [200]
+    body: '(?s)first.*second.*third'
+-------------------------------------------------------------------------------
diff --git a/heartbeat/docs/monitors/monitor-icmp.asciidoc b/heartbeat/docs/monitors/monitor-icmp.asciidoc
new file mode 100644
index 00000000000..ccd0ba5f397
--- /dev/null
+++ b/heartbeat/docs/monitors/monitor-icmp.asciidoc
@@ -0,0 +1,39 @@
+[[monitor-icmp-options]]
+=== ICMP options
+
+Also see <>.
+
+The options described here configure {beatname_uc} to use ICMP (v4 and v6) Echo
+Requests to check the configured hosts. Please note that on most platforms you
+must execute {beatname_uc} with elevated permissions to perform ICMP pings.
+
+On Linux, regular users may perform pings if the right file capabilities are set. Run
+`sudo setcap cap_net_raw+eip /path/to/heartbeat` to grant {beatname_uc} ping capabilities.
+Alternatively, you can grant ping permissions to the user that {beatname_uc} runs as. To grant ping permissions
+in this way, run `sudo sysctl -w net.ipv4.ping_group_range='myuserid myuserid'`.
+
+Other platforms may require {beatname_uc} to run as root or administrator to execute pings.
+
+Example configuration:
+
+[source,yaml]
+----
+- type: icmp
+  id: ping-myhost
+  name: My Host Ping
+  hosts: ["myhost"]
+  schedule: '*/5 * * * * * *'
+----
+
+[float]
+[[monitor-icmp-hosts]]
+==== `hosts`
+
+A list of hosts to ping.
+
+[float]
+[[monitor-icmp-wait]]
+==== `wait`
+
+The duration to wait before emitting another ICMP Echo Request. The default is 1
+second (1s).
diff --git a/heartbeat/docs/monitors/monitor-tcp.asciidoc b/heartbeat/docs/monitors/monitor-tcp.asciidoc
new file mode 100644
index 00000000000..10d39b06302
--- /dev/null
+++ b/heartbeat/docs/monitors/monitor-tcp.asciidoc
@@ -0,0 +1,134 @@
+[[monitor-tcp-options]]
+=== TCP options
+
+Also see <>.
+
+The options described here configure {beatname_uc} to connect via TCP and
+optionally verify the endpoint by sending and/or receiving a custom payload.
+
+Example configuration:
+
+[source,yaml]
+----
+- type: tcp
+  id: my-host-services
+  name: My Host Services
+  hosts: ["myhost"]
+  ports: [80, 9200, 5044]
+  schedule: '@every 5s'
+----
+
+[float]
+[[monitor-tcp-hosts]]
+==== `hosts`
+
+A list of hosts to ping. The entries in the list can be:
+
+* A plain host name, such as `localhost`, or an IP address. If you specify this
+option, you must also specify a value for <<monitor-tcp-ports,`ports`>>. If the
+monitor is <<monitor-tcp-tls-ssl,configured to use SSL>>, {beatname_uc} establishes an
+SSL/TLS-based connection. Otherwise, it establishes a plain TCP connection.
+* A hostname and port, such as `localhost:12345`. {beatname_uc} connects
+to the port on the specified host. If the monitor is
+<<monitor-tcp-tls-ssl,configured to use SSL>>, {beatname_uc} establishes an
+SSL/TLS-based connection. Otherwise, it establishes a TCP connection.
+* A full URL using the syntax `scheme://<host>:[port]`, where:
+** `scheme` is one of `tcp`, `plain`, `ssl`, or `tls`. If `tcp` or `plain` is
+specified, {beatname_uc} establishes a TCP connection even if the monitor is
+configured to use SSL. If `tls` or `ssl` is specified, {beatname_uc} establishes
+an SSL connection. However, if the monitor is not configured to use SSL, the
+system defaults are used (currently not supported on Windows).
+** `host` is the hostname.
+** `port` is the port number. If `port` is missing in the URL, the
+<<monitor-tcp-ports,`ports`>> setting is required.
+
+[float]
+[[monitor-tcp-ports]]
+==== `ports`
+
+A list of ports to ping if the host specified in <<monitor-tcp-hosts,`hosts`>>
+does not contain a port number. It is generally preferable to use a single value here,
+since each port is monitored using a separate `id`: the configured `id` value is
+used as a prefix in the {beatname_uc} data, while the configured `name` is shared across the events
+sent via this check.
+
+[float]
+[[monitor-tcp-check]]
+==== `check`
+
+An optional payload string to send to the remote host and the expected answer.
+If no payload is specified, the endpoint is assumed to be available if the
+connection attempt was successful. If `send` is specified without `receive`,
+any response is accepted as OK. If `receive` is specified without `send`, no
+payload is sent, but the client expects to receive a payload in the form of a
+"hello message" or "banner" on connect.
+
+Example configuration:
+
+[source,yaml]
+-------------------------------------------------------------------------------
+- type: tcp
+  id: echo-service
+  name: Echo Service
+  hosts: ["myhost"]
+  ports: [7]
+  check.send: 'Hello World'
+  check.receive: 'Hello World'
+  schedule: '@every 5s'
+-------------------------------------------------------------------------------
+
+[float]
+[[monitor-tcp-proxy-url]]
+==== `proxy_url`
+
+The URL of the SOCKS5 proxy to use when connecting to the server. The value
+must be a URL with a scheme of `socks5://`.
+
+If the SOCKS5 proxy server requires client authentication, then a username and
+password can be embedded in the URL, as shown in the following example.
+
+[source,yaml]
+-------------------------------------------------------------------------------
+  proxy_url: socks5://user:password@socks5-proxy:2233
+-------------------------------------------------------------------------------
+
+When using a proxy, hostnames are resolved on the proxy server instead of on
+the client. You can change this behavior by setting the
+`proxy_use_local_resolver` option.
+
+[float]
+[[monitor-tcp-proxy-use-local-resolver]]
+==== `proxy_use_local_resolver`
+
+A Boolean value that determines whether hostnames are resolved locally instead
+of being resolved on the proxy server. The default value is `false`, which means
+that name resolution occurs on the proxy server.
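+
+For example, the following sketch (the proxy host `socks5-proxy` is
+hypothetical) resolves `myhost` on the client rather than on the proxy:
+
+[source,yaml]
+-------------------------------------------------------------------------------
+- type: tcp
+  id: proxied-check
+  name: Proxied Check
+  hosts: ["myhost"]
+  ports: [443]
+  schedule: '@every 5s'
+  proxy_url: socks5://socks5-proxy:2233
+  proxy_use_local_resolver: true
+-------------------------------------------------------------------------------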
+
+[float]
+[[monitor-tcp-tls-ssl]]
+==== `ssl`
+
+The TLS/SSL connection settings. If the monitor is
+<>, it will attempt an SSL
+handshake. If `check` is not configured, the monitor will only check whether
+it can establish an SSL/TLS connection. This check can fail either at the TCP level
+or during certificate validation.
+
+Example configuration:
+
+[source,yaml]
+-------------------------------------------------------------------------------
+- type: tcp
+  id: tls-mail
+  name: TLS Mail
+  hosts: ["mail.example.net"]
+  ports: [465]
+  schedule: '@every 5s'
+  ssl:
+    certificate_authorities: ['/etc/ca.crt']
+    supported_protocols: ["TLSv1.0", "TLSv1.1", "TLSv1.2"]
+-------------------------------------------------------------------------------
+
+Also see <> for a full description of the `ssl` options.
diff --git a/heartbeat/hbtest/hbtestutil.go b/heartbeat/hbtest/hbtestutil.go
index 548bc42a0eb..246104638e9 100644
--- a/heartbeat/hbtest/hbtestutil.go
+++ b/heartbeat/hbtest/hbtestutil.go
@@ -18,10 +18,12 @@ package hbtest
 
 import (
+	"crypto/tls"
 	"crypto/x509"
 	"fmt"
 	"io"
 	"io/ioutil"
+	"net"
 	"net/http"
 	"net/http/httptest"
 	"net/url"
@@ -29,6 +31,10 @@ import (
 	"strconv"
 	"strings"
 	"testing"
+	"time"
+
+	"github.com/elastic/beats/v7/heartbeat/monitors/active/dialchain/tlsmeta"
+	"github.com/elastic/beats/v7/libbeat/common"
 
 	"github.com/elastic/beats/v7/heartbeat/hbtestllext"
@@ -107,13 +113,25 @@ func ServerPort(server *httptest.Server) (uint16, error) {
 
 // TLSChecks validates the given x509 cert at the given position.
 func TLSChecks(chainIndex, certIndex int, certificate *x509.Certificate) validator.Validator {
-	return lookslike.MustCompile(map[string]interface{}{
-		"tls": map[string]interface{}{
-			"rtt.handshake.us":             isdef.IsDuration,
-			"certificate_not_valid_before": certificate.NotBefore,
-			"certificate_not_valid_after":  certificate.NotAfter,
-		},
-	})
+	expected := common.MapStr{}
+	// This function is well tested independently, so we just test that things match up here.
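+	// Build the expected TLS fields by running tlsmeta over a synthetic
+	// TLS 1.3 connection state that wraps the certificate under test.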
+	tlsmeta.AddTLSMetadata(expected, tls.ConnectionState{
+		Version:           tls.VersionTLS13,
+		HandshakeComplete: true,
+		CipherSuite:       tls.TLS_AES_128_GCM_SHA256,
+		ServerName:        certificate.Subject.CommonName,
+		PeerCertificates:  []*x509.Certificate{certificate},
+	}, time.Duration(1))
+
+	expected.Put("tls.rtt.handshake.us", isdef.IsDuration)
+
+	return lookslike.MustCompile(expected)
+}
+
+// TLSCertChecks validates the certificate metadata fields derived from the
+// given x509 certificate.
+func TLSCertChecks(certificate *x509.Certificate) validator.Validator {
+	expected := common.MapStr{}
+	tlsmeta.AddCertMetadata(expected, []*x509.Certificate{certificate})
+	return lookslike.MustCompile(expected)
 }
 
 // BaseChecks creates a skima.Validator that represents the "monitor" field present
@@ -196,6 +214,14 @@ func ErrorChecks(msgSubstr string, errType string) validator.Validator {
 	})
 }
 
+// ExpiredCertChecks validates the error and certificate fields expected when
+// the given certificate has expired.
+func ExpiredCertChecks(cert *x509.Certificate) validator.Validator {
+	msg := x509.CertificateInvalidError{Cert: cert, Reason: x509.Expired}.Error()
+	return lookslike.Compose(
+		ErrorChecks(msg, "io"),
+		TLSCertChecks(cert),
+	)
+}
+
 // RespondingTCPChecks creates a skima.Validator that represents the "tcp" field present
 // in all heartbeat events that use a Tcp connection as part of their DialChain
 func RespondingTCPChecks() validator.Validator {
@@ -215,3 +241,23 @@ func CertToTempFile(t *testing.T, cert *x509.Certificate) *os.File {
 	certFile.WriteString(x509util.CertToPEMString(cert))
 	return certFile
 }
+
+// StartHTTPSServer starts an HTTPS server with the given certificate on an
+// ephemeral local port, returning the host, port, parsed certificate, and a
+// function that closes the server.
+func StartHTTPSServer(t *testing.T, tlsCert tls.Certificate) (host string, port string, cert *x509.Certificate, doClose func() error) {
+	cert, err := x509.ParseCertificate(tlsCert.Certificate[0])
+	require.NoError(t, err)
+
+	// A full HTTP stack isn't the point here; serve a trivial handler over a
+	// TLS listener, since the certificate is what's under test.
+	l, err := tls.Listen("tcp", "127.0.0.1:0", &tls.Config{
+		Certificates: []tls.Certificate{tlsCert},
+	})
+	require.NoError(t, err)
+
+	srv := &http.Server{Handler: HelloWorldHandler(200)}
+	go func() {
+		srv.Serve(l)
+	}()
+
+	host, port, err = net.SplitHostPort(l.Addr().String())
+	require.NoError(t, err)
+	return host, port, cert, srv.Close
+}
diff --git a/heartbeat/heartbeat.reference.yml b/heartbeat/heartbeat.reference.yml
index a4ec3030dd6..306569f0a2c 100644
--- a/heartbeat/heartbeat.reference.yml
+++ b/heartbeat/heartbeat.reference.yml
@@ -25,8 +25,11 @@ heartbeat.monitors:
 - type: icmp # monitor type `icmp` (requires root) uses ICMP Echo Request to ping
              # configured hosts
-  # Monitor name used for job name and document type.
-  #name: icmp
+  # ID used to uniquely identify this monitor in Elasticsearch even if the config changes
+  id: my-monitor
+
+  # Human-readable display name for this service in Uptime UI and elsewhere
+  name: my-icmp-monitor
 
   # Enable/Disable monitor
   #enabled: true
@@ -89,9 +92,11 @@ heartbeat.monitors:
 - type: tcp # monitor type `tcp`. Connect via TCP and optionally verify endpoint
             # by sending/receiving a custom payload
+  # ID used to uniquely identify this monitor in Elasticsearch even if the config changes
+  id: my-monitor
 
-  # Monitor name used for job name and document type
-  #name: tcp
+  # Human-readable display name for this service in Uptime UI and elsewhere
+  name: my-tcp-monitor
 
   # Enable/Disable monitor
   #enabled: true
@@ -165,9 +170,11 @@ heartbeat.monitors:
   #keep_null: false
 
 - type: http # monitor type `http`. Connect via HTTP and optionally verify the response
+  # ID used to uniquely identify this monitor in Elasticsearch even if the config changes
+  id: my-http-monitor
 
-  # Monitor name used for job name and document type
-  #name: http
+  # Human-readable display name for this service in Uptime UI and elsewhere
+  name: My Monitor
 
   # Enable/Disable monitor
   #enabled: true
@@ -670,6 +677,27 @@ output.elasticsearch:
   # The pin is a base64 encoded string of the SHA-256 fingerprint.
   #ssl.ca_sha256: ""
 
+  # Enable Kerberos support. Kerberos is automatically enabled if any Kerberos setting is set.
+  #kerberos.enabled: true
+
+  # Authentication type to use with Kerberos. Available options: keytab, password.
+  #kerberos.auth_type: password
+
+  # Path to the keytab file. It is used when auth_type is set to keytab.
+  #kerberos.keytab: /etc/elastic.keytab
+
+  # Path to the Kerberos configuration.
+  #kerberos.config_path: /etc/krb5.conf
+
+  # Name of the Kerberos user.
+  #kerberos.username: elastic
+
+  # Password of the Kerberos user. It is used when auth_type is set to password.
+  #kerberos.password: changeme
+
+  # Kerberos realm.
+  #kerberos.realm: ELASTIC
+
 #----------------------------- Logstash output ---------------------------------
 #output.logstash:
   # Boolean flag to enable or disable the output module.
@@ -938,6 +966,9 @@ output.elasticsearch:
   # never, once, and freely. Default is never.
   #ssl.renegotiation: never
 
+  # Enable Kerberos support. Kerberos is automatically enabled if any Kerberos setting is set.
+  #kerberos.enabled: true
+
   # Authentication type to use with Kerberos. Available options: keytab, password.
   #kerberos.auth_type: password
 
@@ -1524,6 +1555,27 @@ logging.files:
   # never, once, and freely. Default is never.
   #ssl.renegotiation: never
 
+  # Enable Kerberos support. Kerberos is automatically enabled if any Kerberos setting is set.
+  #kerberos.enabled: true
+
+  # Authentication type to use with Kerberos. Available options: keytab, password.
+  #kerberos.auth_type: password
+
+  # Path to the keytab file. It is used when auth_type is set to keytab.
+  #kerberos.keytab: /etc/elastic.keytab
+
+  # Path to the Kerberos configuration.
+  #kerberos.config_path: /etc/krb5.conf
+
+  # Name of the Kerberos user.
+  #kerberos.username: elastic
+
+  # Password of the Kerberos user. It is used when auth_type is set to password.
+  #kerberos.password: changeme
+
+  # Kerberos realm.
+ #kerberos.realm: ELASTIC + #metrics.period: 10s #state.period: 1m diff --git a/heartbeat/heartbeat.yml b/heartbeat/heartbeat.yml index c64fcd0bcad..aa3e1283f70 100644 --- a/heartbeat/heartbeat.yml +++ b/heartbeat/heartbeat.yml @@ -22,13 +22,14 @@ heartbeat.config.monitors: # Configure monitors inline heartbeat.monitors: - type: http - + # ID used to uniquely identify this monitor in elasticsearch even if the config changes + id: my-monitor + # Human readable display name for this service in Uptime UI and elsewhere + name: My Monitor # List or urls to query urls: ["http://localhost:9200"] - # Configure task schedule schedule: '@every 10s' - # Total test connection and data exchange timeout #timeout: 16s @@ -121,13 +122,12 @@ output.elasticsearch: processors: - add_observer_metadata: - # Optional, but recommended geo settings for the location Heartbeat is running in - #geo: - # Token describing this location - #name: us-east-1a - - # Lat, Lon " - #location: "37.926868, -78.024902" + # Optional, but recommended geo settings for the location Heartbeat is running in + #geo: + # Token describing this location + #name: us-east-1a + # Lat, Lon " + #location: "37.926868, -78.024902" #================================ Logging ===================================== diff --git a/heartbeat/include/fields.go b/heartbeat/include/fields.go index e21fe351e87..a449cc5fe7e 100644 --- a/heartbeat/include/fields.go +++ b/heartbeat/include/fields.go @@ -32,5 +32,5 @@ func init() { // AssetFieldsYml returns asset data. // This is the base64 encoded gzipped contents of fields.yml. func AssetFieldsYml() string { - return "eJzsvXtTHLmSOPr/fApdNuKHOdsUD4ONuXcjfgwwM8TamDH4zJ5Zb9DqKnW3DlVSjaQC92zsd7+hTEmlegCNTfkxy5zdGbq7SkqlUql857+Q3w7enZ6c/vz/kCNJhDSEZdwQM+eaTHnOSMYVS02+GBFuyA3VZMYEU9SwjEwWxMwZOT48J6WS/2SpGf3wL2RCNcuIFPD9NVOaS0G2kt1kM/nhX8hZzqhm5JprbsjcmFLvb2zMuJlXkySVxQbLqTY83WCpJkYSXc1mTBuSzqmYMfjKDjvlLM908sMP6+SKLfYJS/UPhBhucrZvH/iBkIzpVPHScCngK/KTe4e4t/d/IGSdCFqwfbL6fw0vmDa0KFd/IISQnF2zfJ+kUjH4rNgfFVcs2ydGVfiVWZRsn2TU4MfGfKtH1LANOya5mTMBaGLXTBgiFZ9xYdGX/ADvEXJhcc01PJSF99hHo2hq0TxVsqhHGNmJeUrzfEEUKxXTTBguZjCRG7GernfDtKxUysL8J9PoBfyNzKkmQnpocxLQM0LSuKZ5xQDoAEwpyyq307hh3WRTrrSB91tgKZYyfl1DVfKS5VzUcL1zOMf9IlOpCM1zHEEnuE/sIy1Ku+mr25tbL9Y3d9e3n19s7u1v7u4/30n2dp//vhptc04nLNe9G4y7KSeWiuEL/PMSv79iixupsp6NPqy0kYV9YANxUlKudFjDIRVkwkhlj4SRhGYZKZihhIupVAW1g9jv3ZrI+VxWeQbHMJXCUC6IYNpuHYID5Gv/Ochz3ANNqGJEG2kRRbWHNABw7BE0zmR6xdSYUJGR8dWeHjt0dDD53yu0LHOeAnQr+2RlKuX6hKqVEVlh4tp+UyqZVSn8/j8xggumNZ2xOzBs2EfTg8afpCK5nDlEAD24sdzuO3TgT/ZJ9/OIyNLwgv8Z6M7SyTVnN/ZMcEEoPG2/YCpgxU6njapSU1m85XKmyQ03c1kZQkVN9g0YRkSaOVOOfZAUtzaVIqWGiYjyjbRAFISSeVVQsa4YzegkZ0RXRUHVgsjoxMXHsKhyw8s8rF0T9pFre+TnbFFPWEy4YBnhwkgiRXi6vZG/sDyX5Dep8izaIkNnd52AmNL5TEjFLulEXrN9srW5vdPduddcG7se954OpG7ojDCazv0qmzT2nzEJIV1tr/xXTEp0xgRSimPrB+GLmZJVuU+2e+joYs7wzbBL7hg55koJndhNRjY4NTf29FgGauwFN3VbQcXC4pzaU5jn9tyNSMYM/iEVkRPN1LXdHiRXaclsLu1OSUUMvWKaFIzqSrHCPuCGDY+1T6cmXKR5lTHyI6OWD8BaNSnogtBcS6IqYd928yqdwI0GC03+5pbqhtRzyyQnrObHQNkWfspz7WkPkaQqIew5kYggC1u0PuWGvJkzFXPvOS1LZinQLhZOalgqcHaLAOGocSqlEdLYPfeL3ScnOF1qJQE5xUXDubUHcVTDl1hSIE4SmTBqkuj8Hpy9AZnE3ZzNBbkdp2W5YZfCU5aQmjZi7ptJ5lEHbBcEDcKnSC1cE3u/EjNXsprNyR8Vq+z4eqENKzTJ+RUj/06nV3RE3rGMI32USqZMay5mflPc47pK55ZLv5YzbaieE1wHOQd0O5ThQQQiRxQGcaU+Haycs4Ipml9yz3XceWYfDRNZzYs6p/rWc90+S8d+DsIze0SmnCkkH64dIp/xKXAgYFN6LdC1F2rsVaYKEA+8BEdTJbW9/bWhyp6nSWXIGLebZ2PYD7sTDhkR09ijO9Pdzc1pAxHt5Qd29llLfy/4H1a+efi6w31rSRQJG967gYt9wgiQMc9uXV7WWJ799xALdGILnK+YI3R2UBOKTyE7xCtoxq8ZyC1UuNfwaffznOXltMrtIbKH2q0wDGxuJPnJHWjChTZUpE6OafEjbScGpmSJxF2npL5OWUkVnO
IwNtdEMJahAnIz5+m8O1U42aks7GRWvo7WfTK1kq/nPLBUZEn+Kzk1TJCcTQ1hRWkW3a2cStnYRbtRQ+zixaK8Y/s8t7MTEG3oQhOa39j/BNxaWVDPPWnitjpxHN+1t3lSo0YEnh2wWj+LJO6mmLD6EbjC+LSx8fWOtQmgsfkFTedWJ+iiOB7H49lpmwOg+u9Oj20iuwXTi2Qz2VxX6XYsxuiGDFMZKWQhK03O4Uq4R545EITWr+AtQp4dnK/hwXTSiQMslUIw0BhPhGFKMEPOlDQylbmD9NnJ2RpRsgJ9sVRsyj8yTSqRMbzIrbCkZG4Hs9xNKlJIxYhg5kaqKyJLq0dKZQUer+SxOc2n9gVK7H2XM0KzgguujT2Z1164smNlskBJjBri9FZcRFFIMSJpzqjKFwH7UxByA7Qy5+kCBMs5s6IvLDBZ+sIUVTEJAs1dV2Uuw63d2Ap3JeA4VhGVKQhXDqLONjl5I3wdCN7tohvo2cH56RqpYPB8Ud84GoXngHo8EyeNdUekt7W79eJVY8FSzajgfwJ7TLrXyOeICaCmXMZYjlid1+9IV+UjIGOpQu+TKc11fSNkbEqr3OCQzR8be/A2WhPM18HDz1JaGnz9+jA6g2nOW7rEYf3NHcrEgXvTHjZPj1Q7AuSG27OApO+3yR1BC95UempzSoJiM6oyEB6tbCiFHkXPo+A44Whu49Jqn9Nc3hDFUqtXNVTXi8MzNyreTDWYHdjsF/bxCDI4gJqJoDLYZ87/cUpKml4x80yvJTALarulYyGdqdCsZEW7xqRe11FgM2PawuGkcY8lo6jQFIBJyLksWJCPK416hmGqICveVibVSq1ZKzb13MqBIloL1Hj03M9OD8SdnbCgB4EeGCHAHUsLlpj5ba6niOFHjdYRkZ/A3l6VrixC3Ki1AsaFBe+flcANAH0MNSxvyewZrMavkKYzpBWscL/W4UR7E1IwPOF4G36eYCqEw4OiGs0yollBheEp8H720Tipjn1EeX2EQpTnCDrIdkaSa26Xy/9ktXJtF8oUKNyam4q67TiZkoWsVJhjSvPcE5+/ESw3nUm1GNlHvVCiDc9zwoRVLx3don3SCi4Z08aSh0WpRdiU53lgaLQslSwVp4bliwcoVjTLFNN6KJ0KqB21aEdbbkIn/wQ2U0z4rJKVzhdIzfBOYJg3Fi1aFgzssiTnGuxWJ2cjQv09KxWh9mL5SLS0dJIQ8o8as05MA8Nhza/njCh642HydD9O3BdjRFlTyhRWCa+FyKxC2yFejeOEl2MLyjhBsMYjkrGSicyJ+SijS1EDASq927Faikr+113gVCdPd3gE1WRhmL5HtI/2Hi08zdcagPxof0DrTvCwuDPpSAJZZ3er9nYagCFhD6B0OB6O4yeNOWdMJik3i8uBDASHVmbv3Z03VkdgNO+CI4XhggkzFEynkbEiTNaB71QqMycHBVM8pT1AVsKoxSXX8jKV2SCowynIyflbYqfoQHh4cCtYQ+2mA6l3Qw+poFkXU8Ae71emZ0xelpKHu6npHJBixk2V4X2dUwMfOhCs/jdZycHVtP7yefJia2fv+eaIrOTUrOyTnd1kd3P31dYe+Z/VDpCPyxNbNkDN1Lq/j6OfUOL36BkRZwNBKUxOyUxRUeVUcbOIL9YFSe0FD2JndIEe+nszWJiQwrlCiSpl9sZwwvc0l1K5i2cEFpU5r0Xb+oZC8HJSzhea2z+8hyP1x1pHIJxKE7lxwX/D0e5QwAU5Y9KvtmuHmUhtpFjP0s7eKDbjUgx50t7BDHcdtPVfD2+Da6Cj5mDqPWm/VmzCmoji5T0whAeaxHlyFoQ0zxHhsogpC42x3pDjXYsnZ9c79ouTs+sXtfDZkrcKmg6AmzcHh7dBTRo2b5O08dJ7rG/BzYVVL1FLOjmzEzmdAQNTTg8uggJOnrFkljhrEs1jQwFBbdMbmhqujXBWIp3TKrVgfhQzkkuakQnNqUjh6E65YjdW5QEdX8nKnugWxu2iS6nMwwRcL+Roo3i/1Btjw47/veADddsHyHuNVZ/h258k3W034ejsyTJC5+37ceb24Dbit9xJG6ZYdtknVz7e9WaVmzmfzZk20aQeRzj3CBZSlizzIOtq4sXRsP8/1T4evKai4ZwuOpUKwkiSGcj2SSqLFcI1WYk+t11PGE7jXEoZM0wVcBWXiqVcW10L7CgUtV9wxEIYUTXJeUp0NZ3yj2FEeObZ3Jhyf2MDH8EnrI61lpALtbCUaiQaDj5ye/Xh9TpZEM2LMl8QQ6/qXUVtOafagF8DY2lQMRfSEFD6bliew9ovXh/Vzt+VVCbV1Ur3Lq2R0SAJI8tL2P4vQBFsOrUH+JrZWZ1M4/bwGbt4fbQ2Qm/OlZA3wlvJGmARh/qRN0cCikpak70bD67ILvG05w3DWjzWGALq+b7JBkjmNoqpN2I52oHvG2RTaaaSYSkm1sjQcC0VmoPt5OijKhiYSeT0No5BBXl9dHAGoRC44qMwVEwqq93VsYLyfKDFWfGfwAReZkm6AEyrPO+RJL9Lw4xd8KomdkkwHSgY9JrynE7yrjB7kE+YMuSYC22YI7EGbsDO+tUIEGYfngJxkYPF4HTjUKYu5grX513lYJHcKHNqrATSQ6gI54DqcrwTOFkXiDnV88G0dcQU8B07j+XJqVSKWdG3EfA1RcM4MChBqJBiEYePohAXkcp7zVwwyxhWwTM0aMMHu7pxCDJMpZjiXtG8MScVmb2SakcO8VHBfUQ1SExTh5SCDgZzdqF4PAX5q7G087mVttGqAsGFXHQXHfE0Cjyt4TmWFS4vOI79F7f7jTHRgCDpBf8CDEXAGTpVNAQf12GV6ADCmCSvTkBkErk1jHJK3jCjeIrhTToOn6KCHB9uY/CUpb4pM+mcaTAqRaMTbrSLXK2BtJTbDLhuRM5yHcJymiC4cVUlXEisYoU0IYiHyMponrFopjZkCBMlLmbTL8gTmKhfdQaxZmw4DloPBMGpbnKv8tlhua5BdQh7iIswBXPtcFx/9aJGEM4FQbmx44RnIdDanegFyfh0ylSssIPZj0N4sb0H7TFcN0xQYQgT11xJUTRtRjVtHfx2Hibn2cg7ZYD+ydt3P5OTDEOhIUigajOXroD64sWLly9f7u3tvXrV8nOhiMFzbhaXf9aewMfG6kE0D7HzWKyg+xFoGo5KfYg6zKHS64xqs77VsuC5+LXhyOHExy2eHHnuBbD6Q9gGlK9vbT/f2X3xcu/VJp2kGZtu9kM8oDgQYI4jTLtQR/ZG+LIbKPloEL3xfCCKmbwTjWY7KVjGq6YyXip5zbOlHNGf7eOCs+YnTPzhjPN+6I0eEfpnpdiIzNJyFA6yVCTjM25oLlNGRfemu9GNZaFRfKBFOZv4Jx63+DqWGbvUfCaovTob97LMGDlv/HL7BX0xZ5q1E0Qa4hrcdBMuqFrApCRMqpcPOcTg8HtEqImUOaOiD20/4k8gydIShAWOcZYOFos+F9XT9akZVbHVMOwt8pIHVRtqqsGCXg6yjLuQti6WgdKZstdGakV1BKUnDr1COdyli
czstZ2qRWnkTNFyzlPClJIK87g6o17TnGexR86qUarSxs9HXjN6zUgloqgtPIb+1foVfz7r8cOwN1STSqRzll6xnhj/43fv3r67fH968e79+cXx0eW7t28vlt6jCjMSB3JcnePwDYYdSD/wuzoMgKdKajk15FCqUjbC8O9dCqCRLXNf3nE8Vs+NVAzl03gre7aHpPOmyfrvdk8pRPrVr9/2HqRhYeKdD20ageRq+VitNYIo6uKgpMgXzRysyYIYKXONUWwUzAyQFcPSK5RNkQ47JPOwgwzE+pl47ec7aGKBK6XJga6ZsiJfRujMCuGRNjdnNQ8Vpilp9h432kD+PWdpGcTUFwcweUfG4c6Iv7wjDjg82Iz1dFGYnXzeKMOwZKldjQMyQIFE4Ozjzhsnp/EgUXJ4dFfNWV5GVg1QdNCLF4bWToUSC3uzGh7MVsvcWEMaHurF86wp/PGCzgYVRmOhCiYLIUQIkCW0ScVzY/XAHtAMnQ0EWU1ZDi46a5mZo5T1u6ePUtfvSF5vi+kwq8sDb8w74HbUi66jJIIcijQ7lCCKo5OCCjpD5s91TQgdIQpT5iM+EoUcx5zkqPX1HbwkevTu0HRkuNHTEHaEbvGNZuZ4z5hRNPp9cejIflwc+rcYKN2I814qWjrcMq7axCNFS4dhIWr6KVr6KVr6f3e0dHwwfVCNKy3T3q8vFTIds8KnuOmnuOnHAekpbnp5nD3FTT/FTX9PcdPRJfa9BU83QCfDRFDz0s4W3/T3hA2zRrxwqfg1NYwcvfl9rS9iGE4N6CHfVNA0ROlGxhm3UjDZ1LgxkkwWgIkjBiWGHn+FQ4RBP0Bs+3Kx0LfS8tcOiM46EuVTVPRTVPRTVPRTVPRTVPRTVHSb4J6iop+iop+iop+ior9llvbZUdFZjteL9369fg0f7y7Lu0zEFcSb5HyiqOJMk2whaIFqlEe5pJmvfOyKrIJJxv38hoqFq1IXF2l1JaMkWdFzCkmOjXlWXIFcHz6Lhh4fSzepQjV8CPBgBseDWvQ0zz3qpjLP5Q0Xs30Pzd/IES5gPefiys23IM/GSZbn4zVX+M6riFKQ37jI5I2u3z9HcN9iZM6zcaJl33vvBf+4DjJbZ+0dWBpgLHI+6RuwoOnb8+Vdgc2wvOQ7intrQf4UBvfth8G1t+yvExXXWtlTkNxQQXItRD/FzN2CJysxJkW2OxBDfHO0i1M8CB49p1sDAXT+y8HWp0G0vftiOJi2d198GlS7zn47CFS7W9sPg2ogDt3Qdp1w074261KaBS21N3rHPB1aHUlBMq6vusfmiinB8ufbiZd8l1huSc1Qat1PVZ4jxHaSztpbwB/uf3CC5QesOf18+8MnLQgsjCUVi4GWdRLKzuA0nQ0a+WSYjEBrjqLkOVuHGNdHvYhLlkSADb3alov8ExZ7RuM4gvsXZ4e/7K2V/viru24WTn/gyl4kz5NXLzY3k62XO1u7D1ii7+BzCWsdNNHNLfRziPX87ODk9CI5/o/jByzRNdAZel1ums9Z30o4jR8+Hhx7NRf+fhsUVuRNK3cjIFggRKOs/tHp+X0WiJ8asbZ2wqPTc/JHxcDSYAVVKvQNi1p32d9dYrYTWBmHZNdQSrmuee/HWpBScQm2hhkzWEkah3WDPhtnQkOa4z48P15zTXQWfpJ4dLA6+1LMaC6r2xm5EXHaEDqs0VlCdWybcDCgWH3DFKv3Di2nXOM4XSjx1fHaQyKDGyt+9Jj11QNBqFJ04ZGBWHbvo5uIpnMHBtGu6rliplIiMmj6ZniuDFgkMTAC1u0rtnAoq+N1/d7gFmjm+7I1wpEnC3J8eF63zXiHJdxxrLmV4aGtQmwEKOrl4I9+ckFu7FvHh+du+HYEkt1mS34Q9YR+fOxaAr80Q8rtc57MyYEhBRe8qIqR+7K2CrhFFVbjiztoje0sYwscpP53lsF17RsZWWErDEntaCkIK9z4No5Uk1JqzSfob8igIrm9+WltKnFGQx933A8o1STFjjaNOPYWRSZpTgeLWMecfYrROWFDfG5BhhTDofERxpRgYf8Oszw57QU9qtswiIsboI24I0YstDpFusPBKBZN8HF0+GrJRKa97wWyrIFheZTEA/q1dwTtrc3E/18vFoaMW7xoOuEtxUXpyi3QSYll7nWzcRB1xhA5JYenB2+O7YGYMIss+35+zbJRzJxWVzUZo7OkZjEmyl+QwjdekkoxXUqL4mDZiwaBc5mQk8CrhDTe094e0zc3HEN7Bh8sP7Y3D4PGpJ1tubm5SW4Jw/A7Y8wyLufbApUs7iEzB2LIrsFCajk3rBcQ0LsJ3uZE03nM2NkU+FIjz4LrlKqMZQn5nSnpc+gLsNnMXSgqstAaf5MaaThFT1x7P50OWMfgYl7XMPhEFgOk2bQYMJoxdTnNfXPIIczfcGfLKdkmOTOGKeCSODOBmRuFSEpsZVQXO9gnBwcjcnE4Iu+ORuTdwYgcHI3I4dGIHL3tkKz7uE7eHdV/NuPHB3NP2x2yS8PYvdhNTTWYjeuWt0rOFC2QAkOb3oAE+wiIZZhcEw0EWWslr/NxkDnoHg1qe2trq7FuWfbEFT/64p0nSgo0l6MYhemwzhx9xQUE0KEA25BpSWhpGkcvQS9G43FXN4fBwHIcBmVkwAw4CeMxb8XRr++P3/2jgaPAGb+YxODa/LjbAvWSe4WDBgMf8l6EC7EFWnzvBXNaqyCTkGK9VFwY6NeXzim0tFaaPJuwXN6Q59uQeGchIFvbL9ZGEe1L3Xij5uVBQ8J2TEyntLRnimpGtjbhCpnBHB+Ojo7WajH8R5peEZ1TPXca3x+VhKSmMLIbKiEXdKJHJKVKcTpjTnfQKKPmPEq/mzKWxSOkUlwz5YKDP5gR+aDwrQ8C6I85n8aD7tiwzV89FvYp/vWbiX8NRBGQPyQxhElAxastC26BdQvBDol2GYUbaA4qoUusAKCBEYaZRjVqdDXZtuvcShxWgDRGDZzXEDacjF57rcdYGSGJCEmMojyH7oJMcdkv+PYj/Sn6GNnfU/Txg6KPa/r5MgqC05PuFioODg6akrHXVS8/J4fooGOiy3NycmZlOAa1wMaxaWPcsjH4H8fe1Odoh0+nPK1ysCBVmo3IhKW00sEyfU0VZ2bhlaOYUAtqtFUK7VAOrIQcfzTKt/wD+KIKAx5Qg+3PJQGraISccS2uQst3boI5C3slZOyjfbuwVBIPjSIBvgS/M6o5hKiFEevmeiipWOF2Krt1FYN20zadNL/bam8wSMJfQhHwc/WnGp6+hVigBnQDno3V+HAEA78P2chGDtFWJgX6a15e0MOwLtcTOQgglGXGr5mG7oWRa6HRzhAeSxWLQ6UyocMoU4St7SNYFooaAG/wd+6ABhCt+aGNOWChZMqt/5ks0fqaL+wQWspwrzhtDU/HWkIORAb1WlMpasXVYbV59m93VHh7vtXjHE/o8NJg+A3V9dKGC+j48D4X0Btm6HpsrPbVmZw1evnCfve1mVbsj4orlkGhs0eIcDg+PA9+VLjHAn7tYjQxMiFjlurEPTTGCH8PRs0E
QTAC1lNpg/UJIdo777QPJeS3ORO4Z7CB2LU/yGtcZDxlmqyvOyOpc2BYgCw+dc5nc5P3FaWNVgPvR8G1ObMs2upvyrUppdk/Lag+TTGds4K28E8873dL6BqVk81kM6YcpWSjENhx+GLpEGZoQ++dQS7iEsh3AXaNgMf32NC2QPkBn3NuoLJkUNAlZ1gC2aLZMwIIwk+pvYVu8PYJdgzce240y6e1ok0Fjv4AN91AyeWATDT6tNwJCOCdNrhhYvpDekgPBM7QdA8YUfB9z2K9saoxsDY0vbq00sVfIQ3qAoMvU2jenLLg+wGMWmItc/ARso+tfkZfSNANuzvCk+ZK5ZpgYovDF9jHlJV1pnHEKv5Jr2mSUzFLTqs8P5Pgjjj2j8c85LrVUfz4eomG4qGRb28hQd8duT84PJdeXcGag4qnDV4QWM6BfbTVstyyh/ad7G9iaAhWMDPHcxp4U60pvJaBM8HFwUWaV66OO3htqAmuMtC0xKweI9QUtxPVi3Dj+aGoT+ewVKaML2LvStPXDdadTR0VmpDW7sb0/m/Q/eLE7RGW9+rp0j5h5saK+TS0Y3byjLoObmaczDU4Z1DDP82ltms78DtxP7qxlIQ/x1JBbS0otpOTglFdKVZgFwAImu7DbPQYBPoaesUCDcdojsmjxnHBCgkRKkxDP203XFZj2rXVvuaBZxlWgCG/Uiwh5wz3fIzl5+xFN8Zlc+MKPANT0HUL/MiTH45wHJHgILXzamP19MYlvlw1/iWq7XyyroCjBwXBOx+a9feclSPUk8FCk3FYhIjeIidQ+hNIoBZB51R4vPpO6OPadB021zKMMSBknWbZeETG7tysw7lh8NWU52wdxfxsjL4j70Fp3AYg30dBK1gfs8yBwvpq+FeaqfWSam2RuY5hSU2ZwoE+zHZgAgwcpCmZWjXIypKHOKcvkoaBXqhhg5RKDe5IbQsDZcUZtNzW2IE88GTOmaIqncdxxO29qcU/3O6VCZ+RSQX1NlYsfNGInOmmUS2SyHPDlON2rSn23c6OycJdFkFMx94izsrlHgtjQtoENwvnO0PJmmvkWfki7kviZrSbMnad/l2KkWVj9YhEVxMPVpvqw/hejXPzgg2N5rm8sRBa3TJtbpS7d9ySIlMcNVYOga0J+kaEya5qWJm5FfWiulu3y7iPZ0o4cfJlGrk5QzQdLApyxUG/hoy4CHNRdUsfslVpFi6NjOlGZw8nYGpSiajU5YgoNqMqy+PdB+4PTxMrx1T2D6mIXR7ocaBP4UUjr5mCW8Zq8UFk8pIdj7eE+aBNlHPIyVF3G3Ze7Ow1kY8c6B5ekNXGiCZ+3WnAQTrtaNgG3I83VksNvBVuxSlXUUKNYhR4m6XOGeyJVPYzWFFKXrIcej/cQtMZtzJE6orn/F+oH2poUSLboCb+ysRtUE1sJQ+3OUNro5X3fDGeEI3TvlJOBCnslay5qVAZHrmQQ3MjSZjWHbQJ61G5kfX7j2kczSJ8pjVmLOUpJBS5Sjw5hNWgYBRbm1yEgou3RBKvmUQstsC2wKuAdNyTkLGbEW4cl2hBUkjBjazj++ohVldBLfY7Zj/6Xi5GkivGSlKV6EaAl+LD1cSqVasR0iYe7dWKJy6l+Sje2dq9G+Wmx1lV25tbL9Y3d9e3n19s7u1v7u4/30n2dl/+3oxCzKihmt1XQenzKz7gNK3ANNHACLpWwBFeYClbKjDYzOlTVoWQyl83WN+Lpo17JpezkdP/cjlbG8WTh1vESCfjLOratdF5TWURld/Ddlc12LDpiqWyKIBnQy62kCZYtmB4K/c05gZVLwTJFTKr8pr0sYYHJmuj1ENJJrH9legM03PZlDSdsyTCRdjeSi1T+LGnQlbrTS7Kylz6HwUV0kXCef2vMvEDVL/hec57n0EHG9DIVi/hHLmpGzY0Ap7AMG2TkpBPIdbtmcfPzKpNijkfpKmdfo24xj5e5BkNzC4yrwrYPeWd6iJMLBO0dduVUoPauU3aFwnSm704/fderAqA27sGfIZyAupiq6r9gGU9fqF6Tp6VTM1pqe3h08Z+M+VixhSE26yB84/euJvMSLsBFP1Ske2nkEIbZZcPJgMwvFrJsU30dT+pvr8Ofjw8+mJWvZMju5pQMj1Sxlow79Gd6e7mZtaETMxYN6l6eZnkItwJQBeBq1Kl+LWPwGRQfFTR3AWUGqk6EgbIFr7eBAgD4/rCiWXxFl16cSFfEJmmlVIsSxynrG/iXMvO6A1pKp6gYBR7ovu8ZUzwsfd1VImfBAGKaHrTqwOfCKdU2tOFSr9Vw7SuCisxCEns2kDbGQVJwd293jU1V1LIXM4aRT/sVSOvfFgA1/sNXJH/r724+hu/3eOl7uzdZGtz6/els6OveJsZfWN6rg/g+iRFF4076FG0A637Udq2SUhP8WJD/LPp1OH3XBcDcKDFFtrxIkecL1IdHKK13aRXg3bxwV5rQX6HYvus4npOaM6U8YIMnIWGdawVd4CXVnO0loyKayRzeePkcYsqgKCRLRZdcGRORZZDXOGcLcBVdmNVZWGiY6qYXTMYK+svUcwAhCiZ16vmBkaBkw5NYSAASxtLDDdzBmlqIaIdW4qCo8+AW3BW5VSFUPtadVRWuOoReXLm6n4Gp0ksUw0myOIsUY4JRD3DWtqSovOKO/UBFBTkVVVZSuVMNKkUKSsh5AmHRo0ir2YgCXQtKbVbnsJJEF56Rnn4AERBuH/XRv7c4MjjVvhZQxWsXRFgBrTP3yZnNrDuef8QeH9nmTr7aILxwJKzMFyF0/fekf8dUsMtSrSV2CEWhqF0l8n0MuphmHFtJZMMDKNYDgzUWWY5E8tqorfSv4vfgShgozi79rr0+BL3pofVn7OSbL0im3v72y/2tzbR0n14/NP+5v/5l63tnf/3nKWVXQB+ImZu7xFoEcMUfreVuEe3Nt0ftRRoeYGu4JxOK3svayPLkmX+BfyvVum/bW0m9n9bJNPm37aTrWQ72dal+bet7efNOruyMlYx+qYvF6s+ferd4tY39sF4GRMQiB1zLrwxIiMr9VgGX06tM1KeW6klGFRKpnyYdbg/oIo7GmwwnZllvSLMqTQuVQHFO5/eCzWfnSsgMvRnDRMlcgvM72pdfJZX+6ItEXev764WYkbQehctdngn8tomEi0wAv3AXgUiwO8FUYqhcXAJlLLy+hp5FtaGn12SGd7PYdA6PBdFMrdG0PXrimh1cmyoSxO0b7xP7ejRfahDxBUyZnkN1TniDV5qW6/jsBK3sXHI1k+VAnqq0SJcwqzj7GA6g4RcK91qLVPn4cN9uEXkMA3uVtcWsYPXKJi23LSWMvysZh6b3vetRDFu9G6lYhFEFlBCOeQMesBIJhny1YJe1bujmdA9V4lDa4PFDNzGdvU8xKf1nTM0IsOpwuvZh9KeL7SzPHVtzq/lLLKxFigsNS7WOijOK2b+TulpFEG0nJobqthd2VfusMB1f77QhZXO5saU2Ro2v56ib8T1OHIDt4vwhRGfYdmVUV2dZN0tcd3fQesHlVWdxGzttio0jW2ESoSROeX
R9/Gdn4C8f/ea5Fxc+djqu4vZeRdIWyjwo2D1RPD58jT2ITscRiOQg0iCH4XrqJHIHykt+yCuWhaqGPK9QgrwrgAzDB4a7M3VQbLdXb2/seG6Wl0zkUmVpLLAnmsb/7K5CaaPZbVExfXVpY4u79uu82kuaW+M0TuurwiMAOKq4lJxjHBuU6h2RES0zCvQv6Psp/eaOWM+rAzM6c71gEx6zlS7GV+A/dJq9kvQ2K2LWD0F0wD/k2Uw7D0LGmFMgk4peKTCIjYt2WxtbvaYUwrKXQlLV5d2ISvY9qaB2x1VLDAH6Zg6Akg3/Rl2iBtnHtHMkpOol4FYc4GRcH1hyc2WyVKzP6olT+jDelScu4F9a7VbeC1EbrUehfBQhN87AsAUrjtuyRF4ZehVM4WcfaSpIVJlzncdVN/IPxl7J8OpDuazYJjuYOuaRR2AHqXNBGYwYrBNmKB5fhri1l3+o99CrniQ4sKIcU55lK+AT3kzt3f30ihc2jMnnTifR1V6U0gUjhF2AoJ33KzcKVGpFJprEwtEjjJjywdce/YK7K3r4C7fsJ4Js2iGvobjXM4SDb8n/vcklRkbJ573+q/rpIjYuFgHy2LNFTdFW8xtOqmQq/k2KfXRPDk6X0t8NlnjjSAXObIm3OrvNyLMiJHwVh6vQ9zDuKksMQjm9uVGURNhwd1L5GWTpg1dqkXN3W4L9Inc67hwYUCx6yKiCHRh1G7yW3wX9pz+WXeZHCAL427tobEkeyBqxmF3OCwILQsuGNHB3BRHcsVotnCU5C5rT+i1/Tm6JvEAeuIg0ioQN1w3VK00ZSVmNIdJfX4R1Cmg9vhLATL5yZGbfOW4UrJkGweFNkxltFiJsp3pZKLYNSof/vHzi5U11AXIL7/sF0XNTDjN/VPrm7v7m5sray022o26/cbMB2bO1SeGYEG0UtMy0IosWtHVZB1jsVbgph8hSWFcU3R3kFpR7cR3IXkiTx8RJux+6yhgy/HVDPydMrJI4KIg97BUdktB5nTatk/ravcb+4KhVE7hX5SdxmWVGqptyGpbexAwNhSY8xKZdM0pK3uEr5k2fOZX11S9l1AsBJxbPzSmUHCxnrHSzDuj45XUbNVO0L0GQlOIdXe5YgICb0mZ05Tdqp3copXUJ/6ztJNi4fSTYuGyrK2GAnNs7G6/3MpYNlmf7k4213e2t/bW915ON9d3aLqz93KTPt+bsru1F08PU+6M/C7G/Sf/+Y4Q9wMsTNqKh4bCHR3/EISaazKxclEzWMyFbNtfIXbOBynbsd3K/f7/BJVbXR0wJ3ZFphw44GDx9Vvko8D9ZyqyDanqxZJG1MvIVaIIdsPJAqc88XZv8qb2OvznTydv/suXTNR1vLe9ZHnK9FqCL7vwf2eF6Wn8TSHVmGWIzdZ6/HGMvMLO1PSguGmMxfoMwWT1NXVeYhJq6FrRwg/da1n1Jrh6KzWGbxlF0yswqaAVsCf8gxqj+KTqdDYeoEgR4j3MF1//4UtsFIHs+ZqqhaWN0G2G/MIUhqlBFRT2cU4rDeZLSGCXU3e3NLm1ZQvM1z7y8fTueNr7kF+zEdhyIZE4G9X9fewdBY0AYpcJ+8jSyrARmfMsY2IE4ZD4bynyxchxyBG5Udz0mA5X/3PFP7syIiv49Mp/fWql9afOEE+dIZ46Qzx1hnjqDGG+784QvaH9D5MdQA6CcUAYhLrRS4oLEFGHxNZ4vykspFH42mNJN7VA4GQuihE2kAnVL+/gb6GALQzjNhAlh6oEO864sFONncrH7VlhmoxhFeNIX8Vgf8zjwNrbwapnHx1ZTTMNw3lt0sMdV/Bu4auR9/fYVxw2SHa+ad3y1gWA2kSpW/31g7AzFJShwWHIug/qDLRyd1Eqjk3FebCZ4tdRdAQUuHRmh8gU0FnhxlwWbIPmHvNhpXa4SxzmcxfbS9xHCkRRLMR5x2qbhglgzIrl7JpGlua6dVlvNF2UPlGWTFlFFy+AhvkOrs+8r1X+4bJcCVAzYFMDYFlhks5els6uFJrmD1Zh9Ezxwl4E2O7y5Ig8+/nkaO3Oo7S6tbm51TzwtX44NITt3gE9LQbbB+CL9h76Sg2GvmIXoa/YKqiOxR8uOfPEjl3biL2gitxNhL+9Kal9VrZ3Xzzfe948LQUv2OWA1SzenLw5xjhqf7v47E+AFpTCZrciRbRRjELcyWRhIlNCpaEEgzMW3tzcJJwKmkg120CfNySAbhQs43QdLMHx38nHuSny/zw5OD2oWfx0ylNOc7Qb/9fIXRm+3FmC5YJ6csms/FGC3D9x1QTDmJjeGGK/o6X7TLtlGX8xHCW9sYQUo50LIlMrtgfqor2lRFY3X+xstkjoMyXSHoE0SJIUQolBdWgeswFLA5+2G2jhZR7q/fibso73N3FH6g7KfHHP9kUqb8RgkWpoPrYTrIIFRUHa3/330+O29/pqdX2glRh0EYv0k1FrI2FvsTRoR/ht6KdZJFQ+TPjduG3vn7qOPXUde+o69tR17Gt2HYtCefifDwzk6zF62UGsGAEyW6Qxv42Va+SeUMrHRTxwTVbsx55Cw1svnu/tNAA1VM2YufyL3FIXsBq8pyCYYlGAr/+LlZqDfQMJ9RlSYcYVeKgdJGsd6gvu5BBcMWi/ESu5gCHgPRgCVB0LHJVBfHbeshKg4HO7rSBYChhmjbs4gJ/dxzvCAH5mMq6VmVKlFpjEh04tWgv+YGrCDm2hMFGwpTdjPVwzVxleib1lobw4pmJjwCNL55A3XqcYWMhOzryLVCqnbKh1XVk9JdjGlyqhyc1iKP/Sod28XmH0jRRW72tmAmDsDBOD+btOG34uN1m3nrNUZk4OsLBdC8BKGLW45Fr2lJ1+HJThFOTk/G1/tenDg16QhtpBB07vJh5SQVvWbU/V94AyY/KylLHsFauIUsy4gYqKIiM5NfChe8L/m6zkUqzsk/WXz5MXWzt7zzdHZCWnZmWf7Owmu5u7r7b2yP+sfilVcvW9PYI+ZKglnNKAmpH3d2CQnZySmaKiyqmKXdfQTjOFCCvLbKIr9jAuRhLJFly5VGmItMZKS2SaS6lcyPwInXZxlb8wKIKXk3K+0JglB/mGI2APGCPS6tlYpzFBSCIXhFZGFsD9IvbWvegnUhsp1rO0sS+KzbgUQ56sdzDDXQdr/dfDPpgGOloOnt6T9WvFJiz9oc/O7e+v8MXtN5i9VNF4HZVq7Qlnh2d0HbzTco7EYe3LFxgftqdIo1hU8HiZsGDIDimYSyq5raUPFeT10cGZvUEPMC2z9p7F3USaLGQwIej2os+4KNeXEi2+GyFK60vxtxjnAFDyQ0+pIEefv/jP95QSnmPVHyDPmiLrnBP4neYzqbiZF6GyLFcu9CyKoWR55qLZsBIxhKXOsVUWhpq/OdodgQNjDei8VMxx64QcZJkHYxpCHjEC1w0xWUDCuEqp9kalJnDIjC2AaLvGehaQI6ZZSRU1MnQUproRXf1MC3qF8bMjgnlwc/r8cndr+yFNi7+0q+nLe5m+joPpS/qWwnmSulGb+xf/+c64ZQgSbsctu+xusDRUBs
uoaENFlDx1fHgO7yZ/84fg1oz4bpwvTCpFXeQ51ntCEW1QNUGhua8YNKwVnTQtC+2cquyGKjYi11yZiuakoOmcC6ZH5EimV0yFTqLKpW78ezVhSjCIdJUZe1BVZpXOuWGpqe5NfP2UjX/bSrFuzNeRCD7uvbh8sfO1bli8C+U02jtPav6ave2OrQMrUPZMY/HVDrK6qm+7fcOIUpFTZn48eXve7fL1movqY8/YNdDRTGFEuPd9BYGeeI23pxdvz98GzNxjU5sxmXxDijSA860r0wjkN6dQx2B9I0q1BembV6wtkE/K9bepXNu9+RYV7Aiur6lkN6WugSBZ/cWNHd9IjUrBdT+DkCF941P1xx6yMSg29vy6hr5eK4T72IlD9yisj7Mep62iHBDHDR/ogEdfOo3mN3ShSQWvjCBX0FUaCEaHglHBxQwKX7i620xccyUh0KfRVt3tH/SerhSoiZUv+DaeMGqAEY3bWCjvwUJ/E0gQRnlZNz5s9V6i6QDI/cVt5m2zDkWjp3fSZ9R1EikzosqIGt8L/tEXEnGMEorK/VHRHIJ7wpiRLOfb20BlB9djPTT0qDRTiasCAl16M5byDKqtWXEUSKlm7tBVs7X5UidTWvB8qAiMt+cExyfPvJNGsQzStjM24VSMyFQxNtHZiNygONz1t+GTHbir/BFTmr+a/7Oj7uCuN6N0QsyD677WL/LS1OL7jfwnvWZtbEUFpgbY5fYacLYANqjbit64Qi4dyHeSnWRzfWtrex10cp62oX9cAepb2+s4gs6h7LbN/Y82Zry180vtrJ/PnWcr90k9ItWkEqa66wxTdcM7Z3jYkKEO8MvS49ZmsrWTNPvqDlZ2w5VXbl0rVoM/zGWVBWXc2wnqindOqsHgBSihPTbbScEyXhVjKKJzXbRKGzYsAcEm1Gish9XvwMIbu+BrOSSM2CePtKpOlEuGxd4WVXOObQpqSS4UFUAze3Pbnm/vNqe39+PXcrhA2MaQ/hZYHSsoH4qtW9WSwARe3kq6ANhr+JHD4b4af7YLXtUglvlreEroNeU5nfRkthzkE6YMOeZCG9ZiboAb9Ab9dT1+0SK/aedfBOeX9gO2gBiwc4hXPIHvgAcOyu4oDL1q8HJo3ugYlCBUSLEo+J9xN2lAYfj4PhReHMMqeDa2lIIfvPaN+k8qxRT3ql3wQGSuAngYttl0qYGnL9M8OCTEw5xdKB5PnfxqLO18LpUPtYXaEbXpv150Ixtigh0BgunHmEaAxS8XF2fw+XaH20/ebR1i/uxLUfNC1zmbjCuV+2pcmmEpThNh2AKpcg+vYn9UTD8g1MK/MJHZIomzqB5YqDN+tYncONq3BSaBWdvo3dt7eTuILuHnL3CRXjjjBm78nRj5heW5JDdSubYaHcwMsG8XEmsz3LF7zyywwLTmjFrpu6vSbO0879/Mgpm5HOo+XG2gFKdqpWZH5e2wqfOExcVtjQwBG1iV7I+KqYXVg0IX4EymVeHT38LYvvfvyomvXGp1q+PD856w9RkzI1JCh+eyMr1oggLXarDsr3du+LrwWoy5zm76jMpJLmeJz1hKZbHRgl2XUmj2xXkKTrssU4mB/Otylbtwcjtb8bj50nzFQftpjMUBjZVwehxVn19zuolTVy+o11+1s9mMtxjWiANw3WYV2wIjTZ11bpia0rRR2PCk8eXdQaFhgE4Pf4gLTaXKCBczqwljf0T8szkvaYi9kOqjWCmVK3VEhS/Mq9pFkImSFWRX5pJmZEJzKlKm1sKowWjDPoZ08TAW9KGC7kg9vfATaOFm6q4hbszQKSQMU6MAgfNjaSa0VK50e0kFsStaw6IhMRyJw08PKnpCp5aX5WjO6VA12gKJ4CzopKh3rFYvRz0OaL97gZuFst7Y2RdNaxaVXGiesRGRlXF/KJIVf4YWHzXqBS36zJLuxR/u4ZqDx+PW+Do5aiOrQd41ts5P35x1zgkhJ0c93G9z2QUOnYTp94LdThHdPHczvwf+OiVkFvOp1+7jHXGMR50Qw1BE2xcFLFg6p4LrgkSVAkMzlijZCjrL1GGN0Csl7Na9oY2d6dy4oes01BDz5VfD/FG8fNP8hPXYw0RYnd6PCZ7NuGz738aNhfi34laDnTr/rRUKaWARLIvH/1so4jupDFHUGcF9sd+/gdXDKtDww/HhuUPfA4IngVCbRPs4foS3vuOHRWSI8nGb1W3oOe2p04X4cv4GDeE5YSgFclwFnYh8uf1GkT9X+Qt7QFNDZpLV7QVgEHRJxE3HM8m0WF01oY+0FFEvJl/Nv6xMvJ+Bmizdh24DULIkNPOJex2sdXrzI9Uh0Y9vqBLjERkzpex/OPyrvrVo3tMDAIptNrfV0pIaYF8vWp2NcCJ3l0D5N6zAgrd8XS60AjKPS7LEo6Q51T5KALrzeNUwzAC3ky+5TNJKG1n0u52lmiUsp9rwFPv6JRMpjTaKlsmP/q8GsjCVHooGJDlfqhUBdCIMCO5gyI7S6pUSSqhQLrwb3ZEduNBdy3I8Ne3eUNGRaa12Z/vWpQx4HbWp4JEWF5UyNI5yLGM0XZrrL+0Vtjf5J72mvYipRDpgyYsOXtx0roLjXGYdVNyzv/Y09CxkmM6c/rgC44z5t+/USdv9zEH9jZ4IGzthU0ioKXNuMJfBkKpsNAcoqWr0xD3BqCUFlYcwl23shvVGWUReHN+E1f0VhSLWdsRmCX8WA9doJdhYhl/sqLMg39UtjIkt/FyvD+iEgLWQUideU8zsRv83E6mEoBmpiGA3wBes6FbI6/gQSJJC3daqbIP8uY1OiZauj6m91iYMbGtxaNfEx3mAde6z+51CAC04xt8sgkQZ8nPgIlzi6GGJffcVfrjsI+vO2XNXbSiW2uzzxWOxAvJY7NVdcBNzpGtO3TAJOcuZVU81Y+TdT4ea7O5s79itfL71YifpWVoypSnPfQOfx7aIrEYr9C2m/IQd2artKg7rO4jbINWrsjRkl+XOSLuaJhX+ygvdpTbDkPbd7edd4th+fieOBr6ffOcd9tGsT6hVBJZGVmsdQNQv+9biG8o9+la3tvmWxnWfvsWsHpJrskf+ViPnX4OkmjR5T93QzaobyN9D/wDXUgVYsqOeQCgw89arrZ5iMs93+9Da6IP1MNzee2LaTdnuPzF9zb9czy+L45phxKpKnRnbnrjmNIClts3t5Oh8bRRrJVat6ADvTuZM9jYJuxP00LfMKznU9bBPTat1mb0N7mpd1m7itlS/sl6eEDZ8yMyUb4EYmg38wqhLEQGYWW+hgEip/YqbH0HR7bbgdNRgLENDbmxyOo2+uicd3ZuBmzm0aI8uiko4cQzLOMlrFvoa1wm7BIWyqEGPy4HVDWuOe+KTMm796D7SwA3bbhkUOgg/IOe11rKHOi4HqMnM+DUTro9WNKuzw5RKGpnK3Kn6XkFXE24UVTwiHCwG65pVG3tYNMrIBZROc02LRiCQ0lxLmGyBikD9sL5alJFJhqd/jOzNxSZSXo2IubGynPKtzOL6rlbz0NxUTkqvq5Bj190wIpSzAljqIk/2FspCUae6u
yUcqY2MaUNOzrC+lR6BI0KPSDTmDVe+qu436BmnvGiQVo8jcpmeqLc6IVfRC4neR5C4wQ8OOzKR9txAZJ/dliafHbvOofDmGISIsUW21Zu5FOF7xciVkDdiRMb+sLqfUFSJ+tnrqui5kV7sNRDgOIhZXA7msVg9wIg4aKaH5mAB2ZJ+ceTkDF16jpqoJjcszx2TC+vxx69OP2zyv9oCR6GnyTqdCamNvfkMFRlVQGO++nMYdpo36+u/ZlS5isvUhMiEGTfzagIxCZZAcj6bm42AvHWerdtLpkfo25+//Vd9uvPLv775effNPzb25ifqP87+SHd+//XPzX9rbEUgjQGsHStHfnB/+3t2bRSdTnmafBDvmF0P7Dmptev9D4J8CMj5QP5GuJjISmQfBCF/I7Iy0SfuykziJ9+JED9VAgj3g/ggfpszEY9Z0LKMWj8C08HLyykzRd0JzrlgR+FCiuwc8ZiBc0GSvSaQgAzdwTi7SRCGWyb2qJGKlEzxghmmEJAG0MvBVAPSgMD+F0QeN1k8cpg0WelayADbDbqZSnVDVcayy8/JJjw583HmdZtYd1yjn5y9rFTyYzfsY+vVdrKVbCVNKy2ngl6iOjUQgzk5OD0gZ547nKLm9uzeKu2en6wjcN0vsF571MP23PERuK98tzn/lnb8h+bQ+xw4GEg8p8z8lMsb4HAa/nLBmWHcXM68Q6By0Zl9a+rW020iWixXzfuTDE5OXE1gkthxSbPMcWPXa80yWX81XedUuIdjA6DPRkejJQwJNev//vrgFKnvj3Uu1v/ALwxFf2fUgo4c5FZWiGKmESDf9ITYiROO1kL4G0tznAD0EVQtz2SlozEBEM1E5ty4lk3ijgar7t7mdrL1B2EipaW2Jx/kLSs/tmI3WsrP74xdjchvXDE9p+oqWQsovy+swC4gcasb6DgB0rvBBY1Ak87RXzpuIFrBgPrvW6fM4WJuCyO4dTkPDPYYOq8B1ZLJgkhIqpMKaMzJvbquBuGPXXs5P0O46m98yhtglzS9Yve2jbzd3gSirhvkk4Rd926PuFv/0iPw+h9rzciJvv0i73YzYs7z6wGkrNXXLz2jrKVV5DzsYwKy5IjkwMv/SVOrw4XgjKBbfns6U0hCCHGmHuohUHjuzqrf7Eh8QH0ZEr6or2dnl/jvOE98DIkXc2sM53RhxYIqK0fEpOWI8PL6xTpPi3JEmEmTtW8P8yZtIX6gNFgXnvj2/ATasuQovt7E6aqerF9bLCYWdzuIwcg+UWqWjkjJC0Dot4dOC3QDn9/zPfpXuEGDm9+NAk87++jb+Lu76gtGMY+d5uglg95KjpeMQvF2LOzRMStip8YQSJcxw1Iz8uNjVA4G19074npTxncKpr3nsKG4btZeD6nhIdzHlxXEQSn0y1fQ8B2W2mryLsWUzypV77skqhLLI4BoOTV2usSXsmmXOfT2ej0iN2wCGiBn0JjfqAoS+xFdXIqNUsF6YVxfcsXLw7Xa/IM/wVZAdsPGIEUzgn87lxo0gM7QFqsHZ28canTyQ812An1GFm2KnT5vMWi7e8PHHPMpoWLhmRxgHdepA11oH2qJtKFr4f8OfMMqvA4WusyTNy725I+KVTgwOb54DVUypQAS8savUsmUaR1ZL8IwoZ6rYuD+SCUErFnJzOMDogOPD88fYIVncWj5o+uX/rgnLqx/LlGfqyPYwSQehWmjmg/tLmkRmcktY0Sa+FOKZuqtkQSj7/h04fMHvP2LkHOMxqeqaFic6qvG2cTbul0rLt/7TDA83+rzt4TnYywMNWwmFf+TBUiWvQFwAUlASfIUpv9gza2Dw7983H5nxd9nIH9nQd+zLBcv4TsX6TqLskx4KNuIY8PA5+U0+CKCse6O1REjw4GKeTCkNNSeKaoYBNa5y8KP7Oqh+65aI3LsXB31NXT05vcR+eXdiLxmM/uEVTHbGD2rJjlPL3EYtnTPt6fCvk+FfR8OUu+GPhX2fSrs+1TY969X2Ldd17d5qde+mC+j0/m07eGVOj/T96vVudGe1DryOdnXHST+5fW67pK/d8XOr+h71uwaa/jLqHZ+VV9Qt+MilUUciPFpul2dj05x1KZel3h21dHrQJ8Lo96j1x29+X1pVH5ayFYdklVXuem/44epBf/m4PB2ABrzDymlH9aZ0V0khM2qo0LhQbDhu3DnON47vNmI7p6zvJxWeVyjt77upnUkUHBWBAcCxWxJlteFbDCFU6oZFfxPlKkbcRFCxsnekPnIWMYypwBgKifClbOpIawozaIn5vQS4vPOf25sxFO1effDt1aB/Kna/FO1+adq848M/OdUmy+VzKr0EYv2ddJ13Qy33FwtEPX25mYDPs0Up/mwMdVed3eTOc28KVoMVpV/7srqt8usgXWeGkogYgLEwamSRTNmTrkGP1En1RCrXY+0KJlO+krS+Gh6Na7FvbG/3aE+TabhPyX8B25a+EPmOYMqNmg/sH/VQQk9OYIN7bku5xclaD0mUv8OAy9HcOeLggrTMlb1nt/H6TnpNyViiHUBkFpWgnd9dFD7+3tSKONxfCQIE4qncyQoCAFpVMwOeY2pLEoqvNRkxUCwpzaIsZXkGOdU6lDP0IqSkG1KlaJiBvE8U54b5qy9UH3ZC4lQ7gJCfgU86AXNAEa9nodUwPoKleKb4i4ZTDX4eld9TFteXKtvvgbZhmvqHK6pe0j3AoIyPf34kgP9ZCpbN+Dy1R2/S63gSSVo4eh2leA71gf+KhzikZWB71gT+ObVgDg5xtf4ctz7LPrqTqZd3/m382y447WhORauwuhbP6uH78TUpbt8x/Seofxro+DNQgKLGIfmf8ajQtGBMLQDBMd0gbD1WIb7/hVpdIkvVbjh1mblj7bjbk8e3Kd8UvE8uxyWGlcPXEpk767ZUw9Q1Ns0dfmQjiwCnwlUEb6JCriGlNFUFgU35PyXA4xSEBiFziCD2g/RUxBgujN9yfZeZdmLrcnmq729ydY2Y5ubm5NXe69evNh78fLl1mZaO3jvMWinc5Ze6Woo3nTohu8gy68Q5M5rpkKVum7W7N7k+farjL7ae/WcPd/ZfPUqfZnt0Ww3nbxKX+00de1o8oFWdNSMLoH06iYXCJC/LZkIdXiUnClagBKcUzGr7NqNdCSlwRW7oVjO6SRnG2w65SmvQ85JHfDf1A8QnZc6lW3d/hGdhxlsjZiRubyJFwx16sKOuiC7SjO1DiEtIzLL5YTmHbzg130LYcvoOxk1/S0PLOODLOBe+JqYy3nKhB7M1fEah3cFkzFXvI05f9ibzaMIJTr0IXI4hZglN2KssilZkPOzo/8gfrrXXBusH1MzI6k1n+SszrDXZfYRsuvdkHpjrctnDkqazlkYeDvZHFDS670ioilqypFNwYqaoTqEnVEzjyrx+H3jHYKKoNuotNoA0t84ZHlO1cZMbmwlW9vJq3ZnFCi5lQ6Fwl9kYUFGm0WYjLx/9zq4u7wEA50SuK5FEl6XKL296mAosyItL7PEtOx9YwWbJVb9oIqEnmIazUS698j29vP72pQ+YkE3ZxDt
ygLgrnThSV7ejEkM6hXbmUe+qrqZ0+YjBRW0rvBMXM6yzwTbJ6osRiQrr2YjMlHsZkSE/WLGihERFXz9T6q6Z16VxbLbOKwk5je0OUvcyWQ7eRUL/025/5j8Au1iPkXy/w2VI3ImlbGkT44/srTCP5+dHa+F+q3Li9VNi+QgsT1WZHXTNGzGlpZGvtpfRqiBp3jO1q2W0NVeodyZnBpyKFUpVTPZ8h6SGF70CkvNujLYA1d6RuMw6HtWZsceWPcIS2spFw9c1ovkefLqxeZmsvVyZ2t32fX5CtOXsNCh49DsKj+HRs/PDk5OL5Lj/zhedn3DOgjDovq8hA9c3Eo4gR8+Hhx7ZgR/t23RK3evPlp76qNdPX+MvrrbD7OUYcRP0e9FSamoPSl1h1WX+dps/wT1Jv1whGcbESm6Wl+N6udgcB/76UvotDo1VucydKF9EyicinCjWT4lVITdtasqOeaO2wdRLfFlwMB6i+DWwfTLWVFmQ4X/rh4oRReuihUgiaoZVFnQI7toBfQBeLQLohMt88owrDQaRdlB6dVwr0WyyRu6IBPm3FyImVJJw6ACq9Acuh1He9aRIdzHdZSFJ1xs6NDEd52s5+FPqyaGD1ubif3f1osOIi8h2+ZhAmNLE2NiZuZBVXfEYscGx96iv4q9C9uqsJlvXOHClZmzKLCfJlV6xQyhguYLzTWRwmrJYcjC3shhk8iN1ScCN4AWrlTFZ4i8gUKG4YUCNySq8c+dOo53hK50yVMuK123jO3IdTvLMspUZuxS85mgYJdjH7m+t97QRMqcUdGH+x/xJ4ywL+2QkJ9PwgxxjbA20KtGVWz1EyHHlnyDncL77IQpUwYNWr47YE98Y0RbvkVUqhalkTNFyzlPsXOOro9zPOo1zXkWZy1B66hKGz8fec3oNSOVqOsmuBYD/tX6FZ+nV48fhr2hmlQCjISh+XRcOPndu7fvLt+fXrx7f35xfHT57u3bi0/dsgrTVAbKsDnH4RuXM3jnoPKvelRJuLUyQPJSlq07ztLquZGKaVckqd7ons0j6ZzyOFT173bHUXaoX7/tPc9yrJwC5S9Yhpk8jQ5Wrg81arGQY9Mo0TFZQElXjdG7wJlYvkBjM9ofkEo7BPVZpx4o+zPR3M+zIHiEzzi2LI24F1qurWQ3o1xo07hiJ1xQtSCuqWyzZm33bNLGXtxz8B6Kp6KgIrtcsoHU1/HPNvfhpyrPPdzYsgpICe5L15jI3Zlt97uXesJcTvppST1I1DTP69u23fyscw1/ulzUkIfIOhRFVi25Z5kkfYhlGrD28+1xQW0pH6XvZgoZMhW83lyHwTrdA4OmwBuCleF0HM1XX2RTcgMh/40K6WCIhZxcDwgGIMDhef/+5Ghk1aJCCq/dkJ/fnxzpUXw/0qiudWGPn11qvgglprE0cKjcA0657qoPpdBGVanB/rGoNOQLN1yMOchhsCQsBSmVZYIpuHwKbvgsvmTPTo6IYpVmjVLade1rXxprCt1WcHnQN8DqkCNC7VWl2yFnxGdPWuxJbXqYbbqd7uzuZq+mr149f7m7tMuwPkPfLC9ZPtbjoKUjxbTe0JHuOM8t7HDzCU2nuzGQdiAUUZq6S51MjqXTmVVEoipVvSUpo25JEytuu0stBN/Wk/nzjl0nsP5tbESw/wAX7nEabble3EsQkT2KSZHtDsTI3hzt4hTdSfWcbg006/kvB1t3TLu9+2K4ibd3X9wx9e7W9nBT725t90z9FwkGW/UXCobxNSQEy381SV1AA3r4nYahiOYFz/vcLG2OUVJlj+3XsRsNYvx5uM1nGStujaYnq9CXtAo5xH+/xqH+BTzZiL59G9EtO/fXMRX1L/DJYjSUxagf30+Go/vQ9WQ/+kvYj9x+PpmRnsxIX92M5Gnx27cmDWMwegiKnkxKy2Pri1qWHgjWl7M9PRywL2idejhwX9B+tTxw37SF6wsZsZbHVjlbSt54UOT3SX1NOo4GsVmRpYvpBoOeMDu+vRYfutllG/plGs/eEbMeoty6ObbbO9sPBa4D3WNE1UNXcIe5VVL2g7r1QFCB0S8B661ZPlYf5QVrbKsT67t2ou3NrRfrm7vr288vNvf2N3f3n+8ke7vPf3+oBmTmitFsubKGD8LyBQxMTo4egwwclANG8Dpwe1Pacfb1pYsteqC5+V5kv8BGAeaWVGRpEb4foWKAfDXUlqM6UCumaxxSgXm9E1Y34d8PQ0YV7AglEyVvNJT3MaAxcOOA8BIoNPmhM0bSStmBcug+KCITwLL7UZUW8s8QNc9ZKkXW5Luh9VFVdpO5n28vHaruYLyR6oqL2SV2LJTqEZMrhqQfSyYOdBJAbzshOorDXBZsg+Y8XbrgZ8mS/yVJJyVL/rp5JyVL/uqpJyVL/vLZJyz535iAEiHgWxT8A3BfXqwPU39toT3k5H5DInm4ar+iwN2C4VsQpwNI37Sw/AlRNd+fJO3x8/XkZA/B9yMFL08YjyAi11UWZlwbhxWX+/gu/u725MefMHnRNYW1lOHzwv0AvoAfNEsnS6YGQt44VCcYiJ+svnXCFNZAIDeKG8NcauWEavZihzCRygyKaoXN+UmqsEDVXWBdW+qcmb/TvGLHH8H7+Y7Nfq2YWrjvRk2PP6RP6hJpXNbOO2hBhQ69cV5e2u/GSQh5kb41wqQyXm6px5wwY5giiqXymik64Tk3C4CldkfUznF78t8d/3z548npwbt/4MqZa2vd48j6/dcfq4PDzYO///rjxcHBwQF8xn/+bVlhB7YYb5/7gqM+rYY+xgRgnRu7vVA9DeZzVXLrbT0LiKCaWB4JUYB9b8K+uD3yBJAAWWjoxxOGdM8HIoEpyTOL5PPfR4Ds4/84Ozg9ujz/fQ3pIXYUBRh4KNxCoGSqq/OGU7I/KiZSbFTgJgQCtqO/ef/64gTmgrH9cNAjOIx4TRXUUSI5hPnhsKKCPnOw1pqi7ZhHv719d4QEffzz5a/2UwP0iPrabYixAWHKC5oTxVy4GnrOnrFkRsYrWyvjHrfW6n+uHO5/UIZ+UCy7NKb8MOHiQ7GgZZmwj2zlv5a22gDBDVTa+dxQkVGVNfcbL1THRXyQim6vEEli2VXM+fUQCziYTBS7xkq/oBV5V6Sdr3ON/PLvr98sC/AVWwwA7y/8mmErcn7tPMxyakfq3nnnb3+6+O3g3fGHWmPzLPz04sMhyi5/R5X+w0lhBZqfeKhnYgkUm9DoDzdcWEAt3S2t0nUKLz3K8iFox44dx+TYrRrZ4eCEAu/u27gPn42QcMx7EPPhiE2qWV1z5/4CORGcQzXWhDn8Hd/tarMUxLWwVPe/D7JS/dWddSJCfLRmxl7hBaPC2OtkSlN7QVPDSMmvJca6KOj5SknJWWqX4uGDmjruA4RPwQMa+/7UEbQuBltbIRliD8WClDlNoQO+vWGOD89d1AK5iEFwQ2sGtSfFzPOCYoSlvOvbSU4hrgumQFnB3Y1cRUJNrV/i4rkgY4fFZBxWcmAZZKqYCTFKFkNxP6CRKw/ng8uhYtxcahM61quRD3iqKcK3vB2RNOd
MmBHxj0I3PmzHlPjq+NklLxNyMsV65mXJXOjayZnn20bW0PNyPMJ6HVh3SjikAcao68JzckaM4tec5vliRIQkBQXRLK4+xw1MRhXLRlbcC9Hy0VT7W6+2k81kO9naHT+gysac6qFKvx3kOd4RVM+ZRjKQwiJEecJykhWGDHryh7Y/NRepNKqXENBf48+NGuqicEE0N5VrwYcV5xayWlWWFHSlGMSx1fqWA4zQfCYVN/PC0tMzDLdlik0lvGEJyrJMuPQCAGvLtzUsl0Buf68riz7HoE7OetHXVKP1YE0x/EZCrKSd7XZo7uePVd4oMvbOf76DM9pnfB2c0FQqig8Gi4aLyMNAQbGoe16EvhJ0ZgV+C4CLjvYhi4TmTBlNpCISCsUJiYXKYGG1JuALw9kpovBJN9oNSOderkUVIAIcL2K273mKByoruAZ3gRUAlcxD1Wk9Cq05JTIycnJ0vnFydl7/ENpvjcgNm/ghSwwfx54P4YFK5S5wVo8IExmojyRjhqWYUiGsfGpZsmbk2fHRuzVXTTqEbTKTPqR+T2Xm7Z4ej9cnD4p6xj0WoLlmqVmVSbEIdXIRCAg3hb8sZ5AkVYyaqNBw2CtPWYEygCs16LuTpHVuqFp/HfeCva+KAPbmG8qneFA3/0MaQPHGDYVLdDHArqUHcliPhIAVy2Vr8vCxxL3IIAfGsKK06sFJJGO8ZvRqaf1rcPfjBTa5b3seYePdhns89C/yx1ymV0RZtVobkGVK6GRPjk7PMQL4l4uLs3OyQS5en0Ngukxlrpe+K4YKIz/ANZ4cIaPi2kdHW9XbVfeCysfIO5FRRlJTbWHwDLKXcB5EMFubSwc8DVtiOFYE8luqDd/OGwJqMCbXCu00Y3dUfHX1gH0d4CWWP6jbpNF/HdcJxiqfYbPcuXj99vDfL49Ozy/tIbi8eH2+7NqGLuC7+q5RtNdIqy7cnU8Y73XY3d77IPxq0WiHT6FpNkedDbtbiEyq1VVNMplWdV5GczZQKOzJXF2t6UlIU1PRyIq/aeSdoSTn4grWQwoZ9ilHhwuiYOKl6vqac7V0Qdzp2tJ8MWImkht+xUuWcQr1re2njU/aXitrsaH89actytXMjEgpc54uRiiboEyArlx/61pFAU72g25/DOgvWN0NLjYhOfPe5Zlj+Zc/oZy1LJ6q6hvh/WB5kCoEAQQcwZWg6ztBj1qXAWd6qeugyTC718LW5ib+/9IGokGDei6iPkQbRLFrrtuiw4TZVQPtgF7vctW7S0vuWVPU59B3E3ZK0nn9zR1q0oF7zm6y7wBItfNFgKnF/iailv+pFMJtzzSI6qj0EMVmVIHhUDNQUPQoeh73f8LRtYj8dJrLG/AoqazWmX6SilwcnrlRsaOvDmAibCnj13UAChfccJqT83+cQqFuZp7pNfejG9QOWMOCbgmkxSB0tWdyDDJfdPDxQ80FPF6MokJTNzjY0JwmRGhqKswvc91HDFMFWQnjrVj+AbdaNKyHQrQA1wnQl/vZ6YmOeTPfkKa+LLzhDVv8UJfypltTxOtwVpbzxgSoQcMq3IhRFiyoof+sBBIFuGbQLube7husRq2QpjPkFFiw3cZ1OJxtpfoQh9/wS2h6f9DAQ7OMaFZQYXiKjpKPxrWvZh/TORUzNmowda5DB2sjyTW3y/W90LF5oYBkX9qwGnnLngpzTK3q7McUvoc2XiRo2nNOOW14nhOGhibMkHUt10UWmxkBYVMedeigZalkqTg1LF88RL1Gu+dQghO2CIWrz21M3ffcriEwmGLCZ5WsdL5AaoZ3ApcHj6IO2THQkJQKcnI2IpRksrAbAMbQSvCPREtLJwkh/6gxS/MbutBoWm5e2fTGw+Tpfpy4L8aIsqaMJqwUVTtRs8pn2YPRNuHl2IIyThCs8YhkrGRgnybSyQyk7vwPVlmuW8EsVCdL96e9LZ7FJf3iOITm0ICqLq9MKyOFLGSlfctDwHv9dQDQd13DgZ4dnJ+uddJs7b3NaDqvbU2ISgyGZD039O7Wi1ftNTeaXX7T6VzLR9D09rdsoOJnKWc5I69fHzbw0ROYskwwZPxas8ILhKBAaihU7474vSMJZNHdrdprNv9Cwr4Hsk/ybyM0OH7TLD1jMkm5WQxVZOSQm0X/7ryRwijW6o8E4EhhuGBisMInp42CJ26yDnynUpk5OYBgCtoDZCWMWlxyLXtSlh8HdTgFOTl/C/nFHQgPD24Fa6jddCD1bughFTTrYsr357sHnBmTl6Cc9837WooZN1WG93VODXzoxtz+N1nJpVjZJ+svnycvtnb2nm+OyEpOzco+2dlNdjd3X23tkf9Z7QA5oBFn9b1mat3fxy0DJw3tC0eEoskBpTA5JTNFRZVTFZc2MnO2IClUdrBiZ6PQgrs3TdNoxF0b55QJdC1AtHwuMVJowlSdFO9F2/qGQvByUs4Xmts/0LA4Iqk/1nEc1qk0Fk/2QZTAsWt0ZWQBF+SMydCssWPdmEhtpFjP0s7eKDbjUgx50t7BDHcdtPVfD2+Da6Cj5mDqPWm/VmzS6oPedmR2YOh3Yq7WHvrQMst1X68pCx32rY7f5OTsesd+cXJ2/aIWPlvyVkHTAXDz5uDwNqhJwzJrks9w8K5eWDXTKV6QchErChPoX3l6cBH0b1fxgTvJrD6zkpSKX1PDyNGb39cimbd5VkCbyyXNyITmVKRwWiMHoVREycoe4haS7TpLuVRqw4NSCGIE2PG/YRSgBvsAqa7Th4uZT5PhWrkunW34zDwbh/bbSBwDFpli2WWf9PiIfd4gmHA2Z9pEk3oc4dwjWEhZsiyAXE280Bm2POoRO4oCcWE4p3FOpSIrUymTGUjwSSqLFcI1WYk+t6sIohfVBRdlDGu7QKUHlnJtNSrXdwd03JxfuTQe9BDqajrlH8OI8Aw0ktzf2MBH8AmrSa0l5ALDe4xE88BHXgRz9GSBXU4XxNCreldRJ86pNsTcSJLTCcs1qt9CGkgFwFpGdu0Xr490iNxdSWVSXa10b8waGQ2SMLK8hO3/AhTBplMGJezsrE5ycXv4jF28PloboUvkSsgb4W1hDbCIQ/3ImxsBRSWtyd6NhykwHeJpzxuGtXisMQTU832TDZDMbRRTb8RytAPfN8im0kwlw1JMrHfVOS8hcily4RA5vY1jUEFeHx2c2avgAFd8FIaKSWW1uzpWUJ4PtDgr5BOYwEsm3fCvZFrl+SNn/n4184td8KomdkkwHagRd/jV8wlThhxzoQ1rNd8H3IA19asRIDrUBqdAXORgzsTbyxE6h6HzJ4LdccMHsvUQKsI5oFIc7wRO1gViwNBXX7gR+A6EmRoZde2LIw8wFhgZlCBUSLEo+J9RcBqiMHx8j6WM+ZSMYRXQrU+5D3Z149BkMJViinvVjnYQUIO7dtcQX9mxj6juzex+FFIKmhbM2YXi8dTgr8bSzkM/coKFqLnoLjriaRR4Wssz7MuXRK5h/9XdTSj92x1Ho4l/w2BJ0FHq+KeMGuqAu6GapDLPWWqijuuNVpWhTeWUiwxpLVB+LmfakXyooennhrQU9L
U/wA/GyjkrmKL5gGVYj/0cMevz8W0e/Gd8CjYMLOi+1qlCngHxgC6KLkvtS4UqBkn+Guuwjt2AcLIzybQVx7oS1h7dme5ubk4byBjkqPZUoQ3xD0JghABCjIFMNTVBa9CiVFxH/ExOMdlEyIw5c2FjybWHLmSqA8GAXJqxbnn3kLPaKSEbA+MyYwt6xTThpu7nH3PmWtK2dGoJ0jdYhYMhWIdqmykb9sBY3YKnVU4VwBuGZAU3vmRyO4LsVBrnNuaYWyKY62DAWP2CxnPZAAPiwmUD7XW8ZuSgxshvvKGpIWP7nrsu7O0BHy32QX6iPQWvs+cv2S6bTNkmZS/SnVcvt7MJezXd3Hq5Q7dePH85mext77ycvmhZjgaxXTYELU9s6NePuBNgqxWmJ3pehDKr7mTCPQyJOY5eaJ7LG9z+jGuj+KSKI8fdGC4FQFWQFBFMmFDot3n1o0HCR1toQyFBFyxd9QkRwcgegX+C36ZUwwqOrdLGU5cR0zhFXgpod8ZP80qbTrt7K3v+yKjRfYOg5uguOKifXIYqAuFRu5HjWl7BLK6pPRiA7rj6dJeuWLyOdXfcmkQkMzaoA8VTEw0kAVO2+ExECeZGIi8KpGRH8C97ruilYfsbHNMooDSusAFpteDEx7SjUbQJfumBLdb+j4mvmR0GdddJgMynmPnRlqOlFkuOQOhSVAsA+yzueRRd2CRUR4OJBcFO71O1GidZMi1WV2upa06vmfempqw0uLgwG0IMKPbClQPS5StFDWeipA8JJ5qLWcX1POxafSjhSNv7glRl46p395zUFlQSS9GuzoLDi2DaW6wDS6iHb3GhJtXUDMZTzxpZR64QcOwWVVCBIWma9YgJfr71TfdPqzm0jlI6H9WTi3nCOH5rrU3pfqCcexB5fcTzg+8JeDGiGggLBh23R55tyAnhho4Ec7+SaJJjv0EnUxxEqjAGVawFXfuE3sJ6b7zkNG5w1fE9XLexHb3xtI+zI39vFsbzGxKC8hq6RXdXah5sJMmlvCLUXkmYiccMNkNp6RZRLb7A3bvYeJ5sJzuxngWxew01q/7mDi0Ln7o/ktMHB2JPA3AObTRFwuZIUcjmPcGasfvMRWx+kyGFLjjyKaTwKaTwKaTwGwkpxDPpK0zVjOQrxhUiSE9xhU9xhY8D0lNc4fI4e4orfIor/K7iCuGy+O7iCh3UZMi4Qne13xNPR3MXhFafWhlC7Xpj6qJUNmIUBWVLzL75GMNb0ZF8Jj6+wRjD5YW6Lxho2EPzXz3QMBY1nwINnwINnwINnwINnwINnwIN2wT3FGj4FGj4FGj4FGj4LbO0zw40hJ4pCIxzgF3U39zhAHP9HiwN5lRrPl34yCVs8g5lNmmaSqwsA/WrcC5i6EcpZOFNRv7itzC/4UYxcnBx8X8O/51MFS0YFOXtDT6E+hpSwTqbgLjZQTWiobYqV6GKJ+h+bsyTo/MROf35p99GUPVyzQc0hA7iHlz0lOAaEgNdxZO/ARS+erMbMS5WavUPJ+yFslRufxw2UA9d4UVJU7Oy1pyFpXMg6uRvXv2q1x5qRvv5XA1bLkCXAXGNpnMoBBUqQYINzYDb1dM5TDWCHUpTWZQ51xhlNJM09+BFVUSFPfpWt0Yf68raA/yOYUu/AI92+A1TBu/+tFJQQSgUz0SbrSefhhiL+wy/h80IMZHMqs4Q5we7RX4KU7mxeMOuTLzMHnqLQcAVlM0Ss1CClTAr4GMTCkO4mFn9FRvOS0UUM0rqEiXnPAKWzma4PF91p3Xy35xcvDt2R6upfCEpD3bDW3rmqF4jMhvU6HH3D1c821dbijlBWOQbahT/SC5wnGbx01HctSghz9jHJNS5o8bQ9Cop7JhQ5w4h0RsXB5ubO5sbYYK1NtbwgT58fSFJI8S1LI+7Gl0xN/3yuEOW1oe7oYtBXsDp9PUgK5V/pxh80Ai1vOEvjS9xpANTbOIV97n/VIf1PjpePTB642Jr59Wru861/f0WtP1FtN1GEPR3uk23ix237N3X4SxLY7chWwzEXJbH7oPGCLh2ZfK8tuBqxD6kMxz9/9n79uY2bmTf//dToOQ/Yp0iRyT1zq2cLZmU17rxQ9e0k1O7laLAGZBENDMYAxjRSt0PfwqNx2AeJIeyGCmuuFK7IjmDR3ej0d1o/BpQs31Yx5JhP2NhLqzjX2DQWsBHRKUg8QxsMgqVlACUMr5H+I5RwN/vRiSTCwfQWRhseghfg+PeuTXWCZfaUNOVX7eoTRfSbLGzSgxjXcWLphEYkQZtVXepxSzKufvapOB6JK0pvLfjyeVw9OZy8nF8Mfn16tObycXleNIfnE2Gr4aT8ZuLwfHJPzZoGDdzjWDh0W5HVLi+fNe1NeiExGnUxTFLSYlrDJLrHdK9GRuEyp3ogw+ksyqTXON6dsnXMM4FvQMFeVOf0iRcYJreIEHT0ES8/RJFSB8T6DtgDjIypqKep/Pu6ioIWhcSWTWSHZH4whbw8WntdV7Lji9Rv3BtFpCNuZoXD+JBkfBsuYClOf8oXx6bUS5kSSzsTZiFSyhrqOhQ4kz3YYxaYLEIkuh4R/wZlhRUOic842pHLCCY342OUUTBTWQzNLr86NhYzvCGC3ktVs5rfatCUCFJGprTJA26C3FHXeCp4+1l7lCqYIqODBaVFPMsIxxuoQC9qkuk9/r0ZHj6ejA8Pn71enQ6Ors8e3X2+ujV61eve8Pzy+FDeCIWuP9kTBm/uej/5blyfnl4fjg6P+wfnp2dnY0GZ2eDk5PhYHTePx70j0b9UX84vHw1uHggd4od50n4Mzg+aeaQo6F3p+DbOVS0qjn1OOvm5Oz09cnJyUXv+Ojydf/0ond2OXg96J8MLi9eHQ1fDXujwcnxZX90enZ6/Ory9OjV68PhaX8wvDgfjC5ety5NYeZIhch3ZvKMijtatviksvfz6e8kdEfregT2E1hyjfuRgZaucalKwOH7n97dj/QR2EfGJBpedNCHzz9dpTOOheR5CLHVTwQnHTQa/pTc28SR0fAnm8fQnoC/48Nd7ePmUAiuFhfp+bpfc+9UGdULttQ5mhnhStiUkI3Hbw8KQxuhBU4jscC39TPR6IgcT/tn0cn0+Dg87Q9OB2fnh4NBPzw/meLB0bbylDI5wTPZSqRW1dIfYUkOPtGE+MYylOw1eOYlq0CglEE+EzGLNVJL2V+bDfX/fxj0Bv1uT/33qdf7Ef4Ler3ev1vXnPXmO4Wrn3/ihI1t1Hqy/fPT3mNMViO6PXLyQKVcnWAoxHGs1GWKxu+vjFaVJI5LcPn6bGTBhExNfb96ZRBDPSoQ1jWuzMGV8aoC9Kuisae11ZOlwi2V4sdzosieUXNJyM/JM9eEasRfLpeBubEXhGxbgmtV+ZTquaaQC0XsyLJRISf3tkLnh88/jUr1dB5LD4s804c3E+1S7+oqnPOuTDfNtkPJl9ffLEgcs5V+ywpvfnB8MvnX8J3y5g/PjhqevhyOWjz/QxAE7Rd7zquFqHcdBFE9FmVY4KgSbr9rGne0LjS1EZsSewQJs8HxCW9deYYIiacxC
H6LmU4ZiwlOmyb0Sv+EZjEuTYvObLALpWTOJNXSvsSQFxcSIWZ5jHDq3WnnOBVQ38rE1FJE0pDfQ2U+macpiVs7sin5Kic2vPanstLF9HRpHT1uEgXommjGmmLCXpIk3C+8eH9RVFh/aeOYSnlSnOpSVlgIOk+V5hAHMhZdmImy5tUcurrdlT8EXxcyiV/gOEu7doxdGon9in9lau0X5nvMlnCyLOpSp0Z5sLE0kJ8nLfJkpwJHRSUQCwJn+oX0iSLWlepIl3q3IqWtxcygzj7LqKEZ27ZRw/qUnipquGoku97XdhA19HnxIB4866ihGe53EzW03PorRw19nnwfUcOn5MpjRw0r3PlOooYtOeQ763+5qKGZ406jhuOt4oO1uGCxVXiY+E8QHzTd/44Pd+aKNgcITZXPxwoQHp4fHR318fTk+PT4iAwGvdNpn/SnR8en08OTo360JT0eI0D4iSbKgUuyWrzMBIeeQ4DQm+83Bwi3nfCfHiA0k91tvGrcOjJVUckNKkB5lnZlByFLdqICdlvf9n0OOCGle4p2p8owFxZ/TH3POJ3TFMfGv22QgGDQmtmmk10HGN4DsCf9g0TaCYfdz8UXIFzpT3PTFOWmav4uH4rj0F5+tDlR3ler86JGBciobaQZsxbSmP4gVh9j7dJwls8XLLerB6OEhpw5hGUeLqgkWjJxHCvHRrnAd5QsC8+qSPg3i8AbOPKuTiBOvuREeazdQkhs9d4lmdrfrfs04yyVXZJGFWy8rprOl5xwtfFA+XwzjwKzYYrDW//NLfKx1Oh3mPS6GhxZd1zcp7rQ3+jhimJu5oKMvpFbFB42vvKUqF0HSTYnyvoDy9A1Wdzk0/e6LMHVRhxr5nnAk5LwronqEI+StSu1R9PZ+WB2eHx6Oj08ivAJPgzJ+eA86pEeOTo9PKmS15VKfhoiu+4rpLbf2/vY9tK/w6mBOxkJwSLnBrYBLvg4YGeRe0dByoJ29IVsRbMv1MjX6816J6cY96b4vDeYnnpaIeexrxE+f3y7QRt8/vjW5j9aaFFzRgFBblinRBJT5h4W3uePb0UH0iDNk1ZjKRpMOYFL2Shiy1SJBEMiXJCEdBzyQYblwrzPkI3jtVlou73xaoxte4uNx53ibnj5eGyvjHMrWEIM0iwGeib4XifrmgD51bWa7YEioaKrvk4b33dAIlguHaqga1Xf4L8yp36qbX2F38Ok0Uicc2aRN27M0Z4BEawJTcMJnztmsJHoXZH208Ik2dr7nMKEwZRysp03mAFmNTiy5DyuoKhWmqBCY3QKAjjnVJqIZ0dxMWVSqUJ+D/nTC1hv5fcrjccEwyXCjHDKIpTkQkIjU6XrwjiPSNQAs6B9ZHh4StBels73ijiHen0vUN/VOZSZHdC7tDZPCnCYR+fKNePSA0tVRAGXR4vTixtP/iXL9irEuXlxo52WMgSFHXTl9u0sjx/RAHuyuw1XM32LX6lAuAxJE7WkzYVIKOyeC1Is2HsvVgJgoIWPQ1N0o+RZtXcDZ4cQe4EFbwDOBeJEeUdg6isnmVvfwRo8ZdxSH/WmId2+rAF+PDo6PNDovP/88lMJrfeFZFmJe3ZBfgcc/OFzmrAIkOILPQOiL5AgJC1Rto745ZVRSB36aMJSKpky57UGYFPYuSO3GUyJUjVGcDoajxwLXxQwHLYCTrNuQ70KNwgkSdHvOUAJFY4j6C61j1YxWpzkuFu67jXXLAZLf4mFG2intM83FgN5kBCp1lb8XJKvDAvhSc2jn8uZ5iteRVAZg9wVhMI1lotK355uNQTaqwxnB0hlPkJWbRxHR4c1zXF0dFgalHKh7ndpJEAHRogd5iKMV/9izr2b5uDb0XsVYavtXf+EvQvO8yI/AOH3Ahj82qBzVkvK1LuwQr2Lajp2543dlqnhOlcL+pvm0j3V8TrTk9VmimtRAymliCSZLMYDQ9dP3pi3KwDypYoPaErkkpByCoNcMm2rVjbop0ZHUyr4b2i05wONpp22XQnBGFpfrRNht9mr7Lv6FuTNj412px7vin2rHE/4G/QN/Q369iDQtx2mFH82zTfYKP4ISsEd+3lDVT4I3FUrRpQwlFzVCHhUm7dwc5bcYedfmDhDuYqEuWSr5ANK6EB5OgDC9gFx1TeUCLOjWiQplDBAq8E6REwj6ybbQBROEYZ8H2Nww24tvPhwsgUEzHeL1/eUUH1/o/Q1ovR97wB9fwFsvqeG5fsbkW8jIt+Tg/H9jcOnjYoJntswomdaoOLbFgaGbsOaGUUdWpYQA4iHppwtvTNEH13v3gS6xIItkVJeKRzv2lNlKF8WskQZh85XN6fquRuq9ZO3sAmIK0T5J2gJ01uVJfR6YQs0rRbMnQyoIF1tUGM8w5yWBvXsg8AVPeDJx6QkH9W5vmN/0DjGB8dBD73U3Pg/aHj92XAGfRij/mDS187NOxyqL/5nH11kWUx+JdOfqTw46R0H/aB/7Ib38uc3n9697eh3/kXCW7aPTHG6g/4g6KF3bEpjctA/vuwfnRlyH5z0jsw9DUd0EcxwQuNdRd0+jJFuH720PhEn0QLLDorIlOK0g2ackKmIOmhJ04gtxX79ci48WRv393Hk8yEjHHtAidY2BG/E5ue61FsOZVJWlHXSovOO/Y7vSJVat4SnZFdmfG0Oujc3bJ16gJerVshRcBT0uv3+oDsnKeE0rI7+O3EBVvDaHtN7nF7F3P+pUsZap38WZ21/Zj2HJJVMdFA+zVOZr1vDmC9pbQ3vNjWwNvi28tjvBf2qptztUCuFRdfsnEq7e/bVXWw0o7Gsfnl78b6NTaWeKxfn1BF+V3j+rDcI+l+QxPOXYt+v82mjKFjo8BcWiKZzyBlRpjnRf0L7WAgW6tt0upxzao8EwV8Ah0LN2kEMe3VPdWemErJD/zLPvdcno4GafdMsOAkZj1RzNJ3HZrYSzwFqFo5Qc0hEgMuDlnleOekvXZp2vyCShjgTuR6l6Bh3p2lkqHTa6UpxmaZ9YFzsjnUFSQXjBon434TcdtCvlBOxwPx2H84sAQrX4PHaysocz2Y0rFGCpinhK7mqm0D6ITO5gsECvbShNNOq+a08//0Vk1w/vRIo9bazXDO9EiYBJOXYcyrliUYRNZJlx1OSFSiDFOl0aUMOiedz0AWmyQ9Te8vDE24rvYEv5eYub4P82cdNk062fXcW8tfdqjCplNYJjqgIOQGnu7rCTJswAq+9VXzxyjeZ2k0d7dH5VZ62cG12FpyBCV2NtKVogKhNHrujfl1f/2PDRvwneD4fMg3YqGcALvM2c2C5FDQi6yfitH4ep4TjKY1tiUKr/ms/rN4H1DZQaqhFEB83dI1qEX17cf/ObWCtcCcNkPyO+FMqp24MAqXP/YxymIis0QXD6Y7DHreA/Sb1xppEXbe+X878GOgI3BfV1/jz+HJf/QFmLo7hQddo8QKWeAo7EUevzbrdL529FdgAX3Ic34t5jnkU6L+DkCUHX5ZkuiBxdjBjE8ggiw9uU7aMSTQnqumD0gQnFpeViGAhk//8P2jIDaxMjOLZ3/Ybs4Ns
aqI9Xqmffv3wnz07r73ftoDfaQCf3wUQbrkjd6mkRAURMl5YliXmFE66n9QEl5EAwSG8E+KgBlo7/GU8bksJb8TP1iuqUbVSf7VOUlh8Zs8SbgvHMeyGfm9Nb69YHuEd8fB/QYcdzPAXEPP4RXhHJnCaOPEGJyYhJ1iS6D9DKJThuvV1KyV6L778mjGhNMfwl0t/hr/V+HuVogSHH8ZIX4NDg6A/CE46fhpPmRwmUfDj9XCLW/gkzRNwena6QKwW9U5QPNgaKtawpr44mljUsDou25Jgx+jwesZGNby8Gu3bxAlTUT4rsp6bN0ukD7ADdOWfOZsa9NUOTKP2fKpO1+ru0Vb0lwssJ1RM1BKg0b6R9aqMu9Zrsn41+q2BR91Br3/e7fV6vS3gYHaLbH6BOLE1RFcpmJL9bLSNvkGSUEnn2v1xtLDMcNIfVfhSJUwzR8I57U5pqr6FcF44p/9Uf/zk6HjS729BRiV4k50Kv/EiGUcixGmzqNYmr2bS7/XPgm2EQrWfEh7ckTRiu7ph/6lcrru2wcMQkB5CHXecpHgabzDX/QkxTgJlebWYzCxmuLEY+w9j1YxOh+E4nZujr17QUxZ3vxf0dDAR/rTYUwuCEiYkEuSOcD/X/JUyMYVpkSnvU1lsQhAhEjhrA62dxYxKS5SESE5DgV5qaH10B0f5xfUTneb9FQqVZ5ze0ZjMibnMZU6JJeH6Vtt+x1RSKVr1z3xVG65d9dqcQ7NQhktnTcCY9s1Vr5BlZIUR0GB+WVMdRLcbGSy+/Zqlehwcb8dikt5RzgCfq9VR1p/E60t/WJuYjtN75C4xgJQYDnXQQzgEB7KUE8AsewYskiTJGH9O3PlkRrSJMXD2k2CZa0IrkkYGUg9m0Snt15ZX4eOti5YU3m2sHBz599hGW0pa27nOL9//MtovNnvlGlOJJb3zkVHuCAf5xOktTecQot57y5Z7HbT3jkQ0T/a0NO+9ofPFHrBAuWnobqCY6tSnaxEkQVQDkBqCwfUloauircOgZzJz7yGGGJEZTcsXuVQLxcMlHnlSBE9QgdgyBdzYCCU4xXMde3p99XH8KfjA5x10lYYBeglfKOWJPo+7GiQlZYAKOKOeq8XnOHXlWpYLppQBFfYypGRoQeIM9D5E1AUJQTiVZQt6QllfGUv9EjEEJwLhkDOhDecl43G0QkTTuyhIqZDBnN1BzKJrVBGIa10Z6MORdqJqWLJD68JxvdHCgKRWRT1QFHYTtOVfeJEKgdReyjiVhhGIkznW9Sc9FfAwCtaMeNVN6LpupGJXEeRHNNXlNHEaLhjXH7uhdZlNPPKVfqZEmf+Gtof2zospRzmFoobm6MJmRcJSimNzW04xA4JwTdFDfVpmkZDXsK80ljcWOdlwyJy5lVqeQslKmpA/bB6NbRjH1F2zy7Bc/GhCnpWHEzrXLvmPSPKclFvXcyk1y3z4GP1hsnEm/13oAUtZsLhgF5jnHMipO2uaX41o9bkp2vrPrZ0WNNrIjXrDjaxb27oisAC4jYCmQuLCfdxIJwAY1+8i+y6ikRXqMGZ5VMjvUH202whXixRHWOJmkX5nftW2QFh6FfzN4hgAR9EEHpjYJtWTIRFC+xpWwkuzhheCjDMlEUV6bHHBW//S/bpePvwULfOKWmf/gssaesba3WnonCZ4Thq6xgnt4mkY9QeHjdqw6P1KtYCuRs6N1nSyrDCy+QJdKDGBh1gc+avEDkgRLnAkASJvkLPGh9fKmdeHHWDhYq/vxk3IPb91Ty2WTqWvtuvH6y3B4YKmBBRMq87MC4H3Qtu+fK9g0kKbrn+rba9Gxtsyrra+2vbDybwwetf3UXq0sX2rjyIW3oKsGoU0sp8blpf+DQmJ4Qg5jjVODmgj/Zta12LBuJzobaGwi+wurvvrOmW0Yrd1w0INh3vlV0pKRG9NfqX0ZmJ5BGt+pZFoK7pSGmf73kDTeQtqy14rb7br9OHdmaua6AX69GH0QRk2S2WdJxhAigX5Z20sJSsDrbc00Gp9jpxO10MIrOSq/byQ2zf6U0MjV+mM+dJqtgX1OrK6xhNQ9X2jeJp943I49jNgqM35CEgogvvEoMe/MEe42NQzV65P8WblqgVzEDGrJX01a0r3IZqhzTeRd1ZQBA6KCrbX+2UimOY0rndZ56jbvff6Z6N+73yv3XA+jBH04IfNmwcSsog0roN1YxGSExku2g/G9qIvVKX3TgJv8ynhKZFwjmHk8Gf/u4Z2i9+dsVe23IpGkS+F67Vq8dJGzVoa9HqZq1I8Y1Gz2tlqMXsUyJguiFJnruoqb9DhD+3pmkXo89Wo3pH6X5Hh8PEmVbRY74xFNZX/jZ3ZbO16Z0Zd/tc3K2bv50mCs4ymc/Ps3n+1XEXeiM1GkuCsPmS4daVPw57duL2xNQ+eEyicIoh8XBYX7a5gdESymN0DaNWjdly0u6JjZQiSWR4/+pS9hld0vcEOemjHrtmN3TYbfd/er27XbDBGlxe7y7X7oqFd82OxrzintmkfKNpGW20C5Gtbs9P0EJCvJMyld5qJGkxPM+PfWcxuKe7iXLKICjioKKb/f/WvaGR+uUf+c8jzvDdGTxqa8ndhMw7X5KqooHku0CGm8rnEFiE1m55v0jHYzA3AS9Jv7pOuCyWv6O4Shwtz51DDCLrkEFPwzeBlEAqYbi7P15TbEhJzmWelmCbSgDWJzktxQUFpYJJxQqSaGDdnVcA3IsEk17AK8IX62DHJDzA0iHDjGABDhA56X113bGgJxJ1GHbhFDIdXpSFBqFsKoEwzCU2ubMZZlIdye0JCNp9bu6YZZSa6ua3r9sHiUur2B+Hunbz0et7f0LWX+LBlz/pdS+pi+p4sCMTzNNWFq5rHYYFet+7988e3BmpfuSrQnZFWGMk6ooc5b18Bquj1VwdtaOe3xMKJuHEpcS4XJJUup1PD0Lmob+XYYs+kQy0I5hJOJgwG315Fd61QO+bplcp7ZeQeejVvl6P1qzW+F4hbxa81fVq+2U71Ymy0wx+tkxJ3mnfyb/UtSr0B5sbvbGruFUCu4vS+4G7DfKO8lEXZzMbaGD4xiWMPrxFJImRTW1VeohISUOnrBii+xr5HVp3T1GKmhyyNRINd6GNvoQ1WQs7joPZC1TpoxZILgyyc89jCaZWOLG9kmN100I2Mhfq/hZTqo9ok4G9x08AmLzbTZiIVjKoHTsQ/NLQQAnrbNJxXe+ZQKz04LE/nEJuwz9Iyg91LSiavrhtmSbPaHOlKGazEjq7XjvLKH1V5JPY8rlNqD7B7aXbThLfMiWDxHYkQzRyMtDvlyTkHe4aJpuWmPIqS3JucoajGl4eEJ/VVL8YVE6yeCwE3FyqBaBAw4ighGVxfKKB56n7GgoS3k6oqeMDQLpBktyS1Bp6GZadJHkucEpaL+B7R9I7dkshi08x050LfuyxuLQLoawGJdXWt45/wsN0D7XXI0fuxSUKuTw2OVjNcV3yKTBPIcGmpgWlCTG4U2AKZzlcw16HARgVLU5qqC0LDHC/MmHWRUPWUMjlJGnkPw9fWwEnJVwn6JMp
jEumXg3/YnV3kSYLhTpHd2t8ZATC/tNzRi3bQ5h1977oMowz4cTqHgSQU6lkZUx2b8WrkMiebmsMevlmUMZoanHGTb665TuUC3SQsArUX3wR7G4yFBoGF1D3C2++rhRfkBqZzq6GmK/FrhhXnRcu6RD1exzNMYxI5phtF5DFdqWwUM3abZy0ZXrTRguHFUL2OSocJqznybLewx96Hii0hT6sljldsC1zWSbPWAHNGkN0/9G1cYKUplQDhBbu5BU9lk1n1xMJbcewJ6vjD8OfxsXJVv7ZWTbaNZhqtYIrfkb6JT6IKCVZJ7NZcqazkt2MU43vCEQdJkJxmettpyw1z776RJdWBbBgMcjuVJzCuLrYOldg7/ncUW7Kph4wKqjXnKs4VaWxeI5KVSB9UXm+aNloniGidMNYmv1ogrUTqAlFWHBWvTP1tyDJWbGsplkWlqdYy6UlGSSA3qVC/OnDK5ASsunK9OVSyY1YJ6iXmMYX6OEoksDThNhNHMSz8QZQrAiszq1RRbtPA/MJ/7cb1FstHHNWTr19XZnFnK3hGU7V81VBdZ97CjDnB0b23QA1IRK1hH3uk/MuTLVSbXiFl5hs4nz5dbxmkMi00E36VeaO62W5xFpFE1MK88VC90EONm7ExaHIeu1iPIU3DYqjmp68Sw3UCPWXRfWtZ3iQ5pfK41Qbr3mULgth/b7BwhSIgJmYnDxNwMQuLYqI9TvuM0M6Gvv0TYqhwtCiveZ+qGjO22WxqnsSGCbylujSxaxrNWByzpR4r5pzegXKcUXdnPJUBeqt8LwpgD8YZozqNxpYrUYYoKNT6Rs6ie9uQekMs2DJ9dHUKS+rb9KmNYjxMnf7/2sSd8jEVHkwl1ek9lHu2SkBXktNew5IrtxbAWWutFUIGj5pzIb0szcgZD7xOK2q61mBNbTdBRBX/NLY/nRWdWYB/bgpwAaMZ1zBCGGWcACK+XNAmJa7+GU80YkS3pFbTfaUWjoRgMOzZadkrrrCJwUB0eTu1Dp/LTtPUmRW1yVYqr628MZ9JZRcO6ytNpYBdg+7W/zQZ/5YEtENJUEueTKol9Pw+N0nCWjkQJuSnrVxTPcrXPA0ao64pVhl6z9Gwa+rMSvhkQXBUchq+gcxla9nqeJ/gLsDqf6mI36Dc9Tag1qa3S0CQxXCrpP0VPwrOrfGd/+qcswbIAyNHNY+GE8kpuSORO9U3AWYYCjJjCZoHAwro0bW1Pzyb7WEFpVTXFo2VPGkDstacX3P50/C6BKooJUkyGaDLNDLmp64g6fR3rbWImiOA0gbxnPeC5yLFxq+kYeL7lVfDd9ct/UnzJtrGn7y61udC7VxJW667Zm5vdZLwXnOJzpCaHLoMF+yjrQOu9N1jxKFdy+ijpyA/kkzJQ9nKb2njP3YE2ob7Qp/bav1tFeMLt+a46sKq8ofE+rzChahFPKHy+DfFEwD9xsLcPLYb6Ej/XKLijWrej4xXlPUWbltxGPRclN8O3Oo1FC3cHPVJSJIV1IP6i0ollsn7XAj1vwEAAP//1Cb/BQ==" + return "eJzsvXtTHLmSOPr/fApdNuKHOdsUD4ONuXcjfgwwM8TamDH4zJ5Zb9DqKnW3DlVSjaQC92zsd7+hTEmlegCNTfkxy5zdGbq7SkqlUql857+Q3w7enZ6c/vz/kCNJhDSEZdwQM+eaTHnOSMYVS02+GBFuyA3VZMYEU9SwjEwWxMwZOT48J6WS/2SpGf3wL2RCNcuIFPD9NVOaS0G2kt1kM/nhX8hZzqhm5JprbsjcmFLvb2zMuJlXkySVxQbLqTY83WCpJkYSXc1mTBuSzqmYMfjKDjvlLM908sMP6+SKLfYJS/UPhBhucrZvH/iBkIzpVPHScCngK/KTe4e4t/d/IGSdCFqwfbL6fw0vmDa0KFd/IISQnF2zfJ+kUjH4rNgfFVcs2ydGVfiVWZRsn2TU4MfGfKtH1LANOya5mTMBaGLXTBgiFZ9xYdGX/ADvEXJhcc01PJSF99hHo2hq0TxVsqhHGNmJeUrzfEEUKxXTTBguZjCRG7GernfDtKxUysL8J9PoBfyNzKkmQnpocxLQM0LSuKZ5xQDoAEwpyyq307hh3WRTrrSB91tgKZYyfl1DVfKS5VzUcL1zOMf9IlOpCM1zHEEnuE/sIy1Ku+mr25tbL9Y3d9e3n19s7u1v7u4/30n2dp//vhptc04nLNe9G4y7KSeWiuEL/PMSv79iixupsp6NPqy0kYV9YANxUlKudFjDIRVkwkhlj4SRhGYZKZihhIupVAW1g9jv3ZrI+VxWeQbHMJXCUC6IYNpuHYID5Gv/Ochz3ANNqGJEG2kRRbWHNABw7BE0zmR6xdSYUJGR8dWeHjt0dDD53yu0LHOeAnQr+2RlKuX6hKqVEVlh4tp+UyqZVSn8/j8xggumNZ2xOzBs2EfTg8afpCK5nDlEAD24sdzuO3TgT/ZJ9/OIyNLwgv8Z6M7SyTVnN/ZMcEEoPG2/YCpgxU6njapSU1m85XKmyQ03c1kZQkVN9g0YRkSaOVOOfZAUtzaVIqWGiYjyjbRAFISSeVVQsa4YzegkZ0RXRUHVgsjoxMXHsKhyw8s8rF0T9pFre+TnbFFPWEy4YBnhwkgiRXi6vZG/sDyX5Dep8izaIkNnd52AmNL5TEjFLulEXrN9srW5vdPduddcG7se954OpG7ojDCazv0qmzT2nzEJIV1tr/xXTEp0xgRSimPrB+GLmZJVuU+2e+joYs7wzbBL7hg55koJndhNRjY4NTf29FgGauwFN3VbQcXC4pzaU5jn9tyNSMYM/iEVkRPN1LXdHiRXaclsLu1OSUUMvWKaFIzqSrHCPuCGDY+1T6cmXKR5lTHyI6OWD8BaNSnogtBcS6IqYd928yqdwI0GC03+5pbqhtRzyyQnrObHQNkWfspz7WkPkaQqIew5kYggC1u0PuWGvJkzFXPvOS1LZinQLhZOalgqcHaLAOGocSqlEdLYPfeL3ScnOF1qJQE5xUXDubUHcVTDl1hSIE4SmTBqkuj8Hpy9AZnE3ZzNBbkdp2W5YZfCU5aQmjZi7ptJ5lEHbBcEDcKnSC1cE3u/EjNXsprNyR8Vq+z4eqENKzTJ+RUj/06nV3RE3rGMI32USqZMay5mflPc47pK55ZLv5YzbaieE1wHOQd0O5ThQQQiRxQGcaU+Haycs4Ipml9yz3XceWYfDRNZzYs6p/rWc90+S8d+DsIze0SmnCkkH64dIp/xKXAgYFN6LdC1F2rsVaYKEA+8BEdTJbW9/bWhyp6nSWXIGLebZ2PYD7sTDhkR09ijO9Pdzc1pAxHt5Qd29llLfy/4H1a+efi6w31rSRQJG967gYt9wgiQMc9uXV7WWJ799xALdGILnK+YI3R2UBOKTyE7xCtoxq8ZyC1UuNfwaffznOXltMrtIbKH2q0wDGxuJPnJHWjChTZUpE6OafEjbScGpmSJxF2npL5OWUkVnOIwNtdEMJahAnIz5+m8O1U42aks7GRWvo7WfTK1kq/nPLBUZEn+Kzk1TJCcTQ1hRWkW3a2cSt
nYRbtRQ+zixaK8Y/s8t7MTEG3oQhOa39j/BNxaWVDPPWnitjpxHN+1t3lSo0YEnh2wWj+LJO6mmLD6EbjC+LSx8fWOtQmgsfkFTedWJ+iiOB7H49lpmwOg+u9Oj20iuwXTi2Qz2VxX6XYsxuiGDFMZKWQhK03O4Uq4R545EITWr+AtQp4dnK/hwXTSiQMslUIw0BhPhGFKMEPOlDQylbmD9NnJ2RpRsgJ9sVRsyj8yTSqRMbzIrbCkZG4Hs9xNKlJIxYhg5kaqKyJLq0dKZQUer+SxOc2n9gVK7H2XM0KzgguujT2Z1164smNlskBJjBri9FZcRFFIMSJpzqjKFwH7UxByA7Qy5+kCBMs5s6IvLDBZ+sIUVTEJAs1dV2Uuw63d2Ap3JeA4VhGVKQhXDqLONjl5I3wdCN7tohvo2cH56RqpYPB8Ud84GoXngHo8EyeNdUekt7W79eJVY8FSzajgfwJ7TLrXyOeICaCmXMZYjlid1+9IV+UjIGOpQu+TKc11fSNkbEqr3OCQzR8be/A2WhPM18HDz1JaGnz9+jA6g2nOW7rEYf3NHcrEgXvTHjZPj1Q7AuSG27OApO+3yR1BC95UempzSoJiM6oyEB6tbCiFHkXPo+A44Whu49Jqn9Nc3hDFUqtXNVTXi8MzNyreTDWYHdjsF/bxCDI4gJqJoDLYZ87/cUpKml4x80yvJTALarulYyGdqdCsZEW7xqRe11FgM2PawuGkcY8lo6jQFIBJyLksWJCPK416hmGqICveVibVSq1ZKzb13MqBIloL1Hj03M9OD8SdnbCgB4EeGCHAHUsLlpj5ba6niOFHjdYRkZ/A3l6VrixC3Ki1AsaFBe+flcANAH0MNSxvyewZrMavkKYzpBWscL/W4UR7E1IwPOF4G36eYCqEw4OiGs0yollBheEp8H720Tipjn1EeX2EQpTnCDrIdkaSa26Xy/9ktXJtF8oUKNyam4q67TiZkoWsVJhjSvPcE5+/ESw3nUm1GNlHvVCiDc9zwoRVLx3don3SCi4Z08aSh0WpRdiU53lgaLQslSwVp4bliwcoVjTLFNN6KJ0KqB21aEdbbkIn/wQ2U0z4rJKVzhdIzfBOYJg3Fi1aFgzssiTnGuxWJ2cjQv09KxWh9mL5SLS0dJIQ8o8as05MA8Nhza/njCh642HydD9O3BdjRFlTyhRWCa+FyKxC2yFejeOEl2MLyjhBsMYjkrGSicyJ+SijS1EDASq927Faikr+113gVCdPd3gE1WRhmL5HtI/2Hi08zdcagPxof0DrTvCwuDPpSAJZZ3er9nYagCFhD6B0OB6O4yeNOWdMJik3i8uBDASHVmbv3Z03VkdgNO+CI4XhggkzFEynkbEiTNaB71QqMycHBVM8pT1AVsKoxSXX8jKV2SCowynIyflbYqfoQHh4cCtYQ+2mA6l3Qw+poFkXU8Ae71emZ0xelpKHu6npHJBixk2V4X2dUwMfOhCs/jdZycHVtP7yefJia2fv+eaIrOTUrOyTnd1kd3P31dYe+Z/VDpCPyxNbNkDN1Lq/j6OfUOL36BkRZwNBKUxOyUxRUeVUcbOIL9YFSe0FD2JndIEe+nszWJiQwrlCiSpl9sZwwvc0l1K5i2cEFpU5r0Xb+oZC8HJSzhea2z+8hyP1x1pHIJxKE7lxwX/D0e5QwAU5Y9KvtmuHmUhtpFjP0s7eKDbjUgx50t7BDHcdtPVfD2+Da6Cj5mDqPWm/VmzCmoji5T0whAeaxHlyFoQ0zxHhsogpC42x3pDjXYsnZ9c79ouTs+sXtfDZkrcKmg6AmzcHh7dBTRo2b5O08dJ7rG/BzYVVL1FLOjmzEzmdAQNTTg8uggJOnrFkljhrEs1jQwFBbdMbmhqujXBWIp3TKrVgfhQzkkuakQnNqUjh6E65YjdW5QEdX8nKnugWxu2iS6nMwwRcL+Roo3i/1Btjw47/veADddsHyHuNVZ/h258k3W034ejsyTJC5+37ceb24Dbit9xJG6ZYdtknVz7e9WaVmzmfzZk20aQeRzj3CBZSlizzIOtq4sXRsP8/1T4evKai4ZwuOpUKwkiSGcj2SSqLFcI1WYk+t11PGE7jXEoZM0wVcBWXiqVcW10L7CgUtV9wxEIYUTXJeUp0NZ3yj2FEeObZ3Jhyf2MDH8EnrI61lpALtbCUaiQaDj5ye/Xh9TpZEM2LMl8QQ6/qXUVtOafagF8DY2lQMRfSEFD6bliew9ovXh/Vzt+VVCbV1Ur3Lq2R0SAJI8tL2P4vQBFsOrUH+JrZWZ1M4/bwGbt4fbQ2Qm/OlZA3wlvJGmARh/qRN0cCikpak70bD67ILvG05w3DWjzWGALq+b7JBkjmNoqpN2I52oHvG2RTaaaSYSkm1sjQcC0VmoPt5OijKhiYSeT0No5BBXl9dHAGoRC44qMwVEwqq93VsYLyfKDFWfGfwAReZkm6AEyrPO+RJL9Lw4xd8KomdkkwHSgY9JrynE7yrjB7kE+YMuSYC22YI7EGbsDO+tUIEGYfngJxkYPF4HTjUKYu5grX513lYJHcKHNqrATSQ6gI54DqcrwTOFkXiDnV88G0dcQU8B07j+XJqVSKWdG3EfA1RcM4MChBqJBiEYePohAXkcp7zVwwyxhWwTM0aMMHu7pxCDJMpZjiXtG8MScVmb2SakcO8VHBfUQ1SExTh5SCDgZzdqF4PAX5q7G087mVttGqAsGFXHQXHfE0Cjyt4TmWFS4vOI79F7f7jTHRgCDpBf8CDEXAGTpVNAQf12GV6ADCmCSvTkBkErk1jHJK3jCjeIrhTToOn6KCHB9uY/CUpb4pM+mcaTAqRaMTbrSLXK2BtJTbDLhuRM5yHcJymiC4cVUlXEisYoU0IYiHyMponrFopjZkCBMlLmbTL8gTmKhfdQaxZmw4DloPBMGpbnKv8tlhua5BdQh7iIswBXPtcFx/9aJGEM4FQbmx44RnIdDanegFyfh0ylSssIPZj0N4sb0H7TFcN0xQYQgT11xJUTRtRjVtHfx2Hibn2cg7ZYD+ydt3P5OTDEOhIUigajOXroD64sWLly9f7u3tvXrV8nOhiMFzbhaXf9aewMfG6kE0D7HzWKyg+xFoGo5KfYg6zKHS64xqs77VsuC5+LXhyOHExy2eHHnuBbD6Q9gGlK9vbT/f2X3xcu/VJp2kGZtu9kM8oDgQYI4jTLtQR/ZG+LIbKPloEL3xfCCKmbwTjWY7KVjGq6YyXip5zbOlHNGf7eOCs+YnTPzhjPN+6I0eEfpnpdiIzNJyFA6yVCTjM25oLlNGRfemu9GNZaFRfKBFOZv4Jx63+DqWGbvUfCaovTob97LMGDlv/HL7BX0xZ5q1E0Qa4hrcdBMuqFrApCRMqpcPOcTg8HtEqImUOaOiD20/4k8gydIShAWOcZYOFos+F9XT9akZVbHVMOwt8pIHVRtqqsGCXg6yjLuQti6WgdKZstdGakV1BKUnDr1COdyliczstZ2qRWnkTNFyzlPClJIK87g6o17TnGexR86qUarSxs9HXjN6zUgloqgtPIb+1foVfz7r8
cOwN1STSqRzll6xnhj/43fv3r67fH968e79+cXx0eW7t28vlt6jCjMSB3JcnePwDYYdSD/wuzoMgKdKajk15FCqUjbC8O9dCqCRLXNf3nE8Vs+NVAzl03gre7aHpPOmyfrvdk8pRPrVr9/2HqRhYeKdD20ageRq+VitNYIo6uKgpMgXzRysyYIYKXONUWwUzAyQFcPSK5RNkQ47JPOwgwzE+pl47ec7aGKBK6XJga6ZsiJfRujMCuGRNjdnNQ8Vpilp9h432kD+PWdpGcTUFwcweUfG4c6Iv7wjDjg82Iz1dFGYnXzeKMOwZKldjQMyQIFE4Ozjzhsnp/EgUXJ4dFfNWV5GVg1QdNCLF4bWToUSC3uzGh7MVsvcWEMaHurF86wp/PGCzgYVRmOhCiYLIUQIkCW0ScVzY/XAHtAMnQ0EWU1ZDi46a5mZo5T1u6ePUtfvSF5vi+kwq8sDb8w74HbUi66jJIIcijQ7lCCKo5OCCjpD5s91TQgdIQpT5iM+EoUcx5zkqPX1HbwkevTu0HRkuNHTEHaEbvGNZuZ4z5hRNPp9cejIflwc+rcYKN2I814qWjrcMq7axCNFS4dhIWr6KVr6KVr6f3e0dHwwfVCNKy3T3q8vFTIds8KnuOmnuOnHAekpbnp5nD3FTT/FTX9PcdPRJfa9BU83QCfDRFDz0s4W3/T3hA2zRrxwqfg1NYwcvfl9rS9iGE4N6CHfVNA0ROlGxhm3UjDZ1LgxkkwWgIkjBiWGHn+FQ4RBP0Bs+3Kx0LfS8tcOiM46EuVTVPRTVPRTVPRTVPRTVPRTVHSb4J6iop+iop+iop+ior9llvbZUdFZjteL9369fg0f7y7Lu0zEFcSb5HyiqOJMk2whaIFqlEe5pJmvfOyKrIJJxv38hoqFq1IXF2l1JaMkWdFzCkmOjXlWXIFcHz6Lhh4fSzepQjV8CPBgBseDWvQ0zz3qpjLP5Q0Xs30Pzd/IES5gPefiys23IM/GSZbn4zVX+M6riFKQ37jI5I2u3z9HcN9iZM6zcaJl33vvBf+4DjJbZ+0dWBpgLHI+6RuwoOnb8+Vdgc2wvOQ7intrQf4UBvfth8G1t+yvExXXWtlTkNxQQXItRD/FzN2CJysxJkW2OxBDfHO0i1M8CB49p1sDAXT+y8HWp0G0vftiOJi2d198GlS7zn47CFS7W9sPg2ogDt3Qdp1w074261KaBS21N3rHPB1aHUlBMq6vusfmiinB8ufbiZd8l1huSc1Qat1PVZ4jxHaSztpbwB/uf3CC5QesOf18+8MnLQgsjCUVi4GWdRLKzuA0nQ0a+WSYjEBrjqLkOVuHGNdHvYhLlkSADb3alov8ExZ7RuM4gvsXZ4e/7K2V/viru24WTn/gyl4kz5NXLzY3k62XO1u7D1ii7+BzCWsdNNHNLfRziPX87ODk9CI5/o/jByzRNdAZel1ums9Z30o4jR8+Hhx7NRf+fhsUVuRNK3cjIFggRKOs/tHp+X0WiJ8asbZ2wqPTc/JHxcDSYAVVKvQNi1p32d9dYrYTWBmHZNdQSrmuee/HWpBScQm2hhkzWEkah3WDPhtnQkOa4z48P15zTXQWfpJ4dLA6+1LMaC6r2xm5EXHaEDqs0VlCdWybcDCgWH3DFKv3Di2nXOM4XSjx1fHaQyKDGyt+9Jj11QNBqFJ04ZGBWHbvo5uIpnMHBtGu6rliplIiMmj6ZniuDFgkMTAC1u0rtnAoq+N1/d7gFmjm+7I1wpEnC3J8eF63zXiHJdxxrLmV4aGtQmwEKOrl4I9+ckFu7FvHh+du+HYEkt1mS34Q9YR+fOxaAr80Q8rtc57MyYEhBRe8qIqR+7K2CrhFFVbjiztoje0sYwscpP53lsF17RsZWWErDEntaCkIK9z4No5Uk1JqzSfob8igIrm9+WltKnFGQx933A8o1STFjjaNOPYWRSZpTgeLWMecfYrROWFDfG5BhhTDofERxpRgYf8Oszw57QU9qtswiIsboI24I0YstDpFusPBKBZN8HF0+GrJRKa97wWyrIFheZTEA/q1dwTtrc3E/18vFoaMW7xoOuEtxUXpyi3QSYll7nWzcRB1xhA5JYenB2+O7YGYMIss+35+zbJRzJxWVzUZo7OkZjEmyl+QwjdekkoxXUqL4mDZiwaBc5mQk8CrhDTe094e0zc3HEN7Bh8sP7Y3D4PGpJ1tubm5SW4Jw/A7Y8wyLufbApUs7iEzB2LIrsFCajk3rBcQ0LsJ3uZE03nM2NkU+FIjz4LrlKqMZQn5nSnpc+gLsNnMXSgqstAaf5MaaThFT1x7P50OWMfgYl7XMPhEFgOk2bQYMJoxdTnNfXPIIczfcGfLKdkmOTOGKeCSODOBmRuFSEpsZVQXO9gnBwcjcnE4Iu+ORuTdwYgcHI3I4dGIHL3tkKz7uE7eHdV/NuPHB3NP2x2yS8PYvdhNTTWYjeuWt0rOFC2QAkOb3oAE+wiIZZhcEw0EWWslr/NxkDnoHg1qe2trq7FuWfbEFT/64p0nSgo0l6MYhemwzhx9xQUE0KEA25BpSWhpGkcvQS9G43FXN4fBwHIcBmVkwAw4CeMxb8XRr++P3/2jgaPAGb+YxODa/LjbAvWSe4WDBgMf8l6EC7EFWnzvBXNaqyCTkGK9VFwY6NeXzim0tFaaPJuwXN6Q59uQeGchIFvbL9ZGEe1L3Xij5uVBQ8J2TEyntLRnimpGtjbhCpnBHB+Ojo7WajH8R5peEZ1TPXca3x+VhKSmMLIbKiEXdKJHJKVKcTpjTnfQKKPmPEq/mzKWxSOkUlwz5YKDP5gR+aDwrQ8C6I85n8aD7tiwzV89FvYp/vWbiX8NRBGQPyQxhElAxastC26BdQvBDol2GYUbaA4qoUusAKCBEYaZRjVqdDXZtuvcShxWgDRGDZzXEDacjF57rcdYGSGJCEmMojyH7oJMcdkv+PYj/Sn6GNnfU/Txg6KPa/r5MgqC05PuFioODg6akrHXVS8/J4fooGOiy3NycmZlOAa1wMaxaWPcsjH4H8fe1Odoh0+nPK1ysCBVmo3IhKW00sEyfU0VZ2bhlaOYUAtqtFUK7VAOrIQcfzTKt/wD+KIKAx5Qg+3PJQGraISccS2uQst3boI5C3slZOyjfbuwVBIPjSIBvgS/M6o5hKiFEevmeiipWOF2Krt1FYN20zadNL/bam8wSMJfQhHwc/WnGp6+hVigBnQDno3V+HAEA78P2chGDtFWJgX6a15e0MOwLtcTOQgglGXGr5mG7oWRa6HRzhAeSxWLQ6UyocMoU4St7SNYFooaAG/wd+6ABhCt+aGNOWChZMqt/5ks0fqaL+wQWspwrzhtDU/HWkIORAb1WlMpasXVYbV59m93VHh7vtXjHE/o8NJg+A3V9dKGC+j48D4X0Btm6HpsrPbVmZw1evnCfve1mVbsj4orlkGhs0eIcDg+PA9+VLjHAn7tYjQxMiFjlurEPTTGCH8PRs0EQTAC1lNpg/UJIdo777QPJeS3ORO4Z7CB2LU/yGtcZDxlmqyvOyOpc2BYgCw+dc5nc5P3FaWN
VgPvR8G1ObMs2upvyrUppdk/Lag+TTGds4K28E8873dL6BqVk81kM6YcpWSjENhx+GLpEGZoQ++dQS7iEsh3AXaNgMf32NC2QPkBn3NuoLJkUNAlZ1gC2aLZMwIIwk+pvYVu8PYJdgzce240y6e1ok0Fjv4AN91AyeWATDT6tNwJCOCdNrhhYvpDekgPBM7QdA8YUfB9z2K9saoxsDY0vbq00sVfIQ3qAoMvU2jenLLg+wGMWmItc/ARso+tfkZfSNANuzvCk+ZK5ZpgYovDF9jHlJV1pnHEKv5Jr2mSUzFLTqs8P5Pgjjj2j8c85LrVUfz4eomG4qGRb28hQd8duT84PJdeXcGag4qnDV4QWM6BfbTVstyyh/ad7G9iaAhWMDPHcxp4U60pvJaBM8HFwUWaV66OO3htqAmuMtC0xKweI9QUtxPVi3Dj+aGoT+ewVKaML2LvStPXDdadTR0VmpDW7sb0/m/Q/eLE7RGW9+rp0j5h5saK+TS0Y3byjLoObmaczDU4Z1DDP82ltms78DtxP7qxlIQ/x1JBbS0otpOTglFdKVZgFwAImu7DbPQYBPoaesUCDcdojsmjxnHBCgkRKkxDP203XFZj2rXVvuaBZxlWgCG/Uiwh5wz3fIzl5+xFN8Zlc+MKPANT0HUL/MiTH45wHJHgILXzamP19MYlvlw1/iWq7XyyroCjBwXBOx+a9feclSPUk8FCk3FYhIjeIidQ+hNIoBZB51R4vPpO6OPadB021zKMMSBknWbZeETG7tysw7lh8NWU52wdxfxsjL4j70Fp3AYg30dBK1gfs8yBwvpq+FeaqfWSam2RuY5hSU2ZwoE+zHZgAgwcpCmZWjXIypKHOKcvkoaBXqhhg5RKDe5IbQsDZcUZtNzW2IE88GTOmaIqncdxxO29qcU/3O6VCZ+RSQX1NlYsfNGInOmmUS2SyHPDlON2rSn23c6OycJdFkFMx94izsrlHgtjQtoENwvnO0PJmmvkWfki7kviZrSbMnad/l2KkWVj9YhEVxMPVpvqw/hejXPzgg2N5rm8sRBa3TJtbpS7d9ySIlMcNVYOga0J+kaEya5qWJm5FfWiulu3y7iPZ0o4cfJlGrk5QzQdLApyxUG/hoy4CHNRdUsfslVpFi6NjOlGZw8nYGpSiajU5YgoNqMqy+PdB+4PTxMrx1T2D6mIXR7ocaBP4UUjr5mCW8Zq8UFk8pIdj7eE+aBNlHPIyVF3G3Ze7Ow1kY8c6B5ekNXGiCZ+3WnAQTrtaNgG3I83VksNvBVuxSlXUUKNYhR4m6XOGeyJVPYzWFFKXrIcej/cQtMZtzJE6orn/F+oH2poUSLboCb+ysRtUE1sJQ+3OUNro5X3fDGeEI3TvlJOBCnslay5qVAZHrmQQ3MjSZjWHbQJ61G5kfX7j2kczSJ8pjVmLOUpJBS5Sjw5hNWgYBRbm1yEgou3RBKvmUQstsC2wKuAdNyTkLGbEW4cl2hBUkjBjazj++ohVldBLfY7Zj/6Xi5GkivGSlKV6EaAl+LD1cSqVasR0iYe7dWKJy6l+Sje2dq9G+Wmx1lV25tbL9Y3d9e3n19s7u1v7u4/30n2dl/+3oxCzKihmt1XQenzKz7gNK3ANNHACLpWwBFeYClbKjDYzOlTVoWQyl83WN+Lpo17JpezkdP/cjlbG8WTh1vESCfjLOratdF5TWURld/Ddlc12LDpiqWyKIBnQy62kCZYtmB4K/c05gZVLwTJFTKr8pr0sYYHJmuj1ENJJrH9legM03PZlDSdsyTCRdjeSi1T+LGnQlbrTS7Kylz6HwUV0kXCef2vMvEDVL/hec57n0EHG9DIVi/hHLmpGzY0Ap7AMG2TkpBPIdbtmcfPzKpNijkfpKmdfo24xj5e5BkNzC4yrwrYPeWd6iJMLBO0dduVUoPauU3aFwnSm704/fderAqA27sGfIZyAupiq6r9gGU9fqF6Tp6VTM1pqe3h08Z+M+VixhSE26yB84/euJvMSLsBFP1Ske2nkEIbZZcPJgMwvFrJsU30dT+pvr8Ofjw8+mJWvZMju5pQMj1Sxlow79Gd6e7mZtaETMxYN6l6eZnkItwJQBeBq1Kl+LWPwGRQfFTR3AWUGqk6EgbIFr7eBAgD4/rCiWXxFl16cSFfEJmmlVIsSxynrG/iXMvO6A1pKp6gYBR7ovu8ZUzwsfd1VImfBAGKaHrTqwOfCKdU2tOFSr9Vw7SuCisxCEns2kDbGQVJwd293jU1V1LIXM4aRT/sVSOvfFgA1/sNXJH/r724+hu/3eOl7uzdZGtz6/els6OveJsZfWN6rg/g+iRFF4076FG0A637Udq2SUhP8WJD/LPp1OH3XBcDcKDFFtrxIkecL1IdHKK13aRXg3bxwV5rQX6HYvus4npOaM6U8YIMnIWGdawVd4CXVnO0loyKayRzeePkcYsqgKCRLRZdcGRORZZDXOGcLcBVdmNVZWGiY6qYXTMYK+svUcwAhCiZ16vmBkaBkw5NYSAASxtLDDdzBmlqIaIdW4qCo8+AW3BW5VSFUPtadVRWuOoReXLm6n4Gp0ksUw0myOIsUY4JRD3DWtqSovOKO/UBFBTkVVVZSuVMNKkUKSsh5AmHRo0ir2YgCXQtKbVbnsJJEF56Rnn4AERBuH/XRv7c4MjjVvhZQxWsXRFgBrTP3yZnNrDuef8QeH9nmTr7aILxwJKzMFyF0/fekf8dUsMtSrSV2CEWhqF0l8n0MuphmHFtJZMMDKNYDgzUWWY5E8tqorfSv4vfgShgozi79rr0+BL3pofVn7OSbL0im3v72y/2tzbR0n14/NP+5v/5l63tnf/3nKWVXQB+ImZu7xFoEcMUfreVuEe3Nt0ftRRoeYGu4JxOK3svayPLkmX+BfyvVum/bW0m9n9bJNPm37aTrWQ72dal+bet7efNOruyMlYx+qYvF6s+ferd4tY39sF4GRMQiB1zLrwxIiMr9VgGX06tM1KeW6klGFRKpnyYdbg/oIo7GmwwnZllvSLMqTQuVQHFO5/eCzWfnSsgMvRnDRMlcgvM72pdfJZX+6ItEXev764WYkbQehctdngn8tomEi0wAv3AXgUiwO8FUYqhcXAJlLLy+hp5FtaGn12SGd7PYdA6PBdFMrdG0PXrimh1cmyoSxO0b7xP7ejRfahDxBUyZnkN1TniDV5qW6/jsBK3sXHI1k+VAnqq0SJcwqzj7GA6g4RcK91qLVPn4cN9uEXkMA3uVtcWsYPXKJi23LSWMvysZh6b3vetRDFu9G6lYhFEFlBCOeQMesBIJhny1YJe1bujmdA9V4lDa4PFDNzGdvU8xKf1nTM0IsOpwuvZh9KeL7SzPHVtzq/lLLKxFigsNS7WOijOK2b+TulpFEG0nJobqthd2VfusMB1f77QhZXO5saU2Ro2v56ib8T1OHIDt4vwhRGfYdmVUV2dZN0tcd3fQesHlVWdxGzttio0jW2ESoSROeXR9/Gdn4C8f/ea5Fxc+djqu4vZeRdIWyjwo2D1RPD58jT2ITscRiOQg0iCH4XrqJHIHykt+yC
uWhaqGPK9QgrwrgAzDB4a7M3VQbLdXb2/seG6Wl0zkUmVpLLAnmsb/7K5CaaPZbVExfXVpY4u79uu82kuaW+M0TuurwiMAOKq4lJxjHBuU6h2RES0zCvQv6Psp/eaOWM+rAzM6c71gEx6zlS7GV+A/dJq9kvQ2K2LWD0F0wD/k2Uw7D0LGmFMgk4peKTCIjYt2WxtbvaYUwrKXQlLV5d2ISvY9qaB2x1VLDAH6Zg6Akg3/Rl2iBtnHtHMkpOol4FYc4GRcH1hyc2WyVKzP6olT+jDelScu4F9a7VbeC1EbrUehfBQhN87AsAUrjtuyRF4ZehVM4WcfaSpIVJlzncdVN/IPxl7J8OpDuazYJjuYOuaRR2AHqXNBGYwYrBNmKB5fhri1l3+o99CrniQ4sKIcU55lK+AT3kzt3f30ihc2jMnnTifR1V6U0gUjhF2AoJ33KzcKVGpFJprEwtEjjJjywdce/YK7K3r4C7fsJ4Js2iGvobjXM4SDb8n/vcklRkbJ573+q/rpIjYuFgHy2LNFTdFW8xtOqmQq/k2KfXRPDk6X0t8NlnjjSAXObIm3OrvNyLMiJHwVh6vQ9zDuKksMQjm9uVGURNhwd1L5GWTpg1dqkXN3W4L9Inc67hwYUCx6yKiCHRh1G7yW3wX9pz+WXeZHCAL427tobEkeyBqxmF3OCwILQsuGNHB3BRHcsVotnCU5C5rT+i1/Tm6JvEAeuIg0ioQN1w3VK00ZSVmNIdJfX4R1Cmg9vhLATL5yZGbfOW4UrJkGweFNkxltFiJsp3pZKLYNSof/vHzi5U11AXIL7/sF0XNTDjN/VPrm7v7m5sray022o26/cbMB2bO1SeGYEG0UtMy0IosWtHVZB1jsVbgph8hSWFcU3R3kFpR7cR3IXkiTx8RJux+6yhgy/HVDPydMrJI4KIg97BUdktB5nTatk/ravcb+4KhVE7hX5SdxmWVGqptyGpbexAwNhSY8xKZdM0pK3uEr5k2fOZX11S9l1AsBJxbPzSmUHCxnrHSzDuj45XUbNVO0L0GQlOIdXe5YgICb0mZ05Tdqp3copXUJ/6ztJNi4fSTYuGyrK2GAnNs7G6/3MpYNlmf7k4213e2t/bW915ON9d3aLqz93KTPt+bsru1F08PU+6M/C7G/Sf/+Y4Q9wMsTNqKh4bCHR3/EISaazKxclEzWMyFbNtfIXbOBynbsd3K/f7/BJVbXR0wJ3ZFphw44GDx9Vvko8D9ZyqyDanqxZJG1MvIVaIIdsPJAqc88XZv8qb2OvznTydv/suXTNR1vLe9ZHnK9FqCL7vwf2eF6Wn8TSHVmGWIzdZ6/HGMvMLO1PSguGmMxfoMwWT1NXVeYhJq6FrRwg/da1n1Jrh6KzWGbxlF0yswqaAVsCf8gxqj+KTqdDYeoEgR4j3MF1//4UtsFIHs+ZqqhaWN0G2G/MIUhqlBFRT2cU4rDeZLSGCXU3e3NLm1ZQvM1z7y8fTueNr7kF+zEdhyIZE4G9X9fewdBY0AYpcJ+8jSyrARmfMsY2IE4ZD4bynyxchxyBG5Udz0mA5X/3PFP7syIiv49Mp/fWql9afOEE+dIZ46Qzx1hnjqDGG+784QvaH9D5MdQA6CcUAYhLrRS4oLEFGHxNZ4vykspFH42mNJN7VA4GQuihE2kAnVL+/gb6GALQzjNhAlh6oEO864sFONncrH7VlhmoxhFeNIX8Vgf8zjwNrbwapnHx1ZTTMNw3lt0sMdV/Bu4auR9/fYVxw2SHa+ad3y1gWA2kSpW/31g7AzFJShwWHIug/qDLRyd1Eqjk3FebCZ4tdRdAQUuHRmh8gU0FnhxlwWbIPmHvNhpXa4SxzmcxfbS9xHCkRRLMR5x2qbhglgzIrl7JpGlua6dVlvNF2UPlGWTFlFFy+AhvkOrs+8r1X+4bJcCVAzYFMDYFlhks5els6uFJrmD1Zh9Ezxwl4E2O7y5Ig8+/nkaO3Oo7S6tbm51TzwtX44NITt3gE9LQbbB+CL9h76Sg2GvmIXoa/YKqiOxR8uOfPEjl3biL2gitxNhL+9Kal9VrZ3Xzzfe948LQUv2OWA1SzenLw5xjhqf7v47E+AFpTCZrciRbRRjELcyWRhIlNCpaEEgzMW3tzcJJwKmkg120CfNySAbhQs43QdLMHx38nHuSny/zw5OD2oWfx0ylNOc7Qb/9fIXRm+3FmC5YJ6csms/FGC3D9x1QTDmJjeGGK/o6X7TLtlGX8xHCW9sYQUo50LIlMrtgfqor2lRFY3X+xstkjoMyXSHoE0SJIUQolBdWgeswFLA5+2G2jhZR7q/fibso73N3FH6g7KfHHP9kUqb8RgkWpoPrYTrIIFRUHa3/330+O29/pqdX2glRh0EYv0k1FrI2FvsTRoR/ht6KdZJFQ+TPjduG3vn7qOPXUde+o69tR17Gt2HYtCefifDwzk6zF62UGsGAEyW6Qxv42Va+SeUMrHRTxwTVbsx55Cw1svnu/tNAA1VM2YufyL3FIXsBq8pyCYYlGAr/+LlZqDfQMJ9RlSYcYVeKgdJGsd6gvu5BBcMWi/ESu5gCHgPRgCVB0LHJVBfHbeshKg4HO7rSBYChhmjbs4gJ/dxzvCAH5mMq6VmVKlFpjEh04tWgv+YGrCDm2hMFGwpTdjPVwzVxleib1lobw4pmJjwCNL55A3XqcYWMhOzryLVCqnbKh1XVk9JdjGlyqhyc1iKP/Sod28XmH0jRRW72tmAmDsDBOD+btOG34uN1m3nrNUZk4OsLBdC8BKGLW45Fr2lJ1+HJThFOTk/G1/tenDg16QhtpBB07vJh5SQVvWbU/V94AyY/KylLHsFauIUsy4gYqKIiM5NfChe8L/m6zkUqzsk/WXz5MXWzt7zzdHZCWnZmWf7Owmu5u7r7b2yP+sfilVcvW9PYI+ZKglnNKAmpH3d2CQnZySmaKiyqmKXdfQTjOFCCvLbKIr9jAuRhLJFly5VGmItMZKS2SaS6lcyPwInXZxlb8wKIKXk3K+0JglB/mGI2APGCPS6tlYpzFBSCIXhFZGFsD9IvbWvegnUhsp1rO0sS+KzbgUQ56sdzDDXQdr/dfDPpgGOloOnt6T9WvFJiz9oc/O7e+v8MXtN5i9VNF4HZVq7Qlnh2d0HbzTco7EYe3LFxgftqdIo1hU8HiZsGDIDimYSyq5raUPFeT10cGZvUEPMC2z9p7F3USaLGQwIej2os+4KNeXEi2+GyFK60vxtxjnAFDyQ0+pIEefv/jP95QSnmPVHyDPmiLrnBP4neYzqbiZF6GyLFcu9CyKoWR55qLZsBIxhKXOsVUWhpq/OdodgQNjDei8VMxx64QcZJkHYxpCHjEC1w0xWUDCuEqp9kalJnDIjC2AaLvGehaQI6ZZSRU1MnQUproRXf1MC3qF8bMjgnlwc/r8cndr+yFNi7+0q+nLe5m+joPpS/qWwnmSulGb+xf/+c64ZQgSbsctu+xusDRUBsuoaENFlDx1fHgO7yZ/84fg1oz4bpwvTCpFXeQ51ntCEW1QNUGhua8YNKwVnTQtC+2cquyGKj
Yi11yZiuakoOmcC6ZH5EimV0yFTqLKpW78ezVhSjCIdJUZe1BVZpXOuWGpqe5NfP2UjX/bSrFuzNeRCD7uvbh8sfO1bli8C+U02jtPav6ave2OrQMrUPZMY/HVDrK6qm+7fcOIUpFTZn48eXve7fL1movqY8/YNdDRTGFEuPd9BYGeeI23pxdvz98GzNxjU5sxmXxDijSA860r0wjkN6dQx2B9I0q1BembV6wtkE/K9bepXNu9+RYV7Aiur6lkN6WugSBZ/cWNHd9IjUrBdT+DkCF941P1xx6yMSg29vy6hr5eK4T72IlD9yisj7Mep62iHBDHDR/ogEdfOo3mN3ShSQWvjCBX0FUaCEaHglHBxQwKX7i620xccyUh0KfRVt3tH/SerhSoiZUv+DaeMGqAEY3bWCjvwUJ/E0gQRnlZNz5s9V6i6QDI/cVt5m2zDkWjp3fSZ9R1EikzosqIGt8L/tEXEnGMEorK/VHRHIJ7wpiRLOfb20BlB9djPTT0qDRTiasCAl16M5byDKqtWXEUSKlm7tBVs7X5UidTWvB8qAiMt+cExyfPvJNGsQzStjM24VSMyFQxNtHZiNygONz1t+GTHbir/BFTmr+a/7Oj7uCuN6N0QsyD677WL/LS1OL7jfwnvWZtbEUFpgbY5fYacLYANqjbit64Qi4dyHeSnWRzfWtrex10cp62oX9cAepb2+s4gs6h7LbN/Y82Zry180vtrJ/PnWcr90k9ItWkEqa66wxTdcM7Z3jYkKEO8MvS49ZmsrWTNPvqDlZ2w5VXbl0rVoM/zGWVBWXc2wnqindOqsHgBSihPTbbScEyXhVjKKJzXbRKGzYsAcEm1Gish9XvwMIbu+BrOSSM2CePtKpOlEuGxd4WVXOObQpqSS4UFUAze3Pbnm/vNqe39+PXcrhA2MaQ/hZYHSsoH4qtW9WSwARe3kq6ANhr+JHD4b4af7YLXtUglvlreEroNeU5nfRkthzkE6YMOeZCG9ZiboAb9Ab9dT1+0SK/aedfBOeX9gO2gBiwc4hXPIHvgAcOyu4oDL1q8HJo3ugYlCBUSLEo+J9xN2lAYfj4PhReHMMqeDa2lIIfvPaN+k8qxRT3ql3wQGSuAngYttl0qYGnL9M8OCTEw5xdKB5PnfxqLO18LpUPtYXaEbXpv150Ixtigh0BgunHmEaAxS8XF2fw+XaH20/ebR1i/uxLUfNC1zmbjCuV+2pcmmEpThNh2AKpcg+vYn9UTD8g1MK/MJHZIomzqB5YqDN+tYncONq3BSaBWdvo3dt7eTuILuHnL3CRXjjjBm78nRj5heW5JDdSubYaHcwMsG8XEmsz3LF7zyywwLTmjFrpu6vSbO0879/Mgpm5HOo+XG2gFKdqpWZH5e2wqfOExcVtjQwBG1iV7I+KqYXVg0IX4EymVeHT38LYvvfvyomvXGp1q+PD856w9RkzI1JCh+eyMr1oggLXarDsr3du+LrwWoy5zm76jMpJLmeJz1hKZbHRgl2XUmj2xXkKTrssU4mB/Otylbtwcjtb8bj50nzFQftpjMUBjZVwehxVn19zuolTVy+o11+1s9mMtxjWiANw3WYV2wIjTZ11bpia0rRR2PCk8eXdQaFhgE4Pf4gLTaXKCBczqwljf0T8szkvaYi9kOqjWCmVK3VEhS/Mq9pFkImSFWRX5pJmZEJzKlKm1sKowWjDPoZ08TAW9KGC7kg9vfATaOFm6q4hbszQKSQMU6MAgfNjaSa0VK50e0kFsStaw6IhMRyJw08PKnpCp5aX5WjO6VA12gKJ4CzopKh3rFYvRz0OaL97gZuFst7Y2RdNaxaVXGiesRGRlXF/KJIVf4YWHzXqBS36zJLuxR/u4ZqDx+PW+Do5aiOrQd41ts5P35x1zgkhJ0c93G9z2QUOnYTp94LdThHdPHczvwf+OiVkFvOp1+7jHXGMR50Qw1BE2xcFLFg6p4LrgkSVAkMzlijZCjrL1GGN0Csl7Na9oY2d6dy4oes01BDz5VfD/FG8fNP8hPXYw0RYnd6PCZ7NuGz738aNhfi34laDnTr/rRUKaWARLIvH/1so4jupDFHUGcF9sd+/gdXDKtDww/HhuUPfA4IngVCbRPs4foS3vuOHRWSI8nGb1W3oOe2p04X4cv4GDeE5YSgFclwFnYh8uf1GkT9X+Qt7QFNDZpLV7QVgEHRJxE3HM8m0WF01oY+0FFEvJl/Nv6xMvJ+Bmizdh24DULIkNPOJex2sdXrzI9Uh0Y9vqBLjERkzpex/OPyrvrVo3tMDAIptNrfV0pIaYF8vWp2NcCJ3l0D5N6zAgrd8XS60AjKPS7LEo6Q51T5KALrzeNUwzAC3ky+5TNJKG1n0u52lmiUsp9rwFPv6JRMpjTaKlsmP/q8GsjCVHooGJDlfqhUBdCIMCO5gyI7S6pUSSqhQLrwb3ZEduNBdy3I8Ne3eUNGRaa12Z/vWpQx4HbWp4JEWF5UyNI5yLGM0XZrrL+0Vtjf5J72mvYipRDpgyYsOXtx0roLjXGYdVNyzv/Y09CxkmM6c/rgC44z5t+/USdv9zEH9jZ4IGzthU0ioKXNuMJfBkKpsNAcoqWr0xD3BqCUFlYcwl23shvVGWUReHN+E1f0VhSLWdsRmCX8WA9doJdhYhl/sqLMg39UtjIkt/FyvD+iEgLWQUideU8zsRv83E6mEoBmpiGA3wBes6FbI6/gQSJJC3daqbIP8uY1OiZauj6m91iYMbGtxaNfEx3mAde6z+51CAC04xt8sgkQZ8nPgIlzi6GGJffcVfrjsI+vO2XNXbSiW2uzzxWOxAvJY7NVdcBNzpGtO3TAJOcuZVU81Y+TdT4ea7O5s79itfL71YifpWVoypSnPfQOfx7aIrEYr9C2m/IQd2artKg7rO4jbINWrsjRkl+XOSLuaJhX+ygvdpTbDkPbd7edd4th+fieOBr6ffOcd9tGsT6hVBJZGVmsdQNQv+9biG8o9+la3tvmWxnWfvsWsHpJrskf+ViPnX4OkmjR5T93QzaobyN9D/wDXUgVYsqOeQCgw89arrZ5iMs93+9Da6IP1MNzee2LaTdnuPzF9zb9czy+L45phxKpKnRnbnrjmNIClts3t5Oh8bRRrJVat6ADvTuZM9jYJuxP00LfMKznU9bBPTat1mb0N7mpd1m7itlS/sl6eEDZ8yMyUb4EYmg38wqhLEQGYWW+hgEip/YqbH0HR7bbgdNRgLENDbmxyOo2+uicd3ZuBmzm0aI8uiko4cQzLOMlrFvoa1wm7BIWyqEGPy4HVDWuOe+KTMm796D7SwA3bbhkUOgg/IOe11rKHOi4HqMnM+DUTro9WNKuzw5RKGpnK3Kn6XkFXE24UVTwiHCwG65pVG3tYNMrIBZROc02LRiCQ0lxLmGyBikD9sL5alJFJhqd/jOzNxSZSXo2IubGynPKtzOL6rlbz0NxUTkqvq5Bj190wIpSzAljqIk/2FspCUae6uyUcqY2MaUNOzrC+lR6BI0KPSDTmDVe+qu436BmnvGiQVo8jcpmeqLc6IVfRC4neR5C4wQ8OO
zKR9txAZJ/dliafHbvOofDmGISIsUW21Zu5FOF7xciVkDdiRMb+sLqfUFSJ+tnrqui5kV7sNRDgOIhZXA7msVg9wIg4aKaH5mAB2ZJ+ceTkDF16jpqoJjcszx2TC+vxx69OP2zyv9oCR6GnyTqdCamNvfkMFRlVQGO++nMYdpo36+u/ZlS5isvUhMiEGTfzagIxCZZAcj6bm42AvHWerdtLpkfo25+//Vd9uvPLv775effNPzb25ifqP87+SHd+//XPzX9rbEUgjQGsHStHfnB/+3t2bRSdTnmafBDvmF0P7Dmptev9D4J8CMj5QP5GuJjISmQfBCF/I7Iy0SfuykziJ9+JED9VAgj3g/ggfpszEY9Z0LKMWj8C08HLyykzRd0JzrlgR+FCiuwc8ZiBc0GSvSaQgAzdwTi7SRCGWyb2qJGKlEzxghmmEJAG0MvBVAPSgMD+F0QeN1k8cpg0WelayADbDbqZSnVDVcayy8/JJjw583HmdZtYd1yjn5y9rFTyYzfsY+vVdrKVbCVNKy2ngl6iOjUQgzk5OD0gZ547nKLm9uzeKu2en6wjcN0vsF571MP23PERuK98tzn/lnb8h+bQ+xw4GEg8p8z8lMsb4HAa/nLBmWHcXM68Q6By0Zl9a+rW020iWixXzfuTDE5OXE1gkthxSbPMcWPXa80yWX81XedUuIdjA6DPRkejJQwJNev//vrgFKnvj3Uu1v/ALwxFf2fUgo4c5FZWiGKmESDf9ITYiROO1kL4G0tznAD0EVQtz2SlozEBEM1E5ty4lk3ijgar7t7mdrL1B2EipaW2Jx/kLSs/tmI3WsrP74xdjchvXDE9p+oqWQsovy+swC4gcasb6DgB0rvBBY1Ak87RXzpuIFrBgPrvW6fM4WJuCyO4dTkPDPYYOq8B1ZLJgkhIqpMKaMzJvbquBuGPXXs5P0O46m98yhtglzS9Yve2jbzd3gSirhvkk4Rd926PuFv/0iPw+h9rzciJvv0i73YzYs7z6wGkrNXXLz2jrKVV5DzsYwKy5IjkwMv/SVOrw4XgjKBbfns6U0hCCHGmHuohUHjuzqrf7Eh8QH0ZEr6or2dnl/jvOE98DIkXc2sM53RhxYIqK0fEpOWI8PL6xTpPi3JEmEmTtW8P8yZtIX6gNFgXnvj2/ATasuQovt7E6aqerF9bLCYWdzuIwcg+UWqWjkjJC0Dot4dOC3QDn9/zPfpXuEGDm9+NAk87++jb+Lu76gtGMY+d5uglg95KjpeMQvF2LOzRMStip8YQSJcxw1Iz8uNjVA4G19074npTxncKpr3nsKG4btZeD6nhIdzHlxXEQSn0y1fQ8B2W2mryLsWUzypV77skqhLLI4BoOTV2usSXsmmXOfT2ej0iN2wCGiBn0JjfqAoS+xFdXIqNUsF6YVxfcsXLw7Xa/IM/wVZAdsPGIEUzgn87lxo0gM7QFqsHZ28canTyQ812An1GFm2KnT5vMWi7e8PHHPMpoWLhmRxgHdepA11oH2qJtKFr4f8OfMMqvA4WusyTNy725I+KVTgwOb54DVUypQAS8savUsmUaR1ZL8IwoZ6rYuD+SCUErFnJzOMDogOPD88fYIVncWj5o+uX/rgnLqx/LlGfqyPYwSQehWmjmg/tLmkRmcktY0Sa+FOKZuqtkQSj7/h04fMHvP2LkHOMxqeqaFic6qvG2cTbul0rLt/7TDA83+rzt4TnYywMNWwmFf+TBUiWvQFwAUlASfIUpv9gza2Dw7983H5nxd9nIH9nQd+zLBcv4TsX6TqLskx4KNuIY8PA5+U0+CKCse6O1REjw4GKeTCkNNSeKaoYBNa5y8KP7Oqh+65aI3LsXB31NXT05vcR+eXdiLxmM/uEVTHbGD2rJjlPL3EYtnTPt6fCvk+FfR8OUu+GPhX2fSrs+1TY969X2Ldd17d5qde+mC+j0/m07eGVOj/T96vVudGe1DryOdnXHST+5fW67pK/d8XOr+h71uwaa/jLqHZ+VV9Qt+MilUUciPFpul2dj05x1KZel3h21dHrQJ8Lo96j1x29+X1pVH5ayFYdklVXuem/44epBf/m4PB2ABrzDymlH9aZ0V0khM2qo0LhQbDhu3DnON47vNmI7p6zvJxWeVyjt77upnUkUHBWBAcCxWxJlteFbDCFU6oZFfxPlKkbcRFCxsnekPnIWMYypwBgKifClbOpIawozaIn5vQS4vPOf25sxFO1effDt1aB/Kna/FO1+adq848M/OdUmy+VzKr0EYv2ddJ13Qy33FwtEPX25mYDPs0Up/mwMdVed3eTOc28KVoMVpV/7srqt8usgXWeGkogYgLEwamSRTNmTrkGP1En1RCrXY+0KJlO+krS+Gh6Na7FvbG/3aE+TabhPyX8B25a+EPmOYMqNmg/sH/VQQk9OYIN7bku5xclaD0mUv8OAy9HcOeLggrTMlb1nt/H6TnpNyViiHUBkFpWgnd9dFD7+3tSKONxfCQIE4qncyQoCAFpVMwOeY2pLEoqvNRkxUCwpzaIsZXkGOdU6lDP0IqSkG1KlaJiBvE8U54b5qy9UH3ZC4lQ7gJCfgU86AXNAEa9nodUwPoKleKb4i4ZTDX4eld9TFteXKtvvgbZhmvqHK6pe0j3AoIyPf34kgP9ZCpbN+Dy1R2/S63gSSVo4eh2leA71gf+KhzikZWB71gT+ObVgDg5xtf4ctz7LPrqTqZd3/m382y447WhORauwuhbP6uH78TUpbt8x/Seofxro+DNQgKLGIfmf8ajQtGBMLQDBMd0gbD1WIb7/hVpdIkvVbjh1mblj7bjbk8e3Kd8UvE8uxyWGlcPXEpk767ZUw9Q1Ns0dfmQjiwCnwlUEb6JCriGlNFUFgU35PyXA4xSEBiFziCD2g/RUxBgujN9yfZeZdmLrcnmq729ydY2Y5ubm5NXe69evNh78fLl1mZaO3jvMWinc5Ze6Woo3nTohu8gy68Q5M5rpkKVum7W7N7k+farjL7ae/WcPd/ZfPUqfZnt0Ww3nbxKX+00de1o8oFWdNSMLoH06iYXCJC/LZkIdXiUnClagBKcUzGr7NqNdCSlwRW7oVjO6SRnG2w65SmvQ85JHfDf1A8QnZc6lW3d/hGdhxlsjZiRubyJFwx16sKOuiC7SjO1DiEtIzLL5YTmHbzg130LYcvoOxk1/S0PLOODLOBe+JqYy3nKhB7M1fEah3cFkzFXvI05f9ibzaMIJTr0IXI4hZglN2KssilZkPOzo/8gfrrXXBusH1MzI6k1n+SszrDXZfYRsuvdkHpjrctnDkqazlkYeDvZHFDS670ioilqypFNwYqaoTqEnVEzjyrx+H3jHYKKoNuotNoA0t84ZHlO1cZMbmwlW9vJq3ZnFCi5lQ6Fwl9kYUFGm0WYjLx/9zq4u7wEA50SuK5FEl6XKL296mAosyItL7PEtOx9YwWbJVb9oIqEnmIazUS698j29vP72pQ+YkE3ZxDtygLgrnThSV7ejEkM6hXbmUe+qrqZ0+YjBRW0rvBMXM6yzwTbJ6osRiQrr2YjMlHsZkSE/WLG
ihERFXz9T6q6Z16VxbLbOKwk5je0OUvcyWQ7eRUL/025/5j8Au1iPkXy/w2VI3ImlbGkT44/srTCP5+dHa+F+q3Li9VNi+QgsT1WZHXTNGzGlpZGvtpfRqiBp3jO1q2W0NVeodyZnBpyKFUpVTPZ8h6SGF70CkvNujLYA1d6RuMw6HtWZsceWPcIS2spFw9c1ovkefLqxeZmsvVyZ2t32fX5CtOXsNCh49DsKj+HRs/PDk5OL5Lj/zhedn3DOgjDovq8hA9c3Eo4gR8+Hhx7ZgR/t23RK3evPlp76qNdPX+MvrrbD7OUYcRP0e9FSamoPSl1h1WX+dps/wT1Jv1whGcbESm6Wl+N6udgcB/76UvotDo1VucydKF9EyicinCjWT4lVITdtasqOeaO2wdRLfFlwMB6i+DWwfTLWVFmQ4X/rh4oRReuihUgiaoZVFnQI7toBfQBeLQLohMt88owrDQaRdlB6dVwr0WyyRu6IBPm3FyImVJJw6ACq9Acuh1He9aRIdzHdZSFJ1xs6NDEd52s5+FPqyaGD1ubif3f1osOIi8h2+ZhAmNLE2NiZuZBVXfEYscGx96iv4q9C9uqsJlvXOHClZmzKLCfJlV6xQyhguYLzTWRwmrJYcjC3shhk8iN1ScCN4AWrlTFZ4i8gUKG4YUCNySq8c+dOo53hK50yVMuK123jO3IdTvLMspUZuxS85mgYJdjH7m+t97QRMqcUdGH+x/xJ4ywL+2QkJ9PwgxxjbA20KtGVWz1EyHHlnyDncL77IQpUwYNWr47YE98Y0RbvkVUqhalkTNFyzlPsXOOro9zPOo1zXkWZy1B66hKGz8fec3oNSOVqOsmuBYD/tX6FZ+nV48fhr2hmlQCjISh+XRcOPndu7fvLt+fXrx7f35xfHT57u3bi0/dsgrTVAbKsDnH4RuXM3jnoPKvelRJuLUyQPJSlq07ztLquZGKaVckqd7ons0j6ZzyOFT173bHUXaoX7/tPc9yrJwC5S9Yhpk8jQ5Wrg81arGQY9Mo0TFZQElXjdG7wJlYvkBjM9ofkEo7BPVZpx4o+zPR3M+zIHiEzzi2LI24F1qurWQ3o1xo07hiJ1xQtSCuqWyzZm33bNLGXtxz8B6Kp6KgIrtcsoHU1/HPNvfhpyrPPdzYsgpICe5L15jI3Zlt97uXesJcTvppST1I1DTP69u23fyscw1/ulzUkIfIOhRFVi25Z5kkfYhlGrD28+1xQW0pH6XvZgoZMhW83lyHwTrdA4OmwBuCleF0HM1XX2RTcgMh/40K6WCIhZxcDwgGIMDhef/+5Ghk1aJCCq/dkJ/fnxzpUXw/0qiudWGPn11qvgglprE0cKjcA0657qoPpdBGVanB/rGoNOQLN1yMOchhsCQsBSmVZYIpuHwKbvgsvmTPTo6IYpVmjVLade1rXxprCt1WcHnQN8DqkCNC7VWl2yFnxGdPWuxJbXqYbbqd7uzuZq+mr149f7m7tMuwPkPfLC9ZPtbjoKUjxbTe0JHuOM8t7HDzCU2nuzGQdiAUUZq6S51MjqXTmVVEoipVvSUpo25JEytuu0stBN/Wk/nzjl0nsP5tbESw/wAX7nEabble3EsQkT2KSZHtDsTI3hzt4hTdSfWcbg006/kvB1t3TLu9+2K4ibd3X9wx9e7W9nBT725t90z9FwkGW/UXCobxNSQEy381SV1AA3r4nYahiOYFz/vcLG2OUVJlj+3XsRsNYvx5uM1nGStujaYnq9CXtAo5xH+/xqH+BTzZiL59G9EtO/fXMRX1L/DJYjSUxagf30+Go/vQ9WQ/+kvYj9x+PpmRnsxIX92M5Gnx27cmDWMwegiKnkxKy2Pri1qWHgjWl7M9PRywL2idejhwX9B+tTxw37SF6wsZsZbHVjlbSt54UOT3SX1NOo4GsVmRpYvpBoOeMDu+vRYfutllG/plGs/eEbMeoty6ObbbO9sPBa4D3WNE1UNXcIe5VVL2g7r1QFCB0S8B661ZPlYf5QVrbKsT67t2ou3NrRfrm7vr288vNvf2N3f3n+8ke7vPf3+oBmTmitFsubKGD8LyBQxMTo4egwwclANG8Dpwe1Pacfb1pYsteqC5+V5kv8BGAeaWVGRpEb4foWKAfDXUlqM6UCumaxxSgXm9E1Y34d8PQ0YV7AglEyVvNJT3MaAxcOOA8BIoNPmhM0bSStmBcug+KCITwLL7UZUW8s8QNc9ZKkXW5Luh9VFVdpO5n28vHaruYLyR6oqL2SV2LJTqEZMrhqQfSyYOdBJAbzshOorDXBZsg+Y8XbrgZ8mS/yVJJyVL/rp5JyVL/uqpJyVL/vLZJyz535iAEiHgWxT8A3BfXqwPU39toT3k5H5DInm4ar+iwN2C4VsQpwNI37Sw/AlRNd+fJO3x8/XkZA/B9yMFL08YjyAi11UWZlwbhxWX+/gu/u725MefMHnRNYW1lOHzwv0AvoAfNEsnS6YGQt44VCcYiJ+svnXCFNZAIDeKG8NcauWEavZihzCRygyKaoXN+UmqsEDVXWBdW+qcmb/TvGLHH8H7+Y7Nfq2YWrjvRk2PP6RP6hJpXNbOO2hBhQ69cV5e2u/GSQh5kb41wqQyXm6px5wwY5giiqXymik64Tk3C4CldkfUznF78t8d/3z548npwbt/4MqZa2vd48j6/dcfq4PDzYO///rjxcHBwQF8xn/+bVlhB7YYb5/7gqM+rYY+xgRgnRu7vVA9DeZzVXLrbT0LiKCaWB4JUYB9b8K+uD3yBJAAWWjoxxOGdM8HIoEpyTOL5PPfR4Ds4/84Ozg9ujz/fQ3pIXYUBRh4KNxCoGSqq/OGU7I/KiZSbFTgJgQCtqO/ef/64gTmgrH9cNAjOIx4TRXUUSI5hPnhsKKCPnOw1pqi7ZhHv719d4QEffzz5a/2UwP0iPrabYixAWHKC5oTxVy4GnrOnrFkRsYrWyvjHrfW6n+uHO5/UIZ+UCy7NKb8MOHiQ7GgZZmwj2zlv5a22gDBDVTa+dxQkVGVNfcbL1THRXyQim6vEEli2VXM+fUQCziYTBS7xkq/oBV5V6Sdr3ON/PLvr98sC/AVWwwA7y/8mmErcn7tPMxyakfq3nnnb3+6+O3g3fGHWmPzLPz04sMhyi5/R5X+w0lhBZqfeKhnYgkUm9DoDzdcWEAt3S2t0nUKLz3K8iFox44dx+TYrRrZ4eCEAu/u27gPn42QcMx7EPPhiE2qWV1z5/4CORGcQzXWhDn8Hd/tarMUxLWwVPe/D7JS/dWddSJCfLRmxl7hBaPC2OtkSlN7QVPDSMmvJca6KOj5SknJWWqX4uGDmjruA4RPwQMa+/7UEbQuBltbIRliD8WClDlNoQO+vWGOD89d1AK5iEFwQ2sGtSfFzPOCYoSlvOvbSU4hrgumQFnB3Y1cRUJNrV/i4rkgY4fFZBxWcmAZZKqYCTFKFkNxP6CRKw/ng8uhYtxcahM61quRD3iqKcK3vB2RNOdMmBHxj0I3PmzHlPjq+NklLxNyMsV65mXJXOjayZnn20bW0PNyPMJ6HVh3SjikAcao68Jzcka
M4tec5vliRIQkBQXRLK4+xw1MRhXLRlbcC9Hy0VT7W6+2k81kO9naHT+gysac6qFKvx3kOd4RVM+ZRjKQwiJEecJykhWGDHryh7Y/NRepNKqXENBf48+NGuqicEE0N5VrwYcV5xayWlWWFHSlGMSx1fqWA4zQfCYVN/PC0tMzDLdlik0lvGEJyrJMuPQCAGvLtzUsl0Buf68riz7HoE7OetHXVKP1YE0x/EZCrKSd7XZo7uePVd4oMvbOf76DM9pnfB2c0FQqig8Gi4aLyMNAQbGoe16EvhJ0ZgV+C4CLjvYhi4TmTBlNpCISCsUJiYXKYGG1JuALw9kpovBJN9oNSOderkUVIAIcL2K273mKByoruAZ3gRUAlcxD1Wk9Cq05JTIycnJ0vnFydl7/ENpvjcgNm/ghSwwfx54P4YFK5S5wVo8IExmojyRjhqWYUiGsfGpZsmbk2fHRuzVXTTqEbTKTPqR+T2Xm7Z4ej9cnD4p6xj0WoLlmqVmVSbEIdXIRCAg3hb8sZ5AkVYyaqNBw2CtPWYEygCs16LuTpHVuqFp/HfeCva+KAPbmG8qneFA3/0MaQPHGDYVLdDHArqUHcliPhIAVy2Vr8vCxxL3IIAfGsKK06sFJJGO8ZvRqaf1rcPfjBTa5b3seYePdhns89C/yx1ymV0RZtVobkGVK6GRPjk7PMQL4l4uLs3OyQS5en0Ngukxlrpe+K4YKIz/ANZ4cIaPi2kdHW9XbVfeCysfIO5FRRlJTbWHwDLKXcB5EMFubSwc8DVtiOFYE8luqDd/OGwJqMCbXCu00Y3dUfHX1gH0d4CWWP6jbpNF/HdcJxiqfYbPcuXj99vDfL49Ozy/tIbi8eH2+7NqGLuC7+q5RtNdIqy7cnU8Y73XY3d77IPxq0WiHT6FpNkedDbtbiEyq1VVNMplWdV5GczZQKOzJXF2t6UlIU1PRyIq/aeSdoSTn4grWQwoZ9ilHhwuiYOKl6vqac7V0Qdzp2tJ8MWImkht+xUuWcQr1re2njU/aXitrsaH89actytXMjEgpc54uRiiboEyArlx/61pFAU72g25/DOgvWN0NLjYhOfPe5Zlj+Zc/oZy1LJ6q6hvh/WB5kCoEAQQcwZWg6ztBj1qXAWd6qeugyTC718LW5ib+/9IGokGDei6iPkQbRLFrrtuiw4TZVQPtgF7vctW7S0vuWVPU59B3E3ZK0nn9zR1q0oF7zm6y7wBItfNFgKnF/iailv+pFMJtzzSI6qj0EMVmVIHhUDNQUPQoeh73f8LRtYj8dJrLG/AoqazWmX6SilwcnrlRsaOvDmAibCnj13UAChfccJqT83+cQqFuZp7pNfejG9QOWMOCbgmkxSB0tWdyDDJfdPDxQ80FPF6MokJTNzjY0JwmRGhqKswvc91HDFMFWQnjrVj+AbdaNKyHQrQA1wnQl/vZ6YmOeTPfkKa+LLzhDVv8UJfypltTxOtwVpbzxgSoQcMq3IhRFiyoof+sBBIFuGbQLube7husRq2QpjPkFFiw3cZ1OJxtpfoQh9/wS2h6f9DAQ7OMaFZQYXiKjpKPxrWvZh/TORUzNmowda5DB2sjyTW3y/W90LF5oYBkX9qwGnnLngpzTK3q7McUvoc2XiRo2nNOOW14nhOGhibMkHUt10UWmxkBYVMedeigZalkqTg1LF88RL1Gu+dQghO2CIWrz21M3ffcriEwmGLCZ5WsdL5AaoZ3ApcHj6IO2THQkJQKcnI2IpRksrAbAMbQSvCPREtLJwkh/6gxS/MbutBoWm5e2fTGw+Tpfpy4L8aIsqaMJqwUVTtRs8pn2YPRNuHl2IIyThCs8YhkrGRgnybSyQyk7vwPVlmuW8EsVCdL96e9LZ7FJf3iOITm0ICqLq9MKyOFLGSlfctDwHv9dQDQd13DgZ4dnJ+uddJs7b3NaDqvbU2ISgyGZD039O7Wi1ftNTeaXX7T6VzLR9D09rdsoOJnKWc5I69fHzbw0ROYskwwZPxas8ILhKBAaihU7474vSMJZNHdrdprNv9Cwr4Hsk/ybyM0OH7TLD1jMkm5WQxVZOSQm0X/7ryRwijW6o8E4EhhuGBisMInp42CJ26yDnynUpk5OYBgCtoDZCWMWlxyLXtSlh8HdTgFOTl/C/nFHQgPD24Fa6jddCD1bughFTTrYsr357sHnBmTl6Cc9837WooZN1WG93VODXzoxtz+N1nJpVjZJ+svnycvtnb2nm+OyEpOzco+2dlNdjd3X23tkf9Z7QA5oBFn9b1mat3fxy0DJw3tC0eEoskBpTA5JTNFRZVTFZc2MnO2IClUdrBiZ6PQgrs3TdNoxF0b55QJdC1AtHwuMVJowlSdFO9F2/qGQvByUs4Xmts/0LA4Iqk/1nEc1qk0Fk/2QZTAsWt0ZWQBF+SMydCssWPdmEhtpFjP0s7eKDbjUgx50t7BDHcdtPVfD2+Da6Cj5mDqPWm/VmzS6oPedmR2YOh3Yq7WHvrQMst1X68pCx32rY7f5OTsesd+cXJ2/aIWPlvyVkHTAXDz5uDwNqhJwzJrks9w8K5eWDXTKV6QchErChPoX3l6cBH0b1fxgTvJrD6zkpSKX1PDyNGb39cimbd5VkCbyyXNyITmVKRwWiMHoVREycoe4haS7TpLuVRqw4NSCGIE2PG/YRSgBvsAqa7Th4uZT5PhWrkunW34zDwbh/bbSBwDFpli2WWf9PiIfd4gmHA2Z9pEk3oc4dwjWEhZsiyAXE280Bm2POoRO4oCcWE4p3FOpSIrUymTGUjwSSqLFcI1WYk+t6sIohfVBRdlDGu7QKUHlnJtNSrXdwd03JxfuTQe9BDqajrlH8OI8Aw0ktzf2MBH8AmrSa0l5ALDe4xE88BHXgRz9GSBXU4XxNCreldRJ86pNsTcSJLTCcs1qt9CGkgFwFpGdu0Xr490iNxdSWVSXa10b8waGQ2SMLK8hO3/AhTBplMGJezsrE5ycXv4jF28PloboUvkSsgb4W1hDbCIQ/3ImxsBRSWtyd6NhykwHeJpzxuGtXisMQTU832TDZDMbRRTb8RytAPfN8im0kwlw1JMrHfVOS8hcily4RA5vY1jUEFeHx2c2avgAFd8FIaKSWW1uzpWUJ4PtDgr5BOYwEsm3fCvZFrl+SNn/n4184td8KomdkkwHagRd/jV8wlThhxzoQ1rNd8H3IA19asRIDrUBqdAXORgzsTbyxE6h6HzJ4LdccMHsvUQKsI5oFIc7wRO1gViwNBXX7gR+A6EmRoZde2LIw8wFhgZlCBUSLEo+J9RcBqiMHx8j6WM+ZSMYRXQrU+5D3Z149BkMJViinvVjnYQUIO7dtcQX9mxj6juzex+FFIKmhbM2YXi8dTgr8bSzkM/coKFqLnoLjriaRR4Wssz7MuXRK5h/9XdTSj92x1Ho4l/w2BJ0FHq+KeMGuqAu6GapDLPWWqijuuNVpWhTeWUiwxpLVB+LmfakXyooennhrQU9LU/wA/GyjkrmKL5gGVYj/0cMevz8W0e/Gd8CjYMLOi+1qlCngHxgC6KLkvtS4UqBkn+Guuwjt
2AcLIzybQVx7oS1h7dme5ubk4byBjkqPZUoQ3xD0JghABCjIFMNTVBa9CiVFxH/ExOMdlEyIw5c2FjybWHLmSqA8GAXJqxbnn3kLPaKSEbA+MyYwt6xTThpu7nH3PmWtK2dGoJ0jdYhYMhWIdqmykb9sBY3YKnVU4VwBuGZAU3vmRyO4LsVBrnNuaYWyKY62DAWP2CxnPZAAPiwmUD7XW8ZuSgxshvvKGpIWP7nrsu7O0BHy32QX6iPQWvs+cv2S6bTNkmZS/SnVcvt7MJezXd3Hq5Q7dePH85mext77ycvmhZjgaxXTYELU9s6NePuBNgqxWmJ3pehDKr7mTCPQyJOY5eaJ7LG9z+jGuj+KSKI8fdGC4FQFWQFBFMmFDot3n1o0HCR1toQyFBFyxd9QkRwcgegX+C36ZUwwqOrdLGU5cR0zhFXgpod8ZP80qbTrt7K3v+yKjRfYOg5uguOKifXIYqAuFRu5HjWl7BLK6pPRiA7rj6dJeuWLyOdXfcmkQkMzaoA8VTEw0kAVO2+ExECeZGIi8KpGRH8C97ruilYfsbHNMooDSusAFpteDEx7SjUbQJfumBLdb+j4mvmR0GdddJgMynmPnRlqOlFkuOQOhSVAsA+yzueRRd2CRUR4OJBcFO71O1GidZMi1WV2upa06vmfempqw0uLgwG0IMKPbClQPS5StFDWeipA8JJ5qLWcX1POxafSjhSNv7glRl46p395zUFlQSS9GuzoLDi2DaW6wDS6iHb3GhJtXUDMZTzxpZR64QcOwWVVCBIWma9YgJfr71TfdPqzm0jlI6H9WTi3nCOH5rrU3pfqCcexB5fcTzg+8JeDGiGggLBh23R55tyAnhho4Ec7+SaJJjv0EnUxxEqjAGVawFXfuE3sJ6b7zkNG5w1fE9XLexHb3xtI+zI39vFsbzGxKC8hq6RXdXah5sJMmlvCLUXkmYiccMNkNp6RZRLb7A3bvYeJ5sJzuxngWxew01q/7mDi0Ln7o/ktMHB2JPA3AObTRFwuZIUcjmPcGasfvMRWx+kyGFLjjyKaTwKaTwKaTwGwkpxDPpK0zVjOQrxhUiSE9xhU9xhY8D0lNc4fI4e4orfIor/K7iCuGy+O7iCh3UZMi4Qne13xNPR3MXhFafWhlC7Xpj6qJUNmIUBWVLzL75GMNb0ZF8Jj6+wRjD5YW6Lxho2EPzXz3QMBY1nwINnwINnwINnwINnwINnwIN2wT3FGj4FGj4FGj4FGj4LbO0zw40hJ4pCIxzgF3U39zhAHP9HiwN5lRrPl34yCVs8g5lNmmaSqwsA/WrcC5i6EcpZOFNRv7itzC/4UYxcnBx8X8O/51MFS0YFOXtDT6E+hpSwTqbgLjZQTWiobYqV6GKJ+h+bsyTo/MROf35p99GUPVyzQc0hA7iHlz0lOAaEgNdxZO/ARS+erMbMS5WavUPJ+yFslRufxw2UA9d4UVJU7Oy1pyFpXMg6uRvXv2q1x5qRvv5XA1bLkCXAXGNpnMoBBUqQYINzYDb1dM5TDWCHUpTWZQ51xhlNJM09+BFVUSFPfpWt0Yf68raA/yOYUu/AI92+A1TBu/+tFJQQSgUz0SbrSefhhiL+wy/h80IMZHMqs4Q5we7RX4KU7mxeMOuTLzMHnqLQcAVlM0Ss1CClTAr4GMTCkO4mFn9FRvOS0UUM0rqEiXnPAKWzma4PF91p3Xy35xcvDt2R6upfCEpD3bDW3rmqF4jMhvU6HH3D1c821dbijlBWOQbahT/SC5wnGbx01HctSghz9jHJNS5o8bQ9Cop7JhQ5w4h0RsXB5ubO5sbYYK1NtbwgT58fSFJI8S1LI+7Gl0xN/3yuEOW1oe7oYtBXsDp9PUgK5V/pxh80Ai1vOEvjS9xpANTbOIV97n/VIf1PjpePTB642Jr59Wru861/f0WtP1FtN1GEPR3uk23ix237N3X4SxLY7chWwzEXJbH7oPGCLh2ZfK8tuBqxD6kMxz9/9n79qc2cuXf389foSI/bDjXHmyDeeRWvqfAhhPuIQk3Jme/9T21ZeQZ2dYyHk0kDQ5b94+/pdZjNA+/AAc2tdRWFuwZPbpbre5W69OAmu3DOhYM+zELM2Ed/xyD1gI+IioFicdgk1GopASglPEDwveMAv5+MyKpnDqAztxg00P4HnRbJ9ZYJ1xqQ01Xft2gNl1I0+nWKjEMdBUvmkRgRBq0Vd2lFrMo4+5jk4LrkbSi8K4Gw/Ne/8P58MvgdPjr5c2H4en5YNjuHA97Z73h4MNpp3v4txUaxs1cI1h4tNsSFa7PPzZtDTohcRI1ccwSUuAag+R6h3Rvxgahcif64APprMpZpnE9m+R7GGeC3oOCvK1OaRhOMU1ukaBJaCLefokipI8J9B0wBxkZU1HN0/l4eRkEaxcSWTSSLZH41Bbw8WntdV7Jji9QP3dtppCNuZgXj+JBnvBsuYClOf8oXh4bUy5kQSzsTZipSyirqehQ4EzzcYyaYjENZlF3S/zpFRRUMiE85WpHzCGYP/a7KKLgJrIx6p9/cWwsZnjDhbw1Vs6FvlUhqJAkCc1pkgbdhbijLvDU8PYydyiVM0VHBvNKilmaEg63UIBe5SXSujg67B1ddHrd7tlF/6h/fH58dnxxcHZxdtHqnZz3HsMTMcXtF2PK4MNp+0/PlZPz/ZP9/sl+e//4+Pi43zk+7hwe9jr9k3a30z7ot/vtXu/8rHP6SO7kO86L8KfTPaznkKOhd6fg6RzKW9Wcep51c3h8dHF4eHja6h6cX7SPTlvH552LTvuwc356dtA767X6ncPuebt/dHzUPTs/Oji72O8dtTu905NO//Ri7dIUZo5UiGxrJk8/v6Nli08qez8b/U5Cd7SuR2D/Akuudj8y0NIVLpUJ2Pv0/uNDXx+BfWFMot5pA33++v4yGXMsJM9CiK3eEDxroH7v/ezBJo70e+9tHsP6BPwd729rHzeHQnC1OE/P1/2ae6fKqJ6yuc7RTAlXwqaEbDC42ssNbYSmOInEFN9Vz0SjA9IdtY+jw1G3Gx61O0ed45P9TqcdnhyOcOdgU3lKmBzisVxLpBbV0u9jSfZu6Iz4xjKU7DV45gWrQKCEQT4TMYs1UkvZX5s19f9/6bQ67WZL/XfTar2D/4JWq/U/a9ec9eY7gqufP3DCxjZae7Ltk6PWc0xWI7o9c/JAqVydYCjEcazUZYIGny6NVpUkjgtw+fpsZMqETEx9v2plEEM9KhDWNa7MwZXxqgL0q6Kxp7XVk4XCLaXixxOiyJ5Sc0nIz8kz14QqxJ/P54G5sReEbFOCa1X5kuq5opBzRezIslIhzx5shc7PX9/3C/V0nksPiyzVhzdD7VJv6yqc865MN/W2Q8GX159MSRyzhX7LAm++0z0c/rP3UXnz+8cHNU+f9/prPP9LEATrL/aMlwtRbzsIonrMy7DAUSXcftc0bmhdaGoj1iX2CBKmne4hX7vyDBESj2IQ/DVmOmIsJjipm9CZ/gqNY1yYFh3bYBdKyIRJqqV9jiEvLiRCjLMY4cS7085xIqC+lYmpJYgkIX+Ay
nwySxISr+3IJuS7HNrw2g9lpYvp6dI6etwkCtA10Yw1xYS9JEm4X3j66TSvsP7WxjGV8qQ40aWssBB0kijNIfZkLJowE2XNqzk0dbsLvwi+T+UsfoPjNGnaMTZpJHZL/pWptZ+b7zGbw8myqEqdGuXeytJAfp60yGZbFTgqSoFYEDjTL6RP5LGuREe61LslKV1bzAzq7KuMGpqxbRo1rE7ppaKGi0ay7X1tC1FDnxeP4sGrjhqa4f40UUPLrT9z1NDnyc8RNXxJrjx31LDEnZ8kargmh3xn/U8XNTRz3GrUcLBRfLASF8y3Cg8T/wXig6b73/H+1lzR+gChqfL5XAHC/ZODg4M2Hh12j7oHpNNpHY3apD066B6N9g8P2tGG9HiOAOENnSkHbpZW4mUmOPQaAoTefJ8cINx0wj88QGgmu9141WDtyFRJJdeoAOVZ2pUdhGy2FRWw3fq2nzLACSncU7Q7VYq5sPhj6nPG6YQmODb+bY0EBJ21mW062XaA4RMAe9I/SKSdcNj9XHwBwpX+NFdNUa6q5u/yoTgO7eVHmxPlfbQ4L6qfg4zaRuoxayGN6Q9i9THWLg1n2WTKMrt6MJrRkDOHsMzDKZVESyaOY+XYKBf4npJ57lnlCf9mEXgDR97VCcTJt4woj7WZC4mt3jsnI/u9dZ/GnCWySZKohI3XVNP5lhGuNh4on2/mkWM2jHB457+5QT6WGv0Wk14XgyPrjvP7VKf6Ez1ckc/NXJDRN3LzwsPGVx4RtesgySZEWX9gGbom85t8+l6XJbjaiGPNPA94UhLeNFEd4lGycqX2YDQ+6Yz3u0dHo/2DCB/i/ZCcdE6iFmmRg6P9wzJ5XanklyGy675Eavu5vY9tL/07nBq4kzEjWGTcwDbABR8H7Cwy7yhIWdCOvpCtaPaFCvlarXHr8Ajj1giftDqjI08rZDz2NcLXL1crtMHXL1c2/9FCi5ozCghywzolkpgy97Dwvn65Eg1IgzRPWo2laDDiBC5lo4jNEyUSDIlwSmak4ZAPUiyn5n2GbBxvnYW23Ruvxti2t9h43MjvhhePx3aKOLeCzYhBmsVAzxl+0Mm6JkB+ea1mu6dIqOiqr9PGDw2QCJZJhyroWtU3+C/NqZ9qW1/h9zBpNBLnhFnkjVtztGdABCtCU3PC544ZbCR6W6S9mZokW3ufU5gwmFJOtvMaM8CsBkeWjMclFNVSE1RojE5BAOecShPxbCguJkwqVcgfIH96Cuut+H6p8ZhguESYEk5ZhGaZkNDISOm6MM4iEtXALGgfGR4eEbSTJpOdPM6hXt8J1GdVDqVmB/QurU1mOTjMs3PlmnHpgaUqooDLo8Xpza0n/5KlOyXi3L651U5LEYLCDrp0+3acxc9ogL3Y3YbLsb7Fr1QgXIakM7WkzYVIKOyeCZIv2AcvVgJgoLmPQxN0q+RZtXcLZ4cQe4EFbwDOBeJEeUdg6isnmVvfwRo8RdxSH/WmJt2+qAHeHRzs72l03n98e19A630jWVrgnl2QPwEHf/mazFgESPG5ngHRF0gQkhQoW0X88sooJA59dMYSKpky57UGYCPYuSO3GYyIUjVGcBoajxwLXxQwHLYCTrNuQ70KNwgkSdDvGUAJ5Y4j6C61j5YxWpzkuFu67jXXLAZLf46FG2ijsM/XFgN5lBCp1hZ8XZCvFAvhSc2zn8uZ5kteRVAag9wWhMI1ltNS355uNQTaKQ1nC0hlPkJWZRwHB/sVzXFwsF8YlHKhHrZpJEAHRogd5iKMV39jzr3r5uDb0TslYavsXf+AvQvO8yI/AOH3Ahj82qBzVkvC1LuwQr2Lajp2543dlqnhOlcL+htl0j3V8DrTk9VmimtRAykliMxSmY8Hhq6fvDVvlwDkCxUf0IjIOSHFFAY5Z9pWLW3QL42OplTwX9BorwcaTTtt2xKCAbS+WCfCbrNT2nf1Lcjbd7V2px7vgn2rGE/4C/QN/QX69ijQty2mFH81zdfYKP4ICsEd+/eKqnwQuCtXjChgKLmqEfCoNm/h5iy5x86/MHGGYhUJc8lWyQeU0IHydACE7QPiqk8oEWZHtUhSaMYArQbrEDGNrJtsA1E4QRjyfYzBDbu18OLDsw0gYH5avL6XhOr7C6WvFqXvZwfo+xNg8700LN9fiHwrEfleHIzvLxw+bVQM8cSGET3TAuWfrmFg6DasmZHXoWUzYgDx0IizuXeG6KPrPZhAl5iyOVLKK4HjXXuqDOXLQjZTxqHz1c2peuaGav3kDWwC4gpR/gAtYXors4ReT22BpsWCuZUB5aSrDGqAx5jTwqBefRC4pAc8+RgW5KM814/sDxrHeK8btNBbzY3/jXrXXw1n0OcBaneGbe3cfMSh+uC/d9FpmsbkVzL6F5V7h61u0A7aXTe8t//6cPPxqqHf+ScJ79guMsXp9tqdoIU+shGNyV67e94+ODbk3jtsHZh7Go7oIhjjGY23FXX7PEC6ffTW+kScRFMsGygiI4qTBhpzQkYiaqA5TSI2F7vVy7nwZGXcP8eRz+eUcOwBJVrbELwRm5/rUm85lElZUNZJi85H9ju+J2Vq3RGekG2Z8ZU56N7csHXqAZ4vWiEHwUHQarbbneaEJITTsDz6n8QFWMBre0zvcXoRc/+7TBlrnf4oztr+zHoOSSKZaKBslCUyW7aGMZ/TyhrebmpgZfDrymO7FbTLmnK7Qy0VFl2ycyrt7tlX97HRjMay+vfV6ad1bCr1XLE4p47wu8Lzx61O0P6GJJ68Fbt+nU8bRcFCh7+wQDSZQM6IMs2J/hXax0KwUN+m0+WcE3skCP4COBRq1g5i2Kt7qjszlZAd+pd57pM+GQ3U7OtmwUnIeKSao8kkNrOVeAJQs3CEmkEiAlwetMzzykl/a9Kk+Q2RJMSpyPQoRcO4O3UjQ4XTTleKyzTtA+Nid6wrSCIYN0jE/0PIXQP9SjkRU8zvduHMEqBwDR6vrazM8XhMwwolaJIQvpCrugmkHzKTyxks0FsbSjOtmu+K899dMMnl0yuAUm86yyXTK2ASQFKOPadSnmgUUSNZdjwFWYEySJFOlzbkkHgyAV1gmvw8src8POG20hv4Um7u8tbIn33cNOlk23dnIX/drQqTSmmd4IiKkBNwussrzLQJI/DaW8QXr3yTqd3U0B6dX+VpA9dma8EZmNBlX1uKBoja5LE76lf19d9WbMQ/wPP5nGrARj0DcJk3mQPLpKARWT4Rp/WzOCEcj2hsSxRa9V/5YvE+oLaBQkNrBPFxTdeoEtG3F/fv3Qa2Fu6kAZLfEn8K5dSNQaD0uZ9RDhORFbpgON1x2OMWsN+k3liTqOnW99uxHwPtg/ui+hp8HZzvql/AzMUxPOgazV/AEo9gJ+Lowqzb3cLZW44N8C3D8YOYZJhHgf49CNls79ucjKYkTvfGbAgZZPHeXcLmMYkmRDW9V5jg0OKyEhFM5ew//xcacgMrEiN/9rfd2uwgm5poj1eqp1+//GfHzmvntw3gd2rA57cBhFvsyF0qKVBBhIznlmWBObmT7ic1wWUkQHAI74XYq4DW
9v49GKxLCW/Er9YrqlC1VH+1SlJYfGbPEm4LxzHshn5vdW8vWB7hPfHwf0GH7Y3xNxDz+E14T4Zwmjj0BieGISdYkug/PSiU4br1dSslei8+/54yoTRH79/n/gx/q/D3MkEzHH4eIH0NDnWCdic4bPhpPEVymETBL9e9DW7hkySbgdOz1QVitah3guLB1lCxhDXVxVHHoprVcb4uCbaMDq9nbFTD28v+rk2cMBXl0zzruX6zRPoAO0CX/pmzqUFf7sA0as+nqnQt7x7riv58iuWQiqFaAjTaNbJelnHXekXWL/u/1fCo2Wm1T5qtVqu1ARzMdpHNTxEntoboIgVTsJ+NttE3SGZU0ol2fxwtLDOc9EclvpQJU8+RcEKbI5qoTyGcF07oP9Qv7x0dD9vtDcioBG+4VeE3XiTjSIQ4qRfVyuTVTNqt9nGwiVCo9hPCg3uSRGxbN+xviuW6Kxs8DAHpIVRxx0mCR/EKc92fEOMkUJbXGpMZxwzXFmP/ZaCa0ekwHCcTc/TVClrK4m63gpYOJsKvFntqStCMCYkEuSfczzU/UyamMC0y5X0qi00IIsQMztpAa6cxo9ISZUYkp6FAbzW0PrqHo/z8+olO8/4OhcpTTu9pTCbEXOYyp8SScH2rbbdhKqnkrfpnvqoN1656bcKhWSjDpbMmYEy75qpXyFKywAioMb+sqQ6i24wMFt9uxVLtBt3NWEySe8oZ4HOtdZT1g3h97g9rFdNx8oDcJQaQEsOhBnoMh+BAlnICmGWvgEWSzFLGXxN3bsyIVjEGzn5mWGaa0IqkkYHUg1k0Cvu15VX4fOtiTQpvN1YOjvwnbKMtBa3tXOe3n/7d3803e+UaU4klvfeRUe4JB/nEyR1NJhCi3rli850G2vlIIprNdrQ073ygk+kOsEC5aei+o5jq1KdrESRBlAOQGoLB9SWhq7yt/aBlMnMfIIYYkTFNihe5VAv5wwUeeVIET1CB2DwB3NgIzXCCJzr2dHH5ZXATfOaTBrpMwgC9hQ+U8kRfB00NkpIwQAUcU8/V4hOcuHIt8ylTyoAKexlSMjQlcQp6HyLqgoQgnMqyBT2hrK+UJX6JGIJnAuGQM6EN5znjcbRARJP7KEiokMGE3UPMomlUEYhrVRnow5H1RNWwZIvWheN6rYUBSa2KeqAo7CZoy7/wPBUCqb2UcSoNIxAnE6zrT3oq4HEUrBjxqpvQdV1LxaYiyDs00uU0cRJOGdd/NkPrMpt45Jl+pkCZ/4K2e/bOiylHOYKihubowmZFwlKKY3NbTjEDgnB10UN9WmaRkJewrzCWDxY52XDInLkVWh5ByUo6I3/YPBrbMI6pu2aXYjl9Z0KepYdndKJd8ndI8owUW9dzKTTLfPgY/cdw5Uz+K9cDlrJgccEuMMk4kFN3Vje/CtGqc1O09Z9bOi1otJYb1YZrWbe0dUVgAXAbAU2ExLn7uJJOADCu30X2XUQjK9RhzLIol9+e+tNuI1wtUhxhietF+qP5VtsCYeFV8DfzYwAcRUN4YGibVE+GRAjta1gJL8waXghSzpRE5Omx+QVv/U3z+3L58FO0zCtqnf0TLmvoGWt3p6ZzOsMTUtM1ntEmHoVRu7Nfqw3z3i9VC+iy79xoTSfLCiObb9CpEhN4iMWRv0rsgBThAkcSIPIKOat9eKmceX3YAeYu9vJu3ITc8xv3tMbSKfW17vrxepvhcEoTAgpmrc7MC4H3wrp9+V7BcA1tuvytdXs1Mr4u4yrra91+OJnkRu/yPgqP1rZv9VHEwjuQVaOQ+vbvmuWlv0NCYjhCjmONkwPaSH+n1rWYMi6HelvI7SK7i+v+mk4ZLdht3bBQzeFe8ZWCEtFbk18pvZ5YHsHqX6kl2oKulMbZvDfQdN6C2rDX0pvrdfr47sxVTfQG3Xzuf1aGzVxZ5zMMIMWC/KMyloKVgZZbGmixPkdOp+shBFZy1X6ey+0H/VdNI5fJmPnSarYF9TqyusYTUPV5rXiafeO8N/AzYKjN+QhIKIKHmUGPf2OOcLGpZ65cn/zN0lUL5iBiFkv6YtYU7kPUQ5uvIu84pwgcFOVsr/bLRDDKaFztsspRt3vvtI/77dbJznrD+TxA0IMfNq8fSMgiUrsOlo1FSE5kOF1/MLYXfaEqeXASeJeNCE+IhHMMI4f/8j+raTf/3hl7RcstbxT5Urhcq+YvrdSshUEvl7kyxVMW1audjRazR4GU6YIoVeaqrrIaHf7Ynq5ZhL5e9qsdqX9FisPnm1TeYrUzFlVU/hM7s9na1c6Muvz7kxWz9/VwhtOUJhPz7M7f11xF3ojNRjLDaXXIcOtKn4a9unF7Y6sfPCdQOEUQ+bwszttdwOiIpDF7ANCqZ+04b3dBx8oQJOMsfvYpew0v6HqFHfTYjl2zK7utN/qe3q9u12wwRpfnu8u1+6CmXfNlvq84p7ZuH8jbRhttAuT7uman6SEg30mYSe80E9WYnmbGv7OY3VHcxJlkERVwUJFP///ob1HffPOA/OeQ53mvjJ7UNOXvwmYcrslFUUHzXKBDTMVziQ1CajY936RjsLEbgJekX98nXRZKXtDdOQ6n5s6hhhF0ySGm4JvByyAUMN1cnq8ptyUk5jJLCzFNpAFrZjovxQUFpYFJxjMi1cS4OasCvhEJJrmGVYAP1J8Nk/wAQ4MIN44BMETooPfldcOGlkDcadSAW8RweFUYEoS6pQDK1JPQ5MqmnEVZKDcnJGTzubVrmlFmopvbsm4fLS6Fbn8R7t7JW6/n3RVde4kPG/as37WkzqfvyYJAPEsSXbiqfhwW6HXj3r9+uTJQ+8pVge6MtMJIlhE9zPj6FaDyXn910IZ2fnMsnIgblxJnckoS6XI6NQydi/qWji12TDrUlGAu4WTCYPDtlHTXArVjnl6ovBdG7qFX83YxWr9Y43uBuEX8WtKn5ZvtVC/GWjv82TopcKcc86hJQy3N188F9YdT84XBGOfvNLpPjcnwVCemMC0A9/idjcwFBkiKHD3kYhS84ESjrJAXimoFszLZGyZx7CFQIkmErGtr2UQyUTsND1ywtu++3aBoYlHgQ5ZEosbS9dHE0Aq7J+NxUHmhbO8sGFKR96cGKznjsQUIKxzC3sowvW2gWxkL9b+plOpPte3B7+K2ZqF50aZ1JlJC3XrkRPxjUAuKoA0Bw3llBfS0Gofj/2QC0Rb7LC0y2L2khP/yumaWNK3MkS6UwVI07HrpKC/9URVHYk8YG4X2AI2Yprd1CNKcCBbfkwjR1AFju3OrjHOw0JiHNVF0vgpyb7KgogpfHhNw1ZfXGFdMsJo7BCRgqG2iYc2Io4RkcCEjBxuqek5TEt4Ny6rgEUM7RZLdkcSarBponiplhxPCMhE/IJrcszsSWbSdse5c6Juk+T1MgLHNQb4ur3VEFx62u7q94Nn/NDBp1dWpwWFxiquKT5FpCDk7a6p6OiMm2wusm1RnYJgLXmB1g+0sTR0JoYGbp2bMuuypekoZ0SSJvIfhY2uyJeS7BH0SZTGJ9MvB36ytIrLZDMMtKWusfDQCYL5Z00bJ20G
rbZSd6yIwNCDi6awMMqNQocs4H9iMV2OxOdnUHPYQ26KU0cQgp5sMes11KqfodsYiUHvxbbCzwvypEVhIRiR8/Q089+vcwHS2OFSpJX4VtPwEbF6VqOfreIxpTCLHdKOIPKYrlY1ixu6ydE2G522swfB8qF5HheORxRx5tVvYc+9D+ZaQJeWizQu2BS6rpFlqgDkjyO4f+n4xsNIUf4CAid3cgpeyyax6YuGd6HqCOvjc+9egq5zv72urJttGPY0WMMXvSGMLkKhEgkUSuzFXSiv5aoBi/EA44iAJktNUbzvrcsMgCdSypDyQFYNBbqfyBMZV+tbBH4tacE+xJZt6yKigSnOuhl6emOc1IlmB9EHp9bppo2WCiJYJY2XyiwXSSqQueWXFUfHKVBSHvGnFtjXFMq+dtbZMepJREMhVKtSvd5wwOQSrrlhBDxXsmIKgppyoN6N36Cg4dhmxVdK5ByE6iO911Ei5KzYO9b3bOvGK990G6BzzmEIJISVjWJqIpAk1GZn4RRSLJiu7rVB0b9VM/dqIq+a0gAiPnCj0fBugKyyfcZYvrmBcZcutqZgxTZR+UUN1nXmaI+YERw+eBjG4HJWGfbiX4jcvpUnK/RRAPRYTsSSCEiw69L1crtHLKV02Udu5en9tHtZTzB3nxpBmLum9zjWskm+R/6d/FibCL6DBFdXFhU19TL9/fSqP3g5OP+0GOkMVsr3RPeYPynevq7Ke/+BMThlk2sPFF4+6cFV5lEkT4gXISI2tkttvRDTAgaynAUJvVaNzGkch5pEwF84KqMXFdah/crSDv3sVTX6pIdFC8LAywwoFh8tcquP/6jWjyTIsBWGLLS/i/1oSsEAGTNY/WOxve58AYUpNTxnsPv8cZ6sE9kjcpxPaI1xCIWv0gU6m6FSIjENy90BD6/ROa8e2kvhoaay2TM+a0GyRmEseqMRwF3USUSFpMsmgwOWP413f79awrv9U1vXefx000Of3joWXSQhlaOfzeRDRCVVN6tq0vU/vlzG6lqpPZH6eGSQrNolP5qhOL1XvHRQNidXlmcvmRIV6nVan1WwdNduHqLX/rt19t3/yv6Bw8VPUTKVY8xZmW67NvMZM2yfN1jHMtP3uoPWu0336THWNiOEdeRjieKKEdTrb8iZ4avtxYGEaDEgWClvckZpV42jxZVAV58fNOsz4/SIuP9eMb3Sw/J648zCo8xDH6oHQfJXPGzlO6IK+dapL5A+565BL6JVQIdNup/1MRCPfU5aQGj95ia1ZoMi5aSCvq0Q4FHEpCkBez3iTyR52u/tHzzRTQf9YJB2rZwk3IOkf+V2jnMWQf6ns7BGVy0ynTuvg+ClTEYRTHA91lHXLYm6Q4XWXNrALFpuT+frdEY4MQRMKSZLwoVEn8WMDrAPlckAk0ilOdO3zBqLSKwSpk7KlqQjMwPaNWaKvlmZpqmuI13QSTjHHoSR8GUu63Yuzs5PeUf/87KJ1ctw66bc7vd7pkxSSoJMEy0xR+wdp4cti7QefMW4wviL6QpTBSgDGgta6ktqB0ZFguE2HrnAyQT3+kEqGYjriypN5OyDE4cFMqJxmI8Brm7AYJ5O9CdsbxWy0N2HtoH2wJ3i4B5EqtqfcPvgnmLA3V/v7R82r/e7+7hJGKQupe9h84l5hfLVn9TlMm8Er8D2Ecz7soJbZqrwTiCnmJAomMRvhOBhjIeOHICF1Jv2f07ewdHh9PkZZg9owglrJazkZg5v3PRzTMeMJxQ109X6AE3ShfAcqQqackAvgp4ZIAH/j2TlegpP+MVuThy4N4Se9QyzRHfvrqQx31UvK1D+avLm53jBhzrSANjqYVN1sFlbPsxrRGgeTXoUB9NhjyYE5isx47MxfQ5qaKHEZK2ORnl0WIByx6OGJAUI/bCymj1j3K2LH6ucDFq5oLaTN2cnDBFy2kXOSIFfEPiN0moBGIgoxVFufloNwOVV1/ar6A8/6SayYgN1JXNNozOKYzfVYMQdrHvAsHH5lIgN0hYVEFIBnTRoF1Vf6bOlkZYuAy1zpURHFNqTeEFM2T579nAGW1NMOGmz+0doiWBjO/6tM3EXlTbVZEzkYPUhwLIwSgIo/5rx/zqmUBApFVVrLhQweNTnqelmakTMeeJ2Wzi8qDVbOM+rg6vMfXWeUjvPObLFR1bwaEjCacQ1pjlHKCVTnlPWesPoxrkbEiG5JraaHUl1uCWmccJiVFPNZSmxiMBB4B9bhhvpia4e5dZ1ZURtupPLWlTfmM6mYfIE1vFIh1a5Gd+sfTca/JAFtURLUkidDowYeJQlL5UCYZD19/Gsq2fuap0ZjVDXFohPQV3jiuWAf1RI+nBIcVWzWR5K5eIxsdbxPcJca6X+oiF+j3PU2oNamt0tAepThVkH7K37knKsu3JVn138WzlkD5JE5X5Wjfk4kp+SeRO6GkUkNhaEgM5agfjCggJ5dW/vDszfPrKAgyXEiNOpogAZKnrQBWXW7IG+XQs3Jm951ocCLlGSWygCdJ5ExP+FAKNffVZ+JmuTdwgbxmveC1yLFxq+k4cz3Ky97H6/X9CfNm2gTf/LyWmd0r+dKGmUjKub2RjnAn0yYeIzU5NB5OGVfTMOg754jg9S1jL54CvILSZU8FK38NW38584dtYl6oc9ttf42ys4LN+a46sKq8sdk6aWMb5LoXHr8SfEEQOK2kNvP7QY60r+WfNZaNe/ntJaU9QZuW54G9FqU3xbc6iUUzd0c9ZeQJM2pR77raGyJvK+FUP8/AAD//4ubiFc=" } diff --git a/heartbeat/magefile.go b/heartbeat/magefile.go index 0771f376c4c..298ee0443d1 100644 --- a/heartbeat/magefile.go +++ b/heartbeat/magefile.go @@ -20,7 +20,6 @@ package main import ( - "context" "fmt" "time" @@ -34,13 +33,17 @@ import ( // mage:import "github.com/elastic/beats/v7/dev-tools/mage/target/common" // mage:import - _ "github.com/elastic/beats/v7/dev-tools/mage/target/integtest/notests" + "github.com/elastic/beats/v7/dev-tools/mage/target/integtest" // mage:import - _ "github.com/elastic/beats/v7/dev-tools/mage/target/unittest" + "github.com/elastic/beats/v7/dev-tools/mage/target/unittest" + // mage:import + _ "github.com/elastic/beats/v7/dev-tools/mage/target/test" ) func init() { common.RegisterCheckDeps(Update) + 
	unittest.RegisterPythonTestDeps(Fields)
+	integtest.RegisterPythonTestDeps(Fields)
 
 	devtools.BeatDescription = "Ping remote services for availability and log " +
 		"results to Elasticsearch or send to Logstash."
@@ -124,13 +127,6 @@ func Imports() error {
 	return devtools.GenerateIncludeListGo(options)
 }
 
-// GoTestUnit executes the Go unit tests.
-// Use TEST_COVERAGE=true to enable code coverage profiling.
-// Use RACE_DETECTOR=true to enable the race detector.
-func GoTestUnit(ctx context.Context) error {
-	return devtools.GoTest(ctx, devtools.DefaultGoTestUnitArgs())
-}
-
 func customizePackaging() {
 	monitorsDTarget := "monitors.d"
 	unixMonitorsDir := "/etc/{{.BeatName}}/monitors.d"
diff --git a/heartbeat/monitors.d/sample.http.yml.disabled b/heartbeat/monitors.d/sample.http.yml.disabled
index e5c44c1e263..10e4c17de7b 100644
--- a/heartbeat/monitors.d/sample.http.yml.disabled
+++ b/heartbeat/monitors.d/sample.http.yml.disabled
@@ -4,9 +4,11 @@
 # be loaded.
 
 - type: http # monitor type `http`. Connect via HTTP and optionally verify response
 
+  # ID used to uniquely identify this monitor in elasticsearch even if the config changes
+  id: my-http-monitor
 
-  # Monitor name used for job name and document type
-  #name: http
+  # Human readable display name for this service in Uptime UI and elsewhere
+  name: My HTTP Monitor
 
   # Enable/Disable monitor
   #enabled: true
diff --git a/heartbeat/monitors.d/sample.icmp.yml.disabled b/heartbeat/monitors.d/sample.icmp.yml.disabled
index 0cae71d9de5..372c17beb07 100644
--- a/heartbeat/monitors.d/sample.icmp.yml.disabled
+++ b/heartbeat/monitors.d/sample.icmp.yml.disabled
@@ -4,16 +4,17 @@
 # be loaded.
 
 - type: icmp # monitor type `icmp` (requires root) uses ICMP Echo Request to ping
-  # configured hosts
+  # ID used to uniquely identify this monitor in elasticsearch even if the config changes
+  id: my-icmp-monitor
 
-  # Monitor name used for job name and document type.
-  #name: icmp
+  # Human readable display name for this service in Uptime UI and elsewhere
+  name: My ICMP Monitor
 
   # Enable/Disable monitor
   #enabled: true
 
   # Configure task schedule using cron-like syntax
-  schedule: '*/5 * * * * * *' # exactly every 5 seconds like 10:00:00, 10:00:05, ...
+  schedule: '@every 5s' # every 5 seconds from start of beat
 
   # List of hosts to ping
   hosts: ["localhost"]
diff --git a/heartbeat/monitors.d/sample.tcp.yml.disabled b/heartbeat/monitors.d/sample.tcp.yml.disabled
index cf4ba84abe2..c6f98be0799 100644
--- a/heartbeat/monitors.d/sample.tcp.yml.disabled
+++ b/heartbeat/monitors.d/sample.tcp.yml.disabled
@@ -6,6 +6,12 @@
 - type: tcp # monitor type `tcp`. Connect via TCP and optionally verify endpoint
   # by sending/receiving a custom payload
 
+  # ID used to uniquely identify this monitor in elasticsearch even if the config changes
+  id: my-tcp-monitor
+
+  # Human readable display name for this service in Uptime UI and elsewhere
+  name: My TCP monitor
+
   # Monitor name used for job name and document type
   #name: tcp
diff --git a/heartbeat/monitors/active/dialchain/_meta/fields.yml b/heartbeat/monitors/active/dialchain/_meta/fields.yml
index 5d4991a9704..288fe9edca7 100644
--- a/heartbeat/monitors/active/dialchain/_meta/fields.yml
+++ b/heartbeat/monitors/active/dialchain/_meta/fields.yml
@@ -34,10 +34,12 @@
       fields:
         - name: certificate_not_valid_before
           type: date
-          description: Earliest time at which the connection's certificates are valid.
+          deprecated: 7.8.0
+          description: Deprecated in favor of `tls.server.x509.not_before`. Earliest time at which the connection's certificates are valid.
        - name: certificate_not_valid_after
+          deprecated: 7.8.0
           type: date
-          description: Latest time at which the connection's certificates are valid.
+          description: Deprecated in favor of `tls.server.x509.not_after`. Latest time at which the connection's certificates are valid.
     - name: rtt
       type: group
       description: >
@@ -52,4 +54,112 @@
         - name: us
           type: long
           description: Duration in microseconds
+    - name: server
+      type: group
+      description: Detailed x509 certificate metadata
+      fields:
+        - name: x509
+          type: group
+          fields:
+            - name: alternative_names
+              type: keyword
+              ignore_above: 1024
+              description: List of subject alternative names (SAN). Name types vary by certificate
+                authority and certificate type but commonly contain IP addresses, DNS names
+                (and wildcards), and email addresses.
+              example: '*.elastic.co'
+              default_field: false
+            - name: issuer
+              type: group
+              fields:
+                - name: common_name
+                  type: keyword
+                  ignore_above: 1024
+                  description: List of common name (CN) of issuing certificate authority.
+                  example: DigiCert SHA2 High Assurance Server CA
+                  default_field: false
+                  multi_fields:
+                    - name: text
+                      type: text
+                      analyzer: simple
+                - name: distinguished_name
+                  type: keyword
+                  ignore_above: 1024
+                  description: Distinguished name (DN) of issuing certificate authority.
+                  example: C=US, O=DigiCert Inc, OU=www.digicert.com, CN=DigiCert SHA2 High Assurance
+                    Server CA
+                  default_field: false
+            - name: not_after
+              type: date
+              description: Time at which the certificate is no longer considered valid.
+              example: 2020-07-16 03:15:39+00:00
+              default_field: false
+            - name: not_before
+              type: date
+              description: Time at which the certificate is first considered valid.
+              example: 2019-08-16 01:40:25+00:00
+              default_field: false
+            - name: public_key_algorithm
+              type: keyword
+              ignore_above: 1024
+              description: Algorithm used to generate the public key.
+              example: RSA
+              default_field: false
+            - name: public_key_curve
+              type: keyword
+              ignore_above: 1024
+              description: The curve used by the elliptic curve public key algorithm. This
+                is algorithm specific.
+              example: nistp521
+              default_field: false
+            - name: public_key_exponent
+              type: long
+              description: Exponent used to derive the public key. This is algorithm specific.
+              example: 65537
+              default_field: false
+            - name: public_key_size
+              type: long
+              description: The size of the public key space in bits.
+              example: 2048
+              default_field: false
+            - name: serial_number
+              type: keyword
+              ignore_above: 1024
+              description: Unique serial number issued by the certificate authority. For consistency,
+                if this value is alphanumeric, it should be formatted without colons and uppercase
+                characters.
+              example: 55FBB9C7DEBF09809D12CCAA
+              default_field: false
+            - name: signature_algorithm
+              type: keyword
+              ignore_above: 1024
+              description: Identifier for certificate signature algorithm. Recommend using
+                names found in Go Lang Crypto library (See https://github.com/golang/go/blob/go1.14/src/crypto/x509/x509.go#L337-L353).
+              example: SHA256-RSA
+              default_field: false
+            - name: subject
+              type: group
+              fields:
+                - name: subject.common_name
+                  type: keyword
+                  ignore_above: 1024
+                  description: List of common names (CN) of subject.
+                  example: r2.shared.global.fastly.net
+                  default_field: false
+                  multi_fields:
+                    - name: text
+                      type: text
+                      analyzer: simple
+                - name: subject.distinguished_name
+                  type: keyword
+                  ignore_above: 1024
+                  description: Distinguished name (DN) of the certificate subject entity.
+                  example: C=US, ST=California, L=San Francisco, O=Fastly, Inc., CN=r2.shared.global.fastly.net
+                  default_field: false
+            - name: version_number
+              type: keyword
+              ignore_above: 1024
+              description: Version of x509 format.
+              example: 3
+              default_field: false
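To make the new mapping concrete, here is a minimal hedged sketch of the event shape those definitions describe, built with `common.MapStr` from libbeat. Only the key paths come from the fields above; every literal value is an invented placeholder, not output of this patch:

```go
package main

import (
	"fmt"

	"github.com/elastic/beats/v7/libbeat/common"
)

func main() {
	// Key paths follow the fields.yml definitions above (tls.server.x509.*);
	// the values are fabricated examples.
	event := common.MapStr{}
	event.Put("tls.server.x509.subject.common_name", "r2.shared.global.fastly.net")
	event.Put("tls.server.x509.issuer.common_name", "DigiCert SHA2 High Assurance Server CA")
	event.Put("tls.server.x509.serial_number", "55FBB9C7DEBF09809D12CCAA")
	event.Put("tls.server.x509.public_key_algorithm", "RSA")
	event.Put("tls.server.x509.public_key_size", 2048)
	event.Put("tls.server.x509.not_before", "2019-08-16T01:40:25Z")
	event.Put("tls.server.x509.not_after", "2020-07-16T03:15:39Z")
	fmt.Println(event.StringToPrint())
}
```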
diff --git a/heartbeat/monitors/active/dialchain/tls.go b/heartbeat/monitors/active/dialchain/tls.go
index 6fd2b43c27f..b4b2c006dfb 100644
--- a/heartbeat/monitors/active/dialchain/tls.go
+++ b/heartbeat/monitors/active/dialchain/tls.go
@@ -19,27 +19,19 @@ package dialchain
 
 import (
 	cryptoTLS "crypto/tls"
-	"crypto/x509"
 	"fmt"
 	"net"
 	"time"
 
-	"github.com/elastic/beats/v7/heartbeat/look"
+	"github.com/elastic/beats/v7/heartbeat/monitors/active/dialchain/tlsmeta"
 	"github.com/elastic/beats/v7/libbeat/beat"
-	"github.com/elastic/beats/v7/libbeat/common"
 	"github.com/elastic/beats/v7/libbeat/common/transport"
 	"github.com/elastic/beats/v7/libbeat/common/transport/tlscommon"
 )
 
 // TLSLayer configures the TLS layer in a DialerChain.
-//
-// The layer will update the active event with:
-//
-//  {
-//    "tls": {
-//        "rtt": { "handshake": { "us": ... }}
-//    }
-//  }
+// The layer will update the active event with the TLS RTT and
+// crypto/cert details.
 func TLSLayer(cfg *tlscommon.TLSConfig, to time.Duration) Layer {
 	return func(event *beat.Event, next transport.Dialer) (transport.Dialer, error) {
 		var timer timer
@@ -58,56 +50,12 @@ func TLSLayer(cfg *tlscommon.TLSConfig, to time.Duration) Layer {
 			if !ok {
 				panic(fmt.Sprintf("TLS afterDial received a non-tls connection %T. This should never happen", conn))
 			}
-
-			// TODO: extract TLS connection parameters from connection object.
+			connState := tlsConn.ConnectionState()
 			timer.stop()
-			event.PutValue("tls.rtt.handshake", look.RTT(timer.duration()))
 
-			addCertMetdata(event.Fields, tlsConn.ConnectionState().PeerCertificates)
+			tlsmeta.AddTLSMetadata(event.Fields, connState, timer.duration())
 
 			return conn, nil
 		}), nil
 	}
 }
-
-func addCertMetdata(fields common.MapStr, certs []*x509.Certificate) {
-	// The behavior here might seem strange. We *always* set a notBefore, but only optionally set a notAfter.
-	// Why might we do this?
-	// The root cause is that the x509.Certificate type uses time.Time for these fields instead of *time.Time
-	// so we have no way to know if the user actually set these fields. The x509 RFC says that only one of the
-	// two fields must be set. Most tools (including openssl and go's certgen) always set both. BECAUSE WHY NOT
-	//
-	// In the wild, however, there are certs missing one of these two fields.
-	// So, what's the correct behavior here? We cannot know if a field was omitted due to the lack of nullability.
-	// So, in this case, we try to do what people will want 99.99999999999999999% of the time.
-	// People might set notBefore to go's zero date intentionally when creating certs. So, we always set that
-	// field, even if we find a zero value.
-	// However, it would be weird to set notAfter to the zero value. That could invalidate a cert that was intended
-	// to be valid forever. So, in that case, we treat the zero value as non-existent.
-	// This is why notBefore is a time.Time and notAfter is a *time.Time
-	var chainNotValidBefore time.Time
-	var chainNotValidAfter *time.Time
-
-	// We need the zero date later
-	var zeroTime time.Time
-
-	// Here we compute the minimal bounds during which this certificate chain is valid
-	// To do this correctly, we take the maximum NotBefore and the minimum NotAfter.
-	// This *should* always wind up being the terminal cert in the chain, but we should
-	// compute this correctly.
-	for _, cert := range certs {
-		if chainNotValidBefore.Before(cert.NotBefore) {
-			chainNotValidBefore = cert.NotBefore
-		}
-
-		if cert.NotAfter != zeroTime && (chainNotValidAfter == nil || chainNotValidAfter.After(cert.NotAfter)) {
-			chainNotValidAfter = &cert.NotAfter
-		}
-	}
-
-	fields.Put("tls.certificate_not_valid_before", chainNotValidBefore)
-
-	if chainNotValidAfter != nil {
-		fields.Put("tls.certificate_not_valid_after", *chainNotValidAfter)
-	}
-}
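For context on the hunk above: the timer brackets only the TLS handshake, since the TCP connection already exists by the time afterDial runs. A minimal standalone sketch of that measurement using only the Go standard library (the target host is an arbitrary placeholder, not something this patch connects to):

```go
package main

import (
	"crypto/tls"
	"fmt"
	"net"
	"time"
)

func main() {
	// Establish TCP first so the measured duration covers only the TLS handshake.
	tcpConn, err := net.Dial("tcp", "example.com:443")
	if err != nil {
		panic(err)
	}
	defer tcpConn.Close()

	start := time.Now()
	tlsConn := tls.Client(tcpConn, &tls.Config{ServerName: "example.com"})
	if err := tlsConn.Handshake(); err != nil {
		panic(err)
	}

	state := tlsConn.ConnectionState()
	fmt.Printf("handshake: %v, version: %x, peer certs: %d\n",
		time.Since(start), state.Version, len(state.PeerCertificates))
}
```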
diff --git a/heartbeat/monitors/active/dialchain/tls_test.go b/heartbeat/monitors/active/dialchain/tls_test.go
deleted file mode 100644
index 8df41fbd3b0..00000000000
--- a/heartbeat/monitors/active/dialchain/tls_test.go
+++ /dev/null
@@ -1,144 +0,0 @@
-// Licensed to Elasticsearch B.V. under one or more contributor
-// license agreements. See the NOTICE file distributed with
-// this work for additional information regarding copyright
-// ownership. Elasticsearch B.V. licenses this file to you under
-// the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied. See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package dialchain
-
-import (
-	"crypto/x509"
-	"crypto/x509/pkix"
-	"math/big"
-	"testing"
-	"time"
-
-	"github.com/stretchr/testify/assert"
-
-	"github.com/elastic/beats/v7/libbeat/common"
-)
-
-func Test_addCertMetdata(t *testing.T) {
-	goodNotBefore := time.Now().Add(-time.Hour)
-	goodNotAfter := time.Now().Add(time.Hour)
-	goodCert := x509.Certificate{
-		SerialNumber: big.NewInt(1),
-		Subject: pkix.Name{
-			Organization: []string{"Acme Co"},
-		},
-		NotBefore:             goodNotBefore,
-		NotAfter:              goodNotAfter,
-		KeyUsage:              x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
-		ExtKeyUsage:           []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
-		BasicConstraintsValid: true,
-	}
-
-	expiredNotAfter := time.Now().Add(-time.Hour)
-	expiredCert := x509.Certificate{
-		SerialNumber: big.NewInt(1),
-		Subject: pkix.Name{
-			Organization: []string{"Acme Co"},
-		},
-		NotBefore:             goodNotBefore,
-		NotAfter:              expiredNotAfter,
-		KeyUsage:              x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
-		ExtKeyUsage:           []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
-		BasicConstraintsValid: true,
-	}
-
-	missingNotBeforeCert := x509.Certificate{
-		SerialNumber: big.NewInt(1),
-		Subject: pkix.Name{
-			Organization: []string{"Acme Co"},
-		},
-		NotAfter:              goodNotAfter,
-		KeyUsage:              x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
-		ExtKeyUsage:           []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
-		BasicConstraintsValid: true,
-	}
-
-	missingNotAfterCert := x509.Certificate{
-		SerialNumber: big.NewInt(1),
-		Subject: pkix.Name{
-			Organization: []string{"Acme Co"},
-		},
-		NotBefore:             goodNotBefore,
-		KeyUsage:              x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
-		ExtKeyUsage:           []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
-		BasicConstraintsValid: true,
-	}
-
-	// notBefore is intentionally not a pointer type because go certificates don't have nullable time types
-	// we cheat a bit and make not after nullable because there's no valid reason to create a cert with go's zero
-	// time.
-	// see the addCertMetadata function for more info on this.
-	type expected struct {
-		notBefore time.Time
-		notAfter  *time.Time
-	}
-	tests := []struct {
-		name     string
-		certs    []*x509.Certificate
-		expected expected
-	}{
-		{
-			"Valid cert",
-			[]*x509.Certificate{&goodCert},
-			expected{
-				notBefore: goodNotBefore,
-				notAfter:  &goodNotAfter,
-			},
-		},
-		{
-			"Expired cert",
-			[]*x509.Certificate{&expiredCert},
-			expected{
-				notBefore: goodNotBefore,
-				notAfter:  &expiredNotAfter,
-			},
-		},
-		{
-			"Missing not before",
-			[]*x509.Certificate{&missingNotBeforeCert},
-			expected{
-				notAfter: &goodNotAfter,
-			},
-		},
-		{
-			"Missing not after",
-			[]*x509.Certificate{&missingNotAfterCert},
-			expected{
-				notBefore: goodNotBefore,
-			},
-		},
-	}
-	for _, tt := range tests {
-		t.Run(tt.name, func(t *testing.T) {
-			event := common.MapStr{}
-			addCertMetdata(event, tt.certs)
-			v, err := event.GetValue("tls.certificate_not_valid_before")
-			assert.NoError(t, err)
-			assert.Equal(t, tt.expected.notBefore, v)
-
-			if tt.expected.notAfter != nil {
-				v, err := event.GetValue("tls.certificate_not_valid_after")
-				assert.NoError(t, err)
-				assert.Equal(t, *tt.expected.notAfter, v)
-			} else {
-				ok, _ := event.HasKey("tls.certificate_not_valid_after")
-				assert.False(t, ok, "event should not have not after %v", event)
-			}
-		})
-	}
-}
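The deleted table-driven test only covered the legacy `tls.certificate_not_valid_*` keys. A hedged sketch of how one of its cases might be re-targeted at the new `tlsmeta` package, assuming the field paths written by `AddCertMetadata` below; the test name and single-case layout are illustrative, not part of this patch:

```go
package tlsmeta

import (
	"crypto/x509"
	"crypto/x509/pkix"
	"math/big"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"

	"github.com/elastic/beats/v7/libbeat/common"
)

func TestAddCertMetadataValidCert(t *testing.T) {
	notBefore := time.Now().Add(-time.Hour)
	notAfter := time.Now().Add(time.Hour)
	cert := &x509.Certificate{
		SerialNumber: big.NewInt(1),
		Subject:      pkix.Name{Organization: []string{"Acme Co"}},
		NotBefore:    notBefore,
		NotAfter:     notAfter,
	}

	fields := common.MapStr{}
	AddCertMetadata(fields, []*x509.Certificate{cert})

	// Both the legacy key and the new ECS-style key should be populated.
	legacy, err := fields.GetValue("tls.certificate_not_valid_before")
	assert.NoError(t, err)
	assert.Equal(t, notBefore, legacy)

	ecs, err := fields.GetValue("tls.server.x509.not_before")
	assert.NoError(t, err)
	assert.Equal(t, notBefore, ecs)
}
```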
nullable because there's no valid reason to create a cert with go's zero - // time. - // see the addCertMetadata function for more info on this. - type expected struct { - notBefore time.Time - notAfter *time.Time - } - tests := []struct { - name string - certs []*x509.Certificate - expected expected - }{ - { - "Valid cert", - []*x509.Certificate{&goodCert}, - expected{ - notBefore: goodNotBefore, - notAfter: &goodNotAfter, - }, - }, - { - "Expired cert", - []*x509.Certificate{&expiredCert}, - expected{ - notBefore: goodNotBefore, - notAfter: &expiredNotAfter, - }, - }, - { - "Missing not before", - []*x509.Certificate{&missingNotBeforeCert}, - expected{ - notAfter: &goodNotAfter, - }, - }, - { - "Missing not after", - []*x509.Certificate{&missingNotAfterCert}, - expected{ - notBefore: goodNotBefore, - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - event := common.MapStr{} - addCertMetdata(event, tt.certs) - v, err := event.GetValue("tls.certificate_not_valid_before") - assert.NoError(t, err) - assert.Equal(t, tt.expected.notBefore, v) - - if tt.expected.notAfter != nil { - v, err := event.GetValue("tls.certificate_not_valid_after") - assert.NoError(t, err) - assert.Equal(t, *tt.expected.notAfter, v) - } else { - ok, _ := event.HasKey("tls.certificate_not_valid_after") - assert.False(t, ok, "event should not have not after %v", event) - } - }) - } -} diff --git a/heartbeat/monitors/active/dialchain/tlsmeta/tlsmeta.go b/heartbeat/monitors/active/dialchain/tlsmeta/tlsmeta.go new file mode 100644 index 00000000000..686da2a3241 --- /dev/null +++ b/heartbeat/monitors/active/dialchain/tlsmeta/tlsmeta.go @@ -0,0 +1,133 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package tlsmeta + +import ( + dsa2 "crypto/dsa" + "crypto/ecdsa" + "crypto/rsa" + "crypto/sha1" + "crypto/sha256" + cryptoTLS "crypto/tls" + "crypto/x509" + "fmt" + "time" + + "github.com/elastic/beats/v7/heartbeat/look" + "github.com/elastic/beats/v7/libbeat/common" + "github.com/elastic/beats/v7/libbeat/common/transport/tlscommon" +) + +func AddTLSMetadata(fields common.MapStr, connState cryptoTLS.ConnectionState, duration time.Duration) { + fields.Put("tls.established", true) + fields.Put("tls.rtt.handshake", look.RTT(duration)) + versionDetails := tlscommon.TLSVersion(connState.Version).Details() + // The only situation in which versionDetails would be nil is if an unknown TLS version were to be + // encountered. Not filling the fields here makes sense, since there's no standard 'unknown' value. 
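+	// For example, the tests below show a TLS 1.2 connection being reported as
+	// tls.version_protocol: "tls" and tls.version: "1.2".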
+	if versionDetails != nil {
+		fields.Put("tls.version_protocol", versionDetails.Protocol)
+		fields.Put("tls.version", versionDetails.Version)
+	}
+
+	if connState.NegotiatedProtocol != "" {
+		fields.Put("tls.next_protocol", connState.NegotiatedProtocol)
+	}
+	fields.Put("tls.cipher", tlscommon.ResolveCipherSuite(connState.CipherSuite))
+
+	AddCertMetadata(fields, connState.PeerCertificates)
+}
+
+func AddCertMetadata(fields common.MapStr, certs []*x509.Certificate) {
+	hostCert := certs[0]
+
+	x509Fields := common.MapStr{}
+	serverFields := common.MapStr{"x509": x509Fields}
+	tlsFields := common.MapStr{"server": serverFields}
+
+	serverFields.Put("hash.sha1", fmt.Sprintf("%x", sha1.Sum(hostCert.Raw)))
+	serverFields.Put("hash.sha256", fmt.Sprintf("%x", sha256.Sum256(hostCert.Raw)))
+
+	x509Fields.Put("issuer.common_name", hostCert.Issuer.CommonName)
+	x509Fields.Put("issuer.distinguished_name", hostCert.Issuer.String())
+	x509Fields.Put("subject.common_name", hostCert.Subject.CommonName)
+	x509Fields.Put("subject.distinguished_name", hostCert.Subject.String())
+	x509Fields.Put("serial_number", hostCert.SerialNumber.String())
+	x509Fields.Put("signature_algorithm", hostCert.SignatureAlgorithm.String())
+	x509Fields.Put("public_key_algorithm", hostCert.PublicKeyAlgorithm.String())
+	if rsaKey, ok := hostCert.PublicKey.(*rsa.PublicKey); ok {
+		sizeInBits := rsaKey.Size() * 8
+		x509Fields.Put("public_key_size", sizeInBits)
+		x509Fields.Put("public_key_exponent", rsaKey.E)
+	} else if dsaKey, ok := hostCert.PublicKey.(*dsa2.PublicKey); ok {
+		// Only report a key size when the DSA parameters are present; calling
+		// Bytes() on a nil *big.Int would panic.
+		if dsaKey.Parameters.P != nil {
+			x509Fields.Put("public_key_size", len(dsaKey.P.Bytes())*8)
+		}
+	} else if ecdsa, ok := hostCert.PublicKey.(*ecdsa.PublicKey); ok {
+		x509Fields.Put("public_key_curve", ecdsa.Curve.Params().Name)
+	}
+
+	chainNotBefore, chainNotAfter := calculateCertTimestamps(certs)
+	// Legacy non-ECS field
+	tlsFields.Put("certificate_not_valid_before", chainNotBefore)
+	x509Fields.Put("not_before", chainNotBefore)
+	if chainNotAfter != nil {
+		// Legacy non-ECS field
+		tlsFields.Put("certificate_not_valid_after", *chainNotAfter)
+		x509Fields.Put("not_after", *chainNotAfter)
+	}
+
+	fields.DeepUpdate(common.MapStr{"tls": tlsFields})
+}
+
+func calculateCertTimestamps(certs []*x509.Certificate) (chainNotBefore time.Time, chainNotAfter *time.Time) {
+	// The behavior here might seem strange. We *always* set a notBefore, but only optionally set a notAfter.
+	// Why might we do this?
+	// The root cause is that the x509.Certificate type uses time.Time for these fields instead of *time.Time
+	// so we have no way to know if the user actually set these fields. The x509 RFC says that only one of the
+	// two fields must be set. Most tools (including openssl and go's certgen) always set both. BECAUSE WHY NOT
+	//
+	// In the wild, however, there are certs missing one of these two fields.
+	// So, what's the correct behavior here? We cannot know if a field was omitted due to the lack of nullability.
+	// So, in this case, we try to do what people will want 99.99999999999999999% of the time.
+	// People might set notBefore to go's zero date intentionally when creating certs. So, we always set that
+	// field, even if we find a zero value.
+	// However, it would be weird to set notAfter to the zero value. That could invalidate a cert that was intended
+	// to be valid forever. So, in that case, we treat the zero value as non-existent.
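+	// For example, a chain whose certs all omit NotAfter yields no
+	// tls.certificate_not_valid_after or tls.server.x509.not_after field at all.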
+ // This is why notBefore is a time.Time and notAfter is a *time.Time + + // We need the zero date later + var zeroTime time.Time + + // Here we compute the minimal bounds during which this certificate chain is valid + // To do this correctly, we take the maximum NotBefore and the minimum NotAfter. + // This *should* always wind up being the terminal cert in the chain, but we should + // compute this correctly. + for _, cert := range certs { + if chainNotBefore.Before(cert.NotBefore) { + chainNotBefore = cert.NotBefore + } + + if cert.NotAfter != zeroTime && (chainNotAfter == nil || chainNotAfter.After(cert.NotAfter)) { + chainNotAfter = &cert.NotAfter + } + } + + return +} diff --git a/heartbeat/monitors/active/dialchain/tlsmeta/tlsmeta_test.go b/heartbeat/monitors/active/dialchain/tlsmeta/tlsmeta_test.go new file mode 100644 index 00000000000..609fbbb413d --- /dev/null +++ b/heartbeat/monitors/active/dialchain/tlsmeta/tlsmeta_test.go @@ -0,0 +1,398 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package tlsmeta + +import ( + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "math/big" + "testing" + "time" + + "github.com/elastic/go-lookslike" + "github.com/elastic/go-lookslike/testslike" + + "github.com/stretchr/testify/require" + + "github.com/elastic/beats/v7/heartbeat/look" + "github.com/elastic/beats/v7/libbeat/common" +) + +// Tests for the non-cert fields +func TestAddTLSMetadata(t *testing.T) { + // We always test with this one cert because addCertificateMetadata + // is tested in detail elsewhere + certs := []*x509.Certificate{parseCert(t, elasticCert)} + certMetadata := common.MapStr{} + AddCertMetadata(certMetadata, certs) + + scenarios := []struct { + name string + connState tls.ConnectionState + duration time.Duration + expected common.MapStr + }{ + { + "simple TLSv1.1", + tls.ConnectionState{ + Version: tls.VersionTLS11, + HandshakeComplete: true, + PeerCertificates: certs, + CipherSuite: tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, + ServerName: "example.net", + }, + time.Duration(1), + common.MapStr{ + "established": true, + "rtt": common.MapStr{"handshake": look.RTT(time.Duration(1))}, + "version_protocol": "tls", + "version": "1.1", + "cipher": "ECDHE-ECDSA-AES-256-CBC-SHA", + }, + }, + { + "TLSv1.2 with next_protocol", + tls.ConnectionState{ + Version: tls.VersionTLS12, + HandshakeComplete: true, + PeerCertificates: certs, + CipherSuite: tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, + ServerName: "example.net", + NegotiatedProtocol: "h2", + }, + time.Duration(1), + common.MapStr{ + "established": true, + "rtt": common.MapStr{"handshake": look.RTT(time.Duration(1))}, + "version_protocol": "tls", + "version": "1.2", + "cipher": "ECDHE-ECDSA-AES-256-CBC-SHA", + "next_protocol": "h2", + }, + }, + } + + for _, s := 
range scenarios { + t.Run(s.name, func(t *testing.T) { + // Nest under the TLS namespace to match actual output + expected := common.MapStr{"tls": s.expected} + + // Always add in the cert metadata since we test that in other test funcs, not here + expected.DeepUpdate(certMetadata) + + fields := common.MapStr{} + AddTLSMetadata(fields, s.connState, s.duration) + require.Equal(t, expected, fields) + }) + } +} + +func TestAddCertMetadata(t *testing.T) { + cert := parseCert(t, elasticCert) + chainCert := parseCert(t, elasticChainCert) + certNotBefore, err := time.Parse(time.RFC3339, "2019-08-16T01:40:25Z") + require.NoError(t, err) + certNotAfter, err := time.Parse(time.RFC3339, "2020-07-16T03:15:39Z") + require.NoError(t, err) + + expectedFields := lookslike.Strict(lookslike.MustCompile(map[string]interface{}{ + "certificate_not_valid_after": certNotAfter, + "certificate_not_valid_before": certNotBefore, + "server": common.MapStr{ + "hash": common.MapStr{ + "sha1": "b7b4b89ef0d0caf39d223736f0fdbb03c7b426f1", + "sha256": "12b00d04db0db8caa302bfde043e88f95baceb91e86ac143e93830b4bbec726d", + }, + "x509": common.MapStr{ + "issuer": common.MapStr{ + "common_name": "GlobalSign CloudSSL CA - SHA256 - G3", + "distinguished_name": "CN=GlobalSign CloudSSL CA - SHA256 - G3,O=GlobalSign nv-sa,C=BE", + }, + "subject": common.MapStr{ + "common_name": "r2.shared.global.fastly.net", + "distinguished_name": "CN=r2.shared.global.fastly.net,O=Fastly\\, Inc.,L=San Francisco,ST=California,C=US", + }, + "not_after": certNotAfter, + "not_before": certNotBefore, + "serial_number": "26610543540289562361990401194", + "signature_algorithm": "SHA256-RSA", + "public_key_algorithm": "RSA", + "public_key_size": 2048, + "public_key_exponent": 65537, + }, + }, + })) + + scenarios := []struct { + name string + certs []*x509.Certificate + }{ + { + "single cert fields should all be present", + []*x509.Certificate{cert}, + }, + { + "cert chain should still show single cert fields", + []*x509.Certificate{cert, chainCert}, + }, + } + + for _, scenario := range scenarios { + t.Run(scenario.name, func(t *testing.T) { + fields := common.MapStr{} + AddCertMetadata(fields, scenario.certs) + tls, err := fields.GetValue("tls") + require.NoError(t, err) + testslike.Test(t, expectedFields, tls) + }) + } +} + +// TestCertExpirationMetadata exhaustively tests not before / not after calculation. 
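+// The four cases below cover a valid cert, an expired cert, and certs missing
+// either the NotBefore or the NotAfter bound.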
+func TestCertExpirationMetadata(t *testing.T) {
+	goodNotBefore := time.Now().Add(-time.Hour)
+	goodNotAfter := time.Now().Add(time.Hour)
+	goodCert := x509.Certificate{
+		SerialNumber: big.NewInt(1),
+		Subject: pkix.Name{
+			Organization: []string{"Acme Co"},
+		},
+		NotBefore:             goodNotBefore,
+		NotAfter:              goodNotAfter,
+		KeyUsage:              x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
+		ExtKeyUsage:           []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
+		BasicConstraintsValid: true,
+	}
+
+	expiredNotAfter := time.Now().Add(-time.Hour)
+	expiredCert := x509.Certificate{
+		SerialNumber: big.NewInt(1),
+		Subject: pkix.Name{
+			Organization: []string{"Acme Co"},
+		},
+		NotBefore:             goodNotBefore,
+		NotAfter:              expiredNotAfter,
+		KeyUsage:              x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
+		ExtKeyUsage:           []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
+		BasicConstraintsValid: true,
+	}
+
+	missingNotBeforeCert := x509.Certificate{
+		SerialNumber: big.NewInt(1),
+		Subject: pkix.Name{
+			Organization: []string{"Acme Co"},
+		},
+		NotAfter:              goodNotAfter,
+		KeyUsage:              x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
+		ExtKeyUsage:           []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
+		BasicConstraintsValid: true,
+	}
+
+	missingNotAfterCert := x509.Certificate{
+		SerialNumber: big.NewInt(1),
+		Subject: pkix.Name{
+			Organization: []string{"Acme Co"},
+		},
+		NotBefore:             goodNotBefore,
+		KeyUsage:              x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
+		ExtKeyUsage:           []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
+		BasicConstraintsValid: true,
+	}
+
+	// notBefore is intentionally not a pointer type because Go certificates don't have nullable time types.
+	// We cheat a bit and make notAfter nullable because there's no valid reason to create a cert with Go's zero
+	// time.
+	// See the calculateCertTimestamps function for more info on this.
+	type expected struct {
+		notBefore time.Time
+		notAfter  *time.Time
+	}
+	tests := []struct {
+		name     string
+		certs    []*x509.Certificate
+		expected expected
+	}{
+		{
+			"Valid cert",
+			[]*x509.Certificate{&goodCert},
+			expected{
+				notBefore: goodNotBefore,
+				notAfter:  &goodNotAfter,
+			},
+		},
+		{
+			"Expired cert",
+			[]*x509.Certificate{&expiredCert},
+			expected{
+				notBefore: goodNotBefore,
+				notAfter:  &expiredNotAfter,
+			},
+		},
+		{
+			"Missing not before",
+			[]*x509.Certificate{&missingNotBeforeCert},
+			expected{
+				notAfter: &goodNotAfter,
+			},
+		},
+		{
+			"Missing not after",
+			[]*x509.Certificate{&missingNotAfterCert},
+			expected{
+				notBefore: goodNotBefore,
+			},
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			notBefore, notAfter := calculateCertTimestamps(tt.certs)
+
+			require.Equal(t, tt.expected.notBefore, notBefore)
+			if tt.expected.notAfter != nil {
+				require.Equal(t, tt.expected.notAfter, notAfter)
+			} else {
+				require.Nil(t, notAfter)
+			}
+		})
+	}
+}
+
+func parseCert(t *testing.T, pemStr string) *x509.Certificate {
+	// Decode the cert passed in by the caller, not the package-level elasticCert,
+	// so that elasticChainCert is actually exercised by the chain tests.
+	block, _ := pem.Decode([]byte(pemStr))
+	if block == nil {
+		require.Fail(t, "Test cert could not be parsed")
+	}
+	cert, err := x509.ParseCertificate(block.Bytes)
+	require.NoError(t, err)
+	return cert
+}
+
+var elasticCert = `-----BEGIN CERTIFICATE-----
+MIIPLzCCDhegAwIBAgIMVfu5x96/CYCdEsyqMA0GCSqGSIb3DQEBCwUAMFcxCzAJ
+BgNVBAYTAkJFMRkwFwYDVQQKExBHbG9iYWxTaWduIG52LXNhMS0wKwYDVQQDEyRH
+bG9iYWxTaWduIENsb3VkU1NMIENBIC0gU0hBMjU2IC0gRzMwHhcNMTkwODE2MDE0
+MDI1WhcNMjAwNzE2MDMxNTM5WjB3MQswCQYDVQQGEwJVUzETMBEGA1UECAwKQ2Fs
+aWZvcm5pYTEWMBQGA1UEBwwNU2FuIEZyYW5jaXNjbzEVMBMGA1UECgwMRmFzdGx5
+LCBJbmMuMSQwIgYDVQQDDBtyMi5zaGFyZWQuZ2xvYmFsLmZhc3RseS5uZXQwggEi
+MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCnvoHpOqA6CM06MlGViMGMFC4G
+YFFEe03GQ5jG3uEUbMNPbl0MSxaWle5xZOVaPcIrV7qyE5yKKDv1fT1e8EkwR+3t
+nTK4k2QvH6dPtSPlGHVIjBtS17gM939eZvpvUPxmUc5Ov9cbWgsuStqgFpFjnPBV
+R0LqD6YekvS9oXG+4GrNZnQ0wJYF0dbos+E7lRSdniDf/Ul9rF4WAzAEoQYau8pe
+eIPlJy8rVrDEgqfCQabYXrLaG68EHHMGadY2EX0yyI/SZh9AU8RdatNHBwj42LGP
+9dp3fyEv14usJPGuLVy+8I7TMckQPpPB+NLFECJMwRRfciPjibw1MMSYTOWnAgMB
+AAGjggvZMIIL1TAOBgNVHQ8BAf8EBAMCBaAwgYoGCCsGAQUFBwEBBH4wfDBCBggr
+BgEFBQcwAoY2aHR0cDovL3NlY3VyZS5nbG9iYWxzaWduLmNvbS9jYWNlcnQvY2xv
+dWRzc2xzaGEyZzMuY3J0MDYGCCsGAQUFBzABhipodHRwOi8vb2NzcDIuZ2xvYmFs
+c2lnbi5jb20vY2xvdWRzc2xzaGEyZzMwVgYDVR0gBE8wTTBBBgkrBgEEAaAyARQw
+NDAyBggrBgEFBQcCARYmaHR0cHM6Ly93d3cuZ2xvYmFsc2lnbi5jb20vcmVwb3Np
+dG9yeS8wCAYGZ4EMAQICMAkGA1UdEwQCMAAwgglrBgNVHREEggliMIIJXoIbcjIu
+c2hhcmVkLmdsb2JhbC5mYXN0bHkubmV0ghEqLmFtcGlmeW11c2ljLmNvbYIPKi5h
+cGkuZ2lwaHkuY29tghAqLmFwcC5yb213b2QuY29tghAqLmF3YXl0cmF2ZWwuY29t
+ghIqLmJmaWZsYXJlbGl2ZS5jb22CECouYmlsbGluZ2FybS5jb22CCiouYnJhemUu
+ZXWCFSouY2FsZ2FyeXN0YW1wZWRlLmNvbYIQKi5jZG4udHJpbGxlci5jb4INKi5j
+aXR5bWFwcy5pb4IOKi5kZWFsZXJvbi5jb22CDSouZG92ZW1lZC5jb22CDCouZWxh
+c3RpYy5jb4IPKi5mZmhhbmRiYWxsLmZyghEqLmZsZXhzaG9wcGVyLmNvbYIPKi5m
+bGlwcC1hZHMuY29tghcqLmZsb3JpZGFldmVyYmxhZGVzLmNvbYIYKi5mb2N1c3Jp
+dGUtbm92YXRpb24uY29tghAqLmZyZXNoYm9va3MuY29tggsqLmdpcGh5LmNvbYIV
+Ki5pZGFob3N0ZWVsaGVhZHMuY29tghAqLmludGVyYWN0bm93LnR2ghEqLmtjbWF2
+ZXJpY2tzLmNvbYIMKi5rb21ldHMuY29tghEqLm1lZGlhLmdpcGh5LmNvbYIKKi5t
+bnRkLm5ldIIMKi5uYXNjYXIuY29tghUqLm9tbmlnb25wcm9zdWl0ZS5jb22CHSou
+b3JsYW5kb3NvbGFyYmVhcnNob2NrZXkuY29tggwqLnByZWlzMjQuZGWCDSoucWEu
+bW50ZC5uZXSCEyoucmV2ZXJiLWFzc2V0cy5jb22CDCoucmV2ZXJiLmNvbYIMKi5y
+b213b2QuY29tghMqLnNjb290ZXJsb3VuZ2UuY29tghgqLnN0YWdpbmcuYmlsbGlu
+Z2Vudi5jb22CFiouc3RhZ2luZy5mcmVzaGVudi5jb22CEiouc3dhbXByYWJiaXRz
+LmNvbYILKi52ZXJzZS5jb22CDSoudmlkeWFyZC5jb22CDioudmlld2VkaXQuY29t +ghEqLnZvdGVub3cubmJjLmNvbYIMKi52b3Rlbm93LnR2ggsqLndheWluLmNvbYIb +Ki53ZXN0bWluc3Rlcmtlbm5lbGNsdWIub3Jngg9hbXBpZnltdXNpYy5jb22CE2Fw +aS5yZXZlcmJzaXRlcy5jb22CGGFwaS5zdGFnaW5nLmZyZXNoZW52LmNvbYIbYXBp +LnN0YWdpbmcucmV2ZXJic2l0ZXMuY29tgg5hd2F5dHJhdmVsLmNvbYIQYmZpZmxh +cmVsaXZlLmNvbYITYmZsLXRlc3QuYWJjLmdvLmNvbYIOYmZsLmFiYy5nby5jb22C +CGJyYXplLmV1gh5jZG4taW1hZ2VzLmZsaXBwZW50ZXJwcmlzZS5uZXSCF2Nkbi5m +bGlwcGVudGVycHJpc2UubmV0ghJjb3Ntb3NtYWdhemluZS5jb22CDGRlYWxlcm9u +LmNvbYILZG92ZW1lZC5jb22CHWR3dHN2b3RlLWxpdmUtdGVzdC5hYmMuZ28uY29t +ghhkd3Rzdm90ZS1saXZlLmFiYy5nby5jb22CGGR3dHN2b3RlLXRlc3QuYWJjLmdv +LmNvbYITZHd0c3ZvdGUuYWJjLmdvLmNvbYIKZWxhc3RpYy5jb4IMZW1haWwua2du +LmlvghJmLmNsb3VkLmdpdGh1Yi5jb22CHWZhbmJvb3N0LXRlc3QuZmlhZm9ybXVs +YWUuY29tghhmYW5ib29zdC5maWFmb3JtdWxhZS5jb22CDWZmaGFuZGJhbGwuZnKC +D2ZsZXhzaG9wcGVyLmNvbYIVZmxvcmlkYWV2ZXJibGFkZXMuY29tgglnaXBoeS5j +b22CFWdvLmNvbmNhY2FmbGVhZ3VlLmNvbYIcZ28uY29uY2FjYWZuYXRpb25zbGVh +Z3VlLmNvbYIGZ3BoLmlzghNpZGFob3N0ZWVsaGVhZHMuY29tghNpZG9sdm90ZS5h +YmMuZ28uY29tgg1pbmZyb250LnNwb3J0gg5pbnRlcmFjdG5vdy50doIPa2NtYXZl +cmlja3MuY29tggprb21ldHMuY29tghptYWlsLmRldmVsb3BtZW50LmJyYXplLmNv +bYIWbWFuY2hlc3Rlcm1vbmFyY2hzLmNvbYIWbWVkaWEud29ya2FuZG1vbmV5LmNv +bYIXbXkuc3RhZ2luZy5mcmVzaGVudi5jb22CG29ybGFuZG9zb2xhcmJlYXJzaG9j +a2V5LmNvbYIUcGNhLXRlc3QuZW9ubGluZS5jb22CD3BjYS5lb25saW5lLmNvbYIh +cGxmcGwtZmFzdGx5LnN0YWdpbmcuaXNtZ2FtZXMuY29tggpwcmVpczI0LmRlghRw +cmVtaWVyZXNwZWFrZXJzLmNvbYILcWEudGVub3IuY2+CDHFhLnRlbm9yLmNvbYIe +cm9ib3RpYy1jb29rLnNlY3JldGNkbi1zdGcubmV0ghFzY29vdGVybG91bmdlLmNv +bYIac3RhZ2luZy13d3cuZWFzYS5ldXJvcGEuZXWCGHN0YWdpbmcuZGFpbHkuc3F1 +aXJ0Lm9yZ4IUc3RhZ2luZy5mcmVzaGVudi5jb22CEHN3YW1wcmFiYml0cy5jb22C +CHRlbm9yLmNvggl0ZW5vci5jb22CFnRyYWNrLnN3ZWVuZXktbWFpbC5jb22CEHVh +dC5mcmVzaGVudi5jb22CE3VuaWZvcm1zaW5zdG9jay5jb22CF3VzZXJzLnByZW1p +ZXJsZWFndWUuY29tghF1dGFoZ3JpenpsaWVzLmNvbYIJdmVyc2UuY29tggt2aWR5 +YXJkLmNvbYIMdmlld2VkaXQuY29tggp2b3Rlbm93LnR2ggl3YXlpbi5jb22CGXdl +c3RtaW5zdGVya2VubmVsY2x1Yi5vcmeCEXd3dy5jaGlxdWVsbGUuY29tghB3d3cu +Y2hpcXVlbGxlLnNlghJ3d3cuZWFzYS5ldXJvcGEuZXWCGnd3dy5pc3JhZWxuYXRp +b25hbG5ld3MuY29tghh3d3cua29nYW5pbnRlcm5ldC5jb20uYXWCDHd3dy50ZW5v +ci5jb4INd3d3LnRlbm9yLmNvbYIUd3d3LnVhdC5mcmVzaGVudi5jb22CF3d3dy51 +bmlmb3Jtc2luc3RvY2suY29tghV3d3cudXRhaGdyaXp6bGllcy5jb20wHQYDVR0l +BBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMB8GA1UdIwQYMBaAFKkrh+HOJEc7G7/P +hTcCVZ0NlFjmMB0GA1UdDgQWBBQ7SJi8MbyN4XPx+T1QVj4sLHjhDjCCAQMGCisG +AQQB1nkCBAIEgfQEgfEA7wB1AId1v+dZfPiMQ5lfvfNu/1aNR1Y2/0q1YMG06v9e +oIMPAAABbJgVTmEAAAQDAEYwRAIgeYcRKQDCMIBnswrwBvmmSpCFWhjGl+zabCpo +E3R9nJcCIBaAx/TYKESO7iz+hU6bq7Dwzo0QpTIvho4ZdFfSAAHMAHYAsh4FzIui +zYogTodm+Su5iiUgZ2va+nDnsklTLe+LkF4AAAFsmBVLIQAABAMARzBFAiAfLUaq +ukt75a1pySCxrreQ/+/IAdyOSqXqbH1tZNKlTAIhALlcthwbBCfSSNEjTeJWOXss +clzGt9zAk256uboF0iFLMA0GCSqGSIb3DQEBCwUAA4IBAQCZXc5cmMCeqIVsRnRH +KsuGlT6tP2NdsK1+b9dJguP0zbQoxLg5qBMjRGjDo8BpGOni5mJmRJYDQ/GHKP/d +bd+n/4BDD5jI5/rtl43D+Y1G3S5tCRX/3s+At1LJcuaVRmvnywfE9OLXpI84SWtU +AainsxdCYcvopTOZG9UwkjyuEBV3tVsiQkhRSAzYStM75caRWer2pP7i3AwKNv29 +DDSHahXxUyjgAbD2XQojODT/AltEvuqcSrB2cRGXultLmJXFNDEQ5Om4GcjAk75D +pzNLvZuaXHwWoYdm+YTwdPwuZhWe9TxMYlpZbQR8dux2QXRfARF07Vi0+gOzPE9V +RG7L +-----END CERTIFICATE-----` + +var elasticChainCert = `-----BEGIN CERTIFICATE----- +MIIEizCCA3OgAwIBAgIORvCM288sVGbvMwHdXzQwDQYJKoZIhvcNAQELBQAwVzEL +MAkGA1UEBhMCQkUxGTAXBgNVBAoTEEdsb2JhbFNpZ24gbnYtc2ExEDAOBgNVBAsT +B1Jvb3QgQ0ExGzAZBgNVBAMTEkdsb2JhbFNpZ24gUm9vdCBDQTAeFw0xNTA4MTkw +MDAwMDBaFw0yNTA4MTkwMDAwMDBaMFcxCzAJBgNVBAYTAkJFMRkwFwYDVQQKExBH 
+bG9iYWxTaWduIG52LXNhMS0wKwYDVQQDEyRHbG9iYWxTaWduIENsb3VkU1NMIENB +IC0gU0hBMjU2IC0gRzMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCj +wHXhMpjl2a6EfI3oI19GlVtMoiVw15AEhYDJtfSKZU2Sy6XEQqC2eSUx7fGFIM0T +UT1nrJdNaJszhlyzey2q33egYdH1PPua/NPVlMrJHoAbkJDIrI32YBecMbjFYaLi +blclCG8kmZnPlL/Hi2uwH8oU+hibbBB8mSvaSmPlsk7C/T4QC0j0dwsv8JZLOu69 +Nd6FjdoTDs4BxHHT03fFCKZgOSWnJ2lcg9FvdnjuxURbRb0pO+LGCQ+ivivc41za +Wm+O58kHa36hwFOVgongeFxyqGy+Z2ur5zPZh/L4XCf09io7h+/awkfav6zrJ2R7 +TFPrNOEvmyBNVBJrfSi9AgMBAAGjggFTMIIBTzAOBgNVHQ8BAf8EBAMCAQYwHQYD +VR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMBIGA1UdEwEB/wQIMAYBAf8CAQAw +HQYDVR0OBBYEFKkrh+HOJEc7G7/PhTcCVZ0NlFjmMB8GA1UdIwQYMBaAFGB7ZhpF +DZfKiVAvfQTNNKj//P1LMD0GCCsGAQUFBwEBBDEwLzAtBggrBgEFBQcwAYYhaHR0 +cDovL29jc3AuZ2xvYmFsc2lnbi5jb20vcm9vdHIxMDMGA1UdHwQsMCowKKAmoCSG +Imh0dHA6Ly9jcmwuZ2xvYmFsc2lnbi5jb20vcm9vdC5jcmwwVgYDVR0gBE8wTTAL +BgkrBgEEAaAyARQwPgYGZ4EMAQICMDQwMgYIKwYBBQUHAgEWJmh0dHBzOi8vd3d3 +Lmdsb2JhbHNpZ24uY29tL3JlcG9zaXRvcnkvMA0GCSqGSIb3DQEBCwUAA4IBAQCi +HWmKCo7EFIMqKhJNOSeQTvCNrNKWYkc2XpLR+sWTtTcHZSnS9FNQa8n0/jT13bgd ++vzcFKxWlCecQqoETbftWNmZ0knmIC/Tp3e4Koka76fPhi3WU+kLk5xOq9lF7qSE +hf805A7Au6XOX5WJhXCqwV3szyvT2YPfA8qBpwIyt3dhECVO2XTz2XmCtSZwtFK8 +jzPXiq4Z0PySrS+6PKBIWEde/SBWlSDBch2rZpmk1Xg3SBufskw3Z3r9QtLTVp7T +HY7EDGiWtkdREPd76xUJZPX58GMWLT3fI0I6k2PMq69PVwbH/hRVYs4nERnh9ELt +IjBrNRpKBYCkZd/My2/Q +-----END CERTIFICATE-----` diff --git a/heartbeat/monitors/active/fixtures/expired.cert b/heartbeat/monitors/active/fixtures/expired.cert new file mode 100644 index 00000000000..e39ad893bd6 --- /dev/null +++ b/heartbeat/monitors/active/fixtures/expired.cert @@ -0,0 +1,23 @@ +-----BEGIN CERTIFICATE----- +MIID3zCCAsegAwIBAgIUS+ahW2wxDZ1bT/qYnenS8jrXUcAwDQYJKoZIhvcNAQEL +BQAwfzELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAk1OMRQwEgYDVQQHDAtNaW5uZWFw +b2xpczEVMBMGA1UECgwMRWxhc3RpYywgSW5jMRQwEgYDVQQLDAtFbmdpbmVlcmlu +ZzEgMB4GA1UEAwwXZXhwaXJlZHRlc3QuZXhhbXBsZS5uZXQwHhcNMjAwNDIxMTQw +MDE0WhcNMjAwNDIyMTQwMDE0WjB/MQswCQYDVQQGEwJVUzELMAkGA1UECAwCTU4x +FDASBgNVBAcMC01pbm5lYXBvbGlzMRUwEwYDVQQKDAxFbGFzdGljLCBJbmMxFDAS +BgNVBAsMC0VuZ2luZWVyaW5nMSAwHgYDVQQDDBdleHBpcmVkdGVzdC5leGFtcGxl +Lm5ldDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKh1iS5EZ7bDSKgW +R3JXAepMIaEewMSdbaoBtuNQb48XJGwI0mudF983a7JxGCSfw9mhVYa4YsSv79UE +XomGrWVrS01Cmf1VRIOmxevWMPhvnE6UH+5VxKUBk5ooNSty4iHkDFy2i5WWjxiv +de6Xqnn/dVQhuT/sW+rU/grCsGcdUwqsWnC547ekqiYRTtyZrdh+U0KRKqy5iBlH +9Woua+CnXmsD7+4MgGekErg9XLRHYveLOmLucbNlAIlRyfMDZL1RlXufcGwhzItz +JNM9N0NJ5bwrpuP0RYlYbbMYal+b1Tn2e8qkMm88hniQkuu69kUpKeewIOr62vIK +tI273GECAwEAAaNTMFEwHQYDVR0OBBYEFKgd6wQcgIdUSjtJREObD+R3q3MPMB8G +A1UdIwQYMBaAFKgd6wQcgIdUSjtJREObD+R3q3MPMA8GA1UdEwEB/wQFMAMBAf8w +DQYJKoZIhvcNAQELBQADggEBADkBqmCUcvVTqu5IIZ5PLz40jdg2luaDHEA6I2Ga +1ioabETfQhXeaNJflojYm0Bzsy2aneVLGM2KaZ76wN0yvib3MZ4miu4C/mDsR3bB +wq7/CAK2AcJXv1jk0vIrK6DhZfA2HaelBkQ8UHwWK7AO+JmS6jozIt1vySwPI1E7 +lMFWbs3bmsSmunj3+66XS2XguUKzFwUIAEOfsPFqT2OMsPIa7weUWuCV/zMi7fuB +HbgVouYvMTve8wx7+ozDk6CyvlRlx20xwdOvXaH3JILw7gTQWcAEWZLcB2ct1Zks +UTtbIAjBV6s0Pm/2/6MxxkDCVVUpwXiiKBRkHxzkgoH7TQw= +-----END CERTIFICATE----- diff --git a/heartbeat/monitors/active/fixtures/expired.key b/heartbeat/monitors/active/fixtures/expired.key new file mode 100644 index 00000000000..2a11440f7aa --- /dev/null +++ b/heartbeat/monitors/active/fixtures/expired.key @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCodYkuRGe2w0io +FkdyVwHqTCGhHsDEnW2qAbbjUG+PFyRsCNJrnRffN2uycRgkn8PZoVWGuGLEr+/V +BF6Jhq1la0tNQpn9VUSDpsXr1jD4b5xOlB/uVcSlAZOaKDUrcuIh5AxctouVlo8Y 
+r3Xul6p5/3VUIbk/7Fvq1P4KwrBnHVMKrFpwueO3pKomEU7cma3YflNCkSqsuYgZ +R/VqLmvgp15rA+/uDIBnpBK4PVy0R2L3izpi7nGzZQCJUcnzA2S9UZV7n3BsIcyL +cyTTPTdDSeW8K6bj9EWJWG2zGGpfm9U59nvKpDJvPIZ4kJLruvZFKSnnsCDq+try +CrSNu9xhAgMBAAECggEBAIc32QYvWESmWeK6B11rI5lqxK+snLT1XLpSp/esb++e +dtjU9/nzXd8JgEP6bZOwPiepTZpW1MjmJA+Lc0rWtMYsqoP4ityDHfzC2CmmgyZX +iFK2qS7I35BHRLA/x/X5QDRN9fJRgJdxA6mf5Xy/dtJ4UDhY3XbHBTzo/IWsoqYQ +4V3WBQYMGlhBArCoOx07pwc9NMTnXwpfe4rUdm3EaGGpe/9JT08JcTyFZfFUeFT1 +lfSYo5i+xPOCQ/FcC5GfWdciyY0c8ej8iwdxZb0kPI4hBu36+D6zD+YoNoC3CQTb +MecRFQ0MeTTuUMCdzFWtg+2FWnJucaLiaK9fKbVzi7UCgYEA0BAlfUdXdeDYMlW3 +2ReeOgH32bchPYwn2UvHYkIhhDp40STVw3BYQ0Zj9yJQXLFaoY1SFhwRJR1kpbSd +IfME/IzR/oMFvRUNQEPQZVH0Mg9FWIXLdXlV4qbU3AyA2r4x+VUCt3jp1n/5rG7g +cmoKBdCXNUAhK30bRGTdXB06Fp8CgYEAz0V+IlkGyDKcyCkja0ypA3AdSod/43az +7HMS3nf32hOFpgQuEtVYZc3NW/rdJFPksnRd6+RlD2nIoHZEa+adl2gESjGH2asw +nhxP/Pr4m8PGZF5BwdILRTVFukf5yrM6g63FgrgA9d+QdCsqoqrctItRyCgcfpL4 +XYXEKVWELP8CgYATxbUKVsFE/n0NK4AxLUFoGc/H7NNH2g3fZIgqGka9HiFlHq8B +x6dbnVDap3QjmucV+ywV1cz7TfPGm4djNoj+xxMdsK3W7i04MjmXp1Yhe7oHU4+m +NkWnKFuKHdYQ84okO6Pqc58lNzwu2sqRlOom60+zS8jbLSRuN3ehzVU72QKBgGm0 +qCo+Ou44maqfCFg9hWiicd3Dkt5feE0bNsFMb5PBJwTO1ux175ojxhqlqshPHLBC +FnAqT7v3mAD1r9lTiIVh3+YysnS5EJdiGw0KtWVDB9fCFkkRpPvLul7RPDw7AZmM +MtGCo8LBHHuSVDEXcG2HK9MnWbjXnWCcyrjFyx3jAoGAYsNGYm+OBr16NNsPtx3S +nRQJz9wqB2mIqNU8rRSjd5EUp03jhHiTEN9DT6iEnLGaTDBUgD2RlPvEVGk1N7FT +nh9tLtg2ytWIC/P+QrKwzdUUa00MSswTxRS3Cmy459UbLBiPgHBJ2h1G7gsiHPOt +erJWqYJ8DXvLzCPdMVzQxj8= +-----END PRIVATE KEY----- diff --git a/heartbeat/monitors/active/http/http_test.go b/heartbeat/monitors/active/http/http_test.go index 2db563d0044..86e55b4536c 100644 --- a/heartbeat/monitors/active/http/http_test.go +++ b/heartbeat/monitors/active/http/http_test.go @@ -22,6 +22,7 @@ import ( "crypto/x509" "fmt" "io/ioutil" + "net" "net/http" "net/http/httptest" "net/url" @@ -48,13 +49,13 @@ import ( "github.com/elastic/go-lookslike/validator" ) -func testRequest(t *testing.T, testURL string, useUrls bool) *beat.Event { - return testTLSRequest(t, testURL, useUrls, nil) +func sendSimpleTLSRequest(t *testing.T, testURL string, useUrls bool) *beat.Event { + return sendTLSRequest(t, testURL, useUrls, nil) } -// testTLSRequest tests the given request. certPath is optional, if given +// sendTLSRequest tests the given request. certPath is optional, if given // an empty string no cert will be set. -func testTLSRequest(t *testing.T, testURL string, useUrls bool, extraConfig map[string]interface{}) *beat.Event { +func sendTLSRequest(t *testing.T, testURL string, useUrls bool, extraConfig map[string]interface{}) *beat.Event { configSrc := map[string]interface{}{ "timeout": "1s", } @@ -92,7 +93,7 @@ func testTLSRequest(t *testing.T, testURL string, useUrls bool, extraConfig map[ func checkServer(t *testing.T, handlerFunc http.HandlerFunc, useUrls bool) (*httptest.Server, *beat.Event) { server := httptest.NewServer(handlerFunc) defer server.Close() - event := testRequest(t, server.URL, useUrls) + event := sendSimpleTLSRequest(t, server.URL, useUrls) return server, event } @@ -220,13 +221,6 @@ var downStatuses = []int{ http.StatusNetworkAuthenticationRequired, } -func serverHostname(t *testing.T, server *httptest.Server) string { - surl, err := url.Parse(server.URL) - require.NoError(t, err) - - return surl.Hostname() -} - func TestUpStatuses(t *testing.T) { for _, status := range upStatuses { status := status @@ -347,7 +341,7 @@ func runHTTPSServerCheck( // we give it a few attempts to see if the server can come up before we run the real assertions. 
var event *beat.Event for i := 0; i < 10; i++ { - event = testTLSRequest(t, server.URL, false, mergedExtraConfig) + event = sendTLSRequest(t, server.URL, false, mergedExtraConfig) if v, err := event.GetValue("monitor.status"); err == nil && reflect.DeepEqual(v, "up") { break } @@ -373,6 +367,30 @@ func TestHTTPSServer(t *testing.T) { runHTTPSServerCheck(t, server, nil) } +func TestExpiredHTTPSServer(t *testing.T) { + tlsCert, err := tls.LoadX509KeyPair("../fixtures/expired.cert", "../fixtures/expired.key") + require.NoError(t, err) + host, port, cert, closeSrv := hbtest.StartHTTPSServer(t, tlsCert) + defer closeSrv() + u := &url.URL{Scheme: "https", Host: net.JoinHostPort(host, port)} + + extraConfig := map[string]interface{}{"ssl.certificate_authorities": "../fixtures/expired.cert"} + event := sendTLSRequest(t, u.String(), true, extraConfig) + + testslike.Test( + t, + lookslike.Strict(lookslike.Compose( + hbtest.BaseChecks("127.0.0.1", "down", "http"), + hbtest.RespondingTCPChecks(), + hbtest.SummaryChecks(0, 1), + hbtest.ExpiredCertChecks(cert), + hbtest.URLChecks(t, &url.URL{Scheme: "https", Host: net.JoinHostPort(host, port)}), + // No HTTP fields expected because we fail at the TCP level + )), + event.Fields, + ) +} + func TestHTTPSx509Auth(t *testing.T) { wd, err := os.Getwd() require.NoError(t, err) @@ -418,7 +436,7 @@ func TestConnRefusedJob(t *testing.T) { url := fmt.Sprintf("http://%s:%d", ip, port) - event := testRequest(t, url, false) + event := sendSimpleTLSRequest(t, url, false) testslike.Test( t, @@ -440,7 +458,7 @@ func TestUnreachableJob(t *testing.T) { port := uint16(1234) url := fmt.Sprintf("http://%s:%d", ip, port) - event := testRequest(t, url, false) + event := sendSimpleTLSRequest(t, url, false) testslike.Test( t, diff --git a/heartbeat/monitors/active/http/task.go b/heartbeat/monitors/active/http/task.go index 65d2f1ae62c..2c227c1d89a 100644 --- a/heartbeat/monitors/active/http/task.go +++ b/heartbeat/monitors/active/http/task.go @@ -20,15 +20,19 @@ package http import ( "bytes" "context" + "crypto/x509" "fmt" "io/ioutil" "net" "net/http" + "net/url" "strconv" "strings" "sync" "time" + "github.com/elastic/beats/v7/heartbeat/monitors/active/dialchain/tlsmeta" + "github.com/elastic/beats/v7/heartbeat/eventext" "github.com/elastic/beats/v7/heartbeat/look" "github.com/elastic/beats/v7/heartbeat/monitors" @@ -232,11 +236,16 @@ func execPing( // Send the HTTP request. We don't immediately return on error since // we may want to add additional fields to contextualize the error. 
 	start, resp, errReason := execRequest(client, req)
-
 	// If we have no response object or an error was set there probably was an IO error, we can skip the rest of the logic
 	// since that logic is for adding metadata relating to completed HTTP transactions that have errored
 	// in other ways
 	if resp == nil || errReason != nil {
+		// Even on failure we may have received the server's certificate (e.g. when
+		// it is expired), so surface its metadata alongside the error.
+		if urlErr, ok := errReason.Unwrap().(*url.Error); ok {
+			if certErr, ok := urlErr.Err.(x509.CertificateInvalidError); ok {
+				tlsmeta.AddCertMetadata(event.Fields, []*x509.Certificate{certErr.Cert})
+			}
+		}
+
 		return start, time.Now(), errReason
 	}
diff --git a/heartbeat/monitors/active/icmp/icmp.go b/heartbeat/monitors/active/icmp/icmp.go
index 1cb19c90798..45fdf8a54b3 100644
--- a/heartbeat/monitors/active/icmp/icmp.go
+++ b/heartbeat/monitors/active/icmp/icmp.go
@@ -32,46 +32,71 @@ import (
 	"github.com/elastic/beats/v7/libbeat/logp"
 )
 
+var debugf = logp.MakeDebug("icmp")
+
 func init() {
 	monitors.RegisterActive("icmp", create)
 }
 
-var debugf = logp.MakeDebug("icmp")
-
 func create(
 	name string,
-	cfg *common.Config,
+	commonConfig *common.Config,
 ) (jobs []jobs.Job, endpoints int, err error) {
+	loop, err := getStdLoop()
+	if err != nil {
+		logp.Warn("Failed to initialize ICMP loop %v", err)
+		return nil, 0, err
+	}
+
 	config := DefaultConfig
-	if err := cfg.Unpack(&config); err != nil {
+	if err := commonConfig.Unpack(&config); err != nil {
 		return nil, 0, err
 	}
 
-	ipVersion := config.Mode.Network()
-	if len(config.Hosts) > 0 && ipVersion == "" {
-		err := fmt.Errorf("pinging hosts requires ipv4 or ipv6 mode enabled")
+	jf, err := newJobFactory(config, monitors.NewStdResolver(), loop)
+	if err != nil {
 		return nil, 0, err
 	}
+	return jf.makeJobs()
 
-	var loopErr error
-	loopInit.Do(func() {
-		debugf("initializing ICMP loop")
-		loop, loopErr = newICMPLoop()
-	})
-	if loopErr != nil {
-		logp.Warn("Failed to initialize ICMP loop %v", loopErr)
-		return nil, 0, loopErr
+}
+
+type jobFactory struct {
+	config    Config
+	resolver  monitors.Resolver
+	loop      ICMPLoop
+	ipVersion string
+}
+
+func newJobFactory(config Config, resolver monitors.Resolver, loop ICMPLoop) (*jobFactory, error) {
+	jf := &jobFactory{config: config, resolver: resolver, loop: loop}
+	err := jf.checkConfig()
+	if err != nil {
+		return nil, err
 	}
-	debugf("ICMP loop successfully initialized")
 
-	if err := loop.checkNetworkMode(ipVersion); err != nil {
+	return jf, nil
+}
+
+func (jf *jobFactory) checkConfig() error {
+	jf.ipVersion = jf.config.Mode.Network()
+	if len(jf.config.Hosts) > 0 && jf.ipVersion == "" {
+		err := fmt.Errorf("pinging hosts requires ipv4 or ipv6 mode enabled")
+		return err
+	}
+
+	return nil
+}
+
+func (jf *jobFactory) makeJobs() (j []jobs.Job, endpoints int, err error) {
+	if err := jf.loop.checkNetworkMode(jf.ipVersion); err != nil {
 		return nil, 0, err
 	}
 
-	pingFactory := monitors.MakePingIPFactory(createPingIPFactory(&config))
+	pingFactory := jf.pingIPFactory(&jf.config)
 
-	for _, host := range config.Hosts {
-		job, err := monitors.MakeByHostJob(host, config.Mode, monitors.NewStdResolver(), pingFactory)
+	for _, host := range jf.config.Hosts {
+		// Use the injected resolver so callers and tests can substitute their own.
+		job, err := monitors.MakeByHostJob(host, jf.config.Mode, jf.resolver, pingFactory)
 
 		if err != nil {
 			return nil, 0, err
@@ -82,15 +107,15 @@ func create(
 		return nil, 0, err
 	}
 
-		jobs = append(jobs, wrappers.WithURLField(u, job))
+		j = append(j, wrappers.WithURLField(u, job))
 	}
 
-	return jobs, len(config.Hosts), nil
+	return j, len(jf.config.Hosts), nil
}
 
-func createPingIPFactory(config *Config) func(*beat.Event, *net.IPAddr) error {
-	return 
func(event *beat.Event, ip *net.IPAddr) error { - rtt, n, err := loop.ping(ip, config.Timeout, config.Wait) +func (jf *jobFactory) pingIPFactory(config *Config) func(*net.IPAddr) jobs.Job { + return monitors.MakePingIPFactory(func(event *beat.Event, ip *net.IPAddr) error { + rtt, n, err := jf.loop.ping(ip, config.Timeout, config.Wait) if err != nil { return err } @@ -98,9 +123,9 @@ func createPingIPFactory(config *Config) func(*beat.Event, *net.IPAddr) error { icmpFields := common.MapStr{"requests": n} if err == nil { icmpFields["rtt"] = look.RTT(rtt) - eventext.MergeEventFields(event, icmpFields) + eventext.MergeEventFields(event, common.MapStr{"icmp": icmpFields}) } return nil - } + }) } diff --git a/heartbeat/monitors/active/icmp/icmp_test.go b/heartbeat/monitors/active/icmp/icmp_test.go new file mode 100644 index 00000000000..11e7dae5380 --- /dev/null +++ b/heartbeat/monitors/active/icmp/icmp_test.go @@ -0,0 +1,90 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package icmp + +import ( + "net" + "net/url" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/elastic/beats/v7/heartbeat/hbtest" + "github.com/elastic/beats/v7/heartbeat/look" + "github.com/elastic/beats/v7/heartbeat/monitors" + "github.com/elastic/beats/v7/heartbeat/monitors/wrappers" + "github.com/elastic/beats/v7/heartbeat/scheduler/schedule" + "github.com/elastic/beats/v7/libbeat/beat" + "github.com/elastic/go-lookslike" + "github.com/elastic/go-lookslike/testslike" +) + +func TestICMPFields(t *testing.T) { + host := "localhost" + hostURL := &url.URL{Scheme: "icmp", Host: host} + ip := "127.0.0.1" + cfg := Config{ + Hosts: []string{host}, + Mode: monitors.IPSettings{IPv4: true, IPv6: false, Mode: monitors.PingAny}, + } + testMockLoop, e := execTestICMPCheck(t, cfg) + + validator := lookslike.Strict( + lookslike.Compose( + hbtest.BaseChecks(ip, "up", "icmp"), + hbtest.SummaryChecks(1, 0), + hbtest.URLChecks(t, hostURL), + hbtest.ResolveChecks(ip), + lookslike.MustCompile(map[string]interface{}{ + "icmp.requests": 1, + "icmp.rtt": look.RTT(testMockLoop.pingRtt), + }), + ), + ) + testslike.Test(t, validator, e.Fields) +} + +func execTestICMPCheck(t *testing.T, cfg Config) (mockLoop, *beat.Event) { + tl := mockLoop{pingRtt: time.Microsecond * 1000, pingRequests: 1} + jf, err := newJobFactory(cfg, monitors.NewStdResolver(), tl) + require.NoError(t, err) + j, endpoints, err := jf.makeJobs() + require.Len(t, j, 1) + require.Equal(t, 1, endpoints) + e := &beat.Event{} + sched, _ := schedule.Parse("@every 1s") + wrapped := wrappers.WrapCommon(j, "test", "", "icmp", sched, time.Duration(0)) + wrapped[0](e) + return tl, e +} + +type mockLoop struct { + pingRtt time.Duration + pingRequests int + pingErr error + checkNetworkModeErr error +} + +func (t mockLoop) 
checkNetworkMode(mode string) error { + return t.checkNetworkModeErr +} + +func (t mockLoop) ping(addr *net.IPAddr, timeout time.Duration, interval time.Duration) (time.Duration, int, error) { + return t.pingRtt, t.pingRequests, t.pingErr +} diff --git a/heartbeat/monitors/active/icmp/loop.go b/heartbeat/monitors/active/icmp/loop.go index 414686c1d31..de4d0ef4dfc 100644 --- a/heartbeat/monitors/active/icmp/loop.go +++ b/heartbeat/monitors/active/icmp/loop.go @@ -18,371 +18,15 @@ package icmp import ( - "bytes" - "encoding/binary" - "errors" - "fmt" - "math/rand" "net" - "os" - "runtime" - "sync" "time" - - "golang.org/x/net/icmp" - "golang.org/x/net/ipv4" - "golang.org/x/net/ipv6" -) - -type icmpLoop struct { - conn4, conn6 *icmp.PacketConn - recv chan packet - - mutex sync.Mutex - requests map[requestID]*requestContext -} - -type timeoutError struct { -} - -const ( - // iana types - protocolICMP = 1 - protocolIPv6ICMP = 58 -) - -type packet struct { - ts time.Time - addr net.Addr - - Type icmp.Type // type, either ipv4.ICMPType or ipv6.ICMPType - Code int // code - Checksum int // checksum - Echo icmp.Echo -} - -type requestID struct { - addr string - proto int - id int - seq int -} - -type requestContext struct { - l *icmpLoop - id requestID - ts time.Time - result chan requestResult -} - -type requestResult struct { - packet packet - err error -} - -var ( - loopInit sync.Once - loop *icmpLoop ) -func noPingCapabilityError(message string) error { - return fmt.Errorf(fmt.Sprintf("Insufficient privileges to perform ICMP ping. %s", message)) -} - -func newICMPLoop() (*icmpLoop, error) { - // Log errors at info level, as the loop is setup globally when ICMP module is loaded - // first (not yet configured). - // With multiple configurations using the icmp loop, we have to postpose - // IPv4/IPv6 checking - conn4 := createListener("IPv4", "ip4:icmp") - conn6 := createListener("IPv6", "ip6:ipv6-icmp") - unprivilegedPossible := false - l := &icmpLoop{ - conn4: conn4, - conn6: conn6, - recv: make(chan packet, 16), - requests: map[requestID]*requestContext{}, - } - - if l.conn4 == nil && l.conn6 == nil { - switch runtime.GOOS { - case "linux", "darwin": - unprivilegedPossible = true - //This is non-privileged ICMP, not udp - l.conn4 = createListener("Unprivileged IPv4", "udp4") - l.conn6 = createListener("Unprivileged IPv6", "udp6") - } - } - - if l.conn4 != nil { - go l.runICMPRecv(l.conn4, protocolICMP) - } - if l.conn6 != nil { - go l.runICMPRecv(l.conn6, protocolIPv6ICMP) - } - - if l.conn4 == nil && l.conn6 == nil { - if unprivilegedPossible { - var buffer bytes.Buffer - path, _ := os.Executable() - buffer.WriteString("You can run without root by setting cap_net_raw:\n sudo setcap cap_net_raw+eip ") - buffer.WriteString(path + " \n") - buffer.WriteString("Your system allows the use of unprivileged ping by setting net.ipv4.ping_group_range \n sysctl -w net.ipv4.ping_group_range=' ' ") - return nil, noPingCapabilityError(buffer.String()) - } - return nil, noPingCapabilityError("You must provide the appropriate permissions to this executable") - } - - return l, nil -} - -func (l *icmpLoop) checkNetworkMode(mode string) error { - ip4, ip6 := false, false - switch mode { - case "ip4": - ip4 = true - case "ip6": - ip6 = true - case "ip": - ip4, ip6 = true, true - default: - return fmt.Errorf("'%v' is not supported", mode) - } - - if ip4 && l.conn4 == nil { - return errors.New("failed to initiate IPv4 support. 
Check log details for permission configuration") - } - if ip6 && l.conn6 == nil { - return errors.New("failed to initiate IPv6 support. Check log details for permission configuration") - } - - return nil -} - -func (l *icmpLoop) runICMPRecv(conn *icmp.PacketConn, proto int) { - for { - bytes := make([]byte, 512) - conn.SetReadDeadline(time.Now().Add(1 * time.Second)) - _, addr, err := conn.ReadFrom(bytes) - if err != nil { - if neterr, ok := err.(*net.OpError); ok { - if neterr.Timeout() { - continue - } else { - // TODO: report error and quit loop? - return - } - } - } - - ts := time.Now() - m, err := icmp.ParseMessage(proto, bytes) - if err != nil { - continue - } - - // process echo reply only - if m.Type != ipv4.ICMPTypeEchoReply && m.Type != ipv6.ICMPTypeEchoReply { - continue - } - echo, ok := m.Body.(*icmp.Echo) - if !ok { - continue - } - - id := requestID{ - addr: addr.String(), - proto: proto, - id: echo.ID, - seq: echo.Seq, - } - - l.mutex.Lock() - ctx := l.requests[id] - if ctx != nil { - delete(l.requests, id) - } - l.mutex.Unlock() - - // no return context available for echo reply -> handle next message - if ctx == nil { - continue - } - - ctx.result <- requestResult{ - packet: packet{ - ts: ts, - addr: addr, - - Type: m.Type, - Code: m.Code, - Checksum: m.Checksum, - Echo: *echo, - }, - } - } -} - -func (l *icmpLoop) ping( - addr *net.IPAddr, - timeout time.Duration, - interval time.Duration, -) (time.Duration, int, error) { - - var err error - toTimer := time.NewTimer(timeout) - defer toTimer.Stop() - - ticker := time.NewTicker(interval) - defer ticker.Stop() - - done := false - doneSignal := make(chan struct{}) - - success := false - var rtt time.Duration - - // results accepts first response received only - results := make(chan time.Duration, 1) - requests := 0 - - awaitResponse := func(ctx *requestContext) { - select { - case <-doneSignal: - ctx.Stop() - - case r := <-ctx.result: - // ctx is removed from request tables automatically a response is - // received. No need to stop it. - - // try to push RTT. The first result available will be reported - select { - case results <- r.packet.ts.Sub(ctx.ts): - default: - } - } - } - - for !done { - var ctx *requestContext - ctx, err = l.sendEchoRequest(addr) - if err != nil { - close(doneSignal) - break - } - go awaitResponse(ctx) - requests++ - - select { - case <-toTimer.C: - // no response for any active request received. Finish loop - // and remove all requests from request table. - done = true - close(doneSignal) - - case <-ticker.C: - // No response yet. 
Send another request with every tick - - case rtt = <-results: - success = true - - done = true - close(doneSignal) - } - } - - if err != nil { - return 0, 0, err - } - - if !success { - return 0, requests, timeoutError{} - } - - return rtt, requests, nil -} - -func (l *icmpLoop) sendEchoRequest(addr *net.IPAddr) (*requestContext, error) { - var conn *icmp.PacketConn - var proto int - var typ icmp.Type - - if l == nil { - panic("icmp loop not initialized") - } - - if isIPv4(addr.IP) { - conn = l.conn4 - proto = protocolICMP - typ = ipv4.ICMPTypeEcho - } else if isIPv6(addr.IP) { - conn = l.conn6 - proto = protocolIPv6ICMP - typ = ipv6.ICMPTypeEchoRequest - } else { - return nil, fmt.Errorf("%v is unknown ip address", addr) - } - - id := requestID{ - addr: addr.String(), - proto: proto, - id: rand.Intn(0xffff), - seq: rand.Intn(0xffff), - } - - ctx := &requestContext{ - l: l, - id: id, - result: make(chan requestResult, 1), - } - - l.mutex.Lock() - l.requests[id] = ctx - l.mutex.Unlock() - - payloadBuf := make([]byte, 0, 8) - payload := bytes.NewBuffer(payloadBuf) - ts := time.Now() - binary.Write(payload, binary.BigEndian, ts.UnixNano()) - - msg := &icmp.Message{ - Type: typ, - Body: &icmp.Echo{ - ID: id.id, - Seq: id.seq, - Data: payload.Bytes(), - }, - } - encoded, _ := msg.Marshal(nil) - - _, err := conn.WriteTo(encoded, addr) - if err != nil { - return nil, err - } - - ctx.ts = ts - return ctx, nil -} - -func createListener(name, network string) *icmp.PacketConn { - conn, err := icmp.ListenPacket(network, "") - - // XXX: need to check for conn == nil, as 'err != nil' seems always to be - // true, even if error value itself is `nil`. Checking for conn suppresses - // misleading log message. - if conn == nil && err != nil { - return nil - } - return conn -} - -// timeoutError implements net.Error interface -func (timeoutError) Error() string { return "ping timeout" } -func (timeoutError) Timeout() bool { return true } -func (timeoutError) Temporary() bool { return true } - -func (r *requestContext) Stop() { - r.l.mutex.Lock() - delete(r.l.requests, r.id) - r.l.mutex.Unlock() +type ICMPLoop interface { + checkNetworkMode(mode string) error + ping( + addr *net.IPAddr, + timeout time.Duration, + interval time.Duration, + ) (time.Duration, int, error) } diff --git a/heartbeat/monitors/active/icmp/stdloop.go b/heartbeat/monitors/active/icmp/stdloop.go new file mode 100644 index 00000000000..932154c61ad --- /dev/null +++ b/heartbeat/monitors/active/icmp/stdloop.go @@ -0,0 +1,405 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
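+
+// stdloop.go contains the concrete, socket-based implementation of the
+// ICMPLoop interface declared in loop.go; a single instance is shared by
+// all ICMP monitors.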
+
+package icmp
+
+import (
+	"bytes"
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"math/rand"
+	"net"
+	"os"
+	"runtime"
+	"sync"
+	"time"
+
+	"golang.org/x/net/icmp"
+	"golang.org/x/net/ipv4"
+	"golang.org/x/net/ipv6"
+)
+
+type stdICMPLoop struct {
+	conn4, conn6 *icmp.PacketConn
+	recv         chan packet
+
+	mutex    sync.Mutex
+	requests map[requestID]*requestContext
+}
+
+type timeoutError struct {
+}
+
+const (
+	// iana types
+	protocolICMP     = 1
+	protocolIPv6ICMP = 58
+)
+
+type packet struct {
+	ts   time.Time
+	addr net.Addr
+
+	Type     icmp.Type // type, either ipv4.ICMPType or ipv6.ICMPType
+	Code     int       // code
+	Checksum int       // checksum
+	Echo     icmp.Echo
+}
+
+type requestID struct {
+	addr  string
+	proto int
+	id    int
+	seq   int
+}
+
+type requestContext struct {
+	l      *stdICMPLoop
+	id     requestID
+	ts     time.Time
+	result chan requestResult
+}
+
+type requestResult struct {
+	packet packet
+	err    error
+}
+
+// The standard ICMP loop is a singleton since it doesn't make sense to have
+// multiples. While having a singleton is ugly, it is mandatory for the ICMP
+// interface in Go, where all monitors must share a single loop.
+// These vars should not be used directly; getStdLoop should be invoked to
+// initialize and return the singleton.
+var (
+	stdICMPLoopInit      sync.Once
+	stdICMPLoopSingleton *stdICMPLoop
+)
+
+func getStdLoop() (*stdICMPLoop, error) {
+	var loopErr error
+	stdICMPLoopInit.Do(func() {
+		debugf("initializing ICMP loop")
+		stdICMPLoopSingleton, loopErr = newICMPLoop()
+		if loopErr == nil {
+			debugf("ICMP loop successfully initialized")
+		}
+	})
+	return stdICMPLoopSingleton, loopErr
+}
+
+func noPingCapabilityError(message string) error {
+	return fmt.Errorf("Insufficient privileges to perform ICMP ping. %s", message)
+}
+
+func newICMPLoop() (*stdICMPLoop, error) {
+	// Log errors at info level, as the loop is set up globally when the ICMP module is loaded
+	// first (not yet configured).
+	// With multiple configurations using the icmp loop, we have to postpone
+	// IPv4/IPv6 checking
+	conn4 := createListener("IPv4", "ip4:icmp")
+	conn6 := createListener("IPv6", "ip6:ipv6-icmp")
+	unprivilegedPossible := false
+	l := &stdICMPLoop{
+		conn4:    conn4,
+		conn6:    conn6,
+		recv:     make(chan packet, 16),
+		requests: map[requestID]*requestContext{},
+	}
+
+	if l.conn4 == nil && l.conn6 == nil {
+		switch runtime.GOOS {
+		case "linux", "darwin":
+			unprivilegedPossible = true
+			// This is non-privileged ICMP, not UDP
+			l.conn4 = createListener("Unprivileged IPv4", "udp4")
+			l.conn6 = createListener("Unprivileged IPv6", "udp6")
+		}
+	}
+
+	if l.conn4 != nil {
+		go l.runICMPRecv(l.conn4, protocolICMP)
+	}
+	if l.conn6 != nil {
+		go l.runICMPRecv(l.conn6, protocolIPv6ICMP)
+	}
+
+	if l.conn4 == nil && l.conn6 == nil {
+		if unprivilegedPossible {
+			var buffer bytes.Buffer
+			path, _ := os.Executable()
+			buffer.WriteString("You can run without root by setting cap_net_raw:\n sudo setcap cap_net_raw+eip ")
+			buffer.WriteString(path + " \n")
+			buffer.WriteString("Your system allows the use of unprivileged ping by setting net.ipv4.ping_group_range \n sysctl -w net.ipv4.ping_group_range=' ' ")
+			return nil, noPingCapabilityError(buffer.String())
+		}
+		return nil, noPingCapabilityError("You must provide the appropriate permissions to this executable")
+	}
+
+	return l, nil
+}
+
+func (l *stdICMPLoop) checkNetworkMode(mode string) error {
+	ip4, ip6 := false, false
+	switch mode {
+	case "ip4":
+		ip4 = true
+	case "ip6":
+		ip6 = true
+	case "ip":
+		ip4, ip6 = true, true
+	default:
+		return fmt.Errorf("'%v' is not supported", mode)
+	}
+
+	if ip4 && l.conn4 == nil {
+		return errors.New("failed to initiate IPv4 support. Check log details for permission configuration")
+	}
+	if ip6 && l.conn6 == nil {
+		return errors.New("failed to initiate IPv6 support. Check log details for permission configuration")
+	}
+
+	return nil
+}
+
+func (l *stdICMPLoop) runICMPRecv(conn *icmp.PacketConn, proto int) {
+	for {
+		bytes := make([]byte, 512)
+		conn.SetReadDeadline(time.Now().Add(1 * time.Second))
+		_, addr, err := conn.ReadFrom(bytes)
+		if err != nil {
+			if neterr, ok := err.(*net.OpError); ok {
+				if neterr.Timeout() {
+					continue
+				} else {
+					// TODO: report error and quit loop?
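+					// For now, a hard read error silently ends this receive loop.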
+					return
+				}
+			}
+		}
+
+		ts := time.Now()
+		m, err := icmp.ParseMessage(proto, bytes)
+		if err != nil {
+			continue
+		}
+
+		// process echo reply only
+		if m.Type != ipv4.ICMPTypeEchoReply && m.Type != ipv6.ICMPTypeEchoReply {
+			continue
+		}
+		echo, ok := m.Body.(*icmp.Echo)
+		if !ok {
+			continue
+		}
+
+		id := requestID{
+			addr:  addr.String(),
+			proto: proto,
+			id:    echo.ID,
+			seq:   echo.Seq,
+		}
+
+		l.mutex.Lock()
+		ctx := l.requests[id]
+		if ctx != nil {
+			delete(l.requests, id)
+		}
+		l.mutex.Unlock()
+
+		// no return context available for echo reply -> handle next message
+		if ctx == nil {
+			continue
+		}
+
+		ctx.result <- requestResult{
+			packet: packet{
+				ts:   ts,
+				addr: addr,
+
+				Type:     m.Type,
+				Code:     m.Code,
+				Checksum: m.Checksum,
+				Echo:     *echo,
+			},
+		}
+	}
+}
+
+func (l *stdICMPLoop) ping(
+	addr *net.IPAddr,
+	timeout time.Duration,
+	interval time.Duration,
+) (time.Duration, int, error) {
+	var err error
+	toTimer := time.NewTimer(timeout)
+	defer toTimer.Stop()
+
+	ticker := time.NewTicker(interval)
+	defer ticker.Stop()
+
+	done := false
+	doneSignal := make(chan struct{})
+
+	success := false
+	var rtt time.Duration
+
+	// results accepts first response received only
+	results := make(chan time.Duration, 1)
+	requests := 0
+
+	awaitResponse := func(ctx *requestContext) {
+		select {
+		case <-doneSignal:
+			ctx.Stop()
+
+		case r := <-ctx.result:
+			// ctx is removed from the request table automatically when a response
+			// is received. No need to stop it.
+
+			// try to push RTT. The first result available will be reported
+			select {
+			case results <- r.packet.ts.Sub(ctx.ts):
+			default:
+			}
+		}
+	}
+
+	for !done {
+		var ctx *requestContext
+		ctx, err = l.sendEchoRequest(addr)
+		if err != nil {
+			close(doneSignal)
+			break
+		}
+		go awaitResponse(ctx)
+		requests++
+
+		select {
+		case <-toTimer.C:
+			// no response for any active request received. Finish loop
+			// and remove all requests from the request table.
+			done = true
+			close(doneSignal)
+
+		case <-ticker.C:
+			// No response yet. 
Send another request with every tick + + case rtt = <-results: + success = true + + done = true + close(doneSignal) + } + } + + if err != nil { + return 0, 0, err + } + + if !success { + return 0, requests, timeoutError{} + } + + return rtt, requests, nil +} + +func (l *stdICMPLoop) sendEchoRequest(addr *net.IPAddr) (*requestContext, error) { + var conn *icmp.PacketConn + var proto int + var typ icmp.Type + + if l == nil { + panic("icmp loop not initialized") + } + + if isIPv4(addr.IP) { + conn = l.conn4 + proto = protocolICMP + typ = ipv4.ICMPTypeEcho + } else if isIPv6(addr.IP) { + conn = l.conn6 + proto = protocolIPv6ICMP + typ = ipv6.ICMPTypeEchoRequest + } else { + return nil, fmt.Errorf("%v is unknown ip address", addr) + } + + id := requestID{ + addr: addr.String(), + proto: proto, + id: rand.Intn(0xffff), + seq: rand.Intn(0xffff), + } + + ctx := &requestContext{ + l: l, + id: id, + result: make(chan requestResult, 1), + } + + l.mutex.Lock() + l.requests[id] = ctx + l.mutex.Unlock() + + payloadBuf := make([]byte, 0, 8) + payload := bytes.NewBuffer(payloadBuf) + ts := time.Now() + binary.Write(payload, binary.BigEndian, ts.UnixNano()) + + msg := &icmp.Message{ + Type: typ, + Body: &icmp.Echo{ + ID: id.id, + Seq: id.seq, + Data: payload.Bytes(), + }, + } + encoded, _ := msg.Marshal(nil) + + _, err := conn.WriteTo(encoded, addr) + if err != nil { + return nil, err + } + + ctx.ts = ts + return ctx, nil +} + +func createListener(name, network string) *icmp.PacketConn { + conn, err := icmp.ListenPacket(network, "") + + // XXX: need to check for conn == nil, as 'err != nil' seems always to be + // true, even if error value itself is `nil`. Checking for conn suppresses + // misleading log message. + if conn == nil && err != nil { + return nil + } + return conn +} + +// timeoutError implements net.Error interface +func (timeoutError) Error() string { return "ping timeout" } +func (timeoutError) Timeout() bool { return true } +func (timeoutError) Temporary() bool { return true } + +func (r *requestContext) Stop() { + r.l.mutex.Lock() + delete(r.l.requests, r.id) + r.l.mutex.Unlock() +} diff --git a/heartbeat/monitors/active/tcp/tcp.go b/heartbeat/monitors/active/tcp/tcp.go index 05c687dd65b..26f96d2e010 100644 --- a/heartbeat/monitors/active/tcp/tcp.go +++ b/heartbeat/monitors/active/tcp/tcp.go @@ -18,18 +18,19 @@ package tcp import ( + "crypto/x509" "net" "net/url" "time" "github.com/elastic/beats/v7/heartbeat/eventext" "github.com/elastic/beats/v7/heartbeat/look" - "github.com/elastic/beats/v7/heartbeat/reason" - "github.com/elastic/beats/v7/heartbeat/monitors" "github.com/elastic/beats/v7/heartbeat/monitors/active/dialchain" + "github.com/elastic/beats/v7/heartbeat/monitors/active/dialchain/tlsmeta" "github.com/elastic/beats/v7/heartbeat/monitors/jobs" "github.com/elastic/beats/v7/heartbeat/monitors/wrappers" + "github.com/elastic/beats/v7/heartbeat/reason" "github.com/elastic/beats/v7/libbeat/beat" "github.com/elastic/beats/v7/libbeat/common" "github.com/elastic/beats/v7/libbeat/common/transport" @@ -226,6 +227,9 @@ func (jf *jobFactory) execDialer( conn, err := dialer.Dial("tcp", addr) if err != nil { debugf("dial failed with: %v", err) + if certErr, ok := err.(x509.CertificateInvalidError); ok { + tlsmeta.AddCertMetadata(event.Fields, []*x509.Certificate{certErr.Cert}) + } return reason.IOFailed(err) } defer conn.Close() diff --git a/heartbeat/monitors/active/tcp/tls_test.go b/heartbeat/monitors/active/tcp/tls_test.go index 0628b1694b4..ff4cd569db5 100644 --- 
a/heartbeat/monitors/active/tcp/tls_test.go +++ b/heartbeat/monitors/active/tcp/tls_test.go @@ -18,12 +18,14 @@ package tcp import ( + "crypto/tls" "crypto/x509" "net" "net/http" "net/http/httptest" "net/url" "os" + "strconv" "testing" "time" @@ -112,6 +114,35 @@ func TestTLSInvalidCert(t *testing.T) { ) } +func TestTLSExpiredCert(t *testing.T) { + certFile := "../fixtures/expired.cert" + tlsCert, err := tls.LoadX509KeyPair(certFile, "../fixtures/expired.key") + require.NoError(t, err) + + ip, portStr, cert, closeSrv := hbtest.StartHTTPSServer(t, tlsCert) + defer closeSrv() + + portInt, err := strconv.Atoi(portStr) + port := uint16(portInt) + require.NoError(t, err) + + host := "localhost" + event := testTLSTCPCheck(t, host, port, certFile, monitors.NewStdResolver()) + + testslike.Test( + t, + lookslike.Strict(lookslike.Compose( + hbtest.RespondingTCPChecks(), + hbtest.BaseChecks(ip, "down", "tcp"), + hbtest.SummaryChecks(0, 1), + hbtest.SimpleURLChecks(t, "ssl", host, port), + hbtest.ResolveChecks(ip), + hbtest.ExpiredCertChecks(cert), + )), + event.Fields, + ) +} + func setupTLSTestServer(t *testing.T) (ip string, port uint16, cert *x509.Certificate, certFile *os.File, teardown func()) { // Start up a TLS Server server, port, err := setupServer(t, func(handler http.Handler) (*httptest.Server, error) { diff --git a/heartbeat/reason/reason.go b/heartbeat/reason/reason.go index 677a87a8971..ad1823af8e3 100644 --- a/heartbeat/reason/reason.go +++ b/heartbeat/reason/reason.go @@ -22,6 +22,7 @@ import "github.com/elastic/beats/v7/libbeat/common" type Reason interface { error Type() string + Unwrap() error } type ValidateError struct { @@ -47,9 +48,11 @@ func IOFailed(err error) Reason { } func (e ValidateError) Error() string { return e.err.Error() } +func (e ValidateError) Unwrap() error { return e.err } func (ValidateError) Type() string { return "validate" } func (e IOError) Error() string { return e.err.Error() } +func (e IOError) Unwrap() error { return e.err } func (IOError) Type() string { return "io" } func FailError(typ string, err error) common.MapStr { diff --git a/journalbeat/journalbeat.reference.yml b/journalbeat/journalbeat.reference.yml index b5b8fd9c11d..0d995735fba 100644 --- a/journalbeat/journalbeat.reference.yml +++ b/journalbeat/journalbeat.reference.yml @@ -464,6 +464,27 @@ output.elasticsearch: # The pin is a base64 encoded string of the SHA-256 fingerprint. #ssl.ca_sha256: "" + # Enable Kerberos support. Kerberos is automatically enabled if any Kerberos setting is set. + #kerberos.enabled: true + + # Authentication type to use with Kerberos. Available options: keytab, password. + #kerberos.auth_type: password + + # Path to the keytab file. It is used when auth_type is set to keytab. + #kerberos.keytab: /etc/elastic.keytab + + # Path to the Kerberos configuration. + #kerberos.config_path: /etc/krb5.conf + + # Name of the Kerberos user. + #kerberos.username: elastic + + # Password of the Kerberos user. It is used when auth_type is set to password. + #kerberos.password: changeme + + # Kerberos realm. + #kerberos.realm: ELASTIC + #----------------------------- Logstash output --------------------------------- #output.logstash: # Boolean flag to enable or disable the output module. @@ -732,6 +753,9 @@ output.elasticsearch: # never, once, and freely. Default is never. #ssl.renegotiation: never + # Enable Kerberos support. Kerberos is automatically enabled if any Kerberos setting is set. + #kerberos.enabled: true + # Authentication type to use with Kerberos. 
Available options: keytab, password. #kerberos.auth_type: password @@ -1318,6 +1342,27 @@ logging.files: # never, once, and freely. Default is never. #ssl.renegotiation: never + # Enable Kerberos support. Kerberos is automatically enabled if any Kerberos setting is set. + #kerberos.enabled: true + + # Authentication type to use with Kerberos. Available options: keytab, password. + #kerberos.auth_type: password + + # Path to the keytab file. It is used when auth_type is set to keytab. + #kerberos.keytab: /etc/elastic.keytab + + # Path to the Kerberos configuration. + #kerberos.config_path: /etc/krb5.conf + + # Name of the Kerberos user. + #kerberos.username: elastic + + # Password of the Kerberos user. It is used when auth_type is set to password. + #kerberos.password: changeme + + # Kerberos realm. + #kerberos.realm: ELASTIC + #metrics.period: 10s #state.period: 1m diff --git a/journalbeat/magefile.go b/journalbeat/magefile.go index ae30d5983a6..58fdf91dfbe 100644 --- a/journalbeat/magefile.go +++ b/journalbeat/magefile.go @@ -20,7 +20,6 @@ package main import ( - "context" "fmt" "strings" "time" @@ -37,6 +36,8 @@ import ( _ "github.com/elastic/beats/v7/dev-tools/mage/target/integtest/notests" // mage:import _ "github.com/elastic/beats/v7/dev-tools/mage/target/unittest" + // mage:import + _ "github.com/elastic/beats/v7/dev-tools/mage/target/test" ) func init() { @@ -116,13 +117,6 @@ func Fields() error { return devtools.GenerateFieldsYAML() } -// GoTestUnit executes the Go unit tests. -// Use TEST_COVERAGE=true to enable code coverage profiling. -// Use RACE_DETECTOR=true to enable the race detector. -func GoTestUnit(ctx context.Context) error { - return devtools.GoTest(ctx, devtools.DefaultGoTestUnitArgs()) -} - // ----------------------------------------------------------------------------- // Customizations specific to Journalbeat. // - Install required headers on builders for different architectures. @@ -199,7 +193,15 @@ func installDependencies(arch string, pkgs ...string) error { return err } - params := append([]string{"install", "-y", "--no-install-recommends"}, pkgs...) + params := append([]string{"install", "-y", + "--no-install-recommends", + + // Journalbeat is built with old versions of Debian that don't update + // their repositories, so they have expired keys. + // Allow unauthenticated packages. + // This was not enough: "-o", "Acquire::Check-Valid-Until=false", + "--allow-unauthenticated", + }, pkgs...) return sh.Run("apt-get", params...) } diff --git a/libbeat/_meta/config.reference.yml.tmpl b/libbeat/_meta/config.reference.yml.tmpl index 9a9d2fdeb71..a72aa80ba42 100644 --- a/libbeat/_meta/config.reference.yml.tmpl +++ b/libbeat/_meta/config.reference.yml.tmpl @@ -406,6 +406,27 @@ output.elasticsearch: # # The pin is a base64 encoded string of the SHA-256 fingerprint. #ssl.ca_sha256: "" + + # Enable Kerberos support. Kerberos is automatically enabled if any Kerberos setting is set. + #kerberos.enabled: true + + # Authentication type to use with Kerberos. Available options: keytab, password. + #kerberos.auth_type: password + + # Path to the keytab file. It is used when auth_type is set to keytab. + #kerberos.keytab: /etc/elastic.keytab + + # Path to the Kerberos configuration. + #kerberos.config_path: /etc/krb5.conf + + # Name of the Kerberos user. + #kerberos.username: elastic + + # Password of the Kerberos user. It is used when auth_type is set to password. + #kerberos.password: changeme + + # Kerberos realm. 
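+  # Realms are conventionally the upper-cased domain name of the environment;
+  # the value below is only an example.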
+ #kerberos.realm: ELASTIC {{if not .ExcludeLogstash}} #----------------------------- Logstash output --------------------------------- #output.logstash: @@ -675,6 +696,9 @@ output.elasticsearch: # never, once, and freely. Default is never. #ssl.renegotiation: never + # Enable Kerberos support. Kerberos is automatically enabled if any Kerberos setting is set. + #kerberos.enabled: true + # Authentication type to use with Kerberos. Available options: keytab, password. #kerberos.auth_type: password @@ -1261,6 +1285,27 @@ logging.files: # never, once, and freely. Default is never. #ssl.renegotiation: never + # Enable Kerberos support. Kerberos is automatically enabled if any Kerberos setting is set. + #kerberos.enabled: true + + # Authentication type to use with Kerberos. Available options: keytab, password. + #kerberos.auth_type: password + + # Path to the keytab file. It is used when auth_type is set to keytab. + #kerberos.keytab: /etc/elastic.keytab + + # Path to the Kerberos configuration. + #kerberos.config_path: /etc/krb5.conf + + # Name of the Kerberos user. + #kerberos.username: elastic + + # Password of the Kerberos user. It is used when auth_type is set to password. + #kerberos.password: changeme + + # Kerberos realm. + #kerberos.realm: ELASTIC + #metrics.period: 10s #state.period: 1m diff --git a/libbeat/_meta/config.yml.tmpl b/libbeat/_meta/config.yml.tmpl index 6278ae44a2b..2d5e510e33f 100644 --- a/libbeat/_meta/config.yml.tmpl +++ b/libbeat/_meta/config.yml.tmpl @@ -106,13 +106,12 @@ processors: {{else}} processors: - add_observer_metadata: - # Optional, but recommended geo settings for the location {{ .BeatName | title }} is running in - #geo: - # Token describing this location - #name: us-east-1a - - # Lat, Lon " - #location: "37.926868, -78.024902" + # Optional, but recommended geo settings for the location {{ .BeatName | title }} is running in + #geo: + # Token describing this location + #name: us-east-1a + # Lat, Lon " + #location: "37.926868, -78.024902" {{end}} #================================ Logging ===================================== diff --git a/libbeat/autodiscover/appenders/config/config.go b/libbeat/autodiscover/appenders/config/config.go index 018ee1b587d..60f8a543f4a 100644 --- a/libbeat/autodiscover/appenders/config/config.go +++ b/libbeat/autodiscover/appenders/config/config.go @@ -104,7 +104,7 @@ func (c *configAppender) Append(event bus.Event) { } // Apply the template - template.ApplyConfigTemplate(event, cfgs) + template.ApplyConfigTemplate(event, cfgs, false) } // Replace old config with newly appended configs diff --git a/libbeat/autodiscover/autodiscover.go b/libbeat/autodiscover/autodiscover.go index c4941c4b176..668a350b865 100644 --- a/libbeat/autodiscover/autodiscover.go +++ b/libbeat/autodiscover/autodiscover.go @@ -29,6 +29,7 @@ import ( "github.com/elastic/beats/v7/libbeat/common" "github.com/elastic/beats/v7/libbeat/common/bus" "github.com/elastic/beats/v7/libbeat/common/reload" + "github.com/elastic/beats/v7/libbeat/keystore" "github.com/elastic/beats/v7/libbeat/logp" ) @@ -72,6 +73,7 @@ func NewAutodiscover( factory cfgfile.RunnerFactory, configurer EventConfigurer, config *Config, + keystore keystore.Keystore, ) (*Autodiscover, error) { logger := logp.NewLogger("autodiscover") @@ -81,7 +83,7 @@ func NewAutodiscover( // Init providers var providers []Provider for _, providerCfg := range config.Providers { - provider, err := Registry.BuildProvider(bus, providerCfg) + provider, err := Registry.BuildProvider(bus, providerCfg, keystore) if err != 
nil { return nil, errors.Wrap(err, "error in autodiscover provider settings") } @@ -191,10 +193,7 @@ func (a *Autodiscover) handleStart(event bus.Event) bool { if a.logger.IsDebug() { for _, c := range configs { - rc := map[string]interface{}{} - c.Unpack(&rc) - - a.logger.Debugf("Generated config: %+v", rc) + a.logger.Debugf("Generated config: %+v", common.DebugString(c, true)) } } @@ -202,7 +201,7 @@ func (a *Autodiscover) handleStart(event bus.Event) bool { for _, config := range configs { hash, err := cfgfile.HashConfig(config) if err != nil { - a.logger.Debugf("Could not hash config %v: %v", config, err) + a.logger.Debugf("Could not hash config %v: %v", common.DebugString(config, true), err) continue } @@ -216,7 +215,7 @@ func (a *Autodiscover) handleStart(event bus.Event) bool { dynFields := a.meta.Store(hash, meta) if a.configs[eventID][hash] != nil { - a.logger.Debugf("Config %v is already running", config) + a.logger.Debugf("Config %v is already running", common.DebugString(config, true)) continue } diff --git a/libbeat/autodiscover/autodiscover_test.go b/libbeat/autodiscover/autodiscover_test.go index 182cd99f3cb..23a3fe14da3 100644 --- a/libbeat/autodiscover/autodiscover_test.go +++ b/libbeat/autodiscover/autodiscover_test.go @@ -30,6 +30,7 @@ import ( "github.com/elastic/beats/v7/libbeat/cfgfile" "github.com/elastic/beats/v7/libbeat/common" "github.com/elastic/beats/v7/libbeat/common/bus" + "github.com/elastic/beats/v7/libbeat/keystore" "github.com/elastic/beats/v7/libbeat/tests/resources" ) @@ -142,7 +143,7 @@ func TestAutodiscover(t *testing.T) { // Register mock autodiscover provider busChan := make(chan bus.Bus, 1) Registry = NewRegistry() - Registry.AddProvider("mock", func(b bus.Bus, uuid uuid.UUID, c *common.Config) (Provider, error) { + Registry.AddProvider("mock", func(b bus.Bus, uuid uuid.UUID, c *common.Config, k keystore.Keystore) (Provider, error) { // intercept bus to mock events busChan <- b @@ -164,9 +165,9 @@ func TestAutodiscover(t *testing.T) { config := Config{ Providers: []*common.Config{providerConfig}, } - + k, _ := keystore.NewFileKeystore("test") // Create autodiscover manager - autodiscover, err := NewAutodiscover("test", nil, &adapter, &adapter, &config) + autodiscover, err := NewAutodiscover("test", nil, &adapter, &adapter, &config, k) if err != nil { t.Fatal(err) } @@ -266,7 +267,7 @@ func TestAutodiscoverHash(t *testing.T) { busChan := make(chan bus.Bus, 1) Registry = NewRegistry() - Registry.AddProvider("mock", func(b bus.Bus, uuid uuid.UUID, c *common.Config) (Provider, error) { + Registry.AddProvider("mock", func(b bus.Bus, uuid uuid.UUID, c *common.Config, k keystore.Keystore) (Provider, error) { // intercept bus to mock events busChan <- b @@ -291,9 +292,9 @@ func TestAutodiscoverHash(t *testing.T) { config := Config{ Providers: []*common.Config{providerConfig}, } - + k, _ := keystore.NewFileKeystore("test") // Create autodiscover manager - autodiscover, err := NewAutodiscover("test", nil, &adapter, &adapter, &config) + autodiscover, err := NewAutodiscover("test", nil, &adapter, &adapter, &config, k) if err != nil { t.Fatal(err) } @@ -332,7 +333,7 @@ func TestAutodiscoverWithConfigCheckFailures(t *testing.T) { // Register mock autodiscover provider busChan := make(chan bus.Bus, 1) Registry = NewRegistry() - Registry.AddProvider("mock", func(b bus.Bus, uuid uuid.UUID, c *common.Config) (Provider, error) { + Registry.AddProvider("mock", func(b bus.Bus, uuid uuid.UUID, c *common.Config, k keystore.Keystore) (Provider, error) { // intercept bus to 
mock events busChan <- b @@ -357,9 +358,9 @@ func TestAutodiscoverWithConfigCheckFailures(t *testing.T) { config := Config{ Providers: []*common.Config{providerConfig}, } - + k, _ := keystore.NewFileKeystore("test") // Create autodiscover manager - autodiscover, err := NewAutodiscover("test", nil, &adapter, &adapter, &config) + autodiscover, err := NewAutodiscover("test", nil, &adapter, &adapter, &config, k) if err != nil { t.Fatal(err) } diff --git a/libbeat/autodiscover/provider.go b/libbeat/autodiscover/provider.go index f7bfefc69d0..510e09ab4bf 100644 --- a/libbeat/autodiscover/provider.go +++ b/libbeat/autodiscover/provider.go @@ -26,6 +26,7 @@ import ( "github.com/elastic/beats/v7/libbeat/cfgfile" "github.com/elastic/beats/v7/libbeat/common" "github.com/elastic/beats/v7/libbeat/common/bus" + "github.com/elastic/beats/v7/libbeat/keystore" ) // Provider for autodiscover @@ -34,7 +35,7 @@ type Provider interface { } // ProviderBuilder creates a new provider based on the given config and returns it -type ProviderBuilder func(bus.Bus, uuid.UUID, *common.Config) (Provider, error) +type ProviderBuilder func(bus.Bus, uuid.UUID, *common.Config, keystore.Keystore) (Provider, error) // AddProvider registers a new ProviderBuilder func (r *registry) AddProvider(name string, provider ProviderBuilder) error { @@ -69,7 +70,7 @@ func (r *registry) GetProvider(name string) ProviderBuilder { } // BuildProvider reads provider configuration and instantiate one -func (r *registry) BuildProvider(bus bus.Bus, c *common.Config) (Provider, error) { +func (r *registry) BuildProvider(bus bus.Bus, c *common.Config, keystore keystore.Keystore) (Provider, error) { var config ProviderConfig err := c.Unpack(&config) if err != nil { @@ -86,5 +87,5 @@ func (r *registry) BuildProvider(bus bus.Bus, c *common.Config) (Provider, error return nil, err } - return builder(bus, uuid, c) + return builder(bus, uuid, c, keystore) } diff --git a/libbeat/autodiscover/providers/docker/docker.go b/libbeat/autodiscover/providers/docker/docker.go index 42a551ebf72..9bfa13000b1 100644 --- a/libbeat/autodiscover/providers/docker/docker.go +++ b/libbeat/autodiscover/providers/docker/docker.go @@ -33,6 +33,7 @@ import ( "github.com/elastic/beats/v7/libbeat/common/bus" "github.com/elastic/beats/v7/libbeat/common/docker" "github.com/elastic/beats/v7/libbeat/common/safemapstr" + "github.com/elastic/beats/v7/libbeat/keystore" "github.com/elastic/beats/v7/libbeat/logp" ) @@ -55,10 +56,11 @@ type Provider struct { stoppers map[string]*time.Timer stopTrigger chan *dockerContainerMetadata logger *logp.Logger + keystore keystore.Keystore } // AutodiscoverBuilder builds and returns an autodiscover provider -func AutodiscoverBuilder(bus bus.Bus, uuid uuid.UUID, c *common.Config) (autodiscover.Provider, error) { +func AutodiscoverBuilder(bus bus.Bus, uuid uuid.UUID, c *common.Config, keystore keystore.Keystore) (autodiscover.Provider, error) { logger := logp.NewLogger("docker") errWrap := func(err error) error { @@ -115,6 +117,7 @@ func AutodiscoverBuilder(bus bus.Bus, uuid uuid.UUID, c *common.Config) (autodis stoppers: make(map[string]*time.Timer), stopTrigger: make(chan *dockerContainerMetadata), logger: logger, + keystore: keystore, }, nil } @@ -303,6 +306,8 @@ func (d *Provider) emitContainer(container *docker.Container, meta *dockerMetada } func (d *Provider) publish(event bus.Event) { + // attach keystore to the event to be consumed by the static configs + event["keystore"] = d.keystore // Try to match a config if config := 
d.templates.GetConfig(event); config != nil { event["config"] = config diff --git a/libbeat/autodiscover/providers/docker/docker_integration_test.go b/libbeat/autodiscover/providers/docker/docker_integration_test.go index b8afbafbb62..0e10af438ff 100644 --- a/libbeat/autodiscover/providers/docker/docker_integration_test.go +++ b/libbeat/autodiscover/providers/docker/docker_integration_test.go @@ -23,14 +23,14 @@ import ( "testing" "time" - "github.com/elastic/beats/v7/libbeat/autodiscover/template" - "github.com/elastic/beats/v7/libbeat/logp" - "github.com/gofrs/uuid" "github.com/stretchr/testify/assert" + "github.com/elastic/beats/v7/libbeat/autodiscover/template" "github.com/elastic/beats/v7/libbeat/common" "github.com/elastic/beats/v7/libbeat/common/bus" + "github.com/elastic/beats/v7/libbeat/keystore" + "github.com/elastic/beats/v7/libbeat/logp" dk "github.com/elastic/beats/v7/libbeat/tests/docker" ) @@ -53,7 +53,8 @@ func TestDockerStart(t *testing.T) { s := &template.MapperSettings{nil, nil} config.Templates = *s - provider, err := AutodiscoverBuilder(bus, UUID, common.MustNewConfigFrom(config)) + k, _ := keystore.NewFileKeystore("test") + provider, err := AutodiscoverBuilder(bus, UUID, common.MustNewConfigFrom(config), k) if err != nil { t.Fatal(err) } diff --git a/libbeat/autodiscover/providers/jolokia/jolokia.go b/libbeat/autodiscover/providers/jolokia/jolokia.go index b370d747b89..4a18ffffec9 100644 --- a/libbeat/autodiscover/providers/jolokia/jolokia.go +++ b/libbeat/autodiscover/providers/jolokia/jolokia.go @@ -27,6 +27,7 @@ import ( "github.com/elastic/beats/v7/libbeat/autodiscover/template" "github.com/elastic/beats/v7/libbeat/common" "github.com/elastic/beats/v7/libbeat/common/bus" + "github.com/elastic/beats/v7/libbeat/keystore" ) func init() { @@ -48,11 +49,12 @@ type Provider struct { appenders autodiscover.Appenders templates template.Mapper discovery DiscoveryProber + keystore keystore.Keystore } // AutodiscoverBuilder builds a Jolokia Discovery autodiscover provider, it fails if // there is some problem with the configuration -func AutodiscoverBuilder(bus bus.Bus, uuid uuid.UUID, c *common.Config) (autodiscover.Provider, error) { +func AutodiscoverBuilder(bus bus.Bus, uuid uuid.UUID, c *common.Config, keystore keystore.Keystore) (autodiscover.Provider, error) { errWrap := func(err error) error { return errors.Wrap(err, "error setting up jolokia autodiscover provider") } @@ -92,6 +94,7 @@ func AutodiscoverBuilder(bus bus.Bus, uuid uuid.UUID, c *common.Config) (autodis builders: builders, appenders: appenders, discovery: discovery, + keystore: keystore, }, nil } @@ -106,6 +109,8 @@ func (p *Provider) Start() { } func (p *Provider) publish(event bus.Event) { + // attach keystore to the event to be consumed by the static configs + event["keystore"] = p.keystore if config := p.templates.GetConfig(event); config != nil { event["config"] = config } else if config := p.builders.GetConfig(event); config != nil { diff --git a/libbeat/autodiscover/providers/kubernetes/kubernetes.go b/libbeat/autodiscover/providers/kubernetes/kubernetes.go index ec3480fa00b..4a4a4566f8e 100644 --- a/libbeat/autodiscover/providers/kubernetes/kubernetes.go +++ b/libbeat/autodiscover/providers/kubernetes/kubernetes.go @@ -30,6 +30,7 @@ import ( "github.com/elastic/beats/v7/libbeat/common" "github.com/elastic/beats/v7/libbeat/common/bus" "github.com/elastic/beats/v7/libbeat/common/kubernetes" + "github.com/elastic/beats/v7/libbeat/keystore" "github.com/elastic/beats/v7/libbeat/logp" ) @@ -54,10 +55,11 
@@ type Provider struct { appenders autodiscover.Appenders logger *logp.Logger eventer Eventer + keystore keystore.Keystore } // AutodiscoverBuilder builds and returns an autodiscover provider -func AutodiscoverBuilder(bus bus.Bus, uuid uuid.UUID, c *common.Config) (autodiscover.Provider, error) { +func AutodiscoverBuilder(bus bus.Bus, uuid uuid.UUID, c *common.Config, keystore keystore.Keystore) (autodiscover.Provider, error) { logger := logp.NewLogger("autodiscover") errWrap := func(err error) error { @@ -97,6 +99,7 @@ func AutodiscoverBuilder(bus bus.Bus, uuid uuid.UUID, c *common.Config) (autodis builders: builders, appenders: appenders, logger: logger, + keystore: keystore, } switch config.Resource { @@ -135,6 +138,8 @@ func (p *Provider) String() string { } func (p *Provider) publish(event bus.Event) { + // attach keystore to the event to be consumed by the static configs + event["keystore"] = p.keystore // Try to match a config if config := p.templates.GetConfig(event); config != nil { event["config"] = config diff --git a/libbeat/autodiscover/providers/kubernetes/node_test.go b/libbeat/autodiscover/providers/kubernetes/node_test.go index 0685adfe1bd..f2fbe78dba6 100644 --- a/libbeat/autodiscover/providers/kubernetes/node_test.go +++ b/libbeat/autodiscover/providers/kubernetes/node_test.go @@ -33,6 +33,7 @@ import ( "github.com/elastic/beats/v7/libbeat/common" "github.com/elastic/beats/v7/libbeat/common/bus" "github.com/elastic/beats/v7/libbeat/common/kubernetes" + "github.com/elastic/beats/v7/libbeat/keystore" "github.com/elastic/beats/v7/libbeat/logp" ) @@ -112,6 +113,7 @@ func TestGenerateHints_Node(t *testing.T) { } func TestEmitEvent_Node(t *testing.T) { + k, _ := keystore.NewFileKeystore("test") name := "metricbeat" nodeIP := "192.168.0.1" uid := "005f3b90-4b9d-12f8-acf0-31020a840133" @@ -160,6 +162,7 @@ func TestEmitEvent_Node(t *testing.T) { "host": "192.168.0.1", "id": uid, "provider": UUID, + "keystore": k, "kubernetes": common.MapStr{ "node": common.MapStr{ "name": "metricbeat", @@ -219,6 +222,7 @@ func TestEmitEvent_Node(t *testing.T) { "host": "", "id": uid, "provider": UUID, + "keystore": k, "kubernetes": common.MapStr{ "node": common.MapStr{ "name": "metricbeat", @@ -252,6 +256,7 @@ func TestEmitEvent_Node(t *testing.T) { bus: bus.New(logp.NewLogger("bus"), "test"), templates: mapper, logger: logp.NewLogger("kubernetes"), + keystore: k, } no := &node{ diff --git a/libbeat/autodiscover/providers/kubernetes/pod_test.go b/libbeat/autodiscover/providers/kubernetes/pod_test.go index f63dbdf1e73..05b50987b2e 100644 --- a/libbeat/autodiscover/providers/kubernetes/pod_test.go +++ b/libbeat/autodiscover/providers/kubernetes/pod_test.go @@ -33,6 +33,7 @@ import ( "github.com/elastic/beats/v7/libbeat/common" "github.com/elastic/beats/v7/libbeat/common/bus" "github.com/elastic/beats/v7/libbeat/common/kubernetes" + "github.com/elastic/beats/v7/libbeat/keystore" "github.com/elastic/beats/v7/libbeat/logp" ) @@ -332,6 +333,7 @@ func TestGenerateHints(t *testing.T) { } func TestEmitEvent(t *testing.T) { + k, _ := keystore.NewFileKeystore("test") name := "filebeat" namespace := "default" podIP := "127.0.0.1" @@ -395,6 +397,7 @@ func TestEmitEvent(t *testing.T) { "host": "127.0.0.1", "id": cid, "provider": UUID, + "keystore": k, "kubernetes": common.MapStr{ "container": common.MapStr{ "id": "foobar", @@ -527,6 +530,7 @@ func TestEmitEvent(t *testing.T) { "host": "", "id": cid, "provider": UUID, + "keystore": k, "kubernetes": common.MapStr{ "container": common.MapStr{ "id": "", @@ -596,6 
+600,7 @@ func TestEmitEvent(t *testing.T) { "host": "127.0.0.1", "id": cid, "provider": UUID, + "keystore": k, "kubernetes": common.MapStr{ "container": common.MapStr{ "id": "", @@ -645,6 +650,7 @@ func TestEmitEvent(t *testing.T) { bus: bus.New(logp.NewLogger("bus"), "test"), templates: mapper, logger: logp.NewLogger("kubernetes"), + keystore: k, } pod := &pod{ diff --git a/libbeat/autodiscover/providers/kubernetes/service_test.go b/libbeat/autodiscover/providers/kubernetes/service_test.go index 6d6582b3ff2..0e3c8ddb0a8 100644 --- a/libbeat/autodiscover/providers/kubernetes/service_test.go +++ b/libbeat/autodiscover/providers/kubernetes/service_test.go @@ -33,6 +33,7 @@ import ( "github.com/elastic/beats/v7/libbeat/common" "github.com/elastic/beats/v7/libbeat/common/bus" "github.com/elastic/beats/v7/libbeat/common/kubernetes" + "github.com/elastic/beats/v7/libbeat/keystore" "github.com/elastic/beats/v7/libbeat/logp" ) @@ -233,6 +234,7 @@ func TestGenerateHints_Service(t *testing.T) { } func TestEmitEvent_Service(t *testing.T) { + k, _ := keystore.NewFileKeystore("test") name := "metricbeat" namespace := "default" clusterIP := "192.168.0.1" @@ -280,6 +282,7 @@ func TestEmitEvent_Service(t *testing.T) { "host": "192.168.0.1", "id": uid, "provider": UUID, + "keystore": k, "port": 8080, "kubernetes": common.MapStr{ "service": common.MapStr{ @@ -369,6 +372,7 @@ func TestEmitEvent_Service(t *testing.T) { "id": uid, "port": 8080, "provider": UUID, + "keystore": k, "kubernetes": common.MapStr{ "service": common.MapStr{ "name": "metricbeat", @@ -405,6 +409,7 @@ func TestEmitEvent_Service(t *testing.T) { bus: bus.New(logp.NewLogger("bus"), "test"), templates: mapper, logger: logp.NewLogger("kubernetes"), + keystore: k, } service := &service{ diff --git a/libbeat/autodiscover/template/config.go b/libbeat/autodiscover/template/config.go index 151f76dde0f..0ce05526ecb 100644 --- a/libbeat/autodiscover/template/config.go +++ b/libbeat/autodiscover/template/config.go @@ -21,6 +21,7 @@ import ( "github.com/elastic/beats/v7/libbeat/common" "github.com/elastic/beats/v7/libbeat/common/bus" "github.com/elastic/beats/v7/libbeat/conditions" + "github.com/elastic/beats/v7/libbeat/keystore" "github.com/elastic/beats/v7/libbeat/logp" "github.com/elastic/go-ucfg" ) @@ -81,7 +82,7 @@ func (c Mapper) GetConfig(event bus.Event) []*common.Config { continue } - configs := ApplyConfigTemplate(event, mapping.Configs) + configs := ApplyConfigTemplate(event, mapping.Configs, true) if configs != nil { result = append(result, configs...) } @@ -90,7 +91,7 @@ func (c Mapper) GetConfig(event bus.Event) []*common.Config { } // ApplyConfigTemplate takes a set of templated configs and applys information in an event map -func ApplyConfigTemplate(event bus.Event, configs []*common.Config) []*common.Config { +func ApplyConfigTemplate(event bus.Event, configs []*common.Config, keystoreEnabled bool) []*common.Config { var result []*common.Config // unpack input vars, err := ucfg.NewFrom(map[string]interface{}{ @@ -105,6 +106,19 @@ func ApplyConfigTemplate(event bus.Event, configs []*common.Config) []*common.Co ucfg.ResolveEnv, ucfg.VarExp, } + + if keystoreEnabled { + if val, ok := event["keystore"]; ok { + eventKeystore := val.(keystore.Keystore) + opts = append(opts, ucfg.Resolve(keystore.ResolverWrap(eventKeystore))) + delete(event, "keystore") + } + } else { + if _, ok := event["keystore"]; ok { + delete(event, "keystore") + } + } + for _, config := range configs { c, err := ucfg.NewFrom(config, opts...) 
 		if err != nil {
diff --git a/libbeat/autodiscover/template/config_test.go b/libbeat/autodiscover/template/config_test.go
index 570de15a840..ccb27a7127a 100644
--- a/libbeat/autodiscover/template/config_test.go
+++ b/libbeat/autodiscover/template/config_test.go
@@ -18,12 +18,16 @@ package template
 
 import (
+	"os"
+	"path/filepath"
 	"testing"
 
+	"github.com/docker/docker/pkg/ioutils"
 	"github.com/stretchr/testify/assert"
 
 	"github.com/elastic/beats/v7/libbeat/common"
 	"github.com/elastic/beats/v7/libbeat/common/bus"
+	"github.com/elastic/beats/v7/libbeat/keystore"
 )
 
 func TestConfigsMapping(t *testing.T) {
@@ -93,6 +97,61 @@ func TestConfigsMapping(t *testing.T) {
 	}
 }
 
+func TestConfigsMappingKeystore(t *testing.T) {
+	secret := "mapping_secret"
+	// expected config
+	config, _ := common.NewConfigFrom(map[string]interface{}{
+		"correct":  "config",
+		"password": secret,
+	})
+
+	path := getTemporaryKeystoreFile()
+	defer os.Remove(path)
+	// store the secret
+	keystore := createAnExistingKeystore(path, secret)
+
+	tests := []struct {
+		mapping  string
+		event    bus.Event
+		expected []*common.Config
+	}{
+		// Match config
+		{
+			mapping: `
+- condition.equals:
+    foo: 3
+  config:
+    - correct: config
+      password: "${PASSWORD}"`,
+			event: bus.Event{
+				"foo":      3,
+				"keystore": keystore,
+			},
+			expected: []*common.Config{config},
+		},
+	}
+
+	for _, test := range tests {
+		var mappings MapperSettings
+		config, err := common.NewConfigWithYAML([]byte(test.mapping), "")
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		if err := config.Unpack(&mappings); err != nil {
+			t.Fatal(err)
+		}
+
+		mapper, err := NewConfigMapper(mappings)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		res := mapper.GetConfig(test.event)
+		assert.Equal(t, test.expected, res)
+	}
+}
+
 func TestNilConditionConfig(t *testing.T) {
 	var mappings MapperSettings
 	data := `
@@ -111,3 +170,31 @@ func TestNilConditionConfig(t *testing.T) {
 	assert.NoError(t, err)
 	assert.Nil(t, mappings[0].ConditionConfig)
 }
+
+// create a keystore with an existing key `PASSWORD`,
+// set to the value of the `secret` argument.
+func createAnExistingKeystore(path string, secret string) keystore.Keystore {
+	keyStore, err := keystore.NewFileKeystore(path)
+	// Fail fast in the test suite
+	if err != nil {
+		panic(err)
+	}
+
+	writableKeystore, err := keystore.AsWritableKeystore(keyStore)
+	if err != nil {
+		panic(err)
+	}
+
+	writableKeystore.Store("PASSWORD", []byte(secret))
+	writableKeystore.Save()
+	return keyStore
+}
+
+// create a temporary file on disk to save the keystore.
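+// The file lands in a fresh directory created by ioutils.TempDir, so
+// concurrent test runs cannot collide on the same keystore path.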
+func getTemporaryKeystoreFile() string { + path, err := ioutils.TempDir("", "testing") + if err != nil { + panic(err) + } + return filepath.Join(path, "keystore") +} diff --git a/libbeat/beat/beat.go b/libbeat/beat/beat.go index 3aaed4c6641..75585ba8992 100644 --- a/libbeat/beat/beat.go +++ b/libbeat/beat/beat.go @@ -19,6 +19,7 @@ package beat import ( "github.com/elastic/beats/v7/libbeat/common" + "github.com/elastic/beats/v7/libbeat/keystore" "github.com/elastic/beats/v7/libbeat/management" ) @@ -66,6 +67,8 @@ type Beat struct { Fields []byte // Data from fields.yml ConfigManager management.ConfigManager // config manager + + Keystore keystore.Keystore } // BeatConfig struct contains the basic configuration of every beat diff --git a/libbeat/cmd/instance/beat.go b/libbeat/cmd/instance/beat.go index a9e59173633..e2b4d7d6630 100644 --- a/libbeat/cmd/instance/beat.go +++ b/libbeat/cmd/instance/beat.go @@ -584,6 +584,7 @@ func (b *Beat) configure(settings Settings) error { } b.keystore = store + b.Beat.Keystore = store err = cloudid.OverwriteSettings(cfg) if err != nil { return err diff --git a/libbeat/common/bytes.go b/libbeat/common/bytes.go index a2a7aa390a5..6239b024634 100644 --- a/libbeat/common/bytes.go +++ b/libbeat/common/bytes.go @@ -20,8 +20,22 @@ package common import ( "bytes" "crypto/rand" + "encoding/binary" "errors" "fmt" + "io" + "unicode/utf16" + "unicode/utf8" +) + +const ( + // 0xd800-0xdc00 encodes the high 10 bits of a pair. + // 0xdc00-0xe000 encodes the low 10 bits of a pair. + // the value is those 20 bits plus 0x10000. + surr1 = 0xd800 + surr2 = 0xdc00 + surr3 = 0xe000 + replacementChar = '\uFFFD' // Unicode replacement character ) // Byte order utilities @@ -76,3 +90,46 @@ func RandomBytes(length int) ([]byte, error) { return r, nil } + +func UTF16ToUTF8Bytes(in []byte, out io.Writer) error { + if len(in)%2 != 0 { + return fmt.Errorf("input buffer must have an even length (length=%d)", len(in)) + } + + var runeBuf [4]byte + var v1, v2 uint16 + for i := 0; i < len(in); i += 2 { + v1 = uint16(in[i]) | uint16(in[i+1])<<8 + // Stop at null-terminator. 
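+		// A NUL is treated as end-of-string: the remainder of the buffer,
+		// e.g. zero padding from fixed-size sources, is ignored.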
+ if v1 == 0 { + return nil + } + + switch { + case v1 < surr1, surr3 <= v1: + n := utf8.EncodeRune(runeBuf[:], rune(v1)) + out.Write(runeBuf[:n]) + case surr1 <= v1 && v1 < surr2 && len(in) > i+2: + v2 = uint16(in[i+2]) | uint16(in[i+3])<<8 + if surr2 <= v2 && v2 < surr3 { + // valid surrogate sequence + r := utf16.DecodeRune(rune(v1), rune(v2)) + n := utf8.EncodeRune(runeBuf[:], r) + out.Write(runeBuf[:n]) + } + i += 2 + default: + // invalid surrogate sequence + n := utf8.EncodeRune(runeBuf[:], replacementChar) + out.Write(runeBuf[:n]) + } + } + return nil +} + +func StringToUTF16Bytes(in string) []byte { + var u16 []uint16 = utf16.Encode([]rune(in)) + buf := &bytes.Buffer{} + binary.Write(buf, binary.LittleEndian, u16) + return buf.Bytes() +} diff --git a/libbeat/common/bytes_test.go b/libbeat/common/bytes_test.go index d1d41c0f22f..b3738f510c0 100644 --- a/libbeat/common/bytes_test.go +++ b/libbeat/common/bytes_test.go @@ -21,8 +21,10 @@ package common import ( "bytes" + "encoding/binary" "errors" "testing" + "unicode/utf16" "github.com/stretchr/testify/assert" ) @@ -256,3 +258,38 @@ func TestRandomBytes(t *testing.T) { // unlikely to get 2 times the same results assert.False(t, bytes.Equal(v1, v2)) } + +func TestUTF16ToUTF8(t *testing.T) { + input := "abc白鵬翔\u145A6" + buf := &bytes.Buffer{} + binary.Write(buf, binary.LittleEndian, utf16.Encode([]rune(input))) + outputBuf := &bytes.Buffer{} + err := UTF16ToUTF8Bytes(buf.Bytes(), outputBuf) + assert.NoError(t, err) + assert.Equal(t, []byte(input), outputBuf.Bytes()) +} + +func TestUTF16BytesToStringTrimNullTerm(t *testing.T) { + input := "abc" + utf16Bytes := append(StringToUTF16Bytes(input), []byte{0, 0, 0, 0, 0, 0}...) + + outputBuf := &bytes.Buffer{} + err := UTF16ToUTF8Bytes(utf16Bytes, outputBuf) + if err != nil { + t.Fatal(err) + } + b := outputBuf.Bytes() + assert.Len(t, b, 3) + assert.Equal(t, input, string(b)) +} + +func BenchmarkUTF16ToUTF8(b *testing.B) { + utf16Bytes := StringToUTF16Bytes("A logon was attempted using explicit credentials.") + outputBuf := &bytes.Buffer{} + b.ResetTimer() + + for i := 0; i < b.N; i++ { + UTF16ToUTF8Bytes(utf16Bytes, outputBuf) + outputBuf.Reset() + } +} diff --git a/libbeat/common/seccomp/policy_linux_386.go b/libbeat/common/seccomp/policy_linux_386.go index 76b24714cac..acbc69ddd1f 100644 --- a/libbeat/common/seccomp/policy_linux_386.go +++ b/libbeat/common/seccomp/policy_linux_386.go @@ -32,6 +32,7 @@ func init() { "access", "brk", "chmod", + "chown", "clock_gettime", "clone", "close", diff --git a/libbeat/common/seccomp/policy_linux_amd64.go b/libbeat/common/seccomp/policy_linux_amd64.go index 92b5fbe488a..bf1e4bc31c5 100644 --- a/libbeat/common/seccomp/policy_linux_amd64.go +++ b/libbeat/common/seccomp/policy_linux_amd64.go @@ -35,6 +35,7 @@ func init() { "bind", "brk", "chmod", + "chown", "clock_gettime", "clone", "close", diff --git a/libbeat/common/transport/kerberos/client.go b/libbeat/common/transport/kerberos/client.go new file mode 100644 index 00000000000..1cbfb0a4338 --- /dev/null +++ b/libbeat/common/transport/kerberos/client.go @@ -0,0 +1,65 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package kerberos
+
+import (
+	"fmt"
+	"net/http"
+
+	krbclient "gopkg.in/jcmturner/gokrb5.v7/client"
+	krbconfig "gopkg.in/jcmturner/gokrb5.v7/config"
+	"gopkg.in/jcmturner/gokrb5.v7/keytab"
+	"gopkg.in/jcmturner/gokrb5.v7/spnego"
+)
+
+type Client struct {
+	spClient *spnego.Client
+}
+
+func NewClient(config *Config, httpClient *http.Client, esurl string) (*Client, error) {
+	var krbClient *krbclient.Client
+	krbConf, err := krbconfig.Load(config.ConfigPath)
+	if err != nil {
+		return nil, fmt.Errorf("error creating Kerberos client: %+v", err)
+	}
+
+	switch config.AuthType {
+	case authKeytab:
+		kTab, err := keytab.Load(config.KeyTabPath)
+		if err != nil {
+			return nil, fmt.Errorf("cannot load keytab file %s: %+v", config.KeyTabPath, err)
+		}
+		krbClient = krbclient.NewClientWithKeytab(config.Username, config.Realm, kTab, krbConf)
+	case authPassword:
+		krbClient = krbclient.NewClientWithPassword(config.Username, config.Realm, config.Password, krbConf)
+	default:
+		return nil, InvalidAuthType
+	}
+
+	return &Client{
+		spClient: spnego.NewClient(krbClient, httpClient, ""),
+	}, nil
+}
+
+func (c *Client) Do(req *http.Request) (*http.Response, error) {
+	return c.spClient.Do(req)
+}
+
+func (c *Client) CloseIdleConnections() {
+	c.spClient.CloseIdleConnections()
+}
diff --git a/libbeat/common/transport/kerberos/config.go b/libbeat/common/transport/kerberos/config.go
index fe2450fa639..42b779485fe 100644
--- a/libbeat/common/transport/kerberos/config.go
+++ b/libbeat/common/transport/kerberos/config.go
@@ -17,33 +17,44 @@
 
 package kerberos
 
-import "fmt"
+import (
+	"errors"
+	"fmt"
+)
 
 type AuthType uint
 
 const (
-	AUTH_PASSWORD = 1
-	AUTH_KEYTAB   = 2
+	authPassword = 1
+	authKeytab   = 2
 
-	authPassword  = "password"
-	authKeytabStr = "keytab"
+	authPasswordStr = "password"
+	authKeytabStr   = "keytab"
 )
 
 var (
+	InvalidAuthType = errors.New("invalid authentication type")
+
 	authTypes = map[string]AuthType{
-		authPassword:  AUTH_PASSWORD,
-		authKeytabStr: AUTH_KEYTAB,
+		authPasswordStr: authPassword,
+		authKeytabStr:   authKeytab,
 	}
 )
 
 type Config struct {
+	Enabled     *bool    `config:"enabled" yaml:"enabled,omitempty"`
 	AuthType    AuthType `config:"auth_type" validate:"required"`
 	KeyTabPath  string   `config:"keytab"`
-	ConfigPath  string   `config:"config_path"`
+	ConfigPath  string   `config:"config_path" validate:"required"`
 	ServiceName string   `config:"service_name"`
 	Username    string   `config:"username"`
 	Password    string   `config:"password"`
-	Realm       string   `config:"realm"`
+	Realm       string   `config:"realm" validate:"required"`
+}
+
+// IsEnabled returns true if the `enabled` field is set to true in the yaml.
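+// It also returns true when `enabled` is left unset, so Kerberos turns on as
+// soon as a `kerberos` section is present in the configuration.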
+func (c *Config) IsEnabled() bool {
+	return c != nil && (c.Enabled == nil || *c.Enabled)
 }
 
 // Unpack validates and unpack "auth_type" config option
@@ -59,19 +70,21 @@ func (t *AuthType) Unpack(value string) error {
 }
 
 func (c *Config) Validate() error {
-	if c.AuthType == AUTH_PASSWORD {
+	switch c.AuthType {
+	case authPassword:
 		if c.Username == "" {
 			return fmt.Errorf("password authentication is selected for Kerberos, but username is not configured")
 		}
 		if c.Password == "" {
 			return fmt.Errorf("password authentication is selected for Kerberos, but password is not configured")
 		}
-	}
-
-	if c.AuthType == AUTH_KEYTAB {
+	case authKeytab:
 		if c.KeyTabPath == "" {
 			return fmt.Errorf("keytab authentication is selected for Kerberos, but path to keytab is not configured")
 		}
+	default:
+		return InvalidAuthType
 	}
 
 	return nil
diff --git a/libbeat/common/transport/tlscommon/types.go b/libbeat/common/transport/tlscommon/types.go
index 3fb96712b16..93cdf95464e 100644
--- a/libbeat/common/transport/tlscommon/types.go
+++ b/libbeat/common/transport/tlscommon/types.go
@@ -65,6 +65,10 @@ var tlsCipherSuites = map[string]tlsCipherSuite{
 	"RSA-AES-128-GCM-SHA256": tlsCipherSuite(tls.TLS_RSA_WITH_AES_128_GCM_SHA256),
 	"RSA-AES-256-CBC-SHA":    tlsCipherSuite(tls.TLS_RSA_WITH_AES_256_CBC_SHA),
 	"RSA-AES-256-GCM-SHA384": tlsCipherSuite(tls.TLS_RSA_WITH_AES_256_GCM_SHA384),
+
+	"TLS-AES-128-GCM-SHA256":       tlsCipherSuite(tls.TLS_AES_128_GCM_SHA256),
+	"TLS-AES-256-GCM-SHA384":       tlsCipherSuite(tls.TLS_AES_256_GCM_SHA384),
+	"TLS-CHACHA20-POLY1305-SHA256": tlsCipherSuite(tls.TLS_CHACHA20_POLY1305_SHA256),
 }
 
 var tlsCipherSuitesInverse = make(map[tlsCipherSuite]string, len(tlsCipherSuites))
diff --git a/libbeat/common/transport/tlscommon/versions.go b/libbeat/common/transport/tlscommon/versions.go
index 3ab3dd5a8f0..a589f0af3cd 100644
--- a/libbeat/common/transport/tlscommon/versions.go
+++ b/libbeat/common/transport/tlscommon/versions.go
@@ -23,12 +23,20 @@ import "fmt"
 type TLSVersion uint16
 
 func (v TLSVersion) String() string {
-	if s, ok := tlsProtocolVersionsInverse[v]; ok {
-		return s
+	if details := v.Details(); details != nil {
+		return details.Combined
 	}
 	return "unknown"
 }
 
+// Details returns a TLSVersionDetails struct containing detailed version metadata.
+func (v TLSVersion) Details() *TLSVersionDetails {
+	if found, ok := tlsInverseLookup[v]; ok {
+		return &found
+	}
+	return nil
+}
+
 //Unpack transforms the string into a constant.
 func (v *TLSVersion) Unpack(s string) error {
 	version, found := tlsProtocolVersions[s]
diff --git a/libbeat/common/transport/tlscommon/versions_default.go b/libbeat/common/transport/tlscommon/versions_default.go
index 057c5c59cd4..77eff7375eb 100644
--- a/libbeat/common/transport/tlscommon/versions_default.go
+++ b/libbeat/common/transport/tlscommon/versions_default.go
@@ -19,7 +19,9 @@
 
 package tlscommon
 
-import "crypto/tls"
+import (
+	"crypto/tls"
+)
 
 // Define all the possible TLS version.
 const (
@@ -61,10 +63,22 @@ var tlsProtocolVersions = map[string]TLSVersion{
 	"TLSv1.3": TLSVersion13,
 }
 
-var tlsProtocolVersionsInverse = map[TLSVersion]string{
-	TLSVersionSSL30: "SSLv3",
-	TLSVersion10:    "TLSv1.0",
-	TLSVersion11:    "TLSv1.1",
-	TLSVersion12:    "TLSv1.2",
-	TLSVersion13:    "TLSv1.3",
+// Intended for ECS's tls.version_protocol_field, which does not include the
+// numeric version and should be lowercase.
+type TLSVersionDetails struct {
+	Version  string
+	Protocol string
+	Combined string
+}
+
+func (pv TLSVersionDetails) String() string {
+	return pv.Combined
+}
+
+var tlsInverseLookup = map[TLSVersion]TLSVersionDetails{
+	TLSVersionSSL30: TLSVersionDetails{Version: "3.0", Protocol: "ssl", Combined: "SSLv3"},
+	TLSVersion10:    TLSVersionDetails{Version: "1.0", Protocol: "tls", Combined: "TLSv1.0"},
+	TLSVersion11:    TLSVersionDetails{Version: "1.1", Protocol: "tls", Combined: "TLSv1.1"},
+	TLSVersion12:    TLSVersionDetails{Version: "1.2", Protocol: "tls", Combined: "TLSv1.2"},
+	TLSVersion13:    TLSVersionDetails{Version: "1.3", Protocol: "tls", Combined: "TLSv1.3"},
 }
diff --git a/libbeat/common/transport/tlscommon/versions_test.go b/libbeat/common/transport/tlscommon/versions_test.go
new file mode 100644
index 00000000000..b1251109b05
--- /dev/null
+++ b/libbeat/common/transport/tlscommon/versions_test.go
@@ -0,0 +1,77 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package tlscommon
+
+import (
+	"crypto/tls"
+	"testing"
+
+	"github.com/stretchr/testify/require"
+)
+
+func TestTLSVersion(t *testing.T) {
+	// These tests are a bit verbose, but given the sensitivity to changes here, it's not a bad idea.
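+	// Each case pairs a raw uint16 version constant with the TLSVersionDetails
+	// expected from Details(); a nil `want` marks a version with no mapping.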
+	tests := []struct {
+		name string
+		v    uint16
+		want *TLSVersionDetails
+	}{
+		{
+			"unknown",
+			0x0,
+			nil,
+		},
+		{
+			"SSLv3",
+			tls.VersionSSL30,
+			&TLSVersionDetails{Version: "3.0", Protocol: "ssl", Combined: "SSLv3"},
+		},
+		{
+			"TLSv1.0",
+			tls.VersionTLS10,
+			&TLSVersionDetails{Version: "1.0", Protocol: "tls", Combined: "TLSv1.0"},
+		},
+		{
+			"TLSv1.1",
+			tls.VersionTLS11,
+			&TLSVersionDetails{Version: "1.1", Protocol: "tls", Combined: "TLSv1.1"},
+		},
+		{
+			"TLSv1.2",
+			tls.VersionTLS12,
+			&TLSVersionDetails{Version: "1.2", Protocol: "tls", Combined: "TLSv1.2"},
+		},
+		{
+			"TLSv1.3",
+			tls.VersionTLS13,
+			&TLSVersionDetails{Version: "1.3", Protocol: "tls", Combined: "TLSv1.3"},
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			tv := TLSVersion(tt.v)
+			require.Equal(t, tt.want, tv.Details())
+			if tt.want == nil {
+				require.Equal(t, "unknown", tv.String())
+			} else {
+				require.Equal(t, tt.name, tv.String())
+			}
+		})
+	}
+}
diff --git a/libbeat/dashboards/kibana_loader.go b/libbeat/dashboards/kibana_loader.go
index 93dd0e5dc0e..1733f94750c 100644
--- a/libbeat/dashboards/kibana_loader.go
+++ b/libbeat/dashboards/kibana_loader.go
@@ -25,6 +25,9 @@ import (
 	"net/url"
 	"time"
 
+	"github.com/joeshaw/multierror"
+	"github.com/pkg/errors"
+
 	"github.com/elastic/beats/v7/libbeat/common"
 	"github.com/elastic/beats/v7/libbeat/kibana"
 	"github.com/elastic/beats/v7/libbeat/logp"
@@ -101,11 +104,18 @@ func (loader KibanaLoader) ImportIndexFile(file string) error {
 
 // ImportIndex imports the passed index pattern to Kibana
 func (loader KibanaLoader) ImportIndex(pattern common.MapStr) error {
+	var errs multierror.Errors
+
 	params := url.Values{}
 	params.Set("force", "true") //overwrite the existing dashboards
 
-	indexContent := ReplaceIndexInIndexPattern(loader.config.Index, pattern)
-	return loader.client.ImportJSON(importAPI, params, indexContent)
+	if err := ReplaceIndexInIndexPattern(loader.config.Index, pattern); err != nil {
+		errs = append(errs, errors.Wrapf(err, "error setting index '%s' in index pattern", loader.config.Index))
+	}
+
+	if err := loader.client.ImportJSON(importAPI, params, pattern); err != nil {
+		errs = append(errs, errors.Wrap(err, "error loading index pattern"))
+	}
+	return errs.Err()
 }
 
 // ImportDashboard imports the dashboard file
diff --git a/libbeat/dashboards/modify_json.go b/libbeat/dashboards/modify_json.go
index c8b1c79da6b..2e0c48e38b0 100644
--- a/libbeat/dashboards/modify_json.go
+++ b/libbeat/dashboards/modify_json.go
@@ -22,6 +22,8 @@ import (
 	"encoding/json"
 	"fmt"
 
+	"github.com/pkg/errors"
+
 	"github.com/elastic/beats/v7/libbeat/common"
 	"github.com/elastic/beats/v7/libbeat/logp"
 )
@@ -41,32 +43,50 @@ type JSONFormat struct {
 	Objects []JSONObject `json:"objects"`
 }
 
-func ReplaceIndexInIndexPattern(index string, content common.MapStr) common.MapStr {
+func ReplaceIndexInIndexPattern(index string, content common.MapStr) (err error) {
 	if index == "" {
-		return content
+		return nil
 	}
 
-	objects, ok := content["objects"].([]interface{})
+	list, ok := content["objects"]
 	if !ok {
-		return content
+		return errors.New("empty index pattern")
 	}
 
-	// change index pattern name
-	for i, object := range objects {
-		objectMap, ok := object.(map[string]interface{})
-		if !ok {
-			continue
+	updateObject := func(obj common.MapStr) {
+		// This uses Put instead of DeepUpdate to avoid modifying types for
+		// inner objects. (DeepUpdate will replace maps with MapStr).
+		obj.Put("id", index)
+		// Only overwrite title if it exists.
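+		// (Guarding with GetValue avoids creating an empty attributes map on
+		// objects that were exported without one.)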
+		if _, err := obj.GetValue("attributes.title"); err == nil {
+			obj.Put("attributes.title", index)
 		}
+	}
 
-		objectMap["id"] = index
-		if attributes, ok := objectMap["attributes"].(map[string]interface{}); ok {
-			attributes["title"] = index
+	switch v := list.(type) {
+	case []interface{}:
+		for _, objIf := range v {
+			switch obj := objIf.(type) {
+			case common.MapStr:
+				updateObject(obj)
+			case map[string]interface{}:
+				updateObject(obj)
+			default:
+				return errors.Errorf("index pattern object has unexpected type %T", objIf)
+			}
 		}
-		objects[i] = objectMap
+	case []map[string]interface{}:
+		for _, obj := range v {
+			updateObject(obj)
+		}
+	case []common.MapStr:
+		for _, obj := range v {
+			updateObject(obj)
+		}
+	default:
+		return errors.Errorf("index pattern objects have unexpected type %T", v)
 	}
 
-	content["objects"] = objects
-
-	return content
+	return nil
 }
 
 func replaceIndexInSearchObject(index string, savedObject string) (string, error) {
diff --git a/libbeat/dashboards/modify_json_test.go b/libbeat/dashboards/modify_json_test.go
index 08fedac4df3..a7424414b53 100644
--- a/libbeat/dashboards/modify_json_test.go
+++ b/libbeat/dashboards/modify_json_test.go
@@ -111,3 +111,137 @@ func TestReplaceIndexInDashboardObject(t *testing.T) {
 		assert.Equal(t, test.expected, result)
 	}
 }
+
+func TestReplaceIndexInIndexPattern(t *testing.T) {
+	// Test that replacing of index name in index pattern works no matter
+	// what the inner types are (MapStr, map[string]interface{} or interface{}).
+	// Also ensures that the inner types are not modified after replacement.
+	tests := []struct {
+		title    string
+		input    common.MapStr
+		index    string
+		expected common.MapStr
+	}{
+		{
+			title: "Replace in []interface(map).map",
+			input: common.MapStr{"objects": []interface{}{map[string]interface{}{
+				"id":   "phonybeat-*",
+				"type": "index-pattern",
+				"attributes": map[string]interface{}{
+					"title":         "phonybeat-*",
+					"timeFieldName": "@timestamp",
+				}}}},
+			index: "otherindex-*",
+			expected: common.MapStr{"objects": []interface{}{map[string]interface{}{
+				"id":   "otherindex-*",
+				"type": "index-pattern",
+				"attributes": map[string]interface{}{
+					"title":         "otherindex-*",
+					"timeFieldName": "@timestamp",
+				}}}},
+		},
+		{
+			title: "Replace in []interface(map).mapstr",
+			input: common.MapStr{"objects": []interface{}{map[string]interface{}{
+				"id":   "phonybeat-*",
+				"type": "index-pattern",
+				"attributes": common.MapStr{
+					"title":         "phonybeat-*",
+					"timeFieldName": "@timestamp",
+				}}}},
+			index: "otherindex-*",
+			expected: common.MapStr{"objects": []interface{}{map[string]interface{}{
+				"id":   "otherindex-*",
+				"type": "index-pattern",
+				"attributes": common.MapStr{
+					"title":         "otherindex-*",
+					"timeFieldName": "@timestamp",
+				}}}},
+		},
+		{
+			title: "Replace in []map.mapstr",
+			input: common.MapStr{"objects": []map[string]interface{}{{
+				"id":   "phonybeat-*",
+				"type": "index-pattern",
+				"attributes": common.MapStr{
+					"title":         "phonybeat-*",
+					"timeFieldName": "@timestamp",
+				}}}},
+			index: "otherindex-*",
+			expected: common.MapStr{"objects": []map[string]interface{}{{
+				"id":   "otherindex-*",
+				"type": "index-pattern",
+				"attributes": common.MapStr{
+					"title":         "otherindex-*",
+					"timeFieldName": "@timestamp",
+				}}}},
+		},
+		{
+			title: "Replace in []mapstr.mapstr",
+			input: common.MapStr{"objects": []common.MapStr{{
+				"id":   "phonybeat-*",
+				"type": "index-pattern",
+				"attributes": common.MapStr{
+					"title":         "phonybeat-*",
+					"timeFieldName": "@timestamp",
+				}}}},
+			index: "otherindex-*",
+			expected: common.MapStr{"objects":
[]common.MapStr{{ + "id": "otherindex-*", + "type": "index-pattern", + "attributes": common.MapStr{ + "title": "otherindex-*", + "timeFieldName": "@timestamp", + }}}}, + }, + { + title: "Replace in []mapstr.interface(mapstr)", + input: common.MapStr{"objects": []common.MapStr{{ + "id": "phonybeat-*", + "type": "index-pattern", + "attributes": interface{}(common.MapStr{ + "title": "phonybeat-*", + "timeFieldName": "@timestamp", + })}}}, + index: "otherindex-*", + expected: common.MapStr{"objects": []common.MapStr{{ + "id": "otherindex-*", + "type": "index-pattern", + "attributes": interface{}(common.MapStr{ + "title": "otherindex-*", + "timeFieldName": "@timestamp", + })}}}, + }, + { + title: "Do not create missing attributes", + input: common.MapStr{"objects": []common.MapStr{{ + "id": "phonybeat-*", + "type": "index-pattern", + }}}, + index: "otherindex-*", + expected: common.MapStr{"objects": []common.MapStr{{ + "id": "otherindex-*", + "type": "index-pattern", + }}}, + }, + { + title: "Create missing id", + input: common.MapStr{"objects": []common.MapStr{{ + "type": "index-pattern", + }}}, + index: "otherindex-*", + expected: common.MapStr{"objects": []common.MapStr{{ + "id": "otherindex-*", + "type": "index-pattern", + }}}, + }, + } + + for _, test := range tests { + t.Run(test.title, func(t *testing.T) { + err := ReplaceIndexInIndexPattern(test.index, test.input) + assert.NoError(t, err) + assert.Equal(t, test.expected, test.input) + }) + } +} diff --git a/libbeat/docker-compose.yml b/libbeat/docker-compose.yml index 267cc441a15..e2c759cda81 100644 --- a/libbeat/docker-compose.yml +++ b/libbeat/docker-compose.yml @@ -5,6 +5,8 @@ services: depends_on: - proxy_dep environment: + - LIBBEAT_PATH=/go/src/github.com/elastic/beats/libbeat + - BEAT_STRICT_PERMS=false - REDIS_HOST=redis - REDIS_PORT=6379 - SREDIS_HOST=sredis @@ -12,18 +14,22 @@ services: - LS_HOST=logstash - LS_TCP_PORT=5044 - LS_TLS_PORT=5055 - # Setup work environment - - LIBBEAT_PATH=/go/src/github.com/elastic/beats/libbeat - KAFKA_HOST=kafka - KAFKA_PORT=9092 - KIBANA_HOST=kibana - KIBANA_PORT=5601 + - KIBANA_USER=beats + - KIBANA_PASS=testing + - ES_HOST=elasticsearch + - ES_PORT=9200 + - ES_USER=beats + - ES_PASS=testing - ES_MONITORING_HOST=elasticsearch_monitoring - ES_MONITORING_PORT=9200 - ES_HOST_SSL=elasticsearchssl - ES_PORT_SSL=9200 - env_file: - - ${PWD}/build/test.env + - ES_SUPERUSER_USER=admin + - ES_SUPERUSER_PASS=changeme volumes: - ${PWD}/..:/go/src/github.com/elastic/beats/ # Used for docker integration tests: @@ -86,7 +92,7 @@ services: - "xpack.security.authc.realms.file.file1.order=0" volumes: - ${ES_BEATS}/testing/environments/docker/elasticsearch/pki:/usr/share/elasticsearch/config/pki:ro - ports: + expose: - 9200 command: bash -c "bin/elasticsearch-users useradd admin -r superuser -p changeme | /usr/local/bin/docker-entrypoint.sh eswrapper" @@ -95,8 +101,6 @@ services: extends: file: ${ES_BEATS}/testing/environments/${TESTING_ENVIRONMENT}.yml service: logstash - env_file: - - ${PWD}/build/test.env depends_on: elasticsearch: condition: service_healthy @@ -115,8 +119,6 @@ services: environment: - REDIS_HOST=redis - REDIS_PORT=6379 - env_file: - - ${PWD}/build/test.env kafka: build: ${ES_BEATS}/testing/environments/docker/kafka diff --git a/libbeat/docs/release-notes/highlights/highlights-7.7.0.asciidoc b/libbeat/docs/release-notes/highlights/highlights-7.7.0.asciidoc index 51fe50c30a9..959bbea91f5 100644 --- a/libbeat/docs/release-notes/highlights/highlights-7.7.0.asciidoc +++ 
b/libbeat/docs/release-notes/highlights/highlights-7.7.0.asciidoc @@ -7,8 +7,8 @@ Each release of {beats} brings new features and product improvements. Following are the most notable features and enhancements in 7.7. -For a complete list of related highlights, see the -https://www.elastic.co/blog/elastic-observability-7-6-0-released[Observability 7.7 release blog]. +//For a complete list of related highlights, see the +//https://www.elastic.co/blog/elastic-observability-7-7-0-released[Observability 7.7 release blog]. For a list of bug fixes and other changes, see the {beats} <> and <>. @@ -18,9 +18,135 @@ For a list of bug fixes and other changes, see the {beats} // tag::notable-highlights[] -//[float] -//==== highlight +[float] +[role="xpack"] +==== Azure Kubernetes and container monitoring -//Description +We've enhanced the {metricbeat} Azure module with three new metricsets +for monitoring Microsoft Azure container services: +{metricbeat-ref}/metricbeat-metricset-azure-container_instance.html[`container_instance`], +{metricbeat-ref}/metricbeat-metricset-azure-container_registry.html[`container_registry`], and +{metricbeat-ref}/metricbeat-metricset-azure-container_service.html[`container_service`]. +These metricsets collect metrics from the following services: + +* Azure Kubernetes Service +* Azure Container Instances +* Azure Container Registry + +Each metricset comes with a dashboard that makes it easy to get started +monitoring Azure containers. + +[float] +[role="xpack"] +==== AWS VPCs, Lambdas, and DynamoDB monitoring + +In the {metricbeat} AWS module, we've added support for monitoring +mission-critical services in the Amazon VPC ecosystem: + +* The {metricbeat-ref}/metricbeat-metricset-aws-natgateway.html[`natgateway`] +metricset enables you to monitor NAT gateway services to gain a +better perspective on how web applications or services are performing. +* The {metricbeat-ref}/metricbeat-metricset-aws-natgateway.html[`transitgateway`] +metricset collects metrics sent to CloudWatch by VPC when requests are flowing +through the gateway.  +* The {metricbeat-ref}/metricbeat-metricset-aws-vpn.html[`vpn`] metricset +enables you to monitor VPN tunnels. VPN metric data is automatically sent to +CloudWatch as it becomes available. + +Also new in this release, the +{metricbeat-ref}/metricbeat-metricset-aws-lambda.html[`lambda`] metricset monitors +Lambda functions across multiple accounts and regions. The metricset collects +metrics such as total invocations, errors, duration, throttles, dead-letter queue +errors, and iterator age for stream-based invocations. You can use these metrics +to configure alerts to respond to events such as changes in performance and +error rates. + +We’ve also added the +{metricbeat-ref}/metricbeat-metricset-aws-dynamodb.html[`dynamodb`] metricset to +monitor AWS DynamoDB instances. This metricset collects metrics, such as request +latency, transaction conflicts, provisioned and consumed capacity, and many +others.   
+
+For Amazon Aurora users, we've enhanced the
+{metricbeat-ref}/metricbeat-metricset-aws-rds.html[`rds`] metricset to collect
+metrics about your Aurora instances.
+
+[float]
+[role="xpack"]
+==== Google Cloud Platform (GCP) Pub/Sub and Load Balancer monitoring
+
+We've enhanced the {metricbeat} Google Cloud Platform module with support
+for monitoring additional services:
+
+* The {metricbeat-ref}/metricbeat-metricset-googlecloud-pubsub.html[`pubsub`]
+metricset connects to the Stackdriver API and collects metrics for topics,
+subscriptions, and snapshots used by a specified account.
+* The {metricbeat-ref}/metricbeat-metricset-googlecloud-loadbalancing.html[`loadbalancing`]
+metricset captures load balancing performance metrics for HTTP(S), TCP, and UDP
+applications.
+
+[float]
+[role="xpack"]
+==== Pivotal Cloud Foundry (PCF) monitoring
+
+We continue to expand coverage of container platforms by adding support for
+Pivotal Cloud Foundry.
+
+The new {metricbeat}
+{metricbeat-ref}/metricbeat-module-cloudfoundry.html[Cloudfoundry module]
+connects to the Cloud Foundry API and pulls container, counter, and value
+metrics from it. These metrics are stored in the `cloudfoundry.container`,
+`cloudfoundry.counter`, and `cloudfoundry.value` metricsets.
+
+In {filebeat}, the new
+{filebeat-ref}/filebeat-input-cloudfoundry.html[`cloudfoundry`] input collects
+HTTP access logs, container logs, and error logs from Cloud Foundry.
+
+To learn how to run {beats} on Cloud Foundry, see:
+
+* {metricbeat-ref}/running-on-cloudfoundry.html[Run {metricbeat} on Cloud Foundry]
+* {filebeat-ref}/running-on-cloudfoundry.html[Run {filebeat} on Cloud Foundry]
+
+[float]
+[role="xpack"]
+==== IBM MQ monitoring
+
+Prior to this release, we offered support in {filebeat} for collecting and
+parsing queue manager error logs from IBM MQ.
+
+In this release, we've added the missing piece: metrics. The new {metricbeat}
+{metricbeat-ref}/metricbeat-module-ibmmq.html[IBM MQ module] pulls status
+information for the Queue Manager, which is responsible for maintaining queues
+and ensuring that messages in the queues reach their destination.
+
+[float]
+[role="xpack"]
+==== Redis Enterprise monitoring
+
+In addition to our existing Redis module, which focuses on the open source
+version of the database, we've added the new {metricbeat}
+{metricbeat-ref}/metricbeat-module-redisenterprise.html[Redis Enterprise] module
+to monitor features such as nodes and proxies in a Redis cluster.
+
+[float]
+[role="xpack"]
+==== Istio monitoring
+
+For Istio users, we've introduced the {metricbeat}
+{metricbeat-ref}/metricbeat-module-istio.html[Istio module] to
+collect metrics about service traffic (in, out, and within a service mesh),
+control-plane metrics for the Istio Pilot, Galley, and Mixer components, and much
+more.
+
+[float]
+==== ECS field improvements in {filebeat}
+
+The {ecs-ref}/index.html[Elastic Common Schema] (ECS) defines a common set of
+fields to be used when storing event data in {es}.
+
+In 7.7, we've improved ECS field mappings in numerous {filebeat} modules,
+making it easier for you to analyze, visualize, and correlate data across
+events. For a list of affected modules, see the
+{beats-ref}/release-notes.html[Release Notes] for 7.7.0.
// end::notable-highlights[]

diff --git a/libbeat/docs/shared-autodiscover.asciidoc b/libbeat/docs/shared-autodiscover.asciidoc
index a3911a65490..22a72a9b52e 100644
--- a/libbeat/docs/shared-autodiscover.asciidoc
+++ b/libbeat/docs/shared-autodiscover.asciidoc
@@ -18,6 +18,7 @@ to set conditions that, when met, launch specific configurations.
 On start, {beatname_uc} will scan existing containers and launch the proper configs for
 them. Then it will watch for new start/stop events. This ensures you don't
 need to worry about state, but only define your desired configs.
+ifdef::autodiscoverDocker[]
 
 [float]
 ===== Docker
@@ -124,6 +125,10 @@ running configuration for a container, 60s by default.
 =======================================
 endif::[]
 
+endif::autodiscoverDocker[]
+
+
+ifdef::autodiscoverKubernetes[]
 [float]
 ===== Kubernetes
@@ -243,6 +248,7 @@ running configuration for a container, 60s by default.
 
 include::../../{beatname_lc}/docs/autodiscover-kubernetes-config.asciidoc[]
 
+endif::autodiscoverKubernetes[]
 
 [float]
 ===== Manually Defining Ports with Kubernetes

diff --git a/libbeat/docs/shared-kerberos-config.asciidoc b/libbeat/docs/shared-kerberos-config.asciidoc
new file mode 100644
index 00000000000..7accd6f7df9
--- /dev/null
+++ b/libbeat/docs/shared-kerberos-config.asciidoc
@@ -0,0 +1,85 @@
+[[configuration-kerberos]]
+== Configure Kerberos
+
+You can specify Kerberos options with any output or input that supports Kerberos, like {es} and Kafka.
+
+The following encryption types are supported:
+
+* aes128-cts-hmac-sha1-96
+* aes128-cts-hmac-sha256-128
+* aes256-cts-hmac-sha1-96
+* aes256-cts-hmac-sha384-192
+* des3-cbc-sha1-kd
+* rc4-hmac
+
+Example output config with Kerberos password-based authentication:
+
+[source,yaml]
+----
+output.elasticsearch.hosts: ["http://my-elasticsearch.elastic.co:9200"]
+output.elasticsearch.kerberos.auth_type: password
+output.elasticsearch.kerberos.username: "elastic"
+output.elasticsearch.kerberos.password: "changeme"
+output.elasticsearch.kerberos.config_path: "/etc/krb5.conf"
+output.elasticsearch.kerberos.realm: "ELASTIC.CO"
+----
+
+The service principal name for the Elasticsearch instance is constructed from these options. Based on this
+configuration, it is going to be `HTTP/my-elasticsearch.elastic.co@ELASTIC.CO`.
+
+[float]
+=== Configuration options
+
+You can specify the following options in the `kerberos` section of the
++{beatname_lc}.yml+ config file:
+
+[float]
+==== `enabled`
+
+The `enabled` setting can be used to disable the Kerberos configuration by setting
+it to `false`. The default value is `true`.
+
+NOTE: Kerberos settings are disabled if either `enabled` is set to `false` or the
+`kerberos` section is missing.
+
+[float]
+==== `auth_type`
+
+There are two options to authenticate with Kerberos KDC: `password` and `keytab`.
+
+`password` expects the principal name and its password. When choosing `keytab`, you
+have to specify a principal name and a path to a keytab. The keytab must contain
+the keys of the selected principal. Otherwise, authentication will fail.
+
+[float]
+==== `config_path`
+
+You need to set the path to the `krb5.conf`, so
+{beatname_lc} can find the Kerberos KDC to
+retrieve a ticket.
+
+[float]
+==== `username`
+
+Name of the principal used to connect to the output.
+
+[float]
+==== `password`
+
+If you configured `password` for `auth_type`, you have to provide a password
+for the selected principal.
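+
+For comparison, the `keytab` option described next would be configured like
+this (an illustrative sketch; the keytab path is hypothetical):
+
+[source,yaml]
+----
+output.elasticsearch.hosts: ["http://my-elasticsearch.elastic.co:9200"]
+output.elasticsearch.kerberos.auth_type: keytab
+output.elasticsearch.kerberos.username: "elastic"
+output.elasticsearch.kerberos.keytab: "/etc/security/elastic.keytab"
+output.elasticsearch.kerberos.config_path: "/etc/krb5.conf"
+output.elasticsearch.kerberos.realm: "ELASTIC.CO"
+----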
+ +[float] +==== `keytab` + +If you configured `keytab` for `auth_type`, you have to provide the path to the +keytab of the selected principal. + +[float] +==== `service_name` + +This option can only be configured for Kafka. It is the name of the Kafka service, usually `kafka`. + +[float] +==== `realm` + +Name of the realm where the output resides. + diff --git a/libbeat/esleg/eslegclient/config.go b/libbeat/esleg/eslegclient/config.go index 5c171a4eb2b..d9a299d68c7 100644 --- a/libbeat/esleg/eslegclient/config.go +++ b/libbeat/esleg/eslegclient/config.go @@ -22,6 +22,7 @@ import ( "time" "github.com/elastic/beats/v7/libbeat/common" + "github.com/elastic/beats/v7/libbeat/common/transport/kerberos" "github.com/elastic/beats/v7/libbeat/common/transport/tlscommon" ) @@ -32,7 +33,8 @@ type config struct { Params map[string]string `config:"parameters"` Headers map[string]string `config:"headers"` - TLS *tlscommon.Config `config:"ssl"` + TLS *tlscommon.Config `config:"ssl"` + Kerberos *kerberos.Config `config:"kerberos"` ProxyURL string `config:"proxy_url"` ProxyDisable bool `config:"proxy_disable"` diff --git a/libbeat/esleg/eslegclient/connection.go b/libbeat/esleg/eslegclient/connection.go index b591307c444..7001d2e453d 100644 --- a/libbeat/esleg/eslegclient/connection.go +++ b/libbeat/esleg/eslegclient/connection.go @@ -28,17 +28,23 @@ import ( "github.com/elastic/beats/v7/libbeat/common" "github.com/elastic/beats/v7/libbeat/common/transport" + "github.com/elastic/beats/v7/libbeat/common/transport/kerberos" "github.com/elastic/beats/v7/libbeat/common/transport/tlscommon" "github.com/elastic/beats/v7/libbeat/logp" "github.com/elastic/beats/v7/libbeat/testing" ) +type esHTTPClient interface { + Do(req *http.Request) (resp *http.Response, err error) + CloseIdleConnections() +} + // Connection manages the connection for a given client. 
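+// The client is held behind the esHTTPClient interface (defined above) rather
+// than as a concrete *http.Client so that the SPNEGO client returned by
+// kerberos.NewClient can be substituted transparently; both implementations
+// expose Do and CloseIdleConnections.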
type Connection struct { ConnectionSettings Encoder BodyEncoder - HTTP *http.Client + HTTP esHTTPClient version common.Version log *logp.Logger @@ -55,7 +61,8 @@ type ConnectionSettings struct { APIKey string Headers map[string]string - TLS *tlscommon.TLSConfig + TLS *tlscommon.TLSConfig + Kerberos *kerberos.Config OnConnectCallback func() error Observer transport.IOStatser @@ -63,11 +70,15 @@ type ConnectionSettings struct { Parameters map[string]string CompressionLevel int EscapeHTML bool - Timeout time.Duration + + Timeout time.Duration + IdleConnTimeout time.Duration } // NewConnection returns a new Elasticsearch client func NewConnection(s ConnectionSettings) (*Connection, error) { + s = settingsWithDefaults(s) + u, err := url.Parse(s.URL) if err != nil { return nil, fmt.Errorf("failed to parse elasticsearch URL: %v", err) @@ -116,22 +127,51 @@ func NewConnection(s ConnectionSettings) (*Connection, error) { } } - return &Connection{ - ConnectionSettings: s, - HTTP: &http.Client{ + var httpClient esHTTPClient + httpClient = &http.Client{ + Transport: &http.Transport{ + Dial: dialer.Dial, + DialTLS: tlsDialer.Dial, + TLSClientConfig: s.TLS.ToConfig(), + Proxy: proxy, + IdleConnTimeout: s.IdleConnTimeout, + }, + Timeout: s.Timeout, + } + + if s.Kerberos.IsEnabled() { + c := &http.Client{ Transport: &http.Transport{ Dial: dialer.Dial, - DialTLS: tlsDialer.Dial, - TLSClientConfig: s.TLS.ToConfig(), Proxy: proxy, + IdleConnTimeout: s.IdleConnTimeout, }, Timeout: s.Timeout, - }, - Encoder: encoder, - log: logp.NewLogger("esclientleg"), + } + httpClient, err = kerberos.NewClient(s.Kerberos, c, s.URL) + if err != nil { + return nil, err + } + logp.Info("kerberos client created") + } + + return &Connection{ + ConnectionSettings: s, + HTTP: httpClient, + Encoder: encoder, + log: logp.NewLogger("esclientleg"), }, nil } +func settingsWithDefaults(s ConnectionSettings) ConnectionSettings { + settings := s + if settings.IdleConnTimeout == 0 { + settings.IdleConnTimeout = 1 * time.Minute + } + + return settings +} + // NewClients returns a list of Elasticsearch clients based on the given // configuration. It accepts the same configuration parameters as the Elasticsearch // output, except for the output specific configuration options. If multiple hosts @@ -176,6 +216,7 @@ func NewClients(cfg *common.Config) ([]Connection, error) { Proxy: proxyURL, ProxyDisable: config.ProxyDisable, TLS: tlsConfig, + Kerberos: config.Kerberos, Username: config.Username, Password: config.Password, APIKey: config.APIKey, @@ -266,6 +307,7 @@ func (conn *Connection) Ping() (string, error) { // Close closes a connection. func (conn *Connection) Close() error { + conn.HTTP.CloseIdleConnections() return nil } diff --git a/libbeat/magefile.go b/libbeat/magefile.go index ff168daec53..e5a22799b6d 100644 --- a/libbeat/magefile.go +++ b/libbeat/magefile.go @@ -20,16 +20,23 @@ package main import ( - "context" - devtools "github.com/elastic/beats/v7/dev-tools/mage" // mage:import _ "github.com/elastic/beats/v7/dev-tools/mage/target/common" // mage:import - _ "github.com/elastic/beats/v7/dev-tools/mage/target/unittest" + "github.com/elastic/beats/v7/dev-tools/mage/target/unittest" + // mage:import + "github.com/elastic/beats/v7/dev-tools/mage/target/integtest" + // mage:import + _ "github.com/elastic/beats/v7/dev-tools/mage/target/test" ) +func init() { + unittest.RegisterPythonTestDeps(Fields) + integtest.RegisterGoTestDeps(Fields) +} + // Build builds the Beat binary. 
func Build() error { return devtools.Build(devtools.DefaultBuildArgs()) @@ -40,21 +47,7 @@ func Fields() error { return devtools.GenerateFieldsYAML("processors") } -// GoTestUnit executes the Go unit tests. -// Use TEST_COVERAGE=true to enable code coverage profiling. -// Use RACE_DETECTOR=true to enable the race detector. -func GoTestUnit(ctx context.Context) error { - return devtools.GoTest(ctx, devtools.DefaultGoTestUnitArgs()) -} - // Config generates example and reference configuration for libbeat. func Config() error { return devtools.Config(devtools.ShortConfigType|devtools.ReferenceConfigType, devtools.ConfigFileParams{}, ".") } - -// GoIntegTest executes the Go integration tests. -// Use TEST_COVERAGE=true to enable code coverage profiling. -// Use RACE_DETECTOR=true to enable the race detector. -func GoIntegTest(ctx context.Context) error { - return devtools.GoTest(ctx, devtools.DefaultGoTestIntegrationArgs()) -} diff --git a/libbeat/mapping/field.go b/libbeat/mapping/field.go index 7b2ba52e618..79f771bb7dd 100644 --- a/libbeat/mapping/field.go +++ b/libbeat/mapping/field.go @@ -124,7 +124,7 @@ func (f *Field) Validate() error { func (f *Field) validateType() error { switch strings.ToLower(f.Type) { - case "text", "keyword": + case "text", "keyword", "wildcard": return stringType.validate(f.Format) case "long", "integer", "short", "byte", "double", "float", "half_float", "scaled_float": return numberType.validate(f.Format) diff --git a/libbeat/outputs/elasticsearch/client.go b/libbeat/outputs/elasticsearch/client.go index 2969c0f057b..bee2769cb9e 100644 --- a/libbeat/outputs/elasticsearch/client.go +++ b/libbeat/outputs/elasticsearch/client.go @@ -84,6 +84,7 @@ func NewClient( APIKey: base64.StdEncoding.EncodeToString([]byte(s.APIKey)), Headers: s.Headers, TLS: s.TLS, + Kerberos: s.Kerberos, Proxy: s.Proxy, ProxyDisable: s.ProxyDisable, Parameters: s.Parameters, @@ -150,12 +151,13 @@ func (client *Client) Clone() *Client { // empty. ProxyDisable: client.conn.Proxy == nil, TLS: client.conn.TLS, + Kerberos: client.conn.Kerberos, Username: client.conn.Username, Password: client.conn.Password, APIKey: client.conn.APIKey, Parameters: nil, // XXX: do not pass params? 
 		Headers:           client.conn.Headers,
-		Timeout:           client.conn.HTTP.Timeout,
+		Timeout:           client.conn.Timeout,
 		CompressionLevel:  client.conn.CompressionLevel,
 		OnConnectCallback: nil,
 		Observer:          nil,

diff --git a/libbeat/outputs/elasticsearch/config.go b/libbeat/outputs/elasticsearch/config.go
index 499bba2eeff..d094f005df5 100644
--- a/libbeat/outputs/elasticsearch/config.go
+++ b/libbeat/outputs/elasticsearch/config.go
@@ -22,6 +22,7 @@ import (
 	"time"
 
 	"github.com/elastic/beats/v7/libbeat/common"
+	"github.com/elastic/beats/v7/libbeat/common/transport/kerberos"
 	"github.com/elastic/beats/v7/libbeat/common/transport/tlscommon"
 )
 
@@ -39,6 +40,7 @@ type elasticsearchConfig struct {
 	CompressionLevel int               `config:"compression_level" validate:"min=0, max=9"`
 	EscapeHTML       bool              `config:"escape_html"`
 	TLS              *tlscommon.Config `config:"ssl"`
+	Kerberos         *kerberos.Config  `config:"kerberos"`
 	BulkMaxSize      int               `config:"bulk_max_size"`
 	MaxRetries       int               `config:"max_retries"`
 	Timeout          time.Duration     `config:"timeout"`
@@ -69,6 +71,7 @@ var (
 		CompressionLevel: 0,
 		EscapeHTML:       false,
 		TLS:              nil,
+		Kerberos:         nil,
 		LoadBalance:      true,
 		Backoff: Backoff{
 			Init: 1 * time.Second,

diff --git a/libbeat/outputs/elasticsearch/docs/elasticsearch.asciidoc b/libbeat/outputs/elasticsearch/docs/elasticsearch.asciidoc
index c36e6b24163..254349f39bc 100644
--- a/libbeat/outputs/elasticsearch/docs/elasticsearch.asciidoc
+++ b/libbeat/outputs/elasticsearch/docs/elasticsearch.asciidoc
@@ -676,3 +676,11 @@ for HTTPS-based connections. If the `ssl` section is missing, the host CAs are u
 Elasticsearch.
 
 See <> for more information.
+
+===== `kerberos`
+
+Configuration options for Kerberos authentication.
+
+See <> for more information.
+
+include::{libbeat-dir}/shared-kerberos-config.asciidoc[]

diff --git a/libbeat/outputs/elasticsearch/elasticsearch.go b/libbeat/outputs/elasticsearch/elasticsearch.go
index b6c3bd797a9..512b74895ea 100644
--- a/libbeat/outputs/elasticsearch/elasticsearch.go
+++ b/libbeat/outputs/elasticsearch/elasticsearch.go
@@ -97,6 +97,7 @@ func makeES(
 			Proxy:        proxyURL,
 			ProxyDisable: config.ProxyDisable,
 			TLS:          tlsConfig,
+			Kerberos:     config.Kerberos,
 			Username:     config.Username,
 			Password:     config.Password,
 			APIKey:       config.APIKey,

diff --git a/libbeat/outputs/kafka/config.go b/libbeat/outputs/kafka/config.go
index d2b645f075d..3174646da4a 100644
--- a/libbeat/outputs/kafka/config.go
+++ b/libbeat/outputs/kafka/config.go
@@ -216,7 +216,7 @@ func newSaramaConfig(log *logp.Logger, config *kafkaConfig) (*sarama.Config, err
 		k.Net.TLS.Config = tls.BuildModuleConfig("")
 	}
 
-	if config.Kerberos != nil {
+	if config.Kerberos.IsEnabled() {
 		cfgwarn.Beta("Kerberos authentication for Kafka is beta.")
 
 		k.Net.SASL.Enable = true

diff --git a/libbeat/processors/actions/docs/replace.asciidoc b/libbeat/processors/actions/docs/replace.asciidoc
new file mode 100644
index 00000000000..3faf3e0bcce
--- /dev/null
+++ b/libbeat/processors/actions/docs/replace.asciidoc
@@ -0,0 +1,49 @@
+[[replace-fields]]
+=== Replace fields from events
+
+++++
+replace
+++++
+
+The `replace` processor takes a list of fields under the `fields` key; for each
+one, the part of the field's value that matches a pattern is replaced with a
+replacement string. Each entry contains a `field: field-name`,
+`pattern: regex-pattern`, and `replacement: replacement-string`, where:
+
+* `field` is the original field name
+* `pattern` is the regex pattern to match the field's value
+* `replacement` is the replacement string to use for updating the field's value
+
+The `replace` processor cannot be used to create a completely new value; it
+only updates values of fields that already exist in the event.
+
+TIP: You can use this processor to truncate a field value or to replace it
+with a new string. It can also be used for masking PII information.
+
+The following example changes the path `/usr/bin` to `/usr/local/bin`:
+
+[source,yaml]
+-------
+processors:
+- replace:
+    fields:
+      - field: "file.path"
+        pattern: "/usr/"
+        replacement: "/usr/local/"
+    ignore_missing: false
+    fail_on_error: true
+-------
+
+The `replace` processor has the following configuration settings:
+
+`ignore_missing`:: (Optional) If set to `true`, no error is logged when a specified field
+is missing. Default is `false`.
+
+`fail_on_error`:: (Optional) If set to `true`, replacement of field values is
+stopped when an error occurs and the original event is returned. If set to
+`false`, replacement continues even if an error occurs during replacement.
+Default is `true`.
+
+See <> for a list of supported conditions.
+
+You can specify multiple `replace` processors under the `processors`
+section.
+

diff --git a/libbeat/processors/actions/replace.go b/libbeat/processors/actions/replace.go
new file mode 100644
index 00000000000..37245817050
--- /dev/null
+++ b/libbeat/processors/actions/replace.go
@@ -0,0 +1,118 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package actions
+
+import (
+	"fmt"
+	"regexp"
+
+	"github.com/pkg/errors"
+
+	"github.com/elastic/beats/v7/libbeat/beat"
+	"github.com/elastic/beats/v7/libbeat/common"
+	"github.com/elastic/beats/v7/libbeat/logp"
+	"github.com/elastic/beats/v7/libbeat/processors"
+	"github.com/elastic/beats/v7/libbeat/processors/checks"
+	jsprocessor "github.com/elastic/beats/v7/libbeat/processors/script/javascript/module/processor"
+)
+
+type replaceString struct {
+	config replaceStringConfig
+}
+
+type replaceStringConfig struct {
+	Fields        []replaceConfig `config:"fields"`
+	IgnoreMissing bool            `config:"ignore_missing"`
+	FailOnError   bool            `config:"fail_on_error"`
+}
+
+type replaceConfig struct {
+	Field       string         `config:"field"`
+	Pattern     *regexp.Regexp `config:"pattern"`
+	Replacement string         `config:"replacement"`
+}
+
+func init() {
+	processors.RegisterPlugin("replace",
+		checks.ConfigChecked(NewReplaceString,
+			checks.RequireFields("fields")))
+
+	jsprocessor.RegisterPlugin("Replace", NewReplaceString)
+}
+
+// NewReplaceString returns a new replace processor.
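+// Unless overridden in the config, ignore_missing defaults to false and
+// fail_on_error defaults to true, matching the documented behavior.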
+func NewReplaceString(c *common.Config) (processors.Processor, error) { + config := replaceStringConfig{ + IgnoreMissing: false, + FailOnError: true, + } + err := c.Unpack(&config) + if err != nil { + return nil, fmt.Errorf("failed to unpack the replace configuration: %s", err) + } + + f := &replaceString{ + config: config, + } + return f, nil +} + +func (f *replaceString) Run(event *beat.Event) (*beat.Event, error) { + var backup common.MapStr + // Creates a copy of the event to revert in case of failure + if f.config.FailOnError { + backup = event.Fields.Clone() + } + + for _, field := range f.config.Fields { + err := f.replaceField(field.Field, field.Pattern, field.Replacement, event.Fields) + if err != nil { + errMsg := fmt.Errorf("Failed to replace fields in processor: %s", err) + logp.Debug("replace", errMsg.Error()) + if f.config.FailOnError { + event.Fields = backup + event.PutValue("error.message", errMsg.Error()) + return event, err + } + } + } + + return event, nil +} + +func (f *replaceString) replaceField(field string, pattern *regexp.Regexp, replacement string, fields common.MapStr) error { + currentValue, err := fields.GetValue(field) + if err != nil { + // Ignore ErrKeyNotFound errors + if f.config.IgnoreMissing && errors.Cause(err) == common.ErrKeyNotFound { + return nil + } + return fmt.Errorf("could not fetch value for key: %s, Error: %s", field, err) + } + + updatedString := pattern.ReplaceAllString(currentValue.(string), replacement) + _, err = fields.Put(field, updatedString) + if err != nil { + return fmt.Errorf("could not put value: %s: %v, %v", replacement, currentValue, err) + } + return nil +} + +func (f *replaceString) String() string { + return "replace=" + fmt.Sprintf("%+v", f.config.Fields) +} diff --git a/libbeat/processors/actions/replace_test.go b/libbeat/processors/actions/replace_test.go new file mode 100644 index 00000000000..e54d16c5012 --- /dev/null +++ b/libbeat/processors/actions/replace_test.go @@ -0,0 +1,248 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
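+
+// Illustrative usage (a sketch, not part of the test suite; the field name and
+// values are made up). The pattern string is compiled into the *regexp.Regexp
+// config field during Unpack:
+//
+//	cfg, _ := common.NewConfigFrom(common.MapStr{
+//	    "fields": []common.MapStr{
+//	        {"field": "message", "pattern": "/usr/", "replacement": "/usr/local/"},
+//	    },
+//	})
+//	p, _ := NewReplaceString(cfg)
+//	evt, _ := p.Run(&beat.Event{Fields: common.MapStr{"message": "ran /usr/bin/foo"}})
+//	// evt.Fields now holds {"message": "ran /usr/local/bin/foo"}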
+ +package actions + +import ( + "reflect" + "regexp" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/elastic/beats/v7/libbeat/beat" + "github.com/elastic/beats/v7/libbeat/common" +) + +func TestReplaceRun(t *testing.T) { + var tests = []struct { + description string + Fields []replaceConfig + IgnoreMissing bool + FailOnError bool + Input common.MapStr + Output common.MapStr + error bool + }{ + { + description: "simple field replacing", + Fields: []replaceConfig{ + { + Field: "f", + Pattern: regexp.MustCompile(`a`), + Replacement: "b", + }, + }, + Input: common.MapStr{ + "f": "abc", + }, + Output: common.MapStr{ + "f": "bbc", + }, + error: false, + IgnoreMissing: false, + FailOnError: true, + }, + { + description: "Add one more hierarchy to event", + Fields: []replaceConfig{ + { + Field: "f.b", + Pattern: regexp.MustCompile(`a`), + Replacement: "b", + }, + }, + Input: common.MapStr{ + "f": common.MapStr{ + "b": "abc", + }, + }, + Output: common.MapStr{ + "f": common.MapStr{ + "b": "bbc", + }, + }, + error: false, + IgnoreMissing: false, + FailOnError: true, + }, + { + description: "replace two fields at the same time.", + Fields: []replaceConfig{ + { + Field: "f", + Pattern: regexp.MustCompile(`a.*c`), + Replacement: "cab", + }, + { + Field: "g", + Pattern: regexp.MustCompile(`ef`), + Replacement: "oor", + }, + }, + Input: common.MapStr{ + "f": "abbbc", + "g": "def", + }, + Output: common.MapStr{ + "f": "cab", + "g": "door", + }, + error: false, + IgnoreMissing: false, + FailOnError: true, + }, + { + description: "test missing fields", + Fields: []replaceConfig{ + { + Field: "f", + Pattern: regexp.MustCompile(`abc`), + Replacement: "xyz", + }, + { + Field: "g", + Pattern: regexp.MustCompile(`def`), + Replacement: "", + }, + }, + Input: common.MapStr{ + "m": "abc", + "n": "def", + }, + Output: common.MapStr{ + "m": "abc", + "n": "def", + "error": common.MapStr{ + "message": "Failed to replace fields in processor: could not fetch value for key: f, Error: key not found", + }, + }, + error: true, + IgnoreMissing: false, + FailOnError: true, + }, + } + + for _, test := range tests { + t.Run(test.description, func(t *testing.T) { + f := &replaceString{ + config: replaceStringConfig{ + Fields: test.Fields, + IgnoreMissing: test.IgnoreMissing, + FailOnError: test.FailOnError, + }, + } + event := &beat.Event{ + Fields: test.Input, + } + + newEvent, err := f.Run(event) + if !test.error { + assert.Nil(t, err) + } else { + assert.NotNil(t, err) + } + + assert.True(t, reflect.DeepEqual(newEvent.Fields, test.Output)) + }) + } +} + +func TestReplaceField(t *testing.T) { + var tests = []struct { + Field string + Pattern *regexp.Regexp + Replacement string + ignoreMissing bool + failOnError bool + Input common.MapStr + Output common.MapStr + error bool + description string + }{ + { + description: "replace part of field value with another string", + Field: "f", + Pattern: regexp.MustCompile(`a`), + Replacement: "b", + Input: common.MapStr{ + "f": "abc", + }, + Output: common.MapStr{ + "f": "bbc", + }, + error: false, + failOnError: true, + ignoreMissing: false, + }, + { + description: "Add hierarchy to event and replace", + Field: "f.b", + Pattern: regexp.MustCompile(`a`), + Replacement: "b", + Input: common.MapStr{ + "f": common.MapStr{ + "b": "abc", + }, + }, + Output: common.MapStr{ + "f": common.MapStr{ + "b": "bbc", + }, + }, + error: false, + ignoreMissing: false, + failOnError: true, + }, + { + description: "try replacing value of missing fields in event", + Field: "f", + 
Pattern: regexp.MustCompile(`abc`), + Replacement: "xyz", + Input: common.MapStr{ + "m": "abc", + "n": "def", + }, + Output: common.MapStr{ + "m": "abc", + "n": "def", + }, + error: true, + ignoreMissing: false, + failOnError: true, + }, + } + + for _, test := range tests { + t.Run(test.description, func(t *testing.T) { + + f := &replaceString{ + config: replaceStringConfig{ + IgnoreMissing: test.ignoreMissing, + FailOnError: test.failOnError, + }, + } + + err := f.replaceField(test.Field, test.Pattern, test.Replacement, test.Input) + if err != nil { + assert.Equal(t, test.error, true) + } + + assert.True(t, reflect.DeepEqual(test.Input, test.Output)) + }) + } +} diff --git a/libbeat/publisher/pipeline/batch.go b/libbeat/publisher/pipeline/batch.go index 5a8903c5814..54ba2058d74 100644 --- a/libbeat/publisher/pipeline/batch.go +++ b/libbeat/publisher/pipeline/batch.go @@ -24,7 +24,13 @@ import ( "github.com/elastic/beats/v7/libbeat/publisher/queue" ) -type Batch struct { +type Batch interface { + publisher.Batch + + reduceTTL() bool +} + +type batch struct { original queue.Batch ctx *batchContext ttl int @@ -38,17 +44,17 @@ type batchContext struct { var batchPool = sync.Pool{ New: func() interface{} { - return &Batch{} + return &batch{} }, } -func newBatch(ctx *batchContext, original queue.Batch, ttl int) *Batch { +func newBatch(ctx *batchContext, original queue.Batch, ttl int) *batch { if original == nil { panic("empty batch") } - b := batchPool.Get().(*Batch) - *b = Batch{ + b := batchPool.Get().(*batch) + *b = batch{ original: original, ctx: ctx, ttl: ttl, @@ -57,45 +63,47 @@ func newBatch(ctx *batchContext, original queue.Batch, ttl int) *Batch { return b } -func releaseBatch(b *Batch) { - *b = Batch{} // clear batch +func releaseBatch(b *batch) { + *b = batch{} // clear batch batchPool.Put(b) } -func (b *Batch) Events() []publisher.Event { +func (b *batch) Events() []publisher.Event { return b.events } -func (b *Batch) ACK() { - b.ctx.observer.outBatchACKed(len(b.events)) +func (b *batch) ACK() { + if b.ctx != nil { + b.ctx.observer.outBatchACKed(len(b.events)) + } b.original.ACK() releaseBatch(b) } -func (b *Batch) Drop() { +func (b *batch) Drop() { b.original.ACK() releaseBatch(b) } -func (b *Batch) Retry() { +func (b *batch) Retry() { b.ctx.retryer.retry(b) } -func (b *Batch) Cancelled() { +func (b *batch) Cancelled() { b.ctx.retryer.cancelled(b) } -func (b *Batch) RetryEvents(events []publisher.Event) { +func (b *batch) RetryEvents(events []publisher.Event) { b.updEvents(events) b.Retry() } -func (b *Batch) CancelledEvents(events []publisher.Event) { +func (b *batch) CancelledEvents(events []publisher.Event) { b.updEvents(events) b.Cancelled() } -func (b *Batch) updEvents(events []publisher.Event) { +func (b *batch) updEvents(events []publisher.Event) { l1 := len(b.events) l2 := len(events) if l1 > l2 { @@ -105,3 +113,33 @@ func (b *Batch) updEvents(events []publisher.Event) { b.events = events } + +// reduceTTL reduces the time to live for all events that have no 'guaranteed' +// sending requirements. reduceTTL returns true if the batch is still alive. 
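+//
+// For example, a batch created with ttl=2 survives its first failed send
+// (ttl drops to 1); on the second failure only events flagged Guaranteed are
+// kept, and if any remain the batch switches to infinite retry (ttl = -1),
+// otherwise reduceTTL reports false and the batch is dropped.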
+func (b *batch) reduceTTL() bool {
+	if b.ttl <= 0 {
+		return true
+	}
+
+	b.ttl--
+	if b.ttl > 0 {
+		return true
+	}
+
+	// filter for events with the guaranteed send flag
+	events := b.events[:0]
+	for _, event := range b.events {
+		if event.Guaranteed() {
+			events = append(events, event)
+		}
+	}
+	b.events = events
+
+	if len(b.events) > 0 {
+		b.ttl = -1 // we need infinite retry for all events left in this batch
+		return true
+	}
+
+	// all events have been dropped:
+	return false
+}

diff --git a/libbeat/publisher/pipeline/consumer.go b/libbeat/publisher/pipeline/consumer.go
index 4dd211052c2..a5c4a97e25a 100644
--- a/libbeat/publisher/pipeline/consumer.go
+++ b/libbeat/publisher/pipeline/consumer.go
@@ -138,7 +138,7 @@ func (c *eventConsumer) loop(consumer queue.Consumer) {
 	var (
 		out    workQueue
-		batch  *Batch
+		batch  Batch
 		paused = true
 	)
 
@@ -154,7 +154,7 @@ func (c *eventConsumer) loop(consumer queue.Consumer) {
 		}
 
 		paused = c.paused()
-		if !paused && c.out != nil && batch != nil {
+		if c.out != nil && batch != nil {
 			out = c.out.workQueue
 		} else {
 			out = nil
@@ -195,6 +195,9 @@ func (c *eventConsumer) loop(consumer queue.Consumer) {
 			handleSignal(sig)
 		case out <- batch:
 			batch = nil
+			if paused {
+				out = nil
+			}
 		}
 	}
 }

diff --git a/libbeat/publisher/pipeline/controller.go b/libbeat/publisher/pipeline/controller.go
index 05bd65338a9..837a70eab77 100644
--- a/libbeat/publisher/pipeline/controller.go
+++ b/libbeat/publisher/pipeline/controller.go
@@ -22,6 +22,7 @@ import (
 	"github.com/elastic/beats/v7/libbeat/common"
 	"github.com/elastic/beats/v7/libbeat/common/reload"
 	"github.com/elastic/beats/v7/libbeat/outputs"
+	"github.com/elastic/beats/v7/libbeat/publisher"
 	"github.com/elastic/beats/v7/libbeat/publisher/queue"
 )
 
@@ -34,7 +35,8 @@ type outputController struct {
 	monitors Monitors
 	observer outputObserver
 
-	queue queue.Queue
+	queue     queue.Queue
+	workQueue workQueue
 
 	retryer  *retryer
 	consumer *eventConsumer
@@ -50,7 +52,7 @@ type outputGroup struct {
 	timeToLive int // event lifetime
 }
 
-type workQueue chan *Batch
+type workQueue chan publisher.Batch
 
 // outputWorker instances pass events from the shared workQueue to the outputs.Client
 // instances.
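Batches now travel through a queue owned by the controller rather than by a single output group, so batches in flight while an output is being swapped are drained by the next generation of workers. The pattern can be sketched in isolation like this (a toy model with made-up names, not the patch's code):

[source,go]
----
// One long-lived work queue shared by successive worker generations.
package main

import (
	"fmt"
	"sync"
)

// startWorker consumes from the shared queue until its done channel closes.
func startWorker(id int, work <-chan int, done <-chan struct{}, wg *sync.WaitGroup) {
	wg.Add(1)
	go func() {
		defer wg.Done()
		for {
			select {
			case <-done:
				return
			case b := <-work:
				fmt.Printf("worker %d published batch %d\n", id, b)
			}
		}
	}()
}

func main() {
	work := make(chan int) // outlives every worker generation

	var gen1 sync.WaitGroup
	done1 := make(chan struct{})
	startWorker(1, work, done1, &gen1)
	work <- 1    // delivered by generation 1
	close(done1) // "reload": retire generation 1 ...
	gen1.Wait()

	var gen2 sync.WaitGroup
	done2 := make(chan struct{})
	startWorker(2, work, done2, &gen2) // ... start generation 2 on the same queue
	work <- 2                          // still deliverable; nothing was dropped
	close(done2)
	gen2.Wait()
}
----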
@@ -62,18 +64,19 @@ func newOutputController( beat beat.Info, monitors Monitors, observer outputObserver, - b queue.Queue, + queue queue.Queue, ) *outputController { c := &outputController{ - beat: beat, - monitors: monitors, - observer: observer, - queue: b, + beat: beat, + monitors: monitors, + observer: observer, + queue: queue, + workQueue: makeWorkQueue(), } ctx := &batchContext{} - c.consumer = newEventConsumer(monitors.Logger, b, ctx) - c.retryer = newRetryer(monitors.Logger, observer, nil, c.consumer) + c.consumer = newEventConsumer(monitors.Logger, queue, ctx) + c.retryer = newRetryer(monitors.Logger, observer, c.workQueue, c.consumer) ctx.observer = observer ctx.retryer = c.retryer @@ -86,27 +89,26 @@ func (c *outputController) Close() error { c.consumer.sigPause() c.consumer.close() c.retryer.close() + close(c.workQueue) if c.out != nil { for _, out := range c.out.outputs { out.Close() } - close(c.out.workQueue) } return nil } func (c *outputController) Set(outGrp outputs.Group) { - // create new outputGroup with shared work queue + // create new output group with the shared work queue clients := outGrp.Clients - queue := makeWorkQueue() worker := make([]outputWorker, len(clients)) for i, client := range clients { - worker[i] = makeClientWorker(c.observer, queue, client) + worker[i] = makeClientWorker(c.observer, c.workQueue, client) } grp := &outputGroup{ - workQueue: queue, + workQueue: c.workQueue, outputs: worker, timeToLive: outGrp.Retry + 1, batchSize: outGrp.BatchSize, @@ -119,7 +121,6 @@ func (c *outputController) Set(outGrp outputs.Group) { c.retryer.sigOutputRemoved() } } - c.retryer.updOutput(queue) for range clients { c.retryer.sigOutputAdded() } @@ -141,7 +142,7 @@ func (c *outputController) Set(outGrp outputs.Group) { } func makeWorkQueue() workQueue { - return workQueue(make(chan *Batch, 0)) + return workQueue(make(chan publisher.Batch, 0)) } // Reload the output diff --git a/libbeat/publisher/pipeline/controller_test.go b/libbeat/publisher/pipeline/controller_test.go new file mode 100644 index 00000000000..32bdc54109a --- /dev/null +++ b/libbeat/publisher/pipeline/controller_test.go @@ -0,0 +1,114 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+
+package pipeline
+
+import (
+	"sync"
+	"testing"
+	"testing/quick"
+	"time"
+
+	"github.com/elastic/beats/v7/libbeat/beat"
+	"github.com/elastic/beats/v7/libbeat/common/atomic"
+	"github.com/elastic/beats/v7/libbeat/logp"
+	"github.com/elastic/beats/v7/libbeat/outputs"
+	"github.com/elastic/beats/v7/libbeat/publisher"
+	"github.com/elastic/beats/v7/libbeat/publisher/queue"
+	"github.com/elastic/beats/v7/libbeat/publisher/queue/memqueue"
+	"github.com/elastic/beats/v7/libbeat/tests/resources"
+
+	"github.com/stretchr/testify/require"
+)
+
+func TestOutputReload(t *testing.T) {
+	tests := map[string]func(mockPublishFn) outputs.Client{
+		"client":         newMockClient,
+		"network_client": newMockNetworkClient,
+	}
+
+	for name, ctor := range tests {
+		t.Run(name, func(t *testing.T) {
+			seedPRNG(t)
+
+			goroutines := resources.NewGoroutinesChecker()
+			defer goroutines.Check(t)
+
+			err := quick.Check(func(q uint) bool {
+				numEventsToPublish := 15000 + (q % 500) // 15000 to 15499
+				numOutputReloads := 350 + (q % 150)     // 350 to 499
+
+				queueFactory := func(ackListener queue.ACKListener) (queue.Queue, error) {
+					return memqueue.NewQueue(
+						logp.L(),
+						memqueue.Settings{
+							ACKListener: ackListener,
+							Events:      int(numEventsToPublish),
+						}), nil
+				}
+
+				var publishedCount atomic.Uint
+				countingPublishFn := func(batch publisher.Batch) error {
+					publishedCount.Add(uint(len(batch.Events())))
+					return nil
+				}
+
+				pipeline, err := New(
+					beat.Info{},
+					Monitors{},
+					queueFactory,
+					outputs.Group{},
+					Settings{},
+				)
+				require.NoError(t, err)
+				defer pipeline.Close()
+
+				pipelineClient, err := pipeline.Connect()
+				require.NoError(t, err)
+				defer pipelineClient.Close()
+
+				var wg sync.WaitGroup
+				wg.Add(1)
+				go func() {
+					for i := uint(0); i < numEventsToPublish; i++ {
+						pipelineClient.Publish(beat.Event{})
+					}
+					wg.Done()
+				}()
+
+				for i := uint(0); i < numOutputReloads; i++ {
+					outputClient := ctor(countingPublishFn)
+					out := outputs.Group{
+						Clients: []outputs.Client{outputClient},
+					}
+					pipeline.output.Set(out)
+				}
+
+				wg.Wait()
+
+				timeout := 20 * time.Second
+				return waitUntilTrue(timeout, func() bool {
+					return uint(numEventsToPublish) == publishedCount.Load()
+				})
+			}, &quick.Config{MaxCount: 25})
+
+			if err != nil {
+				t.Error(err)
+			}
+		})
+	}
+}

diff --git a/libbeat/publisher/pipeline/output.go b/libbeat/publisher/pipeline/output.go
index 02ec2975db6..fa2ce73a28c 100644
--- a/libbeat/publisher/pipeline/output.go
+++ b/libbeat/publisher/pipeline/output.go
@@ -18,25 +18,27 @@
 package pipeline
 
 import (
-	"github.com/elastic/beats/v7/libbeat/common/atomic"
 	"github.com/elastic/beats/v7/libbeat/logp"
 	"github.com/elastic/beats/v7/libbeat/outputs"
 )
 
-// clientWorker manages output client of type outputs.Client, not supporting reconnect.
-type clientWorker struct {
+type worker struct {
+	id       uint
 	observer outputObserver
 	qu       workQueue
-	client   outputs.Client
-	closed   atomic.Bool
+	done     chan struct{}
+}
+
+// clientWorker manages output client of type outputs.Client, not supporting reconnect.
+type clientWorker struct {
+	worker
+	client outputs.Client
 }
 
 // netClientWorker manages reconnectable output clients of type outputs.NetworkClient.
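+// Its run loop tracks a connected flag: while disconnected it returns incoming
+// batches to the shared work queue via Cancelled so that other outputs can
+// pick them up, and a failed Publish simply flips the flag back so the next
+// batch triggers a reconnect.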
type netClientWorker struct { - observer outputObserver - qu workQueue - client outputs.NetworkClient - closed atomic.Bool + worker + client outputs.NetworkClient batchSize int batchSizer func() int @@ -44,96 +46,114 @@ type netClientWorker struct { } func makeClientWorker(observer outputObserver, qu workQueue, client outputs.Client) outputWorker { + w := worker{ + observer: observer, + qu: qu, + done: make(chan struct{}), + } + + var c interface { + outputWorker + run() + } + if nc, ok := client.(outputs.NetworkClient); ok { - c := &netClientWorker{ - observer: observer, - qu: qu, - client: nc, - logger: logp.NewLogger("publisher_pipeline_output"), + c = &netClientWorker{ + worker: w, + client: nc, + logger: logp.NewLogger("publisher_pipeline_output"), } - go c.run() - return c + } else { + c = &clientWorker{worker: w, client: client} } - c := &clientWorker{observer: observer, qu: qu, client: client} + go c.run() return c } +func (w *worker) close() { + close(w.done) +} + func (w *clientWorker) Close() error { - w.closed.Store(true) + w.worker.close() return w.client.Close() } func (w *clientWorker) run() { - for !w.closed.Load() { - for batch := range w.qu { - if w.closed.Load() { - if batch != nil { - batch.Cancelled() - } - return - } + for { + // We wait for either the worker to be closed or for there to be a batch of + // events to publish. + select { + + case <-w.done: + return - w.observer.outBatchSend(len(batch.events)) + case batch := <-w.qu: + if batch == nil { + continue + } + w.observer.outBatchSend(len(batch.Events())) if err := w.client.Publish(batch); err != nil { - break + return } } } } func (w *netClientWorker) Close() error { - w.closed.Store(true) + w.worker.close() return w.client.Close() } func (w *netClientWorker) run() { - for !w.closed.Load() { - reconnectAttempts := 0 - - // start initial connect loop from first batch, but return - // batch to pipeline for other outputs to catch up while we're trying to connect - for batch := range w.qu { - batch.Cancelled() + var ( + connected = false + reconnectAttempts = 0 + ) - if w.closed.Load() { - w.logger.Infof("Closed connection to %v", w.client) - return - } + for { + // We wait for either the worker to be closed or for there to be a batch of + // events to publish. 
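+		// A nil batch can be read here once the shared work queue has been
+		// closed during controller shutdown, hence the guard below.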
+ select { - if reconnectAttempts > 0 { - w.logger.Infof("Attempting to reconnect to %v with %d reconnect attempt(s)", w.client, reconnectAttempts) - } else { - w.logger.Infof("Connecting to %v", w.client) - } + case <-w.done: + return - err := w.client.Connect() - if err != nil { - w.logger.Errorf("Failed to connect to %v: %v", w.client, err) - reconnectAttempts++ + case batch := <-w.qu: + if batch == nil { continue } - w.logger.Infof("Connection to %v established", w.client) - reconnectAttempts = 0 - break - } + // Try to (re)connect so we can publish batch + if !connected { + // Return batch to other output workers while we try to (re)connect + batch.Cancelled() - // send loop - for batch := range w.qu { - if w.closed.Load() { - if batch != nil { - batch.Cancelled() + if reconnectAttempts == 0 { + w.logger.Infof("Connecting to %v", w.client) + } else { + w.logger.Infof("Attempting to reconnect to %v with %d reconnect attempt(s)", w.client, reconnectAttempts) } - return + + err := w.client.Connect() + connected = err == nil + if connected { + w.logger.Infof("Connection to %v established", w.client) + reconnectAttempts = 0 + } else { + w.logger.Errorf("Failed to connect to %v: %v", w.client, err) + reconnectAttempts++ + } + + continue } - err := w.client.Publish(batch) - if err != nil { + if err := w.client.Publish(batch); err != nil { w.logger.Errorf("Failed to publish events: %v", err) // on error return to connect loop - break + connected = false } } } diff --git a/libbeat/publisher/pipeline/output_test.go b/libbeat/publisher/pipeline/output_test.go index d89c166ee15..5f471ddf396 100644 --- a/libbeat/publisher/pipeline/output_test.go +++ b/libbeat/publisher/pipeline/output_test.go @@ -18,9 +18,7 @@ package pipeline import ( - "flag" "math" - "math/rand" "sync" "testing" "testing/quick" @@ -32,11 +30,6 @@ import ( "github.com/elastic/beats/v7/libbeat/logp" "github.com/elastic/beats/v7/libbeat/outputs" "github.com/elastic/beats/v7/libbeat/publisher" - "github.com/elastic/beats/v7/libbeat/publisher/queue" -) - -var ( - SeedFlag = flag.Int64("seed", 0, "Randomization seed") ) func TestMakeClientWorker(t *testing.T) { @@ -51,6 +44,11 @@ func TestMakeClientWorker(t *testing.T) { err := quick.Check(func(i uint) bool { numBatches := 300 + (i % 100) // between 300 and 399 + numEvents := atomic.MakeUint(0) + + wqu := makeWorkQueue() + retryer := newRetryer(logp.NewLogger("test"), nilObserver, wqu, nil) + defer retryer.close() var published atomic.Uint publishFn := func(batch publisher.Batch) error { @@ -58,13 +56,13 @@ func TestMakeClientWorker(t *testing.T) { return nil } - wqu := makeWorkQueue() client := ctor(publishFn) - makeClientWorker(nilObserver, wqu, client) - numEvents := atomic.MakeUint(0) - for batchIdx := uint(0); batchIdx <= numBatches; batchIdx++ { - batch := randomBatch(50, 150, wqu) + worker := makeClientWorker(nilObserver, wqu, client) + defer worker.Close() + + for i := uint(0); i < numBatches; i++ { + batch := randomBatch(50, 150).withRetryer(retryer) numEvents.Add(uint(len(batch.Events()))) wqu <- batch } @@ -85,13 +83,14 @@ func TestMakeClientWorker(t *testing.T) { } } -func TestMakeClientWorkerAndClose(t *testing.T) { +func TestReplaceClientWorker(t *testing.T) { tests := map[string]func(mockPublishFn) outputs.Client{ "client": newMockClient, "network_client": newMockNetworkClient, } const minEventsInBatch = 50 + const maxEventsInBatch = 150 for name, ctor := range tests { t.Run(name, func(t *testing.T) { @@ -101,21 +100,28 @@ func TestMakeClientWorkerAndClose(t 
*testing.T) { numBatches := 1000 + (i % 100) // between 1000 and 1099 wqu := makeWorkQueue() - numEvents := atomic.MakeUint(0) + retryer := newRetryer(logp.NewLogger("test"), nilObserver, wqu, nil) + defer retryer.close() + + var batches []publisher.Batch + var numEvents int + for i := uint(0); i < numBatches; i++ { + batch := randomBatch(minEventsInBatch, maxEventsInBatch).withRetryer(retryer) + numEvents += batch.Len() + batches = append(batches, batch) + } var wg sync.WaitGroup wg.Add(1) go func() { defer wg.Done() - for batchIdx := uint(0); batchIdx <= numBatches; batchIdx++ { - batch := randomBatch(minEventsInBatch, 150, wqu) - numEvents.Add(uint(len(batch.Events()))) + for _, batch := range batches { wqu <- batch } }() // Publish at least 1 batch worth of events but no more than 20% events - publishLimit := uint(math.Max(minEventsInBatch, float64(numEvents.Load())*0.2)) + publishLimit := uint(math.Max(minEventsInBatch, float64(numEvents)*0.2)) var publishedFirst atomic.Uint blockCtrl := make(chan struct{}) @@ -145,6 +151,7 @@ func TestMakeClientWorkerAndClose(t *testing.T) { // Close worker before all batches have had time to be published err := worker.Close() require.NoError(t, err) + close(blockCtrl) // Start new worker to drain work queue @@ -161,7 +168,7 @@ func TestMakeClientWorkerAndClose(t *testing.T) { // Make sure that all events have eventually been published timeout = 20 * time.Second return waitUntilTrue(timeout, func() bool { - return numEvents.Load() == publishedFirst.Load()+publishedLater.Load() + return numEvents == int(publishedFirst.Load()+publishedLater.Load()) }) }, &quick.Config{MaxCount: 25}) @@ -171,91 +178,3 @@ func TestMakeClientWorkerAndClose(t *testing.T) { }) } } - -type mockPublishFn func(publisher.Batch) error - -func newMockClient(publishFn mockPublishFn) outputs.Client { - return &mockClient{publishFn: publishFn} -} - -type mockClient struct { - publishFn mockPublishFn -} - -func (c *mockClient) String() string { return "mock_client" } -func (c *mockClient) Close() error { return nil } -func (c *mockClient) Publish(batch publisher.Batch) error { - return c.publishFn(batch) -} - -func newMockNetworkClient(publishFn mockPublishFn) outputs.Client { - return &mockNetworkClient{newMockClient(publishFn)} -} - -type mockNetworkClient struct { - outputs.Client -} - -func (c *mockNetworkClient) Connect() error { return nil } - -type mockQueue struct{} - -func (q mockQueue) Close() error { return nil } -func (q mockQueue) BufferConfig() queue.BufferConfig { return queue.BufferConfig{} } -func (q mockQueue) Producer(cfg queue.ProducerConfig) queue.Producer { return mockProducer{} } -func (q mockQueue) Consumer() queue.Consumer { return mockConsumer{} } - -type mockProducer struct{} - -func (p mockProducer) Publish(event publisher.Event) bool { return true } -func (p mockProducer) TryPublish(event publisher.Event) bool { return true } -func (p mockProducer) Cancel() int { return 0 } - -type mockConsumer struct{} - -func (c mockConsumer) Get(eventCount int) (queue.Batch, error) { return &Batch{}, nil } -func (c mockConsumer) Close() error { return nil } - -func randomBatch(min, max int, wqu workQueue) *Batch { - numEvents := randIntBetween(min, max) - events := make([]publisher.Event, numEvents) - - consumer := newEventConsumer(logp.L(), mockQueue{}, &batchContext{}) - retryer := newRetryer(logp.L(), nilObserver, wqu, consumer) - - batch := Batch{ - events: events, - ctx: &batchContext{ - observer: nilObserver, - retryer: retryer, - }, - } - - return &batch -} - 
-// randIntBetween returns a random integer in [min, max) -func randIntBetween(min, max int) int { - return rand.Intn(max-min) + min -} - -func seedPRNG(t *testing.T) { - seed := *SeedFlag - if seed == 0 { - seed = time.Now().UnixNano() - } - - t.Logf("reproduce test with `go test ... -seed %v`", seed) - rand.Seed(seed) -} - -func waitUntilTrue(duration time.Duration, fn func() bool) bool { - end := time.Now().Add(duration) - for time.Now().Before(end) { - if fn() { - return true - } - time.Sleep(1 * time.Millisecond) - } - return false -} diff --git a/libbeat/publisher/pipeline/retry.go b/libbeat/publisher/pipeline/retry.go index a65a7d227c8..0d724e80278 100644 --- a/libbeat/publisher/pipeline/retry.go +++ b/libbeat/publisher/pipeline/retry.go @@ -36,7 +36,7 @@ type retryer struct { done chan struct{} - consumer *eventConsumer + consumer interruptor sig chan retryerSignal out workQueue @@ -44,6 +44,11 @@ type retryer struct { doneWaiter sync.WaitGroup } +type interruptor interface { + sigWait() + sigUnWait() +} + type retryQueue chan batchEvent type retryerSignal struct { @@ -53,7 +58,7 @@ type retryerSignal struct { type batchEvent struct { tag retryerBatchTag - batch *Batch + batch Batch } type retryerEventTag uint8 @@ -75,7 +80,7 @@ func newRetryer( log *logp.Logger, observer outputObserver, out workQueue, - c *eventConsumer, + c interruptor, ) *retryer { r := &retryer{ logger: log, @@ -106,18 +111,11 @@ func (r *retryer) sigOutputRemoved() { r.sig <- retryerSignal{tag: sigRetryerOutputRemoved} } -func (r *retryer) updOutput(ch workQueue) { - r.sig <- retryerSignal{ - tag: sigRetryerUpdateOutput, - channel: ch, - } -} - -func (r *retryer) retry(b *Batch) { +func (r *retryer) retry(b Batch) { r.in <- batchEvent{tag: retryBatch, batch: b} } -func (r *retryer) cancelled(b *Batch) { +func (r *retryer) cancelled(b Batch) { r.in <- batchEvent{tag: cancelledBatch, batch: b} } @@ -127,9 +125,9 @@ func (r *retryer) loop() { out workQueue consumerBlocked bool - active *Batch + active Batch activeSize int - buffer []*Batch + buffer []Batch numOutputs int log = r.logger @@ -144,21 +142,22 @@ func (r *retryer) loop() { countFailed int countDropped int batch = evt.batch - countRetry = len(batch.events) + countRetry = len(batch.Events()) + alive = true ) if evt.tag == retryBatch { - countFailed = len(batch.events) + countFailed = len(batch.Events()) r.observer.eventsFailed(countFailed) - decBatch(batch) + alive = batch.reduceTTL() - countRetry = len(batch.events) + countRetry = len(batch.Events()) countDropped = countFailed - countRetry r.observer.eventsDropped(countDropped) } - if len(batch.events) == 0 { + if !alive { log.Info("Drop batch") batch.Drop() } else { @@ -166,14 +165,9 @@ func (r *retryer) loop() { buffer = append(buffer, batch) out = r.out active = buffer[0] - activeSize = len(active.events) + activeSize = len(active.Events()) if !consumerBlocked { - consumerBlocked = blockConsumer(numOutputs, len(buffer)) - if consumerBlocked { - log.Info("retryer: send wait signal to consumer") - r.consumer.sigWait() - log.Info(" done") - } + consumerBlocked = r.checkConsumerBlock(numOutputs, len(buffer)) } } @@ -187,51 +181,53 @@ func (r *retryer) loop() { out = nil } else { active = buffer[0] - activeSize = len(active.events) + activeSize = len(active.Events()) } if consumerBlocked { - consumerBlocked = blockConsumer(numOutputs, len(buffer)) - if !consumerBlocked { - log.Info("retryer: send unwait-signal to consumer") - r.consumer.sigUnWait() - log.Info(" done") - } + consumerBlocked = 
r.checkConsumerBlock(numOutputs, len(buffer)) } case sig := <-r.sig: switch sig.tag { - case sigRetryerUpdateOutput: - r.out = sig.channel case sigRetryerOutputAdded: numOutputs++ + if consumerBlocked { + consumerBlocked = r.checkConsumerBlock(numOutputs, len(buffer)) + } case sigRetryerOutputRemoved: numOutputs-- + if !consumerBlocked { + consumerBlocked = r.checkConsumerBlock(numOutputs, len(buffer)) + } } } } } -func blockConsumer(numOutputs, numBatches int) bool { - return numBatches/3 >= numOutputs -} - -func decBatch(batch *Batch) { - if batch.ttl <= 0 { - return +func (r *retryer) checkConsumerBlock(numOutputs, numBatches int) bool { + consumerBlocked := blockConsumer(numOutputs, numBatches) + if r.consumer == nil { + return consumerBlocked } - batch.ttl-- - if batch.ttl > 0 { - return - } - - // filter for evens with guaranteed send flags - events := batch.events[:0] - for _, event := range batch.events { - if event.Guaranteed() { - events = append(events, event) + if consumerBlocked { + r.logger.Info("retryer: send wait signal to consumer") + if r.consumer != nil { + r.consumer.sigWait() + } + r.logger.Info(" done") + } else { + r.logger.Info("retryer: send unwait signal to consumer") + if r.consumer != nil { + r.consumer.sigUnWait() } + r.logger.Info(" done") } - batch.events = events + + return consumerBlocked +} + +func blockConsumer(numOutputs, numBatches int) bool { + return numBatches/3 >= numOutputs } diff --git a/libbeat/publisher/pipeline/testing.go b/libbeat/publisher/pipeline/testing.go new file mode 100644 index 00000000000..0db2780ba56 --- /dev/null +++ b/libbeat/publisher/pipeline/testing.go @@ -0,0 +1,179 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package pipeline + +import ( + "flag" + "math/rand" + "sync" + "testing" + "time" + + "github.com/elastic/beats/v7/libbeat/outputs" + "github.com/elastic/beats/v7/libbeat/publisher" + "github.com/elastic/beats/v7/libbeat/publisher/queue" +) + +var ( + SeedFlag = flag.Int64("seed", 0, "Randomization seed") +) + +type mockPublishFn func(publisher.Batch) error + +func newMockClient(publishFn mockPublishFn) outputs.Client { + return &mockClient{publishFn: publishFn} +} + +type mockClient struct { + publishFn mockPublishFn +} + +func (c *mockClient) String() string { return "mock_client" } +func (c *mockClient) Close() error { return nil } +func (c *mockClient) Publish(batch publisher.Batch) error { + return c.publishFn(batch) +} + +func newMockNetworkClient(publishFn mockPublishFn) outputs.Client { + return &mockNetworkClient{newMockClient(publishFn)} +} + +type mockNetworkClient struct { + outputs.Client +} + +func (c *mockNetworkClient) Connect() error { return nil } + +type mockQueue struct{} + +func (q mockQueue) Close() error { return nil } +func (q mockQueue) BufferConfig() queue.BufferConfig { return queue.BufferConfig{} } +func (q mockQueue) Producer(cfg queue.ProducerConfig) queue.Producer { return mockProducer{} } +func (q mockQueue) Consumer() queue.Consumer { return mockConsumer{} } + +type mockProducer struct{} + +func (p mockProducer) Publish(event publisher.Event) bool { return true } +func (p mockProducer) TryPublish(event publisher.Event) bool { return true } +func (p mockProducer) Cancel() int { return 0 } + +type mockConsumer struct{} + +func (c mockConsumer) Get(eventCount int) (queue.Batch, error) { return &batch{}, nil } +func (c mockConsumer) Close() error { return nil } + +type mockBatch struct { + mu sync.Mutex + events []publisher.Event + + onEvents func() + onACK func() + onDrop func() + onRetry func() + onCancelled func() + onReduceTTL func() bool +} + +func (b *mockBatch) Events() []publisher.Event { + b.mu.Lock() + defer b.mu.Unlock() + signalFn(b.onEvents) + return b.events +} + +func (b *mockBatch) ACK() { signalFn(b.onACK) } +func (b *mockBatch) Drop() { signalFn(b.onDrop) } +func (b *mockBatch) Retry() { signalFn(b.onRetry) } +func (b *mockBatch) Cancelled() { signalFn(b.onCancelled) } +func (b *mockBatch) RetryEvents(events []publisher.Event) { + b.updateEvents(events) + signalFn(b.onRetry) +} + +func (b *mockBatch) reduceTTL() bool { + if b.onReduceTTL != nil { + return b.onReduceTTL() + } + return true +} + +func (b *mockBatch) CancelledEvents(events []publisher.Event) { + b.updateEvents(events) + signalFn(b.onCancelled) +} + +func (b *mockBatch) updateEvents(events []publisher.Event) { + b.mu.Lock() + defer b.mu.Unlock() + b.events = events +} + +func (b *mockBatch) Len() int { + b.mu.Lock() + defer b.mu.Unlock() + return len(b.events) +} + +func (b *mockBatch) withRetryer(r *retryer) *mockBatch { + return &mockBatch{ + events: b.events, + onACK: b.onACK, + onDrop: b.onDrop, + onRetry: func() { r.retry(b) }, + onCancelled: func() { r.cancelled(b) }, + onReduceTTL: b.onReduceTTL, + } +} + +func signalFn(fn func()) { + if fn != nil { + fn() + } +} + +func randomBatch(min, max int) *mockBatch { + return &mockBatch{ + events: make([]publisher.Event, randIntBetween(min, max)), + } +} + +// randIntBetween returns a random integer in [min, max) +func randIntBetween(min, max int) int { + return rand.Intn(max-min) + min +} + +func seedPRNG(t *testing.T) { + seed := *SeedFlag + if seed == 0 { + seed = time.Now().UnixNano() + } + + t.Logf("reproduce test with `go 
test ... -seed %v`", seed) + rand.Seed(seed) +} + +func waitUntilTrue(duration time.Duration, fn func() bool) bool { + end := time.Now().Add(duration) + for time.Now().Before(end) { + if fn() { + return true + } + time.Sleep(10 * time.Millisecond) + } + return false +} diff --git a/libbeat/scripts/Makefile b/libbeat/scripts/Makefile index 97df237f194..f634e9fd40f 100755 --- a/libbeat/scripts/Makefile +++ b/libbeat/scripts/Makefile @@ -128,7 +128,7 @@ endif # # Includes # -include $(ES_BEATS)/dev-tools/make/mage.mk +include $(ES_BEATS)/dev-tools/make/mage-install.mk ### BUILDING ### @@ -204,7 +204,8 @@ prepare-tests: .PHONY: unit-tests unit-tests: ## @testing Runs the unit tests with coverage. Race is not enabled for unit tests because tests run much slower. unit-tests: prepare-tests - $(COVERAGE_TOOL) $(RACE) -coverprofile=${COVERAGE_DIR}/unit.cov ${GOPACKAGES} + GOFLAGS="${INSTALL_FLAG}" \ + $(COVERAGE_TOOL) $(RACE) -coverprofile=${COVERAGE_DIR}/unit.cov ${GOPACKAGES} .PHONY: unit unit: ## @testing Runs the unit tests without coverage reports. @@ -214,7 +215,8 @@ unit: ## @testing Runs the unit tests without coverage reports. integration-tests: ## @testing Run integration tests. Unit tests are run as part of the integration tests. integration-tests: prepare-tests mage rm -f docker-compose.yml.lock - $(COVERAGE_TOOL) -tags=integration $(RACE) -coverprofile=${COVERAGE_DIR}/integration.cov ${GOPACKAGES} + GOFLAGS="${INSTALL_FLAG}" \ + $(COVERAGE_TOOL) -tags=integration $(RACE) -coverprofile=${COVERAGE_DIR}/integration.cov ${GOPACKAGES} .PHONY: integration-tests-environment integration-tests-environment: ## @testing Runs the integration inside a virtual environment. This can be run on any docker-machine (local, remote) @@ -228,7 +230,7 @@ integration-tests-environment: prepare-tests build-image -e RACE_DETECTOR=$(RACE_DETECTOR) \ -e DOCKER_COMPOSE_PROJECT_NAME=${DOCKER_COMPOSE_PROJECT_NAME} \ -e TEST_ENVIRONMENT=${TEST_ENVIRONMENT} \ - -e BEATS_DOCKER_INTEGRATION_TEST_ENV=${BEATS_DOCKER_INTEGRATION_TEST_ENV} \ + -e BEATS_INSIDE_INTEGRATION_TEST_ENV=${BEATS_INSIDE_INTEGRATION_TEST_ENV} \ -e GOFLAGS=${INSTALL_FLAG} \ beat make integration-tests @@ -359,7 +361,7 @@ update: python-env fields collect config ## @build Update expects the most recen ifneq ($(shell [[ $(BEAT_NAME) == libbeat || $(BEAT_NAME) == metricbeat ]] && echo true ),true) mkdir -p include - go run ${ES_BEATS}/dev-tools/cmd/asset/asset.go -license $(LICENSE) -pkg include -in fields.yml -out include/fields.go $(BEAT_NAME) + go run ${INSTALL_FLAG} ${ES_BEATS}/dev-tools/cmd/asset/asset.go -license $(LICENSE) -pkg include -in fields.yml -out include/fields.go $(BEAT_NAME) endif ifneq ($(shell [[ $(BEAT_NAME) == libbeat || $(BEAT_NAME) == metricbeat ]] && echo true ),true) diff --git a/libbeat/tests/system/keystore.py b/libbeat/tests/system/keystore.py index 5ba96a4d58a..12418ba5a73 100644 --- a/libbeat/tests/system/keystore.py +++ b/libbeat/tests/system/keystore.py @@ -12,14 +12,17 @@ def add_secret(self, key, value="hello world\n", force=False): """ Add new secret using the --stdin option """ - args = [self.test_binary, - "-systemTest", + args = [self.test_binary, "-systemTest"] + if os.getenv("TEST_COVERAGE") == "true": + args += [ "-test.coverprofile", os.path.join(self.working_dir, "coverage.cov"), - "-c", os.path.join(self.working_dir, "mockbeat.yml"), - "-e", "-v", "-d", "*", - "keystore", "add", key, "--stdin", - ] + ] + args += [ + "-c", os.path.join(self.working_dir, "mockbeat.yml"), + "-e", "-v", "-d", "*", + "keystore", 
"add", key, "--stdin", + ] if force: args.append("--force") diff --git a/libbeat/tests/system/requirements.txt b/libbeat/tests/system/requirements.txt index 52c36033406..f979754ed33 100644 --- a/libbeat/tests/system/requirements.txt +++ b/libbeat/tests/system/requirements.txt @@ -32,3 +32,4 @@ jsondiff==1.1.2 semver==2.8.1 stomp.py==4.1.22 deepdiff==4.2.0 +kafka-python==1.4.3 diff --git a/metricbeat/Makefile b/metricbeat/Makefile index fd2c60935e7..7a05a566775 100644 --- a/metricbeat/Makefile +++ b/metricbeat/Makefile @@ -1,85 +1,8 @@ -# Name can be overwritten, as Metricbeat is also a library -BEAT_NAME?=metricbeat -BEAT_TITLE?=Metricbeat -SYSTEM_TESTS?=true -TEST_ENVIRONMENT?=true -BEATS_DOCKER_INTEGRATION_TEST_ENV?=true -ES_BEATS?=.. +ES_BEATS ?= .. -# Metricbeat can only be cross-compiled on platforms not requiring CGO. -GOX_OS=netbsd linux windows -GOX_FLAGS=-arch="amd64 386 arm ppc64 ppc64le" - -DOCS_BRANCH=$(shell grep doc-branch ../libbeat/docs/version.asciidoc | cut -c 14-) - -include ${ES_BEATS}/libbeat/scripts/Makefile - -# Collects all module dashboards -.PHONY: kibana -kibana: - @rm -rf _meta/kibana.generated - @mkdir -p _meta/kibana.generated - @-cp -pr module/*/_meta/kibana/* _meta/kibana.generated - -# Collects all module docs -.PHONY: collect-docs -collect-docs: - mage CollectAll - -# Collects all module configs -.PHONY: configs -configs: python-env - @mkdir -p _meta - @cp ${ES_BEATS}/metricbeat/_meta/common.yml _meta/beat.yml - @cat ${ES_BEATS}/metricbeat/_meta/setup.yml >> _meta/beat.yml - @cat ${ES_BEATS}/metricbeat/_meta/common.reference.yml > _meta/beat.reference.yml - @${PYTHON_ENV_EXE} ${ES_BEATS}/script/config_collector.py --beat ${BEAT_NAME} --full $(PWD) >> _meta/beat.reference.yml - @rm -rf modules.d - mage config - @chmod go-w modules.d/* - @# Enable system by default: - @if [ -f modules.d/system.yml.disabled ]; then mv modules.d/system.yml.disabled modules.d/system.yml; fi - -# Generates imports for all modules and metricsets -.PHONY: imports -imports: - @mkdir -p include - mage imports - -# Runs all collection steps and updates afterwards -.PHONY: collect -collect: assets collect-docs configs kibana imports +include $(ES_BEATS)/dev-tools/make/mage.mk # Creates a new metricset. Requires the params MODULE and METRICSET .PHONY: create-metricset -create-metricset: python-env - @${PYTHON_ENV_EXE} ${ES_BEATS}/metricbeat/scripts/create_metricset.py --path=$(PWD) --es_beats=$(ES_BEATS) --module=$(MODULE) --metricset=$(METRICSET) - -# Generates the data.json example documents -.PHONY: generate-json -generate-json: build-image - ${DOCKER_COMPOSE} run beat go test -tags=integration github.com/elastic/beats/metricbeat/module/... -data - -.PHONY: run-module -run-module: ## @testing Runs the given module with exposing the port. Needs $MODULE and $PORT as param -run-module: - ${DOCKER_COMPOSE} build ${MODULE} - ${DOCKER_COMPOSE} run -p ${PORT}:${PORT} ${MODULE} - -.PHONY: test-module -test-module: ## @testing Tests the given module. Needs $MODULE as param an run-module must be started first. -test-module: python-env update metricbeat.test - go test -tags=integration ${BEAT_PATH}/module/${MODULE}/... -v - . 
${PYTHON_ENV}/bin/activate && INTEGRATION_TESTS=1 nosetests module/${MODULE} - -.PHONY: assets -assets: - go run ${INSTALL_FLAG} ${ES_BEATS}/metricbeat/scripts/assets/assets.go ${ES_BEATS}/metricbeat/module - mkdir -p include/fields - go run ${INSTALL_FLAG} ${ES_BEATS}/libbeat/scripts/cmd/global_fields/main.go -es_beats_path ${ES_BEATS} -beat_path ${PWD} | go run ${ES_BEATS}/dev-tools/cmd/asset/asset.go -license ${LICENSE} -out ./include/fields/fields.go -pkg include -priority asset.LibbeatFieldsPri ${ES_BEATS}/libbeat/fields.yml $(BEAT_NAME) - -.PHONY: integration-tests -integration-tests: ## @testing Run golang integration tests. -integration-tests: prepare-tests mage - rm -f docker-compose.yml.lock - mage goIntegTest +create-metricset: + mage createMetricset diff --git a/metricbeat/autodiscover/builder/hints/metrics.go b/metricbeat/autodiscover/builder/hints/metrics.go index 3e859b23d11..1647fb9fbc7 100644 --- a/metricbeat/autodiscover/builder/hints/metrics.go +++ b/metricbeat/autodiscover/builder/hints/metrics.go @@ -84,6 +84,7 @@ func (m *metricHints) CreateConfig(event bus.Event) []*common.Config { } modulesConfig := m.getModules(hints) + // here we handle raw configs if provided if modulesConfig != nil { configs := []*common.Config{} for _, cfg := range modulesConfig { @@ -93,7 +94,7 @@ func (m *metricHints) CreateConfig(event bus.Event) []*common.Config { } logp.Debug("hints.builder", "generated config %+v", configs) // Apply information in event to the template to generate the final config - return template.ApplyConfigTemplate(event, configs) + return template.ApplyConfigTemplate(event, configs, false) } @@ -154,7 +155,7 @@ func (m *metricHints) CreateConfig(event bus.Event) []*common.Config { // Apply information in event to the template to generate the final config // This especially helps in a scenario where endpoints are configured as: // co.elastic.metrics/hosts= "${data.host}:9090" - return template.ApplyConfigTemplate(event, config) + return template.ApplyConfigTemplate(event, config, false) } func (m *metricHints) getModule(hints common.MapStr) string { diff --git a/metricbeat/autodiscover/builder/hints/metrics_test.go b/metricbeat/autodiscover/builder/hints/metrics_test.go index f3f9d5a5200..4b3f7e0430b 100644 --- a/metricbeat/autodiscover/builder/hints/metrics_test.go +++ b/metricbeat/autodiscover/builder/hints/metrics_test.go @@ -18,13 +18,17 @@ package hints import ( + "os" + "path/filepath" "sort" "testing" + "github.com/docker/docker/pkg/ioutils" "github.com/stretchr/testify/assert" "github.com/elastic/beats/v7/libbeat/common" "github.com/elastic/beats/v7/libbeat/common/bus" + "github.com/elastic/beats/v7/libbeat/keystore" "github.com/elastic/beats/v7/metricbeat/mb" ) @@ -324,6 +328,78 @@ func TestGenerateHints(t *testing.T) { } } +func TestGenerateHintsDoesNotAccessKeystore(t *testing.T) { + path := getTemporaryKeystoreFile() + defer os.Remove(path) + // store the secret + keystore := createAnExistingKeystore(path, "stored_secret") + os.Setenv("PASSWORD", "env_secret") + + tests := []struct { + message string + event bus.Event + len int + result common.MapStr + }{ + { + message: "Module, namespace, host hint should return valid config", + event: bus.Event{ + "host": "1.2.3.4", + "port": 9090, + "hints": common.MapStr{ + "metrics": common.MapStr{ + "module": "mockmoduledefaults", + "hosts": "${data.host}:9090", + "password": "${PASSWORD}", + }, + }, + "keystore": keystore, + }, + len: 1, + result: common.MapStr{ + "module": "mockmoduledefaults", + "metricsets": 
[]string{"default"},
+				"hosts":      []interface{}{"1.2.3.4:9090"},
+				"timeout":    "3s",
+				"period":     "1m",
+				"enabled":    true,
+				"password":   "env_secret",
+			},
+		},
+	}
+	for _, test := range tests {
+		mockRegister := mb.NewRegister()
+		mockRegister.MustAddMetricSet("mockmoduledefaults", "default", NewMockMetricSet, mb.DefaultMetricSet())
+
+		m := metricHints{
+			Key:      defaultConfig().Key,
+			Registry: mockRegister,
+		}
+		cfgs := m.CreateConfig(test.event)
+		assert.Equal(t, len(cfgs), test.len)
+		if len(cfgs) != 0 {
+			config := common.MapStr{}
+			err := cfgs[0].Unpack(&config)
+			assert.Nil(t, err, test.message)
+
+			// metricsets order is random, order it for tests
+			if v, err := config.GetValue("metricsets"); err == nil {
+				if msets, ok := v.([]interface{}); ok {
+					metricsets := make([]string, len(msets))
+					for i, v := range msets {
+						metricsets[i] = v.(string)
+					}
+					sort.Strings(metricsets)
+					config["metricsets"] = metricsets
+				}
+			}
+
+			assert.Equal(t, test.result, config, test.message)
+		}
+
+	}
+}
+
 type MockMetricSet struct {
 	mb.BaseMetricSet
 }
@@ -335,3 +411,31 @@ func NewMockMetricSet(base mb.BaseMetricSet) (mb.MetricSet, error) {
 
 func (ms *MockMetricSet) Fetch(report mb.Reporter) {
 
 }
+
+// create a keystore with an existing key
+// `PASSWORD` holding the value of the `secret` argument.
+func createAnExistingKeystore(path string, secret string) keystore.Keystore {
+	keyStore, err := keystore.NewFileKeystore(path)
+	// Fail fast in the test suite
+	if err != nil {
+		panic(err)
+	}
+
+	writableKeystore, err := keystore.AsWritableKeystore(keyStore)
+	if err != nil {
+		panic(err)
+	}
+
+	writableKeystore.Store("PASSWORD", []byte(secret))
+	writableKeystore.Save()
+	return keyStore
+}
+
+// create a temporary file on disk to save the keystore.
+func getTemporaryKeystoreFile() string {
+	path, err := ioutils.TempDir("", "testing")
+	if err != nil {
+		panic(err)
+	}
+	return filepath.Join(path, "keystore")
+}
diff --git a/metricbeat/beater/metricbeat.go b/metricbeat/beater/metricbeat.go
index 050e7f265c1..fbf9d23110f 100644
--- a/metricbeat/beater/metricbeat.go
+++ b/metricbeat/beater/metricbeat.go
@@ -181,7 +181,12 @@ func newMetricbeat(b *beat.Beat, c *common.Config, options ...Option) (*Metricbe
 	if config.Autodiscover != nil {
 		var err error
 		metricbeat.autodiscover, err = autodiscover.NewAutodiscover(
-			"metricbeat", b.Publisher, factory, autodiscover.QueryConfig(), config.Autodiscover)
+			"metricbeat",
+			b.Publisher,
+			factory, autodiscover.QueryConfig(),
+			config.Autodiscover,
+			b.Keystore,
+		)
 		if err != nil {
 			return nil, err
 		}
diff --git a/metricbeat/docs/autodiscover-docker-config.asciidoc b/metricbeat/docs/autodiscover-docker-config.asciidoc
index 79dce08e7a6..bd026ec0c69 100644
--- a/metricbeat/docs/autodiscover-docker-config.asciidoc
+++ b/metricbeat/docs/autodiscover-docker-config.asciidoc
@@ -19,3 +19,24 @@ metricbeat.autodiscover:
 
 This configuration launches a `redis` module for all containers running an image with `redis` in the name.
 `labels.dedot` defaults to be `true` for docker autodiscover, which means dots in docker labels are replaced with '_' by default.
+Metricbeat autodiscover also supports leveraging <<keystore>> in order to retrieve sensitive data like passwords.
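+For instance, the key referenced in the example below can be created with the Beat's `keystore`
+subcommand before Metricbeat is started (a minimal sketch; the secret value is a placeholder):
+
+["source","sh",subs="attributes"]
+-------------------------------------------------------------------------------------
+metricbeat keystore create
+echo -n "mysecretpassword" | metricbeat keystore add REDIS_PASSWORD --stdin
+-------------------------------------------------------------------------------------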
+Here is an example of what a configuration using the keystore looks like:
+
+["source","yaml",subs="attributes"]
+-------------------------------------------------------------------------------------
+metricbeat.autodiscover:
+  providers:
+    - type: docker
+      labels.dedot: true
+      templates:
+        - condition:
+            contains:
+              docker.container.image: redis
+          config:
+            - module: redis
+              metricsets: ["info", "keyspace"]
+              hosts: "${data.host}:6379"
+              password: "${REDIS_PASSWORD}"
+-------------------------------------------------------------------------------------
+
+where `REDIS_PASSWORD` is a key stored in the local keystore of Metricbeat.
diff --git a/metricbeat/docs/autodiscover-hints.asciidoc b/metricbeat/docs/autodiscover-hints.asciidoc
index 1bc56d8f54d..a34b623bd36 100644
--- a/metricbeat/docs/autodiscover-hints.asciidoc
+++ b/metricbeat/docs/autodiscover-hints.asciidoc
@@ -43,7 +43,9 @@ The username to use for authentication
 [float]
 ===== `co.elastic.metrics/password`
 
-The password to use for authentication. It is recommended to retrieve this sensitive information from an ENV variable or a keystore and avoid placing passwords in plain text
+The password to use for authentication. It is recommended to retrieve this sensitive information from an ENV variable
+and avoid placing passwords in plain text. Unlike static autodiscover configuration, hints-based autodiscover has
+no access to the keystore of Metricbeat, since that could be a potential security issue.
 
 [float]
 ===== `co.elastic.metrics/ssl.*`
diff --git a/metricbeat/docs/autodiscover-kubernetes-config.asciidoc b/metricbeat/docs/autodiscover-kubernetes-config.asciidoc
index 81e4ad15949..f3e6a74cdfb 100644
--- a/metricbeat/docs/autodiscover-kubernetes-config.asciidoc
+++ b/metricbeat/docs/autodiscover-kubernetes-config.asciidoc
@@ -17,3 +17,24 @@ metricbeat.autodiscover:
 -------------------------------------------------------------------------------------
 
 This configuration launches a `prometheus` module for all containers of pods annotated `prometheus.io/scrape=true`.
+
+Metricbeat autodiscover also supports leveraging <<keystore>> in order to retrieve sensitive data like passwords.
+Here is an example of what a configuration using the keystore looks like:
+
+["source","yaml",subs="attributes"]
+-------------------------------------------------------------------------------------
+metricbeat.autodiscover:
+  providers:
+    - type: kubernetes
+      templates:
+        - condition:
+            contains:
+              kubernetes.labels.app: "redis"
+          config:
+            - module: redis
+              metricsets: ["info", "keyspace"]
+              hosts: "${data.host}:6379"
+              password: "${REDIS_PASSWORD}"
+-------------------------------------------------------------------------------------
+
+where `REDIS_PASSWORD` is a key stored in the local keystore of Metricbeat.
diff --git a/metricbeat/docs/fields.asciidoc b/metricbeat/docs/fields.asciidoc
index 8996357d382..263124a748a 100644
--- a/metricbeat/docs/fields.asciidoc
+++ b/metricbeat/docs/fields.asciidoc
@@ -1577,6 +1577,17 @@ type: object
 
 `billing` contains the estimated charges for your AWS account in Cloudwatch.
 
+
+
+*`aws.billing.metrics.EstimatedCharges.max`*::
++
+--
+Maximum estimated charges for AWS account.
+
+type: long
+
+--
+
 [float]
 === cloudwatch
 
@@ -1883,6 +1894,107 @@ type: double
 
 `ebs` contains the metrics that were scraped from AWS CloudWatch which contains monitoring metrics sent by AWS EBS.
+ + +*`aws.ebs.metrics.VolumeReadBytes.avg`*:: ++ +-- +Average size of each read operation during the period, except on volumes attached to a Nitro-based instance, where the average represents the average over the specified period. + +type: double + +-- + +*`aws.ebs.metrics.VolumeWriteBytes.avg`*:: ++ +-- +Average size of each write operation during the period, except on volumes attached to a Nitro-based instance, where the average represents the average over the specified period. + +type: double + +-- + +*`aws.ebs.metrics.VolumeReadOps.avg`*:: ++ +-- +The total number of read operations in a specified period of time. + +type: double + +-- + +*`aws.ebs.metrics.VolumeWriteOps.avg`*:: ++ +-- +The total number of write operations in a specified period of time. + +type: double + +-- + +*`aws.ebs.metrics.VolumeQueueLength.avg`*:: ++ +-- +The number of read and write operation requests waiting to be completed in a specified period of time. + +type: double + +-- + +*`aws.ebs.metrics.VolumeThroughputPercentage.avg`*:: ++ +-- +The percentage of I/O operations per second (IOPS) delivered of the total IOPS provisioned for an Amazon EBS volume. Used with Provisioned IOPS SSD volumes only. + +type: double + +-- + +*`aws.ebs.metrics.VolumeConsumedReadWriteOps.avg`*:: ++ +-- +The total amount of read and write operations (normalized to 256K capacity units) consumed in a specified period of time. Used with Provisioned IOPS SSD volumes only. + +type: double + +-- + +*`aws.ebs.metrics.BurstBalance.avg`*:: ++ +-- +Used with General Purpose SSD (gp2), Throughput Optimized HDD (st1), and Cold HDD (sc1) volumes only. Provides information about the percentage of I/O credits (for gp2) or throughput credits (for st1 and sc1) remaining in the burst bucket. + +type: double + +-- + +*`aws.ebs.metrics.VolumeTotalReadTime.sum`*:: ++ +-- +The total number of seconds spent by all read operations that completed in a specified period of time. + +type: double + +-- + +*`aws.ebs.metrics.VolumeTotalWriteTime.sum`*:: ++ +-- +The total number of seconds spent by all write operations that completed in a specified period of time. + +type: double + +-- + +*`aws.ebs.metrics.VolumeIdleTime.sum`*:: ++ +-- +The total number of seconds in a specified period of time when no read or write operations were submitted. + +type: double + +-- + [float] === ec2 @@ -2244,1782 +2356,1689 @@ type: integer `elb` contains the metrics that were scraped from AWS CloudWatch which contains monitoring metrics sent by AWS ELB. -[float] -=== lambda - -`lambda` contains the metrics that were scraped from AWS CloudWatch which contains monitoring metrics sent by AWS Lambda. - - -[float] -=== natgateway - -`natgateway` contains the metrics from Cloudwatch to track usage of NAT gateway related resources. - -[float] -=== rds -`rds` contains the metrics that were scraped from AWS CloudWatch which contains monitoring metrics sent by AWS RDS. +*`aws.elb.metrics.BackendConnectionErrors.sum`*:: ++ +-- +The number of connections that were not successfully established between the load balancer and the registered instances. +type: long +-- -*`aws.rds.cpu.total.pct`*:: +*`aws.elb.metrics.HTTPCode_Backend_2XX.sum`*:: + -- -The percentage of CPU utilization. +The number of HTTP 2XX response code generated by registered instances. - -type: scaled_float - -format: percent +type: long -- -*`aws.rds.cpu.credit_usage`*:: +*`aws.elb.metrics.HTTPCode_Backend_3XX.sum`*:: + -- -The number of CPU credits spent by the instance for CPU utilization. 
- +The number of HTTP 3XX response code generated by registered instances. type: long -- -*`aws.rds.cpu.credit_balance`*:: +*`aws.elb.metrics.HTTPCode_Backend_4XX.sum`*:: + -- -The number of earned CPU credits that an instance has accrued since it was launched or started. - +The number of HTTP 4XX response code generated by registered instances. type: long -- -*`aws.rds.database_connections`*:: +*`aws.elb.metrics.HTTPCode_Backend_5XX.sum`*:: + -- -The number of database connections in use. - +The number of HTTP 5XX response code generated by registered instances. type: long -- -*`aws.rds.db_instance.arn`*:: +*`aws.elb.metrics.HTTPCode_ELB_4XX.sum`*:: + -- -Amazon Resource Name(ARN) for each rds. +The number of HTTP 4XX client error codes generated by the load balancer. - -type: keyword +type: long -- -*`aws.rds.db_instance.class`*:: +*`aws.elb.metrics.HTTPCode_ELB_5XX.sum`*:: + -- -Contains the name of the compute and memory capacity class of the DB instance. - +The number of HTTP 5XX server error codes generated by the load balancer. -type: keyword +type: long -- -*`aws.rds.db_instance.identifier`*:: +*`aws.elb.metrics.RequestCount.sum`*:: + -- -Contains a user-supplied database identifier. This identifier is the unique key that identifies a DB instance. +The number of requests completed or connections made during the specified interval. - -type: keyword +type: long -- -*`aws.rds.db_instance.status`*:: +*`aws.elb.metrics.SpilloverCount.sum`*:: + -- -Specifies the current state of this database. - +The total number of requests that were rejected because the surge queue is full. -type: keyword +type: long -- -*`aws.rds.disk_queue_depth`*:: +*`aws.elb.metrics.HealthyHostCount.max`*:: + -- -The number of outstanding IOs (read/write requests) waiting to access the disk. +The number of healthy instances registered with your load balancer. - -type: float +type: long -- -*`aws.rds.failed_sql_server_agent_jobs`*:: +*`aws.elb.metrics.SurgeQueueLength.max`*:: + -- -The number of failed SQL Server Agent jobs during the last minute. - +The total number of requests (HTTP listener) or connections (TCP listener) that are pending routing to a healthy instance. type: long -- -*`aws.rds.freeable_memory.bytes`*:: +*`aws.elb.metrics.UnHealthyHostCount.max`*:: + -- -The amount of available random access memory. - +The number of unhealthy instances registered with your load balancer. type: long -format: bytes - -- -*`aws.rds.free_storage.bytes`*:: +*`aws.elb.metrics.Latency.avg`*:: + -- -The amount of available storage space. - - -type: long +The total time elapsed, in seconds, from the time the load balancer sent the request to a registered instance until the instance started to send the response headers. -format: bytes +type: double -- -*`aws.rds.maximum_used_transaction_ids`*:: +*`aws.elb.metrics.EstimatedALBActiveConnectionCount.avg`*:: + -- -The maximum transaction ID that has been used. Applies to PostgreSQL. +The estimated number of concurrent TCP connections active from clients to the load balancer and from the load balancer to targets. - -type: long +type: double -- -*`aws.rds.oldest_replication_slot_lag.mb`*:: +*`aws.elb.metrics.EstimatedALBConsumedLCUs.avg`*:: + -- -The lagging size of the replica lagging the most in terms of WAL data received. Applies to PostgreSQL. - +The estimated number of load balancer capacity units (LCU) used by an Application Load Balancer. 
-type: long +type: double -- -*`aws.rds.read_io.ops_per_sec`*:: +*`aws.elb.metrics.EstimatedALBNewConnectionCount.avg`*:: + -- -The average number of disk read I/O operations per second. +The estimated number of new TCP connections established from clients to the load balancer and from the load balancer to targets. - -type: float +type: double -- -*`aws.rds.replica_lag.sec`*:: +*`aws.elb.metrics.EstimatedProcessedBytes.avg`*:: + -- -The amount of time a Read Replica DB instance lags behind the source DB instance. Applies to MySQL, MariaDB, and PostgreSQL Read Replicas. +The estimated number of bytes processed by an Application Load Balancer. +type: double -type: long +-- -format: duration +[float] +=== applicationelb --- +`applicationelb` contains the metrics that were scraped from AWS CloudWatch which contains monitoring metrics sent by AWS ApplicationELB. -*`aws.rds.swap_usage.bytes`*:: + + + +*`aws.applicationelb.metrics.ActiveConnectionCount.sum`*:: + -- -The amount of swap space used on the DB instance. This metric is not available for SQL Server. - +The total number of concurrent TCP connections active from clients to the load balancer and from the load balancer to targets. type: long -format: bytes - -- -*`aws.rds.transaction_logs_generation`*:: +*`aws.applicationelb.metrics.ClientTLSNegotiationErrorCount.sum`*:: + -- -The disk space used by transaction logs. Applies to PostgreSQL. - +The number of TLS connections initiated by the client that did not establish a session with the load balancer due to a TLS error. type: long -- -*`aws.rds.write_io.ops_per_sec`*:: +*`aws.applicationelb.metrics.HTTP_Fixed_Response_Count.sum`*:: + -- -The average number of disk write I/O operations per second. +The number of fixed-response actions that were successful. - -type: float +type: long -- -*`aws.rds.queries`*:: +*`aws.applicationelb.metrics.HTTP_Redirect_Count.sum`*:: + -- -The average number of queries executed per second. - +The number of redirect actions that were successful. type: long -- -*`aws.rds.deadlocks`*:: +*`aws.applicationelb.metrics.HTTP_Redirect_Url_Limit_Exceeded_Count.sum`*:: + -- -The average number of deadlocks in the database per second. - +The number of redirect actions that couldn't be completed because the URL in the response location header is larger than 8K. type: long -- -*`aws.rds.volume_used.bytes`*:: +*`aws.applicationelb.metrics.HTTPCode_ELB_3XX_Count.sum`*:: + -- -The amount of storage used by your Aurora DB instance, in bytes. - +The number of HTTP 3XX redirection codes that originate from the load balancer. type: long -format: bytes - -- -*`aws.rds.volume.read.iops`*:: +*`aws.applicationelb.metrics.HTTPCode_ELB_4XX_Count.sum`*:: + -- -The number of billed read I/O operations from a cluster volume, reported at 5-minute intervals. - +The number of HTTP 4XX client error codes that originate from the load balancer. type: long -format: bytes - -- -*`aws.rds.volume.write.iops`*:: +*`aws.applicationelb.metrics.HTTPCode_ELB_5XX_Count.sum`*:: + -- -The number of write disk I/O operations to the cluster volume, reported at 5-minute intervals. - +The number of HTTP 5XX server error codes that originate from the load balancer. type: long -format: bytes - -- -*`aws.rds.free_local_storage.bytes`*:: +*`aws.applicationelb.metrics.HTTPCode_ELB_500_Count.sum`*:: + -- -The amount of storage available for temporary tables and logs, in bytes. - +The number of HTTP 500 error codes that originate from the load balancer. 
type: long -format: bytes - -- -*`aws.rds.login_failures`*:: +*`aws.applicationelb.metrics.HTTPCode_ELB_502_Count.sum`*:: + -- -The average number of failed login attempts per second. - +The number of HTTP 502 error codes that originate from the load balancer. type: long -- -*`aws.rds.throughput.commit`*:: +*`aws.applicationelb.metrics.HTTPCode_ELB_503_Count.sum`*:: + -- -The average number of commit operations per second. +The number of HTTP 503 error codes that originate from the load balancer. - -type: float +type: long -- -*`aws.rds.throughput.delete`*:: +*`aws.applicationelb.metrics.HTTPCode_ELB_504_Count.sum`*:: + -- -The average number of delete queries per second. - +The number of HTTP 504 error codes that originate from the load balancer. -type: float +type: long -- -*`aws.rds.throughput.ddl`*:: +*`aws.applicationelb.metrics.IPv6ProcessedBytes.sum`*:: + -- -The average number of DDL requests per second. +The total number of bytes processed by the load balancer over IPv6. - -type: float +type: long -- -*`aws.rds.throughput.dml`*:: +*`aws.applicationelb.metrics.IPv6RequestCount.sum`*:: + -- -The average number of inserts, updates, and deletes per second. - +The number of IPv6 requests received by the load balancer. -type: float +type: long -- -*`aws.rds.throughput.insert`*:: +*`aws.applicationelb.metrics.NewConnectionCount.sum`*:: + -- -The average number of insert queries per second. +The total number of new TCP connections established from clients to the load balancer and from the load balancer to targets. - -type: float +type: long -- -*`aws.rds.throughput.network`*:: +*`aws.applicationelb.metrics.ProcessedBytes.sum`*:: + -- -The amount of network throughput both received from and transmitted to clients by each instance in the Aurora MySQL DB cluster, in bytes per second. - +The total number of bytes processed by the load balancer over IPv4 and IPv6. -type: float +type: long -- -*`aws.rds.throughput.network_receive`*:: +*`aws.applicationelb.metrics.RejectedConnectionCount.sum`*:: + -- -The incoming (Receive) network traffic on the DB instance, including both customer database traffic and Amazon RDS traffic used for monitoring and replication. +The number of connections that were rejected because the load balancer had reached its maximum number of connections. - -type: float +type: long -- -*`aws.rds.throughput.network_transmit`*:: +*`aws.applicationelb.metrics.RequestCount.sum`*:: + -- -The outgoing (Transmit) network traffic on the DB instance, including both customer database traffic and Amazon RDS traffic used for monitoring and replication. - +The number of requests processed over IPv4 and IPv6. -type: float +type: long -- -*`aws.rds.throughput.read`*:: +*`aws.applicationelb.metrics.RuleEvaluations.sum`*:: + -- -The average amount of time taken per disk I/O operation. +The number of rules processed by the load balancer given a request rate averaged over an hour. - -type: float +type: long -- -*`aws.rds.throughput.select`*:: +*`aws.applicationelb.metrics.ConsumedLCUs.avg`*:: + -- -The average number of select queries per second. - +The number of load balancer capacity units (LCU) used by your load balancer. -type: float +type: double -- -*`aws.rds.throughput.update`*:: -+ --- -The average number of update queries per second. +[float] +=== networkelb +`networkelb` contains the metrics that were scraped from AWS CloudWatch which contains monitoring metrics sent by AWS NetworkELB. 
-type: float --- -*`aws.rds.throughput.write`*:: + +*`aws.networkelb.metrics.ActiveFlowCount.avg`*:: + -- -The average number of bytes written to disk per second. +The total number of concurrent flows (or connections) from clients to targets. - -type: float +type: double -- -*`aws.rds.latency.commit`*:: +*`aws.networkelb.metrics.ActiveFlowCount_TCP.avg`*:: + -- -The amount of latency for commit operations, in milliseconds. - +The total number of concurrent TCP flows (or connections) from clients to targets. -type: float - -format: duration +type: double -- -*`aws.rds.latency.ddl`*:: +*`aws.networkelb.metrics.ActiveFlowCount_TLS.avg`*:: + -- -The amount of latency for data definition language (DDL) requests, in milliseconds. - - -type: float +The total number of concurrent TLS flows (or connections) from clients to targets. -format: duration +type: double -- -*`aws.rds.latency.dml`*:: +*`aws.networkelb.metrics.ActiveFlowCount_UDP.avg`*:: + -- -The amount of latency for inserts, updates, and deletes, in milliseconds. - - -type: float +The total number of concurrent UDP flows (or connections) from clients to targets. -format: duration +type: double -- -*`aws.rds.latency.insert`*:: +*`aws.networkelb.metrics.ConsumedLCUs.avg`*:: + -- -The amount of latency for insert queries, in milliseconds. +The number of load balancer capacity units (LCU) used by your load balancer. - -type: float - -format: duration +type: double -- -*`aws.rds.latency.read`*:: +*`aws.networkelb.metrics.ClientTLSNegotiationErrorCount.sum`*:: + -- -The average amount of time taken per disk I/O operation. - +The total number of TLS handshakes that failed during negotiation between a client and a TLS listener. -type: float - -format: duration +type: long -- -*`aws.rds.latency.select`*:: +*`aws.networkelb.metrics.NewFlowCount.sum`*:: + -- -The amount of latency for select queries, in milliseconds. - - -type: float +The total number of new flows (or connections) established from clients to targets in the time period. -format: duration +type: long -- -*`aws.rds.latency.update`*:: +*`aws.networkelb.metrics.NewFlowCount_TLS.sum`*:: + -- -The amount of latency for update queries, in milliseconds. - - -type: float +The total number of new TLS flows (or connections) established from clients to targets in the time period. -format: duration +type: long -- -*`aws.rds.latency.write`*:: +*`aws.networkelb.metrics.ProcessedBytes.sum`*:: + -- -The average amount of time taken per disk I/O operation. +The total number of bytes processed by the load balancer, including TCP/IP headers. - -type: float - -format: duration +type: long -- -*`aws.rds.latency.delete`*:: +*`aws.networkelb.metrics.ProcessedBytes_TLS.sum`*:: + -- -The amount of latency for delete queries, in milliseconds. - +The total number of bytes processed by TLS listeners. -type: float - -format: duration +type: long -- -*`aws.rds.disk_usage.bin_log.bytes`*:: +*`aws.networkelb.metrics.TargetTLSNegotiationErrorCount.sum`*:: + -- -The amount of disk space occupied by binary logs on the master. Applies to MySQL read replicas. - +The total number of TLS handshakes that failed during negotiation between a TLS listener and a target. type: long -format: bytes - -- -*`aws.rds.disk_usage.replication_slot.mb`*:: +*`aws.networkelb.metrics.TCP_Client_Reset_Count.sum`*:: + -- -The disk space used by replication slot files. Applies to PostgreSQL. - +The total number of reset (RST) packets sent from a client to a target. 
type: long -- -*`aws.rds.disk_usage.transaction_logs.mb`*:: +*`aws.networkelb.metrics.TCP_ELB_Reset_Count.sum`*:: + -- -The disk space used by transaction logs. Applies to PostgreSQL. - +The total number of reset (RST) packets generated by the load balancer. type: long -- -*`aws.rds.transactions.active`*:: +*`aws.networkelb.metrics.TCP_Target_Reset_Count.sum`*:: + -- -The average number of current transactions executing on an Aurora database instance per second. - +The total number of reset (RST) packets sent from a target to a client. type: long -- -*`aws.rds.transactions.blocked`*:: +*`aws.networkelb.metrics.HealthyHostCount.max`*:: + -- -The average number of transactions in the database that are blocked per second. - +The number of targets that are considered healthy. type: long -- -*`aws.rds.db_instance.db_cluster_identifier`*:: +*`aws.networkelb.metrics.UnHealthyHostCount.max`*:: + -- -This identifier is the unique key that identifies a DB cluster specifically for Amazon Aurora DB cluster. - +The number of targets that are considered unhealthy. -type: keyword +type: long -- -*`aws.rds.db_instance.role`*:: -+ --- -DB roles like WRITER or READER, specifically for Amazon Aurora DB cluster. +[float] +=== lambda +`lambda` contains the metrics that were scraped from AWS CloudWatch which contains monitoring metrics sent by AWS Lambda. -type: keyword --- -*`aws.rds.db_instance.engine_name`*:: + +*`aws.lambda.metrics.Invocations.avg`*:: + -- -Each DB instance runs a DB engine, like MySQL, MariaDB, PostgreSQL and etc. +The number of times your function code is executed, including successful executions and executions that result in a function error. - -type: keyword +type: double -- -*`aws.rds.aurora_bin_log_replica_lag`*:: +*`aws.lambda.metrics.Errors.avg`*:: + -- -The amount of time a replica DB cluster running on Aurora with MySQL compatibility lags behind the source DB cluster. - +The number of invocations that result in a function error. -type: long +type: double -- -*`aws.rds.aurora_global_db.replicated_write_io.bytes`*:: +*`aws.lambda.metrics.DeadLetterErrors.avg`*:: + -- -In an Aurora Global Database, the number of write I/O operations replicated from the primary AWS Region to the cluster volume in a secondary AWS Region. +For asynchronous invocation, the number of times Lambda attempts to send an event to a dead-letter queue but fails. - -type: long +type: double -- -*`aws.rds.aurora_global_db.data_transfer.bytes`*:: +*`aws.lambda.metrics.DestinationDeliveryFailures.avg`*:: + -- -In an Aurora Global Database, the amount of redo log data transferred from the master AWS Region to a secondary AWS Region. - +For asynchronous invocation, the number of times Lambda attempts to send an event to a destination but fails. -type: long +type: double -- -*`aws.rds.aurora_global_db.replication_lag.ms`*:: +*`aws.lambda.metrics.Duration.avg`*:: + -- -For an Aurora Global Database, the amount of lag when replicating updates from the primary AWS Region, in milliseconds. +The amount of time that your function code spends processing an event. - -type: long +type: double -- -*`aws.rds.aurora_replica.lag.ms`*:: +*`aws.lambda.metrics.Throttles.avg`*:: + -- -For an Aurora Replica, the amount of lag when replicating updates from the primary instance, in milliseconds. - +The number of invocation requests that are throttled. 
-type: long +type: double -- -*`aws.rds.aurora_replica.lag_max.ms`*:: +*`aws.lambda.metrics.IteratorAge.avg`*:: + -- -The maximum amount of lag between the primary instance and each Aurora DB instance in the DB cluster, in milliseconds. +For event source mappings that read from streams, the age of the last record in the event. - -type: long +type: double -- -*`aws.rds.aurora_replica.lag_min.ms`*:: +*`aws.lambda.metrics.ConcurrentExecutions.avg`*:: + -- -The minimum amount of lag between the primary instance and each Aurora DB instance in the DB cluster, in milliseconds. - +The number of function instances that are processing events. -type: long +type: double -- -*`aws.rds.backtrack_change_records.creation_rate`*:: +*`aws.lambda.metrics.UnreservedConcurrentExecutions.avg`*:: + -- -The number of backtrack change records created over five minutes for your DB cluster. +For an AWS Region, the number of events that are being processed by functions that don't have reserved concurrency. - -type: long +type: double -- -*`aws.rds.backtrack_change_records.stored`*:: +*`aws.lambda.metrics.ProvisionedConcurrentExecutions.max`*:: + -- -The actual number of backtrack change records used by your DB cluster. - +The number of function instances that are processing events on provisioned concurrency. type: long -- -*`aws.rds.backtrack_window.actual`*:: +*`aws.lambda.metrics.ProvisionedConcurrencyUtilization.max`*:: + -- -The difference between the target backtrack window and the actual backtrack window. - +For a version or alias, the value of ProvisionedConcurrentExecutions divided by the total amount of provisioned concurrency allocated. type: long -- -*`aws.rds.backtrack_window.alert`*:: +*`aws.lambda.metrics.ProvisionedConcurrencyInvocations.sum`*:: + -- -The number of times that the actual backtrack window is smaller than the target backtrack window for a given period of time. - +The number of times your function code is executed on provisioned concurrency. type: long -- -*`aws.rds.storage_used.backup_retention_period.bytes`*:: +*`aws.lambda.metrics.ProvisionedConcurrencySpilloverInvocations.sum`*:: + -- -The total amount of backup storage in bytes used to support the point-in-time restore feature within the Aurora DB cluster's backup retention window. - +The number of times your function code is executed on standard concurrency when all provisioned concurrency is in use. type: long -- -*`aws.rds.storage_used.snapshot.bytes`*:: -+ --- -The total amount of backup storage in bytes consumed by all Aurora snapshots for an Aurora DB cluster outside its backup retention window. +[float] +=== natgateway + +`natgateway` contains the metrics from Cloudwatch to track usage of NAT gateway related resources. -type: long --- -*`aws.rds.cache_hit_ratio.buffer`*:: +*`aws.natgateway.metrics.BytesInFromDestination.sum`*:: + -- -The percentage of requests that are served by the buffer cache. - +The number of bytes received by the NAT gateway from the destination. type: long -- -*`aws.rds.cache_hit_ratio.result_set`*:: +*`aws.natgateway.metrics.BytesInFromSource.sum`*:: + -- -The percentage of requests that are served by the Resultset cache. - +The number of bytes received by the NAT gateway from clients in your VPC. type: long -- -*`aws.rds.engine_uptime.sec`*:: +*`aws.natgateway.metrics.BytesOutToDestination.sum`*:: + -- -The amount of time that the instance has been running, in seconds. - +The number of bytes sent out through the NAT gateway to the destination. 
type: long -- -*`aws.rds.rds_to_aurora_postgresql_replica_lag.sec`*:: +*`aws.natgateway.metrics.BytesOutToSource.sum`*:: + -- -The amount of lag in seconds when replicating updates from the primary RDS PostgreSQL instance to other nodes in the cluster. - +The number of bytes sent through the NAT gateway to the clients in your VPC. type: long -- -*`aws.rds.backup_storage_billed_total.bytes`*:: +*`aws.natgateway.metrics.ConnectionAttemptCount.sum`*:: + -- -The total amount of backup storage in bytes for which you are billed for a given Aurora DB cluster. - +The number of connection attempts made through the NAT gateway. type: long -- -*`aws.rds.aurora_volume_left_total.bytes`*:: +*`aws.natgateway.metrics.ConnectionEstablishedCount.sum`*:: + -- -The remaining available space for the cluster volume, measured in bytes. - +The number of connections established through the NAT gateway. type: long -- -[float] -=== s3_daily_storage - -`s3_daily_storage` contains the daily storage metrics that were scraped from AWS CloudWatch which contains monitoring metrics sent by AWS S3. - - - -*`aws.s3_daily_storage.bucket.size.bytes`*:: +*`aws.natgateway.metrics.ErrorPortAllocation.sum`*:: + -- -The amount of data in bytes stored in a bucket. - +The number of times the NAT gateway could not allocate a source port. type: long -format: bytes - -- -*`aws.s3_daily_storage.number_of_objects`*:: +*`aws.natgateway.metrics.IdleTimeoutCount.sum`*:: + -- -The total number of objects stored in a bucket for all storage classes. - +The number of connections that transitioned from the active state to the idle state. type: long -- -[float] -=== s3_request - -`s3_request` contains request metrics that were scraped from AWS CloudWatch which contains monitoring metrics sent by AWS S3. - - - -*`aws.s3_request.requests.total`*:: +*`aws.natgateway.metrics.PacketsDropCount.sum`*:: + -- -The total number of HTTP requests made to an Amazon S3 bucket, regardless of type. - +The number of packets dropped by the NAT gateway. type: long -- -*`aws.s3_request.requests.get`*:: +*`aws.natgateway.metrics.PacketsInFromDestination.sum`*:: + -- -The number of HTTP GET requests made for objects in an Amazon S3 bucket. - +The number of packets received by the NAT gateway from the destination. type: long -- -*`aws.s3_request.requests.put`*:: +*`aws.natgateway.metrics.PacketsInFromSource.sum`*:: + -- -The number of HTTP PUT requests made for objects in an Amazon S3 bucket. - +The number of packets received by the NAT gateway from clients in your VPC. type: long -- -*`aws.s3_request.requests.delete`*:: +*`aws.natgateway.metrics.PacketsOutToDestination.sum`*:: + -- -The number of HTTP DELETE requests made for objects in an Amazon S3 bucket. - +The number of packets sent out through the NAT gateway to the destination. type: long -- -*`aws.s3_request.requests.head`*:: +*`aws.natgateway.metrics.PacketsOutToSource.sum`*:: + -- -The number of HTTP HEAD requests made to an Amazon S3 bucket. - +The number of packets sent through the NAT gateway to the clients in your VPC. type: long -- -*`aws.s3_request.requests.post`*:: +*`aws.natgateway.metrics.ActiveConnectionCount.max`*:: + -- -The number of HTTP POST requests made to an Amazon S3 bucket. - +The total number of concurrent active TCP connections through the NAT gateway. type: long -- -*`aws.s3_request.requests.select`*:: -+ --- -The number of Amazon S3 SELECT Object Content requests made for objects in an Amazon S3 bucket. 
+[float] +=== rds +`rds` contains the metrics that were scraped from AWS CloudWatch which contains monitoring metrics sent by AWS RDS. -type: long --- -*`aws.s3_request.requests.select_scanned.bytes`*:: +*`aws.rds.cpu.total.pct`*:: + -- -The number of bytes of data scanned with Amazon S3 SELECT Object Content requests in an Amazon S3 bucket. +The percentage of CPU utilization. -type: long +type: scaled_float -format: bytes +format: percent -- -*`aws.s3_request.requests.select_returned.bytes`*:: +*`aws.rds.cpu.credit_usage`*:: + -- -The number of bytes of data returned with Amazon S3 SELECT Object Content requests in an Amazon S3 bucket. +The number of CPU credits spent by the instance for CPU utilization. type: long -format: bytes - -- -*`aws.s3_request.requests.list`*:: +*`aws.rds.cpu.credit_balance`*:: + -- -The number of HTTP requests that list the contents of a bucket. +The number of earned CPU credits that an instance has accrued since it was launched or started. type: long -- -*`aws.s3_request.downloaded.bytes`*:: +*`aws.rds.database_connections`*:: + -- -The number bytes downloaded for requests made to an Amazon S3 bucket, where the response includes a body. +The number of database connections in use. type: long -format: bytes - -- -*`aws.s3_request.uploaded.bytes`*:: +*`aws.rds.db_instance.arn`*:: + -- -The number bytes uploaded that contain a request body, made to an Amazon S3 bucket. - +Amazon Resource Name(ARN) for each rds. -type: long -format: bytes +type: keyword -- -*`aws.s3_request.errors.4xx`*:: +*`aws.rds.db_instance.class`*:: + -- -The number of HTTP 4xx client error status code requests made to an Amazon S3 bucket with a value of either 0 or 1. +Contains the name of the compute and memory capacity class of the DB instance. -type: long +type: keyword -- -*`aws.s3_request.errors.5xx`*:: +*`aws.rds.db_instance.identifier`*:: + -- -The number of HTTP 5xx server error status code requests made to an Amazon S3 bucket with a value of either 0 or 1. +Contains a user-supplied database identifier. This identifier is the unique key that identifies a DB instance. -type: long +type: keyword -- -*`aws.s3_request.latency.first_byte.ms`*:: +*`aws.rds.db_instance.status`*:: + -- -The per-request time from the complete request being received by an Amazon S3 bucket to when the response starts to be returned. - +Specifies the current state of this database. -type: long -format: duration +type: keyword -- -*`aws.s3_request.latency.total_request.ms`*:: +*`aws.rds.disk_queue_depth`*:: + -- -The elapsed per-request time from the first byte received to the last byte sent to an Amazon S3 bucket. +The number of outstanding IOs (read/write requests) waiting to access the disk. -type: long +type: float -format: duration +-- +*`aws.rds.failed_sql_server_agent_jobs`*:: ++ -- +The number of failed SQL Server Agent jobs during the last minute. -[float] -=== sns -`sns` contains the metrics that were scraped from AWS CloudWatch which contains monitoring metrics sent by AWS SNS. +type: long +-- -[float] -=== sqs +*`aws.rds.freeable_memory.bytes`*:: ++ +-- +The amount of available random access memory. -`sqs` contains the metrics that were scraped from AWS CloudWatch which contains monitoring metrics sent by AWS SQS. +type: long +format: bytes -*`aws.sqs.oldest_message_age.sec`*:: +-- + +*`aws.rds.free_storage.bytes`*:: + -- -The approximate age of the oldest non-deleted message in the queue. +The amount of available storage space. 
type: long -format: duration +format: bytes -- -*`aws.sqs.messages.delayed`*:: +*`aws.rds.maximum_used_transaction_ids`*:: + -- -TThe number of messages in the queue that are delayed and not available for reading immediately. +The maximum transaction ID that has been used. Applies to PostgreSQL. type: long -- -*`aws.sqs.messages.not_visible`*:: +*`aws.rds.oldest_replication_slot_lag.mb`*:: + -- -The number of messages that are in flight. +The lagging size of the replica lagging the most in terms of WAL data received. Applies to PostgreSQL. type: long -- -*`aws.sqs.messages.visible`*:: +*`aws.rds.read_io.ops_per_sec`*:: + -- -The number of messages available for retrieval from the queue. +The average number of disk read I/O operations per second. -type: long +type: float -- -*`aws.sqs.messages.deleted`*:: +*`aws.rds.replica_lag.sec`*:: + -- -The number of messages deleted from the queue. +The amount of time a Read Replica DB instance lags behind the source DB instance. Applies to MySQL, MariaDB, and PostgreSQL Read Replicas. type: long +format: duration + -- -*`aws.sqs.messages.received`*:: +*`aws.rds.swap_usage.bytes`*:: + -- -The number of messages returned by calls to the ReceiveMessage action. +The amount of swap space used on the DB instance. This metric is not available for SQL Server. type: long +format: bytes + -- -*`aws.sqs.messages.sent`*:: +*`aws.rds.transaction_logs_generation`*:: + -- -The number of messages added to a queue. +The disk space used by transaction logs. Applies to PostgreSQL. type: long -- -*`aws.sqs.empty_receives`*:: +*`aws.rds.write_io.ops_per_sec`*:: + -- -The number of ReceiveMessage API calls that did not return a message. +The average number of disk write I/O operations per second. -type: long +type: float -- -*`aws.sqs.sent_message_size.bytes`*:: +*`aws.rds.queries`*:: + -- -The size of messages added to a queue. +The average number of queries executed per second. type: long -format: bytes - -- -*`aws.sqs.queue.name`*:: +*`aws.rds.deadlocks`*:: + -- -SQS queue name +The average number of deadlocks in the database per second. -type: keyword +type: long -- -[float] -=== transitgateway - -`transitgateway` contains the metrics from Cloudwatch to track usage of transit gateway related resources. - - -[float] -=== usage +*`aws.rds.volume_used.bytes`*:: ++ +-- +The amount of storage used by your Aurora DB instance, in bytes. -`usage` contains the metrics from Cloudwatch to track usage of some AWS resources. +type: long -[float] -=== vpn +format: bytes -`vpn` contains the metrics from Cloudwatch to track usage of VPN related resources. +-- +*`aws.rds.volume.read.iops`*:: ++ +-- +The number of billed read I/O operations from a cluster volume, reported at 5-minute intervals. -[[exported-fields-azure]] -== azure fields -azure module +type: long +format: bytes +-- -[float] -=== azure +*`aws.rds.volume.write.iops`*:: ++ +-- +The number of write disk I/O operations to the cluster volume, reported at 5-minute intervals. +type: long +format: bytes -*`azure.timegrain`*:: -+ -- -The Azure metric timegrain - - -type: keyword +*`aws.rds.free_local_storage.bytes`*:: ++ -- +The amount of storage available for temporary tables and logs, in bytes. -[float] -=== resource -The resource specified +type: long +format: bytes +-- -*`azure.resource.type`*:: +*`aws.rds.login_failures`*:: + -- -The type of the resource +The average number of failed login attempts per second. 
-type: keyword +type: long -- -*`azure.resource.group`*:: +*`aws.rds.throughput.commit`*:: + -- -The resource group +The average number of commit operations per second. -type: keyword +type: float -- -*`azure.resource.tags.*`*:: +*`aws.rds.throughput.delete`*:: + -- -Azure resource tags. +The average number of delete queries per second. -type: object +type: float -- -*`azure.namespace`*:: +*`aws.rds.throughput.ddl`*:: + -- -The namespace selected +The average number of DDL requests per second. -type: keyword +type: float -- -*`azure.subscription_id`*:: +*`aws.rds.throughput.dml`*:: + -- -The subscription ID +The average number of inserts, updates, and deletes per second. -type: keyword +type: float -- -*`azure.dimensions.*`*:: +*`aws.rds.throughput.insert`*:: + -- -Azure metric dimensions. +The average number of insert queries per second. -type: object +type: float -- -*`azure.compute_vm.*.*`*:: +*`aws.rds.throughput.network`*:: + -- -compute_vm +The amount of network throughput both received from and transmitted to clients by each instance in the Aurora MySQL DB cluster, in bytes per second. -type: object +type: float -- -*`azure.compute_vm_scaleset.*.*`*:: +*`aws.rds.throughput.network_receive`*:: + -- -compute_vm_scaleset +The incoming (Receive) network traffic on the DB instance, including both customer database traffic and Amazon RDS traffic used for monitoring and replication. -type: object +type: float -- -*`azure.container_instance.*.*`*:: +*`aws.rds.throughput.network_transmit`*:: + -- -container instance +The outgoing (Transmit) network traffic on the DB instance, including both customer database traffic and Amazon RDS traffic used for monitoring and replication. -type: object +type: float -- -*`azure.container_registry.*.*`*:: +*`aws.rds.throughput.read`*:: + -- -container registry +The average amount of time taken per disk I/O operation. -type: object +type: float -- -*`azure.container_service.*.*`*:: +*`aws.rds.throughput.select`*:: + -- -container service +The average number of select queries per second. -type: object +type: float -- -*`azure.database_account.*.*`*:: +*`aws.rds.throughput.update`*:: + -- -database account +The average number of update queries per second. -type: object +type: float -- -[float] -=== monitor +*`aws.rds.throughput.write`*:: ++ +-- +The average number of bytes written to disk per second. -monitor +type: float +-- -*`azure.monitor.metrics.*.*`*:: +*`aws.rds.latency.commit`*:: + -- -Metrics returned. +The amount of latency for commit operations, in milliseconds. -type: object +type: float + +format: duration -- -*`azure.storage.*.*`*:: +*`aws.rds.latency.ddl`*:: + -- -storage account +The amount of latency for data definition language (DDL) requests, in milliseconds. -type: object +type: float + +format: duration -- -[[exported-fields-beat-common]] -== Beat fields +*`aws.rds.latency.dml`*:: ++ +-- +The amount of latency for inserts, updates, and deletes, in milliseconds. -Contains common beat fields available in all event types. +type: float + +format: duration +-- -*`agent.hostname`*:: +*`aws.rds.latency.insert`*:: + -- -Hostname of the agent. +The amount of latency for insert queries, in milliseconds. -type: keyword + +type: float + +format: duration -- -*`beat.timezone`*:: +*`aws.rds.latency.read`*:: + -- -type: alias +The average amount of time taken per disk I/O operation. -alias to: event.timezone + +type: float + +format: duration -- -*`fields`*:: +*`aws.rds.latency.select`*:: + -- -Contains user configurable fields. 
+The amount of latency for select queries, in milliseconds. -type: object +type: float + +format: duration -- -*`beat.name`*:: +*`aws.rds.latency.update`*:: + -- -type: alias +The amount of latency for update queries, in milliseconds. -alias to: host.name + +type: float + +format: duration -- -*`beat.hostname`*:: +*`aws.rds.latency.write`*:: + -- -type: alias +The average amount of time taken per disk I/O operation. -alias to: agent.hostname + +type: float + +format: duration -- -*`timeseries.instance`*:: +*`aws.rds.latency.delete`*:: + -- -Time series instance id - -type: keyword +The amount of latency for delete queries, in milliseconds. --- -[[exported-fields-beat]] -== Beat fields +type: float -Beat module +format: duration +-- +*`aws.rds.disk_usage.bin_log.bytes`*:: ++ +-- +The amount of disk space occupied by binary logs on the master. Applies to MySQL read replicas. -[float] -=== beat +type: long +format: bytes +-- -*`beat.id`*:: +*`aws.rds.disk_usage.replication_slot.mb`*:: + -- -Beat ID. +The disk space used by replication slot files. Applies to PostgreSQL. -type: keyword +type: long -- -*`beat.type`*:: +*`aws.rds.disk_usage.transaction_logs.mb`*:: + -- -Beat type. +The disk space used by transaction logs. Applies to PostgreSQL. -type: keyword +type: long -- -[float] -=== state +*`aws.rds.transactions.active`*:: ++ +-- +The average number of current transactions executing on an Aurora database instance per second. -Beat state +type: long +-- -*`beat.state.management.enabled`*:: +*`aws.rds.transactions.blocked`*:: + -- -Is central management enabled? +The average number of transactions in the database that are blocked per second. -type: boolean +type: long -- -*`beat.state.module.count`*:: +*`aws.rds.db_instance.db_cluster_identifier`*:: + -- -Number of modules enabled +This identifier is the unique key that identifies a DB cluster specifically for Amazon Aurora DB cluster. -type: integer +type: keyword -- -*`beat.state.output.name`*:: +*`aws.rds.db_instance.role`*:: + -- -Name of output used by Beat +DB roles like WRITER or READER, specifically for Amazon Aurora DB cluster. type: keyword -- -*`beat.state.queue.name`*:: +*`aws.rds.db_instance.engine_name`*:: + -- -Name of queue being used by Beat +Each DB instance runs a DB engine, like MySQL, MariaDB, PostgreSQL and etc. type: keyword -- -[float] -=== stats +*`aws.rds.aurora_bin_log_replica_lag`*:: ++ +-- +The amount of time a replica DB cluster running on Aurora with MySQL compatibility lags behind the source DB cluster. -Beat stats +type: long +-- -*`beat.stats.uptime.ms`*:: +*`aws.rds.aurora_global_db.replicated_write_io.bytes`*:: + -- -Beat uptime +In an Aurora Global Database, the number of write I/O operations replicated from the primary AWS Region to the cluster volume in a secondary AWS Region. type: long -- -*`beat.stats.runtime.goroutines`*:: +*`aws.rds.aurora_global_db.data_transfer.bytes`*:: + -- -Number of goroutines running in Beat +In an Aurora Global Database, the amount of redo log data transferred from the master AWS Region to a secondary AWS Region. type: long -- -[float] -=== libbeat +*`aws.rds.aurora_global_db.replication_lag.ms`*:: ++ +-- +For an Aurora Global Database, the amount of lag when replicating updates from the primary AWS Region, in milliseconds. -Fields common to all Beats +type: long +-- -[float] -=== output +*`aws.rds.aurora_replica.lag.ms`*:: ++ +-- +For an Aurora Replica, the amount of lag when replicating updates from the primary instance, in milliseconds. 
-Output stats +type: long +-- -*`beat.stats.libbeat.output.type`*:: +*`aws.rds.aurora_replica.lag_max.ms`*:: + -- -Type of output +The maximum amount of lag between the primary instance and each Aurora DB instance in the DB cluster, in milliseconds. -type: keyword +type: long -- -[float] -=== events +*`aws.rds.aurora_replica.lag_min.ms`*:: ++ +-- +The minimum amount of lag between the primary instance and each Aurora DB instance in the DB cluster, in milliseconds. -Event counters +type: long +-- -*`beat.stats.libbeat.output.events.acked`*:: +*`aws.rds.backtrack_change_records.creation_rate`*:: + -- -Number of events acknowledged +The number of backtrack change records created over five minutes for your DB cluster. type: long -- -*`beat.stats.libbeat.output.events.active`*:: +*`aws.rds.backtrack_change_records.stored`*:: + -- -Number of active events +The actual number of backtrack change records used by your DB cluster. type: long -- -*`beat.stats.libbeat.output.events.batches`*:: +*`aws.rds.backtrack_window.actual`*:: + -- -Number of event batches +The difference between the target backtrack window and the actual backtrack window. type: long -- -*`beat.stats.libbeat.output.events.dropped`*:: +*`aws.rds.backtrack_window.alert`*:: + -- -Number of events dropped +The number of times that the actual backtrack window is smaller than the target backtrack window for a given period of time. type: long -- -*`beat.stats.libbeat.output.events.duplicates`*:: +*`aws.rds.storage_used.backup_retention_period.bytes`*:: + -- -Number of events duplicated +The total amount of backup storage in bytes used to support the point-in-time restore feature within the Aurora DB cluster's backup retention window. type: long -- -*`beat.stats.libbeat.output.events.failed`*:: +*`aws.rds.storage_used.snapshot.bytes`*:: + -- -Number of events failed +The total amount of backup storage in bytes consumed by all Aurora snapshots for an Aurora DB cluster outside its backup retention window. type: long -- -*`beat.stats.libbeat.output.events.toomany`*:: +*`aws.rds.cache_hit_ratio.buffer`*:: + -- -Number of too many events +The percentage of requests that are served by the buffer cache. type: long -- -*`beat.stats.libbeat.output.events.total`*:: +*`aws.rds.cache_hit_ratio.result_set`*:: + -- -Total number of events +The percentage of requests that are served by the Resultset cache. type: long -- -[float] -=== read +*`aws.rds.engine_uptime.sec`*:: ++ +-- +The amount of time that the instance has been running, in seconds. -Read stats +type: long +-- -*`beat.stats.libbeat.output.read.bytes`*:: +*`aws.rds.rds_to_aurora_postgresql_replica_lag.sec`*:: + -- -Number of bytes read +The amount of lag in seconds when replicating updates from the primary RDS PostgreSQL instance to other nodes in the cluster. type: long -- -*`beat.stats.libbeat.output.read.errors`*:: +*`aws.rds.backup_storage_billed_total.bytes`*:: + -- -Number of read errors +The total amount of backup storage in bytes for which you are billed for a given Aurora DB cluster. type: long -- -[float] -=== write - -Write stats - - - -*`beat.stats.libbeat.output.write.bytes`*:: +*`aws.rds.aurora_volume_left_total.bytes`*:: + -- -Number of bytes written +The remaining available space for the cluster volume, measured in bytes. 
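type: long --

For context while reviewing: the `aws.rds.*` fields above are collected by the aws module's `rds` metricset, whose fields this diff documents. A minimal sketch of enabling it, assuming the module's standard credential settings (the 60s period and the environment-variable placeholders are illustrative, not prescriptive):

[source,yaml]
----
# Hypothetical modules.d/aws.yml entry: collect the RDS CloudWatch metrics
# documented above. Credential option names follow the aws module's settings.
- module: aws
  period: 60s
  metricsets:
    - rds
  access_key_id: '${AWS_ACCESS_KEY_ID}'
  secret_access_key: '${AWS_SECRET_ACCESS_KEY}'
----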
-*`beat.stats.libbeat.output.write.errors`*:: -+ --- -Number of write errors +[float] +=== s3_daily_storage -type: long + +`s3_daily_storage` contains the daily storage metrics that were scraped from AWS CloudWatch, which contains monitoring metrics sent by AWS S3. + +*`aws.s3_daily_storage.bucket.size.bytes`*:: ++ -- +The amount of data in bytes stored in a bucket. -[[exported-fields-ceph]] -== Ceph fields -Ceph module +type: long +format: bytes +-- -[float] -=== ceph +*`aws.s3_daily_storage.number_of_objects`*:: ++ +-- +The total number of objects stored in a bucket for all storage classes. -`ceph` contains the metrics that were scraped from CEPH. +type: long +-- [float] -=== cluster_disk +=== s3_request -cluster_disk +`s3_request` contains request metrics that were scraped from AWS CloudWatch, which contains monitoring metrics sent by AWS S3. -*`ceph.cluster_disk.available.bytes`*:: +*`aws.s3_request.requests.total`*:: + -- -Available bytes of the cluster +The total number of HTTP requests made to an Amazon S3 bucket, regardless of type. type: long -format: bytes - -- -*`ceph.cluster_disk.total.bytes`*:: +*`aws.s3_request.requests.get`*:: + -- -Total bytes of the cluster +The number of HTTP GET requests made for objects in an Amazon S3 bucket. type: long -format: bytes - -- -*`ceph.cluster_disk.used.bytes`*:: +*`aws.s3_request.requests.put`*:: + -- -Used bytes of the cluster +The number of HTTP PUT requests made for objects in an Amazon S3 bucket. type: long -format: bytes - -- -[float] -=== cluster_health +*`aws.s3_request.requests.delete`*:: ++ +-- +The number of HTTP DELETE requests made for objects in an Amazon S3 bucket. -cluster_health +type: long +-- -*`ceph.cluster_health.overall_status`*:: +*`aws.s3_request.requests.head`*:: + -- -Overall status of the cluster +The number of HTTP HEAD requests made to an Amazon S3 bucket. -type: keyword +type: long -- -*`ceph.cluster_health.timechecks.epoch`*:: +*`aws.s3_request.requests.post`*:: + -- -Map version +The number of HTTP POST requests made to an Amazon S3 bucket. type: long -- -*`ceph.cluster_health.timechecks.round.value`*:: +*`aws.s3_request.requests.select`*:: + -- -timecheck round +The number of Amazon S3 SELECT Object Content requests made for objects in an Amazon S3 bucket. type: long -- -*`ceph.cluster_health.timechecks.round.status`*:: +*`aws.s3_request.requests.select_scanned.bytes`*:: + -- -Status of the round +The number of bytes of data scanned with Amazon S3 SELECT Object Content requests in an Amazon S3 bucket. -type: keyword +type: long + +format: bytes -- -[float] -=== cluster_status +*`aws.s3_request.requests.select_returned.bytes`*:: ++ +-- +The number of bytes of data returned with Amazon S3 SELECT Object Content requests in an Amazon S3 bucket. -cluster_status +type: long + +format: bytes +-- -*`ceph.cluster_status.version`*:: +*`aws.s3_request.requests.list`*:: + -- -Ceph Status version +The number of HTTP requests that list the contents of a bucket. type: long -- -*`ceph.cluster_status.traffic.read_bytes`*:: +*`aws.s3_request.downloaded.bytes`*:: + -- -Cluster read throughput per second +The number of bytes downloaded for requests made to an Amazon S3 bucket, where the response includes a body. type: long @@ -4028,10 +4047,10 @@ format: bytes -- -*`ceph.cluster_status.traffic.write_bytes`*:: +*`aws.s3_request.uploaded.bytes`*:: + -- -Cluster write throughput per second +The number of bytes uploaded that contain a request body, made to an Amazon S3 bucket.
type: long @@ -4040,709 +4059,755 @@ format: bytes -- -*`ceph.cluster_status.traffic.read_op_per_sec`*:: +*`aws.s3_request.errors.4xx`*:: + -- -Cluster read iops per second +The number of HTTP 4xx client error status code requests made to an Amazon S3 bucket with a value of either 0 or 1. type: long -- -*`ceph.cluster_status.traffic.write_op_per_sec`*:: +*`aws.s3_request.errors.5xx`*:: + -- -Cluster write iops per second +The number of HTTP 5xx server error status code requests made to an Amazon S3 bucket with a value of either 0 or 1. type: long -- -*`ceph.cluster_status.misplace.total`*:: +*`aws.s3_request.latency.first_byte.ms`*:: + -- -Cluster misplace pg number +The per-request time from the complete request being received by an Amazon S3 bucket to when the response starts to be returned. type: long +format: duration + -- -*`ceph.cluster_status.misplace.objects`*:: +*`aws.s3_request.latency.total_request.ms`*:: + -- -Cluster misplace objects number +The elapsed per-request time from the first byte received to the last byte sent to an Amazon S3 bucket. type: long +format: duration + -- -*`ceph.cluster_status.misplace.ratio`*:: +[float] +=== sns + +`sns` contains the metrics that were scraped from AWS CloudWatch which contains monitoring metrics sent by AWS SNS. + + + + +*`aws.sns.metrics.PublishSize.avg`*:: + -- -Cluster misplace ratio +The size of messages published. +type: double -type: scaled_float +-- -format: percent +*`aws.sns.metrics.SMSSuccessRate.avg`*:: ++ +-- +The rate of successful SMS message deliveries. + +type: double -- -*`ceph.cluster_status.degraded.total`*:: +*`aws.sns.metrics.NumberOfMessagesPublished.sum`*:: + -- -Cluster degraded pg number - +The number of messages published to your Amazon SNS topics. type: long -- -*`ceph.cluster_status.degraded.objects`*:: +*`aws.sns.metrics.NumberOfNotificationsDelivered.sum`*:: + -- -Cluster degraded objects number - +The number of messages successfully delivered from your Amazon SNS topics to subscribing endpoints. type: long -- -*`ceph.cluster_status.degraded.ratio`*:: +*`aws.sns.metrics.NumberOfNotificationsFailed.sum`*:: + -- -Cluster degraded ratio +The number of messages that Amazon SNS failed to deliver. +type: long -type: scaled_float +-- -format: percent +*`aws.sns.metrics.NumberOfNotificationsFilteredOut.sum`*:: ++ +-- +The number of messages that were rejected by subscription filter policies. + +type: long -- -*`ceph.cluster_status.pg.data_bytes`*:: +*`aws.sns.metrics.NumberOfNotificationsFilteredOut-InvalidAttributes.sum`*:: + -- -Cluster pg data bytes - +The number of messages that were rejected by subscription filter policies because the messages' attributes are invalid – for example, because the attribute JSON is incorrectly formatted. type: long -format: bytes - -- -*`ceph.cluster_status.pg.avail_bytes`*:: +*`aws.sns.metrics.NumberOfNotificationsFilteredOut-NoMessageAttributes.sum`*:: + -- -Cluster available bytes - +The number of messages that were rejected by subscription filter policies because the messages have no attributes. type: long -format: bytes - -- -*`ceph.cluster_status.pg.total_bytes`*:: +*`aws.sns.metrics.NumberOfNotificationsRedrivenToDlq.sum`*:: + -- -Cluster total bytes - +The number of messages that have been moved to a dead-letter queue. type: long -format: bytes - -- -*`ceph.cluster_status.pg.used_bytes`*:: +*`aws.sns.metrics.NumberOfNotificationsFailedToRedriveToDlq.sum`*:: + -- -Cluster used bytes - +The number of messages that couldn't be moved to a dead-letter queue. 
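type: long -format: bytes - --

The `aws.s3_daily_storage.*` and `aws.s3_request.*` fields documented above map to the aws module's `s3_daily_storage` and `s3_request` metricsets. A minimal sketch, assuming a daily cadence for the storage metrics (the 86400s period is an assumption; S3 request metrics also have to be enabled on the bucket before CloudWatch reports them):

[source,yaml]
----
# Illustrative only: S3 storage metrics are reported roughly once per day,
# so a long period avoids redundant CloudWatch API calls.
- module: aws
  period: 86400s
  metricsets:
    - s3_daily_storage
    - s3_request
----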
-*`ceph.cluster_status.pg_state.state_name`*:: +*`aws.sns.metrics.SMSMonthToDateSpentUSD.sum`*:: + -- -Pg state description - +The charges you have accrued since the start of the current calendar month for sending SMS messages. type: long -- -*`ceph.cluster_status.pg_state.count`*:: +[float] +=== sqs + +`sqs` contains the metrics that were scraped from AWS CloudWatch, which contains monitoring metrics sent by AWS SQS. + + + +*`aws.sqs.oldest_message_age.sec`*:: + -- -Shows how many pgs are in state of pg_state.state_name +The approximate age of the oldest non-deleted message in the queue. type: long +format: duration + -- -*`ceph.cluster_status.pg_state.version`*:: +*`aws.sqs.messages.delayed`*:: + -- -Cluster status version +The number of messages in the queue that are delayed and not available for reading immediately. type: long -- -*`ceph.cluster_status.osd.full`*:: +*`aws.sqs.messages.not_visible`*:: + -- -Is osd full +The number of messages that are in flight. -type: boolean +type: long -- -*`ceph.cluster_status.osd.nearfull`*:: +*`aws.sqs.messages.visible`*:: + -- -Is osd near full +The number of messages available for retrieval from the queue. -type: boolean +type: long -- -*`ceph.cluster_status.osd.num_osds`*:: +*`aws.sqs.messages.deleted`*:: + -- -Shows how many osds in the cluster +The number of messages deleted from the queue. type: long -- -*`ceph.cluster_status.osd.num_up_osds`*:: +*`aws.sqs.messages.received`*:: + -- -Shows how many osds are on the state of UP +The number of messages returned by calls to the ReceiveMessage action. type: long -- -*`ceph.cluster_status.osd.num_in_osds`*:: +*`aws.sqs.messages.sent`*:: + -- -Shows how many osds are on the state of IN +The number of messages added to a queue. type: long -- -*`ceph.cluster_status.osd.num_remapped_pgs`*:: +*`aws.sqs.empty_receives`*:: + -- -Shows how many osds are on the state of REMAPPED +The number of ReceiveMessage API calls that did not return a message. type: long -- -*`ceph.cluster_status.osd.epoch`*:: +*`aws.sqs.sent_message_size.bytes`*:: + -- -epoch number +The size of messages added to a queue. type: long +format: bytes + -- -[float] -=== mgr_cluster_disk +*`aws.sqs.queue.name`*:: ++ +-- +SQS queue name -see: cluster_disk +type: keyword + +-- [float] -=== mgr_cluster_health +=== transitgateway -see: cluster_health +`transitgateway` contains the metrics from CloudWatch to track usage of transit gateway related resources. -[float] -=== mgr_osd_perf -OSD performance metrics of Ceph cluster +*`aws.transitgateway.metrics.BytesIn.sum`*:: ++ +-- +The number of bytes received by the transit gateway. +type: long -*`ceph.mgr_osd_perf.id`*:: +-- + +*`aws.transitgateway.metrics.BytesOut.sum`*:: + -- -OSD ID +The number of bytes sent from the transit gateway. type: long -- -*`ceph.mgr_osd_perf.stats.commit_latency_ms`*:: +*`aws.transitgateway.metrics.PacketsIn.sum`*:: + -- -Commit latency in ms +The number of packets received by the transit gateway. type: long -- -*`ceph.mgr_osd_perf.stats.apply_latency_ms`*:: +*`aws.transitgateway.metrics.PacketsOut.sum`*:: + -- -Apply latency in ms +The number of packets sent by the transit gateway. type: long -- -*`ceph.mgr_osd_perf.stats.commit_latency_ns`*:: +*`aws.transitgateway.metrics.PacketDropCountBlackhole.sum`*:: + -- -Commit latency in ns +The number of packets dropped because they matched a blackhole route. type: long --
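The `aws.sqs.*` and `aws.sns.metrics.*` fields above come from the `sqs` and `sns` metricsets. A sketch under the same assumptions as earlier (the 300s period matches CloudWatch's five-minute resolution for these namespaces, but is an assumption, not a requirement):

[source,yaml]
----
# Illustrative: SQS and SNS share one aws module block here only for
# brevity; they can be configured with separate periods.
- module: aws
  period: 300s
  metricsets:
    - sqs
    - sns
----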
-*`ceph.mgr_osd_perf.stats.apply_latency_ns`*:: +*`aws.transitgateway.metrics.PacketDropCountNoRoute.sum`*:: + -- -Apply latency in ns +The number of packets dropped because they did not match a route. type: long -- [float] -=== mgr_osd_pool_stats +=== usage -OSD pool stats of Ceph cluster +`usage` contains the metrics from CloudWatch to track usage of some AWS resources. -*`ceph.mgr_osd_pool_stats.pool_name`*:: + +*`aws.usage.metrics.CallCount.sum`*:: + -- -Pool name +The number of specified API operations performed in your account. -type: keyword +type: long -- -*`ceph.mgr_osd_pool_stats.pool_id`*:: +*`aws.usage.metrics.ResourceCount.sum`*:: + -- -Pool ID +The number of the specified resources running in your account. The resources are defined by the dimensions associated with the metric. type: long -- -*`ceph.mgr_osd_pool_stats.client_io_rate`*:: +[float] +=== vpn + +`vpn` contains the metrics from CloudWatch to track usage of VPN related resources. + + + + +*`aws.vpn.metrics.TunnelState.avg`*:: + -- -Client I/O rates +The state of the tunnel. For static VPNs, 0 indicates DOWN and 1 indicates UP. For BGP VPNs, 1 indicates ESTABLISHED and 0 is used for all other states. -type: object +type: double -- -[float] -=== mgr_osd_tree +*`aws.vpn.metrics.TunnelDataIn.sum`*:: ++ +-- +The bytes received through the VPN tunnel. -see: osd_tree +type: double +-- -[float] -=== mgr_pool_disk +*`aws.vpn.metrics.TunnelDataOut.sum`*:: ++ +-- +The bytes sent through the VPN tunnel. + +type: double + +-- + +[[exported-fields-azure]] +== azure fields + +azure module -see: pool_disk [float] -=== monitor_health +=== azure -monitor_health stats data -*`ceph.monitor_health.available.pct`*:: +*`azure.timegrain`*:: + -- -Available percent of the MON +The Azure metric timegrain -type: long +type: keyword -- -*`ceph.monitor_health.health`*:: +[float] +=== resource + +The resource specified + + + +*`azure.resource.type`*:: + -- -Health of the MON +The type of the resource type: keyword -- -*`ceph.monitor_health.available.kb`*:: +*`azure.resource.group`*:: + -- -Available KB of the MON +The resource group -type: long +type: keyword -- -*`ceph.monitor_health.total.kb`*:: +*`azure.resource.tags.*`*:: + -- -Total KB of the MON +Azure resource tags. -type: long +type: object -- -*`ceph.monitor_health.used.kb`*:: +*`azure.namespace`*:: + -- -Used KB of the MON +The namespace selected -type: long +type: keyword -- -*`ceph.monitor_health.last_updated`*:: +*`azure.subscription_id`*:: + -- -Time when was updated +The subscription ID -type: date +type: keyword --
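Rounding out the aws additions above (`transitgateway`, `usage`, `vpn`) and the azure fields in this section, a combined sketch; the periods, credential placeholders, and metricset groupings are illustrative assumptions, and the azure option names follow the module's documented settings:

[source,yaml]
----
# Illustrative: the three aws light metricsets whose fields are added above.
- module: aws
  period: 300s
  metricsets:
    - transitgateway
    - usage
    - vpn

# Illustrative: the azure monitor metricset that fills azure.* fields.
- module: azure
  metricsets: ["monitor"]
  period: 300s
  client_id: '${AZURE_CLIENT_ID}'
  client_secret: '${AZURE_CLIENT_SECRET}'
  tenant_id: '${AZURE_TENANT_ID}'
  subscription_id: '${AZURE_SUBSCRIPTION_ID}'
----

-*`ceph.monitor_health.name`*:: +*`azure.dimensions.*`*:: + -- -Name of the MON +Azure metric dimensions.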
-type: keyword +type: object -- -*`ceph.monitor_health.store_stats.log.bytes`*:: +*`azure.compute_vm.*.*`*:: + -- -Log bytes of MON - +compute_vm -type: long -format: bytes +type: object -- -*`ceph.monitor_health.store_stats.misc.bytes`*:: +*`azure.compute_vm_scaleset.*.*`*:: + -- -Misc bytes of MON - +compute_vm_scaleset -type: long -format: bytes +type: object -- -*`ceph.monitor_health.store_stats.sst.bytes`*:: +*`azure.container_instance.*.*`*:: + -- -SST bytes of MON - +container instance -type: long -format: bytes +type: object -- -*`ceph.monitor_health.store_stats.total.bytes`*:: +*`azure.container_registry.*.*`*:: + -- -Total bytes of MON +container registry -type: long +type: object -format: bytes +-- + +*`azure.container_service.*.*`*:: ++ +-- +container service + + +type: object -- -*`ceph.monitor_health.store_stats.last_updated`*:: +*`azure.database_account.*.*`*:: + -- -Last updated +database account -type: long +type: object -- [float] -=== osd_df +=== monitor -ceph osd disk usage information +monitor -*`ceph.osd_df.id`*:: +*`azure.monitor.metrics.*.*`*:: + -- -osd node id +Metrics returned. -type: long +type: object -- -*`ceph.osd_df.name`*:: +*`azure.storage.*.*`*:: + -- -osd node name +storage account -type: keyword +type: object -- -*`ceph.osd_df.device_class`*:: +[[exported-fields-beat-common]] +== Beat fields + +Contains common beat fields available in all event types. + + + +*`agent.hostname`*:: + -- -osd node type, illegal type include hdd, ssd etc. - +Hostname of the agent. type: keyword -- -*`ceph.osd_df.total.byte`*:: +*`beat.timezone`*:: + -- -osd disk total volume - - -type: long +type: alias -format: bytes +alias to: event.timezone -- -*`ceph.osd_df.used.byte`*:: +*`fields`*:: + -- -osd disk usage volume - +Contains user configurable fields. -type: long -format: bytes +type: object -- -*`ceph.osd_df.available.bytes`*:: +*`beat.name`*:: + -- -osd disk available volume - - -type: long +type: alias -format: bytes +alias to: host.name -- -*`ceph.osd_df.pg_num`*:: +*`beat.hostname`*:: + -- -shows how many pg located on this osd - +type: alias -type: long +alias to: agent.hostname -- -*`ceph.osd_df.used.pct`*:: +*`timeseries.instance`*:: + -- -osd disk usage percentage +Time series instance id +type: keyword -type: scaled_float +-- + +[[exported-fields-beat]] +== Beat fields + +Beat module -format: percent --- [float] -=== osd_tree +=== beat -ceph osd tree info -*`ceph.osd_tree.id`*:: +*`beat.id`*:: + -- -osd or bucket node id +Beat ID. -type: long +type: keyword -- -*`ceph.osd_tree.name`*:: +*`beat.type`*:: + -- -osd or bucket node name +Beat type. type: keyword -- -*`ceph.osd_tree.type`*:: -+ --- -osd or bucket node type, illegal type include osd, host, root etc. +[float] +=== state +Beat state -type: keyword --- -*`ceph.osd_tree.type_id`*:: +*`beat.state.management.enabled`*:: + -- -osd or bucket node typeID +Is central management enabled? -type: long +type: boolean -- -*`ceph.osd_tree.children`*:: +*`beat.state.module.count`*:: + -- -bucket children list, separated by comma. 
+Number of modules enabled -type: keyword +type: integer -- -*`ceph.osd_tree.crush_weight`*:: +*`beat.state.output.name`*:: + -- -osd node crush weight +Name of output used by Beat -type: float +type: keyword -- -*`ceph.osd_tree.depth`*:: +*`beat.state.queue.name`*:: + -- -node depth +Name of queue being used by Beat -type: long +type: keyword -- -*`ceph.osd_tree.exists`*:: -+ --- -is node still exist or not(1-yes, 0-no) +[float] +=== stats +Beat stats -type: boolean --- -*`ceph.osd_tree.primary_affinity`*:: +*`beat.stats.uptime.ms`*:: + -- -the weight of reading data from primary osd +Beat uptime -type: float +type: long -- -*`ceph.osd_tree.reweight`*:: +*`beat.stats.runtime.goroutines`*:: + -- -the reweight of osd +Number of goroutines running in Beat type: long -- -*`ceph.osd_tree.status`*:: -+ --- -status of osd, it should be up or down +[float] +=== libbeat +Fields common to all Beats -type: keyword --- -*`ceph.osd_tree.device_class`*:: -+ --- -the device class of osd, like hdd, ssd etc. +[float] +=== output +Output stats -type: keyword --- -*`ceph.osd_tree.father`*:: +*`beat.stats.libbeat.output.type`*:: + -- -the parent node of this osd or bucket node +Type of output type: keyword @@ -4750,200 +4815,244 @@ type: keyword -- [float] -=== pool_disk +=== events -pool_disk +Event counters -*`ceph.pool_disk.id`*:: +*`beat.stats.libbeat.output.events.acked`*:: + -- -Id of the pool +Number of events acknowledged type: long -- -*`ceph.pool_disk.name`*:: +*`beat.stats.libbeat.output.events.active`*:: + -- -Name of the pool +Number of active events -type: keyword +type: long -- -*`ceph.pool_disk.stats.available.bytes`*:: +*`beat.stats.libbeat.output.events.batches`*:: + -- -Available bytes of the pool +Number of event batches type: long -format: bytes - -- -*`ceph.pool_disk.stats.objects`*:: +*`beat.stats.libbeat.output.events.dropped`*:: + -- -Number of objects of the pool +Number of events dropped type: long -- -*`ceph.pool_disk.stats.used.bytes`*:: +*`beat.stats.libbeat.output.events.duplicates`*:: + -- -Used bytes of the pool +Number of events duplicated type: long -format: bytes - -- -*`ceph.pool_disk.stats.used.kb`*:: +*`beat.stats.libbeat.output.events.failed`*:: + -- -Used kb of the pool +Number of events failed type: long -- -[[exported-fields-cloud]] -== Cloud provider metadata fields +*`beat.stats.libbeat.output.events.toomany`*:: ++ +-- +Number of too many events -Metadata from cloud providers added by the add_cloud_metadata processor. +type: long +-- -*`cloud.project.id`*:: +*`beat.stats.libbeat.output.events.total`*:: + -- -Name of the project in Google Cloud. +Total number of events -example: project-x +type: long -- -*`cloud.image.id`*:: -+ --- -Image ID for the cloud instance. 
+[float] +=== read +Read stats -example: ami-abcd1234 --- -*`meta.cloud.provider`*:: +*`beat.stats.libbeat.output.read.bytes`*:: + -- -type: alias +Number of bytes read -alias to: cloud.provider + +type: long -- -*`meta.cloud.instance_id`*:: +*`beat.stats.libbeat.output.read.errors`*:: + -- -type: alias +Number of read errors -alias to: cloud.instance.id + +type: long -- -*`meta.cloud.instance_name`*:: +[float] +=== write + +Write stats + + + +*`beat.stats.libbeat.output.write.bytes`*:: + -- -type: alias +Number of bytes written -alias to: cloud.instance.name + +type: long -- -*`meta.cloud.machine_type`*:: +*`beat.stats.libbeat.output.write.errors`*:: + -- -type: alias +Number of write errors -alias to: cloud.machine.type + +type: long -- -*`meta.cloud.availability_zone`*:: +[[exported-fields-ceph]] +== Ceph fields + +Ceph module + + + +[float] +=== ceph + +`ceph` contains the metrics that were scraped from CEPH. + + + +[float] +=== cluster_disk + +cluster_disk + + + +*`ceph.cluster_disk.available.bytes`*:: + -- -type: alias +Available bytes of the cluster -alias to: cloud.availability_zone + +type: long + +format: bytes -- -*`meta.cloud.project_id`*:: +*`ceph.cluster_disk.total.bytes`*:: + -- -type: alias +Total bytes of the cluster -alias to: cloud.project.id + +type: long + +format: bytes -- -*`meta.cloud.region`*:: +*`ceph.cluster_disk.used.bytes`*:: + -- -type: alias +Used bytes of the cluster -alias to: cloud.region + +type: long + +format: bytes -- -[[exported-fields-cloudfoundry]] -== cloudfoundry fields +[float] +=== cluster_health -Cloud Foundry module +cluster_health -[float] -=== cloudfoundry +*`ceph.cluster_health.overall_status`*:: ++ +-- +Overall status of the cluster +type: keyword +-- -*`cloudfoundry.type`*:: +*`ceph.cluster_health.timechecks.epoch`*:: + -- -The type of event from Cloud Foundry. Possible values include 'container', 'counter' and 'value'. +Map version -type: keyword +type: long -- -[float] -=== app +*`ceph.cluster_health.timechecks.round.value`*:: ++ +-- +timecheck round -The application the metric is associated with. +type: long +-- -*`cloudfoundry.app.id`*:: +*`ceph.cluster_health.timechecks.round.status`*:: + -- -The ID of the application. +Status of the round type: keyword @@ -4951,765 +5060,786 @@ type: keyword -- [float] -=== container +=== cluster_status -`container` contains container metrics from Cloud Foundry. +cluster_status -*`cloudfoundry.container.instance_index`*:: +*`ceph.cluster_status.version`*:: + -- -Index of the instance the metric belongs to. +Ceph Status version type: long -- -*`cloudfoundry.container.cpu.pct`*:: +*`ceph.cluster_status.traffic.read_bytes`*:: + -- -CPU usage percentage. +Cluster read throughput per second -type: float +type: long + +format: bytes -- -*`cloudfoundry.container.memory.bytes`*:: +*`ceph.cluster_status.traffic.write_bytes`*:: + -- -Bytes of used memory. +Cluster write throughput per second type: long +format: bytes + -- -*`cloudfoundry.container.memory.quota.bytes`*:: +*`ceph.cluster_status.traffic.read_op_per_sec`*:: + -- -Bytes of available memory. +Cluster read iops per second type: long -- -*`cloudfoundry.container.disk.bytes`*:: +*`ceph.cluster_status.traffic.write_op_per_sec`*:: + -- -Bytes of used storage. +Cluster write iops per second type: long -- -*`cloudfoundry.container.disk.quota.bytes`*:: +*`ceph.cluster_status.misplace.total`*:: + -- -Bytes of available storage. 
+Cluster misplace pg number type: long -- -[float] -=== counter +*`ceph.cluster_status.misplace.objects`*:: ++ +-- +Cluster misplace objects number -`counter` contains counter metrics from Cloud Foundry. +type: long +-- -*`cloudfoundry.counter.name`*:: +*`ceph.cluster_status.misplace.ratio`*:: + -- -The name of the counter. +Cluster misplace ratio -type: keyword +type: scaled_float + +format: percent -- -*`cloudfoundry.counter.delta`*:: +*`ceph.cluster_status.degraded.total`*:: + -- -The difference between the last time the counter event occurred. +Cluster degraded pg number type: long -- -*`cloudfoundry.counter.total`*:: +*`ceph.cluster_status.degraded.objects`*:: + -- -The total value for the counter. +Cluster degraded objects number type: long -- -[float] -=== value +*`ceph.cluster_status.degraded.ratio`*:: ++ +-- +Cluster degraded ratio -`value` contains counter metrics from Cloud Foundry. +type: scaled_float +format: percent -*`cloudfoundry.value.name`*:: +-- + +*`ceph.cluster_status.pg.data_bytes`*:: + -- -The name of the value. +Cluster pg data bytes -type: keyword +type: long + +format: bytes -- -*`cloudfoundry.value.unit`*:: +*`ceph.cluster_status.pg.avail_bytes`*:: + -- -The unit of the value. +Cluster available bytes -type: keyword +type: long + +format: bytes -- -*`cloudfoundry.value.value`*:: +*`ceph.cluster_status.pg.total_bytes`*:: + -- -The value of the value. +Cluster total bytes -type: float +type: long + +format: bytes -- -[[exported-fields-cockroachdb]] -== CockroachDB fields +*`ceph.cluster_status.pg.used_bytes`*:: ++ +-- +Cluster used bytes -CockroachDB module +type: long +format: bytes +-- -[[exported-fields-common]] -== Common fields +*`ceph.cluster_status.pg_state.state_name`*:: ++ +-- +Pg state description -Contains common fields available in all event types. +type: long +-- -*`metricset.module`*:: +*`ceph.cluster_status.pg_state.count`*:: + -- -The name of the module that generated the event. - +Shows how many pgs are in state of pg_state.state_name -type: alias -alias to: event.module +type: long -- -*`metricset.name`*:: +*`ceph.cluster_status.pg_state.version`*:: + -- -The name of the metricset that generated the event. +Cluster status version +type: long + -- -*`metricset.period`*:: +*`ceph.cluster_status.osd.full`*:: + -- -Current data collection period for this event in milliseconds. +Is osd full -type: integer +type: boolean -- -*`service.address`*:: +*`ceph.cluster_status.osd.nearfull`*:: + -- -Address of the machine where the service is running. This field may not be present when the data was collected locally. +Is osd near full +type: boolean + -- -*`service.hostname`*:: +*`ceph.cluster_status.osd.num_osds`*:: + -- -Host name of the machine where the service is running. +Shows how many osds in the cluster +type: long + -- -*`type`*:: +*`ceph.cluster_status.osd.num_up_osds`*:: + -- -The document type. Always set to "doc". 
- +Shows how many osds are on the state of UP -example: metricsets -required: True +type: long -- -*`systemd.fragment_path`*:: +*`ceph.cluster_status.osd.num_in_osds`*:: + -- -the location of the systemd unit path +Shows how many osds are on the state of IN -type: keyword + +type: long -- -*`systemd.unit`*:: +*`ceph.cluster_status.osd.num_remapped_pgs`*:: + -- -the unit name of the systemd service +Shows how many osds are on the state of REMAPPED -type: keyword --- +type: long -[[exported-fields-consul]] -== consul fields +-- -Consul module +*`ceph.cluster_status.osd.epoch`*:: ++ +-- +epoch number +type: long +-- [float] -=== agent - -Agent Metricset fetches metrics information from a Consul instance running as Agent - +=== mgr_cluster_disk +see: cluster_disk -*`consul.agent.autopilot.healthy`*:: -+ --- -Overall health of the local server cluster +[float] +=== mgr_cluster_health -type: boolean +see: cluster_health --- [float] -=== runtime +=== mgr_osd_perf -Runtime related metrics +OSD performance metrics of Ceph cluster -*`consul.agent.runtime.sys.bytes`*:: +*`ceph.mgr_osd_perf.id`*:: + -- -Number of bytes of memory obtained from the OS. +OSD ID type: long -- -*`consul.agent.runtime.malloc_count`*:: +*`ceph.mgr_osd_perf.stats.commit_latency_ms`*:: + -- -Heap objects allocated +Commit latency in ms type: long -- -*`consul.agent.runtime.heap_objects`*:: +*`ceph.mgr_osd_perf.stats.apply_latency_ms`*:: + -- -Objects allocated on the heap and is a general memory pressure indicator. This may burst from time to time but should return to a steady state value. +Apply latency in ms type: long -- -*`consul.agent.runtime.goroutines`*:: +*`ceph.mgr_osd_perf.stats.commit_latency_ns`*:: + -- -Running goroutines and is a general load pressure indicator. This may burst from time to time but should return to a steady state value. +Commit latency in ns type: long -- - -*`consul.agent.runtime.alloc.bytes`*:: +*`ceph.mgr_osd_perf.stats.apply_latency_ns`*:: + -- -Bytes allocated by the Consul process. +Apply latency in ns type: long -- [float] -=== garbage_collector +=== mgr_osd_pool_stats -Garbage collector metrics +OSD pool stats of Ceph cluster -*`consul.agent.runtime.garbage_collector.runs`*:: + +*`ceph.mgr_osd_pool_stats.pool_name`*:: + -- -Garbage collector total executions +Pool name -type: long +type: keyword -- -[float] -=== pause - -Time that the garbage collector has paused the app - - - -*`consul.agent.runtime.garbage_collector.pause.current.ns`*:: +*`ceph.mgr_osd_pool_stats.pool_id`*:: + -- -Garbage collector pause time in nanoseconds +Pool ID type: long -- - -*`consul.agent.runtime.garbage_collector.pause.total.ns`*:: +*`ceph.mgr_osd_pool_stats.client_io_rate`*:: + -- -Nanoseconds consumed by stop-the-world garbage collection pauses since Consul started. 
+Client I/O rates -type: long +type: object -- -[[exported-fields-coredns]] -== coredns fields - -coredns Module +[float] +=== mgr_osd_tree +see: osd_tree [float] -=== coredns - -`coredns` contains statistics that were read from coreDNS +=== mgr_pool_disk +see: pool_disk [float] -=== stats +=== monitor_health -Contains statistics related to the coreDNS service +monitor_health stats data -*`coredns.stats.panic.count`*:: +*`ceph.monitor_health.available.pct`*:: + -- -Total number of panics +Available percent of the MON type: long -- -*`coredns.stats.dns.request.count`*:: +*`ceph.monitor_health.health`*:: + -- -Total query count +Health of the MON -type: long +type: keyword -- -*`coredns.stats.dns.request.duration.ns.bucket.*`*:: +*`ceph.monitor_health.available.kb`*:: + -- -Request duration histogram buckets in nanoseconds +Available KB of the MON -type: object +type: long -- -*`coredns.stats.dns.request.duration.ns.sum`*:: +*`ceph.monitor_health.total.kb`*:: + -- -Requests duration, sum of durations in nanoseconds +Total KB of the MON type: long -format: duration - -- -*`coredns.stats.dns.request.duration.ns.count`*:: +*`ceph.monitor_health.used.kb`*:: + -- -Requests duration, number of requests +Used KB of the MON type: long -- -*`coredns.stats.dns.request.size.bytes.bucket.*`*:: +*`ceph.monitor_health.last_updated`*:: + -- -Request Size histogram buckets +Time when it was last updated -type: object +type: date -- -*`coredns.stats.dns.request.size.bytes.sum`*:: +*`ceph.monitor_health.name`*:: + -- -Request Size histogram sum +Name of the MON -type: long +type: keyword -- -*`coredns.stats.dns.request.size.bytes.count`*:: +*`ceph.monitor_health.store_stats.log.bytes`*:: + -- -Request Size histogram count +Log bytes of MON type: long +format: bytes + -- -*`coredns.stats.dns.request.do.count`*:: +*`ceph.monitor_health.store_stats.misc.bytes`*:: + -- -Number of queries that have the DO bit set +Misc bytes of MON type: long +format: bytes + -- -*`coredns.stats.dns.request.type.count`*:: +*`ceph.monitor_health.store_stats.sst.bytes`*:: + -- -Counter of queries per zone and type +SST bytes of MON type: long +format: bytes + -- -*`coredns.stats.type`*:: +*`ceph.monitor_health.store_stats.total.bytes`*:: + -- -Holds the query type of the request +Total bytes of MON -type: keyword +type: long + +format: bytes -- -*`coredns.stats.dns.response.rcode.count`*:: +*`ceph.monitor_health.store_stats.last_updated`*:: + -- -Counter of responses per zone and rcode +Last updated type: long -- -*`coredns.stats.rcode`*:: +[float] +=== osd_df + +ceph osd disk usage information + + + +*`ceph.osd_df.id`*:: + -- -Holds the rcode of the response +osd node id -type: keyword +type: long -- -*`coredns.stats.family`*:: +*`ceph.osd_df.name`*:: + -- -The address family of the transport (1 = IP (IP version 4), 2 = IP6 (IP version 6)) +osd node name type: keyword -- -*`coredns.stats.dns.response.size.bytes.bucket.*`*:: +*`ceph.osd_df.device_class`*:: + -- -Response Size histogram buckets +osd node type; valid types include hdd, ssd, etc.
-type: object +type: keyword -- -*`coredns.stats.dns.response.size.bytes.sum`*:: +*`ceph.osd_df.total.byte`*:: + -- -Response Size histogram sum +osd disk total volume type: long +format: bytes + -- -*`coredns.stats.dns.response.size.bytes.count`*:: +*`ceph.osd_df.used.byte`*:: + -- -Response Size histogram count +osd disk usage volume type: long +format: bytes + -- -*`coredns.stats.server`*:: +*`ceph.osd_df.available.bytes`*:: + -- -The server responsible for the request +osd disk available volume -type: keyword +type: long + +format: bytes -- -*`coredns.stats.zone`*:: +*`ceph.osd_df.pg_num`*:: + -- -The zonename used for the request/response +shows how many pgs are located on this osd -type: keyword +type: long -- -*`coredns.stats.proto`*:: +*`ceph.osd_df.used.pct`*:: + -- -The transport of the response ("udp" or "tcp") +osd disk usage percentage -type: keyword +type: scaled_float --- +format: percent -*`coredns.stats.dns.cache.hits.count`*:: + -- -Cache hits count for the cache plugin +[float] +=== osd_tree -type: long +ceph osd tree info --- -*`coredns.stats.dns.cache.misses.count`*:: + +*`ceph.osd_tree.id`*:: + -- -Cache misses count for the cache plugin +osd or bucket node id type: long -- -[[exported-fields-couchbase]] -== Couchbase fields - -Metrics collected from Couchbase servers. - - - -[float] -=== couchbase +*`ceph.osd_tree.name`*:: ++ +-- +osd or bucket node name -`couchbase` contains the metrics that were scraped from Couchbase. +type: keyword +-- -[float] -=== bucket +*`ceph.osd_tree.type`*:: ++ +-- +osd or bucket node type; valid types include osd, host, root, etc. -Couchbase bucket metrics. +type: keyword +-- -*`couchbase.bucket.name`*:: +*`ceph.osd_tree.type_id`*:: + -- -Name of the bucket. +osd or bucket node typeID -type: keyword +type: long -- -*`couchbase.bucket.type`*:: +*`ceph.osd_tree.children`*:: + -- -Type of the bucket. +bucket children list, separated by comma. type: keyword -- -*`couchbase.bucket.data.used.bytes`*:: +*`ceph.osd_tree.crush_weight`*:: + -- -Size of user data within buckets of the specified state that are resident in RAM. - +osd node crush weight -type: long -format: bytes +type: float -- -*`couchbase.bucket.disk.fetches`*:: +*`ceph.osd_tree.depth`*:: + -- -Number of disk fetches. +node depth type: long -- -*`couchbase.bucket.disk.used.bytes`*:: +*`ceph.osd_tree.exists`*:: + -- -Amount of disk used (bytes). - +whether the node still exists (1-yes, 0-no) -type: long -format: bytes +type: boolean -- -*`couchbase.bucket.memory.used.bytes`*:: +*`ceph.osd_tree.primary_affinity`*:: + -- -Amount of memory used by the bucket (bytes). - +the weight of reading data from primary osd -type: long -format: bytes +type: float -- -*`couchbase.bucket.quota.ram.bytes`*:: +*`ceph.osd_tree.reweight`*:: + -- -Amount of RAM used by the bucket (bytes). +the reweight of osd type: long -format: bytes - -- -*`couchbase.bucket.quota.use.pct`*:: +*`ceph.osd_tree.status`*:: + -- -Percentage of RAM used (for active objects) against the configured bucket size (%). - +status of the osd; it should be up or down -type: scaled_float -format: percent +type: keyword -- -*`couchbase.bucket.ops_per_sec`*:: +*`ceph.osd_tree.device_class`*:: + -- -Number of operations per second. +the device class of the osd, like hdd, ssd, etc. -type: long +type: keyword --
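The `ceph.*` fields being rearranged above and below are collected by the ceph module. A minimal sketch, assuming the ceph-rest-api listens on its default port (metricset list, period, and host are all illustrative):

[source,yaml]
----
# Illustrative: the ceph metricsets whose fields appear in this section.
- module: ceph
  metricsets: ["cluster_disk", "cluster_health", "monitor_health", "osd_df", "osd_tree", "pool_disk"]
  period: 10s
  hosts: ["localhost:5000"]
----

-*`couchbase.bucket.item_count`*:: +*`ceph.osd_tree.father`*:: + -- -Number of items associated with the bucket.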
+the parent node of this osd or bucket node -type: long +type: keyword -- [float] -=== cluster +=== pool_disk -Couchbase cluster metrics. +pool_disk -*`couchbase.cluster.hdd.free.bytes`*:: +*`ceph.pool_disk.id`*:: + -- -Free hard drive space in the cluster (bytes). +Id of the pool type: long -format: bytes - -- -*`couchbase.cluster.hdd.quota.total.bytes`*:: +*`ceph.pool_disk.name`*:: + -- -Hard drive quota total for the cluster (bytes). - +Name of the pool -type: long -format: bytes +type: keyword -- -*`couchbase.cluster.hdd.total.bytes`*:: +*`ceph.pool_disk.stats.available.bytes`*:: + -- -Total hard drive space available to the cluster (bytes). +Available bytes of the pool type: long @@ -5718,22 +5848,20 @@ format: bytes -- -*`couchbase.cluster.hdd.used.value.bytes`*:: +*`ceph.pool_disk.stats.objects`*:: + -- -Hard drive space used by the cluster (bytes). +Number of objects of the pool type: long -format: bytes - -- -*`couchbase.cluster.hdd.used.by_data.bytes`*:: +*`ceph.pool_disk.stats.used.bytes`*:: + -- -Hard drive space used by the data in the cluster (bytes). +Used bytes of the pool type: long @@ -5742,1059 +5870,1073 @@ format: bytes -- -*`couchbase.cluster.max_bucket_count`*:: +*`ceph.pool_disk.stats.used.kb`*:: + -- -Max bucket count setting. +Used kb of the pool type: long -- -*`couchbase.cluster.quota.index_memory.mb`*:: -+ --- -Memory quota setting for the Index service (Mbyte). +[[exported-fields-cloud]] +== Cloud provider metadata fields +Metadata from cloud providers added by the add_cloud_metadata processor. -type: long --- -*`couchbase.cluster.quota.memory.mb`*:: +*`cloud.project.id`*:: + -- -Memory quota setting for the cluster (Mbyte). +Name of the project in Google Cloud. -type: long +example: project-x -- -*`couchbase.cluster.ram.quota.total.value.bytes`*:: +*`cloud.image.id`*:: + -- -RAM quota total for the cluster (bytes). - +Image ID for the cloud instance. -type: long -format: bytes +example: ami-abcd1234 -- -*`couchbase.cluster.ram.quota.total.per_node.bytes`*:: +*`meta.cloud.provider`*:: + -- -RAM quota used by the current node in the cluster (bytes). +type: alias +alias to: cloud.provider -type: long +-- -format: bytes +*`meta.cloud.instance_id`*:: ++ +-- +type: alias + +alias to: cloud.instance.id -- -*`couchbase.cluster.ram.quota.used.value.bytes`*:: +*`meta.cloud.instance_name`*:: + -- -RAM quota used by the cluster (bytes). +type: alias +alias to: cloud.instance.name -type: long +-- -format: bytes +*`meta.cloud.machine_type`*:: ++ +-- +type: alias + +alias to: cloud.machine.type -- -*`couchbase.cluster.ram.quota.used.per_node.bytes`*:: +*`meta.cloud.availability_zone`*:: + -- -Ram quota used by the current node in the cluster (bytes) +type: alias + +alias to: cloud.availability_zone +-- -type: long +*`meta.cloud.project_id`*:: ++ +-- +type: alias -format: bytes +alias to: cloud.project.id -- -*`couchbase.cluster.ram.total.bytes`*:: +*`meta.cloud.region`*:: + -- -Total RAM available to cluster (bytes). +type: alias +alias to: cloud.region -type: long +-- -format: bytes +[[exported-fields-cloudfoundry]] +== cloudfoundry fields --- +Cloud Foundry module -*`couchbase.cluster.ram.used.value.bytes`*:: -+ --- -RAM used by the cluster (bytes). -type: long +[float] +=== cloudfoundry -format: bytes --- -*`couchbase.cluster.ram.used.by_data.bytes`*:: + +*`cloudfoundry.type`*:: + -- -RAM used by the data in the cluster (bytes). - +The type of event from Cloud Foundry. Possible values include 'container', 'counter' and 'value'. 
-type: long -format: bytes +type: keyword -- [float] -=== node +=== app -Couchbase node metrics. +The application the metric is associated with. -*`couchbase.node.cmd_get`*:: +*`cloudfoundry.app.id`*:: + -- -Number of get commands +The ID of the application. -type: long +type: keyword -- -*`couchbase.node.couch.docs.disk_size.bytes`*:: +[float] +=== container + +`container` contains container metrics from Cloud Foundry. + + + +*`cloudfoundry.container.instance_index`*:: + -- -Amount of disk space used by Couch docs (bytes). +Index of the instance the metric belongs to. type: long -format: bytes - -- -*`couchbase.node.couch.docs.data_size.bytes`*:: +*`cloudfoundry.container.cpu.pct`*:: + -- -Data size of Couch docs associated with a node (bytes). - +CPU usage percentage. -type: long -format: bytes +type: float -- -*`couchbase.node.couch.spatial.data_size.bytes`*:: +*`cloudfoundry.container.memory.bytes`*:: + -- -Size of object data for spatial views (bytes). +Bytes of used memory. type: long -- -*`couchbase.node.couch.spatial.disk_size.bytes`*:: +*`cloudfoundry.container.memory.quota.bytes`*:: + -- -Amount of disk space used by spatial views (bytes). +Bytes of available memory. type: long -- -*`couchbase.node.couch.views.disk_size.bytes`*:: +*`cloudfoundry.container.disk.bytes`*:: + -- -Amount of disk space used by Couch views (bytes). +Bytes of used storage. type: long -- -*`couchbase.node.couch.views.data_size.bytes`*:: +*`cloudfoundry.container.disk.quota.bytes`*:: + -- -Size of object data for Couch views (bytes). +Bytes of available storage. type: long -- -*`couchbase.node.cpu_utilization_rate.pct`*:: -+ --- -The CPU utilization rate (%). +[float] +=== counter +`counter` contains counter metrics from Cloud Foundry. -type: scaled_float --- -*`couchbase.node.current_items.value`*:: +*`cloudfoundry.counter.name`*:: + -- -Number of current items. +The name of the counter. -type: long +type: keyword -- -*`couchbase.node.current_items.total`*:: +*`cloudfoundry.counter.delta`*:: + -- -Total number of items associated with the node. +The difference between the last time the counter event occurred. type: long -- -*`couchbase.node.ep_bg_fetched`*:: +*`cloudfoundry.counter.total`*:: + -- -Number of disk fetches performed since the server was started. +The total value for the counter. type: long -- -*`couchbase.node.get_hits`*:: -+ --- -Number of get hits. +[float] +=== value +`value` contains counter metrics from Cloud Foundry. -type: long --- -*`couchbase.node.hostname`*:: +*`cloudfoundry.value.name`*:: + -- -The hostname of the node. +The name of the value. type: keyword -- -*`couchbase.node.mcd_memory.allocated.bytes`*:: +*`cloudfoundry.value.unit`*:: + -- -Amount of memcached memory allocated (bytes). - +The unit of the value. -type: long -format: bytes +type: keyword -- -*`couchbase.node.mcd_memory.reserved.bytes`*:: +*`cloudfoundry.value.value`*:: + -- -Amount of memcached memory reserved (bytes). +The value of the value. -type: long +type: float -- -*`couchbase.node.memory.free.bytes`*:: -+ --- -Amount of memory free for the node (bytes). +[[exported-fields-cockroachdb]] +== CockroachDB fields +CockroachDB module -type: long --- -*`couchbase.node.memory.total.bytes`*:: + +[[exported-fields-common]] +== Common fields + +Contains common fields available in all event types. + + + +*`metricset.module`*:: + -- -Total memory available to the node (bytes). +The name of the module that generated the event. 
-type: long +type: alias + +alias to: event.module -- -*`couchbase.node.memory.used.bytes`*:: +*`metricset.name`*:: + -- -Memory used by the node (bytes). - +The name of the metricset that generated the event. -type: long -- -*`couchbase.node.ops`*:: +*`metricset.period`*:: + -- -Number of operations performed on Couchbase. +Current data collection period for this event in milliseconds. -type: long +type: integer -- -*`couchbase.node.swap.total.bytes`*:: +*`service.address`*:: + -- -Total swap size allocated (bytes). - +Address of the machine where the service is running. This field may not be present when the data was collected locally. -type: long -- -*`couchbase.node.swap.used.bytes`*:: +*`service.hostname`*:: + -- -Amount of swap space used (bytes). - +Host name of the machine where the service is running. -type: long -- -*`couchbase.node.uptime.sec`*:: +*`type`*:: + -- -Time during which the node was in operation (sec). +The document type. Always set to "doc". -type: long +example: metricsets + +required: True -- -*`couchbase.node.vb_replica_curr_items`*:: +*`systemd.fragment_path`*:: + -- -Number of items/documents that are replicas. +the location of the systemd unit path +type: keyword -type: long +-- +*`systemd.unit`*:: ++ -- +the unit name of the systemd service -[[exported-fields-couchdb]] -== CouchDB fields +type: keyword -couchdb module +-- +[[exported-fields-consul]] +== consul fields +Consul module -[float] -=== couchdb -Couchdb metrics [float] -=== server +=== agent -Contains CouchDB server stats +The agent metricset fetches metrics information from a Consul instance running as an agent + + + + +*`consul.agent.autopilot.healthy`*:: ++ +-- +Overall health of the local server cluster +type: boolean +-- [float] -=== httpd +=== runtime -HTTP statistics +Runtime related metrics -*`couchdb.server.httpd.view_reads`*:: +*`consul.agent.runtime.sys.bytes`*:: + -- -Number of view reads - +Number of bytes of memory obtained from the OS. type: long -- -*`couchdb.server.httpd.bulk_requests`*:: +*`consul.agent.runtime.malloc_count`*:: + -- -Number of bulk requests - +Heap objects allocated type: long -- -*`couchdb.server.httpd.clients_requesting_changes`*:: +*`consul.agent.runtime.heap_objects`*:: + -- -Number of clients for continuous _changes - +Objects allocated on the heap; this is a general memory pressure indicator. It may burst from time to time but should return to a steady state value. type: long -- -*`couchdb.server.httpd.temporary_view_reads`*:: +*`consul.agent.runtime.goroutines`*:: + -- -Number of temporary view reads - +Running goroutines; this is a general load pressure indicator. It may burst from time to time but should return to a steady state value. type: long -- -*`couchdb.server.httpd.requests`*:: + +*`consul.agent.runtime.alloc.bytes`*:: + -- -Number of HTTP requests - +Bytes allocated by the Consul process.
type: long -- [float] -=== httpd_request_methods - -HTTP request methods +=== garbage_collector +Garbage collector metrics -*`couchdb.server.httpd_request_methods.COPY`*:: +*`consul.agent.runtime.garbage_collector.runs`*:: + -- -Number of HTTP COPY requests - +Garbage collector total executions type: long -- -*`couchdb.server.httpd_request_methods.HEAD`*:: -+ --- -Number of HTTP HEAD requests +[float] +=== pause +Time that the garbage collector has paused the app -type: long --- -*`couchdb.server.httpd_request_methods.POST`*:: +*`consul.agent.runtime.garbage_collector.pause.current.ns`*:: + -- -Number of HTTP POST requests - +Garbage collector pause time in nanoseconds type: long -- -*`couchdb.server.httpd_request_methods.DELETE`*:: + +*`consul.agent.runtime.garbage_collector.pause.total.ns`*:: + -- -Number of HTTP DELETE requests - +Nanoseconds consumed by stop-the-world garbage collection pauses since Consul started. type: long -- -*`couchdb.server.httpd_request_methods.GET`*:: -+ --- -Number of HTTP GET requests +[[exported-fields-coredns]] +== coredns fields +coredns Module -type: long --- -*`couchdb.server.httpd_request_methods.PUT`*:: -+ --- -Number of HTTP PUT requests +[float] +=== coredns +`coredns` contains statistics that were read from coreDNS -type: long --- [float] -=== httpd_status_codes +=== stats -HTTP status codes statistics +Contains statistics related to the coreDNS service -*`couchdb.server.httpd_status_codes.200`*:: +*`coredns.stats.panic.count`*:: + -- -Number of HTTP 200 OK responses +Total number of panics type: long -- -*`couchdb.server.httpd_status_codes.201`*:: +*`coredns.stats.dns.request.count`*:: + -- -Number of HTTP 201 Created responses +Total query count type: long -- -*`couchdb.server.httpd_status_codes.202`*:: +*`coredns.stats.dns.request.duration.ns.bucket.*`*:: + -- -Number of HTTP 202 Accepted responses +Request duration histogram buckets in nanoseconds -type: long +type: object -- -*`couchdb.server.httpd_status_codes.301`*:: +*`coredns.stats.dns.request.duration.ns.sum`*:: + -- -Number of HTTP 301 Moved Permanently responses +Requests duration, sum of durations in nanoseconds type: long +format: duration + -- -*`couchdb.server.httpd_status_codes.304`*:: +*`coredns.stats.dns.request.duration.ns.count`*:: + -- -Number of HTTP 304 Not Modified responses +Requests duration, number of requests type: long -- -*`couchdb.server.httpd_status_codes.400`*:: +*`coredns.stats.dns.request.size.bytes.bucket.*`*:: + -- -Number of HTTP 400 Bad Request responses +Request Size histogram buckets -type: long +type: object -- -*`couchdb.server.httpd_status_codes.401`*:: +*`coredns.stats.dns.request.size.bytes.sum`*:: + -- -Number of HTTP 401 Unauthorized responses +Request Size histogram sum type: long -- -*`couchdb.server.httpd_status_codes.403`*:: +*`coredns.stats.dns.request.size.bytes.count`*:: + -- -Number of HTTP 403 Forbidden responses +Request Size histogram count type: long -- -*`couchdb.server.httpd_status_codes.404`*:: +*`coredns.stats.dns.request.do.count`*:: + -- -Number of HTTP 404 Not Found responses +Number of queries that have the DO bit set type: long -- -*`couchdb.server.httpd_status_codes.405`*:: +*`coredns.stats.dns.request.type.count`*:: + -- -Number of HTTP 405 Method Not Allowed responses +Counter of queries per zone and type type: long -- -*`couchdb.server.httpd_status_codes.409`*:: +*`coredns.stats.type`*:: + -- -Number of HTTP 409 Conflict responses +Holds the query type of the request -type: long +type: keyword -- 
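The consul and coredns fields shuffled through this part of the diff come from their own modules. A minimal sketch of both, assuming the default local endpoints (8500 for the Consul HTTP API, 9153 for CoreDNS's Prometheus endpoint); both values are illustrative:

[source,yaml]
----
# Illustrative: consul agent metrics and coredns stats side by side.
- module: consul
  metricsets: ["agent"]
  period: 10s
  hosts: ["localhost:8500"]

- module: coredns
  metricsets: ["stats"]
  period: 10s
  hosts: ["localhost:9153"]
----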
-*`couchdb.server.httpd_status_codes.412`*:: +*`coredns.stats.dns.response.rcode.count`*:: + -- -Number of HTTP 412 Precondition Failed responses +Counter of responses per zone and rcode type: long -- -*`couchdb.server.httpd_status_codes.500`*:: +*`coredns.stats.rcode`*:: + -- -Number of HTTP 500 Internal Server Error responses +Holds the rcode of the response -type: long +type: keyword -- -[float] -=== couchdb - -couchdb statistics - - - -*`couchdb.server.couchdb.database_writes`*:: +*`coredns.stats.family`*:: + -- -Number of times a database was changed +The address family of the transport (1 = IP (IP version 4), 2 = IP6 (IP version 6)) -type: long +type: keyword -- -*`couchdb.server.couchdb.open_databases`*:: +*`coredns.stats.dns.response.size.bytes.bucket.*`*:: + -- -Number of open databases +Response Size histogram buckets -type: long +type: object -- -*`couchdb.server.couchdb.auth_cache_misses`*:: +*`coredns.stats.dns.response.size.bytes.sum`*:: + -- -Number of authentication cache misses +Response Size histogram sum type: long -- -*`couchdb.server.couchdb.request_time`*:: +*`coredns.stats.dns.response.size.bytes.count`*:: + -- -Length of a request inside CouchDB without MochiWeb +Response Size histogram count type: long -- -*`couchdb.server.couchdb.database_reads`*:: +*`coredns.stats.server`*:: + -- -Number of times a document was read from a database +The server responsible for the request -type: long +type: keyword -- -*`couchdb.server.couchdb.auth_cache_hits`*:: +*`coredns.stats.zone`*:: + -- -Number of authentication cache hits +The zonename used for the request/response -type: long +type: keyword -- -*`couchdb.server.couchdb.open_os_files`*:: +*`coredns.stats.proto`*:: + -- -Number of file descriptors CouchDB has open +The transport of the response ("udp" or "tcp") -type: long +type: keyword -- -[[exported-fields-docker-processor]] -== Docker fields - -Docker stats collected from Docker. - - - - -*`docker.container.id`*:: +*`coredns.stats.dns.cache.hits.count`*:: + -- -type: alias - -alias to: container.id - --- +Cache hits count for the cache plugin -*`docker.container.image`*:: -+ --- -type: alias -alias to: container.image.name +type: long -- -*`docker.container.name`*:: +*`coredns.stats.dns.cache.misses.count`*:: + -- -type: alias +Cache misses count for the cache plugin -alias to: container.name --- +type: long -*`docker.container.labels`*:: -+ -- -Image labels. +[[exported-fields-couchbase]] +== Couchbase fields -type: object +Metrics collected from Couchbase servers. --- -[[exported-fields-docker]] -== Docker fields -Docker stats collected from Docker. +[float] +=== couchbase + +`couchbase` contains the metrics that were scraped from Couchbase. [float] -=== docker +=== bucket -Information and statistics about docker's running containers. +Couchbase bucket metrics. -[float] -=== container +*`couchbase.bucket.name`*:: ++ +-- +Name of the bucket. -Docker container metrics. +type: keyword +-- -*`docker.container.command`*:: +*`couchbase.bucket.type`*:: + -- -Command that was executed in the Docker container. +Type of the bucket. type: keyword -- -*`docker.container.created`*:: +*`couchbase.bucket.data.used.bytes`*:: + -- -Date when the container was created. +Size of user data within buckets of the specified state that are resident in RAM. -type: date +type: long + +format: bytes -- -*`docker.container.status`*:: +*`couchbase.bucket.disk.fetches`*:: + -- -Container status. +Number of disk fetches. 
-type: keyword
+type: long

--

-*`docker.container.ip_addresses`*::
+*`couchbase.bucket.disk.used.bytes`*::
+
--
-Container IP addresses.
+Amount of disk used (bytes).

-type: ip
+type: long
+
+format: bytes

--

-[float]
-=== size
+*`couchbase.bucket.memory.used.bytes`*::
++
+--
+Amount of memory used by the bucket (bytes).

-Container size metrics.
+type: long
+
+format: bytes
+--

-*`docker.container.size.root_fs`*::
+*`couchbase.bucket.quota.ram.bytes`*::
+
--
-Total size of all the files in the container.
+Amount of RAM used by the bucket (bytes).

type: long

+format: bytes
+

--

-*`docker.container.size.rw`*::
+*`couchbase.bucket.quota.use.pct`*::
+
--
-Size of the files that have been created or changed since creation.
+Percentage of RAM used (for active objects) against the configured bucket size (%).
+
+
+type: scaled_float
+
+format: percent
+
+--
+
+*`couchbase.bucket.ops_per_sec`*::
++
+--
+Number of operations per second.

type: long

--

-*`docker.container.tags`*::
+*`couchbase.bucket.item_count`*::
+
--
-Image tags.
+Number of items associated with the bucket.

-type: keyword
+type: long

--

[float]
-=== cpu
+=== cluster

-Runtime CPU metrics.
+Couchbase cluster metrics.

-*`docker.cpu.kernel.pct`*::
+*`couchbase.cluster.hdd.free.bytes`*::
+
--
-Percentage of time in kernel space.
+Free hard drive space in the cluster (bytes).

-type: scaled_float
+type: long

-format: percent
+format: bytes

--

-*`docker.cpu.kernel.norm.pct`*::
+*`couchbase.cluster.hdd.quota.total.bytes`*::
+
--
-Percentage of time in kernel space normalized by the number of CPU cores.
+Hard drive quota total for the cluster (bytes).

-type: scaled_float
+type: long

-format: percent
+format: bytes

--

-*`docker.cpu.kernel.ticks`*::
+*`couchbase.cluster.hdd.total.bytes`*::
+
--
-CPU ticks in kernel space.
+Total hard drive space available to the cluster (bytes).

type: long

+format: bytes
+

--

-*`docker.cpu.system.pct`*::
+*`couchbase.cluster.hdd.used.value.bytes`*::
+
--
-Percentage of total CPU time in the system.
+Hard drive space used by the cluster (bytes).

-type: scaled_float
+type: long

-format: percent
+format: bytes

--

-*`docker.cpu.system.norm.pct`*::
+*`couchbase.cluster.hdd.used.by_data.bytes`*::
+
--
-Percentage of total CPU time in the system normalized by the number of CPU cores.
+Hard drive space used by the data in the cluster (bytes).

-type: scaled_float
+type: long

-format: percent
+format: bytes

--

-*`docker.cpu.system.ticks`*::
+*`couchbase.cluster.max_bucket_count`*::
+
--
-CPU system ticks.
+Max bucket count setting.

type: long

--

-*`docker.cpu.user.pct`*::
+*`couchbase.cluster.quota.index_memory.mb`*::
+
--
-Percentage of time in user space.
-
+Memory quota setting for the Index service (Mbyte).

-type: scaled_float

-format: percent
+type: long

--

-*`docker.cpu.user.norm.pct`*::
+*`couchbase.cluster.quota.memory.mb`*::
+
--
-Percentage of time in user space normalized by the number of CPU cores.
-
+Memory quota setting for the cluster (Mbyte).

-type: scaled_float

-format: percent
+type: long

--

-*`docker.cpu.user.ticks`*::
+*`couchbase.cluster.ram.quota.total.value.bytes`*::
+
--
-CPU ticks in user space.
+RAM quota total for the cluster (bytes).

type: long

+format: bytes
+

--

-*`docker.cpu.total.pct`*::
+*`couchbase.cluster.ram.quota.total.per_node.bytes`*::
+
--
-Total CPU usage.
+RAM quota total for the current node in the cluster (bytes).

-type: scaled_float
+type: long

-format: percent
+format: bytes

--

-*`docker.cpu.total.norm.pct`*::
+*`couchbase.cluster.ram.quota.used.value.bytes`*::
+
--
-Total CPU usage normalized by the number of CPU cores.
+RAM quota used by the cluster (bytes).

-type: scaled_float
+type: long

-format: percent
+format: bytes

--

-*`docker.cpu.core.*.pct`*::
+*`couchbase.cluster.ram.quota.used.per_node.bytes`*::
+
--
-Percentage of CPU time in this core.
+RAM quota used by the current node in the cluster (bytes).

-type: object
+type: long

-format: percent
+format: bytes

--

-*`docker.cpu.core.*.norm.pct`*::
+*`couchbase.cluster.ram.total.bytes`*::
+
--
-Percentage of CPU time in this core, normalized by the number of CPU cores.
+Total RAM available to the cluster (bytes).

-type: object
+type: long

-format: percent
+format: bytes

--

-*`docker.cpu.core.*.ticks`*::
+*`couchbase.cluster.ram.used.value.bytes`*::
+
--
-Number of CPU ticks in this core.
+RAM used by the cluster (bytes).

-type: object
+type: long
+
+format: bytes

--

-[float]
-=== diskio
+*`couchbase.cluster.ram.used.by_data.bytes`*::
++
+--
+RAM used by the data in the cluster (bytes).

-Disk I/O metrics.
+type: long
+
+format: bytes
+--

[float]
-=== read
+=== node

-Accumulated reads during the life of the container
+Couchbase node metrics.

-*`docker.diskio.read.ops`*::
+*`couchbase.node.cmd_get`*::
+
--
-Number of reads during the life of the container
+Number of get commands

type: long

--

-*`docker.diskio.read.bytes`*::
+*`couchbase.node.couch.docs.disk_size.bytes`*::
+
--
-Bytes read during the life of the container
+Amount of disk space used by Couch docs (bytes).

type: long

@@ -6803,541 +6945,495 @@ format: bytes

--

-*`docker.diskio.read.rate`*::
+*`couchbase.node.couch.docs.data_size.bytes`*::
+
--
-Number of current reads per second
+Data size of Couch docs associated with a node (bytes).

type: long

+format: bytes
+

--

-*`docker.diskio.read.service_time`*::
+*`couchbase.node.couch.spatial.data_size.bytes`*::
+
--
-Total time to service IO requests, in nanoseconds
+Size of object data for spatial views (bytes).

type: long

--

-*`docker.diskio.read.wait_time`*::
+*`couchbase.node.couch.spatial.disk_size.bytes`*::
+
--
-Total time requests spent waiting in queues for service, in nanoseconds
+Amount of disk space used by spatial views (bytes).

type: long

--

-*`docker.diskio.read.queued`*::
+*`couchbase.node.couch.views.disk_size.bytes`*::
+
--
-Total number of queued requests
+Amount of disk space used by Couch views (bytes).

type: long

--

-*`docker.diskio.reads`*::
+*`couchbase.node.couch.views.data_size.bytes`*::
+
--
-
-deprecated:[6.4]
-
-Number of current reads per second
+Size of object data for Couch views (bytes).

-type: scaled_float
+type: long

--

-[float]
-=== write
+*`couchbase.node.cpu_utilization_rate.pct`*::
++
+--
+The CPU utilization rate (%).

-Accumulated writes during the life of the container
+type: scaled_float
+--

-*`docker.diskio.write.ops`*::
+*`couchbase.node.current_items.value`*::
+
--
-Number of writes during the life of the container
+Number of current items.

type: long

--

-*`docker.diskio.write.bytes`*::
+*`couchbase.node.current_items.total`*::
+
--
-Bytes written during the life of the container
+Total number of items associated with the node.

type: long

-format: bytes
-

--

-*`docker.diskio.write.rate`*::
+*`couchbase.node.ep_bg_fetched`*::
+
--
-Number of current writes per second
+Number of disk fetches performed since the server was started.
type: long -- -*`docker.diskio.write.service_time`*:: +*`couchbase.node.get_hits`*:: + -- -Total time to service IO requests, in nanoseconds +Number of get hits. type: long -- -*`docker.diskio.write.wait_time`*:: +*`couchbase.node.hostname`*:: + -- -Total time requests spent waiting in queues for service, in nanoseconds +The hostname of the node. -type: long +type: keyword -- -*`docker.diskio.write.queued`*:: +*`couchbase.node.mcd_memory.allocated.bytes`*:: + -- -Total number of queued requests +Amount of memcached memory allocated (bytes). type: long +format: bytes + -- -*`docker.diskio.writes`*:: +*`couchbase.node.mcd_memory.reserved.bytes`*:: + -- - -deprecated:[6.4] - -Number of current writes per second +Amount of memcached memory reserved (bytes). -type: scaled_float +type: long -- -[float] -=== summary +*`couchbase.node.memory.free.bytes`*:: ++ +-- +Amount of memory free for the node (bytes). -Accumulated reads and writes during the life of the container +type: long +-- -*`docker.diskio.summary.ops`*:: +*`couchbase.node.memory.total.bytes`*:: + -- -Number of I/O operations during the life of the container +Total memory available to the node (bytes). type: long -- -*`docker.diskio.summary.bytes`*:: +*`couchbase.node.memory.used.bytes`*:: + -- -Bytes read and written during the life of the container +Memory used by the node (bytes). type: long -format: bytes - -- -*`docker.diskio.summary.rate`*:: +*`couchbase.node.ops`*:: + -- -Number of current operations per second +Number of operations performed on Couchbase. type: long -- -*`docker.diskio.summary.service_time`*:: +*`couchbase.node.swap.total.bytes`*:: + -- -Total time to service IO requests, in nanoseconds +Total swap size allocated (bytes). type: long -- -*`docker.diskio.summary.wait_time`*:: +*`couchbase.node.swap.used.bytes`*:: + -- -Total time requests spent waiting in queues for service, in nanoseconds +Amount of swap space used (bytes). type: long -- -*`docker.diskio.summary.queued`*:: +*`couchbase.node.uptime.sec`*:: + -- -Total number of queued requests +Time during which the node was in operation (sec). type: long -- -*`docker.diskio.total`*:: +*`couchbase.node.vb_replica_curr_items`*:: + -- +Number of items/documents that are replicas. 
-deprecated:[6.4]
-Number of reads and writes per second
+type: long
+--

-type: scaled_float
+[[exported-fields-couchdb]]
+== CouchDB fields

--
+CouchDB module

-[float]
-=== event

-Docker event
+[float]
+=== couchdb

+CouchDB metrics

-*`docker.event.status`*::
-+
---
-Event status
+[float]
+=== server

-type: keyword
+Contains CouchDB server stats

--

-*`docker.event.id`*::
-+
---
-Event id when available
+[float]
+=== httpd

-type: keyword
+HTTP statistics

--

-*`docker.event.from`*::
+
+*`couchdb.server.httpd.view_reads`*::
+
--
-Event source
+Number of view reads

-type: keyword
+type: long

--

-*`docker.event.type`*::
+*`couchdb.server.httpd.bulk_requests`*::
+
--
-The type of object emitting the event
+Number of bulk requests

-type: keyword
+type: long

--

-*`docker.event.action`*::
+*`couchdb.server.httpd.clients_requesting_changes`*::
+
--
-The type of event
+Number of clients for continuous _changes

-type: keyword
+type: long

--

-[float]
-=== actor
-
-Actor
-
-
-
-*`docker.event.actor.id`*::
+*`couchdb.server.httpd.temporary_view_reads`*::
+
--
-The ID of the object emitting the event
+Number of temporary view reads

-type: keyword
+type: long

--

-*`docker.event.actor.attributes`*::
+*`couchdb.server.httpd.requests`*::
+
--
-Various key/value attributes of the object, depending on its type
+Number of HTTP requests

-type: object
+type: long

--

[float]
-=== healthcheck
+=== httpd_request_methods

-Docker healthcheck metrics.
-Healthcheck data will only be available from docker containers where the docker `HEALTHCHECK` instruction has been used to build the docker image.
+HTTP request methods

-*`docker.healthcheck.failingstreak`*::
+*`couchdb.server.httpd_request_methods.COPY`*::
+
--
-concurent failed check
+Number of HTTP COPY requests

-type: integer
+type: long

--

-*`docker.healthcheck.status`*::
+*`couchdb.server.httpd_request_methods.HEAD`*::
+
--
-Healthcheck status code
+Number of HTTP HEAD requests

-type: keyword
+type: long

--

-[float]
-=== event
-
-event fields.
-
-
-
-*`docker.healthcheck.event.end_date`*::
+*`couchdb.server.httpd_request_methods.POST`*::
+
--
-Healthcheck end date
+Number of HTTP POST requests

-type: date
+type: long

--

-*`docker.healthcheck.event.start_date`*::
+*`couchdb.server.httpd_request_methods.DELETE`*::
+
--
-Healthcheck start date
+Number of HTTP DELETE requests

-type: date
+type: long

--

-*`docker.healthcheck.event.output`*::
+*`couchdb.server.httpd_request_methods.GET`*::
+
--
-Healthcheck output
+Number of HTTP GET requests

-type: keyword
+type: long

--

-*`docker.healthcheck.event.exit_code`*::
+*`couchdb.server.httpd_request_methods.PUT`*::
+
--
-Healthcheck status code
+Number of HTTP PUT requests

-type: integer
+type: long

--

[float]
-=== image
-
-Docker image metrics.
-
-
-
-[float]
-=== id
+=== httpd_status_codes

-The image layers identifier.
+HTTP status codes statistics

-*`docker.image.id.current`*::
+*`couchdb.server.httpd_status_codes.200`*::
+
--
-Unique image identifier given upon its creation.
+Number of HTTP 200 OK responses

-type: keyword
+type: long

--

-*`docker.image.id.parent`*::
+*`couchdb.server.httpd_status_codes.201`*::
+
--
-Identifier of the image, if it exists, from which the current image directly descends.
+Number of HTTP 201 Created responses

-type: keyword
+type: long

--

-*`docker.image.created`*::
+*`couchdb.server.httpd_status_codes.202`*::
+
--
-Date and time when the image was created.
+Number of HTTP 202 Accepted responses

-type: date
+type: long

--
- - - -*`docker.image.size.virtual`*:: +*`couchdb.server.httpd_status_codes.301`*:: + -- -Size of the image. +Number of HTTP 301 Moved Permanently responses type: long -- -*`docker.image.size.regular`*:: +*`couchdb.server.httpd_status_codes.304`*:: + -- -Total size of the all cached images associated to the current image. +Number of HTTP 304 Not Modified responses type: long -- -*`docker.image.labels`*:: +*`couchdb.server.httpd_status_codes.400`*:: + -- -Image labels. +Number of HTTP 400 Bad Request responses -type: object +type: long -- -*`docker.image.tags`*:: +*`couchdb.server.httpd_status_codes.401`*:: + -- -Image tags. +Number of HTTP 401 Unauthorized responses -type: keyword +type: long -- -[float] -=== info - -Info metrics based on https://docs.docker.com/engine/reference/api/docker_remote_api_v1.24/#/display-system-wide-information. - - - -[float] -=== containers - -Overall container stats. - - - -*`docker.info.containers.paused`*:: +*`couchdb.server.httpd_status_codes.403`*:: + -- -Total number of paused containers. +Number of HTTP 403 Forbidden responses type: long -- -*`docker.info.containers.running`*:: +*`couchdb.server.httpd_status_codes.404`*:: + -- -Total number of running containers. +Number of HTTP 404 Not Found responses type: long -- -*`docker.info.containers.stopped`*:: +*`couchdb.server.httpd_status_codes.405`*:: + -- -Total number of stopped containers. +Number of HTTP 405 Method Not Allowed responses type: long -- -*`docker.info.containers.total`*:: +*`couchdb.server.httpd_status_codes.409`*:: + -- -Total number of existing containers. +Number of HTTP 409 Conflict responses type: long -- -*`docker.info.id`*:: +*`couchdb.server.httpd_status_codes.412`*:: + -- -Unique Docker host identifier. +Number of HTTP 412 Precondition Failed responses -type: keyword +type: long -- -*`docker.info.images`*:: +*`couchdb.server.httpd_status_codes.500`*:: + -- -Total number of existing images. +Number of HTTP 500 Internal Server Error responses type: long @@ -7345,1001 +7441,3234 @@ type: long -- [float] -=== memory +=== couchdb -Memory metrics. +couchdb statistics -*`docker.memory.stats.*`*:: +*`couchdb.server.couchdb.database_writes`*:: + -- -Raw memory stats from the cgroups memory.stat interface +Number of times a database was changed -type: object +type: long -- -[float] -=== commit - -Committed bytes on Windows - - - -*`docker.memory.commit.total`*:: +*`couchdb.server.couchdb.open_databases`*:: + -- -Total bytes +Number of open databases type: long -format: bytes - -- -*`docker.memory.commit.peak`*:: +*`couchdb.server.couchdb.auth_cache_misses`*:: + -- -Peak committed bytes on Windows +Number of authentication cache misses type: long -format: bytes - -- -*`docker.memory.private_working_set.total`*:: +*`couchdb.server.couchdb.request_time`*:: + -- -private working sets on Windows +Length of a request inside CouchDB without MochiWeb type: long -format: bytes - -- -*`docker.memory.fail.count`*:: +*`couchdb.server.couchdb.database_reads`*:: + -- -Fail counter. +Number of times a document was read from a database -type: scaled_float +type: long -- -*`docker.memory.limit`*:: +*`couchdb.server.couchdb.auth_cache_hits`*:: + -- -Memory limit. +Number of authentication cache hits type: long -format: bytes - -- -[float] -=== rss - -RSS memory stats. - - - -*`docker.memory.rss.total`*:: +*`couchdb.server.couchdb.open_os_files`*:: + -- -Total memory resident set size. 
+Number of file descriptors CouchDB has open type: long -format: bytes - --- - -*`docker.memory.rss.pct`*:: -+ -- -Memory resident set size percentage. +[[exported-fields-docker-processor]] +== Docker fields -type: scaled_float +Docker stats collected from Docker. -format: percent --- -[float] -=== usage -Usage memory stats. +*`docker.container.id`*:: ++ +-- +type: alias +alias to: container.id +-- -*`docker.memory.usage.max`*:: +*`docker.container.image`*:: + -- -Max memory usage. - - -type: long +type: alias -format: bytes +alias to: container.image.name -- -*`docker.memory.usage.pct`*:: +*`docker.container.name`*:: + -- -Memory usage percentage. - - -type: scaled_float +type: alias -format: percent +alias to: container.name -- -*`docker.memory.usage.total`*:: +*`docker.container.labels`*:: + -- -Total memory usage. - +Image labels. -type: long -format: bytes +type: object -- -[float] -=== network +[[exported-fields-docker]] +== Docker fields -Network metrics. +Docker stats collected from Docker. -*`docker.network.interface`*:: -+ --- -Network interface name. +[float] +=== docker +Information and statistics about docker's running containers. -type: keyword --- [float] -=== in +=== container -Incoming network stats per second. +Docker container metrics. -*`docker.network.in.bytes`*:: +*`docker.container.command`*:: + -- -Total number of incoming bytes. - +Command that was executed in the Docker container. -type: long -format: bytes +type: keyword -- -*`docker.network.in.dropped`*:: +*`docker.container.created`*:: + -- -Total number of dropped incoming packets. +Date when the container was created. -type: scaled_float +type: date -- -*`docker.network.in.errors`*:: +*`docker.container.status`*:: + -- -Total errors on incoming packets. +Container status. -type: long +type: keyword -- -*`docker.network.in.packets`*:: +*`docker.container.ip_addresses`*:: + -- -Total number of incoming packets. +Container IP addresses. -type: long +type: ip -- [float] -=== out +=== size -Outgoing network stats per second. +Container size metrics. -*`docker.network.out.bytes`*:: +*`docker.container.size.root_fs`*:: + -- -Total number of outgoing bytes. +Total size of all the files in the container. type: long -format: bytes - --- - -*`docker.network.out.dropped`*:: -+ --- -Total number of dropped outgoing packets. - - -type: scaled_float - -- -*`docker.network.out.errors`*:: +*`docker.container.size.rw`*:: + -- -Total errors on outgoing packets. +Size of the files that have been created or changed since creation. type: long -- -*`docker.network.out.packets`*:: +*`docker.container.tags`*:: + -- -Total number of outgoing packets. +Image tags. -type: long +type: keyword -- [float] -=== inbound +=== cpu -Incoming network stats since the container started. +Runtime CPU metrics. -*`docker.network.inbound.bytes`*:: +*`docker.cpu.kernel.pct`*:: + -- -Total number of incoming bytes. +Percentage of time in kernel space. -type: long +type: scaled_float -format: bytes +format: percent -- -*`docker.network.inbound.dropped`*:: +*`docker.cpu.kernel.norm.pct`*:: + -- -Total number of dropped incoming packets. +Percentage of time in kernel space normalized by the number of CPU cores. -type: long +type: scaled_float + +format: percent -- -*`docker.network.inbound.errors`*:: +*`docker.cpu.kernel.ticks`*:: + -- -Total errors on incoming packets. +CPU ticks in kernel space. type: long -- -*`docker.network.inbound.packets`*:: +*`docker.cpu.system.pct`*:: + -- -Total number of incoming packets. 
- - -type: long - --- +Percentage of total CPU time in the system. -[float] -=== outbound -Outgoing network stats since the container started. +type: scaled_float +format: percent +-- -*`docker.network.outbound.bytes`*:: +*`docker.cpu.system.norm.pct`*:: + -- -Total number of outgoing bytes. +Percentage of total CPU time in the system normalized by the number of CPU cores. -type: long +type: scaled_float -format: bytes +format: percent -- -*`docker.network.outbound.dropped`*:: +*`docker.cpu.system.ticks`*:: + -- -Total number of dropped outgoing packets. +CPU system ticks. type: long -- -*`docker.network.outbound.errors`*:: +*`docker.cpu.user.pct`*:: + -- -Total errors on outgoing packets. +Percentage of time in user space. -type: long +type: scaled_float + +format: percent -- -*`docker.network.outbound.packets`*:: +*`docker.cpu.user.norm.pct`*:: + -- -Total number of outgoing packets. - +Percentage of time in user space normalized by the number of CPU cores. -type: long --- +type: scaled_float -[[exported-fields-dropwizard]] -== Dropwizard fields +format: percent -Stats collected from Dropwizard. +-- +*`docker.cpu.user.ticks`*:: ++ +-- +CPU ticks in user space. -[float] -=== dropwizard +type: long +-- +*`docker.cpu.total.pct`*:: ++ +-- +Total CPU usage. -[[exported-fields-ecs]] -== ECS fields +type: scaled_float -ECS Fields. +format: percent +-- -*`@timestamp`*:: +*`docker.cpu.total.norm.pct`*:: + -- -Date/time when the event originated. -This is the date/time extracted from the event, typically representing when the event was generated by the source. -If the event source has no original timestamp, this value is typically populated by the first time the event was received by the pipeline. -Required field for all events. +Total CPU usage normalized by the number of CPU cores. -type: date -example: 2016-05-23T08:05:34.853Z +type: scaled_float -required: True +format: percent -- -*`labels`*:: +*`docker.cpu.core.*.pct`*:: + -- -Custom key/value pairs. -Can be used to add meta information to events. Should not contain nested objects. All values are stored as keyword. -Example: `docker` and `k8s` labels. +Percentage of CPU time in this core. + type: object -example: {"application": "foo-bar", "env": "production"} +format: percent -- -*`message`*:: +*`docker.cpu.core.*.norm.pct`*:: + -- -For log events the message field contains the log message, optimized for viewing in a log viewer. -For structured logs without an original message field, other fields can be concatenated to form a human-readable summary of the event. -If multiple messages exist, they can be combined into one message. +Percentage of CPU time in this core, normalized by the number of CPU cores. -type: text -example: Hello World +type: object + +format: percent -- -*`tags`*:: +*`docker.cpu.core.*.ticks`*:: + -- -List of keywords used to tag each event. +Number of CPU ticks in this core. -type: keyword -example: ["production", "env2"] +type: object -- [float] -=== agent +=== diskio -The agent fields contain the data about the software entity, if any, that collects, detects, or observes events on a host, or takes measurements on a host. -Examples include Beats. Agents may also run on observers. ECS agent.* fields shall be populated with details of the agent running on the host or observer where the event happened or the measurement was taken. +Disk I/O metrics. -*`agent.ephemeral_id`*:: -+ --- -Ephemeral identifier of this agent (if one exists). -This id normally changes across restarts, but `agent.id` does not. 
-type: keyword +[float] +=== read -example: 8a4f500f +Accumulated reads during the life of the container --- -*`agent.id`*:: + +*`docker.diskio.read.ops`*:: + -- -Unique identifier of this agent (if one exists). -Example: For Beats this would be beat.id. +Number of reads during the life of the container -type: keyword -example: 8a4f500d +type: long -- -*`agent.name`*:: +*`docker.diskio.read.bytes`*:: + -- -Custom name of the agent. -This is a name that can be given to an agent. This can be helpful if for example two Filebeat instances are running on the same host but a human readable separation is needed on which Filebeat instance data is coming from. -If no name is given, the name is often left empty. +Bytes read during the life of the container -type: keyword -example: foo +type: long + +format: bytes -- -*`agent.type`*:: +*`docker.diskio.read.rate`*:: + -- -Type of the agent. -The agent type stays always the same and should be given by the agent used. In case of Filebeat the agent would always be Filebeat also if two Filebeat instances are run on the same machine. +Number of current reads per second -type: keyword -example: filebeat +type: long -- -*`agent.version`*:: +*`docker.diskio.read.service_time`*:: + -- -Version of the agent. +Total time to service IO requests, in nanoseconds -type: keyword -example: 6.0.0-rc2 +type: long -- -[float] -=== as - -An autonomous system (AS) is a collection of connected Internet Protocol (IP) routing prefixes under the control of one or more network operators on behalf of a single administrative entity or domain that presents a common, clearly defined routing policy to the internet. - - -*`as.number`*:: +*`docker.diskio.read.wait_time`*:: + -- -Unique number allocated to the autonomous system. The autonomous system number (ASN) uniquely identifies each network on the Internet. +Total time requests spent waiting in queues for service, in nanoseconds -type: long -example: 15169 +type: long -- -*`as.organization.name`*:: +*`docker.diskio.read.queued`*:: + -- -Organization name. +Total number of queued requests -type: keyword -example: Google LLC +type: long -- -*`as.organization.name.text`*:: +*`docker.diskio.reads`*:: + -- -type: text + +deprecated:[6.4] + +Number of current reads per second + + +type: scaled_float -- [float] -=== client +=== write -A client is defined as the initiator of a network connection for events regarding sessions, connections, or bidirectional flow records. -For TCP events, the client is the initiator of the TCP connection that sends the SYN packet(s). For other protocols, the client is generally the initiator or requestor in the network transaction. Some systems use the term "originator" to refer the client in TCP connections. The client fields describe details about the system acting as the client in the network event. Client fields are usually populated in conjunction with server fields. Client fields are generally not populated for packet-level events. -Client / server representations can add semantic context to an exchange, which is helpful to visualize the data in certain situations. If your context falls in that category, you should still ensure that source and destination are filled appropriately. +Accumulated writes during the life of the container -*`client.address`*:: + +*`docker.diskio.write.ops`*:: + -- -Some event client addresses are defined ambiguously. The event will sometimes list an IP, a domain or a unix socket. You should always store the raw address in the `.address` field. 
-Then it should be duplicated to `.ip` or `.domain`, depending on which one it is. +Number of writes during the life of the container -type: keyword + +type: long -- -*`client.as.number`*:: +*`docker.diskio.write.bytes`*:: + -- -Unique number allocated to the autonomous system. The autonomous system number (ASN) uniquely identifies each network on the Internet. +Bytes written during the life of the container + type: long -example: 15169 +format: bytes -- -*`client.as.organization.name`*:: +*`docker.diskio.write.rate`*:: + -- -Organization name. +Number of current writes per second -type: keyword -example: Google LLC +type: long -- -*`client.as.organization.name.text`*:: +*`docker.diskio.write.service_time`*:: + -- -type: text - --- +Total time to service IO requests, in nanoseconds -*`client.bytes`*:: -+ --- -Bytes sent from the client to the server. type: long -example: 184 - -format: bytes - -- -*`client.domain`*:: +*`docker.diskio.write.wait_time`*:: + -- -Client domain. +Total time requests spent waiting in queues for service, in nanoseconds -type: keyword + +type: long -- -*`client.geo.city_name`*:: +*`docker.diskio.write.queued`*:: + -- -City name. +Total number of queued requests -type: keyword -example: Montreal +type: long -- -*`client.geo.continent_name`*:: +*`docker.diskio.writes`*:: + -- -Name of the continent. -type: keyword +deprecated:[6.4] -example: North America +Number of current writes per second --- -*`client.geo.country_iso_code`*:: -+ +type: scaled_float + -- -Country ISO code. -type: keyword +[float] +=== summary -example: CA +Accumulated reads and writes during the life of the container --- -*`client.geo.country_name`*:: + +*`docker.diskio.summary.ops`*:: + -- -Country name. +Number of I/O operations during the life of the container -type: keyword -example: Canada +type: long -- -*`client.geo.location`*:: +*`docker.diskio.summary.bytes`*:: + -- -Longitude and latitude. +Bytes read and written during the life of the container -type: geo_point -example: { "lon": -73.614830, "lat": 45.505918 } +type: long + +format: bytes -- -*`client.geo.name`*:: +*`docker.diskio.summary.rate`*:: + -- -User-defined description of a location, at the level of granularity they care about. -Could be the name of their data centers, the floor number, if this describes a local physical entity, city names. -Not typically used in automated geolocation. +Number of current operations per second -type: keyword -example: boston-dc +type: long -- -*`client.geo.region_iso_code`*:: +*`docker.diskio.summary.service_time`*:: + -- -Region ISO code. +Total time to service IO requests, in nanoseconds -type: keyword -example: CA-QC +type: long -- -*`client.geo.region_name`*:: +*`docker.diskio.summary.wait_time`*:: + -- -Region name. +Total time requests spent waiting in queues for service, in nanoseconds -type: keyword -example: Quebec +type: long -- -*`client.ip`*:: +*`docker.diskio.summary.queued`*:: + -- -IP address of the client. -Can be one or multiple IPv4 or IPv6 addresses. +Total number of queued requests -type: ip + +type: long -- -*`client.mac`*:: +*`docker.diskio.total`*:: + -- -MAC address of the client. -type: keyword +deprecated:[6.4] --- +Number of reads and writes per second -*`client.nat.ip`*:: -+ --- -Translated IP of source based NAT sessions (e.g. internal client to internet). -Typically connections traversing load balancers, firewalls, or routers. -type: ip +type: scaled_float -- -*`client.nat.port`*:: -+ --- -Translated port of source based NAT sessions (e.g. 
internal client to internet).
-Typically connections traversing load balancers, firewalls, or routers.
+[float]
+=== event

-type: long
+Docker event

-format: string
--

-*`client.packets`*::
+*`docker.event.status`*::
+
--
-Packets sent from the client to the server.
+Event status

-type: long
-example: 12
+type: keyword

--

-*`client.port`*::
+*`docker.event.id`*::
+
--
-Port of the client.
+Event id when available

-type: long
-format: string
+type: keyword

--

-*`client.registered_domain`*::
+*`docker.event.from`*::
+
--
-The highest registered client domain, stripped of the subdomain.
-For example, the registered domain for "foo.google.com" is "google.com".
-This value can be determined precisely with a list like the public suffix list (http://publicsuffix.org). Trying to approximate this by simply taking the last two labels will not work well for TLDs such as "co.uk".
+Event source

-type: keyword
-example: google.com
+type: keyword

--

-*`client.top_level_domain`*::
+*`docker.event.type`*::
+
--
-The effective top level domain (eTLD), also known as the domain suffix, is the last part of the domain name. For example, the top level domain for google.com is "com".
-This value can be determined precisely with a list like the public suffix list (http://publicsuffix.org). Trying to approximate this by simply taking the last label will not work well for effective TLDs such as "co.uk".
+The type of object emitting the event

-type: keyword
-example: co.uk
+type: keyword

--

-*`client.user.domain`*::
+*`docker.event.action`*::
+
--
-Name of the directory the user is a member of.
-For example, an LDAP or Active Directory domain name.
+The type of event

+type: keyword

--

-*`client.user.email`*::
+[float]
+=== actor
+
+Actor
+
+
+
+*`docker.event.actor.id`*::
+
--
-User email address.
+The ID of the object emitting the event

+type: keyword

--

-*`client.user.full_name`*::
+*`docker.event.actor.attributes`*::
+
--
-User's full name, if available.
+Various key/value attributes of the object, depending on its type

-type: keyword
-example: Albert Einstein
+type: object

--

-*`client.user.full_name.text`*::
-+
---
-type: text
+[float]
+=== healthcheck

---
+Docker healthcheck metrics.
+Healthcheck data will only be available from docker containers where the docker `HEALTHCHECK` instruction has been used to build the docker image.

-*`client.user.group.domain`*::
+
+
+*`docker.healthcheck.failingstreak`*::
+
--
-Name of the directory the group is a member of.
-For example, an LDAP or Active Directory domain name.
+Concurrent failed checks

-type: keyword
+
+type: integer

--

-*`client.user.group.id`*::
+*`docker.healthcheck.status`*::
+
--
-Unique identifier for the group on the system/platform.
+Healthcheck status code

+
type: keyword

--

-*`client.user.group.name`*::
+[float]
+=== event
+
+event fields.
+
+
+
+*`docker.healthcheck.event.end_date`*::
+
--
-Name of the group.
+Healthcheck end date

-type: keyword
+
+type: date

--

-*`client.user.hash`*::
+*`docker.healthcheck.event.start_date`*::
+
--
-Unique user hash to correlate information for a user in anonymized form.
-Useful if `user.id` or `user.name` contain confidential information and cannot be used.
+Healthcheck start date

-type: keyword
+
+type: date

--

-*`client.user.id`*::
+*`docker.healthcheck.event.output`*::
+
--
-Unique identifiers of the user.
+Healthcheck output

+
type: keyword

--

-*`client.user.name`*::
+*`docker.healthcheck.event.exit_code`*::
+
--
-Short name or login of the user.
+Healthcheck exit code

-type: keyword
-example: albert
+type: integer

--

-*`client.user.name.text`*::
-+
---
-type: text
+[float]
+=== image
+
+Docker image metrics.
+

--

[float]
-=== cloud
+=== id

-Fields related to the cloud or infrastructure the events are coming from.
+The image layers identifier.

-*`cloud.account.id`*::
+
+*`docker.image.id.current`*::
+
--
-The cloud account or organization id used to identify different entities in a multi-tenant environment.
-Examples: AWS account id, Google Cloud ORG Id, or other unique identifier.
+Unique image identifier given upon its creation.

-type: keyword
-example: 666777888999
+type: keyword

--

-*`cloud.availability_zone`*::
+*`docker.image.id.parent`*::
+
--
-Availability zone in which this host is running.
+Identifier of the image, if it exists, from which the current image directly descends.

-type: keyword
-example: us-east-1c
+type: keyword

--

-*`cloud.instance.id`*::
+*`docker.image.created`*::
+
--
-Instance ID of the host machine.
+Date and time when the image was created.

-type: keyword
-example: i-1234567890abcdef0
+type: date

--

-*`cloud.instance.name`*::
+[float]
+=== size
+
+Image size layers.
+
+
+
+*`docker.image.size.virtual`*::
+
--
-Instance name of the host machine.
+Size of the image.

-type: keyword
+
+type: long

--

-*`cloud.machine.type`*::
+*`docker.image.size.regular`*::
+
--
-Machine type of the host machine.
+Total size of all the cached images associated with the current image.

-type: keyword
-example: t2.medium
+type: long

--

-*`cloud.provider`*::
+*`docker.image.labels`*::
+
--
-Name of the cloud provider. Example values are aws, azure, gcp, or digitalocean.
+Image labels.

-type: keyword
-example: aws
+type: object

--

-*`cloud.region`*::
+*`docker.image.tags`*::
+
--
-Region in which this host is running.
+Image tags.

-type: keyword
-example: us-east-1
+type: keyword

--

[float]
-=== code_signature
+=== info

-These fields contain information about binary code signatures.
+Info metrics based on https://docs.docker.com/engine/reference/api/docker_remote_api_v1.24/#/display-system-wide-information.

-*`code_signature.exists`*::
-+
---
-Boolean to capture if a signature is present.

-type: boolean
+[float]
+=== containers

-example: true
+Overall container stats.

--

-*`code_signature.status`*::
+
+*`docker.info.containers.paused`*::
+
--
-Additional information about the certificate status.
-This is useful for logging cryptographic errors with the certificate validity or trust status. Leave unpopulated if the validity or trust of the certificate was unchecked.
+Total number of paused containers.
+
+
+type: long
+
+--
+
+*`docker.info.containers.running`*::
++
+--
+Total number of running containers.
+
+
+type: long
+
+--
+
+*`docker.info.containers.stopped`*::
++
+--
+Total number of stopped containers.
+
+
+type: long
+
+--
+
+*`docker.info.containers.total`*::
++
+--
+Total number of existing containers.
+
+
+type: long
+
+--
+
+*`docker.info.id`*::
++
+--
+Unique Docker host identifier.
+
+
+type: keyword
+
+--
+
+*`docker.info.images`*::
++
+--
+Total number of existing images.
+
+
+type: long
+
+--
+
+[float]
+=== memory
+
+Memory metrics.
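As a quick orientation before the individual fields: the `*.pct` values in this section are scaled floats in the 0-1 range derived from the raw byte counters. The following Python sketch shows one plausible derivation; it is an illustration only, not Metricbeat's implementation, and the helper name and sample numbers are invented for this example.

[source,python]
----
# Hypothetical helper: derive docker.memory.*.pct values from raw byte
# counts (usage, RSS, limit) as reported for a container. Illustration only.
def memory_percentages(usage_total, rss_total, limit):
    if limit <= 0:  # no memory limit configured; percentages are undefined
        return {}
    return {
        "docker.memory.usage.total": usage_total,
        "docker.memory.usage.pct": round(usage_total / limit, 4),
        "docker.memory.rss.total": rss_total,
        "docker.memory.rss.pct": round(rss_total / limit, 4),
        "docker.memory.limit": limit,
    }

# 512 MiB in use and 256 MiB resident against a 2 GiB limit.
print(memory_percentages(512 * 1024**2, 256 * 1024**2, 2 * 1024**3))
----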
+ + + +*`docker.memory.stats.*`*:: ++ +-- +Raw memory stats from the cgroups memory.stat interface + + +type: object + +-- + +[float] +=== commit + +Committed bytes on Windows + + + +*`docker.memory.commit.total`*:: ++ +-- +Total bytes + + +type: long + +format: bytes + +-- + +*`docker.memory.commit.peak`*:: ++ +-- +Peak committed bytes on Windows + + +type: long + +format: bytes + +-- + +*`docker.memory.private_working_set.total`*:: ++ +-- +private working sets on Windows + + +type: long + +format: bytes + +-- + +*`docker.memory.fail.count`*:: ++ +-- +Fail counter. + + +type: scaled_float + +-- + +*`docker.memory.limit`*:: ++ +-- +Memory limit. + + +type: long + +format: bytes + +-- + +[float] +=== rss + +RSS memory stats. + + + +*`docker.memory.rss.total`*:: ++ +-- +Total memory resident set size. + + +type: long + +format: bytes + +-- + +*`docker.memory.rss.pct`*:: ++ +-- +Memory resident set size percentage. + + +type: scaled_float + +format: percent + +-- + +[float] +=== usage + +Usage memory stats. + + + +*`docker.memory.usage.max`*:: ++ +-- +Max memory usage. + + +type: long + +format: bytes + +-- + +*`docker.memory.usage.pct`*:: ++ +-- +Memory usage percentage. + + +type: scaled_float + +format: percent + +-- + +*`docker.memory.usage.total`*:: ++ +-- +Total memory usage. + + +type: long + +format: bytes + +-- + +[float] +=== network + +Network metrics. + + + +*`docker.network.interface`*:: ++ +-- +Network interface name. + + +type: keyword + +-- + +[float] +=== in + +Incoming network stats per second. + + + +*`docker.network.in.bytes`*:: ++ +-- +Total number of incoming bytes. + + +type: long + +format: bytes + +-- + +*`docker.network.in.dropped`*:: ++ +-- +Total number of dropped incoming packets. + + +type: scaled_float + +-- + +*`docker.network.in.errors`*:: ++ +-- +Total errors on incoming packets. + + +type: long + +-- + +*`docker.network.in.packets`*:: ++ +-- +Total number of incoming packets. + + +type: long + +-- + +[float] +=== out + +Outgoing network stats per second. + + + +*`docker.network.out.bytes`*:: ++ +-- +Total number of outgoing bytes. + + +type: long + +format: bytes + +-- + +*`docker.network.out.dropped`*:: ++ +-- +Total number of dropped outgoing packets. + + +type: scaled_float + +-- + +*`docker.network.out.errors`*:: ++ +-- +Total errors on outgoing packets. + + +type: long + +-- + +*`docker.network.out.packets`*:: ++ +-- +Total number of outgoing packets. + + +type: long + +-- + +[float] +=== inbound + +Incoming network stats since the container started. + + + +*`docker.network.inbound.bytes`*:: ++ +-- +Total number of incoming bytes. + + +type: long + +format: bytes + +-- + +*`docker.network.inbound.dropped`*:: ++ +-- +Total number of dropped incoming packets. + + +type: long + +-- + +*`docker.network.inbound.errors`*:: ++ +-- +Total errors on incoming packets. + + +type: long + +-- + +*`docker.network.inbound.packets`*:: ++ +-- +Total number of incoming packets. + + +type: long + +-- + +[float] +=== outbound + +Outgoing network stats since the container started. + + + +*`docker.network.outbound.bytes`*:: ++ +-- +Total number of outgoing bytes. + + +type: long + +format: bytes + +-- + +*`docker.network.outbound.dropped`*:: ++ +-- +Total number of dropped outgoing packets. + + +type: long + +-- + +*`docker.network.outbound.errors`*:: ++ +-- +Total errors on outgoing packets. + + +type: long + +-- + +*`docker.network.outbound.packets`*:: ++ +-- +Total number of outgoing packets. 
+ + +type: long + +-- + +[[exported-fields-dropwizard]] +== Dropwizard fields + +Stats collected from Dropwizard. + + + +[float] +=== dropwizard + + + + +[[exported-fields-ecs]] +== ECS fields + +ECS Fields. + + +*`@timestamp`*:: ++ +-- +Date/time when the event originated. +This is the date/time extracted from the event, typically representing when the event was generated by the source. +If the event source has no original timestamp, this value is typically populated by the first time the event was received by the pipeline. +Required field for all events. + +type: date + +example: 2016-05-23T08:05:34.853Z + +required: True + +-- + +*`labels`*:: ++ +-- +Custom key/value pairs. +Can be used to add meta information to events. Should not contain nested objects. All values are stored as keyword. +Example: `docker` and `k8s` labels. + +type: object + +example: {"application": "foo-bar", "env": "production"} + +-- + +*`message`*:: ++ +-- +For log events the message field contains the log message, optimized for viewing in a log viewer. +For structured logs without an original message field, other fields can be concatenated to form a human-readable summary of the event. +If multiple messages exist, they can be combined into one message. + +type: text + +example: Hello World + +-- + +*`tags`*:: ++ +-- +List of keywords used to tag each event. + +type: keyword + +example: ["production", "env2"] + +-- + +[float] +=== agent + +The agent fields contain the data about the software entity, if any, that collects, detects, or observes events on a host, or takes measurements on a host. +Examples include Beats. Agents may also run on observers. ECS agent.* fields shall be populated with details of the agent running on the host or observer where the event happened or the measurement was taken. + + +*`agent.ephemeral_id`*:: ++ +-- +Ephemeral identifier of this agent (if one exists). +This id normally changes across restarts, but `agent.id` does not. + +type: keyword + +example: 8a4f500f + +-- + +*`agent.id`*:: ++ +-- +Unique identifier of this agent (if one exists). +Example: For Beats this would be beat.id. + +type: keyword + +example: 8a4f500d + +-- + +*`agent.name`*:: ++ +-- +Custom name of the agent. +This is a name that can be given to an agent. This can be helpful if for example two Filebeat instances are running on the same host but a human readable separation is needed on which Filebeat instance data is coming from. +If no name is given, the name is often left empty. + +type: keyword + +example: foo + +-- + +*`agent.type`*:: ++ +-- +Type of the agent. +The agent type stays always the same and should be given by the agent used. In case of Filebeat the agent would always be Filebeat also if two Filebeat instances are run on the same machine. + +type: keyword + +example: filebeat + +-- + +*`agent.version`*:: ++ +-- +Version of the agent. + +type: keyword + +example: 6.0.0-rc2 + +-- + +[float] +=== as + +An autonomous system (AS) is a collection of connected Internet Protocol (IP) routing prefixes under the control of one or more network operators on behalf of a single administrative entity or domain that presents a common, clearly defined routing policy to the internet. + + +*`as.number`*:: ++ +-- +Unique number allocated to the autonomous system. The autonomous system number (ASN) uniquely identifies each network on the Internet. + +type: long + +example: 15169 + +-- + +*`as.organization.name`*:: ++ +-- +Organization name. 
+ +type: keyword + +example: Google LLC + +-- + +*`as.organization.name.text`*:: ++ +-- +type: text + +-- + +[float] +=== client + +A client is defined as the initiator of a network connection for events regarding sessions, connections, or bidirectional flow records. +For TCP events, the client is the initiator of the TCP connection that sends the SYN packet(s). For other protocols, the client is generally the initiator or requestor in the network transaction. Some systems use the term "originator" to refer the client in TCP connections. The client fields describe details about the system acting as the client in the network event. Client fields are usually populated in conjunction with server fields. Client fields are generally not populated for packet-level events. +Client / server representations can add semantic context to an exchange, which is helpful to visualize the data in certain situations. If your context falls in that category, you should still ensure that source and destination are filled appropriately. + + +*`client.address`*:: ++ +-- +Some event client addresses are defined ambiguously. The event will sometimes list an IP, a domain or a unix socket. You should always store the raw address in the `.address` field. +Then it should be duplicated to `.ip` or `.domain`, depending on which one it is. + +type: keyword + +-- + +*`client.as.number`*:: ++ +-- +Unique number allocated to the autonomous system. The autonomous system number (ASN) uniquely identifies each network on the Internet. + +type: long + +example: 15169 + +-- + +*`client.as.organization.name`*:: ++ +-- +Organization name. + +type: keyword + +example: Google LLC + +-- + +*`client.as.organization.name.text`*:: ++ +-- +type: text + +-- + +*`client.bytes`*:: ++ +-- +Bytes sent from the client to the server. + +type: long + +example: 184 + +format: bytes + +-- + +*`client.domain`*:: ++ +-- +Client domain. + +type: keyword + +-- + +*`client.geo.city_name`*:: ++ +-- +City name. + +type: keyword + +example: Montreal + +-- + +*`client.geo.continent_name`*:: ++ +-- +Name of the continent. + +type: keyword + +example: North America + +-- + +*`client.geo.country_iso_code`*:: ++ +-- +Country ISO code. + +type: keyword + +example: CA + +-- + +*`client.geo.country_name`*:: ++ +-- +Country name. + +type: keyword + +example: Canada + +-- + +*`client.geo.location`*:: ++ +-- +Longitude and latitude. + +type: geo_point + +example: { "lon": -73.614830, "lat": 45.505918 } + +-- + +*`client.geo.name`*:: ++ +-- +User-defined description of a location, at the level of granularity they care about. +Could be the name of their data centers, the floor number, if this describes a local physical entity, city names. +Not typically used in automated geolocation. + +type: keyword + +example: boston-dc + +-- + +*`client.geo.region_iso_code`*:: ++ +-- +Region ISO code. + +type: keyword + +example: CA-QC + +-- + +*`client.geo.region_name`*:: ++ +-- +Region name. + +type: keyword + +example: Quebec + +-- + +*`client.ip`*:: ++ +-- +IP address of the client. +Can be one or multiple IPv4 or IPv6 addresses. + +type: ip + +-- + +*`client.mac`*:: ++ +-- +MAC address of the client. + +type: keyword + +-- + +*`client.nat.ip`*:: ++ +-- +Translated IP of source based NAT sessions (e.g. internal client to internet). +Typically connections traversing load balancers, firewalls, or routers. + +type: ip + +-- + +*`client.nat.port`*:: ++ +-- +Translated port of source based NAT sessions (e.g. internal client to internet). 
+Typically connections traversing load balancers, firewalls, or routers. + +type: long + +format: string + +-- + +*`client.packets`*:: ++ +-- +Packets sent from the client to the server. + +type: long + +example: 12 + +-- + +*`client.port`*:: ++ +-- +Port of the client. + +type: long + +format: string + +-- + +*`client.registered_domain`*:: ++ +-- +The highest registered client domain, stripped of the subdomain. +For example, the registered domain for "foo.google.com" is "google.com". +This value can be determined precisely with a list like the public suffix list (http://publicsuffix.org). Trying to approximate this by simply taking the last two labels will not work well for TLDs such as "co.uk". + +type: keyword + +example: google.com + +-- + +*`client.top_level_domain`*:: ++ +-- +The effective top level domain (eTLD), also known as the domain suffix, is the last part of the domain name. For example, the top level domain for google.com is "com". +This value can be determined precisely with a list like the public suffix list (http://publicsuffix.org). Trying to approximate this by simply taking the last label will not work well for effective TLDs such as "co.uk". + +type: keyword + +example: co.uk + +-- + +*`client.user.domain`*:: ++ +-- +Name of the directory the user is a member of. +For example, an LDAP or Active Directory domain name. + +type: keyword + +-- + +*`client.user.email`*:: ++ +-- +User email address. + +type: keyword + +-- + +*`client.user.full_name`*:: ++ +-- +User's full name, if available. + +type: keyword + +example: Albert Einstein + +-- + +*`client.user.full_name.text`*:: ++ +-- +type: text + +-- + +*`client.user.group.domain`*:: ++ +-- +Name of the directory the group is a member of. +For example, an LDAP or Active Directory domain name. + +type: keyword + +-- + +*`client.user.group.id`*:: ++ +-- +Unique identifier for the group on the system/platform. + +type: keyword + +-- + +*`client.user.group.name`*:: ++ +-- +Name of the group. + +type: keyword + +-- + +*`client.user.hash`*:: ++ +-- +Unique user hash to correlate information for a user in anonymized form. +Useful if `user.id` or `user.name` contain confidential information and cannot be used. + +type: keyword + +-- + +*`client.user.id`*:: ++ +-- +Unique identifiers of the user. + +type: keyword + +-- + +*`client.user.name`*:: ++ +-- +Short name or login of the user. + +type: keyword + +example: albert + +-- + +*`client.user.name.text`*:: ++ +-- +type: text + +-- + +[float] +=== cloud + +Fields related to the cloud or infrastructure the events are coming from. + + +*`cloud.account.id`*:: ++ +-- +The cloud account or organization id used to identify different entities in a multi-tenant environment. +Examples: AWS account id, Google Cloud ORG Id, or other unique identifier. + +type: keyword + +example: 666777888999 + +-- + +*`cloud.availability_zone`*:: ++ +-- +Availability zone in which this host is running. + +type: keyword + +example: us-east-1c + +-- + +*`cloud.instance.id`*:: ++ +-- +Instance ID of the host machine. + +type: keyword + +example: i-1234567890abcdef0 + +-- + +*`cloud.instance.name`*:: ++ +-- +Instance name of the host machine. + +type: keyword + +-- + +*`cloud.machine.type`*:: ++ +-- +Machine type of the host machine. + +type: keyword + +example: t2.medium + +-- + +*`cloud.provider`*:: ++ +-- +Name of the cloud provider. Example values are aws, azure, gcp, or digitalocean. + +type: keyword + +example: aws + +-- + +*`cloud.region`*:: ++ +-- +Region in which this host is running. 
+ +type: keyword + +example: us-east-1 + +-- + +[float] +=== code_signature + +These fields contain information about binary code signatures. + + +*`code_signature.exists`*:: ++ +-- +Boolean to capture if a signature is present. + +type: boolean + +example: true + +-- + +*`code_signature.status`*:: ++ +-- +Additional information about the certificate status. +This is useful for logging cryptographic errors with the certificate validity or trust status. Leave unpopulated if the validity or trust of the certificate was unchecked. + +type: keyword + +example: ERROR_UNTRUSTED_ROOT + +-- + +*`code_signature.subject_name`*:: ++ +-- +Subject name of the code signer + +type: keyword + +example: Microsoft Corporation + +-- + +*`code_signature.trusted`*:: ++ +-- +Stores the trust status of the certificate chain. +Validating the trust of the certificate chain may be complicated, and this field should only be populated by tools that actively check the status. + +type: boolean + +example: true + +-- + +*`code_signature.valid`*:: ++ +-- +Boolean to capture if the digital signature is verified against the binary content. +Leave unpopulated if a certificate was unchecked. + +type: boolean + +example: true + +-- + +[float] +=== container + +Container fields are used for meta information about the specific container that is the source of information. +These fields help correlate data based containers from any runtime. + + +*`container.id`*:: ++ +-- +Unique container id. + +type: keyword + +-- + +*`container.image.name`*:: ++ +-- +Name of the image the container was built on. + +type: keyword + +-- + +*`container.image.tag`*:: ++ +-- +Container image tags. + +type: keyword + +-- + +*`container.labels`*:: ++ +-- +Image labels. + +type: object + +-- + +*`container.name`*:: ++ +-- +Container name. + +type: keyword + +-- + +*`container.runtime`*:: ++ +-- +Runtime managing this container. + +type: keyword + +example: docker + +-- + +[float] +=== destination + +Destination fields describe details about the destination of a packet/event. +Destination fields are usually populated in conjunction with source fields. + + +*`destination.address`*:: ++ +-- +Some event destination addresses are defined ambiguously. The event will sometimes list an IP, a domain or a unix socket. You should always store the raw address in the `.address` field. +Then it should be duplicated to `.ip` or `.domain`, depending on which one it is. + +type: keyword + +-- + +*`destination.as.number`*:: ++ +-- +Unique number allocated to the autonomous system. The autonomous system number (ASN) uniquely identifies each network on the Internet. + +type: long + +example: 15169 + +-- + +*`destination.as.organization.name`*:: ++ +-- +Organization name. + +type: keyword + +example: Google LLC + +-- + +*`destination.as.organization.name.text`*:: ++ +-- +type: text + +-- + +*`destination.bytes`*:: ++ +-- +Bytes sent from the destination to the source. + +type: long + +example: 184 + +format: bytes + +-- + +*`destination.domain`*:: ++ +-- +Destination domain. + +type: keyword + +-- + +*`destination.geo.city_name`*:: ++ +-- +City name. + +type: keyword + +example: Montreal + +-- + +*`destination.geo.continent_name`*:: ++ +-- +Name of the continent. + +type: keyword + +example: North America + +-- + +*`destination.geo.country_iso_code`*:: ++ +-- +Country ISO code. + +type: keyword + +example: CA + +-- + +*`destination.geo.country_name`*:: ++ +-- +Country name. 
+ +type: keyword + +example: Canada + +-- + +*`destination.geo.location`*:: ++ +-- +Longitude and latitude. + +type: geo_point + +example: { "lon": -73.614830, "lat": 45.505918 } + +-- + +*`destination.geo.name`*:: ++ +-- +User-defined description of a location, at the level of granularity they care about. +Could be the name of their data centers, the floor number, if this describes a local physical entity, city names. +Not typically used in automated geolocation. + +type: keyword + +example: boston-dc + +-- + +*`destination.geo.region_iso_code`*:: ++ +-- +Region ISO code. + +type: keyword + +example: CA-QC + +-- + +*`destination.geo.region_name`*:: ++ +-- +Region name. + +type: keyword + +example: Quebec + +-- + +*`destination.ip`*:: ++ +-- +IP address of the destination. +Can be one or multiple IPv4 or IPv6 addresses. + +type: ip + +-- + +*`destination.mac`*:: ++ +-- +MAC address of the destination. + +type: keyword + +-- + +*`destination.nat.ip`*:: ++ +-- +Translated ip of destination based NAT sessions (e.g. internet to private DMZ) +Typically used with load balancers, firewalls, or routers. + +type: ip + +-- + +*`destination.nat.port`*:: ++ +-- +Port the source session is translated to by NAT Device. +Typically used with load balancers, firewalls, or routers. + +type: long + +format: string + +-- + +*`destination.packets`*:: ++ +-- +Packets sent from the destination to the source. + +type: long + +example: 12 + +-- + +*`destination.port`*:: ++ +-- +Port of the destination. + +type: long + +format: string + +-- + +*`destination.registered_domain`*:: ++ +-- +The highest registered destination domain, stripped of the subdomain. +For example, the registered domain for "foo.google.com" is "google.com". +This value can be determined precisely with a list like the public suffix list (http://publicsuffix.org). Trying to approximate this by simply taking the last two labels will not work well for TLDs such as "co.uk". + +type: keyword + +example: google.com + +-- + +*`destination.top_level_domain`*:: ++ +-- +The effective top level domain (eTLD), also known as the domain suffix, is the last part of the domain name. For example, the top level domain for google.com is "com". +This value can be determined precisely with a list like the public suffix list (http://publicsuffix.org). Trying to approximate this by simply taking the last label will not work well for effective TLDs such as "co.uk". + +type: keyword + +example: co.uk + +-- + +*`destination.user.domain`*:: ++ +-- +Name of the directory the user is a member of. +For example, an LDAP or Active Directory domain name. + +type: keyword + +-- + +*`destination.user.email`*:: ++ +-- +User email address. + +type: keyword + +-- + +*`destination.user.full_name`*:: ++ +-- +User's full name, if available. + +type: keyword + +example: Albert Einstein + +-- + +*`destination.user.full_name.text`*:: ++ +-- +type: text + +-- + +*`destination.user.group.domain`*:: ++ +-- +Name of the directory the group is a member of. +For example, an LDAP or Active Directory domain name. + +type: keyword + +-- + +*`destination.user.group.id`*:: ++ +-- +Unique identifier for the group on the system/platform. + +type: keyword + +-- + +*`destination.user.group.name`*:: ++ +-- +Name of the group. + +type: keyword + +-- + +*`destination.user.hash`*:: ++ +-- +Unique user hash to correlate information for a user in anonymized form. +Useful if `user.id` or `user.name` contain confidential information and cannot be used. 
+ +type: keyword + +-- + +*`destination.user.id`*:: ++ +-- +Unique identifiers of the user. + +type: keyword + +-- + +*`destination.user.name`*:: ++ +-- +Short name or login of the user. + +type: keyword + +example: albert + +-- + +*`destination.user.name.text`*:: ++ +-- +type: text + +-- + +[float] +=== dll + +These fields contain information about code libraries dynamically loaded into processes. + +Many operating systems refer to "shared code libraries" with different names, but this field set refers to all of the following: +* Dynamic-link library (`.dll`) commonly used on Windows +* Shared Object (`.so`) commonly used on Unix-like operating systems +* Dynamic library (`.dylib`) commonly used on macOS + + +*`dll.code_signature.exists`*:: ++ +-- +Boolean to capture if a signature is present. + +type: boolean + +example: true + +-- + +*`dll.code_signature.status`*:: ++ +-- +Additional information about the certificate status. +This is useful for logging cryptographic errors with the certificate validity or trust status. Leave unpopulated if the validity or trust of the certificate was unchecked. + +type: keyword + +example: ERROR_UNTRUSTED_ROOT + +-- + +*`dll.code_signature.subject_name`*:: ++ +-- +Subject name of the code signer + +type: keyword + +example: Microsoft Corporation + +-- + +*`dll.code_signature.trusted`*:: ++ +-- +Stores the trust status of the certificate chain. +Validating the trust of the certificate chain may be complicated, and this field should only be populated by tools that actively check the status. + +type: boolean + +example: true + +-- + +*`dll.code_signature.valid`*:: ++ +-- +Boolean to capture if the digital signature is verified against the binary content. +Leave unpopulated if a certificate was unchecked. + +type: boolean + +example: true + +-- + +*`dll.hash.md5`*:: ++ +-- +MD5 hash. + +type: keyword + +-- + +*`dll.hash.sha1`*:: ++ +-- +SHA1 hash. + +type: keyword + +-- + +*`dll.hash.sha256`*:: ++ +-- +SHA256 hash. + +type: keyword + +-- + +*`dll.hash.sha512`*:: ++ +-- +SHA512 hash. + +type: keyword + +-- + +*`dll.name`*:: ++ +-- +Name of the library. +This generally maps to the name of the file on disk. + +type: keyword + +example: kernel32.dll + +-- + +*`dll.path`*:: ++ +-- +Full file path of the library. + +type: keyword + +example: C:\Windows\System32\kernel32.dll + +-- + +*`dll.pe.company`*:: ++ +-- +Internal company name of the file, provided at compile-time. + +type: keyword + +example: Microsoft Corporation + +-- + +*`dll.pe.description`*:: ++ +-- +Internal description of the file, provided at compile-time. + +type: keyword + +example: Paint + +-- + +*`dll.pe.file_version`*:: ++ +-- +Internal version of the file, provided at compile-time. + +type: keyword + +example: 6.3.9600.17415 + +-- + +*`dll.pe.original_file_name`*:: ++ +-- +Internal name of the file, provided at compile-time. + +type: keyword + +example: MSPAINT.EXE + +-- + +*`dll.pe.product`*:: ++ +-- +Internal product name of the file, provided at compile-time. + +type: keyword + +example: Microsoft® Windows® Operating System + +-- + +[float] +=== dns + +Fields describing DNS queries and answers. +DNS events should either represent a single DNS query prior to getting answers (`dns.type:query`) or they should represent a full exchange and contain the query details as well as all of the answers that were provided for this query (`dns.type:answer`). + + +*`dns.answers`*:: ++ +-- +An array containing an object for each answer section returned by the server. 
+The main keys that should be present in these objects are defined by ECS. Records that have more information may contain more keys than what ECS defines. +Not all DNS data sources give all details about DNS answers. At minimum, answer objects must contain the `data` key. If more information is available, map as much of it to ECS as possible, and add any additional fields to the answer objects as custom fields. + +type: object + +-- + +*`dns.answers.class`*:: ++ +-- +The class of DNS data contained in this resource record. + +type: keyword + +example: IN + +-- + +*`dns.answers.data`*:: ++ +-- +The data describing the resource. +The meaning of this data depends on the type and class of the resource record. + +type: keyword + +example: 10.10.10.10 + +-- + +*`dns.answers.name`*:: ++ +-- +The domain name to which this resource record pertains. +If a chain of CNAME is being resolved, each answer's `name` should be the one that corresponds with the answer's `data`. It should not simply be the original `question.name` repeated. + +type: keyword + +example: www.google.com + +-- + +*`dns.answers.ttl`*:: ++ +-- +The time interval in seconds that this resource record may be cached before it should be discarded. Zero values mean that the data should not be cached. + +type: long + +example: 180 + +-- + +*`dns.answers.type`*:: ++ +-- +The type of data contained in this resource record. + +type: keyword + +example: CNAME + +-- + +*`dns.header_flags`*:: ++ +-- +Array of 2 letter DNS header flags. +Expected values are: AA, TC, RD, RA, AD, CD, DO. + +type: keyword + +example: ['RD', 'RA'] + +-- + +*`dns.id`*:: ++ +-- +The DNS packet identifier assigned by the program that generated the query. The identifier is copied to the response. + +type: keyword + +example: 62111 + +-- + +*`dns.op_code`*:: ++ +-- +The DNS operation code that specifies the kind of query in the message. This value is set by the originator of a query and copied into the response. + +type: keyword + +example: QUERY + +-- + +*`dns.question.class`*:: ++ +-- +The class of records being queried. + +type: keyword + +example: IN + +-- + +*`dns.question.name`*:: ++ +-- +The name being queried. +If the name field contains non-printable characters (below 32 or above 126), those characters should be represented as escaped base 10 integers (\DDD). Back slashes and quotes should be escaped. Tabs, carriage returns, and line feeds should be converted to \t, \r, and \n respectively. + +type: keyword + +example: www.google.com + +-- + +*`dns.question.registered_domain`*:: ++ +-- +The highest registered domain, stripped of the subdomain. +For example, the registered domain for "foo.google.com" is "google.com". +This value can be determined precisely with a list like the public suffix list (http://publicsuffix.org). Trying to approximate this by simply taking the last two labels will not work well for TLDs such as "co.uk". + +type: keyword + +example: google.com + +-- + +*`dns.question.subdomain`*:: ++ +-- +The subdomain is all of the labels under the registered_domain. +If the domain has multiple levels of subdomain, such as "sub2.sub1.example.com", the subdomain field should contain "sub2.sub1", with no trailing period. + +type: keyword + +example: www + +-- + +*`dns.question.top_level_domain`*:: ++ +-- +The effective top level domain (eTLD), also known as the domain suffix, is the last part of the domain name. For example, the top level domain for google.com is "com". 
+This value can be determined precisely with a list like the public suffix list (http://publicsuffix.org). Trying to approximate this by simply taking the last label will not work well for effective TLDs such as "co.uk". + +type: keyword + +example: co.uk + +-- + +*`dns.question.type`*:: ++ +-- +The type of record being queried. + +type: keyword + +example: AAAA + +-- + +*`dns.resolved_ip`*:: ++ +-- +Array containing all IPs seen in `answers.data`. +The `answers` array can be difficult to use, because of the variety of data formats it can contain. Extracting all IP addresses seen in there to `dns.resolved_ip` makes it possible to index them as IP addresses, and makes them easier to visualize and query for. + +type: ip + +example: ['10.10.10.10', '10.10.10.11'] + +-- + +*`dns.response_code`*:: ++ +-- +The DNS response code. + +type: keyword + +example: NOERROR + +-- + +*`dns.type`*:: ++ +-- +The type of DNS event captured, query or answer. +If your source of DNS events only gives you DNS queries, you should only create dns events of type `dns.type:query`. +If your source of DNS events gives you answers as well, you should create one event per query (optionally as soon as the query is seen). And a second event containing all query details as well as an array of answers. + +type: keyword + +example: answer + +-- + +[float] +=== ecs + +Meta-information specific to ECS. + + +*`ecs.version`*:: ++ +-- +ECS version this event conforms to. `ecs.version` is a required field and must exist in all events. +When querying across multiple indices -- which may conform to slightly different ECS versions -- this field lets integrations adjust to the schema version of the events. + +type: keyword + +example: 1.0.0 + +required: True + +-- + +[float] +=== error + +These fields can represent errors of any kind. +Use them for errors that happen while fetching events or in cases where the event itself contains an error. + + +*`error.code`*:: ++ +-- +Error code describing the error. + +type: keyword + +-- + +*`error.id`*:: ++ +-- +Unique identifier for the error. + +type: keyword + +-- + +*`error.message`*:: ++ +-- +Error message. + +type: text + +-- + +*`error.stack_trace`*:: ++ +-- +The stack trace of this error in plain text. + +type: keyword + +-- + +*`error.stack_trace.text`*:: ++ +-- +type: text + +-- + +*`error.type`*:: ++ +-- +The type of the error, for example the class name of the exception. + +type: keyword + +example: java.lang.NullPointerException + +-- + +[float] +=== event + +The event fields are used for context information about the log or metric event itself. +A log is defined as an event containing details of something that happened. Log events must include the time at which the thing happened. Examples of log events include a process starting on a host, a network packet being sent from a source to a destination, or a network connection between a client and a server being initiated or closed. A metric is defined as an event containing one or more numerical measurements and the time at which the measurement was taken. Examples of metric events include memory pressure measured on a host and device temperature. See the `event.kind` definition in this section for additional details about metric and state events. + + +*`event.action`*:: ++ +-- +The action captured by the event. +This describes the information in the event. It is more specific than `event.category`. Examples are `group-add`, `process-started`, `file-created`. The value is normally defined by the implementer. 
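+
+As a sketch of how an implementer-defined action complements the categorization fields, here is one plausible (not prescribed) combination for a process start:
+
+[source,go]
+----
+package main
+
+import "fmt"
+
+func main() {
+	event := map[string]interface{}{
+		"event.kind":     "event",
+		"event.category": []string{"process"},
+		"event.type":     []string{"start"},
+		"event.action":   "process-started", // most specific, implementer-defined
+	}
+	fmt.Println(event)
+}
+----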
+ +type: keyword + +example: user-password-change + +-- + +*`event.category`*:: ++ +-- +This is one of four ECS Categorization Fields, and indicates the second level in the ECS category hierarchy. +`event.category` represents the "big buckets" of ECS categories. For example, filtering on `event.category:process` yields all events relating to process activity. This field is closely related to `event.type`, which is used as a subcategory. +This field is an array. This will allow proper categorization of some events that fall in multiple categories. + +type: keyword + +example: authentication + +-- + +*`event.code`*:: ++ +-- +Identification code for this event, if one exists. +Some event sources use event codes to identify messages unambiguously, regardless of message language or wording adjustments over time. An example of this is the Windows Event ID. + +type: keyword + +example: 4648 + +-- + +*`event.created`*:: ++ +-- +event.created contains the date/time when the event was first read by an agent, or by your pipeline. +This field is distinct from @timestamp in that @timestamp typically contains the time extracted from the original event. +In most situations, these two timestamps will be slightly different. The difference can be used to calculate the delay between your source generating an event, and the time when your agent first processed it. This can be used to monitor your agent's or pipeline's ability to keep up with your event source. +In case the two timestamps are identical, @timestamp should be used. + +type: date + +example: 2016-05-23T08:05:34.857Z + +-- + +*`event.dataset`*:: ++ +-- +Name of the dataset. +If an event source publishes more than one type of log or event (e.g. access log, error log), the dataset is used to specify which one the event comes from. +It's recommended but not required to start the dataset name with the module name, followed by a dot, then the dataset name. + +type: keyword + +example: apache.access + +-- + +*`event.duration`*:: ++ +-- +Duration of the event in nanoseconds. +If event.start and event.end are known, this value should be the difference between the end and start time. + +type: long + +format: duration + +-- + +*`event.end`*:: ++ +-- +event.end contains the date when the event ended or when the activity was last observed. + +type: date + +-- + +*`event.hash`*:: ++ +-- +Hash (perhaps logstash fingerprint) of raw field to be able to demonstrate log integrity. + +type: keyword + +example: 123456789012345678901234567890ABCD + +-- + +*`event.id`*:: ++ +-- +Unique ID to describe the event. + +type: keyword + +example: 8a4f500d + +-- + +*`event.ingested`*:: ++ +-- +Timestamp when an event arrived in the central data store. +This is different from `@timestamp`, which is when the event originally occurred. It's also different from `event.created`, which is meant to capture the first time an agent saw the event. +In normal conditions, assuming no tampering, the timestamps should chronologically look like this: `@timestamp` < `event.created` < `event.ingested`. + +type: date + +example: 2016-05-23T08:05:35.101Z + +-- + +*`event.kind`*:: ++ +-- +This is one of four ECS Categorization Fields, and indicates the highest level in the ECS category hierarchy. +`event.kind` gives high-level information about what type of information the event contains, without being specific to the contents of the event. For example, values of this field distinguish alert events from metric events.
+The value of this field can be used to inform how these kinds of events should be handled. They may warrant different retention, different access control, it may also help understand whether the data is coming in at a regular interval or not. + +type: keyword + +example: alert + +-- + +*`event.module`*:: ++ +-- +Name of the module this data is coming from. +If your monitoring agent supports the concept of modules or plugins to process events of a given source (e.g. Apache logs), `event.module` should contain the name of this module. + +type: keyword + +example: apache + +-- + +*`event.original`*:: ++ +-- +Raw text message of the entire event. Used to demonstrate log integrity. +This field is not indexed and doc_values are disabled. It cannot be searched, but it can be retrieved from `_source`. + +type: keyword + +example: Sep 19 08:26:10 host CEF:0|Security| threatmanager|1.0|100| worm successfully stopped|10|src=10.0.0.1 dst=2.1.2.2spt=1232 + +-- + +*`event.outcome`*:: ++ +-- +This is one of four ECS Categorization Fields, and indicates the lowest level in the ECS category hierarchy. +`event.outcome` simply denotes whether the event represents a success or a failure from the perspective of the entity that produced the event. +Note that when a single transaction is described in multiple events, each event may populate different values of `event.outcome`, according to their perspective. +Also note that in the case of a compound event (a single event that contains multiple logical events), this field should be populated with the value that best captures the overall success or failure from the perspective of the event producer. +Further note that not all events will have an associated outcome. For example, this field is generally not populated for metric events, events with `event.type:info`, or any events for which an outcome does not make logical sense. + +type: keyword + +example: success + +-- + +*`event.provider`*:: ++ +-- +Source of the event. +Event transports such as Syslog or the Windows Event Log typically mention the source of an event. It can be the name of the software that generated the event (e.g. Sysmon, httpd), or of a subsystem of the operating system (kernel, Microsoft-Windows-Security-Auditing). + +type: keyword + +example: kernel + +-- + +*`event.reference`*:: ++ +-- +Reference URL linking to additional information about this event. +This URL links to a static definition of this event. Alert events, indicated by `event.kind:alert`, are a common use case for this field. + +type: keyword + +example: https://system.vendor.com/event/#0001234 + +-- + +*`event.risk_score`*:: ++ +-- +Risk score or priority of the event (e.g. security solutions). Use your system's original value here. + +type: float + +-- + +*`event.risk_score_norm`*:: ++ +-- +Normalized risk score or priority of the event, on a scale of 0 to 100. +This is mainly useful if you use more than one system that assigns risk scores, and you want to see a normalized value across all systems. + +type: float + +-- + +*`event.sequence`*:: ++ +-- +Sequence number of the event. +The sequence number is a value published by some event sources, to make the exact ordering of events unambiguous, regardless of the timestamp precision. + +type: long + +format: string + +-- + +*`event.severity`*:: ++ +-- +The numeric severity of the event according to your event source. +What the different severity values mean can be different between sources and use cases.
It's up to the implementer to make sure severities are consistent across events from the same source. +The Syslog severity belongs in `log.syslog.severity.code`. `event.severity` is meant to represent the severity according to the event source (e.g. firewall, IDS). If the event source does not publish its own severity, you may optionally copy the `log.syslog.severity.code` to `event.severity`. + +type: long + +example: 7 + +format: string + +-- + +*`event.start`*:: ++ +-- +event.start contains the date when the event started or when the activity was first observed. + +type: date + +-- + +*`event.timezone`*:: ++ +-- +This field should be populated when the event's timestamp does not include timezone information already (e.g. default Syslog timestamps). It's optional otherwise. +Acceptable timezone formats are: a canonical ID (e.g. "Europe/Amsterdam"), abbreviated (e.g. "EST") or an HH:mm differential (e.g. "-05:00"). + +type: keyword + +-- + +*`event.type`*:: ++ +-- +This is one of four ECS Categorization Fields, and indicates the third level in the ECS category hierarchy. +`event.type` represents a categorization "sub-bucket" that, when used along with the `event.category` field values, enables filtering events down to a level appropriate for a single visualization. +This field is an array. This will allow proper categorization of some events that fall in multiple event types. + +type: keyword + +-- + +*`event.url`*:: ++ +-- +URL linking to an external system to continue investigation of this event. +This URL links to another system where in-depth investigation of the specific occurrence of this event can take place. Alert events, indicated by `event.kind:alert`, are a common use case for this field. + +type: keyword + +example: https://mysystem.mydomain.com/alert/5271dedb-f5b0-4218-87f0-4ac4870a38fe + +-- + +[float] +=== file + +A file is defined as a set of information that has been created on, or has existed on a filesystem. +File objects can be associated with host events, network events, and/or file events (e.g., those produced by File Integrity Monitoring [FIM] products or services). File fields provide details about the affected file associated with the event or metric. + + +*`file.accessed`*:: ++ +-- +Last time the file was accessed. +Note that not all filesystems keep track of access time. + +type: date + +-- + +*`file.attributes`*:: ++ +-- +Array of file attributes. +Attribute names will vary by platform. Here's a non-exhaustive list of values that are expected in this field: archive, compressed, directory, encrypted, execute, hidden, read, readonly, system, write. + +type: keyword + +example: ["readonly", "system"] + +-- + +*`file.code_signature.exists`*:: ++ +-- +Boolean to capture if a signature is present. + +type: boolean + +example: true + +-- + +*`file.code_signature.status`*:: ++ +-- +Additional information about the certificate status. +This is useful for logging cryptographic errors with the certificate validity or trust status. Leave unpopulated if the validity or trust of the certificate was unchecked. type: keyword @@ -8347,7 +10676,7 @@ example: ERROR_UNTRUSTED_ROOT -- -*`code_signature.subject_name`*:: +*`file.code_signature.subject_name`*:: + -- Subject name of the code signer @@ -8358,7 +10687,7 @@ example: Microsoft Corporation -- -*`code_signature.trusted`*:: +*`file.code_signature.trusted`*:: + -- Stores the trust status of the certificate chain.
@@ -8370,7 +10699,7 @@ example: true -- -*`code_signature.valid`*:: +*`file.code_signature.valid`*:: + -- Boolean to capture if the digital signature is verified against the binary content. @@ -8382,138 +10711,322 @@ example: true -- -[float] -=== container +*`file.created`*:: ++ +-- +File creation time. +Note that not all filesystems store the creation time. -Container fields are used for meta information about the specific container that is the source of information. -These fields help correlate data based containers from any runtime. +type: date +-- -*`container.id`*:: +*`file.ctime`*:: + -- -Unique container id. +Last time the file attributes or metadata changed. +Note that changes to the file content will update `mtime`. This implies `ctime` will be adjusted at the same time, since `mtime` is an attribute of the file. + +type: date + +-- + +*`file.device`*:: ++ +-- +Device that is the source of the file. type: keyword +example: sda + -- -*`container.image.name`*:: +*`file.directory`*:: + -- -Name of the image the container was built on. +Directory where the file is located. It should include the drive letter, when appropriate. type: keyword +example: /home/alice + -- -*`container.image.tag`*:: +*`file.drive_letter`*:: + -- -Container image tags. +Drive letter where the file is located. This field is only relevant on Windows. +The value should be uppercase, and not include the colon. type: keyword +example: C + -- -*`container.labels`*:: +*`file.extension`*:: + -- -Image labels. +File extension. -type: object +type: keyword + +example: png -- -*`container.name`*:: +*`file.gid`*:: + -- -Container name. +Primary group ID (GID) of the file. type: keyword +example: 1001 + -- -*`container.runtime`*:: +*`file.group`*:: + -- -Runtime managing this container. +Primary group name of the file. type: keyword -example: docker +example: alice -- -[float] -=== destination +*`file.hash.md5`*:: ++ +-- +MD5 hash. -Destination fields describe details about the destination of a packet/event. -Destination fields are usually populated in conjunction with source fields. +type: keyword +-- -*`destination.address`*:: +*`file.hash.sha1`*:: + -- -Some event destination addresses are defined ambiguously. The event will sometimes list an IP, a domain or a unix socket. You should always store the raw address in the `.address` field. -Then it should be duplicated to `.ip` or `.domain`, depending on which one it is. +SHA1 hash. type: keyword -- -*`destination.as.number`*:: +*`file.hash.sha256`*:: + -- -Unique number allocated to the autonomous system. The autonomous system number (ASN) uniquely identifies each network on the Internet. +SHA256 hash. -type: long +type: keyword -example: 15169 +-- + +*`file.hash.sha512`*:: ++ +-- +SHA512 hash. + +type: keyword -- -*`destination.as.organization.name`*:: +*`file.inode`*:: + -- -Organization name. +Inode representing the file in the filesystem. type: keyword -example: Google LLC +example: 256383 -- -*`destination.as.organization.name.text`*:: +*`file.mime_type`*:: ++ +-- +MIME type should identify the format of the file or stream of bytes using https://www.iana.org/assignments/media-types/media-types.xhtml[IANA official types], where possible. When more than one type is applicable, the most specific type should be used. + +type: keyword + +-- + +*`file.mode`*:: ++ +-- +Mode of the file in octal representation. + +type: keyword + +example: 0640 + +-- + +*`file.mtime`*:: ++ +-- +Last time the file content was modified. 
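+
+A minimal sketch of reading this value with Go's standard library (note that `ctime` is not exposed portably; on Linux it would require inspecting `syscall.Stat_t`):
+
+[source,go]
+----
+package main
+
+import (
+	"fmt"
+	"os"
+)
+
+func main() {
+	fi, err := os.Stat("/etc/hosts")
+	if err != nil {
+		panic(err)
+	}
+	// ModTime maps naturally onto file.mtime.
+	fmt.Println(fi.ModTime().UTC())
+}
+----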
+ +type: date + +-- + +*`file.name`*:: ++ +-- +Name of the file including the extension, without the directory. + +type: keyword + +example: example.png + +-- + +*`file.owner`*:: ++ +-- +File owner's username. + +type: keyword + +example: alice + +-- + +*`file.path`*:: ++ +-- +Full path to the file, including the file name. It should include the drive letter, when appropriate. + +type: keyword + +example: /home/alice/example.png + +-- + +*`file.path.text`*:: + -- type: text -- -*`destination.bytes`*:: +*`file.pe.company`*:: + -- -Bytes sent from the destination to the source. +Internal company name of the file, provided at compile-time. + +type: keyword + +example: Microsoft Corporation + +-- + +*`file.pe.description`*:: ++ +-- +Internal description of the file, provided at compile-time. + +type: keyword + +example: Paint + +-- + +*`file.pe.file_version`*:: ++ +-- +Internal version of the file, provided at compile-time. + +type: keyword + +example: 6.3.9600.17415 + +-- + +*`file.pe.original_file_name`*:: ++ +-- +Internal name of the file, provided at compile-time. + +type: keyword + +example: MSPAINT.EXE + +-- + +*`file.pe.product`*:: ++ +-- +Internal product name of the file, provided at compile-time. + +type: keyword + +example: Microsoft® Windows® Operating System + +-- + +*`file.size`*:: ++ +-- +File size in bytes. +Only relevant when `file.type` is "file". type: long -example: 184 +example: 16384 -format: bytes +-- +*`file.target_path`*:: ++ -- +Target path for symlinks. -*`destination.domain`*:: +type: keyword + +-- + +*`file.target_path.text`*:: + -- -Destination domain. +type: text + +-- + +*`file.type`*:: ++ +-- +File type (file, dir, or symlink). type: keyword +example: file + -- -*`destination.geo.city_name`*:: +*`file.uid`*:: ++ +-- +The user ID (UID) or security identifier (SID) of the file owner. + +type: keyword + +example: 1001 + +-- + +[float] +=== geo + +Geo fields can carry data about a specific location related to an event. +This geolocation information can be derived from techniques such as Geo IP, or be user-supplied. + + +*`geo.city_name`*:: + -- City name. @@ -8524,7 +11037,7 @@ example: Montreal -- -*`destination.geo.continent_name`*:: +*`geo.continent_name`*:: + -- Name of the continent. @@ -8535,7 +11048,7 @@ example: North America -- -*`destination.geo.country_iso_code`*:: +*`geo.country_iso_code`*:: + -- Country ISO code. @@ -8546,7 +11059,7 @@ example: CA -- -*`destination.geo.country_name`*:: +*`geo.country_name`*:: + -- Country name. @@ -8557,7 +11070,7 @@ example: Canada -- -*`destination.geo.location`*:: +*`geo.location`*:: + -- Longitude and latitude. @@ -8568,7 +11081,7 @@ example: { "lon": -73.614830, "lat": 45.505918 } -- -*`destination.geo.name`*:: +*`geo.name`*:: + -- User-defined description of a location, at the level of granularity they care about. @@ -8581,7 +11094,7 @@ example: boston-dc -- -*`destination.geo.region_iso_code`*:: +*`geo.region_iso_code`*:: + -- Region ISO code. @@ -8592,7 +11105,7 @@ example: CA-QC -- -*`destination.geo.region_name`*:: +*`geo.region_name`*:: + -- Region name. @@ -8603,6519 +11116,6432 @@ example: Quebec -- -*`destination.ip`*:: +[float] +=== group + +The group fields are meant to represent groups that are relevant to the event. + + +*`group.domain`*:: + -- -IP address of the destination. -Can be one or multiple IPv4 or IPv6 addresses. +Name of the directory the group is a member of. +For example, an LDAP or Active Directory domain name. 
-type: ip +type: keyword -- -*`destination.mac`*:: +*`group.id`*:: + -- -MAC address of the destination. +Unique identifier for the group on the system/platform. type: keyword -- -*`destination.nat.ip`*:: +*`group.name`*:: + -- -Translated ip of destination based NAT sessions (e.g. internet to private DMZ) -Typically used with load balancers, firewalls, or routers. +Name of the group. -type: ip +type: keyword -- -*`destination.nat.port`*:: +[float] +=== hash + +The hash fields represent different hash algorithms and their values. +Field names for common hashes (e.g. MD5, SHA1) are predefined. Add fields for other hashes by lowercasing the hash algorithm name and using underscore separators as appropriate (snake case, e.g. sha3_512). + + +*`hash.md5`*:: + -- -Port the source session is translated to by NAT Device. -Typically used with load balancers, firewalls, or routers. +MD5 hash. -type: long +type: keyword -format: string +-- +*`hash.sha1`*:: ++ -- +SHA1 hash. -*`destination.packets`*:: +type: keyword + +-- + +*`hash.sha256`*:: + -- -Packets sent from the destination to the source. +SHA256 hash. -type: long +type: keyword -example: 12 +-- +*`hash.sha512`*:: ++ -- +SHA512 hash. -*`destination.port`*:: +type: keyword + +-- + +[float] +=== host + +A host is defined as a general computing instance. +ECS host.* fields should be populated with details about the host on which the event happened, or from which the measurement was taken. Host types include hardware, virtual machines, Docker containers, and Kubernetes nodes. + + +*`host.architecture`*:: + -- -Port of the destination. +Operating system architecture. + +type: keyword + +example: x86_64 + +-- + +*`host.domain`*:: ++ +-- +Name of the domain of which the host is a member. +For example, on Windows this could be the host's Active Directory domain or NetBIOS domain name. For Linux this could be the domain of the host's LDAP provider. + +type: keyword + +example: CONTOSO + +-- + +*`host.geo.city_name`*:: ++ +-- +City name. + +type: keyword + +example: Montreal + +-- + +*`host.geo.continent_name`*:: ++ +-- +Name of the continent. + +type: keyword + +example: North America + +-- + +*`host.geo.country_iso_code`*:: ++ +-- +Country ISO code. + +type: keyword + +example: CA + +-- + +*`host.geo.country_name`*:: ++ +-- +Country name. + +type: keyword + +example: Canada + +-- + +*`host.geo.location`*:: ++ +-- +Longitude and latitude. + +type: geo_point + +example: { "lon": -73.614830, "lat": 45.505918 } + +-- + +*`host.geo.name`*:: ++ +-- +User-defined description of a location, at the level of granularity they care about. +Could be the name of their data centers, the floor number, if this describes a local physical entity, city names. +Not typically used in automated geolocation. -type: long +type: keyword -format: string +example: boston-dc -- -*`destination.registered_domain`*:: +*`host.geo.region_iso_code`*:: + -- -The highest registered destination domain, stripped of the subdomain. -For example, the registered domain for "foo.google.com" is "google.com". -This value can be determined precisely with a list like the public suffix list (http://publicsuffix.org). Trying to approximate this by simply taking the last two labels will not work well for TLDs such as "co.uk". +Region ISO code. type: keyword -example: google.com +example: CA-QC -- -*`destination.top_level_domain`*:: +*`host.geo.region_name`*:: + -- -The effective top level domain (eTLD), also known as the domain suffix, is the last part of the domain name. 
For example, the top level domain for google.com is "com". -This value can be determined precisely with a list like the public suffix list (http://publicsuffix.org). Trying to approximate this by simply taking the last label will not work well for effective TLDs such as "co.uk". +Region name. type: keyword -example: co.uk +example: Quebec -- -*`destination.user.domain`*:: +*`host.hostname`*:: + -- -Name of the directory the user is a member of. -For example, an LDAP or Active Directory domain name. +Hostname of the host. +It normally contains what the `hostname` command returns on the host machine. type: keyword -- -*`destination.user.email`*:: +*`host.id`*:: + -- -User email address. +Unique host id. +As hostname is not always unique, use values that are meaningful in your environment. +Example: The current usage of `beat.name`. type: keyword -- -*`destination.user.full_name`*:: +*`host.ip`*:: + -- -User's full name, if available. - -type: keyword +Host IP addresses. -example: Albert Einstein +type: ip -- -*`destination.user.full_name.text`*:: +*`host.mac`*:: + -- -type: text +Host MAC addresses. + +type: keyword -- -*`destination.user.group.domain`*:: +*`host.name`*:: + -- -Name of the directory the group is a member of. -For example, an LDAP or Active Directory domain name. +Name of the host. +It can contain what `hostname` returns on Unix systems, the fully qualified domain name, or a name specified by the user. The sender decides which value to use. type: keyword -- -*`destination.user.group.id`*:: +*`host.os.family`*:: + -- -Unique identifier for the group on the system/platform. +OS family (such as redhat, debian, freebsd, windows). type: keyword +example: debian + -- -*`destination.user.group.name`*:: +*`host.os.full`*:: + -- -Name of the group. +Operating system name, including the version or code name. type: keyword +example: Mac OS Mojave + -- -*`destination.user.hash`*:: +*`host.os.full.text`*:: + -- -Unique user hash to correlate information for a user in anonymized form. -Useful if `user.id` or `user.name` contain confidential information and cannot be used. - -type: keyword +type: text -- -*`destination.user.id`*:: +*`host.os.kernel`*:: + -- -Unique identifiers of the user. +Operating system kernel version as a raw string. type: keyword +example: 4.4.0-112-generic + -- -*`destination.user.name`*:: +*`host.os.name`*:: + -- -Short name or login of the user. +Operating system name, without the version. type: keyword -example: albert +example: Mac OS X -- -*`destination.user.name.text`*:: +*`host.os.name.text`*:: + -- type: text -- -[float] -=== dll - -These fields contain information about code libraries dynamically loaded into processes. - -Many operating systems refer to "shared code libraries" with different names, but this field set refers to all of the following: -* Dynamic-link library (`.dll`) commonly used on Windows -* Shared Object (`.so`) commonly used on Unix-like operating systems -* Dynamic library (`.dylib`) commonly used on macOS - - -*`dll.code_signature.exists`*:: +*`host.os.platform`*:: + -- -Boolean to capture if a signature is present. +Operating system platform (such as centos, ubuntu, windows). -type: boolean +type: keyword -example: true +example: darwin -- -*`dll.code_signature.status`*:: + -- -Additional information about the certificate status. -This is useful for logging cryptographic errors with the certificate validity or trust status. Leave unpopulated if the validity or trust of the certificate was unchecked.
+Operating system version as a raw string. type: keyword -example: ERROR_UNTRUSTED_ROOT +example: 10.14.1 -- -*`dll.code_signature.subject_name`*:: +*`host.type`*:: + -- -Subject name of the code signer +Type of host. +For Cloud providers this can be the machine type like `t2.medium`. If vm, this could be the container, for example, or other information meaningful in your environment. type: keyword -example: Microsoft Corporation - -- -*`dll.code_signature.trusted`*:: +*`host.uptime`*:: + -- -Stores the trust status of the certificate chain. -Validating the trust of the certificate chain may be complicated, and this field should only be populated by tools that actively check the status. +Seconds the host has been up. -type: boolean +type: long -example: true +example: 1325 -- -*`dll.code_signature.valid`*:: +*`host.user.domain`*:: + -- -Boolean to capture if the digital signature is verified against the binary content. -Leave unpopulated if a certificate was unchecked. - -type: boolean +Name of the directory the user is a member of. +For example, an LDAP or Active Directory domain name. -example: true +type: keyword -- -*`dll.hash.md5`*:: +*`host.user.email`*:: + -- -MD5 hash. +User email address. type: keyword -- -*`dll.hash.sha1`*:: +*`host.user.full_name`*:: + -- -SHA1 hash. +User's full name, if available. type: keyword +example: Albert Einstein + -- -*`dll.hash.sha256`*:: +*`host.user.full_name.text`*:: + -- -SHA256 hash. - -type: keyword +type: text -- -*`dll.hash.sha512`*:: +*`host.user.group.domain`*:: + -- -SHA512 hash. +Name of the directory the group is a member of. +For example, an LDAP or Active Directory domain name. type: keyword -- -*`dll.name`*:: +*`host.user.group.id`*:: + -- -Name of the library. -This generally maps to the name of the file on disk. +Unique identifier for the group on the system/platform. type: keyword -example: kernel32.dll - -- -*`dll.path`*:: +*`host.user.group.name`*:: + -- -Full file path of the library. +Name of the group. type: keyword -example: C:\Windows\System32\kernel32.dll - -- -*`dll.pe.company`*:: +*`host.user.hash`*:: + -- -Internal company name of the file, provided at compile-time. +Unique user hash to correlate information for a user in anonymized form. +Useful if `user.id` or `user.name` contain confidential information and cannot be used. type: keyword -example: Microsoft Corporation - -- -*`dll.pe.description`*:: +*`host.user.id`*:: + -- -Internal description of the file, provided at compile-time. +Unique identifiers of the user. type: keyword -example: Paint - -- -*`dll.pe.file_version`*:: +*`host.user.name`*:: + -- -Internal version of the file, provided at compile-time. +Short name or login of the user. type: keyword -example: 6.3.9600.17415 +example: albert -- -*`dll.pe.original_file_name`*:: +*`host.user.name.text`*:: + -- -Internal name of the file, provided at compile-time. +type: text -type: keyword +-- -example: MSPAINT.EXE +[float] +=== http --- +Fields related to HTTP activity. Use the `url` field set to store the url of the request. -*`dll.pe.product`*:: + +*`http.request.body.bytes`*:: + -- -Internal product name of the file, provided at compile-time. +Size in bytes of the request body. -type: keyword +type: long -example: Microsoft® Windows® Operating System +example: 887 + +format: bytes -- -[float] -=== dns +*`http.request.body.content`*:: ++ +-- +The full HTTP request body. -Fields describing DNS queries and answers. 
-DNS events should either represent a single DNS query prior to getting answers (`dns.type:query`) or they should represent a full exchange and contain the query details as well as all of the answers that were provided for this query (`dns.type:answer`). +type: keyword +example: Hello world -*`dns.answers`*:: -+ -- -An array containing an object for each answer section returned by the server. -The main keys that should be present in these objects are defined by ECS. Records that have more information may contain more keys than what ECS defines. -Not all DNS data sources give all details about DNS answers. At minimum, answer objects must contain the `data` key. If more information is available, map as much of it to ECS as possible, and add any additional fields to the answer objects as custom fields. -type: object +*`http.request.body.content.text`*:: ++ +-- +type: text -- -*`dns.answers.class`*:: +*`http.request.bytes`*:: + -- -The class of DNS data contained in this resource record. +Total size in bytes of the request (body and headers). -type: keyword +type: long -example: IN +example: 1437 + +format: bytes -- -*`dns.answers.data`*:: +*`http.request.method`*:: + -- -The data describing the resource. -The meaning of this data depends on the type and class of the resource record. +HTTP request method. +The field value must be normalized to lowercase for querying. See the documentation section "Implementing ECS". type: keyword -example: 10.10.10.10 +example: get, post, put -- -*`dns.answers.name`*:: +*`http.request.referrer`*:: + -- -The domain name to which this resource record pertains. -If a chain of CNAME is being resolved, each answer's `name` should be the one that corresponds with the answer's `data`. It should not simply be the original `question.name` repeated. +Referrer for this HTTP request. type: keyword -example: www.google.com +example: https://blog.example.com/ -- -*`dns.answers.ttl`*:: +*`http.response.body.bytes`*:: + -- -The time interval in seconds that this resource record may be cached before it should be discarded. Zero values mean that the data should not be cached. +Size in bytes of the response body. type: long -example: 180 +example: 887 + +format: bytes -- -*`dns.answers.type`*:: +*`http.response.body.content`*:: + -- -The type of data contained in this resource record. +The full HTTP response body. type: keyword -example: CNAME +example: Hello world -- -*`dns.header_flags`*:: +*`http.response.body.content.text`*:: + -- -Array of 2 letter DNS header flags. -Expected values are: AA, TC, RD, RA, AD, CD, DO. - -type: keyword - -example: ['RD', 'RA'] +type: text -- -*`dns.id`*:: +*`http.response.bytes`*:: + -- -The DNS packet identifier assigned by the program that generated the query. The identifier is copied to the response. +Total size in bytes of the response (body and headers). -type: keyword +type: long -example: 62111 +example: 1437 + +format: bytes -- -*`dns.op_code`*:: +*`http.response.status_code`*:: + -- -The DNS operation code that specifies the kind of query in the message. This value is set by the originator of a query and copied into the response. +HTTP response status code. -type: keyword +type: long -example: QUERY +example: 404 + +format: string -- -*`dns.question.class`*:: +*`http.version`*:: + -- -The class of records being queried. +HTTP version. 
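+
+A rough sketch of deriving a few of these `http.*` values from Go's `net/http` types (the mapping itself is an illustrative assumption, not a Beats API):
+
+[source,go]
+----
+package main
+
+import (
+	"fmt"
+	"net/http"
+	"strings"
+)
+
+func main() {
+	resp, err := http.Get("https://example.com/")
+	if err != nil {
+		panic(err)
+	}
+	defer resp.Body.Close()
+	fmt.Println(map[string]interface{}{
+		"http.response.status_code": resp.StatusCode,
+		// resp.Proto is e.g. "HTTP/1.1"; ECS stores just "1.1".
+		"http.version": strings.TrimPrefix(resp.Proto, "HTTP/"),
+		// Methods must be normalized to lowercase for querying.
+		"http.request.method": strings.ToLower(resp.Request.Method),
+	})
+}
+----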
type: keyword -example: IN +example: 1.1 -- -*`dns.question.name`*:: +[float] +=== interface + +The interface fields are used to record ingress and egress interface information when reported by an observer (e.g. firewall, router, load balancer) in the context of the observer handling a network connection. In the case of a single observer interface (e.g. network sensor on a span port) only the observer.ingress information should be populated. + + +*`interface.alias`*:: + -- -The name being queried. -If the name field contains non-printable characters (below 32 or above 126), those characters should be represented as escaped base 10 integers (\DDD). Back slashes and quotes should be escaped. Tabs, carriage returns, and line feeds should be converted to \t, \r, and \n respectively. +Interface alias as reported by the system, typically used in firewall implementations for e.g. inside, outside, or dmz logical interface naming. type: keyword -example: www.google.com +example: outside -- -*`dns.question.registered_domain`*:: +*`interface.id`*:: + -- -The highest registered domain, stripped of the subdomain. -For example, the registered domain for "foo.google.com" is "google.com". -This value can be determined precisely with a list like the public suffix list (http://publicsuffix.org). Trying to approximate this by simply taking the last two labels will not work well for TLDs such as "co.uk". +Interface ID as reported by an observer (typically SNMP interface ID). type: keyword -example: google.com +example: 10 -- -*`dns.question.subdomain`*:: +*`interface.name`*:: + -- -The subdomain is all of the labels under the registered_domain. -If the domain has multiple levels of subdomain, such as "sub2.sub1.example.com", the subdomain field should contain "sub2.sub1", with no trailing period. +Interface name as reported by the system. type: keyword -example: www +example: eth0 -- -*`dns.question.top_level_domain`*:: +[float] +=== log + +Details about the event's logging mechanism or logging transport. +The log.* fields are typically populated with details about the logging mechanism used to create and/or transport the event. For example, syslog details belong under `log.syslog.*`. +The details specific to your event source are typically not logged under `log.*`, but rather in `event.*` or in other ECS fields. + + +*`log.level`*:: + -- -The effective top level domain (eTLD), also known as the domain suffix, is the last part of the domain name. For example, the top level domain for google.com is "com". -This value can be determined precisely with a list like the public suffix list (http://publicsuffix.org). Trying to approximate this by simply taking the last label will not work well for effective TLDs such as "co.uk". +Original log level of the log event. +If the source of the event provides a log level or textual severity, this is the one that goes in `log.level`. If your source doesn't specify one, you may put your event transport's severity here (e.g. Syslog severity). +Some examples are `warn`, `err`, `i`, `informational`. type: keyword -example: co.uk +example: error -- -*`dns.question.type`*:: +*`log.logger`*:: + -- -The type of record being queried. +The name of the logger inside an application. This is usually the name of the class which initialized the logger, or can be a custom name. type: keyword -example: AAAA +example: org.elasticsearch.bootstrap.Bootstrap -- -*`dns.resolved_ip`*:: +*`log.origin.file.line`*:: + -- -Array containing all IPs seen in `answers.data`. 
-The `answers` array can be difficult to use, because of the variety of data formats it can contain. Extracting all IP addresses seen in there to `dns.resolved_ip` makes it possible to index them as IP addresses, and makes them easier to visualize and query for. +The line number of the file containing the source code which originated the log event. -type: ip +type: integer -example: ['10.10.10.10', '10.10.10.11'] +example: 42 -- -*`dns.response_code`*:: +*`log.origin.file.name`*:: + -- -The DNS response code. +The name of the file containing the source code which originated the log event. Note that this is not the name of the log file. type: keyword -example: NOERROR +example: Bootstrap.java -- -*`dns.type`*:: +*`log.origin.function`*:: + -- -The type of DNS event captured, query or answer. -If your source of DNS events only gives you DNS queries, you should only create dns events of type `dns.type:query`. -If your source of DNS events gives you answers as well, you should create one event per query (optionally as soon as the query is seen). And a second event containing all query details as well as an array of answers. +The name of the function or method which originated the log event. type: keyword -example: answer +example: init -- -[float] -=== ecs - -Meta-information specific to ECS. - - -*`ecs.version`*:: +*`log.original`*:: + -- -ECS version this event conforms to. `ecs.version` is a required field and must exist in all events. -When querying across multiple indices -- which may conform to slightly different ECS versions -- this field lets integrations adjust to the schema version of the events. +This is the original log message and contains the full log message before splitting it up into multiple parts. +In contrast to the `message` field which can contain an extracted part of the log message, this field contains the original, full log message. It may already have some modifications applied, such as encoding or removal of new lines, to clean up the log message. +This field is not indexed and doc_values are disabled, so it can't be queried, but the value can be retrieved from `_source`. type: keyword -example: 1.0.0 - -required: True +example: Sep 19 08:26:10 localhost My log -- -[float] -=== error - -These fields can represent errors of any kind. -Use them for errors that happen while fetching events or in cases where the event itself contains an error. - - -*`error.code`*:: +*`log.syslog`*:: + -- -Error code describing the error. +The Syslog metadata of the event, if the event was transmitted via Syslog. Please see RFCs 5424 or 3164. -type: keyword +type: object -- -*`error.id`*:: +*`log.syslog.facility.code`*:: + -- -Unique identifier for the error. +The Syslog numeric facility of the log event, if available. +According to RFCs 5424 and 3164, this value should be an integer between 0 and 23. -type: keyword +type: long + +example: 23 + +format: string -- -*`error.message`*:: +*`log.syslog.facility.name`*:: + -- -Error message. +The Syslog text-based facility of the log event, if available. -type: text +type: keyword + +example: local7 -- -*`error.stack_trace`*:: +*`log.syslog.priority`*:: + -- -The stack trace of this error in plain text. +Syslog numeric priority of the event, if available. +According to RFCs 5424 and 3164, the priority is 8 * facility + severity. This number is therefore expected to contain a value between 0 and 191.
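+
+Since the priority is defined as `8 * facility + severity`, both parts can be recovered from it. A minimal sketch of that arithmetic (the helper below is illustrative only, not part of any Beats API):
+
+[source,go]
+----
+package main
+
+import "fmt"
+
+// decodePriority splits a syslog priority into the numeric facility and
+// severity, per RFCs 5424 and 3164 (priority = 8 * facility + severity).
+func decodePriority(pri int) (facility, severity int) {
+	return pri / 8, pri % 8
+}
+
+func main() {
+	f, s := decodePriority(135)
+	fmt.Println(f, s) // 16 (local0), 7 (debug)
+}
+----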
-type: keyword +type: long + +example: 135 + +format: string -- *`log.syslog.severity.code`*:: + -- -type: text +The Syslog numeric severity of the log event, if available. +If the event source publishing via Syslog provides a different numeric severity value (e.g. firewall, IDS), your source's numeric severity should go to `event.severity`. If the event source does not specify a distinct severity, you can optionally copy the Syslog severity to `event.severity`. + +type: long + +example: 3 -- *`log.syslog.severity.name`*:: + -- -The type of the error, for example the class name of the exception. +The Syslog text-based severity of the log event, if available. +If the event source publishing via Syslog provides a different severity value (e.g. firewall, IDS), your source's text severity should go to `log.level`. If the event source does not specify a distinct severity, you can optionally copy the Syslog severity to `log.level`. type: keyword -example: java.lang.NullPointerException +example: Error -- [float] -=== event +=== network -The event fields are used for context information about the log or metric event itself. -A log is defined as an event containing details of something that happened. Log events must include the time at which the thing happened. Examples of log events include a process starting on a host, a network packet being sent from a source to a destination, or a network connection between a client and a server being initiated or closed. A metric is defined as an event containing one or more numerical measurements and the time at which the measurement was taken. Examples of metric events include memory pressure measured on a host and device temperature. See the `event.kind` definition in this section for additional details about metric and state events. +The network is defined as the communication path over which a host or network event happens. +The network.* fields should be populated with details about the network activity associated with an event. *`network.application`*:: + -- -The action captured by the event. -This describes the information in the event. It is more specific than `event.category`. Examples are `group-add`, `process-started`, `file-created`. The value is normally defined by the implementer. +A name given to an application level protocol. This can be arbitrarily assigned for things like microservices, but can also apply to things like skype, icq, facebook, twitter. This would be used in situations where the vendor or service can be decoded such as from the source/dest IP owners, ports, or wire format. +The field value must be normalized to lowercase for querying. See the documentation section "Implementing ECS". type: keyword -example: user-password-change +example: aim -- *`network.bytes`*:: + -- -This is one of four ECS Categorization Fields, and indicates the second level in the ECS category hierarchy. -`event.category` represents the "big buckets" of ECS categories. For example, filtering on `event.category:process` yields all events relating to process activity. This field is closely related to `event.type`, which is used as a subcategory. -This field is an array. This will allow proper categorization of some events that fall in multiple categories. +Total bytes transferred in both directions. +If `source.bytes` and `destination.bytes` are known, `network.bytes` is their sum.
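+
+A trivial sketch of that bookkeeping (the byte counts are illustrative):
+
+[source,go]
+----
+package main
+
+import "fmt"
+
+func main() {
+	sourceBytes, destinationBytes := int64(120), int64(248)
+	// network.bytes is simply the sum of both directions.
+	fmt.Println(sourceBytes + destinationBytes) // 368
+}
+----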
-type: keyword +type: long -example: authentication +example: 368 + +format: bytes -- -*`event.code`*:: +*`network.community_id`*:: + -- -Identification code for this event, if one exists. -Some event sources use event codes to identify messages unambiguously, regardless of message language or wording adjustments over time. An example of this is the Windows Event ID. +A hash of source and destination IPs and ports, as well as the protocol used in a communication. This is a tool-agnostic standard to identify flows. +Learn more at https://github.com/corelight/community-id-spec. type: keyword -example: 4648 +example: 1:hO+sN4H+MG5MY/8hIrXPqc4ZQz0= -- -*`event.created`*:: +*`network.direction`*:: + -- -event.created contains the date/time when the event was first read by an agent, or by your pipeline. -This field is distinct from @timestamp in that @timestamp typically contain the time extracted from the original event. -In most situations, these two timestamps will be slightly different. The difference can be used to calculate the delay between your source generating an event, and the time when your agent first processed it. This can be used to monitor your agent's or pipeline's ability to keep up with your event source. -In case the two timestamps are identical, @timestamp should be used. +Direction of the network traffic. +Recommended values are: + * inbound + * outbound + * internal + * external + * unknown -type: date +When mapping events from a host-based monitoring context, populate this field from the host's point of view. +When mapping events from a network or perimeter-based monitoring context, populate this field from the point of view of your network perimeter. -example: 2016-05-23T08:05:34.857Z +type: keyword + +example: inbound -- -*`event.dataset`*:: +*`network.forwarded_ip`*:: + -- -Name of the dataset. -If an event source publishes more than one type of log or events (e.g. access log, error log), the dataset is used to specify which one the event comes from. -It's recommended but not required to start the dataset name with the module name, followed by a dot, then the dataset name. +Host IP address when the source IP address is the proxy. -type: keyword +type: ip -example: apache.access +example: 192.1.1.2 -- -*`event.duration`*:: +*`network.iana_number`*:: + -- -Duration of the event in nanoseconds. -If event.start and event.end are known this value should be the difference between the end and start time. +IANA Protocol Number (https://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml). Standardized list of protocols. This aligns well with NetFlow and sFlow related logs which use the IANA Protocol Number. -type: long +type: keyword -format: duration +example: 6 -- -*`event.end`*:: +*`network.inner`*:: + -- -event.end contains the date when the event ended or when the activity was last observed. +Network.inner fields are added in addition to network.vlan fields to describe the innermost VLAN when q-in-q VLAN tagging is present. Allowed fields include vlan.id and vlan.name. Inner vlan fields are typically used when sending traffic with multiple 802.1q encapsulations to a network sensor (e.g. Zeek, Wireshark.) -type: date +type: object -- -*`event.hash`*:: +*`network.inner.vlan.id`*:: + -- -Hash (perhaps logstash fingerprint) of raw field to be able to demonstrate log integrity. +VLAN ID as reported by the observer. 
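+
+For q-in-q tagged traffic, the outer tag would stay in `network.vlan.*` while the innermost tag goes here; a hypothetical mapping:
+
+[source,go]
+----
+package main
+
+import "fmt"
+
+func main() {
+	// Outer 802.1q tag vs. the innermost tag of a q-in-q frame.
+	fmt.Println(map[string]string{
+		"network.vlan.id":       "10",
+		"network.inner.vlan.id": "20",
+	})
+}
+----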
type: keyword -example: 123456789012345678901234567890ABCD +example: 10 -- -*`event.id`*:: +*`network.inner.vlan.name`*:: + -- -Unique ID to describe the event. +Optional VLAN name as reported by the observer. type: keyword -example: 8a4f500d +example: outside -- -*`event.ingested`*:: +*`network.name`*:: + -- -Timestamp when an event arrived in the central data store. -This is different from `@timestamp`, which is when the event originally occurred. It's also different from `event.created`, which is meant to capture the first time an agent saw the event. -In normal conditions, assuming no tampering, the timestamps should chronologically look like this: `@timestamp` < `event.created` < `event.ingested`. +Name given by operators to sections of their network. -type: date +type: keyword -example: 2016-05-23T08:05:35.101Z +example: Guest Wifi -- -*`event.kind`*:: +*`network.packets`*:: + -- -This is one of four ECS Categorization Fields, and indicates the highest level in the ECS category hierarchy. -`event.kind` gives high-level information about what type of information the event contains, without being specific to the contents of the event. For example, values of this field distinguish alert events from metric events. -The value of this field can be used to inform how these kinds of events should be handled. They may warrant different retention, different access control, it may also help understand whether the data coming in at a regular interval or not. +Total packets transferred in both directions. +If `source.packets` and `destination.packets` are known, `network.packets` is their sum. -type: keyword +type: long -example: alert +example: 24 -- -*`event.module`*:: +*`network.protocol`*:: + -- -Name of the module this data is coming from. -If your monitoring agent supports the concept of modules or plugins to process events of a given source (e.g. Apache logs), `event.module` should contain the name of this module. +L7 network protocol name, e.g. http, lumberjack. +The field value must be normalized to lowercase for querying. See the documentation section "Implementing ECS". type: keyword -example: apache +example: http -- -*`event.original`*:: +*`network.transport`*:: + -- -Raw text message of entire event. Used to demonstrate log integrity. -This field is not indexed and doc_values are disabled. It cannot be searched, but it can be retrieved from `_source`. +Same as network.iana_number, but instead using the keyword name of the transport layer (udp, tcp, ipv6-icmp, etc.) +The field value must be normalized to lowercase for querying. See the documentation section "Implementing ECS". type: keyword -example: Sep 19 08:26:10 host CEF:0|Security| threatmanager|1.0|100| worm successfully stopped|10|src=10.0.0.1 dst=2.1.2.2spt=1232 +example: tcp -- -*`event.outcome`*:: +*`network.type`*:: + -- -This is one of four ECS Categorization Fields, and indicates the lowest level in the ECS category hierarchy. -`event.outcome` simply denotes whether the event represents a success or a failure from the perspective of the entity that produced the event. -Note that when a single transaction is described in multiple events, each event may populate different values of `event.outcome`, according to their perspective. -Also note that in the case of a compound event (a single event that contains multiple logical events), this field should be populated with the value that best captures the overall success or failure from the perspective of the event producer.
-Further note that not all events will have an associated outcome. For example, this field is generally not populated for metric events, events with `event.type:info`, or any events for which an outcome does not make logical sense. +In the OSI Model this would be the Network Layer. ipv4, ipv6, ipsec, pim, etc +The field value must be normalized to lowercase for querying. See the documentation section "Implementing ECS". type: keyword -example: success +example: ipv4 -- -*`event.provider`*:: +*`network.vlan.id`*:: + -- -Source of the event. -Event transports such as Syslog or the Windows Event Log typically mention the source of an event. It can be the name of the software that generated the event (e.g. Sysmon, httpd), or of a subsystem of the operating system (kernel, Microsoft-Windows-Security-Auditing). +VLAN ID as reported by the observer. type: keyword -example: kernel +example: 10 -- -*`event.reference`*:: +*`network.vlan.name`*:: + -- -Reference URL linking to additional information about this event. -This URL links to a static definition of the this event. Alert events, indicated by `event.kind:alert`, are a common use case for this field. +Optional VLAN name as reported by the observer. type: keyword -example: https://system.vendor.com/event/#0001234 +example: outside -- -*`event.risk_score`*:: -+ --- -Risk score or priority of the event (e.g. security solutions). Use your system's original value here. +[float] +=== observer -type: float +An observer is defined as a special network, security, or application device used to detect, observe, or create network, security, or application-related events and metrics. +This could be a custom hardware appliance or a server that has been configured to run special network, security, or application software. Examples include firewalls, web proxies, intrusion detection/prevention systems, network monitoring sensors, web application firewalls, data loss prevention systems, and APM servers. The observer.* fields shall be populated with details of the system, if any, that detects, observes and/or creates a network, security, or application event or metric. Message queues and ETL components used in processing events or metrics are not considered observers in ECS. --- -*`event.risk_score_norm`*:: +*`observer.egress`*:: + -- -Normalized risk score or priority of the event, on a scale of 0 to 100. -This is mainly useful if you use more than one system that assigns risk scores, and you want to see a normalized value across all systems. +Observer.egress holds information like interface number and name, vlan, and zone information to classify egress traffic. Single armed monitoring such as a network sensor on a span port should only use observer.ingress to categorize traffic. -type: float +type: object -- -*`event.sequence`*:: +*`observer.egress.interface.alias`*:: + -- -Sequence number of the event. -The sequence number is a value published by some event sources, to make the exact ordering of events unambiguous, regardless of the timestamp precision. +Interface alias as reported by the system, typically used in firewall implementations for e.g. inside, outside, or dmz logical interface naming. -type: long +type: keyword -format: string +example: outside -- -*`event.severity`*:: +*`observer.egress.interface.id`*:: + -- -The numeric severity of the event according to your event source. -What the different severity values mean can be different between sources and use cases. 
It's up to the implementer to make sure severities are consistent across events from the same source. -The Syslog severity belongs in `log.syslog.severity.code`. `event.severity` is meant to represent the severity according to the event source (e.g. firewall, IDS). If the event source does not publish its own severity, you may optionally copy the `log.syslog.severity.code` to `event.severity`. - -type: long - -example: 7 - -format: string - --- +Interface ID as reported by an observer (typically SNMP interface ID). -*`event.start`*:: -+ --- -event.start contains the date when the event started or when the activity was first observed. +type: keyword -type: date +example: 10 -- -*`event.timezone`*:: +*`observer.egress.interface.name`*:: + -- -This field should be populated when the event's timestamp does not include timezone information already (e.g. default Syslog timestamps). It's optional otherwise. -Acceptable timezone formats are: a canonical ID (e.g. "Europe/Amsterdam"), abbreviated (e.g. "EST") or an HH:mm differential (e.g. "-05:00"). +Interface name as reported by the system. type: keyword +example: eth0 + -- -*`event.type`*:: +*`observer.egress.vlan.id`*:: + -- -This is one of four ECS Categorization Fields, and indicates the third level in the ECS category hierarchy. -`event.type` represents a categorization "sub-bucket" that, when used along with the `event.category` field values, enables filtering events down to a level appropriate for single visualization. -This field is an array. This will allow proper categorization of some events that fall in multiple event types. +VLAN ID as reported by the observer. type: keyword +example: 10 + -- -*`event.url`*:: +*`observer.egress.vlan.name`*:: + -- -URL linking to an external system to continue investigation of this event. -This URL links to another system where in-depth investigation of the specific occurence of this event can take place. Alert events, indicated by `event.kind:alert`, are a common use case for this field. +Optional VLAN name as reported by the observer. type: keyword -example: https://mysystem.mydomain.com/alert/5271dedb-f5b0-4218-87f0-4ac4870a38fe +example: outside -- -[float] -=== file - -A file is defined as a set of information that has been created on, or has existed on a filesystem. -File objects can be associated with host events, network events, and/or file events (e.g., those produced by File Integrity Monitoring [FIM] products or services). File fields provide details about the affected file associated with the event or metric. - - -*`file.accessed`*:: +*`observer.egress.zone`*:: + -- -Last time the file was accessed. -Note that not all filesystems keep track of access time. +Network zone of outbound traffic as reported by the observer to categorize the destination area of egress traffic, e.g. Internal, External, DMZ, HR, Legal, etc. -type: date +type: keyword + +example: Public_Internet -- -*`file.attributes`*:: +*`observer.geo.city_name`*:: + -- -Array of file attributes. -Attributes names will vary by platform. Here's a non-exhaustive list of values that are expected in this field: archive, compressed, directory, encrypted, execute, hidden, read, readonly, system, write. +City name. type: keyword -example: ["readonly", "system"] +example: Montreal -- -*`file.code_signature.exists`*:: +*`observer.geo.continent_name`*:: + -- -Boolean to capture if a signature is present. +Name of the continent. 
-type: boolean +type: keyword -example: true +example: North America -- -*`file.code_signature.status`*:: +*`observer.geo.country_iso_code`*:: + -- -Additional information about the certificate status. -This is useful for logging cryptographic errors with the certificate validity or trust status. Leave unpopulated if the validity or trust of the certificate was unchecked. +Country ISO code. type: keyword -example: ERROR_UNTRUSTED_ROOT +example: CA -- -*`file.code_signature.subject_name`*:: +*`observer.geo.country_name`*:: + -- -Subject name of the code signer +Country name. type: keyword -example: Microsoft Corporation +example: Canada -- -*`file.code_signature.trusted`*:: +*`observer.geo.location`*:: + -- -Stores the trust status of the certificate chain. -Validating the trust of the certificate chain may be complicated, and this field should only be populated by tools that actively check the status. +Longitude and latitude. -type: boolean +type: geo_point -example: true +example: { "lon": -73.614830, "lat": 45.505918 } -- -*`file.code_signature.valid`*:: +*`observer.geo.name`*:: + -- -Boolean to capture if the digital signature is verified against the binary content. -Leave unpopulated if a certificate was unchecked. +User-defined description of a location, at the level of granularity they care about. +Could be the name of their data centers, the floor number, if this describes a local physical entity, city names. +Not typically used in automated geolocation. -type: boolean +type: keyword -example: true +example: boston-dc -- -*`file.created`*:: +*`observer.geo.region_iso_code`*:: + -- -File creation time. -Note that not all filesystems store the creation time. - -type: date - --- +Region ISO code. -*`file.ctime`*:: -+ --- -Last time the file attributes or metadata changed. -Note that changes to the file content will update `mtime`. This implies `ctime` will be adjusted at the same time, since `mtime` is an attribute of the file. +type: keyword -type: date +example: CA-QC -- -*`file.device`*:: +*`observer.geo.region_name`*:: + -- -Device that is the source of the file. +Region name. type: keyword -example: sda +example: Quebec -- -*`file.directory`*:: +*`observer.hostname`*:: + -- -Directory where the file is located. It should include the drive letter, when appropriate. +Hostname of the observer. type: keyword -example: /home/alice - -- -*`file.drive_letter`*:: +*`observer.ingress`*:: + -- -Drive letter where the file is located. This field is only relevant on Windows. -The value should be uppercase, and not include the colon. - -type: keyword +Observer.ingress holds information like interface number and name, vlan, and zone information to classify ingress traffic. Single armed monitoring such as a network sensor on a span port should only use observer.ingress to categorize traffic. -example: C +type: object -- -*`file.extension`*:: +*`observer.ingress.interface.alias`*:: + -- -File extension. +Interface alias as reported by the system, typically used in firewall implementations for e.g. inside, outside, or dmz logical interface naming. type: keyword -example: png +example: outside -- -*`file.gid`*:: +*`observer.ingress.interface.id`*:: + -- -Primary group ID (GID) of the file. +Interface ID as reported by an observer (typically SNMP interface ID). type: keyword -example: 1001 +example: 10 -- -*`file.group`*:: +*`observer.ingress.interface.name`*:: + -- -Primary group name of the file. +Interface name as reported by the system. 
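+As an aside on how the `observer.ingress.*` and `observer.egress.*` groups fit together: the sketch below shows how a single event from a dual-armed firewall might populate them. It is an illustration only; the interface and vlan values on the egress side are invented, the rest reuse examples from this document.
+
+[source,python]
+----
+# Hypothetical ECS fragment: one event as classified by a firewall observer.
+observer = {
+    "type": "firewall",
+    "ingress": {
+        "interface": {"id": "10", "name": "eth0", "alias": "outside"},
+        "vlan": {"id": "10", "name": "outside"},
+        "zone": "DMZ",
+    },
+    "egress": {
+        "interface": {"id": "11", "name": "eth1", "alias": "inside"},  # invented
+        "vlan": {"id": "20", "name": "inside"},                        # invented
+        "zone": "Public_Internet",
+    },
+}
+----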
type: keyword -example: alice +example: eth0 -- -*`file.hash.md5`*:: +*`observer.ingress.vlan.id`*:: + -- -MD5 hash. +VLAN ID as reported by the observer. type: keyword +example: 10 + -- -*`file.hash.sha1`*:: +*`observer.ingress.vlan.name`*:: + -- -SHA1 hash. +Optional VLAN name as reported by the observer. type: keyword +example: outside + -- -*`file.hash.sha256`*:: +*`observer.ingress.zone`*:: + -- -SHA256 hash. +Network zone of incoming traffic as reported by the observer to categorize the source area of ingress traffic, e.g. Internal, External, DMZ, HR, Legal, etc. type: keyword +example: DMZ + -- -*`file.hash.sha512`*:: +*`observer.ip`*:: + -- -SHA512 hash. +IP addresses of the observer. -type: keyword +type: ip -- -*`file.inode`*:: +*`observer.mac`*:: + -- -Inode representing the file in the filesystem. +MAC addresses of the observer. type: keyword -example: 256383 - -- -*`file.mime_type`*:: +*`observer.name`*:: + -- -MIME type should identify the format of the file or stream of bytes using https://www.iana.org/assignments/media-types/media-types.xhtml[IANA official types], where possible. When more than one type is applicable, the most specific type should be used. +Custom name of the observer. +This is a name that can be given to an observer. This can be helpful, for example, if multiple firewalls of the same model are used in an organization. +If no custom name is needed, the field can be left empty. type: keyword +example: 1_proxySG + -- -*`file.mode`*:: +*`observer.os.family`*:: + -- -Mode of the file in octal representation. +OS family (such as redhat, debian, freebsd, windows). type: keyword -example: 0640 +example: debian -- -*`file.mtime`*:: +*`observer.os.full`*:: + -- -Last time the file content was modified. +Operating system name, including the version or code name. -type: date +type: keyword + +example: Mac OS Mojave -- -*`file.name`*:: +*`observer.os.full.text`*:: + -- -Name of the file including the extension, without the directory. - -type: keyword - -example: example.png +type: text -- -*`file.owner`*:: +*`observer.os.kernel`*:: + -- -File owner's username. +Operating system kernel version as a raw string. type: keyword -example: alice +example: 4.4.0-112-generic -- -*`file.path`*:: +*`observer.os.name`*:: + -- -Full path to the file, including the file name. It should include the drive letter, when appropriate. +Operating system name, without the version. type: keyword -example: /home/alice/example.png +example: Mac OS X -- -*`file.path.text`*:: +*`observer.os.name.text`*:: + -- type: text -- -*`file.pe.company`*:: +*`observer.os.platform`*:: + -- -Internal company name of the file, provided at compile-time. +Operating system platform (such as centos, ubuntu, windows). type: keyword -example: Microsoft Corporation +example: darwin -- -*`file.pe.description`*:: +*`observer.os.version`*:: + -- -Internal description of the file, provided at compile-time. +Operating system version as a raw string. type: keyword -example: Paint +example: 10.14.1 -- -*`file.pe.file_version`*:: +*`observer.product`*:: + -- -Internal version of the file, provided at compile-time. +The product name of the observer. type: keyword -example: 6.3.9600.17415 +example: s200 -- -*`file.pe.original_file_name`*:: +*`observer.serial_number`*:: + -- -Internal name of the file, provided at compile-time. +Observer serial number. type: keyword -example: MSPAINT.EXE - -- -*`file.pe.product`*:: +*`observer.type`*:: + -- -Internal product name of the file, provided at compile-time.
+The type of the observer the data is coming from. +There is no predefined list of observer types. Some examples are `forwarder`, `firewall`, `ids`, `ips`, `proxy`, `poller`, `sensor`, `APM server`. type: keyword -example: Microsoft® Windows® Operating System +example: firewall -- -*`file.size`*:: +*`observer.vendor`*:: + -- -File size in bytes. -Only relevant when `file.type` is "file". +Vendor name of the observer. -type: long +type: keyword -example: 16384 +example: Symantec -- -*`file.target_path`*:: +*`observer.version`*:: + -- -Target path for symlinks. +Observer version. type: keyword -- -*`file.target_path.text`*:: -+ --- -type: text +[float] +=== organization --- +The organization fields enrich data with information about the company or entity the data is associated with. +These fields help you arrange or filter data stored in an index by one or multiple organizations. -*`file.type`*:: + +*`organization.id`*:: + -- -File type (file, dir, or symlink). +Unique identifier for the organization. type: keyword -example: file - -- -*`file.uid`*:: +*`organization.name`*:: + -- -The user ID (UID) or security identifier (SID) of the file owner. +Organization name. type: keyword -example: 1001 +-- + +*`organization.name.text`*:: ++ +-- +type: text -- [float] -=== geo +=== os -Geo fields can carry data about a specific location related to an event. -This geolocation information can be derived from techniques such as Geo IP, or be user-supplied. +The OS fields contain information about the operating system. -*`geo.city_name`*:: +*`os.family`*:: + -- -City name. +OS family (such as redhat, debian, freebsd, windows). type: keyword -example: Montreal +example: debian -- -*`geo.continent_name`*:: +*`os.full`*:: + -- -Name of the continent. +Operating system name, including the version or code name. type: keyword -example: North America +example: Mac OS Mojave -- -*`geo.country_iso_code`*:: +*`os.full.text`*:: + -- -Country ISO code. +type: text + +-- + +*`os.kernel`*:: ++ +-- +Operating system kernel version as a raw string. type: keyword -example: CA +example: 4.4.0-112-generic -- -*`geo.country_name`*:: +*`os.name`*:: + -- -Country name. +Operating system name, without the version. type: keyword -example: Canada +example: Mac OS X -- -*`geo.location`*:: +*`os.name.text`*:: + -- -Longitude and latitude. - -type: geo_point - -example: { "lon": -73.614830, "lat": 45.505918 } +type: text -- -*`geo.name`*:: +*`os.platform`*:: + -- -User-defined description of a location, at the level of granularity they care about. -Could be the name of their data centers, the floor number, if this describes a local physical entity, city names. -Not typically used in automated geolocation. +Operating system platform (such centos, ubuntu, windows). type: keyword -example: boston-dc +example: darwin -- -*`geo.region_iso_code`*:: +*`os.version`*:: + -- -Region ISO code. +Operating system version as a raw string. type: keyword -example: CA-QC +example: 10.14.1 -- -*`geo.region_name`*:: +[float] +=== package + +These fields contain information about an installed software package. It contains general information about a package, such as name, version or size. It also contains installation details, such as time or location. + + +*`package.architecture`*:: + -- -Region name. +Package architecture. type: keyword -example: Quebec +example: x86_64 -- -[float] -=== group +*`package.build_version`*:: ++ +-- +Additional information about the build version of the installed package. 
+For example, use the commit SHA of a non-released package. + +type: keyword -The group fields are meant to represent groups that are relevant to the event. +example: 36f4f7e89dd61b0988b12ee000b98966867710cd +-- -*`group.domain`*:: +*`package.checksum`*:: + -- -Name of the directory the group is a member of. -For example, an LDAP or Active Directory domain name. +Checksum of the installed package for verification. type: keyword +example: 68b329da9893e34099c7d8ad5cb9c940 + -- -*`group.id`*:: +*`package.description`*:: + -- -Unique identifier for the group on the system/platform. +Description of the package. type: keyword +example: Open source programming language to build simple/reliable/efficient software. + -- -*`group.name`*:: +*`package.install_scope`*:: + -- -Name of the group. +Indicates how the package was installed, e.g. user-local, global. type: keyword --- -[float] -=== hash -The hash fields represent different hash algorithms and their values. -Field names for common hashes (e.g. MD5, SHA1) are predefined. Add fields for other hashes by lowercasing the hash algorithm name and using underscore separators as appropriate (snake case, e.g. sha3_512). +example: global +-- -*`hash.md5`*:: +*`package.installed`*:: + -- -MD5 hash. +Time when package was installed. -type: keyword +type: date -- -*`hash.sha1`*:: +*`package.license`*:: + -- -SHA1 hash. +License under which the package was released. +Use a short name, e.g. the license identifier from SPDX License List where possible (https://spdx.org/licenses/). type: keyword +example: Apache License 2.0 + -- -*`hash.sha256`*:: +*`package.name`*:: + -- -SHA256 hash. +Package name. type: keyword +example: go + -- -*`hash.sha512`*:: +*`package.path`*:: + -- -SHA512 hash. +Path where the package is installed. type: keyword --- -[float] -=== host -A host is defined as a general computing instance. -ECS host.* fields should be populated with details about the host on which the event happened, or from which the measurement was taken. Host types include hardware, virtual machines, Docker containers, and Kubernetes nodes. +example: /usr/local/Cellar/go/1.12.9/ +-- -*`host.architecture`*:: +*`package.reference`*:: + -- -Operating system architecture. +Home page or reference URL of the software in this package, if available. type: keyword -example: x86_64 +example: https://golang.org -- -*`host.domain`*:: +*`package.size`*:: + -- -Name of the domain of which the host is a member. -For example, on Windows this could be the host's Active Directory domain or NetBIOS domain name. For Linux this could be the domain of the host's LDAP provider. +Package size in bytes. -type: keyword +type: long -example: CONTOSO +example: 62231 + +format: string -- -*`host.geo.city_name`*:: +*`package.type`*:: + -- -City name. +Type of package. +This should contain the package file type, rather than the package manager name. Examples: rpm, dpkg, brew, npm, gem, nupkg, jar. type: keyword -example: Montreal +example: rpm -- -*`host.geo.continent_name`*:: +*`package.version`*:: + -- -Name of the continent. +Package version. type: keyword -example: North America +example: 1.12.9 -- -*`host.geo.country_iso_code`*:: +[float] +=== pe + +These fields contain Windows Portable Executable (PE) metadata. + + +*`pe.company`*:: + -- -Country ISO code. +Internal company name of the file, provided at compile-time. type: keyword -example: CA +example: Microsoft Corporation -- -*`host.geo.country_name`*:: +*`pe.description`*:: + -- -Country name.
+Internal description of the file, provided at compile-time. type: keyword -example: Canada +example: Paint -- -*`host.geo.location`*:: +*`pe.file_version`*:: + -- -Longitude and latitude. +Internal version of the file, provided at compile-time. -type: geo_point +type: keyword -example: { "lon": -73.614830, "lat": 45.505918 } +example: 6.3.9600.17415 -- -*`host.geo.name`*:: +*`pe.original_file_name`*:: + -- -User-defined description of a location, at the level of granularity they care about. -Could be the name of their data centers, the floor number, if this describes a local physical entity, city names. -Not typically used in automated geolocation. +Internal name of the file, provided at compile-time. type: keyword -example: boston-dc +example: MSPAINT.EXE -- -*`host.geo.region_iso_code`*:: +*`pe.product`*:: + -- -Region ISO code. +Internal product name of the file, provided at compile-time. type: keyword -example: CA-QC +example: Microsoft® Windows® Operating System -- -*`host.geo.region_name`*:: +[float] +=== process + +These fields contain information about a process. +These fields can help you correlate metrics information with a process id/name from a log message. The `process.pid` often stays in the metric itself and is copied to the global field for correlation. + + +*`process.args`*:: + -- -Region name. +Array of process arguments, starting with the absolute path to the executable. +May be filtered to protect sensitive information. type: keyword -example: Quebec +example: ['/usr/bin/ssh', '-l', 'user', '10.0.0.16'] -- -*`host.hostname`*:: +*`process.args_count`*:: + -- -Hostname of the host. -It normally contains what the `hostname` command returns on the host machine. +Length of the process.args array. +This field can be useful for querying or performing bucket analysis on how many arguments were provided to start a process. More arguments may be an indication of suspicious activity. -type: keyword +type: long + +example: 4 -- -*`host.id`*:: +*`process.code_signature.exists`*:: + -- -Unique host id. -As hostname is not always unique, use values that are meaningful in your environment. -Example: The current usage of `beat.name`. +Boolean to capture if a signature is present. -type: keyword +type: boolean + +example: true -- -*`host.ip`*:: +*`process.code_signature.status`*:: + -- -Host ip addresses. +Additional information about the certificate status. +This is useful for logging cryptographic errors with the certificate validity or trust status. Leave unpopulated if the validity or trust of the certificate was unchecked. -type: ip +type: keyword + +example: ERROR_UNTRUSTED_ROOT -- -*`host.mac`*:: +*`process.code_signature.subject_name`*:: + -- -Host mac addresses. +Subject name of the code signer type: keyword +example: Microsoft Corporation + -- -*`host.name`*:: +*`process.code_signature.trusted`*:: + -- -Name of the host. -It can contain what `hostname` returns on Unix systems, the fully qualified domain name, or a name specified by the user. The sender decides which value to use. +Stores the trust status of the certificate chain. +Validating the trust of the certificate chain may be complicated, and this field should only be populated by tools that actively check the status. -type: keyword +type: boolean + +example: true -- -*`host.os.family`*:: +*`process.code_signature.valid`*:: + -- -OS family (such as redhat, debian, freebsd, windows). +Boolean to capture if the digital signature is verified against the binary content. +Leave unpopulated if a certificate was unchecked. 
-type: keyword +type: boolean -example: debian +example: true -- -*`host.os.full`*:: +*`process.command_line`*:: + -- -Operating system name, including the version or code name. +Full command line that started the process, including the absolute path to the executable, and all arguments. +Some arguments may be filtered to protect sensitive information. type: keyword -example: Mac OS Mojave +example: /usr/bin/ssh -l user 10.0.0.16 -- -*`host.os.full.text`*:: +*`process.command_line.text`*:: + -- type: text -- -*`host.os.kernel`*:: +*`process.entity_id`*:: + -- -Operating system kernel version as a raw string. +Unique identifier for the process. +The implementation of this is specified by the data source, but some examples of what could be used here are a process-generated UUID, Sysmon Process GUIDs, or a hash of some uniquely identifying components of a process. +Constructing a globally unique identifier is a common practice to mitigate PID reuse as well as to identify a specific process over time, across multiple monitored hosts. type: keyword -example: 4.4.0-112-generic +example: c2c455d9f99375d -- -*`host.os.name`*:: +*`process.executable`*:: + -- -Operating system name, without the version. +Absolute path to the process executable. type: keyword -example: Mac OS X +example: /usr/bin/ssh -- -*`host.os.name.text`*:: +*`process.executable.text`*:: + -- type: text -- -*`host.os.platform`*:: +*`process.exit_code`*:: + -- -Operating system platform (such centos, ubuntu, windows). +The exit code of the process, if this is a termination event. +The field should be absent if there is no exit code for the event (e.g. process start). -type: keyword +type: long -example: darwin +example: 137 -- -*`host.os.version`*:: +*`process.hash.md5`*:: + -- -Operating system version as a raw string. +MD5 hash. type: keyword -example: 10.14.1 - -- -*`host.type`*:: +*`process.hash.sha1`*:: + -- -Type of host. -For Cloud providers this can be the machine type like `t2.medium`. If vm, this could be the container, for example, or other information meaningful in your environment. +SHA1 hash. type: keyword -- -*`host.uptime`*:: +*`process.hash.sha256`*:: + -- -Seconds the host has been up. - -type: long +SHA256 hash. -example: 1325 +type: keyword -- -*`host.user.domain`*:: +*`process.hash.sha512`*:: + -- -Name of the directory the user is a member of. -For example, an LDAP or Active Directory domain name. +SHA512 hash. type: keyword -- -*`host.user.email`*:: +*`process.name`*:: + -- -User email address. +Process name. +Sometimes called program name or similar. type: keyword +example: ssh + -- -*`host.user.full_name`*:: +*`process.name.text`*:: + -- -User's full name, if available. +type: text + +-- + +*`process.parent.args`*:: ++ +-- +Array of process arguments. +May be filtered to protect sensitive information. type: keyword -example: Albert Einstein +example: ['ssh', '-l', 'user', '10.0.0.16'] -- -*`host.user.full_name.text`*:: +*`process.parent.args_count`*:: + -- -type: text +Length of the process.args array. +This field can be useful for querying or performing bucket analysis on how many arguments were provided to start a process. More arguments may be an indication of suspicious activity. + +type: long + +example: 4 -- -*`host.user.group.domain`*:: +*`process.parent.code_signature.exists`*:: + -- -Name of the directory the group is a member of. -For example, an LDAP or Active Directory domain name. +Boolean to capture if a signature is present. 
-type: keyword +type: boolean + +example: true -- -*`host.user.group.id`*:: +*`process.parent.code_signature.status`*:: + -- -Unique identifier for the group on the system/platform. +Additional information about the certificate status. +This is useful for logging cryptographic errors with the certificate validity or trust status. Leave unpopulated if the validity or trust of the certificate was unchecked. type: keyword +example: ERROR_UNTRUSTED_ROOT + -- -*`host.user.group.name`*:: +*`process.parent.code_signature.subject_name`*:: + -- -Name of the group. +Subject name of the code signer type: keyword +example: Microsoft Corporation + -- -*`host.user.hash`*:: +*`process.parent.code_signature.trusted`*:: + -- -Unique user hash to correlate information for a user in anonymized form. -Useful if `user.id` or `user.name` contain confidential information and cannot be used. +Stores the trust status of the certificate chain. +Validating the trust of the certificate chain may be complicated, and this field should only be populated by tools that actively check the status. -type: keyword +type: boolean + +example: true -- -*`host.user.id`*:: +*`process.parent.code_signature.valid`*:: + -- -Unique identifiers of the user. +Boolean to capture if the digital signature is verified against the binary content. +Leave unpopulated if a certificate was unchecked. -type: keyword +type: boolean + +example: true -- -*`host.user.name`*:: +*`process.parent.command_line`*:: + -- -Short name or login of the user. +Full command line that started the process, including the absolute path to the executable, and all arguments. +Some arguments may be filtered to protect sensitive information. type: keyword -example: albert +example: /usr/bin/ssh -l user 10.0.0.16 -- -*`host.user.name.text`*:: +*`process.parent.command_line.text`*:: + -- type: text -- -[float] -=== http - -Fields related to HTTP activity. Use the `url` field set to store the url of the request. - - -*`http.request.body.bytes`*:: +*`process.parent.entity_id`*:: + -- -Size in bytes of the request body. - -type: long +Unique identifier for the process. +The implementation of this is specified by the data source, but some examples of what could be used here are a process-generated UUID, Sysmon Process GUIDs, or a hash of some uniquely identifying components of a process. +Constructing a globally unique identifier is a common practice to mitigate PID reuse as well as to identify a specific process over time, across multiple monitored hosts. -example: 887 +type: keyword -format: bytes +example: c2c455d9f99375d -- -*`http.request.body.content`*:: +*`process.parent.executable`*:: + -- -The full HTTP request body. +Absolute path to the process executable. type: keyword -example: Hello world +example: /usr/bin/ssh -- -*`http.request.body.content.text`*:: +*`process.parent.executable.text`*:: + -- type: text -- -*`http.request.bytes`*:: +*`process.parent.exit_code`*:: + -- -Total size in bytes of the request (body and headers). +The exit code of the process, if this is a termination event. +The field should be absent if there is no exit code for the event (e.g. process start). type: long -example: 1437 - -format: bytes +example: 137 -- -*`http.request.method`*:: +*`process.parent.hash.md5`*:: + -- -HTTP request method. -The field value must be normalized to lowercase for querying. See the documentation section "Implementing ECS". +MD5 hash. 
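+One plausible way to populate the `process.hash.*` and `process.parent.hash.*` fields in this group is to hash the executable on disk; a minimal sketch (the helper name is ours, not part of ECS):
+
+[source,python]
+----
+import hashlib
+
+def file_hashes(path):
+    """Compute the md5/sha1/sha256/sha512 hex digests of one file."""
+    algos = {name: hashlib.new(name) for name in ("md5", "sha1", "sha256", "sha512")}
+    with open(path, "rb") as f:
+        for chunk in iter(lambda: f.read(65536), b""):
+            for h in algos.values():
+                h.update(chunk)
+    return {name: h.hexdigest() for name, h in algos.items()}
+
+# e.g. file_hashes("/usr/bin/ssh") -> {"md5": "...", "sha1": "...", ...}
+----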
type: keyword -example: get, post, put - -- -*`http.request.referrer`*:: +*`process.parent.hash.sha1`*:: + -- -Referrer for this HTTP request. +SHA1 hash. type: keyword -example: https://blog.example.com/ - -- -*`http.response.body.bytes`*:: +*`process.parent.hash.sha256`*:: + -- -Size in bytes of the response body. +SHA256 hash. -type: long +type: keyword -example: 887 +-- -format: bytes +*`process.parent.hash.sha512`*:: ++ +-- +SHA512 hash. + +type: keyword -- -*`http.response.body.content`*:: +*`process.parent.name`*:: + -- -The full HTTP response body. +Process name. +Sometimes called program name or similar. type: keyword -example: Hello world +example: ssh -- -*`http.response.body.content.text`*:: +*`process.parent.name.text`*:: + -- type: text -- -*`http.response.bytes`*:: +*`process.parent.pgid`*:: + -- -Total size in bytes of the response (body and headers). +Identifier of the group of processes the process belongs to. type: long -example: 1437 - -format: bytes +format: string -- -*`http.response.status_code`*:: +*`process.parent.pid`*:: + -- -HTTP response status code. +Process id. type: long -example: 404 +example: 4242 format: string -- -*`http.version`*:: +*`process.parent.ppid`*:: + -- -HTTP version. - -type: keyword - -example: 1.1 +Parent process' pid. --- +type: long -[float] -=== interface +example: 4241 -The interface fields are used to record ingress and egress interface information when reported by an observer (e.g. firewall, router, load balancer) in the context of the observer handling a network connection. In the case of a single observer interface (e.g. network sensor on a span port) only the observer.ingress information should be populated. +format: string +-- -*`interface.alias`*:: +*`process.parent.start`*:: + -- -Interface alias as reported by the system, typically used in firewall implementations for e.g. inside, outside, or dmz logical interface naming. +The time the process started. -type: keyword +type: date -example: outside +example: 2016-05-23T08:05:34.853Z -- -*`interface.id`*:: +*`process.parent.thread.id`*:: + -- -Interface ID as reported by an observer (typically SNMP interface ID). +Thread ID. -type: keyword +type: long -example: 10 +example: 4242 + +format: string -- -*`interface.name`*:: +*`process.parent.thread.name`*:: + -- -Interface name as reported by the system. +Thread name. type: keyword -example: eth0 +example: thread-0 -- -[float] -=== log -Details about the event's logging mechanism or logging transport. -The log.* fields are typically populated with details about the logging mechanism used to create and/or transport the event. For example, syslog details belong under `log.syslog.*`. -The details specific to your event source are typically not logged under `log.*`, but rather in `event.*` or in other ECS fields. -*`log.level`*:: +*`process.parent.title`*:: + -- -Original log level of the log event. -If the source of the event provides a log level or textual severity, this is the one that goes in `log.level`. If your source doesn't specify one, you may put your event transport's severity here (e.g. Syslog severity). -Some examples are `warn`, `err`, `i`, `informational`. +Process title. +The proctitle, sometimes the same as process name. Can also be different: for example a browser setting its title to the web page currently opened. type: keyword -example: error - -- -*`log.logger`*:: +*`process.parent.title.text`*:: + -- -The name of the logger inside an application.
This is usually the name of the class which initialized the logger, or can be a custom name. - -type: keyword - -example: org.elasticsearch.bootstrap.Bootstrap +type: text -- -*`log.origin.file.line`*:: +*`process.parent.uptime`*:: + -- -The line number of the file containing the source code which originated the log event. +Seconds the process has been up. -type: integer +type: long -example: 42 +example: 1325 -- -*`log.origin.file.name`*:: +*`process.parent.working_directory`*:: + -- -The name of the file containing the source code which originated the log event. Note that this is not the name of the log file. +The working directory of the process. type: keyword -example: Bootstrap.java +example: /home/alice -- -*`log.origin.function`*:: +*`process.parent.working_directory.text`*:: + -- -The name of the function or method which originated the log event. +type: text + +-- + +*`process.pe.company`*:: ++ +-- +Internal company name of the file, provided at compile-time. type: keyword -example: init +example: Microsoft Corporation -- -*`log.original`*:: +*`process.pe.description`*:: + -- -This is the original log message and contains the full log message before splitting it up in multiple parts. -In contrast to the `message` field which can contain an extracted part of the log message, this field contains the original, full log message. It can have already some modifications applied like encoding or new lines removed to clean up the log message. -This field is not indexed and doc_values are disabled so it can't be queried but the value can be retrieved from `_source`. +Internal description of the file, provided at compile-time. type: keyword -example: Sep 19 08:26:10 localhost My log +example: Paint -- -*`log.syslog`*:: +*`process.pe.file_version`*:: + -- -The Syslog metadata of the event, if the event was transmitted via Syslog. Please see RFCs 5424 or 3164. +Internal version of the file, provided at compile-time. -type: object +type: keyword + +example: 6.3.9600.17415 -- -*`log.syslog.facility.code`*:: +*`process.pe.original_file_name`*:: + -- -The Syslog numeric facility of the log event, if available. -According to RFCs 5424 and 3164, this value should be an integer between 0 and 23. - -type: long +Internal name of the file, provided at compile-time. -example: 23 +type: keyword -format: string +example: MSPAINT.EXE -- -*`log.syslog.facility.name`*:: +*`process.pe.product`*:: + -- -The Syslog text-based facility of the log event, if available. +Internal product name of the file, provided at compile-time. type: keyword -example: local7 +example: Microsoft® Windows® Operating System -- -*`log.syslog.priority`*:: +*`process.pgid`*:: + -- -Syslog numeric priority of the event, if available. -According to RFCs 5424 and 3164, the priority is 8 * facility + severity. This number is therefore expected to contain a value between 0 and 191. +Identifier of the group of processes the process belongs to. type: long -example: 135 - format: string -- -*`log.syslog.severity.code`*:: +*`process.pid`*:: + -- -The Syslog numeric severity of the log event, if available. -If the event source publishing via Syslog provides a different numeric severity value (e.g. firewall, IDS), your source's numeric severity should go to `event.severity`. If the event source does not specify a distinct severity, you can optionally copy the Syslog severity to `event.severity`. +Process id. 
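+For an agent reporting on itself, the identifiers in this group map directly onto what the Python standard library exposes; a minimal sketch, assuming a POSIX host:
+
+[source,python]
+----
+import os
+
+# Self-reported values for the process identifier fields (POSIX only).
+process_ids = {
+    "pid": os.getpid(),    # process.pid
+    "ppid": os.getppid(),  # process.ppid
+    "pgid": os.getpgrp(),  # process.pgid
+}
+----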
type: long -example: 3 +example: 4242 + +format: string -- -*`log.syslog.severity.name`*:: +*`process.ppid`*:: + -- -The Syslog numeric severity of the log event, if available. -If the event source publishing via Syslog provides a different severity value (e.g. firewall, IDS), your source's text severity should go to `log.level`. If the event source does not specify a distinct severity, you can optionally copy the Syslog severity to `log.level`. - -type: keyword - -example: Error +Parent process' pid. --- +type: long -[float] -=== network +example: 4241 -The network is defined as the communication path over which a host or network event happens. -The network.* fields should be populated with details about the network activity associated with an event. +format: string +-- -*`network.application`*:: +*`process.start`*:: + -- -A name given to an application level protocol. This can be arbitrarily assigned for things like microservices, but also apply to things like skype, icq, facebook, twitter. This would be used in situations where the vendor or service can be decoded such as from the source/dest IP owners, ports, or wire format. -The field value must be normalized to lowercase for querying. See the documentation section "Implementing ECS". +The time the process started. -type: keyword +type: date -example: aim +example: 2016-05-23T08:05:34.853Z -- -*`network.bytes`*:: +*`process.thread.id`*:: + -- -Total bytes transferred in both directions. -If `source.bytes` and `destination.bytes` are known, `network.bytes` is their sum. +Thread ID. type: long -example: 368 +example: 4242 -format: bytes +format: string -- -*`network.community_id`*:: +*`process.thread.name`*:: + -- -A hash of source and destination IPs and ports, as well as the protocol used in a communication. This is a tool-agnostic standard to identify flows. -Learn more at https://github.com/corelight/community-id-spec. +Thread name. type: keyword -example: 1:hO+sN4H+MG5MY/8hIrXPqc4ZQz0= +example: thread-0 -- -*`network.direction`*:: +*`process.title`*:: + -- -Direction of the network traffic. -Recommended values are: - * inbound - * outbound - * internal - * external - * unknown - -When mapping events from a host-based monitoring context, populate this field from the host's point of view. -When mapping events from a network or perimeter-based monitoring context, populate this field from the point of view of your network perimeter. +Process title. +The proctitle, sometimes the same as process name. Can also be different: for example a browser setting its title to the web page currently opened. type: keyword -example: inbound - -- -*`network.forwarded_ip`*:: +*`process.title.text`*:: + -- -Host IP address when the source IP address is the proxy. - -type: ip - -example: 192.1.1.2 +type: text -- -*`network.iana_number`*:: +*`process.uptime`*:: + -- -IANA Protocol Number (https://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml). Standardized list of protocols. This aligns well with NetFlow and sFlow related logs which use the IANA Protocol Number. - -type: keyword - -example: 6 - --- +Seconds the process has been up. -*`network.inner`*:: -+ --- -Network.inner fields are added in addition to network.vlan fields to describe the innermost VLAN when q-in-q VLAN tagging is present. Allowed fields include vlan.id and vlan.name. Inner vlan fields are typically used when sending traffic with multiple 802.1q encapsulations to a network sensor (e.g. Zeek, Wireshark.)
+type: long -type: object +example: 1325 -- -*`network.inner.vlan.id`*:: +*`process.working_directory`*:: + -- -VLAN ID as reported by the observer. +The working directory of the process. type: keyword -example: 10 +example: /home/alice -- -*`network.inner.vlan.name`*:: +*`process.working_directory.text`*:: + -- -Optional VLAN name as reported by the observer. +type: text -type: keyword +-- -example: outside +[float] +=== registry --- +Fields related to Windows Registry operations. -*`network.name`*:: + +*`registry.data.bytes`*:: + -- -Name given by operators to sections of their network. +Original bytes written with base64 encoding. +For Windows registry operations, such as SetValueEx and RegQueryValueEx, this corresponds to the data pointed by `lp_data`. This is optional but provides better recoverability and should be populated for REG_BINARY encoded values. type: keyword -example: Guest Wifi +example: ZQBuAC0AVQBTAAAAZQBuAAAAAAA= -- -*`network.packets`*:: +*`registry.data.strings`*:: + -- -Total packets transferred in both directions. -If `source.packets` and `destination.packets` are known, `network.packets` is their sum. +Content when writing string types. +Populated as an array when writing string data to the registry. For single string registry types (REG_SZ, REG_EXPAND_SZ), this should be an array with one string. For sequences of string with REG_MULTI_SZ, this array will be variable length. For numeric data, such as REG_DWORD and REG_QWORD, this should be populated with the decimal representation (e.g `"1"`). -type: long +type: keyword -example: 24 +example: ["C:\rta\red_ttp\bin\myapp.exe"] -- -*`network.protocol`*:: +*`registry.data.type`*:: + -- -L7 Network protocol name. ex. http, lumberjack, transport protocol. -The field value must be normalized to lowercase for querying. See the documentation section "Implementing ECS". +Standard registry type for encoding contents type: keyword -example: http +example: REG_SZ -- -*`network.transport`*:: +*`registry.hive`*:: + -- -Same as network.iana_number, but instead using the Keyword name of the transport layer (udp, tcp, ipv6-icmp, etc.) -The field value must be normalized to lowercase for querying. See the documentation section "Implementing ECS". +Abbreviated name for the hive. type: keyword -example: tcp +example: HKLM -- -*`network.type`*:: +*`registry.key`*:: + -- -In the OSI Model this would be the Network Layer. ipv4, ipv6, ipsec, pim, etc -The field value must be normalized to lowercase for querying. See the documentation section "Implementing ECS". +Hive-relative path of keys. type: keyword -example: ipv4 +example: SOFTWARE\Microsoft\Windows NT\CurrentVersion\Image File Execution Options\winword.exe -- -*`network.vlan.id`*:: +*`registry.path`*:: + -- -VLAN ID as reported by the observer. +Full path, including hive, key and value type: keyword -example: 10 +example: HKLM\SOFTWARE\Microsoft\Windows NT\CurrentVersion\Image File Execution Options\winword.exe\Debugger -- -*`network.vlan.name`*:: +*`registry.value`*:: + -- -Optional VLAN name as reported by the observer. +Name of the value written. type: keyword -example: outside +example: Debugger -- [float] -=== observer +=== related -An observer is defined as a special network, security, or application device used to detect, observe, or create network, security, or application-related events and metrics. -This could be a custom hardware appliance or a server that has been configured to run special network, security, or application software. 
Examples include firewalls, web proxies, intrusion detection/prevention systems, network monitoring sensors, web application firewalls, data loss prevention systems, and APM servers. The observer.* fields shall be populated with details of the system, if any, that detects, observes and/or creates a network, security, or application event or metric. Message queues and ETL components used in processing events or metrics are not considered observers in ECS. +This field set is meant to facilitate pivoting around a piece of data. +Some pieces of information can be seen in many places in an ECS event. To facilitate searching for them, store an array of all seen values to their corresponding field in `related.`. +A concrete example is IP addresses, which can be under host, observer, source, destination, client, server, and network.forwarded_ip. If you append all IPs to `related.ip`, you can then search for a given IP trivially, no matter where it appeared, by querying `related.ip:192.0.2.15`. -*`observer.egress`*:: +*`related.hash`*:: + -- -Observer.egress holds information like interface number and name, vlan, and zone information to classify egress traffic. Single armed monitoring such as a network sensor on a span port should only use observer.ingress to categorize traffic. +All the hashes seen on your event. Populating this field, then using it to search for hashes can help in situations where you're unsure what the hash algorithm is (and therefore which key name to search). -type: object +type: keyword -- -*`observer.egress.interface.alias`*:: +*`related.ip`*:: + -- -Interface alias as reported by the system, typically used in firewall implementations for e.g. inside, outside, or dmz logical interface naming. - -type: keyword +All of the IPs seen on your event. -example: outside +type: ip -- -*`observer.egress.interface.id`*:: +*`related.user`*:: + -- -Interface ID as reported by an observer (typically SNMP interface ID). +All the user names seen on your event. type: keyword -example: 10 - --- - -*`observer.egress.interface.name`*:: -+ -- -Interface name as reported by the system. -type: keyword +[float] +=== rule -example: eth0 +Rule fields are used to capture the specifics of any observer or agent rules that generate alerts or other notable events. +Examples of data sources that would populate the rule fields include: network admission control platforms, network or host IDS/IPS, network firewalls, web application firewalls, url filters, endpoint detection and response (EDR) systems, etc. --- -*`observer.egress.vlan.id`*:: +*`rule.author`*:: + -- -VLAN ID as reported by the observer. +Name, organization, or pseudonym of the author or authors who created the rule used to generate this event. type: keyword -example: 10 +example: ['Star-Lord'] -- -*`observer.egress.vlan.name`*:: +*`rule.category`*:: + -- -Optional VLAN name as reported by the observer. +A categorization value keyword used by the entity using the rule for detection of this event. type: keyword -example: outside +example: Attempted Information Leak -- -*`observer.egress.zone`*:: +*`rule.description`*:: + -- -Network zone of outbound traffic as reported by the observer to categorize the destination area of egress traffic, e.g. Internal, External, DMZ, HR, Legal, etc. +The description of the rule generating the event. type: keyword -example: Public_Internet +example: Block requests to public DNS over HTTPS / TLS protocols -- -*`observer.geo.city_name`*:: +*`rule.id`*:: + -- -City name. 
+A rule ID that is unique within the scope of an agent, observer, or other entity using the rule for detection of this event. type: keyword -example: Montreal +example: 101 -- -*`observer.geo.continent_name`*:: +*`rule.license`*:: + -- -Name of the continent. +Name of the license under which the rule used to generate this event is made available. type: keyword -example: North America +example: Apache 2.0 -- -*`observer.geo.country_iso_code`*:: +*`rule.name`*:: + -- -Country ISO code. +The name of the rule or signature generating the event. type: keyword -example: CA +example: BLOCK_DNS_over_TLS -- -*`observer.geo.country_name`*:: +*`rule.reference`*:: + -- -Country name. +Reference URL to additional information about the rule used to generate this event. +The URL can point to the vendor's documentation about the rule. If that's not available, it can also be a link to a more general page describing this type of alert. type: keyword -example: Canada +example: https://en.wikipedia.org/wiki/DNS_over_TLS -- -*`observer.geo.location`*:: +*`rule.ruleset`*:: + -- -Longitude and latitude. +Name of the ruleset, policy, group, or parent category in which the rule used to generate this event is a member. -type: geo_point +type: keyword -example: { "lon": -73.614830, "lat": 45.505918 } +example: Standard_Protocol_Filters -- -*`observer.geo.name`*:: +*`rule.uuid`*:: + -- -User-defined description of a location, at the level of granularity they care about. -Could be the name of their data centers, the floor number, if this describes a local physical entity, city names. -Not typically used in automated geolocation. +A rule ID that is unique within the scope of a set or group of agents, observers, or other entities using the rule for detection of this event. type: keyword -example: boston-dc +example: 1100110011 -- -*`observer.geo.region_iso_code`*:: +*`rule.version`*:: + -- -Region ISO code. +The version / revision of the rule being used for analysis. type: keyword -example: CA-QC - --- +example: 1.1 -*`observer.geo.region_name`*:: -+ -- -Region name. -type: keyword +[float] +=== server -example: Quebec +A Server is defined as the responder in a network connection for events regarding sessions, connections, or bidirectional flow records. +For TCP events, the server is the receiver of the initial SYN packet(s) of the TCP connection. For other protocols, the server is generally the responder in the network transaction. Some systems actually use the term "responder" to refer the server in TCP connections. The server fields describe details about the system acting as the server in the network event. Server fields are usually populated in conjunction with client fields. Server fields are generally not populated for packet-level events. +Client / server representations can add semantic context to an exchange, which is helpful to visualize the data in certain situations. If your context falls in that category, you should still ensure that source and destination are filled appropriately. --- -*`observer.hostname`*:: +*`server.address`*:: + -- -Hostname of the observer. +Some event server addresses are defined ambiguously. The event will sometimes list an IP, a domain or a unix socket. You should always store the raw address in the `.address` field. +Then it should be duplicated to `.ip` or `.domain`, depending on which one it is. 
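+A hedged sketch of the convention just described, folding in the `related.ip` enrichment from the `related` field set above (the helper name is ours; unix socket addresses are left aside):
+
+[source,python]
+----
+import ipaddress
+
+def set_server_address(event, raw):
+    """Keep the raw address, then duplicate it to server.ip or server.domain."""
+    server = event.setdefault("server", {})
+    server["address"] = raw
+    try:
+        ipaddress.ip_address(raw)
+    except ValueError:
+        server["domain"] = raw  # not parseable as an IP: treat as a domain
+    else:
+        server["ip"] = raw
+        # Also append to related.ip, so `related.ip:192.0.2.15` finds the event.
+        event.setdefault("related", {}).setdefault("ip", []).append(raw)
+    return event
+----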
type: keyword -- -*`observer.ingress`*:: +*`server.as.number`*:: + -- -Observer.ingress holds information like interface number and name, vlan, and zone information to classify ingress traffic. Single armed monitoring such as a network sensor on a span port should only use observer.ingress to categorize traffic. +Unique number allocated to the autonomous system. The autonomous system number (ASN) uniquely identifies each network on the Internet. -type: object +type: long + +example: 15169 -- -*`observer.ingress.interface.alias`*:: +*`server.as.organization.name`*:: + -- -Interface alias as reported by the system, typically used in firewall implementations for e.g. inside, outside, or dmz logical interface naming. +Organization name. type: keyword -example: outside +example: Google LLC -- -*`observer.ingress.interface.id`*:: +*`server.as.organization.name.text`*:: + -- -Interface ID as reported by an observer (typically SNMP interface ID). - -type: keyword - -example: 10 +type: text -- -*`observer.ingress.interface.name`*:: +*`server.bytes`*:: + -- -Interface name as reported by the system. +Bytes sent from the server to the client. -type: keyword +type: long -example: eth0 +example: 184 + +format: bytes -- -*`observer.ingress.vlan.id`*:: +*`server.domain`*:: + -- -VLAN ID as reported by the observer. +Server domain. type: keyword -example: 10 - -- -*`observer.ingress.vlan.name`*:: +*`server.geo.city_name`*:: + -- -Optional VLAN name as reported by the observer. +City name. type: keyword -example: outside +example: Montreal -- -*`observer.ingress.zone`*:: +*`server.geo.continent_name`*:: + -- -Network zone of incoming traffic as reported by the observer to categorize the source area of ingress traffic. e.g. internal, External, DMZ, HR, Legal, etc. +Name of the continent. type: keyword -example: DMZ +example: North America -- -*`observer.ip`*:: +*`server.geo.country_iso_code`*:: + -- -IP addresses of the observer. +Country ISO code. -type: ip +type: keyword + +example: CA -- -*`observer.mac`*:: +*`server.geo.country_name`*:: + -- -MAC addresses of the observer +Country name. type: keyword +example: Canada + -- -*`observer.name`*:: +*`server.geo.location`*:: + -- -Custom name of the observer. -This is a name that can be given to an observer. This can be helpful for example if multiple firewalls of the same model are used in an organization. -If no custom name is needed, the field can be left empty. +Longitude and latitude. -type: keyword +type: geo_point -example: 1_proxySG +example: { "lon": -73.614830, "lat": 45.505918 } -- -*`observer.os.family`*:: +*`server.geo.name`*:: + -- -OS family (such as redhat, debian, freebsd, windows). +User-defined description of a location, at the level of granularity they care about. +Could be the name of their data centers, the floor number, if this describes a local physical entity, city names. +Not typically used in automated geolocation. type: keyword -example: debian +example: boston-dc -- -*`observer.os.full`*:: +*`server.geo.region_iso_code`*:: + -- -Operating system name, including the version or code name. +Region ISO code. type: keyword -example: Mac OS Mojave - --- - -*`observer.os.full.text`*:: -+ --- -type: text +example: CA-QC -- -*`observer.os.kernel`*:: +*`server.geo.region_name`*:: + -- -Operating system kernel version as a raw string. +Region name. type: keyword -example: 4.4.0-112-generic +example: Quebec -- -*`observer.os.name`*:: +*`server.ip`*:: + -- -Operating system name, without the version. - -type: keyword +IP address of the server. 
+Can be one or multiple IPv4 or IPv6 addresses. -example: Mac OS X +type: ip -- -*`observer.os.name.text`*:: +*`server.mac`*:: + -- -type: text +MAC address of the server. + +type: keyword -- -*`observer.os.platform`*:: +*`server.nat.ip`*:: + -- -Operating system platform (such centos, ubuntu, windows). - -type: keyword +Translated IP of destination based NAT sessions (e.g. internet to private DMZ). +Typically used with load balancers, firewalls, or routers. -example: darwin +type: ip -- -*`observer.os.version`*:: +*`server.nat.port`*:: + -- -Operating system version as a raw string. +Translated port of destination based NAT sessions (e.g. internet to private DMZ). +Typically used with load balancers, firewalls, or routers. -type: keyword +type: long -example: 10.14.1 +format: string -- -*`observer.product`*:: +*`server.packets`*:: + -- -The product name of the observer. +Packets sent from the server to the client. -type: keyword +type: long -example: s200 +example: 12 -- -*`observer.serial_number`*:: +*`server.port`*:: + -- -Observer serial number. +Port of the server. -type: keyword +type: long + +format: string -- -*`observer.type`*:: +*`server.registered_domain`*:: + -- -The type of the observer the data is coming from. -There is no predefined list of observer types. Some examples are `forwarder`, `firewall`, `ids`, `ips`, `proxy`, `poller`, `sensor`, `APM server`. +The highest registered server domain, stripped of the subdomain. +For example, the registered domain for "foo.google.com" is "google.com". +This value can be determined precisely with a list like the public suffix list (http://publicsuffix.org). Trying to approximate this by simply taking the last two labels will not work well for TLDs such as "co.uk". type: keyword -example: firewall +example: google.com -- -*`observer.vendor`*:: +*`server.top_level_domain`*:: + -- -Vendor name of the observer. +The effective top level domain (eTLD), also known as the domain suffix, is the last part of the domain name. For example, the top level domain for google.com is "com". +This value can be determined precisely with a list like the public suffix list (http://publicsuffix.org). Trying to approximate this by simply taking the last label will not work well for effective TLDs such as "co.uk" (see the sketch below). type: keyword -example: Symantec +example: co.uk -- -*`observer.version`*:: +*`server.user.domain`*:: + -- -Observer version. +Name of the directory the user is a member of. +For example, an LDAP or Active Directory domain name. type: keyword -- -[float] -=== organization -The organization fields enrich data with information about the company or entity the data is associated with. -These fields help you arrange or filter data stored in an index by one or multiple organizations. -*`organization.id`*:: +*`server.user.email`*:: + -- -Unique identifier for the organization. +User email address. type: keyword -- -*`organization.name`*:: +*`server.user.full_name`*:: + -- -Organization name. +User's full name, if available. type: keyword +example: Albert Einstein + -- -*`organization.name.text`*:: +*`server.user.full_name.text`*:: + -- type: text -- -[float] -=== os +*`server.user.group.domain`*:: ++ +-- +Name of the directory the group is a member of. +For example, an LDAP or Active Directory domain name. -The OS fields contain information about the operating system. +type: keyword +-- -*`os.family`*:: +*`server.user.group.id`*:: + -- -OS family (such as redhat, debian, freebsd, windows). +Unique identifier for the group on the system/platform.
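+Looping back to `server.registered_domain` and `server.top_level_domain` above: one way to derive both values against the public suffix list, assuming the third-party `tldextract` package is acceptable in your pipeline:
+
+[source,python]
+----
+import tldextract  # third-party; bundles a public suffix list snapshot
+
+ext = tldextract.extract("foo.google.com")
+registered_domain = ext.registered_domain  # "google.com"
+top_level_domain = ext.suffix              # "com"
+
+# Multi-label suffixes are handled correctly, unlike naive label splitting:
+assert tldextract.extract("www.example.co.uk").suffix == "co.uk"
+----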
type: keyword -example: debian - -- -*`os.full`*:: +*`server.user.group.name`*:: + -- -Operating system name, including the version or code name. +Name of the group. type: keyword -example: Mac OS Mojave - -- -*`os.full.text`*:: +*`server.user.hash`*:: + -- -type: text +Unique user hash to correlate information for a user in anonymized form. +Useful if `user.id` or `user.name` contain confidential information and cannot be used. + +type: keyword -- -*`os.kernel`*:: +*`server.user.id`*:: + -- -Operating system kernel version as a raw string. +Unique identifiers of the user. type: keyword -example: 4.4.0-112-generic - -- -*`os.name`*:: +*`server.user.name`*:: + -- -Operating system name, without the version. +Short name or login of the user. type: keyword -example: Mac OS X +example: albert -- -*`os.name.text`*:: +*`server.user.name.text`*:: + -- type: text -- -*`os.platform`*:: +[float] +=== service + +The service fields describe the service for or from which the data was collected. +These fields help you find and correlate logs for a specific service and version. + + +*`service.ephemeral_id`*:: + -- -Operating system platform (such centos, ubuntu, windows). +Ephemeral identifier of this service (if one exists). +This id normally changes across restarts, but `service.id` does not. type: keyword -example: darwin +example: 8a4f500f -- -*`os.version`*:: +*`service.id`*:: + -- -Operating system version as a raw string. +Unique identifier of the running service. If the service is comprised of many nodes, the `service.id` should be the same for all nodes. +This id should uniquely identify the service. This makes it possible to correlate logs and metrics for one specific service, no matter which particular node emitted the event. +Note that if you need to see the events from one specific host of the service, you should filter on that `host.name` or `host.id` instead. type: keyword -example: 10.14.1 +example: d37e5ebfe0ae6c4972dbe9f0174a1637bb8247f6 -- -[float] -=== package - -These fields contain information about an installed software package. It contains general information about a package, such as name, version or size. It also contains installation details, such as time or location. - - -*`package.architecture`*:: +*`service.name`*:: + -- -Package architecture. +Name of the service data is collected from. +The name of the service is normally user given. This allows for distributed services that run on multiple hosts to correlate the related instances based on the name. +In the case of Elasticsearch the `service.name` could contain the cluster name. For Beats the `service.name` is by default a copy of the `service.type` field if no name is specified. type: keyword -example: x86_64 +example: elasticsearch-metrics -- -*`package.build_version`*:: +*`service.node.name`*:: + -- -Additional information about the build version of the installed package. -For example use the commit SHA of a non-released package. +Name of a service node. +This allows for two nodes of the same service running on the same host to be differentiated. Therefore, `service.node.name` should typically be unique across nodes of a given service. +In the case of Elasticsearch, the `service.node.name` could contain the unique node name within the Elasticsearch cluster. In cases where the service doesn't have the concept of a node name, the host name or container name can be used to distinguish running instances that make up this service. If those do not provide uniqueness (e.g. 
multiple instances of the service running on the same host) - the node name can be manually set. type: keyword -example: 36f4f7e89dd61b0988b12ee000b98966867710cd +example: instance-0000000016 -- -*`package.checksum`*:: +*`service.state`*:: + -- -Checksum of the installed package for verification. +Current state of the service. type: keyword -example: 68b329da9893e34099c7d8ad5cb9c940 - -- -*`package.description`*:: +*`service.type`*:: + -- -Description of the package. +The type of the service data is collected from. +The type can be used to group and correlate logs and metrics from one service type. +Example: If logs or metrics are collected from Elasticsearch, `service.type` would be `elasticsearch`. type: keyword -example: Open source programming language to build simple/reliable/efficient software. +example: elasticsearch -- -*`package.install_scope`*:: +*`service.version`*:: + -- -Indicating how the package was installed, e.g. user-local, global. +Version of the service the data was collected from. +This allows to look at a data set only for a specific version of a service. type: keyword -example: global +example: 3.2.4 -- -*`package.installed`*:: -+ --- -Time when package was installed. +[float] +=== source -type: date +Source fields describe details about the source of a packet/event. +Source fields are usually populated in conjunction with destination fields. --- -*`package.license`*:: +*`source.address`*:: + -- -License under which the package was released. -Use a short name, e.g. the license identifier from SPDX License List where possible (https://spdx.org/licenses/). +Some event source addresses are defined ambiguously. The event will sometimes list an IP, a domain or a unix socket. You should always store the raw address in the `.address` field. +Then it should be duplicated to `.ip` or `.domain`, depending on which one it is. type: keyword -example: Apache License 2.0 - -- -*`package.name`*:: +*`source.as.number`*:: + -- -Package name +Unique number allocated to the autonomous system. The autonomous system number (ASN) uniquely identifies each network on the Internet. -type: keyword +type: long -example: go +example: 15169 -- -*`package.path`*:: +*`source.as.organization.name`*:: + -- -Path where the package is installed. +Organization name. type: keyword -example: /usr/local/Cellar/go/1.12.9/ +example: Google LLC -- -*`package.reference`*:: +*`source.as.organization.name.text`*:: + -- -Home page or reference URL of the software in this package, if available. - -type: keyword - -example: https://golang.org +type: text -- -*`package.size`*:: +*`source.bytes`*:: + -- -Package size in bytes. +Bytes sent from the source to the destination. type: long -example: 62231 +example: 184 -format: string +format: bytes -- -*`package.type`*:: +*`source.domain`*:: + -- -Type of package. -This should contain the package file type, rather than the package manager name. Examples: rpm, dpkg, brew, npm, gem, nupkg, jar. +Source domain. type: keyword -example: rpm - -- -*`package.version`*:: +*`source.geo.city_name`*:: + -- -Package version +City name. type: keyword -example: 1.12.9 +example: Montreal -- -[float] -=== pe - -These fields contain Windows Portable Executable (PE) metadata. - - -*`pe.company`*:: +*`source.geo.continent_name`*:: + -- -Internal company name of the file, provided at compile-time. +Name of the continent. 
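The `source.address` guidance above (store the raw value, then duplicate it into `.ip` or `.domain` depending on what it is) is mechanical enough to sketch with the standard library; the event dict layout is illustrative only:

[source,python]
----
# Sketch of the .address convention: keep the raw address, then copy it
# to source.ip or source.domain based on its type. Standard library only.
import ipaddress

def populate_source(event, raw):
    source = event.setdefault("source", {})
    source["address"] = raw  # always keep the raw address
    try:
        ipaddress.ip_address(raw)  # accepts IPv4 and IPv6 literals
        source["ip"] = raw
    except ValueError:
        source["domain"] = raw     # not an IP literal, treat as a domain
    return event

print(populate_source({}, "10.0.0.16"))       # lands in source.ip
print(populate_source({}, "www.elastic.co"))  # lands in source.domain
----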
type: keyword -example: Microsoft Corporation +example: North America -- -*`pe.description`*:: +*`source.geo.country_iso_code`*:: + -- -Internal description of the file, provided at compile-time. +Country ISO code. type: keyword -example: Paint +example: CA -- -*`pe.file_version`*:: +*`source.geo.country_name`*:: + -- -Internal version of the file, provided at compile-time. +Country name. type: keyword -example: 6.3.9600.17415 +example: Canada -- -*`pe.original_file_name`*:: +*`source.geo.location`*:: + -- -Internal name of the file, provided at compile-time. +Longitude and latitude. -type: keyword +type: geo_point -example: MSPAINT.EXE +example: { "lon": -73.614830, "lat": 45.505918 } -- -*`pe.product`*:: +*`source.geo.name`*:: + -- -Internal product name of the file, provided at compile-time. +User-defined description of a location, at the level of granularity they care about. +Could be the name of their data centers, the floor number, if this describes a local physical entity, city names. +Not typically used in automated geolocation. type: keyword -example: Microsoft® Windows® Operating System +example: boston-dc -- -[float] -=== process - -These fields contain information about a process. -These fields can help you correlate metrics information with a process id/name from a log message. The `process.pid` often stays in the metric itself and is copied to the global field for correlation. - - -*`process.args`*:: +*`source.geo.region_iso_code`*:: + -- -Array of process arguments, starting with the absolute path to the executable. -May be filtered to protect sensitive information. +Region ISO code. type: keyword -example: ['/usr/bin/ssh', '-l', 'user', '10.0.0.16'] +example: CA-QC -- -*`process.args_count`*:: +*`source.geo.region_name`*:: + -- -Length of the process.args array. -This field can be useful for querying or performing bucket analysis on how many arguments were provided to start a process. More arguments may be an indication of suspicious activity. +Region name. -type: long +type: keyword -example: 4 +example: Quebec -- -*`process.code_signature.exists`*:: +*`source.ip`*:: + -- -Boolean to capture if a signature is present. - -type: boolean +IP address of the source. +Can be one or multiple IPv4 or IPv6 addresses. -example: true +type: ip -- -*`process.code_signature.status`*:: +*`source.mac`*:: + -- -Additional information about the certificate status. -This is useful for logging cryptographic errors with the certificate validity or trust status. Leave unpopulated if the validity or trust of the certificate was unchecked. +MAC address of the source. type: keyword -example: ERROR_UNTRUSTED_ROOT - -- -*`process.code_signature.subject_name`*:: +*`source.nat.ip`*:: + -- -Subject name of the code signer - -type: keyword +Translated ip of source based NAT sessions (e.g. internal client to internet) +Typically connections traversing load balancers, firewalls, or routers. -example: Microsoft Corporation +type: ip -- -*`process.code_signature.trusted`*:: +*`source.nat.port`*:: + -- -Stores the trust status of the certificate chain. -Validating the trust of the certificate chain may be complicated, and this field should only be populated by tools that actively check the status. +Translated port of source based NAT sessions. (e.g. internal client to internet) +Typically used with load balancers, firewalls, or routers. 
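A hedged sketch of filling the `source.geo.*` fields above from a MaxMind database, assuming the third-party `geoip2` package and a local `GeoLite2-City.mmdb` file (both are assumptions; the Elastic stack normally performs this enrichment with its GeoIP processor):

[source,python]
----
# Sketch: map a geoip2 city lookup onto the source.geo.* fields.
# Assumes `pip install geoip2` and a GeoLite2-City.mmdb file (hypothetical path).
import geoip2.database

def geo_fields(reader, ip):
    r = reader.city(ip)
    return {
        "city_name": r.city.name,
        "continent_name": r.continent.name,
        "country_iso_code": r.country.iso_code,
        "country_name": r.country.name,
        # ECS geo_point format: {"lon": ..., "lat": ...}
        "location": {"lon": r.location.longitude, "lat": r.location.latitude},
        # Note: MaxMind returns e.g. "QC"; the ECS example "CA-QC" prefixes
        # the country code, so a caller may need to combine the two.
        "region_name": r.subdivisions.most_specific.name,
    }

with geoip2.database.Reader("GeoLite2-City.mmdb") as reader:
    print(geo_fields(reader, "8.8.8.8"))
----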
-type: boolean +type: long -example: true +format: string -- -*`process.code_signature.valid`*:: +*`source.packets`*:: + -- -Boolean to capture if the digital signature is verified against the binary content. -Leave unpopulated if a certificate was unchecked. +Packets sent from the source to the destination. -type: boolean +type: long -example: true +example: 12 -- -*`process.command_line`*:: +*`source.port`*:: + -- -Full command line that started the process, including the absolute path to the executable, and all arguments. -Some arguments may be filtered to protect sensitive information. +Port of the source. -type: keyword +type: long -example: /usr/bin/ssh -l user 10.0.0.16 +format: string -- -*`process.command_line.text`*:: +*`source.registered_domain`*:: + -- -type: text +The highest registered source domain, stripped of the subdomain. +For example, the registered domain for "foo.google.com" is "google.com". +This value can be determined precisely with a list like the public suffix list (http://publicsuffix.org). Trying to approximate this by simply taking the last two labels will not work well for TLDs such as "co.uk". + +type: keyword + +example: google.com -- -*`process.entity_id`*:: +*`source.top_level_domain`*:: + -- -Unique identifier for the process. -The implementation of this is specified by the data source, but some examples of what could be used here are a process-generated UUID, Sysmon Process GUIDs, or a hash of some uniquely identifying components of a process. -Constructing a globally unique identifier is a common practice to mitigate PID reuse as well as to identify a specific process over time, across multiple monitored hosts. +The effective top level domain (eTLD), also known as the domain suffix, is the last part of the domain name. For example, the top level domain for google.com is "com". +This value can be determined precisely with a list like the public suffix list (http://publicsuffix.org). Trying to approximate this by simply taking the last label will not work well for effective TLDs such as "co.uk". type: keyword -example: c2c455d9f99375d +example: co.uk -- -*`process.executable`*:: +*`source.user.domain`*:: + -- -Absolute path to the process executable. +Name of the directory the user is a member of. +For example, an LDAP or Active Directory domain name. type: keyword -example: /usr/bin/ssh - -- -*`process.executable.text`*:: +*`source.user.email`*:: + -- -type: text +User email address. + +type: keyword -- -*`process.exit_code`*:: +*`source.user.full_name`*:: + -- -The exit code of the process, if this is a termination event. -The field should be absent if there is no exit code for the event (e.g. process start). +User's full name, if available. -type: long +type: keyword -example: 137 +example: Albert Einstein -- -*`process.hash.md5`*:: +*`source.user.full_name.text`*:: + -- -MD5 hash. - -type: keyword +type: text -- -*`process.hash.sha1`*:: +*`source.user.group.domain`*:: + -- -SHA1 hash. +Name of the directory the group is a member of. +For example, an LDAP or Active Directory domain name. type: keyword -- -*`process.hash.sha256`*:: +*`source.user.group.id`*:: + -- -SHA256 hash. +Unique identifier for the group on the system/platform. type: keyword -- -*`process.hash.sha512`*:: +*`source.user.group.name`*:: + -- -SHA512 hash. +Name of the group. type: keyword -- -*`process.name`*:: +*`source.user.hash`*:: + -- -Process name. -Sometimes called program name or similar. +Unique user hash to correlate information for a user in anonymized form. 
+Useful if `user.id` or `user.name` contain confidential information and cannot be used. type: keyword -example: ssh - -- -*`process.name.text`*:: +*`source.user.id`*:: + -- -type: text +Unique identifiers of the user. + +type: keyword -- -*`process.parent.args`*:: +*`source.user.name`*:: + -- -Array of process arguments. -May be filtered to protect sensitive information. +Short name or login of the user. type: keyword -example: ['ssh', '-l', 'user', '10.0.0.16'] +example: albert -- -*`process.parent.args_count`*:: +*`source.user.name.text`*:: + -- -Length of the process.args array. -This field can be useful for querying or performing bucket analysis on how many arguments were provided to start a process. More arguments may be an indication of suspicious activity. +type: text -type: long +-- -example: 4 +[float] +=== threat --- +Fields to classify events and alerts according to a threat taxonomy such as the Mitre ATT&CK framework. +These fields are for users to classify alerts from all of their sources (e.g. IDS, NGFW, etc.) within a common taxonomy. The threat.tactic.* are meant to capture the high level category of the threat (e.g. "impact"). The threat.technique.* fields are meant to capture which kind of approach is used by this detected threat, to accomplish the goal (e.g. "endpoint denial of service"). -*`process.parent.code_signature.exists`*:: + +*`threat.framework`*:: + -- -Boolean to capture if a signature is present. +Name of the threat framework used to further categorize and classify the tactic and technique of the reported threat. Framework classification can be provided by detecting systems, evaluated at ingest time, or retrospectively tagged to events. -type: boolean +type: keyword -example: true +example: MITRE ATT&CK -- -*`process.parent.code_signature.status`*:: +*`threat.tactic.id`*:: + -- -Additional information about the certificate status. -This is useful for logging cryptographic errors with the certificate validity or trust status. Leave unpopulated if the validity or trust of the certificate was unchecked. +The id of tactic used by this threat. You can use the Mitre ATT&CK Matrix Tactic categorization, for example. (ex. https://attack.mitre.org/tactics/TA0040/ ) type: keyword -example: ERROR_UNTRUSTED_ROOT +example: TA0040 -- -*`process.parent.code_signature.subject_name`*:: +*`threat.tactic.name`*:: + -- -Subject name of the code signer +Name of the type of tactic used by this threat. You can use the Mitre ATT&CK Matrix Tactic categorization, for example. (ex. https://attack.mitre.org/tactics/TA0040/ ) type: keyword -example: Microsoft Corporation +example: impact -- -*`process.parent.code_signature.trusted`*:: +*`threat.tactic.reference`*:: + -- -Stores the trust status of the certificate chain. -Validating the trust of the certificate chain may be complicated, and this field should only be populated by tools that actively check the status. +The reference url of tactic used by this threat. You can use the Mitre ATT&CK Matrix Tactic categorization, for example. (ex. https://attack.mitre.org/tactics/TA0040/ ) -type: boolean +type: keyword -example: true +example: https://attack.mitre.org/tactics/TA0040/ -- -*`process.parent.code_signature.valid`*:: +*`threat.technique.id`*:: + -- -Boolean to capture if the digital signature is verified against the binary content. -Leave unpopulated if a certificate was unchecked. +The id of technique used by this tactic. You can use the Mitre ATT&CK Matrix Tactic categorization, for example. (ex. 
https://attack.mitre.org/techniques/T1499/ ) -type: boolean +type: keyword -example: true +example: T1499 -- -*`process.parent.command_line`*:: +*`threat.technique.name`*:: + -- -Full command line that started the process, including the absolute path to the executable, and all arguments. -Some arguments may be filtered to protect sensitive information. +The name of technique used by this tactic. You can use the Mitre ATT&CK Matrix Tactic categorization, for example. (ex. https://attack.mitre.org/techniques/T1499/ ) type: keyword -example: /usr/bin/ssh -l user 10.0.0.16 +example: endpoint denial of service -- -*`process.parent.command_line.text`*:: +*`threat.technique.name.text`*:: + -- type: text -- -*`process.parent.entity_id`*:: +*`threat.technique.reference`*:: + -- -Unique identifier for the process. -The implementation of this is specified by the data source, but some examples of what could be used here are a process-generated UUID, Sysmon Process GUIDs, or a hash of some uniquely identifying components of a process. -Constructing a globally unique identifier is a common practice to mitigate PID reuse as well as to identify a specific process over time, across multiple monitored hosts. +The reference url of technique used by this tactic. You can use the Mitre ATT&CK Matrix Tactic categorization, for example. (ex. https://attack.mitre.org/techniques/T1499/ ) type: keyword -example: c2c455d9f99375d - --- +example: https://attack.mitre.org/techniques/T1499/ -*`process.parent.executable`*:: -+ -- -Absolute path to the process executable. - -type: keyword -example: /usr/bin/ssh - --- +[float] +=== tls -*`process.parent.executable.text`*:: -+ --- -type: text +Fields related to a TLS connection. These fields focus on the TLS protocol itself and intentionally avoids in-depth analysis of the related x.509 certificate files. --- -*`process.parent.exit_code`*:: +*`tls.cipher`*:: + -- -The exit code of the process, if this is a termination event. -The field should be absent if there is no exit code for the event (e.g. process start). +String indicating the cipher used during the current connection. -type: long +type: keyword -example: 137 +example: TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 -- -*`process.parent.hash.md5`*:: +*`tls.client.certificate`*:: + -- -MD5 hash. +PEM-encoded stand-alone certificate offered by the client. This is usually mutually-exclusive of `client.certificate_chain` since this value also exists in that list. type: keyword +example: MII... + -- -*`process.parent.hash.sha1`*:: +*`tls.client.certificate_chain`*:: + -- -SHA1 hash. +Array of PEM-encoded certificates that make up the certificate chain offered by the client. This is usually mutually-exclusive of `client.certificate` since that value should be the first certificate in the chain. type: keyword +example: ['MII...', 'MII...'] + -- -*`process.parent.hash.sha256`*:: +*`tls.client.hash.md5`*:: + -- -SHA256 hash. +Certificate fingerprint using the MD5 digest of DER-encoded version of certificate offered by the client. For consistency with other hash values, this value should be formatted as an uppercase hash. type: keyword +example: 0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC + -- -*`process.parent.hash.sha512`*:: +*`tls.client.hash.sha1`*:: + -- -SHA512 hash. +Certificate fingerprint using the SHA1 digest of DER-encoded version of certificate offered by the client. For consistency with other hash values, this value should be formatted as an uppercase hash. 
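The fingerprint convention described above (digest of the DER-encoded certificate, rendered as an uppercase hex string) in a standard-library sketch; `der_bytes` is an illustrative placeholder for real certificate bytes:

[source,python]
----
# Sketch: compute the tls.client.hash.* style fingerprints from a
# DER-encoded certificate. Standard library only.
import hashlib

def cert_fingerprints(der_bytes):
    return {
        "md5": hashlib.md5(der_bytes).hexdigest().upper(),
        "sha1": hashlib.sha1(der_bytes).hexdigest().upper(),
        "sha256": hashlib.sha256(der_bytes).hexdigest().upper(),
    }

print(cert_fingerprints(b"0\x82\x03..."))  # truncated DER bytes, illustration only
----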
type: keyword +example: 9E393D93138888D288266C2D915214D1D1CCEB2A + -- -*`process.parent.name`*:: +*`tls.client.hash.sha256`*:: + -- -Process name. -Sometimes called program name or similar. +Certificate fingerprint using the SHA256 digest of DER-encoded version of certificate offered by the client. For consistency with other hash values, this value should be formatted as an uppercase hash. type: keyword -example: ssh - --- - -*`process.parent.name.text`*:: -+ --- -type: text +example: 0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0 -- -*`process.parent.pgid`*:: +*`tls.client.issuer`*:: + -- -Identifier of the group of processes the process belongs to. +Distinguished name of subject of the issuer of the x.509 certificate presented by the client. -type: long +type: keyword -format: string +example: CN=MyDomain Root CA, OU=Infrastructure Team, DC=mydomain, DC=com -- -*`process.parent.pid`*:: +*`tls.client.ja3`*:: + -- -Process id. - -type: long +A hash that identifies clients based on how they perform an SSL/TLS handshake. -example: 4242 +type: keyword -format: string +example: d4e5b18d6b55c71272893221c96ba240 -- -*`process.parent.ppid`*:: +*`tls.client.not_after`*:: + -- -Parent process' pid. - -type: long +Date/Time indicating when client certificate is no longer considered valid. -example: 4241 +type: date -format: string +example: 2021-01-01T00:00:00.000Z -- -*`process.parent.start`*:: +*`tls.client.not_before`*:: + -- -The time the process started. +Date/Time indicating when client certificate is first considered valid. type: date -example: 2016-05-23T08:05:34.853Z +example: 1970-01-01T00:00:00.000Z -- -*`process.parent.thread.id`*:: +*`tls.client.server_name`*:: + -- -Thread ID. - -type: long +Also called an SNI, this tells the server which hostname to which the client is attempting to connect. When this value is available, it should get copied to `destination.domain`. -example: 4242 +type: keyword -format: string +example: www.elastic.co -- -*`process.parent.thread.name`*:: +*`tls.client.subject`*:: + -- -Thread name. +Distinguished name of subject of the x.509 certificate presented by the client. type: keyword -example: thread-0 +example: CN=myclient, OU=Documentation Team, DC=mydomain, DC=com -- -*`process.parent.title`*:: +*`tls.client.supported_ciphers`*:: + -- -Process title. -The proctitle, some times the same as process name. Can also be different: for example a browser setting its title to the web page currently opened. +Array of ciphers offered by the client during the client hello. type: keyword +example: ['TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384', 'TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384', '...'] + -- -*`process.parent.title.text`*:: +*`tls.curve`*:: + -- -type: text +String indicating the curve used for the given cipher, when applicable. + +type: keyword + +example: secp256r1 -- -*`process.parent.uptime`*:: +*`tls.established`*:: + -- -Seconds the process has been up. - -type: long +Boolean flag indicating if the TLS negotiation was successful and transitioned to an encrypted tunnel. -example: 1325 +type: boolean -- -*`process.parent.working_directory`*:: +*`tls.next_protocol`*:: + -- -The working directory of the process. +String indicating the protocol being tunneled. Per the values in the IANA registry (https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids), this string should be lower case. 
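A minimal sketch of collecting a few of the `tls.*` values above from a live handshake with the standard `ssl` module; the host is illustrative, and the version split mirrors the `tls.version`/`tls.version_protocol` convention (lowercase protocol name, numeric version):

[source,python]
----
# Sketch: observe a TLS handshake and normalize a few tls.* fields.
# Standard library only; host/port are illustrative.
import socket
import ssl

host = "www.elastic.co"
ctx = ssl.create_default_context()
with socket.create_connection((host, 443)) as sock:
    with ctx.wrap_socket(sock, server_hostname=host) as tls:
        original = tls.version()                   # e.g. "TLSv1.3"
        protocol, _, version = original.partition("v")
        print({
            "tls.version_protocol": protocol.lower(),  # "tls"
            "tls.version": version,                    # "1.3"
            "tls.cipher": tls.cipher()[0],             # negotiated cipher name
            "tls.client.server_name": host,            # the SNI we sent
        })
----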
type: keyword -example: /home/alice +example: http/1.1 -- -*`process.parent.working_directory.text`*:: +*`tls.resumed`*:: + -- -type: text +Boolean flag indicating if this TLS connection was resumed from an existing TLS negotiation. + +type: boolean -- -*`process.pe.company`*:: +*`tls.server.certificate`*:: + -- -Internal company name of the file, provided at compile-time. +PEM-encoded stand-alone certificate offered by the server. This is usually mutually-exclusive of `server.certificate_chain` since this value also exists in that list. type: keyword -example: Microsoft Corporation +example: MII... -- -*`process.pe.description`*:: +*`tls.server.certificate_chain`*:: + -- -Internal description of the file, provided at compile-time. +Array of PEM-encoded certificates that make up the certificate chain offered by the server. This is usually mutually-exclusive of `server.certificate` since that value should be the first certificate in the chain. type: keyword -example: Paint +example: ['MII...', 'MII...'] -- -*`process.pe.file_version`*:: +*`tls.server.hash.md5`*:: + -- -Internal version of the file, provided at compile-time. +Certificate fingerprint using the MD5 digest of DER-encoded version of certificate offered by the server. For consistency with other hash values, this value should be formatted as an uppercase hash. type: keyword -example: 6.3.9600.17415 +example: 0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC -- -*`process.pe.original_file_name`*:: +*`tls.server.hash.sha1`*:: + -- -Internal name of the file, provided at compile-time. +Certificate fingerprint using the SHA1 digest of DER-encoded version of certificate offered by the server. For consistency with other hash values, this value should be formatted as an uppercase hash. type: keyword -example: MSPAINT.EXE +example: 9E393D93138888D288266C2D915214D1D1CCEB2A -- -*`process.pe.product`*:: +*`tls.server.hash.sha256`*:: + -- -Internal product name of the file, provided at compile-time. +Certificate fingerprint using the SHA256 digest of DER-encoded version of certificate offered by the server. For consistency with other hash values, this value should be formatted as an uppercase hash. type: keyword -example: Microsoft® Windows® Operating System +example: 0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0 -- -*`process.pgid`*:: +*`tls.server.issuer`*:: + -- -Identifier of the group of processes the process belongs to. +Subject of the issuer of the x.509 certificate presented by the server. -type: long +type: keyword -format: string +example: CN=MyDomain Root CA, OU=Infrastructure Team, DC=mydomain, DC=com -- -*`process.pid`*:: +*`tls.server.ja3s`*:: + -- -Process id. - -type: long +A hash that identifies servers based on how they perform an SSL/TLS handshake. -example: 4242 +type: keyword -format: string +example: 394441ab65754e2207b1e1b457b3641d -- -*`process.ppid`*:: +*`tls.server.not_after`*:: + -- -Parent process' pid. - -type: long +Timestamp indicating when server certificate is no longer considered valid. -example: 4241 +type: date -format: string +example: 2021-01-01T00:00:00.000Z -- -*`process.start`*:: +*`tls.server.not_before`*:: + -- -The time the process started. +Timestamp indicating when server certificate is first considered valid. type: date -example: 2016-05-23T08:05:34.853Z +example: 1970-01-01T00:00:00.000Z -- -*`process.thread.id`*:: +*`tls.server.subject`*:: + -- -Thread ID. - -type: long +Subject of the x.509 certificate presented by the server. 
-example: 4242 +type: keyword -format: string +example: CN=www.mydomain.com, OU=Infrastructure Team, DC=mydomain, DC=com -- -*`process.thread.name`*:: +*`tls.version`*:: + -- -Thread name. +Numeric part of the version parsed from the original string. type: keyword -example: thread-0 +example: 1.2 -- -*`process.title`*:: +*`tls.version_protocol`*:: + -- -Process title. -The proctitle, some times the same as process name. Can also be different: for example a browser setting its title to the web page currently opened. +Normalized lowercase protocol name parsed from original string. type: keyword --- - -*`process.title.text`*:: -+ --- -type: text - --- +example: tls -*`process.uptime`*:: -+ -- -Seconds the process has been up. -type: long +[float] +=== tracing -example: 1325 +Distributed tracing makes it possible to analyze performance throughout a microservice architecture all in one view. This is accomplished by tracing all of the requests - from the initial web request in the front-end service - to queries made through multiple back-end services. --- -*`process.working_directory`*:: +*`tracing.trace.id`*:: + -- -The working directory of the process. +Unique identifier of the trace. +A trace groups multiple events like transactions that belong together. For example, a user request handled by multiple inter-connected services. type: keyword -example: /home/alice +example: 4bf92f3577b34da6a3ce929d0e0e4736 -- -*`process.working_directory.text`*:: +*`tracing.transaction.id`*:: + -- -type: text +Unique identifier of the transaction. +A transaction is the highest level of work measured within a service, such as a request to a server. + +type: keyword + +example: 00f067aa0ba902b7 -- [float] -=== registry +=== url -Fields related to Windows Registry operations. +URL fields provide support for complete or partial URLs, and supports the breaking down into scheme, domain, path, and so on. -*`registry.data.bytes`*:: +*`url.domain`*:: + -- -Original bytes written with base64 encoding. -For Windows registry operations, such as SetValueEx and RegQueryValueEx, this corresponds to the data pointed by `lp_data`. This is optional but provides better recoverability and should be populated for REG_BINARY encoded values. +Domain of the url, such as "www.elastic.co". +In some cases a URL may refer to an IP and/or port directly, without a domain name. In this case, the IP address would go to the `domain` field. type: keyword -example: ZQBuAC0AVQBTAAAAZQBuAAAAAAA= +example: www.elastic.co -- -*`registry.data.strings`*:: +*`url.extension`*:: + -- -Content when writing string types. -Populated as an array when writing string data to the registry. For single string registry types (REG_SZ, REG_EXPAND_SZ), this should be an array with one string. For sequences of string with REG_MULTI_SZ, this array will be variable length. For numeric data, such as REG_DWORD and REG_QWORD, this should be populated with the decimal representation (e.g `"1"`). +The field contains the file extension from the original request url. +The file extension is only set if it exists, as not every url has a file extension. +The leading period must not be included. For example, the value must be "png", not ".png". type: keyword -example: ["C:\rta\red_ttp\bin\myapp.exe"] +example: png -- -*`registry.data.type`*:: +*`url.fragment`*:: + -- -Standard registry type for encoding contents +Portion of the url after the `#`, such as "top". +The `#` is not part of the fragment. 
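The `url.*` decomposition above maps almost one-to-one onto the standard library's `urlsplit`; a sketch with an illustrative event layout:

[source,python]
----
# Sketch: split a full URL into the url.* fields. Note how the
# separators (":", "?", "#") are excluded, matching the field docs.
from urllib.parse import urlsplit

def url_fields(original):
    parts = urlsplit(original)
    return {
        "original": original,
        "scheme": parts.scheme,      # "https" (no trailing ":")
        "domain": parts.hostname,    # "www.elastic.co"
        "port": parts.port,          # 443
        "path": parts.path,          # "/search"
        "query": parts.query,        # "q=elasticsearch" (no "?")
        "fragment": parts.fragment,  # "top" (no "#")
        "username": parts.username,
        "password": parts.password,
    }

print(url_fields("https://www.elastic.co:443/search?q=elasticsearch#top"))
----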
type: keyword -example: REG_SZ - -- -*`registry.hive`*:: +*`url.full`*:: + -- -Abbreviated name for the hive. +If full URLs are important to your use case, they should be stored in `url.full`, whether this field is reconstructed or present in the event source. type: keyword -example: HKLM +example: https://www.elastic.co:443/search?q=elasticsearch#top -- -*`registry.key`*:: +*`url.full.text`*:: + -- -Hive-relative path of keys. - -type: keyword - -example: SOFTWARE\Microsoft\Windows NT\CurrentVersion\Image File Execution Options\winword.exe +type: text -- -*`registry.path`*:: +*`url.original`*:: + -- -Full path, including hive, key and value +Unmodified original url as seen in the event source. +Note that in network monitoring, the observed URL may be a full URL, whereas in access logs, the URL is often just represented as a path. +This field is meant to represent the URL as it was observed, complete or not. type: keyword -example: HKLM\SOFTWARE\Microsoft\Windows NT\CurrentVersion\Image File Execution Options\winword.exe\Debugger +example: https://www.elastic.co:443/search?q=elasticsearch#top or /search?q=elasticsearch -- -*`registry.value`*:: +*`url.original.text`*:: + -- -Name of the value written. - -type: keyword - -example: Debugger +type: text -- -[float] -=== related - -This field set is meant to facilitate pivoting around a piece of data. -Some pieces of information can be seen in many places in an ECS event. To facilitate searching for them, store an array of all seen values to their corresponding field in `related.`. -A concrete example is IP addresses, which can be under host, observer, source, destination, client, server, and network.forwarded_ip. If you append all IPs to `related.ip`, you can then search for a given IP trivially, no matter where it appeared, by querying `related.ip:192.0.2.15`. - - -*`related.hash`*:: +*`url.password`*:: + -- -All the hashes seen on your event. Populating this field, then using it to search for hashes can help in situations where you're unsure what the hash algorithm is (and therefore which key name to search). +Password of the request. type: keyword -- -*`related.ip`*:: +*`url.path`*:: + -- -All of the IPs seen on your event. +Path of the request, such as "/search". -type: ip +type: keyword -- -*`related.user`*:: +*`url.port`*:: + -- -All the user names seen on your event. - -type: keyword +Port of the request, such as 443. --- +type: long -[float] -=== rule +example: 443 -Rule fields are used to capture the specifics of any observer or agent rules that generate alerts or other notable events. -Examples of data sources that would populate the rule fields include: network admission control platforms, network or host IDS/IPS, network firewalls, web application firewalls, url filters, endpoint detection and response (EDR) systems, etc. +format: string +-- -*`rule.author`*:: +*`url.query`*:: + -- -Name, organization, or pseudonym of the author or authors who created the rule used to generate this event. +The query field describes the query string of the request, such as "q=elasticsearch". +The `?` is excluded from the query string. If a URL contains no `?`, there is no query field. If there is a `?` but no query, the query field exists with an empty string. The `exists` query can be used to differentiate between the two cases. type: keyword -example: ['Star-Lord'] - -- -*`rule.category`*:: +*`url.registered_domain`*:: + -- -A categorization value keyword used by the entity using the rule for detection of this event. 
+The highest registered url domain, stripped of the subdomain. +For example, the registered domain for "foo.google.com" is "google.com". +This value can be determined precisely with a list like the public suffix list (http://publicsuffix.org). Trying to approximate this by simply taking the last two labels will not work well for TLDs such as "co.uk". type: keyword -example: Attempted Information Leak +example: google.com -- -*`rule.description`*:: +*`url.scheme`*:: + -- -The description of the rule generating the event. +Scheme of the request, such as "https". +Note: The `:` is not part of the scheme. type: keyword -example: Block requests to public DNS over HTTPS / TLS protocols +example: https -- -*`rule.id`*:: +*`url.top_level_domain`*:: + -- -A rule ID that is unique within the scope of an agent, observer, or other entity using the rule for detection of this event. +The effective top level domain (eTLD), also known as the domain suffix, is the last part of the domain name. For example, the top level domain for google.com is "com". +This value can be determined precisely with a list like the public suffix list (http://publicsuffix.org). Trying to approximate this by simply taking the last label will not work well for effective TLDs such as "co.uk". type: keyword -example: 101 +example: co.uk -- -*`rule.license`*:: +*`url.username`*:: + -- -Name of the license under which the rule used to generate this event is made available. +Username of the request. type: keyword -example: Apache 2.0 - --- - -*`rule.name`*:: -+ -- -The name of the rule or signature generating the event. -type: keyword +[float] +=== user -example: BLOCK_DNS_over_TLS +The user fields describe information about the user that is relevant to the event. +Fields can have one entry or multiple entries. If a user has more than one id, provide an array that includes all of them. --- -*`rule.reference`*:: +*`user.domain`*:: + -- -Reference URL to additional information about the rule used to generate this event. -The URL can point to the vendor's documentation about the rule. If that's not available, it can also be a link to a more general page describing this type of alert. +Name of the directory the user is a member of. +For example, an LDAP or Active Directory domain name. type: keyword -example: https://en.wikipedia.org/wiki/DNS_over_TLS - -- -*`rule.ruleset`*:: +*`user.email`*:: + -- -Name of the ruleset, policy, group, or parent category in which the rule used to generate this event is a member. +User email address. type: keyword -example: Standard_Protocol_Filters - -- -*`rule.uuid`*:: +*`user.full_name`*:: + -- -A rule ID that is unique within the scope of a set or group of agents, observers, or other entities using the rule for detection of this event. +User's full name, if available. type: keyword -example: 1100110011 +example: Albert Einstein -- -*`rule.version`*:: +*`user.full_name.text`*:: + -- -The version / revision of the rule being used for analysis. - -type: keyword - -example: 1.1 +type: text -- -[float] -=== server - -A Server is defined as the responder in a network connection for events regarding sessions, connections, or bidirectional flow records. -For TCP events, the server is the receiver of the initial SYN packet(s) of the TCP connection. For other protocols, the server is generally the responder in the network transaction. Some systems actually use the term "responder" to refer the server in TCP connections. The server fields describe details about the system acting as the server in the network event. 
Server fields are usually populated in conjunction with client fields. Server fields are generally not populated for packet-level events. -Client / server representations can add semantic context to an exchange, which is helpful to visualize the data in certain situations. If your context falls in that category, you should still ensure that source and destination are filled appropriately. - - -*`server.address`*:: +*`user.group.domain`*:: + -- -Some event server addresses are defined ambiguously. The event will sometimes list an IP, a domain or a unix socket. You should always store the raw address in the `.address` field. -Then it should be duplicated to `.ip` or `.domain`, depending on which one it is. +Name of the directory the group is a member of. +For example, an LDAP or Active Directory domain name. type: keyword -- -*`server.as.number`*:: +*`user.group.id`*:: + -- -Unique number allocated to the autonomous system. The autonomous system number (ASN) uniquely identifies each network on the Internet. - -type: long +Unique identifier for the group on the system/platform. -example: 15169 +type: keyword -- -*`server.as.organization.name`*:: +*`user.group.name`*:: + -- -Organization name. +Name of the group. type: keyword -example: Google LLC - -- -*`server.as.organization.name.text`*:: +*`user.hash`*:: + -- -type: text +Unique user hash to correlate information for a user in anonymized form. +Useful if `user.id` or `user.name` contain confidential information and cannot be used. + +type: keyword -- -*`server.bytes`*:: +*`user.id`*:: + -- -Bytes sent from the server to the client. - -type: long - -example: 184 +Unique identifiers of the user. -format: bytes +type: keyword -- -*`server.domain`*:: +*`user.name`*:: + -- -Server domain. +Short name or login of the user. type: keyword +example: albert + -- -*`server.geo.city_name`*:: +*`user.name.text`*:: + -- -City name. +type: text -type: keyword +-- -example: Montreal +[float] +=== user_agent --- +The user_agent fields normally come from a browser request. +They often show up in web service logs coming from the parsed user agent string. -*`server.geo.continent_name`*:: + +*`user_agent.device.name`*:: + -- -Name of the continent. +Name of the device. type: keyword -example: North America +example: iPhone -- -*`server.geo.country_iso_code`*:: +*`user_agent.name`*:: + -- -Country ISO code. +Name of the user agent. type: keyword -example: CA +example: Safari -- -*`server.geo.country_name`*:: +*`user_agent.original`*:: + -- -Country name. +Unparsed user_agent string. type: keyword -example: Canada +example: Mozilla/5.0 (iPhone; CPU iPhone OS 12_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.0 Mobile/15E148 Safari/604.1 -- -*`server.geo.location`*:: +*`user_agent.original.text`*:: + -- -Longitude and latitude. - -type: geo_point - -example: { "lon": -73.614830, "lat": 45.505918 } +type: text -- -*`server.geo.name`*:: +*`user_agent.os.family`*:: + -- -User-defined description of a location, at the level of granularity they care about. -Could be the name of their data centers, the floor number, if this describes a local physical entity, city names. -Not typically used in automated geolocation. +OS family (such as redhat, debian, freebsd, windows). type: keyword -example: boston-dc +example: debian -- -*`server.geo.region_iso_code`*:: +*`user_agent.os.full`*:: + -- -Region ISO code. +Operating system name, including the version or code name. 
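A sketch of deriving the `user_agent.*` fields above from a raw string, assuming the third-party `ua-parser` package (an assumption; Beats and Elasticsearch ship their own user agent processors for this):

[source,python]
----
# Sketch: parse a raw user agent string into user_agent.* fields.
# Assumes `pip install ua-parser`; expected values shown as comments.
from ua_parser import user_agent_parser

ua = ("Mozilla/5.0 (iPhone; CPU iPhone OS 12_1 like Mac OS X) "
      "AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.0 "
      "Mobile/15E148 Safari/604.1")
parsed = user_agent_parser.Parse(ua)
print({
    "user_agent.original": ua,
    "user_agent.name": parsed["user_agent"]["family"],     # e.g. "Mobile Safari"
    "user_agent.os.name": parsed["os"]["family"],          # e.g. "iOS"
    "user_agent.device.name": parsed["device"]["family"],  # e.g. "iPhone"
})
----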
type: keyword -example: CA-QC +example: Mac OS Mojave -- -*`server.geo.region_name`*:: +*`user_agent.os.full.text`*:: + -- -Region name. - -type: keyword - -example: Quebec +type: text -- -*`server.ip`*:: +*`user_agent.os.kernel`*:: + -- -IP address of the server. -Can be one or multiple IPv4 or IPv6 addresses. +Operating system kernel version as a raw string. -type: ip +type: keyword + +example: 4.4.0-112-generic -- -*`server.mac`*:: +*`user_agent.os.name`*:: + -- -MAC address of the server. +Operating system name, without the version. type: keyword +example: Mac OS X + -- -*`server.nat.ip`*:: +*`user_agent.os.name.text`*:: + -- -Translated ip of destination based NAT sessions (e.g. internet to private DMZ) -Typically used with load balancers, firewalls, or routers. - -type: ip +type: text -- -*`server.nat.port`*:: +*`user_agent.os.platform`*:: + -- -Translated port of destination based NAT sessions (e.g. internet to private DMZ) -Typically used with load balancers, firewalls, or routers. +Operating system platform (such as centos, ubuntu, windows). -type: long +type: keyword -format: string +example: darwin -- -*`server.packets`*:: +*`user_agent.os.version`*:: + -- -Packets sent from the server to the client. +Operating system version as a raw string. -type: long +type: keyword -example: 12 +example: 10.14.1 -- -*`server.port`*:: +*`user_agent.version`*:: + -- -Port of the server. +Version of the user agent. -type: long +type: keyword -format: string +example: 12.0 -- -*`server.registered_domain`*:: +[float] +=== vlan + +The VLAN fields are used to identify 802.1q tag(s) of a packet, as well as ingress and egress VLAN associations of an observer in relation to a specific packet or connection. +Network.vlan fields are used to record a single VLAN tag, or the outer tag in the case of q-in-q encapsulations, for a packet or connection as observed, typically provided by a network sensor (e.g. Zeek, Wireshark) passively reporting on traffic. +Network.inner VLAN fields are used to report inner q-in-q 802.1q tags (multiple 802.1q encapsulations) as observed, typically provided by a network sensor (e.g. Zeek, Wireshark) passively reporting on traffic. Network.inner VLAN fields should only be used in addition to network.vlan fields to indicate q-in-q tagging. +Observer.ingress and observer.egress VLAN values are used to record observer specific information when observer events contain discrete ingress and egress VLAN information, typically provided by firewalls, routers, or load balancers. + + +*`vlan.id`*:: + -- -The highest registered server domain, stripped of the subdomain. -For example, the registered domain for "foo.google.com" is "google.com". -This value can be determined precisely with a list like the public suffix list (http://publicsuffix.org). Trying to approximate this by simply taking the last two labels will not work well for TLDs such as "co.uk". +VLAN ID as reported by the observer. type: keyword -example: google.com +example: 10 -- -*`server.top_level_domain`*:: +*`vlan.name`*:: + -- -The effective top level domain (eTLD), also known as the domain suffix, is the last part of the domain name. For example, the top level domain for google.com is "com". -This value can be determined precisely with a list like the public suffix list (http://publicsuffix.org). Trying to approximate this by simply taking the last label will not work well for effective TLDs such as "co.uk". +Optional VLAN name as reported by the observer.
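An illustrative event fragment for the VLAN guidance above, showing the outer tag under `network.vlan`, the inner q-in-q tag under `network.inner.vlan`, and observer ingress/egress tags; all values are made up:

[source,python]
----
# Illustrative ECS event fragment for q-in-q VLAN tagging.
# vlan.id is a keyword field, so the IDs are strings.
event = {
    "network": {
        "vlan": {"id": "10", "name": "outside"},  # outer 802.1q tag
        "inner": {"vlan": {"id": "20"}},          # inner q-in-q tag
    },
    "observer": {
        "ingress": {"vlan": {"id": "10"}},        # where the packet entered
        "egress": {"vlan": {"id": "30"}},         # where it left
    },
}
print(event)
----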
type: keyword -example: co.uk +example: outside -- -*`server.user.domain`*:: +[float] +=== vulnerability + +The vulnerability fields describe information about a vulnerability that is relevant to an event. + + +*`vulnerability.category`*:: + -- -Name of the directory the user is a member of. -For example, an LDAP or Active Directory domain name. +The type of system or architecture that the vulnerability affects. These may be platform-specific (for example, Debian or SUSE) or general (for example, Database or Firewall). For example (https://qualysguard.qualys.com/qwebhelp/fo_portal/knowledgebase/vulnerability_categories.htm[Qualys vulnerability categories]) +This field must be an array. type: keyword +example: ["Firewall"] + -- -*`server.user.email`*:: +*`vulnerability.classification`*:: + -- -User email address. +The classification of the vulnerability scoring system. For example (https://www.first.org/cvss/) type: keyword +example: CVSS + -- -*`server.user.full_name`*:: +*`vulnerability.description`*:: + -- -User's full name, if available. +The description of the vulnerability that provides additional context of the vulnerability. For example (https://cve.mitre.org/about/faqs.html#cve_entry_descriptions_created[Common Vulnerabilities and Exposure CVE description]) type: keyword -example: Albert Einstein +example: In macOS before 2.12.6, there is a vulnerability in the RPC... -- -*`server.user.full_name.text`*:: +*`vulnerability.description.text`*:: + -- type: text -- -*`server.user.group.domain`*:: +*`vulnerability.enumeration`*:: + -- -Name of the directory the group is a member of. -For example, an LDAP or Active Directory domain name. +The type of identifier used for this vulnerability. For example (https://cve.mitre.org/about/) type: keyword +example: CVE + -- -*`server.user.group.id`*:: +*`vulnerability.id`*:: + -- -Unique identifier for the group on the system/platform. +The identification (ID) is the number portion of a vulnerability entry. It includes a unique identification number for the vulnerability. For example (https://cve.mitre.org/about/faqs.html#what_is_cve_id)[Common Vulnerabilities and Exposure CVE ID] type: keyword +example: CVE-2019-00001 + -- -*`server.user.group.name`*:: +*`vulnerability.reference`*:: + -- -Name of the group. +A resource that provides additional information, context, and mitigations for the identified vulnerability. type: keyword +example: https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-6111 + -- -*`server.user.hash`*:: +*`vulnerability.report_id`*:: + -- -Unique user hash to correlate information for a user in anonymized form. -Useful if `user.id` or `user.name` contain confidential information and cannot be used. +The report or scan identification number. type: keyword +example: 20191018.0001 + -- -*`server.user.id`*:: +*`vulnerability.scanner.vendor`*:: + -- -Unique identifiers of the user. +The name of the vulnerability scanner vendor. type: keyword +example: Tenable + -- -*`server.user.name`*:: +*`vulnerability.score.base`*:: + -- -Short name or login of the user. +Scores can range from 0.0 to 10.0, with 10.0 being the most severe. +Base scores cover an assessment for exploitability metrics (attack vector, complexity, privileges, and user interaction), impact metrics (confidentiality, integrity, and availability), and scope. 
For example (https://www.first.org/cvss/specification-document) -type: keyword +type: float -example: albert +example: 5.5 -- -*`server.user.name.text`*:: +*`vulnerability.score.environmental`*:: + -- -type: text +Scores can range from 0.0 to 10.0, with 10.0 being the most severe. +Environmental scores cover an assessment for any modified Base metrics, confidentiality, integrity, and availability requirements. For example (https://www.first.org/cvss/specification-document) + +type: float + +example: 5.5 -- -[float] -=== service +*`vulnerability.score.temporal`*:: ++ +-- +Scores can range from 0.0 to 10.0, with 10.0 being the most severe. +Temporal scores cover an assessment for code maturity, remediation level, and confidence. For example (https://www.first.org/cvss/specification-document) -The service fields describe the service for or from which the data was collected. -These fields help you find and correlate logs for a specific service and version. +type: float +-- -*`service.ephemeral_id`*:: +*`vulnerability.score.version`*:: + -- -Ephemeral identifier of this service (if one exists). -This id normally changes across restarts, but `service.id` does not. +The National Vulnerability Database (NVD) provides qualitative severity rankings of "Low", "Medium", and "High" for CVSS v2.0 base score ranges in addition to the severity ratings for CVSS v3.0 as they are defined in the CVSS v3.0 specification. +CVSS is owned and managed by FIRST.Org, Inc. (FIRST), a US-based non-profit organization, whose mission is to help computer security incident response teams across the world. For example (https://nvd.nist.gov/vuln-metrics/cvss) type: keyword -example: 8a4f500f +example: 2.0 -- -*`service.id`*:: +*`vulnerability.severity`*:: + -- -Unique identifier of the running service. If the service is comprised of many nodes, the `service.id` should be the same for all nodes. -This id should uniquely identify the service. This makes it possible to correlate logs and metrics for one specific service, no matter which particular node emitted the event. -Note that if you need to see the events from one specific host of the service, you should filter on that `host.name` or `host.id` instead. +The severity of the vulnerability can help with metrics and internal prioritization regarding remediation. For example (https://nvd.nist.gov/vuln-metrics/cvss) type: keyword -example: d37e5ebfe0ae6c4972dbe9f0174a1637bb8247f6 +example: Critical -- -*`service.name`*:: +[[exported-fields-elasticsearch]] +== Elasticsearch fields + +Elasticsearch module + + + +[float] +=== elasticsearch + + + + +*`elasticsearch.cluster.name`*:: + -- -Name of the service data is collected from. -The name of the service is normally user given. This allows for distributed services that run on multiple hosts to correlate the related instances based on the name. -In the case of Elasticsearch the `service.name` could contain the cluster name. For Beats the `service.name` is by default a copy of the `service.type` field if no name is specified. +Elasticsearch cluster name. -type: keyword -example: elasticsearch-metrics +type: keyword -- -*`service.node.name`*:: +*`elasticsearch.cluster.id`*:: + -- -Name of a service node. -This allows for two nodes of the same service running on the same host to be differentiated. Therefore, `service.node.name` should typically be unique across nodes of a given service. -In the case of Elasticsearch, the `service.node.name` could contain the unique node name within the Elasticsearch cluster. 
In cases where the service doesn't have the concept of a node name, the host name or container name can be used to distinguish running instances that make up this service. If those do not provide uniqueness (e.g. multiple instances of the service running on the same host) - the node name can be manually set. +Elasticsearch cluster id. -type: keyword -example: instance-0000000016 +type: keyword -- -*`service.state`*:: +*`elasticsearch.cluster.state.id`*:: + -- -Current state of the service. +Elasticsearch state id. + type: keyword -- -*`service.type`*:: +*`elasticsearch.node.id`*:: + -- -The type of the service data is collected from. -The type can be used to group and correlate logs and metrics from one service type. -Example: If logs or metrics are collected from Elasticsearch, `service.type` would be `elasticsearch`. +Node ID -type: keyword -example: elasticsearch +type: keyword -- -*`service.version`*:: +*`elasticsearch.node.name`*:: + -- -Version of the service the data was collected from. -This allows to look at a data set only for a specific version of a service. +Node name. -type: keyword -example: 3.2.4 +type: keyword -- [float] -=== source +=== ccr -Source fields describe details about the source of a packet/event. -Source fields are usually populated in conjunction with destination fields. +Cross-cluster replication stats -*`source.address`*:: + + +*`elasticsearch.ccr.leader.index`*:: + -- -Some event source addresses are defined ambiguously. The event will sometimes list an IP, a domain or a unix socket. You should always store the raw address in the `.address` field. -Then it should be duplicated to `.ip` or `.domain`, depending on which one it is. +Name of leader index + type: keyword -- -*`source.as.number`*:: +*`elasticsearch.ccr.leader.max_seq_no`*:: + -- -Unique number allocated to the autonomous system. The autonomous system number (ASN) uniquely identifies each network on the Internet. +Maximum sequence number of operation on the leader shard -type: long -example: 15169 +type: long -- -*`source.as.organization.name`*:: + +*`elasticsearch.ccr.follower.index`*:: + -- -Organization name. +Name of follower index -type: keyword -example: Google LLC +type: keyword -- -*`source.as.organization.name.text`*:: +*`elasticsearch.ccr.follower.shard.number`*:: + -- -type: text +Number of the shard within the index + + +type: long -- -*`source.bytes`*:: +*`elasticsearch.ccr.follower.operations_written`*:: + -- -Bytes sent from the source to the destination. - -type: long +Number of operations indexed (replicated) into the follower shard from the leader shard -example: 184 -format: bytes +type: long -- -*`source.domain`*:: +*`elasticsearch.ccr.follower.time_since_last_read.ms`*:: + -- -Source domain. +Time, in ms, since the follower last fetched from the leader -type: keyword + +type: long -- -*`source.geo.city_name`*:: +*`elasticsearch.ccr.follower.global_checkpoint`*:: + -- -City name. +Global checkpoint value on follower shard -type: keyword -example: Montreal +type: long -- -*`source.geo.continent_name`*:: -+ --- -Name of the continent. +[float] +=== cluster.stats -type: keyword +Cluster stats -example: North America --- -*`source.geo.country_iso_code`*:: +*`elasticsearch.cluster.stats.status`*:: + -- -Country ISO code. +Cluster status (green, yellow, red). -type: keyword -example: CA +type: keyword -- -*`source.geo.country_name`*:: -+ --- -Country name. +[float] +=== nodes -type: keyword +Nodes statistics. 
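The `cluster_stats` fields above line up with the stock `GET _cluster/stats` API; a minimal probe with the third-party `requests` package, where the URL and the exact JSON paths are assumptions based on a stock Elasticsearch node:

[source,python]
----
# Sketch: map a _cluster/stats response onto the metricset fields above.
# Assumes `pip install requests` and an unauthenticated local node.
import requests

stats = requests.get("http://localhost:9200/_cluster/stats").json()
print({
    "elasticsearch.cluster.stats.status": stats["status"],
    "elasticsearch.cluster.stats.nodes.count": stats["nodes"]["count"]["total"],
    "elasticsearch.cluster.stats.nodes.master": stats["nodes"]["count"]["master"],
    "elasticsearch.cluster.stats.nodes.data": stats["nodes"]["count"]["data"],
    "elasticsearch.cluster.stats.indices.count": stats["indices"]["count"],
    "elasticsearch.cluster.stats.indices.shards.count": stats["indices"]["shards"]["total"],
})
----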
-example: Canada --- -*`source.geo.location`*:: +*`elasticsearch.cluster.stats.nodes.count`*:: + -- -Longitude and latitude. +Total number of nodes in cluster. -type: geo_point -example: { "lon": -73.614830, "lat": 45.505918 } +type: long -- -*`source.geo.name`*:: +*`elasticsearch.cluster.stats.nodes.master`*:: + -- -User-defined description of a location, at the level of granularity they care about. -Could be the name of their data centers, the floor number, if this describes a local physical entity, city names. -Not typically used in automated geolocation. +Number of master-eligible nodes in cluster. -type: keyword -example: boston-dc +type: long -- -*`source.geo.region_iso_code`*:: +*`elasticsearch.cluster.stats.nodes.data`*:: + -- -Region ISO code. +Number of data nodes in cluster. -type: keyword -example: CA-QC +type: long -- -*`source.geo.region_name`*:: -+ --- -Region name. +[float] +=== indices -type: keyword +Indices statistics. -example: Quebec --- -*`source.ip`*:: +*`elasticsearch.cluster.stats.indices.count`*:: + -- -IP address of the source. -Can be one or multiple IPv4 or IPv6 addresses. +Total number of indices in cluster. -type: ip --- +type: long -*`source.mac`*:: -+ -- -MAC address of the source. -type: keyword +[float] +=== shards --- +Shard statistics. -*`source.nat.ip`*:: + + +*`elasticsearch.cluster.stats.indices.shards.count`*:: + -- -Translated ip of source based NAT sessions (e.g. internal client to internet) -Typically connections traversing load balancers, firewalls, or routers. +Total number of shards in cluster. -type: ip + +type: long -- -*`source.nat.port`*:: +*`elasticsearch.cluster.stats.indices.shards.primaries`*:: + -- -Translated port of source based NAT sessions. (e.g. internal client to internet) -Typically used with load balancers, firewalls, or routers. +Total number of primary shards in cluster. -type: long -format: string +type: long -- -*`source.packets`*:: +*`elasticsearch.cluster.stats.indices.fielddata.memory.bytes`*:: + -- -Packets sent from the source to the destination. +Memory used for fielddata. -type: long -example: 12 +type: long -- -*`source.port`*:: -+ --- -Port of the source. +[float] +=== enrich -type: long +Enrich stats -format: string --- -*`source.registered_domain`*:: +*`elasticsearch.enrich.queue.size`*:: + -- -The highest registered source domain, stripped of the subdomain. -For example, the registered domain for "foo.google.com" is "google.com". -This value can be determined precisely with a list like the public suffix list (http://publicsuffix.org). Trying to approximate this by simply taking the last two labels will not work well for TLDs such as "co.uk". +Number of search requests in the queue. -type: keyword -example: google.com +type: long -- -*`source.top_level_domain`*:: + +*`elasticsearch.enrich.remote_requests.current`*:: + -- -The effective top level domain (eTLD), also known as the domain suffix, is the last part of the domain name. For example, the top level domain for google.com is "com". -This value can be determined precisely with a list like the public suffix list (http://publicsuffix.org). Trying to approximate this by simply taking the last label will not work well for effective TLDs such as "co.uk". +Current number of outstanding remote requests. -type: keyword -example: co.uk +type: long -- -*`source.user.domain`*:: +*`elasticsearch.enrich.remote_requests.total`*:: + -- -Name of the directory the user is a member of. -For example, an LDAP or Active Directory domain name. 
+Number of outstanding remote requests executed since node startup. -type: keyword + +type: long -- -*`source.user.email`*:: +*`elasticsearch.enrich.executed_searches.total`*:: + -- -User email address. +Number of search requests that enrich processors have executed since node startup. -type: keyword --- +type: long -*`source.user.full_name`*:: -+ -- -User's full name, if available. -type: keyword +[float] +=== index -example: Albert Einstein +index --- -*`source.user.full_name.text`*:: + +*`elasticsearch.index.name`*:: + -- -type: text +Index name. + + +type: keyword -- -*`source.user.group.domain`*:: + +*`elasticsearch.index.total.docs.count`*:: + -- -Name of the directory the group is a member of. -For example, an LDAP or Active Directory domain name. +Total number of documents in the index. -type: keyword + +type: long -- -*`source.user.group.id`*:: +*`elasticsearch.index.total.docs.deleted`*:: + -- -Unique identifier for the group on the system/platform. +Total number of deleted documents in the index. -type: keyword + +type: long -- -*`source.user.group.name`*:: +*`elasticsearch.index.total.store.size.bytes`*:: + -- -Name of the group. +Total size of the index in bytes. -type: keyword + +type: long + +format: bytes -- -*`source.user.hash`*:: +*`elasticsearch.index.total.segments.count`*:: + -- -Unique user hash to correlate information for a user in anonymized form. -Useful if `user.id` or `user.name` contain confidential information and cannot be used. +Total number of index segments. -type: keyword + +type: long -- -*`source.user.id`*:: +*`elasticsearch.index.total.segments.memory.bytes`*:: + -- -Unique identifiers of the user. +Total number of memory used by the segments in bytes. -type: keyword + +type: long + +format: bytes -- -*`source.user.name`*:: +[float] +=== index.recovery + +index + + + +*`elasticsearch.index.recovery.id`*:: + -- -Short name or login of the user. +Shard recovery id. -type: keyword -example: albert +type: long -- -*`source.user.name.text`*:: +*`elasticsearch.index.recovery.type`*:: + -- -type: text - --- +Shard recovery type. -[float] -=== threat -Fields to classify events and alerts according to a threat taxonomy such as the Mitre ATT&CK framework. -These fields are for users to classify alerts from all of their sources (e.g. IDS, NGFW, etc.) within a common taxonomy. The threat.tactic.* are meant to capture the high level category of the threat (e.g. "impact"). The threat.technique.* fields are meant to capture which kind of approach is used by this detected threat, to accomplish the goal (e.g. "endpoint denial of service"). +type: keyword +-- -*`threat.framework`*:: +*`elasticsearch.index.recovery.primary`*:: + -- -Name of the threat framework used to further categorize and classify the tactic and technique of the reported threat. Framework classification can be provided by detecting systems, evaluated at ingest time, or retrospectively tagged to events. +True if primary shard. -type: keyword -example: MITRE ATT&CK +type: boolean -- -*`threat.tactic.id`*:: +*`elasticsearch.index.recovery.stage`*:: + -- -The id of tactic used by this threat. You can use the Mitre ATT&CK Matrix Tactic categorization, for example. (ex. https://attack.mitre.org/tactics/TA0040/ ) +Recovery stage. -type: keyword -example: TA0040 +type: keyword -- -*`threat.tactic.name`*:: +*`elasticsearch.index.recovery.target.id`*:: + -- -Name of the type of tactic used by this threat. You can use the Mitre ATT&CK Matrix Tactic categorization, for example. (ex. 
https://attack.mitre.org/tactics/TA0040/ ) +Target node id. -type: keyword -example: impact +type: keyword -- -*`threat.tactic.reference`*:: +*`elasticsearch.index.recovery.target.host`*:: + -- -The reference url of tactic used by this threat. You can use the Mitre ATT&CK Matrix Tactic categorization, for example. (ex. https://attack.mitre.org/tactics/TA0040/ ) +Target node host address (could be IP address or hostname). -type: keyword -example: https://attack.mitre.org/tactics/TA0040/ +type: keyword -- -*`threat.technique.id`*:: +*`elasticsearch.index.recovery.target.name`*:: + -- -The id of technique used by this tactic. You can use the Mitre ATT&CK Matrix Tactic categorization, for example. (ex. https://attack.mitre.org/techniques/T1499/ ) +Target node name. -type: keyword -example: T1499 +type: keyword -- -*`threat.technique.name`*:: +*`elasticsearch.index.recovery.source.id`*:: + -- -The name of technique used by this tactic. You can use the Mitre ATT&CK Matrix Tactic categorization, for example. (ex. https://attack.mitre.org/techniques/T1499/ ) +Source node id. -type: keyword -example: endpoint denial of service +type: keyword -- -*`threat.technique.name.text`*:: +*`elasticsearch.index.recovery.source.host`*:: + -- -type: text +Source node host address (could be IP address or hostname). + + +type: keyword -- -*`threat.technique.reference`*:: +*`elasticsearch.index.recovery.source.name`*:: + -- -The reference url of technique used by this tactic. You can use the Mitre ATT&CK Matrix Tactic categorization, for example. (ex. https://attack.mitre.org/techniques/T1499/ ) +Source node name. -type: keyword -example: https://attack.mitre.org/techniques/T1499/ +type: keyword -- [float] -=== tls +=== index.summary -Fields related to a TLS connection. These fields focus on the TLS protocol itself and intentionally avoids in-depth analysis of the related x.509 certificate files. +index -*`tls.cipher`*:: + + +*`elasticsearch.index.summary.primaries.docs.count`*:: + -- -String indicating the cipher used during the current connection. +Total number of documents in the index. -type: keyword -example: TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 +type: long -- -*`tls.client.certificate`*:: +*`elasticsearch.index.summary.primaries.docs.deleted`*:: + -- -PEM-encoded stand-alone certificate offered by the client. This is usually mutually-exclusive of `client.certificate_chain` since this value also exists in that list. +Total number of deleted documents in the index. -type: keyword -example: MII... +type: long -- -*`tls.client.certificate_chain`*:: +*`elasticsearch.index.summary.primaries.store.size.bytes`*:: + -- -Array of PEM-encoded certificates that make up the certificate chain offered by the client. This is usually mutually-exclusive of `client.certificate` since that value should be the first certificate in the chain. +Total size of the index in bytes. -type: keyword -example: ['MII...', 'MII...'] +type: long + +format: bytes -- -*`tls.client.hash.md5`*:: +*`elasticsearch.index.summary.primaries.segments.count`*:: + -- -Certificate fingerprint using the MD5 digest of DER-encoded version of certificate offered by the client. For consistency with other hash values, this value should be formatted as an uppercase hash. +Total number of index segments. 
-type: keyword -example: 0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC +type: long -- -*`tls.client.hash.sha1`*:: +*`elasticsearch.index.summary.primaries.segments.memory.bytes`*:: + -- -Certificate fingerprint using the SHA1 digest of DER-encoded version of certificate offered by the client. For consistency with other hash values, this value should be formatted as an uppercase hash. +Total number of memory used by the segments in bytes. -type: keyword -example: 9E393D93138888D288266C2D915214D1D1CCEB2A +type: long + +format: bytes -- -*`tls.client.hash.sha256`*:: + +*`elasticsearch.index.summary.total.docs.count`*:: + -- -Certificate fingerprint using the SHA256 digest of DER-encoded version of certificate offered by the client. For consistency with other hash values, this value should be formatted as an uppercase hash. +Total number of documents in the index. -type: keyword -example: 0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0 +type: long -- -*`tls.client.issuer`*:: +*`elasticsearch.index.summary.total.docs.deleted`*:: + -- -Distinguished name of subject of the issuer of the x.509 certificate presented by the client. +Total number of deleted documents in the index. -type: keyword -example: CN=MyDomain Root CA, OU=Infrastructure Team, DC=mydomain, DC=com +type: long -- -*`tls.client.ja3`*:: +*`elasticsearch.index.summary.total.store.size.bytes`*:: + -- -A hash that identifies clients based on how they perform an SSL/TLS handshake. +Total size of the index in bytes. -type: keyword -example: d4e5b18d6b55c71272893221c96ba240 +type: long + +format: bytes -- -*`tls.client.not_after`*:: +*`elasticsearch.index.summary.total.segments.count`*:: + -- -Date/Time indicating when client certificate is no longer considered valid. +Total number of index segments. -type: date -example: 2021-01-01T00:00:00.000Z +type: long -- -*`tls.client.not_before`*:: +*`elasticsearch.index.summary.total.segments.memory.bytes`*:: + -- -Date/Time indicating when client certificate is first considered valid. +Total number of memory used by the segments in bytes. -type: date -example: 1970-01-01T00:00:00.000Z +type: long --- +format: bytes -*`tls.client.server_name`*:: -+ -- -Also called an SNI, this tells the server which hostname to which the client is attempting to connect. When this value is available, it should get copied to `destination.domain`. -type: keyword +[float] +=== ml.job -example: www.elastic.co +ml --- -*`tls.client.subject`*:: + +*`elasticsearch.ml.job.id`*:: + -- -Distinguished name of subject of the x.509 certificate presented by the client. +Unique ml job id. -type: keyword -example: CN=myclient, OU=Documentation Team, DC=mydomain, DC=com +type: keyword -- -*`tls.client.supported_ciphers`*:: +*`elasticsearch.ml.job.state`*:: + -- -Array of ciphers offered by the client during the client hello. +Job state. -type: keyword -example: ['TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384', 'TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384', '...'] +type: keyword -- -*`tls.curve`*:: +*`elasticsearch.ml.job.data_counts.processed_record_count`*:: + -- -String indicating the curve used for the given cipher, when applicable. +Processed data events. -type: keyword -example: secp256r1 +type: long -- -*`tls.established`*:: +*`elasticsearch.ml.job.data_counts.invalid_date_count`*:: + -- -Boolean flag indicating if the TLS negotiation was successful and transitioned to an encrypted tunnel. +The number of records with either a missing date field or a date that could not be parsed. 
-type: boolean --- +type: long -*`tls.next_protocol`*:: -+ -- -String indicating the protocol being tunneled. Per the values in the IANA registry (https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids), this string should be lower case. -type: keyword +[float] +=== node -example: http/1.1 +node --- -*`tls.resumed`*:: + +*`elasticsearch.node.version`*:: + -- -Boolean flag indicating if this TLS connection was resumed from an existing TLS negotiation. +Node version. -type: boolean --- +type: keyword -*`tls.server.certificate`*:: -+ -- -PEM-encoded stand-alone certificate offered by the server. This is usually mutually-exclusive of `server.certificate_chain` since this value also exists in that list. -type: keyword +[float] +=== jvm -example: MII... +JVM Info. --- -*`tls.server.certificate_chain`*:: + +*`elasticsearch.node.jvm.version`*:: + -- -Array of PEM-encoded certificates that make up the certificate chain offered by the server. This is usually mutually-exclusive of `server.certificate` since that value should be the first certificate in the chain. +JVM version. -type: keyword -example: ['MII...', 'MII...'] +type: keyword -- -*`tls.server.hash.md5`*:: +*`elasticsearch.node.jvm.memory.heap.init.bytes`*:: + -- -Certificate fingerprint using the MD5 digest of DER-encoded version of certificate offered by the server. For consistency with other hash values, this value should be formatted as an uppercase hash. +Heap init used by the JVM in bytes. -type: keyword -example: 0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC +type: long + +format: bytes -- -*`tls.server.hash.sha1`*:: +*`elasticsearch.node.jvm.memory.heap.max.bytes`*:: + -- -Certificate fingerprint using the SHA1 digest of DER-encoded version of certificate offered by the server. For consistency with other hash values, this value should be formatted as an uppercase hash. +Heap max used by the JVM in bytes. -type: keyword -example: 9E393D93138888D288266C2D915214D1D1CCEB2A +type: long + +format: bytes -- -*`tls.server.hash.sha256`*:: +*`elasticsearch.node.jvm.memory.nonheap.init.bytes`*:: + -- -Certificate fingerprint using the SHA256 digest of DER-encoded version of certificate offered by the server. For consistency with other hash values, this value should be formatted as an uppercase hash. +Non-Heap init used by the JVM in bytes. -type: keyword -example: 0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0 +type: long + +format: bytes -- -*`tls.server.issuer`*:: +*`elasticsearch.node.jvm.memory.nonheap.max.bytes`*:: + -- -Subject of the issuer of the x.509 certificate presented by the server. +Non-Heap max used by the JVM in bytes. -type: keyword -example: CN=MyDomain Root CA, OU=Infrastructure Team, DC=mydomain, DC=com +type: long + +format: bytes -- -*`tls.server.ja3s`*:: +*`elasticsearch.node.process.mlockall`*:: + -- -A hash that identifies servers based on how they perform an SSL/TLS handshake. +If process locked in memory. -type: keyword -example: 394441ab65754e2207b1e1b457b3641d +type: boolean -- -*`tls.server.not_after`*:: -+ --- -Timestamp indicating when server certificate is no longer considered valid. +[float] +=== node.stats -type: date +node_stats -example: 2021-01-01T00:00:00.000Z --- -*`tls.server.not_before`*:: -+ --- -Timestamp indicating when server certificate is first considered valid. 
+[float] +=== indices -type: date +Node indices stats -example: 1970-01-01T00:00:00.000Z --- -*`tls.server.subject`*:: +*`elasticsearch.node.stats.indices.docs.count`*:: + -- -Subject of the x.509 certificate presented by the server. +Total number of existing documents. -type: keyword -example: CN=www.mydomain.com, OU=Infrastructure Team, DC=mydomain, DC=com +type: long -- -*`tls.version`*:: +*`elasticsearch.node.stats.indices.docs.deleted`*:: + -- -Numeric part of the version parsed from the original string. +Total number of deleted documents. -type: keyword -example: 1.2 +type: long -- -*`tls.version_protocol`*:: +*`elasticsearch.node.stats.indices.segments.count`*:: + -- -Normalized lowercase protocol name parsed from original string. +Total number of segments. -type: keyword -example: tls +type: long -- -[float] -=== tracing - -Distributed tracing makes it possible to analyze performance throughout a microservice architecture all in one view. This is accomplished by tracing all of the requests - from the initial web request in the front-end service - to queries made through multiple back-end services. - - -*`tracing.trace.id`*:: +*`elasticsearch.node.stats.indices.segments.memory.bytes`*:: + -- -Unique identifier of the trace. -A trace groups multiple events like transactions that belong together. For example, a user request handled by multiple inter-connected services. +Total size of segments in bytes. -type: keyword -example: 4bf92f3577b34da6a3ce929d0e0e4736 +type: long + +format: bytes -- -*`tracing.transaction.id`*:: +*`elasticsearch.node.stats.indices.store.size.bytes`*:: + -- -Unique identifier of the transaction. -A transaction is the highest level of work measured within a service, such as a request to a server. +Total size of the store in bytes. -type: keyword -example: 00f067aa0ba902b7 +type: long -- [float] -=== url +=== jvm.mem.pools -URL fields provide support for complete or partial URLs, and supports the breaking down into scheme, domain, path, and so on. +JVM memory pool stats -*`url.domain`*:: -+ --- -Domain of the url, such as "www.elastic.co". -In some cases a URL may refer to an IP and/or port directly, without a domain name. In this case, the IP address would go to the `domain` field. -type: keyword +[float] +=== old -example: www.elastic.co +Old memory pool stats. --- -*`url.extension`*:: + +*`elasticsearch.node.stats.jvm.mem.pools.old.max.bytes`*:: + -- -The field contains the file extension from the original request url. -The file extension is only set if it exists, as not every url has a file extension. -The leading period must not be included. For example, the value must be "png", not ".png". +Max bytes. -type: keyword +type: long -example: png +format: bytes -- -*`url.fragment`*:: +*`elasticsearch.node.stats.jvm.mem.pools.old.peak.bytes`*:: + -- -Portion of the url after the `#`, such as "top". -The `#` is not part of the fragment. +Peak bytes. -type: keyword +type: long + +format: bytes -- -*`url.full`*:: +*`elasticsearch.node.stats.jvm.mem.pools.old.peak_max.bytes`*:: + -- -If full URLs are important to your use case, they should be stored in `url.full`, whether this field is reconstructed or present in the event source. +Peak max bytes. -type: keyword +type: long -example: https://www.elastic.co:443/search?q=elasticsearch#top +format: bytes -- -*`url.full.text`*:: +*`elasticsearch.node.stats.jvm.mem.pools.old.used.bytes`*:: + -- -type: text +Used bytes. 
--- +type: long + +format: bytes -*`url.original`*:: -+ -- -Unmodified original url as seen in the event source. -Note that in network monitoring, the observed URL may be a full URL, whereas in access logs, the URL is often just represented as a path. -This field is meant to represent the URL as it was observed, complete or not. -type: keyword +[float] +=== young -example: https://www.elastic.co:443/search?q=elasticsearch#top or /search?q=elasticsearch +Young memory pool stats. --- -*`url.original.text`*:: + +*`elasticsearch.node.stats.jvm.mem.pools.young.max.bytes`*:: + -- -type: text +Max bytes. + +type: long + +format: bytes -- -*`url.password`*:: +*`elasticsearch.node.stats.jvm.mem.pools.young.peak.bytes`*:: + -- -Password of the request. +Peak bytes. -type: keyword +type: long + +format: bytes -- -*`url.path`*:: +*`elasticsearch.node.stats.jvm.mem.pools.young.peak_max.bytes`*:: + -- -Path of the request, such as "/search". +Peak max bytes. -type: keyword +type: long + +format: bytes -- -*`url.port`*:: +*`elasticsearch.node.stats.jvm.mem.pools.young.used.bytes`*:: + -- -Port of the request, such as 443. +Used bytes. type: long -example: 443 - -format: string +format: bytes -- -*`url.query`*:: -+ --- -The query field describes the query string of the request, such as "q=elasticsearch". -The `?` is excluded from the query string. If a URL contains no `?`, there is no query field. If there is a `?` but no query, the query field exists with an empty string. The `exists` query can be used to differentiate between the two cases. +[float] +=== survivor -type: keyword +Survivor memory pool stats. --- -*`url.registered_domain`*:: + +*`elasticsearch.node.stats.jvm.mem.pools.survivor.max.bytes`*:: + -- -The highest registered url domain, stripped of the subdomain. -For example, the registered domain for "foo.google.com" is "google.com". -This value can be determined precisely with a list like the public suffix list (http://publicsuffix.org). Trying to approximate this by simply taking the last two labels will not work well for TLDs such as "co.uk". +Max bytes. -type: keyword +type: long -example: google.com +format: bytes -- -*`url.scheme`*:: +*`elasticsearch.node.stats.jvm.mem.pools.survivor.peak.bytes`*:: + -- -Scheme of the request, such as "https". -Note: The `:` is not part of the scheme. +Peak bytes. -type: keyword +type: long -example: https +format: bytes -- -*`url.top_level_domain`*:: +*`elasticsearch.node.stats.jvm.mem.pools.survivor.peak_max.bytes`*:: + -- -The effective top level domain (eTLD), also known as the domain suffix, is the last part of the domain name. For example, the top level domain for google.com is "com". -This value can be determined precisely with a list like the public suffix list (http://publicsuffix.org). Trying to approximate this by simply taking the last label will not work well for effective TLDs such as "co.uk". +Peak max bytes. -type: keyword +type: long -example: co.uk +format: bytes -- -*`url.username`*:: +*`elasticsearch.node.stats.jvm.mem.pools.survivor.used.bytes`*:: + -- -Username of the request. +Used bytes. -type: keyword +type: long + +format: bytes -- [float] -=== user - -The user fields describe information about the user that is relevant to the event. -Fields can have one entry or multiple entries. If a user has more than one id, provide an array that includes all of them. +=== jvm.gc.collectors +GC collector stats. -*`user.domain`*:: -+ --- -Name of the directory the user is a member of. -For example, an LDAP or Active Directory domain name. 
-type: keyword --- +[float] +=== old.collection -*`user.email`*:: -+ --- -User email address. +Old collection gc. -type: keyword --- -*`user.full_name`*:: +*`elasticsearch.node.stats.jvm.gc.collectors.old.collection.count`*:: + -- -User's full name, if available. -type: keyword -example: Albert Einstein +type: long -- -*`user.full_name.text`*:: +*`elasticsearch.node.stats.jvm.gc.collectors.old.collection.ms`*:: + -- -type: text --- -*`user.group.domain`*:: -+ +type: long + -- -Name of the directory the group is a member of. -For example, an LDAP or Active Directory domain name. -type: keyword +[float] +=== young.collection --- +Young collection gc. -*`user.group.id`*:: + + +*`elasticsearch.node.stats.jvm.gc.collectors.young.collection.count`*:: + -- -Unique identifier for the group on the system/platform. -type: keyword + +type: long -- -*`user.group.name`*:: +*`elasticsearch.node.stats.jvm.gc.collectors.young.collection.ms`*:: + -- -Name of the group. -type: keyword --- +type: long -*`user.hash`*:: -+ -- -Unique user hash to correlate information for a user in anonymized form. -Useful if `user.id` or `user.name` contain confidential information and cannot be used. -type: keyword +[float] +=== fs.summary --- +File system summary -*`user.id`*:: + + +*`elasticsearch.node.stats.fs.summary.total.bytes`*:: + -- -Unique identifiers of the user. -type: keyword + +type: long + +format: bytes -- -*`user.name`*:: +*`elasticsearch.node.stats.fs.summary.free.bytes`*:: + -- -Short name or login of the user. -type: keyword -example: albert +type: long + +format: bytes -- -*`user.name.text`*:: +*`elasticsearch.node.stats.fs.summary.available.bytes`*:: + -- -type: text + + +type: long + +format: bytes -- [float] -=== user_agent +=== cluster.pending_task -The user_agent fields normally come from a browser request. -They often show up in web service logs coming from the parsed user agent string. +`cluster.pending_task` contains a pending task description. -*`user_agent.device.name`*:: + +*`elasticsearch.cluster.pending_task.insert_order`*:: + -- -Name of the device. +Insert order -type: keyword -example: iPhone +type: long -- -*`user_agent.name`*:: +*`elasticsearch.cluster.pending_task.priority`*:: + -- -Name of the user agent. +Priority -type: keyword -example: Safari +type: long -- -*`user_agent.original`*:: +*`elasticsearch.cluster.pending_task.source`*:: + -- -Unparsed user_agent string. +Source. For example: put-mapping -type: keyword -example: Mozilla/5.0 (iPhone; CPU iPhone OS 12_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.0 Mobile/15E148 Safari/604.1 +type: keyword -- -*`user_agent.original.text`*:: +*`elasticsearch.cluster.pending_task.time_in_queue.ms`*:: + -- -type: text +Time in queue --- -*`user_agent.os.family`*:: -+ +type: long + -- -OS family (such as redhat, debian, freebsd, windows). -type: keyword +[float] +=== shard -example: debian +shard fields --- -*`user_agent.os.full`*:: + +*`elasticsearch.shard.primary`*:: + -- -Operating system name, including the version or code name. +True if this is the primary shard. -type: keyword -example: Mac OS Mojave +type: boolean -- -*`user_agent.os.full.text`*:: +*`elasticsearch.shard.number`*:: + -- -type: text +The number of this shard. + + +type: long -- -*`user_agent.os.kernel`*:: +*`elasticsearch.shard.state`*:: + -- -Operating system kernel version as a raw string. +The state of this shard. 
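The `collection.count` and `collection.ms` fields above are lifetime counters, so a usable figure such as "average old-generation GC pause" has to be derived from the delta between two consecutive samples. A small sketch of that arithmetic (the sample values are invented):

[source,go]
----
package main

import "fmt"

type gcSample struct {
	count int64 // elasticsearch.node.stats.jvm.gc.collectors.old.collection.count
	ms    int64 // elasticsearch.node.stats.jvm.gc.collectors.old.collection.ms
}

// avgPauseMs returns the mean pause length between two samples of the counters.
func avgPauseMs(prev, cur gcSample) float64 {
	dc := cur.count - prev.count
	if dc <= 0 {
		return 0 // no collections happened between the two samples
	}
	return float64(cur.ms-prev.ms) / float64(dc)
}

func main() {
	prev := gcSample{count: 120, ms: 4500}
	cur := gcSample{count: 126, ms: 4860}
	fmt.Printf("avg old-gen pause: %.1f ms\n", avgPauseMs(prev, cur)) // 60.0 ms
}
----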
-type: keyword -example: 4.4.0-112-generic +type: keyword -- -*`user_agent.os.name`*:: +*`elasticsearch.shard.relocating_node.name`*:: + -- -Operating system name, without the version. +The node the shard was relocated from. -type: keyword -example: Mac OS X +type: keyword -- -*`user_agent.os.name.text`*:: -+ --- -type: text +[[exported-fields-envoyproxy]] +== envoyproxy fields --- +envoyproxy module -*`user_agent.os.platform`*:: -+ --- -Operating system platform (such centos, ubuntu, windows). -type: keyword -example: darwin +[float] +=== envoyproxy --- -*`user_agent.os.version`*:: -+ --- -Operating system version as a raw string. -type: keyword -example: 10.14.1 +[float] +=== server --- +Contains envoy proxy server stats -*`user_agent.version`*:: -+ --- -Version of the user agent. -type: keyword -example: 12.0 +*`envoyproxy.server.cluster_manager.active_clusters`*:: ++ -- +Number of currently active (warmed) clusters -[float] -=== vlan -The VLAN fields are used to identify 802.1q tag(s) of a packet, as well as ingress and egress VLAN associations of an observer in relation to a specific packet or connection. -Network.vlan fields are used to record a single VLAN tag, or the outer tag in the case of q-in-q encapsulations, for a packet or connection as observed, typically provided by a network sensor (e.g. Zeek, Wireshark) passively reporting on traffic. -Network.inner VLAN fields are used to report inner q-in-q 802.1q tags (multiple 802.1q encapsulations) as observed, typically provided by a network sensor (e.g. Zeek, Wireshark) passively reporting on traffic. Network.inner VLAN fields should only be used in addition to network.vlan fields to indicate q-in-q tagging. -Observer.ingress and observer.egress VLAN values are used to record observer specific information when observer events contain discrete ingress and egress VLAN information, typically provided by firewalls, routers, or load balancers. +type: integer +-- -*`vlan.id`*:: +*`envoyproxy.server.cluster_manager.cluster_added`*:: + -- -VLAN ID as reported by the observer. +Total clusters added (either via static config or CDS) -type: keyword -example: 10 +type: integer -- -*`vlan.name`*:: +*`envoyproxy.server.cluster_manager.cluster_modified`*:: + -- -Optional VLAN name as reported by the observer. +Total clusters modified (via CDS) -type: keyword -example: outside +type: integer -- -[float] -=== vulnerability +*`envoyproxy.server.cluster_manager.cluster_removed`*:: ++ +-- +Total clusters removed (via CDS) -The vulnerability fields describe information about a vulnerability that is relevant to an event. +type: integer -*`vulnerability.category`*:: +-- + +*`envoyproxy.server.cluster_manager.warming_clusters`*:: + -- -The type of system or architecture that the vulnerability affects. These may be platform-specific (for example, Debian or SUSE) or general (for example, Database or Firewall). For example (https://qualysguard.qualys.com/qwebhelp/fo_portal/knowledgebase/vulnerability_categories.htm[Qualys vulnerability categories]) -This field must be an array. +Number of currently warming (not active) clusters -type: keyword -example: ["Firewall"] +type: integer -- -*`vulnerability.classification`*:: +*`envoyproxy.server.cluster_manager.cluster_updated`*:: + -- -The classification of the vulnerability scoring system. 
For example (https://www.first.org/cvss/) +Total cluster updates -type: keyword -example: CVSS +type: integer -- -*`vulnerability.description`*:: +*`envoyproxy.server.cluster_manager.cluster_updated_via_merge`*:: + -- -The description of the vulnerability that provides additional context of the vulnerability. For example (https://cve.mitre.org/about/faqs.html#cve_entry_descriptions_created[Common Vulnerabilities and Exposure CVE description]) +Total cluster updates applied as merged updates -type: keyword -example: In macOS before 2.12.6, there is a vulnerability in the RPC... +type: integer -- -*`vulnerability.description.text`*:: +*`envoyproxy.server.cluster_manager.update_merge_cancelled`*:: + -- -type: text +Total merged updates that got cancelled and delivered early + + +type: integer -- -*`vulnerability.enumeration`*:: +*`envoyproxy.server.cluster_manager.update_out_of_merge_window`*:: + -- -The type of identifier used for this vulnerability. For example (https://cve.mitre.org/about/) +Total updates which arrived out of a merge window -type: keyword -example: CVE +type: integer -- -*`vulnerability.id`*:: + +*`envoyproxy.server.filesystem.flushed_by_timer`*:: + -- -The identification (ID) is the number portion of a vulnerability entry. It includes a unique identification number for the vulnerability. For example (https://cve.mitre.org/about/faqs.html#what_is_cve_id)[Common Vulnerabilities and Exposure CVE ID] +Total number of times internal flush buffers are written to a file due to flush timeout -type: keyword -example: CVE-2019-00001 +type: integer -- -*`vulnerability.reference`*:: +*`envoyproxy.server.filesystem.reopen_failed`*:: + -- -A resource that provides additional information, context, and mitigations for the identified vulnerability. +Total number of times a file was failed to be opened -type: keyword -example: https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-6111 +type: integer -- -*`vulnerability.report_id`*:: +*`envoyproxy.server.filesystem.write_buffered`*:: + -- -The report or scan identification number. +Total number of times file data is moved to Envoys internal flush buffer -type: keyword -example: 20191018.0001 +type: integer -- -*`vulnerability.scanner.vendor`*:: +*`envoyproxy.server.filesystem.write_completed`*:: + -- -The name of the vulnerability scanner vendor. +Total number of times a file was written -type: keyword -example: Tenable +type: integer -- -*`vulnerability.score.base`*:: +*`envoyproxy.server.filesystem.write_total_buffered`*:: + -- -Scores can range from 0.0 to 10.0, with 10.0 being the most severe. -Base scores cover an assessment for exploitability metrics (attack vector, complexity, privileges, and user interaction), impact metrics (confidentiality, integrity, and availability), and scope. For example (https://www.first.org/cvss/specification-document) +Current total size of internal flush buffer in bytes -type: float -example: 5.5 +type: integer -- -*`vulnerability.score.environmental`*:: +*`envoyproxy.server.filesystem.write_failed`*:: + -- -Scores can range from 0.0 to 10.0, with 10.0 being the most severe. -Environmental scores cover an assessment for any modified Base metrics, confidentiality, integrity, and availability requirements. 
For example (https://www.first.org/cvss/specification-document) +Total number of times an error occurred during a file write operation -type: float -example: 5.5 +type: integer -- -*`vulnerability.score.temporal`*:: + +*`envoyproxy.server.runtime.load_error`*:: + -- -Scores can range from 0.0 to 10.0, with 10.0 being the most severe. -Temporal scores cover an assessment for code maturity, remediation level, and confidence. For example (https://www.first.org/cvss/specification-document) +Total number of load attempts that resulted in an error in any layer -type: float + +type: integer -- -*`vulnerability.score.version`*:: +*`envoyproxy.server.runtime.load_success`*:: + -- -The National Vulnerability Database (NVD) provides qualitative severity rankings of "Low", "Medium", and "High" for CVSS v2.0 base score ranges in addition to the severity ratings for CVSS v3.0 as they are defined in the CVSS v3.0 specification. -CVSS is owned and managed by FIRST.Org, Inc. (FIRST), a US-based non-profit organization, whose mission is to help computer security incident response teams across the world. For example (https://nvd.nist.gov/vuln-metrics/cvss) +Total number of load attempts that were successful at all layers -type: keyword -example: 2.0 +type: integer -- -*`vulnerability.severity`*:: +*`envoyproxy.server.runtime.num_keys`*:: + -- -The severity of the vulnerability can help with metrics and internal prioritization regarding remediation. For example (https://nvd.nist.gov/vuln-metrics/cvss) +Number of keys currently loaded -type: keyword -example: Critical +type: integer -- -[[exported-fields-elasticsearch]] -== Elasticsearch fields +*`envoyproxy.server.runtime.override_dir_exists`*:: ++ +-- +Total number of loads that did use an override directory -Elasticsearch module +type: integer +-- -[float] -=== elasticsearch +*`envoyproxy.server.runtime.override_dir_not_exists`*:: ++ +-- +Total number of loads that did not use an override directory +type: integer +-- -*`elasticsearch.cluster.name`*:: +*`envoyproxy.server.runtime.admin_overrides_active`*:: + -- -Elasticsearch cluster name. +1 if any admin overrides are active otherwise 0 -type: keyword +type: integer -- -*`elasticsearch.cluster.id`*:: +*`envoyproxy.server.runtime.deprecated_feature_use`*:: + -- -Elasticsearch cluster id. +Total number of times deprecated features were used. -type: keyword +type: integer -- -*`elasticsearch.cluster.state.id`*:: +*`envoyproxy.server.runtime.num_layers`*:: + -- -Elasticsearch state id. +Number of layers currently active (without loading errors) -type: keyword +type: integer -- -*`elasticsearch.node.id`*:: + +*`envoyproxy.server.listener_manager.listener_added`*:: + -- -Node ID +Total listeners added (either via static config or LDS) -type: keyword +type: integer -- -*`elasticsearch.node.name`*:: +*`envoyproxy.server.listener_manager.listener_create_failure`*:: + -- -Node name. 
+Total failed listener object additions to workers -type: keyword +type: integer -- -[float] -=== ccr - -Cross-cluster replication stats +*`envoyproxy.server.listener_manager.listener_create_success`*:: ++ +-- +Total listener objects successfully added to workers +type: integer +-- -*`elasticsearch.ccr.leader.index`*:: +*`envoyproxy.server.listener_manager.listener_modified`*:: + -- -Name of leader index +Total listeners modified (via LDS) -type: keyword +type: integer -- -*`elasticsearch.ccr.leader.max_seq_no`*:: +*`envoyproxy.server.listener_manager.listener_removed`*:: + -- -Maximum sequence number of operation on the leader shard +Total listeners removed (via LDS) -type: long +type: integer -- - -*`elasticsearch.ccr.follower.index`*:: +*`envoyproxy.server.listener_manager.total_listeners_active`*:: + -- -Name of follower index +Number of currently active listeners -type: keyword +type: integer -- -*`elasticsearch.ccr.follower.shard.number`*:: +*`envoyproxy.server.listener_manager.total_listeners_draining`*:: + -- -Number of the shard within the index +Number of currently draining listeners -type: long +type: integer -- -*`elasticsearch.ccr.follower.operations_written`*:: +*`envoyproxy.server.listener_manager.total_listeners_warming`*:: + -- -Number of operations indexed (replicated) into the follower shard from the leader shard +Number of currently warming listeners -type: long +type: integer -- -*`elasticsearch.ccr.follower.time_since_last_read.ms`*:: +*`envoyproxy.server.listener_manager.listener_stopped`*:: + -- -Time, in ms, since the follower last fetched from the leader +Total listeners stopped -type: long +type: integer -- -*`elasticsearch.ccr.follower.global_checkpoint`*:: + +*`envoyproxy.server.stats.overflow`*:: + -- -Global checkpoint value on follower shard +Total number of times Envoy cannot allocate a statistic due to a shortage of shared memory -type: long +type: integer -- -[float] -=== cluster.stats -Cluster stats +*`envoyproxy.server.server.days_until_first_cert_expiring`*:: ++ +-- +Number of days until the next certificate being managed will expire +type: integer -*`elasticsearch.cluster.stats.status`*:: +-- + +*`envoyproxy.server.server.live`*:: + -- -Cluster status (green, yellow, red). +1 if the server is not currently draining, 0 otherwise -type: keyword +type: integer -- -[float] -=== nodes +*`envoyproxy.server.server.memory_allocated`*:: ++ +-- +Current amount of allocated memory in bytes -Nodes statistics. +type: integer +-- -*`elasticsearch.cluster.stats.nodes.count`*:: +*`envoyproxy.server.server.memory_heap_size`*:: + -- -Total number of nodes in cluster. +Current reserved heap size in bytes -type: long +type: integer -- -*`elasticsearch.cluster.stats.nodes.master`*:: +*`envoyproxy.server.server.parent_connections`*:: + -- -Number of master-eligible nodes in cluster. +Total connections of the old Envoy process on hot restart -type: long +type: integer -- -*`elasticsearch.cluster.stats.nodes.data`*:: +*`envoyproxy.server.server.total_connections`*:: + -- -Number of data nodes in cluster. +Total connections of both new and old Envoy processes -type: long +type: integer -- -[float] -=== indices +*`envoyproxy.server.server.uptime`*:: ++ +-- +Current server uptime in seconds -Indices statistics. +type: integer +-- -*`elasticsearch.cluster.stats.indices.count`*:: +*`envoyproxy.server.server.version`*:: + -- -Total number of indices in cluster. 
+Integer represented version number based on SCM revision -type: long +type: integer -- -[float] -=== shards +*`envoyproxy.server.server.watchdog_mega_miss`*:: ++ +-- +type: integer -Shard statistics. +-- +*`envoyproxy.server.server.watchdog_miss`*:: ++ +-- +type: integer +-- -*`elasticsearch.cluster.stats.indices.shards.count`*:: +*`envoyproxy.server.server.hot_restart_epoch`*:: + -- -Total number of shards in cluster. +Current hot restart epoch -type: long +type: integer -- -*`elasticsearch.cluster.stats.indices.shards.primaries`*:: +*`envoyproxy.server.server.concurrency`*:: + -- -Total number of primary shards in cluster. +Number of worker threads -type: long +type: integer -- -*`elasticsearch.cluster.stats.indices.fielddata.memory.bytes`*:: +*`envoyproxy.server.server.debug_assertion_failures`*:: + -- -Memory used for fielddata. - - -type: long +type: integer -- -[float] -=== enrich +*`envoyproxy.server.server.dynamic_unknown_fields`*:: ++ +-- +Number of messages in dynamic configuration with unknown fields -Enrich stats +type: integer +-- -*`elasticsearch.enrich.queue.size`*:: +*`envoyproxy.server.server.state`*:: + -- -Number of search requests in the queue. +Current state of the Server -type: long +type: integer -- - -*`elasticsearch.enrich.remote_requests.current`*:: +*`envoyproxy.server.server.static_unknown_fields`*:: + -- -Current number of outstanding remote requests. +Number of messages in static configuration with unknown fields -type: long +type: integer -- -*`elasticsearch.enrich.remote_requests.total`*:: +*`envoyproxy.server.server.stats_recent_lookups`*:: + -- -Number of outstanding remote requests executed since node startup. - - -type: long +type: integer -- -*`elasticsearch.enrich.executed_searches.total`*:: + +*`envoyproxy.server.http2.header_overflow`*:: + -- -Number of search requests that enrich processors have executed since node startup. +Total number of connections reset due to the headers being larger than Envoy::Http::Http2::ConnectionImpl::StreamImpl::MAX_HEADER_SIZE (63k) -type: long +type: integer -- -[float] -=== index +*`envoyproxy.server.http2.headers_cb_no_stream`*:: ++ +-- +Total number of errors where a header callback is called without an associated stream. This tracks an unexpected occurrence due to an as yet undiagnosed bug -index +type: integer +-- -*`elasticsearch.index.name`*:: +*`envoyproxy.server.http2.rx_messaging_error`*:: + -- -Index name. +Total number of invalid received frames that violated section 8 of the HTTP/2 spec. This will result in a tx_reset -type: keyword +type: integer -- - -*`elasticsearch.index.total.docs.count`*:: +*`envoyproxy.server.http2.rx_reset`*:: + -- -Total number of documents in the index. +Total number of reset stream frames received by Envoy -type: long +type: integer -- -*`elasticsearch.index.total.docs.deleted`*:: +*`envoyproxy.server.http2.too_many_header_frames`*:: + -- -Total number of deleted documents in the index. +Total number of times an HTTP2 connection is reset due to receiving too many headers frames. Envoy currently supports proxying at most one header frame for 100-Continue one non-100 response code header frame and one frame with trailers -type: long +type: integer -- -*`elasticsearch.index.total.store.size.bytes`*:: +*`envoyproxy.server.http2.trailers`*:: + -- -Total size of the index in bytes. 
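All of the `envoyproxy.server.*` stats in this section are served by Envoy's admin interface as plain `name: value` text lines (`GET /stats`). A rough sketch of scraping a couple of them; the admin address `localhost:9901` is an assumption:

[source,go]
----
package main

import (
	"bufio"
	"fmt"
	"net/http"
	"strconv"
	"strings"
)

func main() {
	resp, err := http.Get("http://localhost:9901/stats")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	stats := map[string]int64{}
	sc := bufio.NewScanner(resp.Body)
	for sc.Scan() {
		name, val, ok := strings.Cut(sc.Text(), ": ")
		if !ok {
			continue
		}
		// Histogram lines are not plain integers and are skipped here.
		if n, err := strconv.ParseInt(val, 10, 64); err == nil {
			stats[name] = n
		}
	}
	fmt.Println("active clusters:", stats["cluster_manager.active_clusters"])
	fmt.Println("uptime (s):    ", stats["server.uptime"])
}
----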
- +Total number of trailers seen on requests coming from downstream -type: long -format: bytes +type: integer -- -*`elasticsearch.index.total.segments.count`*:: +*`envoyproxy.server.http2.tx_reset`*:: + -- -Total number of index segments. - +Total number of reset stream frames transmitted by Envoy -type: long --- +type: integer -*`elasticsearch.index.total.segments.memory.bytes`*:: -+ -- -Total number of memory used by the segments in bytes. +[[exported-fields-etcd]] +== Etcd fields -type: long +Etcd module -format: bytes --- [float] -=== index.recovery +=== etcd -index +`etcd` contains statistics that were read from Etcd -*`elasticsearch.index.recovery.id`*:: +*`etcd.api_version`*:: + -- -Shard recovery id. +Etcd API version for metrics retrieval -type: long +type: keyword -- -*`elasticsearch.index.recovery.type`*:: -+ --- -Shard recovery type. +[float] +=== leader +Contains etcd leader statistics. -type: keyword --- -*`elasticsearch.index.recovery.primary`*:: -+ --- -True if primary shard. +[float] +=== followers.counts +The number of failed and successful Raft RPC requests. -type: boolean --- -*`elasticsearch.index.recovery.stage`*:: +*`etcd.leader.followers.counts.followers.counts.success`*:: + -- -Recovery stage. - +Successful Raft RPC requests -type: keyword +type: integer -- -*`elasticsearch.index.recovery.target.id`*:: +*`etcd.leader.followers.counts.followers.counts.fail`*:: + -- -Target node id. - +Failed Raft RPC requests -type: keyword +type: integer -- -*`elasticsearch.index.recovery.target.host`*:: -+ --- -Target node host address (could be IP address or hostname). +[float] +=== followers.latency +Latency to each peer in the cluster -type: keyword --- -*`elasticsearch.index.recovery.target.name`*:: +*`etcd.leader.followers.latency.followers.latency.average`*:: + -- -Target node name. - - -type: keyword +type: scaled_float -- -*`elasticsearch.index.recovery.source.id`*:: +*`etcd.leader.followers.latency.followers.latency.current`*:: + -- -Source node id. +type: scaled_float +-- -type: keyword +*`etcd.leader.followers.latency.followers.latency.maximum`*:: ++ +-- +type: scaled_float -- -*`elasticsearch.index.recovery.source.host`*:: +*`etcd.leader.followers.latency.followers.latency.minimum`*:: + -- -Source node host address (could be IP address or hostname). +type: integer +-- -type: keyword +*`etcd.leader.followers.latency.follower.latency.standardDeviation`*:: ++ +-- +type: scaled_float -- -*`elasticsearch.index.recovery.source.name`*:: +*`etcd.leader.leader`*:: + -- -Source node name. - +ID of the actual leader type: keyword -- [float] -=== index.summary - -index +=== server +Server metrics from the Etcd V3 /metrics endpoint -*`elasticsearch.index.summary.primaries.docs.count`*:: +*`etcd.server.has_leader`*:: + -- -Total number of documents in the index. +Whether a leader exists in the cluster -type: long +type: byte -- -*`elasticsearch.index.summary.primaries.docs.deleted`*:: +*`etcd.server.leader_changes.count`*:: + -- -Total number of deleted documents in the index. +Number of leader changes seen in the cluster type: long -- -*`elasticsearch.index.summary.primaries.store.size.bytes`*:: +*`etcd.server.proposals_committed.count`*:: + -- -Total size of the index in bytes. +Number of consensus proposals committed type: long -format: bytes - -- -*`elasticsearch.index.summary.primaries.segments.count`*:: +*`etcd.server.proposals_pending.count`*:: + -- -Total number of index segments.
+Number of consensus proposals pending type: long -- -*`elasticsearch.index.summary.primaries.segments.memory.bytes`*:: +*`etcd.server.proposals_failed.count`*:: + -- -Total number of memory used by the segments in bytes. +Number of consensus proposals failed type: long -format: bytes - -- - -*`elasticsearch.index.summary.total.docs.count`*:: +*`etcd.server.grpc_started.count`*:: + -- -Total number of documents in the index. +Number of gRPC requests started on the server type: long -- -*`elasticsearch.index.summary.total.docs.deleted`*:: +*`etcd.server.grpc_handled.count`*:: + -- -Total number of deleted documents in the index. +Number of gRPC requests completed on the server type: long -- -*`elasticsearch.index.summary.total.store.size.bytes`*:: +[float] +=== disk + +Disk metrics from the Etcd V3 /metrics endpoint + + + +*`etcd.disk.mvcc_db_total_size.bytes`*:: + -- -Total size of the index in bytes. +Size of stored data at MVCC type: long @@ -15124,69 +17550,60 @@ format: bytes -- -*`elasticsearch.index.summary.total.segments.count`*:: +*`etcd.disk.wal_fsync_duration.ns.bucket.*`*:: + -- -Total number of index segments. +Latency for writing write-ahead logs to disk -type: long +type: object -- -*`elasticsearch.index.summary.total.segments.memory.bytes`*:: +*`etcd.disk.wal_fsync_duration.ns.count`*:: + -- -Total number of memory used by the segments in bytes. +Write-ahead log fsync count type: long -format: bytes - -- -[float] -=== ml.job - -ml - - - -*`elasticsearch.ml.job.id`*:: +*`etcd.disk.wal_fsync_duration.ns.sum`*:: + -- -Unique ml job id. +Write-ahead log fsync latency sum -type: keyword +type: long -- -*`elasticsearch.ml.job.state`*:: +*`etcd.disk.backend_commit_duration.ns.bucket.*`*:: + -- -Job state. +Latency for writing backend changes to disk -type: keyword +type: object -- -*`elasticsearch.ml.job.data_counts.processed_record_count`*:: +*`etcd.disk.backend_commit_duration.ns.count`*:: + -- -Processed data events. +Backend commits count type: long -- -*`elasticsearch.ml.job.data_counts.invalid_date_count`*:: +*`etcd.disk.backend_commit_duration.ns.sum`*:: + -- -The number of records with either a missing date field or a date that could not be parsed. +Backend commits latency sum type: long @@ -15194,43 +17611,35 @@ type: long -- [float] -=== node +=== memory -node +Memory metrics from the Etcd V3 /metrics endpoint -*`elasticsearch.node.version`*:: +*`etcd.memory.go_memstats_alloc.bytes`*:: + -- -Node version. +Bytes of allocated heap memory, as reported by Go MemStats -type: keyword +type: long + +format: bytes -- [float] -=== jvm - -JVM Info. - - - -*`elasticsearch.node.jvm.version`*:: -+ --- -JVM version. +=== network +Network metrics from the Etcd V3 /metrics endpoint -type: keyword --- -*`elasticsearch.node.jvm.memory.heap.init.bytes`*:: +*`etcd.network.client_grpc_sent.bytes`*:: + -- -Heap init used by the JVM in bytes. +Total number of bytes sent to gRPC clients type: long @@ -15239,10 +17648,10 @@ format: bytes -- -*`elasticsearch.node.jvm.memory.heap.max.bytes`*:: +*`etcd.network.client_grpc_received.bytes`*:: + -- -Heap max used by the JVM in bytes. +Total number of bytes received from gRPC clients type: long @@ -15251,1933 +17660,1823 @@ format: bytes -- -*`elasticsearch.node.jvm.memory.nonheap.init.bytes`*:: -+ --- -Non-Heap init used by the JVM in bytes. - +[float] +=== self -type: long +Contains etcd self statistics. -format: bytes --- -*`elasticsearch.node.jvm.memory.nonheap.max.bytes`*:: + -- -Non-Heap max used by the JVM in bytes.
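The `bucket.*`, `count` and `sum` trio exposed for `wal_fsync_duration` and `backend_commit_duration` follows Prometheus histogram conventions: cumulative per-bucket counts plus a running sum and count, so the mean latency is simply the sum divided by the count. A sketch of that calculation (the sample values are invented):

[source,go]
----
package main

import "fmt"

func main() {
	// etcd.disk.wal_fsync_duration.ns.sum and .count (values are made up).
	var sumNs, count int64 = 1_250_000_000, 500

	// Mean fsync latency, converted from nanoseconds to milliseconds.
	meanMs := float64(sumNs) / float64(count) / 1e6
	fmt.Printf("mean WAL fsync latency: %.2f ms\n", meanMs) // 2.50 ms
}
----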
- +the unique identifier for the member -type: long -format: bytes +type: keyword -- -*`elasticsearch.node.process.mlockall`*:: +*`etcd.self.leaderinfo.leader`*:: + -- -If process locked in memory. +id of the current leader member -type: boolean +type: keyword -- -[float] -=== node.stats - -node_stats - - - -[float] -=== indices - -Node indices stats - - - -*`elasticsearch.node.stats.indices.docs.count`*:: +*`etcd.self.leaderinfo.starttime`*:: + -- -Total number of existing documents. +the time when this node was started -type: long +type: keyword -- -*`elasticsearch.node.stats.indices.docs.deleted`*:: +*`etcd.self.leaderinfo.uptime`*:: + -- -Total number of deleted documents. +amount of time the leader has been leader -type: long +type: keyword -- -*`elasticsearch.node.stats.indices.segments.count`*:: +*`etcd.self.name`*:: + -- -Total number of segments. +this member's name -type: long +type: keyword -- -*`elasticsearch.node.stats.indices.segments.memory.bytes`*:: +*`etcd.self.recv.appendrequest.count`*:: + -- -Total size of segments in bytes. - +number of append requests this node has processed -type: long -format: bytes +type: integer -- -*`elasticsearch.node.stats.indices.store.size.bytes`*:: +*`etcd.self.recv.bandwidthrate`*:: + -- -Total size of the store in bytes. +number of bytes per second this node is receiving (follower only) -type: long +type: scaled_float -- -[float] -=== jvm.mem.pools +*`etcd.self.recv.pkgrate`*:: ++ +-- +number of requests per second this node is receiving (follower only) -JVM memory pool stats +type: scaled_float +-- -[float] -=== old +*`etcd.self.send.appendrequest.count`*:: ++ +-- +number of requests that this node has sent -Old memory pool stats. +type: integer +-- -*`elasticsearch.node.stats.jvm.mem.pools.old.max.bytes`*:: +*`etcd.self.send.bandwidthrate`*:: + -- -Max bytes. +number of bytes per second this node is sending (leader only). This value is undefined on single member clusters. -type: long -format: bytes +type: scaled_float -- -*`elasticsearch.node.stats.jvm.mem.pools.old.peak.bytes`*:: +*`etcd.self.send.pkgrate`*:: + -- -Peak bytes. +number of requests per second this node is sending (leader only). This value is undefined on single member clusters. -type: long -format: bytes +type: scaled_float -- -*`elasticsearch.node.stats.jvm.mem.pools.old.peak_max.bytes`*:: +*`etcd.self.starttime`*:: + -- -Peak max bytes. +the time when this node was started -type: long -format: bytes +type: keyword -- -*`elasticsearch.node.stats.jvm.mem.pools.old.used.bytes`*:: +*`etcd.self.state`*:: + -- -Used bytes. +either leader or follower -type: long -format: bytes +type: keyword -- [float] -=== young +=== store -Young memory pool stats. +The store statistics include information about the operations that this node has handled. -*`elasticsearch.node.stats.jvm.mem.pools.young.max.bytes`*:: +*`etcd.store.gets.success`*:: + -- -Max bytes. - -type: long - -format: bytes +type: integer -- -*`elasticsearch.node.stats.jvm.mem.pools.young.peak.bytes`*:: +*`etcd.store.gets.fail`*:: + -- -Peak bytes. - -type: long - -format: bytes +type: integer -- -*`elasticsearch.node.stats.jvm.mem.pools.young.peak_max.bytes`*:: +*`etcd.store.sets.success`*:: + -- -Peak max bytes. - -type: long - -format: bytes +type: integer -- -*`elasticsearch.node.stats.jvm.mem.pools.young.used.bytes`*:: +*`etcd.store.sets.fail`*:: + -- -Used bytes. - -type: long - -format: bytes +type: integer -- -[float] -=== survivor - -Survivor memory pool stats. 
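The `etcd.self.*` fields mirror etcd's v2 self-statistics endpoint, `GET /v2/stats/self`. A hedged sketch of querying it directly; the `localhost:2379` address is an assumption, and the endpoint is only present when the v2 API is served:

[source,go]
----
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	resp, err := http.Get("http://localhost:2379/v2/stats/self")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var self struct {
		ID         string `json:"id"`
		Name       string `json:"name"`
		State      string `json:"state"` // "StateLeader" or "StateFollower"
		LeaderInfo struct {
			Leader string `json:"leader"`
			Uptime string `json:"uptime"`
		} `json:"leaderInfo"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&self); err != nil {
		panic(err)
	}
	fmt.Printf("%s (%s) state=%s leader=%s\n",
		self.Name, self.ID, self.State, self.LeaderInfo.Leader)
}
----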
- - - -*`elasticsearch.node.stats.jvm.mem.pools.survivor.max.bytes`*:: +*`etcd.store.delete.success`*:: + -- -Max bytes. - -type: long - -format: bytes +type: integer -- -*`elasticsearch.node.stats.jvm.mem.pools.survivor.peak.bytes`*:: +*`etcd.store.delete.fail`*:: + -- -Peak bytes. - -type: long - -format: bytes +type: integer -- -*`elasticsearch.node.stats.jvm.mem.pools.survivor.peak_max.bytes`*:: +*`etcd.store.update.success`*:: + -- -Peak max bytes. - -type: long - -format: bytes +type: integer -- -*`elasticsearch.node.stats.jvm.mem.pools.survivor.used.bytes`*:: +*`etcd.store.update.fail`*:: + -- -Used bytes. +type: integer -type: long +-- -format: bytes +*`etcd.store.create.success`*:: ++ +-- +type: integer -- -[float] -=== jvm.gc.collectors +*`etcd.store.create.fail`*:: ++ +-- +type: integer -GC collector stats. +-- +*`etcd.store.compareandswap.success`*:: ++ +-- +type: integer +-- -[float] -=== old.collection +*`etcd.store.compareandswap.fail`*:: ++ +-- +type: integer -Old collection gc. +-- +*`etcd.store.compareanddelete.success`*:: ++ +-- +type: integer +-- -*`elasticsearch.node.stats.jvm.gc.collectors.old.collection.count`*:: +*`etcd.store.compareanddelete.fail`*:: + -- +type: integer +-- -type: long +*`etcd.store.expire.count`*:: ++ +-- +type: integer -- -*`elasticsearch.node.stats.jvm.gc.collectors.old.collection.ms`*:: +*`etcd.store.watchers`*:: + -- +type: integer +-- + +[[exported-fields-golang]] +== Golang fields + +Golang module -type: long --- [float] -=== young.collection +=== golang -Young collection gc. -*`elasticsearch.node.stats.jvm.gc.collectors.young.collection.count`*:: -+ --- +[float] +=== expvar +expvar -type: long --- -*`elasticsearch.node.stats.jvm.gc.collectors.young.collection.ms`*:: +*`golang.expvar.cmdline`*:: + -- +The command line with which this Go program was started. -type: long +type: keyword -- [float] -=== fs.summary - -File system summary - - - -*`elasticsearch.node.stats.fs.summary.total.bytes`*:: -+ --- - +=== heap -type: long +The Go program heap information exposed by expvar. -format: bytes --- -*`elasticsearch.node.stats.fs.summary.free.bytes`*:: +*`golang.heap.cmdline`*:: + -- +The command line with which this Go program was started. -type: long - -format: bytes - --- +type: keyword -*`elasticsearch.node.stats.fs.summary.available.bytes`*:: -+ -- +[float] +=== gc -type: long +Garbage collector summary. -format: bytes --- [float] -=== cluster.pending_task +=== total_pause -`cluster.pending_task` contains a pending task description. +Total GC pause duration over the lifetime of the process. -*`elasticsearch.cluster.pending_task.insert_order`*:: +*`golang.heap.gc.total_pause.ns`*:: + -- -Insert order +Duration in ns. type: long -- -*`elasticsearch.cluster.pending_task.priority`*:: +*`golang.heap.gc.total_count`*:: + -- -Priority +Total number of GC runs that have happened. type: long -- -*`elasticsearch.cluster.pending_task.source`*:: +*`golang.heap.gc.next_gc_limit`*:: + -- -Source. For example: put-mapping +Next collection will happen when HeapAlloc > this amount. -type: keyword +type: long + +format: bytes -- -*`elasticsearch.cluster.pending_task.time_in_queue.ms`*:: +*`golang.heap.gc.cpu_fraction`*:: + -- -Time in queue +Fraction of CPU time used by GC. -type: long +type: float -- [float] -=== shard +=== pause -shard fields +Last GC pause durations during the monitoring period. -*`elasticsearch.shard.primary`*:: +*`golang.heap.gc.pause.count`*:: + -- -True if this is the primary shard. +Count of GC pauses during this collection period.
-type: boolean +type: long -- -*`elasticsearch.shard.number`*:: +[float] +=== sum + +Total GC pause duration during this collection period. + + + +*`golang.heap.gc.pause.sum.ns`*:: + -- -The number of this shard. +Duration in ns. type: long -- -*`elasticsearch.shard.state`*:: -+ --- -The state of this shard. +[float] +=== max +Max GC pause duration during this collection period. -type: keyword --- -*`elasticsearch.shard.relocating_node.name`*:: +*`golang.heap.gc.pause.max.ns`*:: + -- -The node the shard was relocated from. +Duration in ns. -type: keyword +type: long -- -[[exported-fields-envoyproxy]] -== envoyproxy fields +[float] +=== avg -envoyproxy module +Average GC pause duration during this collection period. -[float] -=== envoyproxy +*`golang.heap.gc.pause.avg.ns`*:: ++ +-- +Duration in ns. +type: long +-- [float] -=== server - -Contains envoy proxy server stats +=== system +Heap summary of bytes obtained from the system. -*`envoyproxy.server.cluster_manager.active_clusters`*:: +*`golang.heap.system.total`*:: + -- -Number of currently active (warmed) clusters +Total bytes obtained from the system (sum of XxxSys below). -type: integer +type: long + +format: bytes -- -*`envoyproxy.server.cluster_manager.cluster_added`*:: +*`golang.heap.system.obtained`*:: + -- -Total clusters added (either via static config or CDS) +Bytes obtained from the system via HeapSys. heap_sys = heap_idle + heap_inuse. -type: integer +type: long + +format: bytes -- -*`envoyproxy.server.cluster_manager.cluster_modified`*:: +*`golang.heap.system.stack`*:: + -- -Total clusters modified (via CDS) +Bytes used by the stack allocator; these bytes were obtained from the system. -type: integer +type: long + +format: bytes -- -*`envoyproxy.server.cluster_manager.cluster_removed`*:: +*`golang.heap.system.released`*:: + -- -Total clusters removed (via CDS) +Bytes released to the OS. -type: integer +type: long --- +format: bytes -*`envoyproxy.server.cluster_manager.warming_clusters`*:: -+ -- -Number of currently warming (not active) clusters +[float] +=== allocations -type: integer +Heap allocations summary. --- -*`envoyproxy.server.cluster_manager.cluster_updated`*:: + +*`golang.heap.allocations.mallocs`*:: + -- -Total cluster updates +Number of mallocs. -type: integer +type: long -- -*`envoyproxy.server.cluster_manager.cluster_updated_via_merge`*:: +*`golang.heap.allocations.frees`*:: + -- -Total cluster updates applied as merged updates +Number of frees. -type: integer +type: long -- -*`envoyproxy.server.cluster_manager.update_merge_cancelled`*:: +*`golang.heap.allocations.objects`*:: + -- -Total merged updates that got cancelled and delivered early +Total number of allocated objects. -type: integer +type: long -- -*`envoyproxy.server.cluster_manager.update_out_of_merge_window`*:: +*`golang.heap.allocations.total`*:: + -- -Total updates which arrived out of a merge window +Bytes allocated (even if freed) throughout the lifetime. -type: integer +type: long --- +format: bytes +-- -*`envoyproxy.server.filesystem.flushed_by_timer`*:: +*`golang.heap.allocations.allocated`*:: + -- -Total number of times internal flush buffers are written to a file due to flush timeout +Bytes allocated and not yet freed (same as Alloc above). -type: integer +type: long + +format: bytes -- -*`envoyproxy.server.filesystem.reopen_failed`*:: +*`golang.heap.allocations.idle`*:: + -- -Total number of times a file was failed to be opened +Bytes in idle spans.
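These `golang.heap.*` values ultimately come from `runtime.MemStats`, which the `expvar` package publishes at `/debug/vars` under the `memstats` key (the monitored program has to import `expvar` and run an HTTP server). A sketch of pulling a few of them directly; the `localhost:6060` address is an assumption:

[source,go]
----
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"runtime"
)

func main() {
	resp, err := http.Get("http://localhost:6060/debug/vars")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// expvar serializes runtime.MemStats as-is, so it can be decoded back
	// into the same struct.
	var vars struct {
		Cmdline  []string         `json:"cmdline"`
		MemStats runtime.MemStats `json:"memstats"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&vars); err != nil {
		panic(err)
	}

	m := vars.MemStats
	fmt.Println("cmdline:        ", vars.Cmdline)
	fmt.Println("heap obtained:  ", m.HeapSys) // == HeapIdle + HeapInuse
	fmt.Println("heap allocated: ", m.HeapAlloc)
	fmt.Println("gc total count: ", m.NumGC)
}
----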
-type: integer +type: long + +format: bytes -- -*`envoyproxy.server.filesystem.write_buffered`*:: +*`golang.heap.allocations.active`*:: + -- -Total number of times file data is moved to Envoys internal flush buffer +Bytes in non-idle span. -type: integer +type: long --- +format: bytes -*`envoyproxy.server.filesystem.write_completed`*:: -+ -- -Total number of times a file was written +[[exported-fields-googlecloud]] +== Google Cloud Platform fields -type: integer +GCP module --- -*`envoyproxy.server.filesystem.write_total_buffered`*:: -+ --- -Current total size of internal flush buffer in bytes -type: integer +*`googlecloud.labels`*:: ++ +-- +type: object -- -*`envoyproxy.server.filesystem.write_failed`*:: +*`googlecloud.stackdriver.*.*.*.*`*:: + -- -Total number of times an error occurred during a file write operation +Metrics that returned from StackDriver API query. -type: integer +type: object -- +[float] +=== compute -*`envoyproxy.server.runtime.load_error`*:: -+ --- -Total number of load attempts that resulted in an error in any layer +Google Cloud Compute metrics -type: integer --- -*`envoyproxy.server.runtime.load_success`*:: +*`googlecloud.compute.instance.firewall.dropped_bytes_count.value`*:: + -- -Total number of load attempts that were successful at all layers +Incoming bytes dropped by the firewall - -type: integer +type: long -- -*`envoyproxy.server.runtime.num_keys`*:: +*`googlecloud.compute.instance.firewall.dropped_packets_count.value`*:: + -- -Number of keys currently loaded - +Incoming packets dropped by the firewall -type: integer +type: long -- -*`envoyproxy.server.runtime.override_dir_exists`*:: + +*`googlecloud.compute.instance.cpu.reserved_cores.value`*:: + -- -Total number of loads that did use an override directory - +Number of cores reserved on the host of the instance -type: integer +type: double -- -*`envoyproxy.server.runtime.override_dir_not_exists`*:: +*`googlecloud.compute.instance.cpu.utilization.value`*:: + -- -Total number of loads that did not use an override directory +The fraction of the allocated CPU that is currently in use on the instance - -type: integer +type: double -- -*`envoyproxy.server.runtime.admin_overrides_active`*:: +*`googlecloud.compute.instance.cpu.usage_time.value`*:: + -- -1 if any admin overrides are active otherwise 0 - +Usage for all cores in seconds -type: integer +type: double -- -*`envoyproxy.server.runtime.deprecated_feature_use`*:: + +*`googlecloud.compute.instance.disk.read_bytes_count.value`*:: + -- -Total number of times deprecated features were used. 
- +Count of bytes read from disk -type: integer +type: long -- -*`envoyproxy.server.runtime.num_layers`*:: +*`googlecloud.compute.instance.disk.read_ops_count.value`*:: + -- -Number of layers currently active (without loading errors) +Count of disk read IO operations - -type: integer +type: long -- - -*`envoyproxy.server.listener_manager.listener_added`*:: +*`googlecloud.compute.instance.disk.write_bytes_count.value`*:: + -- -Total listeners added (either via static config or LDS) +Count of bytes written to disk - -type: integer +type: long -- -*`envoyproxy.server.listener_manager.listener_create_failure`*:: +*`googlecloud.compute.instance.disk.write_ops_count.value`*:: + -- -Total failed listener object additions to workers - +Count of disk write IO operations -type: integer +type: long -- -*`envoyproxy.server.listener_manager.listener_create_success`*:: +*`googlecloud.compute.instance.uptime.value`*:: + -- -Total listener objects successfully added to workers +How long the VM has been running, in seconds - -type: integer +type: long -- -*`envoyproxy.server.listener_manager.listener_modified`*:: + +*`googlecloud.compute.instance.network.received_bytes_count.value`*:: + -- -Total listeners modified (via LDS) +Count of bytes received from the network - -type: integer +type: long -- -*`envoyproxy.server.listener_manager.listener_removed`*:: +*`googlecloud.compute.instance.network.received_packets_count.value`*:: + -- -Total listeners removed (via LDS) - +Count of packets received from the network -type: integer +type: long -- -*`envoyproxy.server.listener_manager.total_listeners_active`*:: +*`googlecloud.compute.instance.network.sent_bytes_count.value`*:: + -- -Number of currently active listeners +Count of bytes sent over the network - -type: integer +type: long -- -*`envoyproxy.server.listener_manager.total_listeners_draining`*:: +*`googlecloud.compute.instance.network.sent_packets_count.value`*:: + -- -Number of currently draining listeners - +Count of packets sent over the network -type: integer +type: long -- -*`envoyproxy.server.listener_manager.total_listeners_warming`*:: -+ --- -Number of currently warming listeners +[float] +=== loadbalancing +Google Cloud Load Balancing metrics -type: integer --- +[float] +=== https -*`envoyproxy.server.listener_manager.listener_stopped`*:: -+ --- -Total listeners stopped +Google Cloud Load Balancing metrics -type: integer +[float] +=== backend_latencies --- +A distribution of the latency calculated from when the request was sent by the proxy to the backend until the proxy received from the backend the last byte of response. 
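For distribution-valued metrics such as `backend_latencies` and `frontend_tcp_rtt`, the bucket boundaries are not reported explicitly; with `ExponentialBuckets` they are implied by `scale`, `growth_factor` and `num_finite_buckets`: finite bucket `i` covers `[scale * growth_factor^(i-1), scale * growth_factor^i)`. A sketch that materializes the boundaries (the parameter values are invented):

[source,go]
----
package main

import (
	"fmt"
	"math"
)

func main() {
	// ...bucket_options.Options.ExponentialBuckets.scale / .growth_factor
	scale, growth := 1.0, 2.0
	// ...bucket_options.Options.ExponentialBuckets.num_finite_buckets
	numFinite := 5

	// Bucket 0 is the underflow bucket (values below scale); bucket
	// numFinite+1 is the overflow bucket.
	for i := 1; i <= numFinite; i++ {
		lo := scale * math.Pow(growth, float64(i-1))
		hi := scale * math.Pow(growth, float64(i))
		fmt.Printf("bucket %d: [%.0f, %.0f) ms\n", i, lo, hi)
	}
}
----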
-*`envoyproxy.server.stats.overflow`*:: +*`googlecloud.loadbalancing.https.backend_latencies.count.value`*:: + -- -Total number of times Envoy cannot allocate a statistic due to a shortage of shared memory - - -type: integer +type: long -- - -*`envoyproxy.server.server.days_until_first_cert_expiring`*:: +*`googlecloud.loadbalancing.https.backend_latencies.mean.value`*:: + -- -Number of days until the next certificate being managed will expire - - -type: integer +type: long -- -*`envoyproxy.server.server.live`*:: +*`googlecloud.loadbalancing.https.backend_latencies.bucket_counts.value`*:: + -- -1 if the server is not currently draining, 0 otherwise +type: long + +-- -type: integer --- -*`envoyproxy.server.server.memory_allocated`*:: +*`googlecloud.loadbalancing.https.backend_latencies.bucket_options.Options.ExponentialBuckets.growth_factor.value`*:: + -- -Current amount of allocated memory in bytes - - -type: integer +type: double -- -*`envoyproxy.server.server.memory_heap_size`*:: +*`googlecloud.loadbalancing.https.backend_latencies.bucket_options.Options.ExponentialBuckets.scale.value`*:: + -- -Current reserved heap size in bytes - - -type: integer +type: long -- -*`envoyproxy.server.server.parent_connections`*:: +*`googlecloud.loadbalancing.https.backend_latencies.bucket_options.Options.ExponentialBuckets.num_finite_buckets.value`*:: + -- -Total connections of the old Envoy process on hot restart - - -type: integer +type: long -- -*`envoyproxy.server.server.total_connections`*:: +*`googlecloud.loadbalancing.https.backend_request_bytes_count.value`*:: + -- -Total connections of both new and old Envoy processes - +The number of bytes sent as requests from HTTP/S load balancer to backends. -type: integer +type: long -- -*`envoyproxy.server.server.uptime`*:: +*`googlecloud.loadbalancing.https.backend_request_count.value`*:: + -- -Current server uptime in seconds +The number of requests served by backends of HTTP/S load balancer. - -type: integer +type: long -- -*`envoyproxy.server.server.version`*:: +*`googlecloud.loadbalancing.https.backend_response_bytes_count.value`*:: + -- -Integer represented version number based on SCM revision - +The number of bytes sent as responses from backends (or cache) to HTTP/S load balancer. -type: integer +type: long -- -*`envoyproxy.server.server.watchdog_mega_miss`*:: +[float] +=== frontend_tcp_rtt + +A distribution of the RTT measured for each connection between client and proxy. 
+ + +*`googlecloud.loadbalancing.https.frontend_tcp_rtt.count.value`*:: + -- -type: integer +type: long -- -*`envoyproxy.server.server.watchdog_miss`*:: +*`googlecloud.loadbalancing.https.frontend_tcp_rtt.mean.value`*:: + -- -type: integer +type: long -- -*`envoyproxy.server.server.hot_restart_epoch`*:: +*`googlecloud.loadbalancing.https.frontend_tcp_rtt.bucket_counts.value`*:: + -- -Current hot restart epoch +type: long +-- -type: integer --- -*`envoyproxy.server.server.concurrency`*:: + +*`googlecloud.loadbalancing.https.frontend_tcp_rtt.bucket_options.Options.ExponentialBuckets.growth_factor.value`*:: + -- -Number of worker threads - - -type: integer +type: double -- -*`envoyproxy.server.server.debug_assertion_failures`*:: +*`googlecloud.loadbalancing.https.frontend_tcp_rtt.bucket_options.Options.ExponentialBuckets.scale.value`*:: + -- -type: integer +type: long -- -*`envoyproxy.server.server.dynamic_unknown_fields`*:: +*`googlecloud.loadbalancing.https.frontend_tcp_rtt.bucket_options.Options.ExponentialBuckets.num_finite_buckets.value`*:: + -- -Number of messages in dynamic configuration with unknown fields - - -type: integer +type: long -- -*`envoyproxy.server.server.state`*:: -+ --- -Current state of the Server +[float] +=== backend_latencies -type: integer +A distribution of the latency calculated from when the request was sent by the proxy to the backend until the proxy received from the backend the last byte of response. --- -*`envoyproxy.server.server.static_unknown_fields`*:: +*`googlecloud.loadbalancing.https.internal.backend_latencies.count.value`*:: + -- -Number of messages in static configuration with unknown fields - - -type: integer +type: long -- -*`envoyproxy.server.server.stats_recent_lookups`*:: +*`googlecloud.loadbalancing.https.internal.backend_latencies.mean.value`*:: + -- -type: integer +type: long -- - -*`envoyproxy.server.http2.header_overflow`*:: +*`googlecloud.loadbalancing.https.internal.backend_latencies.bucket_counts.value`*:: + -- -Total number of connections reset due to the headers being larger than Envoy::Http::Http2::ConnectionImpl::StreamImpl::MAX_HEADER_SIZE (63k) +type: long +-- -type: integer --- -*`envoyproxy.server.http2.headers_cb_no_stream`*:: + +*`googlecloud.loadbalancing.https.internal.backend_latencies.bucket_options.Options.ExponentialBuckets.growth_factor.value`*:: + -- -Total number of errors where a header callback is called without an associated stream. This tracks an unexpected occurrence due to an as yet undiagnosed bug - - -type: integer +type: double -- -*`envoyproxy.server.http2.rx_messaging_error`*:: +*`googlecloud.loadbalancing.https.internal.backend_latencies.bucket_options.Options.ExponentialBuckets.scale.value`*:: + -- -Total number of invalid received frames that violated section 8 of the HTTP/2 spec. This will result in a tx_reset - - -type: integer +type: long -- -*`envoyproxy.server.http2.rx_reset`*:: +*`googlecloud.loadbalancing.https.internal.backend_latencies.bucket_options.Options.ExponentialBuckets.num_finite_buckets.value`*:: + -- -Total number of reset stream frames received by Envoy - - -type: integer +type: long -- -*`envoyproxy.server.http2.too_many_header_frames`*:: +*`googlecloud.loadbalancing.https.internal.request_bytes_count.value`*:: + -- -Total number of times an HTTP2 connection is reset due to receiving too many headers frames. 
Envoy currently supports proxying at most one header frame for 100-Continue one non-100 response code header frame and one frame with trailers +The number of bytes sent as requests from clients to HTTP/S load balancer. - -type: integer +type: long -- -*`envoyproxy.server.http2.trailers`*:: +*`googlecloud.loadbalancing.https.internal.request_count.value`*:: + -- -Total number of trailers seen on requests coming from downstream - +The number of requests served by HTTP/S load balancer. -type: integer +type: long -- -*`envoyproxy.server.http2.tx_reset`*:: +*`googlecloud.loadbalancing.https.internal.response_bytes_count.value`*:: + -- -Total number of reset stream frames transmitted by Envoy +The number of bytes sent as responses from HTTP/S load balancer to clients. - -type: integer +type: long -- -[[exported-fields-etcd]] -== Etcd fields - -etcd Module +[float] +=== total_latencies +A distribution of the latency calculated from when the request was received by the proxy until the proxy got ACK from client on last response byte. -[float] -=== etcd +*`googlecloud.loadbalancing.https.internal.total_latencies.count.value`*:: ++ +-- +type: long -`etcd` contains statistics that were read from Etcd +-- +*`googlecloud.loadbalancing.https.internal.total_latencies.mean.value`*:: ++ +-- +type: long +-- -*`etcd.api_version`*:: +*`googlecloud.loadbalancing.https.internal.total_latencies.bucket_counts.value`*:: + -- -Etcd API version for metrics retrieval +type: long + +-- + -type: keyword +*`googlecloud.loadbalancing.https.internal.total_latencies.bucket_options.Options.ExponentialBuckets.growth_factor.value`*:: ++ -- +type: double -[float] -=== leader +-- -Contains etcd leader statistics. +*`googlecloud.loadbalancing.https.internal.total_latencies.bucket_options.Options.ExponentialBuckets.scale.value`*:: ++ +-- +type: long +-- +*`googlecloud.loadbalancing.https.internal.total_latencies.bucket_options.Options.ExponentialBuckets.num_finite_buckets.value`*:: ++ +-- +type: long -[float] -=== followers.counts +-- -The number of failed and successful Raft RPC requests. +*`googlecloud.loadbalancing.https.request_bytes_count.value`*:: ++ +-- +The number of bytes sent as requests from clients to HTTP/S load balancer. +type: long +-- -*`etcd.leader.followers.counts.followers.counts.success`*:: +*`googlecloud.loadbalancing.https.request_count.value`*:: + -- -successful Raft RPC requests +The number of requests served by HTTP/S load balancer. -type: integer +type: long -- -*`etcd.leader.followers.counts.followers.counts.fail`*:: +*`googlecloud.loadbalancing.https.response_bytes_count.value`*:: + -- -failed Raft RPC requests +The number of bytes sent as responses from HTTP/S load balancer to clients. -type: integer +type: long -- [float] -=== followers.latency - -latency to each peer in the cluster +=== total_latencies +A distribution of the latency calculated from when the request was received by the proxy until the proxy got ACK from client on last response byte. 
-*`etcd.leader.followers.latency.followers.latency.average`*:: +*`googlecloud.loadbalancing.https.total_latencies.count.value`*:: + -- -type: scaled_float +type: long -- -*`etcd.leader.followers.latency.followers.latency.current`*:: +*`googlecloud.loadbalancing.https.total_latencies.mean.value`*:: + -- -type: scaled_float +type: long -- -*`etcd.leader.followers.latency.followers.latency.maximum`*:: +*`googlecloud.loadbalancing.https.total_latencies.bucket_counts.value`*:: + -- -type: scaled_float +type: long -- -*`etcd.leader.followers.latency.followers.latency.minimum`*:: + + + +*`googlecloud.loadbalancing.https.total_latencies.bucket_options.Options.ExponentialBuckets.growth_factor.value`*:: + -- -type: integer +type: double -- -*`etcd.leader.followers.latency.follower.latency.standardDeviation`*:: +*`googlecloud.loadbalancing.https.total_latencies.bucket_options.Options.ExponentialBuckets.scale.value`*:: + -- -type: scaled_float +type: long -- -*`etcd.leader.leader`*:: +*`googlecloud.loadbalancing.https.total_latencies.bucket_options.Options.ExponentialBuckets.num_finite_buckets.value`*:: + -- -ID of actual leader - -type: keyword +type: long -- [float] -=== server - -Server metrics from the Etcd V3 /metrics endpoint +=== l3.internal +Google Cloud Load Balancing metrics -*`etcd.server.has_leader`*:: +*`googlecloud.loadbalancing.l3.internal.egress_bytes_count.value`*:: + -- -Whether a leader exists in the cluster +The number of bytes sent from ILB backend to client (for TCP flows it's counting bytes on application stream only). - -type: byte +type: long -- -*`etcd.server.leader_changes.count`*:: +*`googlecloud.loadbalancing.l3.internal.egress_packets_count.value`*:: + -- -Number of leader changes seen at the cluster - +The number of packets sent from ILB backend to client of the flow. type: long -- -*`etcd.server.proposals_committed.count`*:: +*`googlecloud.loadbalancing.l3.internal.ingress_bytes_count.value`*:: + -- -Number of consensus proposals commited - +The number of bytes sent from client to ILB backend (for TCP flows it's counting bytes on application stream only). type: long -- -*`etcd.server.proposals_pending.count`*:: +*`googlecloud.loadbalancing.l3.internal.ingress_packets_count.value`*:: + -- -Number of consensus proposals pending - +The number of packets sent from client to ILB backend. type: long -- -*`etcd.server.proposals_failed.count`*:: -+ --- -Number of consensus proposals failed +[float] +=== rtt_latencies + +A distribution of RTT measured over TCP connections for ILB flows. 
+*`googlecloud.loadbalancing.l3.internal.rtt_latencies.count.value`*:: ++ +-- type: long -- -*`etcd.server.grpc_started.count`*:: +*`googlecloud.loadbalancing.l3.internal.rtt_latencies.mean.value`*:: + -- -Number of sent gRPC requests - - type: long -- -*`etcd.server.grpc_handled.count`*:: +*`googlecloud.loadbalancing.l3.internal.rtt_latencies.bucket_counts.value`*:: + -- -Number of received gRPC requests - - type: long -- -[float] -=== disk - -Disk metrics from the Etcd V3 /metrics endpoint -*`etcd.disk.mvcc_db_total_size.bytes`*:: +*`googlecloud.loadbalancing.l3.internal.rtt_latencies.bucket_options.Options.ExponentialBuckets.growth_factor.value`*:: + -- -Size of stored data at MVCC +type: double +-- +*`googlecloud.loadbalancing.l3.internal.rtt_latencies.bucket_options.Options.ExponentialBuckets.scale.value`*:: ++ +-- type: long -format: bytes - -- -*`etcd.disk.wal_fsync_duration.ns.bucket.*`*:: +*`googlecloud.loadbalancing.l3.internal.rtt_latencies.bucket_options.Options.ExponentialBuckets.num_finite_buckets.value`*:: + -- -Latency for writing ahead logs to disk +type: long + +-- +[float] +=== tcp_ssl_proxy -type: object +Google Cloud Load Balancing metrics --- -*`etcd.disk.wal_fsync_duration.ns.count`*:: +*`googlecloud.loadbalancing.tcp_ssl_proxy.closed_connections.value`*:: + -- -Write ahead logs count - +Number of connections that were terminated over TCP/SSL proxy. type: long -- -*`etcd.disk.wal_fsync_duration.ns.sum`*:: +*`googlecloud.loadbalancing.tcp_ssl_proxy.egress_bytes_count.value`*:: + -- -Write ahead logs latency sum - +Number of bytes sent from VM to client using proxy. type: long -- -*`etcd.disk.backend_commit_duration.ns.bucket.*`*:: -+ --- -Latency for writing backend changes to disk - +[float] +=== frontend_tcp_rtt -type: object +A distribution of the smoothed RTT (in ms) measured by the proxy's TCP stack, each minute application layer bytes pass from proxy to client. --- -*`etcd.disk.backend_commit_duration.ns.count`*:: +*`googlecloud.loadbalancing.tcp_ssl_proxy.frontend_tcp_rtt.count.value`*:: + -- -Backend commits count - - type: long -- -*`etcd.disk.backend_commit_duration.ns.sum`*:: +*`googlecloud.loadbalancing.tcp_ssl_proxy.frontend_tcp_rtt.mean.value`*:: + -- -Backend commits latency sum +type: long +-- +*`googlecloud.loadbalancing.tcp_ssl_proxy.frontend_tcp_rtt.bucket_counts.value`*:: ++ +-- type: long -- -[float] -=== memory - -Memory metrics from the Etcd V3 /metrics endpoint -*`etcd.memory.go_memstats_alloc.bytes`*:: +*`googlecloud.loadbalancing.tcp_ssl_proxy.frontend_tcp_rtt.bucket_options.Options.ExponentialBuckets.growth_factor.value`*:: + -- -Memory allocated bytes as of MemStats Go +type: double +-- +*`googlecloud.loadbalancing.tcp_ssl_proxy.frontend_tcp_rtt.bucket_options.Options.ExponentialBuckets.scale.value`*:: ++ +-- type: long -format: bytes +-- +*`googlecloud.loadbalancing.tcp_ssl_proxy.frontend_tcp_rtt.bucket_options.Options.ExponentialBuckets.num_finite_buckets.value`*:: ++ -- +type: long -[float] -=== network +-- -Network metrics from the Etcd V3 /metrics endpoint +*`googlecloud.loadbalancing.tcp_ssl_proxy.ingress_bytes_count.value`*:: ++ +-- +Number of bytes sent from client to VM using proxy. +type: long +-- -*`etcd.network.client_grpc_sent.bytes`*:: +*`googlecloud.loadbalancing.tcp_ssl_proxy.new_connections.value`*:: + -- -gRPC sent bytes total - +Number of connections that were created over TCP/SSL proxy. 
type: long -format: bytes - -- -*`etcd.network.client_grpc_received.bytes`*:: +*`googlecloud.loadbalancing.tcp_ssl_proxy.open_connections.value`*:: + -- -gRPC received bytes total - +Current number of outstanding connections through the TCP/SSL proxy. type: long -format: bytes - -- [float] -=== self +=== pubsub -Contains etcd self statistics. +Google Cloud PubSub metrics +[float] +=== subscription -*`etcd.self.id`*:: +Subscription related metrics + + +*`googlecloud.pubsub.subscription.ack_message_count.value`*:: + -- -the unique identifier for the member - +Cumulative count of messages acknowledged by Acknowledge requests, grouped by delivery type. -type: keyword +type: long -- -*`etcd.self.leaderinfo.leader`*:: +*`googlecloud.pubsub.subscription.backlog_bytes.value`*:: + -- -id of the current leader member +Total byte size of the unacknowledged messages (a.k.a. backlog messages) in a subscription. - -type: keyword +type: long -- -*`etcd.self.leaderinfo.starttime`*:: +*`googlecloud.pubsub.subscription.num_outstanding_messages.value`*:: + -- -the time when this node was started - +Number of messages delivered to a subscription's push endpoint, but not yet acknowledged. -type: keyword +type: long -- -*`etcd.self.leaderinfo.uptime`*:: +*`googlecloud.pubsub.subscription.num_undelivered_messages.value`*:: + -- -amount of time the leader has been leader +Number of unacknowledged messages (a.k.a. backlog messages) in a subscription. - -type: keyword +type: long -- -*`etcd.self.name`*:: +*`googlecloud.pubsub.subscription.oldest_unacked_message_age.value`*:: + -- -this member's name - +Age (in seconds) of the oldest unacknowledged message (a.k.a. backlog message) in a subscription. -type: keyword +type: long -- -*`etcd.self.recv.appendrequest.count`*:: +*`googlecloud.pubsub.subscription.pull_ack_message_operation_count.value`*:: + -- -number of append requests this node has processed +Cumulative count of acknowledge message operations, grouped by result. For a definition of message operations, see Cloud Pub/Sub metric subscription/mod_ack_deadline_message_operation_count. - -type: integer +type: long -- -*`etcd.self.recv.bandwidthrate`*:: +*`googlecloud.pubsub.subscription.pull_ack_request_count.value`*:: + -- -number of bytes per second this node is receiving (follower only) - +Cumulative count of acknowledge requests, grouped by result. -type: scaled_float +type: long -- -*`etcd.self.recv.pkgrate`*:: +*`googlecloud.pubsub.subscription.pull_message_operation_count.value`*:: + -- -number of requests per second this node is receiving (follower only) +Cumulative count of pull message operations, grouped by result. For a definition of message operations, see Cloud Pub/Sub metric subscription/mod_ack_deadline_message_operation_count. - -type: scaled_float +type: long -- -*`etcd.self.send.appendrequest.count`*:: +*`googlecloud.pubsub.subscription.pull_request_count.value`*:: + -- -number of requests that this node has sent - +Cumulative count of pull requests, grouped by result. -type: integer +type: long -- -*`etcd.self.send.bandwidthrate`*:: +*`googlecloud.pubsub.subscription.push_request_count.value`*:: + -- -number of bytes per second this node is sending (leader only). This value is undefined on single member clusters. +Cumulative count of push attempts, grouped by result. Unlike pulls, the push server implementation does not batch user messages. So each request only contains one user message. The push server retries on errors, so a given user message can appear multiple times.
- -type: scaled_float +type: long -- -*`etcd.self.send.pkgrate`*:: +*`googlecloud.pubsub.subscription.push_request_latencies.value`*:: + -- -number of requests per second this node is sending (leader only). This value is undefined on single member clusters. - +Distribution of push request latencies (in microseconds), grouped by result. -type: scaled_float +type: long -- -*`etcd.self.starttime`*:: +*`googlecloud.pubsub.subscription.sent_message_count.value`*:: + -- -the time when this node was started +Cumulative count of messages sent by Cloud Pub/Sub to subscriber clients, grouped by delivery type. - -type: keyword +type: long -- -*`etcd.self.state`*:: +*`googlecloud.pubsub.subscription.streaming_pull_ack_message_operation_count.value`*:: + -- -either leader or follower - +Cumulative count of StreamingPull acknowledge message operations, grouped by result. For a definition of message operations, see Cloud Pub/Sub metric subscription/mod_ack_deadline_message_operation_count. -type: keyword +type: long -- -[float] -=== store - -The store statistics include information about the operations that this node has handled. - - - -*`etcd.store.gets.success`*:: +*`googlecloud.pubsub.subscription.streaming_pull_ack_request_count.value`*:: + -- -type: integer +Cumulative count of streaming pull requests with non-empty acknowledge ids, grouped by result. + +type: long -- -*`etcd.store.gets.fail`*:: +*`googlecloud.pubsub.subscription.streaming_pull_message_operation_count.value`*:: + -- -type: integer +Cumulative count of streaming pull message operations, grouped by result. For a definition of message operations, see Cloud Pub/Sub metric subscription/mod_ack_deadline_message_operation_count + +type: long -- -*`etcd.store.sets.success`*:: +*`googlecloud.pubsub.subscription.streaming_pull_response_count.value`*:: + -- -type: integer +Cumulative count of streaming pull responses, grouped by result. + +type: long -- -*`etcd.store.sets.fail`*:: +*`googlecloud.pubsub.subscription.dead_letter_message_count.value`*:: + -- -type: integer +Cumulative count of messages published to dead letter topic, grouped by result. + +type: long -- -*`etcd.store.delete.success`*:: +*`googlecloud.pubsub.subscription.mod_ack_deadline_message_count.value`*:: + -- -type: integer +Cumulative count of messages whose deadline was updated by ModifyAckDeadline requests, grouped by delivery type. + +type: long -- -*`etcd.store.delete.fail`*:: +*`googlecloud.pubsub.subscription.mod_ack_deadline_message_operation_count.value`*:: + -- -type: integer +Cumulative count of ModifyAckDeadline message operations, grouped by result. + +type: long -- -*`etcd.store.update.success`*:: +*`googlecloud.pubsub.subscription.mod_ack_deadline_request_count.value`*:: + -- -type: integer +Cumulative count of ModifyAckDeadline requests, grouped by result. + +type: long -- -*`etcd.store.update.fail`*:: +*`googlecloud.pubsub.subscription.oldest_retained_acked_message_age.value`*:: + -- -type: integer +Age (in seconds) of the oldest acknowledged message retained in a subscription. + +type: long -- -*`etcd.store.create.success`*:: +*`googlecloud.pubsub.subscription.oldest_retained_acked_message_age_by_region.value`*:: + -- -type: integer +Age (in seconds) of the oldest acknowledged message retained in a subscription, broken down by Cloud region. 
+ +type: long -- -*`etcd.store.create.fail`*:: +*`googlecloud.pubsub.subscription.oldest_unacked_message_age_by_region.value`*:: + -- -type: integer +Age (in seconds) of the oldest unacknowledged message in a subscription, broken down by Cloud region. + +type: long -- -*`etcd.store.compareandswap.success`*:: +*`googlecloud.pubsub.subscription.retained_acked_bytes.value`*:: + -- -type: integer +Total byte size of the acknowledged messages retained in a subscription. + +type: long -- -*`etcd.store.compareandswap.fail`*:: +*`googlecloud.pubsub.subscription.retained_acked_bytes_by_region.value`*:: + -- -type: integer +Total byte size of the acknowledged messages retained in a subscription, broken down by Cloud region. + +type: long -- -*`etcd.store.compareanddelete.success`*:: +*`googlecloud.pubsub.subscription.seek_request_count.value`*:: + -- -type: integer +Cumulative count of seek attempts, grouped by result. + +type: long -- -*`etcd.store.compareanddelete.fail`*:: +*`googlecloud.pubsub.subscription.streaming_pull_mod_ack_deadline_message_operation_count.value`*:: + -- -type: integer +Cumulative count of StreamingPull ModifyAckDeadline operations, grouped by result. + +type: long -- -*`etcd.store.expire.count`*:: +*`googlecloud.pubsub.subscription.streaming_pull_mod_ack_deadline_request_count.value`*:: + -- -type: integer +Cumulative count of streaming pull requests with non-empty ModifyAckDeadline fields, grouped by result. --- +type: long -*`etcd.store.watchers`*:: -+ -- -type: integer +*`googlecloud.pubsub.subscription.byte_cost.value`*:: ++ -- +Cumulative cost of operations, measured in bytes. This is used to measure quota utilization. -[[exported-fields-golang]] -== Golang fields - -Golang module - - - -[float] -=== golang - - - +type: long -[float] -=== expvar +-- -expvar +*`googlecloud.pubsub.subscription.config_updates_count.value`*:: ++ +-- +Cumulative count of configuration changes for each subscription, grouped by operation type and result. +type: long +-- -*`golang.expvar.cmdline`*:: +*`googlecloud.pubsub.subscription.unacked_bytes_by_region.value`*:: + -- -The cmdline of this Go program start with. +Total byte size of the unacknowledged messages in a subscription, broken down by Cloud region. - -type: keyword +type: long -- [float] -=== heap - -The Go program heap information exposed by expvar. +=== topic +Topic related metrics -*`golang.heap.cmdline`*:: +*`googlecloud.pubsub.topic.streaming_pull_response_count.value`*:: + -- -The cmdline of this Go program start with. - +Cumulative count of streaming pull responses, grouped by result. -type: keyword +type: long -- -[float] -=== gc - -Garbage collector summary. - - - -[float] -=== total_pause - -Total GC pause duration over lifetime of process. +*`googlecloud.pubsub.topic.send_message_operation_count.value`*:: ++ +-- +Cumulative count of publish message operations, grouped by result. For a definition of message operations, see Cloud Pub/Sub metric subscription/mod_ack_deadline_message_operation_count. +type: long +-- -*`golang.heap.gc.total_pause.ns`*:: +*`googlecloud.pubsub.topic.send_request_count.value`*:: + -- -Duration in Ns. - +Cumulative count of publish requests, grouped by result. type: long -- -*`golang.heap.gc.total_count`*:: +*`googlecloud.pubsub.topic.oldest_retained_acked_message_age_by_region.value`*:: + -- -Total number of GC was happened. - +Age (in seconds) of the oldest acknowledged message retained in a topic, broken down by Cloud region. 
type: long -- -*`golang.heap.gc.next_gc_limit`*:: +*`googlecloud.pubsub.topic.oldest_unacked_message_age_by_region.value`*:: + -- -Next collection will happen when HeapAlloc > this amount. - +Age (in seconds) of the oldest unacknowledged message in a topic, broken down by Cloud region. type: long -format: bytes - -- -*`golang.heap.gc.cpu_fraction`*:: +*`googlecloud.pubsub.topic.retained_acked_bytes_by_region.value`*:: + -- -Fraction of CPU time used by GC. +Total byte size of the acknowledged messages retained in a topic, broken down by Cloud region. - -type: float +type: long -- -[float] -=== pause - -Last GC pause durations during the monitoring period. +*`googlecloud.pubsub.topic.byte_cost.value`*:: ++ +-- +Cost of operations, measured in bytes. This is used to measure utilization for quotas. +type: long +-- -*`golang.heap.gc.pause.count`*:: +*`googlecloud.pubsub.topic.config_updates_count.value`*:: + -- -Count of GC pause duration during this collect period. - +Cumulative count of configuration changes, grouped by operation type and result. type: long -- -[float] -=== sum - -Total GC pause duration during this collect period. +*`googlecloud.pubsub.topic.message_sizes.value`*:: ++ +-- +Distribution of publish message sizes (in bytes) +type: long +-- -*`golang.heap.gc.pause.sum.ns`*:: +*`googlecloud.pubsub.topic.unacked_bytes_by_region.value`*:: + -- -Duration in Ns. - +Total byte size of the unacknowledged messages in a topic, broken down by Cloud region. type: long -- [float] -=== max - -Max GC pause duration during this collect period. +=== snapshot +Snapshot related metrics -*`golang.heap.gc.pause.max.ns`*:: +*`googlecloud.pubsub.snapshot.oldest_message_age.value`*:: + -- -Duration in Ns. - +Age (in seconds) of the oldest message retained in a snapshot. type: long -- -[float] -=== avg - -Average GC pause duration during this collect period. - - - -*`golang.heap.gc.pause.avg.ns`*:: +*`googlecloud.pubsub.snapshot.oldest_message_age_by_region.value`*:: + -- -Duration in Ns. - +Age (in seconds) of the oldest message retained in a snapshot, broken down by Cloud region. type: long -- -[float] -=== system - -Heap summary,which bytes was obtained from system. +*`googlecloud.pubsub.snapshot.backlog_bytes.value`*:: ++ +-- +Total byte size of the messages retained in a snapshot. +type: long +-- -*`golang.heap.system.total`*:: +*`googlecloud.pubsub.snapshot.backlog_bytes_by_region.value`*:: + -- -Total bytes obtained from system (sum of XxxSys below). - +Total byte size of the messages retained in a snapshot, broken down by Cloud region. type: long -format: bytes - -- -*`golang.heap.system.obtained`*:: +*`googlecloud.pubsub.snapshot.num_messages.value`*:: + -- -Via HeapSys, bytes obtained from system. heap_sys = heap_idle + heap_inuse. - +Number of messages retained in a snapshot. type: long -format: bytes - -- -*`golang.heap.system.stack`*:: +*`googlecloud.pubsub.snapshot.num_messages_by_region.value`*:: + -- -Bytes used by stack allocator, and these bytes was obtained from system. - +Number of messages retained in a snapshot, broken down by Cloud region. type: long -format: bytes - -- -*`golang.heap.system.released`*:: +*`googlecloud.pubsub.snapshot.config_updates_count.value`*:: + -- -Bytes released to the OS. - +Cumulative count of configuration changes, grouped by operation type and result. type: long -format: bytes - -- [float] -=== allocations +=== storage -Heap allocations summary. 
+Google Cloud Storage metrics -*`golang.heap.allocations.mallocs`*:: +*`googlecloud.storage.api.request_count.value`*:: + -- -Number of mallocs. - +Delta count of API calls, grouped by the API method name and response code. type: long -- -*`golang.heap.allocations.frees`*:: + +*`googlecloud.storage.authz.acl_based_object_access_count.value`*:: + -- -Number of frees. - +Delta count of requests that result in an object being granted access solely due to object ACLs. type: long -- -*`golang.heap.allocations.objects`*:: +*`googlecloud.storage.authz.acl_operations_count.value`*:: + -- -Total number of allocated objects. - +Usage of ACL operations broken down by type. type: long -- -*`golang.heap.allocations.total`*:: +*`googlecloud.storage.authz.object_specific_acl_mutation_count.value`*:: + -- -Bytes allocated (even if freed) throughout the lifetime. - +Delta count of changes made to object specific ACLs. type: long -format: bytes - -- -*`golang.heap.allocations.allocated`*:: + +*`googlecloud.storage.network.received_bytes_count.value`*:: + -- -Bytes allocated and not yet freed (same as Alloc above). - +Delta count of bytes received over the network, grouped by the API method name and response code. type: long -format: bytes - -- -*`golang.heap.allocations.idle`*:: +*`googlecloud.storage.network.sent_bytes_count.value`*:: + -- -Bytes in idle spans. - +Delta count of bytes sent over the network, grouped by the API method name and response code. type: long -format: bytes - -- -*`golang.heap.allocations.active`*:: + +*`googlecloud.storage.storage.object_count.value`*:: + -- -Bytes in non-idle span. - +Total number of objects per bucket, grouped by storage class. This value is measured once per day, and the value is repeated at each sampling interval throughout the day. type: long -format: bytes - -- -[[exported-fields-googlecloud]] -== Google Cloud Platform fields - -GCP module +*`googlecloud.storage.storage.total_byte_seconds.value`*:: ++ +-- +Total daily storage in byte-seconds used, grouped by storage class. This value is measured once per day, and the value is repeated at each sampling interval throughout the day. +type: long +-- -*`googlecloud.labels`*:: +*`googlecloud.storage.storage.total_bytes.value`*:: + -- -type: object +Total size of all objects in the bucket, grouped by storage class. This value is measured once per day, and the value is repeated at each sampling interval throughout the day. + +type: long -- @@ -18569,20 +20868,13 @@ type: object -- -[float] -=== website - -website - - - -*`iis.website.name`*:: +*`iis.website.*.*`*:: + -- -website name +website -type: keyword +type: object -- @@ -22329,6 +24621,16 @@ type: date -- +*`kubernetes.event.metadata.generate_name`*:: ++ +-- +Generate name of the event + + +type: keyword + +-- + *`kubernetes.event.metadata.name`*:: + -- @@ -24633,6 +26935,50 @@ type: long Domain name +type: keyword + +-- + +[float] +=== status + +status + + + +[float] +=== stat + +Memory stat + + + +*`kvm.status.stat.state`*:: ++ +-- +domain state + + +type: keyword + +-- + +*`kvm.status.id`*:: ++ +-- +Domain id + + +type: long + +-- + +*`kvm.status.name`*:: ++ +-- +Domain name + + type: keyword -- @@ -38972,6 +41318,33 @@ Module for Windows +[float] +=== perfmon + +perfmon + + + +*`windows.perfmon.instance`*:: ++ +-- +Instance value. + + +type: keyword + +-- + +*`windows.perfmon.metrics.*.*`*:: ++ +-- +Metric values returned.
+ + +type: object + +-- + [float] === service diff --git a/metricbeat/docs/modules/googlecloud.asciidoc b/metricbeat/docs/modules/googlecloud.asciidoc index 19a741eb5d0..3dcf9b20db9 100644 --- a/metricbeat/docs/modules/googlecloud.asciidoc +++ b/metricbeat/docs/modules/googlecloud.asciidoc @@ -16,18 +16,88 @@ Note: extra GCP charges on Stackdriver Monitoring API requests will be generated == Module config and parameters This is a list of the possible module parameters you can tune: -* *zone*: A single string with the zone you want to monitor like "us-central1-a". If you need to fetch from multiple regions, you have to setup a different configuration for each (but you don't need a new instance of Metricbeat running) -* *region*: A single string with the region you want to monitor like "us-central1". This will enable monitoring for all zones under this region. +* *zone*: A single string with the zone you want to monitor like `us-central1-a`. +Or you can specify a partial zone name like `us-central1-` or `us-central1-*`, +which will monitor all zones starting with `us-central1-`: `us-central1-a`, +`us-central1-b`, `us-central1-c` and `us-central1-f`. +Please see https://cloud.google.com/compute/docs/regions-zones#available[GCP zones] +for zones that are available in GCP. + +* *region*: A single string with the region you want to monitor like `us-central1`. +This will enable monitoring for all zones under this region. Or you can specify +a partial region name like `us-east` or `us-east*`, which will monitor all regions starting with +`us-east`: `us-east1` and `us-east4`. If both region and zone are configured, +only the region will be used. +Please see https://cloud.google.com/compute/docs/regions-zones#available[GCP regions] +for regions that are available in GCP. + * *project_id*: A single string with your GCP Project ID -* *credentials_file_path*: A single string pointing to the JSON file path reachable by Metricbeat that you have created using IAM. -* *exclude_labels*: (`true`/`false` default `false`) Do not extract extra labels and metadata information from Metricsets and fetch metrics onlly. At the moment, *labels and metadata extraction is only supported* in Compute Metricset. + +* *credentials_file_path*: A single string pointing to the JSON file path +reachable by Metricbeat that you have created using IAM. + +* *exclude_labels*: (`true`/`false` default `false`) Do not extract extra labels +and metadata information from metricsets and fetch metrics only. At the moment, +*labels and metadata extraction is only supported* in `compute` metricset. + +* *period*: A single time duration that sets how frequently this module collects metrics. + +[float] +== Example configuration +* The `compute` metricset is enabled to collect metrics from the `us-central1-a` zone +in the `elastic-observability` project. ++ +[source,yaml] +---- +- module: googlecloud + metricsets: + - compute + zone: "us-central1-a" + project_id: "elastic-observability" + credentials_file_path: "your JSON credentials file path" + exclude_labels: false + period: 60s +---- + +* The `compute` and `pubsub` metricsets are enabled to collect metrics from all zones +under the `us-central1` region in the `elastic-observability` project.
++ +[source,yaml] +---- +- module: googlecloud + metricsets: + - compute + - pubsub + region: "us-central1" + project_id: "elastic-observability" + credentials_file_path: "your JSON credentials file path" + exclude_labels: false + period: 60s +---- + +* The `compute` and `pubsub` metricsets are enabled to collect metrics from all regions starting with +`us-west` in the `elastic-observability` project, which includes all zones under +`us-west1`, `us-west2`, `us-west3` and `us-west4`. ++ +[source,yaml] +---- +- module: googlecloud + metricsets: + - compute + - pubsub + region: "us-west" + project_id: "elastic-observability" + credentials_file_path: "your JSON credentials file path" + exclude_labels: false + period: 60s +---- [float] == Authentication, authorization and permissions. Authentication and authorization in Google Cloud Platform can be achieved in many ways. For the current version of the Google Cloud Platform module for Metricbeat, the only supported method is using Service Account JSON files. A typical JSON with a private key looks like this: [float] -==== Example Credentials +=== Example Credentials [source,json] ---- { @@ -62,7 +132,9 @@ Google Cloud Platform offers the https://cloud.google.com/monitoring/api/metrics If you also want to *extract service labels* (by setting `exclude_labels` to false, which is the default state). You also make a new API check on the corresponding service. Service labels requires a new API call to extract those metrics. In the worst case the number of API calls will be doubled. In the best case, all metrics come from the same GCP entity and 100% of the required information is included in the first API call (which is cached for subsequent calls). -A recommended `period` value between fetches is between 5 and 10 minutes, depending on how granular you want your metrics. GCP restricts information for less than 5 minutes. +If the `period` value is set to 5 minutes and the sample period of the metric type is 60 seconds, then this module will collect data from this metric type once every 5 minutes, aggregating the samples taken in between. +GCP monitoring data has up to 240 seconds of latency, which means the latest monitoring data will be up to 4 minutes old. Please see https://cloud.google.com/monitoring/api/v3/latency-n-retention[Latency of GCP Monitoring Metric Data] for more details. +In the googlecloud module, metrics are collected based on this ingest delay, which is also obtained from the ListMetricDescriptors API.
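+For example, with a metric type whose sample period is 60 seconds, a configuration like the following sketch collects once every 5 minutes, and each event aggregates the five 60-second samples taken since the previous collection, offset by the ingest delay of the metric type. The project ID and credentials file path here are placeholders: ++ +[source,yaml] +---- +- module: googlecloud + metricsets: + - compute + zone: "us-central1-a" + project_id: "your project id" + credentials_file_path: "your JSON credentials file path" + exclude_labels: false + # five 60-second samples are aggregated per collection + period: 300s +----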
[float] === Rough estimation of the number of API Calls @@ -101,13 +173,21 @@ metricbeat.modules: - module: googlecloud metricsets: - compute + region: "us-central1" + project_id: "your project id" + credentials_file_path: "your JSON credentials file path" + exclude_labels: false + period: 300s + +- module: googlecloud + metricsets: - pubsub - loadbalancing zone: "us-central1-a" project_id: "your project id" credentials_file_path: "your JSON credentials file path" exclude_labels: false - period: 300s + period: 60s - module: googlecloud metricsets: @@ -117,6 +197,15 @@ metricbeat.modules: credentials_file_path: "your JSON credentials file path" exclude_labels: false period: 300s + +- module: googlecloud + metricsets: + - compute + region: "us-" + project_id: "your project id" + credentials_file_path: "your JSON credentials file path" + exclude_labels: false + period: 60s ---- [float] @@ -130,6 +219,8 @@ The following metricsets are available: * <> +* <> + * <> include::googlecloud/compute.asciidoc[] @@ -138,5 +229,7 @@ include::googlecloud/loadbalancing.asciidoc[] include::googlecloud/pubsub.asciidoc[] +include::googlecloud/stackdriver.asciidoc[] + include::googlecloud/storage.asciidoc[] diff --git a/metricbeat/docs/modules/googlecloud/stackdriver.asciidoc b/metricbeat/docs/modules/googlecloud/stackdriver.asciidoc new file mode 100644 index 00000000000..16609f7b01e --- /dev/null +++ b/metricbeat/docs/modules/googlecloud/stackdriver.asciidoc @@ -0,0 +1,23 @@ +//// +This file is generated! See scripts/mage/docs_collector.go +//// + +[[metricbeat-metricset-googlecloud-stackdriver]] +=== Google Cloud Platform stackdriver metricset + +beta[] + +include::../../../../x-pack/metricbeat/module/googlecloud/stackdriver/_meta/docs.asciidoc[] + + +==== Fields + +For a description of each field in the metricset, see the +<> section. + +Here is an example document generated by this metricset: + +[source,json] +---- +include::../../../../x-pack/metricbeat/module/googlecloud/stackdriver/_meta/data.json[] +---- diff --git a/metricbeat/docs/modules/kvm.asciidoc b/metricbeat/docs/modules/kvm.asciidoc index f8373184ba1..bc29173f280 100644 --- a/metricbeat/docs/modules/kvm.asciidoc +++ b/metricbeat/docs/modules/kvm.asciidoc @@ -20,7 +20,7 @@ in <>. Here is an example configuration: ---- metricbeat.modules: - module: kvm - metricsets: ["dommemstat"] + metricsets: ["dommemstat", "status"] enabled: true period: 10s hosts: ["unix:///var/run/libvirt/libvirt-sock"] @@ -39,5 +39,9 @@ The following metricsets are available: * <> +* <> + include::kvm/dommemstat.asciidoc[] +include::kvm/status.asciidoc[] + diff --git a/metricbeat/docs/modules/kvm/status.asciidoc b/metricbeat/docs/modules/kvm/status.asciidoc new file mode 100644 index 00000000000..5fea3653349 --- /dev/null +++ b/metricbeat/docs/modules/kvm/status.asciidoc @@ -0,0 +1,24 @@ +//// +This file is generated! See scripts/mage/docs_collector.go +//// + +[[metricbeat-metricset-kvm-status]] +=== kvm status metricset + +beta[] + +include::../../../module/kvm/status/_meta/docs.asciidoc[] + +This is a default metricset. If the host module is unconfigured, this metricset is enabled by default. + +==== Fields + +For a description of each field in the metricset, see the +<> section. 
+ +Here is an example document generated by this metricset: + +[source,json] +---- +include::../../../module/kvm/status/_meta/data.json[] +---- diff --git a/metricbeat/docs/modules/windows.asciidoc b/metricbeat/docs/modules/windows.asciidoc index e343a359dcc..60faf2cf642 100644 --- a/metricbeat/docs/modules/windows.asciidoc +++ b/metricbeat/docs/modules/windows.asciidoc @@ -5,8 +5,13 @@ This file is generated! See scripts/mage/docs_collector.go [[metricbeat-module-windows]] == Windows module -This is the Windows module. It collects metrics from Windows systems, -by default metricset `service` is enabled. +This is the `windows` module, which collects metrics from Windows systems. +The module contains the `service` metricset, which is set up by default when the `windows` module is enabled. +The `service` metricset retrieves status information for the services on Windows machines. The second `windows` +metricset is `perfmon`, which collects Windows performance counter values. + + + [float] @@ -24,11 +29,14 @@ metricbeat.modules: period: 10s perfmon.ignore_non_existent_counters: false perfmon.group_measurements_by_instance: false - perfmon.counters: - # - instance_label: processor.name - # instance_name: total - # measurement_label: processor.time.total.pct - # query: '\Processor Information(_Total)\% Processor Time' + perfmon.queries: +# - object: 'Process' +# instance: ["*"] +# counters: +# - name: 'Disk Writes/sec' +# field: physical_disk.write.per_sec +# format: "float" +# - name: "% Disk Write Time" - module: windows metricsets: ["service"] diff --git a/metricbeat/docs/modules/windows/perfmon.asciidoc b/metricbeat/docs/modules/windows/perfmon.asciidoc index f3f53901205..20df688c5eb 100644 --- a/metricbeat/docs/modules/windows/perfmon.asciidoc +++ b/metricbeat/docs/modules/windows/perfmon.asciidoc @@ -5,8 +5,6 @@ This file is generated! See scripts/mage/docs_collector.go [[metricbeat-metricset-windows-perfmon]] === Windows perfmon metricset -beta[] - include::../../../module/windows/perfmon/_meta/docs.asciidoc[] diff --git a/metricbeat/docs/modules_list.asciidoc b/metricbeat/docs/modules_list.asciidoc index 38ae179698b..b162894b34c 100644 --- a/metricbeat/docs/modules_list.asciidoc +++ b/metricbeat/docs/modules_list.asciidoc @@ -109,9 +109,10 @@ This file is generated! See scripts/mage/docs_collector.go .2+| .2+| |<> |<> |<> beta[] |image:./images/icon-yes.png[Prebuilt dashboards are available] | -.4+| .4+| |<> beta[] +.5+| .5+| |<> beta[] |<> beta[] |<> beta[] +|<> beta[] |<> beta[] |<> |image:./images/icon-no.png[No prebuilt dashboards] | .1+| .1+| |<> @@ -167,7 +168,8 @@ This file is generated! See scripts/mage/docs_collector.go |<> |<> |<> beta[] |image:./images/icon-no.png[No prebuilt dashboards] | -.1+| .1+| |<> beta[] +.2+| .2+| |<> beta[] +|<> beta[] |<> |image:./images/icon-no.png[No prebuilt dashboards] | .2+| .2+| |<> |<> @@ -264,7 +266,7 @@ This file is generated!
See scripts/mage/docs_collector.go |<> |<> |<> |image:./images/icon-yes.png[Prebuilt dashboards are available] | -.2+| .2+| |<> beta[] +.2+| .2+| |<> |<> |<> |image:./images/icon-yes.png[Prebuilt dashboards are available] | .3+| .3+| |<> diff --git a/metricbeat/docs/running-on-cloudfoundry.asciidoc b/metricbeat/docs/running-on-cloudfoundry.asciidoc index e6c25d02587..2988e4d3a8b 100644 --- a/metricbeat/docs/running-on-cloudfoundry.asciidoc +++ b/metricbeat/docs/running-on-cloudfoundry.asciidoc @@ -1,5 +1,5 @@ [[running-on-cloudfoundry]] -=== Running {beatname_uc} on Cloud Foundry +=== Run {beatname_uc} on Cloud Foundry You can use {beatname_uc} on Cloud Foundry to retrieve and ship metrics. @@ -14,18 +14,19 @@ endif::[] [float] ==== Cloud Foundry credentials -{beatname_uc} needs credentials created with UAA so it can connect to loggregator to receive the logs. The uaac +{beatname_uc} needs credentials created with UAA so it can connect to loggregator to receive the logs. The `uaac` command will create the required credentials for connecting to loggregator. -["source", "sh"] +["source","sh",subs="attributes"] ------------------------------------------------ uaac client add {beatname_lc} --name {beatname_lc} --secret changeme --authorized_grant_types client_credentials,refresh_token --authorities doppler.firehose,cloud_controller.admin_read_only ------------------------------------------------ [WARNING] ======================================= -*Use a unique secret:* The uaac command above is just an example and the secret should be changed and the -`{beatname_lc}.yml` should be updated with your choosen secret. +*Use a unique secret:* The `uaac` command shown here is an example. Remember to +replace `changeme` with your secret, and update the +{beatname_lc}.yml+ file to +use your chosen secret. ======================================= diff --git a/metricbeat/docs/running-on-kubernetes.asciidoc b/metricbeat/docs/running-on-kubernetes.asciidoc index dfa6cbb25d4..7267c0f9872 100644 --- a/metricbeat/docs/running-on-kubernetes.asciidoc +++ b/metricbeat/docs/running-on-kubernetes.asciidoc @@ -1,5 +1,5 @@ [[running-on-kubernetes]] -=== Running Metricbeat on Kubernetes +=== Run Metricbeat on Kubernetes You can use {beatname_uc} <> on Kubernetes to retrieve cluster metrics. @@ -85,6 +85,15 @@ spec: If you are using Red Hat OpenShift, you need to specify additional settings in the manifest file and enable the container to run as privileged. +. Modify the `DaemonSet` container spec in the manifest file: ++ +[source,yaml] +----- + securityContext: + runAsUser: 0 + privileged: true +----- + . In the manifest file, edit the `metricbeat-daemonset-modules` ConfigMap, and specify the following settings under `kubernetes.yml` in the `data` section: + @@ -103,7 +112,26 @@ specify the following settings under `kubernetes.yml` in the `data` section: hosts: ["https://${NODE_NAME}:10250"] bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token ssl.certificate_authorities: - - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt + - /path/to/kubelet-service-ca.crt +----- +NOTE: `kubelet-service-ca.crt` can be any CA bundle that contains the issuer of the certificate used in the Kubelet API. +Depending on the specific OpenShift installation, this can be found either in `secrets` or in `configmaps`. +In some installations it can be available as part of the service account secret, in +`/var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt`.
+If you are using the https://github.com/openshift/installer/blob/master/docs/user/gcp/install.md[OpenShift installer] +for GCP, the following `configmap` can be mounted in the Metricbeat Pod, and `ca-bundle.crt` can be used +in `ssl.certificate_authorities`: ++ +[source,shell] +----- +Name: kubelet-serving-ca +Namespace: openshift-kube-apiserver +Labels: +Annotations: + +Data +==== +ca-bundle.crt: ----- . Under the `metricbeat` ClusterRole, add the following resources: diff --git a/metricbeat/include/list_common.go b/metricbeat/include/list_common.go index f15d6a4be16..ced01fa0d57 100644 --- a/metricbeat/include/list_common.go +++ b/metricbeat/include/list_common.go @@ -92,6 +92,7 @@ import ( _ "github.com/elastic/beats/v7/metricbeat/module/kibana/status" _ "github.com/elastic/beats/v7/metricbeat/module/kvm" _ "github.com/elastic/beats/v7/metricbeat/module/kvm/dommemstat" + _ "github.com/elastic/beats/v7/metricbeat/module/kvm/status" _ "github.com/elastic/beats/v7/metricbeat/module/logstash" _ "github.com/elastic/beats/v7/metricbeat/module/logstash/node" _ "github.com/elastic/beats/v7/metricbeat/module/logstash/node_stats" diff --git a/metricbeat/magefile.go b/metricbeat/magefile.go index d100e555c78..6e78e1559b9 100644 --- a/metricbeat/magefile.go +++ b/metricbeat/magefile.go @@ -31,6 +31,9 @@ import ( devtools "github.com/elastic/beats/v7/dev-tools/mage" metricbeat "github.com/elastic/beats/v7/metricbeat/scripts/mage" + // register kubernetes runner + _ "github.com/elastic/beats/v7/dev-tools/mage/kubernetes" + // mage:import "github.com/elastic/beats/v7/dev-tools/mage/target/build" // mage:import @@ -46,27 +49,21 @@ import ( // mage:import "github.com/elastic/beats/v7/dev-tools/mage/target/unittest" // mage:import - "github.com/elastic/beats/v7/dev-tools/mage/target/update" - // mage:import _ "github.com/elastic/beats/v7/dev-tools/mage/target/compose" + // mage:import + _ "github.com/elastic/beats/v7/metricbeat/scripts/mage/target/metricset" ) func init() { - common.RegisterCheckDeps(update.Update) - test.RegisterDeps(GoIntegTest) + common.RegisterCheckDeps(Update) + test.RegisterDeps(IntegTest) unittest.RegisterGoTestDeps(Fields) unittest.RegisterPythonTestDeps(Fields) devtools.BeatDescription = "Metricbeat is a lightweight shipper for metrics." } -// Aliases provides compatibility with CI while we transition all Beats -// to having common testing targets. -var Aliases = map[string]interface{}{ - "goTestUnit": unittest.GoUnitTest, // dev-tools/jenkins_ci.ps1 uses this. -} - -//CollectAll generates the docs and the fields. +// CollectAll generates the docs and the fields. func CollectAll() { mg.Deps(CollectDocs, FieldsDocs) } @@ -81,8 +78,9 @@ func Package() { devtools.UseElasticBeatOSSPackaging() metricbeat.CustomizePackaging() + devtools.PackageKibanaDashboardsFromBuildDir() - mg.Deps(update.Update, metricbeat.PrepareModulePackagingOSS) + mg.Deps(Update) mg.Deps(build.CrossBuild, build.CrossBuildGoDaemon) mg.SerialDeps(devtools.Package, TestPackages) } @@ -108,12 +106,6 @@ func Config() { mg.Deps(configYML, metricbeat.GenerateDirModulesD) } -// Imports generates an include/list_{suffix}.go file containing -// a import statement for each module and dataset.
-func Imports() error { - return metricbeat.GenerateOSSMetricbeatModuleIncludeListGo() -} - func configYML() error { return devtools.Config(devtools.AllConfigTypes, metricbeat.OSSConfigFileParams(), ".") } @@ -139,18 +131,25 @@ func MockedTests(ctx context.Context) error { return devtools.GoTest(ctx, params) } -// Fields generates a fields.yml for the Beat. -func Fields() error { +// Fields generates a fields.yml and fields.go for each module. +func Fields() { + mg.Deps(fieldsYML, moduleFieldsGo) +} + +func fieldsYML() error { return devtools.GenerateFieldsYAML("module") } -// ExportDashboard exports a dashboard and writes it into the correct directory -// -// Required ENV variables: -// * MODULE: Name of the module -// * ID: Dashboard id -func ExportDashboard() error { - return devtools.ExportDashboard() +func moduleFieldsGo() error { + return devtools.GenerateModuleFieldsGo("module") +} + +// Update is an alias for running fields, dashboards, config. +func Update() { + mg.SerialDeps( + Fields, Dashboards, Config, CollectAll, + metricbeat.PrepareModulePackagingOSS, + metricbeat.GenerateOSSMetricbeatModuleIncludeListGo) } // FieldsDocs generates docs/fields.asciidoc containing all fields @@ -172,12 +171,38 @@ func CollectDocs() error { return metricbeat.CollectDocs() } +// IntegTest executes integration tests (it uses Docker to run the tests). +func IntegTest() { + mg.SerialDeps(GoIntegTest, PythonIntegTest) +} + // GoIntegTest executes the Go integration tests. // Use TEST_COVERAGE=true to enable code coverage profiling. // Use RACE_DETECTOR=true to enable the race detector. // Use TEST_TAGS=tag1,tag2 to add additional build tags. // Use MODULE=module to run only tests for `module`. func GoIntegTest(ctx context.Context) error { - mg.Deps(Fields) + if !devtools.IsInIntegTestEnv() { + mg.SerialDeps(Fields, Dashboards) + } return devtools.GoTestIntegrationForModule(ctx) } + +// PythonIntegTest executes the python system tests in the integration +// environment (Docker). +// Use NOSE_TESTMATCH=pattern to only run tests matching the specified pattern. +// Use any other NOSE_* environment variable to influence the behavior of +// nosetests. +func PythonIntegTest(ctx context.Context) error { + if !devtools.IsInIntegTestEnv() { + mg.SerialDeps(Fields, Dashboards) + } + runner, err := devtools.NewDockerIntegrationRunner(devtools.ListMatchingEnvVars("NOSE_")...) 
+ if err != nil { + return err + } + return runner.Test("pythonIntegTest", func() error { + mg.Deps(devtools.BuildSystemTestBinary) + return devtools.PythonNoseTest(devtools.DefaultPythonTestIntegrationArgs()) + }) +} diff --git a/metricbeat/metricbeat.reference.yml b/metricbeat/metricbeat.reference.yml index 1adafc85f44..9dcfe20071b 100644 --- a/metricbeat/metricbeat.reference.yml +++ b/metricbeat/metricbeat.reference.yml @@ -556,7 +556,7 @@ metricbeat.modules: #--------------------------------- Kvm Module --------------------------------- - module: kvm - metricsets: ["dommemstat"] + metricsets: ["dommemstat", "status"] enabled: true period: 10s hosts: ["unix:///var/run/libvirt/libvirt-sock"] @@ -848,11 +848,14 @@ metricbeat.modules: period: 10s perfmon.ignore_non_existent_counters: false perfmon.group_measurements_by_instance: false - perfmon.counters: - # - instance_label: processor.name - # instance_name: total - # measurement_label: processor.time.total.pct - # query: '\Processor Information(_Total)\% Processor Time' + perfmon.queries: +# - object: 'Process' +# instance: ["*"] +# counters: +# - name: 'Disk Writes/sec' +# field: physical_disk.write.per_sec +# format: "float" +# - name: "% Disk Write Time" - module: windows metricsets: ["service"] @@ -1276,6 +1279,27 @@ output.elasticsearch: # The pin is a base64 encoded string of the SHA-256 fingerprint. #ssl.ca_sha256: "" + # Enable Kerberos support. Kerberos is automatically enabled if any Kerberos setting is set. + #kerberos.enabled: true + + # Authentication type to use with Kerberos. Available options: keytab, password. + #kerberos.auth_type: password + + # Path to the keytab file. It is used when auth_type is set to keytab. + #kerberos.keytab: /etc/elastic.keytab + + # Path to the Kerberos configuration. + #kerberos.config_path: /etc/krb5.conf + + # Name of the Kerberos user. + #kerberos.username: elastic + + # Password of the Kerberos user. It is used when auth_type is set to password. + #kerberos.password: changeme + + # Kerberos realm. + #kerberos.realm: ELASTIC + #----------------------------- Logstash output --------------------------------- #output.logstash: # Boolean flag to enable or disable the output module. @@ -1544,6 +1568,9 @@ output.elasticsearch: # never, once, and freely. Default is never. #ssl.renegotiation: never + # Enable Kerberos support. Kerberos is automatically enabled if any Kerberos setting is set. + #kerberos.enabled: true + # Authentication type to use with Kerberos. Available options: keytab, password. #kerberos.auth_type: password @@ -2130,6 +2157,27 @@ logging.files: # never, once, and freely. Default is never. #ssl.renegotiation: never + # Enable Kerberos support. Kerberos is automatically enabled if any Kerberos setting is set. + #kerberos.enabled: true + + # Authentication type to use with Kerberos. Available options: keytab, password. + #kerberos.auth_type: password + + # Path to the keytab file. It is used when auth_type is set to keytab. + #kerberos.keytab: /etc/elastic.keytab + + # Path to the Kerberos configuration. + #kerberos.config_path: /etc/krb5.conf + + # Name of the Kerberos user. + #kerberos.username: elastic + + # Password of the Kerberos user. It is used when auth_type is set to password. + #kerberos.password: changeme + + # Kerberos realm. 
+ #kerberos.realm: ELASTIC + #metrics.period: 10s #state.period: 1m diff --git a/metricbeat/module/aerospike/fields.go b/metricbeat/module/aerospike/fields.go index 4e03f23d432..0a063636e18 100644 --- a/metricbeat/module/aerospike/fields.go +++ b/metricbeat/module/aerospike/fields.go @@ -30,7 +30,7 @@ func init() { } // AssetAerospike returns asset data. -// This is the base64 encoded gzipped contents of ../metricbeat/module/aerospike. +// This is the base64 encoded gzipped contents of module/aerospike. func AssetAerospike() string { return "eJzUmMGO4zYMhu95CmIve2nyADkUWLSXHnZRFL0VxYCR6FgdWTQkalIDffhCdjzj2HKSycxuPDrkYEvk/8mkRGYNj9RsAclzqM0jrQDEiKUtfPrSP/u0AtAUlDe1GHZb+HkFAPD8HirW0aalnixhoC3scQVQGLI6bNvJa3BY0amjNKSp03TPsT4+yXg6NTU0l35DjYqe3+RMzprtRs7IKUk/xjKGUpQ15OTk1ZyWC3rS+KW1BkFQwmb0NqdiqESTJaHJ63NqrlA0UNU5APHoAqq0IOSVnlM7VEzes8/O6EVbdvuZCVfoTuNbrHbkgYvjl8pSSIkCBRpLGg5GSkDXicuRDcKQ5aHg6PRiGDyFaIU0GAeY9EGr7zxGiEpRCD8M4uiviPYMz3nJYiriOE677yf50r4nPRo4ylR2L9kT5uLkvZIzmf/oqTlm6FRcCIW7JeFE7SQDP0z+TYJnkdmX3/ArUu/gzXe9GFv7Hz35MhA3Xov3j+cpyyIDem7LZ0L6pdB7MmoczzeXnL+a8AhB2OOeurB9Zd2JT2gs7ixtapXbwk5ZUGhJPxSWMTepYF+hbKEmr6b19BUYaXwlDNFT2kWCyjhTxQoUOzH7yDGAblFTyQ+oPIcAaG37NBwLpr4lmD9LCk/3B/29W5y+GBcdlsIalZGmFZi8gJQmXEMkLGg3u0Yol7Jng7+HmVt8BcqfyXtn4Jnl+ImsZYXpKhUewUB7+KcnrM+QxUB6gWBJFuya65l6nvJQPew8oSppXO10NDtmS+hedwD8VoD4SD8NOvsSA/SO4HNp9uX6gEJ+/VfC+K+iin3z97pW8vliqPXiu0XvdWp9ba2dnluvbZgXmMrdJl1M5tfEv0bBuyXBl4qjkwEZKxVr08V/UvZGOOM0/btMunQHtfLeiBgWzBhIsdPom46UwhtZF3UVHXlvPq3Tb/a4e6TmwH58gl8Q+O3Z88TuSyesaVNyyP8beZtX1gQTkycO3xkzOZwl5N0/pCYF6s2XyB+k2OuZZu3S7VFhEMp1aWfD9IowfGkT/FHfMNbgUBpVAvpUw4p5oqOQTIdzUuD9EKHGjdPk5TyYzZMgXD+0rVD+w76lqBnLMQFU9J6c2Kb9eyYVmQfj9l0vFjar/wMAAP//lvJazw==" } diff --git a/metricbeat/module/apache/fields.go b/metricbeat/module/apache/fields.go index 619e56f1a83..332b5f2c1a0 100644 --- a/metricbeat/module/apache/fields.go +++ b/metricbeat/module/apache/fields.go @@ -30,7 +30,7 @@ func init() { } // AssetApache returns asset data. -// This is the base64 encoded gzipped contents of ../metricbeat/module/apache. +// This is the base64 encoded gzipped contents of module/apache. 
func AssetApache() string { return "eJzMl8GO2zYQhu9+ioHOjZE95OJDgTQB2qKLNtjdoIeiUGhqJBOmOSyHWsNvX5CUVa1XUmzXDKLTwlr9/zfD4Qz5BrZ4WIGwQm5wAeCV17iC4n38oVgAVMjSKesVmRX8uAAASC/hl6enTx+B0T2jgx16pySjZ5CkNUqPFdSOduA3ePxij+vu/5cLAN6Q86UkU6tmBbXQHAgcahSMK2hE+B/0XpmGV/BXwayLH6DYeG+LvxcAtUJd8SoivQEjdjgIJDz+YIOOo9Z2v4wEE54v6bMvIMl4oQxH6C4k8BvhYY8OgaUT9hhXimnZiQxhhkDshW+5/3kMKjwvoz4+E7gROQlfhjxYivQ5WNH0IYyFMQxlQ+zDXy9eHgPa4mFPrjp5NxPAoJCOwstRW09e6FJIiczIo+aaTHOZ81MQBdPu1uiAakjy4PCfFtnzHMl2ffDZOLZKU9RP+6QaBzlilhZdyShHYVgKjVVZaxL+MqiHTh4sOmCUZCY4ImkmiJ9iFs4n6HKSjaLTH8fYk9ui4+W65cONKuP3viaCaO8wa68qPb47/499EJ23b61XE23htM+d4fw5qsUGdeI33p6GJGm4lKNAs9k4gys8j2nYJX1QpqvNV5xfyc3NUcbXRdr2Vovy4dPn61ZEkzgdCnDOvjwzC4HrnkQ1swCMLjNAsIiBTlPwgT3uMnI8RoOw5NMQcqN05dCU3yAnVPd2MT9nUGXP0SlXMpzYO2QMyqA2Puqv2UO95HVbKR4/MnSSdAYZBDy9VoIPRi73ToVDeQaU90F/gAKd1deItoi2FFo95+i0CSpYYAXR5JJkSU2cNVmaGKtpoplGfHUlh44L4hmdaPDiMi7uimv3eHitTFPWQnpyK7h7+/a61A0DgJpcvBlpwR52yrQepxe1ePc907/r+Geqsrj7riO4mwihH6SSHK5JvLpqXl3Mj73i8fp88anTCxe6VPnKGm5x0OvUobXTq+pQVIFg/BZ0C4yH5DBxRX6RDjQdi9Wnt6GbJCTpw0PQn8YIDTvXSPgN0aZRMO1fGS410TZLUXw0DPdRfGYhutFT/jcbMpB8SCbnjURNTZNnGN5PKB+dGyck1q3Wh7JWRvEmD8bPvQ30NtPpCFfrUmoUJkuR/Bpu7p38zKKQDYdvTTm6xh82nLM1zfWLvYhnvLIml7dU/0xGceCcVa5ZT9zLxb8BAAD//2GFvEU=" } diff --git a/metricbeat/module/beat/fields.go b/metricbeat/module/beat/fields.go index 33bbffd7dc0..58ff1927451 100644 --- a/metricbeat/module/beat/fields.go +++ b/metricbeat/module/beat/fields.go @@ -30,7 +30,7 @@ func init() { } // AssetBeat returns asset data. -// This is the base64 encoded gzipped contents of ../metricbeat/module/beat. +// This is the base64 encoded gzipped contents of module/beat. func AssetBeat() string { return "eJzcl0+L2zwQxu/5FIPP7+YD+PAWSlvYSwtloYdSimxPvGJljSuNdvG3L5LXiWPJdtJmE6hOiRU/z0/zR1Lu4Am7HAoUvAFgyQpzyN6j4GwDUKEtjWxZks7h/w0AgJ+ChiqncANgUKGwmEMtNgAWmaWubQ7fM2tV9h9kj8xt9sPPPZLhnyXpnaxz2All/fs7iaqyeVC+Ay0a3LP4wV3rtQ259vVJguhYZawkq/2jQesJuxcy4+dJxX6Etd5/2EbCXusC0v69WNyy4Fh9HIRTtKcqx5kaxjRyY45GaFFjg5q3qEWhsDr62QBWECkUejK3gOfHvYUSNRuhRjbwavMujRNqbluS05wEkZqxRnMeyGfXFGiAdq/6FlJrHRjIcet4678kEeISOAVBNOgBenFwFisoupDFJMQvhw7fiCFoQ4FS12mQcZHaixSp/asidS3LBreNTcZCka7PC0TA6kWTfsbpYFiTIcdS46WMD4V4kPZu2qdC6vlyULIopnNz+TiB41MINpTUNKSBCYRSwXy60FRWIOqVaHoJ7QQ8P770jTItnjUwmN/AY7x0F50B6cdD1446exEIn1EnVwOrETsD6KN3gbCFoplzW4rfGFmUT9GREFMnOuEPwOGoQ/pgeQBNLwqreoHjQMvyeS7nb4vbWy+neHT9EVw+RtvKdUgD4irBQFoZatvblsAawh7VtUqWgm8ZV3ugWAfeCRlfuK4Ku0Kw30mJGqG7m5Aykb8+dqe2FhMLdUXSB+8HehLZxXPAoJiL+cVOga8oqtnTE844AYruVg0VnJeCNTpZjaHZs+5tKT3fkv+A+GIkL19HLpD1b97kn0i7Dxfj9B9nzHnDzIeMTgGWPvwOAAD//2j1+Zk=" } diff --git a/metricbeat/module/ceph/fields.go b/metricbeat/module/ceph/fields.go index f401f784976..a2623b56a06 100644 --- a/metricbeat/module/ceph/fields.go +++ b/metricbeat/module/ceph/fields.go @@ -30,7 +30,7 @@ func init() { } // AssetCeph returns asset data. -// This is the base64 encoded gzipped contents of ../metricbeat/module/ceph. +// This is the base64 encoded gzipped contents of module/ceph. 
func AssetCeph() string { return "eJzEm89v47oRx+/5KwY5tUDitlcfCuwvYIM2m2Czix6KQkuTI4k1RRIkFT//9w+kZNnWb9u0V6f3bO93PhoOh8Mh8whr3C6Bos7vABx3Apdw/wl1fn8HwNBSw7XjSi7hn3cAAP4rKBQrBd4B2FwZl1AlU54tISXC+k8NCiQWl5AR/xt0jsvMLuG/99aK+we4z53T9/+7A0g5CmaXQfkRJCmwYfGP22qvYlSp6096iPzzy/+jX0CVdIRLCy5HKNAZTv1/EwcbNAiWGqKRQWpUAZ++vH5d1AKHGEcoorQOTcK4XTdf9mGNoPlnQOfYT7unDXMIRN4JF2QlcLHaOrRHv9lxCSWz1hcjaP75sFOFoAoqDQ6sqVs/T5UpiFtCF2AH6ZQjIirgD68YB660yKKy/bTIzkdrR1qORLg8Rqx1lE6PNvWOhgiRWEdc2e+vNW43yrDTXPZS6UKlO+a2JqR4gTRHurYL1IrmkcbumWh4R2O5klNmjSolW7wTUWIk4404BO15ALGH4u1oCI5B2rHZsd0fm/1xNiNiO/pjsdk3bOePRVjVal+MBoQhacrpwiBhyUlZZDgzTcNV/gFvFFxuVJnlunSg0YBFqoZip2bdGO7w5rDB6hm0wbNKJ9oHBNJY43voQq60Pc15V+OpvDQHqOBWC0JxEVbXyBg7cdAZyLJYDWThhkGt/o/UxVpBOxS1/CwUQxxXvSCWEoEsSYUibiC+NRqKsv3tqbhdhB0lw8wQhuwqg7YTnxi0huE6g9ZQzBi0BuX3DVqDOzxoOlsw4sjNM6bOwNsdqV11tggbgJujkeMNwhBciPKbw7n95mAIzBf9N+cqm43BAFYoeDCUdJj4DyNNzdcsVNZ4+LtxBKrKzoQ61/pbrjYWcrWBgsgt6MwCMQhc1lAqnXz1Dl/kYq8eIDtd7ynLFmkp+lP3SimBpE01YfzJelHoiB5alEjMVax64QnTZZEoy2KtEq1g8Mo+Eqa2ezuSUl8bxoemqoCa8Pz5OgrF5e+Aevo2CmWwIFojS3R2a7LvX54/vL5++TzIF3PHHrTaNUZTFGYmidWyszjc/2t2uCt0Y72UPq7LGzxHZMNdnhPYlGV+f5NeQvXy9tlXZmHdlHTffVVptbnuTve5pIe0vN3emBdHHu6pP0B9GNsFVUXBXSKIQ0m3SXHeHPoUVKBW8Ymu6F/7K6NEa7G91OYHLzLbZOs9Zaz3lPPf80ybnfeU3SZqE8tKVU3L6WbVVEQrVbUpYwdyQBys+Wa08l492XDl5OXPnCxBeWC2UMFRuoSrxBDXz17tA0fDJ4jA099e/C6sU173ue2QYd9762nIjL6mf2rXLuFjaNmHhtBA6+XQ6EETLYJVr+ZQzjE83g+bb/i7f1GlMex75WC7qfvOEWz/J3S5xo2357AziBevkj1C56yPYTZFKSj6lE4mUpI7NbuSOLkdf6xfZz9GZqe2/Rml7uSBy08o6+7P7sTi+aW/Lu44By47J/laOWPC6v7V16vob/6vj1Pmq4PXaKarU9dps+FINZrVcJ46bVQQ65JSM+Kwf5lj3QVq6n15gbDJUcKGWOjT3hm/ZOHu2v1GCpx6XeuUwaqmWQiVRT3B/rfK9gfYbYA5Z+qHcAW3NCrdM7c0Hp61Lird29uPeHBXvzhxKeDkpDsj+Ih1nbl20D1I2PS29PQTZ1/IK8vAL8VQWpIhcFn54rgVeIXNaNcHoTWnGLYVr5NuGmuDuweG75xiQgWxEa8aNHa9xgNwITAjIvwfcElFyRByxh7AWgbo6GJkjfPhOtf5F7TymxCpzhjelSgHXNZcK7olVRW4I1RnXheLwbY/Mhrh01kiyyLSLLLtgwcQivq0UnUseWiBDw/eULF6rRPJ1ijWGiTD3ix46XaoyXheKOS6yfx5s9SnDKxKukZ34yTYsjuYDr2Zq1oeSYjKsgfIlXUPYJRyI3lxq/Hcjs9syKGuUM4FM9h/PneWk2rDO2EQ3HvAoiYmzOjVFqgqCtLvC2pKmycb5FneP6f7JvPc1SuIQ4/4fvXUAzvP08chWOwK7kzhH9wO3Og465iQ28qidVyISt2HgVTuL/943KJ9gL8/SvXX/lxueEHMNiFpyiV321iO97uiytu+gjVIGJf1TYlwj7u2O5jcDY4Ewhm3NnNsJD3QkNnYFzX3d2VDSuDOr3elYLBCKLUfJaY2/YfY1ynnvCMqZQjKDZrg6zmFXEpc3rkkfSGPJgZlna/Chrpa9FuprLPARunvjbb2br+wPrFdS8GD3WBFPexiDJqsD4Ru+VcMHZZ5m17PGffG3LdwaB2mSX1Vbp63bvAXCxf46ArNv/XqiOvPAAAA//+0W4ni" } diff --git a/metricbeat/module/consul/fields.go b/metricbeat/module/consul/fields.go index 3e396c8a0b3..9890d32f00e 100644 --- a/metricbeat/module/consul/fields.go +++ b/metricbeat/module/consul/fields.go @@ -30,7 +30,7 @@ func init() { } // AssetConsul returns asset data. -// This is the base64 encoded gzipped contents of ../metricbeat/module/consul. +// This is the base64 encoded gzipped contents of module/consul. 
func AssetConsul() string { return "eJzcVsFu2zAMvecriN7TD8hhwLbDdlkLdL0XtMzY2mTRIKlu/vtBdtzYiZ2m2IAN0yWBJD6+90gT2sJ36nbgOGoKGwDzFmgHN8PGzQagJHXiW/Mcd/BuAwDwsT+EhssUaAMgFAiVdlCQ4QZg7ymUuuvvbiFiQ5MMeVnX0g4q4dQedqYh0zCsKNrL7lJkXqcExrXAfVzvMzB8IRPvlAz2ZK4mhWbYAR/3LA3mUNgLN4CjbB/VMDoCSTH6WAHqgDbBP5Uzk5SMWx/YZqdr0tbQpog1YbC6OzsfMQvmQBgXzmcG3T+TYAgHOOA9WE0Q2GEAJXkmAReSGsmiLknRfENXq5rlfhiCcyXRqBzL8EYjtDuNuEzhEuYUt+iMlpCP6IFjtXJhpvMuNQVJ9rbHzH8aalg64MLQRyqHbsvO33+9XVXaYAjsnhyneNpHr5KaEfpM2AIX38iZQg+a/b/Uau3T4fpv5b0/TQkce9U5A2AswSsgVBRzU44mtUKqSQh8LL1DY7mFx9orNNhBkUTtYF/uJePht0gGWnMKJQhZkphPENQIyw7U0AieMSRat7ti4WQ+LnbB9aIfDhPjCHeuNDCWf01nX41//BP60H84x74pur5vDtO5FXakeqGUKAVW9OQ4BHLG8ma5MzafBjh4gVsZXdf6JCn+GZvOiRkbBqCf5FK+spxm5NFi0tNZfp09Z0we+yat0fo6VWe8atQhXdlfwHYN9ZJ/U+4uidDiXLxewTX5pjlXqzZPeaF643qlir1TwwfvI0SMrOQ4luv5R4p9/f8HU+6Oood3ZTOMATVut1bT9gdLKE87Lb/kevMU1Of322FiqKEYlbebXwEAAP//P1vjFg==" } diff --git a/metricbeat/module/couchbase/fields.go b/metricbeat/module/couchbase/fields.go index 5168ad70677..8ddf5d937fd 100644 --- a/metricbeat/module/couchbase/fields.go +++ b/metricbeat/module/couchbase/fields.go @@ -30,7 +30,7 @@ func init() { } // AssetCouchbase returns asset data. -// This is the base64 encoded gzipped contents of ../metricbeat/module/couchbase. +// This is the base64 encoded gzipped contents of module/couchbase. func AssetCouchbase() string { return "eJzMmVFv28YPwN/zKYgAfyAF/tXe/TCg6DB0DymKLnsaBpW+o61bJJ12pOK6n37gnWTLruXYiWpPD3mwFPLHI4/k8d7CI61nYHxrijky3QCIk5JmcPu+/+32BsASm+Aacb6ewc83AAD3JMEZBuPLkoyQhUXwFWz+DZjCEwXObgC48EFy4+uFW85ggWXUFKgkZJrBEvUbEnH1kmfw5y1zeft/uC1Emtu/bgAWjkrLs6j3LdRY0S6zPrJuVFTwbdP9cgBany+b//wCxteCrmaQgqDqLJICBVYUCNgEbL6zLOtEDamGZPPWPJJsfj6EdgRPn+0iJlk9Wjb4anf1+mefacilf3de9GSPtF75YPfeHeHT5yNWBH4RVy5RZge1qorptD6sm5O0WhTMWiabzddCvCdn4UOFMoND7xJc6evleWS/u2+RrGUKUT2snBSu7jC5p+aGjFs4ssCCQinaMBAEYmepFnA1fH53P2KX48dsQWKKycA/ttWcguKpcOiEH1F/0WV9V/m2lg2d6oa7qODNYcSKKh/W14JM2hPmfD0I0+PQ/7ReMAtYXQH587v7F/K2TFljZIS2oWCo3n+beNlgSTZflB73P3iG+1OSikvaYb9b+ABoxD0R+PnfZITfAC41t0u0KhWfNqidyUDWDXv3vxELfcN5QyFnMpPvM99QQP2MdZGAyfjaHsZwQlVu1FWTU6hoBmT2xqHWb81XBxPrpuSWLQuFaSpbJ2ya0lZYmy0C0cW2z6+BCAoMFmzQmOMGDWnmjrHWmXZ0Cyly2kbiBcuLkX/YQkf1ENWrqvPYL0v9ECG/W3B8QlfivCQQfx5+rA9PWLaXi5kP+/TDrHse+Hydxw7nP4Eem51zQr/Cr3nKMZOmtnv82uf2KLY/UhwrYa629DXveoZqPhVJ6gLS/uooNjvsN1UZT0fOENzd60odLbMXpds48BiXNirD3HXZfaRF/8Wpax9da3zt7TXod7Z/G4L2/opy1l7a2nOFjDZiywu4L+8GrF7mhlFrrlEQ1QE7NfDktb9StLwoTq5S8vZpT6pym1GLtzRNmxwjcZIe2VQ2X9L054hlrLdVhbXlw4rVmMx6w5me4XM9eF1rfrDbvcRlBiU7HoRDC1Dwshb8ooHH3XRpQLx/csMUKycYwg2Kw/IZW147CEvH8LRvtER3WuHJ0eqk9d5gHg2aHxIYZ7PGD69BmgLiXM6LOv50xKbNW3Gl+xanI3lAOTRgeu0I6aEgeP/pDxioAlU1Pgzq2oI8jkxS1Zw8jfatR9JxAkbsNybtJuoTZkOxSTtIR00+X+ZpfLw/1Z92Ng0NBc20ZIFdbSjN1eN1E6yQgQWD0MhEbUmSF06mn55rHVTBI0d3zzLtBYyGcS+1v1wY905lbH/MxbL0Rr16nfm4QQ2QflK+gXlmZLDFDxRdfZj+BxD2+k66dhgdQk52taAaNkfe5yt+xzV+OHldyui9uD+HOxls9J7mldONYev+PIxvps8Hu1P+Ll/5+vt75F0UXmHzw9ylwlMveeK+izST+2gb1Alo29EchWkbcRVl013JPLiKwLbB1UtYFc5sS1ysI67eOhHumMwI19M8D9SUzmCuJTrV58njKUr9yXrTVlQLDy+Oo27Obv4NAAD//w/9DiA=" } diff --git a/metricbeat/module/couchbase/test_couchbase.py b/metricbeat/module/couchbase/test_couchbase.py index 7b8ac868e7d..24e1266cfb3 100644 --- a/metricbeat/module/couchbase/test_couchbase.py +++ b/metricbeat/module/couchbase/test_couchbase.py @@ -7,13 +7,9 @@ import metricbeat -@unittest.skip("See https://github.com/elastic/beats/issues/14660") class Test(metricbeat.BaseTest): - # Commented out as part of skipping test. See https://github.com/elastic/beats/issues/14660. 
- # Otherwise, the tests are skipped but Docker Compose still tries to bring up - # the Couchbase service container and fails. - # COMPOSE_SERVICES = ['couchbase'] + COMPOSE_SERVICES = ['couchbase'] FIELDS = ['couchbase'] @parameterized.expand([ diff --git a/metricbeat/module/couchdb/fields.go b/metricbeat/module/couchdb/fields.go index a6fc61d4264..5c0e6f2d20b 100644 --- a/metricbeat/module/couchdb/fields.go +++ b/metricbeat/module/couchdb/fields.go @@ -30,7 +30,7 @@ func init() { } // AssetCouchdb returns asset data. -// This is the base64 encoded gzipped contents of ../metricbeat/module/couchdb. +// This is the base64 encoded gzipped contents of module/couchdb. func AssetCouchdb() string { return "eJy8mEtv6jgUx/d8iiP2lcJrURaVWkrb0fSBplSjWUXGPhCriZ2xnSLup79yIJAUaK5b2Vkm9vn/ch5+nAt4x80YqCxowhYdAMNNimPoTuyb25tuB4ChpornhksxhqsOAFTjIZOsSLEDoDBFonEMK9IBWHJMmR6XQy9AkAzrEvYxm9wOVrLId28aKpPKPhrFqd4NqZutm9aoPlDtX5+yfqRwVfsAMJHCEC407H57ZxK0IUbXRjZ/s3o+c9XZEmNy1vhyDq8F0T4P8/msZOLaHNzyFUYd5YPjOlZI2OeJB6ZUitWJjy1Y5fNcZAtUIJelDjR1zjItivQ9Vvh/gdr4wDpQWSU4UjoLRlOOwuiKjYtVTBMiVuiXcicLS6mASmG4KGSh4Uj7LLfBLJeKqE3sNdwH4r2gU9yDhLwsl69D3ijTKthxhiaRR377WdnubMNp2221O3mZ/effV1bFoUYepte3/qGsigPU7OV17h/KqjhA3U4fp/Opf6ytjgPY/TSAs+6nLr6avYWI31sbUnNlsJtuoWMq2dEG8PPdvNBQGv7+1t7tR1HXv9f6UQQvf4NCnUuh/2Q76vajXhCwHkwUEoPMka4fhK4P15Ri7oo3COK8QdSDJ/mBDGaoMiJQmHTjyDkMwjmEZ2ngSTK+5I6uHAYpkGEUwQ1h8M9us3cjDBHsYdSDN0EKk0jFfzk7cRAEcQB3Ui04Yygc+bzk4THgNhHvZCFcHTgK4sARPJXHzBLzOk3l2jnSl0FAL+3Ve5ly6lgrvRDr9rDXh5lCKgXjdhrcEZ46OnIUZN0ZRRH8JQwqQVJ43fYupkpJ1cZ6ujvTpPzG0abqD337QMOIIQuiMV4rbjzfuQ3PUAPZa8KaaNheuFl7hGWOIq6m+gW1UnAsdRbNrvIxJTTBOOPaN51VQ2E4JWWtlLrwSfcsanXztrHwQPmIYmWSknJ/EedCc4b7jt+am0QW9nRBE/4vLtqZ9zkaoMlSpaikRYbClClqZWGpZFbLXaekSLjnBszJlGiofl1WUsdLnnrOW6uwnyHVoQecEF1idH4HAAD//3gQco4=" } diff --git a/metricbeat/module/docker/fields.go b/metricbeat/module/docker/fields.go index 89cd5fc99a9..eafb7a969e9 100644 --- a/metricbeat/module/docker/fields.go +++ b/metricbeat/module/docker/fields.go @@ -30,7 +30,7 @@ func init() { } // AssetDocker returns asset data. -// This is the base64 encoded gzipped contents of ../metricbeat/module/docker. +// This is the base64 encoded gzipped contents of module/docker. 
func AssetDocker() string { return "eJzsXFuP27oRft9fMUgfCgSJjRYHfdiHAqebFFm0OVnk0j760NTIZk2RCknZcX59wYtkWaIkX2Tv7sH60bSGH+fyzZAc+S2scHsLiaQrVDcAhhmOt/Dqnfvi1Q1AgpoqlhsmxS38/QYAwA+CNsRooJJzpAYTSJXMwtjkBkAhR6LxFhbkBkAvpTIzKkXKFreQEq7xBiBlyBN966S+BUEyrGGxH7PNrQQlizx8E8FjP/cilSoj9msgInHgmDaMaiBzWZgg9s8aVCEEEwugUhjCBCo9CVLqaOqIql9WIzFgPeBqSqtkQYZGMVpNbj/7Kis/TVj70LKMiGRvrAS3wu1GquZYD0T7ufMCwSyJgQ3RgD+QFta8TIBZYmsdkzguhcRgHFdCDB4H6h0xCJslegQ7FVp8YaY4DOsFhR5TO+XUXnJ8VpbPSJIo1Brjc7P81GnvH6AS3bFk9rOp3bivHrdc9hNjHgsd/llHpKQ0s7SpiR0wLsUiMjiAzX2+SkO4BydTIJw7B0kZR136a4ej7gHcXALbl4Bqh8jF1JKsEeaIovRckArokogFJqCZoOgHmBRxAxuyGNGj7zOyQCdz0ua9vDiH8T4XwrAM4e7h2zhkt0IlkE9yaqLr15RwTGYpl6T5A58bbiFHRVE0RwdU9OAfsnqy5rRLYiKAAZ0TinFDBbhCquwJYgaLi3D2ExOYb52XiiKbo7IPWJNRqbo4JqzMMLqKu2IkbIao5uEbOHmH6VZvtcFHV6tjH4/cK9hqMUDrg/0UXKIH+zmuEVY4tmsEYE5sfOJCo3psnQZNWih9zuugPgUfaOE9x/JuVRejhCGdOn++uj6/VlFUaLLohfYo9m7gO8e8dmjyunMFcv4/bA35L2fX9Ol9RmPa4+5bUa9hnvSy3oxgz+6AHV768SH92x66KrgjhqpOA5heMXnWxpvpFdxPP41Tgyok8V3tCdurXyktsoK7TYCVqyEpFBMLZ0jO0mr7EDuA6AJaByvzS+y6dkY8CfQO3nxrWhvkQYBlUHU9fMAC/mEfdeBPx67ahxiD0I/SLS2UQmGCjnOb/ZDK1lFPrfJCtWYUZ5YmLoDMpxLHQUaWk8H9J1D4vUBt9BsbyYII6XG2bVMC3RBmRkAJQzBLYKBzq0g7rbU1E/C9wAK1daVyIQeDd4+2jTCWfnf07SeqFtFJRnHy7slKCeYKqSWdW/jb5JdTCfwg/6xMrlgrXEahTSf42fHmaaifCnFa9AbFM+DOoOcX8nwhz4gmnXM8Mnv2e2jlnUWWEbW9XN1JxHOlUlvYyxyVOzB/tpTqatHSCM+DW2tKf+HXF35tg3HHXo9Ery1Wi7hoiRPX+2czp97sN+Ucf7Yw9m31e4soJrW6qB6xccBPxhJ/T0/WhHEy5xidN1UyG32ZslA0Pp2VPd50X5foHrd+5o/DADNmTMnXTT/Y4SDUyrwMkt5ZZTNfnFE6tIUNlQAtLxta9gE4yuXfvytT42Gm2FOMMYrNi768Hz0DheY56Fmr+A9RTBbaCpmuCS+whmt/bW8sO6JI7OqkAGb0vmeX61oi4WZJl0hXI9BaTVr7BHXvgQ+1XybEENgwzkEKvoU57hjB94kljTYibXlDoV3untDwu98/vP/1318/3H14f/ev34EJbVThogmWRPt2ikJjYrP/vGA8cWoLz7KscTVzPDOnhHEmFtooJKtoLDFhcNGqywbsT6Wghaum7ASYQNNol8sNdWN52UBlEufPWBidzCBOWFD2sZ1EKJJZpHsM+lrLDoDU1AeKJC6pZgxlroHETdSPRRYmL2IUNQI31aF0zFOZ5gczs5YH1ZHEI+Q0pbTctco1NtZHYD0nZ5wbo44i64TQsQnPA+NkaymTJSgMS1m7uW0oksIu7jJuA98E+16UWHcgYcHWlqjzkL3ifW51mDm5IMr7HbCQZx3gN8BSYMZ6tNs4unS1WTK69FvwsP/1i0uYQmr41k2IoklpF2yHdU26didZ9cV6RMM9sSM2iPruQdd/6V3yWD9cM2WK1jYRxu6/bJUA+ygULgpOYtQ0coeqxUI4B0roEhMPSwPRWlLmjuOMbDtZR7lVgudkjvzUK/wzekb9vAPgrtesykR6VpvAvUhlSfgwJ7aYtNWlMbm+nU4TSfXE15MTKrMpigUTOFWYokJBcUpyNvXjM4WZNDgjOZut/zL56y/TP00TpnNOtm99G9vbDUvwLdu9sXDuOwBlDT1WWH9ao3JuutfufnRw58TW5BeIquZxlJ8o8kZHG1N4++MKoLrfM2mj0kbm+VVUFWY6CFXsBO8SmFym7VPVJc6rQo1S7nKlNtFyKo7D8XYUy/E9UZ3a8LO0mS7DTO5dBh3NdR+dhHHKW88Mr0/MPxF11cZnGclzJhbhx69evzpOtZ/JJmgrvKzmajmXYJ22dBid2FG3QVEp6ThEpDLL2Gi74Dsnzbi+PXfQI+C/TCRy0/SqIY49KUZHuLfyXhsXUPF/+7DkGtAekKyCuQYVXEFVbE0MzjZSrazDaTST7guMCPY+3AOYw9wQ5gaNZhBvShifUFl0nMv03rD0gvknYTbvFzYW4iTMWVccjKuWQFJuujgSpUereD5/+bLHFMeWOo8bhgG5Qu1SmPUgt+Xo2VhHT7UHnQcGu5oPxP2xA3EptbOZ3vWxj2X1b9qf8pxu94z8eASrfyQ/StSR9w7g6dnZv33QZVt4aoHUUGqrAhNoLFmfU4L95kWcXoPFC9NoCXNWnVwCrUS7qTrq8/jlZjwyz7jovxdUZjZVBkOE6m53yX9sGD9W90+z9mflwpzM7ihJVP9+cSC0T0AWZtwhzAldYZswazcCSsnWkQSMtn304t1F6MGQwg+usKXtx1S7u7lOwHwqzEL+EQNGlgt7sgFTIXw6AXM4pOsFTD+mXYKZy6Lj705Oub6I5xH/Lwz7fzXibmKbVyrPJ07GSizjGfwloVwmoYwaIB154w8YIGMlkvED5CWBnJhA/h8AAP//3HFiZg==" } diff --git a/metricbeat/module/dropwizard/fields.go b/metricbeat/module/dropwizard/fields.go index 3143c53ee41..5325527e89e 100644 --- a/metricbeat/module/dropwizard/fields.go +++ b/metricbeat/module/dropwizard/fields.go @@ -30,7 +30,7 @@ func init() { } // AssetDropwizard returns asset data. -// This is the base64 encoded gzipped contents of ../metricbeat/module/dropwizard. +// This is the base64 encoded gzipped contents of module/dropwizard. 
func AssetDropwizard() string { return "eJxsjk2KAjEQhfc5xSPr6TlAFrOaG7gUkZBUuoPpJFSVSHt6aRX/sJaveO/7BhxocYjc+imfPUcDaNZCDvb/EVoDRJLAuWtu1eHPAMBGvQpCK4WCUkTiNuPZ+jUAUyEv5DB6Awip5jqKw9aKFPsDO6l2u1t/U2Pdh1ZTHh2SL0IGSJlKFHfFDah+pg/Z9XTpK4Hbsd+TL7LvW7e9V7tLAAAA///zhlJc" } diff --git a/metricbeat/module/elasticsearch/fields.go b/metricbeat/module/elasticsearch/fields.go index b08f0c8122c..aadc37a05c3 100644 --- a/metricbeat/module/elasticsearch/fields.go +++ b/metricbeat/module/elasticsearch/fields.go @@ -30,7 +30,7 @@ func init() { } // AssetElasticsearch returns asset data. -// This is the base64 encoded gzipped contents of ../metricbeat/module/elasticsearch. +// This is the base64 encoded gzipped contents of module/elasticsearch. func AssetElasticsearch() string { return "eJzsXN1u47oRvvdTDHK1B0j0AL7ozfacbQpsuuhmCxRFoaWlsc2EPwpJOXafviAp2bJMWbIkx14c+y6W9c03v5yhqDzAK26mgIxoQxONRCXLCYChhuEU7n6vfn83AUhRJ4pmhkoxhb9MAAD2fgNcpjnDCYBChkTjFBZkAqDRGCoWegr/udOa3d3D3dKY7O6/9tpSKhMnUszpYgpzwrS9f06RpXrqRDyAIBwPadqP2WRWiJJ5VnwT4LgPV4VMWK4Nqsj+tb1Yor7i5l2qtPJ9ENt/9u1Q4Dop0aRRLE3PIZSmR0RqQwyOLNhhhsUKmQ6X9iRThMe/NqAP953D9646MFuiDrCr0daC/FlJrR9KxyjMGE2I/aGzma78dj9hyk89bqvUGJIU1d6lJoZNUFU4KlJcH1xttmcH7cvPE+EIcl4wbpBU8uBkHWt8i4VsJMOkWPRj8pWsKc85aHzLUSQIIuczVJaczFB510gBZoklW70kNb1LpnPJmHz/tVxQcm5xglM68sY5gxuetla3hnbC4J2aJfWWP85t6ygdvytqDIqzMtyJ87wwhU9lImP6G1BhpGO9Na3XZ64kPx5HVaUM5RhrKhKMbWmNFZI04voMmj1TjvdABXB9D07iPnsrHuZokiUeKNFIf8HkjLA4WWLymkkqzBmIf3EyYCcDVoTlaNN13/RH1z49qJwXhXx48bYIed29x/K8xUBVZrmGTwuFKO5hg9Yw96Aw/S0KErFLaJhHqIK1sLALqXYcqGsPok4FcOsmmQcC50jYtAbNszSEVWq8U9bGfhkRDUw4sVdHpbIrKB78ARld0BnDzqRSYsiZKFnoFh6VNYom40XMo4e74pgpFO7gIFd8Dkt2s206sPnu1pJm67Q3FWELjbCO1OzktT9iph2lTFFOFD2IonPQ8rI27fS2XZ21p02IiCOXahPNNibAdEiIfXXAkGu7wkpVEXmwdKFQdDvsQp8163eHMMKS9ZZjjpGm/8Ng8gdM0bZa7GLHz5HKduXaOC/ZpsNLDJJRyKXBuLxjtBY8yZXCs+TLZ49cnThyow0RKRWLQp+tBZpzx9j4Pm/D20wLcI1JbjAtGke7ZNjIUibPwn4qb4i9i1FHIQXGjyCzJKZIHsiUTFBrqTQsyQq7KdE0k52cf3WA0xOvtsEBwzrFR8unttsBLQHWP59Smejoo5agVCY5R7ErIM72zbnkyKXI0GBorB6dnpd0Mk1tpPKFt2EtaqE6l4oTM4WmmzurYimUQ7vjbBVwqEfI48Lp+mFB4IltxbYTO7rMf5Rpd/x5pTmYbfwGSUE1YO5wvYoUJnKFanPZwkXrWdW3zvsuuFTK7TSH5Fn48QplTahFCostWsyg5JmUDEl9h6pF8rPKEWitdw3L1oYsRtT5n6W2DjcQZlXZhqgFmqjBy73kPztIvyQ3etmLXUpdLycjCbbIQNJUodbwKZE5S2GG8Pht+6VU7keWT8O2SkFy3KW7SnJ/AQ/HhsxVgqP657uDPO6fQuy4/qkKHsM/Bclx/VMlGX6c5KuzznmtYFygODcN4bdebxR6t17v1uu18j+t14PblHbL3Fvm/oKZu32uxKIXORuy7nN2jomsV7fzQ9C3HIEzeJGz5m7QEDNii/V3OfOQYWkpMSR2MayjYtsP09jOcCqNQ7HddyL9VoL7Z2e4OgzjECcqVoTRNE6JwVH5PC+rZ1m8wtqdqQCkZokKCHCqNRULSwh9lNhGmfi/3Yap76WFNLafzojSmAZmjIOwtg3vkKCu3X96WK9QaSrrU/aAMHMnwgrUsFdfVrzz8tsW0//6Co9iLrs9+WzTuk3zDoRKUkEDVBkURXmJJIuooOZi5flvSDKwDPYqstWhfe2rKsHJ+rI6cLLur4KQ4vKueJLiYQR3lLpc0iNbVbp7ZTddu/Uh4kwmr4SFW/Vem4OP8xIcLDam7iyXN1qwMg8/9mRR4uEPkEc+OuJqNK2cH6kjX9XUg2uqjVt9y7nieieeK5oTfrUJoRy+WsZ4GHFw7DUYOqEtNexlxa1Ro0xKNlrW2tJZDE8Wt1fiStacFuFzVp2s9A+WHlI79FszP9g/xt7oTmhzKXQIybpSDT9xZ96bQrBKOEPyeiWMvyF57Uo5vh5DO9q8m7VtN3EltH/4xuZojdrIPEhkcM792wLfsu4aGN+y7tqyTudqRVey+Q2cAYn3vcC+5d41ML7l3qVzr7EDXiRRIhnDxEg1Whf85TNsQcNZ16EHLnkd2wMc2A7vJMAi6VsYmmZF6OL0DkS3Bah3ZLUI2euCzmx13xD9ue0ezMW5DpybgSFJ+AdlCHqjDXIIQ7cloXsKf5Edh61VFF7meXNJgKwIZWTGPpZF/c3ODN37CrEh+nVSF33CTufPEOBPSKQwhAoNBIoLYC9UkapJ2mdrVKMysVRNb/Sf/kTw0UHCIWTlAJhU1ITTqc8T0QDc/nG7oKQBJ+0i+EMqwDXhGbMK5eaBkyyjNep771hTEfvXig6qVu9Hr5S7nTQHexCh9de+Tw7J4m1yFzyDYuxsx5TNkmqg2u0rdjiyHPyvAuM893ZMjp+WHvMwwrPbSCUGu8hWyGRCjC0qoX9bMgIVd+608g8ViC6FFm/yR5P/BwAA//+sJJat" } diff --git a/metricbeat/module/envoyproxy/fields.go b/metricbeat/module/envoyproxy/fields.go index 06afeacdf12..ab451a5ef5e 100644 --- 
a/metricbeat/module/envoyproxy/fields.go +++ b/metricbeat/module/envoyproxy/fields.go @@ -30,7 +30,7 @@ func init() { } // AssetEnvoyproxy returns asset data. -// This is the base64 encoded gzipped contents of ../metricbeat/module/envoyproxy. +// This is the base64 encoded gzipped contents of module/envoyproxy. func AssetEnvoyproxy() string { return "eJzEml+P27gRwN/3UwzuaQM06SYFisIPBQ57ARKgVxTdPBR9IcbkyGKXIlVyZK/v0x+GlLS21/+SW2n1EHgdifOb0fzj0O/hkbYLIL8O2zaGp+0NAFt2tICfnr/86QbAUNLRtmyDX8DfbwBg5ylogukc3QBEcoSJFrDCG4DKkjNpke9+Dx4bOpAlF29buT+Gru2/OSJrf63d9RLFNcXx62PrybUPNlxHJZXrPnhG61MBhqJnEQaJkdPO3Ydsu3zadYkpqgY9rvZAT8OeWnN3XdRs16T65RO8uHFY3Hqml4IvaD9c/+yaJUUIFeguRvLstr1kuN1gbMi8GzRMJ1EHE6AxZKbh/BYY3UgCWRLckuWaIqwt5ldmNejgK7uCEOH+l4d3F4mbYGxlZ4IehMGtAF/FF6kJ65nwellX0IljWL9SJx1jMtfsJcOtD9z76Xf4Z9ca5FmsCUXU1UhqbVE1FFc0Ixxg2zrxR0yQZZuL2OX/C6nS6DU5N61B98GAa2RYBYZROKA3YMjZNUUyQBjd9hJ/6FiFqldjY70Jmyl1GOA3tdU1YIxW4ix0LM6NRUU4wDhaayrrKG0TU/NqZaZyXarJqOVWsW2O6vlqdvBjRIuolJeMHl2BgGVXVTm5R4JNtMzkgQNgVhtMR/JXuVWeDx2f1CpSaMmrCu3E7nmoUg+7wQRFuDAvCQTnCMmYUaNlUsUA8wIX2yIjWKlQ60L8WXqiEy/oghI6NK2jidPsGbP3nnOBkmWZ0eATNVf3pW5BFgbJ/kZCfNSoYD0st+dSb+F+A4/2QDGGCEHnOmzAdFFq8GBz4RL/jiiLn09gsfOy6qtlLxfQqIw3j0lEHiAzNS33xShS6hyTkTc42ip/3oLD7Zl4yfCp05rSRC3UFfgbigQ9RNU5QAZ0rpCfdkbfNeqRttPvSrKQ5/5PFDiTRsOaYrSGlLFR0ZNNPKNhe4Maa6BLJL4w4ICxkTSHeLox2SP3gd+SXnrr79cATWO9Gh5IqnTn0yjwEWyVAywLHSlL69BvX4NsDTc2EdydZDbURtK5Aa8IuYukujRpC36YXZ8JoCdIJSS7RObD2fg7EaKvHIBFyrHpgOVaWlhxHikHOfGld+fzv7OJyU8wLRkXnnwGMUi6agjxj3Pb6JFZR8K+tndxUvfre9JBMoTl/0izaGLl4SS93ybEx3O5/xB7hgp2wJt2CpZ4ZH4P30M+x9jn2U/25z7XucQMg59nwL3Jz1m+0jaPT06a5M+MJkeAq0FNROutX82HOkj8Adh+yjX/WO0y6uigiUPbzuWgh8KO1pbDoT38oYIiPUXlpp0LHTYDedcNGn2ebjoXpDGAvqwkqSz9EAQh1SEyrvKGMtUo27KGmt3+7LiNDg9TrjLSkQV3FzW4TUo2dk5VNiZWmqI0sK2NM3ixSIcsHbgm8PTEIAC2stl8SxLPLv2GgY11DjLay6z17OOT9q1C2Z8z2ZSb7ZdJ409w99zAngQtb1wNnjJRMA5TDGxC58vIchDYE1weXvSkNWGrkv1tIvMOpJGyfQ2IvDJ3uUjYojyqdPCedO6Fpoz8HTE5/GuC4EyfANoYpLmB4KEOWRvGeHrWWarH24AvA9fgaZOH8C80OHuQcGQO9Eqkgxv0QVZEiQck0sGb01Briml3hPWqVF/LwxBlx5fIS/j0AodCsMRERl77w/2vEGltj9I8n8Kxrk1YqYZWqBr7Ay34y6X+yCp1YNU7q6I26Hra17sTG3Bc3HjYFnxJsfrl+OKVq1HZggDXkfCMpxladiuFKUmhCn7Y+v247c3WY2O16vyjDxuvSuWeWtuGUsJVPskZCPrNb1emwbCxXEMPBSegdhu4qVOCiBgy7sOxZmgf500tujdN+FGDJhVJS2VzITx27fUKHO0fa+b206v12DWhoajmbbV3K5gkYh6aanGJQpT6ptFhXOVgRl8K22Lxhbkt/35aLO7Hpb42rVssHjgSNuXzrz//R335/PMvn/+tHr7+9zPc/vUvj6f31b1cpZfKB5XyOvOYo4ztYFNTlK1GAQGNzi1RP0qHKp9z51zmfegBUwra5gawoH6Ab7VNwBH1Yz4x6jw9taTljv7YyOvxCDevAFti6LyxuPJByt6ye7lVGI+NnlQJDOtXcx73WL9GZw1ICOUz+ypiM/wKYW2DKzYoPgB/GxLLl2/f/vXnT5Ba0r1l8rajnBTlsyHgJ5Wd75zKx2+YRNESCOVlDkqOSi+3xfvPdKBBNehzfy8BXRaYc+eMPhv90050i+vuxXfRR+KaQ5AN4XYM9wL8YdiAj9ux1LVtiJzKrwPzoSdDExJD8EOyKA9DFSJ8vLt7fx88W99RvsMH//7j3Z1wtMEnAh3MwXO5cfbU/5WzO0e07uy86NQN01i4lwaJyEt3Gun/HSVOoEOeGlUxNGDCxp/IWyP2m7s0R/Spscy7Xv17AAAA//9b1YgQ" } diff --git a/metricbeat/module/etcd/fields.go b/metricbeat/module/etcd/fields.go index b2d735b9908..2873b9f1681 100644 --- a/metricbeat/module/etcd/fields.go +++ b/metricbeat/module/etcd/fields.go @@ -30,7 +30,7 @@ func init() { } // AssetEtcd returns asset data. -// This is the base64 encoded gzipped contents of ../metricbeat/module/etcd. +// This is the base64 encoded gzipped contents of module/etcd. 
func AssetEtcd() string { return "eJzMmU9v28gOwO/+FEQur+/h1e/wbjkssJsWiwKbomiL9rBYqPQMZc1mNKMOKbvup1+M/jiy9SdO5LjVoQ0sDfkbkuKQ1Eu4o901kCi9ABAjlq7h6rUofbUA0MQqmEKMd9fwywIAqifh1uvS0gIgkCVkuoY1LgCYRIxb8zX8ecVsr/4LV5lIcfXXAiA1ZDVfVzJegsOc9lrjJbsiSgm+LJpfBnTH60tc9AWUd4LGMbCgGBajGCRDgS0FgkCoIQ0+h9f3KroEXQosTLKhwMa7/b0W6I52Wx905/cRrHhFXfDruzfQSIPUB8hJQoQL8X/aoF0segSWUFPoKe9a4wHVN605KvfU8jqmWXYePnRZex0bp4uXemv9lgIvlS+d8MFDY7APAMfrY0bgynxFAXwKKRpLGtBp4FIpYk5LC+8xFXj/7gYCfS2J5WArY+BT8MtGeG9BuxHjhNYH3hjczhTj6TBx03NJGsONU/T/GGayKOTU7lzuhUYeiAdClUFBFMA4kIxA2ZKlt7fTndnIXuKGAq5p1Ias0JJOUutRHiFWlSGQ66+YKTbHbyYv87OLNW5S7FggHUvcC2RBpzHoV7QxKIeJ8UTi0dwGE8kVjqPqzauYGlBJifZYVC+NMoXNvDT6oZKwz9nVCRKjtcrsn/4P/2vvkNOFNwcBss+rK5JTM2uGnEwYaLWT48h+4J37nJFkFADbM4C+GRYee+smPJaoDN2amkw1iGe9Wz8O7+0+2zd4jRJgIgcop0MWwRee0XKifJ4bEdLPRKq8Y3Jc8r1OqHWSPpWxIKeNW1+QsNF4KmB9hlyQr1Y4jbcOhUpYMDyfc5mcwHr61OwhZej081krkCKzIf0QVoukDd/NyXmvDN9dLuPlG6USvUrEC9qEzXdaxiw3XFMOmDH1IUepU+PxogdM/MF8p8rn4gNp0CgYU87tp5ubaadv0SYp75xKdBmq43DpeLkq1R3J8j+D5H71N6nj2Kh/TJ4aIn805VTsKrbBxE4LMIudjvVrjlXWQSg8YivnDOTPwQh1ufrCH0HGvarmbFxtddpVMUi2QnVHTjcHzc8XAw3f/iw9KRAmNnXOaPitZavUnBwME3Tni4hjtsmA2Ocvyn3Yzcm3t5WEy2XctU9yyllQOEFrvbpYwm12WilFIV3LAOSYhW8p/xCZ4Hc/bm1HsvVh1vH2thZxOXsra8hJUhcv5ORi5q7qhaqgqe1cHbHTWaDL2tYdl+XdVztDzPeNnU3nxMDhdCxKO9tszBy3rye3tj2DxKAsnflaEhhNTkxqKFSZPt7JKRaIp3RuxqV+Oa/t7jvL6PjSVu1ZPRppO7gjrhGcqooXkx+3szOIIkuUCNuMYndrGJzXBFushsKh256NUJXFeZEwj8dbZagIFgkbK2XIsIo97tgMo0sY/z2nnQw3XvoXH8oe1B5IbZZYxAayaT4mKoLhydIDRPcz31rNvsnpeDEarAheEfOIHyvOFTq9NVqygL1RyYMjtZMx69xUUAAm5Z3ucBpuUlisxF60gzTwzu7+PY5d3K2fFXhv0KczD8IzOf28sTG0ieq7zmFo8PFw9oDwZ4gKrgcw8KLJAJV1l/AxPrNBW1YPlU5Tahxp8A7YuLVtE307BTv63nGwzR8eReff4/Bmf+zhMcY0Yvkn8ZCpxratHcN+JN+vhcQHmlMMfcyoFtL9dGqcsmV0qqsLOuMd4MqX9UTWF1Q3YIPvYjsOm1VHrWn009x4NjlYPPApbXjlyAs1Qz/P16/JktCTCZrl8xjKQuMMhmb5PAYVaA5Ds3wmg88LDIRO8xaLp7McijkX08xI6Ql6PNegXPpWmECPKAcGpWxRVEZhelv/BAAA//+7EUCM" } diff --git a/metricbeat/module/golang/fields.go b/metricbeat/module/golang/fields.go index 3a81eac5023..0a3fab7f077 100644 --- a/metricbeat/module/golang/fields.go +++ b/metricbeat/module/golang/fields.go @@ -30,7 +30,7 @@ func init() { } // AssetGolang returns asset data. -// This is the base64 encoded gzipped contents of ../metricbeat/module/golang. +// This is the base64 encoded gzipped contents of module/golang. 
func AssetGolang() string { return "eJzkV9tu4zYQffdXHPgpiyb+AANdYOui7kObFkhbFCgKYyyNJDa8CCRlW39fkJIcx5G0cjaBe9GTTQkz55y5cOYOj1wvkRtJOp8BXnjJS8zX8WA+A1J2iRWlF0Yv8XEGAM1LKJNWkmeAK4z1m8ToTORLZCRdOLUsmRwvkVP4hr0XOndL/DF3Ts5vMS+8L+d/zoBMsEzdMtq+gybFJ4jC4+syHFlTle1JD6rndk5t8aHckT0e99kbtNk8Lyw8J9c95wBOQSQqlULzs3cdkkeu98amZ+9G8ITnl4I7ozAZfCEc1galNbklBefJeuyFLxYvBCmYyi+RI7g+cRXMQejMWEXh8yCXcZxiW7fKLf710vUCy5NeTOdKTkC0JrulnJEYKTnxxsJVSpGtF2ef9ul0CskbT3JTUuXO9RrHNwFjI12wj/UK0QXSyjYhNzu2kCJjL1QUtbQmYefO8Y9xOOWhXb//loI0x+bwGhYAvu2QC437HpjPBU1Mpf2goANopuupK7VlG2Rbr7Anh4LKkjWnw7g0H/wmTzZSKHExsqZUl9jWnvuEnoL8ng++y9eg415I2eLGvmCN75nKT1KaBB+bEiMVVBymlJTVJrMUzQ0yyqShPr5TIH/XGg9Cr37+FTFXq7ZTrVfDyN6vnn4g51+Wkwu/hM7hC4YyWngT/5ZshelJimk1NZTEeMOywio4aTP5rEccOQnXJc4IoyfcrlKjqIf0vwT2UF+7FPN4JDClw2FKOC7hNqHbnQJTdHh3uX+kw39X7IvUpt2Q17dT+9OObZgu/n+K945trnaez1vKq0e3cM1109rtvhBJ0Vyr8R43W09Cc4rMGtU6ftVId4UbvmmJDZc+HrhxlQqd/vfD4aF22LI0+w/Dl2hn4wpUfhMU4/RQu9sRRou4ymxc7fB181OkkvFV+1tXjofpOU/J4xW4fRPpdFNMRAEKYxd5Y29BOg1ThOMLs/KJWLuwXSNuDbcOALyJA9FPD58p8JZ+GKTetMpP7L52P1PRRp8cX7pK3B93iNbHcEQzy70ReTsI0cNYK/iLE/8eEM73qTZknHY+P7PpXS3Jn4De8I41RCNi+gG+sKbKC1P5mP3dkj1M5GjqH0AmtB9tPGr2DR/cOFIMcmh2Q9qaHY/cGqEDX42H0NE/XEl6JHPCVrm7Kkpt9N0R6WL2dwAAAP//qwVEDQ==" } diff --git a/metricbeat/module/graphite/fields.go b/metricbeat/module/graphite/fields.go index a4845943868..a8deeafd0d3 100644 --- a/metricbeat/module/graphite/fields.go +++ b/metricbeat/module/graphite/fields.go @@ -30,7 +30,7 @@ func init() { } // AssetGraphite returns asset data. -// This is the base64 encoded gzipped contents of ../metricbeat/module/graphite. +// This is the base64 encoded gzipped contents of module/graphite. func AssetGraphite() string { return "eJx8j80OwiAcw+88RbP7XoCDN+PJhyBSJxkbBJi6tzf7zJjTHvunv5YSNXuJKij/MIkCSCZZShSX2SoEoBlvwfhkXCtxEgDWBK5Od3YIBlqqSIlKCeBuaHWU49sSrWqYtQxKvR9N1/nZOejJSVtaZHgyrPYR7ydz0hch/8Ki/YDtCL5V4y2z27KkZv9yQe9uf/YMOk/AqVR8AgAA///94G6N" } diff --git a/metricbeat/module/haproxy/fields.go b/metricbeat/module/haproxy/fields.go index 531515409dc..f5cd629e773 100644 --- a/metricbeat/module/haproxy/fields.go +++ b/metricbeat/module/haproxy/fields.go @@ -30,7 +30,7 @@ func init() { } // AssetHaproxy returns asset data. -// This is the base64 encoded gzipped contents of ../metricbeat/module/haproxy. +// This is the base64 encoded gzipped contents of module/haproxy. 
func AssetHaproxy() string { return "eJzsXFtv27jyf++nGORlnYXjXjbbAgFaoE13/1s0TYLEwT78cWDQ0jgiQpFakorj/fQHJHWzTF0cW04fjl7aWNLMjzPDuXHsE3jA1RlEJJHiafUKQFPN8AyO/vp8bT45egUQogokTTQV/Aw+vQIAyO7CDxGmDF8BqEhIPQsEX9D7M1gQpsynEhkShWdwT8wzqDXl9+oM/v9IKXY0hqNI6+ToP68AFhRZqM4s8RPgJMYqKHPpVWIISZEm2SceXFVsMWpJAzXJblQ5VLlQvhDFhz42LazM9X/IURJm6ciYmEeAzEWqCyCJFAEqhQUUc62LJr/qIKtACzJrd3PETPD72o0W0Oa6TOM5ShALH8A2BDOexnvCcO0oArdYutjT0MuWMErqQkmIjgrAk803Y3oviUOlZYrPQ/3tawdimfLZPylu0N9WXF7imqiHXW3BSzhNNI1xojDYk5bPUymR64wwUA4KA8HDLnuLMRZyNYnJ02S+0v0N323EM/C91AH1B3micRoDiUXKtdkcDgSkitxb6JYojHSE8MsPjGPyNPvx5Rd4JCxFCAR/RKkxBC3ck8cda0wZjame8T2JOsfPi80tEuSwoAyVkQsY3Pm2aEcWiDiRqPxSrzvIFmh15XhcXJXtPKlzbOfan3Mbd1iLCN7bLUrZHkfJTaT6kOwk0TizNjck19KGOMfAPHpgKzLLfGEzsh7hkKodXKt1hjF5GppdYUguhDSqtIFVh7eESnAqTbXJMVbxaKEJGwDN1NDdEotSbDK8fG5vL56Ba1g5PQ+T32x3RZRH3u0xDYtnOywRkeFsf4D8WTH+k6LSymsce8lglQgeUCuPpvdCvljAQPQTmjQku1sETHd1xclUYb00KrlRrvEe5Q6RMmezkNgcjvfHps1wd+NSWBYq9dxs5hkpzKQpidi35CyzpgRiEGbD6MqvNOX3Mj+rwg6oroMpq4+PEFwjb3ZHfdL2Z+bqD7iataqvz3L7L9nLuiub3jfjzJXNJKYKJ0nQXjqogDAMZwsmSNODedslQRn4c9H+QItuAAke/mcTe2Bs8sFzwfmNrzAuqiwSRBjOmBAPaUsbpkcO2M5iFlNPI/t5DNpd/7+MzmcxxjPbv9s1p9oyIBwqeA+f9tCQ+VxEp2todwkNCAoHpdfobX0yc6uJVhAIxjDQGJoAE+/jUMafY2iiU3/S/oCrpZB1H9ZRwt1aejC6ux7D16u/L8dweXXxZQw/Pn+7nI5BSPe/0SMlx5PJpKvNvER6H/nVt32T2RXfjiSMFkLmblodW2QK5SPKtQfcR53d8FAsuabxrucl60BzojAqTx6OJ/BnBfcYdERV1sSnynbJG7BA0UZfRoJhTmIMXGj7sUpjEIs1EsUrmRh6dN4FR65nZuFeWfi3ddc5TE7XEoHRm495zjWGtx+Lhbz76GBaXf720RXTrxlVGjnKLhXmh4Q/32EdjN5YRSyoVBooV5rwAMfwFpyFGsMYA+EhKAGCdy3UCIkGODN/7XHfO6qWB4z+vLm6nP5x+dXhLpT15fP59/zTQm1CAuEr92K55XrrjfKDHbJ9sUdolHcgEqk+MCTDsR0TI0rPgojwhmTiWfuyPInPvBMoygO0HsMwhLvrk08mCBgdm39PPt1dg5aEK2podmDWkRRaHzCA51feOs4B5FTIfd0jwjJCDoqJpdJEbpYOVAEJNH1Ea+Zc5J56Ub5jnqHcPdW5c11GsJcGZH65WFOeuxq/r4AUCySq4DsGpDpCaYXAcblBKyvNlF2tFY3Ek5CqhOggovzeBa8smGSxy2YpIDERUtsAtkHViLuOr6qCCsIui5LG6YSTBh+/vfC+fc1Dph35eZ1BogvHivJ7o17kZM46wT3jzLE1w4MDHPucp3HKiDHdioa269xL1JK2NHOfD+5yw6ZLZKDFuo1nMPocodEYJ+TRB2pXxJ8fURonk+GEfOwkViAM0MKxvn3z7rRs57fbVfbY/o0qRD6M4m6yhWUcYI4BSRVmgSaVVK+MiAKUrRbmrl+tz5meXzt3Q1WVHIHYuCYMQQfJSSYpQ1sb/y9ThpNWsn9Npx10I61LwiZ2EJnQOulGEdshrPAAp6SOUWlSvipjw1Nne95LWCwKYjnxJdWRSHW574hS9J732nSZIIY9cnTBWSxqYu8BD6UUcqe+UPtWyBhM4FbEWIQcoRSdMwRrdgqIxI4+nTFbJJKtQKOMKXdTn7a/YAgGjCLXY5jjQkiXxuWWGxFj3ybXae6Q/goSSeig1ok2vuJuW0/XPFFUPBYwoTBsDpXlC49EUpEqmJPSquugmvd2vmwTGdyObU4uYM3X5tnOgSJaFWiVuY1vXNhsze02u6W9VDvysY7cy09ySR0MsiQrK/Yewiu1OhlsP5USLIxCR0QD8kCkXKM0mHlmw1quTAanhZdUEaTLTKKn23SdFi9Re1sTbVNswlhOrFRSwlJlC+ZKMuPEBVz4NxBRSgSU6MwHA4GESE2DlJGi6TVSaRABcQBzphF5NALgfgG4mqWrRQYDTbeV10sNtdWv0rZsWlAYWIJ5l6aWxSEjiXFn7qbfGw03s1a/Nkdwm5bx4nNmTRAlBkgfvZ66ITVWieCq//lK79z4IA7MgS9yg2mlFcwDloZY998h0cTvdiThaoFSAZkLOwc+X1X9/Cj7TsvE+KZJ5uyyR4/9husyFRuAMvfUnZ/8CktJdb4iMGVaEayz4SgYLQX/RcPcVEnGY4f1loyBeNxAfkEoSyUCSRJmffuCMm2WrUWW7tQM4qcoBwtN96sH837x9Py6j1PuKOO2PoirXjcZ8n6l3EZJ5aXZXWaZ4sq2oI6ERQdBhMGDPcE96jNLqLUvDh1yBvvt09DePnOcuXpsXvD26QkCEbYVplWQ714E5LvtQP72IiB/2w7k6YuAPN0O5O8vAvL37UDagPMCMF2gM0AVjBIptAgEc3HM54O92ci2M5q9k5Hh2kdlNpLxKI4AXnx+nFcOp3pDah9N3KHNZr9gnEqb7Fs0jtVQNdN+SqItprT6VEW5HrYviFrX0f31ob2so6JCy9FkpxyXvlX1BL7HIbltqrr+oL1eyuZSvX3UfgbSvENK0DGw0FNI2cBS1lO1ZhghYTpyK53AFTe5Zmdn9e7yu8P8CVL+wMWyqTf57fJb/iDlVFPC6L+0MSbdXp1//+PmxjydFUA2qDQ8fXF69T2jbdFDQpTZUMbfkBVKOB0DF5AmRu/2EwUalTalUHZK2Uh5enU3tZQdpbcnpx1d24vT86tLqL1S6VolUswZxmNbrOATiRPW5I/K6+i8JCBxkSoMj2CkgwSk0sc26b8UIEWq0RR1kVD6CEY0iBN/TQhw8b5DZu8bX6yJ5D2Mbm8vjrvE8v7m9hrWXqP8kTAaloXeCaznEE2kPnRA/9Dy4nn1ReMB7EwGYWy1SWZNR3D65tRmPZ3KCqkyNnUi+Mnpm9NGLDUxfoCRSbFe3/6YXncK80NNmB92EObt9Had1HqLZV0INgus5sTN6ZcImyP483OKCwvy95
MPlsEY6ALII6HMSLxP2Z+62bQBkE3LNoVtrlMNWogHsx8XlFMVNbjaHqW5fXxiXh0mGkxdEyhlujki9Ia5IHSIHmiZURkGGDpYfdJbco9ct0tvj9+ds+CGLTrswp2RZbJYRpRhfTwoTfpsCH/I3h/aYhqunIDzH7RXx3m9NCsnciWpvIe5Nt4LczSx3axtbEqJyKbbxJ+b1KaAq7St5yNBlB90deSI7ng1axB7k8UdBw5Doklz17p+vNtVd5sl7ZrSuqt3Tb4xgtYlmh7igWwOF6WdGk45/ceeBSgaIhA3otanI+zX254A9tVhfqJaPdXwN4Szw47QhJy1c4o8fGfHyX1Wbo80h1547UzX+S8isZgVjZFwe/JsbugIV+aul6iLNCs7/R0Qnp+AeUYemCAhzAkjPGjcv1D7Bpu3t3AYUTj2LV888Due7Ld/NpK8oSvUhil06AogbaPfLeiq18axc5azuh98WrhZEOsVC+H0UH/TEDv0sIDBF4WxHcgp3P02C5uvXHXzE+jLLcaafI7KrscuuVwUjM6v715/+dv1nXrNWeSO7+XXWGuW28UuUZZK65yLrv6Y4jr+AbfzQMHvzoVku6Tmn+GrIvF8Swf2U+q47xMa+n29a/NPAg6niT3/tkP1qnR07cqyvu4oJk/27+PaZEH2HQ4d2bH+RWN2XoyR5oRMKv8GRnlU56L/Tu6cLthJAqbaJdmQgUP67Ilzz3/+GwAA//9AvqeT" } diff --git a/metricbeat/module/http/fields.go b/metricbeat/module/http/fields.go index d8c94f37f07..ce8ab7e8223 100644 --- a/metricbeat/module/http/fields.go +++ b/metricbeat/module/http/fields.go @@ -30,7 +30,7 @@ func init() { } // AssetHttp returns asset data. -// This is the base64 encoded gzipped contents of ../metricbeat/module/http. +// This is the base64 encoded gzipped contents of module/http. func AssetHttp() string { return "eJzMlDFv6jAUhff8iqPMD+kNTBne/KanN7BVHdz4AIbETn1vaPn3lYEgkhokSofe0Tbf+Y6MM8OW+wpr1a4A1GnDCuXfxeJ/WQCWUkfXqQu+wp8CANIW2mD7hgUQ2dAIK6xMAQhVnV9JhadSpCl/oUzg8rkAlo6NlerAmMGblufUNLrvEiWGfljJZI8pl6TI156i5/Uc8Cp0mEO1EwnOL0NsTTp5cWyaP2pDYxllAj16hJcNa51s3ZQBFmselU5gCL1mmksXvPBbqh9RP7F7ZE23o82m18EyG73l/i1E+8VsUaO95Oh8N22XXsr89zxr1K2jkXuczsR/QbEMvX9QeiIweG1kdKV3/1PS79FSo6uFl3c6/hAMc+25CuOO8RGRT4TbAh8BAAD//zeEPwk=" } diff --git a/metricbeat/module/jolokia/fields.go b/metricbeat/module/jolokia/fields.go index 241a0320e41..574411f4107 100644 --- a/metricbeat/module/jolokia/fields.go +++ b/metricbeat/module/jolokia/fields.go @@ -30,7 +30,7 @@ func init() { } // AssetJolokia returns asset data. -// This is the base64 encoded gzipped contents of ../metricbeat/module/jolokia. +// This is the base64 encoded gzipped contents of module/jolokia. func AssetJolokia() string { return "eJx8kFFOwzAQRP99ipG/6QX8wQG4AkLISjbptrbX8m5RensUkoARqPM5uzN6mhOudA+4SJIrRwcYW6IA/7I53gEj6dC4GksJeHYAsF+RZbwlcoCepdn7IGXiOWCKSVe3UaKoFDCv1UpmXGYNePWqyT/Bn82qf3PAxJRGDV/lJ5SYqYdaZfe6FjW51d35h2vTHsQgxSIXRSZrPChoqaI04oPj91Ocqdge7ik2km2dvHTtf0EewvyMdcnLQULWffxe6dDB8hkAAP//TGJ6CQ==" } diff --git a/metricbeat/module/kafka/fields.go b/metricbeat/module/kafka/fields.go index 7920163da93..72ee2cdc60c 100644 --- a/metricbeat/module/kafka/fields.go +++ b/metricbeat/module/kafka/fields.go @@ -30,7 +30,7 @@ func init() { } // AssetKafka returns asset data. -// This is the base64 encoded gzipped contents of ../metricbeat/module/kafka. +// This is the base64 encoded gzipped contents of module/kafka. 
func AssetKafka() string { return "eJzUWs2O3DYSvs9TFHwaHyyfdg9zWGDXXgQT27HhOECQi8AmS93MSKRMUj3TfvqApKTWL0W1epx4TtOSqr6PxWKxWMVX8ICnO3gg2QO5ATDc5HgHL97Z3y9uABhqqnhpuBR38J8bAAD3DgrJqhxvAPRBKpNSKTK+v4OM5No+VZgj0XgHe6s245gzfefEX4EgBZ4h7Z85lfZTJauyfjKB21fTVbVT8gFV+3hK36xO//c/pwHeSKGrAhX8ZEXhXmRSFcQKwIEcEXaIAhQSBpmSBdzWYgciWM7FvqfSHBBoo89ReZl0PhiOpTseznqPm/HkcgARHFJnWJzdTOIQxhRqPQn2gKdHqYZE4vAIO6IyXCNrIUZzZmTJaWL/H83bGDoA+8XqcTrnMFApqRIq2RhpYNFFGKcKrKpkjFYSZbiVTXrztxbpU6MGOAuiuNGlE1hh+/XAfhP8a4XAGcjMeWx5RhfugbfhMg+/Br8PHSCCuV8eNLlKQKh9t0CjONV+gftQV7/5+cPvHdk2wO3QkMh1XeyQiNCK+mA/AHMgBsyBa8AjCgNcWzRikIGR0Yu1AVX4tUJtEnogQmCefK2wwkTzbxhi8uWAYL9pJqLWAk46LjoNCZRKsopikhGeI0tLVKlGKkUwxlgeihjHwwtCrafRq6FEBZOaPLEsl8QEmWVo6OFyXjTndpqcltZQVlul8Ars+nZbIiWqYocqYK4r2CieQ9A0q5mUOaduN05yJAxVijlS+3uoasTIfw/N927qNsBXguZIRLqWRi13DToatbZUvkn5gFiiShjXVAqB1CzR+EPKd04GaC7tLl0r2+CsYzr4VHK1GGPOVPz3z8PFpmxS5Kd4No3Es9DRJ0HXzJFbQ/XcbuOSy32S5ZU+pBMuN141cg/u60sctE7w0CRcJLuTQd2E1iVYLqgsuNiDlfJR1g7YKbyYhKzMOhayMnt5bRYK/0RqkK2j0khdjUqBWpM96pQH05HeZNQy2+Cv4w4XgF5h+i9AvdZ0r4TeOr0RcA1Uc8Jdl2u35+yJbLt994Pm2y7XiQqvBRe8qAq/ooiBxwOnh37dQKNgup8+aTASyPiIE+MY3g1r7YtpHDmisi5xTuecfMOOQSYVENAlUp5xWp/NNuS7VCq2hV6t4UzwzGWS60qCawNXcz5orObWmT3Hyt4kr13c5CnNSbAQ5JyLPDnnal1pLLOE1CYsKZVFwUdHh9kByyzTaDMWJ2XH22YzKym4IuF2+HedWmO0neODaHsQPIc1H0z9A/flBTF1GEJXVzXnFPWLs81fKJIGos1QuedUKqQ2gt7Bv5N/hew3W0S8Zi0WluuxcxaAUF0WQrXZiKFCr0bbPJnFn67XwkLNdh2PIcZ0JJyO7RE14+Gsvg0DzYfpOaxox+sUjoMU2hJkXNVrFYdzrXfJED6mxVPogLROX8dFLozsFFZ3aLclu5DCDIp+3gVrZp1W2siiGx8NAUYMAW1Ud7FOIjdiE9veSgvkZO8SgXb0r33OQklOK5/xEe0iBeNZhgoFtYHGPNpY069H18Ykgg0NHB7MZDcifijjteu2yJbD6zPDbrMibF9XYFgR3IN8/qs13wtkTd3Cepb1MJdb15n+3JraEIAjY98bT+r+Ldx6w2k0xtLzbBPOXi7H4YPUQ3NdSqSnahawQJviptuHz4VBJUg+2AlrgG4UCoW/1YnIlJL1SUggBl7ip0fCc7LLsdarm1bHnh9RdPpbK31U4CMG3OPyROEXp7gJPMMG2ZBmx2w5ex5CH53iZUIXbKsXzOd5L7Uby/dI7ULbfARhGDV7m6ezLH3f4Bmm8r1vSHAGtz7VH+XGHVPpeQabMtD33HmTBQDO9DyDuhXzDHb47DVPG2LeIkKfBE2XaO2kzMcFqUhm94Jx620aeNYYALgGLmheMWRNg5yLV5ZM265Cu8PB7f2vn6NGotMFH3uWQZi2RbdMcTaBgivM///bnMknKq7yZtOD2LAWuHIS5LfpxGxGl1PmqQ1u3MD1DlRjXrFHrPoax5prT9vrC3y6fFtzueRq1HZOs4fvcVrmu/or6+6faqmpunv77getu5Mmn0t3lT27pa7sGmLxxd0nMiQHUshKuL3Hy9p8WKphu3ixuk4MPaSaf8OUHBcLtHPVdT13FosCLsjTEnBTGY4GDtzsoVKxVKNgUQ2P+Yq9xd7aOUgVGnW6mIhRHFmtqu67bCXkIvI/iJDvXNR9pL95stYuk8YO43trawBXLI8lwKX7blHzfjZuE9Cvcq1Nl1JovJyBl99Agcv0kfBFH2sh719/BCsAhs+kJ/NYq1v7/b6c7/LLyrjKoOmwWsmjbvpEWb08b8RRbfe/AgAA//9LpueC" } diff --git a/metricbeat/module/kibana/fields.go b/metricbeat/module/kibana/fields.go index eeddf8d856a..fed14792f3b 100644 --- a/metricbeat/module/kibana/fields.go +++ b/metricbeat/module/kibana/fields.go @@ -30,7 +30,7 @@ func init() { } // AssetKibana returns asset data. -// This is the base64 encoded gzipped contents of ../metricbeat/module/kibana. +// This is the base64 encoded gzipped contents of module/kibana. 
func AssetKibana() string { return "eJzMmM1u4zYQx+9+ioEve0n0AD4UKNoCLYoExbZBD0VhjMWxxYYiVc7Iifv0BSnJkWXKllsLuzoEiT7+85vhfJB5hFc6rOBVb9DiAkC0GFrB8ud4Y7kAUMS51VZ0dgV/LAAAmueg9SlLR3sAnLmy7XQ2Jo3PrhHU5/76z+fpnNx//GhIBDVjjabS3I99QjLRiF+fJmL6BEnHPJOlnMvlnHfav6+5LdLJNLeIetl1uww3qVB1xWOHo3usuBcBk46lsNYFXp394wxfFoSqGAZ6VBqDl0kymmecUNOm1MwpFRMJNp7xntGYvmdYi1KUQBNe0fVQ67REXc7nVCbEFExq4tua7m67LUjzR8IQo5j1YIleyR6if4awhMWzuLazSiFhVzXFhANhxK6viaghsFL6u4g8PVM7BXlQfYp4DQj1M95yOZ+qQg0rPFWAm+6lEaSMH3BBldO4BaTzNLEmP+E//SHeikUGTvkM68fsQbGlmCUsbHAjsoQo7tbf/b41YnUcHGFI+2MV2QkweNjwZNw1FwQoVs20ZNIQRAeKNI0K1b6pcXY7q+pOLao34kCkjkH5mJhbIuPePRmHXill4Nl8RoBrEidlx1EB2X49AHZJ2AjDQ4xq2XHTMyEUx6zAntdsNnOdqB3kpdwhbY4E/r8d0kfKHui/n5T0mqmCBrprMSMRnnEJzQYxoz2reO35wuwm6rZ9mHfcHRdl4EaADWj/mqNqSk4dblS2ZbMv1UI98l39noo9mwU802RV2jDxcKsZZez6E6fYj+Xadq6UTHSsELOCkcf4u+WrGsEDFwQIYHKR5aVeazpqiWLDN167jKXRqlf/qiGSfgxMKfB1wI4UVPtkXDOlRD4zQBVJDyp2lD/GRfPVDycFQRDax1TG2GWMXCSMJ7qW2+YpR2PX8A43sE6yXDvvP1o7veGCNJEmtRAz7LQmIDLZ6FJhkl+NjNNMvxdGOvtb9cBSzWNWc9h8DOhKaS1WPysqjTiurKfGX3jrs3e1x0tfv9QctME3LRxkoAQ67y7EX4Ch580CaRIuHmcsafd/na98nUs95Q6q/dEqn5I3Lfc9T4enxhQyG5uncz2TMj1eTgq0j62E8OuaYyr8Oi0enBnXFrvwKzW1u/qtfVboGpyWq1Wpy6px0Q3L9KsM5L+rENMrA03F96rPtpuLg5i5SwrglUZn/lDwRmThTZUf9Yyxm1JM+rc7I/vba+ygwVwdGf+uHdUhxqbx/xWuMJ9OB4q1X+nYmMbXeHnXEKrrgDVF+tVnNDmoOfqFpze5cVZ1j3nh47WkTYQGl1iSXFbZtmh5jYoTWu7kT6w9lfJjq7TnTe0WDSjDC7nWw28q7D+r8Y6tCbYldIUBIYDoVvGc0jRuz3mqZ6gBKQ/hA4Qxgk7jhvqXTqX3bu3J7CwW2h6jnr1Cv2hmvqHausfqrF/eOYPR8NPaJ8mp0VpzA8XRUZAIMn6gWr4T39gq4YDksTKuFTUQh3lwiHqfYUokFDJSiGBn+aO31AJnOIM3dw2dl8Jwc0NvpoXZoXFdaNqYuiXT/f+ftCw9LTwFIaeACNjOF1vcIZp4pfoCH4fGE7RzxWdxqo8TOf087phPRpNWEh3XAXjp7flxlDwoa8ZqLjNaxNDRmg4/MNFojPvuEf8gdL+tZSa+9UdL5ww9EgsYVtm8QKBmmK0SCAktKFMUt/R+by3RNhcro/egZrQzbx5X7Wg6y1eIDQ5El7jc50UnZzZn7VKhNTu7JGP6BMi+gZhSm+TRQhgDa512M+tZys0sJyd56XuRskW2Oeh5lq5I4B18q7dtOu8Uc/Owj4j1+sIl98BKzh7JIIw2os8Jy84tZRab8xG4Vs90Ms5a8e+1Uleu6ZS7X415+wPFOckwSqaraaSai3DvfxVrZhsiE5JzloA+MhSszExNVfQtrIhdIcwTVHFJb6zcKR2t8vQ9AZ9X1KsfmAuX7JKxUdxARyH6SZpwnF5SrP93b/t+8J3Mb2Kq2ESxs9/z1uvWlmfzcANZSOPC6JK1teMQyVwiqnnVHcH4/O4I+dM26OWO1Dsue2F31uwXJp//CyX5o/DM3yNQ9StaMf7z2Y5JHNc8r5UrP1mTm7LLebVM7L/LbeYTxXQcot5+7zKW8y/jLy7/AJXhf/dc0F4F8olrlE3Tl4F5t8BAAD//1QAahI=" } diff --git a/metricbeat/module/kubernetes/_meta/Dockerfile b/metricbeat/module/kubernetes/_meta/Dockerfile deleted file mode 100644 index b3dac95e01f..00000000000 --- a/metricbeat/module/kubernetes/_meta/Dockerfile +++ /dev/null @@ -1,8 +0,0 @@ -FROM exekias/localkube-image -RUN apt-get update && apt-get install -y curl && apt-get clean -HEALTHCHECK --interval=1s --retries=300 CMD curl -f localhost:10255/stats/summary | grep kube-addon-manager -CMD exec /localkube start \ - --apiserver-insecure-address=0.0.0.0 \ - --apiserver-insecure-port=8080 \ - --logtostderr=true \ - --containerized diff --git a/metricbeat/module/kubernetes/_meta/Dockerfile.kube-state b/metricbeat/module/kubernetes/_meta/Dockerfile.kube-state deleted file mode 100644 index b064dc3065e..00000000000 --- a/metricbeat/module/kubernetes/_meta/Dockerfile.kube-state +++ /dev/null @@ -1,8 +0,0 @@ -FROM gcr.io/google_containers/kube-state-metrics:v0.5.0 - -ADD kubeconfig / - -HEALTHCHECK --interval=1s --retries=90 CMD curl -f http://localhost:8080/metrics - -ENTRYPOINT ["/kube-state-metrics"] -CMD ["--port=8080", "--in-cluster=false", "--apiserver=http://172.17.0.1:8080", "--kubeconfig=/kubeconfig"] diff --git a/metricbeat/module/kubernetes/_meta/README.md b/metricbeat/module/kubernetes/_meta/README.md new file mode 100644 index 00000000000..903e9010018 --- /dev/null +++ b/metricbeat/module/kubernetes/_meta/README.md @@ -0,0 +1,30 @@ +# Running integration tests + +Running the integration tests for the kubernetes module requires: + +* docker +* kind +* kubectl + +Once those tools are installed, running the tests is as simple as: + +``` +MODULE="kubernetes" mage goIntegTest +``` + +The integration tester will use the default context from the kubectl configuration defined +in the `KUBECONFIG` environment variable. There is no requirement that the Kubernetes cluster +be local to your development machine; it just needs to be accessible. + +If no `KUBECONFIG` is set and `kind` is installed, the runner will use `kind` to create +a local cluster inside your local Docker and run the integration tests there. The +`kind` cluster will be created and destroyed before and after the test. If you would like to +keep the `kind` cluster running after the test has finished, you can set `KIND_SKIP_DELETE=1` +in your environment.
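+ +For example, the following should run the tests against a `kind` cluster and keep the +cluster around for inspection afterwards (a sketch combining the variables described above): + +``` +KIND_SKIP_DELETE=1 MODULE="kubernetes" mage goIntegTest +```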
diff --git a/metricbeat/module/kubernetes/_meta/kubeconfig b/metricbeat/module/kubernetes/_meta/kubeconfig deleted file mode 100644 index cad24101463..00000000000 --- a/metricbeat/module/kubernetes/_meta/kubeconfig +++ /dev/null @@ -1,18 +0,0 @@ -apiVersion: v1 -clusters: -- cluster: - server: http://172.17.0.1:8080 - name: kubernetes -contexts: -- context: - cluster: kubernetes - user: kubernetes - name: kubernetes -current-context: kubernetes -kind: Config -preferences: {} -users: -- name: kubernetes - user: - client-certificate: - client-key: diff --git a/metricbeat/module/kubernetes/apiserver/apiserver_integration_test.go b/metricbeat/module/kubernetes/apiserver/apiserver_integration_test.go new file mode 100644 index 00000000000..807b2c0760e --- /dev/null +++ b/metricbeat/module/kubernetes/apiserver/apiserver_integration_test.go @@ -0,0 +1,39 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// +build integration,linux + +package apiserver + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + mbtest "github.com/elastic/beats/v7/metricbeat/mb/testing" + "github.com/elastic/beats/v7/metricbeat/module/kubernetes/test" +) + +func TestFetchMetricset(t *testing.T) { + config := test.GetAPIServerConfig(t, "apiserver") + metricSet := mbtest.NewFetcher(t, config) + events, errs := metricSet.FetchEvents() + if len(errs) > 0 { + t.Fatalf("Expected 0 error, had %d. %v\n", len(errs), errs) + } + assert.NotEmpty(t, events) +} diff --git a/metricbeat/module/kubernetes/controllermanager/controllermanager_integration_test.go b/metricbeat/module/kubernetes/controllermanager/controllermanager_integration_test.go new file mode 100644 index 00000000000..f07bb7b1071 --- /dev/null +++ b/metricbeat/module/kubernetes/controllermanager/controllermanager_integration_test.go @@ -0,0 +1,39 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License.
+ +// +build integration,linux + +package controllermanager + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + mbtest "github.com/elastic/beats/v7/metricbeat/mb/testing" + "github.com/elastic/beats/v7/metricbeat/module/kubernetes/test" +) + +func TestFetchMetricset(t *testing.T) { + config := test.GetAPIServerConfig(t, "controllermanager") + metricSet := mbtest.NewFetcher(t, config) + events, errs := metricSet.FetchEvents() + if len(errs) > 0 { + t.Fatalf("Expected 0 error, had %d. %v\n", len(errs), errs) + } + assert.NotEmpty(t, events) +} diff --git a/metricbeat/module/kubernetes/docker-compose.yml b/metricbeat/module/kubernetes/docker-compose.yml deleted file mode 100644 index 083e326f325..00000000000 --- a/metricbeat/module/kubernetes/docker-compose.yml +++ /dev/null @@ -1,24 +0,0 @@ -version: '2.3' - -services: - #kubernetes: - # build: ./module/kubernetes/_meta - # network_mode: host - # pid: host - # privileged: true - # volumes: - # - /:/rootfs:ro - # - /sys:/sys - # - /var/lib/docker:/var/lib/docker - # - /var/run:/var/run - # ports: - # - 10255 - - #kubestate: - # build: - # context: ./_meta - # dockerfile: Dockerfile.kube-state - # depends_on: - # - kubernetes - # ports: - # - 18080 diff --git a/metricbeat/module/kubernetes/event/_meta/fields.yml b/metricbeat/module/kubernetes/event/_meta/fields.yml index 023e81d2d11..bf66a8aec45 100644 --- a/metricbeat/module/kubernetes/event/_meta/fields.yml +++ b/metricbeat/module/kubernetes/event/_meta/fields.yml @@ -58,6 +58,10 @@ type: date description: > Timestamp of creation of the given event + - name: generate_name + type: keyword + description: > + Generate name of the event - name: name type: keyword description: > diff --git a/metricbeat/module/kubernetes/fields.go b/metricbeat/module/kubernetes/fields.go index 06292b72a57..02fae8082af 100644 --- a/metricbeat/module/kubernetes/fields.go +++ b/metricbeat/module/kubernetes/fields.go @@ -30,7 +30,7 @@ func init() { } // AssetKubernetes returns asset data. -// This is the base64 encoded gzipped contents of ../metricbeat/module/kubernetes. +// This is the base64 encoded gzipped contents of module/kubernetes. 
func AssetKubernetes() string { - return "eJzsXU9z27iSv+dToHLKbHl02NraQw5bNeN5r55rkjyvncwctrY0ENmSMCYBDgDa0fv0rwDwD0QCIClCimOTh1Qsid0/dDeA7gbQ+BE9wOE9eig3wClIEG8QkkRm8B69/bX58O0bhFIQCSeFJIy+R//zBiGE2h+gHCQniXqbQwZYwHu0w28QEiAloTvxHv3fWyGyt1fo7V7K4u3/q+/2jMt1wuiW7N6jLc4EvEFoSyBLxXvN4EdEcQ4deOqRh0Jx4Kwsqk8c8NRzQ7eM51h9jDBNkZBYEiFJIhDbooKlAuWY4h2kaHOw+KwqCjYaGxEuiAD+CLz5xgUqAKwjv59ub5AhaImyfo5FWj9daDY8Dn+VIOQqyQhQefSTGucDHJ4YTzvfBdCq51rTQ/AVklLptWYkgig4CFbyBOLhuDOUIUVO2l0AotycE4OPfA9Gwor4AJAmi94lWSkk8CvNVBQ4gatGOj8EcT0C38SD9Y/Pn29Rj2TPMlkaURSaZ49knyeVQOVaMYqvhgqDZoF6LLpYUn5Y85LGg/E7yD1wJPdQ80ClAIFSfkBdRl0wD4R2uc1A8iuhqRpdK+oDKskLRuOOUTVJtMc0zdQoZQkliKY7ds9EogZ1TRJtWa2ZEcPEI3BBWETTqAg2KPrN7ELQkjua3GZCqDuJi3CXeQ5yzyLao+6YDqK9RjMR0QybFnep1mwLzhIQwsnRZYiu+d6mlxTlSkDS+76mmbJyk3XHvV5Drm+/IAEJo2kXWcsph5zxg5rWSQpUrjaH1jPr880Y3Tm+NH7Ze+R7+QjVz+pHiFBU86wwDEF8JFyWOLskworlEMBtKlasALpKWNkb/QahHbH+VOYb4GrEVQTRlmTQ/IBxvxqFxFxCGsFo7o3BIEFoAnqIqYy75uHsACoQiGb9zbxacu3tr0qxKoAnQCXJYPUf3hayzZ+QuBRgvlhPkUPd52sQKCcJZ1V3Qi0cv05czRBlPlM/YVxJmZcZluQRkItVCNp8462haUp6hqrpDwIR5F9genZMTU8BrRBMUqsFOaTVGAPSEcaJKrZgnkPDinwAgygYFfBN1WsgTNFvH/T5FWyjHK3hPtAYKq6guEn1nf74NlU3zDnTmDTIKsTfy9sz1daJD4QFcmRZOk2O5+RF9BacuRubWYYl0ORwiiW7tCVqglfKRBUC8zcxjpM9Jw1CimdCDSY6XTCbMnkAedEpp2KN9kRItuM4RwaEH+xYV2IKipqm0eRY5Z3Hc2ixUNsRNh+OA/MN9NiiHq/JpORcjWPzZXdDtxnZ7eUIU2d0x0tKCd1FDVXa8TPRk5Z6G1WMwlllkEm6MnKPMpK3Sf9KmwJhqbk42eMyJXIFjz5FTGWv6SFNz91ew5CDggZpRJ41yS7zdq6hEhM6b43Dkm5DL8oSh44s15Lk7lRuimX3i4GEzb0iiHoErfTK6Fl8KEN5+wWVAu/AIQhfs20o+l1vP3QBClE9aiTjLsLDxIcY2Ewcg3KXjWcsqZ8B+drPdWN0SurXjEMleoqpd8I6QospU2LxgR4EPBKsMQpIBxg2sFgKq8I5J7WoRIIzSNfbjGHfD+uQo4pyYrRBSRcLhGua6m+21WkhySTONHaEs4wlWOJNBuq9YGMzkhP5/bU2hS2hkBr4Tfa9HQbfqU+8EkFki0qq34XUvYCXsd34/PFAqz6wnXLDt2ziYIQfMcmwOwk1f0DyRcJoTM8bCqfReF1r6TRNRQkucELkQbm+burNiFr98uVLx1jyeMmowe7lS0UP6eOFQtRI4F+pmDe3u713FHES+6xtoO0n3uZYCyEcwi5HLFSK0RhAHruMD0ibhgPQ8RpWtNTR6xiouxY4sAx3Plf6eQnEiMHb3GfuV3600E90LT36R8/euxzT5hkOZmUQfh/TlhDvbVNAL6qP3N3fh3tIDfiJ8QdCdwL8abCXII/fTTORADlOLgXewRaXmSOROCU96EbU5q0UG+Th08ya+E/GL4RH8/KianoPY3IbcZ/Pa4go7hiTeieLOAgJ+eTg4nU4O24p2e73a4/B3BKqPO9vF4tdIMb44ogu7Mw+Z1kG3Bx+mJXhv26IVUcp4uT3v8kW1EvuSr/0NtcLb29V/8Zj9wnnMG4X9b8Yjcj3hm45FpKXiSw59Ikvm3lNc5bNvMtm3mUz74hmLJt53UCWzbyjMS6beZfNvMtm3vmbeR1e5tTtvU+MP/xVQun2OE+Z+hRoUA6n2XI3fzr/YAg2e+uqyTzkS5R0SygR+yjuxJeG2BjWOE1j2PDvtV4UwQFDTqGQ+6g8NcXB7iM5idJfW772DmZN3R2YsRRWiQrYE8nc8fUphguPJNGeREwfWC9b1JRDBrsHnMl9jH3hLfOGKnIngs6xJz/MyeDxLFWNZ3d7tJDkb2QzJgFOga+IWOdYSE9OZsNYBrjr6A0dWt+3p9a1rolAHR5vumj0btU3XfYTElaf92CX3jC7X+ucFah5SPeN5hu5xxJhDmgHFDiWplZIvVe4GlePOBCqAlsl3F+7lUvQhGSY38A8ug5K+9pMr4oL4pAwngoj98b4JMnBfFZgLklSZpgbIaA9FoglegN66kCo35Q4Lxwo+4NJKO23JVzIdcWKeup1TN/c+7kGqNqpeaCWh/qsa1X2YY+zA1IsBvC0uRDRW4kzGCR8leOt4aOhU1kCpG1xAPII1CGOhBWHtWQuBO2chkUn1POn3oLo7jSlseAaK+wW3TiR++dD0Syxhzk68pA+ow9z1Mv2dd0KDgXj0hSuIMKhi1AHOmtFjS1nOXrak2SvhWPGBiLakdGdG4qaef6k5glFGDE6FouVc8cplni+xj5WlBAWgiVEzwpPRO6DfSikN/cQOt0ja+yAQ08hKDRgjVhZOhq0NAPCaLinnHVBoOIc5qlLIcVlrEkiUpufMb0nPNQP6nWLdfQKL79VFV5sgYSXSUoScenpCyV/lYB0Mp9siXLomAXEkcxpBlDItuuM0IeIYO4+qBGUg1Boquo/vgGc0EeWPUK6dmA817hQ83TJJTRC4ILEt5yfbm+a+kCV9QTUFbdQlOL9UBWLGmAcd/Cg1uARYHq+/lpTniD6uB32y80vA7ztdMGcaMs6AKgjvOXs33L2z/PEPvunPcXv+9jfchbA9SxnATpPvLMAy5bvDuBly7cb+LLlO7Dlm4JUdhNtvOZfX7Tx3UEC5FHn1X20muw/5671w5GYx+L56uPTZGtetkI+c0xFTqR8Pjr57NRJs2ywnK8wz0hp/n05WjFRQMupivbpCec1HKiwNgZ4Dm53QV3ixH2L6nmctW/x+M7bNz5NSb0ZnFPGbZIrD/BMtRP8c8IwgyEmaGQPR2NTJGN6OpqWSrnJtcc7fdZAI2cO9JrFOGJuQVMGu1coQvcM1ASrR2ei5uSwC5Z+lynsJSI1zxKRts/3pJDvLiJ9FWtGz2SVpAfrORaxmVIc8VUVRFRTalOzRnSL1lSVEBkFxDjKGQf7xxVhRQJzGKqXGHkVbVkw6sB+lv1uKR4VrzOeX
EHqdSQNj7qLv8mdpcX1S19bNGJ56q0w+oOLF774bATSlD9QEtHnPgfEUuAdrM+2xmlAjV5vXV8CjX+11Sq88fUwJ7a3TgJpWvOv4W22vTsKzZx8HsJXu6bNNadRzj64atZYu+e7xWXmcOmR854XmCu1Y3qW6fRqtkw5snQU6zlPygbPyY6s1HLsuw7VaQl0u/D4dUqFlg60cH2WyMiClVma9Y5wXZYApBk1WY4yfJ4j1OMNY0o1lqb/dGuxnGbVk6uwBEs3jKnAEqX+Sgi+q5pCLETBog0hUPOMc3TNlS6EsSU5xmt1PNiBWhwBqH4Nzh1axtdYCcCLrUtHdRXrdMK02ipxFTmuqkoI7DlVObqaSgjgXGUG6qh0TSii3bicw6GSKaecJx9ZLKWZDg80GTUpBZk+lBswbnrlrB9o4syKD0xtZQZi5MwwLP77A01uFZw7RbZzOR7bNh8MXXPoRzfPPLz4RlyY58fkvTQv5jjjhT50a15n3bPg+sc5obtoav9kSCOL9qSLEUdCnOm7BkFOMIABlBexhnBj/CbRyxqIZA9pmc0rfGtlDhp6S9qgz+OFpQ16R1FPZDNU0tbyTMosSsPuKytFWErIC9knXfNsRoOIbFVnddFd0jFLOmYI0pKOWdIxExEt6ZglHbOkY5Z0zJKOcWII1nQ0/F0VHYMQplRz7MVi3RqKp02S8J9w+bD0bzRFkiGgqdUY97Q0EvactMQENIEO2EU0r0e4MYV6YsHSVcFBhSkKgS4Bm8+FcctS1BJFFdEAgipQisG3JhVsdSPxSkGXdPDuHcYyPJP0EM/z6VwgRk0YPRwzU6Y+K33TZdxslnvT5XLaAZP29sYYx0xOLhrVE097kT5xV7cTEssy3jHsYo+Ff7eguwHdRoT2KjfN0YzQu6qO7xV6wkTq/0jgOaE4fDMm4NR/UtxdE3kkyhahZuKW75HHpCJQ/14sQiXsesWbTwBj+AzWN+/VgrXBzNLf70ZD6F2D6lrXolRKu+ZY7D8wVvyMkwe23V6hv3Guz4zdlll2hZr/Vt/3VasexhvtqxHo3TXLiwwkpFetJK4xpUzelVSzYPwK/fOfH38lWQbpD1XzV86OMuVkyOB1AXr7se9EhKHr23U8Se3Xt190hTBhWAb0Xju1F4FUsYMUuRkeyyl0emRgw2LBIVFDwXv036v/ioG8wTJSoCHsw/Dmbsf0Sf2iVcuMEs9//deQCKoN3mbj/GDVg1qB3x53q7Z6777vvGzCGf2TbWK5NIZaFIemt/oy3qVB1xWOHo3usuBcBk46lsNYFXp394wxfFoSqGAZ6VBqDl0kymmecUNOm1MwpFRMJNp7xntGYvmdYi1KUQBNe0fVQ67REXc7nVCbEFExq4tua7m67LUjzR8IQo5j1YIleyR6if4awhMWzuLazSiFhVzXFhANhxK6viaghsFL6u4g8PVM7BXlQfYp4DQj1M95yOZ+qQg0rPFWAm+6lEaSMH3BBldO4BaTzNLEmP+E//SHeikUGTvkM68fsQbGlmCUsbHAjsoQo7tbf/b41YnUcHGFI+2MV2QkweNjwZNw1FwQoVs20ZNIQRAeKNI0K1b6pcXY7q+pOLao34kCkjkH5mJhbIuPePRmHXill4Nl8RoBrEidlx1EB2X49AHZJ2AjDQ4xq2XHTMyEUx6zAntdsNnOdqB3kpdwhbY4E/r8d0kfKHui/n5T0mqmCBrprMSMRnnEJzQYxoz2reO35wuwm6rZ9mHfcHRdl4EaADWj/mqNqSk4dblS2ZbMv1UI98l39noo9mwU802RV2jDxcKsZZez6E6fYj+Xadq6UTHSsELOCkcf4u+WrGsEDFwQIYHKR5aVeazpqiWLDN167jKXRqlf/qiGSfgxMKfB1wI4UVPtkXDOlRD4zQBVJDyp2lD/GRfPVDycFQRDax1TG2GWMXCSMJ7qW2+YpR2PX8A43sE6yXDvvP1o7veGCNJEmtRAz7LQmIDLZ6FJhkl+NjNNMvxdGOvtb9cBSzWNWc9h8DOhKaS1WPysqjTiurKfGX3jrs3e1x0tfv9QctME3LRxkoAQ67y7EX4Ch580CaRIuHmcsafd/na98nUs95Q6q/dEqn5I3Lfc9T4enxhQyG5uncz2TMj1eTgq0j62E8OuaYyr8Oi0enBnXFrvwKzW1u/qtfVboGpyWq1Wpy6px0Q3L9KsM5L+rENMrA03F96rPtpuLg5i5SwrglUZn/lDwRmThTZUf9Yyxm1JM+rc7I/vba+ygwVwdGf+uHdUhxqbx/xWuMJ9OB4q1X+nYmMbXeHnXEKrrgDVF+tVnNDmoOfqFpze5cVZ1j3nh47WkTYQGl1iSXFbZtmh5jYoTWu7kT6w9lfJjq7TnTe0WDSjDC7nWw28q7D+r8Y6tCbYldIUBIYDoVvGc0jRuz3mqZ6gBKQ/hA4Qxgk7jhvqXTqX3bu3J7CwW2h6jnr1Cv2hmvqHausfqrF/eOYPR8NPaJ8mp0VpzA8XRUZAIMn6gWr4T39gq4YDksTKuFTUQh3lwiHqfYUokFDJSiGBn+aO31AJnOIM3dw2dl8Jwc0NvpoXZoXFdaNqYuiXT/f+ftCw9LTwFIaeACNjOF1vcIZp4pfoCH4fGE7RzxWdxqo8TOf087phPRpNWEh3XAXjp7flxlDwoa8ZqLjNaxNDRmg4/MNFojPvuEf8gdL+tZSa+9UdL5ww9EgsYVtm8QKBmmK0SCAktKFMUt/R+by3RNhcro/egZrQzbx5X7Wg6y1eIDQ5El7jc50UnZzZn7VKhNTu7JGP6BMi+gZhSm+TRQhgDa512M+tZys0sJyd56XuRskW2Oeh5lq5I4B18q7dtOu8Uc/Owj4j1+sIl98BKzh7JIIw2os8Jy84tZRab8xG4Vs90Ms5a8e+1Uleu6ZS7X415+wPFOckwSqaraaSai3DvfxVrZhsiE5JzloA+MhSszExNVfQtrIhdIcwTVHFJb6zcKR2t8vQ9AZ9X1KsfmAuX7JKxUdxARyH6SZpwnF5SrP93b/t+8J3Mb2Kq2ESxs9/z1uvWlmfzcANZSOPC6JK1teMQyVwiqnnVHcH4/O4I+dM26OWO1Dsue2F31uwXJp//CyX5o/DM3yNQ9StaMf7z2Y5JHNc8r5UrP1mTm7LLebVM7L/LbeYTxXQcot5+7zKW8y/jLy7/AJXhf/dc0F4F8olrlE3Tl4F5t8BAAD//1QAahI=" + return 
"eJzsXUFz27iSvudXoHLKbHl02NraQw5bNeN5b59rkjyvncwctrY0ENmSMCYBDgDa0fv1WwBBEiIBkBQhxbHJQyqWxO4P3Q2guwE0fkQPcHiPHsoNcAoSxBuEJJEZvEdvf20+fPsGoRREwkkhCaPv0X+9QQih9gcoB8lJot7mkAEW8B7t8BuEBEhJ6E68R//7Vojs7RV6u5eyePt/6rs943KdMLolu/doizMBbxDaEshS8V4z+BFRnEMHnnrkoVAcOCsL84kDnnpu6JbxHKuPEaYpEhJLIiRJBGJbVLBUoBxTvIMUbQ4Wn5WhYKOxEeGCCOCPwJtvXKACwDry++n2BlUELVHWz7FI66cLzYbH4a8ShFwlGQEqj35S43yAwxPjaee7AFr1XGt6CL5CUiq91oxEEAUHwUqeQDwcdxVlSJGTdheAKDfnxOAj34ORsCI+AKTJondJVgoJ/EozFQVO4KqRzg9BXI/AN/Fg/ePz51vUI9mzTJZGFIXm2SPZ50klULlWjOKrwWDQLFCPRRdLyg9rXtJ4MH4HuQeO5B5qHqgUIFDKD6jLqAvmgdAutxlIfiU0VaOroT6gkrxgNO4YVZNEe0zTTI1SllCCaLpj90wkalDXJNGW1ZoZMUw8AheERTQNQ7BB0W9mF4KW3NHkNhNC3UlchLvMc5B7FtEedcd0EO01momIZti0uEu1ZltwloAQTo4uQ3TN9za9pChXApLe9zXNlJWbrDvu9RpyffsFCUgYTbvIWk455Iwf1LROUqBytTm0nlmfb8bozvFl5Ze9R76Xj1D9rH6ECEU1T4NhCOIj4bLE2SURGpZDALepWLEC6CphZW/0G4R2xPpTmW+AqxFXEURbkkHzA8b9ahQScwlpBKO5rwwGCUIT0EOMMe6ah7MDqEAgmvU382rJtbe/KsWqAJ4AlSSD1b95W8g2f0LiUkD1xXqKHOo+X4NAOUk4M90JtXD8OnE1Q5T5TP2EcSVlXmZYkkdALlYhaPONt4amKekZqqY/CESQf0HVs2NqegpohWCSWi3IIa3GGJCOME5UsQXzHBpW5AMYRMGogG+q3grCFP32QZ9fwTbK0RruA42hYgPFTarv9Me3qbphzpmmSoOsQvy9vD1TbZ34QFggR5al0+R4Tl5Eb8GZu7GZZVgCTQ6nWLJLW6ImeKVMVCGo/iaV42TPSYOQ4plQg4lOF8ymTB5AXnTKMazRngjJdhznqALhBzvWlZiCoqZZaXKs8s7jObRYqO0IVx+OA/MN9NiiHq/JpORcjWPzZXdDtxnZ7eUIU2d0x0tKCd1FDVXa8TPRk5Z6GxlG4awyyCRdVXKPMpK3SX+jTYGw1Fyc7HGZErmCR58iprLX9JCm525vxZCDggZpRJ41yS7zdq6hEhM6b43Dkm5DL8oSh44s15Lk7lRuimX3i4GEzb0iiHoErfTK6Fl8KEN5+wWVAu/AIQhfs20o+l1vP3QBClE9aiTjLsLDxIcY2Ewcg3KXjWcsqZ8B+drPdWN0SurXjIMRPcXUO2EdocWUKbH4QA8CHgm2MgpIBxg2sFgKq8I5J7WoRIIzSNfbjGHfD+uQw0Q5MdqgpIsFwjVN9Tfb6rSQZBJnGjvCWcYSLPEmA/VesLEZyYn8/lqbwpZQSCv4Tfa9HQbfqU+8EkFki0qq34XUvYCXsd34/PFAqz6wnXLDt2ziYIQfMcmwOwk1f0DyRcJoTM8bCqfReF1r6TRNRQkucELkQbm+burNiGp++fKlU1nyeMmowe7lS0UP6eOFQtRI4F+pmDe3u713FHES+6xtoO0n3uZYCyEcwi5HLFSK0RhAHruMD0ibhgPQ8RpWtNTR6xiouxY4sAx3Plf6eQmkEoO3uc/cr/xooZ/oWnr0j569dzmmzTMcTGMQfh/TlhDvbVNAL6qP3N3fh3tIDfiJ8QdCdwL8abCXII/fq2YiAXKcXAq8gy0uM0cicUp60I2ozVspNsjDp5k18Z+MXwiP5uVF1fQexuQ24j6f1xBR3DEm9U4WcRAS8snBxetwdtxSst3v1x6DuSVkPO9vF4tdIMb44ogu7Mw+Z1kGvDr8MCvDf90QM0cp4uT3v8kW1EvuSr/0NtcLb29V/8Zj9wnnMG4X9b8Yjcj3hm45FpKXiSw59Ikvm3mr5iybeZfNvMtm3hHNWDbzuoEsm3lHY1w28y6beZfNvPM38zq8zKnbe58Yf/irhNLtcZ4y9SnQoBzOasvd/On8Q0Ww2VtnJvOQL1HSLaFE7KO4E18aYmNY4zSNYcO/13pRBAcMOYVC7qPy1BQHu4/kJEp/bfnaO5g1dXdgxlJYJSpgTyRzx9enGC48kkR7EjF9YL1sUVMOGewecCb3MfaFt8wbqsidCDrHnvwwpwqPZ6lqPLvbo4UkfyObMQlwCnxFxDrHQnpyMhvGMsBdR2/o0Pq+PbWudU0E6vB400Wjd6u+6bKfkLD6vAe79Ea1+7XOWYGah3TfaL6ReywR5oB2QIFjWdUKqfcKm3H1iAOhKrBVwv21W7kETUiG+Q3Mo+ugtK+r6VVxQRwSxlNRyb0xPklyqD4rMJckKTPMKyGgPRaIJXoDeupAqN+UOC8cKPuDSSjttyVcyLVhRT31OqZv7v1cA1Tt1DxQy0N91rUq+7DH2QEpFgN42lyI6K3EVRgkfJXjreFjRcdYAqRtcQDyCNQhjoQVh7VkLgTtnIZFJ9Tzp96C6O40pbHgGivsFt04kfvnQ9EssYc5OvKQPqMPc9TL9nXdCg4F47IqXEGEQxehDnTWihpbznL0tCfJXgunGhuIaEdGd24oaub5k5onFGHE6FgsVs4dp1ji+Rr7aCghLARLiJ4VnojcB/tQSG/uIXS6R9bYAYeeQlBowBqxsnQ0aGkGhNFwT2kB1XpZx10Z+G9D1pjEtjUGt/cbf1liFE9dkCkuY00SkboTVB3gCQ/1xnr1ZB29zsxvps6MLZDwYk1JIi6AfaHkrxKQXlIgW6LcSmYBcaSUmmEcsu06I/QhIpi7D2oc5yAUGlODyDeNEPrIskdI1w6M5xqdap4uuYTGKVyQ+Jbz0+1NU6XIWE9AXXHLVSneD6Zk1QDjuIOHPWAFmJ6vv9aUJ4g+bof9cvPLAG87aTEn5rOOIeo4czmBuJxA9DyxTyBqf/X7Pny4nEhwPcuJhM4T70TCsvG8A3jZeO4Gvmw8D2w8pyCV3UQbr/nXF218d5AAedTZfR+tZg2Cc9cq5kjMY/F89fFpckYvWyGfOaYiJ1I+H518duqkWbxYTnlUz0hp/n054DFRQMvZjvbpCec1HOuwtid4jo93QV3i3H+L6nmc+G/x+E79Nz5NSb0ZnFPGbZIrD/BMFRz8c8IwgyEmaGQPR2NTJGN6OpqWSrnJtcc7fdZAI2cO9JrFOGJuQVMGu1coQvcM1ASrRyez5uSwC5Z+lynsJSKtniUibZ/vSSHfXUT6KtaMnskqSQ/WcyylM6VE46sqy6im1KZyjuiWzjH1GBkFxDjKGQf7x4awIoE5DFVtjLyKtiwYdWA/y363lLCK1xlPrmP1OpKGR93F3+TO0u
L6pa8tVmJ56q0w+oOLF774XAmkKcKgJKJPnw6IpcA7WJ9tjbMCNXq9dX0JNP7VVqv8x9fDnNjeOo+kac2/DLjZfO8od3PyqQxfBZ0215xGOYHhqpxj7eHvlriZw6VHzntqYa7UjulZptOrHDPl4NRRrOc8rxs8rTuyXsyx7zpULSbQ7cLj1yl1YjrQwlViIiML1odp1jvC1WECkGZUhjnK8HkOco83jCk1YZr+060Ic5pVT64FEywgMaYOTJQqMCH4rpoOsRAFS0eEQM0zztGVX7oQxhYGGa/V8WAHKoIEoPo1OHdoGV/pJQAvti4dNV6s0wnTKrzEVeS42i4hsOdU5eiaLiGAc5UZqObSNaGIduNyDocKt5xyqn1kyZZmOjzQZNSkFGT6UG6gctONs36giTMrPjC1lRmIkTPDsPjvDzS5VXDuFNnOFX1s23wwdNmiH9088/DiG3Ftnx+T9+q+mOOMF/rQ3X2ddc+C6x/nhO6iqf1TRRpZtCddzzgS4kzfNQhyggEMoLyINYQb4zeJXtZAJHtIy2xe+V0rc9DQW9IGfR4vLG3QO4p6IpuhwrqWZ1JmURp2b6wUYSkhL2SfdM2zGQ0islWd1UV3Sccs6ZghSEs6ZknHTES0pGOWdMySjlnSMUs6xokhWFmy4u+qKxmEMKWmZC8W61ZyPG2ShH+Hy4elf6MpkgwBTa3GuKelkbDnpCUmoAl0wC6ieT3CjSnUEwuWrgoOKkxRCHQh2nwujFuWopYoMkQDCEygFINvTSrY6kbiRkGXdPDuHcYyPJP0EM/z6VwgRk0YPRwzU6Y+K33TZdxslnvT5XLaAZP2DskYx0xOLhrVE097nT9xV7cTEssy3jHsYo+Ff7eguwHdRoT2KjfN0YzQO1NN+Ao9YSL1fyTwnFAcvp8TcOo/Ke6uzDwSZYtQM3HL98hjUhGofy8WoRJ2vRLSJ4Cp+AxWWe9VpLXBzNLf75WG0LsG1bWuiKmUds2x2H9grPgZJw9su71Cf+Ncnxm7LbPsCjX/Nd/3VasexhvtqxHo3TXLiwwkpFetJK4xpUzelVSzYPwK/fOfH38lWQbpD6b5K2dHmXIyZPDSAr392HcioqLr23U8Se3Xt190hTBRsQzovXZqLwLJsIMUuRkeyyl0emRgw2LBIVFDwXv0n6v/iIG8wTJSoCHsw/Dmbsf0Sf2iVcsqJZ7/ErIhEZgN3tXG+cGqB7UCvz3uVm313n3fedmEM/on28RyaSpqURya3urLeJcGXRscPRrdZcG5DJx0LIfRlJt394wxfFoSqGAZ6VBqDl0kymmecU9Pm1OoSKmYSLS3nfeMxPI7xVqUogCa9o6qh1yjI+52OqE2IaJiVhfd1nJ18W1Hmj8QhBzHqgVL9kj0Ev01hCcsnCW+m1EKC7muLSAaDiV0fVlBDYOX1N1B4OuZ2CvKg+xTwGlGqJ/zkM39Ygg0rPFWAm+6lEaSMH3NB1dO4BaTzNLEmP+E//SHeikUGTvkMy9BsQbGlmCUsbHAjsoQo7tbf/b41Ym04uIKR9oZr8hIgsfHgifhqLkgQrdsoieRgiA8UKRpVqz0S4ux3V9jOLao34kCkjkH5mJhbIuPePRmHXill4Nl8RoBrEidVy5EB1Xx6QOyT8BGGhxiVsuOmZgJpzxmBfa6YLOd7UDvJC/hCm1xJvT575I+UPZE/f2mpGamCBrprMSMRnnEJzQYxoz2reO35wuwm6rZ9mHfcHRdl4EaADWj/mqNqSk4dblS2ZbMv1UI98l39noo9mwU802RG7ThYmHWsstZdKdPsZ/LNG3dqBhpWCFnhaMP8XdL1jUCBi6IkEDlI8vKPNZ01ZJFFd167qqurlK//FENk/BjYE6DrwVwoqbaI+GcKyHwWwVUkfCkakP9Z1w8Y3g4KwiG1jqmNqJaxsBJwniqb71hlnY8fgHjeAfrJMO98/ajud9XRJAm0qQGepaFxgRcPgtNMkzys5lpkuHvwlhvf7sOWGrVmFk3fP1MaAppLRY/K5NGXBv7mdE37trsfd3R4vcPJTdNwE0bJwkIsc67G+EncPhJk0CKhJvHGXva7W/XK1/Hck+ps3pPpOqHxH3XXu/j8YkBhezm1slsz4Rcn4ejIu1jOzHsmsbYhEen1YM749J6B6ZZW7+r19ZvgarJabVanbqkHhPdvEizzkj6sw4xsTbcXHiv+mi7uTiIlbM0BE0Zn/lDwRmThTZUf9Yyxm1JM+rc7I9vjzfZwQI4uqv+uHdUhxqbx/xWuMJ9OB4q1X+nYmMbXeHnXEIzV4Dqi/UMJ7Q56Lm6Bad3eXGWdc/5oaN1pA2ERpdYUtyWWXaouQ1K09pupA+s/VWyo0t95w0tFs0og8v5VgPvDNb/0ViH1gS7UpqCoOJA6JbxHFL0bo95qicoAekPoQOEccKO44Z6l85l9wbwCSzsFlY9R716hf5QTf1DtfUP1dg/PPOHo+EntE+T06KszA8XRUZAIMn6gWr4T39gq4YDksTKuBhqoY5y4RD13iAKJFSyUkjgp7njN1QCpzhDN7eN3RshuLnB1+qFWWFx3aiaGPrl072/HzQsPS08haEnwMgYTtcbnGGa+CU6gt8HhlP0s6HTWJWH6Zx+XjesR6MJC+mOq2D89LbcVBR86GsGKm7z2sSQEVYc/uEi0Zl33CP+QGn/WkrNLe+OF04YeiSWsC2zeIFATTFaJBAS2lAmqe/ofN5bImyu+EfvQE3o1bx5b1rQ9RYvEJocCa/xuU6KTs7sz1olQmp39shH9AkRfYMwpbfJIgSwBtc67OfWsxUaWM7O81J3o2QL7PNQc63cEcA6eddu2nXeqGdnYZ+R63WEy++AFZw9EkEY7UWekxecWkqtN2aj8K0e6OWctWPf6iSvXVMxu1+rc/YHinOSYBXNmqnErGW4l7/MismG6JTkrAWAjyytNiam1RW0rWwI3SFMU2S4xHcWjtTudhma3qDvS4rVD6rLl6xS8VFcAMdhukmacFye0mx/92/7vvBdTK/iapiE8fPf89arVtZnM3BD2cjjgsjI+ppxMAKnmHpOdXcwPo87cs60PWq5A8We2174vQXLpfnHz3Jp/jg8w9c4RN2Kdrz/bJZDMscl70vF2m/m5LbcYm6ekf1vucV8qoCWW8zb51XeYv5l5N3lF7gq/O+eC8K7UC5xjXrl5Bkw/x8AAP//x9qMrg==" } diff --git a/metricbeat/module/kubernetes/kubernetes.yml b/metricbeat/module/kubernetes/kubernetes.yml new file mode 100644 index 00000000000..d87cb4f5f9b --- /dev/null +++ b/metricbeat/module/kubernetes/kubernetes.yml @@ -0,0 +1,135 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: kube-state-metrics +rules: +- 
apiGroups: ["*"] + resources: ["*"] + verbs: ["*"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: kube-state-metrics +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kube-state-metrics +subjects: +- kind: ServiceAccount + name: kube-state-metrics + namespace: default +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: kube-state-metrics +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: kube-state-metrics + labels: + app: kube-state-metrics +spec: + replicas: 1 + selector: + matchLabels: + app: kube-state-metrics + template: + metadata: + labels: + app: kube-state-metrics + spec: + containers: + - name: kube-state-metrics + image: quay.io/coreos/kube-state-metrics:v1.8.0 + livenessProbe: + httpGet: + path: /healthz + port: 8080 + initialDelaySeconds: 5 + timeoutSeconds: 5 + ports: + - containerPort: 8080 + name: http-metrics + - containerPort: 8081 + name: telemetry + readinessProbe: + httpGet: + path: / + port: 8081 + initialDelaySeconds: 5 + timeoutSeconds: 5 + serviceAccountName: kube-state-metrics +--- +apiVersion: v1 +kind: Service +metadata: + name: kube-state-metrics +spec: + type: ClusterIP + selector: + app: kube-state-metrics + ports: + - port: 8080 + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: basic-sts + labels: + app: basic-sts +spec: + serviceName: basic-sts + replicas: 1 + selector: + matchLabels: + app: basic-sts + template: + metadata: + labels: + app: basic-sts + spec: + containers: + - name: sh + image: alpine:3 + command: ["sh", "-c", "sleep infinity"] + volumeMounts: + - name: mnt + mountPath: /mnt + volumeClaimTemplates: + - metadata: + name: mnt + spec: + accessModes: ["ReadWriteOnce"] + resources: + requests: + storage: 1Mi +--- +apiVersion: batch/v1beta1 +kind: CronJob +metadata: + name: basic-cronjob +spec: + schedule: "* * * * *" + jobTemplate: + spec: + template: + metadata: + name: basic-job + spec: + containers: + - name: hello + image: alpine:3 + command: ["sh", "-c", "echo Hello!"] + restartPolicy: Never +--- +apiVersion: v1 +kind: ResourceQuota +metadata: + name: object-counts +spec: + hard: + configmaps: "99" diff --git a/metricbeat/module/kubernetes/state_container/state_container_integration_test.go b/metricbeat/module/kubernetes/state_container/state_container_integration_test.go new file mode 100644 index 00000000000..6d7a7978250 --- /dev/null +++ b/metricbeat/module/kubernetes/state_container/state_container_integration_test.go @@ -0,0 +1,39 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// +build integration,linux + +package state_container + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + mbtest "github.com/elastic/beats/v7/metricbeat/mb/testing" + "github.com/elastic/beats/v7/metricbeat/module/kubernetes/test" +) + +func TestFetchMetricset(t *testing.T) { + config := test.GetKubeStateMetricsConfig(t, "state_container") + metricSet := mbtest.NewFetcher(t, config) + events, errs := metricSet.FetchEvents() + if len(errs) > 0 { + t.Fatalf("Expected 0 error, had %d. %v\n", len(errs), errs) + } + assert.NotEmpty(t, events) +} diff --git a/metricbeat/module/kubernetes/state_cronjob/state_cronjob_integration_test.go b/metricbeat/module/kubernetes/state_cronjob/state_cronjob_integration_test.go new file mode 100644 index 00000000000..3a0f4ff3659 --- /dev/null +++ b/metricbeat/module/kubernetes/state_cronjob/state_cronjob_integration_test.go @@ -0,0 +1,39 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// +build integration,linux + +package state_cronjob + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + mbtest "github.com/elastic/beats/v7/metricbeat/mb/testing" + "github.com/elastic/beats/v7/metricbeat/module/kubernetes/test" +) + +func TestFetchMetricset(t *testing.T) { + config := test.GetKubeStateMetricsConfig(t, "state_cronjob") + metricSet := mbtest.NewFetcher(t, config) + events, errs := metricSet.FetchEvents() + if len(errs) > 0 { + t.Fatalf("Expected 0 error, had %d. %v\n", len(errs), errs) + } + assert.NotEmpty(t, events) +} diff --git a/metricbeat/module/kubernetes/state_deployment/state_deployment_integration_test.go b/metricbeat/module/kubernetes/state_deployment/state_deployment_integration_test.go new file mode 100644 index 00000000000..17a38d56ecd --- /dev/null +++ b/metricbeat/module/kubernetes/state_deployment/state_deployment_integration_test.go @@ -0,0 +1,39 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// +build integration,linux + +package state_deployment + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + mbtest "github.com/elastic/beats/v7/metricbeat/mb/testing" + "github.com/elastic/beats/v7/metricbeat/module/kubernetes/test" +) + +func TestFetchMetricset(t *testing.T) { + config := test.GetKubeStateMetricsConfig(t, "state_deployment") + metricSet := mbtest.NewFetcher(t, config) + events, errs := metricSet.FetchEvents() + if len(errs) > 0 { + t.Fatalf("Expected 0 error, had %d. %v\n", len(errs), errs) + } + assert.NotEmpty(t, events) +} diff --git a/metricbeat/module/kubernetes/state_node/state_node_integration_test.go b/metricbeat/module/kubernetes/state_node/state_node_integration_test.go new file mode 100644 index 00000000000..13b9af0f929 --- /dev/null +++ b/metricbeat/module/kubernetes/state_node/state_node_integration_test.go @@ -0,0 +1,39 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// +build integration,linux + +package state_node + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + mbtest "github.com/elastic/beats/v7/metricbeat/mb/testing" + "github.com/elastic/beats/v7/metricbeat/module/kubernetes/test" +) + +func TestFetchMetricset(t *testing.T) { + config := test.GetKubeStateMetricsConfig(t, "state_node") + metricSet := mbtest.NewFetcher(t, config) + events, errs := metricSet.FetchEvents() + if len(errs) > 0 { + t.Fatalf("Expected 0 error, had %d. %v\n", len(errs), errs) + } + assert.NotEmpty(t, events) +} diff --git a/metricbeat/module/kubernetes/state_persistentvolume/state_persistentvolume_integration_test.go b/metricbeat/module/kubernetes/state_persistentvolume/state_persistentvolume_integration_test.go new file mode 100644 index 00000000000..7840febd337 --- /dev/null +++ b/metricbeat/module/kubernetes/state_persistentvolume/state_persistentvolume_integration_test.go @@ -0,0 +1,39 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// +build integration,linux + +package state_persistentvolume + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + mbtest "github.com/elastic/beats/v7/metricbeat/mb/testing" + "github.com/elastic/beats/v7/metricbeat/module/kubernetes/test" +) + +func TestFetchMetricset(t *testing.T) { + config := test.GetKubeStateMetricsConfig(t, "state_persistentvolume") + metricSet := mbtest.NewFetcher(t, config) + events, errs := metricSet.FetchEvents() + if len(errs) > 0 { + t.Fatalf("Expected 0 error, had %d. %v\n", len(errs), errs) + } + assert.NotEmpty(t, events) +} diff --git a/metricbeat/module/kubernetes/state_persistentvolumeclaim/state_persistentvolumeclaim_integration_test.go b/metricbeat/module/kubernetes/state_persistentvolumeclaim/state_persistentvolumeclaim_integration_test.go new file mode 100644 index 00000000000..24529123d5d --- /dev/null +++ b/metricbeat/module/kubernetes/state_persistentvolumeclaim/state_persistentvolumeclaim_integration_test.go @@ -0,0 +1,39 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// +build integration,linux + +package state_persistentvolumeclaim + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + mbtest "github.com/elastic/beats/v7/metricbeat/mb/testing" + "github.com/elastic/beats/v7/metricbeat/module/kubernetes/test" +) + +func TestFetchMetricset(t *testing.T) { + config := test.GetKubeStateMetricsConfig(t, "state_persistentvolumeclaim") + metricSet := mbtest.NewFetcher(t, config) + events, errs := metricSet.FetchEvents() + if len(errs) > 0 { + t.Fatalf("Expected 0 error, had %d. %v\n", len(errs), errs) + } + assert.NotEmpty(t, events) +} diff --git a/metricbeat/module/kubernetes/state_pod/state_pod_integration_test.go b/metricbeat/module/kubernetes/state_pod/state_pod_integration_test.go new file mode 100644 index 00000000000..c269092f067 --- /dev/null +++ b/metricbeat/module/kubernetes/state_pod/state_pod_integration_test.go @@ -0,0 +1,39 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// +build integration,linux + +package state_pod + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + mbtest "github.com/elastic/beats/v7/metricbeat/mb/testing" + "github.com/elastic/beats/v7/metricbeat/module/kubernetes/test" +) + +func TestFetchMetricset(t *testing.T) { + config := test.GetKubeStateMetricsConfig(t, "state_pod") + metricSet := mbtest.NewFetcher(t, config) + events, errs := metricSet.FetchEvents() + if len(errs) > 0 { + t.Fatalf("Expected 0 error, had %d. %v\n", len(errs), errs) + } + assert.NotEmpty(t, events) +} diff --git a/metricbeat/module/kubernetes/state_replicaset/state_replicaset_integration_test.go b/metricbeat/module/kubernetes/state_replicaset/state_replicaset_integration_test.go new file mode 100644 index 00000000000..c8b55192706 --- /dev/null +++ b/metricbeat/module/kubernetes/state_replicaset/state_replicaset_integration_test.go @@ -0,0 +1,39 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// +build integration,linux + +package state_replicaset + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + mbtest "github.com/elastic/beats/v7/metricbeat/mb/testing" + "github.com/elastic/beats/v7/metricbeat/module/kubernetes/test" +) + +func TestFetchMetricset(t *testing.T) { + config := test.GetKubeStateMetricsConfig(t, "state_replicaset") + metricSet := mbtest.NewFetcher(t, config) + events, errs := metricSet.FetchEvents() + if len(errs) > 0 { + t.Fatalf("Expected 0 error, had %d. %v\n", len(errs), errs) + } + assert.NotEmpty(t, events) +} diff --git a/metricbeat/module/kubernetes/state_resourcequota/state_resourcequota_integration_test.go b/metricbeat/module/kubernetes/state_resourcequota/state_resourcequota_integration_test.go new file mode 100644 index 00000000000..0d3bff3f706 --- /dev/null +++ b/metricbeat/module/kubernetes/state_resourcequota/state_resourcequota_integration_test.go @@ -0,0 +1,39 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// +build integration,linux + +package state_resourcequota + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + mbtest "github.com/elastic/beats/v7/metricbeat/mb/testing" + "github.com/elastic/beats/v7/metricbeat/module/kubernetes/test" +) + +func TestFetchMetricset(t *testing.T) { + config := test.GetKubeStateMetricsConfig(t, "state_resourcequota") + metricSet := mbtest.NewFetcher(t, config) + events, errs := metricSet.FetchEvents() + if len(errs) > 0 { + t.Fatalf("Expected 0 error, had %d. %v\n", len(errs), errs) + } + assert.NotEmpty(t, events) +} diff --git a/metricbeat/module/kubernetes/state_service/state_service_integration_test.go b/metricbeat/module/kubernetes/state_service/state_service_integration_test.go new file mode 100644 index 00000000000..895b4c6cd7d --- /dev/null +++ b/metricbeat/module/kubernetes/state_service/state_service_integration_test.go @@ -0,0 +1,39 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// +build integration,linux + +package state_service + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + mbtest "github.com/elastic/beats/v7/metricbeat/mb/testing" + "github.com/elastic/beats/v7/metricbeat/module/kubernetes/test" +) + +func TestFetchMetricset(t *testing.T) { + config := test.GetKubeStateMetricsConfig(t, "state_service") + metricSet := mbtest.NewFetcher(t, config) + events, errs := metricSet.FetchEvents() + if len(errs) > 0 { + t.Fatalf("Expected 0 error, had %d. %v\n", len(errs), errs) + } + assert.NotEmpty(t, events) +} diff --git a/metricbeat/module/kubernetes/state_statefulset/state_statefulset_integration_test.go b/metricbeat/module/kubernetes/state_statefulset/state_statefulset_integration_test.go new file mode 100644 index 00000000000..bce92bc6ce3 --- /dev/null +++ b/metricbeat/module/kubernetes/state_statefulset/state_statefulset_integration_test.go @@ -0,0 +1,39 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// +build integration,linux + +package state_statefulset + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + mbtest "github.com/elastic/beats/v7/metricbeat/mb/testing" + "github.com/elastic/beats/v7/metricbeat/module/kubernetes/test" +) + +func TestFetchMetricset(t *testing.T) { + config := test.GetKubeStateMetricsConfig(t, "state_statefulset") + metricSet := mbtest.NewFetcher(t, config) + events, errs := metricSet.FetchEvents() + if len(errs) > 0 { + t.Fatalf("Expected 0 error, had %d. %v\n", len(errs), errs) + } + assert.NotEmpty(t, events) +} diff --git a/metricbeat/module/kubernetes/state_storageclass/state_storageclass_integration_test.go b/metricbeat/module/kubernetes/state_storageclass/state_storageclass_integration_test.go new file mode 100644 index 00000000000..2db797d0ffb --- /dev/null +++ b/metricbeat/module/kubernetes/state_storageclass/state_storageclass_integration_test.go @@ -0,0 +1,39 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// +build integration,linux + +package state_storageclass + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + mbtest "github.com/elastic/beats/v7/metricbeat/mb/testing" + "github.com/elastic/beats/v7/metricbeat/module/kubernetes/test" +) + +func TestFetchMetricset(t *testing.T) { + config := test.GetKubeStateMetricsConfig(t, "state_storageclass") + metricSet := mbtest.NewFetcher(t, config) + events, errs := metricSet.FetchEvents() + if len(errs) > 0 { + t.Fatalf("Expected 0 error, had %d. %v\n", len(errs), errs) + } + assert.NotEmpty(t, events) +} diff --git a/metricbeat/module/kubernetes/test/integration.go b/metricbeat/module/kubernetes/test/integration.go new file mode 100644 index 00000000000..3e6dd7f11ea --- /dev/null +++ b/metricbeat/module/kubernetes/test/integration.go @@ -0,0 +1,50 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package test + +import ( + "testing" +) + +// GetAPIServerConfig function returns configuration for talking to Kubernetes API server. 
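// A minimal usage sketch, mirroring the kube-state-metrics tests above (the
// "apiserver" metricset name is illustrative):
//
//	config := test.GetAPIServerConfig(t, "apiserver")
//	metricSet := mbtest.NewFetcher(t, config)
//	events, errs := metricSet.FetchEvents()
//
// The bearer token and CA paths below point at the standard in-cluster service
// account mount, so a test using this helper is expected to run inside a pod.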
+func GetAPIServerConfig(t *testing.T, metricSetName string) map[string]interface{} { + t.Helper() + return map[string]interface{}{ + "module": "kubernetes", + "metricsets": []string{metricSetName}, + "host": "${NODE_NAME}", + "hosts": []string{"https://${KUBERNETES_SERVICE_HOST}:${KUBERNETES_SERVICE_PORT}"}, + "bearer_token_file": "/var/run/secrets/kubernetes.io/serviceaccount/token", + "ssl": map[string]interface{}{ + "certificate_authorities": []string{ + "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt", + }, + }, + } +} + +// GetKubeStateMetricsConfig function returns configuration for talking to kube-state-metrics. +func GetKubeStateMetricsConfig(t *testing.T, metricSetName string) map[string]interface{} { + t.Helper() + return map[string]interface{}{ + "module": "kubernetes", + "metricsets": []string{metricSetName}, + "host": "${NODE_NAME}", + "hosts": []string{"kube-state-metrics:8080"}, + } +} diff --git a/metricbeat/module/kubernetes/test_kubernetes.py b/metricbeat/module/kubernetes/test_kubernetes.py deleted file mode 100644 index 911f29cfcfc..00000000000 --- a/metricbeat/module/kubernetes/test_kubernetes.py +++ /dev/null @@ -1,88 +0,0 @@ -import os -import sys -import unittest - -sys.path.append(os.path.join(os.path.dirname(__file__), '../../tests/system')) -import metricbeat - - -KUBERNETES_FIELDS = metricbeat.COMMON_FIELDS + ["kubernetes"] - - -class Test(metricbeat.BaseTest): - - # Tests are disabled as current docker-compose settings fail to start in many cases: - # COMPOSE_SERVICES = ['kubernetes'] # 'kubestate'] - - @unittest.skipUnless(False and metricbeat.INTEGRATION_TESTS, "integration test") - def test_kubelet_node(self): - """ Kubernetes kubelet node metricset tests """ - self._test_metricset('node', 1, self.get_kubelet_hosts()) - - @unittest.skipUnless(False and metricbeat.INTEGRATION_TESTS, "integration test") - def test_kubelet_system(self): - """ Kubernetes kubelet system metricset tests """ - self._test_metricset('system', 2, self.get_kubelet_hosts()) - - @unittest.skipUnless(False and metricbeat.INTEGRATION_TESTS, "integration test") - def test_kubelet_pod(self): - """ Kubernetes kubelet pod metricset tests """ - self._test_metricset('pod', 1, self.get_kubelet_hosts()) - - @unittest.skipUnless(False and metricbeat.INTEGRATION_TESTS, "integration test") - def test_kubelet_container(self): - """ Kubernetes kubelet container metricset tests """ - self._test_metricset('container', 1, self.get_kubelet_hosts()) - - @unittest.skipUnless(metricbeat.INTEGRATION_TESTS, "integration test") - @unittest.skip("flacky kube-state-metrics container healthcheck") - def test_state_node(self): - """ Kubernetes state node metricset tests """ - self._test_metricset('state_node', 1, self.get_kube_state_hosts()) - - @unittest.skipUnless(False and metricbeat.INTEGRATION_TESTS, "integration test") - @unittest.skip("flacky kube-state-metrics container healthcheck") - def test_state_pod(self): - """ Kubernetes state pod metricset tests """ - self._test_metricset('state_pod', 1, self.get_kube_state_hosts()) - - @unittest.skipUnless(False and metricbeat.INTEGRATION_TESTS, "integration test") - @unittest.skip("flacky kube-state-metrics container healthcheck") - def test_state_container(self): - """ Kubernetes state container metricset tests """ - self._test_metricset('state_container', 1, self.get_kube_state_hosts()) - - def _test_metricset(self, metricset, expected_events, hosts): - self.render_config_template(modules=[{ - "name": "kubernetes", - "enabled": "true", - "metricsets": 
[metricset], - "hosts": hosts, - "period": "5s", - "extras": { - "add_metadata": "false", - } - }]) - - proc = self.start_beat() - self.wait_until(lambda: self.output_lines() > 0) - proc.check_kill_and_wait() - - # Ensure no errors or warnings exist in the log. - self.assert_no_logged_warnings() - - output = self.read_output_json() - self.assertEqual(len(output), expected_events) - evt = output[0] - - self.assertCountEqual(self.de_dot(KUBERNETES_FIELDS), evt.keys(), evt) - - self.assert_fields_are_documented(evt) - - @classmethod - def get_kubelet_hosts(cls): - return [self.compose_host("kubernetes")] - - @classmethod - def get_kube_state_hosts(cls): - return [self.compose_host("kubestate")] diff --git a/metricbeat/module/kvm/_meta/config.reference.yml b/metricbeat/module/kvm/_meta/config.reference.yml index 79f754a52cb..84e584787e1 100644 --- a/metricbeat/module/kvm/_meta/config.reference.yml +++ b/metricbeat/module/kvm/_meta/config.reference.yml @@ -1,5 +1,5 @@ - module: kvm - metricsets: ["dommemstat"] + metricsets: ["dommemstat", "status"] enabled: true period: 10s hosts: ["unix:///var/run/libvirt/libvirt-sock"] diff --git a/metricbeat/module/kvm/_meta/config.yml b/metricbeat/module/kvm/_meta/config.yml index 2df123ca14d..78509f63aa4 100644 --- a/metricbeat/module/kvm/_meta/config.yml +++ b/metricbeat/module/kvm/_meta/config.yml @@ -1,5 +1,6 @@ - module: kvm #metricsets: # - dommemstat + # - status period: 10s hosts: ["unix:///var/run/libvirt/libvirt-sock"] diff --git a/metricbeat/module/kvm/fields.go b/metricbeat/module/kvm/fields.go index 7dd21356f95..4f509af839a 100644 --- a/metricbeat/module/kvm/fields.go +++ b/metricbeat/module/kvm/fields.go @@ -30,7 +30,7 @@ func init() { } // AssetKvm returns asset data. -// This is the base64 encoded gzipped contents of ../metricbeat/module/kvm. +// This is the base64 encoded gzipped contents of module/kvm. func AssetKvm() string { - return "eJyskDFuwzAMRXed4iN7LqChU9ceQq3YQLBoGrLsQrcvnEZFotByh3Lw4G+//8gzBioWw8oGyCFHsjgNK58MkCiSm8ninbIzgKf5I4UpBxktXgyA7T+w+CWSAT4DRT/ba3DG6JgqeJtcJrK4JFmm2xuF9wi5B3lhJp6zy7+Rxtzl3iKN0u5Zp1V5FGogPaFDrZ95I5ZUdLDucu+zPZW4Og1UviR59YtDs8Zur6uqrC4uPZco4+V/RLSmahGel+3UH1a/CrswPlO79+/f/q+dV/R3AAAA///hct15" + return "eJzckzFuxCAQRXtO8bX9XoAiVdocgoTJCpkxFmBHvn3k7BLZeBZHStLsFC485v0HZs7oaNboJlZAdtmTxqmb+KSASJ5MIo1XykYBltJbdEN2odd4UgCWdeBgR08KeHfkbdJfjTN6w1TAS+V5II1LDONweyPwtpA1yAZm4pRN/m5JzLvcW0ui1PssVatshSpIS+hQ61ovxCHOMlh2WfssT6FdnDqaP0K04heHZpXdvayiMhk/tlx86C9/IyIlFQu332wj/jD6ObBx/Z7aPP/22f80c4Ne378x/WYYdoTHGIRl2b9Ogr3+FSnnQe/eZwAAAP//6Q5sWg==" } diff --git a/metricbeat/module/kvm/status/_meta/data.json b/metricbeat/module/kvm/status/_meta/data.json new file mode 100644 index 00000000000..fb7fd3c3595 --- /dev/null +++ b/metricbeat/module/kvm/status/_meta/data.json @@ -0,0 +1,28 @@ +{ + "@timestamp": "2017-10-12T08:05:34.853Z", + "agent": { + "hostname": "host.example.com", + "name": "host.example.com" + }, + "event":{ + "dataset":"kvm.status", + "module":"kvm", + "duration":4012216 + }, + "metricset":{ + "name":"status" + }, + "service":{ + "address":"unix:///var/run/libvirt/libvirt-sock", + "type":"kvm" + }, + "kvm":{ + "status":{ + "stat":{ + "state":"running" + }, + "id":1, + "name":"generic-2" + } + } +} diff --git a/metricbeat/module/kvm/status/_meta/docs.asciidoc b/metricbeat/module/kvm/status/_meta/docs.asciidoc new file mode 100644 index 00000000000..b94f0f0f147 --- /dev/null +++ b/metricbeat/module/kvm/status/_meta/docs.asciidoc @@ -0,0 +1 @@ +This is the status metricset of the module kvm. 
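Taken together, the pieces above describe the new metricset end to end: the reference configuration enables it next to dommemstat, so the final state of metricbeat/module/kvm/_meta/config.reference.yml (reassembled from the hunk above) reads

- module: kvm
  metricsets: ["dommemstat", "status"]
  enabled: true
  period: 10s
  hosts: ["unix:///var/run/libvirt/libvirt-sock"]

and each fetched event carries the domain id, name and run state under kvm.status, as in the sample _meta/data.json.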
diff --git a/metricbeat/module/kvm/status/_meta/fields.yml b/metricbeat/module/kvm/status/_meta/fields.yml new file mode 100644 index 00000000000..9f75085f2cf --- /dev/null +++ b/metricbeat/module/kvm/status/_meta/fields.yml @@ -0,0 +1,23 @@ +- name: status + type: group + description: > + KVM domain status + release: beta + fields: + - name: stat + type: group + description: > + Domain state information + fields: + - name: state + type: keyword + description: > + Domain state + - name: id + type: long + description: > + Domain id + - name: name + type: keyword + description: > + Domain name diff --git a/metricbeat/module/kvm/status/status.go b/metricbeat/module/kvm/status/status.go new file mode 100644 index 00000000000..eab1b947d7f --- /dev/null +++ b/metricbeat/module/kvm/status/status.go @@ -0,0 +1,161 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package status + +import ( + "net" + "net/url" + "time" + + "github.com/pkg/errors" + + "github.com/digitalocean/go-libvirt" + "github.com/digitalocean/go-libvirt/libvirttest" + + "github.com/elastic/beats/v7/libbeat/common" + "github.com/elastic/beats/v7/libbeat/common/cfgwarn" + "github.com/elastic/beats/v7/metricbeat/mb" +) + +// init registers the MetricSet with the central registry as soon as the program +// starts. The New function will be called later to instantiate an instance of +// the MetricSet for each host defined in the module's configuration. After the +// MetricSet has been created then Fetch will begin to be called periodically. +func init() { + mb.Registry.MustAddMetricSet("kvm", "status", New, + mb.DefaultMetricSet(), + ) +} + +// MetricSet holds any configuration or state information. It must implement +// the mb.MetricSet interface; this is best achieved by embedding +// mb.BaseMetricSet, because it implements all of the required mb.MetricSet +// interface methods except for Fetch. +type MetricSet struct { + mb.BaseMetricSet + Timeout time.Duration + HostURL *url.URL +} + +// New creates a new instance of the MetricSet. New is responsible for unpacking +// any MetricSet specific configuration options if there are any. +func New(base mb.BaseMetricSet) (mb.MetricSet, error) { + cfgwarn.Beta("The kvm status metricset is beta.") + u, err := url.Parse(base.HostData().URI) + if err != nil { + return nil, err + } + + return &MetricSet{ + BaseMetricSet: base, + Timeout: base.Module().Config().Timeout, + HostURL: u, + }, nil +} + +// The Fetch method implements the data gathering and data conversion to the right +// format. It publishes the event, which is then forwarded to the output. In case +// of an error, set the Error field of mb.Event or simply call report.Error().
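// A successful fetch produces one event per libvirt domain, shaped like the
// sample in _meta/data.json above, e.g.
//
//	"kvm": {
//	    "status": {
//	        "id": 1,
//	        "name": "generic-2",
//	        "stat": {"state": "running"}
//	    }
//	}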
+func (m *MetricSet) Fetch(report mb.ReporterV2) error { + var ( + c net.Conn + err error + ) + + u := m.HostURL + + if u.Scheme == "test" { + // when running tests, a mock Libvirt server is used + c = libvirttest.New() + } else { + address := u.Host + if u.Host == "" { + address = u.Path + } + + c, err = net.DialTimeout(u.Scheme, address, m.Timeout) + if err != nil { + return errors.Wrapf(err, "cannot connect to %v", u) + } + } + + defer c.Close() + + l := libvirt.New(c) + if err = l.Connect(); err != nil { + return errors.Wrap(err, "error connecting to libvirtd") + } + defer func() { + if err = l.Disconnect(); err != nil { + msg := errors.Wrap(err, "failed to disconnect") + report.Error(msg) + m.Logger().Error(msg) + } + }() + + domains, err := l.Domains() + if err != nil { + return errors.Wrap(err, "error listing domains") + } + + for _, d := range domains { + state, err := l.DomainState(d.Name) + if err != nil { + continue + } + reported := report.Event(mb.Event{ + ModuleFields: common.MapStr{ + "id": d.ID, + "name": d.Name, + }, + MetricSetFields: common.MapStr{ + "stat": common.MapStr{ + "state": getDomainStateName(state), + }, + }, + }) + if !reported { + return nil + } + } + + return nil +} + +func getDomainStateName(tag libvirt.DomainState) string { + switch tag { + case 0: + return "no state" + case 1: + return "running" + case 2: + return "blocked" + case 3: + return "paused" + case 4: + return "shutdown" + case 5: + return "shutoff" + case 6: + return "crashed" + case 7: + return "suspended" + default: + return "unidentified" + } +} diff --git a/metricbeat/module/kvm/status/status_test.go b/metricbeat/module/kvm/status/status_test.go new file mode 100644 index 00000000000..e484cd775be --- /dev/null +++ b/metricbeat/module/kvm/status/status_test.go @@ -0,0 +1,69 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
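// The getDomainStateName mapping in status.go above appears to follow libvirt's
// virDomainState enum (0 = no state through 7 = pmsuspended, reported here as
// "suspended"); anything outside that range comes back as "unidentified". The
// mock server from go-libvirt's libvirttest package reports its domains as
// running, which is why the assertion in the test below expects "running".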
+ +package status + +import ( + "testing" + + "github.com/digitalocean/go-libvirt/libvirttest" + "github.com/stretchr/testify/assert" + + mbtest "github.com/elastic/beats/v7/metricbeat/mb/testing" +) + +func TestFetchEventContents(t *testing.T) { + conn := libvirttest.New() + + f := mbtest.NewReportingMetricSetV2Error(t, getConfig(conn)) + + events, errs := mbtest.ReportingFetchV2Error(f) + if len(errs) > 0 { + t.Fatal(errs) + } + if len(events) == 0 { + t.Fatal("no events received") + } + + for _, e := range events { + if e.Error != nil { + t.Fatalf("received error: %+v", e.Error) + } + } + if len(events) == 0 { + t.Fatal("received no events") + } + + e := events[0] + + t.Logf("%s/%s event: %+v", f.Module().Name(), f.Name(), e) + + statName, err := e.MetricSetFields.GetValue("stat.state") + if err == nil { + assert.EqualValues(t, statName.(string), "running") + } else { + t.Errorf("error while getting value from event: %v", err) + } +} + +func getConfig(conn *libvirttest.MockLibvirt) map[string]interface{} { + return map[string]interface{}{ + "module": "kvm", + "metricsets": []string{"status"}, + "hosts": []string{"test://" + conn.RemoteAddr().String() + ":123"}, + } +} diff --git a/metricbeat/module/logstash/fields.go b/metricbeat/module/logstash/fields.go index a1f6963e9c0..bf8f9746c58 100644 --- a/metricbeat/module/logstash/fields.go +++ b/metricbeat/module/logstash/fields.go @@ -30,7 +30,7 @@ func init() { } // AssetLogstash returns asset data. -// This is the base64 encoded gzipped contents of ../metricbeat/module/logstash. +// This is the base64 encoded gzipped contents of module/logstash. func AssetLogstash() string { return "eJyslM2OmzAQgO88xYhzwwNw6KmtmqpV95TLarWyYALeGA/yDKzy9isCZMEx+d055OCJP3/jGbOCHe5TMFSwKC4jANFiMIX477AURwA5cuZ0LZpsCt8jAIAxDRXljcEIwKFBxZhCoSIARhFtC07hOWY28TeIS5E6fokAthpNzumBswKrKpwZdCH7uiM5auphJeAwJ01plnI8LoZoi8Q+vP3z0sbwD58KlMQyS4wSymjFXqZWUvZbku6nI3j/qHThVC8qrvGzZwrp4jexwAl0NG3RsSZ7oyyja3WGSXj3Q7rHydoE2KP1W1sFjf0eX3Hen80/WNsteYlQdy/f26fJDvfv5PJA/oJPF6HSp4fXOgRebhocG1c7ypA5CRPON+5K+af+CFj/CL7LhEUJP/o6Xw8UqFCczjh56LFii1b8K7t7nn4eaOBXuSQxFdHL42TIFve1Y20zqrQthjIho8YKumTRghr/0/UVGv8bKegWja02gg6XB/1+l18D+sTlIwAA//9sYbU7" } diff --git a/metricbeat/module/memcached/fields.go b/metricbeat/module/memcached/fields.go index d9fa12ddab8..9e213961d65 100644 --- a/metricbeat/module/memcached/fields.go +++ b/metricbeat/module/memcached/fields.go @@ -30,7 +30,7 @@ func init() { } // AssetMemcached returns asset data. -// This is the base64 encoded gzipped contents of ../metricbeat/module/memcached. +// This is the base64 encoded gzipped contents of module/memcached. 
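// Despite the word "gzipped" in these generated comments, the payload looks
// like a zlib stream: the leading base64 characters "eJz" decode to 0x78 0x9c,
// the standard zlib header. A decoding sketch for inspecting one of the assets
// by hand:
//
//	raw, _ := base64.StdEncoding.DecodeString(AssetMemcached())
//	r, _ := zlib.NewReader(bytes.NewReader(raw))
//	fieldsYml, _ := ioutil.ReadAll(r)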
func AssetMemcached() string { return "eJzEVj2TGzcM7fUrMGpsz9j6ASrSJE2KpHJ/Q5FYiTmS2BCgZP37DMhdfcerS9a+be7EXT68B+CB/AKveFxDxGiN3aFbAIiXgGtY/jGuLRcADtlm34untIZfFgAAp/cQyZWACwDeUZYXS6nz2zV0JrCuZgxoGNewNQuAzmNwvK4QXyCZiNfh9ZFjr59nKv2w8iD+NdQlHIsRPq0+gvtXyPbcAlwrGJ/b8JcUeu+u1kcSgdL25sV3eOjza8kZk0CfySIz/P4bUAeyw4sCiOHX1WnnQ0KlFx9xxWhn4nWOzpj3mMcI36chu4zG8Uwc/ixxg7nlo+JCYXSwOdb82CFzd0yHVE5QtZQSWg3NqwFqdtrUY7oMBEIgO893nD+rt0pwsMGqjU3EO9y9CQWBEpgQhn0MrmSftpAoRxM0YDYa6g3ihcSE+aQ35Vys1qArYYwFRgRjL+ckDOVinyyCF9gZhg1iUn9mQTehYYuy2nmZv9kuuC+3KEuwFKNJjuFjLRpo1E8D78q19J/B+b13tXrxDnvo2KWN7qUitlIKqYjWzCNwNjJlMhUePTPOL70zPqAbZGf8uyCLFsWawgiJZKfNdjAMQ/t2lFsxX/F4h1vf4fhp06x7hQgoTNXXRrfa4vymvKlpRot+r05s9RyaspVVJYOlkkR1+zqY73UeMONl11DWfU/I4x8hj6/kqR5vz/KargluOmxXm6M832Gdzh/BNTzaNcH9qw4gSCcFFeJcly5TrH2UUA6UX5ubTvNjQsohexFM762GMbk2+P6jDi8Yf9xBVdHHEzUcwadLah8Guz9F8U3Hyf9J9C17FsroAJt96/90JWMFX/WHZ0gEy2i++Vhi3XyHXS0/julNETBtCR34ZLPeFus9RIMdIeGhwoxha/bwqZzh3rdj+OdmjDZ/oRU1WaQrj7VzSAi6jAil17s75WOd86PM26hwOiDO15qM7a+XDwxjqgcsRqnj9GPw0ctLNN+qlk8TqaofvdUB86Srefhsj3oN1ZSo2noy1NZ/hn/V/B7sL69cnvUOSQesM0krp2K0e80WV4t/AgAA//8M190C" } diff --git a/metricbeat/module/mongodb/fields.go b/metricbeat/module/mongodb/fields.go index ae38c9b8724..8ee97315e84 100644 --- a/metricbeat/module/mongodb/fields.go +++ b/metricbeat/module/mongodb/fields.go @@ -30,7 +30,7 @@ func init() { } // AssetMongodb returns asset data. -// This is the base64 encoded gzipped contents of ../metricbeat/module/mongodb. +// This is the base64 encoded gzipped contents of module/mongodb. func AssetMongodb() string { return "eJzsXV+TI7dxf79PgVIeJFXt8cpOKg9Xjqoky46V0lmKdC4/pFKz4EyThBYDjAEMefSnT6EBzGCGmD/8u3vM8iGxbsnGrxuNRqPR3XhLnmD/npRSrGWxfEOIYYbDe/LFB/sv33/3xRtCCtC5YpVhUrwn37whhJAPYBTLNckl55AbKMhKyZL4HxENagtKL94QojdSmSyXYsXW78mKcg1vCFHAgWp4T9bUfgeMYWKt35P/+UJr/sX/viFkxYAX+j2O9pYIWkKM0n7MvrIElKwr/y8JoAjWoyod6IX/QzxCPIrlSRtqdPOX1Fgj48VjegExKYilybSxYushsZ+uRMKnjzHG2QiiC/IJ9jupit7fRqDaz/fU0CXVgKRbWMlxW5YuN/4fWzHNQGD/7yXHLpdMUBxcrkgRREFFEU/fDFxGGsoXhpWwqHUSIJdifRy6j5Ym2VFmVwixtMlKKsJl/qQJE6RkuZIacimKjj71UeWyFuaimERdLkFZkVkwCJHAFoTRE2KyX08i6a8vMrACYmIKaDEg8lEWZ7CJrFqBB+lbwdvx5kj/AGNqAi6B8K/NNDTQZsxFDG+nmIFbyhAHPFaIDuX1pdiCO0Kl/1GDYqAvvfat4FQthBWcH2Lemg94LrnqWxEFKPAJ8tpAMSGcNZhSqiENO0s4VD8FpbJDkLxW2i5SuZspqIDtOoKyHGtCAyyqn6ynFMDanWZCdExoUOYaknOUrfAE7Egh87q0ej5Pah7WdYQWsPhR5i3AuirooBE7S1BI2crpSBl5RFeWkRtlnowUlHJ7FRkVwOEUGXlEV5YRopspo1yWJbV4ryAlZy5RTMHHDMPNE1cD7koCO0B1aODbw8fZRyRPglRKbllh7aQgcgtqy2Bn4VBSUWVYXnOq3NGvQbggHzdMNzPcIcs0KaU2JJciByWgIDtmNvhTspW8thYZqTfEzjqF0e06k8vfMs3+CYvl3sBslVlJVVLznnR/NHHkSlNnwsAa1DgRy+91Yc48kSZ/u2IcrouOiQI+3WCIOZTTx9q6zOCTsVbqRApy+RvkJ/9aG6no+sqzIDTSz8rlokyryzRQ1GRUmC0oPRSIOOU0WdLfpJp3vBimwcRJNMLvnQpkKwWQcabTdv4U5kRdnsnaoGYMEIqwJTWkEZmLh03uJmnzPLLH/NrG3MyGGqJgZQ0pMRs8Kyh0oXysx+5G4LaGcOZ6dBHHR+uKGirybtSpceSZcOwxKR6IoU9AKOFSPhFqyMaYSr9/966QuV74AOYil+W7koqa8ncKVqBA5PDO77rvXPDUIq/1u3/xoVT8r8WhnMa2prCNz1agCU/hF6ikMppIgfKzckt5DYdH+I8b8EDRz/HBz8bLoAqQoEWt046InR+g+YZsKa/B7vE0vfsjg26mHVhL2PTiVQf+DaGa7IBz+/8RSfPVFWUciuC6STEWT+kztviD/1/fLDwZvbEH0+4I7ozYfDWMiEJGX8aqzKFMRwZz7M4Zq+F/fLQpw8J0poGvBm1CStfG6Ma0nTiSX5kwOb0I6PEkGhdvvVawpqYfc74bBpc140Vmrdi9cmgd6Kx/YrkzDoVwR4SskvLumR2LAn/mzBXL+568wjpkIr/b+VsxkQJwF7ytwWR5WWScCchkdb9KahnlVJsMlBo5zd0DlzI1xN2wZw9o98xfRRUtwcDd6uhGanPXvqm75rpb7nRWUn3H+om7xHCA/t74TIaU7odJpk0Wwk9368Uhl+m7irvhsWJJ+vfBm5Irxu/WrVFQcQ0ms+6NWrK7XYYxny7Af++cboAqswR6t95OYNSl5WSV1CyRKH037LrLqTtXXjeX98rdbkONLve1YvfC4ZsUDZeB+WYue5MlA8JQJrTLKlKwpqpgYh3yPN1NMhUFqbvpRWTGZZphJRSZrK+R5fwxdRmKmP39+IZuwSEgsjZEM5G7u1m30q3nkYPWlkPVyQvrcyErON7qzeBgSPJ2vMDK
Iap5uipkZllPi55M6+ssBoifhnYCYuguV80KXCIV8v13/12D2i9+wv9cCPnRISQaDDGSVArTGokHnmK94xOLUxfjicw9ujEfJyfobIN2BsjOEgiVW0yTkjLUtpBinnM2I4V0IC/hjLQLzFlp0x0JzXERWgNTyoKtWO6qlypqDChxIN8pk+MSZIf97MsanDZH2I87bEVcaOwZkIWBx2p6TK3Sq+m60MLAZLlvyjwmfJfbg/TjTheGDF+ynLxe7IqxInLJL0iaOA8VU2A2YI1mDrhtuGIfWYHy9X+ikxrWXqofu6SQcLajzCzKq9RUNeKnpayFCdk9LpGcc+YTyZ1Nszy51B6yoZroyjJXgVpJVVoxrMH8SLX5E8qqkUYKN3E7lJObT7EmX7EFLMjua7JWQA0oO6ggv5uo5XLSuVZBV0o/B9h0LFErlpytGBSXYc9vydea/X5WF050F3jEYs+xo5pY70nX3IRFsfN4idko0BvJC+tfxCKbWM3NaJdayH+RvNAu1wOUxh1YwxYU5UgTl7Ovf7FboTWG+5jnzna+oaLgoEmtrcLjVFMerXykeOwq1zkVGRVFJlUxcsNxWS0O1Xc+zdGaOqKl3a38l/yfciqEbJa58+KlMhHPThZUuLz1KX3OpVhxdphefhM+QXg9iFengzOjNHOfuUzHy20yIRcWjyHY+8DK16lgkyxK9F4bKE/RKgFFiNDf1ETimIQZKHXAQYpaxQWozVp7W3EqCGwpr2lqjzzkqHEQbspT65aczlFStRRU3Pvel9Yrn95LFHDrSFlTbBUsGjGcxYdTg+Mv06oCqjB3m3IenIGQ1q4fsDqOmI3U4BYcVSC+NKQEZ1GwvBzJ2ZPn0YZyYPmNi2qGuEg3LX1LFZO1jltt2F2jL7mA5tRAQdiSRg+pQxxNDRIPhGf7LEe3Y+jcTuYci/skret1GXq51R8+JIojiVlYFyKl8w0U9XDskcyaKDJjsuJRBZi8HBtyJhekkw+6k6rfMONMmvAp57Vm21RY+wyyFmiWvN05k+hlKa4o47VK3n0fQTVyMOpBWpezBUxklZJrBXp6hVxYpS8/Ay9cpxsjwgGqYUN/JDUFtNhfhtRKwRibMzW3FpqtBeVQZK44flSJJ8m1W8vU3jhJSm9qbAmWFXKXukdoSS2l5EDT3+mpcMbsrr2i+ZDoHMV0E6mWGq0qnprGSzoy1mOx4wSPxV0ThINtc96InRpZcbk+1aOhxkBZGZ0ZmS0hlyVkLoBE1ZDGzpzJJTX55gzzODOg35MdCqMjwXBzZU9lnjPr+s/0b8NnrrEeOt50+Z4wbTNZJwPHHi95JwUoCM2V1Brd/5DhNshn9zoyHcK8Pi+Toc1EVBNXp78RPFgvg2skjmKdaLqOunvqz5XT1wiyn7ThI/WyXq1OSKqdgTFEzNwI+hBb0vzovciJlrXKwf+SLGElFcQzYgmBMKGPFXU6mp6QxCl24Qm3HUPIju7xbKxo/hStfPfFs853N9CCcM3QuQsOUmZiFj8t6pKOdLe4BvySfmJlXWJXgBBF9kBdf5aoWDuXGGowoddnYM+t3wcMwTBNhMRLlxVb14ou+UHyRJfjm3IbJivmNpcC23WE/x6frNaTZ4ZRntklcz0/IgzjV+ZQita89VBWQzfEZL4r4HKBsuBrnE9s5Aw3I/Np+GBzMU/OD4GtEpb7+RG8edNyK83vb8Y+9YYWkzvB+PL1vfdu7Rv6YSNb+0B2G5ZvsBGFgn/UoI0LHNKiwPxNyv1lWd+XSOeVtR+qsYlV1z7MVAHymfmbh3L9nJxLO+9MuUi7r+aJLncOeXuZXmTjPYwszxkrU0EBSk8Enq/vD7vrNL9EQBOPZ3hnrRRwSY8vGDj+cP5YKXi7ApNvHu3eugZrQ0BB4z1bHDq6ekIHx13oESaMJL98+8HqGiutN9udIrNRsl5vqnQ+35ytoZD5rc1qy6plHQrHZQmlVPtw4eaTb5zgnNzuwfIdsD7MbhRoSF+Z9vm7mInM9bkm0h2wLYvdVp+9Te4UjlvXeKgojVxRdeNL76OUd+Zsfk667GQRDuleGst9Eyjzh/pEV9wLqAF5kb7BQz/y9NBbCl2ZnSiHNykh+JaMi6YZ4EIDVfnmTUoSp/QEXNb5E5gMPm1ora+XKZpMVotid/kG8ifrfG0A7zawRBXz8mRtsKMKZioRXTNDl3xPOFVru2nmUhWEci6H1Kp1aZxffyMG47gkzphv/E23lHG65AnsI2m8Pm/l6tjH0KVYmnoPw/Tz+M/Oi2ni+03G7sp7uD726LsSYts/w/3SHDhlzcyLf/a0JY/D+fNRq4GQuNowOuIZU+tBXxX/4RoHsqT5k51tUTQXMK7nd+waH8FWp86yH9JKK9fRPUNb2oOtqaOrIh9ZiyU/1pMTDzaXWhTulDQBsr0c9FBDtINhLYFisA2qVSwXazC/tL/7QazkV18fnb/I/gkLbzmON1rpTrGzZUIGNnJdUWcdijgS5+eZCTfciAW2PNVjPSGux06alUbAcW7gID+DjK2Y0u6BCW1omfKrz7YNgXawyzikz6oHqjgDbb6OTLq/Gkis8UEuOL01E3ZEzwOn5nwOdkwUcncl01ywle8yTJZgdgAimggqCsfNCP4pNxVMJi76KNdH3w+43d8bizuJBWvSEzXbDk7iD3ML8OLSIDwR09x6SXg4lHGZLLq3IW5VNAz8CuY/wbjOzqEJ8WRRBWr1xfz9posPM9fx9n+Ie2E3AUi7J1WS+TMVdhrG8+RDVMLrrgO1sb6nlXBUm+FKk+3hEwSm/xurwZJQ17CdmX1/V/bH1WGD7i/YP0sJhIwOVLqG0uhS6bNfuPvVz5L9SAHsT36TtRKU9wgfbzg4vZhj9j1wum9MLR0qM6wUK6nau9iwsSah2ju1Hkq+mn6/4NOpDorViEQlwwxukePDHcaZLjsPMZ+ayx1oM8hhxAsb7iDwMnhZUW3SvCQVbAO0UFL2H2E4XcvSu7pH+GXI3HGeBQLu3wVxug7r5B407jTOh2fxgNcXpJEnzvLg6ktqrN9FL6WwP4eTavRgBqFLWQ/VGblBmiMb861VjlRRL6rFRh48pkIms35nzc9f7K5Fi0JhhukqXoaTqJxpuQ6un/pma/h8GxQCpTQcJT0Lzo9MuwNsGIykBzsElXaDLyolHSNjibEOUZ0TTVNjiP7YHPVn4FGQyy0oJta3mLt2tGbNjk9iBO9a0cdGXAGRP78+tmM/Ji1HXArxJORO3EKCHuSXTm4NVo9gLtCbyRKDsXPRYV+ouvr9LeTo8xvZP+frYgPv5poYRp4SIFVLZkDdQn5+qCmZBURXF1nAM3xmvdECfSxQ0+epVHGTxdiFNGxrJedLmj/d0Iy1ttaPPaXhDcbb7wYzEdZiA5SbzU18od5ugNbWD0/+g6wo13OAXl2WzVAHOhjZ1jnXcSODhtQ5H0T1dxmhDiT66vGvtI69E0k5o/1JrqjZuFAyy2GR/vWE+H7wvRXC2OknfX2Q+Ehg4T47EXOfEVcPIewg70CN/Cy1ZksOrnDDPcyHt/6aSJV8G67
tM5ZO1xnUv7nCc4T7CThpEFzmlGeJ09Spgf8fLUEf6df+RcH2/rBpFOixToQXqdagjF4oWNecphvRHC+o9vVmT9ePg/folZJFnbeI3cJKCy/A21ElDlunnw/P0z0XXqkvD63UZ8Oq9UEt3vm4LNFzgdk9zx6BLrUwI30LlI+BmMTadtm7WKTp+2532Pjirm0FyEQusf1dBIDQxONdPi2KcX/RhOnO4Y3S1CxMx6Q8mits2b3WphFr/r6kh9yn3/jWpli4l6TraTKR87qA7mWo3gDnRIPGjY78UQrNCnCNd1wRoEy9DkrIY5Nx9ogXH0XhXtTFwrpPprnhKqipD/poRdoeqFxBnj2B1gJzSNpMuaQWxXJOU82pCJk7w3xh5sdIGca5vH3rUo7x9MV5mhU/eveG+wg96Yol9Jh2naVzLvVkFiF8MoqmXoA6PQ7NqcGeeL7jY45zdewS3gCtslrT9UWfxJ7NBOkkOmFVakj7sfNp0SWSnprFP1p49pPg+0jFpSB/E+zTux+ZqEdzDNeQrWjNb9Yi0I5I3IihHSJWcZGC6ae4WIv8TNfpheh/jc9wWx3v948MFFOW06TeJCOhPTNWjyENpsWXpmMyCM0N24KvMJhqk8vlkvKMy7xfrHqJdFpLtn37PGZz7oLo2ys8CiSLjSdz9mdl66MmtHn5uZJtXn7TsL6ZK0xt8v3q8XqsMWgb8KL9UeZPQwUJWMjONMGKLL4nVh22lGMKkAyLz6mDPwSNyMfvlxm2jBqRz3CJy2wBJasCcdyCLCGnPk+a4vynmR8vW+nM+cB3ZtVozK7QmCh59MztKDOhYTsm6ln9xkS9B7eB1aXv+d5OxUIBLdCVtfrR+Qumbih9kATbF4OncDNBJEUQOjcGHvg+SMUy1pcM1ogOT3+v/+vnxZvLuBlirvEb0Qpn3vm91YL0HhEUweuObu2pZ6ebMKRDF99mI9dw+BpB/Hlxa7e3brHBldsDHR8dPzFqTY4ikepAIi93PR7J2myOnm8VzuZofJKiON3TxU7637bPYnh/zXk21hAAzTfO/v/B0v/mwflkwen5QykL+CYhd8t9FWKh0e9dRNT5DA+Nh/EQ1a48kBIMxVHsMh7oN9Wh71G45hMPZPdAfsHf/t2XaSjQlQLtjtlUQfHQdjq0HpBp/+KLzPFfmu+M1ADgRCwcbwuao6PrrhIWDhXRG7lzp9lUdQ+KZkc18b8twlVL20nfkjkUQGfg9hmA+aPqANddUCAQ/Bf33pru9gtPdpUNTlBDkuxAAdkAL1xvqtBgHK+XZ7JR60Me8rqsOcXVY78T9TJrPdeutxJzMjFqAbTAXe7YaRuUVqB4dLWcWxeDJ79zXhHr6uZQu5WZnYO6xFIlFycT++WSxP5+HrFoWZ0psIjSmdKKKJ0pqojSJeRU64sIqdYXkVCtLyKeWp8rm551OVNEPWpnSqpH7UyB9aidILeG0nDY99UQvhrCV0P4agj/fxjC9lT0agovRezVFM6j9GoKh8m8msKbm8ISDM2sY/hqCS9F7NUSzqP0agmHybxawptbwlT/I/JqBV+t4KsVfLWCd2sF36TIpd8feOakRiaeNZmxbXIWnk4wiq5WLH9okhsfsD0M24ZUCJcdnExYi7ad2rx8vvCO13fMmcXVlXua9nOtmqcRuhMwM8tfVnrBqQGRHxazn6z0PzUtdRrSzVVqkwGIj53vNjL1nAxmuvZfw/VJBKGdkFEsP3YdKaBF4Hf4sbgz5gfnJpflkgkoPPf7/rXymOpYfFdrB3qgOZ1MlyibyXdCamOFPoXT10tPvEn+kiXsAd5KxCMZYyfL2Leoe8lSbiDeSs5hwFPkO2gXBx6UPj0pqtsJtrGEkXpY072vZud4t/6BBnW7x+jtYN1nTfzO0yZ6O+OdpBkMeicHfFiX8LmRG7EWnp4fZKgLPkl3kiFs0n+NmqgUR26wm02W64l9I97cYDfjzb82dCPmDt82ui533oLeiLvGXjOt67aMrS3luQCHg3Y9dGE7KN67uGXvNvv+rKx8K6RZBj9JN2UzX5bBj7ictP2XWGU3tf0Rc3O2gQtO4U23gYjNOTvCBdm87Y4Q8Tlrc7ggo7fdHCJGZ+0TScJDPtvMfcKVgF5qf8DGB66lZlyg71+ywmrlUNHhMZ/Wv2CZSqk/f5L+xMwGFPn3fyNSkX/9/QMpoAL3+I8UviDCULUGQ6jKN8xAbmoFWITQFB0kKUePeHnGc1lWjE88w9nGSzQrQJhFubySbrYhwl++/eBKW2FNXT33Vx+++/ohKnxLVXQnCU/ytWXK1JRfha2WqyQ7chVGb9WzZWt4J5zkqaRVBcUtZsqN5OEnmUzNVLrQ6ztfjxLaCtUatKf81o+zYhz0gwtLNm9rc/YEHNuGLwfsk/3LUP1yt4GAXJG9rFUUMEhf54S/T05CtmNmk/kO7S9jRlxjisYWpKyv++DUraQKDeaZWKfr2TD0li1p/qRd0XDa219KyYEe2UXso6qB7DautbcCrBLrP8VPQ5OuZvvxhbEWvAKj9oPQ/ftyGYg1E5DqJ+awn/SGx7dEG290XWGeK2PzhUnR4x5tax5EQzya8X1zxxQUmWHrgc5LJ2yevxpqmDYs19EW+nc7zkc7zCA+95ls+SNFKC03igpNky2PxhmYwcQgI/Gg0V7CcPdbqzMeIscFsJD10NuVE/ers+tS/xqXUAelcYHviLkpjvq4x5oH3Qo9y5/A6LZFyBzcodkG/vRm2N1u0YE99ZY0LZ5LN/De6STVQNTPqhkO+3GKgaifUy9i0COHPJonD43XsXs4GpZGY7sg2LI82jybb5xq/Er6iZV1OZheQOaIeyrNYCb/9vPB4fFs47t1o/itm/NiwP8atbGK2xF196vB+YriQkyZ/Ytkq3kgAiE67nzZ/Ay+rAprbFdzs5WNQ7qVja9iH4UUN4obQw3vQ02v7z5atA0DAYTr4fWjTuINWE/JoD3VeLqNnm5cq6B1c55tveJT7SZappeyQN0tP76RGi/VOW7YS2Gh1SpnZcIqaNZsshUJ6e5k2YpxyF7U1IT9zCKbsZ2teK03pwM/Wtg43mQXn25S0M3AzWua1KzInCaPo9fBZkebD20v8htC24s8CS0ZiGjfuM5QGYa6Z58fzQ9PaHdChEb6aXb9JiX2vnT9Wb1zcSg42e3vuZIqSn7EaE3ojPPhw7c/b393ZuRjeE2eG/wLHWzDm6s+B2u4900T2dwctNYPzCBYfPreL9dGqIMM4uHnsPv9JThMXVvFrfDJV6X+2jEf3W2Et2c1dttNEtZViD8wsfYvGQf9/bqnTN81vZJcM8yDKKb70KWWvDa+5/ODPc0e9oEmj14dHvFM9ki3gCHIUj8ONVT1fZLJEoyd6dAc2iX6zukOjSNcb378AK6zk5NrkGSQIalA+b0CtiBGXmbFp6yvB7WJlEcdVFttemg1Ca8we7tbkqyR8smymcuy4mDGHFeqTbZigtkFNsjgQN7CrHbWJvlMd0BW9PmZsOxt4P9KBr0d4K0CjvfQUXAfQxYuyxTflyD/NYSHdF5UdN
a9qoAqlw1/kN+eMPEHFLsmH8Fg5+4WMwFht4+D2/zpQHhZXuf6uNe2MA54DzwaHF0LD+ZZhu/ifHv02GZPbSkf1nb/s9vcRIYwQve2K+b6kOMkWVwwJ3LsLsKMxFJ3PEroF8A7HqsDR276k0SRDILua8Wp8rB2R7lHEa4khGgEgmajEzRL6/x48g0zOmMic1OZaDl+CeTNq1IubR4HdTZJ5hjoK8huY497NOrjizkeG+CJ5CFH05FhovsrJgrM8CG0cZuFLIDUwjomlGyAbvdkOEzFpW/Jm1uH0FrTVa0wSaVgdC2kZiOBbqCK77Ob2Tv0cgOXvlrLerJBY5ewkr6Vvc43UNR8INZ1or6Pv9F6Rmjqh4P3gvE41O6NvZym5iJ8wthtqWKy1qTaUO0Cs9Hu5oOygwYgSXFYSPOiYsWA70cueNA99AMxp9inVwX98Ucp3yjf6VaqorT9/C2cEJBRa3WC4955zgWIxPXjSLrvugfHJkK0CqqMy3W2rFcrUM8iJ+fcWyRUee/eX4VOmNjw+bWknIMKL6w11skfbYKexW7fdEDJbrlNjs2zSYXmpqac75szZU8m5M9s8CaVEL3XBrB4GQrn+hawZTm4FbWi1m3NqSB0tYLcnCCg2Cd5NhlFooncDbqK5v4gVyf+/BmDoFcSlYKSVlml2JYayLYMds8oKQRTuceIqv1bKd66heZTBV2e1yBZC14vLr7a/E7+jGKxDkhAMQdq15d7GcDHXL1BwgMu4Jv/CwAA//+2umBX" } diff --git a/metricbeat/module/munin/fields.go b/metricbeat/module/munin/fields.go index 4201760b398..0d105e45fc3 100644 --- a/metricbeat/module/munin/fields.go +++ b/metricbeat/module/munin/fields.go @@ -30,7 +30,7 @@ func init() { } // AssetMunin returns asset data. -// This is the base64 encoded gzipped contents of ../metricbeat/module/munin. +// This is the base64 encoded gzipped contents of module/munin. func AssetMunin() string { return "eJx8kUFuwyAURPc+xSibSJHiA7DoDdorRNhMKA0GBN9qffvKhlaOanX5Z/5oHp8rHlwUpjm40AHixFPh9LrOpw4wLGN2SVwMCi8dAGweQjTERMluLOBXilmYOyDTUxcqWN0Bd0dvitpyVwQ9sXX1LdpfNg+QJVEhDh8cpUl1uFXHxHnw/OvcJp2SC7atnS/ntnNAvtHvkAsNhgUayc/WBcQ7dMWrz9OWQfoD+Lrfr9IT/oPLZ8zmf4I3PXGtknf+FI/Re47igl3V8nvYg+6nPpvjnJqyP3VN7b/iOwAA//84wJVl" } diff --git a/metricbeat/module/mysql/fields.go b/metricbeat/module/mysql/fields.go index 166d37fe757..283f3e4df4b 100644 --- a/metricbeat/module/mysql/fields.go +++ b/metricbeat/module/mysql/fields.go @@ -30,7 +30,7 @@ func init() { } // AssetMysql returns asset data. -// This is the base64 encoded gzipped contents of ../metricbeat/module/mysql. +// This is the base64 encoded gzipped contents of module/mysql. 
func AssetMysql() string { return "eJzkXF1z2zazvs+v2MlN0xnb0+tcnBnXcVrPxElqOe2cK3ZFrkgcgwADgJLZX38GC1KiJFKiZFHO21c3iSWSeHaxePYDC17CE1XvIa/sd/kGwAkn6T28va8mf3x6+wYgIRsbUTih1Xv4nzcAAPwbWDJzMmAdutJCTs6I2EKspaTYUQIzo/Nw6dUbAJtp46JYq5lI38MMpaU3AIYkoaX3kOIbgJkgmdj3PMYlKMxphct/XFX4S40ui/qbDnD+8zff9TfEWjkUyoLLaInQZehgQYZAT/2va1CXj/hekqmu6j/bwNrgUpRkMAoqWP7aBdR/lsJOyWHr+x4hWJC1EYYLNK34inpu/GyxPKAV/MZPvGoNsyldW0IsClmt/dIn3R5J/OfaP6wBFUa92rioC0sbj9aatn5sICW6nMqun/fg8p/f9QL0zJFikUUwbOPteGGEo0tLLihDqBR06S717FKbhAy8K9CglCTFP+hHAJrNRCxIxdXPK/F2SSRHlmglwQItWA1W6gU4HQSq7Wd1jXAZZCLNvA7ou9I/2WBdQTEJEBqvoM2pC58/UZZkIZbakvFj/AKGZuG/CKkhdGQgxcKvggWRCmBQJTBD28JhB+huIVSiF6No73pOBlOCRFiHKqYlXNaMdYxY6oX/b6xVXBpDyslqqSVWXY8MDf6YjDvV4roh48RMxMEGX7TIEips1Aj+6tplRcLc21Uw1RgVTAkKba2YtjQuFDRLEd4V2pFyAiUklBoi0DPYWKhDVqdQCT1HVvzTrwepVXqcFh4zAlXmUzIeHSlnBFkvhufueG0+GccgvI7MHMdhlGbWVpidQWUx9jdZMBSTmHvGzIQkwPavYKiQXhjqW9fLNSFL68icbFmEx71sQfiwJRLJGCagHcqWQmvpISf/jc1EAXGGKiULGRYFKUoGWMFI9noTSK4Ft4a5tNmAfgjCzZBpHeMTVQttuhQ+AOYkTLU3z0zYpUpjnRdakXJX8OhpRNgLWGTkvJ/z4JVOCIT1LOH8zQhfH+7urx/+F7SBz18+R82fqwftsWSd5+J0/M5P+/Gjp7VV7+ONoAYfSemSFcth08u9+/F2fLRvX4kyyLtrpTgV6jSBbhPfg/5uxqYanKGw8OXjx4uV8WZoQWkHFbnV4Bx4qSosB9peDVtG5P2SsJBj5b1s4r2uhlzYkLuVhh3SFdxkFD/xI8kYbUDqFGbaQGF0QQYSganS1ol4H+HTfJMHjl4jcDu38PGopUFzEW+u1X2TNQQRAHwS1oWM7du3uw8/MTOhlDxnNgzc5KCdJLouYLg63Buj8vNt6P/0OgNDqZyQUOkSDHEi438VJqTTiZ+kmKztdcawQdX9rPEyps70ImiG4xaFMozWGOvtnxP4arTTsZZ7rGgm9SKK3Wbgc7QpffRZyY1Wzmh5JNsWWNqtxQ8n4VsfOc5MTbJeWSInsMJzmdeb9JnUx0/fJr/D5PH68duEmcuzGgfQTSzWMHQA2ix1r0kuNJi20tufOwWa3aafeXsBmV5AXsZZqDlInHsEqecnn9v5hDnRi0MjhAAqUv1BwssCb8eRV1Bc4aMa4Z1XrYpghTmhLU3ILBQqbSnWKhmyZgzF8xFwP5ArTV39WQVhH2+ir9ffJrdAc8/n6/6gCcovQKhYlomfDZdpS+uX2bVwpv35pqR4Isi1XQYfczQCp5Js8D2xLv3qZfbniEsrgkRTcEaGLDkPzVRB20xKZTCHtSrbLg4i1U/OZ9XnD6ioRkl+1UfLEKWTCDtUtUdNfqlY+l6S55agowsfEHMAdNEQNRPOKjpqhYD7MOt4K1k9mrM/+0myBcU+bz5N5jedRTjVxo3CQhu5H+tiPaVelXYZRajsBoZduy5k3EIBPVNc7tA7bFSfohkKWRp6Tfk8BEo2Ch6ObF9+BVs51iuhXxr8Sz1Cl60PhLlu836UHkuHHdbeBvq9pLIrKoF9Oh0IGFqFhHdC+QzMoSJd2p9Bkkpd1pAKC8Nwduh3C3qE8z50e+KuAwR4WEJrMGPIKRPQczLLMtyQmKzbm0Crlq6VFQkZnMoKJJqUCxao4JerX3yMosIyWrqpOisIxf1VPR3Qcom9dzhkV1cBGlrV8nzQuBBSQkqKjI+KEKTmPL4dRrrMaOekUOlBc5Xj88im5v1Xjs8iL/Ne8zpsloaIJdQ5xBJqTLFWzFVIrM5BsZv1Yqxsk5SgrfKw3+mJ+AlSg6qUaIQbGD7252EnI18fGv5ryNer7Acl38kSWjf5Dk2GX0a8QiW8n9FDgz5GVuQW2jzxt2WaFaUDYe2BGn1NilwZwb+KIk8m1hlKZHdNYSz0bSyzmo+T+7pKEfhzT55lCJPuPo6j6tB/tfZN6j4gYcMgzNZxTIXjpFXs3fBrQo1TZYF/ccQzIdc8+aV7f+UotYct79eK1JbRFxeFQ62NK74DbHFauRGzOSv+oReCbccVp5rzhxYHv6z1AR1G51GhH2q1LT6EZp7oLGGYH+ZwYGfS2uHg2CLPhC5Y/1S4AzGORTI7OabBB+84cnW6vXiHNMe8AtVs67S3o6CbTHY1W56oy7LNOesNrs1nZ8dlKPSdihmv67rhy3ygFKTGqbKtVd7rTeu+0ifFWNYbFwERJIISbljUpeOew7C9Qa0n1RvSckieWN81vqB1yROdo7xw1q+9emz/Xy9Au8d6jyOdCiX1JqwjDWavJWCc0VUi7FNU2lP1GO0ZbaSBdrHY0cvtV/8wXmwHL7KeOJhv1SZHNyLhrhknj7IR1qGUDQuMtF23T8YjxWhc234BGvAu83nMySziMTzuOJvgBTBK3+E67/IwjeRDiJL7uM8ALIxzCLKepqsRsK06rAajM6VSonP402Krx9mDbENn3u12Wv3wXePux3cay0gOyuVFcE+ON97H9lB+uJk400DjibTq/JdYnWuquG9wdMUJZcm4qJvVTz4aJyujTtFMljaL6trkKOs1x+eIe69G5gVd0GYNeCRLO8sKtc4Q5uMTwegkoPmYQTeBnsYhBOM9z9wnJGlHcf5UHvfD7afbx9um5F3vK3DnbVkMOrdjt8+CnR7l3efJ7cPj0SgtSdrRJX0qlJPbT7c3x6Msi2TXdsypUH77+uH6wBlvbYH5e164trph8aZQzo2LoRmsVagK1YTQ39/0JtZ9oXXWunnnQrhMKLBOG+K28dRgbi+gDM2O/ql/lGRDyaZ55BXcuVVfI9c24ebLffT17vNvoA3/f/J4/Xg3eby7We6z7YtSvzfjvKraltrSqj7LXN/VJJqtLbFp1aSc3EbjlXG8jtnIVhpen8MeZV9sqLr5+/4x+vpw+/X64bb1zc2nL5Pbi9X83D9GD7eT28eh85OhSuSpDu7t35XrOFu10xoGWMS2VSyPJdx8ub+/e2xN3wAeOpPncSKnulxq9MJChnOCKZGqATRHS9idD4BNz0HmSOr4aST0zZ6xik29GtyaPc+0AcI4gxil9KtK8IJpAXv3M8xKxcHpBSwyEWd1h5qUFeg4Lo2FuiluSqkI6bBfhaQS
PoATx2RtOJXuah4Mp8EGqCg3JhLqDPbXzO1SY6UlC8hcgSkBqVQo+smCXii4L6UTlw+oUoIHwgREXkhWb9gY5bZzFjUIP+QQhqECzRg2fL3RDU/NWFBkaMMJoIW+DH+E1e4D03D6bNgRDByh52tIE9dMmN4uy1M2t3SZCY/NR8krPtBSnxznYz5eIwN7dJ5os1lkXAEMsXfn9ehhAnougylaSkArQI9oIHbZ3+N6JvDLdqInWpuFKxgmgaLnH0ACj4JnQSgWZN+xXVjnja5u9zNL4FEIXdpjpTCdXaNnFGJrFczEsw8RtRXhEO5gOaIfz6b839wDMxM+oB3A6FrKKY4Wk3RA965p09l6eQoyM21ynp+AaeWbhqTROKdCi3E6Pg6URGJM/pcG0iHwox9nRjwS4IngiO4Qcc5ULmivhzBkvbz53CnHZUPeiWDEmbGGytRQrKt6ltLJdFOOkyaDy/37cjYjExVd79HaFe3t0Up/pLfM78q82H732+bgvbw6iFUf2Y3p1PhkxactasU0gVljbWpvh/yWGJJJw653SukPv9YKAq+gC3BGpCmZdnXDOe5Xn9WTFrUUGrGM6CKblY4PcWvTe5nSi52OdXXmE5PX1tsCTQ5l0aMmrxvvs7xe2OQtufACqZSPBRhDttAqHKbW/uH1O9GASYf1L3I6WNusGa/tUEXsUTZfNVTZfe0U+zKhQXrelQ2t99T2Bg17g4/B4Uf7WP9mF0jvgmh6/fglBehwX0jVkkoY15ckjSVWvPUCpiBes+gZU22lvSIPMht+xn+J2ay5vj3K27KXH9Nctg3l5aYBm5vQnT0248jTHZowilqm5etzXiKVob6jaWOK5Ic92cxIdD0tY2OLUY98MklyYeNXEGMfAXChHQ3BtLRVuye5alXeUUodeus5YUlyoYR1PvyYEx9SzAiTC7BlnAGG+oTU8ZOFuhaKCRZ8bYY22/vyx3W9sas4s2NqevX7ok5+NWe6c/thb+X2jK4Hsz4IcAbb42qJUPVeYnd0Gk56Y3LJUDnz9XpRTTPiYGvh+6P6JWT/0SK3TgzYchreWeNktXzBWnNMIMO5995hnfJGSAjPd59W7NNcf41wTK29NagSnb9tKcRzlnAC643roMPBwjRu9TX8hk5FjDLYQINjWJDa/ersMxJFX09hG8bYOqv3futFE+tS1q0G6ISdVavIaI2RUSWQYbKszibCUOyXC1+eCPt0gO14+t+V0Y+hDLt8y+t2TYTdUQ+TJJXC3CtPVhv5eZObh/cRbqXd/NACDebkyLSfM1hTCxQuOluU+VmbPIgZulKhn17Du4WbqGdFrVfwV0aquUMRJat6vjZ1Zze/Ni+luscFYklYu3uOk3COQuJU0kXznJA9WLA6p7XEJOxe8itnUNTVXjbutfmd+WAqq1tgml1k/tcuN/Hr1/+SpfCoQXzCaop2UuGpalMbRzItJDoUsvuC5v8PAAD//zEqUrk=" } diff --git a/metricbeat/module/nats/fields.go b/metricbeat/module/nats/fields.go index 215ffc0c5d2..39228af4d46 100644 --- a/metricbeat/module/nats/fields.go +++ b/metricbeat/module/nats/fields.go @@ -30,7 +30,7 @@ func init() { } // AssetNats returns asset data. -// This is the base64 encoded gzipped contents of ../metricbeat/module/nats. +// This is the base64 encoded gzipped contents of module/nats. func AssetNats() string { return "eJzUmM+S2zYMxu9+CkxO7SH7AD50ptNecugeuul5Q1OwzIlIKADojfP0HUqWLdGSLTdKtqujKeP76QP4B3wPn/GwhmBUVgDqtMI1vHs0Ku9WAAWKZVero7CG31YA0LwJf1ERK1wBMFZoBNdQmhXA1mFVyLp57z0E4/EUOT16qNObTLE+/jISPz2f0p8+gaWgxgUBUaNO1FkB3RmFF2QERlPAlsnD41miT9CnEOQ98oMrTiMdzmc8vBD3f5+ASs/HHR5DwYc/p0TUebyQKYziPI2nNn6KArQFj8rOgmU06e0LUUshoE1DciHat/qG6h+d101+z0FTho1iceToawxz3z15BvqsSmqqwUhH6oJiiZyNXeHtshGi3yAno2xkxqDVAYxVt0ewlcOgcuEYU1Rc0Kwm3tvxibF0oshY5E6c6liNLujP3vCi7sQ6m2FnxopCmQ1sib3RNRSRhxNotnU1sqMC9Dz3nUCs4RdB++sooUf/sDkMi2wW5NifZhAeSz/pEh8giimbtePx949PUDNZFBkFtcQTkAsUWkWls6ZqRRr7+jzAMQhk+Thx1XGUSqypsHjeVmR0wsIa2WLIR+8w0dZx4KBcdbCZqs/jizBcS/ldVjYq1aHdBbC4WNr6RIyepkpv4eVjRKmjcPlEG19EZsobTzFoknfBknehTBuqyUtgZNGAwbSUlNfcGriWphmAVyAnFTuksQl/k+faavF9vJcRO1CK+ZRaKKEUtaT/e0JPkG8koSfe6YRKRS9p3ZLokX/MqpUk4CTRO6BRaLaCUa6dal5P31VpKV537Djtie0+fme1MX55zo9GtxFnYHaojF8iivb7nbFimcDso0Z2o+O3QGfCZsBQOFF2m9g0DBTAU3BKnArwn78/PE3EuPYhkJ/Xv02+dmNu3flZcFHFO6fpnHLE6H/btMvD9uz12RuKe9H3hl+fPEHcCy5xI69P3lDci85E+T7788kTxBR43+NOZcFecRD27fTUxxuHKVP6x2JBvthBltlu29hANbadroALmaGVE51sGvbz+9U7O4YU+79xeaN294O41HkUMK1G6um3FEORDmNgBnzjLaqxO3wQ9238JmKRTktipSCojWOpgW5Er+DsnD7z8KYRfkrznES7ZIFhhA2mlYPTxMU9Hi9qb+FvTaCoD958XdZTb746Hz208dvTXwGbw1wgs88L7XipS3FT5WbP6X72yKbEKZ5/AwAA//9SJQAz" } diff --git a/metricbeat/module/nginx/fields.go b/metricbeat/module/nginx/fields.go index 3cd6e0856ee..e848db4b940 100644 --- a/metricbeat/module/nginx/fields.go +++ b/metricbeat/module/nginx/fields.go @@ -30,7 +30,7 @@ func init() { } // AssetNginx returns asset data. -// This is the base64 encoded gzipped contents of ../metricbeat/module/nginx. 
+// This is the base64 encoded gzipped contents of module/nginx. func AssetNginx() string { return "eJzElM9u2zAMxu9+ig8+r3kAH/YIOw3YYRhSVaJtIYrkkXTSvP0gR26cwC1SoMF0skWJ349/xCfs6NQgdj6+VoB6DdSg/pH/6wpwJJb9oD7FBt8rAJhsEOIDMUSNjoI9KXsrsCkEskoOLac9DoZ9yubkxkCyqQDpE+vWptj6rkFrglAFMAUyQg06k8+Qqo+dNPhdi4T6G+pedaj/VEDrKThpJpInRLOnC31eehqyG07jUHZWQsjrebr1DJuiGh8F2tNbHNobxZGYIJbNMMczXdkUF0uSJY3o+HJOy5tpDesDtAnv4uZzjPlI7F63OWPb7GR79rI9F2Gu2GA62iwUryswr9sYl3H2STR/XRnnSHd0OiZ2N7YP4r101ux3s6pqrPrDumZIsfuc4M+eYEdmioo47l+IkdoiARt83rcpRrLZg8BHG0bnY4dfxucWXVrf47U0qHwhsCY14Qo3K5BbAV5H6k10gW5r85VIReFuIsdpGB5KVBTuJmL6O5I8tG4FZVZa5yjd+dB+vwuEyeTGfyzI4qkd+zzZzhPByyw/TbcCip6MI17HPfL0Pv8XbpEvuDKkKIQXY3fQNG2ec/4Ou3k8u3dhdcIVbbSJYeZMb6p/AQAA//9pr1BQ" } diff --git a/metricbeat/module/php_fpm/fields.go b/metricbeat/module/php_fpm/fields.go index 7293bc9e2e2..1150b149093 100644 --- a/metricbeat/module/php_fpm/fields.go +++ b/metricbeat/module/php_fpm/fields.go @@ -30,7 +30,7 @@ func init() { } // AssetPhpFpm returns asset data. -// This is the base64 encoded gzipped contents of ../metricbeat/module/php_fpm. +// This is the base64 encoded gzipped contents of module/php_fpm. func AssetPhpFpm() string { return "eJzMWc9v67gRvuevGOTyHNQRdq8pUKDY7vblsFsjL+9UFPKYGltEKFJLjux4//piKEqWZTlxGreoDg+IaA6/+fjNL717eKH9A9Rlna/r6gaANRt6gNvF10X+y+LX2xuAgoLyumbt7AP85QYAYPF1cf/L4lcI5LfkITByE6Ai9loFUM4YUkwFrL2ruh9nNwChdJ5z5exabx5gjSbQDYAnQxjoATYovyFmbTfhAf55G4K5ncNtyVzf/usGYK3JFOEhYrgHixUNscvD+1oMedfU6c0EfHmWad8SlLOM2gbgknofuESGHXkCt5LVkTPJ5d5ajRsChcZk6dUQ6RFa50z/cgruG5Bb2M6ZD2KW33S4a+8UhRBxZAPLY7xDzPLv0UKH+4X2O+eL0dob6OV5LilaBLeOyI6R/J/xdGRzzNmxbi9hMlnNK7S4IX89Ur8xslZzKPYWK63AeXC2oAptkU0iUc5aUmIuTKIYU30Bhp96kzE6CEJNSq+1in/qwFqFbLRriqwDSFSKaqYxFx1G4+zmZOkdkEl/TbUiLwrUVrlK2w14+r2hwEkdQymkJFdi6AH9ecLuriQLOCAW9GEDaJY/PVVuO5Sb0YHJwu8NNTTmpmMhLl6fA9V4T5YHXAw00bJQ4pZgRWRBW80amYo5rBoG63jC6p64dziDRwlvHWCLpiFx3jp7/wd5J1zwvtaSLvdQEXbHoTETVoUm3KI2uDLUXUYKJAqAvnfF7GHVhP0c0BayzVNctW7C6sBAb5tdaz4eaemVO1Fk8IQ66AmmJdFUWYWvuSq1KTzZJcxq77a6oIihA6zQQom2MASa76QQNqaAksw4yOR5Iapb8tLlGLfL4Fle1N7V5HkPa2eM24WDlNaoOPI4YbCTciu3AFuNgBCceiGG2fNPC0kYa20IVhiouOsobAJoW5LXPM4S8gR3yOKqRI+KybdxLq9b8ych38laSGvx5FHhVxd4ha+6aqqBwPsY1zaCjufKQk22mL7eYUwEbRWB8Ci5IDB6EfoZ94au5Ybs1d0L+o+e/XSTY3+G6KfLQB8G1yoCi9RfXKkC6MJcXxgHQbTB2ethuUMtvadkgsTMMnky0xllZ/LeIf2IPdmPsSG8SzHbZsAU8ms0BlbEO0mrXJ56l1KKtnmo0VOeMC5jSHbZZrQUTwhtCZq22fV9qfOQfLxMzcLynIRRsd7+L+jvSlECeWAQ7tvGTdsYyQL6x+WE2VlwQFuy4KQErxspLCKddMI8WvEUGsOw0/EC5C7BExaw/GF5d44CdoynRemK7YeA/FNi+hCNb+XM/9KtnGbLMaiP5b9hTcw9oSqv3MP81gNlXVFo77iTudGV5ggzti/p/Pm5tq2ugL2mEFsA8QsqJ31Fwg+znfMvAZw1UnjHfbs8dQVfUjx9iZH6peu/v9xNp95g3C7vKtJk+p3g5ZLZ6pgXwK7sAb2SamJnKiuRHXpVRMXEzSzTplxgGrfJZYtreHnGGSEtjwK5kiu/DbKEcra4SH7HcATyJJoCebxwAbGyq+2LhL2zOEal9TNDbKfmz06x/8nQ2ruhj8XROoFG42g4Ri57p7PxrkpvPLYush/1em9SL8QvHv/Wfyw4IXVw5aNLPT9Ov3tiW/OPz4TZY2FoDk+Ntdpu5kCs7qaBTGnvjPLehXKsuiGegfrOwjiNyBaHtkzjDxDvQrFnQvJyUJPp7iqIBuP7CIx0AG9gyYumVea1MHX2pKmstPKuI2v2I1TamLSEicS7foJKTVCXrWfVHtzOQkHrOH07O6W2zouKuHQfidSSuc66+XZi96citvOhtQuzv//8PIfFP749t1EDs2mf33Kw8foD3jXeZM7rjbaj9u0qbn1/eoSd5rIbIv0eAntpWz/gmCR1siyz4YbLj99cqJ0NlK1csc9Wex6NcJ/yM0GDFlon0F6YsQ2K/suVfug2mzAKp3euMZDPTj4/f8o3MQmzxddF/tfvz1/z799+fhIPPNyDXsehIBDfwWzt/KVOtSdeq/ZUqG0yGf9HgYpPwjMYuGs0c1U3E0BP2rMLUL4CVq6xLPKoqHJ+337RxHDIYcrZ0FTiQfv9E80O9wF+EF+GiTp+HWRJi49xaovld0UKm0CdcYVGNQa7L6uFs9QPvP2BgwlSkj+Tr7RFpuJdYtpTrlUErhVB/w4AAP//RurCFg==" } diff --git a/metricbeat/module/postgresql/fields.go b/metricbeat/module/postgresql/fields.go index 065acbda723..75826021f79 100644 --- a/metricbeat/module/postgresql/fields.go 
+++ b/metricbeat/module/postgresql/fields.go @@ -30,7 +30,7 @@ func init() { } // AssetPostgresql returns asset data. -// This is the base64 encoded gzipped contents of ../metricbeat/module/postgresql. +// This is the base64 encoded gzipped contents of module/postgresql. func AssetPostgresql() string { return "eJzUWkuP2zgSvvevKMwlycIRsNc+LLDIHDbAJpNBsmejTJUswhSpkJTd3l+/KFIvS/Kzpd6MT922+NXHqmK9qI+wo+MzlMb5rSX3Uz0BeOkVPcNv3+KX3//8929PACk5YWXppdHP8I8nAIAv5K0UDoRRioSnFDJrCujWgSO7J+uSJwCXG+vXwuhMbp8hQ+XoCcCSInT0DFt8AsgkqdQ9B/CPoLGgATX++GPJz1tTlfU3E9T40+NRRKZJ/VtfTl8WCi/30h/bH6akXZDInz80QWpEVZD2UJKtdQClNYKcW7EiDlJvQerM2AIZg9WArD9vwOcEorKWtD/BbbiBycDn6HuAlcgBHTiPngB12qyHnxXZYwKfWvtsjieY4XfmUm7XvHrdCEl6j52aqPkMVdhXY4oeN+goMTI9eaBRpzJ6O/jhgkaDVj//HjdOLTr4XDrYoNiRTkGyG2odt+lNcpkY/zvJbEfHg7FD1lfIfcWCZmBXzqatb9E1oFFax2RacuXIJkvYioFBme2WUpA6ePdNXCbsc4cNHpCKZamkCIdx/TrhPaR4Tge2v4GMUJK0TzBNLTl3H5XP36Be1xCKaA9yyI3z9+vjX8b5gNNyaIVH3BXHK0ulsTEqAYIlzhQEv3/9DsqYXVXy4vj4mrd0kScjzeS+Pz59A4YDXRUbstGIPUVKB5XjoJkZC8IURaUbex+kz4NuR6C1rldgLHz8O8gMEP6j5Qs4I3ZUg9IZW9SLOUSf2WWK/k4T/ZAFwSEnHX2hTiZwiHmEzbICmVCyah6adKQRLD8XE970VrxF7TjLGP0G23nXZsKe3P4ep0mGtLgYvTa9q2NM6hQFnureWHaSmNKlA22GVOqKgHoGUuj8GGt6jwF5LXLU2+lM+NpNRurMI9CKks6QOaD0cnROI4+NMYpQ30nFVsT6GyXhVvO1SDAaEJQRuwtquk/2p9rlzJ4sKlUrYpiHuTp1cqMI9qgqcoCWumpqBArwt9rez/Ajp/6e6IVEFfaCdcE3uVqmary20QKHMgRNh+6QFwUOc0MfCqTuH6oRsmS99h5Ywaby5zyZP51p7tjQgAW8x01IKR+Yj3Td+XGykAot57563SSJE8L0Iqj0YHSbxgIcF/Zhf/xNX7hADuGTuKiBrDV2eicZOl+izyGrdAOl1EVD85KPJ2umoVPpcKMoHeqjzb18SCyKXVP6S3L8e7Mu7vNasRCsdOcJpRc/PBTvHBRcOVgSve7lcy8M1vEyLAodyAiXuys3iLKd4hpIDXwyjc+5P2NwtwLpu8Uj2F5oDfUAx7UI249po4Zysz1Y6cm+pqH87tFL57nRxo2pYi3CGmMMnUKU0KY813aK/cYvVizDnTVtX0NzfNzub/5ETmJXGqm9S5zIKa3U6FA8Wpp9jRWZyaBF7suLpslxT7Ah0tx9c4t9Luv0mVr6WZHzCzBtkWdi6mVBLgn2SophoRnpZsrgMMheO47GowIsTKXjseRcXpN0kaMrQ/Ub4x5HDw5UXKK35EaotU+y6x1ysgSZVDHVBa/1XCoYjjW7FQMXUinpSBidulsV4Y5a/JX1wPxza7T8b+zM7lDGpsoysi7pKWV2761ltOZKK8u76NvhMreJ2m1+VpvjdFC8gds6q5SanWDwzTOB2nlTlpQCQiDA6nQCNWwoFBAgx/6TY9o7MAYK1Md2Gxf3WGfW5f1CWhKca0Mvf7FYGFBbZ3wEFrJASyWo0JsmZ4P0DsxBQxAeyi14r40tUKlhIQNn7JijTlWwsnEEtCfdNT+N1NRwORVljWADzoeLOkKljMAl0lJjwFbC+f7HrS05mrUV5goqlyJnZblYV9clzoHDY6i2gtBkVFI1A9bXlFR/aAJrDmE83+B1g/nmm48Hmfa5nQ7S2+H5ZEU1wfL+UupXG5//klPzOKVbm2xdL1sgCdbAvZZiPE1t7xauzb1cws31KMrPEPJ6Mtq+rafZk4ozcjh77k/4WqMU6+D/y5hZ8MnDc7OajTJix6U8zh8uuSyrBQALGLG9SClfwtohwfV5hdCZhRSFikkemyo1BnsQKHKOc1NjW/RhBs+FBoYhDWjimhXtEd6HnRqtGFCoKiUHueyGIN1F6wj4VDLD8gJTksXQkLuj81S8c6Eirv+LT3+4qFHefbD0udr/gbk/J6ZY3DNwkwkis1rFm2MXDIYesJoaKt1Qxfc2dLGre9WOGPmtdmTNgU+hr6xeop82Bz6CEb3Jw2FqddOJDOQy8iJfilsN/iA1qR3ZReYQzK1Bf5BcVaaLFKOBWw3+ILWUFC1GrQa/n5owOlNSLNCYNzwEakGcF9OKuBZpJcbLSEvC7MkeG74jxCtlCxWlsWiPSZhYzJ/EGvx6IiIsXfUB+OeoYYcREFoCYSod7tIsbdFyrxbuxA95nBacLuG0N0Jt6LynZJtw4rTxzoibP5dLvf2wCu/bnAoIV3Fmu2YB6ym9ATjyjHRN6Zujn03pw8lWyAS9KdxQg2MTnPcdNskDJrhixVCJ1CZYQs8pYRqS4PzVYoMMKfnYJtwWL36ddnvUd7d3X69tvE9ejotXKvxXeGlnuiGfeFPuBFXqvanfwGhejgu43atxoqygcriNr8f5cBRCxXXHu3Hd5d/rLkje8mWrbhIVtGIxFuwTt6pv/u5e/60HW2kY3mCe3C3Op6+OS2vPS3I9vdx5g/BnvCQcrjuFFajUAtm0nTpH1bZXFra6qFyucWbNMvpytX7G9WqN87EMJ3T2a52OVr+JaXq+SuvuivgawULqGel9kVoWVTErQXyZkyC+zE6Q8KwK7/e7L4R6TnbOpynt5+P3zZSViinKedQp2hRS2ssua/WmD32mN94FRuoFFcYeE5ejpXTGqdPw+EQBcYQQpzVxHlTfw11V8SnPGQd2NxANM67HiKbSejlbj3kD11rgg3Tr4v7t6A6uY2+lq4xAtaC3BvxXO2tkuaCvjmk+4qqR5rKeOmb6oKNGssv66Zjsg27KbeaS9mf8V5s/kFxWoSOe0/r8XwAAAP//xMZFkg==" } diff --git a/metricbeat/module/prometheus/fields.go b/metricbeat/module/prometheus/fields.go index d8b0eb230f4..e93b578c7b7 100644 --- a/metricbeat/module/prometheus/fields.go +++ 
b/metricbeat/module/prometheus/fields.go @@ -30,7 +30,7 @@ func init() { } // AssetPrometheus returns asset data. -// This is the base64 encoded gzipped contents of ../metricbeat/module/prometheus. +// This is the base64 encoded gzipped contents of module/prometheus. func AssetPrometheus() string { return "eJzMkkGO2zAMRfc+xYe7G2TmAF70BAXaosuiCBT7O1ZHllSSniC3LxzHGU0yQJF2Uy75RfLxi4945rFBljTSBk5aAeYtsEH95ZKsK6CjtuKz+RQbfKwA4Js5U2grLrNDL2mEw2sVGLucfLSnCtAhiW3bFHu/b9C7oKwAYaBTNti7+Q3NfNxrg++1aqg3qAezXP+ogN4zdNqc5j4iupFX1HPYMc+9JE35nCnL5viAz9JR4BV+zEnMRcNA4QbB7RgUBx8CRmftgN6L2gY2EEI1OCG6NO0CL/1WlKX46eEirDBp95OtFeklsV3UZx4PSbpCfsfmNQpnR5r49jz1BmZR76e52u2Nuh1dzj7uz0/rh/ovoW9of02U4//G+uLCdPr1Kdh627P89VPB//Z639nqZqfyNP8Ac2qwfiVLHy5jdzRX5K9vfUURjsm4PYg3/gvR0genPivYqzNn45TyQrmD9ncAAAD//1baTA8=" } diff --git a/metricbeat/module/prometheus/remote_write/_meta/docs.asciidoc b/metricbeat/module/prometheus/remote_write/_meta/docs.asciidoc index 2137a8fd0d0..2f46dc6e47c 100644 --- a/metricbeat/module/prometheus/remote_write/_meta/docs.asciidoc +++ b/metricbeat/module/prometheus/remote_write/_meta/docs.asciidoc @@ -8,10 +8,10 @@ remote_write: ------------------------------------------------------------------------------ -TIP: In order to assure the health of the whole queue, the following two configuration +TIP: In order to assure the health of the whole queue, the following configuration https://prometheus.io/docs/practices/remote_write/#parameters[parameters] should be considered: -- `max_shards`: Sets the maximum number of paralelism with which Prometheus will try to send samples to Metricbeat. +- `max_shards`: Sets the maximum number of parallelism with which Prometheus will try to send samples to Metricbeat. It is recommended that this setting should be equal to the number of cores of the machine where Metricbeat runs. Metricbeat can handle connections in parallel and hence setting `max_shards` to the number of parallelism that Metricbeat can actually achieve is the optimal queue configuration. diff --git a/metricbeat/module/rabbitmq/fields.go b/metricbeat/module/rabbitmq/fields.go index e737f5f5808..07ea6fba3fe 100644 --- a/metricbeat/module/rabbitmq/fields.go +++ b/metricbeat/module/rabbitmq/fields.go @@ -30,7 +30,7 @@ func init() { } // AssetRabbitmq returns asset data. -// This is the base64 encoded gzipped contents of ../metricbeat/module/rabbitmq. +// This is the base64 encoded gzipped contents of module/rabbitmq. 
func AssetRabbitmq() string { return "eJzsWk9vG7sRv/tTDHxJAjiL9OpDgVe/9jUHB3lNXnsoCmNEjnZZcckNhytZLfrdiyFX/3ctyV4lr33RwYCl3Znfb2Y4f0i+hRktbyHgZGJi/eUKIJpo6Rau/5K+uv/5+gpAE6tgmmi8u4XfXwEArH6G2uvW0hVAIEvIdAslXgEwxWhcybfw92tme30D11WMzfU/rgCmhqzm2yTnLTisaQeBfOKyEUnBt033TQ+GXUnb0uaV57j+diVuRsuFD3rr+16h+fNXE2KLFkRSkgoLEytw3r394dPd+/egKgyoIgUGYoUNaUAG4+CuOMCjvHOkRM0BqG2ORyD1Stk1++qzb5htMPJ354dh+xwBJJ/PFWXz+CnEirZAPsdgTzlxAxStQd77pcFYbcKo6Hu5NmXAzCOGdt8GR3iOEg/b9FqmcCY7eaXocd+LiP3CFBKkfpjO6/5oOcEJ8u7oeD94TU/g5YhxxPC+24RzktyvVFXoHNl9c2S91rvyGWuqrScUZFWthIN3eyvsSTAPNT6OiKfGR1O3dR8utNYvSJ+KbxqwphHR3XfIGgq1YTYTS8DmXykjYdYGr42DyTISv4HowVHpo8HYrWFlDbnIxZ7kqQ81xtv8Xi8TQTxiJl02PVm034aDqfFZmj9RmFNIQnMqn0Q0jjTMDUKgOQUm+PHDpxvwAUxkeP8RUOtAzGCm20/AFI2VUAiwQAZtGCeWdD+JhigU4zL5SBfh4XwEck9R8aGfxfnh3DlDJD5hthEVJps9oQ7VjOKD8q2LBZMbS+2HdSLJGhhE+KlJZAdVIEVmTvuxMh6ylYJnoWvIaXOAYTxwnfxTsXkVL+vOpOA8b25jupgzO1zn+jJXh4cm+LnRpPv6mRckrNR9cUPKTA3p7e55t8VZoaFHKbslvWSO6JHxa5sivrTUvrjD/j8fIHQbpCL1Epx4bwndeQj/VlGsZK2EVPA2fuA2zM2cZE2n2hSII4b9jmmFC9voHzRZGujDX4RtOzishQlB1qST2hqjUWjtEhYVOXA+JQwKMjkNVG7jIgWH9jJQV0sNDK813YApqACFTqwsDEwgFe0SmnZiDVekpUmdLAG75PM/MEAuKg8qEIontolvoPdyqIkZS+Kio/5gXJHKwEjZH+5EmGSVlaYtI18bdy2WRreG2/kmhT/OpKqiUisRwbfRuLLfG31MNEU0loswNJJOrcd9qscYwZ/9AupWVbsB1ql9axxkwBUy5N+0jEbApLzTkk/kvVqykJRCF4GxbqxwTRE6R3siQ9/Gr+gs38brNNIdeOulnhIe38JVvo1j+mpgx+bs9mDv/fNbA214VkwDUXE4Oj8/On40PAORCtygIlhN9OeP7Rt81tQmjoryozcuAkZYVKbzu6gDtBjqXLNKD346PR/2VBfRx4FKdT7SPxlL62d8YMA5GisdxcC2jS6kio6k/RcmDdM9CP2KS1W4th4102yGgp/uwDeUi9+w/kDKoqlJjxorP91lb8NG/NlBYXwhVnyo0GlLhW/IPWCMVDexwHlZ1GOBTeGStYjFHOC8hGj2WolTYI3pyENUWcmgsQKhvpxhRPpRwyQIo2ZGjJhVr3Lic8IowRrTNxtYmxU2rFy8dwn1KSqOA2Ci2bhxkfSL2KMBkXSPTj2pPoH40qlLEF86dZy46B6fuKg+TnwRTKQLME9yj1LP2kfPAln5S9JABja6UzKwI16pqf46LVlNtQ/LMZoygZw2F8ZEfJ/hidyBNnet3hEbLFJHGx8v1CPdJyUQAzrGtDnJnS0rnBNMiJxMLWKsNP2jTCtfWhNIZ7+zTNkJ41MkAta/Kg7a6LQR0HE5jQqXBUcf6GH0crrhsp6O9wmkYjsNvu6CPD0GCc8xuOMv+xPwitJITkx6IuAX7DofAlxdJOjV1ASvRp237g8O0P8YLLoyaSLmoSWegIw4eX0Y1C+5puUBy3dP+TBWktvgUD4Qg6ZIKm3kOg1tOjeGybID2Y8pbQQXxml6LP7p2+DQXjSQAykfNO/Hbd6PTjCgg3Ec7wXzwwrmbjrYQnkc3be1Yj++0LoiPTQSoh/mFCTdDC9IWKCJxpUCUbT3wmKvZhS/0taMdCSyRgG5UzyQNTpUI+aNA1CpPTmGY9wrMilly/sDJyLNQcv9goAVXVni4R7rfhyevcm6L+D7Aez3A9jf5gEsPSrbsplfFKvhjR54nU6NKmTwC0fhoTH6zffLrwNJIYncyQoFfJCxWJz8KrTOGVe+uoFJG6HGpQTE9b956ZRx5Q3cc5kO9f5zDWa66w95pgreGTauLOBn+X41ImAgsF6l02TvQBwXKSR3cB7O5AnVhkAu2iVov3DreOQq/xcrwAS/ZcH/Sh56NbBiQtnW6Q5ojY8PTTA+mLi8WOO/UgCW5mQ5FfWNbaIHbpvhC3nKO25rCnyh/mwj/4j6NhprOMVy0aiTgaw2VRoK6vD+2bEuJOTpfRWRUp7h9YTiQgbKd8W7ND78rnj3JsfJTsilDip6MHVN2mAkCR2yRlLqekiNfosifK4Mg0IngWWJU/Q5kS/xvH4uB6ypjQTsZAmO4sKHmTxQEme8AZpAU4qqyoe+Rw6nUz85qoc/tXVuv1Ev85DlUM2cX1jSJemNBV5ng2lqYjWQGPdgXuL0fOfsfAvRBS82JNNcehMk2z/6rnRK8OULP73X0AcwXtzg6uAqRsZ9OePvRuOlvdBn+VTDpPFaUoQdMGch/3q+GVrAF7z6Q4ENR3LjHuN+liyyXSDXajakOvQ5E7xOxR7tApcsC+ldrqEBHYsr81M8mLxqH5bn7dgPnwQcJfeHdMafgi9t63dVIxUKYbS76wDI7JVJrU/qYdakb8A4Zdt015sjqtkNVIRNSuWru4XAMbQqtmFoNzFtXctSHrd72HeglGXeDsjDLep0LSaZwPBqzDFOEZgIaboZWneJQt6M/zYctjauTiLx3wAAAP//Fn8fpw==" } diff --git a/metricbeat/module/redis/_meta/config.epr.yml b/metricbeat/module/redis/_meta/config.epr.yml new file mode 100644 index 00000000000..e994b333fe8 --- /dev/null +++ b/metricbeat/module/redis/_meta/config.epr.yml @@ -0,0 +1,29 @@ +- module: redis + metricsets: + - info + - key + - keyspace + period: 10s + + # Redis hosts + hosts: ["127.0.0.1:6379"] + + # Redis AUTH password. Empty by default. + password: "" + + # The duration to remain idle before closing connections + idle_timeout: 20s + + # Network type to be used for redis connection. 
Default: tcp + network: tcp + + # Max number of concurrent connections. Default: 10 + maxconn: 10 + +- module: redis + metricsets: + - key + key: + patterns: + - pattern: '*' + limit: 20 diff --git a/metricbeat/module/redis/fields.go b/metricbeat/module/redis/fields.go index 5ec77a24318..ccdb515cba3 100644 --- a/metricbeat/module/redis/fields.go +++ b/metricbeat/module/redis/fields.go @@ -30,7 +30,7 @@ func init() { } // AssetRedis returns asset data. -// This is the base64 encoded gzipped contents of ../metricbeat/module/redis. +// This is the base64 encoded gzipped contents of module/redis. func AssetRedis() string { return "eJzkXF9v27YWf++nOMh9WAYk6u7D9hAMA5q13S3WtUXS4mJPCiUd2ZwpUiMpp+6nvyAp2bJMSvIfORmuH1rElnh+5/D8JQ95DQtc3YDEjKoXAJpqhjdwcWf+vngBkKFKJS01FfwGfnkBAGB/gwK1pKmCVDCGqcYMcikK92P0AkAiQ6LwBmbkBUBOkWXqxr5/DZwUuKFpPnpVmkelqMr6Gw9h83mwbz1AKrgmlCvQcwTKcyELYp4FwjNQmmiqtIG3Dcp82lDacMwg6y99iHpQWWRmgPHAJOpKcswgWdlHH959ePvRvF4UhGdRa+htSTafLhttVlJGkWu19VuIowGuNhPuBrUsqKjzjA/MFiDBuVWSnScaWEzwmefHAWTm86EqEpQg8gZhTYwKruASv6asyiifbX1ttUIxskT1fZeXDWqDCZWORaXLSseMKr0//lJiSjRmN3DxU/Rj9MPFYVy+d1jAYQGDBUghDF+VlJZtD/cSS0ZSp2QF+dpwklR5jrKH851nJ5i3QzgKI07ozM4V5TXoJ5upW4cELBJw4jtgqtaMDM9U+9EJJupAhgR3Hh5+jH7oYSBhIl2cxTEoKJFbV2C8sSNsHQNhDC5v33/6+OkKbu82/73/9OX+Py3oAV9bKb0j96N9rR20HT/2dbnIScJ65JoIwZDww0T7jmfU2IqJckTb+NUBrhoAQ+Irq9OK7tdPX1yM2lNelcIsUqvuaxtEKiUMszhngvjCwAip3a+UxsIiTAVXVbGJ/g67QrlEGbaVBmOczinLJPpm71RgvyiUx0KtVI9DmlSeCUkXRoV4BqUUKSqFPcFjDfZJBduP2Ws9BRZCrk5rQG7Mw/I8K8glYRXu68+dn7uBZKXRZ4MjBPtZaMKAr72+HQoIY8KGdSPmrUIgAF+qsA+YDvyHDmzrV92EbDggxu6QNwojSpREmxCmnCVckmgREZCoaGZzddSg6DfsCb+W5RLJ4gl4/oRk0ahb2xjGzBKryBMg/qIwaxDXk/C+IoB8RjlGzVP9yDOiicK9K4lTGMgcrToA5bWWidzyUEMCv7OB7XTzyez7Dyd3RguqexPiqBSMpl23uIG4wNWjkL7UaASKN0tqM1xwREALM63wOEfeaIZFaFIgiSSdB1OgNupcklmBXLtUz1i1CMI/IvbcmYEhQf1o3IhRx9hhjqVSdoWi9d1IsKEZnVodbq0G78tLkCmSarrEOEPDXURVLCvOqRf8CRLot4zMgLos2jhwmtcAwAFYi9eokftlBAsuUIhwznWU5tf2t6YyQq3Xz8a+lBx6kxXoyTc8NLylDgzpIYzQRRgnHvN5tQ7VAROCHY17Dqid5hVY4BDmJrV4BqjvmixnhKjHuVcYdLF7wHu7ZcVhkvs5VTizkBsubGIWGm2tHEqdR7brqR8Wq8H0XIS5hr09lreyK1EqqjTytOshTrU+sm9txwTJzhkPTWJqaJoklUBWFSXklKEJiIJfz4Qfy7/gs3gtoBBLhIca8oNJ0po/onpd6sGmCCTLQOg5yoY9JxtI7FS5KuxSaSI1aFrgFWhbZNoJvLLvNJZxBVEUfT8cEmWW7B0Gx9RSUixphmp7yykRlYa717c96gQjwywjSseKLDFK54TPUMWK+keDMWY10mRaa7iOKliqTjmI0lYvRuI2Ezgx3DelSOfXCTFloiGnNClKg95iVVWaolJ5xeycGFBhfWnzkMwsA5THpRQzid4VChhhiXuw0rVIssY8YIE7sN0MaKKrftjh5HQP2PeWTlPYWrGvcdfrJcIvGR9qM4eRwvTY4JFVPYRH8/a6HmWAO1PeK0wFz/rDdc1ovYfzzHltFM7Lbw6E+xPAXgGkolzFgsePkupGTem355CRe1dqDNxrwa8t3KbssXttWSWNaDY6cfu6K6OQXMKlmwhvoU4Tp159fOvi1DFhKrztBdP6R4OeidnMZi91zb5VlQ7UVU4Jn9jFGyZqKG37GunvGyZUOsesepJZIDzAwyNlDBKENTYQTR6x61qoglQUJUONuwuNPo7/KcEiML/j4kXD7D8sYAR4DseMLr+u4yF6JqHh3oSFmsc2a532j4HIv6W4T5ib+SZnHA/PA39XrbRoJmYcF/9HKchI1XzWxhbssRrgyrDT+M1nwJZhpWndsmsqdqL348YUuc+ElQ0LggMjGpVtU5W6KkHIxr+M079crXga1Y1iZ1vcsFTX7Wl/iURZs9s0hLx7+RH+rtC779oFnyEjq8O3Q8bGW0elhp6KiutQ+NkE1JLR1Bfrj1jTDA05tJwpBQtvYh+1T3Yn2NppUK40MYnmZUq4yT8vCqI0yosro5kXtgX5ItRiCL4O6ti1LQehn6RfsiEGHWJBeI6rWOT5Aa0N7dbbn6IfD0Nv45VtSvtOrZ1bS+MggC3IkrE+JmZRcJvuWHl3a5c22Jq4Z/c3BDMQMc/TdOXtKvHxEwigXWZyKpWOzWgHq9QohXFqW6vGMO7Ny4MczKnSrKeP8HDc96EOHvPlSLkP2HE0qdAPsdIOPleuTqsbtVJUpUnqH+c0nW8BffdaAZEIJE2x9HUfdCAzyhfhSuUEYadTnVC+gMuqfJmJR/79IDhTeFAR18sAMZmF248GfElPNb9XDKqhdPd7qEkzSFqv6eh5zcGgwpjEqH9x7QS7mOu+eIu3di5UgaFuqyNXHdqgOgoxw9y5wefQkmrAQIK5kLjmqLVSNo6h565oVsm0JFwZf2/S7rqyJXD/54dfe/aQgvzb6Z7Wpe560MYPWOLrNHQAYympkFSHOyePQ9kMv5McEwUEUsIzmhnjyYWEnFAmlj2G7RBTFUskmeAsDHqKxoRarLbFM7veIu8tfVzMO1XVc29H655YhRFVzxKl8tuMA0MYJT53URI9d1zQFKPwKAWdOfO4AS291WqbteF8ZEZ1rObk30eG
zHGEMip7dP9UlJKKsiym4ZNZpyJUiOzYCneYiAhHpn5dEirKK8bOoENEpvM4oT3tpyeTeMU0LRl+pXwWk5JOr7VpGg+Z9Klo1WeS+jS3f8brAaLSO8KpZ11W/BxGptMyLoU8KrAPU6nKQPPSCWnMv007PpNVnDKR7n3SaD8yqeA5ncU5PXp5byCiexraj2zNPOZqBXs6XmKKdHncYWp/9tY5Wtc+6N0QDR/J2Yb419EXQRwA0REdtc5q799QcXME8xxIHcn1qc8xODnqyB7IP+z8zViQqB+FXNRH/5tFp/BMG1TuPoezwKqvjtjFFQToSg5NOIpKRaJUcYky9jcxnHI9vTvDUKKsK8+RWN0lD4uknPKEuqnSauF+52obkKYi26A1wv799qVPYgEZu2tEzgrcbU8PIw8XlyuehtLTU1TCG+M3RECiIajcglZgf2cLWkmkpoRF4qhgOgpgs74JNc0aLEj8u0LlSaq9QFFOcTPJNtIMOR2BMwh4gSsV4deSykmuI+m6/QWuwFJzyza49FzZ1AW3pJNfolTTsAQhqxC0gIJ8bR9t3RmhF3VJUozmfeXXKWC3euuZEIuqrEWsml2RglAOmTu7S3oOt64hF1SpifdZc0IZZnsCDhdoVaKqxB7P4MimQP4bE8mWDpdV8lJVCTQ0nQdrbgqrkvWIYcWuUZdEa5Se5yZEXdMcATpc1NhukzgXchFX06QRu42ctsEltwlQu4ezoKkU3UbO8FqFrbcxTu0R9ViJdIGT2Oi2k67pGOTcLvT+8e63u1ef30BZyVKoMb0GNkDGzlGrWEuSLjCLjelMjt7aZ03RorcoVmvwcElKuxKfMATBmb0ewGQj9ov6wrlhDrfPok/uO+2FDibba/XzlShzIesLOeqj6XajuXM8vc5oR7JyBp9KEiFN/PIxZZvCNhfuBE7c78fSAlfx5DPk9G5ONDyibICzVQt6zwb0Lt4zTEMHsVrQsjxI8hurF49MzCLbaeZdd/HAHoD8qxnLeiUmHjfO1OODWtnBiy7dfe4IXeCqdUXo7hkYm+a1Xtn/7k/zr1c8/kWvAQn9jis74sA9VDsLrUeQ/MLp3xUCdQ5Wz6myqfLlf02UNspjZAY/N3naLzc/Gwy/DN3TZxCdVi7mdXsl01w82juZHj7/+emN5wpXLx6GfKbnJ1Lk93awJkOw4mpl9AwLe/2hkSejSqurmrr9RmlJ+UxdQUpkRjlhVK/cD6jVkFRdFI607lbKh3JyX+/Ga9GM7bNBO/PHGqIdpO/CXmuRtUDtw9Pe2ntKI/q9Rgz2wgGaUwxd17KOEstZfLppfLVESWYIWrMBup687VCinehT11Hrgm6MJh8K5X8BAAD//0AQtHI=" } diff --git a/metricbeat/module/system/fields.go b/metricbeat/module/system/fields.go index 472019559a2..28135f5fa95 100644 --- a/metricbeat/module/system/fields.go +++ b/metricbeat/module/system/fields.go @@ -30,7 +30,7 @@ func init() { } // AssetSystem returns asset data. -// This is the base64 encoded gzipped contents of ../metricbeat/module/system. +// This is the base64 encoded gzipped contents of module/system. 
func AssetSystem() string { return "eJzsff+PGzey5+/5KwgfFhm/m5E93mxe3vxwgON5uRvAWQ9sB+8Bh4NMdZck7rDJDsmWrPz1BxbZ39nqbqmlkYMMFtlkRiI/VSwW6xuLN+QJdndE77SB5DtCDDMc7siLT/iLF98REoOOFEsNk+KO/K/vCCHE/ZFoQ02mSQJGsUhfE86egLx7/I1QEZMEEql2JNN0BdfErKkhVAGJJOcQGYjJUsmEmDUQmYKihomVRzH7jhC9lsrMIymWbHVHjMrgO0IUcKAa7siKfkfIkgGP9R0CuiGCJlAhw/6YXWo/q2SW+t8ESLE/X9zXvpBICkOZ0ITLiHI/Wk7fzH++Om917kgqKH4Zmn0PggqKGztOBYrlp0dAllIRSjQTKw44H5FLQkmSccPwexUO5j91puU/TSKqhLC49uucFC7FqvGHPdTYHwv9nUUlsmQBqkRV++T/II+gIhCGrkAHAWUa1CyNTBCWjiiHeL7kkjY/sJQqoeaOpG78ceA/ryH/Il0hoy05hiVAdArCECYQGNEpjaCDthoFhkVPehrWWnA0kZkwRwLz8nKJzH0CJYCPoWJCBvdyeAQ6wSK4PA5LQbjc3qSKScXMjqRKRqA16CHUnI3Th6JkMb9AniOqAcDPJ8gDAMktZeYCeSmIBUaupCAx008vh9FxTh0xDp/6/fKYrEFtWGRNM2vSramIuf2PNVXx1lpzTBhQKktN735Uv5+P9ZOh1nJpvqV1sXgPo/C51+YA5AYov7yVYYIwsZE8E4aqnVMBix36ORumTEY5fmO7Zhzwt+tdalmipWpNtqW6xi9p1qDyI1CqWesLbzeUcbrgQKTgO3t4/ibY10GMPKdevFwGFb5cmh3lykVp1vImLVXWY9bHeWfWzZtyoZxvli8Ujk5SBdpbX7gCUpuZ+7AUN8LuH87+gKabSCo7Q5Mt45ys6Qasg0q/siRLyIbyDDfNl9vXr/9G/s1N9wXHbg1WzlMbl3IFNN4RQ5+sfDDtR2XCSEKjCMXO6ZZNe9AAFgvlT+2akg+iHSLQ161hdzIjERVu0aosL4I3KwXUgLK/EI5v5BepCHylScrhmrAl+XtrWCdS9uvUkB9f/81Cu7Zy5YTLhz1mUZrNcm5+cdKzAHL7U+fi/Llc2D+Xk/jtul9/Fm/nG7Ja/7LLAxT+Zd1OY90aaS6UkdYWBE0c2XiiPsQcUHAePvyX1UJdRsk/S8tokH1iLamLZMHYMPXFEjL2oL9MQo467S+TpOFH/oXiP+Dcv0xKJj/8vykyD7UALpPIb9UMuDRuDrECrvNAiIY4Z3IZs0HnOkB7w2L43IrufSuZ6UvO6X4bWdALTCZedBLuuVMhh5+Iz4380EPur9xDlSdWTpn8rsmKMekHO0Ql/2D/kzx8KMrIBtbg5T/jcxT2n8H1fILdVqpm4sDHj++Ijunt+OVG8uyUfcIGilE+d4fnCHgDIXyv/Qx5uRv5vGaaJHRHhDRkAVY4Nix2xzjlvGR6a0wfo+8hSAGNZ5jwmHDzoKVUsTDsJFZk7ApZkdFZZCV8mXG+68G3VczAyQHiLAciRA4udmZ4Ri03BUNfOgA8DoMw6rDJB0HeM5F9dSku1pyKNOxADZGRyo+EyZ6UMy9pglCts8RyBj9FNPsD7dB/3L4ZtILPzyCLw4CYhkf5YAPZ1Bq1n20oVvbcOaHYJ4xbnyCSItb+ePNqBXfsoIV9Nohuz/Yai6cGGMYYS3sOPrz60A/Qem8zXG0Fv2egzSwBtQI9T0HNNURB7CEPswd8M1WP29xPqQnOiVly4ihxGdstKCC/Z5BBTIzEzRDDhvX6Np4sJyLnpQvnPDVhtfU660KV6JnWLfQVOg9YoPOuzLSU4Ip4AvacNhOQ8XN53ha2bwtz03YPH2m9BFHrX0xLCN2Aoiuo+jRLqRpSFlwRI60Fah0WiMfs/zOuihOxUy6LI+l869LYNBMtTL7j6WY1tzbKaUhB6+eKCcfel3aZLOqBGmAYJajDT0wHzkE4iJVZn4SIc27zaQXJhS9g3mllHS9EbgZHiBWmqrn1Eol6ePVh2vVYZHo3HTWP4ch9nClrJG7XLFrXSeg+FK8WVMRbFps1yQzj7A9qp0UmlJ96OSP37uOamky5j8goyqzj4mrmypJHTSIuNS59vYoxZwkIo2S6OyaYVIat/HXI9pjjA0Q0H3S+YGbS0F+B1g5sl6wNt4Tx/KmgEq/HeW25SQ3bQC49qZS8cNl/eP0fP7ZWeck41G6+koOihuUwrdrl8k9TlDAXRJ8ppoABQszqVPhtpHX5M5EqtmEcrJ+Buan8xJsFobtNOh8Z4BwVxKyW1N6RL69i2Lyyf739EkRk5z0BFDtGEwp8NT+EQWDAfZ5K1hHpOxgLDmw1LY7d4k0YDUrrCcMGdnwiZAzaSovdo/ibduS8AknBs0r7fqm26OZTc63CLwVwCNOQ72fimlvjCu/2cyzTcN7AsZ1wJLznP90aoPfVKRSiqO0Bc9Q55kXKjVQ5yiqHWJ4Jo6uVghUtUmGUc6dyGpdbyq8efXvn0GTIP+vqx6MhS5k1PePa9jliW38OqL0OeXNTBZy4fVIfXtk24aBxfUqqSSwj3YoGBLhO9mvgvazoQ9/CGfIeiGciasDGHmgCtJvl2QDiTu0BGFLH50Po1N4VAk15ppGnL9suD5c0PkZ9WBfPjpH7sEdu+Be3L8YqYfsnJlbzJY2MVHfWtRuniN9X4BfuJafakISJzEB4D7/4xyUh/YfH2qFwXtxeFNrbANwwbixBfC6ZCMgCiVlRkzCstLBNznMtRVhgpqDo2aSrQ6qOpAk/FKbotJeGW9rZdQU7yrxzQ7RCFL7f2AThibO5HXiuOdz9/aPO5278Zo/YQbDO6NU6/6ys7EOLyq954Qu5iisXHI0laCy8YiLiWVx8OJLCVXksdrk5GdFoDZpQ0ba/FtlyCUqTKw2Fr+pZQyOTUT5rmCEX744NWlhH22H2ehvJWxyt7AgIMdaNWs71WfF7reXgjiBnsEg9QRV+VmTwwRAFXhlqF9lnVohAREAWYLbgb757kcaqhmqsxq9QsCmC/Wl+ksSQgoh1rnk/fHJxskQqIDEYyri+JimqQRKtIXoqfOSKDH/pEAny/D6UZ3d4yz8YzINQHmUcHfkFtctS4UW9TMxph7y/wK+QlAkODAG8SpWMXiWQMLGU121e2B+pqhPi16rg0D0plUqhRNiyProFbjVUsaBt38v+fBDkw6f/JgwJpURnSVMB5jLEBI0wdZCL0AdB/ouJWG71tf8+/N7e2H4VZSEW/utDxaJDvZEhKo70qjky0Ets51Zau7SvQHhLm5qtW+elCpbs6x158X+RrP/XNK/qoRQreThKabZYS4VpwyLtUj5lvtDiqPVPzaU5FCztj3w8s99eEjNUlJ5LraPhMw7vc2nEMik7Cq7MzCxtXRYfgLmGKcqNMBwKIaRW5WamHwETpwPARP/8CmhM11hvdjQM5H0xIKkPOAABHhGjY377IOCIZF3NqX9rWjsbtgmLFD5
dwRydvsOs1fzIxmqVQiOP1LDpSkdUzJ8s7AMFK+dm8HZKC7WX+4gK4TwZN3UfwJgpiA7UAMcAdPPyZllOFR86A2cDZmcrgimt0okW7wxQfsbVLaMrDq2CiFOWDF1pRHu+pe5G27vs7gNzWC5ZxEBEuzPqIzd3jpaUGCr6aEbeEi63oKo6iomYRXhpuxQea1lro7LVCi9CGlmM21RiTRa45XweFri5z86CoB5fZysICevZDXALxEvyc9vew/Zf+GAt08UVgnzhRSolv3Rb/NdKsIgJQjmXES5RSc4UnmkPAUfZNvXS0ZpcdVbokol8i2lEp4w0HSxEClxF8vMSkqMgi8y4kEtAnkZSpjOV8uzEh2sfYXIDKpJJwkZvjRiWNOMmVLQxmIYj9ve9m94Vti6lGgfeHlyzqr/ZBB46MFrIKksf8mEr9HZoedJwREK8IH3MbMEagqiiJSjnCxo9TTL1u9yxrrAGa/KTTOMVdp1yZv9liZ1ktzStwiuu/4PZSlVFND7L58eopPn8b6qNDGrv4eR/x+YTy0Yly/l6GIBZj0z8YkK1CX5IQwOZmbPWIDZvZWsQXU0KK+Ge50SoIALWfx/GhcWiJ5j0LkLVMcKxBzLsdEhUgWQgY5iYgVJSnYYtbmjfbsUhYmI1YK3OhUmDiPsRMTGLlbTK+iSImIhkgiXwfu3KS1J+2gEcOyVAmZmV3A+wmphnmlC+pbv2Yfna+lr3VG2twS9i8vOne7KAiGYafPbKmm4KUqlMGb7pbl3TOI/mOksSOqD6pDgsFmDosPPqV38iucs7zv9dcbmgvFDtmJpjZjfw/GHp7N+CyyUX/4KWR9OzYA+PLmYOKtwFzkRTzvb5Xc90WTzldL/d908358zAxHO+Zwb2T8yiZNJVfPdrgNLCAHWtp46yuvwYFavL/8aaXDSmhl5XHyS8rr702HgmkUxrdVHOaFNjpNSsC7pnga8mbOVuUBZPSLZnxAaMIwy9IVU3nmc4dOPO0guVCcHE6kW4rjXteHyxn/z2N4dQnx4x4YEzrg6fsf3VITNGScyZmHiNlxnnxHreVMQ3dngXqjLSrroyLpDgcF/7EjQ8FgI1PVStsgSLhTSkVFF/tgWr8dlKSAVzupAbuCNvXv/wU1jjaVAHbCXXLfywfRRtD11WezoysfIpi3p56NDZQWyGq1n3y/mREgBiw5QUduXIhipGF9zH9oJS4B7QsSo01KmKVpoDkl8UwM+f7q9d2ZJTsh8+kf8Oq4z6W0Vkupj5u8ffbnQKEVuyqBosT8s+h2PD4Z3dZsmorHd3LjnQ+tFUNfK+NrRNsK5nMBqtJ0JbvEFkwbpsg2YiAic9Xl908boJ9PJS+Y3um95eL9YCKS2K3bM0xtPywVQcBc0SxqnyhVHBaf9mZykYWZ0gZjrldFd6CkamucrO22+2Oy2GmdvROfqb4jBsauGH+shV96zy8lbrvkFZ72+5yAxRVHQFPrEw8nW7O0WTxXtaPZMz64VwC+gmYCcTp8TraoP3Lu8eflrtEerqUqKL20bvGHQW0zZ/wStnInbEtVNDx4XU1uUPMrxOx+UDx55Hfedd33n1TMmRUgLytsTex6qye011tcDXVTc3Ks/fYWqIvFtTtQJyZQIXKYqRqTNXcm+OCroCZWchLsGEpc4YcPcuTI7kZfECn4+6uktMTPdLqtL62TLMlskfQbPYbq1PYMgn9gfMGtoiwHcZRVnKXFo6ofYf7jNXH9/++rJ3RaJMKTuhN3qJBpcDu+640d/k1uWdQaNZ1L3b1lQ913bDueMQMZnubJsR9ngOuCHzC+NQfEYqbwvmERV3PFtJQXa7SCPTFadhGWhQ7u482FPa+xNTK0eZgjjq+Gt0CanzQOP4g888zhJmZlouR9d6DBUQuTRulrwiqAd6YT0Fh6x5hZWxIyrIAki0tmZV3LToqCFU7PD87WPFmrac2qlYYYc+FSsqY1tWYKv8BRBF8/dPlJSmwxEObbyDt2Qe0bcbCPHosjWlmwnbomIDODxXqX5yF3QSwPbvrRH9t4reIwrKXEbLmLLnrhtIr1mKJVCtAYUUN5YdfmRkoIbaBMi/WnAB1cJYv70VdyM9EbQBDCZemh7u0cCwkiSxAYujRhOqtYwYhsO2zKzdcWrZHPZhHtD7w+Z74ntDaD7qw70LyvjW0/noOBrSnV8GC45KF3uStqRW/2HWp2OSHT2/HuTlqNknzv9aZwvnT32vXSsb1zlrFMtwtnMwrR27IuNKeLo5FqVZyQuiozXEGQeNPhXFLvLOTqX6qaj88vsoOOZb951cP0thlOTca7atLGK3xVRKX5N3v3xCBfLxc3hQ+3dtqIgdmPwNA74jS8pUOZTXM6mSVl8wKSgPlFUjd7BRgLf+c/cxv3WaL2NxRXILbLU2M/LxcwVGcFwFlHtftAFKg9GVd7WDnnbQHiXlO0b1BUAm+3vaeadNSlZsA8Lankzuq54cVq0VVGhkwH4lTQl8uM/jTk3p2QugQ10cBCG8CezP4yFqo3O0kDrZS2S01DO/YMFCSTK6QG0PqTgProV/Wi1hkZJ5a38sMZRbomCVcarsqdg5lGPJ9zrXE0aiLCvQMlMRaKLXMuMx2iVQVJKO4MnvmTT09Cz53HD1OxnjNjLl4YvBCClXk7S6R1Um8v0pBfi9Sa6oJjEsmTP7urlcFY6uDgoh7qGrdmrevRVYi7cC5aMbGCDx4SewCq/YSIinqvA6B611H82NxhpbZ5W8QD5Z7LVjNyfTzDPFmd95seYbYoWerdZVa3Qve5W54P1a7Mtu/nbsVwzDjN2oysxUJtDVugRmYBhfihVog9YHE5nMtN9znQMz0XBR6pt4TTfQxbWBbHINdxyMU7OprHv3qgbLZTeUa1Q6tQ1jN0VdxXQrN7u1kRXAabr/ckabdLNW0hgO8dmZYGVFd63qwrUZ8djIFRLJ9HXnuPm1iK1LYFvdnhffmTXsPIO+rmmG/RjxAbPlXr1UUXdWqmsr5OIBTBE8C4eq/ybHxcmP0CJinrd9dx3Zr5ggggpZa2Xvd1qxHj0GRmidhvlMNNoTBR7kN3kvKO+uHKjdyn/+sqaLn2e2pn0m+jxWY/WN4oqg19pnWRVQ9Z975H3UHnelSNPQWtBSBV8Ax1KQRMZdty3C+HzO+nwIr1xq+uUYqCmocISF7C+Qyn9qhVLHS1ZBZUt7FmRLQYBGa/xoQ8L2HN9M94vY3iQ0Gac9/eVUHxZ2NbB/KdATKdDxijKBZIYZtM7cMhmyQ/syiyMIr7Z69Mm9xa4z/FW+ujSa4IR+vRyi11BEBau9/6am3OW7LpHqMvTiDpl67zpLbV4XbP327uOTRmt46Us02vFqTPRULPdMDz0fLPeWlPHs9PGUeq7Xey6NmhOX9rtqrOlLsm0VEJc/CrCJ0nCC9fbSdMMa8p5+eXFITVFgN0FsiORaoPs91DnelHurYNal6pVWpY49jNvMqjFljyFxLLMuXhXlkSQvcE2m4WLvj5NMrYD09pJUUHOz4YJ2jnjVWnVUViOV0tOl2iu+GPZkZsvT5dstTRb0mi
+do47nzMUrE7ls8GefguiOEh6iOJ4u03RprtuUtosde26i9CJVRU1H2EPm87vHsu9xq7J1DKGXqhqqOqFJcUBH9ATHDtOeyKZvQU94ZjX51FIYfVw62NIouHWZSqO5kN3JqvH2hQtYus7gcypkuCXLYAZMKCtvhRS7RGa6tEBdB1spiO9kzoFqc6MgAmH47gZ329X7j791M4gzbWpXbpN0qcmVXieQvAyV2Q9nnvXSz8y8XxiHmwWNnsri9JI57z/+VpB7AFXI6zPT82gPCJx46jVaM1BURWsWUT53rJpflmqsho0LTyyH7a2novFCRU843deduZ2EXXp7mdwqPbLBfOscss7Pw/iWv7Hw7WjS4lWIqrqo7bxuB7e5Iw/i1DOozW5OhRVqkEcHSEeCTfsui+JP/u1wR+2Ng0j8/+Gjnt2qeFqdg83ase/j2YtkrI6gxe2KundqFFutQEFsP7EvAIbQR8rDv6SafwN0I9AewsmLX+2nXrj/1GRtRUiUd1d8MMC9u8J3eIfFyH3ur3u2Bpti4OWamFVvdwyUKD3vDLucoPAMe2Laf2L1may81sTcpby8Q9MBdHT1+jw1ITKrOGnHkrLvTu8gUs5xLPrUm90higqdUsy7FE3IX14TIbvjvtMarkrruZ35Yrj2z0YXTbkktGBkkF/jSme2NL0YWj8VaY8DVy8TsGGRwee7LoWoXyvh2IgKIY27q+AfZhhEaU7lgj+xkA4fUS7zM5dRtW/vX1UyU1fJHFAk46oJL0Vim2/NO8WDumYJSrlon3s00j/Tv0Chiu3m65yc7EnWjGITk+epuiwZ8PDqQ97ZVAos87fcdhVylvzDCcc6bCiv1vvaY2zoITmLdu0GqvkN7UADVWY43JFHb19+GthhdQ9T/BC1Ht+Vi9Ggi/YjwYeGT/7gb19nuMY6lrARLtMtvJUbJ46wiZBUXj+o92sZhIXFrYKk44HYQUeh0BwgPQVL8oHHoTFTNlGugHHjjsLyh0wWbPoVcsOOQhIDnZ4lMT6oF0ZBHsz3mmxA7UgmOHsC7k0dZtytdOuWUoVvfTBBtEz8XTrKiWYm8yqVGZLQnXdiw6Rl4knIbdO5PJ66krDKtZG1e4AOWwrzWHzvbTajGGys3lfWJfOI2ipa0Zp1NFrtNr5/8vcS+nhFk6KfnzvqurYkNa3beUfMmzfpvnFLMQABhw2ED4+DO4vatXDj1gGEObAT0dzClmE5PQjFO1+IaAcnbvBrwhyWj28f7glViu7cvco4EzEVJtx7Pmb6KU+fTbSNqu8TuZitm2TP/Kc84HGGyiLhXQamjXWb92FCH3p6luCwcT9LlpTxyY6yyvxu3P75cX/pMc3Rj+/aSxKKTXsU3SIKp2/DrdvRvZhWciphFRy8KjRryWMMw5Pb129+uLHuTw5hHzy7P09gkHh83sD2EF0oWeGNMDtvD9pCP4Fq6K7JHl2ough5nxc3mz7DoYUVHpVjqk1o5ZCQNJ4f1Wn+M17/pjGpHUz75jx6uvwsHDFltjieSp0tbsYROcdGt8E5A31OWxNipsTQJM0nxG653hbDPmwz3ykph4LBcXf2UBHn/tW1s1Ht/4wmWdru0lZ0K/8K0TyS8VF8+vTwv9/9n/f3xI5TtibzCL/XrvFi+1GIismY3/QPouhtmeZ3XNFtrNWtK7xy/c3GojTzpX/B25XDO9iVXbXr9w07Z/YJkP0FlsPnrxVFtjLozcmxDG6GGZejZq1UnWFh3fCFab2Y04ljUNg3f/gml7PO2wWDYr/nrrVwEciOzGIFVfi1rtPhyh9Z6kHW/aTZYGjBafueI+x6k+6wWX3+qTJn276QFtgxru/nd49+FF0aOU69HxdXdA9adDlm3Q9j+I0z6/p+14MYQRBLmrBWr7ihCOznjpmcy4jyGQs35Wz9ungk5/Y/3sxez97MbolU5M3r17d3r+9//unu7c//eX/30z/+/uPd3e040/a9xUEeHgmNY+V7jbKimR8V5OFx84Od7OFx82PxoSG0pVI1N0SniBf0vXlzCHw7VQ8mBYk0cAEM/4hAJua4p+4sLPcEDOf5Wuowqp73Qv/9x5s3t7c3t7f/fvP3H2diO/N/mUWy9dp4D+bHzx+JgkiqOHjoq3xNZuQBX9OTC0OxS9uGUaJgA0q3j+eHR8KlfOpMmDXYAIbH85Rnei5HPblUvp96KPn4Js9yCZFPlKY3LoQWS7SEr+Dz+/uXuYnveWEXzVWYSgEkke1rSpwugNfe8LrGAexo//MWXc8XSylnC6pmK8mpWM2kWs1eWP6+qP6ilfQungOyY8RgQCVM5G++2OFJJBPwXYepIJAsII4hJpFMd0VgkJpWmyH8wtqY9O7VqzRbcBbpbLlkXxHHYFme40uYhzoobeH8Tzuc/9AiJ9O1lyrWBCXQixvxFzV6EHc/f9Z3xo1/OG0vAP+wzIEgAoGIw1BM/dbZL5V3zkht6L044Ouhz/hZ3zjDcppj+IHtg0aLRPhb4yfuDCv1TL3MOJ+PEIW6Ddydnv+EfydD3z8dkZ2XS9emP7efWZmT9wGCoyzodkvSg/u5v0U5FsJZ1M1F6I1J7HXLfafQPoc4XP1hgSEPu9FVe/trA4EigQmxFFOg8dP5XuyU6+IejD10bXo6OnUzZIKnQ36tXwyvupJ5wOe6bLddhmaKZqS+DhfLU11ALXXPwP0BM/JOKgU6xcZrRub9pjRgXvuV1Ziv9E6/EmBesXTzwysTpfMEkhn50NH2v7vML9z89+hO7P2rSwYGgKRK13R/nXf3Sg9Ei4jdXveL5KeF2Ip8vrTd/N1LQZcOmZqAXJ/0832YXjkBPgttn55pwgNtLQKm161k1wkAlnmwyrSjuBlxqWG+pZ2tQ06CtoHQ6oh5iWQeTAjVcRuWXAbsAsgQ1Hon5jr8oNVZQec4hmJWEDUfrX0WzBbHEMxLJnBNmqGgs4MugIxB3Yz/PBvqN0NQc6rNnEahDMxZQec4hmC2uuYsJ0i/ymNiFUJcOGnxpObrb/d/EvPVEvKM5msWX6L5un91yUDz9dzGXxfqPf9S7I608YjF6CjBFzfEl3o3A3+dQaxyUXGf8rGEI1NtvjH7LAlXMwRSA/n2yb/a+DMTaWbm+YcSxjkLlw8MKOj88CmnFV92KIdql0tlGpTu5f0BxVLv5WoF8U3x9jlozaRoBpD38bgjnHZwmWt5CcuDCc6qofV41BHzvhXV1AiXK2Y1V3OKPfe9jqT5/udM+0pG98raAA4EkrBHorBfz2euSkPHAoRqRY5Zg0L4hpam1NMTQSQLKTm04gO9SOzXCBMxi5xmonlmaC9HjilxC69I3vm1Ufi2B0Mkp5aKymo4BR0HZinL3mncOqwOrqleA77rSR6H6QS3RvORKdfeI/RtLS3oc9Jly9QGoPJf/n8AAAD//y766bE=" } diff --git a/metricbeat/module/traefik/fields.go b/metricbeat/module/traefik/fields.go index d18ad7398e8..19bd3b052b9 100644 
--- a/metricbeat/module/traefik/fields.go +++ b/metricbeat/module/traefik/fields.go @@ -30,7 +30,7 @@ func init() { } // AssetTraefik returns asset data. -// This is the base64 encoded gzipped contents of ../metricbeat/module/traefik. +// This is the base64 encoded gzipped contents of module/traefik. func AssetTraefik() string { return "eJy00bFu2zAQBuBdT/HDS4ECdncNBTx2aFEUzWxQ1ElmLPGIO8qI3z6gLDmyojhGgtzIo3if/lvjQKccUQxV7pAB0cWGcqyGk1UGlKRWXIiOfY6fGQD8P3chdCRRQhB+OuEHGjYlCtMYb0nQUhRnNQOEGjJKOWqTAZWjptS8f2kNb1qaClLFU0iXhbswnCwgPgJJNR0/JezJNHF/OV5S3JCk+n2eAy6icZ5KVMLtaPymwwRs//4C+TKw83Hy/XVIY821U3EXomtpo2Sv2qO8YV/PGjfwqR76B8HVJVjnNaYQ4TyULPtSFylCGtgrLULmEd4h+Te8N9vdrVSmHMvdVbbvBnMHKdWfri1IUkDjD89lLwZzrHf9frrXdz4v2R5JTE0XB/rNOY/WWeGlVU1tGk3sdGe5JN18f5PHxSPZpRzPjd0XxIlAMvCQeNlzAAAA//9iMTnZ" } diff --git a/metricbeat/module/uwsgi/fields.go b/metricbeat/module/uwsgi/fields.go index 120cb4a8a24..78247b12e18 100644 --- a/metricbeat/module/uwsgi/fields.go +++ b/metricbeat/module/uwsgi/fields.go @@ -30,7 +30,7 @@ func init() { } // AssetUwsgi returns asset data. -// This is the base64 encoded gzipped contents of ../metricbeat/module/uwsgi. +// This is the base64 encoded gzipped contents of module/uwsgi. func AssetUwsgi() string { return "eJzEl8tu6zYQhvd+ikFWLWAb7daLAgGSRRdtUzvnslNocWQTpkhlOLQiP/0BdfFVVowDGoc7yfT/f8PLzGgCG6xm4Eu3UiMAVqxxBg/188MIQKJLSRWsrJnBXyMAaOZCbqXXOAIg1CgczmAlRgCZQi3drJ44ASNyPIiHwVURppL1Rfumx+FU5ljKsWDv9q/79MI4ZepGr1MzasRpow45MqnUIbcYRzPPuY7Z2LLQU8J3j47dyZSOU1uzOvthACqM1yAKnSishZEa5YA/fqRYq8UluCJ76l2SYkyQyNKd4q8doMfhfBOEvA+HkJ+7F0pGcn0hm6JzoPp3vLS0QZpGs/tW633iFi+61q64KUiR1gfwwuNnzRcsGMFmrf4Y/gSVgWOlNey9wGB5OHyW10ilcgh/DJFGzgD/+nyJFEj3IA5pixKWFfBaudZ3CEmiZpH8IjAQGSN1D8qF7GyFRAnlGk24U+kaJfzz+D2ZP///5XnxuhiKJXqCe94LAgnlrmTX1n0tSGwUqSS13nAkgieyRYHysI5hAVWO1vMQi1MrI3S0ZcgVM0poVeEywF775N2jx+hHqqMohaovIltY4mD566jOO4QDzwar0tJ59rotSbWtwW/pGkUxhkJ4h+NAOYald9UYlNT4+2BacBGp5uiURMOwQIaF2uEUcswtVZB6IjSsK/CuuYqiy7DTkOGELkXlYIdkgamCt8mk+eeEsLDEb2Br17AJx41bb0xbt4u0818VsRf6Mh6ndgjChfOAMhyDO4ZD3hhlVkm4e5EreCsNF9IXxcMVojRR08u80fz8RvNHrG6JhHH7fLIbjFhsVwnFCvVxiyRWCGEZrXF4fb1TSxivc2qL299P172aKUnE5lCEi95Zvwx5d4VlWjeo0ZM1nzbpTRdwA01IqiqNXztqWciUxoalv1k/ZSHrGWNtzbwWg96Gq9/eZlnTFEUi+K/TuwHiDp9th83IhNKh1QsegwsR+4vtAiFYuNGPAAAA//9dpnuJ" } diff --git a/metricbeat/module/vsphere/fields.go b/metricbeat/module/vsphere/fields.go index 1490fb00f37..b0588e42a72 100644 --- a/metricbeat/module/vsphere/fields.go +++ b/metricbeat/module/vsphere/fields.go @@ -30,7 +30,7 @@ func init() { } // AssetVsphere returns asset data. -// This is the base64 encoded gzipped contents of ../metricbeat/module/vsphere. +// This is the base64 encoded gzipped contents of module/vsphere. 
func AssetVsphere() string { return "eJzMls1u2zIMvfsrCO/9AC/dJEO7cVsgTbcGI40srqKGIEcJ1KcvSEqGfuimXTQ2FdaSC8/hOZSX4S2cscvg2eoKDe4AWHKNGdw8P/grNzuAAm1upGZJTQb/7QAA+llQVLS1u81gjcJiBiexAygl1oXN/NJbaITCsYUb3Gm32FCr+ysRl6nQWKwQLCzTRS4ueVW2n4qITJ9jGHOMMYr7nUwMJGfsXsgUs7lf8Ljx/8C01B0MS+v001ney9ptZxkVLIQHz1xokUvu9kwsav9Tx2ijBDU1p/fZf3WK4BWBSuAKoxvjRklGCc5gab/gLA1iUsx7g5icsrVYJKV8tFhsQ6lzTsmo0eTY8Fsp++WX2UUgVGR5TRbM7v/bMfCRLF9PgFy3YVNU9SPlptx9eQTZwGGmOrYN9Z/ON1T/G4x9Qafz9eX8iq1CRWarIj14cWcfk369Onu4rfI4Ed42MbwWrkF+IXM+un9xsN9a2k9BFqayl/OPNNyKWom8ks2qc8tVpfenlou9vZy/59rgmglOzNzPRlk5NJNZlm+V0d/CNsAh7MP1uKaE39jc9LNGI1g2J3gIR7h/rWmMQ/TUouXNopRK+OAMVoeWZ/Uf64aooVCSpH/61zrpAcneq28G6WHHLWE9a95aJnUMoRklpKfvuDgMh4vHFZV9540hYvxH29bPAAAA//+QNQ9N" } diff --git a/metricbeat/module/windows/_meta/config.reference.yml index dc9b27a9c3d..49656feb554 100644 --- a/metricbeat/module/windows/_meta/config.reference.yml +++ b/metricbeat/module/windows/_meta/config.reference.yml @@ -4,11 +4,14 @@ period: 10s perfmon.ignore_non_existent_counters: false perfmon.group_measurements_by_instance: false - perfmon.counters: - # - instance_label: processor.name - # instance_name: total - # measurement_label: processor.time.total.pct - # query: '\Processor Information(_Total)\% Processor Time' + perfmon.queries: +# - object: 'Process' +# instance: ["*"] +# counters: +# - name: 'Disk Writes/sec' +# field: physical_disk.write.per_sec +# format: "float" +# - name: "% Disk Write Time" - module: windows metricsets: ["service"] diff --git a/metricbeat/module/windows/_meta/config.yml index 3d3269a3b07..57ab272ef23 100644 --- a/metricbeat/module/windows/_meta/config.yml +++ b/metricbeat/module/windows/_meta/config.yml @@ -1,22 +1,17 @@ - module: windows - #metricsets: - # - service + metricsets: + - service period: 1m #- module: windows # metricsets: -# - perfmon +# - perfmon # period: 10s -# perfmon.counters: -# - instance_label: processor.name -# instance_name: total -# measurement_label: processor.time.total.pct -# query: '\Processor Information(_Total)\% Processor Time' -# -# - instance_label: physical_disk.name -# measurement_label: physical_disk.write.per_sec -# query: '\PhysicalDisk(*)\Disk Writes/sec' -# -# - instance_label: physical_disk.name -# measurement_label: physical_disk.write.time.pct -# query: '\PhysicalDisk(*)\% Disk Write Time' +# perfmon.queries: +# - object: 'Process' +# instance: ["*"] +# counters: +# - name: 'Disk Writes/sec' +# field: physical_disk.write.per_sec +# format: "float" +# - name: "% Disk Write Time" diff --git a/metricbeat/module/windows/_meta/docs.asciidoc index 3a14b2eb03f..b7ed8584d22 100644 --- a/metricbeat/module/windows/_meta/docs.asciidoc +++ b/metricbeat/module/windows/_meta/docs.asciidoc @@ -1,2 +1,7 @@ -This is the Windows module. It collects metrics from Windows systems, -by default metricset `service` is enabled. +This is the `windows` module, which collects metrics from Windows systems. +The module contains the `service` metricset, which is set up by default when the `windows` module is enabled. +The `service` metricset retrieves status information for the services on Windows machines. The second `windows` +metricset is `perfmon`, which collects Windows performance counter values.
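The mapping from the new `perfmon.queries` options to Windows counter paths can be made concrete with a small sketch. The helper below is hypothetical (it is not part of this patch or of Metricbeat); it only illustrates how an `object`, an `instance`, and a counter `name` combine into a Performance Data Helper (PDH) path of the form `\Object(Instance)\Counter`, using counter paths that appear elsewhere in this changeset:

package main

import "fmt"

// counterPath assembles a PDH-style counter path from the parts that
// perfmon.queries configures separately. The instance segment is omitted
// for objects that have no instances (UDPv4, for example).
func counterPath(object, instance, counter string) string {
	if instance == "" {
		return fmt.Sprintf(`\%s\%s`, object, counter)
	}
	return fmt.Sprintf(`\%s(%s)\%s`, object, instance, counter)
}

func main() {
	fmt.Println(counterPath("PhysicalDisk", "*", "Disk Writes/sec"))
	// \PhysicalDisk(*)\Disk Writes/sec
	fmt.Println(counterPath("Processor Information", "_Total", "% Processor Time"))
	// \Processor Information(_Total)\% Processor Time
}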
+ + + diff --git a/metricbeat/module/windows/fields.go b/metricbeat/module/windows/fields.go index 34f9f8b7861..2cf2c008578 100644 --- a/metricbeat/module/windows/fields.go +++ b/metricbeat/module/windows/fields.go @@ -30,7 +30,7 @@ func init() { } // AssetWindows returns asset data. -// This is the base64 encoded gzipped contents of ../metricbeat/module/windows. +// This is the base64 encoded gzipped contents of module/windows. func AssetWindows() string { - return "eJysVcFu2zgQvfsrBr30Ehvb9rQ+LOCNN7tebNOiSWEsECBiyJE1CEWqnKEd/31BSXZk2akdIzwY9JB8897MoziER1yPYUXO+BUPAITE4hjezZvIuwGAQdaBKiHvxvDHAADgszfRIuQ+wHx7lAsf5F57l9NiDLmyjAOAgBYV4xgWagCQE1rD4xpkCE6V2E2ehqyrtDn4WLWRA/l3gRqwbaIHFNWJN0kYw5I0buOHEr2YrBlZi5GB9k4UOQYpEFiURO7WYpOMR53zu3XYjL6MLmUyO+EN40dcr3zor+GTKqvUuWL+6frfK/1hUvV2/EJZGhOIjn5EhNm01lJLa3SMYCZADAoKxQX4vF4slS7I4XuGv7/PpqCcSeE93Baj1vRcj4OC0+85kueoealfJ/f2Wd4p1AxxZdX6/myKrTP+WqITuPTWohYfXs+5JVLT2nRi06ZfS2BRQe4T21cIOKWKCTdWNUyfUb2h8sz0YBGWykZkUAEhm0TxpRLS2cUeavan95JdQDYlVg8WTZp/Vi4qm13URstu1ixYZidJPrdn17cw+X77z5dvs9v/7/7zWtmbvY/ICTWaaO2jk6Zj0RkMsCpIF6C2BgzR8REplZLibCWX47v57Hr6ZX5zx3XhPn2846UuPMsInxCGj9DVB8NXfjuuorVr+BGVpZzQ1GRBfG2FnCyCFEqAEpkSnXDXI/vtJ6dtNOQWoMIi1geO91ne2NVKS1S2QT7d1ZfeCblIbnHI1l9V5HqpmTa+/hada4M3ya7bua+qZt4YPv1Hc8zx+ETpCTZvWI0rH57Tb582kII4PQqpMBiCD5DSNp3e+horH4T3IFcFuuZypi6LB27VptISw4qshQessRfoMJDuv617mB0O0VnkHZNBFfySTGrTJjTkCjXlpDsnj93BF15k693ipav34bffP55R740rDtdbMXtNStJVC14nsV9n0yPsYyVU4qjst+NFDbkPpZIxmBhUIttbJldFud9sKslaYtTemX6C01/i99yyhLY5aIDcDvZo8DMAAP//0yHt8w==" + return "eJysVl1v2zYUffevuMhLgSIx1vZpfiiQxcvmYUmLJoUxIIDFkFfWXShS5b20Y2A/fqAkO5Y/YscoHwKFpM49555D0RfwhIsBzMkZP+cegJBYHMDZuJk56wEYZB2oEvJuAJ97AAA33kSLkPsA49WrXPggE+1dTtMB5Moy9gACWlSMA5iqHkBOaA0PapALcKrE9eJpyKJKm4OPVTuzo34XaB2swpCX3q3mdwGm0aW1HDtLNWMbeJPCCwlyLMpp7AA0PJ5wMffBdFY6Vf/rLAGMWiyYKRuxv6NaiRJIc/99//2Ogv7xX9TSWWimJs16br3auzwpVVWRm7Z7z96f7Sf+eYP4TU2roc0QUGJwaF4EbFnHGGbUadpu616pmrUYGWjvRJFjkAKBRUnk9bgui3H/YCa2bV4z2myI3m8yAD6rskqHqxh/uv3rWn+4rDZ2vNpPgEuIjn5EhNGw1lJLa3T0YSRADAoKxQX4vF4slS7I4TuGP76PhqCcSdNbuC1GrWmHP+uC099TJI9R80y/Te79i7xjqBniyqrF5GSKbTJ+n6ETuPLWohYf3s65JVLTWjqxtOl1CSwqNAfvDQKO6WLCjVUNs8mo3lB5Znq0uDytKiBkl1F8qYR0dr6Fmv3mvWTnkA2J1aNFk55vlIvKZud10LK7BQuW2VGST/Xs9h4uv9//+eXb6P6fh7+9VvZu6yNyRI8utfbRSeNYdAYDzAvSBahVAEN0fEBKpaQ4WcnV4GE8uh1+Gd89cN24Tx8feKYLz9LHZ4SLJ1jXBxdv/HZcR2sX8CMqSzmhqcmC+DoKOVkEKZQAJTIlOuH1jGzbT07baMhNQYVprF847LP85FQrLVHZBvn4VF95J+QiuemuWH9Vkeul5rHJ9bfoXDt5l+K6evb15bgKfPofzaHE4zOlX0nmJ3bj2oeX8qurDaQgTpdCagyG4AOkso3Tq1xj5YPwFuS8QNcczuSyeOBWbWotMczJWnjEGnuKDtNVv3G3bmGucYjOIndCBlXwMzLJpuXUBVeoKSe99uahM7jnRrbeTfcdvQ+//PrxhH4vU7G734rZa1KSjlrwOon9OhoeYB8roRL75aYdezXkPpRKBmBiUInsxjK5Kspkuakka4lRe2c2Cxx/E7/jliW05qABch3sfu//AAAA//8t/Vii" } diff --git a/metricbeat/module/windows/perfmon/_meta/data.json b/metricbeat/module/windows/perfmon/_meta/data.json index e6b39dd8588..4e4d8724078 100644 --- a/metricbeat/module/windows/perfmon/_meta/data.json +++ b/metricbeat/module/windows/perfmon/_meta/data.json @@ -1,24 +1,30 @@ { "@timestamp": "2017-10-12T08:05:34.853Z", - "beat": { - "hostname": "host.example.com", - "name": "host.example.com" + "event": { + "dataset": "windows.perfmon", + "duration": 115000, + "module": "windows" }, "metricset": { - "module": "windows", "name": "perfmon", - "rtt": 115 + "period": 10000 + }, + "service": { + "type": "windows" }, "windows": { "perfmon": { - "processor": { - "name": "_Total", - "time": { - "total": { - "pct": 1.4663385364361736 + "instance": "_Total", + "metrics": { + "processor": { + "time": { + "total": { + "pct": 6.310940413107646 + } } 
} - } + }, + "object": "Processor Information" } } } \ No newline at end of file diff --git a/metricbeat/module/windows/perfmon/_meta/docs.asciidoc index 4c90de92fdd..f04c9247c04 100644 --- a/metricbeat/module/windows/perfmon/_meta/docs.asciidoc +++ b/metricbeat/module/windows/perfmon/_meta/docs.asciidoc @@ -15,6 +15,28 @@ to collect. The example below collects processor time and disk writes every period: 10s perfmon.ignore_non_existent_counters: true perfmon.group_measurements_by_instance: true + perfmon.queries: + - object: "Process" + instance: ["svchost*", "conhost*"] + counters: + - name: "% Processor Time" + field: time.processor.pct + format: "float" + - name: "Thread Count" + field: thread_count + - name: "IO Read Operations/sec" + - object: "PhysicalDisk" + field: "disk" + instance: "*" + counters: + - name: "Disk Writes/sec" + - name: "% Disk Write Time" + field: "write_time" + format: "float" + + + # deprecated, will be removed in 8.0 + perfmon.counters: - instance_label: processor.name instance_name: total @@ -46,7 +68,33 @@ counter requires three config options - `instance_label`, `measurement_label`, and `query`. [float] -==== Counter Configuration +==== Query Configuration + +Each item in the `queries` list specifies a perfmon query to perform. In the +events generated by the metricset these configuration options map to the field +values as shown below. + +*`object`*:: The performance object to query. A performance object can be a physical component, such as processors, disks, and memory, or a system object, such as processes and threads. Required. + +*`field`*:: The object field/label. Not required; if not set, it defaults to `object`. + +*`instance`*:: Matches the ParentInstance, ObjectInstance, and InstanceIndex, which are included in the path when multiple instances of the object can exist. Not required for performance counters that do not contain an instance. + +*`counters`*:: List of the partial counter paths (at least one partial counter path is required). + +*`name`*:: The counter name. Required. This is the counter specified in Performance Data Helper (PDH) syntax. For example, in the case of the counter path `\Processor Information(_Total)\% Processor Time`, +the value for this configuration option will be `% Processor Time`. + +*`field`*:: The counter path value field/label. Not required; if not set, it is generated based on the counter path. + +*`format`*:: Format of the measurement value. The value can be either `float`, `large` or +`long`. The default is `float`. + + + +[float] +==== Deprecated Counter Configuration Each item in the `counters` list specifies a perfmon query to perform. In the events generated by the metricset these configuration options map to the field @@ -77,6 +125,6 @@ Performance Data Helper (PDH) syntax. This field is required. For example place of an instance name to perform a wildcard query that generates an event for each counter instance (e.g. `\PhysicalDisk(*)\Disk Writes/sec`). -*`format`*:: Format of the measurement value. The value can be either `float` or +*`format`*:: Format of the measurement value. The value can be either `float`, `large` or `long`. The default is `float`.
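For readers unfamiliar with go-ucfg, the following self-contained sketch (not part of this patch) shows how such a `perfmon.queries` block unpacks into Go structs. The types here are reduced, hypothetical copies of the `Query` and `QueryCounter` definitions from config.go below, without their defaulting and validation hooks, and the unpacking mirrors what the metricset's `UnpackConfig` call and the config tests in this changeset do:

package main

import (
	"fmt"

	"github.com/elastic/go-ucfg"
)

// Reduced copies of the config.go structs, for illustration only.
type queryCounter struct {
	Name   string `config:"name"`
	Field  string `config:"field"`
	Format string `config:"format"`
}

type query struct {
	Name     string         `config:"object"`
	Instance []string       `config:"instance"`
	Counters []queryCounter `config:"counters"`
}

type config struct {
	Queries []query `config:"perfmon.queries"`
}

func main() {
	// The same shape a perfmon.queries YAML block takes after parsing.
	raw := map[string]interface{}{
		"perfmon.queries": []map[string]interface{}{{
			"object":   "PhysicalDisk",
			"instance": []string{"*"},
			"counters": []map[string]interface{}{
				{"name": "Disk Writes/sec", "field": "physical_disk.write.per_sec", "format": "float"},
			},
		}},
	}
	c, err := ucfg.NewFrom(raw)
	if err != nil {
		panic(err)
	}
	var cfg config
	if err := c.Unpack(&cfg); err != nil {
		panic(err)
	}
	// Prints: PhysicalDisk [*] Disk Writes/sec
	fmt.Println(cfg.Queries[0].Name, cfg.Queries[0].Instance, cfg.Queries[0].Counters[0].Name)
}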
diff --git a/metricbeat/module/windows/perfmon/_meta/fields.yml index 8033a27f5ac..177af776297 100644 --- a/metricbeat/module/windows/perfmon/_meta/fields.yml +++ b/metricbeat/module/windows/perfmon/_meta/fields.yml @@ -1 +1,17 @@ -- release: beta +- name: perfmon + type: group + release: ga + description: > + perfmon + fields: + - name: instance + type: keyword + description: | + Instance value. + - name: metrics.*.* + type: object + object_type: float + object_type_mapping_type: "*" + description: > + Metric values returned. + diff --git a/metricbeat/module/windows/perfmon/config.go new file mode 100644 index 00000000000..971d4629b27 --- /dev/null +++ b/metricbeat/module/windows/perfmon/config.go @@ -0,0 +1,112 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// +build windows + +package perfmon + +import ( + "github.com/pkg/errors" + + "github.com/elastic/beats/v7/libbeat/common/cfgwarn" +) + +var allowedFormats = []string{"float", "large", "long"} + +// Config for the windows perfmon metricset. +type Config struct { + IgnoreNECounters bool `config:"perfmon.ignore_non_existent_counters"` + GroupMeasurements bool `config:"perfmon.group_measurements_by_instance"` + Counters []Counter `config:"perfmon.counters"` + Queries []Query `config:"perfmon.queries"` + GroupAllCountersTo string `config:"perfmon.group_all_counter"` +} + +// Counter for the perfmon counters (old implementation deprecated). +type Counter struct { + InstanceLabel string `config:"instance_label"` + InstanceName string `config:"instance_name"` + MeasurementLabel string `config:"measurement_label" validate:"required"` + Query string `config:"query" validate:"required"` + Format string `config:"format"` +} + +// Query for perfmon queries. This will be used as the new configuration format. +type Query struct { + Name string `config:"object" validate:"required"` + Field string `config:"field"` + Instance []string `config:"instance"` + Counters []QueryCounter `config:"counters" validate:"required,nonzero"` + Namespace string `config:"namespace"` +} + +// QueryCounter for perfmon queries.
This will be used as the new configuration format. +type QueryCounter struct { + Name string `config:"name" validate:"required"` + Field string `config:"field"` + Format string `config:"format"` +} + +// InitDefaults sets the default namespace for a query. +func (query *Query) InitDefaults() { + query.Namespace = "metrics" +} + +// InitDefaults sets the default value format for a query counter. +func (counter *QueryCounter) InitDefaults() { + counter.Format = "float" +} + +// InitDefaults sets the default value format for a deprecated counter. +func (counter *Counter) InitDefaults() { + counter.Format = "float" +} + +// Validate checks that the configured format is one of the allowed formats. +func (counter *Counter) Validate() error { + if !isValidFormat(counter.Format) { + return errors.Errorf("initialization failed: format '%s' "+ + "for counter '%s' is invalid (must be float, large or long)", + counter.Format, counter.InstanceLabel) + } + return nil +} + +// Validate checks that the configured format is one of the allowed formats. +func (counter *QueryCounter) Validate() error { + if !isValidFormat(counter.Format) { + return errors.Errorf("initialization failed: format '%s' "+ + "for counter '%s' is invalid (must be float, large or long)", + counter.Format, counter.Name) + } + return nil +} + +// Validate ensures that at least one counter or query has been configured and warns about the deprecated option. +func (conf *Config) Validate() error { + if len(conf.Counters) == 0 && len(conf.Queries) == 0 { + return errors.New("no perfmon counters or queries have been configured") + } + if len(conf.Counters) > 0 { + cfgwarn.Deprecate("8.0", "perfmon.counters configuration option is deprecated and will be removed in the future major version, "+ + "we advise using the perfmon.queries configuration option instead.") + } + return nil +} + +// isValidFormat reports whether format is one of the allowed formats. +func isValidFormat(format string) bool { + for _, form := range allowedFormats { + if form == format { + return true + } + } + return false +} diff --git a/metricbeat/module/windows/perfmon/config_test.go new file mode 100644 index 00000000000..8f96a9ca1ed --- /dev/null +++ b/metricbeat/module/windows/perfmon/config_test.go @@ -0,0 +1,73 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License.
+ +// +build windows + +package perfmon + +import ( + "testing" + + "github.com/elastic/beats/v7/libbeat/common" + "github.com/elastic/go-ucfg" + + "github.com/stretchr/testify/assert" +) + +func TestValidate(t *testing.T) { + conf := common.MapStr{ + "module": "windows", + "period": "10s", + "metricsets": []string{"perfmon"}, + "perfmon.group_measurements_by_instance": true, + } + c, err := ucfg.NewFrom(conf) + assert.NoError(t, err) + var config Config + err = c.Unpack(&config) + assert.Error(t, err, "no perfmon counters or queries have been configured") + conf["perfmon.queries"] = []common.MapStr{ + { + "object": "Process", + }, + } + c, err = ucfg.NewFrom(conf) + assert.NoError(t, err) + err = c.Unpack(&config) + assert.Error(t, err, "missing required field accessing 'perfmon.queries.0.counters'") + + conf["perfmon.queries"] = []common.MapStr{ + { + "object": "Process", + "counters": []common.MapStr{ + { + "name": "Thread Count", + }, + }, + }, + } + c, err = ucfg.NewFrom(conf) + assert.NoError(t, err) + err = c.Unpack(&config) + assert.NoError(t, err) + assert.Equal(t, config.Queries[0].Counters[0].Format, "float") + assert.Equal(t, config.Queries[0].Namespace, "metrics") + assert.Equal(t, config.Queries[0].Name, "Process") + assert.Equal(t, config.Queries[0].Counters[0].Name, "Thread Count") + assert.True(t, config.GroupMeasurements) + +} diff --git a/metricbeat/module/windows/perfmon/data.go b/metricbeat/module/windows/perfmon/data.go new file mode 100644 index 00000000000..7db0a338de2 --- /dev/null +++ b/metricbeat/module/windows/perfmon/data.go @@ -0,0 +1,149 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// +build windows + +package perfmon + +import ( + "fmt" + "regexp" + "strconv" + "strings" + + "github.com/pkg/errors" + + "github.com/elastic/beats/v7/libbeat/common" + "github.com/elastic/beats/v7/libbeat/logp" + "github.com/elastic/beats/v7/metricbeat/helper/windows/pdh" + "github.com/elastic/beats/v7/metricbeat/mb" +) + +var processRegexp = regexp.MustCompile(`(.+?)#[1-9]+`) + +func (re *Reader) groupToEvents(counters map[string][]pdh.CounterValue) []mb.Event { + eventMap := make(map[string]*mb.Event) + for counterPath, values := range counters { + hasCounter, counter := re.getCounter(counterPath) + for ind, val := range values { + // Some counters, such as rate counters, require two counter values in order to compute a displayable value. In this case we must call PdhCollectQueryData twice before calling PdhGetFormattedCounterValue. + // For more information, see Collecting Performance Data (https://docs.microsoft.com/en-us/windows/desktop/PerfCtrs/collecting-performance-data). 
+ if val.Err != nil && !re.executed { + re.log.Debugw("Ignoring the first measurement because the data isn't ready", + "error", val.Err, logp.Namespace("perfmon"), "query", counterPath) + continue + } + + var eventKey string + if re.config.GroupMeasurements && val.Err == nil { + // Send measurements with the same instance label as part of the same event + eventKey = val.Instance + } else { + // Send every measurement as an individual event + // If a counter contains an error, it will always be sent as an individual event + eventKey = counterPath + strconv.Itoa(ind) + } + + // Create a new event if the key doesn't exist in the map + if _, ok := eventMap[eventKey]; !ok { + eventMap[eventKey] = &mb.Event{ + MetricSetFields: common.MapStr{}, + Error: errors.Wrapf(val.Err, "failed on query=%v", counterPath), + } + if val.Instance != "" && hasCounter { + // Strip the process index suffix (e.g. svchost#2 becomes svchost) from the instance name. + if ok, match := matchesParentProcess(val.Instance); ok { + eventMap[eventKey].MetricSetFields.Put(counter.InstanceField, match) + } else { + eventMap[eventKey].MetricSetFields.Put(counter.InstanceField, val.Instance) + } + } + } + event := eventMap[eventKey] + if val.Measurement != nil { + event.MetricSetFields.Put(counter.QueryField, val.Measurement) + } else { + event.MetricSetFields.Put(counter.QueryField, 0) + } + if counter.ObjectField != "" { + event.MetricSetFields.Put(counter.ObjectField, counter.ObjectName) + } + } + } + // Collect the assembled events out of the map into a slice. + events := make([]mb.Event, 0, len(eventMap)) + for _, val := range eventMap { + events = append(events, *val) + } + return events +} + +func (re *Reader) groupToSingleEvent(counters map[string][]pdh.CounterValue) mb.Event { + event := mb.Event{ + MetricSetFields: common.MapStr{}, + } + measurements := make(map[string]float64) + for counterPath, values := range counters { + _, readerCounter := re.getCounter(counterPath) + for _, val := range values { + // Some counters, such as rate counters, require two counter values in order to compute a displayable value. In this case we must call PdhCollectQueryData twice before calling PdhGetFormattedCounterValue. + // For more information, see Collecting Performance Data (https://docs.microsoft.com/en-us/windows/desktop/PerfCtrs/collecting-performance-data).
+ if val.Err != nil && !re.executed { + re.log.Debugw("Ignoring the first measurement because the data isn't ready", + "error", val.Err, logp.Namespace("perfmon"), "query", counterPath) + continue + } + // Guard against a nil measurement (possible when val.Err is set on later collections) before the type assertions below. + if val.Measurement == nil { + continue + } + var counterVal float64 + switch val.Measurement.(type) { + case int64: + counterVal = float64(val.Measurement.(int64)) + case int: + counterVal = float64(val.Measurement.(int)) + default: + counterVal = val.Measurement.(float64) + } + if _, ok := measurements[readerCounter.QueryField]; !ok { + measurements[readerCounter.QueryField] = counterVal + measurements[readerCounter.QueryField+instanceCountLabel] = 1 + } else { + measurements[readerCounter.QueryField+instanceCountLabel] = measurements[readerCounter.QueryField+instanceCountLabel] + 1 + measurements[readerCounter.QueryField] = measurements[readerCounter.QueryField] + counterVal + } + } + } + for key, val := range measurements { + if strings.Contains(key, instanceCountLabel) { + if val == 1 { + continue + } else { + event.MetricSetFields.Put(fmt.Sprintf("%s.%s", strings.Split(key, ".")[0], re.config.GroupAllCountersTo), val) + } + } else { + event.MetricSetFields.Put(key, val) + } + } + return event +} + +// matchesParentProcess tries to extract the parent process name from an instance name such as svchost#2. +func matchesParentProcess(instanceName string) (bool, string) { + matches := processRegexp.FindStringSubmatch(instanceName) + if len(matches) == 2 { + return true, matches[1] + } + return false, instanceName +} diff --git a/metricbeat/module/windows/perfmon/data_test.go new file mode 100644 index 00000000000..616d87d686f --- /dev/null +++ b/metricbeat/module/windows/perfmon/data_test.go @@ -0,0 +1,167 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License.
+ +// +build windows + +package perfmon + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/elastic/beats/v7/libbeat/common" + "github.com/elastic/beats/v7/metricbeat/helper/windows/pdh" +) + +func TestGroupToEvents(t *testing.T) { + reader := Reader{ + query: pdh.Query{}, + executed: true, + log: nil, + counters: []PerfCounter{ + { + QueryField: "datagrams_sent_per_sec", + QueryName: `\UDPv4\Datagrams Sent/sec`, + Format: "float", + ObjectName: "UDPv4", + ObjectField: "object", + ChildQueries: []string{`\UDPv4\Datagrams Sent/sec`}, + }, + }, + } + counters := map[string][]pdh.CounterValue{ + `\UDPv4\Datagrams Sent/sec`: { + { + Instance: "", + Measurement: 23, + Err: nil, + }, + }, + } + events := reader.groupToEvents(counters) + assert.NotNil(t, events) + assert.Equal(t, len(events), 1) + ok, err := events[0].MetricSetFields.HasKey("datagrams_sent_per_sec") + assert.NoError(t, err) + assert.True(t, ok) + ok, err = events[0].MetricSetFields.HasKey("object") + assert.NoError(t, err) + assert.True(t, ok) + val, err := events[0].MetricSetFields.GetValue("datagrams_sent_per_sec") + assert.NoError(t, err) + assert.Equal(t, val, 23) + val, err = events[0].MetricSetFields.GetValue("object") + assert.NoError(t, err) + assert.Equal(t, val, "UDPv4") + +} + +func TestGroupToSingleEvent(t *testing.T) { + reader := Reader{ + query: pdh.Query{}, + executed: true, + log: nil, + config: Config{ + GroupAllCountersTo: "processor_count", + }, + counters: []PerfCounter{ + { + QueryField: "%_processor_time", + QueryName: `\Processor Information(*)\% Processor Time`, + Format: "float", + ObjectName: "Processor Information", + ObjectField: "object", + InstanceName: "*", + InstanceField: "instance", + ChildQueries: []string{`\Processor Information(processor0)\% Processor Time`, `\Processor Information(processor1)\% Processor Time`}, + }, + { + QueryField: "%_user_time", + QueryName: `\Processor Information(*)\% User Time`, + Format: "float", + ObjectName: "Processor Information", + ObjectField: "object", + InstanceName: "*", + InstanceField: "instance", + ChildQueries: []string{`\Processor Information(processor0)\% User Time`, `\Processor Information(processor1)\% User Time`}, + }, + }, + } + + counters := map[string][]pdh.CounterValue{ + `\Processor Information(processor0)\% Processor Time`: { + { + Instance: "processor0", + Measurement: 23, + }, + }, + `\Processor Information(processor1)\% Processor Time`: { + { + Instance: "processor1", + Measurement: 21, + }, + }, + `\Processor Information(processor0)\% User Time`: { + { + Instance: "processor0", + Measurement: 10, + }, + }, + `\Processor Information(processor1)\% User Time`: { + { + Instance: "processor1", + Measurement: 11, + }, + }, + } + event := reader.groupToSingleEvent(counters) + assert.NotNil(t, event) + ok, err := event.MetricSetFields.HasKey("%_processor_time") + assert.NoError(t, err) + assert.True(t, ok) + ok, err = event.MetricSetFields.HasKey("%_processor_time:count") + assert.NoError(t, err) + assert.True(t, ok) + val, err := event.MetricSetFields.GetValue("%_processor_time") + assert.NoError(t, err) + assert.Equal(t, val, float64(44)) + val, err = event.MetricSetFields.GetValue("%_processor_time:count") + assert.NoError(t, err) + assert.Equal(t, val, common.MapStr{"processor_count": float64(2)}) + ok, err = event.MetricSetFields.HasKey("%_user_time") + assert.NoError(t, err) + assert.True(t, ok) + ok, err = event.MetricSetFields.HasKey("%_user_time:count") + assert.NoError(t, err) + assert.True(t, ok) + val, err 
= event.MetricSetFields.GetValue("%_user_time") + assert.NoError(t, err) + assert.Equal(t, val, float64(21)) + val, err = event.MetricSetFields.GetValue("%_user_time:count") + assert.NoError(t, err) + assert.Equal(t, val, common.MapStr{"processor_count": float64(2)}) +} + +func TestMatchesParentProcess(t *testing.T) { + ok, val := matchesParentProcess("svchost") + assert.False(t, ok) + assert.Equal(t, val, "svchost") + ok, val = matchesParentProcess("svchost#54") + assert.True(t, ok) + assert.Equal(t, val, "svchost") +} diff --git a/metricbeat/module/windows/perfmon/perfmon.go b/metricbeat/module/windows/perfmon/perfmon.go index 76f8833b3f0..7f4712a5f3b 100644 --- a/metricbeat/module/windows/perfmon/perfmon.go +++ b/metricbeat/module/windows/perfmon/perfmon.go @@ -20,34 +20,14 @@ package perfmon import ( - "strings" - "github.com/elastic/beats/v7/metricbeat/mb/parse" "github.com/pkg/errors" - "github.com/elastic/beats/v7/libbeat/common/cfgwarn" "github.com/elastic/beats/v7/libbeat/logp" "github.com/elastic/beats/v7/metricbeat/mb" ) -// CounterConfig for perfmon counters. -type CounterConfig struct { - InstanceLabel string `config:"instance_label"` - InstanceName string `config:"instance_name"` - MeasurementLabel string `config:"measurement_label" validate:"required"` - Query string `config:"query" validate:"required"` - Format string `config:"format"` -} - -// Config for the windows perfmon metricset. -type Config struct { - IgnoreNECounters bool `config:"perfmon.ignore_non_existent_counters"` - GroupMeasurements bool `config:"perfmon.group_measurements_by_instance"` - CounterConfig []CounterConfig `config:"perfmon.counters" validate:"required"` - GroupAllCountersTo string `config:"perfmon.group_all_counter"` -} - const metricsetName = "perfmon" func init() { @@ -62,25 +42,10 @@ type MetricSet struct { // New create a new instance of the MetricSet. 
func New(base mb.BaseMetricSet) (mb.MetricSet, error) { - cfgwarn.Beta("The perfmon metricset is beta") - var config Config if err := base.Module().UnpackConfig(&config); err != nil { return nil, err } - for _, value := range config.CounterConfig { - form := strings.ToLower(value.Format) - switch form { - case "", "float": - value.Format = "float" - case "long", "large": - default: - return nil, errors.Errorf("initialization failed: format '%s' "+ - "for counter '%s' is invalid (must be float, large or long)", - value.Format, value.InstanceLabel) - } - - } reader, err := NewReader(config) if err != nil { return nil, errors.Wrap(err, "initialization of reader failed") diff --git a/metricbeat/module/windows/perfmon/perfmon_test.go b/metricbeat/module/windows/perfmon/perfmon_test.go index cd882131b54..069138a4226 100644 --- a/metricbeat/module/windows/perfmon/perfmon_test.go +++ b/metricbeat/module/windows/perfmon/perfmon_test.go @@ -27,8 +27,6 @@ import ( "github.com/elastic/beats/v7/metricbeat/helper/windows/pdh" - "github.com/elastic/beats/v7/libbeat/common" - "github.com/pkg/errors" "github.com/stretchr/testify/assert" @@ -39,6 +37,37 @@ import ( const processorTimeCounter = `\Processor Information(_Total)\% Processor Time` func TestData(t *testing.T) { + config := map[string]interface{}{ + "module": "windows", + "metricsets": []string{"perfmon"}, + "perfmon.queries": []map[string]interface{}{ + { + "object": "Processor Information", + "instance": []string{"_Total"}, + "counters": []map[string]interface{}{ + { + "name": "% Processor Time", + "field": "processor.time.total.pct", + }, + { + "name": "% User Time", + }, + }, + }, + }, + } + + ms := mbtest.NewReportingMetricSetV2Error(t, config) + mbtest.ReportingFetchV2Error(ms) + time.Sleep(60 * time.Millisecond) + + if err := mbtest.WriteEventsReporterV2Error(ms, t, "/"); err != nil { + t.Fatal("write", err) + } + +} + +func TestDataDeprecated(t *testing.T) { config := map[string]interface{}{ "module": "windows", "metricsets": []string{"perfmon"}, @@ -81,12 +110,14 @@ func TestCounterWithNoInstanceName(t *testing.T) { config := map[string]interface{}{ "module": "windows", "metricsets": []string{"perfmon"}, - "perfmon.counters": []map[string]string{ + "perfmon.queries": []map[string]interface{}{ { - "instance_label": "processor.name", - "measurement_label": "processor.time.total.pct", - "query": `\UDPv4\Datagrams Sent/sec`, - //"query": `\UDPv4\Verzonden datagrammen per seconde`, + "object": "UDPv4", + "counters": []map[string]interface{}{ + { + "name": "Datagrams Sent/sec", + }, + }, }, }, } @@ -102,9 +133,10 @@ func TestCounterWithNoInstanceName(t *testing.T) { if len(events) == 0 { t.Fatal("no events received") } - process := events[0].MetricSetFields["processor"].(common.MapStr) + val, err := events[0].MetricSetFields.GetValue("object") + assert.NoError(t, err) // Check values - assert.EqualValues(t, "UDPv4", process["name"]) + assert.EqualValues(t, "UDPv4", val) } @@ -115,12 +147,11 @@ func TestQuery(t *testing.T) { t.Fatal(err) } defer q.Close() - counter := CounterConfig{Format: "float", InstanceName: "TestInstanceName"} path, err := q.GetCounterPaths(processorTimeCounter) if err != nil { t.Fatal(err) } - err = q.AddCounter(path[0], counter.InstanceName, counter.Format, false) + err = q.AddCounter(path[0], "TestInstanceName", "float", false) if err != nil { t.Fatal(err) } @@ -150,12 +181,36 @@ func TestQuery(t *testing.T) { func TestExistingCounter(t *testing.T) { config := Config{ - CounterConfig: make([]CounterConfig, 1), + Queries: 
make([]Query, 1), + } + config.Queries[0].Name = "Processor Information" + config.Queries[0].Instance = []string{"_Total"} + config.Queries[0].Counters = []QueryCounter{ + { + Name: "% Processor Time", + }, } - config.CounterConfig[0].InstanceLabel = "processor.name" - config.CounterConfig[0].MeasurementLabel = "processor.time.total.pct" - config.CounterConfig[0].Query = processorTimeCounter - config.CounterConfig[0].Format = "float" + handle, err := NewReader(config) + if err != nil { + t.Fatal(err) + } + defer handle.query.Close() + + values, err := handle.Read() + if err != nil { + t.Fatal(err) + } + t.Log(values) +} + +func TestExistingCounterDeprecated(t *testing.T) { + config := Config{ + Counters: make([]Counter, 1), + } + config.Counters[0].InstanceLabel = "processor.name" + config.Counters[0].MeasurementLabel = "processor.time.total.pct" + config.Counters[0].Query = processorTimeCounter + config.Counters[0].Format = "float" handle, err := NewReader(config) if err != nil { t.Fatal(err) @@ -172,12 +227,34 @@ func TestExistingCounter(t *testing.T) { func TestNonExistingCounter(t *testing.T) { config := Config{ - CounterConfig: make([]CounterConfig, 1), + Queries: make([]Query, 1), + } + config.Queries[0].Name = "Processor Information" + config.Queries[0].Instance = []string{"_Total"} + config.Queries[0].Counters = []QueryCounter{ + { + Name: "% Processor Time time", + }, + } + handle, err := NewReader(config) + if assert.Error(t, err) { + assert.EqualValues(t, pdh.PDH_CSTATUS_NO_COUNTER, errors.Cause(err)) + } + + if handle != nil { + err = handle.query.Close() + assert.NoError(t, err) + } +} + +func TestNonExistingCounterDeprecated(t *testing.T) { + config := Config{ + Counters: make([]Counter, 1), } - config.CounterConfig[0].InstanceLabel = "processor.name" - config.CounterConfig[0].MeasurementLabel = "processor.time.total.pct" - config.CounterConfig[0].Query = "\\Processor Information(_Total)\\not existing counter" - config.CounterConfig[0].Format = "float" + config.Counters[0].InstanceLabel = "processor.name" + config.Counters[0].MeasurementLabel = "processor.time.total.pct" + config.Counters[0].Query = "\\Processor Information(_Total)\\not existing counter" + config.Counters[0].Format = "float" handle, err := NewReader(config) if assert.Error(t, err) { assert.EqualValues(t, pdh.PDH_CSTATUS_NO_COUNTER, errors.Cause(err)) @@ -191,13 +268,41 @@ func TestNonExistingCounter(t *testing.T) { func TestIgnoreNonExistentCounter(t *testing.T) { config := Config{ - CounterConfig: make([]CounterConfig, 1), + Queries: make([]Query, 1), + IgnoreNECounters: true, + } + config.Queries[0].Name = "Processor Information" + config.Queries[0].Instance = []string{"_Total"} + config.Queries[0].Counters = []QueryCounter{ + { + Name: "% Processor Time time", + }, + } + handle, err := NewReader(config) + + values, err := handle.Read() + + if assert.Error(t, err) { + assert.EqualValues(t, pdh.PDH_NO_DATA, errors.Cause(err)) + } + + if handle != nil { + err = handle.query.Close() + assert.NoError(t, err) + } + + t.Log(values) +} + +func TestIgnoreNonExistentCounterDeprecated(t *testing.T) { + config := Config{ + Counters: make([]Counter, 1), IgnoreNECounters: true, } - config.CounterConfig[0].InstanceLabel = "processor.name" - config.CounterConfig[0].MeasurementLabel = "processor.time.total.pct" - config.CounterConfig[0].Query = "\\Processor Information(_Total)\\not existing counter" - config.CounterConfig[0].Format = "float" + config.Counters[0].InstanceLabel = "processor.name" + 
config.Counters[0].MeasurementLabel = "processor.time.total.pct" + config.Counters[0].Query = "\\Processor Information(_Total)\\not existing counter" + config.Counters[0].Format = "float" handle, err := NewReader(config) values, err := handle.Read() @@ -216,12 +321,34 @@ func TestIgnoreNonExistentCounter(t *testing.T) { func TestNonExistingObject(t *testing.T) { config := Config{ - CounterConfig: make([]CounterConfig, 1), + Queries: make([]Query, 1), + } + config.Queries[0].Name = "Processor MisInformation" + config.Queries[0].Instance = []string{"_Total"} + config.Queries[0].Counters = []QueryCounter{ + { + Name: "% Processor Time", + }, + } + handle, err := NewReader(config) + if assert.Error(t, err) { + assert.EqualValues(t, pdh.PDH_CSTATUS_NO_OBJECT, errors.Cause(err)) + } + + if handle != nil { + err = handle.query.Close() + assert.NoError(t, err) + } +} + +func TestNonExistingObjectDeprecated(t *testing.T) { + config := Config{ + Counters: make([]Counter, 1), } - config.CounterConfig[0].InstanceLabel = "processor.name" - config.CounterConfig[0].MeasurementLabel = "processor.time.total.pct" - config.CounterConfig[0].Query = "\\non existing object\\% Processor Performance" - config.CounterConfig[0].Format = "float" + config.Counters[0].InstanceLabel = "processor.name" + config.Counters[0].MeasurementLabel = "processor.time.total.pct" + config.Counters[0].Query = "\\non existing object\\% Processor Performance" + config.Counters[0].Format = "float" handle, err := NewReader(config) if assert.Error(t, err) { assert.EqualValues(t, pdh.PDH_CSTATUS_NO_OBJECT, errors.Cause(err)) @@ -240,13 +367,12 @@ func TestLongOutputFormat(t *testing.T) { t.Fatal(err) } defer query.Close() - counter := CounterConfig{Format: "long"} path, err := query.GetCounterPaths(processorTimeCounter) if err != nil { t.Fatal(err) } assert.NotZero(t, len(path)) - err = query.AddCounter(path[0], counter.InstanceName, counter.Format, false) + err = query.AddCounter(path[0], "", "long", false) if err != nil && err != pdh.PDH_NO_MORE_DATA { t.Fatal(err) } @@ -280,13 +406,12 @@ func TestFloatOutputFormat(t *testing.T) { t.Fatal(err) } defer query.Close() - counter := CounterConfig{Format: "float"} path, err := query.GetCounterPaths(processorTimeCounter) if err != nil { t.Fatal(err) } assert.NotZero(t, len(path)) - err = query.AddCounter(path[0], counter.InstanceName, counter.Format, false) + err = query.AddCounter(path[0], "", "float", false) if err != nil && err != pdh.PDH_NO_MORE_DATA { t.Fatal(err) } @@ -315,13 +440,15 @@ func TestFloatOutputFormat(t *testing.T) { func TestWildcardQuery(t *testing.T) { config := Config{ - CounterConfig: make([]CounterConfig, 1), + Queries: make([]Query, 1), + } + config.Queries[0].Name = "Processor Information" + config.Queries[0].Instance = []string{"*"} + config.Queries[0].Counters = []QueryCounter{ + { + Name: "% Processor Time", + }, } - config.CounterConfig[0].InstanceLabel = "processor.name" - config.CounterConfig[0].InstanceName = "TestInstanceName" - config.CounterConfig[0].MeasurementLabel = "processor.time.pct" - config.CounterConfig[0].Query = `\Processor Information(*)\% Processor Time` - config.CounterConfig[0].Format = "float" handle, err := NewReader(config) if err != nil { t.Fatal(err) @@ -337,28 +464,26 @@ func TestWildcardQuery(t *testing.T) { t.Fatal(err) } assert.NotZero(t, len(values)) - pctKey, err := values[0].MetricSetFields.HasKey("processor.time.pct") + pctKey, err := values[0].MetricSetFields.HasKey("metrics.%_processor_time") if err != nil { t.Fatal(err) } 
assert.True(t, pctKey) - - pct, err := values[0].MetricSetFields.GetValue("processor.name") - if err != nil { - t.Fatal(err) - } - assert.NotEqual(t, "TestInstanceName", pct) - t.Log(values) } func TestWildcardQueryNoInstanceName(t *testing.T) { config := Config{ - CounterConfig: make([]CounterConfig, 1), + Queries: make([]Query, 1), + } + config.Queries[0].Name = "Process" + config.Queries[0].Instance = []string{"*"} + config.Queries[0].Counters = []QueryCounter{ + { + Name: "Private Bytes", + }, } - config.CounterConfig[0].InstanceLabel = "process_private_bytes" - config.CounterConfig[0].MeasurementLabel = "process.private.bytes" - config.CounterConfig[0].Query = `\Process(*)\Private Bytes` + handle, err := NewReader(config) if err != nil { t.Fatal(err) @@ -374,18 +499,18 @@ func TestWildcardQueryNoInstanceName(t *testing.T) { t.Fatal(err) } assert.NotZero(t, len(values)) - pctKey, err := values[0].MetricSetFields.HasKey("process.private.bytes") + pctKey, err := values[0].MetricSetFields.HasKey("metrics.private_bytes") if err != nil { t.Fatal(err) } assert.True(t, pctKey) for _, s := range values { - pct, err := s.MetricSetFields.GetValue("process_private_bytes") + instance, err := s.MetricSetFields.GetValue("instance") if err != nil { t.Fatal(err) } - assert.False(t, strings.Contains(pct.(string), "*")) + assert.False(t, strings.Contains(instance.(string), "*")) } t.Log(values) @@ -393,23 +518,80 @@ func TestWildcardQueryNoInstanceName(t *testing.T) { func TestGroupByInstance(t *testing.T) { config := Config{ - CounterConfig: make([]CounterConfig, 3), + Queries: make([]Query, 1), + GroupMeasurements: true, + } + config.Queries[0].Name = "Processor Information" + config.Queries[0].Instance = []string{"_Total"} + config.Queries[0].Counters = []QueryCounter{ + { + Name: "% Processor Time", + }, + { + Name: "% User Time", + }, + { + Name: "% Privileged Time", + }, + } + handle, err := NewReader(config) + if err != nil { + t.Fatal(err) + } + defer handle.query.Close() + + values, _ := handle.Read() + + time.Sleep(time.Millisecond * 1000) + + values, err = handle.Read() + if err != nil { + t.Fatal(err) + } + + assert.EqualValues(t, 1, len(values)) // Assert all metrics have been grouped into a single event + + // Test all keys exist in the event + pctKey, err := values[0].MetricSetFields.HasKey("metrics.%_processor_time") + if err != nil { + t.Fatal(err) + } + assert.True(t, pctKey) + + pctKey, err = values[0].MetricSetFields.HasKey("metrics.%_user_time") + if err != nil { + t.Fatal(err) + } + assert.True(t, pctKey) + + pctKey, err = values[0].MetricSetFields.HasKey("metrics.%_privileged_time") + if err != nil { + t.Fatal(err) + } + assert.True(t, pctKey) + + t.Log(values) +} + +func TestGroupByInstanceDeprecated(t *testing.T) { + config := Config{ + Counters: make([]Counter, 3), GroupMeasurements: true, } - config.CounterConfig[0].InstanceLabel = "processor.name" - config.CounterConfig[0].MeasurementLabel = "processor.time.pct" - config.CounterConfig[0].Query = `\Processor Information(_Total)\% Processor Time` - config.CounterConfig[0].Format = "float" + config.Counters[0].InstanceLabel = "processor.name" + config.Counters[0].MeasurementLabel = "processor.time.pct" + config.Counters[0].Query = `\Processor Information(_Total)\% Processor Time` + config.Counters[0].Format = "float" - config.CounterConfig[1].InstanceLabel = "processor.name" - config.CounterConfig[1].MeasurementLabel = "processor.time.user.pct" - config.CounterConfig[1].Query = `\Processor Information(_Total)\% User Time` - 
config.CounterConfig[1].Format = "float" + config.Counters[1].InstanceLabel = "processor.name" + config.Counters[1].MeasurementLabel = "processor.time.user.pct" + config.Counters[1].Query = `\Processor Information(_Total)\% User Time` + config.Counters[1].Format = "float" - config.CounterConfig[2].InstanceLabel = "processor.name" - config.CounterConfig[2].MeasurementLabel = "processor.time.privileged.ns" - config.CounterConfig[2].Query = `\Processor Information(_Total)\% Privileged Time` - config.CounterConfig[2].Format = "float" + config.Counters[2].InstanceLabel = "processor.name" + config.Counters[2].MeasurementLabel = "processor.time.privileged.ns" + config.Counters[2].Query = `\Processor Information(_Total)\% Privileged Time` + config.Counters[2].Format = "float" handle, err := NewReader(config) if err != nil { diff --git a/metricbeat/module/windows/perfmon/reader.go b/metricbeat/module/windows/perfmon/reader.go index a7ecd666362..c65c4a8118a 100644 --- a/metricbeat/module/windows/perfmon/reader.go +++ b/metricbeat/module/windows/perfmon/reader.go @@ -22,30 +22,42 @@ package perfmon import ( "fmt" "regexp" - "strconv" "strings" + "unicode" "github.com/elastic/beats/v7/metricbeat/helper/windows/pdh" "github.com/pkg/errors" - "github.com/elastic/beats/v7/libbeat/common" "github.com/elastic/beats/v7/libbeat/logp" "github.com/elastic/beats/v7/metricbeat/mb" ) -var processRegexp = regexp.MustCompile(`(.+?)#[1-9]+`) - -const instanceCountLabel = ":count" +const ( + instanceCountLabel = ":count" + defaultInstanceField = "instance" + defaultObjectField = "object" + replaceUpperCaseRegex = `(?:[^A-Z_\W])([A-Z])[^A-Z]` +) // Reader will contain the config options type Reader struct { - query pdh.Query // PDH Query - instanceLabel map[string]string // Mapping of counter path to key used for the label (e.g. processor.name) - measurement map[string]string // Mapping of counter path to key used for the value (e.g. processor.cpu_time). - executed bool // Indicates if the query has been executed. - log *logp.Logger // - config Config // Metricset configuration + query pdh.Query // PDH Query + executed bool // Indicates if the query has been executed. + log *logp.Logger // + config Config // Metricset configuration + counters []PerfCounter +} + +type PerfCounter struct { + InstanceField string + InstanceName string + QueryField string + QueryName string + Format string + ObjectName string + ObjectField string + ChildQueries []string } // NewReader creates a new instance of Reader. 
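For orientation, a minimal sketch (not part of this patch; the expected values mirror this change's own tests) of how a perfmon.queries entry is expected to expand into a PerfCounter via mapCounters, assuming the Config, Query and QueryCounter types introduced in this change:

cfg := Config{
	Queries: []Query{{
		Name:      "Processor Information",
		Namespace: "metrics", // set explicitly here; the metricset appears to default to "metrics"
		Instance:  []string{"_Total"},
		Counters:  []QueryCounter{{Name: "% Processor Time"}},
	}},
}
r := Reader{}
r.mapCounters(cfg)
// Per mapQuery/mapCounterPathLabel below:
//   r.counters[0].QueryName  == `\Processor Information(_Total)\% Processor Time`
//   r.counters[0].QueryField == "metrics.%_processor_time"
//   r.counters[0].InstanceField == "instance", r.counters[0].ObjectField == "object"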
@@ -55,44 +67,43 @@ func NewReader(config Config) (*Reader, error) { return nil, err } r := &Reader{ - query: query, - instanceLabel: map[string]string{}, - measurement: map[string]string{}, - log: logp.NewLogger("perfmon"), - config: config, + query: query, + log: logp.NewLogger("perfmon"), + config: config, } - for _, counter := range config.CounterConfig { - childQueries, err := query.GetCounterPaths(counter.Query) + r.mapCounters(config) + for i, counter := range r.counters { + r.counters[i].ChildQueries = []string{} + childQueries, err := query.GetCounterPaths(counter.QueryName) if err != nil { if config.IgnoreNECounters { switch err { case pdh.PDH_CSTATUS_NO_COUNTER, pdh.PDH_CSTATUS_NO_COUNTERNAME, pdh.PDH_CSTATUS_NO_INSTANCE, pdh.PDH_CSTATUS_NO_OBJECT: r.log.Infow("Ignoring non existent counter", "error", err, - logp.Namespace("perfmon"), "query", counter.Query) + logp.Namespace("perfmon"), "query", counter.QueryName) continue } } else { query.Close() - return nil, errors.Wrapf(err, `failed to expand counter (query="%v")`, counter.Query) + return nil, errors.Wrapf(err, `failed to expand counter (query="%v")`, counter.QueryName) } } // check if the pdhexpandcounterpath/pdhexpandwildcardpath functions have expanded the counter successfully. if len(childQueries) == 0 || (len(childQueries) == 1 && strings.Contains(childQueries[0], "*")) { // covering cases when PdhExpandWildCardPathW returns no counter paths or is unable to expand and the ignore_non_existent_counters flag is set if config.IgnoreNECounters { - r.log.Infow("Ignoring non existent counter", "initial query", counter.Query, + r.log.Infow("Ignoring non existent counter", "initial query", counter.QueryName, logp.Namespace("perfmon"), "expanded query", childQueries) continue } - return nil, errors.Errorf(`failed to expand counter (query="%v")`, counter.Query) + return nil, errors.Errorf(`failed to expand counter (query="%v"), no error returned`, counter.QueryName) } for _, v := range childQueries { if err := query.AddCounter(v, counter.InstanceName, counter.Format, len(childQueries) > 1); err != nil { - return nil, errors.Wrapf(err, `failed to add counter (query="%v")`, counter.Query) + return nil, errors.Wrapf(err, `failed to add counter (query="%v")`, counter.QueryName) } - r.instanceLabel[v] = counter.InstanceLabel - r.measurement[v] = counter.MeasurementLabel + r.counters[i].ChildQueries = append(r.counters[i].ChildQueries, v) } } return r, nil @@ -101,19 +112,20 @@ func NewReader(config Config) (*Reader, error) { // RefreshCounterPaths will recheck for any new instances and add them to the counter list func (re *Reader) RefreshCounterPaths() error { var newCounters []string - for _, counter := range re.config.CounterConfig { - childQueries, err := re.query.GetCounterPaths(counter.Query) + for i, counter := range re.counters { + re.counters[i].ChildQueries = []string{} + childQueries, err := re.query.GetCounterPaths(counter.QueryName) if err != nil { if re.config.IgnoreNECounters { switch err { case pdh.PDH_CSTATUS_NO_COUNTER, pdh.PDH_CSTATUS_NO_COUNTERNAME, pdh.PDH_CSTATUS_NO_INSTANCE, pdh.PDH_CSTATUS_NO_OBJECT: re.log.Infow("Ignoring non existent counter", "error", err, - logp.Namespace("perfmon"), "query", counter.Query) + logp.Namespace("perfmon"), "query", counter.QueryName) continue } } else { - return errors.Wrapf(err, `failed to expand counter (query="%v")`, counter.Query) + return errors.Wrapf(err, `failed to expand counter (query="%v")`, counter.QueryName) } } newCounters = append(newCounters, childQueries...) 
@@ -121,10 +133,9 @@ func (re *Reader) RefreshCounterPaths() error { if err == nil && len(childQueries) >= 1 && !strings.Contains(childQueries[0], "*") { for _, v := range childQueries { if err := re.query.AddCounter(v, counter.InstanceName, counter.Format, len(childQueries) > 1); err != nil { - return errors.Wrapf(err, "failed to add counter (query='%v')", counter.Query) + return errors.Wrapf(err, "failed to add counter (query='%v')", counter.QueryName) } - re.instanceLabel[v] = counter.InstanceLabel - re.measurement[v] = counter.MeasurementLabel + re.counters[i].ChildQueries = append(re.counters[i].ChildQueries, v) } } } @@ -152,7 +163,7 @@ func (re *Reader) Read() ([]mb.Event, error) { var events []mb.Event // GroupAllCountersTo config option where counters for all instances are aggregated and instance count is added in the event under the string value provided by this option. if re.config.GroupAllCountersTo != "" { - event := re.groupToEvent(values) + event := re.groupToSingleEvent(values) events = append(events, event) } else { events = re.groupToEvents(values) @@ -161,114 +172,134 @@ func (re *Reader) Read() ([]mb.Event, error) { return events, nil } -func (re *Reader) groupToEvents(counters map[string][]pdh.CounterValue) []mb.Event { - eventMap := make(map[string]*mb.Event) - - for counterPath, values := range counters { - for ind, val := range values { - // Some counters, such as rate counters, require two counter values in order to compute a displayable value. In this case we must call PdhCollectQueryData twice before calling PdhGetFormattedCounterValue. - // For more information, see Collecting Performance Data (https://docs.microsoft.com/en-us/windows/desktop/PerfCtrs/collecting-performance-data). - if val.Err != nil && !re.executed { - re.log.Debugw("Ignoring the first measurement because the data isn't ready", - "error", val.Err, logp.Namespace("perfmon"), "query", counterPath) - continue - } +// Close will close the PDH query for now. 
+func (re *Reader) Close() error { + return re.query.Close() +} - var eventKey string - if re.config.GroupMeasurements && val.Err == nil { - // Send measurements with the same instance label as part of the same event - eventKey = val.Instance - } else { - // Send every measurement as an individual event - // If a counter contains an error, it will always be sent as an individual event - eventKey = counterPath + strconv.Itoa(ind) +func (re *Reader) getCounter(query string) (bool, PerfCounter) { + for _, counter := range re.counters { + for _, childQuery := range counter.ChildQueries { + if childQuery == query { + return true, counter } + } + } + return false, PerfCounter{} +} - // Create a new event if the key doesn't exist in the map - if _, ok := eventMap[eventKey]; !ok { - eventMap[eventKey] = &mb.Event{ - MetricSetFields: common.MapStr{}, - Error: errors.Wrapf(val.Err, "failed on query=%v", counterPath), - } - if val.Instance != "" && re.instanceLabel[counterPath] != "" { - //will ignore instance counter - if ok, match := matchesParentProcess(val.Instance); ok { - eventMap[eventKey].MetricSetFields.Put(re.instanceLabel[counterPath], match) - } else { - eventMap[eventKey].MetricSetFields.Put(re.instanceLabel[counterPath], val.Instance) +func (re *Reader) mapCounters(config Config) { + re.counters = []PerfCounter{} + if len(config.Counters) > 0 { + for _, counter := range config.Counters { + re.counters = append(re.counters, PerfCounter{ + InstanceField: counter.InstanceLabel, + InstanceName: counter.InstanceName, + QueryField: counter.MeasurementLabel, + QueryName: counter.Query, + Format: counter.Format, + ChildQueries: nil, + }) + } + } + if len(config.Queries) > 0 { + for _, query := range config.Queries { + for _, counter := range query.Counters { + // counter paths can also not contain any instances + if len(query.Instance) == 0 { + re.counters = append(re.counters, PerfCounter{ + InstanceField: defaultInstanceField, + InstanceName: "", + QueryField: mapCounterPathLabel(query.Namespace, counter.Field, counter.Name), + QueryName: mapQuery(query.Name, "", counter.Name), + Format: counter.Format, + ObjectName: query.Name, + ObjectField: mapObjectName(query.Field), + }) + } else { + for _, instance := range query.Instance { + re.counters = append(re.counters, PerfCounter{ + InstanceField: defaultInstanceField, + InstanceName: instance, + QueryField: mapCounterPathLabel(query.Namespace, counter.Field, counter.Name), + QueryName: mapQuery(query.Name, instance, counter.Name), + Format: counter.Format, + ObjectName: query.Name, + ObjectField: mapObjectName(query.Field), + }) } } } - event := eventMap[eventKey] - if val.Measurement != nil { - event.MetricSetFields.Put(re.measurement[counterPath], val.Measurement) - } else { - event.MetricSetFields.Put(re.measurement[counterPath], 0) - } } } - // Write the values into the map. - events := make([]mb.Event, 0, len(eventMap)) - for _, val := range eventMap { - events = append(events, *val) - } - return events } -func (re *Reader) groupToEvent(counters map[string][]pdh.CounterValue) mb.Event { - event := mb.Event{ - MetricSetFields: common.MapStr{}, +func mapObjectName(objectField string) string { + if objectField != "" { + return objectField } - measurements := make(map[string]float64, 0) - for counterPath, values := range counters { - for _, val := range values { - // Some counters, such as rate counters, require two counter values in order to compute a displayable value. 
In this case we must call PdhCollectQueryData twice before calling PdhGetFormattedCounterValue.
-		// For more information, see Collecting Performance Data (https://docs.microsoft.com/en-us/windows/desktop/PerfCtrs/collecting-performance-data).
-			if val.Err != nil && !re.executed {
-				re.log.Debugw("Ignoring the first measurement because the data isn't ready",
-					"error", val.Err, logp.Namespace("perfmon"), "query", counterPath)
-				continue
-			}
-			var counterVal float64
-			switch val.Measurement.(type) {
-			case int64:
-				counterVal = float64(val.Measurement.(int64))
-			default:
-				counterVal = val.Measurement.(float64)
-			}
-			if _, ok := measurements[re.measurement[counterPath]]; !ok {
-				measurements[re.measurement[counterPath]] = counterVal
-				measurements[re.measurement[counterPath]+instanceCountLabel] = 1
-			} else {
-				measurements[re.measurement[counterPath]+instanceCountLabel] = measurements[re.measurement[counterPath]+instanceCountLabel] + 1
-				measurements[re.measurement[counterPath]] = measurements[re.measurement[counterPath]] + counterVal
-			}
-		}
+	return defaultObjectField
+}
+
+func mapQuery(obj string, instance string, path string) string {
+	var query string
+	// trim surrounding backslashes from the object name
+	obj = strings.TrimPrefix(obj, "\\")
+	obj = strings.TrimSuffix(obj, "\\")
+	query = fmt.Sprintf("\\%s", obj)
+
+	if instance != "" {
+		// trim surrounding parentheses from the instance name
+		instance = strings.TrimPrefix(instance, "(")
+		instance = strings.TrimSuffix(instance, ")")
+		query += fmt.Sprintf("(%s)", instance)
 	}
-	for key, val := range measurements {
-		if strings.Contains(key, instanceCountLabel) {
-			if val == 1 {
-				continue
-			} else {
-				event.MetricSetFields.Put(fmt.Sprintf("%s.%s", strings.Split(key, ".")[0], re.config.GroupAllCountersTo), val)
-			}
-		} else {
-			event.MetricSetFields.Put(key, val)
-		}
+
+	if strings.HasPrefix(path, "\\") {
+		query += path
+	} else {
+		query += fmt.Sprintf("\\%s", path)
 	}
-	return event
+	return query
 }
-// Close will close the PDH query for now.
-func (re *Reader) Close() error {
-	return re.query.Close()
-}
+func mapCounterPathLabel(namespace string, label string, path string) string {
+	if label == "" {
+		label = path
+	}
+	// normalize the label: spaces, backslashes and colons become underscores,
+	// "/sec" becomes "_per_sec", a "%" between underscores becomes "pct" and a lone dash collapses into an underscore
+	r := strings.NewReplacer(" ", "_", "/sec", "_per_sec", "/_sec", "_per_sec", "\\", "_", "_%_", "_pct_", ":", "_", "_-_", "_")
+	label = r.Replace(label)
+	// lowercase remaining upper-case characters, prefixing each with an underscore
+	label = replaceUpperCase(label)
-// matchParentProcess will try to get the parent process name
-func matchesParentProcess(instanceName string) (bool, string) {
-	matches := processRegexp.FindStringSubmatch(instanceName)
-	if len(matches) == 2 {
-		return true, matches[1]
+	// avoid labels such as "logicaldisk_avg._disk_sec_per_transfer"
+	obj := strings.Split(label, ".")
+	for index := range obj {
+		// trim any leading or trailing "_" left over from the replacements
+		obj[index] = strings.TrimPrefix(obj[index], "_")
+		obj[index] = strings.TrimSuffix(obj[index], "_")
 	}
-	return false, instanceName
+	label = strings.ToLower(strings.Join(obj, "_"))
+	label = strings.Replace(label, "__", "_", -1)
+	return namespace + "." + label
+}
+
+// replaceUpperCase lowercases upper-case characters, prefixing each with '_'
+func replaceUpperCase(src string) string {
+	replaceUpperCaseRegexp := regexp.MustCompile(replaceUpperCaseRegex)
+	return replaceUpperCaseRegexp.ReplaceAllStringFunc(src, func(str string) string {
+		var newStr string
+		for _, r := range str {
+			// prefix upper-case runes with '_' and lowercase them
+			if unicode.IsUpper(r) {
+				newStr += "_" + strings.ToLower(string(r))
+			} else {
+				newStr += string(r)
+			}
+		}
+		return newStr
+	})
 }
diff --git a/metricbeat/module/windows/perfmon/reader_integration_test.go b/metricbeat/module/windows/perfmon/reader_integration_test.go
new file mode 100644
index 00000000000..fd19b1e5c09
--- /dev/null
+++ b/metricbeat/module/windows/perfmon/reader_integration_test.go
@@ -0,0 +1,86 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// +build integration
+// +build windows
+
+package perfmon
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+var validQuery = `\Processor Information(_Total)\% Processor Time`
+
+// TestNewReaderWhenQueryPathNotProvided will check for invalid/no query.
+func TestNewReaderWhenQueryPathNotProvided(t *testing.T) {
+	counter := Counter{Format: "float", InstanceName: "TestInstanceName"}
+	config := Config{
+		IgnoreNECounters:  false,
+		GroupMeasurements: false,
+		Counters:          []Counter{counter},
+	}
+	reader, err := NewReader(config)
+	assert.NotNil(t, err)
+	assert.Nil(t, reader)
+	assert.EqualValues(t, err.Error(), `failed to expand counter (query=""): no query path given`)
+}
+
+// TestNewReaderWithValidQueryPath should successfully instantiate the reader.
+func TestNewReaderWithValidQueryPath(t *testing.T) {
+	counter := Counter{Format: "float", InstanceName: "TestInstanceName", Query: validQuery}
+	config := Config{
+		IgnoreNECounters:  false,
+		GroupMeasurements: false,
+		Counters:          []Counter{counter},
+	}
+	reader, err := NewReader(config)
+	defer reader.Close()
+	assert.Nil(t, err)
+	assert.NotNil(t, reader)
+	assert.NotNil(t, reader.query)
+	assert.NotNil(t, reader.query.Handle)
+	assert.NotNil(t, reader.query.Counters)
+	assert.NotZero(t, len(reader.query.Counters))
+
+}
+
+// TestReadSuccessfully tests Read when the first call retrieves no events (and is ignored) and subsequent calls retrieve events.
+func TestReadSuccessfully(t *testing.T) {
+	counter := Counter{Format: "float", InstanceName: "TestInstanceName", Query: validQuery}
+	config := Config{
+		IgnoreNECounters:  false,
+		GroupMeasurements: false,
+		Counters:          []Counter{counter},
+	}
+	reader, err := NewReader(config)
+	if err != nil {
+		t.Fatal(err)
+	}
+	// Some counters, such as rate counters, require two counter values in order to compute a displayable value. In this case we call reader.Read() twice.
+ // For more information, see Collecting Performance Data (https://docs.microsoft.com/en-us/windows/desktop/PerfCtrs/collecting-performance-data). + events, err := reader.Read() + assert.Nil(t, err) + assert.NotNil(t, events) + assert.Zero(t, len(events)) + events, err = reader.Read() + assert.Nil(t, err) + assert.NotNil(t, events) + assert.NotZero(t, len(events)) +} diff --git a/metricbeat/module/windows/perfmon/reader_test.go b/metricbeat/module/windows/perfmon/reader_test.go index 7f925d21a62..697281b64e6 100644 --- a/metricbeat/module/windows/perfmon/reader_test.go +++ b/metricbeat/module/windows/perfmon/reader_test.go @@ -23,63 +23,138 @@ import ( "testing" "github.com/stretchr/testify/assert" -) -var validQuery = `\Processor Information(_Total)\% Processor Time` + "github.com/elastic/beats/v7/metricbeat/helper/windows/pdh" +) -// TestNewReaderWhenQueryPathNotProvided will check for invalid/no query. -func TestNewReaderWhenQueryPathNotProvided(t *testing.T) { - counter := CounterConfig{Format: "float", InstanceName: "TestInstanceName"} - config := Config{ - IgnoreNECounters: false, - GroupMeasurements: false, - CounterConfig: []CounterConfig{counter}, +func TestGetCounter(t *testing.T) { + reader := Reader{ + query: pdh.Query{}, + executed: true, + log: nil, + counters: []PerfCounter{ + { + QueryField: "datagrams_sent_per_sec", + QueryName: `\UDPv4\Datagrams Sent/sec`, + Format: "float", + ObjectName: "UDPv4", + ObjectField: "object", + ChildQueries: []string{`\UDPv4\Datagrams Sent/sec`}, + }, + }, } - reader, err := NewReader(config) - assert.NotNil(t, err) - assert.Nil(t, reader) - assert.EqualValues(t, err.Error(), `failed to expand counter (query=""): no query path given`) + ok, val := reader.getCounter(`\UDPv4\Datagrams Sent/sec`) + assert.True(t, ok) + assert.Equal(t, val.QueryField, "datagrams_sent_per_sec") + assert.Equal(t, val.ObjectName, "UDPv4") + } -// TestNewReaderWithValidQueryPath should successfully instantiate the reader. 
-func TestNewReaderWithValidQueryPath(t *testing.T) { - counter := CounterConfig{Format: "float", InstanceName: "TestInstanceName", Query: validQuery} +func TestMapCounters(t *testing.T) { config := Config{ IgnoreNECounters: false, GroupMeasurements: false, - CounterConfig: []CounterConfig{counter}, + Counters: []Counter{ + { + InstanceLabel: "physical_disk.name", + InstanceName: "total", + MeasurementLabel: "physical_disk.write.time.pct", + Query: `\PhysicalDisk(*)\% Disk Write Time`, + Format: "float", + }, + }, + Queries: []Query{ + { + Name: "Process", + Namespace: "metrics", + Instance: []string{"svchost*"}, + Counters: []QueryCounter{ + { + Name: "% Processor Time", + Format: "float", + }, + }, + }, + { + Name: "Process", + Field: "disk", + Namespace: "metrics", + Instance: []string{"conhost*"}, + Counters: []QueryCounter{ + { + Name: "IO Read Operations/sec", + Field: "read_ops", + Format: "double", + }, + }, + }, + }, + } + reader := Reader{} + reader.mapCounters(config) + assert.Equal(t, len(reader.counters), 3) + for _, readerCounter := range reader.counters { + if readerCounter.InstanceField == "physical_disk.name" { + assert.Equal(t, readerCounter.InstanceName, "total") + assert.Equal(t, readerCounter.ObjectName, "") + assert.Equal(t, readerCounter.ObjectField, "") + assert.Equal(t, readerCounter.QueryField, "physical_disk.write.time.pct") + assert.Equal(t, readerCounter.QueryName, `\PhysicalDisk(*)\% Disk Write Time`) + assert.Equal(t, len(readerCounter.ChildQueries), 0) + assert.Equal(t, readerCounter.Format, "float") + } else if readerCounter.InstanceName == "svchost*" { + assert.Equal(t, readerCounter.ObjectName, "Process") + assert.Equal(t, readerCounter.ObjectField, "object") + assert.Equal(t, readerCounter.QueryField, "metrics.%_processor_time") + assert.Equal(t, readerCounter.QueryName, `\Process(svchost*)\% Processor Time`) + assert.Equal(t, len(readerCounter.ChildQueries), 0) + assert.Equal(t, readerCounter.Format, "float") + } else { + assert.Equal(t, readerCounter.InstanceName, "conhost*") + assert.Equal(t, readerCounter.ObjectName, "Process") + assert.Equal(t, readerCounter.ObjectField, "disk") + assert.Equal(t, readerCounter.QueryField, "metrics.read_ops") + assert.Equal(t, readerCounter.QueryName, `\Process(conhost*)\IO Read Operations/sec`) + assert.Equal(t, len(readerCounter.ChildQueries), 0) + assert.Equal(t, readerCounter.Format, "double") + } } - reader, err := NewReader(config) - defer reader.Close() - assert.Nil(t, err) - assert.NotNil(t, reader) - assert.NotNil(t, reader.query) - assert.NotNil(t, reader.query.Handle) - assert.NotNil(t, reader.query.Counters) - assert.NotZero(t, len(reader.query.Counters)) +} + +func TestMapQuery(t *testing.T) { + //mapQuery(obj string, instance string, path string) string { + obj := "Process" + instance := "*" + path := "% Processor Time" + result := mapQuery(obj, instance, path) + assert.Equal(t, result, `\Process(*)\% Processor Time`) + obj = `\Process\` + instance = "(*" + result = mapQuery(obj, instance, path) + assert.Equal(t, result, `\Process(*)\% Processor Time`) } -// TestReadSuccessfully will test the func read when it first retrieves no events (and ignored) and then starts retrieving events. 
-func TestReadSuccessfully(t *testing.T) { - counter := CounterConfig{Format: "float", InstanceName: "TestInstanceName", Query: validQuery} - config := Config{ - IgnoreNECounters: false, - GroupMeasurements: false, - CounterConfig: []CounterConfig{counter}, - } - reader, err := NewReader(config) - if err != nil { - t.Fatal(err) - } - //Some counters, such as rate counters, require two counter values in order to compute a displayable value. In this case we call reader.Read() twice. - // For more information, see Collecting Performance Data (https://docs.microsoft.com/en-us/windows/desktop/PerfCtrs/collecting-performance-data). - events, err := reader.Read() - assert.Nil(t, err) - assert.NotNil(t, events) - assert.Zero(t, len(events)) - events, err = reader.Read() - assert.Nil(t, err) - assert.NotNil(t, events) - assert.NotZero(t, len(events)) +func TestMapCounterPathLabel(t *testing.T) { + result := mapCounterPathLabel("metrics", "", `WININET: Bytes from server`) + assert.Equal(t, result, "metrics.wininet_bytes_from_server") + result = mapCounterPathLabel("metrics", "", `RSC Coalesced Packet Bucket 5 (16To31)`) + assert.Equal(t, result, "metrics.rsc_coalesced_packet_bucket_5_(16_to31)") + result = mapCounterPathLabel("metrics", "", `Total Memory Usage --- Non-Paged Pool`) + assert.Equal(t, result, "metrics.total_memory_usage_---_non-paged_pool") + result = mapCounterPathLabel("metrics", "", `IPv6 NBLs/sec indicated with low-resource flag`) + assert.Equal(t, result, "metrics.ipv6_nbls_per_sec_indicated_with_low-resource_flag") + result = mapCounterPathLabel("metrics", "", `Queued Poison Messages Per Second`) + assert.Equal(t, result, "metrics.queued_poison_messages_per_second") + result = mapCounterPathLabel("metrics", "", `I/O Log Writes Average Latency`) + assert.Equal(t, result, "metrics.i/o_log_writes_average_latency") + result = mapCounterPathLabel("metrics", "io.logwrites.average latency", `I/O Log Writes Average Latency`) + assert.Equal(t, result, "metrics.io_logwrites_average_latency") + + result = mapCounterPathLabel("metrics", "this.is__exceptional-test:case/sec", `RSC Coalesced Packet Bucket 5 (16To31)`) + assert.Equal(t, result, "metrics.this_is_exceptional-test_case_per_sec") + + result = mapCounterPathLabel("metrics", "logicaldisk_avg._disk_sec_per_transfer", `RSC Coalesced Packet Bucket 5 (16To31)`) + assert.Equal(t, result, "metrics.logicaldisk_avg_disk_sec_per_transfer") + } diff --git a/metricbeat/module/windows/service/reader.go b/metricbeat/module/windows/service/reader.go new file mode 100644 index 00000000000..918d59cb428 --- /dev/null +++ b/metricbeat/module/windows/service/reader.go @@ -0,0 +1,197 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
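For orientation before the file body: the new service Reader below follows a plain open/read/close lifecycle. A minimal usage sketch (the helper name readServicesOnce is hypothetical; Windows-only, assuming the types defined in this file):

func readServicesOnce() ([]common.MapStr, error) {
	// NewReader opens a handle to the service control manager and caches the machine GUID.
	r, err := NewReader()
	if err != nil {
		return nil, err
	}
	defer r.Close()
	// Read returns one common.MapStr per installed service
	// (id, display_name, name, state, start_type, plus pid/uptime.ms when running).
	return r.Read()
}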
+ +// +build windows + +package service + +import ( + "crypto/sha256" + "encoding/base64" + "strconv" + "syscall" + + "github.com/pkg/errors" + "golang.org/x/sys/windows/registry" + + "github.com/elastic/beats/v7/libbeat/common" +) + +var ( + // errorNames is mapping of errno values to names. + // https://msdn.microsoft.com/en-us/library/windows/desktop/ms681383(v=vs.85).aspx + errorNames = map[uint32]string{ + 1077: "ERROR_SERVICE_NEVER_STARTED", + } + InvalidDatabaseHandle = ^Handle(0) +) + +type Handle uintptr + +type Reader struct { + handle Handle + state ServiceEnumState + guid string // Host's MachineGuid value (a unique ID for the host). + ids map[string]string // Cache of service IDs. + protectedServices map[string]struct{} +} + +func NewReader() (*Reader, error) { + handle, err := openSCManager("", "", ScManagerEnumerateService|ScManagerConnect) + if err != nil { + return nil, errors.Wrap(err, "initialization failed") + } + + guid, err := getMachineGUID() + if err != nil { + return nil, err + } + + r := &Reader{ + handle: handle, + state: ServiceStateAll, + guid: guid, + ids: map[string]string{}, + protectedServices: map[string]struct{}{}, + } + + return r, nil +} + +func (reader *Reader) Read() ([]common.MapStr, error) { + services, err := GetServiceStates(reader.handle, reader.state, reader.protectedServices) + if err != nil { + return nil, err + } + + result := make([]common.MapStr, 0, len(services)) + + for _, service := range services { + ev := common.MapStr{ + "id": reader.getServiceID(service.ServiceName), + "display_name": service.DisplayName, + "name": service.ServiceName, + "state": service.CurrentState, + "start_type": service.StartType.String(), + "start_name": service.ServiceStartName, + "path_name": service.BinaryPathName, + } + + if service.CurrentState == "Stopped" { + ev.Put("exit_code", getErrorCode(service.ExitCode)) + } + + if service.PID > 0 { + ev.Put("pid", service.PID) + } + + if service.Uptime > 0 { + if _, err = ev.Put("uptime.ms", service.Uptime); err != nil { + return nil, err + } + } + + result = append(result, ev) + } + + return result, nil +} + +func (reader *Reader) Close() error { + return closeHandle(reader.handle) +} + +func openSCManager(machineName string, databaseName string, desiredAccess ServiceSCMAccessRight) (Handle, error) { + var machineNamePtr *uint16 + if machineName != "" { + var err error + machineNamePtr, err = syscall.UTF16PtrFromString(machineName) + if err != nil { + return InvalidDatabaseHandle, err + } + } + + var databaseNamePtr *uint16 + if databaseName != "" { + var err error + databaseNamePtr, err = syscall.UTF16PtrFromString(databaseName) + if err != nil { + return InvalidDatabaseHandle, err + } + } + + handle, err := _OpenSCManager(machineNamePtr, databaseNamePtr, desiredAccess) + if err != nil { + return InvalidDatabaseHandle, ServiceErrno(err.(syscall.Errno)) + } + + return handle, nil +} + +// getMachineGUID returns the machine's GUID value which is unique to a Windows +// installation. 
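+// The value is read from the registry key HKLM\SOFTWARE\Microsoft\Cryptography
+// (value name "MachineGuid") and is later combined with each service name in
+// getServiceID to derive a stable, host-unique service ID.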
+func getMachineGUID() (string, error) { + const key = registry.LOCAL_MACHINE + const path = `SOFTWARE\Microsoft\Cryptography` + const name = "MachineGuid" + + k, err := registry.OpenKey(key, path, registry.READ|registry.WOW64_64KEY) + if err != nil { + return "", errors.Wrapf(err, `failed to open HKLM\%v`, path) + } + + guid, _, err := k.GetStringValue(name) + if err != nil { + return "", errors.Wrapf(err, `failed to get value of HKLM\%v\%v`, path, name) + } + + return guid, nil +} + +// getServiceID returns a unique ID for the service that is derived from the +// machine's GUID and the service's name. +func (reader *Reader) getServiceID(name string) string { + // hash returns a base64 encoded sha256 hash that is truncated to 10 chars. + hash := func(v string) string { + sum := sha256.Sum256([]byte(v)) + base64Hash := base64.RawURLEncoding.EncodeToString(sum[:]) + return base64Hash[:10] + } + + id, found := reader.ids[name] + if !found { + id = hash(reader.guid + name) + reader.ids[name] = id + } + + return id +} + +func getErrorCode(errno uint32) string { + name, found := errorNames[errno] + if found { + return name + } + return strconv.Itoa(int(errno)) +} + +func closeHandle(handle Handle) error { + if err := _CloseServiceHandle(uintptr(handle)); err != nil { + return ServiceErrno(err.(syscall.Errno)) + } + return nil +} diff --git a/metricbeat/module/windows/service/reader_test.go b/metricbeat/module/windows/service/reader_test.go new file mode 100644 index 00000000000..0d2cb7d7e03 --- /dev/null +++ b/metricbeat/module/windows/service/reader_test.go @@ -0,0 +1,64 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// +build windows + +package service + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestNewReader(t *testing.T) { + reader, err := NewReader() + assert.NoError(t, err) + assert.NotNil(t, reader) + defer reader.Close() + assert.NotNil(t, reader.handle) +} + +func TestOpenSCManager(t *testing.T) { + handle, err := openSCManager("invalidMachine", "", ScManagerEnumerateService|ScManagerConnect) + assert.Error(t, err) + assert.Equal(t, handle, InvalidDatabaseHandle) + + handle, err = openSCManager("", "invalidDbName", ScManagerEnumerateService|ScManagerConnect) + assert.Error(t, err) + assert.Equal(t, handle, InvalidDatabaseHandle) + + handle, err = openSCManager("", "", ScManagerEnumerateService|ScManagerConnect) + assert.NoError(t, err) + assert.NotEqual(t, handle, InvalidDatabaseHandle) + closeHandle(handle) +} + +func TestGetMachineGUID(t *testing.T) { + guid, err := getMachineGUID() + assert.NoError(t, err) + assert.NotNil(t, guid) +} + +func TestRead(t *testing.T) { + reader, err := NewReader() + assert.NoError(t, err) + result, err := reader.Read() + assert.NoError(t, err) + assert.True(t, len(result) > 0) + reader.Close() +} diff --git a/metricbeat/module/windows/service/service.go b/metricbeat/module/windows/service/service.go index fce48ab6cff..4a0bf2b9e75 100644 --- a/metricbeat/module/windows/service/service.go +++ b/metricbeat/module/windows/service/service.go @@ -37,14 +37,14 @@ func init() { // multiple fetch calls. type MetricSet struct { mb.BaseMetricSet - reader *ServiceReader + reader *Reader } // New create a new instance of the MetricSet // Part of new is also setting up the configuration by processing additional // configuration entries if needed. func New(base mb.BaseMetricSet) (mb.MetricSet, error) { - reader, err := NewServiceReader() + reader, err := NewReader() if err != nil { return nil, err } diff --git a/metricbeat/module/windows/service/service_integration_windows_test.go b/metricbeat/module/windows/service/service_integration_test.go similarity index 98% rename from metricbeat/module/windows/service/service_integration_windows_test.go rename to metricbeat/module/windows/service/service_integration_test.go index 51306fe5c37..fe1e987fc89 100644 --- a/metricbeat/module/windows/service/service_integration_windows_test.go +++ b/metricbeat/module/windows/service/service_integration_test.go @@ -15,7 +15,8 @@ // specific language governing permissions and limitations // under the License. 
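For context on the build-constraint change below: in Go's // +build syntax, space-separated terms on a single line are ORed, while separate // +build lines are ANDed. "// +build integration windows" therefore matched either tag, whereas the pair
// +build integration
// +build windows
matches only builds that set both, restricting this file to integration test runs on Windows.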
-// +build integration windows +// +build integration +// +build windows package service @@ -53,7 +54,7 @@ func TestData(t *testing.T) { } func TestReadService(t *testing.T) { - reader, err := NewServiceReader() + reader, err := NewReader() if err != nil { t.Fatal(err) } diff --git a/metricbeat/module/windows/service/service_windows.go b/metricbeat/module/windows/service/service_status.go similarity index 56% rename from metricbeat/module/windows/service/service_windows.go rename to metricbeat/module/windows/service/service_status.go index 877be4e854f..8bef03126ad 100644 --- a/metricbeat/module/windows/service/service_windows.go +++ b/metricbeat/module/windows/service/service_status.go @@ -21,21 +21,18 @@ package service import ( "bytes" - "crypto/sha256" - "encoding/base64" "strconv" "syscall" "time" "unicode/utf16" "unsafe" + "github.com/elastic/beats/v7/libbeat/common" + "github.com/pkg/errors" "golang.org/x/sys/windows" - "golang.org/x/sys/windows/registry" - "github.com/elastic/beats/v7/libbeat/common" "github.com/elastic/beats/v7/libbeat/logp" - "github.com/elastic/beats/v7/winlogbeat/sys" "github.com/elastic/gosigar" ) @@ -47,52 +44,18 @@ import ( //sys _QueryServiceConfig2(serviceHandle ServiceHandle, infoLevel ServiceConfigInformation, configBuffer *byte, bufSize uint32, bytesNeeded *uint32) (err error) [failretval==0] = advapi32.QueryServiceConfig2W //sys _CloseServiceHandle(handle uintptr) (err error) = advapi32.CloseServiceHandle -var ( - sizeofEnumServiceStatusProcess = (int)(unsafe.Sizeof(EnumServiceStatusProcess{})) -) - -type ServiceDatabaseHandle uintptr - -type ServiceHandle uintptr - -type ProcessHandle uintptr - -type ServiceConfigInformation uint32 - const ( - ServiceConfigDelayedAutoStartInfo ServiceConfigInformation = 3 - ServiceConfigDescription ServiceConfigInformation = 1 - ServiceConfigFailureActions ServiceConfigInformation = 2 - ServiceConfigFailureActionsFlag ServiceConfigInformation = 4 - ServiceConfigPreferredNode ServiceConfigInformation = 9 - ServiceConfigPreshutdownInfo ServiceConfigInformation = 7 - ServiceConfigRequiredPrivilegesInfo ServiceConfigInformation = 6 - ServiceConfigServiceSidInfo ServiceConfigInformation = 5 - ServiceConfigTriggerInfo ServiceConfigInformation = 8 - ServiceConfigLaunchProtected ServiceConfigInformation = 12 -) - -type serviceDelayedAutoStartInfo struct { - delayedAutoStart bool -} - -type serviceTriggerInfo struct { - cTriggers uint32 - pTriggers uintptr - pReserved uintptr -} - -var serviceStates = map[ServiceState]string{ - ServiceContinuePending: "Continuing", - ServicePausePending: "Pausing", - ServicePaused: "Paused", - ServiceRunning: "Running", - ServiceStartPending: "Starting", - ServiceStopPending: "Stopping", - ServiceStopped: "Stopped", -} + ConfigDelayedAutoStartInfo ConfigInformation = 3 + ConfigTriggerInfo ConfigInformation = 8 + ConfigLaunchProtected ConfigInformation = 12 + ConfigDescription ConfigInformation = 1 + ConfigFailureActions ConfigInformation = 2 + ConfigFailureActionsFlag ConfigInformation = 4 + ConfigPreferredNode ConfigInformation = 9 + ConfigPreshutdownInfo ConfigInformation = 7 + ConfigRequiredPrivilegesInfo ConfigInformation = 6 + ConfigServiceSidInfo ConfigInformation = 5 -const ( StartTypeBoot ServiceStartType = iota StartTypeSystem StartTypeAutomatic @@ -104,36 +67,33 @@ const ( StartTypeManualTriggered ) -var serviceStartTypes = map[ServiceStartType]string{ - StartTypeBoot: "Boot", - StartTypeSystem: "System", - StartTypeAutomatic: "Automatic", - StartTypeManual: "Manual", - 
StartTypeDisabled: "Disabled", - StartTypeAutomaticDelayed: "Automatic (Delayed)", - StartTypeAutomaticTriggered: "Automatic (Triggered)", - StartTypeAutomaticDelayedTriggered: "Automatic (Delayed, Triggered)", - StartTypeManualTriggered: "Manual (Triggered)", -} - -func (startType ServiceStartType) String() string { - return serviceStartTypes[startType] -} - -func (state ServiceState) String() string { - if val, ok := serviceStates[state]; ok { - return val +var ( + InvalidServiceHandle = ^Handle(0) + serviceStates = map[ServiceState]string{ + ServiceContinuePending: "Continuing", + ServicePausePending: "Pausing", + ServicePaused: "Paused", + ServiceRunning: "Running", + ServiceStartPending: "Starting", + ServiceStopPending: "Stopping", + ServiceStopped: "Stopped", + } + serviceStartTypes = map[ServiceStartType]string{ + StartTypeBoot: "Boot", + StartTypeSystem: "System", + StartTypeAutomatic: "Automatic", + StartTypeManual: "Manual", + StartTypeDisabled: "Disabled", + StartTypeAutomaticDelayed: "Automatic (Delayed)", + StartTypeAutomaticTriggered: "Automatic (Triggered)", + StartTypeAutomaticDelayedTriggered: "Automatic (Delayed, Triggered)", + StartTypeManualTriggered: "Manual (Triggered)", } - return "" -} +) -// errorNames is mapping of errno values to names. -// https://msdn.microsoft.com/en-us/library/windows/desktop/ms681383(v=vs.85).aspx -var errorNames = map[uint32]string{ - 1077: "ERROR_SERVICE_NEVER_STARTED", -} +type ConfigInformation uint32 -type ServiceStatus struct { +type Status struct { DisplayName string ServiceName string CurrentState string @@ -145,88 +105,28 @@ type ServiceStatus struct { BinaryPathName string } -type ServiceReader struct { - handle ServiceDatabaseHandle - state ServiceEnumState - guid string // Host's MachineGuid value (a unique ID for the host). - ids map[string]string // Cache of service IDs. 
- protectedServices map[string]struct{} +type serviceTriggerInfo struct { + cTriggers uint32 + pTriggers uintptr + pReserved uintptr } -var InvalidServiceDatabaseHandle = ^ServiceDatabaseHandle(0) -var InvalidServiceHandle = ^ServiceHandle(0) - -func OpenSCManager(machineName string, databaseName string, desiredAccess ServiceSCMAccessRight) (ServiceDatabaseHandle, error) { - var machineNamePtr *uint16 - if machineName != "" { - var err error - machineNamePtr, err = syscall.UTF16PtrFromString(machineName) - if err != nil { - return InvalidServiceDatabaseHandle, err - } - } - - var databaseNamePtr *uint16 - if databaseName != "" { - var err error - databaseNamePtr, err = syscall.UTF16PtrFromString(databaseName) - if err != nil { - return InvalidServiceDatabaseHandle, err - } - } - - handle, err := _OpenSCManager(machineNamePtr, databaseNamePtr, desiredAccess) - if err != nil { - return InvalidServiceDatabaseHandle, ServiceErrno(err.(syscall.Errno)) - } - - return handle, nil +type serviceDelayedAutoStartInfo struct { + delayedAutoStart bool } -func OpenService(handle ServiceDatabaseHandle, serviceName string, desiredAccess ServiceAccessRight) (ServiceHandle, error) { - var serviceNamePtr *uint16 - if serviceName != "" { - var err error - serviceNamePtr, err = syscall.UTF16PtrFromString(serviceName) - if err != nil { - return InvalidServiceHandle, err - } - } - - serviceHandle, err := _OpenService(handle, serviceNamePtr, desiredAccess) - if err != nil { - return InvalidServiceHandle, ServiceErrno(err.(syscall.Errno)) - } - - return serviceHandle, nil +func (startType ServiceStartType) String() string { + return serviceStartTypes[startType] } -func QueryServiceConfig2(serviceHandle ServiceHandle, infoLevel ServiceConfigInformation) ([]byte, error) { - var buffer []byte - - for { - var bytesNeeded uint32 - var bufPtr *byte - if len(buffer) > 0 { - bufPtr = &buffer[0] - } - - if err := _QueryServiceConfig2(serviceHandle, infoLevel, bufPtr, uint32(len(buffer)), &bytesNeeded); err != nil { - if ServiceErrno(err.(syscall.Errno)) == SERVICE_ERROR_INSUFFICIENT_BUFFER { - // Increase buffer size and retry. 
- buffer = make([]byte, len(buffer)+int(bytesNeeded)) - continue - } - return nil, err - } - - break +func (state ServiceState) String() string { + if val, ok := serviceStates[state]; ok { + return val } - - return buffer, nil + return "" } -func getServiceStates(handle ServiceDatabaseHandle, state ServiceEnumState, protectedServices map[string]struct{}) ([]ServiceStatus, error) { +func GetServiceStates(handle Handle, state ServiceEnumState, protectedServices map[string]struct{}) ([]Status, error) { var servicesReturned uint32 var servicesBuffer []byte @@ -243,7 +143,7 @@ func getServiceStates(handle ServiceDatabaseHandle, state ServiceEnumState, prot servicesBuffer = make([]byte, len(servicesBuffer)+int(bytesNeeded)) continue } - return nil, ServiceErrno(err.(syscall.Errno)) + return nil, errors.Wrap(ServiceErrno(err.(syscall.Errno)), "error while calling the _EnumServicesStatusEx API") } break @@ -256,9 +156,10 @@ func getServiceStates(handle ServiceDatabaseHandle, state ServiceEnumState, prot servicesBuffer = servicesBuffer[:len(servicesBuffer)-1] } - var services []ServiceStatus + var services []Status + var sizeStatusProcess = (int)(unsafe.Sizeof(EnumServiceStatusProcess{})) for i := 0; i < int(servicesReturned); i++ { - serviceTemp := (*EnumServiceStatusProcess)(unsafe.Pointer(&servicesBuffer[i*sizeofEnumServiceStatusProcess])) + serviceTemp := (*EnumServiceStatusProcess)(unsafe.Pointer(&servicesBuffer[i*sizeStatusProcess])) service, err := getServiceInformation(serviceTemp, servicesBuffer, handle, protectedServices) if err != nil { @@ -271,8 +172,8 @@ func getServiceStates(handle ServiceDatabaseHandle, state ServiceEnumState, prot return services, nil } -func getServiceInformation(rawService *EnumServiceStatusProcess, servicesBuffer []byte, handle ServiceDatabaseHandle, protectedServices map[string]struct{}) (ServiceStatus, error) { - service := ServiceStatus{ +func getServiceInformation(rawService *EnumServiceStatusProcess, servicesBuffer []byte, handle Handle, protectedServices map[string]struct{}) (Status, error) { + service := Status{ PID: rawService.ServiceStatusProcess.DwProcessId, } @@ -281,13 +182,13 @@ func getServiceInformation(rawService *EnumServiceStatusProcess, servicesBuffer displayNameOffset := uintptr(unsafe.Pointer(rawService.LpDisplayName)) - (uintptr)(unsafe.Pointer(&servicesBuffer[0])) strBuf := new(bytes.Buffer) - if err := sys.UTF16ToUTF8Bytes(servicesBuffer[displayNameOffset:], strBuf); err != nil { + if err := common.UTF16ToUTF8Bytes(servicesBuffer[displayNameOffset:], strBuf); err != nil { return service, err } service.DisplayName = strBuf.String() strBuf.Reset() - if err := sys.UTF16ToUTF8Bytes(servicesBuffer[serviceNameOffset:], strBuf); err != nil { + if err := common.UTF16ToUTF8Bytes(servicesBuffer[serviceNameOffset:], strBuf); err != nil { return service, err } service.ServiceName = strBuf.String() @@ -307,12 +208,12 @@ func getServiceInformation(rawService *EnumServiceStatusProcess, servicesBuffer service.ExitCode = rawService.ServiceStatusProcess.DwServiceSpecificExitCode } - serviceHandle, err := OpenService(handle, service.ServiceName, ServiceQueryConfig) + serviceHandle, err := openServiceHandle(handle, service.ServiceName, ServiceQueryConfig) if err != nil { - return service, err + return service, errors.Wrapf(err, "error while opening service %s", service.ServiceName) } - defer CloseServiceHandle(serviceHandle) + defer closeHandle(serviceHandle) // Get detailed information if err := getAdditionalServiceInfo(serviceHandle, &service); err != nil {
@@ -341,21 +242,25 @@ func getServiceInformation(rawService *EnumServiceStatusProcess, servicesBuffer return service, nil } -// getServiceUptime returns the uptime for process -func getServiceUptime(processID uint32) (time.Duration, error) { - var processCreationTime gosigar.ProcTime +func openServiceHandle(handle Handle, serviceName string, desiredAccess ServiceAccessRight) (Handle, error) { + var serviceNamePtr *uint16 + if serviceName != "" { + var err error + serviceNamePtr, err = syscall.UTF16PtrFromString(serviceName) + if err != nil { + return InvalidServiceHandle, err + } + } - err := processCreationTime.Get(int(processID)) + serviceHandle, err := _OpenService(handle, serviceNamePtr, desiredAccess) if err != nil { - return time.Duration(processCreationTime.StartTime), err + return InvalidServiceHandle, ServiceErrno(err.(syscall.Errno)) } - uptime := time.Since(time.Unix(0, int64(processCreationTime.StartTime)*int64(time.Millisecond))) - - return uptime, nil + return serviceHandle, nil } -func getAdditionalServiceInfo(serviceHandle ServiceHandle, service *ServiceStatus) error { +func getAdditionalServiceInfo(serviceHandle Handle, service *Status) error { var buffer []byte for { @@ -371,7 +276,7 @@ buffer = make([]byte, len(buffer)+int(bytesNeeded)) continue } - return ServiceErrno(err.(syscall.Errno)) + return errors.Wrapf(ServiceErrno(err.(syscall.Errno)), "error while querying the service configuration %s", service.ServiceName) } serviceQueryConfig := (*QueryServiceConfig)(unsafe.Pointer(&buffer[0])) service.StartType = ServiceStartType(serviceQueryConfig.DwStartType) @@ -379,13 +284,13 @@ binaryPathNameOffset := uintptr(unsafe.Pointer(serviceQueryConfig.LpBinaryPathName)) - (uintptr)(unsafe.Pointer(&buffer[0])) strBuf := new(bytes.Buffer) - if err := sys.UTF16ToUTF8Bytes(buffer[serviceStartNameOffset:], strBuf); err != nil { + if err := common.UTF16ToUTF8Bytes(buffer[serviceStartNameOffset:], strBuf); err != nil { return err } service.ServiceStartName = strBuf.String() strBuf.Reset() - if err := sys.UTF16ToUTF8Bytes(buffer[binaryPathNameOffset:], strBuf); err != nil { + if err := common.UTF16ToUTF8Bytes(buffer[binaryPathNameOffset:], strBuf); err != nil { return err } service.BinaryPathName = strBuf.String() @@ -396,21 +301,21 @@ return nil } -func getOptionalServiceInfo(serviceHandle ServiceHandle, service *ServiceStatus) error { +func getOptionalServiceInfo(serviceHandle Handle, service *Status) error { // Get information if the service is started delayed or triggered. Only valid for automatic or manual services. So filter them first. if service.StartType == StartTypeAutomatic || service.StartType == StartTypeManual { var delayedInfo *serviceDelayedAutoStartInfo if service.StartType == StartTypeAutomatic { - delayedInfoBuffer, err := QueryServiceConfig2(serviceHandle, ServiceConfigDelayedAutoStartInfo) + delayedInfoBuffer, err := queryServiceConfig2(serviceHandle, ConfigDelayedAutoStartInfo) if err != nil { - return err + return errors.Wrapf(err, "error while querying the service configuration %s", service.ServiceName) } delayedInfo = (*serviceDelayedAutoStartInfo)(unsafe.Pointer(&delayedInfoBuffer[0])) } // Get information if the service is triggered.
- triggeredInfoBuffer, err := QueryServiceConfig2(serviceHandle, ServiceConfigTriggerInfo) + triggeredInfoBuffer, err := queryServiceConfig2(serviceHandle, ConfigTriggerInfo) if err != nil { return err } @@ -436,104 +341,43 @@ func getOptionalServiceInfo(serviceHandle ServiceHandle, service *ServiceStatus) return nil } -func (reader *ServiceReader) Close() error { - return CloseServiceDatabaseHandle(reader.handle) -} - -func CloseServiceDatabaseHandle(handle ServiceDatabaseHandle) error { - if err := _CloseServiceHandle(uintptr(handle)); err != nil { - return ServiceErrno(err.(syscall.Errno)) - } - - return nil -} - -func CloseServiceHandle(handle ServiceHandle) error { - if err := _CloseServiceHandle(uintptr(handle)); err != nil { - return ServiceErrno(err.(syscall.Errno)) - } - - return nil -} - -func NewServiceReader() (*ServiceReader, error) { - hndl, err := OpenSCManager("", "", ScManagerEnumerateService|ScManagerConnect) - if err != nil { - return nil, errors.Wrap(err, "initialization failed") - } - - guid, err := getMachineGUID() - if err != nil { - return nil, err - } - - r := &ServiceReader{ - handle: hndl, - state: ServiceStateAll, - guid: guid, - ids: map[string]string{}, - protectedServices: map[string]struct{}{}, - } - - return r, nil -} - -func (reader *ServiceReader) Read() ([]common.MapStr, error) { - services, err := getServiceStates(reader.handle, reader.state, reader.protectedServices) - if err != nil { - return nil, err - } - - result := make([]common.MapStr, 0, len(services)) - - for _, service := range services { - ev := common.MapStr{ - "id": reader.getServiceID(service.ServiceName), - "display_name": service.DisplayName, - "name": service.ServiceName, - "state": service.CurrentState, - "start_type": service.StartType.String(), - "start_name": service.ServiceStartName, - "path_name": service.BinaryPathName, - } - - if service.CurrentState == "Stopped" { - ev.Put("exit_code", getErrorCode(service.ExitCode)) - } +func queryServiceConfig2(serviceHandle Handle, infoLevel ConfigInformation) ([]byte, error) { + var buffer []byte - if service.PID > 0 { - ev.Put("pid", service.PID) + for { + var bytesNeeded uint32 + var bufPtr *byte + if len(buffer) > 0 { + bufPtr = &buffer[0] } - if service.Uptime > 0 { - if _, err = ev.Put("uptime.ms", service.Uptime); err != nil { - return nil, err + if err := _QueryServiceConfig2(serviceHandle, infoLevel, bufPtr, uint32(len(buffer)), &bytesNeeded); err != nil { + if ServiceErrno(err.(syscall.Errno)) == SERVICE_ERROR_INSUFFICIENT_BUFFER { + // Increase buffer size and retry. + buffer = make([]byte, len(buffer)+int(bytesNeeded)) + continue } + return nil, err } - result = append(result, ev) + break } - return result, nil + return buffer, nil } -// getServiceID returns a unique ID for the service that is derived from the -// machine's GUID and the service's name. -func (reader *ServiceReader) getServiceID(name string) string { - // hash returns a base64 encoded sha256 hash that is truncated to 10 chars. 
- hash := func(v string) string { - sum := sha256.Sum256([]byte(v)) - base64Hash := base64.RawURLEncoding.EncodeToString(sum[:]) - return base64Hash[:10] - } +// getServiceUptime returns the uptime for the process +func getServiceUptime(processID uint32) (time.Duration, error) { + var processCreationTime gosigar.ProcTime - id, found := reader.ids[name] - if !found { - id = hash(reader.guid + name) - reader.ids[name] = id + err := processCreationTime.Get(int(processID)) + if err != nil { + return time.Duration(processCreationTime.StartTime), err } - return id + uptime := time.Since(time.Unix(0, int64(processCreationTime.StartTime)*int64(time.Millisecond))) + + return uptime, nil } func (e ServiceErrno) Error() string { @@ -556,31 +400,3 @@ func (e ServiceErrno) Error() string { } return string(utf16.Decode(b[:n])) } - -// getMachineGUID returns the machine's GUID value which is unique to a Windows -// installation. -func getMachineGUID() (string, error) { - const key = registry.LOCAL_MACHINE - const path = `SOFTWARE\Microsoft\Cryptography` - const name = "MachineGuid" - - k, err := registry.OpenKey(key, path, registry.READ|registry.WOW64_64KEY) - if err != nil { - return "", errors.Wrapf(err, `failed to open HKLM\%v`, path) - } - - guid, _, err := k.GetStringValue(name) - if err != nil { - return "", errors.Wrapf(err, `failed to get value of HKLM\%v\%v`, path, name) - } - - return guid, nil -} - -func getErrorCode(errno uint32) string { - name, found := errorNames[errno] - if found { - return name - } - return strconv.Itoa(int(errno)) -} diff --git a/metricbeat/module/windows/service/service_status_test.go b/metricbeat/module/windows/service/service_status_test.go new file mode 100644 index 00000000000..667632abf4c --- /dev/null +++ b/metricbeat/module/windows/service/service_status_test.go @@ -0,0 +1,36 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License.
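Both `GetServiceStates` and the relocated `queryServiceConfig2` above follow the same Win32 buffer-negotiation contract: call once with an empty buffer, read the required size back through `bytesNeeded` when the call fails with `SERVICE_ERROR_INSUFFICIENT_BUFFER`, grow the buffer, and retry; the new test below then exercises that path end to end against a real service control manager. A minimal, platform-independent sketch of the pattern follows; the `fakeQuery` callback and the `errInsufficientBuffer` sentinel are hypothetical stand-ins for the real `_QueryServiceConfig2` syscall wrapper and its errno, not part of this patch:

```go
package main

import (
	"errors"
	"fmt"
)

// errInsufficientBuffer stands in for SERVICE_ERROR_INSUFFICIENT_BUFFER
// (an assumption for this sketch).
var errInsufficientBuffer = errors.New("insufficient buffer")

// queryWithGrowingBuffer retries query with a larger buffer until the payload
// fits, mirroring the loops in GetServiceStates and queryServiceConfig2.
func queryWithGrowingBuffer(query func(buf []byte, bytesNeeded *uint32) error) ([]byte, error) {
	var buffer []byte
	for {
		var bytesNeeded uint32
		err := query(buffer, &bytesNeeded)
		if err == nil {
			return buffer, nil
		}
		if !errors.Is(err, errInsufficientBuffer) {
			return nil, err
		}
		// The callee reported how much more room it needs; grow and retry,
		// matching the module's make([]byte, len(buffer)+int(bytesNeeded)).
		buffer = make([]byte, len(buffer)+int(bytesNeeded))
	}
}

func main() {
	payload := []byte("hypothetical service configuration blob")

	// fakeQuery refuses to copy until the buffer is large enough, which is
	// how an undersized buffer is rejected by the real API.
	fakeQuery := func(buf []byte, bytesNeeded *uint32) error {
		if len(buf) < len(payload) {
			*bytesNeeded = uint32(len(payload) - len(buf))
			return errInsufficientBuffer
		}
		copy(buf, payload)
		return nil
	}

	data, err := queryWithGrowingBuffer(fakeQuery)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data)) // prints the full blob after one grow-and-retry round trip
}
```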
+ +// +build windows + +package service + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestGetServiceStates(t *testing.T) { + handle, err := openSCManager("", "", ScManagerEnumerateService|ScManagerConnect) + assert.NoError(t, err) + assert.NotEqual(t, handle, InvalidDatabaseHandle) + services, err := GetServiceStates(handle, ServiceStateAll, map[string]struct{}{}) + assert.NoError(t, err) + assert.True(t, len(services) > 0) + closeHandle(handle) +} diff --git a/metricbeat/module/windows/service/zservice_windows.go b/metricbeat/module/windows/service/zservice_windows.go index 31fdf630fe7..cb816720b15 100644 --- a/metricbeat/module/windows/service/zservice_windows.go +++ b/metricbeat/module/windows/service/zservice_windows.go @@ -64,9 +64,9 @@ var ( procCloseServiceHandle = modadvapi32.NewProc("CloseServiceHandle") ) -func _OpenSCManager(machineName *uint16, databaseName *uint16, desiredAcces ServiceSCMAccessRight) (handle ServiceDatabaseHandle, err error) { +func _OpenSCManager(machineName *uint16, databaseName *uint16, desiredAcces ServiceSCMAccessRight) (handle Handle, err error) { r0, _, e1 := syscall.Syscall(procOpenSCManagerW.Addr(), 3, uintptr(unsafe.Pointer(machineName)), uintptr(unsafe.Pointer(databaseName)), uintptr(desiredAcces)) - handle = ServiceDatabaseHandle(r0) + handle = Handle(r0) if handle == 0 { if e1 != 0 { err = errnoErr(e1) @@ -77,7 +77,7 @@ func _OpenSCManager(machineName *uint16, databaseName *uint16, desiredAcces Serv return } -func _EnumServicesStatusEx(handle ServiceDatabaseHandle, infoLevel ServiceInfoLevel, serviceType ServiceType, serviceState ServiceEnumState, services *byte, bufSize uint32, bytesNeeded *uint32, servicesReturned *uint32, resumeHandle *uintptr, groupName *uintptr) (err error) { +func _EnumServicesStatusEx(handle Handle, infoLevel ServiceInfoLevel, serviceType ServiceType, serviceState ServiceEnumState, services *byte, bufSize uint32, bytesNeeded *uint32, servicesReturned *uint32, resumeHandle *uintptr, groupName *uintptr) (err error) { r1, _, e1 := syscall.Syscall12(procEnumServicesStatusExW.Addr(), 10, uintptr(handle), uintptr(infoLevel), uintptr(serviceType), uintptr(serviceState), uintptr(unsafe.Pointer(services)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), uintptr(unsafe.Pointer(servicesReturned)), uintptr(unsafe.Pointer(resumeHandle)), uintptr(unsafe.Pointer(groupName)), 0, 0) if r1 == 0 { if e1 != 0 { @@ -89,9 +89,9 @@ func _EnumServicesStatusEx(handle ServiceDatabaseHandle, infoLevel ServiceInfoLe return } -func _OpenService(handle ServiceDatabaseHandle, serviceName *uint16, desiredAccess ServiceAccessRight) (serviceHandle ServiceHandle, err error) { +func _OpenService(handle Handle, serviceName *uint16, desiredAccess ServiceAccessRight) (serviceHandle Handle, err error) { r0, _, e1 := syscall.Syscall(procOpenServiceW.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(serviceName)), uintptr(desiredAccess)) - serviceHandle = ServiceHandle(r0) + serviceHandle = Handle(r0) if serviceHandle == 0 { if e1 != 0 { err = errnoErr(e1) @@ -102,7 +102,7 @@ func _OpenService(handle ServiceDatabaseHandle, serviceName *uint16, desiredAcce return } -func _QueryServiceConfig(serviceHandle ServiceHandle, serviceConfig *byte, bufSize uint32, bytesNeeded *uint32) (err error) { +func _QueryServiceConfig(serviceHandle Handle, serviceConfig *byte, bufSize uint32, bytesNeeded *uint32) (err error) { r1, _, e1 := syscall.Syscall6(procQueryServiceConfigW.Addr(), 4, uintptr(serviceHandle), 
uintptr(unsafe.Pointer(serviceConfig)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), 0, 0) if r1 == 0 { if e1 != 0 { @@ -114,7 +114,7 @@ func _QueryServiceConfig(serviceHandle ServiceHandle, serviceConfig *byte, bufSi return } -func _QueryServiceConfig2(serviceHandle ServiceHandle, infoLevel ServiceConfigInformation, configBuffer *byte, bufSize uint32, bytesNeeded *uint32) (err error) { +func _QueryServiceConfig2(serviceHandle Handle, infoLevel ConfigInformation, configBuffer *byte, bufSize uint32, bytesNeeded *uint32) (err error) { r1, _, e1 := syscall.Syscall6(procQueryServiceConfig2W.Addr(), 5, uintptr(serviceHandle), uintptr(infoLevel), uintptr(unsafe.Pointer(configBuffer)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), 0) if r1 == 0 { if e1 != 0 { diff --git a/metricbeat/module/zookeeper/fields.go b/metricbeat/module/zookeeper/fields.go index 9ab4ca9a74b..0087aaab85b 100644 --- a/metricbeat/module/zookeeper/fields.go +++ b/metricbeat/module/zookeeper/fields.go @@ -30,7 +30,7 @@ func init() { } // AssetZookeeper returns asset data. -// This is the base64 encoded gzipped contents of ../metricbeat/module/zookeeper. +// This is the base64 encoded gzipped contents of module/zookeeper. func AssetZookeeper() string { return "eJy0WM2O2zgMvucpiF56afMAc1ig2O6hWLSLLbqXXlxGZmJhZNGV6PzM0y8kW/5J7CQz4/EpiCx+Hyn+fPJHeKTTAzwxPxJV5FYAosXQA7z7yfx3/O/dCiAnr5yuRLN9gD9WAADdOpQkTisPio0hJZTD5gRSEGy5dh8NiYSX2Gphp+0OFJcl2tyvVwC+YCeZYrvVuwfYovG0AnBkCD09wA5XAFtNJvcPEfUjWCxpzDg8cqrC647rqv1ngnJ4fnU7f4FiK6itj2STF44qdq0TnY/d9iH38Ay5DfkptpZUAO+WpkiGZ+xsemboNxySbT/4/5zJkI22Qo68ZFz50QuJk2G7O1u4QiA8X1qTcG4yYf6uqaZ8IbR/o7EZz3tMR4r0fjHU7625m7ierCyE+WcPNTabsEor7mZOXQH5FQzckfvnBdzsO0vEWAzrm8l8LTcL9hJ+TcbvkU4HdufneSOEfWdKtteTyFhVjo+6RKEsR8HM66dpGs8/xk+9bQhmgbcDXgFtmpNBIatOa9yfA76YyZ4c7ihZhg3JgcgCWU/lxlAMkgdtodTGaE+KB/1tzI6qgkpyaHymuF4s57/V5YZcCFEHAE+Wc5qhsWVj+EBuqV7W43eWwYcYtWWgaufISozUNKMSj9lWG8oSErtFI/QVj7qsS7A9U20IejQPGJnnsGUXSffZVjlW5GdimRKuxOPCXFPC3ZVZHQ1tl6Kh7fNp2LrM0Og9ZXMtf4ksG9gG4cFJSYEC6LqMMyeIbKbJckX2TbPu23y2BexUHndmGtfiBW2u7S5z9LsmL8uHdgACCaQJqyXKQ7Q3lGj2c06Z2gu5ad4VqkcSv15YXPSc+/hZkgO7xwTZ6ZnrxBZUH/eQCnAzhKg5XX+yavmjba1DtB5OUqFzp3Di4yLqBlvXzKfZenJ7cpkXlAXFx3c2FHrNuDASpxkmJ6soz95urDUAg+nWCb1DQRYQwrAdBQS0B0OYzxXFnpwfX256pmg0nvtQoRRNyLWi9fTuu2Vdux3Q5rCptcnBi2sqvnFrmvMBRRVv1CCjbfKDxu1JgJs8MKzQ3Nsmo/B5I5aNqLqQ+Tf5jUvmebeP982mF1w4vNvHi0pzyXj/qlvGa8f5YHgbTVaGJ93absZL8OIiTBc6Z5LDeRjnfBrau7wnXPXpwq90O8AyJFzwT3RJIPhItpOS7QkKgyNfsY2OYhuJNGZnKV4qy+dRTHryLSleqM5nUmy15sIUO3qcv3hCfQ2dPRDqGKzhS2id3Uz6AFJoDyWegLQU5IJEano/sOuGxhr+CYsH7ekDaAkDIiotNGxpWky/rpH9YEEzuPHE3nVLWb6yuDvJ2Hy9ApRR4H6EQNFREeUenshxMz8Hp9sqNg8lOxopUNsGDYM0EU5NdvnvWBPeJHupzV5pUC8WkxOwwdYdkO04z/I5ITaxMIL+HARLm+M/0+fdNCQGjVpbqP10rj4d9XS07yix/6z+XRPs0dQTNMSh9RhHD3z5HHKIIloYG16HMPE2lmPFqoiSBiEWTazUmDLkBTdG+2Iwspv6DK9rH7yK3SQnIVdqS/EdobJihwbY5RT1URhfBdrdTB29tlYHrvoUiJaobh2c+ax0vnIv7l8xaHdG/lMKstc7q7eaGi1SkdOcB4aHQocjSLUcirXlv/o/AAD//7suFxk=" } diff --git a/metricbeat/modules.d/kvm.yml.disabled b/metricbeat/modules.d/kvm.yml.disabled index 878e279b969..8450e1afc6d 100644 --- a/metricbeat/modules.d/kvm.yml.disabled +++ b/metricbeat/modules.d/kvm.yml.disabled @@ -4,5 +4,6 @@ - module: kvm #metricsets: # - dommemstat + # - status period: 10s hosts: ["unix:///var/run/libvirt/libvirt-sock"] diff --git a/metricbeat/modules.d/windows.yml.disabled b/metricbeat/modules.d/windows.yml.disabled index 059667d82f3..270e5a7f64c 100644 --- a/metricbeat/modules.d/windows.yml.disabled +++ b/metricbeat/modules.d/windows.yml.disabled @@ -2,24 +2,19 @@ # Docs: 
https://www.elastic.co/guide/en/beats/metricbeat/master/metricbeat-module-windows.html - module: windows - #metricsets: - # - service + metricsets: + - service period: 1m #- module: windows # metricsets: -# - perfmon +# - perfmon # period: 10s -# perfmon.counters: -# - instance_label: processor.name -# instance_name: total -# measurement_label: processor.time.total.pct -# query: '\Processor Information(_Total)\% Processor Time' -# -# - instance_label: physical_disk.name -# measurement_label: physical_disk.write.per_sec -# query: '\PhysicalDisk(*)\Disk Writes/sec' -# -# - instance_label: physical_disk.name -# measurement_label: physical_disk.write.time.pct -# query: '\PhysicalDisk(*)\% Disk Write Time' +# perfmon.queries: +# - object: 'Process' +# instance: ["*"] +# counters: +# - name: 'Disk Writes/sec' +# field: physical_disk.write.per_sec +# format: "float" +# - name: "% Disk Write Time" diff --git a/metricbeat/scripts/assets/assets.go b/metricbeat/scripts/assets/assets.go deleted file mode 100644 index 51c24afe452..00000000000 --- a/metricbeat/scripts/assets/assets.go +++ /dev/null @@ -1,75 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package main - -import ( - "flag" - "fmt" - "io/ioutil" - "os" - "path" - - "github.com/elastic/beats/v7/libbeat/asset" - "github.com/elastic/beats/v7/libbeat/generator/fields" - "github.com/elastic/beats/v7/licenses" -) - -func main() { - - flag.Parse() - args := flag.Args() - - if len(args) != 1 { - fmt.Fprintln(os.Stderr, "Module path must be set") - os.Exit(1) - } - - dir := args[0] - - modules, err := fields.GetModules(dir) - if err != nil { - fmt.Fprintf(os.Stderr, "Error fetching modules: %s\n", err) - os.Exit(1) - } - - for _, module := range modules { - files, err := fields.CollectFiles(module, dir) - if err != nil { - fmt.Fprintf(os.Stderr, "Error fetching files for module %s: %s\n", module, err) - os.Exit(1) - } - - data, err := fields.GenerateFieldsYml(files) - if err != nil { - fmt.Fprintf(os.Stderr, "Error fetching files for module %s: %s\n", module, err) - os.Exit(1) - } - - bs, err := asset.CreateAsset(licenses.ASL2, "metricbeat", module, module, data, "asset.ModuleFieldsPri", dir+"/"+module) - if err != nil { - fmt.Fprintf(os.Stderr, "Error creating golang file from template: %s\n", err) - os.Exit(1) - } - - err = ioutil.WriteFile(path.Join(dir, module, "fields.go"), bs, 0644) - if err != nil { - fmt.Fprintf(os.Stderr, "Error writing fields.go: %s\n", err) - os.Exit(1) - } - } -} diff --git a/metricbeat/scripts/mage/target/metricset/metricset.go b/metricbeat/scripts/mage/target/metricset/metricset.go new file mode 100644 index 00000000000..46bd2171295 --- /dev/null +++ b/metricbeat/scripts/mage/target/metricset/metricset.go @@ -0,0 +1,55 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package metricset + +import ( + "os" + "path/filepath" + + "github.com/magefile/mage/sh" + + devtools "github.com/elastic/beats/v7/dev-tools/mage" +) + +// CreateMetricset creates a new metricset. +// +// Required ENV variables: +// * MODULE: Name of the module +// * METRICSET: Name of the metricset +func CreateMetricset() error { + ve, err := devtools.PythonVirtualenv() + if err != nil { + return err + } + python, err := devtools.LookVirtualenvPath(ve, "python") + if err != nil { + return err + } + beatsDir, err := devtools.ElasticBeatsDir() + if err != nil { + return err + } + scriptPath := filepath.Join(beatsDir, "metricbeat", "scripts", "create_metricset.py") + + _, err = sh.Exec( + map[string]string{}, os.Stdout, os.Stderr, python, scriptPath, + "--path", devtools.CWD(), "--es_beats", beatsDir, + "--module", os.Getenv("MODULE"), "--metricset", os.Getenv("METRICSET"), + ) + return err +} diff --git a/metricbeat/tests/system/test_base.py b/metricbeat/tests/system/test_base.py index 5e52872bb7b..4f680d29172 100644 --- a/metricbeat/tests/system/test_base.py +++ b/metricbeat/tests/system/test_base.py @@ -106,4 +106,4 @@ def get_kibana_url(self): return "http://" + self.compose_host("kibana") def kibana_dir(self): - return os.path.join(self.beat_path, "_meta", "kibana.generated") + return os.path.join(self.beat_path, "build", "kibana") diff --git a/packetbeat/magefile.go b/packetbeat/magefile.go index 7793ed32ec0..8e381260a92 100644 --- a/packetbeat/magefile.go +++ b/packetbeat/magefile.go @@ -20,7 +20,6 @@ package main import ( - "context" "fmt" "log" "regexp" @@ -38,6 +37,8 @@ import ( "github.com/elastic/beats/v7/dev-tools/mage/target/common" // mage:import _ "github.com/elastic/beats/v7/dev-tools/mage/target/integtest/notests" + // mage:import + _ "github.com/elastic/beats/v7/dev-tools/mage/target/test" ) func init() { @@ -46,12 +47,6 @@ func init() { devtools.BeatDescription = "Packetbeat analyzes network traffic and sends the data to Elasticsearch." } -// Aliases provides compatibility with CI while we transition all Beats -// to having common testing targets. -var Aliases = map[string]interface{}{ - "goTestUnit": GoUnitTest, // dev-tools/jenkins_ci.ps1 uses this. -} - // Build builds the Beat binary. func Build() error { return devtools.Build(devtools.DefaultBuildArgs()) @@ -192,24 +187,6 @@ func Dashboards() error { return devtools.KibanaDashboards("protos") } -// UnitTest executes the unit tests. -func UnitTest() { - mg.SerialDeps(GoUnitTest, PythonUnitTest) -} - -// GoUnitTest executes the Go unit tests. -// Use TEST_COVERAGE=true to enable code coverage profiling. -// Use RACE_DETECTOR=true to enable the race detector. 
-func GoUnitTest(ctx context.Context) error { - return devtools.GoTest(ctx, devtools.DefaultGoTestUnitArgs()) -} - -// PythonUnitTest executes the python system tests. -func PythonUnitTest() error { - mg.SerialDeps(Fields, devtools.BuildSystemTestBinary) - return devtools.PythonNoseTest(devtools.DefaultPythonTestUnitArgs()) -} - // ----------------------------------------------------------------------------- // Customizations specific to Packetbeat. // - Config file contains an OS specific device name (affects darwin, windows). diff --git a/packetbeat/packetbeat.reference.yml b/packetbeat/packetbeat.reference.yml index 993ddb4b06c..30621ffbde4 100644 --- a/packetbeat/packetbeat.reference.yml +++ b/packetbeat/packetbeat.reference.yml @@ -953,6 +953,27 @@ output.elasticsearch: # The pin is a base64 encoded string of the SHA-256 fingerprint. #ssl.ca_sha256: "" + # Enable Kerberos support. Kerberos is automatically enabled if any Kerberos setting is set. + #kerberos.enabled: true + + # Authentication type to use with Kerberos. Available options: keytab, password. + #kerberos.auth_type: password + + # Path to the keytab file. It is used when auth_type is set to keytab. + #kerberos.keytab: /etc/elastic.keytab + + # Path to the Kerberos configuration. + #kerberos.config_path: /etc/krb5.conf + + # Name of the Kerberos user. + #kerberos.username: elastic + + # Password of the Kerberos user. It is used when auth_type is set to password. + #kerberos.password: changeme + + # Kerberos realm. + #kerberos.realm: ELASTIC + #----------------------------- Logstash output --------------------------------- #output.logstash: # Boolean flag to enable or disable the output module. @@ -1221,6 +1242,9 @@ output.elasticsearch: # never, once, and freely. Default is never. #ssl.renegotiation: never + # Enable Kerberos support. Kerberos is automatically enabled if any Kerberos setting is set. + #kerberos.enabled: true + # Authentication type to use with Kerberos. Available options: keytab, password. #kerberos.auth_type: password @@ -1807,6 +1831,27 @@ logging.files: # never, once, and freely. Default is never. #ssl.renegotiation: never + # Enable Kerberos support. Kerberos is automatically enabled if any Kerberos setting is set. + #kerberos.enabled: true + + # Authentication type to use with Kerberos. Available options: keytab, password. + #kerberos.auth_type: password + + # Path to the keytab file. It is used when auth_type is set to keytab. + #kerberos.keytab: /etc/elastic.keytab + + # Path to the Kerberos configuration. + #kerberos.config_path: /etc/krb5.conf + + # Name of the Kerberos user. + #kerberos.username: elastic + + # Password of the Kerberos user. It is used when auth_type is set to password. + #kerberos.password: changeme + + # Kerberos realm. + #kerberos.realm: ELASTIC + #metrics.period: 10s #state.period: 1m diff --git a/vendor/github.com/docker/spdystream/CONTRIBUTING.md b/vendor/github.com/docker/spdystream/CONTRIBUTING.md new file mode 100644 index 00000000000..d4eddcc5396 --- /dev/null +++ b/vendor/github.com/docker/spdystream/CONTRIBUTING.md @@ -0,0 +1,13 @@ +# Contributing to SpdyStream + +Want to hack on spdystream? Awesome! Here are instructions to get you +started. + +SpdyStream is a part of the [Docker](https://docker.io) project, and follows +the same rules and principles. If you're already familiar with the way +Docker does things, you'll feel right at home. + +Otherwise, go read +[Docker's contributions guidelines](https://github.com/dotcloud/docker/blob/master/CONTRIBUTING.md). 
+ +Happy hacking! diff --git a/vendor/github.com/docker/spdystream/LICENSE b/vendor/github.com/docker/spdystream/LICENSE new file mode 100644 index 00000000000..9e4bd4dbee9 --- /dev/null +++ b/vendor/github.com/docker/spdystream/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2014-2015 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/docker/spdystream/LICENSE.docs b/vendor/github.com/docker/spdystream/LICENSE.docs new file mode 100644 index 00000000000..e26cd4fc8ed --- /dev/null +++ b/vendor/github.com/docker/spdystream/LICENSE.docs @@ -0,0 +1,425 @@ +Attribution-ShareAlike 4.0 International + +======================================================================= + +Creative Commons Corporation ("Creative Commons") is not a law firm and +does not provide legal services or legal advice. Distribution of +Creative Commons public licenses does not create a lawyer-client or +other relationship. Creative Commons makes its licenses and related +information available on an "as-is" basis. Creative Commons gives no +warranties regarding its licenses, any material licensed under their +terms and conditions, or any related information. Creative Commons +disclaims all liability for damages resulting from their use to the +fullest extent possible. + +Using Creative Commons Public Licenses + +Creative Commons public licenses provide a standard set of terms and +conditions that creators and other rights holders may use to share +original works of authorship and other material subject to copyright +and certain other rights specified in the public license below. The +following considerations are for informational purposes only, are not +exhaustive, and do not form part of our licenses. + + Considerations for licensors: Our public licenses are + intended for use by those authorized to give the public + permission to use material in ways otherwise restricted by + copyright and certain other rights. Our licenses are + irrevocable. Licensors should read and understand the terms + and conditions of the license they choose before applying it. + Licensors should also secure all rights necessary before + applying our licenses so that the public can reuse the + material as expected. Licensors should clearly mark any + material not subject to the license. This includes other CC- + licensed material, or material used under an exception or + limitation to copyright. More considerations for licensors: + wiki.creativecommons.org/Considerations_for_licensors + + Considerations for the public: By using one of our public + licenses, a licensor grants the public permission to use the + licensed material under specified terms and conditions. If + the licensor's permission is not necessary for any reason--for + example, because of any applicable exception or limitation to + copyright--then that use is not regulated by the license. Our + licenses grant only permissions under copyright and certain + other rights that a licensor has authority to grant. Use of + the licensed material may still be restricted for other + reasons, including because others have copyright or other + rights in the material. A licensor may make special requests, + such as asking that all changes be marked or described. + Although not required by our licenses, you are encouraged to + respect those requests where reasonable. More_considerations + for the public: + wiki.creativecommons.org/Considerations_for_licensees + +======================================================================= + +Creative Commons Attribution-ShareAlike 4.0 International Public +License + +By exercising the Licensed Rights (defined below), You accept and agree +to be bound by the terms and conditions of this Creative Commons +Attribution-ShareAlike 4.0 International Public License ("Public +License"). 
To the extent this Public License may be interpreted as a +contract, You are granted the Licensed Rights in consideration of Your +acceptance of these terms and conditions, and the Licensor grants You +such rights in consideration of benefits the Licensor receives from +making the Licensed Material available under these terms and +conditions. + + +Section 1 -- Definitions. + + a. Adapted Material means material subject to Copyright and Similar + Rights that is derived from or based upon the Licensed Material + and in which the Licensed Material is translated, altered, + arranged, transformed, or otherwise modified in a manner requiring + permission under the Copyright and Similar Rights held by the + Licensor. For purposes of this Public License, where the Licensed + Material is a musical work, performance, or sound recording, + Adapted Material is always produced where the Licensed Material is + synched in timed relation with a moving image. + + b. Adapter's License means the license You apply to Your Copyright + and Similar Rights in Your contributions to Adapted Material in + accordance with the terms and conditions of this Public License. + + c. BY-SA Compatible License means a license listed at + creativecommons.org/compatiblelicenses, approved by Creative + Commons as essentially the equivalent of this Public License. + + d. Copyright and Similar Rights means copyright and/or similar rights + closely related to copyright including, without limitation, + performance, broadcast, sound recording, and Sui Generis Database + Rights, without regard to how the rights are labeled or + categorized. For purposes of this Public License, the rights + specified in Section 2(b)(1)-(2) are not Copyright and Similar + Rights. + + e. Effective Technological Measures means those measures that, in the + absence of proper authority, may not be circumvented under laws + fulfilling obligations under Article 11 of the WIPO Copyright + Treaty adopted on December 20, 1996, and/or similar international + agreements. + + f. Exceptions and Limitations means fair use, fair dealing, and/or + any other exception or limitation to Copyright and Similar Rights + that applies to Your use of the Licensed Material. + + g. License Elements means the license attributes listed in the name + of a Creative Commons Public License. The License Elements of this + Public License are Attribution and ShareAlike. + + h. Licensed Material means the artistic or literary work, database, + or other material to which the Licensor applied this Public + License. + + i. Licensed Rights means the rights granted to You subject to the + terms and conditions of this Public License, which are limited to + all Copyright and Similar Rights that apply to Your use of the + Licensed Material and that the Licensor has authority to license. + + j. Licensor means the individual(s) or entity(ies) granting rights + under this Public License. + + k. Share means to provide material to the public by any means or + process that requires permission under the Licensed Rights, such + as reproduction, public display, public performance, distribution, + dissemination, communication, or importation, and to make material + available to the public including in ways that members of the + public may access the material from a place and at a time + individually chosen by them. + + l. 
Sui Generis Database Rights means rights other than copyright + resulting from Directive 96/9/EC of the European Parliament and of + the Council of 11 March 1996 on the legal protection of databases, + as amended and/or succeeded, as well as other essentially + equivalent rights anywhere in the world. + + m. You means the individual or entity exercising the Licensed Rights + under this Public License. Your has a corresponding meaning. + + +Section 2 -- Scope. + + a. License grant. + + 1. Subject to the terms and conditions of this Public License, + the Licensor hereby grants You a worldwide, royalty-free, + non-sublicensable, non-exclusive, irrevocable license to + exercise the Licensed Rights in the Licensed Material to: + + a. reproduce and Share the Licensed Material, in whole or + in part; and + + b. produce, reproduce, and Share Adapted Material. + + 2. Exceptions and Limitations. For the avoidance of doubt, where + Exceptions and Limitations apply to Your use, this Public + License does not apply, and You do not need to comply with + its terms and conditions. + + 3. Term. The term of this Public License is specified in Section + 6(a). + + 4. Media and formats; technical modifications allowed. The + Licensor authorizes You to exercise the Licensed Rights in + all media and formats whether now known or hereafter created, + and to make technical modifications necessary to do so. The + Licensor waives and/or agrees not to assert any right or + authority to forbid You from making technical modifications + necessary to exercise the Licensed Rights, including + technical modifications necessary to circumvent Effective + Technological Measures. For purposes of this Public License, + simply making modifications authorized by this Section 2(a) + (4) never produces Adapted Material. + + 5. Downstream recipients. + + a. Offer from the Licensor -- Licensed Material. Every + recipient of the Licensed Material automatically + receives an offer from the Licensor to exercise the + Licensed Rights under the terms and conditions of this + Public License. + + b. Additional offer from the Licensor -- Adapted Material. + Every recipient of Adapted Material from You + automatically receives an offer from the Licensor to + exercise the Licensed Rights in the Adapted Material + under the conditions of the Adapter's License You apply. + + c. No downstream restrictions. You may not offer or impose + any additional or different terms or conditions on, or + apply any Effective Technological Measures to, the + Licensed Material if doing so restricts exercise of the + Licensed Rights by any recipient of the Licensed + Material. + + 6. No endorsement. Nothing in this Public License constitutes or + may be construed as permission to assert or imply that You + are, or that Your use of the Licensed Material is, connected + with, or sponsored, endorsed, or granted official status by, + the Licensor or others designated to receive attribution as + provided in Section 3(a)(1)(A)(i). + + b. Other rights. + + 1. Moral rights, such as the right of integrity, are not + licensed under this Public License, nor are publicity, + privacy, and/or other similar personality rights; however, to + the extent possible, the Licensor waives and/or agrees not to + assert any such rights held by the Licensor to the limited + extent necessary to allow You to exercise the Licensed + Rights, but not otherwise. + + 2. Patent and trademark rights are not licensed under this + Public License. + + 3. 
To the extent possible, the Licensor waives any right to + collect royalties from You for the exercise of the Licensed + Rights, whether directly or through a collecting society + under any voluntary or waivable statutory or compulsory + licensing scheme. In all other cases the Licensor expressly + reserves any right to collect such royalties. + + +Section 3 -- License Conditions. + +Your exercise of the Licensed Rights is expressly made subject to the +following conditions. + + a. Attribution. + + 1. If You Share the Licensed Material (including in modified + form), You must: + + a. retain the following if it is supplied by the Licensor + with the Licensed Material: + + i. identification of the creator(s) of the Licensed + Material and any others designated to receive + attribution, in any reasonable manner requested by + the Licensor (including by pseudonym if + designated); + + ii. a copyright notice; + + iii. a notice that refers to this Public License; + + iv. a notice that refers to the disclaimer of + warranties; + + v. a URI or hyperlink to the Licensed Material to the + extent reasonably practicable; + + b. indicate if You modified the Licensed Material and + retain an indication of any previous modifications; and + + c. indicate the Licensed Material is licensed under this + Public License, and include the text of, or the URI or + hyperlink to, this Public License. + + 2. You may satisfy the conditions in Section 3(a)(1) in any + reasonable manner based on the medium, means, and context in + which You Share the Licensed Material. For example, it may be + reasonable to satisfy the conditions by providing a URI or + hyperlink to a resource that includes the required + information. + + 3. If requested by the Licensor, You must remove any of the + information required by Section 3(a)(1)(A) to the extent + reasonably practicable. + + b. ShareAlike. + + In addition to the conditions in Section 3(a), if You Share + Adapted Material You produce, the following conditions also apply. + + 1. The Adapter's License You apply must be a Creative Commons + license with the same License Elements, this version or + later, or a BY-SA Compatible License. + + 2. You must include the text of, or the URI or hyperlink to, the + Adapter's License You apply. You may satisfy this condition + in any reasonable manner based on the medium, means, and + context in which You Share Adapted Material. + + 3. You may not offer or impose any additional or different terms + or conditions on, or apply any Effective Technological + Measures to, Adapted Material that restrict exercise of the + rights granted under the Adapter's License You apply. + + +Section 4 -- Sui Generis Database Rights. + +Where the Licensed Rights include Sui Generis Database Rights that +apply to Your use of the Licensed Material: + + a. for the avoidance of doubt, Section 2(a)(1) grants You the right + to extract, reuse, reproduce, and Share all or a substantial + portion of the contents of the database; + + b. if You include all or a substantial portion of the database + contents in a database in which You have Sui Generis Database + Rights, then the database in which You have Sui Generis Database + Rights (but not its individual contents) is Adapted Material, + + including for purposes of Section 3(b); and + c. You must comply with the conditions in Section 3(a) if You Share + all or a substantial portion of the contents of the database. 
+ +For the avoidance of doubt, this Section 4 supplements and does not +replace Your obligations under this Public License where the Licensed +Rights include other Copyright and Similar Rights. + + +Section 5 -- Disclaimer of Warranties and Limitation of Liability. + + a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE + EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS + AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF + ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS, + IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION, + WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR + PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS, + ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT + KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT + ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU. + + b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE + TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION, + NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT, + INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES, + COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR + USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN + ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR + DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR + IN PART, THIS LIMITATION MAY NOT APPLY TO YOU. + + c. The disclaimer of warranties and limitation of liability provided + above shall be interpreted in a manner that, to the extent + possible, most closely approximates an absolute disclaimer and + waiver of all liability. + + +Section 6 -- Term and Termination. + + a. This Public License applies for the term of the Copyright and + Similar Rights licensed here. However, if You fail to comply with + this Public License, then Your rights under this Public License + terminate automatically. + + b. Where Your right to use the Licensed Material has terminated under + Section 6(a), it reinstates: + + 1. automatically as of the date the violation is cured, provided + it is cured within 30 days of Your discovery of the + violation; or + + 2. upon express reinstatement by the Licensor. + + For the avoidance of doubt, this Section 6(b) does not affect any + right the Licensor may have to seek remedies for Your violations + of this Public License. + + c. For the avoidance of doubt, the Licensor may also offer the + Licensed Material under separate terms or conditions or stop + distributing the Licensed Material at any time; however, doing so + will not terminate this Public License. + + d. Sections 1, 5, 6, 7, and 8 survive termination of this Public + License. + + +Section 7 -- Other Terms and Conditions. + + a. The Licensor shall not be bound by any additional or different + terms or conditions communicated by You unless expressly agreed. + + b. Any arrangements, understandings, or agreements regarding the + Licensed Material not stated herein are separate from and + independent of the terms and conditions of this Public License. + + +Section 8 -- Interpretation. + + a. For the avoidance of doubt, this Public License does not, and + shall not be interpreted to, reduce, limit, restrict, or impose + conditions on any use of the Licensed Material that could lawfully + be made without permission under this Public License. + + b. 
To the extent possible, if any provision of this Public License is + deemed unenforceable, it shall be automatically reformed to the + minimum extent necessary to make it enforceable. If the provision + cannot be reformed, it shall be severed from this Public License + without affecting the enforceability of the remaining terms and + conditions. + + c. No term or condition of this Public License will be waived and no + failure to comply consented to unless expressly agreed to by the + Licensor. + + d. Nothing in this Public License constitutes or may be interpreted + as a limitation upon, or waiver of, any privileges and immunities + that apply to the Licensor or You, including from the legal + processes of any jurisdiction or authority. + + +======================================================================= + +Creative Commons is not a party to its public licenses. +Notwithstanding, Creative Commons may elect to apply one of its public +licenses to material it publishes and in those instances will be +considered the "Licensor." Except for the limited purpose of indicating +that material is shared under a Creative Commons public license or as +otherwise permitted by the Creative Commons policies published at +creativecommons.org/policies, Creative Commons does not authorize the +use of the trademark "Creative Commons" or any other trademark or logo +of Creative Commons without its prior written consent including, +without limitation, in connection with any unauthorized modifications +to any of its public licenses or any other arrangements, +understandings, or agreements concerning use of licensed material. For +the avoidance of doubt, this paragraph does not form part of the public +licenses. + +Creative Commons may be contacted at creativecommons.org. diff --git a/vendor/github.com/docker/spdystream/MAINTAINERS b/vendor/github.com/docker/spdystream/MAINTAINERS new file mode 100644 index 00000000000..14e263325c7 --- /dev/null +++ b/vendor/github.com/docker/spdystream/MAINTAINERS @@ -0,0 +1,28 @@ +# Spdystream maintainers file +# +# This file describes who runs the docker/spdystream project and how. +# This is a living document - if you see something out of date or missing, speak up! +# +# It is structured to be consumable by both humans and programs. +# To extract its contents programmatically, use any TOML-compliant parser. +# +# This file is compiled into the MAINTAINERS file in docker/opensource. +# +[Org] + [Org."Core maintainers"] + people = [ + "dmcgowan", + ] + +[people] + +# A reference list of all people associated with the project. +# All other sections should refer to people by their canonical key +# in the people section. 
+ + # ADD YOURSELF HERE IN ALPHABETICAL ORDER + + [people.dmcgowan] + Name = "Derek McGowan" + Email = "derek@docker.com" + GitHub = "dmcgowan" diff --git a/vendor/github.com/docker/spdystream/README.md b/vendor/github.com/docker/spdystream/README.md new file mode 100644 index 00000000000..11cccd0a09e --- /dev/null +++ b/vendor/github.com/docker/spdystream/README.md @@ -0,0 +1,77 @@ +# SpdyStream + +A multiplexed stream library using spdy + +## Usage + +Client example (connecting to mirroring server without auth) + +```go +package main + +import ( + "fmt" + "github.com/docker/spdystream" + "net" + "net/http" +) + +func main() { + conn, err := net.Dial("tcp", "localhost:8080") + if err != nil { + panic(err) + } + spdyConn, err := spdystream.NewConnection(conn, false) + if err != nil { + panic(err) + } + go spdyConn.Serve(spdystream.NoOpStreamHandler) + stream, err := spdyConn.CreateStream(http.Header{}, nil, false) + if err != nil { + panic(err) + } + + stream.Wait() + + fmt.Fprint(stream, "Writing to stream") + + buf := make([]byte, 25) + stream.Read(buf) + fmt.Println(string(buf)) + + stream.Close() +} +``` + +Server example (mirroring server without auth) + +```go +package main + +import ( + "github.com/docker/spdystream" + "net" +) + +func main() { + listener, err := net.Listen("tcp", "localhost:8080") + if err != nil { + panic(err) + } + for { + conn, err := listener.Accept() + if err != nil { + panic(err) + } + spdyConn, err := spdystream.NewConnection(conn, true) + if err != nil { + panic(err) + } + go spdyConn.Serve(spdystream.MirrorStreamHandler) + } +} +``` + +## Copyright and license + +Copyright © 2014-2015 Docker, Inc. All rights reserved, except as follows. Code is released under the Apache 2.0 license. The README.md file, and files in the "docs" folder are licensed under the Creative Commons Attribution 4.0 International License under the terms and conditions set forth in the file "LICENSE.docs". You may obtain a duplicate copy of the same license, titled CC-BY-SA-4.0, at http://creativecommons.org/licenses/by/4.0/. 
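The vendored README above stops at the basic mirroring examples. The connection.go added next also exposes idle-timeout and graceful-shutdown APIs (`SetIdleTimeout`, `CloseWait`). The following is a minimal client sketch, not part of the upstream README, using only calls present in this vendored copy; it assumes the same localhost:8080 mirroring server as the README example, and the 30-second timeout is an arbitrary illustrative value:

```go
package main

import (
	"net"
	"net/http"
	"time"

	"github.com/docker/spdystream"
)

func main() {
	conn, err := net.Dial("tcp", "localhost:8080")
	if err != nil {
		panic(err)
	}
	spdyConn, err := spdystream.NewConnection(conn, false)
	if err != nil {
		panic(err)
	}
	go spdyConn.Serve(spdystream.NoOpStreamHandler)

	// Tear the connection down if it sits idle for 30 seconds
	// (illustrative value, not from the vendored code).
	spdyConn.SetIdleTimeout(30 * time.Second)

	stream, err := spdyConn.CreateStream(http.Header{}, nil, false)
	if err != nil {
		panic(err)
	}
	stream.Wait() // block until the SYN_REPLY for this stream arrives
	stream.Close()

	// Send a GOAWAY frame, then wait for in-flight streams to finish
	// before the underlying net.Conn is closed.
	if err := spdyConn.CloseWait(); err != nil {
		panic(err)
	}
}
```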
diff --git a/vendor/github.com/docker/spdystream/connection.go b/vendor/github.com/docker/spdystream/connection.go
new file mode 100644
index 00000000000..6031a0db1ab
--- /dev/null
+++ b/vendor/github.com/docker/spdystream/connection.go
@@ -0,0 +1,958 @@
+package spdystream
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"net"
+	"net/http"
+	"sync"
+	"time"
+
+	"github.com/docker/spdystream/spdy"
+)
+
+var (
+	ErrInvalidStreamId   = errors.New("Invalid stream id")
+	ErrTimeout           = errors.New("Timeout occurred")
+	ErrReset             = errors.New("Stream reset")
+	ErrWriteClosedStream = errors.New("Write on closed stream")
+)
+
+const (
+	FRAME_WORKERS = 5
+	QUEUE_SIZE    = 50
+)
+
+type StreamHandler func(stream *Stream)
+
+type AuthHandler func(header http.Header, slot uint8, parent uint32) bool
+
+type idleAwareFramer struct {
+	f              *spdy.Framer
+	conn           *Connection
+	writeLock      sync.Mutex
+	resetChan      chan struct{}
+	setTimeoutLock sync.Mutex
+	setTimeoutChan chan time.Duration
+	timeout        time.Duration
+}
+
+func newIdleAwareFramer(framer *spdy.Framer) *idleAwareFramer {
+	iaf := &idleAwareFramer{
+		f:         framer,
+		resetChan: make(chan struct{}, 2),
+		// setTimeoutChan needs to be buffered to avoid deadlocks when calling setIdleTimeout at about
+		// the same time the connection is being closed
+		setTimeoutChan: make(chan time.Duration, 1),
+	}
+	return iaf
+}
+
+func (i *idleAwareFramer) monitor() {
+	var (
+		timer          *time.Timer
+		expired        <-chan time.Time
+		resetChan      = i.resetChan
+		setTimeoutChan = i.setTimeoutChan
+	)
+Loop:
+	for {
+		select {
+		case timeout := <-i.setTimeoutChan:
+			i.timeout = timeout
+			if timeout == 0 {
+				if timer != nil {
+					timer.Stop()
+				}
+			} else {
+				if timer == nil {
+					timer = time.NewTimer(timeout)
+					expired = timer.C
+				} else {
+					timer.Reset(timeout)
+				}
+			}
+		case <-resetChan:
+			if timer != nil && i.timeout > 0 {
+				timer.Reset(i.timeout)
+			}
+		case <-expired:
+			i.conn.streamCond.L.Lock()
+			streams := i.conn.streams
+			i.conn.streams = make(map[spdy.StreamId]*Stream)
+			i.conn.streamCond.Broadcast()
+			i.conn.streamCond.L.Unlock()
+			go func() {
+				for _, stream := range streams {
+					stream.resetStream()
+				}
+				i.conn.Close()
+			}()
+		case <-i.conn.closeChan:
+			if timer != nil {
+				timer.Stop()
+			}
+
+			// Start a goroutine to drain resetChan. This is needed because we've seen
+			// some unit tests with large numbers of goroutines get into a situation
+			// where resetChan fills up, at least 1 call to Write() is still trying to
+			// send to resetChan, the connection gets closed, and this case statement
+			// attempts to grab the write lock that Write() already has, causing a
+			// deadlock.
+			//
+			// See https://github.com/docker/spdystream/issues/49 for more details.
+ go func() { + for _ = range resetChan { + } + }() + + go func() { + for _ = range setTimeoutChan { + } + }() + + i.writeLock.Lock() + close(resetChan) + i.resetChan = nil + i.writeLock.Unlock() + + i.setTimeoutLock.Lock() + close(i.setTimeoutChan) + i.setTimeoutChan = nil + i.setTimeoutLock.Unlock() + + break Loop + } + } + + // Drain resetChan + for _ = range resetChan { + } +} + +func (i *idleAwareFramer) WriteFrame(frame spdy.Frame) error { + i.writeLock.Lock() + defer i.writeLock.Unlock() + if i.resetChan == nil { + return io.EOF + } + err := i.f.WriteFrame(frame) + if err != nil { + return err + } + + i.resetChan <- struct{}{} + + return nil +} + +func (i *idleAwareFramer) ReadFrame() (spdy.Frame, error) { + frame, err := i.f.ReadFrame() + if err != nil { + return nil, err + } + + // resetChan should never be closed since it is only closed + // when the connection has closed its closeChan. This closure + // only occurs after all Reads have finished + // TODO (dmcgowan): refactor relationship into connection + i.resetChan <- struct{}{} + + return frame, nil +} + +func (i *idleAwareFramer) setIdleTimeout(timeout time.Duration) { + i.setTimeoutLock.Lock() + defer i.setTimeoutLock.Unlock() + + if i.setTimeoutChan == nil { + return + } + + i.setTimeoutChan <- timeout +} + +type Connection struct { + conn net.Conn + framer *idleAwareFramer + + closeChan chan bool + goneAway bool + lastStreamChan chan<- *Stream + goAwayTimeout time.Duration + closeTimeout time.Duration + + streamLock *sync.RWMutex + streamCond *sync.Cond + streams map[spdy.StreamId]*Stream + + nextIdLock sync.Mutex + receiveIdLock sync.Mutex + nextStreamId spdy.StreamId + receivedStreamId spdy.StreamId + + pingIdLock sync.Mutex + pingId uint32 + pingChans map[uint32]chan error + + shutdownLock sync.Mutex + shutdownChan chan error + hasShutdown bool + + // for testing https://github.com/docker/spdystream/pull/56 + dataFrameHandler func(*spdy.DataFrame) error +} + +// NewConnection creates a new spdy connection from an existing +// network connection. 
+func NewConnection(conn net.Conn, server bool) (*Connection, error) { + framer, framerErr := spdy.NewFramer(conn, conn) + if framerErr != nil { + return nil, framerErr + } + idleAwareFramer := newIdleAwareFramer(framer) + var sid spdy.StreamId + var rid spdy.StreamId + var pid uint32 + if server { + sid = 2 + rid = 1 + pid = 2 + } else { + sid = 1 + rid = 2 + pid = 1 + } + + streamLock := new(sync.RWMutex) + streamCond := sync.NewCond(streamLock) + + session := &Connection{ + conn: conn, + framer: idleAwareFramer, + + closeChan: make(chan bool), + goAwayTimeout: time.Duration(0), + closeTimeout: time.Duration(0), + + streamLock: streamLock, + streamCond: streamCond, + streams: make(map[spdy.StreamId]*Stream), + nextStreamId: sid, + receivedStreamId: rid, + + pingId: pid, + pingChans: make(map[uint32]chan error), + + shutdownChan: make(chan error), + } + session.dataFrameHandler = session.handleDataFrame + idleAwareFramer.conn = session + go idleAwareFramer.monitor() + + return session, nil +} + +// Ping sends a ping frame across the connection and +// returns the response time +func (s *Connection) Ping() (time.Duration, error) { + pid := s.pingId + s.pingIdLock.Lock() + if s.pingId > 0x7ffffffe { + s.pingId = s.pingId - 0x7ffffffe + } else { + s.pingId = s.pingId + 2 + } + s.pingIdLock.Unlock() + pingChan := make(chan error) + s.pingChans[pid] = pingChan + defer delete(s.pingChans, pid) + + frame := &spdy.PingFrame{Id: pid} + startTime := time.Now() + writeErr := s.framer.WriteFrame(frame) + if writeErr != nil { + return time.Duration(0), writeErr + } + select { + case <-s.closeChan: + return time.Duration(0), errors.New("connection closed") + case err, ok := <-pingChan: + if ok && err != nil { + return time.Duration(0), err + } + break + } + return time.Now().Sub(startTime), nil +} + +// Serve handles frames sent from the server, including reply frames +// which are needed to fully initiate connections. Both clients and servers +// should call Serve in a separate goroutine before creating streams. +func (s *Connection) Serve(newHandler StreamHandler) { + // use a WaitGroup to wait for all frames to be drained after receiving + // go-away. 
+	var wg sync.WaitGroup
+
+	// Partition queues to ensure stream frames are handled
+	// by the same worker, ensuring order is maintained
+	frameQueues := make([]*PriorityFrameQueue, FRAME_WORKERS)
+	for i := 0; i < FRAME_WORKERS; i++ {
+		frameQueues[i] = NewPriorityFrameQueue(QUEUE_SIZE)
+
+		// Ensure frame queue is drained when connection is closed
+		go func(frameQueue *PriorityFrameQueue) {
+			<-s.closeChan
+			frameQueue.Drain()
+		}(frameQueues[i])
+
+		wg.Add(1)
+		go func(frameQueue *PriorityFrameQueue) {
+			// let the WaitGroup know this worker is done
+			defer wg.Done()
+
+			s.frameHandler(frameQueue, newHandler)
+		}(frameQueues[i])
+	}
+
+	var (
+		partitionRoundRobin int
+		goAwayFrame         *spdy.GoAwayFrame
+	)
+Loop:
+	for {
+		readFrame, err := s.framer.ReadFrame()
+		if err != nil {
+			if err != io.EOF {
+				fmt.Errorf("frame read error: %s", err)
+			} else {
+				debugMessage("(%p) EOF received", s)
+			}
+			break
+		}
+		var priority uint8
+		var partition int
+		switch frame := readFrame.(type) {
+		case *spdy.SynStreamFrame:
+			if s.checkStreamFrame(frame) {
+				priority = frame.Priority
+				partition = int(frame.StreamId % FRAME_WORKERS)
+				debugMessage("(%p) Add stream frame: %d ", s, frame.StreamId)
+				s.addStreamFrame(frame)
+			} else {
+				debugMessage("(%p) Rejected stream frame: %d ", s, frame.StreamId)
+				continue
+			}
+		case *spdy.SynReplyFrame:
+			priority = s.getStreamPriority(frame.StreamId)
+			partition = int(frame.StreamId % FRAME_WORKERS)
+		case *spdy.DataFrame:
+			priority = s.getStreamPriority(frame.StreamId)
+			partition = int(frame.StreamId % FRAME_WORKERS)
+		case *spdy.RstStreamFrame:
+			priority = s.getStreamPriority(frame.StreamId)
+			partition = int(frame.StreamId % FRAME_WORKERS)
+		case *spdy.HeadersFrame:
+			priority = s.getStreamPriority(frame.StreamId)
+			partition = int(frame.StreamId % FRAME_WORKERS)
+		case *spdy.PingFrame:
+			priority = 0
+			partition = partitionRoundRobin
+			partitionRoundRobin = (partitionRoundRobin + 1) % FRAME_WORKERS
+		case *spdy.GoAwayFrame:
+			// hold on to the go away frame and exit the loop
+			goAwayFrame = frame
+			break Loop
+		default:
+			priority = 7
+			partition = partitionRoundRobin
+			partitionRoundRobin = (partitionRoundRobin + 1) % FRAME_WORKERS
+		}
+		frameQueues[partition].Push(readFrame, priority)
+	}
+	close(s.closeChan)
+
+	// wait for all frame handler workers to indicate they've drained their queues
+	// before handling the go away frame
+	wg.Wait()
+
+	if goAwayFrame != nil {
+		s.handleGoAwayFrame(goAwayFrame)
+	}
+
+	// now it's safe to close remote channels and empty s.streams
+	s.streamCond.L.Lock()
+	// notify streams that they're now closed, which will
+	// unblock any stream Read() calls
+	for _, stream := range s.streams {
+		stream.closeRemoteChannels()
+	}
+	s.streams = make(map[spdy.StreamId]*Stream)
+	s.streamCond.Broadcast()
+	s.streamCond.L.Unlock()
+}
+
+func (s *Connection) frameHandler(frameQueue *PriorityFrameQueue, newHandler StreamHandler) {
+	for {
+		popFrame := frameQueue.Pop()
+		if popFrame == nil {
+			return
+		}
+
+		var frameErr error
+		switch frame := popFrame.(type) {
+		case *spdy.SynStreamFrame:
+			frameErr = s.handleStreamFrame(frame, newHandler)
+		case *spdy.SynReplyFrame:
+			frameErr = s.handleReplyFrame(frame)
+		case *spdy.DataFrame:
+			frameErr = s.dataFrameHandler(frame)
+		case *spdy.RstStreamFrame:
+			frameErr = s.handleResetFrame(frame)
+		case *spdy.HeadersFrame:
+			frameErr = s.handleHeaderFrame(frame)
+		case *spdy.PingFrame:
+			frameErr = s.handlePingFrame(frame)
+		case *spdy.GoAwayFrame:
+			frameErr = s.handleGoAwayFrame(frame)
+
default: + frameErr = fmt.Errorf("unhandled frame type: %T", frame) + } + + if frameErr != nil { + fmt.Errorf("frame handling error: %s", frameErr) + } + } +} + +func (s *Connection) getStreamPriority(streamId spdy.StreamId) uint8 { + stream, streamOk := s.getStream(streamId) + if !streamOk { + return 7 + } + return stream.priority +} + +func (s *Connection) addStreamFrame(frame *spdy.SynStreamFrame) { + var parent *Stream + if frame.AssociatedToStreamId != spdy.StreamId(0) { + parent, _ = s.getStream(frame.AssociatedToStreamId) + } + + stream := &Stream{ + streamId: frame.StreamId, + parent: parent, + conn: s, + startChan: make(chan error), + headers: frame.Headers, + finished: (frame.CFHeader.Flags & spdy.ControlFlagUnidirectional) != 0x00, + replyCond: sync.NewCond(new(sync.Mutex)), + dataChan: make(chan []byte), + headerChan: make(chan http.Header), + closeChan: make(chan bool), + } + if frame.CFHeader.Flags&spdy.ControlFlagFin != 0x00 { + stream.closeRemoteChannels() + } + + s.addStream(stream) +} + +// checkStreamFrame checks to see if a stream frame is allowed. +// If the stream is invalid, then a reset frame with protocol error +// will be returned. +func (s *Connection) checkStreamFrame(frame *spdy.SynStreamFrame) bool { + s.receiveIdLock.Lock() + defer s.receiveIdLock.Unlock() + if s.goneAway { + return false + } + validationErr := s.validateStreamId(frame.StreamId) + if validationErr != nil { + go func() { + resetErr := s.sendResetFrame(spdy.ProtocolError, frame.StreamId) + if resetErr != nil { + fmt.Errorf("reset error: %s", resetErr) + } + }() + return false + } + return true +} + +func (s *Connection) handleStreamFrame(frame *spdy.SynStreamFrame, newHandler StreamHandler) error { + stream, ok := s.getStream(frame.StreamId) + if !ok { + return fmt.Errorf("Missing stream: %d", frame.StreamId) + } + + newHandler(stream) + + return nil +} + +func (s *Connection) handleReplyFrame(frame *spdy.SynReplyFrame) error { + debugMessage("(%p) Reply frame received for %d", s, frame.StreamId) + stream, streamOk := s.getStream(frame.StreamId) + if !streamOk { + debugMessage("Reply frame gone away for %d", frame.StreamId) + // Stream has already gone away + return nil + } + if stream.replied { + // Stream has already received reply + return nil + } + stream.replied = true + + // TODO Check for error + if (frame.CFHeader.Flags & spdy.ControlFlagFin) != 0x00 { + s.remoteStreamFinish(stream) + } + + close(stream.startChan) + + return nil +} + +func (s *Connection) handleResetFrame(frame *spdy.RstStreamFrame) error { + stream, streamOk := s.getStream(frame.StreamId) + if !streamOk { + // Stream has already been removed + return nil + } + s.removeStream(stream) + stream.closeRemoteChannels() + + if !stream.replied { + stream.replied = true + stream.startChan <- ErrReset + close(stream.startChan) + } + + stream.finishLock.Lock() + stream.finished = true + stream.finishLock.Unlock() + + return nil +} + +func (s *Connection) handleHeaderFrame(frame *spdy.HeadersFrame) error { + stream, streamOk := s.getStream(frame.StreamId) + if !streamOk { + // Stream has already gone away + return nil + } + if !stream.replied { + // No reply received...Protocol error? + return nil + } + + // TODO limit headers while not blocking (use buffered chan or goroutine?) 
+ select { + case <-stream.closeChan: + return nil + case stream.headerChan <- frame.Headers: + } + + if (frame.CFHeader.Flags & spdy.ControlFlagFin) != 0x00 { + s.remoteStreamFinish(stream) + } + + return nil +} + +func (s *Connection) handleDataFrame(frame *spdy.DataFrame) error { + debugMessage("(%p) Data frame received for %d", s, frame.StreamId) + stream, streamOk := s.getStream(frame.StreamId) + if !streamOk { + debugMessage("(%p) Data frame gone away for %d", s, frame.StreamId) + // Stream has already gone away + return nil + } + if !stream.replied { + debugMessage("(%p) Data frame not replied %d", s, frame.StreamId) + // No reply received...Protocol error? + return nil + } + + debugMessage("(%p) (%d) Data frame handling", stream, stream.streamId) + if len(frame.Data) > 0 { + stream.dataLock.RLock() + select { + case <-stream.closeChan: + debugMessage("(%p) (%d) Data frame not sent (stream shut down)", stream, stream.streamId) + case stream.dataChan <- frame.Data: + debugMessage("(%p) (%d) Data frame sent", stream, stream.streamId) + } + stream.dataLock.RUnlock() + } + if (frame.Flags & spdy.DataFlagFin) != 0x00 { + s.remoteStreamFinish(stream) + } + return nil +} + +func (s *Connection) handlePingFrame(frame *spdy.PingFrame) error { + if s.pingId&0x01 != frame.Id&0x01 { + return s.framer.WriteFrame(frame) + } + pingChan, pingOk := s.pingChans[frame.Id] + if pingOk { + close(pingChan) + } + return nil +} + +func (s *Connection) handleGoAwayFrame(frame *spdy.GoAwayFrame) error { + debugMessage("(%p) Go away received", s) + s.receiveIdLock.Lock() + if s.goneAway { + s.receiveIdLock.Unlock() + return nil + } + s.goneAway = true + s.receiveIdLock.Unlock() + + if s.lastStreamChan != nil { + stream, _ := s.getStream(frame.LastGoodStreamId) + go func() { + s.lastStreamChan <- stream + }() + } + + // Do not block frame handler waiting for closure + go s.shutdown(s.goAwayTimeout) + + return nil +} + +func (s *Connection) remoteStreamFinish(stream *Stream) { + stream.closeRemoteChannels() + + stream.finishLock.Lock() + if stream.finished { + // Stream is fully closed, cleanup + s.removeStream(stream) + } + stream.finishLock.Unlock() +} + +// CreateStream creates a new spdy stream using the parameters for +// creating the stream frame. The stream frame will be sent upon +// calling this function, however this function does not wait for +// the reply frame. If waiting for the reply is desired, use +// the stream Wait or WaitTimeout function on the stream returned +// by this function. +func (s *Connection) CreateStream(headers http.Header, parent *Stream, fin bool) (*Stream, error) { + // MUST synchronize stream creation (all the way to writing the frame) + // as stream IDs **MUST** increase monotonically. 
+	s.nextIdLock.Lock()
+	defer s.nextIdLock.Unlock()
+
+	streamId := s.getNextStreamId()
+	if streamId == 0 {
+		return nil, fmt.Errorf("Unable to get new stream id")
+	}
+
+	stream := &Stream{
+		streamId:   streamId,
+		parent:     parent,
+		conn:       s,
+		startChan:  make(chan error),
+		headers:    headers,
+		dataChan:   make(chan []byte),
+		headerChan: make(chan http.Header),
+		closeChan:  make(chan bool),
+	}
+
+	debugMessage("(%p) (%p) Create stream", s, stream)
+
+	s.addStream(stream)
+
+	return stream, s.sendStream(stream, fin)
+}
+
+func (s *Connection) shutdown(closeTimeout time.Duration) {
+	// TODO Ensure this isn't called multiple times
+	s.shutdownLock.Lock()
+	if s.hasShutdown {
+		s.shutdownLock.Unlock()
+		return
+	}
+	s.hasShutdown = true
+	s.shutdownLock.Unlock()
+
+	var timeout <-chan time.Time
+	if closeTimeout > time.Duration(0) {
+		timeout = time.After(closeTimeout)
+	}
+	streamsClosed := make(chan bool)
+
+	go func() {
+		s.streamCond.L.Lock()
+		for len(s.streams) > 0 {
+			debugMessage("Streams opened: %d, %#v", len(s.streams), s.streams)
+			s.streamCond.Wait()
+		}
+		s.streamCond.L.Unlock()
+		close(streamsClosed)
+	}()
+
+	var err error
+	select {
+	case <-streamsClosed:
+		// No active streams, close should be safe
+		err = s.conn.Close()
+	case <-timeout:
+		// Force ungraceful close
+		err = s.conn.Close()
+		// Wait for cleanup to clear active streams
+		<-streamsClosed
+	}
+
+	if err != nil {
+		duration := 10 * time.Minute
+		time.AfterFunc(duration, func() {
+			select {
+			case err, ok := <-s.shutdownChan:
+				if ok {
+					fmt.Errorf("Unhandled close error after %s: %s", duration, err)
+				}
+			default:
+			}
+		})
+		s.shutdownChan <- err
+	}
+	close(s.shutdownChan)
+
+	return
+}
+
+// Close closes the spdy connection by sending a GoAway frame and initiating shutdown
+func (s *Connection) Close() error {
+	s.receiveIdLock.Lock()
+	if s.goneAway {
+		s.receiveIdLock.Unlock()
+		return nil
+	}
+	s.goneAway = true
+	s.receiveIdLock.Unlock()
+
+	var lastStreamId spdy.StreamId
+	if s.receivedStreamId > 2 {
+		lastStreamId = s.receivedStreamId - 2
+	}
+
+	goAwayFrame := &spdy.GoAwayFrame{
+		LastGoodStreamId: lastStreamId,
+		Status:           spdy.GoAwayOK,
+	}
+
+	err := s.framer.WriteFrame(goAwayFrame)
+	if err != nil {
+		return err
+	}
+
+	go s.shutdown(s.closeTimeout)
+
+	return nil
+}
+
+// CloseWait closes the connection and waits for shutdown
+// to finish. Note the underlying network Connection
+// is not closed until the end of shutdown.
+func (s *Connection) CloseWait() error {
+	closeErr := s.Close()
+	if closeErr != nil {
+		return closeErr
+	}
+	shutdownErr, ok := <-s.shutdownChan
+	if ok {
+		return shutdownErr
+	}
+	return nil
+}
+
+// Wait waits for the connection to finish shutdown or for
+// the wait timeout duration to expire. This needs to be
+// called either after Close has been called or the GOAWAY frame
+// has been received. If the wait timeout is 0, this function
+// will block until shutdown finishes. If wait is never called
+// and a shutdown error occurs, that error will be logged as an
+// unhandled error.
+func (s *Connection) Wait(waitTimeout time.Duration) error {
+	var timeout <-chan time.Time
+	if waitTimeout > time.Duration(0) {
+		timeout = time.After(waitTimeout)
+	}
+
+	select {
+	case err, ok := <-s.shutdownChan:
+		if ok {
+			return err
+		}
+	case <-timeout:
+		return ErrTimeout
+	}
+	return nil
+}
+
+// NotifyClose registers a channel to be called when the remote
+// peer indicates connection closure. The last stream to be
+// received by the remote will be sent on the channel.
The notify +// timeout will determine the duration between go away received +// and the connection being closed. +func (s *Connection) NotifyClose(c chan<- *Stream, timeout time.Duration) { + s.goAwayTimeout = timeout + s.lastStreamChan = c +} + +// SetCloseTimeout sets the amount of time close will wait for +// streams to finish before terminating the underlying network +// connection. Setting the timeout to 0 will cause close to +// wait forever, which is the default. +func (s *Connection) SetCloseTimeout(timeout time.Duration) { + s.closeTimeout = timeout +} + +// SetIdleTimeout sets the amount of time the connection may sit idle before +// it is forcefully terminated. +func (s *Connection) SetIdleTimeout(timeout time.Duration) { + s.framer.setIdleTimeout(timeout) +} + +func (s *Connection) sendHeaders(headers http.Header, stream *Stream, fin bool) error { + var flags spdy.ControlFlags + if fin { + flags = spdy.ControlFlagFin + } + + headerFrame := &spdy.HeadersFrame{ + StreamId: stream.streamId, + Headers: headers, + CFHeader: spdy.ControlFrameHeader{Flags: flags}, + } + + return s.framer.WriteFrame(headerFrame) +} + +func (s *Connection) sendReply(headers http.Header, stream *Stream, fin bool) error { + var flags spdy.ControlFlags + if fin { + flags = spdy.ControlFlagFin + } + + replyFrame := &spdy.SynReplyFrame{ + StreamId: stream.streamId, + Headers: headers, + CFHeader: spdy.ControlFrameHeader{Flags: flags}, + } + + return s.framer.WriteFrame(replyFrame) +} + +func (s *Connection) sendResetFrame(status spdy.RstStreamStatus, streamId spdy.StreamId) error { + resetFrame := &spdy.RstStreamFrame{ + StreamId: streamId, + Status: status, + } + + return s.framer.WriteFrame(resetFrame) +} + +func (s *Connection) sendReset(status spdy.RstStreamStatus, stream *Stream) error { + return s.sendResetFrame(status, stream.streamId) +} + +func (s *Connection) sendStream(stream *Stream, fin bool) error { + var flags spdy.ControlFlags + if fin { + flags = spdy.ControlFlagFin + stream.finished = true + } + + var parentId spdy.StreamId + if stream.parent != nil { + parentId = stream.parent.streamId + } + + streamFrame := &spdy.SynStreamFrame{ + StreamId: spdy.StreamId(stream.streamId), + AssociatedToStreamId: spdy.StreamId(parentId), + Headers: stream.headers, + CFHeader: spdy.ControlFrameHeader{Flags: flags}, + } + + return s.framer.WriteFrame(streamFrame) +} + +// getNextStreamId returns the next sequential id +// every call should produce a unique value or an error +func (s *Connection) getNextStreamId() spdy.StreamId { + sid := s.nextStreamId + if sid > 0x7fffffff { + return 0 + } + s.nextStreamId = s.nextStreamId + 2 + return sid +} + +// PeekNextStreamId returns the next sequential id and keeps the next id untouched +func (s *Connection) PeekNextStreamId() spdy.StreamId { + sid := s.nextStreamId + return sid +} + +func (s *Connection) validateStreamId(rid spdy.StreamId) error { + if rid > 0x7fffffff || rid < s.receivedStreamId { + return ErrInvalidStreamId + } + s.receivedStreamId = rid + 2 + return nil +} + +func (s *Connection) addStream(stream *Stream) { + s.streamCond.L.Lock() + s.streams[stream.streamId] = stream + debugMessage("(%p) (%p) Stream added, broadcasting: %d", s, stream, stream.streamId) + s.streamCond.Broadcast() + s.streamCond.L.Unlock() +} + +func (s *Connection) removeStream(stream *Stream) { + s.streamCond.L.Lock() + delete(s.streams, stream.streamId) + debugMessage("(%p) (%p) Stream removed, broadcasting: %d", s, stream, stream.streamId) + s.streamCond.Broadcast() + 
s.streamCond.L.Unlock()
+}
+
+func (s *Connection) getStream(streamId spdy.StreamId) (stream *Stream, ok bool) {
+	s.streamLock.RLock()
+	stream, ok = s.streams[streamId]
+	s.streamLock.RUnlock()
+	return
+}
+
+// FindStream looks up the given stream id and either waits for the
+// stream to be found or returns nil if the stream id is no longer
+// valid.
+func (s *Connection) FindStream(streamId uint32) *Stream {
+	var stream *Stream
+	var ok bool
+	s.streamCond.L.Lock()
+	stream, ok = s.streams[spdy.StreamId(streamId)]
+	debugMessage("(%p) Found stream %d? %t", s, spdy.StreamId(streamId), ok)
+	for !ok && streamId >= uint32(s.receivedStreamId) {
+		s.streamCond.Wait()
+		stream, ok = s.streams[spdy.StreamId(streamId)]
+	}
+	s.streamCond.L.Unlock()
+	return stream
+}
+
+func (s *Connection) CloseChan() <-chan bool {
+	return s.closeChan
+}
diff --git a/vendor/github.com/docker/spdystream/handlers.go b/vendor/github.com/docker/spdystream/handlers.go
new file mode 100644
index 00000000000..b59fa5fdcd0
--- /dev/null
+++ b/vendor/github.com/docker/spdystream/handlers.go
@@ -0,0 +1,38 @@
+package spdystream
+
+import (
+	"io"
+	"net/http"
+)
+
+// MirrorStreamHandler mirrors all streams.
+func MirrorStreamHandler(stream *Stream) {
+	replyErr := stream.SendReply(http.Header{}, false)
+	if replyErr != nil {
+		return
+	}
+
+	go func() {
+		io.Copy(stream, stream)
+		stream.Close()
+	}()
+	go func() {
+		for {
+			header, receiveErr := stream.ReceiveHeader()
+			if receiveErr != nil {
+				return
+			}
+			sendErr := stream.SendHeader(header, false)
+			if sendErr != nil {
+				return
+			}
+		}
+	}()
+}
+
+// NoOpStreamHandler does nothing when a stream connects; it is most
+// likely used with RejectAuthHandler, which will not allow any
+// streams to make it to the stream handler.
+func NoOpStreamHandler(stream *Stream) {
+	stream.SendReply(http.Header{}, false)
+}
diff --git a/vendor/github.com/docker/spdystream/priority.go b/vendor/github.com/docker/spdystream/priority.go
new file mode 100644
index 00000000000..fc8582b5c6f
--- /dev/null
+++ b/vendor/github.com/docker/spdystream/priority.go
@@ -0,0 +1,98 @@
+package spdystream
+
+import (
+	"container/heap"
+	"sync"
+
+	"github.com/docker/spdystream/spdy"
+)
+
+type prioritizedFrame struct {
+	frame    spdy.Frame
+	priority uint8
+	insertId uint64
+}
+
+type frameQueue []*prioritizedFrame
+
+func (fq frameQueue) Len() int {
+	return len(fq)
+}
+
+func (fq frameQueue) Less(i, j int) bool {
+	if fq[i].priority == fq[j].priority {
+		return fq[i].insertId < fq[j].insertId
+	}
+	return fq[i].priority < fq[j].priority
+}
+
+func (fq frameQueue) Swap(i, j int) {
+	fq[i], fq[j] = fq[j], fq[i]
+}
+
+func (fq *frameQueue) Push(x interface{}) {
+	*fq = append(*fq, x.(*prioritizedFrame))
+}
+
+func (fq *frameQueue) Pop() interface{} {
+	old := *fq
+	n := len(old)
+	*fq = old[0 : n-1]
+	return old[n-1]
+}
+
+type PriorityFrameQueue struct {
+	queue        *frameQueue
+	c            *sync.Cond
+	size         int
+	nextInsertId uint64
+	drain        bool
+}
+
+func NewPriorityFrameQueue(size int) *PriorityFrameQueue {
+	queue := make(frameQueue, 0, size)
+	heap.Init(&queue)
+
+	return &PriorityFrameQueue{
+		queue: &queue,
+		size:  size,
+		c:     sync.NewCond(&sync.Mutex{}),
+	}
+}
+
+func (q *PriorityFrameQueue) Push(frame spdy.Frame, priority uint8) {
+	q.c.L.Lock()
+	defer q.c.L.Unlock()
+	for q.queue.Len() >= q.size {
+		q.c.Wait()
+	}
+	pFrame := &prioritizedFrame{
+		frame:    frame,
+		priority: priority,
+		insertId: q.nextInsertId,
+	}
+	q.nextInsertId = q.nextInsertId + 1
+	heap.Push(q.queue, pFrame)
+	q.c.Signal()
+}
+
+func (q
*PriorityFrameQueue) Pop() spdy.Frame { + q.c.L.Lock() + defer q.c.L.Unlock() + for q.queue.Len() == 0 { + if q.drain { + return nil + } + q.c.Wait() + } + frame := heap.Pop(q.queue).(*prioritizedFrame).frame + q.c.Signal() + return frame +} + +func (q *PriorityFrameQueue) Drain() { + q.c.L.Lock() + defer q.c.L.Unlock() + q.drain = true + q.c.Broadcast() +} diff --git a/vendor/github.com/docker/spdystream/spdy/dictionary.go b/vendor/github.com/docker/spdystream/spdy/dictionary.go new file mode 100644 index 00000000000..5a5ff0e14cd --- /dev/null +++ b/vendor/github.com/docker/spdystream/spdy/dictionary.go @@ -0,0 +1,187 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package spdy + +// headerDictionary is the dictionary sent to the zlib compressor/decompressor. +var headerDictionary = []byte{ + 0x00, 0x00, 0x00, 0x07, 0x6f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x00, 0x00, 0x00, 0x04, 0x68, + 0x65, 0x61, 0x64, 0x00, 0x00, 0x00, 0x04, 0x70, + 0x6f, 0x73, 0x74, 0x00, 0x00, 0x00, 0x03, 0x70, + 0x75, 0x74, 0x00, 0x00, 0x00, 0x06, 0x64, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x00, 0x00, 0x00, 0x05, + 0x74, 0x72, 0x61, 0x63, 0x65, 0x00, 0x00, 0x00, + 0x06, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x00, + 0x00, 0x00, 0x0e, 0x61, 0x63, 0x63, 0x65, 0x70, + 0x74, 0x2d, 0x63, 0x68, 0x61, 0x72, 0x73, 0x65, + 0x74, 0x00, 0x00, 0x00, 0x0f, 0x61, 0x63, 0x63, + 0x65, 0x70, 0x74, 0x2d, 0x65, 0x6e, 0x63, 0x6f, + 0x64, 0x69, 0x6e, 0x67, 0x00, 0x00, 0x00, 0x0f, + 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x2d, 0x6c, + 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x00, + 0x00, 0x00, 0x0d, 0x61, 0x63, 0x63, 0x65, 0x70, + 0x74, 0x2d, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x73, + 0x00, 0x00, 0x00, 0x03, 0x61, 0x67, 0x65, 0x00, + 0x00, 0x00, 0x05, 0x61, 0x6c, 0x6c, 0x6f, 0x77, + 0x00, 0x00, 0x00, 0x0d, 0x61, 0x75, 0x74, 0x68, + 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x00, 0x00, 0x00, 0x0d, 0x63, 0x61, 0x63, + 0x68, 0x65, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, + 0x6f, 0x6c, 0x00, 0x00, 0x00, 0x0a, 0x63, 0x6f, + 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x00, 0x00, 0x00, 0x0c, 0x63, 0x6f, 0x6e, 0x74, + 0x65, 0x6e, 0x74, 0x2d, 0x62, 0x61, 0x73, 0x65, + 0x00, 0x00, 0x00, 0x10, 0x63, 0x6f, 0x6e, 0x74, + 0x65, 0x6e, 0x74, 0x2d, 0x65, 0x6e, 0x63, 0x6f, + 0x64, 0x69, 0x6e, 0x67, 0x00, 0x00, 0x00, 0x10, + 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2d, + 0x6c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, + 0x00, 0x00, 0x00, 0x0e, 0x63, 0x6f, 0x6e, 0x74, + 0x65, 0x6e, 0x74, 0x2d, 0x6c, 0x65, 0x6e, 0x67, + 0x74, 0x68, 0x00, 0x00, 0x00, 0x10, 0x63, 0x6f, + 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2d, 0x6c, 0x6f, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x00, 0x00, + 0x00, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, + 0x74, 0x2d, 0x6d, 0x64, 0x35, 0x00, 0x00, 0x00, + 0x0d, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, + 0x2d, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x00, 0x00, + 0x00, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, + 0x74, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x00, 0x00, + 0x00, 0x04, 0x64, 0x61, 0x74, 0x65, 0x00, 0x00, + 0x00, 0x04, 0x65, 0x74, 0x61, 0x67, 0x00, 0x00, + 0x00, 0x06, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, + 0x00, 0x00, 0x00, 0x07, 0x65, 0x78, 0x70, 0x69, + 0x72, 0x65, 0x73, 0x00, 0x00, 0x00, 0x04, 0x66, + 0x72, 0x6f, 0x6d, 0x00, 0x00, 0x00, 0x04, 0x68, + 0x6f, 0x73, 0x74, 0x00, 0x00, 0x00, 0x08, 0x69, + 0x66, 0x2d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x00, + 0x00, 0x00, 0x11, 0x69, 0x66, 0x2d, 0x6d, 0x6f, + 0x64, 0x69, 0x66, 0x69, 0x65, 
0x64, 0x2d, 0x73, + 0x69, 0x6e, 0x63, 0x65, 0x00, 0x00, 0x00, 0x0d, + 0x69, 0x66, 0x2d, 0x6e, 0x6f, 0x6e, 0x65, 0x2d, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x00, 0x00, 0x00, + 0x08, 0x69, 0x66, 0x2d, 0x72, 0x61, 0x6e, 0x67, + 0x65, 0x00, 0x00, 0x00, 0x13, 0x69, 0x66, 0x2d, + 0x75, 0x6e, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, + 0x65, 0x64, 0x2d, 0x73, 0x69, 0x6e, 0x63, 0x65, + 0x00, 0x00, 0x00, 0x0d, 0x6c, 0x61, 0x73, 0x74, + 0x2d, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65, + 0x64, 0x00, 0x00, 0x00, 0x08, 0x6c, 0x6f, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x00, 0x00, 0x00, + 0x0c, 0x6d, 0x61, 0x78, 0x2d, 0x66, 0x6f, 0x72, + 0x77, 0x61, 0x72, 0x64, 0x73, 0x00, 0x00, 0x00, + 0x06, 0x70, 0x72, 0x61, 0x67, 0x6d, 0x61, 0x00, + 0x00, 0x00, 0x12, 0x70, 0x72, 0x6f, 0x78, 0x79, + 0x2d, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, + 0x69, 0x63, 0x61, 0x74, 0x65, 0x00, 0x00, 0x00, + 0x13, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2d, 0x61, + 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x00, 0x00, 0x00, 0x05, + 0x72, 0x61, 0x6e, 0x67, 0x65, 0x00, 0x00, 0x00, + 0x07, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x72, + 0x00, 0x00, 0x00, 0x0b, 0x72, 0x65, 0x74, 0x72, + 0x79, 0x2d, 0x61, 0x66, 0x74, 0x65, 0x72, 0x00, + 0x00, 0x00, 0x06, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x00, 0x00, 0x00, 0x02, 0x74, 0x65, 0x00, + 0x00, 0x00, 0x07, 0x74, 0x72, 0x61, 0x69, 0x6c, + 0x65, 0x72, 0x00, 0x00, 0x00, 0x11, 0x74, 0x72, + 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x2d, 0x65, + 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x00, + 0x00, 0x00, 0x07, 0x75, 0x70, 0x67, 0x72, 0x61, + 0x64, 0x65, 0x00, 0x00, 0x00, 0x0a, 0x75, 0x73, + 0x65, 0x72, 0x2d, 0x61, 0x67, 0x65, 0x6e, 0x74, + 0x00, 0x00, 0x00, 0x04, 0x76, 0x61, 0x72, 0x79, + 0x00, 0x00, 0x00, 0x03, 0x76, 0x69, 0x61, 0x00, + 0x00, 0x00, 0x07, 0x77, 0x61, 0x72, 0x6e, 0x69, + 0x6e, 0x67, 0x00, 0x00, 0x00, 0x10, 0x77, 0x77, + 0x77, 0x2d, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, + 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x00, 0x00, + 0x00, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, + 0x00, 0x00, 0x00, 0x03, 0x67, 0x65, 0x74, 0x00, + 0x00, 0x00, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x00, 0x00, 0x00, 0x06, 0x32, 0x30, 0x30, + 0x20, 0x4f, 0x4b, 0x00, 0x00, 0x00, 0x07, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x00, 0x00, + 0x00, 0x08, 0x48, 0x54, 0x54, 0x50, 0x2f, 0x31, + 0x2e, 0x31, 0x00, 0x00, 0x00, 0x03, 0x75, 0x72, + 0x6c, 0x00, 0x00, 0x00, 0x06, 0x70, 0x75, 0x62, + 0x6c, 0x69, 0x63, 0x00, 0x00, 0x00, 0x0a, 0x73, + 0x65, 0x74, 0x2d, 0x63, 0x6f, 0x6f, 0x6b, 0x69, + 0x65, 0x00, 0x00, 0x00, 0x0a, 0x6b, 0x65, 0x65, + 0x70, 0x2d, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x00, + 0x00, 0x00, 0x06, 0x6f, 0x72, 0x69, 0x67, 0x69, + 0x6e, 0x31, 0x30, 0x30, 0x31, 0x30, 0x31, 0x32, + 0x30, 0x31, 0x32, 0x30, 0x32, 0x32, 0x30, 0x35, + 0x32, 0x30, 0x36, 0x33, 0x30, 0x30, 0x33, 0x30, + 0x32, 0x33, 0x30, 0x33, 0x33, 0x30, 0x34, 0x33, + 0x30, 0x35, 0x33, 0x30, 0x36, 0x33, 0x30, 0x37, + 0x34, 0x30, 0x32, 0x34, 0x30, 0x35, 0x34, 0x30, + 0x36, 0x34, 0x30, 0x37, 0x34, 0x30, 0x38, 0x34, + 0x30, 0x39, 0x34, 0x31, 0x30, 0x34, 0x31, 0x31, + 0x34, 0x31, 0x32, 0x34, 0x31, 0x33, 0x34, 0x31, + 0x34, 0x34, 0x31, 0x35, 0x34, 0x31, 0x36, 0x34, + 0x31, 0x37, 0x35, 0x30, 0x32, 0x35, 0x30, 0x34, + 0x35, 0x30, 0x35, 0x32, 0x30, 0x33, 0x20, 0x4e, + 0x6f, 0x6e, 0x2d, 0x41, 0x75, 0x74, 0x68, 0x6f, + 0x72, 0x69, 0x74, 0x61, 0x74, 0x69, 0x76, 0x65, + 0x20, 0x49, 0x6e, 0x66, 0x6f, 0x72, 0x6d, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x32, 0x30, 0x34, 0x20, + 0x4e, 0x6f, 0x20, 0x43, 0x6f, 0x6e, 0x74, 0x65, + 0x6e, 0x74, 0x33, 0x30, 0x31, 
0x20, 0x4d, 0x6f, + 0x76, 0x65, 0x64, 0x20, 0x50, 0x65, 0x72, 0x6d, + 0x61, 0x6e, 0x65, 0x6e, 0x74, 0x6c, 0x79, 0x34, + 0x30, 0x30, 0x20, 0x42, 0x61, 0x64, 0x20, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x34, 0x30, + 0x31, 0x20, 0x55, 0x6e, 0x61, 0x75, 0x74, 0x68, + 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x34, 0x30, + 0x33, 0x20, 0x46, 0x6f, 0x72, 0x62, 0x69, 0x64, + 0x64, 0x65, 0x6e, 0x34, 0x30, 0x34, 0x20, 0x4e, + 0x6f, 0x74, 0x20, 0x46, 0x6f, 0x75, 0x6e, 0x64, + 0x35, 0x30, 0x30, 0x20, 0x49, 0x6e, 0x74, 0x65, + 0x72, 0x6e, 0x61, 0x6c, 0x20, 0x53, 0x65, 0x72, + 0x76, 0x65, 0x72, 0x20, 0x45, 0x72, 0x72, 0x6f, + 0x72, 0x35, 0x30, 0x31, 0x20, 0x4e, 0x6f, 0x74, + 0x20, 0x49, 0x6d, 0x70, 0x6c, 0x65, 0x6d, 0x65, + 0x6e, 0x74, 0x65, 0x64, 0x35, 0x30, 0x33, 0x20, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x20, + 0x55, 0x6e, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, + 0x62, 0x6c, 0x65, 0x4a, 0x61, 0x6e, 0x20, 0x46, + 0x65, 0x62, 0x20, 0x4d, 0x61, 0x72, 0x20, 0x41, + 0x70, 0x72, 0x20, 0x4d, 0x61, 0x79, 0x20, 0x4a, + 0x75, 0x6e, 0x20, 0x4a, 0x75, 0x6c, 0x20, 0x41, + 0x75, 0x67, 0x20, 0x53, 0x65, 0x70, 0x74, 0x20, + 0x4f, 0x63, 0x74, 0x20, 0x4e, 0x6f, 0x76, 0x20, + 0x44, 0x65, 0x63, 0x20, 0x30, 0x30, 0x3a, 0x30, + 0x30, 0x3a, 0x30, 0x30, 0x20, 0x4d, 0x6f, 0x6e, + 0x2c, 0x20, 0x54, 0x75, 0x65, 0x2c, 0x20, 0x57, + 0x65, 0x64, 0x2c, 0x20, 0x54, 0x68, 0x75, 0x2c, + 0x20, 0x46, 0x72, 0x69, 0x2c, 0x20, 0x53, 0x61, + 0x74, 0x2c, 0x20, 0x53, 0x75, 0x6e, 0x2c, 0x20, + 0x47, 0x4d, 0x54, 0x63, 0x68, 0x75, 0x6e, 0x6b, + 0x65, 0x64, 0x2c, 0x74, 0x65, 0x78, 0x74, 0x2f, + 0x68, 0x74, 0x6d, 0x6c, 0x2c, 0x69, 0x6d, 0x61, + 0x67, 0x65, 0x2f, 0x70, 0x6e, 0x67, 0x2c, 0x69, + 0x6d, 0x61, 0x67, 0x65, 0x2f, 0x6a, 0x70, 0x67, + 0x2c, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x2f, 0x67, + 0x69, 0x66, 0x2c, 0x61, 0x70, 0x70, 0x6c, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x78, + 0x6d, 0x6c, 0x2c, 0x61, 0x70, 0x70, 0x6c, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x78, + 0x68, 0x74, 0x6d, 0x6c, 0x2b, 0x78, 0x6d, 0x6c, + 0x2c, 0x74, 0x65, 0x78, 0x74, 0x2f, 0x70, 0x6c, + 0x61, 0x69, 0x6e, 0x2c, 0x74, 0x65, 0x78, 0x74, + 0x2f, 0x6a, 0x61, 0x76, 0x61, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x2c, 0x70, 0x75, 0x62, 0x6c, + 0x69, 0x63, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, + 0x65, 0x6d, 0x61, 0x78, 0x2d, 0x61, 0x67, 0x65, + 0x3d, 0x67, 0x7a, 0x69, 0x70, 0x2c, 0x64, 0x65, + 0x66, 0x6c, 0x61, 0x74, 0x65, 0x2c, 0x73, 0x64, + 0x63, 0x68, 0x63, 0x68, 0x61, 0x72, 0x73, 0x65, + 0x74, 0x3d, 0x75, 0x74, 0x66, 0x2d, 0x38, 0x63, + 0x68, 0x61, 0x72, 0x73, 0x65, 0x74, 0x3d, 0x69, + 0x73, 0x6f, 0x2d, 0x38, 0x38, 0x35, 0x39, 0x2d, + 0x31, 0x2c, 0x75, 0x74, 0x66, 0x2d, 0x2c, 0x2a, + 0x2c, 0x65, 0x6e, 0x71, 0x3d, 0x30, 0x2e, +} diff --git a/vendor/github.com/docker/spdystream/spdy/read.go b/vendor/github.com/docker/spdystream/spdy/read.go new file mode 100644 index 00000000000..9359a95015c --- /dev/null +++ b/vendor/github.com/docker/spdystream/spdy/read.go @@ -0,0 +1,348 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package spdy + +import ( + "compress/zlib" + "encoding/binary" + "io" + "net/http" + "strings" +) + +func (frame *SynStreamFrame) read(h ControlFrameHeader, f *Framer) error { + return f.readSynStreamFrame(h, frame) +} + +func (frame *SynReplyFrame) read(h ControlFrameHeader, f *Framer) error { + return f.readSynReplyFrame(h, frame) +} + +func (frame *RstStreamFrame) read(h ControlFrameHeader, f *Framer) error { + frame.CFHeader = h + if err := binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil { + return err + } + if err := binary.Read(f.r, binary.BigEndian, &frame.Status); err != nil { + return err + } + if frame.Status == 0 { + return &Error{InvalidControlFrame, frame.StreamId} + } + if frame.StreamId == 0 { + return &Error{ZeroStreamId, 0} + } + return nil +} + +func (frame *SettingsFrame) read(h ControlFrameHeader, f *Framer) error { + frame.CFHeader = h + var numSettings uint32 + if err := binary.Read(f.r, binary.BigEndian, &numSettings); err != nil { + return err + } + frame.FlagIdValues = make([]SettingsFlagIdValue, numSettings) + for i := uint32(0); i < numSettings; i++ { + if err := binary.Read(f.r, binary.BigEndian, &frame.FlagIdValues[i].Id); err != nil { + return err + } + frame.FlagIdValues[i].Flag = SettingsFlag((frame.FlagIdValues[i].Id & 0xff000000) >> 24) + frame.FlagIdValues[i].Id &= 0xffffff + if err := binary.Read(f.r, binary.BigEndian, &frame.FlagIdValues[i].Value); err != nil { + return err + } + } + return nil +} + +func (frame *PingFrame) read(h ControlFrameHeader, f *Framer) error { + frame.CFHeader = h + if err := binary.Read(f.r, binary.BigEndian, &frame.Id); err != nil { + return err + } + if frame.Id == 0 { + return &Error{ZeroStreamId, 0} + } + if frame.CFHeader.Flags != 0 { + return &Error{InvalidControlFrame, StreamId(frame.Id)} + } + return nil +} + +func (frame *GoAwayFrame) read(h ControlFrameHeader, f *Framer) error { + frame.CFHeader = h + if err := binary.Read(f.r, binary.BigEndian, &frame.LastGoodStreamId); err != nil { + return err + } + if frame.CFHeader.Flags != 0 { + return &Error{InvalidControlFrame, frame.LastGoodStreamId} + } + if frame.CFHeader.length != 8 { + return &Error{InvalidControlFrame, frame.LastGoodStreamId} + } + if err := binary.Read(f.r, binary.BigEndian, &frame.Status); err != nil { + return err + } + return nil +} + +func (frame *HeadersFrame) read(h ControlFrameHeader, f *Framer) error { + return f.readHeadersFrame(h, frame) +} + +func (frame *WindowUpdateFrame) read(h ControlFrameHeader, f *Framer) error { + frame.CFHeader = h + if err := binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil { + return err + } + if frame.CFHeader.Flags != 0 { + return &Error{InvalidControlFrame, frame.StreamId} + } + if frame.CFHeader.length != 8 { + return &Error{InvalidControlFrame, frame.StreamId} + } + if err := binary.Read(f.r, binary.BigEndian, &frame.DeltaWindowSize); err != nil { + return err + } + return nil +} + +func newControlFrame(frameType ControlFrameType) (controlFrame, error) { + ctor, ok := cframeCtor[frameType] + if !ok { + return nil, &Error{Err: InvalidControlFrame} + } + return ctor(), nil +} + +var cframeCtor = map[ControlFrameType]func() controlFrame{ + TypeSynStream: func() controlFrame { return new(SynStreamFrame) }, + TypeSynReply: func() controlFrame { return new(SynReplyFrame) }, + TypeRstStream: func() controlFrame { return new(RstStreamFrame) }, + TypeSettings: func() controlFrame { return new(SettingsFrame) }, + TypePing: func() controlFrame { return new(PingFrame) }, + TypeGoAway: 
func() controlFrame { return new(GoAwayFrame) }, + TypeHeaders: func() controlFrame { return new(HeadersFrame) }, + TypeWindowUpdate: func() controlFrame { return new(WindowUpdateFrame) }, +} + +func (f *Framer) uncorkHeaderDecompressor(payloadSize int64) error { + if f.headerDecompressor != nil { + f.headerReader.N = payloadSize + return nil + } + f.headerReader = io.LimitedReader{R: f.r, N: payloadSize} + decompressor, err := zlib.NewReaderDict(&f.headerReader, []byte(headerDictionary)) + if err != nil { + return err + } + f.headerDecompressor = decompressor + return nil +} + +// ReadFrame reads SPDY encoded data and returns a decompressed Frame. +func (f *Framer) ReadFrame() (Frame, error) { + var firstWord uint32 + if err := binary.Read(f.r, binary.BigEndian, &firstWord); err != nil { + return nil, err + } + if firstWord&0x80000000 != 0 { + frameType := ControlFrameType(firstWord & 0xffff) + version := uint16(firstWord >> 16 & 0x7fff) + return f.parseControlFrame(version, frameType) + } + return f.parseDataFrame(StreamId(firstWord & 0x7fffffff)) +} + +func (f *Framer) parseControlFrame(version uint16, frameType ControlFrameType) (Frame, error) { + var length uint32 + if err := binary.Read(f.r, binary.BigEndian, &length); err != nil { + return nil, err + } + flags := ControlFlags((length & 0xff000000) >> 24) + length &= 0xffffff + header := ControlFrameHeader{version, frameType, flags, length} + cframe, err := newControlFrame(frameType) + if err != nil { + return nil, err + } + if err = cframe.read(header, f); err != nil { + return nil, err + } + return cframe, nil +} + +func parseHeaderValueBlock(r io.Reader, streamId StreamId) (http.Header, error) { + var numHeaders uint32 + if err := binary.Read(r, binary.BigEndian, &numHeaders); err != nil { + return nil, err + } + var e error + h := make(http.Header, int(numHeaders)) + for i := 0; i < int(numHeaders); i++ { + var length uint32 + if err := binary.Read(r, binary.BigEndian, &length); err != nil { + return nil, err + } + nameBytes := make([]byte, length) + if _, err := io.ReadFull(r, nameBytes); err != nil { + return nil, err + } + name := string(nameBytes) + if name != strings.ToLower(name) { + e = &Error{UnlowercasedHeaderName, streamId} + name = strings.ToLower(name) + } + if h[name] != nil { + e = &Error{DuplicateHeaders, streamId} + } + if err := binary.Read(r, binary.BigEndian, &length); err != nil { + return nil, err + } + value := make([]byte, length) + if _, err := io.ReadFull(r, value); err != nil { + return nil, err + } + valueList := strings.Split(string(value), headerValueSeparator) + for _, v := range valueList { + h.Add(name, v) + } + } + if e != nil { + return h, e + } + return h, nil +} + +func (f *Framer) readSynStreamFrame(h ControlFrameHeader, frame *SynStreamFrame) error { + frame.CFHeader = h + var err error + if err = binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil { + return err + } + if err = binary.Read(f.r, binary.BigEndian, &frame.AssociatedToStreamId); err != nil { + return err + } + if err = binary.Read(f.r, binary.BigEndian, &frame.Priority); err != nil { + return err + } + frame.Priority >>= 5 + if err = binary.Read(f.r, binary.BigEndian, &frame.Slot); err != nil { + return err + } + reader := f.r + if !f.headerCompressionDisabled { + err := f.uncorkHeaderDecompressor(int64(h.length - 10)) + if err != nil { + return err + } + reader = f.headerDecompressor + } + frame.Headers, err = parseHeaderValueBlock(reader, frame.StreamId) + if !f.headerCompressionDisabled && (err == io.EOF && 
f.headerReader.N == 0 || f.headerReader.N != 0) { + err = &Error{WrongCompressedPayloadSize, 0} + } + if err != nil { + return err + } + for h := range frame.Headers { + if invalidReqHeaders[h] { + return &Error{InvalidHeaderPresent, frame.StreamId} + } + } + if frame.StreamId == 0 { + return &Error{ZeroStreamId, 0} + } + return nil +} + +func (f *Framer) readSynReplyFrame(h ControlFrameHeader, frame *SynReplyFrame) error { + frame.CFHeader = h + var err error + if err = binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil { + return err + } + reader := f.r + if !f.headerCompressionDisabled { + err := f.uncorkHeaderDecompressor(int64(h.length - 4)) + if err != nil { + return err + } + reader = f.headerDecompressor + } + frame.Headers, err = parseHeaderValueBlock(reader, frame.StreamId) + if !f.headerCompressionDisabled && (err == io.EOF && f.headerReader.N == 0 || f.headerReader.N != 0) { + err = &Error{WrongCompressedPayloadSize, 0} + } + if err != nil { + return err + } + for h := range frame.Headers { + if invalidRespHeaders[h] { + return &Error{InvalidHeaderPresent, frame.StreamId} + } + } + if frame.StreamId == 0 { + return &Error{ZeroStreamId, 0} + } + return nil +} + +func (f *Framer) readHeadersFrame(h ControlFrameHeader, frame *HeadersFrame) error { + frame.CFHeader = h + var err error + if err = binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil { + return err + } + reader := f.r + if !f.headerCompressionDisabled { + err := f.uncorkHeaderDecompressor(int64(h.length - 4)) + if err != nil { + return err + } + reader = f.headerDecompressor + } + frame.Headers, err = parseHeaderValueBlock(reader, frame.StreamId) + if !f.headerCompressionDisabled && (err == io.EOF && f.headerReader.N == 0 || f.headerReader.N != 0) { + err = &Error{WrongCompressedPayloadSize, 0} + } + if err != nil { + return err + } + var invalidHeaders map[string]bool + if frame.StreamId%2 == 0 { + invalidHeaders = invalidReqHeaders + } else { + invalidHeaders = invalidRespHeaders + } + for h := range frame.Headers { + if invalidHeaders[h] { + return &Error{InvalidHeaderPresent, frame.StreamId} + } + } + if frame.StreamId == 0 { + return &Error{ZeroStreamId, 0} + } + return nil +} + +func (f *Framer) parseDataFrame(streamId StreamId) (*DataFrame, error) { + var length uint32 + if err := binary.Read(f.r, binary.BigEndian, &length); err != nil { + return nil, err + } + var frame DataFrame + frame.StreamId = streamId + frame.Flags = DataFlags(length >> 24) + length &= 0xffffff + frame.Data = make([]byte, length) + if _, err := io.ReadFull(f.r, frame.Data); err != nil { + return nil, err + } + if frame.StreamId == 0 { + return nil, &Error{ZeroStreamId, 0} + } + return &frame, nil +} diff --git a/vendor/github.com/docker/spdystream/spdy/types.go b/vendor/github.com/docker/spdystream/spdy/types.go new file mode 100644 index 00000000000..7b6ee9c6f2b --- /dev/null +++ b/vendor/github.com/docker/spdystream/spdy/types.go @@ -0,0 +1,275 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package spdy implements the SPDY protocol (currently SPDY/3), described in +// http://www.chromium.org/spdy/spdy-protocol/spdy-protocol-draft3. +package spdy + +import ( + "bytes" + "compress/zlib" + "io" + "net/http" +) + +// Version is the protocol version number that this package implements. +const Version = 3 + +// ControlFrameType stores the type field in a control frame header. 
+type ControlFrameType uint16
+
+const (
+	TypeSynStream    ControlFrameType = 0x0001
+	TypeSynReply                      = 0x0002
+	TypeRstStream                     = 0x0003
+	TypeSettings                      = 0x0004
+	TypePing                          = 0x0006
+	TypeGoAway                        = 0x0007
+	TypeHeaders                       = 0x0008
+	TypeWindowUpdate                  = 0x0009
+)
+
+// ControlFlags are the flags that can be set on a control frame.
+type ControlFlags uint8
+
+const (
+	ControlFlagFin                   ControlFlags = 0x01
+	ControlFlagUnidirectional                     = 0x02
+	ControlFlagSettingsClearSettings              = 0x01
+)
+
+// DataFlags are the flags that can be set on a data frame.
+type DataFlags uint8
+
+const (
+	DataFlagFin DataFlags = 0x01
+)
+
+// MaxDataLength is the maximum number of bytes that can be stored in one frame.
+const MaxDataLength = 1<<24 - 1
+
+// headerValueSeparator separates multiple header values.
+const headerValueSeparator = "\x00"
+
+// Frame is a single SPDY frame in its unpacked in-memory representation. Use
+// Framer to read and write it.
+type Frame interface {
+	write(f *Framer) error
+}
+
+// ControlFrameHeader contains all the fields in a control frame header,
+// in its unpacked in-memory representation.
+type ControlFrameHeader struct {
+	// Note, high bit is the "Control" bit.
+	version   uint16 // spdy version number
+	frameType ControlFrameType
+	Flags     ControlFlags
+	length    uint32 // length of data field
+}
+
+type controlFrame interface {
+	Frame
+	read(h ControlFrameHeader, f *Framer) error
+}
+
+// StreamId represents a 31-bit value identifying the stream.
+type StreamId uint32
+
+// SynStreamFrame is the unpacked, in-memory representation of a SYN_STREAM
+// frame.
+type SynStreamFrame struct {
+	CFHeader             ControlFrameHeader
+	StreamId             StreamId
+	AssociatedToStreamId StreamId // stream id for a stream which this stream is associated to
+	Priority             uint8    // priority of this frame (3-bit)
+	Slot                 uint8    // index in the server's credential vector of the client certificate
+	Headers              http.Header
+}
+
+// SynReplyFrame is the unpacked, in-memory representation of a SYN_REPLY frame.
+type SynReplyFrame struct {
+	CFHeader ControlFrameHeader
+	StreamId StreamId
+	Headers  http.Header
+}
+
+// RstStreamStatus represents the status that led to a RST_STREAM.
+type RstStreamStatus uint32
+
+const (
+	ProtocolError RstStreamStatus = iota + 1
+	InvalidStream
+	RefusedStream
+	UnsupportedVersion
+	Cancel
+	InternalError
+	FlowControlError
+	StreamInUse
+	StreamAlreadyClosed
+	InvalidCredentials
+	FrameTooLarge
+)
+
+// RstStreamFrame is the unpacked, in-memory representation of a RST_STREAM
+// frame.
+type RstStreamFrame struct {
+	CFHeader ControlFrameHeader
+	StreamId StreamId
+	Status   RstStreamStatus
+}
+
+// SettingsFlag represents a flag in a SETTINGS frame.
+type SettingsFlag uint8
+
+const (
+	FlagSettingsPersistValue SettingsFlag = 0x1
+	FlagSettingsPersisted                 = 0x2
+)
+
+// SettingsId represents the id of an id/value pair in a SETTINGS frame.
+type SettingsId uint32
+
+const (
+	SettingsUploadBandwidth SettingsId = iota + 1
+	SettingsDownloadBandwidth
+	SettingsRoundTripTime
+	SettingsMaxConcurrentStreams
+	SettingsCurrentCwnd
+	SettingsDownloadRetransRate
+	SettingsInitialWindowSize
+	SettingsClientCretificateVectorSize
+)
+
+// SettingsFlagIdValue is the unpacked, in-memory representation of the
+// combined flag/id/value for a setting in a SETTINGS frame.
+type SettingsFlagIdValue struct {
+	Flag  SettingsFlag
+	Id    SettingsId
+	Value uint32
+}
+
+// SettingsFrame is the unpacked, in-memory representation of a SPDY
+// SETTINGS frame.
+type SettingsFrame struct {
+	CFHeader     ControlFrameHeader
+	FlagIdValues []SettingsFlagIdValue
+}
+
+// PingFrame is the unpacked, in-memory representation of a PING frame.
+type PingFrame struct {
+	CFHeader ControlFrameHeader
+	Id       uint32 // unique id for this ping, from server is even, from client is odd.
+}
+
+// GoAwayStatus represents the status in a GoAwayFrame.
+type GoAwayStatus uint32
+
+const (
+	GoAwayOK GoAwayStatus = iota
+	GoAwayProtocolError
+	GoAwayInternalError
+)
+
+// GoAwayFrame is the unpacked, in-memory representation of a GOAWAY frame.
+type GoAwayFrame struct {
+	CFHeader         ControlFrameHeader
+	LastGoodStreamId StreamId // last stream id which was accepted by sender
+	Status           GoAwayStatus
+}
+
+// HeadersFrame is the unpacked, in-memory representation of a HEADERS frame.
+type HeadersFrame struct {
+	CFHeader ControlFrameHeader
+	StreamId StreamId
+	Headers  http.Header
+}
+
+// WindowUpdateFrame is the unpacked, in-memory representation of a
+// WINDOW_UPDATE frame.
+type WindowUpdateFrame struct {
+	CFHeader        ControlFrameHeader
+	StreamId        StreamId
+	DeltaWindowSize uint32 // additional number of bytes to existing window size
+}
+
+// TODO: Implement credential frame and related methods.
+
+// DataFrame is the unpacked, in-memory representation of a DATA frame.
+type DataFrame struct {
+	// Note, high bit is the "Control" bit. Should be 0 for data frames.
+	StreamId StreamId
+	Flags    DataFlags
+	Data     []byte // payload data of this frame
+}
+
+// A SPDY-specific error.
+type ErrorCode string
+
+const (
+	UnlowercasedHeaderName     ErrorCode = "header was not lowercased"
+	DuplicateHeaders                     = "multiple headers with same name"
+	WrongCompressedPayloadSize           = "compressed payload size was incorrect"
+	UnknownFrameType                     = "unknown frame type"
+	InvalidControlFrame                  = "invalid control frame"
+	InvalidDataFrame                     = "invalid data frame"
+	InvalidHeaderPresent                 = "frame contained invalid header"
+	ZeroStreamId                         = "stream id zero is disallowed"
+)
+
+// Error contains both the type of error and additional values. StreamId is 0
+// if Error is not associated with a stream.
+type Error struct {
+	Err      ErrorCode
+	StreamId StreamId
+}
+
+func (e *Error) Error() string {
+	return string(e.Err)
+}
+
+var invalidReqHeaders = map[string]bool{
+	"Connection":        true,
+	"Host":              true,
+	"Keep-Alive":        true,
+	"Proxy-Connection":  true,
+	"Transfer-Encoding": true,
+}
+
+var invalidRespHeaders = map[string]bool{
+	"Connection":        true,
+	"Keep-Alive":        true,
+	"Proxy-Connection":  true,
+	"Transfer-Encoding": true,
+}
+
+// Framer handles serializing/deserializing SPDY frames, including compressing/
+// decompressing payloads.
+type Framer struct {
+	headerCompressionDisabled bool
+	w                         io.Writer
+	headerBuf                 *bytes.Buffer
+	headerCompressor          *zlib.Writer
+	r                         io.Reader
+	headerReader              io.LimitedReader
+	headerDecompressor        io.ReadCloser
+}
+
+// NewFramer allocates a new Framer for a given SPDY connection, represented by
+// an io.Writer and io.Reader. Note that Framer will read and write individual fields
+// from/to the Reader and Writer, so the caller should pass in an appropriately
+// buffered implementation to optimize performance.
+func NewFramer(w io.Writer, r io.Reader) (*Framer, error) { + compressBuf := new(bytes.Buffer) + compressor, err := zlib.NewWriterLevelDict(compressBuf, zlib.BestCompression, []byte(headerDictionary)) + if err != nil { + return nil, err + } + framer := &Framer{ + w: w, + headerBuf: compressBuf, + headerCompressor: compressor, + r: r, + } + return framer, nil +} diff --git a/vendor/github.com/docker/spdystream/spdy/write.go b/vendor/github.com/docker/spdystream/spdy/write.go new file mode 100644 index 00000000000..b212f66a235 --- /dev/null +++ b/vendor/github.com/docker/spdystream/spdy/write.go @@ -0,0 +1,318 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package spdy + +import ( + "encoding/binary" + "io" + "net/http" + "strings" +) + +func (frame *SynStreamFrame) write(f *Framer) error { + return f.writeSynStreamFrame(frame) +} + +func (frame *SynReplyFrame) write(f *Framer) error { + return f.writeSynReplyFrame(frame) +} + +func (frame *RstStreamFrame) write(f *Framer) (err error) { + if frame.StreamId == 0 { + return &Error{ZeroStreamId, 0} + } + frame.CFHeader.version = Version + frame.CFHeader.frameType = TypeRstStream + frame.CFHeader.Flags = 0 + frame.CFHeader.length = 8 + + // Serialize frame to Writer. + if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil { + return + } + if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil { + return + } + if frame.Status == 0 { + return &Error{InvalidControlFrame, frame.StreamId} + } + if err = binary.Write(f.w, binary.BigEndian, frame.Status); err != nil { + return + } + return +} + +func (frame *SettingsFrame) write(f *Framer) (err error) { + frame.CFHeader.version = Version + frame.CFHeader.frameType = TypeSettings + frame.CFHeader.length = uint32(len(frame.FlagIdValues)*8 + 4) + + // Serialize frame to Writer. + if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil { + return + } + if err = binary.Write(f.w, binary.BigEndian, uint32(len(frame.FlagIdValues))); err != nil { + return + } + for _, flagIdValue := range frame.FlagIdValues { + flagId := uint32(flagIdValue.Flag)<<24 | uint32(flagIdValue.Id) + if err = binary.Write(f.w, binary.BigEndian, flagId); err != nil { + return + } + if err = binary.Write(f.w, binary.BigEndian, flagIdValue.Value); err != nil { + return + } + } + return +} + +func (frame *PingFrame) write(f *Framer) (err error) { + if frame.Id == 0 { + return &Error{ZeroStreamId, 0} + } + frame.CFHeader.version = Version + frame.CFHeader.frameType = TypePing + frame.CFHeader.Flags = 0 + frame.CFHeader.length = 4 + + // Serialize frame to Writer. + if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil { + return + } + if err = binary.Write(f.w, binary.BigEndian, frame.Id); err != nil { + return + } + return +} + +func (frame *GoAwayFrame) write(f *Framer) (err error) { + frame.CFHeader.version = Version + frame.CFHeader.frameType = TypeGoAway + frame.CFHeader.Flags = 0 + frame.CFHeader.length = 8 + + // Serialize frame to Writer. 
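+ // The GOAWAY body written below is 8 bytes on the wire: a big-endian
+ // 4-byte last-good-stream-id followed by a 4-byte status code, after
+ // the common 8-byte control frame header.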
+ if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil { + return + } + if err = binary.Write(f.w, binary.BigEndian, frame.LastGoodStreamId); err != nil { + return + } + if err = binary.Write(f.w, binary.BigEndian, frame.Status); err != nil { + return + } + return nil +} + +func (frame *HeadersFrame) write(f *Framer) error { + return f.writeHeadersFrame(frame) +} + +func (frame *WindowUpdateFrame) write(f *Framer) (err error) { + frame.CFHeader.version = Version + frame.CFHeader.frameType = TypeWindowUpdate + frame.CFHeader.Flags = 0 + frame.CFHeader.length = 8 + + // Serialize frame to Writer. + if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil { + return + } + if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil { + return + } + if err = binary.Write(f.w, binary.BigEndian, frame.DeltaWindowSize); err != nil { + return + } + return nil +} + +func (frame *DataFrame) write(f *Framer) error { + return f.writeDataFrame(frame) +} + +// WriteFrame writes a frame. +func (f *Framer) WriteFrame(frame Frame) error { + return frame.write(f) +} + +func writeControlFrameHeader(w io.Writer, h ControlFrameHeader) error { + if err := binary.Write(w, binary.BigEndian, 0x8000|h.version); err != nil { + return err + } + if err := binary.Write(w, binary.BigEndian, h.frameType); err != nil { + return err + } + flagsAndLength := uint32(h.Flags)<<24 | h.length + if err := binary.Write(w, binary.BigEndian, flagsAndLength); err != nil { + return err + } + return nil +} + +func writeHeaderValueBlock(w io.Writer, h http.Header) (n int, err error) { + n = 0 + if err = binary.Write(w, binary.BigEndian, uint32(len(h))); err != nil { + return + } + n += 2 + for name, values := range h { + if err = binary.Write(w, binary.BigEndian, uint32(len(name))); err != nil { + return + } + n += 2 + name = strings.ToLower(name) + if _, err = io.WriteString(w, name); err != nil { + return + } + n += len(name) + v := strings.Join(values, headerValueSeparator) + if err = binary.Write(w, binary.BigEndian, uint32(len(v))); err != nil { + return + } + n += 2 + if _, err = io.WriteString(w, v); err != nil { + return + } + n += len(v) + } + return +} + +func (f *Framer) writeSynStreamFrame(frame *SynStreamFrame) (err error) { + if frame.StreamId == 0 { + return &Error{ZeroStreamId, 0} + } + // Marshal the headers. + var writer io.Writer = f.headerBuf + if !f.headerCompressionDisabled { + writer = f.headerCompressor + } + if _, err = writeHeaderValueBlock(writer, frame.Headers); err != nil { + return + } + if !f.headerCompressionDisabled { + f.headerCompressor.Flush() + } + + // Set ControlFrameHeader. + frame.CFHeader.version = Version + frame.CFHeader.frameType = TypeSynStream + frame.CFHeader.length = uint32(len(f.headerBuf.Bytes()) + 10) + + // Serialize frame to Writer. 
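+ // SYN_STREAM body layout after the control frame header: a 4-byte
+ // stream id, a 4-byte associated stream id, one byte carrying the
+ // 3-bit priority in its top bits (hence the <<5 below), one credential
+ // slot byte, and finally the (possibly compressed) header block staged
+ // in f.headerBuf above.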
+ if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil { + return err + } + if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil { + return err + } + if err = binary.Write(f.w, binary.BigEndian, frame.AssociatedToStreamId); err != nil { + return err + } + if err = binary.Write(f.w, binary.BigEndian, frame.Priority<<5); err != nil { + return err + } + if err = binary.Write(f.w, binary.BigEndian, frame.Slot); err != nil { + return err + } + if _, err = f.w.Write(f.headerBuf.Bytes()); err != nil { + return err + } + f.headerBuf.Reset() + return nil +} + +func (f *Framer) writeSynReplyFrame(frame *SynReplyFrame) (err error) { + if frame.StreamId == 0 { + return &Error{ZeroStreamId, 0} + } + // Marshal the headers. + var writer io.Writer = f.headerBuf + if !f.headerCompressionDisabled { + writer = f.headerCompressor + } + if _, err = writeHeaderValueBlock(writer, frame.Headers); err != nil { + return + } + if !f.headerCompressionDisabled { + f.headerCompressor.Flush() + } + + // Set ControlFrameHeader. + frame.CFHeader.version = Version + frame.CFHeader.frameType = TypeSynReply + frame.CFHeader.length = uint32(len(f.headerBuf.Bytes()) + 4) + + // Serialize frame to Writer. + if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil { + return + } + if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil { + return + } + if _, err = f.w.Write(f.headerBuf.Bytes()); err != nil { + return + } + f.headerBuf.Reset() + return +} + +func (f *Framer) writeHeadersFrame(frame *HeadersFrame) (err error) { + if frame.StreamId == 0 { + return &Error{ZeroStreamId, 0} + } + // Marshal the headers. + var writer io.Writer = f.headerBuf + if !f.headerCompressionDisabled { + writer = f.headerCompressor + } + if _, err = writeHeaderValueBlock(writer, frame.Headers); err != nil { + return + } + if !f.headerCompressionDisabled { + f.headerCompressor.Flush() + } + + // Set ControlFrameHeader. + frame.CFHeader.version = Version + frame.CFHeader.frameType = TypeHeaders + frame.CFHeader.length = uint32(len(f.headerBuf.Bytes()) + 4) + + // Serialize frame to Writer. + if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil { + return + } + if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil { + return + } + if _, err = f.w.Write(f.headerBuf.Bytes()); err != nil { + return + } + f.headerBuf.Reset() + return +} + +func (f *Framer) writeDataFrame(frame *DataFrame) (err error) { + if frame.StreamId == 0 { + return &Error{ZeroStreamId, 0} + } + if frame.StreamId&0x80000000 != 0 || len(frame.Data) > MaxDataLength { + return &Error{InvalidDataFrame, frame.StreamId} + } + + // Serialize frame to Writer. 
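+ // DATA frames carry no control frame header: the 4-byte stream id has
+ // its high "Control" bit clear (validated above), followed by one flags
+ // byte packed with a 24-bit payload length, then the payload itself.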
+ if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil {
+ return
+ }
+ flagsAndLength := uint32(frame.Flags)<<24 | uint32(len(frame.Data))
+ if err = binary.Write(f.w, binary.BigEndian, flagsAndLength); err != nil {
+ return
+ }
+ if _, err = f.w.Write(frame.Data); err != nil {
+ return
+ }
+ return nil
+}
diff --git a/vendor/github.com/docker/spdystream/stream.go b/vendor/github.com/docker/spdystream/stream.go
new file mode 100644
index 00000000000..f9e9ee267f8
--- /dev/null
+++ b/vendor/github.com/docker/spdystream/stream.go
@@ -0,0 +1,327 @@
+package spdystream
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "net"
+ "net/http"
+ "sync"
+ "time"
+
+ "github.com/docker/spdystream/spdy"
+)
+
+var (
+ ErrUnreadPartialData = errors.New("unread partial data")
+)
+
+type Stream struct {
+ streamId spdy.StreamId
+ parent *Stream
+ conn *Connection
+ startChan chan error
+
+ dataLock sync.RWMutex
+ dataChan chan []byte
+ unread []byte
+
+ priority uint8
+ headers http.Header
+ headerChan chan http.Header
+ finishLock sync.Mutex
+ finished bool
+ replyCond *sync.Cond
+ replied bool
+ closeLock sync.Mutex
+ closeChan chan bool
+}
+
+// WriteData writes data to stream, sending a data frame per call
+func (s *Stream) WriteData(data []byte, fin bool) error {
+ s.waitWriteReply()
+ var flags spdy.DataFlags
+
+ if fin {
+ flags = spdy.DataFlagFin
+ s.finishLock.Lock()
+ if s.finished {
+ s.finishLock.Unlock()
+ return ErrWriteClosedStream
+ }
+ s.finished = true
+ s.finishLock.Unlock()
+ }
+
+ dataFrame := &spdy.DataFrame{
+ StreamId: s.streamId,
+ Flags: flags,
+ Data: data,
+ }
+
+ debugMessage("(%p) (%d) Writing data frame", s, s.streamId)
+ return s.conn.framer.WriteFrame(dataFrame)
+}
+
+// Write writes bytes to a stream, calling WriteData for each call.
+func (s *Stream) Write(data []byte) (n int, err error) {
+ err = s.WriteData(data, false)
+ if err == nil {
+ n = len(data)
+ }
+ return
+}
+
+// Read reads bytes from a stream. A single read will never return more
+// than what was sent in a single data frame, but multiple calls to
+// Read may consume data from the same data frame.
+func (s *Stream) Read(p []byte) (n int, err error) {
+ if s.unread == nil {
+ select {
+ case <-s.closeChan:
+ return 0, io.EOF
+ case read, ok := <-s.dataChan:
+ if !ok {
+ return 0, io.EOF
+ }
+ s.unread = read
+ }
+ }
+ n = copy(p, s.unread)
+ if n < len(s.unread) {
+ s.unread = s.unread[n:]
+ } else {
+ s.unread = nil
+ }
+ return
+}
+
+// ReadData reads an entire data frame and returns the byte array
+// from the data frame. If there is unread data from the result
+// of a Read call, this function will return ErrUnreadPartialData.
+func (s *Stream) ReadData() ([]byte, error) {
+ debugMessage("(%p) Reading data from %d", s, s.streamId)
+ if s.unread != nil {
+ return nil, ErrUnreadPartialData
+ }
+ select {
+ case <-s.closeChan:
+ return nil, io.EOF
+ case read, ok := <-s.dataChan:
+ if !ok {
+ return nil, io.EOF
+ }
+ return read, nil
+ }
+}
+
+func (s *Stream) waitWriteReply() {
+ if s.replyCond != nil {
+ s.replyCond.L.Lock()
+ for !s.replied {
+ s.replyCond.Wait()
+ }
+ s.replyCond.L.Unlock()
+ }
+}
+
+// Wait waits for the stream to receive a reply.
+func (s *Stream) Wait() error {
+ return s.WaitTimeout(time.Duration(0))
+}
+
+// WaitTimeout waits for the stream to receive a reply or for timeout.
+// When the timeout is reached, ErrTimeout will be returned.
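+//
+// Illustrative use (hypothetical; stream obtained from Connection.CreateStream):
+//
+//	if err := stream.WaitTimeout(5 * time.Second); err != nil {
+//		// err is ErrTimeout or the error delivered on the start channel
+//	}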
+func (s *Stream) WaitTimeout(timeout time.Duration) error { + var timeoutChan <-chan time.Time + if timeout > time.Duration(0) { + timeoutChan = time.After(timeout) + } + + select { + case err := <-s.startChan: + if err != nil { + return err + } + break + case <-timeoutChan: + return ErrTimeout + } + return nil +} + +// Close closes the stream by sending an empty data frame with the +// finish flag set, indicating this side is finished with the stream. +func (s *Stream) Close() error { + select { + case <-s.closeChan: + // Stream is now fully closed + s.conn.removeStream(s) + default: + break + } + return s.WriteData([]byte{}, true) +} + +// Reset sends a reset frame, putting the stream into the fully closed state. +func (s *Stream) Reset() error { + s.conn.removeStream(s) + return s.resetStream() +} + +func (s *Stream) resetStream() error { + // Always call closeRemoteChannels, even if s.finished is already true. + // This makes it so that stream.Close() followed by stream.Reset() allows + // stream.Read() to unblock. + s.closeRemoteChannels() + + s.finishLock.Lock() + if s.finished { + s.finishLock.Unlock() + return nil + } + s.finished = true + s.finishLock.Unlock() + + resetFrame := &spdy.RstStreamFrame{ + StreamId: s.streamId, + Status: spdy.Cancel, + } + return s.conn.framer.WriteFrame(resetFrame) +} + +// CreateSubStream creates a stream using the current as the parent +func (s *Stream) CreateSubStream(headers http.Header, fin bool) (*Stream, error) { + return s.conn.CreateStream(headers, s, fin) +} + +// SetPriority sets the stream priority, does not affect the +// remote priority of this stream after Open has been called. +// Valid values are 0 through 7, 0 being the highest priority +// and 7 the lowest. +func (s *Stream) SetPriority(priority uint8) { + s.priority = priority +} + +// SendHeader sends a header frame across the stream +func (s *Stream) SendHeader(headers http.Header, fin bool) error { + return s.conn.sendHeaders(headers, s, fin) +} + +// SendReply sends a reply on a stream, only valid to be called once +// when handling a new stream +func (s *Stream) SendReply(headers http.Header, fin bool) error { + if s.replyCond == nil { + return errors.New("cannot reply on initiated stream") + } + s.replyCond.L.Lock() + defer s.replyCond.L.Unlock() + if s.replied { + return nil + } + + err := s.conn.sendReply(headers, s, fin) + if err != nil { + return err + } + + s.replied = true + s.replyCond.Broadcast() + return nil +} + +// Refuse sends a reset frame with the status refuse, only +// valid to be called once when handling a new stream. This +// may be used to indicate that a stream is not allowed +// when http status codes are not being used. +func (s *Stream) Refuse() error { + if s.replied { + return nil + } + s.replied = true + return s.conn.sendReset(spdy.RefusedStream, s) +} + +// Cancel sends a reset frame with the status canceled. This +// can be used at any time by the creator of the Stream to +// indicate the stream is no longer needed. +func (s *Stream) Cancel() error { + return s.conn.sendReset(spdy.Cancel, s) +} + +// ReceiveHeader receives a header sent on the other side +// of the stream. This function will block until a header +// is received or stream is closed. 
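+//
+// Sketch (hypothetical):
+//
+//	headers, err := stream.ReceiveHeader()
+//	if err != nil {
+//		return err // stream closed before a header arrived
+//	}
+//	contentType := headers.Get("Content-Type")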
+func (s *Stream) ReceiveHeader() (http.Header, error) { + select { + case <-s.closeChan: + break + case header, ok := <-s.headerChan: + if !ok { + return nil, fmt.Errorf("header chan closed") + } + return header, nil + } + return nil, fmt.Errorf("stream closed") +} + +// Parent returns the parent stream +func (s *Stream) Parent() *Stream { + return s.parent +} + +// Headers returns the headers used to create the stream +func (s *Stream) Headers() http.Header { + return s.headers +} + +// String returns the string version of stream using the +// streamId to uniquely identify the stream +func (s *Stream) String() string { + return fmt.Sprintf("stream:%d", s.streamId) +} + +// Identifier returns a 32 bit identifier for the stream +func (s *Stream) Identifier() uint32 { + return uint32(s.streamId) +} + +// IsFinished returns whether the stream has finished +// sending data +func (s *Stream) IsFinished() bool { + return s.finished +} + +// Implement net.Conn interface + +func (s *Stream) LocalAddr() net.Addr { + return s.conn.conn.LocalAddr() +} + +func (s *Stream) RemoteAddr() net.Addr { + return s.conn.conn.RemoteAddr() +} + +// TODO set per stream values instead of connection-wide + +func (s *Stream) SetDeadline(t time.Time) error { + return s.conn.conn.SetDeadline(t) +} + +func (s *Stream) SetReadDeadline(t time.Time) error { + return s.conn.conn.SetReadDeadline(t) +} + +func (s *Stream) SetWriteDeadline(t time.Time) error { + return s.conn.conn.SetWriteDeadline(t) +} + +func (s *Stream) closeRemoteChannels() { + s.closeLock.Lock() + defer s.closeLock.Unlock() + select { + case <-s.closeChan: + default: + close(s.closeChan) + } +} diff --git a/vendor/github.com/docker/spdystream/utils.go b/vendor/github.com/docker/spdystream/utils.go new file mode 100644 index 00000000000..1b2c199a402 --- /dev/null +++ b/vendor/github.com/docker/spdystream/utils.go @@ -0,0 +1,16 @@ +package spdystream + +import ( + "log" + "os" +) + +var ( + DEBUG = os.Getenv("DEBUG") +) + +func debugMessage(fmt string, args ...interface{}) { + if DEBUG != "" { + log.Printf(fmt, args...) + } +} diff --git a/vendor/golang.org/x/crypto/blowfish/block.go b/vendor/golang.org/x/crypto/blowfish/block.go new file mode 100644 index 00000000000..9d80f19521b --- /dev/null +++ b/vendor/golang.org/x/crypto/blowfish/block.go @@ -0,0 +1,159 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package blowfish + +// getNextWord returns the next big-endian uint32 value from the byte slice +// at the given position in a circular manner, updating the position. +func getNextWord(b []byte, pos *int) uint32 { + var w uint32 + j := *pos + for i := 0; i < 4; i++ { + w = w<<8 | uint32(b[j]) + j++ + if j >= len(b) { + j = 0 + } + } + *pos = j + return w +} + +// ExpandKey performs a key expansion on the given *Cipher. Specifically, it +// performs the Blowfish algorithm's key schedule which sets up the *Cipher's +// pi and substitution tables for calls to Encrypt. This is used, primarily, +// by the bcrypt package to reuse the Blowfish key schedule during its +// set up. It's unlikely that you need to use this directly. +func ExpandKey(key []byte, c *Cipher) { + j := 0 + for i := 0; i < 18; i++ { + // Using inlined getNextWord for performance. 
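+ // The loop below folds the next four key bytes into one big-endian
+ // word, wrapping j back to the start so that short keys are cycled,
+ // exactly as getNextWord does.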
+ var d uint32 + for k := 0; k < 4; k++ { + d = d<<8 | uint32(key[j]) + j++ + if j >= len(key) { + j = 0 + } + } + c.p[i] ^= d + } + + var l, r uint32 + for i := 0; i < 18; i += 2 { + l, r = encryptBlock(l, r, c) + c.p[i], c.p[i+1] = l, r + } + + for i := 0; i < 256; i += 2 { + l, r = encryptBlock(l, r, c) + c.s0[i], c.s0[i+1] = l, r + } + for i := 0; i < 256; i += 2 { + l, r = encryptBlock(l, r, c) + c.s1[i], c.s1[i+1] = l, r + } + for i := 0; i < 256; i += 2 { + l, r = encryptBlock(l, r, c) + c.s2[i], c.s2[i+1] = l, r + } + for i := 0; i < 256; i += 2 { + l, r = encryptBlock(l, r, c) + c.s3[i], c.s3[i+1] = l, r + } +} + +// This is similar to ExpandKey, but folds the salt during the key +// schedule. While ExpandKey is essentially expandKeyWithSalt with an all-zero +// salt passed in, reusing ExpandKey turns out to be a place of inefficiency +// and specializing it here is useful. +func expandKeyWithSalt(key []byte, salt []byte, c *Cipher) { + j := 0 + for i := 0; i < 18; i++ { + c.p[i] ^= getNextWord(key, &j) + } + + j = 0 + var l, r uint32 + for i := 0; i < 18; i += 2 { + l ^= getNextWord(salt, &j) + r ^= getNextWord(salt, &j) + l, r = encryptBlock(l, r, c) + c.p[i], c.p[i+1] = l, r + } + + for i := 0; i < 256; i += 2 { + l ^= getNextWord(salt, &j) + r ^= getNextWord(salt, &j) + l, r = encryptBlock(l, r, c) + c.s0[i], c.s0[i+1] = l, r + } + + for i := 0; i < 256; i += 2 { + l ^= getNextWord(salt, &j) + r ^= getNextWord(salt, &j) + l, r = encryptBlock(l, r, c) + c.s1[i], c.s1[i+1] = l, r + } + + for i := 0; i < 256; i += 2 { + l ^= getNextWord(salt, &j) + r ^= getNextWord(salt, &j) + l, r = encryptBlock(l, r, c) + c.s2[i], c.s2[i+1] = l, r + } + + for i := 0; i < 256; i += 2 { + l ^= getNextWord(salt, &j) + r ^= getNextWord(salt, &j) + l, r = encryptBlock(l, r, c) + c.s3[i], c.s3[i+1] = l, r + } +} + +func encryptBlock(l, r uint32, c *Cipher) (uint32, uint32) { + xl, xr := l, r + xl ^= c.p[0] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[1] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[2] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[3] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[4] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[5] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[6] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[7] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[8] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[9] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[10] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[11] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[12] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[13] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[14] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[15] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[16] + xr ^= c.p[17] + return xr, xl +} + +func 
decryptBlock(l, r uint32, c *Cipher) (uint32, uint32) { + xl, xr := l, r + xl ^= c.p[17] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[16] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[15] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[14] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[13] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[12] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[11] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[10] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[9] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[8] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[7] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[6] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[5] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[4] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[3] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[2] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[1] + xr ^= c.p[0] + return xr, xl +} diff --git a/vendor/golang.org/x/crypto/blowfish/cipher.go b/vendor/golang.org/x/crypto/blowfish/cipher.go new file mode 100644 index 00000000000..213bf204afe --- /dev/null +++ b/vendor/golang.org/x/crypto/blowfish/cipher.go @@ -0,0 +1,99 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package blowfish implements Bruce Schneier's Blowfish encryption algorithm. +// +// Blowfish is a legacy cipher and its short block size makes it vulnerable to +// birthday bound attacks (see https://sweet32.info). It should only be used +// where compatibility with legacy systems, not security, is the goal. +// +// Deprecated: any new system should use AES (from crypto/aes, if necessary in +// an AEAD mode like crypto/cipher.NewGCM) or XChaCha20-Poly1305 (from +// golang.org/x/crypto/chacha20poly1305). +package blowfish // import "golang.org/x/crypto/blowfish" + +// The code is a port of Bruce Schneier's C implementation. +// See https://www.schneier.com/blowfish.html. + +import "strconv" + +// The Blowfish block size in bytes. +const BlockSize = 8 + +// A Cipher is an instance of Blowfish encryption using a particular key. +type Cipher struct { + p [18]uint32 + s0, s1, s2, s3 [256]uint32 +} + +type KeySizeError int + +func (k KeySizeError) Error() string { + return "crypto/blowfish: invalid key size " + strconv.Itoa(int(k)) +} + +// NewCipher creates and returns a Cipher. +// The key argument should be the Blowfish key, from 1 to 56 bytes. 
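+//
+// Minimal sketch (hypothetical key; note the package-level deprecation
+// advice above):
+//
+//	c, err := NewCipher([]byte("example key"))
+//	if err != nil {
+//		// KeySizeError: the key must be 1 to 56 bytes
+//	}
+//	var src, dst [BlockSize]byte
+//	c.Encrypt(dst[:], src[:])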
+func NewCipher(key []byte) (*Cipher, error) {
+ var result Cipher
+ if k := len(key); k < 1 || k > 56 {
+ return nil, KeySizeError(k)
+ }
+ initCipher(&result)
+ ExpandKey(key, &result)
+ return &result, nil
+}
+
+// NewSaltedCipher creates and returns a Cipher that folds a salt into its key
+// schedule. For most purposes, NewCipher, instead of NewSaltedCipher, is
+// sufficient and desirable. For bcrypt compatibility, the key can be over 56
+// bytes.
+func NewSaltedCipher(key, salt []byte) (*Cipher, error) {
+ if len(salt) == 0 {
+ return NewCipher(key)
+ }
+ var result Cipher
+ if k := len(key); k < 1 {
+ return nil, KeySizeError(k)
+ }
+ initCipher(&result)
+ expandKeyWithSalt(key, salt, &result)
+ return &result, nil
+}
+
+// BlockSize returns the Blowfish block size, 8 bytes.
+// It is necessary to satisfy the Block interface in the
+// package "crypto/cipher".
+func (c *Cipher) BlockSize() int { return BlockSize }
+
+// Encrypt encrypts the 8-byte buffer src using the cipher's key
+// and stores the result in dst.
+// Note that for amounts of data larger than a block,
+// it is not safe to just call Encrypt on successive blocks;
+// instead, use an encryption mode like CBC (see crypto/cipher/cbc.go).
+func (c *Cipher) Encrypt(dst, src []byte) {
+ l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3])
+ r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7])
+ l, r = encryptBlock(l, r, c)
+ dst[0], dst[1], dst[2], dst[3] = byte(l>>24), byte(l>>16), byte(l>>8), byte(l)
+ dst[4], dst[5], dst[6], dst[7] = byte(r>>24), byte(r>>16), byte(r>>8), byte(r)
+}
+
+// Decrypt decrypts the 8-byte buffer src using the cipher's key
+// and stores the result in dst.
+func (c *Cipher) Decrypt(dst, src []byte) {
+ l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3])
+ r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7])
+ l, r = decryptBlock(l, r, c)
+ dst[0], dst[1], dst[2], dst[3] = byte(l>>24), byte(l>>16), byte(l>>8), byte(l)
+ dst[4], dst[5], dst[6], dst[7] = byte(r>>24), byte(r>>16), byte(r>>8), byte(r)
+}
+
+func initCipher(c *Cipher) {
+ copy(c.p[0:], p[0:])
+ copy(c.s0[0:], s0[0:])
+ copy(c.s1[0:], s1[0:])
+ copy(c.s2[0:], s2[0:])
+ copy(c.s3[0:], s3[0:])
+}
diff --git a/vendor/golang.org/x/crypto/blowfish/const.go b/vendor/golang.org/x/crypto/blowfish/const.go
new file mode 100644
index 00000000000..d04077595ab
--- /dev/null
+++ b/vendor/golang.org/x/crypto/blowfish/const.go
@@ -0,0 +1,199 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// The startup permutation array and substitution boxes.
+// They are the hexadecimal digits of PI; see:
+// https://www.schneier.com/code/constants.txt.
+ +package blowfish + +var s0 = [256]uint32{ + 0xd1310ba6, 0x98dfb5ac, 0x2ffd72db, 0xd01adfb7, 0xb8e1afed, 0x6a267e96, + 0xba7c9045, 0xf12c7f99, 0x24a19947, 0xb3916cf7, 0x0801f2e2, 0x858efc16, + 0x636920d8, 0x71574e69, 0xa458fea3, 0xf4933d7e, 0x0d95748f, 0x728eb658, + 0x718bcd58, 0x82154aee, 0x7b54a41d, 0xc25a59b5, 0x9c30d539, 0x2af26013, + 0xc5d1b023, 0x286085f0, 0xca417918, 0xb8db38ef, 0x8e79dcb0, 0x603a180e, + 0x6c9e0e8b, 0xb01e8a3e, 0xd71577c1, 0xbd314b27, 0x78af2fda, 0x55605c60, + 0xe65525f3, 0xaa55ab94, 0x57489862, 0x63e81440, 0x55ca396a, 0x2aab10b6, + 0xb4cc5c34, 0x1141e8ce, 0xa15486af, 0x7c72e993, 0xb3ee1411, 0x636fbc2a, + 0x2ba9c55d, 0x741831f6, 0xce5c3e16, 0x9b87931e, 0xafd6ba33, 0x6c24cf5c, + 0x7a325381, 0x28958677, 0x3b8f4898, 0x6b4bb9af, 0xc4bfe81b, 0x66282193, + 0x61d809cc, 0xfb21a991, 0x487cac60, 0x5dec8032, 0xef845d5d, 0xe98575b1, + 0xdc262302, 0xeb651b88, 0x23893e81, 0xd396acc5, 0x0f6d6ff3, 0x83f44239, + 0x2e0b4482, 0xa4842004, 0x69c8f04a, 0x9e1f9b5e, 0x21c66842, 0xf6e96c9a, + 0x670c9c61, 0xabd388f0, 0x6a51a0d2, 0xd8542f68, 0x960fa728, 0xab5133a3, + 0x6eef0b6c, 0x137a3be4, 0xba3bf050, 0x7efb2a98, 0xa1f1651d, 0x39af0176, + 0x66ca593e, 0x82430e88, 0x8cee8619, 0x456f9fb4, 0x7d84a5c3, 0x3b8b5ebe, + 0xe06f75d8, 0x85c12073, 0x401a449f, 0x56c16aa6, 0x4ed3aa62, 0x363f7706, + 0x1bfedf72, 0x429b023d, 0x37d0d724, 0xd00a1248, 0xdb0fead3, 0x49f1c09b, + 0x075372c9, 0x80991b7b, 0x25d479d8, 0xf6e8def7, 0xe3fe501a, 0xb6794c3b, + 0x976ce0bd, 0x04c006ba, 0xc1a94fb6, 0x409f60c4, 0x5e5c9ec2, 0x196a2463, + 0x68fb6faf, 0x3e6c53b5, 0x1339b2eb, 0x3b52ec6f, 0x6dfc511f, 0x9b30952c, + 0xcc814544, 0xaf5ebd09, 0xbee3d004, 0xde334afd, 0x660f2807, 0x192e4bb3, + 0xc0cba857, 0x45c8740f, 0xd20b5f39, 0xb9d3fbdb, 0x5579c0bd, 0x1a60320a, + 0xd6a100c6, 0x402c7279, 0x679f25fe, 0xfb1fa3cc, 0x8ea5e9f8, 0xdb3222f8, + 0x3c7516df, 0xfd616b15, 0x2f501ec8, 0xad0552ab, 0x323db5fa, 0xfd238760, + 0x53317b48, 0x3e00df82, 0x9e5c57bb, 0xca6f8ca0, 0x1a87562e, 0xdf1769db, + 0xd542a8f6, 0x287effc3, 0xac6732c6, 0x8c4f5573, 0x695b27b0, 0xbbca58c8, + 0xe1ffa35d, 0xb8f011a0, 0x10fa3d98, 0xfd2183b8, 0x4afcb56c, 0x2dd1d35b, + 0x9a53e479, 0xb6f84565, 0xd28e49bc, 0x4bfb9790, 0xe1ddf2da, 0xa4cb7e33, + 0x62fb1341, 0xcee4c6e8, 0xef20cada, 0x36774c01, 0xd07e9efe, 0x2bf11fb4, + 0x95dbda4d, 0xae909198, 0xeaad8e71, 0x6b93d5a0, 0xd08ed1d0, 0xafc725e0, + 0x8e3c5b2f, 0x8e7594b7, 0x8ff6e2fb, 0xf2122b64, 0x8888b812, 0x900df01c, + 0x4fad5ea0, 0x688fc31c, 0xd1cff191, 0xb3a8c1ad, 0x2f2f2218, 0xbe0e1777, + 0xea752dfe, 0x8b021fa1, 0xe5a0cc0f, 0xb56f74e8, 0x18acf3d6, 0xce89e299, + 0xb4a84fe0, 0xfd13e0b7, 0x7cc43b81, 0xd2ada8d9, 0x165fa266, 0x80957705, + 0x93cc7314, 0x211a1477, 0xe6ad2065, 0x77b5fa86, 0xc75442f5, 0xfb9d35cf, + 0xebcdaf0c, 0x7b3e89a0, 0xd6411bd3, 0xae1e7e49, 0x00250e2d, 0x2071b35e, + 0x226800bb, 0x57b8e0af, 0x2464369b, 0xf009b91e, 0x5563911d, 0x59dfa6aa, + 0x78c14389, 0xd95a537f, 0x207d5ba2, 0x02e5b9c5, 0x83260376, 0x6295cfa9, + 0x11c81968, 0x4e734a41, 0xb3472dca, 0x7b14a94a, 0x1b510052, 0x9a532915, + 0xd60f573f, 0xbc9bc6e4, 0x2b60a476, 0x81e67400, 0x08ba6fb5, 0x571be91f, + 0xf296ec6b, 0x2a0dd915, 0xb6636521, 0xe7b9f9b6, 0xff34052e, 0xc5855664, + 0x53b02d5d, 0xa99f8fa1, 0x08ba4799, 0x6e85076a, +} + +var s1 = [256]uint32{ + 0x4b7a70e9, 0xb5b32944, 0xdb75092e, 0xc4192623, 0xad6ea6b0, 0x49a7df7d, + 0x9cee60b8, 0x8fedb266, 0xecaa8c71, 0x699a17ff, 0x5664526c, 0xc2b19ee1, + 0x193602a5, 0x75094c29, 0xa0591340, 0xe4183a3e, 0x3f54989a, 0x5b429d65, + 0x6b8fe4d6, 0x99f73fd6, 0xa1d29c07, 0xefe830f5, 0x4d2d38e6, 0xf0255dc1, + 0x4cdd2086, 0x8470eb26, 
0x6382e9c6, 0x021ecc5e, 0x09686b3f, 0x3ebaefc9, + 0x3c971814, 0x6b6a70a1, 0x687f3584, 0x52a0e286, 0xb79c5305, 0xaa500737, + 0x3e07841c, 0x7fdeae5c, 0x8e7d44ec, 0x5716f2b8, 0xb03ada37, 0xf0500c0d, + 0xf01c1f04, 0x0200b3ff, 0xae0cf51a, 0x3cb574b2, 0x25837a58, 0xdc0921bd, + 0xd19113f9, 0x7ca92ff6, 0x94324773, 0x22f54701, 0x3ae5e581, 0x37c2dadc, + 0xc8b57634, 0x9af3dda7, 0xa9446146, 0x0fd0030e, 0xecc8c73e, 0xa4751e41, + 0xe238cd99, 0x3bea0e2f, 0x3280bba1, 0x183eb331, 0x4e548b38, 0x4f6db908, + 0x6f420d03, 0xf60a04bf, 0x2cb81290, 0x24977c79, 0x5679b072, 0xbcaf89af, + 0xde9a771f, 0xd9930810, 0xb38bae12, 0xdccf3f2e, 0x5512721f, 0x2e6b7124, + 0x501adde6, 0x9f84cd87, 0x7a584718, 0x7408da17, 0xbc9f9abc, 0xe94b7d8c, + 0xec7aec3a, 0xdb851dfa, 0x63094366, 0xc464c3d2, 0xef1c1847, 0x3215d908, + 0xdd433b37, 0x24c2ba16, 0x12a14d43, 0x2a65c451, 0x50940002, 0x133ae4dd, + 0x71dff89e, 0x10314e55, 0x81ac77d6, 0x5f11199b, 0x043556f1, 0xd7a3c76b, + 0x3c11183b, 0x5924a509, 0xf28fe6ed, 0x97f1fbfa, 0x9ebabf2c, 0x1e153c6e, + 0x86e34570, 0xeae96fb1, 0x860e5e0a, 0x5a3e2ab3, 0x771fe71c, 0x4e3d06fa, + 0x2965dcb9, 0x99e71d0f, 0x803e89d6, 0x5266c825, 0x2e4cc978, 0x9c10b36a, + 0xc6150eba, 0x94e2ea78, 0xa5fc3c53, 0x1e0a2df4, 0xf2f74ea7, 0x361d2b3d, + 0x1939260f, 0x19c27960, 0x5223a708, 0xf71312b6, 0xebadfe6e, 0xeac31f66, + 0xe3bc4595, 0xa67bc883, 0xb17f37d1, 0x018cff28, 0xc332ddef, 0xbe6c5aa5, + 0x65582185, 0x68ab9802, 0xeecea50f, 0xdb2f953b, 0x2aef7dad, 0x5b6e2f84, + 0x1521b628, 0x29076170, 0xecdd4775, 0x619f1510, 0x13cca830, 0xeb61bd96, + 0x0334fe1e, 0xaa0363cf, 0xb5735c90, 0x4c70a239, 0xd59e9e0b, 0xcbaade14, + 0xeecc86bc, 0x60622ca7, 0x9cab5cab, 0xb2f3846e, 0x648b1eaf, 0x19bdf0ca, + 0xa02369b9, 0x655abb50, 0x40685a32, 0x3c2ab4b3, 0x319ee9d5, 0xc021b8f7, + 0x9b540b19, 0x875fa099, 0x95f7997e, 0x623d7da8, 0xf837889a, 0x97e32d77, + 0x11ed935f, 0x16681281, 0x0e358829, 0xc7e61fd6, 0x96dedfa1, 0x7858ba99, + 0x57f584a5, 0x1b227263, 0x9b83c3ff, 0x1ac24696, 0xcdb30aeb, 0x532e3054, + 0x8fd948e4, 0x6dbc3128, 0x58ebf2ef, 0x34c6ffea, 0xfe28ed61, 0xee7c3c73, + 0x5d4a14d9, 0xe864b7e3, 0x42105d14, 0x203e13e0, 0x45eee2b6, 0xa3aaabea, + 0xdb6c4f15, 0xfacb4fd0, 0xc742f442, 0xef6abbb5, 0x654f3b1d, 0x41cd2105, + 0xd81e799e, 0x86854dc7, 0xe44b476a, 0x3d816250, 0xcf62a1f2, 0x5b8d2646, + 0xfc8883a0, 0xc1c7b6a3, 0x7f1524c3, 0x69cb7492, 0x47848a0b, 0x5692b285, + 0x095bbf00, 0xad19489d, 0x1462b174, 0x23820e00, 0x58428d2a, 0x0c55f5ea, + 0x1dadf43e, 0x233f7061, 0x3372f092, 0x8d937e41, 0xd65fecf1, 0x6c223bdb, + 0x7cde3759, 0xcbee7460, 0x4085f2a7, 0xce77326e, 0xa6078084, 0x19f8509e, + 0xe8efd855, 0x61d99735, 0xa969a7aa, 0xc50c06c2, 0x5a04abfc, 0x800bcadc, + 0x9e447a2e, 0xc3453484, 0xfdd56705, 0x0e1e9ec9, 0xdb73dbd3, 0x105588cd, + 0x675fda79, 0xe3674340, 0xc5c43465, 0x713e38d8, 0x3d28f89e, 0xf16dff20, + 0x153e21e7, 0x8fb03d4a, 0xe6e39f2b, 0xdb83adf7, +} + +var s2 = [256]uint32{ + 0xe93d5a68, 0x948140f7, 0xf64c261c, 0x94692934, 0x411520f7, 0x7602d4f7, + 0xbcf46b2e, 0xd4a20068, 0xd4082471, 0x3320f46a, 0x43b7d4b7, 0x500061af, + 0x1e39f62e, 0x97244546, 0x14214f74, 0xbf8b8840, 0x4d95fc1d, 0x96b591af, + 0x70f4ddd3, 0x66a02f45, 0xbfbc09ec, 0x03bd9785, 0x7fac6dd0, 0x31cb8504, + 0x96eb27b3, 0x55fd3941, 0xda2547e6, 0xabca0a9a, 0x28507825, 0x530429f4, + 0x0a2c86da, 0xe9b66dfb, 0x68dc1462, 0xd7486900, 0x680ec0a4, 0x27a18dee, + 0x4f3ffea2, 0xe887ad8c, 0xb58ce006, 0x7af4d6b6, 0xaace1e7c, 0xd3375fec, + 0xce78a399, 0x406b2a42, 0x20fe9e35, 0xd9f385b9, 0xee39d7ab, 0x3b124e8b, + 0x1dc9faf7, 0x4b6d1856, 0x26a36631, 0xeae397b2, 0x3a6efa74, 0xdd5b4332, + 0x6841e7f7, 
0xca7820fb, 0xfb0af54e, 0xd8feb397, 0x454056ac, 0xba489527, + 0x55533a3a, 0x20838d87, 0xfe6ba9b7, 0xd096954b, 0x55a867bc, 0xa1159a58, + 0xcca92963, 0x99e1db33, 0xa62a4a56, 0x3f3125f9, 0x5ef47e1c, 0x9029317c, + 0xfdf8e802, 0x04272f70, 0x80bb155c, 0x05282ce3, 0x95c11548, 0xe4c66d22, + 0x48c1133f, 0xc70f86dc, 0x07f9c9ee, 0x41041f0f, 0x404779a4, 0x5d886e17, + 0x325f51eb, 0xd59bc0d1, 0xf2bcc18f, 0x41113564, 0x257b7834, 0x602a9c60, + 0xdff8e8a3, 0x1f636c1b, 0x0e12b4c2, 0x02e1329e, 0xaf664fd1, 0xcad18115, + 0x6b2395e0, 0x333e92e1, 0x3b240b62, 0xeebeb922, 0x85b2a20e, 0xe6ba0d99, + 0xde720c8c, 0x2da2f728, 0xd0127845, 0x95b794fd, 0x647d0862, 0xe7ccf5f0, + 0x5449a36f, 0x877d48fa, 0xc39dfd27, 0xf33e8d1e, 0x0a476341, 0x992eff74, + 0x3a6f6eab, 0xf4f8fd37, 0xa812dc60, 0xa1ebddf8, 0x991be14c, 0xdb6e6b0d, + 0xc67b5510, 0x6d672c37, 0x2765d43b, 0xdcd0e804, 0xf1290dc7, 0xcc00ffa3, + 0xb5390f92, 0x690fed0b, 0x667b9ffb, 0xcedb7d9c, 0xa091cf0b, 0xd9155ea3, + 0xbb132f88, 0x515bad24, 0x7b9479bf, 0x763bd6eb, 0x37392eb3, 0xcc115979, + 0x8026e297, 0xf42e312d, 0x6842ada7, 0xc66a2b3b, 0x12754ccc, 0x782ef11c, + 0x6a124237, 0xb79251e7, 0x06a1bbe6, 0x4bfb6350, 0x1a6b1018, 0x11caedfa, + 0x3d25bdd8, 0xe2e1c3c9, 0x44421659, 0x0a121386, 0xd90cec6e, 0xd5abea2a, + 0x64af674e, 0xda86a85f, 0xbebfe988, 0x64e4c3fe, 0x9dbc8057, 0xf0f7c086, + 0x60787bf8, 0x6003604d, 0xd1fd8346, 0xf6381fb0, 0x7745ae04, 0xd736fccc, + 0x83426b33, 0xf01eab71, 0xb0804187, 0x3c005e5f, 0x77a057be, 0xbde8ae24, + 0x55464299, 0xbf582e61, 0x4e58f48f, 0xf2ddfda2, 0xf474ef38, 0x8789bdc2, + 0x5366f9c3, 0xc8b38e74, 0xb475f255, 0x46fcd9b9, 0x7aeb2661, 0x8b1ddf84, + 0x846a0e79, 0x915f95e2, 0x466e598e, 0x20b45770, 0x8cd55591, 0xc902de4c, + 0xb90bace1, 0xbb8205d0, 0x11a86248, 0x7574a99e, 0xb77f19b6, 0xe0a9dc09, + 0x662d09a1, 0xc4324633, 0xe85a1f02, 0x09f0be8c, 0x4a99a025, 0x1d6efe10, + 0x1ab93d1d, 0x0ba5a4df, 0xa186f20f, 0x2868f169, 0xdcb7da83, 0x573906fe, + 0xa1e2ce9b, 0x4fcd7f52, 0x50115e01, 0xa70683fa, 0xa002b5c4, 0x0de6d027, + 0x9af88c27, 0x773f8641, 0xc3604c06, 0x61a806b5, 0xf0177a28, 0xc0f586e0, + 0x006058aa, 0x30dc7d62, 0x11e69ed7, 0x2338ea63, 0x53c2dd94, 0xc2c21634, + 0xbbcbee56, 0x90bcb6de, 0xebfc7da1, 0xce591d76, 0x6f05e409, 0x4b7c0188, + 0x39720a3d, 0x7c927c24, 0x86e3725f, 0x724d9db9, 0x1ac15bb4, 0xd39eb8fc, + 0xed545578, 0x08fca5b5, 0xd83d7cd3, 0x4dad0fc4, 0x1e50ef5e, 0xb161e6f8, + 0xa28514d9, 0x6c51133c, 0x6fd5c7e7, 0x56e14ec4, 0x362abfce, 0xddc6c837, + 0xd79a3234, 0x92638212, 0x670efa8e, 0x406000e0, +} + +var s3 = [256]uint32{ + 0x3a39ce37, 0xd3faf5cf, 0xabc27737, 0x5ac52d1b, 0x5cb0679e, 0x4fa33742, + 0xd3822740, 0x99bc9bbe, 0xd5118e9d, 0xbf0f7315, 0xd62d1c7e, 0xc700c47b, + 0xb78c1b6b, 0x21a19045, 0xb26eb1be, 0x6a366eb4, 0x5748ab2f, 0xbc946e79, + 0xc6a376d2, 0x6549c2c8, 0x530ff8ee, 0x468dde7d, 0xd5730a1d, 0x4cd04dc6, + 0x2939bbdb, 0xa9ba4650, 0xac9526e8, 0xbe5ee304, 0xa1fad5f0, 0x6a2d519a, + 0x63ef8ce2, 0x9a86ee22, 0xc089c2b8, 0x43242ef6, 0xa51e03aa, 0x9cf2d0a4, + 0x83c061ba, 0x9be96a4d, 0x8fe51550, 0xba645bd6, 0x2826a2f9, 0xa73a3ae1, + 0x4ba99586, 0xef5562e9, 0xc72fefd3, 0xf752f7da, 0x3f046f69, 0x77fa0a59, + 0x80e4a915, 0x87b08601, 0x9b09e6ad, 0x3b3ee593, 0xe990fd5a, 0x9e34d797, + 0x2cf0b7d9, 0x022b8b51, 0x96d5ac3a, 0x017da67d, 0xd1cf3ed6, 0x7c7d2d28, + 0x1f9f25cf, 0xadf2b89b, 0x5ad6b472, 0x5a88f54c, 0xe029ac71, 0xe019a5e6, + 0x47b0acfd, 0xed93fa9b, 0xe8d3c48d, 0x283b57cc, 0xf8d56629, 0x79132e28, + 0x785f0191, 0xed756055, 0xf7960e44, 0xe3d35e8c, 0x15056dd4, 0x88f46dba, + 0x03a16125, 0x0564f0bd, 0xc3eb9e15, 0x3c9057a2, 0x97271aec, 0xa93a072a, + 
0x1b3f6d9b, 0x1e6321f5, 0xf59c66fb, 0x26dcf319, 0x7533d928, 0xb155fdf5, + 0x03563482, 0x8aba3cbb, 0x28517711, 0xc20ad9f8, 0xabcc5167, 0xccad925f, + 0x4de81751, 0x3830dc8e, 0x379d5862, 0x9320f991, 0xea7a90c2, 0xfb3e7bce, + 0x5121ce64, 0x774fbe32, 0xa8b6e37e, 0xc3293d46, 0x48de5369, 0x6413e680, + 0xa2ae0810, 0xdd6db224, 0x69852dfd, 0x09072166, 0xb39a460a, 0x6445c0dd, + 0x586cdecf, 0x1c20c8ae, 0x5bbef7dd, 0x1b588d40, 0xccd2017f, 0x6bb4e3bb, + 0xdda26a7e, 0x3a59ff45, 0x3e350a44, 0xbcb4cdd5, 0x72eacea8, 0xfa6484bb, + 0x8d6612ae, 0xbf3c6f47, 0xd29be463, 0x542f5d9e, 0xaec2771b, 0xf64e6370, + 0x740e0d8d, 0xe75b1357, 0xf8721671, 0xaf537d5d, 0x4040cb08, 0x4eb4e2cc, + 0x34d2466a, 0x0115af84, 0xe1b00428, 0x95983a1d, 0x06b89fb4, 0xce6ea048, + 0x6f3f3b82, 0x3520ab82, 0x011a1d4b, 0x277227f8, 0x611560b1, 0xe7933fdc, + 0xbb3a792b, 0x344525bd, 0xa08839e1, 0x51ce794b, 0x2f32c9b7, 0xa01fbac9, + 0xe01cc87e, 0xbcc7d1f6, 0xcf0111c3, 0xa1e8aac7, 0x1a908749, 0xd44fbd9a, + 0xd0dadecb, 0xd50ada38, 0x0339c32a, 0xc6913667, 0x8df9317c, 0xe0b12b4f, + 0xf79e59b7, 0x43f5bb3a, 0xf2d519ff, 0x27d9459c, 0xbf97222c, 0x15e6fc2a, + 0x0f91fc71, 0x9b941525, 0xfae59361, 0xceb69ceb, 0xc2a86459, 0x12baa8d1, + 0xb6c1075e, 0xe3056a0c, 0x10d25065, 0xcb03a442, 0xe0ec6e0e, 0x1698db3b, + 0x4c98a0be, 0x3278e964, 0x9f1f9532, 0xe0d392df, 0xd3a0342b, 0x8971f21e, + 0x1b0a7441, 0x4ba3348c, 0xc5be7120, 0xc37632d8, 0xdf359f8d, 0x9b992f2e, + 0xe60b6f47, 0x0fe3f11d, 0xe54cda54, 0x1edad891, 0xce6279cf, 0xcd3e7e6f, + 0x1618b166, 0xfd2c1d05, 0x848fd2c5, 0xf6fb2299, 0xf523f357, 0xa6327623, + 0x93a83531, 0x56cccd02, 0xacf08162, 0x5a75ebb5, 0x6e163697, 0x88d273cc, + 0xde966292, 0x81b949d0, 0x4c50901b, 0x71c65614, 0xe6c6c7bd, 0x327a140a, + 0x45e1d006, 0xc3f27b9a, 0xc9aa53fd, 0x62a80f00, 0xbb25bfe2, 0x35bdd2f6, + 0x71126905, 0xb2040222, 0xb6cbcf7c, 0xcd769c2b, 0x53113ec0, 0x1640e3d3, + 0x38abbd60, 0x2547adf0, 0xba38209c, 0xf746ce76, 0x77afa1c5, 0x20756060, + 0x85cbfe4e, 0x8ae88dd8, 0x7aaaf9b0, 0x4cf9aa7e, 0x1948c25c, 0x02fb8a8c, + 0x01c36ae4, 0xd6ebe1f9, 0x90d4f869, 0xa65cdea0, 0x3f09252d, 0xc208e69f, + 0xb74e6132, 0xce77e25b, 0x578fdfe3, 0x3ac372e6, +} + +var p = [18]uint32{ + 0x243f6a88, 0x85a308d3, 0x13198a2e, 0x03707344, 0xa4093822, 0x299f31d0, + 0x082efa98, 0xec4e6c89, 0x452821e6, 0x38d01377, 0xbe5466cf, 0x34e90c6c, + 0xc0ac29b7, 0xc97c50dd, 0x3f84d5b5, 0xb5470917, 0x9216d5d9, 0x8979fb1b, +} diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_arm64.go b/vendor/golang.org/x/crypto/chacha20/chacha_arm64.go new file mode 100644 index 00000000000..87f1e369cc2 --- /dev/null +++ b/vendor/golang.org/x/crypto/chacha20/chacha_arm64.go @@ -0,0 +1,17 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.11 +// +build !gccgo,!appengine + +package chacha20 + +const bufSize = 256 + +//go:noescape +func xorKeyStreamVX(dst, src []byte, key *[8]uint32, nonce *[3]uint32, counter *uint32) + +func (c *Cipher) xorKeyStreamBlocks(dst, src []byte) { + xorKeyStreamVX(dst, src, &c.key, &c.nonce, &c.counter) +} diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_arm64.s b/vendor/golang.org/x/crypto/chacha20/chacha_arm64.s new file mode 100644 index 00000000000..b3a16ef751a --- /dev/null +++ b/vendor/golang.org/x/crypto/chacha20/chacha_arm64.s @@ -0,0 +1,308 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build go1.11 +// +build !gccgo,!appengine + +#include "textflag.h" + +#define NUM_ROUNDS 10 + +// func xorKeyStreamVX(dst, src []byte, key *[8]uint32, nonce *[3]uint32, counter *uint32) +TEXT ·xorKeyStreamVX(SB), NOSPLIT, $0 + MOVD dst+0(FP), R1 + MOVD src+24(FP), R2 + MOVD src_len+32(FP), R3 + MOVD key+48(FP), R4 + MOVD nonce+56(FP), R6 + MOVD counter+64(FP), R7 + + MOVD $·constants(SB), R10 + MOVD $·incRotMatrix(SB), R11 + + MOVW (R7), R20 + + AND $~255, R3, R13 + ADD R2, R13, R12 // R12 for block end + AND $255, R3, R13 +loop: + MOVD $NUM_ROUNDS, R21 + VLD1 (R11), [V30.S4, V31.S4] + + // load contants + // VLD4R (R10), [V0.S4, V1.S4, V2.S4, V3.S4] + WORD $0x4D60E940 + + // load keys + // VLD4R 16(R4), [V4.S4, V5.S4, V6.S4, V7.S4] + WORD $0x4DFFE884 + // VLD4R 16(R4), [V8.S4, V9.S4, V10.S4, V11.S4] + WORD $0x4DFFE888 + SUB $32, R4 + + // load counter + nonce + // VLD1R (R7), [V12.S4] + WORD $0x4D40C8EC + + // VLD3R (R6), [V13.S4, V14.S4, V15.S4] + WORD $0x4D40E8CD + + // update counter + VADD V30.S4, V12.S4, V12.S4 + +chacha: + // V0..V3 += V4..V7 + // V12..V15 <<<= ((V12..V15 XOR V0..V3), 16) + VADD V0.S4, V4.S4, V0.S4 + VADD V1.S4, V5.S4, V1.S4 + VADD V2.S4, V6.S4, V2.S4 + VADD V3.S4, V7.S4, V3.S4 + VEOR V12.B16, V0.B16, V12.B16 + VEOR V13.B16, V1.B16, V13.B16 + VEOR V14.B16, V2.B16, V14.B16 + VEOR V15.B16, V3.B16, V15.B16 + VREV32 V12.H8, V12.H8 + VREV32 V13.H8, V13.H8 + VREV32 V14.H8, V14.H8 + VREV32 V15.H8, V15.H8 + // V8..V11 += V12..V15 + // V4..V7 <<<= ((V4..V7 XOR V8..V11), 12) + VADD V8.S4, V12.S4, V8.S4 + VADD V9.S4, V13.S4, V9.S4 + VADD V10.S4, V14.S4, V10.S4 + VADD V11.S4, V15.S4, V11.S4 + VEOR V8.B16, V4.B16, V16.B16 + VEOR V9.B16, V5.B16, V17.B16 + VEOR V10.B16, V6.B16, V18.B16 + VEOR V11.B16, V7.B16, V19.B16 + VSHL $12, V16.S4, V4.S4 + VSHL $12, V17.S4, V5.S4 + VSHL $12, V18.S4, V6.S4 + VSHL $12, V19.S4, V7.S4 + VSRI $20, V16.S4, V4.S4 + VSRI $20, V17.S4, V5.S4 + VSRI $20, V18.S4, V6.S4 + VSRI $20, V19.S4, V7.S4 + + // V0..V3 += V4..V7 + // V12..V15 <<<= ((V12..V15 XOR V0..V3), 8) + VADD V0.S4, V4.S4, V0.S4 + VADD V1.S4, V5.S4, V1.S4 + VADD V2.S4, V6.S4, V2.S4 + VADD V3.S4, V7.S4, V3.S4 + VEOR V12.B16, V0.B16, V12.B16 + VEOR V13.B16, V1.B16, V13.B16 + VEOR V14.B16, V2.B16, V14.B16 + VEOR V15.B16, V3.B16, V15.B16 + VTBL V31.B16, [V12.B16], V12.B16 + VTBL V31.B16, [V13.B16], V13.B16 + VTBL V31.B16, [V14.B16], V14.B16 + VTBL V31.B16, [V15.B16], V15.B16 + + // V8..V11 += V12..V15 + // V4..V7 <<<= ((V4..V7 XOR V8..V11), 7) + VADD V12.S4, V8.S4, V8.S4 + VADD V13.S4, V9.S4, V9.S4 + VADD V14.S4, V10.S4, V10.S4 + VADD V15.S4, V11.S4, V11.S4 + VEOR V8.B16, V4.B16, V16.B16 + VEOR V9.B16, V5.B16, V17.B16 + VEOR V10.B16, V6.B16, V18.B16 + VEOR V11.B16, V7.B16, V19.B16 + VSHL $7, V16.S4, V4.S4 + VSHL $7, V17.S4, V5.S4 + VSHL $7, V18.S4, V6.S4 + VSHL $7, V19.S4, V7.S4 + VSRI $25, V16.S4, V4.S4 + VSRI $25, V17.S4, V5.S4 + VSRI $25, V18.S4, V6.S4 + VSRI $25, V19.S4, V7.S4 + + // V0..V3 += V5..V7, V4 + // V15,V12-V14 <<<= ((V15,V12-V14 XOR V0..V3), 16) + VADD V0.S4, V5.S4, V0.S4 + VADD V1.S4, V6.S4, V1.S4 + VADD V2.S4, V7.S4, V2.S4 + VADD V3.S4, V4.S4, V3.S4 + VEOR V15.B16, V0.B16, V15.B16 + VEOR V12.B16, V1.B16, V12.B16 + VEOR V13.B16, V2.B16, V13.B16 + VEOR V14.B16, V3.B16, V14.B16 + VREV32 V12.H8, V12.H8 + VREV32 V13.H8, V13.H8 + VREV32 V14.H8, V14.H8 + VREV32 V15.H8, V15.H8 + + // V10 += V15; V5 <<<= ((V10 XOR V5), 12) + // ... 
+ VADD V15.S4, V10.S4, V10.S4 + VADD V12.S4, V11.S4, V11.S4 + VADD V13.S4, V8.S4, V8.S4 + VADD V14.S4, V9.S4, V9.S4 + VEOR V10.B16, V5.B16, V16.B16 + VEOR V11.B16, V6.B16, V17.B16 + VEOR V8.B16, V7.B16, V18.B16 + VEOR V9.B16, V4.B16, V19.B16 + VSHL $12, V16.S4, V5.S4 + VSHL $12, V17.S4, V6.S4 + VSHL $12, V18.S4, V7.S4 + VSHL $12, V19.S4, V4.S4 + VSRI $20, V16.S4, V5.S4 + VSRI $20, V17.S4, V6.S4 + VSRI $20, V18.S4, V7.S4 + VSRI $20, V19.S4, V4.S4 + + // V0 += V5; V15 <<<= ((V0 XOR V15), 8) + // ... + VADD V5.S4, V0.S4, V0.S4 + VADD V6.S4, V1.S4, V1.S4 + VADD V7.S4, V2.S4, V2.S4 + VADD V4.S4, V3.S4, V3.S4 + VEOR V0.B16, V15.B16, V15.B16 + VEOR V1.B16, V12.B16, V12.B16 + VEOR V2.B16, V13.B16, V13.B16 + VEOR V3.B16, V14.B16, V14.B16 + VTBL V31.B16, [V12.B16], V12.B16 + VTBL V31.B16, [V13.B16], V13.B16 + VTBL V31.B16, [V14.B16], V14.B16 + VTBL V31.B16, [V15.B16], V15.B16 + + // V10 += V15; V5 <<<= ((V10 XOR V5), 7) + // ... + VADD V15.S4, V10.S4, V10.S4 + VADD V12.S4, V11.S4, V11.S4 + VADD V13.S4, V8.S4, V8.S4 + VADD V14.S4, V9.S4, V9.S4 + VEOR V10.B16, V5.B16, V16.B16 + VEOR V11.B16, V6.B16, V17.B16 + VEOR V8.B16, V7.B16, V18.B16 + VEOR V9.B16, V4.B16, V19.B16 + VSHL $7, V16.S4, V5.S4 + VSHL $7, V17.S4, V6.S4 + VSHL $7, V18.S4, V7.S4 + VSHL $7, V19.S4, V4.S4 + VSRI $25, V16.S4, V5.S4 + VSRI $25, V17.S4, V6.S4 + VSRI $25, V18.S4, V7.S4 + VSRI $25, V19.S4, V4.S4 + + SUB $1, R21 + CBNZ R21, chacha + + // VLD4R (R10), [V16.S4, V17.S4, V18.S4, V19.S4] + WORD $0x4D60E950 + + // VLD4R 16(R4), [V20.S4, V21.S4, V22.S4, V23.S4] + WORD $0x4DFFE894 + VADD V30.S4, V12.S4, V12.S4 + VADD V16.S4, V0.S4, V0.S4 + VADD V17.S4, V1.S4, V1.S4 + VADD V18.S4, V2.S4, V2.S4 + VADD V19.S4, V3.S4, V3.S4 + // VLD4R 16(R4), [V24.S4, V25.S4, V26.S4, V27.S4] + WORD $0x4DFFE898 + // restore R4 + SUB $32, R4 + + // load counter + nonce + // VLD1R (R7), [V28.S4] + WORD $0x4D40C8FC + // VLD3R (R6), [V29.S4, V30.S4, V31.S4] + WORD $0x4D40E8DD + + VADD V20.S4, V4.S4, V4.S4 + VADD V21.S4, V5.S4, V5.S4 + VADD V22.S4, V6.S4, V6.S4 + VADD V23.S4, V7.S4, V7.S4 + VADD V24.S4, V8.S4, V8.S4 + VADD V25.S4, V9.S4, V9.S4 + VADD V26.S4, V10.S4, V10.S4 + VADD V27.S4, V11.S4, V11.S4 + VADD V28.S4, V12.S4, V12.S4 + VADD V29.S4, V13.S4, V13.S4 + VADD V30.S4, V14.S4, V14.S4 + VADD V31.S4, V15.S4, V15.S4 + + VZIP1 V1.S4, V0.S4, V16.S4 + VZIP2 V1.S4, V0.S4, V17.S4 + VZIP1 V3.S4, V2.S4, V18.S4 + VZIP2 V3.S4, V2.S4, V19.S4 + VZIP1 V5.S4, V4.S4, V20.S4 + VZIP2 V5.S4, V4.S4, V21.S4 + VZIP1 V7.S4, V6.S4, V22.S4 + VZIP2 V7.S4, V6.S4, V23.S4 + VZIP1 V9.S4, V8.S4, V24.S4 + VZIP2 V9.S4, V8.S4, V25.S4 + VZIP1 V11.S4, V10.S4, V26.S4 + VZIP2 V11.S4, V10.S4, V27.S4 + VZIP1 V13.S4, V12.S4, V28.S4 + VZIP2 V13.S4, V12.S4, V29.S4 + VZIP1 V15.S4, V14.S4, V30.S4 + VZIP2 V15.S4, V14.S4, V31.S4 + VZIP1 V18.D2, V16.D2, V0.D2 + VZIP2 V18.D2, V16.D2, V4.D2 + VZIP1 V19.D2, V17.D2, V8.D2 + VZIP2 V19.D2, V17.D2, V12.D2 + VLD1.P 64(R2), [V16.B16, V17.B16, V18.B16, V19.B16] + + VZIP1 V22.D2, V20.D2, V1.D2 + VZIP2 V22.D2, V20.D2, V5.D2 + VZIP1 V23.D2, V21.D2, V9.D2 + VZIP2 V23.D2, V21.D2, V13.D2 + VLD1.P 64(R2), [V20.B16, V21.B16, V22.B16, V23.B16] + VZIP1 V26.D2, V24.D2, V2.D2 + VZIP2 V26.D2, V24.D2, V6.D2 + VZIP1 V27.D2, V25.D2, V10.D2 + VZIP2 V27.D2, V25.D2, V14.D2 + VLD1.P 64(R2), [V24.B16, V25.B16, V26.B16, V27.B16] + VZIP1 V30.D2, V28.D2, V3.D2 + VZIP2 V30.D2, V28.D2, V7.D2 + VZIP1 V31.D2, V29.D2, V11.D2 + VZIP2 V31.D2, V29.D2, V15.D2 + VLD1.P 64(R2), [V28.B16, V29.B16, V30.B16, V31.B16] + VEOR V0.B16, V16.B16, V16.B16 + VEOR V1.B16, V17.B16, V17.B16 + VEOR V2.B16, 
V18.B16, V18.B16 + VEOR V3.B16, V19.B16, V19.B16 + VST1.P [V16.B16, V17.B16, V18.B16, V19.B16], 64(R1) + VEOR V4.B16, V20.B16, V20.B16 + VEOR V5.B16, V21.B16, V21.B16 + VEOR V6.B16, V22.B16, V22.B16 + VEOR V7.B16, V23.B16, V23.B16 + VST1.P [V20.B16, V21.B16, V22.B16, V23.B16], 64(R1) + VEOR V8.B16, V24.B16, V24.B16 + VEOR V9.B16, V25.B16, V25.B16 + VEOR V10.B16, V26.B16, V26.B16 + VEOR V11.B16, V27.B16, V27.B16 + VST1.P [V24.B16, V25.B16, V26.B16, V27.B16], 64(R1) + VEOR V12.B16, V28.B16, V28.B16 + VEOR V13.B16, V29.B16, V29.B16 + VEOR V14.B16, V30.B16, V30.B16 + VEOR V15.B16, V31.B16, V31.B16 + VST1.P [V28.B16, V29.B16, V30.B16, V31.B16], 64(R1) + + ADD $4, R20 + MOVW R20, (R7) // update counter + + CMP R2, R12 + BGT loop + + RET + + +DATA ·constants+0x00(SB)/4, $0x61707865 +DATA ·constants+0x04(SB)/4, $0x3320646e +DATA ·constants+0x08(SB)/4, $0x79622d32 +DATA ·constants+0x0c(SB)/4, $0x6b206574 +GLOBL ·constants(SB), NOPTR|RODATA, $32 + +DATA ·incRotMatrix+0x00(SB)/4, $0x00000000 +DATA ·incRotMatrix+0x04(SB)/4, $0x00000001 +DATA ·incRotMatrix+0x08(SB)/4, $0x00000002 +DATA ·incRotMatrix+0x0c(SB)/4, $0x00000003 +DATA ·incRotMatrix+0x10(SB)/4, $0x02010003 +DATA ·incRotMatrix+0x14(SB)/4, $0x06050407 +DATA ·incRotMatrix+0x18(SB)/4, $0x0A09080B +DATA ·incRotMatrix+0x1c(SB)/4, $0x0E0D0C0F +GLOBL ·incRotMatrix(SB), NOPTR|RODATA, $32 diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_generic.go b/vendor/golang.org/x/crypto/chacha20/chacha_generic.go new file mode 100644 index 00000000000..098ec9f6be0 --- /dev/null +++ b/vendor/golang.org/x/crypto/chacha20/chacha_generic.go @@ -0,0 +1,364 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package chacha20 implements the ChaCha20 and XChaCha20 encryption algorithms +// as specified in RFC 8439 and draft-irtf-cfrg-xchacha-01. +package chacha20 + +import ( + "crypto/cipher" + "encoding/binary" + "errors" + "math/bits" + + "golang.org/x/crypto/internal/subtle" +) + +const ( + // KeySize is the size of the key used by this cipher, in bytes. + KeySize = 32 + + // NonceSize is the size of the nonce used with the standard variant of this + // cipher, in bytes. + // + // Note that this is too short to be safely generated at random if the same + // key is reused more than 2³² times. + NonceSize = 12 + + // NonceSizeX is the size of the nonce used with the XChaCha20 variant of + // this cipher, in bytes. + NonceSizeX = 24 +) + +// Cipher is a stateful instance of ChaCha20 or XChaCha20 using a particular key +// and nonce. A *Cipher implements the cipher.Stream interface. +type Cipher struct { + // The ChaCha20 state is 16 words: 4 constant, 8 of key, 1 of counter + // (incremented after each block), and 3 of nonce. + key [8]uint32 + counter uint32 + nonce [3]uint32 + + // The last len bytes of buf are leftover key stream bytes from the previous + // XORKeyStream invocation. The size of buf depends on how many blocks are + // computed at a time. + buf [bufSize]byte + len int + + // The counter-independent results of the first round are cached after they + // are computed the first time. + precompDone bool + p1, p5, p9, p13 uint32 + p2, p6, p10, p14 uint32 + p3, p7, p11, p15 uint32 +} + +var _ cipher.Stream = (*Cipher)(nil) + +// NewUnauthenticatedCipher creates a new ChaCha20 stream cipher with the given +// 32 bytes key and a 12 or 24 bytes nonce. If a nonce of 24 bytes is provided, +// the XChaCha20 construction will be used. 
It returns an error if key or nonce +// have any other length. +// +// Note that ChaCha20, like all stream ciphers, is not authenticated and allows +// attackers to silently tamper with the plaintext. For this reason, it is more +// appropriate as a building block than as a standalone encryption mechanism. +// Instead, consider using package golang.org/x/crypto/chacha20poly1305. +func NewUnauthenticatedCipher(key, nonce []byte) (*Cipher, error) { + // This function is split into a wrapper so that the Cipher allocation will + // be inlined, and depending on how the caller uses the return value, won't + // escape to the heap. + c := &Cipher{} + return newUnauthenticatedCipher(c, key, nonce) +} + +func newUnauthenticatedCipher(c *Cipher, key, nonce []byte) (*Cipher, error) { + if len(key) != KeySize { + return nil, errors.New("chacha20: wrong key size") + } + if len(nonce) == NonceSizeX { + // XChaCha20 uses the ChaCha20 core to mix 16 bytes of the nonce into a + // derived key, allowing it to operate on a nonce of 24 bytes. See + // draft-irtf-cfrg-xchacha-01, Section 2.3. + key, _ = HChaCha20(key, nonce[0:16]) + cNonce := make([]byte, NonceSize) + copy(cNonce[4:12], nonce[16:24]) + nonce = cNonce + } else if len(nonce) != NonceSize { + return nil, errors.New("chacha20: wrong nonce size") + } + + c.key = [8]uint32{ + binary.LittleEndian.Uint32(key[0:4]), + binary.LittleEndian.Uint32(key[4:8]), + binary.LittleEndian.Uint32(key[8:12]), + binary.LittleEndian.Uint32(key[12:16]), + binary.LittleEndian.Uint32(key[16:20]), + binary.LittleEndian.Uint32(key[20:24]), + binary.LittleEndian.Uint32(key[24:28]), + binary.LittleEndian.Uint32(key[28:32]), + } + c.nonce = [3]uint32{ + binary.LittleEndian.Uint32(nonce[0:4]), + binary.LittleEndian.Uint32(nonce[4:8]), + binary.LittleEndian.Uint32(nonce[8:12]), + } + return c, nil +} + +// The constant first 4 words of the ChaCha20 state. +const ( + j0 uint32 = 0x61707865 // expa + j1 uint32 = 0x3320646e // nd 3 + j2 uint32 = 0x79622d32 // 2-by + j3 uint32 = 0x6b206574 // te k +) + +const blockSize = 64 + +// quarterRound is the core of ChaCha20. It shuffles the bits of 4 state words. +// It's executed 4 times for each of the 20 ChaCha20 rounds, operating on all 16 +// words each round, in columnar or diagonal groups of 4 at a time. +func quarterRound(a, b, c, d uint32) (uint32, uint32, uint32, uint32) { + a += b + d ^= a + d = bits.RotateLeft32(d, 16) + c += d + b ^= c + b = bits.RotateLeft32(b, 12) + a += b + d ^= a + d = bits.RotateLeft32(d, 8) + c += d + b ^= c + b = bits.RotateLeft32(b, 7) + return a, b, c, d +} + +// XORKeyStream XORs each byte in the given slice with a byte from the +// cipher's key stream. Dst and src must overlap entirely or not at all. +// +// If len(dst) < len(src), XORKeyStream will panic. It is acceptable +// to pass a dst bigger than src, and in that case, XORKeyStream will +// only update dst[:len(src)] and will not touch the rest of dst. +// +// Multiple calls to XORKeyStream behave as if the concatenation of +// the src buffers was passed in a single run. That is, Cipher +// maintains state and does not reset at each XORKeyStream call. +func (s *Cipher) XORKeyStream(dst, src []byte) { + if len(src) == 0 { + return + } + if len(dst) < len(src) { + panic("chacha20: output smaller than input") + } + dst = dst[:len(src)] + if subtle.InexactOverlap(dst, src) { + panic("chacha20: invalid buffer overlap") + } + + // First, drain any remaining key stream from a previous XORKeyStream. 
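+ // The unused key stream always occupies the tail of s.buf, so the last
+ // s.len bytes are consumed here before any new blocks are generated.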
+ if s.len != 0 { + keyStream := s.buf[bufSize-s.len:] + if len(src) < len(keyStream) { + keyStream = keyStream[:len(src)] + } + _ = src[len(keyStream)-1] // bounds check elimination hint + for i, b := range keyStream { + dst[i] = src[i] ^ b + } + s.len -= len(keyStream) + src = src[len(keyStream):] + dst = dst[len(keyStream):] + } + + const blocksPerBuf = bufSize / blockSize + numBufs := (uint64(len(src)) + bufSize - 1) / bufSize + if uint64(s.counter)+numBufs*blocksPerBuf >= 1<<32 { + panic("chacha20: counter overflow") + } + + // xorKeyStreamBlocks implementations expect input lengths that are a + // multiple of bufSize. Platform-specific ones process multiple blocks at a + // time, so have bufSizes that are a multiple of blockSize. + + rem := len(src) % bufSize + full := len(src) - rem + + if full > 0 { + s.xorKeyStreamBlocks(dst[:full], src[:full]) + } + + // If we have a partial (multi-)block, pad it for xorKeyStreamBlocks, and + // keep the leftover keystream for the next XORKeyStream invocation. + if rem > 0 { + s.buf = [bufSize]byte{} + copy(s.buf[:], src[full:]) + s.xorKeyStreamBlocks(s.buf[:], s.buf[:]) + s.len = bufSize - copy(dst[full:], s.buf[:]) + } +} + +func (s *Cipher) xorKeyStreamBlocksGeneric(dst, src []byte) { + if len(dst) != len(src) || len(dst)%blockSize != 0 { + panic("chacha20: internal error: wrong dst and/or src length") + } + + // To generate each block of key stream, the initial cipher state + // (represented below) is passed through 20 rounds of shuffling, + // alternatively applying quarterRounds by columns (like 1, 5, 9, 13) + // or by diagonals (like 1, 6, 11, 12). + // + // 0:cccccccc 1:cccccccc 2:cccccccc 3:cccccccc + // 4:kkkkkkkk 5:kkkkkkkk 6:kkkkkkkk 7:kkkkkkkk + // 8:kkkkkkkk 9:kkkkkkkk 10:kkkkkkkk 11:kkkkkkkk + // 12:bbbbbbbb 13:nnnnnnnn 14:nnnnnnnn 15:nnnnnnnn + // + // c=constant k=key b=blockcount n=nonce + var ( + c0, c1, c2, c3 = j0, j1, j2, j3 + c4, c5, c6, c7 = s.key[0], s.key[1], s.key[2], s.key[3] + c8, c9, c10, c11 = s.key[4], s.key[5], s.key[6], s.key[7] + _, c13, c14, c15 = s.counter, s.nonce[0], s.nonce[1], s.nonce[2] + ) + + // Three quarters of the first round don't depend on the counter, so we can + // calculate them here, and reuse them for multiple blocks in the loop, and + // for future XORKeyStream invocations. + if !s.precompDone { + s.p1, s.p5, s.p9, s.p13 = quarterRound(c1, c5, c9, c13) + s.p2, s.p6, s.p10, s.p14 = quarterRound(c2, c6, c10, c14) + s.p3, s.p7, s.p11, s.p15 = quarterRound(c3, c7, c11, c15) + s.precompDone = true + } + + for i := 0; i < len(src); i += blockSize { + // The remainder of the first column round. + fcr0, fcr4, fcr8, fcr12 := quarterRound(c0, c4, c8, s.counter) + + // The second diagonal round. + x0, x5, x10, x15 := quarterRound(fcr0, s.p5, s.p10, s.p15) + x1, x6, x11, x12 := quarterRound(s.p1, s.p6, s.p11, fcr12) + x2, x7, x8, x13 := quarterRound(s.p2, s.p7, fcr8, s.p13) + x3, x4, x9, x14 := quarterRound(s.p3, fcr4, s.p9, s.p14) + + // The remaining 18 rounds. + for i := 0; i < 9; i++ { + // Column round. + x0, x4, x8, x12 = quarterRound(x0, x4, x8, x12) + x1, x5, x9, x13 = quarterRound(x1, x5, x9, x13) + x2, x6, x10, x14 = quarterRound(x2, x6, x10, x14) + x3, x7, x11, x15 = quarterRound(x3, x7, x11, x15) + + // Diagonal round. + x0, x5, x10, x15 = quarterRound(x0, x5, x10, x15) + x1, x6, x11, x12 = quarterRound(x1, x6, x11, x12) + x2, x7, x8, x13 = quarterRound(x2, x7, x8, x13) + x3, x4, x9, x14 = quarterRound(x3, x4, x9, x14) + } + + // Finally, add back the initial state to generate the key stream. 
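+ // Word 12 adds the live s.counter rather than a cached value, since
+ // the counter changes for every block.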
+ x0 += c0 + x1 += c1 + x2 += c2 + x3 += c3 + x4 += c4 + x5 += c5 + x6 += c6 + x7 += c7 + x8 += c8 + x9 += c9 + x10 += c10 + x11 += c11 + x12 += s.counter + x13 += c13 + x14 += c14 + x15 += c15 + + s.counter += 1 + if s.counter == 0 { + panic("chacha20: internal error: counter overflow") + } + + in, out := src[i:], dst[i:] + in, out = in[:blockSize], out[:blockSize] // bounds check elimination hint + + // XOR the key stream with the source and write out the result. + xor(out[0:], in[0:], x0) + xor(out[4:], in[4:], x1) + xor(out[8:], in[8:], x2) + xor(out[12:], in[12:], x3) + xor(out[16:], in[16:], x4) + xor(out[20:], in[20:], x5) + xor(out[24:], in[24:], x6) + xor(out[28:], in[28:], x7) + xor(out[32:], in[32:], x8) + xor(out[36:], in[36:], x9) + xor(out[40:], in[40:], x10) + xor(out[44:], in[44:], x11) + xor(out[48:], in[48:], x12) + xor(out[52:], in[52:], x13) + xor(out[56:], in[56:], x14) + xor(out[60:], in[60:], x15) + } +} + +// HChaCha20 uses the ChaCha20 core to generate a derived key from a 32 bytes +// key and a 16 bytes nonce. It returns an error if key or nonce have any other +// length. It is used as part of the XChaCha20 construction. +func HChaCha20(key, nonce []byte) ([]byte, error) { + // This function is split into a wrapper so that the slice allocation will + // be inlined, and depending on how the caller uses the return value, won't + // escape to the heap. + out := make([]byte, 32) + return hChaCha20(out, key, nonce) +} + +func hChaCha20(out, key, nonce []byte) ([]byte, error) { + if len(key) != KeySize { + return nil, errors.New("chacha20: wrong HChaCha20 key size") + } + if len(nonce) != 16 { + return nil, errors.New("chacha20: wrong HChaCha20 nonce size") + } + + x0, x1, x2, x3 := j0, j1, j2, j3 + x4 := binary.LittleEndian.Uint32(key[0:4]) + x5 := binary.LittleEndian.Uint32(key[4:8]) + x6 := binary.LittleEndian.Uint32(key[8:12]) + x7 := binary.LittleEndian.Uint32(key[12:16]) + x8 := binary.LittleEndian.Uint32(key[16:20]) + x9 := binary.LittleEndian.Uint32(key[20:24]) + x10 := binary.LittleEndian.Uint32(key[24:28]) + x11 := binary.LittleEndian.Uint32(key[28:32]) + x12 := binary.LittleEndian.Uint32(nonce[0:4]) + x13 := binary.LittleEndian.Uint32(nonce[4:8]) + x14 := binary.LittleEndian.Uint32(nonce[8:12]) + x15 := binary.LittleEndian.Uint32(nonce[12:16]) + + for i := 0; i < 10; i++ { + // Diagonal round. + x0, x4, x8, x12 = quarterRound(x0, x4, x8, x12) + x1, x5, x9, x13 = quarterRound(x1, x5, x9, x13) + x2, x6, x10, x14 = quarterRound(x2, x6, x10, x14) + x3, x7, x11, x15 = quarterRound(x3, x7, x11, x15) + + // Column round. + x0, x5, x10, x15 = quarterRound(x0, x5, x10, x15) + x1, x6, x11, x12 = quarterRound(x1, x6, x11, x12) + x2, x7, x8, x13 = quarterRound(x2, x7, x8, x13) + x3, x4, x9, x14 = quarterRound(x3, x4, x9, x14) + } + + _ = out[31] // bounds check elimination hint + binary.LittleEndian.PutUint32(out[0:4], x0) + binary.LittleEndian.PutUint32(out[4:8], x1) + binary.LittleEndian.PutUint32(out[8:12], x2) + binary.LittleEndian.PutUint32(out[12:16], x3) + binary.LittleEndian.PutUint32(out[16:20], x12) + binary.LittleEndian.PutUint32(out[20:24], x13) + binary.LittleEndian.PutUint32(out[24:28], x14) + binary.LittleEndian.PutUint32(out[28:32], x15) + return out, nil +} diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_noasm.go b/vendor/golang.org/x/crypto/chacha20/chacha_noasm.go new file mode 100644 index 00000000000..ec609ed868b --- /dev/null +++ b/vendor/golang.org/x/crypto/chacha20/chacha_noasm.go @@ -0,0 +1,13 @@ +// Copyright 2018 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !arm64,!s390x,!ppc64le arm64,!go1.11 gccgo appengine + +package chacha20 + +const bufSize = blockSize + +func (s *Cipher) xorKeyStreamBlocks(dst, src []byte) { + s.xorKeyStreamBlocksGeneric(dst, src) +} diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.go b/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.go new file mode 100644 index 00000000000..d0ec61f08d9 --- /dev/null +++ b/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.go @@ -0,0 +1,16 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !gccgo,!appengine + +package chacha20 + +const bufSize = 256 + +//go:noescape +func chaCha20_ctr32_vsx(out, inp *byte, len int, key *[8]uint32, counter *uint32) + +func (c *Cipher) xorKeyStreamBlocks(dst, src []byte) { + chaCha20_ctr32_vsx(&dst[0], &src[0], len(src), &c.key, &c.counter) +} diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.s b/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.s new file mode 100644 index 00000000000..533014ea3e8 --- /dev/null +++ b/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.s @@ -0,0 +1,449 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Based on CRYPTOGAMS code with the following comment: +// # ==================================================================== +// # Written by Andy Polyakov for the OpenSSL +// # project. The module is, however, dual licensed under OpenSSL and +// # CRYPTOGAMS licenses depending on where you obtain it. For further +// # details see http://www.openssl.org/~appro/cryptogams/. +// # ==================================================================== + +// Code for the perl script that generates the ppc64 assembler +// can be found in the cryptogams repository at the link below. It is based on +// the original from openssl. + +// https://github.com/dot-asm/cryptogams/commit/a60f5b50ed908e91 + +// The differences in this and the original implementation are +// due to the calling conventions and initialization of constants. 
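Both the generic Go code in chacha_generic.go and the vector kernels vendored below are built from the same RFC 8439 quarter round. As a self-contained sketch for reviewers (plain Go, not part of the vendored files), the primitive and its RFC 8439, Section 2.1.1 test vector are:

    package main

    import "fmt"

    // quarterRound mirrors the helper the vendored chacha20 package uses:
    // four add/xor/rotate steps with rotations of 16, 12, 8, and 7 bits.
    func quarterRound(a, b, c, d uint32) (uint32, uint32, uint32, uint32) {
    	a += b
    	d ^= a
    	d = d<<16 | d>>16
    	c += d
    	b ^= c
    	b = b<<12 | b>>20
    	a += b
    	d ^= a
    	d = d<<8 | d>>24
    	c += d
    	b ^= c
    	b = b<<7 | b>>25
    	return a, b, c, d
    }

    func main() {
    	// Test vector from RFC 8439, Section 2.1.1.
    	a, b, c, d := quarterRound(0x11111111, 0x01020304, 0x9b8d6f43, 0x01234567)
    	fmt.Printf("%08x %08x %08x %08x\n", a, b, c, d)
    	// Expected: ea2a92f4 cb1cf8ce 4581472e 5881c4bb
    }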
+ +// +build !gccgo,!appengine + +#include "textflag.h" + +#define OUT R3 +#define INP R4 +#define LEN R5 +#define KEY R6 +#define CNT R7 +#define TMP R15 + +#define CONSTBASE R16 +#define BLOCKS R17 + +DATA consts<>+0x00(SB)/8, $0x3320646e61707865 +DATA consts<>+0x08(SB)/8, $0x6b20657479622d32 +DATA consts<>+0x10(SB)/8, $0x0000000000000001 +DATA consts<>+0x18(SB)/8, $0x0000000000000000 +DATA consts<>+0x20(SB)/8, $0x0000000000000004 +DATA consts<>+0x28(SB)/8, $0x0000000000000000 +DATA consts<>+0x30(SB)/8, $0x0a0b08090e0f0c0d +DATA consts<>+0x38(SB)/8, $0x0203000106070405 +DATA consts<>+0x40(SB)/8, $0x090a0b080d0e0f0c +DATA consts<>+0x48(SB)/8, $0x0102030005060704 +DATA consts<>+0x50(SB)/8, $0x6170786561707865 +DATA consts<>+0x58(SB)/8, $0x6170786561707865 +DATA consts<>+0x60(SB)/8, $0x3320646e3320646e +DATA consts<>+0x68(SB)/8, $0x3320646e3320646e +DATA consts<>+0x70(SB)/8, $0x79622d3279622d32 +DATA consts<>+0x78(SB)/8, $0x79622d3279622d32 +DATA consts<>+0x80(SB)/8, $0x6b2065746b206574 +DATA consts<>+0x88(SB)/8, $0x6b2065746b206574 +DATA consts<>+0x90(SB)/8, $0x0000000100000000 +DATA consts<>+0x98(SB)/8, $0x0000000300000002 +GLOBL consts<>(SB), RODATA, $0xa0 + +//func chaCha20_ctr32_vsx(out, inp *byte, len int, key *[8]uint32, counter *uint32) +TEXT ·chaCha20_ctr32_vsx(SB),NOSPLIT,$64-40 + MOVD out+0(FP), OUT + MOVD inp+8(FP), INP + MOVD len+16(FP), LEN + MOVD key+24(FP), KEY + MOVD counter+32(FP), CNT + + // Addressing for constants + MOVD $consts<>+0x00(SB), CONSTBASE + MOVD $16, R8 + MOVD $32, R9 + MOVD $48, R10 + MOVD $64, R11 + SRD $6, LEN, BLOCKS + // V16 + LXVW4X (CONSTBASE)(R0), VS48 + ADD $80,CONSTBASE + + // Load key into V17,V18 + LXVW4X (KEY)(R0), VS49 + LXVW4X (KEY)(R8), VS50 + + // Load CNT, NONCE into V19 + LXVW4X (CNT)(R0), VS51 + + // Clear V27 + VXOR V27, V27, V27 + + // V28 + LXVW4X (CONSTBASE)(R11), VS60 + + // splat slot from V19 -> V26 + VSPLTW $0, V19, V26 + + VSLDOI $4, V19, V27, V19 + VSLDOI $12, V27, V19, V19 + + VADDUWM V26, V28, V26 + + MOVD $10, R14 + MOVD R14, CTR + +loop_outer_vsx: + // V0, V1, V2, V3 + LXVW4X (R0)(CONSTBASE), VS32 + LXVW4X (R8)(CONSTBASE), VS33 + LXVW4X (R9)(CONSTBASE), VS34 + LXVW4X (R10)(CONSTBASE), VS35 + + // splat values from V17, V18 into V4-V11 + VSPLTW $0, V17, V4 + VSPLTW $1, V17, V5 + VSPLTW $2, V17, V6 + VSPLTW $3, V17, V7 + VSPLTW $0, V18, V8 + VSPLTW $1, V18, V9 + VSPLTW $2, V18, V10 + VSPLTW $3, V18, V11 + + // VOR + VOR V26, V26, V12 + + // splat values from V19 -> V13, V14, V15 + VSPLTW $1, V19, V13 + VSPLTW $2, V19, V14 + VSPLTW $3, V19, V15 + + // splat const values + VSPLTISW $-16, V27 + VSPLTISW $12, V28 + VSPLTISW $8, V29 + VSPLTISW $7, V30 + +loop_vsx: + VADDUWM V0, V4, V0 + VADDUWM V1, V5, V1 + VADDUWM V2, V6, V2 + VADDUWM V3, V7, V3 + + VXOR V12, V0, V12 + VXOR V13, V1, V13 + VXOR V14, V2, V14 + VXOR V15, V3, V15 + + VRLW V12, V27, V12 + VRLW V13, V27, V13 + VRLW V14, V27, V14 + VRLW V15, V27, V15 + + VADDUWM V8, V12, V8 + VADDUWM V9, V13, V9 + VADDUWM V10, V14, V10 + VADDUWM V11, V15, V11 + + VXOR V4, V8, V4 + VXOR V5, V9, V5 + VXOR V6, V10, V6 + VXOR V7, V11, V7 + + VRLW V4, V28, V4 + VRLW V5, V28, V5 + VRLW V6, V28, V6 + VRLW V7, V28, V7 + + VADDUWM V0, V4, V0 + VADDUWM V1, V5, V1 + VADDUWM V2, V6, V2 + VADDUWM V3, V7, V3 + + VXOR V12, V0, V12 + VXOR V13, V1, V13 + VXOR V14, V2, V14 + VXOR V15, V3, V15 + + VRLW V12, V29, V12 + VRLW V13, V29, V13 + VRLW V14, V29, V14 + VRLW V15, V29, V15 + + VADDUWM V8, V12, V8 + VADDUWM V9, V13, V9 + VADDUWM V10, V14, V10 + VADDUWM V11, V15, V11 + + VXOR V4, V8, V4 + VXOR V5, V9, 
V5 + VXOR V6, V10, V6 + VXOR V7, V11, V7 + + VRLW V4, V30, V4 + VRLW V5, V30, V5 + VRLW V6, V30, V6 + VRLW V7, V30, V7 + + VADDUWM V0, V5, V0 + VADDUWM V1, V6, V1 + VADDUWM V2, V7, V2 + VADDUWM V3, V4, V3 + + VXOR V15, V0, V15 + VXOR V12, V1, V12 + VXOR V13, V2, V13 + VXOR V14, V3, V14 + + VRLW V15, V27, V15 + VRLW V12, V27, V12 + VRLW V13, V27, V13 + VRLW V14, V27, V14 + + VADDUWM V10, V15, V10 + VADDUWM V11, V12, V11 + VADDUWM V8, V13, V8 + VADDUWM V9, V14, V9 + + VXOR V5, V10, V5 + VXOR V6, V11, V6 + VXOR V7, V8, V7 + VXOR V4, V9, V4 + + VRLW V5, V28, V5 + VRLW V6, V28, V6 + VRLW V7, V28, V7 + VRLW V4, V28, V4 + + VADDUWM V0, V5, V0 + VADDUWM V1, V6, V1 + VADDUWM V2, V7, V2 + VADDUWM V3, V4, V3 + + VXOR V15, V0, V15 + VXOR V12, V1, V12 + VXOR V13, V2, V13 + VXOR V14, V3, V14 + + VRLW V15, V29, V15 + VRLW V12, V29, V12 + VRLW V13, V29, V13 + VRLW V14, V29, V14 + + VADDUWM V10, V15, V10 + VADDUWM V11, V12, V11 + VADDUWM V8, V13, V8 + VADDUWM V9, V14, V9 + + VXOR V5, V10, V5 + VXOR V6, V11, V6 + VXOR V7, V8, V7 + VXOR V4, V9, V4 + + VRLW V5, V30, V5 + VRLW V6, V30, V6 + VRLW V7, V30, V7 + VRLW V4, V30, V4 + BC 16, LT, loop_vsx + + VADDUWM V12, V26, V12 + + WORD $0x13600F8C // VMRGEW V0, V1, V27 + WORD $0x13821F8C // VMRGEW V2, V3, V28 + + WORD $0x10000E8C // VMRGOW V0, V1, V0 + WORD $0x10421E8C // VMRGOW V2, V3, V2 + + WORD $0x13A42F8C // VMRGEW V4, V5, V29 + WORD $0x13C63F8C // VMRGEW V6, V7, V30 + + XXPERMDI VS32, VS34, $0, VS33 + XXPERMDI VS32, VS34, $3, VS35 + XXPERMDI VS59, VS60, $0, VS32 + XXPERMDI VS59, VS60, $3, VS34 + + WORD $0x10842E8C // VMRGOW V4, V5, V4 + WORD $0x10C63E8C // VMRGOW V6, V7, V6 + + WORD $0x13684F8C // VMRGEW V8, V9, V27 + WORD $0x138A5F8C // VMRGEW V10, V11, V28 + + XXPERMDI VS36, VS38, $0, VS37 + XXPERMDI VS36, VS38, $3, VS39 + XXPERMDI VS61, VS62, $0, VS36 + XXPERMDI VS61, VS62, $3, VS38 + + WORD $0x11084E8C // VMRGOW V8, V9, V8 + WORD $0x114A5E8C // VMRGOW V10, V11, V10 + + WORD $0x13AC6F8C // VMRGEW V12, V13, V29 + WORD $0x13CE7F8C // VMRGEW V14, V15, V30 + + XXPERMDI VS40, VS42, $0, VS41 + XXPERMDI VS40, VS42, $3, VS43 + XXPERMDI VS59, VS60, $0, VS40 + XXPERMDI VS59, VS60, $3, VS42 + + WORD $0x118C6E8C // VMRGOW V12, V13, V12 + WORD $0x11CE7E8C // VMRGOW V14, V15, V14 + + VSPLTISW $4, V27 + VADDUWM V26, V27, V26 + + XXPERMDI VS44, VS46, $0, VS45 + XXPERMDI VS44, VS46, $3, VS47 + XXPERMDI VS61, VS62, $0, VS44 + XXPERMDI VS61, VS62, $3, VS46 + + VADDUWM V0, V16, V0 + VADDUWM V4, V17, V4 + VADDUWM V8, V18, V8 + VADDUWM V12, V19, V12 + + CMPU LEN, $64 + BLT tail_vsx + + // Bottom of loop + LXVW4X (INP)(R0), VS59 + LXVW4X (INP)(R8), VS60 + LXVW4X (INP)(R9), VS61 + LXVW4X (INP)(R10), VS62 + + VXOR V27, V0, V27 + VXOR V28, V4, V28 + VXOR V29, V8, V29 + VXOR V30, V12, V30 + + STXVW4X VS59, (OUT)(R0) + STXVW4X VS60, (OUT)(R8) + ADD $64, INP + STXVW4X VS61, (OUT)(R9) + ADD $-64, LEN + STXVW4X VS62, (OUT)(R10) + ADD $64, OUT + BEQ done_vsx + + VADDUWM V1, V16, V0 + VADDUWM V5, V17, V4 + VADDUWM V9, V18, V8 + VADDUWM V13, V19, V12 + + CMPU LEN, $64 + BLT tail_vsx + + LXVW4X (INP)(R0), VS59 + LXVW4X (INP)(R8), VS60 + LXVW4X (INP)(R9), VS61 + LXVW4X (INP)(R10), VS62 + VXOR V27, V0, V27 + + VXOR V28, V4, V28 + VXOR V29, V8, V29 + VXOR V30, V12, V30 + + STXVW4X VS59, (OUT)(R0) + STXVW4X VS60, (OUT)(R8) + ADD $64, INP + STXVW4X VS61, (OUT)(R9) + ADD $-64, LEN + STXVW4X VS62, (OUT)(R10) + ADD $64, OUT + BEQ done_vsx + + VADDUWM V2, V16, V0 + VADDUWM V6, V17, V4 + VADDUWM V10, V18, V8 + VADDUWM V14, V19, V12 + + CMPU LEN, $64 + BLT tail_vsx + + LXVW4X
(INP)(R8), VS60 + LXVW4X (INP)(R9), VS61 + LXVW4X (INP)(R10), VS62 + + VXOR V27, V0, V27 + VXOR V28, V4, V28 + VXOR V29, V8, V29 + VXOR V30, V12, V30 + + STXVW4X VS59, (OUT)(R0) + STXVW4X VS60, (OUT)(R8) + ADD $64, INP + STXVW4X VS61, (OUT)(R9) + ADD $-64, LEN + STXVW4X VS62, (OUT)(R10) + ADD $64, OUT + BEQ done_vsx + + VADDUWM V3, V16, V0 + VADDUWM V7, V17, V4 + VADDUWM V11, V18, V8 + VADDUWM V15, V19, V12 + + CMPU LEN, $64 + BLT tail_vsx + + LXVW4X (INP)(R0), VS59 + LXVW4X (INP)(R8), VS60 + LXVW4X (INP)(R9), VS61 + LXVW4X (INP)(R10), VS62 + + VXOR V27, V0, V27 + VXOR V28, V4, V28 + VXOR V29, V8, V29 + VXOR V30, V12, V30 + + STXVW4X VS59, (OUT)(R0) + STXVW4X VS60, (OUT)(R8) + ADD $64, INP + STXVW4X VS61, (OUT)(R9) + ADD $-64, LEN + STXVW4X VS62, (OUT)(R10) + ADD $64, OUT + + MOVD $10, R14 + MOVD R14, CTR + BNE loop_outer_vsx + +done_vsx: + // Increment counter by number of 64 byte blocks + MOVD (CNT), R14 + ADD BLOCKS, R14 + MOVD R14, (CNT) + RET + +tail_vsx: + ADD $32, R1, R11 + MOVD LEN, CTR + + // Save values on stack to copy from + STXVW4X VS32, (R11)(R0) + STXVW4X VS36, (R11)(R8) + STXVW4X VS40, (R11)(R9) + STXVW4X VS44, (R11)(R10) + ADD $-1, R11, R12 + ADD $-1, INP + ADD $-1, OUT + +looptail_vsx: + // Copying the result to OUT + // in bytes. + MOVBZU 1(R12), KEY + MOVBZU 1(INP), TMP + XOR KEY, TMP, KEY + MOVBU KEY, 1(OUT) + BC 16, LT, looptail_vsx + + // Clear the stack values + STXVW4X VS48, (R11)(R0) + STXVW4X VS48, (R11)(R8) + STXVW4X VS48, (R11)(R9) + STXVW4X VS48, (R11)(R10) + BR done_vsx diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_s390x.go b/vendor/golang.org/x/crypto/chacha20/chacha_s390x.go new file mode 100644 index 00000000000..cd55f45a333 --- /dev/null +++ b/vendor/golang.org/x/crypto/chacha20/chacha_s390x.go @@ -0,0 +1,26 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !gccgo,!appengine + +package chacha20 + +import "golang.org/x/sys/cpu" + +var haveAsm = cpu.S390X.HasVX + +const bufSize = 256 + +// xorKeyStreamVX is an assembly implementation of XORKeyStream. It must only +// be called when the vector facility is available. Implementation in asm_s390x.s. +//go:noescape +func xorKeyStreamVX(dst, src []byte, key *[8]uint32, nonce *[3]uint32, counter *uint32) + +func (c *Cipher) xorKeyStreamBlocks(dst, src []byte) { + if cpu.S390X.HasVX { + xorKeyStreamVX(dst, src, &c.key, &c.nonce, &c.counter) + } else { + c.xorKeyStreamBlocksGeneric(dst, src) + } +} diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_s390x.s b/vendor/golang.org/x/crypto/chacha20/chacha_s390x.s new file mode 100644 index 00000000000..de52a2ea8d1 --- /dev/null +++ b/vendor/golang.org/x/crypto/chacha20/chacha_s390x.s @@ -0,0 +1,224 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !gccgo,!appengine + +#include "go_asm.h" +#include "textflag.h" + +// This is an implementation of the ChaCha20 encryption algorithm as +// specified in RFC 7539. It uses vector instructions to compute +// 4 keystream blocks in parallel (256 bytes) which are then XORed +// with the bytes in the input slice. 
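The kernel below accordingly produces 256 bytes of keystream per iteration (four 64-byte blocks, one per vector lane), which is why bufSize is 256 on s390x. For reviewers who want to see how this code path is reached, here is a hedged usage sketch of the vendored package; it assumes the exported NewUnauthenticatedCipher constructor defined earlier in chacha_generic.go, outside this excerpt:

    package main

    import (
    	"fmt"

    	"golang.org/x/crypto/chacha20"
    )

    func main() {
    	key := make([]byte, chacha20.KeySize)     // 32 bytes; use crypto/rand in real code
    	nonce := make([]byte, chacha20.NonceSize) // 12 bytes; must be unique per key
    	c, err := chacha20.NewUnauthenticatedCipher(key, nonce)
    	if err != nil {
    		panic(err)
    	}
    	msg := []byte("attack at dawn")
    	ct := make([]byte, len(msg))
    	// XORKeyStream dispatches to xorKeyStreamVX when the vector
    	// facility is available, and to the generic Go code otherwise.
    	c.XORKeyStream(ct, msg)
    	fmt.Printf("%x\n", ct)
    }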
+ +GLOBL ·constants<>(SB), RODATA|NOPTR, $32 +// BSWAP: swap bytes in each 4-byte element +DATA ·constants<>+0x00(SB)/4, $0x03020100 +DATA ·constants<>+0x04(SB)/4, $0x07060504 +DATA ·constants<>+0x08(SB)/4, $0x0b0a0908 +DATA ·constants<>+0x0c(SB)/4, $0x0f0e0d0c +// J0: [j0, j1, j2, j3] +DATA ·constants<>+0x10(SB)/4, $0x61707865 +DATA ·constants<>+0x14(SB)/4, $0x3320646e +DATA ·constants<>+0x18(SB)/4, $0x79622d32 +DATA ·constants<>+0x1c(SB)/4, $0x6b206574 + +#define BSWAP V5 +#define J0 V6 +#define KEY0 V7 +#define KEY1 V8 +#define NONCE V9 +#define CTR V10 +#define M0 V11 +#define M1 V12 +#define M2 V13 +#define M3 V14 +#define INC V15 +#define X0 V16 +#define X1 V17 +#define X2 V18 +#define X3 V19 +#define X4 V20 +#define X5 V21 +#define X6 V22 +#define X7 V23 +#define X8 V24 +#define X9 V25 +#define X10 V26 +#define X11 V27 +#define X12 V28 +#define X13 V29 +#define X14 V30 +#define X15 V31 + +#define NUM_ROUNDS 20 + +#define ROUND4(a0, a1, a2, a3, b0, b1, b2, b3, c0, c1, c2, c3, d0, d1, d2, d3) \ + VAF a1, a0, a0 \ + VAF b1, b0, b0 \ + VAF c1, c0, c0 \ + VAF d1, d0, d0 \ + VX a0, a2, a2 \ + VX b0, b2, b2 \ + VX c0, c2, c2 \ + VX d0, d2, d2 \ + VERLLF $16, a2, a2 \ + VERLLF $16, b2, b2 \ + VERLLF $16, c2, c2 \ + VERLLF $16, d2, d2 \ + VAF a2, a3, a3 \ + VAF b2, b3, b3 \ + VAF c2, c3, c3 \ + VAF d2, d3, d3 \ + VX a3, a1, a1 \ + VX b3, b1, b1 \ + VX c3, c1, c1 \ + VX d3, d1, d1 \ + VERLLF $12, a1, a1 \ + VERLLF $12, b1, b1 \ + VERLLF $12, c1, c1 \ + VERLLF $12, d1, d1 \ + VAF a1, a0, a0 \ + VAF b1, b0, b0 \ + VAF c1, c0, c0 \ + VAF d1, d0, d0 \ + VX a0, a2, a2 \ + VX b0, b2, b2 \ + VX c0, c2, c2 \ + VX d0, d2, d2 \ + VERLLF $8, a2, a2 \ + VERLLF $8, b2, b2 \ + VERLLF $8, c2, c2 \ + VERLLF $8, d2, d2 \ + VAF a2, a3, a3 \ + VAF b2, b3, b3 \ + VAF c2, c3, c3 \ + VAF d2, d3, d3 \ + VX a3, a1, a1 \ + VX b3, b1, b1 \ + VX c3, c1, c1 \ + VX d3, d1, d1 \ + VERLLF $7, a1, a1 \ + VERLLF $7, b1, b1 \ + VERLLF $7, c1, c1 \ + VERLLF $7, d1, d1 + +#define PERMUTE(mask, v0, v1, v2, v3) \ + VPERM v0, v0, mask, v0 \ + VPERM v1, v1, mask, v1 \ + VPERM v2, v2, mask, v2 \ + VPERM v3, v3, mask, v3 + +#define ADDV(x, v0, v1, v2, v3) \ + VAF x, v0, v0 \ + VAF x, v1, v1 \ + VAF x, v2, v2 \ + VAF x, v3, v3 + +#define XORV(off, dst, src, v0, v1, v2, v3) \ + VLM off(src), M0, M3 \ + PERMUTE(BSWAP, v0, v1, v2, v3) \ + VX v0, M0, M0 \ + VX v1, M1, M1 \ + VX v2, M2, M2 \ + VX v3, M3, M3 \ + VSTM M0, M3, off(dst) + +#define SHUFFLE(a, b, c, d, t, u, v, w) \ + VMRHF a, c, t \ // t = {a[0], c[0], a[1], c[1]} + VMRHF b, d, u \ // u = {b[0], d[0], b[1], d[1]} + VMRLF a, c, v \ // v = {a[2], c[2], a[3], c[3]} + VMRLF b, d, w \ // w = {b[2], d[2], b[3], d[3]} + VMRHF t, u, a \ // a = {a[0], b[0], c[0], d[0]} + VMRLF t, u, b \ // b = {a[1], b[1], c[1], d[1]} + VMRHF v, w, c \ // c = {a[2], b[2], c[2], d[2]} + VMRLF v, w, d // d = {a[3], b[3], c[3], d[3]} + +// func xorKeyStreamVX(dst, src []byte, key *[8]uint32, nonce *[3]uint32, counter *uint32) +TEXT ·xorKeyStreamVX(SB), NOSPLIT, $0 + MOVD $·constants<>(SB), R1 + MOVD dst+0(FP), R2 // R2=&dst[0] + LMG src+24(FP), R3, R4 // R3=&src[0] R4=len(src) + MOVD key+48(FP), R5 // R5=key + MOVD nonce+56(FP), R6 // R6=nonce + MOVD counter+64(FP), R7 // R7=counter + + // load BSWAP and J0 + VLM (R1), BSWAP, J0 + + // setup + MOVD $95, R0 + VLM (R5), KEY0, KEY1 + VLL R0, (R6), NONCE + VZERO M0 + VLEIB $7, $32, M0 + VSRLB M0, NONCE, NONCE + + // initialize counter values + VLREPF (R7), CTR + VZERO INC + VLEIF $1, $1, INC + VLEIF $2, $2, INC + VLEIF $3, $3, INC + VAF INC, CTR, CTR + VREPIF 
$4, INC + +chacha: + VREPF $0, J0, X0 + VREPF $1, J0, X1 + VREPF $2, J0, X2 + VREPF $3, J0, X3 + VREPF $0, KEY0, X4 + VREPF $1, KEY0, X5 + VREPF $2, KEY0, X6 + VREPF $3, KEY0, X7 + VREPF $0, KEY1, X8 + VREPF $1, KEY1, X9 + VREPF $2, KEY1, X10 + VREPF $3, KEY1, X11 + VLR CTR, X12 + VREPF $1, NONCE, X13 + VREPF $2, NONCE, X14 + VREPF $3, NONCE, X15 + + MOVD $(NUM_ROUNDS/2), R1 + +loop: + ROUND4(X0, X4, X12, X8, X1, X5, X13, X9, X2, X6, X14, X10, X3, X7, X15, X11) + ROUND4(X0, X5, X15, X10, X1, X6, X12, X11, X2, X7, X13, X8, X3, X4, X14, X9) + + ADD $-1, R1 + BNE loop + + // decrement length + ADD $-256, R4 + + // rearrange vectors + SHUFFLE(X0, X1, X2, X3, M0, M1, M2, M3) + ADDV(J0, X0, X1, X2, X3) + SHUFFLE(X4, X5, X6, X7, M0, M1, M2, M3) + ADDV(KEY0, X4, X5, X6, X7) + SHUFFLE(X8, X9, X10, X11, M0, M1, M2, M3) + ADDV(KEY1, X8, X9, X10, X11) + VAF CTR, X12, X12 + SHUFFLE(X12, X13, X14, X15, M0, M1, M2, M3) + ADDV(NONCE, X12, X13, X14, X15) + + // increment counters + VAF INC, CTR, CTR + + // xor keystream with plaintext + XORV(0*64, R2, R3, X0, X4, X8, X12) + XORV(1*64, R2, R3, X1, X5, X9, X13) + XORV(2*64, R2, R3, X2, X6, X10, X14) + XORV(3*64, R2, R3, X3, X7, X11, X15) + + // increment pointers + MOVD $256(R2), R2 + MOVD $256(R3), R3 + + CMPBNE R4, $0, chacha + + VSTEF $0, CTR, (R7) + RET diff --git a/vendor/golang.org/x/crypto/chacha20/xor.go b/vendor/golang.org/x/crypto/chacha20/xor.go new file mode 100644 index 00000000000..0110c9865af --- /dev/null +++ b/vendor/golang.org/x/crypto/chacha20/xor.go @@ -0,0 +1,41 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package chacha20 + +import "runtime" + +// Platforms that have fast unaligned 32-bit little endian accesses. +const unaligned = runtime.GOARCH == "386" || + runtime.GOARCH == "amd64" || + runtime.GOARCH == "arm64" || + runtime.GOARCH == "ppc64le" || + runtime.GOARCH == "s390x" + +// xor reads a little endian uint32 from src, XORs it with u and +// places the result in little endian byte order in dst. +func xor(dst, src []byte, u uint32) { + _, _ = src[3], dst[3] // eliminate bounds checks + if unaligned { + // The compiler should optimize this code into + // 32-bit unaligned little endian loads and stores. + // TODO: delete once the compiler does a reliably + // good job with the generic code below. + // See issue #25111 for more details. + v := uint32(src[0]) + v |= uint32(src[1]) << 8 + v |= uint32(src[2]) << 16 + v |= uint32(src[3]) << 24 + v ^= u + dst[0] = byte(v) + dst[1] = byte(v >> 8) + dst[2] = byte(v >> 16) + dst[3] = byte(v >> 24) + } else { + dst[0] = src[0] ^ byte(u) + dst[1] = src[1] ^ byte(u>>8) + dst[2] = src[2] ^ byte(u>>16) + dst[3] = src[3] ^ byte(u>>24) + } +} diff --git a/vendor/golang.org/x/crypto/curve25519/curve25519.go b/vendor/golang.org/x/crypto/curve25519/curve25519.go new file mode 100644 index 00000000000..4b9a655d1b5 --- /dev/null +++ b/vendor/golang.org/x/crypto/curve25519/curve25519.go @@ -0,0 +1,95 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package curve25519 provides an implementation of the X25519 function, which +// performs scalar multiplication on the elliptic curve known as Curve25519. +// See RFC 7748.
+package curve25519 // import "golang.org/x/crypto/curve25519" + +import ( + "crypto/subtle" + "fmt" +) + +// ScalarMult sets dst to the product scalar * point. +// +// Deprecated: when provided a low-order point, ScalarMult will set dst to all +// zeroes, irrespective of the scalar. Instead, use the X25519 function, which +// will return an error. +func ScalarMult(dst, scalar, point *[32]byte) { + scalarMult(dst, scalar, point) +} + +// ScalarBaseMult sets dst to the product scalar * base where base is the +// standard generator. +// +// It is recommended to use the X25519 function with Basepoint instead, as +// copying into fixed size arrays can lead to unexpected bugs. +func ScalarBaseMult(dst, scalar *[32]byte) { + ScalarMult(dst, scalar, &basePoint) +} + +const ( + // ScalarSize is the size of the scalar input to X25519. + ScalarSize = 32 + // PointSize is the size of the point input to X25519. + PointSize = 32 +) + +// Basepoint is the canonical Curve25519 generator. +var Basepoint []byte + +var basePoint = [32]byte{9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + +func init() { Basepoint = basePoint[:] } + +func checkBasepoint() { + if subtle.ConstantTimeCompare(Basepoint, []byte{ + 0x09, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + }) != 1 { + panic("curve25519: global Basepoint value was modified") + } +} + +// X25519 returns the result of the scalar multiplication (scalar * point), +// according to RFC 7748, Section 5. scalar, point and the return value are +// slices of 32 bytes. +// +// scalar can be generated at random, for example with crypto/rand. point should +// be either Basepoint or the output of another X25519 call. +// +// If point is Basepoint (but not if it's a different slice with the same +// contents) a precomputed implementation might be used for performance. +func X25519(scalar, point []byte) ([]byte, error) { + // Outline the body of function, to let the allocation be inlined in the + // caller, and possibly avoid escaping to the heap. + var dst [32]byte + return x25519(&dst, scalar, point) +} + +func x25519(dst *[32]byte, scalar, point []byte) ([]byte, error) { + var in [32]byte + if l := len(scalar); l != 32 { + return nil, fmt.Errorf("bad scalar length: %d, expected %d", l, 32) + } + if l := len(point); l != 32 { + return nil, fmt.Errorf("bad point length: %d, expected %d", l, 32) + } + copy(in[:], scalar) + if &point[0] == &Basepoint[0] { + checkBasepoint() + ScalarBaseMult(dst, &in) + } else { + var base, zero [32]byte + copy(base[:], point) + ScalarMult(dst, &in, &base) + if subtle.ConstantTimeCompare(dst[:], zero[:]) == 1 { + return nil, fmt.Errorf("bad input point: low order point") + } + } + return dst[:], nil +} diff --git a/vendor/golang.org/x/crypto/curve25519/curve25519_amd64.go b/vendor/golang.org/x/crypto/curve25519/curve25519_amd64.go new file mode 100644 index 00000000000..5120b779b9b --- /dev/null +++ b/vendor/golang.org/x/crypto/curve25519/curve25519_amd64.go @@ -0,0 +1,240 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build amd64,!gccgo,!appengine,!purego + +package curve25519 + +// These functions are implemented in the .s files. 
The names of the functions +// in the rest of the file are also taken from the SUPERCOP sources to help +// people following along. + +//go:noescape + +func cswap(inout *[5]uint64, v uint64) + +//go:noescape + +func ladderstep(inout *[5][5]uint64) + +//go:noescape + +func freeze(inout *[5]uint64) + +//go:noescape + +func mul(dest, a, b *[5]uint64) + +//go:noescape + +func square(out, in *[5]uint64) + +// mladder uses a Montgomery ladder to calculate (xr/zr) *= s. +func mladder(xr, zr *[5]uint64, s *[32]byte) { + var work [5][5]uint64 + + work[0] = *xr + setint(&work[1], 1) + setint(&work[2], 0) + work[3] = *xr + setint(&work[4], 1) + + j := uint(6) + var prevbit byte + + for i := 31; i >= 0; i-- { + for j < 8 { + bit := ((*s)[i] >> j) & 1 + swap := bit ^ prevbit + prevbit = bit + cswap(&work[1], uint64(swap)) + ladderstep(&work) + j-- + } + j = 7 + } + + *xr = work[1] + *zr = work[2] +} + +func scalarMult(out, in, base *[32]byte) { + var e [32]byte + copy(e[:], (*in)[:]) + e[0] &= 248 + e[31] &= 127 + e[31] |= 64 + + var t, z [5]uint64 + unpack(&t, base) + mladder(&t, &z, &e) + invert(&z, &z) + mul(&t, &t, &z) + pack(out, &t) +} + +func setint(r *[5]uint64, v uint64) { + r[0] = v + r[1] = 0 + r[2] = 0 + r[3] = 0 + r[4] = 0 +} + +// unpack sets r = x where r consists of 5, 51-bit limbs in little-endian +// order. +func unpack(r *[5]uint64, x *[32]byte) { + r[0] = uint64(x[0]) | + uint64(x[1])<<8 | + uint64(x[2])<<16 | + uint64(x[3])<<24 | + uint64(x[4])<<32 | + uint64(x[5])<<40 | + uint64(x[6]&7)<<48 + + r[1] = uint64(x[6])>>3 | + uint64(x[7])<<5 | + uint64(x[8])<<13 | + uint64(x[9])<<21 | + uint64(x[10])<<29 | + uint64(x[11])<<37 | + uint64(x[12]&63)<<45 + + r[2] = uint64(x[12])>>6 | + uint64(x[13])<<2 | + uint64(x[14])<<10 | + uint64(x[15])<<18 | + uint64(x[16])<<26 | + uint64(x[17])<<34 | + uint64(x[18])<<42 | + uint64(x[19]&1)<<50 + + r[3] = uint64(x[19])>>1 | + uint64(x[20])<<7 | + uint64(x[21])<<15 | + uint64(x[22])<<23 | + uint64(x[23])<<31 | + uint64(x[24])<<39 | + uint64(x[25]&15)<<47 + + r[4] = uint64(x[25])>>4 | + uint64(x[26])<<4 | + uint64(x[27])<<12 | + uint64(x[28])<<20 | + uint64(x[29])<<28 | + uint64(x[30])<<36 | + uint64(x[31]&127)<<44 +} + +// pack sets out = x where out is the usual, little-endian form of the 5, +// 51-bit limbs in x. +func pack(out *[32]byte, x *[5]uint64) { + t := *x + freeze(&t) + + out[0] = byte(t[0]) + out[1] = byte(t[0] >> 8) + out[2] = byte(t[0] >> 16) + out[3] = byte(t[0] >> 24) + out[4] = byte(t[0] >> 32) + out[5] = byte(t[0] >> 40) + out[6] = byte(t[0] >> 48) + + out[6] ^= byte(t[1]<<3) & 0xf8 + out[7] = byte(t[1] >> 5) + out[8] = byte(t[1] >> 13) + out[9] = byte(t[1] >> 21) + out[10] = byte(t[1] >> 29) + out[11] = byte(t[1] >> 37) + out[12] = byte(t[1] >> 45) + + out[12] ^= byte(t[2]<<6) & 0xc0 + out[13] = byte(t[2] >> 2) + out[14] = byte(t[2] >> 10) + out[15] = byte(t[2] >> 18) + out[16] = byte(t[2] >> 26) + out[17] = byte(t[2] >> 34) + out[18] = byte(t[2] >> 42) + out[19] = byte(t[2] >> 50) + + out[19] ^= byte(t[3]<<1) & 0xfe + out[20] = byte(t[3] >> 7) + out[21] = byte(t[3] >> 15) + out[22] = byte(t[3] >> 23) + out[23] = byte(t[3] >> 31) + out[24] = byte(t[3] >> 39) + out[25] = byte(t[3] >> 47) + + out[25] ^= byte(t[4]<<4) & 0xf0 + out[26] = byte(t[4] >> 4) + out[27] = byte(t[4] >> 12) + out[28] = byte(t[4] >> 20) + out[29] = byte(t[4] >> 28) + out[30] = byte(t[4] >> 36) + out[31] = byte(t[4] >> 44) +} + +// invert calculates r = x^-1 mod p using Fermat's little theorem. 
+func invert(r *[5]uint64, x *[5]uint64) { + var z2, z9, z11, z2_5_0, z2_10_0, z2_20_0, z2_50_0, z2_100_0, t [5]uint64 + + square(&z2, x) /* 2 */ + square(&t, &z2) /* 4 */ + square(&t, &t) /* 8 */ + mul(&z9, &t, x) /* 9 */ + mul(&z11, &z9, &z2) /* 11 */ + square(&t, &z11) /* 22 */ + mul(&z2_5_0, &t, &z9) /* 2^5 - 2^0 = 31 */ + + square(&t, &z2_5_0) /* 2^6 - 2^1 */ + for i := 1; i < 5; i++ { /* 2^10 - 2^5 */ + square(&t, &t) + } + mul(&z2_10_0, &t, &z2_5_0) /* 2^10 - 2^0 */ + + square(&t, &z2_10_0) /* 2^11 - 2^1 */ + for i := 1; i < 10; i++ { /* 2^20 - 2^10 */ + square(&t, &t) + } + mul(&z2_20_0, &t, &z2_10_0) /* 2^20 - 2^0 */ + + square(&t, &z2_20_0) /* 2^21 - 2^1 */ + for i := 1; i < 20; i++ { /* 2^40 - 2^20 */ + square(&t, &t) + } + mul(&t, &t, &z2_20_0) /* 2^40 - 2^0 */ + + square(&t, &t) /* 2^41 - 2^1 */ + for i := 1; i < 10; i++ { /* 2^50 - 2^10 */ + square(&t, &t) + } + mul(&z2_50_0, &t, &z2_10_0) /* 2^50 - 2^0 */ + + square(&t, &z2_50_0) /* 2^51 - 2^1 */ + for i := 1; i < 50; i++ { /* 2^100 - 2^50 */ + square(&t, &t) + } + mul(&z2_100_0, &t, &z2_50_0) /* 2^100 - 2^0 */ + + square(&t, &z2_100_0) /* 2^101 - 2^1 */ + for i := 1; i < 100; i++ { /* 2^200 - 2^100 */ + square(&t, &t) + } + mul(&t, &t, &z2_100_0) /* 2^200 - 2^0 */ + + square(&t, &t) /* 2^201 - 2^1 */ + for i := 1; i < 50; i++ { /* 2^250 - 2^50 */ + square(&t, &t) + } + mul(&t, &t, &z2_50_0) /* 2^250 - 2^0 */ + + square(&t, &t) /* 2^251 - 2^1 */ + square(&t, &t) /* 2^252 - 2^2 */ + square(&t, &t) /* 2^253 - 2^3 */ + square(&t, &t) /* 2^254 - 2^4 */ + square(&t, &t) /* 2^255 - 2^5 */ + mul(r, &t, &z11) /* 2^255 - 21 */ +} diff --git a/vendor/golang.org/x/crypto/curve25519/curve25519_amd64.s b/vendor/golang.org/x/crypto/curve25519/curve25519_amd64.s new file mode 100644 index 00000000000..0250c888592 --- /dev/null +++ b/vendor/golang.org/x/crypto/curve25519/curve25519_amd64.s @@ -0,0 +1,1793 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This code was translated into a form compatible with 6a from the public +// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html + +// +build amd64,!gccgo,!appengine,!purego + +#define REDMASK51 0x0007FFFFFFFFFFFF + +// These constants cannot be encoded in non-MOVQ immediates. +// We access them directly from memory instead.
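As a sanity check on the three memory constants defined next (a standalone sketch, not part of the vendored files): ·_121666_213 holds 121666<<13, where 121666 = (A+2)/4 for the Curve25519 coefficient A = 486662, and the pre-shift is undone by the SHRQ $13 that follows each MULQ against it in ladderstep; ·_2P0 and ·_2P1234 hold 2*(2^51-19) and 2*(2^51-1), i.e. twice the per-limb value of the prime, added before limb subtractions so intermediate limbs cannot underflow:

    package main

    import "fmt"

    func main() {
    	fmt.Println(uint64(121666)<<13 == 996687872)           // ·_121666_213
    	fmt.Println(2*((uint64(1)<<51)-19) == 0xFFFFFFFFFFFDA) // ·_2P0, lowest limb
    	fmt.Println(2*((uint64(1)<<51)-1) == 0xFFFFFFFFFFFFE)  // ·_2P1234, upper limbs
    	// All three comparisons print true.
    }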
+ +DATA ·_121666_213(SB)/8, $996687872 +GLOBL ·_121666_213(SB), 8, $8 + +DATA ·_2P0(SB)/8, $0xFFFFFFFFFFFDA +GLOBL ·_2P0(SB), 8, $8 + +DATA ·_2P1234(SB)/8, $0xFFFFFFFFFFFFE +GLOBL ·_2P1234(SB), 8, $8 + +// func freeze(inout *[5]uint64) +TEXT ·freeze(SB),7,$0-8 + MOVQ inout+0(FP), DI + + MOVQ 0(DI),SI + MOVQ 8(DI),DX + MOVQ 16(DI),CX + MOVQ 24(DI),R8 + MOVQ 32(DI),R9 + MOVQ $REDMASK51,AX + MOVQ AX,R10 + SUBQ $18,R10 + MOVQ $3,R11 +REDUCELOOP: + MOVQ SI,R12 + SHRQ $51,R12 + ANDQ AX,SI + ADDQ R12,DX + MOVQ DX,R12 + SHRQ $51,R12 + ANDQ AX,DX + ADDQ R12,CX + MOVQ CX,R12 + SHRQ $51,R12 + ANDQ AX,CX + ADDQ R12,R8 + MOVQ R8,R12 + SHRQ $51,R12 + ANDQ AX,R8 + ADDQ R12,R9 + MOVQ R9,R12 + SHRQ $51,R12 + ANDQ AX,R9 + IMUL3Q $19,R12,R12 + ADDQ R12,SI + SUBQ $1,R11 + JA REDUCELOOP + MOVQ $1,R12 + CMPQ R10,SI + CMOVQLT R11,R12 + CMPQ AX,DX + CMOVQNE R11,R12 + CMPQ AX,CX + CMOVQNE R11,R12 + CMPQ AX,R8 + CMOVQNE R11,R12 + CMPQ AX,R9 + CMOVQNE R11,R12 + NEGQ R12 + ANDQ R12,AX + ANDQ R12,R10 + SUBQ R10,SI + SUBQ AX,DX + SUBQ AX,CX + SUBQ AX,R8 + SUBQ AX,R9 + MOVQ SI,0(DI) + MOVQ DX,8(DI) + MOVQ CX,16(DI) + MOVQ R8,24(DI) + MOVQ R9,32(DI) + RET + +// func ladderstep(inout *[5][5]uint64) +TEXT ·ladderstep(SB),0,$296-8 + MOVQ inout+0(FP),DI + + MOVQ 40(DI),SI + MOVQ 48(DI),DX + MOVQ 56(DI),CX + MOVQ 64(DI),R8 + MOVQ 72(DI),R9 + MOVQ SI,AX + MOVQ DX,R10 + MOVQ CX,R11 + MOVQ R8,R12 + MOVQ R9,R13 + ADDQ ·_2P0(SB),AX + ADDQ ·_2P1234(SB),R10 + ADDQ ·_2P1234(SB),R11 + ADDQ ·_2P1234(SB),R12 + ADDQ ·_2P1234(SB),R13 + ADDQ 80(DI),SI + ADDQ 88(DI),DX + ADDQ 96(DI),CX + ADDQ 104(DI),R8 + ADDQ 112(DI),R9 + SUBQ 80(DI),AX + SUBQ 88(DI),R10 + SUBQ 96(DI),R11 + SUBQ 104(DI),R12 + SUBQ 112(DI),R13 + MOVQ SI,0(SP) + MOVQ DX,8(SP) + MOVQ CX,16(SP) + MOVQ R8,24(SP) + MOVQ R9,32(SP) + MOVQ AX,40(SP) + MOVQ R10,48(SP) + MOVQ R11,56(SP) + MOVQ R12,64(SP) + MOVQ R13,72(SP) + MOVQ 40(SP),AX + MULQ 40(SP) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 40(SP),AX + SHLQ $1,AX + MULQ 48(SP) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 40(SP),AX + SHLQ $1,AX + MULQ 56(SP) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 40(SP),AX + SHLQ $1,AX + MULQ 64(SP) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 40(SP),AX + SHLQ $1,AX + MULQ 72(SP) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 48(SP),AX + MULQ 48(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 48(SP),AX + SHLQ $1,AX + MULQ 56(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 48(SP),AX + SHLQ $1,AX + MULQ 64(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 48(SP),DX + IMUL3Q $38,DX,AX + MULQ 72(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 56(SP),AX + MULQ 56(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 56(SP),DX + IMUL3Q $38,DX,AX + MULQ 64(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 56(SP),DX + IMUL3Q $38,DX,AX + MULQ 72(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 64(SP),DX + IMUL3Q $19,DX,AX + MULQ 64(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 64(SP),DX + IMUL3Q $38,DX,AX + MULQ 72(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 72(SP),DX + IMUL3Q $19,DX,AX + MULQ 72(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ $REDMASK51,DX + SHLQ $13,SI,CX + ANDQ DX,SI + SHLQ $13,R8,R9 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R10,R11 + ANDQ DX,R10 + ADDQ R9,R10 + SHLQ $13,R12,R13 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R14,R15 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + ANDQ DX,SI + MOVQ CX,R8 + SHRQ $51,CX + ADDQ R10,CX + ANDQ DX,R8 + MOVQ CX,R9 + SHRQ $51,CX + ADDQ R12,CX + ANDQ DX,R9 + MOVQ CX,AX + SHRQ $51,CX + ADDQ R14,CX + ANDQ DX,AX + MOVQ CX,R10 + SHRQ $51,CX + IMUL3Q $19,CX,CX + ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,80(SP) + MOVQ R8,88(SP) + MOVQ R9,96(SP) + MOVQ AX,104(SP) + 
MOVQ R10,112(SP) + MOVQ 0(SP),AX + MULQ 0(SP) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 0(SP),AX + SHLQ $1,AX + MULQ 8(SP) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 0(SP),AX + SHLQ $1,AX + MULQ 16(SP) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 0(SP),AX + SHLQ $1,AX + MULQ 24(SP) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 0(SP),AX + SHLQ $1,AX + MULQ 32(SP) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 8(SP),AX + MULQ 8(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 8(SP),AX + SHLQ $1,AX + MULQ 16(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 8(SP),AX + SHLQ $1,AX + MULQ 24(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 8(SP),DX + IMUL3Q $38,DX,AX + MULQ 32(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 16(SP),AX + MULQ 16(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 16(SP),DX + IMUL3Q $38,DX,AX + MULQ 24(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 16(SP),DX + IMUL3Q $38,DX,AX + MULQ 32(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 24(SP),DX + IMUL3Q $19,DX,AX + MULQ 24(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 24(SP),DX + IMUL3Q $38,DX,AX + MULQ 32(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 32(SP),DX + IMUL3Q $19,DX,AX + MULQ 32(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ $REDMASK51,DX + SHLQ $13,SI,CX + ANDQ DX,SI + SHLQ $13,R8,R9 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R10,R11 + ANDQ DX,R10 + ADDQ R9,R10 + SHLQ $13,R12,R13 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R14,R15 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + ANDQ DX,SI + MOVQ CX,R8 + SHRQ $51,CX + ADDQ R10,CX + ANDQ DX,R8 + MOVQ CX,R9 + SHRQ $51,CX + ADDQ R12,CX + ANDQ DX,R9 + MOVQ CX,AX + SHRQ $51,CX + ADDQ R14,CX + ANDQ DX,AX + MOVQ CX,R10 + SHRQ $51,CX + IMUL3Q $19,CX,CX + ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,120(SP) + MOVQ R8,128(SP) + MOVQ R9,136(SP) + MOVQ AX,144(SP) + MOVQ R10,152(SP) + MOVQ SI,SI + MOVQ R8,DX + MOVQ R9,CX + MOVQ AX,R8 + MOVQ R10,R9 + ADDQ ·_2P0(SB),SI + ADDQ ·_2P1234(SB),DX + ADDQ ·_2P1234(SB),CX + ADDQ ·_2P1234(SB),R8 + ADDQ ·_2P1234(SB),R9 + SUBQ 80(SP),SI + SUBQ 88(SP),DX + SUBQ 96(SP),CX + SUBQ 104(SP),R8 + SUBQ 112(SP),R9 + MOVQ SI,160(SP) + MOVQ DX,168(SP) + MOVQ CX,176(SP) + MOVQ R8,184(SP) + MOVQ R9,192(SP) + MOVQ 120(DI),SI + MOVQ 128(DI),DX + MOVQ 136(DI),CX + MOVQ 144(DI),R8 + MOVQ 152(DI),R9 + MOVQ SI,AX + MOVQ DX,R10 + MOVQ CX,R11 + MOVQ R8,R12 + MOVQ R9,R13 + ADDQ ·_2P0(SB),AX + ADDQ ·_2P1234(SB),R10 + ADDQ ·_2P1234(SB),R11 + ADDQ ·_2P1234(SB),R12 + ADDQ ·_2P1234(SB),R13 + ADDQ 160(DI),SI + ADDQ 168(DI),DX + ADDQ 176(DI),CX + ADDQ 184(DI),R8 + ADDQ 192(DI),R9 + SUBQ 160(DI),AX + SUBQ 168(DI),R10 + SUBQ 176(DI),R11 + SUBQ 184(DI),R12 + SUBQ 192(DI),R13 + MOVQ SI,200(SP) + MOVQ DX,208(SP) + MOVQ CX,216(SP) + MOVQ R8,224(SP) + MOVQ R9,232(SP) + MOVQ AX,240(SP) + MOVQ R10,248(SP) + MOVQ R11,256(SP) + MOVQ R12,264(SP) + MOVQ R13,272(SP) + MOVQ 224(SP),SI + IMUL3Q $19,SI,AX + MOVQ AX,280(SP) + MULQ 56(SP) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 232(SP),DX + IMUL3Q $19,DX,AX + MOVQ AX,288(SP) + MULQ 48(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 200(SP),AX + MULQ 40(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 200(SP),AX + MULQ 48(SP) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 200(SP),AX + MULQ 56(SP) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 200(SP),AX + MULQ 64(SP) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 200(SP),AX + MULQ 72(SP) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 208(SP),AX + MULQ 40(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 208(SP),AX + MULQ 48(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 208(SP),AX + MULQ 56(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 208(SP),AX + MULQ 64(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 208(SP),DX + IMUL3Q $19,DX,AX + MULQ 72(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 216(SP),AX + MULQ 40(SP) + 
ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 216(SP),AX + MULQ 48(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 216(SP),AX + MULQ 56(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 216(SP),DX + IMUL3Q $19,DX,AX + MULQ 64(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 216(SP),DX + IMUL3Q $19,DX,AX + MULQ 72(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 224(SP),AX + MULQ 40(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 224(SP),AX + MULQ 48(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 280(SP),AX + MULQ 64(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 280(SP),AX + MULQ 72(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 232(SP),AX + MULQ 40(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 288(SP),AX + MULQ 56(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 288(SP),AX + MULQ 64(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 288(SP),AX + MULQ 72(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ $REDMASK51,DX + SHLQ $13,SI,CX + ANDQ DX,SI + SHLQ $13,R8,R9 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R10,R11 + ANDQ DX,R10 + ADDQ R9,R10 + SHLQ $13,R12,R13 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R14,R15 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + MOVQ CX,R8 + SHRQ $51,CX + ANDQ DX,SI + ADDQ R10,CX + MOVQ CX,R9 + SHRQ $51,CX + ANDQ DX,R8 + ADDQ R12,CX + MOVQ CX,AX + SHRQ $51,CX + ANDQ DX,R9 + ADDQ R14,CX + MOVQ CX,R10 + SHRQ $51,CX + ANDQ DX,AX + IMUL3Q $19,CX,CX + ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,40(SP) + MOVQ R8,48(SP) + MOVQ R9,56(SP) + MOVQ AX,64(SP) + MOVQ R10,72(SP) + MOVQ 264(SP),SI + IMUL3Q $19,SI,AX + MOVQ AX,200(SP) + MULQ 16(SP) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 272(SP),DX + IMUL3Q $19,DX,AX + MOVQ AX,208(SP) + MULQ 8(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 240(SP),AX + MULQ 0(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 240(SP),AX + MULQ 8(SP) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 240(SP),AX + MULQ 16(SP) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 240(SP),AX + MULQ 24(SP) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 240(SP),AX + MULQ 32(SP) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 248(SP),AX + MULQ 0(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 248(SP),AX + MULQ 8(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 248(SP),AX + MULQ 16(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 248(SP),AX + MULQ 24(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 248(SP),DX + IMUL3Q $19,DX,AX + MULQ 32(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 256(SP),AX + MULQ 0(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 256(SP),AX + MULQ 8(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 256(SP),AX + MULQ 16(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 256(SP),DX + IMUL3Q $19,DX,AX + MULQ 24(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 256(SP),DX + IMUL3Q $19,DX,AX + MULQ 32(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 264(SP),AX + MULQ 0(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 264(SP),AX + MULQ 8(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 200(SP),AX + MULQ 24(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 200(SP),AX + MULQ 32(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 272(SP),AX + MULQ 0(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 208(SP),AX + MULQ 16(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 208(SP),AX + MULQ 24(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 208(SP),AX + MULQ 32(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ $REDMASK51,DX + SHLQ $13,SI,CX + ANDQ DX,SI + SHLQ $13,R8,R9 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R10,R11 + ANDQ DX,R10 + ADDQ R9,R10 + SHLQ $13,R12,R13 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R14,R15 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + MOVQ CX,R8 + SHRQ $51,CX + ANDQ DX,SI + ADDQ R10,CX + MOVQ CX,R9 + SHRQ $51,CX + ANDQ DX,R8 + ADDQ R12,CX + MOVQ CX,AX + SHRQ $51,CX + ANDQ DX,R9 + ADDQ R14,CX + MOVQ CX,R10 + SHRQ $51,CX + ANDQ DX,AX + IMUL3Q $19,CX,CX 
+ ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,DX + MOVQ R8,CX + MOVQ R9,R11 + MOVQ AX,R12 + MOVQ R10,R13 + ADDQ ·_2P0(SB),DX + ADDQ ·_2P1234(SB),CX + ADDQ ·_2P1234(SB),R11 + ADDQ ·_2P1234(SB),R12 + ADDQ ·_2P1234(SB),R13 + ADDQ 40(SP),SI + ADDQ 48(SP),R8 + ADDQ 56(SP),R9 + ADDQ 64(SP),AX + ADDQ 72(SP),R10 + SUBQ 40(SP),DX + SUBQ 48(SP),CX + SUBQ 56(SP),R11 + SUBQ 64(SP),R12 + SUBQ 72(SP),R13 + MOVQ SI,120(DI) + MOVQ R8,128(DI) + MOVQ R9,136(DI) + MOVQ AX,144(DI) + MOVQ R10,152(DI) + MOVQ DX,160(DI) + MOVQ CX,168(DI) + MOVQ R11,176(DI) + MOVQ R12,184(DI) + MOVQ R13,192(DI) + MOVQ 120(DI),AX + MULQ 120(DI) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 120(DI),AX + SHLQ $1,AX + MULQ 128(DI) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 120(DI),AX + SHLQ $1,AX + MULQ 136(DI) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 120(DI),AX + SHLQ $1,AX + MULQ 144(DI) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 120(DI),AX + SHLQ $1,AX + MULQ 152(DI) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 128(DI),AX + MULQ 128(DI) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 128(DI),AX + SHLQ $1,AX + MULQ 136(DI) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 128(DI),AX + SHLQ $1,AX + MULQ 144(DI) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 128(DI),DX + IMUL3Q $38,DX,AX + MULQ 152(DI) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 136(DI),AX + MULQ 136(DI) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 136(DI),DX + IMUL3Q $38,DX,AX + MULQ 144(DI) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 136(DI),DX + IMUL3Q $38,DX,AX + MULQ 152(DI) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 144(DI),DX + IMUL3Q $19,DX,AX + MULQ 144(DI) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 144(DI),DX + IMUL3Q $38,DX,AX + MULQ 152(DI) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 152(DI),DX + IMUL3Q $19,DX,AX + MULQ 152(DI) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ $REDMASK51,DX + SHLQ $13,SI,CX + ANDQ DX,SI + SHLQ $13,R8,R9 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R10,R11 + ANDQ DX,R10 + ADDQ R9,R10 + SHLQ $13,R12,R13 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R14,R15 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + ANDQ DX,SI + MOVQ CX,R8 + SHRQ $51,CX + ADDQ R10,CX + ANDQ DX,R8 + MOVQ CX,R9 + SHRQ $51,CX + ADDQ R12,CX + ANDQ DX,R9 + MOVQ CX,AX + SHRQ $51,CX + ADDQ R14,CX + ANDQ DX,AX + MOVQ CX,R10 + SHRQ $51,CX + IMUL3Q $19,CX,CX + ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,120(DI) + MOVQ R8,128(DI) + MOVQ R9,136(DI) + MOVQ AX,144(DI) + MOVQ R10,152(DI) + MOVQ 160(DI),AX + MULQ 160(DI) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 160(DI),AX + SHLQ $1,AX + MULQ 168(DI) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 160(DI),AX + SHLQ $1,AX + MULQ 176(DI) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 160(DI),AX + SHLQ $1,AX + MULQ 184(DI) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 160(DI),AX + SHLQ $1,AX + MULQ 192(DI) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 168(DI),AX + MULQ 168(DI) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 168(DI),AX + SHLQ $1,AX + MULQ 176(DI) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 168(DI),AX + SHLQ $1,AX + MULQ 184(DI) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 168(DI),DX + IMUL3Q $38,DX,AX + MULQ 192(DI) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 176(DI),AX + MULQ 176(DI) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 176(DI),DX + IMUL3Q $38,DX,AX + MULQ 184(DI) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 176(DI),DX + IMUL3Q $38,DX,AX + MULQ 192(DI) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 184(DI),DX + IMUL3Q $19,DX,AX + MULQ 184(DI) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 184(DI),DX + IMUL3Q $38,DX,AX + MULQ 192(DI) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 192(DI),DX + IMUL3Q $19,DX,AX + MULQ 192(DI) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ $REDMASK51,DX + SHLQ $13,SI,CX + ANDQ DX,SI + SHLQ $13,R8,R9 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R10,R11 + ANDQ DX,R10 + ADDQ R9,R10 + 
SHLQ $13,R12,R13 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R14,R15 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + ANDQ DX,SI + MOVQ CX,R8 + SHRQ $51,CX + ADDQ R10,CX + ANDQ DX,R8 + MOVQ CX,R9 + SHRQ $51,CX + ADDQ R12,CX + ANDQ DX,R9 + MOVQ CX,AX + SHRQ $51,CX + ADDQ R14,CX + ANDQ DX,AX + MOVQ CX,R10 + SHRQ $51,CX + IMUL3Q $19,CX,CX + ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,160(DI) + MOVQ R8,168(DI) + MOVQ R9,176(DI) + MOVQ AX,184(DI) + MOVQ R10,192(DI) + MOVQ 184(DI),SI + IMUL3Q $19,SI,AX + MOVQ AX,0(SP) + MULQ 16(DI) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 192(DI),DX + IMUL3Q $19,DX,AX + MOVQ AX,8(SP) + MULQ 8(DI) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 160(DI),AX + MULQ 0(DI) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 160(DI),AX + MULQ 8(DI) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 160(DI),AX + MULQ 16(DI) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 160(DI),AX + MULQ 24(DI) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 160(DI),AX + MULQ 32(DI) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 168(DI),AX + MULQ 0(DI) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 168(DI),AX + MULQ 8(DI) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 168(DI),AX + MULQ 16(DI) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 168(DI),AX + MULQ 24(DI) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 168(DI),DX + IMUL3Q $19,DX,AX + MULQ 32(DI) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 176(DI),AX + MULQ 0(DI) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 176(DI),AX + MULQ 8(DI) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 176(DI),AX + MULQ 16(DI) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 176(DI),DX + IMUL3Q $19,DX,AX + MULQ 24(DI) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 176(DI),DX + IMUL3Q $19,DX,AX + MULQ 32(DI) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 184(DI),AX + MULQ 0(DI) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 184(DI),AX + MULQ 8(DI) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 0(SP),AX + MULQ 24(DI) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 0(SP),AX + MULQ 32(DI) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 192(DI),AX + MULQ 0(DI) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 8(SP),AX + MULQ 16(DI) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 8(SP),AX + MULQ 24(DI) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 8(SP),AX + MULQ 32(DI) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ $REDMASK51,DX + SHLQ $13,SI,CX + ANDQ DX,SI + SHLQ $13,R8,R9 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R10,R11 + ANDQ DX,R10 + ADDQ R9,R10 + SHLQ $13,R12,R13 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R14,R15 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + MOVQ CX,R8 + SHRQ $51,CX + ANDQ DX,SI + ADDQ R10,CX + MOVQ CX,R9 + SHRQ $51,CX + ANDQ DX,R8 + ADDQ R12,CX + MOVQ CX,AX + SHRQ $51,CX + ANDQ DX,R9 + ADDQ R14,CX + MOVQ CX,R10 + SHRQ $51,CX + ANDQ DX,AX + IMUL3Q $19,CX,CX + ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,160(DI) + MOVQ R8,168(DI) + MOVQ R9,176(DI) + MOVQ AX,184(DI) + MOVQ R10,192(DI) + MOVQ 144(SP),SI + IMUL3Q $19,SI,AX + MOVQ AX,0(SP) + MULQ 96(SP) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 152(SP),DX + IMUL3Q $19,DX,AX + MOVQ AX,8(SP) + MULQ 88(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 120(SP),AX + MULQ 80(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 120(SP),AX + MULQ 88(SP) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 120(SP),AX + MULQ 96(SP) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 120(SP),AX + MULQ 104(SP) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 120(SP),AX + MULQ 112(SP) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 128(SP),AX + MULQ 80(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 128(SP),AX + MULQ 88(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 128(SP),AX + MULQ 96(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 128(SP),AX + MULQ 104(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 128(SP),DX + IMUL3Q $19,DX,AX + MULQ 112(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 
136(SP),AX + MULQ 80(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 136(SP),AX + MULQ 88(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 136(SP),AX + MULQ 96(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 136(SP),DX + IMUL3Q $19,DX,AX + MULQ 104(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 136(SP),DX + IMUL3Q $19,DX,AX + MULQ 112(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 144(SP),AX + MULQ 80(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 144(SP),AX + MULQ 88(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 0(SP),AX + MULQ 104(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 0(SP),AX + MULQ 112(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 152(SP),AX + MULQ 80(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 8(SP),AX + MULQ 96(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 8(SP),AX + MULQ 104(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 8(SP),AX + MULQ 112(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ $REDMASK51,DX + SHLQ $13,SI,CX + ANDQ DX,SI + SHLQ $13,R8,R9 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R10,R11 + ANDQ DX,R10 + ADDQ R9,R10 + SHLQ $13,R12,R13 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R14,R15 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + MOVQ CX,R8 + SHRQ $51,CX + ANDQ DX,SI + ADDQ R10,CX + MOVQ CX,R9 + SHRQ $51,CX + ANDQ DX,R8 + ADDQ R12,CX + MOVQ CX,AX + SHRQ $51,CX + ANDQ DX,R9 + ADDQ R14,CX + MOVQ CX,R10 + SHRQ $51,CX + ANDQ DX,AX + IMUL3Q $19,CX,CX + ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,40(DI) + MOVQ R8,48(DI) + MOVQ R9,56(DI) + MOVQ AX,64(DI) + MOVQ R10,72(DI) + MOVQ 160(SP),AX + MULQ ·_121666_213(SB) + SHRQ $13,AX + MOVQ AX,SI + MOVQ DX,CX + MOVQ 168(SP),AX + MULQ ·_121666_213(SB) + SHRQ $13,AX + ADDQ AX,CX + MOVQ DX,R8 + MOVQ 176(SP),AX + MULQ ·_121666_213(SB) + SHRQ $13,AX + ADDQ AX,R8 + MOVQ DX,R9 + MOVQ 184(SP),AX + MULQ ·_121666_213(SB) + SHRQ $13,AX + ADDQ AX,R9 + MOVQ DX,R10 + MOVQ 192(SP),AX + MULQ ·_121666_213(SB) + SHRQ $13,AX + ADDQ AX,R10 + IMUL3Q $19,DX,DX + ADDQ DX,SI + ADDQ 80(SP),SI + ADDQ 88(SP),CX + ADDQ 96(SP),R8 + ADDQ 104(SP),R9 + ADDQ 112(SP),R10 + MOVQ SI,80(DI) + MOVQ CX,88(DI) + MOVQ R8,96(DI) + MOVQ R9,104(DI) + MOVQ R10,112(DI) + MOVQ 104(DI),SI + IMUL3Q $19,SI,AX + MOVQ AX,0(SP) + MULQ 176(SP) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 112(DI),DX + IMUL3Q $19,DX,AX + MOVQ AX,8(SP) + MULQ 168(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 80(DI),AX + MULQ 160(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 80(DI),AX + MULQ 168(SP) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 80(DI),AX + MULQ 176(SP) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 80(DI),AX + MULQ 184(SP) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 80(DI),AX + MULQ 192(SP) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 88(DI),AX + MULQ 160(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 88(DI),AX + MULQ 168(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 88(DI),AX + MULQ 176(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 88(DI),AX + MULQ 184(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 88(DI),DX + IMUL3Q $19,DX,AX + MULQ 192(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 96(DI),AX + MULQ 160(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 96(DI),AX + MULQ 168(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 96(DI),AX + MULQ 176(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 96(DI),DX + IMUL3Q $19,DX,AX + MULQ 184(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 96(DI),DX + IMUL3Q $19,DX,AX + MULQ 192(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 104(DI),AX + MULQ 160(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 104(DI),AX + MULQ 168(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 0(SP),AX + MULQ 184(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 0(SP),AX + MULQ 192(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 112(DI),AX + MULQ 160(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 8(SP),AX + MULQ 176(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 
8(SP),AX + MULQ 184(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 8(SP),AX + MULQ 192(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ $REDMASK51,DX + SHLQ $13,SI,CX + ANDQ DX,SI + SHLQ $13,R8,R9 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R10,R11 + ANDQ DX,R10 + ADDQ R9,R10 + SHLQ $13,R12,R13 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R14,R15 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + MOVQ CX,R8 + SHRQ $51,CX + ANDQ DX,SI + ADDQ R10,CX + MOVQ CX,R9 + SHRQ $51,CX + ANDQ DX,R8 + ADDQ R12,CX + MOVQ CX,AX + SHRQ $51,CX + ANDQ DX,R9 + ADDQ R14,CX + MOVQ CX,R10 + SHRQ $51,CX + ANDQ DX,AX + IMUL3Q $19,CX,CX + ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,80(DI) + MOVQ R8,88(DI) + MOVQ R9,96(DI) + MOVQ AX,104(DI) + MOVQ R10,112(DI) + RET + +// func cswap(inout *[4][5]uint64, v uint64) +TEXT ·cswap(SB),7,$0 + MOVQ inout+0(FP),DI + MOVQ v+8(FP),SI + + SUBQ $1, SI + NOTQ SI + MOVQ SI, X15 + PSHUFD $0x44, X15, X15 + + MOVOU 0(DI), X0 + MOVOU 16(DI), X2 + MOVOU 32(DI), X4 + MOVOU 48(DI), X6 + MOVOU 64(DI), X8 + MOVOU 80(DI), X1 + MOVOU 96(DI), X3 + MOVOU 112(DI), X5 + MOVOU 128(DI), X7 + MOVOU 144(DI), X9 + + MOVO X1, X10 + MOVO X3, X11 + MOVO X5, X12 + MOVO X7, X13 + MOVO X9, X14 + + PXOR X0, X10 + PXOR X2, X11 + PXOR X4, X12 + PXOR X6, X13 + PXOR X8, X14 + PAND X15, X10 + PAND X15, X11 + PAND X15, X12 + PAND X15, X13 + PAND X15, X14 + PXOR X10, X0 + PXOR X10, X1 + PXOR X11, X2 + PXOR X11, X3 + PXOR X12, X4 + PXOR X12, X5 + PXOR X13, X6 + PXOR X13, X7 + PXOR X14, X8 + PXOR X14, X9 + + MOVOU X0, 0(DI) + MOVOU X2, 16(DI) + MOVOU X4, 32(DI) + MOVOU X6, 48(DI) + MOVOU X8, 64(DI) + MOVOU X1, 80(DI) + MOVOU X3, 96(DI) + MOVOU X5, 112(DI) + MOVOU X7, 128(DI) + MOVOU X9, 144(DI) + RET + +// func mul(dest, a, b *[5]uint64) +TEXT ·mul(SB),0,$16-24 + MOVQ dest+0(FP), DI + MOVQ a+8(FP), SI + MOVQ b+16(FP), DX + + MOVQ DX,CX + MOVQ 24(SI),DX + IMUL3Q $19,DX,AX + MOVQ AX,0(SP) + MULQ 16(CX) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 32(SI),DX + IMUL3Q $19,DX,AX + MOVQ AX,8(SP) + MULQ 8(CX) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 0(SI),AX + MULQ 0(CX) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 0(SI),AX + MULQ 8(CX) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 0(SI),AX + MULQ 16(CX) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 0(SI),AX + MULQ 24(CX) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 0(SI),AX + MULQ 32(CX) + MOVQ AX,BX + MOVQ DX,BP + MOVQ 8(SI),AX + MULQ 0(CX) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 8(SI),AX + MULQ 8(CX) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 8(SI),AX + MULQ 16(CX) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 8(SI),AX + MULQ 24(CX) + ADDQ AX,BX + ADCQ DX,BP + MOVQ 8(SI),DX + IMUL3Q $19,DX,AX + MULQ 32(CX) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 16(SI),AX + MULQ 0(CX) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 16(SI),AX + MULQ 8(CX) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 16(SI),AX + MULQ 16(CX) + ADDQ AX,BX + ADCQ DX,BP + MOVQ 16(SI),DX + IMUL3Q $19,DX,AX + MULQ 24(CX) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 16(SI),DX + IMUL3Q $19,DX,AX + MULQ 32(CX) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 24(SI),AX + MULQ 0(CX) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 24(SI),AX + MULQ 8(CX) + ADDQ AX,BX + ADCQ DX,BP + MOVQ 0(SP),AX + MULQ 24(CX) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 0(SP),AX + MULQ 32(CX) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 32(SI),AX + MULQ 0(CX) + ADDQ AX,BX + ADCQ DX,BP + MOVQ 8(SP),AX + MULQ 16(CX) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 8(SP),AX + MULQ 24(CX) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 8(SP),AX + MULQ 32(CX) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ $REDMASK51,SI + SHLQ $13,R8,R9 + ANDQ SI,R8 + SHLQ $13,R10,R11 + ANDQ SI,R10 + ADDQ R9,R10 + SHLQ $13,R12,R13 + ANDQ SI,R12 + ADDQ 
R11,R12 + SHLQ $13,R14,R15 + ANDQ SI,R14 + ADDQ R13,R14 + SHLQ $13,BX,BP + ANDQ SI,BX + ADDQ R15,BX + IMUL3Q $19,BP,DX + ADDQ DX,R8 + MOVQ R8,DX + SHRQ $51,DX + ADDQ R10,DX + MOVQ DX,CX + SHRQ $51,DX + ANDQ SI,R8 + ADDQ R12,DX + MOVQ DX,R9 + SHRQ $51,DX + ANDQ SI,CX + ADDQ R14,DX + MOVQ DX,AX + SHRQ $51,DX + ANDQ SI,R9 + ADDQ BX,DX + MOVQ DX,R10 + SHRQ $51,DX + ANDQ SI,AX + IMUL3Q $19,DX,DX + ADDQ DX,R8 + ANDQ SI,R10 + MOVQ R8,0(DI) + MOVQ CX,8(DI) + MOVQ R9,16(DI) + MOVQ AX,24(DI) + MOVQ R10,32(DI) + RET + +// func square(out, in *[5]uint64) +TEXT ·square(SB),7,$0-16 + MOVQ out+0(FP), DI + MOVQ in+8(FP), SI + + MOVQ 0(SI),AX + MULQ 0(SI) + MOVQ AX,CX + MOVQ DX,R8 + MOVQ 0(SI),AX + SHLQ $1,AX + MULQ 8(SI) + MOVQ AX,R9 + MOVQ DX,R10 + MOVQ 0(SI),AX + SHLQ $1,AX + MULQ 16(SI) + MOVQ AX,R11 + MOVQ DX,R12 + MOVQ 0(SI),AX + SHLQ $1,AX + MULQ 24(SI) + MOVQ AX,R13 + MOVQ DX,R14 + MOVQ 0(SI),AX + SHLQ $1,AX + MULQ 32(SI) + MOVQ AX,R15 + MOVQ DX,BX + MOVQ 8(SI),AX + MULQ 8(SI) + ADDQ AX,R11 + ADCQ DX,R12 + MOVQ 8(SI),AX + SHLQ $1,AX + MULQ 16(SI) + ADDQ AX,R13 + ADCQ DX,R14 + MOVQ 8(SI),AX + SHLQ $1,AX + MULQ 24(SI) + ADDQ AX,R15 + ADCQ DX,BX + MOVQ 8(SI),DX + IMUL3Q $38,DX,AX + MULQ 32(SI) + ADDQ AX,CX + ADCQ DX,R8 + MOVQ 16(SI),AX + MULQ 16(SI) + ADDQ AX,R15 + ADCQ DX,BX + MOVQ 16(SI),DX + IMUL3Q $38,DX,AX + MULQ 24(SI) + ADDQ AX,CX + ADCQ DX,R8 + MOVQ 16(SI),DX + IMUL3Q $38,DX,AX + MULQ 32(SI) + ADDQ AX,R9 + ADCQ DX,R10 + MOVQ 24(SI),DX + IMUL3Q $19,DX,AX + MULQ 24(SI) + ADDQ AX,R9 + ADCQ DX,R10 + MOVQ 24(SI),DX + IMUL3Q $38,DX,AX + MULQ 32(SI) + ADDQ AX,R11 + ADCQ DX,R12 + MOVQ 32(SI),DX + IMUL3Q $19,DX,AX + MULQ 32(SI) + ADDQ AX,R13 + ADCQ DX,R14 + MOVQ $REDMASK51,SI + SHLQ $13,CX,R8 + ANDQ SI,CX + SHLQ $13,R9,R10 + ANDQ SI,R9 + ADDQ R8,R9 + SHLQ $13,R11,R12 + ANDQ SI,R11 + ADDQ R10,R11 + SHLQ $13,R13,R14 + ANDQ SI,R13 + ADDQ R12,R13 + SHLQ $13,R15,BX + ANDQ SI,R15 + ADDQ R14,R15 + IMUL3Q $19,BX,DX + ADDQ DX,CX + MOVQ CX,DX + SHRQ $51,DX + ADDQ R9,DX + ANDQ SI,CX + MOVQ DX,R8 + SHRQ $51,DX + ADDQ R11,DX + ANDQ SI,R8 + MOVQ DX,R9 + SHRQ $51,DX + ADDQ R13,DX + ANDQ SI,R9 + MOVQ DX,AX + SHRQ $51,DX + ADDQ R15,DX + ANDQ SI,AX + MOVQ DX,R10 + SHRQ $51,DX + IMUL3Q $19,DX,DX + ADDQ DX,CX + ANDQ SI,R10 + MOVQ CX,0(DI) + MOVQ R8,8(DI) + MOVQ R9,16(DI) + MOVQ AX,24(DI) + MOVQ R10,32(DI) + RET diff --git a/vendor/golang.org/x/crypto/curve25519/curve25519_generic.go b/vendor/golang.org/x/crypto/curve25519/curve25519_generic.go new file mode 100644 index 00000000000..c43b13fc83e --- /dev/null +++ b/vendor/golang.org/x/crypto/curve25519/curve25519_generic.go @@ -0,0 +1,828 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package curve25519 + +import "encoding/binary" + +// This code is a port of the public domain, "ref10" implementation of +// curve25519 from SUPERCOP 20130419 by D. J. Bernstein. + +// fieldElement represents an element of the field GF(2^255 - 19). An element +// t, entries t[0]...t[9], represents the integer t[0]+2^26 t[1]+2^51 t[2]+2^77 +// t[3]+2^102 t[4]+...+2^230 t[9]. Bounds on each t[i] vary depending on +// context. 
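The fieldElement comment above describes a radix-2^25.5 layout: even limbs carry 26 bits, odd limbs 25, with limb i sitting at bit position ceil(25.5*i). As a standalone illustration (not part of the vendored code), this sketch evaluates such an element with math/big:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	// Bit position of limb i is ceil(25.5*i), matching the comment above.
	shift := []uint{0, 26, 51, 77, 102, 128, 153, 179, 204, 230}
	fe := [10]int32{19, 1, 0, 0, 0, 0, 0, 0, 0, 0} // represents 19 + 2^26
	v := new(big.Int)
	for i, t := range fe {
		v.Add(v, new(big.Int).Lsh(big.NewInt(int64(t)), shift[i]))
	}
	fmt.Println(v) // 67108883, i.e. 2^26 + 19
}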
+type fieldElement [10]int32 + +func feZero(fe *fieldElement) { + for i := range fe { + fe[i] = 0 + } +} + +func feOne(fe *fieldElement) { + feZero(fe) + fe[0] = 1 +} + +func feAdd(dst, a, b *fieldElement) { + for i := range dst { + dst[i] = a[i] + b[i] + } +} + +func feSub(dst, a, b *fieldElement) { + for i := range dst { + dst[i] = a[i] - b[i] + } +} + +func feCopy(dst, src *fieldElement) { + for i := range dst { + dst[i] = src[i] + } +} + +// feCSwap replaces (f,g) with (g,f) if b == 1; replaces (f,g) with (f,g) if b == 0. +// +// Preconditions: b in {0,1}. +func feCSwap(f, g *fieldElement, b int32) { + b = -b + for i := range f { + t := b & (f[i] ^ g[i]) + f[i] ^= t + g[i] ^= t + } +} + +// load3 reads a 24-bit, little-endian value from in. +func load3(in []byte) int64 { + var r int64 + r = int64(in[0]) + r |= int64(in[1]) << 8 + r |= int64(in[2]) << 16 + return r +} + +// load4 reads a 32-bit, little-endian value from in. +func load4(in []byte) int64 { + return int64(binary.LittleEndian.Uint32(in)) +} + +func feFromBytes(dst *fieldElement, src *[32]byte) { + h0 := load4(src[:]) + h1 := load3(src[4:]) << 6 + h2 := load3(src[7:]) << 5 + h3 := load3(src[10:]) << 3 + h4 := load3(src[13:]) << 2 + h5 := load4(src[16:]) + h6 := load3(src[20:]) << 7 + h7 := load3(src[23:]) << 5 + h8 := load3(src[26:]) << 4 + h9 := (load3(src[29:]) & 0x7fffff) << 2 + + var carry [10]int64 + carry[9] = (h9 + 1<<24) >> 25 + h0 += carry[9] * 19 + h9 -= carry[9] << 25 + carry[1] = (h1 + 1<<24) >> 25 + h2 += carry[1] + h1 -= carry[1] << 25 + carry[3] = (h3 + 1<<24) >> 25 + h4 += carry[3] + h3 -= carry[3] << 25 + carry[5] = (h5 + 1<<24) >> 25 + h6 += carry[5] + h5 -= carry[5] << 25 + carry[7] = (h7 + 1<<24) >> 25 + h8 += carry[7] + h7 -= carry[7] << 25 + + carry[0] = (h0 + 1<<25) >> 26 + h1 += carry[0] + h0 -= carry[0] << 26 + carry[2] = (h2 + 1<<25) >> 26 + h3 += carry[2] + h2 -= carry[2] << 26 + carry[4] = (h4 + 1<<25) >> 26 + h5 += carry[4] + h4 -= carry[4] << 26 + carry[6] = (h6 + 1<<25) >> 26 + h7 += carry[6] + h6 -= carry[6] << 26 + carry[8] = (h8 + 1<<25) >> 26 + h9 += carry[8] + h8 -= carry[8] << 26 + + dst[0] = int32(h0) + dst[1] = int32(h1) + dst[2] = int32(h2) + dst[3] = int32(h3) + dst[4] = int32(h4) + dst[5] = int32(h5) + dst[6] = int32(h6) + dst[7] = int32(h7) + dst[8] = int32(h8) + dst[9] = int32(h9) +} + +// feToBytes marshals h to s. +// Preconditions: +// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. +// +// Write p=2^255-19; q=floor(h/p). +// Basic claim: q = floor(2^(-255)(h + 19 2^(-25)h9 + 2^(-1))). +// +// Proof: +// Have |h|<=p so |q|<=1 so |19^2 2^(-255) q|<1/4. +// Also have |h-2^230 h9|<2^230 so |19 2^(-255)(h-2^230 h9)|<1/4. +// +// Write y=2^(-1)-19^2 2^(-255)q-19 2^(-255)(h-2^230 h9). +// Then 0<y<1. +// +// Write r=h-pq. +// Have 0<=r<=p-1=2^255-20. +// Thus 0<=r+19(2^-255)r<r+19(2^-255)2^255<=2^255-1. +// +// Write x=r+19(2^-255)r+y. +// Then 0<x<2^255 so floor(2^(-255)x) = 0 so floor(q+2^(-255)x) = q. +// +// Have q+2^(-255)x = 2^(-255)(h + 19 2^(-25) h9 + 2^(-1)) +// so floor(2^(-255)(h + 19 2^(-25) h9 + 2^(-1))) = q. +func feToBytes(s *[32]byte, h *fieldElement) { + var carry [10]int32 + + q := (19*h[9] + (1 << 24)) >> 25 + q = (h[0] + q) >> 26 + q = (h[1] + q) >> 25 + q = (h[2] + q) >> 26 + q = (h[3] + q) >> 25 + q = (h[4] + q) >> 26 + q = (h[5] + q) >> 25 + q = (h[6] + q) >> 26 + q = (h[7] + q) >> 25 + q = (h[8] + q) >> 26 + q = (h[9] + q) >> 25 + + // Goal: Output h-(2^255-19)q, which is between 0 and 2^255-20. + h[0] += 19 * q + // Goal: Output h-2^255 q, which is between 0 and 2^255-20.
+ + carry[0] = h[0] >> 26 + h[1] += carry[0] + h[0] -= carry[0] << 26 + carry[1] = h[1] >> 25 + h[2] += carry[1] + h[1] -= carry[1] << 25 + carry[2] = h[2] >> 26 + h[3] += carry[2] + h[2] -= carry[2] << 26 + carry[3] = h[3] >> 25 + h[4] += carry[3] + h[3] -= carry[3] << 25 + carry[4] = h[4] >> 26 + h[5] += carry[4] + h[4] -= carry[4] << 26 + carry[5] = h[5] >> 25 + h[6] += carry[5] + h[5] -= carry[5] << 25 + carry[6] = h[6] >> 26 + h[7] += carry[6] + h[6] -= carry[6] << 26 + carry[7] = h[7] >> 25 + h[8] += carry[7] + h[7] -= carry[7] << 25 + carry[8] = h[8] >> 26 + h[9] += carry[8] + h[8] -= carry[8] << 26 + carry[9] = h[9] >> 25 + h[9] -= carry[9] << 25 + // h10 = carry9 + + // Goal: Output h[0]+...+2^255 h10-2^255 q, which is between 0 and 2^255-20. + // Have h[0]+...+2^230 h[9] between 0 and 2^255-1; + // evidently 2^255 h10-2^255 q = 0. + // Goal: Output h[0]+...+2^230 h[9]. + + s[0] = byte(h[0] >> 0) + s[1] = byte(h[0] >> 8) + s[2] = byte(h[0] >> 16) + s[3] = byte((h[0] >> 24) | (h[1] << 2)) + s[4] = byte(h[1] >> 6) + s[5] = byte(h[1] >> 14) + s[6] = byte((h[1] >> 22) | (h[2] << 3)) + s[7] = byte(h[2] >> 5) + s[8] = byte(h[2] >> 13) + s[9] = byte((h[2] >> 21) | (h[3] << 5)) + s[10] = byte(h[3] >> 3) + s[11] = byte(h[3] >> 11) + s[12] = byte((h[3] >> 19) | (h[4] << 6)) + s[13] = byte(h[4] >> 2) + s[14] = byte(h[4] >> 10) + s[15] = byte(h[4] >> 18) + s[16] = byte(h[5] >> 0) + s[17] = byte(h[5] >> 8) + s[18] = byte(h[5] >> 16) + s[19] = byte((h[5] >> 24) | (h[6] << 1)) + s[20] = byte(h[6] >> 7) + s[21] = byte(h[6] >> 15) + s[22] = byte((h[6] >> 23) | (h[7] << 3)) + s[23] = byte(h[7] >> 5) + s[24] = byte(h[7] >> 13) + s[25] = byte((h[7] >> 21) | (h[8] << 4)) + s[26] = byte(h[8] >> 4) + s[27] = byte(h[8] >> 12) + s[28] = byte((h[8] >> 20) | (h[9] << 6)) + s[29] = byte(h[9] >> 2) + s[30] = byte(h[9] >> 10) + s[31] = byte(h[9] >> 18) +} + +// feMul calculates h = f * g +// Can overlap h with f or g. +// +// Preconditions: +// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. +// |g| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. +// +// Postconditions: +// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. +// +// Notes on implementation strategy: +// +// Using schoolbook multiplication. +// Karatsuba would save a little in some cost models. +// +// Most multiplications by 2 and 19 are 32-bit precomputations; +// cheaper than 64-bit postcomputations. +// +// There is one remaining multiplication by 19 in the carry chain; +// one *19 precomputation can be merged into this, +// but the resulting data flow is considerably less clean. +// +// There are 12 carries below. +// 10 of them are 2-way parallelizable and vectorizable. +// Can get away with 11 carries, but then data flow is much deeper. +// +// With tighter constraints on inputs can squeeze carries into int32. 
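One step the feMul comment leaves implicit: the g1_19..g9_19 precomputations below are sound because 2^255 ≡ 19 (mod 2^255-19), so limb products that spill past bit 255 wrap around scaled by 19. A quick standalone check with math/big (illustration only, not part of the vendored code):

package main

import (
	"fmt"
	"math/big"
)

func main() {
	p := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 255), big.NewInt(19))
	// 2^255 reduced mod 2^255-19 leaves exactly the factor 19.
	fmt.Println(new(big.Int).Mod(new(big.Int).Lsh(big.NewInt(1), 255), p)) // 19
}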
+func feMul(h, f, g *fieldElement) { + f0 := f[0] + f1 := f[1] + f2 := f[2] + f3 := f[3] + f4 := f[4] + f5 := f[5] + f6 := f[6] + f7 := f[7] + f8 := f[8] + f9 := f[9] + g0 := g[0] + g1 := g[1] + g2 := g[2] + g3 := g[3] + g4 := g[4] + g5 := g[5] + g6 := g[6] + g7 := g[7] + g8 := g[8] + g9 := g[9] + g1_19 := 19 * g1 // 1.4*2^29 + g2_19 := 19 * g2 // 1.4*2^30; still ok + g3_19 := 19 * g3 + g4_19 := 19 * g4 + g5_19 := 19 * g5 + g6_19 := 19 * g6 + g7_19 := 19 * g7 + g8_19 := 19 * g8 + g9_19 := 19 * g9 + f1_2 := 2 * f1 + f3_2 := 2 * f3 + f5_2 := 2 * f5 + f7_2 := 2 * f7 + f9_2 := 2 * f9 + f0g0 := int64(f0) * int64(g0) + f0g1 := int64(f0) * int64(g1) + f0g2 := int64(f0) * int64(g2) + f0g3 := int64(f0) * int64(g3) + f0g4 := int64(f0) * int64(g4) + f0g5 := int64(f0) * int64(g5) + f0g6 := int64(f0) * int64(g6) + f0g7 := int64(f0) * int64(g7) + f0g8 := int64(f0) * int64(g8) + f0g9 := int64(f0) * int64(g9) + f1g0 := int64(f1) * int64(g0) + f1g1_2 := int64(f1_2) * int64(g1) + f1g2 := int64(f1) * int64(g2) + f1g3_2 := int64(f1_2) * int64(g3) + f1g4 := int64(f1) * int64(g4) + f1g5_2 := int64(f1_2) * int64(g5) + f1g6 := int64(f1) * int64(g6) + f1g7_2 := int64(f1_2) * int64(g7) + f1g8 := int64(f1) * int64(g8) + f1g9_38 := int64(f1_2) * int64(g9_19) + f2g0 := int64(f2) * int64(g0) + f2g1 := int64(f2) * int64(g1) + f2g2 := int64(f2) * int64(g2) + f2g3 := int64(f2) * int64(g3) + f2g4 := int64(f2) * int64(g4) + f2g5 := int64(f2) * int64(g5) + f2g6 := int64(f2) * int64(g6) + f2g7 := int64(f2) * int64(g7) + f2g8_19 := int64(f2) * int64(g8_19) + f2g9_19 := int64(f2) * int64(g9_19) + f3g0 := int64(f3) * int64(g0) + f3g1_2 := int64(f3_2) * int64(g1) + f3g2 := int64(f3) * int64(g2) + f3g3_2 := int64(f3_2) * int64(g3) + f3g4 := int64(f3) * int64(g4) + f3g5_2 := int64(f3_2) * int64(g5) + f3g6 := int64(f3) * int64(g6) + f3g7_38 := int64(f3_2) * int64(g7_19) + f3g8_19 := int64(f3) * int64(g8_19) + f3g9_38 := int64(f3_2) * int64(g9_19) + f4g0 := int64(f4) * int64(g0) + f4g1 := int64(f4) * int64(g1) + f4g2 := int64(f4) * int64(g2) + f4g3 := int64(f4) * int64(g3) + f4g4 := int64(f4) * int64(g4) + f4g5 := int64(f4) * int64(g5) + f4g6_19 := int64(f4) * int64(g6_19) + f4g7_19 := int64(f4) * int64(g7_19) + f4g8_19 := int64(f4) * int64(g8_19) + f4g9_19 := int64(f4) * int64(g9_19) + f5g0 := int64(f5) * int64(g0) + f5g1_2 := int64(f5_2) * int64(g1) + f5g2 := int64(f5) * int64(g2) + f5g3_2 := int64(f5_2) * int64(g3) + f5g4 := int64(f5) * int64(g4) + f5g5_38 := int64(f5_2) * int64(g5_19) + f5g6_19 := int64(f5) * int64(g6_19) + f5g7_38 := int64(f5_2) * int64(g7_19) + f5g8_19 := int64(f5) * int64(g8_19) + f5g9_38 := int64(f5_2) * int64(g9_19) + f6g0 := int64(f6) * int64(g0) + f6g1 := int64(f6) * int64(g1) + f6g2 := int64(f6) * int64(g2) + f6g3 := int64(f6) * int64(g3) + f6g4_19 := int64(f6) * int64(g4_19) + f6g5_19 := int64(f6) * int64(g5_19) + f6g6_19 := int64(f6) * int64(g6_19) + f6g7_19 := int64(f6) * int64(g7_19) + f6g8_19 := int64(f6) * int64(g8_19) + f6g9_19 := int64(f6) * int64(g9_19) + f7g0 := int64(f7) * int64(g0) + f7g1_2 := int64(f7_2) * int64(g1) + f7g2 := int64(f7) * int64(g2) + f7g3_38 := int64(f7_2) * int64(g3_19) + f7g4_19 := int64(f7) * int64(g4_19) + f7g5_38 := int64(f7_2) * int64(g5_19) + f7g6_19 := int64(f7) * int64(g6_19) + f7g7_38 := int64(f7_2) * int64(g7_19) + f7g8_19 := int64(f7) * int64(g8_19) + f7g9_38 := int64(f7_2) * int64(g9_19) + f8g0 := int64(f8) * int64(g0) + f8g1 := int64(f8) * int64(g1) + f8g2_19 := int64(f8) * int64(g2_19) + f8g3_19 := int64(f8) * int64(g3_19) + f8g4_19 := int64(f8) * int64(g4_19) + 
f8g5_19 := int64(f8) * int64(g5_19) + f8g6_19 := int64(f8) * int64(g6_19) + f8g7_19 := int64(f8) * int64(g7_19) + f8g8_19 := int64(f8) * int64(g8_19) + f8g9_19 := int64(f8) * int64(g9_19) + f9g0 := int64(f9) * int64(g0) + f9g1_38 := int64(f9_2) * int64(g1_19) + f9g2_19 := int64(f9) * int64(g2_19) + f9g3_38 := int64(f9_2) * int64(g3_19) + f9g4_19 := int64(f9) * int64(g4_19) + f9g5_38 := int64(f9_2) * int64(g5_19) + f9g6_19 := int64(f9) * int64(g6_19) + f9g7_38 := int64(f9_2) * int64(g7_19) + f9g8_19 := int64(f9) * int64(g8_19) + f9g9_38 := int64(f9_2) * int64(g9_19) + h0 := f0g0 + f1g9_38 + f2g8_19 + f3g7_38 + f4g6_19 + f5g5_38 + f6g4_19 + f7g3_38 + f8g2_19 + f9g1_38 + h1 := f0g1 + f1g0 + f2g9_19 + f3g8_19 + f4g7_19 + f5g6_19 + f6g5_19 + f7g4_19 + f8g3_19 + f9g2_19 + h2 := f0g2 + f1g1_2 + f2g0 + f3g9_38 + f4g8_19 + f5g7_38 + f6g6_19 + f7g5_38 + f8g4_19 + f9g3_38 + h3 := f0g3 + f1g2 + f2g1 + f3g0 + f4g9_19 + f5g8_19 + f6g7_19 + f7g6_19 + f8g5_19 + f9g4_19 + h4 := f0g4 + f1g3_2 + f2g2 + f3g1_2 + f4g0 + f5g9_38 + f6g8_19 + f7g7_38 + f8g6_19 + f9g5_38 + h5 := f0g5 + f1g4 + f2g3 + f3g2 + f4g1 + f5g0 + f6g9_19 + f7g8_19 + f8g7_19 + f9g6_19 + h6 := f0g6 + f1g5_2 + f2g4 + f3g3_2 + f4g2 + f5g1_2 + f6g0 + f7g9_38 + f8g8_19 + f9g7_38 + h7 := f0g7 + f1g6 + f2g5 + f3g4 + f4g3 + f5g2 + f6g1 + f7g0 + f8g9_19 + f9g8_19 + h8 := f0g8 + f1g7_2 + f2g6 + f3g5_2 + f4g4 + f5g3_2 + f6g2 + f7g1_2 + f8g0 + f9g9_38 + h9 := f0g9 + f1g8 + f2g7 + f3g6 + f4g5 + f5g4 + f6g3 + f7g2 + f8g1 + f9g0 + var carry [10]int64 + + // |h0| <= (1.1*1.1*2^52*(1+19+19+19+19)+1.1*1.1*2^50*(38+38+38+38+38)) + // i.e. |h0| <= 1.2*2^59; narrower ranges for h2, h4, h6, h8 + // |h1| <= (1.1*1.1*2^51*(1+1+19+19+19+19+19+19+19+19)) + // i.e. |h1| <= 1.5*2^58; narrower ranges for h3, h5, h7, h9 + + carry[0] = (h0 + (1 << 25)) >> 26 + h1 += carry[0] + h0 -= carry[0] << 26 + carry[4] = (h4 + (1 << 25)) >> 26 + h5 += carry[4] + h4 -= carry[4] << 26 + // |h0| <= 2^25 + // |h4| <= 2^25 + // |h1| <= 1.51*2^58 + // |h5| <= 1.51*2^58 + + carry[1] = (h1 + (1 << 24)) >> 25 + h2 += carry[1] + h1 -= carry[1] << 25 + carry[5] = (h5 + (1 << 24)) >> 25 + h6 += carry[5] + h5 -= carry[5] << 25 + // |h1| <= 2^24; from now on fits into int32 + // |h5| <= 2^24; from now on fits into int32 + // |h2| <= 1.21*2^59 + // |h6| <= 1.21*2^59 + + carry[2] = (h2 + (1 << 25)) >> 26 + h3 += carry[2] + h2 -= carry[2] << 26 + carry[6] = (h6 + (1 << 25)) >> 26 + h7 += carry[6] + h6 -= carry[6] << 26 + // |h2| <= 2^25; from now on fits into int32 unchanged + // |h6| <= 2^25; from now on fits into int32 unchanged + // |h3| <= 1.51*2^58 + // |h7| <= 1.51*2^58 + + carry[3] = (h3 + (1 << 24)) >> 25 + h4 += carry[3] + h3 -= carry[3] << 25 + carry[7] = (h7 + (1 << 24)) >> 25 + h8 += carry[7] + h7 -= carry[7] << 25 + // |h3| <= 2^24; from now on fits into int32 unchanged + // |h7| <= 2^24; from now on fits into int32 unchanged + // |h4| <= 1.52*2^33 + // |h8| <= 1.52*2^33 + + carry[4] = (h4 + (1 << 25)) >> 26 + h5 += carry[4] + h4 -= carry[4] << 26 + carry[8] = (h8 + (1 << 25)) >> 26 + h9 += carry[8] + h8 -= carry[8] << 26 + // |h4| <= 2^25; from now on fits into int32 unchanged + // |h8| <= 2^25; from now on fits into int32 unchanged + // |h5| <= 1.01*2^24 + // |h9| <= 1.51*2^58 + + carry[9] = (h9 + (1 << 24)) >> 25 + h0 += carry[9] * 19 + h9 -= carry[9] << 25 + // |h9| <= 2^24; from now on fits into int32 unchanged + // |h0| <= 1.8*2^37 + + carry[0] = (h0 + (1 << 25)) >> 26 + h1 += carry[0] + h0 -= carry[0] << 26 + // |h0| <= 2^25; from now on fits into int32 unchanged + // |h1| <= 
1.01*2^24 + + h[0] = int32(h0) + h[1] = int32(h1) + h[2] = int32(h2) + h[3] = int32(h3) + h[4] = int32(h4) + h[5] = int32(h5) + h[6] = int32(h6) + h[7] = int32(h7) + h[8] = int32(h8) + h[9] = int32(h9) +} + +// feSquare calculates h = f*f. Can overlap h with f. +// +// Preconditions: +// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. +// +// Postconditions: +// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. +func feSquare(h, f *fieldElement) { + f0 := f[0] + f1 := f[1] + f2 := f[2] + f3 := f[3] + f4 := f[4] + f5 := f[5] + f6 := f[6] + f7 := f[7] + f8 := f[8] + f9 := f[9] + f0_2 := 2 * f0 + f1_2 := 2 * f1 + f2_2 := 2 * f2 + f3_2 := 2 * f3 + f4_2 := 2 * f4 + f5_2 := 2 * f5 + f6_2 := 2 * f6 + f7_2 := 2 * f7 + f5_38 := 38 * f5 // 1.31*2^30 + f6_19 := 19 * f6 // 1.31*2^30 + f7_38 := 38 * f7 // 1.31*2^30 + f8_19 := 19 * f8 // 1.31*2^30 + f9_38 := 38 * f9 // 1.31*2^30 + f0f0 := int64(f0) * int64(f0) + f0f1_2 := int64(f0_2) * int64(f1) + f0f2_2 := int64(f0_2) * int64(f2) + f0f3_2 := int64(f0_2) * int64(f3) + f0f4_2 := int64(f0_2) * int64(f4) + f0f5_2 := int64(f0_2) * int64(f5) + f0f6_2 := int64(f0_2) * int64(f6) + f0f7_2 := int64(f0_2) * int64(f7) + f0f8_2 := int64(f0_2) * int64(f8) + f0f9_2 := int64(f0_2) * int64(f9) + f1f1_2 := int64(f1_2) * int64(f1) + f1f2_2 := int64(f1_2) * int64(f2) + f1f3_4 := int64(f1_2) * int64(f3_2) + f1f4_2 := int64(f1_2) * int64(f4) + f1f5_4 := int64(f1_2) * int64(f5_2) + f1f6_2 := int64(f1_2) * int64(f6) + f1f7_4 := int64(f1_2) * int64(f7_2) + f1f8_2 := int64(f1_2) * int64(f8) + f1f9_76 := int64(f1_2) * int64(f9_38) + f2f2 := int64(f2) * int64(f2) + f2f3_2 := int64(f2_2) * int64(f3) + f2f4_2 := int64(f2_2) * int64(f4) + f2f5_2 := int64(f2_2) * int64(f5) + f2f6_2 := int64(f2_2) * int64(f6) + f2f7_2 := int64(f2_2) * int64(f7) + f2f8_38 := int64(f2_2) * int64(f8_19) + f2f9_38 := int64(f2) * int64(f9_38) + f3f3_2 := int64(f3_2) * int64(f3) + f3f4_2 := int64(f3_2) * int64(f4) + f3f5_4 := int64(f3_2) * int64(f5_2) + f3f6_2 := int64(f3_2) * int64(f6) + f3f7_76 := int64(f3_2) * int64(f7_38) + f3f8_38 := int64(f3_2) * int64(f8_19) + f3f9_76 := int64(f3_2) * int64(f9_38) + f4f4 := int64(f4) * int64(f4) + f4f5_2 := int64(f4_2) * int64(f5) + f4f6_38 := int64(f4_2) * int64(f6_19) + f4f7_38 := int64(f4) * int64(f7_38) + f4f8_38 := int64(f4_2) * int64(f8_19) + f4f9_38 := int64(f4) * int64(f9_38) + f5f5_38 := int64(f5) * int64(f5_38) + f5f6_38 := int64(f5_2) * int64(f6_19) + f5f7_76 := int64(f5_2) * int64(f7_38) + f5f8_38 := int64(f5_2) * int64(f8_19) + f5f9_76 := int64(f5_2) * int64(f9_38) + f6f6_19 := int64(f6) * int64(f6_19) + f6f7_38 := int64(f6) * int64(f7_38) + f6f8_38 := int64(f6_2) * int64(f8_19) + f6f9_38 := int64(f6) * int64(f9_38) + f7f7_38 := int64(f7) * int64(f7_38) + f7f8_38 := int64(f7_2) * int64(f8_19) + f7f9_76 := int64(f7_2) * int64(f9_38) + f8f8_19 := int64(f8) * int64(f8_19) + f8f9_38 := int64(f8) * int64(f9_38) + f9f9_38 := int64(f9) * int64(f9_38) + h0 := f0f0 + f1f9_76 + f2f8_38 + f3f7_76 + f4f6_38 + f5f5_38 + h1 := f0f1_2 + f2f9_38 + f3f8_38 + f4f7_38 + f5f6_38 + h2 := f0f2_2 + f1f1_2 + f3f9_76 + f4f8_38 + f5f7_76 + f6f6_19 + h3 := f0f3_2 + f1f2_2 + f4f9_38 + f5f8_38 + f6f7_38 + h4 := f0f4_2 + f1f3_4 + f2f2 + f5f9_76 + f6f8_38 + f7f7_38 + h5 := f0f5_2 + f1f4_2 + f2f3_2 + f6f9_38 + f7f8_38 + h6 := f0f6_2 + f1f5_4 + f2f4_2 + f3f3_2 + f7f9_76 + f8f8_19 + h7 := f0f7_2 + f1f6_2 + f2f5_2 + f3f4_2 + f8f9_38 + h8 := f0f8_2 + f1f7_4 + f2f6_2 + f3f5_4 + f4f4 + f9f9_38 + h9 := f0f9_2 + f1f8_2 + f2f7_2 + f3f6_2 + f4f5_2 + var carry [10]int64 + + carry[0] 
= (h0 + (1 << 25)) >> 26 + h1 += carry[0] + h0 -= carry[0] << 26 + carry[4] = (h4 + (1 << 25)) >> 26 + h5 += carry[4] + h4 -= carry[4] << 26 + + carry[1] = (h1 + (1 << 24)) >> 25 + h2 += carry[1] + h1 -= carry[1] << 25 + carry[5] = (h5 + (1 << 24)) >> 25 + h6 += carry[5] + h5 -= carry[5] << 25 + + carry[2] = (h2 + (1 << 25)) >> 26 + h3 += carry[2] + h2 -= carry[2] << 26 + carry[6] = (h6 + (1 << 25)) >> 26 + h7 += carry[6] + h6 -= carry[6] << 26 + + carry[3] = (h3 + (1 << 24)) >> 25 + h4 += carry[3] + h3 -= carry[3] << 25 + carry[7] = (h7 + (1 << 24)) >> 25 + h8 += carry[7] + h7 -= carry[7] << 25 + + carry[4] = (h4 + (1 << 25)) >> 26 + h5 += carry[4] + h4 -= carry[4] << 26 + carry[8] = (h8 + (1 << 25)) >> 26 + h9 += carry[8] + h8 -= carry[8] << 26 + + carry[9] = (h9 + (1 << 24)) >> 25 + h0 += carry[9] * 19 + h9 -= carry[9] << 25 + + carry[0] = (h0 + (1 << 25)) >> 26 + h1 += carry[0] + h0 -= carry[0] << 26 + + h[0] = int32(h0) + h[1] = int32(h1) + h[2] = int32(h2) + h[3] = int32(h3) + h[4] = int32(h4) + h[5] = int32(h5) + h[6] = int32(h6) + h[7] = int32(h7) + h[8] = int32(h8) + h[9] = int32(h9) +} + +// feMul121666 calculates h = f * 121666. Can overlap h with f. +// +// Preconditions: +// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. +// +// Postconditions: +// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. +func feMul121666(h, f *fieldElement) { + h0 := int64(f[0]) * 121666 + h1 := int64(f[1]) * 121666 + h2 := int64(f[2]) * 121666 + h3 := int64(f[3]) * 121666 + h4 := int64(f[4]) * 121666 + h5 := int64(f[5]) * 121666 + h6 := int64(f[6]) * 121666 + h7 := int64(f[7]) * 121666 + h8 := int64(f[8]) * 121666 + h9 := int64(f[9]) * 121666 + var carry [10]int64 + + carry[9] = (h9 + (1 << 24)) >> 25 + h0 += carry[9] * 19 + h9 -= carry[9] << 25 + carry[1] = (h1 + (1 << 24)) >> 25 + h2 += carry[1] + h1 -= carry[1] << 25 + carry[3] = (h3 + (1 << 24)) >> 25 + h4 += carry[3] + h3 -= carry[3] << 25 + carry[5] = (h5 + (1 << 24)) >> 25 + h6 += carry[5] + h5 -= carry[5] << 25 + carry[7] = (h7 + (1 << 24)) >> 25 + h8 += carry[7] + h7 -= carry[7] << 25 + + carry[0] = (h0 + (1 << 25)) >> 26 + h1 += carry[0] + h0 -= carry[0] << 26 + carry[2] = (h2 + (1 << 25)) >> 26 + h3 += carry[2] + h2 -= carry[2] << 26 + carry[4] = (h4 + (1 << 25)) >> 26 + h5 += carry[4] + h4 -= carry[4] << 26 + carry[6] = (h6 + (1 << 25)) >> 26 + h7 += carry[6] + h6 -= carry[6] << 26 + carry[8] = (h8 + (1 << 25)) >> 26 + h9 += carry[8] + h8 -= carry[8] << 26 + + h[0] = int32(h0) + h[1] = int32(h1) + h[2] = int32(h2) + h[3] = int32(h3) + h[4] = int32(h4) + h[5] = int32(h5) + h[6] = int32(h6) + h[7] = int32(h7) + h[8] = int32(h8) + h[9] = int32(h9) +} + +// feInvert sets out = z^-1. 
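feInvert's fixed ladder of feSquare/feMul calls raises z to the power p-2 = 2^255-21 for p = 2^255-19, which is z^-1 by Fermat's little theorem. A variable-time math/big cross-check (standalone sketch; the in-tree code must stay constant-time):

package main

import (
	"fmt"
	"math/big"
)

func main() {
	p := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 255), big.NewInt(19))
	z := big.NewInt(9) // sample value (9 is the conventional curve25519 base point u-coordinate)
	// z^(p-2) mod p is the modular inverse of z.
	inv := new(big.Int).Exp(z, new(big.Int).Sub(p, big.NewInt(2)), p)
	prod := new(big.Int).Mul(z, inv)
	fmt.Println(prod.Mod(prod, p)) // 1
}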
+func feInvert(out, z *fieldElement) { + var t0, t1, t2, t3 fieldElement + var i int + + feSquare(&t0, z) + for i = 1; i < 1; i++ { + feSquare(&t0, &t0) + } + feSquare(&t1, &t0) + for i = 1; i < 2; i++ { + feSquare(&t1, &t1) + } + feMul(&t1, z, &t1) + feMul(&t0, &t0, &t1) + feSquare(&t2, &t0) + for i = 1; i < 1; i++ { + feSquare(&t2, &t2) + } + feMul(&t1, &t1, &t2) + feSquare(&t2, &t1) + for i = 1; i < 5; i++ { + feSquare(&t2, &t2) + } + feMul(&t1, &t2, &t1) + feSquare(&t2, &t1) + for i = 1; i < 10; i++ { + feSquare(&t2, &t2) + } + feMul(&t2, &t2, &t1) + feSquare(&t3, &t2) + for i = 1; i < 20; i++ { + feSquare(&t3, &t3) + } + feMul(&t2, &t3, &t2) + feSquare(&t2, &t2) + for i = 1; i < 10; i++ { + feSquare(&t2, &t2) + } + feMul(&t1, &t2, &t1) + feSquare(&t2, &t1) + for i = 1; i < 50; i++ { + feSquare(&t2, &t2) + } + feMul(&t2, &t2, &t1) + feSquare(&t3, &t2) + for i = 1; i < 100; i++ { + feSquare(&t3, &t3) + } + feMul(&t2, &t3, &t2) + feSquare(&t2, &t2) + for i = 1; i < 50; i++ { + feSquare(&t2, &t2) + } + feMul(&t1, &t2, &t1) + feSquare(&t1, &t1) + for i = 1; i < 5; i++ { + feSquare(&t1, &t1) + } + feMul(out, &t1, &t0) +} + +func scalarMultGeneric(out, in, base *[32]byte) { + var e [32]byte + + copy(e[:], in[:]) + e[0] &= 248 + e[31] &= 127 + e[31] |= 64 + + var x1, x2, z2, x3, z3, tmp0, tmp1 fieldElement + feFromBytes(&x1, base) + feOne(&x2) + feCopy(&x3, &x1) + feOne(&z3) + + swap := int32(0) + for pos := 254; pos >= 0; pos-- { + b := e[pos/8] >> uint(pos&7) + b &= 1 + swap ^= int32(b) + feCSwap(&x2, &x3, swap) + feCSwap(&z2, &z3, swap) + swap = int32(b) + + feSub(&tmp0, &x3, &z3) + feSub(&tmp1, &x2, &z2) + feAdd(&x2, &x2, &z2) + feAdd(&z2, &x3, &z3) + feMul(&z3, &tmp0, &x2) + feMul(&z2, &z2, &tmp1) + feSquare(&tmp0, &tmp1) + feSquare(&tmp1, &x2) + feAdd(&x3, &z3, &z2) + feSub(&z2, &z3, &z2) + feMul(&x2, &tmp1, &tmp0) + feSub(&tmp1, &tmp1, &tmp0) + feSquare(&z2, &z2) + feMul121666(&z3, &tmp1) + feSquare(&x3, &x3) + feAdd(&tmp0, &tmp0, &z3) + feMul(&z3, &x1, &z2) + feMul(&z2, &tmp1, &tmp0) + } + + feCSwap(&x2, &x3, swap) + feCSwap(&z2, &z3, swap) + + feInvert(&z2, &z2) + feMul(&x2, &x2, &z2) + feToBytes(out, &x2) +} diff --git a/vendor/golang.org/x/crypto/curve25519/curve25519_noasm.go b/vendor/golang.org/x/crypto/curve25519/curve25519_noasm.go new file mode 100644 index 00000000000..047d49afc27 --- /dev/null +++ b/vendor/golang.org/x/crypto/curve25519/curve25519_noasm.go @@ -0,0 +1,11 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !amd64 gccgo appengine purego + +package curve25519 + +func scalarMult(out, in, base *[32]byte) { + scalarMultGeneric(out, in, base) +} diff --git a/vendor/golang.org/x/crypto/internal/subtle/aliasing.go b/vendor/golang.org/x/crypto/internal/subtle/aliasing.go new file mode 100644 index 00000000000..f38797bfa1b --- /dev/null +++ b/vendor/golang.org/x/crypto/internal/subtle/aliasing.go @@ -0,0 +1,32 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine + +// Package subtle implements functions that are often useful in cryptographic +// code but require careful thought to use correctly. +package subtle // import "golang.org/x/crypto/internal/subtle" + +import "unsafe" + +// AnyOverlap reports whether x and y share memory at any (not necessarily +// corresponding) index. 
The memory beyond the slice length is ignored. +func AnyOverlap(x, y []byte) bool { + return len(x) > 0 && len(y) > 0 && + uintptr(unsafe.Pointer(&x[0])) <= uintptr(unsafe.Pointer(&y[len(y)-1])) && + uintptr(unsafe.Pointer(&y[0])) <= uintptr(unsafe.Pointer(&x[len(x)-1])) +} + +// InexactOverlap reports whether x and y share memory at any non-corresponding +// index. The memory beyond the slice length is ignored. Note that x and y can +// have different lengths and still not have any inexact overlap. +// +// InexactOverlap can be used to implement the requirements of the crypto/cipher +// AEAD, Block, BlockMode and Stream interfaces. +func InexactOverlap(x, y []byte) bool { + if len(x) == 0 || len(y) == 0 || &x[0] == &y[0] { + return false + } + return AnyOverlap(x, y) +} diff --git a/vendor/golang.org/x/crypto/internal/subtle/aliasing_appengine.go b/vendor/golang.org/x/crypto/internal/subtle/aliasing_appengine.go new file mode 100644 index 00000000000..0cc4a8a642c --- /dev/null +++ b/vendor/golang.org/x/crypto/internal/subtle/aliasing_appengine.go @@ -0,0 +1,35 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build appengine + +// Package subtle implements functions that are often useful in cryptographic +// code but require careful thought to use correctly. +package subtle // import "golang.org/x/crypto/internal/subtle" + +// This is the Google App Engine standard variant based on reflect +// because the unsafe package and cgo are disallowed. + +import "reflect" + +// AnyOverlap reports whether x and y share memory at any (not necessarily +// corresponding) index. The memory beyond the slice length is ignored. +func AnyOverlap(x, y []byte) bool { + return len(x) > 0 && len(y) > 0 && + reflect.ValueOf(&x[0]).Pointer() <= reflect.ValueOf(&y[len(y)-1]).Pointer() && + reflect.ValueOf(&y[0]).Pointer() <= reflect.ValueOf(&x[len(x)-1]).Pointer() +} + +// InexactOverlap reports whether x and y share memory at any non-corresponding +// index. The memory beyond the slice length is ignored. Note that x and y can +// have different lengths and still not have any inexact overlap. +// +// InexactOverlap can be used to implement the requirements of the crypto/cipher +// AEAD, Block, BlockMode and Stream interfaces. +func InexactOverlap(x, y []byte) bool { + if len(x) == 0 || len(y) == 0 || &x[0] == &y[0] { + return false + } + return AnyOverlap(x, y) +} diff --git a/vendor/golang.org/x/crypto/poly1305/bits_compat.go b/vendor/golang.org/x/crypto/poly1305/bits_compat.go new file mode 100644 index 00000000000..157a69f61bd --- /dev/null +++ b/vendor/golang.org/x/crypto/poly1305/bits_compat.go @@ -0,0 +1,39 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.13 + +package poly1305 + +// Generic fallbacks for the math/bits intrinsics, copied from +// src/math/bits/bits.go. They were added in Go 1.12, but Add64 and Sum64 had +// variable time fallbacks until Go 1.13. 
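The Add64 fallback below recovers the carry bit from the operand and result sign bits alone, in constant time. A standalone sketch checking that formula against the Go 1.13 math/bits intrinsic (illustration only; addCompat is a name invented for this sketch):

package main

import (
	"fmt"
	"math/bits"
)

// addCompat repeats the fallback's carry formula from bits_compat.go.
func addCompat(x, y, carry uint64) (sum, carryOut uint64) {
	sum = x + y + carry
	carryOut = ((x & y) | ((x | y) &^ sum)) >> 63
	return
}

func main() {
	x, y := ^uint64(0), uint64(1) // the sum wraps, so the carry out must be 1
	s1, c1 := addCompat(x, y, 0)
	s2, c2 := bits.Add64(x, y, 0) // the Go 1.13+ intrinsic path
	fmt.Println(s1 == s2, c1 == c2, c1) // true true 1
}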
+ +func bitsAdd64(x, y, carry uint64) (sum, carryOut uint64) { + sum = x + y + carry + carryOut = ((x & y) | ((x | y) &^ sum)) >> 63 + return +} + +func bitsSub64(x, y, borrow uint64) (diff, borrowOut uint64) { + diff = x - y - borrow + borrowOut = ((^x & y) | (^(x ^ y) & diff)) >> 63 + return +} + +func bitsMul64(x, y uint64) (hi, lo uint64) { + const mask32 = 1<<32 - 1 + x0 := x & mask32 + x1 := x >> 32 + y0 := y & mask32 + y1 := y >> 32 + w0 := x0 * y0 + t := x1*y0 + w0>>32 + w1 := t & mask32 + w2 := t >> 32 + w1 += x0 * y1 + hi = x1*y1 + w2 + w1>>32 + lo = x * y + return +} diff --git a/vendor/golang.org/x/crypto/poly1305/bits_go1.13.go b/vendor/golang.org/x/crypto/poly1305/bits_go1.13.go new file mode 100644 index 00000000000..a0a185f0fc7 --- /dev/null +++ b/vendor/golang.org/x/crypto/poly1305/bits_go1.13.go @@ -0,0 +1,21 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.13 + +package poly1305 + +import "math/bits" + +func bitsAdd64(x, y, carry uint64) (sum, carryOut uint64) { + return bits.Add64(x, y, carry) +} + +func bitsSub64(x, y, borrow uint64) (diff, borrowOut uint64) { + return bits.Sub64(x, y, borrow) +} + +func bitsMul64(x, y uint64) (hi, lo uint64) { + return bits.Mul64(x, y) +} diff --git a/vendor/golang.org/x/crypto/poly1305/mac_noasm.go b/vendor/golang.org/x/crypto/poly1305/mac_noasm.go new file mode 100644 index 00000000000..a8dd589ae39 --- /dev/null +++ b/vendor/golang.org/x/crypto/poly1305/mac_noasm.go @@ -0,0 +1,11 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !amd64,!ppc64le gccgo appengine + +package poly1305 + +type mac struct{ macGeneric } + +func newMAC(key *[32]byte) mac { return mac{newMACGeneric(key)} } diff --git a/vendor/golang.org/x/crypto/poly1305/poly1305.go b/vendor/golang.org/x/crypto/poly1305/poly1305.go new file mode 100644 index 00000000000..066159b797d --- /dev/null +++ b/vendor/golang.org/x/crypto/poly1305/poly1305.go @@ -0,0 +1,89 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package poly1305 implements Poly1305 one-time message authentication code as +// specified in https://cr.yp.to/mac/poly1305-20050329.pdf. +// +// Poly1305 is a fast, one-time authentication function. It is infeasible for an +// attacker to generate an authenticator for a message without the key. However, a +// key must only be used for a single message. Authenticating two different +// messages with the same key allows an attacker to forge authenticators for other +// messages with the same key. +// +// Poly1305 was originally coupled with AES in order to make Poly1305-AES. AES was +// used with a fixed key in order to generate one-time keys from a nonce. +// However, in this package AES isn't used and the one-time key is specified +// directly. +package poly1305 // import "golang.org/x/crypto/poly1305" + +import "crypto/subtle" + +// TagSize is the size, in bytes, of a poly1305 authenticator. +const TagSize = 16 + +// Sum generates an authenticator for msg using a one-time key and puts the +// 16-byte result into out. Authenticating two different messages with the same +// key allows an attacker to forge messages at will.
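A minimal usage sketch for the Sum and Verify API declared in this file (standalone illustration; in real use the key must be random and never reused across messages):

package main

import (
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/poly1305"
)

func main() {
	var key [32]byte
	if _, err := rand.Read(key[:]); err != nil {
		panic(err)
	}
	msg := []byte("authenticate me exactly once")
	var tag [16]byte
	poly1305.Sum(&tag, msg, &key)
	fmt.Println(poly1305.Verify(&tag, msg, &key))              // true
	fmt.Println(poly1305.Verify(&tag, msg[:len(msg)-1], &key)) // false: message changed
}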
+func Sum(out *[16]byte, m []byte, key *[32]byte) { + sum(out, m, key) +} + +// Verify returns true if mac is a valid authenticator for m with the given key. +func Verify(mac *[16]byte, m []byte, key *[32]byte) bool { + var tmp [16]byte + Sum(&tmp, m, key) + return subtle.ConstantTimeCompare(tmp[:], mac[:]) == 1 +} + +// New returns a new MAC computing an authentication +// tag of all data written to it with the given key. +// This allows writing the message progressively instead +// of passing it as a single slice. Common users should use +// the Sum function instead. +// +// The key must be unique for each message, as authenticating +// two different messages with the same key allows an attacker +// to forge messages at will. +func New(key *[32]byte) *MAC { + return &MAC{ + mac: newMAC(key), + finalized: false, + } +} + +// MAC is an io.Writer computing an authentication tag +// of the data written to it. +// +// MAC cannot be used like common hash.Hash implementations, +// because using a poly1305 key twice breaks its security. +// Therefore writing data to a running MAC after calling +// Sum causes it to panic. +type MAC struct { + mac // platform-dependent implementation + + finalized bool +} + +// Size returns the number of bytes Sum will return. +func (h *MAC) Size() int { return TagSize } + +// Write adds more data to the running message authentication code. +// It never returns an error. +// +// It must not be called after the first call of Sum. +func (h *MAC) Write(p []byte) (n int, err error) { + if h.finalized { + panic("poly1305: write to MAC after Sum") + } + return h.mac.Write(p) +} + +// Sum computes the authenticator of all data written to the +// message authentication code. +func (h *MAC) Sum(b []byte) []byte { + var mac [TagSize]byte + h.mac.Sum(&mac) + h.finalized = true + return append(b, mac[:]...) +} diff --git a/vendor/golang.org/x/crypto/poly1305/sum_amd64.go b/vendor/golang.org/x/crypto/poly1305/sum_amd64.go new file mode 100644 index 00000000000..df56a652ff0 --- /dev/null +++ b/vendor/golang.org/x/crypto/poly1305/sum_amd64.go @@ -0,0 +1,58 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build amd64,!gccgo,!appengine + +package poly1305 + +//go:noescape +func update(state *macState, msg []byte) + +func sum(out *[16]byte, m []byte, key *[32]byte) { + h := newMAC(key) + h.Write(m) + h.Sum(out) +} + +func newMAC(key *[32]byte) (h mac) { + initialize(key, &h.r, &h.s) + return +} + +// mac is a wrapper for macGeneric that redirects calls that would have gone to +// updateGeneric to update. +// +// Its Write and Sum methods are otherwise identical to the macGeneric ones, but +// using function pointers would carry a major performance cost. 
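Because the buffered Write below only chunks the stream, split writes and a one-shot Sum must agree. A standalone sketch using the incremental API from poly1305.go above (a fixed toy key is safe here only because both MACs authenticate the same message):

package main

import (
	"bytes"
	"fmt"

	"golang.org/x/crypto/poly1305"
)

func main() {
	var key [32]byte
	key[0] = 1 // toy key: same message both times, so reuse is harmless here
	msg := []byte("0123456789abcdefghijklmnop") // one full 16-byte chunk plus a 10-byte tail

	var oneShot [16]byte
	poly1305.Sum(&oneShot, msg, &key)

	m := poly1305.New(&key)
	m.Write(msg[:7]) // split mid-chunk to exercise the internal buffer
	m.Write(msg[7:])
	fmt.Println(bytes.Equal(m.Sum(nil), oneShot[:])) // true
}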
+type mac struct{ macGeneric } + +func (h *mac) Write(p []byte) (int, error) { + nn := len(p) + if h.offset > 0 { + n := copy(h.buffer[h.offset:], p) + if h.offset+n < TagSize { + h.offset += n + return nn, nil + } + p = p[n:] + h.offset = 0 + update(&h.macState, h.buffer[:]) + } + if n := len(p) - (len(p) % TagSize); n > 0 { + update(&h.macState, p[:n]) + p = p[n:] + } + if len(p) > 0 { + h.offset += copy(h.buffer[h.offset:], p) + } + return nn, nil +} + +func (h *mac) Sum(out *[16]byte) { + state := h.macState + if h.offset > 0 { + update(&state, h.buffer[:h.offset]) + } + finalize(out, &state.h, &state.s) +} diff --git a/vendor/golang.org/x/crypto/poly1305/sum_amd64.s b/vendor/golang.org/x/crypto/poly1305/sum_amd64.s new file mode 100644 index 00000000000..8c0cefbb3cb --- /dev/null +++ b/vendor/golang.org/x/crypto/poly1305/sum_amd64.s @@ -0,0 +1,108 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build amd64,!gccgo,!appengine + +#include "textflag.h" + +#define POLY1305_ADD(msg, h0, h1, h2) \ + ADDQ 0(msg), h0; \ + ADCQ 8(msg), h1; \ + ADCQ $1, h2; \ + LEAQ 16(msg), msg + +#define POLY1305_MUL(h0, h1, h2, r0, r1, t0, t1, t2, t3) \ + MOVQ r0, AX; \ + MULQ h0; \ + MOVQ AX, t0; \ + MOVQ DX, t1; \ + MOVQ r0, AX; \ + MULQ h1; \ + ADDQ AX, t1; \ + ADCQ $0, DX; \ + MOVQ r0, t2; \ + IMULQ h2, t2; \ + ADDQ DX, t2; \ + \ + MOVQ r1, AX; \ + MULQ h0; \ + ADDQ AX, t1; \ + ADCQ $0, DX; \ + MOVQ DX, h0; \ + MOVQ r1, t3; \ + IMULQ h2, t3; \ + MOVQ r1, AX; \ + MULQ h1; \ + ADDQ AX, t2; \ + ADCQ DX, t3; \ + ADDQ h0, t2; \ + ADCQ $0, t3; \ + \ + MOVQ t0, h0; \ + MOVQ t1, h1; \ + MOVQ t2, h2; \ + ANDQ $3, h2; \ + MOVQ t2, t0; \ + ANDQ $0xFFFFFFFFFFFFFFFC, t0; \ + ADDQ t0, h0; \ + ADCQ t3, h1; \ + ADCQ $0, h2; \ + SHRQ $2, t3, t2; \ + SHRQ $2, t3; \ + ADDQ t2, h0; \ + ADCQ t3, h1; \ + ADCQ $0, h2 + +// func update(state *[7]uint64, msg []byte) +TEXT ·update(SB), $0-32 + MOVQ state+0(FP), DI + MOVQ msg_base+8(FP), SI + MOVQ msg_len+16(FP), R15 + + MOVQ 0(DI), R8 // h0 + MOVQ 8(DI), R9 // h1 + MOVQ 16(DI), R10 // h2 + MOVQ 24(DI), R11 // r0 + MOVQ 32(DI), R12 // r1 + + CMPQ R15, $16 + JB bytes_between_0_and_15 + +loop: + POLY1305_ADD(SI, R8, R9, R10) + +multiply: + POLY1305_MUL(R8, R9, R10, R11, R12, BX, CX, R13, R14) + SUBQ $16, R15 + CMPQ R15, $16 + JAE loop + +bytes_between_0_and_15: + TESTQ R15, R15 + JZ done + MOVQ $1, BX + XORQ CX, CX + XORQ R13, R13 + ADDQ R15, SI + +flush_buffer: + SHLQ $8, BX, CX + SHLQ $8, BX + MOVB -1(SI), R13 + XORQ R13, BX + DECQ SI + DECQ R15 + JNZ flush_buffer + + ADDQ BX, R8 + ADCQ CX, R9 + ADCQ $0, R10 + MOVQ $16, R15 + JMP multiply + +done: + MOVQ R8, 0(DI) + MOVQ R9, 8(DI) + MOVQ R10, 16(DI) + RET diff --git a/vendor/golang.org/x/crypto/poly1305/sum_generic.go b/vendor/golang.org/x/crypto/poly1305/sum_generic.go new file mode 100644 index 00000000000..1187eab78fd --- /dev/null +++ b/vendor/golang.org/x/crypto/poly1305/sum_generic.go @@ -0,0 +1,307 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file provides the generic implementation of Sum and MAC. Other files +// might provide optimized assembly implementations of some of this code. 
+ +package poly1305 + +import "encoding/binary" + +// Poly1305 [RFC 7539] is a relatively simple algorithm: the authentication tag +// for a 64-byte message is approximately +// +// s + m[0:16] * r⁴ + m[16:32] * r³ + m[32:48] * r² + m[48:64] * r mod 2¹³⁰ - 5 +// +// for some secret r and s. It can be computed sequentially like +// +// for len(msg) > 0: +// h += read(msg, 16) +// h *= r +// h %= 2¹³⁰ - 5 +// return h + s +// +// All the complexity is about doing performant constant-time math on numbers +// larger than any available numeric type. + +func sumGeneric(out *[TagSize]byte, msg []byte, key *[32]byte) { + h := newMACGeneric(key) + h.Write(msg) + h.Sum(out) +} + +func newMACGeneric(key *[32]byte) (h macGeneric) { + initialize(key, &h.r, &h.s) + return +} + +// macState holds numbers in saturated 64-bit little-endian limbs. That is, +// the value of [x0, x1, x2] is x[0] + x[1] * 2⁶⁴ + x[2] * 2¹²⁸. +type macState struct { + // h is the main accumulator. It is to be interpreted modulo 2¹³⁰ - 5, but + // can grow larger during and after rounds. + h [3]uint64 + // r and s are the private key components. + r [2]uint64 + s [2]uint64 +} + +type macGeneric struct { + macState + + buffer [TagSize]byte + offset int +} + +// Write splits the incoming message into TagSize chunks, and passes them to +// update. It buffers incomplete chunks. +func (h *macGeneric) Write(p []byte) (int, error) { + nn := len(p) + if h.offset > 0 { + n := copy(h.buffer[h.offset:], p) + if h.offset+n < TagSize { + h.offset += n + return nn, nil + } + p = p[n:] + h.offset = 0 + updateGeneric(&h.macState, h.buffer[:]) + } + if n := len(p) - (len(p) % TagSize); n > 0 { + updateGeneric(&h.macState, p[:n]) + p = p[n:] + } + if len(p) > 0 { + h.offset += copy(h.buffer[h.offset:], p) + } + return nn, nil +} + +// Sum flushes the last incomplete chunk from the buffer, if any, and generates +// the MAC output. It does not modify its state, in order to allow for multiple +// calls to Sum, even if no Write is allowed after Sum. +func (h *macGeneric) Sum(out *[TagSize]byte) { + state := h.macState + if h.offset > 0 { + updateGeneric(&state, h.buffer[:h.offset]) + } + finalize(out, &state.h, &state.s) +} + +// [rMask0, rMask1] is the specified Poly1305 clamping mask in little-endian. It +// clears some bits of the secret coefficient to make it possible to implement +// multiplication more efficiently. +const ( + rMask0 = 0x0FFFFFFC0FFFFFFF + rMask1 = 0x0FFFFFFC0FFFFFFC +) + +func initialize(key *[32]byte, r, s *[2]uint64) { + r[0] = binary.LittleEndian.Uint64(key[0:8]) & rMask0 + r[1] = binary.LittleEndian.Uint64(key[8:16]) & rMask1 + s[0] = binary.LittleEndian.Uint64(key[16:24]) + s[1] = binary.LittleEndian.Uint64(key[24:32]) +} + +// uint128 holds a 128-bit number as two 64-bit limbs, for use with the +// bits.Mul64 and bits.Add64 intrinsics. +type uint128 struct { + lo, hi uint64 +} + +func mul64(a, b uint64) uint128 { + hi, lo := bitsMul64(a, b) + return uint128{lo, hi} +} + +func add128(a, b uint128) uint128 { + lo, c := bitsAdd64(a.lo, b.lo, 0) + hi, c := bitsAdd64(a.hi, b.hi, c) + if c != 0 { + panic("poly1305: unexpected overflow") + } + return uint128{lo, hi} +} + +func shiftRightBy2(a uint128) uint128 { + a.lo = a.lo>>2 | (a.hi&3)<<62 + a.hi = a.hi >> 2 + return a +} + +// updateGeneric absorbs msg into the state.h accumulator.
For each chunk m of +// 128 bits of message, it computes +// +// h₊ = (h + m) * r mod 2¹³⁰ - 5 +// +// If the msg length is not a multiple of TagSize, it assumes the last +// incomplete chunk is the final one. +func updateGeneric(state *macState, msg []byte) { + h0, h1, h2 := state.h[0], state.h[1], state.h[2] + r0, r1 := state.r[0], state.r[1] + + for len(msg) > 0 { + var c uint64 + + // For the first step, h + m, we use a chain of bits.Add64 intrinsics. + // The resulting value of h might exceed 2¹³⁰ - 5, but will be partially + // reduced at the end of the multiplication below. + // + // The spec requires us to set a bit just above the message size, not to + // hide leading zeroes. For full chunks, that's 1 << 128, so we can just + // add 1 to the most significant (2¹²⁸) limb, h2. + if len(msg) >= TagSize { + h0, c = bitsAdd64(h0, binary.LittleEndian.Uint64(msg[0:8]), 0) + h1, c = bitsAdd64(h1, binary.LittleEndian.Uint64(msg[8:16]), c) + h2 += c + 1 + + msg = msg[TagSize:] + } else { + var buf [TagSize]byte + copy(buf[:], msg) + buf[len(msg)] = 1 + + h0, c = bitsAdd64(h0, binary.LittleEndian.Uint64(buf[0:8]), 0) + h1, c = bitsAdd64(h1, binary.LittleEndian.Uint64(buf[8:16]), c) + h2 += c + + msg = nil + } + + // Multiplication of big number limbs is similar to elementary school + // columnar multiplication. Instead of digits, there are 64-bit limbs. + // + // We are multiplying a 3 limbs number, h, by a 2 limbs number, r. + // + // h2 h1 h0 x + // r1 r0 = + // ---------------- + // h2r0 h1r0 h0r0 <-- individual 128-bit products + // + h2r1 h1r1 h0r1 + // ------------------------ + // m3 m2 m1 m0 <-- result in 128-bit overlapping limbs + // ------------------------ + // m3.hi m2.hi m1.hi m0.hi <-- carry propagation + // + m3.lo m2.lo m1.lo m0.lo + // ------------------------------- + // t4 t3 t2 t1 t0 <-- final result in 64-bit limbs + // + // The main difference from pen-and-paper multiplication is that we do + // carry propagation in a separate step, as if we wrote two digit sums + // at first (the 128-bit limbs), and then carried the tens all at once. + + h0r0 := mul64(h0, r0) + h1r0 := mul64(h1, r0) + h2r0 := mul64(h2, r0) + h0r1 := mul64(h0, r1) + h1r1 := mul64(h1, r1) + h2r1 := mul64(h2, r1) + + // Since h2 is known to be at most 7 (5 + 1 + 1), and r0 and r1 have their + // top 4 bits cleared by rMask{0,1}, we know that their product is not going + // to overflow 64 bits, so we can ignore the high part of the products. + // + // This also means that the product doesn't have a fifth limb (t4). + if h2r0.hi != 0 { + panic("poly1305: unexpected overflow") + } + if h2r1.hi != 0 { + panic("poly1305: unexpected overflow") + } + + m0 := h0r0 + m1 := add128(h1r0, h0r1) // These two additions don't overflow thanks again + m2 := add128(h2r0, h1r1) // to the 4 masked bits at the top of r0 and r1. + m3 := h2r1 + + t0 := m0.lo + t1, c := bitsAdd64(m1.lo, m0.hi, 0) + t2, c := bitsAdd64(m2.lo, m1.hi, c) + t3, _ := bitsAdd64(m3.lo, m2.hi, c) + + // Now we have the result as 4 64-bit limbs, and we need to reduce it + // modulo 2¹³⁰ - 5. The special shape of this Crandall prime lets us do + // a cheap partial reduction according to the reduction identity + // + // c * 2¹³⁰ + n = c * 5 + n mod 2¹³⁰ - 5 + // + // because 2¹³⁰ = 5 mod 2¹³⁰ - 5. Partial reduction since the result is + // likely to be larger than 2¹³⁰ - 5, but still small enough to fit the + // assumptions we make about h in the rest of the code. 
+ // + // See also https://speakerdeck.com/gtank/engineering-prime-numbers?slide=23 + + // We split the final result at the 2¹³⁰ mark into h and cc, the carry. + // Note that the carry bits are effectively shifted left by 2, in other + // words, cc = c * 4 for the c in the reduction identity. + h0, h1, h2 = t0, t1, t2&maskLow2Bits + cc := uint128{t2 & maskNotLow2Bits, t3} + + // To add c * 5 to h, we first add cc = c * 4, and then add (cc >> 2) = c. + + h0, c = bitsAdd64(h0, cc.lo, 0) + h1, c = bitsAdd64(h1, cc.hi, c) + h2 += c + + cc = shiftRightBy2(cc) + + h0, c = bitsAdd64(h0, cc.lo, 0) + h1, c = bitsAdd64(h1, cc.hi, c) + h2 += c + + // h2 is at most 3 + 1 + 1 = 5, making the whole of h at most + // + // 5 * 2¹²⁸ + (2¹²⁸ - 1) = 6 * 2¹²⁸ - 1 + } + + state.h[0], state.h[1], state.h[2] = h0, h1, h2 +} + +const ( + maskLow2Bits uint64 = 0x0000000000000003 + maskNotLow2Bits uint64 = ^maskLow2Bits +) + +// select64 returns x if v == 1 and y if v == 0, in constant time. +func select64(v, x, y uint64) uint64 { return ^(v-1)&x | (v-1)&y } + +// [p0, p1, p2] is 2¹³⁰ - 5 in little endian order. +const ( + p0 = 0xFFFFFFFFFFFFFFFB + p1 = 0xFFFFFFFFFFFFFFFF + p2 = 0x0000000000000003 +) + +// finalize completes the modular reduction of h and computes +// +// out = h + s mod 2¹²⁸ +// +func finalize(out *[TagSize]byte, h *[3]uint64, s *[2]uint64) { + h0, h1, h2 := h[0], h[1], h[2] + + // After the partial reduction in updateGeneric, h might be more than + // 2¹³⁰ - 5, but will be less than 2 * (2¹³⁰ - 5). To complete the reduction + // in constant time, we compute t = h - (2¹³⁰ - 5), and select h as the + // result if the subtraction underflows, and t otherwise. + + hMinusP0, b := bitsSub64(h0, p0, 0) + hMinusP1, b := bitsSub64(h1, p1, b) + _, b = bitsSub64(h2, p2, b) + + // h = h if h < p else h - p + h0 = select64(b, h0, hMinusP0) + h1 = select64(b, h1, hMinusP1) + + // Finally, we compute the last Poly1305 step + // + // tag = h + s mod 2¹²⁸ + // + // by just doing a wide addition with the 128 low bits of h and discarding + // the overflow. + h0, c := bitsAdd64(h0, s[0], 0) + h1, _ = bitsAdd64(h1, s[1], c) + + binary.LittleEndian.PutUint64(out[0:8], h0) + binary.LittleEndian.PutUint64(out[8:16], h1) +} diff --git a/vendor/golang.org/x/crypto/poly1305/sum_noasm.go b/vendor/golang.org/x/crypto/poly1305/sum_noasm.go new file mode 100644 index 00000000000..32a9cef6bbf --- /dev/null +++ b/vendor/golang.org/x/crypto/poly1305/sum_noasm.go @@ -0,0 +1,13 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build s390x,!go1.11 !amd64,!s390x,!ppc64le gccgo appengine nacl + +package poly1305 + +func sum(out *[TagSize]byte, msg []byte, key *[32]byte) { + h := newMAC(key) + h.Write(msg) + h.Sum(out) +} diff --git a/vendor/golang.org/x/crypto/poly1305/sum_ppc64le.go b/vendor/golang.org/x/crypto/poly1305/sum_ppc64le.go new file mode 100644 index 00000000000..3233616935b --- /dev/null +++ b/vendor/golang.org/x/crypto/poly1305/sum_ppc64le.go @@ -0,0 +1,58 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
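To make the sum_generic.go math above concrete before the ppc64le port begins: a deliberately naive, variable-time big.Int rendition of the same algorithm (standalone sketch, illustration only; polySpec is a name invented here, and the result should agree with Sum):

package main

import (
	"encoding/binary"
	"fmt"
	"math/big"

	"golang.org/x/crypto/poly1305"
)

// Clamping masks mirrored from initialize() in sum_generic.go.
const (
	rMask0 = 0x0FFFFFFC0FFFFFFF
	rMask1 = 0x0FFFFFFC0FFFFFFC
)

// polySpec follows the comment in sum_generic.go literally:
// h = (h + block) * r mod 2^130-5 per chunk, then tag = (h + s) mod 2^128.
func polySpec(msg []byte, key *[32]byte) [16]byte {
	p := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 130), big.NewInt(5))
	r := new(big.Int).Or(
		new(big.Int).SetUint64(binary.LittleEndian.Uint64(key[0:8])&rMask0),
		new(big.Int).Lsh(new(big.Int).SetUint64(binary.LittleEndian.Uint64(key[8:16])&rMask1), 64),
	)
	s := new(big.Int).Or(
		new(big.Int).SetUint64(binary.LittleEndian.Uint64(key[16:24])),
		new(big.Int).Lsh(new(big.Int).SetUint64(binary.LittleEndian.Uint64(key[24:32])), 64),
	)
	h := new(big.Int)
	for len(msg) > 0 {
		n := 16
		if len(msg) < n {
			n = len(msg)
		}
		block := make([]byte, n+1)
		copy(block, msg[:n])
		block[n] = 1 // the bit set just above the message, as the comments describe
		m := new(big.Int)
		for i := n; i >= 0; i-- { // little-endian bytes -> integer
			m.Lsh(m, 8).Or(m, big.NewInt(int64(block[i])))
		}
		h.Add(h, m).Mul(h, r).Mod(h, p)
		msg = msg[n:]
	}
	h.Add(h, s)
	h.Mod(h, new(big.Int).Lsh(big.NewInt(1), 128)) // discard the overflow past 2^128
	be := make([]byte, 16)
	h.FillBytes(be) // big-endian
	var tag [16]byte
	for i := range tag {
		tag[i] = be[15-i] // store little-endian
	}
	return tag
}

func main() {
	var key [32]byte
	key[0], key[16] = 2, 3 // toy key; real keys must be random and single-use
	msg := []byte("hello poly1305")
	var want [16]byte
	poly1305.Sum(&want, msg, &key)
	fmt.Println(polySpec(msg, &key) == want) // true
}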
+ +// +build ppc64le,!gccgo,!appengine + +package poly1305 + +//go:noescape +func update(state *macState, msg []byte) + +func sum(out *[16]byte, m []byte, key *[32]byte) { + h := newMAC(key) + h.Write(m) + h.Sum(out) +} + +func newMAC(key *[32]byte) (h mac) { + initialize(key, &h.r, &h.s) + return +} + +// mac is a wrapper for macGeneric that redirects calls that would have gone to +// updateGeneric to update. +// +// Its Write and Sum methods are otherwise identical to the macGeneric ones, but +// using function pointers would carry a major performance cost. +type mac struct{ macGeneric } + +func (h *mac) Write(p []byte) (int, error) { + nn := len(p) + if h.offset > 0 { + n := copy(h.buffer[h.offset:], p) + if h.offset+n < TagSize { + h.offset += n + return nn, nil + } + p = p[n:] + h.offset = 0 + update(&h.macState, h.buffer[:]) + } + if n := len(p) - (len(p) % TagSize); n > 0 { + update(&h.macState, p[:n]) + p = p[n:] + } + if len(p) > 0 { + h.offset += copy(h.buffer[h.offset:], p) + } + return nn, nil +} + +func (h *mac) Sum(out *[16]byte) { + state := h.macState + if h.offset > 0 { + update(&state, h.buffer[:h.offset]) + } + finalize(out, &state.h, &state.s) +} diff --git a/vendor/golang.org/x/crypto/poly1305/sum_ppc64le.s b/vendor/golang.org/x/crypto/poly1305/sum_ppc64le.s new file mode 100644 index 00000000000..4e20bf299a5 --- /dev/null +++ b/vendor/golang.org/x/crypto/poly1305/sum_ppc64le.s @@ -0,0 +1,181 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ppc64le,!gccgo,!appengine + +#include "textflag.h" + +// This was ported from the amd64 implementation. + +#define POLY1305_ADD(msg, h0, h1, h2, t0, t1, t2) \ + MOVD (msg), t0; \ + MOVD 8(msg), t1; \ + MOVD $1, t2; \ + ADDC t0, h0, h0; \ + ADDE t1, h1, h1; \ + ADDE t2, h2; \ + ADD $16, msg + +#define POLY1305_MUL(h0, h1, h2, r0, r1, t0, t1, t2, t3, t4, t5) \ + MULLD r0, h0, t0; \ + MULLD r0, h1, t4; \ + MULHDU r0, h0, t1; \ + MULHDU r0, h1, t5; \ + ADDC t4, t1, t1; \ + MULLD r0, h2, t2; \ + ADDZE t5; \ + MULHDU r1, h0, t4; \ + MULLD r1, h0, h0; \ + ADD t5, t2, t2; \ + ADDC h0, t1, t1; \ + MULLD h2, r1, t3; \ + ADDZE t4, h0; \ + MULHDU r1, h1, t5; \ + MULLD r1, h1, t4; \ + ADDC t4, t2, t2; \ + ADDE t5, t3, t3; \ + ADDC h0, t2, t2; \ + MOVD $-4, t4; \ + MOVD t0, h0; \ + MOVD t1, h1; \ + ADDZE t3; \ + ANDCC $3, t2, h2; \ + AND t2, t4, t0; \ + ADDC t0, h0, h0; \ + ADDE t3, h1, h1; \ + SLD $62, t3, t4; \ + SRD $2, t2; \ + ADDZE h2; \ + OR t4, t2, t2; \ + SRD $2, t3; \ + ADDC t2, h0, h0; \ + ADDE t3, h1, h1; \ + ADDZE h2 + +DATA ·poly1305Mask<>+0x00(SB)/8, $0x0FFFFFFC0FFFFFFF +DATA ·poly1305Mask<>+0x08(SB)/8, $0x0FFFFFFC0FFFFFFC +GLOBL ·poly1305Mask<>(SB), RODATA, $16 + +// func update(state *[7]uint64, msg []byte) +TEXT ·update(SB), $0-32 + MOVD state+0(FP), R3 + MOVD msg_base+8(FP), R4 + MOVD msg_len+16(FP), R5 + + MOVD 0(R3), R8 // h0 + MOVD 8(R3), R9 // h1 + MOVD 16(R3), R10 // h2 + MOVD 24(R3), R11 // r0 + MOVD 32(R3), R12 // r1 + + CMP R5, $16 + BLT bytes_between_0_and_15 + +loop: + POLY1305_ADD(R4, R8, R9, R10, R20, R21, R22) + +multiply: + POLY1305_MUL(R8, R9, R10, R11, R12, R16, R17, R18, R14, R20, R21) + ADD $-16, R5 + CMP R5, $16 + BGE loop + +bytes_between_0_and_15: + CMP $0, R5 + BEQ done + MOVD $0, R16 // h0 + MOVD $0, R17 // h1 + +flush_buffer: + CMP R5, $8 + BLE just1 + + MOVD $8, R21 + SUB R21, R5, R21 + + // Greater than 8 -- load the rightmost remaining bytes in msg + // and put into R17 (h1) 
+ MOVD (R4)(R21), R17 + MOVD $16, R22 + + // Find the offset to those bytes + SUB R5, R22, R22 + SLD $3, R22 + + // Shift to get only the bytes in msg + SRD R22, R17, R17 + + // Put 1 at high end + MOVD $1, R23 + SLD $3, R21 + SLD R21, R23, R23 + OR R23, R17, R17 + + // Remainder is 8 + MOVD $8, R5 + +just1: + CMP R5, $8 + BLT less8 + + // Exactly 8 + MOVD (R4), R16 + + CMP $0, R17 + + // Check if we've already set R17; if not + // set 1 to indicate end of msg. + BNE carry + MOVD $1, R17 + BR carry + +less8: + MOVD $0, R16 // h0 + MOVD $0, R22 // shift count + CMP R5, $4 + BLT less4 + MOVWZ (R4), R16 + ADD $4, R4 + ADD $-4, R5 + MOVD $32, R22 + +less4: + CMP R5, $2 + BLT less2 + MOVHZ (R4), R21 + SLD R22, R21, R21 + OR R16, R21, R16 + ADD $16, R22 + ADD $-2, R5 + ADD $2, R4 + +less2: + CMP $0, R5 + BEQ insert1 + MOVBZ (R4), R21 + SLD R22, R21, R21 + OR R16, R21, R16 + ADD $8, R22 + +insert1: + // Insert 1 at end of msg + MOVD $1, R21 + SLD R22, R21, R21 + OR R16, R21, R16 + +carry: + // Add new values to h0, h1, h2 + ADDC R16, R8 + ADDE R17, R9 + ADDE $0, R10 + MOVD $16, R5 + ADD R5, R4 + BR multiply + +done: + // Save h0, h1, h2 in state + MOVD R8, 0(R3) + MOVD R9, 8(R3) + MOVD R10, 16(R3) + RET diff --git a/vendor/golang.org/x/crypto/poly1305/sum_s390x.go b/vendor/golang.org/x/crypto/poly1305/sum_s390x.go new file mode 100644 index 00000000000..a8920ee9d21 --- /dev/null +++ b/vendor/golang.org/x/crypto/poly1305/sum_s390x.go @@ -0,0 +1,39 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build s390x,go1.11,!gccgo,!appengine + +package poly1305 + +import ( + "golang.org/x/sys/cpu" +) + +// poly1305vx is an assembly implementation of Poly1305 that uses vector +// instructions. It must only be called if the vector facility (vx) is +// available. +//go:noescape +func poly1305vx(out *[16]byte, m *byte, mlen uint64, key *[32]byte) + +// poly1305vmsl is an assembly implementation of Poly1305 that uses vector +// instructions, including VMSL. It must only be called if the vector facility (vx) is +// available and if VMSL is supported. +//go:noescape +func poly1305vmsl(out *[16]byte, m *byte, mlen uint64, key *[32]byte) + +func sum(out *[16]byte, m []byte, key *[32]byte) { + if cpu.S390X.HasVX { + var mPtr *byte + if len(m) > 0 { + mPtr = &m[0] + } + if cpu.S390X.HasVXE && len(m) > 256 { + poly1305vmsl(out, mPtr, uint64(len(m)), key) + } else { + poly1305vx(out, mPtr, uint64(len(m)), key) + } + } else { + sumGeneric(out, m, key) + } +} diff --git a/vendor/golang.org/x/crypto/poly1305/sum_s390x.s b/vendor/golang.org/x/crypto/poly1305/sum_s390x.s new file mode 100644 index 00000000000..ca5a309d867 --- /dev/null +++ b/vendor/golang.org/x/crypto/poly1305/sum_s390x.s @@ -0,0 +1,378 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build s390x,go1.11,!gccgo,!appengine + +#include "textflag.h" + +// Implementation of Poly1305 using the vector facility (vx). 
+ +// constants +#define MOD26 V0 +#define EX0 V1 +#define EX1 V2 +#define EX2 V3 + +// temporaries +#define T_0 V4 +#define T_1 V5 +#define T_2 V6 +#define T_3 V7 +#define T_4 V8 + +// key (r) +#define R_0 V9 +#define R_1 V10 +#define R_2 V11 +#define R_3 V12 +#define R_4 V13 +#define R5_1 V14 +#define R5_2 V15 +#define R5_3 V16 +#define R5_4 V17 +#define RSAVE_0 R5 +#define RSAVE_1 R6 +#define RSAVE_2 R7 +#define RSAVE_3 R8 +#define RSAVE_4 R9 +#define R5SAVE_1 V28 +#define R5SAVE_2 V29 +#define R5SAVE_3 V30 +#define R5SAVE_4 V31 + +// message block +#define F_0 V18 +#define F_1 V19 +#define F_2 V20 +#define F_3 V21 +#define F_4 V22 + +// accumulator +#define H_0 V23 +#define H_1 V24 +#define H_2 V25 +#define H_3 V26 +#define H_4 V27 + +GLOBL ·keyMask<>(SB), RODATA, $16 +DATA ·keyMask<>+0(SB)/8, $0xffffff0ffcffff0f +DATA ·keyMask<>+8(SB)/8, $0xfcffff0ffcffff0f + +GLOBL ·bswapMask<>(SB), RODATA, $16 +DATA ·bswapMask<>+0(SB)/8, $0x0f0e0d0c0b0a0908 +DATA ·bswapMask<>+8(SB)/8, $0x0706050403020100 + +GLOBL ·constants<>(SB), RODATA, $64 +// MOD26 +DATA ·constants<>+0(SB)/8, $0x3ffffff +DATA ·constants<>+8(SB)/8, $0x3ffffff +// EX0 +DATA ·constants<>+16(SB)/8, $0x0006050403020100 +DATA ·constants<>+24(SB)/8, $0x1016151413121110 +// EX1 +DATA ·constants<>+32(SB)/8, $0x060c0b0a09080706 +DATA ·constants<>+40(SB)/8, $0x161c1b1a19181716 +// EX2 +DATA ·constants<>+48(SB)/8, $0x0d0d0d0d0d0f0e0d +DATA ·constants<>+56(SB)/8, $0x1d1d1d1d1d1f1e1d + +// h = (f*g) % (2**130-5) [partial reduction] +#define MULTIPLY(f0, f1, f2, f3, f4, g0, g1, g2, g3, g4, g51, g52, g53, g54, h0, h1, h2, h3, h4) \ + VMLOF f0, g0, h0 \ + VMLOF f0, g1, h1 \ + VMLOF f0, g2, h2 \ + VMLOF f0, g3, h3 \ + VMLOF f0, g4, h4 \ + VMLOF f1, g54, T_0 \ + VMLOF f1, g0, T_1 \ + VMLOF f1, g1, T_2 \ + VMLOF f1, g2, T_3 \ + VMLOF f1, g3, T_4 \ + VMALOF f2, g53, h0, h0 \ + VMALOF f2, g54, h1, h1 \ + VMALOF f2, g0, h2, h2 \ + VMALOF f2, g1, h3, h3 \ + VMALOF f2, g2, h4, h4 \ + VMALOF f3, g52, T_0, T_0 \ + VMALOF f3, g53, T_1, T_1 \ + VMALOF f3, g54, T_2, T_2 \ + VMALOF f3, g0, T_3, T_3 \ + VMALOF f3, g1, T_4, T_4 \ + VMALOF f4, g51, h0, h0 \ + VMALOF f4, g52, h1, h1 \ + VMALOF f4, g53, h2, h2 \ + VMALOF f4, g54, h3, h3 \ + VMALOF f4, g0, h4, h4 \ + VAG T_0, h0, h0 \ + VAG T_1, h1, h1 \ + VAG T_2, h2, h2 \ + VAG T_3, h3, h3 \ + VAG T_4, h4, h4 + +// carry h0->h1 h3->h4, h1->h2 h4->h0, h0->h1 h2->h3, h3->h4 +#define REDUCE(h0, h1, h2, h3, h4) \ + VESRLG $26, h0, T_0 \ + VESRLG $26, h3, T_1 \ + VN MOD26, h0, h0 \ + VN MOD26, h3, h3 \ + VAG T_0, h1, h1 \ + VAG T_1, h4, h4 \ + VESRLG $26, h1, T_2 \ + VESRLG $26, h4, T_3 \ + VN MOD26, h1, h1 \ + VN MOD26, h4, h4 \ + VESLG $2, T_3, T_4 \ + VAG T_3, T_4, T_4 \ + VAG T_2, h2, h2 \ + VAG T_4, h0, h0 \ + VESRLG $26, h2, T_0 \ + VESRLG $26, h0, T_1 \ + VN MOD26, h2, h2 \ + VN MOD26, h0, h0 \ + VAG T_0, h3, h3 \ + VAG T_1, h1, h1 \ + VESRLG $26, h3, T_2 \ + VN MOD26, h3, h3 \ + VAG T_2, h4, h4 + +// expand in0 into d[0] and in1 into d[1] +#define EXPAND(in0, in1, d0, d1, d2, d3, d4) \ + VGBM $0x0707, d1 \ // d1=tmp + VPERM in0, in1, EX2, d4 \ + VPERM in0, in1, EX0, d0 \ + VPERM in0, in1, EX1, d2 \ + VN d1, d4, d4 \ + VESRLG $26, d0, d1 \ + VESRLG $30, d2, d3 \ + VESRLG $4, d2, d2 \ + VN MOD26, d0, d0 \ + VN MOD26, d1, d1 \ + VN MOD26, d2, d2 \ + VN MOD26, d3, d3 + +// pack h4:h0 into h1:h0 (no carry) +#define PACK(h0, h1, h2, h3, h4) \ + VESLG $26, h1, h1 \ + VESLG $26, h3, h3 \ + VO h0, h1, h0 \ + VO h2, h3, h2 \ + VESLG $4, h2, h2 \ + VLEIB $7, $48, h1 \ + VSLB h1, h2, h2 \ + VO h0, h2, h0 \ + VLEIB $7, 
$104, h1 \ + VSLB h1, h4, h3 \ + VO h3, h0, h0 \ + VLEIB $7, $24, h1 \ + VSRLB h1, h4, h1 + +// if h > 2**130-5 then h -= 2**130-5 +#define MOD(h0, h1, t0, t1, t2) \ + VZERO t0 \ + VLEIG $1, $5, t0 \ + VACCQ h0, t0, t1 \ + VAQ h0, t0, t0 \ + VONE t2 \ + VLEIG $1, $-4, t2 \ + VAQ t2, t1, t1 \ + VACCQ h1, t1, t1 \ + VONE t2 \ + VAQ t2, t1, t1 \ + VN h0, t1, t2 \ + VNC t0, t1, t1 \ + VO t1, t2, h0 + +// func poly1305vx(out *[16]byte, m *byte, mlen uint64, key *[32]key) +TEXT ·poly1305vx(SB), $0-32 + // This code processes up to 2 blocks (32 bytes) per iteration + // using the algorithm described in: + // NEON crypto, Daniel J. Bernstein & Peter Schwabe + // https://cryptojedi.org/papers/neoncrypto-20120320.pdf + LMG out+0(FP), R1, R4 // R1=out, R2=m, R3=mlen, R4=key + + // load MOD26, EX0, EX1 and EX2 + MOVD $·constants<>(SB), R5 + VLM (R5), MOD26, EX2 + + // setup r + VL (R4), T_0 + MOVD $·keyMask<>(SB), R6 + VL (R6), T_1 + VN T_0, T_1, T_0 + EXPAND(T_0, T_0, R_0, R_1, R_2, R_3, R_4) + + // setup r*5 + VLEIG $0, $5, T_0 + VLEIG $1, $5, T_0 + + // store r (for final block) + VMLOF T_0, R_1, R5SAVE_1 + VMLOF T_0, R_2, R5SAVE_2 + VMLOF T_0, R_3, R5SAVE_3 + VMLOF T_0, R_4, R5SAVE_4 + VLGVG $0, R_0, RSAVE_0 + VLGVG $0, R_1, RSAVE_1 + VLGVG $0, R_2, RSAVE_2 + VLGVG $0, R_3, RSAVE_3 + VLGVG $0, R_4, RSAVE_4 + + // skip r**2 calculation + CMPBLE R3, $16, skip + + // calculate r**2 + MULTIPLY(R_0, R_1, R_2, R_3, R_4, R_0, R_1, R_2, R_3, R_4, R5SAVE_1, R5SAVE_2, R5SAVE_3, R5SAVE_4, H_0, H_1, H_2, H_3, H_4) + REDUCE(H_0, H_1, H_2, H_3, H_4) + VLEIG $0, $5, T_0 + VLEIG $1, $5, T_0 + VMLOF T_0, H_1, R5_1 + VMLOF T_0, H_2, R5_2 + VMLOF T_0, H_3, R5_3 + VMLOF T_0, H_4, R5_4 + VLR H_0, R_0 + VLR H_1, R_1 + VLR H_2, R_2 + VLR H_3, R_3 + VLR H_4, R_4 + + // initialize h + VZERO H_0 + VZERO H_1 + VZERO H_2 + VZERO H_3 + VZERO H_4 + +loop: + CMPBLE R3, $32, b2 + VLM (R2), T_0, T_1 + SUB $32, R3 + MOVD $32(R2), R2 + EXPAND(T_0, T_1, F_0, F_1, F_2, F_3, F_4) + VLEIB $4, $1, F_4 + VLEIB $12, $1, F_4 + +multiply: + VAG H_0, F_0, F_0 + VAG H_1, F_1, F_1 + VAG H_2, F_2, F_2 + VAG H_3, F_3, F_3 + VAG H_4, F_4, F_4 + MULTIPLY(F_0, F_1, F_2, F_3, F_4, R_0, R_1, R_2, R_3, R_4, R5_1, R5_2, R5_3, R5_4, H_0, H_1, H_2, H_3, H_4) + REDUCE(H_0, H_1, H_2, H_3, H_4) + CMPBNE R3, $0, loop + +finish: + // sum vectors + VZERO T_0 + VSUMQG H_0, T_0, H_0 + VSUMQG H_1, T_0, H_1 + VSUMQG H_2, T_0, H_2 + VSUMQG H_3, T_0, H_3 + VSUMQG H_4, T_0, H_4 + + // h may be >= 2*(2**130-5) so we need to reduce it again + REDUCE(H_0, H_1, H_2, H_3, H_4) + + // carry h1->h4 + VESRLG $26, H_1, T_1 + VN MOD26, H_1, H_1 + VAQ T_1, H_2, H_2 + VESRLG $26, H_2, T_2 + VN MOD26, H_2, H_2 + VAQ T_2, H_3, H_3 + VESRLG $26, H_3, T_3 + VN MOD26, H_3, H_3 + VAQ T_3, H_4, H_4 + + // h is now < 2*(2**130-5) + // pack h into h1 (hi) and h0 (lo) + PACK(H_0, H_1, H_2, H_3, H_4) + + // if h > 2**130-5 then h -= 2**130-5 + MOD(H_0, H_1, T_0, T_1, T_2) + + // h += s + MOVD $·bswapMask<>(SB), R5 + VL (R5), T_1 + VL 16(R4), T_0 + VPERM T_0, T_0, T_1, T_0 // reverse bytes (to big) + VAQ T_0, H_0, H_0 + VPERM H_0, H_0, T_1, H_0 // reverse bytes (to little) + VST H_0, (R1) + + RET + +b2: + CMPBLE R3, $16, b1 + + // 2 blocks remaining + SUB $17, R3 + VL (R2), T_0 + VLL R3, 16(R2), T_1 + ADD $1, R3 + MOVBZ $1, R0 + CMPBEQ R3, $16, 2(PC) + VLVGB R3, R0, T_1 + EXPAND(T_0, T_1, F_0, F_1, F_2, F_3, F_4) + CMPBNE R3, $16, 2(PC) + VLEIB $12, $1, F_4 + VLEIB $4, $1, F_4 + + // setup [r²,r] + VLVGG $1, RSAVE_0, R_0 + VLVGG $1, RSAVE_1, R_1 + VLVGG $1, RSAVE_2, R_2 + VLVGG $1, RSAVE_3, 
R_3 + VLVGG $1, RSAVE_4, R_4 + VPDI $0, R5_1, R5SAVE_1, R5_1 + VPDI $0, R5_2, R5SAVE_2, R5_2 + VPDI $0, R5_3, R5SAVE_3, R5_3 + VPDI $0, R5_4, R5SAVE_4, R5_4 + + MOVD $0, R3 + BR multiply + +skip: + VZERO H_0 + VZERO H_1 + VZERO H_2 + VZERO H_3 + VZERO H_4 + + CMPBEQ R3, $0, finish + +b1: + // 1 block remaining + SUB $1, R3 + VLL R3, (R2), T_0 + ADD $1, R3 + MOVBZ $1, R0 + CMPBEQ R3, $16, 2(PC) + VLVGB R3, R0, T_0 + VZERO T_1 + EXPAND(T_0, T_1, F_0, F_1, F_2, F_3, F_4) + CMPBNE R3, $16, 2(PC) + VLEIB $4, $1, F_4 + VLEIG $1, $1, R_0 + VZERO R_1 + VZERO R_2 + VZERO R_3 + VZERO R_4 + VZERO R5_1 + VZERO R5_2 + VZERO R5_3 + VZERO R5_4 + + // setup [r, 1] + VLVGG $0, RSAVE_0, R_0 + VLVGG $0, RSAVE_1, R_1 + VLVGG $0, RSAVE_2, R_2 + VLVGG $0, RSAVE_3, R_3 + VLVGG $0, RSAVE_4, R_4 + VPDI $0, R5SAVE_1, R5_1, R5_1 + VPDI $0, R5SAVE_2, R5_2, R5_2 + VPDI $0, R5SAVE_3, R5_3, R5_3 + VPDI $0, R5SAVE_4, R5_4, R5_4 + + MOVD $0, R3 + BR multiply diff --git a/vendor/golang.org/x/crypto/poly1305/sum_vmsl_s390x.s b/vendor/golang.org/x/crypto/poly1305/sum_vmsl_s390x.s new file mode 100644 index 00000000000..e60bbc1d7f8 --- /dev/null +++ b/vendor/golang.org/x/crypto/poly1305/sum_vmsl_s390x.s @@ -0,0 +1,909 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build s390x,go1.11,!gccgo,!appengine + +#include "textflag.h" + +// Implementation of Poly1305 using the vector facility (vx) and the VMSL instruction. + +// constants +#define EX0 V1 +#define EX1 V2 +#define EX2 V3 + +// temporaries +#define T_0 V4 +#define T_1 V5 +#define T_2 V6 +#define T_3 V7 +#define T_4 V8 +#define T_5 V9 +#define T_6 V10 +#define T_7 V11 +#define T_8 V12 +#define T_9 V13 +#define T_10 V14 + +// r**2 & r**4 +#define R_0 V15 +#define R_1 V16 +#define R_2 V17 +#define R5_1 V18 +#define R5_2 V19 +// key (r) +#define RSAVE_0 R7 +#define RSAVE_1 R8 +#define RSAVE_2 R9 +#define R5SAVE_1 R10 +#define R5SAVE_2 R11 + +// message block +#define M0 V20 +#define M1 V21 +#define M2 V22 +#define M3 V23 +#define M4 V24 +#define M5 V25 + +// accumulator +#define H0_0 V26 +#define H1_0 V27 +#define H2_0 V28 +#define H0_1 V29 +#define H1_1 V30 +#define H2_1 V31 + +GLOBL ·keyMask<>(SB), RODATA, $16 +DATA ·keyMask<>+0(SB)/8, $0xffffff0ffcffff0f +DATA ·keyMask<>+8(SB)/8, $0xfcffff0ffcffff0f + +GLOBL ·bswapMask<>(SB), RODATA, $16 +DATA ·bswapMask<>+0(SB)/8, $0x0f0e0d0c0b0a0908 +DATA ·bswapMask<>+8(SB)/8, $0x0706050403020100 + +GLOBL ·constants<>(SB), RODATA, $48 +// EX0 +DATA ·constants<>+0(SB)/8, $0x18191a1b1c1d1e1f +DATA ·constants<>+8(SB)/8, $0x0000050403020100 +// EX1 +DATA ·constants<>+16(SB)/8, $0x18191a1b1c1d1e1f +DATA ·constants<>+24(SB)/8, $0x00000a0908070605 +// EX2 +DATA ·constants<>+32(SB)/8, $0x18191a1b1c1d1e1f +DATA ·constants<>+40(SB)/8, $0x0000000f0e0d0c0b + +GLOBL ·c<>(SB), RODATA, $48 +// EX0 +DATA ·c<>+0(SB)/8, $0x0000050403020100 +DATA ·c<>+8(SB)/8, $0x0000151413121110 +// EX1 +DATA ·c<>+16(SB)/8, $0x00000a0908070605 +DATA ·c<>+24(SB)/8, $0x00001a1918171615 +// EX2 +DATA ·c<>+32(SB)/8, $0x0000000f0e0d0c0b +DATA ·c<>+40(SB)/8, $0x0000001f1e1d1c1b + +GLOBL ·reduce<>(SB), RODATA, $32 +// 44 bit +DATA ·reduce<>+0(SB)/8, $0x0 +DATA ·reduce<>+8(SB)/8, $0xfffffffffff +// 42 bit +DATA ·reduce<>+16(SB)/8, $0x0 +DATA ·reduce<>+24(SB)/8, $0x3ffffffffff + +// h = (f*g) % (2**130-5) [partial reduction] +// uses T_0...T_9 temporary registers +// input: m02_0, m02_1, m02_2, m13_0, m13_1, m13_2, r_0, r_1, r_2, r5_1, r5_2, m4_0, m4_1, 
m4_2, m5_0, m5_1, m5_2 +// temp: t0, t1, t2, t3, t4, t5, t6, t7, t8, t9 +// output: m02_0, m02_1, m02_2, m13_0, m13_1, m13_2 +#define MULTIPLY(m02_0, m02_1, m02_2, m13_0, m13_1, m13_2, r_0, r_1, r_2, r5_1, r5_2, m4_0, m4_1, m4_2, m5_0, m5_1, m5_2, t0, t1, t2, t3, t4, t5, t6, t7, t8, t9) \ + \ // Eliminate the dependency for the last 2 VMSLs + VMSLG m02_0, r_2, m4_2, m4_2 \ + VMSLG m13_0, r_2, m5_2, m5_2 \ // 8 VMSLs pipelined + VMSLG m02_0, r_0, m4_0, m4_0 \ + VMSLG m02_1, r5_2, V0, T_0 \ + VMSLG m02_0, r_1, m4_1, m4_1 \ + VMSLG m02_1, r_0, V0, T_1 \ + VMSLG m02_1, r_1, V0, T_2 \ + VMSLG m02_2, r5_1, V0, T_3 \ + VMSLG m02_2, r5_2, V0, T_4 \ + VMSLG m13_0, r_0, m5_0, m5_0 \ + VMSLG m13_1, r5_2, V0, T_5 \ + VMSLG m13_0, r_1, m5_1, m5_1 \ + VMSLG m13_1, r_0, V0, T_6 \ + VMSLG m13_1, r_1, V0, T_7 \ + VMSLG m13_2, r5_1, V0, T_8 \ + VMSLG m13_2, r5_2, V0, T_9 \ + VMSLG m02_2, r_0, m4_2, m4_2 \ + VMSLG m13_2, r_0, m5_2, m5_2 \ + VAQ m4_0, T_0, m02_0 \ + VAQ m4_1, T_1, m02_1 \ + VAQ m5_0, T_5, m13_0 \ + VAQ m5_1, T_6, m13_1 \ + VAQ m02_0, T_3, m02_0 \ + VAQ m02_1, T_4, m02_1 \ + VAQ m13_0, T_8, m13_0 \ + VAQ m13_1, T_9, m13_1 \ + VAQ m4_2, T_2, m02_2 \ + VAQ m5_2, T_7, m13_2 \ + +// SQUARE uses three limbs of r and r_2*5 to output square of r +// uses T_1, T_5 and T_7 temporary registers +// input: r_0, r_1, r_2, r5_2 +// temp: TEMP0, TEMP1, TEMP2 +// output: p0, p1, p2 +#define SQUARE(r_0, r_1, r_2, r5_2, p0, p1, p2, TEMP0, TEMP1, TEMP2) \ + VMSLG r_0, r_0, p0, p0 \ + VMSLG r_1, r5_2, V0, TEMP0 \ + VMSLG r_2, r5_2, p1, p1 \ + VMSLG r_0, r_1, V0, TEMP1 \ + VMSLG r_1, r_1, p2, p2 \ + VMSLG r_0, r_2, V0, TEMP2 \ + VAQ TEMP0, p0, p0 \ + VAQ TEMP1, p1, p1 \ + VAQ TEMP2, p2, p2 \ + VAQ TEMP0, p0, p0 \ + VAQ TEMP1, p1, p1 \ + VAQ TEMP2, p2, p2 \ + +// carry h0->h1->h2->h0 || h3->h4->h5->h3 +// uses T_2, T_4, T_5, T_7, T_8, T_9 +// t6, t7, t8, t9, t10, t11 +// input: h0, h1, h2, h3, h4, h5 +// temp: t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11 +// output: h0, h1, h2, h3, h4, h5 +#define REDUCE(h0, h1, h2, h3, h4, h5, t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11) \ + VLM (R12), t6, t7 \ // 44 and 42 bit clear mask + VLEIB $7, $0x28, t10 \ // 5 byte shift mask + VREPIB $4, t8 \ // 4 bit shift mask + VREPIB $2, t11 \ // 2 bit shift mask + VSRLB t10, h0, t0 \ // h0 byte shift + VSRLB t10, h1, t1 \ // h1 byte shift + VSRLB t10, h2, t2 \ // h2 byte shift + VSRLB t10, h3, t3 \ // h3 byte shift + VSRLB t10, h4, t4 \ // h4 byte shift + VSRLB t10, h5, t5 \ // h5 byte shift + VSRL t8, t0, t0 \ // h0 bit shift + VSRL t8, t1, t1 \ // h2 bit shift + VSRL t11, t2, t2 \ // h2 bit shift + VSRL t8, t3, t3 \ // h3 bit shift + VSRL t8, t4, t4 \ // h4 bit shift + VESLG $2, t2, t9 \ // h2 carry x5 + VSRL t11, t5, t5 \ // h5 bit shift + VN t6, h0, h0 \ // h0 clear carry + VAQ t2, t9, t2 \ // h2 carry x5 + VESLG $2, t5, t9 \ // h5 carry x5 + VN t6, h1, h1 \ // h1 clear carry + VN t7, h2, h2 \ // h2 clear carry + VAQ t5, t9, t5 \ // h5 carry x5 + VN t6, h3, h3 \ // h3 clear carry + VN t6, h4, h4 \ // h4 clear carry + VN t7, h5, h5 \ // h5 clear carry + VAQ t0, h1, h1 \ // h0->h1 + VAQ t3, h4, h4 \ // h3->h4 + VAQ t1, h2, h2 \ // h1->h2 + VAQ t4, h5, h5 \ // h4->h5 + VAQ t2, h0, h0 \ // h2->h0 + VAQ t5, h3, h3 \ // h5->h3 + VREPG $1, t6, t6 \ // 44 and 42 bit masks across both halves + VREPG $1, t7, t7 \ + VSLDB $8, h0, h0, h0 \ // set up [h0/1/2, h3/4/5] + VSLDB $8, h1, h1, h1 \ + VSLDB $8, h2, h2, h2 \ + VO h0, h3, h3 \ + VO h1, h4, h4 \ + VO h2, h5, h5 \ + VESRLG $44, h3, t0 \ // 44 bit shift right + VESRLG $44, h4, t1 
\ + VESRLG $42, h5, t2 \ + VN t6, h3, h3 \ // clear carry bits + VN t6, h4, h4 \ + VN t7, h5, h5 \ + VESLG $2, t2, t9 \ // multiply carry by 5 + VAQ t9, t2, t2 \ + VAQ t0, h4, h4 \ + VAQ t1, h5, h5 \ + VAQ t2, h3, h3 \ + +// carry h0->h1->h2->h0 +// input: h0, h1, h2 +// temp: t0, t1, t2, t3, t4, t5, t6, t7, t8 +// output: h0, h1, h2 +#define REDUCE2(h0, h1, h2, t0, t1, t2, t3, t4, t5, t6, t7, t8) \ + VLEIB $7, $0x28, t3 \ // 5 byte shift mask + VREPIB $4, t4 \ // 4 bit shift mask + VREPIB $2, t7 \ // 2 bit shift mask + VGBM $0x003F, t5 \ // mask to clear carry bits + VSRLB t3, h0, t0 \ + VSRLB t3, h1, t1 \ + VSRLB t3, h2, t2 \ + VESRLG $4, t5, t5 \ // 44 bit clear mask + VSRL t4, t0, t0 \ + VSRL t4, t1, t1 \ + VSRL t7, t2, t2 \ + VESRLG $2, t5, t6 \ // 42 bit clear mask + VESLG $2, t2, t8 \ + VAQ t8, t2, t2 \ + VN t5, h0, h0 \ + VN t5, h1, h1 \ + VN t6, h2, h2 \ + VAQ t0, h1, h1 \ + VAQ t1, h2, h2 \ + VAQ t2, h0, h0 \ + VSRLB t3, h0, t0 \ + VSRLB t3, h1, t1 \ + VSRLB t3, h2, t2 \ + VSRL t4, t0, t0 \ + VSRL t4, t1, t1 \ + VSRL t7, t2, t2 \ + VN t5, h0, h0 \ + VN t5, h1, h1 \ + VESLG $2, t2, t8 \ + VN t6, h2, h2 \ + VAQ t0, h1, h1 \ + VAQ t8, t2, t2 \ + VAQ t1, h2, h2 \ + VAQ t2, h0, h0 \ + +// expands two message blocks into the lower halfs of the d registers +// moves the contents of the d registers into upper halfs +// input: in1, in2, d0, d1, d2, d3, d4, d5 +// temp: TEMP0, TEMP1, TEMP2, TEMP3 +// output: d0, d1, d2, d3, d4, d5 +#define EXPACC(in1, in2, d0, d1, d2, d3, d4, d5, TEMP0, TEMP1, TEMP2, TEMP3) \ + VGBM $0xff3f, TEMP0 \ + VGBM $0xff1f, TEMP1 \ + VESLG $4, d1, TEMP2 \ + VESLG $4, d4, TEMP3 \ + VESRLG $4, TEMP0, TEMP0 \ + VPERM in1, d0, EX0, d0 \ + VPERM in2, d3, EX0, d3 \ + VPERM in1, d2, EX2, d2 \ + VPERM in2, d5, EX2, d5 \ + VPERM in1, TEMP2, EX1, d1 \ + VPERM in2, TEMP3, EX1, d4 \ + VN TEMP0, d0, d0 \ + VN TEMP0, d3, d3 \ + VESRLG $4, d1, d1 \ + VESRLG $4, d4, d4 \ + VN TEMP1, d2, d2 \ + VN TEMP1, d5, d5 \ + VN TEMP0, d1, d1 \ + VN TEMP0, d4, d4 \ + +// expands one message block into the lower halfs of the d registers +// moves the contents of the d registers into upper halfs +// input: in, d0, d1, d2 +// temp: TEMP0, TEMP1, TEMP2 +// output: d0, d1, d2 +#define EXPACC2(in, d0, d1, d2, TEMP0, TEMP1, TEMP2) \ + VGBM $0xff3f, TEMP0 \ + VESLG $4, d1, TEMP2 \ + VGBM $0xff1f, TEMP1 \ + VPERM in, d0, EX0, d0 \ + VESRLG $4, TEMP0, TEMP0 \ + VPERM in, d2, EX2, d2 \ + VPERM in, TEMP2, EX1, d1 \ + VN TEMP0, d0, d0 \ + VN TEMP1, d2, d2 \ + VESRLG $4, d1, d1 \ + VN TEMP0, d1, d1 \ + +// pack h2:h0 into h1:h0 (no carry) +// input: h0, h1, h2 +// output: h0, h1, h2 +#define PACK(h0, h1, h2) \ + VMRLG h1, h2, h2 \ // copy h1 to upper half h2 + VESLG $44, h1, h1 \ // shift limb 1 44 bits, leaving 20 + VO h0, h1, h0 \ // combine h0 with 20 bits from limb 1 + VESRLG $20, h2, h1 \ // put top 24 bits of limb 1 into h1 + VLEIG $1, $0, h1 \ // clear h2 stuff from lower half of h1 + VO h0, h1, h0 \ // h0 now has 88 bits (limb 0 and 1) + VLEIG $0, $0, h2 \ // clear upper half of h2 + VESRLG $40, h2, h1 \ // h1 now has upper two bits of result + VLEIB $7, $88, h1 \ // for byte shift (11 bytes) + VSLB h1, h2, h2 \ // shift h2 11 bytes to the left + VO h0, h2, h0 \ // combine h0 with 20 bits from limb 1 + VLEIG $0, $0, h1 \ // clear upper half of h1 + +// if h > 2**130-5 then h -= 2**130-5 +// input: h0, h1 +// temp: t0, t1, t2 +// output: h0 +#define MOD(h0, h1, t0, t1, t2) \ + VZERO t0 \ + VLEIG $1, $5, t0 \ + VACCQ h0, t0, t1 \ + VAQ h0, t0, t0 \ + VONE t2 \ + VLEIG $1, $-4, t2 \ + VAQ t2, t1, t1 \ 
+ VACCQ h1, t1, t1 \ + VONE t2 \ + VAQ t2, t1, t1 \ + VN h0, t1, t2 \ + VNC t0, t1, t1 \ + VO t1, t2, h0 \ + +// func poly1305vmsl(out *[16]byte, m *byte, mlen uint64, key *[32]key) +TEXT ·poly1305vmsl(SB), $0-32 + // This code processes 6 + up to 4 blocks (32 bytes) per iteration + // using the algorithm described in: + // NEON crypto, Daniel J. Bernstein & Peter Schwabe + // https://cryptojedi.org/papers/neoncrypto-20120320.pdf + // And as moddified for VMSL as described in + // Accelerating Poly1305 Cryptographic Message Authentication on the z14 + // O'Farrell et al, CASCON 2017, p48-55 + // https://ibm.ent.box.com/s/jf9gedj0e9d2vjctfyh186shaztavnht + + LMG out+0(FP), R1, R4 // R1=out, R2=m, R3=mlen, R4=key + VZERO V0 // c + + // load EX0, EX1 and EX2 + MOVD $·constants<>(SB), R5 + VLM (R5), EX0, EX2 // c + + // setup r + VL (R4), T_0 + MOVD $·keyMask<>(SB), R6 + VL (R6), T_1 + VN T_0, T_1, T_0 + VZERO T_2 // limbs for r + VZERO T_3 + VZERO T_4 + EXPACC2(T_0, T_2, T_3, T_4, T_1, T_5, T_7) + + // T_2, T_3, T_4: [0, r] + + // setup r*20 + VLEIG $0, $0, T_0 + VLEIG $1, $20, T_0 // T_0: [0, 20] + VZERO T_5 + VZERO T_6 + VMSLG T_0, T_3, T_5, T_5 + VMSLG T_0, T_4, T_6, T_6 + + // store r for final block in GR + VLGVG $1, T_2, RSAVE_0 // c + VLGVG $1, T_3, RSAVE_1 // c + VLGVG $1, T_4, RSAVE_2 // c + VLGVG $1, T_5, R5SAVE_1 // c + VLGVG $1, T_6, R5SAVE_2 // c + + // initialize h + VZERO H0_0 + VZERO H1_0 + VZERO H2_0 + VZERO H0_1 + VZERO H1_1 + VZERO H2_1 + + // initialize pointer for reduce constants + MOVD $·reduce<>(SB), R12 + + // calculate r**2 and 20*(r**2) + VZERO R_0 + VZERO R_1 + VZERO R_2 + SQUARE(T_2, T_3, T_4, T_6, R_0, R_1, R_2, T_1, T_5, T_7) + REDUCE2(R_0, R_1, R_2, M0, M1, M2, M3, M4, R5_1, R5_2, M5, T_1) + VZERO R5_1 + VZERO R5_2 + VMSLG T_0, R_1, R5_1, R5_1 + VMSLG T_0, R_2, R5_2, R5_2 + + // skip r**4 calculation if 3 blocks or less + CMPBLE R3, $48, b4 + + // calculate r**4 and 20*(r**4) + VZERO T_8 + VZERO T_9 + VZERO T_10 + SQUARE(R_0, R_1, R_2, R5_2, T_8, T_9, T_10, T_1, T_5, T_7) + REDUCE2(T_8, T_9, T_10, M0, M1, M2, M3, M4, T_2, T_3, M5, T_1) + VZERO T_2 + VZERO T_3 + VMSLG T_0, T_9, T_2, T_2 + VMSLG T_0, T_10, T_3, T_3 + + // put r**2 to the right and r**4 to the left of R_0, R_1, R_2 + VSLDB $8, T_8, T_8, T_8 + VSLDB $8, T_9, T_9, T_9 + VSLDB $8, T_10, T_10, T_10 + VSLDB $8, T_2, T_2, T_2 + VSLDB $8, T_3, T_3, T_3 + + VO T_8, R_0, R_0 + VO T_9, R_1, R_1 + VO T_10, R_2, R_2 + VO T_2, R5_1, R5_1 + VO T_3, R5_2, R5_2 + + CMPBLE R3, $80, load // less than or equal to 5 blocks in message + + // 6(or 5+1) blocks + SUB $81, R3 + VLM (R2), M0, M4 + VLL R3, 80(R2), M5 + ADD $1, R3 + MOVBZ $1, R0 + CMPBGE R3, $16, 2(PC) + VLVGB R3, R0, M5 + MOVD $96(R2), R2 + EXPACC(M0, M1, H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_0, T_1, T_2, T_3) + EXPACC(M2, M3, H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_0, T_1, T_2, T_3) + VLEIB $2, $1, H2_0 + VLEIB $2, $1, H2_1 + VLEIB $10, $1, H2_0 + VLEIB $10, $1, H2_1 + + VZERO M0 + VZERO M1 + VZERO M2 + VZERO M3 + VZERO T_4 + VZERO T_10 + EXPACC(M4, M5, M0, M1, M2, M3, T_4, T_10, T_0, T_1, T_2, T_3) + VLR T_4, M4 + VLEIB $10, $1, M2 + CMPBLT R3, $16, 2(PC) + VLEIB $10, $1, T_10 + MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M2, M3, M4, T_10, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9) + REDUCE(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_10, M0, M1, M2, M3, M4, T_4, T_5, T_2, T_7, T_8, T_9) + VMRHG V0, H0_1, H0_0 + VMRHG V0, H1_1, H1_0 + VMRHG V0, H2_1, H2_0 + VMRLG V0, H0_1, H0_1 + VMRLG V0, H1_1, H1_1 + VMRLG V0, H2_1, 
H2_1 + + SUB $16, R3 + CMPBLE R3, $0, square + +load: + // load EX0, EX1 and EX2 + MOVD $·c<>(SB), R5 + VLM (R5), EX0, EX2 + +loop: + CMPBLE R3, $64, add // b4 // last 4 or less blocks left + + // next 4 full blocks + VLM (R2), M2, M5 + SUB $64, R3 + MOVD $64(R2), R2 + REDUCE(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_10, M0, M1, T_0, T_1, T_3, T_4, T_5, T_2, T_7, T_8, T_9) + + // expacc in-lined to create [m2, m3] limbs + VGBM $0x3f3f, T_0 // 44 bit clear mask + VGBM $0x1f1f, T_1 // 40 bit clear mask + VPERM M2, M3, EX0, T_3 + VESRLG $4, T_0, T_0 // 44 bit clear mask ready + VPERM M2, M3, EX1, T_4 + VPERM M2, M3, EX2, T_5 + VN T_0, T_3, T_3 + VESRLG $4, T_4, T_4 + VN T_1, T_5, T_5 + VN T_0, T_4, T_4 + VMRHG H0_1, T_3, H0_0 + VMRHG H1_1, T_4, H1_0 + VMRHG H2_1, T_5, H2_0 + VMRLG H0_1, T_3, H0_1 + VMRLG H1_1, T_4, H1_1 + VMRLG H2_1, T_5, H2_1 + VLEIB $10, $1, H2_0 + VLEIB $10, $1, H2_1 + VPERM M4, M5, EX0, T_3 + VPERM M4, M5, EX1, T_4 + VPERM M4, M5, EX2, T_5 + VN T_0, T_3, T_3 + VESRLG $4, T_4, T_4 + VN T_1, T_5, T_5 + VN T_0, T_4, T_4 + VMRHG V0, T_3, M0 + VMRHG V0, T_4, M1 + VMRHG V0, T_5, M2 + VMRLG V0, T_3, M3 + VMRLG V0, T_4, M4 + VMRLG V0, T_5, M5 + VLEIB $10, $1, M2 + VLEIB $10, $1, M5 + + MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M2, M3, M4, M5, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9) + CMPBNE R3, $0, loop + REDUCE(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_10, M0, M1, M3, M4, M5, T_4, T_5, T_2, T_7, T_8, T_9) + VMRHG V0, H0_1, H0_0 + VMRHG V0, H1_1, H1_0 + VMRHG V0, H2_1, H2_0 + VMRLG V0, H0_1, H0_1 + VMRLG V0, H1_1, H1_1 + VMRLG V0, H2_1, H2_1 + + // load EX0, EX1, EX2 + MOVD $·constants<>(SB), R5 + VLM (R5), EX0, EX2 + + // sum vectors + VAQ H0_0, H0_1, H0_0 + VAQ H1_0, H1_1, H1_0 + VAQ H2_0, H2_1, H2_0 + + // h may be >= 2*(2**130-5) so we need to reduce it again + // M0...M4 are used as temps here + REDUCE2(H0_0, H1_0, H2_0, M0, M1, M2, M3, M4, T_9, T_10, H0_1, M5) + +next: // carry h1->h2 + VLEIB $7, $0x28, T_1 + VREPIB $4, T_2 + VGBM $0x003F, T_3 + VESRLG $4, T_3 + + // byte shift + VSRLB T_1, H1_0, T_4 + + // bit shift + VSRL T_2, T_4, T_4 + + // clear h1 carry bits + VN T_3, H1_0, H1_0 + + // add carry + VAQ T_4, H2_0, H2_0 + + // h is now < 2*(2**130-5) + // pack h into h1 (hi) and h0 (lo) + PACK(H0_0, H1_0, H2_0) + + // if h > 2**130-5 then h -= 2**130-5 + MOD(H0_0, H1_0, T_0, T_1, T_2) + + // h += s + MOVD $·bswapMask<>(SB), R5 + VL (R5), T_1 + VL 16(R4), T_0 + VPERM T_0, T_0, T_1, T_0 // reverse bytes (to big) + VAQ T_0, H0_0, H0_0 + VPERM H0_0, H0_0, T_1, H0_0 // reverse bytes (to little) + VST H0_0, (R1) + RET + +add: + // load EX0, EX1, EX2 + MOVD $·constants<>(SB), R5 + VLM (R5), EX0, EX2 + + REDUCE(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_10, M0, M1, M3, M4, M5, T_4, T_5, T_2, T_7, T_8, T_9) + VMRHG V0, H0_1, H0_0 + VMRHG V0, H1_1, H1_0 + VMRHG V0, H2_1, H2_0 + VMRLG V0, H0_1, H0_1 + VMRLG V0, H1_1, H1_1 + VMRLG V0, H2_1, H2_1 + CMPBLE R3, $64, b4 + +b4: + CMPBLE R3, $48, b3 // 3 blocks or less + + // 4(3+1) blocks remaining + SUB $49, R3 + VLM (R2), M0, M2 + VLL R3, 48(R2), M3 + ADD $1, R3 + MOVBZ $1, R0 + CMPBEQ R3, $16, 2(PC) + VLVGB R3, R0, M3 + MOVD $64(R2), R2 + EXPACC(M0, M1, H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_0, T_1, T_2, T_3) + VLEIB $10, $1, H2_0 + VLEIB $10, $1, H2_1 + VZERO M0 + VZERO M1 + VZERO M4 + VZERO M5 + VZERO T_4 + VZERO T_10 + EXPACC(M2, M3, M0, M1, M4, M5, T_4, T_10, T_0, T_1, T_2, T_3) + VLR T_4, M2 + VLEIB $10, $1, M4 + CMPBNE R3, $16, 2(PC) + VLEIB $10, $1, T_10 + MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, 
H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M4, M5, M2, T_10, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9) + REDUCE(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_10, M0, M1, M3, M4, M5, T_4, T_5, T_2, T_7, T_8, T_9) + VMRHG V0, H0_1, H0_0 + VMRHG V0, H1_1, H1_0 + VMRHG V0, H2_1, H2_0 + VMRLG V0, H0_1, H0_1 + VMRLG V0, H1_1, H1_1 + VMRLG V0, H2_1, H2_1 + SUB $16, R3 + CMPBLE R3, $0, square // this condition must always hold true! + +b3: + CMPBLE R3, $32, b2 + + // 3 blocks remaining + + // setup [r²,r] + VSLDB $8, R_0, R_0, R_0 + VSLDB $8, R_1, R_1, R_1 + VSLDB $8, R_2, R_2, R_2 + VSLDB $8, R5_1, R5_1, R5_1 + VSLDB $8, R5_2, R5_2, R5_2 + + VLVGG $1, RSAVE_0, R_0 + VLVGG $1, RSAVE_1, R_1 + VLVGG $1, RSAVE_2, R_2 + VLVGG $1, R5SAVE_1, R5_1 + VLVGG $1, R5SAVE_2, R5_2 + + // setup [h0, h1] + VSLDB $8, H0_0, H0_0, H0_0 + VSLDB $8, H1_0, H1_0, H1_0 + VSLDB $8, H2_0, H2_0, H2_0 + VO H0_1, H0_0, H0_0 + VO H1_1, H1_0, H1_0 + VO H2_1, H2_0, H2_0 + VZERO H0_1 + VZERO H1_1 + VZERO H2_1 + + VZERO M0 + VZERO M1 + VZERO M2 + VZERO M3 + VZERO M4 + VZERO M5 + + // H*[r**2, r] + MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M2, M3, M4, M5, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9) + REDUCE2(H0_0, H1_0, H2_0, M0, M1, M2, M3, M4, H0_1, H1_1, T_10, M5) + + SUB $33, R3 + VLM (R2), M0, M1 + VLL R3, 32(R2), M2 + ADD $1, R3 + MOVBZ $1, R0 + CMPBEQ R3, $16, 2(PC) + VLVGB R3, R0, M2 + + // H += m0 + VZERO T_1 + VZERO T_2 + VZERO T_3 + EXPACC2(M0, T_1, T_2, T_3, T_4, T_5, T_6) + VLEIB $10, $1, T_3 + VAG H0_0, T_1, H0_0 + VAG H1_0, T_2, H1_0 + VAG H2_0, T_3, H2_0 + + VZERO M0 + VZERO M3 + VZERO M4 + VZERO M5 + VZERO T_10 + + // (H+m0)*r + MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M3, M4, M5, V0, T_10, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9) + REDUCE2(H0_0, H1_0, H2_0, M0, M3, M4, M5, T_10, H0_1, H1_1, H2_1, T_9) + + // H += m1 + VZERO V0 + VZERO T_1 + VZERO T_2 + VZERO T_3 + EXPACC2(M1, T_1, T_2, T_3, T_4, T_5, T_6) + VLEIB $10, $1, T_3 + VAQ H0_0, T_1, H0_0 + VAQ H1_0, T_2, H1_0 + VAQ H2_0, T_3, H2_0 + REDUCE2(H0_0, H1_0, H2_0, M0, M3, M4, M5, T_9, H0_1, H1_1, H2_1, T_10) + + // [H, m2] * [r**2, r] + EXPACC2(M2, H0_0, H1_0, H2_0, T_1, T_2, T_3) + CMPBNE R3, $16, 2(PC) + VLEIB $10, $1, H2_0 + VZERO M0 + VZERO M1 + VZERO M2 + VZERO M3 + VZERO M4 + VZERO M5 + MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M2, M3, M4, M5, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9) + REDUCE2(H0_0, H1_0, H2_0, M0, M1, M2, M3, M4, H0_1, H1_1, M5, T_10) + SUB $16, R3 + CMPBLE R3, $0, next // this condition must always hold true! 
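The remainder paths here fold two blocks per MULTIPLY using the [r**2, r] register pairing, which relies on (h+m1)*r**2 + m2*r equaling the sequential Horner form ((h+m1)*r + m2)*r modulo 2**130-5. A minimal math/big sketch of that identity, with arbitrary stand-in values:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	// p = 2**130 - 5
	p := new(big.Int).Lsh(big.NewInt(1), 130)
	p.Sub(p, big.NewInt(5))

	r := big.NewInt(0x806d5400e52447c) // arbitrary stand-in for the clamped key
	h := big.NewInt(12345)             // arbitrary accumulator value
	m1 := big.NewInt(1111)             // arbitrary padded message blocks
	m2 := big.NewInt(2222)

	// Sequential Horner form: h = ((h + m1)*r + m2)*r mod p
	seq := new(big.Int).Add(h, m1)
	seq.Mul(seq, r)
	seq.Add(seq, m2)
	seq.Mul(seq, r)
	seq.Mod(seq, p)

	// Interleaved form used by the vector code: h = (h + m1)*r**2 + m2*r mod p
	r2 := new(big.Int).Mul(r, r)
	par := new(big.Int).Add(h, m1)
	par.Mul(par, r2)
	tmp := new(big.Int).Mul(m2, r)
	par.Add(par, tmp)
	par.Mod(par, p)

	fmt.Println(seq.Cmp(par) == 0) // prints true
}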
+ +b2: + CMPBLE R3, $16, b1 + + // 2 blocks remaining + + // setup [r²,r] + VSLDB $8, R_0, R_0, R_0 + VSLDB $8, R_1, R_1, R_1 + VSLDB $8, R_2, R_2, R_2 + VSLDB $8, R5_1, R5_1, R5_1 + VSLDB $8, R5_2, R5_2, R5_2 + + VLVGG $1, RSAVE_0, R_0 + VLVGG $1, RSAVE_1, R_1 + VLVGG $1, RSAVE_2, R_2 + VLVGG $1, R5SAVE_1, R5_1 + VLVGG $1, R5SAVE_2, R5_2 + + // setup [h0, h1] + VSLDB $8, H0_0, H0_0, H0_0 + VSLDB $8, H1_0, H1_0, H1_0 + VSLDB $8, H2_0, H2_0, H2_0 + VO H0_1, H0_0, H0_0 + VO H1_1, H1_0, H1_0 + VO H2_1, H2_0, H2_0 + VZERO H0_1 + VZERO H1_1 + VZERO H2_1 + + VZERO M0 + VZERO M1 + VZERO M2 + VZERO M3 + VZERO M4 + VZERO M5 + + // H*[r**2, r] + MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M2, M3, M4, M5, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9) + REDUCE(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_10, M0, M1, M2, M3, M4, T_4, T_5, T_2, T_7, T_8, T_9) + VMRHG V0, H0_1, H0_0 + VMRHG V0, H1_1, H1_0 + VMRHG V0, H2_1, H2_0 + VMRLG V0, H0_1, H0_1 + VMRLG V0, H1_1, H1_1 + VMRLG V0, H2_1, H2_1 + + // move h to the left and 0s at the right + VSLDB $8, H0_0, H0_0, H0_0 + VSLDB $8, H1_0, H1_0, H1_0 + VSLDB $8, H2_0, H2_0, H2_0 + + // get message blocks and append 1 to start + SUB $17, R3 + VL (R2), M0 + VLL R3, 16(R2), M1 + ADD $1, R3 + MOVBZ $1, R0 + CMPBEQ R3, $16, 2(PC) + VLVGB R3, R0, M1 + VZERO T_6 + VZERO T_7 + VZERO T_8 + EXPACC2(M0, T_6, T_7, T_8, T_1, T_2, T_3) + EXPACC2(M1, T_6, T_7, T_8, T_1, T_2, T_3) + VLEIB $2, $1, T_8 + CMPBNE R3, $16, 2(PC) + VLEIB $10, $1, T_8 + + // add [m0, m1] to h + VAG H0_0, T_6, H0_0 + VAG H1_0, T_7, H1_0 + VAG H2_0, T_8, H2_0 + + VZERO M2 + VZERO M3 + VZERO M4 + VZERO M5 + VZERO T_10 + VZERO M0 + + // at this point R_0 .. R5_2 look like [r**2, r] + MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M2, M3, M4, M5, T_10, M0, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9) + REDUCE2(H0_0, H1_0, H2_0, M2, M3, M4, M5, T_9, H0_1, H1_1, H2_1, T_10) + SUB $16, R3, R3 + CMPBLE R3, $0, next + +b1: + CMPBLE R3, $0, next + + // 1 block remaining + + // setup [r²,r] + VSLDB $8, R_0, R_0, R_0 + VSLDB $8, R_1, R_1, R_1 + VSLDB $8, R_2, R_2, R_2 + VSLDB $8, R5_1, R5_1, R5_1 + VSLDB $8, R5_2, R5_2, R5_2 + + VLVGG $1, RSAVE_0, R_0 + VLVGG $1, RSAVE_1, R_1 + VLVGG $1, RSAVE_2, R_2 + VLVGG $1, R5SAVE_1, R5_1 + VLVGG $1, R5SAVE_2, R5_2 + + // setup [h0, h1] + VSLDB $8, H0_0, H0_0, H0_0 + VSLDB $8, H1_0, H1_0, H1_0 + VSLDB $8, H2_0, H2_0, H2_0 + VO H0_1, H0_0, H0_0 + VO H1_1, H1_0, H1_0 + VO H2_1, H2_0, H2_0 + VZERO H0_1 + VZERO H1_1 + VZERO H2_1 + + VZERO M0 + VZERO M1 + VZERO M2 + VZERO M3 + VZERO M4 + VZERO M5 + + // H*[r**2, r] + MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M2, M3, M4, M5, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9) + REDUCE2(H0_0, H1_0, H2_0, M0, M1, M2, M3, M4, T_9, T_10, H0_1, M5) + + // set up [0, m0] limbs + SUB $1, R3 + VLL R3, (R2), M0 + ADD $1, R3 + MOVBZ $1, R0 + CMPBEQ R3, $16, 2(PC) + VLVGB R3, R0, M0 + VZERO T_1 + VZERO T_2 + VZERO T_3 + EXPACC2(M0, T_1, T_2, T_3, T_4, T_5, T_6)// limbs: [0, m] + CMPBNE R3, $16, 2(PC) + VLEIB $10, $1, T_3 + + // h+m0 + VAQ H0_0, T_1, H0_0 + VAQ H1_0, T_2, H1_0 + VAQ H2_0, T_3, H2_0 + + VZERO M0 + VZERO M1 + VZERO M2 + VZERO M3 + VZERO M4 + VZERO M5 + MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M2, M3, M4, M5, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9) + REDUCE2(H0_0, H1_0, H2_0, M0, M1, M2, M3, M4, T_9, T_10, H0_1, M5) + + BR next + +square: + // setup [r²,r] + VSLDB $8, R_0, R_0, 
R_0 + VSLDB $8, R_1, R_1, R_1 + VSLDB $8, R_2, R_2, R_2 + VSLDB $8, R5_1, R5_1, R5_1 + VSLDB $8, R5_2, R5_2, R5_2 + + VLVGG $1, RSAVE_0, R_0 + VLVGG $1, RSAVE_1, R_1 + VLVGG $1, RSAVE_2, R_2 + VLVGG $1, R5SAVE_1, R5_1 + VLVGG $1, R5SAVE_2, R5_2 + + // setup [h0, h1] + VSLDB $8, H0_0, H0_0, H0_0 + VSLDB $8, H1_0, H1_0, H1_0 + VSLDB $8, H2_0, H2_0, H2_0 + VO H0_1, H0_0, H0_0 + VO H1_1, H1_0, H1_0 + VO H2_1, H2_0, H2_0 + VZERO H0_1 + VZERO H1_1 + VZERO H2_1 + + VZERO M0 + VZERO M1 + VZERO M2 + VZERO M3 + VZERO M4 + VZERO M5 + + // (h0*r**2) + (h1*r) + MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M2, M3, M4, M5, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9) + REDUCE2(H0_0, H1_0, H2_0, M0, M1, M2, M3, M4, T_9, T_10, H0_1, M5) + BR next diff --git a/vendor/golang.org/x/crypto/ssh/buffer.go b/vendor/golang.org/x/crypto/ssh/buffer.go new file mode 100644 index 00000000000..1ab07d078db --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/buffer.go @@ -0,0 +1,97 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "io" + "sync" +) + +// buffer provides a linked list buffer for data exchange +// between producer and consumer. Theoretically the buffer is +// of unlimited capacity as it does no allocation of its own. +type buffer struct { + // protects concurrent access to head, tail and closed + *sync.Cond + + head *element // the buffer that will be read first + tail *element // the buffer that will be read last + + closed bool +} + +// An element represents a single link in a linked list. +type element struct { + buf []byte + next *element +} + +// newBuffer returns an empty buffer that is not closed. +func newBuffer() *buffer { + e := new(element) + b := &buffer{ + Cond: newCond(), + head: e, + tail: e, + } + return b +} + +// write makes buf available for Read to receive. +// buf must not be modified after the call to write. +func (b *buffer) write(buf []byte) { + b.Cond.L.Lock() + e := &element{buf: buf} + b.tail.next = e + b.tail = e + b.Cond.Signal() + b.Cond.L.Unlock() +} + +// eof closes the buffer. Reads from the buffer once all +// the data has been consumed will receive io.EOF. +func (b *buffer) eof() { + b.Cond.L.Lock() + b.closed = true + b.Cond.Signal() + b.Cond.L.Unlock() +} + +// Read reads data from the internal buffer in buf. Reads will block +// if no data is available, or until the buffer is closed. +func (b *buffer) Read(buf []byte) (n int, err error) { + b.Cond.L.Lock() + defer b.Cond.L.Unlock() + + for len(buf) > 0 { + // if there is data in b.head, copy it + if len(b.head.buf) > 0 { + r := copy(buf, b.head.buf) + buf, b.head.buf = buf[r:], b.head.buf[r:] + n += r + continue + } + // if there is a next buffer, make it the head + if len(b.head.buf) == 0 && b.head != b.tail { + b.head = b.head.next + continue + } + + // if at least one byte has been copied, return + if n > 0 { + break + } + + // if nothing was read, and there is nothing outstanding + // check to see if the buffer is closed. + if b.closed { + err = io.EOF + break + } + // out of buffers, wait for producer + b.Cond.Wait() + } + return +} diff --git a/vendor/golang.org/x/crypto/ssh/certs.go b/vendor/golang.org/x/crypto/ssh/certs.go new file mode 100644 index 00000000000..0f89aec1c7f --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/certs.go @@ -0,0 +1,546 @@ +// Copyright 2012 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssh
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+	"net"
+	"sort"
+	"time"
+)
+
+// These constants from [PROTOCOL.certkeys] represent the algorithm names
+// for certificate types supported by this package.
+const (
+	CertAlgoRSAv01        = "ssh-rsa-cert-v01@openssh.com"
+	CertAlgoDSAv01        = "ssh-dss-cert-v01@openssh.com"
+	CertAlgoECDSA256v01   = "ecdsa-sha2-nistp256-cert-v01@openssh.com"
+	CertAlgoECDSA384v01   = "ecdsa-sha2-nistp384-cert-v01@openssh.com"
+	CertAlgoECDSA521v01   = "ecdsa-sha2-nistp521-cert-v01@openssh.com"
+	CertAlgoSKECDSA256v01 = "sk-ecdsa-sha2-nistp256-cert-v01@openssh.com"
+	CertAlgoED25519v01    = "ssh-ed25519-cert-v01@openssh.com"
+	CertAlgoSKED25519v01  = "sk-ssh-ed25519-cert-v01@openssh.com"
+)
+
+// Certificate types distinguish between host and user
+// certificates. The values can be set in the CertType field of
+// Certificate.
+const (
+	UserCert = 1
+	HostCert = 2
+)
+
+// Signature represents a cryptographic signature.
+type Signature struct {
+	Format string
+	Blob   []byte
+	Rest   []byte `ssh:"rest"`
+}
+
+// CertTimeInfinity can be used for OpenSSHCertV01.ValidBefore to indicate that
+// a certificate does not expire.
+const CertTimeInfinity = 1<<64 - 1
+
+// A Certificate represents an OpenSSH certificate as defined in
+// [PROTOCOL.certkeys]?rev=1.8. The Certificate type implements the
+// PublicKey interface, so it can be unmarshaled using
+// ParsePublicKey.
+type Certificate struct {
+	Nonce           []byte
+	Key             PublicKey
+	Serial          uint64
+	CertType        uint32
+	KeyId           string
+	ValidPrincipals []string
+	ValidAfter      uint64
+	ValidBefore     uint64
+	Permissions
+	Reserved     []byte
+	SignatureKey PublicKey
+	Signature    *Signature
+}
+
+// genericCertData holds the key-independent part of the certificate data.
+// Overall, certificates contain a nonce, public key fields and
+// key-independent fields.
+type genericCertData struct {
+	Serial          uint64
+	CertType        uint32
+	KeyId           string
+	ValidPrincipals []byte
+	ValidAfter      uint64
+	ValidBefore     uint64
+	CriticalOptions []byte
+	Extensions      []byte
+	Reserved        []byte
+	SignatureKey    []byte
+	Signature       []byte
+}
+
+func marshalStringList(namelist []string) []byte {
+	var to []byte
+	for _, name := range namelist {
+		s := struct{ N string }{name}
+		to = append(to, Marshal(&s)...)
+	}
+	return to
+}
+
+type optionsTuple struct {
+	Key   string
+	Value []byte
+}
+
+type optionsTupleValue struct {
+	Value string
+}
+
+// serialize a map of critical options or extensions
+// issue #10569 - per [PROTOCOL.certkeys] and SSH implementation,
+// we need two length prefixes for a non-empty string value
+func marshalTuples(tups map[string]string) []byte {
+	keys := make([]string, 0, len(tups))
+	for key := range tups {
+		keys = append(keys, key)
+	}
+	sort.Strings(keys)
+
+	var ret []byte
+	for _, key := range keys {
+		s := optionsTuple{Key: key}
+		if value := tups[key]; len(value) > 0 {
+			s.Value = Marshal(&optionsTupleValue{value})
+		}
+		ret = append(ret, Marshal(&s)...)
+ } + return ret +} + +// issue #10569 - per [PROTOCOL.certkeys] and SSH implementation, +// we need two length prefixes for a non-empty option value +func parseTuples(in []byte) (map[string]string, error) { + tups := map[string]string{} + var lastKey string + var haveLastKey bool + + for len(in) > 0 { + var key, val, extra []byte + var ok bool + + if key, in, ok = parseString(in); !ok { + return nil, errShortRead + } + keyStr := string(key) + // according to [PROTOCOL.certkeys], the names must be in + // lexical order. + if haveLastKey && keyStr <= lastKey { + return nil, fmt.Errorf("ssh: certificate options are not in lexical order") + } + lastKey, haveLastKey = keyStr, true + // the next field is a data field, which if non-empty has a string embedded + if val, in, ok = parseString(in); !ok { + return nil, errShortRead + } + if len(val) > 0 { + val, extra, ok = parseString(val) + if !ok { + return nil, errShortRead + } + if len(extra) > 0 { + return nil, fmt.Errorf("ssh: unexpected trailing data after certificate option value") + } + tups[keyStr] = string(val) + } else { + tups[keyStr] = "" + } + } + return tups, nil +} + +func parseCert(in []byte, privAlgo string) (*Certificate, error) { + nonce, rest, ok := parseString(in) + if !ok { + return nil, errShortRead + } + + key, rest, err := parsePubKey(rest, privAlgo) + if err != nil { + return nil, err + } + + var g genericCertData + if err := Unmarshal(rest, &g); err != nil { + return nil, err + } + + c := &Certificate{ + Nonce: nonce, + Key: key, + Serial: g.Serial, + CertType: g.CertType, + KeyId: g.KeyId, + ValidAfter: g.ValidAfter, + ValidBefore: g.ValidBefore, + } + + for principals := g.ValidPrincipals; len(principals) > 0; { + principal, rest, ok := parseString(principals) + if !ok { + return nil, errShortRead + } + c.ValidPrincipals = append(c.ValidPrincipals, string(principal)) + principals = rest + } + + c.CriticalOptions, err = parseTuples(g.CriticalOptions) + if err != nil { + return nil, err + } + c.Extensions, err = parseTuples(g.Extensions) + if err != nil { + return nil, err + } + c.Reserved = g.Reserved + k, err := ParsePublicKey(g.SignatureKey) + if err != nil { + return nil, err + } + + c.SignatureKey = k + c.Signature, rest, ok = parseSignatureBody(g.Signature) + if !ok || len(rest) > 0 { + return nil, errors.New("ssh: signature parse error") + } + + return c, nil +} + +type openSSHCertSigner struct { + pub *Certificate + signer Signer +} + +type algorithmOpenSSHCertSigner struct { + *openSSHCertSigner + algorithmSigner AlgorithmSigner +} + +// NewCertSigner returns a Signer that signs with the given Certificate, whose +// private key is held by signer. It returns an error if the public key in cert +// doesn't match the key used by signer. 
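A minimal usage sketch for NewCertSigner (below), wiring a certificate into client public-key authentication; keyPEM, certLine, the user name and the host-key callback are placeholders:

package example

import (
	"errors"

	"golang.org/x/crypto/ssh"
)

// clientConfigWithCert is a sketch: keyPEM holds a PEM-encoded private key and
// certLine an authorized_keys-style "...-cert-v01@openssh.com" line.
func clientConfigWithCert(keyPEM, certLine []byte) (*ssh.ClientConfig, error) {
	signer, err := ssh.ParsePrivateKey(keyPEM)
	if err != nil {
		return nil, err
	}
	pub, _, _, _, err := ssh.ParseAuthorizedKey(certLine)
	if err != nil {
		return nil, err
	}
	cert, ok := pub.(*ssh.Certificate)
	if !ok {
		return nil, errors.New("not a certificate")
	}
	certSigner, err := ssh.NewCertSigner(cert, signer)
	if err != nil {
		return nil, err
	}
	return &ssh.ClientConfig{
		User:            "alice", // placeholder user
		Auth:            []ssh.AuthMethod{ssh.PublicKeys(certSigner)},
		HostKeyCallback: ssh.InsecureIgnoreHostKey(), // placeholder; verify host keys in real use
	}, nil
}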
+func NewCertSigner(cert *Certificate, signer Signer) (Signer, error) { + if bytes.Compare(cert.Key.Marshal(), signer.PublicKey().Marshal()) != 0 { + return nil, errors.New("ssh: signer and cert have different public key") + } + + if algorithmSigner, ok := signer.(AlgorithmSigner); ok { + return &algorithmOpenSSHCertSigner{ + &openSSHCertSigner{cert, signer}, algorithmSigner}, nil + } else { + return &openSSHCertSigner{cert, signer}, nil + } +} + +func (s *openSSHCertSigner) Sign(rand io.Reader, data []byte) (*Signature, error) { + return s.signer.Sign(rand, data) +} + +func (s *openSSHCertSigner) PublicKey() PublicKey { + return s.pub +} + +func (s *algorithmOpenSSHCertSigner) SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error) { + return s.algorithmSigner.SignWithAlgorithm(rand, data, algorithm) +} + +const sourceAddressCriticalOption = "source-address" + +// CertChecker does the work of verifying a certificate. Its methods +// can be plugged into ClientConfig.HostKeyCallback and +// ServerConfig.PublicKeyCallback. For the CertChecker to work, +// minimally, the IsAuthority callback should be set. +type CertChecker struct { + // SupportedCriticalOptions lists the CriticalOptions that the + // server application layer understands. These are only used + // for user certificates. + SupportedCriticalOptions []string + + // IsUserAuthority should return true if the key is recognized as an + // authority for the given user certificate. This allows for + // certificates to be signed by other certificates. This must be set + // if this CertChecker will be checking user certificates. + IsUserAuthority func(auth PublicKey) bool + + // IsHostAuthority should report whether the key is recognized as + // an authority for this host. This allows for certificates to be + // signed by other keys, and for those other keys to only be valid + // signers for particular hostnames. This must be set if this + // CertChecker will be checking host certificates. + IsHostAuthority func(auth PublicKey, address string) bool + + // Clock is used for verifying time stamps. If nil, time.Now + // is used. + Clock func() time.Time + + // UserKeyFallback is called when CertChecker.Authenticate encounters a + // public key that is not a certificate. It must implement validation + // of user keys or else, if nil, all such keys are rejected. + UserKeyFallback func(conn ConnMetadata, key PublicKey) (*Permissions, error) + + // HostKeyFallback is called when CertChecker.CheckHostKey encounters a + // public key that is not a certificate. It must implement host key + // validation or else, if nil, all such keys are rejected. + HostKeyFallback HostKeyCallback + + // IsRevoked is called for each certificate so that revocation checking + // can be implemented. It should return true if the given certificate + // is revoked and false otherwise. If nil, no certificates are + // considered to have been revoked. + IsRevoked func(cert *Certificate) bool +} + +// CheckHostKey checks a host key certificate. This method can be +// plugged into ClientConfig.HostKeyCallback. 
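A sketch of plugging CheckHostKey (below) into a client configuration; caPub and the user name are placeholders:

package example

import (
	"bytes"

	"golang.org/x/crypto/ssh"
)

// hostCertConfig is a sketch: caPub is the trusted host CA's public key.
func hostCertConfig(caPub ssh.PublicKey) *ssh.ClientConfig {
	checker := &ssh.CertChecker{
		IsHostAuthority: func(auth ssh.PublicKey, address string) bool {
			// accept host certs signed by the one configured CA
			return bytes.Equal(auth.Marshal(), caPub.Marshal())
		},
	}
	return &ssh.ClientConfig{
		User:            "alice", // placeholder
		HostKeyCallback: checker.CheckHostKey,
	}
}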
+func (c *CertChecker) CheckHostKey(addr string, remote net.Addr, key PublicKey) error { + cert, ok := key.(*Certificate) + if !ok { + if c.HostKeyFallback != nil { + return c.HostKeyFallback(addr, remote, key) + } + return errors.New("ssh: non-certificate host key") + } + if cert.CertType != HostCert { + return fmt.Errorf("ssh: certificate presented as a host key has type %d", cert.CertType) + } + if !c.IsHostAuthority(cert.SignatureKey, addr) { + return fmt.Errorf("ssh: no authorities for hostname: %v", addr) + } + + hostname, _, err := net.SplitHostPort(addr) + if err != nil { + return err + } + + // Pass hostname only as principal for host certificates (consistent with OpenSSH) + return c.CheckCert(hostname, cert) +} + +// Authenticate checks a user certificate. Authenticate can be used as +// a value for ServerConfig.PublicKeyCallback. +func (c *CertChecker) Authenticate(conn ConnMetadata, pubKey PublicKey) (*Permissions, error) { + cert, ok := pubKey.(*Certificate) + if !ok { + if c.UserKeyFallback != nil { + return c.UserKeyFallback(conn, pubKey) + } + return nil, errors.New("ssh: normal key pairs not accepted") + } + + if cert.CertType != UserCert { + return nil, fmt.Errorf("ssh: cert has type %d", cert.CertType) + } + if !c.IsUserAuthority(cert.SignatureKey) { + return nil, fmt.Errorf("ssh: certificate signed by unrecognized authority") + } + + if err := c.CheckCert(conn.User(), cert); err != nil { + return nil, err + } + + return &cert.Permissions, nil +} + +// CheckCert checks CriticalOptions, ValidPrincipals, revocation, timestamp and +// the signature of the certificate. +func (c *CertChecker) CheckCert(principal string, cert *Certificate) error { + if c.IsRevoked != nil && c.IsRevoked(cert) { + return fmt.Errorf("ssh: certificate serial %d revoked", cert.Serial) + } + + for opt := range cert.CriticalOptions { + // sourceAddressCriticalOption will be enforced by + // serverAuthenticate + if opt == sourceAddressCriticalOption { + continue + } + + found := false + for _, supp := range c.SupportedCriticalOptions { + if supp == opt { + found = true + break + } + } + if !found { + return fmt.Errorf("ssh: unsupported critical option %q in certificate", opt) + } + } + + if len(cert.ValidPrincipals) > 0 { + // By default, certs are valid for all users/hosts. + found := false + for _, p := range cert.ValidPrincipals { + if p == principal { + found = true + break + } + } + if !found { + return fmt.Errorf("ssh: principal %q not in the set of valid principals for given certificate: %q", principal, cert.ValidPrincipals) + } + } + + clock := c.Clock + if clock == nil { + clock = time.Now + } + + unixNow := clock().Unix() + if after := int64(cert.ValidAfter); after < 0 || unixNow < int64(cert.ValidAfter) { + return fmt.Errorf("ssh: cert is not yet valid") + } + if before := int64(cert.ValidBefore); cert.ValidBefore != uint64(CertTimeInfinity) && (unixNow >= before || before < 0) { + return fmt.Errorf("ssh: cert has expired") + } + if err := cert.SignatureKey.Verify(cert.bytesForSigning(), cert.Signature); err != nil { + return fmt.Errorf("ssh: certificate signature does not verify") + } + + return nil +} + +// SignCert sets c.SignatureKey to the authority's public key and stores a +// Signature, by authority, in the certificate. 
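A sketch of minting and signing a user certificate with SignCert (below); the serial, principal, key ID and validity window are placeholders:

package example

import (
	"crypto/rand"
	"time"

	"golang.org/x/crypto/ssh"
)

// mintUserCert is a sketch: userPub is the user's public key and ca the
// authority's signer.
func mintUserCert(userPub ssh.PublicKey, ca ssh.Signer) (*ssh.Certificate, error) {
	cert := &ssh.Certificate{
		Key:             userPub,
		Serial:          1,
		CertType:        ssh.UserCert,
		KeyId:           "alice@example.com",
		ValidPrincipals: []string{"alice"},
		ValidAfter:      uint64(time.Now().Unix()),
		ValidBefore:     uint64(time.Now().Add(24 * time.Hour).Unix()),
	}
	if err := cert.SignCert(rand.Reader, ca); err != nil {
		return nil, err
	}
	return cert, nil
}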
+func (c *Certificate) SignCert(rand io.Reader, authority Signer) error { + c.Nonce = make([]byte, 32) + if _, err := io.ReadFull(rand, c.Nonce); err != nil { + return err + } + c.SignatureKey = authority.PublicKey() + + sig, err := authority.Sign(rand, c.bytesForSigning()) + if err != nil { + return err + } + c.Signature = sig + return nil +} + +var certAlgoNames = map[string]string{ + KeyAlgoRSA: CertAlgoRSAv01, + KeyAlgoDSA: CertAlgoDSAv01, + KeyAlgoECDSA256: CertAlgoECDSA256v01, + KeyAlgoECDSA384: CertAlgoECDSA384v01, + KeyAlgoECDSA521: CertAlgoECDSA521v01, + KeyAlgoSKECDSA256: CertAlgoSKECDSA256v01, + KeyAlgoED25519: CertAlgoED25519v01, + KeyAlgoSKED25519: CertAlgoSKED25519v01, +} + +// certToPrivAlgo returns the underlying algorithm for a certificate algorithm. +// Panics if a non-certificate algorithm is passed. +func certToPrivAlgo(algo string) string { + for privAlgo, pubAlgo := range certAlgoNames { + if pubAlgo == algo { + return privAlgo + } + } + panic("unknown cert algorithm") +} + +func (cert *Certificate) bytesForSigning() []byte { + c2 := *cert + c2.Signature = nil + out := c2.Marshal() + // Drop trailing signature length. + return out[:len(out)-4] +} + +// Marshal serializes c into OpenSSH's wire format. It is part of the +// PublicKey interface. +func (c *Certificate) Marshal() []byte { + generic := genericCertData{ + Serial: c.Serial, + CertType: c.CertType, + KeyId: c.KeyId, + ValidPrincipals: marshalStringList(c.ValidPrincipals), + ValidAfter: uint64(c.ValidAfter), + ValidBefore: uint64(c.ValidBefore), + CriticalOptions: marshalTuples(c.CriticalOptions), + Extensions: marshalTuples(c.Extensions), + Reserved: c.Reserved, + SignatureKey: c.SignatureKey.Marshal(), + } + if c.Signature != nil { + generic.Signature = Marshal(c.Signature) + } + genericBytes := Marshal(&generic) + keyBytes := c.Key.Marshal() + _, keyBytes, _ = parseString(keyBytes) + prefix := Marshal(&struct { + Name string + Nonce []byte + Key []byte `ssh:"rest"` + }{c.Type(), c.Nonce, keyBytes}) + + result := make([]byte, 0, len(prefix)+len(genericBytes)) + result = append(result, prefix...) + result = append(result, genericBytes...) + return result +} + +// Type returns the key name. It is part of the PublicKey interface. +func (c *Certificate) Type() string { + algo, ok := certAlgoNames[c.Key.Type()] + if !ok { + panic("unknown cert key type " + c.Key.Type()) + } + return algo +} + +// Verify verifies a signature against the certificate's public +// key. It is part of the PublicKey interface. 
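Rounding out the CertChecker flow on the server side, a sketch that uses Authenticate as the public-key callback; caPub stands in for the trusted user CA key:

package example

import (
	"bytes"

	"golang.org/x/crypto/ssh"
)

// userCertServer is a sketch: caPub is the trusted user CA's public key.
func userCertServer(caPub ssh.PublicKey) *ssh.ServerConfig {
	checker := &ssh.CertChecker{
		IsUserAuthority: func(auth ssh.PublicKey) bool {
			return bytes.Equal(auth.Marshal(), caPub.Marshal())
		},
	}
	return &ssh.ServerConfig{
		// Authenticate rejects non-certificate keys unless UserKeyFallback is set.
		PublicKeyCallback: checker.Authenticate,
	}
}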
+func (c *Certificate) Verify(data []byte, sig *Signature) error { + return c.Key.Verify(data, sig) +} + +func parseSignatureBody(in []byte) (out *Signature, rest []byte, ok bool) { + format, in, ok := parseString(in) + if !ok { + return + } + + out = &Signature{ + Format: string(format), + } + + if out.Blob, in, ok = parseString(in); !ok { + return + } + + switch out.Format { + case KeyAlgoSKECDSA256, CertAlgoSKECDSA256v01, KeyAlgoSKED25519, CertAlgoSKED25519v01: + out.Rest = in + return out, nil, ok + } + + return out, in, ok +} + +func parseSignature(in []byte) (out *Signature, rest []byte, ok bool) { + sigBytes, rest, ok := parseString(in) + if !ok { + return + } + + out, trailing, ok := parseSignatureBody(sigBytes) + if !ok || len(trailing) > 0 { + return nil, nil, false + } + return +} diff --git a/vendor/golang.org/x/crypto/ssh/channel.go b/vendor/golang.org/x/crypto/ssh/channel.go new file mode 100644 index 00000000000..c0834c00dfe --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/channel.go @@ -0,0 +1,633 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "encoding/binary" + "errors" + "fmt" + "io" + "log" + "sync" +) + +const ( + minPacketLength = 9 + // channelMaxPacket contains the maximum number of bytes that will be + // sent in a single packet. As per RFC 4253, section 6.1, 32k is also + // the minimum. + channelMaxPacket = 1 << 15 + // We follow OpenSSH here. + channelWindowSize = 64 * channelMaxPacket +) + +// NewChannel represents an incoming request to a channel. It must either be +// accepted for use by calling Accept, or rejected by calling Reject. +type NewChannel interface { + // Accept accepts the channel creation request. It returns the Channel + // and a Go channel containing SSH requests. The Go channel must be + // serviced otherwise the Channel will hang. + Accept() (Channel, <-chan *Request, error) + + // Reject rejects the channel creation request. After calling + // this, no other methods on the Channel may be called. + Reject(reason RejectionReason, message string) error + + // ChannelType returns the type of the channel, as supplied by the + // client. + ChannelType() string + + // ExtraData returns the arbitrary payload for this channel, as supplied + // by the client. This data is specific to the channel type. + ExtraData() []byte +} + +// A Channel is an ordered, reliable, flow-controlled, duplex stream +// that is multiplexed over an SSH connection. +type Channel interface { + // Read reads up to len(data) bytes from the channel. + Read(data []byte) (int, error) + + // Write writes len(data) bytes to the channel. + Write(data []byte) (int, error) + + // Close signals end of channel use. No data may be sent after this + // call. + Close() error + + // CloseWrite signals the end of sending in-band + // data. Requests may still be sent, and the other side may + // still send data + CloseWrite() error + + // SendRequest sends a channel request. If wantReply is true, + // it will wait for a reply and return the result as a + // boolean, otherwise the return value will be false. Channel + // requests are out-of-band messages so they may be sent even + // if the data stream is closed or blocked by flow control. + // If the channel is closed before a reply is returned, io.EOF + // is returned. 
+ SendRequest(name string, wantReply bool, payload []byte) (bool, error) + + // Stderr returns an io.ReadWriter that writes to this channel + // with the extended data type set to stderr. Stderr may + // safely be read and written from a different goroutine than + // Read and Write respectively. + Stderr() io.ReadWriter +} + +// Request is a request sent outside of the normal stream of +// data. Requests can either be specific to an SSH channel, or they +// can be global. +type Request struct { + Type string + WantReply bool + Payload []byte + + ch *channel + mux *mux +} + +// Reply sends a response to a request. It must be called for all requests +// where WantReply is true and is a no-op otherwise. The payload argument is +// ignored for replies to channel-specific requests. +func (r *Request) Reply(ok bool, payload []byte) error { + if !r.WantReply { + return nil + } + + if r.ch == nil { + return r.mux.ackRequest(ok, payload) + } + + return r.ch.ackRequest(ok) +} + +// RejectionReason is an enumeration used when rejecting channel creation +// requests. See RFC 4254, section 5.1. +type RejectionReason uint32 + +const ( + Prohibited RejectionReason = iota + 1 + ConnectionFailed + UnknownChannelType + ResourceShortage +) + +// String converts the rejection reason to human readable form. +func (r RejectionReason) String() string { + switch r { + case Prohibited: + return "administratively prohibited" + case ConnectionFailed: + return "connect failed" + case UnknownChannelType: + return "unknown channel type" + case ResourceShortage: + return "resource shortage" + } + return fmt.Sprintf("unknown reason %d", int(r)) +} + +func min(a uint32, b int) uint32 { + if a < uint32(b) { + return a + } + return uint32(b) +} + +type channelDirection uint8 + +const ( + channelInbound channelDirection = iota + channelOutbound +) + +// channel is an implementation of the Channel interface that works +// with the mux class. +type channel struct { + // R/O after creation + chanType string + extraData []byte + localId, remoteId uint32 + + // maxIncomingPayload and maxRemotePayload are the maximum + // payload sizes of normal and extended data packets for + // receiving and sending, respectively. The wire packet will + // be 9 or 13 bytes larger (excluding encryption overhead). + maxIncomingPayload uint32 + maxRemotePayload uint32 + + mux *mux + + // decided is set to true if an accept or reject message has been sent + // (for outbound channels) or received (for inbound channels). + decided bool + + // direction contains either channelOutbound, for channels created + // locally, or channelInbound, for channels created by the peer. + direction channelDirection + + // Pending internal channel messages. + msg chan interface{} + + // Since requests have no ID, there can be only one request + // with WantReply=true outstanding. This lock is held by a + // goroutine that has such an outgoing request pending. + sentRequestMu sync.Mutex + + incomingRequests chan *Request + + sentEOF bool + + // thread-safe data + remoteWin window + pending *buffer + extPending *buffer + + // windowMu protects myWindow, the flow-control window. + windowMu sync.Mutex + myWindow uint32 + + // writeMu serializes calls to mux.conn.writePacket() and + // protects sentClose and packetPool. This mutex must be + // different from windowMu, as writePacket can block if there + // is a key exchange pending. + writeMu sync.Mutex + sentClose bool + + // packetPool has a buffer for each extended channel ID to + // save allocations during writes. 
+ packetPool map[uint32][]byte +} + +// writePacket sends a packet. If the packet is a channel close, it updates +// sentClose. This method takes the lock c.writeMu. +func (ch *channel) writePacket(packet []byte) error { + ch.writeMu.Lock() + if ch.sentClose { + ch.writeMu.Unlock() + return io.EOF + } + ch.sentClose = (packet[0] == msgChannelClose) + err := ch.mux.conn.writePacket(packet) + ch.writeMu.Unlock() + return err +} + +func (ch *channel) sendMessage(msg interface{}) error { + if debugMux { + log.Printf("send(%d): %#v", ch.mux.chanList.offset, msg) + } + + p := Marshal(msg) + binary.BigEndian.PutUint32(p[1:], ch.remoteId) + return ch.writePacket(p) +} + +// WriteExtended writes data to a specific extended stream. These streams are +// used, for example, for stderr. +func (ch *channel) WriteExtended(data []byte, extendedCode uint32) (n int, err error) { + if ch.sentEOF { + return 0, io.EOF + } + // 1 byte message type, 4 bytes remoteId, 4 bytes data length + opCode := byte(msgChannelData) + headerLength := uint32(9) + if extendedCode > 0 { + headerLength += 4 + opCode = msgChannelExtendedData + } + + ch.writeMu.Lock() + packet := ch.packetPool[extendedCode] + // We don't remove the buffer from packetPool, so + // WriteExtended calls from different goroutines will be + // flagged as errors by the race detector. + ch.writeMu.Unlock() + + for len(data) > 0 { + space := min(ch.maxRemotePayload, len(data)) + if space, err = ch.remoteWin.reserve(space); err != nil { + return n, err + } + if want := headerLength + space; uint32(cap(packet)) < want { + packet = make([]byte, want) + } else { + packet = packet[:want] + } + + todo := data[:space] + + packet[0] = opCode + binary.BigEndian.PutUint32(packet[1:], ch.remoteId) + if extendedCode > 0 { + binary.BigEndian.PutUint32(packet[5:], uint32(extendedCode)) + } + binary.BigEndian.PutUint32(packet[headerLength-4:], uint32(len(todo))) + copy(packet[headerLength:], todo) + if err = ch.writePacket(packet); err != nil { + return n, err + } + + n += len(todo) + data = data[len(todo):] + } + + ch.writeMu.Lock() + ch.packetPool[extendedCode] = packet + ch.writeMu.Unlock() + + return n, err +} + +func (ch *channel) handleData(packet []byte) error { + headerLen := 9 + isExtendedData := packet[0] == msgChannelExtendedData + if isExtendedData { + headerLen = 13 + } + if len(packet) < headerLen { + // malformed data packet + return parseError(packet[0]) + } + + var extended uint32 + if isExtendedData { + extended = binary.BigEndian.Uint32(packet[5:]) + } + + length := binary.BigEndian.Uint32(packet[headerLen-4 : headerLen]) + if length == 0 { + return nil + } + if length > ch.maxIncomingPayload { + // TODO(hanwen): should send Disconnect? + return errors.New("ssh: incoming packet exceeds maximum payload size") + } + + data := packet[headerLen:] + if length != uint32(len(data)) { + return errors.New("ssh: wrong packet length") + } + + ch.windowMu.Lock() + if ch.myWindow < length { + ch.windowMu.Unlock() + // TODO(hanwen): should send Disconnect with reason? + return errors.New("ssh: remote side wrote too much") + } + ch.myWindow -= length + ch.windowMu.Unlock() + + if extended == 1 { + ch.extPending.write(data) + } else if extended > 0 { + // discard other extended data. + } else { + ch.pending.write(data) + } + return nil +} + +func (c *channel) adjustWindow(n uint32) error { + c.windowMu.Lock() + // Since myWindow is managed on our side, and can never exceed + // the initial window setting, we don't worry about overflow. 
+ c.myWindow += uint32(n) + c.windowMu.Unlock() + return c.sendMessage(windowAdjustMsg{ + AdditionalBytes: uint32(n), + }) +} + +func (c *channel) ReadExtended(data []byte, extended uint32) (n int, err error) { + switch extended { + case 1: + n, err = c.extPending.Read(data) + case 0: + n, err = c.pending.Read(data) + default: + return 0, fmt.Errorf("ssh: extended code %d unimplemented", extended) + } + + if n > 0 { + err = c.adjustWindow(uint32(n)) + // sendWindowAdjust can return io.EOF if the remote + // peer has closed the connection, however we want to + // defer forwarding io.EOF to the caller of Read until + // the buffer has been drained. + if n > 0 && err == io.EOF { + err = nil + } + } + + return n, err +} + +func (c *channel) close() { + c.pending.eof() + c.extPending.eof() + close(c.msg) + close(c.incomingRequests) + c.writeMu.Lock() + // This is not necessary for a normal channel teardown, but if + // there was another error, it is. + c.sentClose = true + c.writeMu.Unlock() + // Unblock writers. + c.remoteWin.close() +} + +// responseMessageReceived is called when a success or failure message is +// received on a channel to check that such a message is reasonable for the +// given channel. +func (ch *channel) responseMessageReceived() error { + if ch.direction == channelInbound { + return errors.New("ssh: channel response message received on inbound channel") + } + if ch.decided { + return errors.New("ssh: duplicate response received for channel") + } + ch.decided = true + return nil +} + +func (ch *channel) handlePacket(packet []byte) error { + switch packet[0] { + case msgChannelData, msgChannelExtendedData: + return ch.handleData(packet) + case msgChannelClose: + ch.sendMessage(channelCloseMsg{PeersID: ch.remoteId}) + ch.mux.chanList.remove(ch.localId) + ch.close() + return nil + case msgChannelEOF: + // RFC 4254 is mute on how EOF affects dataExt messages but + // it is logical to signal EOF at the same time. 
+ ch.extPending.eof() + ch.pending.eof() + return nil + } + + decoded, err := decode(packet) + if err != nil { + return err + } + + switch msg := decoded.(type) { + case *channelOpenFailureMsg: + if err := ch.responseMessageReceived(); err != nil { + return err + } + ch.mux.chanList.remove(msg.PeersID) + ch.msg <- msg + case *channelOpenConfirmMsg: + if err := ch.responseMessageReceived(); err != nil { + return err + } + if msg.MaxPacketSize < minPacketLength || msg.MaxPacketSize > 1<<31 { + return fmt.Errorf("ssh: invalid MaxPacketSize %d from peer", msg.MaxPacketSize) + } + ch.remoteId = msg.MyID + ch.maxRemotePayload = msg.MaxPacketSize + ch.remoteWin.add(msg.MyWindow) + ch.msg <- msg + case *windowAdjustMsg: + if !ch.remoteWin.add(msg.AdditionalBytes) { + return fmt.Errorf("ssh: invalid window update for %d bytes", msg.AdditionalBytes) + } + case *channelRequestMsg: + req := Request{ + Type: msg.Request, + WantReply: msg.WantReply, + Payload: msg.RequestSpecificData, + ch: ch, + } + + ch.incomingRequests <- &req + default: + ch.msg <- msg + } + return nil +} + +func (m *mux) newChannel(chanType string, direction channelDirection, extraData []byte) *channel { + ch := &channel{ + remoteWin: window{Cond: newCond()}, + myWindow: channelWindowSize, + pending: newBuffer(), + extPending: newBuffer(), + direction: direction, + incomingRequests: make(chan *Request, chanSize), + msg: make(chan interface{}, chanSize), + chanType: chanType, + extraData: extraData, + mux: m, + packetPool: make(map[uint32][]byte), + } + ch.localId = m.chanList.add(ch) + return ch +} + +var errUndecided = errors.New("ssh: must Accept or Reject channel") +var errDecidedAlready = errors.New("ssh: can call Accept or Reject only once") + +type extChannel struct { + code uint32 + ch *channel +} + +func (e *extChannel) Write(data []byte) (n int, err error) { + return e.ch.WriteExtended(data, e.code) +} + +func (e *extChannel) Read(data []byte) (n int, err error) { + return e.ch.ReadExtended(data, e.code) +} + +func (ch *channel) Accept() (Channel, <-chan *Request, error) { + if ch.decided { + return nil, nil, errDecidedAlready + } + ch.maxIncomingPayload = channelMaxPacket + confirm := channelOpenConfirmMsg{ + PeersID: ch.remoteId, + MyID: ch.localId, + MyWindow: ch.myWindow, + MaxPacketSize: ch.maxIncomingPayload, + } + ch.decided = true + if err := ch.sendMessage(confirm); err != nil { + return nil, nil, err + } + + return ch, ch.incomingRequests, nil +} + +func (ch *channel) Reject(reason RejectionReason, message string) error { + if ch.decided { + return errDecidedAlready + } + reject := channelOpenFailureMsg{ + PeersID: ch.remoteId, + Reason: reason, + Message: message, + Language: "en", + } + ch.decided = true + return ch.sendMessage(reject) +} + +func (ch *channel) Read(data []byte) (int, error) { + if !ch.decided { + return 0, errUndecided + } + return ch.ReadExtended(data, 0) +} + +func (ch *channel) Write(data []byte) (int, error) { + if !ch.decided { + return 0, errUndecided + } + return ch.WriteExtended(data, 0) +} + +func (ch *channel) CloseWrite() error { + if !ch.decided { + return errUndecided + } + ch.sentEOF = true + return ch.sendMessage(channelEOFMsg{ + PeersID: ch.remoteId}) +} + +func (ch *channel) Close() error { + if !ch.decided { + return errUndecided + } + + return ch.sendMessage(channelCloseMsg{ + PeersID: ch.remoteId}) +} + +// Extended returns an io.ReadWriter that sends and receives data on the given, +// SSH extended stream. Such streams are used, for example, for stderr. 
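// How the Accept/Reject pair above surfaces through the public API: a
// server-side loop that decides every incoming channel exactly once, which
// is exactly what errUndecided and errDecidedAlready enforce. A sketch
// against the exported golang.org/x/crypto/ssh interface:
package sshexample

import "golang.org/x/crypto/ssh"

func handleChannels(chans <-chan ssh.NewChannel) {
	for newChan := range chans {
		if newChan.ChannelType() != "session" {
			// Reject sends channelOpenFailureMsg with the given reason.
			newChan.Reject(ssh.UnknownChannelType, "only session channels are supported")
			continue
		}
		// Accept sends channelOpenConfirmMsg and yields the Channel plus
		// its request stream, which must be serviced.
		ch, requests, err := newChan.Accept()
		if err != nil {
			continue
		}
		go ssh.DiscardRequests(requests)
		_ = ch // a real server would wire the channel up to a shell or exec handler
	}
}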
+func (ch *channel) Extended(code uint32) io.ReadWriter { + if !ch.decided { + return nil + } + return &extChannel{code, ch} +} + +func (ch *channel) Stderr() io.ReadWriter { + return ch.Extended(1) +} + +func (ch *channel) SendRequest(name string, wantReply bool, payload []byte) (bool, error) { + if !ch.decided { + return false, errUndecided + } + + if wantReply { + ch.sentRequestMu.Lock() + defer ch.sentRequestMu.Unlock() + } + + msg := channelRequestMsg{ + PeersID: ch.remoteId, + Request: name, + WantReply: wantReply, + RequestSpecificData: payload, + } + + if err := ch.sendMessage(msg); err != nil { + return false, err + } + + if wantReply { + m, ok := (<-ch.msg) + if !ok { + return false, io.EOF + } + switch m.(type) { + case *channelRequestFailureMsg: + return false, nil + case *channelRequestSuccessMsg: + return true, nil + default: + return false, fmt.Errorf("ssh: unexpected response to channel request: %#v", m) + } + } + + return false, nil +} + +// ackRequest either sends an ack or nack to the channel request. +func (ch *channel) ackRequest(ok bool) error { + if !ch.decided { + return errUndecided + } + + var msg interface{} + if !ok { + msg = channelRequestFailureMsg{ + PeersID: ch.remoteId, + } + } else { + msg = channelRequestSuccessMsg{ + PeersID: ch.remoteId, + } + } + return ch.sendMessage(msg) +} + +func (ch *channel) ChannelType() string { + return ch.chanType +} + +func (ch *channel) ExtraData() []byte { + return ch.extraData +} diff --git a/vendor/golang.org/x/crypto/ssh/cipher.go b/vendor/golang.org/x/crypto/ssh/cipher.go new file mode 100644 index 00000000000..b0204ee59f2 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/cipher.go @@ -0,0 +1,781 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "crypto/aes" + "crypto/cipher" + "crypto/des" + "crypto/rc4" + "crypto/subtle" + "encoding/binary" + "errors" + "fmt" + "hash" + "io" + "io/ioutil" + + "golang.org/x/crypto/chacha20" + "golang.org/x/crypto/poly1305" +) + +const ( + packetSizeMultiple = 16 // TODO(huin) this should be determined by the cipher. + + // RFC 4253 section 6.1 defines a minimum packet size of 32768 that implementations + // MUST be able to process (plus a few more kilobytes for padding and mac). The RFC + // indicates implementations SHOULD be able to handle larger packet sizes, but then + // waffles on about reasonable limits. + // + // OpenSSH caps their maxPacket at 256kB so we choose to do + // the same. maxPacket is also used to ensure that uint32 + // length fields do not overflow, so it should remain well + // below 4G. + maxPacket = 256 * 1024 +) + +// noneCipher implements cipher.Stream and provides no encryption. It is used +// by the transport before the first key-exchange. 
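// The SendRequest round trip implemented above, seen from the caller's
// side: an "exec" request on an accepted channel, with wantReply set so
// the outcome arrives as channelRequestSuccessMsg or
// channelRequestFailureMsg. A sketch; execMsg is a local stand-in for the
// RFC 4254 payload (a single string), not an exported API.
package sshexample

import (
	"fmt"

	"golang.org/x/crypto/ssh"
)

type execMsg struct {
	Command string
}

func runCommand(ch ssh.Channel, cmd string) error {
	ok, err := ch.SendRequest("exec", true, ssh.Marshal(&execMsg{Command: cmd}))
	if err != nil {
		return err
	}
	if !ok {
		return fmt.Errorf("ssh: exec request for %q was refused by the peer", cmd)
	}
	return nil
}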
+type noneCipher struct{} + +func (c noneCipher) XORKeyStream(dst, src []byte) { + copy(dst, src) +} + +func newAESCTR(key, iv []byte) (cipher.Stream, error) { + c, err := aes.NewCipher(key) + if err != nil { + return nil, err + } + return cipher.NewCTR(c, iv), nil +} + +func newRC4(key, iv []byte) (cipher.Stream, error) { + return rc4.NewCipher(key) +} + +type cipherMode struct { + keySize int + ivSize int + create func(key, iv []byte, macKey []byte, algs directionAlgorithms) (packetCipher, error) +} + +func streamCipherMode(skip int, createFunc func(key, iv []byte) (cipher.Stream, error)) func(key, iv []byte, macKey []byte, algs directionAlgorithms) (packetCipher, error) { + return func(key, iv, macKey []byte, algs directionAlgorithms) (packetCipher, error) { + stream, err := createFunc(key, iv) + if err != nil { + return nil, err + } + + var streamDump []byte + if skip > 0 { + streamDump = make([]byte, 512) + } + + for remainingToDump := skip; remainingToDump > 0; { + dumpThisTime := remainingToDump + if dumpThisTime > len(streamDump) { + dumpThisTime = len(streamDump) + } + stream.XORKeyStream(streamDump[:dumpThisTime], streamDump[:dumpThisTime]) + remainingToDump -= dumpThisTime + } + + mac := macModes[algs.MAC].new(macKey) + return &streamPacketCipher{ + mac: mac, + etm: macModes[algs.MAC].etm, + macResult: make([]byte, mac.Size()), + cipher: stream, + }, nil + } +} + +// cipherModes documents properties of supported ciphers. Ciphers not included +// are not supported and will not be negotiated, even if explicitly requested in +// ClientConfig.Crypto.Ciphers. +var cipherModes = map[string]*cipherMode{ + // Ciphers from RFC4344, which introduced many CTR-based ciphers. Algorithms + // are defined in the order specified in the RFC. + "aes128-ctr": {16, aes.BlockSize, streamCipherMode(0, newAESCTR)}, + "aes192-ctr": {24, aes.BlockSize, streamCipherMode(0, newAESCTR)}, + "aes256-ctr": {32, aes.BlockSize, streamCipherMode(0, newAESCTR)}, + + // Ciphers from RFC4345, which introduces security-improved arcfour ciphers. + // They are defined in the order specified in the RFC. + "arcfour128": {16, 0, streamCipherMode(1536, newRC4)}, + "arcfour256": {32, 0, streamCipherMode(1536, newRC4)}, + + // Cipher defined in RFC 4253, which describes SSH Transport Layer Protocol. + // Note that this cipher is not safe, as stated in RFC 4253: "Arcfour (and + // RC4) has problems with weak keys, and should be used with caution." + // RFC4345 introduces improved versions of Arcfour. + "arcfour": {16, 0, streamCipherMode(0, newRC4)}, + + // AEAD ciphers + gcmCipherID: {16, 12, newGCMCipher}, + chacha20Poly1305ID: {64, 0, newChaCha20Cipher}, + + // CBC mode is insecure and so is not included in the default config. + // (See http://www.isg.rhul.ac.uk/~kp/SandPfinal.pdf). If absolutely + // needed, it's possible to specify a custom Config to enable it. + // You should expect that an active attacker can recover plaintext if + // you do. + aes128cbcID: {16, aes.BlockSize, newAESCBCCipher}, + + // 3des-cbc is insecure and is not included in the default + // config. + tripledescbcID: {24, des.BlockSize, newTripleDESCBCCipher}, +} + +// prefixLen is the length of the packet prefix that contains the packet length +// and number of padding bytes. +const prefixLen = 5 + +// streamPacketCipher is a packetCipher using a stream cipher. +type streamPacketCipher struct { + mac hash.Hash + cipher cipher.Stream + etm bool + + // The following members are to avoid per-packet allocations. 
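// The cipherModes table above is the single source of truth for what can
// be negotiated: SetDefaults silently drops any configured name that has
// no entry in it. A sketch of narrowing negotiation to the AEAD modes,
// with placeholder credentials:
package sshexample

import "golang.org/x/crypto/ssh"

func aeadOnlyConfig() *ssh.ClientConfig {
	return &ssh.ClientConfig{
		User: "demo",
		Auth: []ssh.AuthMethod{ssh.Password("placeholder")},
		// InsecureIgnoreHostKey is for examples and tests only.
		HostKeyCallback: ssh.InsecureIgnoreHostKey(),
		Config: ssh.Config{
			Ciphers: []string{"aes128-gcm@openssh.com", "chacha20-poly1305@openssh.com"},
		},
	}
}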
+ prefix [prefixLen]byte + seqNumBytes [4]byte + padding [2 * packetSizeMultiple]byte + packetData []byte + macResult []byte +} + +// readCipherPacket reads and decrypt a single packet from the reader argument. +func (s *streamPacketCipher) readCipherPacket(seqNum uint32, r io.Reader) ([]byte, error) { + if _, err := io.ReadFull(r, s.prefix[:]); err != nil { + return nil, err + } + + var encryptedPaddingLength [1]byte + if s.mac != nil && s.etm { + copy(encryptedPaddingLength[:], s.prefix[4:5]) + s.cipher.XORKeyStream(s.prefix[4:5], s.prefix[4:5]) + } else { + s.cipher.XORKeyStream(s.prefix[:], s.prefix[:]) + } + + length := binary.BigEndian.Uint32(s.prefix[0:4]) + paddingLength := uint32(s.prefix[4]) + + var macSize uint32 + if s.mac != nil { + s.mac.Reset() + binary.BigEndian.PutUint32(s.seqNumBytes[:], seqNum) + s.mac.Write(s.seqNumBytes[:]) + if s.etm { + s.mac.Write(s.prefix[:4]) + s.mac.Write(encryptedPaddingLength[:]) + } else { + s.mac.Write(s.prefix[:]) + } + macSize = uint32(s.mac.Size()) + } + + if length <= paddingLength+1 { + return nil, errors.New("ssh: invalid packet length, packet too small") + } + + if length > maxPacket { + return nil, errors.New("ssh: invalid packet length, packet too large") + } + + // the maxPacket check above ensures that length-1+macSize + // does not overflow. + if uint32(cap(s.packetData)) < length-1+macSize { + s.packetData = make([]byte, length-1+macSize) + } else { + s.packetData = s.packetData[:length-1+macSize] + } + + if _, err := io.ReadFull(r, s.packetData); err != nil { + return nil, err + } + mac := s.packetData[length-1:] + data := s.packetData[:length-1] + + if s.mac != nil && s.etm { + s.mac.Write(data) + } + + s.cipher.XORKeyStream(data, data) + + if s.mac != nil { + if !s.etm { + s.mac.Write(data) + } + s.macResult = s.mac.Sum(s.macResult[:0]) + if subtle.ConstantTimeCompare(s.macResult, mac) != 1 { + return nil, errors.New("ssh: MAC failure") + } + } + + return s.packetData[:length-paddingLength-1], nil +} + +// writeCipherPacket encrypts and sends a packet of data to the writer argument +func (s *streamPacketCipher) writeCipherPacket(seqNum uint32, w io.Writer, rand io.Reader, packet []byte) error { + if len(packet) > maxPacket { + return errors.New("ssh: packet too large") + } + + aadlen := 0 + if s.mac != nil && s.etm { + // packet length is not encrypted for EtM modes + aadlen = 4 + } + + paddingLength := packetSizeMultiple - (prefixLen+len(packet)-aadlen)%packetSizeMultiple + if paddingLength < 4 { + paddingLength += packetSizeMultiple + } + + length := len(packet) + 1 + paddingLength + binary.BigEndian.PutUint32(s.prefix[:], uint32(length)) + s.prefix[4] = byte(paddingLength) + padding := s.padding[:paddingLength] + if _, err := io.ReadFull(rand, padding); err != nil { + return err + } + + if s.mac != nil { + s.mac.Reset() + binary.BigEndian.PutUint32(s.seqNumBytes[:], seqNum) + s.mac.Write(s.seqNumBytes[:]) + + if s.etm { + // For EtM algorithms, the packet length must stay unencrypted, + // but the following data (padding length) must be encrypted + s.cipher.XORKeyStream(s.prefix[4:5], s.prefix[4:5]) + } + + s.mac.Write(s.prefix[:]) + + if !s.etm { + // For non-EtM algorithms, the algorithm is applied on unencrypted data + s.mac.Write(packet) + s.mac.Write(padding) + } + } + + if !(s.mac != nil && s.etm) { + // For EtM algorithms, the padding length has already been encrypted + // and the packet length must remain unencrypted + s.cipher.XORKeyStream(s.prefix[:], s.prefix[:]) + } + + s.cipher.XORKeyStream(packet, packet) + 
s.cipher.XORKeyStream(padding, padding) + + if s.mac != nil && s.etm { + // For EtM algorithms, packet and padding must be encrypted + s.mac.Write(packet) + s.mac.Write(padding) + } + + if _, err := w.Write(s.prefix[:]); err != nil { + return err + } + if _, err := w.Write(packet); err != nil { + return err + } + if _, err := w.Write(padding); err != nil { + return err + } + + if s.mac != nil { + s.macResult = s.mac.Sum(s.macResult[:0]) + if _, err := w.Write(s.macResult); err != nil { + return err + } + } + + return nil +} + +type gcmCipher struct { + aead cipher.AEAD + prefix [4]byte + iv []byte + buf []byte +} + +func newGCMCipher(key, iv, unusedMacKey []byte, unusedAlgs directionAlgorithms) (packetCipher, error) { + c, err := aes.NewCipher(key) + if err != nil { + return nil, err + } + + aead, err := cipher.NewGCM(c) + if err != nil { + return nil, err + } + + return &gcmCipher{ + aead: aead, + iv: iv, + }, nil +} + +const gcmTagSize = 16 + +func (c *gcmCipher) writeCipherPacket(seqNum uint32, w io.Writer, rand io.Reader, packet []byte) error { + // Pad out to multiple of 16 bytes. This is different from the + // stream cipher because that encrypts the length too. + padding := byte(packetSizeMultiple - (1+len(packet))%packetSizeMultiple) + if padding < 4 { + padding += packetSizeMultiple + } + + length := uint32(len(packet) + int(padding) + 1) + binary.BigEndian.PutUint32(c.prefix[:], length) + if _, err := w.Write(c.prefix[:]); err != nil { + return err + } + + if cap(c.buf) < int(length) { + c.buf = make([]byte, length) + } else { + c.buf = c.buf[:length] + } + + c.buf[0] = padding + copy(c.buf[1:], packet) + if _, err := io.ReadFull(rand, c.buf[1+len(packet):]); err != nil { + return err + } + c.buf = c.aead.Seal(c.buf[:0], c.iv, c.buf, c.prefix[:]) + if _, err := w.Write(c.buf); err != nil { + return err + } + c.incIV() + + return nil +} + +func (c *gcmCipher) incIV() { + for i := 4 + 7; i >= 4; i-- { + c.iv[i]++ + if c.iv[i] != 0 { + break + } + } +} + +func (c *gcmCipher) readCipherPacket(seqNum uint32, r io.Reader) ([]byte, error) { + if _, err := io.ReadFull(r, c.prefix[:]); err != nil { + return nil, err + } + length := binary.BigEndian.Uint32(c.prefix[:]) + if length > maxPacket { + return nil, errors.New("ssh: max packet length exceeded") + } + + if cap(c.buf) < int(length+gcmTagSize) { + c.buf = make([]byte, length+gcmTagSize) + } else { + c.buf = c.buf[:length+gcmTagSize] + } + + if _, err := io.ReadFull(r, c.buf); err != nil { + return nil, err + } + + plain, err := c.aead.Open(c.buf[:0], c.iv, c.buf, c.prefix[:]) + if err != nil { + return nil, err + } + c.incIV() + + padding := plain[0] + if padding < 4 { + // padding is a byte, so it automatically satisfies + // the maximum size, which is 255. + return nil, fmt.Errorf("ssh: illegal padding %d", padding) + } + + if int(padding+1) >= len(plain) { + return nil, fmt.Errorf("ssh: padding %d too large", padding) + } + plain = plain[1 : length-uint32(padding)] + return plain, nil +} + +// cbcCipher implements aes128-cbc cipher defined in RFC 4253 section 6.1 +type cbcCipher struct { + mac hash.Hash + macSize uint32 + decrypter cipher.BlockMode + encrypter cipher.BlockMode + + // The following members are to avoid per-packet allocations. + seqNumBytes [4]byte + packetData []byte + macResult []byte + + // Amount of data we should still read to hide which + // verification error triggered. 
+ oracleCamouflage uint32 +} + +func newCBCCipher(c cipher.Block, key, iv, macKey []byte, algs directionAlgorithms) (packetCipher, error) { + cbc := &cbcCipher{ + mac: macModes[algs.MAC].new(macKey), + decrypter: cipher.NewCBCDecrypter(c, iv), + encrypter: cipher.NewCBCEncrypter(c, iv), + packetData: make([]byte, 1024), + } + if cbc.mac != nil { + cbc.macSize = uint32(cbc.mac.Size()) + } + + return cbc, nil +} + +func newAESCBCCipher(key, iv, macKey []byte, algs directionAlgorithms) (packetCipher, error) { + c, err := aes.NewCipher(key) + if err != nil { + return nil, err + } + + cbc, err := newCBCCipher(c, key, iv, macKey, algs) + if err != nil { + return nil, err + } + + return cbc, nil +} + +func newTripleDESCBCCipher(key, iv, macKey []byte, algs directionAlgorithms) (packetCipher, error) { + c, err := des.NewTripleDESCipher(key) + if err != nil { + return nil, err + } + + cbc, err := newCBCCipher(c, key, iv, macKey, algs) + if err != nil { + return nil, err + } + + return cbc, nil +} + +func maxUInt32(a, b int) uint32 { + if a > b { + return uint32(a) + } + return uint32(b) +} + +const ( + cbcMinPacketSizeMultiple = 8 + cbcMinPacketSize = 16 + cbcMinPaddingSize = 4 +) + +// cbcError represents a verification error that may leak information. +type cbcError string + +func (e cbcError) Error() string { return string(e) } + +func (c *cbcCipher) readCipherPacket(seqNum uint32, r io.Reader) ([]byte, error) { + p, err := c.readCipherPacketLeaky(seqNum, r) + if err != nil { + if _, ok := err.(cbcError); ok { + // Verification error: read a fixed amount of + // data, to make distinguishing between + // failing MAC and failing length check more + // difficult. + io.CopyN(ioutil.Discard, r, int64(c.oracleCamouflage)) + } + } + return p, err +} + +func (c *cbcCipher) readCipherPacketLeaky(seqNum uint32, r io.Reader) ([]byte, error) { + blockSize := c.decrypter.BlockSize() + + // Read the header, which will include some of the subsequent data in the + // case of block ciphers - this is copied back to the payload later. + // How many bytes of payload/padding will be read with this first read. + firstBlockLength := uint32((prefixLen + blockSize - 1) / blockSize * blockSize) + firstBlock := c.packetData[:firstBlockLength] + if _, err := io.ReadFull(r, firstBlock); err != nil { + return nil, err + } + + c.oracleCamouflage = maxPacket + 4 + c.macSize - firstBlockLength + + c.decrypter.CryptBlocks(firstBlock, firstBlock) + length := binary.BigEndian.Uint32(firstBlock[:4]) + if length > maxPacket { + return nil, cbcError("ssh: packet too large") + } + if length+4 < maxUInt32(cbcMinPacketSize, blockSize) { + // The minimum size of a packet is 16 (or the cipher block size, whichever + // is larger) bytes. + return nil, cbcError("ssh: packet too small") + } + // The length of the packet (including the length field but not the MAC) must + // be a multiple of the block size or 8, whichever is larger. + if (length+4)%maxUInt32(cbcMinPacketSizeMultiple, blockSize) != 0 { + return nil, cbcError("ssh: invalid packet length multiple") + } + + paddingLength := uint32(firstBlock[4]) + if paddingLength < cbcMinPaddingSize || length <= paddingLength+1 { + return nil, cbcError("ssh: invalid packet length") + } + + // Positions within the c.packetData buffer: + macStart := 4 + length + paddingStart := macStart - paddingLength + + // Entire packet size, starting before length, ending at end of mac. + entirePacketSize := macStart + c.macSize + + // Ensure c.packetData is large enough for the entire packet data. 
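// Two defensive habits meet in the CBC read path that continues below:
// MAC comparison in constant time, and reading a fixed amount of
// camouflage data so a failed length check and a failed MAC look alike
// from the outside. The constant-time comparison in isolation, as a
// sketch with an HMAC-SHA-256 stand-in:
package sshexample

import (
	"crypto/hmac"
	"crypto/sha256"
	"crypto/subtle"
)

func verifyTag(key, msg, tag []byte) bool {
	m := hmac.New(sha256.New, key)
	m.Write(msg)
	expected := m.Sum(nil)
	// The running time does not depend on how many leading bytes match,
	// which removes one timing side channel from the verifier.
	return subtle.ConstantTimeCompare(expected, tag) == 1
}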
+ if uint32(cap(c.packetData)) < entirePacketSize { + // Still need to upsize and copy, but this should be rare at runtime, only + // on upsizing the packetData buffer. + c.packetData = make([]byte, entirePacketSize) + copy(c.packetData, firstBlock) + } else { + c.packetData = c.packetData[:entirePacketSize] + } + + n, err := io.ReadFull(r, c.packetData[firstBlockLength:]) + if err != nil { + return nil, err + } + c.oracleCamouflage -= uint32(n) + + remainingCrypted := c.packetData[firstBlockLength:macStart] + c.decrypter.CryptBlocks(remainingCrypted, remainingCrypted) + + mac := c.packetData[macStart:] + if c.mac != nil { + c.mac.Reset() + binary.BigEndian.PutUint32(c.seqNumBytes[:], seqNum) + c.mac.Write(c.seqNumBytes[:]) + c.mac.Write(c.packetData[:macStart]) + c.macResult = c.mac.Sum(c.macResult[:0]) + if subtle.ConstantTimeCompare(c.macResult, mac) != 1 { + return nil, cbcError("ssh: MAC failure") + } + } + + return c.packetData[prefixLen:paddingStart], nil +} + +func (c *cbcCipher) writeCipherPacket(seqNum uint32, w io.Writer, rand io.Reader, packet []byte) error { + effectiveBlockSize := maxUInt32(cbcMinPacketSizeMultiple, c.encrypter.BlockSize()) + + // Length of encrypted portion of the packet (header, payload, padding). + // Enforce minimum padding and packet size. + encLength := maxUInt32(prefixLen+len(packet)+cbcMinPaddingSize, cbcMinPaddingSize) + // Enforce block size. + encLength = (encLength + effectiveBlockSize - 1) / effectiveBlockSize * effectiveBlockSize + + length := encLength - 4 + paddingLength := int(length) - (1 + len(packet)) + + // Overall buffer contains: header, payload, padding, mac. + // Space for the MAC is reserved in the capacity but not the slice length. + bufferSize := encLength + c.macSize + if uint32(cap(c.packetData)) < bufferSize { + c.packetData = make([]byte, encLength, bufferSize) + } else { + c.packetData = c.packetData[:encLength] + } + + p := c.packetData + + // Packet header. + binary.BigEndian.PutUint32(p, length) + p = p[4:] + p[0] = byte(paddingLength) + + // Payload. + p = p[1:] + copy(p, packet) + + // Padding. + p = p[len(packet):] + if _, err := io.ReadFull(rand, p); err != nil { + return err + } + + if c.mac != nil { + c.mac.Reset() + binary.BigEndian.PutUint32(c.seqNumBytes[:], seqNum) + c.mac.Write(c.seqNumBytes[:]) + c.mac.Write(c.packetData) + // The MAC is now appended into the capacity reserved for it earlier. + c.packetData = c.mac.Sum(c.packetData) + } + + c.encrypter.CryptBlocks(c.packetData[:encLength], c.packetData[:encLength]) + + if _, err := w.Write(c.packetData); err != nil { + return err + } + + return nil +} + +const chacha20Poly1305ID = "chacha20-poly1305@openssh.com" + +// chacha20Poly1305Cipher implements the chacha20-poly1305@openssh.com +// AEAD, which is described here: +// +// https://tools.ietf.org/html/draft-josefsson-ssh-chacha20-poly1305-openssh-00 +// +// the methods here also implement padding, which RFC4253 Section 6 +// also requires of stream ciphers. 
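// A recurring micro-pattern in these ciphers, including the branch right
// above: grow a scratch buffer only when its capacity is insufficient,
// otherwise reslice it, so steady-state packet handling does not
// allocate. In isolation:
package sshexample

func sized(buf []byte, n int) []byte {
	if cap(buf) < n {
		return make([]byte, n) // rare: grow once, then keep reusing
	}
	return buf[:n] // common: reslice, no allocation
}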
+type chacha20Poly1305Cipher struct { + lengthKey [32]byte + contentKey [32]byte + buf []byte +} + +func newChaCha20Cipher(key, unusedIV, unusedMACKey []byte, unusedAlgs directionAlgorithms) (packetCipher, error) { + if len(key) != 64 { + panic(len(key)) + } + + c := &chacha20Poly1305Cipher{ + buf: make([]byte, 256), + } + + copy(c.contentKey[:], key[:32]) + copy(c.lengthKey[:], key[32:]) + return c, nil +} + +func (c *chacha20Poly1305Cipher) readCipherPacket(seqNum uint32, r io.Reader) ([]byte, error) { + nonce := make([]byte, 12) + binary.BigEndian.PutUint32(nonce[8:], seqNum) + s, err := chacha20.NewUnauthenticatedCipher(c.contentKey[:], nonce) + if err != nil { + return nil, err + } + var polyKey, discardBuf [32]byte + s.XORKeyStream(polyKey[:], polyKey[:]) + s.XORKeyStream(discardBuf[:], discardBuf[:]) // skip the next 32 bytes + + encryptedLength := c.buf[:4] + if _, err := io.ReadFull(r, encryptedLength); err != nil { + return nil, err + } + + var lenBytes [4]byte + ls, err := chacha20.NewUnauthenticatedCipher(c.lengthKey[:], nonce) + if err != nil { + return nil, err + } + ls.XORKeyStream(lenBytes[:], encryptedLength) + + length := binary.BigEndian.Uint32(lenBytes[:]) + if length > maxPacket { + return nil, errors.New("ssh: invalid packet length, packet too large") + } + + contentEnd := 4 + length + packetEnd := contentEnd + poly1305.TagSize + if uint32(cap(c.buf)) < packetEnd { + c.buf = make([]byte, packetEnd) + copy(c.buf[:], encryptedLength) + } else { + c.buf = c.buf[:packetEnd] + } + + if _, err := io.ReadFull(r, c.buf[4:packetEnd]); err != nil { + return nil, err + } + + var mac [poly1305.TagSize]byte + copy(mac[:], c.buf[contentEnd:packetEnd]) + if !poly1305.Verify(&mac, c.buf[:contentEnd], &polyKey) { + return nil, errors.New("ssh: MAC failure") + } + + plain := c.buf[4:contentEnd] + s.XORKeyStream(plain, plain) + + padding := plain[0] + if padding < 4 { + // padding is a byte, so it automatically satisfies + // the maximum size, which is 255. + return nil, fmt.Errorf("ssh: illegal padding %d", padding) + } + + if int(padding)+1 >= len(plain) { + return nil, fmt.Errorf("ssh: padding %d too large", padding) + } + + plain = plain[1 : len(plain)-int(padding)] + + return plain, nil +} + +func (c *chacha20Poly1305Cipher) writeCipherPacket(seqNum uint32, w io.Writer, rand io.Reader, payload []byte) error { + nonce := make([]byte, 12) + binary.BigEndian.PutUint32(nonce[8:], seqNum) + s, err := chacha20.NewUnauthenticatedCipher(c.contentKey[:], nonce) + if err != nil { + return err + } + var polyKey, discardBuf [32]byte + s.XORKeyStream(polyKey[:], polyKey[:]) + s.XORKeyStream(discardBuf[:], discardBuf[:]) // skip the next 32 bytes + + // There is no blocksize, so fall back to multiple of 8 byte + // padding, as described in RFC 4253, Sec 6. + const packetSizeMultiple = 8 + + padding := packetSizeMultiple - (1+len(payload))%packetSizeMultiple + if padding < 4 { + padding += packetSizeMultiple + } + + // size (4 bytes), padding (1), payload, padding, tag. 
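// The key schedule shared by both directions of this AEAD, in isolation:
// the first 32 bytes of ChaCha20 keystream at counter zero become the
// one-time Poly1305 key, the rest of block zero is discarded, and payload
// encryption starts at counter one. A sketch using the same x/crypto
// packages; note it tags only the ciphertext, while the real packet
// format covers the encrypted length field as well:
package sshexample

import (
	"golang.org/x/crypto/chacha20"
	"golang.org/x/crypto/poly1305"
)

func sealSketch(key [32]byte, nonce [12]byte, buf []byte) ([poly1305.TagSize]byte, error) {
	var tag [poly1305.TagSize]byte
	s, err := chacha20.NewUnauthenticatedCipher(key[:], nonce[:])
	if err != nil {
		return tag, err
	}
	var polyKey, discard [32]byte
	s.XORKeyStream(polyKey[:], polyKey[:]) // counter 0, first half: MAC key
	s.XORKeyStream(discard[:], discard[:]) // counter 0, second half: unused
	s.XORKeyStream(buf, buf)               // encryption begins at counter 1
	poly1305.Sum(&tag, buf, &polyKey)      // one-time tag over the ciphertext
	return tag, nil
}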
+ totalLength := 4 + 1 + len(payload) + padding + poly1305.TagSize + if cap(c.buf) < totalLength { + c.buf = make([]byte, totalLength) + } else { + c.buf = c.buf[:totalLength] + } + + binary.BigEndian.PutUint32(c.buf, uint32(1+len(payload)+padding)) + ls, err := chacha20.NewUnauthenticatedCipher(c.lengthKey[:], nonce) + if err != nil { + return err + } + ls.XORKeyStream(c.buf, c.buf[:4]) + c.buf[4] = byte(padding) + copy(c.buf[5:], payload) + packetEnd := 5 + len(payload) + padding + if _, err := io.ReadFull(rand, c.buf[5+len(payload):packetEnd]); err != nil { + return err + } + + s.XORKeyStream(c.buf[4:], c.buf[4:packetEnd]) + + var mac [poly1305.TagSize]byte + poly1305.Sum(&mac, c.buf[:packetEnd], &polyKey) + + copy(c.buf[packetEnd:], mac[:]) + + if _, err := w.Write(c.buf); err != nil { + return err + } + return nil +} diff --git a/vendor/golang.org/x/crypto/ssh/client.go b/vendor/golang.org/x/crypto/ssh/client.go new file mode 100644 index 00000000000..7b00bff1caa --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/client.go @@ -0,0 +1,278 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "bytes" + "errors" + "fmt" + "net" + "os" + "sync" + "time" +) + +// Client implements a traditional SSH client that supports shells, +// subprocesses, TCP port/streamlocal forwarding and tunneled dialing. +type Client struct { + Conn + + handleForwardsOnce sync.Once // guards calling (*Client).handleForwards + + forwards forwardList // forwarded tcpip connections from the remote side + mu sync.Mutex + channelHandlers map[string]chan NewChannel +} + +// HandleChannelOpen returns a channel on which NewChannel requests +// for the given type are sent. If the type already is being handled, +// nil is returned. The channel is closed when the connection is closed. +func (c *Client) HandleChannelOpen(channelType string) <-chan NewChannel { + c.mu.Lock() + defer c.mu.Unlock() + if c.channelHandlers == nil { + // The SSH channel has been closed. + c := make(chan NewChannel) + close(c) + return c + } + + ch := c.channelHandlers[channelType] + if ch != nil { + return nil + } + + ch = make(chan NewChannel, chanSize) + c.channelHandlers[channelType] = ch + return ch +} + +// NewClient creates a Client on top of the given connection. +func NewClient(c Conn, chans <-chan NewChannel, reqs <-chan *Request) *Client { + conn := &Client{ + Conn: c, + channelHandlers: make(map[string]chan NewChannel, 1), + } + + go conn.handleGlobalRequests(reqs) + go conn.handleChannelOpens(chans) + go func() { + conn.Wait() + conn.forwards.closeAll() + }() + return conn +} + +// NewClientConn establishes an authenticated SSH connection using c +// as the underlying transport. The Request and NewChannel channels +// must be serviced or the connection will hang. 
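// What the doc comment above describes, done by hand: dial the transport
// yourself, run the handshake with NewClientConn, then assemble a
// *ssh.Client so the channel and request streams are serviced. A sketch;
// the address is a placeholder:
package sshexample

import (
	"net"

	"golang.org/x/crypto/ssh"
)

func dialByHand(addr string, config *ssh.ClientConfig) (*ssh.Client, error) {
	conn, err := net.Dial("tcp", addr)
	if err != nil {
		return nil, err
	}
	c, chans, reqs, err := ssh.NewClientConn(conn, addr, config)
	if err != nil {
		return nil, err
	}
	// NewClient starts the goroutines that service chans and reqs; without
	// them the connection would stall.
	return ssh.NewClient(c, chans, reqs), nil
}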
+func NewClientConn(c net.Conn, addr string, config *ClientConfig) (Conn, <-chan NewChannel, <-chan *Request, error) { + fullConf := *config + fullConf.SetDefaults() + if fullConf.HostKeyCallback == nil { + c.Close() + return nil, nil, nil, errors.New("ssh: must specify HostKeyCallback") + } + + conn := &connection{ + sshConn: sshConn{conn: c}, + } + + if err := conn.clientHandshake(addr, &fullConf); err != nil { + c.Close() + return nil, nil, nil, fmt.Errorf("ssh: handshake failed: %v", err) + } + conn.mux = newMux(conn.transport) + return conn, conn.mux.incomingChannels, conn.mux.incomingRequests, nil +} + +// clientHandshake performs the client side key exchange. See RFC 4253 Section +// 7. +func (c *connection) clientHandshake(dialAddress string, config *ClientConfig) error { + if config.ClientVersion != "" { + c.clientVersion = []byte(config.ClientVersion) + } else { + c.clientVersion = []byte(packageVersion) + } + var err error + c.serverVersion, err = exchangeVersions(c.sshConn.conn, c.clientVersion) + if err != nil { + return err + } + + c.transport = newClientTransport( + newTransport(c.sshConn.conn, config.Rand, true /* is client */), + c.clientVersion, c.serverVersion, config, dialAddress, c.sshConn.RemoteAddr()) + if err := c.transport.waitSession(); err != nil { + return err + } + + c.sessionID = c.transport.getSessionID() + return c.clientAuthenticate(config) +} + +// verifyHostKeySignature verifies the host key obtained in the key +// exchange. +func verifyHostKeySignature(hostKey PublicKey, result *kexResult) error { + sig, rest, ok := parseSignatureBody(result.Signature) + if len(rest) > 0 || !ok { + return errors.New("ssh: signature parse error") + } + + return hostKey.Verify(result.H, sig) +} + +// NewSession opens a new Session for this client. (A session is a remote +// execution of a program.) +func (c *Client) NewSession() (*Session, error) { + ch, in, err := c.OpenChannel("session", nil) + if err != nil { + return nil, err + } + return newSession(ch, in) +} + +func (c *Client) handleGlobalRequests(incoming <-chan *Request) { + for r := range incoming { + // This handles keepalive messages and matches + // the behaviour of OpenSSH. + r.Reply(false, nil) + } +} + +// handleChannelOpens channel open messages from the remote side. +func (c *Client) handleChannelOpens(in <-chan NewChannel) { + for ch := range in { + c.mu.Lock() + handler := c.channelHandlers[ch.ChannelType()] + c.mu.Unlock() + + if handler != nil { + handler <- ch + } else { + ch.Reject(UnknownChannelType, fmt.Sprintf("unknown channel type: %v", ch.ChannelType())) + } + } + + c.mu.Lock() + for _, ch := range c.channelHandlers { + close(ch) + } + c.channelHandlers = nil + c.mu.Unlock() +} + +// Dial starts a client connection to the given SSH server. It is a +// convenience function that connects to the given network address, +// initiates the SSH handshake, and then sets up a Client. For access +// to incoming channels and requests, use net.Dial with NewClientConn +// instead. +func Dial(network, addr string, config *ClientConfig) (*Client, error) { + conn, err := net.DialTimeout(network, addr, config.Timeout) + if err != nil { + return nil, err + } + c, chans, reqs, err := NewClientConn(conn, addr, config) + if err != nil { + return nil, err + } + return NewClient(c, chans, reqs), nil +} + +// HostKeyCallback is the function type used for verifying server +// keys. A HostKeyCallback must return nil if the host key is OK, or +// an error to reject it. 
It receives the hostname as passed to Dial
+// or NewClientConn. The remote address is the RemoteAddr of the
+// net.Conn underlying the SSH connection.
+type HostKeyCallback func(hostname string, remote net.Addr, key PublicKey) error
+
+// BannerCallback is the function type used to handle the banner sent by
+// the server. A BannerCallback receives the message sent by the remote server.
+type BannerCallback func(message string) error
+
+// A ClientConfig structure is used to configure a Client. It must not be
+// modified after having been passed to an SSH function.
+type ClientConfig struct {
+ // Config contains configuration that is shared between clients and
+ // servers.
+ Config
+
+ // User contains the username to authenticate as.
+ User string
+
+ // Auth contains possible authentication methods to use with the
+ // server. Only the first instance of a particular RFC 4252 method will
+ // be used during authentication.
+ Auth []AuthMethod
+
+ // HostKeyCallback is called during the cryptographic
+ // handshake to validate the server's host key. The client
+ // configuration must supply this callback for the connection
+ // to succeed. The functions InsecureIgnoreHostKey or
+ // FixedHostKey can be used for simplistic host key checks.
+ HostKeyCallback HostKeyCallback
+
+ // BannerCallback is called during the SSH handshake to display a custom
+ // message from the server. The client configuration can supply this
+ // callback to handle the banner as desired. The function
+ // BannerDisplayStderr can be used for simplistic display on Stderr.
+ BannerCallback BannerCallback
+
+ // ClientVersion contains the version identification string that will
+ // be used for the connection. If empty, a reasonable default is used.
+ ClientVersion string
+
+ // HostKeyAlgorithms lists the key types that the client will
+ // accept from the server as host key, in order of
+ // preference. If empty, a reasonable default is used. Any
+ // string returned from PublicKey.Type method may be used, or
+ // any of the CertAlgoXxxx and KeyAlgoXxxx constants.
+ HostKeyAlgorithms []string
+
+ // Timeout is the maximum amount of time allowed for the TCP connection
+ // to be established.
+ //
+ // A Timeout of zero means no timeout.
+ Timeout time.Duration
+}
+
+// InsecureIgnoreHostKey returns a function that can be used for
+// ClientConfig.HostKeyCallback to accept any host key. It should
+// not be used for production code.
+func InsecureIgnoreHostKey() HostKeyCallback {
+ return func(hostname string, remote net.Addr, key PublicKey) error {
+ return nil
+ }
+}
+
+type fixedHostKey struct {
+ key PublicKey
+}
+
+func (f *fixedHostKey) check(hostname string, remote net.Addr, key PublicKey) error {
+ if f.key == nil {
+ return fmt.Errorf("ssh: required host key was nil")
+ }
+ if !bytes.Equal(key.Marshal(), f.key.Marshal()) {
+ return fmt.Errorf("ssh: host key mismatch")
+ }
+ return nil
+}
+
+// FixedHostKey returns a function for use in
+// ClientConfig.HostKeyCallback to accept only a specific host key.
+func FixedHostKey(key PublicKey) HostKeyCallback {
+ hk := &fixedHostKey{key}
+ return hk.check
+}
+
+// BannerDisplayStderr returns a function that can be used for
+// ClientConfig.BannerCallback to display banners on os.Stderr.
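// Pinning a server identity with the helpers above, as a sketch: parse a
// known host key (the authorized_keys-format line is a placeholder the
// caller must supply) and reject anything else via FixedHostKey.
package sshexample

import "golang.org/x/crypto/ssh"

func pinnedConfig(user string, hostKeyLine []byte) (*ssh.ClientConfig, error) {
	pub, _, _, _, err := ssh.ParseAuthorizedKey(hostKeyLine)
	if err != nil {
		return nil, err
	}
	return &ssh.ClientConfig{
		User:            user,
		HostKeyCallback: ssh.FixedHostKey(pub),
	}, nil
}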
+func BannerDisplayStderr() BannerCallback { + return func(banner string) error { + _, err := os.Stderr.WriteString(banner) + + return err + } +} diff --git a/vendor/golang.org/x/crypto/ssh/client_auth.go b/vendor/golang.org/x/crypto/ssh/client_auth.go new file mode 100644 index 00000000000..0590070e220 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/client_auth.go @@ -0,0 +1,639 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "bytes" + "errors" + "fmt" + "io" +) + +type authResult int + +const ( + authFailure authResult = iota + authPartialSuccess + authSuccess +) + +// clientAuthenticate authenticates with the remote server. See RFC 4252. +func (c *connection) clientAuthenticate(config *ClientConfig) error { + // initiate user auth session + if err := c.transport.writePacket(Marshal(&serviceRequestMsg{serviceUserAuth})); err != nil { + return err + } + packet, err := c.transport.readPacket() + if err != nil { + return err + } + var serviceAccept serviceAcceptMsg + if err := Unmarshal(packet, &serviceAccept); err != nil { + return err + } + + // during the authentication phase the client first attempts the "none" method + // then any untried methods suggested by the server. + tried := make(map[string]bool) + var lastMethods []string + + sessionID := c.transport.getSessionID() + for auth := AuthMethod(new(noneAuth)); auth != nil; { + ok, methods, err := auth.auth(sessionID, config.User, c.transport, config.Rand) + if err != nil { + return err + } + if ok == authSuccess { + // success + return nil + } else if ok == authFailure { + tried[auth.method()] = true + } + if methods == nil { + methods = lastMethods + } + lastMethods = methods + + auth = nil + + findNext: + for _, a := range config.Auth { + candidateMethod := a.method() + if tried[candidateMethod] { + continue + } + for _, meth := range methods { + if meth == candidateMethod { + auth = a + break findNext + } + } + } + } + return fmt.Errorf("ssh: unable to authenticate, attempted methods %v, no supported methods remain", keys(tried)) +} + +func keys(m map[string]bool) []string { + s := make([]string, 0, len(m)) + + for key := range m { + s = append(s, key) + } + return s +} + +// An AuthMethod represents an instance of an RFC 4252 authentication method. +type AuthMethod interface { + // auth authenticates user over transport t. + // Returns true if authentication is successful. + // If authentication is not successful, a []string of alternative + // method names is returned. If the slice is nil, it will be ignored + // and the previous set of possible methods will be reused. + auth(session []byte, user string, p packetConn, rand io.Reader) (authResult, []string, error) + + // method returns the RFC 4252 method name. + method() string +} + +// "none" authentication, RFC 4252 section 5.2. +type noneAuth int + +func (n *noneAuth) auth(session []byte, user string, c packetConn, rand io.Reader) (authResult, []string, error) { + if err := c.writePacket(Marshal(&userAuthRequestMsg{ + User: user, + Service: serviceSSH, + Method: "none", + })); err != nil { + return authFailure, nil, err + } + + return handleAuthResponse(c) +} + +func (n *noneAuth) method() string { + return "none" +} + +// passwordCallback is an AuthMethod that fetches the password through +// a function call, e.g. by prompting the user. 
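// How the negotiation loop above consumes ClientConfig.Auth: "none" is
// tried first to learn the server's advertised methods, then the
// configured methods are attempted in order, each method name at most
// once unless wrapped in RetryableAuthMethod. A sketch with placeholder
// inputs:
package sshexample

import "golang.org/x/crypto/ssh"

func authStack(signer ssh.Signer, challenge ssh.KeyboardInteractiveChallenge) []ssh.AuthMethod {
	return []ssh.AuthMethod{
		ssh.PublicKeys(signer),
		ssh.RetryableAuthMethod(ssh.KeyboardInteractive(challenge), 3),
		ssh.Password("placeholder-secret"),
	}
}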
+type passwordCallback func() (password string, err error) + +func (cb passwordCallback) auth(session []byte, user string, c packetConn, rand io.Reader) (authResult, []string, error) { + type passwordAuthMsg struct { + User string `sshtype:"50"` + Service string + Method string + Reply bool + Password string + } + + pw, err := cb() + // REVIEW NOTE: is there a need to support skipping a password attempt? + // The program may only find out that the user doesn't have a password + // when prompting. + if err != nil { + return authFailure, nil, err + } + + if err := c.writePacket(Marshal(&passwordAuthMsg{ + User: user, + Service: serviceSSH, + Method: cb.method(), + Reply: false, + Password: pw, + })); err != nil { + return authFailure, nil, err + } + + return handleAuthResponse(c) +} + +func (cb passwordCallback) method() string { + return "password" +} + +// Password returns an AuthMethod using the given password. +func Password(secret string) AuthMethod { + return passwordCallback(func() (string, error) { return secret, nil }) +} + +// PasswordCallback returns an AuthMethod that uses a callback for +// fetching a password. +func PasswordCallback(prompt func() (secret string, err error)) AuthMethod { + return passwordCallback(prompt) +} + +type publickeyAuthMsg struct { + User string `sshtype:"50"` + Service string + Method string + // HasSig indicates to the receiver packet that the auth request is signed and + // should be used for authentication of the request. + HasSig bool + Algoname string + PubKey []byte + // Sig is tagged with "rest" so Marshal will exclude it during + // validateKey + Sig []byte `ssh:"rest"` +} + +// publicKeyCallback is an AuthMethod that uses a set of key +// pairs for authentication. +type publicKeyCallback func() ([]Signer, error) + +func (cb publicKeyCallback) method() string { + return "publickey" +} + +func (cb publicKeyCallback) auth(session []byte, user string, c packetConn, rand io.Reader) (authResult, []string, error) { + // Authentication is performed by sending an enquiry to test if a key is + // acceptable to the remote. If the key is acceptable, the client will + // attempt to authenticate with the valid key. If not the client will repeat + // the process with the remaining keys. + + signers, err := cb() + if err != nil { + return authFailure, nil, err + } + var methods []string + for _, signer := range signers { + ok, err := validateKey(signer.PublicKey(), user, c) + if err != nil { + return authFailure, nil, err + } + if !ok { + continue + } + + pub := signer.PublicKey() + pubKey := pub.Marshal() + sign, err := signer.Sign(rand, buildDataSignedForAuth(session, userAuthRequestMsg{ + User: user, + Service: serviceSSH, + Method: cb.method(), + }, []byte(pub.Type()), pubKey)) + if err != nil { + return authFailure, nil, err + } + + // manually wrap the serialized signature in a string + s := Marshal(sign) + sig := make([]byte, stringLength(len(s))) + marshalString(sig, s) + msg := publickeyAuthMsg{ + User: user, + Service: serviceSSH, + Method: cb.method(), + HasSig: true, + Algoname: pub.Type(), + PubKey: pubKey, + Sig: sig, + } + p := Marshal(&msg) + if err := c.writePacket(p); err != nil { + return authFailure, nil, err + } + var success authResult + success, methods, err = handleAuthResponse(c) + if err != nil { + return authFailure, nil, err + } + + // If authentication succeeds or the list of available methods does not + // contain the "publickey" method, do not attempt to authenticate with any + // other keys. 
According to RFC 4252 Section 7, the latter can occur when + // additional authentication methods are required. + if success == authSuccess || !containsMethod(methods, cb.method()) { + return success, methods, err + } + } + + return authFailure, methods, nil +} + +func containsMethod(methods []string, method string) bool { + for _, m := range methods { + if m == method { + return true + } + } + + return false +} + +// validateKey validates the key provided is acceptable to the server. +func validateKey(key PublicKey, user string, c packetConn) (bool, error) { + pubKey := key.Marshal() + msg := publickeyAuthMsg{ + User: user, + Service: serviceSSH, + Method: "publickey", + HasSig: false, + Algoname: key.Type(), + PubKey: pubKey, + } + if err := c.writePacket(Marshal(&msg)); err != nil { + return false, err + } + + return confirmKeyAck(key, c) +} + +func confirmKeyAck(key PublicKey, c packetConn) (bool, error) { + pubKey := key.Marshal() + algoname := key.Type() + + for { + packet, err := c.readPacket() + if err != nil { + return false, err + } + switch packet[0] { + case msgUserAuthBanner: + if err := handleBannerResponse(c, packet); err != nil { + return false, err + } + case msgUserAuthPubKeyOk: + var msg userAuthPubKeyOkMsg + if err := Unmarshal(packet, &msg); err != nil { + return false, err + } + if msg.Algo != algoname || !bytes.Equal(msg.PubKey, pubKey) { + return false, nil + } + return true, nil + case msgUserAuthFailure: + return false, nil + default: + return false, unexpectedMessageError(msgUserAuthSuccess, packet[0]) + } + } +} + +// PublicKeys returns an AuthMethod that uses the given key +// pairs. +func PublicKeys(signers ...Signer) AuthMethod { + return publicKeyCallback(func() ([]Signer, error) { return signers, nil }) +} + +// PublicKeysCallback returns an AuthMethod that runs the given +// function to obtain a list of key pairs. +func PublicKeysCallback(getSigners func() (signers []Signer, err error)) AuthMethod { + return publicKeyCallback(getSigners) +} + +// handleAuthResponse returns whether the preceding authentication request succeeded +// along with a list of remaining authentication methods to try next and +// an error if an unexpected response was received. +func handleAuthResponse(c packetConn) (authResult, []string, error) { + for { + packet, err := c.readPacket() + if err != nil { + return authFailure, nil, err + } + + switch packet[0] { + case msgUserAuthBanner: + if err := handleBannerResponse(c, packet); err != nil { + return authFailure, nil, err + } + case msgUserAuthFailure: + var msg userAuthFailureMsg + if err := Unmarshal(packet, &msg); err != nil { + return authFailure, nil, err + } + if msg.PartialSuccess { + return authPartialSuccess, msg.Methods, nil + } + return authFailure, msg.Methods, nil + case msgUserAuthSuccess: + return authSuccess, nil, nil + default: + return authFailure, nil, unexpectedMessageError(msgUserAuthSuccess, packet[0]) + } + } +} + +func handleBannerResponse(c packetConn, packet []byte) error { + var msg userAuthBannerMsg + if err := Unmarshal(packet, &msg); err != nil { + return err + } + + transport, ok := c.(*handshakeTransport) + if !ok { + return nil + } + + if transport.bannerCallback != nil { + return transport.bannerCallback(msg.Message) + } + + return nil +} + +// KeyboardInteractiveChallenge should print questions, optionally +// disabling echoing (e.g. for passwords), and return all the answers. +// Challenge may be called multiple times in a single session. 
After +// successful authentication, the server may send a challenge with no +// questions, for which the user and instruction messages should be +// printed. RFC 4256 section 3.3 details how the UI should behave for +// both CLI and GUI environments. +type KeyboardInteractiveChallenge func(user, instruction string, questions []string, echos []bool) (answers []string, err error) + +// KeyboardInteractive returns an AuthMethod using a prompt/response +// sequence controlled by the server. +func KeyboardInteractive(challenge KeyboardInteractiveChallenge) AuthMethod { + return challenge +} + +func (cb KeyboardInteractiveChallenge) method() string { + return "keyboard-interactive" +} + +func (cb KeyboardInteractiveChallenge) auth(session []byte, user string, c packetConn, rand io.Reader) (authResult, []string, error) { + type initiateMsg struct { + User string `sshtype:"50"` + Service string + Method string + Language string + Submethods string + } + + if err := c.writePacket(Marshal(&initiateMsg{ + User: user, + Service: serviceSSH, + Method: "keyboard-interactive", + })); err != nil { + return authFailure, nil, err + } + + for { + packet, err := c.readPacket() + if err != nil { + return authFailure, nil, err + } + + // like handleAuthResponse, but with less options. + switch packet[0] { + case msgUserAuthBanner: + if err := handleBannerResponse(c, packet); err != nil { + return authFailure, nil, err + } + continue + case msgUserAuthInfoRequest: + // OK + case msgUserAuthFailure: + var msg userAuthFailureMsg + if err := Unmarshal(packet, &msg); err != nil { + return authFailure, nil, err + } + if msg.PartialSuccess { + return authPartialSuccess, msg.Methods, nil + } + return authFailure, msg.Methods, nil + case msgUserAuthSuccess: + return authSuccess, nil, nil + default: + return authFailure, nil, unexpectedMessageError(msgUserAuthInfoRequest, packet[0]) + } + + var msg userAuthInfoRequestMsg + if err := Unmarshal(packet, &msg); err != nil { + return authFailure, nil, err + } + + // Manually unpack the prompt/echo pairs. 
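// The caller-side counterpart to the prompt unpacking below: a minimal
// KeyboardInteractiveChallenge that answers every question from standard
// input. A real client would suppress terminal echo when echos[i] is
// false; this sketch deliberately ignores that detail.
package sshexample

import (
	"bufio"
	"fmt"
	"os"
	"strings"
)

func stdinChallenge(user, instruction string, questions []string, echos []bool) ([]string, error) {
	if instruction != "" {
		fmt.Println(instruction)
	}
	in := bufio.NewReader(os.Stdin)
	answers := make([]string, len(questions))
	for i, q := range questions {
		fmt.Print(q)
		line, err := in.ReadString('\n')
		if err != nil {
			return nil, err
		}
		answers[i] = strings.TrimSuffix(line, "\n")
	}
	// The auth loop rejects the response unless len(answers) == len(questions).
	return answers, nil
}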
+ rest := msg.Prompts + var prompts []string + var echos []bool + for i := 0; i < int(msg.NumPrompts); i++ { + prompt, r, ok := parseString(rest) + if !ok || len(r) == 0 { + return authFailure, nil, errors.New("ssh: prompt format error") + } + prompts = append(prompts, string(prompt)) + echos = append(echos, r[0] != 0) + rest = r[1:] + } + + if len(rest) != 0 { + return authFailure, nil, errors.New("ssh: extra data following keyboard-interactive pairs") + } + + answers, err := cb(msg.User, msg.Instruction, prompts, echos) + if err != nil { + return authFailure, nil, err + } + + if len(answers) != len(prompts) { + return authFailure, nil, errors.New("ssh: not enough answers from keyboard-interactive callback") + } + responseLength := 1 + 4 + for _, a := range answers { + responseLength += stringLength(len(a)) + } + serialized := make([]byte, responseLength) + p := serialized + p[0] = msgUserAuthInfoResponse + p = p[1:] + p = marshalUint32(p, uint32(len(answers))) + for _, a := range answers { + p = marshalString(p, []byte(a)) + } + + if err := c.writePacket(serialized); err != nil { + return authFailure, nil, err + } + } +} + +type retryableAuthMethod struct { + authMethod AuthMethod + maxTries int +} + +func (r *retryableAuthMethod) auth(session []byte, user string, c packetConn, rand io.Reader) (ok authResult, methods []string, err error) { + for i := 0; r.maxTries <= 0 || i < r.maxTries; i++ { + ok, methods, err = r.authMethod.auth(session, user, c, rand) + if ok != authFailure || err != nil { // either success, partial success or error terminate + return ok, methods, err + } + } + return ok, methods, err +} + +func (r *retryableAuthMethod) method() string { + return r.authMethod.method() +} + +// RetryableAuthMethod is a decorator for other auth methods enabling them to +// be retried up to maxTries before considering that AuthMethod itself failed. +// If maxTries is <= 0, will retry indefinitely +// +// This is useful for interactive clients using challenge/response type +// authentication (e.g. Keyboard-Interactive, Password, etc) where the user +// could mistype their response resulting in the server issuing a +// SSH_MSG_USERAUTH_FAILURE (rfc4252 #8 [password] and rfc4256 #3.4 +// [keyboard-interactive]); Without this decorator, the non-retryable +// AuthMethod would be removed from future consideration, and never tried again +// (and so the user would never be able to retry their entry). +func RetryableAuthMethod(auth AuthMethod, maxTries int) AuthMethod { + return &retryableAuthMethod{authMethod: auth, maxTries: maxTries} +} + +// GSSAPIWithMICAuthMethod is an AuthMethod with "gssapi-with-mic" authentication. +// See RFC 4462 section 3 +// gssAPIClient is implementation of the GSSAPIClient interface, see the definition of the interface for details. +// target is the server host you want to log in to. +func GSSAPIWithMICAuthMethod(gssAPIClient GSSAPIClient, target string) AuthMethod { + if gssAPIClient == nil { + panic("gss-api client must be not nil with enable gssapi-with-mic") + } + return &gssAPIWithMICCallback{gssAPIClient: gssAPIClient, target: target} +} + +type gssAPIWithMICCallback struct { + gssAPIClient GSSAPIClient + target string +} + +func (g *gssAPIWithMICCallback) auth(session []byte, user string, c packetConn, rand io.Reader) (authResult, []string, error) { + m := &userAuthRequestMsg{ + User: user, + Service: serviceSSH, + Method: g.method(), + } + // The GSS-API authentication method is initiated when the client sends an SSH_MSG_USERAUTH_REQUEST. 
+ // See RFC 4462 section 3.2. + m.Payload = appendU32(m.Payload, 1) + m.Payload = appendString(m.Payload, string(krb5OID)) + if err := c.writePacket(Marshal(m)); err != nil { + return authFailure, nil, err + } + // The server responds to the SSH_MSG_USERAUTH_REQUEST with either an + // SSH_MSG_USERAUTH_FAILURE if none of the mechanisms are supported or + // with an SSH_MSG_USERAUTH_GSSAPI_RESPONSE. + // See RFC 4462 section 3.3. + // OpenSSH supports Kerberos V5 mechanism only for GSS-API authentication,so I don't want to check + // selected mech if it is valid. + packet, err := c.readPacket() + if err != nil { + return authFailure, nil, err + } + userAuthGSSAPIResp := &userAuthGSSAPIResponse{} + if err := Unmarshal(packet, userAuthGSSAPIResp); err != nil { + return authFailure, nil, err + } + // Start the loop into the exchange token. + // See RFC 4462 section 3.4. + var token []byte + defer g.gssAPIClient.DeleteSecContext() + for { + // Initiates the establishment of a security context between the application and a remote peer. + nextToken, needContinue, err := g.gssAPIClient.InitSecContext("host@"+g.target, token, false) + if err != nil { + return authFailure, nil, err + } + if len(nextToken) > 0 { + if err := c.writePacket(Marshal(&userAuthGSSAPIToken{ + Token: nextToken, + })); err != nil { + return authFailure, nil, err + } + } + if !needContinue { + break + } + packet, err = c.readPacket() + if err != nil { + return authFailure, nil, err + } + switch packet[0] { + case msgUserAuthFailure: + var msg userAuthFailureMsg + if err := Unmarshal(packet, &msg); err != nil { + return authFailure, nil, err + } + if msg.PartialSuccess { + return authPartialSuccess, msg.Methods, nil + } + return authFailure, msg.Methods, nil + case msgUserAuthGSSAPIError: + userAuthGSSAPIErrorResp := &userAuthGSSAPIError{} + if err := Unmarshal(packet, userAuthGSSAPIErrorResp); err != nil { + return authFailure, nil, err + } + return authFailure, nil, fmt.Errorf("GSS-API Error:\n"+ + "Major Status: %d\n"+ + "Minor Status: %d\n"+ + "Error Message: %s\n", userAuthGSSAPIErrorResp.MajorStatus, userAuthGSSAPIErrorResp.MinorStatus, + userAuthGSSAPIErrorResp.Message) + case msgUserAuthGSSAPIToken: + userAuthGSSAPITokenReq := &userAuthGSSAPIToken{} + if err := Unmarshal(packet, userAuthGSSAPITokenReq); err != nil { + return authFailure, nil, err + } + token = userAuthGSSAPITokenReq.Token + } + } + // Binding Encryption Keys. + // See RFC 4462 section 3.5. + micField := buildMIC(string(session), user, "ssh-connection", "gssapi-with-mic") + micToken, err := g.gssAPIClient.GetMIC(micField) + if err != nil { + return authFailure, nil, err + } + if err := c.writePacket(Marshal(&userAuthGSSAPIMIC{ + MIC: micToken, + })); err != nil { + return authFailure, nil, err + } + return handleAuthResponse(c) +} + +func (g *gssAPIWithMICCallback) method() string { + return "gssapi-with-mic" +} diff --git a/vendor/golang.org/x/crypto/ssh/common.go b/vendor/golang.org/x/crypto/ssh/common.go new file mode 100644 index 00000000000..290382d059e --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/common.go @@ -0,0 +1,404 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "crypto" + "crypto/rand" + "fmt" + "io" + "math" + "sync" + + _ "crypto/sha1" + _ "crypto/sha256" + _ "crypto/sha512" +) + +// These are string constants in the SSH protocol. 
+const ( + compressionNone = "none" + serviceUserAuth = "ssh-userauth" + serviceSSH = "ssh-connection" +) + +// supportedCiphers lists ciphers we support but might not recommend. +var supportedCiphers = []string{ + "aes128-ctr", "aes192-ctr", "aes256-ctr", + "aes128-gcm@openssh.com", + chacha20Poly1305ID, + "arcfour256", "arcfour128", "arcfour", + aes128cbcID, + tripledescbcID, +} + +// preferredCiphers specifies the default preference for ciphers. +var preferredCiphers = []string{ + "aes128-gcm@openssh.com", + chacha20Poly1305ID, + "aes128-ctr", "aes192-ctr", "aes256-ctr", +} + +// supportedKexAlgos specifies the supported key-exchange algorithms in +// preference order. +var supportedKexAlgos = []string{ + kexAlgoCurve25519SHA256, + // P384 and P521 are not constant-time yet, but since we don't + // reuse ephemeral keys, using them for ECDH should be OK. + kexAlgoECDH256, kexAlgoECDH384, kexAlgoECDH521, + kexAlgoDH14SHA1, kexAlgoDH1SHA1, +} + +// serverForbiddenKexAlgos contains key exchange algorithms, that are forbidden +// for the server half. +var serverForbiddenKexAlgos = map[string]struct{}{ + kexAlgoDHGEXSHA1: {}, // server half implementation is only minimal to satisfy the automated tests + kexAlgoDHGEXSHA256: {}, // server half implementation is only minimal to satisfy the automated tests +} + +// preferredKexAlgos specifies the default preference for key-exchange algorithms +// in preference order. +var preferredKexAlgos = []string{ + kexAlgoCurve25519SHA256, + kexAlgoECDH256, kexAlgoECDH384, kexAlgoECDH521, + kexAlgoDH14SHA1, +} + +// supportedHostKeyAlgos specifies the supported host-key algorithms (i.e. methods +// of authenticating servers) in preference order. +var supportedHostKeyAlgos = []string{ + CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, + CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoED25519v01, + + KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521, + KeyAlgoRSA, KeyAlgoDSA, + + KeyAlgoED25519, +} + +// supportedMACs specifies a default set of MAC algorithms in preference order. +// This is based on RFC 4253, section 6.4, but with hmac-md5 variants removed +// because they have reached the end of their useful life. +var supportedMACs = []string{ + "hmac-sha2-256-etm@openssh.com", "hmac-sha2-256", "hmac-sha1", "hmac-sha1-96", +} + +var supportedCompressions = []string{compressionNone} + +// hashFuncs keeps the mapping of supported algorithms to their respective +// hashes needed for signature verification. +var hashFuncs = map[string]crypto.Hash{ + KeyAlgoRSA: crypto.SHA1, + KeyAlgoDSA: crypto.SHA1, + KeyAlgoECDSA256: crypto.SHA256, + KeyAlgoECDSA384: crypto.SHA384, + KeyAlgoECDSA521: crypto.SHA512, + CertAlgoRSAv01: crypto.SHA1, + CertAlgoDSAv01: crypto.SHA1, + CertAlgoECDSA256v01: crypto.SHA256, + CertAlgoECDSA384v01: crypto.SHA384, + CertAlgoECDSA521v01: crypto.SHA512, +} + +// unexpectedMessageError results when the SSH message that we received didn't +// match what we wanted. +func unexpectedMessageError(expected, got uint8) error { + return fmt.Errorf("ssh: unexpected message type %d (expected %d)", got, expected) +} + +// parseError results from a malformed SSH message. 
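// The preference lists above are defaults, not hard limits: Config and
// ClientConfig expose fields for narrowing them per connection. A sketch
// pinning a modern subset; the MAC and cipher names appear in the tables
// above, and the kex string is assumed to be the wire name behind
// kexAlgoCurve25519SHA256 in this vintage of the package:
package sshexample

import "golang.org/x/crypto/ssh"

func strictAlgorithms(cfg *ssh.ClientConfig) {
	cfg.KeyExchanges = []string{"curve25519-sha256@libssh.org"}
	cfg.Ciphers = []string{"chacha20-poly1305@openssh.com", "aes128-gcm@openssh.com"}
	cfg.MACs = []string{"hmac-sha2-256-etm@openssh.com"}
	cfg.HostKeyAlgorithms = []string{ssh.KeyAlgoED25519, ssh.KeyAlgoECDSA256}
}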
+func parseError(tag uint8) error {
+ return fmt.Errorf("ssh: parse error in message type %d", tag)
+}
+
+func findCommon(what string, client []string, server []string) (common string, err error) {
+ for _, c := range client {
+ for _, s := range server {
+ if c == s {
+ return c, nil
+ }
+ }
+ }
+ return "", fmt.Errorf("ssh: no common algorithm for %s; client offered: %v, server offered: %v", what, client, server)
+}
+
+// directionAlgorithms records algorithm choices in one direction (either read or write).
+type directionAlgorithms struct {
+ Cipher string
+ MAC string
+ Compression string
+}
+
+// rekeyBytes returns the rekeying interval in bytes.
+func (a *directionAlgorithms) rekeyBytes() int64 {
+ // According to RFC 4344, block ciphers should rekey after
+ // 2^(BLOCKSIZE/4) blocks. For all AES flavors BLOCKSIZE is
+ // 128.
+ switch a.Cipher {
+ case "aes128-ctr", "aes192-ctr", "aes256-ctr", gcmCipherID, aes128cbcID:
+ return 16 * (1 << 32)
+
+ }
+
+ // For others, stick with the RFC 4253 recommendation to rekey after 1 GB of data.
+ return 1 << 30
+}
+
+type algorithms struct {
+ kex string
+ hostKey string
+ w directionAlgorithms
+ r directionAlgorithms
+}
+
+func findAgreedAlgorithms(isClient bool, clientKexInit, serverKexInit *kexInitMsg) (algs *algorithms, err error) {
+ result := &algorithms{}
+
+ result.kex, err = findCommon("key exchange", clientKexInit.KexAlgos, serverKexInit.KexAlgos)
+ if err != nil {
+ return
+ }
+
+ result.hostKey, err = findCommon("host key", clientKexInit.ServerHostKeyAlgos, serverKexInit.ServerHostKeyAlgos)
+ if err != nil {
+ return
+ }
+
+ stoc, ctos := &result.w, &result.r
+ if isClient {
+ ctos, stoc = stoc, ctos
+ }
+
+ ctos.Cipher, err = findCommon("client to server cipher", clientKexInit.CiphersClientServer, serverKexInit.CiphersClientServer)
+ if err != nil {
+ return
+ }
+
+ stoc.Cipher, err = findCommon("server to client cipher", clientKexInit.CiphersServerClient, serverKexInit.CiphersServerClient)
+ if err != nil {
+ return
+ }
+
+ ctos.MAC, err = findCommon("client to server MAC", clientKexInit.MACsClientServer, serverKexInit.MACsClientServer)
+ if err != nil {
+ return
+ }
+
+ stoc.MAC, err = findCommon("server to client MAC", clientKexInit.MACsServerClient, serverKexInit.MACsServerClient)
+ if err != nil {
+ return
+ }
+
+ ctos.Compression, err = findCommon("client to server compression", clientKexInit.CompressionClientServer, serverKexInit.CompressionClientServer)
+ if err != nil {
+ return
+ }
+
+ stoc.Compression, err = findCommon("server to client compression", clientKexInit.CompressionServerClient, serverKexInit.CompressionServerClient)
+ if err != nil {
+ return
+ }
+
+ return result, nil
+}
+
+// If RekeyThreshold is too small, we can't make any progress sending
+// data.
+const minRekeyThreshold uint64 = 256
+
+// Config contains configuration data common to both ServerConfig and
+// ClientConfig.
+type Config struct {
+ // Rand provides the source of entropy for cryptographic
+ // primitives. If Rand is nil, the cryptographic random reader
+ // in package crypto/rand will be used.
+ Rand io.Reader
+
+ // The maximum number of bytes sent or received after which a
+ // new key is negotiated. It must be at least 256. If
+ // unspecified, a size suitable for the chosen cipher is used.
+ RekeyThreshold uint64
+
+ // The allowed key exchange algorithms. If unspecified then a
+ // default set of algorithms is used.
+ KeyExchanges []string
+
+ // The allowed cipher algorithms. If unspecified then a sensible
+ // default is used.
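+ //
+ // For example, a caller that wants AES-GCM only could set
+ // (illustrative values, not a recommendation):
+ //
+ // Ciphers: []string{"aes128-gcm@openssh.com"}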
+ Ciphers []string + + // The allowed MAC algorithms. If unspecified then a sensible default + // is used. + MACs []string +} + +// SetDefaults sets sensible values for unset fields in config. This is +// exported for testing: Configs passed to SSH functions are copied and have +// default values set automatically. +func (c *Config) SetDefaults() { + if c.Rand == nil { + c.Rand = rand.Reader + } + if c.Ciphers == nil { + c.Ciphers = preferredCiphers + } + var ciphers []string + for _, c := range c.Ciphers { + if cipherModes[c] != nil { + // reject the cipher if we have no cipherModes definition + ciphers = append(ciphers, c) + } + } + c.Ciphers = ciphers + + if c.KeyExchanges == nil { + c.KeyExchanges = preferredKexAlgos + } + + if c.MACs == nil { + c.MACs = supportedMACs + } + + if c.RekeyThreshold == 0 { + // cipher specific default + } else if c.RekeyThreshold < minRekeyThreshold { + c.RekeyThreshold = minRekeyThreshold + } else if c.RekeyThreshold >= math.MaxInt64 { + // Avoid weirdness if somebody uses -1 as a threshold. + c.RekeyThreshold = math.MaxInt64 + } +} + +// buildDataSignedForAuth returns the data that is signed in order to prove +// possession of a private key. See RFC 4252, section 7. +func buildDataSignedForAuth(sessionID []byte, req userAuthRequestMsg, algo, pubKey []byte) []byte { + data := struct { + Session []byte + Type byte + User string + Service string + Method string + Sign bool + Algo []byte + PubKey []byte + }{ + sessionID, + msgUserAuthRequest, + req.User, + req.Service, + req.Method, + true, + algo, + pubKey, + } + return Marshal(data) +} + +func appendU16(buf []byte, n uint16) []byte { + return append(buf, byte(n>>8), byte(n)) +} + +func appendU32(buf []byte, n uint32) []byte { + return append(buf, byte(n>>24), byte(n>>16), byte(n>>8), byte(n)) +} + +func appendU64(buf []byte, n uint64) []byte { + return append(buf, + byte(n>>56), byte(n>>48), byte(n>>40), byte(n>>32), + byte(n>>24), byte(n>>16), byte(n>>8), byte(n)) +} + +func appendInt(buf []byte, n int) []byte { + return appendU32(buf, uint32(n)) +} + +func appendString(buf []byte, s string) []byte { + buf = appendU32(buf, uint32(len(s))) + buf = append(buf, s...) + return buf +} + +func appendBool(buf []byte, b bool) []byte { + if b { + return append(buf, 1) + } + return append(buf, 0) +} + +// newCond is a helper to hide the fact that there is no usable zero +// value for sync.Cond. +func newCond() *sync.Cond { return sync.NewCond(new(sync.Mutex)) } + +// window represents the buffer available to clients +// wishing to write to a channel. +type window struct { + *sync.Cond + win uint32 // RFC 4254 5.2 says the window size can grow to 2^32-1 + writeWaiters int + closed bool +} + +// add adds win to the amount of window available +// for consumers. +func (w *window) add(win uint32) bool { + // a zero sized window adjust is a noop. + if win == 0 { + return true + } + w.L.Lock() + if w.win+win < win { + w.L.Unlock() + return false + } + w.win += win + // It is unusual that multiple goroutines would be attempting to reserve + // window space, but not guaranteed. Use broadcast to notify all waiters + // that additional window is available. + w.Broadcast() + w.L.Unlock() + return true +} + +// close sets the window to closed, so all reservations fail +// immediately. +func (w *window) close() { + w.L.Lock() + w.closed = true + w.Broadcast() + w.L.Unlock() +} + +// reserve reserves win from the available window capacity. +// If no capacity remains, reserve will block. reserve may +// return less than requested. 
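+//
+// A typical writer reserves space before each write and retries with
+// the remainder, along the lines of this sketch:
+//
+// n, err := w.reserve(uint32(len(data)))
+// // send n bytes, then continue with data[n:]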
+func (w *window) reserve(win uint32) (uint32, error) {
+ var err error
+ w.L.Lock()
+ w.writeWaiters++
+ w.Broadcast()
+ for w.win == 0 && !w.closed {
+ w.Wait()
+ }
+ w.writeWaiters--
+ if w.win < win {
+ win = w.win
+ }
+ w.win -= win
+ if w.closed {
+ err = io.EOF
+ }
+ w.L.Unlock()
+ return win, err
+}
+
+// waitWriterBlocked waits until some goroutine is blocked for further
+// writes. It is used in tests only.
+func (w *window) waitWriterBlocked() {
+ w.Cond.L.Lock()
+ for w.writeWaiters == 0 {
+ w.Cond.Wait()
+ }
+ w.Cond.L.Unlock()
+}
diff --git a/vendor/golang.org/x/crypto/ssh/connection.go b/vendor/golang.org/x/crypto/ssh/connection.go
new file mode 100644
index 00000000000..fd6b0681b51
--- /dev/null
+++ b/vendor/golang.org/x/crypto/ssh/connection.go
@@ -0,0 +1,143 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssh
+
+import (
+ "fmt"
+ "net"
+)
+
+// OpenChannelError is returned if the other side rejects an
+// OpenChannel request.
+type OpenChannelError struct {
+ Reason RejectionReason
+ Message string
+}
+
+func (e *OpenChannelError) Error() string {
+ return fmt.Sprintf("ssh: rejected: %s (%s)", e.Reason, e.Message)
+}
+
+// ConnMetadata holds metadata for the connection.
+type ConnMetadata interface {
+ // User returns the user ID for this connection.
+ User() string
+
+ // SessionID returns the session hash, also denoted by H.
+ SessionID() []byte
+
+ // ClientVersion returns the client's version string as hashed
+ // into the session ID.
+ ClientVersion() []byte
+
+ // ServerVersion returns the server's version string as hashed
+ // into the session ID.
+ ServerVersion() []byte
+
+ // RemoteAddr returns the remote address for this connection.
+ RemoteAddr() net.Addr
+
+ // LocalAddr returns the local address for this connection.
+ LocalAddr() net.Addr
+}
+
+// Conn represents an SSH connection for both server and client roles.
+// Conn is the basis for implementing an application layer, such
+// as ClientConn, which implements the traditional shell access for
+// clients.
+type Conn interface {
+ ConnMetadata
+
+ // SendRequest sends a global request, and returns the
+ // reply. If wantReply is true, it returns the response status
+ // and payload. See also RFC 4254, section 4.
+ SendRequest(name string, wantReply bool, payload []byte) (bool, []byte, error)
+
+ // OpenChannel tries to open a channel. If the request is
+ // rejected, it returns *OpenChannelError. On success it returns
+ // the SSH Channel and a Go channel for incoming, out-of-band
+ // requests. The Go channel must be serviced, or the
+ // connection will hang.
+ OpenChannel(name string, data []byte) (Channel, <-chan *Request, error)
+
+ // Close closes the underlying network connection.
+ Close() error
+
+ // Wait blocks until the connection has shut down, and returns the
+ // error causing the shutdown.
+ Wait() error
+
+ // TODO(hanwen): consider exposing:
+ // RequestKeyChange
+ // Disconnect
+}
+
+// DiscardRequests consumes and rejects all requests from the
+// passed-in channel.
+func DiscardRequests(in <-chan *Request) {
+ for req := range in {
+ if req.WantReply {
+ req.Reply(false, nil)
+ }
+ }
+}
+
+// A connection represents an incoming connection.
+type connection struct {
+ transport *handshakeTransport
+ sshConn
+
+ // The connection protocol.
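+ // The embedded mux multiplexes channels and global requests
+ // over the transport (RFC 4254).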
+ *mux
+}
+
+func (c *connection) Close() error {
+ return c.sshConn.conn.Close()
+}
+
+// sshConn provides net.Conn metadata, but disallows direct reads and
+// writes.
+type sshConn struct {
+ conn net.Conn
+
+ user string
+ sessionID []byte
+ clientVersion []byte
+ serverVersion []byte
+}
+
+func dup(src []byte) []byte {
+ dst := make([]byte, len(src))
+ copy(dst, src)
+ return dst
+}
+
+func (c *sshConn) User() string {
+ return c.user
+}
+
+func (c *sshConn) RemoteAddr() net.Addr {
+ return c.conn.RemoteAddr()
+}
+
+func (c *sshConn) Close() error {
+ return c.conn.Close()
+}
+
+func (c *sshConn) LocalAddr() net.Addr {
+ return c.conn.LocalAddr()
+}
+
+func (c *sshConn) SessionID() []byte {
+ return dup(c.sessionID)
+}
+
+func (c *sshConn) ClientVersion() []byte {
+ return dup(c.clientVersion)
+}
+
+func (c *sshConn) ServerVersion() []byte {
+ return dup(c.serverVersion)
+}
diff --git a/vendor/golang.org/x/crypto/ssh/doc.go b/vendor/golang.org/x/crypto/ssh/doc.go
new file mode 100644
index 00000000000..67b7322c058
--- /dev/null
+++ b/vendor/golang.org/x/crypto/ssh/doc.go
@@ -0,0 +1,21 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package ssh implements an SSH client and server.
+
+SSH is a transport security protocol, an authentication protocol and a
+family of application protocols. The most typical application-level
+protocol is a remote shell, and this is specifically implemented. However,
+the multiplexed nature of SSH is exposed to users that wish to support
+others.
+
+References:
+ [PROTOCOL.certkeys]: http://cvsweb.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL.certkeys?rev=HEAD
+ [SSH-PARAMETERS]: http://www.iana.org/assignments/ssh-parameters/ssh-parameters.xml#ssh-parameters-1
+
+This package does not fall under the stability promise of the Go language itself,
+so its API may be changed when pressing needs arise.
+*/
+package ssh // import "golang.org/x/crypto/ssh"
diff --git a/vendor/golang.org/x/crypto/ssh/handshake.go b/vendor/golang.org/x/crypto/ssh/handshake.go
new file mode 100644
index 00000000000..2b10b05a498
--- /dev/null
+++ b/vendor/golang.org/x/crypto/ssh/handshake.go
@@ -0,0 +1,647 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssh
+
+import (
+ "crypto/rand"
+ "errors"
+ "fmt"
+ "io"
+ "log"
+ "net"
+ "sync"
+)
+
+// debugHandshake, if set, prints messages sent and received. Key
+// exchange messages are printed as if DH were used, so the debug
+// messages are wrong when using ECDH.
+const debugHandshake = false
+
+// chanSize sets the amount of buffering for SSH connections. This is
+// primarily for testing: setting chanSize=0 uncovers deadlocks more
+// quickly.
+const chanSize = 16
+
+// keyingTransport is a packet-based transport that supports key
+// changes. It need not be thread-safe. It should pass through
+// msgNewKeys in both directions.
+type keyingTransport interface {
+ packetConn
+
+ // prepareKeyChange sets up a key change. The key change for a
+ // direction will be effected if a msgNewKeys message is sent
+ // or received.
+ prepareKeyChange(*algorithms, *kexResult) error
+}
+
+// handshakeTransport implements rekeying on top of a keyingTransport
+// and offers a thread-safe writePacket() interface.
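+// Packets written while a key exchange is in progress are queued in
+// pendingPackets and flushed once the exchange completes, so callers
+// of writePacket never have to coordinate with rekeying themselves.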
+type handshakeTransport struct { + conn keyingTransport + config *Config + + serverVersion []byte + clientVersion []byte + + // hostKeys is non-empty if we are the server. In that case, + // it contains all host keys that can be used to sign the + // connection. + hostKeys []Signer + + // hostKeyAlgorithms is non-empty if we are the client. In that case, + // we accept these key types from the server as host key. + hostKeyAlgorithms []string + + // On read error, incoming is closed, and readError is set. + incoming chan []byte + readError error + + mu sync.Mutex + writeError error + sentInitPacket []byte + sentInitMsg *kexInitMsg + pendingPackets [][]byte // Used when a key exchange is in progress. + + // If the read loop wants to schedule a kex, it pings this + // channel, and the write loop will send out a kex + // message. + requestKex chan struct{} + + // If the other side requests or confirms a kex, its kexInit + // packet is sent here for the write loop to find it. + startKex chan *pendingKex + + // data for host key checking + hostKeyCallback HostKeyCallback + dialAddress string + remoteAddr net.Addr + + // bannerCallback is non-empty if we are the client and it has been set in + // ClientConfig. In that case it is called during the user authentication + // dance to handle a custom server's message. + bannerCallback BannerCallback + + // Algorithms agreed in the last key exchange. + algorithms *algorithms + + readPacketsLeft uint32 + readBytesLeft int64 + + writePacketsLeft uint32 + writeBytesLeft int64 + + // The session ID or nil if first kex did not complete yet. + sessionID []byte +} + +type pendingKex struct { + otherInit []byte + done chan error +} + +func newHandshakeTransport(conn keyingTransport, config *Config, clientVersion, serverVersion []byte) *handshakeTransport { + t := &handshakeTransport{ + conn: conn, + serverVersion: serverVersion, + clientVersion: clientVersion, + incoming: make(chan []byte, chanSize), + requestKex: make(chan struct{}, 1), + startKex: make(chan *pendingKex, 1), + + config: config, + } + t.resetReadThresholds() + t.resetWriteThresholds() + + // We always start with a mandatory key exchange. + t.requestKex <- struct{}{} + return t +} + +func newClientTransport(conn keyingTransport, clientVersion, serverVersion []byte, config *ClientConfig, dialAddr string, addr net.Addr) *handshakeTransport { + t := newHandshakeTransport(conn, &config.Config, clientVersion, serverVersion) + t.dialAddress = dialAddr + t.remoteAddr = addr + t.hostKeyCallback = config.HostKeyCallback + t.bannerCallback = config.BannerCallback + if config.HostKeyAlgorithms != nil { + t.hostKeyAlgorithms = config.HostKeyAlgorithms + } else { + t.hostKeyAlgorithms = supportedHostKeyAlgos + } + go t.readLoop() + go t.kexLoop() + return t +} + +func newServerTransport(conn keyingTransport, clientVersion, serverVersion []byte, config *ServerConfig) *handshakeTransport { + t := newHandshakeTransport(conn, &config.Config, clientVersion, serverVersion) + t.hostKeys = config.hostKeys + go t.readLoop() + go t.kexLoop() + return t +} + +func (t *handshakeTransport) getSessionID() []byte { + return t.sessionID +} + +// waitSession waits for the session to be established. This should be +// the first thing to call after instantiating handshakeTransport. 
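+//
+// A sketch of the expected call order on the client side (names as in
+// this file):
+//
+// t := newClientTransport(conn, clientVersion, serverVersion, config, dialAddr, addr)
+// if err := t.waitSession(); err != nil {
+// // handshake failed
+// }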
+func (t *handshakeTransport) waitSession() error { + p, err := t.readPacket() + if err != nil { + return err + } + if p[0] != msgNewKeys { + return fmt.Errorf("ssh: first packet should be msgNewKeys") + } + + return nil +} + +func (t *handshakeTransport) id() string { + if len(t.hostKeys) > 0 { + return "server" + } + return "client" +} + +func (t *handshakeTransport) printPacket(p []byte, write bool) { + action := "got" + if write { + action = "sent" + } + + if p[0] == msgChannelData || p[0] == msgChannelExtendedData { + log.Printf("%s %s data (packet %d bytes)", t.id(), action, len(p)) + } else { + msg, err := decode(p) + log.Printf("%s %s %T %v (%v)", t.id(), action, msg, msg, err) + } +} + +func (t *handshakeTransport) readPacket() ([]byte, error) { + p, ok := <-t.incoming + if !ok { + return nil, t.readError + } + return p, nil +} + +func (t *handshakeTransport) readLoop() { + first := true + for { + p, err := t.readOnePacket(first) + first = false + if err != nil { + t.readError = err + close(t.incoming) + break + } + if p[0] == msgIgnore || p[0] == msgDebug { + continue + } + t.incoming <- p + } + + // Stop writers too. + t.recordWriteError(t.readError) + + // Unblock the writer should it wait for this. + close(t.startKex) + + // Don't close t.requestKex; it's also written to from writePacket. +} + +func (t *handshakeTransport) pushPacket(p []byte) error { + if debugHandshake { + t.printPacket(p, true) + } + return t.conn.writePacket(p) +} + +func (t *handshakeTransport) getWriteError() error { + t.mu.Lock() + defer t.mu.Unlock() + return t.writeError +} + +func (t *handshakeTransport) recordWriteError(err error) { + t.mu.Lock() + defer t.mu.Unlock() + if t.writeError == nil && err != nil { + t.writeError = err + } +} + +func (t *handshakeTransport) requestKeyExchange() { + select { + case t.requestKex <- struct{}{}: + default: + // something already requested a kex, so do nothing. + } +} + +func (t *handshakeTransport) resetWriteThresholds() { + t.writePacketsLeft = packetRekeyThreshold + if t.config.RekeyThreshold > 0 { + t.writeBytesLeft = int64(t.config.RekeyThreshold) + } else if t.algorithms != nil { + t.writeBytesLeft = t.algorithms.w.rekeyBytes() + } else { + t.writeBytesLeft = 1 << 30 + } +} + +func (t *handshakeTransport) kexLoop() { + +write: + for t.getWriteError() == nil { + var request *pendingKex + var sent bool + + for request == nil || !sent { + var ok bool + select { + case request, ok = <-t.startKex: + if !ok { + break write + } + case <-t.requestKex: + break + } + + if !sent { + if err := t.sendKexInit(); err != nil { + t.recordWriteError(err) + break + } + sent = true + } + } + + if err := t.getWriteError(); err != nil { + if request != nil { + request.done <- err + } + break + } + + // We're not servicing t.requestKex, but that is OK: + // we never block on sending to t.requestKex. + + // We're not servicing t.startKex, but the remote end + // has just sent us a kexInitMsg, so it can't send + // another key change request, until we close the done + // channel on the pendingKex request. + + err := t.enterKeyExchange(request.otherInit) + + t.mu.Lock() + t.writeError = err + t.sentInitPacket = nil + t.sentInitMsg = nil + + t.resetWriteThresholds() + + // we have completed the key exchange. Since the + // reader is still blocked, it is safe to clear out + // the requestKex channel. 
This avoids the situation
+ // where: 1) we consumed our own request for the
+ // initial kex, and 2) the kex from the remote side
+ // caused another send on the requestKex channel.
+ clear:
+ for {
+ select {
+ case <-t.requestKex:
+ //
+ default:
+ break clear
+ }
+ }
+
+ request.done <- t.writeError
+
+ // kex finished. Push packets that we received while
+ // the kex was in progress. Don't look at t.startKex
+ // and don't increment writtenSinceKex: if we trigger
+ // another kex while we are still busy with the last
+ // one, things will become very confusing.
+ for _, p := range t.pendingPackets {
+ t.writeError = t.pushPacket(p)
+ if t.writeError != nil {
+ break
+ }
+ }
+ t.pendingPackets = t.pendingPackets[:0]
+ t.mu.Unlock()
+ }
+
+ // drain startKex channel. We don't service t.requestKex
+ // because nobody does blocking sends there.
+ go func() {
+ for init := range t.startKex {
+ init.done <- t.writeError
+ }
+ }()
+
+ // Unblock reader.
+ t.conn.Close()
+}
+
+// The protocol uses uint32 for packet counters, so we can't let them
+// reach 1<<32. We will actually read and write more packets than
+// this, though: the other side may send more packets, and after we
+// hit this limit on writing we will send a few more packets for the
+// key exchange itself.
+const packetRekeyThreshold = (1 << 31)
+
+func (t *handshakeTransport) resetReadThresholds() {
+ t.readPacketsLeft = packetRekeyThreshold
+ if t.config.RekeyThreshold > 0 {
+ t.readBytesLeft = int64(t.config.RekeyThreshold)
+ } else if t.algorithms != nil {
+ t.readBytesLeft = t.algorithms.r.rekeyBytes()
+ } else {
+ t.readBytesLeft = 1 << 30
+ }
+}
+
+func (t *handshakeTransport) readOnePacket(first bool) ([]byte, error) {
+ p, err := t.conn.readPacket()
+ if err != nil {
+ return nil, err
+ }
+
+ if t.readPacketsLeft > 0 {
+ t.readPacketsLeft--
+ } else {
+ t.requestKeyExchange()
+ }
+
+ if t.readBytesLeft > 0 {
+ t.readBytesLeft -= int64(len(p))
+ } else {
+ t.requestKeyExchange()
+ }
+
+ if debugHandshake {
+ t.printPacket(p, false)
+ }
+
+ if first && p[0] != msgKexInit {
+ return nil, fmt.Errorf("ssh: first packet should be msgKexInit")
+ }
+
+ if p[0] != msgKexInit {
+ return p, nil
+ }
+
+ firstKex := t.sessionID == nil
+
+ kex := pendingKex{
+ done: make(chan error, 1),
+ otherInit: p,
+ }
+ t.startKex <- &kex
+ err = <-kex.done
+
+ if debugHandshake {
+ log.Printf("%s exited key exchange (first %v), err %v", t.id(), firstKex, err)
+ }
+
+ if err != nil {
+ return nil, err
+ }
+
+ t.resetReadThresholds()
+
+ // By default, a key exchange is hidden from higher layers by
+ // translating it into msgIgnore.
+ successPacket := []byte{msgIgnore}
+ if firstKex {
+ // sendKexInit() for the first kex waits for
+ // msgNewKeys so the authentication process is
+ // guaranteed to happen over an encrypted transport.
+ successPacket = []byte{msgNewKeys}
+ }
+
+ return successPacket, nil
+}
+
+// sendKexInit sends a key change message.
+func (t *handshakeTransport) sendKexInit() error {
+ t.mu.Lock()
+ defer t.mu.Unlock()
+ if t.sentInitMsg != nil {
+ // kexInits may be sent either in response to the other side,
+ // or because our side wants to initiate a key change, so we
+ // may have already sent a kexInit. In that case, don't send a
+ // second kexInit.
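+ // Returning nil here is safe: the earlier kexInit is still
+ // pending, and the key exchange it starts covers this request.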
+ return nil + } + + msg := &kexInitMsg{ + KexAlgos: t.config.KeyExchanges, + CiphersClientServer: t.config.Ciphers, + CiphersServerClient: t.config.Ciphers, + MACsClientServer: t.config.MACs, + MACsServerClient: t.config.MACs, + CompressionClientServer: supportedCompressions, + CompressionServerClient: supportedCompressions, + } + io.ReadFull(rand.Reader, msg.Cookie[:]) + + if len(t.hostKeys) > 0 { + for _, k := range t.hostKeys { + msg.ServerHostKeyAlgos = append( + msg.ServerHostKeyAlgos, k.PublicKey().Type()) + } + } else { + msg.ServerHostKeyAlgos = t.hostKeyAlgorithms + } + packet := Marshal(msg) + + // writePacket destroys the contents, so save a copy. + packetCopy := make([]byte, len(packet)) + copy(packetCopy, packet) + + if err := t.pushPacket(packetCopy); err != nil { + return err + } + + t.sentInitMsg = msg + t.sentInitPacket = packet + + return nil +} + +func (t *handshakeTransport) writePacket(p []byte) error { + switch p[0] { + case msgKexInit: + return errors.New("ssh: only handshakeTransport can send kexInit") + case msgNewKeys: + return errors.New("ssh: only handshakeTransport can send newKeys") + } + + t.mu.Lock() + defer t.mu.Unlock() + if t.writeError != nil { + return t.writeError + } + + if t.sentInitMsg != nil { + // Copy the packet so the writer can reuse the buffer. + cp := make([]byte, len(p)) + copy(cp, p) + t.pendingPackets = append(t.pendingPackets, cp) + return nil + } + + if t.writeBytesLeft > 0 { + t.writeBytesLeft -= int64(len(p)) + } else { + t.requestKeyExchange() + } + + if t.writePacketsLeft > 0 { + t.writePacketsLeft-- + } else { + t.requestKeyExchange() + } + + if err := t.pushPacket(p); err != nil { + t.writeError = err + } + + return nil +} + +func (t *handshakeTransport) Close() error { + return t.conn.Close() +} + +func (t *handshakeTransport) enterKeyExchange(otherInitPacket []byte) error { + if debugHandshake { + log.Printf("%s entered key exchange", t.id()) + } + + otherInit := &kexInitMsg{} + if err := Unmarshal(otherInitPacket, otherInit); err != nil { + return err + } + + magics := handshakeMagics{ + clientVersion: t.clientVersion, + serverVersion: t.serverVersion, + clientKexInit: otherInitPacket, + serverKexInit: t.sentInitPacket, + } + + clientInit := otherInit + serverInit := t.sentInitMsg + isClient := len(t.hostKeys) == 0 + if isClient { + clientInit, serverInit = serverInit, clientInit + + magics.clientKexInit = t.sentInitPacket + magics.serverKexInit = otherInitPacket + } + + var err error + t.algorithms, err = findAgreedAlgorithms(isClient, clientInit, serverInit) + if err != nil { + return err + } + + // We don't send FirstKexFollows, but we handle receiving it. + // + // RFC 4253 section 7 defines the kex and the agreement method for + // first_kex_packet_follows. It states that the guessed packet + // should be ignored if the "kex algorithm and/or the host + // key algorithm is guessed wrong (server and client have + // different preferred algorithm), or if any of the other + // algorithms cannot be agreed upon". The other algorithms have + // already been checked above so the kex algorithm and host key + // algorithm are checked here. + if otherInit.FirstKexFollows && (clientInit.KexAlgos[0] != serverInit.KexAlgos[0] || clientInit.ServerHostKeyAlgos[0] != serverInit.ServerHostKeyAlgos[0]) { + // other side sent a kex message for the wrong algorithm, + // which we have to ignore. 
+ if _, err := t.conn.readPacket(); err != nil {
+ return err
+ }
+ }
+
+ kex, ok := kexAlgoMap[t.algorithms.kex]
+ if !ok {
+ return fmt.Errorf("ssh: unexpected key exchange algorithm %v", t.algorithms.kex)
+ }
+
+ var result *kexResult
+ if len(t.hostKeys) > 0 {
+ result, err = t.server(kex, t.algorithms, &magics)
+ } else {
+ result, err = t.client(kex, t.algorithms, &magics)
+ }
+
+ if err != nil {
+ return err
+ }
+
+ if t.sessionID == nil {
+ t.sessionID = result.H
+ }
+ result.SessionID = t.sessionID
+
+ if err := t.conn.prepareKeyChange(t.algorithms, result); err != nil {
+ return err
+ }
+ if err = t.conn.writePacket([]byte{msgNewKeys}); err != nil {
+ return err
+ }
+ if packet, err := t.conn.readPacket(); err != nil {
+ return err
+ } else if packet[0] != msgNewKeys {
+ return unexpectedMessageError(msgNewKeys, packet[0])
+ }
+
+ return nil
+}
+
+func (t *handshakeTransport) server(kex kexAlgorithm, algs *algorithms, magics *handshakeMagics) (*kexResult, error) {
+ var hostKey Signer
+ for _, k := range t.hostKeys {
+ if algs.hostKey == k.PublicKey().Type() {
+ hostKey = k
+ }
+ }
+
+ r, err := kex.Server(t.conn, t.config.Rand, magics, hostKey)
+ return r, err
+}
+
+func (t *handshakeTransport) client(kex kexAlgorithm, algs *algorithms, magics *handshakeMagics) (*kexResult, error) {
+ result, err := kex.Client(t.conn, t.config.Rand, magics)
+ if err != nil {
+ return nil, err
+ }
+
+ hostKey, err := ParsePublicKey(result.HostKey)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := verifyHostKeySignature(hostKey, result); err != nil {
+ return nil, err
+ }
+
+ err = t.hostKeyCallback(t.dialAddress, t.remoteAddr, hostKey)
+ if err != nil {
+ return nil, err
+ }
+
+ return result, nil
+}
diff --git a/vendor/golang.org/x/crypto/ssh/internal/bcrypt_pbkdf/bcrypt_pbkdf.go b/vendor/golang.org/x/crypto/ssh/internal/bcrypt_pbkdf/bcrypt_pbkdf.go
new file mode 100644
index 00000000000..af81d266546
--- /dev/null
+++ b/vendor/golang.org/x/crypto/ssh/internal/bcrypt_pbkdf/bcrypt_pbkdf.go
@@ -0,0 +1,93 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package bcrypt_pbkdf implements bcrypt_pbkdf(3) from OpenBSD.
+//
+// See https://flak.tedunangst.com/post/bcrypt-pbkdf and
+// https://cvsweb.openbsd.org/cgi-bin/cvsweb/src/lib/libutil/bcrypt_pbkdf.c.
+package bcrypt_pbkdf
+
+import (
+ "crypto/sha512"
+ "errors"
+ "golang.org/x/crypto/blowfish"
+)
+
+const blockSize = 32
+
+// Key derives a key from the password, salt and rounds count, returning a
+// []byte of length keyLen that can be used as a cryptographic key.
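+//
+// Illustrative use (parameter values are examples only):
+//
+// dk, err := Key([]byte("password"), salt, 16, 32)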
+func Key(password, salt []byte, rounds, keyLen int) ([]byte, error) { + if rounds < 1 { + return nil, errors.New("bcrypt_pbkdf: number of rounds is too small") + } + if len(password) == 0 { + return nil, errors.New("bcrypt_pbkdf: empty password") + } + if len(salt) == 0 || len(salt) > 1<<20 { + return nil, errors.New("bcrypt_pbkdf: bad salt length") + } + if keyLen > 1024 { + return nil, errors.New("bcrypt_pbkdf: keyLen is too large") + } + + numBlocks := (keyLen + blockSize - 1) / blockSize + key := make([]byte, numBlocks*blockSize) + + h := sha512.New() + h.Write(password) + shapass := h.Sum(nil) + + shasalt := make([]byte, 0, sha512.Size) + cnt, tmp := make([]byte, 4), make([]byte, blockSize) + for block := 1; block <= numBlocks; block++ { + h.Reset() + h.Write(salt) + cnt[0] = byte(block >> 24) + cnt[1] = byte(block >> 16) + cnt[2] = byte(block >> 8) + cnt[3] = byte(block) + h.Write(cnt) + bcryptHash(tmp, shapass, h.Sum(shasalt)) + + out := make([]byte, blockSize) + copy(out, tmp) + for i := 2; i <= rounds; i++ { + h.Reset() + h.Write(tmp) + bcryptHash(tmp, shapass, h.Sum(shasalt)) + for j := 0; j < len(out); j++ { + out[j] ^= tmp[j] + } + } + + for i, v := range out { + key[i*numBlocks+(block-1)] = v + } + } + return key[:keyLen], nil +} + +var magic = []byte("OxychromaticBlowfishSwatDynamite") + +func bcryptHash(out, shapass, shasalt []byte) { + c, err := blowfish.NewSaltedCipher(shapass, shasalt) + if err != nil { + panic(err) + } + for i := 0; i < 64; i++ { + blowfish.ExpandKey(shasalt, c) + blowfish.ExpandKey(shapass, c) + } + copy(out, magic) + for i := 0; i < 32; i += 8 { + for j := 0; j < 64; j++ { + c.Encrypt(out[i:i+8], out[i:i+8]) + } + } + // Swap bytes due to different endianness. + for i := 0; i < 32; i += 4 { + out[i+3], out[i+2], out[i+1], out[i] = out[i], out[i+1], out[i+2], out[i+3] + } +} diff --git a/vendor/golang.org/x/crypto/ssh/kex.go b/vendor/golang.org/x/crypto/ssh/kex.go new file mode 100644 index 00000000000..6c3c648fc95 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/kex.go @@ -0,0 +1,789 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/subtle" + "encoding/binary" + "errors" + "fmt" + "io" + "math/big" + + "golang.org/x/crypto/curve25519" +) + +const ( + kexAlgoDH1SHA1 = "diffie-hellman-group1-sha1" + kexAlgoDH14SHA1 = "diffie-hellman-group14-sha1" + kexAlgoECDH256 = "ecdh-sha2-nistp256" + kexAlgoECDH384 = "ecdh-sha2-nistp384" + kexAlgoECDH521 = "ecdh-sha2-nistp521" + kexAlgoCurve25519SHA256 = "curve25519-sha256@libssh.org" + + // For the following kex only the client half contains a production + // ready implementation. The server half only consists of a minimal + // implementation to satisfy the automated tests. + kexAlgoDHGEXSHA1 = "diffie-hellman-group-exchange-sha1" + kexAlgoDHGEXSHA256 = "diffie-hellman-group-exchange-sha256" +) + +// kexResult captures the outcome of a key exchange. +type kexResult struct { + // Session hash. See also RFC 4253, section 8. + H []byte + + // Shared secret. See also RFC 4253, section 8. + K []byte + + // Host key as hashed into H. + HostKey []byte + + // Signature of H. + Signature []byte + + // A cryptographic hash function that matches the security + // level of the key exchange algorithm. It is used for + // calculating H, and for deriving keys from H and K. 
+ Hash crypto.Hash + + // The session ID, which is the first H computed. This is used + // to derive key material inside the transport. + SessionID []byte +} + +// handshakeMagics contains data that is always included in the +// session hash. +type handshakeMagics struct { + clientVersion, serverVersion []byte + clientKexInit, serverKexInit []byte +} + +func (m *handshakeMagics) write(w io.Writer) { + writeString(w, m.clientVersion) + writeString(w, m.serverVersion) + writeString(w, m.clientKexInit) + writeString(w, m.serverKexInit) +} + +// kexAlgorithm abstracts different key exchange algorithms. +type kexAlgorithm interface { + // Server runs server-side key agreement, signing the result + // with a hostkey. + Server(p packetConn, rand io.Reader, magics *handshakeMagics, s Signer) (*kexResult, error) + + // Client runs the client-side key agreement. Caller is + // responsible for verifying the host key signature. + Client(p packetConn, rand io.Reader, magics *handshakeMagics) (*kexResult, error) +} + +// dhGroup is a multiplicative group suitable for implementing Diffie-Hellman key agreement. +type dhGroup struct { + g, p, pMinus1 *big.Int +} + +func (group *dhGroup) diffieHellman(theirPublic, myPrivate *big.Int) (*big.Int, error) { + if theirPublic.Cmp(bigOne) <= 0 || theirPublic.Cmp(group.pMinus1) >= 0 { + return nil, errors.New("ssh: DH parameter out of bounds") + } + return new(big.Int).Exp(theirPublic, myPrivate, group.p), nil +} + +func (group *dhGroup) Client(c packetConn, randSource io.Reader, magics *handshakeMagics) (*kexResult, error) { + hashFunc := crypto.SHA1 + + var x *big.Int + for { + var err error + if x, err = rand.Int(randSource, group.pMinus1); err != nil { + return nil, err + } + if x.Sign() > 0 { + break + } + } + + X := new(big.Int).Exp(group.g, x, group.p) + kexDHInit := kexDHInitMsg{ + X: X, + } + if err := c.writePacket(Marshal(&kexDHInit)); err != nil { + return nil, err + } + + packet, err := c.readPacket() + if err != nil { + return nil, err + } + + var kexDHReply kexDHReplyMsg + if err = Unmarshal(packet, &kexDHReply); err != nil { + return nil, err + } + + ki, err := group.diffieHellman(kexDHReply.Y, x) + if err != nil { + return nil, err + } + + h := hashFunc.New() + magics.write(h) + writeString(h, kexDHReply.HostKey) + writeInt(h, X) + writeInt(h, kexDHReply.Y) + K := make([]byte, intLength(ki)) + marshalInt(K, ki) + h.Write(K) + + return &kexResult{ + H: h.Sum(nil), + K: K, + HostKey: kexDHReply.HostKey, + Signature: kexDHReply.Signature, + Hash: crypto.SHA1, + }, nil +} + +func (group *dhGroup) Server(c packetConn, randSource io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) { + hashFunc := crypto.SHA1 + packet, err := c.readPacket() + if err != nil { + return + } + var kexDHInit kexDHInitMsg + if err = Unmarshal(packet, &kexDHInit); err != nil { + return + } + + var y *big.Int + for { + if y, err = rand.Int(randSource, group.pMinus1); err != nil { + return + } + if y.Sign() > 0 { + break + } + } + + Y := new(big.Int).Exp(group.g, y, group.p) + ki, err := group.diffieHellman(kexDHInit.X, y) + if err != nil { + return nil, err + } + + hostKeyBytes := priv.PublicKey().Marshal() + + h := hashFunc.New() + magics.write(h) + writeString(h, hostKeyBytes) + writeInt(h, kexDHInit.X) + writeInt(h, Y) + + K := make([]byte, intLength(ki)) + marshalInt(K, ki) + h.Write(K) + + H := h.Sum(nil) + + // H is already a hash, but the hostkey signing will apply its + // own key-specific hash algorithm. 
+ sig, err := signAndMarshal(priv, randSource, H) + if err != nil { + return nil, err + } + + kexDHReply := kexDHReplyMsg{ + HostKey: hostKeyBytes, + Y: Y, + Signature: sig, + } + packet = Marshal(&kexDHReply) + + err = c.writePacket(packet) + return &kexResult{ + H: H, + K: K, + HostKey: hostKeyBytes, + Signature: sig, + Hash: crypto.SHA1, + }, err +} + +// ecdh performs Elliptic Curve Diffie-Hellman key exchange as +// described in RFC 5656, section 4. +type ecdh struct { + curve elliptic.Curve +} + +func (kex *ecdh) Client(c packetConn, rand io.Reader, magics *handshakeMagics) (*kexResult, error) { + ephKey, err := ecdsa.GenerateKey(kex.curve, rand) + if err != nil { + return nil, err + } + + kexInit := kexECDHInitMsg{ + ClientPubKey: elliptic.Marshal(kex.curve, ephKey.PublicKey.X, ephKey.PublicKey.Y), + } + + serialized := Marshal(&kexInit) + if err := c.writePacket(serialized); err != nil { + return nil, err + } + + packet, err := c.readPacket() + if err != nil { + return nil, err + } + + var reply kexECDHReplyMsg + if err = Unmarshal(packet, &reply); err != nil { + return nil, err + } + + x, y, err := unmarshalECKey(kex.curve, reply.EphemeralPubKey) + if err != nil { + return nil, err + } + + // generate shared secret + secret, _ := kex.curve.ScalarMult(x, y, ephKey.D.Bytes()) + + h := ecHash(kex.curve).New() + magics.write(h) + writeString(h, reply.HostKey) + writeString(h, kexInit.ClientPubKey) + writeString(h, reply.EphemeralPubKey) + K := make([]byte, intLength(secret)) + marshalInt(K, secret) + h.Write(K) + + return &kexResult{ + H: h.Sum(nil), + K: K, + HostKey: reply.HostKey, + Signature: reply.Signature, + Hash: ecHash(kex.curve), + }, nil +} + +// unmarshalECKey parses and checks an EC key. +func unmarshalECKey(curve elliptic.Curve, pubkey []byte) (x, y *big.Int, err error) { + x, y = elliptic.Unmarshal(curve, pubkey) + if x == nil { + return nil, nil, errors.New("ssh: elliptic.Unmarshal failure") + } + if !validateECPublicKey(curve, x, y) { + return nil, nil, errors.New("ssh: public key not on curve") + } + return x, y, nil +} + +// validateECPublicKey checks that the point is a valid public key for +// the given curve. See [SEC1], 3.2.2 +func validateECPublicKey(curve elliptic.Curve, x, y *big.Int) bool { + if x.Sign() == 0 && y.Sign() == 0 { + return false + } + + if x.Cmp(curve.Params().P) >= 0 { + return false + } + + if y.Cmp(curve.Params().P) >= 0 { + return false + } + + if !curve.IsOnCurve(x, y) { + return false + } + + // We don't check if N * PubKey == 0, since + // + // - the NIST curves have cofactor = 1, so this is implicit. + // (We don't foresee an implementation that supports non NIST + // curves) + // + // - for ephemeral keys, we don't need to worry about small + // subgroup attacks. + return true +} + +func (kex *ecdh) Server(c packetConn, rand io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) { + packet, err := c.readPacket() + if err != nil { + return nil, err + } + + var kexECDHInit kexECDHInitMsg + if err = Unmarshal(packet, &kexECDHInit); err != nil { + return nil, err + } + + clientX, clientY, err := unmarshalECKey(kex.curve, kexECDHInit.ClientPubKey) + if err != nil { + return nil, err + } + + // We could cache this key across multiple users/multiple + // connection attempts, but the benefit is small. OpenSSH + // generates a new key for each incoming connection. 
+ ephKey, err := ecdsa.GenerateKey(kex.curve, rand) + if err != nil { + return nil, err + } + + hostKeyBytes := priv.PublicKey().Marshal() + + serializedEphKey := elliptic.Marshal(kex.curve, ephKey.PublicKey.X, ephKey.PublicKey.Y) + + // generate shared secret + secret, _ := kex.curve.ScalarMult(clientX, clientY, ephKey.D.Bytes()) + + h := ecHash(kex.curve).New() + magics.write(h) + writeString(h, hostKeyBytes) + writeString(h, kexECDHInit.ClientPubKey) + writeString(h, serializedEphKey) + + K := make([]byte, intLength(secret)) + marshalInt(K, secret) + h.Write(K) + + H := h.Sum(nil) + + // H is already a hash, but the hostkey signing will apply its + // own key-specific hash algorithm. + sig, err := signAndMarshal(priv, rand, H) + if err != nil { + return nil, err + } + + reply := kexECDHReplyMsg{ + EphemeralPubKey: serializedEphKey, + HostKey: hostKeyBytes, + Signature: sig, + } + + serialized := Marshal(&reply) + if err := c.writePacket(serialized); err != nil { + return nil, err + } + + return &kexResult{ + H: H, + K: K, + HostKey: reply.HostKey, + Signature: sig, + Hash: ecHash(kex.curve), + }, nil +} + +var kexAlgoMap = map[string]kexAlgorithm{} + +func init() { + // This is the group called diffie-hellman-group1-sha1 in RFC + // 4253 and Oakley Group 2 in RFC 2409. + p, _ := new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF", 16) + kexAlgoMap[kexAlgoDH1SHA1] = &dhGroup{ + g: new(big.Int).SetInt64(2), + p: p, + pMinus1: new(big.Int).Sub(p, bigOne), + } + + // This is the group called diffie-hellman-group14-sha1 in RFC + // 4253 and Oakley Group 14 in RFC 3526. + p, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AACAA68FFFFFFFFFFFFFFFF", 16) + + kexAlgoMap[kexAlgoDH14SHA1] = &dhGroup{ + g: new(big.Int).SetInt64(2), + p: p, + pMinus1: new(big.Int).Sub(p, bigOne), + } + + kexAlgoMap[kexAlgoECDH521] = &ecdh{elliptic.P521()} + kexAlgoMap[kexAlgoECDH384] = &ecdh{elliptic.P384()} + kexAlgoMap[kexAlgoECDH256] = &ecdh{elliptic.P256()} + kexAlgoMap[kexAlgoCurve25519SHA256] = &curve25519sha256{} + kexAlgoMap[kexAlgoDHGEXSHA1] = &dhGEXSHA{hashFunc: crypto.SHA1} + kexAlgoMap[kexAlgoDHGEXSHA256] = &dhGEXSHA{hashFunc: crypto.SHA256} +} + +// curve25519sha256 implements the curve25519-sha256@libssh.org key +// agreement protocol, as described in +// https://git.libssh.org/projects/libssh.git/tree/doc/curve25519-sha256@libssh.org.txt +type curve25519sha256 struct{} + +type curve25519KeyPair struct { + priv [32]byte + pub [32]byte +} + +func (kp *curve25519KeyPair) generate(rand io.Reader) error { + if _, err := io.ReadFull(rand, kp.priv[:]); err != nil { + return err + } + curve25519.ScalarBaseMult(&kp.pub, &kp.priv) + return nil +} + +// curve25519Zeros is just an array of 32 zero bytes so that we have something +// convenient to compare against in order to reject curve25519 points with the +// wrong order. 
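+// An all-zero shared secret means the peer supplied a low-order point;
+// RFC 7748, section 6.1 describes checking for the all-zero output.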
+var curve25519Zeros [32]byte + +func (kex *curve25519sha256) Client(c packetConn, rand io.Reader, magics *handshakeMagics) (*kexResult, error) { + var kp curve25519KeyPair + if err := kp.generate(rand); err != nil { + return nil, err + } + if err := c.writePacket(Marshal(&kexECDHInitMsg{kp.pub[:]})); err != nil { + return nil, err + } + + packet, err := c.readPacket() + if err != nil { + return nil, err + } + + var reply kexECDHReplyMsg + if err = Unmarshal(packet, &reply); err != nil { + return nil, err + } + if len(reply.EphemeralPubKey) != 32 { + return nil, errors.New("ssh: peer's curve25519 public value has wrong length") + } + + var servPub, secret [32]byte + copy(servPub[:], reply.EphemeralPubKey) + curve25519.ScalarMult(&secret, &kp.priv, &servPub) + if subtle.ConstantTimeCompare(secret[:], curve25519Zeros[:]) == 1 { + return nil, errors.New("ssh: peer's curve25519 public value has wrong order") + } + + h := crypto.SHA256.New() + magics.write(h) + writeString(h, reply.HostKey) + writeString(h, kp.pub[:]) + writeString(h, reply.EphemeralPubKey) + + ki := new(big.Int).SetBytes(secret[:]) + K := make([]byte, intLength(ki)) + marshalInt(K, ki) + h.Write(K) + + return &kexResult{ + H: h.Sum(nil), + K: K, + HostKey: reply.HostKey, + Signature: reply.Signature, + Hash: crypto.SHA256, + }, nil +} + +func (kex *curve25519sha256) Server(c packetConn, rand io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) { + packet, err := c.readPacket() + if err != nil { + return + } + var kexInit kexECDHInitMsg + if err = Unmarshal(packet, &kexInit); err != nil { + return + } + + if len(kexInit.ClientPubKey) != 32 { + return nil, errors.New("ssh: peer's curve25519 public value has wrong length") + } + + var kp curve25519KeyPair + if err := kp.generate(rand); err != nil { + return nil, err + } + + var clientPub, secret [32]byte + copy(clientPub[:], kexInit.ClientPubKey) + curve25519.ScalarMult(&secret, &kp.priv, &clientPub) + if subtle.ConstantTimeCompare(secret[:], curve25519Zeros[:]) == 1 { + return nil, errors.New("ssh: peer's curve25519 public value has wrong order") + } + + hostKeyBytes := priv.PublicKey().Marshal() + + h := crypto.SHA256.New() + magics.write(h) + writeString(h, hostKeyBytes) + writeString(h, kexInit.ClientPubKey) + writeString(h, kp.pub[:]) + + ki := new(big.Int).SetBytes(secret[:]) + K := make([]byte, intLength(ki)) + marshalInt(K, ki) + h.Write(K) + + H := h.Sum(nil) + + sig, err := signAndMarshal(priv, rand, H) + if err != nil { + return nil, err + } + + reply := kexECDHReplyMsg{ + EphemeralPubKey: kp.pub[:], + HostKey: hostKeyBytes, + Signature: sig, + } + if err := c.writePacket(Marshal(&reply)); err != nil { + return nil, err + } + return &kexResult{ + H: H, + K: K, + HostKey: hostKeyBytes, + Signature: sig, + Hash: crypto.SHA256, + }, nil +} + +// dhGEXSHA implements the diffie-hellman-group-exchange-sha1 and +// diffie-hellman-group-exchange-sha256 key agreement protocols, +// as described in RFC 4419 +type dhGEXSHA struct { + g, p *big.Int + hashFunc crypto.Hash +} + +const numMRTests = 64 + +const ( + dhGroupExchangeMinimumBits = 2048 + dhGroupExchangePreferredBits = 2048 + dhGroupExchangeMaximumBits = 8192 +) + +func (gex *dhGEXSHA) diffieHellman(theirPublic, myPrivate *big.Int) (*big.Int, error) { + if theirPublic.Sign() <= 0 || theirPublic.Cmp(gex.p) >= 0 { + return nil, fmt.Errorf("ssh: DH parameter out of bounds") + } + return new(big.Int).Exp(theirPublic, myPrivate, gex.p), nil +} + +func (gex *dhGEXSHA) Client(c packetConn, randSource 
io.Reader, magics *handshakeMagics) (*kexResult, error) {
+ // Send GexRequest
+ kexDHGexRequest := kexDHGexRequestMsg{
+ MinBits: dhGroupExchangeMinimumBits,
+ PreferedBits: dhGroupExchangePreferredBits,
+ MaxBits: dhGroupExchangeMaximumBits,
+ }
+ if err := c.writePacket(Marshal(&kexDHGexRequest)); err != nil {
+ return nil, err
+ }
+
+ // Receive GexGroup
+ packet, err := c.readPacket()
+ if err != nil {
+ return nil, err
+ }
+
+ var kexDHGexGroup kexDHGexGroupMsg
+ if err = Unmarshal(packet, &kexDHGexGroup); err != nil {
+ return nil, err
+ }
+
+ // reject if p's bit length < dhGroupExchangeMinimumBits or > dhGroupExchangeMaximumBits
+ if kexDHGexGroup.P.BitLen() < dhGroupExchangeMinimumBits || kexDHGexGroup.P.BitLen() > dhGroupExchangeMaximumBits {
+ return nil, fmt.Errorf("ssh: server-generated gex p is out of range (%d bits)", kexDHGexGroup.P.BitLen())
+ }
+
+ gex.p = kexDHGexGroup.P
+ gex.g = kexDHGexGroup.G
+
+ // Check if p is safe by verifying that p and (p-1)/2 are primes
+ one := big.NewInt(1)
+ var pHalf = &big.Int{}
+ pHalf.Rsh(gex.p, 1)
+ if !gex.p.ProbablyPrime(numMRTests) || !pHalf.ProbablyPrime(numMRTests) {
+ return nil, fmt.Errorf("ssh: server provided gex p is not safe")
+ }
+
+ // Check if g is safe by verifying that g > 1 and g < p - 1
+ var pMinusOne = &big.Int{}
+ pMinusOne.Sub(gex.p, one)
+ if gex.g.Cmp(one) != 1 || gex.g.Cmp(pMinusOne) != -1 {
+ return nil, fmt.Errorf("ssh: server provided gex g is not safe")
+ }
+
+ // Send GexInit
+ x, err := rand.Int(randSource, pHalf)
+ if err != nil {
+ return nil, err
+ }
+ X := new(big.Int).Exp(gex.g, x, gex.p)
+ kexDHGexInit := kexDHGexInitMsg{
+ X: X,
+ }
+ if err := c.writePacket(Marshal(&kexDHGexInit)); err != nil {
+ return nil, err
+ }
+
+ // Receive GexReply
+ packet, err = c.readPacket()
+ if err != nil {
+ return nil, err
+ }
+
+ var kexDHGexReply kexDHGexReplyMsg
+ if err = Unmarshal(packet, &kexDHGexReply); err != nil {
+ return nil, err
+ }
+
+ kInt, err := gex.diffieHellman(kexDHGexReply.Y, x)
+ if err != nil {
+ return nil, err
+ }
+
+ // Check if k is safe by verifying that k > 1 and k < p - 1
+ if kInt.Cmp(one) != 1 || kInt.Cmp(pMinusOne) != -1 {
+ return nil, fmt.Errorf("ssh: derived k is not safe")
+ }
+
+ h := gex.hashFunc.New()
+ magics.write(h)
+ writeString(h, kexDHGexReply.HostKey)
+ binary.Write(h, binary.BigEndian, uint32(dhGroupExchangeMinimumBits))
+ binary.Write(h, binary.BigEndian, uint32(dhGroupExchangePreferredBits))
+ binary.Write(h, binary.BigEndian, uint32(dhGroupExchangeMaximumBits))
+ writeInt(h, gex.p)
+ writeInt(h, gex.g)
+ writeInt(h, X)
+ writeInt(h, kexDHGexReply.Y)
+ K := make([]byte, intLength(kInt))
+ marshalInt(K, kInt)
+ h.Write(K)
+
+ return &kexResult{
+ H: h.Sum(nil),
+ K: K,
+ HostKey: kexDHGexReply.HostKey,
+ Signature: kexDHGexReply.Signature,
+ Hash: gex.hashFunc,
+ }, nil
+}
+
+// Server half implementation of the Diffie-Hellman Key Exchange with SHA1 and SHA256.
+//
+// This is a minimal implementation to satisfy the automated tests.
+func (gex *dhGEXSHA) Server(c packetConn, randSource io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) { + // Receive GexRequest + packet, err := c.readPacket() + if err != nil { + return + } + var kexDHGexRequest kexDHGexRequestMsg + if err = Unmarshal(packet, &kexDHGexRequest); err != nil { + return + } + + // smoosh the user's preferred size into our own limits + if kexDHGexRequest.PreferedBits > dhGroupExchangeMaximumBits { + kexDHGexRequest.PreferedBits = dhGroupExchangeMaximumBits + } + if kexDHGexRequest.PreferedBits < dhGroupExchangeMinimumBits { + kexDHGexRequest.PreferedBits = dhGroupExchangeMinimumBits + } + // fix min/max if they're inconsistent. technically, we could just pout + // and hang up, but there's no harm in giving them the benefit of the + // doubt and just picking a bitsize for them. + if kexDHGexRequest.MinBits > kexDHGexRequest.PreferedBits { + kexDHGexRequest.MinBits = kexDHGexRequest.PreferedBits + } + if kexDHGexRequest.MaxBits < kexDHGexRequest.PreferedBits { + kexDHGexRequest.MaxBits = kexDHGexRequest.PreferedBits + } + + // Send GexGroup + // This is the group called diffie-hellman-group14-sha1 in RFC + // 4253 and Oakley Group 14 in RFC 3526. + p, _ := new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AACAA68FFFFFFFFFFFFFFFF", 16) + gex.p = p + gex.g = big.NewInt(2) + + kexDHGexGroup := kexDHGexGroupMsg{ + P: gex.p, + G: gex.g, + } + if err := c.writePacket(Marshal(&kexDHGexGroup)); err != nil { + return nil, err + } + + // Receive GexInit + packet, err = c.readPacket() + if err != nil { + return + } + var kexDHGexInit kexDHGexInitMsg + if err = Unmarshal(packet, &kexDHGexInit); err != nil { + return + } + + var pHalf = &big.Int{} + pHalf.Rsh(gex.p, 1) + + y, err := rand.Int(randSource, pHalf) + if err != nil { + return + } + + Y := new(big.Int).Exp(gex.g, y, gex.p) + kInt, err := gex.diffieHellman(kexDHGexInit.X, y) + if err != nil { + return nil, err + } + + hostKeyBytes := priv.PublicKey().Marshal() + + h := gex.hashFunc.New() + magics.write(h) + writeString(h, hostKeyBytes) + binary.Write(h, binary.BigEndian, uint32(dhGroupExchangeMinimumBits)) + binary.Write(h, binary.BigEndian, uint32(dhGroupExchangePreferredBits)) + binary.Write(h, binary.BigEndian, uint32(dhGroupExchangeMaximumBits)) + writeInt(h, gex.p) + writeInt(h, gex.g) + writeInt(h, kexDHGexInit.X) + writeInt(h, Y) + + K := make([]byte, intLength(kInt)) + marshalInt(K, kInt) + h.Write(K) + + H := h.Sum(nil) + + // H is already a hash, but the hostkey signing will apply its + // own key-specific hash algorithm. 
+ sig, err := signAndMarshal(priv, randSource, H) + if err != nil { + return nil, err + } + + kexDHGexReply := kexDHGexReplyMsg{ + HostKey: hostKeyBytes, + Y: Y, + Signature: sig, + } + packet = Marshal(&kexDHGexReply) + + err = c.writePacket(packet) + + return &kexResult{ + H: H, + K: K, + HostKey: hostKeyBytes, + Signature: sig, + Hash: gex.hashFunc, + }, err +} diff --git a/vendor/golang.org/x/crypto/ssh/keys.go b/vendor/golang.org/x/crypto/ssh/keys.go new file mode 100644 index 00000000000..5377ec8c3b5 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/keys.go @@ -0,0 +1,1403 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "bytes" + "crypto" + "crypto/aes" + "crypto/cipher" + "crypto/dsa" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/md5" + "crypto/rsa" + "crypto/sha256" + "crypto/x509" + "encoding/asn1" + "encoding/base64" + "encoding/hex" + "encoding/pem" + "errors" + "fmt" + "io" + "math/big" + "strings" + + "golang.org/x/crypto/ed25519" + "golang.org/x/crypto/ssh/internal/bcrypt_pbkdf" +) + +// These constants represent the algorithm names for key types supported by this +// package. +const ( + KeyAlgoRSA = "ssh-rsa" + KeyAlgoDSA = "ssh-dss" + KeyAlgoECDSA256 = "ecdsa-sha2-nistp256" + KeyAlgoSKECDSA256 = "sk-ecdsa-sha2-nistp256@openssh.com" + KeyAlgoECDSA384 = "ecdsa-sha2-nistp384" + KeyAlgoECDSA521 = "ecdsa-sha2-nistp521" + KeyAlgoED25519 = "ssh-ed25519" + KeyAlgoSKED25519 = "sk-ssh-ed25519@openssh.com" +) + +// These constants represent non-default signature algorithms that are supported +// as algorithm parameters to AlgorithmSigner.SignWithAlgorithm methods. See +// [PROTOCOL.agent] section 4.5.1 and +// https://tools.ietf.org/html/draft-ietf-curdle-rsa-sha2-10 +const ( + SigAlgoRSA = "ssh-rsa" + SigAlgoRSASHA2256 = "rsa-sha2-256" + SigAlgoRSASHA2512 = "rsa-sha2-512" +) + +// parsePubKey parses a public key of the given algorithm. +// Use ParsePublicKey for keys with prepended algorithm. +func parsePubKey(in []byte, algo string) (pubKey PublicKey, rest []byte, err error) { + switch algo { + case KeyAlgoRSA: + return parseRSA(in) + case KeyAlgoDSA: + return parseDSA(in) + case KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521: + return parseECDSA(in) + case KeyAlgoSKECDSA256: + return parseSKECDSA(in) + case KeyAlgoED25519: + return parseED25519(in) + case KeyAlgoSKED25519: + return parseSKEd25519(in) + case CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoSKECDSA256v01, CertAlgoED25519v01, CertAlgoSKED25519v01: + cert, err := parseCert(in, certToPrivAlgo(algo)) + if err != nil { + return nil, nil, err + } + return cert, nil, nil + } + return nil, nil, fmt.Errorf("ssh: unknown key algorithm: %v", algo) +} + +// parseAuthorizedKey parses a public key in OpenSSH authorized_keys format +// (see sshd(8) manual page) once the options and key type fields have been +// removed. 
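+// For an authorized_keys line such as
+//
+// ssh-rsa AAAAB3NzaC1yc2E... user@host
+//
+// this helper receives the "AAAAB3NzaC1yc2E... user@host" part.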
+func parseAuthorizedKey(in []byte) (out PublicKey, comment string, err error) {
+ in = bytes.TrimSpace(in)
+
+ i := bytes.IndexAny(in, " \t")
+ if i == -1 {
+ i = len(in)
+ }
+ base64Key := in[:i]
+
+ key := make([]byte, base64.StdEncoding.DecodedLen(len(base64Key)))
+ n, err := base64.StdEncoding.Decode(key, base64Key)
+ if err != nil {
+ return nil, "", err
+ }
+ key = key[:n]
+ out, err = ParsePublicKey(key)
+ if err != nil {
+ return nil, "", err
+ }
+ comment = string(bytes.TrimSpace(in[i:]))
+ return out, comment, nil
+}
+
+// ParseKnownHosts parses an entry in the format of the known_hosts file.
+//
+// The known_hosts format is documented in the sshd(8) manual page. This
+// function will parse a single entry from in. On successful return, marker
+// will contain the optional marker value (i.e. "cert-authority" or "revoked")
+// or else be empty, hosts will contain the hosts that this entry matches,
+// pubKey will contain the public key and comment will contain any trailing
+// comment at the end of the line. See the sshd(8) manual page for the various
+// forms that a host string can take.
+//
+// The unparsed remainder of the input will be returned in rest. This function
+// can be called repeatedly to parse multiple entries.
+//
+// If no entries were found in the input then err will be io.EOF. Otherwise a
+// non-nil err value indicates a parse error.
+func ParseKnownHosts(in []byte) (marker string, hosts []string, pubKey PublicKey, comment string, rest []byte, err error) {
+ for len(in) > 0 {
+ end := bytes.IndexByte(in, '\n')
+ if end != -1 {
+ rest = in[end+1:]
+ in = in[:end]
+ } else {
+ rest = nil
+ }
+
+ end = bytes.IndexByte(in, '\r')
+ if end != -1 {
+ in = in[:end]
+ }
+
+ in = bytes.TrimSpace(in)
+ if len(in) == 0 || in[0] == '#' {
+ in = rest
+ continue
+ }
+
+ i := bytes.IndexAny(in, " \t")
+ if i == -1 {
+ in = rest
+ continue
+ }
+
+ // Strip out the beginning of the known_hosts key.
+ // This is either an optional marker or a (set of) hostname(s).
+ keyFields := bytes.Fields(in)
+ if len(keyFields) < 3 || len(keyFields) > 5 {
+ return "", nil, nil, "", nil, errors.New("ssh: invalid entry in known_hosts data")
+ }
+
+ // keyFields[0] is either "@cert-authority", "@revoked" or a comma-separated
+ // list of hosts
+ marker := ""
+ if keyFields[0][0] == '@' {
+ marker = string(keyFields[0][1:])
+ keyFields = keyFields[1:]
+ }
+
+ hosts := string(keyFields[0])
+ // keyFields[1] contains the key type (e.g. “ssh-rsa”).
+ // However, that information is duplicated inside the
+ // base64-encoded key and so is ignored here.
+
+ key := bytes.Join(keyFields[2:], []byte(" "))
+ if pubKey, comment, err = parseAuthorizedKey(key); err != nil {
+ return "", nil, nil, "", nil, err
+ }
+
+ return marker, strings.Split(hosts, ","), pubKey, comment, rest, nil
+ }
+
+ return "", nil, nil, "", nil, io.EOF
+}
+
+// ParseAuthorizedKey parses a public key from an authorized_keys
+// file used in OpenSSH according to the sshd(8) manual page.
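+//
+// A sketch of scanning a whole file, feeding rest back in:
+//
+// for len(in) > 0 {
+// pub, comment, options, rest, err := ParseAuthorizedKey(in)
+// if err != nil {
+// break
+// }
+// _, _, _ = pub, comment, options
+// in = rest
+// }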
+func ParseAuthorizedKey(in []byte) (out PublicKey, comment string, options []string, rest []byte, err error) { + for len(in) > 0 { + end := bytes.IndexByte(in, '\n') + if end != -1 { + rest = in[end+1:] + in = in[:end] + } else { + rest = nil + } + + end = bytes.IndexByte(in, '\r') + if end != -1 { + in = in[:end] + } + + in = bytes.TrimSpace(in) + if len(in) == 0 || in[0] == '#' { + in = rest + continue + } + + i := bytes.IndexAny(in, " \t") + if i == -1 { + in = rest + continue + } + + if out, comment, err = parseAuthorizedKey(in[i:]); err == nil { + return out, comment, options, rest, nil + } + + // No key type recognised. Maybe there's an options field at + // the beginning. + var b byte + inQuote := false + var candidateOptions []string + optionStart := 0 + for i, b = range in { + isEnd := !inQuote && (b == ' ' || b == '\t') + if (b == ',' && !inQuote) || isEnd { + if i-optionStart > 0 { + candidateOptions = append(candidateOptions, string(in[optionStart:i])) + } + optionStart = i + 1 + } + if isEnd { + break + } + if b == '"' && (i == 0 || (i > 0 && in[i-1] != '\\')) { + inQuote = !inQuote + } + } + for i < len(in) && (in[i] == ' ' || in[i] == '\t') { + i++ + } + if i == len(in) { + // Invalid line: unmatched quote + in = rest + continue + } + + in = in[i:] + i = bytes.IndexAny(in, " \t") + if i == -1 { + in = rest + continue + } + + if out, comment, err = parseAuthorizedKey(in[i:]); err == nil { + options = candidateOptions + return out, comment, options, rest, nil + } + + in = rest + continue + } + + return nil, "", nil, nil, errors.New("ssh: no key found") +} + +// ParsePublicKey parses an SSH public key formatted for use in +// the SSH wire protocol according to RFC 4253, section 6.6. +func ParsePublicKey(in []byte) (out PublicKey, err error) { + algo, in, ok := parseString(in) + if !ok { + return nil, errShortRead + } + var rest []byte + out, rest, err = parsePubKey(in, string(algo)) + if len(rest) > 0 { + return nil, errors.New("ssh: trailing junk in public key") + } + + return out, err +} + +// MarshalAuthorizedKey serializes key for inclusion in an OpenSSH +// authorized_keys file. The return value ends with newline. +func MarshalAuthorizedKey(key PublicKey) []byte { + b := &bytes.Buffer{} + b.WriteString(key.Type()) + b.WriteByte(' ') + e := base64.NewEncoder(base64.StdEncoding, b) + e.Write(key.Marshal()) + e.Close() + b.WriteByte('\n') + return b.Bytes() +} + +// PublicKey is an abstraction of different types of public keys. +type PublicKey interface { + // Type returns the key's type, e.g. "ssh-rsa". + Type() string + + // Marshal returns the serialized key data in SSH wire format, + // with the name prefix. To unmarshal the returned data, use + // the ParsePublicKey function. + Marshal() []byte + + // Verify that sig is a signature on the given data using this + // key. This function will hash the data appropriately first. + Verify(data []byte, sig *Signature) error +} + +// CryptoPublicKey, if implemented by a PublicKey, +// returns the underlying crypto.PublicKey form of the key. +type CryptoPublicKey interface { + CryptoPublicKey() crypto.PublicKey +} + +// A Signer can create signatures that verify against a public key. +type Signer interface { + // PublicKey returns an associated PublicKey instance. + PublicKey() PublicKey + + // Sign returns raw signature for the given data. This method + // will apply the hash specified for the keytype to the data. 
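+	// The rand argument is an entropy source for algorithms that
+	// need one (typically crypto/rand.Reader); deterministic schemes
+	// such as Ed25519 ignore it.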
+ Sign(rand io.Reader, data []byte) (*Signature, error) +} + +// A AlgorithmSigner is a Signer that also supports specifying a specific +// algorithm to use for signing. +type AlgorithmSigner interface { + Signer + + // SignWithAlgorithm is like Signer.Sign, but allows specification of a + // non-default signing algorithm. See the SigAlgo* constants in this + // package for signature algorithms supported by this package. Callers may + // pass an empty string for the algorithm in which case the AlgorithmSigner + // will use its default algorithm. + SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error) +} + +type rsaPublicKey rsa.PublicKey + +func (r *rsaPublicKey) Type() string { + return "ssh-rsa" +} + +// parseRSA parses an RSA key according to RFC 4253, section 6.6. +func parseRSA(in []byte) (out PublicKey, rest []byte, err error) { + var w struct { + E *big.Int + N *big.Int + Rest []byte `ssh:"rest"` + } + if err := Unmarshal(in, &w); err != nil { + return nil, nil, err + } + + if w.E.BitLen() > 24 { + return nil, nil, errors.New("ssh: exponent too large") + } + e := w.E.Int64() + if e < 3 || e&1 == 0 { + return nil, nil, errors.New("ssh: incorrect exponent") + } + + var key rsa.PublicKey + key.E = int(e) + key.N = w.N + return (*rsaPublicKey)(&key), w.Rest, nil +} + +func (r *rsaPublicKey) Marshal() []byte { + e := new(big.Int).SetInt64(int64(r.E)) + // RSA publickey struct layout should match the struct used by + // parseRSACert in the x/crypto/ssh/agent package. + wirekey := struct { + Name string + E *big.Int + N *big.Int + }{ + KeyAlgoRSA, + e, + r.N, + } + return Marshal(&wirekey) +} + +func (r *rsaPublicKey) Verify(data []byte, sig *Signature) error { + var hash crypto.Hash + switch sig.Format { + case SigAlgoRSA: + hash = crypto.SHA1 + case SigAlgoRSASHA2256: + hash = crypto.SHA256 + case SigAlgoRSASHA2512: + hash = crypto.SHA512 + default: + return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, r.Type()) + } + h := hash.New() + h.Write(data) + digest := h.Sum(nil) + return rsa.VerifyPKCS1v15((*rsa.PublicKey)(r), hash, digest, sig.Blob) +} + +func (r *rsaPublicKey) CryptoPublicKey() crypto.PublicKey { + return (*rsa.PublicKey)(r) +} + +type dsaPublicKey dsa.PublicKey + +func (k *dsaPublicKey) Type() string { + return "ssh-dss" +} + +func checkDSAParams(param *dsa.Parameters) error { + // SSH specifies FIPS 186-2, which only provided a single size + // (1024 bits) DSA key. FIPS 186-3 allows for larger key + // sizes, which would confuse SSH. + if l := param.P.BitLen(); l != 1024 { + return fmt.Errorf("ssh: unsupported DSA key size %d", l) + } + + return nil +} + +// parseDSA parses an DSA key according to RFC 4253, section 6.6. +func parseDSA(in []byte) (out PublicKey, rest []byte, err error) { + var w struct { + P, Q, G, Y *big.Int + Rest []byte `ssh:"rest"` + } + if err := Unmarshal(in, &w); err != nil { + return nil, nil, err + } + + param := dsa.Parameters{ + P: w.P, + Q: w.Q, + G: w.G, + } + if err := checkDSAParams(¶m); err != nil { + return nil, nil, err + } + + key := &dsaPublicKey{ + Parameters: param, + Y: w.Y, + } + return key, w.Rest, nil +} + +func (k *dsaPublicKey) Marshal() []byte { + // DSA publickey struct layout should match the struct used by + // parseDSACert in the x/crypto/ssh/agent package. 
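+	// The resulting wire layout is the string "ssh-dss" followed by
+	// the mpints p, q, g and y (RFC 4253, section 6.6).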
+ w := struct { + Name string + P, Q, G, Y *big.Int + }{ + k.Type(), + k.P, + k.Q, + k.G, + k.Y, + } + + return Marshal(&w) +} + +func (k *dsaPublicKey) Verify(data []byte, sig *Signature) error { + if sig.Format != k.Type() { + return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type()) + } + h := crypto.SHA1.New() + h.Write(data) + digest := h.Sum(nil) + + // Per RFC 4253, section 6.6, + // The value for 'dss_signature_blob' is encoded as a string containing + // r, followed by s (which are 160-bit integers, without lengths or + // padding, unsigned, and in network byte order). + // For DSS purposes, sig.Blob should be exactly 40 bytes in length. + if len(sig.Blob) != 40 { + return errors.New("ssh: DSA signature parse error") + } + r := new(big.Int).SetBytes(sig.Blob[:20]) + s := new(big.Int).SetBytes(sig.Blob[20:]) + if dsa.Verify((*dsa.PublicKey)(k), digest, r, s) { + return nil + } + return errors.New("ssh: signature did not verify") +} + +func (k *dsaPublicKey) CryptoPublicKey() crypto.PublicKey { + return (*dsa.PublicKey)(k) +} + +type dsaPrivateKey struct { + *dsa.PrivateKey +} + +func (k *dsaPrivateKey) PublicKey() PublicKey { + return (*dsaPublicKey)(&k.PrivateKey.PublicKey) +} + +func (k *dsaPrivateKey) Sign(rand io.Reader, data []byte) (*Signature, error) { + return k.SignWithAlgorithm(rand, data, "") +} + +func (k *dsaPrivateKey) SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error) { + if algorithm != "" && algorithm != k.PublicKey().Type() { + return nil, fmt.Errorf("ssh: unsupported signature algorithm %s", algorithm) + } + + h := crypto.SHA1.New() + h.Write(data) + digest := h.Sum(nil) + r, s, err := dsa.Sign(rand, k.PrivateKey, digest) + if err != nil { + return nil, err + } + + sig := make([]byte, 40) + rb := r.Bytes() + sb := s.Bytes() + + copy(sig[20-len(rb):20], rb) + copy(sig[40-len(sb):], sb) + + return &Signature{ + Format: k.PublicKey().Type(), + Blob: sig, + }, nil +} + +type ecdsaPublicKey ecdsa.PublicKey + +func (k *ecdsaPublicKey) Type() string { + return "ecdsa-sha2-" + k.nistID() +} + +func (k *ecdsaPublicKey) nistID() string { + switch k.Params().BitSize { + case 256: + return "nistp256" + case 384: + return "nistp384" + case 521: + return "nistp521" + } + panic("ssh: unsupported ecdsa key size") +} + +type ed25519PublicKey ed25519.PublicKey + +func (k ed25519PublicKey) Type() string { + return KeyAlgoED25519 +} + +func parseED25519(in []byte) (out PublicKey, rest []byte, err error) { + var w struct { + KeyBytes []byte + Rest []byte `ssh:"rest"` + } + + if err := Unmarshal(in, &w); err != nil { + return nil, nil, err + } + + key := ed25519.PublicKey(w.KeyBytes) + + return (ed25519PublicKey)(key), w.Rest, nil +} + +func (k ed25519PublicKey) Marshal() []byte { + w := struct { + Name string + KeyBytes []byte + }{ + KeyAlgoED25519, + []byte(k), + } + return Marshal(&w) +} + +func (k ed25519PublicKey) Verify(b []byte, sig *Signature) error { + if sig.Format != k.Type() { + return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type()) + } + + edKey := (ed25519.PublicKey)(k) + if ok := ed25519.Verify(edKey, b, sig.Blob); !ok { + return errors.New("ssh: signature did not verify") + } + + return nil +} + +func (k ed25519PublicKey) CryptoPublicKey() crypto.PublicKey { + return ed25519.PublicKey(k) +} + +func supportedEllipticCurve(curve elliptic.Curve) bool { + return curve == elliptic.P256() || curve == elliptic.P384() || curve == elliptic.P521() +} + +// ecHash returns the hash to match the 
given elliptic curve, see RFC +// 5656, section 6.2.1 +func ecHash(curve elliptic.Curve) crypto.Hash { + bitSize := curve.Params().BitSize + switch { + case bitSize <= 256: + return crypto.SHA256 + case bitSize <= 384: + return crypto.SHA384 + } + return crypto.SHA512 +} + +// parseECDSA parses an ECDSA key according to RFC 5656, section 3.1. +func parseECDSA(in []byte) (out PublicKey, rest []byte, err error) { + var w struct { + Curve string + KeyBytes []byte + Rest []byte `ssh:"rest"` + } + + if err := Unmarshal(in, &w); err != nil { + return nil, nil, err + } + + key := new(ecdsa.PublicKey) + + switch w.Curve { + case "nistp256": + key.Curve = elliptic.P256() + case "nistp384": + key.Curve = elliptic.P384() + case "nistp521": + key.Curve = elliptic.P521() + default: + return nil, nil, errors.New("ssh: unsupported curve") + } + + key.X, key.Y = elliptic.Unmarshal(key.Curve, w.KeyBytes) + if key.X == nil || key.Y == nil { + return nil, nil, errors.New("ssh: invalid curve point") + } + return (*ecdsaPublicKey)(key), w.Rest, nil +} + +func (k *ecdsaPublicKey) Marshal() []byte { + // See RFC 5656, section 3.1. + keyBytes := elliptic.Marshal(k.Curve, k.X, k.Y) + // ECDSA publickey struct layout should match the struct used by + // parseECDSACert in the x/crypto/ssh/agent package. + w := struct { + Name string + ID string + Key []byte + }{ + k.Type(), + k.nistID(), + keyBytes, + } + + return Marshal(&w) +} + +func (k *ecdsaPublicKey) Verify(data []byte, sig *Signature) error { + if sig.Format != k.Type() { + return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type()) + } + + h := ecHash(k.Curve).New() + h.Write(data) + digest := h.Sum(nil) + + // Per RFC 5656, section 3.1.2, + // The ecdsa_signature_blob value has the following specific encoding: + // mpint r + // mpint s + var ecSig struct { + R *big.Int + S *big.Int + } + + if err := Unmarshal(sig.Blob, &ecSig); err != nil { + return err + } + + if ecdsa.Verify((*ecdsa.PublicKey)(k), digest, ecSig.R, ecSig.S) { + return nil + } + return errors.New("ssh: signature did not verify") +} + +func (k *ecdsaPublicKey) CryptoPublicKey() crypto.PublicKey { + return (*ecdsa.PublicKey)(k) +} + +// skFields holds the additional fields present in U2F/FIDO2 signatures. +// See openssh/PROTOCOL.u2f 'SSH U2F Signatures' for details. +type skFields struct { + // Flags contains U2F/FIDO2 flags such as 'user present' + Flags byte + // Counter is a monotonic signature counter which can be + // used to detect concurrent use of a private key, should + // it be extracted from hardware. + Counter uint32 +} + +type skECDSAPublicKey struct { + // application is a URL-like string, typically "ssh:" for SSH. + // see openssh/PROTOCOL.u2f for details. 
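+	// The application string is not just metadata: Verify hashes it
+	// into the blob that the authenticator signed, alongside the
+	// message digest and the flags/counter fields.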
+ application string + ecdsa.PublicKey +} + +func (k *skECDSAPublicKey) Type() string { + return KeyAlgoSKECDSA256 +} + +func (k *skECDSAPublicKey) nistID() string { + return "nistp256" +} + +func parseSKECDSA(in []byte) (out PublicKey, rest []byte, err error) { + var w struct { + Curve string + KeyBytes []byte + Application string + Rest []byte `ssh:"rest"` + } + + if err := Unmarshal(in, &w); err != nil { + return nil, nil, err + } + + key := new(skECDSAPublicKey) + key.application = w.Application + + if w.Curve != "nistp256" { + return nil, nil, errors.New("ssh: unsupported curve") + } + key.Curve = elliptic.P256() + + key.X, key.Y = elliptic.Unmarshal(key.Curve, w.KeyBytes) + if key.X == nil || key.Y == nil { + return nil, nil, errors.New("ssh: invalid curve point") + } + + return key, w.Rest, nil +} + +func (k *skECDSAPublicKey) Marshal() []byte { + // See RFC 5656, section 3.1. + keyBytes := elliptic.Marshal(k.Curve, k.X, k.Y) + w := struct { + Name string + ID string + Key []byte + Application string + }{ + k.Type(), + k.nistID(), + keyBytes, + k.application, + } + + return Marshal(&w) +} + +func (k *skECDSAPublicKey) Verify(data []byte, sig *Signature) error { + if sig.Format != k.Type() { + return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type()) + } + + h := ecHash(k.Curve).New() + h.Write([]byte(k.application)) + appDigest := h.Sum(nil) + + h.Reset() + h.Write(data) + dataDigest := h.Sum(nil) + + var ecSig struct { + R *big.Int + S *big.Int + } + if err := Unmarshal(sig.Blob, &ecSig); err != nil { + return err + } + + var skf skFields + if err := Unmarshal(sig.Rest, &skf); err != nil { + return err + } + + blob := struct { + ApplicationDigest []byte `ssh:"rest"` + Flags byte + Counter uint32 + MessageDigest []byte `ssh:"rest"` + }{ + appDigest, + skf.Flags, + skf.Counter, + dataDigest, + } + + original := Marshal(blob) + + h.Reset() + h.Write(original) + digest := h.Sum(nil) + + if ecdsa.Verify((*ecdsa.PublicKey)(&k.PublicKey), digest, ecSig.R, ecSig.S) { + return nil + } + return errors.New("ssh: signature did not verify") +} + +type skEd25519PublicKey struct { + // application is a URL-like string, typically "ssh:" for SSH. + // see openssh/PROTOCOL.u2f for details. 
+ application string + ed25519.PublicKey +} + +func (k *skEd25519PublicKey) Type() string { + return KeyAlgoSKED25519 +} + +func parseSKEd25519(in []byte) (out PublicKey, rest []byte, err error) { + var w struct { + KeyBytes []byte + Application string + Rest []byte `ssh:"rest"` + } + + if err := Unmarshal(in, &w); err != nil { + return nil, nil, err + } + + key := new(skEd25519PublicKey) + key.application = w.Application + key.PublicKey = ed25519.PublicKey(w.KeyBytes) + + return key, w.Rest, nil +} + +func (k *skEd25519PublicKey) Marshal() []byte { + w := struct { + Name string + KeyBytes []byte + Application string + }{ + KeyAlgoSKED25519, + []byte(k.PublicKey), + k.application, + } + return Marshal(&w) +} + +func (k *skEd25519PublicKey) Verify(data []byte, sig *Signature) error { + if sig.Format != k.Type() { + return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type()) + } + + h := sha256.New() + h.Write([]byte(k.application)) + appDigest := h.Sum(nil) + + h.Reset() + h.Write(data) + dataDigest := h.Sum(nil) + + var edSig struct { + Signature []byte `ssh:"rest"` + } + + if err := Unmarshal(sig.Blob, &edSig); err != nil { + return err + } + + var skf skFields + if err := Unmarshal(sig.Rest, &skf); err != nil { + return err + } + + blob := struct { + ApplicationDigest []byte `ssh:"rest"` + Flags byte + Counter uint32 + MessageDigest []byte `ssh:"rest"` + }{ + appDigest, + skf.Flags, + skf.Counter, + dataDigest, + } + + original := Marshal(blob) + + edKey := (ed25519.PublicKey)(k.PublicKey) + if ok := ed25519.Verify(edKey, original, edSig.Signature); !ok { + return errors.New("ssh: signature did not verify") + } + + return nil +} + +// NewSignerFromKey takes an *rsa.PrivateKey, *dsa.PrivateKey, +// *ecdsa.PrivateKey or any other crypto.Signer and returns a +// corresponding Signer instance. ECDSA keys must use P-256, P-384 or +// P-521. DSA keys must use parameter size L1024N160. +func NewSignerFromKey(key interface{}) (Signer, error) { + switch key := key.(type) { + case crypto.Signer: + return NewSignerFromSigner(key) + case *dsa.PrivateKey: + return newDSAPrivateKey(key) + default: + return nil, fmt.Errorf("ssh: unsupported key type %T", key) + } +} + +func newDSAPrivateKey(key *dsa.PrivateKey) (Signer, error) { + if err := checkDSAParams(&key.PublicKey.Parameters); err != nil { + return nil, err + } + + return &dsaPrivateKey{key}, nil +} + +type wrappedSigner struct { + signer crypto.Signer + pubKey PublicKey +} + +// NewSignerFromSigner takes any crypto.Signer implementation and +// returns a corresponding Signer interface. This can be used, for +// example, with keys kept in hardware modules. 
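+//
+// A minimal sketch with a software key (a hardware-backed
+// crypto.Signer would be wrapped identically):
+//
+//	priv, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+//	signer, _ := NewSignerFromSigner(priv)
+//	// signer.PublicKey().Type() == "ecdsa-sha2-nistp256"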
+func NewSignerFromSigner(signer crypto.Signer) (Signer, error) { + pubKey, err := NewPublicKey(signer.Public()) + if err != nil { + return nil, err + } + + return &wrappedSigner{signer, pubKey}, nil +} + +func (s *wrappedSigner) PublicKey() PublicKey { + return s.pubKey +} + +func (s *wrappedSigner) Sign(rand io.Reader, data []byte) (*Signature, error) { + return s.SignWithAlgorithm(rand, data, "") +} + +func (s *wrappedSigner) SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error) { + var hashFunc crypto.Hash + + if _, ok := s.pubKey.(*rsaPublicKey); ok { + // RSA keys support a few hash functions determined by the requested signature algorithm + switch algorithm { + case "", SigAlgoRSA: + algorithm = SigAlgoRSA + hashFunc = crypto.SHA1 + case SigAlgoRSASHA2256: + hashFunc = crypto.SHA256 + case SigAlgoRSASHA2512: + hashFunc = crypto.SHA512 + default: + return nil, fmt.Errorf("ssh: unsupported signature algorithm %s", algorithm) + } + } else { + // The only supported algorithm for all other key types is the same as the type of the key + if algorithm == "" { + algorithm = s.pubKey.Type() + } else if algorithm != s.pubKey.Type() { + return nil, fmt.Errorf("ssh: unsupported signature algorithm %s", algorithm) + } + + switch key := s.pubKey.(type) { + case *dsaPublicKey: + hashFunc = crypto.SHA1 + case *ecdsaPublicKey: + hashFunc = ecHash(key.Curve) + case ed25519PublicKey: + default: + return nil, fmt.Errorf("ssh: unsupported key type %T", key) + } + } + + var digest []byte + if hashFunc != 0 { + h := hashFunc.New() + h.Write(data) + digest = h.Sum(nil) + } else { + digest = data + } + + signature, err := s.signer.Sign(rand, digest, hashFunc) + if err != nil { + return nil, err + } + + // crypto.Signer.Sign is expected to return an ASN.1-encoded signature + // for ECDSA and DSA, but that's not the encoding expected by SSH, so + // re-encode. + switch s.pubKey.(type) { + case *ecdsaPublicKey, *dsaPublicKey: + type asn1Signature struct { + R, S *big.Int + } + asn1Sig := new(asn1Signature) + _, err := asn1.Unmarshal(signature, asn1Sig) + if err != nil { + return nil, err + } + + switch s.pubKey.(type) { + case *ecdsaPublicKey: + signature = Marshal(asn1Sig) + + case *dsaPublicKey: + signature = make([]byte, 40) + r := asn1Sig.R.Bytes() + s := asn1Sig.S.Bytes() + copy(signature[20-len(r):20], r) + copy(signature[40-len(s):40], s) + } + } + + return &Signature{ + Format: algorithm, + Blob: signature, + }, nil +} + +// NewPublicKey takes an *rsa.PublicKey, *dsa.PublicKey, *ecdsa.PublicKey, +// or ed25519.PublicKey returns a corresponding PublicKey instance. +// ECDSA keys must use P-256, P-384 or P-521. +func NewPublicKey(key interface{}) (PublicKey, error) { + switch key := key.(type) { + case *rsa.PublicKey: + return (*rsaPublicKey)(key), nil + case *ecdsa.PublicKey: + if !supportedEllipticCurve(key.Curve) { + return nil, errors.New("ssh: only P-256, P-384 and P-521 EC keys are supported") + } + return (*ecdsaPublicKey)(key), nil + case *dsa.PublicKey: + return (*dsaPublicKey)(key), nil + case ed25519.PublicKey: + return (ed25519PublicKey)(key), nil + default: + return nil, fmt.Errorf("ssh: unsupported key type %T", key) + } +} + +// ParsePrivateKey returns a Signer from a PEM encoded private key. It supports +// the same keys as ParseRawPrivateKey. If the private key is encrypted, it +// will return a PassphraseMissingError. 
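+//
+// A minimal sketch of the usual call pattern (pemBytes and
+// passphrase are assumed to come from the caller):
+//
+//	signer, err := ParsePrivateKey(pemBytes)
+//	if _, ok := err.(*PassphraseMissingError); ok {
+//		signer, err = ParsePrivateKeyWithPassphrase(pemBytes, passphrase)
+//	}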
+func ParsePrivateKey(pemBytes []byte) (Signer, error) { + key, err := ParseRawPrivateKey(pemBytes) + if err != nil { + return nil, err + } + + return NewSignerFromKey(key) +} + +// ParsePrivateKeyWithPassphrase returns a Signer from a PEM encoded private +// key and passphrase. It supports the same keys as +// ParseRawPrivateKeyWithPassphrase. +func ParsePrivateKeyWithPassphrase(pemBytes, passphrase []byte) (Signer, error) { + key, err := ParseRawPrivateKeyWithPassphrase(pemBytes, passphrase) + if err != nil { + return nil, err + } + + return NewSignerFromKey(key) +} + +// encryptedBlock tells whether a private key is +// encrypted by examining its Proc-Type header +// for a mention of ENCRYPTED +// according to RFC 1421 Section 4.6.1.1. +func encryptedBlock(block *pem.Block) bool { + return strings.Contains(block.Headers["Proc-Type"], "ENCRYPTED") +} + +// A PassphraseMissingError indicates that parsing this private key requires a +// passphrase. Use ParsePrivateKeyWithPassphrase. +type PassphraseMissingError struct { + // PublicKey will be set if the private key format includes an unencrypted + // public key along with the encrypted private key. + PublicKey PublicKey +} + +func (*PassphraseMissingError) Error() string { + return "ssh: this private key is passphrase protected" +} + +// ParseRawPrivateKey returns a private key from a PEM encoded private key. It +// supports RSA (PKCS#1), PKCS#8, DSA (OpenSSL), and ECDSA private keys. If the +// private key is encrypted, it will return a PassphraseMissingError. +func ParseRawPrivateKey(pemBytes []byte) (interface{}, error) { + block, _ := pem.Decode(pemBytes) + if block == nil { + return nil, errors.New("ssh: no key found") + } + + if encryptedBlock(block) { + return nil, &PassphraseMissingError{} + } + + switch block.Type { + case "RSA PRIVATE KEY": + return x509.ParsePKCS1PrivateKey(block.Bytes) + // RFC5208 - https://tools.ietf.org/html/rfc5208 + case "PRIVATE KEY": + return x509.ParsePKCS8PrivateKey(block.Bytes) + case "EC PRIVATE KEY": + return x509.ParseECPrivateKey(block.Bytes) + case "DSA PRIVATE KEY": + return ParseDSAPrivateKey(block.Bytes) + case "OPENSSH PRIVATE KEY": + return parseOpenSSHPrivateKey(block.Bytes, unencryptedOpenSSHKey) + default: + return nil, fmt.Errorf("ssh: unsupported key type %q", block.Type) + } +} + +// ParseRawPrivateKeyWithPassphrase returns a private key decrypted with +// passphrase from a PEM encoded private key. If the passphrase is wrong, it +// will return x509.IncorrectPasswordError. 
+func ParseRawPrivateKeyWithPassphrase(pemBytes, passphrase []byte) (interface{}, error) { + block, _ := pem.Decode(pemBytes) + if block == nil { + return nil, errors.New("ssh: no key found") + } + + if block.Type == "OPENSSH PRIVATE KEY" { + return parseOpenSSHPrivateKey(block.Bytes, passphraseProtectedOpenSSHKey(passphrase)) + } + + if !encryptedBlock(block) || !x509.IsEncryptedPEMBlock(block) { + return nil, errors.New("ssh: not an encrypted key") + } + + buf, err := x509.DecryptPEMBlock(block, passphrase) + if err != nil { + if err == x509.IncorrectPasswordError { + return nil, err + } + return nil, fmt.Errorf("ssh: cannot decode encrypted private keys: %v", err) + } + + switch block.Type { + case "RSA PRIVATE KEY": + return x509.ParsePKCS1PrivateKey(buf) + case "EC PRIVATE KEY": + return x509.ParseECPrivateKey(buf) + case "DSA PRIVATE KEY": + return ParseDSAPrivateKey(buf) + default: + return nil, fmt.Errorf("ssh: unsupported key type %q", block.Type) + } +} + +// ParseDSAPrivateKey returns a DSA private key from its ASN.1 DER encoding, as +// specified by the OpenSSL DSA man page. +func ParseDSAPrivateKey(der []byte) (*dsa.PrivateKey, error) { + var k struct { + Version int + P *big.Int + Q *big.Int + G *big.Int + Pub *big.Int + Priv *big.Int + } + rest, err := asn1.Unmarshal(der, &k) + if err != nil { + return nil, errors.New("ssh: failed to parse DSA key: " + err.Error()) + } + if len(rest) > 0 { + return nil, errors.New("ssh: garbage after DSA key") + } + + return &dsa.PrivateKey{ + PublicKey: dsa.PublicKey{ + Parameters: dsa.Parameters{ + P: k.P, + Q: k.Q, + G: k.G, + }, + Y: k.Pub, + }, + X: k.Priv, + }, nil +} + +func unencryptedOpenSSHKey(cipherName, kdfName, kdfOpts string, privKeyBlock []byte) ([]byte, error) { + if kdfName != "none" || cipherName != "none" { + return nil, &PassphraseMissingError{} + } + if kdfOpts != "" { + return nil, errors.New("ssh: invalid openssh private key") + } + return privKeyBlock, nil +} + +func passphraseProtectedOpenSSHKey(passphrase []byte) openSSHDecryptFunc { + return func(cipherName, kdfName, kdfOpts string, privKeyBlock []byte) ([]byte, error) { + if kdfName == "none" || cipherName == "none" { + return nil, errors.New("ssh: key is not password protected") + } + if kdfName != "bcrypt" { + return nil, fmt.Errorf("ssh: unknown KDF %q, only supports %q", kdfName, "bcrypt") + } + + var opts struct { + Salt string + Rounds uint32 + } + if err := Unmarshal([]byte(kdfOpts), &opts); err != nil { + return nil, err + } + + k, err := bcrypt_pbkdf.Key(passphrase, []byte(opts.Salt), int(opts.Rounds), 32+16) + if err != nil { + return nil, err + } + key, iv := k[:32], k[32:] + + if cipherName != "aes256-ctr" { + return nil, fmt.Errorf("ssh: unknown cipher %q, only supports %q", cipherName, "aes256-ctr") + } + c, err := aes.NewCipher(key) + if err != nil { + return nil, err + } + ctr := cipher.NewCTR(c, iv) + ctr.XORKeyStream(privKeyBlock, privKeyBlock) + + return privKeyBlock, nil + } +} + +type openSSHDecryptFunc func(CipherName, KdfName, KdfOpts string, PrivKeyBlock []byte) ([]byte, error) + +// parseOpenSSHPrivateKey parses an OpenSSH private key, using the decrypt +// function to unwrap the encrypted portion. unencryptedOpenSSHKey can be used +// as the decrypt function to parse an unencrypted private key. See +// https://github.com/openssh/openssh-portable/blob/master/PROTOCOL.key. 
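+//
+// For reference, the unwrapped container is laid out as follows
+// (see PROTOCOL.key):
+//
+//	byte[]	"openssh-key-v1\x00" magic
+//	string	ciphername
+//	string	kdfname
+//	string	kdfoptions
+//	uint32	number of keys (only 1 is supported)
+//	string	public key blob
+//	string	encrypted private key block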
+func parseOpenSSHPrivateKey(key []byte, decrypt openSSHDecryptFunc) (crypto.PrivateKey, error) { + const magic = "openssh-key-v1\x00" + if len(key) < len(magic) || string(key[:len(magic)]) != magic { + return nil, errors.New("ssh: invalid openssh private key format") + } + remaining := key[len(magic):] + + var w struct { + CipherName string + KdfName string + KdfOpts string + NumKeys uint32 + PubKey []byte + PrivKeyBlock []byte + } + + if err := Unmarshal(remaining, &w); err != nil { + return nil, err + } + if w.NumKeys != 1 { + // We only support single key files, and so does OpenSSH. + // https://github.com/openssh/openssh-portable/blob/4103a3ec7/sshkey.c#L4171 + return nil, errors.New("ssh: multi-key files are not supported") + } + + privKeyBlock, err := decrypt(w.CipherName, w.KdfName, w.KdfOpts, w.PrivKeyBlock) + if err != nil { + if err, ok := err.(*PassphraseMissingError); ok { + pub, errPub := ParsePublicKey(w.PubKey) + if errPub != nil { + return nil, fmt.Errorf("ssh: failed to parse embedded public key: %v", errPub) + } + err.PublicKey = pub + } + return nil, err + } + + pk1 := struct { + Check1 uint32 + Check2 uint32 + Keytype string + Rest []byte `ssh:"rest"` + }{} + + if err := Unmarshal(privKeyBlock, &pk1); err != nil || pk1.Check1 != pk1.Check2 { + if w.CipherName != "none" { + return nil, x509.IncorrectPasswordError + } + return nil, errors.New("ssh: malformed OpenSSH key") + } + + // we only handle ed25519 and rsa keys currently + switch pk1.Keytype { + case KeyAlgoRSA: + // https://github.com/openssh/openssh-portable/blob/master/sshkey.c#L2760-L2773 + key := struct { + N *big.Int + E *big.Int + D *big.Int + Iqmp *big.Int + P *big.Int + Q *big.Int + Comment string + Pad []byte `ssh:"rest"` + }{} + + if err := Unmarshal(pk1.Rest, &key); err != nil { + return nil, err + } + + if err := checkOpenSSHKeyPadding(key.Pad); err != nil { + return nil, err + } + + pk := &rsa.PrivateKey{ + PublicKey: rsa.PublicKey{ + N: key.N, + E: int(key.E.Int64()), + }, + D: key.D, + Primes: []*big.Int{key.P, key.Q}, + } + + if err := pk.Validate(); err != nil { + return nil, err + } + + pk.Precompute() + + return pk, nil + case KeyAlgoED25519: + key := struct { + Pub []byte + Priv []byte + Comment string + Pad []byte `ssh:"rest"` + }{} + + if err := Unmarshal(pk1.Rest, &key); err != nil { + return nil, err + } + + if len(key.Priv) != ed25519.PrivateKeySize { + return nil, errors.New("ssh: private key unexpected length") + } + + if err := checkOpenSSHKeyPadding(key.Pad); err != nil { + return nil, err + } + + pk := ed25519.PrivateKey(make([]byte, ed25519.PrivateKeySize)) + copy(pk, key.Priv) + return &pk, nil + default: + return nil, errors.New("ssh: unhandled key type") + } +} + +func checkOpenSSHKeyPadding(pad []byte) error { + for i, b := range pad { + if int(b) != i+1 { + return errors.New("ssh: padding not as expected") + } + } + return nil +} + +// FingerprintLegacyMD5 returns the user presentation of the key's +// fingerprint as described by RFC 4716 section 4. +func FingerprintLegacyMD5(pubKey PublicKey) string { + md5sum := md5.Sum(pubKey.Marshal()) + hexarray := make([]string, len(md5sum)) + for i, c := range md5sum { + hexarray[i] = hex.EncodeToString([]byte{c}) + } + return strings.Join(hexarray, ":") +} + +// FingerprintSHA256 returns the user presentation of the key's +// fingerprint as unpadded base64 encoded sha256 hash. +// This format was introduced from OpenSSH 6.8. 
+// https://www.openssh.com/txt/release-6.8 +// https://tools.ietf.org/html/rfc4648#section-3.2 (unpadded base64 encoding) +func FingerprintSHA256(pubKey PublicKey) string { + sha256sum := sha256.Sum256(pubKey.Marshal()) + hash := base64.RawStdEncoding.EncodeToString(sha256sum[:]) + return "SHA256:" + hash +} diff --git a/vendor/golang.org/x/crypto/ssh/mac.go b/vendor/golang.org/x/crypto/ssh/mac.go new file mode 100644 index 00000000000..c07a06285e6 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/mac.go @@ -0,0 +1,61 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +// Message authentication support + +import ( + "crypto/hmac" + "crypto/sha1" + "crypto/sha256" + "hash" +) + +type macMode struct { + keySize int + etm bool + new func(key []byte) hash.Hash +} + +// truncatingMAC wraps around a hash.Hash and truncates the output digest to +// a given size. +type truncatingMAC struct { + length int + hmac hash.Hash +} + +func (t truncatingMAC) Write(data []byte) (int, error) { + return t.hmac.Write(data) +} + +func (t truncatingMAC) Sum(in []byte) []byte { + out := t.hmac.Sum(in) + return out[:len(in)+t.length] +} + +func (t truncatingMAC) Reset() { + t.hmac.Reset() +} + +func (t truncatingMAC) Size() int { + return t.length +} + +func (t truncatingMAC) BlockSize() int { return t.hmac.BlockSize() } + +var macModes = map[string]*macMode{ + "hmac-sha2-256-etm@openssh.com": {32, true, func(key []byte) hash.Hash { + return hmac.New(sha256.New, key) + }}, + "hmac-sha2-256": {32, false, func(key []byte) hash.Hash { + return hmac.New(sha256.New, key) + }}, + "hmac-sha1": {20, false, func(key []byte) hash.Hash { + return hmac.New(sha1.New, key) + }}, + "hmac-sha1-96": {20, false, func(key []byte) hash.Hash { + return truncatingMAC{12, hmac.New(sha1.New, key)} + }}, +} diff --git a/vendor/golang.org/x/crypto/ssh/messages.go b/vendor/golang.org/x/crypto/ssh/messages.go new file mode 100644 index 00000000000..ac41a4168bf --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/messages.go @@ -0,0 +1,866 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + "math/big" + "reflect" + "strconv" + "strings" +) + +// These are SSH message type numbers. They are scattered around several +// documents but many were taken from [SSH-PARAMETERS]. +const ( + msgIgnore = 2 + msgUnimplemented = 3 + msgDebug = 4 + msgNewKeys = 21 +) + +// SSH messages: +// +// These structures mirror the wire format of the corresponding SSH messages. +// They are marshaled using reflection with the marshal and unmarshal functions +// in this file. The only wrinkle is that a final member of type []byte with a +// ssh tag of "rest" receives the remainder of a packet when unmarshaling. + +// See RFC 4253, section 11.1. +const msgDisconnect = 1 + +// disconnectMsg is the message that signals a disconnect. It is also +// the error type returned from mux.Wait() +type disconnectMsg struct { + Reason uint32 `sshtype:"1"` + Message string + Language string +} + +func (d *disconnectMsg) Error() string { + return fmt.Sprintf("ssh: disconnect, reason %d: %s", d.Reason, d.Message) +} + +// See RFC 4253, section 7.1. 
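+// The 16-byte Cookie must be filled with random data by the sender,
+// so that neither side can fully determine the session identifier on
+// its own (RFC 4253, section 7.1).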
+const msgKexInit = 20 + +type kexInitMsg struct { + Cookie [16]byte `sshtype:"20"` + KexAlgos []string + ServerHostKeyAlgos []string + CiphersClientServer []string + CiphersServerClient []string + MACsClientServer []string + MACsServerClient []string + CompressionClientServer []string + CompressionServerClient []string + LanguagesClientServer []string + LanguagesServerClient []string + FirstKexFollows bool + Reserved uint32 +} + +// See RFC 4253, section 8. + +// Diffie-Helman +const msgKexDHInit = 30 + +type kexDHInitMsg struct { + X *big.Int `sshtype:"30"` +} + +const msgKexECDHInit = 30 + +type kexECDHInitMsg struct { + ClientPubKey []byte `sshtype:"30"` +} + +const msgKexECDHReply = 31 + +type kexECDHReplyMsg struct { + HostKey []byte `sshtype:"31"` + EphemeralPubKey []byte + Signature []byte +} + +const msgKexDHReply = 31 + +type kexDHReplyMsg struct { + HostKey []byte `sshtype:"31"` + Y *big.Int + Signature []byte +} + +// See RFC 4419, section 5. +const msgKexDHGexGroup = 31 + +type kexDHGexGroupMsg struct { + P *big.Int `sshtype:"31"` + G *big.Int +} + +const msgKexDHGexInit = 32 + +type kexDHGexInitMsg struct { + X *big.Int `sshtype:"32"` +} + +const msgKexDHGexReply = 33 + +type kexDHGexReplyMsg struct { + HostKey []byte `sshtype:"33"` + Y *big.Int + Signature []byte +} + +const msgKexDHGexRequest = 34 + +type kexDHGexRequestMsg struct { + MinBits uint32 `sshtype:"34"` + PreferedBits uint32 + MaxBits uint32 +} + +// See RFC 4253, section 10. +const msgServiceRequest = 5 + +type serviceRequestMsg struct { + Service string `sshtype:"5"` +} + +// See RFC 4253, section 10. +const msgServiceAccept = 6 + +type serviceAcceptMsg struct { + Service string `sshtype:"6"` +} + +// See RFC 4252, section 5. +const msgUserAuthRequest = 50 + +type userAuthRequestMsg struct { + User string `sshtype:"50"` + Service string + Method string + Payload []byte `ssh:"rest"` +} + +// Used for debug printouts of packets. +type userAuthSuccessMsg struct { +} + +// See RFC 4252, section 5.1 +const msgUserAuthFailure = 51 + +type userAuthFailureMsg struct { + Methods []string `sshtype:"51"` + PartialSuccess bool +} + +// See RFC 4252, section 5.1 +const msgUserAuthSuccess = 52 + +// See RFC 4252, section 5.4 +const msgUserAuthBanner = 53 + +type userAuthBannerMsg struct { + Message string `sshtype:"53"` + // unused, but required to allow message parsing + Language string +} + +// See RFC 4256, section 3.2 +const msgUserAuthInfoRequest = 60 +const msgUserAuthInfoResponse = 61 + +type userAuthInfoRequestMsg struct { + User string `sshtype:"60"` + Instruction string + DeprecatedLanguage string + NumPrompts uint32 + Prompts []byte `ssh:"rest"` +} + +// See RFC 4254, section 5.1. +const msgChannelOpen = 90 + +type channelOpenMsg struct { + ChanType string `sshtype:"90"` + PeersID uint32 + PeersWindow uint32 + MaxPacketSize uint32 + TypeSpecificData []byte `ssh:"rest"` +} + +const msgChannelExtendedData = 95 +const msgChannelData = 94 + +// Used for debug print outs of packets. +type channelDataMsg struct { + PeersID uint32 `sshtype:"94"` + Length uint32 + Rest []byte `ssh:"rest"` +} + +// See RFC 4254, section 5.1. +const msgChannelOpenConfirm = 91 + +type channelOpenConfirmMsg struct { + PeersID uint32 `sshtype:"91"` + MyID uint32 + MyWindow uint32 + MaxPacketSize uint32 + TypeSpecificData []byte `ssh:"rest"` +} + +// See RFC 4254, section 5.1. 
+const msgChannelOpenFailure = 92 + +type channelOpenFailureMsg struct { + PeersID uint32 `sshtype:"92"` + Reason RejectionReason + Message string + Language string +} + +const msgChannelRequest = 98 + +type channelRequestMsg struct { + PeersID uint32 `sshtype:"98"` + Request string + WantReply bool + RequestSpecificData []byte `ssh:"rest"` +} + +// See RFC 4254, section 5.4. +const msgChannelSuccess = 99 + +type channelRequestSuccessMsg struct { + PeersID uint32 `sshtype:"99"` +} + +// See RFC 4254, section 5.4. +const msgChannelFailure = 100 + +type channelRequestFailureMsg struct { + PeersID uint32 `sshtype:"100"` +} + +// See RFC 4254, section 5.3 +const msgChannelClose = 97 + +type channelCloseMsg struct { + PeersID uint32 `sshtype:"97"` +} + +// See RFC 4254, section 5.3 +const msgChannelEOF = 96 + +type channelEOFMsg struct { + PeersID uint32 `sshtype:"96"` +} + +// See RFC 4254, section 4 +const msgGlobalRequest = 80 + +type globalRequestMsg struct { + Type string `sshtype:"80"` + WantReply bool + Data []byte `ssh:"rest"` +} + +// See RFC 4254, section 4 +const msgRequestSuccess = 81 + +type globalRequestSuccessMsg struct { + Data []byte `ssh:"rest" sshtype:"81"` +} + +// See RFC 4254, section 4 +const msgRequestFailure = 82 + +type globalRequestFailureMsg struct { + Data []byte `ssh:"rest" sshtype:"82"` +} + +// See RFC 4254, section 5.2 +const msgChannelWindowAdjust = 93 + +type windowAdjustMsg struct { + PeersID uint32 `sshtype:"93"` + AdditionalBytes uint32 +} + +// See RFC 4252, section 7 +const msgUserAuthPubKeyOk = 60 + +type userAuthPubKeyOkMsg struct { + Algo string `sshtype:"60"` + PubKey []byte +} + +// See RFC 4462, section 3 +const msgUserAuthGSSAPIResponse = 60 + +type userAuthGSSAPIResponse struct { + SupportMech []byte `sshtype:"60"` +} + +const msgUserAuthGSSAPIToken = 61 + +type userAuthGSSAPIToken struct { + Token []byte `sshtype:"61"` +} + +const msgUserAuthGSSAPIMIC = 66 + +type userAuthGSSAPIMIC struct { + MIC []byte `sshtype:"66"` +} + +// See RFC 4462, section 3.9 +const msgUserAuthGSSAPIErrTok = 64 + +type userAuthGSSAPIErrTok struct { + ErrorToken []byte `sshtype:"64"` +} + +// See RFC 4462, section 3.8 +const msgUserAuthGSSAPIError = 65 + +type userAuthGSSAPIError struct { + MajorStatus uint32 `sshtype:"65"` + MinorStatus uint32 + Message string + LanguageTag string +} + +// typeTags returns the possible type bytes for the given reflect.Type, which +// should be a struct. The possible values are separated by a '|' character. +func typeTags(structType reflect.Type) (tags []byte) { + tagStr := structType.Field(0).Tag.Get("sshtype") + + for _, tag := range strings.Split(tagStr, "|") { + i, err := strconv.Atoi(tag) + if err == nil { + tags = append(tags, byte(i)) + } + } + + return tags +} + +func fieldError(t reflect.Type, field int, problem string) error { + if problem != "" { + problem = ": " + problem + } + return fmt.Errorf("ssh: unmarshal error for field %s of type %s%s", t.Field(field).Name, t.Name(), problem) +} + +var errShortRead = errors.New("ssh: short read") + +// Unmarshal parses data in SSH wire format into a structure. The out +// argument should be a pointer to struct. If the first member of the +// struct has the "sshtype" tag set to a '|'-separated set of numbers +// in decimal, the packet must start with one of those numbers. In +// case of error, Unmarshal returns a ParseError or +// UnexpectedMessageError. 
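+//
+// A minimal sketch (packet is assumed to hold a complete wire
+// message):
+//
+//	var req serviceRequestMsg // tagged sshtype:"5"
+//	if err := Unmarshal(packet, &req); err != nil {
+//		// not a service request, or malformed
+//	}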
+func Unmarshal(data []byte, out interface{}) error { + v := reflect.ValueOf(out).Elem() + structType := v.Type() + expectedTypes := typeTags(structType) + + var expectedType byte + if len(expectedTypes) > 0 { + expectedType = expectedTypes[0] + } + + if len(data) == 0 { + return parseError(expectedType) + } + + if len(expectedTypes) > 0 { + goodType := false + for _, e := range expectedTypes { + if e > 0 && data[0] == e { + goodType = true + break + } + } + if !goodType { + return fmt.Errorf("ssh: unexpected message type %d (expected one of %v)", data[0], expectedTypes) + } + data = data[1:] + } + + var ok bool + for i := 0; i < v.NumField(); i++ { + field := v.Field(i) + t := field.Type() + switch t.Kind() { + case reflect.Bool: + if len(data) < 1 { + return errShortRead + } + field.SetBool(data[0] != 0) + data = data[1:] + case reflect.Array: + if t.Elem().Kind() != reflect.Uint8 { + return fieldError(structType, i, "array of unsupported type") + } + if len(data) < t.Len() { + return errShortRead + } + for j, n := 0, t.Len(); j < n; j++ { + field.Index(j).Set(reflect.ValueOf(data[j])) + } + data = data[t.Len():] + case reflect.Uint64: + var u64 uint64 + if u64, data, ok = parseUint64(data); !ok { + return errShortRead + } + field.SetUint(u64) + case reflect.Uint32: + var u32 uint32 + if u32, data, ok = parseUint32(data); !ok { + return errShortRead + } + field.SetUint(uint64(u32)) + case reflect.Uint8: + if len(data) < 1 { + return errShortRead + } + field.SetUint(uint64(data[0])) + data = data[1:] + case reflect.String: + var s []byte + if s, data, ok = parseString(data); !ok { + return fieldError(structType, i, "") + } + field.SetString(string(s)) + case reflect.Slice: + switch t.Elem().Kind() { + case reflect.Uint8: + if structType.Field(i).Tag.Get("ssh") == "rest" { + field.Set(reflect.ValueOf(data)) + data = nil + } else { + var s []byte + if s, data, ok = parseString(data); !ok { + return errShortRead + } + field.Set(reflect.ValueOf(s)) + } + case reflect.String: + var nl []string + if nl, data, ok = parseNameList(data); !ok { + return errShortRead + } + field.Set(reflect.ValueOf(nl)) + default: + return fieldError(structType, i, "slice of unsupported type") + } + case reflect.Ptr: + if t == bigIntType { + var n *big.Int + if n, data, ok = parseInt(data); !ok { + return errShortRead + } + field.Set(reflect.ValueOf(n)) + } else { + return fieldError(structType, i, "pointer to unsupported type") + } + default: + return fieldError(structType, i, fmt.Sprintf("unsupported type: %v", t)) + } + } + + if len(data) != 0 { + return parseError(expectedType) + } + + return nil +} + +// Marshal serializes the message in msg to SSH wire format. The msg +// argument should be a struct or pointer to struct. If the first +// member has the "sshtype" tag set to a number in decimal, that +// number is prepended to the result. If the last of member has the +// "ssh" tag set to "rest", its contents are appended to the output. 
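+//
+// For example:
+//
+//	packet := Marshal(&serviceRequestMsg{Service: "ssh-userauth"})
+//	// packet[0] == msgServiceRequest (5), per the "sshtype" tag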
+func Marshal(msg interface{}) []byte { + out := make([]byte, 0, 64) + return marshalStruct(out, msg) +} + +func marshalStruct(out []byte, msg interface{}) []byte { + v := reflect.Indirect(reflect.ValueOf(msg)) + msgTypes := typeTags(v.Type()) + if len(msgTypes) > 0 { + out = append(out, msgTypes[0]) + } + + for i, n := 0, v.NumField(); i < n; i++ { + field := v.Field(i) + switch t := field.Type(); t.Kind() { + case reflect.Bool: + var v uint8 + if field.Bool() { + v = 1 + } + out = append(out, v) + case reflect.Array: + if t.Elem().Kind() != reflect.Uint8 { + panic(fmt.Sprintf("array of non-uint8 in field %d: %T", i, field.Interface())) + } + for j, l := 0, t.Len(); j < l; j++ { + out = append(out, uint8(field.Index(j).Uint())) + } + case reflect.Uint32: + out = appendU32(out, uint32(field.Uint())) + case reflect.Uint64: + out = appendU64(out, uint64(field.Uint())) + case reflect.Uint8: + out = append(out, uint8(field.Uint())) + case reflect.String: + s := field.String() + out = appendInt(out, len(s)) + out = append(out, s...) + case reflect.Slice: + switch t.Elem().Kind() { + case reflect.Uint8: + if v.Type().Field(i).Tag.Get("ssh") != "rest" { + out = appendInt(out, field.Len()) + } + out = append(out, field.Bytes()...) + case reflect.String: + offset := len(out) + out = appendU32(out, 0) + if n := field.Len(); n > 0 { + for j := 0; j < n; j++ { + f := field.Index(j) + if j != 0 { + out = append(out, ',') + } + out = append(out, f.String()...) + } + // overwrite length value + binary.BigEndian.PutUint32(out[offset:], uint32(len(out)-offset-4)) + } + default: + panic(fmt.Sprintf("slice of unknown type in field %d: %T", i, field.Interface())) + } + case reflect.Ptr: + if t == bigIntType { + var n *big.Int + nValue := reflect.ValueOf(&n) + nValue.Elem().Set(field) + needed := intLength(n) + oldLength := len(out) + + if cap(out)-len(out) < needed { + newOut := make([]byte, len(out), 2*(len(out)+needed)) + copy(newOut, out) + out = newOut + } + out = out[:oldLength+needed] + marshalInt(out[oldLength:], n) + } else { + panic(fmt.Sprintf("pointer to unknown type in field %d: %T", i, field.Interface())) + } + } + } + + return out +} + +var bigOne = big.NewInt(1) + +func parseString(in []byte) (out, rest []byte, ok bool) { + if len(in) < 4 { + return + } + length := binary.BigEndian.Uint32(in) + in = in[4:] + if uint32(len(in)) < length { + return + } + out = in[:length] + rest = in[length:] + ok = true + return +} + +var ( + comma = []byte{','} + emptyNameList = []string{} +) + +func parseNameList(in []byte) (out []string, rest []byte, ok bool) { + contents, rest, ok := parseString(in) + if !ok { + return + } + if len(contents) == 0 { + out = emptyNameList + return + } + parts := bytes.Split(contents, comma) + out = make([]string, len(parts)) + for i, part := range parts { + out[i] = string(part) + } + return +} + +func parseInt(in []byte) (out *big.Int, rest []byte, ok bool) { + contents, rest, ok := parseString(in) + if !ok { + return + } + out = new(big.Int) + + if len(contents) > 0 && contents[0]&0x80 == 0x80 { + // This is a negative number + notBytes := make([]byte, len(contents)) + for i := range notBytes { + notBytes[i] = ^contents[i] + } + out.SetBytes(notBytes) + out.Add(out, bigOne) + out.Neg(out) + } else { + // Positive number + out.SetBytes(contents) + } + ok = true + return +} + +func parseUint32(in []byte) (uint32, []byte, bool) { + if len(in) < 4 { + return 0, nil, false + } + return binary.BigEndian.Uint32(in), in[4:], true +} + +func parseUint64(in []byte) (uint64, []byte, 
bool) { + if len(in) < 8 { + return 0, nil, false + } + return binary.BigEndian.Uint64(in), in[8:], true +} + +func intLength(n *big.Int) int { + length := 4 /* length bytes */ + if n.Sign() < 0 { + nMinus1 := new(big.Int).Neg(n) + nMinus1.Sub(nMinus1, bigOne) + bitLen := nMinus1.BitLen() + if bitLen%8 == 0 { + // The number will need 0xff padding + length++ + } + length += (bitLen + 7) / 8 + } else if n.Sign() == 0 { + // A zero is the zero length string + } else { + bitLen := n.BitLen() + if bitLen%8 == 0 { + // The number will need 0x00 padding + length++ + } + length += (bitLen + 7) / 8 + } + + return length +} + +func marshalUint32(to []byte, n uint32) []byte { + binary.BigEndian.PutUint32(to, n) + return to[4:] +} + +func marshalUint64(to []byte, n uint64) []byte { + binary.BigEndian.PutUint64(to, n) + return to[8:] +} + +func marshalInt(to []byte, n *big.Int) []byte { + lengthBytes := to + to = to[4:] + length := 0 + + if n.Sign() < 0 { + // A negative number has to be converted to two's-complement + // form. So we'll subtract 1 and invert. If the + // most-significant-bit isn't set then we'll need to pad the + // beginning with 0xff in order to keep the number negative. + nMinus1 := new(big.Int).Neg(n) + nMinus1.Sub(nMinus1, bigOne) + bytes := nMinus1.Bytes() + for i := range bytes { + bytes[i] ^= 0xff + } + if len(bytes) == 0 || bytes[0]&0x80 == 0 { + to[0] = 0xff + to = to[1:] + length++ + } + nBytes := copy(to, bytes) + to = to[nBytes:] + length += nBytes + } else if n.Sign() == 0 { + // A zero is the zero length string + } else { + bytes := n.Bytes() + if len(bytes) > 0 && bytes[0]&0x80 != 0 { + // We'll have to pad this with a 0x00 in order to + // stop it looking like a negative number. + to[0] = 0 + to = to[1:] + length++ + } + nBytes := copy(to, bytes) + to = to[nBytes:] + length += nBytes + } + + lengthBytes[0] = byte(length >> 24) + lengthBytes[1] = byte(length >> 16) + lengthBytes[2] = byte(length >> 8) + lengthBytes[3] = byte(length) + return to +} + +func writeInt(w io.Writer, n *big.Int) { + length := intLength(n) + buf := make([]byte, length) + marshalInt(buf, n) + w.Write(buf) +} + +func writeString(w io.Writer, s []byte) { + var lengthBytes [4]byte + lengthBytes[0] = byte(len(s) >> 24) + lengthBytes[1] = byte(len(s) >> 16) + lengthBytes[2] = byte(len(s) >> 8) + lengthBytes[3] = byte(len(s)) + w.Write(lengthBytes[:]) + w.Write(s) +} + +func stringLength(n int) int { + return 4 + n +} + +func marshalString(to []byte, s []byte) []byte { + to[0] = byte(len(s) >> 24) + to[1] = byte(len(s) >> 16) + to[2] = byte(len(s) >> 8) + to[3] = byte(len(s)) + to = to[4:] + copy(to, s) + return to[len(s):] +} + +var bigIntType = reflect.TypeOf((*big.Int)(nil)) + +// Decode a packet into its corresponding message. 
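+// It assumes at least one byte of input; callers must never pass an
+// empty packet.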
+func decode(packet []byte) (interface{}, error) { + var msg interface{} + switch packet[0] { + case msgDisconnect: + msg = new(disconnectMsg) + case msgServiceRequest: + msg = new(serviceRequestMsg) + case msgServiceAccept: + msg = new(serviceAcceptMsg) + case msgKexInit: + msg = new(kexInitMsg) + case msgKexDHInit: + msg = new(kexDHInitMsg) + case msgKexDHReply: + msg = new(kexDHReplyMsg) + case msgUserAuthRequest: + msg = new(userAuthRequestMsg) + case msgUserAuthSuccess: + return new(userAuthSuccessMsg), nil + case msgUserAuthFailure: + msg = new(userAuthFailureMsg) + case msgUserAuthPubKeyOk: + msg = new(userAuthPubKeyOkMsg) + case msgGlobalRequest: + msg = new(globalRequestMsg) + case msgRequestSuccess: + msg = new(globalRequestSuccessMsg) + case msgRequestFailure: + msg = new(globalRequestFailureMsg) + case msgChannelOpen: + msg = new(channelOpenMsg) + case msgChannelData: + msg = new(channelDataMsg) + case msgChannelOpenConfirm: + msg = new(channelOpenConfirmMsg) + case msgChannelOpenFailure: + msg = new(channelOpenFailureMsg) + case msgChannelWindowAdjust: + msg = new(windowAdjustMsg) + case msgChannelEOF: + msg = new(channelEOFMsg) + case msgChannelClose: + msg = new(channelCloseMsg) + case msgChannelRequest: + msg = new(channelRequestMsg) + case msgChannelSuccess: + msg = new(channelRequestSuccessMsg) + case msgChannelFailure: + msg = new(channelRequestFailureMsg) + case msgUserAuthGSSAPIToken: + msg = new(userAuthGSSAPIToken) + case msgUserAuthGSSAPIMIC: + msg = new(userAuthGSSAPIMIC) + case msgUserAuthGSSAPIErrTok: + msg = new(userAuthGSSAPIErrTok) + case msgUserAuthGSSAPIError: + msg = new(userAuthGSSAPIError) + default: + return nil, unexpectedMessageError(0, packet[0]) + } + if err := Unmarshal(packet, msg); err != nil { + return nil, err + } + return msg, nil +} + +var packetTypeNames = map[byte]string{ + msgDisconnect: "disconnectMsg", + msgServiceRequest: "serviceRequestMsg", + msgServiceAccept: "serviceAcceptMsg", + msgKexInit: "kexInitMsg", + msgKexDHInit: "kexDHInitMsg", + msgKexDHReply: "kexDHReplyMsg", + msgUserAuthRequest: "userAuthRequestMsg", + msgUserAuthSuccess: "userAuthSuccessMsg", + msgUserAuthFailure: "userAuthFailureMsg", + msgUserAuthPubKeyOk: "userAuthPubKeyOkMsg", + msgGlobalRequest: "globalRequestMsg", + msgRequestSuccess: "globalRequestSuccessMsg", + msgRequestFailure: "globalRequestFailureMsg", + msgChannelOpen: "channelOpenMsg", + msgChannelData: "channelDataMsg", + msgChannelOpenConfirm: "channelOpenConfirmMsg", + msgChannelOpenFailure: "channelOpenFailureMsg", + msgChannelWindowAdjust: "windowAdjustMsg", + msgChannelEOF: "channelEOFMsg", + msgChannelClose: "channelCloseMsg", + msgChannelRequest: "channelRequestMsg", + msgChannelSuccess: "channelRequestSuccessMsg", + msgChannelFailure: "channelRequestFailureMsg", +} diff --git a/vendor/golang.org/x/crypto/ssh/mux.go b/vendor/golang.org/x/crypto/ssh/mux.go new file mode 100644 index 00000000000..f19016270e8 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/mux.go @@ -0,0 +1,330 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "encoding/binary" + "fmt" + "io" + "log" + "sync" + "sync/atomic" +) + +// debugMux, if set, causes messages in the connection protocol to be +// logged. +const debugMux = false + +// chanList is a thread safe channel list. 
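+// IDs handed out by add are indices into chans (plus the debugging
+// offset), so getChan resolves them with a single slice lookup.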
+type chanList struct { + // protects concurrent access to chans + sync.Mutex + + // chans are indexed by the local id of the channel, which the + // other side should send in the PeersId field. + chans []*channel + + // This is a debugging aid: it offsets all IDs by this + // amount. This helps distinguish otherwise identical + // server/client muxes + offset uint32 +} + +// Assigns a channel ID to the given channel. +func (c *chanList) add(ch *channel) uint32 { + c.Lock() + defer c.Unlock() + for i := range c.chans { + if c.chans[i] == nil { + c.chans[i] = ch + return uint32(i) + c.offset + } + } + c.chans = append(c.chans, ch) + return uint32(len(c.chans)-1) + c.offset +} + +// getChan returns the channel for the given ID. +func (c *chanList) getChan(id uint32) *channel { + id -= c.offset + + c.Lock() + defer c.Unlock() + if id < uint32(len(c.chans)) { + return c.chans[id] + } + return nil +} + +func (c *chanList) remove(id uint32) { + id -= c.offset + c.Lock() + if id < uint32(len(c.chans)) { + c.chans[id] = nil + } + c.Unlock() +} + +// dropAll forgets all channels it knows, returning them in a slice. +func (c *chanList) dropAll() []*channel { + c.Lock() + defer c.Unlock() + var r []*channel + + for _, ch := range c.chans { + if ch == nil { + continue + } + r = append(r, ch) + } + c.chans = nil + return r +} + +// mux represents the state for the SSH connection protocol, which +// multiplexes many channels onto a single packet transport. +type mux struct { + conn packetConn + chanList chanList + + incomingChannels chan NewChannel + + globalSentMu sync.Mutex + globalResponses chan interface{} + incomingRequests chan *Request + + errCond *sync.Cond + err error +} + +// When debugging, each new chanList instantiation has a different +// offset. +var globalOff uint32 + +func (m *mux) Wait() error { + m.errCond.L.Lock() + defer m.errCond.L.Unlock() + for m.err == nil { + m.errCond.Wait() + } + return m.err +} + +// newMux returns a mux that runs over the given connection. +func newMux(p packetConn) *mux { + m := &mux{ + conn: p, + incomingChannels: make(chan NewChannel, chanSize), + globalResponses: make(chan interface{}, 1), + incomingRequests: make(chan *Request, chanSize), + errCond: newCond(), + } + if debugMux { + m.chanList.offset = atomic.AddUint32(&globalOff, 1) + } + + go m.loop() + return m +} + +func (m *mux) sendMessage(msg interface{}) error { + p := Marshal(msg) + if debugMux { + log.Printf("send global(%d): %#v", m.chanList.offset, msg) + } + return m.conn.writePacket(p) +} + +func (m *mux) SendRequest(name string, wantReply bool, payload []byte) (bool, []byte, error) { + if wantReply { + m.globalSentMu.Lock() + defer m.globalSentMu.Unlock() + } + + if err := m.sendMessage(globalRequestMsg{ + Type: name, + WantReply: wantReply, + Data: payload, + }); err != nil { + return false, nil, err + } + + if !wantReply { + return false, nil, nil + } + + msg, ok := <-m.globalResponses + if !ok { + return false, nil, io.EOF + } + switch msg := msg.(type) { + case *globalRequestFailureMsg: + return false, msg.Data, nil + case *globalRequestSuccessMsg: + return true, msg.Data, nil + default: + return false, nil, fmt.Errorf("ssh: unexpected response to request: %#v", msg) + } +} + +// ackRequest must be called after processing a global request that +// has WantReply set. 
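+// Within this package that typically happens through (*Request).Reply.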
+func (m *mux) ackRequest(ok bool, data []byte) error { + if ok { + return m.sendMessage(globalRequestSuccessMsg{Data: data}) + } + return m.sendMessage(globalRequestFailureMsg{Data: data}) +} + +func (m *mux) Close() error { + return m.conn.Close() +} + +// loop runs the connection machine. It will process packets until an +// error is encountered. To synchronize on loop exit, use mux.Wait. +func (m *mux) loop() { + var err error + for err == nil { + err = m.onePacket() + } + + for _, ch := range m.chanList.dropAll() { + ch.close() + } + + close(m.incomingChannels) + close(m.incomingRequests) + close(m.globalResponses) + + m.conn.Close() + + m.errCond.L.Lock() + m.err = err + m.errCond.Broadcast() + m.errCond.L.Unlock() + + if debugMux { + log.Println("loop exit", err) + } +} + +// onePacket reads and processes one packet. +func (m *mux) onePacket() error { + packet, err := m.conn.readPacket() + if err != nil { + return err + } + + if debugMux { + if packet[0] == msgChannelData || packet[0] == msgChannelExtendedData { + log.Printf("decoding(%d): data packet - %d bytes", m.chanList.offset, len(packet)) + } else { + p, _ := decode(packet) + log.Printf("decoding(%d): %d %#v - %d bytes", m.chanList.offset, packet[0], p, len(packet)) + } + } + + switch packet[0] { + case msgChannelOpen: + return m.handleChannelOpen(packet) + case msgGlobalRequest, msgRequestSuccess, msgRequestFailure: + return m.handleGlobalPacket(packet) + } + + // assume a channel packet. + if len(packet) < 5 { + return parseError(packet[0]) + } + id := binary.BigEndian.Uint32(packet[1:]) + ch := m.chanList.getChan(id) + if ch == nil { + return fmt.Errorf("ssh: invalid channel %d", id) + } + + return ch.handlePacket(packet) +} + +func (m *mux) handleGlobalPacket(packet []byte) error { + msg, err := decode(packet) + if err != nil { + return err + } + + switch msg := msg.(type) { + case *globalRequestMsg: + m.incomingRequests <- &Request{ + Type: msg.Type, + WantReply: msg.WantReply, + Payload: msg.Data, + mux: m, + } + case *globalRequestSuccessMsg, *globalRequestFailureMsg: + m.globalResponses <- msg + default: + panic(fmt.Sprintf("not a global message %#v", msg)) + } + + return nil +} + +// handleChannelOpen schedules a channel to be Accept()ed. 
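Request.Reply on the public *ssh.Request is the caller-facing side of ackRequest above, and Client.OpenChannel is the caller-facing side of the channel-open handshake implemented just below. A sketch of both; the channel type "example@invalid" and its nil payload are made up for illustration:

package example

import (
	"log"

	"golang.org/x/crypto/ssh"
)

// serviceGlobalRequests drains a server connection's global request
// channel; replying through req.Reply is what reaches ackRequest.
// A request with WantReply set must get exactly one reply.
func serviceGlobalRequests(reqs <-chan *ssh.Request) {
	for req := range reqs {
		switch req.Type {
		case "keepalive@openssh.com":
			req.Reply(true, nil) // acknowledge client keepalives
		default:
			if req.WantReply {
				req.Reply(false, nil) // refuse anything unrecognized
			}
		}
	}
}

// openCustom shows the client side of the channel-open round trip.
func openCustom(client *ssh.Client) (ssh.Channel, error) {
	ch, reqs, err := client.OpenChannel("example@invalid", nil)
	if err != nil {
		// A peer rejection surfaces as *ssh.OpenChannelError carrying
		// the Reason and Message from channelOpenFailureMsg.
		if oce, ok := err.(*ssh.OpenChannelError); ok {
			log.Printf("rejected: %v (%s)", oce.Reason, oce.Message)
		}
		return nil, err
	}
	go ssh.DiscardRequests(reqs) // out-of-band channel requests must be serviced
	return ch, nil
}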
+func (m *mux) handleChannelOpen(packet []byte) error { + var msg channelOpenMsg + if err := Unmarshal(packet, &msg); err != nil { + return err + } + + if msg.MaxPacketSize < minPacketLength || msg.MaxPacketSize > 1<<31 { + failMsg := channelOpenFailureMsg{ + PeersID: msg.PeersID, + Reason: ConnectionFailed, + Message: "invalid request", + Language: "en_US.UTF-8", + } + return m.sendMessage(failMsg) + } + + c := m.newChannel(msg.ChanType, channelInbound, msg.TypeSpecificData) + c.remoteId = msg.PeersID + c.maxRemotePayload = msg.MaxPacketSize + c.remoteWin.add(msg.PeersWindow) + m.incomingChannels <- c + return nil +} + +func (m *mux) OpenChannel(chanType string, extra []byte) (Channel, <-chan *Request, error) { + ch, err := m.openChannel(chanType, extra) + if err != nil { + return nil, nil, err + } + + return ch, ch.incomingRequests, nil +} + +func (m *mux) openChannel(chanType string, extra []byte) (*channel, error) { + ch := m.newChannel(chanType, channelOutbound, extra) + + ch.maxIncomingPayload = channelMaxPacket + + open := channelOpenMsg{ + ChanType: chanType, + PeersWindow: ch.myWindow, + MaxPacketSize: ch.maxIncomingPayload, + TypeSpecificData: extra, + PeersID: ch.localId, + } + if err := m.sendMessage(open); err != nil { + return nil, err + } + + switch msg := (<-ch.msg).(type) { + case *channelOpenConfirmMsg: + return ch, nil + case *channelOpenFailureMsg: + return nil, &OpenChannelError{msg.Reason, msg.Message} + default: + return nil, fmt.Errorf("ssh: unexpected packet in response to channel open: %T", msg) + } +} diff --git a/vendor/golang.org/x/crypto/ssh/server.go b/vendor/golang.org/x/crypto/ssh/server.go new file mode 100644 index 00000000000..7d42a8c88d2 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/server.go @@ -0,0 +1,716 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "bytes" + "errors" + "fmt" + "io" + "net" + "strings" +) + +// The Permissions type holds fine-grained permissions that are +// specific to a user or a specific authentication method for a user. +// The Permissions value for a successful authentication attempt is +// available in ServerConn, so it can be used to pass information from +// the user-authentication phase to the application layer. +type Permissions struct { + // CriticalOptions indicate restrictions to the default + // permissions, and are typically used in conjunction with + // user certificates. The standard for SSH certificates + // defines "force-command" (only allow the given command to + // execute) and "source-address" (only allow connections from + // the given address). The SSH package currently only enforces + // the "source-address" critical option. It is up to server + // implementations to enforce other critical options, such as + // "force-command", by checking them after the SSH handshake + // is successful. In general, SSH servers should reject + // connections that specify critical options that are unknown + // or not supported. + CriticalOptions map[string]string + + // Extensions are extra functionality that the server may + // offer on authenticated connections. Lack of support for an + // extension does not preclude authenticating a user. Common + // extensions are "permit-agent-forwarding", + // "permit-X11-forwarding". The Go SSH library currently does + // not act on any extension, and it is up to server + // implementations to honor them. 
Extensions can be used to
+	// pass data from the authentication callbacks to the server
+	// application layer.
+	Extensions map[string]string
+}
+
+type GSSAPIWithMICConfig struct {
+	// AllowLogin must be set. It is called when gssapi-with-mic
+	// authentication is selected (RFC 4462 section 3). The srcName comes from
+	// the results of the GSS-API authentication, in the format username@DOMAIN.
+	// GSSAPI only guarantees to the server who the user is, not whether they
+	// can log in or with what permissions. This callback is therefore called
+	// after the user identity is established to decide whether the user may
+	// log in, and with which permissions. If the user is allowed to log in,
+	// it should return a nil error.
+	AllowLogin func(conn ConnMetadata, srcName string) (*Permissions, error)
+
+	// Server must be set. It's the implementation
+	// of the GSSAPIServer interface. See GSSAPIServer interface for details.
+	Server GSSAPIServer
+}
+
+// ServerConfig holds server specific configuration data.
+type ServerConfig struct {
+	// Config contains configuration shared between client and server.
+	Config
+
+	hostKeys []Signer
+
+	// NoClientAuth is true if clients are allowed to connect without
+	// authenticating.
+	NoClientAuth bool
+
+	// MaxAuthTries specifies the maximum number of authentication attempts
+	// permitted per connection. If set to a negative number, the number of
+	// attempts is unlimited. If set to zero, the number of attempts is limited
+	// to 6.
+	MaxAuthTries int
+
+	// PasswordCallback, if non-nil, is called when a user
+	// attempts to authenticate using a password.
+	PasswordCallback func(conn ConnMetadata, password []byte) (*Permissions, error)
+
+	// PublicKeyCallback, if non-nil, is called when a client
+	// offers a public key for authentication. It must return a nil error
+	// if the given public key can be used to authenticate the
+	// given user. For example, see CertChecker.Authenticate. A
+	// call to this function does not guarantee that the key
+	// offered is in fact used to authenticate. To record any data
+	// depending on the public key, store it inside a
+	// Permissions.Extensions entry.
+	PublicKeyCallback func(conn ConnMetadata, key PublicKey) (*Permissions, error)
+
+	// KeyboardInteractiveCallback, if non-nil, is called when
+	// keyboard-interactive authentication is selected (RFC
+	// 4256). The client object's Challenge function should be
+	// used to query the user. The callback may offer multiple
+	// Challenge rounds. To avoid information leaks, the client
+	// should be presented a challenge even if the user is
+	// unknown.
+	KeyboardInteractiveCallback func(conn ConnMetadata, client KeyboardInteractiveChallenge) (*Permissions, error)
+
+	// AuthLogCallback, if non-nil, is called to log all authentication
+	// attempts.
+	AuthLogCallback func(conn ConnMetadata, method string, err error)
+
+	// ServerVersion is the version identification string to announce in
+	// the public handshake.
+	// If empty, a reasonable default is used.
+	// Note that RFC 4253 section 4.2 requires that this string start with
+	// "SSH-2.0-".
+	ServerVersion string
+
+	// BannerCallback, if present, is called and the return string is sent to
+	// the client after key exchange completed but before authentication.
+	BannerCallback func(conn ConnMetadata) string
+
+	// GSSAPIWithMICConfig includes the GSSAPI server and callback, which, if
+	// both non-nil, are used when gssapi-with-mic authentication is selected
+	// (RFC 4462 section 3).
+ GSSAPIWithMICConfig *GSSAPIWithMICConfig +} + +// AddHostKey adds a private key as a host key. If an existing host +// key exists with the same algorithm, it is overwritten. Each server +// config must have at least one host key. +func (s *ServerConfig) AddHostKey(key Signer) { + for i, k := range s.hostKeys { + if k.PublicKey().Type() == key.PublicKey().Type() { + s.hostKeys[i] = key + return + } + } + + s.hostKeys = append(s.hostKeys, key) +} + +// cachedPubKey contains the results of querying whether a public key is +// acceptable for a user. +type cachedPubKey struct { + user string + pubKeyData []byte + result error + perms *Permissions +} + +const maxCachedPubKeys = 16 + +// pubKeyCache caches tests for public keys. Since SSH clients +// will query whether a public key is acceptable before attempting to +// authenticate with it, we end up with duplicate queries for public +// key validity. The cache only applies to a single ServerConn. +type pubKeyCache struct { + keys []cachedPubKey +} + +// get returns the result for a given user/algo/key tuple. +func (c *pubKeyCache) get(user string, pubKeyData []byte) (cachedPubKey, bool) { + for _, k := range c.keys { + if k.user == user && bytes.Equal(k.pubKeyData, pubKeyData) { + return k, true + } + } + return cachedPubKey{}, false +} + +// add adds the given tuple to the cache. +func (c *pubKeyCache) add(candidate cachedPubKey) { + if len(c.keys) < maxCachedPubKeys { + c.keys = append(c.keys, candidate) + } +} + +// ServerConn is an authenticated SSH connection, as seen from the +// server +type ServerConn struct { + Conn + + // If the succeeding authentication callback returned a + // non-nil Permissions pointer, it is stored here. + Permissions *Permissions +} + +// NewServerConn starts a new SSH server with c as the underlying +// transport. It starts with a handshake and, if the handshake is +// unsuccessful, it closes the connection and returns an error. The +// Request and NewChannel channels must be serviced, or the connection +// will hang. +// +// The returned error may be of type *ServerAuthError for +// authentication errors. +func NewServerConn(c net.Conn, config *ServerConfig) (*ServerConn, <-chan NewChannel, <-chan *Request, error) { + fullConf := *config + fullConf.SetDefaults() + if fullConf.MaxAuthTries == 0 { + fullConf.MaxAuthTries = 6 + } + // Check if the config contains any unsupported key exchanges + for _, kex := range fullConf.KeyExchanges { + if _, ok := serverForbiddenKexAlgos[kex]; ok { + return nil, nil, nil, fmt.Errorf("ssh: unsupported key exchange %s for server", kex) + } + } + + s := &connection{ + sshConn: sshConn{conn: c}, + } + perms, err := s.serverHandshake(&fullConf) + if err != nil { + c.Close() + return nil, nil, nil, err + } + return &ServerConn{s, perms}, s.mux.incomingChannels, s.mux.incomingRequests, nil +} + +// signAndMarshal signs the data with the appropriate algorithm, +// and serializes the result in SSH wire format. +func signAndMarshal(k Signer, rand io.Reader, data []byte) ([]byte, error) { + sig, err := k.Sign(rand, data) + if err != nil { + return nil, err + } + + return Marshal(sig), nil +} + +// handshake performs key exchange and user authentication. 
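NewServerConn above hands back the connection plus the two channels that must be serviced. A minimal sketch of a complete accept loop; the host key path, port, and credentials are placeholders, and the password check is a toy:

package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"net"

	"golang.org/x/crypto/ssh"
)

func main() {
	config := &ssh.ServerConfig{
		PasswordCallback: func(c ssh.ConnMetadata, pass []byte) (*ssh.Permissions, error) {
			if c.User() == "demo" && string(pass) == "secret" { // toy check only
				return nil, nil
			}
			return nil, fmt.Errorf("password rejected for %q", c.User())
		},
	}

	keyBytes, err := ioutil.ReadFile("/path/to/host_key") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	signer, err := ssh.ParsePrivateKey(keyBytes)
	if err != nil {
		log.Fatal(err)
	}
	config.AddHostKey(signer)

	ln, err := net.Listen("tcp", ":2022")
	if err != nil {
		log.Fatal(err)
	}
	for {
		nConn, err := ln.Accept()
		if err != nil {
			log.Fatal(err)
		}
		go func(c net.Conn) {
			sconn, chans, reqs, err := ssh.NewServerConn(c, config)
			if err != nil {
				log.Printf("handshake failed: %v", err)
				return
			}
			defer sconn.Close()
			go ssh.DiscardRequests(reqs) // both channels must be serviced
			for newCh := range chans {
				newCh.Reject(ssh.UnknownChannelType, "no channels implemented")
			}
		}(nConn)
	}
}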
+func (s *connection) serverHandshake(config *ServerConfig) (*Permissions, error) { + if len(config.hostKeys) == 0 { + return nil, errors.New("ssh: server has no host keys") + } + + if !config.NoClientAuth && config.PasswordCallback == nil && config.PublicKeyCallback == nil && + config.KeyboardInteractiveCallback == nil && (config.GSSAPIWithMICConfig == nil || + config.GSSAPIWithMICConfig.AllowLogin == nil || config.GSSAPIWithMICConfig.Server == nil) { + return nil, errors.New("ssh: no authentication methods configured but NoClientAuth is also false") + } + + if config.ServerVersion != "" { + s.serverVersion = []byte(config.ServerVersion) + } else { + s.serverVersion = []byte(packageVersion) + } + var err error + s.clientVersion, err = exchangeVersions(s.sshConn.conn, s.serverVersion) + if err != nil { + return nil, err + } + + tr := newTransport(s.sshConn.conn, config.Rand, false /* not client */) + s.transport = newServerTransport(tr, s.clientVersion, s.serverVersion, config) + + if err := s.transport.waitSession(); err != nil { + return nil, err + } + + // We just did the key change, so the session ID is established. + s.sessionID = s.transport.getSessionID() + + var packet []byte + if packet, err = s.transport.readPacket(); err != nil { + return nil, err + } + + var serviceRequest serviceRequestMsg + if err = Unmarshal(packet, &serviceRequest); err != nil { + return nil, err + } + if serviceRequest.Service != serviceUserAuth { + return nil, errors.New("ssh: requested service '" + serviceRequest.Service + "' before authenticating") + } + serviceAccept := serviceAcceptMsg{ + Service: serviceUserAuth, + } + if err := s.transport.writePacket(Marshal(&serviceAccept)); err != nil { + return nil, err + } + + perms, err := s.serverAuthenticate(config) + if err != nil { + return nil, err + } + s.mux = newMux(s.transport) + return perms, err +} + +func isAcceptableAlgo(algo string) bool { + switch algo { + case KeyAlgoRSA, KeyAlgoDSA, KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521, KeyAlgoSKECDSA256, KeyAlgoED25519, KeyAlgoSKED25519, + CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoSKECDSA256v01, CertAlgoED25519v01, CertAlgoSKED25519v01: + return true + } + return false +} + +func checkSourceAddress(addr net.Addr, sourceAddrs string) error { + if addr == nil { + return errors.New("ssh: no address known for client, but source-address match required") + } + + tcpAddr, ok := addr.(*net.TCPAddr) + if !ok { + return fmt.Errorf("ssh: remote address %v is not an TCP address when checking source-address match", addr) + } + + for _, sourceAddr := range strings.Split(sourceAddrs, ",") { + if allowedIP := net.ParseIP(sourceAddr); allowedIP != nil { + if allowedIP.Equal(tcpAddr.IP) { + return nil + } + } else { + _, ipNet, err := net.ParseCIDR(sourceAddr) + if err != nil { + return fmt.Errorf("ssh: error parsing source-address restriction %q: %v", sourceAddr, err) + } + + if ipNet.Contains(tcpAddr.IP) { + return nil + } + } + } + + return fmt.Errorf("ssh: remote address %v is not allowed because of source-address restriction", addr) +} + +func gssExchangeToken(gssapiConfig *GSSAPIWithMICConfig, firstToken []byte, s *connection, + sessionID []byte, userAuthReq userAuthRequestMsg) (authErr error, perms *Permissions, err error) { + gssAPIServer := gssapiConfig.Server + defer gssAPIServer.DeleteSecContext() + var srcName string + for { + var ( + outToken []byte + needContinue bool + ) + outToken, srcName, needContinue, err = 
gssAPIServer.AcceptSecContext(firstToken) + if err != nil { + return err, nil, nil + } + if len(outToken) != 0 { + if err := s.transport.writePacket(Marshal(&userAuthGSSAPIToken{ + Token: outToken, + })); err != nil { + return nil, nil, err + } + } + if !needContinue { + break + } + packet, err := s.transport.readPacket() + if err != nil { + return nil, nil, err + } + userAuthGSSAPITokenReq := &userAuthGSSAPIToken{} + if err := Unmarshal(packet, userAuthGSSAPITokenReq); err != nil { + return nil, nil, err + } + } + packet, err := s.transport.readPacket() + if err != nil { + return nil, nil, err + } + userAuthGSSAPIMICReq := &userAuthGSSAPIMIC{} + if err := Unmarshal(packet, userAuthGSSAPIMICReq); err != nil { + return nil, nil, err + } + mic := buildMIC(string(sessionID), userAuthReq.User, userAuthReq.Service, userAuthReq.Method) + if err := gssAPIServer.VerifyMIC(mic, userAuthGSSAPIMICReq.MIC); err != nil { + return err, nil, nil + } + perms, authErr = gssapiConfig.AllowLogin(s, srcName) + return authErr, perms, nil +} + +// ServerAuthError represents server authentication errors and is +// sometimes returned by NewServerConn. It appends any authentication +// errors that may occur, and is returned if all of the authentication +// methods provided by the user failed to authenticate. +type ServerAuthError struct { + // Errors contains authentication errors returned by the authentication + // callback methods. The first entry is typically ErrNoAuth. + Errors []error +} + +func (l ServerAuthError) Error() string { + var errs []string + for _, err := range l.Errors { + errs = append(errs, err.Error()) + } + return "[" + strings.Join(errs, ", ") + "]" +} + +// ErrNoAuth is the error value returned if no +// authentication method has been passed yet. This happens as a normal +// part of the authentication loop, since the client first tries +// 'none' authentication to discover available methods. +// It is returned in ServerAuthError.Errors from NewServerConn. 
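The error bundling described here matters to callers of NewServerConn: a failed handshake may carry a *ServerAuthError whose Errors slice usually begins with the benign 'none' probe. A sketch of unpacking it:

package example

import (
	"errors"
	"log"

	"golang.org/x/crypto/ssh"
)

// logHandshakeError separates the normal 'none' method-discovery probe
// from real authentication failures.
func logHandshakeError(err error) {
	var authErr *ssh.ServerAuthError
	if errors.As(err, &authErr) {
		for _, e := range authErr.Errors {
			if e == ssh.ErrNoAuth {
				continue // normal method discovery, not a failure
			}
			log.Printf("auth attempt failed: %v", e)
		}
		return
	}
	log.Printf("handshake failed: %v", err)
}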
+var ErrNoAuth = errors.New("ssh: no auth passed yet") + +func (s *connection) serverAuthenticate(config *ServerConfig) (*Permissions, error) { + sessionID := s.transport.getSessionID() + var cache pubKeyCache + var perms *Permissions + + authFailures := 0 + var authErrs []error + var displayedBanner bool + +userAuthLoop: + for { + if authFailures >= config.MaxAuthTries && config.MaxAuthTries > 0 { + discMsg := &disconnectMsg{ + Reason: 2, + Message: "too many authentication failures", + } + + if err := s.transport.writePacket(Marshal(discMsg)); err != nil { + return nil, err + } + + return nil, discMsg + } + + var userAuthReq userAuthRequestMsg + if packet, err := s.transport.readPacket(); err != nil { + if err == io.EOF { + return nil, &ServerAuthError{Errors: authErrs} + } + return nil, err + } else if err = Unmarshal(packet, &userAuthReq); err != nil { + return nil, err + } + + if userAuthReq.Service != serviceSSH { + return nil, errors.New("ssh: client attempted to negotiate for unknown service: " + userAuthReq.Service) + } + + s.user = userAuthReq.User + + if !displayedBanner && config.BannerCallback != nil { + displayedBanner = true + msg := config.BannerCallback(s) + if msg != "" { + bannerMsg := &userAuthBannerMsg{ + Message: msg, + } + if err := s.transport.writePacket(Marshal(bannerMsg)); err != nil { + return nil, err + } + } + } + + perms = nil + authErr := ErrNoAuth + + switch userAuthReq.Method { + case "none": + if config.NoClientAuth { + authErr = nil + } + + // allow initial attempt of 'none' without penalty + if authFailures == 0 { + authFailures-- + } + case "password": + if config.PasswordCallback == nil { + authErr = errors.New("ssh: password auth not configured") + break + } + payload := userAuthReq.Payload + if len(payload) < 1 || payload[0] != 0 { + return nil, parseError(msgUserAuthRequest) + } + payload = payload[1:] + password, payload, ok := parseString(payload) + if !ok || len(payload) > 0 { + return nil, parseError(msgUserAuthRequest) + } + + perms, authErr = config.PasswordCallback(s, password) + case "keyboard-interactive": + if config.KeyboardInteractiveCallback == nil { + authErr = errors.New("ssh: keyboard-interactive auth not configured") + break + } + + prompter := &sshClientKeyboardInteractive{s} + perms, authErr = config.KeyboardInteractiveCallback(s, prompter.Challenge) + case "publickey": + if config.PublicKeyCallback == nil { + authErr = errors.New("ssh: publickey auth not configured") + break + } + payload := userAuthReq.Payload + if len(payload) < 1 { + return nil, parseError(msgUserAuthRequest) + } + isQuery := payload[0] == 0 + payload = payload[1:] + algoBytes, payload, ok := parseString(payload) + if !ok { + return nil, parseError(msgUserAuthRequest) + } + algo := string(algoBytes) + if !isAcceptableAlgo(algo) { + authErr = fmt.Errorf("ssh: algorithm %q not accepted", algo) + break + } + + pubKeyData, payload, ok := parseString(payload) + if !ok { + return nil, parseError(msgUserAuthRequest) + } + + pubKey, err := ParsePublicKey(pubKeyData) + if err != nil { + return nil, err + } + + candidate, ok := cache.get(s.user, pubKeyData) + if !ok { + candidate.user = s.user + candidate.pubKeyData = pubKeyData + candidate.perms, candidate.result = config.PublicKeyCallback(s, pubKey) + if candidate.result == nil && candidate.perms != nil && candidate.perms.CriticalOptions != nil && candidate.perms.CriticalOptions[sourceAddressCriticalOption] != "" { + candidate.result = checkSourceAddress( + s.RemoteAddr(), + 
candidate.perms.CriticalOptions[sourceAddressCriticalOption]) + } + cache.add(candidate) + } + + if isQuery { + // The client can query if the given public key + // would be okay. + + if len(payload) > 0 { + return nil, parseError(msgUserAuthRequest) + } + + if candidate.result == nil { + okMsg := userAuthPubKeyOkMsg{ + Algo: algo, + PubKey: pubKeyData, + } + if err = s.transport.writePacket(Marshal(&okMsg)); err != nil { + return nil, err + } + continue userAuthLoop + } + authErr = candidate.result + } else { + sig, payload, ok := parseSignature(payload) + if !ok || len(payload) > 0 { + return nil, parseError(msgUserAuthRequest) + } + // Ensure the public key algo and signature algo + // are supported. Compare the private key + // algorithm name that corresponds to algo with + // sig.Format. This is usually the same, but + // for certs, the names differ. + if !isAcceptableAlgo(sig.Format) { + authErr = fmt.Errorf("ssh: algorithm %q not accepted", sig.Format) + break + } + signedData := buildDataSignedForAuth(sessionID, userAuthReq, algoBytes, pubKeyData) + + if err := pubKey.Verify(signedData, sig); err != nil { + return nil, err + } + + authErr = candidate.result + perms = candidate.perms + } + case "gssapi-with-mic": + gssapiConfig := config.GSSAPIWithMICConfig + userAuthRequestGSSAPI, err := parseGSSAPIPayload(userAuthReq.Payload) + if err != nil { + return nil, parseError(msgUserAuthRequest) + } + // OpenSSH supports Kerberos V5 mechanism only for GSS-API authentication. + if userAuthRequestGSSAPI.N == 0 { + authErr = fmt.Errorf("ssh: Mechanism negotiation is not supported") + break + } + var i uint32 + present := false + for i = 0; i < userAuthRequestGSSAPI.N; i++ { + if userAuthRequestGSSAPI.OIDS[i].Equal(krb5Mesh) { + present = true + break + } + } + if !present { + authErr = fmt.Errorf("ssh: GSSAPI authentication must use the Kerberos V5 mechanism") + break + } + // Initial server response, see RFC 4462 section 3.3. + if err := s.transport.writePacket(Marshal(&userAuthGSSAPIResponse{ + SupportMech: krb5OID, + })); err != nil { + return nil, err + } + // Exchange token, see RFC 4462 section 3.4. 
+ packet, err := s.transport.readPacket() + if err != nil { + return nil, err + } + userAuthGSSAPITokenReq := &userAuthGSSAPIToken{} + if err := Unmarshal(packet, userAuthGSSAPITokenReq); err != nil { + return nil, err + } + authErr, perms, err = gssExchangeToken(gssapiConfig, userAuthGSSAPITokenReq.Token, s, sessionID, + userAuthReq) + if err != nil { + return nil, err + } + default: + authErr = fmt.Errorf("ssh: unknown method %q", userAuthReq.Method) + } + + authErrs = append(authErrs, authErr) + + if config.AuthLogCallback != nil { + config.AuthLogCallback(s, userAuthReq.Method, authErr) + } + + if authErr == nil { + break userAuthLoop + } + + authFailures++ + + var failureMsg userAuthFailureMsg + if config.PasswordCallback != nil { + failureMsg.Methods = append(failureMsg.Methods, "password") + } + if config.PublicKeyCallback != nil { + failureMsg.Methods = append(failureMsg.Methods, "publickey") + } + if config.KeyboardInteractiveCallback != nil { + failureMsg.Methods = append(failureMsg.Methods, "keyboard-interactive") + } + if config.GSSAPIWithMICConfig != nil && config.GSSAPIWithMICConfig.Server != nil && + config.GSSAPIWithMICConfig.AllowLogin != nil { + failureMsg.Methods = append(failureMsg.Methods, "gssapi-with-mic") + } + + if len(failureMsg.Methods) == 0 { + return nil, errors.New("ssh: no authentication methods configured but NoClientAuth is also false") + } + + if err := s.transport.writePacket(Marshal(&failureMsg)); err != nil { + return nil, err + } + } + + if err := s.transport.writePacket([]byte{msgUserAuthSuccess}); err != nil { + return nil, err + } + return perms, nil +} + +// sshClientKeyboardInteractive implements a ClientKeyboardInteractive by +// asking the client on the other side of a ServerConn. +type sshClientKeyboardInteractive struct { + *connection +} + +func (c *sshClientKeyboardInteractive) Challenge(user, instruction string, questions []string, echos []bool) (answers []string, err error) { + if len(questions) != len(echos) { + return nil, errors.New("ssh: echos and questions must have equal length") + } + + var prompts []byte + for i := range questions { + prompts = appendString(prompts, questions[i]) + prompts = appendBool(prompts, echos[i]) + } + + if err := c.transport.writePacket(Marshal(&userAuthInfoRequestMsg{ + Instruction: instruction, + NumPrompts: uint32(len(questions)), + Prompts: prompts, + })); err != nil { + return nil, err + } + + packet, err := c.transport.readPacket() + if err != nil { + return nil, err + } + if packet[0] != msgUserAuthInfoResponse { + return nil, unexpectedMessageError(msgUserAuthInfoResponse, packet[0]) + } + packet = packet[1:] + + n, packet, ok := parseUint32(packet) + if !ok || int(n) != len(questions) { + return nil, parseError(msgUserAuthInfoResponse) + } + + for i := uint32(0); i < n; i++ { + ans, rest, ok := parseString(packet) + if !ok { + return nil, parseError(msgUserAuthInfoResponse) + } + + answers = append(answers, string(ans)) + packet = rest + } + if len(packet) != 0 { + return nil, errors.New("ssh: junk at end of message") + } + + return answers, nil +} diff --git a/vendor/golang.org/x/crypto/ssh/session.go b/vendor/golang.org/x/crypto/ssh/session.go new file mode 100644 index 00000000000..d3321f6b784 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/session.go @@ -0,0 +1,647 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
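The publickey branch of serverAuthenticate above runs PublicKeyCallback once per key (via the cache) and then enforces the "source-address" critical option through checkSourceAddress. A sketch of a callback built for that flow; the authorized-key path and the 10.0.0.0/8 restriction are placeholders:

package example

import (
	"bytes"
	"fmt"
	"io/ioutil"

	"golang.org/x/crypto/ssh"
)

// makeCallback builds a PublicKeyCallback that admits one authorized key
// and pins the client to a source network.
func makeCallback() (func(ssh.ConnMetadata, ssh.PublicKey) (*ssh.Permissions, error), error) {
	data, err := ioutil.ReadFile("/path/to/authorized_key.pub") // placeholder
	if err != nil {
		return nil, err
	}
	authorized, _, _, _, err := ssh.ParseAuthorizedKey(data)
	if err != nil {
		return nil, err
	}
	return func(conn ssh.ConnMetadata, key ssh.PublicKey) (*ssh.Permissions, error) {
		if !bytes.Equal(key.Marshal(), authorized.Marshal()) {
			return nil, fmt.Errorf("unknown public key for %q", conn.User())
		}
		return &ssh.Permissions{
			// Enforced by the server code above after the callback runs.
			CriticalOptions: map[string]string{"source-address": "10.0.0.0/8"},
			// Extensions are free-form; a fingerprint is handy later.
			Extensions: map[string]string{"pubkey-fp": ssh.FingerprintSHA256(key)},
		}, nil
	}, nil
}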
+ +package ssh + +// Session implements an interactive session described in +// "RFC 4254, section 6". + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + "io/ioutil" + "sync" +) + +type Signal string + +// POSIX signals as listed in RFC 4254 Section 6.10. +const ( + SIGABRT Signal = "ABRT" + SIGALRM Signal = "ALRM" + SIGFPE Signal = "FPE" + SIGHUP Signal = "HUP" + SIGILL Signal = "ILL" + SIGINT Signal = "INT" + SIGKILL Signal = "KILL" + SIGPIPE Signal = "PIPE" + SIGQUIT Signal = "QUIT" + SIGSEGV Signal = "SEGV" + SIGTERM Signal = "TERM" + SIGUSR1 Signal = "USR1" + SIGUSR2 Signal = "USR2" +) + +var signals = map[Signal]int{ + SIGABRT: 6, + SIGALRM: 14, + SIGFPE: 8, + SIGHUP: 1, + SIGILL: 4, + SIGINT: 2, + SIGKILL: 9, + SIGPIPE: 13, + SIGQUIT: 3, + SIGSEGV: 11, + SIGTERM: 15, +} + +type TerminalModes map[uint8]uint32 + +// POSIX terminal mode flags as listed in RFC 4254 Section 8. +const ( + tty_OP_END = 0 + VINTR = 1 + VQUIT = 2 + VERASE = 3 + VKILL = 4 + VEOF = 5 + VEOL = 6 + VEOL2 = 7 + VSTART = 8 + VSTOP = 9 + VSUSP = 10 + VDSUSP = 11 + VREPRINT = 12 + VWERASE = 13 + VLNEXT = 14 + VFLUSH = 15 + VSWTCH = 16 + VSTATUS = 17 + VDISCARD = 18 + IGNPAR = 30 + PARMRK = 31 + INPCK = 32 + ISTRIP = 33 + INLCR = 34 + IGNCR = 35 + ICRNL = 36 + IUCLC = 37 + IXON = 38 + IXANY = 39 + IXOFF = 40 + IMAXBEL = 41 + ISIG = 50 + ICANON = 51 + XCASE = 52 + ECHO = 53 + ECHOE = 54 + ECHOK = 55 + ECHONL = 56 + NOFLSH = 57 + TOSTOP = 58 + IEXTEN = 59 + ECHOCTL = 60 + ECHOKE = 61 + PENDIN = 62 + OPOST = 70 + OLCUC = 71 + ONLCR = 72 + OCRNL = 73 + ONOCR = 74 + ONLRET = 75 + CS7 = 90 + CS8 = 91 + PARENB = 92 + PARODD = 93 + TTY_OP_ISPEED = 128 + TTY_OP_OSPEED = 129 +) + +// A Session represents a connection to a remote command or shell. +type Session struct { + // Stdin specifies the remote process's standard input. + // If Stdin is nil, the remote process reads from an empty + // bytes.Buffer. + Stdin io.Reader + + // Stdout and Stderr specify the remote process's standard + // output and error. + // + // If either is nil, Run connects the corresponding file + // descriptor to an instance of ioutil.Discard. There is a + // fixed amount of buffering that is shared for the two streams. + // If either blocks it may eventually cause the remote + // command to block. + Stdout io.Writer + Stderr io.Writer + + ch Channel // the channel backing this session + started bool // true once Start, Run or Shell is invoked. + copyFuncs []func() error + errors chan error // one send per copyFunc + + // true if pipe method is active + stdinpipe, stdoutpipe, stderrpipe bool + + // stdinPipeWriter is non-nil if StdinPipe has not been called + // and Stdin was specified by the user; it is the write end of + // a pipe connecting Session.Stdin to the stdin channel. + stdinPipeWriter io.WriteCloser + + exitStatus chan error +} + +// SendRequest sends an out-of-band channel request on the SSH channel +// underlying the session. +func (s *Session) SendRequest(name string, wantReply bool, payload []byte) (bool, error) { + return s.ch.SendRequest(name, wantReply, payload) +} + +func (s *Session) Close() error { + return s.ch.Close() +} + +// RFC 4254 Section 6.4. +type setenvRequest struct { + Name string + Value string +} + +// Setenv sets an environment variable that will be applied to any +// command executed by Shell or Run. 
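The TerminalModes map and mode constants above feed straight into RequestPty, defined just below. A typical client-side sketch; the command and mode values are illustrative:

package example

import (
	"os"

	"golang.org/x/crypto/ssh"
)

// runWithPty requests a pty with echo disabled, then runs one command.
func runWithPty(client *ssh.Client) error {
	session, err := client.NewSession()
	if err != nil {
		return err
	}
	defer session.Close()

	modes := ssh.TerminalModes{
		ssh.ECHO:          0,     // disable echoing
		ssh.TTY_OP_ISPEED: 14400, // input speed = 14.4 kbaud
		ssh.TTY_OP_OSPEED: 14400, // output speed = 14.4 kbaud
	}
	if err := session.RequestPty("xterm", 40, 80, modes); err != nil {
		return err
	}
	session.Stdout = os.Stdout
	session.Stderr = os.Stderr
	return session.Run("ls -l")
}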
+func (s *Session) Setenv(name, value string) error { + msg := setenvRequest{ + Name: name, + Value: value, + } + ok, err := s.ch.SendRequest("env", true, Marshal(&msg)) + if err == nil && !ok { + err = errors.New("ssh: setenv failed") + } + return err +} + +// RFC 4254 Section 6.2. +type ptyRequestMsg struct { + Term string + Columns uint32 + Rows uint32 + Width uint32 + Height uint32 + Modelist string +} + +// RequestPty requests the association of a pty with the session on the remote host. +func (s *Session) RequestPty(term string, h, w int, termmodes TerminalModes) error { + var tm []byte + for k, v := range termmodes { + kv := struct { + Key byte + Val uint32 + }{k, v} + + tm = append(tm, Marshal(&kv)...) + } + tm = append(tm, tty_OP_END) + req := ptyRequestMsg{ + Term: term, + Columns: uint32(w), + Rows: uint32(h), + Width: uint32(w * 8), + Height: uint32(h * 8), + Modelist: string(tm), + } + ok, err := s.ch.SendRequest("pty-req", true, Marshal(&req)) + if err == nil && !ok { + err = errors.New("ssh: pty-req failed") + } + return err +} + +// RFC 4254 Section 6.5. +type subsystemRequestMsg struct { + Subsystem string +} + +// RequestSubsystem requests the association of a subsystem with the session on the remote host. +// A subsystem is a predefined command that runs in the background when the ssh session is initiated +func (s *Session) RequestSubsystem(subsystem string) error { + msg := subsystemRequestMsg{ + Subsystem: subsystem, + } + ok, err := s.ch.SendRequest("subsystem", true, Marshal(&msg)) + if err == nil && !ok { + err = errors.New("ssh: subsystem request failed") + } + return err +} + +// RFC 4254 Section 6.7. +type ptyWindowChangeMsg struct { + Columns uint32 + Rows uint32 + Width uint32 + Height uint32 +} + +// WindowChange informs the remote host about a terminal window dimension change to h rows and w columns. +func (s *Session) WindowChange(h, w int) error { + req := ptyWindowChangeMsg{ + Columns: uint32(w), + Rows: uint32(h), + Width: uint32(w * 8), + Height: uint32(h * 8), + } + _, err := s.ch.SendRequest("window-change", false, Marshal(&req)) + return err +} + +// RFC 4254 Section 6.9. +type signalMsg struct { + Signal string +} + +// Signal sends the given signal to the remote process. +// sig is one of the SIG* constants. +func (s *Session) Signal(sig Signal) error { + msg := signalMsg{ + Signal: string(sig), + } + + _, err := s.ch.SendRequest("signal", false, Marshal(&msg)) + return err +} + +// RFC 4254 Section 6.5. +type execMsg struct { + Command string +} + +// Start runs cmd on the remote host. Typically, the remote +// server passes cmd to the shell for interpretation. +// A Session only accepts one call to Run, Start or Shell. +func (s *Session) Start(cmd string) error { + if s.started { + return errors.New("ssh: session already started") + } + req := execMsg{ + Command: cmd, + } + + ok, err := s.ch.SendRequest("exec", true, Marshal(&req)) + if err == nil && !ok { + err = fmt.Errorf("ssh: command %v failed", cmd) + } + if err != nil { + return err + } + return s.start() +} + +// Run runs cmd on the remote host. Typically, the remote +// server passes cmd to the shell for interpretation. +// A Session only accepts one call to Run, Start, Shell, Output, +// or CombinedOutput. +// +// The returned error is nil if the command runs, has no problems +// copying stdin, stdout, and stderr, and exits with a zero exit +// status. +// +// If the remote server does not send an exit status, an error of type +// *ExitMissingError is returned. 
If the command completes +// unsuccessfully or is interrupted by a signal, the error is of type +// *ExitError. Other error types may be returned for I/O problems. +func (s *Session) Run(cmd string) error { + err := s.Start(cmd) + if err != nil { + return err + } + return s.Wait() +} + +// Output runs cmd on the remote host and returns its standard output. +func (s *Session) Output(cmd string) ([]byte, error) { + if s.Stdout != nil { + return nil, errors.New("ssh: Stdout already set") + } + var b bytes.Buffer + s.Stdout = &b + err := s.Run(cmd) + return b.Bytes(), err +} + +type singleWriter struct { + b bytes.Buffer + mu sync.Mutex +} + +func (w *singleWriter) Write(p []byte) (int, error) { + w.mu.Lock() + defer w.mu.Unlock() + return w.b.Write(p) +} + +// CombinedOutput runs cmd on the remote host and returns its combined +// standard output and standard error. +func (s *Session) CombinedOutput(cmd string) ([]byte, error) { + if s.Stdout != nil { + return nil, errors.New("ssh: Stdout already set") + } + if s.Stderr != nil { + return nil, errors.New("ssh: Stderr already set") + } + var b singleWriter + s.Stdout = &b + s.Stderr = &b + err := s.Run(cmd) + return b.b.Bytes(), err +} + +// Shell starts a login shell on the remote host. A Session only +// accepts one call to Run, Start, Shell, Output, or CombinedOutput. +func (s *Session) Shell() error { + if s.started { + return errors.New("ssh: session already started") + } + + ok, err := s.ch.SendRequest("shell", true, nil) + if err == nil && !ok { + return errors.New("ssh: could not start shell") + } + if err != nil { + return err + } + return s.start() +} + +func (s *Session) start() error { + s.started = true + + type F func(*Session) + for _, setupFd := range []F{(*Session).stdin, (*Session).stdout, (*Session).stderr} { + setupFd(s) + } + + s.errors = make(chan error, len(s.copyFuncs)) + for _, fn := range s.copyFuncs { + go func(fn func() error) { + s.errors <- fn() + }(fn) + } + return nil +} + +// Wait waits for the remote command to exit. +// +// The returned error is nil if the command runs, has no problems +// copying stdin, stdout, and stderr, and exits with a zero exit +// status. +// +// If the remote server does not send an exit status, an error of type +// *ExitMissingError is returned. If the command completes +// unsuccessfully or is interrupted by a signal, the error is of type +// *ExitError. Other error types may be returned for I/O problems. +func (s *Session) Wait() error { + if !s.started { + return errors.New("ssh: session not started") + } + waitErr := <-s.exitStatus + + if s.stdinPipeWriter != nil { + s.stdinPipeWriter.Close() + } + var copyError error + for range s.copyFuncs { + if err := <-s.errors; err != nil && copyError == nil { + copyError = err + } + } + if waitErr != nil { + return waitErr + } + return copyError +} + +func (s *Session) wait(reqs <-chan *Request) error { + wm := Waitmsg{status: -1} + // Wait for msg channel to be closed before returning. + for msg := range reqs { + switch msg.Type { + case "exit-status": + wm.status = int(binary.BigEndian.Uint32(msg.Payload)) + case "exit-signal": + var sigval struct { + Signal string + CoreDumped bool + Error string + Lang string + } + if err := Unmarshal(msg.Payload, &sigval); err != nil { + return err + } + + // Must sanitize strings? + wm.signal = sigval.Signal + wm.msg = sigval.Error + wm.lang = sigval.Lang + default: + // This handles keepalives and matches + // OpenSSH's behaviour. 
+ if msg.WantReply { + msg.Reply(false, nil) + } + } + } + if wm.status == 0 { + return nil + } + if wm.status == -1 { + // exit-status was never sent from server + if wm.signal == "" { + // signal was not sent either. RFC 4254 + // section 6.10 recommends against this + // behavior, but it is allowed, so we let + // clients handle it. + return &ExitMissingError{} + } + wm.status = 128 + if _, ok := signals[Signal(wm.signal)]; ok { + wm.status += signals[Signal(wm.signal)] + } + } + + return &ExitError{wm} +} + +// ExitMissingError is returned if a session is torn down cleanly, but +// the server sends no confirmation of the exit status. +type ExitMissingError struct{} + +func (e *ExitMissingError) Error() string { + return "wait: remote command exited without exit status or exit signal" +} + +func (s *Session) stdin() { + if s.stdinpipe { + return + } + var stdin io.Reader + if s.Stdin == nil { + stdin = new(bytes.Buffer) + } else { + r, w := io.Pipe() + go func() { + _, err := io.Copy(w, s.Stdin) + w.CloseWithError(err) + }() + stdin, s.stdinPipeWriter = r, w + } + s.copyFuncs = append(s.copyFuncs, func() error { + _, err := io.Copy(s.ch, stdin) + if err1 := s.ch.CloseWrite(); err == nil && err1 != io.EOF { + err = err1 + } + return err + }) +} + +func (s *Session) stdout() { + if s.stdoutpipe { + return + } + if s.Stdout == nil { + s.Stdout = ioutil.Discard + } + s.copyFuncs = append(s.copyFuncs, func() error { + _, err := io.Copy(s.Stdout, s.ch) + return err + }) +} + +func (s *Session) stderr() { + if s.stderrpipe { + return + } + if s.Stderr == nil { + s.Stderr = ioutil.Discard + } + s.copyFuncs = append(s.copyFuncs, func() error { + _, err := io.Copy(s.Stderr, s.ch.Stderr()) + return err + }) +} + +// sessionStdin reroutes Close to CloseWrite. +type sessionStdin struct { + io.Writer + ch Channel +} + +func (s *sessionStdin) Close() error { + return s.ch.CloseWrite() +} + +// StdinPipe returns a pipe that will be connected to the +// remote command's standard input when the command starts. +func (s *Session) StdinPipe() (io.WriteCloser, error) { + if s.Stdin != nil { + return nil, errors.New("ssh: Stdin already set") + } + if s.started { + return nil, errors.New("ssh: StdinPipe after process started") + } + s.stdinpipe = true + return &sessionStdin{s.ch, s.ch}, nil +} + +// StdoutPipe returns a pipe that will be connected to the +// remote command's standard output when the command starts. +// There is a fixed amount of buffering that is shared between +// stdout and stderr streams. If the StdoutPipe reader is +// not serviced fast enough it may eventually cause the +// remote command to block. +func (s *Session) StdoutPipe() (io.Reader, error) { + if s.Stdout != nil { + return nil, errors.New("ssh: Stdout already set") + } + if s.started { + return nil, errors.New("ssh: StdoutPipe after process started") + } + s.stdoutpipe = true + return s.ch, nil +} + +// StderrPipe returns a pipe that will be connected to the +// remote command's standard error when the command starts. +// There is a fixed amount of buffering that is shared between +// stdout and stderr streams. If the StderrPipe reader is +// not serviced fast enough it may eventually cause the +// remote command to block. 
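Run and Wait report results through ExitMissingError above and ExitError below. A sketch of telling the outcomes apart:

package example

import (
	"fmt"

	"golang.org/x/crypto/ssh"
)

// classify maps the error from Session.Run onto its documented cases.
func classify(err error) string {
	switch e := err.(type) {
	case nil:
		return "exited 0"
	case *ssh.ExitError:
		return fmt.Sprintf("exited %d (signal %q)", e.ExitStatus(), e.Signal())
	case *ssh.ExitMissingError:
		return "closed without exit status"
	default:
		return "I/O or protocol error: " + err.Error()
	}
}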
+func (s *Session) StderrPipe() (io.Reader, error) { + if s.Stderr != nil { + return nil, errors.New("ssh: Stderr already set") + } + if s.started { + return nil, errors.New("ssh: StderrPipe after process started") + } + s.stderrpipe = true + return s.ch.Stderr(), nil +} + +// newSession returns a new interactive session on the remote host. +func newSession(ch Channel, reqs <-chan *Request) (*Session, error) { + s := &Session{ + ch: ch, + } + s.exitStatus = make(chan error, 1) + go func() { + s.exitStatus <- s.wait(reqs) + }() + + return s, nil +} + +// An ExitError reports unsuccessful completion of a remote command. +type ExitError struct { + Waitmsg +} + +func (e *ExitError) Error() string { + return e.Waitmsg.String() +} + +// Waitmsg stores the information about an exited remote command +// as reported by Wait. +type Waitmsg struct { + status int + signal string + msg string + lang string +} + +// ExitStatus returns the exit status of the remote command. +func (w Waitmsg) ExitStatus() int { + return w.status +} + +// Signal returns the exit signal of the remote command if +// it was terminated violently. +func (w Waitmsg) Signal() string { + return w.signal +} + +// Msg returns the exit message given by the remote command +func (w Waitmsg) Msg() string { + return w.msg +} + +// Lang returns the language tag. See RFC 3066 +func (w Waitmsg) Lang() string { + return w.lang +} + +func (w Waitmsg) String() string { + str := fmt.Sprintf("Process exited with status %v", w.status) + if w.signal != "" { + str += fmt.Sprintf(" from signal %v", w.signal) + } + if w.msg != "" { + str += fmt.Sprintf(". Reason was: %v", w.msg) + } + return str +} diff --git a/vendor/golang.org/x/crypto/ssh/ssh_gss.go b/vendor/golang.org/x/crypto/ssh/ssh_gss.go new file mode 100644 index 00000000000..24bd7c8e830 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/ssh_gss.go @@ -0,0 +1,139 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "encoding/asn1" + "errors" +) + +var krb5OID []byte + +func init() { + krb5OID, _ = asn1.Marshal(krb5Mesh) +} + +// GSSAPIClient provides the API to plug-in GSSAPI authentication for client logins. +type GSSAPIClient interface { + // InitSecContext initiates the establishment of a security context for GSS-API between the + // ssh client and ssh server. Initially the token parameter should be specified as nil. + // The routine may return a outputToken which should be transferred to + // the ssh server, where the ssh server will present it to + // AcceptSecContext. If no token need be sent, InitSecContext will indicate this by setting + // needContinue to false. To complete the context + // establishment, one or more reply tokens may be required from the ssh + // server;if so, InitSecContext will return a needContinue which is true. + // In this case, InitSecContext should be called again when the + // reply token is received from the ssh server, passing the reply + // token to InitSecContext via the token parameters. + // See RFC 2743 section 2.2.1 and RFC 4462 section 3.4. + InitSecContext(target string, token []byte, isGSSDelegCreds bool) (outputToken []byte, needContinue bool, err error) + // GetMIC generates a cryptographic MIC for the SSH2 message, and places + // the MIC in a token for transfer to the ssh server. 
+	// The contents of the MIC field are obtained by calling GSS_GetMIC()
+	// over the following, using the GSS-API context that was just
+	// established:
+	//  string session identifier
+	//  byte SSH_MSG_USERAUTH_REQUEST
+	//  string user name
+	//  string service
+	//  string "gssapi-with-mic"
+	// See RFC 2743 section 2.3.1 and RFC 4462 section 3.5.
+	GetMIC(micField []byte) ([]byte, error)
+	// Whenever possible, it should be possible for
+	// DeleteSecContext() calls to be successfully processed even
+	// if other calls cannot succeed, thereby enabling context-related
+	// resources to be released.
+	// In addition to deleting established security contexts,
+	// gss_delete_sec_context must also be able to delete "half-built"
+	// security contexts resulting from an incomplete sequence of
+	// InitSecContext()/AcceptSecContext() calls.
+	// See RFC 2743 section 2.2.3.
+	DeleteSecContext() error
+}
+
+// GSSAPIServer provides the API to plug in GSSAPI authentication for server logins.
+type GSSAPIServer interface {
+	// AcceptSecContext allows a remotely initiated security context between the application
+	// and a remote peer to be established by the ssh client. The routine may return an
+	// outputToken which should be transferred to the ssh client,
+	// where the ssh client will present it to InitSecContext.
+	// If no token need be sent, AcceptSecContext will indicate this
+	// by setting needContinue to false. To
+	// complete the context establishment, one or more reply tokens may be
+	// required from the ssh client. If so, AcceptSecContext
+	// will return a needContinue which is true, in which case it
+	// should be called again when the reply token is received from the ssh
+	// client, passing the token to AcceptSecContext via the
+	// token parameters.
+	// The srcName return value is the authenticated username.
+	// See RFC 2743 section 2.2.2 and RFC 4462 section 3.4.
+	AcceptSecContext(token []byte) (outputToken []byte, srcName string, needContinue bool, err error)
+	// VerifyMIC verifies that a cryptographic MIC, contained in the token parameter,
+	// fits the supplied message received from the ssh client.
+	// See RFC 2743 section 2.3.2.
+	VerifyMIC(micField []byte, micToken []byte) error
+	// Whenever possible, it should be possible for
+	// DeleteSecContext() calls to be successfully processed even
+	// if other calls cannot succeed, thereby enabling context-related
+	// resources to be released.
+	// In addition to deleting established security contexts,
+	// gss_delete_sec_context must also be able to delete "half-built"
+	// security contexts resulting from an incomplete sequence of
+	// InitSecContext()/AcceptSecContext() calls.
+	// See RFC 2743 section 2.2.3.
+	DeleteSecContext() error
+}
+
+var (
+	// OpenSSH supports the Kerberos V5 mechanism only for GSS-API authentication,
+	// so we also support the krb5 mechanism only.
+	// See RFC 1964 section 1.
+	krb5Mesh = asn1.ObjectIdentifier{1, 2, 840, 113554, 1, 2, 2}
+)
+
+// The GSS-API authentication method is initiated when the client sends an SSH_MSG_USERAUTH_REQUEST.
+// See RFC 4462 section 3.2.
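Plugging these interfaces into a server goes through the GSSAPIWithMICConfig shown earlier in server.go. A sketch with a deliberately non-functional GSSAPIServer stub; a real implementation would delegate to a Kerberos library, and the EXAMPLE.COM realm is a placeholder:

package example

import (
	"errors"
	"fmt"

	"golang.org/x/crypto/ssh"
)

// stubGSS satisfies ssh.GSSAPIServer but implements nothing.
type stubGSS struct{}

func (stubGSS) AcceptSecContext(token []byte) (outputToken []byte, srcName string, needContinue bool, err error) {
	return nil, "", false, errors.New("gssapi: not implemented")
}

func (stubGSS) VerifyMIC(micField, micToken []byte) error {
	return errors.New("gssapi: not implemented")
}

func (stubGSS) DeleteSecContext() error { return nil }

func newGSSConfig() *ssh.ServerConfig {
	return &ssh.ServerConfig{
		GSSAPIWithMICConfig: &ssh.GSSAPIWithMICConfig{
			Server: stubGSS{},
			AllowLogin: func(conn ssh.ConnMetadata, srcName string) (*ssh.Permissions, error) {
				// srcName arrives as user@REALM; a real policy check goes here.
				if srcName == conn.User()+"@EXAMPLE.COM" {
					return nil, nil
				}
				return nil, fmt.Errorf("gssapi: %q may not log in", srcName)
			},
		},
	}
}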
+type userAuthRequestGSSAPI struct { + N uint32 + OIDS []asn1.ObjectIdentifier +} + +func parseGSSAPIPayload(payload []byte) (*userAuthRequestGSSAPI, error) { + n, rest, ok := parseUint32(payload) + if !ok { + return nil, errors.New("parse uint32 failed") + } + s := &userAuthRequestGSSAPI{ + N: n, + OIDS: make([]asn1.ObjectIdentifier, n), + } + for i := 0; i < int(n); i++ { + var ( + desiredMech []byte + err error + ) + desiredMech, rest, ok = parseString(rest) + if !ok { + return nil, errors.New("parse string failed") + } + if rest, err = asn1.Unmarshal(desiredMech, &s.OIDS[i]); err != nil { + return nil, err + } + + } + return s, nil +} + +// See RFC 4462 section 3.6. +func buildMIC(sessionID string, username string, service string, authMethod string) []byte { + out := make([]byte, 0, 0) + out = appendString(out, sessionID) + out = append(out, msgUserAuthRequest) + out = appendString(out, username) + out = appendString(out, service) + out = appendString(out, authMethod) + return out +} diff --git a/vendor/golang.org/x/crypto/ssh/streamlocal.go b/vendor/golang.org/x/crypto/ssh/streamlocal.go new file mode 100644 index 00000000000..b171b330bc3 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/streamlocal.go @@ -0,0 +1,116 @@ +package ssh + +import ( + "errors" + "io" + "net" +) + +// streamLocalChannelOpenDirectMsg is a struct used for SSH_MSG_CHANNEL_OPEN message +// with "direct-streamlocal@openssh.com" string. +// +// See openssh-portable/PROTOCOL, section 2.4. connection: Unix domain socket forwarding +// https://github.com/openssh/openssh-portable/blob/master/PROTOCOL#L235 +type streamLocalChannelOpenDirectMsg struct { + socketPath string + reserved0 string + reserved1 uint32 +} + +// forwardedStreamLocalPayload is a struct used for SSH_MSG_CHANNEL_OPEN message +// with "forwarded-streamlocal@openssh.com" string. +type forwardedStreamLocalPayload struct { + SocketPath string + Reserved0 string +} + +// streamLocalChannelForwardMsg is a struct used for SSH2_MSG_GLOBAL_REQUEST message +// with "streamlocal-forward@openssh.com"/"cancel-streamlocal-forward@openssh.com" string. +type streamLocalChannelForwardMsg struct { + socketPath string +} + +// ListenUnix is similar to ListenTCP but uses a Unix domain socket. +func (c *Client) ListenUnix(socketPath string) (net.Listener, error) { + c.handleForwardsOnce.Do(c.handleForwards) + m := streamLocalChannelForwardMsg{ + socketPath, + } + // send message + ok, _, err := c.SendRequest("streamlocal-forward@openssh.com", true, Marshal(&m)) + if err != nil { + return nil, err + } + if !ok { + return nil, errors.New("ssh: streamlocal-forward@openssh.com request denied by peer") + } + ch := c.forwards.add(&net.UnixAddr{Name: socketPath, Net: "unix"}) + + return &unixListener{socketPath, c, ch}, nil +} + +func (c *Client) dialStreamLocal(socketPath string) (Channel, error) { + msg := streamLocalChannelOpenDirectMsg{ + socketPath: socketPath, + } + ch, in, err := c.OpenChannel("direct-streamlocal@openssh.com", Marshal(&msg)) + if err != nil { + return nil, err + } + go DiscardRequests(in) + return ch, err +} + +type unixListener struct { + socketPath string + + conn *Client + in <-chan forward +} + +// Accept waits for and returns the next connection to the listener. 
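ListenUnix and dialStreamLocal above cover both directions of the streamlocal extension. A sketch that exercises each; the socket paths are placeholders and the remote sshd must permit Unix-socket forwarding:

package example

import "golang.org/x/crypto/ssh"

// forwardUnix exercises both directions of the streamlocal extension.
func forwardUnix(client *ssh.Client) error {
	// Remote forward: the server listens on the socket and hands
	// connections back over "forwarded-streamlocal@openssh.com".
	ln, err := client.ListenUnix("/tmp/demo.sock")
	if err != nil {
		return err
	}
	defer ln.Close()

	// Direct dial of an existing remote socket via
	// "direct-streamlocal@openssh.com".
	conn, err := client.Dial("unix", "/var/run/other.sock")
	if err != nil {
		return err
	}
	return conn.Close()
}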
+func (l *unixListener) Accept() (net.Conn, error) { + s, ok := <-l.in + if !ok { + return nil, io.EOF + } + ch, incoming, err := s.newCh.Accept() + if err != nil { + return nil, err + } + go DiscardRequests(incoming) + + return &chanConn{ + Channel: ch, + laddr: &net.UnixAddr{ + Name: l.socketPath, + Net: "unix", + }, + raddr: &net.UnixAddr{ + Name: "@", + Net: "unix", + }, + }, nil +} + +// Close closes the listener. +func (l *unixListener) Close() error { + // this also closes the listener. + l.conn.forwards.remove(&net.UnixAddr{Name: l.socketPath, Net: "unix"}) + m := streamLocalChannelForwardMsg{ + l.socketPath, + } + ok, _, err := l.conn.SendRequest("cancel-streamlocal-forward@openssh.com", true, Marshal(&m)) + if err == nil && !ok { + err = errors.New("ssh: cancel-streamlocal-forward@openssh.com failed") + } + return err +} + +// Addr returns the listener's network address. +func (l *unixListener) Addr() net.Addr { + return &net.UnixAddr{ + Name: l.socketPath, + Net: "unix", + } +} diff --git a/vendor/golang.org/x/crypto/ssh/tcpip.go b/vendor/golang.org/x/crypto/ssh/tcpip.go new file mode 100644 index 00000000000..80d35f5ec18 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/tcpip.go @@ -0,0 +1,474 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "errors" + "fmt" + "io" + "math/rand" + "net" + "strconv" + "strings" + "sync" + "time" +) + +// Listen requests the remote peer open a listening socket on +// addr. Incoming connections will be available by calling Accept on +// the returned net.Listener. The listener must be serviced, or the +// SSH connection may hang. +// N must be "tcp", "tcp4", "tcp6", or "unix". +func (c *Client) Listen(n, addr string) (net.Listener, error) { + switch n { + case "tcp", "tcp4", "tcp6": + laddr, err := net.ResolveTCPAddr(n, addr) + if err != nil { + return nil, err + } + return c.ListenTCP(laddr) + case "unix": + return c.ListenUnix(addr) + default: + return nil, fmt.Errorf("ssh: unsupported protocol: %s", n) + } +} + +// Automatic port allocation is broken with OpenSSH before 6.0. See +// also https://bugzilla.mindrot.org/show_bug.cgi?id=2017. In +// particular, OpenSSH 5.9 sends a channelOpenMsg with port number 0, +// rather than the actual port number. This means you can never open +// two different listeners with auto allocated ports. We work around +// this by trying explicit ports until we succeed. + +const openSSHPrefix = "OpenSSH_" + +var portRandomizer = rand.New(rand.NewSource(time.Now().UnixNano())) + +// isBrokenOpenSSHVersion returns true if the given version string +// specifies a version of OpenSSH that is known to have a bug in port +// forwarding. +func isBrokenOpenSSHVersion(versionStr string) bool { + i := strings.Index(versionStr, openSSHPrefix) + if i < 0 { + return false + } + i += len(openSSHPrefix) + j := i + for ; j < len(versionStr); j++ { + if versionStr[j] < '0' || versionStr[j] > '9' { + break + } + } + version, _ := strconv.Atoi(versionStr[i:j]) + return version < 6 +} + +// autoPortListenWorkaround simulates automatic port allocation by +// trying random ports repeatedly. 
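Client.Listen above makes the remote side accept TCP connections on our behalf, so the returned net.Listener can be dropped into anything that takes one, such as http.Serve. A sketch; the bind address is a placeholder:

package example

import (
	"fmt"
	"net/http"

	"golang.org/x/crypto/ssh"
)

// serveRemote asks the SSH server to listen on a port and serves the
// forwarded connections locally.
func serveRemote(client *ssh.Client) error {
	ln, err := client.Listen("tcp", "0.0.0.0:8080") // listener lives remotely
	if err != nil {
		return err
	}
	defer ln.Close()
	return http.Serve(ln, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "served over a reverse SSH tunnel")
	}))
}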
+func (c *Client) autoPortListenWorkaround(laddr *net.TCPAddr) (net.Listener, error) { + var sshListener net.Listener + var err error + const tries = 10 + for i := 0; i < tries; i++ { + addr := *laddr + addr.Port = 1024 + portRandomizer.Intn(60000) + sshListener, err = c.ListenTCP(&addr) + if err == nil { + laddr.Port = addr.Port + return sshListener, err + } + } + return nil, fmt.Errorf("ssh: listen on random port failed after %d tries: %v", tries, err) +} + +// RFC 4254 7.1 +type channelForwardMsg struct { + addr string + rport uint32 +} + +// handleForwards starts goroutines handling forwarded connections. +// It's called on first use by (*Client).ListenTCP to not launch +// goroutines until needed. +func (c *Client) handleForwards() { + go c.forwards.handleChannels(c.HandleChannelOpen("forwarded-tcpip")) + go c.forwards.handleChannels(c.HandleChannelOpen("forwarded-streamlocal@openssh.com")) +} + +// ListenTCP requests the remote peer open a listening socket +// on laddr. Incoming connections will be available by calling +// Accept on the returned net.Listener. +func (c *Client) ListenTCP(laddr *net.TCPAddr) (net.Listener, error) { + c.handleForwardsOnce.Do(c.handleForwards) + if laddr.Port == 0 && isBrokenOpenSSHVersion(string(c.ServerVersion())) { + return c.autoPortListenWorkaround(laddr) + } + + m := channelForwardMsg{ + laddr.IP.String(), + uint32(laddr.Port), + } + // send message + ok, resp, err := c.SendRequest("tcpip-forward", true, Marshal(&m)) + if err != nil { + return nil, err + } + if !ok { + return nil, errors.New("ssh: tcpip-forward request denied by peer") + } + + // If the original port was 0, then the remote side will + // supply a real port number in the response. + if laddr.Port == 0 { + var p struct { + Port uint32 + } + if err := Unmarshal(resp, &p); err != nil { + return nil, err + } + laddr.Port = int(p.Port) + } + + // Register this forward, using the port number we obtained. + ch := c.forwards.add(laddr) + + return &tcpListener{laddr, c, ch}, nil +} + +// forwardList stores a mapping between remote +// forward requests and the tcpListeners. +type forwardList struct { + sync.Mutex + entries []forwardEntry +} + +// forwardEntry represents an established mapping of a laddr on a +// remote ssh server to a channel connected to a tcpListener. +type forwardEntry struct { + laddr net.Addr + c chan forward +} + +// forward represents an incoming forwarded tcpip connection. The +// arguments to add/remove/lookup should be address as specified in +// the original forward-request. +type forward struct { + newCh NewChannel // the ssh client channel underlying this forward + raddr net.Addr // the raddr of the incoming connection +} + +func (l *forwardList) add(addr net.Addr) chan forward { + l.Lock() + defer l.Unlock() + f := forwardEntry{ + laddr: addr, + c: make(chan forward, 1), + } + l.entries = append(l.entries, f) + return f.c +} + +// See RFC 4254, section 7.2 +type forwardedTCPPayload struct { + Addr string + Port uint32 + OriginAddr string + OriginPort uint32 +} + +// parseTCPAddr parses the originating address from the remote into a *net.TCPAddr. 
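ListenTCP above fills in laddr.Port from the tcpip-forward response when port 0 is requested, falling back to autoPortListenWorkaround for old OpenSSH. A sketch that reads back the allocated port:

package example

import (
	"net"

	"golang.org/x/crypto/ssh"
)

// listenAnyPort requests port 0 and returns the port the server chose.
func listenAnyPort(client *ssh.Client) (net.Listener, int, error) {
	ln, err := client.ListenTCP(&net.TCPAddr{IP: net.IPv4zero, Port: 0})
	if err != nil {
		return nil, 0, err
	}
	// Addr() reflects the patched laddr, so the real port is available here.
	return ln, ln.Addr().(*net.TCPAddr).Port, nil
}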
+func parseTCPAddr(addr string, port uint32) (*net.TCPAddr, error) { + if port == 0 || port > 65535 { + return nil, fmt.Errorf("ssh: port number out of range: %d", port) + } + ip := net.ParseIP(string(addr)) + if ip == nil { + return nil, fmt.Errorf("ssh: cannot parse IP address %q", addr) + } + return &net.TCPAddr{IP: ip, Port: int(port)}, nil +} + +func (l *forwardList) handleChannels(in <-chan NewChannel) { + for ch := range in { + var ( + laddr net.Addr + raddr net.Addr + err error + ) + switch channelType := ch.ChannelType(); channelType { + case "forwarded-tcpip": + var payload forwardedTCPPayload + if err = Unmarshal(ch.ExtraData(), &payload); err != nil { + ch.Reject(ConnectionFailed, "could not parse forwarded-tcpip payload: "+err.Error()) + continue + } + + // RFC 4254 section 7.2 specifies that incoming + // addresses should list the address, in string + // format. It is implied that this should be an IP + // address, as it would be impossible to connect to it + // otherwise. + laddr, err = parseTCPAddr(payload.Addr, payload.Port) + if err != nil { + ch.Reject(ConnectionFailed, err.Error()) + continue + } + raddr, err = parseTCPAddr(payload.OriginAddr, payload.OriginPort) + if err != nil { + ch.Reject(ConnectionFailed, err.Error()) + continue + } + + case "forwarded-streamlocal@openssh.com": + var payload forwardedStreamLocalPayload + if err = Unmarshal(ch.ExtraData(), &payload); err != nil { + ch.Reject(ConnectionFailed, "could not parse forwarded-streamlocal@openssh.com payload: "+err.Error()) + continue + } + laddr = &net.UnixAddr{ + Name: payload.SocketPath, + Net: "unix", + } + raddr = &net.UnixAddr{ + Name: "@", + Net: "unix", + } + default: + panic(fmt.Errorf("ssh: unknown channel type %s", channelType)) + } + if ok := l.forward(laddr, raddr, ch); !ok { + // Section 7.2, implementations MUST reject spurious incoming + // connections. + ch.Reject(Prohibited, "no forward for address") + continue + } + + } +} + +// remove removes the forward entry, and the channel feeding its +// listener. +func (l *forwardList) remove(addr net.Addr) { + l.Lock() + defer l.Unlock() + for i, f := range l.entries { + if addr.Network() == f.laddr.Network() && addr.String() == f.laddr.String() { + l.entries = append(l.entries[:i], l.entries[i+1:]...) + close(f.c) + return + } + } +} + +// closeAll closes and clears all forwards. +func (l *forwardList) closeAll() { + l.Lock() + defer l.Unlock() + for _, f := range l.entries { + close(f.c) + } + l.entries = nil +} + +func (l *forwardList) forward(laddr, raddr net.Addr, ch NewChannel) bool { + l.Lock() + defer l.Unlock() + for _, f := range l.entries { + if laddr.Network() == f.laddr.Network() && laddr.String() == f.laddr.String() { + f.c <- forward{newCh: ch, raddr: raddr} + return true + } + } + return false +} + +type tcpListener struct { + laddr *net.TCPAddr + + conn *Client + in <-chan forward +} + +// Accept waits for and returns the next connection to the listener. +func (l *tcpListener) Accept() (net.Conn, error) { + s, ok := <-l.in + if !ok { + return nil, io.EOF + } + ch, incoming, err := s.newCh.Accept() + if err != nil { + return nil, err + } + go DiscardRequests(incoming) + + return &chanConn{ + Channel: ch, + laddr: l.laddr, + raddr: s.raddr, + }, nil +} + +// Close closes the listener. +func (l *tcpListener) Close() error { + m := channelForwardMsg{ + l.laddr.IP.String(), + uint32(l.laddr.Port), + } + + // this also closes the listener. 
+ l.conn.forwards.remove(l.laddr) + ok, _, err := l.conn.SendRequest("cancel-tcpip-forward", true, Marshal(&m)) + if err == nil && !ok { + err = errors.New("ssh: cancel-tcpip-forward failed") + } + return err +} + +// Addr returns the listener's network address. +func (l *tcpListener) Addr() net.Addr { + return l.laddr +} + +// Dial initiates a connection to the addr from the remote host. +// The resulting connection has a zero LocalAddr() and RemoteAddr(). +func (c *Client) Dial(n, addr string) (net.Conn, error) { + var ch Channel + switch n { + case "tcp", "tcp4", "tcp6": + // Parse the address into host and numeric port. + host, portString, err := net.SplitHostPort(addr) + if err != nil { + return nil, err + } + port, err := strconv.ParseUint(portString, 10, 16) + if err != nil { + return nil, err + } + ch, err = c.dial(net.IPv4zero.String(), 0, host, int(port)) + if err != nil { + return nil, err + } + // Use a zero address for local and remote address. + zeroAddr := &net.TCPAddr{ + IP: net.IPv4zero, + Port: 0, + } + return &chanConn{ + Channel: ch, + laddr: zeroAddr, + raddr: zeroAddr, + }, nil + case "unix": + var err error + ch, err = c.dialStreamLocal(addr) + if err != nil { + return nil, err + } + return &chanConn{ + Channel: ch, + laddr: &net.UnixAddr{ + Name: "@", + Net: "unix", + }, + raddr: &net.UnixAddr{ + Name: addr, + Net: "unix", + }, + }, nil + default: + return nil, fmt.Errorf("ssh: unsupported protocol: %s", n) + } +} + +// DialTCP connects to the remote address raddr on the network net, +// which must be "tcp", "tcp4", or "tcp6". If laddr is not nil, it is used +// as the local address for the connection. +func (c *Client) DialTCP(n string, laddr, raddr *net.TCPAddr) (net.Conn, error) { + if laddr == nil { + laddr = &net.TCPAddr{ + IP: net.IPv4zero, + Port: 0, + } + } + ch, err := c.dial(laddr.IP.String(), laddr.Port, raddr.IP.String(), raddr.Port) + if err != nil { + return nil, err + } + return &chanConn{ + Channel: ch, + laddr: laddr, + raddr: raddr, + }, nil +} + +// RFC 4254 7.2 +type channelOpenDirectMsg struct { + raddr string + rport uint32 + laddr string + lport uint32 +} + +func (c *Client) dial(laddr string, lport int, raddr string, rport int) (Channel, error) { + msg := channelOpenDirectMsg{ + raddr: raddr, + rport: uint32(rport), + laddr: laddr, + lport: uint32(lport), + } + ch, in, err := c.OpenChannel("direct-tcpip", Marshal(&msg)) + if err != nil { + return nil, err + } + go DiscardRequests(in) + return ch, err +} + +type tcpChan struct { + Channel // the backing channel +} + +// chanConn fulfills the net.Conn interface without +// the tcpChan having to hold laddr or raddr directly. +type chanConn struct { + Channel + laddr, raddr net.Addr +} + +// LocalAddr returns the local network address. +func (t *chanConn) LocalAddr() net.Addr { + return t.laddr +} + +// RemoteAddr returns the remote network address. +func (t *chanConn) RemoteAddr() net.Addr { + return t.raddr +} + +// SetDeadline sets the read and write deadlines associated +// with the connection. +func (t *chanConn) SetDeadline(deadline time.Time) error { + if err := t.SetReadDeadline(deadline); err != nil { + return err + } + return t.SetWriteDeadline(deadline) +} + +// SetReadDeadline sets the read deadline. +// A zero value for t means Read will not time out. +// After the deadline, the error from Read will implement net.Error +// with Timeout() == true. 
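Editorial sketch of the Dial side, not part of the vendored file: because (*Client).Dial matches the `func(network, addr string) (net.Conn, error)` shape, it can be dropped into an http.Transport so that every outgoing TCP connection becomes a direct-tcpip channel. Bastion host, credentials and the target URL are placeholders.

```go
package main

import (
	"io"
	"log"
	"net/http"
	"os"

	"golang.org/x/crypto/ssh"
)

func main() {
	config := &ssh.ClientConfig{
		User:            "demo",
		Auth:            []ssh.AuthMethod{ssh.Password("secret")},
		HostKeyCallback: ssh.InsecureIgnoreHostKey(),
	}
	client, err := ssh.Dial("tcp", "bastion.example.com:22", config)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Each request is tunneled through the SSH connection as a direct-tcpip channel.
	httpClient := &http.Client{Transport: &http.Transport{Dial: client.Dial}}
	resp, err := httpClient.Get("http://10.0.0.5/")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	io.Copy(os.Stdout, resp.Body)
}
```

Note that chanConn rejects SetDeadline (see below), so read/write timeouts have to be enforced at another layer.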
+func (t *chanConn) SetReadDeadline(deadline time.Time) error { + // for compatibility with previous version, + // the error message contains "tcpChan" + return errors.New("ssh: tcpChan: deadline not supported") +} + +// SetWriteDeadline exists to satisfy the net.Conn interface +// but is not implemented by this type. It always returns an error. +func (t *chanConn) SetWriteDeadline(deadline time.Time) error { + return errors.New("ssh: tcpChan: deadline not supported") +} diff --git a/vendor/golang.org/x/crypto/ssh/transport.go b/vendor/golang.org/x/crypto/ssh/transport.go new file mode 100644 index 00000000000..49ddc2e7de4 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/transport.go @@ -0,0 +1,353 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "bufio" + "bytes" + "errors" + "io" + "log" +) + +// debugTransport if set, will print packet types as they go over the +// wire. No message decoding is done, to minimize the impact on timing. +const debugTransport = false + +const ( + gcmCipherID = "aes128-gcm@openssh.com" + aes128cbcID = "aes128-cbc" + tripledescbcID = "3des-cbc" +) + +// packetConn represents a transport that implements packet based +// operations. +type packetConn interface { + // Encrypt and send a packet of data to the remote peer. + writePacket(packet []byte) error + + // Read a packet from the connection. The read is blocking, + // i.e. if error is nil, then the returned byte slice is + // always non-empty. + readPacket() ([]byte, error) + + // Close closes the write-side of the connection. + Close() error +} + +// transport is the keyingTransport that implements the SSH packet +// protocol. +type transport struct { + reader connectionState + writer connectionState + + bufReader *bufio.Reader + bufWriter *bufio.Writer + rand io.Reader + isClient bool + io.Closer +} + +// packetCipher represents a combination of SSH encryption/MAC +// protocol. A single instance should be used for one direction only. +type packetCipher interface { + // writeCipherPacket encrypts the packet and writes it to w. The + // contents of the packet are generally scrambled. + writeCipherPacket(seqnum uint32, w io.Writer, rand io.Reader, packet []byte) error + + // readCipherPacket reads and decrypts a packet of data. The + // returned packet may be overwritten by future calls of + // readPacket. + readCipherPacket(seqnum uint32, r io.Reader) ([]byte, error) +} + +// connectionState represents one side (read or write) of the +// connection. This is necessary because each direction has its own +// keys, and can even have its own algorithms +type connectionState struct { + packetCipher + seqNum uint32 + dir direction + pendingKeyChange chan packetCipher +} + +// prepareKeyChange sets up key material for a keychange. The key changes in +// both directions are triggered by reading and writing a msgNewKey packet +// respectively. 
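As an editorial illustration of the keying machinery just described, here is a standalone sketch of the RFC 4253 section 7.2 derivation loop that generateKeyMaterial further below implements. SHA-256 and the raw byte strings are stand-ins for real key-exchange output; in the actual protocol K is in its mpint wire encoding and the hash comes from the negotiated kex.

```go
package main

import (
	"crypto/sha256"
	"fmt"
)

// deriveKey fills out with HASH(K || H || tag || session_id) for the first
// block, then HASH(K || H || K1), HASH(K || H || K1 || K2), and so on.
func deriveKey(out, tag, k, h, sessionID []byte) {
	var soFar []byte
	for len(out) > 0 {
		hsh := sha256.New()
		hsh.Write(k)
		hsh.Write(h)
		if len(soFar) == 0 {
			hsh.Write(tag)
			hsh.Write(sessionID)
		} else {
			hsh.Write(soFar)
		}
		digest := hsh.Sum(nil)
		n := copy(out, digest)
		out = out[n:]
		if len(out) > 0 {
			soFar = append(soFar, digest...)
		}
	}
}

func main() {
	iv := make([]byte, 16)
	deriveKey(iv, []byte{'A'}, []byte("shared-secret-K"), []byte("exchange-hash-H"), []byte("session-id"))
	fmt.Printf("client->server IV: %x\n", iv)
}
```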
+func (t *transport) prepareKeyChange(algs *algorithms, kexResult *kexResult) error { + ciph, err := newPacketCipher(t.reader.dir, algs.r, kexResult) + if err != nil { + return err + } + t.reader.pendingKeyChange <- ciph + + ciph, err = newPacketCipher(t.writer.dir, algs.w, kexResult) + if err != nil { + return err + } + t.writer.pendingKeyChange <- ciph + + return nil +} + +func (t *transport) printPacket(p []byte, write bool) { + if len(p) == 0 { + return + } + who := "server" + if t.isClient { + who = "client" + } + what := "read" + if write { + what = "write" + } + + log.Println(what, who, p[0]) +} + +// Read and decrypt next packet. +func (t *transport) readPacket() (p []byte, err error) { + for { + p, err = t.reader.readPacket(t.bufReader) + if err != nil { + break + } + if len(p) == 0 || (p[0] != msgIgnore && p[0] != msgDebug) { + break + } + } + if debugTransport { + t.printPacket(p, false) + } + + return p, err +} + +func (s *connectionState) readPacket(r *bufio.Reader) ([]byte, error) { + packet, err := s.packetCipher.readCipherPacket(s.seqNum, r) + s.seqNum++ + if err == nil && len(packet) == 0 { + err = errors.New("ssh: zero length packet") + } + + if len(packet) > 0 { + switch packet[0] { + case msgNewKeys: + select { + case cipher := <-s.pendingKeyChange: + s.packetCipher = cipher + default: + return nil, errors.New("ssh: got bogus newkeys message") + } + + case msgDisconnect: + // Transform a disconnect message into an + // error. Since this is lowest level at which + // we interpret message types, doing it here + // ensures that we don't have to handle it + // elsewhere. + var msg disconnectMsg + if err := Unmarshal(packet, &msg); err != nil { + return nil, err + } + return nil, &msg + } + } + + // The packet may point to an internal buffer, so copy the + // packet out here. 
+ fresh := make([]byte, len(packet))
+ copy(fresh, packet)
+
+ return fresh, err
+}
+
+func (t *transport) writePacket(packet []byte) error {
+ if debugTransport {
+ t.printPacket(packet, true)
+ }
+ return t.writer.writePacket(t.bufWriter, t.rand, packet)
+}
+
+func (s *connectionState) writePacket(w *bufio.Writer, rand io.Reader, packet []byte) error {
+ changeKeys := len(packet) > 0 && packet[0] == msgNewKeys
+
+ err := s.packetCipher.writeCipherPacket(s.seqNum, w, rand, packet)
+ if err != nil {
+ return err
+ }
+ if err = w.Flush(); err != nil {
+ return err
+ }
+ s.seqNum++
+ if changeKeys {
+ select {
+ case cipher := <-s.pendingKeyChange:
+ s.packetCipher = cipher
+ default:
+ panic("ssh: no key material for msgNewKeys")
+ }
+ }
+ return err
+}
+
+func newTransport(rwc io.ReadWriteCloser, rand io.Reader, isClient bool) *transport {
+ t := &transport{
+ bufReader: bufio.NewReader(rwc),
+ bufWriter: bufio.NewWriter(rwc),
+ rand: rand,
+ reader: connectionState{
+ packetCipher: &streamPacketCipher{cipher: noneCipher{}},
+ pendingKeyChange: make(chan packetCipher, 1),
+ },
+ writer: connectionState{
+ packetCipher: &streamPacketCipher{cipher: noneCipher{}},
+ pendingKeyChange: make(chan packetCipher, 1),
+ },
+ Closer: rwc,
+ }
+ t.isClient = isClient
+
+ if isClient {
+ t.reader.dir = serverKeys
+ t.writer.dir = clientKeys
+ } else {
+ t.reader.dir = clientKeys
+ t.writer.dir = serverKeys
+ }
+
+ return t
+}
+
+type direction struct {
+ ivTag []byte
+ keyTag []byte
+ macKeyTag []byte
+}
+
+var (
+ serverKeys = direction{[]byte{'B'}, []byte{'D'}, []byte{'F'}}
+ clientKeys = direction{[]byte{'A'}, []byte{'C'}, []byte{'E'}}
+)
+
+// newPacketCipher creates a packetCipher with cipher and MAC keys derived from kex.K, kex.H and sessionId, as
+// described in RFC 4253, section 6.4. direction should either be serverKeys
+// (to set up server->client keys) or clientKeys (for client->server keys).
+func newPacketCipher(d direction, algs directionAlgorithms, kex *kexResult) (packetCipher, error) {
+ cipherMode := cipherModes[algs.Cipher]
+ macMode := macModes[algs.MAC]
+
+ iv := make([]byte, cipherMode.ivSize)
+ key := make([]byte, cipherMode.keySize)
+ macKey := make([]byte, macMode.keySize)
+
+ generateKeyMaterial(iv, d.ivTag, kex)
+ generateKeyMaterial(key, d.keyTag, kex)
+ generateKeyMaterial(macKey, d.macKeyTag, kex)
+
+ return cipherModes[algs.Cipher].create(key, iv, macKey, algs)
+}
+
+// generateKeyMaterial fills out with key material generated from tag, K, H
+// and sessionId, as specified in RFC 4253, section 7.2.
+func generateKeyMaterial(out, tag []byte, r *kexResult) {
+ var digestsSoFar []byte
+
+ h := r.Hash.New()
+ for len(out) > 0 {
+ h.Reset()
+ h.Write(r.K)
+ h.Write(r.H)
+
+ if len(digestsSoFar) == 0 {
+ h.Write(tag)
+ h.Write(r.SessionID)
+ } else {
+ h.Write(digestsSoFar)
+ }
+
+ digest := h.Sum(nil)
+ n := copy(out, digest)
+ out = out[n:]
+ if len(out) > 0 {
+ digestsSoFar = append(digestsSoFar, digest...)
+ }
+ }
+}
+
+const packageVersion = "SSH-2.0-Go"
+
+// Sends and receives a version line. The versionLine string should
+// be US ASCII, start with "SSH-2.0-", and should not include a
+// newline. exchangeVersions returns the other side's version line.
+func exchangeVersions(rw io.ReadWriter, versionLine []byte) (them []byte, err error) {
+ // Contrary to the RFC, we do not ignore lines that don't
+ // start with "SSH-2.0-" to make the library usable with
+ // nonconforming servers.
+ for _, c := range versionLine {
+ // The spec disallows non US-ASCII chars, and
+ // specifically forbids null chars.
+ if c < 32 { + return nil, errors.New("ssh: junk character in version line") + } + } + if _, err = rw.Write(append(versionLine, '\r', '\n')); err != nil { + return + } + + them, err = readVersion(rw) + return them, err +} + +// maxVersionStringBytes is the maximum number of bytes that we'll +// accept as a version string. RFC 4253 section 4.2 limits this at 255 +// chars +const maxVersionStringBytes = 255 + +// Read version string as specified by RFC 4253, section 4.2. +func readVersion(r io.Reader) ([]byte, error) { + versionString := make([]byte, 0, 64) + var ok bool + var buf [1]byte + + for length := 0; length < maxVersionStringBytes; length++ { + _, err := io.ReadFull(r, buf[:]) + if err != nil { + return nil, err + } + // The RFC says that the version should be terminated with \r\n + // but several SSH servers actually only send a \n. + if buf[0] == '\n' { + if !bytes.HasPrefix(versionString, []byte("SSH-")) { + // RFC 4253 says we need to ignore all version string lines + // except the one containing the SSH version (provided that + // all the lines do not exceed 255 bytes in total). + versionString = versionString[:0] + continue + } + ok = true + break + } + + // non ASCII chars are disallowed, but we are lenient, + // since Go doesn't use null-terminated strings. + + // The RFC allows a comment after a space, however, + // all of it (version and comments) goes into the + // session hash. + versionString = append(versionString, buf[0]) + } + + if !ok { + return nil, errors.New("ssh: overflow reading version string") + } + + // There might be a '\r' on the end which we should remove. + if len(versionString) > 0 && versionString[len(versionString)-1] == '\r' { + versionString = versionString[:len(versionString)-1] + } + return versionString, nil +} diff --git a/vendor/gopkg.in/jcmturner/goidentity.v3/.gitignore b/vendor/gopkg.in/jcmturner/goidentity.v3/.gitignore new file mode 100644 index 00000000000..a1338d68517 --- /dev/null +++ b/vendor/gopkg.in/jcmturner/goidentity.v3/.gitignore @@ -0,0 +1,14 @@ +# Binaries for programs and plugins +*.exe +*.dll +*.so +*.dylib + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 +.glide/ diff --git a/vendor/gopkg.in/jcmturner/goidentity.v3/LICENSE b/vendor/gopkg.in/jcmturner/goidentity.v3/LICENSE new file mode 100644 index 00000000000..8dada3edaf5 --- /dev/null +++ b/vendor/gopkg.in/jcmturner/goidentity.v3/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/gopkg.in/jcmturner/goidentity.v3/README.md b/vendor/gopkg.in/jcmturner/goidentity.v3/README.md new file mode 100644 index 00000000000..89b33ebb70b --- /dev/null +++ b/vendor/gopkg.in/jcmturner/goidentity.v3/README.md @@ -0,0 +1,13 @@ +# goidentity + +Standard interface to holding authenticated identities and their attributes. 
+ +To get the package, execute: +``` +go get gopkg.in/jcmturner/goidentity.v3 +``` +To import this package, add the following line to your code: +```go +import "gopkg.in/jcmturner/goidentity.v3" + +``` \ No newline at end of file diff --git a/vendor/gopkg.in/jcmturner/goidentity.v3/authenticator.go b/vendor/gopkg.in/jcmturner/goidentity.v3/authenticator.go new file mode 100644 index 00000000000..42ec79b0617 --- /dev/null +++ b/vendor/gopkg.in/jcmturner/goidentity.v3/authenticator.go @@ -0,0 +1,6 @@ +package goidentity + +type Authenticator interface { + Authenticate() (Identity, bool, error) + Mechanism() string // gives the name of the type of authentication mechanism +} diff --git a/vendor/gopkg.in/jcmturner/goidentity.v3/identity.go b/vendor/gopkg.in/jcmturner/goidentity.v3/identity.go new file mode 100644 index 00000000000..d36c23fe050 --- /dev/null +++ b/vendor/gopkg.in/jcmturner/goidentity.v3/identity.go @@ -0,0 +1,32 @@ +package goidentity + +import "time" + +const ( + CTXKey = "jcmturner/goidentity" +) + +type Identity interface { + UserName() string + SetUserName(s string) + Domain() string + SetDomain(s string) + DisplayName() string + SetDisplayName(s string) + Human() bool + SetHuman(b bool) + AuthTime() time.Time + SetAuthTime(t time.Time) + AuthzAttributes() []string + AddAuthzAttribute(a string) + RemoveAuthzAttribute(a string) + Authenticated() bool + SetAuthenticated(b bool) + Authorized(a string) bool + SessionID() string + Expired() bool + Attributes() map[string]interface{} + SetAttribute(k string, v interface{}) + SetAttributes(map[string]interface{}) + RemoveAttribute(k string) +} diff --git a/vendor/gopkg.in/jcmturner/goidentity.v3/user.go b/vendor/gopkg.in/jcmturner/goidentity.v3/user.go new file mode 100644 index 00000000000..d79f140c9f4 --- /dev/null +++ b/vendor/gopkg.in/jcmturner/goidentity.v3/user.go @@ -0,0 +1,154 @@ +package goidentity + +import ( + "github.com/hashicorp/go-uuid" + "time" +) + +type User struct { + authenticated bool + domain string + userName string + displayName string + email string + human bool + groupMembership map[string]bool + authTime time.Time + sessionID string + expiry time.Time + attributes map[string]interface{} +} + +func NewUser(username string) User { + uuid, err := uuid.GenerateUUID() + if err != nil { + uuid = "00unique-sess-ions-uuid-unavailable0" + } + return User{ + userName: username, + groupMembership: make(map[string]bool), + sessionID: uuid, + } +} + +func (u *User) UserName() string { + return u.userName +} + +func (u *User) SetUserName(s string) { + u.userName = s +} + +func (u *User) Domain() string { + return u.domain +} + +func (u *User) SetDomain(s string) { + u.domain = s +} + +func (u *User) DisplayName() string { + if u.displayName == "" { + return u.userName + } + return u.displayName +} + +func (u *User) SetDisplayName(s string) { + u.displayName = s +} + +func (u *User) Human() bool { + return u.human +} + +func (u *User) SetHuman(b bool) { + u.human = b +} + +func (u *User) AuthTime() time.Time { + return u.authTime +} + +func (u *User) SetAuthTime(t time.Time) { + u.authTime = t +} + +func (u *User) AuthzAttributes() []string { + s := make([]string, len(u.groupMembership)) + i := 0 + for a := range u.groupMembership { + s[i] = a + i++ + } + return s +} + +func (u *User) Authenticated() bool { + return u.authenticated +} + +func (u *User) SetAuthenticated(b bool) { + u.authenticated = b +} + +func (u *User) AddAuthzAttribute(a string) { + u.groupMembership[a] = true +} + +func (u *User) 
RemoveAuthzAttribute(a string) { + if _, ok := u.groupMembership[a]; !ok { + return + } + delete(u.groupMembership, a) +} + +func (u *User) EnableAuthzAttribute(a string) { + if enabled, ok := u.groupMembership[a]; ok && !enabled { + u.groupMembership[a] = true + } +} + +func (u *User) DisableAuthzAttribute(a string) { + if enabled, ok := u.groupMembership[a]; ok && enabled { + u.groupMembership[a] = false + } +} + +func (u *User) Authorized(a string) bool { + if enabled, ok := u.groupMembership[a]; ok && enabled { + return true + } + return false +} + +func (u *User) SessionID() string { + return u.sessionID +} + +func (u *User) SetExpiry(t time.Time) { + u.expiry = t +} + +func (u *User) Expired() bool { + if !u.expiry.IsZero() && time.Now().UTC().After(u.expiry) { + return true + } + return false +} + +func (u *User) Attributes() map[string]interface{} { + return u.attributes +} + +func (u *User) SetAttribute(k string, v interface{}) { + u.attributes[k] = v +} + +func (u *User) SetAttributes(a map[string]interface{}) { + u.attributes = a +} + +func (u *User) RemoveAttribute(k string) { + delete(u.attributes, k) +} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/service/APExchange.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/service/APExchange.go new file mode 100644 index 00000000000..4126cfa1582 --- /dev/null +++ b/vendor/gopkg.in/jcmturner/gokrb5.v7/service/APExchange.go @@ -0,0 +1,62 @@ +package service + +import ( + "time" + + "gopkg.in/jcmturner/gokrb5.v7/credentials" + "gopkg.in/jcmturner/gokrb5.v7/iana/errorcode" + "gopkg.in/jcmturner/gokrb5.v7/messages" +) + +// VerifyAPREQ verifies an AP_REQ sent to the service. Returns a boolean for if the AP_REQ is valid and the client's principal name and realm. +func VerifyAPREQ(APReq messages.APReq, s *Settings) (bool, *credentials.Credentials, error) { + var creds *credentials.Credentials + + ok, err := APReq.Verify(s.Keytab, s.MaxClockSkew(), s.ClientAddress()) + if err != nil || !ok { + return false, creds, err + } + + if s.RequireHostAddr() && len(APReq.Ticket.DecryptedEncPart.CAddr) < 1 { + return false, creds, + messages.NewKRBError(APReq.Ticket.SName, APReq.Ticket.Realm, errorcode.KRB_AP_ERR_BADADDR, "ticket does not contain HostAddress values required") + } + + // Check for replay + rc := GetReplayCache(s.MaxClockSkew()) + if rc.IsReplay(APReq.Ticket.SName, APReq.Authenticator) { + return false, creds, + messages.NewKRBError(APReq.Ticket.SName, APReq.Ticket.Realm, errorcode.KRB_AP_ERR_REPEAT, "replay detected") + } + + c := credentials.NewFromPrincipalName(APReq.Authenticator.CName, APReq.Authenticator.CRealm) + creds = c + creds.SetAuthTime(time.Now().UTC()) + creds.SetAuthenticated(true) + creds.SetValidUntil(APReq.Ticket.DecryptedEncPart.EndTime) + + //PAC decoding + if !s.disablePACDecoding { + isPAC, pac, err := APReq.Ticket.GetPACType(s.Keytab, s.KeytabPrincipal(), s.Logger()) + if isPAC && err != nil { + return false, creds, err + } + if isPAC { + // There is a valid PAC. 
Adding attributes to creds
+ creds.SetADCredentials(credentials.ADCredentials{
+ GroupMembershipSIDs: pac.KerbValidationInfo.GetGroupMembershipSIDs(),
+ LogOnTime: pac.KerbValidationInfo.LogOnTime.Time(),
+ LogOffTime: pac.KerbValidationInfo.LogOffTime.Time(),
+ PasswordLastSet: pac.KerbValidationInfo.PasswordLastSet.Time(),
+ EffectiveName: pac.KerbValidationInfo.EffectiveName.Value,
+ FullName: pac.KerbValidationInfo.FullName.Value,
+ UserID: int(pac.KerbValidationInfo.UserID),
+ PrimaryGroupID: int(pac.KerbValidationInfo.PrimaryGroupID),
+ LogonServer: pac.KerbValidationInfo.LogonServer.Value,
+ LogonDomainName: pac.KerbValidationInfo.LogonDomainName.Value,
+ LogonDomainID: pac.KerbValidationInfo.LogonDomainID.String(),
+ })
+ }
+ }
+ return true, creds, nil
+}
diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/service/authenticator.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/service/authenticator.go
new file mode 100644
index 00000000000..d60d259ad36
--- /dev/null
+++ b/vendor/gopkg.in/jcmturner/gokrb5.v7/service/authenticator.go
@@ -0,0 +1,118 @@
+package service
+
+import (
+ "encoding/base64"
+ "fmt"
+ "strings"
+ "time"
+
+ goidentity "gopkg.in/jcmturner/goidentity.v3"
+ "gopkg.in/jcmturner/gokrb5.v7/client"
+ "gopkg.in/jcmturner/gokrb5.v7/config"
+ "gopkg.in/jcmturner/gokrb5.v7/credentials"
+)
+
+// NewKRB5BasicAuthenticator creates a new KRB5BasicAuthenticator.
+func NewKRB5BasicAuthenticator(headerVal string, krb5conf *config.Config, serviceSettings *Settings, clientSettings *client.Settings) KRB5BasicAuthenticator {
+ return KRB5BasicAuthenticator{
+ BasicHeaderValue: headerVal,
+ clientConfig: krb5conf,
+ serviceSettings: serviceSettings,
+ clientSettings: clientSettings,
+ }
+}
+
+// KRB5BasicAuthenticator implements gopkg.in/jcmturner/goidentity.v3.Authenticator interface.
+// It takes a username and password so it can be used for basic authentication.
+type KRB5BasicAuthenticator struct {
+ BasicHeaderValue string
+ serviceSettings *Settings
+ clientSettings *client.Settings
+ clientConfig *config.Config
+ realm string
+ username string
+ password string
+}
+
+// Authenticate and return the identity. The boolean indicates if the authentication was successful.
+func (a KRB5BasicAuthenticator) Authenticate() (i goidentity.Identity, ok bool, err error) {
+ a.realm, a.username, a.password, err = parseBasicHeaderValue(a.BasicHeaderValue)
+ if err != nil {
+ err = fmt.Errorf("could not parse basic authentication header: %v", err)
+ return
+ }
+ cl := client.NewClientWithPassword(a.username, a.realm, a.password, a.clientConfig)
+ err = cl.Login()
+ if err != nil {
+ // Username and/or password could be wrong
+ err = fmt.Errorf("error with user credentials during login: %v", err)
+ return
+ }
+ tkt, _, err := cl.GetServiceTicket(a.serviceSettings.SName())
+ if err != nil {
+ err = fmt.Errorf("could not get service ticket: %v", err)
+ return
+ }
+ err = tkt.DecryptEncPart(a.serviceSettings.Keytab, a.serviceSettings.KeytabPrincipal())
+ if err != nil {
+ err = fmt.Errorf("could not decrypt service ticket: %v", err)
+ return
+ }
+ cl.Credentials.SetAuthTime(time.Now().UTC())
+ cl.Credentials.SetAuthenticated(true)
+ isPAC, pac, err := tkt.GetPACType(a.serviceSettings.Keytab, a.serviceSettings.KeytabPrincipal(), a.serviceSettings.Logger())
+ if isPAC && err != nil {
+ err = fmt.Errorf("error processing PAC: %v", err)
+ return
+ }
+ if isPAC {
+ // There is a valid PAC.
Adding attributes to creds
+ cl.Credentials.SetADCredentials(credentials.ADCredentials{
+ GroupMembershipSIDs: pac.KerbValidationInfo.GetGroupMembershipSIDs(),
+ LogOnTime: pac.KerbValidationInfo.LogOnTime.Time(),
+ LogOffTime: pac.KerbValidationInfo.LogOffTime.Time(),
+ PasswordLastSet: pac.KerbValidationInfo.PasswordLastSet.Time(),
+ EffectiveName: pac.KerbValidationInfo.EffectiveName.Value,
+ FullName: pac.KerbValidationInfo.FullName.Value,
+ UserID: int(pac.KerbValidationInfo.UserID),
+ PrimaryGroupID: int(pac.KerbValidationInfo.PrimaryGroupID),
+ LogonServer: pac.KerbValidationInfo.LogonServer.Value,
+ LogonDomainName: pac.KerbValidationInfo.LogonDomainName.Value,
+ LogonDomainID: pac.KerbValidationInfo.LogonDomainID.String(),
+ })
+ }
+ ok = true
+ i = cl.Credentials
+ return
+}
+
+// Mechanism returns the authentication mechanism.
+func (a KRB5BasicAuthenticator) Mechanism() string {
+ return "Kerberos Basic"
+}
+
+func parseBasicHeaderValue(s string) (domain, username, password string, err error) {
+ b, err := base64.StdEncoding.DecodeString(s)
+ if err != nil {
+ return
+ }
+ v := string(b)
+ vc := strings.SplitN(v, ":", 2)
+ password = vc[1]
+ // Domain and username can be specified in 2 formats:
+ // <username> - no domain specified
+ // <domain>\<username>
+ // <username>@<domain>
+ if strings.Contains(vc[0], `\`) {
+ u := strings.SplitN(vc[0], `\`, 2)
+ domain = u[0]
+ username = u[1]
+ } else if strings.Contains(vc[0], `@`) {
+ u := strings.SplitN(vc[0], `@`, 2)
+ domain = u[1]
+ username = u[0]
+ } else {
+ username = vc[0]
+ }
+ return
+}
diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/service/cache.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/service/cache.go
new file mode 100644
index 00000000000..c844749362d
--- /dev/null
+++ b/vendor/gopkg.in/jcmturner/gokrb5.v7/service/cache.go
@@ -0,0 +1,148 @@
+// Package service provides server side integrations for Kerberos authentication.
+package service
+
+import (
+ "gopkg.in/jcmturner/gokrb5.v7/types"
+ "sync"
+ "time"
+)
+
+/*The server MUST utilize a replay cache to remember any authenticator
+presented within the allowable clock skew.
+The replay cache will store at least the server name, along with the
+client name, time, and microsecond fields from the recently-seen
+authenticators, and if a matching tuple is found, the
+KRB_AP_ERR_REPEAT error is returned. Note that the rejection here is
+restricted to authenticators from the same principal to the same
+server. Other client principals communicating with the same server
+principal should not have their authenticators rejected if the time
+and microsecond fields happen to match some other client's
+authenticator.
+
+If a server loses track of authenticators presented within the
+allowable clock skew, it MUST reject all requests until the clock
+skew interval has passed, providing assurance that any lost or
+replayed authenticators will fall outside the allowable clock skew
+and can no longer be successfully replayed. If this were not done,
+an attacker could subvert the authentication by recording the ticket
+and authenticator sent over the network to a server and replaying
+them following an event that caused the server to lose track of
+recently seen authenticators.*/
+
+// Cache for tickets received from clients keyed by fully qualified client name. Used to track replay of tickets.
+type Cache struct {
+ entries map[string]clientEntries
+ mux sync.RWMutex
+}
+
+// clientEntries holds entries of client details sent to the service.
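To make the accepted shapes concrete, here is a small editorial sketch (not part of the vendored code) of Authorization header values whose base64 payloads parseBasicHeaderValue above splits into realm, username and password; the realm and credentials are placeholders.

```go
package main

import (
	"encoding/base64"
	"fmt"
)

func main() {
	// The value after "Basic " is base64(user-part:password); the user part may
	// be bare, DOMAIN\username, or username@DOMAIN.
	for _, cred := range []string{
		`alice:s3cret`,
		`EXAMPLE.COM\alice:s3cret`,
		`alice@EXAMPLE.COM:s3cret`,
	} {
		fmt.Println("Authorization: Basic " + base64.StdEncoding.EncodeToString([]byte(cred)))
	}
}
```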
+type clientEntries struct { + replayMap map[time.Time]replayCacheEntry + seqNumber int64 + subKey types.EncryptionKey +} + +// Cache entry tracking client time values of tickets sent to the service. +type replayCacheEntry struct { + presentedTime time.Time + sName types.PrincipalName + cTime time.Time // This combines the ticket's CTime and Cusec +} + +func (c *Cache) getClientEntries(cname types.PrincipalName) (clientEntries, bool) { + c.mux.RLock() + defer c.mux.RUnlock() + ce, ok := c.entries[cname.PrincipalNameString()] + return ce, ok +} + +func (c *Cache) getClientEntry(cname types.PrincipalName, t time.Time) (replayCacheEntry, bool) { + if ce, ok := c.getClientEntries(cname); ok { + c.mux.RLock() + defer c.mux.RUnlock() + if e, ok := ce.replayMap[t]; ok { + return e, true + } + } + return replayCacheEntry{}, false +} + +// Instance of the ServiceCache. This needs to be a singleton. +var replayCache Cache +var once sync.Once + +// GetReplayCache returns a pointer to the Cache singleton. +func GetReplayCache(d time.Duration) *Cache { + // Create a singleton of the ReplayCache and start a background thread to regularly clean out old entries + once.Do(func() { + replayCache = Cache{ + entries: make(map[string]clientEntries), + } + go func() { + for { + // TODO consider using a context here. + time.Sleep(d) + replayCache.ClearOldEntries(d) + } + }() + }) + return &replayCache +} + +// AddEntry adds an entry to the Cache. +func (c *Cache) AddEntry(sname types.PrincipalName, a types.Authenticator) { + ct := a.CTime.Add(time.Duration(a.Cusec) * time.Microsecond) + if ce, ok := c.getClientEntries(a.CName); ok { + c.mux.Lock() + defer c.mux.Unlock() + ce.replayMap[ct] = replayCacheEntry{ + presentedTime: time.Now().UTC(), + sName: sname, + cTime: ct, + } + ce.seqNumber = a.SeqNumber + ce.subKey = a.SubKey + } else { + c.mux.Lock() + defer c.mux.Unlock() + c.entries[a.CName.PrincipalNameString()] = clientEntries{ + replayMap: map[time.Time]replayCacheEntry{ + ct: { + presentedTime: time.Now().UTC(), + sName: sname, + cTime: ct, + }, + }, + seqNumber: a.SeqNumber, + subKey: a.SubKey, + } + } +} + +// ClearOldEntries clears entries from the Cache that are older than the duration provided. +func (c *Cache) ClearOldEntries(d time.Duration) { + c.mux.Lock() + defer c.mux.Unlock() + for ke, ce := range c.entries { + for k, e := range ce.replayMap { + if time.Now().UTC().Sub(e.presentedTime) > d { + delete(ce.replayMap, k) + } + } + if len(ce.replayMap) == 0 { + delete(c.entries, ke) + } + } +} + +// IsReplay tests if the Authenticator provided is a replay within the duration defined. If this is not a replay add the entry to the cache for tracking. +func (c *Cache) IsReplay(sname types.PrincipalName, a types.Authenticator) bool { + ct := a.CTime.Add(time.Duration(a.Cusec) * time.Microsecond) + if e, ok := c.getClientEntry(a.CName, ct); ok { + if e.sName.Equal(sname) { + return true + } + } + c.AddEntry(sname, a) + return false +} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/service/settings.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/service/settings.go new file mode 100644 index 00000000000..6e373ced6ba --- /dev/null +++ b/vendor/gopkg.in/jcmturner/gokrb5.v7/service/settings.go @@ -0,0 +1,136 @@ +package service + +import ( + "log" + "time" + + "gopkg.in/jcmturner/gokrb5.v7/keytab" + "gopkg.in/jcmturner/gokrb5.v7/types" +) + +// Settings defines service side configuration settings. 
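An editorial sketch of the replay-cache behavior just described, using the exported entry points. GetReplayCache returns the process-wide singleton (and starts the cleanup goroutine once); a second sighting of the same client/time tuple is reported as a replay. The principal names are placeholders.

```go
package main

import (
	"fmt"
	"time"

	"gopkg.in/jcmturner/gokrb5.v7/service"
	"gopkg.in/jcmturner/gokrb5.v7/types"
)

func main() {
	rc := service.GetReplayCache(5 * time.Minute)

	sname := types.PrincipalName{NameString: []string{"HTTP", "svc.example.com"}}
	a := types.Authenticator{
		CName: types.PrincipalName{NameString: []string{"alice"}},
		CTime: time.Now().UTC(),
		Cusec: 1234,
	}

	fmt.Println(rc.IsReplay(sname, a)) // false: first sighting, entry is recorded
	fmt.Println(rc.IsReplay(sname, a)) // true: same client time tuple seen again
}
```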
+type Settings struct {
+ Keytab *keytab.Keytab
+ ktprinc *types.PrincipalName
+ sname string
+ requireHostAddr bool
+ disablePACDecoding bool
+ cAddr types.HostAddress
+ maxClockSkew time.Duration
+ logger *log.Logger
+}
+
+// NewSettings creates a new service Settings.
+func NewSettings(kt *keytab.Keytab, settings ...func(*Settings)) *Settings {
+ s := new(Settings)
+ s.Keytab = kt
+ for _, set := range settings {
+ set(s)
+ }
+ return s
+}
+
+// RequireHostAddr used to configure the service side to require host addresses to be specified in Kerberos tickets.
+//
+// s := NewSettings(kt, RequireHostAddr(true))
+func RequireHostAddr(b bool) func(*Settings) {
+ return func(s *Settings) {
+ s.requireHostAddr = b
+ }
+}
+
+// RequireHostAddr indicates if the service should require the host address to be included in the ticket.
+func (s *Settings) RequireHostAddr() bool {
+ return s.requireHostAddr
+}
+
+// DecodePAC used to configure service side to enable/disable PAC decoding if the PAC is present.
+// Defaults to enabled if not specified.
+//
+// s := NewSettings(kt, DecodePAC(false))
+func DecodePAC(b bool) func(*Settings) {
+ return func(s *Settings) {
+ s.disablePACDecoding = !b
+ }
+}
+
+// DecodePAC indicates whether the service should decode any PAC information present in the ticket.
+func (s *Settings) DecodePAC() bool {
+ return !s.disablePACDecoding
+}
+
+// ClientAddress used to configure service side with the client's host address to be used during validation.
+//
+// s := NewSettings(kt, ClientAddress(h))
+func ClientAddress(h types.HostAddress) func(*Settings) {
+ return func(s *Settings) {
+ s.cAddr = h
+ }
+}
+
+// ClientAddress returns the client host address which has been provided to the service.
+func (s *Settings) ClientAddress() types.HostAddress {
+ return s.cAddr
+}
+
+// Logger used to configure service side with a logger.
+//
+// s := NewSettings(kt, Logger(l))
+func Logger(l *log.Logger) func(*Settings) {
+ return func(s *Settings) {
+ s.logger = l
+ }
+}
+
+// Logger returns the logger instance configured for the service. If none is configured nil will be returned.
+func (s *Settings) Logger() *log.Logger {
+ return s.logger
+}
+
+// KeytabPrincipal used to override the principal name used to find the key in the keytab.
+//
+// s := NewSettings(kt, KeytabPrincipal("someaccount"))
+func KeytabPrincipal(p string) func(*Settings) {
+ return func(s *Settings) {
+ pn, _ := types.ParseSPNString(p)
+ s.ktprinc = &pn
+ }
+}
+
+// KeytabPrincipal returns the principal name used to find the key in the keytab if it has been overridden.
+func (s *Settings) KeytabPrincipal() *types.PrincipalName {
+ return s.ktprinc
+}
+
+// MaxClockSkew used to configure service side with the maximum acceptable clock skew
+// between the service and the issue time of kerberos tickets.
+//
+// s := NewSettings(kt, MaxClockSkew(d))
+func MaxClockSkew(d time.Duration) func(*Settings) {
+ return func(s *Settings) {
+ s.maxClockSkew = d
+ }
+}
+
+// MaxClockSkew returns the maximum acceptable clock skew between the service and the issue time of kerberos tickets.
+// If none is defined a duration of 5 minutes is returned.
+func (s *Settings) MaxClockSkew() time.Duration {
+ if s.maxClockSkew.Nanoseconds() == 0 {
+ return time.Duration(5) * time.Minute
+ }
+ return s.maxClockSkew
+}
+
+// SName used to provide a specific service name to the service settings.
+//
+// s := NewSettings(kt, SName("HTTP/some.service.com"))
+func SName(sname string) func(*Settings) {
+ return func(s *Settings) {
+ s.sname = sname
+ }
+}
+
+// SName returns the specific service name to the service.
+func (s *Settings) SName() string {
+ return s.sname
+}
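Taken together, the functional options above compose as in this editorial sketch; the keytab path, SPN and skew value are placeholders, and the empty keytab stands in for one loaded with keytab.Load.

```go
package main

import (
	"log"
	"os"
	"time"

	"gopkg.in/jcmturner/gokrb5.v7/keytab"
	"gopkg.in/jcmturner/gokrb5.v7/service"
)

func main() {
	kt := keytab.New() // sketch only; normally keytab.Load("/etc/krb5.keytab")
	l := log.New(os.Stderr, "KRB5: ", log.LstdFlags)

	s := service.NewSettings(kt,
		service.SName("HTTP/svc.example.com"),
		service.MaxClockSkew(2*time.Minute),
		service.DecodePAC(false),
		service.Logger(l),
	)
	l.Printf("PAC decoding enabled: %v, max skew: %v", s.DecodePAC(), s.MaxClockSkew())
}
```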
diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/spnego/http.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/spnego/http.go
new file mode 100644
index 00000000000..0cb28449c5d
--- /dev/null
+++ b/vendor/gopkg.in/jcmturner/gokrb5.v7/spnego/http.go
@@ -0,0 +1,293 @@
+package spnego
+
+import (
+ "bytes"
+ "context"
+ "encoding/base64"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net"
+ "net/http"
+ "net/http/cookiejar"
+ "net/url"
+ "strings"
+
+ "gopkg.in/jcmturner/goidentity.v3"
+ "gopkg.in/jcmturner/gokrb5.v7/client"
+ "gopkg.in/jcmturner/gokrb5.v7/gssapi"
+ "gopkg.in/jcmturner/gokrb5.v7/keytab"
+ "gopkg.in/jcmturner/gokrb5.v7/krberror"
+ "gopkg.in/jcmturner/gokrb5.v7/service"
+ "gopkg.in/jcmturner/gokrb5.v7/types"
+)
+
+// Client side functionality //
+
+// Client will negotiate authentication with a server using SPNEGO.
+type Client struct {
+ *http.Client
+ krb5Client *client.Client
+ spn string
+ reqs []*http.Request
+}
+
+type redirectErr struct {
+ reqTarget *http.Request
+}
+
+func (e redirectErr) Error() string {
+ return fmt.Sprintf("redirect to %v", e.reqTarget.URL)
+}
+
+type teeReadCloser struct {
+ io.Reader
+ io.Closer
+}
+
+// NewClient returns an SPNEGO enabled HTTP client.
+func NewClient(krb5Cl *client.Client, httpCl *http.Client, spn string) *Client {
+ if httpCl == nil {
+ httpCl = http.DefaultClient
+ }
+ // Add a cookie jar if there isn't one
+ if httpCl.Jar == nil {
+ httpCl.Jar, _ = cookiejar.New(nil)
+ }
+ // Add a CheckRedirect function that will execute any function already defined and then error with a redirectErr
+ f := httpCl.CheckRedirect
+ httpCl.CheckRedirect = func(req *http.Request, via []*http.Request) error {
+ if f != nil {
+ err := f(req, via)
+ if err != nil {
+ return err
+ }
+ }
+ return redirectErr{reqTarget: req}
+ }
+ return &Client{
+ Client: httpCl,
+ krb5Client: krb5Cl,
+ spn: spn,
+ }
+}
+
+// Do is the SPNEGO enabled HTTP client's equivalent of the http.Client's Do method.
+func (c *Client) Do(req *http.Request) (resp *http.Response, err error) {
+ var body bytes.Buffer
+ if req.Body != nil {
+ // Use a tee reader to capture any body sent in case we have to replay it again
+ teeR := io.TeeReader(req.Body, &body)
+ teeRC := teeReadCloser{teeR, req.Body}
+ req.Body = teeRC
+ }
+ resp, err = c.Client.Do(req)
+ if err != nil {
+ if ue, ok := err.(*url.Error); ok {
+ if e, ok := ue.Err.(redirectErr); ok {
+ // Picked up a redirect
+ e.reqTarget.Header.Del(HTTPHeaderAuthRequest)
+ c.reqs = append(c.reqs, e.reqTarget)
+ if len(c.reqs) >= 10 {
+ return resp, errors.New("stopped after 10 redirects")
+ }
+ if req.Body != nil {
+ // Refresh the body reader so the body can be sent again
+ e.reqTarget.Body = ioutil.NopCloser(&body)
+ }
+ return c.Do(e.reqTarget)
+ }
+ }
+ return resp, err
+ }
+ if respUnauthorizedNegotiate(resp) {
+ err := SetSPNEGOHeader(c.krb5Client, req, c.spn)
+ if err != nil {
+ return resp, err
+ }
+ if req.Body != nil {
+ // Refresh the body reader so the body can be sent again
+ req.Body = ioutil.NopCloser(&body)
+ }
+ return c.Do(req)
+ }
+ return resp, err
+}
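An editorial sketch of driving this client end to end, using the exported constructors above; the krb5.conf path, realm, credentials and URL are placeholders. Passing "" for the SPN lets SetSPNEGOHeader derive HTTP/<host> from the request URL.

```go
package main

import (
	"fmt"
	"log"

	"gopkg.in/jcmturner/gokrb5.v7/client"
	"gopkg.in/jcmturner/gokrb5.v7/config"
	"gopkg.in/jcmturner/gokrb5.v7/spnego"
)

func main() {
	cfg, err := config.Load("/etc/krb5.conf")
	if err != nil {
		log.Fatal(err)
	}
	cl := client.NewClientWithPassword("alice", "EXAMPLE.COM", "s3cret", cfg)
	if err := cl.Login(); err != nil {
		log.Fatal(err)
	}

	spnegoCl := spnego.NewClient(cl, nil, "")
	resp, err := spnegoCl.Get("http://svc.example.com/protected")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
```

On the service side, the counterpart is SPNEGOKRB5Authenticate further below, which wraps an http.Handler and places the authenticated identity in the request context.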
+// Get is the SPNEGO enabled HTTP client's equivalent of the http.Client's Get method.
+func (c *Client) Get(url string) (resp *http.Response, err error) {
+ req, err := http.NewRequest("GET", url, nil)
+ if err != nil {
+ return nil, err
+ }
+ return c.Do(req)
+}
+
+// Post is the SPNEGO enabled HTTP client's equivalent of the http.Client's Post method.
+func (c *Client) Post(url, contentType string, body io.Reader) (resp *http.Response, err error) {
+ req, err := http.NewRequest("POST", url, body)
+ if err != nil {
+ return nil, err
+ }
+ req.Header.Set("Content-Type", contentType)
+ return c.Do(req)
+}
+
+// PostForm is the SPNEGO enabled HTTP client's equivalent of the http.Client's PostForm method.
+func (c *Client) PostForm(url string, data url.Values) (resp *http.Response, err error) {
+ return c.Post(url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode()))
+}
+
+// Head is the SPNEGO enabled HTTP client's equivalent of the http.Client's Head method.
+func (c *Client) Head(url string) (resp *http.Response, err error) {
+ req, err := http.NewRequest("HEAD", url, nil)
+ if err != nil {
+ return nil, err
+ }
+ return c.Do(req)
+}
+
+func respUnauthorizedNegotiate(resp *http.Response) bool {
+ if resp.StatusCode == http.StatusUnauthorized {
+ if resp.Header.Get(HTTPHeaderAuthResponse) == HTTPHeaderAuthResponseValueKey {
+ return true
+ }
+ }
+ return false
+}
+
+// SetSPNEGOHeader gets the service ticket and sets it as the SPNEGO authorization header on the HTTP request object.
+// To auto generate the SPN from the request object pass an empty string "".
+func SetSPNEGOHeader(cl *client.Client, r *http.Request, spn string) error {
+ if spn == "" {
+ h := strings.TrimSuffix(strings.SplitN(r.URL.Host, ":", 2)[0], ".")
+ name, err := net.LookupCNAME(h)
+ if err == nil {
+ // Underlying canonical name should be used for SPN
+ h = strings.TrimSuffix(name, ".")
+ }
+ spn = "HTTP/" + h
+ r.Host = h
+ }
+ cl.Log("using SPN %s", spn)
+ s := SPNEGOClient(cl, spn)
+ err := s.AcquireCred()
+ if err != nil {
+ return fmt.Errorf("could not acquire client credential: %v", err)
+ }
+ st, err := s.InitSecContext()
+ if err != nil {
+ return fmt.Errorf("could not initialize context: %v", err)
+ }
+ nb, err := st.Marshal()
+ if err != nil {
+ return krberror.Errorf(err, krberror.EncodingError, "could not marshal SPNEGO")
+ }
+ hs := "Negotiate " + base64.StdEncoding.EncodeToString(nb)
+ r.Header.Set(HTTPHeaderAuthRequest, hs)
+ return nil
+}
+
+// Service side functionality //
+
+type ctxKey string
+
+const (
+ // spnegoNegTokenRespKRBAcceptCompleted - The response on successful authentication always has this header. Capturing as const so we don't have marshaling and encoding overhead.
+ spnegoNegTokenRespKRBAcceptCompleted = "Negotiate oRQwEqADCgEAoQsGCSqGSIb3EgECAg=="
+ // spnegoNegTokenRespReject - The response on a failed authentication always has this rejection header. Capturing as const so we don't have marshaling and encoding overhead.
+ spnegoNegTokenRespReject = "Negotiate oQcwBaADCgEC"
+ // spnegoNegTokenRespIncompleteKRB5 - Response token specifying incomplete context and KRB5 as the supported mechtype.
+ spnegoNegTokenRespIncompleteKRB5 = "Negotiate oRQwEqADCgEBoQsGCSqGSIb3EgECAg=="
+ // CTXKeyAuthenticated is the request context key holding a boolean indicating if the request has been authenticated.
+ CTXKeyAuthenticated ctxKey = "github.com/jcmturner/gokrb5/CTXKeyAuthenticated"
+ // CTXKeyCredentials is the request context key holding the credentials gopkg.in/jcmturner/goidentity.v3/Identity object.
+ CTXKeyCredentials ctxKey = "github.com/jcmturner/gokrb5/CTXKeyCredentials" + // HTTPHeaderAuthRequest is the header that will hold authn/z information. + HTTPHeaderAuthRequest = "Authorization" + // HTTPHeaderAuthResponse is the header that will hold SPNEGO data from the server. + HTTPHeaderAuthResponse = "WWW-Authenticate" + // HTTPHeaderAuthResponseValueKey is the key in the auth header for SPNEGO. + HTTPHeaderAuthResponseValueKey = "Negotiate" + // UnauthorizedMsg is the message returned in the body when authentication fails. + UnauthorizedMsg = "Unauthorised.\n" +) + +// SPNEGOKRB5Authenticate is a Kerberos SPNEGO authentication HTTP handler wrapper. +func SPNEGOKRB5Authenticate(inner http.Handler, kt *keytab.Keytab, settings ...func(*service.Settings)) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // Get the auth header + s := strings.SplitN(r.Header.Get(HTTPHeaderAuthRequest), " ", 2) + if len(s) != 2 || s[0] != HTTPHeaderAuthResponseValueKey { + // No Authorization header set so return 401 with WWW-Authenticate Negotiate header + w.Header().Set(HTTPHeaderAuthResponse, HTTPHeaderAuthResponseValueKey) + http.Error(w, UnauthorizedMsg, http.StatusUnauthorized) + return + } + + // Set up the SPNEGO GSS-API mechanism + var spnego *SPNEGO + h, err := types.GetHostAddress(r.RemoteAddr) + if err == nil { + // put in this order so that if the user provides a ClientAddress it will override the one here. + o := append([]func(*service.Settings){service.ClientAddress(h)}, settings...) + spnego = SPNEGOService(kt, o...) + } else { + spnego = SPNEGOService(kt, settings...) + spnego.Log("%s - SPNEGO could not parse client address: %v", r.RemoteAddr, err) + } + + // Decode the header into an SPNEGO context token + b, err := base64.StdEncoding.DecodeString(s[1]) + if err != nil { + spnegoNegotiateKRB5MechType(spnego, w, "%s - SPNEGO error in base64 decoding negotiation header: %v", r.RemoteAddr, err) + return + } + var st SPNEGOToken + err = st.Unmarshal(b) + if err != nil { + spnegoNegotiateKRB5MechType(spnego, w, "%s - SPNEGO error in unmarshaling SPNEGO token: %v", r.RemoteAddr, err) + return + } + + // Validate the context token + authed, ctx, status := spnego.AcceptSecContext(&st) + if status.Code != gssapi.StatusComplete && status.Code != gssapi.StatusContinueNeeded { + spnegoResponseReject(spnego, w, "%s - SPNEGO validation error: %v", r.RemoteAddr, status) + return + } + if status.Code == gssapi.StatusContinueNeeded { + spnegoNegotiateKRB5MechType(spnego, w, "%s - SPNEGO GSS-API continue needed", r.RemoteAddr) + return + } + if authed { + id := ctx.Value(CTXKeyCredentials).(goidentity.Identity) + requestCtx := r.Context() + requestCtx = context.WithValue(requestCtx, CTXKeyCredentials, id) + requestCtx = context.WithValue(requestCtx, CTXKeyAuthenticated, ctx.Value(CTXKeyAuthenticated)) + spnegoResponseAcceptCompleted(spnego, w, "%s %s@%s - SPNEGO authentication succeeded", r.RemoteAddr, id.UserName(), id.Domain()) + inner.ServeHTTP(w, r.WithContext(requestCtx)) + } else { + spnegoResponseReject(spnego, w, "%s - SPNEGO Kerberos authentication failed", r.RemoteAddr) + return + } + }) +} + +func spnegoNegotiateKRB5MechType(s *SPNEGO, w http.ResponseWriter, format string, v ...interface{}) { + s.Log(format, v...) 
+	w.Header().Set(HTTPHeaderAuthResponse, spnegoNegTokenRespIncompleteKRB5)
+	http.Error(w, UnauthorizedMsg, http.StatusUnauthorized)
+}
+
+func spnegoResponseReject(s *SPNEGO, w http.ResponseWriter, format string, v ...interface{}) {
+	s.Log(format, v...)
+	w.Header().Set(HTTPHeaderAuthResponse, spnegoNegTokenRespReject)
+	http.Error(w, UnauthorizedMsg, http.StatusUnauthorized)
+}
+
+func spnegoResponseAcceptCompleted(s *SPNEGO, w http.ResponseWriter, format string, v ...interface{}) {
+	s.Log(format, v...)
+	w.Header().Set(HTTPHeaderAuthResponse, spnegoNegTokenRespKRBAcceptCompleted)
+}
diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/spnego/krb5Token.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/spnego/krb5Token.go
new file mode 100644
index 00000000000..8d82df2af39
--- /dev/null
+++ b/vendor/gopkg.in/jcmturner/gokrb5.v7/spnego/krb5Token.go
@@ -0,0 +1,236 @@
+package spnego
+
+import (
+	"context"
+	"encoding/binary"
+	"encoding/hex"
+	"errors"
+	"fmt"
+
+	"github.com/jcmturner/gofork/encoding/asn1"
+	"gopkg.in/jcmturner/gokrb5.v7/asn1tools"
+	"gopkg.in/jcmturner/gokrb5.v7/client"
+	"gopkg.in/jcmturner/gokrb5.v7/credentials"
+	"gopkg.in/jcmturner/gokrb5.v7/gssapi"
+	"gopkg.in/jcmturner/gokrb5.v7/iana/chksumtype"
+	"gopkg.in/jcmturner/gokrb5.v7/iana/msgtype"
+	"gopkg.in/jcmturner/gokrb5.v7/krberror"
+	"gopkg.in/jcmturner/gokrb5.v7/messages"
+	"gopkg.in/jcmturner/gokrb5.v7/service"
+	"gopkg.in/jcmturner/gokrb5.v7/types"
+)
+
+// GSSAPI KRB5 MechToken IDs.
+const (
+	TOK_ID_KRB_AP_REQ = "0100"
+	TOK_ID_KRB_AP_REP = "0200"
+	TOK_ID_KRB_ERROR  = "0300"
+)
+
+// KRB5Token context token implementation for GSSAPI.
+type KRB5Token struct {
+	OID      asn1.ObjectIdentifier
+	tokID    []byte
+	APReq    messages.APReq
+	APRep    messages.APRep
+	KRBError messages.KRBError
+	settings *service.Settings
+	context  context.Context
+}
+
+// Marshal a KRB5Token into a slice of bytes.
+func (m *KRB5Token) Marshal() ([]byte, error) {
+	// Create the header
+	b, _ := asn1.Marshal(m.OID)
+	b = append(b, m.tokID...)
+	var tb []byte
+	var err error
+	switch hex.EncodeToString(m.tokID) {
+	case TOK_ID_KRB_AP_REQ:
+		tb, err = m.APReq.Marshal()
+		if err != nil {
+			return []byte{}, fmt.Errorf("error marshalling AP_REQ for MechToken: %v", err)
+		}
+	case TOK_ID_KRB_AP_REP:
+		return []byte{}, errors.New("marshal of AP_REP GSSAPI MechToken not supported by gokrb5")
+	case TOK_ID_KRB_ERROR:
+		return []byte{}, errors.New("marshal of KRB_ERROR GSSAPI MechToken not supported by gokrb5")
+	}
+	if err != nil {
+		return []byte{}, fmt.Errorf("error marshalling kerberos message within mech token: %v", err)
+	}
+	b = append(b, tb...)
+	return asn1tools.AddASNAppTag(b, 0), nil
+}
+
+// Unmarshal a KRB5Token.
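+// The layout parsed here mirrors what Marshal above produces (the RFC 1964
+// InitialContextToken framing): an ASN.1 application tag wrapping the
+// mechanism OID, a two byte token ID (e.g. 01 00 for an AP_REQ), then the
+// raw Kerberos message.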
+func (m *KRB5Token) Unmarshal(b []byte) error {
+	var oid asn1.ObjectIdentifier
+	r, err := asn1.UnmarshalWithParams(b, &oid, fmt.Sprintf("application,explicit,tag:%v", 0))
+	if err != nil {
+		return fmt.Errorf("error unmarshalling KRB5Token OID: %v", err)
+	}
+	m.OID = oid
+	m.tokID = r[0:2]
+	switch hex.EncodeToString(m.tokID) {
+	case TOK_ID_KRB_AP_REQ:
+		var a messages.APReq
+		err = a.Unmarshal(r[2:])
+		if err != nil {
+			return fmt.Errorf("error unmarshalling KRB5Token AP_REQ: %v", err)
+		}
+		m.APReq = a
+	case TOK_ID_KRB_AP_REP:
+		var a messages.APRep
+		err = a.Unmarshal(r[2:])
+		if err != nil {
+			return fmt.Errorf("error unmarshalling KRB5Token AP_REP: %v", err)
+		}
+		m.APRep = a
+	case TOK_ID_KRB_ERROR:
+		var a messages.KRBError
+		err = a.Unmarshal(r[2:])
+		if err != nil {
+			return fmt.Errorf("error unmarshalling KRB5Token KRBError: %v", err)
+		}
+		m.KRBError = a
+	}
+	return nil
+}
+
+// Verify a KRB5Token.
+func (m *KRB5Token) Verify() (bool, gssapi.Status) {
+	switch hex.EncodeToString(m.tokID) {
+	case TOK_ID_KRB_AP_REQ:
+		ok, creds, err := service.VerifyAPREQ(m.APReq, m.settings)
+		if err != nil {
+			return false, gssapi.Status{Code: gssapi.StatusDefectiveToken, Message: err.Error()}
+		}
+		if !ok {
+			return false, gssapi.Status{Code: gssapi.StatusDefectiveCredential, Message: "KRB5_AP_REQ token not valid"}
+		}
+		m.context = context.Background()
+		m.context = context.WithValue(m.context, CTXKeyCredentials, creds)
+		m.context = context.WithValue(m.context, CTXKeyAuthenticated, ok)
+		return true, gssapi.Status{Code: gssapi.StatusComplete}
+	case TOK_ID_KRB_AP_REP:
+		// Client side
+		// TODO how to verify the AP_REP - not yet implemented
+		return false, gssapi.Status{Code: gssapi.StatusFailure, Message: "verifying an AP_REP is not currently supported by gokrb5"}
+	case TOK_ID_KRB_ERROR:
+		if m.KRBError.MsgType != msgtype.KRB_ERROR {
+			return false, gssapi.Status{Code: gssapi.StatusDefectiveToken, Message: "KRB5_Error token not valid"}
+		}
+		return true, gssapi.Status{Code: gssapi.StatusUnavailable}
+	}
+	return false, gssapi.Status{Code: gssapi.StatusDefectiveToken, Message: "unknown TOK_ID in KRB5 token"}
+}
+
+// IsAPReq tests if the MechToken contains an AP_REQ.
+func (m *KRB5Token) IsAPReq() bool {
+	if hex.EncodeToString(m.tokID) == TOK_ID_KRB_AP_REQ {
+		return true
+	}
+	return false
+}
+
+// IsAPRep tests if the MechToken contains an AP_REP.
+func (m *KRB5Token) IsAPRep() bool {
+	if hex.EncodeToString(m.tokID) == TOK_ID_KRB_AP_REP {
+		return true
+	}
+	return false
+}
+
+// IsKRBError tests if the MechToken contains a KRB_ERROR.
+func (m *KRB5Token) IsKRBError() bool {
+	if hex.EncodeToString(m.tokID) == TOK_ID_KRB_ERROR {
+		return true
+	}
+	return false
+}
+
+// Context returns the KRB5 token's context which will contain any verified user identity information.
+func (m *KRB5Token) Context() context.Context {
+	return m.context
+}
+
+// NewKRB5TokenAPREQ creates a new KRB5 token with AP_REQ.
+func NewKRB5TokenAPREQ(cl *client.Client, tkt messages.Ticket, sessionKey types.EncryptionKey, GSSAPIFlags []int, APOptions []int) (KRB5Token, error) {
+	// TODO consider providing the SPN rather than the specific tkt and key and get these from the krb client.
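+	// Editor's note: callers typically obtain tkt and sessionKey from
+	// client.GetServiceTicket(spn), as InitSecContext in spnego.go does, and
+	// pass the GSS-API integrity/confidentiality flags that
+	// NewNegTokenInitKRB5 uses.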
+ var m KRB5Token + m.OID = gssapi.OID(gssapi.OIDKRB5) + tb, _ := hex.DecodeString(TOK_ID_KRB_AP_REQ) + m.tokID = tb + + auth, err := krb5TokenAuthenticator(cl.Credentials, GSSAPIFlags) + if err != nil { + return m, err + } + APReq, err := messages.NewAPReq( + tkt, + sessionKey, + auth, + ) + if err != nil { + return m, err + } + for _, o := range APOptions { + types.SetFlag(&APReq.APOptions, o) + } + m.APReq = APReq + return m, nil +} + +// krb5TokenAuthenticator creates a new kerberos authenticator for kerberos MechToken +func krb5TokenAuthenticator(creds *credentials.Credentials, flags []int) (types.Authenticator, error) { + //RFC 4121 Section 4.1.1 + auth, err := types.NewAuthenticator(creds.Domain(), creds.CName()) + if err != nil { + return auth, krberror.Errorf(err, krberror.KRBMsgError, "error generating new authenticator") + } + auth.Cksum = types.Checksum{ + CksumType: chksumtype.GSSAPI, + Checksum: newAuthenticatorChksum(flags), + } + return auth, nil +} + +// Create new authenticator checksum for kerberos MechToken +func newAuthenticatorChksum(flags []int) []byte { + a := make([]byte, 24) + binary.LittleEndian.PutUint32(a[:4], 16) + for _, i := range flags { + if i == gssapi.ContextFlagDeleg { + x := make([]byte, 28-len(a)) + a = append(a, x...) + } + f := binary.LittleEndian.Uint32(a[20:24]) + f |= uint32(i) + binary.LittleEndian.PutUint32(a[20:24], f) + } + return a +} + +/* +The authenticator checksum field SHALL have the following format: + +Octet Name Description +----------------------------------------------------------------- +0..3 Lgth Number of octets in Bnd field; Represented + in little-endian order; Currently contains + hex value 10 00 00 00 (16). +4..19 Bnd Channel binding information, as described in + section 4.1.1.2. +20..23 Flags Four-octet context-establishment flags in + little-endian order as described in section + 4.1.1.1. +24..25 DlgOpt The delegation option identifier (=1) in + little-endian order [optional]. This field + and the next two fields are present if and + only if GSS_C_DELEG_FLAG is set as described + in section 4.1.1.1. +26..27 Dlgth The length of the Deleg field in little-endian order [optional]. +28..(n-1) Deleg A KRB_CRED message (n = Dlgth + 28) [optional]. +n..last Exts Extensions [optional]. +*/ diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/spnego/negotiationToken.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/spnego/negotiationToken.go new file mode 100644 index 00000000000..4a80f3595e0 --- /dev/null +++ b/vendor/gopkg.in/jcmturner/gokrb5.v7/spnego/negotiationToken.go @@ -0,0 +1,338 @@ +package spnego + +import ( + "context" + "errors" + "fmt" + + "github.com/jcmturner/gofork/encoding/asn1" + "gopkg.in/jcmturner/gokrb5.v7/client" + "gopkg.in/jcmturner/gokrb5.v7/gssapi" + "gopkg.in/jcmturner/gokrb5.v7/messages" + "gopkg.in/jcmturner/gokrb5.v7/service" + "gopkg.in/jcmturner/gokrb5.v7/types" +) + +/* +https://msdn.microsoft.com/en-us/library/ms995330.aspx + +NegotiationToken ::= CHOICE { + negTokenInit [0] NegTokenInit, This is the Negotiation token sent from the client to the server. + negTokenResp [1] NegTokenResp +} + +NegTokenInit ::= SEQUENCE { + mechTypes [0] MechTypeList, + reqFlags [1] ContextFlags OPTIONAL, + -- inherited from RFC 2478 for backward compatibility, + -- RECOMMENDED to be left out + mechToken [2] OCTET STRING OPTIONAL, + mechListMIC [3] OCTET STRING OPTIONAL, + ... 
+} + +NegTokenResp ::= SEQUENCE { + negState [0] ENUMERATED { + accept-completed (0), + accept-incomplete (1), + reject (2), + request-mic (3) + } OPTIONAL, + -- REQUIRED in the first reply from the target + supportedMech [1] MechType OPTIONAL, + -- present only in the first reply from the target + responseToken [2] OCTET STRING OPTIONAL, + mechListMIC [3] OCTET STRING OPTIONAL, + ... +} +*/ + +// Negotiation state values. +const ( + NegStateAcceptCompleted NegState = 0 + NegStateAcceptIncomplete NegState = 1 + NegStateReject NegState = 2 + NegStateRequestMIC NegState = 3 +) + +// NegState is a type to indicate the SPNEGO negotiation state. +type NegState int + +// NegTokenInit implements Negotiation Token of type Init. +type NegTokenInit struct { + MechTypes []asn1.ObjectIdentifier + ReqFlags gssapi.ContextFlags + MechTokenBytes []byte + MechListMIC []byte + mechToken gssapi.ContextToken + settings *service.Settings +} + +type marshalNegTokenInit struct { + MechTypes []asn1.ObjectIdentifier `asn1:"explicit,tag:0"` + ReqFlags gssapi.ContextFlags `asn1:"explicit,optional,tag:1"` + MechTokenBytes []byte `asn1:"explicit,optional,omitempty,tag:2"` + MechListMIC []byte `asn1:"explicit,optional,omitempty,tag:3"` // This field is not used when negotiating Kerberos tokens +} + +// NegTokenResp implements Negotiation Token of type Resp/Targ +type NegTokenResp struct { + NegState asn1.Enumerated + SupportedMech asn1.ObjectIdentifier + ResponseToken []byte + MechListMIC []byte + mechToken gssapi.ContextToken + settings *service.Settings +} + +type marshalNegTokenResp struct { + NegState asn1.Enumerated `asn1:"explicit,tag:0"` + SupportedMech asn1.ObjectIdentifier `asn1:"explicit,optional,tag:1"` + ResponseToken []byte `asn1:"explicit,optional,omitempty,tag:2"` + MechListMIC []byte `asn1:"explicit,optional,omitempty,tag:3"` // This field is not used when negotiating Kerberos tokens +} + +// NegTokenTarg implements Negotiation Token of type Resp/Targ +type NegTokenTarg NegTokenResp + +// Marshal an Init negotiation token +func (n *NegTokenInit) Marshal() ([]byte, error) { + m := marshalNegTokenInit{ + MechTypes: n.MechTypes, + ReqFlags: n.ReqFlags, + MechTokenBytes: n.MechTokenBytes, + MechListMIC: n.MechListMIC, + } + b, err := asn1.Marshal(m) + if err != nil { + return nil, err + } + nt := asn1.RawValue{ + Tag: 0, + Class: 2, + IsCompound: true, + Bytes: b, + } + nb, err := asn1.Marshal(nt) + if err != nil { + return nil, err + } + return nb, nil +} + +// Unmarshal an Init negotiation token +func (n *NegTokenInit) Unmarshal(b []byte) error { + init, nt, err := UnmarshalNegToken(b) + if err != nil { + return err + } + if !init { + return errors.New("bytes were not that of a NegTokenInit") + } + nInit := nt.(NegTokenInit) + n.MechTokenBytes = nInit.MechTokenBytes + n.MechListMIC = nInit.MechListMIC + n.MechTypes = nInit.MechTypes + n.ReqFlags = nInit.ReqFlags + return nil +} + +// Verify an Init negotiation token +func (n *NegTokenInit) Verify() (bool, gssapi.Status) { + // Check if supported mechanisms are in the MechTypeList + var mtSupported bool + for _, m := range n.MechTypes { + if m.Equal(gssapi.OID(gssapi.OIDKRB5)) || m.Equal(gssapi.OID(gssapi.OIDMSLegacyKRB5)) { + if n.mechToken == nil && n.MechTokenBytes == nil { + return false, gssapi.Status{Code: gssapi.StatusContinueNeeded} + } + mtSupported = true + break + } + } + if !mtSupported { + return false, gssapi.Status{Code: gssapi.StatusBadMech, Message: "no supported mechanism specified in negotiation"} + } + // There should be some mechtoken 
 bytes for a KRB5Token (other mech types are not supported)
+	mt := new(KRB5Token)
+	mt.settings = n.settings
+	if n.mechToken == nil {
+		err := mt.Unmarshal(n.MechTokenBytes)
+		if err != nil {
+			return false, gssapi.Status{Code: gssapi.StatusDefectiveToken, Message: err.Error()}
+		}
+		n.mechToken = mt
+	} else {
+		var ok bool
+		mt, ok = n.mechToken.(*KRB5Token)
+		if !ok {
+			return false, gssapi.Status{Code: gssapi.StatusDefectiveToken, Message: "MechToken is not a KRB5 token as expected"}
+		}
+	}
+	// RFC4178 states that the initial negotiation message can optionally contain the initial mechanism token for the preferred mechanism of the client.
+	if !mt.OID.Equal(n.MechTypes[0]) {
+		return false, gssapi.Status{Code: gssapi.StatusDefectiveToken, Message: "OID of MechToken does not match the first in the MechTypeList"}
+	}
+	// Verify the mechtoken
+	return n.mechToken.Verify()
+}
+
+// Context returns the SPNEGO context which will contain any verified user identity information.
+func (n *NegTokenInit) Context() context.Context {
+	if n.mechToken != nil {
+		mt, ok := n.mechToken.(*KRB5Token)
+		if !ok {
+			return nil
+		}
+		return mt.Context()
+	}
+	return nil
+}
+
+// Marshal a Resp/Targ negotiation token
+func (n *NegTokenResp) Marshal() ([]byte, error) {
+	m := marshalNegTokenResp{
+		NegState:      n.NegState,
+		SupportedMech: n.SupportedMech,
+		ResponseToken: n.ResponseToken,
+		MechListMIC:   n.MechListMIC,
+	}
+	b, err := asn1.Marshal(m)
+	if err != nil {
+		return nil, err
+	}
+	nt := asn1.RawValue{
+		Tag:        1,
+		Class:      2,
+		IsCompound: true,
+		Bytes:      b,
+	}
+	nb, err := asn1.Marshal(nt)
+	if err != nil {
+		return nil, err
+	}
+	return nb, nil
+}
+
+// Unmarshal a Resp/Targ negotiation token
+func (n *NegTokenResp) Unmarshal(b []byte) error {
+	init, nt, err := UnmarshalNegToken(b)
+	if err != nil {
+		return err
+	}
+	if init {
+		return errors.New("bytes were not that of a NegTokenResp")
+	}
+	nResp := nt.(NegTokenResp)
+	n.MechListMIC = nResp.MechListMIC
+	n.NegState = nResp.NegState
+	n.ResponseToken = nResp.ResponseToken
+	n.SupportedMech = nResp.SupportedMech
+	return nil
+}
+
+// Verify a Resp/Targ negotiation token
+func (n *NegTokenResp) Verify() (bool, gssapi.Status) {
+	if n.SupportedMech.Equal(gssapi.OID(gssapi.OIDKRB5)) || n.SupportedMech.Equal(gssapi.OID(gssapi.OIDMSLegacyKRB5)) {
+		if n.mechToken == nil && n.ResponseToken == nil {
+			return false, gssapi.Status{Code: gssapi.StatusContinueNeeded}
+		}
+		mt := new(KRB5Token)
+		mt.settings = n.settings
+		if n.mechToken == nil {
+			err := mt.Unmarshal(n.ResponseToken)
+			if err != nil {
+				return false, gssapi.Status{Code: gssapi.StatusDefectiveToken, Message: err.Error()}
+			}
+			n.mechToken = mt
+		} else {
+			var ok bool
+			mt, ok = n.mechToken.(*KRB5Token)
+			if !ok {
+				return false, gssapi.Status{Code: gssapi.StatusDefectiveToken, Message: "MechToken is not a KRB5 token as expected"}
+			}
+		}
+		if mt == nil {
+			return false, gssapi.Status{Code: gssapi.StatusContinueNeeded}
+		}
+		// Verify the mechtoken
+		return mt.Verify()
+	}
+	return false, gssapi.Status{Code: gssapi.StatusBadMech, Message: "no supported mechanism specified in negotiation"}
+}
+
+// State returns the negotiation state of the negotiation response.
+func (n *NegTokenResp) State() NegState {
+	return NegState(n.NegState)
+}
+
+// Context returns the SPNEGO context which will contain any verified user identity information.
+func (n *NegTokenResp) Context() context.Context {
+	if n.mechToken != nil {
+		mt, ok := n.mechToken.(*KRB5Token)
+		if !ok {
+			return nil
+		}
+		return mt.Context()
+	}
+	return nil
+}
+
+// UnmarshalNegToken unmarshals and returns either a NegTokenInit or a NegTokenResp.
+//
+// The boolean indicates if the response is a NegTokenInit.
+// If error is nil and the boolean is false the response is a NegTokenResp.
+func UnmarshalNegToken(b []byte) (bool, interface{}, error) {
+	var a asn1.RawValue
+	_, err := asn1.Unmarshal(b, &a)
+	if err != nil {
+		return false, nil, fmt.Errorf("error unmarshalling NegotiationToken: %v", err)
+	}
+	switch a.Tag {
+	case 0:
+		var n marshalNegTokenInit
+		_, err = asn1.Unmarshal(a.Bytes, &n)
+		if err != nil {
+			return false, nil, fmt.Errorf("error unmarshalling NegotiationToken type %d (Init): %v", a.Tag, err)
+		}
+		nt := NegTokenInit{
+			MechTypes:      n.MechTypes,
+			ReqFlags:       n.ReqFlags,
+			MechTokenBytes: n.MechTokenBytes,
+			MechListMIC:    n.MechListMIC,
+		}
+		return true, nt, nil
+	case 1:
+		var n marshalNegTokenResp
+		_, err = asn1.Unmarshal(a.Bytes, &n)
+		if err != nil {
+			return false, nil, fmt.Errorf("error unmarshalling NegotiationToken type %d (Resp/Targ): %v", a.Tag, err)
+		}
+		nt := NegTokenResp{
+			NegState:      n.NegState,
+			SupportedMech: n.SupportedMech,
+			ResponseToken: n.ResponseToken,
+			MechListMIC:   n.MechListMIC,
+		}
+		return false, nt, nil
+	default:
+		return false, nil, errors.New("unknown choice type for NegotiationToken")
+	}
+
+}
+
+// NewNegTokenInitKRB5 creates a new Init negotiation token for Kerberos 5.
+func NewNegTokenInitKRB5(cl *client.Client, tkt messages.Ticket, sessionKey types.EncryptionKey) (NegTokenInit, error) {
+	mt, err := NewKRB5TokenAPREQ(cl, tkt, sessionKey, []int{gssapi.ContextFlagInteg, gssapi.ContextFlagConf}, []int{})
+	if err != nil {
+		return NegTokenInit{}, fmt.Errorf("error getting KRB5 token; %v", err)
+	}
+	mtb, err := mt.Marshal()
+	if err != nil {
+		return NegTokenInit{}, fmt.Errorf("error marshalling KRB5 token; %v", err)
+	}
+	return NegTokenInit{
+		MechTypes:      []asn1.ObjectIdentifier{gssapi.OID(gssapi.OIDKRB5)},
+		MechTokenBytes: mtb,
+	}, nil
+}
diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/spnego/spnego.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/spnego/spnego.go
new file mode 100644
index 00000000000..f82947c7e13
--- /dev/null
+++ b/vendor/gopkg.in/jcmturner/gokrb5.v7/spnego/spnego.go
@@ -0,0 +1,199 @@
+// Package spnego implements the Simple and Protected GSSAPI Negotiation Mechanism for Kerberos authentication.
+package spnego
+
+import (
+	"context"
+	"errors"
+	"fmt"
+
+	"github.com/jcmturner/gofork/encoding/asn1"
+	"gopkg.in/jcmturner/gokrb5.v7/asn1tools"
+	"gopkg.in/jcmturner/gokrb5.v7/client"
+	"gopkg.in/jcmturner/gokrb5.v7/gssapi"
+	"gopkg.in/jcmturner/gokrb5.v7/keytab"
+	"gopkg.in/jcmturner/gokrb5.v7/service"
+)
+
+// SPNEGO implements the GSS-API mechanism for RFC 4178.
+type SPNEGO struct {
+	serviceSettings *service.Settings
+	client          *client.Client
+	spn             string
+}
+
+// SPNEGOClient configures the SPNEGO mechanism suitable for client side use.
+func SPNEGOClient(cl *client.Client, spn string) *SPNEGO {
+	s := new(SPNEGO)
+	s.client = cl
+	s.spn = spn
+	s.serviceSettings = service.NewSettings(nil, service.SName(spn))
+	return s
+}
+
+// SPNEGOService configures the SPNEGO mechanism suitable for service side use.
+func SPNEGOService(kt *keytab.Keytab, options ...func(*service.Settings)) *SPNEGO {
+	s := new(SPNEGO)
+	s.serviceSettings = service.NewSettings(kt, options...)
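+	// An editor's sketch of typical service side wiring (keytab path and
+	// handler name assumed):
+	//
+	//	kt, _ := keytab.Load("/etc/svc.keytab")
+	//	http.Handle("/", spnego.SPNEGOKRB5Authenticate(appHandler, kt))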
+	return s
+}
+
+// OID returns the GSS-API assigned OID for SPNEGO.
+func (s *SPNEGO) OID() asn1.ObjectIdentifier {
+	return gssapi.OID(gssapi.OIDSPNEGO)
+}
+
+// AcquireCred is the GSS-API method to acquire a client credential via Kerberos for SPNEGO.
+func (s *SPNEGO) AcquireCred() error {
+	return s.client.Login()
+}
+
+// InitSecContext is the GSS-API method for the client to generate a context token to the service via Kerberos.
+func (s *SPNEGO) InitSecContext() (gssapi.ContextToken, error) {
+	tkt, key, err := s.client.GetServiceTicket(s.spn)
+	if err != nil {
+		return &SPNEGOToken{}, err
+	}
+	negTokenInit, err := NewNegTokenInitKRB5(s.client, tkt, key)
+	if err != nil {
+		return &SPNEGOToken{}, fmt.Errorf("could not create NegTokenInit: %v", err)
+	}
+	return &SPNEGOToken{
+		Init:         true,
+		NegTokenInit: negTokenInit,
+		settings:     s.serviceSettings,
+	}, nil
+}
+
+// AcceptSecContext is the GSS-API method for the service to verify the context token provided by the client and
+// establish a context.
+func (s *SPNEGO) AcceptSecContext(ct gssapi.ContextToken) (bool, context.Context, gssapi.Status) {
+	var ctx context.Context
+	t, ok := ct.(*SPNEGOToken)
+	if !ok {
+		return false, ctx, gssapi.Status{Code: gssapi.StatusDefectiveToken, Message: "context token provided was not an SPNEGO token"}
+	}
+	t.settings = s.serviceSettings
+	var oid asn1.ObjectIdentifier
+	if t.Init {
+		oid = t.NegTokenInit.MechTypes[0]
+	}
+	if t.Resp {
+		oid = t.NegTokenResp.SupportedMech
+	}
+	if !(oid.Equal(gssapi.OID(gssapi.OIDKRB5)) || oid.Equal(gssapi.OID(gssapi.OIDMSLegacyKRB5))) {
+		return false, ctx, gssapi.Status{Code: gssapi.StatusDefectiveToken, Message: "SPNEGO OID of MechToken is not of type KRB5"}
+	}
+	// The flags in the NegTokenInit (t.NegTokenInit.ReqFlags) must be used.
+	ok, status := t.Verify()
+	ctx = t.Context()
+	return ok, ctx, status
+}
+
+// Log will write to the service's logger if it is configured.
+func (s *SPNEGO) Log(format string, v ...interface{}) {
+	if s.serviceSettings.Logger() != nil {
+		s.serviceSettings.Logger().Printf(format, v...)
+	}
+}
+
+// SPNEGOToken is a GSS-API context token.
+type SPNEGOToken struct {
+	Init         bool
+	Resp         bool
+	NegTokenInit NegTokenInit
+	NegTokenResp NegTokenResp
+	settings     *service.Settings
+	context      context.Context
+}
+
+// Marshal SPNEGO context token
+func (s *SPNEGOToken) Marshal() ([]byte, error) {
+	var b []byte
+	if s.Init {
+		hb, _ := asn1.Marshal(gssapi.OID(gssapi.OIDSPNEGO))
+		tb, err := s.NegTokenInit.Marshal()
+		if err != nil {
+			return b, fmt.Errorf("could not marshal NegTokenInit: %v", err)
+		}
+		b = append(hb, tb...)
+		return asn1tools.AddASNAppTag(b, 0), nil
+	}
+	if s.Resp {
+		b, err := s.NegTokenResp.Marshal()
+		if err != nil {
+			return b, fmt.Errorf("could not marshal NegTokenResp: %v", err)
+		}
+		return b, nil
+	}
+	return b, errors.New("SPNEGO cannot be marshalled: it contains neither a NegTokenInit nor a NegTokenResp")
+}
+
+// Unmarshal SPNEGO context token
+func (s *SPNEGOToken) Unmarshal(b []byte) error {
+	var r []byte
+	var err error
+	if b[0] != byte(161) {
+		// Not a NegTokenResp/Targ could be a NegTokenInit
+		var oid asn1.ObjectIdentifier
+		r, err = asn1.UnmarshalWithParams(b, &oid, fmt.Sprintf("application,explicit,tag:%v", 0))
+		if err != nil {
+			return fmt.Errorf("not a valid SPNEGO token: %v", err)
+		}
+		// Check the OID is the SPNEGO OID value
+		SPNEGOOID := gssapi.OID(gssapi.OIDSPNEGO)
+		if !oid.Equal(SPNEGOOID) {
+			return fmt.Errorf("OID %s does not match SPNEGO OID %s", oid.String(), SPNEGOOID.String())
+		}
+	} else {
+		// Could be a NegTokenResp/Targ
+		r = b
+	}
+
+	_, nt, err := UnmarshalNegToken(r)
+	if err != nil {
+		return err
+	}
+	switch v := nt.(type) {
+	case NegTokenInit:
+		s.Init = true
+		s.NegTokenInit = v
+		s.NegTokenInit.settings = s.settings
+	case NegTokenResp:
+		s.Resp = true
+		s.NegTokenResp = v
+		s.NegTokenResp.settings = s.settings
+	default:
+		return errors.New("unknown choice type for NegotiationToken")
+	}
+	return nil
+}
+
+// Verify the SPNEGOToken
+func (s *SPNEGOToken) Verify() (bool, gssapi.Status) {
+	if (!s.Init && !s.Resp) || (s.Init && s.Resp) {
+		return false, gssapi.Status{Code: gssapi.StatusDefectiveToken, Message: "invalid SPNEGO token, unclear if NegTokenInit or NegTokenResp"}
+	}
+	if s.Init {
+		s.NegTokenInit.settings = s.settings
+		ok, status := s.NegTokenInit.Verify()
+		if ok {
+			s.context = s.NegTokenInit.Context()
+		}
+		return ok, status
+	}
+	if s.Resp {
+		s.NegTokenResp.settings = s.settings
+		ok, status := s.NegTokenResp.Verify()
+		if ok {
+			s.context = s.NegTokenResp.Context()
+		}
+		return ok, status
+	}
+	// should not be possible to get here
+	return false, gssapi.Status{Code: gssapi.StatusFailure, Message: "unable to verify SPNEGO token"}
+}
+
+// Context returns the SPNEGO context which will contain any verified user identity information.
+func (s *SPNEGOToken) Context() context.Context {
+	return s.context
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/util/httpstream/doc.go b/vendor/k8s.io/apimachinery/pkg/util/httpstream/doc.go
new file mode 100644
index 00000000000..5893df5bd26
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/httpstream/doc.go
@@ -0,0 +1,19 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package httpstream adds multiplexed streaming support to HTTP requests and
+// responses via connection upgrades.
+package httpstream // import "k8s.io/apimachinery/pkg/util/httpstream"
diff --git a/vendor/k8s.io/apimachinery/pkg/util/httpstream/httpstream.go b/vendor/k8s.io/apimachinery/pkg/util/httpstream/httpstream.go
new file mode 100644
index 00000000000..50d9a366f36
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/httpstream/httpstream.go
@@ -0,0 +1,149 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package httpstream + +import ( + "fmt" + "io" + "net/http" + "strings" + "time" +) + +const ( + HeaderConnection = "Connection" + HeaderUpgrade = "Upgrade" + HeaderProtocolVersion = "X-Stream-Protocol-Version" + HeaderAcceptedProtocolVersions = "X-Accepted-Stream-Protocol-Versions" +) + +// NewStreamHandler defines a function that is called when a new Stream is +// received. If no error is returned, the Stream is accepted; otherwise, +// the stream is rejected. After the reply frame has been sent, replySent is closed. +type NewStreamHandler func(stream Stream, replySent <-chan struct{}) error + +// NoOpNewStreamHandler is a stream handler that accepts a new stream and +// performs no other logic. +func NoOpNewStreamHandler(stream Stream, replySent <-chan struct{}) error { return nil } + +// Dialer knows how to open a streaming connection to a server. +type Dialer interface { + + // Dial opens a streaming connection to a server using one of the protocols + // specified (in order of most preferred to least preferred). + Dial(protocols ...string) (Connection, string, error) +} + +// UpgradeRoundTripper is a type of http.RoundTripper that is able to upgrade +// HTTP requests to support multiplexed bidirectional streams. After RoundTrip() +// is invoked, if the upgrade is successful, clients may retrieve the upgraded +// connection by calling UpgradeRoundTripper.Connection(). +type UpgradeRoundTripper interface { + http.RoundTripper + // NewConnection validates the response and creates a new Connection. + NewConnection(resp *http.Response) (Connection, error) +} + +// ResponseUpgrader knows how to upgrade HTTP requests and responses to +// add streaming support to them. +type ResponseUpgrader interface { + // UpgradeResponse upgrades an HTTP response to one that supports multiplexed + // streams. newStreamHandler will be called asynchronously whenever the + // other end of the upgraded connection creates a new stream. + UpgradeResponse(w http.ResponseWriter, req *http.Request, newStreamHandler NewStreamHandler) Connection +} + +// Connection represents an upgraded HTTP connection. +type Connection interface { + // CreateStream creates a new Stream with the supplied headers. + CreateStream(headers http.Header) (Stream, error) + // Close resets all streams and closes the connection. + Close() error + // CloseChan returns a channel that is closed when the underlying connection is closed. + CloseChan() <-chan bool + // SetIdleTimeout sets the amount of time the connection may remain idle before + // it is automatically closed. + SetIdleTimeout(timeout time.Duration) +} + +// Stream represents a bidirectional communications channel that is part of an +// upgraded connection. +type Stream interface { + io.ReadWriteCloser + // Reset closes both directions of the stream, indicating that neither client + // or server can use it any more. + Reset() error + // Headers returns the headers used to create the stream. + Headers() http.Header + // Identifier returns the stream's ID. 
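+	// (Under SPDY framing, client-initiated streams carry odd IDs and
+	// server-initiated streams carry even IDs.)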
+ Identifier() uint32 +} + +// IsUpgradeRequest returns true if the given request is a connection upgrade request +func IsUpgradeRequest(req *http.Request) bool { + for _, h := range req.Header[http.CanonicalHeaderKey(HeaderConnection)] { + if strings.Contains(strings.ToLower(h), strings.ToLower(HeaderUpgrade)) { + return true + } + } + return false +} + +func negotiateProtocol(clientProtocols, serverProtocols []string) string { + for i := range clientProtocols { + for j := range serverProtocols { + if clientProtocols[i] == serverProtocols[j] { + return clientProtocols[i] + } + } + } + return "" +} + +// Handshake performs a subprotocol negotiation. If the client did request a +// subprotocol, Handshake will select the first common value found in +// serverProtocols. If a match is found, Handshake adds a response header +// indicating the chosen subprotocol. If no match is found, HTTP forbidden is +// returned, along with a response header containing the list of protocols the +// server can accept. +func Handshake(req *http.Request, w http.ResponseWriter, serverProtocols []string) (string, error) { + clientProtocols := req.Header[http.CanonicalHeaderKey(HeaderProtocolVersion)] + if len(clientProtocols) == 0 { + // Kube 1.0 clients didn't support subprotocol negotiation. + // TODO require clientProtocols once Kube 1.0 is no longer supported + return "", nil + } + + if len(serverProtocols) == 0 { + // Kube 1.0 servers didn't support subprotocol negotiation. This is mainly for testing. + // TODO require serverProtocols once Kube 1.0 is no longer supported + return "", nil + } + + negotiatedProtocol := negotiateProtocol(clientProtocols, serverProtocols) + if len(negotiatedProtocol) == 0 { + for i := range serverProtocols { + w.Header().Add(HeaderAcceptedProtocolVersions, serverProtocols[i]) + } + err := fmt.Errorf("unable to upgrade: unable to negotiate protocol: client supports %v, server accepts %v", clientProtocols, serverProtocols) + http.Error(w, err.Error(), http.StatusForbidden) + return "", err + } + + w.Header().Add(HeaderProtocolVersion, negotiatedProtocol) + return negotiatedProtocol, nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/connection.go b/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/connection.go new file mode 100644 index 00000000000..9d222faa898 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/connection.go @@ -0,0 +1,145 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spdy + +import ( + "net" + "net/http" + "sync" + "time" + + "github.com/docker/spdystream" + "k8s.io/apimachinery/pkg/util/httpstream" + "k8s.io/klog" +) + +// connection maintains state about a spdystream.Connection and its associated +// streams. +type connection struct { + conn *spdystream.Connection + streams []httpstream.Stream + streamLock sync.Mutex + newStreamHandler httpstream.NewStreamHandler +} + +// NewClientConnection creates a new SPDY client connection. 
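+// It is normally reached via SpdyRoundTripper.NewConnection once the HTTP/1.1
+// Upgrade handshake has completed; the supplied net.Conn must already be ready
+// to carry raw SPDY/3.1 framing.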
+func NewClientConnection(conn net.Conn) (httpstream.Connection, error) { + spdyConn, err := spdystream.NewConnection(conn, false) + if err != nil { + defer conn.Close() + return nil, err + } + + return newConnection(spdyConn, httpstream.NoOpNewStreamHandler), nil +} + +// NewServerConnection creates a new SPDY server connection. newStreamHandler +// will be invoked when the server receives a newly created stream from the +// client. +func NewServerConnection(conn net.Conn, newStreamHandler httpstream.NewStreamHandler) (httpstream.Connection, error) { + spdyConn, err := spdystream.NewConnection(conn, true) + if err != nil { + defer conn.Close() + return nil, err + } + + return newConnection(spdyConn, newStreamHandler), nil +} + +// newConnection returns a new connection wrapping conn. newStreamHandler +// will be invoked when the server receives a newly created stream from the +// client. +func newConnection(conn *spdystream.Connection, newStreamHandler httpstream.NewStreamHandler) httpstream.Connection { + c := &connection{conn: conn, newStreamHandler: newStreamHandler} + go conn.Serve(c.newSpdyStream) + return c +} + +// createStreamResponseTimeout indicates how long to wait for the other side to +// acknowledge the new stream before timing out. +const createStreamResponseTimeout = 30 * time.Second + +// Close first sends a reset for all of the connection's streams, and then +// closes the underlying spdystream.Connection. +func (c *connection) Close() error { + c.streamLock.Lock() + for _, s := range c.streams { + // calling Reset instead of Close ensures that all streams are fully torn down + s.Reset() + } + c.streams = make([]httpstream.Stream, 0) + c.streamLock.Unlock() + + // now that all streams are fully torn down, it's safe to call close on the underlying connection, + // which should be able to terminate immediately at this point, instead of waiting for any + // remaining graceful stream termination. + return c.conn.Close() +} + +// CreateStream creates a new stream with the specified headers and registers +// it with the connection. +func (c *connection) CreateStream(headers http.Header) (httpstream.Stream, error) { + stream, err := c.conn.CreateStream(headers, nil, false) + if err != nil { + return nil, err + } + if err = stream.WaitTimeout(createStreamResponseTimeout); err != nil { + return nil, err + } + + c.registerStream(stream) + return stream, nil +} + +// registerStream adds the stream s to the connection's list of streams that +// it owns. +func (c *connection) registerStream(s httpstream.Stream) { + c.streamLock.Lock() + c.streams = append(c.streams, s) + c.streamLock.Unlock() +} + +// CloseChan returns a channel that, when closed, indicates that the underlying +// spdystream.Connection has been closed. +func (c *connection) CloseChan() <-chan bool { + return c.conn.CloseChan() +} + +// newSpdyStream is the internal new stream handler used by spdystream.Connection.Serve. +// It calls connection's newStreamHandler, giving it the opportunity to accept or reject +// the stream. If newStreamHandler returns an error, the stream is rejected. If not, the +// stream is accepted and registered with the connection. 
+func (c *connection) newSpdyStream(stream *spdystream.Stream) {
+	replySent := make(chan struct{})
+	err := c.newStreamHandler(stream, replySent)
+	rejectStream := (err != nil)
+	if rejectStream {
+		klog.Warningf("Stream rejected: %v", err)
+		stream.Reset()
+		return
+	}
+
+	c.registerStream(stream)
+	stream.SendReply(http.Header{}, rejectStream)
+	close(replySent)
+}
+
+// SetIdleTimeout sets the amount of time the connection may remain idle before
+// it is automatically closed.
+func (c *connection) SetIdleTimeout(timeout time.Duration) {
+	c.conn.SetIdleTimeout(timeout)
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper.go b/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper.go
new file mode 100644
index 00000000000..2699597e7a5
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper.go
@@ -0,0 +1,335 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package spdy
+
+import (
+	"bufio"
+	"bytes"
+	"context"
+	"crypto/tls"
+	"encoding/base64"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net"
+	"net/http"
+	"net/http/httputil"
+	"net/url"
+	"strings"
+
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/serializer"
+	"k8s.io/apimachinery/pkg/util/httpstream"
+	utilnet "k8s.io/apimachinery/pkg/util/net"
+	"k8s.io/apimachinery/third_party/forked/golang/netutil"
+)
+
+// SpdyRoundTripper knows how to upgrade an HTTP request to one that supports
+// multiplexed streams. After RoundTrip() is invoked, Conn will be set
+// and usable. SpdyRoundTripper implements the UpgradeRoundTripper interface.
+type SpdyRoundTripper struct {
+	//tlsConfig holds the TLS configuration settings to use when connecting
+	//to the remote server.
+	tlsConfig *tls.Config
+
+	/* TODO according to http://golang.org/pkg/net/http/#RoundTripper, a RoundTripper
+	must be safe for use by multiple concurrent goroutines. If this is absolutely
+	necessary, we could keep a map from http.Request to net.Conn. In practice,
+	a client will create an http.Client, set the transport to a new instance of
+	SpdyRoundTripper, and use it a single time, so this hopefully won't be an issue.
+	*/
+	// conn is the underlying network connection to the remote server.
+	conn net.Conn
+
+	// Dialer is the dialer used to connect. Used if non-nil.
+	Dialer *net.Dialer
+
+	// proxier knows which proxy to use given a request, defaults to http.ProxyFromEnvironment
+	// Used primarily for mocking the proxy discovery in tests.
+	proxier func(req *http.Request) (*url.URL, error)
+
+	// followRedirects indicates if the round tripper should examine responses for redirects and
+	// follow them.
+	followRedirects bool
+	// requireSameHostRedirects restricts redirect following to only follow redirects to the same host
+	// as the original request.
+ requireSameHostRedirects bool +} + +var _ utilnet.TLSClientConfigHolder = &SpdyRoundTripper{} +var _ httpstream.UpgradeRoundTripper = &SpdyRoundTripper{} +var _ utilnet.Dialer = &SpdyRoundTripper{} + +// NewRoundTripper creates a new SpdyRoundTripper that will use +// the specified tlsConfig. +func NewRoundTripper(tlsConfig *tls.Config, followRedirects, requireSameHostRedirects bool) httpstream.UpgradeRoundTripper { + return NewSpdyRoundTripper(tlsConfig, followRedirects, requireSameHostRedirects) +} + +// NewSpdyRoundTripper creates a new SpdyRoundTripper that will use +// the specified tlsConfig. This function is mostly meant for unit tests. +func NewSpdyRoundTripper(tlsConfig *tls.Config, followRedirects, requireSameHostRedirects bool) *SpdyRoundTripper { + return &SpdyRoundTripper{ + tlsConfig: tlsConfig, + followRedirects: followRedirects, + requireSameHostRedirects: requireSameHostRedirects, + } +} + +// TLSClientConfig implements pkg/util/net.TLSClientConfigHolder for proper TLS checking during +// proxying with a spdy roundtripper. +func (s *SpdyRoundTripper) TLSClientConfig() *tls.Config { + return s.tlsConfig +} + +// Dial implements k8s.io/apimachinery/pkg/util/net.Dialer. +func (s *SpdyRoundTripper) Dial(req *http.Request) (net.Conn, error) { + conn, err := s.dial(req) + if err != nil { + return nil, err + } + + if err := req.Write(conn); err != nil { + conn.Close() + return nil, err + } + + return conn, nil +} + +// dial dials the host specified by req, using TLS if appropriate, optionally +// using a proxy server if one is configured via environment variables. +func (s *SpdyRoundTripper) dial(req *http.Request) (net.Conn, error) { + proxier := s.proxier + if proxier == nil { + proxier = utilnet.NewProxierWithNoProxyCIDR(http.ProxyFromEnvironment) + } + proxyURL, err := proxier(req) + if err != nil { + return nil, err + } + + if proxyURL == nil { + return s.dialWithoutProxy(req.Context(), req.URL) + } + + // ensure we use a canonical host with proxyReq + targetHost := netutil.CanonicalAddr(req.URL) + + // proxying logic adapted from http://blog.h6t.eu/post/74098062923/golang-websocket-with-http-proxy-support + proxyReq := http.Request{ + Method: "CONNECT", + URL: &url.URL{}, + Host: targetHost, + } + + if pa := s.proxyAuth(proxyURL); pa != "" { + proxyReq.Header = http.Header{} + proxyReq.Header.Set("Proxy-Authorization", pa) + } + + proxyDialConn, err := s.dialWithoutProxy(req.Context(), proxyURL) + if err != nil { + return nil, err + } + + proxyClientConn := httputil.NewProxyClientConn(proxyDialConn, nil) + _, err = proxyClientConn.Do(&proxyReq) + if err != nil && err != httputil.ErrPersistEOF { + return nil, err + } + + rwc, _ := proxyClientConn.Hijack() + + if req.URL.Scheme != "https" { + return rwc, nil + } + + host, _, err := net.SplitHostPort(targetHost) + if err != nil { + return nil, err + } + + tlsConfig := s.tlsConfig + switch { + case tlsConfig == nil: + tlsConfig = &tls.Config{ServerName: host} + case len(tlsConfig.ServerName) == 0: + tlsConfig = tlsConfig.Clone() + tlsConfig.ServerName = host + } + + tlsConn := tls.Client(rwc, tlsConfig) + + // need to manually call Handshake() so we can call VerifyHostname() below + if err := tlsConn.Handshake(); err != nil { + return nil, err + } + + // Return if we were configured to skip validation + if tlsConfig.InsecureSkipVerify { + return tlsConn, nil + } + + if err := tlsConn.VerifyHostname(tlsConfig.ServerName); err != nil { + return nil, err + } + + return tlsConn, nil +} + +// dialWithoutProxy dials the host 
specified by url, using TLS if appropriate. +func (s *SpdyRoundTripper) dialWithoutProxy(ctx context.Context, url *url.URL) (net.Conn, error) { + dialAddr := netutil.CanonicalAddr(url) + + if url.Scheme == "http" { + if s.Dialer == nil { + var d net.Dialer + return d.DialContext(ctx, "tcp", dialAddr) + } else { + return s.Dialer.DialContext(ctx, "tcp", dialAddr) + } + } + + // TODO validate the TLSClientConfig is set up? + var conn *tls.Conn + var err error + if s.Dialer == nil { + conn, err = tls.Dial("tcp", dialAddr, s.tlsConfig) + } else { + conn, err = tls.DialWithDialer(s.Dialer, "tcp", dialAddr, s.tlsConfig) + } + if err != nil { + return nil, err + } + + // Return if we were configured to skip validation + if s.tlsConfig != nil && s.tlsConfig.InsecureSkipVerify { + return conn, nil + } + + host, _, err := net.SplitHostPort(dialAddr) + if err != nil { + return nil, err + } + if s.tlsConfig != nil && len(s.tlsConfig.ServerName) > 0 { + host = s.tlsConfig.ServerName + } + err = conn.VerifyHostname(host) + if err != nil { + return nil, err + } + + return conn, nil +} + +// proxyAuth returns, for a given proxy URL, the value to be used for the Proxy-Authorization header +func (s *SpdyRoundTripper) proxyAuth(proxyURL *url.URL) string { + if proxyURL == nil || proxyURL.User == nil { + return "" + } + credentials := proxyURL.User.String() + encodedAuth := base64.StdEncoding.EncodeToString([]byte(credentials)) + return fmt.Sprintf("Basic %s", encodedAuth) +} + +// RoundTrip executes the Request and upgrades it. After a successful upgrade, +// clients may call SpdyRoundTripper.Connection() to retrieve the upgraded +// connection. +func (s *SpdyRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + header := utilnet.CloneHeader(req.Header) + header.Add(httpstream.HeaderConnection, httpstream.HeaderUpgrade) + header.Add(httpstream.HeaderUpgrade, HeaderSpdy31) + + var ( + conn net.Conn + rawResponse []byte + err error + ) + + if s.followRedirects { + conn, rawResponse, err = utilnet.ConnectWithRedirects(req.Method, req.URL, header, req.Body, s, s.requireSameHostRedirects) + } else { + clone := utilnet.CloneRequest(req) + clone.Header = header + conn, err = s.Dial(clone) + } + if err != nil { + return nil, err + } + + responseReader := bufio.NewReader( + io.MultiReader( + bytes.NewBuffer(rawResponse), + conn, + ), + ) + + resp, err := http.ReadResponse(responseReader, nil) + if err != nil { + if conn != nil { + conn.Close() + } + return nil, err + } + + s.conn = conn + + return resp, nil +} + +// NewConnection validates the upgrade response, creating and returning a new +// httpstream.Connection if there were no errors. 
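+//
+// An editor's sketch of the typical client flow (request construction and TLS
+// config assumed):
+//
+//	upgrader := NewRoundTripper(tlsConfig, true, false)
+//	req, _ := http.NewRequest("POST", reqURL.String(), nil)
+//	resp, _ := upgrader.RoundTrip(req)
+//	conn, err := upgrader.NewConnection(resp)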
+func (s *SpdyRoundTripper) NewConnection(resp *http.Response) (httpstream.Connection, error) {
+	connectionHeader := strings.ToLower(resp.Header.Get(httpstream.HeaderConnection))
+	upgradeHeader := strings.ToLower(resp.Header.Get(httpstream.HeaderUpgrade))
+	if (resp.StatusCode != http.StatusSwitchingProtocols) || !strings.Contains(connectionHeader, strings.ToLower(httpstream.HeaderUpgrade)) || !strings.Contains(upgradeHeader, strings.ToLower(HeaderSpdy31)) {
+		defer resp.Body.Close()
+		responseError := ""
+		responseErrorBytes, err := ioutil.ReadAll(resp.Body)
+		if err != nil {
+			responseError = "unable to read error from server response"
+		} else {
+			// TODO: I don't belong here, I should be abstracted from this class
+			if obj, _, err := statusCodecs.UniversalDecoder().Decode(responseErrorBytes, nil, &metav1.Status{}); err == nil {
+				if status, ok := obj.(*metav1.Status); ok {
+					return nil, &apierrors.StatusError{ErrStatus: *status}
+				}
+			}
+			responseError = string(responseErrorBytes)
+			responseError = strings.TrimSpace(responseError)
+		}
+
+		return nil, fmt.Errorf("unable to upgrade connection: %s", responseError)
+	}
+
+	return NewClientConnection(s.conn)
+}
+
+// statusScheme is private scheme for the decoding here until someone fixes the TODO in NewConnection
+var statusScheme = runtime.NewScheme()
+
+// statusCodecs is a codec factory for statusScheme, used to decode the metav1.Status in error responses.
+var statusCodecs = serializer.NewCodecFactory(statusScheme)
+
+func init() {
+	statusScheme.AddUnversionedTypes(metav1.SchemeGroupVersion,
+		&metav1.Status{},
+	)
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/upgrade.go b/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/upgrade.go
new file mode 100644
index 00000000000..045d214d2b7
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/upgrade.go
@@ -0,0 +1,107 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package spdy
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"net"
+	"net/http"
+	"strings"
+	"sync/atomic"
+
+	"k8s.io/apimachinery/pkg/util/httpstream"
+	"k8s.io/apimachinery/pkg/util/runtime"
+)
+
+const HeaderSpdy31 = "SPDY/3.1"
+
+// responseUpgrader knows how to upgrade HTTP responses. It
+// implements the httpstream.ResponseUpgrader interface.
+type responseUpgrader struct {
+}
+
+// connWrapper is used to wrap a hijacked connection and its bufio.Reader. All
+// calls will be handled directly by the underlying net.Conn with the exception
+// of Read and Close calls, which will consider data in the bufio.Reader. This
+// ensures that data already inside the used bufio.Reader instance is also
+// read.
+type connWrapper struct {
+	net.Conn
+	closed    int32
+	bufReader *bufio.Reader
+}
+
+func (w *connWrapper) Read(b []byte) (n int, err error) {
+	if atomic.LoadInt32(&w.closed) == 1 {
+		return 0, io.EOF
+	}
+	return w.bufReader.Read(b)
+}
+
+func (w *connWrapper) Close() error {
+	err := w.Conn.Close()
+	atomic.StoreInt32(&w.closed, 1)
+	return err
+}
+
+// NewResponseUpgrader returns a new httpstream.ResponseUpgrader that is
+// capable of upgrading HTTP responses using SPDY/3.1 via the
+// spdystream package.
+func NewResponseUpgrader() httpstream.ResponseUpgrader {
+	return responseUpgrader{}
+}
+
+// UpgradeResponse upgrades an HTTP response to one that supports multiplexed
+// streams. newStreamHandler will be called asynchronously whenever the
+// other end of the upgraded connection creates a new stream.
+func (u responseUpgrader) UpgradeResponse(w http.ResponseWriter, req *http.Request, newStreamHandler httpstream.NewStreamHandler) httpstream.Connection {
+	connectionHeader := strings.ToLower(req.Header.Get(httpstream.HeaderConnection))
+	upgradeHeader := strings.ToLower(req.Header.Get(httpstream.HeaderUpgrade))
+	if !strings.Contains(connectionHeader, strings.ToLower(httpstream.HeaderUpgrade)) || !strings.Contains(upgradeHeader, strings.ToLower(HeaderSpdy31)) {
+		errorMsg := fmt.Sprintf("unable to upgrade: missing upgrade headers in request: %#v", req.Header)
+		http.Error(w, errorMsg, http.StatusBadRequest)
+		return nil
+	}
+
+	hijacker, ok := w.(http.Hijacker)
+	if !ok {
+		errorMsg := fmt.Sprintf("unable to upgrade: unable to hijack response")
+		http.Error(w, errorMsg, http.StatusInternalServerError)
+		return nil
+	}
+
+	w.Header().Add(httpstream.HeaderConnection, httpstream.HeaderUpgrade)
+	w.Header().Add(httpstream.HeaderUpgrade, HeaderSpdy31)
+	w.WriteHeader(http.StatusSwitchingProtocols)
+
+	conn, bufrw, err := hijacker.Hijack()
+	if err != nil {
+		runtime.HandleError(fmt.Errorf("unable to upgrade: error hijacking response: %v", err))
+		return nil
+	}
+
+	connWithBuf := &connWrapper{Conn: conn, bufReader: bufrw.Reader}
+	spdyConn, err := NewServerConnection(connWithBuf, newStreamHandler)
+	if err != nil {
+		runtime.HandleError(fmt.Errorf("unable to upgrade: error creating SPDY server connection: %v", err))
+		return nil
+	}
+
+	return spdyConn
+}
diff --git a/vendor/k8s.io/apimachinery/third_party/forked/golang/netutil/addr.go b/vendor/k8s.io/apimachinery/third_party/forked/golang/netutil/addr.go
new file mode 100644
index 00000000000..c70f431c272
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/third_party/forked/golang/netutil/addr.go
@@ -0,0 +1,27 @@
+package netutil
+
+import (
+	"net/url"
+	"strings"
+)
+
+// FROM: http://golang.org/src/net/http/client.go
+// Given a string of the form "host", "host:port", or "[ipv6::address]:port",
+// return true if the string includes a port.
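+// For example, hasPort("host:80") and hasPort("[::1]:443") are true, while
+// hasPort("host") and hasPort("[::1]") are false.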
+func hasPort(s string) bool { return strings.LastIndex(s, ":") > strings.LastIndex(s, "]") } + +// FROM: http://golang.org/src/net/http/transport.go +var portMap = map[string]string{ + "http": "80", + "https": "443", +} + +// FROM: http://golang.org/src/net/http/transport.go +// canonicalAddr returns url.Host but always with a ":port" suffix +func CanonicalAddr(url *url.URL) string { + addr := url.Host + if !hasPort(addr) { + return addr + ":" + portMap[url.Scheme] + } + return addr +} diff --git a/vendor/k8s.io/client-go/tools/portforward/doc.go b/vendor/k8s.io/client-go/tools/portforward/doc.go new file mode 100644 index 00000000000..2f53406344f --- /dev/null +++ b/vendor/k8s.io/client-go/tools/portforward/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package portforward adds support for SSH-like port forwarding from the client's +// local host to remote containers. +package portforward // import "k8s.io/client-go/tools/portforward" diff --git a/vendor/k8s.io/client-go/tools/portforward/portforward.go b/vendor/k8s.io/client-go/tools/portforward/portforward.go new file mode 100644 index 00000000000..4ab72bb4f3c --- /dev/null +++ b/vendor/k8s.io/client-go/tools/portforward/portforward.go @@ -0,0 +1,429 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package portforward + +import ( + "errors" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + "sort" + "strconv" + "strings" + "sync" + + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/httpstream" + "k8s.io/apimachinery/pkg/util/runtime" +) + +// PortForwardProtocolV1Name is the subprotocol used for port forwarding. +// TODO move to API machinery and re-unify with kubelet/server/portfoward +const PortForwardProtocolV1Name = "portforward.k8s.io" + +// PortForwarder knows how to listen for local connections and forward them to +// a remote pod via an upgraded HTTP request. +type PortForwarder struct { + addresses []listenAddress + ports []ForwardedPort + stopChan <-chan struct{} + + dialer httpstream.Dialer + streamConn httpstream.Connection + listeners []io.Closer + Ready chan struct{} + requestIDLock sync.Mutex + requestID int + out io.Writer + errOut io.Writer +} + +// ForwardedPort contains a Local:Remote port pairing. 
+type ForwardedPort struct { + Local uint16 + Remote uint16 +} + +/* + valid port specifications: + + 5000 + - forwards from localhost:5000 to pod:5000 + + 8888:5000 + - forwards from localhost:8888 to pod:5000 + + 0:5000 + :5000 + - selects a random available local port, + forwards from localhost: to pod:5000 +*/ +func parsePorts(ports []string) ([]ForwardedPort, error) { + var forwards []ForwardedPort + for _, portString := range ports { + parts := strings.Split(portString, ":") + var localString, remoteString string + if len(parts) == 1 { + localString = parts[0] + remoteString = parts[0] + } else if len(parts) == 2 { + localString = parts[0] + if localString == "" { + // support :5000 + localString = "0" + } + remoteString = parts[1] + } else { + return nil, fmt.Errorf("Invalid port format '%s'", portString) + } + + localPort, err := strconv.ParseUint(localString, 10, 16) + if err != nil { + return nil, fmt.Errorf("Error parsing local port '%s': %s", localString, err) + } + + remotePort, err := strconv.ParseUint(remoteString, 10, 16) + if err != nil { + return nil, fmt.Errorf("Error parsing remote port '%s': %s", remoteString, err) + } + if remotePort == 0 { + return nil, fmt.Errorf("Remote port must be > 0") + } + + forwards = append(forwards, ForwardedPort{uint16(localPort), uint16(remotePort)}) + } + + return forwards, nil +} + +type listenAddress struct { + address string + protocol string + failureMode string +} + +func parseAddresses(addressesToParse []string) ([]listenAddress, error) { + var addresses []listenAddress + parsed := make(map[string]listenAddress) + for _, address := range addressesToParse { + if address == "localhost" { + if _, exists := parsed["127.0.0.1"]; !exists { + ip := listenAddress{address: "127.0.0.1", protocol: "tcp4", failureMode: "all"} + parsed[ip.address] = ip + } + if _, exists := parsed["::1"]; !exists { + ip := listenAddress{address: "::1", protocol: "tcp6", failureMode: "all"} + parsed[ip.address] = ip + } + } else if net.ParseIP(address).To4() != nil { + parsed[address] = listenAddress{address: address, protocol: "tcp4", failureMode: "any"} + } else if net.ParseIP(address) != nil { + parsed[address] = listenAddress{address: address, protocol: "tcp6", failureMode: "any"} + } else { + return nil, fmt.Errorf("%s is not a valid IP", address) + } + } + addresses = make([]listenAddress, len(parsed)) + id := 0 + for _, v := range parsed { + addresses[id] = v + id++ + } + // Sort addresses before returning to get a stable order + sort.Slice(addresses, func(i, j int) bool { return addresses[i].address < addresses[j].address }) + + return addresses, nil +} + +// New creates a new PortForwarder with localhost listen addresses. +func New(dialer httpstream.Dialer, ports []string, stopChan <-chan struct{}, readyChan chan struct{}, out, errOut io.Writer) (*PortForwarder, error) { + return NewOnAddresses(dialer, []string{"localhost"}, ports, stopChan, readyChan, out, errOut) +} + +// NewOnAddresses creates a new PortForwarder with custom listen addresses. 
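+// An editor's sketch of typical wiring (dialer construction assumed):
+//
+//	stop, ready := make(chan struct{}), make(chan struct{})
+//	fw, err := NewOnAddresses(dialer, []string{"127.0.0.1"},
+//		[]string{"8080:80"}, stop, ready, os.Stdout, os.Stderr)
+//	go func() { _ = fw.ForwardPorts() }() // returns when stop is closed
+//	<-ready                               // listeners are up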
+func NewOnAddresses(dialer httpstream.Dialer, addresses []string, ports []string, stopChan <-chan struct{}, readyChan chan struct{}, out, errOut io.Writer) (*PortForwarder, error) { + if len(addresses) == 0 { + return nil, errors.New("You must specify at least 1 address") + } + parsedAddresses, err := parseAddresses(addresses) + if err != nil { + return nil, err + } + if len(ports) == 0 { + return nil, errors.New("You must specify at least 1 port") + } + parsedPorts, err := parsePorts(ports) + if err != nil { + return nil, err + } + return &PortForwarder{ + dialer: dialer, + addresses: parsedAddresses, + ports: parsedPorts, + stopChan: stopChan, + Ready: readyChan, + out: out, + errOut: errOut, + }, nil +} + +// ForwardPorts formats and executes a port forwarding request. The connection will remain +// open until stopChan is closed. +func (pf *PortForwarder) ForwardPorts() error { + defer pf.Close() + + var err error + pf.streamConn, _, err = pf.dialer.Dial(PortForwardProtocolV1Name) + if err != nil { + return fmt.Errorf("error upgrading connection: %s", err) + } + defer pf.streamConn.Close() + + return pf.forward() +} + +// forward dials the remote host specific in req, upgrades the request, starts +// listeners for each port specified in ports, and forwards local connections +// to the remote host via streams. +func (pf *PortForwarder) forward() error { + var err error + + listenSuccess := false + for i := range pf.ports { + port := &pf.ports[i] + err = pf.listenOnPort(port) + switch { + case err == nil: + listenSuccess = true + default: + if pf.errOut != nil { + fmt.Fprintf(pf.errOut, "Unable to listen on port %d: %v\n", port.Local, err) + } + } + } + + if !listenSuccess { + return fmt.Errorf("Unable to listen on any of the requested ports: %v", pf.ports) + } + + if pf.Ready != nil { + close(pf.Ready) + } + + // wait for interrupt or conn closure + select { + case <-pf.stopChan: + case <-pf.streamConn.CloseChan(): + runtime.HandleError(errors.New("lost connection to pod")) + } + + return nil +} + +// listenOnPort delegates listener creation and waits for connections on requested bind addresses. +// An error is raised based on address groups (default and localhost) and their failure modes +func (pf *PortForwarder) listenOnPort(port *ForwardedPort) error { + var errors []error + failCounters := make(map[string]int, 2) + successCounters := make(map[string]int, 2) + for _, addr := range pf.addresses { + err := pf.listenOnPortAndAddress(port, addr.protocol, addr.address) + if err != nil { + errors = append(errors, err) + failCounters[addr.failureMode]++ + } else { + successCounters[addr.failureMode]++ + } + } + if successCounters["all"] == 0 && failCounters["all"] > 0 { + return fmt.Errorf("%s: %v", "Listeners failed to create with the following errors", errors) + } + if failCounters["any"] > 0 { + return fmt.Errorf("%s: %v", "Listeners failed to create with the following errors", errors) + } + return nil +} + +// listenOnPortAndAddress delegates listener creation and waits for new connections +// in the background f +func (pf *PortForwarder) listenOnPortAndAddress(port *ForwardedPort, protocol string, address string) error { + listener, err := pf.getListener(protocol, address, port) + if err != nil { + return err + } + pf.listeners = append(pf.listeners, listener) + go pf.waitForConnection(listener, *port) + return nil +} + +// getListener creates a listener on the interface targeted by the given hostname on the given port with +// the given protocol. 
protocol is in net.Listen style which basically admits values like tcp, tcp4, tcp6 +func (pf *PortForwarder) getListener(protocol string, hostname string, port *ForwardedPort) (net.Listener, error) { + listener, err := net.Listen(protocol, net.JoinHostPort(hostname, strconv.Itoa(int(port.Local)))) + if err != nil { + return nil, fmt.Errorf("Unable to create listener: Error %s", err) + } + listenerAddress := listener.Addr().String() + host, localPort, _ := net.SplitHostPort(listenerAddress) + localPortUInt, err := strconv.ParseUint(localPort, 10, 16) + + if err != nil { + fmt.Fprintf(pf.out, "Failed to forward from %s:%d -> %d\n", hostname, localPortUInt, port.Remote) + return nil, fmt.Errorf("Error parsing local port: %s from %s (%s)", err, listenerAddress, host) + } + port.Local = uint16(localPortUInt) + if pf.out != nil { + fmt.Fprintf(pf.out, "Forwarding from %s -> %d\n", net.JoinHostPort(hostname, strconv.Itoa(int(localPortUInt))), port.Remote) + } + + return listener, nil +} + +// waitForConnection waits for new connections to listener and handles them in +// the background. +func (pf *PortForwarder) waitForConnection(listener net.Listener, port ForwardedPort) { + for { + conn, err := listener.Accept() + if err != nil { + // TODO consider using something like https://github.com/hydrogen18/stoppableListener? + if !strings.Contains(strings.ToLower(err.Error()), "use of closed network connection") { + runtime.HandleError(fmt.Errorf("Error accepting connection on port %d: %v", port.Local, err)) + } + return + } + go pf.handleConnection(conn, port) + } +} + +func (pf *PortForwarder) nextRequestID() int { + pf.requestIDLock.Lock() + defer pf.requestIDLock.Unlock() + id := pf.requestID + pf.requestID++ + return id +} + +// handleConnection copies data between the local connection and the stream to +// the remote server. +func (pf *PortForwarder) handleConnection(conn net.Conn, port ForwardedPort) { + defer conn.Close() + + if pf.out != nil { + fmt.Fprintf(pf.out, "Handling connection for %d\n", port.Local) + } + + requestID := pf.nextRequestID() + + // create error stream + headers := http.Header{} + headers.Set(v1.StreamType, v1.StreamTypeError) + headers.Set(v1.PortHeader, fmt.Sprintf("%d", port.Remote)) + headers.Set(v1.PortForwardRequestIDHeader, strconv.Itoa(requestID)) + errorStream, err := pf.streamConn.CreateStream(headers) + if err != nil { + runtime.HandleError(fmt.Errorf("error creating error stream for port %d -> %d: %v", port.Local, port.Remote, err)) + return + } + // we're not writing to this stream + errorStream.Close() + + errorChan := make(chan error) + go func() { + message, err := ioutil.ReadAll(errorStream) + switch { + case err != nil: + errorChan <- fmt.Errorf("error reading from error stream for port %d -> %d: %v", port.Local, port.Remote, err) + case len(message) > 0: + errorChan <- fmt.Errorf("an error occurred forwarding %d -> %d: %v", port.Local, port.Remote, string(message)) + } + close(errorChan) + }() + + // create data stream + headers.Set(v1.StreamType, v1.StreamTypeData) + dataStream, err := pf.streamConn.CreateStream(headers) + if err != nil { + runtime.HandleError(fmt.Errorf("error creating forwarding stream for port %d -> %d: %v", port.Local, port.Remote, err)) + return + } + + localError := make(chan struct{}) + remoteDone := make(chan struct{}) + + go func() { + // Copy from the remote side to the local port. 
+ if _, err := io.Copy(conn, dataStream); err != nil && !strings.Contains(err.Error(), "use of closed network connection") { + runtime.HandleError(fmt.Errorf("error copying from remote stream to local connection: %v", err)) + } + + // inform the select below that the remote copy is done + close(remoteDone) + }() + + go func() { + // inform server we're not sending any more data after copy unblocks + defer dataStream.Close() + + // Copy from the local port to the remote side. + if _, err := io.Copy(dataStream, conn); err != nil && !strings.Contains(err.Error(), "use of closed network connection") { + runtime.HandleError(fmt.Errorf("error copying from local connection to remote stream: %v", err)) + // break out of the select below without waiting for the other copy to finish + close(localError) + } + }() + + // wait for either a local->remote error or for copying from remote->local to finish + select { + case <-remoteDone: + case <-localError: + } + + // always expect something on errorChan (it may be nil) + err = <-errorChan + if err != nil { + runtime.HandleError(err) + } +} + +// Close stops all listeners of PortForwarder. +func (pf *PortForwarder) Close() { + // stop all listeners + for _, l := range pf.listeners { + if err := l.Close(); err != nil { + runtime.HandleError(fmt.Errorf("error closing listener: %v", err)) + } + } +} + +// GetPorts will return the ports that were forwarded; this can be used to +// retrieve the locally-bound port in cases where the input was port 0. This +// function will signal an error if the Ready channel is nil or if the +// listeners are not ready yet; this function will succeed after the Ready +// channel has been closed. +func (pf *PortForwarder) GetPorts() ([]ForwardedPort, error) { + if pf.Ready == nil { + return nil, fmt.Errorf("no Ready channel provided") + } + select { + case <-pf.Ready: + return pf.ports, nil + default: + return nil, fmt.Errorf("listeners not ready") + } +} diff --git a/vendor/k8s.io/client-go/tools/watch/informerwatcher.go b/vendor/k8s.io/client-go/tools/watch/informerwatcher.go new file mode 100644 index 00000000000..4e0a400bb55 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/watch/informerwatcher.go @@ -0,0 +1,150 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package watch + +import ( + "sync" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/tools/cache" +) + +func newEventProcessor(out chan<- watch.Event) *eventProcessor { + return &eventProcessor{ + out: out, + cond: sync.NewCond(&sync.Mutex{}), + done: make(chan struct{}), + } +} + +// eventProcessor buffers events and writes them to an out chan when a reader +// is waiting. Because of the requirement to buffer events, it synchronizes +// input with a condition, and synchronizes output with a channels. It needs to +// be able to yield while both waiting on an input condition and while blocked +// on writing to the output channel. 
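+//
+// A rough sketch of the intended lifecycle:
+//
+//	e := newEventProcessor(out)
+//	go e.run()    // drains buffered batches into out, aborting when done closes
+//	e.push(event) // appends to the buffer and signals the waiting run loop
+//	e.stop()      // closes done and wakes run so it can return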
+type eventProcessor struct { + out chan<- watch.Event + + cond *sync.Cond + buff []watch.Event + + done chan struct{} +} + +func (e *eventProcessor) run() { + for { + batch := e.takeBatch() + e.writeBatch(batch) + if e.stopped() { + return + } + } +} + +func (e *eventProcessor) takeBatch() []watch.Event { + e.cond.L.Lock() + defer e.cond.L.Unlock() + + for len(e.buff) == 0 && !e.stopped() { + e.cond.Wait() + } + + batch := e.buff + e.buff = nil + return batch +} + +func (e *eventProcessor) writeBatch(events []watch.Event) { + for _, event := range events { + select { + case e.out <- event: + case <-e.done: + return + } + } +} + +func (e *eventProcessor) push(event watch.Event) { + e.cond.L.Lock() + defer e.cond.L.Unlock() + defer e.cond.Signal() + e.buff = append(e.buff, event) +} + +func (e *eventProcessor) stopped() bool { + select { + case <-e.done: + return true + default: + return false + } +} + +func (e *eventProcessor) stop() { + close(e.done) + e.cond.Signal() +} + +// NewIndexerInformerWatcher will create an IndexerInformer and wrap it into watch.Interface +// so you can use it anywhere where you'd have used a regular Watcher returned from Watch method. +// it also returns a channel you can use to wait for the informers to fully shutdown. +func NewIndexerInformerWatcher(lw cache.ListerWatcher, objType runtime.Object) (cache.Indexer, cache.Controller, watch.Interface, <-chan struct{}) { + ch := make(chan watch.Event) + w := watch.NewProxyWatcher(ch) + e := newEventProcessor(ch) + + indexer, informer := cache.NewIndexerInformer(lw, objType, 0, cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { + e.push(watch.Event{ + Type: watch.Added, + Object: obj.(runtime.Object), + }) + }, + UpdateFunc: func(old, new interface{}) { + e.push(watch.Event{ + Type: watch.Modified, + Object: new.(runtime.Object), + }) + }, + DeleteFunc: func(obj interface{}) { + staleObj, stale := obj.(cache.DeletedFinalStateUnknown) + if stale { + // We have no means of passing the additional information down using + // watch API based on watch.Event but the caller can filter such + // objects by checking if metadata.deletionTimestamp is set + obj = staleObj + } + + e.push(watch.Event{ + Type: watch.Deleted, + Object: obj.(runtime.Object), + }) + }, + }, cache.Indexers{}) + + go e.run() + + doneCh := make(chan struct{}) + go func() { + defer close(doneCh) + defer e.stop() + informer.Run(w.StopChan()) + }() + + return indexer, informer, w, doneCh +} diff --git a/vendor/k8s.io/client-go/tools/watch/retrywatcher.go b/vendor/k8s.io/client-go/tools/watch/retrywatcher.go new file mode 100644 index 00000000000..47ae9df4afd --- /dev/null +++ b/vendor/k8s.io/client-go/tools/watch/retrywatcher.go @@ -0,0 +1,287 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package watch + +import ( + "context" + "errors" + "fmt" + "io" + "net/http" + "time" + + "github.com/davecgh/go-spew/spew" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/net" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/tools/cache" + "k8s.io/klog" +) + +// resourceVersionGetter is an interface used to get resource version from events. +// We can't reuse an interface from meta otherwise it would be a cyclic dependency and we need just this one method +type resourceVersionGetter interface { + GetResourceVersion() string +} + +// RetryWatcher will make sure that in case the underlying watcher is closed (e.g. due to API timeout or etcd timeout) +// it will get restarted from the last point without the consumer even knowing about it. +// RetryWatcher does that by inspecting events and keeping track of resourceVersion. +// Especially useful when using watch.UntilWithoutRetry where premature termination is causing issues and flakes. +// Please note that this is not resilient to etcd cache not having the resource version anymore - you would need to +// use Informers for that. +type RetryWatcher struct { + lastResourceVersion string + watcherClient cache.Watcher + resultChan chan watch.Event + stopChan chan struct{} + doneChan chan struct{} + minRestartDelay time.Duration +} + +// NewRetryWatcher creates a new RetryWatcher. +// It will make sure that watches gets restarted in case of recoverable errors. +// The initialResourceVersion will be given to watch method when first called. +func NewRetryWatcher(initialResourceVersion string, watcherClient cache.Watcher) (*RetryWatcher, error) { + return newRetryWatcher(initialResourceVersion, watcherClient, 1*time.Second) +} + +func newRetryWatcher(initialResourceVersion string, watcherClient cache.Watcher, minRestartDelay time.Duration) (*RetryWatcher, error) { + switch initialResourceVersion { + case "", "0": + // TODO: revisit this if we ever get WATCH v2 where it means start "now" + // without doing the synthetic list of objects at the beginning (see #74022) + return nil, fmt.Errorf("initial RV %q is not supported due to issues with underlying WATCH", initialResourceVersion) + default: + break + } + + rw := &RetryWatcher{ + lastResourceVersion: initialResourceVersion, + watcherClient: watcherClient, + stopChan: make(chan struct{}), + doneChan: make(chan struct{}), + resultChan: make(chan watch.Event, 0), + minRestartDelay: minRestartDelay, + } + + go rw.receive() + return rw, nil +} + +func (rw *RetryWatcher) send(event watch.Event) bool { + // Writing to an unbuffered channel is blocking operation + // and we need to check if stop wasn't requested while doing so. + select { + case rw.resultChan <- event: + return true + case <-rw.stopChan: + return false + } +} + +// doReceive returns true when it is done, false otherwise. +// If it is not done the second return value holds the time to wait before calling it again. +func (rw *RetryWatcher) doReceive() (bool, time.Duration) { + watcher, err := rw.watcherClient.Watch(metav1.ListOptions{ + ResourceVersion: rw.lastResourceVersion, + }) + // We are very unlikely to hit EOF here since we are just establishing the call, + // but it may happen that the apiserver is just shutting down (e.g. 
being restarted) + // This is consistent with how it is handled for informers + switch err { + case nil: + break + + case io.EOF: + // watch closed normally + return false, 0 + + case io.ErrUnexpectedEOF: + klog.V(1).Infof("Watch closed with unexpected EOF: %v", err) + return false, 0 + + default: + msg := "Watch failed: %v" + if net.IsProbableEOF(err) { + klog.V(5).Infof(msg, err) + // Retry + return false, 0 + } + + klog.Errorf(msg, err) + // Retry + return false, 0 + } + + if watcher == nil { + klog.Error("Watch returned nil watcher") + // Retry + return false, 0 + } + + ch := watcher.ResultChan() + defer watcher.Stop() + + for { + select { + case <-rw.stopChan: + klog.V(4).Info("Stopping RetryWatcher.") + return true, 0 + case event, ok := <-ch: + if !ok { + klog.V(4).Infof("Failed to get event! Re-creating the watcher. Last RV: %s", rw.lastResourceVersion) + return false, 0 + } + + // We need to inspect the event and get ResourceVersion out of it + switch event.Type { + case watch.Added, watch.Modified, watch.Deleted, watch.Bookmark: + metaObject, ok := event.Object.(resourceVersionGetter) + if !ok { + _ = rw.send(watch.Event{ + Type: watch.Error, + Object: &apierrors.NewInternalError(errors.New("retryWatcher: doesn't support resourceVersion")).ErrStatus, + }) + // We have to abort here because this might cause lastResourceVersion inconsistency by skipping a potential RV with valid data! + return true, 0 + } + + resourceVersion := metaObject.GetResourceVersion() + if resourceVersion == "" { + _ = rw.send(watch.Event{ + Type: watch.Error, + Object: &apierrors.NewInternalError(fmt.Errorf("retryWatcher: object %#v doesn't support resourceVersion", event.Object)).ErrStatus, + }) + // We have to abort here because this might cause lastResourceVersion inconsistency by skipping a potential RV with valid data! + return true, 0 + } + + // All is fine; send the event and update lastResourceVersion + ok = rw.send(event) + if !ok { + return true, 0 + } + rw.lastResourceVersion = resourceVersion + + continue + + case watch.Error: + // This round trip allows us to handle unstructured status + errObject := apierrors.FromObject(event.Object) + statusErr, ok := errObject.(*apierrors.StatusError) + if !ok { + klog.Error(spew.Sprintf("Received an error which is not *metav1.Status but %#+v", event.Object)) + // Retry unknown errors + return false, 0 + } + + status := statusErr.ErrStatus + + statusDelay := time.Duration(0) + if status.Details != nil { + statusDelay = time.Duration(status.Details.RetryAfterSeconds) * time.Second + } + + switch status.Code { + case http.StatusGone: + // Never retry RV too old errors + _ = rw.send(event) + return true, 0 + + case http.StatusGatewayTimeout, http.StatusInternalServerError: + // Retry + return false, statusDelay + + default: + // We retry by default. RetryWatcher is meant to proceed unless it is certain + // that it can't. If we are not certain, we proceed with retry and leave it + // up to the user to timeout if needed. + + // Log here so we have a record of hitting the unexpected error + // and we can whitelist some error codes if we missed any that are expected. 
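+					// For example, a 429 StatusTooManyRequests response carrying
+					// Details.RetryAfterSeconds ends up here and is retried after
+					// the server-suggested delay computed above.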
+ klog.V(5).Info(spew.Sprintf("Retrying after unexpected error: %#+v", event.Object)) + + // Retry + return false, statusDelay + } + + default: + klog.Errorf("Failed to recognize Event type %q", event.Type) + _ = rw.send(watch.Event{ + Type: watch.Error, + Object: &apierrors.NewInternalError(fmt.Errorf("retryWatcher failed to recognize Event type %q", event.Type)).ErrStatus, + }) + // We are unable to restart the watch and have to stop the loop or this might cause lastResourceVersion inconsistency by skipping a potential RV with valid data! + return true, 0 + } + } + } +} + +// receive reads the result from a watcher, restarting it if necessary. +func (rw *RetryWatcher) receive() { + defer close(rw.doneChan) + defer close(rw.resultChan) + + klog.V(4).Info("Starting RetryWatcher.") + defer klog.V(4).Info("Stopping RetryWatcher.") + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + go func() { + select { + case <-rw.stopChan: + cancel() + return + case <-ctx.Done(): + return + } + }() + + // We use non sliding until so we don't introduce delays on happy path when WATCH call + // timeouts or gets closed and we need to reestablish it while also avoiding hot loops. + wait.NonSlidingUntilWithContext(ctx, func(ctx context.Context) { + done, retryAfter := rw.doReceive() + if done { + cancel() + return + } + + time.Sleep(retryAfter) + + klog.V(4).Infof("Restarting RetryWatcher at RV=%q", rw.lastResourceVersion) + }, rw.minRestartDelay) +} + +// ResultChan implements Interface. +func (rw *RetryWatcher) ResultChan() <-chan watch.Event { + return rw.resultChan +} + +// Stop implements Interface. +func (rw *RetryWatcher) Stop() { + close(rw.stopChan) +} + +// Done allows the caller to be notified when Retry watcher stops. +func (rw *RetryWatcher) Done() <-chan struct{} { + return rw.doneChan +} diff --git a/vendor/k8s.io/client-go/tools/watch/until.go b/vendor/k8s.io/client-go/tools/watch/until.go new file mode 100644 index 00000000000..e12d82aca48 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/watch/until.go @@ -0,0 +1,236 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package watch + +import ( + "context" + "errors" + "fmt" + "time" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/tools/cache" + "k8s.io/klog" +) + +// PreconditionFunc returns true if the condition has been reached, false if it has not been reached yet, +// or an error if the condition failed or detected an error state. +type PreconditionFunc func(store cache.Store) (bool, error) + +// ConditionFunc returns true if the condition has been reached, false if it has not been reached yet, +// or an error if the condition cannot be checked and should terminate. 
In general, it is better to define +// level driven conditions over edge driven conditions (pod has ready=true, vs pod modified and ready changed +// from false to true). +type ConditionFunc func(event watch.Event) (bool, error) + +// ErrWatchClosed is returned when the watch channel is closed before timeout in UntilWithoutRetry. +var ErrWatchClosed = errors.New("watch closed before UntilWithoutRetry timeout") + +// UntilWithoutRetry reads items from the watch until each provided condition succeeds, and then returns the last watch +// encountered. The first condition that returns an error terminates the watch (and the event is also returned). +// If no event has been received, the returned event will be nil. +// Conditions are satisfied sequentially so as to provide a useful primitive for higher level composition. +// Waits until context deadline or until context is canceled. +// +// Warning: Unless you have a very specific use case (probably a special Watcher) don't use this function!!! +// Warning: This will fail e.g. on API timeouts and/or 'too old resource version' error. +// Warning: You are most probably looking for a function *Until* or *UntilWithSync* below, +// Warning: solving such issues. +// TODO: Consider making this function private to prevent misuse when the other occurrences in our codebase are gone. +func UntilWithoutRetry(ctx context.Context, watcher watch.Interface, conditions ...ConditionFunc) (*watch.Event, error) { + ch := watcher.ResultChan() + defer watcher.Stop() + var lastEvent *watch.Event + for _, condition := range conditions { + // check the next condition against the previous event and short circuit waiting for the next watch + if lastEvent != nil { + done, err := condition(*lastEvent) + if err != nil { + return lastEvent, err + } + if done { + continue + } + } + ConditionSucceeded: + for { + select { + case event, ok := <-ch: + if !ok { + return lastEvent, ErrWatchClosed + } + lastEvent = &event + + done, err := condition(event) + if err != nil { + return lastEvent, err + } + if done { + break ConditionSucceeded + } + + case <-ctx.Done(): + return lastEvent, wait.ErrWaitTimeout + } + } + } + return lastEvent, nil +} + +// Until wraps the watcherClient's watch function with RetryWatcher making sure that watcher gets restarted in case of errors. +// The initialResourceVersion will be given to watch method when first called. It shall not be "" or "0" +// given the underlying WATCH call issues (#74022). If you want the initial list ("", "0") done for you use ListWatchUntil instead. +// Remaining behaviour is identical to function UntilWithoutRetry. (See above.) +// Until can deal with API timeouts and lost connections. +// It guarantees you to see all events and in the order they happened. +// Due to this guarantee there is no way it can deal with 'Resource version too old error'. It will fail in this case. +// (See `UntilWithSync` if you'd prefer to recover from all the errors including RV too old by re-listing +// those items. In normal code you should care about being level driven so you'd not care about not seeing all the edges.) +// The most frequent usage for Until would be a test where you want to verify exact order of events ("edges"). +func Until(ctx context.Context, initialResourceVersion string, watcherClient cache.Watcher, conditions ...ConditionFunc) (*watch.Event, error) { + w, err := NewRetryWatcher(initialResourceVersion, watcherClient) + if err != nil { + return nil, err + } + + return UntilWithoutRetry(ctx, w, conditions...) 
+} + +// UntilWithSync creates an informer from lw, optionally checks precondition when the store is synced, +// and watches the output until each provided condition succeeds, in a way that is identical +// to function UntilWithoutRetry. (See above.) +// UntilWithSync can deal with all errors like API timeout, lost connections and 'Resource version too old'. +// It is the only function that can recover from 'Resource version too old', Until and UntilWithoutRetry will +// just fail in that case. On the other hand it can't provide you with guarantees as strong as using simple +// Watch method with Until. It can skip some intermediate events in case of watch function failing but it will +// re-list to recover and you always get an event, if there has been a change, after recovery. +// Also with the current implementation based on DeltaFIFO, order of the events you receive is guaranteed only for +// particular object, not between more of them even it's the same resource. +// The most frequent usage would be a command that needs to watch the "state of the world" and should't fail, like: +// waiting for object reaching a state, "small" controllers, ... +func UntilWithSync(ctx context.Context, lw cache.ListerWatcher, objType runtime.Object, precondition PreconditionFunc, conditions ...ConditionFunc) (*watch.Event, error) { + indexer, informer, watcher, done := NewIndexerInformerWatcher(lw, objType) + // We need to wait for the internal informers to fully stop so it's easier to reason about + // and it works with non-thread safe clients. + defer func() { <-done }() + // Proxy watcher can be stopped multiple times so it's fine to use defer here to cover alternative branches and + // let UntilWithoutRetry to stop it + defer watcher.Stop() + + if precondition != nil { + if !cache.WaitForCacheSync(ctx.Done(), informer.HasSynced) { + return nil, fmt.Errorf("UntilWithSync: unable to sync caches: %v", ctx.Err()) + } + + done, err := precondition(indexer) + if err != nil { + return nil, err + } + + if done { + return nil, nil + } + } + + return UntilWithoutRetry(ctx, watcher, conditions...) +} + +// ContextWithOptionalTimeout wraps context.WithTimeout and handles infinite timeouts expressed as 0 duration. +func ContextWithOptionalTimeout(parent context.Context, timeout time.Duration) (context.Context, context.CancelFunc) { + if timeout < 0 { + // This should be handled in validation + klog.Errorf("Timeout for context shall not be negative!") + timeout = 0 + } + + if timeout == 0 { + return context.WithCancel(parent) + } + + return context.WithTimeout(parent, timeout) +} + +// ListWatchUntil first lists objects, converts them into synthetic ADDED events +// and checks conditions for those synthetic events. If the conditions have not been reached so far +// it continues by calling Until which establishes a watch from resourceVersion of the list call +// to evaluate those conditions based on new events. +// ListWatchUntil provides the same guarantees as Until and replaces the old WATCH from RV "" (or "0") +// which was mixing list and watch calls internally and having severe design issues. (see #74022) +// There is no resourceVersion order guarantee for the initial list and those synthetic events. 
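+//
+// A hypothetical sketch (lw and the corev1 import are assumptions) of waiting
+// for a Pod to become Running with the helpers in this file:
+//
+//	ctx, cancel := ContextWithOptionalTimeout(context.Background(), 5*time.Minute)
+//	defer cancel()
+//	lastEvent, err := ListWatchUntil(ctx, lw, func(event watch.Event) (bool, error) {
+//		pod, ok := event.Object.(*corev1.Pod)
+//		if !ok {
+//			return false, nil
+//		}
+//		return pod.Status.Phase == corev1.PodRunning, nil
+//	})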
+func ListWatchUntil(ctx context.Context, lw cache.ListerWatcher, conditions ...ConditionFunc) (*watch.Event, error) { + if len(conditions) == 0 { + return nil, nil + } + + list, err := lw.List(metav1.ListOptions{}) + if err != nil { + return nil, err + } + initialItems, err := meta.ExtractList(list) + if err != nil { + return nil, err + } + + // use the initial items as simulated "adds" + var lastEvent *watch.Event + currIndex := 0 + passedConditions := 0 + for _, condition := range conditions { + // check the next condition against the previous event and short circuit waiting for the next watch + if lastEvent != nil { + done, err := condition(*lastEvent) + if err != nil { + return lastEvent, err + } + if done { + passedConditions = passedConditions + 1 + continue + } + } + + ConditionSucceeded: + for currIndex < len(initialItems) { + lastEvent = &watch.Event{Type: watch.Added, Object: initialItems[currIndex]} + currIndex++ + + done, err := condition(*lastEvent) + if err != nil { + return lastEvent, err + } + if done { + passedConditions = passedConditions + 1 + break ConditionSucceeded + } + } + } + if passedConditions == len(conditions) { + return lastEvent, nil + } + remainingConditions := conditions[passedConditions:] + + metaObj, err := meta.ListAccessor(list) + if err != nil { + return nil, err + } + currResourceVersion := metaObj.GetResourceVersion() + + return Until(ctx, currResourceVersion, lw, remainingConditions...) +} diff --git a/vendor/k8s.io/client-go/transport/spdy/spdy.go b/vendor/k8s.io/client-go/transport/spdy/spdy.go new file mode 100644 index 00000000000..53cc7ee18c5 --- /dev/null +++ b/vendor/k8s.io/client-go/transport/spdy/spdy.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spdy + +import ( + "fmt" + "net/http" + "net/url" + + "k8s.io/apimachinery/pkg/util/httpstream" + "k8s.io/apimachinery/pkg/util/httpstream/spdy" + restclient "k8s.io/client-go/rest" +) + +// Upgrader validates a response from the server after a SPDY upgrade. +type Upgrader interface { + // NewConnection validates the response and creates a new Connection. + NewConnection(resp *http.Response) (httpstream.Connection, error) +} + +// RoundTripperFor returns a round tripper and upgrader to use with SPDY. +func RoundTripperFor(config *restclient.Config) (http.RoundTripper, Upgrader, error) { + tlsConfig, err := restclient.TLSConfigFor(config) + if err != nil { + return nil, nil, err + } + upgradeRoundTripper := spdy.NewRoundTripper(tlsConfig, true, false) + wrapper, err := restclient.HTTPWrappersForConfig(config, upgradeRoundTripper) + if err != nil { + return nil, nil, err + } + return wrapper, upgradeRoundTripper, nil +} + +// dialer implements the httpstream.Dialer interface. +type dialer struct { + client *http.Client + upgrader Upgrader + method string + url *url.URL +} + +var _ httpstream.Dialer = &dialer{} + +// NewDialer will create a dialer that connects to the provided URL and upgrades the connection to SPDY. 
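+//
+// A hypothetical wiring sketch (restConfig and clientset are assumptions; the
+// portforward package above consumes the resulting dialer):
+//
+//	transport, upgrader, err := RoundTripperFor(restConfig)
+//	if err != nil {
+//		return err
+//	}
+//	req := clientset.CoreV1().RESTClient().Post().
+//		Resource("pods").Namespace("default").Name("mypod").
+//		SubResource("portforward")
+//	dialer := NewDialer(upgrader, &http.Client{Transport: transport}, http.MethodPost, req.URL())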
+func NewDialer(upgrader Upgrader, client *http.Client, method string, url *url.URL) httpstream.Dialer { + return &dialer{ + client: client, + upgrader: upgrader, + method: method, + url: url, + } +} + +func (d *dialer) Dial(protocols ...string) (httpstream.Connection, string, error) { + req, err := http.NewRequest(d.method, d.url.String(), nil) + if err != nil { + return nil, "", fmt.Errorf("error creating request: %v", err) + } + return Negotiate(d.upgrader, d.client, req, protocols...) +} + +// Negotiate opens a connection to a remote server and attempts to negotiate +// a SPDY connection. Upon success, it returns the connection and the protocol selected by +// the server. The client transport must use the upgradeRoundTripper - see RoundTripperFor. +func Negotiate(upgrader Upgrader, client *http.Client, req *http.Request, protocols ...string) (httpstream.Connection, string, error) { + for i := range protocols { + req.Header.Add(httpstream.HeaderProtocolVersion, protocols[i]) + } + resp, err := client.Do(req) + if err != nil { + return nil, "", fmt.Errorf("error sending request: %v", err) + } + defer resp.Body.Close() + conn, err := upgrader.NewConnection(resp) + if err != nil { + return nil, "", err + } + return conn, resp.Header.Get(httpstream.HeaderProtocolVersion), nil +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 11759d97a7a..5dc5b2efc51 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -388,6 +388,9 @@ github.com/docker/go-metrics github.com/docker/go-plugins-helpers/sdk # github.com/docker/go-units v0.4.0 github.com/docker/go-units +# github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96 +github.com/docker/spdystream +github.com/docker/spdystream/spdy # github.com/dop251/goja v0.0.0-00010101000000-000000000000 => github.com/andrewkroh/goja v0.0.0-20190128172624-dd2ac4456e20 github.com/dop251/goja github.com/dop251/goja/ast @@ -843,9 +846,13 @@ go.uber.org/zap/zapcore go.uber.org/zap/zaptest/observer # golang.org/x/crypto v0.0.0-20200204104054-c9f3fb736b72 golang.org/x/crypto/blake2b +golang.org/x/crypto/blowfish golang.org/x/crypto/cast5 +golang.org/x/crypto/chacha20 +golang.org/x/crypto/curve25519 golang.org/x/crypto/ed25519 golang.org/x/crypto/ed25519/internal/edwards25519 +golang.org/x/crypto/internal/subtle golang.org/x/crypto/md4 golang.org/x/crypto/openpgp golang.org/x/crypto/openpgp/armor @@ -856,7 +863,10 @@ golang.org/x/crypto/openpgp/s2k golang.org/x/crypto/pbkdf2 golang.org/x/crypto/pkcs12 golang.org/x/crypto/pkcs12/internal/rc2 +golang.org/x/crypto/poly1305 golang.org/x/crypto/sha3 +golang.org/x/crypto/ssh +golang.org/x/crypto/ssh/internal/bcrypt_pbkdf golang.org/x/crypto/ssh/terminal # golang.org/x/exp v0.0.0-20191227195350-da58074b4299 golang.org/x/exp/apidiff @@ -1055,6 +1065,8 @@ gopkg.in/inf.v0 gopkg.in/jcmturner/aescts.v1 # gopkg.in/jcmturner/dnsutils.v1 v1.0.1 gopkg.in/jcmturner/dnsutils.v1 +# gopkg.in/jcmturner/goidentity.v3 v3.0.0 +gopkg.in/jcmturner/goidentity.v3 # gopkg.in/jcmturner/gokrb5.v7 v7.3.0 gopkg.in/jcmturner/gokrb5.v7/asn1tools gopkg.in/jcmturner/gokrb5.v7/client @@ -1085,6 +1097,8 @@ gopkg.in/jcmturner/gokrb5.v7/keytab gopkg.in/jcmturner/gokrb5.v7/krberror gopkg.in/jcmturner/gokrb5.v7/messages gopkg.in/jcmturner/gokrb5.v7/pac +gopkg.in/jcmturner/gokrb5.v7/service +gopkg.in/jcmturner/gokrb5.v7/spnego gopkg.in/jcmturner/gokrb5.v7/types # gopkg.in/jcmturner/rpc.v1 v1.1.0 gopkg.in/jcmturner/rpc.v1/mstypes @@ -1189,6 +1203,8 @@ k8s.io/apimachinery/pkg/util/clock k8s.io/apimachinery/pkg/util/diff 
k8s.io/apimachinery/pkg/util/errors k8s.io/apimachinery/pkg/util/framer +k8s.io/apimachinery/pkg/util/httpstream +k8s.io/apimachinery/pkg/util/httpstream/spdy k8s.io/apimachinery/pkg/util/intstr k8s.io/apimachinery/pkg/util/json k8s.io/apimachinery/pkg/util/naming @@ -1201,6 +1217,7 @@ k8s.io/apimachinery/pkg/util/wait k8s.io/apimachinery/pkg/util/yaml k8s.io/apimachinery/pkg/version k8s.io/apimachinery/pkg/watch +k8s.io/apimachinery/third_party/forked/golang/netutil k8s.io/apimachinery/third_party/forked/golang/reflect # k8s.io/client-go v0.0.0-20190620085101-78d2af792bab k8s.io/client-go/discovery @@ -1257,8 +1274,11 @@ k8s.io/client-go/tools/clientcmd/api/latest k8s.io/client-go/tools/clientcmd/api/v1 k8s.io/client-go/tools/metrics k8s.io/client-go/tools/pager +k8s.io/client-go/tools/portforward k8s.io/client-go/tools/reference +k8s.io/client-go/tools/watch k8s.io/client-go/transport +k8s.io/client-go/transport/spdy k8s.io/client-go/util/cert k8s.io/client-go/util/connrotation k8s.io/client-go/util/flowcontrol diff --git a/winlogbeat/magefile.go b/winlogbeat/magefile.go index 84596c9a677..65bb92af96e 100644 --- a/winlogbeat/magefile.go +++ b/winlogbeat/magefile.go @@ -50,15 +50,3 @@ func init() { // Update is an alias for update:all. This is a workaround for // https://github.com/magefile/mage/issues/217. func Update() { mg.Deps(winlogbeat.Update.All) } - -// Fields is an alias for update:fields. -// -// TODO: dev-tools/jenkins_ci.ps1 uses this. This should be removed when all -// projects have update to use goUnitTest. -func Fields() { mg.Deps(winlogbeat.Update.Fields) } - -// GoTestUnit is an alias for goUnitTest. -// -// TODO: dev-tools/jenkins_ci.ps1 uses this. This should be removed when all -// projects have update to use goUnitTest. -func GoTestUnit() { mg.Deps(unittest.GoUnitTest) } diff --git a/winlogbeat/sys/strings.go b/winlogbeat/sys/strings.go index 2310808086a..12d91fac31d 100644 --- a/winlogbeat/sys/strings.go +++ b/winlogbeat/sys/strings.go @@ -20,71 +20,12 @@ package sys import ( "errors" "fmt" - "io" "strings" "unicode/utf16" - "unicode/utf8" -) - -// The conditions replacementChar==unicode.ReplacementChar and -// maxRune==unicode.MaxRune are verified in the tests. -// Defining them locally avoids this package depending on package unicode. - -const ( - replacementChar = '\uFFFD' // Unicode replacement character - maxRune = '\U0010FFFF' // Maximum valid Unicode code point. -) - -const ( - // 0xd800-0xdc00 encodes the high 10 bits of a pair. - // 0xdc00-0xe000 encodes the low 10 bits of a pair. - // the value is those 20 bits plus 0x10000. - surr1 = 0xd800 - surr2 = 0xdc00 - surr3 = 0xe000 - - surrSelf = 0x10000 ) var ErrBufferTooSmall = errors.New("buffer too small") -func UTF16ToUTF8Bytes(in []byte, out io.Writer) error { - if len(in)%2 != 0 { - return fmt.Errorf("input buffer must have an even length (length=%d)", len(in)) - } - - var runeBuf [4]byte - var v1, v2 uint16 - for i := 0; i < len(in); i += 2 { - v1 = uint16(in[i]) | uint16(in[i+1])<<8 - // Stop at null-terminator. 
- if v1 == 0 { - return nil - } - - switch { - case v1 < surr1, surr3 <= v1: - n := utf8.EncodeRune(runeBuf[:], rune(v1)) - out.Write(runeBuf[:n]) - case surr1 <= v1 && v1 < surr2 && len(in) > i+2: - v2 = uint16(in[i+2]) | uint16(in[i+3])<<8 - if surr2 <= v2 && v2 < surr3 { - // valid surrogate sequence - r := utf16.DecodeRune(rune(v1), rune(v2)) - n := utf8.EncodeRune(runeBuf[:], r) - out.Write(runeBuf[:n]) - } - i += 2 - default: - // invalid surrogate sequence - n := utf8.EncodeRune(runeBuf[:], replacementChar) - out.Write(runeBuf[:n]) - } - } - - return nil -} - // UTF16BytesToString returns a string that is decoded from the UTF-16 bytes. // The byte slice must be of even length otherwise an error will be returned. // The integer returned is the offset to the start of the next string with diff --git a/winlogbeat/sys/strings_test.go b/winlogbeat/sys/strings_test.go index 0c92c9f3c47..358e61ed6b2 100644 --- a/winlogbeat/sys/strings_test.go +++ b/winlogbeat/sys/strings_test.go @@ -19,23 +19,16 @@ package sys import ( "bytes" - "encoding/binary" "testing" - "unicode/utf16" "github.com/stretchr/testify/assert" -) -func toUTF16Bytes(in string) []byte { - var u16 []uint16 = utf16.Encode([]rune(in)) - buf := &bytes.Buffer{} - binary.Write(buf, binary.LittleEndian, u16) - return buf.Bytes() -} + "github.com/elastic/beats/v7/libbeat/common" +) func TestUTF16BytesToString(t *testing.T) { input := "abc白鵬翔\u145A6" - utf16Bytes := toUTF16Bytes(input) + utf16Bytes := common.StringToUTF16Bytes(input) output, _, err := UTF16BytesToString(utf16Bytes) if err != nil { @@ -45,7 +38,7 @@ func TestUTF16BytesToString(t *testing.T) { } func TestUTF16BytesToStringOffset(t *testing.T) { - in := bytes.Join([][]byte{toUTF16Bytes("one"), toUTF16Bytes("two"), toUTF16Bytes("three")}, []byte{0, 0}) + in := bytes.Join([][]byte{common.StringToUTF16Bytes("one"), common.StringToUTF16Bytes("two"), common.StringToUTF16Bytes("three")}, []byte{0, 0}) output, offset, err := UTF16BytesToString(in) if err != nil { @@ -72,7 +65,7 @@ func TestUTF16BytesToStringOffset(t *testing.T) { } func TestUTF16BytesToStringOffsetWithEmptyString(t *testing.T) { - in := bytes.Join([][]byte{toUTF16Bytes(""), toUTF16Bytes("two")}, []byte{0, 0}) + in := bytes.Join([][]byte{common.StringToUTF16Bytes(""), common.StringToUTF16Bytes("two")}, []byte{0, 0}) output, offset, err := UTF16BytesToString(in) if err != nil { @@ -91,7 +84,7 @@ func TestUTF16BytesToStringOffsetWithEmptyString(t *testing.T) { } func BenchmarkUTF16BytesToString(b *testing.B) { - utf16Bytes := toUTF16Bytes("A logon was attempted using explicit credentials.") + utf16Bytes := common.StringToUTF16Bytes("A logon was attempted using explicit credentials.") b.Run("simple_string", func(b *testing.B) { b.ResetTimer() @@ -111,40 +104,3 @@ func BenchmarkUTF16BytesToString(b *testing.B) { } }) } - -func TestUTF16ToUTF8(t *testing.T) { - input := "abc白鵬翔\u145A6" - utf16Bytes := toUTF16Bytes(input) - - outputBuf := &bytes.Buffer{} - err := UTF16ToUTF8Bytes(utf16Bytes, outputBuf) - if err != nil { - t.Fatal(err) - } - assert.Equal(t, []byte(input), outputBuf.Bytes()) -} - -func TestUTF16BytesToStringTrimNullTerm(t *testing.T) { - input := "abc" - utf16Bytes := append(toUTF16Bytes(input), []byte{0, 0, 0, 0, 0, 0}...) 
- - outputBuf := &bytes.Buffer{} - err := UTF16ToUTF8Bytes(utf16Bytes, outputBuf) - if err != nil { - t.Fatal(err) - } - b := outputBuf.Bytes() - assert.Len(t, b, 3) - assert.Equal(t, input, string(b)) -} - -func BenchmarkUTF16ToUTF8(b *testing.B) { - utf16Bytes := toUTF16Bytes("A logon was attempted using explicit credentials.") - outputBuf := &bytes.Buffer{} - b.ResetTimer() - - for i := 0; i < b.N; i++ { - UTF16ToUTF8Bytes(utf16Bytes, outputBuf) - outputBuf.Reset() - } -} diff --git a/winlogbeat/sys/wineventlog/bufferpool.go b/winlogbeat/sys/wineventlog/bufferpool.go index 104d45f938a..90bdf825de1 100644 --- a/winlogbeat/sys/wineventlog/bufferpool.go +++ b/winlogbeat/sys/wineventlog/bufferpool.go @@ -20,7 +20,7 @@ package wineventlog import ( "sync" - "github.com/elastic/beats/v7/winlogbeat/sys" + "github.com/elastic/beats/v7/libbeat/common" ) // bufferPool contains a pool of byteBuffer objects. @@ -104,7 +104,7 @@ func UTF16BytesToString(b []byte) (string, error) { bb := newByteBuffer() defer bb.free() - if err := sys.UTF16ToUTF8Bytes(b, bb); err != nil { + if err := common.UTF16ToUTF8Bytes(b, bb); err != nil { return "", err } diff --git a/winlogbeat/sys/wineventlog/wineventlog_windows.go b/winlogbeat/sys/wineventlog/wineventlog_windows.go index 57aaf456947..3b282fd41d7 100644 --- a/winlogbeat/sys/wineventlog/wineventlog_windows.go +++ b/winlogbeat/sys/wineventlog/wineventlog_windows.go @@ -27,6 +27,8 @@ import ( "runtime" "syscall" + "github.com/elastic/beats/v7/libbeat/common" + "golang.org/x/sys/windows" "github.com/elastic/beats/v7/winlogbeat/sys" @@ -414,7 +416,7 @@ func FormatEventString( // This assumes there is only a single string value to read. This will // not work to read keys (when messageFlag == EvtFormatMessageKeyword). - return sys.UTF16ToUTF8Bytes(buffer[:bufferUsed], out) + return common.UTF16ToUTF8Bytes(buffer[:bufferUsed], out) } // offset reads a pointer value from the reader then calculates an offset from @@ -505,5 +507,5 @@ func renderXML(eventHandle EvtHandle, flag EvtRenderFlag, renderBuf []byte, out "to the buffer, but the buffer can only hold %d bytes", bufferUsed, len(renderBuf)) } - return sys.UTF16ToUTF8Bytes(renderBuf[:bufferUsed], out) + return common.UTF16ToUTF8Bytes(renderBuf[:bufferUsed], out) } diff --git a/winlogbeat/winlogbeat.reference.yml b/winlogbeat/winlogbeat.reference.yml index d0e11d083e8..471b6c4e7fc 100644 --- a/winlogbeat/winlogbeat.reference.yml +++ b/winlogbeat/winlogbeat.reference.yml @@ -449,6 +449,27 @@ output.elasticsearch: # The pin is a base64 encoded string of the SHA-256 fingerprint. #ssl.ca_sha256: "" + # Enable Kerberos support. Kerberos is automatically enabled if any Kerberos setting is set. + #kerberos.enabled: true + + # Authentication type to use with Kerberos. Available options: keytab, password. + #kerberos.auth_type: password + + # Path to the keytab file. It is used when auth_type is set to keytab. + #kerberos.keytab: /etc/elastic.keytab + + # Path to the Kerberos configuration. + #kerberos.config_path: /etc/krb5.conf + + # Name of the Kerberos user. + #kerberos.username: elastic + + # Password of the Kerberos user. It is used when auth_type is set to password. + #kerberos.password: changeme + + # Kerberos realm. + #kerberos.realm: ELASTIC + #----------------------------- Logstash output --------------------------------- #output.logstash: # Boolean flag to enable or disable the output module. @@ -717,6 +738,9 @@ output.elasticsearch: # never, once, and freely. Default is never. 
#ssl.renegotiation: never + # Enable Kerberos support. Kerberos is automatically enabled if any Kerberos setting is set. + #kerberos.enabled: true + # Authentication type to use with Kerberos. Available options: keytab, password. #kerberos.auth_type: password @@ -1303,6 +1327,27 @@ logging.files: # never, once, and freely. Default is never. #ssl.renegotiation: never + # Enable Kerberos support. Kerberos is automatically enabled if any Kerberos setting is set. + #kerberos.enabled: true + + # Authentication type to use with Kerberos. Available options: keytab, password. + #kerberos.auth_type: password + + # Path to the keytab file. It is used when auth_type is set to keytab. + #kerberos.keytab: /etc/elastic.keytab + + # Path to the Kerberos configuration. + #kerberos.config_path: /etc/krb5.conf + + # Name of the Kerberos user. + #kerberos.username: elastic + + # Password of the Kerberos user. It is used when auth_type is set to password. + #kerberos.password: changeme + + # Kerberos realm. + #kerberos.realm: ELASTIC + #metrics.period: 10s #state.period: 1m diff --git a/x-pack/auditbeat/Makefile b/x-pack/auditbeat/Makefile index 56633e2b3e5..019d3b9309a 100644 --- a/x-pack/auditbeat/Makefile +++ b/x-pack/auditbeat/Makefile @@ -1,3 +1,3 @@ ES_BEATS ?= ../.. -include $(ES_BEATS)/dev-tools/make/xpack.mk +include $(ES_BEATS)/dev-tools/make/mage.mk diff --git a/x-pack/auditbeat/auditbeat.reference.yml b/x-pack/auditbeat/auditbeat.reference.yml index a164895d2d0..5e98dab34de 100644 --- a/x-pack/auditbeat/auditbeat.reference.yml +++ b/x-pack/auditbeat/auditbeat.reference.yml @@ -582,6 +582,27 @@ output.elasticsearch: # The pin is a base64 encoded string of the SHA-256 fingerprint. #ssl.ca_sha256: "" + # Enable Kerberos support. Kerberos is automatically enabled if any Kerberos setting is set. + #kerberos.enabled: true + + # Authentication type to use with Kerberos. Available options: keytab, password. + #kerberos.auth_type: password + + # Path to the keytab file. It is used when auth_type is set to keytab. + #kerberos.keytab: /etc/elastic.keytab + + # Path to the Kerberos configuration. + #kerberos.config_path: /etc/krb5.conf + + # Name of the Kerberos user. + #kerberos.username: elastic + + # Password of the Kerberos user. It is used when auth_type is set to password. + #kerberos.password: changeme + + # Kerberos realm. + #kerberos.realm: ELASTIC + #----------------------------- Logstash output --------------------------------- #output.logstash: # Boolean flag to enable or disable the output module. @@ -850,6 +871,9 @@ output.elasticsearch: # never, once, and freely. Default is never. #ssl.renegotiation: never + # Enable Kerberos support. Kerberos is automatically enabled if any Kerberos setting is set. + #kerberos.enabled: true + # Authentication type to use with Kerberos. Available options: keytab, password. #kerberos.auth_type: password @@ -1436,6 +1460,27 @@ logging.files: # never, once, and freely. Default is never. #ssl.renegotiation: never + # Enable Kerberos support. Kerberos is automatically enabled if any Kerberos setting is set. + #kerberos.enabled: true + + # Authentication type to use with Kerberos. Available options: keytab, password. + #kerberos.auth_type: password + + # Path to the keytab file. It is used when auth_type is set to keytab. + #kerberos.keytab: /etc/elastic.keytab + + # Path to the Kerberos configuration. + #kerberos.config_path: /etc/krb5.conf + + # Name of the Kerberos user. + #kerberos.username: elastic + + # Password of the Kerberos user. 
It is used when auth_type is set to password.
+  #kerberos.password: changeme
+
+  # Kerberos realm.
+  #kerberos.realm: ELASTIC
+
 #metrics.period: 10s
 #state.period: 1m
diff --git a/x-pack/auditbeat/magefile.go b/x-pack/auditbeat/magefile.go
index 91659ed3ca4..cb70fb3d44d 100644
--- a/x-pack/auditbeat/magefile.go
+++ b/x-pack/auditbeat/magefile.go
@@ -20,7 +20,7 @@ import (
 	// mage:import
 	"github.com/elastic/beats/v7/dev-tools/mage/target/common"
 	// mage:import
-	"github.com/elastic/beats/v7/dev-tools/mage/target/unittest"
+	_ "github.com/elastic/beats/v7/dev-tools/mage/target/unittest"
 	// mage:import
 	_ "github.com/elastic/beats/v7/dev-tools/mage/target/integtest"
 	// mage:import
@@ -35,12 +35,6 @@ func init() {
 	devtools.Platforms = devtools.Platforms.Filter("!linux/ppc64 !linux/mips64")
 }
 
-// Aliases provides compatibility with CI while we transition all Beats
-// to having common testing targets.
-var Aliases = map[string]interface{}{
-	"goTestUnit": unittest.GoUnitTest, // dev-tools/jenkins_ci.ps1 uses this.
-}
-
 // Build builds the Beat binary.
 func Build() error {
 	return devtools.Build(devtools.DefaultBuildArgs())
diff --git a/x-pack/dockerlogbeat/Makefile b/x-pack/dockerlogbeat/Makefile
index 950d9029145..2367bfdeaa4 100644
--- a/x-pack/dockerlogbeat/Makefile
+++ b/x-pack/dockerlogbeat/Makefile
@@ -9,7 +9,7 @@ ES_BEATS?=../../
 GOX_OS=linux
 GOX_FLAGS=-arch="amd64 386 arm ppc64 ppc64le"
 
-include $(ES_BEATS)/dev-tools/make/xpack.mk
+include $(ES_BEATS)/dev-tools/make/mage.mk
 
 .PHONY: unit-tests
diff --git a/x-pack/elastic-agent/CHANGELOG.asciidoc b/x-pack/elastic-agent/CHANGELOG.asciidoc
index e178ffe3e8b..bb57c61f03e 100644
--- a/x-pack/elastic-agent/CHANGELOG.asciidoc
+++ b/x-pack/elastic-agent/CHANGELOG.asciidoc
@@ -23,7 +23,10 @@
 - Remove the kbn-version on each request to the Kibana API. {pull}17764[17764]
 - Fixed process spawning on Windows {pull}17751[17751]
 - Fixed injected log path to monitoring beat {pull}17833[17833]
-- Make sure that the Elastic Agent connect over TLS in cloud. {pull}xx[xxx]
+- Make sure that the Elastic Agent connects over TLS in cloud. {pull}17843[17843]
+- Moved stream.* fields to top of event {pull}17858[17858]
+- Fix an issue where the checkin_frequency, jitter, and backoff options were not configurable. {pull}17843[17843]
+- Use default output by default {pull}18091[18091]
 
 ==== New features
 
@@ -34,3 +37,9 @@
 - Introduced `mage demo` command {pull}17312[17312]
 - Display the stability of the agent at enroll and start. {pull}17336[17336]
 - Expose stream.* variables in events {pull}17468[17468]
+- Make the monitoring configuration reloadable {pull}17855[17855]
+- Pack ECS metadata into the request payload sent to Fleet {pull}17894[17894]
+- Allow CLI overrides of paths {pull}17781[17781]
+- Enable Filebeat inputs: s3, azureeventhub, cloudfoundry, httpjson, netflow, o365audit. {pull}17909[17909]
+- Use data subfolder as default for process logs {pull}17960[17960]
+- Enable debug log level for Metricbeat and Filebeat when run under the Elastic Agent. {pull}17935[17935]
diff --git a/x-pack/elastic-agent/Makefile b/x-pack/elastic-agent/Makefile
index 71dd1dea852..50e8845e1b2 100644
--- a/x-pack/elastic-agent/Makefile
+++ b/x-pack/elastic-agent/Makefile
@@ -7,7 +7,7 @@ ES_BEATS ?= ../..
 #
 # Includes
 #
-include $(ES_BEATS)/dev-tools/make/xpack.mk
+include $(ES_BEATS)/dev-tools/make/mage.mk
 
 .PHONY: docs
 docs: ## @build Builds the documentation for the beat
diff --git a/x-pack/elastic-agent/_meta/elastic-agent.fleet.yml b/x-pack/elastic-agent/_meta/elastic-agent.fleet.yml
index 08304cbaa92..8d817c8212e 100644
--- a/x-pack/elastic-agent/_meta/elastic-agent.fleet.yml
+++ b/x-pack/elastic-agent/_meta/elastic-agent.fleet.yml
@@ -5,6 +5,19 @@ management:
   mode: "fleet"
 
+  # Add variance between API calls to better distribute the calls.
+  #jitter: 5s
+
+  # The Elastic Agent performs exponential backoff when an error happens.
+  #
+  #backoff:
+  #
+  #  Initial time to wait before retrying the call.
+  #  init: 1s
+  #
+  #  Maximum time to wait before retrying the call.
+  #  max: 10s
+
   download:
     # source of the artifacts, requires elastic like structure and naming of the binaries
     # e.g /windows-x86.zip
diff --git a/x-pack/elastic-agent/magefile.go b/x-pack/elastic-agent/magefile.go
index a626d638cf1..8168890998e 100644
--- a/x-pack/elastic-agent/magefile.go
+++ b/x-pack/elastic-agent/magefile.go
@@ -20,15 +20,16 @@ import (
 	"github.com/magefile/mage/sh"
 
 	devtools "github.com/elastic/beats/v7/dev-tools/mage"
-	"github.com/elastic/beats/v7/dev-tools/mage/target/common"
-	"github.com/elastic/beats/v7/dev-tools/mage/target/unittest"
 	"github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/release"
 
 	// mage:import
-	_ "github.com/elastic/beats/v7/dev-tools/mage/target/common"
-
+	"github.com/elastic/beats/v7/dev-tools/mage/target/common"
 	// mage:import
 	_ "github.com/elastic/beats/v7/dev-tools/mage/target/docs"
+	// mage:import
+	_ "github.com/elastic/beats/v7/dev-tools/mage/target/integtest/notests"
+	// mage:import
+	"github.com/elastic/beats/v7/dev-tools/mage/target/test"
 )
 
 const (
@@ -48,6 +49,7 @@ var Aliases = map[string]interface{}{
 
 func init() {
 	common.RegisterCheckDeps(Update, Check.All)
+	test.RegisterDeps(UnitTest)
 
 	devtools.BeatDescription = "Agent manages other beats based on configuration provided."
 	devtools.BeatLicense = "Elastic License"
@@ -421,22 +423,11 @@ func combineErr(errors ...error) error {
 	return e
 }
 
-// GoTestUnit is an alias for goUnitTest.
-func GoTestUnit() {
-	mg.Deps(unittest.GoUnitTest)
-}
-
 // UnitTest performs unit test on agent.
 func UnitTest() {
 	mg.Deps(Test.All)
 }
 
-// IntegTest calls go integtest, we dont have python integ test so far
-// TODO: call integtest mage package when python tests are available
-func IntegTest() {
-	os.Create(filepath.Join("build", "TEST-go-integration.out"))
-}
-
 // BuildFleetCfg embed the default fleet configuration as part of the binary.
 func BuildFleetCfg() error {
 	goF := filepath.Join("dev-tools", "cmd", "buildfleetcfg", "buildfleetcfg.go")
@@ -447,11 +438,6 @@ func BuildFleetCfg() error {
 	return RunGo("run", goF, "--in", in, "--out", out)
 }
 
-// Fields placeholder methods to fix the windows build.
-func Fields() error {
-	return nil
-}
-
 // Enroll runs agent which enrolls before running.
 func (Demo) Enroll() error {
 	env := map[string]string{
diff --git a/x-pack/elastic-agent/main_test.go b/x-pack/elastic-agent/main_test.go
index cdced7ea677..79e2a9dc719 100644
--- a/x-pack/elastic-agent/main_test.go
+++ b/x-pack/elastic-agent/main_test.go
@@ -8,17 +8,21 @@ import (
 	"flag"
 	"testing"
 
-	// Just using this a place holder.
- "github.com/elastic/beats/v7/x-pack/filebeat/cmd" + "github.com/spf13/cobra" ) var systemTest *bool func init() { testing.Init() + + cmd := &cobra.Command{ + Use: "elastic-agent [subcommand]", + } + systemTest = flag.Bool("systemTest", false, "Set to true when running system tests") - cmd.RootCmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("systemTest")) - cmd.RootCmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("test.coverprofile")) + cmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("systemTest")) + cmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("test.coverprofile")) } // Test started when the test binary is started. Only calls main. diff --git a/x-pack/elastic-agent/pkg/agent/application/application.go b/x-pack/elastic-agent/pkg/agent/application/application.go index 78634055989..878809dbd08 100644 --- a/x-pack/elastic-agent/pkg/agent/application/application.go +++ b/x-pack/elastic-agent/pkg/agent/application/application.go @@ -26,28 +26,28 @@ func New(log *logger.Logger, pathConfigFile string) (Application, error) { // Load configuration from disk to understand in which mode of operation // we must start the elastic-agent, the mode of operation cannot be changed without restarting the // elastic-agent. - config, err := config.LoadYAML(pathConfigFile) + rawConfig, err := config.LoadYAML(pathConfigFile) if err != nil { return nil, err } - if err := InjectAgentConfig(config); err != nil { + if err := InjectAgentConfig(rawConfig); err != nil { return nil, err } - return createApplication(log, pathConfigFile, config) + return createApplication(log, pathConfigFile, rawConfig) } func createApplication( log *logger.Logger, pathConfigFile string, - config *config.Config, + rawConfig *config.Config, ) (Application, error) { warn.LogNotGA(log) log.Info("Detecting execution mode") c := localDefaultConfig() - err := config.Unpack(c) + err := rawConfig.Unpack(c) if err != nil { return nil, errors.New(err, "initiating application") } @@ -63,10 +63,10 @@ func createApplication( switch mgmt.Mode { case localMode: log.Info("Agent is managed locally") - return newLocal(ctx, log, pathConfigFile, config) + return newLocal(ctx, log, pathConfigFile, rawConfig) case fleetMode: log.Info("Agent is managed by Fleet") - return newManaged(ctx, log, config) + return newManaged(ctx, log, rawConfig) default: return nil, ErrInvalidMgmtMode } diff --git a/x-pack/elastic-agent/pkg/agent/application/configuration_embed.go b/x-pack/elastic-agent/pkg/agent/application/configuration_embed.go index 8a064cf9268..db61023400e 100644 --- a/x-pack/elastic-agent/pkg/agent/application/configuration_embed.go +++ b/x-pack/elastic-agent/pkg/agent/application/configuration_embed.go @@ -15,7 +15,7 @@ var DefaultAgentFleetConfig []byte func init() { // Packed File // _meta/elastic-agent.fleet.yml - unpacked := 
packer.MustUnpack("eJyMlk2Xo7oRhvf5GbPPDR/NnCE7CzdCjKFjfFtfmxwkeQS2ZHNigw05+e85wm53z6Rzz130xkh6q956qqr//eWfdnuu/7Y19encyr/Wens4//bDbLfn30Zrvvz9C5q8+B+//7k/9KyMsHhk5KL/7J3H3/q4Q8+5YbSaEDQTguUgLO94iEdO1lqRaM8p0pW9Gk7XJ5SZQW3AjpEnzSG2NYkMgv4gw7WWAfYUND0Pi+9oBFba+Iyy6sg3YFWTvBH2WePXfY/S9zcUBXtGK+PO1bTQ3JoT3wBPjGAngsjWRPnSvmoFG4Oy0qis6oRVkzvP6NrpNO67sLGHstKXGRjkoTI8Ae12A6CA2Kjk6bsgac+IMoLgXi2P31ECziKozEu7aPkttzbR3V6Eqhcwbvh8ZrFDWTkomu/4Bgy8BV4NXzWjuVcT3rCwmlYJGDlN/ZrmRo7ACJhOCpodgteuDl61DKtRkdJTNDcIpj1PQM+I3wkrtQjYLS+Yd8KmY03wlLQLjUZgVlbqVag6YatBhcV5u9TB6tB1cnHUj5gw3mx+P2pkG09lYHppvw0/xWbxidHSq0nRs+DbwGEcCnsdGKmmVXCr+WpE9zxBo6DWCn67x1R10uKdgvG43cy18BgxfU2iA4LVoILoJIJ07xiRMN3VQXrgtHC+eozmB06rHxyakZPSE2EeOZ/z7HxhtDqubNUomMar4P0d+Yij6gTBg6JrzW08ftSatTPQsODc8OB11qqJb0SIvZd2MRVZcXsD8k7AVxeXVyczSw3KgM/stWMj+Bene61o6RixIkRaWTzWhEef5fbRz3ttOt6CXoTrN1Y7OYKLtHjiJPLQ88NzvXa8HdZaBeYkEuDJw372d+YrARdF8pM7Vwc4QpCPIvA0J9dGhlXHxmgnRhBykk81rcxKu7qDg7zl9hUlaL8N55r1HKYeo97w4e6Fh4v2LV5xKD1Grid6O+/6dmJh3jGLp3t/nkRQNgqaQbQz5487v9R3kFk+8DBvBC169Gy828xITzL4dvPU8ZkUt9pluWGkmll/3NuASUK8qwnvWJB6NYl7OT65Wv4a56c51iSaFExPIkHfkwMYRVAaGRZf77meaxJ1gqQnNM+BtRYHfGYWjwjyQbZOOw2FxXuUvd0tjbwcNYJ+J0x8EWHuWLoWy8WluOciSPpUE98XGzB/RzDyBcmNbG/8SBdjEDmeLtLGO07L6d4LZ0abHxLGo1oedTEtLsVy8X84L0dOnB+Owcio8ae3epSpIydPs48swJMcZ+5mDmWId/NvAfcFvEZvb838zsw17uyjV+5eOR9HlYBBQjwyWnUieNJuBrMg7mVg9pzmLqcGQd7U5OrmsMvp5qGJ/6f3kkNuVJZHL/rOAIka5mZrAvacVkZaN/ccc+rIaXV0e0VaPPeF05UwnmRgWgFfe/RcGW5TX2RrfdsLjrHX3sX+eHd51CrLfb6+6c1vWWOkH+9EiHuVvLPGafGBhdg6NlWWd7O3v2gVM4+LHUpv+bidqFxvu3vLZy0IbsSh0DV99HbLN24PVG6G7xGMe7fDnD8IplbBWdP9ZhkxJ7cLbv38FkP5gwWxLw7rr2j5NsMqt0silJUXTsqOu7xGsHc9vt2ATrTgLEfQclqFnOD+D/OZwDy7OHSzae80LjPzd7Zp8KmWY7cTFJ8fsUDTI+hPCDotZWYe3zwfn3QFsWUUn1Ti2FxMxfKm+1HjpV344l6vijYXEURGHKrO/e/CIfY4zV0PO/2whtirZ+8e+m5evbPtuPj5jf7ONq1pdZzzfPesEXati/FRp2nmNrnocrc4oeXz+Ngfn7Pn2DfbDAzC4l5B04jl8fHty3/+8t8AAAD///ycVl0=") + unpacked := 
packer.MustUnpack("eJyMVkGTozjSvX8/o+/fLIaidtiIOViuQog29Bp3SUiXDSS5BbZkE2sDho397xsStqt6pnZjDj4Yocx8me/l419f/mF2l+ovO12dL434/0rtjpdffujd7vLLaPSXv31Bkxf9/fuf+6FXqbnBIyWD+rN3Hr/NaY9eU03LYkJQTwjmPTesZQEeGdkoScIDK5EqzFWzcnNGie7lFuwpeVIMYlORUCO46EWwUcLHnoS6Y0H2FY3ACBNdUFKc2BasK5LW3Lwq/HboUPweQ5bgQMtC2/eqMlPM6DPbAo+PYM/90FRELoR5UxLWGiW5lknRciMn+z4tNzZPbc+5iTyU5AuRgF4cC81WoNltAeQQa7l6+spJ3FEiNSe4ky+nr2gFLtwv9Ldm2bAZW7NSJ4VWmdqQ4oASVguja25yjWCqZSI1I09qg0GKYF5zeLU19PNZoUUDDlWZe8LoRt7wMIfxehbj01e0Wu4roz0J8fitWS7EcHLPUOzeizmMJwn1Hr3GR0ZCD8GiZ2WmirIeuB9qfixaO19q4n3lR4Y1IKgg7hCMOwTxKEw0IljXIgGaN3O+VbNUyE9r6l96Zuiz+z/MGLckbCXUNV8BryIL7bAksq7KjaIGGx6kGiUWWxpWJDx+xLO+9amyMV5OKiuz93wjyGlZt7zEdvYtJ3Z2vyrpx61cgYYR1tt5CYM9cdQtN2Kerevp0N1iXGipntHL62D7xGAUcHPtKdk8384nHuCR+m+K+/R2Px4l1IaS3BPjYONfJdGj7SEj11oERUtHcK7IxfLIE0e8lwke2RbU3GwUN/HF4YSRuWFtKhJaDmhx6xnbhke0ioKKhAceyGldqqe8CZ+rcmn5dKsJ47fD4Rmt0pNMikFMp37tf6wt1PdZr03er/2il3545n58EGPUMBJ7Yvy1ueEcaFmc5tm4mg5VmWoaFL04HhT1o07CuOVGd2wEtqaOw6hmEB9QAuzca+bjycaSMB6Zj73SL1ph8F7CaNx9PynUFH8VMPaqVWh1WP/YRu9xkuxeh+MID7CHIOst199zudwDJfk/rb7mXPrCSLRw3JjAdIthKnLVti4JleME3YJBkvRckUxVPg5Rgie7c5jlcsK0MNrsyCfYPs56uHORzjsAspbDN2V3iUhSLXzcydUn+kpky+GgLL9nHTt+KZHghtv34EXvtrcdBvGZlrlXkayjvuUyboSvPba1OgMDC2ZsdpekycXNbG2KWsI4WvvvdwWUA3rXziRhfOZxNM8YFlr4+ViRVAvbb/imOIz2tCxa7j85nj/u/G6+6wCM3JcjJYtp3YBUrkArRlDzJO9vPZ13zTy7kZH4MGv6fs9TIgCa+tpUJK8l1D0/Zk6Lv6/zc4y6E0Fh9dusVDsIE+1ZmU/fblg50V1FFnbXDDxIPQTDBbc4G2B4gGzuWvqh3dmPu8ztFGA1+UPAaHRc+r4cspflDcui3kF9kbZ2e74CnSSLxnrVzJ98oKXsmJtp2lMfT+KmBU7ipzKwPrF5Ri+ZjTn8F57vhcG1nDnesVL8FGt938Guj7kWQeZ4d/ci6p7lRpLrebe9x7L8dZxzfvfQynLule2jOG4UD4AWJvYqEnUPXz7mLYOzL9MtMJTosyxTp7lbD3/8QXuqHVlZWK3f+oY7StKz0xnEHitT22PLOYvFqyAe73vf5bV+GuQtNVe9bsArI6yW5OrNNTHLMe384D3uM0qKUZK3Od8cq2Vl/oP60YIfNx+4hqcPXOiZ5WZZjPb5H3K9ZHdv2Do8W7DngdN2z5rlFcGFpiS03zAPbdO7zxhtudVz9x2hL3bns6PzbPeMmbjlzgucnh81lH7eSxJ635rlfYcdrJfstmASEO8rwlrnL24/HFRFnhQvM+ufnvSx5v8bz+B2FyysH4Q2R5Zk79yOo09zWe5WZLHg20ctLW/ARYygYWURMGL5eO951qHXQjMTL3iycdzMpuUt74cc308qJ96c+xU/CRh1dkdWJD7b+uw3i9WwzS/9Wlu/tR5wz2/31Tu34wnBn2Pcvxewr71q5XA+ekZJeEAv2WNOYrK8XZ7RCx3Wq+U12z/841Pu2TP28zfS8+NM/fbbl3//338CAAD//7QM6Mk=") raw, ok := unpacked["_meta/elastic-agent.fleet.yml"] if !ok { // ensure we have something loaded. 
diff --git a/x-pack/elastic-agent/pkg/agent/application/emitter.go b/x-pack/elastic-agent/pkg/agent/application/emitter.go index 2e4c07bf0dd..2279d78565d 100644 --- a/x-pack/elastic-agent/pkg/agent/application/emitter.go +++ b/x-pack/elastic-agent/pkg/agent/application/emitter.go @@ -17,12 +17,16 @@ import ( type decoratorFunc = func(string, *transpiler.AST, []program.Program) ([]program.Program, error) type filterFunc = func(*logger.Logger, *transpiler.AST) error +type reloadable interface { + Reload(cfg *config.Config) error +} + type configModifiers struct { Filters []filterFunc Decorators []decoratorFunc } -func emitter(log *logger.Logger, router *router, modifiers *configModifiers) emitterFunc { +func emitter(log *logger.Logger, router *router, modifiers *configModifiers, reloadables ...reloadable) emitterFunc { return func(c *config.Config) error { if err := InjectAgentConfig(c); err != nil { return err @@ -62,6 +66,12 @@ func emitter(log *logger.Logger, router *router, modifiers *configModifiers) emi } } + for _, r := range reloadables { + if err := r.Reload(c); err != nil { + return err + } + } + return router.Dispatch(ast.HashStr(), programsToRun) } } diff --git a/x-pack/elastic-agent/pkg/agent/application/enroll_cmd.go b/x-pack/elastic-agent/pkg/agent/application/enroll_cmd.go index b1743e92417..323937b080c 100644 --- a/x-pack/elastic-agent/pkg/agent/application/enroll_cmd.go +++ b/x-pack/elastic-agent/pkg/agent/application/enroll_cmd.go @@ -141,7 +141,7 @@ func (c *EnrollCmd) Execute() error { metadata, err := metadata() if err != nil { - return errors.New(err, "acquiring hostname") + return errors.New(err, "acquiring metadata failed") } r := &fleetapi.EnrollRequest{ diff --git a/x-pack/elastic-agent/pkg/agent/application/enroll_cmd_test.go b/x-pack/elastic-agent/pkg/agent/application/enroll_cmd_test.go index 678513f5245..d59cc620e1a 100644 --- a/x-pack/elastic-agent/pkg/agent/application/enroll_cmd_test.go +++ b/x-pack/elastic-agent/pkg/agent/application/enroll_cmd_test.go @@ -11,6 +11,7 @@ import ( "io/ioutil" "net" "net/http" + "net/http/httptest" "os" "runtime" "strconv" @@ -326,15 +327,9 @@ func withServer( test func(t *testing.T, host string), ) func(t *testing.T) { return func(t *testing.T) { - listener, err := net.Listen("tcp", ":0") - require.NoError(t, err) - defer listener.Close() - - port := listener.Addr().(*net.TCPAddr).Port - - go http.Serve(listener, m(t)) - - test(t, "localhost:"+strconv.Itoa(port)) + s := httptest.NewServer(m(t)) + defer s.Close() + test(t, s.Listener.Addr().String()) } } @@ -343,7 +338,6 @@ func withTLSServer( test func(t *testing.T, caBytes []byte, host string), ) func(t *testing.T) { return func(t *testing.T) { - ca, err := authority.NewCA() require.NoError(t, err) pair, err := ca.GeneratePair() @@ -352,7 +346,7 @@ func withTLSServer( serverCert, err := tls.X509KeyPair(pair.Crt, pair.Key) require.NoError(t, err) - listener, err := net.Listen("tcp", ":0") + listener, err := net.Listen("tcp", "127.0.0.1:0") require.NoError(t, err) defer listener.Close() diff --git a/x-pack/elastic-agent/pkg/agent/application/filters/constraints_filter.go b/x-pack/elastic-agent/pkg/agent/application/filters/constraints_filter.go index 9241f3dd3e6..2cb92cffd97 100644 --- a/x-pack/elastic-agent/pkg/agent/application/filters/constraints_filter.go +++ b/x-pack/elastic-agent/pkg/agent/application/filters/constraints_filter.go @@ -6,7 +6,6 @@ package filters import ( "fmt" - "runtime" "github.com/Masterminds/semver" @@ -15,8 +14,6 @@ import ( 
"github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/transpiler" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/boolexp" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/core/logger" - "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/release" - "github.com/elastic/go-sysinfo" ) const ( @@ -25,24 +22,6 @@ const ( validateVersionFuncName = "validate_version" ) -// List of variables available to be used in constraint definitions. -const ( - // `agent.id` is a generated (in standalone) or assigned (in fleet) agent identifier. - agentIDKey = "agent.id" - // `agent.version` specifies current version of an agent. - agentVersionKey = "agent.version" - // `host.architecture` defines architecture of a host (e.g. x86_64, arm, ppc, mips). - hostArchKey = "host.architecture" - // `os.family` defines a family of underlying operating system (e.g. redhat, debian, freebsd, windows). - osFamilyKey = "os.family" - // `os.kernel` specifies current version of a kernel in a semver format. - osKernelKey = "os.kernel" - // `os.platform` specifies platform agent is running on (e.g. centos, ubuntu, windows). - osPlatformKey = "os.platform" - // `os.version` specifies version of underlying operating system (e.g. 10.12.6). - osVersionKey = "os.version" -) - var ( boolexpVarStore *constraintVarStore boolexpMethodsRegs *boolexp.MethodsReg @@ -245,30 +224,20 @@ func newVarStore() (*constraintVarStore, error) { } func initVarStore(store *constraintVarStore) error { - sysInfo, err := sysinfo.Host() + agentInfo, err := info.NewAgentInfo() if err != nil { return err } - agentInfo, err := info.NewAgentInfo() + meta, err := agentInfo.ECSMetadata() if err != nil { - return err + return errors.New(err, "failed to gather host metadata") } - info := sysInfo.Info() - - // Agent - store.vars[agentIDKey] = agentInfo.AgentID() - store.vars[agentVersionKey] = release.Version() - - // Host - store.vars[hostArchKey] = info.Architecture - - // Operating system - store.vars[osFamilyKey] = runtime.GOOS - store.vars[osKernelKey] = info.KernelVersion - store.vars[osPlatformKey] = info.OS.Family - store.vars[osVersionKey] = info.OS.Version + // keep existing, overwrite gathered + for k, v := range meta { + store.vars[k] = v + } return nil } diff --git a/x-pack/elastic-agent/pkg/agent/application/fleet_gateway.go b/x-pack/elastic-agent/pkg/agent/application/fleet_gateway.go index 97c1964f8ac..d21e2392f5b 100644 --- a/x-pack/elastic-agent/pkg/agent/application/fleet_gateway.go +++ b/x-pack/elastic-agent/pkg/agent/application/fleet_gateway.go @@ -11,11 +11,33 @@ import ( "github.com/elastic/beats/v7/libbeat/common/backoff" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/errors" + "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/config" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/core/logger" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/fleetapi" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/scheduler" ) +// Default Configuration for the Fleet Gateway. 
+var defaultGatewaySettings = &fleetGatewaySettings{ + Duration: 30 * time.Second, + Jitter: 5 * time.Second, + Backoff: backoffSettings{ + Init: 1 * time.Second, + Max: 10 * time.Second, + }, +} + +type fleetGatewaySettings struct { + Duration time.Duration `config:"checkin_frequency"` + Jitter time.Duration `config:"jitter"` + Backoff backoffSettings `config:"backoff"` +} + +type backoffSettings struct { + Init time.Duration `config:"init"` + Max time.Duration `config:"max"` +} + type dispatcher interface { Dispatch(acker fleetAcker, actions ...action) error } @@ -52,27 +74,22 @@ type fleetGateway struct { acker fleetAcker } -type fleetGatewaySettings struct { - Duration time.Duration - Jitter time.Duration - Backoff backoffSettings -} - -type backoffSettings struct { - Init time.Duration - Max time.Duration -} - func newFleetGateway( ctx context.Context, log *logger.Logger, - settings *fleetGatewaySettings, + rawConfig *config.Config, agentInfo agentInfo, client clienter, d dispatcher, r fleetReporter, acker fleetAcker, ) (*fleetGateway, error) { + + settings := defaultGatewaySettings + if err := rawConfig.Unpack(settings); err != nil { + return nil, errors.New(err, "fail to read gateway configuration") + } + scheduler := scheduler.NewPeriodicJitter(settings.Duration, settings.Jitter) return newFleetGatewayWithScheduler( ctx, @@ -182,6 +199,8 @@ func (f *fleetGateway) execute(ctx context.Context) (*fleetapi.CheckinResponse, var metaData map[string]interface{} if m, err := metadata(); err == nil { metaData = m + } else { + f.log.Error(errors.New("failed to load metadata", err)) } // checkin diff --git a/x-pack/elastic-agent/pkg/agent/application/global_config.go b/x-pack/elastic-agent/pkg/agent/application/global_config.go index 44e9f2772ff..16d5f21639e 100644 --- a/x-pack/elastic-agent/pkg/agent/application/global_config.go +++ b/x-pack/elastic-agent/pkg/agent/application/global_config.go @@ -5,26 +5,15 @@ package application import ( - "os" - "path/filepath" + "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/application/paths" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/errors" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/config" ) -var ( - homePath string - dataPath string -) - -func init() { - homePath = retrieveExecutablePath() - dataPath = retrieveDataPath() -} - // InjectAgentConfig injects config to a provided configuration. func InjectAgentConfig(c *config.Config) error { - globalConfig := AgentGlobalConfig() + globalConfig := agentGlobalConfig() if err := c.Merge(globalConfig); err != nil { return errors.New("failed to inject agent global config", err, errors.TypeConfig) } @@ -32,29 +21,13 @@ func InjectAgentConfig(c *config.Config) error { return nil } -// AgentGlobalConfig gets global config used for resolution of variables inside configuration +// agentGlobalConfig gets global config used for resolution of variables inside configuration // such as ${path.data}. -func AgentGlobalConfig() map[string]interface{} { +func agentGlobalConfig() map[string]interface{} { return map[string]interface{}{ "path": map[string]interface{}{ - "data": dataPath, - "home": homePath, + "data": paths.Data(), + "home": paths.Home(), }, } } - -// retrieveExecutablePath returns a directory where binary lives -// Executable is not supported on nacl. 
-func retrieveExecutablePath() string {
-    execPath, err := os.Executable()
-    if err != nil {
-        panic(err)
-    }
-
-    return filepath.Dir(execPath)
-}
-
-// retrieveHomePath returns a home directory of current user
-func retrieveDataPath() string {
-    return filepath.Join(retrieveExecutablePath(), "data")
-}
diff --git a/x-pack/elastic-agent/pkg/agent/application/info/agent_metadata.go b/x-pack/elastic-agent/pkg/agent/application/info/agent_metadata.go
new file mode 100644
index 00000000000..79371e76600
--- /dev/null
+++ b/x-pack/elastic-agent/pkg/agent/application/info/agent_metadata.go
@@ -0,0 +1,115 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package info
+
+import (
+    "fmt"
+    "os"
+    "runtime"
+    "strings"
+
+    "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/release"
+    "github.com/elastic/go-sysinfo"
+    "github.com/elastic/go-sysinfo/types"
+)
+
+// List of variables available to be used in constraint definitions.
+const (
+    // `agent.id` is a generated (in standalone) or assigned (in fleet) agent identifier.
+    agentIDKey = "agent.id"
+    // `agent.version` specifies current version of an agent.
+    agentVersionKey = "agent.version"
+
+    // `os.family` defines a family of underlying operating system (e.g. redhat, debian, freebsd, windows).
+    osFamilyKey = "os.family"
+    // `os.kernel` specifies current version of a kernel in a semver format.
+    osKernelKey = "os.kernel"
+    // `os.platform` specifies platform agent is running on (e.g. centos, ubuntu, windows).
+    osPlatformKey = "os.platform"
+    // `os.version` specifies version of underlying operating system (e.g. 10.12.6).
+    osVersionKey = "os.version"
+    // `os.name` is an operating system name.
+    // Currently we just normalize the name (i.e. macOS, Windows, Linux). See https://www.elastic.co/guide/en/ecs/current/ecs-os.html
+    osNameKey = "os.name"
+    // `os.full` is an operating system name, including the version or code name.
+    osFullKey = "os.full"
+
+    // `host.architecture` defines architecture of a host (e.g. x86_64, arm, ppc, mips).
+    hostArchKey = "host.architecture"
+    // `host.hostname` specifies the hostname of the host.
+    hostHostnameKey = "host.hostname"
+    // `host.name` specifies the name of the host; the agent sets it to the hostname.
+    hostNameKey = "host.name"
+    // `host.id` is a unique host ID.
+    // As hostname is not always unique, use values that are meaningful in your environment.
+    hostIDKey = "host.id"
+    // `host.ip` is the host's IP addresses.
+    // Note: this field should contain an array of values.
+    hostIPKey = "host.ip"
+    // `host.mac` is the host's MAC addresses.
+    // Note: this field should contain an array of values.
+    hostMACKey = "host.mac"
+)
+
+// ECSMetadata returns the agent's ECS-compliant metadata.
+func (i *AgentInfo) ECSMetadata() (map[string]interface{}, error) { + hostname, err := os.Hostname() + if err != nil { + return nil, err + } + + // TODO: remove these values when kibana migrates to ECS + meta := map[string]interface{}{ + "platform": runtime.GOOS, + "version": release.Version(), + "host": hostname, + } + + sysInfo, err := sysinfo.Host() + if err != nil { + return nil, err + } + + info := sysInfo.Info() + + // Agent + meta[agentIDKey] = i.agentID + meta[agentVersionKey] = release.Version() + + // Host + meta[hostArchKey] = info.Architecture + meta[hostHostnameKey] = hostname + meta[hostNameKey] = hostname + meta[hostIDKey] = info.UniqueID + meta[hostIPKey] = fmt.Sprintf("[%s]", strings.Join(info.IPs, ",")) + meta[hostMACKey] = fmt.Sprintf("[%s]", strings.Join(info.MACs, ",")) + + // Operating system + meta[osFamilyKey] = runtime.GOOS + meta[osKernelKey] = info.KernelVersion + meta[osPlatformKey] = info.OS.Family + meta[osVersionKey] = info.OS.Version + meta[osNameKey] = info.OS.Name + meta[osFullKey] = getFullOSName(info) + + return meta, nil +} + +func getFullOSName(info types.HostInfo) string { + var sb strings.Builder + sb.WriteString(info.OS.Name) + if codeName := info.OS.Codename; codeName != "" { + sb.WriteString(" ") + sb.WriteString(codeName) + } + + if version := info.OS.Version; version != "" { + sb.WriteString("(") + sb.WriteString(version) + sb.WriteString(")") + } + + return sb.String() +} diff --git a/x-pack/elastic-agent/pkg/agent/application/local_meta.go b/x-pack/elastic-agent/pkg/agent/application/local_meta.go index 47e358b6262..3456075baa9 100644 --- a/x-pack/elastic-agent/pkg/agent/application/local_meta.go +++ b/x-pack/elastic-agent/pkg/agent/application/local_meta.go @@ -5,21 +5,20 @@ package application import ( - "os" - "runtime" - - "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/release" + "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/application/info" + "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/errors" ) func metadata() (map[string]interface{}, error) { - hostname, err := os.Hostname() + agentInfo, err := info.NewAgentInfo() if err != nil { return nil, err } - return map[string]interface{}{ - "platform": runtime.GOOS, - "version": release.Version(), - "host": hostname, - }, nil + meta, err := agentInfo.ECSMetadata() + if err != nil { + return nil, errors.New(err, "failed to gather host metadata") + } + + return meta, nil } diff --git a/x-pack/elastic-agent/pkg/agent/application/local_mode.go b/x-pack/elastic-agent/pkg/agent/application/local_mode.go index 58a05bc86d0..65a4927a55d 100644 --- a/x-pack/elastic-agent/pkg/agent/application/local_mode.go +++ b/x-pack/elastic-agent/pkg/agent/application/local_mode.go @@ -13,6 +13,7 @@ import ( "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/errors" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/config" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/core/logger" + "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/core/plugin/app/monitoring" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/dir" reporting "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/reporter" logreporter "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/reporter/log" @@ -50,7 +51,7 @@ func newLocal( ctx context.Context, log *logger.Logger, pathConfigFile string, - config *config.Config, + rawConfig *config.Config, ) (*Local, error) { var err error if log == nil { @@ -65,7 +66,7 @@ func newLocal( } c := localConfigDefault() - if err := config.Unpack(c); 
err != nil { + if err := rawConfig.Unpack(c); err != nil { return nil, errors.New(err, "initialize local mode") } @@ -80,13 +81,18 @@ func newLocal( reporter := reporting.NewReporter(localApplication.bgContext, log, localApplication.agentInfo, logR) - router, err := newRouter(log, streamFactory(localApplication.bgContext, config, nil, reporter)) + monitor, err := monitoring.NewMonitor(rawConfig) + if err != nil { + return nil, errors.New(err, "failed to initialize monitoring") + } + + router, err := newRouter(log, streamFactory(localApplication.bgContext, rawConfig, nil, reporter, monitor)) if err != nil { return nil, errors.New(err, "fail to initialize pipeline router") } discover := discoverer(pathConfigFile, c.Management.Path) - emit := emitter(log, router, &configModifiers{Decorators: []decoratorFunc{injectMonitoring}, Filters: []filterFunc{filters.ConstraintFilter}}) + emit := emitter(log, router, &configModifiers{Decorators: []decoratorFunc{injectMonitoring}, Filters: []filterFunc{filters.ConstraintFilter}}, monitor) var cfgSource source if !c.Management.Reload.Enabled { diff --git a/x-pack/elastic-agent/pkg/agent/application/managed_mode.go b/x-pack/elastic-agent/pkg/agent/application/managed_mode.go index 5267c8fdfda..08ff4c7a479 100644 --- a/x-pack/elastic-agent/pkg/agent/application/managed_mode.go +++ b/x-pack/elastic-agent/pkg/agent/application/managed_mode.go @@ -11,27 +11,21 @@ import ( "net/http" "net/url" - "time" - "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/application/filters" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/application/info" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/errors" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/storage" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/config" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/core/logger" + "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/core/plugin/app/monitoring" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/fleetapi" reporting "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/reporter" fleetreporter "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/reporter/fleet" logreporter "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/reporter/log" ) -var gatewaySettings = &fleetGatewaySettings{ - Duration: 2 * time.Second, - Jitter: 1 * time.Second, - Backoff: backoffSettings{ - Init: 1 * time.Second, - Max: 10 * time.Second, - }, +type managementCfg struct { + Management *config.Config `config:"management"` } type apiClient interface { @@ -86,6 +80,7 @@ func newManaged( errors.M(errors.MetaKeyPath, path)) } + // merge local configuration and configuration persisted from fleet. rawConfig.Merge(config) cfg := defaultFleetAgentConfig() @@ -96,6 +91,15 @@ func newManaged( errors.M(errors.MetaKeyPath, path)) } + // Extract only management related configuration. 
+ managementCfg := &managementCfg{} + if err := rawConfig.Unpack(managementCfg); err != nil { + return nil, errors.New(err, + fmt.Sprintf("fail to unpack configuration from %s", path), + errors.TypeFilesystem, + errors.M(errors.MetaKeyPath, path)) + } + client, err := fleetapi.NewAuthWithConfig(log, cfg.API.AccessAPIKey, cfg.API.Kibana) if err != nil { return nil, errors.New(err, @@ -118,13 +122,25 @@ func newManaged( } combinedReporter := reporting.NewReporter(managedApplication.bgContext, log, agentInfo, logR, fleetR) + monitor, err := monitoring.NewMonitor(rawConfig) + if err != nil { + return nil, errors.New(err, "failed to initialize monitoring") + } - router, err := newRouter(log, streamFactory(managedApplication.bgContext, rawConfig, client, combinedReporter)) + router, err := newRouter(log, streamFactory(managedApplication.bgContext, rawConfig, client, combinedReporter, monitor)) if err != nil { return nil, errors.New(err, "fail to initialize pipeline router") } - emit := emitter(log, router, &configModifiers{Decorators: []decoratorFunc{injectMonitoring}, Filters: []filterFunc{filters.ConstraintFilter}}) + emit := emitter( + log, + router, + &configModifiers{ + Decorators: []decoratorFunc{injectMonitoring}, + Filters: []filterFunc{filters.ConstraintFilter}, + }, + monitor, + ) acker, err := newActionAcker(log, agentInfo, client) if err != nil { return nil, err @@ -170,7 +186,7 @@ func newManaged( gateway, err := newFleetGateway( managedApplication.bgContext, log, - gatewaySettings, + managementCfg.Management, agentInfo, client, actionDispatcher, diff --git a/x-pack/elastic-agent/pkg/agent/application/monitoring_decorator.go b/x-pack/elastic-agent/pkg/agent/application/monitoring_decorator.go index bd0b240c65d..89328dd05b2 100644 --- a/x-pack/elastic-agent/pkg/agent/application/monitoring_decorator.go +++ b/x-pack/elastic-agent/pkg/agent/application/monitoring_decorator.go @@ -19,10 +19,11 @@ const ( monitoringOutputFormatKey = "outputs.%s" outputKey = "output" - enabledKey = "settings.monitoring.enabled" - outputsKey = "outputs" - elasticsearchKey = "elasticsearch" - typeKey = "type" + enabledKey = "settings.monitoring.enabled" + outputsKey = "outputs" + elasticsearchKey = "elasticsearch" + typeKey = "type" + defaultOutputName = "default" ) func injectMonitoring(outputGroup string, rootAst *transpiler.AST, programsToRun []program.Program) ([]program.Program, error) { @@ -40,17 +41,17 @@ func injectMonitoring(outputGroup string, rootAst *transpiler.AST, programsToRun config[enabledKey] = false } else { // get monitoring output name to be used + monitoringOutputName := defaultOutputName useOutputNode, found := transpiler.Lookup(rootAst, monitoringUseOutputKey) - if !found { - return programsToRun, nil - } + if found { - monitoringOutputNameKey, ok := useOutputNode.Value().(*transpiler.StrVal) - if !ok { - return programsToRun, nil - } + monitoringOutputNameKey, ok := useOutputNode.Value().(*transpiler.StrVal) + if !ok { + return programsToRun, nil + } - monitoringOutputName := monitoringOutputNameKey.String() + monitoringOutputName = monitoringOutputNameKey.String() + } ast := rootAst.Clone() if err := getMonitoringRule(monitoringOutputName).Apply(ast); err != nil { diff --git a/x-pack/elastic-agent/pkg/agent/application/paths/paths.go b/x-pack/elastic-agent/pkg/agent/application/paths/paths.go new file mode 100644 index 00000000000..a45000b40ae --- /dev/null +++ b/x-pack/elastic-agent/pkg/agent/application/paths/paths.go @@ -0,0 +1,45 @@ +// Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package paths
+
+import (
+    "flag"
+    "os"
+    "path/filepath"
+)
+
+var (
+    homePath string
+    dataPath string
+)
+
+func init() {
+    exePath := retrieveExecutablePath()
+
+    fs := flag.CommandLine
+    fs.StringVar(&homePath, "path.home", exePath, "Agent root path")
+    fs.StringVar(&dataPath, "path.data", filepath.Join(exePath, "data"), "Data path contains Agent managed binaries")
+}
+
+// Home returns the Agent root path; it defaults to the directory the
+// binary lives in and can be overridden with the path.home flag.
+func Home() string {
+    return homePath
+}
+
+// Data returns the data path holding Agent managed binaries; it defaults
+// to a "data" subfolder of Home and can be overridden with path.data.
+func Data() string {
+    return dataPath
+}
+
+// retrieveExecutablePath returns the directory where the binary lives.
+// os.Executable is not supported on nacl.
+func retrieveExecutablePath() string {
+    execPath, err := os.Executable()
+    if err != nil {
+        panic(err)
+    }
+
+    return filepath.Dir(execPath)
+}
diff --git a/x-pack/elastic-agent/pkg/agent/application/stream.go b/x-pack/elastic-agent/pkg/agent/application/stream.go
index ab3e6426093..1e8a1f10048 100644
--- a/x-pack/elastic-agent/pkg/agent/application/stream.go
+++ b/x-pack/elastic-agent/pkg/agent/application/stream.go
@@ -18,6 +18,7 @@ import (
     "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/artifact/install"
     "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/config"
     "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/core/logger"
+    "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/core/plugin/app/monitoring"
 )
 
 // EventProcessor is an processor of application event
@@ -44,6 +45,7 @@ type sender interface {
 type operatorStream struct {
     configHandler ConfigHandler
     log           *logger.Logger
+    monitor       monitoring.Monitor
 }
 
 func (b *operatorStream) Close() error {
@@ -54,10 +56,10 @@
     return b.configHandler.HandleConfig(cfg)
 }
 
-func streamFactory(ctx context.Context, cfg *config.Config, client sender, r reporter) func(*logger.Logger, routingKey) (stream, error) {
+func streamFactory(ctx context.Context, cfg *config.Config, client sender, r reporter, m monitoring.Monitor) func(*logger.Logger, routingKey) (stream, error) {
     return func(log *logger.Logger, id routingKey) (stream, error) {
         // new operator per stream to isolate processes without using tags
-        operator, err := newOperator(ctx, log, id, cfg, r)
+        operator, err := newOperator(ctx, log, id, cfg, r, m)
         if err != nil {
             return nil, err
         }
@@ -69,7 +71,7 @@
     }
 }
 
-func newOperator(ctx context.Context, log *logger.Logger, id routingKey, config *config.Config, r reporter) (*operation.Operator, error) {
+func newOperator(ctx context.Context, log *logger.Logger, id routingKey, config *config.Config, r reporter, m monitoring.Monitor) (*operation.Operator, error) {
     operatorConfig := &operatorCfg.Config{}
     if err := config.Unpack(&operatorConfig); err != nil {
         return nil, err
@@ -95,5 +97,6 @@
         installer,
         stateResolver,
         r,
+        m,
     )
 }
diff --git a/x-pack/elastic-agent/pkg/agent/cmd/common.go b/x-pack/elastic-agent/pkg/agent/cmd/common.go
index 0189f8e408d..5fe9947e34f 100644
--- a/x-pack/elastic-agent/pkg/agent/cmd/common.go
+++ b/x-pack/elastic-agent/pkg/agent/cmd/common.go
@@ -5,30 +5,30 @@
 package cmd
 
 import (
+    "flag"
     "fmt"
     "os"
     "path/filepath"
 
     "github.com/spf13/cobra"
 
+
"github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/application/paths" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/basecmd" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/cli" ) -var defaultConfig = "elastic-agent.yml" +const defaultConfig = "elastic-agent.yml" type globalFlags struct { - PathConfigFile string PathConfig string - PathData string - PathHome string - PathLogs string + PathConfigFile string FlagStrictPerms bool } +// Config returns path which identifies configuration file. func (f *globalFlags) Config() string { - if len(f.PathConfigFile) == 0 { - return filepath.Join(f.PathHome, defaultConfig) + if len(f.PathConfigFile) == 0 || f.PathConfigFile == defaultConfig { + return filepath.Join(paths.Home(), defaultConfig) } return f.PathConfigFile } @@ -50,11 +50,11 @@ func NewCommandWithArgs(args []string, streams *cli.IOStreams) *cobra.Command { flags := &globalFlags{} + cmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("path.home")) + cmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("path.data")) + cmd.PersistentFlags().StringVarP(&flags.PathConfigFile, "", "c", defaultConfig, fmt.Sprintf(`Configuration file, relative to path.config (default "%s")`, defaultConfig)) - cmd.PersistentFlags().StringVarP(&flags.PathHome, "path.home", "", "", "Home path") cmd.PersistentFlags().StringVarP(&flags.PathConfig, "path.config", "", "${path.home}", "Configuration path") - cmd.PersistentFlags().StringVarP(&flags.PathData, "path.data", "", "${path.home}/data", "Data path") - cmd.PersistentFlags().StringVarP(&flags.PathLogs, "path.logs", "", "${path.home}/logs", "Logs path") cmd.PersistentFlags().BoolVarP(&flags.FlagStrictPerms, "strict.perms", "", true, "Strict permission checking on config files") // Add version. diff --git a/x-pack/elastic-agent/pkg/agent/cmd/enroll.go b/x-pack/elastic-agent/pkg/agent/cmd/enroll.go index a2a7ee48d22..abc5efb3b90 100644 --- a/x-pack/elastic-agent/pkg/agent/cmd/enroll.go +++ b/x-pack/elastic-agent/pkg/agent/cmd/enroll.go @@ -46,13 +46,13 @@ func newEnrollCommandWithArgs(flags *globalFlags, _ []string, streams *cli.IOStr func enroll(streams *cli.IOStreams, cmd *cobra.Command, flags *globalFlags, args []string) error { warn.PrintNotGA(streams.Out) - - config, err := config.LoadYAML(flags.PathConfigFile) + pathConfigFile := flags.Config() + config, err := config.LoadYAML(pathConfigFile) if err != nil { return errors.New(err, - fmt.Sprintf("could not read configuration file %s", flags.PathConfigFile), + fmt.Sprintf("could not read configuration file %s", pathConfigFile), errors.TypeFilesystem, - errors.M(errors.MetaKeyPath, flags.PathConfigFile)) + errors.M(errors.MetaKeyPath, pathConfigFile)) } force, _ := cmd.Flags().GetBool("force") @@ -95,7 +95,7 @@ func enroll(streams *cli.IOStreams, cmd *cobra.Command, flags *globalFlags, args c, err := application.NewEnrollCmd( logger, &options, - flags.PathConfigFile, + pathConfigFile, ) if err != nil { diff --git a/x-pack/elastic-agent/pkg/agent/cmd/run.go b/x-pack/elastic-agent/pkg/agent/cmd/run.go index f0196ba7875..db199e2b47d 100644 --- a/x-pack/elastic-agent/pkg/agent/cmd/run.go +++ b/x-pack/elastic-agent/pkg/agent/cmd/run.go @@ -33,12 +33,13 @@ func newRunCommandWithArgs(flags *globalFlags, _ []string, streams *cli.IOStream } func run(flags *globalFlags, streams *cli.IOStreams) error { - config, err := config.LoadYAML(flags.PathConfigFile) + pathConfigFile := flags.Config() + config, err := config.LoadYAML(pathConfigFile) if err != nil { return errors.New(err, - fmt.Sprintf("could 
not read configuration file %s", flags.PathConfigFile), + fmt.Sprintf("could not read configuration file %s", pathConfigFile), errors.TypeFilesystem, - errors.M(errors.MetaKeyPath, flags.PathConfigFile)) + errors.M(errors.MetaKeyPath, pathConfigFile)) } logger, err := logger.NewFromConfig(config) @@ -46,7 +47,7 @@ func run(flags *globalFlags, streams *cli.IOStreams) error { return err } - app, err := application.New(logger, flags.PathConfigFile) + app, err := application.New(logger, pathConfigFile) if err != nil { return err } diff --git a/x-pack/elastic-agent/pkg/agent/operation/common_test.go b/x-pack/elastic-agent/pkg/agent/operation/common_test.go index a50b4dfc7a0..10a3aab90d1 100644 --- a/x-pack/elastic-agent/pkg/agent/operation/common_test.go +++ b/x-pack/elastic-agent/pkg/agent/operation/common_test.go @@ -17,7 +17,7 @@ import ( "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/config" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/core/logger" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/core/plugin/app" - "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/core/plugin/app/monitoring" + "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/core/plugin/app/monitoring/noop" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/core/plugin/process" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/core/plugin/retry" ) @@ -36,9 +36,6 @@ func getTestOperator(t *testing.T, installPath string) (*Operator, *operatorCfg. DownloadConfig: &artifact.Config{ InstallPath: installPath, }, - MonitoringConfig: &monitoring.Config{ - MonitorMetrics: false, - }, } cfg, err := config.NewConfigFrom(operatorConfig) @@ -56,7 +53,7 @@ func getTestOperator(t *testing.T, installPath string) (*Operator, *operatorCfg. t.Fatal(err) } - operator, err := NewOperator(context.Background(), l, "p1", cfg, fetcher, installer, stateResolver, nil) + operator, err := NewOperator(context.Background(), l, "p1", cfg, fetcher, installer, stateResolver, nil, noop.NewMonitor()) if err != nil { t.Fatal(err) } diff --git a/x-pack/elastic-agent/pkg/agent/operation/config/config.go b/x-pack/elastic-agent/pkg/agent/operation/config/config.go index 74a4212e83c..f1c15df7007 100644 --- a/x-pack/elastic-agent/pkg/agent/operation/config/config.go +++ b/x-pack/elastic-agent/pkg/agent/operation/config/config.go @@ -6,7 +6,6 @@ package config import ( "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/artifact" - "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/core/plugin/app/monitoring" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/core/plugin/process" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/core/plugin/retry" ) @@ -17,6 +16,4 @@ type Config struct { RetryConfig *retry.Config `yaml:"retry" config:"retry"` DownloadConfig *artifact.Config `yaml:"download" config:"download"` - - MonitoringConfig *monitoring.Config `yaml:"settings.monitoring" config:"settings.monitoring"` } diff --git a/x-pack/elastic-agent/pkg/agent/operation/monitoring.go b/x-pack/elastic-agent/pkg/agent/operation/monitoring.go index 85b6bb2a865..34302dc58b8 100644 --- a/x-pack/elastic-agent/pkg/agent/operation/monitoring.go +++ b/x-pack/elastic-agent/pkg/agent/operation/monitoring.go @@ -9,29 +9,18 @@ import ( "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/configrequest" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/errors" - "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/core/logger" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/core/plugin/app" - 
"github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/core/plugin/app/monitoring" ) const ( monitoringName = "FLEET_MONITORING" - settingsKey = "settings" - monitoringKey = "monitoring" outputKey = "output" monitoringEnabledSubkey = "enabled" ) func (o *Operator) handleStartSidecar(s configrequest.Step) (result error) { - cfg, err := getConfigFromStep(s) - if err != nil { - return errors.New(err, - errors.TypeConfig, - "operator.handleStartSidecar failed to retrieve config from step") - } - // if monitoring is disabled and running stop it - if isEnabled := isMonitoringEnabled(o.logger, cfg); !isEnabled { + if !o.monitor.IsMonitoringEnabled() { if o.isMonitoring { o.logger.Info("operator.handleStartSidecar: monitoring is running and disabled, proceeding to stop") return o.handleStopSidecar(s) @@ -97,42 +86,6 @@ func monitoringTags() map[app.Tag]string { } } -func isMonitoringEnabled(logger *logger.Logger, cfg map[string]interface{}) bool { - settingsVal, found := cfg[settingsKey] - if !found { - logger.Error("operator.isMonitoringEnabled: settings not found in config") - return false - } - - settingsMap, ok := settingsVal.(map[string]interface{}) - if !ok { - logger.Error("operator.isMonitoringEnabled: settings not a map") - return false - } - - monitoringVal, found := settingsMap[monitoringKey] - if !found { - logger.Error("operator.isMonitoringEnabled: settings.monitoring not found in config") - return false - } - - monitoringMap, ok := monitoringVal.(map[string]interface{}) - if !ok { - logger.Error("operator.isMonitoringEnabled: settings.monitoring not a map") - return false - } - - enabledVal, found := monitoringMap[monitoringEnabledSubkey] - if !found { - logger.Infof("operator.isMonitoringEnabled: monitoring.enabled key not found: %v", monitoringMap) - return false - } - - enabled, ok := enabledVal.(bool) - - return enabled && ok -} - func (o *Operator) getMonitoringSteps(step configrequest.Step) []configrequest.Step { // get output config, err := getConfigFromStep(step) @@ -159,13 +112,13 @@ func (o *Operator) getMonitoringSteps(step configrequest.Step) []configrequest.S return nil } - return o.generateMonitoringSteps(o.config.MonitoringConfig, step.Version, output) + return o.generateMonitoringSteps(step.Version, output) } -func (o *Operator) generateMonitoringSteps(cfg *monitoring.Config, version string, output interface{}) []configrequest.Step { +func (o *Operator) generateMonitoringSteps(version string, output interface{}) []configrequest.Step { var steps []configrequest.Step - if cfg.MonitorLogs { + if o.monitor.WatchLogs() { fbConfig, any := o.getMonitoringFilebeatConfig(output) stepID := configrequest.StepRun if !any { @@ -183,7 +136,7 @@ func (o *Operator) generateMonitoringSteps(cfg *monitoring.Config, version strin steps = append(steps, filebeatStep) } - if cfg.MonitorMetrics { + if o.monitor.WatchMetrics() { mbConfig, any := o.getMonitoringMetricbeatConfig(output) stepID := configrequest.StepRun if !any { @@ -217,6 +170,7 @@ func (o *Operator) getMonitoringFilebeatConfig(output interface{}) (map[string]i map[string]interface{}{ "type": "log", "paths": paths, + "index": "logs-agent-default", }, }, }, @@ -244,6 +198,7 @@ func (o *Operator) getMonitoringMetricbeatConfig(output interface{}) (map[string "metricsets": []string{"stats", "state"}, "period": "10s", "hosts": hosts, + "index": "metrics-agent-default", }, }, }, @@ -264,7 +219,7 @@ func (o *Operator) getLogFilePaths() []string { defer o.appsLock.Unlock() for _, a := range o.apps { - logPath := a.Monitor().LogPath() + 
logPath := a.Monitor().LogPath(a.Name(), o.pipelineID) if logPath != "" { paths = append(paths, logPath) } @@ -280,7 +235,7 @@ func (o *Operator) getMetricbeatEndpoints() []string { defer o.appsLock.Unlock() for _, a := range o.apps { - metricEndpoint := a.Monitor().MetricsPathPrefixed() + metricEndpoint := a.Monitor().MetricsPathPrefixed(a.Name(), o.pipelineID) if metricEndpoint != "" { endpoints = append(endpoints, metricEndpoint) } diff --git a/x-pack/elastic-agent/pkg/agent/operation/monitoring_test.go b/x-pack/elastic-agent/pkg/agent/operation/monitoring_test.go index 4ae08d4001c..826e5bbd03d 100644 --- a/x-pack/elastic-agent/pkg/agent/operation/monitoring_test.go +++ b/x-pack/elastic-agent/pkg/agent/operation/monitoring_test.go @@ -15,7 +15,7 @@ import ( "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/artifact" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/config" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/core/plugin/app/monitoring" - "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/core/plugin/app/monitoring/beats" + monitoringConfig "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/core/plugin/app/monitoring/config" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/core/plugin/process" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/core/plugin/retry" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/core/plugin/state" @@ -23,26 +23,27 @@ import ( func TestGenerateSteps(t *testing.T) { const sampleOutput = "sample-output" - operator, _ := getMonitorableTestOperator(t, "tests/scripts") type testCase struct { Name string - Config *monitoring.Config + Config *monitoringConfig.MonitoringConfig ExpectedSteps int FilebeatStep bool MetricbeatStep bool } testCases := []testCase{ - testCase{"NO monitoring", &monitoring.Config{MonitorLogs: false, MonitorMetrics: false}, 0, false, false}, - testCase{"FB monitoring", &monitoring.Config{MonitorLogs: true, MonitorMetrics: false}, 1, true, false}, - testCase{"MB monitoring", &monitoring.Config{MonitorLogs: false, MonitorMetrics: true}, 1, false, true}, - testCase{"ALL monitoring", &monitoring.Config{MonitorLogs: true, MonitorMetrics: true}, 2, true, true}, + {"NO monitoring", &monitoringConfig.MonitoringConfig{MonitorLogs: false, MonitorMetrics: false}, 0, false, false}, + {"FB monitoring", &monitoringConfig.MonitoringConfig{MonitorLogs: true, MonitorMetrics: false}, 1, true, false}, + {"MB monitoring", &monitoringConfig.MonitoringConfig{MonitorLogs: false, MonitorMetrics: true}, 1, false, true}, + {"ALL monitoring", &monitoringConfig.MonitoringConfig{MonitorLogs: true, MonitorMetrics: true}, 2, true, true}, } for _, tc := range testCases { t.Run(tc.Name, func(t *testing.T) { - steps := operator.generateMonitoringSteps(tc.Config, "8.0", sampleOutput) + m := &testMonitor{monitorLogs: tc.Config.MonitorLogs, monitorMetrics: tc.Config.MonitorMetrics} + operator, _ := getMonitorableTestOperator(t, "tests/scripts", m) + steps := operator.generateMonitoringSteps("8.0", sampleOutput) if actualSteps := len(steps); actualSteps != tc.ExpectedSteps { t.Fatalf("invalid number of steps, expected %v, got %v", tc.ExpectedSteps, actualSteps) } @@ -91,7 +92,7 @@ func checkStep(t *testing.T, stepName string, expectedOutput interface{}, s conf } } -func getMonitorableTestOperator(t *testing.T, installPath string) (*Operator, *operatorCfg.Config) { +func getMonitorableTestOperator(t *testing.T, installPath string, m monitoring.Monitor) (*Operator, *operatorCfg.Config) { operatorConfig := &operatorCfg.Config{ 
        RetryConfig: &retry.Config{
            Enabled: true,
@@ -104,9 +105,6 @@
            InstallPath:     installPath,
            OperatingSystem: "darwin",
        },
-        MonitoringConfig: &monitoring.Config{
-            MonitorMetrics: true,
-        },
    }
 
    cfg, err := config.NewConfigFrom(operatorConfig)
@@ -124,19 +122,19 @@
        t.Fatal(err)
    }
    ctx := context.Background()
-    operator, err := NewOperator(ctx, l, "p1", cfg, fetcher, installer, stateResolver, nil)
+
+    operator, err := NewOperator(ctx, l, "p1", cfg, fetcher, installer, stateResolver, nil, m)
    if err != nil {
        t.Fatal(err)
    }
 
-    monitor := beats.NewMonitor("dummmy", "p1234", &artifact.Config{OperatingSystem: "linux", InstallPath: "/install/path"}, true, true)
-    operator.apps["dummy"] = &testMonitorableApp{monitor: monitor}
+    operator.apps["dummy"] = &testMonitorableApp{monitor: m}
 
    return operator, operatorConfig
 }
 
 type testMonitorableApp struct {
-    monitor *beats.Monitor
+    monitor monitoring.Monitor
 }
 
 func (*testMonitorableApp) Name() string { return "" }
@@ -147,3 +145,53 @@ func (*testMonitorableApp) Configure(_ context.Context, config map[string]interf
 }
 func (*testMonitorableApp) State() state.State { return state.State{} }
 func (a *testMonitorableApp) Monitor() monitoring.Monitor { return a.monitor }
+
+type testMonitor struct {
+    monitorLogs    bool
+    monitorMetrics bool
+}
+
+// EnrichArgs enriches the arguments provided to the application in order to
+// enable monitoring.
+func (b *testMonitor) EnrichArgs(_ string, _ string, args []string) []string { return args }
+
+// Cleanup cleans up all drops.
+func (b *testMonitor) Cleanup(string, string) error { return nil }
+
+// Prepare executes steps in order for monitoring to work correctly.
+func (b *testMonitor) Prepare(string, string, int, int) error { return nil }
+
+// LogPath describes the path where the application stores logs. Empty if
+// the application is not monitorable.
+func (b *testMonitor) LogPath(string, string) string {
+    if !b.monitorLogs {
+        return ""
+    }
+    return "path"
+}
+
+// MetricsPath describes the location where the application exposes metrics
+// collectable by metricbeat.
+func (b *testMonitor) MetricsPath(string, string) string {
+    if !b.monitorMetrics {
+        return ""
+    }
+    return "path"
+}
+
+// MetricsPathPrefixed returns the metrics path prefixed with http+.
+func (b *testMonitor) MetricsPathPrefixed(string, string) string {
+    return "http+path"
+}
+
+// Reload reloads state based on configuration.
+func (b *testMonitor) Reload(cfg *config.Config) error { return nil }
+
+// IsMonitoringEnabled returns true if monitoring is configured.
+func (b *testMonitor) IsMonitoringEnabled() bool { return b.monitorLogs || b.monitorMetrics }
+
+// WatchLogs returns true if monitoring is configured and monitoring logs is enabled.
+func (b *testMonitor) WatchLogs() bool { return b.monitorLogs }
+
+// WatchMetrics returns true if monitoring is configured and monitoring metrics is enabled.
+func (b *testMonitor) WatchMetrics() bool { return b.monitorMetrics } diff --git a/x-pack/elastic-agent/pkg/agent/operation/operator.go b/x-pack/elastic-agent/pkg/agent/operation/operator.go index ed72f3884d8..4a2b2208ee8 100644 --- a/x-pack/elastic-agent/pkg/agent/operation/operator.go +++ b/x-pack/elastic-agent/pkg/agent/operation/operator.go @@ -41,6 +41,7 @@ type Operator struct { handlers map[string]handleFunc stateResolver *stateresolver.StateResolver eventProcessor callbackHooks + monitor monitoring.Monitor isMonitoring bool apps map[string]Application @@ -61,7 +62,8 @@ func NewOperator( fetcher download.Downloader, installer install.Installer, stateResolver *stateresolver.StateResolver, - eventProcessor callbackHooks) (*Operator, error) { + eventProcessor callbackHooks, + monitor monitoring.Monitor) (*Operator, error) { operatorConfig := defaultOperatorConfig() if err := config.Unpack(&operatorConfig); err != nil { @@ -86,6 +88,7 @@ func NewOperator( stateResolver: stateResolver, apps: make(map[string]Application), eventProcessor: eventProcessor, + monitor: monitor, } operator.initHandlerMap() @@ -98,10 +101,6 @@ func NewOperator( func defaultOperatorConfig() *operatorCfg.Config { return &operatorCfg.Config{ - MonitoringConfig: &monitoring.Config{ - MonitorLogs: false, - MonitorMetrics: false, - }, RetryConfig: &retry.Config{ Enabled: false, RetriesCount: 0, @@ -254,9 +253,7 @@ func (o *Operator) getApp(p Descriptor) (Application, error) { return nil, fmt.Errorf("descriptor is not an app.Specifier") } - monitor := monitoring.NewMonitor(isMonitorable(p), p.BinaryName(), o.pipelineID, o.config.DownloadConfig, o.config.MonitoringConfig.MonitorLogs, o.config.MonitoringConfig.MonitorMetrics) - - a, err := app.NewApplication(o.bgContext, p.ID(), p.BinaryName(), o.pipelineID, specifier, factory, o.config, o.logger, o.eventProcessor.OnFailing, monitor) + a, err := app.NewApplication(o.bgContext, p.ID(), p.BinaryName(), o.pipelineID, specifier, factory, o.config, o.logger, o.eventProcessor.OnFailing, o.monitor) if err != nil { return nil, err } diff --git a/x-pack/elastic-agent/pkg/agent/program/supported.go b/x-pack/elastic-agent/pkg/agent/program/supported.go index fe62d0afc28..ca70abd8771 100644 --- a/x-pack/elastic-agent/pkg/agent/program/supported.go +++ b/x-pack/elastic-agent/pkg/agent/program/supported.go @@ -19,7 +19,7 @@ func init() { // Packed Files // spec/filebeat.yml // spec/metricbeat.yml - unpacked := 
packer.MustUnpack("eJzsV0uTo7oV3udnzDqVy6PdCanKwtAXAfbQ17hbEtohyQ3YEqbG2BhS+e8pCYMfPXNTk1Q2qSy63AXSeX7nfB9//3KoN+yXj1Js6CZr/tRJ8eWvX6j0G/K2zxMpDgRFIsWrRWqZu9fSlRk6i1TCgs/rgknev5YuDUvTD8s2D6tY8AC2SykOdD0TVPolBXD3GyIFDWKhzzyerRJBsXtIcSKWEh5TFB0IWjlE+gdmvZdLb14u34dfivxjirigCB65N2uolYjfcN4w4G+zzpQUQMG98BB6YZOs1W/UpGhWEAs2BM2MW/s8iEyyjrgn4xOVpCY27PS7t31O7KhN27rj6CxYv18s125NZS1SO/nI0GxHcP7slfM89Fxjg13xWroHavHey/dNCMQxk3DLfafnQSRSZH6wIDqlFuyZ5XSv+T4PvXlOrdlHajlHIs91aq+e1T1mwY77TkGqRLDLOR6IVsVEgVOxVsciNkHSpSg2sHWumYoJqxp+HWNqU5zsX0t3l+KkYJZjMhkL1g729Nm3fZ6hWctx0o950P76jNlJR5DfXPw1KZ4/2q5p5Zo8iMccRztTzZSv0DPy1HLaDXQKCs4fHDgfFIiev4zvr75vfObTO+AcsaX7c8hQbLyW7pFaTjv4Gv4ILrYEu4bGYRUbTMKC4q8aLxla6V+CZkUqz4JobEQtk86W4LindtSHwniM1aCmc8hwbIy9GmIRBkHGcxi4HbViwez4xKqv/24eNa1iwarkI5VQUjsSXv5Tvf1R/WsOYPNauo/P7/o74NQs2NiHwC04yAcMBtAYY2cVPJDJ97wJdWyzngLfIG93PRzj0vNw2x+K/JYB2N3VUj1XeO4HXN+eDz1XcKnm29G7Zpgttri9GwJo6Xm2Y4MAcdT5gqSmDz4IJoJWqxMHcatiS+35D+zAHXvZ5xwl7UMsyveJAGebWbBTM0Wt+BvB4YOd84l0TkdQUjPT6SlwbJXXa+kOz9rPuS/teMas84lozIhe12HAgMrbIDj64NI/cAT7sXbMggeCYoPa4adeZRac3dfftVJ0Nu/6p+K8nF+u72u2XLsmCebTLKiaXe8lp9Rqpp20XN/npWxtcKz75JVGTqQ4cAA7bKu9D6/7IIgFBXDLgdNdMXyZIdCIjdq946zOL/vvoQbLtWuwCooBw/GJBbtxNiSTTvPJbpCc7nnrtsbheLcn6r2dqJm+qRepKYAlQb5xfeaYPHBNPp/i+Uaw6FVOZFXbGYAKj2GK40jNIutysTGbq39vNs4u/1jv8qhM8zXw+zfFuThuUxSLhcdPHCctx6tq6c0rgs4Fs5M6tWOR4mibeewQenzoQccOKrbIagoimyLq2jxSuLJjI8XxPup2iy9/HGhebppvJfsO0b8haDApthdi31KkCMkUPIjq1LoIABzpgUXdRK49wYnJVELAOE6NezFlis49Wd8R8XjWIMhs9QL5VwJBmgWVfkWQqYb4SJGzI2/m0xK7RWodGqJ84dXvCYSrfZx0HL2XeFVvqTWTGeIm0+/en0PAOwa+Lhhweu47ajEbKTofBsAZOUWOReBlGU0LZlx4yqez06TXzXZ6qO1E0PVsRlFbTsskSJRYEvxlr/I6kUAv1iPxHDU4iliOG2QeljKpmTXEHOY/JUAagpMuQ/F/V4RccPJ/IfI/LUSaEMAnHkSFwpuuk+9oYXFd5A+CwZ7yXdzH+PB8WMbTgv4uWZiOneFkj+1IEAs+TSLlQhAai6sLIcr359B/Oi46Z8J/NP9dsvxPCXbaKT8k2UDvGp2bEj+TbeAb2csnvH/Cqao7rWL9QYRtXnNQfDAJK4KL9g4HYw+CZMbA+23/OzWruMx/eX856/3wW/n0bbH+XKPBjvKRP4decivemjCIxIUfbm2PJPtJ6HHwF8UZw25bTeTYpKgR2PI7Jv3Zd3E88owdC34Xl8bKFDOZanaJbdjL3wgeBcD8tk8391ZTbZmEDbWJwJbG0m1e2k4Ipg/En7iTFBz42o/quZ7d8RwgHbWMT1i61OqOX9Vdpj9QzYlnSBWd6M3evRcQdzOlPoZNCuBl/5OaBolg28/C6xZrd3avgmYUH9M5VVOmxJF8X3gV3xP09Bz+WhTMUD00+4UH/4ysu3yOEy+aprNYz2U0iKJXlR+zFYe/7yPrIqYGQSMo8HsOxJZZsGBSiZdWxVAxCXcZ/qrFUGb5MrN+1f/rjwhbcUteLVb7v335xx/+GQAA//+LKLK5") + unpacked := 
packer.MustUnpack("eJzsWF9zs7j1vv99jNz+Oi2I15mlM3thyCLADnmNE0noDkkOYEuYDdgYOv3uHWHjf8m73W2nN51excZHOv+e85yH/O2hrlb8L++FXLFV2vy5U/Lhrw9MeQ193WaxkjXFoUzIYpYAc/NSOCrFB5kolItplXMl+pfCYUFhekHRZkEZSeGjdq5kzZYTyZRXMIg23zHNmR/JwebetowlI06dkFjOFdolOKwpXthUeTUHb8XcnRbzt+Nfhr1dgoVkGO2EO2kYiOV3kjUceuu0MxWDSAo3qAM3aOKl/hs2CZ7kFKCG4olxfb/wQ5Mub2xrBkSZ4kk5VwcpFKq/41gmJSoDacwSYO+okqUgYZ6og3wpnJKXztotQ5NB1L9k2yaAcpcqtBaeXTEVy9XTdha40yzw4wmHb48BPOyp9TxzCyNL8eRXiiODWJHBFcoZsluu7DUlUc+s4NEtplngOnsm7TUDE8WgXIunrT7XUxIaBHhKQNSd7IwVcXRMOofeHWJB34Qf5okVvzMoex2TgKjh/RiTkwuYPQYwzgX0emahLgGoP9032L4UTsVKxxT+85jH/vIs6oUfygSbJ39mzp/u75Y77iODjzmS8Z5zzTL9fb501sxyJgR4NfNsg5l2nZLIGH+/8n3xWUzPvzEweT/251Al1uIxgJM9s6az81nXkSsYSe4vHgOvLrgVdxR7De+Gvld0wEx4xusRn07HQCS5Fe15+VyQxV2sVrwn4FBxa/F4HUtKYsletxn3w72uJwd2x9t/NQ+54wB1wrMLiumeq7fZH+vtD+oPpUGx8Rj493257u8Rpwx77dgHDj0jPWKwFSQesdIJfJBn366R6dhYGQ0zfdvDU1zHebjuT5MQp6UkuKll4B7xfMQ1v7LXOSAwzDSgFRtn67rO7jSjhEpWLvbcijcp/jbkS6Hc3fnQs74Trm0k1lTHtubTr+8ROG5fCsek/vQulsOedvaGgehD5xDAeJ+ARvLs9h4G7XJuhZJC2RMrqpkldF6PgX989jl3vueW7PU5jZkViYY6HDFAK+bHkksbJPhgUvI8ckZPNUdb8Z6vP/Xqg5LNbf19zc3opn/a3ymW5q5mTeCjzVif+dLRNTufo9Bep2DkJG17k5e+a8LBpU/zpdM/Xz7vKIkVg7Z1toe5IXznVw7si3/4Ux+tUS5wXIlLHPmqRB3FR0ykPiou9rrOaEOVbTIVd6vLHJcM2CWDqBU47AUOznmkwFMp+GXAP1Wy1jxLrLs6+ZFkEK0FtLtPPAkbuXrdZhRPhl1BT3EK5dUC3/CewUskj/2M9tzfjD1UXNk/4t+rHbydzZfDd+MyO1f9L0dMTI94gaig2DMuz2xT+I55VccPSmSvc6KLykoh2r0UTpCQKBz4pcvkymwu/t3JuAvE+3KThUWSLaHXv2r9QKI2wZGcuWIvSNwKsijn7rSk+JBzK64SK5IJCdepy+vAFR3FccU7XuvYQtDkVDV52LVZqPFiRUZCom3YbWYPfzpKFrVqPgr+hWh5xcjgSq5PImXNsCZSUwo/rBJwEjMkLHm/zXB3Xv49JbHJdULQ2J0b92SqBB/6O6Ew2hoUmy2DnkH/mdhRZs6UV1JsaiLZMWxv6Kv5bU6cPAF1Q7UvsvgtsXO5n8SdwHfCCNolBXJHu0k9kNWTuaE4NGkXCldFe6ZoRfWC1+JHA9MK26StWmZFBgFyx604Z7AdwDJfOg2zqCQnYj2T0kjebqiF17DA52oxEJQGx7yUDXODMzEK6HUUIOOl0LnYm2FJdJPN3Iom3IolW04mDLc7TRAnUVb8MTF16ul/VlA1lMRdiqP/iar/ZlGliZ7kBleexttQJwK0SDL7H4mfc76nOM4x3j0X8KdLz9prwg7fx4VALFEJmL9zhUpK8rPgSgGavBRHLI5iS88vKb59zMEZ/5vfXPz/rliA55emHwqG4WVqMeTWJGQ63t0mJN5+xvsnnOq67zg45AKidwGkkXp2R7GQK396g4OxB8KXLV1c9z/Us7qaPdmL70d++P95UVefa3TiJu3jaZuFN0LUyPjwgmjeilZIOwaMr0SrwfptNnLbZamaeQrQe0LCLiGbL3HMTjuKA2TcxDVgZYxZvyA/X8d24uVBXFzHcerT1bnLQu8oNvdCofcBSzd5fS1SftcZ6OmFLE8CZJjd0Y6W4V7neIelU62ud/NwttP+GX67Fz4j7w6CZa7O83Ndpz31UU3J841QO88rQLXeB8wKr7F2c+/vE0LbmQC5ZOvtIG6WuofkeRuSht3kc/yHxIBtQowqcBMV/JLn3ND5oZ5Db02XWcksZOhcjuIG1QmJDL1fKPa6BGTlfIjhIogCV3wkmH4ky+Hz8EKkd0vq8srNfv754e//948AAAD//7sC+54=") SupportedMap = make(map[string]bool) for f, v := range unpacked { diff --git a/x-pack/elastic-agent/pkg/agent/program/testdata/constraints_config-filebeat.yml b/x-pack/elastic-agent/pkg/agent/program/testdata/constraints_config-filebeat.yml index c19f9c36629..20e08dbdd7e 100644 --- a/x-pack/elastic-agent/pkg/agent/program/testdata/constraints_config-filebeat.yml +++ b/x-pack/elastic-agent/pkg/agent/program/testdata/constraints_config-filebeat.yml @@ -7,10 +7,11 @@ filebeat: index: logs-generic-default processors: - add_fields: + target: "stream" fields: - stream.type: logs - stream.dataset: generic - stream.namespace: default + type: logs + dataset: generic + namespace: default output: elasticsearch: hosts: diff --git a/x-pack/elastic-agent/pkg/agent/program/testdata/enabled_output_true-filebeat.yml b/x-pack/elastic-agent/pkg/agent/program/testdata/enabled_output_true-filebeat.yml index 413a6866e91..26d5dfdca2f 100644 --- a/x-pack/elastic-agent/pkg/agent/program/testdata/enabled_output_true-filebeat.yml +++ b/x-pack/elastic-agent/pkg/agent/program/testdata/enabled_output_true-filebeat.yml @@ -7,10 +7,11 @@ filebeat: index: 
logs-generic-default processors: - add_fields: + target: "stream" fields: - stream.type: logs - stream.dataset: generic - stream.namespace: default + type: logs + dataset: generic + namespace: default output: elasticsearch: enabled: true diff --git a/x-pack/elastic-agent/pkg/agent/program/testdata/enabled_true-filebeat.yml b/x-pack/elastic-agent/pkg/agent/program/testdata/enabled_true-filebeat.yml index a31fe4e37dd..feac81692f7 100644 --- a/x-pack/elastic-agent/pkg/agent/program/testdata/enabled_true-filebeat.yml +++ b/x-pack/elastic-agent/pkg/agent/program/testdata/enabled_true-filebeat.yml @@ -8,10 +8,11 @@ filebeat: index: logs-generic-default processors: - add_fields: + target: "stream" fields: - stream.type: logs - stream.dataset: generic - stream.namespace: default + type: logs + dataset: generic + namespace: default output: elasticsearch: hosts: diff --git a/x-pack/elastic-agent/pkg/agent/program/testdata/single_config-filebeat.yml b/x-pack/elastic-agent/pkg/agent/program/testdata/single_config-filebeat.yml index 39c54159d10..54604b76801 100644 --- a/x-pack/elastic-agent/pkg/agent/program/testdata/single_config-filebeat.yml +++ b/x-pack/elastic-agent/pkg/agent/program/testdata/single_config-filebeat.yml @@ -9,10 +9,11 @@ filebeat: var: value processors: - add_fields: + target: "stream" fields: - stream.type: logs - stream.dataset: generic - stream.namespace: default + type: logs + dataset: generic + namespace: default output: elasticsearch: hosts: diff --git a/x-pack/elastic-agent/pkg/agent/program/testdata/single_config-metricbeat.yml b/x-pack/elastic-agent/pkg/agent/program/testdata/single_config-metricbeat.yml index 056233819f3..6342d2b5426 100644 --- a/x-pack/elastic-agent/pkg/agent/program/testdata/single_config-metricbeat.yml +++ b/x-pack/elastic-agent/pkg/agent/program/testdata/single_config-metricbeat.yml @@ -6,10 +6,11 @@ metricbeat: hosts: ["http://127.0.0.1:8080"] processors: - add_fields: + target: "stream" fields: - stream.type: metrics - stream.dataset: docker.status - stream.namespace: default + type: metrics + dataset: docker.status + namespace: default - module: apache metricsets: [info] index: metrics-generic-testing @@ -19,10 +20,11 @@ metricbeat: fields: should_be: first - add_fields: + target: "stream" fields: - stream.type: metrics - stream.dataset: generic - stream.namespace: testing + type: metrics + dataset: generic + namespace: testing output: elasticsearch: hosts: [127.0.0.1:9200, 127.0.0.1:9300] diff --git a/x-pack/elastic-agent/pkg/agent/transpiler/rules.go b/x-pack/elastic-agent/pkg/agent/transpiler/rules.go index 286c162bc0d..1b6a2c56cab 100644 --- a/x-pack/elastic-agent/pkg/agent/transpiler/rules.go +++ b/x-pack/elastic-agent/pkg/agent/transpiler/rules.go @@ -523,10 +523,11 @@ func (r *InjectStreamProcessorRule) Apply(ast *AST) error { } processorMap := &Dict{value: make([]Node, 0)} + processorMap.value = append(processorMap.value, &Key{name: "target", value: &StrVal{value: "stream"}}) processorMap.value = append(processorMap.value, &Key{name: "fields", value: &Dict{value: []Node{ - &Key{name: "stream.type", value: &StrVal{value: r.Type}}, - &Key{name: "stream.namespace", value: &StrVal{value: namespace}}, - &Key{name: "stream.dataset", value: &StrVal{value: dataset}}, + &Key{name: "type", value: &StrVal{value: r.Type}}, + &Key{name: "namespace", value: &StrVal{value: namespace}}, + &Key{name: "dataset", value: &StrVal{value: dataset}}, }}}) addFieldsMap := &Dict{value: []Node{&Key{"add_fields", processorMap}}} diff --git 
a/x-pack/elastic-agent/pkg/core/plugin/app/app.go b/x-pack/elastic-agent/pkg/core/plugin/app/app.go index dcfd33851e4..be361354a5e 100644 --- a/x-pack/elastic-agent/pkg/core/plugin/app/app.go +++ b/x-pack/elastic-agent/pkg/core/plugin/app/app.go @@ -139,7 +139,7 @@ func (a *Application) Stop() { } // cleanup drops - a.monitor.Cleanup() + a.monitor.Cleanup(a.name, a.pipelineID) } } @@ -199,7 +199,7 @@ func (a *Application) waitProc(proc *os.Process) <-chan *os.ProcessState { } func (a *Application) reportCrash(ctx context.Context) { - a.monitor.Cleanup() + a.monitor.Cleanup(a.name, a.pipelineID) // TODO: reporting crash if a.failureReporter != nil { diff --git a/x-pack/elastic-agent/pkg/core/plugin/app/monitoring/beats/beats_monitor.go b/x-pack/elastic-agent/pkg/core/plugin/app/monitoring/beats/beats_monitor.go index 8c245960a0a..f1fb92d3a71 100644 --- a/x-pack/elastic-agent/pkg/core/plugin/app/monitoring/beats/beats_monitor.go +++ b/x-pack/elastic-agent/pkg/core/plugin/app/monitoring/beats/beats_monitor.go @@ -12,64 +12,89 @@ import ( "unicode" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/artifact" + "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/config" + monitoringConfig "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/core/plugin/app/monitoring/config" ) const httpPlusPrefix = "http+" +type wrappedConfig struct { + MonitoringConfig *monitoringConfig.MonitoringConfig `config:"settings.monitoring" yaml:"settings.monitoring"` +} + // Monitor is a monitoring interface providing information about the way // how beat is monitored type Monitor struct { - pipelineID string - - process string - monitoringEndpoint string - loggingPath string - loggingFile string - - monitorLogs bool - monitorMetrics bool + operatingSystem string + config *monitoringConfig.MonitoringConfig + installPath string } // NewMonitor creates a beats monitor. -func NewMonitor(process, pipelineID string, downloadConfig *artifact.Config, monitorLogs, monitorMetrics bool) *Monitor { - var monitoringEndpoint, loggingPath, loggingFile string - - if monitorMetrics { - monitoringEndpoint = getMonitoringEndpoint(process, downloadConfig.OS(), pipelineID) +func NewMonitor(downloadConfig *artifact.Config) *Monitor { + return &Monitor{ + operatingSystem: downloadConfig.OS(), + installPath: downloadConfig.InstallPath, + config: &monitoringConfig.MonitoringConfig{}, } - if monitorLogs { - operatingSystem := downloadConfig.OS() - loggingFile = getLoggingFile(process, operatingSystem, downloadConfig.InstallPath, pipelineID) - loggingPath = filepath.Dir(loggingFile) +} + +// Reload reloads state of the monitoring based on config. +func (b *Monitor) Reload(rawConfig *config.Config) error { + cfg := &wrappedConfig{} + if err := rawConfig.Unpack(&cfg); err != nil { + return err } - return &Monitor{ - pipelineID: pipelineID, - process: process, - monitoringEndpoint: monitoringEndpoint, - loggingPath: loggingPath, - loggingFile: loggingFile, - monitorLogs: monitorLogs, - monitorMetrics: monitorMetrics, + if cfg == nil || cfg.MonitoringConfig == nil { + b.config = &monitoringConfig.MonitoringConfig{} + } else { + b.config = cfg.MonitoringConfig } + + return nil +} + +// IsMonitoringEnabled returns true if monitoring is enabled. +func (b *Monitor) IsMonitoringEnabled() bool { return b.config.Enabled } + +// WatchLogs returns true if monitoring is enabled and monitor should watch logs. 
+func (b *Monitor) WatchLogs() bool { return b.config.Enabled && b.config.MonitorLogs } + +// WatchMetrics returns true if monitoring is enabled and monitor should watch metrics. +func (b *Monitor) WatchMetrics() bool { return b.config.Enabled && b.config.MonitorMetrics } + +func (b *Monitor) generateMonitoringEndpoint(process, pipelineID string) string { + return getMonitoringEndpoint(process, b.operatingSystem, pipelineID) +} + +func (b *Monitor) generateLoggingFile(process, pipelineID string) string { + return getLoggingFile(process, b.operatingSystem, b.installPath, pipelineID) +} + +func (b *Monitor) generateLoggingPath(process, pipelineID string) string { + return filepath.Dir(b.generateLoggingFile(process, pipelineID)) + } // EnrichArgs enriches arguments provided to application, in order to enable // monitoring -func (b *Monitor) EnrichArgs(args []string) []string { +func (b *Monitor) EnrichArgs(process, pipelineID string, args []string) []string { appendix := make([]string, 0, 7) - if b.monitoringEndpoint != "" { + monitoringEndpoint := b.generateMonitoringEndpoint(process, pipelineID) + if monitoringEndpoint != "" { appendix = append(appendix, "-E", "http.enabled=true", - "-E", "http.host="+b.monitoringEndpoint, + "-E", "http.host="+monitoringEndpoint, ) } - if b.loggingPath != "" { + loggingPath := b.generateLoggingPath(process, pipelineID) + if loggingPath != "" { appendix = append(appendix, - "-E", "logging.files.path="+b.loggingPath, - "-E", "logging.files.name="+b.process, + "-E", "logging.files.path="+loggingPath, + "-E", "logging.files.name="+process, "-E", "logging.files.keepfiles=7", "-E", "logging.files.permission=0644", "-E", "logging.files.interval=1h", @@ -80,9 +105,9 @@ func (b *Monitor) EnrichArgs(args []string) []string { } // Cleanup removes -func (b *Monitor) Cleanup() error { +func (b *Monitor) Cleanup(process, pipelineID string) error { // do not cleanup logs, they might not be all processed - drop := b.monitoringDrop() + drop := b.monitoringDrop(process, pipelineID) if drop == "" { return nil } @@ -91,9 +116,9 @@ func (b *Monitor) Cleanup() error { } // Prepare executes steps in order for monitoring to work correctly -func (b *Monitor) Prepare(uid, gid int) error { - drops := []string{b.loggingPath} - if drop := b.monitoringDrop(); drop != "" { +func (b *Monitor) Prepare(process, pipelineID string, uid, gid int) error { + drops := []string{b.generateLoggingPath(process, pipelineID)} + if drop := b.monitoringDrop(process, pipelineID); drop != "" { drops = append(drops, drop) } @@ -124,31 +149,31 @@ func (b *Monitor) Prepare(uid, gid int) error { // LogPath describes a path where application stores logs. Empty if // application is not monitorable -func (b *Monitor) LogPath() string { - if !b.monitorLogs { +func (b *Monitor) LogPath(process, pipelineID string) string { + if !b.WatchLogs() { return "" } - return b.loggingFile + return b.generateLoggingFile(process, pipelineID) } // MetricsPath describes a location where application exposes metrics // collectable by metricbeat. -func (b *Monitor) MetricsPath() string { - if !b.monitorMetrics { +func (b *Monitor) MetricsPath(process, pipelineID string) string { + if !b.WatchMetrics() { return "" } - return b.monitoringEndpoint + return b.generateMonitoringEndpoint(process, pipelineID) } // MetricsPathPrefixed return metrics path prefixed with http+ prefix. 
-func (b *Monitor) MetricsPathPrefixed() string { - return httpPlusPrefix + b.MetricsPath() +func (b *Monitor) MetricsPathPrefixed(process, pipelineID string) string { + return httpPlusPrefix + b.MetricsPath(process, pipelineID) } -func (b *Monitor) monitoringDrop() string { - return monitoringDrop(b.monitoringEndpoint) +func (b *Monitor) monitoringDrop(process, pipelineID string) string { + return monitoringDrop(b.generateMonitoringEndpoint(process, pipelineID)) } func monitoringDrop(path string) (drop string) { diff --git a/x-pack/elastic-agent/pkg/core/plugin/app/monitoring/beats/drop_test.go b/x-pack/elastic-agent/pkg/core/plugin/app/monitoring/beats/drop_test.go index a4d06169ca8..5c2f6be7f19 100644 --- a/x-pack/elastic-agent/pkg/core/plugin/app/monitoring/beats/drop_test.go +++ b/x-pack/elastic-agent/pkg/core/plugin/app/monitoring/beats/drop_test.go @@ -15,22 +15,22 @@ type testCase struct { func TestMonitoringDrops(t *testing.T) { cases := []testCase{ - testCase{`/var/lib/drop/abc.sock`, "/var/lib/drop"}, - testCase{`npipe://drop`, ""}, - testCase{`http+npipe://drop`, ""}, - testCase{`\\.\pipe\drop`, ""}, - testCase{`unix:///var/lib/drop/abc.sock`, "/var/lib/drop"}, - testCase{`http+unix:///var/lib/drop/abc.sock`, "/var/lib/drop"}, - testCase{`file:///var/lib/drop/abc.sock`, "/var/lib/drop"}, - testCase{`http://localhost/stats`, ""}, - testCase{`localhost/stats`, ""}, - testCase{`http://localhost:8080/stats`, ""}, - testCase{`localhost:8080/stats`, ""}, - testCase{`http://1.2.3.4/stats`, ""}, - testCase{`http://1.2.3.4:5678/stats`, ""}, - testCase{`1.2.3.4:5678/stats`, ""}, - testCase{`http://hithere.com:5678/stats`, ""}, - testCase{`hithere.com:5678/stats`, ""}, + {`/var/lib/drop/abc.sock`, "/var/lib/drop"}, + {`npipe://drop`, ""}, + {`http+npipe://drop`, ""}, + {`\\.\pipe\drop`, ""}, + {`unix:///var/lib/drop/abc.sock`, "/var/lib/drop"}, + {`http+unix:///var/lib/drop/abc.sock`, "/var/lib/drop"}, + {`file:///var/lib/drop/abc.sock`, "/var/lib/drop"}, + {`http://localhost/stats`, ""}, + {`localhost/stats`, ""}, + {`http://localhost:8080/stats`, ""}, + {`localhost:8080/stats`, ""}, + {`http://1.2.3.4/stats`, ""}, + {`http://1.2.3.4:5678/stats`, ""}, + {`1.2.3.4:5678/stats`, ""}, + {`http://hithere.com:5678/stats`, ""}, + {`hithere.com:5678/stats`, ""}, } for _, c := range cases { diff --git a/x-pack/elastic-agent/pkg/core/plugin/app/monitoring/beats/monitoring.go b/x-pack/elastic-agent/pkg/core/plugin/app/monitoring/beats/monitoring.go index c551f5ef18c..7e6b820611c 100644 --- a/x-pack/elastic-agent/pkg/core/plugin/app/monitoring/beats/monitoring.go +++ b/x-pack/elastic-agent/pkg/core/plugin/app/monitoring/beats/monitoring.go @@ -6,16 +6,18 @@ package beats import ( "fmt" + + "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/application/paths" ) const ( - // args: pipeline name, application name - logFileFormat = "/var/log/elastic-agent/%s/%s" - // args: install path, pipeline name, application name - logFileFormatWin = "%s\\logs\\elastic-agent\\%s\\%s" + // args: data path, pipeline name, application name + logFileFormat = "%s/logs/%s/%s" + // args: data path, pipeline name, application name + logFileFormatWin = "%s\\logs\\%s\\%s" // args: pipeline name, application name - mbEndpointFileFormat = "unix:///var/run/elastic-agent/%s/%s/%s.sock" + mbEndpointFileFormat = "unix://%s/run/%s/%s/%s.sock" // args: pipeline name, application name mbEndpointFileFormatWin = `npipe:///%s-%s` ) @@ -25,13 +27,13 @@ func getMonitoringEndpoint(program, operatingSystem, pipelineID
string) string { return fmt.Sprintf(mbEndpointFileFormatWin, pipelineID, program) } - return fmt.Sprintf(mbEndpointFileFormat, pipelineID, program, program) + return fmt.Sprintf(mbEndpointFileFormat, paths.Data(), pipelineID, program, program) } func getLoggingFile(program, operatingSystem, installPath, pipelineID string) string { if operatingSystem == "windows" { - return fmt.Sprintf(logFileFormatWin, installPath, pipelineID, program) + return fmt.Sprintf(logFileFormatWin, paths.Data(), pipelineID, program) } - return fmt.Sprintf(logFileFormat, pipelineID, program) + return fmt.Sprintf(logFileFormat, paths.Data(), pipelineID, program) } diff --git a/x-pack/elastic-agent/pkg/core/plugin/app/monitoring/config.go b/x-pack/elastic-agent/pkg/core/plugin/app/monitoring/config/config.go similarity index 67% rename from x-pack/elastic-agent/pkg/core/plugin/app/monitoring/config.go rename to x-pack/elastic-agent/pkg/core/plugin/app/monitoring/config/config.go index c5e2eba6a57..7f5a5b4bffe 100644 --- a/x-pack/elastic-agent/pkg/core/plugin/app/monitoring/config.go +++ b/x-pack/elastic-agent/pkg/core/plugin/app/monitoring/config/config.go @@ -2,10 +2,11 @@ // or more contributor license agreements. Licensed under the Elastic License; // you may not use this file except in compliance with the Elastic License. -package monitoring +package config -// Config describes a configuration of a monitoring -type Config struct { +// MonitoringConfig describes a configuration of a monitoring +type MonitoringConfig struct { + Enabled bool `yaml:"enabled" config:"enabled"` MonitorLogs bool `yaml:"logs" config:"logs"` MonitorMetrics bool `yaml:"metrics" config:"metrics"` } diff --git a/x-pack/elastic-agent/pkg/core/plugin/app/monitoring/monitor.go b/x-pack/elastic-agent/pkg/core/plugin/app/monitoring/monitor.go index dd4744de8ee..67fd1a96aee 100644 --- a/x-pack/elastic-agent/pkg/core/plugin/app/monitoring/monitor.go +++ b/x-pack/elastic-agent/pkg/core/plugin/app/monitoring/monitor.go @@ -6,27 +6,35 @@ package monitoring import ( "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/artifact" + "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/config" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/core/plugin/app/monitoring/beats" - "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/core/plugin/app/monitoring/noop" ) // Monitor is a monitoring interface providing information about the way // how application is monitored type Monitor interface { - EnrichArgs([]string) []string - Prepare(uid, gid int) error - Cleanup() error - LogPath() string - MetricsPath() string - MetricsPathPrefixed() string + LogPath(process, pipelineID string) string + MetricsPath(process, pipelineID string) string + MetricsPathPrefixed(process, pipelineID string) string + + Prepare(process, pipelineID string, uid, gid int) error + EnrichArgs(string, string, []string) []string + Cleanup(process, pipelineID string) error + Reload(cfg *config.Config) error + IsMonitoringEnabled() bool + WatchLogs() bool + WatchMetrics() bool +} + +type wrappedConfig struct { + DownloadConfig *artifact.Config `yaml:"download" config:"download"` } // NewMonitor creates a monitor based on a process configuration. 
-func NewMonitor(isMonitorable bool, process, pipelineID string, downloadConfig *artifact.Config, monitorLogs, monitorMetrics bool) Monitor { - if !isMonitorable { - return noop.NewMonitor() +func NewMonitor(config *config.Config) (Monitor, error) { + cfg := &wrappedConfig{} + if err := config.Unpack(&cfg); err != nil { + return nil, err } - - // so far we support only beats monitoring - return beats.NewMonitor(process, pipelineID, downloadConfig, monitorLogs, monitorMetrics) + return beats.NewMonitor(cfg.DownloadConfig), nil } diff --git a/x-pack/elastic-agent/pkg/core/plugin/app/monitoring/noop/noop_monitor.go b/x-pack/elastic-agent/pkg/core/plugin/app/monitoring/noop/noop_monitor.go index 93e8c2c46dc..f3b49602f69 100644 --- a/x-pack/elastic-agent/pkg/core/plugin/app/monitoring/noop/noop_monitor.go +++ b/x-pack/elastic-agent/pkg/core/plugin/app/monitoring/noop/noop_monitor.go @@ -4,6 +4,8 @@ package noop +import "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/config" + // Monitor is a monitoring interface providing information about the way // how beat is monitored type Monitor struct { @@ -16,33 +18,45 @@ func NewMonitor() *Monitor { // EnrichArgs enriches arguments provided to application, in order to enable // monitoring -func (b *Monitor) EnrichArgs(args []string) []string { +func (b *Monitor) EnrichArgs(_ string, _ string, args []string) []string { return args } // Cleanup cleans up all drops. -func (b *Monitor) Cleanup() error { +func (b *Monitor) Cleanup(string, string) error { return nil } // Prepare executes steps in order for monitoring to work correctly -func (b *Monitor) Prepare(uid, gid int) error { +func (b *Monitor) Prepare(string, string, int, int) error { return nil } // LogPath describes a path where application stores logs. Empty if // application is not monitorable -func (b *Monitor) LogPath() string { +func (b *Monitor) LogPath(string, string) string { return "" } // MetricsPath describes a location where application exposes metrics // collectable by metricbeat. -func (b *Monitor) MetricsPath() string { +func (b *Monitor) MetricsPath(string, string) string { return "" } // MetricsPathPrefixed return metrics path prefixed with http+ prefix. -func (b *Monitor) MetricsPathPrefixed() string { +func (b *Monitor) MetricsPathPrefixed(string, string) string { return "" } + +// Reload reloads state based on configuration. +func (b *Monitor) Reload(cfg *config.Config) error { return nil } + +// IsMonitoringEnabled returns true if monitoring is configured. +func (b *Monitor) IsMonitoringEnabled() bool { return false } + +// WatchLogs returns true if monitoring is configured and monitoring logs is enabled. +func (b *Monitor) WatchLogs() bool { return false } + +// WatchMetrics returns true if monitoring is configured and monitoring metrics is enabled.
+func (b *Monitor) WatchMetrics() bool { return false } diff --git a/x-pack/elastic-agent/pkg/core/plugin/app/start.go b/x-pack/elastic-agent/pkg/core/plugin/app/start.go index e7f8c3f677a..9bfc40781e2 100644 --- a/x-pack/elastic-agent/pkg/core/plugin/app/start.go +++ b/x-pack/elastic-agent/pkg/core/plugin/app/start.go @@ -15,6 +15,7 @@ import ( "gopkg.in/yaml.v2" + "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/application/paths" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/errors" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/core/plugin/authority" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/core/plugin/process" @@ -57,7 +58,7 @@ func (a *Application) Start(ctx context.Context, cfg map[string]interface{}) (er } }() - if err := a.monitor.Prepare(a.uid, a.gid); err != nil { + if err := a.monitor.Prepare(a.name, a.pipelineID, a.uid, a.gid); err != nil { return err } @@ -80,7 +81,7 @@ func (a *Application) Start(ctx context.Context, cfg map[string]interface{}) (er a.limiter.Add() } - spec.Args = a.monitor.EnrichArgs(spec.Args) + spec.Args = a.monitor.EnrichArgs(a.name, a.pipelineID, spec.Args) // specify beat name to avoid data lock conflicts // as for https://github.com/elastic/beats/v7/pull/14030 more than one instance @@ -210,12 +211,7 @@ func (a *Application) checkGrpcHTTP(ctx context.Context, address string, ca *aut } func injectDataPath(args []string, pipelineID, id string) []string { - wd := "" - if w, err := os.Getwd(); err == nil { - wd = w - } - - dataPath := filepath.Join(wd, "data", pipelineID, id) + dataPath := filepath.Join(paths.Data(), pipelineID, id) return append(args, "-E", "path.data="+dataPath) } diff --git a/x-pack/elastic-agent/pkg/fleetapi/helper_test.go b/x-pack/elastic-agent/pkg/fleetapi/helper_test.go index 89b9e913a47..e9b2baed178 100644 --- a/x-pack/elastic-agent/pkg/fleetapi/helper_test.go +++ b/x-pack/elastic-agent/pkg/fleetapi/helper_test.go @@ -5,9 +5,8 @@ package fleetapi import ( - "net" "net/http" - "strconv" + "net/http/httptest" "strings" "testing" @@ -33,15 +32,9 @@ func authHandler(handler http.HandlerFunc, apiKey string) http.HandlerFunc { func withServer(m func(t *testing.T) *http.ServeMux, test func(t *testing.T, host string)) func(t *testing.T) { return func(t *testing.T) { - listener, err := net.Listen("tcp", ":0") - require.NoError(t, err) - defer listener.Close() - - port := listener.Addr().(*net.TCPAddr).Port - - go http.Serve(listener, m(t)) - - test(t, "localhost:"+strconv.Itoa(port)) + s := httptest.NewServer(m(t)) + defer s.Close() + test(t, s.Listener.Addr().String()) } } diff --git a/x-pack/elastic-agent/pkg/kibana/client_test.go b/x-pack/elastic-agent/pkg/kibana/client_test.go index c750c889d81..c8af7b7b151 100644 --- a/x-pack/elastic-agent/pkg/kibana/client_test.go +++ b/x-pack/elastic-agent/pkg/kibana/client_test.go @@ -9,9 +9,8 @@ import ( "context" "fmt" "io/ioutil" - "net" "net/http" - "strconv" + "net/http/httptest" "sync" "testing" @@ -272,15 +271,9 @@ func TestHTTPClient(t *testing.T) { func withServer(m func(t *testing.T) *http.ServeMux, test func(t *testing.T, host string)) func(t *testing.T) { return func(t *testing.T) { - listener, err := net.Listen("tcp", ":0") - require.NoError(t, err) - defer listener.Close() - - port := listener.Addr().(*net.TCPAddr).Port - - go http.Serve(listener, m(t)) - - test(t, "localhost:"+strconv.Itoa(port)) + s := httptest.NewServer(m(t)) + defer s.Close() + test(t, s.Listener.Addr().String()) } } diff --git a/x-pack/elastic-agent/spec/filebeat.yml 
b/x-pack/elastic-agent/spec/filebeat.yml index 232ac9b12bb..0ed7bd422d4 100644 --- a/x-pack/elastic-agent/spec/filebeat.yml +++ b/x-pack/elastic-agent/spec/filebeat.yml @@ -1,6 +1,6 @@ name: Filebeat cmd: filebeat -args: ["-E", "setup.ilm.enabled=false", "-E", "setup.template.enabled=false", "-E", "management.mode=x-pack-fleet", "-E", "management.enabled=true"] +args: ["-E", "setup.ilm.enabled=false", "-E", "setup.template.enabled=false", "-E", "management.mode=x-pack-fleet", "-E", "management.enabled=true", "-E", "logging.level=debug"] configurable: grpc rules: - inject_index: @@ -60,6 +60,14 @@ rules: - docker - redis - syslog + - s3 + - netflow + - httpjson + - o365audit + - azureeventhub + - cloudfoundry + - googlepubsub + - kafka - filter_values: selector: inputs diff --git a/x-pack/elastic-agent/spec/metricbeat.yml b/x-pack/elastic-agent/spec/metricbeat.yml index b9085c8fbb6..3dc7f6507d5 100644 --- a/x-pack/elastic-agent/spec/metricbeat.yml +++ b/x-pack/elastic-agent/spec/metricbeat.yml @@ -1,6 +1,6 @@ name: Metricbeat cmd: metricbeat -args: ["-E", "setup.ilm.enabled=false", "-E", "setup.template.enabled=false", "-E", "management.mode=x-pack-fleet", "-E", "management.enabled=true"] +args: ["-E", "setup.ilm.enabled=false", "-E", "setup.template.enabled=false", "-E", "management.mode=x-pack-fleet", "-E", "management.enabled=true", "-E", "logging.level=debug"] configurable: grpc post_install: - move_file: diff --git a/x-pack/filebeat/Makefile b/x-pack/filebeat/Makefile index 56633e2b3e5..019d3b9309a 100644 --- a/x-pack/filebeat/Makefile +++ b/x-pack/filebeat/Makefile @@ -1,3 +1,3 @@ ES_BEATS ?= ../.. -include $(ES_BEATS)/dev-tools/make/xpack.mk +include $(ES_BEATS)/dev-tools/make/mage.mk diff --git a/x-pack/filebeat/docs/inputs/input-azure-eventhub.asciidoc b/x-pack/filebeat/docs/inputs/input-azure-eventhub.asciidoc index 15b628169ce..ac91fb476d6 100644 --- a/x-pack/filebeat/docs/inputs/input-azure-eventhub.asciidoc +++ b/x-pack/filebeat/docs/inputs/input-azure-eventhub.asciidoc @@ -28,6 +28,8 @@ Example configuration: storage_account: "azureeph" storage_account_key: "....." storage_account_container: "" + resource_manager_endpoint: "" + ---- ==== Configuration options @@ -36,7 +38,7 @@ The `azure-eventhub` input supports the following configuration: ==== `eventhub` -The name of the eventhub users would like to read from. +The name of the eventhub users would like to read from. This field is required. ==== `consumer_group` @@ -50,14 +52,23 @@ A Blob Storage account is required in order to store/retrieve/update the offset ==== `storage_account` -The name of the storage account. +The name of the storage account. Required. ==== `storage_account_key` -The storage account key, this key will be used to authorize access to data in your storage account. +The storage account key. This key will be used to authorize access to data in your storage account. This option is required. ==== `storage_account_container` Optional, the name of the storage account container you would like to store the offset information in. +==== `resource_manager_endpoint` + +Optional. By default the Azure public environment is used; to override it, users can provide a specific resource manager endpoint in order to use a different Azure environment. +Examples: +https://management.chinacloudapi.cn/ for Azure ChinaCloud +https://management.microsoftazure.de/ for Azure GermanCloud +https://management.azure.com/ for Azure PublicCloud +https://management.usgovcloudapi.net/ for Azure USGovernmentCloud +Users can also use this option in a Hybrid Cloud model, where one may define their own endpoints.
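The endpoint resolution described above is implemented by getAzureEnvironment in the eph.go hunk later in this patch. A minimal stand-alone sketch of that lookup follows (the name resolveEnvironment and the main function are illustrative only):

package main

import (
	"fmt"

	"github.com/Azure/go-autorest/autorest/azure"
)

// environments maps the documented resource manager endpoints to the
// predefined Azure cloud environments, mirroring the table added in eph.go.
var environments = map[string]azure.Environment{
	azure.ChinaCloud.ResourceManagerEndpoint:        azure.ChinaCloud,
	azure.GermanCloud.ResourceManagerEndpoint:       azure.GermanCloud,
	azure.PublicCloud.ResourceManagerEndpoint:       azure.PublicCloud,
	azure.USGovernmentCloud.ResourceManagerEndpoint: azure.USGovernmentCloud,
}

// resolveEnvironment applies the documented order: an empty endpoint falls
// back to the public cloud, known endpoints map to a predefined environment,
// and any other URL is treated as a hybrid cloud resource manager endpoint.
func resolveEnvironment(endpoint string) (azure.Environment, error) {
	if endpoint == "" {
		return azure.PublicCloud, nil
	}
	if env, ok := environments[endpoint]; ok {
		return env, nil
	}
	return azure.EnvironmentFromURL(endpoint)
}

func main() {
	env, err := resolveEnvironment("https://management.microsoftazure.de/")
	if err != nil {
		panic(err)
	}
	fmt.Println(env.Name) // AzureGermanCloud
}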
diff --git a/x-pack/filebeat/filebeat.reference.yml b/x-pack/filebeat/filebeat.reference.yml index 068e332a5a7..9ac62c5e34f 100644 --- a/x-pack/filebeat/filebeat.reference.yml +++ b/x-pack/filebeat/filebeat.reference.yml @@ -306,15 +306,15 @@ filebeat.modules: activitylogs: enabled: true var: - # Eventhub name containing the activity logs, overwrite he default value if the logs are exported in a different eventhub + # eventhub name containing the activity logs, overwrite the default value if the logs are exported in a different eventhub eventhub: "insights-operational-logs" - # Consumer group name that has access to the event hub, we advise creating a dedicated consumer group for the azure module + # consumer group name that has access to the event hub, we advise creating a dedicated consumer group for the azure module consumer_group: "$Default" # the connection string required to communicate with Event Hubs, steps to generate one here https://docs.microsoft.com/en-us/azure/event-hubs/event-hubs-get-connection-string connection_string: "" - # the name of the storage account the state/offsets will be stored and updated. + # the name of the storage account in which the state/offsets will be stored and updated storage_account: "" - #The storage account key, this key will be used to authorize access to data in your storage account. + # the storage account key; this key will be used to authorize access to data in your storage account storage_account_key: "" auditlogs: @@ -1970,6 +1970,27 @@ output.elasticsearch: # The pin is a base64 encoded string of the SHA-256 fingerprint. #ssl.ca_sha256: "" + # Enable Kerberos support. Kerberos is automatically enabled if any Kerberos setting is set. + #kerberos.enabled: true + + # Authentication type to use with Kerberos. Available options: keytab, password. + #kerberos.auth_type: password + + # Path to the keytab file. It is used when auth_type is set to keytab. + #kerberos.keytab: /etc/elastic.keytab + + # Path to the Kerberos configuration. + #kerberos.config_path: /etc/krb5.conf + + # Name of the Kerberos user. + #kerberos.username: elastic + + # Password of the Kerberos user. It is used when auth_type is set to password. + #kerberos.password: changeme + + # Kerberos realm. + #kerberos.realm: ELASTIC + #----------------------------- Logstash output --------------------------------- #output.logstash: # Boolean flag to enable or disable the output module. @@ -2238,6 +2259,9 @@ output.elasticsearch: # never, once, and freely. Default is never. #ssl.renegotiation: never + # Enable Kerberos support. Kerberos is automatically enabled if any Kerberos setting is set. + #kerberos.enabled: true + # Authentication type to use with Kerberos. Available options: keytab, password. #kerberos.auth_type: password @@ -2824,6 +2848,27 @@ logging.files: # never, once, and freely. Default is never. #ssl.renegotiation: never + # Enable Kerberos support. Kerberos is automatically enabled if any Kerberos setting is set. + #kerberos.enabled: true + + # Authentication type to use with Kerberos. Available options: keytab, password. + #kerberos.auth_type: password + + # Path to the keytab file. It is used when auth_type is set to keytab. + #kerberos.keytab: /etc/elastic.keytab + + # Path to the Kerberos configuration. + #kerberos.config_path: /etc/krb5.conf + + # Name of the Kerberos user. + #kerberos.username: elastic + + # Password of the Kerberos user. It is used when auth_type is set to password. + #kerberos.password: changeme + + # Kerberos realm. + #kerberos.realm: ELASTIC + #metrics.period: 10s #state.period: 1m
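For orientation, the kerberos.* keys introduced above map naturally onto a small settings struct. The type below is a hypothetical mirror for illustration only (libbeat's actual type, tags, and validation may differ); Validate enforces the keytab/password pairing the comments describe:

package main

import "fmt"

// KerberosConfig is a hypothetical mirror of the kerberos.* keys shown in the
// reference config above; it is not the real libbeat type.
type KerberosConfig struct {
	Enabled    bool   `config:"enabled"`     // kerberos.enabled
	AuthType   string `config:"auth_type"`   // "keytab" or "password"
	KeyTabPath string `config:"keytab"`      // used when auth_type is "keytab"
	ConfigPath string `config:"config_path"` // path to krb5.conf
	Username   string `config:"username"`
	Password   string `config:"password"` // used when auth_type is "password"
	Realm      string `config:"realm"`
}

// Validate checks the pairing the reference comments describe: a keytab path
// for auth_type=keytab, a password for auth_type=password.
func (c *KerberosConfig) Validate() error {
	if !c.Enabled {
		return nil
	}
	switch c.AuthType {
	case "keytab":
		if c.KeyTabPath == "" {
			return fmt.Errorf("kerberos.keytab is required when auth_type is keytab")
		}
	case "password":
		if c.Password == "" {
			return fmt.Errorf("kerberos.password is required when auth_type is password")
		}
	default:
		return fmt.Errorf("invalid kerberos.auth_type %q", c.AuthType)
	}
	return nil
}

func main() {
	cfg := KerberosConfig{Enabled: true, AuthType: "password", Username: "elastic", Password: "changeme", Realm: "ELASTIC"}
	fmt.Println(cfg.Validate()) // <nil>
}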
diff --git a/x-pack/filebeat/input/azureeventhub/config.go b/x-pack/filebeat/input/azureeventhub/config.go index b567b25e35b..0521d3a76e6 100644 --- a/x-pack/filebeat/input/azureeventhub/config.go +++ b/x-pack/filebeat/input/azureeventhub/config.go @@ -17,6 +17,8 @@ type azureInputConfig struct { SAName string `config:"storage_account"` SAKey string `config:"storage_account_key"` SAContainer string `config:"storage_account_container"` + // by default the azure public environment is used; to override it, users can provide a specific resource manager endpoint + OverrideEnvironment string `config:"resource_manager_endpoint"` } const ephContainerName = "filebeat" diff --git a/x-pack/filebeat/input/azureeventhub/eph.go b/x-pack/filebeat/input/azureeventhub/eph.go index 8848483c8be..bab54a45223 100644 --- a/x-pack/filebeat/input/azureeventhub/eph.go +++ b/x-pack/filebeat/input/azureeventhub/eph.go @@ -16,6 +16,14 @@ import ( "github.com/Azure/go-autorest/autorest/azure" ) +// users can select from one of the already defined azure cloud envs +var environments = map[string]azure.Environment{ + azure.ChinaCloud.ResourceManagerEndpoint: azure.ChinaCloud, + azure.GermanCloud.ResourceManagerEndpoint: azure.GermanCloud, + azure.PublicCloud.ResourceManagerEndpoint: azure.PublicCloud, + azure.USGovernmentCloud.ResourceManagerEndpoint: azure.USGovernmentCloud, +} + // runWithEPH will consume ingested events using the Event Processor Host (EPH) https://github.com/Azure/azure-event-hubs-go#event-processor-host, https://docs.microsoft.com/en-us/azure/event-hubs/event-hubs-event-processor-host func (a *azureInput) runWithEPH() error { // create a new Azure Storage Leaser / Checkpointer @@ -23,7 +31,11 @@ func (a *azureInput) runWithEPH() error { if err != nil { return err } - leaserCheckpointer, err := storage.NewStorageLeaserCheckpointer(cred, a.config.SAName, a.config.SAContainer, azure.PublicCloud) + env, err := getAzureEnvironment(a.config.OverrideEnvironment) + if err != nil { + return err + } + leaserCheckpointer, err := storage.NewStorageLeaserCheckpointer(cred, a.config.SAName, a.config.SAContainer, env) if err != nil { return err } @@ -74,3 +86,15 @@ func (a *azureInput) runWithEPH() error { } return nil } + +func getAzureEnvironment(overrideResManager string) (azure.Environment, error) { + // if no override is set then the azure public cloud is used + if overrideResManager == "" { + return azure.PublicCloud, nil + } + if env, ok := environments[overrideResManager]; ok { + return env, nil + } + // a hybrid env can be retrieved from the resource manager endpoint + return azure.EnvironmentFromURL(overrideResManager) +} diff --git a/x-pack/filebeat/input/azureeventhub/eph_test.go b/x-pack/filebeat/input/azureeventhub/eph_test.go index 3a0ac99db7d..b48499eb7c4 100644 --- a/x-pack/filebeat/input/azureeventhub/eph_test.go +++ b/x-pack/filebeat/input/azureeventhub/eph_test.go @@ -7,6 +7,8 @@ package azureeventhub import ( "testing" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/stretchr/testify/assert" ) @@ -26,3 +28,17 @@ func TestRunWithEPH(t *testing.T) { err := input.runWithEPH() assert.Error(t, err, '7') } + +func
TestGetAzureEnvironment(t *testing.T) { + resMan := "" + env, err := getAzureEnvironment(resMan) + assert.NoError(t, err) + assert.Equal(t, env, azure.PublicCloud) + resMan = "https://management.microsoftazure.de/" + env, err = getAzureEnvironment(resMan) + assert.NoError(t, err) + assert.Equal(t, env, azure.GermanCloud) + resMan = "http://management.invalidhybrid.com/" + env, err = getAzureEnvironment(resMan) + assert.Errorf(t, err, "invalid character 'F' looking for beginning of value") +} diff --git a/x-pack/filebeat/input/googlepubsub/pubsub_test.go b/x-pack/filebeat/input/googlepubsub/pubsub_test.go index 17863862c8a..58d4db9331c 100644 --- a/x-pack/filebeat/input/googlepubsub/pubsub_test.go +++ b/x-pack/filebeat/input/googlepubsub/pubsub_test.go @@ -208,7 +208,7 @@ func defaultTestConfig() *common.Config { } func isInDockerIntegTestEnv() bool { - return os.Getenv("BEATS_DOCKER_INTEGRATION_TEST_ENV") != "" + return os.Getenv("BEATS_INSIDE_INTEGRATION_TEST_ENV") != "" } func runTest(t *testing.T, cfg *common.Config, run func(client *pubsub.Client, input *pubsubInput, out *stubOutleter, t *testing.T)) { diff --git a/x-pack/filebeat/magefile.go b/x-pack/filebeat/magefile.go index e25385fb0a3..66d90e26e80 100644 --- a/x-pack/filebeat/magefile.go +++ b/x-pack/filebeat/magefile.go @@ -22,7 +22,7 @@ import ( // mage:import generate _ "github.com/elastic/beats/v7/filebeat/scripts/mage/generate" // mage:import - "github.com/elastic/beats/v7/dev-tools/mage/target/unittest" + _ "github.com/elastic/beats/v7/dev-tools/mage/target/unittest" // mage:import "github.com/elastic/beats/v7/dev-tools/mage/target/test" ) @@ -35,12 +35,6 @@ func init() { devtools.BeatLicense = "Elastic License" } -// Aliases provides compatibility with CI while we transition all Beats -// to having common testing targets. -var Aliases = map[string]interface{}{ - "goTestUnit": unittest.GoUnitTest, // dev-tools/jenkins_ci.ps1 uses this. -} - // Build builds the Beat binary. func Build() error { return devtools.Build(devtools.DefaultBuildArgs()) @@ -154,8 +148,6 @@ func includeList() error { // IntegTest executes integration tests (it uses Docker to run the tests). func IntegTest() { - devtools.AddIntegTestUsage() - defer devtools.StopIntegTestEnv() mg.SerialDeps(GoIntegTest, PythonIntegTest) } @@ -163,7 +155,11 @@ func IntegTest() { // Use TEST_COVERAGE=true to enable code coverage profiling. // Use RACE_DETECTOR=true to enable the race detector. func GoIntegTest(ctx context.Context) error { - return devtools.RunIntegTest("goIntegTest", func() error { + runner, err := devtools.NewDockerIntegrationRunner() + if err != nil { + return err + } + return runner.Test("goIntegTest", func() error { return devtools.GoTest(ctx, devtools.DefaultGoTestIntegrationArgs()) }) } @@ -176,10 +172,14 @@ func PythonIntegTest(ctx context.Context) error { if !devtools.IsInIntegTestEnv() { mg.Deps(Fields) } - return devtools.RunIntegTest("pythonIntegTest", func() error { + runner, err := devtools.NewDockerIntegrationRunner(append(devtools.ListMatchingEnvVars("TESTING_FILEBEAT_", "NOSE_"), "GENERATE")...) 
+ if err != nil { + return err + } + return runner.Test("pythonIntegTest", func() error { mg.Deps(devtools.BuildSystemTestBinary) args := devtools.DefaultPythonTestIntegrationArgs() args.Env["MODULES_PATH"] = devtools.CWD("module") return devtools.PythonNoseTest(args) - }, "GENERATE", "TESTING_FILEBEAT_MODULES", "TESTING_FILEBEAT_FILESETS") + }) } diff --git a/x-pack/filebeat/module/activemq/_meta/docs.asciidoc b/x-pack/filebeat/module/activemq/_meta/docs.asciidoc index 6e7d6d74551..f632747c8a4 100644 --- a/x-pack/filebeat/module/activemq/_meta/docs.asciidoc +++ b/x-pack/filebeat/module/activemq/_meta/docs.asciidoc @@ -5,8 +5,6 @@ == ActiveMQ module -ga[] - This module parses Apache ActiveMQ logs. It supports application and audit logs. include::../include/what-happens.asciidoc[] diff --git a/x-pack/filebeat/module/aws/cloudtrail/_meta/fields.epr.yml b/x-pack/filebeat/module/aws/cloudtrail/_meta/fields.epr.yml new file mode 100644 index 00000000000..91c417b502a --- /dev/null +++ b/x-pack/filebeat/module/aws/cloudtrail/_meta/fields.epr.yml @@ -0,0 +1,45 @@ +- name: event.action + type: keyword + description: The action captured by the event. +- name: event.original + type: keyword + description: Raw text message of entire event. Used to demonstrate log integrity. +- name: user.name + type: keyword + description: Short name or login of the user. +- name: user.id + type: keyword + description: Unique identifier of the user. +- name: cloud.account.id + type: keyword + description: The cloud account or organization id used to identify different entities in a multi-tenant environment. +- name: event.provider + type: keyword + description: Source of the event. +- name: cloud.region + type: keyword + description: Region in which this host is running. +- name: source.address + type: keyword + description: Some event source addresses are defined ambiguously. The event will sometimes list an IP, a domain or a unix socket. You should always store the raw address in the .address field. +- name: source.ip + type: ip + description: IP address of the source (IPv4 or IPv6). +- name: user_agent.device.name + type: keyword + description: Name of the device. +- name: user_agent.name + type: keyword + description: Name of the user agent. +- name: user_agent.original + type: keyword + description: Unparsed user_agent string. +- name: related.user + type: keyword + description: All the user names seen on your event. +- name: event.kind + type: keyword + description: Event kind (e.g. event, alert, metric, state, pipeline_error, signal) +- name: event.type + type: keyword + description: Event type (e.g. info, error) diff --git a/x-pack/filebeat/module/aws/cloudwatch/_meta/fields.yml b/x-pack/filebeat/module/aws/cloudwatch/_meta/fields.yml index 844c13309d6..7d80e27ed15 100644 --- a/x-pack/filebeat/module/aws/cloudwatch/_meta/fields.yml +++ b/x-pack/filebeat/module/aws/cloudwatch/_meta/fields.yml @@ -5,3 +5,7 @@ description: > Fields for AWS CloudWatch logs. fields: + - name: message + type: text + description: > + CloudWatch log message.
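The pipeline.yml hunk that follows feeds the trimmed log text into the new aws.cloudwatch.message field while message keeps the full raw line, which is what the updated -expected.json fixtures assert. A rough Go approximation of that grok split (the regular expression is a simplified stand-in for the TIMESTAMP_ISO8601/SYSLOGTIMESTAMP grok patterns):

package main

import (
	"fmt"
	"regexp"
)

// A simplified stand-in for the grok patterns in the next hunk: capture the
// leading ISO8601 timestamp, optionally skip a syslog timestamp, and keep
// the remainder as aws.cloudwatch.message.
var cloudwatchLine = regexp.MustCompile(
	`^(\S+Z) (?:\w{3}\s+\d+ \d{2}:\d{2}:\d{2} )?(.*)$`)

func main() {
	event := map[string]string{
		"message": "2020-02-20T07:01:01.000Z Feb 20 07:01:01 ip-172-31-81-156 systemd: Stopping User Slice of root.",
	}
	if m := cloudwatchLine.FindStringSubmatch(event["message"]); m != nil {
		event["_tmp.timestamp"] = m[1]         // becomes @timestamp via the date processor
		event["aws.cloudwatch.message"] = m[2] // trimmed text
		// event["message"] keeps the full original line, matching the
		// updated -expected.json fixtures.
	}
	fmt.Println(event["aws.cloudwatch.message"])
	// Output: ip-172-31-81-156 systemd: Stopping User Slice of root.
}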
diff --git a/x-pack/filebeat/module/aws/cloudwatch/ingest/pipeline.yml b/x-pack/filebeat/module/aws/cloudwatch/ingest/pipeline.yml index d1f65f3ba85..ff7e20d1c3d 100644 --- a/x-pack/filebeat/module/aws/cloudwatch/ingest/pipeline.yml +++ b/x-pack/filebeat/module/aws/cloudwatch/ingest/pipeline.yml @@ -4,8 +4,8 @@ processors: - grok: field: message patterns: - - "%{TIMESTAMP_ISO8601:_tmp.timestamp} %{SYSLOGTIMESTAMP:_tmp.syslog_timestamp} %{GREEDYDATA:message}" - - "%{TIMESTAMP_ISO8601:_tmp.timestamp} %{GREEDYDATA:message}" + - "%{TIMESTAMP_ISO8601:_tmp.timestamp} %{SYSLOGTIMESTAMP:_tmp.syslog_timestamp} %{GREEDYDATA:aws.cloudwatch.message}" + - "%{TIMESTAMP_ISO8601:_tmp.timestamp} %{GREEDYDATA:aws.cloudwatch.message}" - date: field: '_tmp.timestamp' diff --git a/x-pack/filebeat/module/aws/cloudwatch/test/cloudwatch_ec2.log-expected.json b/x-pack/filebeat/module/aws/cloudwatch/test/cloudwatch_ec2.log-expected.json index 11d33c51e0b..bdc8b0c3a72 100644 --- a/x-pack/filebeat/module/aws/cloudwatch/test/cloudwatch_ec2.log-expected.json +++ b/x-pack/filebeat/module/aws/cloudwatch/test/cloudwatch_ec2.log-expected.json @@ -1,62 +1,68 @@ [ { "@timestamp": "2020-02-20T07:01:01.000Z", + "aws.cloudwatch.message": "ip-172-31-81-156 systemd: Stopping User Slice of root.", "event.dataset": "aws.cloudwatch", "event.module": "aws", "fileset.name": "cloudwatch", "input.type": "log", "log.offset": 0, - "message": "ip-172-31-81-156 systemd: Stopping User Slice of root.", + "message": "2020-02-20T07:01:01.000Z Feb 20 07:01:01 ip-172-31-81-156 systemd: Stopping User Slice of root.", "service.type": "aws" }, { "@timestamp": "2020-02-20T07:02:18.000Z", + "aws.cloudwatch.message": "ip-172-31-81-156 dhclient[3000]: XMT: Solicit on eth0, interval 125240ms.", "event.dataset": "aws.cloudwatch", "event.module": "aws", "fileset.name": "cloudwatch", "input.type": "log", "log.offset": 96, - "message": "ip-172-31-81-156 dhclient[3000]: XMT: Solicit on eth0, interval 125240ms.", + "message": "2020-02-20T07:02:18.000Z Feb 20 07:02:18 ip-172-31-81-156 dhclient[3000]: XMT: Solicit on eth0, interval 125240ms.", "service.type": "aws" }, { "@timestamp": "2020-02-20T07:02:37.000Z", + "aws.cloudwatch.message": "ip-172-31-81-156 dhclient[2898]: DHCPREQUEST on eth0 to 172.31.80.1 port 67 (xid=0x4575af22)", "event.dataset": "aws.cloudwatch", "event.module": "aws", "fileset.name": "cloudwatch", "input.type": "log", "log.offset": 211, - "message": "ip-172-31-81-156 dhclient[2898]: DHCPREQUEST on eth0 to 172.31.80.1 port 67 (xid=0x4575af22)", + "message": "2020-02-20T07:02:37.000Z Feb 20 07:02:37 ip-172-31-81-156 dhclient[2898]: DHCPREQUEST on eth0 to 172.31.80.1 port 67 (xid=0x4575af22)", "service.type": "aws" }, { "@timestamp": "2020-02-20T07:02:37.000Z", + "aws.cloudwatch.message": "ip-172-31-81-156 dhclient[2898]: DHCPACK from 172.31.80.1 (xid=0x4575af22)", "event.dataset": "aws.cloudwatch", "event.module": "aws", "fileset.name": "cloudwatch", "input.type": "log", "log.offset": 345, - "message": "ip-172-31-81-156 dhclient[2898]: DHCPACK from 172.31.80.1 (xid=0x4575af22)", + "message": "2020-02-20T07:02:37.000Z Feb 20 07:02:37 ip-172-31-81-156 dhclient[2898]: DHCPACK from 172.31.80.1 (xid=0x4575af22)", "service.type": "aws" }, { "@timestamp": "2020-02-20T07:02:37.000Z", + "aws.cloudwatch.message": "ip-172-31-81-156 dhclient[2898]: bound to 172.31.81.156 -- renewal in 1599 seconds.", "event.dataset": "aws.cloudwatch", "event.module": "aws", "fileset.name": "cloudwatch", "input.type": "log", "log.offset": 461, - "message": 
"ip-172-31-81-156 dhclient[2898]: bound to 172.31.81.156 -- renewal in 1599 seconds.", + "message": "2020-02-20T07:02:37.000Z Feb 20 07:02:37 ip-172-31-81-156 dhclient[2898]: bound to 172.31.81.156 -- renewal in 1599 seconds.", "service.type": "aws" }, { "@timestamp": "2020-02-20T07:02:37.000Z", + "aws.cloudwatch.message": "ip-172-31-81-156 ec2net: [get_meta] Trying to get http://169.254.169.254/latest/meta-data/network/interfaces/macs/12:e2:a9:95:8b:97/local-ipv4s", "event.dataset": "aws.cloudwatch", "event.module": "aws", "fileset.name": "cloudwatch", "input.type": "log", "log.offset": 586, - "message": "ip-172-31-81-156 ec2net: [get_meta] Trying to get http://169.254.169.254/latest/meta-data/network/interfaces/macs/12:e2:a9:95:8b:97/local-ipv4s", + "message": "2020-02-20T07:02:37.000Z Feb 20 07:02:37 ip-172-31-81-156 ec2net: [get_meta] Trying to get http://169.254.169.254/latest/meta-data/network/interfaces/macs/12:e2:a9:95:8b:97/local-ipv4s", "service.type": "aws" } ] \ No newline at end of file diff --git a/x-pack/filebeat/module/aws/ec2/_meta/fields.epr.yml b/x-pack/filebeat/module/aws/ec2/_meta/fields.epr.yml new file mode 100644 index 00000000000..3a22e7a7e80 --- /dev/null +++ b/x-pack/filebeat/module/aws/ec2/_meta/fields.epr.yml @@ -0,0 +1,3 @@ +- name: process.name + type: keyword + description: Process name. diff --git a/x-pack/filebeat/module/aws/elb/_meta/fields.epr.yml b/x-pack/filebeat/module/aws/elb/_meta/fields.epr.yml new file mode 100644 index 00000000000..f548842e70f --- /dev/null +++ b/x-pack/filebeat/module/aws/elb/_meta/fields.epr.yml @@ -0,0 +1,78 @@ +- name: destination.domain + type: keyword + description: Destination domain. +- name: event.start + type: date + description: event.start contains the date when the event started or when the activity was first observed. +- name: destination.bytes + type: long + description: Bytes sent from the destination to the source. +- name: http.response.status_code + type: long + description: HTTP response status code. +- name: http.request.body.bytes + type: long + description: Size in bytes of the request body. +- name: http.response.body.bytes + type: long + description: Size in bytes of the response body. +- name: http.request.method + type: keyword + description: HTTP request method. +- name: http.request.referrer + type: keyword + description: Referrer for this HTTP request. +- name: http.version + type: keyword + description: HTTP version. +- name: user_agent.original + type: keyword + description: Unparsed user_agent string. +- name: cloud.provider + type: keyword + description: Name of the cloud provider. Example values are aws, azure, gcp, or digitalocean. +- name: event.kind + type: keyword + description: Event kind (e.g. event, alert, metric, state, pipeline_error, sig +- name: event.category + type: keyword + description: Event category (e.g. database) +- name: event.outcome + type: keyword + description: This is one of four ECS Categorization Fields, and indicates the lowest level in the ECS category hierarchy. +- name: tracing.trace.id + type: keyword + description: Unique identifier of the trace. +- name: event.end + type: date + description: event.end contains the date when the event ended or when the activity was last observed. +- name: source.ip + type: ip + description: IP address of the source. +- name: source.as.number + type: long + description: Unique number allocated to the autonomous system. The autonomous system number (ASN) uniquely identifies each network on the Internet. 
+- name: source.as.organization.name + type: keyword + description: Organization name. +- name: source.geo.city_name + type: keyword + description: City name. +- name: source.geo.continent_name + type: keyword + description: Name of the continent. +- name: source.geo.country_iso_code + type: keyword + description: Country ISO code. +- name: source.geo.location + type: geo_point + description: Longitude and latitude. +- name: source.geo.region_iso_code + type: keyword + description: Region ISO code. +- name: source.geo.region_name + type: keyword + description: Region name. +- name: source.port + type: long + description: Port of the source. diff --git a/x-pack/filebeat/module/aws/elb/_meta/fields.yml b/x-pack/filebeat/module/aws/elb/_meta/fields.yml index 8e2597c2d09..9ddfb123901 100644 --- a/x-pack/filebeat/module/aws/elb/_meta/fields.yml +++ b/x-pack/filebeat/module/aws/elb/_meta/fields.yml @@ -98,5 +98,5 @@ The URL used if a redirection action was executed. - name: error.reason type: keyword - description: + description: > The error reason if the executed action failed. diff --git a/x-pack/filebeat/module/aws/fields.go b/x-pack/filebeat/module/aws/fields.go index b2ac0ac3729..22308cf2722 100644 --- a/x-pack/filebeat/module/aws/fields.go +++ b/x-pack/filebeat/module/aws/fields.go @@ -19,5 +19,5 @@ func init() { // AssetAws returns asset data. // This is the base64 encoded gzipped contents of module/aws. func AssetAws() string { - return "eJzcXN9z47Zzf7+/YicvX9+MpE4umU7HnXRG5/M1apyLa+mS9omBwJWEGgIYALRO99d3FgApUgQl25IuneohkUVy8dnF/sbyhvCI22tgG/sGwAkn8RrGf0zfABiUyCxewxwdewOQo+VGFE5odQ3/9gYA4FedlxJhoQ2smMqlUEuQemlhYfSayIzeACwEytxe+weGoNgaq+Xo47YFXsPS6LKIvyTWoc9HT6am7NcZxavNJZrLcKnL3BkmZH0ptSJ99rmtPjkuWCld5pe4hgWTFluXk2CbgLXxeG8Iy4ywtKCn4DdZwCdULntCY4VWrTsqTh5xu9Em37t2ABh9ZitsIor0QS/ArZAAhoUJ/Zq5URJaadFkIkflhNsmoe0LuQtsmERGlCeRMKDENUHhWjkmlIUcHRPSApvr0nm8tBroRYfWZPwrVADBrZiDNcvRP2LwrxKtGwBTOWxWgq+AG/T3MmlhgwY75EqL+QgmC3C4LrRhZtt5xt8z8CtUuO1Kbyys9IZ+7dDsENBz4hLz0d6tKSVp7gbJoHPxsI50tyNxQ9iRKGHPWM+WN6zb7Gvqy5F0FaOCMl6zr1rBA1pdGo7wia0RrsYPn95WAAsjFBcFk3t7zpmU+2JtoOYcrc0ecZuJFL5z4Q/rECGYfAgIN8x6xQGnwYqlampoP2CLlow2I8PAL64XcsoKnwt4smhi8UC9ODfCrRpmYJGXJqUS0FZxMrfaMDzrhdFPIkcLQgVfQ25oZ9mRxyTdWnTcIHOYe1frVtpic8nEo32m1BTuesEyVroVUeFEPXn3ca14rqAhascTkyWCsOAM/T+KX2vnnSJo452a/74hVnuJJT1TFNFuQ5m02suwxWvYXpYWO31+/TiGHJ8Ex38F7VZoNsLiIETHrsI25er3irQ2Z64PfJDpgRteIlAi4528E2uEzQqDdXV1tysxYW3ZdcQ7XoR60o+YZ/OU3p/LXdBSlWOjPMKiIbn3hTNb8hWwlNZD5Tlvb97BuHQappz5lC1mKLeSWSc4vEemrGPy8bjr8RIy/988T5JWDODPD9jwTE/TE7jhAs4lRsyoTYdNIKhSLzlm4UFrNyB39NmiGZAWPWh5xPzr0JyOsZdgWiiHRjFJ0TZy3swIm7F3iem9h7YuHWYxnf1cgrPxw6eKo7izV4xzXaqwLT5K+H0xWuLbXlIpMRxRjiMSCCC+3RbHBQMneqPs+Xe5rseM0SbjOt+32ecXY+map+nb/SJ18I8e0KArjbLej9H1Q/jWaC1bnhPi5DCYUPI0iNSm1g810soKZtgaHZr9qHWqSHeEByRMpraDqA3kty1FghBN+rLtHVJbaGUxiwHk3EAr+nWAojSNcXrGVoH+EYGvmFqihauQ8A66BWpB2Y43+RwlUuITiLxN88XyXNB1JjNf7ues1fs4lbVxTZ6yMNZI60NzobZMpR1tlotK0yFUaR25siiqwyrV8Tyn7lBIyz3gUP4uBNqW4oQAGw14jkItu7U+kxJzWKJCw5x/XthAuseYffMnkSCcZMlt/FVl39iSCmDe2CuDXJu8R40KcXKH6ijO8f2kblMxazUXu0rFX9/YcSFumJQdSp6DGfF5QNZrptjSW1+whXPaAbzXWiJTPWq0WSHVUA1pCwv7hggNhOGuPhNgeaaVTHfkTt6KHVZhQRekJ7QjBNgvPaSldxd6/anPV9J+9DVdwzFIYb3/qGnHRgvmINROtC9tq12ul1U3sMYPn7r12rNyqXPAGMfcaZcdVxKkTCpR3J214XhENN451ZZiqp0M9Zo/qLi+Zhs7jH536JFdU7AZ0qP+7x4N5KIQZOy9Aj7FYB6wMEgJRvBdbCdjb/sGOYon71+FPWTMka/gkbLY+L6cj63zT1puAEJxWeZCLWFDqJ0RyyWaEBbSTjY0WoIOlbKPqRUzmEeezir2f/88+dCIXvNt84zDaSiV+KtEua1Uqnk9zVA8cPLCYeuQpYecKnpxG/JIpyEXiwUa+iOcn7U/UQV6qoqngmeo8kKLc4tkb4d/v7+BaiGypnDyEXOY2NDwbRbPdjcG0fNOA1O+69csWurWU9Vmmv6Q5pVrZbXETOqlSOc
LrwkA8bTNFsjFQnACeRMWuqN14m6+1Pkfz48Po+4iT/vgXaJ8683oA6XLVAQc5+EQH01e1nouJPbkaW1O5iFbObEwTyQNLW4o8fdaFpDBIWQVE15nMqe/QV/h88NdZwcOC3jBstIeOSi4mGjXpXQCFow7Kh13zXzyVJuefjDEM03IS0NePslq6yx9w1yrJ/h3nqX/QViOnqXXpRR/9/fjvr15F+YjhGrw8NxRAFFkLM8N2tdnAMnaNjRI0UGkvssDfVTARvumlqacP1+aS/Y6Yd29f9GkRCfXO1kwzaMXqVkOcyaZ4tjTzzqpTk8CaJ66twB4IT29gzv68X38sSe3cMws0WV+e0bdcuZEiI0edFgo6MFuHuhoc40KN+wWGifi8toTKXeSbqQkRCHvr1ALo53mej8rPBFURTW9p1cr5wrQBhwvehp2dcvUaKpthVpmTqxxZJEnkS6kZvudjOfonXZMhgNTocAi14pyK6E47kkvtORCd07YnYxL5YQE0UqQGRhc0pZQCTFn/BFVT0MpXvw/xGaDDboSAYITUrZ+sI4ZZ2OXkkqnIw3lv5nDuvHc3Lu64mlx2d5LLx4pegvXnegCX+t0zJK60yZ9GVfRkJo7pWAtpBSR2UHkNsDXBSpsMcSltvtnt7XnlDYjP2ZX7BEvy0c1lzG7m0K9JAma63XhO/l7fIFOaOmKWZgjKkDr2FwKu+pjrTI/sV+xnOjhJvf7WUSlRDtND12PYx64Qlho8/pebNoLa+OqTtKp6Mhlj+ozCeuYK+1pp4NJyIEwEOGueV41r3YNNDxA8ZDs12kTOqJ/9kL/M921sTLjolidO1BPp3cQ6IZaRCgygn/yP9eb0JPaEKbLRGpavo7WL8bFV9qiyjgad9GMK6wDtI5Y+NEtiH3HcNLdUILXwrdoBDuzcANNUOV6jubCvAjF9dqHV2kzJvHczoSKpyWaeLKjF96D+3UaEXW+TaXwlR0HrvwRdeS4PxzRtzxLtZpO5INge+Ihf+9BYBjHU/qTyaX9/KpydcT487+G4/VXNZzRasNJ/ieskOV9VdeaSmjMM1NKSqeETkyDnZy2B6q7Tfa5UynrKTwPoT2IJxaU8NI98XLP2WlIkvAL8rI7ZXoi8NiproiH6cdWqNtNdFwttNkwkw9gIb5gPqwiw6A1Gjoajd6OYOKAM1VN5IPFJzRMBvH02KHBXBjkLivNmb3J54e76KG9xOM6/piYV+2vWgQHhmZGBpl99Ql2ElqY4wl0q3GeejMiugUTsomrjm0/hPPLi/dZpj/4Fj6aajL9JU2Xeckf0WWpk8JTvQJTWgnOZBh23h1P+rX25t4CjJ5UzV+7XGco0A+eYH9iNdoa7faSCdU/X7TWDrOeZLzz83MsvyiYCbHy2c096DYavtmm1uv54SUGQ6+epWqPpVfnVBcb/umAHoN1vjfeOlWsT7jSh4rIdmdqaaj1fMR5xbubx/CtrxxWYYwdcuSSUeLPLEx/G9+P6jsH8HA7nY1+ns3uszW6lc5H1fm/HzwawB+376eT2e2hW7SB9+PZzc+jD7d3t7Pb0W/v/+P2ZpZm/RHPHJ2/e8Ttd83xsV0MptCAimqa3IP8bvhd5YZ3oso1hgk0RyU38y/m1JODhzWtNOK8vDwEwsPPD5MWRyT72rHE4c40NKrqslDMnbFToco1GsEDjma5uRsZOTCVd4aZ2XQlVJvhrQ+0NzrH5j4rHSOw5rw0prcXsnVoM9s38/VqicXipu6x+XV8gT4A/FKNVniR7hrDT2go222y8RWN7nEj8/+hlMqKr2nJntKWIqJ1yPXrUN3lNbC3K+Kf9I2yi0iy1f/aD7VCwUKK5co1JjZ8WvMPCwUaW1BS+NSjoa40KmNGlyr/ZvCZayiwLShYN1LzrS7NsaHkBRpz7gjdKnG8bj7EdWIVdrxO9e8Fs+UpM5TPhPbZohmOaaWDJWKcLDh74VqNo04+VO3EOvI8N9hEEpP8WMhZ6TNnNsTAlyFbfx2KfPjOvwNWayN+cajyXcIFkw89DTixVMyVpm+u5NRGUUW+ktMApmL5u0dLX34cdN/uaGaMLSfx6rwydCczW4rOC4mndhmRE3NTTRWEhTu2RQNX0+nd26oluhuRx6V2on7NldR/mmKNLvR0GlpDIadNlafjRjytrt8tbU+hhDfjx6Vb/extNbzk0L4nWLEdwH+WaLbTkHrTfX/R31UuflUYHJJuYE4p3tvXb623qrDoeWVRz9hVahkblfT1yHyck/Yy1jQzTFl/+BEUbVq92HU1u5u+rb1ZQ9Ni33L/oK8xtbiQevP8DkVnrua5PYrf72+AlnpRb+IiQiQkHwnJnV7aagn/j0JsdUm7HV+E8hNDcVA5vBxVyVdYeFc/EOYVt8CAl9bpdd8TPbpyhuHldGbtx1zroeXqdLLagr5Wu0OzuESHeNcnUOg22jzu1vLYwhSrn0ExbLEQPJ5na5Mf7rtepN1aDQan3lKJ+AYwvrm5vZ/5l2Zv+4tlqZeHirlXI5V6uSRPGku5KNxqewfw2y8D+PTbh/Fs7EPtL5N7+t637dYxddFdr5bwov1HV7Kv0IpBlZvVtIX1rUXv9ba67JkLenSZNZzleTpgvKZXVzAK/0OJTyjhShuxFIrJt1Vvs3ukHtnpR5hb900Q5lQMqhC6GzArd3EQ51PBL6gxfk6d7LD+V4PO6j1sOVd4fre7wx8WuCQLjhfZQrLlmT3LXLg1s4+xWKsDh5ZSb8jjzG7uwS97De9+mv73p8H3/0L/G45vfhl8/9PHyafBjz89TGdpyJcbsAxSu4bJ/dOPA/rvP/sa7vbjePTmfwMAAP//jE4gsA==" + return 
"eJzcXN9z47Zzf7+/YicvX9+MpE4umU7HnXRG5/M1apyLa+mS9omBwJWEGgIYALRO99d3FgApUgQl25IuneohkUVy8dnF/sbyhvCI22tgG/sGwAkn8RrGf0zfABiUyCxewxwdewOQo+VGFE5odQ3/9gYA4FedlxJhoQ2smMqlUEuQemlhYfSayIzeACwEytxe+weGoNgaq+Xo47YFXsPS6LKIvyTWoc9HT6am7NcZxavNJZrLcKnL3BkmZH0ptSJ99rmtPjkuWCld5pe4hgWTFluXk2CbgLXxeG8Iy4ywtKCn4DdZwCdULntCY4VWrTsqTh5xu9Em37t2ABh9ZitsIor0QS/ArZAAhoUJ/Zq5URJaadFkIkflhNsmoe0LuQtsmERGlCeRMKDENUHhWjkmlIUcHRPSApvr0nm8tBroRYfWZPwrVADBrZiDNcvRP2LwrxKtGwBTOWxWgq+AG/T3MmlhgwY75EqL+QgmC3C4LrRhZtt5xt8z8CtUuO1Kbyys9IZ+7dDsENBz4hLz0d6tKSVp7gbJoHPxsI50tyNxQ9iRKGHPWM+WN6zb7Gvqy5F0FaOCMl6zr1rBA1pdGo7wia0RrsYPn95WAAsjFBcFk3t7zpmU+2JtoOYcrc0ecZuJFL5z4Q/rECGYfAgIN8x6xQGnwYqlampoP2CLlow2I8PAL64XcsoKnwt4smhi8UC9ODfCrRpmYJGXJqUS0FZxMrfaMDzrhdFPIkcLQgVfQ25oZ9mRxyTdWnTcIHOYe1frVtpic8nEo32m1BTuesEyVroVUeFEPXn3ca14rqAhascTkyWCsOAM/T+KX2vnnSJo452a/74hVnuJJT1TFNFuQ5m02suwxWvYXpYWO31+/TiGHJ8Ex38F7VZoNsLiIETHrsI25er3irQ2Z64PfJDpgRteIlAi4528E2uEzQqDdXV1tysxYW3ZdcQ7XoR60o+YZ/OU3p/LXdBSlWOjPMKiIbn3hTNb8hWwlNZD5Tlvb97BuHQappz5lC1mKLeSWSc4vEemrGPy8bjr8RIy/988T5JWDODPD9jwTE/TE7jhAs4lRsyoTYdNIKhSLzlm4UFrNyB39NmiGZAWPWh5xPzr0JyOsZdgWiiHRjFJ0TZy3swIm7F3iem9h7YuHWYxnf1cgrPxw6eKo7izV4xzXaqwLT5K+H0xWuLbXlIpMRxRjiMSCCC+3RbHBQMneqPs+Xe5rseM0SbjOt+32ecXY+map+nb/SJ18I8e0KArjbLej9H1Q/jWaC1bnhPi5DCYUPI0iNSm1g810soKZtgaHZr9qHWqSHeEByRMpraDqA3kty1FghBN+rLtHVJbaGUxiwHk3EAr+nWAojSNcXrGVoH+EYGvmFqihauQ8A66BWpB2Y43+RwlUuITiLxN88XyXNB1JjNf7ues1fs4lbVxTZ6yMNZI60NzobZMpR1tlotK0yFUaR25siiqwyrV8Tyn7lBIyz3gUP4uBNqW4oQAGw14jkItu7U+kxJzWKJCw5x/XthAuseYffMnkSCcZMlt/FVl39iSCmDe2CuDXJu8R40KcXKH6ijO8f2kblMxazUXu0rFX9/YcSFumJQdSp6DGfF5QNZrptjSW1+whXPaAbzXWiJTPWq0WSHVUA1pCwv7hggNhOGuPhNgeaaVTHfkTt6KHVZhQRekJ7QjBNgvPaSldxd6/anPV9J+9DVdwzFIYb3/qGnHRgvmINROtC9tq12ul1U3sMYPn7r12rNyqXPAGMfcaZcdVxKkTCpR3J214XhENN451ZZiqp0M9Zo/qLi+Zhs7jH536JFdU7AZ0qP+7x4N5KIQZOy9Aj7FYB6wMEgJRvBdbCdjb/sGOYon71+FPWTMka/gkbLY+L6cj63zT1puAEJxWeZCLWFDqJ0RyyWaEBbSTjY0WoIOlbKPqRUzmEeezir2f/88+dCIXvNt84zDaSiV+KtEua1Uqnk9zVA8cPLCYeuQpYecKnpxG/JIpyEXiwUa+iOcn7U/UQV6qoqngmeo8kKLc4tkb4d/v7+BaiGypnDyEXOY2NDwbRbPdjcG0fNOA1O+69csWurWU9Vmmv6Q5pVrZbXETOqlSOcLrwkA8bTNFsjFQnACeRMWuqN14m6+1Pkfz48Po+4iT/vgXaJ8683oA6XLVAQc5+EQH01e1nouJPbkaW1O5iFbObEwTyQNLW4o8fdaFpDBIWQVE15nMqe/QV/h88NdZwcOC3jBstIeOSi4mGjXpXQCFow7Kh13zXzyVJuefjDEM03IS0NePslq6yx9w1yrJ/h3nqX/QVhedJZ+qCuSOEE7YrhtEBXxUUdwyN/9/RK7vXkXJjOEagB/ruBEkbE8N2hfn3skq+rQmkUHkfouA/XxCE1CmnL+fGku2euEdff+RXrVyTJPFkzz0EdqlsOcSaY49nTSTuoQJAE0z/tbALyQnt7BHf34Pv7Yk9U4ZpboMr89o24hdSLERvc7LBT0YDeJdLStRyUjdkucE3F57YmUO+k+UvqjkPfXxoXRTnO9n4+eCKqimt7Tq5VzBWgDjhc9rcK6WWs0VdVCLTMn1jiyyJNIF1KzFzpUr3faMRmOaoUCi1wryuqE4rgnvdAMDH1BYXcyLpUTEkQrNWdgcElbQsXLnPFHVD2trHjx/xCbDTboSgQITkjZ+sE6ZpyN/VEq2o60sv9mDuuWd3Pv6lqrxWV7L714pOgtmXeiC3yt0zFL6k6D9mVcRUNq7pSCtZBSRGYHkdsAXxeosMUQl9runxrXnlPajPyYXbFHvCwf1UTI7G4K9ZIkaK7XhT9D2OMLdEJLV8zCHFEBWsfmUthVH2uV+Yn9WulEDze5388iKiXaaXrotxzzwBXCQpvXd4HTXlgbV/WwTkVHLntUn4ZYx1xpTzuXTEIOhIEId83zqnm1a6DhAYqHZL9Om9CL/bMX+p/pfpGVGRfF6tyBejq9g0A3VEFCkRH8k/+53oSe1IYwXSZS0/J1tH4xLr7SFlXG0biLZlxhHaB1xMIPjUHseIYz9oYSvBa+RSPYmYUbaIIq13M0F+ZFKK7XPrxKmzGJ53YmVDwt0cQzJb3wHtyv04io820qha/sOHDlD8cjx/3hiL7lWarJdSIfBNsTD/l7DwLDOJ7SGU0u7Sdnlasjxp//NRyvv6rhjFYbTvI/YYUs76u61lRCY56ZUlI6JXRiDu3ktD1Q3W2yz51KWc//eQjtEUCxoISX7omXe05tQ5KEX5CX3fnWE4HHHnlFPMxdtkLdbpbkaqHNhpl8AAvxBfNhFRkGraHU0Wj0dgQTB5yp6l0AsPiEhskgnh47NJgLg9xlpTmzN/n8cBc9tJd4XMcfUPOq8VaL4MC4zsggs+d+uyPMEAXK1ShRvR0R34IJ2URWR7cfwtnpxTst0x/88QGaair+JW2Xeckf0WWpU8pT/QJTWgnOZBi03h2N+rX2Zu4CjJ5kzV+7XG8o0A++YH9aNlob7faSCdU/27TWDrOedLzz83NsvyiYCdHy2e096LYavtmm1uv5wSkGQ6+epWqPxFdnZBcbPOqAHoN1vi/fOtGsT9fSB5
rIdud5aaj1bMZ5xbubBfHNrxxWYYQecuSSUerPLEx/G9+P6jsH8HA7nY1+ns3uszW6lc5H1eyBH3oawB+376eT2e2hW7SB9+PZzc+jD7d3t7Pb0W/v/+P2ZpZm/RHPHJ+/e8Ttd83RtV0UpuCAiqqa3IP8bvhd5YZ3oso1huk3R0U38y8F1VOLhzWtNOK8vDwEwsPPD5MWRyT72rF0Tjma0Kiuy0I5d8ZehSrXaAQPOJoF525c5cBE4BnmddO1UG2Gtz7Q3ugcm/usdIzAmvPSmN5uyNahzWzfvNmrJRbLm7rL5tfxJfoA8Es11uFFumsNP6GhfLfJxlc0useNzP+HkiorvqYle0pjiojWIdevQ5WX18Devoh/0rfKLiLJVgdsP9QKBQsplivXmBbxac0/LBRobEFp4VOPhrrSqIwZXar8m8FnrqHAtqBg3UjOt7o0xwaiF2jMuSN0q8jxuvkQ14l12PFK1b+TzJanzG8+E9pni2Y4ppUOFolxquHspWs1Cjv5UDUU68jz3GATSUzyYyFnpc+c2RADX4Zs/XUo8uE7//5ZrY34xaHKdwkXTD70tODEUjFXmr6ZllNbRRX5Sk4DmIrl7x4tfflx0H2zpJkxtpzEq/PK0J/MbCk6L0Oe2mdETsxNNVUQFu7YFg1cTad3b6um6G48H5faifoVW1L/aYo1utDTa2gNpJw20Z6OG/G8un6vtT0BE97KH5du9bO31fCCRfueYMV2AP9ZotlOQ+pN9/1Ff1e5+FVhcEi6gTmleG9fv7XeqsKiZ678q/m+Si1jq5K+HpnNc9Jexppmhinrjz+Cok2rl8quZnfTt7U3a2ha7FzuH/U1JiYXUm+e36HoTNY8t0fx+/0N0FIv6k1cRIiE5CMhudNLWy3h/0GKrS5pt+NLWH5mKA5JhxezKvkKC+/qB8Ks5BYY8NI6ve57okdXzjA4nc6s/YhtPTBdnU9WW9DXbHdoFpfoEe/6BArdRpvH3VoeW5ig9VMohi0WgscTbW3yw53XizRcq6Hk1BsyEd8Axjc3t/cz/8LubX+xLPXyUDH3aqRSL5fkSWMpF4Vbbe8AfvtlAJ9++zCejX2o/WVyT9/7tt06pi6669USXrT/6Er2FVoxqHKzmrawvrXovd5Wlz2TQY8us4azPE8HjNf06gpG4X8o8QklXGkjlkIx+bbqbXYP1SM7/Qhz674JwpyKQRVCdwNm5S4O4nwq+AU1xs/Ikx3W/2LRWb2HLecKz+92d/jDApdkwfEiW0i2PLNnmQu3ZvYxFmt14NBS6g15nNnNPfhlr+HdT9P//jT4/l/of8PxzS+D73/6OPk0+PGnh+ksDflyI5ZBatcwuX/6cUD//Wdfw91+HI/e/G8AAAD///fyPwM=" } diff --git a/x-pack/filebeat/module/aws/s3access/_meta/fields.epr.yml b/x-pack/filebeat/module/aws/s3access/_meta/fields.epr.yml new file mode 100644 index 00000000000..5f5693a8279 --- /dev/null +++ b/x-pack/filebeat/module/aws/s3access/_meta/fields.epr.yml @@ -0,0 +1,90 @@ +- name: related.user + type: keyword + description: All the user names seen on your event. +- name: related.ip + type: ip + description: All of the IPs seen on your event. +- name: client.ip + type: ip + description: IP address of the client. +- name: client.address + type: keyword + description: Some event client addresses are defined ambiguously. The event will sometimes list an IP, a domain or a unix socket. You should always store the raw address in the .address field. +- name: client.user.id + type: keyword + description: Unique identifiers of the user. +- name: event.id + type: keyword + description: Unique ID to describe the event. +- name: event.action + type: keyword + description: The action captured by the event. +- name: http.response.status_code + type: long + description: HTTP response status code. +- name: event.outcome + type: keyword + description: This is one of four ECS Categorization Fields, and indicates the lowest level in the ECS category hierarchy. +- name: event.code + type: keyword + description: Identification code for this event, if one exists. +- name: event.duration + type: long + description: Duration of the event in nanoseconds. +- name: http.request.referrer + type: keyword + description: Referrer for this HTTP request. +- name: tls.cipher + type: keyword + description: String indicating the cipher used during the current connection. +- name: tls.version + type: keyword + description: Numeric part of the version parsed from the original string. +- name: tls.version_protocol + type: keyword + description: Normalized lowercase protocol name parsed from original string. +- name: cloud.provider + type: keyword + description: Name of the cloud provider. Example values are aws, azure, gcp, or digitalocean. +- name: event.kind + type: keyword + description: Event kind (e.g. 
event, alert, metric, state, pipeline_error, signal)
+- name: geo.city_name
+  type: keyword
+  description: City name.
+- name: geo.continent_name
+  type: keyword
+  description: Name of the continent.
+- name: geo.country_iso_code
+  type: keyword
+  description: Country ISO code.
+- name: geo.location
+  type: geo_point
+  description: Longitude and latitude.
+- name: geo.region_iso_code
+  type: keyword
+  description: Region ISO code.
+- name: geo.region_name
+  type: keyword
+  description: Region name.
+- name: user_agent.device.name
+  type: keyword
+  description: Name of the device.
+- name: user_agent.name
+  type: keyword
+  description: Name of the user agent.
+- name: user_agent.original
+  type: keyword
+  description: Unparsed user_agent string.
+- name: user_agent.os.full
+  type: keyword
+  description: Operating system name, including the version or code name.
+- name: user_agent.os.name
+  type: keyword
+  description: Operating system name, without the version.
+- name: user_agent.os.version
+  type: keyword
+  description: Operating system version as a raw string.
+- name: user_agent.version
+  type: keyword
+  description: Version of the user agent.
diff --git a/x-pack/filebeat/module/aws/vpcflow/_meta/fields.epr.yml b/x-pack/filebeat/module/aws/vpcflow/_meta/fields.epr.yml
new file mode 100644
index 00000000000..7293e8090ff
--- /dev/null
+++ b/x-pack/filebeat/module/aws/vpcflow/_meta/fields.epr.yml
@@ -0,0 +1,114 @@
+- name: event.start
+  type: date
+  description: event.start contains the date when the event started or when the activity was first observed.
+- name: event.end
+  type: date
+  description: event.end contains the date when the event ended or when the activity was last observed.
+- name: destination.geo.continent_name
+  type: keyword
+  description: Name of the continent.
+- name: destination.geo.country_iso_code
+  type: keyword
+  description: Country ISO code.
+- name: destination.geo.location
+  type: geo_point
+  description: Longitude and latitude.
+- name: destination.ip
+  type: ip
+  description: IP address of the destination.
+- name: destination.address
+  type: keyword
+  description: Some event destination addresses are defined ambiguously. The event will sometimes list an IP, a domain or a unix socket. You should always store the raw address in the .address field.
+- name: destination.port
+  type: long
+  description: Port of the destination.
+- name: event.category
+  type: keyword
+  description: Event category (e.g. database)
+- name: event.outcome
+  type: keyword
+  description: This is one of four ECS Categorization Fields, and indicates the lowest level in the ECS category hierarchy.
+- name: event.type
+  type: keyword
+  description: Event type (e.g. info, error)
+- name: source.as.number
+  type: long
+  description: Unique number allocated to the autonomous system. The autonomous system number (ASN) uniquely identifies each network on the Internet.
+- name: source.as.organization.name
+  type: keyword
+  description: Organization name.
+- name: destination.as.number
+  type: long
+  description: Unique number allocated to the autonomous system. The autonomous system number (ASN) uniquely identifies each network on the Internet.
+- name: destination.as.organization.name
+  type: keyword
+  description: Organization name.
+- name: event.original
+  type: keyword
+  description: Raw text message of entire event. Used to demonstrate log integrity.
+- name: cloud.account.id
+  type: keyword
+  description: The cloud account or organization id used to identify different entities in a multi-tenant environment.
+- name: cloud.instance.id
+  type: keyword
+  description: Instance ID of the host machine.
+- name: cloud.provider
+  type: keyword
+  description: Name of the cloud provider.
+- name: related.ip
+  type: ip
+  description: All of the IPs seen on your event.
+- name: event.kind
+  type: keyword
+  description: Event kind (e.g. event, alert, metric, state, pipeline_error, signal)
+- name: network.bytes
+  type: long
+  description: Total bytes transferred in both directions.
+- name: network.community_id
+  type: keyword
+  description: A hash of source and destination IPs and ports, as well as the protocol used in a communication. This is a tool-agnostic standard to identify flows.
+- name: network.iana_number
+  type: keyword
+  description: IANA Protocol Number (https://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml). Standardized list of protocols. This aligns well with NetFlow and sFlow related logs which use the IANA Protocol Number.
+- name: network.packets
+  type: long
+  description: Total packets transferred in both directions.
+- name: network.transport
+  type: keyword
+  description: Same as network.iana_number, but instead using the Keyword name of the transport layer (udp, tcp, ipv6-icmp, etc.)
+- name: network.type
+  type: keyword
+  description: In the OSI Model this would be the Network Layer. ipv4, ipv6, ipsec, pim, etc
+- name: source.address
+  type: keyword
+  description: Some event source addresses are defined ambiguously. The event will sometimes list an IP, a domain or a unix socket. You should always store the raw address in the .address field.
+- name: source.bytes
+  type: long
+  description: Bytes sent from the source to the destination.
+- name: source.geo.city_name
+  type: keyword
+  description: City name.
+- name: source.geo.continent_name
+  type: keyword
+  description: Name of the continent.
+- name: source.geo.country_iso_code
+  type: keyword
+  description: Country ISO code.
+- name: source.geo.location
+  type: geo_point
+  description: Longitude and latitude.
+- name: source.geo.region_iso_code
+  type: keyword
+  description: Region ISO code.
+- name: source.geo.region_name
+  type: keyword
+  description: Region name.
+- name: source.ip
+  type: ip
+  description: IP address of the source (IPv4 or IPv6).
+- name: source.packets
+  type: long
+  description: Packets sent from the source to the destination.
+- name: source.port
+  type: long
+  description: Port of the source.
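To make the vpcflow mapping above concrete, here is a hypothetical flow event rendered with a subset of these ECS fields. All values are invented for illustration; only the field names come from the definitions above.

```yaml
# Hypothetical ECS document for one accepted VPC flow log record (v2 format).
event.kind: event
event.start: "2020-02-07T10:00:00Z"
event.end: "2020-02-07T10:01:00Z"
event.original: "2 123456789010 eni-1235b8ca 10.0.0.12 172.31.16.21 43233 22 6 20 4249 1581069600 1581069660 ACCEPT OK"
cloud.provider: aws
cloud.account.id: "123456789010"
network.iana_number: "6"           # IANA protocol number for TCP
network.transport: tcp
network.type: ipv4
network.bytes: 4249
network.packets: 20
source.ip: "10.0.0.12"
source.port: 43233
source.bytes: 4249
source.packets: 20
destination.ip: "172.31.16.21"
destination.port: 22
related.ip: ["10.0.0.12", "172.31.16.21"]
```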
diff --git a/x-pack/filebeat/module/azure/_meta/config.yml b/x-pack/filebeat/module/azure/_meta/config.yml
index 7509037c28e..ab7f477b8bb 100644
--- a/x-pack/filebeat/module/azure/_meta/config.yml
+++ b/x-pack/filebeat/module/azure/_meta/config.yml
@@ -3,15 +3,15 @@ activitylogs:
     enabled: true
     var:
-      # Eventhub name containing the activity logs, overwrite he default value if the logs are exported in a different eventhub
+      # eventhub name containing the activity logs, override the default value if the logs are exported in a different eventhub
       eventhub: "insights-operational-logs"
-      # Consumer group name that has access to the event hub, we advise creating a dedicated consumer group for the azure module
+      # consumer group name that has access to the event hub, we advise creating a dedicated consumer group for the azure module
       consumer_group: "$Default"
       # the connection string required to communicate with Event Hubs, steps to generate one here https://docs.microsoft.com/en-us/azure/event-hubs/event-hubs-get-connection-string
       connection_string: ""
-      # the name of the storage account the state/offsets will be stored and updated.
+      # the name of the storage account where the state/offsets will be stored and updated
       storage_account: ""
-      #The storage account key, this key will be used to authorize access to data in your storage account.
+      # the storage account key, this key will be used to authorize access to data in your storage account
       storage_account_key: ""
 
   auditlogs:
diff --git a/x-pack/filebeat/module/azure/_meta/docs.asciidoc b/x-pack/filebeat/module/azure/_meta/docs.asciidoc
index 5bf7bb576d0..eea82995532 100644
--- a/x-pack/filebeat/module/azure/_meta/docs.asciidoc
+++ b/x-pack/filebeat/module/azure/_meta/docs.asciidoc
@@ -38,6 +38,7 @@ Will retrieve azure Active Directory audit logs. The audit logs provide traceabi
     connection_string: ""
     storage_account: ""
     storage_account_key: ""
+    resource_manager_endpoint: ""
 
   auditlogs:
     enabled: false
@@ -47,6 +48,7 @@ Will retrieve azure Active Directory audit logs. The audit logs provide traceabi
     connection_string: ""
     storage_account: ""
     storage_account_key: ""
+    resource_manager_endpoint: ""
 
   signinlogs:
     enabled: false
@@ -56,6 +58,7 @@ Will retrieve azure Active Directory audit logs. The audit logs provide traceabi
     connection_string: ""
     storage_account: ""
     storage_account_key: ""
+    resource_manager_endpoint: ""
 ```
@@ -85,6 +88,16 @@ The name of the storage account the state/offsets will be stored and updated.
 _string_
 The storage account key, this key will be used to authorize access to data in your storage account.
 
+`resource_manager_endpoint` ::
+_string_
+Optional. By default the module uses the Azure public environment; to override it, users can provide a specific resource manager endpoint in order to use a different Azure environment.
+For example:
+https://management.chinacloudapi.cn/ for azure ChinaCloud
+https://management.microsoftazure.de/ for azure GermanCloud
+https://management.azure.com/ for azure PublicCloud
+https://management.usgovcloudapi.net/ for azure USGovernmentCloud
+Users can also use this in a Hybrid Cloud model, where they may define their own endpoints.
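For reference, a minimal sketch of what the new option looks like in a module configuration. Every value below is a placeholder; the last line is the only new piece introduced by this change:

```yaml
- module: azure
  activitylogs:
    enabled: true
    var:
      eventhub: "insights-operational-logs"
      consumer_group: "$Default"
      connection_string: ""       # placeholder: an Event Hubs connection string
      storage_account: ""         # placeholder: storage account name
      storage_account_key: ""     # placeholder: storage account key
      # Point the module at Azure ChinaCloud instead of the default public environment.
      resource_manager_endpoint: "https://management.chinacloudapi.cn/"
```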
+ include::../include/what-happens.asciidoc[] include::../include/gs-link.asciidoc[] diff --git a/x-pack/filebeat/module/azure/activitylogs/config/azure-eventhub.yml b/x-pack/filebeat/module/azure/activitylogs/config/azure-eventhub.yml index b89bebb30f9..9b747e1092d 100644 --- a/x-pack/filebeat/module/azure/activitylogs/config/azure-eventhub.yml +++ b/x-pack/filebeat/module/azure/activitylogs/config/azure-eventhub.yml @@ -4,3 +4,4 @@ eventhub: {{ .eventhub }} consumer_group: {{ .consumer_group }} storage_account: {{ .storage_account }} storage_account_key: {{ .storage_account_key }} +resource_manager_endpoint: {{ .resource_manager_endpoint }} diff --git a/x-pack/filebeat/module/azure/activitylogs/manifest.yml b/x-pack/filebeat/module/azure/activitylogs/manifest.yml index 7375b6e42a4..4d5c20a7271 100644 --- a/x-pack/filebeat/module/azure/activitylogs/manifest.yml +++ b/x-pack/filebeat/module/azure/activitylogs/manifest.yml @@ -10,6 +10,7 @@ var: - name: connection_string - name: storage_account - name: storage_account_key + - name: resource_manager_endpoint ingest_pipeline: - ingest/pipeline.json diff --git a/x-pack/filebeat/module/azure/auditlogs/config/azure-eventhub.yml b/x-pack/filebeat/module/azure/auditlogs/config/azure-eventhub.yml index 01796611504..3c2ea50cf8b 100644 --- a/x-pack/filebeat/module/azure/auditlogs/config/azure-eventhub.yml +++ b/x-pack/filebeat/module/azure/auditlogs/config/azure-eventhub.yml @@ -4,4 +4,5 @@ eventhub: {{ .eventhub }} consumer_group: {{ .consumer_group }} storage_account: {{ .storage_account }} storage_account_key: {{ .storage_account_key }} +resource_manager_endpoint: {{ .resource_manager_endpoint }} diff --git a/x-pack/filebeat/module/azure/auditlogs/manifest.yml b/x-pack/filebeat/module/azure/auditlogs/manifest.yml index d6cd469718b..095371bff16 100644 --- a/x-pack/filebeat/module/azure/auditlogs/manifest.yml +++ b/x-pack/filebeat/module/azure/auditlogs/manifest.yml @@ -10,6 +10,7 @@ var: - name: connection_string - name: storage_account - name: storage_account_key + - name: resource_manager_endpoint ingest_pipeline: - ingest/pipeline.json diff --git a/x-pack/filebeat/module/azure/signinlogs/config/azure-eventhub.yml b/x-pack/filebeat/module/azure/signinlogs/config/azure-eventhub.yml index b89bebb30f9..9b747e1092d 100644 --- a/x-pack/filebeat/module/azure/signinlogs/config/azure-eventhub.yml +++ b/x-pack/filebeat/module/azure/signinlogs/config/azure-eventhub.yml @@ -4,3 +4,4 @@ eventhub: {{ .eventhub }} consumer_group: {{ .consumer_group }} storage_account: {{ .storage_account }} storage_account_key: {{ .storage_account_key }} +resource_manager_endpoint: {{ .resource_manager_endpoint }} diff --git a/x-pack/filebeat/module/azure/signinlogs/manifest.yml b/x-pack/filebeat/module/azure/signinlogs/manifest.yml index f68109af4a1..97fddae51e9 100644 --- a/x-pack/filebeat/module/azure/signinlogs/manifest.yml +++ b/x-pack/filebeat/module/azure/signinlogs/manifest.yml @@ -10,6 +10,7 @@ var: - name: connection_string - name: storage_account - name: storage_account_key + - name: resource_manager_endpoint ingest_pipeline: - ingest/pipeline.json diff --git a/x-pack/filebeat/module/checkpoint/firewall/ingest/pipeline.json b/x-pack/filebeat/module/checkpoint/firewall/ingest/pipeline.json index cff272b7db1..e478d54e73d 100644 --- a/x-pack/filebeat/module/checkpoint/firewall/ingest/pipeline.json +++ b/x-pack/filebeat/module/checkpoint/firewall/ingest/pipeline.json @@ -61,9 +61,9 @@ } }, { - "set": { + "append": { "field": "event.category", - "value": ["network"], + 
"value": "network", "if": "ctx.checkpoint?.operation != 'Log In'" } }, @@ -242,9 +242,9 @@ } }, { - "set": { + "append": { "field": "event.category", - "value": ["authentication"], + "value": "authentication", "if": "ctx.checkpoint?.operation == 'Log In'" } }, @@ -270,7 +270,7 @@ } }, { - "set" : { + "append" : { "field": "event.type", "value": ["allowed", "connection"], "if": "['Accept', 'Allow'].contains(ctx.checkpoint?.rule_action)" @@ -298,7 +298,7 @@ } }, { - "set" : { + "append" : { "field": "event.type", "value": ["connection", "denied"], "if": "['Drop', 'Reject', 'Block', 'Prevent'].contains(ctx.checkpoint?.rule_action)" @@ -1028,6 +1028,68 @@ "if": "ctx.checkpoint?.sys_message != null" } }, + { + "geoip" : { + "field": "source.ip", + "target_field": "source.geo", + "ignore_missing": true, + "if": "ctx.source?.geo == null" + } + }, + { + "geoip" : { + "field": "destination.ip", + "target_field": "destination.geo", + "ignore_missing": true, + "if": "ctx.destination?.geo == null" + } + }, + { + "geoip" : { + "database_file": "GeoLite2-ASN.mmdb", + "field": "source.ip", + "target_field": "source.as", + "properties": ["asn", "organization_name"], + "ignore_missing": true + } + }, + { + "geoip" : { + "database_file": "GeoLite2-ASN.mmdb", + "field": "destination.ip", + "target_field": "destination.as", + "properties": ["asn", "organization_name"], + "ignore_missing": true + } + }, + { + "rename" : { + "field": "source.as.asn", + "target_field": "source.as.number", + "ignore_missing": true + } + }, + { + "rename" : { + "field": "source.as.organization_name", + "target_field": "source.as.organization.name", + "ignore_missing": true + } + }, + { + "rename" : { + "field": "destination.as.asn", + "target_field": "destination.as.number", + "ignore_missing": true + } + }, + { + "rename" : { + "field": "destination.as.organization_name", + "target_field": "destination.as.organization.name", + "ignore_missing": true + } + }, { "remove" : { "field": [ @@ -1050,4 +1112,4 @@ } } ] -} \ No newline at end of file +} diff --git a/x-pack/filebeat/module/checkpoint/firewall/test/checkpoint.log-expected.json b/x-pack/filebeat/module/checkpoint/firewall/test/checkpoint.log-expected.json index f966163307d..4e8517f4794 100644 --- a/x-pack/filebeat/module/checkpoint/firewall/test/checkpoint.log-expected.json +++ b/x-pack/filebeat/module/checkpoint/firewall/test/checkpoint.log-expected.json @@ -140,6 +140,15 @@ "checkpoint.rule_action": "Accept", "client.ip": "192.168.1.100", "client.port": "61794", + "destination.as.number": 25046, + "destination.as.organization.name": "Check Point Software Technologies LTD", + "destination.geo.city_name": "Tel Aviv", + "destination.geo.continent_name": "Asia", + "destination.geo.country_iso_code": "IL", + "destination.geo.location.lat": 32.0678, + "destination.geo.location.lon": 34.7647, + "destination.geo.region_iso_code": "IL-TA", + "destination.geo.region_name": "Tel Aviv", "destination.ip": "194.29.39.10", "destination.port": "443", "event.action": "Accept", @@ -249,6 +258,12 @@ "checkpoint.rule_action": "Accept", "client.ip": "192.168.1.100", "client.port": "41566", + "destination.as.number": 16625, + "destination.as.organization.name": "Akamai Technologies, Inc.", + "destination.geo.continent_name": "North America", + "destination.geo.country_iso_code": "US", + "destination.geo.location.lat": 37.751, + "destination.geo.location.lon": -97.822, "destination.ip": "104.99.234.45", "destination.port": "443", "event.action": "Accept", @@ -358,6 +373,12 @@ 
"checkpoint.rule_action": "Accept", "client.ip": "192.168.1.100", "client.port": "48698", + "destination.as.number": 30148, + "destination.as.organization.name": "Sucuri", + "destination.geo.continent_name": "North America", + "destination.geo.country_iso_code": "US", + "destination.geo.location.lat": 37.751, + "destination.geo.location.lon": -97.822, "destination.ip": "192.124.249.31", "destination.port": "80", "event.action": "Accept", @@ -467,6 +488,12 @@ "checkpoint.rule_action": "Accept", "client.ip": "192.168.1.100", "client.port": "61150", + "destination.as.number": 30148, + "destination.as.organization.name": "Sucuri", + "destination.geo.continent_name": "North America", + "destination.geo.country_iso_code": "US", + "destination.geo.location.lat": 37.751, + "destination.geo.location.lon": -97.822, "destination.ip": "192.124.249.36", "destination.port": "80", "event.action": "Accept", @@ -576,6 +603,12 @@ "checkpoint.rule_action": "Accept", "client.ip": "192.168.1.100", "client.port": "55110", + "destination.as.number": 16625, + "destination.as.organization.name": "Akamai Technologies, Inc.", + "destination.geo.continent_name": "North America", + "destination.geo.country_iso_code": "US", + "destination.geo.location.lat": 37.751, + "destination.geo.location.lon": -97.822, "destination.ip": "104.81.142.43", "destination.port": "443", "event.action": "Accept", @@ -685,6 +718,12 @@ "checkpoint.rule_action": "Accept", "client.ip": "192.168.1.100", "client.port": "48718", + "destination.as.number": 30148, + "destination.as.organization.name": "Sucuri", + "destination.geo.continent_name": "North America", + "destination.geo.country_iso_code": "US", + "destination.geo.location.lat": 37.751, + "destination.geo.location.lon": -97.822, "destination.ip": "192.124.249.31", "destination.port": "80", "event.action": "Accept", @@ -794,6 +833,12 @@ "checkpoint.rule_action": "Accept", "client.ip": "192.168.1.100", "client.port": "62206", + "destination.as.number": 30148, + "destination.as.organization.name": "Sucuri", + "destination.geo.continent_name": "North America", + "destination.geo.country_iso_code": "US", + "destination.geo.location.lat": 37.751, + "destination.geo.location.lon": -97.822, "destination.ip": "192.124.249.41", "destination.port": "80", "event.action": "Accept", @@ -903,6 +948,12 @@ "checkpoint.rule_action": "Accept", "client.ip": "192.168.1.100", "client.port": "41596", + "destination.as.number": 16625, + "destination.as.organization.name": "Akamai Technologies, Inc.", + "destination.geo.continent_name": "North America", + "destination.geo.country_iso_code": "US", + "destination.geo.location.lat": 37.751, + "destination.geo.location.lon": -97.822, "destination.ip": "104.99.234.45", "destination.port": "443", "event.action": "Accept", @@ -1012,6 +1063,12 @@ "checkpoint.rule_action": "Accept", "client.ip": "192.168.1.100", "client.port": "61180", + "destination.as.number": 30148, + "destination.as.organization.name": "Sucuri", + "destination.geo.continent_name": "North America", + "destination.geo.country_iso_code": "US", + "destination.geo.location.lat": 37.751, + "destination.geo.location.lon": -97.822, "destination.ip": "192.124.249.36", "destination.port": "80", "event.action": "Accept", @@ -1121,6 +1178,12 @@ "checkpoint.rule_action": "Accept", "client.ip": "192.168.1.100", "client.port": "48732", + "destination.as.number": 30148, + "destination.as.organization.name": "Sucuri", + "destination.geo.continent_name": "North America", + "destination.geo.country_iso_code": "US", + 
"destination.geo.location.lat": 37.751, + "destination.geo.location.lon": -97.822, "destination.ip": "192.124.249.31", "destination.port": "80", "event.action": "Accept", @@ -1230,6 +1293,12 @@ "checkpoint.rule_action": "Accept", "client.ip": "192.168.1.100", "client.port": "62222", + "destination.as.number": 30148, + "destination.as.organization.name": "Sucuri", + "destination.geo.continent_name": "North America", + "destination.geo.country_iso_code": "US", + "destination.geo.location.lat": 37.751, + "destination.geo.location.lon": -97.822, "destination.ip": "192.124.249.41", "destination.port": "80", "event.action": "Accept", @@ -1339,6 +1408,12 @@ "checkpoint.rule_action": "Accept", "client.ip": "192.168.1.100", "client.port": "61188", + "destination.as.number": 30148, + "destination.as.organization.name": "Sucuri", + "destination.geo.continent_name": "North America", + "destination.geo.country_iso_code": "US", + "destination.geo.location.lat": 37.751, + "destination.geo.location.lon": -97.822, "destination.ip": "192.124.249.36", "destination.port": "80", "event.action": "Accept", @@ -1448,6 +1523,12 @@ "checkpoint.rule_action": "Accept", "client.ip": "192.168.1.100", "client.port": "41624", + "destination.as.number": 16625, + "destination.as.organization.name": "Akamai Technologies, Inc.", + "destination.geo.continent_name": "North America", + "destination.geo.country_iso_code": "US", + "destination.geo.location.lat": 37.751, + "destination.geo.location.lon": -97.822, "destination.ip": "104.99.234.45", "destination.port": "443", "event.action": "Accept", @@ -1557,6 +1638,12 @@ "checkpoint.rule_action": "Accept", "client.ip": "192.168.1.100", "client.port": "48758", + "destination.as.number": 30148, + "destination.as.organization.name": "Sucuri", + "destination.geo.continent_name": "North America", + "destination.geo.country_iso_code": "US", + "destination.geo.location.lat": 37.751, + "destination.geo.location.lon": -97.822, "destination.ip": "192.124.249.31", "destination.port": "80", "event.action": "Accept", @@ -1666,6 +1753,12 @@ "checkpoint.rule_action": "Accept", "client.ip": "192.168.1.100", "client.port": "62246", + "destination.as.number": 30148, + "destination.as.organization.name": "Sucuri", + "destination.geo.continent_name": "North America", + "destination.geo.country_iso_code": "US", + "destination.geo.location.lat": 37.751, + "destination.geo.location.lon": -97.822, "destination.ip": "192.124.249.41", "destination.port": "80", "event.action": "Accept", @@ -1775,6 +1868,12 @@ "checkpoint.rule_action": "Accept", "client.ip": "192.168.1.100", "client.port": "41638", + "destination.as.number": 16625, + "destination.as.organization.name": "Akamai Technologies, Inc.", + "destination.geo.continent_name": "North America", + "destination.geo.country_iso_code": "US", + "destination.geo.location.lat": 37.751, + "destination.geo.location.lon": -97.822, "destination.ip": "104.99.234.45", "destination.port": "443", "event.action": "Accept", @@ -1884,6 +1983,12 @@ "checkpoint.rule_action": "Accept", "client.ip": "192.168.1.100", "client.port": "61224", + "destination.as.number": 30148, + "destination.as.organization.name": "Sucuri", + "destination.geo.continent_name": "North America", + "destination.geo.country_iso_code": "US", + "destination.geo.location.lat": 37.751, + "destination.geo.location.lon": -97.822, "destination.ip": "192.124.249.36", "destination.port": "80", "event.action": "Accept", @@ -2037,6 +2142,12 @@ "checkpoint.rule_action": "Accept", "client.ip": "192.168.1.100", 
"client.port": "48776", + "destination.as.number": 30148, + "destination.as.organization.name": "Sucuri", + "destination.geo.continent_name": "North America", + "destination.geo.country_iso_code": "US", + "destination.geo.location.lat": 37.751, + "destination.geo.location.lon": -97.822, "destination.ip": "192.124.249.31", "destination.port": "80", "event.action": "Accept", @@ -2119,6 +2230,15 @@ "checkpoint.rule_action": "Accept", "client.ip": "192.168.1.100", "client.port": "51436", + "destination.as.number": 25046, + "destination.as.organization.name": "Check Point Software Technologies LTD", + "destination.geo.city_name": "Tel Aviv", + "destination.geo.continent_name": "Asia", + "destination.geo.country_iso_code": "IL", + "destination.geo.location.lat": 32.0678, + "destination.geo.location.lon": 34.7647, + "destination.geo.region_iso_code": "IL-TA", + "destination.geo.region_name": "Tel Aviv", "destination.ip": "194.29.39.47", "destination.port": "443", "event.action": "Accept", @@ -2334,6 +2454,12 @@ "checkpoint.rule_action": "Accept", "client.ip": "192.168.1.100", "client.port": "62396", + "destination.as.number": 30148, + "destination.as.organization.name": "Sucuri", + "destination.geo.continent_name": "North America", + "destination.geo.country_iso_code": "US", + "destination.geo.location.lat": 37.751, + "destination.geo.location.lon": -97.822, "destination.ip": "192.124.249.41", "destination.port": "80", "event.action": "Accept", @@ -2443,6 +2569,12 @@ "checkpoint.rule_action": "Accept", "client.ip": "192.168.1.100", "client.port": "48914", + "destination.as.number": 30148, + "destination.as.organization.name": "Sucuri", + "destination.geo.continent_name": "North America", + "destination.geo.country_iso_code": "US", + "destination.geo.location.lat": 37.751, + "destination.geo.location.lon": -97.822, "destination.ip": "192.124.249.31", "destination.port": "80", "event.action": "Accept", @@ -2552,6 +2684,12 @@ "checkpoint.rule_action": "Accept", "client.ip": "192.168.1.100", "client.port": "41844", + "destination.as.number": 16625, + "destination.as.organization.name": "Akamai Technologies, Inc.", + "destination.geo.continent_name": "North America", + "destination.geo.country_iso_code": "US", + "destination.geo.location.lat": 37.751, + "destination.geo.location.lon": -97.822, "destination.ip": "104.99.234.45", "destination.port": "443", "event.action": "Accept", @@ -2661,6 +2799,12 @@ "checkpoint.rule_action": "Accept", "client.ip": "192.168.1.100", "client.port": "62468", + "destination.as.number": 30148, + "destination.as.organization.name": "Sucuri", + "destination.geo.continent_name": "North America", + "destination.geo.country_iso_code": "US", + "destination.geo.location.lat": 37.751, + "destination.geo.location.lon": -97.822, "destination.ip": "192.124.249.41", "destination.port": "80", "event.action": "Accept", @@ -2770,6 +2914,12 @@ "checkpoint.rule_action": "Accept", "client.ip": "192.168.1.100", "client.port": "61434", + "destination.as.number": 30148, + "destination.as.organization.name": "Sucuri", + "destination.geo.continent_name": "North America", + "destination.geo.country_iso_code": "US", + "destination.geo.location.lat": 37.751, + "destination.geo.location.lon": -97.822, "destination.ip": "192.124.249.36", "destination.port": "80", "event.action": "Accept", @@ -2879,6 +3029,12 @@ "checkpoint.rule_action": "Accept", "client.ip": "192.168.1.100", "client.port": "41856", + "destination.as.number": 16625, + "destination.as.organization.name": "Akamai Technologies, Inc.", 
+ "destination.geo.continent_name": "North America", + "destination.geo.country_iso_code": "US", + "destination.geo.location.lat": 37.751, + "destination.geo.location.lon": -97.822, "destination.ip": "104.99.234.45", "destination.port": "443", "event.action": "Accept", @@ -3032,6 +3188,12 @@ "checkpoint.rule_action": "Accept", "client.ip": "192.168.1.100", "client.port": "48990", + "destination.as.number": 30148, + "destination.as.organization.name": "Sucuri", + "destination.geo.continent_name": "North America", + "destination.geo.country_iso_code": "US", + "destination.geo.location.lat": 37.751, + "destination.geo.location.lon": -97.822, "destination.ip": "192.124.249.31", "destination.port": "80", "event.action": "Accept", @@ -3141,6 +3303,12 @@ "checkpoint.rule_action": "Accept", "client.ip": "192.168.1.100", "client.port": "62478", + "destination.as.number": 30148, + "destination.as.organization.name": "Sucuri", + "destination.geo.continent_name": "North America", + "destination.geo.country_iso_code": "US", + "destination.geo.location.lat": 37.751, + "destination.geo.location.lon": -97.822, "destination.ip": "192.124.249.41", "destination.port": "80", "event.action": "Accept", @@ -3250,6 +3418,12 @@ "checkpoint.rule_action": "Accept", "client.ip": "192.168.1.100", "client.port": "41864", + "destination.as.number": 16625, + "destination.as.organization.name": "Akamai Technologies, Inc.", + "destination.geo.continent_name": "North America", + "destination.geo.country_iso_code": "US", + "destination.geo.location.lat": 37.751, + "destination.geo.location.lon": -97.822, "destination.ip": "104.99.234.45", "destination.port": "443", "event.action": "Accept", @@ -3359,6 +3533,12 @@ "checkpoint.rule_action": "Accept", "client.ip": "192.168.1.100", "client.port": "61446", + "destination.as.number": 30148, + "destination.as.organization.name": "Sucuri", + "destination.geo.continent_name": "North America", + "destination.geo.country_iso_code": "US", + "destination.geo.location.lat": 37.751, + "destination.geo.location.lon": -97.822, "destination.ip": "192.124.249.36", "destination.port": "80", "event.action": "Accept", @@ -3468,6 +3648,12 @@ "checkpoint.rule_action": "Accept", "client.ip": "192.168.1.100", "client.port": "48998", + "destination.as.number": 30148, + "destination.as.organization.name": "Sucuri", + "destination.geo.continent_name": "North America", + "destination.geo.country_iso_code": "US", + "destination.geo.location.lat": 37.751, + "destination.geo.location.lon": -97.822, "destination.ip": "192.124.249.31", "destination.port": "80", "event.action": "Accept", @@ -3524,6 +3710,12 @@ "checkpoint.rule_action": "Accept", "client.ip": "192.168.1.100", "client.port": "41870", + "destination.as.number": 16625, + "destination.as.organization.name": "Akamai Technologies, Inc.", + "destination.geo.continent_name": "North America", + "destination.geo.country_iso_code": "US", + "destination.geo.location.lat": 37.751, + "destination.geo.location.lon": -97.822, "destination.ip": "104.99.234.45", "destination.port": "443", "event.action": "Accept", @@ -3686,6 +3878,12 @@ "checkpoint.rule_action": "Accept", "client.ip": "192.168.1.100", "client.port": "62488", + "destination.as.number": 30148, + "destination.as.organization.name": "Sucuri", + "destination.geo.continent_name": "North America", + "destination.geo.country_iso_code": "US", + "destination.geo.location.lat": 37.751, + "destination.geo.location.lon": -97.822, "destination.ip": "192.124.249.41", "destination.port": "80", "event.action": 
"Accept", @@ -3795,6 +3993,12 @@ "checkpoint.rule_action": "Accept", "client.ip": "192.168.1.100", "client.port": "61454", + "destination.as.number": 30148, + "destination.as.organization.name": "Sucuri", + "destination.geo.continent_name": "North America", + "destination.geo.country_iso_code": "US", + "destination.geo.location.lat": 37.751, + "destination.geo.location.lon": -97.822, "destination.ip": "192.124.249.36", "destination.port": "80", "event.action": "Accept", @@ -3929,6 +4133,15 @@ "checkpoint.rule_action": "Accept", "client.ip": "192.168.1.100", "client.port": "62122", + "destination.as.number": 25046, + "destination.as.organization.name": "Check Point Software Technologies LTD", + "destination.geo.city_name": "Tel Aviv", + "destination.geo.continent_name": "Asia", + "destination.geo.country_iso_code": "IL", + "destination.geo.location.lat": 32.0678, + "destination.geo.location.lon": 34.7647, + "destination.geo.region_iso_code": "IL-TA", + "destination.geo.region_name": "Tel Aviv", "destination.ip": "194.29.39.10", "destination.port": "443", "event.action": "Accept", @@ -4091,6 +4304,12 @@ "checkpoint.rule_action": "Accept", "client.ip": "192.168.1.100", "client.port": "55424", + "destination.as.number": 16625, + "destination.as.organization.name": "Akamai Technologies, Inc.", + "destination.geo.continent_name": "North America", + "destination.geo.country_iso_code": "US", + "destination.geo.location.lat": 37.751, + "destination.geo.location.lon": -97.822, "destination.ip": "104.81.142.43", "destination.port": "443", "event.action": "Accept", @@ -4200,6 +4419,12 @@ "checkpoint.rule_action": "Accept", "client.ip": "192.168.1.100", "client.port": "49026", + "destination.as.number": 30148, + "destination.as.organization.name": "Sucuri", + "destination.geo.continent_name": "North America", + "destination.geo.country_iso_code": "US", + "destination.geo.location.lat": 37.751, + "destination.geo.location.lon": -97.822, "destination.ip": "192.124.249.31", "destination.port": "80", "event.action": "Accept", @@ -4309,6 +4534,12 @@ "checkpoint.rule_action": "Accept", "client.ip": "192.168.1.100", "client.port": "62514", + "destination.as.number": 30148, + "destination.as.organization.name": "Sucuri", + "destination.geo.continent_name": "North America", + "destination.geo.country_iso_code": "US", + "destination.geo.location.lat": 37.751, + "destination.geo.location.lon": -97.822, "destination.ip": "192.124.249.41", "destination.port": "80", "event.action": "Accept", @@ -4418,6 +4649,12 @@ "checkpoint.rule_action": "Accept", "client.ip": "192.168.1.100", "client.port": "41902", + "destination.as.number": 16625, + "destination.as.organization.name": "Akamai Technologies, Inc.", + "destination.geo.continent_name": "North America", + "destination.geo.country_iso_code": "US", + "destination.geo.location.lat": 37.751, + "destination.geo.location.lon": -97.822, "destination.ip": "104.99.234.45", "destination.port": "443", "event.action": "Accept", @@ -4527,6 +4764,12 @@ "checkpoint.rule_action": "Accept", "client.ip": "192.168.1.100", "client.port": "61490", + "destination.as.number": 30148, + "destination.as.organization.name": "Sucuri", + "destination.geo.continent_name": "North America", + "destination.geo.country_iso_code": "US", + "destination.geo.location.lat": 37.751, + "destination.geo.location.lon": -97.822, "destination.ip": "192.124.249.36", "destination.port": "80", "event.action": "Accept", @@ -4636,6 +4879,12 @@ "checkpoint.rule_action": "Accept", "client.ip": "192.168.1.100", 
"client.port": "49042", + "destination.as.number": 30148, + "destination.as.organization.name": "Sucuri", + "destination.geo.continent_name": "North America", + "destination.geo.country_iso_code": "US", + "destination.geo.location.lat": 37.751, + "destination.geo.location.lon": -97.822, "destination.ip": "192.124.249.31", "destination.port": "80", "event.action": "Accept", @@ -4745,6 +4994,12 @@ "checkpoint.rule_action": "Accept", "client.ip": "192.168.1.100", "client.port": "41914", + "destination.as.number": 16625, + "destination.as.organization.name": "Akamai Technologies, Inc.", + "destination.geo.continent_name": "North America", + "destination.geo.country_iso_code": "US", + "destination.geo.location.lat": 37.751, + "destination.geo.location.lon": -97.822, "destination.ip": "104.99.234.45", "destination.port": "443", "event.action": "Accept", @@ -4854,6 +5109,12 @@ "checkpoint.rule_action": "Accept", "client.ip": "192.168.1.100", "client.port": "62534", + "destination.as.number": 30148, + "destination.as.organization.name": "Sucuri", + "destination.geo.continent_name": "North America", + "destination.geo.country_iso_code": "US", + "destination.geo.location.lat": 37.751, + "destination.geo.location.lon": -97.822, "destination.ip": "192.124.249.41", "destination.port": "80", "event.action": "Accept", @@ -4963,6 +5224,12 @@ "checkpoint.rule_action": "Accept", "client.ip": "192.168.1.100", "client.port": "61500", + "destination.as.number": 30148, + "destination.as.organization.name": "Sucuri", + "destination.geo.continent_name": "North America", + "destination.geo.country_iso_code": "US", + "destination.geo.location.lat": 37.751, + "destination.geo.location.lon": -97.822, "destination.ip": "192.124.249.36", "destination.port": "80", "event.action": "Accept", @@ -5072,6 +5339,12 @@ "checkpoint.rule_action": "Accept", "client.ip": "192.168.1.100", "client.port": "41938", + "destination.as.number": 16625, + "destination.as.organization.name": "Akamai Technologies, Inc.", + "destination.geo.continent_name": "North America", + "destination.geo.country_iso_code": "US", + "destination.geo.location.lat": 37.751, + "destination.geo.location.lon": -97.822, "destination.ip": "104.99.234.45", "destination.port": "443", "event.action": "Accept", @@ -5181,6 +5454,12 @@ "checkpoint.rule_action": "Accept", "client.ip": "192.168.1.100", "client.port": "49102", + "destination.as.number": 30148, + "destination.as.organization.name": "Sucuri", + "destination.geo.continent_name": "North America", + "destination.geo.country_iso_code": "US", + "destination.geo.location.lat": 37.751, + "destination.geo.location.lon": -97.822, "destination.ip": "192.124.249.31", "destination.port": "80", "event.action": "Accept", diff --git a/x-pack/filebeat/module/cisco/_meta/docs.asciidoc b/x-pack/filebeat/module/cisco/_meta/docs.asciidoc index f1a40037f6e..b72070d4918 100644 --- a/x-pack/filebeat/module/cisco/_meta/docs.asciidoc +++ b/x-pack/filebeat/module/cisco/_meta/docs.asciidoc @@ -289,7 +289,7 @@ parameters on your Elasticsearch cluster: - {ref}/circuit-breaker.html#script-compilation-circuit-breaker[script.max_compilations_rate]: Increase to at least `100/5m`. -- {ref}/modules-scripting-using.html#modules-scripting-using-caching[script.cache_max_size]: +- {ref}/modules-scripting-using.html#modules-scripting-using-caching[script.cache.max_size]: Increase to at least `200` if using both filesets or other script-heavy modules. 
[float] diff --git a/x-pack/filebeat/module/cisco/asa/test/asa-fix.log b/x-pack/filebeat/module/cisco/asa/test/asa-fix.log new file mode 100644 index 00000000000..00819e8eec1 --- /dev/null +++ b/x-pack/filebeat/module/cisco/asa/test/asa-fix.log @@ -0,0 +1,5 @@ +Apr 17 2020 14:08:08 SNL-ASA-VPN-A01 : %ASA-6-302016: Teardown UDP connection 110577675 for Outside:10.123.123.123/53723(LOCAL\Elastic) to Inside:10.233.123.123/53 duration 0:00:00 bytes 148 (zzzzzz) +Apr 17 2020 14:00:31 SNL-ASA-VPN-A01 : %ASA-4-106023: Deny icmp src Inside:10.123.123.123 dst Outside:10.123.123.123 (type 11, code 0) by access-group "Inside_access_in" [0x0, 0x0] +Apr 15 2013 09:36:50: %ASA-4-106023: Deny tcp src dmz:10.123.123.123/6316 dst outside:10.123.123.123/53 type 3, code 0, by access-group "acl_dmz" [0xe3afb522, 0x0] +Apr 17 2020 14:16:20 SNL-ASA-VPN-A01 : %ASA-4-106023: Deny udp src Inside:10.123.123.123/57621(LOCAL\Elastic) dst Outside:10.123.123.123/57621 by access-group "Inside_access_in" [0x0, 0x0] +Apr 17 2020 14:15:07 SNL-ASA-VPN-A01 : %ASA-2-106017: Deny IP due to Land Attack from 10.123.123.123 to 10.123.123.123 diff --git a/x-pack/filebeat/module/cisco/asa/test/asa-fix.log-expected.json b/x-pack/filebeat/module/cisco/asa/test/asa-fix.log-expected.json new file mode 100644 index 00000000000..de470786f66 --- /dev/null +++ b/x-pack/filebeat/module/cisco/asa/test/asa-fix.log-expected.json @@ -0,0 +1,152 @@ +[ + { + "cisco.asa.connection_id": "110577675", + "cisco.asa.destination_interface": "Inside", + "cisco.asa.message_id": "302016", + "cisco.asa.source_interface": "Outside", + "cisco.asa.source_username": "(LOCAL\\Elastic)", + "destination.address": "10.233.123.123", + "destination.ip": "10.233.123.123", + "destination.port": 53, + "event.action": "flow-expiration", + "event.code": 302016, + "event.dataset": "cisco.asa", + "event.duration": 0, + "event.end": "2020-04-17T14:08:08.000-02:00", + "event.module": "cisco", + "event.original": "%ASA-6-302016: Teardown UDP connection 110577675 for Outside:10.123.123.123/53723(LOCAL\\Elastic) to Inside:10.233.123.123/53 duration 0:00:00 bytes 148 (zzzzzz)", + "event.severity": 6, + "event.start": "2020-04-17T16:08:08.000Z", + "event.timezone": "-02:00", + "fileset.name": "asa", + "host.hostname": "SNL-ASA-VPN-A01", + "input.type": "log", + "log.level": "informational", + "log.offset": 0, + "network.bytes": 148, + "network.iana_number": 17, + "network.transport": "udp", + "service.type": "cisco", + "source.address": "10.123.123.123", + "source.ip": "10.123.123.123", + "source.port": 53723, + "tags": [ + "cisco-asa" + ] + }, + { + "cisco.asa.destination_interface": "Outside", + "cisco.asa.message_id": "106023", + "cisco.asa.rule_name": "Inside_access_in", + "cisco.asa.source_interface": "Inside", + "destination.address": "10.123.123.123", + "destination.ip": "10.123.123.123", + "event.action": "firewall-rule", + "event.code": 106023, + "event.dataset": "cisco.asa", + "event.module": "cisco", + "event.original": "%ASA-4-106023: Deny icmp src Inside:10.123.123.123 dst Outside:10.123.123.123 (type 11, code 0) by access-group \"Inside_access_in\" [0x0, 0x0]", + "event.outcome": "deny", + "event.severity": 4, + "event.timezone": "-02:00", + "fileset.name": "asa", + "host.hostname": "SNL-ASA-VPN-A01", + "input.type": "log", + "log.level": "warning", + "log.offset": 200, + "network.iana_number": 1, + "network.transport": "icmp", + "service.type": "cisco", + "source.address": "10.123.123.123", + "source.ip": "10.123.123.123", + "tags": [ + "cisco-asa" + ] + }, + { + 
"cisco.asa.destination_interface": "outside", + "cisco.asa.message_id": "106023", + "cisco.asa.rule_name": "acl_dmz", + "cisco.asa.source_interface": "dmz", + "destination.address": "10.123.123.123", + "destination.ip": "10.123.123.123", + "destination.port": 53, + "event.action": "firewall-rule", + "event.code": 106023, + "event.dataset": "cisco.asa", + "event.module": "cisco", + "event.original": "%ASA-4-106023: Deny tcp src dmz:10.123.123.123/6316 dst outside:10.123.123.123/53 type 3, code 0, by access-group \"acl_dmz\" [0xe3afb522, 0x0]", + "event.outcome": "deny", + "event.severity": 4, + "event.timezone": "-02:00", + "fileset.name": "asa", + "input.type": "log", + "log.level": "warning", + "log.offset": 381, + "network.iana_number": 6, + "network.transport": "tcp", + "service.type": "cisco", + "source.address": "10.123.123.123", + "source.ip": "10.123.123.123", + "source.port": 6316, + "tags": [ + "cisco-asa" + ] + }, + { + "cisco.asa.destination_interface": "Outside", + "cisco.asa.message_id": "106023", + "cisco.asa.rule_name": "Inside_access_in", + "cisco.asa.source_interface": "Inside", + "cisco.asa.source_username": "(LOCAL\\Elastic)", + "destination.address": "10.123.123.123", + "destination.ip": "10.123.123.123", + "destination.port": 57621, + "event.action": "firewall-rule", + "event.code": 106023, + "event.dataset": "cisco.asa", + "event.module": "cisco", + "event.original": "%ASA-4-106023: Deny udp src Inside:10.123.123.123/57621(LOCAL\\Elastic) dst Outside:10.123.123.123/57621 by access-group \"Inside_access_in\" [0x0, 0x0]", + "event.outcome": "deny", + "event.severity": 4, + "event.timezone": "-02:00", + "fileset.name": "asa", + "host.hostname": "SNL-ASA-VPN-A01", + "input.type": "log", + "log.level": "warning", + "log.offset": 545, + "network.iana_number": 17, + "network.transport": "udp", + "service.type": "cisco", + "source.address": "10.123.123.123", + "source.ip": "10.123.123.123", + "source.port": 57621, + "tags": [ + "cisco-asa" + ] + }, + { + "cisco.asa.message_id": "106017", + "destination.address": "10.123.123.123", + "destination.ip": "10.123.123.123", + "event.action": "firewall-rule", + "event.code": 106017, + "event.dataset": "cisco.asa", + "event.module": "cisco", + "event.original": "%ASA-2-106017: Deny IP due to Land Attack from 10.123.123.123 to 10.123.123.123", + "event.outcome": "deny", + "event.severity": 2, + "event.timezone": "-02:00", + "fileset.name": "asa", + "host.hostname": "SNL-ASA-VPN-A01", + "input.type": "log", + "log.level": "critical", + "log.offset": 734, + "service.type": "cisco", + "source.address": "10.123.123.123", + "source.ip": "10.123.123.123", + "tags": [ + "cisco-asa" + ] + } +] \ No newline at end of file diff --git a/x-pack/filebeat/module/cisco/ftd/test/asa-fix.log b/x-pack/filebeat/module/cisco/ftd/test/asa-fix.log new file mode 100644 index 00000000000..00819e8eec1 --- /dev/null +++ b/x-pack/filebeat/module/cisco/ftd/test/asa-fix.log @@ -0,0 +1,5 @@ +Apr 17 2020 14:08:08 SNL-ASA-VPN-A01 : %ASA-6-302016: Teardown UDP connection 110577675 for Outside:10.123.123.123/53723(LOCAL\Elastic) to Inside:10.233.123.123/53 duration 0:00:00 bytes 148 (zzzzzz) +Apr 17 2020 14:00:31 SNL-ASA-VPN-A01 : %ASA-4-106023: Deny icmp src Inside:10.123.123.123 dst Outside:10.123.123.123 (type 11, code 0) by access-group "Inside_access_in" [0x0, 0x0] +Apr 15 2013 09:36:50: %ASA-4-106023: Deny tcp src dmz:10.123.123.123/6316 dst outside:10.123.123.123/53 type 3, code 0, by access-group "acl_dmz" [0xe3afb522, 0x0] +Apr 17 2020 14:16:20 SNL-ASA-VPN-A01 
: %ASA-4-106023: Deny udp src Inside:10.123.123.123/57621(LOCAL\Elastic) dst Outside:10.123.123.123/57621 by access-group "Inside_access_in" [0x0, 0x0] +Apr 17 2020 14:15:07 SNL-ASA-VPN-A01 : %ASA-2-106017: Deny IP due to Land Attack from 10.123.123.123 to 10.123.123.123 diff --git a/x-pack/filebeat/module/cisco/ftd/test/asa-fix.log-expected.json b/x-pack/filebeat/module/cisco/ftd/test/asa-fix.log-expected.json new file mode 100644 index 00000000000..bf6c6b521da --- /dev/null +++ b/x-pack/filebeat/module/cisco/ftd/test/asa-fix.log-expected.json @@ -0,0 +1,157 @@ +[ + { + "@timestamp": "2020-04-17T14:08:08.000-02:00", + "cisco.ftd.connection_id": "110577675", + "cisco.ftd.destination_interface": "Inside", + "cisco.ftd.message_id": "302016", + "cisco.ftd.source_interface": "Outside", + "cisco.ftd.source_username": "(LOCAL\\Elastic)", + "destination.address": "10.233.123.123", + "destination.ip": "10.233.123.123", + "destination.port": 53, + "event.action": "flow-expiration", + "event.code": 302016, + "event.dataset": "cisco.ftd", + "event.duration": 0, + "event.end": "2020-04-17T14:08:08.000-02:00", + "event.module": "cisco", + "event.original": "%ASA-6-302016: Teardown UDP connection 110577675 for Outside:10.123.123.123/53723(LOCAL\\Elastic) to Inside:10.233.123.123/53 duration 0:00:00 bytes 148 (zzzzzz)", + "event.severity": 6, + "event.start": "2020-04-17T16:08:08.000Z", + "event.timezone": "-02:00", + "fileset.name": "ftd", + "host.hostname": "SNL-ASA-VPN-A01", + "input.type": "log", + "log.level": "informational", + "log.offset": 0, + "network.bytes": 148, + "network.iana_number": 17, + "network.transport": "udp", + "service.type": "cisco", + "source.address": "10.123.123.123", + "source.ip": "10.123.123.123", + "source.port": 53723, + "tags": [ + "cisco-ftd" + ] + }, + { + "@timestamp": "2020-04-17T14:00:31.000-02:00", + "cisco.ftd.destination_interface": "Outside", + "cisco.ftd.message_id": "106023", + "cisco.ftd.rule_name": "Inside_access_in", + "cisco.ftd.source_interface": "Inside", + "destination.address": "10.123.123.123", + "destination.ip": "10.123.123.123", + "event.action": "firewall-rule", + "event.code": 106023, + "event.dataset": "cisco.ftd", + "event.module": "cisco", + "event.original": "%ASA-4-106023: Deny icmp src Inside:10.123.123.123 dst Outside:10.123.123.123 (type 11, code 0) by access-group \"Inside_access_in\" [0x0, 0x0]", + "event.outcome": "deny", + "event.severity": 4, + "event.timezone": "-02:00", + "fileset.name": "ftd", + "host.hostname": "SNL-ASA-VPN-A01", + "input.type": "log", + "log.level": "warning", + "log.offset": 200, + "network.iana_number": 1, + "network.transport": "icmp", + "service.type": "cisco", + "source.address": "10.123.123.123", + "source.ip": "10.123.123.123", + "tags": [ + "cisco-ftd" + ] + }, + { + "@timestamp": "2013-04-15T09:36:50.000-02:00", + "cisco.ftd.destination_interface": "outside", + "cisco.ftd.message_id": "106023", + "cisco.ftd.rule_name": "acl_dmz", + "cisco.ftd.source_interface": "dmz", + "destination.address": "10.123.123.123", + "destination.ip": "10.123.123.123", + "destination.port": 53, + "event.action": "firewall-rule", + "event.code": 106023, + "event.dataset": "cisco.ftd", + "event.module": "cisco", + "event.original": "%ASA-4-106023: Deny tcp src dmz:10.123.123.123/6316 dst outside:10.123.123.123/53 type 3, code 0, by access-group \"acl_dmz\" [0xe3afb522, 0x0]", + "event.outcome": "deny", + "event.severity": 4, + "event.timezone": "-02:00", + "fileset.name": "ftd", + "input.type": "log", + "log.level": "warning", 
+ "log.offset": 381, + "network.iana_number": 6, + "network.transport": "tcp", + "service.type": "cisco", + "source.address": "10.123.123.123", + "source.ip": "10.123.123.123", + "source.port": 6316, + "tags": [ + "cisco-ftd" + ] + }, + { + "@timestamp": "2020-04-17T14:16:20.000-02:00", + "cisco.ftd.destination_interface": "Outside", + "cisco.ftd.message_id": "106023", + "cisco.ftd.rule_name": "Inside_access_in", + "cisco.ftd.source_interface": "Inside", + "cisco.ftd.source_username": "(LOCAL\\Elastic)", + "destination.address": "10.123.123.123", + "destination.ip": "10.123.123.123", + "destination.port": 57621, + "event.action": "firewall-rule", + "event.code": 106023, + "event.dataset": "cisco.ftd", + "event.module": "cisco", + "event.original": "%ASA-4-106023: Deny udp src Inside:10.123.123.123/57621(LOCAL\\Elastic) dst Outside:10.123.123.123/57621 by access-group \"Inside_access_in\" [0x0, 0x0]", + "event.outcome": "deny", + "event.severity": 4, + "event.timezone": "-02:00", + "fileset.name": "ftd", + "host.hostname": "SNL-ASA-VPN-A01", + "input.type": "log", + "log.level": "warning", + "log.offset": 545, + "network.iana_number": 17, + "network.transport": "udp", + "service.type": "cisco", + "source.address": "10.123.123.123", + "source.ip": "10.123.123.123", + "source.port": 57621, + "tags": [ + "cisco-ftd" + ] + }, + { + "@timestamp": "2020-04-17T14:15:07.000-02:00", + "cisco.ftd.message_id": "106017", + "destination.address": "10.123.123.123", + "destination.ip": "10.123.123.123", + "event.action": "firewall-rule", + "event.code": 106017, + "event.dataset": "cisco.ftd", + "event.module": "cisco", + "event.original": "%ASA-2-106017: Deny IP due to Land Attack from 10.123.123.123 to 10.123.123.123", + "event.outcome": "deny", + "event.severity": 2, + "event.timezone": "-02:00", + "fileset.name": "ftd", + "host.hostname": "SNL-ASA-VPN-A01", + "input.type": "log", + "log.level": "critical", + "log.offset": 734, + "service.type": "cisco", + "source.address": "10.123.123.123", + "source.ip": "10.123.123.123", + "tags": [ + "cisco-ftd" + ] + } +] \ No newline at end of file diff --git a/x-pack/filebeat/module/cisco/shared/ingest/asa-ftd-pipeline.yml b/x-pack/filebeat/module/cisco/shared/ingest/asa-ftd-pipeline.yml index 9dfc96d77e8..babf697616b 100644 --- a/x-pack/filebeat/module/cisco/shared/ingest/asa-ftd-pipeline.yml +++ b/x-pack/filebeat/module/cisco/shared/ingest/asa-ftd-pipeline.yml @@ -1,3 +1,4 @@ +--- description: "Pipeline for Cisco {< .internal_PREFIX >} logs" processors: # @@ -240,10 +241,11 @@ processors: if: "ctx._temp_.cisco.message_id == '106022'" field: "message" pattern: "%{event.outcome} %{network.transport} connection spoof from %{source.address} to %{destination.address} on interface %{_temp_.cisco.source_interface}" - - dissect: + - grok: if: "ctx._temp_.cisco.message_id == '106023'" field: "message" - pattern: '%{event.outcome} %{network.transport} src %{_temp_.cisco.source_interface}:%{source.address}/%{source.port} dst %{_temp_.cisco.destination_interface}:%{destination.address}/%{destination.port} %{} access%{}group "%{_temp_.cisco.list_id}"%{}' + patterns: + - ^%{NOTSPACE:event.outcome} %{NOTSPACE:network.transport} src %{NOTSPACE:_temp_.cisco.source_interface}:%{IPORHOST:source.address}(/%{POSINT:source.port})?\s*(%{GREEDYDATA:_temp_.cisco.source_username} )?dst %{NOTSPACE:_temp_.cisco.destination_interface}:%{IPORHOST:destination.address}(/%{POSINT:destination.port})?%{DATA}by access.group "%{NOTSPACE:_temp_.cisco.list_id}" - dissect: if: 
"ctx._temp_.cisco.message_id == '106027'" field: "message" @@ -440,8 +442,8 @@ processors: field: "message" if: '["302014", "302016", "302018", "302021", "302036", "302304", "302306"].contains(ctx._temp_.cisco.message_id)' patterns: - - "Teardown %{NOTSPACE:network.transport} (?:state-bypass )?connection %{NOTSPACE:_temp_.cisco.connection_id} (?:for|from) %{NOTCOLON:_temp_.cisco.source_interface}:%{DATA:source.address}/%{NUMBER:source.port:int} (?:%{NOTSPACE:_temp_.cisco.source_username} )?to %{NOTCOLON:_temp_.cisco.destination_interface}:%{DATA:destination.address}/%{NUMBER:destination.port:int} (?:%{NOTSPACE:_temp_.cisco.destination_username} )?(?:duration %{TIME:_temp_.duration_hms} bytes %{NUMBER:network.bytes:int})%{GREEDYDATA}" - - "Teardown %{NOTSPACE:network.transport} connection for faddr (?:%{NOTCOLON:_temp_.cisco.source_interface}:)?%{ECSDESTIPORHOST}/%{NUMBER} (?:%{NOTSPACE:_temp_.cisco.destination_username} )?gaddr (?:%{NOTCOLON}:)?%{MAPPEDSRC}/%{NUMBER} laddr (?:%{NOTCOLON:_temp_.cisco.source_interface}:)?%{ECSSOURCEIPORHOST}/%{NUMBER}(?: %{NOTSPACE:_temp_.cisco.source_username})?%{GREEDYDATA}" + - Teardown %{NOTSPACE:network.transport} (?:state-bypass )?connection %{NOTSPACE:_temp_.cisco.connection_id} (?:for|from) %{NOTCOLON:_temp_.cisco.source_interface}:%{DATA:source.address}/%{NUMBER:source.port:int}\s*(?:%{NOTSPACE:_temp_.cisco.source_username} )?to %{NOTCOLON:_temp_.cisco.destination_interface}:%{DATA:destination.address}/%{NUMBER:destination.port:int}\s*(?:%{NOTSPACE:_temp_.cisco.destination_username} )?(?:duration %{TIME:_temp_.duration_hms} bytes %{NUMBER:network.bytes:int})%{GREEDYDATA} + - Teardown %{NOTSPACE:network.transport} connection for faddr (?:%{NOTCOLON:_temp_.cisco.source_interface}:)?%{ECSDESTIPORHOST}/%{NUMBER}\s*(?:%{NOTSPACE:_temp_.cisco.destination_username} )?gaddr (?:%{NOTCOLON}:)?%{MAPPEDSRC}/%{NUMBER} laddr (?:%{NOTCOLON:_temp_.cisco.source_interface}:)?%{ECSSOURCEIPORHOST}/%{NUMBER}\s*(?:%{NOTSPACE:_temp_.cisco.source_username})?%{GREEDYDATA} pattern_definitions: NOTCOLON: "[^:]*" ECSSOURCEIPORHOST: "(?:%{IP:source.address}|%{HOSTNAME:source.domain})" diff --git a/x-pack/filebeat/module/mssql/_meta/docs.asciidoc b/x-pack/filebeat/module/mssql/_meta/docs.asciidoc index ff4dc54b3d5..2861d2754ee 100644 --- a/x-pack/filebeat/module/mssql/_meta/docs.asciidoc +++ b/x-pack/filebeat/module/mssql/_meta/docs.asciidoc @@ -20,7 +20,7 @@ file to override the default paths for Træfik logs: ["source","yaml",subs="attributes"] ----- - module: mssql - access: + log: enabled: true var.paths: ["/var/opt/mssql/log/error*"] ----- @@ -30,7 +30,7 @@ To specify the same settings at the command line, you use: ["source","sh",subs="attributes"] ----- --M "mssql.access.var.paths=[/var/opt/mssql/log/error*]" +-M "mssql.log.var.paths=[/var/opt/mssql/log/error*]" ----- //set the fileset name used in the included example diff --git a/x-pack/filebeat/module/panw/fields.go b/x-pack/filebeat/module/panw/fields.go index 8d877ad9d7e..5864f7597ab 100644 --- a/x-pack/filebeat/module/panw/fields.go +++ b/x-pack/filebeat/module/panw/fields.go @@ -19,5 +19,5 @@ func init() { // AssetPanw returns asset data. // This is the base64 encoded gzipped contents of module/panw. 
func AssetPanw() string { - return "eJzMmM9u4zYQxu95irm5BeLcesmhQNpFgABpauw26HFBUyOJNcXRDkdxtU+/IC3bWpu2tLbXiG6STH6/+fNRpKewwPYeauWWNwBixOLmLkOv2dRiyN3D7zcAAH9R1liEnBhmyhI8WCF4QVkSLzz8Mnt4mf796dcbgNygzfx9HDQFp6rttOGStsZ7KJiaunuSEAvXY5wHcqYKpMQ4B1SR4q77UV8KdvTIbx+nVI9Jf6dPHOUTQa9iBkuFv+uP3cPqkXFj0aN8L9XhLbBdEmc7744xAsCLqhAoj4xhcpBSCVRKdIkZSGk8ePTekLtLAnlqWGOSZy9dwzRd0oQA/xd0WcQSqqcW39B2YkDz/1DL3c7oVNr6pF/J7XIO5W4Ecbg+rbCCQFfvQ1nr8xgnyLnaS95loTYqP0Dm1G6DwdGijiSakZfpy8M/6zKqLGP0/hZMvn4U3hoPNXJOXGG2z3i4zr3MpgDXARx4OYJ/P4KnWQpws4oQp/K4BrHkisuhBLEtTNKqGXoxToV5r+TXnuK7M+2HHtv7cm6f7D3at1/Vvof7z88z8qCVB8w80kMHYkq7eoSvB519HtcIi7vV3uJK9u7UTrR2rVX92aQ8cgEPzZReoIBWtTSM8PQh+keBlIzqUBbhTBONaWhNVdU4I2069DHhj0xBuP5cq8UMWFpOS+XLzaY0dNhvU2nq7Z74QGPlxl5rkxekTmypENxp/fKHcYpb2GRn3SkrGo9OAu8cQTll26+Yrsu8jbH8a2z2aDiM4zejMbWcpIuczH3D9kqpb9iemHmtBAvi9ue4+TH2a6zH68fn8LWRiY/srx+fN9rpVTuMXRfkNo55Q86MFiAXb2OFlcvA+OQEaKREhkmlrNGGGj+5hUnBql0qxsktEMNkjs4UbjJkIkvLfdufcXh7CrsDpyy4pkI2GkyGTkxukGMXo9Ll/n4hfY7DLw06jZ9dU82Rk4yJ79oA4DMVgE647ZPFE6bxYJxmrNAJZp28GGVtoo6vznxpcBuSpSIiDcTULfaMRw6pJ+U9dB3xqnOC1JiPyw7UJdsg8d/CTiNEm/8AXbj5qXybrPXJvgUAAP//KvmK+g==" + return "eJzMmM9u4zYQxu95irm5BeLcesmhQNpFgABpauw26HFBUyOJNcXRDkdxtU+/IC3LWouOtE42iG6STH6/+fNRpJewwfYaauW2FwBixGJ/l6HXbGox5K7h9wsAgL8oayxCTgwrZQlurBA8oGyJNx5+Wd08LP/+9OsFQG7QZv46DlqCU9Vh2nBJW+M1FExN3T1JiIXrNs4DOVMFUmKcA6pIcdX9aCgFR3rkD49Tqs9Jf6dPHOUTQe9iBkuFvxqOHWENyLix6FG+l+rwNthuibOjd88xAsCDqhAoj4xhcpBSCVRKdIkZSGk8ePTekLtKAnlqWGOSZ5SuaZouaUKA/wu6LGIJ1UuLT2g7MaD1f6jl6mh0Km1D0q/kjjmncjeDOFyfdlhBoKv3qawNeYwT5FyNkve6UL3KD5A5ddxg8GxRZxKtyMvy4eaffRlVljF6fwkm3z8Kb42HGjknrjAbM56u8yCzKcB9ACdezuAfR3C3SgH2qwhxKo97EEuueD2UIHaASVo1Qy/GqTDvG/l1oPjuTPthwPa+nDske4/2HVZ16OHh85cZedLKE2ae6aETMaVdPcPXk85+GdcMi7vd3uKN7N2pnWntWqv6s0l55BU8tFJ6gwJa1dIwwt2H6B8FUjKqU1mEF5poTkNrqqrGGWnToc8Jf2YKwvXnXi1mwNJ2WSpf9pvS0GG/LaWpD3viE42VG/tWm7wgdWZLheDO65c/jFPcQp+dfafsaDw6CbxrBOWUbb9iui7rNsbyr7HZreEwjp+MxtRyki5yMvcN2zdKfcP2zMxrJVgQtz/HzbexX2M9Hj/eh6+NLHxkf/x432unV+0wdl+QyzjmCTkzWoBcvI0VVi4D45MToJESGRaVskYbavziEhYFq3arGBeXQAyLNTpTuMWUiSxtx7Z/weHtLuwOnLLgmgrZaDAZOjG5QY5djEqX4/1C+hyHXxp0Gj+7plojJxkT37UJwHsqAJ1wOySLJ0zjwTjNWKETzDp5McraRB0fnfnS4CEkS0VEmoipW+wZnzmknpX30HXEu84JUnM+LkdQr9kGif8Wjhoh2vwH6MLNT+Xrs5YiGwIpffK8ModlOYK5iROCqA26nqD3yLcAAAD//yC/rB0=" } diff --git a/x-pack/filebeat/module/panw/panos/_meta/fields.yml b/x-pack/filebeat/module/panw/panos/_meta/fields.yml index 14920667ca6..a5900461f08 100644 --- a/x-pack/filebeat/module/panw/panos/_meta/fields.yml +++ b/x-pack/filebeat/module/panw/panos/_meta/fields.yml @@ -127,3 +127,7 @@ type: keyword description: > Palo Alto Networks name for the threat. + - name: action + type: keyword + description: >- + Action taken for the session. 
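Two of the changes in this patch are easy to misread. In the Cisco asa-ftd pipeline above, dissect is replaced by grok for message 106023, and the Teardown patterns gain \s* in place of a literal space, because ASA can print the username directly after the port with no separator (e.g. "/57621(LOCAL\Elastic)" in the test fixtures) and may omit the port or username entirely; dissect is strictly positional and cannot skip an absent token. The added pattern lines also drop their YAML double quotes, since \s is not a valid escape sequence inside a double-quoted YAML scalar. In the panw files that follow, the raw PAN-OS verdict moves from event.outcome into the new panw.panos.action field, freeing event.outcome for the ECS success/failure constant while event.type derives allowed/denied from the verdict. Below is a minimal sketch of the grok idea as a standalone test pipeline, not part of the patch; the description text, IPs, and sample messages in the comments are made up, while the pattern itself is the one added above:

description: "Sketch only: optional tokens handled with grok"
processors:
  # Both hypothetical inputs match the single pattern below; the (...)?
  # groups make the port and the "(DOMAIN\user)" token optional:
  #   Deny udp src Inside:10.0.0.1/57621(LOCAL\user) dst Outside:10.0.0.2/57621 by access-group "acl1"
  #   Deny icmp src Inside:10.0.0.1 dst Outside:10.0.0.2 by access-group "acl1"
  - grok:
      field: "message"
      patterns:
        - ^%{NOTSPACE:event.outcome} %{NOTSPACE:network.transport} src %{NOTSPACE:_temp_.cisco.source_interface}:%{IPORHOST:source.address}(/%{POSINT:source.port})?\s*(%{GREEDYDATA:_temp_.cisco.source_username} )?dst %{NOTSPACE:_temp_.cisco.destination_interface}:%{IPORHOST:destination.address}(/%{POSINT:destination.port})?%{DATA}by access.group "%{NOTSPACE:_temp_.cisco.list_id}"

The trade-off is deliberate: dissect is cheaper because it is purely positional, while grok pays a regular-expression cost to tolerate fields that may be missing.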
diff --git a/x-pack/filebeat/module/panw/panos/config/input.yml b/x-pack/filebeat/module/panw/panos/config/input.yml index 7998f04511a..929237b99af 100644 --- a/x-pack/filebeat/module/panw/panos/config/input.yml +++ b/x-pack/filebeat/module/panw/panos/config/input.yml @@ -70,7 +70,7 @@ processors: destination.nat.port: 27 _temp_.labels: 28 network.transport: 29 - event.outcome: 30 + panw.panos.action: 30 network.bytes: 31 client.bytes: 32 destination.bytes: 32 @@ -123,7 +123,7 @@ processors: destination.nat.port: 27 _temp_.labels: 28 network.transport: 29 - event.outcome: 30 + panw.panos.action: 30 panw.panos.threat.resource: 31 url.original: 31 panw.panos.threat.name: 32 diff --git a/x-pack/filebeat/module/panw/panos/ingest/pipeline.yml b/x-pack/filebeat/module/panw/panos/ingest/pipeline.yml index 135d90a04dc..1c2c912bd87 100644 --- a/x-pack/filebeat/module/panw/panos/ingest/pipeline.yml +++ b/x-pack/filebeat/module/panw/panos/ingest/pipeline.yml @@ -175,34 +175,82 @@ processors: # Set event.category depending on log type. - set: + field: event.kind + value: event + if: 'ctx?._temp_?.message_type == "TRAFFIC"' + - append: field: event.category - value: network_traffic + value: + - network_traffic + - network if: 'ctx?._temp_?.message_type == "TRAFFIC"' - set: + field: event.kind + value: alert + if: 'ctx?._temp_?.message_type == "THREAT"' + - append: field: event.category - value: security_threat + value: + - security_threat + - intrusion_detection + - network if: 'ctx?._temp_?.message_type == "THREAT"' - - drop: if: 'ctx?.event?.category == null' + - append: + field: event.type + value: allowed + if: "ctx?.panw?.panos?.action != null && ['alert', 'allow', 'continue'].contains(ctx.panw.panos.action)" + - append: + field: event.type + value: denied + if: "ctx?.panw?.panos?.action != null && ['deny', 'drop', 'reset-client', 'reset-server', 'reset-both', 'block-url', 'block-ip', 'random-drop', 'sinkhole', 'block'].contains(ctx.panw.panos.action)" + - set: + field: event.outcome + value: success + # event.action for traffic logs. - set: field: event.action value: flow_started if: 'ctx?._temp_?.message_subtype == "start"' + - append: + field: event.type + value: + - start + - connection + if: 'ctx?._temp_?.message_subtype == "start"' - set: field: event.action value: flow_terminated if: 'ctx?._temp_?.message_subtype == "end"' + - append: + field: event.type + value: + - end + - connection + if: 'ctx?._temp_?.message_subtype == "end"' - set: field: event.action value: flow_dropped if: 'ctx?._temp_?.message_subtype == "drop"' + - append: + field: event.type + value: + - denied + - connection + if: 'ctx?._temp_?.message_subtype == "drop"' - set: field: event.action value: flow_denied if: 'ctx?._temp_?.message_subtype == "deny"' + - append: + field: event.type + value: + - denied + - connection + if: 'ctx?._temp_?.message_subtype == "deny"' # event.action for threat logs. - set: @@ -276,21 +324,21 @@ processors: # Normalize event.outcome. # These values appear in the TRAFFIC docs but look like a mistake. 
- set: - field: event.outcome + field: panw.panos.action value: 'drop-icmp' - if: 'ctx?.event?.outcome == "drop icmp" || ctx?.event?.outcome == "drop ICMP"' + if: 'ctx?.panw?.panos?.action == "drop icmp" || ctx?.panw?.panos?.action == "drop ICMP"' - set: - field: event.outcome + field: panw.panos.action value: 'reset-both' - if: 'ctx?.event?.outcome == "reset both"' + if: 'ctx?.panw?.panos?.action == "reset both"' - set: - field: event.outcome + field: panw.panos.action value: 'reset-client' - if: 'ctx?.event?.outcome == "reset client"' + if: 'ctx?.panw?.panos?.action == "reset client"' - set: - field: event.outcome + field: panw.panos.action value: 'reset-server' - if: 'ctx?.event?.outcome == "reset server"' + if: 'ctx?.panw?.panos?.action == "reset server"' # Build related.ip array from src/dest/NAT IPs. - append: @@ -391,6 +439,36 @@ processors: value: 'URL-filtering' if: 'ctx?.panw?.panos?.threat?.id == "9999"' + - set: + field: rule.name + value: "{{panw.panos.ruleset}}" + if: "ctx?.panw?.panos?.ruleset != null" + + - append: + field: related.user + value: "{{client.user.name}}" + if: "ctx?.client?.user?.name != null" + + - append: + field: related.user + value: "{{source.user.name}}" + if: "ctx?.source?.user?.name != null" + + - append: + field: related.user + value: "{{server.user.name}}" + if: "ctx?.server?.user?.name != null" + + - append: + field: related.user + value: "{{destination.user.name}}" + if: "ctx?.destination?.user?.name != null" + + - append: + field: related.hash + value: "{{panw.panos.file.hash}}" + if: "ctx?.panw?.panos?.file?.hash != null" + # Remove temporary fields. - remove: field: diff --git a/x-pack/filebeat/module/panw/panos/test/pan_inc_other.log-expected.json b/x-pack/filebeat/module/panw/panos/test/pan_inc_other.log-expected.json index e94019b5a55..5b43295399c 100644 --- a/x-pack/filebeat/module/panw/panos/test/pan_inc_other.log-expected.json +++ b/x-pack/filebeat/module/panw/panos/test/pan_inc_other.log-expected.json @@ -23,14 +23,23 @@ "destination.packets": 1, "destination.port": 80, "event.action": "flow_started", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2012-04-10T04:39:56.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2012-04-10T04:39:56.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "start", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -44,6 +53,7 @@ "network.transport": "tcp", "network.type": "ipv4", "observer.serial_number": "01606001116", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -62,6 +72,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.bytes": 0, "server.ip": "204.232.231.46", "server.packets": 1, diff --git a/x-pack/filebeat/module/panw/panos/test/pan_inc_threat.log-expected.json b/x-pack/filebeat/module/panw/panos/test/pan_inc_threat.log-expected.json index ecf18d56eb3..f6ca00ac200 100644 --- a/x-pack/filebeat/module/panw/panos/test/pan_inc_threat.log-expected.json +++ b/x-pack/filebeat/module/panw/panos/test/pan_inc_threat.log-expected.json @@ -20,12 +20,20 @@ "destination.nat.port": 0, "destination.port": 80, "event.action": "url_filtering", - "event.category": 
"security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "alert", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "allowed" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -38,6 +46,7 @@ "network.direction": "inbound", "network.transport": "tcp", "observer.serial_number": "01606001116", + "panw.panos.action": "alert", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -58,6 +67,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.ip": "204.232.231.46", "server.port": 80, "service.type": "panw", @@ -94,12 +108,20 @@ "destination.nat.port": 0, "destination.port": 80, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "alert", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "allowed" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -112,6 +134,7 @@ "network.direction": "inbound", "network.transport": "tcp", "observer.serial_number": "01606001116", + "panw.panos.action": "alert", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -132,6 +155,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.ip": "204.232.231.46", "server.port": 80, "service.type": "panw", @@ -168,12 +196,20 @@ "destination.nat.port": 0, "destination.port": 80, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "alert", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "allowed" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -186,6 +222,7 @@ "network.direction": "inbound", "network.transport": "tcp", "observer.serial_number": "01606001116", + "panw.panos.action": "alert", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -206,6 +243,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.ip": "204.232.231.46", "server.port": 80, "service.type": "panw", @@ -242,12 +284,20 @@ "destination.nat.port": 0, "destination.port": 80, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "alert", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "allowed" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -260,6 +310,7 @@ "network.direction": "inbound", "network.transport": "tcp", "observer.serial_number": "01606001116", + 
"panw.panos.action": "alert", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -280,6 +331,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.ip": "204.232.231.46", "server.port": 80, "service.type": "panw", @@ -316,12 +372,20 @@ "destination.nat.port": 0, "destination.port": 80, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "alert", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "allowed" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -334,6 +398,7 @@ "network.direction": "inbound", "network.transport": "tcp", "observer.serial_number": "01606001116", + "panw.panos.action": "alert", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -354,6 +419,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.ip": "204.232.231.46", "server.port": 80, "service.type": "panw", @@ -390,12 +460,20 @@ "destination.nat.port": 0, "destination.port": 80, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "alert", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "allowed" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -408,6 +486,7 @@ "network.direction": "inbound", "network.transport": "tcp", "observer.serial_number": "01606001116", + "panw.panos.action": "alert", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -428,6 +507,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.ip": "204.232.231.46", "server.port": 80, "service.type": "panw", @@ -464,12 +548,20 @@ "destination.nat.port": 0, "destination.port": 80, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "alert", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "allowed" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -482,6 +574,7 @@ "network.direction": "inbound", "network.transport": "tcp", "observer.serial_number": "01606001116", + "panw.panos.action": "alert", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -502,6 +595,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.ip": "204.232.231.46", "server.port": 80, "service.type": "panw", @@ -538,12 +636,20 @@ "destination.nat.port": 0, "destination.port": 80, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + 
"security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "alert", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "allowed" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -556,6 +662,7 @@ "network.direction": "inbound", "network.transport": "tcp", "observer.serial_number": "01606001116", + "panw.panos.action": "alert", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -576,6 +683,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.ip": "204.232.231.46", "server.port": 80, "service.type": "panw", @@ -612,12 +724,20 @@ "destination.nat.port": 0, "destination.port": 80, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "alert", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "allowed" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -630,6 +750,7 @@ "network.direction": "inbound", "network.transport": "tcp", "observer.serial_number": "01606001116", + "panw.panos.action": "alert", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -650,6 +771,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.ip": "204.232.231.46", "server.port": 80, "service.type": "panw", @@ -686,12 +812,20 @@ "destination.nat.port": 0, "destination.port": 80, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "alert", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "allowed" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -704,6 +838,7 @@ "network.direction": "inbound", "network.transport": "tcp", "observer.serial_number": "01606001116", + "panw.panos.action": "alert", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -724,6 +859,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.ip": "204.232.231.46", "server.port": 80, "service.type": "panw", @@ -760,12 +900,20 @@ "destination.nat.port": 0, "destination.port": 80, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "alert", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "allowed" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -778,6 +926,7 @@ "network.direction": "inbound", "network.transport": "tcp", "observer.serial_number": "01606001116", + "panw.panos.action": "alert", 
"panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -798,6 +947,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.ip": "204.232.231.46", "server.port": 80, "service.type": "panw", @@ -834,12 +988,20 @@ "destination.nat.port": 0, "destination.port": 80, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "alert", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "allowed" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -852,6 +1014,7 @@ "network.direction": "inbound", "network.transport": "tcp", "observer.serial_number": "01606001116", + "panw.panos.action": "alert", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -872,6 +1035,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.ip": "204.232.231.46", "server.port": 80, "service.type": "panw", @@ -908,12 +1076,20 @@ "destination.nat.port": 0, "destination.port": 80, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "alert", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "allowed" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -926,6 +1102,7 @@ "network.direction": "inbound", "network.transport": "tcp", "observer.serial_number": "01606001116", + "panw.panos.action": "alert", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -946,6 +1123,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.ip": "204.232.231.46", "server.port": 80, "service.type": "panw", @@ -982,12 +1164,20 @@ "destination.nat.port": 0, "destination.port": 80, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "block-url", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -999,6 +1189,7 @@ "network.direction": "inbound", "network.transport": "tcp", "observer.serial_number": "01606001116", + "panw.panos.action": "block-url", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -1019,6 +1210,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.ip": "204.232.231.46", "server.port": 80, "service.type": "panw", @@ -1055,12 +1251,20 @@ "destination.nat.port": 0, "destination.port": 80, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + 
"security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "alert", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "allowed" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -1073,6 +1277,7 @@ "network.direction": "inbound", "network.transport": "tcp", "observer.serial_number": "01606001116", + "panw.panos.action": "alert", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -1093,6 +1298,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.ip": "204.232.231.46", "server.port": 80, "service.type": "panw", @@ -1129,12 +1339,20 @@ "destination.nat.port": 0, "destination.port": 80, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "alert", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "allowed" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -1147,6 +1365,7 @@ "network.direction": "inbound", "network.transport": "tcp", "observer.serial_number": "01606001116", + "panw.panos.action": "alert", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -1167,6 +1386,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.ip": "204.232.231.46", "server.port": 80, "service.type": "panw", @@ -1200,12 +1424,20 @@ "destination.nat.port": 0, "destination.port": 80, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "block-url", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -1217,6 +1449,7 @@ "network.direction": "inbound", "network.transport": "tcp", "observer.serial_number": "01606001116", + "panw.panos.action": "block-url", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -1237,6 +1470,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.ip": "78.159.99.224", "server.port": 80, "service.type": "panw", @@ -1273,12 +1511,20 @@ "destination.nat.port": 0, "destination.port": 80, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "alert", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "allowed" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -1291,6 +1537,7 @@ "network.direction": "inbound", "network.transport": "tcp", "observer.serial_number": "01606001116", + "panw.panos.action": 
"alert", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -1311,6 +1558,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.ip": "204.232.231.46", "server.port": 80, "service.type": "panw", @@ -1347,12 +1599,20 @@ "destination.nat.port": 0, "destination.port": 80, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "alert", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "allowed" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -1365,6 +1625,7 @@ "network.direction": "inbound", "network.transport": "tcp", "observer.serial_number": "01606001116", + "panw.panos.action": "alert", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -1385,6 +1646,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.ip": "204.232.231.46", "server.port": 80, "service.type": "panw", @@ -1421,12 +1687,20 @@ "destination.nat.port": 0, "destination.port": 80, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "alert", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "allowed" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -1439,6 +1713,7 @@ "network.direction": "inbound", "network.transport": "tcp", "observer.serial_number": "01606001116", + "panw.panos.action": "alert", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -1459,6 +1734,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.ip": "204.232.231.46", "server.port": 80, "service.type": "panw", @@ -1495,12 +1775,20 @@ "destination.nat.port": 0, "destination.port": 80, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "alert", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "allowed" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -1513,6 +1801,7 @@ "network.direction": "inbound", "network.transport": "tcp", "observer.serial_number": "01606001116", + "panw.panos.action": "alert", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -1533,6 +1822,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.ip": "204.232.231.46", "server.port": 80, "service.type": "panw", @@ -1569,12 +1863,20 @@ "destination.nat.port": 0, "destination.port": 80, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + 
"security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "alert", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "allowed" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -1587,6 +1889,7 @@ "network.direction": "inbound", "network.transport": "tcp", "observer.serial_number": "01606001116", + "panw.panos.action": "alert", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -1607,6 +1910,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.ip": "204.232.231.46", "server.port": 80, "service.type": "panw", @@ -1643,12 +1951,20 @@ "destination.nat.port": 0, "destination.port": 80, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "alert", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "allowed" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -1661,6 +1977,7 @@ "network.direction": "inbound", "network.transport": "tcp", "observer.serial_number": "01606001116", + "panw.panos.action": "alert", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -1681,6 +1998,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.ip": "204.232.231.46", "server.port": 80, "service.type": "panw", @@ -1717,12 +2039,20 @@ "destination.nat.port": 0, "destination.port": 80, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "alert", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "allowed" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -1735,6 +2065,7 @@ "network.direction": "inbound", "network.transport": "tcp", "observer.serial_number": "01606001116", + "panw.panos.action": "alert", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -1755,6 +2086,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.ip": "204.232.231.46", "server.port": 80, "service.type": "panw", @@ -1791,12 +2127,20 @@ "destination.nat.port": 0, "destination.port": 80, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "alert", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "allowed" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -1809,6 +2153,7 @@ "network.direction": "inbound", "network.transport": "tcp", "observer.serial_number": "01606001116", + "panw.panos.action": 
"alert", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -1829,6 +2174,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.ip": "204.232.231.46", "server.port": 80, "service.type": "panw", @@ -1865,12 +2215,20 @@ "destination.nat.port": 0, "destination.port": 80, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "alert", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "allowed" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -1883,6 +2241,7 @@ "network.direction": "inbound", "network.transport": "tcp", "observer.serial_number": "01606001116", + "panw.panos.action": "alert", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -1903,6 +2262,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.ip": "204.232.231.46", "server.port": 80, "service.type": "panw", @@ -1939,12 +2303,20 @@ "destination.nat.port": 0, "destination.port": 80, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "alert", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "allowed" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -1957,6 +2329,7 @@ "network.direction": "inbound", "network.transport": "tcp", "observer.serial_number": "01606001116", + "panw.panos.action": "alert", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -1977,6 +2350,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.ip": "204.232.231.46", "server.port": 80, "service.type": "panw", @@ -2013,12 +2391,20 @@ "destination.nat.port": 0, "destination.port": 80, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "alert", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "allowed" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -2031,6 +2417,7 @@ "network.direction": "inbound", "network.transport": "tcp", "observer.serial_number": "01606001116", + "panw.panos.action": "alert", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -2051,6 +2438,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.ip": "204.232.231.46", "server.port": 80, "service.type": "panw", @@ -2087,12 +2479,20 @@ "destination.nat.port": 0, "destination.port": 80, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + 
"security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "alert", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "allowed" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -2105,6 +2505,7 @@ "network.direction": "inbound", "network.transport": "tcp", "observer.serial_number": "01606001116", + "panw.panos.action": "alert", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -2125,6 +2526,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.ip": "204.232.231.46", "server.port": 80, "service.type": "panw", @@ -2161,12 +2567,20 @@ "destination.nat.port": 0, "destination.port": 80, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "alert", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "allowed" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -2179,6 +2593,7 @@ "network.direction": "inbound", "network.transport": "tcp", "observer.serial_number": "01606001116", + "panw.panos.action": "alert", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -2199,6 +2614,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.ip": "204.232.231.46", "server.port": 80, "service.type": "panw", @@ -2235,12 +2655,20 @@ "destination.nat.port": 0, "destination.port": 80, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "alert", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "allowed" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -2253,6 +2681,7 @@ "network.direction": "inbound", "network.transport": "tcp", "observer.serial_number": "01606001116", + "panw.panos.action": "alert", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -2273,6 +2702,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.ip": "204.232.231.46", "server.port": 80, "service.type": "panw", @@ -2309,12 +2743,20 @@ "destination.nat.port": 0, "destination.port": 80, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "alert", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "allowed" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -2327,6 +2769,7 @@ "network.direction": "inbound", "network.transport": "tcp", "observer.serial_number": "01606001116", + "panw.panos.action": 
"alert", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -2347,6 +2790,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.ip": "204.232.231.46", "server.port": 80, "service.type": "panw", @@ -2383,12 +2831,20 @@ "destination.nat.port": 0, "destination.port": 80, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "alert", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "allowed" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -2401,6 +2857,7 @@ "network.direction": "inbound", "network.transport": "tcp", "observer.serial_number": "01606001116", + "panw.panos.action": "alert", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -2421,6 +2878,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.ip": "204.232.231.46", "server.port": 80, "service.type": "panw", @@ -2454,12 +2916,20 @@ "destination.nat.port": 0, "destination.port": 80, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "block-url", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -2471,6 +2941,7 @@ "network.direction": "inbound", "network.transport": "tcp", "observer.serial_number": "01606001116", + "panw.panos.action": "block-url", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -2491,6 +2962,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.ip": "69.43.161.167", "server.port": 80, "service.type": "panw", @@ -2524,12 +3000,20 @@ "destination.nat.port": 0, "destination.port": 80, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "block-url", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -2541,6 +3025,7 @@ "network.direction": "inbound", "network.transport": "tcp", "observer.serial_number": "01606001116", + "panw.panos.action": "block-url", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -2561,6 +3046,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.ip": "202.31.187.154", "server.port": 80, "service.type": "panw", @@ -2594,12 +3084,20 @@ "destination.nat.port": 0, "destination.port": 80, "event.action": "url_filtering", - "event.category": "security_threat", + 
"event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "block-url", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -2611,6 +3109,7 @@ "network.direction": "inbound", "network.transport": "tcp", "observer.serial_number": "01606001116", + "panw.panos.action": "block-url", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -2631,6 +3130,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.ip": "89.111.176.67", "server.port": 80, "service.type": "panw", @@ -2667,12 +3171,20 @@ "destination.nat.port": 0, "destination.port": 80, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "block-url", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -2684,6 +3196,7 @@ "network.direction": "inbound", "network.transport": "tcp", "observer.serial_number": "01606001116", + "panw.panos.action": "block-url", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -2704,6 +3217,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.ip": "204.232.231.46", "server.port": 80, "service.type": "panw", @@ -2737,12 +3255,20 @@ "destination.nat.port": 0, "destination.port": 80, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "block-url", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -2754,6 +3280,7 @@ "network.direction": "inbound", "network.transport": "tcp", "observer.serial_number": "01606001116", + "panw.panos.action": "block-url", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -2774,6 +3301,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.ip": "208.73.210.29", "server.port": 80, "service.type": "panw", @@ -2807,12 +3339,20 @@ "destination.nat.port": 0, "destination.port": 80, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "block-url", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -2824,6 +3364,7 @@ "network.direction": "inbound", "network.transport": "tcp", "observer.serial_number": 
"01606001116", + "panw.panos.action": "block-url", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -2844,6 +3385,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.ip": "208.73.210.29", "server.port": 80, "service.type": "panw", @@ -2880,12 +3426,20 @@ "destination.nat.port": 0, "destination.port": 80, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "block-url", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -2897,6 +3451,7 @@ "network.direction": "inbound", "network.transport": "tcp", "observer.serial_number": "01606001116", + "panw.panos.action": "block-url", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -2917,6 +3472,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.ip": "204.232.231.46", "server.port": 80, "service.type": "panw", @@ -2950,12 +3510,20 @@ "destination.nat.port": 0, "destination.port": 80, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "block-url", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -2967,6 +3535,7 @@ "network.direction": "inbound", "network.transport": "tcp", "observer.serial_number": "01606001116", + "panw.panos.action": "block-url", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -2987,6 +3556,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.ip": "208.73.210.29", "server.port": 80, "service.type": "panw", @@ -3020,12 +3594,20 @@ "destination.nat.port": 0, "destination.port": 80, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "block-url", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -3037,6 +3619,7 @@ "network.direction": "inbound", "network.transport": "tcp", "observer.serial_number": "01606001116", + "panw.panos.action": "block-url", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -3057,6 +3640,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.ip": "89.108.64.156", "server.port": 80, "service.type": "panw", @@ -3090,12 +3678,20 @@ "destination.nat.port": 0, "destination.port": 80, "event.action": "url_filtering", - 
"event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "block-url", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -3107,6 +3703,7 @@ "network.direction": "inbound", "network.transport": "tcp", "observer.serial_number": "01606001116", + "panw.panos.action": "block-url", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -3127,6 +3724,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.ip": "89.108.64.156", "server.port": 80, "service.type": "panw", @@ -3154,10 +3756,15 @@ "destination.port": 58849, "destination.user.name": "crusher", "event.action": "spyware_detected", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "drop-all-packets", + "event.outcome": "success", "event.severity": 1, "event.timezone": "-02:00", "fileset.name": "panos", @@ -3171,6 +3778,7 @@ "network.direction": "outbound", "network.transport": "tcp", "observer.serial_number": "01606001116", + "panw.panos.action": "drop-all-packets", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -3191,6 +3799,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.ip": "192.168.0.2", "server.port": 58849, "server.user.name": "crusher", @@ -3236,12 +3849,20 @@ "destination.nat.port": 0, "destination.port": 80, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "block-url", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -3253,6 +3874,7 @@ "network.direction": "inbound", "network.transport": "tcp", "observer.serial_number": "01606001116", + "panw.panos.action": "block-url", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -3273,6 +3895,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.ip": "216.8.179.25", "server.port": 80, "service.type": "panw", @@ -3306,12 +3933,20 @@ "destination.nat.port": 0, "destination.port": 80, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "block-url", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -3323,6 +3958,7 @@ "network.direction": "inbound", "network.transport": "tcp", "observer.serial_number": 
"01606001116", + "panw.panos.action": "block-url", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -3343,6 +3979,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.ip": "69.43.161.154", "server.port": 80, "service.type": "panw", @@ -3376,12 +4017,20 @@ "destination.nat.port": 0, "destination.port": 80, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "block-url", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -3393,6 +4042,7 @@ "network.direction": "inbound", "network.transport": "tcp", "observer.serial_number": "01606001116", + "panw.panos.action": "block-url", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -3413,6 +4063,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.ip": "208.91.196.252", "server.port": 80, "service.type": "panw", @@ -3446,12 +4101,20 @@ "destination.nat.port": 0, "destination.port": 80, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "block-url", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -3463,6 +4126,7 @@ "network.direction": "inbound", "network.transport": "tcp", "observer.serial_number": "01606001116", + "panw.panos.action": "block-url", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -3483,6 +4147,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.ip": "208.73.210.29", "server.port": 80, "service.type": "panw", @@ -3519,12 +4188,20 @@ "destination.nat.port": 0, "destination.port": 80, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "block-url", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -3536,6 +4213,7 @@ "network.direction": "inbound", "network.transport": "tcp", "observer.serial_number": "01606001116", + "panw.panos.action": "block-url", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -3556,6 +4234,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.ip": "204.232.231.46", "server.port": 80, "service.type": "panw", @@ -3592,12 +4275,20 @@ "destination.nat.port": 0, "destination.port": 80, "event.action": "url_filtering", - 
"event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "block-url", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -3609,6 +4300,7 @@ "network.direction": "inbound", "network.transport": "tcp", "observer.serial_number": "01606001116", + "panw.panos.action": "block-url", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -3629,6 +4321,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.ip": "204.232.231.46", "server.port": 80, "service.type": "panw", @@ -3665,12 +4362,20 @@ "destination.nat.port": 0, "destination.port": 80, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "block-url", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -3682,6 +4387,7 @@ "network.direction": "inbound", "network.transport": "tcp", "observer.serial_number": "01606001116", + "panw.panos.action": "block-url", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -3702,6 +4408,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.ip": "204.232.231.46", "server.port": 80, "service.type": "panw", @@ -3738,12 +4449,20 @@ "destination.nat.port": 0, "destination.port": 80, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "block-url", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -3755,6 +4474,7 @@ "network.direction": "inbound", "network.transport": "tcp", "observer.serial_number": "1606001116", + "panw.panos.action": "block-url", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -3775,6 +4495,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.ip": "204.232.231.46", "server.port": 80, "service.type": "panw", @@ -3811,12 +4536,20 @@ "destination.nat.port": 0, "destination.port": 80, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "block-url", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -3828,6 +4561,7 @@ "network.direction": "inbound", 
"network.transport": "tcp", "observer.serial_number": "1606001116", + "panw.panos.action": "block-url", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -3848,6 +4582,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.ip": "204.232.231.46", "server.port": 80, "service.type": "panw", @@ -3875,12 +4614,20 @@ "destination.port": 54431, "destination.user.name": "crusher", "event.action": "file_match", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "deny", + "event.outcome": "success", "event.severity": 4, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -3892,6 +4639,7 @@ "network.direction": "outbound", "network.transport": "tcp", "observer.serial_number": "1606001116", + "panw.panos.action": "deny", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -3912,6 +4660,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.ip": "192.168.0.2", "server.port": 54431, "server.user.name": "crusher", @@ -3957,12 +4710,20 @@ "destination.nat.port": 0, "destination.port": 80, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "block-url", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -3974,6 +4735,7 @@ "network.direction": "inbound", "network.transport": "tcp", "observer.serial_number": "1606001116", + "panw.panos.action": "block-url", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -3994,6 +4756,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.ip": "204.232.231.46", "server.port": 80, "service.type": "panw", @@ -4021,12 +4788,20 @@ "destination.port": 61220, "destination.user.name": "crusher", "event.action": "file_match", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "deny", + "event.outcome": "success", "event.severity": 4, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -4038,6 +4813,7 @@ "network.direction": "outbound", "network.transport": "tcp", "observer.serial_number": "01606001116", + "panw.panos.action": "deny", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -4058,6 +4834,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.ip": "192.168.0.2", "server.port": 61220, "server.user.name": "crusher", @@ -4094,12 +4875,20 @@ "destination.port": 
61726, "destination.user.name": "crusher", "event.action": "file_match", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "deny", + "event.outcome": "success", "event.severity": 4, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -4111,6 +4900,7 @@ "network.direction": "outbound", "network.transport": "tcp", "observer.serial_number": "01606001116", + "panw.panos.action": "deny", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -4131,6 +4921,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.ip": "192.168.0.2", "server.port": 61726, "server.user.name": "crusher", @@ -4175,12 +4970,20 @@ "destination.nat.port": 0, "destination.port": 80, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "block-url", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -4192,6 +4995,7 @@ "network.direction": "inbound", "network.transport": "tcp", "observer.serial_number": "01606001116", + "panw.panos.action": "block-url", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -4212,6 +5016,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.ip": "204.232.231.46", "server.port": 80, "service.type": "panw", @@ -4239,12 +5048,20 @@ "destination.port": 60212, "destination.user.name": "crusher", "event.action": "file_match", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "deny", + "event.outcome": "success", "event.severity": 4, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -4256,6 +5073,7 @@ "network.direction": "outbound", "network.transport": "tcp", "observer.serial_number": "01606001116", + "panw.panos.action": "deny", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -4276,6 +5094,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.ip": "192.168.0.2", "server.port": 60212, "server.user.name": "crusher", @@ -4309,12 +5132,20 @@ "destination.port": 60392, "destination.user.name": "crusher", "event.action": "file_match", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "deny", + "event.outcome": "success", "event.severity": 4, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", 
"labels.captive_portal": true, @@ -4326,6 +5157,7 @@ "network.direction": "outbound", "network.transport": "tcp", "observer.serial_number": "01606001116", + "panw.panos.action": "deny", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -4346,6 +5178,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.ip": "192.168.0.2", "server.port": 60392, "server.user.name": "crusher", @@ -4388,12 +5225,20 @@ "destination.nat.port": 0, "destination.port": 80, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "block-url", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -4405,6 +5250,7 @@ "network.direction": "inbound", "network.transport": "tcp", "observer.serial_number": "01606001116", + "panw.panos.action": "block-url", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -4425,6 +5271,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.ip": "213.180.199.61", "server.port": 80, "service.type": "panw", @@ -4458,12 +5309,20 @@ "destination.nat.port": 0, "destination.port": 80, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "block-url", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -4475,6 +5334,7 @@ "network.direction": "inbound", "network.transport": "tcp", "observer.serial_number": "01606001116", + "panw.panos.action": "block-url", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -4495,6 +5355,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.ip": "213.180.199.61", "server.port": 80, "service.type": "panw", @@ -4528,12 +5393,20 @@ "destination.nat.port": 0, "destination.port": 80, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "block-url", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -4545,6 +5418,7 @@ "network.direction": "inbound", "network.transport": "tcp", "observer.serial_number": "01606001116", + "panw.panos.action": "block-url", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -4565,6 +5439,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.ip": "213.180.199.61", 
"server.port": 80, "service.type": "panw", @@ -4592,12 +5471,20 @@ "destination.port": 54431, "destination.user.name": "crusher", "event.action": "file_match", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "deny", + "event.outcome": "success", "event.severity": 4, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -4609,6 +5496,7 @@ "network.direction": "outbound", "network.transport": "tcp", "observer.serial_number": "01606001116", + "panw.panos.action": "deny", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -4629,6 +5517,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.ip": "192.168.0.2", "server.port": 54431, "server.user.name": "crusher", @@ -4674,12 +5567,20 @@ "destination.nat.port": 0, "destination.port": 80, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "block-url", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -4691,6 +5592,7 @@ "network.direction": "inbound", "network.transport": "tcp", "observer.serial_number": "01606001116", + "panw.panos.action": "block-url", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -4711,6 +5613,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.ip": "204.232.231.46", "server.port": 80, "service.type": "panw", @@ -4747,12 +5654,20 @@ "destination.nat.port": 0, "destination.port": 80, "event.action": "data_match", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "alert", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "allowed" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -4764,6 +5679,7 @@ "network.direction": "inbound", "network.transport": "tcp", "observer.serial_number": "01606001116", + "panw.panos.action": "alert", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -4784,6 +5700,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "jordy", + "jordy" + ], + "rule.name": "rule1", "server.ip": "207.46.140.46", "server.port": 80, "service.type": "panw", @@ -4811,12 +5732,20 @@ "destination.port": 1039, "destination.user.name": "jordy", "event.action": "data_match", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "alert", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "allowed" + 
], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -4828,6 +5757,7 @@ "network.direction": "outbound", "network.transport": "tcp", "observer.serial_number": "01606001116", + "panw.panos.action": "alert", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -4848,6 +5778,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "jordy", + "jordy" + ], + "rule.name": "rule1", "server.ip": "192.168.0.6", "server.port": 1039, "server.user.name": "jordy", @@ -4884,12 +5819,20 @@ "destination.port": 1064, "destination.user.name": "jordy", "event.action": "data_match", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "alert", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "allowed" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -4901,6 +5844,7 @@ "network.direction": "outbound", "network.transport": "tcp", "observer.serial_number": "01606001116", + "panw.panos.action": "alert", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -4921,6 +5865,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "jordy", + "jordy" + ], + "rule.name": "rule1", "server.ip": "192.168.0.6", "server.port": 1064, "server.user.name": "jordy", @@ -4966,12 +5915,20 @@ "destination.nat.port": 0, "destination.port": 80, "event.action": "data_match", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "alert", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "allowed" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -4983,6 +5940,7 @@ "network.direction": "inbound", "network.transport": "tcp", "observer.serial_number": "01606001116", + "panw.panos.action": "alert", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -5003,6 +5961,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "jordy", + "jordy" + ], + "rule.name": "rule1", "server.ip": "65.54.71.11", "server.port": 80, "service.type": "panw", @@ -5030,12 +5993,20 @@ "destination.port": 1071, "destination.user.name": "jordy", "event.action": "data_match", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "alert", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "allowed" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -5047,6 +6018,7 @@ "network.direction": "outbound", "network.transport": "tcp", "observer.serial_number": "01606001116", + "panw.panos.action": "alert", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -5067,6 +6039,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "jordy", + "jordy" + ], + "rule.name": "rule1", "server.ip": 
"192.168.0.6", "server.port": 1071, "server.user.name": "jordy", @@ -5106,12 +6083,20 @@ "destination.nat.port": 0, "destination.port": 80, "event.action": "data_match", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "alert", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "allowed" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -5123,6 +6108,7 @@ "network.direction": "inbound", "network.transport": "tcp", "observer.serial_number": "01606001116", + "panw.panos.action": "alert", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -5143,6 +6129,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "picard", + "picard" + ], + "rule.name": "rule1", "server.ip": "208.85.40.48", "server.port": 80, "service.type": "panw", @@ -5170,12 +6161,20 @@ "destination.port": 57876, "destination.user.name": "picard", "event.action": "data_match", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "reset-both", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -5187,6 +6186,7 @@ "network.direction": "outbound", "network.transport": "tcp", "observer.serial_number": "01606001116", + "panw.panos.action": "reset-both", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -5207,6 +6207,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "picard", + "picard" + ], + "rule.name": "rule1", "server.ip": "192.168.0.2", "server.port": 57876, "server.user.name": "picard", @@ -5240,12 +6245,20 @@ "destination.port": 1082, "destination.user.name": "jordy", "event.action": "file_match", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "deny", + "event.outcome": "success", "event.severity": 4, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -5257,6 +6270,7 @@ "network.direction": "outbound", "network.transport": "tcp", "observer.serial_number": "01606001116", + "panw.panos.action": "deny", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -5277,6 +6291,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "jordy", + "jordy" + ], + "rule.name": "rule1", "server.ip": "192.168.0.6", "server.port": 1082, "server.user.name": "jordy", @@ -5313,12 +6332,20 @@ "destination.port": 50986, "destination.user.name": "picard", "event.action": "data_match", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "reset-both", + "event.outcome": "success", "event.severity": 5, "event.timezone": 
"-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -5330,6 +6357,7 @@ "network.direction": "outbound", "network.transport": "tcp", "observer.serial_number": "01606001116", + "panw.panos.action": "reset-both", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -5350,6 +6378,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "picard", + "picard" + ], + "rule.name": "rule1", "server.ip": "192.168.0.2", "server.port": 50986, "server.user.name": "picard", @@ -5383,12 +6416,20 @@ "destination.port": 51716, "destination.user.name": "picard", "event.action": "data_match", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "reset-both", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -5400,6 +6441,7 @@ "network.direction": "outbound", "network.transport": "tcp", "observer.serial_number": "01606001116", + "panw.panos.action": "reset-both", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -5420,6 +6462,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "picard", + "picard" + ], + "rule.name": "rule1", "server.ip": "192.168.0.2", "server.port": 51716, "server.user.name": "picard", @@ -5453,12 +6500,20 @@ "destination.port": 52119, "destination.user.name": "picard", "event.action": "data_match", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "reset-both", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -5470,6 +6525,7 @@ "network.direction": "outbound", "network.transport": "tcp", "observer.serial_number": "01606001116", + "panw.panos.action": "reset-both", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -5490,6 +6546,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "picard", + "picard" + ], + "rule.name": "rule1", "server.ip": "192.168.0.2", "server.port": 52119, "server.user.name": "picard", @@ -5523,12 +6584,20 @@ "destination.port": 52411, "destination.user.name": "picard", "event.action": "data_match", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "reset-both", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -5540,6 +6609,7 @@ "network.direction": "outbound", "network.transport": "tcp", "observer.serial_number": "01606001116", + "panw.panos.action": "reset-both", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -5560,6 +6630,11 @@ 
"0.0.0.0", "0.0.0.0" ], + "related.user": [ + "picard", + "picard" + ], + "rule.name": "rule1", "server.ip": "192.168.0.2", "server.port": 52411, "server.user.name": "picard", @@ -5599,12 +6674,20 @@ "destination.nat.port": 0, "destination.port": 80, "event.action": "data_match", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "alert", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "allowed" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -5616,6 +6699,7 @@ "network.direction": "inbound", "network.transport": "tcp", "observer.serial_number": "01606001116", + "panw.panos.action": "alert", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -5636,6 +6720,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "picard", + "picard" + ], + "rule.name": "rule1", "server.ip": "74.125.239.6", "server.port": 80, "service.type": "panw", @@ -5663,12 +6752,20 @@ "destination.port": 53026, "destination.user.name": "picard", "event.action": "data_match", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "reset-both", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -5680,6 +6777,7 @@ "network.direction": "outbound", "network.transport": "tcp", "observer.serial_number": "01606001116", + "panw.panos.action": "reset-both", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -5700,6 +6798,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "picard", + "picard" + ], + "rule.name": "rule1", "server.ip": "192.168.0.2", "server.port": 53026, "server.user.name": "picard", @@ -5733,12 +6836,20 @@ "destination.port": 53809, "destination.user.name": "picard", "event.action": "data_match", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "alert", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "allowed" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -5750,6 +6861,7 @@ "network.direction": "outbound", "network.transport": "tcp", "observer.serial_number": "01606001116", + "panw.panos.action": "alert", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -5770,6 +6882,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "picard", + "picard" + ], + "rule.name": "rule1", "server.ip": "192.168.0.2", "server.port": 53809, "server.user.name": "picard", @@ -5803,12 +6920,20 @@ "destination.port": 55912, "destination.user.name": "picard", "event.action": "data_match", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", 
"event.module": "panw", - "event.outcome": "alert", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "allowed" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -5820,6 +6945,7 @@ "network.direction": "outbound", "network.transport": "tcp", "observer.serial_number": "01606001116", + "panw.panos.action": "alert", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -5840,6 +6966,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "picard", + "picard" + ], + "rule.name": "rule1", "server.ip": "192.168.0.2", "server.port": 55912, "server.user.name": "picard", @@ -5873,12 +7004,20 @@ "destination.port": 55916, "destination.user.name": "picard", "event.action": "data_match", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "alert", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "allowed" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -5890,6 +7029,7 @@ "network.direction": "outbound", "network.transport": "tcp", "observer.serial_number": "01606001116", + "panw.panos.action": "alert", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -5910,6 +7050,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "picard", + "picard" + ], + "rule.name": "rule1", "server.ip": "192.168.0.2", "server.port": 55916, "server.user.name": "picard", @@ -5943,12 +7088,20 @@ "destination.port": 1046, "destination.user.name": "jordy", "event.action": "data_match", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "reset-both", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -5960,6 +7113,7 @@ "network.direction": "outbound", "network.transport": "tcp", "observer.serial_number": "01606001116", + "panw.panos.action": "reset-both", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -5980,6 +7134,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "jordy", + "jordy" + ], + "rule.name": "rule1", "server.ip": "192.168.0.6", "server.port": 1046, "server.user.name": "jordy", @@ -6016,12 +7175,20 @@ "destination.port": 61734, "destination.user.name": "jordy", "event.action": "data_match", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "reset-both", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -6033,6 +7200,7 @@ "network.direction": "outbound", "network.transport": "tcp", "observer.serial_number": "01606001116", + "panw.panos.action": "reset-both", "panw.panos.destination.interface": "ethernet1/1", 
"panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -6053,6 +7221,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "jordy", + "jordy" + ], + "rule.name": "rule1", "server.ip": "192.168.0.2", "server.port": 61734, "server.user.name": "jordy", @@ -6086,12 +7259,20 @@ "destination.port": 62292, "destination.user.name": "jordy", "event.action": "data_match", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "reset-both", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -6103,6 +7284,7 @@ "network.direction": "outbound", "network.transport": "tcp", "observer.serial_number": "01606001116", + "panw.panos.action": "reset-both", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -6123,6 +7305,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "jordy", + "jordy" + ], + "rule.name": "rule1", "server.ip": "192.168.0.2", "server.port": 62292, "server.user.name": "jordy", @@ -6156,12 +7343,20 @@ "destination.port": 64669, "destination.user.name": "jordy", "event.action": "data_match", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "alert", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "allowed" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -6173,6 +7368,7 @@ "network.direction": "outbound", "network.transport": "tcp", "observer.serial_number": "01606001116", + "panw.panos.action": "alert", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -6193,6 +7389,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "jordy", + "jordy" + ], + "rule.name": "rule1", "server.ip": "192.168.0.2", "server.port": 64669, "server.user.name": "jordy", @@ -6229,12 +7430,20 @@ "destination.port": 65265, "destination.user.name": "picard", "event.action": "data_match", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "reset-both", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -6246,6 +7455,7 @@ "network.direction": "outbound", "network.transport": "tcp", "observer.serial_number": "01606001116", + "panw.panos.action": "reset-both", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -6266,6 +7476,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "picard", + "picard" + ], + "rule.name": "rule1", "server.ip": "192.168.0.2", "server.port": 65265, "server.user.name": "picard", @@ -6299,12 +7514,20 @@ "destination.port": 64979, "destination.user.name": "picard", "event.action": "data_match", - "event.category": "security_threat", + "event.category": [ + 
"security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "alert", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "allowed" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -6316,6 +7539,7 @@ "network.direction": "outbound", "network.transport": "tcp", "observer.serial_number": "01606001116", + "panw.panos.action": "alert", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -6336,6 +7560,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "picard", + "picard" + ], + "rule.name": "rule1", "server.ip": "192.168.0.2", "server.port": 64979, "server.user.name": "picard", @@ -6369,12 +7598,20 @@ "destination.port": 49432, "destination.user.name": "picard", "event.action": "data_match", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "alert", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "allowed" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -6386,6 +7623,7 @@ "network.direction": "outbound", "network.transport": "tcp", "observer.serial_number": "01606001116", + "panw.panos.action": "alert", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -6406,6 +7644,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "picard", + "picard" + ], + "rule.name": "rule1", "server.ip": "192.168.0.2", "server.port": 49432, "server.user.name": "picard", @@ -6442,12 +7685,20 @@ "destination.port": 49722, "destination.user.name": "picard", "event.action": "data_match", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "reset-both", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -6459,6 +7710,7 @@ "network.direction": "outbound", "network.transport": "tcp", "observer.serial_number": "01606001116", + "panw.panos.action": "reset-both", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -6479,6 +7731,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "picard", + "picard" + ], + "rule.name": "rule1", "server.ip": "192.168.0.2", "server.port": 49722, "server.user.name": "picard", @@ -6518,12 +7775,20 @@ "destination.nat.port": 0, "destination.port": 80, "event.action": "data_match", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "alert", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "allowed" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -6535,6 +7800,7 @@ "network.direction": "inbound", "network.transport": "tcp", "observer.serial_number": 
"01606001116", + "panw.panos.action": "alert", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -6555,6 +7821,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "picard", + "picard" + ], + "rule.name": "rule1", "server.ip": "74.125.224.201", "server.port": 80, "service.type": "panw", @@ -6582,12 +7853,20 @@ "destination.port": 50108, "destination.user.name": "picard", "event.action": "data_match", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "reset-both", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -6599,6 +7878,7 @@ "network.direction": "outbound", "network.transport": "tcp", "observer.serial_number": "01606001116", + "panw.panos.action": "reset-both", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -6619,6 +7899,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "picard", + "picard" + ], + "rule.name": "rule1", "server.ip": "192.168.0.2", "server.port": 50108, "server.user.name": "picard", @@ -6652,12 +7937,20 @@ "destination.port": 50387, "destination.user.name": "picard", "event.action": "data_match", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "reset-both", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -6669,6 +7962,7 @@ "network.direction": "outbound", "network.transport": "tcp", "observer.serial_number": "01606001116", + "panw.panos.action": "reset-both", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -6689,6 +7983,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "picard", + "picard" + ], + "rule.name": "rule1", "server.ip": "192.168.0.2", "server.port": 50387, "server.user.name": "picard", @@ -6728,12 +8027,20 @@ "destination.nat.port": 0, "destination.port": 80, "event.action": "data_match", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "alert", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "allowed" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -6745,6 +8052,7 @@ "network.direction": "inbound", "network.transport": "tcp", "observer.serial_number": "01606001116", + "panw.panos.action": "alert", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -6765,6 +8073,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "jordy", + "jordy" + ], + "rule.name": "rule1", "server.ip": "208.85.40.48", "server.port": 80, "service.type": "panw", @@ -6792,12 +8105,20 @@ "destination.port": 60005, "destination.user.name": "jordy", "event.action": 
"data_match", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "reset-both", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -6809,6 +8130,7 @@ "network.direction": "outbound", "network.transport": "tcp", "observer.serial_number": "01606001116", + "panw.panos.action": "reset-both", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -6829,6 +8151,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "jordy", + "jordy" + ], + "rule.name": "rule1", "server.ip": "192.168.0.2", "server.port": 60005, "server.user.name": "jordy", @@ -6862,12 +8189,20 @@ "destination.port": 60443, "destination.user.name": "jordy", "event.action": "data_match", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "reset-both", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -6879,6 +8214,7 @@ "network.direction": "outbound", "network.transport": "tcp", "observer.serial_number": "01606001116", + "panw.panos.action": "reset-both", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -6899,6 +8235,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "jordy", + "jordy" + ], + "rule.name": "rule1", "server.ip": "192.168.0.2", "server.port": 60443, "server.user.name": "jordy", @@ -6932,12 +8273,20 @@ "destination.port": 60822, "destination.user.name": "jordy", "event.action": "data_match", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "reset-both", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -6949,6 +8298,7 @@ "network.direction": "outbound", "network.transport": "tcp", "observer.serial_number": "01606001116", + "panw.panos.action": "reset-both", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -6969,6 +8319,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "jordy", + "jordy" + ], + "rule.name": "rule1", "server.ip": "192.168.0.2", "server.port": 60822, "server.user.name": "jordy", @@ -7002,12 +8357,20 @@ "destination.port": 61105, "destination.user.name": "jordy", "event.action": "data_match", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "reset-both", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -7019,6 
+8382,7 @@ "network.direction": "outbound", "network.transport": "tcp", "observer.serial_number": "01606001116", + "panw.panos.action": "reset-both", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -7039,6 +8403,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "jordy", + "jordy" + ], + "rule.name": "rule1", "server.ip": "192.168.0.2", "server.port": 61105, "server.user.name": "jordy", @@ -7072,12 +8441,20 @@ "destination.port": 60782, "destination.user.name": "jordy", "event.action": "data_match", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "alert", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "allowed" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -7089,6 +8466,7 @@ "network.direction": "outbound", "network.transport": "tcp", "observer.serial_number": "01606001116", + "panw.panos.action": "alert", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -7109,6 +8487,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "jordy", + "jordy" + ], + "rule.name": "rule1", "server.ip": "192.168.0.2", "server.port": 60782, "server.user.name": "jordy", @@ -7142,12 +8525,20 @@ "destination.port": 61470, "destination.user.name": "jordy", "event.action": "data_match", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "reset-both", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -7159,6 +8550,7 @@ "network.direction": "outbound", "network.transport": "tcp", "observer.serial_number": "01606001116", + "panw.panos.action": "reset-both", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -7179,6 +8571,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "jordy", + "jordy" + ], + "rule.name": "rule1", "server.ip": "192.168.0.2", "server.port": 61470, "server.user.name": "jordy", diff --git a/x-pack/filebeat/module/panw/panos/test/pan_inc_traffic.log-expected.json b/x-pack/filebeat/module/panw/panos/test/pan_inc_traffic.log-expected.json index 4565c577acd..c285f88d43d 100644 --- a/x-pack/filebeat/module/panw/panos/test/pan_inc_traffic.log-expected.json +++ b/x-pack/filebeat/module/panw/panos/test/pan_inc_traffic.log-expected.json @@ -23,14 +23,23 @@ "destination.packets": 1, "destination.port": 80, "event.action": "flow_started", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2012-04-10T04:39:59.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2012-04-10T04:39:59.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "start", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -44,6 +53,7 @@ "network.transport": 
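The pan_inc_traffic.log-expected.json changes that follow apply the same field migration to TRAFFIC fixtures, with event.kind "event" instead of "alert" and event.type carrying the flow lifecycle: "start" for flow_started, "end" for flow_terminated, plus "allowed" and "connection". A small self-contained sanity check for those invariants; the script and file path are illustrative, not part of the module's test suite:

import json

def check_traffic(evt: dict) -> None:
    # Invariants visible in the fixture diff below.
    assert evt["event.kind"] == "event"
    assert evt["event.category"] == ["network_traffic", "network"]
    assert evt["event.outcome"] == "success"
    assert "connection" in evt["event.type"]
    lifecycle = {"flow_started": "start", "flow_terminated": "end"}
    assert lifecycle[evt["event.action"]] in evt["event.type"]

with open("pan_inc_traffic.log-expected.json") as fh:  # illustrative path
    for evt in json.load(fh):
        check_traffic(evt)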
"tcp", "network.type": "ipv4", "observer.serial_number": "01606001116", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -62,6 +72,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.bytes": 0, "server.ip": "204.232.231.46", "server.packets": 1, @@ -100,14 +115,23 @@ "destination.packets": 1, "destination.port": 53, "event.action": "flow_started", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2012-04-10T04:39:58.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2012-04-10T04:39:58.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "start", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -121,6 +145,7 @@ "network.transport": "udp", "network.type": "ipv4", "observer.serial_number": "01606001116", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -139,6 +164,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.bytes": 0, "server.ip": "205.171.2.25", "server.packets": 1, @@ -177,14 +207,23 @@ "destination.packets": 1, "destination.port": 53, "event.action": "flow_started", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2012-04-10T04:39:58.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2012-04-10T04:39:58.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "start", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -198,6 +237,7 @@ "network.transport": "udp", "network.type": "ipv4", "observer.serial_number": "01606001116", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -216,6 +256,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.bytes": 0, "server.ip": "205.171.2.25", "server.packets": 1, @@ -257,14 +302,23 @@ "destination.packets": 1, "destination.port": 80, "event.action": "flow_started", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2012-04-10T04:39:58.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2012-04-10T04:39:58.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "start", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -278,6 +332,7 @@ "network.transport": "tcp", "network.type": "ipv4", "observer.serial_number": "01606001116", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -296,6 +351,11 @@ "0.0.0.0", "0.0.0.0" ], + 
"related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.bytes": 0, "server.ip": "204.232.231.46", "server.packets": 1, @@ -337,14 +397,23 @@ "destination.packets": 1, "destination.port": 80, "event.action": "flow_started", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2012-04-10T04:39:58.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2012-04-10T04:39:58.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "start", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -358,6 +427,7 @@ "network.transport": "tcp", "network.type": "ipv4", "observer.serial_number": "01606001116", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -376,6 +446,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.bytes": 0, "server.ip": "204.232.231.46", "server.packets": 1, @@ -414,14 +489,23 @@ "destination.packets": 1, "destination.port": 53, "event.action": "flow_started", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2012-04-10T04:39:58.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2012-04-10T04:39:58.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "start", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -435,6 +519,7 @@ "network.transport": "udp", "network.type": "ipv4", "observer.serial_number": "01606001116", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -453,6 +538,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.bytes": 0, "server.ip": "205.171.2.25", "server.packets": 1, @@ -491,14 +581,23 @@ "destination.packets": 1, "destination.port": 53, "event.action": "flow_started", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2012-04-10T04:39:58.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2012-04-10T04:39:58.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "start", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -512,6 +611,7 @@ "network.transport": "udp", "network.type": "ipv4", "observer.serial_number": "01606001116", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -530,6 +630,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.bytes": 0, "server.ip": "205.171.2.25", "server.packets": 1, @@ -571,14 +676,23 @@ "destination.packets": 6, "destination.port": 80, "event.action": "flow_terminated", - "event.category": 
"network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 1000000000, "event.end": "2012-04-10T04:39:28.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2012-04-10T04:39:27.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -592,6 +706,7 @@ "network.transport": "tcp", "network.type": "ipv4", "observer.serial_number": "01606001116", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -610,6 +725,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.bytes": 806, "server.ip": "204.232.231.46", "server.packets": 6, @@ -651,14 +771,23 @@ "destination.packets": 6, "destination.port": 80, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2012-04-10T04:39:28.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2012-04-10T04:39:28.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -672,6 +801,7 @@ "network.transport": "tcp", "network.type": "ipv4", "observer.serial_number": "01606001116", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -690,6 +820,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.bytes": 806, "server.ip": "204.232.231.46", "server.packets": 6, @@ -731,14 +866,23 @@ "destination.packets": 6, "destination.port": 80, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 1000000000, "event.end": "2012-04-10T04:39:28.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2012-04-10T04:39:27.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -752,6 +896,7 @@ "network.transport": "tcp", "network.type": "ipv4", "observer.serial_number": "01606001116", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -770,6 +915,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.bytes": 806, "server.ip": "204.232.231.46", "server.packets": 6, @@ -811,14 +961,23 @@ "destination.packets": 1, "destination.port": 80, "event.action": "flow_started", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2012-04-10T04:39:58.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + 
"event.outcome": "success", "event.start": "2012-04-10T04:39:58.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "start", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -832,6 +991,7 @@ "network.transport": "tcp", "network.type": "ipv4", "observer.serial_number": "01606001116", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -850,6 +1010,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.bytes": 0, "server.ip": "204.232.231.46", "server.packets": 1, @@ -891,14 +1056,23 @@ "destination.packets": 1, "destination.port": 80, "event.action": "flow_started", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2012-04-10T04:39:57.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2012-04-10T04:39:57.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "start", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -912,6 +1086,7 @@ "network.transport": "tcp", "network.type": "ipv4", "observer.serial_number": "01606001116", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -930,6 +1105,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.bytes": 0, "server.ip": "204.232.231.46", "server.packets": 1, @@ -971,14 +1151,23 @@ "destination.packets": 1, "destination.port": 80, "event.action": "flow_started", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2012-04-10T04:39:57.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2012-04-10T04:39:57.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "start", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -992,6 +1181,7 @@ "network.transport": "tcp", "network.type": "ipv4", "observer.serial_number": "01606001116", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -1010,6 +1200,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.bytes": 0, "server.ip": "204.232.231.46", "server.packets": 1, @@ -1051,14 +1246,23 @@ "destination.packets": 1, "destination.port": 80, "event.action": "flow_started", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2012-04-10T04:39:57.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2012-04-10T04:39:57.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "start", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -1072,6 +1276,7 @@ 
"network.transport": "tcp", "network.type": "ipv4", "observer.serial_number": "01606001116", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -1090,6 +1295,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.bytes": 0, "server.ip": "204.232.231.46", "server.packets": 1, @@ -1131,14 +1341,23 @@ "destination.packets": 6, "destination.port": 80, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2012-04-10T04:39:27.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2012-04-10T04:39:27.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -1152,6 +1371,7 @@ "network.transport": "tcp", "network.type": "ipv4", "observer.serial_number": "01606001116", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -1170,6 +1390,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.bytes": 806, "server.ip": "204.232.231.46", "server.packets": 6, @@ -1211,14 +1436,23 @@ "destination.packets": 6, "destination.port": 80, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 1000000000, "event.end": "2012-04-10T04:39:27.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2012-04-10T04:39:26.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -1232,6 +1466,7 @@ "network.transport": "tcp", "network.type": "ipv4", "observer.serial_number": "01606001116", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -1250,6 +1485,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.bytes": 806, "server.ip": "204.232.231.46", "server.packets": 6, @@ -1291,14 +1531,23 @@ "destination.packets": 18, "destination.port": 80, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 512000000000, "event.end": "2012-04-10T04:38:26.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2012-04-10T04:29:54.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -1312,6 +1561,7 @@ "network.transport": "tcp", "network.type": "ipv4", "observer.serial_number": "01606001116", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", 
"panw.panos.destination.nat.port": 0, @@ -1330,6 +1580,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.bytes": 551, "server.ip": "204.232.231.46", "server.packets": 18, @@ -1371,14 +1626,23 @@ "destination.packets": 1, "destination.port": 80, "event.action": "flow_started", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2012-04-10T04:39:56.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2012-04-10T04:39:56.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "start", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -1392,6 +1656,7 @@ "network.transport": "tcp", "network.type": "ipv4", "observer.serial_number": "01606001116", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -1410,6 +1675,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.bytes": 0, "server.ip": "204.232.231.46", "server.packets": 1, @@ -1451,14 +1721,23 @@ "destination.packets": 1, "destination.port": 80, "event.action": "flow_started", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2012-04-10T04:39:56.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2012-04-10T04:39:56.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "start", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -1472,6 +1751,7 @@ "network.transport": "tcp", "network.type": "ipv4", "observer.serial_number": "01606001116", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -1490,6 +1770,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.bytes": 0, "server.ip": "204.232.231.46", "server.packets": 1, @@ -1528,14 +1813,23 @@ "destination.packets": 1, "destination.port": 53, "event.action": "flow_started", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2012-04-10T04:39:56.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2012-04-10T04:39:56.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "start", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -1549,6 +1843,7 @@ "network.transport": "udp", "network.type": "ipv4", "observer.serial_number": "01606001116", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -1567,6 +1862,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.bytes": 0, "server.ip": "205.171.2.25", "server.packets": 1, @@ -1605,14 +1905,23 @@ 
"destination.packets": 1, "destination.port": 53, "event.action": "flow_started", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2012-04-10T04:39:56.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2012-04-10T04:39:56.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "start", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -1626,6 +1935,7 @@ "network.transport": "udp", "network.type": "ipv4", "observer.serial_number": "01606001116", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -1644,6 +1954,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.bytes": 0, "server.ip": "205.171.2.25", "server.packets": 1, @@ -1685,14 +2000,23 @@ "destination.packets": 1, "destination.port": 80, "event.action": "flow_started", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2012-04-10T04:39:56.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2012-04-10T04:39:56.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "start", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -1706,6 +2030,7 @@ "network.transport": "tcp", "network.type": "ipv4", "observer.serial_number": "01606001116", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -1724,6 +2049,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.bytes": 0, "server.ip": "204.232.231.46", "server.packets": 1, @@ -1762,14 +2092,23 @@ "destination.packets": 1, "destination.port": 53, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2012-04-10T04:39:26.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2012-04-10T04:39:26.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -1783,6 +2122,7 @@ "network.transport": "udp", "network.type": "ipv4", "observer.serial_number": "01606001116", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -1801,6 +2141,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.bytes": 98, "server.ip": "205.171.2.25", "server.packets": 1, @@ -1842,14 +2187,23 @@ "destination.packets": 6, "destination.port": 80, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": 
"2012-04-10T04:39:26.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2012-04-10T04:39:26.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -1863,6 +2217,7 @@ "network.transport": "tcp", "network.type": "ipv4", "observer.serial_number": "01606001116", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -1881,6 +2236,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.bytes": 806, "server.ip": "204.232.231.46", "server.packets": 6, @@ -1922,14 +2282,23 @@ "destination.packets": 6, "destination.port": 80, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2012-04-10T04:39:26.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2012-04-10T04:39:26.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -1943,6 +2312,7 @@ "network.transport": "tcp", "network.type": "ipv4", "observer.serial_number": "01606001116", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -1961,6 +2331,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.bytes": 806, "server.ip": "204.232.231.46", "server.packets": 6, @@ -2002,14 +2377,23 @@ "destination.packets": 1, "destination.port": 80, "event.action": "flow_started", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2012-04-10T04:39:56.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2012-04-10T04:39:56.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "start", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -2023,6 +2407,7 @@ "network.transport": "tcp", "network.type": "ipv4", "observer.serial_number": "01606001116", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -2041,6 +2426,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.bytes": 0, "server.ip": "204.232.231.46", "server.packets": 1, @@ -2079,14 +2469,23 @@ "destination.packets": 1, "destination.port": 53, "event.action": "flow_started", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2012-04-10T04:39:55.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2012-04-10T04:39:55.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "start", + 
"connection" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -2100,6 +2499,7 @@ "network.transport": "udp", "network.type": "ipv4", "observer.serial_number": "01606001116", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -2118,6 +2518,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.bytes": 0, "server.ip": "205.171.2.25", "server.packets": 1, @@ -2156,14 +2561,23 @@ "destination.packets": 1, "destination.port": 53, "event.action": "flow_started", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2012-04-10T04:39:55.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2012-04-10T04:39:55.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "start", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -2177,6 +2591,7 @@ "network.transport": "udp", "network.type": "ipv4", "observer.serial_number": "01606001116", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -2195,6 +2610,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.bytes": 0, "server.ip": "205.171.2.25", "server.packets": 1, @@ -2236,14 +2656,23 @@ "destination.packets": 8, "destination.port": 13069, "event.action": "flow_started", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 125000000000, "event.end": "2012-04-10T04:39:55.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2012-04-10T04:37:50.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "start", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -2257,6 +2686,7 @@ "network.transport": "udp", "network.type": "ipv4", "observer.serial_number": "01606001116", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -2275,6 +2705,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.bytes": 504, "server.ip": "98.149.55.63", "server.packets": 8, @@ -2316,14 +2751,23 @@ "destination.packets": 1, "destination.port": 80, "event.action": "flow_started", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2012-04-10T04:39:55.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2012-04-10T04:39:55.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "start", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -2337,6 +2781,7 @@ "network.transport": "tcp", "network.type": "ipv4", "observer.serial_number": "01606001116", + "panw.panos.action": "allow", 
"panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -2355,6 +2800,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.bytes": 0, "server.ip": "204.232.231.46", "server.packets": 1, @@ -2393,14 +2843,23 @@ "destination.packets": 1, "destination.port": 53, "event.action": "flow_started", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2012-04-10T04:39:55.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2012-04-10T04:39:55.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "start", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -2414,6 +2873,7 @@ "network.transport": "udp", "network.type": "ipv4", "observer.serial_number": "01606001116", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -2432,6 +2892,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.bytes": 0, "server.ip": "205.171.2.25", "server.packets": 1, @@ -2473,14 +2938,23 @@ "destination.packets": 10, "destination.port": 80, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 1000000000, "event.end": "2012-04-10T04:39:25.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2012-04-10T04:39:24.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -2494,6 +2968,7 @@ "network.transport": "tcp", "network.type": "ipv4", "observer.serial_number": "01606001116", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -2512,6 +2987,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.bytes": 9130, "server.ip": "212.48.10.58", "server.packets": 10, @@ -2553,14 +3033,23 @@ "destination.packets": 1, "destination.port": 80, "event.action": "flow_started", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2012-04-10T04:39:55.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2012-04-10T04:39:55.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "start", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -2574,6 +3063,7 @@ "network.transport": "tcp", "network.type": "ipv4", "observer.serial_number": "01606001116", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -2592,6 +3082,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": 
"rule1", "server.bytes": 0, "server.ip": "204.232.231.46", "server.packets": 1, @@ -2630,14 +3125,23 @@ "destination.packets": 1, "destination.port": 53, "event.action": "flow_started", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2012-04-10T04:39:54.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2012-04-10T04:39:54.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "start", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -2651,6 +3155,7 @@ "network.transport": "udp", "network.type": "ipv4", "observer.serial_number": "01606001116", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -2669,6 +3174,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.bytes": 0, "server.ip": "205.171.2.25", "server.packets": 1, @@ -2707,14 +3217,23 @@ "destination.packets": 1, "destination.port": 53, "event.action": "flow_started", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2012-04-10T04:39:54.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2012-04-10T04:39:54.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "start", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -2728,6 +3247,7 @@ "network.transport": "udp", "network.type": "ipv4", "observer.serial_number": "01606001116", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -2746,6 +3266,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.bytes": 0, "server.ip": "205.171.2.25", "server.packets": 1, @@ -2787,14 +3312,23 @@ "destination.packets": 1, "destination.port": 80, "event.action": "flow_started", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2012-04-10T04:39:54.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2012-04-10T04:39:54.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "start", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -2808,6 +3342,7 @@ "network.transport": "tcp", "network.type": "ipv4", "observer.serial_number": "01606001116", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -2826,6 +3361,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.bytes": 0, "server.ip": "204.232.231.46", "server.packets": 1, @@ -2867,14 +3407,23 @@ "destination.packets": 1, "destination.port": 80, "event.action": "flow_started", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", 
+ "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2012-04-10T04:39:54.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2012-04-10T04:39:54.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "start", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -2888,6 +3437,7 @@ "network.transport": "tcp", "network.type": "ipv4", "observer.serial_number": "01606001116", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -2906,6 +3456,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.bytes": 0, "server.ip": "204.232.231.46", "server.packets": 1, @@ -2944,14 +3499,23 @@ "destination.packets": 1, "destination.port": 53, "event.action": "flow_started", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2012-04-10T04:39:54.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2012-04-10T04:39:54.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "start", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -2965,6 +3529,7 @@ "network.transport": "udp", "network.type": "ipv4", "observer.serial_number": "01606001116", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -2983,6 +3548,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.bytes": 0, "server.ip": "205.171.2.25", "server.packets": 1, @@ -3021,14 +3591,23 @@ "destination.packets": 1, "destination.port": 53, "event.action": "flow_started", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2012-04-10T04:39:54.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2012-04-10T04:39:54.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "start", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -3042,6 +3621,7 @@ "network.transport": "udp", "network.type": "ipv4", "observer.serial_number": "01606001116", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -3060,6 +3640,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.bytes": 0, "server.ip": "205.171.2.25", "server.packets": 1, @@ -3097,14 +3682,23 @@ "destination.packets": 1, "destination.port": 53, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2012-04-10T04:39:24.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": 
"2012-04-10T04:39:24.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "log.offset": 14217, @@ -3117,6 +3711,7 @@ "network.transport": "udp", "network.type": "ipv4", "observer.serial_number": "01606001116", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -3135,6 +3730,7 @@ "0.0.0.0", "0.0.0.0" ], + "rule.name": "rule1", "server.bytes": 111, "server.ip": "8.8.8.8", "server.packets": 1, @@ -3172,14 +3768,23 @@ "destination.packets": 6, "destination.port": 80, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 1000000000, "event.end": "2012-04-10T04:39:24.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2012-04-10T04:39:23.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -3193,6 +3798,7 @@ "network.transport": "tcp", "network.type": "ipv4", "observer.serial_number": "01606001116", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -3211,6 +3817,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.bytes": 906, "server.ip": "62.211.68.12", "server.packets": 6, @@ -3251,14 +3862,23 @@ "destination.packets": 10, "destination.port": 443, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2012-04-10T04:39:24.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2012-04-10T04:39:24.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "log.offset": 14933, @@ -3271,6 +3891,7 @@ "network.transport": "tcp", "network.type": "ipv4", "observer.serial_number": "01606001116", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -3289,6 +3910,7 @@ "0.0.0.0", "0.0.0.0" ], + "rule.name": "rule1", "server.bytes": 5013, "server.ip": "50.19.102.116", "server.packets": 10, @@ -3329,14 +3951,23 @@ "destination.packets": 1, "destination.port": 40026, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2012-04-10T04:39:24.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2012-04-10T04:39:24.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -3350,6 +3981,7 @@ "network.transport": "udp", "network.type": "ipv4", "observer.serial_number": "01606001116", + "panw.panos.action": "allow", 
"panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -3368,6 +4000,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.bytes": 99, "server.ip": "65.55.223.19", "server.packets": 1, @@ -3409,14 +4046,23 @@ "destination.packets": 1, "destination.port": 40029, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2012-04-10T04:39:24.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2012-04-10T04:39:24.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -3430,6 +4076,7 @@ "network.transport": "udp", "network.type": "ipv4", "observer.serial_number": "01606001116", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -3448,6 +4095,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.bytes": 902, "server.ip": "65.55.223.24", "server.packets": 1, @@ -3485,14 +4137,23 @@ "destination.packets": 1, "destination.port": 53, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2012-04-10T04:39:24.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2012-04-10T04:39:24.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "log.offset": 16061, @@ -3505,6 +4166,7 @@ "network.transport": "udp", "network.type": "ipv4", "observer.serial_number": "01606001116", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -3523,6 +4185,7 @@ "0.0.0.0", "0.0.0.0" ], + "rule.name": "rule1", "server.bytes": 141, "server.ip": "8.8.8.8", "server.packets": 1, @@ -3563,14 +4226,23 @@ "destination.packets": 1, "destination.port": 80, "event.action": "flow_started", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2012-04-10T04:39:54.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2012-04-10T04:39:54.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "start", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -3584,6 +4256,7 @@ "network.transport": "tcp", "network.type": "ipv4", "observer.serial_number": "01606001116", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -3602,6 +4275,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.bytes": 0, "server.ip": "204.232.231.46", "server.packets": 1, 
@@ -3640,14 +4318,23 @@ "destination.packets": 1, "destination.port": 53, "event.action": "flow_started", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2012-04-10T04:39:53.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2012-04-10T04:39:53.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "start", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -3661,6 +4348,7 @@ "network.transport": "udp", "network.type": "ipv4", "observer.serial_number": "01606001116", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -3679,6 +4367,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.bytes": 0, "server.ip": "205.171.2.25", "server.packets": 1, @@ -3720,14 +4413,23 @@ "destination.packets": 1, "destination.port": 80, "event.action": "flow_started", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2012-04-10T04:39:53.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2012-04-10T04:39:53.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "start", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -3741,6 +4443,7 @@ "network.transport": "tcp", "network.type": "ipv4", "observer.serial_number": "01606001116", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -3759,6 +4462,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.bytes": 0, "server.ip": "204.232.231.46", "server.packets": 1, @@ -3797,14 +4505,23 @@ "destination.packets": 1, "destination.port": 53, "event.action": "flow_started", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2012-04-10T04:39:53.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2012-04-10T04:39:53.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "start", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -3818,6 +4535,7 @@ "network.transport": "udp", "network.type": "ipv4", "observer.serial_number": "01606001116", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -3836,6 +4554,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.bytes": 0, "server.ip": "205.171.2.25", "server.packets": 1, @@ -3874,14 +4597,23 @@ "destination.packets": 2, "destination.port": 53, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 
1000000000, "event.end": "2012-04-10T04:39:23.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2012-04-10T04:39:22.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -3895,6 +4627,7 @@ "network.transport": "udp", "network.type": "ipv4", "observer.serial_number": "01606001116", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -3913,6 +4646,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.bytes": 316, "server.ip": "205.171.2.25", "server.packets": 2, @@ -3951,14 +4689,23 @@ "destination.packets": 1, "destination.port": 53, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2012-04-10T04:39:23.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2012-04-10T04:39:23.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -3972,6 +4719,7 @@ "network.transport": "udp", "network.type": "ipv4", "observer.serial_number": "01606001116", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -3990,6 +4738,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.bytes": 121, "server.ip": "205.171.2.25", "server.packets": 1, @@ -4028,14 +4781,23 @@ "destination.packets": 1, "destination.port": 53, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2012-04-10T04:39:23.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2012-04-10T04:39:23.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -4049,6 +4811,7 @@ "network.transport": "udp", "network.type": "ipv4", "observer.serial_number": "01606001116", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -4067,6 +4830,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.bytes": 169, "server.ip": "205.171.2.25", "server.packets": 1, @@ -4105,14 +4873,23 @@ "destination.packets": 6, "destination.port": 80, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2012-04-10T04:39:23.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2012-04-10T04:39:23.000-02:00", "event.timezone": "-02:00", + "event.type": [ + 
"allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -4126,6 +4903,7 @@ "network.transport": "tcp", "network.type": "ipv4", "observer.serial_number": "01606001116", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -4144,6 +4922,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.bytes": 954, "server.ip": "62.211.68.12", "server.packets": 6, @@ -4185,14 +4968,23 @@ "destination.packets": 12, "destination.port": 80, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 2000000000, "event.end": "2012-04-10T04:39:23.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2012-04-10T04:39:21.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -4206,6 +4998,7 @@ "network.transport": "tcp", "network.type": "ipv4", "observer.serial_number": "01606001116", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -4224,6 +5017,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.bytes": 9130, "server.ip": "212.48.10.58", "server.packets": 12, @@ -4265,14 +5063,23 @@ "destination.packets": 18, "destination.port": 80, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 512000000000, "event.end": "2012-04-10T04:38:23.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2012-04-10T04:29:51.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -4286,6 +5093,7 @@ "network.transport": "tcp", "network.type": "ipv4", "observer.serial_number": "01606001116", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -4304,6 +5112,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.bytes": 555, "server.ip": "204.232.231.46", "server.packets": 18, @@ -4342,14 +5155,23 @@ "destination.packets": 1, "destination.port": 53, "event.action": "flow_started", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2012-04-10T04:39:53.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2012-04-10T04:39:53.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "start", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -4363,6 +5185,7 @@ "network.transport": "udp", "network.type": "ipv4", "observer.serial_number": "01606001116", + 
"panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -4381,6 +5204,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.bytes": 0, "server.ip": "205.171.2.25", "server.packets": 1, @@ -4422,14 +5250,23 @@ "destination.packets": 1, "destination.port": 80, "event.action": "flow_started", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2012-04-10T04:39:53.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2012-04-10T04:39:53.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "start", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -4443,6 +5280,7 @@ "network.transport": "tcp", "network.type": "ipv4", "observer.serial_number": "01606001116", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -4461,6 +5299,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.bytes": 0, "server.ip": "204.232.231.46", "server.packets": 1, @@ -4499,14 +5342,23 @@ "destination.packets": 1, "destination.port": 53, "event.action": "flow_started", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2012-04-10T04:39:52.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2012-04-10T04:39:52.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "start", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -4520,6 +5372,7 @@ "network.transport": "udp", "network.type": "ipv4", "observer.serial_number": "01606001116", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -4538,6 +5391,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.bytes": 0, "server.ip": "205.171.2.25", "server.packets": 1, @@ -4576,14 +5434,23 @@ "destination.packets": 1, "destination.port": 53, "event.action": "flow_started", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2012-04-10T04:39:52.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2012-04-10T04:39:52.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "start", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -4597,6 +5464,7 @@ "network.transport": "udp", "network.type": "ipv4", "observer.serial_number": "01606001116", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -4615,6 +5483,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + 
"rule.name": "rule1", "server.bytes": 0, "server.ip": "205.171.2.25", "server.packets": 1, @@ -4656,14 +5529,23 @@ "destination.packets": 1, "destination.port": 40043, "event.action": "flow_started", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2012-04-10T04:39:52.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2012-04-10T04:39:52.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "start", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -4677,6 +5559,7 @@ "network.transport": "udp", "network.type": "ipv4", "observer.serial_number": "01606001116", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -4695,6 +5578,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.bytes": 0, "server.ip": "65.55.223.31", "server.packets": 1, @@ -4736,14 +5624,23 @@ "destination.packets": 1, "destination.port": 80, "event.action": "flow_started", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2012-04-10T04:39:52.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2012-04-10T04:39:52.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "start", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -4757,6 +5654,7 @@ "network.transport": "tcp", "network.type": "ipv4", "observer.serial_number": "01606001116", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -4775,6 +5673,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.bytes": 0, "server.ip": "204.232.231.46", "server.packets": 1, @@ -4813,14 +5716,23 @@ "destination.packets": 1, "destination.port": 53, "event.action": "flow_started", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2012-04-10T04:39:52.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2012-04-10T04:39:52.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "start", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -4834,6 +5746,7 @@ "network.transport": "udp", "network.type": "ipv4", "observer.serial_number": "01606001116", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -4852,6 +5765,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.bytes": 0, "server.ip": "205.171.2.25", "server.packets": 1, @@ -4890,14 +5808,23 @@ "destination.packets": 1, "destination.port": 53, "event.action": "flow_started", - "event.category": "network_traffic", + "event.category": [ + 
"network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2012-04-10T04:39:52.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2012-04-10T04:39:52.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "start", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -4911,6 +5838,7 @@ "network.transport": "udp", "network.type": "ipv4", "observer.serial_number": "01606001116", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -4929,6 +5857,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.bytes": 0, "server.ip": "205.171.2.25", "server.packets": 1, @@ -4967,14 +5900,23 @@ "destination.packets": 6, "destination.port": 80, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 1000000000, "event.end": "2012-04-10T04:39:22.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2012-04-10T04:39:21.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -4988,6 +5930,7 @@ "network.transport": "tcp", "network.type": "ipv4", "observer.serial_number": "01606001116", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -5006,6 +5949,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.bytes": 906, "server.ip": "62.211.68.12", "server.packets": 6, @@ -5044,14 +5992,23 @@ "destination.packets": 1, "destination.port": 53, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2012-04-10T04:39:22.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2012-04-10T04:39:22.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -5065,6 +6022,7 @@ "network.transport": "udp", "network.type": "ipv4", "observer.serial_number": "01606001116", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -5083,6 +6041,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.bytes": 163, "server.ip": "205.171.2.25", "server.packets": 1, @@ -5121,14 +6084,23 @@ "destination.packets": 1, "destination.port": 53, "event.action": "flow_started", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2012-04-10T04:39:51.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", 
"event.start": "2012-04-10T04:39:51.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "start", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -5142,6 +6114,7 @@ "network.transport": "udp", "network.type": "ipv4", "observer.serial_number": "01606001116", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -5160,6 +6133,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.bytes": 0, "server.ip": "205.171.2.25", "server.packets": 1, @@ -5198,14 +6176,23 @@ "destination.packets": 1, "destination.port": 53, "event.action": "flow_started", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2012-04-10T04:39:51.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2012-04-10T04:39:51.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "start", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -5219,6 +6206,7 @@ "network.transport": "udp", "network.type": "ipv4", "observer.serial_number": "01606001116", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -5237,6 +6225,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.bytes": 0, "server.ip": "205.171.2.25", "server.packets": 1, @@ -5278,14 +6271,23 @@ "destination.packets": 1, "destination.port": 80, "event.action": "flow_started", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2012-04-10T04:39:51.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2012-04-10T04:39:51.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "start", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -5299,6 +6301,7 @@ "network.transport": "tcp", "network.type": "ipv4", "observer.serial_number": "01606001116", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -5317,6 +6320,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.bytes": 0, "server.ip": "204.232.231.46", "server.packets": 1, @@ -5355,14 +6363,23 @@ "destination.packets": 6, "destination.port": 80, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 1000000000, "event.end": "2012-04-10T04:39:21.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2012-04-10T04:39:20.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -5376,6 +6393,7 @@ "network.transport": 
"tcp", "network.type": "ipv4", "observer.serial_number": "01606001116", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -5394,6 +6412,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.bytes": 922, "server.ip": "62.211.68.12", "server.packets": 6, @@ -5435,14 +6458,23 @@ "destination.packets": 1, "destination.port": 80, "event.action": "flow_started", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2012-04-10T04:39:51.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2012-04-10T04:39:51.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "start", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -5456,6 +6488,7 @@ "network.transport": "tcp", "network.type": "ipv4", "observer.serial_number": "01606001116", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -5474,6 +6507,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.bytes": 0, "server.ip": "204.232.231.46", "server.packets": 1, @@ -5512,14 +6550,23 @@ "destination.packets": 1, "destination.port": 53, "event.action": "flow_started", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2012-04-10T04:39:50.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2012-04-10T04:39:50.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "start", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -5533,6 +6580,7 @@ "network.transport": "udp", "network.type": "ipv4", "observer.serial_number": "01606001116", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -5551,6 +6599,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.bytes": 0, "server.ip": "205.171.2.25", "server.packets": 1, @@ -5589,14 +6642,23 @@ "destination.packets": 1, "destination.port": 53, "event.action": "flow_started", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2012-04-10T04:39:50.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2012-04-10T04:39:50.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "start", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -5610,6 +6672,7 @@ "network.transport": "udp", "network.type": "ipv4", "observer.serial_number": "01606001116", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -5628,6 +6691,11 @@ 
"0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.bytes": 0, "server.ip": "205.171.2.25", "server.packets": 1, @@ -5669,14 +6737,23 @@ "destination.packets": 1, "destination.port": 80, "event.action": "flow_started", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2012-04-10T04:39:50.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2012-04-10T04:39:50.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "start", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -5690,6 +6767,7 @@ "network.transport": "tcp", "network.type": "ipv4", "observer.serial_number": "01606001116", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -5708,6 +6786,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.bytes": 0, "server.ip": "204.232.231.46", "server.packets": 1, @@ -5746,14 +6829,23 @@ "destination.packets": 17, "destination.port": 80, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2012-04-10T04:39:20.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2012-04-10T04:39:20.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -5767,6 +6859,7 @@ "network.transport": "tcp", "network.type": "ipv4", "observer.serial_number": "01606001116", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -5785,6 +6878,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.bytes": 26786, "server.ip": "8.5.1.1", "server.packets": 17, @@ -5823,14 +6921,23 @@ "destination.packets": 1, "destination.port": 53, "event.action": "flow_started", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2012-04-10T04:39:50.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2012-04-10T04:39:50.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "start", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -5844,6 +6951,7 @@ "network.transport": "udp", "network.type": "ipv4", "observer.serial_number": "01606001116", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -5862,6 +6970,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.bytes": 0, "server.ip": "205.171.2.25", "server.packets": 1, @@ -5900,14 +7013,23 @@ "destination.packets": 1, "destination.port": 53, "event.action": 
"flow_started", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2012-04-10T04:39:50.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2012-04-10T04:39:50.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "start", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -5921,6 +7043,7 @@ "network.transport": "udp", "network.type": "ipv4", "observer.serial_number": "01606001116", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -5939,6 +7062,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.bytes": 0, "server.ip": "205.171.2.25", "server.packets": 1, @@ -5980,14 +7108,23 @@ "destination.packets": 1, "destination.port": 80, "event.action": "flow_started", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2012-04-10T04:39:50.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2012-04-10T04:39:50.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "start", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -6001,6 +7138,7 @@ "network.transport": "tcp", "network.type": "ipv4", "observer.serial_number": "01606001116", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -6019,6 +7157,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.bytes": 0, "server.ip": "204.232.231.46", "server.packets": 1, @@ -6051,14 +7194,23 @@ "destination.packets": 1, "destination.port": 53, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2012-04-10T04:39:20.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2012-04-10T04:39:20.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -6072,6 +7224,7 @@ "network.transport": "udp", "network.type": "ipv4", "observer.serial_number": "01606001116", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -6090,6 +7243,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.bytes": 169, "server.ip": "192.168.0.1", "server.packets": 1, @@ -6131,14 +7289,23 @@ "destination.packets": 12, "destination.port": 80, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 3000000000, "event.end": "2012-04-10T04:39:20.000-02:00", + "event.kind": "event", 
"event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2012-04-10T04:39:17.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -6152,6 +7319,7 @@ "network.transport": "tcp", "network.type": "ipv4", "observer.serial_number": "01606001116", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -6170,6 +7338,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.bytes": 9064, "server.ip": "212.48.10.58", "server.packets": 12, @@ -6211,14 +7384,23 @@ "destination.packets": 12, "destination.port": 80, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 7000000000, "event.end": "2012-04-10T04:39:20.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2012-04-10T04:39:13.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -6232,6 +7414,7 @@ "network.transport": "tcp", "network.type": "ipv4", "observer.serial_number": "01606001116", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -6250,6 +7433,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.bytes": 9124, "server.ip": "212.48.10.58", "server.packets": 12, @@ -6282,14 +7470,23 @@ "destination.packets": 1, "destination.port": 53, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2012-04-10T04:39:20.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2012-04-10T04:39:20.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -6303,6 +7500,7 @@ "network.transport": "udp", "network.type": "ipv4", "observer.serial_number": "01606001116", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -6321,6 +7519,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.bytes": 137, "server.ip": "192.168.0.1", "server.packets": 1, @@ -6353,14 +7556,23 @@ "destination.packets": 1, "destination.port": 53, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2012-04-10T04:39:20.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2012-04-10T04:39:20.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", 
"input.type": "log", "labels.captive_portal": true, @@ -6374,6 +7586,7 @@ "network.transport": "udp", "network.type": "ipv4", "observer.serial_number": "01606001116", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -6392,6 +7605,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.bytes": 93, "server.ip": "192.168.0.1", "server.packets": 1, @@ -6433,14 +7651,23 @@ "destination.packets": 1, "destination.port": 80, "event.action": "flow_started", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2012-04-10T04:39:49.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2012-04-10T04:39:49.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "start", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -6454,6 +7681,7 @@ "network.transport": "tcp", "network.type": "ipv4", "observer.serial_number": "01606001116", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -6472,6 +7700,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.bytes": 0, "server.ip": "204.232.231.46", "server.packets": 1, @@ -6510,14 +7743,23 @@ "destination.packets": 1, "destination.port": 53, "event.action": "flow_started", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2012-04-10T04:39:49.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2012-04-10T04:39:49.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "start", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -6531,6 +7773,7 @@ "network.transport": "udp", "network.type": "ipv4", "observer.serial_number": "01606001116", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -6549,6 +7792,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.bytes": 0, "server.ip": "205.171.2.25", "server.packets": 1, @@ -6587,14 +7835,23 @@ "destination.packets": 1, "destination.port": 53, "event.action": "flow_started", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2012-04-10T04:39:49.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2012-04-10T04:39:49.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "start", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -6608,6 +7865,7 @@ "network.transport": "udp", "network.type": "ipv4", "observer.serial_number": "01606001116", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", 
"panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -6626,6 +7884,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.bytes": 0, "server.ip": "205.171.2.25", "server.packets": 1, @@ -6667,14 +7930,23 @@ "destination.packets": 1, "destination.port": 80, "event.action": "flow_started", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2012-04-10T04:39:49.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2012-04-10T04:39:49.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "start", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -6688,6 +7960,7 @@ "network.transport": "tcp", "network.type": "ipv4", "observer.serial_number": "01606001116", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -6706,6 +7979,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.bytes": 0, "server.ip": "204.232.231.46", "server.packets": 1, @@ -6744,14 +8022,23 @@ "destination.packets": 1, "destination.port": 53, "event.action": "flow_started", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2012-04-10T04:39:49.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2012-04-10T04:39:49.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "start", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -6765,6 +8052,7 @@ "network.transport": "udp", "network.type": "ipv4", "observer.serial_number": "01606001116", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -6783,6 +8071,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.bytes": 0, "server.ip": "205.171.2.25", "server.packets": 1, @@ -6815,14 +8108,23 @@ "destination.packets": 2, "destination.port": 53, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 1000000000, "event.end": "2012-04-10T04:39:19.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2012-04-10T04:39:18.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -6836,6 +8138,7 @@ "network.transport": "udp", "network.type": "ipv4", "observer.serial_number": "01606001116", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -6854,6 +8157,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.bytes": 0, "server.ip": "192.168.0.1", 
"server.packets": 2, @@ -6892,14 +8200,23 @@ "destination.packets": 1, "destination.port": 53, "event.action": "flow_started", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2012-04-10T04:39:49.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2012-04-10T04:39:49.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "start", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -6913,6 +8230,7 @@ "network.transport": "udp", "network.type": "ipv4", "observer.serial_number": "01606001116", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -6931,6 +8249,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.bytes": 0, "server.ip": "205.171.2.25", "server.packets": 1, @@ -6972,14 +8295,23 @@ "destination.packets": 1, "destination.port": 80, "event.action": "flow_started", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2012-04-10T04:39:48.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2012-04-10T04:39:48.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "start", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -6993,6 +8325,7 @@ "network.transport": "tcp", "network.type": "ipv4", "observer.serial_number": "01606001116", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -7011,6 +8344,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.bytes": 0, "server.ip": "204.232.231.46", "server.packets": 1, @@ -7049,14 +8387,23 @@ "destination.packets": 1, "destination.port": 53, "event.action": "flow_started", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2012-04-10T04:39:48.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2012-04-10T04:39:48.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "start", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -7070,6 +8417,7 @@ "network.transport": "udp", "network.type": "ipv4", "observer.serial_number": "01606001116", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -7088,6 +8436,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.bytes": 0, "server.ip": "205.171.2.25", "server.packets": 1, @@ -7126,14 +8479,23 @@ "destination.packets": 1, "destination.port": 53, "event.action": "flow_started", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", 
"event.duration": 0, "event.end": "2012-04-10T04:39:48.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2012-04-10T04:39:48.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "start", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -7147,6 +8509,7 @@ "network.transport": "udp", "network.type": "ipv4", "observer.serial_number": "01606001116", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -7165,6 +8528,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.bytes": 0, "server.ip": "205.171.2.25", "server.packets": 1, @@ -7203,14 +8571,23 @@ "destination.packets": 6, "destination.port": 80, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 1000000000, "event.end": "2012-04-10T04:39:18.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2012-04-10T04:39:17.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -7224,6 +8601,7 @@ "network.transport": "tcp", "network.type": "ipv4", "observer.serial_number": "01606001116", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -7242,6 +8620,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.bytes": 906, "server.ip": "62.211.68.12", "server.packets": 6, @@ -7283,14 +8666,23 @@ "destination.packets": 1, "destination.port": 80, "event.action": "flow_started", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2012-04-10T04:39:48.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2012-04-10T04:39:48.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "start", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -7304,6 +8696,7 @@ "network.transport": "tcp", "network.type": "ipv4", "observer.serial_number": "01606001116", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -7322,6 +8715,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.bytes": 0, "server.ip": "204.232.231.46", "server.packets": 1, @@ -7363,14 +8761,23 @@ "destination.packets": 1, "destination.port": 80, "event.action": "flow_started", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2012-04-10T04:39:48.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2012-04-10T04:39:48.000-02:00", "event.timezone": "-02:00", + 
"event.type": [ + "allowed", + "start", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -7384,6 +8791,7 @@ "network.transport": "tcp", "network.type": "ipv4", "observer.serial_number": "01606001116", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -7402,6 +8810,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.bytes": 0, "server.ip": "204.232.231.46", "server.packets": 1, @@ -7443,14 +8856,23 @@ "destination.packets": 1, "destination.port": 80, "event.action": "flow_started", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2012-04-10T04:39:47.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2012-04-10T04:39:47.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "start", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -7464,6 +8886,7 @@ "network.transport": "tcp", "network.type": "ipv4", "observer.serial_number": "01606001116", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -7482,6 +8905,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.bytes": 0, "server.ip": "204.232.231.46", "server.packets": 1, @@ -7514,14 +8942,23 @@ "destination.packets": 2, "destination.port": 53, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 1000000000, "event.end": "2012-04-10T04:39:17.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2012-04-10T04:39:16.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -7535,6 +8972,7 @@ "network.transport": "udp", "network.type": "ipv4", "observer.serial_number": "01606001116", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -7553,6 +8991,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.bytes": 0, "server.ip": "192.168.0.1", "server.packets": 2, @@ -7594,14 +9037,23 @@ "destination.packets": 3, "destination.port": 80, "event.action": "flow_started", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2012-04-10T04:39:47.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2012-04-10T04:39:47.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "start", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -7615,6 +9067,7 @@ "network.transport": "tcp", "network.type": "ipv4", "observer.serial_number": "01606001116", + 
"panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -7633,6 +9086,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.bytes": 78, "server.ip": "204.232.231.46", "server.packets": 3, @@ -7674,14 +9132,23 @@ "destination.packets": 3, "destination.port": 80, "event.action": "flow_started", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2012-04-10T04:39:47.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2012-04-10T04:39:47.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "start", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -7695,6 +9162,7 @@ "network.transport": "tcp", "network.type": "ipv4", "observer.serial_number": "01606001116", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -7713,6 +9181,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.bytes": 78, "server.ip": "204.232.231.46", "server.packets": 3, @@ -7754,14 +9227,23 @@ "destination.packets": 1, "destination.port": 80, "event.action": "flow_started", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2012-04-10T04:39:46.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2012-04-10T04:39:46.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "start", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.captive_portal": true, @@ -7775,6 +9257,7 @@ "network.transport": "tcp", "network.type": "ipv4", "observer.serial_number": "01606001116", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "0.0.0.0", "panw.panos.destination.nat.port": 0, @@ -7793,6 +9276,11 @@ "0.0.0.0", "0.0.0.0" ], + "related.user": [ + "crusher", + "crusher" + ], + "rule.name": "rule1", "server.bytes": 0, "server.ip": "204.232.231.46", "server.packets": 1, diff --git a/x-pack/filebeat/module/panw/panos/test/threat.log-expected.json b/x-pack/filebeat/module/panw/panos/test/threat.log-expected.json index c8c9082e093..c17fcbee131 100644 --- a/x-pack/filebeat/module/panw/panos/test/threat.log-expected.json +++ b/x-pack/filebeat/module/panw/panos/test/threat.log-expected.json @@ -16,12 +16,20 @@ "destination.nat.port": 443, "destination.port": 443, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "block-url", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -38,6 +46,7 @@ "network.transport": "tcp", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": 
"block-url", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "152.195.55.192", "panw.panos.destination.nat.port": 443, @@ -59,6 +68,7 @@ "192.168.1.63", "152.195.55.192" ], + "rule.name": "new_outbound_from_trust", "server.ip": "152.195.55.192", "server.port": 443, "service.type": "panw", @@ -90,12 +100,20 @@ "destination.nat.port": 443, "destination.port": 443, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "block-url", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -112,6 +130,7 @@ "network.transport": "tcp", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "block-url", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "152.195.55.192", "panw.panos.destination.nat.port": 443, @@ -133,6 +152,7 @@ "192.168.1.63", "152.195.55.192" ], + "rule.name": "new_outbound_from_trust", "server.ip": "152.195.55.192", "server.port": 443, "service.type": "panw", @@ -164,12 +184,20 @@ "destination.nat.port": 443, "destination.port": 443, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "block-url", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -186,6 +214,7 @@ "network.transport": "tcp", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "block-url", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "152.195.55.192", "panw.panos.destination.nat.port": 443, @@ -207,6 +236,7 @@ "192.168.1.63", "152.195.55.192" ], + "rule.name": "new_outbound_from_trust", "server.ip": "152.195.55.192", "server.port": 443, "service.type": "panw", @@ -238,12 +268,20 @@ "destination.nat.port": 443, "destination.port": 443, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "block-url", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -260,6 +298,7 @@ "network.transport": "tcp", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "block-url", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "152.195.55.192", "panw.panos.destination.nat.port": 443, @@ -281,6 +320,7 @@ "192.168.1.63", "152.195.55.192" ], + "rule.name": "new_outbound_from_trust", "server.ip": "152.195.55.192", "server.port": 443, "service.type": "panw", @@ -312,12 +352,20 @@ "destination.nat.port": 443, "destination.port": 443, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + 
"intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "block-url", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -334,6 +382,7 @@ "network.transport": "tcp", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "block-url", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "152.195.55.192", "panw.panos.destination.nat.port": 443, @@ -355,6 +404,7 @@ "192.168.1.63", "152.195.55.192" ], + "rule.name": "new_outbound_from_trust", "server.ip": "152.195.55.192", "server.port": 443, "service.type": "panw", @@ -386,12 +436,20 @@ "destination.nat.port": 443, "destination.port": 443, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "block-url", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -408,6 +466,7 @@ "network.transport": "tcp", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "block-url", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "152.195.55.192", "panw.panos.destination.nat.port": 443, @@ -429,6 +488,7 @@ "192.168.1.63", "152.195.55.192" ], + "rule.name": "new_outbound_from_trust", "server.ip": "152.195.55.192", "server.port": 443, "service.type": "panw", @@ -460,12 +520,20 @@ "destination.nat.port": 443, "destination.port": 443, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "block-url", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -482,6 +550,7 @@ "network.transport": "tcp", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "block-url", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "152.195.55.192", "panw.panos.destination.nat.port": 443, @@ -503,6 +572,7 @@ "192.168.1.63", "152.195.55.192" ], + "rule.name": "new_outbound_from_trust", "server.ip": "152.195.55.192", "server.port": 443, "service.type": "panw", @@ -534,12 +604,20 @@ "destination.nat.port": 443, "destination.port": 443, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "block-url", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -556,6 +634,7 @@ "network.transport": "tcp", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "block-url", 
"panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "152.195.55.192", "panw.panos.destination.nat.port": 443, @@ -577,6 +656,7 @@ "192.168.1.63", "152.195.55.192" ], + "rule.name": "new_outbound_from_trust", "server.ip": "152.195.55.192", "server.port": 443, "service.type": "panw", @@ -608,12 +688,20 @@ "destination.nat.port": 443, "destination.port": 443, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "block-url", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -630,6 +718,7 @@ "network.transport": "tcp", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "block-url", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "152.195.55.192", "panw.panos.destination.nat.port": 443, @@ -651,6 +740,7 @@ "192.168.1.63", "152.195.55.192" ], + "rule.name": "new_outbound_from_trust", "server.ip": "152.195.55.192", "server.port": 443, "service.type": "panw", @@ -682,12 +772,20 @@ "destination.nat.port": 443, "destination.port": 443, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "block-url", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -704,6 +802,7 @@ "network.transport": "tcp", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "block-url", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "152.195.55.192", "panw.panos.destination.nat.port": 443, @@ -725,6 +824,7 @@ "192.168.1.63", "152.195.55.192" ], + "rule.name": "new_outbound_from_trust", "server.ip": "152.195.55.192", "server.port": 443, "service.type": "panw", @@ -756,12 +856,20 @@ "destination.nat.port": 443, "destination.port": 443, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "block-url", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -778,6 +886,7 @@ "network.transport": "tcp", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "block-url", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "152.195.55.192", "panw.panos.destination.nat.port": 443, @@ -799,6 +908,7 @@ "192.168.1.63", "152.195.55.192" ], + "rule.name": "new_outbound_from_trust", "server.ip": "152.195.55.192", "server.port": 443, "service.type": "panw", @@ -830,12 +940,20 @@ "destination.nat.port": 443, "destination.port": 443, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + 
"network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "block-url", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -852,6 +970,7 @@ "network.transport": "tcp", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "block-url", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "152.195.55.192", "panw.panos.destination.nat.port": 443, @@ -873,6 +992,7 @@ "192.168.1.63", "152.195.55.192" ], + "rule.name": "new_outbound_from_trust", "server.ip": "152.195.55.192", "server.port": 443, "service.type": "panw", @@ -904,12 +1024,20 @@ "destination.nat.port": 443, "destination.port": 443, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "block-url", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -926,6 +1054,7 @@ "network.transport": "tcp", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "block-url", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "152.195.55.192", "panw.panos.destination.nat.port": 443, @@ -947,6 +1076,7 @@ "192.168.1.63", "152.195.55.192" ], + "rule.name": "new_outbound_from_trust", "server.ip": "152.195.55.192", "server.port": 443, "service.type": "panw", @@ -978,12 +1108,20 @@ "destination.nat.port": 443, "destination.port": 443, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "block-url", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -1000,6 +1138,7 @@ "network.transport": "tcp", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "block-url", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "152.195.55.192", "panw.panos.destination.nat.port": 443, @@ -1021,6 +1160,7 @@ "192.168.1.63", "152.195.55.192" ], + "rule.name": "new_outbound_from_trust", "server.ip": "152.195.55.192", "server.port": 443, "service.type": "panw", @@ -1052,12 +1192,20 @@ "destination.nat.port": 443, "destination.port": 443, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "block-url", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -1074,6 +1222,7 @@ "network.transport": "tcp", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "block-url", "panw.panos.destination.interface": 
"ethernet1/1", "panw.panos.destination.nat.ip": "152.195.55.192", "panw.panos.destination.nat.port": 443, @@ -1095,6 +1244,7 @@ "192.168.1.63", "152.195.55.192" ], + "rule.name": "new_outbound_from_trust", "server.ip": "152.195.55.192", "server.port": 443, "service.type": "panw", @@ -1126,12 +1276,20 @@ "destination.nat.port": 443, "destination.port": 443, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "block-url", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -1148,6 +1306,7 @@ "network.transport": "tcp", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "block-url", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "152.195.55.192", "panw.panos.destination.nat.port": 443, @@ -1169,6 +1328,7 @@ "192.168.1.63", "152.195.55.192" ], + "rule.name": "new_outbound_from_trust", "server.ip": "152.195.55.192", "server.port": 443, "service.type": "panw", @@ -1200,12 +1360,20 @@ "destination.nat.port": 443, "destination.port": 443, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "block-url", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -1222,6 +1390,7 @@ "network.transport": "tcp", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "block-url", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "152.195.55.192", "panw.panos.destination.nat.port": 443, @@ -1243,6 +1412,7 @@ "192.168.1.63", "152.195.55.192" ], + "rule.name": "new_outbound_from_trust", "server.ip": "152.195.55.192", "server.port": 443, "service.type": "panw", @@ -1274,12 +1444,20 @@ "destination.nat.port": 443, "destination.port": 443, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "block-url", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -1296,6 +1474,7 @@ "network.transport": "tcp", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "block-url", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "152.195.55.192", "panw.panos.destination.nat.port": 443, @@ -1317,6 +1496,7 @@ "192.168.1.63", "152.195.55.192" ], + "rule.name": "new_outbound_from_trust", "server.ip": "152.195.55.192", "server.port": 443, "service.type": "panw", @@ -1348,12 +1528,20 @@ "destination.nat.port": 443, "destination.port": 443, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], 
"event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "block-url", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -1370,6 +1558,7 @@ "network.transport": "tcp", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "block-url", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "152.195.55.192", "panw.panos.destination.nat.port": 443, @@ -1391,6 +1580,7 @@ "192.168.1.63", "152.195.55.192" ], + "rule.name": "new_outbound_from_trust", "server.ip": "152.195.55.192", "server.port": 443, "service.type": "panw", @@ -1422,12 +1612,20 @@ "destination.nat.port": 443, "destination.port": 443, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "block-url", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -1444,6 +1642,7 @@ "network.transport": "tcp", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "block-url", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "152.195.55.192", "panw.panos.destination.nat.port": 443, @@ -1465,6 +1664,7 @@ "192.168.1.63", "152.195.55.192" ], + "rule.name": "new_outbound_from_trust", "server.ip": "152.195.55.192", "server.port": 443, "service.type": "panw", @@ -1496,12 +1696,20 @@ "destination.nat.port": 443, "destination.port": 443, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "block-url", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -1518,6 +1726,7 @@ "network.transport": "tcp", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "block-url", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "23.72.137.131", "panw.panos.destination.nat.port": 443, @@ -1539,6 +1748,7 @@ "192.168.1.63", "23.72.137.131" ], + "rule.name": "new_outbound_from_trust", "server.ip": "23.72.137.131", "server.port": 443, "service.type": "panw", @@ -1570,12 +1780,20 @@ "destination.nat.port": 443, "destination.port": 443, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "block-url", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -1592,6 +1810,7 @@ "network.transport": "tcp", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "block-url", "panw.panos.destination.interface": "ethernet1/1", 
"panw.panos.destination.nat.ip": "152.195.55.192", "panw.panos.destination.nat.port": 443, @@ -1613,6 +1832,7 @@ "192.168.1.63", "152.195.55.192" ], + "rule.name": "new_outbound_from_trust", "server.ip": "152.195.55.192", "server.port": 443, "service.type": "panw", @@ -1644,12 +1864,20 @@ "destination.nat.port": 443, "destination.port": 443, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "block-url", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -1666,6 +1894,7 @@ "network.transport": "tcp", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "block-url", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "152.195.55.192", "panw.panos.destination.nat.port": 443, @@ -1687,6 +1916,7 @@ "192.168.1.63", "152.195.55.192" ], + "rule.name": "new_outbound_from_trust", "server.ip": "152.195.55.192", "server.port": 443, "service.type": "panw", @@ -1718,12 +1948,20 @@ "destination.nat.port": 443, "destination.port": 443, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "block-url", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -1740,6 +1978,7 @@ "network.transport": "tcp", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "block-url", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "152.195.55.192", "panw.panos.destination.nat.port": 443, @@ -1761,6 +2000,7 @@ "192.168.1.63", "152.195.55.192" ], + "rule.name": "new_outbound_from_trust", "server.ip": "152.195.55.192", "server.port": 443, "service.type": "panw", @@ -1792,12 +2032,20 @@ "destination.nat.port": 443, "destination.port": 443, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "block-url", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -1814,6 +2062,7 @@ "network.transport": "tcp", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "block-url", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "152.195.55.192", "panw.panos.destination.nat.port": 443, @@ -1835,6 +2084,7 @@ "192.168.1.63", "152.195.55.192" ], + "rule.name": "new_outbound_from_trust", "server.ip": "152.195.55.192", "server.port": 443, "service.type": "panw", @@ -1866,12 +2116,20 @@ "destination.nat.port": 443, "destination.port": 443, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], 
"event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "block-url", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -1888,6 +2146,7 @@ "network.transport": "tcp", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "block-url", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "152.195.55.192", "panw.panos.destination.nat.port": 443, @@ -1909,6 +2168,7 @@ "192.168.1.63", "152.195.55.192" ], + "rule.name": "new_outbound_from_trust", "server.ip": "152.195.55.192", "server.port": 443, "service.type": "panw", @@ -1940,12 +2200,20 @@ "destination.nat.port": 443, "destination.port": 443, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "block-url", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -1962,6 +2230,7 @@ "network.transport": "tcp", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "block-url", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "152.195.55.192", "panw.panos.destination.nat.port": 443, @@ -1983,6 +2252,7 @@ "192.168.1.63", "152.195.55.192" ], + "rule.name": "new_outbound_from_trust", "server.ip": "152.195.55.192", "server.port": 443, "service.type": "panw", @@ -2014,12 +2284,20 @@ "destination.nat.port": 443, "destination.port": 443, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "block-url", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -2036,6 +2314,7 @@ "network.transport": "tcp", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "block-url", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "152.195.55.192", "panw.panos.destination.nat.port": 443, @@ -2057,6 +2336,7 @@ "192.168.1.63", "152.195.55.192" ], + "rule.name": "new_outbound_from_trust", "server.ip": "152.195.55.192", "server.port": 443, "service.type": "panw", @@ -2088,12 +2368,20 @@ "destination.nat.port": 443, "destination.port": 443, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "block-url", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -2110,6 +2398,7 @@ "network.transport": "tcp", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "block-url", "panw.panos.destination.interface": "ethernet1/1", 
"panw.panos.destination.nat.ip": "152.195.55.192", "panw.panos.destination.nat.port": 443, @@ -2131,6 +2420,7 @@ "192.168.1.63", "152.195.55.192" ], + "rule.name": "new_outbound_from_trust", "server.ip": "152.195.55.192", "server.port": 443, "service.type": "panw", @@ -2162,12 +2452,20 @@ "destination.nat.port": 443, "destination.port": 443, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "block-url", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -2184,6 +2482,7 @@ "network.transport": "tcp", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "block-url", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "152.195.55.192", "panw.panos.destination.nat.port": 443, @@ -2205,6 +2504,7 @@ "192.168.1.63", "152.195.55.192" ], + "rule.name": "new_outbound_from_trust", "server.ip": "152.195.55.192", "server.port": 443, "service.type": "panw", @@ -2236,12 +2536,20 @@ "destination.nat.port": 443, "destination.port": 443, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "block-url", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -2258,6 +2566,7 @@ "network.transport": "tcp", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "block-url", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "152.195.55.192", "panw.panos.destination.nat.port": 443, @@ -2279,6 +2588,7 @@ "192.168.1.63", "152.195.55.192" ], + "rule.name": "new_outbound_from_trust", "server.ip": "152.195.55.192", "server.port": 443, "service.type": "panw", @@ -2310,12 +2620,20 @@ "destination.nat.port": 443, "destination.port": 443, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "block-url", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -2332,6 +2650,7 @@ "network.transport": "tcp", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "block-url", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "152.195.55.192", "panw.panos.destination.nat.port": 443, @@ -2353,6 +2672,7 @@ "192.168.1.63", "152.195.55.192" ], + "rule.name": "new_outbound_from_trust", "server.ip": "152.195.55.192", "server.port": 443, "service.type": "panw", @@ -2384,12 +2704,20 @@ "destination.nat.port": 443, "destination.port": 443, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], 
"event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "block-url", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -2406,6 +2734,7 @@ "network.transport": "tcp", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "block-url", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "152.195.55.192", "panw.panos.destination.nat.port": 443, @@ -2427,6 +2756,7 @@ "192.168.1.63", "152.195.55.192" ], + "rule.name": "new_outbound_from_trust", "server.ip": "152.195.55.192", "server.port": 443, "service.type": "panw", @@ -2458,12 +2788,20 @@ "destination.nat.port": 443, "destination.port": 443, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "block-url", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -2480,6 +2818,7 @@ "network.transport": "tcp", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "block-url", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "152.195.55.192", "panw.panos.destination.nat.port": 443, @@ -2501,6 +2840,7 @@ "192.168.1.63", "152.195.55.192" ], + "rule.name": "new_outbound_from_trust", "server.ip": "152.195.55.192", "server.port": 443, "service.type": "panw", @@ -2532,12 +2872,20 @@ "destination.nat.port": 443, "destination.port": 443, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "block-url", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -2554,6 +2902,7 @@ "network.transport": "tcp", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "block-url", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "152.195.55.192", "panw.panos.destination.nat.port": 443, @@ -2575,6 +2924,7 @@ "192.168.1.63", "152.195.55.192" ], + "rule.name": "new_outbound_from_trust", "server.ip": "152.195.55.192", "server.port": 443, "service.type": "panw", @@ -2606,12 +2956,20 @@ "destination.nat.port": 443, "destination.port": 443, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "block-url", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -2628,6 +2986,7 @@ "network.transport": "tcp", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "block-url", "panw.panos.destination.interface": "ethernet1/1", 
"panw.panos.destination.nat.ip": "151.101.2.2", "panw.panos.destination.nat.port": 443, @@ -2649,6 +3008,7 @@ "192.168.1.63", "151.101.2.2" ], + "rule.name": "new_outbound_from_trust", "server.ip": "151.101.2.2", "server.port": 443, "service.type": "panw", @@ -2683,12 +3043,20 @@ "destination.nat.port": 443, "destination.port": 443, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "block-url", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -2705,6 +3073,7 @@ "network.transport": "tcp", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "block-url", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "54.192.7.152", "panw.panos.destination.nat.port": 443, @@ -2726,6 +3095,7 @@ "192.168.1.63", "54.192.7.152" ], + "rule.name": "new_outbound_from_trust", "server.ip": "54.192.7.152", "server.port": 443, "service.type": "panw", @@ -2760,12 +3130,20 @@ "destination.nat.port": 443, "destination.port": 443, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "block-url", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -2782,6 +3160,7 @@ "network.transport": "tcp", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "block-url", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "52.4.120.175", "panw.panos.destination.nat.port": 443, @@ -2803,6 +3182,7 @@ "192.168.1.63", "52.4.120.175" ], + "rule.name": "new_outbound_from_trust", "server.ip": "52.4.120.175", "server.port": 443, "service.type": "panw", @@ -2837,12 +3217,20 @@ "destination.nat.port": 443, "destination.port": 443, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "block-url", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -2859,6 +3247,7 @@ "network.transport": "tcp", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "block-url", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "52.4.120.175", "panw.panos.destination.nat.port": 443, @@ -2880,6 +3269,7 @@ "192.168.1.63", "52.4.120.175" ], + "rule.name": "new_outbound_from_trust", "server.ip": "52.4.120.175", "server.port": 443, "service.type": "panw", @@ -2914,12 +3304,20 @@ "destination.nat.port": 443, "destination.port": 443, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + 
"event.kind": "alert", "event.module": "panw", - "event.outcome": "block-url", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -2936,6 +3334,7 @@ "network.transport": "tcp", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "block-url", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "52.4.120.175", "panw.panos.destination.nat.port": 443, @@ -2957,6 +3356,7 @@ "192.168.1.63", "52.4.120.175" ], + "rule.name": "new_outbound_from_trust", "server.ip": "52.4.120.175", "server.port": 443, "service.type": "panw", @@ -2991,12 +3391,20 @@ "destination.nat.port": 443, "destination.port": 443, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "block-url", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -3013,6 +3421,7 @@ "network.transport": "tcp", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "block-url", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "52.4.120.175", "panw.panos.destination.nat.port": 443, @@ -3034,6 +3443,7 @@ "192.168.1.63", "52.4.120.175" ], + "rule.name": "new_outbound_from_trust", "server.ip": "52.4.120.175", "server.port": 443, "service.type": "panw", @@ -3068,12 +3478,20 @@ "destination.nat.port": 443, "destination.port": 443, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "block-url", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -3090,6 +3508,7 @@ "network.transport": "tcp", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "block-url", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "52.4.120.175", "panw.panos.destination.nat.port": 443, @@ -3111,6 +3530,7 @@ "192.168.1.63", "52.4.120.175" ], + "rule.name": "new_outbound_from_trust", "server.ip": "52.4.120.175", "server.port": 443, "service.type": "panw", @@ -3145,12 +3565,20 @@ "destination.nat.port": 443, "destination.port": 443, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "block-url", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -3167,6 +3595,7 @@ "network.transport": "tcp", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "block-url", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "52.4.120.175", 
"panw.panos.destination.nat.port": 443, @@ -3188,6 +3617,7 @@ "192.168.1.63", "52.4.120.175" ], + "rule.name": "new_outbound_from_trust", "server.ip": "52.4.120.175", "server.port": 443, "service.type": "panw", @@ -3222,12 +3652,20 @@ "destination.nat.port": 443, "destination.port": 443, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "block-url", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -3244,6 +3682,7 @@ "network.transport": "tcp", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "block-url", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "52.4.120.175", "panw.panos.destination.nat.port": 443, @@ -3265,6 +3704,7 @@ "192.168.1.63", "52.4.120.175" ], + "rule.name": "new_outbound_from_trust", "server.ip": "52.4.120.175", "server.port": 443, "service.type": "panw", @@ -3299,12 +3739,20 @@ "destination.nat.port": 443, "destination.port": 443, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "block-url", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -3321,6 +3769,7 @@ "network.transport": "tcp", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "block-url", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "52.4.120.175", "panw.panos.destination.nat.port": 443, @@ -3342,6 +3791,7 @@ "192.168.1.63", "52.4.120.175" ], + "rule.name": "new_outbound_from_trust", "server.ip": "52.4.120.175", "server.port": 443, "service.type": "panw", @@ -3376,12 +3826,20 @@ "destination.nat.port": 443, "destination.port": 443, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "block-url", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -3398,6 +3856,7 @@ "network.transport": "tcp", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "block-url", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "52.4.120.175", "panw.panos.destination.nat.port": 443, @@ -3419,6 +3878,7 @@ "192.168.1.63", "52.4.120.175" ], + "rule.name": "new_outbound_from_trust", "server.ip": "52.4.120.175", "server.port": 443, "service.type": "panw", @@ -3453,12 +3913,20 @@ "destination.nat.port": 443, "destination.port": 443, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - 
"event.outcome": "block-url", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -3475,6 +3943,7 @@ "network.transport": "tcp", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "block-url", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "52.4.120.175", "panw.panos.destination.nat.port": 443, @@ -3496,6 +3965,7 @@ "192.168.1.63", "52.4.120.175" ], + "rule.name": "new_outbound_from_trust", "server.ip": "52.4.120.175", "server.port": 443, "service.type": "panw", @@ -3530,12 +4000,20 @@ "destination.nat.port": 443, "destination.port": 443, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "block-url", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -3552,6 +4030,7 @@ "network.transport": "tcp", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "block-url", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "52.4.120.175", "panw.panos.destination.nat.port": 443, @@ -3573,6 +4052,7 @@ "192.168.1.63", "52.4.120.175" ], + "rule.name": "new_outbound_from_trust", "server.ip": "52.4.120.175", "server.port": 443, "service.type": "panw", @@ -3607,12 +4087,20 @@ "destination.nat.port": 443, "destination.port": 443, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "block-url", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -3629,6 +4117,7 @@ "network.transport": "tcp", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "block-url", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "52.4.120.175", "panw.panos.destination.nat.port": 443, @@ -3650,6 +4139,7 @@ "192.168.1.63", "52.4.120.175" ], + "rule.name": "new_outbound_from_trust", "server.ip": "52.4.120.175", "server.port": 443, "service.type": "panw", @@ -3684,12 +4174,20 @@ "destination.nat.port": 443, "destination.port": 443, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "block-url", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -3706,6 +4204,7 @@ "network.transport": "tcp", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "block-url", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "216.58.194.98", "panw.panos.destination.nat.port": 443, @@ -3727,6 
+4226,7 @@ "192.168.1.63", "216.58.194.98" ], + "rule.name": "new_outbound_from_trust", "server.ip": "216.58.194.98", "server.port": 443, "service.type": "panw", @@ -3758,12 +4258,20 @@ "destination.nat.port": 443, "destination.port": 443, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "block-url", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -3780,6 +4288,7 @@ "network.transport": "tcp", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "block-url", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "23.72.145.245", "panw.panos.destination.nat.port": 443, @@ -3801,6 +4310,7 @@ "192.168.1.63", "23.72.145.245" ], + "rule.name": "new_outbound_from_trust", "server.ip": "23.72.145.245", "server.port": 443, "service.type": "panw", @@ -3832,12 +4342,20 @@ "destination.nat.port": 443, "destination.port": 443, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "block-url", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -3854,6 +4372,7 @@ "network.transport": "tcp", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "block-url", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "23.72.145.245", "panw.panos.destination.nat.port": 443, @@ -3875,6 +4394,7 @@ "192.168.1.63", "23.72.145.245" ], + "rule.name": "new_outbound_from_trust", "server.ip": "23.72.145.245", "server.port": 443, "service.type": "panw", @@ -3906,12 +4426,20 @@ "destination.nat.port": 443, "destination.port": 443, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "block-url", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -3928,6 +4456,7 @@ "network.transport": "tcp", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "block-url", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "23.72.145.245", "panw.panos.destination.nat.port": 443, @@ -3949,6 +4478,7 @@ "192.168.1.63", "23.72.145.245" ], + "rule.name": "new_outbound_from_trust", "server.ip": "23.72.145.245", "server.port": 443, "service.type": "panw", @@ -3980,12 +4510,20 @@ "destination.nat.port": 443, "destination.port": 443, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "block-url", + 
"event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -4002,6 +4540,7 @@ "network.transport": "tcp", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "block-url", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "23.72.145.245", "panw.panos.destination.nat.port": 443, @@ -4023,6 +4562,7 @@ "192.168.1.63", "23.72.145.245" ], + "rule.name": "new_outbound_from_trust", "server.ip": "23.72.145.245", "server.port": 443, "service.type": "panw", @@ -4054,12 +4594,20 @@ "destination.nat.port": 443, "destination.port": 443, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "block-url", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -4076,6 +4624,7 @@ "network.transport": "tcp", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "block-url", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "23.72.145.245", "panw.panos.destination.nat.port": 443, @@ -4097,6 +4646,7 @@ "192.168.1.63", "23.72.145.245" ], + "rule.name": "new_outbound_from_trust", "server.ip": "23.72.145.245", "server.port": 443, "service.type": "panw", @@ -4128,12 +4678,20 @@ "destination.nat.port": 443, "destination.port": 443, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "block-url", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -4150,6 +4708,7 @@ "network.transport": "tcp", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "block-url", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "23.72.145.245", "panw.panos.destination.nat.port": 443, @@ -4171,6 +4730,7 @@ "192.168.1.63", "23.72.145.245" ], + "rule.name": "new_outbound_from_trust", "server.ip": "23.72.145.245", "server.port": 443, "service.type": "panw", @@ -4202,12 +4762,20 @@ "destination.nat.port": 443, "destination.port": 443, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "block-url", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -4224,6 +4792,7 @@ "network.transport": "tcp", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "block-url", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "23.72.145.245", "panw.panos.destination.nat.port": 443, @@ -4245,6 +4814,7 @@ 
"192.168.1.63", "23.72.145.245" ], + "rule.name": "new_outbound_from_trust", "server.ip": "23.72.145.245", "server.port": 443, "service.type": "panw", @@ -4276,12 +4846,20 @@ "destination.nat.port": 443, "destination.port": 443, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "block-url", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -4298,6 +4876,7 @@ "network.transport": "tcp", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "block-url", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "23.72.145.245", "panw.panos.destination.nat.port": 443, @@ -4319,6 +4898,7 @@ "192.168.1.63", "23.72.145.245" ], + "rule.name": "new_outbound_from_trust", "server.ip": "23.72.145.245", "server.port": 443, "service.type": "panw", @@ -4350,12 +4930,20 @@ "destination.nat.port": 443, "destination.port": 443, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "block-url", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -4372,6 +4960,7 @@ "network.transport": "tcp", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "block-url", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "23.72.145.245", "panw.panos.destination.nat.port": 443, @@ -4393,6 +4982,7 @@ "192.168.1.63", "23.72.145.245" ], + "rule.name": "new_outbound_from_trust", "server.ip": "23.72.145.245", "server.port": 443, "service.type": "panw", @@ -4424,12 +5014,20 @@ "destination.nat.port": 443, "destination.port": 443, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "block-url", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -4446,6 +5044,7 @@ "network.transport": "tcp", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "block-url", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "23.72.145.245", "panw.panos.destination.nat.port": 443, @@ -4467,6 +5066,7 @@ "192.168.1.63", "23.72.145.245" ], + "rule.name": "new_outbound_from_trust", "server.ip": "23.72.145.245", "server.port": 443, "service.type": "panw", @@ -4501,12 +5101,20 @@ "destination.nat.port": 443, "destination.port": 443, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "block-url", + "event.outcome": 
"success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -4523,6 +5131,7 @@ "network.transport": "tcp", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "block-url", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "54.209.101.70", "panw.panos.destination.nat.port": 443, @@ -4544,6 +5153,7 @@ "192.168.1.63", "54.209.101.70" ], + "rule.name": "new_outbound_from_trust", "server.ip": "54.209.101.70", "server.port": 443, "service.type": "panw", @@ -4578,12 +5188,20 @@ "destination.nat.port": 443, "destination.port": 443, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "block-url", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -4600,6 +5218,7 @@ "network.transport": "tcp", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "block-url", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "54.209.101.70", "panw.panos.destination.nat.port": 443, @@ -4621,6 +5240,7 @@ "192.168.1.63", "54.209.101.70" ], + "rule.name": "new_outbound_from_trust", "server.ip": "54.209.101.70", "server.port": 443, "service.type": "panw", @@ -4655,12 +5275,20 @@ "destination.nat.port": 443, "destination.port": 443, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "block-url", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -4677,6 +5305,7 @@ "network.transport": "tcp", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "block-url", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "54.209.101.70", "panw.panos.destination.nat.port": 443, @@ -4698,6 +5327,7 @@ "192.168.1.63", "54.209.101.70" ], + "rule.name": "new_outbound_from_trust", "server.ip": "54.209.101.70", "server.port": 443, "service.type": "panw", @@ -4732,12 +5362,20 @@ "destination.nat.port": 443, "destination.port": 443, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "block-url", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -4754,6 +5392,7 @@ "network.transport": "tcp", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "block-url", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "54.209.101.70", "panw.panos.destination.nat.port": 443, @@ -4775,6 +5414,7 @@ "192.168.1.63", "54.209.101.70" 
], + "rule.name": "new_outbound_from_trust", "server.ip": "54.209.101.70", "server.port": 443, "service.type": "panw", @@ -4809,12 +5449,20 @@ "destination.nat.port": 443, "destination.port": 443, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "block-url", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -4831,6 +5479,7 @@ "network.transport": "tcp", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "block-url", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "54.209.101.70", "panw.panos.destination.nat.port": 443, @@ -4852,6 +5501,7 @@ "192.168.1.63", "54.209.101.70" ], + "rule.name": "new_outbound_from_trust", "server.ip": "54.209.101.70", "server.port": 443, "service.type": "panw", @@ -4886,12 +5536,20 @@ "destination.nat.port": 443, "destination.port": 443, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "block-url", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -4908,6 +5566,7 @@ "network.transport": "tcp", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "block-url", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "54.209.101.70", "panw.panos.destination.nat.port": 443, @@ -4929,6 +5588,7 @@ "192.168.1.63", "54.209.101.70" ], + "rule.name": "new_outbound_from_trust", "server.ip": "54.209.101.70", "server.port": 443, "service.type": "panw", @@ -4963,12 +5623,20 @@ "destination.nat.port": 443, "destination.port": 443, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "block-url", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -4985,6 +5653,7 @@ "network.transport": "tcp", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "block-url", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "54.209.101.70", "panw.panos.destination.nat.port": 443, @@ -5006,6 +5675,7 @@ "192.168.1.63", "54.209.101.70" ], + "rule.name": "new_outbound_from_trust", "server.ip": "54.209.101.70", "server.port": 443, "service.type": "panw", @@ -5040,12 +5710,20 @@ "destination.nat.port": 443, "destination.port": 443, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "block-url", + "event.outcome": "success", "event.severity": 5, 
"event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -5062,6 +5740,7 @@ "network.transport": "tcp", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "block-url", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "54.209.101.70", "panw.panos.destination.nat.port": 443, @@ -5083,6 +5762,7 @@ "192.168.1.63", "54.209.101.70" ], + "rule.name": "new_outbound_from_trust", "server.ip": "54.209.101.70", "server.port": 443, "service.type": "panw", @@ -5117,12 +5797,20 @@ "destination.nat.port": 443, "destination.port": 443, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "block-url", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -5139,6 +5827,7 @@ "network.transport": "tcp", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "block-url", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "54.209.101.70", "panw.panos.destination.nat.port": 443, @@ -5160,6 +5849,7 @@ "192.168.1.63", "54.209.101.70" ], + "rule.name": "new_outbound_from_trust", "server.ip": "54.209.101.70", "server.port": 443, "service.type": "panw", @@ -5194,12 +5884,20 @@ "destination.nat.port": 443, "destination.port": 443, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "block-url", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -5216,6 +5914,7 @@ "network.transport": "tcp", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "block-url", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "54.209.101.70", "panw.panos.destination.nat.port": 443, @@ -5237,6 +5936,7 @@ "192.168.1.63", "54.209.101.70" ], + "rule.name": "new_outbound_from_trust", "server.ip": "54.209.101.70", "server.port": 443, "service.type": "panw", @@ -5271,12 +5971,20 @@ "destination.nat.port": 443, "destination.port": 443, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "block-url", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -5293,6 +6001,7 @@ "network.transport": "tcp", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "block-url", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "54.209.101.70", "panw.panos.destination.nat.port": 443, @@ -5314,6 +6023,7 @@ "192.168.1.63", "54.209.101.70" ], + "rule.name": 
"new_outbound_from_trust", "server.ip": "54.209.101.70", "server.port": 443, "service.type": "panw", @@ -5348,12 +6058,20 @@ "destination.nat.port": 443, "destination.port": 443, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "block-url", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -5370,6 +6088,7 @@ "network.transport": "tcp", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "block-url", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "54.209.101.70", "panw.panos.destination.nat.port": 443, @@ -5391,6 +6110,7 @@ "192.168.1.63", "54.209.101.70" ], + "rule.name": "new_outbound_from_trust", "server.ip": "54.209.101.70", "server.port": 443, "service.type": "panw", @@ -5425,12 +6145,20 @@ "destination.nat.port": 443, "destination.port": 443, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "block-url", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -5447,6 +6175,7 @@ "network.transport": "tcp", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "block-url", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "54.209.101.70", "panw.panos.destination.nat.port": 443, @@ -5468,6 +6197,7 @@ "192.168.1.63", "54.209.101.70" ], + "rule.name": "new_outbound_from_trust", "server.ip": "54.209.101.70", "server.port": 443, "service.type": "panw", @@ -5502,12 +6232,20 @@ "destination.nat.port": 443, "destination.port": 443, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "block-url", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -5524,6 +6262,7 @@ "network.transport": "tcp", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "block-url", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "54.209.101.70", "panw.panos.destination.nat.port": 443, @@ -5545,6 +6284,7 @@ "192.168.1.63", "54.209.101.70" ], + "rule.name": "new_outbound_from_trust", "server.ip": "54.209.101.70", "server.port": 443, "service.type": "panw", @@ -5579,12 +6319,20 @@ "destination.nat.port": 443, "destination.port": 443, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "block-url", + "event.outcome": "success", "event.severity": 5, "event.timezone": 
"-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -5601,6 +6349,7 @@ "network.transport": "tcp", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "block-url", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "54.209.101.70", "panw.panos.destination.nat.port": 443, @@ -5622,6 +6371,7 @@ "192.168.1.63", "54.209.101.70" ], + "rule.name": "new_outbound_from_trust", "server.ip": "54.209.101.70", "server.port": 443, "service.type": "panw", @@ -5656,12 +6406,20 @@ "destination.nat.port": 443, "destination.port": 443, "event.action": "url_filtering", - "event.category": "security_threat", + "event.category": [ + "security_threat", + "intrusion_detection", + "network" + ], "event.dataset": "panw.panos", + "event.kind": "alert", "event.module": "panw", - "event.outcome": "block-url", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", + "event.type": [ + "denied" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -5678,6 +6436,7 @@ "network.transport": "tcp", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "block-url", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "54.209.101.70", "panw.panos.destination.nat.port": 443, @@ -5699,6 +6458,7 @@ "192.168.1.63", "54.209.101.70" ], + "rule.name": "new_outbound_from_trust", "server.ip": "54.209.101.70", "server.port": 443, "service.type": "panw", diff --git a/x-pack/filebeat/module/panw/panos/test/traffic.log-expected.json b/x-pack/filebeat/module/panw/panos/test/traffic.log-expected.json index 563290f9dba..9e1333f9fb8 100644 --- a/x-pack/filebeat/module/panw/panos/test/traffic.log-expected.json +++ b/x-pack/filebeat/module/panw/panos/test/traffic.log-expected.json @@ -19,14 +19,23 @@ "destination.packets": 16, "destination.port": 443, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 586000000000, "event.end": "2018-11-30T16:08:50.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2018-11-30T15:59:04.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -44,6 +53,7 @@ "network.type": "ipv4", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "184.51.253.152", "panw.panos.destination.nat.port": 443, @@ -63,6 +73,7 @@ "192.168.1.63", "184.51.253.152" ], + "rule.name": "new_outbound_from_trust", "server.bytes": 5976, "server.ip": "184.51.253.152", "server.packets": 16, @@ -99,14 +110,23 @@ "destination.packets": 6, "destination.port": 0, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2018-11-30T16:08:55.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2018-11-30T16:08:55.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + 
"connection" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -124,6 +144,7 @@ "network.type": "ipv4", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "8.8.8.8", "panw.panos.destination.nat.port": 0, @@ -143,6 +164,7 @@ "192.168.1.63", "8.8.8.8" ], + "rule.name": "new_outbound_from_trust", "server.bytes": 588, "server.ip": "8.8.8.8", "server.packets": 6, @@ -182,14 +204,23 @@ "destination.packets": 6, "destination.port": 80, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 1000000000, "event.end": "2018-11-30T16:08:52.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2018-11-30T16:08:51.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -207,6 +238,7 @@ "network.type": "ipv4", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "17.253.3.202", "panw.panos.destination.nat.port": 80, @@ -226,6 +258,7 @@ "192.168.1.63", "17.253.3.202" ], + "rule.name": "new_outbound_from_trust", "server.bytes": 1035, "server.ip": "17.253.3.202", "server.packets": 6, @@ -262,14 +295,23 @@ "destination.packets": 6, "destination.port": 0, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2018-11-30T16:09:01.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2018-11-30T16:09:01.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -287,6 +329,7 @@ "network.type": "ipv4", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "8.8.8.8", "panw.panos.destination.nat.port": 0, @@ -306,6 +349,7 @@ "192.168.1.63", "8.8.8.8" ], + "rule.name": "new_outbound_from_trust", "server.bytes": 588, "server.ip": "8.8.8.8", "server.packets": 6, @@ -345,14 +389,23 @@ "destination.packets": 5, "destination.port": 443, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2018-11-30T16:07:13.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2018-11-30T16:07:13.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -370,6 +423,7 @@ "network.type": "ipv4", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": 
"216.58.194.99", "panw.panos.destination.nat.port": 443, @@ -389,6 +443,7 @@ "192.168.1.63", "216.58.194.99" ], + "rule.name": "new_outbound_from_trust", "server.bytes": 1613, "server.ip": "216.58.194.99", "server.packets": 5, @@ -425,14 +480,23 @@ "destination.packets": 62, "destination.port": 443, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 85000000000, "event.end": "2018-11-30T16:08:58.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2018-11-30T16:07:33.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -450,6 +514,7 @@ "network.type": "ipv4", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "209.234.224.22", "panw.panos.destination.nat.port": 443, @@ -469,6 +534,7 @@ "192.168.1.63", "209.234.224.22" ], + "rule.name": "new_outbound_from_trust", "server.bytes": 21111, "server.ip": "209.234.224.22", "server.packets": 62, @@ -505,14 +571,23 @@ "destination.packets": 6, "destination.port": 0, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2018-11-30T16:09:07.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2018-11-30T16:09:07.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -530,6 +605,7 @@ "network.type": "ipv4", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "8.8.8.8", "panw.panos.destination.nat.port": 0, @@ -549,6 +625,7 @@ "192.168.1.63", "8.8.8.8" ], + "rule.name": "new_outbound_from_trust", "server.bytes": 588, "server.ip": "8.8.8.8", "server.packets": 6, @@ -585,14 +662,23 @@ "destination.packets": 7, "destination.port": 443, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 15000000000, "event.end": "2018-11-30T16:07:19.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2018-11-30T16:07:04.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -610,6 +696,7 @@ "network.type": "ipv4", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "172.217.2.238", "panw.panos.destination.nat.port": 443, @@ -629,6 +716,7 @@ "192.168.1.63", "172.217.2.238" ], + "rule.name": "new_outbound_from_trust", "server.bytes": 3732, "server.ip": "172.217.2.238", "server.packets": 7, @@ -665,14 +753,23 @@ "destination.packets": 1, "destination.port": 
53, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2018-11-30T16:08:50.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2018-11-30T16:08:50.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -690,6 +787,7 @@ "network.type": "ipv4", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "8.8.8.8", "panw.panos.destination.nat.port": 53, @@ -709,6 +807,7 @@ "192.168.1.63", "8.8.8.8" ], + "rule.name": "new_outbound_from_trust", "server.bytes": 221, "server.ip": "8.8.8.8", "server.packets": 1, @@ -745,14 +844,23 @@ "destination.packets": 1, "destination.port": 53, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2018-11-30T16:08:51.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2018-11-30T16:08:51.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -770,6 +878,7 @@ "network.type": "ipv4", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "8.8.8.8", "panw.panos.destination.nat.port": 53, @@ -789,6 +898,7 @@ "192.168.1.63", "8.8.8.8" ], + "rule.name": "new_outbound_from_trust", "server.bytes": 221, "server.ip": "8.8.8.8", "server.packets": 1, @@ -825,14 +935,23 @@ "destination.packets": 16, "destination.port": 443, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 593000000000, "event.end": "2018-11-30T16:08:52.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2018-11-30T15:58:59.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -850,6 +969,7 @@ "network.type": "ipv4", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "17.249.60.78", "panw.panos.destination.nat.port": 443, @@ -869,6 +989,7 @@ "192.168.1.63", "17.249.60.78" ], + "rule.name": "new_outbound_from_trust", "server.bytes": 5469, "server.ip": "17.249.60.78", "server.packets": 16, @@ -905,14 +1026,23 @@ "destination.packets": 1, "destination.port": 53, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2018-11-30T16:08:52.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + 
"event.outcome": "success", "event.start": "2018-11-30T16:08:52.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -930,6 +1060,7 @@ "network.type": "ipv4", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "8.8.8.8", "panw.panos.destination.nat.port": 53, @@ -949,6 +1080,7 @@ "192.168.1.63", "8.8.8.8" ], + "rule.name": "new_outbound_from_trust", "server.bytes": 224, "server.ip": "8.8.8.8", "server.packets": 1, @@ -985,14 +1117,23 @@ "destination.packets": 1, "destination.port": 53, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2018-11-30T16:08:52.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2018-11-30T16:08:52.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -1010,6 +1151,7 @@ "network.type": "ipv4", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "8.8.8.8", "panw.panos.destination.nat.port": 53, @@ -1029,6 +1171,7 @@ "192.168.1.63", "8.8.8.8" ], + "rule.name": "new_outbound_from_trust", "server.bytes": 117, "server.ip": "8.8.8.8", "server.packets": 1, @@ -1065,14 +1208,23 @@ "destination.packets": 1, "destination.port": 53, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2018-11-30T16:08:52.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2018-11-30T16:08:52.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -1090,6 +1242,7 @@ "network.type": "ipv4", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "8.8.8.8", "panw.panos.destination.nat.port": 53, @@ -1109,6 +1262,7 @@ "192.168.1.63", "8.8.8.8" ], + "rule.name": "new_outbound_from_trust", "server.bytes": 307, "server.ip": "8.8.8.8", "server.packets": 1, @@ -1145,14 +1299,23 @@ "destination.packets": 1, "destination.port": 53, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2018-11-30T16:08:52.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2018-11-30T16:08:52.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -1170,6 +1333,7 @@ "network.type": "ipv4", "observer.hostname": "PA-220", "observer.serial_number": 
"012801096514", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "8.8.8.8", "panw.panos.destination.nat.port": 53, @@ -1189,6 +1353,7 @@ "192.168.1.63", "8.8.8.8" ], + "rule.name": "new_outbound_from_trust", "server.bytes": 365, "server.ip": "8.8.8.8", "server.packets": 1, @@ -1225,14 +1390,23 @@ "destination.packets": 6, "destination.port": 0, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2018-11-30T16:09:13.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2018-11-30T16:09:13.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -1250,6 +1424,7 @@ "network.type": "ipv4", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "8.8.8.8", "panw.panos.destination.nat.port": 0, @@ -1269,6 +1444,7 @@ "192.168.1.63", "8.8.8.8" ], + "rule.name": "new_outbound_from_trust", "server.bytes": 588, "server.ip": "8.8.8.8", "server.packets": 6, @@ -1305,14 +1481,23 @@ "destination.packets": 1, "destination.port": 53, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 1000000000, "event.end": "2018-11-30T16:08:55.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2018-11-30T16:08:54.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -1330,6 +1515,7 @@ "network.type": "ipv4", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "8.8.8.8", "panw.panos.destination.nat.port": 53, @@ -1349,6 +1535,7 @@ "192.168.1.63", "8.8.8.8" ], + "rule.name": "new_outbound_from_trust", "server.bytes": 161, "server.ip": "8.8.8.8", "server.packets": 1, @@ -1385,14 +1572,23 @@ "destination.packets": 14, "destination.port": 443, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 17000000000, "event.end": "2018-11-30T16:09:11.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2018-11-30T16:08:54.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -1410,6 +1606,7 @@ "network.type": "ipv4", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "98.138.49.44", "panw.panos.destination.nat.port": 443, @@ -1429,6 +1626,7 @@ "192.168.1.63", "98.138.49.44" ], + "rule.name": "new_outbound_from_trust", "server.bytes": 7805, "server.ip": 
"98.138.49.44", "server.packets": 14, @@ -1465,14 +1663,23 @@ "destination.packets": 13, "destination.port": 443, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 17000000000, "event.end": "2018-11-30T16:09:11.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2018-11-30T16:08:54.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -1490,6 +1697,7 @@ "network.type": "ipv4", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "72.30.3.43", "panw.panos.destination.nat.port": 443, @@ -1509,6 +1717,7 @@ "192.168.1.63", "72.30.3.43" ], + "rule.name": "new_outbound_from_trust", "server.bytes": 6106, "server.ip": "72.30.3.43", "server.packets": 13, @@ -1545,14 +1754,23 @@ "destination.packets": 2, "destination.port": 0, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2018-11-30T16:09:15.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2018-11-30T16:09:15.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -1570,6 +1788,7 @@ "network.type": "ipv4", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "8.8.8.8", "panw.panos.destination.nat.port": 0, @@ -1589,6 +1808,7 @@ "192.168.1.63", "8.8.8.8" ], + "rule.name": "new_outbound_from_trust", "server.bytes": 196, "server.ip": "8.8.8.8", "server.packets": 2, @@ -1625,14 +1845,23 @@ "destination.packets": 19, "destination.port": 80, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 116000000000, "event.end": "2018-11-30T16:09:12.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2018-11-30T16:07:16.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -1650,6 +1879,7 @@ "network.type": "ipv4", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "172.217.9.142", "panw.panos.destination.nat.port": 80, @@ -1669,6 +1899,7 @@ "192.168.1.63", "172.217.9.142" ], + "rule.name": "new_outbound_from_trust", "server.bytes": 3245, "server.ip": "172.217.9.142", "server.packets": 19, @@ -1705,14 +1936,23 @@ "destination.packets": 1, "destination.port": 53, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", 
"event.duration": 0, "event.end": "2018-11-30T16:08:57.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2018-11-30T16:08:57.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -1730,6 +1970,7 @@ "network.type": "ipv4", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "8.8.8.8", "panw.panos.destination.nat.port": 53, @@ -1749,6 +1990,7 @@ "192.168.1.63", "8.8.8.8" ], + "rule.name": "new_outbound_from_trust", "server.bytes": 179, "server.ip": "8.8.8.8", "server.packets": 1, @@ -1788,14 +2030,23 @@ "destination.packets": 13, "destination.port": 443, "event.action": "flow_started", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2018-11-30T16:09:13.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2018-11-30T16:09:13.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "start", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -1813,6 +2064,7 @@ "network.type": "ipv4", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "54.84.80.198", "panw.panos.destination.nat.port": 443, @@ -1832,6 +2084,7 @@ "192.168.1.63", "54.84.80.198" ], + "rule.name": "new_outbound_from_trust", "server.bytes": 4537, "server.ip": "54.84.80.198", "server.packets": 13, @@ -1869,14 +2122,23 @@ "destination.packets": 8, "destination.port": 4282, "event.action": "flow_dropped", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 13000000000, "event.end": "2018-11-30T16:09:25.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2018-11-30T16:09:12.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "denied", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -1894,6 +2156,7 @@ "network.type": "ipv4", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "199.167.55.52", "panw.panos.destination.nat.port": 4282, @@ -1913,6 +2176,7 @@ "192.168.1.63", "199.167.55.52" ], + "rule.name": "new_outbound_from_trust", "server.bytes": 0, "server.ip": "199.167.55.52", "server.packets": 8, @@ -1949,14 +2213,23 @@ "destination.packets": 6, "destination.port": 0, "event.action": "flow_denied", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2018-11-30T16:09:19.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2018-11-30T16:09:19.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "denied", + 
"connection" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -1974,6 +2247,7 @@ "network.type": "ipv4", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "8.8.8.8", "panw.panos.destination.nat.port": 0, @@ -1993,6 +2267,7 @@ "192.168.1.63", "8.8.8.8" ], + "rule.name": "new_outbound_from_trust", "server.bytes": 588, "server.ip": "8.8.8.8", "server.packets": 6, @@ -2028,14 +2303,21 @@ "destination.nat.port": 53, "destination.packets": 1, "destination.port": 53, - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2018-11-30T16:09:02.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2018-11-30T16:09:02.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -2053,6 +2335,7 @@ "network.type": "ipv4", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "8.8.8.8", "panw.panos.destination.nat.port": 53, @@ -2072,6 +2355,7 @@ "192.168.1.63", "8.8.8.8" ], + "rule.name": "new_outbound_from_trust", "server.bytes": 130, "server.ip": "8.8.8.8", "server.packets": 1, @@ -2107,14 +2391,21 @@ "destination.nat.port": 443, "destination.packets": 6, "destination.port": 443, - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 15000000000, "event.end": "2018-11-30T16:07:35.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2018-11-30T16:07:20.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -2132,6 +2423,7 @@ "network.type": "ipv4", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "172.217.9.142", "panw.panos.destination.nat.port": 443, @@ -2151,6 +2443,7 @@ "192.168.1.63", "172.217.9.142" ], + "rule.name": "new_outbound_from_trust", "server.bytes": 1991, "server.ip": "172.217.9.142", "server.packets": 6, @@ -2187,14 +2480,23 @@ "destination.packets": 8, "destination.port": 443, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2018-11-30T16:09:21.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2018-11-30T16:09:21.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -2212,6 +2514,7 @@ "network.type": "ipv4", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "151.101.2.2", 
"panw.panos.destination.nat.port": 443, @@ -2231,6 +2534,7 @@ "192.168.1.63", "151.101.2.2" ], + "rule.name": "new_outbound_from_trust", "server.bytes": 523, "server.ip": "151.101.2.2", "server.packets": 8, @@ -2270,14 +2574,23 @@ "destination.packets": 5, "destination.port": 443, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2018-11-30T16:07:36.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2018-11-30T16:07:36.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -2295,6 +2608,7 @@ "network.type": "ipv4", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "216.58.194.66", "panw.panos.destination.nat.port": 443, @@ -2314,6 +2628,7 @@ "192.168.1.63", "216.58.194.66" ], + "rule.name": "new_outbound_from_trust", "server.bytes": 2428, "server.ip": "216.58.194.66", "server.packets": 5, @@ -2350,14 +2665,23 @@ "destination.packets": 6, "destination.port": 0, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2018-11-30T16:09:25.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2018-11-30T16:09:25.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -2375,6 +2699,7 @@ "network.type": "ipv4", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "8.8.8.8", "panw.panos.destination.nat.port": 0, @@ -2394,6 +2719,7 @@ "192.168.1.63", "8.8.8.8" ], + "rule.name": "new_outbound_from_trust", "server.bytes": 588, "server.ip": "8.8.8.8", "server.packets": 6, @@ -2430,14 +2756,23 @@ "destination.packets": 2, "destination.port": 0, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2018-11-30T16:09:25.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2018-11-30T16:09:25.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -2455,6 +2790,7 @@ "network.type": "ipv4", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "8.8.8.8", "panw.panos.destination.nat.port": 0, @@ -2474,6 +2810,7 @@ "192.168.1.63", "8.8.8.8" ], + "rule.name": "new_outbound_from_trust", "server.bytes": 196, "server.ip": "8.8.8.8", "server.packets": 2, @@ -2510,14 +2847,23 @@ "destination.packets": 12, "destination.port": 443, "event.action": "flow_terminated", - 
"event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2018-11-30T16:09:22.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2018-11-30T16:09:22.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -2535,6 +2881,7 @@ "network.type": "ipv4", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "184.51.253.193", "panw.panos.destination.nat.port": 443, @@ -2554,6 +2901,7 @@ "192.168.1.63", "184.51.253.193" ], + "rule.name": "new_outbound_from_trust", "server.bytes": 5003, "server.ip": "184.51.253.193", "server.packets": 12, @@ -2590,14 +2938,23 @@ "destination.packets": 1, "destination.port": 53, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2018-11-30T16:09:08.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2018-11-30T16:09:08.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -2615,6 +2972,7 @@ "network.type": "ipv4", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "8.8.8.8", "panw.panos.destination.nat.port": 53, @@ -2634,6 +2992,7 @@ "192.168.1.63", "8.8.8.8" ], + "rule.name": "new_outbound_from_trust", "server.bytes": 171, "server.ip": "8.8.8.8", "server.packets": 1, @@ -2671,14 +3030,23 @@ "destination.packets": 1, "destination.port": 4282, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2018-11-30T16:09:33.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2018-11-30T16:09:33.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -2696,6 +3064,7 @@ "network.type": "ipv4", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "199.167.55.52", "panw.panos.destination.nat.port": 4282, @@ -2715,6 +3084,7 @@ "192.168.1.63", "199.167.55.52" ], + "rule.name": "new_outbound_from_trust", "server.bytes": 0, "server.ip": "199.167.55.52", "server.packets": 1, @@ -2754,14 +3124,23 @@ "destination.packets": 11, "destination.port": 17472, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2018-11-30T16:09:25.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": 
"success", "event.start": "2018-11-30T16:09:25.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -2779,6 +3158,7 @@ "network.type": "ipv4", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "199.167.52.219", "panw.panos.destination.nat.port": 17472, @@ -2798,6 +3178,7 @@ "192.168.1.63", "199.167.52.219" ], + "rule.name": "new_outbound_from_trust", "server.bytes": 2316, "server.ip": "199.167.52.219", "server.packets": 11, @@ -2837,14 +3218,23 @@ "destination.packets": 19, "destination.port": 443, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 4000000000, "event.end": "2018-11-30T16:09:25.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2018-11-30T16:09:21.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -2862,6 +3252,7 @@ "network.type": "ipv4", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "52.71.117.196", "panw.panos.destination.nat.port": 443, @@ -2881,6 +3272,7 @@ "192.168.1.63", "52.71.117.196" ], + "rule.name": "new_outbound_from_trust", "server.bytes": 13966, "server.ip": "52.71.117.196", "server.packets": 19, @@ -2917,14 +3309,23 @@ "destination.packets": 1, "destination.port": 53, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2018-11-30T16:09:12.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2018-11-30T16:09:12.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -2942,6 +3343,7 @@ "network.type": "ipv4", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "8.8.8.8", "panw.panos.destination.nat.port": 53, @@ -2961,6 +3363,7 @@ "192.168.1.63", "8.8.8.8" ], + "rule.name": "new_outbound_from_trust", "server.bytes": 244, "server.ip": "8.8.8.8", "server.packets": 1, @@ -2997,14 +3400,23 @@ "destination.packets": 1, "destination.port": 53, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2018-11-30T16:09:12.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2018-11-30T16:09:12.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -3022,6 +3434,7 @@ "network.type": "ipv4", 
"observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "8.8.8.8", "panw.panos.destination.nat.port": 53, @@ -3041,6 +3454,7 @@ "192.168.1.63", "8.8.8.8" ], + "rule.name": "new_outbound_from_trust", "server.bytes": 205, "server.ip": "8.8.8.8", "server.packets": 1, @@ -3080,14 +3494,23 @@ "destination.packets": 24, "destination.port": 443, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 8000000000, "event.end": "2018-11-30T16:09:27.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2018-11-30T16:09:19.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -3105,6 +3528,7 @@ "network.type": "ipv4", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "35.186.194.41", "panw.panos.destination.nat.port": 443, @@ -3124,6 +3548,7 @@ "192.168.1.63", "35.186.194.41" ], + "rule.name": "new_outbound_from_trust", "server.bytes": 2302, "server.ip": "35.186.194.41", "server.packets": 24, @@ -3159,14 +3584,23 @@ "destination.packets": 63, "destination.port": 443, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 8000000000, "event.end": "2018-11-30T16:09:27.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2018-11-30T16:09:19.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -3184,6 +3618,7 @@ "network.type": "ipv4", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "35.201.124.9", "panw.panos.destination.nat.port": 443, @@ -3203,6 +3638,7 @@ "192.168.1.63", "35.201.124.9" ], + "rule.name": "new_outbound_from_trust", "server.bytes": 6757, "server.ip": "35.201.124.9", "server.packets": 63, @@ -3242,14 +3678,23 @@ "destination.packets": 17, "destination.port": 443, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 6000000000, "event.end": "2018-11-30T16:09:27.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2018-11-30T16:09:21.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -3267,6 +3712,7 @@ "network.type": "ipv4", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "100.24.131.237", "panw.panos.destination.nat.port": 443, @@ -3286,6 +3732,7 @@ 
"192.168.1.63", "100.24.131.237" ], + "rule.name": "new_outbound_from_trust", "server.bytes": 9007, "server.ip": "100.24.131.237", "server.packets": 17, @@ -3322,14 +3769,23 @@ "destination.packets": 8, "destination.port": 443, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 13000000000, "event.end": "2018-11-30T16:09:27.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2018-11-30T16:09:14.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -3347,6 +3803,7 @@ "network.type": "ipv4", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "184.51.252.247", "panw.panos.destination.nat.port": 443, @@ -3366,6 +3823,7 @@ "192.168.1.63", "184.51.252.247" ], + "rule.name": "new_outbound_from_trust", "server.bytes": 661, "server.ip": "184.51.252.247", "server.packets": 8, @@ -3405,14 +3863,23 @@ "destination.packets": 15, "destination.port": 443, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 8000000000, "event.end": "2018-11-30T16:09:27.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2018-11-30T16:09:19.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -3430,6 +3897,7 @@ "network.type": "ipv4", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "35.190.88.148", "panw.panos.destination.nat.port": 443, @@ -3449,6 +3917,7 @@ "192.168.1.63", "35.190.88.148" ], + "rule.name": "new_outbound_from_trust", "server.bytes": 11136, "server.ip": "35.190.88.148", "server.packets": 15, @@ -3488,14 +3957,23 @@ "destination.packets": 15, "destination.port": 443, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 8000000000, "event.end": "2018-11-30T16:09:27.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2018-11-30T16:09:19.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -3513,6 +3991,7 @@ "network.type": "ipv4", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "35.186.243.83", "panw.panos.destination.nat.port": 443, @@ -3532,6 +4011,7 @@ "192.168.1.63", "35.186.243.83" ], + "rule.name": "new_outbound_from_trust", "server.bytes": 11136, "server.ip": "35.186.243.83", "server.packets": 15, @@ -3568,14 +4048,23 @@ "destination.packets": 1, "destination.port": 53, "event.action": 
"flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2018-11-30T16:09:12.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2018-11-30T16:09:12.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -3593,6 +4082,7 @@ "network.type": "ipv4", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "8.8.8.8", "panw.panos.destination.nat.port": 53, @@ -3612,6 +4102,7 @@ "192.168.1.63", "8.8.8.8" ], + "rule.name": "new_outbound_from_trust", "server.bytes": 182, "server.ip": "8.8.8.8", "server.packets": 1, @@ -3648,14 +4139,23 @@ "destination.packets": 1, "destination.port": 53, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2018-11-30T16:09:12.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2018-11-30T16:09:12.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -3673,6 +4173,7 @@ "network.type": "ipv4", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "8.8.8.8", "panw.panos.destination.nat.port": 53, @@ -3692,6 +4193,7 @@ "192.168.1.63", "8.8.8.8" ], + "rule.name": "new_outbound_from_trust", "server.bytes": 90, "server.ip": "8.8.8.8", "server.packets": 1, @@ -3731,14 +4233,23 @@ "destination.packets": 17, "destination.port": 443, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 6000000000, "event.end": "2018-11-30T16:09:27.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2018-11-30T16:09:21.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -3756,6 +4267,7 @@ "network.type": "ipv4", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "100.24.165.74", "panw.panos.destination.nat.port": 443, @@ -3775,6 +4287,7 @@ "192.168.1.63", "100.24.165.74" ], + "rule.name": "new_outbound_from_trust", "server.bytes": 6669, "server.ip": "100.24.165.74", "server.packets": 17, @@ -3811,14 +4324,23 @@ "destination.packets": 8, "destination.port": 443, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 13000000000, "event.end": "2018-11-30T16:09:27.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + 
"event.outcome": "success", "event.start": "2018-11-30T16:09:14.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -3836,6 +4358,7 @@ "network.type": "ipv4", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "184.51.252.247", "panw.panos.destination.nat.port": 443, @@ -3855,6 +4378,7 @@ "192.168.1.63", "184.51.252.247" ], + "rule.name": "new_outbound_from_trust", "server.bytes": 661, "server.ip": "184.51.252.247", "server.packets": 8, @@ -3890,14 +4414,23 @@ "destination.packets": 15, "destination.port": 443, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 8000000000, "event.end": "2018-11-30T16:09:27.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2018-11-30T16:09:19.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -3915,6 +4448,7 @@ "network.type": "ipv4", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "35.201.94.140", "panw.panos.destination.nat.port": 443, @@ -3934,6 +4468,7 @@ "192.168.1.63", "35.201.94.140" ], + "rule.name": "new_outbound_from_trust", "server.bytes": 11136, "server.ip": "35.201.94.140", "server.packets": 15, @@ -3970,14 +4505,23 @@ "destination.packets": 6, "destination.port": 0, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2018-11-30T16:09:31.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2018-11-30T16:09:31.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -3995,6 +4539,7 @@ "network.type": "ipv4", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "8.8.8.8", "panw.panos.destination.nat.port": 0, @@ -4012,6 +4557,7 @@ "192.168.1.63", "8.8.8.8" ], + "rule.name": "new_outbound_from_trust", "server.bytes": 588, "server.ip": "8.8.8.8", "server.packets": 6, @@ -4048,14 +4594,23 @@ "destination.packets": 1, "destination.port": 53, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2018-11-30T16:09:13.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2018-11-30T16:09:13.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -4073,6 +4628,7 @@ "network.type": "ipv4", 
"observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "8.8.8.8", "panw.panos.destination.nat.port": 53, @@ -4092,6 +4648,7 @@ "192.168.1.63", "8.8.8.8" ], + "rule.name": "new_outbound_from_trust", "server.bytes": 144, "server.ip": "8.8.8.8", "server.packets": 1, @@ -4128,14 +4685,23 @@ "destination.packets": 1, "destination.port": 53, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2018-11-30T16:09:13.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2018-11-30T16:09:13.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -4153,6 +4719,7 @@ "network.type": "ipv4", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "8.8.8.8", "panw.panos.destination.nat.port": 53, @@ -4172,6 +4739,7 @@ "192.168.1.63", "8.8.8.8" ], + "rule.name": "new_outbound_from_trust", "server.bytes": 206, "server.ip": "8.8.8.8", "server.packets": 1, @@ -4208,14 +4776,23 @@ "destination.packets": 1, "destination.port": 53, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2018-11-30T16:09:13.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2018-11-30T16:09:13.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -4233,6 +4810,7 @@ "network.type": "ipv4", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "8.8.8.8", "panw.panos.destination.nat.port": 53, @@ -4252,6 +4830,7 @@ "192.168.1.63", "8.8.8.8" ], + "rule.name": "new_outbound_from_trust", "server.bytes": 206, "server.ip": "8.8.8.8", "server.packets": 1, @@ -4288,14 +4867,23 @@ "destination.packets": 1, "destination.port": 53, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2018-11-30T16:09:13.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2018-11-30T16:09:13.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -4313,6 +4901,7 @@ "network.type": "ipv4", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "8.8.8.8", "panw.panos.destination.nat.port": 53, @@ -4332,6 +4921,7 @@ "192.168.1.63", "8.8.8.8" ], + "rule.name": "new_outbound_from_trust", 
"server.bytes": 169, "server.ip": "8.8.8.8", "server.packets": 1, @@ -4368,14 +4958,23 @@ "destination.packets": 1, "destination.port": 53, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2018-11-30T16:09:13.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2018-11-30T16:09:13.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -4393,6 +4992,7 @@ "network.type": "ipv4", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "8.8.8.8", "panw.panos.destination.nat.port": 53, @@ -4412,6 +5012,7 @@ "192.168.1.63", "8.8.8.8" ], + "rule.name": "new_outbound_from_trust", "server.bytes": 132, "server.ip": "8.8.8.8", "server.packets": 1, @@ -4448,14 +5049,23 @@ "destination.packets": 1, "destination.port": 53, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2018-11-30T16:09:13.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2018-11-30T16:09:13.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -4473,6 +5083,7 @@ "network.type": "ipv4", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "8.8.8.8", "panw.panos.destination.nat.port": 53, @@ -4492,6 +5103,7 @@ "192.168.1.63", "8.8.8.8" ], + "rule.name": "new_outbound_from_trust", "server.bytes": 127, "server.ip": "8.8.8.8", "server.packets": 1, @@ -4528,14 +5140,23 @@ "destination.packets": 1, "destination.port": 53, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2018-11-30T16:09:13.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2018-11-30T16:09:13.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -4553,6 +5174,7 @@ "network.type": "ipv4", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "8.8.8.8", "panw.panos.destination.nat.port": 53, @@ -4572,6 +5194,7 @@ "192.168.1.63", "8.8.8.8" ], + "rule.name": "new_outbound_from_trust", "server.bytes": 105, "server.ip": "8.8.8.8", "server.packets": 1, @@ -4608,14 +5231,23 @@ "destination.packets": 1, "destination.port": 53, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": 
"2018-11-30T16:09:13.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2018-11-30T16:09:13.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -4633,6 +5265,7 @@ "network.type": "ipv4", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "8.8.8.8", "panw.panos.destination.nat.port": 53, @@ -4652,6 +5285,7 @@ "192.168.1.63", "8.8.8.8" ], + "rule.name": "new_outbound_from_trust", "server.bytes": 172, "server.ip": "8.8.8.8", "server.packets": 1, @@ -4688,14 +5322,23 @@ "destination.packets": 1, "destination.port": 53, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2018-11-30T16:09:13.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2018-11-30T16:09:13.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -4713,6 +5356,7 @@ "network.type": "ipv4", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "8.8.8.8", "panw.panos.destination.nat.port": 53, @@ -4732,6 +5376,7 @@ "192.168.1.63", "8.8.8.8" ], + "rule.name": "new_outbound_from_trust", "server.bytes": 134, "server.ip": "8.8.8.8", "server.packets": 1, @@ -4768,14 +5413,23 @@ "destination.packets": 1, "destination.port": 53, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2018-11-30T16:09:13.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2018-11-30T16:09:13.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -4793,6 +5447,7 @@ "network.type": "ipv4", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "8.8.8.8", "panw.panos.destination.nat.port": 53, @@ -4812,6 +5467,7 @@ "192.168.1.63", "8.8.8.8" ], + "rule.name": "new_outbound_from_trust", "server.bytes": 179, "server.ip": "8.8.8.8", "server.packets": 1, @@ -4848,14 +5504,23 @@ "destination.packets": 1, "destination.port": 53, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2018-11-30T16:09:13.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2018-11-30T16:09:13.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", 
"labels.nat_translated": true, @@ -4873,6 +5538,7 @@ "network.type": "ipv4", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "8.8.8.8", "panw.panos.destination.nat.port": 53, @@ -4892,6 +5558,7 @@ "192.168.1.63", "8.8.8.8" ], + "rule.name": "new_outbound_from_trust", "server.bytes": 218, "server.ip": "8.8.8.8", "server.packets": 1, @@ -4928,14 +5595,23 @@ "destination.packets": 1, "destination.port": 53, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2018-11-30T16:09:13.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2018-11-30T16:09:13.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -4953,6 +5629,7 @@ "network.type": "ipv4", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "8.8.8.8", "panw.panos.destination.nat.port": 53, @@ -4972,6 +5649,7 @@ "192.168.1.63", "8.8.8.8" ], + "rule.name": "new_outbound_from_trust", "server.bytes": 172, "server.ip": "8.8.8.8", "server.packets": 1, @@ -5008,14 +5686,23 @@ "destination.packets": 1, "destination.port": 53, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2018-11-30T16:09:13.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2018-11-30T16:09:13.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -5033,6 +5720,7 @@ "network.type": "ipv4", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "8.8.8.8", "panw.panos.destination.nat.port": 53, @@ -5052,6 +5740,7 @@ "192.168.1.63", "8.8.8.8" ], + "rule.name": "new_outbound_from_trust", "server.bytes": 305, "server.ip": "8.8.8.8", "server.packets": 1, @@ -5091,14 +5780,23 @@ "destination.packets": 1, "destination.port": 53, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2018-11-30T16:09:14.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2018-11-30T16:09:14.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -5116,6 +5814,7 @@ "network.type": "ipv4", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "66.28.0.45", "panw.panos.destination.nat.port": 53, @@ -5135,6 +5834,7 @@ 
"192.168.1.63", "66.28.0.45" ], + "rule.name": "new_outbound_from_trust", "server.bytes": 527, "server.ip": "66.28.0.45", "server.packets": 1, @@ -5171,14 +5871,23 @@ "destination.packets": 1, "destination.port": 53, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2018-11-30T16:09:14.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2018-11-30T16:09:14.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -5196,6 +5905,7 @@ "network.type": "ipv4", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "8.8.8.8", "panw.panos.destination.nat.port": 53, @@ -5215,6 +5925,7 @@ "192.168.1.63", "8.8.8.8" ], + "rule.name": "new_outbound_from_trust", "server.bytes": 153, "server.ip": "8.8.8.8", "server.packets": 1, @@ -5251,14 +5962,23 @@ "destination.packets": 1, "destination.port": 53, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2018-11-30T16:09:14.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2018-11-30T16:09:14.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -5276,6 +5996,7 @@ "network.type": "ipv4", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "8.8.8.8", "panw.panos.destination.nat.port": 53, @@ -5295,6 +6016,7 @@ "192.168.1.63", "8.8.8.8" ], + "rule.name": "new_outbound_from_trust", "server.bytes": 169, "server.ip": "8.8.8.8", "server.packets": 1, @@ -5331,14 +6053,23 @@ "destination.packets": 1, "destination.port": 53, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2018-11-30T16:09:14.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2018-11-30T16:09:14.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -5356,6 +6087,7 @@ "network.type": "ipv4", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "8.8.8.8", "panw.panos.destination.nat.port": 53, @@ -5375,6 +6107,7 @@ "192.168.1.63", "8.8.8.8" ], + "rule.name": "new_outbound_from_trust", "server.bytes": 128, "server.ip": "8.8.8.8", "server.packets": 1, @@ -5411,14 +6144,23 @@ "destination.packets": 1, "destination.port": 53, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + 
"network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2018-11-30T16:09:14.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2018-11-30T16:09:14.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -5436,6 +6178,7 @@ "network.type": "ipv4", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "8.8.8.8", "panw.panos.destination.nat.port": 53, @@ -5455,6 +6198,7 @@ "192.168.1.63", "8.8.8.8" ], + "rule.name": "new_outbound_from_trust", "server.bytes": 181, "server.ip": "8.8.8.8", "server.packets": 1, @@ -5491,14 +6235,23 @@ "destination.packets": 1, "destination.port": 53, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2018-11-30T16:09:14.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2018-11-30T16:09:14.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -5516,6 +6269,7 @@ "network.type": "ipv4", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "8.8.8.8", "panw.panos.destination.nat.port": 53, @@ -5535,6 +6289,7 @@ "192.168.1.63", "8.8.8.8" ], + "rule.name": "new_outbound_from_trust", "server.bytes": 121, "server.ip": "8.8.8.8", "server.packets": 1, @@ -5574,14 +6329,23 @@ "destination.packets": 6, "destination.port": 80, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2018-11-30T16:09:29.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2018-11-30T16:09:29.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -5599,6 +6363,7 @@ "network.type": "ipv4", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "23.52.174.25", "panw.panos.destination.nat.port": 80, @@ -5618,6 +6383,7 @@ "192.168.1.63", "23.52.174.25" ], + "rule.name": "new_outbound_from_trust", "server.bytes": 1246, "server.ip": "23.52.174.25", "server.packets": 6, @@ -5654,14 +6420,23 @@ "destination.packets": 1, "destination.port": 53, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 1000000000, "event.end": "2018-11-30T16:09:14.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2018-11-30T16:09:13.000-02:00", "event.timezone": "-02:00", + "event.type": [ + 
"allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -5679,6 +6454,7 @@ "network.type": "ipv4", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "8.8.8.8", "panw.panos.destination.nat.port": 53, @@ -5698,6 +6474,7 @@ "192.168.1.63", "8.8.8.8" ], + "rule.name": "new_outbound_from_trust", "server.bytes": 315, "server.ip": "8.8.8.8", "server.packets": 1, @@ -5734,14 +6511,23 @@ "destination.packets": 1, "destination.port": 53, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2018-11-30T16:09:14.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2018-11-30T16:09:14.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -5759,6 +6545,7 @@ "network.type": "ipv4", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "8.8.8.8", "panw.panos.destination.nat.port": 53, @@ -5778,6 +6565,7 @@ "192.168.1.63", "8.8.8.8" ], + "rule.name": "new_outbound_from_trust", "server.bytes": 130, "server.ip": "8.8.8.8", "server.packets": 1, @@ -5817,14 +6605,23 @@ "destination.packets": 5, "destination.port": 443, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 12000000000, "event.end": "2018-11-30T16:09:29.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2018-11-30T16:09:17.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -5842,6 +6639,7 @@ "network.type": "ipv4", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "54.230.5.228", "panw.panos.destination.nat.port": 443, @@ -5861,6 +6659,7 @@ "192.168.1.63", "54.230.5.228" ], + "rule.name": "new_outbound_from_trust", "server.bytes": 288, "server.ip": "54.230.5.228", "server.packets": 5, @@ -5897,14 +6696,23 @@ "destination.packets": 1, "destination.port": 53, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2018-11-30T16:09:14.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2018-11-30T16:09:14.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -5922,6 +6730,7 @@ "network.type": "ipv4", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "allow", "panw.panos.destination.interface": 
"ethernet1/1", "panw.panos.destination.nat.ip": "8.8.8.8", "panw.panos.destination.nat.port": 53, @@ -5941,6 +6750,7 @@ "192.168.1.63", "8.8.8.8" ], + "rule.name": "new_outbound_from_trust", "server.bytes": 149, "server.ip": "8.8.8.8", "server.packets": 1, @@ -5977,14 +6787,23 @@ "destination.packets": 1, "destination.port": 53, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2018-11-30T16:09:15.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2018-11-30T16:09:15.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -6002,6 +6821,7 @@ "network.type": "ipv4", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "8.8.8.8", "panw.panos.destination.nat.port": 53, @@ -6021,6 +6841,7 @@ "192.168.1.63", "8.8.8.8" ], + "rule.name": "new_outbound_from_trust", "server.bytes": 202, "server.ip": "8.8.8.8", "server.packets": 1, @@ -6057,14 +6878,23 @@ "destination.packets": 1, "destination.port": 53, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2018-11-30T16:09:15.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2018-11-30T16:09:15.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -6082,6 +6912,7 @@ "network.type": "ipv4", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "8.8.8.8", "panw.panos.destination.nat.port": 53, @@ -6101,6 +6932,7 @@ "192.168.1.63", "8.8.8.8" ], + "rule.name": "new_outbound_from_trust", "server.bytes": 195, "server.ip": "8.8.8.8", "server.packets": 1, @@ -6137,14 +6969,23 @@ "destination.packets": 1, "destination.port": 123, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2018-11-30T16:09:15.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2018-11-30T16:09:15.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -6162,6 +7003,7 @@ "network.type": "ipv4", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "208.83.246.20", "panw.panos.destination.nat.port": 123, @@ -6181,6 +7023,7 @@ "192.168.1.63", "208.83.246.20" ], + "rule.name": "new_outbound_from_trust", "server.bytes": 90, "server.ip": "208.83.246.20", "server.packets": 1, @@ -6217,14 +7060,22 @@ "destination.packets": 2, 
"destination.port": 53, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2018-11-30T16:09:16.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "drop-icmp", + "event.outcome": "success", "event.start": "2018-11-30T16:09:16.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -6242,6 +7093,7 @@ "network.type": "ipv4", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "drop-icmp", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "8.8.8.8", "panw.panos.destination.nat.port": 53, @@ -6261,6 +7113,7 @@ "192.168.1.63", "8.8.8.8" ], + "rule.name": "new_outbound_from_trust", "server.bytes": 192, "server.ip": "8.8.8.8", "server.packets": 2, @@ -6297,14 +7150,22 @@ "destination.packets": 1, "destination.port": 53, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2018-11-30T16:09:16.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "reset-client", + "event.outcome": "success", "event.start": "2018-11-30T16:09:16.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -6322,6 +7183,7 @@ "network.type": "ipv4", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "reset-client", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "8.8.8.8", "panw.panos.destination.nat.port": 53, @@ -6341,6 +7203,7 @@ "192.168.1.63", "8.8.8.8" ], + "rule.name": "new_outbound_from_trust", "server.bytes": 208, "server.ip": "8.8.8.8", "server.packets": 1, @@ -6377,14 +7240,22 @@ "destination.packets": 1, "destination.port": 53, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2018-11-30T16:09:16.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "reset-server", + "event.outcome": "success", "event.start": "2018-11-30T16:09:16.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -6402,6 +7273,7 @@ "network.type": "ipv4", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "reset-server", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "8.8.8.8", "panw.panos.destination.nat.port": 53, @@ -6421,6 +7293,7 @@ "192.168.1.63", "8.8.8.8" ], + "rule.name": "new_outbound_from_trust", "server.bytes": 100, "server.ip": "8.8.8.8", "server.packets": 1, @@ -6459,14 +7332,22 @@ "destination.packets": 13, "destination.port": 443, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 10000000000, "event.end": "2018-11-30T16:09:31.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": 
"reset-both", + "event.outcome": "success", "event.start": "2018-11-30T16:09:21.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -6484,6 +7365,7 @@ "network.type": "ipv4", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "reset-both", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "35.185.88.112", "panw.panos.destination.nat.port": 443, @@ -6503,6 +7385,7 @@ "192.168.1.63", "35.185.88.112" ], + "rule.name": "new_outbound_from_trust", "server.bytes": 7237, "server.ip": "35.185.88.112", "server.packets": 13, @@ -6539,14 +7422,23 @@ "destination.packets": 1, "destination.port": 53, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2018-11-30T16:09:16.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2018-11-30T16:09:16.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -6564,6 +7456,7 @@ "network.type": "ipv4", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "8.8.8.8", "panw.panos.destination.nat.port": 53, @@ -6583,6 +7476,7 @@ "192.168.1.63", "8.8.8.8" ], + "rule.name": "new_outbound_from_trust", "server.bytes": 109, "server.ip": "8.8.8.8", "server.packets": 1, @@ -6619,14 +7513,23 @@ "destination.packets": 1, "destination.port": 53, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2018-11-30T16:09:16.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2018-11-30T16:09:16.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -6644,6 +7547,7 @@ "network.type": "ipv4", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "8.8.8.8", "panw.panos.destination.nat.port": 53, @@ -6663,6 +7567,7 @@ "192.168.1.63", "8.8.8.8" ], + "rule.name": "new_outbound_from_trust", "server.bytes": 116, "server.ip": "8.8.8.8", "server.packets": 1, @@ -6699,14 +7604,23 @@ "destination.packets": 1, "destination.port": 53, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2018-11-30T16:09:16.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2018-11-30T16:09:16.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -6724,6 +7638,7 @@ "network.type": "ipv4", "observer.hostname": 
"PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "8.8.8.8", "panw.panos.destination.nat.port": 53, @@ -6743,6 +7658,7 @@ "192.168.1.63", "8.8.8.8" ], + "rule.name": "new_outbound_from_trust", "server.bytes": 96, "server.ip": "8.8.8.8", "server.packets": 1, @@ -6782,14 +7698,23 @@ "destination.packets": 8, "destination.port": 443, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 11000000000, "event.end": "2018-11-30T16:09:32.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2018-11-30T16:09:21.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -6807,6 +7732,7 @@ "network.type": "ipv4", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "50.19.85.24", "panw.panos.destination.nat.port": 443, @@ -6826,6 +7752,7 @@ "192.168.1.63", "50.19.85.24" ], + "rule.name": "new_outbound_from_trust", "server.bytes": 654, "server.ip": "50.19.85.24", "server.packets": 8, @@ -6865,14 +7792,23 @@ "destination.packets": 8, "destination.port": 443, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 11000000000, "event.end": "2018-11-30T16:09:32.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2018-11-30T16:09:21.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -6890,6 +7826,7 @@ "network.type": "ipv4", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "50.19.85.24", "panw.panos.destination.nat.port": 443, @@ -6909,6 +7846,7 @@ "192.168.1.63", "50.19.85.24" ], + "rule.name": "new_outbound_from_trust", "server.bytes": 654, "server.ip": "50.19.85.24", "server.packets": 8, @@ -6948,14 +7886,23 @@ "destination.packets": 8, "destination.port": 443, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 11000000000, "event.end": "2018-11-30T16:09:32.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2018-11-30T16:09:21.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -6973,6 +7920,7 @@ "network.type": "ipv4", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "50.19.85.24", "panw.panos.destination.nat.port": 443, @@ -6992,6 +7940,7 @@ "192.168.1.63", "50.19.85.24" ], + 
"rule.name": "new_outbound_from_trust", "server.bytes": 654, "server.ip": "50.19.85.24", "server.packets": 8, @@ -7028,14 +7977,23 @@ "destination.packets": 12, "destination.port": 443, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 11000000000, "event.end": "2018-11-30T16:09:32.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2018-11-30T16:09:21.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -7053,6 +8011,7 @@ "network.type": "ipv4", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "104.254.150.9", "panw.panos.destination.nat.port": 443, @@ -7072,6 +8031,7 @@ "192.168.1.63", "104.254.150.9" ], + "rule.name": "new_outbound_from_trust", "server.bytes": 7820, "server.ip": "104.254.150.9", "server.packets": 12, @@ -7111,14 +8071,23 @@ "destination.packets": 8, "destination.port": 443, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 11000000000, "event.end": "2018-11-30T16:09:32.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2018-11-30T16:09:21.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -7136,6 +8105,7 @@ "network.type": "ipv4", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "50.19.85.24", "panw.panos.destination.nat.port": 443, @@ -7155,6 +8125,7 @@ "192.168.1.63", "50.19.85.24" ], + "rule.name": "new_outbound_from_trust", "server.bytes": 654, "server.ip": "50.19.85.24", "server.packets": 8, @@ -7194,14 +8165,23 @@ "destination.packets": 4, "destination.port": 443, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 12000000000, "event.end": "2018-11-30T16:09:32.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2018-11-30T16:09:20.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -7219,6 +8199,7 @@ "network.type": "ipv4", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "52.0.218.108", "panw.panos.destination.nat.port": 443, @@ -7238,6 +8219,7 @@ "192.168.1.63", "52.0.218.108" ], + "rule.name": "new_outbound_from_trust", "server.bytes": 214, "server.ip": "52.0.218.108", "server.packets": 4, @@ -7277,14 +8259,23 @@ "destination.packets": 4, "destination.port": 443, "event.action": "flow_terminated", - "event.category": "network_traffic", + 
"event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 12000000000, "event.end": "2018-11-30T16:09:32.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2018-11-30T16:09:20.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -7302,6 +8293,7 @@ "network.type": "ipv4", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "52.6.117.19", "panw.panos.destination.nat.port": 443, @@ -7321,6 +8313,7 @@ "192.168.1.63", "52.6.117.19" ], + "rule.name": "new_outbound_from_trust", "server.bytes": 214, "server.ip": "52.6.117.19", "server.packets": 4, @@ -7360,14 +8353,23 @@ "destination.packets": 4, "destination.port": 443, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 12000000000, "event.end": "2018-11-30T16:09:32.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2018-11-30T16:09:20.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -7385,6 +8387,7 @@ "network.type": "ipv4", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "34.238.96.22", "panw.panos.destination.nat.port": 443, @@ -7404,6 +8407,7 @@ "192.168.1.63", "34.238.96.22" ], + "rule.name": "new_outbound_from_trust", "server.bytes": 214, "server.ip": "34.238.96.22", "server.packets": 4, @@ -7443,14 +8447,23 @@ "destination.packets": 4, "destination.port": 443, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 12000000000, "event.end": "2018-11-30T16:09:32.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2018-11-30T16:09:20.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -7468,6 +8481,7 @@ "network.type": "ipv4", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "130.211.47.17", "panw.panos.destination.nat.port": 443, @@ -7487,6 +8501,7 @@ "192.168.1.63", "130.211.47.17" ], + "rule.name": "new_outbound_from_trust", "server.bytes": 280, "server.ip": "130.211.47.17", "server.packets": 4, @@ -7523,14 +8538,23 @@ "destination.packets": 1, "destination.port": 53, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2018-11-30T16:09:18.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", 
"event.start": "2018-11-30T16:09:18.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -7548,6 +8572,7 @@ "network.type": "ipv4", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "8.8.8.8", "panw.panos.destination.nat.port": 53, @@ -7567,6 +8592,7 @@ "192.168.1.63", "8.8.8.8" ], + "rule.name": "new_outbound_from_trust", "server.bytes": 172, "server.ip": "8.8.8.8", "server.packets": 1, @@ -7603,14 +8629,23 @@ "destination.packets": 6, "destination.port": 0, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2018-11-30T16:09:37.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2018-11-30T16:09:37.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -7628,6 +8663,7 @@ "network.type": "ipv4", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "8.8.8.8", "panw.panos.destination.nat.port": 0, @@ -7647,6 +8683,7 @@ "192.168.1.63", "8.8.8.8" ], + "rule.name": "new_outbound_from_trust", "server.bytes": 588, "server.ip": "8.8.8.8", "server.packets": 6, @@ -7683,14 +8720,23 @@ "destination.packets": 1, "destination.port": 53, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2018-11-30T16:09:19.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2018-11-30T16:09:19.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -7708,6 +8754,7 @@ "network.type": "ipv4", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "8.8.8.8", "panw.panos.destination.nat.port": 53, @@ -7727,6 +8774,7 @@ "192.168.1.63", "8.8.8.8" ], + "rule.name": "new_outbound_from_trust", "server.bytes": 94, "server.ip": "8.8.8.8", "server.packets": 1, @@ -7763,14 +8811,23 @@ "destination.packets": 1, "destination.port": 53, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2018-11-30T16:09:19.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2018-11-30T16:09:19.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -7788,6 +8845,7 @@ "network.type": "ipv4", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + 
"panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "8.8.8.8", "panw.panos.destination.nat.port": 53, @@ -7807,6 +8865,7 @@ "192.168.1.63", "8.8.8.8" ], + "rule.name": "new_outbound_from_trust", "server.bytes": 170, "server.ip": "8.8.8.8", "server.packets": 1, @@ -7843,14 +8902,23 @@ "destination.packets": 1, "destination.port": 53, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2018-11-30T16:09:19.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2018-11-30T16:09:19.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -7868,6 +8936,7 @@ "network.type": "ipv4", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "8.8.8.8", "panw.panos.destination.nat.port": 53, @@ -7887,6 +8956,7 @@ "192.168.1.63", "8.8.8.8" ], + "rule.name": "new_outbound_from_trust", "server.bytes": 94, "server.ip": "8.8.8.8", "server.packets": 1, @@ -7923,14 +8993,23 @@ "destination.packets": 1, "destination.port": 53, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2018-11-30T16:09:19.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2018-11-30T16:09:19.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -7948,6 +9027,7 @@ "network.type": "ipv4", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "8.8.8.8", "panw.panos.destination.nat.port": 53, @@ -7967,6 +9047,7 @@ "192.168.1.63", "8.8.8.8" ], + "rule.name": "new_outbound_from_trust", "server.bytes": 94, "server.ip": "8.8.8.8", "server.packets": 1, @@ -8003,14 +9084,23 @@ "destination.packets": 1, "destination.port": 53, "event.action": "flow_terminated", - "event.category": "network_traffic", + "event.category": [ + "network_traffic", + "network" + ], "event.dataset": "panw.panos", "event.duration": 0, "event.end": "2018-11-30T16:09:19.000-02:00", + "event.kind": "event", "event.module": "panw", - "event.outcome": "allow", + "event.outcome": "success", "event.start": "2018-11-30T16:09:19.000-02:00", "event.timezone": "-02:00", + "event.type": [ + "allowed", + "end", + "connection" + ], "fileset.name": "panos", "input.type": "log", "labels.nat_translated": true, @@ -8028,6 +9118,7 @@ "network.type": "ipv4", "observer.hostname": "PA-220", "observer.serial_number": "012801096514", + "panw.panos.action": "allow", "panw.panos.destination.interface": "ethernet1/1", "panw.panos.destination.nat.ip": "8.8.8.8", "panw.panos.destination.nat.port": 53, @@ -8047,6 +9138,7 @@ "192.168.1.63", "8.8.8.8" ], + "rule.name": "new_outbound_from_trust", "server.bytes": 166, "server.ip": "8.8.8.8", "server.packets": 1, diff --git 
a/x-pack/filebeat/module/rabbitmq/log/ingest/pipeline.yml b/x-pack/filebeat/module/rabbitmq/log/ingest/pipeline.yml index b6bc5f57f63..58097c578d8 100644 --- a/x-pack/filebeat/module/rabbitmq/log/ingest/pipeline.yml +++ b/x-pack/filebeat/module/rabbitmq/log/ingest/pipeline.yml @@ -26,6 +26,9 @@ processors: - remove: field: - timestamp +- set: + field: event.kind + value: event on_failure: - set: field: error.message diff --git a/x-pack/filebeat/module/rabbitmq/log/test/test.log-expected.json b/x-pack/filebeat/module/rabbitmq/log/test/test.log-expected.json index 747b866dabe..0bdae14b894 100644 --- a/x-pack/filebeat/module/rabbitmq/log/test/test.log-expected.json +++ b/x-pack/filebeat/module/rabbitmq/log/test/test.log-expected.json @@ -2,6 +2,7 @@ { "@timestamp": "2019-04-03T11:13:15.076-02:00", "event.dataset": "rabbitmq.log", + "event.kind": "event", "event.module": "rabbitmq", "event.timezone": "-02:00", "fileset.name": "log", @@ -15,6 +16,7 @@ { "@timestamp": "2019-04-03T11:13:15.510-02:00", "event.dataset": "rabbitmq.log", + "event.kind": "event", "event.module": "rabbitmq", "event.timezone": "-02:00", "fileset.name": "log", @@ -31,6 +33,7 @@ { "@timestamp": "2019-04-03T11:13:15.512-02:00", "event.dataset": "rabbitmq.log", + "event.kind": "event", "event.module": "rabbitmq", "event.timezone": "-02:00", "fileset.name": "log", @@ -47,6 +50,7 @@ { "@timestamp": "2019-04-12T10:00:53.458-02:00", "event.dataset": "rabbitmq.log", + "event.kind": "event", "event.module": "rabbitmq", "event.timezone": "-02:00", "fileset.name": "log", @@ -60,6 +64,7 @@ { "@timestamp": "2019-04-12T10:00:53.550-02:00", "event.dataset": "rabbitmq.log", + "event.kind": "event", "event.module": "rabbitmq", "event.timezone": "-02:00", "fileset.name": "log", @@ -76,6 +81,7 @@ { "@timestamp": "2019-04-12T10:00:53.550-02:00", "event.dataset": "rabbitmq.log", + "event.kind": "event", "event.module": "rabbitmq", "event.timezone": "-02:00", "fileset.name": "log", @@ -89,6 +95,7 @@ { "@timestamp": "2019-04-12T10:00:54.553-02:00", "event.dataset": "rabbitmq.log", + "event.kind": "event", "event.module": "rabbitmq", "event.timezone": "-02:00", "fileset.name": "log", @@ -102,6 +109,7 @@ { "@timestamp": "2019-04-12T10:00:54.555-02:00", "event.dataset": "rabbitmq.log", + "event.kind": "event", "event.module": "rabbitmq", "event.timezone": "-02:00", "fileset.name": "log", @@ -115,6 +123,7 @@ { "@timestamp": "2019-04-12T10:00:54.567-02:00", "event.dataset": "rabbitmq.log", + "event.kind": "event", "event.module": "rabbitmq", "event.timezone": "-02:00", "fileset.name": "log", @@ -128,6 +137,7 @@ { "@timestamp": "2019-04-12T10:00:54.567-02:00", "event.dataset": "rabbitmq.log", + "event.kind": "event", "event.module": "rabbitmq", "event.timezone": "-02:00", "fileset.name": "log", @@ -141,6 +151,7 @@ { "@timestamp": "2019-04-12T10:00:54.568-02:00", "event.dataset": "rabbitmq.log", + "event.kind": "event", "event.module": "rabbitmq", "event.timezone": "-02:00", "fileset.name": "log", @@ -154,6 +165,7 @@ { "@timestamp": "2019-04-12T10:00:54.569-02:00", "event.dataset": "rabbitmq.log", + "event.kind": "event", "event.module": "rabbitmq", "event.timezone": "-02:00", "fileset.name": "log", @@ -167,6 +179,7 @@ { "@timestamp": "2019-04-12T10:00:54.579-02:00", "event.dataset": "rabbitmq.log", + "event.kind": "event", "event.module": "rabbitmq", "event.timezone": "-02:00", "fileset.name": "log", @@ -180,6 +193,7 @@ { "@timestamp": "2019-04-12T10:00:54.588-02:00", "event.dataset": "rabbitmq.log", + "event.kind": "event", "event.module": 
"rabbitmq", "event.timezone": "-02:00", "fileset.name": "log", @@ -193,6 +207,7 @@ { "@timestamp": "2019-04-12T10:00:54.589-02:00", "event.dataset": "rabbitmq.log", + "event.kind": "event", "event.module": "rabbitmq", "event.timezone": "-02:00", "fileset.name": "log", @@ -206,6 +221,7 @@ { "@timestamp": "2019-04-12T10:00:54.598-02:00", "event.dataset": "rabbitmq.log", + "event.kind": "event", "event.module": "rabbitmq", "event.timezone": "-02:00", "fileset.name": "log", @@ -219,6 +235,7 @@ { "@timestamp": "2019-04-12T10:00:54.606-02:00", "event.dataset": "rabbitmq.log", + "event.kind": "event", "event.module": "rabbitmq", "event.timezone": "-02:00", "fileset.name": "log", @@ -232,6 +249,7 @@ { "@timestamp": "2019-04-12T10:00:54.615-02:00", "event.dataset": "rabbitmq.log", + "event.kind": "event", "event.module": "rabbitmq", "event.timezone": "-02:00", "fileset.name": "log", @@ -245,6 +263,7 @@ { "@timestamp": "2019-04-12T10:00:54.615-02:00", "event.dataset": "rabbitmq.log", + "event.kind": "event", "event.module": "rabbitmq", "event.timezone": "-02:00", "fileset.name": "log", @@ -261,6 +280,7 @@ { "@timestamp": "2019-04-12T10:01:01.031-02:00", "event.dataset": "rabbitmq.log", + "event.kind": "event", "event.module": "rabbitmq", "event.timezone": "-02:00", "fileset.name": "log", @@ -277,6 +297,7 @@ { "@timestamp": "2019-04-12T10:11:15.094-02:00", "event.dataset": "rabbitmq.log", + "event.kind": "event", "event.module": "rabbitmq", "event.timezone": "-02:00", "fileset.name": "log", @@ -290,6 +311,7 @@ { "@timestamp": "2019-04-12T10:11:15.101-02:00", "event.dataset": "rabbitmq.log", + "event.kind": "event", "event.module": "rabbitmq", "event.timezone": "-02:00", "fileset.name": "log", @@ -303,6 +325,7 @@ { "@timestamp": "2019-04-12T10:19:14.450-02:00", "event.dataset": "rabbitmq.log", + "event.kind": "event", "event.module": "rabbitmq", "event.timezone": "-02:00", "fileset.name": "log", @@ -319,6 +342,7 @@ { "@timestamp": "2019-04-12T10:19:14.450-02:00", "event.dataset": "rabbitmq.log", + "event.kind": "event", "event.module": "rabbitmq", "event.timezone": "-02:00", "fileset.name": "log", @@ -332,6 +356,7 @@ { "@timestamp": "2019-04-12T10:19:14.451-02:00", "event.dataset": "rabbitmq.log", + "event.kind": "event", "event.module": "rabbitmq", "event.timezone": "-02:00", "fileset.name": "log", diff --git a/x-pack/filebeat/module/zeek/capture_loss/ingest/pipeline.json b/x-pack/filebeat/module/zeek/capture_loss/ingest/pipeline.json deleted file mode 100644 index 7d662ab7da1..00000000000 --- a/x-pack/filebeat/module/zeek/capture_loss/ingest/pipeline.json +++ /dev/null @@ -1,28 +0,0 @@ -{ - "description": "Pipeline for normalizing Zeek capture_loss.log", - "processors": [ - { - "set": { - "field": "event.created", - "value": "{{_ingest.timestamp}}" - } - }, - { - "date": { - "field": "zeek.capture_loss.ts", - "formats": ["UNIX"] - } - }, - { - "remove": { - "field": "zeek.capture_loss.ts" - } - } - ], - "on_failure" : [{ - "set" : { - "field" : "error.message", - "value" : "{{ _ingest.on_failure_message }}" - } - }] -} diff --git a/x-pack/filebeat/module/zeek/capture_loss/ingest/pipeline.yml b/x-pack/filebeat/module/zeek/capture_loss/ingest/pipeline.yml new file mode 100644 index 00000000000..3c6171bc045 --- /dev/null +++ b/x-pack/filebeat/module/zeek/capture_loss/ingest/pipeline.yml @@ -0,0 +1,21 @@ +description: Pipeline for normalizing Zeek capture_loss.log +processors: +- set: + field: event.created + value: '{{_ingest.timestamp}}' +- date: + field: zeek.capture_loss.ts + formats: + - UNIX +- 
remove: + field: zeek.capture_loss.ts +- set: + field: event.kind + value: metric +- set: + field: event.type + value: info +on_failure: +- set: + field: error.message + value: '{{ _ingest.on_failure_message }}' diff --git a/x-pack/filebeat/module/zeek/capture_loss/manifest.yml b/x-pack/filebeat/module/zeek/capture_loss/manifest.yml index 97ae0f09d40..5349b0581c6 100644 --- a/x-pack/filebeat/module/zeek/capture_loss/manifest.yml +++ b/x-pack/filebeat/module/zeek/capture_loss/manifest.yml @@ -11,5 +11,5 @@ var: - name: tags default: [zeek.capture_loss] -ingest_pipeline: ingest/pipeline.json +ingest_pipeline: ingest/pipeline.yml input: config/capture_loss.yml diff --git a/x-pack/filebeat/module/zeek/capture_loss/test/capture_loss-json.log-expected.json b/x-pack/filebeat/module/zeek/capture_loss/test/capture_loss-json.log-expected.json index 0ae18ff9c37..14f20eb3189 100644 --- a/x-pack/filebeat/module/zeek/capture_loss/test/capture_loss-json.log-expected.json +++ b/x-pack/filebeat/module/zeek/capture_loss/test/capture_loss-json.log-expected.json @@ -2,7 +2,9 @@ { "@timestamp": "2019-09-10T16:19:28.465Z", "event.dataset": "zeek.capture_loss", + "event.kind": "metric", "event.module": "zeek", + "event.type": "info", "fileset.name": "capture_loss", "input.type": "log", "log.offset": 0, diff --git a/x-pack/filebeat/module/zeek/connection/config/connection.yml b/x-pack/filebeat/module/zeek/connection/config/connection.yml index 14c5b529708..f91d24f8020 100644 --- a/x-pack/filebeat/module/zeek/connection/config/connection.yml +++ b/x-pack/filebeat/module/zeek/connection/config/connection.yml @@ -75,20 +75,27 @@ processors: ignore_missing: true fail_on_error: false - + - convert: + fields: + - {from: "zeek.session_id", to: "event.id"} + - {from: "source.address", to: "source.ip", type: "ip"} + - {from: "destination.address", to: "destination.ip", type: "ip"} + ignore_missing: true + fail_on_error: false + - add_fields: + target: event + fields: + kind: event + category: + - network {{ if .community_id }} - if: equals.network.transport: icmp then: community_id: fields: - source_ip: source.address - destination_ip: destination.address icmp_type: zeek.connection.icmp.type icmp_code: zeek.connection.icmp.code else: community_id: - fields: - source_ip: source.address - destination_ip: destination.address {{ end }} diff --git a/x-pack/filebeat/module/zeek/connection/ingest/pipeline.json b/x-pack/filebeat/module/zeek/connection/ingest/pipeline.json deleted file mode 100644 index a930fd08ec9..00000000000 --- a/x-pack/filebeat/module/zeek/connection/ingest/pipeline.json +++ /dev/null @@ -1,160 +0,0 @@ -{ - "description": "Pipeline for normalizing Zeek conn.log", - "processors": [ - { - "set": { - "field": "event.created", - "value": "{{_ingest.timestamp}}" - } - }, - { - "date": { - "field": "zeek.connection.ts", - "formats": ["UNIX"] - } - }, - { - "remove": { - "field": "zeek.connection.ts" - } - }, - { - "set": { - "field": "event.id", - "value": "{{zeek.session_id}}", - "if": "ctx.zeek.session_id != null" - } - }, - { - "script": { - "source": "ctx.event.duration = Math.round(ctx.temp.duration * params.scale)", - "params": { - "scale": 1000000000 - }, - "if": "ctx.temp?.duration != null" - } - }, - { - "remove": { - "field": "temp.duration", - "ignore_missing": true - } - }, - { - "script": { - "source": "if (ctx.zeek.connection.local_orig) ctx.tags.add(\"local_orig\");", - "if": "ctx.zeek.connection.local_orig != null" - } - }, - { - "script": { - "source": "if (ctx.zeek.connection.local_resp) 
ctx.tags.add(\"local_resp\");", - "if": "ctx.zeek.connection.local_resp != null" - } - }, - { - "set": { - "field": "source.ip", - "value": "{{source.address}}" - } - }, - { - "set": { - "field": "destination.ip", - "value": "{{destination.address}}" - } - }, - { - "script": { - "source": "ctx.network.packets = ctx.source.packets + ctx.destination.packets", - "ignore_failure": true - } - }, - { - "script": { - "source": "ctx.network.bytes = ctx.source.bytes + ctx.destination.bytes", - "ignore_failure": true - } - }, - { - "script": { - "source": "if (ctx.zeek.connection.local_orig == true && ctx.zeek.connection.local_resp == true) {ctx.network.direction = \"internal\"} else if (ctx.zeek.connection.local_orig == true && ctx.zeek.connection.local_resp == false) {ctx.network.direction = \"outbound\"} else if (ctx.zeek.connection.local_orig == false && ctx.zeek.connection.local_resp == true) {ctx.network.direction = \"inbound\"} else {ctx.network.direction = \"external\"}" - } - }, - { - "geoip": { - "field": "destination.ip", - "target_field": "destination.geo" - } - }, - { - "geoip": { - "field": "source.ip", - "target_field": "source.geo" - } - }, - { - "geoip": { - "database_file": "GeoLite2-ASN.mmdb", - "field": "source.ip", - "target_field": "source.as", - "properties": [ - "asn", - "organization_name" - ], - "ignore_missing": true - } - }, - { - "geoip": { - "database_file": "GeoLite2-ASN.mmdb", - "field": "destination.ip", - "target_field": "destination.as", - "properties": [ - "asn", - "organization_name" - ], - "ignore_missing": true - } - }, - { - "rename": { - "field": "source.as.asn", - "target_field": "source.as.number", - "ignore_missing": true - } - }, - { - "rename": { - "field": "source.as.organization_name", - "target_field": "source.as.organization.name", - "ignore_missing": true - } - }, - { - "rename": { - "field": "destination.as.asn", - "target_field": "destination.as.number", - "ignore_missing": true - } - }, - { - "rename": { - "field": "destination.as.organization_name", - "target_field": "destination.as.organization.name", - "ignore_missing": true - } - }, - { - "script": { - "source": "if (ctx.zeek.connection.state == \"S0\") {ctx.zeek.connection.state_message = \"Connection attempt seen, no reply.\"} else if (ctx.zeek.connection.state == \"S1\") {ctx.zeek.connection.state_message = \"Connection established, not terminated.\"} else if (ctx.zeek.connection.state == \"SF\") {ctx.zeek.connection.state_message = \"Normal establishment and termination.\"} else if (ctx.zeek.connection.state == \"REJ\") {ctx.zeek.connection.state_message = \"Connection attempt rejected.\"} else if (ctx.zeek.connection.state == \"S2\") {ctx.zeek.connection.state_message = \" Connection established and close attempt by originator seen (but no reply from responder).\"} else if (ctx.zeek.connection.state == \"S3\") {ctx.zeek.connection.state_message = \"Connection established and close attempt by responder seen (but no reply from originator).\"} else if (ctx.zeek.connection.state == \"RSTO\") {ctx.zeek.connection.state_message = \"Connection established, originator aborted (sent a RST).\"} else if (ctx.zeek.connection.state == \"RSTR\") {ctx.zeek.connection.state_message = \"Responder sent a RST.\"} else if (ctx.zeek.connection.state == \"RSTOS0\") {ctx.zeek.connection.state_message = \"Originator sent a SYN followed by a RST, we never saw a SYN-ACK from the responder.\"} else if (ctx.zeek.connection.state == \"RSTRH\") {ctx.zeek.connection.state_message = \"Responder sent a SYN ACK followed 
by a RST, we never saw a SYN from the (purported) originator.\"} else if (ctx.zeek.connection.state == \"SH\") {ctx.zeek.connection.state_message = \"Originator sent a SYN followed by a FIN, we never saw a SYN ACK from the responder (hence the connection was 'half' open).\"} else if (ctx.zeek.connection.state == \"SHR\") {ctx.zeek.connection.state_message = \"Responder sent a SYN ACK followed by a FIN, we never saw a SYN from the originator.\"} else if (ctx.zeek.connection.state == \"OTH\") {ctx.zeek.connection.state_message = \"No SYN seen, just midstream traffic (a 'partial connection' that was not later closed).\"}" - } - } - ], - "on_failure" : [{ - "set" : { - "field" : "error.message", - "value" : "{{ _ingest.on_failure_message }}" - } - }] -} diff --git a/x-pack/filebeat/module/zeek/connection/ingest/pipeline.yml b/x-pack/filebeat/module/zeek/connection/ingest/pipeline.yml new file mode 100644 index 00000000000..b660079324a --- /dev/null +++ b/x-pack/filebeat/module/zeek/connection/ingest/pipeline.yml @@ -0,0 +1,187 @@ +description: Pipeline for normalizing Zeek conn.log +processors: +- set: + field: event.created + value: '{{_ingest.timestamp}}' +- date: + field: zeek.connection.ts + formats: + - UNIX +- remove: + field: zeek.connection.ts +- set: + field: event.id + value: '{{zeek.session_id}}' + if: ctx.zeek.session_id != null +- script: + source: ctx.event.duration = Math.round(ctx.temp.duration * params.scale) + params: + scale: 1000000000 + if: ctx.temp?.duration != null +- remove: + field: temp.duration + ignore_missing: true +- script: + source: if (ctx.zeek.connection.local_orig) ctx.tags.add("local_orig"); + if: ctx.zeek.connection.local_orig != null +- script: + source: if (ctx.zeek.connection.local_resp) ctx.tags.add("local_resp"); + if: ctx.zeek.connection.local_resp != null +- set: + field: source.ip + value: '{{source.address}}' +- append: + field: related.ip + value: '{{source.address}}' +- set: + field: destination.ip + value: '{{destination.address}}' +- append: + field: related.ip + value: '{{destination.address}}' +- script: + source: ctx.network.packets = ctx.source.packets + ctx.destination.packets + ignore_failure: true +- script: + source: ctx.network.bytes = ctx.source.bytes + ctx.destination.bytes + ignore_failure: true +- script: + source: >- + if (ctx?.zeek?.connection?.local_orig == true) { + if (ctx?.zeek?.connection?.local_resp == true) { + ctx.network.direction = "internal"; + } else { + ctx.network.direction = "outbound"; + } + } else { + if (ctx?.zeek?.connection?.local_resp == true) { + ctx.network.direction = "inbound"; + } else { + ctx.network.direction = "external"; + } + } +- geoip: + field: destination.ip + target_field: destination.geo +- geoip: + field: source.ip + target_field: source.geo +- geoip: + database_file: GeoLite2-ASN.mmdb + field: source.ip + target_field: source.as + properties: + - asn + - organization_name + ignore_missing: true +- geoip: + database_file: GeoLite2-ASN.mmdb + field: destination.ip + target_field: destination.as + properties: + - asn + - organization_name + ignore_missing: true +- rename: + field: source.as.asn + target_field: source.as.number + ignore_missing: true +- rename: + field: source.as.organization_name + target_field: source.as.organization.name + ignore_missing: true +- rename: + field: destination.as.asn + target_field: destination.as.number + ignore_missing: true +- rename: + field: destination.as.organization_name + target_field: destination.as.organization.name + ignore_missing: true +- set: + 
field: event.kind + value: event +- append: + field: event.category + value: network +- script: + params: + S0: + conn_str: "Connection attempt seen, no reply." + types: + - connection + - start + S1: + conn_str: "Connection established, not terminated." + types: + - connection + - start + SF: + conn_str: "Normal establishment and termination." + types: + - connection + - start + - end + REJ: + conn_str: "Connection attempt rejected." + types: + - connection + - start + - denied + S2: + conn_str: "Connection established and close attempt by originator seen (but no reply from responder)." + types: + - connection + - info + S3: + conn_str: "Connection established and close attempt by responder seen (but no reply from originator)." + types: + - connection + - info + RSTO: + conn_str: "Connection established, originator aborted (sent a RST)." + types: + - connection + - info + RSTR: + conn_str: "Responder sent a RST." + types: + - connection + - info + RSTOS0: + conn_str: "Originator sent a SYN followed by a RST, we never saw a SYN-ACK from the responder." + types: + - connection + - info + RSTRH: + conn_str: "Responder sent a SYN ACK followed by a RST, we never saw a SYN from the (purported) originator." + types: + - connection + - info + SH: + conn_str: "Originator sent a SYN followed by a FIN, we never saw a SYN ACK from the responder (hence the connection was 'half' open)." + types: + - connection + - info + SHR: + conn_str: "Responder sent a SYN ACK followed by a FIN, we never saw a SYN from the originator." + types: + - connection + - info + OTH: + conn_str: "No SYN seen, just midstream traffic (a 'partial connection' that was not later closed)." + types: + - connection + - info + source: >- + if (ctx?.zeek?.connection?.state == null) { + return; + } + if (params.containsKey(ctx.zeek.connection.state)) { + ctx.zeek.connection.state_message = params[ctx.zeek.connection.state]["conn_str"]; + ctx.event.type = params[ctx.zeek.connection.state]["types"]; + } +on_failure: +- set: + field: error.message + value: '{{ _ingest.on_failure_message }}' diff --git a/x-pack/filebeat/module/zeek/connection/manifest.yml b/x-pack/filebeat/module/zeek/connection/manifest.yml index 0361f0c89fa..0acad34d69c 100644 --- a/x-pack/filebeat/module/zeek/connection/manifest.yml +++ b/x-pack/filebeat/module/zeek/connection/manifest.yml @@ -13,7 +13,7 @@ var: - name: community_id default: true -ingest_pipeline: ingest/pipeline.json +ingest_pipeline: ingest/pipeline.yml input: config/connection.yml requires.processors: diff --git a/x-pack/filebeat/module/zeek/connection/test/connection-json.log-expected.json b/x-pack/filebeat/module/zeek/connection/test/connection-json.log-expected.json index 4e5615a3a51..35a539b1493 100644 --- a/x-pack/filebeat/module/zeek/connection/test/connection-json.log-expected.json +++ b/x-pack/filebeat/module/zeek/connection/test/connection-json.log-expected.json @@ -6,10 +6,20 @@ "destination.ip": "192.168.86.1", "destination.packets": 1, "destination.port": 53, + "event.category": [ + "network", + "network" + ], "event.dataset": "zeek.connection", "event.duration": 76967000, "event.id": "CAcJw21BbVedgFnYH3", + "event.kind": "event", "event.module": "zeek", + "event.type": [ + "connection", + "start", + "end" + ], "fileset.name": "connection", "input.type": "log", "log.offset": 0, @@ -19,6 +29,10 @@ "network.packets": 2, "network.protocol": "dns", "network.transport": "udp", + "related.ip": [ + "192.168.86.167", + "192.168.86.1" + ], "service.type": "zeek", "source.address":
"192.168.86.167", "source.bytes": 103, @@ -51,10 +65,20 @@ "destination.ip": "8.8.8.8", "destination.packets": 1, "destination.port": 53, + "event.category": [ + "network", + "network" + ], "event.dataset": "zeek.connection", "event.duration": 76967000, "event.id": "CAcJw21BbVedgFnYH4", + "event.kind": "event", "event.module": "zeek", + "event.type": [ + "connection", + "start", + "end" + ], "fileset.name": "connection", "input.type": "log", "log.offset": 398, @@ -64,6 +88,10 @@ "network.packets": 2, "network.protocol": "dns", "network.transport": "udp", + "related.ip": [ + "192.168.86.167", + "8.8.8.8" + ], "service.type": "zeek", "source.address": "192.168.86.167", "source.bytes": 103, @@ -95,10 +123,20 @@ "destination.ip": "8.8.8.8", "destination.packets": 1, "destination.port": 53, + "event.category": [ + "network", + "network" + ], "event.dataset": "zeek.connection", "event.duration": 76967000, "event.id": "CAcJw21BbVedgFnYH5", + "event.kind": "event", "event.module": "zeek", + "event.type": [ + "connection", + "start", + "end" + ], "fileset.name": "connection", "input.type": "log", "log.offset": 792, @@ -108,6 +146,10 @@ "network.packets": 2, "network.protocol": "dns", "network.transport": "udp", + "related.ip": [ + "4.4.2.2", + "8.8.8.8" + ], "service.type": "zeek", "source.address": "4.4.2.2", "source.as.number": 3356, @@ -137,9 +179,18 @@ "destination.bytes": 0, "destination.ip": "198.51.100.249", "destination.packets": 0, + "event.category": [ + "network", + "network" + ], "event.dataset": "zeek.connection", "event.id": "Cc6NJ3GRlfjE44I3h", + "event.kind": "event", "event.module": "zeek", + "event.type": [ + "connection", + "info" + ], "fileset.name": "connection", "input.type": "log", "log.offset": 1181, @@ -148,6 +199,10 @@ "network.direction": "external", "network.packets": 1, "network.transport": "icmp", + "related.ip": [ + "192.0.2.205", + "198.51.100.249" + ], "service.type": "zeek", "source.address": "192.0.2.205", "source.bytes": 107, @@ -165,4 +220,4 @@ "zeek.connection.state_message": "No SYN seen, just midstream traffic (a 'partial connection' that was not later closed).", "zeek.session_id": "Cc6NJ3GRlfjE44I3h" } -] +] \ No newline at end of file diff --git a/x-pack/filebeat/module/zeek/dce_rpc/config/dce_rpc.yml b/x-pack/filebeat/module/zeek/dce_rpc/config/dce_rpc.yml index e7875bca0df..0ba1b0fc673 100644 --- a/x-pack/filebeat/module/zeek/dce_rpc/config/dce_rpc.yml +++ b/x-pack/filebeat/module/zeek/dce_rpc/config/dce_rpc.yml @@ -36,7 +36,23 @@ processors: ignore_missing: true fail_on_error: false - + - convert: + fields: + - {from: "zeek.session_id", to: "event.id"} + - {from: "source.address", to: "source.ip", type: "ip"} + - {from: "destination.address", to: "destination.ip", type: "ip"} + ignore_missing: true + fail_on_error: false + - add_fields: + target: event + fields: + kind: event + category: + - network + type: + - connection + - protocol + - info {{ if .community_id }} - community_id: {{ end }} diff --git a/x-pack/filebeat/module/zeek/dce_rpc/ingest/pipeline.json b/x-pack/filebeat/module/zeek/dce_rpc/ingest/pipeline.json deleted file mode 100644 index 0f274438186..00000000000 --- a/x-pack/filebeat/module/zeek/dce_rpc/ingest/pipeline.json +++ /dev/null @@ -1,47 +0,0 @@ -{ - "description": "Pipeline for normalizing Zeek dce_rpc.log", - "processors": [ - { - "set": { - "field": "event.created", - "value": "{{_ingest.timestamp}}" - } - }, - { - "date": { - "field": "zeek.dce_rpc.ts", - "formats": ["UNIX"] - } - }, - { - "set": { - "field": "event.id", - 
"value": "{{zeek.session_id}}", - "if": "ctx.zeek.session_id != null" - } - }, - { - "remove": { - "field": "zeek.dce_rpc.ts" - } - }, - { - "set": { - "field": "source.ip", - "value": "{{source.address}}" - } - }, - { - "set": { - "field": "destination.ip", - "value": "{{destination.address}}" - } - } - ], - "on_failure" : [{ - "set" : { - "field" : "error.message", - "value" : "{{ _ingest.on_failure_message }}" - } - }] -} diff --git a/x-pack/filebeat/module/zeek/dce_rpc/ingest/pipeline.yml b/x-pack/filebeat/module/zeek/dce_rpc/ingest/pipeline.yml new file mode 100644 index 00000000000..1ecda252cc8 --- /dev/null +++ b/x-pack/filebeat/module/zeek/dce_rpc/ingest/pipeline.yml @@ -0,0 +1,63 @@ +description: Pipeline for normalizing Zeek dce_rpc.log +processors: +- set: + field: event.created + value: '{{_ingest.timestamp}}' +- date: + field: zeek.dce_rpc.ts + formats: + - UNIX +- remove: + field: zeek.dce_rpc.ts +- append: + field: related.ip + value: '{{source.ip}}' +- geoip: + field: source.ip + target_field: source.geo +- geoip: + database_file: GeoLite2-ASN.mmdb + field: source.ip + target_field: source.as + properties: + - asn + - organization_name + ignore_missing: true +- rename: + field: source.as.asn + target_field: source.as.number + ignore_missing: true +- rename: + field: source.as.organization_name + target_field: source.as.organization.name + ignore_missing: true +- append: + field: related.ip + value: '{{destination.ip}}' +- geoip: + field: destination.ip + target_field: destination.geo +- geoip: + database_file: GeoLite2-ASN.mmdb + field: destination.ip + target_field: destination.as + properties: + - asn + - organization_name + ignore_missing: true +- rename: + field: destination.as.asn + target_field: destination.as.number + ignore_missing: true +- rename: + field: destination.as.organization_name + target_field: destination.as.organization.name + ignore_missing: true +- set: + field: event.action + value: '{{zeek.dce_rpc.operation}}' + if: "ctx?.zeek?.dce_rpc?.operation != null" +on_failure: +- set: + field: error.message + value: '{{ _ingest.on_failure_message }}' diff --git a/x-pack/filebeat/module/zeek/dce_rpc/manifest.yml b/x-pack/filebeat/module/zeek/dce_rpc/manifest.yml index 853c7084f7e..21ba27eac96 100644 --- a/x-pack/filebeat/module/zeek/dce_rpc/manifest.yml +++ b/x-pack/filebeat/module/zeek/dce_rpc/manifest.yml @@ -13,5 +13,5 @@ var: - name: community_id default: true -ingest_pipeline: ingest/pipeline.json +ingest_pipeline: ingest/pipeline.yml input: config/dce_rpc.yml diff --git a/x-pack/filebeat/module/zeek/dce_rpc/test/dce_rpc-json.log-expected.json b/x-pack/filebeat/module/zeek/dce_rpc/test/dce_rpc-json.log-expected.json index 881f30d1b79..6128801caa7 100644 --- a/x-pack/filebeat/module/zeek/dce_rpc/test/dce_rpc-json.log-expected.json +++ b/x-pack/filebeat/module/zeek/dce_rpc/test/dce_rpc-json.log-expected.json @@ -4,14 +4,29 @@ "destination.address": "172.16.128.202", "destination.ip": "172.16.128.202", "destination.port": 445, + "event.action": "BrowserrQueryOtherDomains", + "event.category": [ + "network" + ], "event.dataset": "zeek.dce_rpc", "event.id": "CsNHVHa1lzFtvJzT8", + "event.kind": "event", "event.module": "zeek", + "event.type": [ + "connection", + "protocol", + "info" + ], "fileset.name": "dce_rpc", "input.type": "log", "log.offset": 0, + "network.community_id": "1:SJNAD5vtzZuhQjGtfaI8svTnyuw=", "network.protocol": "dce_rpc", "network.transport": "tcp", + "related.ip": [ + "172.16.133.6", + "172.16.128.202" + ], "service.type": "zeek", 
"source.address": "172.16.133.6", "source.ip": "172.16.133.6", diff --git a/x-pack/filebeat/module/zeek/dhcp/config/dhcp.yml b/x-pack/filebeat/module/zeek/dhcp/config/dhcp.yml index 5878c8d7894..97c45a17920 100644 --- a/x-pack/filebeat/module/zeek/dhcp/config/dhcp.yml +++ b/x-pack/filebeat/module/zeek/dhcp/config/dhcp.yml @@ -94,9 +94,27 @@ processors: fields: port: 67 + - convert: + fields: + - {from: "zeek.dhcp.address.client", to: "source.address"} + - {from: "zeek.dhcp.address.client", to: "source.ip", type: "ip"} + - {from: "zeek.dhcp.address.client", to: "client.address"} + - {from: "zeek.dhcp.address.server", to: "destination.address"} + - {from: "zeek.dhcp.address.server", to: "destination.ip", type: "ip"} + - {from: "zeek.dhcp.address.server", to: "server.address"} + - {from: "zeek.dhcp.domain", to: "network.name"} + ignore_missing: true + fail_on_error: false + - add_fields: + target: event + fields: + kind: event + category: + - network + type: + - connection + - protocol + - info {{ if .community_id }} - community_id: - fields: - source.address: zeek.dhcp.address.client - destination.address: zeek.dhcp.address.server {{ end }} diff --git a/x-pack/filebeat/module/zeek/dhcp/ingest/pipeline.json b/x-pack/filebeat/module/zeek/dhcp/ingest/pipeline.json deleted file mode 100644 index 92c1a43dd4a..00000000000 --- a/x-pack/filebeat/module/zeek/dhcp/ingest/pipeline.json +++ /dev/null @@ -1,84 +0,0 @@ -{ - "description": "Pipeline for normalizing Zeek dhcp.log", - "processors": [ - { - "set": { - "field": "event.created", - "value": "{{_ingest.timestamp}}" - } - }, - { - "date": { - "field": "zeek.dhcp.ts", - "formats": ["UNIX"] - } - }, - { - "remove": { - "field": "zeek.dhcp.ts" - } - }, - { - "set": { - "field": "event.id", - "value": "{{zeek.session_id}}", - "if": "ctx.zeek.session_id != null" - } - }, - { - "set": { - "field": "source.address", - "value": "{{zeek.dhcp.address.client}}", - "if": "ctx.zeek.dhcp.address?.client != null" - } - }, - { - "set": { - "field": "client.address", - "value": "{{zeek.dhcp.address.client}}", - "if": "ctx.zeek.dhcp.address?.client != null" - } - }, - { - "set": { - "field": "destination.address", - "value": "{{zeek.dhcp.address.server}}", - "if": "ctx.zeek.dhcp.address?.server != null" - } - }, - { - "set": { - "field": "server.address", - "value": "{{zeek.dhcp.address.server}}", - "if": "ctx.zeek.dhcp.address?.server != null" - } - }, - { - "set": { - "field": "source.ip", - "value": "{{source.address}}", - "if": "ctx.source?.address != null" - } - }, - { - "set": { - "field": "destination.ip", - "value": "{{destination.address}}", - "if": "ctx.destination?.address != null" - } - }, - { - "set": { - "field": "network.name", - "value": "{{zeek.dhcp.domain}}", - "if": "ctx.zeek.dhcp.domain != null" - } - } - ], - "on_failure": [{ - "set": { - "field": "error.message", - "value": "{{ _ingest.on_failure_message }}" - } - }] -} diff --git a/x-pack/filebeat/module/zeek/dhcp/ingest/pipeline.yml b/x-pack/filebeat/module/zeek/dhcp/ingest/pipeline.yml new file mode 100644 index 00000000000..49df687ecc3 --- /dev/null +++ b/x-pack/filebeat/module/zeek/dhcp/ingest/pipeline.yml @@ -0,0 +1,27 @@ +description: Pipeline for normalizing Zeek dhcp.log +processors: +- set: + field: event.created + value: '{{_ingest.timestamp}}' +- date: + field: zeek.dhcp.ts + formats: + - UNIX +- remove: + field: zeek.dhcp.ts +- set: + field: event.id + value: '{{zeek.session_id}}' + if: ctx.zeek.session_id != null +- append: + field: related.ip + value: '{{source.ip}}' + if: 
'ctx?.source?.ip != null' +- append: + field: related.ip + value: '{{destination.ip}}' + if: 'ctx?.destination?.ip != null' +on_failure: +- set: + field: error.message + value: '{{ _ingest.on_failure_message }}' diff --git a/x-pack/filebeat/module/zeek/dhcp/manifest.yml b/x-pack/filebeat/module/zeek/dhcp/manifest.yml index a09038725e3..7cb434b1955 100644 --- a/x-pack/filebeat/module/zeek/dhcp/manifest.yml +++ b/x-pack/filebeat/module/zeek/dhcp/manifest.yml @@ -13,5 +13,5 @@ var: - name: community_id default: true -ingest_pipeline: ingest/pipeline.json +ingest_pipeline: ingest/pipeline.yml input: config/dhcp.yml diff --git a/x-pack/filebeat/module/zeek/dhcp/test/dhcp-json.log-expected.json b/x-pack/filebeat/module/zeek/dhcp/test/dhcp-json.log-expected.json index 63fd7367dd8..ec36a36c503 100644 --- a/x-pack/filebeat/module/zeek/dhcp/test/dhcp-json.log-expected.json +++ b/x-pack/filebeat/module/zeek/dhcp/test/dhcp-json.log-expected.json @@ -5,15 +5,29 @@ "destination.address": "192.168.199.254", "destination.ip": "192.168.199.254", "destination.port": 67, + "event.category": [ + "network" + ], "event.dataset": "zeek.dhcp", "event.id": "{0=CmWOt6VWaNGqXYcH6, 1=CLObLo4YHn0u23Tp8a}", + "event.kind": "event", "event.module": "zeek", + "event.type": [ + "connection", + "protocol", + "info" + ], "fileset.name": "dhcp", "input.type": "log", "log.offset": 0, + "network.community_id": "1:HsGjbon+HsK9xnMq+1A32BR9C4Y=", "network.name": "localdomain", "network.protocol": "dhcp", "network.transport": "udp", + "related.ip": [ + "192.168.199.132", + "192.168.199.254" + ], "server.address": "192.168.199.254", "service.type": "zeek", "source.address": "192.168.199.132", diff --git a/x-pack/filebeat/module/zeek/dnp3/config/dnp3.yml b/x-pack/filebeat/module/zeek/dnp3/config/dnp3.yml index 4dec34b4b59..d059b4c79f9 100644 --- a/x-pack/filebeat/module/zeek/dnp3/config/dnp3.yml +++ b/x-pack/filebeat/module/zeek/dnp3/config/dnp3.yml @@ -46,9 +46,23 @@ processors: ignore_missing: true fail_on_error: false + - convert: + fields: + - {from: "zeek.session_id", to: "event.id"} + - {from: "source.address", to: "source.ip", type: "ip"} + - {from: "destination.address", to: "destination.ip", type: "ip"} + ignore_missing: true + fail_on_error: false + - add_fields: + target: event + fields: + kind: event + category: + - network + type: + - connection + - protocol + - info {{ if .community_id }} - community_id: - fields: - source_ip: source.address - destination_ip: destination.address {{ end }} diff --git a/x-pack/filebeat/module/zeek/dnp3/ingest/pipeline.json b/x-pack/filebeat/module/zeek/dnp3/ingest/pipeline.json deleted file mode 100644 index 3f7e3c4baee..00000000000 --- a/x-pack/filebeat/module/zeek/dnp3/ingest/pipeline.json +++ /dev/null @@ -1,47 +0,0 @@ -{ - "description": "Pipeline for normalizing Zeek dnp3.log", - "processors": [ - { - "set": { - "field": "event.created", - "value": "{{_ingest.timestamp}}" - } - }, - { - "date": { - "field": "zeek.dnp3.ts", - "formats": ["UNIX"] - } - }, - { - "remove": { - "field": "zeek.dnp3.ts" - } - }, - { - "set": { - "field": "event.id", - "value": "{{zeek.session_id}}", - "if": "ctx.zeek.session_id != null" - } - }, - { - "set": { - "field": "source.ip", - "value": "{{source.address}}" - } - }, - { - "set": { - "field": "destination.ip", - "value": "{{destination.address}}" - } - } - ], - "on_failure" : [{ - "set" : { - "field" : "error.message", - "value" : "{{ _ingest.on_failure_message }}" - } - }] -} diff --git a/x-pack/filebeat/module/zeek/dnp3/ingest/pipeline.yml 
b/x-pack/filebeat/module/zeek/dnp3/ingest/pipeline.yml new file mode 100644 index 00000000000..ad4670dc350 --- /dev/null +++ b/x-pack/filebeat/module/zeek/dnp3/ingest/pipeline.yml @@ -0,0 +1,64 @@ +description: Pipeline for normalizing Zeek dnp3.log +processors: +- set: + field: event.created + value: '{{_ingest.timestamp}}' +- date: + field: zeek.dnp3.ts + formats: + - UNIX +- remove: + field: zeek.dnp3.ts +- set: + field: event.action + value: '{{zeek.dnp3.function.request}}' + if: "ctx?.zeek?.dnp3?.function?.request != null" +- set: + field: event.action + value: '{{zeek.dnp3.function.reply}}' + if: "ctx?.zeek?.dnp3?.function?.reply != null" +- lowercase: + field: event.action + ignore_missing: true +- geoip: + field: destination.ip + target_field: destination.geo +- geoip: + field: source.ip + target_field: source.geo +- geoip: + database_file: GeoLite2-ASN.mmdb + field: source.ip + target_field: source.as + properties: + - asn + - organization_name + ignore_missing: true +- geoip: + database_file: GeoLite2-ASN.mmdb + field: destination.ip + target_field: destination.as + properties: + - asn + - organization_name + ignore_missing: true +- rename: + field: source.as.asn + target_field: source.as.number + ignore_missing: true +- rename: + field: source.as.organization_name + target_field: source.as.organization.name + ignore_missing: true +- rename: + field: destination.as.asn + target_field: destination.as.number + ignore_missing: true +- rename: + field: destination.as.organization_name + target_field: destination.as.organization.name + ignore_missing: true +on_failure: +- set: + field: error.message + value: '{{ _ingest.on_failure_message }}' diff --git a/x-pack/filebeat/module/zeek/dnp3/manifest.yml b/x-pack/filebeat/module/zeek/dnp3/manifest.yml index 73488debb12..98de1c3af82 100644 --- a/x-pack/filebeat/module/zeek/dnp3/manifest.yml +++ b/x-pack/filebeat/module/zeek/dnp3/manifest.yml @@ -13,5 +13,5 @@ var: - name: community_id default: true -ingest_pipeline: ingest/pipeline.json +ingest_pipeline: ingest/pipeline.yml input: config/dnp3.yml diff --git a/x-pack/filebeat/module/zeek/dnp3/test/dnp3-json.log-expected.json b/x-pack/filebeat/module/zeek/dnp3/test/dnp3-json.log-expected.json index 040dabff377..fa386feb1ce 100644 --- a/x-pack/filebeat/module/zeek/dnp3/test/dnp3-json.log-expected.json +++ b/x-pack/filebeat/module/zeek/dnp3/test/dnp3-json.log-expected.json @@ -4,9 +4,19 @@ "destination.address": "127.0.0.1", "destination.ip": "127.0.0.1", "destination.port": 20000, + "event.action": "read", + "event.category": [ + "network" + ], "event.dataset": "zeek.dnp3", "event.id": "CQV6tj1w1t4WzQpHoe", + "event.kind": "event", "event.module": "zeek", + "event.type": [ + "connection", + "protocol", + "info" + ], "fileset.name": "dnp3", "input.type": "log", "log.offset": 0, diff --git a/x-pack/filebeat/module/zeek/dns/config/dns.yml b/x-pack/filebeat/module/zeek/dns/config/dns.yml index 96e67d9f840..7b4c332f5df 100644 --- a/x-pack/filebeat/module/zeek/dns/config/dns.yml +++ b/x-pack/filebeat/module/zeek/dns/config/dns.yml @@ -13,6 +13,11 @@ processors: - decode_json_fields: fields: [event.original] target: zeek.dns + - registered_domain: + ignore_missing: true + ignore_failure: true + field: zeek.dns.query + target_field: dns.question.registered_domain - script: lang: javascript id: zeek_dns_flags @@ -105,12 +110,54 @@ processors: evt.Put("event.duration", rttSec * 1000000000); } + function addTopLevelDomain(evt) { + var rd = evt.Get("dns.question.registered_domain"); + if (!rd) { + 
return; + } + var firstPeriod = rd.indexOf("."); + if (firstPeriod == -1) { + return; + } + evt.Put("dns.question.top_level_domain", rd.substr(firstPeriod + 1)); + } + + function addEventOutcome(evt) { + var rcode = evt.Get("zeek.dns.rcode"); + if (rcode == null) { + return; + } + if (rcode == 0) { + evt.Put("event.outcome", "success"); + } else { + evt.Put("event.outcome", "failure"); + } + } + + function addRelatedIP(evt) { + var related = []; + var src = evt.Get("zeek.dns.id.orig_h"); + if (src != null) { + related.push(src); + } + var dst = evt.Get("zeek.dns.id.resp_h"); + if (dst != null) { + related.push(dst); + } + if (related.length > 0) { + evt.Put("related.ip", related); + } + } + function process(evt) { addDnsHeaderFlags(evt); addDnsQuestionClass(evt); addDnsAnswers(evt); setDnsType(evt); addEventDuration(evt); + addTopLevelDomain(evt); + addEventOutcome(evt); + addRelatedIP(evt); } - convert: ignore_missing: true @@ -136,13 +183,18 @@ processors: - {from: zeek.dns.query, to: dns.question.name} - {from: zeek.dns.qtype_name, to: dns.question.type} - {from: zeek.dns.rcode_name, to: dns.response_code} - - registered_domain: - ignore_missing: true - ignore_failure: true - field: dns.question.name - target_field: dns.question.registered_domain + - add_fields: + target: event + fields: + kind: event + category: + - network + type: + - connection + - info + - protocol {{ if .community_id }} - - community_id: ~ + - community_id: {{ end }} - timestamp: ignore_missing: true diff --git a/x-pack/filebeat/module/zeek/dns/test/dns-json.log-expected.json b/x-pack/filebeat/module/zeek/dns/test/dns-json.log-expected.json index a8e2cd94b3a..0c01c52e428 100644 --- a/x-pack/filebeat/module/zeek/dns/test/dns-json.log-expected.json +++ b/x-pack/filebeat/module/zeek/dns/test/dns-json.log-expected.json @@ -26,22 +26,37 @@ "dns.question.class": "IN", "dns.question.name": "dd625ffb4fc54735b281862aa1cd6cd4.us-west1.gcp.cloud.es.io", "dns.question.registered_domain": "es.io", + "dns.question.top_level_domain": "io", "dns.question.type": "A", "dns.resolved_ip": [ "35.199.178.4" ], "dns.response_code": "NOERROR", "dns.type": "answer", + "event.category": [ + "network" + ], "event.dataset": "zeek.dns", "event.duration": 76967000, "event.id": "CAcJw21BbVedgFnYH3", + "event.kind": "event", "event.module": "zeek", "event.original": "{\"ts\":1547188415.857497,\"uid\":\"CAcJw21BbVedgFnYH3\",\"id.orig_h\":\"192.168.86.167\",\"id.orig_p\":38339,\"id.resp_h\":\"192.168.86.1\",\"id.resp_p\":53,\"proto\":\"udp\",\"trans_id\":15209,\"rtt\":0.076967,\"query\":\"dd625ffb4fc54735b281862aa1cd6cd4.us-west1.gcp.cloud.es.io\",\"qclass\":1,\"qclass_name\":\"C_INTERNET\",\"qtype\":1,\"qtype_name\":\"A\",\"rcode\":0,\"rcode_name\":\"NOERROR\",\"AA\":false,\"TC\":false,\"RD\":true,\"RA\":true,\"Z\":0,\"answers\":[\"proxy-production-us-west1.gcp.cloud.es.io\",\"proxy-production-us-west1-v1-009.gcp.cloud.es.io\",\"35.199.178.4\"],\"TTLs\":[119.0,119.0,59.0],\"rejected\":false}", + "event.outcome": "success", + "event.type": [ + "connection", + "info", + "protocol" + ], "fileset.name": "dns", "input.type": "log", "log.offset": 0, "network.community_id": "1:Z26DBGVYoBKQ1FT6qfPaAqBnJik=", "network.transport": "udp", + "related.ip": [ + "192.168.86.167", + "192.168.86.1" + ], "service.type": "zeek", "source.address": "192.168.86.167", "source.ip": "192.168.86.167", @@ -84,17 +99,31 @@ "dns.question.class": "IN", "dns.question.name": "_googlecast._tcp.local", "dns.question.registered_domain": "_tcp.local", + 
"dns.question.top_level_domain": "local", "dns.question.type": "PTR", "dns.type": "query", + "event.category": [ + "network" + ], "event.dataset": "zeek.dns", "event.id": "C19a1k4lTv46YMbeOk", + "event.kind": "event", "event.module": "zeek", "event.original": "{\"ts\":1567095830.680046,\"uid\":\"C19a1k4lTv46YMbeOk\",\"id.orig_h\":\"fe80::4ef:15cf:769f:ff21\",\"id.orig_p\":5353,\"id.resp_h\":\"ff02::fb\",\"id.resp_p\":5353,\"proto\":\"udp\",\"trans_id\":0,\"query\":\"_googlecast._tcp.local\",\"qclass\":1,\"qclass_name\":\"C_INTERNET\",\"qtype\":12,\"qtype_name\":\"PTR\",\"AA\":false,\"TC\":false,\"RD\":false,\"RA\":false,\"Z\":0,\"rejected\":false}", + "event.type": [ + "connection", + "info", + "protocol" + ], "fileset.name": "dns", "input.type": "log", "log.offset": 566, "network.community_id": "1:Jq0sRtlGSMjsvMBE1ZYybbR2tI0=", "network.transport": "udp", + "related.ip": [ + "fe80::4ef:15cf:769f:ff21", + "ff02::fb" + ], "service.type": "zeek", "source.address": "fe80::4ef:15cf:769f:ff21", "source.ip": "fe80::4ef:15cf:769f:ff21", @@ -130,17 +159,32 @@ "dns.id": 0, "dns.question.name": "_googlecast._tcp.local", "dns.question.registered_domain": "_tcp.local", + "dns.question.top_level_domain": "local", "dns.response_code": "NOERROR", "dns.type": "answer", + "event.category": [ + "network" + ], "event.dataset": "zeek.dns", "event.id": "CdiVAw7jJw6gsX5H", + "event.kind": "event", "event.module": "zeek", "event.original": "{\"ts\":1567095830.734329,\"uid\":\"CdiVAw7jJw6gsX5H\",\"id.orig_h\":\"192.168.86.237\",\"id.orig_p\":5353,\"id.resp_h\":\"224.0.0.251\",\"id.resp_p\":5353,\"proto\":\"udp\",\"trans_id\":0,\"query\":\"_googlecast._tcp.local\",\"rcode\":0,\"rcode_name\":\"NOERROR\",\"AA\":true,\"TC\":false,\"RD\":false,\"RA\":false,\"Z\":0,\"answers\":[\"bravia-4k-gb-5c89f865c9d569ab338815b35e3acc56._googlecast._tcp.local\"],\"TTLs\":[120.0],\"rejected\":false}", + "event.outcome": "success", + "event.type": [ + "connection", + "info", + "protocol" + ], "fileset.name": "dns", "input.type": "log", "log.offset": 909, "network.community_id": "1:QIR5YXlirWwWA18ZyY/RnvQoaic=", "network.transport": "udp", + "related.ip": [ + "192.168.86.237", + "224.0.0.251" + ], "service.type": "zeek", "source.address": "192.168.86.237", "source.ip": "192.168.86.237", diff --git a/x-pack/filebeat/module/zeek/dpd/config/dpd.yml b/x-pack/filebeat/module/zeek/dpd/config/dpd.yml index 9e6a0138ef2..0a31b70f6bd 100644 --- a/x-pack/filebeat/module/zeek/dpd/config/dpd.yml +++ b/x-pack/filebeat/module/zeek/dpd/config/dpd.yml @@ -36,10 +36,22 @@ processors: ignore_missing: true fail_on_error: false - + - convert: + fields: + - {from: "source.address", to: "source.ip", type: "ip"} + - {from: "destination.address", to: "destination.ip", type: "ip"} + - {from: "zeek.session_id", to: "event.id"} + ignore_missing: true + fail_on_error: false + - add_fields: + target: event + fields: + kind: event + category: + - network + type: + - connection + - info {{ if .community_id }} - community_id: - fields: - source_ip: source.address - destination_ip: destination.address {{ end }} diff --git a/x-pack/filebeat/module/zeek/dpd/ingest/pipeline.json b/x-pack/filebeat/module/zeek/dpd/ingest/pipeline.json deleted file mode 100644 index 7a8958013fc..00000000000 --- a/x-pack/filebeat/module/zeek/dpd/ingest/pipeline.json +++ /dev/null @@ -1,47 +0,0 @@ -{ - "description": "Pipeline for normalizing Zeek dpd.log", - "processors": [ - { - "set": { - "field": "event.created", - "value": "{{_ingest.timestamp}}" - } - }, - { - "date": { - "field": 
"zeek.dpd.ts", - "formats": ["UNIX"] - } - }, - { - "remove": { - "field": "zeek.dpd.ts" - } - }, - { - "set": { - "field": "event.id", - "value": "{{zeek.session_id}}", - "if": "ctx.zeek.session_id != null" - } - }, - { - "set": { - "field": "source.ip", - "value": "{{source.address}}" - } - }, - { - "set": { - "field": "destination.ip", - "value": "{{destination.address}}" - } - } - ], - "on_failure" : [{ - "set" : { - "field" : "error.message", - "value" : "{{ _ingest.on_failure_message }}" - } - }] -} diff --git a/x-pack/filebeat/module/zeek/dpd/ingest/pipeline.yml b/x-pack/filebeat/module/zeek/dpd/ingest/pipeline.yml new file mode 100644 index 00000000000..f30ff172fa8 --- /dev/null +++ b/x-pack/filebeat/module/zeek/dpd/ingest/pipeline.yml @@ -0,0 +1,63 @@ +description: Pipeline for normalizing Zeek dpd.log +processors: +- set: + field: event.created + value: '{{_ingest.timestamp}}' +- date: + field: zeek.dpd.ts + formats: + - UNIX +- remove: + field: zeek.dpd.ts +- geoip: + field: source.ip + target_field: source.geo + ignore_missing: true +- geoip: + field: destination.ip + target_field: destination.geo + ignore_missing: true +- geoip: + database_file: GeoLite2-ASN.mmdb + field: source.ip + target_field: source.as + properties: + - asn + - organization_name + ignore_missing: true +- geoip: + database_file: GeoLite2-ASN.mmdb + field: destination.ip + target_field: destination.as + properties: + - asn + - organization_name + ignore_missing: true +- rename: + field: source.as.asn + target_field: source.as.number + ignore_missing: true +- rename: + field: source.as.organization_name + target_field: source.as.organization.name + ignore_missing: true +- rename: + field: destination.as.asn + target_field: destination.as.number + ignore_missing: true +- rename: + field: destination.as.organization_name + target_field: destination.as.organization.name + ignore_missing: true +- append: + field: related.ip + value: "{{source.ip}}" + if: "ctx?.source?.ip != null" +- append: + field: related.ip + value: "{{destination.ip}}" + if: "ctx?.destination?.ip != null" +on_failure: +- set: + field: error.message + value: '{{ _ingest.on_failure_message }}' diff --git a/x-pack/filebeat/module/zeek/dpd/manifest.yml b/x-pack/filebeat/module/zeek/dpd/manifest.yml index b331bca2921..aeba0ef31fc 100644 --- a/x-pack/filebeat/module/zeek/dpd/manifest.yml +++ b/x-pack/filebeat/module/zeek/dpd/manifest.yml @@ -13,5 +13,5 @@ var: - name: community_id default: true -ingest_pipeline: ingest/pipeline.json +ingest_pipeline: ingest/pipeline.yml input: config/dpd.yml diff --git a/x-pack/filebeat/module/zeek/dpd/test/dpd-json.log-expected.json b/x-pack/filebeat/module/zeek/dpd/test/dpd-json.log-expected.json index d3f58dbd4e0..0d6173e172e 100644 --- a/x-pack/filebeat/module/zeek/dpd/test/dpd-json.log-expected.json +++ b/x-pack/filebeat/module/zeek/dpd/test/dpd-json.log-expected.json @@ -4,14 +4,26 @@ "destination.address": "192.168.10.10", "destination.ip": "192.168.10.10", "destination.port": 445, + "event.category": [ + "network" + ], "event.dataset": "zeek.dpd", "event.id": "CRrT7S1ccw9H6hzCR", + "event.kind": "event", "event.module": "zeek", + "event.type": [ + "connection", + "info" + ], "fileset.name": "dpd", "input.type": "log", "log.offset": 0, "network.community_id": "1:b+Szw+ia464igf5e+MwW1WUzw9Y=", "network.transport": "tcp", + "related.ip": [ + "192.168.10.31", + "192.168.10.10" + ], "service.type": "zeek", "source.address": "192.168.10.31", "source.ip": "192.168.10.31", diff --git 
a/x-pack/filebeat/module/zeek/files/config/files.yml b/x-pack/filebeat/module/zeek/files/config/files.yml index 7148b82a481..74259307f41 100644 --- a/x-pack/filebeat/module/zeek/files/config/files.yml +++ b/x-pack/filebeat/module/zeek/files/config/files.yml @@ -15,9 +15,25 @@ processors: fields: - from: "json" to: "zeek.files" - - from: "zeek.files.conn_uids" to: "zeek.files.session_ids" - ignore_missing: true fail_on_error: false + - convert: + fields: + - {from: "zeek.files.mime_type", to: "file.mime_type"} + - {from: "zeek.files.filename", to: "file.name"} + - {from: "zeek.files.total_bytes", to: "file.size"} + - {from: "zeek.files.md5", to: "file.hash.md5"} + - {from: "zeek.files.sha1", to: "file.hash.sha1"} + - {from: "zeek.files.sha256", to: "file.hash.sha256"} + ignore_missing: true + fail_on_error: false + - add_fields: + target: event + fields: + kind: event + category: + - file + type: + - info diff --git a/x-pack/filebeat/module/zeek/files/ingest/pipeline.json b/x-pack/filebeat/module/zeek/files/ingest/pipeline.json deleted file mode 100644 index 1c47b4d0b42..00000000000 --- a/x-pack/filebeat/module/zeek/files/ingest/pipeline.json +++ /dev/null @@ -1,57 +0,0 @@ -{ - "description": "Pipeline for normalizing Zeek files.log", - "processors": [ - { - "set": { - "field": "event.created", - "value": "{{_ingest.timestamp}}" - } - }, - { - "date": { - "field": "zeek.files.ts", - "formats": ["UNIX"] - } - }, - { - "remove": { - "field": "zeek.files.ts" - } - }, - { - "script": { - "lang": "painless", - "source": "ctx.zeek.session_id = ctx.zeek.files.session_ids[0];", - "if": "ctx.zeek.files.session_ids != null", - "ignore_failure": true - } - }, - { - "script": { - "lang": "painless", - "source": "ctx.zeek.files.rx_host = ctx.zeek.files.rx_hosts[0]; ctx.zeek.files.remove('rx_hosts');", - "ignore_failure": true - } - }, - { - "script": { - "lang": "painless", - "source": "ctx.zeek.files.tx_host = ctx.zeek.files.tx_hosts[0]; ctx.zeek.files.remove('tx_hosts');", - "ignore_failure": true - } - }, - { - "set": { - "field": "event.id", - "value": "{{zeek.session_id}}", - "if": "ctx.zeek.session_id != null" - } - } - ], - "on_failure": [{ - "set": { - "field": "error.message", - "value": "{{ _ingest.on_failure_message }}" - } - }] -} diff --git a/x-pack/filebeat/module/zeek/files/ingest/pipeline.yml b/x-pack/filebeat/module/zeek/files/ingest/pipeline.yml new file mode 100644 index 00000000000..0d5abf9bdda --- /dev/null +++ b/x-pack/filebeat/module/zeek/files/ingest/pipeline.yml @@ -0,0 +1,66 @@ +description: Pipeline for normalizing Zeek files.log +processors: +- set: + field: event.created + value: '{{_ingest.timestamp}}' +- date: + field: zeek.files.ts + formats: + - UNIX +- remove: + field: zeek.files.ts +- script: + lang: painless + source: ctx.zeek.session_id = ctx.zeek.files.session_ids[0]; + if: ctx.zeek.files.session_ids != null + ignore_failure: true +- set: + field: event.id + value: '{{zeek.session_id}}' + if: ctx.zeek.session_id != null +- foreach: + field: zeek.files.tx_hosts + processor: + append: + field: related.ip + value: "{{_ingest._value}}" + ignore_missing: true +- script: + lang: painless + source: ctx.zeek.files.tx_host = ctx.zeek.files.tx_hosts[0]; ctx.zeek.files.remove('tx_hosts'); + ignore_failure: true +- set: + field: server.ip + value: "{{zeek.files.tx_host}}" + if: "ctx?.zeek?.files?.tx_host != null" +- foreach: + field: zeek.files.rx_hosts + processor: + append: + field: related.ip + value: "{{_ingest._value}}" + ignore_missing: true +- script: + lang: painless + 
source: ctx.zeek.files.rx_host = ctx.zeek.files.rx_hosts[0]; ctx.zeek.files.remove('rx_hosts'); + ignore_failure: true +- set: + field: client.ip + value: "{{zeek.files.rx_host}}" + if: "ctx?.zeek?.files?.rx_host != null" +- append: + field: related.hash + value: "{{file.hash.md5}}" + if: "ctx?.file?.hash?.md5 != null" +- append: + field: related.hash + value: "{{file.hash.sha1}}" + if: "ctx?.file?.hash?.sha1 != null" +- append: + field: related.hash + value: "{{file.hash.sha256}}" + if: "ctx?.file?.hash?.sha256 != null" +on_failure: +- set: + field: error.message + value: '{{ _ingest.on_failure_message }}' diff --git a/x-pack/filebeat/module/zeek/files/manifest.yml b/x-pack/filebeat/module/zeek/files/manifest.yml index 68b53467346..bef3d7211b6 100644 --- a/x-pack/filebeat/module/zeek/files/manifest.yml +++ b/x-pack/filebeat/module/zeek/files/manifest.yml @@ -13,7 +13,7 @@ var: - name: community_id default: true -ingest_pipeline: ingest/pipeline.json +ingest_pipeline: ingest/pipeline.yml input: config/files.yml requires.processors: diff --git a/x-pack/filebeat/module/zeek/files/test/files-json.log-expected.json b/x-pack/filebeat/module/zeek/files/test/files-json.log-expected.json index 4cc0e2d38e0..6fc38a5d22a 100644 --- a/x-pack/filebeat/module/zeek/files/test/files-json.log-expected.json +++ b/x-pack/filebeat/module/zeek/files/test/files-json.log-expected.json @@ -1,12 +1,32 @@ [ { "@timestamp": "2019-01-17T01:33:16.636Z", + "client.ip": "10.178.98.102", + "event.category": [ + "file" + ], "event.dataset": "zeek.files", "event.id": "C8I0zn3r9EPbfLgta6", + "event.kind": "event", "event.module": "zeek", + "event.type": [ + "info" + ], + "file.hash.md5": "79e4a9840d7d3a96d7c04fe2434c892e", + "file.hash.sha1": "a8985d3a65e5e5c4b2d7d66d40c6dd2fb19c5436", + "file.mime_type": "application/pkix-cert", "fileset.name": "files", "input.type": "log", "log.offset": 0, + "related.hash": [ + "79e4a9840d7d3a96d7c04fe2434c892e", + "a8985d3a65e5e5c4b2d7d66d40c6dd2fb19c5436" + ], + "related.ip": [ + "35.199.178.4", + "10.178.98.102" + ], + "server.ip": "35.199.178.4", "service.type": "zeek", "tags": [ "zeek.files" @@ -38,12 +58,32 @@ }, { "@timestamp": "2019-01-17T01:33:21.566Z", + "client.ip": "10.178.98.102", + "event.category": [ + "file" + ], "event.dataset": "zeek.files", "event.id": "C6sjVo23iNApLnlAt6", + "event.kind": "event", "event.module": "zeek", + "event.type": [ + "info" + ], + "file.hash.md5": "b9742f12eb97eff531d94f7800c6706c", + "file.hash.sha1": "b88d13fe319d342e7a808ce3a0a1158111fc3c2a", + "file.mime_type": "application/pkix-cert", "fileset.name": "files", "input.type": "log", "log.offset": 452, + "related.hash": [ + "b9742f12eb97eff531d94f7800c6706c", + "b88d13fe319d342e7a808ce3a0a1158111fc3c2a" + ], + "related.ip": [ + "17.134.127.250", + "10.178.98.102" + ], + "server.ip": "17.134.127.250", "service.type": "zeek", "tags": [ "zeek.files" diff --git a/x-pack/filebeat/module/zeek/ftp/config/ftp.yml b/x-pack/filebeat/module/zeek/ftp/config/ftp.yml index 7c9e90cb96a..3e91ace4831 100644 --- a/x-pack/filebeat/module/zeek/ftp/config/ftp.yml +++ b/x-pack/filebeat/module/zeek/ftp/config/ftp.yml @@ -60,10 +60,27 @@ processors: ignore_missing: true fail_on_error: false - + - convert: + fields: + - {from: "zeek.session_id", to: "event.id"} + - {from: "source.address", to: "source.ip", type: "ip"} + - {from: "destination.address", to: "destination.ip", type: "ip"} + - {from: "zeek.ftp.user", to: "user.name"} + - {from: "zeek.ftp.command", to: "event.action"} + - {from: "zeek.ftp.mime.type", to: 
"file.mime_type"} + - {from: "zeek.ftp.file.size", to: "file.size"} + ignore_missing: true + fail_on_error: false + - add_fields: + target: event + fields: + kind: event + category: + - network + type: + - connection + - info + - protocol {{ if .community_id }} - community_id: - fields: - source_ip: source.address - destination_ip: destination.address {{ end }} diff --git a/x-pack/filebeat/module/zeek/ftp/ingest/pipeline.json b/x-pack/filebeat/module/zeek/ftp/ingest/pipeline.json deleted file mode 100644 index 06b896b53d3..00000000000 --- a/x-pack/filebeat/module/zeek/ftp/ingest/pipeline.json +++ /dev/null @@ -1,53 +0,0 @@ -{ - "description": "Pipeline for normalizing Zeek ftp.log", - "processors": [ - { - "set": { - "field": "event.created", - "value": "{{_ingest.timestamp}}" - } - }, - { - "date": { - "field": "zeek.ftp.ts", - "formats": ["UNIX"] - } - }, - { - "remove": { - "field": "zeek.ftp.ts" - } - }, - { - "set": { - "field": "event.id", - "value": "{{zeek.session_id}}", - "if": "ctx.zeek.session_id != null" - } - }, - { - "set": { - "field": "source.ip", - "value": "{{source.address}}" - } - }, - { - "set": { - "field": "destination.ip", - "value": "{{destination.address}}" - } - }, - { - "dot_expander": { - "field": "data_channel.passive", - "path": "zeek.ftp" - } - } - ], - "on_failure" : [{ - "set" : { - "field" : "error.message", - "value" : "{{ _ingest.on_failure_message }}" - } - }] -} diff --git a/x-pack/filebeat/module/zeek/ftp/ingest/pipeline.yml b/x-pack/filebeat/module/zeek/ftp/ingest/pipeline.yml new file mode 100644 index 00000000000..7c15dce3ac5 --- /dev/null +++ b/x-pack/filebeat/module/zeek/ftp/ingest/pipeline.yml @@ -0,0 +1,68 @@ +description: Pipeline for normalizing Zeek ftp.log +processors: +- set: + field: event.created + value: '{{_ingest.timestamp}}' +- date: + field: zeek.ftp.ts + formats: + - UNIX +- remove: + field: zeek.ftp.ts +- dot_expander: + field: data_channel.passive + path: zeek.ftp +- append: + field: related.ip + value: "{{source.ip}}" + if: "ctx?.source?.ip != null" +- append: + field: related.ip + value: "{{destination.ip}}" + if: "ctx?.destination?.ip != null" +- append: + field: related.user + value: "{{user.name}}" + if: "ctx?.user?.name != null" +- geoip: + field: destination.ip + target_field: destination.geo +- geoip: + field: source.ip + target_field: source.geo +- geoip: + database_file: GeoLite2-ASN.mmdb + field: source.ip + target_field: source.as + properties: + - asn + - organization_name + ignore_missing: true +- geoip: + database_file: GeoLite2-ASN.mmdb + field: destination.ip + target_field: destination.as + properties: + - asn + - organization_name + ignore_missing: true +- rename: + field: source.as.asn + target_field: source.as.number + ignore_missing: true +- rename: + field: source.as.organization_name + target_field: source.as.organization.name + ignore_missing: true +- rename: + field: destination.as.asn + target_field: destination.as.number + ignore_missing: true +- rename: + field: destination.as.organization_name + target_field: destination.as.organization.name + ignore_missing: true +on_failure: +- set: + field: error.message + value: '{{ _ingest.on_failure_message }}' diff --git a/x-pack/filebeat/module/zeek/ftp/manifest.yml b/x-pack/filebeat/module/zeek/ftp/manifest.yml index 3dd47573af9..cf51575cf84 100644 --- a/x-pack/filebeat/module/zeek/ftp/manifest.yml +++ b/x-pack/filebeat/module/zeek/ftp/manifest.yml @@ -13,5 +13,5 @@ var: - name: community_id default: true -ingest_pipeline: ingest/pipeline.json 
+ingest_pipeline: ingest/pipeline.yml input: config/ftp.yml diff --git a/x-pack/filebeat/module/zeek/ftp/test/ftp.log-expected.json b/x-pack/filebeat/module/zeek/ftp/test/ftp.log-expected.json index 7de6cc8897c..e6a47bd369e 100644 --- a/x-pack/filebeat/module/zeek/ftp/test/ftp.log-expected.json +++ b/x-pack/filebeat/module/zeek/ftp/test/ftp.log-expected.json @@ -4,15 +4,32 @@ "destination.address": "192.168.1.231", "destination.ip": "192.168.1.231", "destination.port": 21, + "event.action": "EPSV", + "event.category": [ + "network" + ], "event.dataset": "zeek.ftp", "event.id": "CpQoCn3o28tke89zv9", + "event.kind": "event", "event.module": "zeek", + "event.type": [ + "connection", + "info", + "protocol" + ], "fileset.name": "ftp", "input.type": "log", "log.offset": 0, "network.community_id": "1:Szmpl33Czo3dQvU2V4/SrHfmBC0=", "network.protocol": "ftp", "network.transport": "tcp", + "related.ip": [ + "192.168.1.182", + "192.168.1.231" + ], + "related.user": [ + "ftp" + ], "service.type": "zeek", "source.address": "192.168.1.182", "source.ip": "192.168.1.182", @@ -20,6 +37,7 @@ "tags": [ "zeek.ftp" ], + "user.name": "ftp", "zeek.ftp.command": "EPSV", "zeek.ftp.data_channel.originating_host": "192.168.1.182", "zeek.ftp.data_channel.passive": true, @@ -36,15 +54,33 @@ "destination.address": "192.168.1.231", "destination.ip": "192.168.1.231", "destination.port": 21, + "event.action": "RETR", + "event.category": [ + "network" + ], "event.dataset": "zeek.ftp", "event.id": "CpQoCn3o28tke89zv9", + "event.kind": "event", "event.module": "zeek", + "event.type": [ + "connection", + "info", + "protocol" + ], + "file.size": 39424, "fileset.name": "ftp", "input.type": "log", "log.offset": 394, "network.community_id": "1:Szmpl33Czo3dQvU2V4/SrHfmBC0=", "network.protocol": "ftp", "network.transport": "tcp", + "related.ip": [ + "192.168.1.182", + "192.168.1.231" + ], + "related.user": [ + "ftp" + ], "service.type": "zeek", "source.address": "192.168.1.182", "source.ip": "192.168.1.182", @@ -52,6 +88,7 @@ "tags": [ "zeek.ftp" ], + "user.name": "ftp", "zeek.ftp.arg": "ftp://192.168.1.231/resume.doc", "zeek.ftp.command": "RETR", "zeek.ftp.file.size": 39424, @@ -66,15 +103,32 @@ "destination.address": "192.168.1.231", "destination.ip": "192.168.1.231", "destination.port": 21, + "event.action": "STOR", + "event.category": [ + "network" + ], "event.dataset": "zeek.ftp", "event.id": "CpQoCn3o28tke89zv9", + "event.kind": "event", "event.module": "zeek", + "event.type": [ + "connection", + "info", + "protocol" + ], "fileset.name": "ftp", "input.type": "log", "log.offset": 688, "network.community_id": "1:Szmpl33Czo3dQvU2V4/SrHfmBC0=", "network.protocol": "ftp", "network.transport": "tcp", + "related.ip": [ + "192.168.1.182", + "192.168.1.231" + ], + "related.user": [ + "ftp" + ], "service.type": "zeek", "source.address": "192.168.1.182", "source.ip": "192.168.1.182", @@ -82,6 +136,7 @@ "tags": [ "zeek.ftp" ], + "user.name": "ftp", "zeek.ftp.arg": "ftp://192.168.1.231/uploads/README", "zeek.ftp.command": "STOR", "zeek.ftp.password": "ftp", diff --git a/x-pack/filebeat/module/zeek/http/config/http.yml b/x-pack/filebeat/module/zeek/http/config/http.yml index 2c024397018..584160639cb 100644 --- a/x-pack/filebeat/module/zeek/http/config/http.yml +++ b/x-pack/filebeat/module/zeek/http/config/http.yml @@ -68,9 +68,26 @@ processors: ignore_missing: true fail_on_error: false + - convert: + fields: + - {from: "zeek.session_id", to: "event.id"} + - {from: "source.address", to: "source.ip", type: "ip"} + - {from: 
"destination.address", to: "destination.ip", type: "ip"} + - {from: "destination.port", to: "url.port"} + - {from: "http.request.method", to: "event.action"} + ignore_missing: true + fail_on_error: false + - add_fields: + target: event + fields: + kind: event + category: + - network + - web + type: + - connection + - info + - protocol {{ if .community_id }} - community_id: - fields: - source_ip: source.address - destination_ip: destination.address {{ end }} diff --git a/x-pack/filebeat/module/zeek/http/ingest/pipeline.json b/x-pack/filebeat/module/zeek/http/ingest/pipeline.json deleted file mode 100644 index af771f8c745..00000000000 --- a/x-pack/filebeat/module/zeek/http/ingest/pipeline.json +++ /dev/null @@ -1,123 +0,0 @@ -{ - "description": "Pipeline for normalizing Zeek http.log", - "processors": [ - { - "set": { - "field": "event.created", - "value": "{{_ingest.timestamp}}" - } - }, - { - "date": { - "field": "zeek.http.ts", - "formats": ["UNIX"] - } - }, - { - "remove": { - "field": "zeek.http.ts" - } - }, - { - "set": { - "field": "event.id", - "value": "{{zeek.session_id}}", - "if": "ctx.zeek.session_id != null" - } - }, - { - "set": { - "field": "source.ip", - "value": "{{source.address}}" - } - }, - { - "set": { - "field": "destination.ip", - "value": "{{destination.address}}" - } - }, - { - "set": { - "field": "url.port", - "value": "{{destination.port}}" - } - }, - { - "geoip": { - "field": "destination.ip", - "target_field": "destination.geo" - } - }, - { - "geoip": { - "field": "source.ip", - "target_field": "source.geo" - } - }, - { - "geoip": { - "database_file": "GeoLite2-ASN.mmdb", - "field": "source.ip", - "target_field": "source.as", - "properties": [ - "asn", - "organization_name" - ], - "ignore_missing": true - } - }, - { - "geoip": { - "database_file": "GeoLite2-ASN.mmdb", - "field": "destination.ip", - "target_field": "destination.as", - "properties": [ - "asn", - "organization_name" - ], - "ignore_missing": true - } - }, - { - "rename": { - "field": "source.as.asn", - "target_field": "source.as.number", - "ignore_missing": true - } - }, - { - "rename": { - "field": "source.as.organization_name", - "target_field": "source.as.organization.name", - "ignore_missing": true - } - }, - { - "rename": { - "field": "destination.as.asn", - "target_field": "destination.as.number", - "ignore_missing": true - } - }, - { - "rename": { - "field": "destination.as.organization_name", - "target_field": "destination.as.organization.name", - "ignore_missing": true - } - }, - { - "user_agent": { - "field": "user_agent.original", - "ignore_missing": true - } - } - ], - "on_failure": [{ - "set": { - "field": "error.message", - "value": "{{ _ingest.on_failure_message }}" - } - }] -} diff --git a/x-pack/filebeat/module/zeek/http/ingest/pipeline.yml b/x-pack/filebeat/module/zeek/http/ingest/pipeline.yml new file mode 100644 index 00000000000..62ffef0db45 --- /dev/null +++ b/x-pack/filebeat/module/zeek/http/ingest/pipeline.yml @@ -0,0 +1,82 @@ +description: Pipeline for normalizing Zeek http.log +processors: +- set: + field: event.created + value: '{{_ingest.timestamp}}' +- date: + field: zeek.http.ts + formats: + - UNIX +- remove: + field: zeek.http.ts +- geoip: + field: destination.ip + target_field: destination.geo +- geoip: + field: source.ip + target_field: source.geo +- geoip: + database_file: GeoLite2-ASN.mmdb + field: source.ip + target_field: source.as + properties: + - asn + - organization_name + ignore_missing: true +- geoip: + database_file: GeoLite2-ASN.mmdb + field: 
destination.ip + target_field: destination.as + properties: + - asn + - organization_name + ignore_missing: true +- rename: + field: source.as.asn + target_field: source.as.number + ignore_missing: true +- rename: + field: source.as.organization_name + target_field: source.as.organization.name + ignore_missing: true +- rename: + field: destination.as.asn + target_field: destination.as.number + ignore_missing: true +- rename: + field: destination.as.organization_name + target_field: destination.as.organization.name + ignore_missing: true +- user_agent: + field: user_agent.original + ignore_missing: true +- lowercase: + field: "http.request.method" + ignore_missing: true +- lowercase: + field: "event.action" + ignore_missing: true +- set: + field: event.outcome + value: success + if: "ctx?.http?.response?.status_code != null && ctx.http.response.status_code < 400" +- set: + field: event.outcome + value: failure + if: "ctx?.http?.response?.status_code != null && ctx.http.response.status_code >= 400" +- append: + field: related.ip + value: "{{source.ip}}" + if: "ctx?.source?.ip != null" +- append: + field: related.ip + value: "{{destination.ip}}" + if: "ctx?.destination?.ip != null" +- append: + field: related.user + value: "{{url.username}}" + if: "ctx?.url?.username != null" +on_failure: +- set: + field: error.message + value: '{{ _ingest.on_failure_message }}' diff --git a/x-pack/filebeat/module/zeek/http/manifest.yml b/x-pack/filebeat/module/zeek/http/manifest.yml index a9ceabbaaa1..ddd253bb218 100644 --- a/x-pack/filebeat/module/zeek/http/manifest.yml +++ b/x-pack/filebeat/module/zeek/http/manifest.yml @@ -13,7 +13,7 @@ var: - name: community_id default: true -ingest_pipeline: ingest/pipeline.json +ingest_pipeline: ingest/pipeline.yml input: config/http.yml requires.processors: diff --git a/x-pack/filebeat/module/zeek/http/test/http-json.log-expected.json b/x-pack/filebeat/module/zeek/http/test/http-json.log-expected.json index 20d3fedb1c7..ee72065d771 100644 --- a/x-pack/filebeat/module/zeek/http/test/http-json.log-expected.json +++ b/x-pack/filebeat/module/zeek/http/test/http-json.log-expected.json @@ -13,12 +13,24 @@ "destination.geo.region_name": "California", "destination.ip": "17.253.5.203", "destination.port": 80, + "event.action": "get", + "event.category": [ + "network", + "web" + ], "event.dataset": "zeek.http", "event.id": "CCNp8v1SNzY7v9d1Ih", + "event.kind": "event", "event.module": "zeek", + "event.outcome": "success", + "event.type": [ + "connection", + "info", + "protocol" + ], "fileset.name": "http", "http.request.body.bytes": 0, - "http.request.method": "GET", + "http.request.method": "get", "http.response.body.bytes": 3735, "http.response.status_code": 200, "http.version": "1.1", @@ -26,6 +38,10 @@ "log.offset": 0, "network.community_id": "1:dtBPRfpKEZyg1iOHss95buwv+cw=", "network.transport": "tcp", + "related.ip": [ + "10.178.98.102", + "17.253.5.203" + ], "service.type": "zeek", "source.address": "10.178.98.102", "source.ip": "10.178.98.102", @@ -35,7 +51,7 @@ ], "url.domain": "ocsp.apple.com", "url.original": "/ocsp04-aaica02/ME4wTKADAgEAMEUwQzBBMAkGBSsOAwIaBQAEFNqvF+Za6oA4ceFRLsAWwEInjUhJBBQx6napI3Sl39T97qDBpp7GEQ4R7AIIUP1IOZZ86ns=", - "url.port": "80", + "url.port": 80, "user_agent.device.name": "Other", "user_agent.name": "Other", "user_agent.original": "com.apple.trustd/2.0", diff --git a/x-pack/filebeat/module/zeek/intel/config/intel.yml b/x-pack/filebeat/module/zeek/intel/config/intel.yml index 38fe388bec0..2896ed72db9 100644 --- 
a/x-pack/filebeat/module/zeek/intel/config/intel.yml +++ b/x-pack/filebeat/module/zeek/intel/config/intel.yml @@ -61,3 +61,12 @@ processors: - zeek.intel.id.orig_p - zeek.intel.id.resp_h - zeek.intel.id.resp_p + - add_fields: + target: event + fields: + kind: alert + type: + - info +{{ if .community_id }} + - community_id: +{{ end }} diff --git a/x-pack/filebeat/module/zeek/intel/ingest/pipeline.yml b/x-pack/filebeat/module/zeek/intel/ingest/pipeline.yml index 512cf67ff93..6a2bd6382ad 100644 --- a/x-pack/filebeat/module/zeek/intel/ingest/pipeline.yml +++ b/x-pack/filebeat/module/zeek/intel/ingest/pipeline.yml @@ -66,6 +66,15 @@ processors: field: destination.as.organization_name target_field: destination.as.organization.name ignore_missing: true + - append: + field: "related.ip" + value: "{{source.ip}}" + if: "ctx?.source?.ip != null" + - append: + field: "related.ip" + value: "{{destination.ip}}" + if: "ctx?.destination?.ip != null" + on_failure: - set: field: error.message diff --git a/x-pack/filebeat/module/zeek/intel/test/intel-json.log-expected.json b/x-pack/filebeat/module/zeek/intel/test/intel-json.log-expected.json index 1b2ac5464bf..d9de4e04efd 100644 --- a/x-pack/filebeat/module/zeek/intel/test/intel-json.log-expected.json +++ b/x-pack/filebeat/module/zeek/intel/test/intel-json.log-expected.json @@ -12,11 +12,19 @@ "destination.ip": "198.41.0.4", "destination.port": 53, "event.dataset": "zeek.intel", + "event.kind": "alert", "event.module": "zeek", "event.original": "{\"ts\":1573030980.989353,\"uid\":\"Ctefoj1tgOPt4D0EK2\",\"id.orig_h\":\"192.168.1.1\",\"id.orig_p\":37598,\"id.resp_h\":\"198.41.0.4\",\"id.resp_p\":53,\"seen.indicator\":\"198.41.0.4\",\"seen.indicator_type\":\"Intel::ADDR\",\"seen.where\":\"Conn::IN_RESP\",\"seen.node\":\"worker-1-2\",\"matched\":[\"Intel::ADDR\"],\"sources\":[\"ETPRO Rep: AbusedTLD Score: 127\"]}", + "event.type": [ + "info" + ], "fileset.name": "intel", "input.type": "log", "log.offset": 0, + "related.ip": [ + "192.168.1.1", + "198.41.0.4" + ], "service.type": "zeek", "source.address": "192.168.1.1", "source.ip": "192.168.1.1", diff --git a/x-pack/filebeat/module/zeek/irc/config/irc.yml b/x-pack/filebeat/module/zeek/irc/config/irc.yml index 1ee45c0dc57..4d5783b8087 100644 --- a/x-pack/filebeat/module/zeek/irc/config/irc.yml +++ b/x-pack/filebeat/module/zeek/irc/config/irc.yml @@ -45,10 +45,28 @@ processors: ignore_missing: true fail_on_error: false - + - convert: + fields: + - {from: "zeek.session_id", to: "event.id"} + - {from: "source.address", to: "source.ip", type: "ip"} + - {from: "destination.address", to: "destination.ip", type: "ip"} + - {from: "zeek.irc.user", to: "user.name"} + - {from: "zeek.irc.command", to: "event.action"} + - {from: "zeek.irc.dcc.file.name", to: "file.name"} + - {from: "zeek.irc.dcc.file.size", to: "file.size"} + - {from: "zeek.irc.dcc.mime_type", to: "file.mime_type"} + ignore_missing: true + fail_on_error: false + - add_fields: + target: event + fields: + kind: event + category: + - network + type: + - connection + - protocol + - info {{ if .community_id }} - community_id: - fields: - source_ip: source.address - destination_ip: destination.address {{ end }} diff --git a/x-pack/filebeat/module/zeek/irc/ingest/pipeline.json b/x-pack/filebeat/module/zeek/irc/ingest/pipeline.json deleted file mode 100644 index 40723512349..00000000000 --- a/x-pack/filebeat/module/zeek/irc/ingest/pipeline.json +++ /dev/null @@ -1,47 +0,0 @@ -{ - "description": "Pipeline for normalizing Zeek irc.log", - "processors": [ - { - "set": { - 
"field": "event.created", - "value": "{{_ingest.timestamp}}" - } - }, - { - "date": { - "field": "zeek.irc.ts", - "formats": ["UNIX"] - } - }, - { - "remove": { - "field": "zeek.irc.ts" - } - }, - { - "set": { - "field": "event.id", - "value": "{{zeek.session_id}}", - "if": "ctx.zeek.session_id != null" - } - }, - { - "set": { - "field": "source.ip", - "value": "{{source.address}}" - } - }, - { - "set": { - "field": "destination.ip", - "value": "{{destination.address}}" - } - } - ], - "on_failure" : [{ - "set" : { - "field" : "error.message", - "value" : "{{ _ingest.on_failure_message }}" - } - }] -} diff --git a/x-pack/filebeat/module/zeek/irc/ingest/pipeline.yml b/x-pack/filebeat/module/zeek/irc/ingest/pipeline.yml new file mode 100644 index 00000000000..ec04f4e7c93 --- /dev/null +++ b/x-pack/filebeat/module/zeek/irc/ingest/pipeline.yml @@ -0,0 +1,65 @@ +description: Pipeline for normalizing Zeek irc.log +processors: +- set: + field: event.created + value: '{{_ingest.timestamp}}' +- date: + field: zeek.irc.ts + formats: + - UNIX +- remove: + field: zeek.irc.ts +- append: + field: related.ip + value: "{{source.ip}}" + if: "ctx?.source?.ip != null" +- append: + field: related.ip + value: "{{destination.ip}}" + if: "ctx?.destination?.ip != null" +- append: + field: related.user + value: "{{user.name}}" + if: "ctx?.user?.name != null" +- geoip: + field: destination.ip + target_field: destination.geo +- geoip: + field: source.ip + target_field: source.geo +- geoip: + database_file: GeoLite2-ASN.mmdb + field: source.ip + target_field: source.as + properties: + - asn + - organization_name + ignore_missing: true +- geoip: + database_file: GeoLite2-ASN.mmdb + field: destination.ip + target_field: destination.as + properties: + - asn + - organization_name + ignore_missing: true +- rename: + field: source.as.asn + target_field: source.as.number + ignore_missing: true +- rename: + field: source.as.organization_name + target_field: source.as.organization.name + ignore_missing: true +- rename: + field: destination.as.asn + target_field: destination.as.number + ignore_missing: true +- rename: + field: destination.as.organization_name + target_field: destination.as.organization.name + ignore_missing: true +on_failure: +- set: + field: error.message + value: '{{ _ingest.on_failure_message }}' diff --git a/x-pack/filebeat/module/zeek/irc/manifest.yml b/x-pack/filebeat/module/zeek/irc/manifest.yml index ce7cd7b714e..3bf899fd2c0 100644 --- a/x-pack/filebeat/module/zeek/irc/manifest.yml +++ b/x-pack/filebeat/module/zeek/irc/manifest.yml @@ -13,5 +13,5 @@ var: - name: community_id default: true -ingest_pipeline: ingest/pipeline.json +ingest_pipeline: ingest/pipeline.yml input: config/irc.yml diff --git a/x-pack/filebeat/module/zeek/irc/test/irc-json.log-expected.json b/x-pack/filebeat/module/zeek/irc/test/irc-json.log-expected.json index 2a12e671ea5..245d1154e86 100644 --- a/x-pack/filebeat/module/zeek/irc/test/irc-json.log-expected.json +++ b/x-pack/filebeat/module/zeek/irc/test/irc-json.log-expected.json @@ -2,17 +2,37 @@ { "@timestamp": "2013-12-20T15:44:10.647Z", "destination.address": "38.229.70.20", + "destination.as.number": 23028, + "destination.as.organization.name": "Team Cymru Inc.", + "destination.geo.continent_name": "North America", + "destination.geo.country_iso_code": "US", + "destination.geo.location.lat": 37.751, + "destination.geo.location.lon": -97.822, "destination.ip": "38.229.70.20", "destination.port": 8000, + "event.action": "USER", + "event.category": [ + "network" + ], 
"event.dataset": "zeek.irc", "event.id": "CNJBX5FQdL62VUUP1", + "event.kind": "event", "event.module": "zeek", + "event.type": [ + "connection", + "protocol", + "info" + ], "fileset.name": "irc", "input.type": "log", "log.offset": 0, "network.community_id": "1:YdkGov/c+KLtmg7Cf5DLDB4+YdQ=", "network.protocol": "irc", "network.transport": "tcp", + "related.ip": [ + "10.180.156.249", + "38.229.70.20" + ], "service.type": "zeek", "source.address": "10.180.156.249", "source.ip": "10.180.156.249", @@ -28,17 +48,40 @@ { "@timestamp": "2013-12-20T15:44:10.647Z", "destination.address": "38.229.70.20", + "destination.as.number": 23028, + "destination.as.organization.name": "Team Cymru Inc.", + "destination.geo.continent_name": "North America", + "destination.geo.country_iso_code": "US", + "destination.geo.location.lat": 37.751, + "destination.geo.location.lon": -97.822, "destination.ip": "38.229.70.20", "destination.port": 8000, + "event.action": "NICK", + "event.category": [ + "network" + ], "event.dataset": "zeek.irc", "event.id": "CNJBX5FQdL62VUUP1", + "event.kind": "event", "event.module": "zeek", + "event.type": [ + "connection", + "protocol", + "info" + ], "fileset.name": "irc", "input.type": "log", "log.offset": 206, "network.community_id": "1:YdkGov/c+KLtmg7Cf5DLDB4+YdQ=", "network.protocol": "irc", "network.transport": "tcp", + "related.ip": [ + "10.180.156.249", + "38.229.70.20" + ], + "related.user": [ + "xxxxx" + ], "service.type": "zeek", "source.address": "10.180.156.249", "source.ip": "10.180.156.249", @@ -46,6 +89,7 @@ "tags": [ "zeek.irc" ], + "user.name": "xxxxx", "zeek.irc.addl": "+iw xxxxx XxxxxxXxxx ", "zeek.irc.command": "NICK", "zeek.irc.user": "xxxxx", @@ -55,17 +99,40 @@ { "@timestamp": "2013-12-20T15:44:10.706Z", "destination.address": "38.229.70.20", + "destination.as.number": 23028, + "destination.as.organization.name": "Team Cymru Inc.", + "destination.geo.continent_name": "North America", + "destination.geo.country_iso_code": "US", + "destination.geo.location.lat": 37.751, + "destination.geo.location.lon": -97.822, "destination.ip": "38.229.70.20", "destination.port": 8000, + "event.action": "JOIN", + "event.category": [ + "network" + ], "event.dataset": "zeek.irc", "event.id": "CNJBX5FQdL62VUUP1", + "event.kind": "event", "event.module": "zeek", + "event.type": [ + "connection", + "protocol", + "info" + ], "fileset.name": "irc", "input.type": "log", "log.offset": 432, "network.community_id": "1:YdkGov/c+KLtmg7Cf5DLDB4+YdQ=", "network.protocol": "irc", "network.transport": "tcp", + "related.ip": [ + "10.180.156.249", + "38.229.70.20" + ], + "related.user": [ + "xxxxx" + ], "service.type": "zeek", "source.address": "10.180.156.249", "source.ip": "10.180.156.249", @@ -73,6 +140,7 @@ "tags": [ "zeek.irc" ], + "user.name": "xxxxx", "zeek.irc.addl": " with channel key: '-'", "zeek.irc.command": "JOIN", "zeek.irc.nick": "molochtest", diff --git a/x-pack/filebeat/module/zeek/kerberos/config/kerberos.yml b/x-pack/filebeat/module/zeek/kerberos/config/kerberos.yml index 4bbcf677b70..28c49507406 100644 --- a/x-pack/filebeat/module/zeek/kerberos/config/kerberos.yml +++ b/x-pack/filebeat/module/zeek/kerberos/config/kerberos.yml @@ -72,10 +72,33 @@ processors: ignore_missing: true fail_on_error: false - + - convert: + fields: + - {from: "zeek.session_id", to: "event.id"} + - {from: "source.address", to: "source.ip", type: "ip"} + - {from: "source.address", to: "client.address"} + - {from: "destination.address", to: "destination.ip", type: "ip"} + - {from: "destination.address", to: 
"server.address"} + - {from: "zeek.kerberos.request_type", to: "event.action"} + ignore_missing: true + fail_on_error: false + - add_fields: + target: event + fields: + kind: event + category: + - network + type: + - connection + - protocol + - authentication + - dissect: + when: + contains: + zeek.kerberos.client: "/" + tokenizer: "%{user.name}/%{user.domain}" + field: zeek.kerberos.client + target_prefix: "" {{ if .community_id }} - community_id: - fields: - source_ip: source.address - destination_ip: destination.address {{ end }} diff --git a/x-pack/filebeat/module/zeek/kerberos/ingest/pipeline.json b/x-pack/filebeat/module/zeek/kerberos/ingest/pipeline.json deleted file mode 100644 index 988e9b7f2b1..00000000000 --- a/x-pack/filebeat/module/zeek/kerberos/ingest/pipeline.json +++ /dev/null @@ -1,81 +0,0 @@ -{ - "description": "Pipeline for normalizing Zeek kerberos.log", - "processors": [ - { - "set": { - "field": "event.created", - "value": "{{_ingest.timestamp}}" - } - }, - { - "date": { - "field": "zeek.kerberos.ts", - "formats": ["UNIX"] - } - }, - { - "remove": { - "field": "zeek.kerberos.ts" - } - }, - { - "set": { - "field": "event.id", - "value": "{{zeek.session_id}}", - "if": "ctx.zeek.session_id != null" - } - }, - { - "set": { - "field": "source.ip", - "value": "{{source.address}}" - } - }, - { - "set": { - "field": "client.address", - "value": "{{source.address}}" - } - }, - { - "set": { - "field": "server.address", - "value": "{{destination.address}}" - } - }, - { - "set": { - "field": "destination.ip", - "value": "{{destination.address}}" - } - }, - { - "script": { - "source": "ctx.zeek.kerberos.valid.days = Math.round( (ctx.zeek.kerberos.valid.until - ctx.zeek.kerberos.valid.from) / 86400 )", - "if": "ctx.zeek.kerberos.valid?.from != null && ctx.zeek.kerberos.valid?.until != null" - } - }, - { - "date": { - "field": "zeek.kerberos.valid.until", - "target_field": "zeek.kerberos.valid.until", - "formats": ["UNIX"], - "if": "ctx.zeek.kerberos.valid?.until != null" - } - }, - { - "date": { - "field": "zeek.kerberos.valid.from", - "target_field": "zeek.kerberos.valid.from", - "formats": ["UNIX"], - "if": "ctx.zeek.kerberos.valid?.from != null" - } - } - ], - "on_failure": [{ - "set": { - "field": "error.message", - "value": "{{ _ingest.on_failure_message }}" - } - }] -} diff --git a/x-pack/filebeat/module/zeek/kerberos/ingest/pipeline.yml b/x-pack/filebeat/module/zeek/kerberos/ingest/pipeline.yml new file mode 100644 index 00000000000..05005491115 --- /dev/null +++ b/x-pack/filebeat/module/zeek/kerberos/ingest/pipeline.yml @@ -0,0 +1,90 @@ +description: Pipeline for normalizing Zeek kerberos.log +processors: +- set: + field: event.created + value: '{{_ingest.timestamp}}' +- date: + field: zeek.kerberos.ts + formats: + - UNIX +- remove: + field: zeek.kerberos.ts +- script: + source: "ctx.zeek.kerberos.valid.days = Math.round( (ctx.zeek.kerberos.valid.until - ctx.zeek.kerberos.valid.from) / 86400 )" + if: "ctx.zeek.kerberos.valid?.from != null && ctx.zeek.kerberos.valid?.until != null" +- date: + field: zeek.kerberos.valid.until + target_field: zeek.kerberos.valid.until + formats: + - UNIX + if: ctx.zeek.kerberos.valid?.until != null +- date: + field: zeek.kerberos.valid.from + target_field: zeek.kerberos.valid.from + formats: + - UNIX + if: ctx.zeek.kerberos.valid?.from != null +- set: + field: event.outcome + value: success + if: "ctx?.zeek?.kerberos?.success == true" +- set: + field: event.outcome + value: failure + if: "ctx?.zeek?.kerberos?.success == false" +- geoip: + 
+    field: destination.ip
+    target_field: destination.geo
+    ignore_missing: true
+- geoip:
+    field: source.ip
+    target_field: source.geo
+    ignore_missing: true
+- geoip:
+    database_file: GeoLite2-ASN.mmdb
+    field: source.ip
+    target_field: source.as
+    properties:
+    - asn
+    - organization_name
+    ignore_missing: true
+- geoip:
+    database_file: GeoLite2-ASN.mmdb
+    field: destination.ip
+    target_field: destination.as
+    properties:
+    - asn
+    - organization_name
+    ignore_missing: true
+- rename:
+    field: source.as.asn
+    target_field: source.as.number
+    ignore_missing: true
+- rename:
+    field: source.as.organization_name
+    target_field: source.as.organization.name
+    ignore_missing: true
+- rename:
+    field: destination.as.asn
+    target_field: destination.as.number
+    ignore_missing: true
+- rename:
+    field: destination.as.organization_name
+    target_field: destination.as.organization.name
+    ignore_missing: true
+- append:
+    field: related.ip
+    value: "{{source.ip}}"
+    if: "ctx?.source?.ip != null"
+- append:
+    field: related.ip
+    value: "{{destination.ip}}"
+    if: "ctx?.destination?.ip != null"
+- append:
+    field: related.user
+    value: "{{user.name}}"
+    if: "ctx?.user?.name != null"
+on_failure:
+- set:
+    field: error.message
+    value: '{{ _ingest.on_failure_message }}'
diff --git a/x-pack/filebeat/module/zeek/kerberos/manifest.yml b/x-pack/filebeat/module/zeek/kerberos/manifest.yml
index a2e040be371..4a94434f1d4 100644
--- a/x-pack/filebeat/module/zeek/kerberos/manifest.yml
+++ b/x-pack/filebeat/module/zeek/kerberos/manifest.yml
@@ -13,7 +13,7 @@ var:
   - name: community_id
     default: true
 
-ingest_pipeline: ingest/pipeline.json
+ingest_pipeline: ingest/pipeline.yml
 input: config/kerberos.yml
 
 requires.processors:
diff --git a/x-pack/filebeat/module/zeek/kerberos/test/kerberos-json.log-expected.json b/x-pack/filebeat/module/zeek/kerberos/test/kerberos-json.log-expected.json
index a09e3ac8a4f..e01e42a4036 100644
--- a/x-pack/filebeat/module/zeek/kerberos/test/kerberos-json.log-expected.json
+++ b/x-pack/filebeat/module/zeek/kerberos/test/kerberos-json.log-expected.json
@@ -5,15 +5,33 @@
         "destination.address": "192.168.10.10",
         "destination.ip": "192.168.10.10",
         "destination.port": 88,
+        "event.action": "TGS",
+        "event.category": [
+            "network"
+        ],
         "event.dataset": "zeek.kerberos",
         "event.id": "C56Flhb4WQBNkfMOl",
+        "event.kind": "event",
         "event.module": "zeek",
+        "event.outcome": "success",
+        "event.type": [
+            "connection",
+            "protocol",
+            "authentication"
+        ],
         "fileset.name": "kerberos",
         "input.type": "log",
         "log.offset": 0,
         "network.community_id": "1:DW/lSsosl8gZ8pqO9kKMm7cZheQ=",
         "network.protocol": "kerberos",
         "network.transport": "tcp",
+        "related.ip": [
+            "192.168.10.31",
+            "192.168.10.10"
+        ],
+        "related.user": [
+            "RonHD"
+        ],
         "server.address": "192.168.10.10",
         "service.type": "zeek",
         "source.address": "192.168.10.31",
@@ -22,6 +40,8 @@
         "tags": [
             "zeek.kerberos"
         ],
+        "user.domain": "CONTOSO.LOCAL",
+        "user.name": "RonHD",
         "zeek.kerberos.cipher": "aes256-cts-hmac-sha1-96",
         "zeek.kerberos.client": "RonHD/CONTOSO.LOCAL",
         "zeek.kerberos.forwardable": true,
diff --git a/x-pack/filebeat/module/zeek/modbus/config/modbus.yml b/x-pack/filebeat/module/zeek/modbus/config/modbus.yml
index fec2b954224..6dc8c3004d4 100644
--- a/x-pack/filebeat/module/zeek/modbus/config/modbus.yml
+++ b/x-pack/filebeat/module/zeek/modbus/config/modbus.yml
@@ -39,10 +39,35 @@ processors:
       ignore_missing: true
      fail_on_error: false
 
-
+  - convert:
+      fields:
+        - {from: "zeek.session_id", to: "event.id"}
+        - {from: "source.address", to: "source.ip", type: "ip"}
"source.ip", type: "ip"} + - {from: "destination.address", to: "destination.ip", type: "ip"} + - {from: "zeek.modbus.function", to: "event.action"} + ignore_missing: true + fail_on_error: false + - add_fields: + target: event + fields: + kind: event + category: + - network + type: + - connection + - protocol + - if: + has_fields: ['zeek.modbus.exception'] + then: + - add_fields: + target: event + fields: + outcome: failure + else: + - add_fields: + target: event + fields: + outcome: success {{ if .community_id }} - community_id: - fields: - source_ip: source.address - destination_ip: destination.address {{ end }} diff --git a/x-pack/filebeat/module/zeek/modbus/ingest/pipeline.json b/x-pack/filebeat/module/zeek/modbus/ingest/pipeline.json deleted file mode 100644 index 78026f2dc87..00000000000 --- a/x-pack/filebeat/module/zeek/modbus/ingest/pipeline.json +++ /dev/null @@ -1,47 +0,0 @@ -{ - "description": "Pipeline for normalizing Zeek modbus.log", - "processors": [ - { - "set": { - "field": "event.created", - "value": "{{_ingest.timestamp}}" - } - }, - { - "date": { - "field": "zeek.modbus.ts", - "formats": ["UNIX"] - } - }, - { - "remove": { - "field": "zeek.modbus.ts" - } - }, - { - "set": { - "field": "event.id", - "value": "{{zeek.session_id}}", - "if": "ctx.zeek.session_id != null" - } - }, - { - "set": { - "field": "source.ip", - "value": "{{source.address}}" - } - }, - { - "set": { - "field": "destination.ip", - "value": "{{destination.address}}" - } - } - ], - "on_failure" : [{ - "set" : { - "field" : "error.message", - "value" : "{{ _ingest.on_failure_message }}" - } - }] -} diff --git a/x-pack/filebeat/module/zeek/modbus/ingest/pipeline.yml b/x-pack/filebeat/module/zeek/modbus/ingest/pipeline.yml new file mode 100644 index 00000000000..d053a541ef5 --- /dev/null +++ b/x-pack/filebeat/module/zeek/modbus/ingest/pipeline.yml @@ -0,0 +1,63 @@ +description: Pipeline for normalizing Zeek modbus.log +processors: +- set: + field: event.created + value: '{{_ingest.timestamp}}' +- date: + field: zeek.modbus.ts + formats: + - UNIX +- remove: + field: zeek.modbus.ts +- append: + field: related.ip + value: "{{source.ip}}" + if: "ctx?.source?.ip != null" +- append: + field: related.ip + value: "{{destination.ip}}" + if: "ctx?.destination?.ip != null" +- geoip: + field: destination.ip + target_field: destination.geo + ignore_missing: true +- geoip: + field: source.ip + target_field: source.geo + ignore_missing: true +- geoip: + database_file: GeoLite2-ASN.mmdb + field: source.ip + target_field: source.as + properties: + - asn + - organization_name + ignore_missing: true +- geoip: + database_file: GeoLite2-ASN.mmdb + field: destination.ip + target_field: destination.as + properties: + - asn + - organization_name + ignore_missing: true +- rename: + field: source.as.asn + target_field: source.as.number + ignore_missing: true +- rename: + field: source.as.organization_name + target_field: source.as.organization.name + ignore_missing: true +- rename: + field: destination.as.asn + target_field: destination.as.number + ignore_missing: true +- rename: + field: destination.as.organization_name + target_field: destination.as.organization.name + ignore_missing: true +on_failure: +- set: + field: error.message + value: '{{ _ingest.on_failure_message }}' diff --git a/x-pack/filebeat/module/zeek/modbus/manifest.yml b/x-pack/filebeat/module/zeek/modbus/manifest.yml index 98e51ae2bec..e20412fadc6 100644 --- a/x-pack/filebeat/module/zeek/modbus/manifest.yml +++ b/x-pack/filebeat/module/zeek/modbus/manifest.yml @@ 
   - name: community_id
     default: true
 
-ingest_pipeline: ingest/pipeline.json
+ingest_pipeline: ingest/pipeline.yml
 input: config/modbus.yml
diff --git a/x-pack/filebeat/module/zeek/modbus/test/modbus-json.log-expected.json b/x-pack/filebeat/module/zeek/modbus/test/modbus-json.log-expected.json
index 9817176e098..ba9034a3621 100644
--- a/x-pack/filebeat/module/zeek/modbus/test/modbus-json.log-expected.json
+++ b/x-pack/filebeat/module/zeek/modbus/test/modbus-json.log-expected.json
@@ -4,15 +4,29 @@
         "destination.address": "192.168.1.164",
         "destination.ip": "192.168.1.164",
         "destination.port": 502,
+        "event.action": "READ_COILS",
+        "event.category": [
+            "network"
+        ],
         "event.dataset": "zeek.modbus",
         "event.id": "CpIIXl4DFGswmjH2bl",
+        "event.kind": "event",
         "event.module": "zeek",
+        "event.outcome": "success",
+        "event.type": [
+            "connection",
+            "protocol"
+        ],
         "fileset.name": "modbus",
         "input.type": "log",
         "log.offset": 0,
         "network.community_id": "1:jEXbR2FqHyMgLJgyYyFQN3yxbpc=",
         "network.protocol": "modbus",
         "network.transport": "tcp",
+        "related.ip": [
+            "192.168.1.10",
+            "192.168.1.164"
+        ],
         "service.type": "zeek",
         "source.address": "192.168.1.10",
         "source.ip": "192.168.1.10",
diff --git a/x-pack/filebeat/module/zeek/mysql/config/mysql.yml b/x-pack/filebeat/module/zeek/mysql/config/mysql.yml
index fcd226131bc..b28262b5bd5 100644
--- a/x-pack/filebeat/module/zeek/mysql/config/mysql.yml
+++ b/x-pack/filebeat/module/zeek/mysql/config/mysql.yml
@@ -36,10 +36,37 @@ processors:
       ignore_missing: true
       fail_on_error: false
 
-
+  - convert:
+      fields:
+        - {from: "zeek.session_id", to: "event.id"}
+        - {from: "source.address", to: "source.ip", type: "ip"}
+        - {from: "destination.address", to: "destination.ip", type: "ip"}
+        - {from: "zeek.mysql.cmd", to: "event.action"}
+      ignore_missing: true
+      fail_on_error: false
+  - add_fields:
+      target: event
+      fields:
+        kind: event
+        category:
+          - database
+          - network
+        type:
+          - connection
+          - protocol
+  - if:
+      equals:
+        zeek.mysql.success: true
+    then:
+      - add_fields:
+          target: event
+          fields:
+            outcome: success
+    else:
+      - add_fields:
+          target: event
+          fields:
+            outcome: failure
 {{ if .community_id }}
   - community_id:
-      fields:
-        source_ip: source.address
-        destination_ip: destination.address
 {{ end }}
diff --git a/x-pack/filebeat/module/zeek/mysql/ingest/pipeline.json b/x-pack/filebeat/module/zeek/mysql/ingest/pipeline.json
deleted file mode 100644
index ec55df982d7..00000000000
--- a/x-pack/filebeat/module/zeek/mysql/ingest/pipeline.json
+++ /dev/null
@@ -1,47 +0,0 @@
-{
-  "description": "Pipeline for normalizing Zeek mysql.log",
-  "processors": [
-    {
-      "set": {
-        "field": "event.created",
-        "value": "{{_ingest.timestamp}}"
-      }
-    },
-    {
-      "date": {
-        "field": "zeek.mysql.ts",
-        "formats": ["UNIX"]
-      }
-    },
-    {
-      "remove": {
-        "field": "zeek.mysql.ts"
-      }
-    },
-    {
-      "set": {
-        "field": "event.id",
-        "value": "{{zeek.session_id}}",
-        "if": "ctx.zeek.session_id != null"
-      }
-    },
-    {
-      "set": {
-        "field": "source.ip",
-        "value": "{{source.address}}"
-      }
-    },
-    {
-      "set": {
-        "field": "destination.ip",
-        "value": "{{destination.address}}"
-      }
-    }
-  ],
-  "on_failure" : [{
-    "set" : {
-      "field" : "error.message",
-      "value" : "{{ _ingest.on_failure_message }}"
-    }
-  }]
-}
diff --git a/x-pack/filebeat/module/zeek/mysql/ingest/pipeline.yml b/x-pack/filebeat/module/zeek/mysql/ingest/pipeline.yml
new file mode 100644
index 00000000000..ca2c6c57172
--- /dev/null
+++ b/x-pack/filebeat/module/zeek/mysql/ingest/pipeline.yml
@@ -0,0 +1,83 @@
+description: Pipeline for normalizing Zeek mysql.log
+processors:
+- set:
+    field: event.created
+    value: '{{_ingest.timestamp}}'
+- date:
+    field: zeek.mysql.ts
+    formats:
+    - UNIX
+- remove:
+    field: zeek.mysql.ts
+- append:
+    field: related.ip
+    value: "{{source.ip}}"
+    if: "ctx?.source?.ip != null"
+- append:
+    field: related.ip
+    value: "{{destination.ip}}"
+    if: "ctx?.destination?.ip != null"
+- geoip:
+    field: destination.ip
+    target_field: destination.geo
+    ignore_missing: true
+- geoip:
+    field: source.ip
+    target_field: source.geo
+    ignore_missing: true
+- geoip:
+    database_file: GeoLite2-ASN.mmdb
+    field: source.ip
+    target_field: source.as
+    properties:
+    - asn
+    - organization_name
+    ignore_missing: true
+- geoip:
+    database_file: GeoLite2-ASN.mmdb
+    field: destination.ip
+    target_field: destination.as
+    properties:
+    - asn
+    - organization_name
+    ignore_missing: true
+- rename:
+    field: source.as.asn
+    target_field: source.as.number
+    ignore_missing: true
+- rename:
+    field: source.as.organization_name
+    target_field: source.as.organization.name
+    ignore_missing: true
+- rename:
+    field: destination.as.asn
+    target_field: destination.as.number
+    ignore_missing: true
+- rename:
+    field: destination.as.organization_name
+    target_field: destination.as.organization.name
+    ignore_missing: true
+- append:
+    field: event.type
+    value: access
+    if: "ctx?.zeek?.mysql?.cmd != null && (ctx.zeek.mysql.cmd == 'connect' || ctx.zeek.mysql.cmd == 'connect_out')"
+- append:
+    field: event.type
+    value: change
+    if: "ctx?.zeek?.mysql?.cmd != null && (ctx.zeek.mysql.cmd == 'init_db' || ctx.zeek.mysql.cmd == 'change_user' || ctx.zeek.mysql.cmd == 'set_option' || ctx.zeek.mysql.cmd == 'drop_db' || ctx.zeek.mysql.cmd == 'create_db' || ctx.zeek.mysql.cmd == 'process_kill' || ctx.zeek.mysql.cmd == 'delayed_insert')"
+- append:
+    field: event.type
+    value: info
+    if: "ctx?.zeek?.mysql?.cmd != null && ctx.zeek.mysql.cmd != 'init_db' && ctx.zeek.mysql.cmd != 'change_user' && ctx.zeek.mysql.cmd != 'set_option' && ctx.zeek.mysql.cmd != 'drop_db' && ctx.zeek.mysql.cmd != 'create_db' && ctx.zeek.mysql.cmd != 'process_kill' && ctx.zeek.mysql.cmd != 'delayed_insert' && ctx.zeek.mysql.cmd != 'connect' && ctx.zeek.mysql.cmd != 'connect_out'"
+- append:
+    field: event.type
+    value: start
+    if: "ctx?.zeek?.mysql?.cmd != null && ctx.zeek.mysql.cmd == 'connect'"
+- append:
+    field: event.type
+    value: end
+    if: "ctx?.zeek?.mysql?.cmd != null && ctx.zeek.mysql.cmd == 'connect_out'"
+on_failure:
+- set:
+    field: error.message
+    value: '{{ _ingest.on_failure_message }}'
diff --git a/x-pack/filebeat/module/zeek/mysql/manifest.yml b/x-pack/filebeat/module/zeek/mysql/manifest.yml
index a16c6092cc7..1b7ec4edb19 100644
--- a/x-pack/filebeat/module/zeek/mysql/manifest.yml
+++ b/x-pack/filebeat/module/zeek/mysql/manifest.yml
@@ -13,5 +13,5 @@ var:
   - name: community_id
     default: true
 
-ingest_pipeline: ingest/pipeline.json
+ingest_pipeline: ingest/pipeline.yml
 input: config/mysql.yml
diff --git a/x-pack/filebeat/module/zeek/mysql/test/mysql-json.log-expected.json b/x-pack/filebeat/module/zeek/mysql/test/mysql-json.log-expected.json
index 279b1019404..bf68cae48fe 100644
--- a/x-pack/filebeat/module/zeek/mysql/test/mysql-json.log-expected.json
+++ b/x-pack/filebeat/module/zeek/mysql/test/mysql-json.log-expected.json
@@ -4,15 +4,31 @@
         "destination.address": "192.168.0.254",
         "destination.ip": "192.168.0.254",
         "destination.port": 3306,
+        "event.action": "query",
+        "event.category": [
+            "database",
+            "network"
+        ],
         "event.dataset": "zeek.mysql",
"event.id": "C5Hol527kLMUw36hj3", + "event.kind": "event", "event.module": "zeek", + "event.outcome": "success", + "event.type": [ + "connection", + "protocol", + "info" + ], "fileset.name": "mysql", "input.type": "log", "log.offset": 0, "network.community_id": "1:0HUQbshhYbATQXDHv/ysOs0DlZA=", "network.protocol": "mysql", "network.transport": "tcp", + "related.ip": [ + "192.168.0.254", + "192.168.0.254" + ], "service.type": "zeek", "source.address": "192.168.0.254", "source.ip": "192.168.0.254", diff --git a/x-pack/filebeat/module/zeek/notice/config/notice.yml b/x-pack/filebeat/module/zeek/notice/config/notice.yml index 7f5c9c0869c..32ab849b6b5 100644 --- a/x-pack/filebeat/module/zeek/notice/config/notice.yml +++ b/x-pack/filebeat/module/zeek/notice/config/notice.yml @@ -78,9 +78,25 @@ processors: - drop_fields: fields: ["zeek.notice.remote_location", "zeek.notice.f"] + - convert: + fields: + - {from: "zeek.session_id", to: "event.id"} + - {from: "source.address", to: "source.ip", type: "ip"} + - {from: "destination.address", to: "destination.ip", type: "ip"} + - {from: "zeek.notice.file.total_bytes", to: "file.size"} + - {from: "zeek.notice.file.mime_type", to: "file.mime_type"} + - {from: "zeek.notice.note", to: "rule.name"} + - {from: "zeek.notice.msg", to: "rule.description"} + ignore_missing: true + fail_on_error: false + - add_fields: + target: event + fields: + kind: alert + category: + - intrusion_detection + type: + - info {{ if .community_id }} - community_id: - fields: - source_ip: source.address - destination_ip: destination.address {{ end }} diff --git a/x-pack/filebeat/module/zeek/notice/ingest/pipeline.json b/x-pack/filebeat/module/zeek/notice/ingest/pipeline.json deleted file mode 100644 index b343068d6c6..00000000000 --- a/x-pack/filebeat/module/zeek/notice/ingest/pipeline.json +++ /dev/null @@ -1,115 +0,0 @@ -{ - "description": "Pipeline for normalizing Zeek notice.log", - "processors": [ - { - "set": { - "field": "event.created", - "value": "{{_ingest.timestamp}}" - } - }, - { - "date": { - "field": "zeek.notice.ts", - "formats": ["UNIX"] - } - }, - { - "remove": { - "field": "zeek.notice.ts" - } - }, - { - "set": { - "field": "destination.ip", - "value": "{{destination.address}}", - "if": "ctx.destination?.address != null" - } - }, - { - "set": { - "field": "source.ip", - "value": "{{source.address}}", - "if": "ctx.source?.address != null" - } - }, - { - "set": { - "field": "event.id", - "value": "{{zeek.session_id}}", - "if": "ctx.zeek.session_id != null" - } - }, - { - "geoip": { - "field": "destination.ip", - "target_field": "destination.geo", - "ignore_missing": true - } - }, - { - "geoip": { - "field": "source.ip", - "target_field": "source.geo", - "ignore_missing": true - } - }, - { - "geoip": { - "database_file": "GeoLite2-ASN.mmdb", - "field": "source.ip", - "target_field": "source.as", - "properties": [ - "asn", - "organization_name" - ], - "ignore_missing": true - } - }, - { - "geoip": { - "database_file": "GeoLite2-ASN.mmdb", - "field": "destination.ip", - "target_field": "destination.as", - "properties": [ - "asn", - "organization_name" - ], - "ignore_missing": true - } - }, - { - "rename": { - "field": "source.as.asn", - "target_field": "source.as.number", - "ignore_missing": true - } - }, - { - "rename": { - "field": "source.as.organization_name", - "target_field": "source.as.organization.name", - "ignore_missing": true - } - }, - { - "rename": { - "field": "destination.as.asn", - "target_field": "destination.as.number", - "ignore_missing": true - } - }, 
- { - "rename": { - "field": "destination.as.organization_name", - "target_field": "destination.as.organization.name", - "ignore_missing": true - } - } - ], - "on_failure": [{ - "set": { - "field": "error.message", - "value": "{{ _ingest.on_failure_message }}" - } - }] -} diff --git a/x-pack/filebeat/module/zeek/notice/ingest/pipeline.yml b/x-pack/filebeat/module/zeek/notice/ingest/pipeline.yml new file mode 100644 index 00000000000..c4dee6b78f2 --- /dev/null +++ b/x-pack/filebeat/module/zeek/notice/ingest/pipeline.yml @@ -0,0 +1,71 @@ +description: Pipeline for normalizing Zeek notice.log +processors: +- set: + field: event.created + value: '{{_ingest.timestamp}}' +- date: + field: zeek.notice.ts + formats: + - UNIX +- remove: + field: zeek.notice.ts +- geoip: + field: destination.ip + target_field: destination.geo + ignore_missing: true +- geoip: + field: source.ip + target_field: source.geo + ignore_missing: true +- geoip: + database_file: GeoLite2-ASN.mmdb + field: source.ip + target_field: source.as + properties: + - asn + - organization_name + ignore_missing: true +- geoip: + database_file: GeoLite2-ASN.mmdb + field: destination.ip + target_field: destination.as + properties: + - asn + - organization_name + ignore_missing: true +- rename: + field: source.as.asn + target_field: source.as.number + ignore_missing: true +- rename: + field: source.as.organization_name + target_field: source.as.organization.name + ignore_missing: true +- rename: + field: destination.as.asn + target_field: destination.as.number + ignore_missing: true +- rename: + field: destination.as.organization_name + target_field: destination.as.organization.name + ignore_missing: true +- append: + field: related.ip + value: "{{source.ip}}" + if: "ctx?.source?.ip != null" +- append: + field: related.ip + value: "{{destination.ip}}" + if: "ctx?.destination?.ip != null" +- append: + field: event.type + value: allowed + if: "ctx?.zeek?.notice?.dropped == false" +- append: + field: event.type + value: denied + if: "ctx?.zeek?.notice?.dropped == true" +on_failure: +- set: + field: error.message + value: '{{ _ingest.on_failure_message }}' diff --git a/x-pack/filebeat/module/zeek/notice/manifest.yml b/x-pack/filebeat/module/zeek/notice/manifest.yml index 7b98a8efefc..e2bdf695027 100644 --- a/x-pack/filebeat/module/zeek/notice/manifest.yml +++ b/x-pack/filebeat/module/zeek/notice/manifest.yml @@ -13,7 +13,7 @@ var: - name: community_id default: true -ingest_pipeline: ingest/pipeline.json +ingest_pipeline: ingest/pipeline.yml input: config/notice.yml requires.processors: diff --git a/x-pack/filebeat/module/zeek/notice/test/notice-json.log-expected.json b/x-pack/filebeat/module/zeek/notice/test/notice-json.log-expected.json index 58a59ab4d7b..a5838e9f3f1 100644 --- a/x-pack/filebeat/module/zeek/notice/test/notice-json.log-expected.json +++ b/x-pack/filebeat/module/zeek/notice/test/notice-json.log-expected.json @@ -1,11 +1,24 @@ [ { "@timestamp": "2011-11-04T19:44:35.879Z", + "event.category": [ + "intrusion_detection" + ], "event.dataset": "zeek.notice", + "event.kind": "alert", "event.module": "zeek", + "event.type": [ + "info", + "allowed" + ], "fileset.name": "notice", "input.type": "log", "log.offset": 0, + "related.ip": [ + "172.16.238.1" + ], + "rule.description": "172.16.238.1 appears to be guessing SSH passwords (seen in 30 connections).", + "rule.name": "SSH::Password_Guessing", "service.type": "zeek", "source.address": "172.16.238.1", "source.ip": "172.16.238.1", @@ -32,11 +45,25 @@ "destination.geo.region_iso_code": 
"DE-HE", "destination.geo.region_name": "Hesse", "destination.ip": "207.154.238.205", + "event.category": [ + "intrusion_detection" + ], "event.dataset": "zeek.notice", + "event.kind": "alert", "event.module": "zeek", + "event.type": [ + "info", + "allowed" + ], "fileset.name": "notice", "input.type": "log", "log.offset": 357, + "related.ip": [ + "8.42.77.171", + "207.154.238.205" + ], + "rule.description": "8.42.77.171 scanned at least 15 unique ports of host 207.154.238.205 in 0m0s", + "rule.name": "Scan::Port_Scan", "service.type": "zeek", "source.address": "8.42.77.171", "source.as.number": 393552, diff --git a/x-pack/filebeat/module/zeek/ntlm/config/ntlm.yml b/x-pack/filebeat/module/zeek/ntlm/config/ntlm.yml index 76cfecaaf54..55a6795b6fa 100644 --- a/x-pack/filebeat/module/zeek/ntlm/config/ntlm.yml +++ b/x-pack/filebeat/module/zeek/ntlm/config/ntlm.yml @@ -48,10 +48,39 @@ processors: ignore_missing: true fail_on_error: false - + - convert: + fields: + - {from: "zeek.session_id", to: "event.id"} + - {from: "source.address", to: "source.ip", type: "ip"} + - {from: "destination.address", to: "destination.ip", type: "ip"} + - {from: "zeek.ntlm.username", to: "user.name"} + - {from: "zeek.ntlm.domain", to: "user.domain"} + - add_fields: + target: event + fields: + kind: event + category: + - authentication + - network + type: + - info + - connection + - if: + equals: + zeek.ntlm.success: true + then: + - add_fields: + target: event + fields: + outcome: success + - if: + equals: + zeek.ntlm.success: false + then: + - add_fields: + target: event + fields: + outcome: failure {{ if .community_id }} - community_id: - fields: - source_ip: source.address - destination_ip: destination.address {{ end }} diff --git a/x-pack/filebeat/module/zeek/ntlm/ingest/pipeline.json b/x-pack/filebeat/module/zeek/ntlm/ingest/pipeline.json deleted file mode 100644 index 680ea8815e0..00000000000 --- a/x-pack/filebeat/module/zeek/ntlm/ingest/pipeline.json +++ /dev/null @@ -1,47 +0,0 @@ -{ - "description": "Pipeline for normalizing Zeek ntlm.log", - "processors": [ - { - "set": { - "field": "event.created", - "value": "{{_ingest.timestamp}}" - } - }, - { - "date": { - "field": "zeek.ntlm.ts", - "formats": ["UNIX"] - } - }, - { - "remove": { - "field": "zeek.ntlm.ts" - } - }, - { - "set": { - "field": "event.id", - "value": "{{zeek.session_id}}", - "if": "ctx.zeek.session_id != null" - } - }, - { - "set": { - "field": "source.ip", - "value": "{{source.address}}" - } - }, - { - "set": { - "field": "destination.ip", - "value": "{{destination.address}}" - } - } - ], - "on_failure" : [{ - "set" : { - "field" : "error.message", - "value" : "{{ _ingest.on_failure_message }}" - } - }] -} diff --git a/x-pack/filebeat/module/zeek/ntlm/ingest/pipeline.yml b/x-pack/filebeat/module/zeek/ntlm/ingest/pipeline.yml new file mode 100644 index 00000000000..9f76d461392 --- /dev/null +++ b/x-pack/filebeat/module/zeek/ntlm/ingest/pipeline.yml @@ -0,0 +1,67 @@ +description: Pipeline for normalizing Zeek ntlm.log +processors: +- set: + field: event.created + value: '{{_ingest.timestamp}}' +- date: + field: zeek.ntlm.ts + formats: + - UNIX +- remove: + field: zeek.ntlm.ts +- append: + field: related.ip + value: "{{source.ip}}" + if: "ctx?.source?.ip != null" +- append: + field: related.ip + value: "{{destination.ip}}" + if: "ctx?.destination?.ip != null" +- append: + field: related.user + value: "{{user.name}}" + if: "ctx?.user?.name != null" +- geoip: + field: destination.ip + target_field: destination.geo + ignore_missing: true +- geoip: 
+    field: source.ip
+    target_field: source.geo
+    ignore_missing: true
+- geoip:
+    database_file: GeoLite2-ASN.mmdb
+    field: source.ip
+    target_field: source.as
+    properties:
+    - asn
+    - organization_name
+    ignore_missing: true
+- geoip:
+    database_file: GeoLite2-ASN.mmdb
+    field: destination.ip
+    target_field: destination.as
+    properties:
+    - asn
+    - organization_name
+    ignore_missing: true
+- rename:
+    field: source.as.asn
+    target_field: source.as.number
+    ignore_missing: true
+- rename:
+    field: source.as.organization_name
+    target_field: source.as.organization.name
+    ignore_missing: true
+- rename:
+    field: destination.as.asn
+    target_field: destination.as.number
+    ignore_missing: true
+- rename:
+    field: destination.as.organization_name
+    target_field: destination.as.organization.name
+    ignore_missing: true
+on_failure:
+- set:
+    field: error.message
+    value: '{{ _ingest.on_failure_message }}'
diff --git a/x-pack/filebeat/module/zeek/ntlm/manifest.yml b/x-pack/filebeat/module/zeek/ntlm/manifest.yml
index 0248af27d3b..545bef85aaa 100644
--- a/x-pack/filebeat/module/zeek/ntlm/manifest.yml
+++ b/x-pack/filebeat/module/zeek/ntlm/manifest.yml
@@ -13,5 +13,5 @@ var:
   - name: community_id
     default: true
 
-ingest_pipeline: ingest/pipeline.json
+ingest_pipeline: ingest/pipeline.yml
 input: config/ntlm.yml
diff --git a/x-pack/filebeat/module/zeek/ntlm/test/ntlm-json.log-expected.json b/x-pack/filebeat/module/zeek/ntlm/test/ntlm-json.log-expected.json
index 90aebbec10b..c85d3127476 100644
--- a/x-pack/filebeat/module/zeek/ntlm/test/ntlm-json.log-expected.json
+++ b/x-pack/filebeat/module/zeek/ntlm/test/ntlm-json.log-expected.json
@@ -4,15 +4,31 @@
         "destination.address": "192.168.10.31",
         "destination.ip": "192.168.10.31",
         "destination.port": 445,
+        "event.category": [
+            "authentication",
+            "network"
+        ],
         "event.dataset": "zeek.ntlm",
         "event.id": "CHphiNUKDC20fsy09",
+        "event.kind": "event",
         "event.module": "zeek",
+        "event.type": [
+            "info",
+            "connection"
+        ],
         "fileset.name": "ntlm",
         "input.type": "log",
         "log.offset": 0,
         "network.community_id": "1:zxnXAE/Cme5fQhh6sJLs7GItc08=",
         "network.protocol": "ntlm",
         "network.transport": "tcp",
+        "related.ip": [
+            "192.168.10.50",
+            "192.168.10.31"
+        ],
+        "related.user": [
+            "JeffV"
+        ],
         "service.type": "zeek",
         "source.address": "192.168.10.50",
         "source.ip": "192.168.10.50",
@@ -20,6 +36,8 @@
         "tags": [
             "zeek.ntlm"
         ],
+        "user.domain": "contoso.local",
+        "user.name": "JeffV",
         "zeek.ntlm.domain": "contoso.local",
         "zeek.ntlm.hostname": "ybaARon55QykXrgu",
         "zeek.ntlm.server.name.dns": "Victim-PC.contoso.local",
diff --git a/x-pack/filebeat/module/zeek/ocsp/config/ocsp.yml b/x-pack/filebeat/module/zeek/ocsp/config/ocsp.yml
index a6a74d6d05e..f6298a36d1e 100644
--- a/x-pack/filebeat/module/zeek/ocsp/config/ocsp.yml
+++ b/x-pack/filebeat/module/zeek/ocsp/config/ocsp.yml
@@ -56,3 +56,7 @@ processors:
       ignore_missing: true
       fail_on_error: false
 
+  - add_fields:
+      target: event
+      fields:
+        kind: event
diff --git a/x-pack/filebeat/module/zeek/ocsp/ingest/pipeline.json b/x-pack/filebeat/module/zeek/ocsp/ingest/pipeline.json
deleted file mode 100644
index e56642bd4a8..00000000000
--- a/x-pack/filebeat/module/zeek/ocsp/ingest/pipeline.json
+++ /dev/null
@@ -1,52 +0,0 @@
-{
-  "description": "Pipeline for normalizing Zeek ocsp.log",
-  "processors": [
-    {
-      "set": {
-        "field": "event.created",
-        "value": "{{_ingest.timestamp}}"
-      }
-    },
-    {
-      "date": {
-        "field": "zeek.ocsp.ts",
-        "formats": ["UNIX"]
-      }
-    },
-    {
-      "remove": {
-        "field": "zeek.ocsp.ts"
-      }
-    },
-    {
-      "date": {
"field": "zeek.ocsp.revoke.date", - "target_field": "zeek.ocsp.revoke.date", - "formats": ["UNIX"], - "if": "ctx.zeek.ocsp.revoke?.date != null" - } - }, - { - "date": { - "field": "zeek.ocsp.update.this", - "target_field": "zeek.ocsp.update.this", - "formats": ["UNIX"], - "if": "ctx.zeek.ocsp.update?.this != null" - } - }, - { - "date": { - "field": "zeek.ocsp.update.next", - "target_field": "zeek.ocsp.update.next", - "formats": ["UNIX"], - "if": "ctx.zeek.ocsp.update?.next != null" - } - } - ], - "on_failure" : [{ - "set" : { - "field" : "error.message", - "value" : "{{ _ingest.on_failure_message }}" - } - }] -} diff --git a/x-pack/filebeat/module/zeek/ocsp/ingest/pipeline.yml b/x-pack/filebeat/module/zeek/ocsp/ingest/pipeline.yml new file mode 100644 index 00000000000..63a878825d7 --- /dev/null +++ b/x-pack/filebeat/module/zeek/ocsp/ingest/pipeline.yml @@ -0,0 +1,41 @@ +description: Pipeline for normalizing Zeek ocsp.log +processors: +- set: + field: event.created + value: '{{_ingest.timestamp}}' +- date: + field: zeek.ocsp.ts + formats: + - UNIX +- remove: + field: zeek.ocsp.ts +- date: + field: zeek.ocsp.revoke.date + target_field: zeek.ocsp.revoke.date + formats: + - UNIX + if: ctx.zeek.ocsp.revoke?.date != null +- date: + field: zeek.ocsp.update.this + target_field: zeek.ocsp.update.this + formats: + - UNIX + if: ctx.zeek.ocsp.update?.this != null +- date: + field: zeek.ocsp.update.next + target_field: zeek.ocsp.update.next + formats: + - UNIX + if: ctx.zeek.ocsp.update?.next != null +- append: + field: related.hash + value: "{{zeek.ocsp.issuerNameHash}}" + if: "ctx?.zeek?.ocsp?.issuerNameHash != null" +- append: + field: related.hash + value: "{{zeek.ocsp.issuerKeyHash}}" + if: "ctx?.zeek?.ocsp?.issuerKeyHash != null" +on_failure: +- set: + field: error.message + value: '{{ _ingest.on_failure_message }}' diff --git a/x-pack/filebeat/module/zeek/ocsp/manifest.yml b/x-pack/filebeat/module/zeek/ocsp/manifest.yml index 739873d645f..35bcfccdcb6 100644 --- a/x-pack/filebeat/module/zeek/ocsp/manifest.yml +++ b/x-pack/filebeat/module/zeek/ocsp/manifest.yml @@ -11,5 +11,5 @@ var: - name: tags default: [zeek.ocsp] -ingest_pipeline: ingest/pipeline.json +ingest_pipeline: ingest/pipeline.yml input: config/ocsp.yml diff --git a/x-pack/filebeat/module/zeek/pe/config/pe.yml b/x-pack/filebeat/module/zeek/pe/config/pe.yml index ee4c78bb8cc..cf5f54396ad 100644 --- a/x-pack/filebeat/module/zeek/pe/config/pe.yml +++ b/x-pack/filebeat/module/zeek/pe/config/pe.yml @@ -21,3 +21,11 @@ processors: ignore_missing: true fail_on_error: false + - add_fields: + target: event + fields: + kind: event + category: + - file + type: + - info diff --git a/x-pack/filebeat/module/zeek/pe/ingest/pipeline.json b/x-pack/filebeat/module/zeek/pe/ingest/pipeline.json deleted file mode 100644 index f950772464c..00000000000 --- a/x-pack/filebeat/module/zeek/pe/ingest/pipeline.json +++ /dev/null @@ -1,36 +0,0 @@ -{ - "description": "Pipeline for normalizing Zeek pe.log", - "processors": [ - { - "set": { - "field": "event.created", - "value": "{{_ingest.timestamp}}" - } - }, - { - "date": { - "field": "zeek.pe.ts", - "formats": ["UNIX"] - } - }, - { - "remove": { - "field": "zeek.pe.ts" - } - }, - { - "date": { - "field": "zeek.pe.compile_time", - "target_field": "zeek.pe.compile_time", - "formats": ["UNIX"], - "if": "ctx.zeek.pe.compile_time != null" - } - } - ], - "on_failure" : [{ - "set" : { - "field" : "error.message", - "value" : "{{ _ingest.on_failure_message }}" - } - }] -} diff --git 
new file mode 100644
index 00000000000..6a7fa7dca87
--- /dev/null
+++ b/x-pack/filebeat/module/zeek/pe/ingest/pipeline.yml
@@ -0,0 +1,21 @@
+description: Pipeline for normalizing Zeek pe.log
+processors:
+- set:
+    field: event.created
+    value: '{{_ingest.timestamp}}'
+- date:
+    field: zeek.pe.ts
+    formats:
+    - UNIX
+- remove:
+    field: zeek.pe.ts
+- date:
+    field: zeek.pe.compile_time
+    target_field: zeek.pe.compile_time
+    formats:
+    - UNIX
+    if: ctx.zeek.pe.compile_time != null
+on_failure:
+- set:
+    field: error.message
+    value: '{{ _ingest.on_failure_message }}'
diff --git a/x-pack/filebeat/module/zeek/pe/manifest.yml b/x-pack/filebeat/module/zeek/pe/manifest.yml
index 02a352c5dfd..16dfe2e4634 100644
--- a/x-pack/filebeat/module/zeek/pe/manifest.yml
+++ b/x-pack/filebeat/module/zeek/pe/manifest.yml
@@ -13,5 +13,5 @@ var:
   - name: community_id
     default: true
 
-ingest_pipeline: ingest/pipeline.json
+ingest_pipeline: ingest/pipeline.yml
 input: config/pe.yml
diff --git a/x-pack/filebeat/module/zeek/pe/test/pe-json.log-expected.json b/x-pack/filebeat/module/zeek/pe/test/pe-json.log-expected.json
index ccad0e8e2fc..3356f0ef793 100644
--- a/x-pack/filebeat/module/zeek/pe/test/pe-json.log-expected.json
+++ b/x-pack/filebeat/module/zeek/pe/test/pe-json.log-expected.json
@@ -1,8 +1,15 @@
 [
     {
         "@timestamp": "2017-10-09T16:13:19.578Z",
+        "event.category": [
+            "file"
+        ],
         "event.dataset": "zeek.pe",
+        "event.kind": "event",
         "event.module": "zeek",
+        "event.type": [
+            "info"
+        ],
         "fileset.name": "pe",
         "input.type": "log",
         "log.offset": 0,
diff --git a/x-pack/filebeat/module/zeek/radius/config/radius.yml b/x-pack/filebeat/module/zeek/radius/config/radius.yml
index fdbb468450c..38338b1c84f 100644
--- a/x-pack/filebeat/module/zeek/radius/config/radius.yml
+++ b/x-pack/filebeat/module/zeek/radius/config/radius.yml
@@ -36,10 +36,25 @@ processors:
       ignore_missing: true
       fail_on_error: false
 
-
+  - convert:
+      fields:
+        - {from: "zeek.session_id", to: "event.id"}
+        - {from: "source.address", to: "source.ip", type: "ip"}
+        - {from: "destination.address", to: "destination.ip", type: "ip"}
+        - {from: "zeek.radius.username", to: "user.name"}
+        - {from: "zeek.radius.result", to: "event.outcome"}
+      ignore_missing: true
+      fail_on_error: false
+  - add_fields:
+      target: event
+      fields:
+        kind: event
+        category:
+          - authentication
+          - network
+        type:
+          - info
+          - connection
 {{ if .community_id }}
   - community_id:
-      fields:
-        source_ip: source.address
-        destination_ip: destination.address
 {{ end }}
diff --git a/x-pack/filebeat/module/zeek/radius/ingest/pipeline.json b/x-pack/filebeat/module/zeek/radius/ingest/pipeline.json
deleted file mode 100644
index 72f645dd651..00000000000
--- a/x-pack/filebeat/module/zeek/radius/ingest/pipeline.json
+++ /dev/null
@@ -1,47 +0,0 @@
-{
-  "description": "Pipeline for normalizing Zeek radius.log",
-  "processors": [
-    {
-      "set": {
-        "field": "event.created",
-        "value": "{{_ingest.timestamp}}"
-      }
-    },
-    {
-      "date": {
-        "field": "zeek.radius.ts",
-        "formats": ["UNIX"]
-      }
-    },
-    {
-      "remove": {
-        "field": "zeek.radius.ts"
-      }
-    },
-    {
-      "set": {
-        "field": "event.id",
-        "value": "{{zeek.session_id}}",
-        "if": "ctx.zeek.session_id != null"
-      }
-    },
-    {
-      "set": {
-        "field": "source.ip",
-        "value": "{{source.address}}"
-      }
-    },
-    {
-      "set": {
-        "field": "destination.ip",
-        "value": "{{destination.address}}"
-      }
-    }
-  ],
-  "on_failure" : [{
-    "set" : {
-      "field" : "error.message",
-      "value" : "{{ _ingest.on_failure_message }}"
-    }
-  }]
-}
diff --git a/x-pack/filebeat/module/zeek/radius/ingest/pipeline.yml b/x-pack/filebeat/module/zeek/radius/ingest/pipeline.yml
new file mode 100644
index 00000000000..c69dfaefbb4
--- /dev/null
+++ b/x-pack/filebeat/module/zeek/radius/ingest/pipeline.yml
@@ -0,0 +1,67 @@
+description: Pipeline for normalizing Zeek radius.log
+processors:
+- set:
+    field: event.created
+    value: '{{_ingest.timestamp}}'
+- date:
+    field: zeek.radius.ts
+    formats:
+    - UNIX
+- remove:
+    field: zeek.radius.ts
+- append:
+    field: related.ip
+    value: "{{source.ip}}"
+    if: "ctx?.source?.ip != null"
+- append:
+    field: related.ip
+    value: "{{destination.ip}}"
+    if: "ctx?.destination?.ip != null"
+- geoip:
+    field: destination.ip
+    target_field: destination.geo
+    ignore_missing: true
+- geoip:
+    field: source.ip
+    target_field: source.geo
+    ignore_missing: true
+- geoip:
+    database_file: GeoLite2-ASN.mmdb
+    field: source.ip
+    target_field: source.as
+    properties:
+    - asn
+    - organization_name
+    ignore_missing: true
+- geoip:
+    database_file: GeoLite2-ASN.mmdb
+    field: destination.ip
+    target_field: destination.as
+    properties:
+    - asn
+    - organization_name
+    ignore_missing: true
+- rename:
+    field: source.as.asn
+    target_field: source.as.number
+    ignore_missing: true
+- rename:
+    field: source.as.organization_name
+    target_field: source.as.organization.name
+    ignore_missing: true
+- rename:
+    field: destination.as.asn
+    target_field: destination.as.number
+    ignore_missing: true
+- rename:
+    field: destination.as.organization_name
+    target_field: destination.as.organization.name
+    ignore_missing: true
+- append:
+    field: related.user
+    value: "{{user.name}}"
+    if: "ctx?.user?.name != null"
+on_failure:
+- set:
+    field: error.message
+    value: '{{ _ingest.on_failure_message }}'
diff --git a/x-pack/filebeat/module/zeek/radius/manifest.yml b/x-pack/filebeat/module/zeek/radius/manifest.yml
index 505abcbbbd6..f881f404d7a 100644
--- a/x-pack/filebeat/module/zeek/radius/manifest.yml
+++ b/x-pack/filebeat/module/zeek/radius/manifest.yml
@@ -13,5 +13,5 @@ var:
   - name: community_id
     default: true
 
-ingest_pipeline: ingest/pipeline.json
+ingest_pipeline: ingest/pipeline.yml
 input: config/radius.yml
diff --git a/x-pack/filebeat/module/zeek/radius/test/radius-json.log-expected.json b/x-pack/filebeat/module/zeek/radius/test/radius-json.log-expected.json
index 9b4ddfa91f2..894b85f435f 100644
--- a/x-pack/filebeat/module/zeek/radius/test/radius-json.log-expected.json
+++ b/x-pack/filebeat/module/zeek/radius/test/radius-json.log-expected.json
@@ -4,15 +4,32 @@
         "destination.address": "10.0.0.100",
         "destination.ip": "10.0.0.100",
         "destination.port": 1812,
+        "event.category": [
+            "authentication",
+            "network"
+        ],
         "event.dataset": "zeek.radius",
         "event.id": "CRe9VD3flCDWbPmpIh",
+        "event.kind": "event",
         "event.module": "zeek",
+        "event.outcome": "success",
+        "event.type": [
+            "info",
+            "connection"
+        ],
         "fileset.name": "radius",
         "input.type": "log",
         "log.offset": 0,
         "network.community_id": "1:3SdDgWXPnheV2oGfVmxQjfwtr8E=",
         "network.protocol": "radius",
         "network.transport": "udp",
+        "related.ip": [
+            "10.0.0.1",
+            "10.0.0.100"
+        ],
+        "related.user": [
+            "John.McGuirk"
+        ],
         "service.type": "zeek",
         "source.address": "10.0.0.1",
         "source.ip": "10.0.0.1",
@@ -20,6 +37,7 @@
         "tags": [
             "zeek.radius"
         ],
+        "user.name": "John.McGuirk",
         "zeek.radius.mac": "00:14:22:e9:54:5e",
         "zeek.radius.result": "success",
         "zeek.radius.username": "John.McGuirk",
diff --git a/x-pack/filebeat/module/zeek/rdp/config/rdp.yml b/x-pack/filebeat/module/zeek/rdp/config/rdp.yml
index d9dac8f2e9b..b9b19e79dd7 100644
--- a/x-pack/filebeat/module/zeek/rdp/config/rdp.yml
+++ b/x-pack/filebeat/module/zeek/rdp/config/rdp.yml
@@ -69,10 +69,20 @@ processors:
       ignore_missing: true
       fail_on_error: false
 
-
+  - convert:
+      fields:
+        - {from: "zeek.session_id", to: "event.id"}
+        - {from: "source.address", to: "source.ip", type: "ip"}
+        - {from: "destination.address", to: "destination.ip", type: "ip"}
+  - add_fields:
+      target: event
+      fields:
+        kind: event
+        category:
+          - network
+        type:
+          - protocol
+          - info
 {{ if .community_id }}
   - community_id:
-      fields:
-        source_ip: source.address
-        destination_ip: destination.address
 {{ end }}
diff --git a/x-pack/filebeat/module/zeek/rdp/ingest/pipeline.json b/x-pack/filebeat/module/zeek/rdp/ingest/pipeline.json
deleted file mode 100644
index ae56b98801f..00000000000
--- a/x-pack/filebeat/module/zeek/rdp/ingest/pipeline.json
+++ /dev/null
@@ -1,55 +0,0 @@
-{
-  "description": "Pipeline for normalizing Zeek rdp.log",
-  "processors": [
-    {
-      "set": {
-        "field": "event.created",
-        "value": "{{_ingest.timestamp}}"
-      }
-    },
-    {
-      "date": {
-        "field": "zeek.rdp.ts",
-        "formats": ["UNIX"]
-      }
-    },
-    {
-      "remove": {
-        "field": "zeek.rdp.ts"
-      }
-    },
-    {
-      "set": {
-        "field": "event.id",
-        "value": "{{zeek.session_id}}",
-        "if": "ctx.zeek.session_id != null"
-      }
-    },
-    {
-      "convert": {
-        "field": "zeek.rdp.ssl",
-        "target_field": "tls.established",
-        "type": "boolean",
-        "ignore_missing": true
-      }
-    },
-    {
-      "set": {
-        "field": "source.ip",
-        "value": "{{source.address}}"
-      }
-    },
-    {
-      "set": {
-        "field": "destination.ip",
-        "value": "{{destination.address}}"
-      }
-    }
-  ],
-  "on_failure" : [{
-    "set" : {
-      "field" : "error.message",
-      "value" : "{{ _ingest.on_failure_message }}"
-    }
-  }]
-}
diff --git a/x-pack/filebeat/module/zeek/rdp/ingest/pipeline.yml b/x-pack/filebeat/module/zeek/rdp/ingest/pipeline.yml
new file mode 100644
index 00000000000..d6b70dd92e6
--- /dev/null
+++ b/x-pack/filebeat/module/zeek/rdp/ingest/pipeline.yml
@@ -0,0 +1,68 @@
+description: Pipeline for normalizing Zeek rdp.log
+processors:
+- set:
+    field: event.created
+    value: '{{_ingest.timestamp}}'
+- date:
+    field: zeek.rdp.ts
+    formats:
+    - UNIX
+- remove:
+    field: zeek.rdp.ts
+- convert:
+    field: zeek.rdp.ssl
+    target_field: tls.established
+    type: boolean
+    ignore_missing: true
+- geoip:
+    field: destination.ip
+    target_field: destination.geo
+    ignore_missing: true
+- geoip:
+    field: source.ip
+    target_field: source.geo
+    ignore_missing: true
+- geoip:
+    database_file: GeoLite2-ASN.mmdb
+    field: source.ip
+    target_field: source.as
+    properties:
+    - asn
+    - organization_name
+    ignore_missing: true
+- geoip:
+    database_file: GeoLite2-ASN.mmdb
+    field: destination.ip
+    target_field: destination.as
+    properties:
+    - asn
+    - organization_name
+    ignore_missing: true
+- rename:
+    field: source.as.asn
+    target_field: source.as.number
+    ignore_missing: true
+- rename:
+    field: source.as.organization_name
+    target_field: source.as.organization.name
+    ignore_missing: true
+- rename:
+    field: destination.as.asn
+    target_field: destination.as.number
+    ignore_missing: true
+- rename:
+    field: destination.as.organization_name
+    target_field: destination.as.organization.name
+    ignore_missing: true
+- append:
+    field: related.ip
+    value: "{{source.ip}}"
+    if: "ctx?.source?.ip != null"
+- append:
+    field: related.ip
+    value: "{{destination.ip}}"
+    if: "ctx?.destination?.ip != null"
+on_failure:
+- set:
+    field: error.message
+    value: '{{ _ingest.on_failure_message }}'
diff --git a/x-pack/filebeat/module/zeek/rdp/manifest.yml b/x-pack/filebeat/module/zeek/rdp/manifest.yml
index 044352bb2fd..b0c76c9f3a3 100644
--- a/x-pack/filebeat/module/zeek/rdp/manifest.yml
+++ b/x-pack/filebeat/module/zeek/rdp/manifest.yml
@@ -13,5 +13,5 @@ var:
   - name: community_id
     default: true
 
-ingest_pipeline: ingest/pipeline.json
+ingest_pipeline: ingest/pipeline.yml
 input: config/rdp.yml
diff --git a/x-pack/filebeat/module/zeek/rdp/test/rdp-json.log-expected.json b/x-pack/filebeat/module/zeek/rdp/test/rdp-json.log-expected.json
index 6d39caef60b..878eb3e2050 100644
--- a/x-pack/filebeat/module/zeek/rdp/test/rdp-json.log-expected.json
+++ b/x-pack/filebeat/module/zeek/rdp/test/rdp-json.log-expected.json
@@ -4,15 +4,27 @@
         "destination.address": "192.168.131.131",
         "destination.ip": "192.168.131.131",
         "destination.port": 3389,
+        "event.category": [
+            "network"
+        ],
         "event.dataset": "zeek.rdp",
         "event.id": "C2PcYV7D3ntaHm056",
+        "event.kind": "event",
         "event.module": "zeek",
+        "event.type": [
+            "protocol",
+            "info"
+        ],
         "fileset.name": "rdp",
         "input.type": "log",
         "log.offset": 0,
         "network.community_id": "1:PsQu6lSZioPVi0A5K7UaeGsVqS0=",
         "network.protocol": "rdp",
         "network.transport": "tcp",
+        "related.ip": [
+            "192.168.131.1",
+            "192.168.131.131"
+        ],
         "service.type": "zeek",
         "source.address": "192.168.131.1",
         "source.ip": "192.168.131.1",
diff --git a/x-pack/filebeat/module/zeek/rfb/config/rfb.yml b/x-pack/filebeat/module/zeek/rfb/config/rfb.yml
index 61e984131cd..f9a2618b02b 100644
--- a/x-pack/filebeat/module/zeek/rfb/config/rfb.yml
+++ b/x-pack/filebeat/module/zeek/rfb/config/rfb.yml
@@ -54,10 +54,20 @@ processors:
       ignore_missing: true
       fail_on_error: false
 
-
+  - convert:
+      fields:
+        - {from: "zeek.session_id", to: "event.id"}
+        - {from: "source.address", to: "source.ip", type: "ip"}
+        - {from: "destination.address", to: "destination.ip", type: "ip"}
+  - add_fields:
+      target: event
+      fields:
+        kind: event
+        category:
+          - network
+        type:
+          - connection
+          - info
 {{ if .community_id }}
   - community_id:
-      fields:
-        source_ip: source.address
-        destination_ip: destination.address
 {{ end }}
diff --git a/x-pack/filebeat/module/zeek/rfb/ingest/pipeline.json b/x-pack/filebeat/module/zeek/rfb/ingest/pipeline.json
deleted file mode 100644
index 14ae112ffea..00000000000
--- a/x-pack/filebeat/module/zeek/rfb/ingest/pipeline.json
+++ /dev/null
@@ -1,47 +0,0 @@
-{
-  "description": "Pipeline for normalizing Zeek rfb.log",
-  "processors": [
-    {
-      "set": {
-        "field": "event.created",
-        "value": "{{_ingest.timestamp}}"
-      }
-    },
-    {
-      "date": {
-        "field": "zeek.rfb.ts",
-        "formats": ["UNIX"]
-      }
-    },
-    {
-      "remove": {
-        "field": "zeek.rfb.ts"
-      }
-    },
-    {
-      "set": {
-        "field": "event.id",
-        "value": "{{zeek.session_id}}",
-        "if": "ctx.zeek.session_id != null"
-      }
-    },
-    {
-      "set": {
-        "field": "source.ip",
-        "value": "{{source.address}}"
-      }
-    },
-    {
-      "set": {
-        "field": "destination.ip",
-        "value": "{{destination.address}}"
-      }
-    }
-  ],
-  "on_failure" : [{
-    "set" : {
-      "field" : "error.message",
-      "value" : "{{ _ingest.on_failure_message }}"
-    }
-  }]
-}
diff --git a/x-pack/filebeat/module/zeek/rfb/ingest/pipeline.yml b/x-pack/filebeat/module/zeek/rfb/ingest/pipeline.yml
new file mode 100644
index 00000000000..8cf2cebdf4d
--- /dev/null
+++ b/x-pack/filebeat/module/zeek/rfb/ingest/pipeline.yml
@@ -0,0 +1,63 @@
+description: Pipeline for normalizing Zeek rfb.log
+processors:
+- set:
+    field: event.created
+    value: '{{_ingest.timestamp}}'
+- date:
+    field: zeek.rfb.ts
+    formats:
+    - UNIX
+- remove:
+    field: zeek.rfb.ts
+- append:
+    field: related.ip
+    value: "{{source.ip}}"
+    if: "ctx?.source?.ip != null"
+- append:
+    field: related.ip
+    value: "{{destination.ip}}"
+    if: "ctx?.destination?.ip != null"
+- geoip:
+    field: destination.ip
+    target_field: destination.geo
+    ignore_missing: true
+- geoip:
+    field: source.ip
+    target_field: source.geo
+    ignore_missing: true
+- geoip:
+    database_file: GeoLite2-ASN.mmdb
+    field: source.ip
+    target_field: source.as
+    properties:
+    - asn
+    - organization_name
+    ignore_missing: true
+- geoip:
+    database_file: GeoLite2-ASN.mmdb
+    field: destination.ip
+    target_field: destination.as
+    properties:
+    - asn
+    - organization_name
+    ignore_missing: true
+- rename:
+    field: source.as.asn
+    target_field: source.as.number
+    ignore_missing: true
+- rename:
+    field: source.as.organization_name
+    target_field: source.as.organization.name
+    ignore_missing: true
+- rename:
+    field: destination.as.asn
+    target_field: destination.as.number
+    ignore_missing: true
+- rename:
+    field: destination.as.organization_name
+    target_field: destination.as.organization.name
+    ignore_missing: true
+on_failure:
+- set:
+    field: error.message
+    value: '{{ _ingest.on_failure_message }}'
diff --git a/x-pack/filebeat/module/zeek/rfb/manifest.yml b/x-pack/filebeat/module/zeek/rfb/manifest.yml
index 2f96e4f618e..2b9daaab107 100644
--- a/x-pack/filebeat/module/zeek/rfb/manifest.yml
+++ b/x-pack/filebeat/module/zeek/rfb/manifest.yml
@@ -13,5 +13,5 @@ var:
   - name: community_id
     default: true
 
-ingest_pipeline: ingest/pipeline.json
+ingest_pipeline: ingest/pipeline.yml
 input: config/rfb.yml
diff --git a/x-pack/filebeat/module/zeek/rfb/test/rfb-json.log-expected.json b/x-pack/filebeat/module/zeek/rfb/test/rfb-json.log-expected.json
index c860f5377e3..83b5544b655 100644
--- a/x-pack/filebeat/module/zeek/rfb/test/rfb-json.log-expected.json
+++ b/x-pack/filebeat/module/zeek/rfb/test/rfb-json.log-expected.json
@@ -4,15 +4,27 @@
         "destination.address": "192.168.1.10",
         "destination.ip": "192.168.1.10",
         "destination.port": 5900,
+        "event.category": [
+            "network"
+        ],
         "event.dataset": "zeek.rfb",
         "event.id": "CXoIzM3wH3fUwXtKN1",
+        "event.kind": "event",
         "event.module": "zeek",
+        "event.type": [
+            "connection",
+            "info"
+        ],
         "fileset.name": "rfb",
         "input.type": "log",
         "log.offset": 0,
         "network.community_id": "1:AtPVA5phuztnwqMfO/2142WXVdY=",
         "network.protocol": "rfb",
         "network.transport": "tcp",
+        "related.ip": [
+            "192.168.1.123",
+            "192.168.1.10"
+        ],
         "service.type": "zeek",
         "source.address": "192.168.1.123",
         "source.ip": "192.168.1.123",
diff --git a/x-pack/filebeat/module/zeek/sip/config/sip.yml b/x-pack/filebeat/module/zeek/sip/config/sip.yml
index bd22de69672..c94dbe5e40e 100644
--- a/x-pack/filebeat/module/zeek/sip/config/sip.yml
+++ b/x-pack/filebeat/module/zeek/sip/config/sip.yml
@@ -72,10 +72,24 @@ processors:
       ignore_missing: true
       fail_on_error: false
 
-
+  - convert:
+      fields:
+        - {from: "zeek.session_id", to: "event.id"}
+        - {from: "source.address", to: "source.ip", type: "ip"}
+        - {from: "destination.address", to: "destination.ip", type: "ip"}
+        - {from: "zeek.sip.sequence.method", to: "event.action"}
+        - {from: "zeek.sip.uri", to: "url.full"}
+      ignore_missing: true
+      fail_on_error: false
+  - add_fields:
+      target: event
+      fields:
+        kind: event
+        category:
+          - network
+        type:
+          - connection
+          - protocol
 {{ if .community_id }}
   - community_id:
-      fields:
-        source_ip: source.address
-        destination_ip: destination.address
 {{ end }}
diff --git a/x-pack/filebeat/module/zeek/sip/ingest/pipeline.json b/x-pack/filebeat/module/zeek/sip/ingest/pipeline.json
b/x-pack/filebeat/module/zeek/sip/ingest/pipeline.json deleted file mode 100644 index c3b7eab58fb..00000000000 --- a/x-pack/filebeat/module/zeek/sip/ingest/pipeline.json +++ /dev/null @@ -1,60 +0,0 @@ -{ - "description": "Pipeline for normalizing Zeek sip.log", - "processors": [ - { - "set": { - "field": "event.created", - "value": "{{_ingest.timestamp}}" - } - }, - { - "date": { - "field": "zeek.sip.ts", - "formats": ["UNIX"] - } - }, - { - "remove": { - "field": "zeek.sip.ts" - } - }, - { - "set": { - "field": "event.id", - "value": "{{zeek.session_id}}", - "if": "ctx.zeek.session_id != null" - } - }, - { - "set": { - "field": "source.ip", - "value": "{{source.address}}" - } - }, - { - "set": { - "field": "destination.ip", - "value": "{{destination.address}}" - } - }, - { - "grok": { - "field": "zeek.sip.seq", - "patterns": ["%{NUMBER:zeek.sip.sequence.number}"], - "ignore_missing": true - } - }, - { - "remove": { - "field": "zeek.sip.seq", - "ignore_missing": true - } - } - ], - "on_failure" : [{ - "set" : { - "field" : "error.message", - "value" : "{{ _ingest.on_failure_message }}" - } - }] -} diff --git a/x-pack/filebeat/module/zeek/sip/ingest/pipeline.yml b/x-pack/filebeat/module/zeek/sip/ingest/pipeline.yml new file mode 100644 index 00000000000..9982cb82d87 --- /dev/null +++ b/x-pack/filebeat/module/zeek/sip/ingest/pipeline.yml @@ -0,0 +1,83 @@ +description: Pipeline for normalizing Zeek sip.log +processors: +- set: + field: event.created + value: '{{_ingest.timestamp}}' +- date: + field: zeek.sip.ts + formats: + - UNIX +- remove: + field: zeek.sip.ts +- grok: + field: zeek.sip.seq + patterns: + - '%{NUMBER:zeek.sip.sequence.number}' + ignore_missing: true +- remove: + field: zeek.sip.seq + ignore_missing: true +- geoip: + field: destination.ip + target_field: destination.geo + ignore_missing: true +- geoip: + field: source.ip + target_field: source.geo + ignore_missing: true +- geoip: + database_file: GeoLite2-ASN.mmdb + field: source.ip + target_field: source.as + properties: + - asn + - organization_name + ignore_missing: true +- geoip: + database_file: GeoLite2-ASN.mmdb + field: destination.ip + target_field: destination.as + properties: + - asn + - organization_name + ignore_missing: true +- rename: + field: source.as.asn + target_field: source.as.number + ignore_missing: true +- rename: + field: source.as.organization_name + target_field: source.as.organization.name + ignore_missing: true +- rename: + field: destination.as.asn + target_field: destination.as.number + ignore_missing: true +- rename: + field: destination.as.organization_name + target_field: destination.as.organization.name + ignore_missing: true +- append: + field: related.ip + value: "{{source.ip}}" + if: "ctx?.source?.ip != null" +- append: + field: related.ip + value: "{{destination.ip}}" + if: "ctx?.destination?.ip != null" +- append: + field: event.type + value: error + if: "ctx?.zeek?.sip?.status?.code != null && ctx.zeek.sip.status.code >= 400" +- set: + field: event.outcome + value: failure + if: "ctx?.zeek?.sip?.status?.code != null && ctx.zeek.sip.status.code >= 400" +- set: + field: event.outcome + value: success + if: "ctx?.zeek?.sip?.status?.code != null && ctx.zeek.sip.status.code < 400" +on_failure: +- set: + field: error.message + value: '{{ _ingest.on_failure_message }}' diff --git a/x-pack/filebeat/module/zeek/sip/manifest.yml b/x-pack/filebeat/module/zeek/sip/manifest.yml index 8b022a943af..8da0cc443dd 100644 --- a/x-pack/filebeat/module/zeek/sip/manifest.yml +++ 
b/x-pack/filebeat/module/zeek/sip/manifest.yml @@ -13,5 +13,5 @@ var: - name: community_id default: true -ingest_pipeline: ingest/pipeline.json +ingest_pipeline: ingest/pipeline.yml input: config/sip.yml diff --git a/x-pack/filebeat/module/zeek/sip/test/sip-json.log-expected.json b/x-pack/filebeat/module/zeek/sip/test/sip-json.log-expected.json index c24f5405435..79b38a0717d 100644 --- a/x-pack/filebeat/module/zeek/sip/test/sip-json.log-expected.json +++ b/x-pack/filebeat/module/zeek/sip/test/sip-json.log-expected.json @@ -2,17 +2,38 @@ { "@timestamp": "2013-02-26T22:02:39.055Z", "destination.address": "74.63.41.218", + "destination.as.number": 29791, + "destination.as.organization.name": "Internap Corporation", + "destination.geo.continent_name": "North America", + "destination.geo.country_iso_code": "US", + "destination.geo.location.lat": 37.751, + "destination.geo.location.lon": -97.822, "destination.ip": "74.63.41.218", "destination.port": 5060, + "event.action": "REGISTER", + "event.category": [ + "network" + ], "event.dataset": "zeek.sip", "event.id": "CPRLCB4eWHdjP852Bk", + "event.kind": "event", "event.module": "zeek", + "event.outcome": "failure", + "event.type": [ + "connection", + "protocol", + "error" + ], "fileset.name": "sip", "input.type": "log", "log.offset": 0, "network.community_id": "1:t8Jl0amIXPHemzxKgsLjtkB+ewo=", "network.protocol": "sip", "network.transport": "udp", + "related.ip": [ + "172.16.133.19", + "74.63.41.218" + ], "service.type": "zeek", "source.address": "172.16.133.19", "source.ip": "172.16.133.19", @@ -20,6 +41,7 @@ "tags": [ "zeek.sip" ], + "url.full": "sip:newyork.voip.ms:5060", "zeek.session_id": "CPRLCB4eWHdjP852Bk", "zeek.sip.call_id": "8694cd7e-976e4fc3-d76f6e38@172.16.133.19", "zeek.sip.request.body_length": 0, @@ -45,24 +67,57 @@ { "@timestamp": "2005-01-14T17:58:02.965Z", "destination.address": "200.57.7.195", + "destination.as.number": 18734, + "destination.as.organization.name": "Operbes, S.A. de C.V.", + "destination.geo.city_name": "Mexico City", + "destination.geo.continent_name": "North America", + "destination.geo.country_iso_code": "MX", + "destination.geo.location.lat": 19.4357, + "destination.geo.location.lon": -99.1438, + "destination.geo.region_iso_code": "MX-CMX", + "destination.geo.region_name": "Mexico City", "destination.ip": "200.57.7.195", "destination.port": 5060, + "event.action": "INVITE", + "event.category": [ + "network" + ], "event.dataset": "zeek.sip", "event.id": "ComJz236lSOcuOmix3", + "event.kind": "event", "event.module": "zeek", + "event.outcome": "success", + "event.type": [ + "connection", + "protocol" + ], "fileset.name": "sip", "input.type": "log", "log.offset": 805, "network.community_id": "1:U/Makwsc8lm6pVKLfRMzoNTI++0=", "network.protocol": "sip", "network.transport": "udp", + "related.ip": [ + "200.57.7.204", + "200.57.7.195" + ], "service.type": "zeek", "source.address": "200.57.7.204", + "source.as.number": 18734, + "source.as.organization.name": "Operbes, S.A. 
de C.V.", + "source.geo.city_name": "Mexico City", + "source.geo.continent_name": "North America", + "source.geo.country_iso_code": "MX", + "source.geo.location.lat": 19.4357, + "source.geo.location.lon": -99.1438, + "source.geo.region_iso_code": "MX-CMX", + "source.geo.region_name": "Mexico City", "source.ip": "200.57.7.204", "source.port": 5061, "tags": [ "zeek.sip" ], + "url.full": "sip:francisco@bestel.com:55060", "zeek.session_id": "ComJz236lSOcuOmix3", "zeek.sip.call_id": "12013223@200.57.7.195", "zeek.sip.request.body_length": 229, @@ -91,24 +146,57 @@ { "@timestamp": "2005-01-14T17:58:07.022Z", "destination.address": "200.57.7.195", + "destination.as.number": 18734, + "destination.as.organization.name": "Operbes, S.A. de C.V.", + "destination.geo.city_name": "Mexico City", + "destination.geo.continent_name": "North America", + "destination.geo.country_iso_code": "MX", + "destination.geo.location.lat": 19.4357, + "destination.geo.location.lon": -99.1438, + "destination.geo.region_iso_code": "MX-CMX", + "destination.geo.region_name": "Mexico City", "destination.ip": "200.57.7.195", "destination.port": 5060, + "event.action": "REGISTER", + "event.category": [ + "network" + ], "event.dataset": "zeek.sip", "event.id": "CJZDWgixtwqXctWEg", + "event.kind": "event", "event.module": "zeek", + "event.outcome": "success", + "event.type": [ + "connection", + "protocol" + ], "fileset.name": "sip", "input.type": "log", "log.offset": 1654, "network.community_id": "1:0hvHF/bh5wFKg7nfRXxsno4F198=", "network.protocol": "sip", "network.transport": "udp", + "related.ip": [ + "200.57.7.205", + "200.57.7.195" + ], "service.type": "zeek", "source.address": "200.57.7.205", + "source.as.number": 18734, + "source.as.organization.name": "Operbes, S.A. de C.V.", + "source.geo.city_name": "Mexico City", + "source.geo.continent_name": "North America", + "source.geo.country_iso_code": "MX", + "source.geo.location.lat": 19.4357, + "source.geo.location.lon": -99.1438, + "source.geo.region_iso_code": "MX-CMX", + "source.geo.region_name": "Mexico City", "source.ip": "200.57.7.205", "source.port": 5061, "tags": [ "zeek.sip" ], + "url.full": "sip:Verso.com", "zeek.session_id": "CJZDWgixtwqXctWEg", "zeek.sip.call_id": "46E1C3CB36304F84A020CF6DD3F96461@Verso.com", "zeek.sip.request.body_length": 0, diff --git a/x-pack/filebeat/module/zeek/smb_cmd/config/smb_cmd.yml b/x-pack/filebeat/module/zeek/smb_cmd/config/smb_cmd.yml index d9839c7dc16..ada63493d6f 100644 --- a/x-pack/filebeat/module/zeek/smb_cmd/config/smb_cmd.yml +++ b/x-pack/filebeat/module/zeek/smb_cmd/config/smb_cmd.yml @@ -78,10 +78,24 @@ processors: ignore_missing: true fail_on_error: false - + - convert: + fields: + - {from: "zeek.session_id", to: "event.id"} + - {from: "source.address", to: "source.ip", type: "ip"} + - {from: "destination.address", to: "destination.ip", type: "ip"} + - {from: "zeek.smb_cmd.command", to: "event.action"} + - {from: "zeek.smb_cmd.username", to: "user.name"} + ignore_missing: true + fail_on_error: false + - add_fields: + target: event + fields: + kind: event + category: + - network + type: + - connection + - protocol {{ if .community_id }} - community_id: - fields: - source_ip: source.address - destination_ip: destination.address {{ end }} diff --git a/x-pack/filebeat/module/zeek/smb_cmd/ingest/pipeline.json b/x-pack/filebeat/module/zeek/smb_cmd/ingest/pipeline.json deleted file mode 100644 index 6b1f7f1b2af..00000000000 --- a/x-pack/filebeat/module/zeek/smb_cmd/ingest/pipeline.json +++ /dev/null @@ -1,53 +0,0 @@ -{ - 
"description": "Pipeline for normalizing Zeek smb_cmd.log", - "processors": [ - { - "set": { - "field": "event.created", - "value": "{{_ingest.timestamp}}" - } - }, - { - "date": { - "field": "zeek.smb_cmd.ts", - "formats": ["UNIX"] - } - }, - { - "remove": { - "field": "zeek.smb_cmd.ts" - } - }, - { - "set": { - "field": "event.id", - "value": "{{zeek.session_id}}", - "if": "ctx.zeek.session_id != null" - } - }, - { - "remove": { - "field": "zeek.smb_cmd.referenced_file", - "ignore_missing": true - } - }, - { - "set": { - "field": "source.ip", - "value": "{{source.address}}" - } - }, - { - "set": { - "field": "destination.ip", - "value": "{{destination.address}}" - } - } - ], - "on_failure" : [{ - "set" : { - "field" : "error.message", - "value" : "{{ _ingest.on_failure_message }}" - } - }] -} diff --git a/x-pack/filebeat/module/zeek/smb_cmd/ingest/pipeline.yml b/x-pack/filebeat/module/zeek/smb_cmd/ingest/pipeline.yml new file mode 100644 index 00000000000..838e9f2e8bc --- /dev/null +++ b/x-pack/filebeat/module/zeek/smb_cmd/ingest/pipeline.yml @@ -0,0 +1,82 @@ +description: Pipeline for normalizing Zeek smb_cmd.log +processors: +- set: + field: event.created + value: '{{_ingest.timestamp}}' +- date: + field: zeek.smb_cmd.ts + formats: + - UNIX +- remove: + field: zeek.smb_cmd.ts +- remove: + field: zeek.smb_cmd.referenced_file + ignore_missing: true +- geoip: + field: destination.ip + target_field: destination.geo + ignore_missing: true +- geoip: + field: source.ip + target_field: source.geo + ignore_missing: true +- geoip: + database_file: GeoLite2-ASN.mmdb + field: source.ip + target_field: source.as + properties: + - asn + - organization_name + ignore_missing: true +- geoip: + database_file: GeoLite2-ASN.mmdb + field: destination.ip + target_field: destination.as + properties: + - asn + - organization_name + ignore_missing: true +- rename: + field: source.as.asn + target_field: source.as.number + ignore_missing: true +- rename: + field: source.as.organization_name + target_field: source.as.organization.name + ignore_missing: true +- rename: + field: destination.as.asn + target_field: destination.as.number + ignore_missing: true +- rename: + field: destination.as.organization_name + target_field: destination.as.organization.name + ignore_missing: true +- append: + field: related.ip + value: "{{source.ip}}" + if: "ctx?.source?.ip != null" +- append: + field: related.ip + value: "{{destination.ip}}" + if: "ctx?.destination?.ip != null" +- append: + field: related.user + value: "{{user.name}}" + if: "ctx?.user?.name != null" +- append: + field: event.type + value: error + if: "ctx?.zeek?.smb_cmd?.status != null && ctx.zeek.smb_cmd.status.toLowerCase() != 'success'" +- set: + field: event.outcome + value: success + if: "ctx?.zeek?.smb_cmd?.status != null && ctx.zeek.smb_cmd.status.toLowerCase() == 'success'" +- set: + field: event.outcome + value: failure + if: "ctx?.zeek?.smb_cmd?.status != null && ctx.zeek.smb_cmd.status.toLowerCase() != 'success'" +on_failure: +- set: + field: error.message + value: '{{ _ingest.on_failure_message }}' diff --git a/x-pack/filebeat/module/zeek/smb_cmd/manifest.yml b/x-pack/filebeat/module/zeek/smb_cmd/manifest.yml index 089269869e8..a4ad3a78ce1 100644 --- a/x-pack/filebeat/module/zeek/smb_cmd/manifest.yml +++ b/x-pack/filebeat/module/zeek/smb_cmd/manifest.yml @@ -13,5 +13,5 @@ var: - name: community_id default: true -ingest_pipeline: ingest/pipeline.json +ingest_pipeline: ingest/pipeline.yml input: config/smb_cmd.yml diff --git 
a/x-pack/filebeat/module/zeek/smb_cmd/test/smb_cmd-json.log-expected.json b/x-pack/filebeat/module/zeek/smb_cmd/test/smb_cmd-json.log-expected.json index 872ce4a8238..e18caef3fd2 100644 --- a/x-pack/filebeat/module/zeek/smb_cmd/test/smb_cmd-json.log-expected.json +++ b/x-pack/filebeat/module/zeek/smb_cmd/test/smb_cmd-json.log-expected.json @@ -4,15 +4,29 @@ "destination.address": "172.16.128.202", "destination.ip": "172.16.128.202", "destination.port": 445, + "event.action": "NT_CREATE_ANDX", + "event.category": [ + "network" + ], "event.dataset": "zeek.smb_cmd", "event.id": "CbT8mpAXseu6Pt4R7", + "event.kind": "event", "event.module": "zeek", + "event.outcome": "success", + "event.type": [ + "connection", + "protocol" + ], "fileset.name": "smb_cmd", "input.type": "log", "log.offset": 0, "network.community_id": "1:SJNAD5vtzZuhQjGtfaI8svTnyuw=", "network.protocol": "smb", "network.transport": "tcp", + "related.ip": [ + "172.16.133.6", + "172.16.128.202" + ], "service.type": "zeek", "source.address": "172.16.133.6", "source.ip": "172.16.133.6", diff --git a/x-pack/filebeat/module/zeek/smb_files/config/smb_files.yml b/x-pack/filebeat/module/zeek/smb_files/config/smb_files.yml index ed5d4cdecbb..8ab5ee36395 100644 --- a/x-pack/filebeat/module/zeek/smb_files/config/smb_files.yml +++ b/x-pack/filebeat/module/zeek/smb_files/config/smb_files.yml @@ -36,10 +36,26 @@ processors: ignore_missing: true fail_on_error: false - + - convert: + fields: + - {from: "zeek.session_id", to: "event.id"} + - {from: "source.address", to: "source.ip", type: "ip"} + - {from: "destination.address", to: "destination.ip", type: "ip"} + - {from: "zeek.smb_files.action", to: "event.action"} + - {from: "zeek.smb_files.name", to: "file.name"} + - {from: "zeek.smb_files.size", to: "file.size"} + ignore_missing: true + fail_on_error: false + - add_fields: + target: event + fields: + kind: event + category: + - network + - file + type: + - connection + - protocol {{ if .community_id }} - community_id: - fields: - source_ip: source.address - destination_ip: destination.address {{ end }} diff --git a/x-pack/filebeat/module/zeek/smb_files/ingest/pipeline.json b/x-pack/filebeat/module/zeek/smb_files/ingest/pipeline.json deleted file mode 100644 index b4cfcfaa5b1..00000000000 --- a/x-pack/filebeat/module/zeek/smb_files/ingest/pipeline.json +++ /dev/null @@ -1,103 +0,0 @@ -{ - "description": "Pipeline for normalizing Zeek smb_files.log", - "processors": [ - { - "set": { - "field": "event.created", - "value": "{{_ingest.timestamp}}" - } - }, - { - "date": { - "field": "zeek.smb_files.ts", - "formats": ["UNIX"] - } - }, - { - "remove": { - "field": "zeek.smb_files.ts" - } - }, - { - "set": { - "field": "event.id", - "value": "{{zeek.session_id}}", - "if": "ctx.zeek.session_id != null" - } - }, - { - "set": { - "field": "source.ip", - "value": "{{source.address}}" - } - }, - { - "set": { - "field": "destination.ip", - "value": "{{destination.address}}" - } - }, - { - "dot_expander": { - "field": "times.accessed", - "path": "zeek.smb_files" - } - }, - { - "dot_expander": { - "field": "times.changed", - "path": "zeek.smb_files" - } - }, - { - "dot_expander": { - "field": "times.created", - "path": "zeek.smb_files" - } - }, - { - "dot_expander": { - "field": "times.modified", - "path": "zeek.smb_files" - } - }, - { - "date": { - "field": "zeek.smb_files.times.accessed", - "target_field": "zeek.smb_files.times.accessed", - "formats": ["UNIX"], - "if": "ctx.zeek.smb_files.times?.accessed != null" - } - }, - { - "date": { - "field": 
"zeek.smb_files.times.changed", - "target_field": "zeek.smb_files.times.changed", - "formats": ["UNIX"], - "if": "ctx.zeek.smb_files.times?.accessed != null" - } - }, - { - "date": { - "field": "zeek.smb_files.times.created", - "target_field": "zeek.smb_files.times.created", - "formats": ["UNIX"], - "if": "ctx.zeek.smb_files.times?.accessed != null" - } - }, - { - "date": { - "field": "zeek.smb_files.times.modified", - "target_field": "zeek.smb_files.times.modified", - "formats": ["UNIX"], - "if": "ctx.zeek.smb_files.times?.accessed != null" - } - } - ], - "on_failure": [{ - "set": { - "field": "error.message", - "value": "{{ _ingest.on_failure_message }}" - } - }] -} diff --git a/x-pack/filebeat/module/zeek/smb_files/ingest/pipeline.yml b/x-pack/filebeat/module/zeek/smb_files/ingest/pipeline.yml new file mode 100644 index 00000000000..b2c7f52a29b --- /dev/null +++ b/x-pack/filebeat/module/zeek/smb_files/ingest/pipeline.yml @@ -0,0 +1,135 @@ +description: Pipeline for normalizing Zeek smb_files.log +processors: +- set: + field: event.created + value: '{{_ingest.timestamp}}' +- date: + field: zeek.smb_files.ts + formats: + - UNIX +- remove: + field: zeek.smb_files.ts +- dot_expander: + field: times.accessed + path: zeek.smb_files +- dot_expander: + field: times.changed + path: zeek.smb_files +- dot_expander: + field: times.created + path: zeek.smb_files +- dot_expander: + field: times.modified + path: zeek.smb_files +- date: + field: zeek.smb_files.times.accessed + target_field: zeek.smb_files.times.accessed + formats: + - UNIX + if: ctx.zeek.smb_files.times?.accessed != null +- set: + field: file.accessed + value: "{{zeek.smb_files.times.accessed}}" + if: "ctx?.zeek?.smb_files?.times?.accessed != null" +- date: + field: zeek.smb_files.times.changed + target_field: zeek.smb_files.times.changed + formats: + - UNIX + if: ctx.zeek.smb_files.times?.accessed != null +- set: + field: file.ctime + value: "{{zeek.smb_files.times.changed}}" + if: "ctx?.zeek?.smb_files?.times?.changed != null" +- date: + field: zeek.smb_files.times.created + target_field: zeek.smb_files.times.created + formats: + - UNIX + if: ctx.zeek.smb_files.times?.accessed != null +- set: + field: file.created + value: "{{zeek.smb_files.times.created}}" + if: "ctx?.zeek?.smb_files?.times?.created != null" +- date: + field: zeek.smb_files.times.modified + target_field: zeek.smb_files.times.modified + formats: + - UNIX + if: ctx.zeek.smb_files.times?.accessed != null +- set: + field: file.mtime + value: "{{zeek.smb_files.times.modified}}" + if: "ctx?.zeek?.smb_files?.times?.modified != null" +- geoip: + field: destination.ip + target_field: destination.geo + ignore_missing: true +- geoip: + field: source.ip + target_field: source.geo + ignore_missing: true +- geoip: + database_file: GeoLite2-ASN.mmdb + field: source.ip + target_field: source.as + properties: + - asn + - organization_name + ignore_missing: true +- geoip: + database_file: GeoLite2-ASN.mmdb + field: destination.ip + target_field: destination.as + properties: + - asn + - organization_name + ignore_missing: true +- rename: + field: source.as.asn + target_field: source.as.number + ignore_missing: true +- rename: + field: source.as.organization_name + target_field: source.as.organization.name + ignore_missing: true +- rename: + field: destination.as.asn + target_field: destination.as.number + ignore_missing: true +- rename: + field: destination.as.organization_name + target_field: destination.as.organization.name + ignore_missing: true +- append: + field: related.ip + 
value: "{{source.ip}}" + if: "ctx?.source?.ip != null" +- append: + field: related.ip + value: "{{destination.ip}}" + if: "ctx?.destination?.ip != null" +- append: + field: related.user + value: "{{user.name}}" + if: "ctx?.user?.name != null" +- set: + field: file.path + value: "{{zeek.smb_files.path}}\\{{zeek.smb_files.name}}" + if: "ctx?.zeek?.smb_files?.path != null && ctx?.zeek?.smb_files?.name != null" +- append: + field: event.type + value: deletion + if: "ctx?.zeek?.smb_files?.action == 'SMB::FILE_DELETE'" +- append: + field: event.type + value: change + if: "ctx?.zeek?.smb_files?.action == 'SMB::FILE_RENAME' || ctx?.zeek?.smb_files?.action == 'SMB::FILE_SET_ATTRIBUTE'" +- append: + field: event.type + value: info + if: "ctx?.zeek?.smb_files?.action != null && ctx.zeek.smb_files != 'SMB::FILE_DELETE' && ctx.zeek.smb_files != 'SMB::FILE_RENAME' && ctx.zeek.smb_files != 'SMB::FILE_SET_ATTRIBUTE'" +on_failure: +- set: + field: error.message + value: '{{ _ingest.on_failure_message }}' diff --git a/x-pack/filebeat/module/zeek/smb_files/manifest.yml b/x-pack/filebeat/module/zeek/smb_files/manifest.yml index 154b445e765..f59a04153a5 100644 --- a/x-pack/filebeat/module/zeek/smb_files/manifest.yml +++ b/x-pack/filebeat/module/zeek/smb_files/manifest.yml @@ -13,5 +13,5 @@ var: - name: community_id default: true -ingest_pipeline: ingest/pipeline.json +ingest_pipeline: ingest/pipeline.yml input: config/smb_files.yml diff --git a/x-pack/filebeat/module/zeek/smb_files/test/smb_files-json.log-expected.json b/x-pack/filebeat/module/zeek/smb_files/test/smb_files-json.log-expected.json index fc7b8496d08..c7d5ab98b78 100644 --- a/x-pack/filebeat/module/zeek/smb_files/test/smb_files-json.log-expected.json +++ b/x-pack/filebeat/module/zeek/smb_files/test/smb_files-json.log-expected.json @@ -4,15 +4,37 @@ "destination.address": "192.168.10.30", "destination.ip": "192.168.10.30", "destination.port": 445, + "event.action": "SMB::FILE_OPEN", + "event.category": [ + "network", + "file" + ], "event.dataset": "zeek.smb_files", "event.id": "C9YAaEzWLL62yWMn5", + "event.kind": "event", "event.module": "zeek", + "event.type": [ + "connection", + "protocol", + "info" + ], + "file.accessed": "2017-10-09T16:13:19.607Z", + "file.created": "2017-10-09T16:13:19.607Z", + "file.ctime": "2017-10-09T16:13:19.607Z", + "file.mtime": "2017-10-09T16:13:19.607Z", + "file.name": "PSEXESVC.exe", + "file.path": "\\\\\\\\admin-pc\\\\ADMIN$\\PSEXESVC.exe", + "file.size": 0, "fileset.name": "smb_files", "input.type": "log", "log.offset": 0, "network.community_id": "1:k308wDxRMx/FIEzeh+YwD86zgoA=", "network.protocol": "smb", "network.transport": "tcp", + "related.ip": [ + "192.168.10.31", + "192.168.10.30" + ], "service.type": "zeek", "source.address": "192.168.10.31", "source.ip": "192.168.10.31", diff --git a/x-pack/filebeat/module/zeek/smb_mapping/config/smb_mapping.yml b/x-pack/filebeat/module/zeek/smb_mapping/config/smb_mapping.yml index 72ea3647344..0d0934c62c8 100644 --- a/x-pack/filebeat/module/zeek/smb_mapping/config/smb_mapping.yml +++ b/x-pack/filebeat/module/zeek/smb_mapping/config/smb_mapping.yml @@ -36,10 +36,22 @@ processors: ignore_missing: true fail_on_error: false - + - convert: + fields: + - {from: "zeek.session_id", to: "event.id"} + - {from: "source.address", to: "source.ip", type: "ip"} + - {from: "destination.address", to: "destination.ip", type: "ip"} + ignore_missing: true + fail_on_error: false + - add_fields: + target: event + fields: + kind: event + category: + - network + type: + - connection + - protocol 
{{ if .community_id }} - community_id: - fields: - source_ip: source.address - destination_ip: destination.address {{ end }} diff --git a/x-pack/filebeat/module/zeek/smb_mapping/ingest/pipeline.json b/x-pack/filebeat/module/zeek/smb_mapping/ingest/pipeline.json deleted file mode 100644 index c15ad371ed3..00000000000 --- a/x-pack/filebeat/module/zeek/smb_mapping/ingest/pipeline.json +++ /dev/null @@ -1,47 +0,0 @@ -{ - "description": "Pipeline for normalizing Zeek smb_mapping.log", - "processors": [ - { - "set": { - "field": "event.created", - "value": "{{_ingest.timestamp}}" - } - }, - { - "date": { - "field": "zeek.smb_mapping.ts", - "formats": ["UNIX"] - } - }, - { - "remove": { - "field": "zeek.smb_mapping.ts" - } - }, - { - "set": { - "field": "event.id", - "value": "{{zeek.session_id}}", - "if": "ctx.zeek.session_id != null" - } - }, - { - "set": { - "field": "source.ip", - "value": "{{source.address}}" - } - }, - { - "set": { - "field": "destination.ip", - "value": "{{destination.address}}" - } - } - ], - "on_failure": [{ - "set": { - "field": "error.message", - "value": "{{ _ingest.on_failure_message }}" - } - }] -} diff --git a/x-pack/filebeat/module/zeek/smb_mapping/ingest/pipeline.yml b/x-pack/filebeat/module/zeek/smb_mapping/ingest/pipeline.yml new file mode 100644 index 00000000000..b5752120267 --- /dev/null +++ b/x-pack/filebeat/module/zeek/smb_mapping/ingest/pipeline.yml @@ -0,0 +1,63 @@ +description: Pipeline for normalizing Zeek smb_mapping.log +processors: +- set: + field: event.created + value: '{{_ingest.timestamp}}' +- date: + field: zeek.smb_mapping.ts + formats: + - UNIX +- remove: + field: zeek.smb_mapping.ts +- geoip: + field: destination.ip + target_field: destination.geo + ignore_missing: true +- geoip: + field: source.ip + target_field: source.geo + ignore_missing: true +- geoip: + database_file: GeoLite2-ASN.mmdb + field: source.ip + target_field: source.as + properties: + - asn + - organization_name + ignore_missing: true +- geoip: + database_file: GeoLite2-ASN.mmdb + field: destination.ip + target_field: destination.as + properties: + - asn + - organization_name + ignore_missing: true +- rename: + field: source.as.asn + target_field: source.as.number + ignore_missing: true +- rename: + field: source.as.organization_name + target_field: source.as.organization.name + ignore_missing: true +- rename: + field: destination.as.asn + target_field: destination.as.number + ignore_missing: true +- rename: + field: destination.as.organization_name + target_field: destination.as.organization.name + ignore_missing: true +- append: + field: related.ip + value: "{{source.ip}}" + if: "ctx?.source?.ip != null" +- append: + field: related.ip + value: "{{destination.ip}}" + if: "ctx?.destination?.ip != null" +on_failure: +- set: + field: error.message + value: '{{ _ingest.on_failure_message }}' diff --git a/x-pack/filebeat/module/zeek/smb_mapping/manifest.yml b/x-pack/filebeat/module/zeek/smb_mapping/manifest.yml index 403d2951c0c..7382e529b27 100644 --- a/x-pack/filebeat/module/zeek/smb_mapping/manifest.yml +++ b/x-pack/filebeat/module/zeek/smb_mapping/manifest.yml @@ -13,5 +13,5 @@ var: - name: community_id default: true -ingest_pipeline: ingest/pipeline.json +ingest_pipeline: ingest/pipeline.yml input: config/smb_mapping.yml diff --git a/x-pack/filebeat/module/zeek/smb_mapping/test/smb_mapping-json.log-expected.json b/x-pack/filebeat/module/zeek/smb_mapping/test/smb_mapping-json.log-expected.json index fbd3dd29693..71efd1e51ac 100644 --- 
a/x-pack/filebeat/module/zeek/smb_mapping/test/smb_mapping-json.log-expected.json +++ b/x-pack/filebeat/module/zeek/smb_mapping/test/smb_mapping-json.log-expected.json @@ -4,15 +4,27 @@ "destination.address": "192.168.10.30", "destination.ip": "192.168.10.30", "destination.port": 445, + "event.category": [ + "network" + ], "event.dataset": "zeek.smb_mapping", "event.id": "C9YAaEzWLL62yWMn5", + "event.kind": "event", "event.module": "zeek", + "event.type": [ + "connection", + "protocol" + ], "fileset.name": "smb_mapping", "input.type": "log", "log.offset": 0, "network.community_id": "1:k308wDxRMx/FIEzeh+YwD86zgoA=", "network.protocol": "smb", "network.transport": "tcp", + "related.ip": [ + "192.168.10.31", + "192.168.10.30" + ], "service.type": "zeek", "source.address": "192.168.10.31", "source.ip": "192.168.10.31", diff --git a/x-pack/filebeat/module/zeek/smtp/config/smtp.yml b/x-pack/filebeat/module/zeek/smtp/config/smtp.yml index af4855948ea..fc8c3b0074f 100644 --- a/x-pack/filebeat/module/zeek/smtp/config/smtp.yml +++ b/x-pack/filebeat/module/zeek/smtp/config/smtp.yml @@ -45,10 +45,23 @@ processors: ignore_missing: true fail_on_error: false - + - convert: + fields: + - {from: "zeek.session_id", to: "event.id"} + - {from: "source.address", to: "source.ip", type: "ip"} + - {from: "destination.address", to: "destination.ip", type: "ip"} + - {from: "zeek.smtp.tls", to: "tls.established", type: boolean} + ignore_missing: true + fail_on_error: false + - add_fields: + target: event + fields: + kind: event + category: + - network + type: + - connection + - protocol {{ if .community_id }} - community_id: - fields: - source_ip: source.address - destination_ip: destination.address {{ end }} diff --git a/x-pack/filebeat/module/zeek/smtp/ingest/pipeline.json b/x-pack/filebeat/module/zeek/smtp/ingest/pipeline.json deleted file mode 100644 index 44bc0b189aa..00000000000 --- a/x-pack/filebeat/module/zeek/smtp/ingest/pipeline.json +++ /dev/null @@ -1,63 +0,0 @@ -{ - "description": "Pipeline for normalizing Zeek smtp.log", - "processors": [ - { - "set": { - "field": "event.created", - "value": "{{_ingest.timestamp}}" - } - }, - { - "date": { - "field": "zeek.smtp.ts", - "formats": ["UNIX"] - } - }, - { - "remove": { - "field": "zeek.smtp.ts" - } - }, - { - "set": { - "field": "event.id", - "value": "{{zeek.session_id}}", - "if": "ctx.zeek.session_id != null" - } - }, - { - "set": { - "field": "source.ip", - "value": "{{source.address}}" - } - }, - { - "set": { - "field": "destination.ip", - "value": "{{destination.address}}" - } - }, - { - "convert": { - "field": "zeek.smtp.tls", - "target_field": "tls.established", - "type": "boolean", - "ignore_missing": true - } - }, - { - "date": { - "field": "zeek.smtp.date", - "target_field": "zeek.smtp.date", - "formats": ["EEE, d MMM yyyy HH:mm:ss Z"], - "if": "ctx.zeek.smtp.date != null" - } - } - ], - "on_failure" : [{ - "set" : { - "field" : "error.message", - "value" : "{{ _ingest.on_failure_message }}" - } - }] -} diff --git a/x-pack/filebeat/module/zeek/smtp/ingest/pipeline.yml b/x-pack/filebeat/module/zeek/smtp/ingest/pipeline.yml new file mode 100644 index 00000000000..4424d3674ff --- /dev/null +++ b/x-pack/filebeat/module/zeek/smtp/ingest/pipeline.yml @@ -0,0 +1,69 @@ +description: Pipeline for normalizing Zeek smtp.log +processors: +- set: + field: event.created + value: '{{_ingest.timestamp}}' +- date: + field: zeek.smtp.ts + formats: + - UNIX +- remove: + field: zeek.smtp.ts +- date: + field: zeek.smtp.date + target_field: zeek.smtp.date + formats: + 
- EEE, d MMM yyyy HH:mm:ss Z + if: ctx.zeek.smtp.date != null +- geoip: + field: destination.ip + target_field: destination.geo + ignore_missing: true +- geoip: + field: source.ip + target_field: source.geo + ignore_missing: true +- geoip: + database_file: GeoLite2-ASN.mmdb + field: source.ip + target_field: source.as + properties: + - asn + - organization_name + ignore_missing: true +- geoip: + database_file: GeoLite2-ASN.mmdb + field: destination.ip + target_field: destination.as + properties: + - asn + - organization_name + ignore_missing: true +- rename: + field: source.as.asn + target_field: source.as.number + ignore_missing: true +- rename: + field: source.as.organization_name + target_field: source.as.organization.name + ignore_missing: true +- rename: + field: destination.as.asn + target_field: destination.as.number + ignore_missing: true +- rename: + field: destination.as.organization_name + target_field: destination.as.organization.name + ignore_missing: true +- append: + field: related.ip + value: "{{source.ip}}" + if: "ctx?.source?.ip != null" +- append: + field: related.ip + value: "{{destination.ip}}" + if: "ctx?.destination?.ip != null" +on_failure: +- set: + field: error.message + value: '{{ _ingest.on_failure_message }}' diff --git a/x-pack/filebeat/module/zeek/smtp/manifest.yml b/x-pack/filebeat/module/zeek/smtp/manifest.yml index 489c984b1c4..6d69b3b5e3e 100644 --- a/x-pack/filebeat/module/zeek/smtp/manifest.yml +++ b/x-pack/filebeat/module/zeek/smtp/manifest.yml @@ -13,5 +13,5 @@ var: - name: community_id default: true -ingest_pipeline: ingest/pipeline.json +ingest_pipeline: ingest/pipeline.yml input: config/smtp.yml diff --git a/x-pack/filebeat/module/zeek/smtp/test/smtp-json.log-expected.json b/x-pack/filebeat/module/zeek/smtp/test/smtp-json.log-expected.json index 3d4bd56ac4a..61e1be27bf6 100644 --- a/x-pack/filebeat/module/zeek/smtp/test/smtp-json.log-expected.json +++ b/x-pack/filebeat/module/zeek/smtp/test/smtp-json.log-expected.json @@ -4,15 +4,27 @@ "destination.address": "192.168.1.9", "destination.ip": "192.168.1.9", "destination.port": 25, + "event.category": [ + "network" + ], "event.dataset": "zeek.smtp", "event.id": "CWWzPB3RjqhFf528c", + "event.kind": "event", "event.module": "zeek", + "event.type": [ + "connection", + "protocol" + ], "fileset.name": "smtp", "input.type": "log", "log.offset": 0, "network.community_id": "1:38H0puTqOoHT/5r2bKFUVSXifQw=", "network.protocol": "smtp", "network.transport": "tcp", + "related.ip": [ + "192.168.1.10", + "192.168.1.9" + ], "service.type": "zeek", "source.address": "192.168.1.10", "source.ip": "192.168.1.10", diff --git a/x-pack/filebeat/module/zeek/snmp/config/snmp.yml b/x-pack/filebeat/module/zeek/snmp/config/snmp.yml index 76ff0c05f93..3431a990e0f 100644 --- a/x-pack/filebeat/module/zeek/snmp/config/snmp.yml +++ b/x-pack/filebeat/module/zeek/snmp/config/snmp.yml @@ -48,10 +48,22 @@ processors: ignore_missing: true fail_on_error: false - + - convert: + fields: + - {from: "zeek.session_id", to: "event.id"} + - {from: "source.address", to: "source.ip", type: "ip"} + - {from: "destination.address", to: "destination.ip", type: "ip"} + ignore_missing: true + fail_on_error: false + - add_fields: + target: event + fields: + kind: event + category: + - network + type: + - connection + - protocol {{ if .community_id }} - community_id: - fields: - source_ip: source.address - destination_ip: destination.address {{ end }} diff --git a/x-pack/filebeat/module/zeek/snmp/ingest/pipeline.json 
b/x-pack/filebeat/module/zeek/snmp/ingest/pipeline.json deleted file mode 100644 index 646b7edf845..00000000000 --- a/x-pack/filebeat/module/zeek/snmp/ingest/pipeline.json +++ /dev/null @@ -1,55 +0,0 @@ -{ - "description": "Pipeline for normalizing Zeek snmp.log", - "processors": [ - { - "set": { - "field": "event.created", - "value": "{{_ingest.timestamp}}" - } - }, - { - "date": { - "field": "zeek.snmp.ts", - "formats": ["UNIX"] - } - }, - { - "remove": { - "field": "zeek.snmp.ts" - } - }, - { - "set": { - "field": "event.id", - "value": "{{zeek.session_id}}", - "if": "ctx.zeek.session_id != null" - } - }, - { - "set": { - "field": "source.ip", - "value": "{{source.address}}" - } - }, - { - "set": { - "field": "destination.ip", - "value": "{{destination.address}}" - } - }, - { - "date": { - "field": "zeek.snmp.up_since", - "target_field": "zeek.snmp.up_since", - "formats": ["UNIX"], - "if": "ctx.zeek.snmp.up_since != null" - } - } - ], - "on_failure" : [{ - "set" : { - "field" : "error.message", - "value" : "{{ _ingest.on_failure_message }}" - } - }] -} diff --git a/x-pack/filebeat/module/zeek/snmp/ingest/pipeline.yml b/x-pack/filebeat/module/zeek/snmp/ingest/pipeline.yml new file mode 100644 index 00000000000..f0070ef790d --- /dev/null +++ b/x-pack/filebeat/module/zeek/snmp/ingest/pipeline.yml @@ -0,0 +1,69 @@ +description: Pipeline for normalizing Zeek snmp.log +processors: +- set: + field: event.created + value: '{{_ingest.timestamp}}' +- date: + field: zeek.snmp.ts + formats: + - UNIX +- remove: + field: zeek.snmp.ts +- date: + field: zeek.snmp.up_since + target_field: zeek.snmp.up_since + formats: + - UNIX + if: ctx.zeek.snmp.up_since != null +- geoip: + field: destination.ip + target_field: destination.geo + ignore_missing: true +- geoip: + field: source.ip + target_field: source.geo + ignore_missing: true +- geoip: + database_file: GeoLite2-ASN.mmdb + field: source.ip + target_field: source.as + properties: + - asn + - organization_name + ignore_missing: true +- geoip: + database_file: GeoLite2-ASN.mmdb + field: destination.ip + target_field: destination.as + properties: + - asn + - organization_name + ignore_missing: true +- rename: + field: source.as.asn + target_field: source.as.number + ignore_missing: true +- rename: + field: source.as.organization_name + target_field: source.as.organization.name + ignore_missing: true +- rename: + field: destination.as.asn + target_field: destination.as.number + ignore_missing: true +- rename: + field: destination.as.organization_name + target_field: destination.as.organization.name + ignore_missing: true +- append: + field: related.ip + value: "{{source.ip}}" + if: "ctx?.source?.ip != null" +- append: + field: related.ip + value: "{{destination.ip}}" + if: "ctx?.destination?.ip != null" +on_failure: +- set: + field: error.message + value: '{{ _ingest.on_failure_message }}' diff --git a/x-pack/filebeat/module/zeek/snmp/manifest.yml b/x-pack/filebeat/module/zeek/snmp/manifest.yml index c11cd0b3491..b980b6fb82e 100644 --- a/x-pack/filebeat/module/zeek/snmp/manifest.yml +++ b/x-pack/filebeat/module/zeek/snmp/manifest.yml @@ -13,5 +13,5 @@ var: - name: community_id default: true -ingest_pipeline: ingest/pipeline.json +ingest_pipeline: ingest/pipeline.yml input: config/snmp.yml diff --git a/x-pack/filebeat/module/zeek/snmp/test/snmp-json.log-expected.json b/x-pack/filebeat/module/zeek/snmp/test/snmp-json.log-expected.json index 44cd6c16319..65345db7957 100644 --- a/x-pack/filebeat/module/zeek/snmp/test/snmp-json.log-expected.json +++ 
b/x-pack/filebeat/module/zeek/snmp/test/snmp-json.log-expected.json @@ -4,15 +4,27 @@ "destination.address": "192.168.1.1", "destination.ip": "192.168.1.1", "destination.port": 161, + "event.category": [ + "network" + ], "event.dataset": "zeek.snmp", "event.id": "CnKW1B4w9fpRa6Nkf2", + "event.kind": "event", "event.module": "zeek", + "event.type": [ + "connection", + "protocol" + ], "fileset.name": "snmp", "input.type": "log", "log.offset": 0, "network.community_id": "1:X15ey/8/tEH+tlelK6P+GfgwBPc=", "network.protocol": "snmp", "network.transport": "udp", + "related.ip": [ + "192.168.1.2", + "192.168.1.1" + ], "service.type": "zeek", "source.address": "192.168.1.2", "source.ip": "192.168.1.2", diff --git a/x-pack/filebeat/module/zeek/socks/config/socks.yml b/x-pack/filebeat/module/zeek/socks/config/socks.yml index 5bf93e22f91..ddbcd51d0b0 100644 --- a/x-pack/filebeat/module/zeek/socks/config/socks.yml +++ b/x-pack/filebeat/module/zeek/socks/config/socks.yml @@ -45,10 +45,23 @@ processors: ignore_missing: true fail_on_error: false - + - convert: + fields: + - {from: "zeek.session_id", to: "event.id"} + - {from: "source.address", to: "source.ip", type: "ip"} + - {from: "destination.address", to: "destination.ip", type: "ip"} + - {from: "zeek.socks.user", to: "user.name"} + ignore_missing: true + fail_on_error: false + - add_fields: + target: event + fields: + kind: event + category: + - network + type: + - connection + - protocol {{ if .community_id }} - community_id: - fields: - source_ip: source.address - destination_ip: destination.address {{ end }} diff --git a/x-pack/filebeat/module/zeek/socks/ingest/pipeline.json b/x-pack/filebeat/module/zeek/socks/ingest/pipeline.json deleted file mode 100644 index eabb2837d82..00000000000 --- a/x-pack/filebeat/module/zeek/socks/ingest/pipeline.json +++ /dev/null @@ -1,53 +0,0 @@ -{ - "description": "Pipeline for normalizing Zeek socks.log", - "processors": [ - { - "set": { - "field": "event.created", - "value": "{{_ingest.timestamp}}" - } - }, - { - "date": { - "field": "zeek.socks.ts", - "formats": ["UNIX"] - } - }, - { - "remove": { - "field": "zeek.socks.ts" - } - }, - { - "set": { - "field": "event.id", - "value": "{{zeek.session_id}}", - "if": "ctx.zeek.session_id != null" - } - }, - { - "set": { - "field": "source.ip", - "value": "{{source.address}}" - } - }, - { - "set": { - "field": "destination.ip", - "value": "{{destination.address}}" - } - }, - { - "dot_expander": { - "field": "bound.host", - "path": "zeek.socks" - } - } - ], - "on_failure" : [{ - "set" : { - "field" : "error.message", - "value" : "{{ _ingest.on_failure_message }}" - } - }] -} diff --git a/x-pack/filebeat/module/zeek/socks/ingest/pipeline.yml b/x-pack/filebeat/module/zeek/socks/ingest/pipeline.yml new file mode 100644 index 00000000000..04a84b13177 --- /dev/null +++ b/x-pack/filebeat/module/zeek/socks/ingest/pipeline.yml @@ -0,0 +1,82 @@ +description: Pipeline for normalizing Zeek socks.log +processors: +- set: + field: event.created + value: '{{_ingest.timestamp}}' +- date: + field: zeek.socks.ts + formats: + - UNIX +- remove: + field: zeek.socks.ts +- dot_expander: + field: bound.host + path: zeek.socks +- geoip: + field: destination.ip + target_field: destination.geo + ignore_missing: true +- geoip: + field: source.ip + target_field: source.geo + ignore_missing: true +- geoip: + database_file: GeoLite2-ASN.mmdb + field: source.ip + target_field: source.as + properties: + - asn + - organization_name + ignore_missing: true +- geoip: + database_file: GeoLite2-ASN.mmdb + 
field: destination.ip + target_field: destination.as + properties: + - asn + - organization_name + ignore_missing: true +- rename: + field: source.as.asn + target_field: source.as.number + ignore_missing: true +- rename: + field: source.as.organization_name + target_field: source.as.organization.name + ignore_missing: true +- rename: + field: destination.as.asn + target_field: destination.as.number + ignore_missing: true +- rename: + field: destination.as.organization_name + target_field: destination.as.organization.name + ignore_missing: true +- append: + field: related.ip + value: "{{source.ip}}" + if: "ctx?.source?.ip != null" +- append: + field: related.ip + value: "{{destination.ip}}" + if: "ctx?.destination?.ip != null" +- append: + field: related.user + value: "{{user.name}}" + if: "ctx?.user?.name != null" +- append: + field: event.type + value: error + if: "ctx?.zeek?.socks?.status != null && ctx.zeek.socks.status != 'succeeded'" +- set: + field: event.outcome + value: success + if: "ctx?.zeek?.socks?.status != null && ctx.zeek.socks.status == 'succeeded'" +- set: + field: event.outcome + value: failure + if: "ctx?.zeek?.socks?.status != null && ctx.zeek.socks.status != 'succeeded'" +on_failure: +- set: + field: error.message + value: '{{ _ingest.on_failure_message }}' diff --git a/x-pack/filebeat/module/zeek/socks/manifest.yml b/x-pack/filebeat/module/zeek/socks/manifest.yml index c24b9aae6db..68fea837fde 100644 --- a/x-pack/filebeat/module/zeek/socks/manifest.yml +++ b/x-pack/filebeat/module/zeek/socks/manifest.yml @@ -13,5 +13,5 @@ var: - name: community_id default: true -ingest_pipeline: ingest/pipeline.json +ingest_pipeline: ingest/pipeline.yml input: config/socks.yml diff --git a/x-pack/filebeat/module/zeek/socks/test/socks-json.log-expected.json b/x-pack/filebeat/module/zeek/socks/test/socks-json.log-expected.json index cf2a629e475..c8172d23d1a 100644 --- a/x-pack/filebeat/module/zeek/socks/test/socks-json.log-expected.json +++ b/x-pack/filebeat/module/zeek/socks/test/socks-json.log-expected.json @@ -4,15 +4,28 @@ "destination.address": "127.0.0.1", "destination.ip": "127.0.0.1", "destination.port": 8080, + "event.category": [ + "network" + ], "event.dataset": "zeek.socks", "event.id": "Cmz4Cb4qCw1hGqYw1c", + "event.kind": "event", "event.module": "zeek", + "event.outcome": "success", + "event.type": [ + "connection", + "protocol" + ], "fileset.name": "socks", "input.type": "log", "log.offset": 0, "network.community_id": "1:1Hp/o0hOC62lAwrV+a0ZKDE3rrs=", "network.protocol": "socks", "network.transport": "tcp", + "related.ip": [ + "127.0.0.1", + "127.0.0.1" + ], "service.type": "zeek", "source.address": "127.0.0.1", "source.ip": "127.0.0.1", diff --git a/x-pack/filebeat/module/zeek/ssh/config/ssh.yml b/x-pack/filebeat/module/zeek/ssh/config/ssh.yml index f463b62e895..e33f4e0e29e 100644 --- a/x-pack/filebeat/module/zeek/ssh/config/ssh.yml +++ b/x-pack/filebeat/module/zeek/ssh/config/ssh.yml @@ -57,10 +57,20 @@ processors: ignore_missing: true fail_on_error: false - + - convert: + fields: + - {from: "zeek.session_id", to: "event.id"} + - {from: "source.address", to: "source.ip", type: "ip"} + - {from: "destination.address", to: "destination.ip", type: "ip"} + - add_fields: + target: event + fields: + kind: event + category: + - network + type: + - connection + - protocol {{ if .community_id }} - community_id: - fields: - source_ip: source.address - destination_ip: destination.address {{ end }} diff --git a/x-pack/filebeat/module/zeek/ssh/ingest/pipeline.json
b/x-pack/filebeat/module/zeek/ssh/ingest/pipeline.json deleted file mode 100644 index 2eefd208860..00000000000 --- a/x-pack/filebeat/module/zeek/ssh/ingest/pipeline.json +++ /dev/null @@ -1,47 +0,0 @@ -{ - "description": "Pipeline for normalizing Zeek ssh.log", - "processors": [ - { - "set": { - "field": "event.created", - "value": "{{_ingest.timestamp}}" - } - }, - { - "date": { - "field": "zeek.ssh.ts", - "formats": ["UNIX"] - } - }, - { - "remove": { - "field": "zeek.ssh.ts" - } - }, - { - "set": { - "field": "event.id", - "value": "{{zeek.session_id}}", - "if": "ctx.zeek.session_id != null" - } - }, - { - "set": { - "field": "source.ip", - "value": "{{source.address}}" - } - }, - { - "set": { - "field": "destination.ip", - "value": "{{destination.address}}" - } - } - ], - "on_failure" : [{ - "set" : { - "field" : "error.message", - "value" : "{{ _ingest.on_failure_message }}" - } - }] -} diff --git a/x-pack/filebeat/module/zeek/ssh/ingest/pipeline.yml b/x-pack/filebeat/module/zeek/ssh/ingest/pipeline.yml new file mode 100644 index 00000000000..019a44b89e0 --- /dev/null +++ b/x-pack/filebeat/module/zeek/ssh/ingest/pipeline.yml @@ -0,0 +1,71 @@ +description: Pipeline for normalizing Zeek ssh.log +processors: +- set: + field: event.created + value: '{{_ingest.timestamp}}' +- date: + field: zeek.ssh.ts + formats: + - UNIX +- remove: + field: zeek.ssh.ts +- geoip: + field: destination.ip + target_field: destination.geo + ignore_missing: true +- geoip: + field: source.ip + target_field: source.geo + ignore_missing: true +- geoip: + database_file: GeoLite2-ASN.mmdb + field: source.ip + target_field: source.as + properties: + - asn + - organization_name + ignore_missing: true +- geoip: + database_file: GeoLite2-ASN.mmdb + field: destination.ip + target_field: destination.as + properties: + - asn + - organization_name + ignore_missing: true +- rename: + field: source.as.asn + target_field: source.as.number + ignore_missing: true +- rename: + field: source.as.organization_name + target_field: source.as.organization.name + ignore_missing: true +- rename: + field: destination.as.asn + target_field: destination.as.number + ignore_missing: true +- rename: + field: destination.as.organization_name + target_field: destination.as.organization.name + ignore_missing: true +- append: + field: related.ip + value: "{{source.ip}}" + if: "ctx?.source?.ip != null" +- append: + field: related.ip + value: "{{destination.ip}}" + if: "ctx?.destination?.ip != null" +- set: + field: event.outcome + value: failure + if: "ctx?.zeek?.ssh?.auth?.success != null && ctx.zeek.ssh.auth.success == false" +- set: + field: event.outcome + value: success + if: "ctx?.zeek?.ssh?.auth?.success != null && ctx.zeek.ssh.auth.success == true" +on_failure: +- set: + field: error.message + value: '{{ _ingest.on_failure_message }}' diff --git a/x-pack/filebeat/module/zeek/ssh/manifest.yml b/x-pack/filebeat/module/zeek/ssh/manifest.yml index da635a43771..60249e25c21 100644 --- a/x-pack/filebeat/module/zeek/ssh/manifest.yml +++ b/x-pack/filebeat/module/zeek/ssh/manifest.yml @@ -13,5 +13,5 @@ var: - name: community_id default: true -ingest_pipeline: ingest/pipeline.json +ingest_pipeline: ingest/pipeline.yml input: config/ssh.yml diff --git a/x-pack/filebeat/module/zeek/ssh/test/ssh-json.log-expected.json b/x-pack/filebeat/module/zeek/ssh/test/ssh-json.log-expected.json index 8ab4788abc7..343aa7392e5 100644 --- a/x-pack/filebeat/module/zeek/ssh/test/ssh-json.log-expected.json +++ 
b/x-pack/filebeat/module/zeek/ssh/test/ssh-json.log-expected.json @@ -4,15 +4,28 @@ "destination.address": "192.168.1.1", "destination.ip": "192.168.1.1", "destination.port": 22, + "event.category": [ + "network" + ], "event.dataset": "zeek.ssh", "event.id": "CajWfz1b3qnnWT0BU9", + "event.kind": "event", "event.module": "zeek", + "event.outcome": "failure", + "event.type": [ + "connection", + "protocol" + ], "fileset.name": "ssh", "input.type": "log", "log.offset": 0, "network.community_id": "1:42tg9bemt74qgrdvJOy2n5Veg4A=", "network.protocol": "ssh", "network.transport": "tcp", + "related.ip": [ + "192.168.1.2", + "192.168.1.1" + ], "service.type": "zeek", "source.address": "192.168.1.2", "source.ip": "192.168.1.2", diff --git a/x-pack/filebeat/module/zeek/ssl/config/ssl.yml b/x-pack/filebeat/module/zeek/ssl/config/ssl.yml index 878267f549a..88bfcc4b53e 100644 --- a/x-pack/filebeat/module/zeek/ssl/config/ssl.yml +++ b/x-pack/filebeat/module/zeek/ssl/config/ssl.yml @@ -56,10 +56,24 @@ processors: ignore_missing: true fail_on_error: false - + - convert: + fields: + - {from: "zeek.session_id", to: "event.id"} + - {from: "source.address", to: "source.ip", type: "ip"} + - {from: "source.address", to: "client.address"} + - {from: "destination.address", to: "destination.ip", type: "ip"} + - {from: "destination.address", to: "server.address"} + ignore_missing: true + fail_on_error: false + - add_fields: + target: event + fields: + kind: event + category: + - network + type: + - connection + - protocol {{ if .community_id }} - community_id: - fields: - source_ip: source.address - destination_ip: destination.address {{ end }} diff --git a/x-pack/filebeat/module/zeek/ssl/ingest/pipeline.yml b/x-pack/filebeat/module/zeek/ssl/ingest/pipeline.yml index 2a5ebf4ce7a..bbeaa24d1bd 100644 --- a/x-pack/filebeat/module/zeek/ssl/ingest/pipeline.yml +++ b/x-pack/filebeat/module/zeek/ssl/ingest/pipeline.yml @@ -10,22 +10,14 @@ processors: - UNIX - remove: field: zeek.ssl.ts -- set: - field: event.id - value: '{{zeek.session_id}}' - if: ctx.zeek.session_id != null -- set: - field: source.ip - value: '{{source.address}}' -- set: - field: destination.ip - value: '{{destination.address}}' - geoip: field: destination.ip target_field: destination.geo + ignore_missing: true - geoip: field: source.ip target_field: source.geo + ignore_missing: true - geoip: database_file: GeoLite2-ASN.mmdb field: source.ip @@ -248,7 +240,14 @@ processors: ctx.tls.version = parts[1].substring(0,1) + "."
+ parts[1].substring(1); } ctx.tls.version_protocol = parts[0].toLowerCase(); - +- append: + field: related.ip + value: "{{source.ip}}" + if: "ctx?.source?.ip != null" +- append: + field: related.ip + value: "{{destination.ip}}" + if: "ctx?.destination?.ip != null" on_failure: - set: field: error.message diff --git a/x-pack/filebeat/module/zeek/ssl/test/ssl-json.log-expected.json b/x-pack/filebeat/module/zeek/ssl/test/ssl-json.log-expected.json index d7d7ac33ff9..526a43a350b 100644 --- a/x-pack/filebeat/module/zeek/ssl/test/ssl-json.log-expected.json +++ b/x-pack/filebeat/module/zeek/ssl/test/ssl-json.log-expected.json @@ -1,6 +1,7 @@ [ { "@timestamp": "2019-01-17T01:32:16.805Z", + "client.address": "10.178.98.102", "destination.address": "35.199.178.4", "destination.as.number": 15169, "destination.as.organization.name": "Google LLC", @@ -13,14 +14,27 @@ "destination.geo.region_name": "California", "destination.ip": "35.199.178.4", "destination.port": 9243, + "event.category": [ + "network" + ], "event.dataset": "zeek.ssl", "event.id": "CAOvs1BMFCX2Eh0Y3", + "event.kind": "event", "event.module": "zeek", + "event.type": [ + "connection", + "protocol" + ], "fileset.name": "ssl", "input.type": "log", "log.offset": 0, "network.community_id": "1:1PMhYqOKBIyRAQeMbg/pWiJ198g=", "network.transport": "tcp", + "related.ip": [ + "10.178.98.102", + "35.199.178.4" + ], + "server.address": "35.199.178.4", "service.type": "zeek", "source.address": "10.178.98.102", "source.ip": "10.178.98.102", @@ -59,6 +73,7 @@ }, { "@timestamp": "2019-01-17T01:32:16.805Z", + "client.address": "10.178.98.102", "destination.address": "35.199.178.4", "destination.as.number": 15169, "destination.as.organization.name": "Google LLC", @@ -71,14 +86,27 @@ "destination.geo.region_name": "California", "destination.ip": "35.199.178.4", "destination.port": 9243, + "event.category": [ + "network" + ], "event.dataset": "zeek.ssl", "event.id": "C3mki91FnnNtm0u1ok", + "event.kind": "event", "event.module": "zeek", + "event.type": [ + "connection", + "protocol" + ], "fileset.name": "ssl", "input.type": "log", "log.offset": 635, "network.community_id": "1:zYbLmqRN6PLPB067HNAiAQISqvI=", "network.transport": "tcp", + "related.ip": [ + "10.178.98.102", + "35.199.178.4" + ], + "server.address": "35.199.178.4", "service.type": "zeek", "source.address": "10.178.98.102", "source.ip": "10.178.98.102", diff --git a/x-pack/filebeat/module/zeek/stats/ingest/pipeline.json b/x-pack/filebeat/module/zeek/stats/ingest/pipeline.json deleted file mode 100644 index 6115bc6c1d2..00000000000 --- a/x-pack/filebeat/module/zeek/stats/ingest/pipeline.json +++ /dev/null @@ -1,28 +0,0 @@ -{ - "description": "Pipeline for normalizing Zeek stats.log", - "processors": [ - { - "set": { - "field": "event.created", - "value": "{{_ingest.timestamp}}" - } - }, - { - "date": { - "field": "zeek.stats.ts", - "formats": ["UNIX"] - } - }, - { - "remove": { - "field": "zeek.stats.ts" - } - } - ], - "on_failure" : [{ - "set" : { - "field" : "error.message", - "value" : "{{ _ingest.on_failure_message }}" - } - }] -} diff --git a/x-pack/filebeat/module/zeek/stats/ingest/pipeline.yml b/x-pack/filebeat/module/zeek/stats/ingest/pipeline.yml new file mode 100644 index 00000000000..c0347161190 --- /dev/null +++ b/x-pack/filebeat/module/zeek/stats/ingest/pipeline.yml @@ -0,0 +1,18 @@ +description: Pipeline for normalizing Zeek stats.log +processors: +- set: + field: event.created + value: '{{_ingest.timestamp}}' +- date: + field: zeek.stats.ts + formats: + - UNIX +- remove: + field: zeek.stats.ts +- set: + field: event.kind + value:
metric +on_failure: +- set: + field: error.message + value: '{{ _ingest.on_failure_message }}' diff --git a/x-pack/filebeat/module/zeek/stats/manifest.yml b/x-pack/filebeat/module/zeek/stats/manifest.yml index c4b122a19bf..f63ad40bf33 100644 --- a/x-pack/filebeat/module/zeek/stats/manifest.yml +++ b/x-pack/filebeat/module/zeek/stats/manifest.yml @@ -11,5 +11,5 @@ var: - name: tags default: [zeek.stats] -ingest_pipeline: ingest/pipeline.json +ingest_pipeline: ingest/pipeline.yml input: config/stats.yml diff --git a/x-pack/filebeat/module/zeek/stats/test/stats-json.log-expected.json b/x-pack/filebeat/module/zeek/stats/test/stats-json.log-expected.json index a2d8e3ab311..bcb5f24f2a2 100644 --- a/x-pack/filebeat/module/zeek/stats/test/stats-json.log-expected.json +++ b/x-pack/filebeat/module/zeek/stats/test/stats-json.log-expected.json @@ -2,6 +2,7 @@ { "@timestamp": "2016-10-16T08:17:58.714Z", "event.dataset": "zeek.stats", + "event.kind": "metric", "event.module": "zeek", "fileset.name": "stats", "input.type": "log", diff --git a/x-pack/filebeat/module/zeek/syslog/config/syslog.yml b/x-pack/filebeat/module/zeek/syslog/config/syslog.yml index b7accce096d..a8420237af0 100644 --- a/x-pack/filebeat/module/zeek/syslog/config/syslog.yml +++ b/x-pack/filebeat/module/zeek/syslog/config/syslog.yml @@ -41,10 +41,17 @@ processors: ignore_missing: true fail_on_error: false - + - convert: + fields: + - {from: "zeek.session_id", to: "event.id"} + - {from: "source.address", to: "source.ip", type: "ip"} + - {from: "destination.address", to: "destination.ip", type: "ip"} + - {from: "zeek.syslog.facility", to: "log.syslog.facility.name"} + - {from: "zeek.syslog.severity", to: "log.syslog.severity.name"} + - add_fields: + target: event + fields: + kind: event {{ if .community_id }} - community_id: - fields: - source_ip: source.address - destination_ip: destination.addresss {{ end }} diff --git a/x-pack/filebeat/module/zeek/syslog/ingest/pipeline.json b/x-pack/filebeat/module/zeek/syslog/ingest/pipeline.json deleted file mode 100644 index fcb98b1b91d..00000000000 --- a/x-pack/filebeat/module/zeek/syslog/ingest/pipeline.json +++ /dev/null @@ -1,47 +0,0 @@ -{ - "description": "Pipeline for normalizing Zeek syslog.log", - "processors": [ - { - "set": { - "field": "event.created", - "value": "{{_ingest.timestamp}}" - } - }, - { - "date": { - "field": "zeek.syslog.ts", - "formats": ["UNIX"] - } - }, - { - "remove": { - "field": "zeek.syslog.ts" - } - }, - { - "set": { - "field": "event.id", - "value": "{{zeek.session_id}}", - "if": "ctx.zeek.session_id != null" - } - }, - { - "set": { - "field": "source.ip", - "value": "{{source.address}}" - } - }, - { - "set": { - "field": "destination.ip", - "value": "{{destination.address}}" - } - } - ], - "on_failure" : [{ - "set" : { - "field" : "error.message", - "value" : "{{ _ingest.on_failure_message }}" - } - }] -} diff --git a/x-pack/filebeat/module/zeek/syslog/ingest/pipeline.yml b/x-pack/filebeat/module/zeek/syslog/ingest/pipeline.yml new file mode 100644 index 00000000000..7fd848682b1 --- /dev/null +++ b/x-pack/filebeat/module/zeek/syslog/ingest/pipeline.yml @@ -0,0 +1,63 @@ +description: Pipeline for normalizing Zeek syslog.log +processors: +- set: + field: event.created + value: '{{_ingest.timestamp}}' +- date: + field: zeek.syslog.ts + formats: + - UNIX +- remove: + field: zeek.syslog.ts +- geoip: + field: destination.ip + target_field: destination.geo + ignore_missing: true +- geoip: + field: source.ip + target_field: source.geo + ignore_missing: true +- geoip: + 
database_file: GeoLite2-ASN.mmdb + field: source.ip + target_field: source.as + properties: + - asn + - organization_name + ignore_missing: true +- geoip: + database_file: GeoLite2-ASN.mmdb + field: destination.ip + target_field: destination.as + properties: + - asn + - organization_name + ignore_missing: true +- rename: + field: source.as.asn + target_field: source.as.number + ignore_missing: true +- rename: + field: source.as.organization_name + target_field: source.as.organization.name + ignore_missing: true +- rename: + field: destination.as.asn + target_field: destination.as.number + ignore_missing: true +- rename: + field: destination.as.organization_name + target_field: destination.as.organization.name + ignore_missing: true +- append: + field: related.ip + value: "{{source.ip}}" + if: "ctx?.source?.ip != null" +- append: + field: related.ip + value: "{{destination.ip}}" + if: "ctx?.destination?.ip != null" +on_failure: +- set: + field: error.message + value: '{{ _ingest.on_failure_message }}' diff --git a/x-pack/filebeat/module/zeek/syslog/manifest.yml b/x-pack/filebeat/module/zeek/syslog/manifest.yml index 2d75d440d2f..8db76ab5b36 100644 --- a/x-pack/filebeat/module/zeek/syslog/manifest.yml +++ b/x-pack/filebeat/module/zeek/syslog/manifest.yml @@ -13,5 +13,5 @@ var: - name: community_id default: true -ingest_pipeline: ingest/pipeline.json +ingest_pipeline: ingest/pipeline.yml input: config/syslog.yml diff --git a/x-pack/filebeat/module/zeek/traceroute/config/traceroute.yml b/x-pack/filebeat/module/zeek/traceroute/config/traceroute.yml index 1cd1a7031fd..8b4b40e0234 100644 --- a/x-pack/filebeat/module/zeek/traceroute/config/traceroute.yml +++ b/x-pack/filebeat/module/zeek/traceroute/config/traceroute.yml @@ -27,3 +27,17 @@ processors: ignore_missing: true fail_on_error: false + - convert: + fields: + - {from: "source.address", to: "source.ip", type: "ip"} + - {from: "destination.address", to: "destination.ip", type: "ip"} + ignore_missing: true + fail_on_error: false + - add_fields: + target: event + fields: + kind: event + category: + - network + type: + - info diff --git a/x-pack/filebeat/module/zeek/traceroute/ingest/pipeline.json b/x-pack/filebeat/module/zeek/traceroute/ingest/pipeline.json deleted file mode 100644 index 9a755fa3913..00000000000 --- a/x-pack/filebeat/module/zeek/traceroute/ingest/pipeline.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "description": "Pipeline for normalizing Zeek traceroute.log", - "processors": [ - { - "set": { - "field": "event.created", - "value": "{{_ingest.timestamp}}" - } - }, - { - "date": { - "field": "zeek.traceroute.ts", - "formats": ["UNIX"] - } - }, - { - "remove": { - "field": "zeek.traceroute.ts" - } - }, - { - "set": { - "field": "source.ip", - "value": "{{source.address}}" - } - }, - { - "set": { - "field": "destination.ip", - "value": "{{destination.address}}" - } - } - ], - "on_failure" : [{ - "set" : { - "field" : "error.message", - "value" : "{{ _ingest.on_failure_message }}" - } - }] -} diff --git a/x-pack/filebeat/module/zeek/traceroute/ingest/pipeline.yml b/x-pack/filebeat/module/zeek/traceroute/ingest/pipeline.yml new file mode 100644 index 00000000000..6fa5a0bc993 --- /dev/null +++ b/x-pack/filebeat/module/zeek/traceroute/ingest/pipeline.yml @@ -0,0 +1,63 @@ +description: Pipeline for normalizing Zeek traceroute.log +processors: +- set: + field: event.created + value: '{{_ingest.timestamp}}' +- date: + field: zeek.traceroute.ts + formats: + - UNIX +- remove: + field: zeek.traceroute.ts +- geoip: + field: destination.ip + 
target_field: destination.geo + ignore_missing: true +- geoip: + field: source.ip + target_field: source.geo + ignore_missing: true +- geoip: + database_file: GeoLite2-ASN.mmdb + field: source.ip + target_field: source.as + properties: + - asn + - organization_name + ignore_missing: true +- geoip: + database_file: GeoLite2-ASN.mmdb + field: destination.ip + target_field: destination.as + properties: + - asn + - organization_name + ignore_missing: true +- rename: + field: source.as.asn + target_field: source.as.number + ignore_missing: true +- rename: + field: source.as.organization_name + target_field: source.as.organization.name + ignore_missing: true +- rename: + field: destination.as.asn + target_field: destination.as.number + ignore_missing: true +- rename: + field: destination.as.organization_name + target_field: destination.as.organization.name + ignore_missing: true +- append: + field: related.ip + value: "{{source.ip}}" + if: "ctx?.source?.ip != null" +- append: + field: related.ip + value: "{{destination.ip}}" + if: "ctx?.destination?.ip != null" +on_failure: +- set: + field: error.message + value: '{{ _ingest.on_failure_message }}' diff --git a/x-pack/filebeat/module/zeek/traceroute/manifest.yml b/x-pack/filebeat/module/zeek/traceroute/manifest.yml index c0dd44654df..0761e9b3bf4 100644 --- a/x-pack/filebeat/module/zeek/traceroute/manifest.yml +++ b/x-pack/filebeat/module/zeek/traceroute/manifest.yml @@ -11,5 +11,5 @@ var: - name: tags default: [zeek.traceroute] -ingest_pipeline: ingest/pipeline.json +ingest_pipeline: ingest/pipeline.yml input: config/traceroute.yml diff --git a/x-pack/filebeat/module/zeek/traceroute/test/traceroute-json.log-expected.json b/x-pack/filebeat/module/zeek/traceroute/test/traceroute-json.log-expected.json index 90bd0dd4eec..8fdfd983c94 100644 --- a/x-pack/filebeat/module/zeek/traceroute/test/traceroute-json.log-expected.json +++ b/x-pack/filebeat/module/zeek/traceroute/test/traceroute-json.log-expected.json @@ -2,13 +2,30 @@ { "@timestamp": "2013-02-26T22:02:38.650Z", "destination.address": "8.8.8.8", + "destination.as.number": 15169, + "destination.as.organization.name": "Google LLC", + "destination.geo.continent_name": "North America", + "destination.geo.country_iso_code": "US", + "destination.geo.location.lat": 37.751, + "destination.geo.location.lon": -97.822, "destination.ip": "8.8.8.8", + "event.category": [ + "network" + ], "event.dataset": "zeek.traceroute", + "event.kind": "event", "event.module": "zeek", + "event.type": [ + "info" + ], "fileset.name": "traceroute", "input.type": "log", "log.offset": 0, "network.transport": "udp", + "related.ip": [ + "192.168.1.1", + "8.8.8.8" + ], "service.type": "zeek", "source.address": "192.168.1.1", "source.ip": "192.168.1.1", diff --git a/x-pack/filebeat/module/zeek/tunnel/config/tunnel.yml b/x-pack/filebeat/module/zeek/tunnel/config/tunnel.yml index 3fdd2c1faaa..ed9af2117ad 100644 --- a/x-pack/filebeat/module/zeek/tunnel/config/tunnel.yml +++ b/x-pack/filebeat/module/zeek/tunnel/config/tunnel.yml @@ -36,3 +36,19 @@ processors: ignore_missing: true fail_on_error: false + - convert: + fields: + - {from: "zeek.session_id", to: "event.id"} + - {from: "source.address", to: "source.ip", type: "ip"} + - {from: "destination.address", to: "destination.ip", type: "ip"} + - {from: "zeek.tunnel.action", to: "event.action"} + ignore_missing: true + fail_on_error: false + - add_fields: + target: event + fields: + kind: event + category: + - network + type: + - connection diff --git 
a/x-pack/filebeat/module/zeek/tunnel/ingest/pipeline.json b/x-pack/filebeat/module/zeek/tunnel/ingest/pipeline.json deleted file mode 100644 index bc9eacce8b0..00000000000 --- a/x-pack/filebeat/module/zeek/tunnel/ingest/pipeline.json +++ /dev/null @@ -1,47 +0,0 @@ -{ - "description": "Pipeline for normalizing Zeek tunnel.log", - "processors": [ - { - "set": { - "field": "event.created", - "value": "{{_ingest.timestamp}}" - } - }, - { - "date": { - "field": "zeek.tunnel.ts", - "formats": ["UNIX"] - } - }, - { - "remove": { - "field": "zeek.tunnel.ts" - } - }, - { - "set": { - "field": "event.id", - "value": "{{zeek.session_id}}", - "if": "ctx.zeek.session_id != null" - } - }, - { - "set": { - "field": "source.ip", - "value": "{{source.address}}" - } - }, - { - "set": { - "field": "destination.ip", - "value": "{{destination.address}}" - } - } - ], - "on_failure" : [{ - "set" : { - "field" : "error.message", - "value" : "{{ _ingest.on_failure_message }}" - } - }] -} diff --git a/x-pack/filebeat/module/zeek/tunnel/ingest/pipeline.yml b/x-pack/filebeat/module/zeek/tunnel/ingest/pipeline.yml new file mode 100644 index 00000000000..402bce5fa5d --- /dev/null +++ b/x-pack/filebeat/module/zeek/tunnel/ingest/pipeline.yml @@ -0,0 +1,63 @@ +description: Pipeline for normalizing Zeek tunnel.log +processors: +- set: + field: event.created + value: '{{_ingest.timestamp}}' +- date: + field: zeek.tunnel.ts + formats: + - UNIX +- remove: + field: zeek.tunnel.ts +- geoip: + field: destination.ip + target_field: destination.geo + ignore_missing: true +- geoip: + field: source.ip + target_field: source.geo + ignore_missing: true +- geoip: + database_file: GeoLite2-ASN.mmdb + field: source.ip + target_field: source.as + properties: + - asn + - organization_name + ignore_missing: true +- geoip: + database_file: GeoLite2-ASN.mmdb + field: destination.ip + target_field: destination.as + properties: + - asn + - organization_name + ignore_missing: true +- rename: + field: source.as.asn + target_field: source.as.number + ignore_missing: true +- rename: + field: source.as.organization_name + target_field: source.as.organization.name + ignore_missing: true +- rename: + field: destination.as.asn + target_field: destination.as.number + ignore_missing: true +- rename: + field: destination.as.organization_name + target_field: destination.as.organization.name + ignore_missing: true +- append: + field: related.ip + value: "{{source.ip}}" + if: "ctx?.source?.ip != null" +- append: + field: related.ip + value: "{{destination.ip}}" + if: "ctx?.destination?.ip != null" +on_failure: +- set: + field: error.message + value: '{{ _ingest.on_failure_message }}' diff --git a/x-pack/filebeat/module/zeek/tunnel/manifest.yml b/x-pack/filebeat/module/zeek/tunnel/manifest.yml index ad3d712c33c..a0618a12b7e 100644 --- a/x-pack/filebeat/module/zeek/tunnel/manifest.yml +++ b/x-pack/filebeat/module/zeek/tunnel/manifest.yml @@ -11,5 +11,5 @@ var: - name: tags default: [zeek.tunnel] -ingest_pipeline: ingest/pipeline.json +ingest_pipeline: ingest/pipeline.yml input: config/tunnel.yml diff --git a/x-pack/filebeat/module/zeek/tunnel/test/tunnel-json.log-expected.json b/x-pack/filebeat/module/zeek/tunnel/test/tunnel-json.log-expected.json index 9504931de51..1e00e616e36 100644 --- a/x-pack/filebeat/module/zeek/tunnel/test/tunnel-json.log-expected.json +++ b/x-pack/filebeat/module/zeek/tunnel/test/tunnel-json.log-expected.json @@ -2,15 +2,39 @@ { "@timestamp": "2018-12-10T01:34:26.743Z", "destination.address": "132.16.110.133", + 
"destination.as.number": 427, + "destination.as.organization.name": "Air Force Systems Networking", + "destination.geo.continent_name": "North America", + "destination.geo.country_iso_code": "US", + "destination.geo.location.lat": 37.751, + "destination.geo.location.lon": -97.822, "destination.ip": "132.16.110.133", "destination.port": 8080, + "event.action": "Tunnel::DISCOVER", + "event.category": [ + "network" + ], "event.dataset": "zeek.tunnel", + "event.kind": "event", "event.module": "zeek", + "event.type": [ + "connection" + ], "fileset.name": "tunnel", "input.type": "log", "log.offset": 0, + "related.ip": [ + "132.16.146.79", + "132.16.110.133" + ], "service.type": "zeek", "source.address": "132.16.146.79", + "source.as.number": 427, + "source.as.organization.name": "Air Force Systems Networking", + "source.geo.continent_name": "North America", + "source.geo.country_iso_code": "US", + "source.geo.location.lat": 37.751, + "source.geo.location.lon": -97.822, "source.ip": "132.16.146.79", "source.port": 0, "tags": [ diff --git a/x-pack/filebeat/module/zeek/weird/config/weird.yml b/x-pack/filebeat/module/zeek/weird/config/weird.yml index 6f67c90ae4f..1256f96902b 100644 --- a/x-pack/filebeat/module/zeek/weird/config/weird.yml +++ b/x-pack/filebeat/module/zeek/weird/config/weird.yml @@ -36,3 +36,19 @@ processors: ignore_missing: true fail_on_error: false + - convert: + fields: + - {from: "zeek.session_id", to: "event.id"} + - {from: "source.address", to: "source.ip", type: "ip"} + - {from: "destination.address", to: "destination.ip", type: "ip"} + - {from: "zeek.weird.name", to: "rule.name"} + ignore_missing: true + fail_on_error: false + - add_fields: + target: event + fields: + kind: alert + category: + - network + type: + - info diff --git a/x-pack/filebeat/module/zeek/weird/ingest/pipeline.json b/x-pack/filebeat/module/zeek/weird/ingest/pipeline.json deleted file mode 100644 index a97cdeb22bb..00000000000 --- a/x-pack/filebeat/module/zeek/weird/ingest/pipeline.json +++ /dev/null @@ -1,49 +0,0 @@ -{ - "description": "Pipeline for normalizing Zeek weird.log", - "processors": [ - { - "set": { - "field": "event.created", - "value": "{{_ingest.timestamp}}" - } - }, - { - "date": { - "field": "zeek.weird.ts", - "formats": ["UNIX"] - } - }, - { - "remove": { - "field": "zeek.weird.ts" - } - }, - { - "set": { - "field": "event.id", - "value": "{{zeek.session_id}}", - "if": "ctx.zeek.session_id != null" - } - }, - { - "set": { - "field": "source.ip", - "value": "{{source.address}}", - "if": "ctx?.source?.address != null" - } - }, - { - "set": { - "field": "destination.ip", - "value": "{{destination.address}}", - "if": "ctx?.destination?.address != null" - } - } - ], - "on_failure" : [{ - "set" : { - "field" : "error.message", - "value" : "{{ _ingest.on_failure_message }}" - } - }] -} diff --git a/x-pack/filebeat/module/zeek/weird/ingest/pipeline.yml b/x-pack/filebeat/module/zeek/weird/ingest/pipeline.yml new file mode 100644 index 00000000000..e0325d9a1c5 --- /dev/null +++ b/x-pack/filebeat/module/zeek/weird/ingest/pipeline.yml @@ -0,0 +1,63 @@ +description: Pipeline for normalizing Zeek weird.log +processors: +- set: + field: event.created + value: '{{_ingest.timestamp}}' +- date: + field: zeek.weird.ts + formats: + - UNIX +- remove: + field: zeek.weird.ts +- geoip: + field: destination.ip + target_field: destination.geo + ignore_missing: true +- geoip: + field: source.ip + target_field: source.geo + ignore_missing: true +- geoip: + database_file: GeoLite2-ASN.mmdb + field: source.ip + 
target_field: source.as + properties: + - asn + - organization_name + ignore_missing: true +- geoip: + database_file: GeoLite2-ASN.mmdb + field: destination.ip + target_field: destination.as + properties: + - asn + - organization_name + ignore_missing: true +- rename: + field: source.as.asn + target_field: source.as.number + ignore_missing: true +- rename: + field: source.as.organization_name + target_field: source.as.organization.name + ignore_missing: true +- rename: + field: destination.as.asn + target_field: destination.as.number + ignore_missing: true +- rename: + field: destination.as.organization_name + target_field: destination.as.organization.name + ignore_missing: true +- append: + field: related.ip + value: "{{source.ip}}" + if: "ctx?.source?.ip != null" +- append: + field: related.ip + value: "{{destination.ip}}" + if: "ctx?.destination?.ip != null" +on_failure: +- set: + field: error.message + value: '{{ _ingest.on_failure_message }}' diff --git a/x-pack/filebeat/module/zeek/weird/manifest.yml b/x-pack/filebeat/module/zeek/weird/manifest.yml index 63d48d32ee3..3e91c91c64a 100644 --- a/x-pack/filebeat/module/zeek/weird/manifest.yml +++ b/x-pack/filebeat/module/zeek/weird/manifest.yml @@ -11,5 +11,5 @@ var: - name: tags default: [zeek.weird] -ingest_pipeline: ingest/pipeline.json +ingest_pipeline: ingest/pipeline.yml input: config/weird.yml diff --git a/x-pack/filebeat/module/zeek/weird/test/weird-json.log-expected.json b/x-pack/filebeat/module/zeek/weird/test/weird-json.log-expected.json index f1fdb20678f..cc9f7f49508 100644 --- a/x-pack/filebeat/module/zeek/weird/test/weird-json.log-expected.json +++ b/x-pack/filebeat/module/zeek/weird/test/weird-json.log-expected.json @@ -4,12 +4,24 @@ "destination.address": "192.168.1.2", "destination.ip": "192.168.1.2", "destination.port": 53, + "event.category": [ + "network" + ], "event.dataset": "zeek.weird", "event.id": "C1ralPp062bkwWt4e", + "event.kind": "alert", "event.module": "zeek", + "event.type": [ + "info" + ], "fileset.name": "weird", "input.type": "log", "log.offset": 0, + "related.ip": [ + "192.168.1.1", + "192.168.1.2" + ], + "rule.name": "dns_unmatched_reply", "service.type": "zeek", "source.address": "192.168.1.1", "source.ip": "192.168.1.1", @@ -24,11 +36,19 @@ }, { "@timestamp": "2020-01-28T16:00:59.342Z", + "event.category": [ + "network" + ], "event.dataset": "zeek.weird", + "event.kind": "alert", "event.module": "zeek", + "event.type": [ + "info" + ], "fileset.name": "weird", "input.type": "log", "log.offset": 197, + "rule.name": "non_ip_packet_in_ethernet", "service.type": "zeek", "tags": [ "zeek.weird" diff --git a/x-pack/filebeat/module/zeek/x509/config/x509.yml b/x-pack/filebeat/module/zeek/x509/config/x509.yml index 3bebeab5697..49a670e46e5 100644 --- a/x-pack/filebeat/module/zeek/x509/config/x509.yml +++ b/x-pack/filebeat/module/zeek/x509/config/x509.yml @@ -57,3 +57,9 @@ processors: ignore_missing: true fail_on_error: false + - add_fields: + target: event + fields: + kind: event + type: + - info diff --git a/x-pack/filebeat/module/zeek/x509/test/x509-json.log-expected.json b/x-pack/filebeat/module/zeek/x509/test/x509-json.log-expected.json index 1cff57241ba..fff83c5969e 100644 --- a/x-pack/filebeat/module/zeek/x509/test/x509-json.log-expected.json +++ b/x-pack/filebeat/module/zeek/x509/test/x509-json.log-expected.json @@ -3,7 +3,11 @@ "@timestamp": "2018-12-03T20:00:00.143Z", "event.dataset": "zeek.x509", "event.id": "FxZ6gZ3YR6vFlIocq3", + "event.kind": "event", "event.module": "zeek", + "event.type": [ + 
"info" + ], "fileset.name": "x509", "input.type": "log", "log.offset": 0, diff --git a/x-pack/filebeat/modules.d/azure.yml.disabled b/x-pack/filebeat/modules.d/azure.yml.disabled index c8003fbcf96..0c7eb3d6e01 100644 --- a/x-pack/filebeat/modules.d/azure.yml.disabled +++ b/x-pack/filebeat/modules.d/azure.yml.disabled @@ -6,15 +6,15 @@ activitylogs: enabled: true var: - # Eventhub name containing the activity logs, overwrite he default value if the logs are exported in a different eventhub + # eventhub name containing the activity logs, overwrite he default value if the logs are exported in a different eventhub eventhub: "insights-operational-logs" - # Consumer group name that has access to the event hub, we advise creating a dedicated consumer group for the azure module + # consumer group name that has access to the event hub, we advise creating a dedicated consumer group for the azure module consumer_group: "$Default" # the connection string required to communicate with Event Hubs, steps to generate one here https://docs.microsoft.com/en-us/azure/event-hubs/event-hubs-get-connection-string connection_string: "" - # the name of the storage account the state/offsets will be stored and updated. + # the name of the storage account the state/offsets will be stored and updated storage_account: "" - #The storage account key, this key will be used to authorize access to data in your storage account. + # the storage account key, this key will be used to authorize access to data in your storage account storage_account_key: "" auditlogs: diff --git a/x-pack/functionbeat/Makefile b/x-pack/functionbeat/Makefile index fe15d9fe5c4..60d069da395 100644 --- a/x-pack/functionbeat/Makefile +++ b/x-pack/functionbeat/Makefile @@ -7,7 +7,7 @@ ES_BEATS?=../../ # # Includes # -include $(ES_BEATS)/dev-tools/make/xpack.mk +include $(ES_BEATS)/dev-tools/make/mage.mk .PHONY: test-gcp-functions test-gcp-functions: mage diff --git a/x-pack/functionbeat/functionbeat.reference.yml b/x-pack/functionbeat/functionbeat.reference.yml index 0e945a8dea2..9cf9d78c613 100644 --- a/x-pack/functionbeat/functionbeat.reference.yml +++ b/x-pack/functionbeat/functionbeat.reference.yml @@ -792,6 +792,27 @@ output.elasticsearch: # The pin is a base64 encoded string of the SHA-256 fingerprint. #ssl.ca_sha256: "" + # Enable Kerberos support. Kerberos is automatically enabled if any Kerberos setting is set. + #kerberos.enabled: true + + # Authentication type to use with Kerberos. Available options: keytab, password. + #kerberos.auth_type: password + + # Path to the keytab file. It is used when auth_type is set to keytab. + #kerberos.keytab: /etc/elastic.keytab + + # Path to the Kerberos configuration. + #kerberos.config_path: /etc/krb5.conf + + # Name of the Kerberos user. + #kerberos.username: elastic + + # Password of the Kerberos user. It is used when auth_type is set to password. + #kerberos.password: changeme + + # Kerberos realm. + #kerberos.realm: ELASTIC + #----------------------------- Logstash output --------------------------------- #output.logstash: # Boolean flag to enable or disable the output module. @@ -1305,6 +1326,27 @@ logging.files: # never, once, and freely. Default is never. #ssl.renegotiation: never + # Enable Kerberos support. Kerberos is automatically enabled if any Kerberos setting is set. + #kerberos.enabled: true + + # Authentication type to use with Kerberos. Available options: keytab, password. + #kerberos.auth_type: password + + # Path to the keytab file. It is used when auth_type is set to keytab. 
+ #kerberos.keytab: /etc/elastic.keytab + + # Path to the Kerberos configuration. + #kerberos.config_path: /etc/krb5.conf + + # Name of the Kerberos user. + #kerberos.username: elastic + + # Password of the Kerberos user. It is used when auth_type is set to password. + #kerberos.password: changeme + + # Kerberos realm. + #kerberos.realm: ELASTIC + #metrics.period: 10s #state.period: 1m diff --git a/x-pack/functionbeat/magefile.go b/x-pack/functionbeat/magefile.go index d4554e4da1a..a317bd0cb71 100644 --- a/x-pack/functionbeat/magefile.go +++ b/x-pack/functionbeat/magefile.go @@ -14,10 +14,11 @@ import ( "github.com/magefile/mage/mg" + devtools "github.com/elastic/beats/v7/dev-tools/mage" + functionbeat "github.com/elastic/beats/v7/x-pack/functionbeat/scripts/mage" + // mage:import _ "github.com/elastic/beats/v7/dev-tools/mage/target/common" - "github.com/elastic/beats/v7/dev-tools/mage/target/unittest" - // mage:import _ "github.com/elastic/beats/v7/dev-tools/mage/target/pkg" // mage:import @@ -26,9 +27,6 @@ import ( _ "github.com/elastic/beats/v7/dev-tools/mage/target/integtest/notests" // mage:import _ "github.com/elastic/beats/v7/dev-tools/mage/target/test" - - devtools "github.com/elastic/beats/v7/dev-tools/mage" - functionbeat "github.com/elastic/beats/v7/x-pack/functionbeat/scripts/mage" ) func init() { @@ -153,11 +151,6 @@ func TestPackages() error { return devtools.TestPackages() } -// GoTestUnit is an alias for goUnitTest. -func GoTestUnit() { - mg.Deps(unittest.GoUnitTest) -} - // BuildPkgForFunctions creates a folder named pkg and adds functions to it. // This makes testing the manager more comfortable. func BuildPkgForFunctions() error { diff --git a/x-pack/libbeat/autodiscover/providers/aws/ec2/provider.go b/x-pack/libbeat/autodiscover/providers/aws/ec2/provider.go index 15d8015e52c..4d457c46a8c 100644 --- a/x-pack/libbeat/autodiscover/providers/aws/ec2/provider.go +++ b/x-pack/libbeat/autodiscover/providers/aws/ec2/provider.go @@ -15,6 +15,7 @@ import ( "github.com/elastic/beats/v7/libbeat/common" "github.com/elastic/beats/v7/libbeat/common/bus" "github.com/elastic/beats/v7/libbeat/common/cfgwarn" + "github.com/elastic/beats/v7/libbeat/keystore" "github.com/elastic/beats/v7/libbeat/logp" awsauto "github.com/elastic/beats/v7/x-pack/libbeat/autodiscover/providers/aws" awscommon "github.com/elastic/beats/v7/x-pack/libbeat/common/aws" @@ -33,10 +34,11 @@ type Provider struct { stopListener bus.Listener watcher *watcher uuid uuid.UUID + keystore keystore.Keystore } // AutodiscoverBuilder is the main builder for this provider. -func AutodiscoverBuilder(bus bus.Bus, uuid uuid.UUID, c *common.Config) (autodiscover.Provider, error) { +func AutodiscoverBuilder(bus bus.Bus, uuid uuid.UUID, c *common.Config, keystore keystore.Keystore) (autodiscover.Provider, error) { cfgwarn.Experimental("aws_ec2 autodiscover is experimental") config := awsauto.DefaultConfig() @@ -78,12 +80,12 @@ func AutodiscoverBuilder(bus bus.Bus, uuid uuid.UUID, c *common.Config) (autodis config.AWSConfig.Endpoint, "ec2", region, awsCfg))) } - return internalBuilder(uuid, bus, config, newAPIFetcher(clients)) + return internalBuilder(uuid, bus, config, newAPIFetcher(clients), keystore) } // internalBuilder is mainly intended for testing via mocks and stubs. // it can be configured to use a fetcher that doesn't actually hit the AWS API. 
-func internalBuilder(uuid uuid.UUID, bus bus.Bus, config *awsauto.Config, fetcher fetcher) (*Provider, error) { +func internalBuilder(uuid uuid.UUID, bus bus.Bus, config *awsauto.Config, fetcher fetcher, keystore keystore.Keystore) (*Provider, error) { mapper, err := template.NewConfigMapper(config.Templates) if err != nil { return nil, err @@ -94,6 +96,7 @@ func internalBuilder(uuid uuid.UUID, bus bus.Bus, config *awsauto.Config, fetche bus: bus, templates: &mapper, uuid: uuid, + keystore: keystore, } p.watcher = newWatcher( diff --git a/x-pack/libbeat/autodiscover/providers/aws/ec2/provider_test.go b/x-pack/libbeat/autodiscover/providers/aws/ec2/provider_test.go index 255ae2e141b..b22321eeb23 100644 --- a/x-pack/libbeat/autodiscover/providers/aws/ec2/provider_test.go +++ b/x-pack/libbeat/autodiscover/providers/aws/ec2/provider_test.go @@ -15,6 +15,7 @@ import ( "github.com/elastic/beats/v7/libbeat/common" "github.com/elastic/beats/v7/libbeat/common/bus" + "github.com/elastic/beats/v7/libbeat/keystore" "github.com/elastic/beats/v7/libbeat/logp" awsauto "github.com/elastic/beats/v7/x-pack/libbeat/autodiscover/providers/aws" "github.com/elastic/beats/v7/x-pack/libbeat/autodiscover/providers/aws/test" @@ -33,7 +34,8 @@ func Test_internalBuilder(t *testing.T) { } uuid, _ := uuid.NewV4() - provider, err := internalBuilder(uuid, pBus, cfg, fetcher) + k, _ := keystore.NewFileKeystore("test") + provider, err := internalBuilder(uuid, pBus, cfg, fetcher, k) require.NoError(t, err) startListener := pBus.Subscribe("start") diff --git a/x-pack/libbeat/autodiscover/providers/aws/elb/provider.go b/x-pack/libbeat/autodiscover/providers/aws/elb/provider.go index 8513e0f9d2a..522b5ba9a4f 100644 --- a/x-pack/libbeat/autodiscover/providers/aws/elb/provider.go +++ b/x-pack/libbeat/autodiscover/providers/aws/elb/provider.go @@ -15,6 +15,7 @@ import ( "github.com/elastic/beats/v7/libbeat/common" "github.com/elastic/beats/v7/libbeat/common/bus" "github.com/elastic/beats/v7/libbeat/common/cfgwarn" + "github.com/elastic/beats/v7/libbeat/keystore" "github.com/elastic/beats/v7/libbeat/logp" awsauto "github.com/elastic/beats/v7/x-pack/libbeat/autodiscover/providers/aws" awscommon "github.com/elastic/beats/v7/x-pack/libbeat/common/aws" @@ -35,10 +36,11 @@ type Provider struct { stopListener bus.Listener watcher *watcher uuid uuid.UUID + keystore keystore.Keystore } // AutodiscoverBuilder is the main builder for this provider. -func AutodiscoverBuilder(bus bus.Bus, uuid uuid.UUID, c *common.Config) (autodiscover.Provider, error) { +func AutodiscoverBuilder(bus bus.Bus, uuid uuid.UUID, c *common.Config, keystore keystore.Keystore) (autodiscover.Provider, error) { cfgwarn.Experimental("aws_elb autodiscover is experimental") config := awsauto.DefaultConfig() @@ -85,12 +87,12 @@ func AutodiscoverBuilder(bus bus.Bus, uuid uuid.UUID, c *common.Config) (autodis config.AWSConfig.Endpoint, "elasticloadbalancing", region, awsCfg))) } - return internalBuilder(uuid, bus, config, newAPIFetcher(clients)) + return internalBuilder(uuid, bus, config, newAPIFetcher(clients), keystore) } // internalBuilder is mainly intended for testing via mocks and stubs. // it can be configured to use a fetcher that doesn't actually hit the AWS API. 
-func internalBuilder(uuid uuid.UUID, bus bus.Bus, config *awsauto.Config, fetcher fetcher) (*Provider, error) { +func internalBuilder(uuid uuid.UUID, bus bus.Bus, config *awsauto.Config, fetcher fetcher, keystore keystore.Keystore) (*Provider, error) { mapper, err := template.NewConfigMapper(config.Templates) if err != nil { return nil, err @@ -101,6 +103,7 @@ func internalBuilder(uuid uuid.UUID, bus bus.Bus, config *awsauto.Config, fetche bus: bus, templates: &mapper, uuid: uuid, + keystore: keystore, } p.watcher = newWatcher( diff --git a/x-pack/libbeat/autodiscover/providers/aws/elb/provider_test.go b/x-pack/libbeat/autodiscover/providers/aws/elb/provider_test.go index e6012c39d33..d6f9a918377 100644 --- a/x-pack/libbeat/autodiscover/providers/aws/elb/provider_test.go +++ b/x-pack/libbeat/autodiscover/providers/aws/elb/provider_test.go @@ -16,6 +16,7 @@ import ( "github.com/elastic/beats/v7/libbeat/common" "github.com/elastic/beats/v7/libbeat/common/bus" + "github.com/elastic/beats/v7/libbeat/keystore" "github.com/elastic/beats/v7/libbeat/logp" awsauto "github.com/elastic/beats/v7/x-pack/libbeat/autodiscover/providers/aws" ) @@ -74,7 +75,8 @@ func Test_internalBuilder(t *testing.T) { } uuid, _ := uuid.NewV4() - provider, err := internalBuilder(uuid, pBus, cfg, fetcher) + k, _ := keystore.NewFileKeystore("test") + provider, err := internalBuilder(uuid, pBus, cfg, fetcher, k) require.NoError(t, err) startListener := pBus.Subscribe("start") diff --git a/x-pack/libbeat/common/cloudfoundry/events.go b/x-pack/libbeat/common/cloudfoundry/events.go index c588491f6f7..e946499c49e 100644 --- a/x-pack/libbeat/common/cloudfoundry/events.go +++ b/x-pack/libbeat/common/cloudfoundry/events.go @@ -5,6 +5,7 @@ package cloudfoundry import ( + "encoding/binary" "fmt" "net/url" "strings" @@ -377,18 +378,14 @@ func newEventBase(env *events.Envelope) eventBase { func newEventHttpAccess(env *events.Envelope) *EventHttpAccess { msg := env.GetHttpStartStop() - appID := "" - if msg.ApplicationId != nil { - appID = msg.ApplicationId.String() - } return &EventHttpAccess{ eventAppBase: eventAppBase{ eventBase: newEventBase(env), - appGuid: appID, + appGuid: formatUUID(msg.ApplicationId), }, startTimestamp: time.Unix(0, *msg.StartTimestamp), stopTimestamp: time.Unix(0, *msg.StopTimestamp), - requestID: msg.RequestId.String(), + requestID: formatUUID(msg.RequestId), peerType: strings.ToLower(msg.PeerType.String()), method: msg.Method.String(), uri: *msg.Uri, @@ -525,3 +522,13 @@ func urlMap(uri string) common.MapStr { "domain": u.Hostname(), } } + +func formatUUID(uuid *events.UUID) string { + if uuid == nil { + return "" + } + var uuidBytes [16]byte + binary.LittleEndian.PutUint64(uuidBytes[:8], uuid.GetLow()) + binary.LittleEndian.PutUint64(uuidBytes[8:], uuid.GetHigh()) + return fmt.Sprintf("%x-%x-%x-%x-%x", uuidBytes[0:4], uuidBytes[4:6], uuidBytes[6:8], uuidBytes[8:10], uuidBytes[10:]) +} diff --git a/x-pack/libbeat/common/cloudfoundry/events_test.go b/x-pack/libbeat/common/cloudfoundry/events_test.go new file mode 100644 index 00000000000..e4fe6f39caf --- /dev/null +++ b/x-pack/libbeat/common/cloudfoundry/events_test.go @@ -0,0 +1,402 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +// +build !integration + +package cloudfoundry + +import ( + "testing" + "time" + + "github.com/elastic/beats/v7/libbeat/common" + + "github.com/cloudfoundry/sonde-go/events" + "github.com/stretchr/testify/assert" +) + +func TestEventTypeHttpAccess(t *testing.T) { + eventType := events.Envelope_HttpStartStop + startTimestamp := int64(1587469726082) + stopTimestamp := int64(1587469875895) + peerType := events.PeerType_Client + method := events.Method_GET + uri := "https://uri.full-domain.com:8443/subpath" + remoteAddress := "remote_address" + userAgent := "user_agent" + statusCode := int32(200) + contentLength := int64(128) + appID := makeUUID() + instanceIdx := int32(1) + instanceID := "instance_id" + forwarded := []string{"forwarded"} + cfEvt := makeEnvelope(&eventType) + cfEvt.HttpStartStop = &events.HttpStartStop{ + StartTimestamp: &startTimestamp, + StopTimestamp: &stopTimestamp, + RequestId: makeUUID(), + PeerType: &peerType, + Method: &method, + Uri: &uri, + RemoteAddress: &remoteAddress, + UserAgent: &userAgent, + StatusCode: &statusCode, + ContentLength: &contentLength, + ApplicationId: appID, + InstanceIndex: &instanceIdx, + InstanceId: &instanceID, + Forwarded: forwarded, + } + evt := newEventHttpAccess(cfEvt) + + assert.Equal(t, EventTypeHttpAccess, evt.EventType()) + assert.Equal(t, "access", evt.String()) + assert.Equal(t, "origin", evt.Origin()) + assert.Equal(t, time.Unix(0, 1587469726082), evt.Timestamp()) + assert.Equal(t, "deployment", evt.Deployment()) + assert.Equal(t, "job", evt.Job()) + assert.Equal(t, "index", evt.Index()) + assert.Equal(t, "ip", evt.IP()) + assert.Equal(t, map[string]string{"tag": "value"}, evt.Tags()) + assert.Equal(t, "f47ac10b-58cc-4372-a567-0e02b2c3d479", evt.AppGuid()) + assert.Equal(t, time.Unix(0, 1587469726082), evt.StartTimestamp()) + assert.Equal(t, time.Unix(0, 1587469875895), evt.StopTimestamp()) + assert.Equal(t, "f47ac10b-58cc-4372-a567-0e02b2c3d479", evt.RequestID()) + assert.Equal(t, "client", evt.PeerType()) + assert.Equal(t, "GET", evt.Method()) + assert.Equal(t, "https://uri.full-domain.com:8443/subpath", evt.URI()) + assert.Equal(t, "remote_address", evt.RemoteAddress()) + assert.Equal(t, "user_agent", evt.UserAgent()) + assert.Equal(t, int32(200), evt.StatusCode()) + assert.Equal(t, int64(128), evt.ContentLength()) + assert.Equal(t, int32(1), evt.InstanceIndex()) + assert.Equal(t, []string{"forwarded"}, evt.Forwarded()) + + assert.Equal(t, common.MapStr{ + "cloudfoundry": common.MapStr{ + "type": "access", + "access": common.MapStr{ + "timestamp": time.Unix(0, 1587469726082), + }, + "envelope": common.MapStr{ + "origin": "origin", + "deployment": "deployment", + "ip": "ip", + "job": "job", + "index": "index", + }, + "app": common.MapStr{ + "id": "f47ac10b-58cc-4372-a567-0e02b2c3d479", + }, + }, + "http": common.MapStr{ + "response": common.MapStr{ + "status_code": int32(200), + "method": "GET", + "bytes": int64(128), + }, + }, + "user_agent": common.MapStr{ + "original": "user_agent", + }, + "url": common.MapStr{ + "original": "https://uri.full-domain.com:8443/subpath", + "scheme": "https", + "port": "8443", + "path": "/subpath", + "domain": "uri.full-domain.com", + }, + }, evt.ToFields()) +} + +func TestEventTypeLog(t *testing.T) { + eventType := events.Envelope_LogMessage + message := "log message" + messageType := events.LogMessage_OUT + timestamp := int64(1587469726082) + appID := "f47ac10b-58cc-4372-a567-0e02b2c3d479" + sourceType := "source_type" + sourceInstance := "source_instance" + cfEvt := makeEnvelope(&eventType) 
+ cfEvt.LogMessage = &events.LogMessage{ + Message: []byte(message), + MessageType: &messageType, + Timestamp: &timestamp, + AppId: &appID, + SourceType: &sourceType, + SourceInstance: &sourceInstance, + } + evt := newEventLog(cfEvt) + + assert.Equal(t, EventTypeLog, evt.EventType()) + assert.Equal(t, "log", evt.String()) + assert.Equal(t, "origin", evt.Origin()) + assert.Equal(t, time.Unix(0, 1587469726082), evt.Timestamp()) + assert.Equal(t, "deployment", evt.Deployment()) + assert.Equal(t, "job", evt.Job()) + assert.Equal(t, "index", evt.Index()) + assert.Equal(t, "ip", evt.IP()) + assert.Equal(t, map[string]string{"tag": "value"}, evt.Tags()) + assert.Equal(t, "f47ac10b-58cc-4372-a567-0e02b2c3d479", evt.AppGuid()) + assert.Equal(t, "log message", evt.Message()) + assert.Equal(t, EventLogMessageTypeStdout, evt.MessageType()) + assert.Equal(t, "source_type", evt.SourceType()) + assert.Equal(t, "source_instance", evt.SourceID()) + + assert.Equal(t, common.MapStr{ + "cloudfoundry": common.MapStr{ + "type": "log", + "log": common.MapStr{ + "timestamp": time.Unix(0, 1587469726082), + "source": common.MapStr{ + "instance": evt.SourceID(), + "type": evt.SourceType(), + }, + }, + "envelope": common.MapStr{ + "origin": "origin", + "deployment": "deployment", + "ip": "ip", + "job": "job", + "index": "index", + }, + "app": common.MapStr{ + "id": "f47ac10b-58cc-4372-a567-0e02b2c3d479", + }, + }, + "message": "log message", + "stream": "stdout", + }, evt.ToFields()) +} + +func TestEventCounter(t *testing.T) { + eventType := events.Envelope_CounterEvent + name := "name" + delta := uint64(10) + total := uint64(999) + cfEvt := makeEnvelope(&eventType) + cfEvt.CounterEvent = &events.CounterEvent{ + Name: &name, + Delta: &delta, + Total: &total, + } + evt := newEventCounter(cfEvt) + + assert.Equal(t, EventTypeCounter, evt.EventType()) + assert.Equal(t, "counter", evt.String()) + assert.Equal(t, "origin", evt.Origin()) + assert.Equal(t, time.Unix(0, 1587469726082), evt.Timestamp()) + assert.Equal(t, "deployment", evt.Deployment()) + assert.Equal(t, "job", evt.Job()) + assert.Equal(t, "index", evt.Index()) + assert.Equal(t, "ip", evt.IP()) + assert.Equal(t, map[string]string{"tag": "value"}, evt.Tags()) + assert.Equal(t, "name", evt.Name()) + assert.Equal(t, uint64(10), evt.Delta()) + assert.Equal(t, uint64(999), evt.Total()) + + assert.Equal(t, common.MapStr{ + "cloudfoundry": common.MapStr{ + "type": "counter", + "counter": common.MapStr{ + "timestamp": time.Unix(0, 1587469726082), + "name": "name", + "delta": uint64(10), + "total": uint64(999), + }, + "envelope": common.MapStr{ + "origin": "origin", + "deployment": "deployment", + "ip": "ip", + "job": "job", + "index": "index", + }, + }, + }, evt.ToFields()) +} + +func TestEventValueMetric(t *testing.T) { + eventType := events.Envelope_ValueMetric + name := "name" + value := 10.1 + unit := "unit" + cfEvt := makeEnvelope(&eventType) + cfEvt.ValueMetric = &events.ValueMetric{ + Name: &name, + Value: &value, + Unit: &unit, + } + evt := newEventValueMetric(cfEvt) + + assert.Equal(t, EventTypeValueMetric, evt.EventType()) + assert.Equal(t, "value", evt.String()) + assert.Equal(t, "origin", evt.Origin()) + assert.Equal(t, time.Unix(0, 1587469726082), evt.Timestamp()) + assert.Equal(t, "deployment", evt.Deployment()) + assert.Equal(t, "job", evt.Job()) + assert.Equal(t, "index", evt.Index()) + assert.Equal(t, "ip", evt.IP()) + assert.Equal(t, map[string]string{"tag": "value"}, evt.Tags()) + assert.Equal(t, "name", evt.Name()) + assert.Equal(t, 10.1, evt.Value()) +
assert.Equal(t, "unit", evt.Unit()) + + assert.Equal(t, common.MapStr{ + "cloudfoundry": common.MapStr{ + "type": "value", + "value": common.MapStr{ + "timestamp": time.Unix(0, 1587469726082), + "name": "name", + "value": 10.1, + "unit": "unit", + }, + "envelope": common.MapStr{ + "origin": "origin", + "deployment": "deployment", + "ip": "ip", + "job": "job", + "index": "index", + }, + }, + }, evt.ToFields()) +} + +func TestEventContainerMetric(t *testing.T) { + eventType := events.Envelope_ContainerMetric + appID := "f47ac10b-58cc-4372-a567-0e02b2c3d479" + instanceIdx := int32(1) + cpuPercentage := 0.2 + memoryBytes := uint64(1024) + diskBytes := uint64(2048) + memoryBytesQuota := uint64(2048) + diskBytesQuota := uint64(4096) + cfEvt := makeEnvelope(&eventType) + cfEvt.ContainerMetric = &events.ContainerMetric{ + ApplicationId: &appID, + InstanceIndex: &instanceIdx, + CpuPercentage: &cpuPercentage, + MemoryBytes: &memoryBytes, + DiskBytes: &diskBytes, + MemoryBytesQuota: &memoryBytesQuota, + DiskBytesQuota: &diskBytesQuota, + } + evt := newEventContainerMetric(cfEvt) + + assert.Equal(t, EventTypeContainerMetric, evt.EventType()) + assert.Equal(t, "container", evt.String()) + assert.Equal(t, "origin", evt.Origin()) + assert.Equal(t, time.Unix(0, 1587469726082), evt.Timestamp()) + assert.Equal(t, "deployment", evt.Deployment()) + assert.Equal(t, "job", evt.Job()) + assert.Equal(t, "index", evt.Index()) + assert.Equal(t, "ip", evt.IP()) + assert.Equal(t, map[string]string{"tag": "value"}, evt.Tags()) + assert.Equal(t, "f47ac10b-58cc-4372-a567-0e02b2c3d479", evt.AppGuid()) + assert.Equal(t, int32(1), evt.InstanceIndex()) + assert.Equal(t, 0.2, evt.CPUPercentage()) + assert.Equal(t, uint64(1024), evt.MemoryBytes()) + assert.Equal(t, uint64(2048), evt.DiskBytes()) + assert.Equal(t, uint64(2048), evt.MemoryBytesQuota()) + assert.Equal(t, uint64(4096), evt.DiskBytesQuota()) + + assert.Equal(t, common.MapStr{ + "cloudfoundry": common.MapStr{ + "type": "container", + "container": common.MapStr{ + "timestamp": time.Unix(0, 1587469726082), + "instance_index": int32(1), + "cpu.pct": 0.2, + "memory.bytes": uint64(1024), + "memory.quota.bytes": uint64(2048), + "disk.bytes": uint64(2048), + "disk.quota.bytes": uint64(4096), + }, + "envelope": common.MapStr{ + "origin": "origin", + "deployment": "deployment", + "ip": "ip", + "job": "job", + "index": "index", + }, + "app": common.MapStr{ + "id": "f47ac10b-58cc-4372-a567-0e02b2c3d479", + }, + }, + }, evt.ToFields()) +} + +func TestEventError(t *testing.T) { + eventType := events.Envelope_Error + source := "source" + code := int32(100) + message := "message" + cfEvt := makeEnvelope(&eventType) + cfEvt.Error = &events.Error{ + Source: &source, + Code: &code, + Message: &message, + } + evt := newEventError(cfEvt) + + assert.Equal(t, EventTypeError, evt.EventType()) + assert.Equal(t, "error", evt.String()) + assert.Equal(t, "origin", evt.Origin()) + assert.Equal(t, time.Unix(0, 1587469726082), evt.Timestamp()) + assert.Equal(t, "deployment", evt.Deployment()) + assert.Equal(t, "job", evt.Job()) + assert.Equal(t, "index", evt.Index()) + assert.Equal(t, "ip", evt.IP()) + assert.Equal(t, map[string]string{"tag": "value"}, evt.Tags()) + assert.Equal(t, "message", evt.Message()) + assert.Equal(t, int32(100), evt.Code()) + assert.Equal(t, "source", evt.Source()) + + assert.Equal(t, common.MapStr{ + "cloudfoundry": common.MapStr{ + "type": "error", + "error": common.MapStr{ + "timestamp": time.Unix(0, 1587469726082), + "source": "source", + }, + "envelope": 
common.MapStr{ + "origin": "origin", + "deployment": "deployment", + "ip": "ip", + "job": "job", + "index": "index", + }, + }, + "message": "message", + "code": int32(100), + }, evt.ToFields()) +} + +func makeEnvelope(eventType *events.Envelope_EventType) *events.Envelope { + timestamp := int64(1587469726082) + origin := "origin" + deployment := "deployment" + job := "job" + index := "index" + ip := "ip" + return &events.Envelope{ + Origin: &origin, + EventType: eventType, + Timestamp: &timestamp, + Deployment: &deployment, + Job: &job, + Index: &index, + Ip: &ip, + Tags: map[string]string{"tag": "value"}, + } +} + +func makeUUID() *events.UUID { + // UUID `f47ac10b-58cc-4372-a567-0e02b2c3d479` + low := uint64(0x7243cc580bc17af4) + high := uint64(0x79d4c3b2020e67a5) + return &events.UUID{ + Low: &low, + High: &high, + } +} diff --git a/x-pack/libbeat/licenser/elastic_fetcher.go b/x-pack/libbeat/licenser/elastic_fetcher.go index 6ffa5f6fa37..b2c855df6b7 100644 --- a/x-pack/libbeat/licenser/elastic_fetcher.go +++ b/x-pack/libbeat/licenser/elastic_fetcher.go @@ -18,9 +18,9 @@ import ( "github.com/elastic/beats/v7/libbeat/logp" ) -const xPackURL = "/_license" +const licenseURL = "/_license" -// params defaults query parameters to send to the '_xpack' endpoint by default we only need +// params defaults query parameters to send to the '_license' endpoint; by default we only need // machine parseable data. var params = map[string]string{ "human": "false", @@ -88,12 +88,12 @@ func NewElasticFetcher(client esclient) *ElasticFetcher { return &ElasticFetcher{client: client, log: logp.NewLogger("elasticfetcher")} } -// Fetch retrieves the license information from an Elasticsearch Client, it will call the `_xpack` -// end point and will return a parsed license. If the `_xpack` endpoint is unreacheable we will +// Fetch retrieves the license information from an Elasticsearch Client, it will call the `_license` +// endpoint and will return a parsed license. If the `_license` endpoint is unreachable we will // return the OSS License otherwise we return an error. func (f *ElasticFetcher) Fetch() (*License, error) { - status, body, err := f.client.Request("GET", xPackURL, "", params, nil) - // When we are running an OSS release of elasticsearch the _xpack endpoint will return a 405, + status, body, err := f.client.Request("GET", licenseURL, "", params, nil) + // When we are running an OSS release of elasticsearch the _license endpoint will return a 405, // "Method Not Allowed", so we return the default OSS license. if status == http.StatusBadRequest { f.log.Debug("Received 'Bad request' (400) response from server, fallback to OSS license") diff --git a/x-pack/libbeat/licenser/es_callback.go b/x-pack/libbeat/licenser/es_callback.go index 0ca99db8f5f..06cc055083c 100644 --- a/x-pack/libbeat/licenser/es_callback.go +++ b/x-pack/libbeat/licenser/es_callback.go @@ -30,7 +30,7 @@ func Enforce(name string, checks ...CheckFunc) { license, err := fetcher.Fetch() if err != nil { - return errors.Wrapf(err, "cannot retrieve the elasticsearch license from the /_xpack endpoint, "+ + return errors.Wrapf(err, "cannot retrieve the elasticsearch license from the /_license endpoint, "+ "%s requires the default distribution of Elasticsearch.
Please make the endpoint accessible "+ "to %s so it can verify the license.", name, name) } diff --git a/x-pack/libbeat/magefile.go b/x-pack/libbeat/magefile.go index 9e1f254af56..01e9b4d7f2d 100644 --- a/x-pack/libbeat/magefile.go +++ b/x-pack/libbeat/magefile.go @@ -14,7 +14,7 @@ import ( // mage:import _ "github.com/elastic/beats/v7/dev-tools/mage/target/integtest" // mage:import - "github.com/elastic/beats/v7/dev-tools/mage/target/unittest" + _ "github.com/elastic/beats/v7/dev-tools/mage/target/unittest" // mage:import _ "github.com/elastic/beats/v7/dev-tools/mage/target/test" ) @@ -32,9 +32,3 @@ func Build() error { func Fields() error { return devtools.GenerateFieldsYAML() } - -// Aliases provides compatibility with CI while we transition all Beats -// to having common testing targets. -var Aliases = map[string]interface{}{ - "goTestUnit": unittest.GoUnitTest, // dev-tools/jenkins_ci.ps1 uses this. -} diff --git a/x-pack/metricbeat/Makefile b/x-pack/metricbeat/Makefile index 56633e2b3e5..c77d4c68517 100644 --- a/x-pack/metricbeat/Makefile +++ b/x-pack/metricbeat/Makefile @@ -1,3 +1,8 @@ ES_BEATS ?= ../.. -include $(ES_BEATS)/dev-tools/make/xpack.mk +include $(ES_BEATS)/dev-tools/make/mage.mk + +# Creates a new metricset. Requires the params MODULE and METRICSET +.PHONY: create-metricset +create-metricset: + mage createMetricset diff --git a/x-pack/metricbeat/docker-compose.yml b/x-pack/metricbeat/docker-compose.yml index c89b558605d..83ac016ffd7 100644 --- a/x-pack/metricbeat/docker-compose.yml +++ b/x-pack/metricbeat/docker-compose.yml @@ -10,6 +10,7 @@ services: volumes: - ${PWD}/../..:/go/src/github.com/elastic/beats/ - /var/run/docker.sock:/var/run/docker.sock + - ${HOME}/.docker:/root/.docker:ro network_mode: host command: make diff --git a/x-pack/metricbeat/magefile.go b/x-pack/metricbeat/magefile.go index 030b2b1889e..ee5ecc9e0f0 100644 --- a/x-pack/metricbeat/magefile.go +++ b/x-pack/metricbeat/magefile.go @@ -25,22 +25,19 @@ import ( "github.com/elastic/beats/v7/dev-tools/mage/target/unittest" // mage:import "github.com/elastic/beats/v7/dev-tools/mage/target/test" + // mage:import + _ "github.com/elastic/beats/v7/metricbeat/scripts/mage/target/metricset" ) func init() { common.RegisterCheckDeps(Update) + unittest.RegisterPythonTestDeps(Fields) test.RegisterDeps(IntegTest) devtools.BeatDescription = "Metricbeat is a lightweight shipper for metrics." devtools.BeatLicense = "Elastic License" } -// Aliases provides compatibility with CI while we transition all Beats -// to having common testing targets. -var Aliases = map[string]interface{}{ - "goTestUnit": unittest.GoUnitTest, // dev-tools/jenkins_ci.ps1 uses this. -} - // Build builds the Beat binary. func Build() error { return devtools.Build(devtools.DefaultBuildArgs()) @@ -109,7 +106,7 @@ func moduleFieldsGo() error { return devtools.GenerateModuleFieldsGo("module") } -// fieldsYML generates a fields.yml based on filebeat + x-pack/filebeat/modules. +// fieldsYML generates a fields.yml based on metricbeat + x-pack/metricbeat/modules. func fieldsYML() error { return devtools.GenerateFieldsYAML(devtools.OSSBeatDir("module"), "module") } @@ -137,8 +134,6 @@ func Update() { // IntegTest executes integration tests (it uses Docker to run the tests). func IntegTest() { - devtools.AddIntegTestUsage() - defer devtools.StopIntegTestEnv() mg.SerialDeps(GoIntegTest, PythonIntegTest) } @@ -148,15 +143,26 @@ func IntegTest() { // Use TEST_TAGS=tag1,tag2 to add additional build tags. // Use MODULE=module to run only tests for `module`. 
func GoIntegTest(ctx context.Context) error { + if !devtools.IsInIntegTestEnv() { + mg.SerialDeps(Fields, Dashboards) + } return devtools.GoTestIntegrationForModule(ctx) } -// PythonIntegTest executes the python system tests in the integration environment (Docker). +// PythonIntegTest executes the python system tests in the integration +// environment (Docker). +// Use NOSE_TESTMATCH=pattern to only run tests matching the specified pattern. +// Use any other NOSE_* environment variable to influence the behavior of +// nosetests. func PythonIntegTest(ctx context.Context) error { if !devtools.IsInIntegTestEnv() { mg.SerialDeps(Fields, Dashboards) } - return devtools.RunIntegTest("pythonIntegTest", func() error { + runner, err := devtools.NewDockerIntegrationRunner(devtools.ListMatchingEnvVars("NOSE_")...) + if err != nil { + return err + } + return runner.Test("pythonIntegTest", func() error { mg.Deps(devtools.BuildSystemTestBinary) return devtools.PythonNoseTest(devtools.DefaultPythonTestIntegrationArgs()) }) diff --git a/x-pack/metricbeat/metricbeat.reference.yml b/x-pack/metricbeat/metricbeat.reference.yml index 704cc820a65..be35d457ced 100644 --- a/x-pack/metricbeat/metricbeat.reference.yml +++ b/x-pack/metricbeat/metricbeat.reference.yml @@ -498,13 +498,21 @@ metricbeat.modules: - module: googlecloud metricsets: - compute + region: "us-central1" + project_id: "your project id" + credentials_file_path: "your JSON credentials file path" + exclude_labels: false + period: 300s + +- module: googlecloud + metricsets: - pubsub - loadbalancing zone: "us-central1-a" project_id: "your project id" credentials_file_path: "your JSON credentials file path" exclude_labels: false - period: 300s + period: 60s - module: googlecloud metricsets: @@ -515,6 +523,15 @@ metricbeat.modules: exclude_labels: false period: 300s +- module: googlecloud + metricsets: + - compute + region: "us-" + project_id: "your project id" + credentials_file_path: "your JSON credentials file path" + exclude_labels: false + period: 60s + #------------------------------- Graphite Module ------------------------------- - module: graphite metricsets: ["server"] @@ -826,7 +843,7 @@ metricbeat.modules: #--------------------------------- Kvm Module --------------------------------- - module: kvm - metricsets: ["dommemstat"] + metricsets: ["dommemstat", "status"] enabled: true period: 10s hosts: ["unix:///var/run/libvirt/libvirt-sock"] @@ -1256,11 +1273,14 @@ metricbeat.modules: period: 10s perfmon.ignore_non_existent_counters: false perfmon.group_measurements_by_instance: false - perfmon.counters: - # - instance_label: processor.name - # instance_name: total - # measurement_label: processor.time.total.pct - # query: '\Processor Information(_Total)\% Processor Time' + perfmon.queries: +# - object: 'Process' +# instance: ["*"] +# counters: +# - name: 'Disk Writes/sec' +# field: physical_disk.write.per_sec +# format: "float" +# - name: "% Disk Write Time" - module: windows metricsets: ["service"] @@ -1684,6 +1704,27 @@ output.elasticsearch: # The pin is a base64 encoded string of the SHA-256 fingerprint. #ssl.ca_sha256: "" + # Enable Kerberos support. Kerberos is automatically enabled if any Kerberos setting is set. + #kerberos.enabled: true + + # Authentication type to use with Kerberos. Available options: keytab, password. + #kerberos.auth_type: password + + # Path to the keytab file. It is used when auth_type is set to keytab. + #kerberos.keytab: /etc/elastic.keytab + + # Path to the Kerberos configuration. 
+ #kerberos.config_path: /etc/krb5.conf + + # Name of the Kerberos user. + #kerberos.username: elastic + + # Password of the Kerberos user. It is used when auth_type is set to password. + #kerberos.password: changeme + + # Kerberos realm. + #kerberos.realm: ELASTIC + #metrics.period: 10s #state.period: 1m diff --git a/x-pack/metricbeat/module/aws/billing/_meta/fields.yml b/x-pack/metricbeat/module/aws/billing/_meta/fields.yml index 94b06a2f151..2b246415653 100644 --- a/x-pack/metricbeat/module/aws/billing/_meta/fields.yml +++ b/x-pack/metricbeat/module/aws/billing/_meta/fields.yml @@ -4,3 +4,9 @@ `billing` contains the estimated charges for your AWS account in Cloudwatch. release: beta fields: + - name: metrics + type: group + fields: + - name: EstimatedCharges.max + type: long + description: Maximum estimated charges for AWS account. diff --git a/x-pack/metricbeat/module/aws/ebs/_meta/fields.yml b/x-pack/metricbeat/module/aws/ebs/_meta/fields.yml index cd6f0ada0e3..eac4fd48d70 100644 --- a/x-pack/metricbeat/module/aws/ebs/_meta/fields.yml +++ b/x-pack/metricbeat/module/aws/ebs/_meta/fields.yml @@ -4,3 +4,39 @@ `ebs` contains the metrics that were scraped from AWS CloudWatch which contains monitoring metrics sent by AWS EBS. release: ga fields: + - name: metrics + type: group + fields: + - name: VolumeReadBytes.avg + type: double + description: Average size of each read operation during the period, except on volumes attached to a Nitro-based instance, where the average represents the average over the specified period. + - name: VolumeWriteBytes.avg + type: double + description: Average size of each write operation during the period, except on volumes attached to a Nitro-based instance, where the average represents the average over the specified period. + - name: VolumeReadOps.avg + type: double + description: The total number of read operations in a specified period of time. + - name: VolumeWriteOps.avg + type: double + description: The total number of write operations in a specified period of time. + - name: VolumeQueueLength.avg + type: double + description: The number of read and write operation requests waiting to be completed in a specified period of time.
+ - name: VolumeThroughputPercentage.avg + type: double + description: The percentage of I/O operations per second (IOPS) delivered of the total IOPS provisioned for an Amazon EBS volume. Used with Provisioned IOPS SSD volumes only. + - name: VolumeConsumedReadWriteOps.avg + type: double + description: The total amount of read and write operations (normalized to 256K capacity units) consumed in a specified period of time. Used with Provisioned IOPS SSD volumes only. + - name: BurstBalance.avg + type: double + description: Used with General Purpose SSD (gp2), Throughput Optimized HDD (st1), and Cold HDD (sc1) volumes only. Provides information about the percentage of I/O credits (for gp2) or throughput credits (for st1 and sc1) remaining in the burst bucket. + - name: VolumeTotalReadTime.sum + type: double + description: The total number of seconds spent by all read operations that completed in a specified period of time. + - name: VolumeTotalWriteTime.sum + type: double + description: The total number of seconds spent by all write operations that completed in a specified period of time. + - name: VolumeIdleTime.sum + type: double + description: The total number of seconds in a specified period of time when no read or write operations were submitted. diff --git a/x-pack/metricbeat/module/aws/elb/_meta/docs.asciidoc b/x-pack/metricbeat/module/aws/elb/_meta/docs.asciidoc index 3975a252e88..8f242e8867a 100644 --- a/x-pack/metricbeat/module/aws/elb/_meta/docs.asciidoc +++ b/x-pack/metricbeat/module/aws/elb/_meta/docs.asciidoc @@ -98,17 +98,25 @@ https://docs.aws.amazon.com/elasticloadbalancing/latest/network/load-balancer-cl |Metric Name|Statistic Method |ActiveFlowCount | Average |ActiveFlowCount_TLS | Average -|ClientTLSNegotiationErrorCount | Sum +|ActiveFlowCount_TCP | Average +|ActiveFlowCount_UDP | Average |ConsumedLCUs | Average -|HealthyHostCount | Maximum +|ConsumedLCUs_TCP | Average +|ConsumedLCUs_TLS | Average +|ConsumedLCUs_UDP | Average +|ClientTLSNegotiationErrorCount | Sum |NewFlowCount | Sum |NewFlowCount_TLS | Sum +|NewFlowCount_TCP | Sum +|NewFlowCount_UDP | Sum |ProcessedBytes | Sum +|ProcessedBytes_TCP | Sum |ProcessedBytes_TLS | Sum +|ProcessedBytes_UDP| Sum |TargetTLSNegotiationErrorCount | Sum |TCP_Client_Reset_Count | Sum |TCP_ELB_Reset_Count | Sum |TCP_Target_Reset_Count | Sum -|UnHealthyHostCount | Average -|EstimatedALBConsumedLCUs | Maximum +|UnHealthyHostCount | Maximum +|HealthyHostCount | Maximum |=== diff --git a/x-pack/metricbeat/module/aws/elb/_meta/fields.yml b/x-pack/metricbeat/module/aws/elb/_meta/fields.yml index 634bc04774d..c49b5a7caeb 100644 --- a/x-pack/metricbeat/module/aws/elb/_meta/fields.yml +++ b/x-pack/metricbeat/module/aws/elb/_meta/fields.yml @@ -4,3 +4,183 @@ `elb` contains the metrics that were scraped from AWS CloudWatch which contains monitoring metrics sent by AWS ELB. release: ga fields: + - name: metrics + type: group + fields: + - name: BackendConnectionErrors.sum + type: long + description: The number of connections that were not successfully established between the load balancer and the registered instances. + - name: HTTPCode_Backend_2XX.sum + type: long + description: The number of HTTP 2XX response code generated by registered instances. + - name: HTTPCode_Backend_3XX.sum + type: long + description: The number of HTTP 3XX response code generated by registered instances. + - name: HTTPCode_Backend_4XX.sum + type: long + description: The number of HTTP 4XX response code generated by registered instances. 
+        - name: HTTPCode_Backend_5XX.sum
+          type: long
+          description: The number of HTTP 5XX response codes generated by registered instances.
+        - name: HTTPCode_ELB_4XX.sum
+          type: long
+          description: The number of HTTP 4XX client error codes generated by the load balancer.
+        - name: HTTPCode_ELB_5XX.sum
+          type: long
+          description: The number of HTTP 5XX server error codes generated by the load balancer.
+        - name: RequestCount.sum
+          type: long
+          description: The number of requests completed or connections made during the specified interval.
+        - name: SpilloverCount.sum
+          type: long
+          description: The total number of requests that were rejected because the surge queue is full.
+        - name: HealthyHostCount.max
+          type: long
+          description: The number of healthy instances registered with your load balancer.
+        - name: SurgeQueueLength.max
+          type: long
+          description: The total number of requests (HTTP listener) or connections (TCP listener) that are pending routing to a healthy instance.
+        - name: UnHealthyHostCount.max
+          type: long
+          description: The number of unhealthy instances registered with your load balancer.
+        - name: Latency.avg
+          type: double
+          description: The total time elapsed, in seconds, from the time the load balancer sent the request to a registered instance until the instance started to send the response headers.
+        - name: EstimatedALBActiveConnectionCount.avg
+          type: double
+          description: The estimated number of concurrent TCP connections active from clients to the load balancer and from the load balancer to targets.
+        - name: EstimatedALBConsumedLCUs.avg
+          type: double
+          description: The estimated number of load balancer capacity units (LCU) used by an Application Load Balancer.
+        - name: EstimatedALBNewConnectionCount.avg
+          type: double
+          description: The estimated number of new TCP connections established from clients to the load balancer and from the load balancer to targets.
+        - name: EstimatedProcessedBytes.avg
+          type: double
+          description: The estimated number of bytes processed by an Application Load Balancer.
+- name: applicationelb
+  type: group
+  description: >
+    `applicationelb` contains the metrics that were scraped from AWS CloudWatch which contains monitoring metrics sent by AWS ApplicationELB.
+  release: ga
+  fields:
+    - name: metrics
+      type: group
+      fields:
+        - name: ActiveConnectionCount.sum
+          type: long
+          description: The total number of concurrent TCP connections active from clients to the load balancer and from the load balancer to targets.
+        - name: ClientTLSNegotiationErrorCount.sum
+          type: long
+          description: The number of TLS connections initiated by the client that did not establish a session with the load balancer due to a TLS error.
+        - name: HTTP_Fixed_Response_Count.sum
+          type: long
+          description: The number of fixed-response actions that were successful.
+        - name: HTTP_Redirect_Count.sum
+          type: long
+          description: The number of redirect actions that were successful.
+        - name: HTTP_Redirect_Url_Limit_Exceeded_Count.sum
+          type: long
+          description: The number of redirect actions that couldn't be completed because the URL in the response location header is larger than 8K.
+        - name: HTTPCode_ELB_3XX_Count.sum
+          type: long
+          description: The number of HTTP 3XX redirection codes that originate from the load balancer.
+        - name: HTTPCode_ELB_4XX_Count.sum
+          type: long
+          description: The number of HTTP 4XX client error codes that originate from the load balancer.
+ - name: HTTPCode_ELB_5XX_Count.sum + type: long + description: The number of HTTP 5XX server error codes that originate from the load balancer. + - name: HTTPCode_ELB_500_Count.sum + type: long + description: The number of HTTP 500 error codes that originate from the load balancer. + - name: HTTPCode_ELB_502_Count.sum + type: long + description: The number of HTTP 502 error codes that originate from the load balancer. + - name: HTTPCode_ELB_503_Count.sum + type: long + description: The number of HTTP 503 error codes that originate from the load balancer. + - name: HTTPCode_ELB_504_Count.sum + type: long + description: The number of HTTP 504 error codes that originate from the load balancer. + - name: IPv6ProcessedBytes.sum + type: long + description: The total number of bytes processed by the load balancer over IPv6. + - name: IPv6RequestCount.sum + type: long + description: The number of IPv6 requests received by the load balancer. + - name: NewConnectionCount.sum + type: long + description: The total number of new TCP connections established from clients to the load balancer and from the load balancer to targets. + - name: ProcessedBytes.sum + type: long + description: The total number of bytes processed by the load balancer over IPv4 and IPv6. + - name: RejectedConnectionCount.sum + type: long + description: The number of connections that were rejected because the load balancer had reached its maximum number of connections. + - name: RequestCount.sum + type: long + description: The number of requests processed over IPv4 and IPv6. + - name: RuleEvaluations.sum + type: long + description: The number of rules processed by the load balancer given a request rate averaged over an hour. + - name: ConsumedLCUs.avg + type: double + description: The number of load balancer capacity units (LCU) used by your load balancer. +- name: networkelb + type: group + description: > + `networkelb` contains the metrics that were scraped from AWS CloudWatch which contains monitoring metrics sent by AWS NetworkELB. + release: ga + fields: + - name: metrics + type: group + fields: + - name: ActiveFlowCount.avg + type: double + description: The total number of concurrent flows (or connections) from clients to targets. + - name: ActiveFlowCount_TCP.avg + type: double + description: The total number of concurrent TCP flows (or connections) from clients to targets. + - name: ActiveFlowCount_TLS.avg + type: double + description: The total number of concurrent TLS flows (or connections) from clients to targets. + - name: ActiveFlowCount_UDP.avg + type: double + description: The total number of concurrent UDP flows (or connections) from clients to targets. + - name: ConsumedLCUs.avg + type: double + description: The number of load balancer capacity units (LCU) used by your load balancer. + - name: ClientTLSNegotiationErrorCount.sum + type: long + description: The total number of TLS handshakes that failed during negotiation between a client and a TLS listener. + - name: NewFlowCount.sum + type: long + description: The total number of new flows (or connections) established from clients to targets in the time period. + - name: NewFlowCount_TLS.sum + type: long + description: The total number of new TLS flows (or connections) established from clients to targets in the time period. + - name: ProcessedBytes.sum + type: long + description: The total number of bytes processed by the load balancer, including TCP/IP headers. 
+ - name: ProcessedBytes_TLS.sum + type: long + description: The total number of bytes processed by TLS listeners. + - name: TargetTLSNegotiationErrorCount.sum + type: long + description: The total number of TLS handshakes that failed during negotiation between a TLS listener and a target. + - name: TCP_Client_Reset_Count.sum + type: long + description: The total number of reset (RST) packets sent from a client to a target. + - name: TCP_ELB_Reset_Count.sum + type: long + description: The total number of reset (RST) packets generated by the load balancer. + - name: TCP_Target_Reset_Count.sum + type: long + description: The total number of reset (RST) packets sent from a target to a client. + - name: HealthyHostCount.max + type: long + description: The number of targets that are considered healthy. + - name: UnHealthyHostCount.max + type: long + description: The number of targets that are considered unhealthy. diff --git a/x-pack/metricbeat/module/aws/elb/manifest.yml b/x-pack/metricbeat/module/aws/elb/manifest.yml index 211a42c63bd..daa6a34fffd 100644 --- a/x-pack/metricbeat/module/aws/elb/manifest.yml +++ b/x-pack/metricbeat/module/aws/elb/manifest.yml @@ -32,12 +32,12 @@ input: tags.resource_type_filter: elasticloadbalancing - namespace: AWS/NetworkELB statistic: ["Average"] - name: ["ActiveFlowCount", "ActiveFlowCount_TLS", "ConsumedLCUs"] + name: ["ActiveFlowCount", "ActiveFlowCount_TCP", "ActiveFlowCount_TLS", "ActiveFlowCount_UDP", "ConsumedLCUs", "ConsumedLCUs_TCP", "ConsumedLCUs_TLS", "ConsumedLCUs_UDP"] tags.resource_type_filter: elasticloadbalancing - namespace: AWS/NetworkELB statistic: ["Sum"] - name: ["ClientTLSNegotiationErrorCount", "NewFlowCount", "NewFlowCount_TLS", - "ProcessedBytes", "ProcessedBytes_TLS", "TargetTLSNegotiationErrorCount", + name: ["ClientTLSNegotiationErrorCount", "NewFlowCount", "NewFlowCount_TLS", "NewFlowCount_TCP", "NewFlowCount_UDP", + "ProcessedBytes", "ProcessedBytes_TCP", "ProcessedBytes_TLS", "ProcessedBytes_UDP", "TargetTLSNegotiationErrorCount", "TCP_Client_Reset_Count", "TCP_ELB_Reset_Count", "TCP_Target_Reset_Count"] tags.resource_type_filter: elasticloadbalancing - namespace: AWS/NetworkELB diff --git a/x-pack/metricbeat/module/aws/fields.go b/x-pack/metricbeat/module/aws/fields.go index d50507f86c0..f3bcf9a8eb6 100644 --- a/x-pack/metricbeat/module/aws/fields.go +++ b/x-pack/metricbeat/module/aws/fields.go @@ -19,5 +19,5 @@ func init() { // AssetAws returns asset data. // This is the base64 encoded gzipped contents of module/aws. 
func AssetAws() string { - return "eJzsXUtz47ay3s+vQGWTmZStk5PHXcziVnls5xxXeSaO5dxZMhDQonAMAhwAlKxUfvwtvPgS9aBEypOq40UyJUpAf92N7kZ3A7xEz7B+j/BKv0HIMMPhPfoGr/Q3bxCioIliuWFSvEf/+wYhhP7AK/0HyiQtOCAiOQdiNLr6PEWZFMxIxUSKMjCKEY3mSmbu2TWXBV1hQxaTNwgp4IA1vEcpfoPQnAGn+r0b/RIJnMF7ROz3J5gQWQgzsZ+5xwiZdQ7vLcUrqWj4rINK+/e0AD8OCuO4sZFUCHOGNSo0UGQkYhSEYfM1omw+BwXCIPuBYaAREwijrOCGXRoQ2D1aMiVFBsJMGiR7BlY0pkoW+S4K67jrAxmc6sl35cdxPDn7DxBT+9h/kHRxpPU4yXCeM5GG737z3Te1723hnuMgTu3AaIl5ASjHTAWR4pVGCrQsFAE92UCgf5zMCvIMDcltk94eGj45mc0RRtMfURh1Y0LKMhCaSfGVMO6j0/86WRskf/vdJKySyXeT777tSTWVxYzDGERrZBbYIAWmUAKol3e1fNHVwx36UoBab0KaMc6ZSDeg1FfCHhr+CGP8gYgUBjNhyQEE2rAMG6CILLBKQaO5VGgtC+WsS1zfTLQMTfwrDc4MDK59vm0JknKUk8BUw7TwZHVWr0AB0kThPLK7tJifHctXC0YW1QAddlZbozWrWzCLQ+e4sTzbhncbF+qcKMdpPN2+kvewBAW7XA6LdA6EzRlQtFqA8KpV4z/COetY72uBM0lnJ0knDnIm2dgf3rgpbz4co5t1/GHsTpm0WbBtrPp404IQ0Hpe8Ef4UoA299iAIOuNH+yaZA+/458VP/fDW7uuy6mR8nNr65Qjp6y7vsrwn1JUH02NApy10QcCCsd8K8hKswzLAOWgmKSTjl9t406dQ3iZdj6PDNkwyJtDZPjlmCHiz38VnAm4ExReHkAREAan8KBkqkDrSTd9e2g7UFp5OZ0VGJFZzsH+xq9YjASsUMrlDHOkgUhBsVojZglFTKMZWIFgSn20hZHBMw6bcog4H5RcMus0gX5WzMA1zjFhZv27YGZcnKLIZqAsxryiAa0sEYgEKlBhyXDuJyBxAeUW/AehfARMXxukAkwHx3gthS6yDoBjmZUKWxceEshBcglqu4W46BxeSxtuIIIFMgqTZ7SQK5QVZGFnc4FInZ1moWSRLvLC2BVgNxqvaHd0ke0cgkuxOUdbhptLcXwhdq68/0qxe4idUnyEnDOCLetH9uzAca6jKGZgVmBdhEBFTl38zgxkCOc5YOenmXAiLF27dq7dmp7OGaQApDwWb5guEBbUh2qbI2MhzQJU+YswWTBjW9zQ3yUceFJYaEysPK6lmHPW2Cw2hzpBpo9g95VBcJccllCL02gB1qWbihTM7QJ11OhS/EQKUii7LemcohxOevlou+O30+m/63K78vvRbT7eMM7+dItxVE+Pl6BszNYM3/Z5/cJRB9RuWaw0Yg6uB9imr/hq0Ha6k95wp2ttILtVSio96daSHRpyMJyeeyFvAlMQoLDp1mws0L+fnh7Qz99/j7TBprC+lMIRO6ZabECZX/XXCyDPv2DGrap7ykdkThUizN2UCBsDWe65lYOaS5VZqxOp86LvMCdlNA6CMpHW3OS104JzQHB+y7vHIEaswFFsQFhAm06vc9RZYfzPF3gJSEiD1mDQzBrg2mBH+cQqjMD0aaGkMRxulyBGE/Jjl/Y7cPBCwIVmsN2SdQ450J4mwh9bzXtzoBascpZ1x+c2gEK4zNyjt9qGvlg3WCI8C95t54Gz71+nHjRt/JiKENzeR/xiV4WedIdzQ5iKDL+wrMj27G4dV+zGZga+sjVbW1lu9Wd+dKa9tiAqQTujgfOcr73ZuaSQufDacklbNnUzaZdlrdj0ZEe5twHkV8ywSiM81O5dZCvJJed1TqNfpNpknqlYTXAeMs2ezi1BMaYxCAgEH6CuDk+hd5jwLnl89t7xnALpjMW+bol4kkcVyVctiNe3JR/xS22X4fR3275qFwuHTYf33U8tWLoA3b0T3hirpft79LwP47bu0V6Hc2017GZa/Sc71uiRXIvcglk9dupfV4SZPmNJ8fbDtH+Ft4RKfjgNKvnhnFCvfzitmE3yYmKkwXySb2TLPHhNMAeazLnE7S8cUNVu6jfmXBKX9Ly9/sGVzQoDdQtr93khMcatblpbG5NgTGiDBWm5jToQooAykxQap93F+Q5XcUhlvnQN1w+/Iz+JRjoPcqjT5oJo+62iZj/20TvD3P52FIoBu5aVOuGe0aKieWG3O4SoAijSzH7CDFphjTguBFkAtYZDG6xMO91fB6MLlfNCJ2cAFaZqInJ7fLe3LyVjwwzhHDDQCq4r0dqfXT/8fu1G+OBpDU1dTKM/QclDkerEN+C0ze9AUB2WTsB2rdiQIseMIipXwkLelLevB3izYhaFRkyQwm1qMS2zQR5CN2QBZiXV84SJSY7JM2zso4dBGsZGCgiwpVU6Ye1FnB4xYUDNMQHdXnSHkp3koBINZFTyc1DBu3pDLQszFBJZmLNIYES6/+4iYGIyWxs4mP9zqTJs3qOuH/XC5gYYY224gUcViye9JpShUVj9ek2pjLFeXkEsQ8GgTD8zObG7tbOJ5UNYHjgE1Zb+0uFrIxWgpeRFBhrhJWbcbYeMPAbNwEKpUV6TxTgg3F7wzDKpF1pGxDOKVCLtNcGMAcPplsyHcurXvhsxJkxkDsrtQ/TxWuUPfVS1UF8GtVbEsOwwcAMLaDvIIVbRCXi9To4hTZ/JqSE9UheHADeaNDdAnr72jsHruwEmZAHkOfEV9YGgPkIuldF2F+qqzg1K7U48x9rll6VZNB/GDgVLU+jxAqRd70XzWShgc6wNypgozOEgEz/embGOASTO8wpQuiV2KJjSZRCp7H+KjR4xj8GGZSm0mwL7p7OkCmeH9vur8inLcAoT1r0mjj4Bc3fjFqUlw45fnkb0aag+9FVZ04mVwYAnde4EZcT1pURNoGB8x00tVcs0AmFt0ZZ8WUlortgSG5hQoZPWwcABGBpGRzefpuGgp2fvRmR/IJWs3TEZNLH9cQ/S7h6WPyFMqQKtEdZaEubywysWzF9vWosZZ2QshrrBN/h5oFYG0gbkYmRcoOPWGhdG0N1D+eStZfA7NJOFd6DHsNQtoQmRtJubRxsiN26bhxe++eaf/3M5YwYVQrNUuOytm+QgSoeXeyel6G3ue+TQX0gVQvh/6UVhDBPppcvI/oUMqIwJp9N/2YjFnUON/wT6bg8is7DxrY+3rKkeyxWEeVy4Fd1CRx2Qn3a+EPg5jxbe3nefKjyoDshxNqP4JLR+iDMCvncTnnLKV2CTYgMrvD4JeTXMFvTtI9S+ZZ48I1e7swr56eoJhTEsCuy3ehvn6/tDVPS0Wrai56xlP94cUctGQxV4Y/YnVHGPcJiN8m/fwuh/C7nnLuRSbPAMa0iIFALc+ZVxak1xIlSbKNT7t1A2K7d0E6zEcP49HB94
jB3An3AGb68eP71zKgCYLKzJ2E8U4Vh38+oosq7rFqYebMauCSwoyiCTal01CTka4hdvPuxLP9aoDze+sI0oYggI2IpVXeoizzkDWgm/mnXiu+WqD+weyqIoBPtSgLtzxZ9pi9+ww/aC6Hfjw8GbhoySpzN0rdRDRaZLpNuzasmXAgpIKORm0UnbkY031VKThbEccJHq3a8avbWR3j8avZv6HVphZtyhF2lNiN06WFSWwm7aY77oC080qCWoBKcgTPIfORvHYoRzLdPf7tHUTYiu7ITITlg/sbM3wTJXAHZvnvjVc9bSIc7crSxyXktXKiyozCLXA1FbKU+0kQqn5yvjbCM70IHcnSXd9Ibe2qTQQJPakciE0SF1JLbw1mZAdzexK0j7piB3VhldOQvkUucPUptUwfS3+27iJaegTaKqA1CJ5tIkHKeTbDYg+RynqVVezf4sjXw8ixSfuTBTanefj91ROiP/+ereGZiyoN4Ln7UCCZN7s/lH2p94+LDm8pl+9tWau3/82p3i30apY4bjfI+aQ9R5WviJTlF7d/gPu9Ne8eR43flYOVk9W7CYlvexRN0/1WXzcT397f4CfcSK4ZsPvkOrkldjmi2Rh17h3MfHr2QILAF+7fs8bWjSbCB2Lt3vaqw7d/3vpf2w0VVlzLtR1m0Gl6lOwnHOTWmesgCdYtag2K1AzZTYiXutLOdaz7+0vEfvuba+FKDY4epzFHVhDgQvQAoDdC9RFDDlkjyPS1Y5SyzNlGHpPvp8ydG5tddafcH5Rn31974VSqqGXbqw0Nxku4D4Gj3rUbMeroGKcR5r+C3N9cV7RHihDahA6oV1BtLuXxE26OdLH+f5TPYS890wffX6VXD6temWaQtmyMkPAdOFh1wSzF85SIza2TT2BrJcKqzW8cST9XrWuO7TUi5TJlxdt1Ajm6qwyXAzVkfo99mD6qDxhMgsY915tsGsvZ+jj5WvEUiBw5aC6HDuyM1R2v0+1NH2acaBSbu5ua8ukuhDWDYyYUxoUEZfxHsPfFDoOdmLUj/QOYg9RsCh9jgoeaXdiYXN2sF/10ZTNhx7n2JjdBvfZcwY32FAOANhtD9AQRaN1iFrnYNndWG79a/BWleG6wgWJIGqIVnBBJGZ3S++ffSDv6t4ovB8zkhHnG5REF64/JBjFym0kRmoKiCKP7asi/nSm2n5sYtCrImvFTOwa/Qu984HcyVKZki2yMKk0rHlKYz+9+GLDY3GWMytva3Bz6HRdTNI2UujBg5bikuDmRw/xzEmxxvUcanzcxxDnYsMxyVu1m7EdiLeR2O4kLZvRDNk1iXeiWuX0EbQ44xvxjhnHsW24DHA6BNZjIXBJesozJlgPrOARVpYWb29ubl/V8YlfZH1CE3GQrYzeumJp2cAMy6kuKR7YuhltQdAMJRRj/T3tOhjyaBp9HvKoKfdHwtD0zX0xNDPO3yFitRzuzma5W3sSA8UgivPhhw7cwnoV8qn1BLUkpAiZz7pN2MCq7VLocTwNcN2X7JZa4iXJ+0qKdTgtotewxa8OvLttQmRnRDNGYd+Wfca+e2ywejkn1QuqP1YT+z/t+wJh8pxxU6F+rwhN283KNJdThx2vFWnRtwR7w1t62hmXJLnQW9G2ITTgNHO5JcXJQRK9pceag0jdJaEjX4yRnvMkQ0vMVMcDjQRzLm3cWEDWlUBwjf3A1Vy4wqjE3DdfEB2QI04ewb0+fHu6fYRSYUeb69ubh8vhiQcRMoEDNzof4vJolHcVYUIvPfzXXhk7SJurYBrg18wpBsAdjiT4FKSWnV7yHXSLl2rqmodNSg2rVe8d6cuvMMgMsuxYTPGmVnvqG/vlFWA6i+dSuisdCxAk7JK2sun7j2eUzNe//J3Xd0EY3DhW+laNZlWOaYi0CcL/W2aLLOO1vXjQmqNfGfVJhw0LK/Wqr5/IHes2fIJsDmoM/OlUhgFVFov5rerkRxV54gPM1oMOQl6PeJwHTZDIf/FX+B6GHSOU389UEmOSMuLiHfow4EBZUAdBp+MiDM0j5yGr1FFPgZdkuGX4RDWW72akOKt+l3Ee1tsTfpmebx810Izo38cVCYGhsrE1wB1hsmzO5qRkAUWKSQKiFRUT4gCv1w7rlcf6IaSODXyU6MwNXJTx/edzNkSQr9n7R14+zzTVljuUPmgESsxBeaHwGo0cxwOYMUElauJn2fQfY5/cx6BhtYZrFIwNRR+/vJEesDbfn4oCr4t93eqNtk4KJyZ2EGmjcJ1hjl3Z7zxbsj+xuyULX1y5IBbBVxfRGgcwuS5yBMFxsb3UiTh3QJDun3LAXf4p2ZF/Lxlj0ZZwYynrXWR51J5JuWSCXPJxKULIhX4GxfmgE2hwEWLzQJppbTf6jhRCXCnIjRYowXO9UL2vNVpQF6Ub1Sard2VEwFepCvcld6xZXHN9oyCu1+wFwMIJgtIFswkLhSdzAq7+gbE3jyKVb1MI+6QXRd/eb2sn95TdRjBCnTBTaJhyOXbj+hHR4IGs4vusGcscrdOh71dq502jcamcULLtaOHvZdzwjv9r6I6MTIJEUfu95j6C0+O7IrumUJNawT2CB0fb6b1/XCJ30jkX7YhJIUyXbPX0RV57GhLfMdg4o80vpZ9sMvfn+Ncy8Lnl3wjY90jHJjPCJINPaUc5mYkcAoyzNyGv3aIw6Ux4+0f7SbEDLAulDt93+7PK+32jwnFjK+jfN60ae1ztLY9WOucrXtWCmPMU7fTH087dBteyq3Zn6/Vgun27qW++qDW5yfaLwyv0+2jpUTOE//y7OHXVu1Ymp+hgza/ijgvRe2ONW7RvuATTtW7MExN4+JrDb5mPYsO0Z/wHlFY7jVZpfvNMAX/5qSYuy1fRH+BFKRYUR6vJVnnW/xwSXs6aMTQovlft08tuq1yRd1jogvDHnrzYkR6H34fnN4dJdhBSL65vb99uh2a6sW2DopBaP737dXNQfq8TxfkxnsrhlSGX6dtbTiKyh3dHKfSWVEyvb2/vX5Cvzqhu7Pf1tANrBUeSaIJFuLMh2/a/XTRyQZafO3kYHacgl6BKdTXAj8Scw78nI252pq7SztXuG/Bke4Q746eqFwJLjF9Hcl4sVQ0uMV2mMteLWxY488d61wKV+8nvKCu5jyTdMt59CJ/bbiRgvB+Jh92uWqnD94s7Rf9LSf4d5n+9NJ+8c+A6vbTy0s4d+Cna7yB9BC5+RWHw1sr5BwBc1vr75FU6J87gf08JrCfX158XkadEVjsN5szpU1ilaNHNeb0rrMc1GXUOZf6KTMiJFxtW6kk2N1AeSTFv6ZkgwVG+mxLY1G6q3tcT9EMSsO7mx8ukI+7m7OyJL72fDtrnKzcQq7YEQrr7hIP98Rtl/at3XI/KE6720uLc97tNf3UfbfXgReZ6S8ngv1yVrC/nXiRWbiPIwOtcQoJTnslbwdoLM1zJV9Yhg2gkI+2PPNkIeHe5+fvsA4kxuymu+BnywUp/ptuj4bXw5Udm1Y5ztIgqMqhh7ldAW/zigYF2J2AYlkGlGEDfEswUGIR0iRLptnme/GGcTI
lnBIBE2jOWbrY4s1Lys5CVZt9RjFYYl6ZvQP1warSuJRGfe1FWbTU45JW7ipma0Qw5+U5+XCu8WNYYr7vcw/JevPqwqFlTqn3XXgXDyHLzToe+xznkqwWe64e7iL77FqhzK9wz12EI4AtJVkQlbk9eyo73obUk8f+0bANodPfpsFmNsZtdDyzQe5NbQ519N2pYZhx7k9tX8rZH6Ub4WhwWmbgIopB0CxzcRKWZS6ORvJ/D59OE83/BwAA//8uhdEU" + return "eJzsfctyIzfS7t5PgZiNJYfEaXfbEye8OBGSqB7r/Gq1LKrH3tFgVZLECAVUAyhKdMzivMN5w/MkfyAB1I1VvFZR6oi/FxMekQS+vCAzASQyz8kTLH8h9Fl/R4hhhsMv5G/0Wf/tO0Ji0JFiqWFS/EL+93eEEPInfdZ/kkTGGQcSSc4hMppc/D4iiRTMSMXEjCRgFIs0mSqZ4GdXXGbxMzXRfPAdIQo4UA2/kBn9jpApAx7rX3D0cyJoAr+QyH5/QKNIZsIM7N/wY0LMMoVfLOJnqWL/twaU9t/jHNw4xI+DYxOpCOWMapJpiImRhMUgDJsuScymU1AgDLF/MAw0YYJQkmTcsHMDguJHC6akSECYQQWyY2CBcaZklq5DWKa7PJChMz34If9zGE9O/g2RKf3Z/WHcxJHax+OEpikTM//dv/3wt9L3WriHHKQzOzBZUJ4BSSlTXqT0WRMFWmYqAj1YoUB/GEyy6AkqkmuT3gYMdyizKaFk9IH4UVcmjFkCQjMp3gjjPqH+l2GtQP7+h4FfJYMfBj98vyPqWGYTDn2A1sTMqSEKTKYExE7exfIlF/c35GsGarlK0oRxzsRshZTyStiA4U8/xp8kksJQJiwcIKANS6iBmERzqmagyVQqspSZQusS1jcTNUMT/uUGZwKGlv5eX4Jlarx0Kp+1UdQ2Vnm860DDlSNhkNCXlS+HCbis8LGRc5/oC0uypIU5ni/OgK6IKsrZdJC0imFqAkvKuvQMCoiOFE2DPuUu4XfUqec5i+bFAA2ORFurPCmbaEuHTmnF/tQ9yzZizsdpFPSqddjAEuIdTz4s0SlEbMogJs9zEG7tlPhPaMoaDNpS0ETGk4OkEwY5kmzsD4c45fDyrS2+URZFoPU04w/wNQNtbqkBES1bF2DTJBv4Hf5Z8XM3vHVcOp+aKDe3tlFH4JSNRy4S+pcUxZ9GRgFN6tR7ABky3wqy0CzDEiApKCbjQcOv2rhT5hBdNBmcgiErHmd1iGaDtnGI8PPPgjMBNyKGl3tQEQhDZ3Cv5EyB1oNmfBuwbSmtNJ/OCiySScrB/satWEoEPJMZlxPKiYZIipiqJWEWKGGaTMAKhMaxCycpMXTCYVUOgc57JRfMRgUQ/66YgSua0oiZ5RfBTL90iiyZgLI0pgUG8mxBkMijIJmFgS7EU4IRcwv9W1H5ADR+bSIV0LhzGq+k0FnSQGBfZqWgrYmeyMMhcgGq3UKcNQ6vpY2nSEQFMYpGT2Qun0mSRXM7G0ZaZXaauZLZbJ5mxq4Au5N6Rbujs2TtEI3BVF2Gq0uxfyE2rrz/kWLzEGul+AApZxG1rO/ZswOnqQ6imIB5BusiBMnSGGNwZiAhNE2Bop9mAkWYu3aNrt2ansYZpACiHC3OMJ0RKmIXqq2OTIU0c1D5L/xk3oy1uKFvJRx4VFRoGll5XEkx5ayyG64OdYBMH8BunL3gzjksoBSnxRlYl24KKJTbBYpodC7+SIooU3Zb0jhFPpx08tE0AZxOf6vL7cLtK9t8vGGc/YWLsVdPTxegbMxWDd82ef0M0UFstyxWGrS+R95MbNVXvBlqG93JzuSOltpAcq2UVHrQrCXbn1G0k7PjXsiZwBkIUNQ0azYV5NfHx3vy87t3RBtqMutLY9hjx1SKDWLmVv3VHKKnj5Rxq+oOeY/MKUKEKU5JqDGQpI5bKaipVIm1OgGdE32DOcmjcRAxE7OSm7xCLTgGCei3nHv0YqQKELEBYQladXqNo04y434+pwsgQhqyBEMm1gCXBtvLJxZhBI0f50oaw+F6AaI3IT80aT8SBy8RYGgG7ZascciO9jSB/L7VfGcOlIJVzpLm+NwGUITmVxPkRNvQl+oKS4RjwWk7D9C+v009qNr4PhXBu71P9MWuisOPq9tNReIPstfvbpErdmMzAXd1N1laWbb6Mzc6005bSCxBo9GgacqXzuycx5BgeG25pC2bmpm0zrIWbHq0o9zaAPINM6zQCEdq8y6ydsglp2VOk49SrTLPFKyOaOpPmh3OlqCYxiEI8IC3UFekJ9NrTHiTPH533vGYAmmMxd62RBzkXkXypgXx+rbkE30p7TJQf9v2VetY2O1x+K77qTmbzUE374RXxqrp/gY934VxrXu01+FcXQ2bmVb+yZo1uifXArdgUo6ddr9XhIk+4pXi9eXosBveru8S/yV5luDCvFxaa3b4pv/Cb+01+wsVB2g0d+tDpna/y6Qo72L9ATCGiKmxIe8CIWm7TaTRPNxD3TGj5PmEWgPHhDZURHBGnudWPqZ0oqAgVWBZrit/bjh/3rRhdqzBpdcrb9wy+CaZY/Xmc9oFZ6zBMdJQXo8Dc774/LU6RPtFw5I1Hrskx/6w1oR4INjfMsjgFsTMzDvCW+Oqde51vcsPsZ4pM6iB0oYU/gYZNesAkh7zHW9xH94RbVVHdfP3z2U5pKC8SyEnN5/vR6ckBs4WoMBBz2VpP6x4uanbX/szvOvLkV98A/LFrrNnZubli2E3wGg0zNeoFHy5iS3lW9deVJQmmE22RvCanAipEup8uJHk/c//+K9aYHRa3OSt14JueHOZKW0uKbd2rANuFJj+iWeunNxnKpUaENLJLH1/ekYKBSWfU8MS5MavwyE50ebHU3d1dSV5+Fv042mVGEdvjHm2U8tPXFR0IvGkr0lLIwWxDTpPrKZZEDYKKp0MVT7X5keEgBMrSCgTpSu5iWXYSkZps8o9Wr3Aw0ErsHVHQfubQ7fitNUTF/xQzlfsudu4dGReLAB31HVkqlZWU5dk3cT8GAStxegSh4T08lOrFLsgOZskzJjytXseo0fvD4vRo/fHjNGv3h8Wo0dpNkBOD9KVa15HvI4oh3g85ZLWv7BFOmbVklDOZYS39ddX71HvMgPlowGqgPgbXW43VSTTEG5vQ7A4aCXEGaFxpumsOau04Yxjm5TSXAev7r/kli5fWGVs6Ijtt7LSxncT3olzHr0gBorJ5GXgjtGiwDyn2u5ZVQYx0cz+hRnyTDXhNBMYuKNNp8rU81TKxOhMpTzT4yMQ5aeqUoSXU3gpVZg8QTKBJ0elvYYzEfZnV/dfrnAE7739cwumyV+g5LaU6rHL/q6fG3REKtLSSLBdK0IaklIWk1g+C0vyqrxdNODMipln1oBGGUaLNM6vMR0JzSQLMM9SPQ2YGKTUOu3mHf2hlPqxiYII2MIqnUCf5acnTBhQUxqBri+6bWGPU1BjDVGv8EsxPBpqG1J1RInMzFEk0CPub10ETAwmSwNb898F17+Qph/tRBsO0M
fawIF7FYuDXhJK11RY/XpNqfSxXl5BLF2RETP9xOTARuBHE8ulXx7UB9UWf+7wtZEKitPIBWUcz/GN3IeajoVSQl6SRT9E4GboyDIpZwj1SE8vUgnYS4LpgwzULZl25dSv8u18/RBjb63a5WCghbiOBdROZBer6AB6nU72Ic3Vw5v9dLEL4nqT5gqRh6+9feh1aayDaA7R09ilgnZE6gOkUhltd6GYLllBanfiKdWYGCHNvPphSK21mPzjBCAak4arn/lzVk61IQkTmdmeyLEb78i09kFImOcVSGmW2LbE5C4jksr+T7byuMHRYMOyGdRfs+x+nCWVf7i+2V/ln7KEzmDAmtfE3k+3b4bhlgvHz+uEuGOoXfAVp6YDK4MOn5jfiJhFmFAdNCEG41LFS0e1TBMQ1ha1nJflQFPFFtTAIBZ6XCvZ0QFD/ehkeDfyJVgce1ci+y1RsnrGhtfE+p93gHZzv/iJ0DhWoDWhWsuI4fkw3oDthTWbcBb1xVAcfIWfW2qlh9YhFwPjPI5ra1xYRG7u809OLINPyURmzoHuw1JcQoNIxs3c3NsQ4bh1Hp65rPEf/3E+YYZkQrOZwNNbnGQrpN3LvREpOUnd4w7yH6IyIdx/6XlmDBOzczyR/Q8xoBImUKf/YyMWrBAT/hPi0w0UmbmNb128ZU11X67Az4PhVnALDZdj/LDCGMCPWRPj+ra5HMarJbBd0ugJRHwlhQB81NjRY6+qKKN8+DJbhTSlihh8SUAbOuFMz22w6d9TYoAiaUz87Y3K40wFM6YNZqIE3VyTT/vr4+P9lYxh7Ckev//jj46pxBdn7//4gyjQqRQa3Juz8FANEzwPBP2hH9AfegX9Uz+gf+oV9M/9gP65F9DXt5d9cjniDCvhWdOAoHUV9coa3RJyjzzWoBagOoHs32V180iynkzocwaLfBeEW1jLhLa9WsVQaUH5mte7KeNcLkB1B301xzS8WcutugqP6CcQ0Uy7DFqdqRmQrxm4y2xr7tfoCFBu5stfZWD6oW9Eqkyfu+GLBVZedRjkY3GMLbVjZCkrJ5x2AbaVzSeo4NyiFaBO69py8nhV/jS/kw9RoZJZSE2lK3xop/GL6FkkmehWKL7uR6eZoJjL5St+nBEmQvbXmQsLMRPWfmU1YMEA0BTv3B37G0w9yYRhfOXARhl36KAhj3y8A5kDjUGt8RB5ocGL28uLyLAFFJGeE2Q3LCrqDlaCPp83RaxalvWUIhTHOOdcdNgJrsZ6OXurH9nvUzUDsyX5IVX49upLVynCTVRXQdbeR53cXn05Lb8yu0jzR/jk1v7ycqNul2m6g+fjyVPA84ogyxH78aR5r6TdNEBnj27aSPYX0mG67YWWF2cpvnroRrU61BH3rCVy39z2tdmm9RHpvAFrdoVjP96O7mAmDaP5dr2P0PTxdlQhkglmWDl69psC1LiYxbibz80BoUSD1lg3MRybVgn25ZQoToRh+vpNw/gje4F4/OBd37gPmqd2ivPcu9KVE4vitGID2AeImYLI9AJT+cE7AfhF8fEtS5gZX2OVCYiPiDmSGY/F96b6UKq8cfjycBuuqXK5YMK2VS0X/tgNBbdrR9lBBflf/7Xl9vPDH3/0QmvpSMURbbG6PShSLRWb4flrizHYfsPfH/yWbX+X+H/uE3/LGUCn+N+96xH/u3c9An/fJ/D3PQL/0CfwDz0C/6lP4D91CfzmfvGPWoDdRzzVEFqvBgn4stoCWg+3xxM6O3xx/JKnCe92gtiwTeuDpa++QXtravMTErRef0LNzz4EtOkCrPGotErKHCsjuVoFzOiGojaloV/3DLsQyk78zzhcLyjPXHJd1+AyvlldZmwBrlScO55T1mz64g6eGCrIXGZrlngPp0t7nSmtOyWtJfUfeiBRDHPEw4g7N+kbPYj4yOVzl8dwaw4hplw+a3JSvQA4XbXxm2x2Dfj48eq+f/DWS/VGwO3oCATcjnoj4MvwCBL4MuxOAt+C7VvB3P9ZWp37VmfmVMR6Tp9CmO5L+voLXlFgKcrJh224daXutCxc8K0NOAtT1Feo2aI+ayNOp0rhRGerwstlWnBx9xY6t6/prml6I4HyGWEi4hleDT9e3f/95n7zjWIVem8CaYBfVv01AB9RHt/Eyi5T5Ne306Y11F3dj53tGj+Ahi4PmFeTDjQYcvIwejytvsN2b5jyCwC5Jezr28tXwbxv3o/F7JTp1Vnt2OtY7dj+atkzwdwVtVGk0CzGPAafxPGKiSTr0OVJJqs7Ik6TSUwP2g25IY64E7rFCd9ac70bsfC3M92Hgta1ahflTTNRXKvg25YXiDLjMnOCSyt123Mfu9taEZf/r+9wqjNu3Ku8fOgNl5I+UbprIlnBwP2xDYHGt2AMqM5QfpSKUL0U0VxJIbFmSwB65p5w1OTk9LPSrQITmKggsMgdRww0PucI1acHTjLnPNe4+CFowwTOPXTVCJcfKeOZ6iQZpDdKc9Bb0ZiprvrI4LOcvI6hT1Kjpmkl6RREnMdd2MvQE7G5S0Sfa6GWaEqxMKtvTrHmbsBYxy/VRSe1Mq1eOHn6jhK+t3K+TkOlB9fXSztl8WXG8neWCiKp4rBb2MDaq3zDfp1brM65nGtAkXtZJI4WigCu98Ua326jGLXAo/Q+UH/0BUV/H5EHmDWsRoewAO+acVb2EIFW/61Yiu99ma4AvjgkidYEMqXinI3UdhvV7CQhIkWlDuve9ERbl0rfhiKUHlmAwqSg0HjfidAVOZPTTWwlMVuwuAjk6yVaW8guavztyoByNNPtxcQ2sUyHksyfArw+RVaDY6qqEnLNdTlvFSHTvvRiw3UGNTNq4JkuD7vOyIdpCeLr3e9dM8DoiWBxR8uCu4tH4sewwTh1tUCct2huTf6KkTqe39yIj0ompXiqY6Wo1Qjz67bMp/y6uRQfralsXIAeIVtfB284/mPCKfy/7q82YP6cmUfZN5/zclm+CvIKeH/pvz2rEXaPnPZPI9ai3YnZxTX+hQvH+73NL4J+fKjVQsk2cK+LI+a+ExDKp9k7I8YN5b1U5oKHPMxeHEldGTBV1DXa8d6c0BCIp1KtCaJDEWiZ9awMPirDDrFYxRRK2TQ+Sdw92w/lgGLu/7LGn7vjwKGSaR/ow2ljrPDtf4PF2witbx+yUoX1YC9SAd6Lddsa807GzePu2Zes1FztwpuUoffK8c49SvMTkz5eeZau5721qGfzbbTWAbSKD2vrpOJjtnV6GB7Y1umgkvGhnqSvC//dGqltU1B+11Lr/1Ma/til4WNq6IRqGJeWVi/khIlqD6lWOyTmyCZ5kbgBVaIR1F4Vg3wXnofQDPeOJnBy8XB3iirgeozFejOoiFPdzKu9YF2VLUy5fFXow0BFTBJIpFoWqT+IIXxxeLmpoGkJPYtBGDZlK3WJuiCBWrGqc52lKWcQF8IvZh24xpHFHwhzpGeCfc3AAnD6nn/DDrsTia6+X3fkjXy9CYczuKdS8Smmc0rb63SO8WpnHENq5o3Y9mzlUSw1mRk8WLIu5uazJicKaPz3ShtTfVpuz
UXxbtAFMEw/NWMPFSi/8rF7TDSmMxBm/G856cdi+KyR0W+3ZOReL13YCYmdsFwGZGPJxqkCoBMOY7d6jlqMvDiQLQqgKipimQSue1CtyMfaSEVnxysM3Qbb4yA6ba1J5zPyx5mGeIx7P/fEccziLnUkJP6XZiA3w9BnRLs2IxbDwD3YBryHvJfazBSMfrttBi+5jd7HvkE+wtZcmjGns0Ey6RA+p7MZ3sn71o3uQadryx8+wzBTarzrNqASNPK/X9yigcm3UjvRZ63AmMmN9YH3tD+hA2TJ5TP95K4CWxvptSFFZiDnd6hiHHQ+9tfFh6g93gxT8mDRP3jZlJyPlZPVszkLhX5dLFH2T2XZfFqOfrs9I5+oYnR46Xq+FPKqTNMSeehnmrr4+JUMgQXg1r5LMvZtnyoUo0t3uxrrzvGEKrcfNroqjHkzlWWbweVMj33C2qo0D1mAqJglUuxWoGRK7MQ7rSx0rcdfWs6j77i2vmag2Pbqsxc6P0dx1bUJVAw05jJ66hdWPkvIOMjD0k34XBFzdGuvtfq8863k919kSqqKXcJiTDjZOkJc1X+2QxX87lqyMM5DV4Ca5uaptJk2oDzUM+sMJFZ9oob8fO7ivLzg23oyXT38V6HTrU1cpjUy83O3w8nE8JDLiPJXDhKDdlaNvYEklYqqZWj+b72eNa6btJTLGRNYKT5TPZsqv8nAGYsLrE32oOisOohkkrDmc7bOrL2bYxcrXwIYA4eWEuvduSOcI7f7u6CLeb/QhsPb0rPcHYAlPQNjQoMy+oxkaUwN+EaAjpM7IXUDHQPsPgL2L2M7hZfbnVAqvdTpGBtz5FdNzqfYGN3Gd66prLXA4dYDWzJG80ozEmudvWfFsN36V2+tC8O1BwvGHlWXrGAikondL548uMFPC54oOp2yqCFOL+eFI7uiTBuZgCoCovBjy7pwXjoc5X/GKMSa+NJlBsXWcfneeWuuBMl0yRaZmZlEtjz60b8dvtjQqI/FXM96pk++ddZqkLIRowYOLZdLnZkcN8c+JscZ1H7RuTn2QYeRYb/gJvXWbijiTRi5LxS7Y0TT5amLh4BLaCXoQeObMM6Zrza7noxdIou+aMDDuhimWDBQCsKpmGVWVifD4e1pHpfsStkOoUlflK2NXnakZ8cApl+SwpLekYadrHYHFHRl1AP+HS16XzKoGv0dZbCj3e+Lhqpr2JGG3bzDG1SkHbebvVneyo50SyHg9aw/Y2d4AP1K5ymlA2oZRVnK3KHfhAmqlniEEsLXhNp9yepdgzthU2uvFErk1i+9ur3wajhvL01I7IRkyjjsdupegl+/Nugd/kHXBaUf64HLbuv1jCtkKpTnDc9+xQz7SYuw4y0yNcKOeGNoW6ZmwmX01FkzzmZyKmTUT/KLB28Oyearh1LCSDwZ+43+uI/0mD0TXsJJsW93ElHOnY3zG9DiFsB/czOhSq48MTyAruElsQNqwtkTkN8fbh6vH4hU5OH6Ynj9cNYlcBAzJqDj1oHXNJpXLndVJjzv3XxnjrL6JW7pAhcf0puomQCKdI69SxmXbre7XCf1q2tV3FoHDQpt8AreY0Fy5zAimaTUsAnjzCzX3G+vlZUndcblhPJxPMkdC8Tj/JZ0J5+6gfSbsvH6J05Lht4Y1N/ENt6XFgCLxPlUscQ62uJ5bfOtjW9djNal+v0tuWPNljsAm4I6Ml8KhVEQS+vF3HY1wFFljrgwo8aQg0gvRxyYYdMV5eFp9Fakczpz7y1zOGIWtrTr9GHLgNJT7Qcf9EinTx45jL7KLfI+1I0T+tIdheVUrypJ5YaIdfDOFluTvno9HsKF2on+fqQy0TGpTLwFUic0esK3vONoTsUMxq5Kgx5ECtxyVW277EMzPvOpiZvaF4jQBKcO9WenbAE+39N1xsZciE2eqZUsbFPfacQamaxav62NrEoyx/YEPDMRy+eBm6fTfc50Cgqs8pS1zhfcKqhw8+e9Rz299c+3pYK3nf0dqk3h7SQ162DaKFwnlPPQMmMdyVMs3OBqJLu6hmGilqQ9lxfhE4do9JSlYwXGxvdSjH1lxC7d/mNDJQg3b56jkd9ghv7tOktTqRyTUsmEOWfiHINIBbg4yBSoyRRgtFi9IC2U9nsdJsoJXKsIFdZoQVM9l+bVeBH5sq3Y1YrzQF7A5ewMbdiyYLI9iwELku/EgIhGcxjPmRljKDqYZHb1dUh79SnWatEgX+PFv4Ny0ztU2wF2xbjGGrpcvruBfkAIGsw63H7PmKW4TnfIJ95915Ubm8oLLUxH93uvcrfEluTnWI+NHPuII3V7TP2Vj/fMit7xCHVWArhD6PgwHJX3wzn9RhJp5qCIwH4c3npsdHRZGjLaxi5jcOyeNL6WfbDL373jXMrMnS+5RMayR9jyPMNL1ueUcpianohTkFCGG/7SIw48xsTsvIYkxASozlwXznp+Xm63P4xjyvgyyOe7OtZdntbWB6u9s8XPcmH0+ep29OGwR7eTLHoCM9Dsr9dKwcS9e66vLqh15xMeWyNuFy2N5XQsJ/+GyHS/tkrP0twMDdjcKuI8FzU+a2zRPu8TDtU7P0xJ40LLi7esZ8EhuhfePQoLuynl7tcVdJEYAbmz29EHL7szomBGVczBP0Rdpi1+OMc+6zRiqGH+5/VjDbdVrqB7TDTRsAFvmvWI9/5L53jXXMF2Anl4fXv9eN016nlbBkUnmH+9vhhupc+bdEHqPpXh86iuDXuhXJPNcSjOAsno+vb66pF8RqHj229r6DrWCkfJWEdUiCM/vqnn0wUn67G4u5Ot2XEI9QpMpt4K+QHMMejnrM/VVt1d2rl8vQWEjhSvj55i+Sy4pPHrSMaJpcCAi207l/08BwXVRrIu9RnvnCcybnmPnqWvTW5AEHrmYthV6ldmsZ/tbjnBlQb/6aVeyahDdfvp5aXaRtbVp3CFQbeRm1txtCgRCwy31u+IVOTHtYT93CdhP7+8VPvLHoOwkG82ZUqbsVWOHW5jDs86S0GdB53Do5/8RCT0bi5UEksvl6ufNbHASHfaUlmUWLoHc4omkBve9fzAQD7sbo7KEuA01S7jpoU1KCtcyAU7Qu9NGj7RoUr8urWb7wfFYbW9tDhmba/RXXNtr1esfHufYfXLEfuri7LwVg1CVYsEtKYz0CTNfIHN9rpyo0+jkWtQ8UBNV0CUr8tTan0x+jQKuEjsuiWw+iPUMq47NHSfp588Lfc5Kd0W7FvllV0B7o23XwN3I2JkyqIt0N5Jg+lWmODim0L0B7lgL18GpobV0kyBu3Sa2KEnWKtdxHjvtCtpH/Htbl90oQEoYfcvhY0MRO6KlnFjOfM567puaRVyrZvuMrAaf0qmiIKkkrNoK9Vvo+H8RiwoZ/GFMYpNsq5at3VCVaWHcBjne0JzqHiCzxwB5P//3//nKr+9UOu5zyq/zn9D/s/o850rvR5JpSAyLpkxoWZtMf2NfLyT3rp8M5x0TSKELDF0R/ofIFZsAeJRDvnXXqlFqHgBl0gfbjQ02tnL8DxKT0b/VGDBZ/G9jSX3pGP0afRJCjN/
lENqYJSCMF9Gw05AR3OqZq7dgWN3tSIlZo/aODavZ+jT0SPKQcQUH8uauX/+46rWlfx00yXA1wODvq9HDfp+O7Cgq69L5vkxprOdLrE7eGCTpkq+sATLjBcdfBwsIqQ4dwfOcR5a+VveBpUswlgv3Bg4XXaXftWyiMqAilwCPzcmMq2WqlJAURdZkkDMqAHeciiS0yKkGS+YZqvxaTeb7apNcC6MTDmbzVtONXJkR0FVZ59RDBaUF9u/LfXBqlK/SIO+7oQs7Fj7hZafrk6W1kDyvF6Qr+/gYwXi3r9sgKxXSzh3LfM4Ds5oDQ8hSc0ylL/op1hojT0X9zeBfdjbirkV7rhLaCCgJTUNRGFuj36lv7J/3o7H7qNuH8aMfht5m1kZt/Lyi3XScKg61N5Nh/ww31zjoaN07qkxpz1WDN1u+utykxverTHlbSqO1JxiV2Dds6vSxGE/VHmjlEtOo6e55H01msg7phS7xSVJ7CK14RWZhOmJkitVmtfAvpMP+P0jgg6eAsETWgecX4TpQ1PfcIS9DZ2WCeDu4s1ativKeR9NevxjUojRx1dL4lnP6zLL8OCRRhECaMUYWgD0gRO3vTnWXEz5G8w6SJ+xGb7m9idTJgqTFLMEhHZ9m7WWEUPXhldnhfKsquoiFQcp6iIVe6vpv+7v3r4PfsyEAD4y3d08lFoCADE4/ADf69kPWGTZos/IO8JEjE9PNRl+/v0O96E/lv745d796vKf9/4n5U+vR48Xl7c3o1+vh/jLd4TpogAZ5dwnXiOYNQd0jvwhNXSDc92e/lr8Ue7UYzXCc2QLRJu86q6QVhoileH8dwAAAP//9ZNpiA==" } diff --git a/x-pack/metricbeat/module/aws/lambda/_meta/docs.asciidoc b/x-pack/metricbeat/module/aws/lambda/_meta/docs.asciidoc index 1d1f8e22a7e..70f1263823c 100644 --- a/x-pack/metricbeat/module/aws/lambda/_meta/docs.asciidoc +++ b/x-pack/metricbeat/module/aws/lambda/_meta/docs.asciidoc @@ -43,6 +43,7 @@ https://docs.aws.amazon.com/lambda/latest/dg/monitoring-functions-metrics.html[l |Invocations | Average |Errors | Average |DeadLetterErrors | Average +|DestinationDeliveryFailures | Average |Duration | Average |Throttles | Average |IteratorAge | Average diff --git a/x-pack/metricbeat/module/aws/lambda/_meta/fields.yml b/x-pack/metricbeat/module/aws/lambda/_meta/fields.yml index 1cebeff318f..91becec6fef 100644 --- a/x-pack/metricbeat/module/aws/lambda/_meta/fields.yml +++ b/x-pack/metricbeat/module/aws/lambda/_meta/fields.yml @@ -4,3 +4,45 @@ `lambda` contains the metrics that were scraped from AWS CloudWatch which contains monitoring metrics sent by AWS Lambda. release: beta fields: + - name: metrics + type: group + fields: + - name: Invocations.avg + type: double + description: The number of times your function code is executed, including successful executions and executions that result in a function error. + - name: Errors.avg + type: double + description: The number of invocations that result in a function error. + - name: DeadLetterErrors.avg + type: double + description: For asynchronous invocation, the number of times Lambda attempts to send an event to a dead-letter queue but fails. + - name: DestinationDeliveryFailures.avg + type: double + description: For asynchronous invocation, the number of times Lambda attempts to send an event to a destination but fails. + - name: Duration.avg + type: double + description: The amount of time that your function code spends processing an event. + - name: Throttles.avg + type: double + description: The number of invocation requests that are throttled. + - name: IteratorAge.avg + type: double + description: For event source mappings that read from streams, the age of the last record in the event. + - name: ConcurrentExecutions.avg + type: double + description: The number of function instances that are processing events. + - name: UnreservedConcurrentExecutions.avg + type: double + description: For an AWS Region, the number of events that are being processed by functions that don't have reserved concurrency. + - name: ProvisionedConcurrentExecutions.max + type: long + description: The number of function instances that are processing events on provisioned concurrency. 
+ - name: ProvisionedConcurrencyUtilization.max + type: long + description: For a version or alias, the value of ProvisionedConcurrentExecutions divided by the total amount of provisioned concurrency allocated. + - name: ProvisionedConcurrencyInvocations.sum + type: long + description: The number of times your function code is executed on provisioned concurrency. + - name: ProvisionedConcurrencySpilloverInvocations.sum + type: long + description: The number of times your function code is executed on standard concurrency when all provisioned concurrency is in use. diff --git a/x-pack/metricbeat/module/aws/lambda/manifest.yml b/x-pack/metricbeat/module/aws/lambda/manifest.yml index 71fd0b7ef1c..f1f18fd8783 100644 --- a/x-pack/metricbeat/module/aws/lambda/manifest.yml +++ b/x-pack/metricbeat/module/aws/lambda/manifest.yml @@ -6,7 +6,7 @@ input: metrics: - namespace: AWS/Lambda statistic: ["Average"] - name: ["Invocations", "Errors", "DeadLetterErrors", "Duration", + name: ["Invocations", "Errors", "DeadLetterErrors", "DestinationDeliveryFailures", "Duration", "Throttles", "IteratorAge", "ConcurrentExecutions", "UnreservedConcurrentExecutions"] tags.resource_type_filter: lambda diff --git a/x-pack/metricbeat/module/aws/natgateway/_meta/fields.yml b/x-pack/metricbeat/module/aws/natgateway/_meta/fields.yml index d6694c4c656..8ff5a3a2edd 100644 --- a/x-pack/metricbeat/module/aws/natgateway/_meta/fields.yml +++ b/x-pack/metricbeat/module/aws/natgateway/_meta/fields.yml @@ -4,3 +4,48 @@ `natgateway` contains the metrics from Cloudwatch to track usage of NAT gateway related resources. release: beta fields: + - name: metrics + type: group + fields: + - name: BytesInFromDestination.sum + type: long + description: The number of bytes received by the NAT gateway from the destination. + - name: BytesInFromSource.sum + type: long + description: The number of bytes received by the NAT gateway from clients in your VPC. + - name: BytesOutToDestination.sum + type: long + description: The number of bytes sent out through the NAT gateway to the destination. + - name: BytesOutToSource.sum + type: long + description: The number of bytes sent through the NAT gateway to the clients in your VPC. + - name: ConnectionAttemptCount.sum + type: long + description: The number of connection attempts made through the NAT gateway. + - name: ConnectionEstablishedCount.sum + type: long + description: The number of connections established through the NAT gateway. + - name: ErrorPortAllocation.sum + type: long + description: The number of times the NAT gateway could not allocate a source port. + - name: IdleTimeoutCount.sum + type: long + description: The number of connections that transitioned from the active state to the idle state. + - name: PacketsDropCount.sum + type: long + description: The number of packets dropped by the NAT gateway. + - name: PacketsInFromDestination.sum + type: long + description: The number of packets received by the NAT gateway from the destination. + - name: PacketsInFromSource.sum + type: long + description: The number of packets received by the NAT gateway from clients in your VPC. + - name: PacketsOutToDestination.sum + type: long + description: The number of packets sent out through the NAT gateway to the destination. + - name: PacketsOutToSource.sum + type: long + description: The number of packets sent through the NAT gateway to the clients in your VPC. + - name: ActiveConnectionCount.max + type: long + description: The total number of concurrent active TCP connections through the NAT gateway. 
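Editor's note: the `MetricName.statistic` suffix convention used in the field definitions above means each CloudWatch datapoint is published under `aws.<metricset>.metrics.<MetricName>.<statistic>`, e.g. `aws.natgateway.metrics.BytesInFromDestination.sum`. As a minimal sketch (not part of this change) of how one of these metricsets is enabled, assuming static credentials, which are only one of the aws module's supported authentication options:

[source,yaml]
----
- module: aws
  period: 300s
  metricsets:
    - natgateway
  # Static credentials are shown for illustration only; profile-based
  # credentials are a supported alternative in the aws module.
  access_key_id: '${AWS_ACCESS_KEY_ID}'
  secret_access_key: '${AWS_SECRET_ACCESS_KEY}'
----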
diff --git a/x-pack/metricbeat/module/aws/sns/_meta/fields.yml b/x-pack/metricbeat/module/aws/sns/_meta/fields.yml index a70ffa1568c..5c34d717373 100644 --- a/x-pack/metricbeat/module/aws/sns/_meta/fields.yml +++ b/x-pack/metricbeat/module/aws/sns/_meta/fields.yml @@ -4,3 +4,39 @@ `sns` contains the metrics that were scraped from AWS CloudWatch which contains monitoring metrics sent by AWS SNS. release: beta fields: + - name: metrics + type: group + fields: + - name: PublishSize.avg + type: double + description: The size of messages published. + - name: SMSSuccessRate.avg + type: double + description: The rate of successful SMS message deliveries. + - name: NumberOfMessagesPublished.sum + type: long + description: The number of messages published to your Amazon SNS topics. + - name: NumberOfNotificationsDelivered.sum + type: long + description: The number of messages successfully delivered from your Amazon SNS topics to subscribing endpoints. + - name: NumberOfNotificationsFailed.sum + type: long + description: The number of messages that Amazon SNS failed to deliver. + - name: NumberOfNotificationsFilteredOut.sum + type: long + description: The number of messages that were rejected by subscription filter policies. + - name: NumberOfNotificationsFilteredOut-InvalidAttributes.sum + type: long + description: The number of messages that were rejected by subscription filter policies because the messages' attributes are invalid – for example, because the attribute JSON is incorrectly formatted. + - name: NumberOfNotificationsFilteredOut-NoMessageAttributes.sum + type: long + description: The number of messages that were rejected by subscription filter policies because the messages have no attributes. + - name: NumberOfNotificationsRedrivenToDlq.sum + type: long + description: The number of messages that have been moved to a dead-letter queue. + - name: NumberOfNotificationsFailedToRedriveToDlq.sum + type: long + description: The number of messages that couldn't be moved to a dead-letter queue. + - name: SMSMonthToDateSpentUSD.sum + type: long + description: The charges you have accrued since the start of the current calendar month for sending SMS messages. diff --git a/x-pack/metricbeat/module/aws/transitgateway/_meta/fields.yml b/x-pack/metricbeat/module/aws/transitgateway/_meta/fields.yml index ed376cd296d..e687ae973d0 100644 --- a/x-pack/metricbeat/module/aws/transitgateway/_meta/fields.yml +++ b/x-pack/metricbeat/module/aws/transitgateway/_meta/fields.yml @@ -4,3 +4,24 @@ `transitgateway` contains the metrics from Cloudwatch to track usage of transit gateway related resources. release: beta fields: + - name: metrics + type: group + fields: + - name: BytesIn.sum + type: long + description: The number of bytes received by the transit gateway. + - name: BytesOut.sum + type: long + description: The number of bytes sent from the transit gateway. + - name: PacketsIn.sum + type: long + description: The number of packets received by the transit gateway. + - name: PacketsOut.sum + type: long + description: The number of packets sent by the transit gateway. + - name: PacketDropCountBlackhole.sum + type: long + description: The number of packets dropped because they matched a blackhole route. + - name: PacketDropCountNoRoute.sum + type: long + description: The number of packets dropped because they did not match a route. 
diff --git a/x-pack/metricbeat/module/aws/usage/_meta/fields.yml b/x-pack/metricbeat/module/aws/usage/_meta/fields.yml index aa138f70eec..6b69118adc6 100644 --- a/x-pack/metricbeat/module/aws/usage/_meta/fields.yml +++ b/x-pack/metricbeat/module/aws/usage/_meta/fields.yml @@ -4,3 +4,12 @@ `usage` contains the metrics from Cloudwatch to track usage of some AWS resources. release: beta fields: + - name: metrics + type: group + fields: + - name: CallCount.sum + type: long + description: The number of specified API operations performed in your account. + - name: ResourceCount.sum + type: long + description: The number of the specified resources running in your account. The resources are defined by the dimensions associated with the metric. diff --git a/x-pack/metricbeat/module/aws/vpn/_meta/fields.yml b/x-pack/metricbeat/module/aws/vpn/_meta/fields.yml index 9d1ae32eebb..4569fcc1a57 100644 --- a/x-pack/metricbeat/module/aws/vpn/_meta/fields.yml +++ b/x-pack/metricbeat/module/aws/vpn/_meta/fields.yml @@ -4,3 +4,15 @@ `vpn` contains the metrics from Cloudwatch to track usage of VPN related resources. release: beta fields: + - name: metrics + type: group + fields: + - name: TunnelState.avg + type: double + description: The state of the tunnel. For static VPNs, 0 indicates DOWN and 1 indicates UP. For BGP VPNs, 1 indicates ESTABLISHED and 0 is used for all other states. + - name: TunnelDataIn.sum + type: double + description: The bytes received through the VPN tunnel. + - name: TunnelDataOut.sum + type: double + description: The bytes sent through the VPN tunnel. diff --git a/x-pack/metricbeat/module/googlecloud/_meta/config.yml b/x-pack/metricbeat/module/googlecloud/_meta/config.yml index 5df057bba18..640fd87bc5a 100644 --- a/x-pack/metricbeat/module/googlecloud/_meta/config.yml +++ b/x-pack/metricbeat/module/googlecloud/_meta/config.yml @@ -1,13 +1,21 @@ - module: googlecloud metricsets: - compute + region: "us-central1" + project_id: "your project id" + credentials_file_path: "your JSON credentials file path" + exclude_labels: false + period: 300s + +- module: googlecloud + metricsets: - pubsub - loadbalancing zone: "us-central1-a" project_id: "your project id" credentials_file_path: "your JSON credentials file path" exclude_labels: false - period: 300s + period: 60s - module: googlecloud metricsets: @@ -17,3 +25,12 @@ credentials_file_path: "your JSON credentials file path" exclude_labels: false period: 300s + +- module: googlecloud + metricsets: + - compute + region: "us-" + project_id: "your project id" + credentials_file_path: "your JSON credentials file path" + exclude_labels: false + period: 60s diff --git a/x-pack/metricbeat/module/googlecloud/_meta/docs.asciidoc b/x-pack/metricbeat/module/googlecloud/_meta/docs.asciidoc index ff4b03cb023..5a1e4ed62f3 100644 --- a/x-pack/metricbeat/module/googlecloud/_meta/docs.asciidoc +++ b/x-pack/metricbeat/module/googlecloud/_meta/docs.asciidoc @@ -6,18 +6,88 @@ Note: extra GCP charges on Stackdriver Monitoring API requests will be generated == Module config and parameters This is a list of the possible module parameters you can tune: -* *zone*: A single string with the zone you want to monitor like "us-central1-a". If you need to fetch from multiple regions, you have to setup a different configuration for each (but you don't need a new instance of Metricbeat running) -* *region*: A single string with the region you want to monitor like "us-central1". This will enable monitoring for all zones under this region. 
+* *zone*: A single string with the zone you want to monitor like `us-central1-a`.
+Or you can specify a partial zone name like `us-central1-` or `us-central1-*`,
+which will monitor all zones starting with `us-central1-`: `us-central1-a`,
+`us-central1-b`, `us-central1-c` and `us-central1-f`.
+Please see https://cloud.google.com/compute/docs/regions-zones#available[GCP zones]
+for zones that are available in GCP.
+
+* *region*: A single string with the region you want to monitor like `us-central1`.
+This will enable monitoring for all zones under this region. Or you can specify
+a partial region name like `us-east` or `us-east*`, which will monitor all regions starting with
+`us-east`: `us-east1` and `us-east4`. If both region and zone are configured,
+only the region will be used.
+Please see https://cloud.google.com/compute/docs/regions-zones#available[GCP regions]
+for regions that are available in GCP.
+
 * *project_id*: A single string with your GCP Project ID
-* *credentials_file_path*: A single string pointing to the JSON file path reachable by Metricbeat that you have created using IAM.
-* *exclude_labels*: (`true`/`false` default `false`) Do not extract extra labels and metadata information from Metricsets and fetch metrics onlly. At the moment, *labels and metadata extraction is only supported* in Compute Metricset.
+
+* *credentials_file_path*: A single string pointing to the JSON file path
+reachable by Metricbeat that you have created using IAM.
+
+* *exclude_labels*: (`true`/`false` default `false`) Do not extract extra labels
+and metadata information from metricsets and fetch metrics only. At the moment,
+*labels and metadata extraction is only supported* in the `compute` metricset.
+
+* *period*: A single time duration that specifies the collection frequency for this module.
+
+[float]
+== Example configuration
+* `compute` metricset is enabled to collect metrics from the `us-central1-a` zone
+in the `elastic-observability` project.
++
+[source,yaml]
+----
+- module: googlecloud
+  metricsets:
+    - compute
+  zone: "us-central1-a"
+  project_id: "elastic-observability"
+  credentials_file_path: "your JSON credentials file path"
+  exclude_labels: false
+  period: 60s
+----
+
+* `compute` and `pubsub` metricsets are enabled to collect metrics from all zones
+under the `us-central1` region in the `elastic-observability` project.
++
+[source,yaml]
+----
+- module: googlecloud
+  metricsets:
+    - compute
+    - pubsub
+  region: "us-central1"
+  project_id: "elastic-observability"
+  credentials_file_path: "your JSON credentials file path"
+  exclude_labels: false
+  period: 60s
+----
+
+* `compute` and `pubsub` metricsets are enabled to collect metrics from all regions
+starting with `us-west` in the `elastic-observability` project, which includes all zones under
+`us-west1`, `us-west2`, `us-west3` and `us-west4`.
++
+[source,yaml]
+----
+- module: googlecloud
+  metricsets:
+    - compute
+    - pubsub
+  region: "us-west"
+  project_id: "elastic-observability"
+  credentials_file_path: "your JSON credentials file path"
+  exclude_labels: false
+  period: 60s
+----
 
 [float]
 == Authentication, authorization and permissions.
 Authentication and authorization in Google Cloud Platform can be achieved in many ways. For the current version of the Google Cloud Platform module for Metricbeat, the only supported method is using Service Account JSON files.
 A typical JSON with a private key looks like this:
 
 [float]
-==== Example Credentials
+=== Example Credentials
 [source,json]
 ----
 {
@@ -52,7 +122,9 @@ Google Cloud Platform offers the https://cloud.google.com/monitoring/api/metrics
 If you also want to *extract service labels* (by setting `exclude_labels` to false, which is the default), Metricbeat also makes an additional API check on the corresponding service, because service labels require a separate API call to extract. In the worst case, the number of API calls is doubled. In the best case, all metrics come from the same GCP entity and 100% of the required information is included in the first API call (which is cached for subsequent calls).
 
-A recommended `period` value between fetches is between 5 and 10 minutes, depending on how granular you want your metrics. GCP restricts information for less than 5 minutes.
+If the `period` value is set to 5 minutes and the sample period of the metric type is 60 seconds, this module collects data from that metric type once every 5 minutes, with aggregation (see the configuration sketch after the fields definition below).
+GCP monitoring data has a latency of up to 240 seconds, which means the latest monitoring data will be up to 4 minutes old. Please see https://cloud.google.com/monitoring/api/v3/latency-n-retention[Latency of GCP Monitoring Metric Data] for more details.
+In the googlecloud module, metrics are collected based on this ingest delay, which is also obtained from the ListMetricDescriptors API.
 
 [float]
 === Rough estimation of the number of API Calls
diff --git a/x-pack/metricbeat/module/googlecloud/_meta/fields.yml b/x-pack/metricbeat/module/googlecloud/_meta/fields.yml
index 8bbae3bf4fe..41c1c09fad9 100644
--- a/x-pack/metricbeat/module/googlecloud/_meta/fields.yml
+++ b/x-pack/metricbeat/module/googlecloud/_meta/fields.yml
@@ -4,14 +4,23 @@
   description: >
     GCP module
   fields:
-    - name: googlecloud.labels
-      type: object
+    - name: googlecloud
+      type: group
       fields:
-        - name: user.*
+        - name: labels
           type: object
-        - name: metadata.*
-          type: object
-        - name: metrics.*
-          type: object
-        - name: system.*
+          fields:
+            - name: user.*
+              type: object
+            - name: metadata.*
+              type: object
+            - name: metrics.*
+              type: object
+            - name: system.*
+              type: object
+        - name: "stackdriver.*.*.*.*"
           type: object
+          object_type: double
+          object_type_mapping_type: "*"
+          description: >
+            Metrics returned from the Stackdriver API query.
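Editor's note: as a rough sketch of the collection behaviour described above, reusing the placeholder project and credential values from the sample configuration earlier in these docs: with a `period` of `300s` and metric types sampled every 60 seconds, each fetch aggregates the previous 5 minutes and is shifted back by the reported ingest delay.

[source,yaml]
----
- module: googlecloud
  metricsets:
    - compute
  zone: "us-central1-a"
  project_id: "your project id"
  credentials_file_path: "your JSON credentials file path"
  exclude_labels: false
  # One collection every 5 minutes; each window is aggregated and offset
  # by the ingest delay reported via the ListMetricDescriptors API.
  period: 300s
----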
diff --git a/x-pack/metricbeat/module/googlecloud/compute/_meta/data.json b/x-pack/metricbeat/module/googlecloud/compute/_meta/data.json index cd37490aa90..4a3ab08217c 100644 --- a/x-pack/metricbeat/module/googlecloud/compute/_meta/data.json +++ b/x-pack/metricbeat/module/googlecloud/compute/_meta/data.json @@ -5,8 +5,8 @@ "id": "elastic-observability" }, "instance": { - "id": "9077240380975650630", - "name": "gke-observability-8--observability-8--bc1afd95-cwh3" + "id": "1174463293187628268", + "name": "gke-observability-8--observability-8--bc1afd95-ngmh" }, "machine": { "type": "n1-standard-4" @@ -23,16 +23,24 @@ "compute": { "instance": { "disk": { - "read_bytes_count": 0, - "read_ops_count": 0, - "write_bytes_count": 0, - "write_ops_count": 0 + "read_bytes_count": { + "value": 0 + }, + "read_ops_count": { + "value": 0 + }, + "write_bytes_count": { + "value": 0 + }, + "write_ops_count": { + "value": 0 + } } } }, "labels": { "metrics": { - "device_name": "gke-observability-8-0--pvc-ad47fe58-7bcf-11e9-a839-42010a8401a4", + "device_name": "disk-2", "device_type": "permanent", "storage_type": "pd-standard" }, diff --git a/x-pack/metricbeat/module/googlecloud/compute/_meta/data_cpu.json b/x-pack/metricbeat/module/googlecloud/compute/_meta/data_cpu.json index 8496bfd79b1..1d3a120a218 100644 --- a/x-pack/metricbeat/module/googlecloud/compute/_meta/data_cpu.json +++ b/x-pack/metricbeat/module/googlecloud/compute/_meta/data_cpu.json @@ -1,69 +1,62 @@ { - "@timestamp": "2020-01-08T16:06:00.000Z", - "@metadata": { - "beat": "metricbeat", - "type": "_doc", - "version": "8.0.0" - }, - "host": { - "name": "mcastro", - "id": "54f70115bae545cbac2b150f254472a0", - "containerized": false, - "hostname": "mcastro", - "architecture": "x86_64", - "os": { - "version": "", - "family": "", - "name": "Antergos Linux", - "kernel": "5.4.3-arch1-1", - "platform": "antergos" - } - }, - "agent": { - "ephemeral_id": "8b802033-b611-414b-bcaf-1aa19e5f5901", - "hostname": "mcastro", - "id": "7e36a073-1a32-4a94-b65b-4c7f971fb228", - "version": "8.0.0", - "type": "metricbeat" - }, + "@timestamp": "2017-10-12T08:05:34.853Z", "cloud": { "account": { - "id": "elastic-metricbeat" + "id": "elastic-observability" }, - "provider": "googlecloud", "instance": { - "name": "instance-1", - "id": "4503798379141677974" + "id": "1174463293187628268", + "name": "gke-observability-8--observability-8--bc1afd95-ngmh" }, "machine": { - "type": "f1-micro" + "type": "n1-standard-4" }, - "availability_zone": "us-central1-a" + "provider": "googlecloud" }, + "cloud.availability_zone": "europe-west1-c", "event": { - "duration": 1398412653, "dataset": "googlecloud.compute", + "duration": 115000, "module": "googlecloud" }, - "metricset": { - "name": "compute", - "period": 300000 - }, "googlecloud": { "compute": { + "firewall": { + "dropped_bytes_count": { + "value": 181 + }, + "dropped_packets_count": { + "value": 3 + } + }, "instance": { "cpu": { - "reserved_cores": 0.2, - "utilization": 0.005524845140497596 + "reserved_cores": { + "value": 4 + }, + "usage_time": { + "value": 63.478293027728796 + }, + "utilization": { + "value": 0.26449288761553663 + } + }, + "uptime": { + "value": 60 } } }, - "labels": {} + "labels": { + "user": { + "goog-gke-node": "" + } + } + }, + "metricset": { + "name": "compute", + "period": 10000 }, "service": { "type": "googlecloud" - }, - "ecs": { - "version": "1.2.0" } -} +} \ No newline at end of file diff --git a/x-pack/metricbeat/module/googlecloud/compute/_meta/data_disk.json 
b/x-pack/metricbeat/module/googlecloud/compute/_meta/data_disk.json new file mode 100644 index 00000000000..8da39b6ab7d --- /dev/null +++ b/x-pack/metricbeat/module/googlecloud/compute/_meta/data_disk.json @@ -0,0 +1,59 @@ +{ + "@timestamp": "2017-10-12T08:05:34.853Z", + "cloud": { + "account": { + "id": "elastic-observability" + }, + "instance": { + "id": "8390997210852978465", + "name": "gke-observability-7--observability-7--3dd3e39b-0jm5" + }, + "machine": { + "type": "n1-standard-4" + }, + "provider": "googlecloud" + }, + "cloud.availability_zone": "europe-west1-c", + "event": { + "dataset": "googlecloud.compute", + "duration": 115000, + "module": "googlecloud" + }, + "googlecloud": { + "compute": { + "instance": { + "disk": { + "read_bytes_count": { + "value": 0 + }, + "read_ops_count": { + "value": 0 + }, + "write_bytes_count": { + "value": 0 + }, + "write_ops_count": { + "value": 0 + } + } + } + }, + "labels": { + "metrics": { + "device_name": "gke-observability-7-1--pvc-65581044-7d5d-11ea-8cd9-42010af0011c", + "device_type": "permanent", + "storage_type": "pd-standard" + }, + "user": { + "goog-gke-node": "" + } + } + }, + "metricset": { + "name": "compute", + "period": 10000 + }, + "service": { + "type": "googlecloud" + } +} \ No newline at end of file diff --git a/x-pack/metricbeat/module/googlecloud/compute/_meta/data_disk_01.json b/x-pack/metricbeat/module/googlecloud/compute/_meta/data_disk_01.json deleted file mode 100644 index 038c451d934..00000000000 --- a/x-pack/metricbeat/module/googlecloud/compute/_meta/data_disk_01.json +++ /dev/null @@ -1,74 +0,0 @@ -{ - "@timestamp": "2020-01-08T16:05:00.000Z", - "@metadata": { - "beat": "metricbeat", - "type": "_doc", - "version": "8.0.0" - }, - "agent": { - "hostname": "mcastro", - "id": "7e36a073-1a32-4a94-b65b-4c7f971fb228", - "version": "8.0.0", - "type": "metricbeat", - "ephemeral_id": "8b802033-b611-414b-bcaf-1aa19e5f5901" - }, - "ecs": { - "version": "1.2.0" - }, - "googlecloud": { - "labels": { - "metrics": { - "device_type": "permanent", - "storage_type": "pd-standard", - "device_name": "instance-1" - } - }, - "compute": { - "instance": { - "disk": { - "write_bytes_count": 945853 - } - } - } - }, - "service": { - "type": "googlecloud" - }, - "cloud": { - "account": { - "id": "elastic-metricbeat" - }, - "provider": "googlecloud", - "instance": { - "name": "instance-1", - "id": "4503798379141677974" - }, - "machine": { - "type": "f1-micro" - }, - "availability_zone": "us-central1-a" - }, - "metricset": { - "name": "compute", - "period": 300000 - }, - "event": { - "module": "googlecloud", - "duration": 1398637364, - "dataset": "googlecloud.compute" - }, - "host": { - "containerized": false, - "hostname": "mcastro", - "architecture": "x86_64", - "os": { - "platform": "antergos", - "version": "", - "family": "", - "name": "Antergos Linux", - "kernel": "5.4.3-arch1-1" - }, - "name": "mcastro", - "id": "54f70115bae545cbac2b150f254472a0" - } -} diff --git a/x-pack/metricbeat/module/googlecloud/compute/_meta/data_disk_02.json b/x-pack/metricbeat/module/googlecloud/compute/_meta/data_disk_02.json deleted file mode 100644 index d3d0bc9c5ba..00000000000 --- a/x-pack/metricbeat/module/googlecloud/compute/_meta/data_disk_02.json +++ /dev/null @@ -1,77 +0,0 @@ -{ - "@timestamp": "2020-01-08T16:04:00.000Z", - "@metadata": { - "beat": "metricbeat", - "type": "_doc", - "version": "8.0.0" - }, - "service": { - "type": "googlecloud" - }, - "ecs": { - "version": "1.2.0" - }, - "host": { - "os": { - "name": "Antergos Linux", - "kernel": 
"5.4.3-arch1-1", - "platform": "antergos", - "version": "", - "family": "" - }, - "id": "54f70115bae545cbac2b150f254472a0", - "containerized": false, - "name": "mcastro", - "hostname": "mcastro", - "architecture": "x86_64" - }, - "agent": { - "ephemeral_id": "8b802033-b611-414b-bcaf-1aa19e5f5901", - "hostname": "mcastro", - "id": "7e36a073-1a32-4a94-b65b-4c7f971fb228", - "version": "8.0.0", - "type": "metricbeat" - }, - "cloud": { - "availability_zone": "us-central1-a", - "account": { - "id": "elastic-metricbeat" - }, - "provider": "googlecloud", - "instance": { - "id": "4503798379141677974", - "name": "instance-1" - }, - "machine": { - "type": "f1-micro" - } - }, - "metricset": { - "name": "compute", - "period": 300000 - }, - "event": { - "dataset": "googlecloud.compute", - "module": "googlecloud", - "duration": 1398743696 - }, - "googlecloud": { - "labels": { - "metrics": { - "device_name": "instance-1", - "device_type": "permanent", - "storage_type": "pd-standard" - } - }, - "compute": { - "instance": { - "disk": { - "write_ops_count": 140, - "read_ops_count": 2897, - "read_bytes_count": 71574649, - "write_bytes_count": 2557677 - } - } - } - } -} diff --git a/x-pack/metricbeat/module/googlecloud/compute/_meta/data_firewall.json b/x-pack/metricbeat/module/googlecloud/compute/_meta/data_firewall.json index ee219ec74e0..04b750b8b7d 100644 --- a/x-pack/metricbeat/module/googlecloud/compute/_meta/data_firewall.json +++ b/x-pack/metricbeat/module/googlecloud/compute/_meta/data_firewall.json @@ -1,75 +1,62 @@ { - "@timestamp": "2020-01-08T16:05:00.000Z", - "@metadata": { - "beat": "metricbeat", - "type": "_doc", - "version": "8.0.0" - }, - "ecs": { - "version": "1.2.0" - }, - "host": { - "containerized": false, - "name": "mcastro", - "hostname": "mcastro", - "architecture": "x86_64", - "os": { - "version": "", - "family": "", - "name": "Antergos Linux", - "kernel": "5.4.3-arch1-1", - "platform": "antergos" - }, - "id": "54f70115bae545cbac2b150f254472a0" - }, - "agent": { - "type": "metricbeat", - "ephemeral_id": "8b802033-b611-414b-bcaf-1aa19e5f5901", - "hostname": "mcastro", - "id": "7e36a073-1a32-4a94-b65b-4c7f971fb228", - "version": "8.0.0" - }, + "@timestamp": "2017-10-12T08:05:34.853Z", "cloud": { - "availability_zone": "us-central1-a", "account": { - "id": "elastic-metricbeat" + "id": "elastic-observability" }, - "provider": "googlecloud", "instance": { - "id": "4503798379141677974", - "name": "instance-1" + "id": "2528596280375797115", + "name": "gke-dev-next-oblt-dev-next-oblt-pool-404d7f0c-cpj6" }, "machine": { - "type": "f1-micro" - } + "type": "n1-standard-4" + }, + "provider": "googlecloud" }, + "cloud.availability_zone": "europe-west1-c", "event": { "dataset": "googlecloud.compute", - "module": "googlecloud", - "duration": 1397755844 - }, - "metricset": { - "name": "compute", - "period": 300000 + "duration": 115000, + "module": "googlecloud" }, "googlecloud": { - "labels": {}, "compute": { + "firewall": { + "dropped_bytes_count": { + "value": 386 + }, + "dropped_packets_count": { + "value": 7 + } + }, "instance": { - "uptime": 60.00000000000001, "cpu": { - "reserved_cores": 0.2, - "utilization": 0.38202685489490784, - "usage_time": 0.06629814168597115 + "reserved_cores": { + "value": 4 + }, + "usage_time": { + "value": 106.88293868489563 + }, + "utilization": { + "value": 0.4453455778537318 + } + }, + "uptime": { + "value": 60 } - }, - "firewall": { - "dropped_packets_count": 3 + } + }, + "labels": { + "user": { + "goog-gke-node": "" } } }, + "metricset": { + "name": "compute", + 
"period": 10000 + }, "service": { "type": "googlecloud" } -} - +} \ No newline at end of file diff --git a/x-pack/metricbeat/module/googlecloud/compute/_meta/data_instance.json b/x-pack/metricbeat/module/googlecloud/compute/_meta/data_instance.json deleted file mode 100644 index 4306273f73c..00000000000 --- a/x-pack/metricbeat/module/googlecloud/compute/_meta/data_instance.json +++ /dev/null @@ -1,73 +0,0 @@ -{ - "@timestamp": "2020-01-08T16:04:00.000Z", - "@metadata": { - "beat": "metricbeat", - "type": "_doc", - "version": "8.0.0" - }, - "ecs": { - "version": "1.2.0" - }, - "host": { - "os": { - "platform": "antergos", - "version": "", - "family": "", - "name": "Antergos Linux", - "kernel": "5.4.3-arch1-1" - }, - "id": "54f70115bae545cbac2b150f254472a0", - "containerized": false, - "hostname": "mcastro", - "name": "mcastro", - "architecture": "x86_64" - }, - "agent": { - "ephemeral_id": "8b802033-b611-414b-bcaf-1aa19e5f5901", - "hostname": "mcastro", - "id": "7e36a073-1a32-4a94-b65b-4c7f971fb228", - "version": "8.0.0", - "type": "metricbeat" - }, - "cloud": { - "provider": "googlecloud", - "instance": { - "id": "4503798379141677974", - "name": "instance-1" - }, - "machine": { - "type": "f1-micro" - }, - "availability_zone": "us-central1-a", - "account": { - "id": "elastic-metricbeat" - } - }, - "event": { - "module": "googlecloud", - "duration": 1397750508, - "dataset": "googlecloud.compute" - }, - "metricset": { - "period": 300000, - "name": "compute" - }, - "googlecloud": { - "labels": {}, - "compute": { - "firewall": { - "dropped_bytes_count": 0, - "dropped_packets_count": 0 - }, - "instance": { - "uptime": 46.181442, - "cpu": { - "usage_time": 4.5843222587388945 - } - } - } - }, - "service": { - "type": "googlecloud" - } -} diff --git a/x-pack/metricbeat/module/googlecloud/compute/_meta/data_network.json b/x-pack/metricbeat/module/googlecloud/compute/_meta/data_network.json new file mode 100644 index 00000000000..d543fc2382f --- /dev/null +++ b/x-pack/metricbeat/module/googlecloud/compute/_meta/data_network.json @@ -0,0 +1,54 @@ +{ + "@timestamp": "2017-10-12T08:05:34.853Z", + "cloud": { + "account": { + "id": "elastic-observability" + }, + "instance": { + "id": "7208038667777737825", + "name": "gke-dev-next-oblt-dev-next-oblt-pool-404d7f0c-fgxk" + }, + "machine": { + "type": "n1-standard-4" + }, + "provider": "googlecloud" + }, + "cloud.availability_zone": "europe-west1-c", + "event": { + "dataset": "googlecloud.compute", + "duration": 115000, + "module": "googlecloud" + }, + "googlecloud": { + "compute": { + "instance": { + "network": { + "received_bytes_count": { + "value": 17913 + }, + "received_packets_count": { + "value": 128 + }, + "sent_bytes_count": { + "value": 841 + } + } + } + }, + "labels": { + "metrics": { + "loadbalanced": "true" + }, + "user": { + "goog-gke-node": "" + } + } + }, + "metricset": { + "name": "compute", + "period": 10000 + }, + "service": { + "type": "googlecloud" + } +} \ No newline at end of file diff --git a/x-pack/metricbeat/module/googlecloud/compute/_meta/data_network_01.json b/x-pack/metricbeat/module/googlecloud/compute/_meta/data_network_01.json deleted file mode 100644 index 26c21d62295..00000000000 --- a/x-pack/metricbeat/module/googlecloud/compute/_meta/data_network_01.json +++ /dev/null @@ -1,74 +0,0 @@ -{ - "@timestamp": "2020-01-08T16:04:00.000Z", - "@metadata": { - "beat": "metricbeat", - "type": "_doc", - "version": "8.0.0" - }, - "cloud": { - "machine": { - "type": "f1-micro" - }, - "availability_zone": "us-central1-a", - "account": { - 
"id": "elastic-metricbeat" - }, - "provider": "googlecloud", - "instance": { - "id": "4503798379141677974", - "name": "instance-1" - } - }, - "ecs": { - "version": "1.2.0" - }, - "host": { - "name": "mcastro", - "hostname": "mcastro", - "architecture": "x86_64", - "os": { - "kernel": "5.4.3-arch1-1", - "platform": "antergos", - "version": "", - "family": "", - "name": "Antergos Linux" - }, - "id": "54f70115bae545cbac2b150f254472a0", - "containerized": false - }, - "agent": { - "hostname": "mcastro", - "id": "7e36a073-1a32-4a94-b65b-4c7f971fb228", - "version": "8.0.0", - "type": "metricbeat", - "ephemeral_id": "8b802033-b611-414b-bcaf-1aa19e5f5901" - }, - "event": { - "duration": 1397728251, - "dataset": "googlecloud.compute", - "module": "googlecloud" - }, - "metricset": { - "name": "compute", - "period": 300000 - }, - "googlecloud": { - "labels": { - "metrics": { - "loadbalanced": "false" - } - }, - "compute": { - "instance": { - "network": { - "received_bytes_count": 3846, - "sent_bytes_count": 1750, - "received_packets_count": 17 - } - } - } - }, - "service": { - "type": "googlecloud" - } -} diff --git a/x-pack/metricbeat/module/googlecloud/compute/_meta/data_network_02.json b/x-pack/metricbeat/module/googlecloud/compute/_meta/data_network_02.json deleted file mode 100644 index cc71eda5683..00000000000 --- a/x-pack/metricbeat/module/googlecloud/compute/_meta/data_network_02.json +++ /dev/null @@ -1,72 +0,0 @@ -{ - "@timestamp": "2020-01-08T16:05:00.000Z", - "@metadata": { - "beat": "metricbeat", - "type": "_doc", - "version": "8.0.0" - }, - "event": { - "dataset": "googlecloud.compute", - "module": "googlecloud", - "duration": 1398297740 - }, - "metricset": { - "name": "compute", - "period": 300000 - }, - "googlecloud": { - "labels": { - "metrics": { - "loadbalanced": "false" - } - }, - "compute": { - "instance": { - "network": { - "sent_bytes_count": 3977 - } - } - } - }, - "service": { - "type": "googlecloud" - }, - "ecs": { - "version": "1.2.0" - }, - "host": { - "containerized": false, - "name": "mcastro", - "hostname": "mcastro", - "architecture": "x86_64", - "os": { - "family": "", - "name": "Antergos Linux", - "kernel": "5.4.3-arch1-1", - "platform": "antergos", - "version": "" - }, - "id": "54f70115bae545cbac2b150f254472a0" - }, - "agent": { - "id": "7e36a073-1a32-4a94-b65b-4c7f971fb228", - "version": "8.0.0", - "type": "metricbeat", - "ephemeral_id": "8b802033-b611-414b-bcaf-1aa19e5f5901", - "hostname": "mcastro" - }, - "cloud": { - "account": { - "id": "elastic-metricbeat" - }, - "provider": "googlecloud", - "instance": { - "id": "4503798379141677974", - "name": "instance-1" - }, - "machine": { - "type": "f1-micro" - }, - "availability_zone": "us-central1-a" - } -} diff --git a/x-pack/metricbeat/module/googlecloud/compute/_meta/docs.asciidoc b/x-pack/metricbeat/module/googlecloud/compute/_meta/docs.asciidoc index 8cc6b3f85a2..f72103099ea 100644 --- a/x-pack/metricbeat/module/googlecloud/compute/_meta/docs.asciidoc +++ b/x-pack/metricbeat/module/googlecloud/compute/_meta/docs.asciidoc @@ -1,11 +1,15 @@ -Compute Metricset to fetch metrics from https://cloud.google.com/compute/[Compute Engine] Virtual Machines in Google Cloud Platform. No Monitoring or Logging agent is required in your instances to use this Metricset. +Compute metricset to fetch metrics from https://cloud.google.com/compute/[Compute Engine] Virtual Machines in Google Cloud Platform. No Monitoring or Logging agent is required in your instances to use this metricset. 
-The `compute` Metricset contains all metrics exported from the https://cloud.google.com/monitoring/api/metrics_gcp#gcp-compute[Stackdriver API]. The field names have been left untouched for people already familiar with them. +The `compute` metricset contains all metrics exported from the https://cloud.google.com/monitoring/api/metrics_gcp#gcp-compute[Stackdriver API]. The field names have been left untouched for people already familiar with them. Extra labels and metadata are also extracted using the https://cloud.google.com/compute/docs/reference/rest/v1/instances/get[Compute API]. This is enough to get most of the information associated with a metric, such as Compute labels, metadata, and metric-specific labels. [float] -=== Fields and labels +=== Metrics and labels +Here is a list of metrics collected by the `compute` metricset: + +[float] +==== firewall * `instance.firewall.dropped_bytes_count`: Incoming bytes dropped by the firewall. - `instance_name`: The name of the VM instance. @@ -13,14 +17,21 @@ Extra labels and metadata are also extracted using the https://cloud.google.com/ * `instance.firewall.dropped_packets_count`: Incoming packets dropped by the firewall. - `instance_name`: The name of the VM instance. +[float] +==== cpu + * `instance.cpu.reserved_cores`: Number of cores reserved on the host of the `instance`. - `instance_name`: The name of the VM instance. * `instance.cpu.utilization`: The fraction of the allocated CPU that is currently in use on the `instance`. - `instance_name`: The name of the VM instance. + * `instance.cpu.usage_time`: Usage for all cores in seconds. - `instance_name`: The name of the VM instance. +[float] +==== disk + * `instance.disk.read_bytes_count`: Count of bytes read from disk. - `instance_name`: The name of the VM instance. - `device_name`: The name of the disk device. @@ -45,9 +56,15 @@ Extra labels and metadata are also extracted using the https://cloud.google.com/ - `storage_type`: The storage type: `pd-standard`, `pd-ssd`, or `local-ssd`. - `device_type`: The disk type: `ephemeral` or `permanent`. +[float] +==== uptime + * `instance.uptime`: How long the VM has been running, in seconds. - `instance_name`: The name of the VM instance. +[float] +==== network + * `instance.network.received_bytes_count`: Count of bytes received from the network. - `instance_name`: The name of the VM instance. - `loadBalanced`: Whether traffic was sent from an L3 loadbalanced IP address assigned to the VM. Traffic that is externally routed from the VM's standard internal or external IP address, such as L7 loadbalanced traffic, is not considered to be loadbalanced in this metric.
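In the renamed fields that follow, every metric leaf gains a `.value` suffix, so a Stackdriver metric type such as `compute.googleapis.com/instance/cpu/utilization` surfaces in events as `googlecloud.compute.instance.cpu.utilization.value`. Below is a minimal Go sketch of that naming rule as inferred from the manifests and fields.yml in this change; `metricTypeToField` is a hypothetical helper for illustration, not code from this PR.

package main

import (
	"fmt"
	"strings"
)

// metricTypeToField sketches how an event field path could be derived from a
// Stackdriver metric type: strip ".googleapis.com" from the service, replace
// "/" with ".", and append the aligner suffix (".value" for ALIGN_NONE).
func metricTypeToField(metricType, suffix string) string {
	parts := strings.SplitN(metricType, "/", 2)
	service := strings.TrimSuffix(parts[0], ".googleapis.com")
	path := strings.ReplaceAll(parts[1], "/", ".")
	return "googlecloud." + service + "." + path + suffix
}

func main() {
	fmt.Println(metricTypeToField("compute.googleapis.com/instance/cpu/utilization", ".value"))
	// googlecloud.compute.instance.cpu.utilization.value
}

With a non-default aligner, the `.value` suffix would be replaced by the aligner-specific suffix from the `AlignersMapToSuffix` table added to constants.go further down.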
diff --git a/x-pack/metricbeat/module/googlecloud/compute/_meta/fields.yml b/x-pack/metricbeat/module/googlecloud/compute/_meta/fields.yml index 01be5ebf386..5cbdfb3ea56 100644 --- a/x-pack/metricbeat/module/googlecloud/compute/_meta/fields.yml +++ b/x-pack/metricbeat/module/googlecloud/compute/_meta/fields.yml @@ -9,54 +9,54 @@ - name: firewall type: group fields: - - name: dropped_bytes_count + - name: dropped_bytes_count.value type: long description: Incoming bytes dropped by the firewall - - name: dropped_packets_count + - name: dropped_packets_count.value type: long description: Incoming packets dropped by the firewall - name: cpu type: group fields: - - name: reserved_cores + - name: reserved_cores.value type: double description: Number of cores reserved on the host of the instance - - name: utilization + - name: utilization.value type: double description: The fraction of the allocated CPU that is currently in use on the instance - - name: usage_time + - name: usage_time.value type: double description: Usage for all cores in seconds - name: disk type: group fields: - - name: read_bytes_count + - name: read_bytes_count.value type: long description: Count of bytes read from disk - - name: read_ops_count + - name: read_ops_count.value type: long description: Count of disk read IO operations - - name: write_bytes_count + - name: write_bytes_count.value type: long description: Count of bytes written to disk - - name: write_ops_count + - name: write_ops_count.value type: long description: Count of disk write IO operations - - name: uptime + - name: uptime.value type: long description: How long the VM has been running, in seconds - name: network type: group fields: - - name: received_bytes_count + - name: received_bytes_count.value type: long description: Count of bytes received from the network - - name: received_packets_count + - name: received_packets_count.value type: long description: Count of packets received from the network - - name: sent_bytes_count + - name: sent_bytes_count.value type: long description: Count of bytes sent over the network - - name: sent_packets_count + - name: sent_packets_count.value type: long description: Count of packets sent over the network diff --git a/x-pack/metricbeat/module/googlecloud/compute/compute_integration_test.go b/x-pack/metricbeat/module/googlecloud/compute/compute_integration_test.go index ae9400fdeb3..be2dd08cdec 100644 --- a/x-pack/metricbeat/module/googlecloud/compute/compute_integration_test.go +++ b/x-pack/metricbeat/module/googlecloud/compute/compute_integration_test.go @@ -8,14 +8,39 @@ package compute import ( + "fmt" "testing" + "github.com/elastic/beats/v7/libbeat/common" mbtest "github.com/elastic/beats/v7/metricbeat/mb/testing" - "github.com/elastic/beats/v7/x-pack/metricbeat/module/googlecloud" + "github.com/elastic/beats/v7/x-pack/metricbeat/module/googlecloud/stackdriver" ) func TestData(t *testing.T) { - config := googlecloud.GetConfigForTest(t, "compute") - metricSet := mbtest.NewFetcher(t, config) - metricSet.WriteEvents(t, "/") + metricPrefixIs := func(metricPrefix string) func(e common.MapStr) bool { + return func(e common.MapStr) bool { + v, err := e.GetValue(metricPrefix) + return err == nil && v != nil + } + } + + dataFiles := []struct { + metricPrefix string + path string + }{ + {"googlecloud.compute.instance", "./_meta/data.json"}, + {"googlecloud.compute.instance.disk", "./_meta/data_disk.json"}, + {"googlecloud.compute.instance.network", "./_meta/data_network.json"}, + {"googlecloud.compute.instance.cpu", 
"./_meta/data_cpu.json"}, + {"googlecloud.compute.firewall", "./_meta/data_firewall.json"}, + } + + config := stackdriver.GetConfigForTest(t, "compute") + + for _, df := range dataFiles { + metricSet := mbtest.NewFetcher(t, config) + t.Run(fmt.Sprintf("metric prefix: %s", df.metricPrefix), func(t *testing.T) { + metricSet.WriteEventsCond(t, df.path, metricPrefixIs(df.metricPrefix)) + }) + } } diff --git a/x-pack/metricbeat/module/googlecloud/compute/manifest.yml b/x-pack/metricbeat/module/googlecloud/compute/manifest.yml index 03b16dcd440..34210db8c0e 100644 --- a/x-pack/metricbeat/module/googlecloud/compute/manifest.yml +++ b/x-pack/metricbeat/module/googlecloud/compute/manifest.yml @@ -6,16 +6,17 @@ input: stackdriver: service: compute metrics: - - "compute.googleapis.com/firewall/dropped_bytes_count" - - "compute.googleapis.com/firewall/dropped_packets_count" - - "compute.googleapis.com/instance/cpu/reserved_cores" - - "compute.googleapis.com/instance/cpu/usage_time" - - "compute.googleapis.com/instance/cpu/utilization" - - "compute.googleapis.com/instance/disk/read_bytes_count" - - "compute.googleapis.com/instance/disk/read_ops_count" - - "compute.googleapis.com/instance/disk/write_bytes_count" - - "compute.googleapis.com/instance/disk/write_ops_count" - - "compute.googleapis.com/instance/network/received_bytes_count" - - "compute.googleapis.com/instance/network/received_packets_count" - - "compute.googleapis.com/instance/network/sent_bytes_count" - - "compute.googleapis.com/instance/uptime" + - metric_types: + - "compute.googleapis.com/firewall/dropped_bytes_count" + - "compute.googleapis.com/firewall/dropped_packets_count" + - "compute.googleapis.com/instance/cpu/reserved_cores" + - "compute.googleapis.com/instance/cpu/usage_time" + - "compute.googleapis.com/instance/cpu/utilization" + - "compute.googleapis.com/instance/disk/read_bytes_count" + - "compute.googleapis.com/instance/disk/read_ops_count" + - "compute.googleapis.com/instance/disk/write_bytes_count" + - "compute.googleapis.com/instance/disk/write_ops_count" + - "compute.googleapis.com/instance/network/received_bytes_count" + - "compute.googleapis.com/instance/network/received_packets_count" + - "compute.googleapis.com/instance/network/sent_bytes_count" + - "compute.googleapis.com/instance/uptime" diff --git a/x-pack/metricbeat/module/googlecloud/constants.go b/x-pack/metricbeat/module/googlecloud/constants.go index b5ded8bf9bc..19b7e27c53d 100644 --- a/x-pack/metricbeat/module/googlecloud/constants.go +++ b/x-pack/metricbeat/module/googlecloud/constants.go @@ -4,16 +4,14 @@ package googlecloud +import monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" + const ( // ModuleName in Metricbeat ModuleName = "googlecloud" - // MinTimeIntervalDataWindowMinutes is the minimum time in minutes that we allow the user to specify when requesting past metrics. Less than 5 minutes - // usually return no results. - MinTimeIntervalDataWindowMinutes = 5 - - // MaxTimeIntervalDataWindowMinutes is the max time in minutes that we allow the user to specify when requesting past metrics. - MaxTimeIntervalDataWindowMinutes = 60 + // MonitoringMetricsSamplingRate (in second) refers to how frequent monitoring collects measurement in GCP. 
+ MonitoringMetricsSamplingRate = 60 ) // Metricsets / GCP services names @@ -73,3 +71,53 @@ const ( LabelUser = "user" LabelMetadata = "metadata" ) + +// Available perSeriesAligner map +// https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.alertPolicies#Aligner +var AlignersMapToGCP = map[string]monitoringpb.Aggregation_Aligner{ + "ALIGN_NONE": monitoringpb.Aggregation_ALIGN_NONE, + "ALIGN_DELTA": monitoringpb.Aggregation_ALIGN_DELTA, + "ALIGN_RATE": monitoringpb.Aggregation_ALIGN_RATE, + "ALIGN_INTERPOLATE": monitoringpb.Aggregation_ALIGN_INTERPOLATE, + "ALIGN_NEXT_OLDER": monitoringpb.Aggregation_ALIGN_NEXT_OLDER, + "ALIGN_MIN": monitoringpb.Aggregation_ALIGN_MIN, + "ALIGN_MAX": monitoringpb.Aggregation_ALIGN_MAX, + "ALIGN_MEAN": monitoringpb.Aggregation_ALIGN_MEAN, + "ALIGN_COUNT": monitoringpb.Aggregation_ALIGN_COUNT, + "ALIGN_SUM": monitoringpb.Aggregation_ALIGN_SUM, + "ALIGN_STDDEV": monitoringpb.Aggregation_ALIGN_STDDEV, + "ALIGN_COUNT_TRUE": monitoringpb.Aggregation_ALIGN_COUNT_TRUE, + "ALIGN_COUNT_FALSE": monitoringpb.Aggregation_ALIGN_COUNT_FALSE, + "ALIGN_FRACTION_TRUE": monitoringpb.Aggregation_ALIGN_FRACTION_TRUE, + "ALIGN_PERCENTILE_99": monitoringpb.Aggregation_ALIGN_PERCENTILE_99, + "ALIGN_PERCENTILE_95": monitoringpb.Aggregation_ALIGN_PERCENTILE_95, + "ALIGN_PERCENTILE_50": monitoringpb.Aggregation_ALIGN_PERCENTILE_50, + "ALIGN_PERCENTILE_05": monitoringpb.Aggregation_ALIGN_PERCENTILE_05, + "ALIGN_PERCENT_CHANGE": monitoringpb.Aggregation_ALIGN_PERCENT_CHANGE, +} + +const ( + DefaultAligner = "ALIGN_NONE" +) + +var AlignersMapToSuffix = map[string]string{ + "ALIGN_NONE": ".value", + "ALIGN_DELTA": ".delta", + "ALIGN_RATE": ".rate", + "ALIGN_INTERPOLATE": ".interpolate", + "ALIGN_NEXT_OLDER": ".next_older", + "ALIGN_MIN": ".min", + "ALIGN_MAX": ".max", + "ALIGN_MEAN": ".avg", + "ALIGN_COUNT": ".count", + "ALIGN_SUM": ".sum", + "ALIGN_STDDEV": ".stddev", + "ALIGN_COUNT_TRUE": ".count_true", + "ALIGN_COUNT_FALSE": ".count_false", + "ALIGN_FRACTION_TRUE": ".fraction_true", + "ALIGN_PERCENTILE_99": ".percentile_99", + "ALIGN_PERCENTILE_95": ".percentile_95", + "ALIGN_PERCENTILE_50": ".percentile_50", + "ALIGN_PERCENTILE_05": ".percentile_05", + "ALIGN_PERCENT_CHANGE": ".percent_change", +} diff --git a/x-pack/metricbeat/module/googlecloud/fields.go b/x-pack/metricbeat/module/googlecloud/fields.go index 8fac4947787..6d629ce05c2 100644 --- a/x-pack/metricbeat/module/googlecloud/fields.go +++ b/x-pack/metricbeat/module/googlecloud/fields.go @@ -19,5 +19,5 @@ func init() { // AssetGooglecloud returns asset data. // This is the base64 encoded gzipped contents of module/googlecloud. 
func AssetGooglecloud() string { - return "eJzsXUuT2zYSvvtX9M321nh82JtrK1WTyW7iWnsztTPJVQWCLQk7IMDgMYry67cAkBQp8QFSpPwo3WYkEv11A/i+bgCk3sEz7j/ARsoNR8qlTV8BGGY4foDXP/tP4d59DA+cmLVU2etXAAo5Eo0fIEFDXgGkqKliuWFSfIAfXgEA/Hz/AJlMLcdXAGuGPNUf/BfvQJAMGyZvOUmQa/81gNnn+AFk8j+kpviofn+9DatR3f6t+rj13sPVGRqSEkNG3aEY1fE36L02mMVfT2WWW4O1y49D22xmo6TNa582At/orvvQculC7Z7jYNbxMKENERQbX3YZ72qs3uCaKdwRzk8u6Gu0r+F646mSeY7pKtkb1CsqrTCt15e2uBSbjgsagfwoqMyY2IBvuDQDyR7MFvtcOoWWE/qMZhFwRdPR8Koxl9tFekOhRvWC6YpKhbrX11TahB8PslZv/2OzBBXINfhWKyMghfd2K7Vx37q/OwZvE6U1jLO/iGt9JohPLuiKUPdfCYVwLikxmML9w29gtsQA00CtUigM3wMTjrtKJ+KAa7LBlWFZF6ixuH9zDcJaKoe2CC8ToJFKkZ72XzWymX5eaPyQ+afyvWvJ9UqYys4GrJXMutw4giPzRcA44wHLx19B5qj8eGyfMyWenWIGl46PM2JQgJHDAQqAloyQtzAQomp65B1ToxdAw/gvcuev83Py98+wJRoSRAHKCsHE5iZmegg0O6mWmiEU2csCgncyS4KdMFNcNLq9akE3v+ZV+ErNm4ZQozBLx87ZAPmCahyqC8SsH1mJhkuSJoQTQVnDzlx56SdJUvixNDAyPd0akx9Pve5pdQaILiB1MImLq0hXnBgUlLWmPA0Id47VjGKJracK4fY9UMKp5T5n8IN6t8WQHSj8w6I2sCNFHxaJXq7kn3vH1O6fAgxYYRivfX06Ucorg21t/Lh1YBTqXAqNt4tw1xkD+1CNkf6cbbCFxLqpECZZf4Ya25TMu5V7KGwwELq6uV977cTZirFXt/nPP3MpUBhG+I/e2z7z8RBiYdShbJTcme1qTaiRavCuqFz41IimJOLqCB4+bVrYbLVmwmdwUaEcsHPMQQVHDIhbfCrkahpRlV41ZSO65CMdKOWXp6eH949eNiDohhMYWSLTp2zShX1u1BXOomJM9hUo93Ub8BiwgSQvFOlgrAh1hf6NVEAJ3eJbF+lxjqyVFMZ5Ymi+UqYNfIRo/ffpydGxtspJi1SAhG6BSiEw1MEJmp1LnSln3hmRBkG6istVXK7iMtj4VyEuTBhUgsy5djsmd/5+sufhwds/ZGNoLmLkRFDdiFZi6G58c/2UF8cCw/M/nvjieSeOdSbT3xgCjKfA6SQ4kgbHE+EoKjyTDAdsHVa4YrLsQeATM+2QSun4lO8U+BKQW9Ls8fCikuo5A9tIrLtqmCLifdCNNIRfWMYqVWpI2bF2baSBu/t/18cOSBFEq/Tfh+WqWGc1d1Wss4HEg4GrYkXYGqdXS6wKjdaqWKWacTFoLLAvt/AzWp/i1ekb1KbrclFHU9flotEQYmHAdbmo1U61a/z3247Vogtvz5aAcKNQ616y7gldHFF79vr46cfDolBJyfBmLRU83T/AmsudBmZe68A6h1OFUgDJc86oP9gC2igkGUjB92+PWe/Iqf6zAtPcahwS6HGs0ATnVhdKJi4V+wKTkQ2wy8S+9OoywW/1rAuaMqZH4wcUvrGT40+HuNAddnK0395xKHw4TwW5j2iHF4In1vqDMhx197AEj2mmT37nqaaHhHfeUnOk5C5TdY4T2xFSO05oo2X2DJEdkFhD85XWfOUz6C8rsrXpy6X2J74rujirJqqf9j4QkD8+vUOFYFBlTPgSpKSq94+Pn7q2lkckA9NQHgvS759rUmm1P6XfD22e7XidSWm2mHo2f8MEZPrtgdXrpdhr7fldG0Kfb8KufcaENdgQRE72qArvcqKLMrTaMgsOXmuza212rc0GG/9KtvIvRYGHzPX3z3EUKHB3MQGhCkerh8xRzAbwPjwCVEv+pTXaEJG6SDVBK2k3W0/dHUhLhLlNtE1qpuc6rP1gk0ebjDykrW1StTctT3m01T/OFd9j0/ITQp9XGWr/6NT5o/7eZpYTw14wyJTrvqJ17UwJueOYboLk3h3+r1afb4Lv4YIUOXtBtfcA+k8ecrkJU/e8FWdpCA8HVjT7C8vkwYoG9MqhN+T2+ZbclgCqL94CE0Aa3dwzuW22qo3wsjPmmuUV2CKY6NcqmuBea8it3gKKNJdMmBtIrAEhDezRNLqt3w0rKiNzu7FoF0ieojYrb+MAfUU25z0tdbdBn2oWD0S9LYdTMNfhUpdHoxzKLeer+syuHg9baI7XPKkcOTyS1pjUCrXl5hb+JRUQSNGLeZGpt92qsca17w9k2wjF+0ym3uEUScqZwC7PI2I23wbbUKRaSa+ITz/OS/WrM/ZNd+iynenDM7EX9XZxcHoLxBjM8nZw8Jvg7Bm9F/omVMDuHr8BrIBlOccMhQkVbypRe01IiKFb/2KJim1v4VGGUrnc6ZSC712yZggTGqTAxg23fnm3bky5IRDWnVEpqdw4cTK1YS8oGvcCJX5tGomCzHLDco5gWIY9u7yNaPdt9sZH/KejRQbvTOl9ZSIsNTCqZCkCo0aJf/DxQtlZefS3OTWNLKelE+JiO31KkhZ2EFyCc3FxeixNP7jp+h1IVUssl6WSymCT8WDHzBaEFO8cx+wboWXpOEI88ulSY+PIs0sNiH9QmeIPk4ZFbASr4zgXGhLFeZxRne58XXE0BtWlaC63CWd6G8ogZx+CfTAyZ3QU+s4eW9iF3VZqhNKqP1Rk89SvASR7+CxTtt7f0eefygvOqa0nj8pzvT11I25uxnuyLGVGdsMQ6qI0VegSKfQeXK5Cba1PSygTCuxuL1bJfqVw0/7KoS/lzw0kSj6jS3134pAaBZxTVhMu42XHusJc7h114mKLbu3rPVNGXxvimbpiJuwTu0IjLp75IT73FpHRCd2XEpJm9n9KyxPlZMi9ryIhP/U2bAeM8tSN7hWVekYvwhvp6pGvtuKZCBtmt/C0ZRqYdsW/T9aKS+APKw2pv6WuGzqVYs02q5AdzbGr19YXwYgNrgDdEuEme/XIf3Oe1+Jeue/t+xcADHVFqSkXoLCuBfdzuOtw6D5ndNrG15O79cwtr2+wUNIo0gsu+voS6ZtdHZn3lSl9EVompf9qkuGiJP7usuBz/Po6k8lzPJpJ388T9Zqce+n0It+zov/llP170fCxQ6aiV0FyvZXH0Y49uFLcfaaEF+xyoXWQjqWCwpVBDrw49/XjnUoTS5+y6aqTB8PcQLbsjBrAODG0wmaLHfsZG8w6lpliGY1qYvi+HS1413nu0P/GhD9znir2gvWTuWOPJMb/rsGhIJKqSaFzHY98DC2PPB9JchatLv0POfaXAbHPov2E3JDDULh7+AiU+DMTte539OC+ydBsZertl2MgPK1NZYrtkkqs2f51
psf1E518lRCN6Sr8pMaKUIp6jnlxFIdq1cmfHg5j3U9oUfyYByTo6t6NIsJJfcABWnLke0gtugSwuPLu/lNPtudcOiSWM3gS3uzv+vL+Uy1lPeaf/r2xIrw6R8rWjK4cysyauUrzo2iXC0sZSeuBK623RPBwfLztFdvTB1jkS9Yne3r0ivXjF3KfO+vg+IDNom60vlN8LuI4Je7zerYY0TO8usOnT7Uj/L5hDTmq4nmfRggKP4Byossi8YVwi65UPDx7Kyj6FlKyv/EhcqGrrlOYh0cXiClWXkmWc0dA/sH7F8LLJwakNf7WlPQ81BDeDOKL465fNPg2xvjBkTnS9jIbJpxXvcrCW06+so79fwAAAP//qGqEJg==" + return "eJzsXV9z2zYSf8+n2OlLkpvEebi3zE1nXOeuzVxy9ZzdvnJAcCXhDAIs/lhVP/0NAJIiJZECKZKOM05faonE/nax+O0fgNR7eMDdR1hLueZIubTZKwDDDMeP8Ppn/yncuI/hlhOzkip//QpAIUei8SOkaMgrgAw1VawwTIqP8OMrAICfb24hl5nl+ApgxZBn+qP/4j0IkuOhSPfP7Ar3uZK2KD9p3te8l5MUua4/rm6V6f+QmsbHh/c3x7Aa1dXfWl91jrO/K0dDMmLIqDsVo3r4jXqnDeZx91X3/KANoQ+ZYo9OyfDfD+ftFT5IwreZtKmfvRPfJjkpCibW5aU/tAY/4Q3Vv6/BCGA2xIBCY5XADFZK5nDnEH/yiOH69jP8YVHtro40ozIvrGnCOnTGto5NdzoC13LwmzByNVGRbsSENkRQPDk7h8K7BmsOuGIKt4Tzowv6Bu0buDl4pmRRYJakO4M6odIKc/VIuD2E35bIpVh3XNAy52dBZc7EGvzwlTBId2A22KfYMcCC0Ac0M0IsBUSDrP2vsLPMjEKN6hGzhEqFOkLjo+XZqfN/bJ6iArkCP3YtCqTwOm+kNu5b9/8d7tzGag3j7C/iRp8U6L2bAEWo+6sCRDiXlBjM4Ob2t8AbTAO1SqEwfAdMOCavVImDr8kaE8NynBT9b25YWEnlMJemZgI0Uiky3elQGdMPM3kUmWuh37jx3AyFhe4kBRLvUOYAlCxmhOQgBESffwVZoPJ+emz/JqqtYgaXsZUTZVCAkeeNFWDNby0v54y56sVT9C6cXhgtCL/Irb/Or9vfv8KGaEgRBSgrBBPrdzGLR6DZSjXX+qHIHmcLlkdrKEgL68jZpFu3Exjnipc1yipejsOpUZhl7OgkgXRJ5CBsi9mvH19d4EiSpYQTQVlLzlS57hdJMvipEjAw5d0YUxwuye7ldgGILiBNMKmzq8gSTgwKyvAU0bcgXDvOM4qltplmhNt3QAmnlvt8wzv4doMhs1D4h0VtYEvKOSwTxkLJP3eOzd0fJRiwwjDe+Pp40VRXBtnaeO91YBTqQgqNV7Nw2sXuva9mSUzud3ac1LplEZZdTNYbO6AsuqP+OUPCGWM2xf3aKydOVoy8psx//llIgcIwwn/y2vaJj4cQC6MJZa3k1mySFaFGqt7pO8bTm1kfi9KU8P6E/VhED0sfCxA2T1ZM+EwwmHUKaYc8VfJIVDCMT6Zc5STqMq8RCYmumEsH8vnl/v72w50PMBAijAtFssKnj3mnS4N5sNdoyxo13dXQ3Nen4MdADqS6qNWDyNLstQ5vpAJK6AbfOqsPU2elpDBOH0OLRBkzLtT99/7e0be2ygUkqQAJ3QCVQmCovFM0W5eIU868MiILYewlJF004EtIGgwhFga8hKQB0vata4NKkCk7zUOy8u8nLz/vzv1OHE+FEV4UTYcDxoqnxOGD9tNiHFOc54h4coznpjhmGk2RQ0gyniYvJcqBVDmWLAfR5SSEeUbivuMWn7+fVWJkDh8SMx2fQB7Dnw/4iQR+OMgB6fqURm6l7F2VUmn9PgWMNIQvHPzqWNYKgIcRby0NXN/8u+lHIEUIdZX+3iwvce4lzkVLhZc4933EuTFRbo4u1eAINyy+TdicGgrvqRtRg6NafEx7hhHtpX310r56aV89n/ZVvT/+96uO7tXCG9EVIFwr1DqC1nvMGEfpnuc+f/lp36qqyBverKSC+5tbWHG51cDMax34aX8mUwogRcEZ9cd8QBuFJAcp+O7tIT8eqBZzQmKccq2jET3qlTHEKdeFlYll56FEZmQL8jzzUOm25ESc1K8LoDKmJz84kx20dqX8+RhnwP2ulPZbVQ6FN+pxMO+j5PMN64t6DJEhPGqM2PA9ZLC+0D1NRX8uaE9b3A4M1/PUuWMC9YAwPSZIR4foiwP0mfBsaJFozROfoT9tgG4scS61P19fU8oElVfzbP2eqvwx9S0qBIMqZ8IXOhWpfbi7+9K1oT44nRiH9TCM/f61EWat9s9H9AOc5iiCzqU0G8w8+79hAnL9dh8FmmXfa+3jgX+s6V04sZAzYQ22wignO1SldgXRZclbbwUGBV/qwIsGfKkDB0OIhQEvdeAAacNz/mnIcp8T//41jiwFbhcOO1Th4JgjCxQTw7wJj2k1SgxpjTZEZM5qbehK2vXGE34H3gpnYVNt04boqQ7F39r0zqYDD8Nrm9bjjct07mz9h1PFz9u4DIfQhyRH7R9vm2od3NjccmLYI4YQ5yaxlKGdQCG3HLN1CNfX+7/rXvm7YIFwQYacPaLaeQD95za5XIclPUV/XBrCw1Eezf7CKv2woqVArdYbcvVwRa4qGPUXb4EJIK0p71n0Nk8a3l5NzLSrv4ZcGhZ9v6QN8bWGwuoNoMgKyYR5B6k1IKSBHZrWFPYrY0UtZB5lZp0OyTPUJvEy9gokZD3Fs2zXa/Tpa/m42tvKwYLQDsW69BqkVmE5T5rrvn6Eb1YGaOhTq7N/eLC15BVqy80V/EsqIJChTwTKGuDUrRobfPxhT8gtg3zIZebVzpBknAns1P+85abeMjxnr5PEWFqpH+2yc+xEPuvJXWJivZFGzqjeLARRb4AYg3lxGiL8Jjh7QK+LfhdqbneP3+RWwPKCY47ChBo7k6h99EiJoRv/GpWaka/gTobivNrHlYLvXKJnCBMapMDWDVe+Ad0Uppw7hP44KiWV8xkX0NbsEUXrXqDE99CRKMgtN6zgCIbl2LOH3bJ53aqewO6fDpobXqXKBrWg0OJgVMkqUAzyGP+g6qL5XXWgur1kjayWqwvc5cGBMWle2PVwydEThbG7CsCtW8bfQVA7YdElKKYW2+ZD2DKzASHFe8c9u5aBWTaMLg80W9ZPDvRbyjn+QWWGP45ykVg71geSFnWP8kTSIAdwGiccjUG1LAkWNuVMb0Jp5VBAQAFGFowO0qFz9hZRZLuRGqGS7Y9Y2SLz/YZ0B19lxla7a/rwqbrgkgo+msrm0flYmbg1G6/PErQaOSXnsJelr0KXhKHXY+kK+GT9WwEaUcZ365Kku0Thuu/VVE+l1TtIlXxAl0JvxT6tKtGO6FwsqWtHD2MqJQ8mdObG3+k+0xh/PIV70mmZSIOR06IRF8ogER96i9ToxPBpA0+7ojgm8JHh55yS31CSf6x
z2LYYpK/z94RKPbku4U2HzVmojxswEbb6ruB+wzQwDVaHpK+8BP6w0pDW2w87FaBSrNg6CfnVdLuSp+YliLJBIaAbIhwV1C92aLNAYw5qI3j5/jUP56alikGL0VzXlsAl/LZ/oKFgdNyW3b279cLNumdbiGkU2eKtaF+CPduuzByvzemz0zxlwjeWWpfl93eaU1+i3beclF6i16RZwWWpQCMJ8KHWpwY9Ow9PnQ9cGvmrFeKme54NkzbDezl+7fiJePtMUpKh3l1HCEEKvZGHbdvYE0Tl3RdmJCUpLtoY6uialAqdJfAnIu5+1GPZbZlDT13NgrMmb+FbYqWdQTrSzMLmM5/FGmrYJqJJ7RqNbaQpn1tQe995VNT/CE7jN1NeHeKe42dE9pWgVGTdtNlUJ1rvwsgDj7SSgkXHof6nYmNqnthHFT8hN2TvENe3n4ESf2Cl4QSONtw3OZqNzDyKyhPCiwCozPB0CCbWbP66UO/mUVyepERjlpS/mkMoRT3dGjmwRt2MK39Yx/m9X+Ki/NUeSNGV/WtFhEsQAhrQkiPfQWbR5bblldc3X3oSWafYPmeeTJ/w4xluXm++NHLyQ17q33QsTa0LpGzFaOKw5tZM2584sHzVb8tJ1jRiheGENfdPBZx6N/14lxv0ewWj9T34tYLDN9lfuhrh8LzTAsqcfCX/VLRyTO6XzXLp45O9OcanXo3nM/zwGgpU5QNfLUOU2gDlRFfVsQfhauT949uCoh8hI7t33lDOgPV1CovwdAoxZYua5AV39OTf6fBIePU4iLTG35qRnudWwotpfG+gTNeftdfv1ZmuCKiyasJ5PcMsvHDnG5vk/wcAAP//BgUovg==" } diff --git a/x-pack/metricbeat/module/googlecloud/loadbalancing/_meta/data.json b/x-pack/metricbeat/module/googlecloud/loadbalancing/_meta/data.json index 2106cb1e277..97a9192732a 100644 --- a/x-pack/metricbeat/module/googlecloud/loadbalancing/_meta/data.json +++ b/x-pack/metricbeat/module/googlecloud/loadbalancing/_meta/data.json @@ -14,120 +14,30 @@ "googlecloud": { "labels": { "metrics": { - "cache_result": "DISABLED", - "proxy_continent": "Europe", - "response_code": "502", - "response_code_class": "500" + "client_network": "ocp-be-c5kjr-network", + "client_subnetwork": "ocp-be-c5kjr-worker-subnet", + "client_zone": "us-central1-a" }, "resource": { - "backend_name": "INVALID_BACKEND", - "backend_scope": "INVALID_BACKEND", - "backend_scope_type": "INVALID_BACKEND", - "backend_target_name": "test1-backend-ks", + "backend_name": "ocp-be-c5kjr-master-us-central1-a", + "backend_scope": "us-central1-a", + "backend_scope_type": "ZONE", + "backend_subnetwork_name": "ocp-be-c5kjr-master-subnet", + "backend_target_name": "ocp-be-c5kjr-api-internal", "backend_target_type": "BACKEND_SERVICE", - "backend_type": "INVALID_BACKEND", - "forwarding_rule_name": "test-lb-ks-forwarding-rule", - "matched_url_path_rule": "UNMATCHED", - "region": "global", - "target_proxy_name": "test-lb-ks-target-proxy", - "url_map_name": "test-lb-ks" + "backend_type": "INSTANCE_GROUP", + "forwarding_rule_name": "ocp-be-c5kjr-api-internal", + "load_balancer_name": "ocp-be-c5kjr-api-internal", + "network_name": "ocp-be-c5kjr-network", + "region": "us-central1" } }, "loadbalancing": { - "https": { - "backend_latencies": { - "count": 4, - "mean": 97.927, - "bucket_options": { - "Options": { - "ExponentialBuckets": { - "num_finite_buckets": 66, - "growth_factor": 1.4, - "scale": 1 - } - } - }, - "bucket_counts": [ - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 4 - ] - }, - "backend_request_bytes_count": 736, - "backend_request_count": 4, - "backend_response_bytes_count": 1952, - "frontend_tcp_rtt": { - "count": 4, - "mean": 50, - "bucket_options": { - "Options": { - "ExponentialBuckets": { - "num_finite_buckets": 66, - "growth_factor": 1.4, - "scale": 1 - } - } - }, - "bucket_counts": [ - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 4 - ] - }, - "request_bytes_count": 736, - "request_count": 4, - "response_bytes_count": 1952, - "total_latencies": { - "count": 4, - "mean": 98.423, - "bucket_options": { - "Options": { - "ExponentialBuckets": { - "num_finite_buckets": 66, - "growth_factor": 1.4, - "scale": 1 - } - } - }, - "bucket_counts": [ - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 4 - ] + "l3": { + "internal": { 
+ "egress_packets_count": { + "value": 0 + } } } } @@ -139,4 +49,4 @@ "service": { "type": "googlecloud" } -} \ No newline at end of file +} diff --git a/x-pack/metricbeat/module/googlecloud/loadbalancing/_meta/data_l3.json b/x-pack/metricbeat/module/googlecloud/loadbalancing/_meta/data_l3.json new file mode 100644 index 00000000000..9a58a5ebc5b --- /dev/null +++ b/x-pack/metricbeat/module/googlecloud/loadbalancing/_meta/data_l3.json @@ -0,0 +1,50 @@ +{ + "@timestamp": "2017-10-12T08:05:34.853Z", + "cloud": { + "account": { + "id": "elastic-observability" + }, + "provider": "googlecloud" + }, + "event": { + "dataset": "googlecloud.loadbalancing", + "duration": 115000, + "module": "googlecloud" + }, + "googlecloud": { + "labels": { + "metrics": { + "client_network": "UNKNOWN", + "client_subnetwork": "REMOTE_IS_EXTERNAL", + "client_zone": "UNKNOWN" + }, + "resource": { + "backend_name": "ocp-be-c5kjr-master-us-central1-c", + "backend_scope": "us-central1-c", + "backend_scope_type": "ZONE", + "backend_subnetwork_name": "ocp-be-c5kjr-master-subnet", + "backend_target_name": "ocp-be-c5kjr-api-internal", + "backend_target_type": "BACKEND_SERVICE", + "backend_type": "INSTANCE_GROUP", + "forwarding_rule_name": "ocp-be-c5kjr-api-internal", + "load_balancer_name": "ocp-be-c5kjr-api-internal", + "network_name": "ocp-be-c5kjr-network", + "region": "us-central1" + } + }, + "loadbalancing": { + "l3": { + "internal": { + "egress_packets_count": 394 + } + } + } + }, + "metricset": { + "name": "loadbalancing", + "period": 10000 + }, + "service": { + "type": "googlecloud" + } +} \ No newline at end of file diff --git a/x-pack/metricbeat/module/googlecloud/loadbalancing/_meta/docs.asciidoc b/x-pack/metricbeat/module/googlecloud/loadbalancing/_meta/docs.asciidoc index 256e744ba6b..2022b44d1c7 100644 --- a/x-pack/metricbeat/module/googlecloud/loadbalancing/_meta/docs.asciidoc +++ b/x-pack/metricbeat/module/googlecloud/loadbalancing/_meta/docs.asciidoc @@ -3,25 +3,32 @@ Load Balancing metricset to fetch metrics from https://cloud.google.com/load-bal The `loadbalancing` metricset contains all metrics exported from the https://cloud.google.com/monitoring/api/metrics_gcp#gcp-loadbalancing[Stackdriver API]. The field names have been left untouched for people already familiar with them. [float] -=== Fields +=== Metrics +Here is a list of metrics collected by `loadbalancing` metricset: + +[float] +==== https -- `loadbalancing.https.backend_latencies`: A distribution of the latency calculated from when the request was sent by the proxy to the backend until the proxy received from the backend the last byte of response. - `loadbalancing.https.backend_request_bytes_count`: The number of bytes sent as requests from HTTP/S load balancer to backends. - `loadbalancing.https.backend_request_count`: The number of requests served by backends of HTTP/S load balancer. - `loadbalancing.https.backend_response_bytes_count`: The number of bytes sent as responses from backends (or cache) to HTTP/S load balancer. -- `loadbalancing.https.frontend_tcp_rtt`: A distribution of the RTT measured for each connection between client and proxy. - `loadbalancing.https.request_bytes_count`: The number of bytes sent as requests from clients to HTTP/S load balancer. - `loadbalancing.https.request_count`: The number of requests served by HTTP/S load balancer. - `loadbalancing.https.response_bytes_count`: The number of bytes sent as responses from HTTP/S load balancer to clients. 
-- `loadbalancing.https.total_latencies`: A distribution of the latency calculated from when the request was received by the proxy until the proxy got ACK from client on last response byte. + +[float] +==== l3 + - `loadbalancing.l3.internal.egress_bytes_count`: The number of bytes sent from ILB backend to client (for TCP flows it's counting bytes on application stream only). - `loadbalancing.l3.internal.egress_packets_count`: The number of packets sent from ILB backend to client of the flow. - `loadbalancing.l3.internal.ingress_bytes_count`: The number of bytes sent from client to ILB backend (for TCP flows it's counting bytes on application stream only). - `loadbalancing.l3.internal.ingress_packets_count`: The number of packets sent from client to ILB backend. -- `loadbalancing.l3.internal.rtt_latencies`: A distribution of RTT measured over TCP connections for ILB flows. + +[float] +==== tcp_ssl_proxy + - `loadbalancing.tcp_ssl_proxy.closed_connections`: Number of connections that were terminated over TCP/SSL proxy. - `loadbalancing.tcp_ssl_proxy.egress_bytes_count`: Number of bytes sent from VM to client using proxy. -- `loadbalancing.tcp_ssl_proxy.frontend_tcp_rtt`: A distribution of the smoothed RTT (in ms) measured by the proxy's TCP stack, each minute application layer bytes pass from proxy to client. - `loadbalancing.tcp_ssl_proxy.ingress_bytes_count`: Number of bytes sent from client to VM using proxy. - `loadbalancing.tcp_ssl_proxy.new_connections`: Number of connections that were created over TCP/SSL proxy. - `loadbalancing.tcp_ssl_proxy.open_connections`: Current number of outstanding connections through the TCP/SSL proxy. diff --git a/x-pack/metricbeat/module/googlecloud/loadbalancing/_meta/fields.yml b/x-pack/metricbeat/module/googlecloud/loadbalancing/_meta/fields.yml index 57761c8ea1b..93855f7ee9c 100644 --- a/x-pack/metricbeat/module/googlecloud/loadbalancing/_meta/fields.yml +++ b/x-pack/metricbeat/module/googlecloud/loadbalancing/_meta/fields.yml @@ -11,11 +11,11 @@ description: A distribution of the latency calculated from when the request was sent by the proxy to the backend until the proxy received from the backend the last byte of response. type: group fields: - - name: count + - name: count.value type: long - - name: mean + - name: mean.value type: long - - name: bucket_counts + - name: bucket_counts.value type: long - name: bucket_options type: group @@ -26,30 +26,30 @@ - name: ExponentialBuckets type: group fields: - - name: growth_factor + - name: growth_factor.value type: double - - name: scale + - name: scale.value type: long - - name: num_finite_buckets + - name: num_finite_buckets.value type: long - - name: backend_request_bytes_count + - name: backend_request_bytes_count.value type: long description: The number of bytes sent as requests from HTTP/S load balancer to backends. - - name: backend_request_count + - name: backend_request_count.value type: long description: The number of requests served by backends of HTTP/S load balancer. - - name: backend_response_bytes_count + - name: backend_response_bytes_count.value type: long description: The number of bytes sent as responses from backends (or cache) to HTTP/S load balancer. - name: frontend_tcp_rtt description: A distribution of the RTT measured for each connection between client and proxy. 
type: group fields: - - name: count + - name: count.value type: long - - name: mean + - name: mean.value type: long - - name: bucket_counts + - name: bucket_counts.value type: long - name: bucket_options type: group @@ -60,11 +60,11 @@ - name: ExponentialBuckets type: group fields: - - name: growth_factor + - name: growth_factor.value type: double - - name: scale + - name: scale.value type: long - - name: num_finite_buckets + - name: num_finite_buckets.value type: long - name: internal type: group @@ -73,11 +73,11 @@ description: A distribution of the latency calculated from when the request was sent by the proxy to the backend until the proxy received from the backend the last byte of response. type: group fields: - - name: count + - name: count.value type: long - - name: mean + - name: mean.value type: long - - name: bucket_counts + - name: bucket_counts.value type: long - name: bucket_options type: group @@ -88,30 +88,30 @@ - name: ExponentialBuckets type: group fields: - - name: growth_factor + - name: growth_factor.value type: double - - name: scale + - name: scale.value type: long - - name: num_finite_buckets + - name: num_finite_buckets.value type: long - - name: request_bytes_count + - name: request_bytes_count.value type: long description: The number of bytes sent as requests from clients to HTTP/S load balancer. - - name: request_count + - name: request_count.value type: long description: The number of requests served by HTTP/S load balancer. - - name: response_bytes_count + - name: response_bytes_count.value type: long description: The number of bytes sent as responses from HTTP/S load balancer to clients. - name: total_latencies description: A distribution of the latency calculated from when the request was received by the proxy until the proxy got ACK from client on last response byte. type: group fields: - - name: count + - name: count.value type: long - - name: mean + - name: mean.value type: long - - name: bucket_counts + - name: bucket_counts.value type: long - name: bucket_options type: group @@ -122,30 +122,30 @@ - name: ExponentialBuckets type: group fields: - - name: growth_factor + - name: growth_factor.value type: double - - name: scale + - name: scale.value type: long - - name: num_finite_buckets + - name: num_finite_buckets.value type: long - - name: request_bytes_count + - name: request_bytes_count.value type: long description: The number of bytes sent as requests from clients to HTTP/S load balancer. - - name: request_count + - name: request_count.value type: long description: The number of requests served by HTTP/S load balancer. - - name: response_bytes_count + - name: response_bytes_count.value type: long description: The number of bytes sent as responses from HTTP/S load balancer to clients. - name: total_latencies description: A distribution of the latency calculated from when the request was received by the proxy until the proxy got ACK from client on last response byte. 
type: group fields: - - name: count + - name: count.value type: long - - name: mean + - name: mean.value type: long - - name: bucket_counts + - name: bucket_counts.value type: long - name: bucket_options type: group @@ -156,37 +156,37 @@ - name: ExponentialBuckets type: group fields: - - name: growth_factor + - name: growth_factor.value type: double - - name: scale + - name: scale.value type: long - - name: num_finite_buckets + - name: num_finite_buckets.value type: long - name: l3.internal type: group description: Google Cloud Load Balancing metrics fields: - - name: egress_bytes_count + - name: egress_bytes_count.value type: long description: The number of bytes sent from ILB backend to client (for TCP flows it's counting bytes on application stream only). - - name: egress_packets_count + - name: egress_packets_count.value type: long description: The number of packets sent from ILB backend to client of the flow. - - name: ingress_bytes_count + - name: ingress_bytes_count.value type: long description: The number of bytes sent from client to ILB backend (for TCP flows it's counting bytes on application stream only). - - name: ingress_packets_count + - name: ingress_packets_count.value type: long description: The number of packets sent from client to ILB backend. - name: rtt_latencies description: A distribution of RTT measured over TCP connections for ILB flows. type: group fields: - - name: count + - name: count.value type: long - - name: mean + - name: mean.value type: long - - name: bucket_counts + - name: bucket_counts.value type: long - name: bucket_options type: group @@ -197,31 +197,31 @@ - name: ExponentialBuckets type: group fields: - - name: growth_factor + - name: growth_factor.value type: double - - name: scale + - name: scale.value type: long - - name: num_finite_buckets + - name: num_finite_buckets.value type: long - name: tcp_ssl_proxy type: group description: Google Cloud Load Balancing metrics fields: - - name: closed_connections + - name: closed_connections.value type: long description: Number of connections that were terminated over TCP/SSL proxy. - - name: egress_bytes_count + - name: egress_bytes_count.value type: long description: Number of bytes sent from VM to client using proxy. - name: frontend_tcp_rtt description: A distribution of the smoothed RTT (in ms) measured by the proxy's TCP stack, each minute application layer bytes pass from proxy to client. type: group fields: - - name: count + - name: count.value type: long - - name: mean + - name: mean.value type: long - - name: bucket_counts + - name: bucket_counts.value type: long - name: bucket_options type: group @@ -232,18 +232,18 @@ - name: ExponentialBuckets type: group fields: - - name: growth_factor + - name: growth_factor.value type: double - - name: scale + - name: scale.value type: long - - name: num_finite_buckets + - name: num_finite_buckets.value type: long - - name: ingress_bytes_count + - name: ingress_bytes_count.value type: long description: Number of bytes sent from client to VM using proxy. - - name: new_connections + - name: new_connections.value type: long description: Number of connections that were created over TCP/SSL proxy. - - name: open_connections + - name: open_connections.value type: long description: Current number of outstanding connections through the TCP/SSL proxy. 
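The wholesale `*.value` renames in this fields.yml correspond to the `DefaultAligner` (`ALIGN_NONE`) entry of the `AlignersMapToSuffix` table added to constants.go above; a different per-series aligner would select a different suffix (for example, `ALIGN_MEAN` maps to `.avg`). Below is a minimal Go sketch of that lookup, assuming unrecognized aligner names fall back to the default rather than erroring; `suffixFor` is a hypothetical helper and the table is abridged to three of its entries.

package main

import "fmt"

// Abridged copy of the AlignersMapToSuffix table from constants.go.
var alignersMapToSuffix = map[string]string{
	"ALIGN_NONE": ".value",
	"ALIGN_MEAN": ".avg",
	"ALIGN_RATE": ".rate",
}

const defaultAligner = "ALIGN_NONE"

// suffixFor resolves the field-name suffix for a configured aligner, falling
// back to the default aligner's suffix when the name is not recognized.
func suffixFor(aligner string) string {
	if s, ok := alignersMapToSuffix[aligner]; ok {
		return s
	}
	return alignersMapToSuffix[defaultAligner]
}

func main() {
	fmt.Println("utilization" + suffixFor("ALIGN_MEAN")) // utilization.avg
	fmt.Println("utilization" + suffixFor(""))           // utilization.value
}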
diff --git a/x-pack/metricbeat/module/googlecloud/loadbalancing/loadbalancing_integration_test.go b/x-pack/metricbeat/module/googlecloud/loadbalancing/loadbalancing_integration_test.go index 80a5e99c57e..c070d96a736 100644 --- a/x-pack/metricbeat/module/googlecloud/loadbalancing/loadbalancing_integration_test.go +++ b/x-pack/metricbeat/module/googlecloud/loadbalancing/loadbalancing_integration_test.go @@ -8,14 +8,38 @@ package loadbalancing import ( + "fmt" "testing" + "github.com/elastic/beats/v7/libbeat/common" mbtest "github.com/elastic/beats/v7/metricbeat/mb/testing" - "github.com/elastic/beats/v7/x-pack/metricbeat/module/googlecloud" + "github.com/elastic/beats/v7/x-pack/metricbeat/module/googlecloud/stackdriver" ) func TestData(t *testing.T) { - config := googlecloud.GetConfigForTest(t, "loadbalancing") - metricSet := mbtest.NewFetcher(t, config) - metricSet.WriteEvents(t, "/") + metricPrefixIs := func(metricPrefix string) func(e common.MapStr) bool { + return func(e common.MapStr) bool { + v, err := e.GetValue(metricPrefix) + return err == nil && v != nil + } + } + + dataFiles := []struct { + metricPrefix string + path string + }{ + {"googlecloud.loadbalancing", "./_meta/data.json"}, + {"googlecloud.loadbalancing.https", "./_meta/data_https.json"}, + {"googlecloud.loadbalancing.l3", "./_meta/data_l3.json"}, + {"googlecloud.loadbalancing.tcp_ssl_proxy", "./_meta/data_tcp_ssl_proxy.json"}, + } + + config := stackdriver.GetConfigForTest(t, "loadbalancing") + + for _, df := range dataFiles { + metricSet := mbtest.NewFetcher(t, config) + t.Run(fmt.Sprintf("metric prefix: %s", df.metricPrefix), func(t *testing.T) { + metricSet.WriteEventsCond(t, df.path, metricPrefixIs(df.metricPrefix)) + }) + } } diff --git a/x-pack/metricbeat/module/googlecloud/loadbalancing/manifest.yml b/x-pack/metricbeat/module/googlecloud/loadbalancing/manifest.yml index 479f92c94d4..5ec1dc8d417 100644 --- a/x-pack/metricbeat/module/googlecloud/loadbalancing/manifest.yml +++ b/x-pack/metricbeat/module/googlecloud/loadbalancing/manifest.yml @@ -6,28 +6,19 @@ input: stackdriver: service: loadbalancing metrics: - - "loadbalancing.googleapis.com/https/backend_latencies" - - "loadbalancing.googleapis.com/https/backend_latencies" - - "loadbalancing.googleapis.com/https/backend_request_bytes_count" - - "loadbalancing.googleapis.com/https/backend_request_count" - - "loadbalancing.googleapis.com/https/backend_response_bytes_count" - - "loadbalancing.googleapis.com/https/frontend_tcp_rtt" - - "loadbalancing.googleapis.com/https/request_bytes_count" - - "loadbalancing.googleapis.com/https/request_bytes_count" - - "loadbalancing.googleapis.com/https/request_count" - - "loadbalancing.googleapis.com/https/request_count" - - "loadbalancing.googleapis.com/https/response_bytes_count" - - "loadbalancing.googleapis.com/https/response_bytes_count" - - "loadbalancing.googleapis.com/https/total_latencies" - - "loadbalancing.googleapis.com/https/total_latencies" - - "loadbalancing.googleapis.com/l3/internal/egress_bytes_count" - - "loadbalancing.googleapis.com/l3/internal/egress_packets_count" - - "loadbalancing.googleapis.com/l3/internal/ingress_bytes_count" - - "loadbalancing.googleapis.com/l3/internal/ingress_packets_count" - - "loadbalancing.googleapis.com/l3/internal/rtt_latencies" - - "loadbalancing.googleapis.com/tcp_ssl_proxy/closed_connections" - - "loadbalancing.googleapis.com/tcp_ssl_proxy/egress_bytes_count" - - "loadbalancing.googleapis.com/tcp_ssl_proxy/frontend_tcp_rtt" - - 
"loadbalancing.googleapis.com/tcp_ssl_proxy/ingress_bytes_count" - - "loadbalancing.googleapis.com/tcp_ssl_proxy/new_connections" - - "loadbalancing.googleapis.com/tcp_ssl_proxy/open_connections" + - metric_types: + - "loadbalancing.googleapis.com/https/backend_request_bytes_count" + - "loadbalancing.googleapis.com/https/backend_request_count" + - "loadbalancing.googleapis.com/https/backend_response_bytes_count" + - "loadbalancing.googleapis.com/https/request_bytes_count" + - "loadbalancing.googleapis.com/https/request_count" + - "loadbalancing.googleapis.com/https/response_bytes_count" + - "loadbalancing.googleapis.com/l3/internal/egress_bytes_count" + - "loadbalancing.googleapis.com/l3/internal/egress_packets_count" + - "loadbalancing.googleapis.com/l3/internal/ingress_bytes_count" + - "loadbalancing.googleapis.com/l3/internal/ingress_packets_count" + - "loadbalancing.googleapis.com/tcp_ssl_proxy/closed_connections" + - "loadbalancing.googleapis.com/tcp_ssl_proxy/egress_bytes_count" + - "loadbalancing.googleapis.com/tcp_ssl_proxy/ingress_bytes_count" + - "loadbalancing.googleapis.com/tcp_ssl_proxy/new_connections" + - "loadbalancing.googleapis.com/tcp_ssl_proxy/open_connections" diff --git a/x-pack/metricbeat/module/googlecloud/pubsub/_meta/data.json b/x-pack/metricbeat/module/googlecloud/pubsub/_meta/data.json index 86da8d924e5..c1a0365401d 100644 --- a/x-pack/metricbeat/module/googlecloud/pubsub/_meta/data.json +++ b/x-pack/metricbeat/module/googlecloud/pubsub/_meta/data.json @@ -14,34 +14,14 @@ "googlecloud": { "labels": { "resource": { - "subscription_id": "test-ks" + "subscription_id": "test-subscription-1" } }, "pubsub": { - "snapshot": { - "backlog_bytes": 19, - "backlog_bytes_by_region": 19, - "num_messages": 4, - "num_messages_by_region": 4, - "oldest_message_age": 69319, - "oldest_message_age_by_region": 69319 - }, "subscription": { - "backlog_bytes": 0, - "num_undelivered_messages": 0, - "oldest_retained_acked_message_age": 0, - "oldest_retained_acked_message_age_by_region": 0, - "oldest_unacked_message_age": 0, - "oldest_unacked_message_age_by_region": 69277, - "retained_acked_bytes": 0, - "retained_acked_bytes_by_region": 0, - "unacked_bytes_by_region": 19 - }, - "topic": { - "oldest_retained_acked_message_age_by_region": 0, - "oldest_unacked_message_age_by_region": 69319, - "retained_acked_bytes_by_region": 0, - "unacked_bytes_by_region": 76 + "backlog_bytes": { + "value": 0 + } } } }, diff --git a/x-pack/metricbeat/module/googlecloud/pubsub/_meta/data_subscription.json b/x-pack/metricbeat/module/googlecloud/pubsub/_meta/data_subscription.json new file mode 100644 index 00000000000..13c2724143f --- /dev/null +++ b/x-pack/metricbeat/module/googlecloud/pubsub/_meta/data_subscription.json @@ -0,0 +1,35 @@ +{ + "@timestamp": "2017-10-12T08:05:34.853Z", + "cloud": { + "account": { + "id": "elastic-observability" + }, + "provider": "googlecloud" + }, + "event": { + "dataset": "googlecloud.pubsub", + "duration": 115000, + "module": "googlecloud" + }, + "googlecloud": { + "labels": { + "resource": { + "subscription_id": "test-ks" + } + }, + "pubsub": { + "subscription": { + "backlog_bytes": { + "value": 0 + } + } + } + }, + "metricset": { + "name": "pubsub", + "period": 10000 + }, + "service": { + "type": "googlecloud" + } +} \ No newline at end of file diff --git a/x-pack/metricbeat/module/googlecloud/pubsub/_meta/data_topic.json b/x-pack/metricbeat/module/googlecloud/pubsub/_meta/data_topic.json new file mode 100644 index 00000000000..7f296406136 --- /dev/null +++ 
b/x-pack/metricbeat/module/googlecloud/pubsub/_meta/data_topic.json @@ -0,0 +1,43 @@ +{ + "@timestamp": "2017-10-12T08:05:34.853Z", + "cloud": { + "account": { + "id": "elastic-observability" + }, + "provider": "googlecloud" + }, + "event": { + "dataset": "googlecloud.pubsub", + "duration": 115000, + "module": "googlecloud" + }, + "googlecloud": { + "labels": { + "resource": { + "topic_id": "test-ks" + } + }, + "pubsub": { + "topic": { + "message_sizes": { + "bucket_options": { + "Options": { + "ExponentialBuckets": { + "num_finite_buckets": 16, + "growth_factor": 4, + "scale": 1 + } + } + } + } + } + } + }, + "metricset": { + "name": "pubsub", + "period": 10000 + }, + "service": { + "type": "googlecloud" + } +} \ No newline at end of file diff --git a/x-pack/metricbeat/module/googlecloud/pubsub/_meta/docs.asciidoc b/x-pack/metricbeat/module/googlecloud/pubsub/_meta/docs.asciidoc index cd6e94083ef..f8faf2bada0 100644 --- a/x-pack/metricbeat/module/googlecloud/pubsub/_meta/docs.asciidoc +++ b/x-pack/metricbeat/module/googlecloud/pubsub/_meta/docs.asciidoc @@ -1,9 +1,13 @@ -PubSub Metricset to fetch metrics from https://cloud.google.com/pubsub/[Pub/Sub] topics and subscriptions in Google Cloud Platform. +PubSub metricset to fetch metrics from https://cloud.google.com/pubsub/[Pub/Sub] topics and subscriptions in Google Cloud Platform. -The `pubsub` Metricset contains all GA stage metrics exported from the https://cloud.google.com/monitoring/api/metrics_gcp#gcp-pubsub[Stackdriver API]. The field names have been left untouched for people already familiar with them. +The `pubsub` metricset contains all GA stage metrics exported from the https://cloud.google.com/monitoring/api/metrics_gcp#gcp-pubsub[Stackdriver API]. The field names have been left untouched for people already familiar with them. No special permissions are needed apart from the ones detailed in the module section of the docs. +[float] +=== Metrics +Here is a list of metrics collected by the `pubsub` metricset: + [float] ==== Snapshot Metrics - `pubsub.snapshot.backlog_bytes`: Total byte size of the messages retained in a snapshot. @@ -35,7 +39,6 @@ No special permissions are needed apart from the ones detailed in the module sec - `pubsub.subscription.pull_message_operation_count`: Cumulative count of pull message operations, grouped by result. For a definition of message operations, see Cloud Pub/Sub metric subscription/mod_ack_deadline_message_operation_count. - `pubsub.subscription.pull_request_count`: Cumulative count of pull requests, grouped by result. - `pubsub.subscription.push_request_count`: Cumulative count of push attempts, grouped by result. Unlike pulls, the push server implementation does not batch user messages. So each request only contains one user message. The push server retries on errors, so a given user message can appear multiple times. -- `pubsub.subscription.push_request_latencies`: Distribution of push request latencies (in microseconds), grouped by result. - `pubsub.subscription.retained_acked_bytes`: Total byte size of the acknowledged messages retained in a subscription. - `pubsub.subscription.retained_acked_bytes_by_region`: Total byte size of the acknowledged messages retained in a subscription, broken down by Cloud region. - `pubsub.subscription.seek_request_count`: Cumulative count of seek attempts, grouped by result. @@ -52,7 +55,6 @@ No special permissions are needed apart from the ones detailed in the module sec ==== Topic Metrics - `pubsub.topic.byte_cost`: Cost of operations, measured in bytes.
This is used to measure utilization for quotas. - `pubsub.topic.config_updates_count`: Cumulative count of configuration changes, grouped by operation type and result. -- `pubsub.topic.message_sizes`: Distribution of publish message sizes (in bytes). - `pubsub.topic.oldest_retained_acked_message_age_by_region`: Age (in seconds) of the oldest acknowledged message retained in a topic, broken down by Cloud region. - `pubsub.topic.oldest_unacked_message_age_by_region`: Age (in seconds) of the oldest unacknowledged message in a topic, broken down by Cloud region. - `pubsub.topic.retained_acked_bytes_by_region`: Total byte size of the acknowledged messages retained in a topic, broken down by Cloud region. diff --git a/x-pack/metricbeat/module/googlecloud/pubsub/_meta/fields.yml b/x-pack/metricbeat/module/googlecloud/pubsub/_meta/fields.yml index 32e4acbb521..ae6443e219f 100644 --- a/x-pack/metricbeat/module/googlecloud/pubsub/_meta/fields.yml +++ b/x-pack/metricbeat/module/googlecloud/pubsub/_meta/fields.yml @@ -7,152 +7,155 @@ type: group description: Subscription related metrics fields: - - name: ack_message_count + - name: ack_message_count.value type: long description: Cumulative count of messages acknowledged by Acknowledge requests, grouped by delivery type. - - name: backlog_bytes + - name: backlog_bytes.value type: long description: Total byte size of the unacknowledged messages (a.k.a. backlog messages) in a subscription. - - name: num_outstanding_messages + - name: num_outstanding_messages.value type: long description: Number of messages delivered to a subscription's push endpoint, but not yet acknowledged. - - name: num_undelivered_messages + - name: num_undelivered_messages.value type: long description: Number of unacknowledged messages (a.k.a. backlog messages) in a subscription. - - name: oldest_unacked_message_age + - name: oldest_unacked_message_age.value type: long description: Age (in seconds) of the oldest unacknowledged message (a.k.a. backlog message) in a subscription. - - name: pull_ack_message_operation_count + - name: pull_ack_message_operation_count.value type: long description: Cumulative count of acknowledge message operations, grouped by result. For a definition of message operations, see Cloud Pub/Sub metric subscription/mod_ack_deadline_message_operation_count. - - name: pull_ack_request_count + - name: pull_ack_request_count.value type: long description: Cumulative count of acknowledge requests, grouped by result. - - name: pull_message_operation_count + - name: pull_message_operation_count.value type: long description: Cumulative count of pull message operations, grouped by result. For a definition of message operations, see Cloud Pub/Sub metric subscription/mod_ack_deadline_message_operation_count. - - name: pull_request_count + - name: pull_request_count.value type: long description: Cumulative count of pull requests, grouped by result. - - name: push_request_count + - name: push_request_count.value type: long description: Cumulative count of push attempts, grouped by result. Unlike pulls, the push server implementation does not batch user messages. So each request only contains one user message. The push server retries on errors, so a given user message can appear multiple times. - - name: push_request_latencies + - name: push_request_latencies.value type: long description: Distribution of push request latencies (in microseconds), grouped by result.
- - name: sent_message_count + - name: sent_message_count.value type: long description: Cumulative count of messages sent by Cloud Pub/Sub to subscriber clients, grouped by delivery type. - - name: streaming_pull_ack_message_operation_count + - name: streaming_pull_ack_message_operation_count.value type: long description: Cumulative count of StreamingPull acknowledge message operations, grouped by result. For a definition of message operations, see Cloud Pub/Sub metric subscription/mod_ack_deadline_message_operation_count. - - name: streaming_pull_ack_request_count + - name: streaming_pull_ack_request_count.value type: long description: Cumulative count of streaming pull requests with non-empty acknowledge ids, grouped by result. - - name: streaming_pull_message_operation_count + - name: streaming_pull_message_operation_count.value type: long description: Cumulative count of streaming pull message operations, grouped by result. For a definition of message operations, see Cloud Pub/Sub metric subscription/mod_ack_deadline_message_operation_count - - name: streaming_pull_response_count + - name: streaming_pull_response_count.value type: long description: Cumulative count of streaming pull responses, grouped by result. - - name: dead_letter_message_count + - name: dead_letter_message_count.value type: long description: Cumulative count of messages published to dead letter topic, grouped by result. - - name: mod_ack_deadline_message_count + - name: mod_ack_deadline_message_count.value type: long description: Cumulative count of messages whose deadline was updated by ModifyAckDeadline requests, grouped by delivery type. - - name: mod_ack_deadline_message_operation_count + - name: mod_ack_deadline_message_operation_count.value type: long description: Cumulative count of ModifyAckDeadline message operations, grouped by result. - - name: mod_ack_deadline_request_count + - name: mod_ack_deadline_request_count.value type: long description: Cumulative count of ModifyAckDeadline requests, grouped by result. - - name: oldest_retained_acked_message_age + - name: oldest_retained_acked_message_age.value type: long description: Age (in seconds) of the oldest acknowledged message retained in a subscription. - - name: oldest_retained_acked_message_age_by_region + - name: oldest_retained_acked_message_age_by_region.value type: long description: Age (in seconds) of the oldest acknowledged message retained in a subscription, broken down by Cloud region. - - name: oldest_unacked_message_age_by_region + - name: oldest_unacked_message_age_by_region.value type: long description: Age (in seconds) of the oldest unacknowledged message in a subscription, broken down by Cloud region. - - name: retained_acked_bytes + - name: retained_acked_bytes.value type: long description: Total byte size of the acknowledged messages retained in a subscription. - - name: retained_acked_bytes_by_region + - name: retained_acked_bytes_by_region.value type: long description: Total byte size of the acknowledged messages retained in a subscription, broken down by Cloud region. - - name: seek_request_count + - name: seek_request_count.value type: long description: Cumulative count of seek attempts, grouped by result. - - name: streaming_pull_mod_ack_deadline_message_operation_count + - name: streaming_pull_mod_ack_deadline_message_operation_count.value type: long description: Cumulative count of StreamingPull ModifyAckDeadline operations, grouped by result. 
- - name: streaming_pull_mod_ack_deadline_request_count + - name: streaming_pull_mod_ack_deadline_request_count.value type: long description: Cumulative count of streaming pull requests with non-empty ModifyAckDeadline fields, grouped by result. - - name: byte_cost + - name: byte_cost.value type: long description: Cumulative cost of operations, measured in bytes. This is used to measure quota utilization. - - name: config_updates_count + - name: config_updates_count.value type: long description: Cumulative count of configuration changes for each subscription, grouped by operation type and result. - - name: unacked_bytes_by_region + - name: unacked_bytes_by_region.value type: long description: Total byte size of the unacknowledged messages in a subscription, broken down by Cloud region. - name: topic type: group description: Topic related metrics fields: - - name: streaming_pull_response_count + - name: streaming_pull_response_count.value type: long description: Cumulative count of streaming pull responses, grouped by result. - - name: send_message_operation_count + - name: send_message_operation_count.value type: long description: Cumulative count of publish message operations, grouped by result. For a definition of message operations, see Cloud Pub/Sub metric subscription/mod_ack_deadline_message_operation_count. - - name: send_request_count + - name: send_request_count.value type: long description: Cumulative count of publish requests, grouped by result. - - name: oldest_retained_acked_message_age_by_region + - name: oldest_retained_acked_message_age_by_region.value type: long description: Age (in seconds) of the oldest acknowledged message retained in a topic, broken down by Cloud region. - - name: oldest_unacked_message_age_by_region + - name: oldest_unacked_message_age_by_region.value type: long description: Age (in seconds) of the oldest unacknowledged message in a topic, broken down by Cloud region. - - name: retained_acked_bytes_by_region + - name: retained_acked_bytes_by_region.value type: long description: Total byte size of the acknowledged messages retained in a topic, broken down by Cloud region. - - name: byte_cost + - name: byte_cost.value type: long description: Cost of operations, measured in bytes. This is used to measure utilization for quotas. - - name: config_updates_count + - name: config_updates_count.value type: long description: Cumulative count of configuration changes, grouped by operation type and result. - - name: unacked_bytes_by_region + - name: message_sizes.value + type: long + description: Distribution of publish message sizes (in bytes) + - name: unacked_bytes_by_region.value type: long description: Total byte size of the unacknowledged messages in a topic, broken down by Cloud region. - name: snapshot type: group description: Snapshot related metrics fields: - - name: oldest_message_age + - name: oldest_message_age.value type: long description: Age (in seconds) of the oldest message retained in a snapshot. - - name: oldest_message_age_by_region + - name: oldest_message_age_by_region.value type: long description: Age (in seconds) of the oldest message retained in a snapshot, broken down by Cloud region. - - name: backlog_bytes + - name: backlog_bytes.value type: long description: Total byte size of the messages retained in a snapshot. - - name: backlog_bytes_by_region + - name: backlog_bytes_by_region.value type: long description: Total byte size of the messages retained in a snapshot, broken down by Cloud region. 
- - name: num_messages + - name: num_messages.value type: long description: Number of messages retained in a snapshot. - - name: num_messages_by_region + - name: num_messages_by_region.value type: long description: Number of messages retained in a snapshot, broken down by Cloud region. - - name: config_updates_count + - name: config_updates_count.value type: long description: Cumulative count of configuration changes, grouped by operation type and result. diff --git a/x-pack/metricbeat/module/googlecloud/pubsub/manifest.yml b/x-pack/metricbeat/module/googlecloud/pubsub/manifest.yml index 3d8cdb0949c..a002820ebd6 100644 --- a/x-pack/metricbeat/module/googlecloud/pubsub/manifest.yml +++ b/x-pack/metricbeat/module/googlecloud/pubsub/manifest.yml @@ -6,50 +6,49 @@ input: stackdriver: service: pubsub metrics: - - "pubsub.googleapis.com/snapshot/backlog_bytes" - - "pubsub.googleapis.com/snapshot/backlog_bytes_by_region" - - "pubsub.googleapis.com/snapshot/config_updates_count" - - "pubsub.googleapis.com/snapshot/num_messages" - - "pubsub.googleapis.com/snapshot/num_messages_by_region" - - "pubsub.googleapis.com/snapshot/oldest_message_age" - - "pubsub.googleapis.com/snapshot/oldest_message_age_by_region" - - "pubsub.googleapis.com/subscription/ack_message_count" - - "pubsub.googleapis.com/subscription/backlog_bytes" - - "pubsub.googleapis.com/subscription/byte_cost" - - "pubsub.googleapis.com/subscription/config_updates_count" - - "pubsub.googleapis.com/subscription/dead_letter_message_count" - - "pubsub.googleapis.com/subscription/mod_ack_deadline_message_count" - - "pubsub.googleapis.com/subscription/mod_ack_deadline_message_operation_count" - - "pubsub.googleapis.com/subscription/mod_ack_deadline_request_count" - - "pubsub.googleapis.com/subscription/num_outstanding_messages" - - "pubsub.googleapis.com/subscription/num_undelivered_messages" - - "pubsub.googleapis.com/subscription/oldest_retained_acked_message_age" - - "pubsub.googleapis.com/subscription/oldest_retained_acked_message_age_by_region" - - "pubsub.googleapis.com/subscription/oldest_unacked_message_age" - - "pubsub.googleapis.com/subscription/oldest_unacked_message_age_by_region" - - "pubsub.googleapis.com/subscription/pull_ack_message_operation_count" - - "pubsub.googleapis.com/subscription/pull_ack_request_count" - - "pubsub.googleapis.com/subscription/pull_message_operation_count" - - "pubsub.googleapis.com/subscription/pull_request_count" - - "pubsub.googleapis.com/subscription/push_request_count" - - "pubsub.googleapis.com/subscription/push_request_latencies" - - "pubsub.googleapis.com/subscription/retained_acked_bytes" - - "pubsub.googleapis.com/subscription/retained_acked_bytes_by_region" - - "pubsub.googleapis.com/subscription/seek_request_count" - - "pubsub.googleapis.com/subscription/sent_message_count" - - "pubsub.googleapis.com/subscription/streaming_pull_ack_message_operation_count" - - "pubsub.googleapis.com/subscription/streaming_pull_ack_request_count" - - "pubsub.googleapis.com/subscription/streaming_pull_message_operation_count" - - "pubsub.googleapis.com/subscription/streaming_pull_mod_ack_deadline_message_operation_count" - - "pubsub.googleapis.com/subscription/streaming_pull_mod_ack_deadline_request_count" - - "pubsub.googleapis.com/subscription/streaming_pull_response_count" - - "pubsub.googleapis.com/subscription/unacked_bytes_by_region" - - "pubsub.googleapis.com/topic/byte_cost" - - "pubsub.googleapis.com/topic/config_updates_count" - - "pubsub.googleapis.com/topic/message_sizes" - - 
"pubsub.googleapis.com/topic/oldest_retained_acked_message_age_by_region" - - "pubsub.googleapis.com/topic/oldest_unacked_message_age_by_region" - - "pubsub.googleapis.com/topic/retained_acked_bytes_by_region" - - "pubsub.googleapis.com/topic/send_message_operation_count" - - "pubsub.googleapis.com/topic/send_request_count" - - "pubsub.googleapis.com/topic/unacked_bytes_by_region" + - metric_types: + - "pubsub.googleapis.com/snapshot/backlog_bytes" + - "pubsub.googleapis.com/snapshot/backlog_bytes_by_region" + - "pubsub.googleapis.com/snapshot/config_updates_count" + - "pubsub.googleapis.com/snapshot/num_messages" + - "pubsub.googleapis.com/snapshot/num_messages_by_region" + - "pubsub.googleapis.com/snapshot/oldest_message_age" + - "pubsub.googleapis.com/snapshot/oldest_message_age_by_region" + - "pubsub.googleapis.com/subscription/ack_message_count" + - "pubsub.googleapis.com/subscription/backlog_bytes" + - "pubsub.googleapis.com/subscription/byte_cost" + - "pubsub.googleapis.com/subscription/config_updates_count" + - "pubsub.googleapis.com/subscription/dead_letter_message_count" + - "pubsub.googleapis.com/subscription/mod_ack_deadline_message_count" + - "pubsub.googleapis.com/subscription/mod_ack_deadline_message_operation_count" + - "pubsub.googleapis.com/subscription/mod_ack_deadline_request_count" + - "pubsub.googleapis.com/subscription/num_outstanding_messages" + - "pubsub.googleapis.com/subscription/num_undelivered_messages" + - "pubsub.googleapis.com/subscription/oldest_retained_acked_message_age" + - "pubsub.googleapis.com/subscription/oldest_retained_acked_message_age_by_region" + - "pubsub.googleapis.com/subscription/oldest_unacked_message_age" + - "pubsub.googleapis.com/subscription/oldest_unacked_message_age_by_region" + - "pubsub.googleapis.com/subscription/pull_ack_message_operation_count" + - "pubsub.googleapis.com/subscription/pull_ack_request_count" + - "pubsub.googleapis.com/subscription/pull_message_operation_count" + - "pubsub.googleapis.com/subscription/pull_request_count" + - "pubsub.googleapis.com/subscription/push_request_count" + - "pubsub.googleapis.com/subscription/retained_acked_bytes" + - "pubsub.googleapis.com/subscription/retained_acked_bytes_by_region" + - "pubsub.googleapis.com/subscription/seek_request_count" + - "pubsub.googleapis.com/subscription/sent_message_count" + - "pubsub.googleapis.com/subscription/streaming_pull_ack_message_operation_count" + - "pubsub.googleapis.com/subscription/streaming_pull_ack_request_count" + - "pubsub.googleapis.com/subscription/streaming_pull_message_operation_count" + - "pubsub.googleapis.com/subscription/streaming_pull_mod_ack_deadline_message_operation_count" + - "pubsub.googleapis.com/subscription/streaming_pull_mod_ack_deadline_request_count" + - "pubsub.googleapis.com/subscription/streaming_pull_response_count" + - "pubsub.googleapis.com/subscription/unacked_bytes_by_region" + - "pubsub.googleapis.com/topic/byte_cost" + - "pubsub.googleapis.com/topic/config_updates_count" + - "pubsub.googleapis.com/topic/oldest_retained_acked_message_age_by_region" + - "pubsub.googleapis.com/topic/oldest_unacked_message_age_by_region" + - "pubsub.googleapis.com/topic/retained_acked_bytes_by_region" + - "pubsub.googleapis.com/topic/send_message_operation_count" + - "pubsub.googleapis.com/topic/send_request_count" + - "pubsub.googleapis.com/topic/unacked_bytes_by_region" diff --git a/x-pack/metricbeat/module/googlecloud/pubsub/pubsub_integration_test.go b/x-pack/metricbeat/module/googlecloud/pubsub/pubsub_integration_test.go index 
90032d50310..6d739326dea 100644 --- a/x-pack/metricbeat/module/googlecloud/pubsub/pubsub_integration_test.go +++ b/x-pack/metricbeat/module/googlecloud/pubsub/pubsub_integration_test.go @@ -8,14 +8,38 @@ package pubsub import ( + "fmt" "testing" + "github.com/elastic/beats/v7/libbeat/common" mbtest "github.com/elastic/beats/v7/metricbeat/mb/testing" - "github.com/elastic/beats/v7/x-pack/metricbeat/module/googlecloud" + "github.com/elastic/beats/v7/x-pack/metricbeat/module/googlecloud/stackdriver" ) func TestData(t *testing.T) { - config := googlecloud.GetConfigForTest(t, "pubsub") - metricSet := mbtest.NewFetcher(t, config) - metricSet.WriteEvents(t, "/") + metricPrefixIs := func(metricPrefix string) func(e common.MapStr) bool { + return func(e common.MapStr) bool { + v, err := e.GetValue(metricPrefix) + return err == nil && v != nil + } + } + + dataFiles := []struct { + metricPrefix string + path string + }{ + {"googlecloud.pubsub", "./_meta/data.json"}, + {"googlecloud.pubsub.snapshot", "./_meta/data_snapshot.json"}, + {"googlecloud.pubsub.subscription", "./_meta/data_subscription.json"}, + {"googlecloud.pubsub.topic", "./_meta/data_topic.json"}, + } + + config := stackdriver.GetConfigForTest(t, "pubsub") + + for _, df := range dataFiles { + metricSet := mbtest.NewFetcher(t, config) + t.Run(fmt.Sprintf("metric prefix: %s", df.metricPrefix), func(t *testing.T) { + metricSet.WriteEventsCond(t, df.path, metricPrefixIs(df.metricPrefix)) + }) + } } diff --git a/x-pack/metricbeat/module/googlecloud/stackdriver/_meta/docs.asciidoc b/x-pack/metricbeat/module/googlecloud/stackdriver/_meta/docs.asciidoc new file mode 100644 index 00000000000..00cce58cac8 --- /dev/null +++ b/x-pack/metricbeat/module/googlecloud/stackdriver/_meta/docs.asciidoc @@ -0,0 +1,82 @@ +Stackdriver provides visibility into the performance, uptime, and overall health +of cloud-powered applications. It collects metrics, events, and metadata from +different Google Cloud services. This metricset collects monitoring +metrics from Google Cloud using the `ListTimeSeries` API. + +[float] +== Metricset config and parameters + +* *metric_types*: Required, a list of metric type strings. Each call of the +`ListTimeSeries` API can return any number of time series from a single metric +type. The metric type is used to identify a specific time series. + +* *aligner*: A single string naming the aggregation operation to apply to the +time series data in the `ListTimeSeries` API. If it is not given, the aligner +defaults to `ALIGN_NONE`. The sample period of each metric type is obtained by +making a https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors/list[ListMetricDescriptors API] call. + +[float] +=== Example Configuration +* The `stackdriver` metricset is configured to collect metrics from the +`europe-west1-c` zone in the `elastic-observability` project. Two sets of metrics +are specified: the first collects CPU usage time and utilization with the +aggregation aligner ALIGN_MEAN; the second collects uptime with the aggregation +aligner ALIGN_SUM. These metric types all have a 240-second ingest delay and a +60-second sample period. With `period` set to `300s` in the config below, +Metricbeat collects compute metrics from Google Cloud every 5 minutes, with the +given aggregation aligner applied to each metric type.
++ +[source,yaml] +---- +- module: googlecloud + metricsets: + - stackdriver + zone: "europe-west1-c" + project_id: elastic-observability + credentials_file_path: "your JSON credentials file path" + exclude_labels: false + period: 300s + stackdriver: + service: compute + metrics: + - aligner: ALIGN_MEAN + metric_types: + - "compute.googleapis.com/instance/cpu/usage_time" + - "compute.googleapis.com/instance/cpu/utilization" + - aligner: ALIGN_SUM + metric_types: + - "compute.googleapis.com/instance/uptime" + +---- + +* The `stackdriver` metricset is configured to collect metrics from the +`europe-west1-c` zone in the `elastic-observability` project. Two sets of metrics +are specified: the first collects CPU usage time and utilization with the +aggregation aligner ALIGN_MEAN; the second collects uptime with the aggregation +aligner ALIGN_SUM. These metric types all have a 240-second ingest delay and a +60-second sample period. With `period` set to `60s` in the config below, +Metricbeat collects compute metrics from Google Cloud every minute with no +aggregation. In this case, the aligners specified in the configuration are +ignored. ++ +[source,yaml] +---- +- module: googlecloud + metricsets: + - stackdriver + zone: "europe-west1-c" + project_id: elastic-observability + credentials_file_path: "your JSON credentials file path" + exclude_labels: false + period: 60s + stackdriver: + service: compute + metrics: + - aligner: ALIGN_MEAN + metric_types: + - "compute.googleapis.com/instance/cpu/usage_time" + - "compute.googleapis.com/instance/cpu/utilization" + - aligner: ALIGN_SUM + metric_types: + - "compute.googleapis.com/instance/uptime" +---- diff --git a/x-pack/metricbeat/module/googlecloud/stackdriver/compute/identity.go b/x-pack/metricbeat/module/googlecloud/stackdriver/compute/identity.go index 9367862e6c7..19e434e8df7 100644 --- a/x-pack/metricbeat/module/googlecloud/stackdriver/compute/identity.go +++ b/x-pack/metricbeat/module/googlecloud/stackdriver/compute/identity.go @@ -23,7 +23,7 @@ func (s *metadataCollector) ID(ctx context.Context, in *googlecloud.MetadataColl if in.Timestamp != nil { metadata.ECS.Put("timestamp", in.Timestamp) } else if in.Point != nil { - metadata.ECS.Put("timestamp", in.Point.Interval.StartTime) + metadata.ECS.Put("timestamp", in.Point.Interval.EndTime) } else { return "", errors.New("no timestamp information found") } diff --git a/x-pack/metricbeat/module/googlecloud/integration.go b/x-pack/metricbeat/module/googlecloud/stackdriver/integration.go similarity index 86% rename from x-pack/metricbeat/module/googlecloud/integration.go rename to x-pack/metricbeat/module/googlecloud/stackdriver/integration.go index 0e7ac055bc7..68e3750d5a5 100644 --- a/x-pack/metricbeat/module/googlecloud/integration.go +++ b/x-pack/metricbeat/module/googlecloud/stackdriver/integration.go @@ -2,7 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License; // you may not use this file except in compliance with the Elastic License.
-package googlecloud +package stackdriver import ( "os" @@ -36,7 +36,11 @@ func GetConfigForTest(t *testing.T, metricSetName string) map[string]interface{} if metricSetName == "stackdriver" { config["stackdriver.service"] = "compute" - config["stackdriver.metrics"] = []string{"compute.googleapis.com/instance/uptime"} + stackDriverConfig := stackDriverConfig{ + Aligner: "ALIGN_NONE", + MetricTypes: []string{"compute.googleapis.com/instance/uptime"}, + } + config["stackdriver.metrics"] = stackDriverConfig } } return config diff --git a/x-pack/metricbeat/module/googlecloud/stackdriver/metrics_requester.go b/x-pack/metricbeat/module/googlecloud/stackdriver/metrics_requester.go index 8d2147d285c..11b04e5a1b2 100644 --- a/x-pack/metricbeat/module/googlecloud/stackdriver/metrics_requester.go +++ b/x-pack/metricbeat/module/googlecloud/stackdriver/metrics_requester.go @@ -8,12 +8,14 @@ import ( "context" "fmt" "regexp" + "strings" "sync" "time" + "github.com/golang/protobuf/ptypes/duration" + monitoring "cloud.google.com/go/monitoring/apiv3" "github.com/golang/protobuf/ptypes/timestamp" - "github.com/pkg/errors" "google.golang.org/api/iterator" monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" @@ -21,42 +23,31 @@ import ( "github.com/elastic/beats/v7/x-pack/metricbeat/module/googlecloud" ) -func newStackdriverMetricsRequester(ctx context.Context, c config, window time.Duration, logger *logp.Logger) (*stackdriverMetricsRequester, error) { - interval, err := getTimeInterval(window) - if err != nil { - return nil, errors.Wrap(err, "error trying to get time window") - } - - client, err := monitoring.NewMetricClient(ctx, c.opt...) - if err != nil { - return nil, errors.Wrap(err, "error creating Stackdriver client") - } - - return &stackdriverMetricsRequester{ - config: c, - client: client, - logger: logger, - interval: interval, - }, nil -} - type stackdriverMetricsRequester struct { config config - client *monitoring.MetricClient - interval *monitoringpb.TimeInterval + client *monitoring.MetricClient logger *logp.Logger } -func (r *stackdriverMetricsRequester) Metric(ctx context.Context, m string) (out []*monitoringpb.TimeSeries) { - out = make([]*monitoringpb.TimeSeries, 0) +type timeSeriesWithAligner struct { + timeSeries []*monitoringpb.TimeSeries + aligner string +} + +func (r *stackdriverMetricsRequester) Metric(ctx context.Context, metricType string, timeInterval *monitoringpb.TimeInterval, aligner string) (out timeSeriesWithAligner) { + timeSeries := make([]*monitoringpb.TimeSeries, 0) req := &monitoringpb.ListTimeSeriesRequest{ Name: "projects/" + r.config.ProjectID, - Interval: r.interval, + Interval: timeInterval, View: monitoringpb.ListTimeSeriesRequest_FULL, - Filter: r.getFilterForMetric(m), + Filter: r.getFilterForMetric(metricType), + Aggregation: &monitoringpb.Aggregation{ + PerSeriesAligner: googlecloud.AlignersMapToGCP[aligner], + AlignmentPeriod: &r.config.period, + }, } it := r.client.ListTimeSeries(ctx, req) @@ -67,45 +58,41 @@ func (r *stackdriverMetricsRequester) Metric(ctx context.Context, m string) (out } if err != nil { - r.logger.Errorf("Could not read time series value: %s: %v", m, err) + r.logger.Errorf("Could not read time series value: %s: %v", metricType, err) break } - out = append(out, resp) + timeSeries = append(timeSeries, resp) } + out.aligner = aligner + out.timeSeries = timeSeries return } -func constructFilter(m string, region string, zone string) string { - filter := fmt.Sprintf(`metric.type="%s" AND resource.labels.zone = `, m) - // If region 
is specified, use region as filter resource label. - // If region is empty but zone is given, use zone instead. - if region != "" { - filter += fmt.Sprintf(`starts_with("%s")`, region) - } else if zone != "" { - filter += fmt.Sprintf(`"%s"`, zone) - } - return filter -} - -func (r *stackdriverMetricsRequester) Metrics(ctx context.Context, ms []string) ([]*monitoringpb.TimeSeries, error) { +func (r *stackdriverMetricsRequester) Metrics(ctx context.Context, stackDriverConfigs []stackDriverConfig, metricsMeta map[string]metricMeta) ([]timeSeriesWithAligner, error) { var lock sync.Mutex var wg sync.WaitGroup - results := make([]*monitoringpb.TimeSeries, 0) + results := make([]timeSeriesWithAligner, 0) - for _, metric := range ms { - wg.Add(1) + for _, sdc := range stackDriverConfigs { + aligner := sdc.Aligner + for _, mt := range sdc.MetricTypes { + metricType := mt + wg.Add(1) - go func(m string) { - defer wg.Done() + go func(metricType string) { + defer wg.Done() - ts := r.Metric(ctx, m) + metricMeta := metricsMeta[metricType] + interval, aligner := getTimeIntervalAligner(metricMeta.ingestDelay, metricMeta.samplePeriod, r.config.period, aligner) + ts := r.Metric(ctx, metricType, interval, aligner) - lock.Lock() - defer lock.Unlock() - results = append(results, ts...) - }(metric) + lock.Lock() + defer lock.Unlock() + results = append(results, ts) + }(metricType) + } } wg.Wait() @@ -136,29 +123,44 @@ func (r *stackdriverMetricsRequester) getFilterForMetric(m string) (f string) { "both are provided, only use region", r.config.Region, r.config.Zone) } if r.config.Region != "" { - f = fmt.Sprintf(`%s AND resource.labels.zone = starts_with("%s")`, f, r.config.Region) + region := r.config.Region + if strings.HasSuffix(r.config.Region, "*") { + region = strings.TrimSuffix(r.config.Region, "*") + } + f = fmt.Sprintf(`%s AND resource.labels.zone = starts_with("%s")`, f, region) } else if r.config.Zone != "" { - f = fmt.Sprintf(`%s AND resource.labels.zone = "%s"`, f, r.config.Zone) + zone := r.config.Zone + if strings.HasSuffix(r.config.Zone, "*") { + zone = strings.TrimSuffix(r.config.Zone, "*") + } + f = fmt.Sprintf(`%s AND resource.labels.zone = starts_with("%s")`, f, zone) } } return } -// Returns a GCP TimeInterval based on the provided config -func getTimeInterval(windowTime time.Duration) (*monitoringpb.TimeInterval, error) { - var startTime, endTime time.Time - - if windowTime > 0 { - endTime = time.Now().UTC() - startTime = time.Now().UTC().Add(-windowTime) +// Returns a GCP TimeInterval based on the ingestDelay and samplePeriod from ListMetricDescriptor +func getTimeIntervalAligner(ingestDelay time.Duration, samplePeriod time.Duration, collectionPeriod duration.Duration, inputAligner string) (*monitoringpb.TimeInterval, string) { + var startTime, endTime, currentTime time.Time + var needsAggregation bool + currentTime = time.Now().UTC() + + // When samplePeriod < collectionPeriod, aggregation will be done in ListTimeSeriesRequest. + // For example, samplePeriod = 60s, collectionPeriod = 300s, if perSeriesAligner is not given, + // ALIGN_MEAN will be used by default. + if int64(samplePeriod.Seconds()) < collectionPeriod.Seconds { + endTime = currentTime.Add(-ingestDelay) + startTime = endTime.Add(-time.Duration(collectionPeriod.Seconds) * time.Second) + needsAggregation = true } - if windowTime.Minutes() < googlecloud.MinTimeIntervalDataWindowMinutes { - return nil, errors.Errorf("the provided window time is too small. 
No less than %d minutes can be fetched", googlecloud.MinTimeIntervalDataWindowMinutes) - } - - if windowTime.Minutes() >= googlecloud.MaxTimeIntervalDataWindowMinutes { - return nil, errors.Errorf("the provided window time is too big. No more than %d minutes can be fetched", googlecloud.MaxTimeIntervalDataWindowMinutes) + // When samplePeriod == collectionPeriod, aggregation is not needed + // When samplePeriod > collectionPeriod, aggregation is not needed, use sample period + // to determine startTime and endTime to make sure there will be data point in this time range. + if int64(samplePeriod.Seconds()) >= collectionPeriod.Seconds { + endTime = time.Now().UTC().Add(-ingestDelay) + startTime = endTime.Add(-samplePeriod) + needsAggregation = false } interval := &monitoringpb.TimeInterval{ @@ -170,5 +172,11 @@ func getTimeInterval(windowTime time.Duration) (*monitoringpb.TimeInterval, erro }, } - return interval, nil + // Default aligner for aggregation is ALIGN_NONE if it's not given + updatedAligner := googlecloud.DefaultAligner + if needsAggregation && inputAligner != "" { + updatedAligner = inputAligner + } + + return interval, updatedAligner } diff --git a/x-pack/metricbeat/module/googlecloud/stackdriver/metrics_requester_test.go b/x-pack/metricbeat/module/googlecloud/stackdriver/metrics_requester_test.go index be7b824d224..f7aff666c0f 100644 --- a/x-pack/metricbeat/module/googlecloud/stackdriver/metrics_requester_test.go +++ b/x-pack/metricbeat/module/googlecloud/stackdriver/metrics_requester_test.go @@ -6,44 +6,14 @@ package stackdriver import ( "testing" + "time" + "github.com/golang/protobuf/ptypes/duration" "github.com/stretchr/testify/assert" "github.com/elastic/beats/v7/libbeat/logp" ) -func TestStringInSlice(t *testing.T) { - cases := []struct { - title string - m string - region string - zone string - expectedFilter string - }{ - { - "construct filter with zone", - "compute.googleapis.com/instance/cpu/utilization", - "", - "us-east1-b", - "metric.type=\"compute.googleapis.com/instance/cpu/utilization\" AND resource.labels.zone = \"us-east1-b\"", - }, - { - "construct filter with region", - "compute.googleapis.com/instance/cpu/utilization", - "us-east1", - "", - "metric.type=\"compute.googleapis.com/instance/cpu/utilization\" AND resource.labels.zone = starts_with(\"us-east1\")", - }, - } - - for _, c := range cases { - t.Run(c.title, func(t *testing.T) { - filter := constructFilter(c.m, c.region, c.zone) - assert.Equal(t, c.expectedFilter, filter) - }) - } -} - func TestGetFilterForMetric(t *testing.T) { var logger = logp.NewLogger("test") cases := []struct { @@ -56,7 +26,7 @@ func TestGetFilterForMetric(t *testing.T) { "compute service with zone in config", "compute.googleapis.com/firewall/dropped_bytes_count", stackdriverMetricsRequester{config: config{Zone: "us-central1-a"}}, - "metric.type=\"compute.googleapis.com/firewall/dropped_bytes_count\" AND resource.labels.zone = \"us-central1-a\"", + "metric.type=\"compute.googleapis.com/firewall/dropped_bytes_count\" AND resource.labels.zone = starts_with(\"us-central1-a\")", }, { "pubsub service with zone in config", @@ -94,6 +64,30 @@ func TestGetFilterForMetric(t *testing.T) { stackdriverMetricsRequester{config: config{Region: "us-central1", Zone: "us-central1-a"}, logger: logger}, "metric.type=\"compute.googleapis.com/firewall/dropped_bytes_count\" AND resource.labels.zone = starts_with(\"us-central1\")", }, + { + "compute uptime with partial region", + "compute.googleapis.com/instance/uptime", + 
stackdriverMetricsRequester{config: config{Region: "us-west"}, logger: logger}, + "metric.type=\"compute.googleapis.com/instance/uptime\" AND resource.labels.zone = starts_with(\"us-west\")", + }, + { + "compute uptime with partial zone", + "compute.googleapis.com/instance/uptime", + stackdriverMetricsRequester{config: config{Zone: "us-west1-"}, logger: logger}, + "metric.type=\"compute.googleapis.com/instance/uptime\" AND resource.labels.zone = starts_with(\"us-west1-\")", + }, + { + "compute uptime with wildcard in region", + "compute.googleapis.com/instance/uptime", + stackdriverMetricsRequester{config: config{Region: "us-*"}, logger: logger}, + "metric.type=\"compute.googleapis.com/instance/uptime\" AND resource.labels.zone = starts_with(\"us-\")", + }, + { + "compute uptime with wildcard in zone", + "compute.googleapis.com/instance/uptime", + stackdriverMetricsRequester{config: config{Zone: "us-west1-*"}, logger: logger}, + "metric.type=\"compute.googleapis.com/instance/uptime\" AND resource.labels.zone = starts_with(\"us-west1-\")", + }, } for _, c := range cases { @@ -103,3 +97,62 @@ func TestGetFilterForMetric(t *testing.T) { }) } } + +func TestGetTimeIntervalAligner(t *testing.T) { + cases := []struct { + title string + ingestDelay time.Duration + samplePeriod time.Duration + collectionPeriod duration.Duration + inputAligner string + expectedAligner string + }{ + { + "test collectionPeriod equals to samplePeriod", + time.Duration(240) * time.Second, + time.Duration(60) * time.Second, + duration.Duration{ + Seconds: int64(60), + }, + "", + "ALIGN_NONE", + }, + { + "test collectionPeriod larger than samplePeriod", + time.Duration(240) * time.Second, + time.Duration(60) * time.Second, + duration.Duration{ + Seconds: int64(300), + }, + "ALIGN_MEAN", + "ALIGN_MEAN", + }, + { + "test collectionPeriod smaller than samplePeriod", + time.Duration(240) * time.Second, + time.Duration(60) * time.Second, + duration.Duration{ + Seconds: int64(30), + }, + "ALIGN_MAX", + "ALIGN_NONE", + }, + { + "test collectionPeriod equals to samplePeriod with given aligner", + time.Duration(240) * time.Second, + time.Duration(60) * time.Second, + duration.Duration{ + Seconds: int64(60), + }, + "ALIGN_MEAN", + "ALIGN_NONE", + }, + } + + for _, c := range cases { + t.Run(c.title, func(t *testing.T) { + _, aligner := getTimeIntervalAligner(c.ingestDelay, c.samplePeriod, c.collectionPeriod, c.inputAligner) + assert.Equal(t, c.expectedAligner, aligner) + }) + } +} diff --git a/x-pack/metricbeat/module/googlecloud/stackdriver/metricset.go b/x-pack/metricbeat/module/googlecloud/stackdriver/metricset.go index 69ac38ca101..81fa98751aa 100644 --- a/x-pack/metricbeat/module/googlecloud/stackdriver/metricset.go +++ b/x-pack/metricbeat/module/googlecloud/stackdriver/metricset.go @@ -6,8 +6,13 @@ package stackdriver import ( "context" + "fmt" "time" + "github.com/golang/protobuf/ptypes/duration" + + monitoring "cloud.google.com/go/monitoring/apiv3" + "github.com/pkg/errors" "google.golang.org/api/option" @@ -38,19 +43,33 @@ func init() { // interface methods except for Fetch. type MetricSet struct { mb.BaseMetricSet - config config + config config + metricsMeta map[string]metricMeta + requester *stackdriverMetricsRequester + stackDriverConfig []stackDriverConfig `config:"metrics" validate:"nonzero,required"` +} + +//stackDriverConfig holds a configuration specific for stackdriver metricset. 
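+// Each entry couples a required metric_types list with an optional aligner;
+// Validate (below) rejects aligners that are not keys of googlecloud.AlignersMapToGCP.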
+type stackDriverConfig struct { + MetricTypes []string `config:"metric_types" validate:"required"` + Aligner string `config:"aligner"` +} + +type metricMeta struct { + samplePeriod time.Duration + ingestDelay time.Duration } type config struct { - Metrics []string `config:"stackdriver.metrics" validate:"required"` - Zone string `config:"zone"` - Region string `config:"region"` - ProjectID string `config:"project_id" validate:"required"` - ExcludeLabels bool `config:"exclude_labels"` - ServiceName string `config:"stackdriver.service" validate:"required"` - CredentialsFilePath string `config:"credentials_file_path"` - - opt []option.ClientOption + Zone string `config:"zone"` + Region string `config:"region"` + ProjectID string `config:"project_id" validate:"required"` + ExcludeLabels bool `config:"exclude_labels"` + ServiceName string `config:"stackdriver.service" validate:"required"` + CredentialsFilePath string `config:"credentials_file_path"` + + opt []option.ClientOption + period duration.Duration } // New creates a new instance of the MetricSet. New is responsible for unpacking @@ -64,12 +83,39 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { return nil, err } + stackDriverConfigs := struct { + StackDriverMetrics []stackDriverConfig `config:"stackdriver.metrics" validate:"nonzero,required"` + }{} + + if err := base.Module().UnpackConfig(&stackDriverConfigs); err != nil { + return nil, err + } + + m.stackDriverConfig = stackDriverConfigs.StackDriverMetrics m.config.opt = []option.ClientOption{option.WithCredentialsFile(m.config.CredentialsFilePath)} + m.config.period.Seconds = int64(m.Module().Config().Period.Seconds()) if err := validatePeriodForGCP(m.Module().Config().Period); err != nil { return nil, err } + // Get ingest delay and sample period for each metric type + ctx := context.Background() + client, err := monitoring.NewMetricClient(ctx, m.config.opt...) + if err != nil { + return nil, errors.Wrap(err, "error creating Stackdriver client") + } + + m.metricsMeta, err = metricDescriptor(ctx, client, m.config.ProjectID, m.stackDriverConfig) + if err != nil { + return nil, errors.Wrap(err, "error calling metricDescriptor function") + } + + m.requester = &stackdriverMetricsRequester{ + config: m.config, + client: client, + logger: m.Logger(), + } return m, nil } @@ -77,12 +123,7 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { // format. It publishes the event which is then forwarded to the output. In case // of an error set the Error field of mb.Event or simply call report.Error(). 
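// Fetch reuses the metric client and requester that New builds once per metricset,
// fanning out one ListTimeSeries call per configured metric type (see metrics_requester.go).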
func (m *MetricSet) Fetch(ctx context.Context, reporter mb.ReporterV2) (err error) { - reqs, err := newStackdriverMetricsRequester(ctx, m.config, m.Module().Config().Period, m.Logger()) - if err != nil { - return errors.Wrapf(err, "error trying to do create a request client to GCP project '%s' in zone '%s' or region '%s'", m.config.ProjectID, m.config.Zone, m.config.Region) - } - - responses, err := reqs.Metrics(ctx, m.config.Metrics) + responses, err := m.requester.Metrics(ctx, m.stackDriverConfig, m.metricsMeta) if err != nil { return errors.Wrapf(err, "error trying to get metrics for project '%s' and zone '%s' or region '%s'", m.config.ProjectID, m.config.Zone, m.config.Region) } @@ -99,7 +140,7 @@ func (m *MetricSet) Fetch(ctx context.Context, reporter mb.ReporterV2) (err erro return nil } -func (m *MetricSet) eventMapping(ctx context.Context, tss []*monitoringpb.TimeSeries) ([]mb.Event, error) { +func (m *MetricSet) eventMapping(ctx context.Context, tss []timeSeriesWithAligner) ([]mb.Event, error) { e := newIncomingFieldExtractor(m.Logger()) var gcpService = googlecloud.NewStackdriverMetadataServiceForTimeSeries(nil) @@ -140,13 +181,14 @@ func (m *MetricSet) eventMapping(ctx context.Context, tss []*monitoringpb.TimeSe // validatePeriodForGCP returns nil if the Period in the module config is in the accepted threshold func validatePeriodForGCP(d time.Duration) (err error) { - if d.Seconds() < 300 { - return errors.New("period in Google Cloud config file cannot be set to less than 300 seconds") + if d.Seconds() < googlecloud.MonitoringMetricsSamplingRate { + return errors.Errorf("period in Google Cloud config file cannot be set to less than %d seconds", googlecloud.MonitoringMetricsSamplingRate) } return nil } +// Validate googlecloud module config func (c *config) Validate() error { // storage metricset does not require region or zone config parameter. 
if c.ServiceName == "storage" { @@ -158,3 +200,46 @@ func (c *config) Validate() error { } return nil } + +// Validate stackdriver related config +func (mc *stackDriverConfig) Validate() error { + gcpAlignerNames := make([]string, 0) + for k := range googlecloud.AlignersMapToGCP { + gcpAlignerNames = append(gcpAlignerNames, k) + } + + if mc.Aligner != "" { + if _, ok := googlecloud.AlignersMapToGCP[mc.Aligner]; !ok { + return errors.Errorf("the given aligner is not supported, please specify one of %s as aligner", gcpAlignerNames) + } + } + return nil +} + +// metricDescriptor calls ListMetricDescriptorsRequest API to get metric metadata +// (sample period and ingest delay) of each given metric type +func metricDescriptor(ctx context.Context, client *monitoring.MetricClient, projectID string, stackDriverConfigs []stackDriverConfig) (map[string]metricMeta, error) { + metricsWithMeta := make(map[string]metricMeta, 0) + + for _, sdc := range stackDriverConfigs { + for _, mt := range sdc.MetricTypes { + req := &monitoringpb.ListMetricDescriptorsRequest{ + Name: "projects/" + projectID, + Filter: fmt.Sprintf(`metric.type = "%s"`, mt), + } + + it := client.ListMetricDescriptors(ctx, req) + out, err := it.Next() + if err != nil { + return metricsWithMeta, errors.Errorf("Could not make ListMetricDescriptors request: %s: %v", mt, err) + } + + metricsWithMeta[mt] = metricMeta{ + samplePeriod: time.Duration(out.Metadata.SamplePeriod.Seconds) * time.Second, + ingestDelay: time.Duration(out.Metadata.IngestDelay.Seconds) * time.Second, + } + } + } + + return metricsWithMeta, nil +} diff --git a/x-pack/metricbeat/module/googlecloud/stackdriver/response_parser.go b/x-pack/metricbeat/module/googlecloud/stackdriver/response_parser.go index 474f04a244b..b6e38f4d333 100644 --- a/x-pack/metricbeat/module/googlecloud/stackdriver/response_parser.go +++ b/x-pack/metricbeat/module/googlecloud/stackdriver/response_parser.go @@ -9,9 +9,10 @@ import ( "strings" "time" - "github.com/pkg/errors" + "github.com/elastic/beats/v7/x-pack/metricbeat/module/googlecloud" "github.com/golang/protobuf/ptypes" + "github.com/pkg/errors" "google.golang.org/genproto/googleapis/monitoring/v3" "github.com/elastic/beats/v7/libbeat/common" @@ -36,7 +37,7 @@ type KeyValuePoint struct { } // extractTimeSeriesMetricValues valuable to send to Elasticsearch. This includes, for example, metric values, labels and timestamps -func (e *incomingFieldExtractor) extractTimeSeriesMetricValues(resp *monitoring.TimeSeries) (points []KeyValuePoint, err error) { +func (e *incomingFieldExtractor) extractTimeSeriesMetricValues(resp *monitoring.TimeSeries, aligner string) (points []KeyValuePoint, err error) { points = make([]KeyValuePoint, 0) for _, point := range resp.Points { @@ -48,7 +49,7 @@ func (e *incomingFieldExtractor) extractTimeSeriesMetricValues(resp *monitoring. } p := KeyValuePoint{ - Key: cleanMetricNameString(resp.Metric.Type), + Key: cleanMetricNameString(resp.Metric.Type, aligner), Value: getValueFromPoint(point), Timestamp: ts, } @@ -62,8 +63,8 @@ func (e *incomingFieldExtractor) extractTimeSeriesMetricValues(resp *monitoring. func (e *incomingFieldExtractor) getTimestamp(p *monitoring.Point) (ts time.Time, err error) { // Don't add point intervals that can't be "stated" at some timestamp. 
if p.Interval != nil { - if ts, err = ptypes.Timestamp(p.Interval.StartTime); err != nil { - return time.Time{}, errors.Errorf("error trying to parse timestamp '%#v' from metric\n", p.Interval.StartTime) + if ts, err = ptypes.Timestamp(p.Interval.EndTime); err != nil { + return time.Time{}, errors.Errorf("error trying to parse timestamp '%#v' from metric\n", p.Interval.EndTime) } return ts, nil } @@ -73,7 +74,7 @@ func (e *incomingFieldExtractor) getTimestamp(p *monitoring.Point) (ts time.Time var rx = regexp.MustCompile(`^[a-z_-]+\.googleapis.com\/`) -func cleanMetricNameString(s string) string { +func cleanMetricNameString(s string, aligner string) string { if s == "" { return "unknown" } @@ -83,7 +84,8 @@ func cleanMetricNameString(s string) string { removedPrefix := strings.TrimPrefix(s, prefix) replacedChars := strings.Replace(removedPrefix, "/", ".", -1) - return replacedChars + metricName := replacedChars + googlecloud.AlignersMapToSuffix[aligner] + return metricName } func getValueFromPoint(p *monitoring.Point) (out interface{}) { diff --git a/x-pack/metricbeat/module/googlecloud/stackdriver/response_parser_test.go b/x-pack/metricbeat/module/googlecloud/stackdriver/response_parser_test.go index 4f055ffc0cc..84f689d042b 100644 --- a/x-pack/metricbeat/module/googlecloud/stackdriver/response_parser_test.go +++ b/x-pack/metricbeat/module/googlecloud/stackdriver/response_parser_test.go @@ -5,7 +5,10 @@ package stackdriver import ( + "testing" + "github.com/golang/protobuf/ptypes/timestamp" + "github.com/stretchr/testify/assert" "google.golang.org/genproto/googleapis/api/metric" "google.golang.org/genproto/googleapis/api/monitoredres" "google.golang.org/genproto/googleapis/monitoring/v3" @@ -65,3 +68,32 @@ var metrics = []string{ "compute.googleapis.com/instance/disk/read_bytes_count", "compute.googleapis.com/http/server/response_latencies", } + +func TestCleanMetricNameString(t *testing.T) { + cases := []struct { + title string + metricType string + aligner string + expectedMetricName string + }{ + { + "test construct metric name with ALIGN_MEAN aligner", + "compute.googleapis.com/instance/cpu/usage_time", + "ALIGN_MEAN", + "instance.cpu.usage_time.avg", + }, + { + "test construct metric name with ALIGN_NONE aligner", + "compute.googleapis.com/instance/cpu/utilization", + "ALIGN_NONE", + "instance.cpu.utilization.value", + }, + } + + for _, c := range cases { + t.Run(c.title, func(t *testing.T) { + metricName := cleanMetricNameString(c.metricType, c.aligner) + assert.Equal(t, c.expectedMetricName, metricName) + }) + } +} diff --git a/x-pack/metricbeat/module/googlecloud/stackdriver/stackdriver_integration_test.go b/x-pack/metricbeat/module/googlecloud/stackdriver/stackdriver_integration_test.go index 8fe568ebe95..fd11a50c6e9 100644 --- a/x-pack/metricbeat/module/googlecloud/stackdriver/stackdriver_integration_test.go +++ b/x-pack/metricbeat/module/googlecloud/stackdriver/stackdriver_integration_test.go @@ -11,11 +11,10 @@ import ( "testing" mbtest "github.com/elastic/beats/v7/metricbeat/mb/testing" - "github.com/elastic/beats/v7/x-pack/metricbeat/module/googlecloud" ) func TestData(t *testing.T) { - config := googlecloud.GetConfigForTest(t, "stackdriver") + config := GetConfigForTest(t, "stackdriver") metricSet := mbtest.NewFetcher(t, config) metricSet.WriteEvents(t, "/") } diff --git a/x-pack/metricbeat/module/googlecloud/stackdriver/timeseries.go b/x-pack/metricbeat/module/googlecloud/stackdriver/timeseries.go index fcf3184717c..c0b456f9954 100644 --- 
a/x-pack/metricbeat/module/googlecloud/stackdriver/timeseries.go +++ b/x-pack/metricbeat/module/googlecloud/stackdriver/timeseries.go @@ -7,53 +7,54 @@ package stackdriver import ( "context" - monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" - "github.com/elastic/beats/v7/x-pack/metricbeat/module/googlecloud" ) //timeSeriesGrouped groups TimeSeries responses into common Elasticsearch friendly events. This is to avoid sending // events with a single metric that shares info (like timestamp) with another event with a single metric too -func (m *MetricSet) timeSeriesGrouped(ctx context.Context, gcpService googlecloud.MetadataService, tss []*monitoringpb.TimeSeries, e *incomingFieldExtractor) (map[string][]KeyValuePoint, error) { +func (m *MetricSet) timeSeriesGrouped(ctx context.Context, gcpService googlecloud.MetadataService, tsas []timeSeriesWithAligner, e *incomingFieldExtractor) (map[string][]KeyValuePoint, error) { eventGroups := make(map[string][]KeyValuePoint) metadataService := gcpService - for _, ts := range tss { - keyValues, err := e.extractTimeSeriesMetricValues(ts) - if err != nil { - return nil, err - } - - sdCollectorInputData := googlecloud.NewStackdriverCollectorInputData(ts, m.config.ProjectID, m.config.Zone, m.config.Region) - if gcpService == nil { - metadataService = googlecloud.NewStackdriverMetadataServiceForTimeSeries(ts) - } - - for i := range keyValues { - sdCollectorInputData.Timestamp = &keyValues[i].Timestamp - - id, err := metadataService.ID(ctx, sdCollectorInputData) + for _, tsa := range tsas { + aligner := tsa.aligner + for _, ts := range tsa.timeSeries { + keyValues, err := e.extractTimeSeriesMetricValues(ts, aligner) if err != nil { - m.Logger().Errorf("error trying to retrieve ID from metric event '%v'", err) - continue + return nil, err } - metadataCollectorData, err := metadataService.Metadata(ctx, sdCollectorInputData.TimeSeries) - if err != nil { - m.Logger().Error("error trying to retrieve labels from metric event") - continue + sdCollectorInputData := googlecloud.NewStackdriverCollectorInputData(ts, m.config.ProjectID, m.config.Zone, m.config.Region) + if gcpService == nil { + metadataService = googlecloud.NewStackdriverMetadataServiceForTimeSeries(ts) } - if _, ok := eventGroups[id]; !ok { - eventGroups[id] = make([]KeyValuePoint, 0) - } + for i := range keyValues { + sdCollectorInputData.Timestamp = &keyValues[i].Timestamp + + id, err := metadataService.ID(ctx, sdCollectorInputData) + if err != nil { + m.Logger().Errorf("error trying to retrieve ID from metric event '%v'", err) + continue + } - keyValues[i].ECS = metadataCollectorData.ECS - keyValues[i].Labels = metadataCollectorData.Labels + metadataCollectorData, err := metadataService.Metadata(ctx, sdCollectorInputData.TimeSeries) + if err != nil { + m.Logger().Error("error trying to retrieve labels from metric event") + continue + } - // Group the data into common events - eventGroups[id] = append(eventGroups[id], keyValues[i]) + if _, ok := eventGroups[id]; !ok { + eventGroups[id] = make([]KeyValuePoint, 0) + } + + keyValues[i].ECS = metadataCollectorData.ECS + keyValues[i].Labels = metadataCollectorData.Labels + + // Group the data into common events + eventGroups[id] = append(eventGroups[id], keyValues[i]) + } } } diff --git a/x-pack/metricbeat/module/googlecloud/storage/_meta/data.json b/x-pack/metricbeat/module/googlecloud/storage/_meta/data.json index f94a1375997..679102209b2 100644 --- a/x-pack/metricbeat/module/googlecloud/storage/_meta/data.json +++ 
b/x-pack/metricbeat/module/googlecloud/storage/_meta/data.json @@ -2,7 +2,7 @@ "@timestamp": "2017-10-12T08:05:34.853Z", "cloud": { "account": { - "id": "elastic-observability" + "id": "elastic-apm" }, "provider": "googlecloud" }, @@ -14,18 +14,18 @@ "googlecloud": { "labels": { "metrics": { - "storage_class": "REGIONAL" + "storage_class": "MULTI_REGIONAL" }, "resource": { - "bucket_name": "elastic-vsphere-images", - "location": "us-east1" + "bucket_name": "artifacts.elastic-apm.appspot.com", + "location": "us" } }, "storage": { "storage": { - "object_count": 3, - "total_byte_seconds": 58816542441472, - "total_bytes": 680747019 + "object_count": { + "value": 15 + } } } }, diff --git a/x-pack/metricbeat/module/googlecloud/storage/_meta/data_network.json b/x-pack/metricbeat/module/googlecloud/storage/_meta/data_network.json new file mode 100644 index 00000000000..7bb1c6a4c86 --- /dev/null +++ b/x-pack/metricbeat/module/googlecloud/storage/_meta/data_network.json @@ -0,0 +1,38 @@ +{ + "@timestamp": "2017-10-12T08:05:34.853Z", + "cloud": { + "account": { + "id": "elastic-observability" + }, + "provider": "googlecloud" + }, + "event": { + "dataset": "googlecloud.storage", + "duration": 115000, + "module": "googlecloud" + }, + "googlecloud": { + "labels": { + "metrics": { + "method": "GetBucketMetadata", + "response_code": "OK" + }, + "resource": { + "bucket_name": "ocp-be-c5kjr-image-registry-us-central1-dsoafnbgctvfimpavswkgn", + "location": "us-central1" + } + }, + "storage": { + "network": { + "received_bytes_count": 0 + } + } + }, + "metricset": { + "name": "storage", + "period": 10000 + }, + "service": { + "type": "googlecloud" + } +} \ No newline at end of file diff --git a/x-pack/metricbeat/module/googlecloud/storage/_meta/data_storage.json b/x-pack/metricbeat/module/googlecloud/storage/_meta/data_storage.json new file mode 100644 index 00000000000..f98a6a6f744 --- /dev/null +++ b/x-pack/metricbeat/module/googlecloud/storage/_meta/data_storage.json @@ -0,0 +1,39 @@ +{ + "@timestamp": "2017-10-12T08:05:34.853Z", + "cloud": { + "account": { + "id": "elastic-observability" + }, + "provider": "googlecloud" + }, + "event": { + "dataset": "googlecloud.storage", + "duration": 115000, + "module": "googlecloud" + }, + "googlecloud": { + "labels": { + "metrics": { + "storage_class": "MULTI_REGIONAL" + }, + "resource": { + "bucket_name": "fstuermer-log-data-categorization-7-6-0", + "location": "us" + } + }, + "storage": { + "storage": { + "total_bytes": { + "value": 4472520191 + } + } + } + }, + "metricset": { + "name": "storage", + "period": 10000 + }, + "service": { + "type": "googlecloud" + } +} \ No newline at end of file diff --git a/x-pack/metricbeat/module/googlecloud/storage/_meta/docs.asciidoc b/x-pack/metricbeat/module/googlecloud/storage/_meta/docs.asciidoc index 4c9ff62e4ae..d58ceda230f 100644 --- a/x-pack/metricbeat/module/googlecloud/storage/_meta/docs.asciidoc +++ b/x-pack/metricbeat/module/googlecloud/storage/_meta/docs.asciidoc @@ -1,11 +1,12 @@ -Storage Metricset to fetch metrics from https://cloud.google.com/storage/[Storage] in Google Cloud Platform. +Storage metricset to fetch metrics from https://cloud.google.com/storage/[Storage] in Google Cloud Platform. -The `storage` Metricset contains all metrics exported from the https://cloud.google.com/monitoring/api/metrics_gcp#gcp-storage[Stackdriver API]. The field names have been left untouched for people already familiar with them. 
+The `storage` metricset contains all metrics exported from the https://cloud.google.com/monitoring/api/metrics_gcp#gcp-storage[Stackdriver API]. The field names have been left untouched for people already familiar with them. You can specify a single region to fetch metrics from, like `us-central1`. Be aware that GCP Storage does not use zones, so `us-central1-a` will return nothing. If no region is specified, it will return metrics from all buckets. [float] -=== Fields +=== Metrics +Here is a list of metrics collected by the `storage` metricset: - `storage.api.request_count`: Delta count of API calls, grouped by the API method name and response code. - `storage.authz.acl_based_object_access_count`: Delta count of requests that result in an object being granted access solely due to object ACLs. diff --git a/x-pack/metricbeat/module/googlecloud/storage/_meta/fields.yml b/x-pack/metricbeat/module/googlecloud/storage/_meta/fields.yml index 71fcb2bdbeb..cdf2fde2fff 100644 --- a/x-pack/metricbeat/module/googlecloud/storage/_meta/fields.yml +++ b/x-pack/metricbeat/module/googlecloud/storage/_meta/fields.yml @@ -6,39 +6,39 @@ - name: api type: group fields: - - name: request_count + - name: request_count.value type: long description: Delta count of API calls, grouped by the API method name and response code. - name: authz type: group fields: - - name: acl_based_object_access_count + - name: acl_based_object_access_count.value type: long description: Delta count of requests that result in an object being granted access solely due to object ACLs. - - name: acl_operations_count + - name: acl_operations_count.value type: long description: Usage of ACL operations broken down by type. - - name: object_specific_acl_mutation_count + - name: object_specific_acl_mutation_count.value type: long description: Delta count of changes made to object specific ACLs. - name: network type: group fields: - - name: received_bytes_count + - name: received_bytes_count.value type: long description: Delta count of bytes received over the network, grouped by the API method name and response code. - - name: sent_bytes_count + - name: sent_bytes_count.value type: long description: Delta count of bytes sent over the network, grouped by the API method name and response code. - name: storage type: group fields: - - name: object_count + - name: object_count.value type: long description: Total number of objects per bucket, grouped by storage class. This value is measured once per day, and the value is repeated at each sampling interval throughout the day. - - name: total_byte_seconds + - name: total_byte_seconds.value type: long description: Total daily storage in byte*seconds used by the bucket, grouped by storage class. This value is measured once per day, and the value is repeated at each sampling interval throughout the day. - - name: total_bytes + - name: total_bytes.value type: long description: Total size of all objects in the bucket, grouped by storage class. This value is measured once per day, and the value is repeated at each sampling interval throughout the day.
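The `.value` suffix on the renamed fields above comes from the aligner-aware naming introduced in `stackdriver/response_parser.go`: `cleanMetricNameString` now appends `googlecloud.AlignersMapToSuffix[aligner]` to each metric name, so un-aggregated series (`ALIGN_NONE`) end in `.value` while `ALIGN_MEAN` series end in `.avg`, as `TestCleanMetricNameString` shows. The map itself is not part of this diff, so the sketch below assumes a minimal version consistent with those tests.

[source,go]
----
package main

import (
	"fmt"
	"strings"
)

// Assumed minimal contents of googlecloud.AlignersMapToSuffix; only the two
// entries exercised by TestCleanMetricNameString are listed here.
var alignersMapToSuffix = map[string]string{
	"ALIGN_NONE": ".value",
	"ALIGN_MEAN": ".avg",
}

// metricName mirrors cleanMetricNameString: strip the "<service>.googleapis.com/"
// prefix, turn the remaining path into dotted form, then append the suffix of
// the aligner that produced the series.
func metricName(metricType, aligner string) string {
	prefix := "storage.googleapis.com/" // the real code matches any service with a regexp
	name := strings.Replace(strings.TrimPrefix(metricType, prefix), "/", ".", -1)
	return name + alignersMapToSuffix[aligner]
}

func main() {
	// Prints "storage.total_bytes.value", matching the renamed fields.yml entries.
	fmt.Println(metricName("storage.googleapis.com/storage/total_bytes", "ALIGN_NONE"))
}
----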
diff --git a/x-pack/metricbeat/module/googlecloud/storage/manifest.yml b/x-pack/metricbeat/module/googlecloud/storage/manifest.yml index f462867dcb5..2a9363cf78d 100644 --- a/x-pack/metricbeat/module/googlecloud/storage/manifest.yml +++ b/x-pack/metricbeat/module/googlecloud/storage/manifest.yml @@ -6,12 +6,13 @@ input: stackdriver: service: storage metrics: - - "storage.googleapis.com/api/request_count" - - "storage.googleapis.com/authz/acl_based_object_access_count" - - "storage.googleapis.com/authz/acl_operations_count" - - "storage.googleapis.com/authz/object_specific_acl_mutation_count" - - "storage.googleapis.com/network/received_bytes_count" - - "storage.googleapis.com/network/sent_bytes_count" - - "storage.googleapis.com/storage/object_count" - - "storage.googleapis.com/storage/total_byte_seconds" - - "storage.googleapis.com/storage/total_bytes" + - metric_types: + - "storage.googleapis.com/api/request_count" + - "storage.googleapis.com/authz/acl_based_object_access_count" + - "storage.googleapis.com/authz/acl_operations_count" + - "storage.googleapis.com/authz/object_specific_acl_mutation_count" + - "storage.googleapis.com/network/received_bytes_count" + - "storage.googleapis.com/network/sent_bytes_count" + - "storage.googleapis.com/storage/object_count" + - "storage.googleapis.com/storage/total_byte_seconds" + - "storage.googleapis.com/storage/total_bytes" diff --git a/x-pack/metricbeat/module/googlecloud/storage/storage_integration_test.go b/x-pack/metricbeat/module/googlecloud/storage/storage_integration_test.go index f669a7c46e9..7d40e7b2bf9 100644 --- a/x-pack/metricbeat/module/googlecloud/storage/storage_integration_test.go +++ b/x-pack/metricbeat/module/googlecloud/storage/storage_integration_test.go @@ -8,14 +8,38 @@ package storage import ( + "fmt" "testing" + "github.com/elastic/beats/v7/libbeat/common" mbtest "github.com/elastic/beats/v7/metricbeat/mb/testing" - "github.com/elastic/beats/v7/x-pack/metricbeat/module/googlecloud" + "github.com/elastic/beats/v7/x-pack/metricbeat/module/googlecloud/stackdriver" ) func TestData(t *testing.T) { - config := googlecloud.GetConfigForTest(t, "storage") - metricSet := mbtest.NewFetcher(t, config) - metricSet.WriteEvents(t, "/") + metricPrefixIs := func(metricPrefix string) func(e common.MapStr) bool { + return func(e common.MapStr) bool { + v, err := e.GetValue(metricPrefix) + return err == nil && v != nil + } + } + + dataFiles := []struct { + metricPrefix string + path string + }{ + {"googlecloud.storage", "./_meta/data.json"}, + {"googlecloud.storage.authz", "./_meta/data_authz.json"}, + {"googlecloud.storage.network", "./_meta/data_network.json"}, + {"googlecloud.storage.storage", "./_meta/data_storage.json"}, + } + + config := stackdriver.GetConfigForTest(t, "storage") + + for _, df := range dataFiles { + metricSet := mbtest.NewFetcher(t, config) + t.Run(fmt.Sprintf("metric prefix: %s", df.metricPrefix), func(t *testing.T) { + metricSet.WriteEventsCond(t, df.path, metricPrefixIs(df.metricPrefix)) + }) + } } diff --git a/x-pack/metricbeat/module/iis/fields.go b/x-pack/metricbeat/module/iis/fields.go index 8a124fdbef0..18bad877cf9 100644 --- a/x-pack/metricbeat/module/iis/fields.go +++ b/x-pack/metricbeat/module/iis/fields.go @@ -19,5 +19,5 @@ func init() { // AssetIis returns asset data. // This is the base64 encoded gzipped contents of module/iis. 
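// The string below is generated from the module's _meta/fields.yml (typically via make update) and is not edited by hand.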
func AssetIis() string { - return "eJy0kbGOgzAQRHt/xYgSKfkAF/cryMAm2ovBlu1cxN+ffAYOiEE02YJidzTzzFzwoEGC2QsgcNAkUTD7QgAt+caxDWx6iS8BIOrQmfapSQCONClPEjUFJQRwY9Ktl3/KC3rV0eQcJwyWJO7OPO24yQSsTZZGylrNjYriyhqjZ0HOOc4a73+fTU1zkLHFWqLF7+owIT1oeBnXbm4HABsIRIi1/ZT5otqT+yF3La/liVcnIFN/UxMW67So0vWmjdo5Vp2ylvv7qCzK4twfnTHnbfYlHOgTfb5bv9e4W+J+hYcFjpnJ7zcAAP//navhqA==" + return "eJzMkrGOgzAQRHt/xYgSKfkAF/cryMAm2ovBlr25iL8/+QwcIEAp44JidjTzFu0FDxo0mKMChMWSRsEcCwW0FJvAXtj1Gl8KQPKhc+3TkgICWTKRNGoSoxRwY7Jt1H/OC3rT0ZScngyeNO7BPf2o7BSsQ5ZBxnvLjUnmyjtnZ8NecnprvH99tzW/k44t1hItfVeDCelBw8uFdjM7AdhAIEGs46fOF9WRwg+Fa3kt39g6A7n6mxpZyFmo8vRmnTkYVp3xnvv76CzK4r0/OmPO6u4mLPT5e7DQ6UEcnsPxMZyewtiZ834DAAD///e79wo=" } diff --git a/x-pack/metricbeat/module/iis/webserver/manifest.yml b/x-pack/metricbeat/module/iis/webserver/manifest.yml index c8d148a21c1..32da4b384b6 100644 --- a/x-pack/metricbeat/module/iis/webserver/manifest.yml +++ b/x-pack/metricbeat/module/iis/webserver/manifest.yml @@ -6,158 +6,81 @@ input: perfmon.group_measurements_by_instance: true perfmon.ignore_non_existent_counters: true perfmon.group_all_counter: "worker_process_count" - perfmon.counters: - #network - - instance_label: '' - measurement_label: network.total_bytes_received - query: '\Web Service(_Total)\Total Bytes Received' - - instance_label: '' - measurement_label: network.total_bytes_sent - query: '\Web Service(_Total)\Total Bytes Sent' - - instance_label: '' - measurement_label: network.bytes_sent_per_sec - query: '\Web Service(_Total)\Bytes Sent/sec' - - instance_label: '' - measurement_label: network.bytes_received_per_sec - query: '\Web Service(_Total)\Bytes Received/sec' - - instance_label: '' - measurement_label: network.current_connections - query: '\Web Service(_Total)\Current Connections' - - instance_label: '' - measurement_label: network.maximum_connections - query: '\Web Service(_Total)\Maximum Connections' - - instance_label: '' - measurement_label: network.total_connection_attempts - query: '\Web Service(_Total)\Total Connection Attempts (all instances)' - - instance_label: '' - measurement_label: network.total_get_requests - query: '\Web Service(_Total)\Total Get Requests' - - instance_label: '' - measurement_label: network.get_requests_per_sec - query: '\Web Service(_Total)\Get Requests/sec' - - instance_label: '' - measurement_label: network.total_post_requests - query: '\Web Service(_Total)\Total Post Requests' - - instance_label: '' - measurement_label: network.post_requests_per_sec - query: '\Web Service(_Total)\Post Requests/sec' - - instance_label: '' - measurement_label: network.total_delete_requests - query: '\Web Service(_Total)\Total Delete Requests' - - instance_label: '' - measurement_label: network.delete_requests_per_sec - query: '\Web Service(_Total)\Delete Requests/sec' - - instance_label: '' - measurement_label: network.service_uptime - query: '\Web Service(_Total)\Service Uptime' - - instance_label: '' - measurement_label: network.current_anonymous_users - query: '\Web Service(_Total)\Current Anonymous Users' - - instance_label: '' - measurement_label: network.current_nonanonymous_users - query: '\Web Service(_Total)\Current NonAnonymous Users' - - instance_label: '' - measurement_label: network.total_anonymous_users - query: '\Web Service(_Total)\Total Anonymous Users' - - instance_label: '' - measurement_label: network.anonymous_users_per_sec - query: '\Web Service(_Total)\Anonymous Users/sec' - - instance_label: '' - measurement_label: network.total_nonanonymous_users - query: '\Web Service(_Total)\Total 
NonAnonymous Users'
-
+    perfmon.queries:
+      - object: "Web Service"
+        instance: "_Total"
+        namespace: "network"
+        counters:
+          # network
+          - name: "Total Bytes Received"
+          - name: "Total Bytes Sent"
+          - name: "Bytes Sent/sec"
+          - name: "Bytes Received/sec"
+          - name: "Current Connections"
+          - name: "Maximum Connections"
+          - name: "Total Connection Attempts (all instances)"
+            field: "total_connection_attempts"
+          - name: "Total Get Requests"
+          - name: "Get Requests/sec"
+          - name: "Total Post Requests"
+          - name: "Post Requests/sec"
+          - name: "Total Delete Requests"
+          - name: "Delete Requests/sec"
+          - name: "Service Uptime"
+          - name: "Current Anonymous Users"
+          - name: "Current NonAnonymous Users"
+          - name: "Total Anonymous Users"
+          - name: "Anonymous Users/sec"
+          - name: "Total NonAnonymous Users"
      #asp.net
-      - instance_label: ''
-        measurement_label: asp_net_application.errors_per_sec
-        query: '\ASP.NET Applications(__Total__)\Errors Total/Sec'
-      - instance_label: ''
-        measurement_label: asp_net_application.pipeline_instance_count
-        query: '\ASP.NET Applications(__Total__)\Pipeline Instance Count'
-      - instance_label: ''
-        measurement_label: asp_net_application.requests_executing
-        query: '\ASP.NET Applications(__Total__)\Requests Executing'
-      - instance_label: ''
-        measurement_label: asp_net_application.requests_in_application_queue
-        query: '\ASP.NET Applications(__Total__)\Requests in Application Queue'
-        format: 'large'
-      - instance_label: ''
-        measurement_label: asp_net_application.requests_per_sec
-        query: '\ASP.NET Applications(__Total__)\Requests/Sec'
-      - instance_label: ''
-        measurement_label: asp_net.application_restarts
-        query: '\ASP.NET\Application Restarts'
-      - instance_label: ''
-        measurement_label: asp_net.request_wait_time
-        query: '\ASP.NET\Request Wait Time'
+      - object: "ASP.NET"
+        namespace: "asp_net"
+        counters:
+          - name: "Application Restarts"
+          - name: "Request Wait Time"
+      #asp_net_application
+      - object: "ASP.NET Applications"
+        instance: "__Total__"
+        namespace: "asp_net_application"
+        counters:
+          - name: "Errors Total/Sec"
+          - name: "Pipeline Instance Count"
+          - name: "Requests Executing"
+          - name: "Requests in Application Queue"
+            format: 'large'
+          - name: "Requests/Sec"
      #cache
-      - instance_label: ''
-        measurement_label: cache.current_files_cached
-        query: '\Web Service Cache\Current Files Cached'
-      - instance_label: ''
-        measurement_label: cache.total_files_cached
-        query: '\Web Service Cache\Total Files Cached'
-      - instance_label: ''
-        measurement_label: cache.file_cache_hits
-        query: '\Web Service Cache\File Cache Hits'
-      - instance_label: ''
-        measurement_label: cache.file_cache_misses
-        query: '\Web Service Cache\File Cache Misses'
-      - instance_label: ''
-        measurement_label: cache.current_file_cache_memory_usage
-        query: '\Web Service Cache\Current File Cache Memory Usage'
-      - instance_label: ''
-        measurement_label: cache.maximum_file_cache_memory_usage
-        query: '\Web Service Cache\Maximum File Cache Memory Usage'
-      - instance_label: ''
-        measurement_label: cache.current_uris_cached
-        query: '\Web Service Cache\Current URIs Cached'
-      - instance_label: ''
-        measurement_label: cache.total_uris_cached
-        query: '\Web Service Cache\Total URIs Cached'
-      - instance_label: ''
-        measurement_label: cache.uri_cache_hits
-        query: '\Web Service Cache\URI Cache Hits'
-      - instance_label: ''
-        measurement_label: cache.uri_cache_misses
-        query: '\Web Service Cache\URI Cache Misses'
-      - instance_label: ''
-        measurement_label: 
cache.output_cache_current_memory_usage - query: '\Web Service Cache\Output Cache Current Memory Usage' - - instance_label: '' - measurement_label: cache.output_cache_current_items - query: '\Web Service Cache\Output Cache Current Items' - - instance_label: '' - measurement_label: cache.output_cache_total_hits - query: '\Web Service Cache\Output Cache Total Hits' - - instance_label: '' - measurement_label: cache.output_cache_total_misses - query: '\Web Service Cache\Output Cache Total Misses' + - object: "Web Service Cache" + namespace: "cache" + counters: + - name: "Current Files Cached" + - name: "Total Files Cached" + - name: "File Cache Hits" + - name: "File Cache Misses" + - name: "Current File Cache Memory Usage" + - name: "Maximum File Cache Memory Usage" + - name: "Current URIs Cached" + - name: "Total URIs Cached" + - name: "URI Cache Hits" + - name: "URI Cache Misses" + - name: "Output Cache Current Memory Usage" + - name: "Output Cache Current Items" + - name: "Output Cache Total Hits" + - name: "Output Cache Total Misses" #process - - instance_label: '' - measurement_label: process.cpu_usage_perc - query: '\Process(w3wp*)\% Processor Time' - - instance_label: '' - measurement_label: process.handle_count - query: '\Process(w3wp*)\Handle Count' - - instance_label: '' - measurement_label: process.thread_count - query: '\Process(w3wp*)\Thread Count' - - instance_label: '' - measurement_label: process.working_set - query: '\Process(w3wp*)\Working Set' - - instance_label: '' - measurement_label: process.private_byte - query: '\Process(w3wp*)\Private Bytes' - - instance_label: '' - measurement_label: process.virtual_bytes - query: '\Process(w3wp*)\Virtual Bytes' - - instance_label: '' - measurement_label: process.page_faults_per_Sec - query: '\Process(w3wp*)\Page Faults/sec' - - instance_label: '' - measurement_label: process.io_read_operations_per_sec - query: '\Process(w3wp*)\IO Read Operations/sec' - - instance_label: '' - measurement_label: process.io_write_operations_per_sec - query: '\Process(w3wp*)\IO Write Operations/sec' + - object: "Process" + field: "process" + namespace: "process" + instance: "w3wp*" + counters: + - name: "% Processor Time" + - name: "Handle Count" + - name: "Thread Count" + - name: "Working Set" + - name: "Private Bytes" + - name: "Virtual Bytes" + - name: "Page Faults/sec" + - name: "IO Read Operations/sec" + - name: "IO Write Operations/sec" diff --git a/x-pack/metricbeat/module/iis/website/_meta/data.json b/x-pack/metricbeat/module/iis/website/_meta/data.json index d9804730f97..e2a6ed9905c 100644 --- a/x-pack/metricbeat/module/iis/website/_meta/data.json +++ b/x-pack/metricbeat/module/iis/website/_meta/data.json @@ -7,17 +7,24 @@ }, "iis": { "website": { - "current_connections": 0, - "maximum_connections": 1, - "name": "test2.local", - "service_uptime": 346586, - "total_bytes_received": 1666, - "total_bytes_sent": 84224, - "total_connection_attempts": 2, - "total_delete_requests": 0, - "total_get_requests": 4, - "total_post_requests": 0, - "total_put_requests": 0 + "network": { + "bytes_sent_per_sec": 0, + "maximum_connections": 1, + "total_post_requests": 0, + "post_requests_per_sec": 0, + "total_connection_attempts": 1, + "service_uptime": 114161, + "get_requests_per_sec": 0, + "total_put_requests": 0, + "current_connections": 0, + "total_delete_requests": 0, + "bytes_received_per_sec": 0, + "put_requests_per_sec": 0, + "total_bytes_sent": 944, + "total_get_requests": 1, + "delete_requests_per_sec": 0 + }, + "name": "Default Web Site" } }, 
"metricset": { @@ -27,4 +34,4 @@ "service": { "type": "iis" } -} \ No newline at end of file +} diff --git a/x-pack/metricbeat/module/iis/website/_meta/fields.yml b/x-pack/metricbeat/module/iis/website/_meta/fields.yml index 74be2045930..1bd4ebc3acc 100644 --- a/x-pack/metricbeat/module/iis/website/_meta/fields.yml +++ b/x-pack/metricbeat/module/iis/website/_meta/fields.yml @@ -1,6 +1,8 @@ -- name: website - type: group +- name: website.*.* release: beta + type: object + object_type: float + object_type_mapping_type: "*" description: > website fields: diff --git a/x-pack/metricbeat/module/iis/website/manifest.yml b/x-pack/metricbeat/module/iis/website/manifest.yml index 564ad6db86c..62ebfd27ed8 100644 --- a/x-pack/metricbeat/module/iis/website/manifest.yml +++ b/x-pack/metricbeat/module/iis/website/manifest.yml @@ -5,57 +5,41 @@ input: defaults: perfmon.group_measurements_by_instance: true perfmon.ignore_non_existent_counters: true - perfmon.counters: - #network - - instance_label: 'name' - measurement_label: total_bytes_received - query: '\Web Service(*)\Total Bytes Received' - - instance_label: 'name' - measurement_label: total_bytes_sent - query: '\Web Service(*)\Total Bytes Sent' - - instance_label: 'name' - measurement_label: bytes_sent_per_sec - query: '\Web Service(*)\Bytes Sent/sec' - - instance_label: 'name' - measurement_label: bytes_received_per_sec - query: '\Web Service(*)\Bytes Received/sec' - - instance_label: 'name' - measurement_label: current_connections - query: '\Web Service(*)\Current Connections' - - instance_label: 'name' - measurement_label: maximum_connections - query: '\Web Service(*)\Maximum Connections' - - instance_label: 'name' - measurement_label: total_connection_attempts - query: '\Web Service(*)\Total Connection Attempts (all instances)' - - instance_label: 'name' - measurement_label: total_get_requests - query: '\Web Service(*)\Total Get Requests' - - instance_label: 'name' - measurement_label: get_requests_per_sec - query: '\Web Service(*)\Get Requests/sec' - - instance_label: 'name' - measurement_label: total_post_requests - query: '\Web Service(*)\Total Post Requests' - - instance_label: 'name' - measurement_label: post_requests_per_sec - query: '\Web Service(*)\Post Requests/sec' - - instance_label: 'name' - measurement_label: total_delete_requests - query: '\Web Service(*)\Total Delete Requests' - - instance_label: 'name' - measurement_label: delete_requests_per_sec - query: '\Web Service(*)\Delete Requests/sec' - - instance_label: 'name' - measurement_label: service_uptime - query: '\Web Service(*)\Service Uptime' - - instance_label: 'name' - measurement_label: total_put_requests - query: '\Web Service(*)\Total PUT Requests' - - instance_label: 'name' - measurement_label: put_requests_per_sec - query: '\Web Service(*)\PUT Requests/sec' + perfmon.queries: + - object: 'Web Service' + instance: "*" + namespace : "network" + counters: + - name: 'Total Bytes Received' + - name: 'Total Bytes Sent' + - name: "Bytes Sent/sec" + - name: "Bytes Received/sec" + - name: "Current Connections" + - name: "Maximum Connections" + - name: "Total Connection Attempts (all instances)" + field: total_connection_attempts + - name: "Total Get Requests" + - name: "Get Requests/sec" + - name: "Total Post Requests" + - name: "Post Requests/sec" + - name: "Total Delete Requests" + - name: "Delete Requests/sec" + - name: "Service Uptime" + - name: "Total PUT Requests" + - name: "PUT Requests/sec" processors: - drop_event.when.equals: iis.website.name: '_Total' +- drop_fields: + 
fields: "iis.website.object" +- rename: + ignore_missing: true + fields: + - from: "iis.website.instance" + to: "iis.website.name" + + + + + diff --git a/x-pack/metricbeat/modules.d/googlecloud.yml.disabled b/x-pack/metricbeat/modules.d/googlecloud.yml.disabled index fc7d792dadf..cd49fdc146f 100644 --- a/x-pack/metricbeat/modules.d/googlecloud.yml.disabled +++ b/x-pack/metricbeat/modules.d/googlecloud.yml.disabled @@ -4,13 +4,21 @@ - module: googlecloud metricsets: - compute + region: "us-central1" + project_id: "your project id" + credentials_file_path: "your JSON credentials file path" + exclude_labels: false + period: 300s + +- module: googlecloud + metricsets: - pubsub - loadbalancing zone: "us-central1-a" project_id: "your project id" credentials_file_path: "your JSON credentials file path" exclude_labels: false - period: 300s + period: 60s - module: googlecloud metricsets: @@ -20,3 +28,12 @@ credentials_file_path: "your JSON credentials file path" exclude_labels: false period: 300s + +- module: googlecloud + metricsets: + - compute + region: "us-" + project_id: "your project id" + credentials_file_path: "your JSON credentials file path" + exclude_labels: false + period: 60s diff --git a/x-pack/metricbeat/tests/system/test_xpack_base.py b/x-pack/metricbeat/tests/system/test_xpack_base.py index 69d07b61354..225ad779f0d 100644 --- a/x-pack/metricbeat/tests/system/test_xpack_base.py +++ b/x-pack/metricbeat/tests/system/test_xpack_base.py @@ -5,5 +5,4 @@ class Test(xpack_metricbeat.XPackTest, test_base.Test): - def kibana_dir(self): - return os.path.join(self.beat_path, 'build', 'kibana') + pass diff --git a/x-pack/winlogbeat/Makefile b/x-pack/winlogbeat/Makefile index 5da45563104..022bb515c23 100644 --- a/x-pack/winlogbeat/Makefile +++ b/x-pack/winlogbeat/Makefile @@ -7,4 +7,4 @@ GOX_OS := windows # # Includes # -include ../../dev-tools/make/xpack.mk +include ../../dev-tools/make/mage.mk diff --git a/x-pack/winlogbeat/magefile.go b/x-pack/winlogbeat/magefile.go index 6b6646bed15..a52000fa830 100644 --- a/x-pack/winlogbeat/magefile.go +++ b/x-pack/winlogbeat/magefile.go @@ -37,15 +37,3 @@ func init() { // Update is an alias for update:all. This is a workaround for // https://github.com/magefile/mage/issues/217. func Update() { mg.Deps(winlogbeat.Update.All) } - -// Fields is an alias for update:fields. -// -// TODO: dev-tools/jenkins_ci.ps1 uses this. This should be removed when all -// projects have update to use goUnitTest. -func Fields() { mg.Deps(winlogbeat.Update.Fields) } - -// GoTestUnit is an alias for goUnitTest. -// -// TODO: dev-tools/jenkins_ci.ps1 uses this. This should be removed when all -// projects have update to use goUnitTest. -func GoTestUnit() { mg.Deps(unittest.GoUnitTest) } diff --git a/x-pack/winlogbeat/winlogbeat.reference.yml b/x-pack/winlogbeat/winlogbeat.reference.yml index a7a7b562ca8..c8643d904ab 100644 --- a/x-pack/winlogbeat/winlogbeat.reference.yml +++ b/x-pack/winlogbeat/winlogbeat.reference.yml @@ -452,6 +452,27 @@ output.elasticsearch: # The pin is a base64 encoded string of the SHA-256 fingerprint. #ssl.ca_sha256: "" + # Enable Kerberos support. Kerberos is automatically enabled if any Kerberos setting is set. + #kerberos.enabled: true + + # Authentication type to use with Kerberos. Available options: keytab, password. + #kerberos.auth_type: password + + # Path to the keytab file. It is used when auth_type is set to keytab. + #kerberos.keytab: /etc/elastic.keytab + + # Path to the Kerberos configuration. 
+ #kerberos.config_path: /etc/krb5.conf + + # Name of the Kerberos user. + #kerberos.username: elastic + + # Password of the Kerberos user. It is used when auth_type is set to password. + #kerberos.password: changeme + + # Kerberos realm. + #kerberos.realm: ELASTIC + #----------------------------- Logstash output --------------------------------- #output.logstash: # Boolean flag to enable or disable the output module. @@ -720,6 +741,9 @@ output.elasticsearch: # never, once, and freely. Default is never. #ssl.renegotiation: never + # Enable Kerberos support. Kerberos is automatically enabled if any Kerberos setting is set. + #kerberos.enabled: true + # Authentication type to use with Kerberos. Available options: keytab, password. #kerberos.auth_type: password @@ -1306,6 +1330,27 @@ logging.files: # never, once, and freely. Default is never. #ssl.renegotiation: never + # Enable Kerberos support. Kerberos is automatically enabled if any Kerberos setting is set. + #kerberos.enabled: true + + # Authentication type to use with Kerberos. Available options: keytab, password. + #kerberos.auth_type: password + + # Path to the keytab file. It is used when auth_type is set to keytab. + #kerberos.keytab: /etc/elastic.keytab + + # Path to the Kerberos configuration. + #kerberos.config_path: /etc/krb5.conf + + # Name of the Kerberos user. + #kerberos.username: elastic + + # Password of the Kerberos user. It is used when auth_type is set to password. + #kerberos.password: changeme + + # Kerberos realm. + #kerberos.realm: ELASTIC + #metrics.period: 10s #state.period: 1m
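Taken together, the commented Kerberos defaults above amount to only a few lines of working configuration. A minimal sketch for a keytab-based setup of the Elasticsearch output; the host, principal, keytab path, and realm below are placeholders:

output.elasticsearch:
  hosts: ["https://elasticsearch.example.com:9200"]
  kerberos.enabled: true
  kerberos.auth_type: keytab        # "password" would use kerberos.password instead
  kerberos.username: elastic
  kerberos.keytab: /etc/elastic.keytab    # only read when auth_type is keytab
  kerberos.config_path: /etc/krb5.conf
  kerberos.realm: ELASTIC

Since Kerberos is enabled automatically once any kerberos.* setting is present, kerberos.enabled is included here only to make the intent explicit.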