From 8b75c62c779d819461341f4ab21bebd6fa425e0d Mon Sep 17 00:00:00 2001 From: Sebastien Guilloux Date: Wed, 24 Jul 2019 11:01:45 +0200 Subject: [PATCH] Merge master into statefulset-refactoring (#1358) * Use the setvmmaxmapcount initcontainer by default in E2E tests (#1300) Let's keep our default defaults :) The setting is disabled explicitly for E2E tests where we enable a restricted security context. * Add docs for plugins, custom configuration files and secure settings (#1298) * Allow license secret webhook to fail (#1301) Webhooks on core k8s objects are just too debilitating in case our webhook service fails. This sets the failure policy for the secret webhook to ignore, to strike a balance between UX (immediate feedback) and keeping the user's k8s cluster in a working state. Also, we run an additional validation at the controller level, so this does not allow circumventing our validation logic. * Revert "Use the setvmmaxmapcount initcontainer by default in E2E tests (#1300)" (#1302) This reverts commit fff15269be1c431121fdefd8ca8c6ad93db8c9df. This commit breaks our E2E test chain, which deploys a PodSecurityPolicy by default. Any privileged init container will not work. I'll open an issue for a longer-term fix to properly handle this. * Update quickstart (#1307) * Update the name of the secret for the elastic user * Bump the Elastic Stack version from 7.1.0 to 7.2.0 * Change Kibana readiness endpoint to return a 200 OK (#1309) The previous endpoint returned an HTTP code 302. While this is fine for Kubernetes, some derived systems like GCP LoadBalancers mimic the container readiness check for their own readiness check. Except GCP LoadBalancers only work with status 200. It's not up to us to adapt GCP LoadBalancers to K8s, but this is a fairly trivial fix. * Fix pod_forwarder to support two part DNS names, adjust e2e http_client (#1297) * Fix pod_forwarder to support two part DNS names, adjust e2e http_client url * Revert removing .svc in e2e http_client * [DOC] Resources management and volume claim template (#1252) * Add resources and persistent volume templates documentation * Ignore resources reconciled by older controllers (#1286) * Document PodDisruptionBudget section of the ES spec (#1306) I suspect this might slightly change in the future depending on how we handle the readiness check, so I'm keeping this doc minimal for now: * what is a PDB, briefly (with a link) * default PDB we apply * how to set a different PDB * how to disable the default PDB * Move version out from Makefile (#1312) * Add release note generation tool (#1314) * no external dependencies * inspects PRs by version label * generates structured release notes in asciidoc grouped by type label * Add console output to standalone apm sample (#1321) * Update Quickstart to 0.9.0 (#1317) * Update doc (#1319) * Update persistent storage section * Update kibana localhost url to use https * Update k8s resources names in accessing-services doc * Mention SSL browser warning * Fix bulleted list * Add CI job for nightly builds (#1248) * Move version to a file * Add CI implementation * Update VERSION * Depend on another PR for moving out version from Makefile * Update Jenkinsfile * Don't build and push operator image in bootstrap-gke (#1332) We don't need to do that anymore, since we don't use an init container based on the operator image.
* Remove Docker image publishing from devops-ci (#1339) * Suppress output of certain commands from Makefile (#1342) * Document how to disable TLS (#1341) * Use new credentials for Docker registry (#1346) * Workaround controller-runtime webhook upsert bug (#1337) * Fix docs build on PR job (#1351) * Fix docs build on PR job * Cleanup workspace before doing other steps * APM: remove "output" element and add elasticsearchRef (#1345) * Don't rely on buggy metaObject Kind (#1324) * Don't rely on buggy metaObject Kind A bug in our client implementation may clear the object's Kind on certain scenarios. See https://github.com/kubernetes-sigs/controller-runtime/issues/406. Let's avoid that by fixing a constant Kind returned by a method call on the resource. --- .ci/jobs/gke-e2e-versions.yml | 6 +- build/ci/Makefile | 27 +- build/ci/e2e/GKE_k8s_versions.jenkinsfile | 8 +- build/ci/nightly/Jenkinsfile | 59 ++++ build/ci/pr/Jenkinsfile | 23 +- docs/accessing-services.asciidoc | 36 ++- docs/elasticsearch-spec.asciidoc | 150 ++++++++++ docs/index.asciidoc | 2 + docs/k8s-quickstart.asciidoc | 26 +- docs/managing-compute-resources.asciidoc | 117 ++++++++ operators/Makefile | 6 +- operators/VERSION | 1 + .../config/crds/apm_v1alpha1_apmserver.yaml | 113 ++++---- .../config/samples/apm/apm_es_kibana.yaml | 13 +- operators/config/samples/apm/apmserver.yaml | 5 +- operators/hack/release_notes.go | 266 ++++++++++++++++++ operators/hack/release_notes_test.go | 263 +++++++++++++++++ .../pkg/apis/apm/v1alpha1/apmserver_types.go | 29 +- .../apm/v1alpha1/zz_generated.deepcopy.go | 25 +- .../v1alpha1/elasticsearch_types.go | 11 +- .../pkg/apis/kibana/v1alpha1/kibana_types.go | 11 +- .../apmserver/apmserver_controller.go | 15 +- .../pkg/controller/apmserver/config/config.go | 4 +- operators/pkg/controller/apmserver/pod.go | 2 +- ...rverelasticsearchassociation_controller.go | 54 ++-- ...lasticsearchassociation_controller_test.go | 8 +- .../apmserverelasticsearchassociation/user.go | 17 +- .../user_test.go | 22 +- .../common/annotation/controller_version.go | 95 +++++++ .../annotation/controller_version_test.go | 152 +++++++++- .../common/association/association_test.go | 26 +- .../controller/common/keystore/resources.go | 12 +- .../controller/common/keystore/user_secret.go | 3 +- .../elasticsearch/elasticsearch_controller.go | 13 +- .../pkg/controller/kibana/driver_test.go | 2 +- .../controller/kibana/kibana_controller.go | 14 +- operators/pkg/controller/kibana/pod/pod.go | 2 +- .../association_controller.go | 30 +- .../pkg/dev/portforward/pod_forwarder.go | 4 +- .../pkg/dev/portforward/pod_forwarder_test.go | 9 +- operators/pkg/webhook/server.go | 16 +- operators/test/e2e/apm/standalone_test.go | 5 +- operators/test/e2e/test/apmserver/builder.go | 22 +- .../e2e/test/elasticsearch/http_client.go | 2 +- 44 files changed, 1441 insertions(+), 285 deletions(-) create mode 100644 build/ci/nightly/Jenkinsfile create mode 100644 docs/managing-compute-resources.asciidoc create mode 100644 operators/VERSION create mode 100644 operators/hack/release_notes.go create mode 100644 operators/hack/release_notes_test.go diff --git a/.ci/jobs/gke-e2e-versions.yml b/.ci/jobs/gke-e2e-versions.yml index 58c220917a..10f9c49566 100644 --- a/.ci/jobs/gke-e2e-versions.yml +++ b/.ci/jobs/gke-e2e-versions.yml @@ -8,8 +8,10 @@ artifactNumToKeep: 10 name: cloud-on-k8s-versions-gke project-type: pipeline - triggers: - - timed: '0 0 * * 1-5' + parameters: + - string: + name: IMAGE + description: "Docker image with ECK" pipeline-scm: scm: - git: 
diff --git a/build/ci/Makefile b/build/ci/Makefile index 77fba62175..71919f2f56 100644 --- a/build/ci/Makefile +++ b/build/ci/Makefile @@ -11,7 +11,7 @@ VAULT_GKE_CREDS_SECRET ?= secret/cloud-team/cloud-ci/ci-gcp-k8s-operator GKE_CREDS_FILE ?= credentials.json VAULT_PUBLIC_KEY ?= secret/release/license PUBLIC_KEY_FILE ?= license.key -VAULT_DOCKER_CREDENTIALS ?= secret/cloud-team/cloud-ci/cloudadmin +VAULT_DOCKER_CREDENTIALS ?= secret/devops-ci/cloud-on-k8s/eckadmin DOCKER_CREDENTIALS_FILE ?= docker_credentials.file VAULT_AWS_CREDS ?= secret/cloud-team/cloud-ci/eck-release VAULT_AWS_ACCESS_KEY_FILE ?= aws_access_key.file @@ -48,7 +48,7 @@ vault-docker-creds: @ VAULT_TOKEN=$(VAULT_TOKEN) \ vault read \ -address=$(VAULT_ADDR) \ - -field=password \ + -field=value \ $(VAULT_DOCKER_CREDENTIALS) \ > $(DOCKER_CREDENTIALS_FILE) @@ -71,7 +71,7 @@ vault-aws-creds: ci-pr: check-license-header docker build -f Dockerfile -t cloud-on-k8s-ci-pr . - docker run --rm -t \ + @ docker run --rm -t \ -v /var/run/docker.sock:/var/run/docker.sock \ -v $(ROOT_DIR):$(GO_MOUNT_PATH) \ -w $(GO_MOUNT_PATH) \ @@ -86,7 +86,7 @@ ci-pr: check-license-header ci-release: vault-public-key vault-docker-creds docker build -f Dockerfile -t cloud-on-k8s-ci-release . - docker run --rm -t \ + @ docker run --rm -t \ -v /var/run/docker.sock:/var/run/docker.sock \ -v $(ROOT_DIR):$(GO_MOUNT_PATH) \ -w $(GO_MOUNT_PATH) \ @@ -105,7 +105,7 @@ ci-release: vault-public-key vault-docker-creds # Will be uploaded to https://download.elastic.co/downloads/eck/$TAG_NAME/all-in-one.yaml yaml-upload: vault-aws-creds docker build -f Dockerfile -t cloud-on-k8s-ci-release . - docker run --rm -t \ + @ docker run --rm -t \ -v $(ROOT_DIR):$(GO_MOUNT_PATH) \ -w $(GO_MOUNT_PATH) \ -e "AWS_ACCESS_KEY_ID=$(shell cat $(VAULT_AWS_ACCESS_KEY_FILE))" \ @@ -119,7 +119,7 @@ yaml-upload: vault-aws-creds # Spawn a k8s cluster, and run e2e tests against it ci-e2e: vault-gke-creds docker build -f Dockerfile -t cloud-on-k8s-ci-e2e . - docker run --rm -t \ + @ docker run --rm -t \ -v /var/run/docker.sock:/var/run/docker.sock \ -v $(ROOT_DIR):$(GO_MOUNT_PATH) \ -w $(GO_MOUNT_PATH) \ @@ -137,7 +137,7 @@ ci-e2e: vault-gke-creds # Run e2e tests in GKE against provided ECK image ci-e2e-rc: vault-gke-creds docker build -f Dockerfile -t cloud-on-k8s-ci-e2e . - docker run --rm -t \ + @ docker run --rm -t \ -v /var/run/docker.sock:/var/run/docker.sock \ -v $(ROOT_DIR):$(GO_MOUNT_PATH) \ -w $(GO_MOUNT_PATH) \ @@ -156,7 +156,7 @@ ci-e2e-rc: vault-gke-creds # Remove k8s cluster ci-e2e-delete-cluster: vault-gke-creds docker build -f Dockerfile -t cloud-on-k8s-ci-e2e . 
- docker run --rm -t \ + @ docker run --rm -t \ -v /var/run/docker.sock:/var/run/docker.sock \ -v $(ROOT_DIR):$(GO_MOUNT_PATH) \ -w $(GO_MOUNT_PATH) \ @@ -168,7 +168,7 @@ ci-e2e-delete-cluster: vault-gke-creds # Remove all unused resources in GKE ci-gke-cleanup: ci-e2e-delete-cluster - docker run --rm -t \ + @ docker run --rm -t \ -v $(ROOT_DIR):$(GO_MOUNT_PATH) \ -w $(GO_MOUNT_PATH) \ -e "GCLOUD_PROJECT=$(GCLOUD_PROJECT)" \ @@ -177,12 +177,3 @@ ci-gke-cleanup: ci-e2e-delete-cluster cloud-on-k8s-ci-e2e \ bash -c "GKE_CLUSTER_VERSION=1.11 $(GO_MOUNT_PATH)/operators/hack/gke-cluster.sh auth && \ $(GO_MOUNT_PATH)/build/ci/delete_unused_disks.py" - -# Run docs build -ci-build-docs: - docker run --rm -t \ - -v $(ROOT_DIR):$(GO_MOUNT_PATH) \ - docker.elastic.co/docs/build:1 \ - bash -c "git clone https://github.com/elastic/docs.git && \ - /docs/build_docs.pl --doc $(GO_MOUNT_PATH)/docs/index.asciidoc --out $(GO_MOUNT_PATH)/docs/html --chunk 1 && \ - test -e $(GO_MOUNT_PATH)/docs/html/index.html" diff --git a/build/ci/e2e/GKE_k8s_versions.jenkinsfile b/build/ci/e2e/GKE_k8s_versions.jenkinsfile index 0df4eca802..567c828ba5 100644 --- a/build/ci/e2e/GKE_k8s_versions.jenkinsfile +++ b/build/ci/e2e/GKE_k8s_versions.jenkinsfile @@ -14,6 +14,8 @@ pipeline { VAULT_SECRET_ID = credentials('vault-secret-id') REGISTRY = "eu.gcr.io" GCLOUD_PROJECT = credentials('k8s-operators-gcloud-project') + OPERATOR_IMAGE = "${IMAGE}" + LATEST_RELEASED_IMG = "${IMAGE}" } stages { @@ -26,7 +28,7 @@ pipeline { } steps { checkout scm - sh 'make -C build/ci ci-e2e' + sh 'make -C build/ci ci-e2e-rc' } } stage("1.12") { @@ -39,7 +41,7 @@ pipeline { } steps { checkout scm - sh 'make -C build/ci ci-e2e' + sh 'make -C build/ci ci-e2e-rc' } } stage("1.13") { @@ -52,7 +54,7 @@ pipeline { } steps { checkout scm - sh 'make -C build/ci ci-e2e' + sh 'make -C build/ci ci-e2e-rc' } } } diff --git a/build/ci/nightly/Jenkinsfile b/build/ci/nightly/Jenkinsfile new file mode 100644 index 0000000000..9d7b616423 --- /dev/null +++ b/build/ci/nightly/Jenkinsfile @@ -0,0 +1,59 @@ +pipeline { + + agent { + label 'linux' + } + + options { + timeout(time: 1, unit: 'HOURS') + } + + environment { + VAULT_ADDR = credentials('vault-addr') + VAULT_ROLE_ID = credentials('vault-role-id') + VAULT_SECRET_ID = credentials('vault-secret-id') + GCLOUD_PROJECT = credentials('k8s-operators-gcloud-project') + REGISTRY = "push.docker.elastic.co" + REPOSITORY = "eck-snapshots" + IMG_NAME = "eck-operator" + SNAPSHOT = "true" + DOCKER_IMAGE_NO_TAG = "docker.elastic.co/${REPOSITORY}/${IMG_NAME}" + } + + stages { + stage('Run unit and integration tests') { + steps { + sh 'make -C build/ci ci-pr' + } + } + stage('Build and push Docker image') { + steps { + sh """ + export VERSION=\$(cat $WORKSPACE/operators/VERSION)-\$(date +%F)-\$(git rev-parse --short --verify HEAD) + export OPERATOR_IMAGE=${REGISTRY}/${REPOSITORY}/${IMG_NAME}:\$VERSION + make -C build/ci ci-release + """ + } + } + } + + post { + success { + script { + def version = sh(returnStdout: true, script: 'cat $WORKSPACE/operators/VERSION') + def hash = sh(returnStdout: true, script: 'git rev-parse --short --verify HEAD') + def date = new Date() + def image = env.DOCKER_IMAGE_NO_TAG + ":" + version + "-" + date.format("yyyy-MM-dd") + "-" + hash + currentBuild.description = image + + build job: 'cloud-on-k8s-versions-gke', + parameters: [string(name: 'IMAGE', value: image)], + wait: false + } + } + cleanup { + cleanWs() + } + } + +} diff --git a/build/ci/pr/Jenkinsfile b/build/ci/pr/Jenkinsfile index 
e2822e8c63..94159b5f51 100644 --- a/build/ci/pr/Jenkinsfile +++ b/build/ci/pr/Jenkinsfile @@ -38,8 +38,16 @@ pipeline { } stage("Run docs build") { steps { - checkout scm - sh 'make -C build/ci ci-build-docs' + cleanWs() + sh 'git clone git@github.com:elastic/docs.git' + sh 'git clone git@github.com:elastic/cloud-on-k8s.git' + sh """ + $WORKSPACE/docs/build_docs \ + --doc $WORKSPACE/cloud-on-k8s/docs/index.asciidoc \ + --out $WORKSPACE/cloud-on-k8s/docs/html \ + --chunk 1 + """ + sh 'test -e $WORKSPACE/cloud-on-k8s/docs/html/index.html' } } stage("Run smoke E2E tests") { @@ -61,17 +69,6 @@ pipeline { } post { - success { - withEnv([ - 'REGISTRY=push.docker.elastic.co', - 'REPOSITORY=eck-snapshots', - 'IMG_SUFFIX=', - 'SNAPSHOT_RELEASE=true', - 'TAG_NAME=${ghprbPullId}' - ]) { - sh 'make -C build/ci ci-release' - } - } cleanup { script { if (notOnlyDocs()) { diff --git a/docs/accessing-services.asciidoc b/docs/accessing-services.asciidoc index 1c789edb0f..940aa8e682 100644 --- a/docs/accessing-services.asciidoc +++ b/docs/accessing-services.asciidoc @@ -25,7 +25,7 @@ To access Elasticsearch, Kibana or APM Server, the operator manages a default us [source,sh] ---- -> kubectl get secret hulk-elastic-user -o go-template='{{.data.elastic | base64decode }}' +> kubectl get secret hulk-es-elastic-user -o go-template='{{.data.elastic | base64decode }}' 42xyz42citsale42xyz42 ---- @@ -46,6 +46,7 @@ For each resource, `Elasticsearch`, `Kibana` or `ApmServer`, the operator manage > kubectl get svc NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +hulk-apm-http ClusterIP 10.19.212.105 8200:31000/TCP 1m hulk-es-http ClusterIP 10.19.252.160 9200:31320/TCP 1m hulk-kb-http ClusterIP 10.19.247.151 5601:31380/TCP 1m ---- @@ -76,6 +77,7 @@ spec: > kubectl get svc NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +hulk-apm-http ClusterIP 10.19.212.105 35.176.227.106 8200:31000/TCP 1m hulk-es-http LoadBalancer 10.19.252.160 35.198.131.115 9200:31320/TCP 1m hulk-kb-http LoadBalancer 10.19.247.151 35.242.197.228 5601:31380/TCP 1m ---- @@ -141,8 +143,9 @@ spec: You can bring your own certificate to configure TLS to ensure that communication between HTTP clients and the cluster is encrypted. Create a Kubernetes secret with: -. tls.crt: the certificate (or a chain). -. tls.key: the private key to the first certificate in the certificate chain. + +- tls.crt: the certificate (or a chain). +- tls.key: the private key to the first certificate in the certificate chain. [source,sh] ---- @@ -160,6 +163,23 @@ spec: secretName: my-cert ---- +[float] +[id="{p}-disable-tls"] +==== Disable TLS + +You can explicitly disable TLS for Kibana or APM Server if you want to. + +[source,yaml] +---- +spec: + http: + tls: + selfSignedCertificate: + disabled: true +---- + +TLS cannot be disabled for Elasticsearch. 
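Once TLS is disabled, the associated HTTP service serves plain HTTP. A minimal sanity check, as a sketch assuming a Kibana instance named `hulk` like in the examples above: [source,sh] ---- > kubectl port-forward service/hulk-kb-http 5601 > curl -I http://localhost:5601 ----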
+ [float] [id="{p}-request-elasticsearch-endpoint"] === Requesting the Elasticsearch endpoint @@ -178,7 +198,7 @@ NAME=hulk kubectl get secret "$NAME-ca" -o go-template='{{index .data "ca.pem" | base64decode }}' > ca.pem PW=$(kubectl get secret "$NAME-elastic-user" -o go-template='{{.data.elastic | base64decode }}') -curl --cacert ca.pem -u elastic:$PW https://$NAME-es:9200/ +curl --cacert ca.pem -u elastic:$PW https://$NAME-es-http:9200/ ---- *Outside the Kubernetes cluster* @@ -191,11 +211,11 @@ curl --cacert ca.pem -u elastic:$PW https://$NAME-es:9200/ ---- NAME=hulk -kubectl get secret "$NAME-ca" -o go-template='{{index .data "ca.pem" | base64decode }}' > ca.pem -IP=$(kubectl get svc "$NAME-es" -o jsonpath='{.status.loadBalancer.ingress[].ip}') -PW=$(kubectl get secret "$NAME-elastic-user" -o go-template='{{.data.elastic | base64decode }}') +kubectl get secret "$NAME-es-http-certs-public" -o go-template='{{index .data "tls.crt" | base64decode }}' > tls.crt +IP=$(kubectl get svc "$NAME-es-http" -o jsonpath='{.status.loadBalancer.ingress[].ip}') +PW=$(kubectl get secret "$NAME-es-elastic-user" -o go-template='{{.data.elastic | base64decode }}') -curl --cacert ca.pem -u elastic:$PW https://$IP:9200/ +curl --cacert tls.crt -u elastic:$PW https://$IP:9200/ ---- Now you should get this message: diff --git a/docs/elasticsearch-spec.asciidoc b/docs/elasticsearch-spec.asciidoc index 8935e873e3..463366a4e1 100644 --- a/docs/elasticsearch-spec.asciidoc +++ b/docs/elasticsearch-spec.asciidoc @@ -50,6 +50,45 @@ spec: For more information on Elasticsearch settings, see https://www.elastic.co/guide/en/elasticsearch/reference/current/settings.html[Configuring Elasticsearch]. +[id="{p}-volume-claim-templates"] +=== Volume Claim Templates + +By default, the operator creates a https://kubernetes.io/docs/concepts/storage/persistent-volumes/[`PersistentVolumeClaim`] with a capacity of 1Gi for every Pod in an Elasticsearch cluster. This is to ensure that there is no data loss if a Pod is deleted. + +You can customize the volume claim templates used by Elasticsearch to adjust the storage to your needs. The claim name in the template must be `elasticsearch-data`: + +[source,yaml] +---- +spec: + nodes: + - volumeClaimTemplates: + - metadata: + name: elasticsearch-data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 5Gi + storageClassName: standard +---- + +In some cases you may want to use an `emptyDir` volume instead. This can be done by specifying the `elasticsearch-data` volume in the `podTemplate`: + +[source,yaml] +---- +spec: + nodes: + - config: + podTemplate: + spec: + volumes: + - name: elasticsearch-data + emptyDir: {} +---- + +Keep in mind that using `emptyDir` may result in data loss and is not recommended. [id="{p}-http-settings-tls-sans"] === HTTP settings & TLS SANs @@ -101,6 +140,75 @@ $ openssl req -x509 -newkey rsa:4096 -keyout tls.key -out tls.crt -days 365 -nod $ kubectl create secret tls my-cert --cert tls.crt --key tls.key ---- +[id="{p}-es-secure-settings"] +=== Secure Settings + +link:https://www.elastic.co/guide/en/elasticsearch/reference/current/secure-settings.html[Secure settings] can be specified via a Kubernetes secret. +The secret should contain a key-value pair for each secure setting you want to add. Reference that secret in the Elasticsearch +resource spec for ECK to automatically inject those settings into the keystore on each node before it starts Elasticsearch.
+ +[source,yaml] +---- +spec: + secureSettings: + secretName: your-secure-settings-secret +---- + +See link:snapshots.asciidoc[How to create automated snapshots] for an example use case. + + +[id="{p}-bundles-plugins"] +=== Custom Configuration Files and Plugins + +To run Elasticsearch with specific plugins or configuration files installed on ECK, you have two options: + +1. create a custom Docker image with the plugins or files pre-installed +2. install the plugins or configuration files at pod startup time + +NOTE: The first option has the advantage that you can verify the correctness of the image before rolling it out to your ECK installation, while the second option gives you +maximum flexibility but means you might catch any errors only at runtime. Plugin installation at runtime has another drawback in that it needs access to the Internet from your cluster +and downloads each plugin multiple times, once for each Elasticsearch node. + +Building your own custom Docker images is outside the scope of this documentation, even though it is the better solution for most users. + +The following therefore describes option 2, using a repository plugin as the example. To install the plugin before the Elasticsearch +nodes start, use an init container to run the link:https://www.elastic.co/guide/en/elasticsearch/plugins/current/installation.html[plugin installation tool]. + +[source,yaml] +---- +podTemplate: + spec: + initContainers: + - name: install-plugins + command: + - sh + - -c + - | + bin/elasticsearch-plugin install --batch repository-azure +---- + +To install custom configuration files, you can use volumes and volume mounts. The next example shows how to add a synonyms file for the +link:https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis-synonym-tokenfilter.html[synonym token filter] in Elasticsearch. +But you can use the same approach for any kind of file you want to mount into the configuration directory of Elasticsearch. + +[source,yaml] +---- +podTemplate: + spec: + containers: + - name: elasticsearch <1> + volumeMounts: + - name: synonyms + mountPath: /usr/share/elasticsearch/config/dictionaries + volumes: + - name: synonyms + configMap: + name: synonyms <2> +---- + +<1> Elasticsearch runs by convention in a container called 'elasticsearch' +<2> assuming you have created a config map in the same namespace as Elasticsearch with the name 'synonyms' containing the synonyms file(s) + [id="{p}-virtual-memory"] === Virtual memory @@ -271,3 +379,45 @@ In this situation, it would be preferable to first recreate the missing nodes in In order to do so, ECK must know about the logical grouping of nodes. Since this is an arbitrary setting (can represent availability zones, but also nodes roles, hot-warm topologies, etc.), it must be specified in the `updateStrategy.groups` section of the Elasticsearch specification. Nodes grouping is expressed through labels on the resources. In the example above, 3 pods are labeled with `group-a`, and the 3 other pods with `group-b`. +[id="{p}-pod-disruption-budget"] +=== Pod disruption budget + +A link:https://kubernetes.io/docs/tasks/run-application/configure-pdb/[Pod Disruption Budget] allows limiting disruptions to an existing set of pods while the Kubernetes cluster administrator manages cluster nodes. +With Elasticsearch, we want to make sure indices don't become unavailable. + +The operator enforces a default PDB of 1 `maxUnavailable` pod on the entire cluster.
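You can list the pod disruption budgets currently in effect with standard Kubernetes tooling: [source,sh] ---- > kubectl get pdb ----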
+ +This default can be tweaked in the Elasticsearch specification: + +[source,yaml] +---- +apiVersion: elasticsearch.k8s.elastic.co/v1alpha1 +kind: Elasticsearch +metadata: + name: quickstart +spec: + version: 7.2.0 + nodes: + - nodeCount: 3 + podDisruptionBudget: + spec: + maxUnavailable: 2 + selector: + matchLabels: + elasticsearch.k8s.elastic.co/cluster-name: quickstart +---- + +It can also be explicitly disabled: + +[source,yaml] +---- +apiVersion: elasticsearch.k8s.elastic.co/v1alpha1 +kind: Elasticsearch +metadata: + name: quickstart +spec: + version: 7.2.0 + nodes: + - nodeCount: 3 + podDisruptionBudget: {} +---- diff --git a/docs/index.asciidoc b/docs/index.asciidoc index fcd147c800..73198d3b31 100644 --- a/docs/index.asciidoc +++ b/docs/index.asciidoc @@ -10,4 +10,6 @@ include::overview.asciidoc[] include::k8s-quickstart.asciidoc[] include::accessing-services.asciidoc[] include::advanced-node-scheduling.asciidoc[] +include::managing-compute-resources.asciidoc[] include::snapshots.asciidoc[] +include::elasticsearch-spec.asciidoc[] diff --git a/docs/k8s-quickstart.asciidoc b/docs/k8s-quickstart.asciidoc index bbaf0c274b..0b3aafdef3 100644 --- a/docs/k8s-quickstart.asciidoc +++ b/docs/k8s-quickstart.asciidoc @@ -28,7 +28,7 @@ NOTE: If you are using Amazon EKS, make sure the Kubernetes control plane is all + [source,sh] ---- -kubectl apply -f https://download.elastic.co/downloads/eck/0.8.0/all-in-one.yaml +kubectl apply -f https://download.elastic.co/downloads/eck/0.9.0/all-in-one.yaml ---- . Monitor the operator logs: @@ -54,7 +54,7 @@ kind: Elasticsearch metadata: name: quickstart spec: - version: 7.1.0 + version: 7.2.0 nodes: - nodeCount: 1 config: @@ -79,7 +79,7 @@ kubectl get elasticsearch [source,sh] ---- NAME HEALTH NODES VERSION PHASE AGE -quickstart green 1 7.1.0 Operational 1m +quickstart green 1 7.2.0 Operational 1m ---- When you create the cluster, there is no `HEALTH` status and the `PHASE` is `Pending`. After a while, the `PHASE` turns into `Operational`, and `HEALTH` becomes `green`. @@ -126,7 +126,7 @@ A default user named `elastic` is automatically created. Its password is stored + [source,sh] ---- -PASSWORD=$(kubectl get secret quickstart-elastic-user -o=jsonpath='{.data.elastic}' | base64 --decode) +PASSWORD=$(kubectl get secret quickstart-es-elastic-user -o=jsonpath='{.data.elastic}' | base64 --decode) ---- . Request the Elasticsearch endpoint. @@ -161,7 +161,7 @@ NOTE: For testing purposes only, you can specify the `-k` option to turn off cer "cluster_name" : "quickstart", "cluster_uuid" : "XqWg0xIiRmmEBg4NMhnYPg", "version" : { - "number" : "7.1.0", + "number" : "7.2.0", "build_flavor" : "default", "build_type" : "docker", "build_hash" : "04116c9", @@ -191,7 +191,7 @@ kind: Kibana metadata: name: quickstart spec: - version: 7.1.0 + version: 7.2.0 nodeCount: 1 elasticsearchRef: name: quickstart @@ -230,13 +230,13 @@ Use `kubectl port-forward` to access Kibana from your local workstation: kubectl port-forward service/quickstart-kb-http 5601 ---- + -Open `http://localhost:5601` in your browser. +Open `https://localhost:5601` in your browser. Your browser will show a warning because the self-signed certificate configured by default is not verified by a third party certificate authority and not trusted by your browser. You can either configure a link:k8s-accessing-elastic-services.html#k8s-setting-up-your-own-certificate[valid certificate] or acknowledge the warning for the purposes of this quick start. + Login with the `elastic` user. 
Retrieve its password with: + [source,sh] ---- -echo $(kubectl get secret quickstart-elastic-user -o=jsonpath='{.data.elastic}' | base64 --decode) +echo $(kubectl get secret quickstart-es-elastic-user -o=jsonpath='{.data.elastic}' | base64 --decode) ---- [float] @@ -255,7 +255,7 @@ kind: Elasticsearch metadata: name: quickstart spec: - version: 7.1.0 + version: 7.2.0 nodes: - nodeCount: 3 config: @@ -267,11 +267,11 @@ EOF [float] [id="{p}-persistent-storage"] -=== Use persistent storage +=== Update persistent storage -Now that you have completed the quickstart, you can try out more features like using persistent storage. The cluster that you deployed in this quickstart uses a default persistent volume claim of 1GiB, without a storage class set. This means that the default storage class defined in the Kubernetes cluster is the one that will be provisioned. +Now that you have completed the quickstart, you can try out more features like tweaking persistent storage. The cluster that you deployed in this quickstart uses a default persistent volume claim of 1GiB, without a storage class set. This means that the default storage class defined in the Kubernetes cluster is the one that will be used to provision the volumes. -You can request a `PersistentVolumeClaim` in the cluster specification, to target any `PersistentVolume` class available in your Kubernetes cluster: +You can request a `PersistentVolumeClaim` with a larger size in the Elasticsearch specification or target any `PersistentVolume` class available in your Kubernetes cluster: [source,yaml] ---- @@ -281,7 +281,7 @@ kind: Elasticsearch metadata: name: quickstart spec: - version: 7.1.0 + version: 7.2.0 nodes: - nodeCount: 3 config: diff --git a/docs/managing-compute-resources.asciidoc b/docs/managing-compute-resources.asciidoc new file mode 100644 index 0000000000..1b337bae8f --- /dev/null +++ b/docs/managing-compute-resources.asciidoc @@ -0,0 +1,117 @@ +[id="{p}-managing-compute-resources"] +== Managing compute resources + +When a Pod is created, it may request CPU and RAM resources. It may also specify the maximum resources that the containers are allowed to consume. Both Pod `limits` and `requests` can be set in the specification of any object managed by the operator (Elasticsearch, Kibana or the APM server). For more information about how this is used by Kubernetes, see https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/[Managing Compute Resources for Containers]. + +[float] +[id="{p}-custom-resources"] +=== Set custom resources + +The `resources` can be customized in the `podTemplate` of an object. + +Here is an example for Elasticsearch: + +[source,yaml] +---- +spec: + nodes: + - podTemplate: + spec: + containers: + - name: elasticsearch + env: + - name: ES_JAVA_OPTS + value: -Xms2048M -Xmx2048M + resources: + requests: + memory: 2Gi + cpu: 1 + limits: + memory: 4Gi + cpu: 2 +---- + +This example also demonstrates how to set the JVM memory options to match, using the `ES_JAVA_OPTS` environment variable.
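As a rule of thumb, the JVM heap (2048M here) should be no more than half of the total memory available to the container, leaving the remainder for off-heap memory and the filesystem cache. To check the effective heap on a running cluster you can query the cat nodes API; this is only a sketch, assuming the `quickstart` cluster and credentials from the quickstart above: [source,sh] ---- PW=$(kubectl get secret quickstart-es-elastic-user -o=jsonpath='{.data.elastic}' | base64 --decode) kubectl port-forward service/quickstart-es-http 9200 & curl -u "elastic:$PW" -k "https://localhost:9200/_cat/nodes?v&h=name,heap.max" ----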
+ +The same applies to every object managed by the operator. Here is how to set custom resources for Kibana: + +[source,yaml] +---- +spec: + podTemplate: + spec: + containers: + - name: kibana + resources: + requests: + memory: 1Gi + cpu: 1 + limits: + memory: 2Gi + cpu: 2 +---- + +And here is how to set custom resources on the APM server: + +[source,yaml] +---- +spec: + podTemplate: + spec: + containers: + - name: apm-server + resources: + requests: + memory: 1Gi + cpu: 1 + limits: + memory: 2Gi + cpu: 2 +---- + +[float] +[id="{p}-default-behavior"] +=== Default behavior + +If no `resources` are set in the specification of an object, then no `requests` or `limits` are applied to the containers, with the notable exception of Elasticsearch. +It is important to understand that by default, if no memory requirement is set in the specification of Elasticsearch, then the operator applies a default memory request of 2Gi. The reason is that it is critical for Elasticsearch to have a minimum amount of memory to perform correctly. But this can be a problem if resources are https://kubernetes.io/docs/tasks/administer-cluster/manage-resources/memory-default-namespace/[managed with some LimitRanges at the namespace level] and if a minimum memory constraint is imposed. + +For example, you may want to apply a default request of 3Gi and enforce it as a minimum with a constraint: + +[source,yaml] +---- +apiVersion: v1 +kind: LimitRange +metadata: + name: default-mem-per-container +spec: + limits: + - min: + memory: "3Gi" + defaultRequest: + memory: "3Gi" + type: Container +---- + +But if no `resources` are declared in the specification, then the Pod can't be created and the following event is generated: + +................................... +default 0s Warning Unexpected elasticsearch/elasticsearch-sample Cannot create pod elasticsearch-sample-es-ldbgj48c7r: pods "elasticsearch-sample-es-ldbgj48c7r" is forbidden: minimum memory usage per Container is 3Gi, but request is 2Gi +................................... + +To solve this, you can specify an empty `limits` section in the specification: + +[source,yaml] +---- +spec: + nodes: + - podTemplate: + spec: + containers: + - name: elasticsearch + resources: + # specify empty limits + limits: {} +---- + +The default `requests` will not be set by the operator, and the Pod will be created.
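To confirm what was actually applied, inspect the container resources on a running Pod; a minimal sketch, where `$POD_NAME` is a placeholder for any Pod name reported by `kubectl get pods`: [source,sh] ---- > kubectl get pod $POD_NAME -o jsonpath='{.spec.containers[0].resources}' ----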
\ No newline at end of file diff --git a/operators/Makefile b/operators/Makefile index fbb346c375..a7a9c1a1c2 100644 --- a/operators/Makefile +++ b/operators/Makefile @@ -17,7 +17,7 @@ GKE_CLUSTER_VERSION ?= 1.12 REPOSITORY ?= eck NAME ?= eck-operator -VERSION ?= 0.9.0-SNAPSHOT +VERSION ?= $(shell cat VERSION) SNAPSHOT ?= true LATEST_RELEASED_IMG ?= "docker.elastic.co/eck/$(NAME):0.8.0" @@ -241,10 +241,6 @@ bootstrap-gke: require-gcloud-project ifeq ($(PSP), 1) kubectl apply -f config/dev/elastic-psp.yaml endif -ifeq ($(SKIP_DOCKER_COMMAND), false) - # push "latest" operator image to be used for init containers when running the operator locally - $(MAKE) docker-build docker-push -endif delete-gke: require-gcloud-project GKE_CLUSTER_VERSION=$(GKE_CLUSTER_VERSION) ./hack/gke-cluster.sh delete diff --git a/operators/VERSION b/operators/VERSION new file mode 100644 index 0000000000..5ea35de7ad --- /dev/null +++ b/operators/VERSION @@ -0,0 +1 @@ +0.9.0-SNAPSHOT diff --git a/operators/config/crds/apm_v1alpha1_apmserver.yaml b/operators/config/crds/apm_v1alpha1_apmserver.yaml index aa93cc5473..9776fc7f58 100644 --- a/operators/config/crds/apm_v1alpha1_apmserver.yaml +++ b/operators/config/crds/apm_v1alpha1_apmserver.yaml @@ -50,6 +50,60 @@ spec: config: description: Config represents the APM configuration. type: object + elasticsearch: + description: Elasticsearch configures how the APM server connects to + Elasticsearch + properties: + auth: + description: Auth configures authentication for APM Server to use. + properties: + inline: + description: Inline is auth provided as plaintext inline credentials. + properties: + password: + description: Password is the password to use. + type: string + username: + description: User is the username to use. + type: string + required: + - username + - password + type: object + secret: + description: SecretKeyRef is a secret that contains the credentials + to use. + type: object + type: object + hosts: + description: Hosts are the URLs of the output Elasticsearch nodes. + items: + type: string + type: array + ssl: + description: SSL configures TLS-related configuration for Elasticsearch + properties: + certificateAuthorities: + description: CertificateAuthorities is a secret that contains + a `tls.crt` entry that contain certificates for server verifications. + properties: + secretName: + type: string + type: object + type: object + type: object + elasticsearchRef: + description: ElasticsearchRef references an Elasticsearch resource in + the Kubernetes cluster. If the namespace is not specified, the current + resource namespace will be used. + properties: + name: + type: string + namespace: + type: string + required: + - name + type: object featureFlags: description: FeatureFlags are apm-specific flags that enable or disable specific experimental features @@ -114,65 +168,6 @@ spec: must have. format: int32 type: integer - output: - properties: - elasticsearch: - description: Elasticsearch configures the Elasticsearch output - properties: - auth: - description: Auth configures authentication for APM Server to - use. - properties: - inline: - description: Inline is auth provided as plaintext inline - credentials. - properties: - password: - description: Password is the password to use. - type: string - username: - description: User is the username to use. - type: string - required: - - username - - password - type: object - secret: - description: SecretKeyRef is a secret that contains the - credentials to use. 
- type: object - type: object - hosts: - description: Hosts are the URLs of the output Elasticsearch - nodes. - items: - type: string - type: array - ref: - description: ElasticsearchRef allows users to reference a Elasticsearch - cluster inside k8s to automatically derive the other fields. - properties: - name: - type: string - namespace: - type: string - required: - - name - type: object - ssl: - description: SSL configures TLS-related configuration for Elasticsearch - properties: - certificateAuthorities: - description: CertificateAuthorities is a secret that contains - a `tls.crt` entry that contain certificates for server - verifications. - properties: - secretName: - type: string - type: object - type: object - type: object - type: object podTemplate: description: PodTemplate can be used to propagate configuration to APM Server pods. This allows specifying custom annotations, labels, environment diff --git a/operators/config/samples/apm/apm_es_kibana.yaml b/operators/config/samples/apm/apm_es_kibana.yaml index d0a12c0557..ca5c6f0bc4 100644 --- a/operators/config/samples/apm/apm_es_kibana.yaml +++ b/operators/config/samples/apm/apm_es_kibana.yaml @@ -5,7 +5,7 @@ kind: Elasticsearch metadata: name: elasticsearch-sample spec: - version: "7.1.0" + version: "7.2.0" nodes: - name: default nodeCount: 3 @@ -15,20 +15,17 @@ kind: ApmServer metadata: name: apm-server-sample spec: - version: "7.1.0" + version: "7.2.0" nodeCount: 1 - output: - elasticsearch: - ref: - name: elasticsearch-sample - namespace: default + elasticsearchRef: + name: "elasticsearch-sample" --- apiVersion: kibana.k8s.elastic.co/v1alpha1 kind: Kibana metadata: name: kibana-sample spec: - version: "7.1.0" + version: "7.2.0" nodeCount: 1 elasticsearchRef: name: "elasticsearch-sample" diff --git a/operators/config/samples/apm/apmserver.yaml b/operators/config/samples/apm/apmserver.yaml index e66e4a837c..8348ebd85e 100644 --- a/operators/config/samples/apm/apmserver.yaml +++ b/operators/config/samples/apm/apmserver.yaml @@ -3,5 +3,8 @@ kind: ApmServer metadata: name: apmserver-sample spec: - version: "7.1.0" + version: "7.2.0" nodeCount: 1 + config: + output.console: + pretty: true diff --git a/operators/hack/release_notes.go b/operators/hack/release_notes.go new file mode 100644 index 0000000000..f5ef7ad8f7 --- /dev/null +++ b/operators/hack/release_notes.go @@ -0,0 +1,266 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +package main + +import ( + "encoding/json" + "errors" + "fmt" + "html/template" + "io" + "net/http" + "os" + "regexp" + "sort" + "strconv" + "strings" +) + +const ( + baseURL = "https://api.github.com/repos/" + repo = "elastic/cloud-on-k8s/" + releaseNoteTemplate = `:issue: https://github.com/{{.Repo}}issues/ +:pull: https://github.com/{{.Repo}}pull/ + +[[release-notes-{{.Version}}]] +== {n} version {{.Version}} +{{range $group, $prs := .Groups}} +[[{{- id $group -}}-{{$.Version}}]] +[float] +=== {{index $.GroupLabels $group}} +{{range $prs}} +* {{.Title}} {pull}{{.Number}}[#{{.Number}}]{{with .RelatedIssues -}} +{{$length := len .}} (issue{{if gt $length 1}}s{{end}}: {{range $idx, $el := .}}{{if $idx}}, {{end}}{issue}{{$el}}[#{{$el}}]{{end}}) +{{- end}} +{{- end}} +{{end}} +` +) + +var ( + groupLabels = map[string]string{ + ">breaking": "Breaking changes", + ">deprecation": "Deprecations", + ">feature": "New features", + ">enhancement": "Enhancements", + ">bug": "Bug fixes", + "nogroup": "Misc", + } + + ignore = map[string]bool{ + ">non-issue": true, + ">refactoring": true, + ">docs": true, + ">test": true, + ":ci": true, + "backport": true, + } +) + +// Label models a subset of a GitHub label. +type Label struct { + Name string `json:"name"` +} + +// Issue models a subset of a Github issue. +type Issue struct { + Labels []Label `json:"labels"` + Body string `json:"body"` + Title string `json:"title"` + Number int `json:"number"` + PullRequest map[string]string `json:"pull_request,omitempty"` + RelatedIssues []int +} + +type GroupedIssues = map[string][]Issue + +type TemplateParams struct { + Version string + Repo string + GroupLabels map[string]string + Groups GroupedIssues +} + +func fetch(url string, out interface{}) (string, error) { + resp, err := http.Get(url) + if err != nil { + return "", err + } + defer resp.Body.Close() + + nextLink := extractNextLink(resp.Header) + + if resp.StatusCode < 200 || resp.StatusCode >= 300 { + return "", errors.New(fmt.Sprintf("%s: %d %s ", url, resp.StatusCode, resp.Status)) + } + + if err = json.NewDecoder(resp.Body).Decode(&out); err != nil { + return "", err + } + return nextLink, nil +} + +func extractNextLink(headers http.Header) string { + var nextLink string + nextRe := regexp.MustCompile(`<([^>]+)>; rel="next"`) + links := headers["Link"] + for _, lnk := range links { + matches := nextRe.FindAllStringSubmatch(lnk, 1) + if matches != nil && matches[0][1] != "" { + nextLink = matches[0][1] + break + } + } + return nextLink +} + +func fetchVersionLabels() ([]string, error) { + var versionLabels []string + url := fmt.Sprintf("%s%slabels?page=1", baseURL, repo) +FETCH: + var labels []Label + next, err := fetch(url, &labels) + if err != nil { + return nil, err + } + for _, l := range labels { + if strings.HasPrefix(l.Name, "v") { + versionLabels = append(versionLabels, l.Name) + } + } + if next != "" { + url = next + goto FETCH + } + + return versionLabels, nil +} + +func fetchIssues(version string) (GroupedIssues, error) { + url := fmt.Sprintf("%s%sissues?labels=%s&pagesize=100&state=all&page=1", baseURL, repo, version) + var prs []Issue +FETCH: + var tranche []Issue + next, err := fetch(url, &tranche) + if err != nil { + return nil, err + } + for _, issue := range tranche { + // only look at PRs + if issue.PullRequest != nil { + prs = append(prs, issue) + } + } + if next != "" { + url = next + goto FETCH + } + result := make(GroupedIssues) + noGroup := "nogroup" +PR: + for _, pr := range prs { + prLabels := make(map[string]bool) + for _, lbl 
:= range pr.Labels { + // remove PRs that have labels to be ignored + if ignore[lbl.Name] { + continue PR + } + // build a lookup table of all labels for this PR + prLabels[lbl.Name] = true + } + + // extract related issues from PR body + if err := extractRelatedIssues(&pr); err != nil { + return nil, err + } + + // group PRs by type label + for typeLabel := range groupLabels { + if prLabels[typeLabel] { + result[typeLabel] = append(result[typeLabel], pr) + continue PR + } + } + // or fall back to a default group + result[noGroup] = append(result[noGroup], pr) + } + return result, nil +} + +func extractRelatedIssues(issue *Issue) error { + re := regexp.MustCompile(fmt.Sprintf(`https://github.com/%sissues/(\d+)`, repo)) + matches := re.FindAllStringSubmatch(issue.Body, -1) + issues := map[int]struct{}{} + for _, capture := range matches { + issueNum, err := strconv.Atoi(capture[1]) + if err != nil { + return err + } + issues[issueNum] = struct{}{} + + } + for rel := range issues { + issue.RelatedIssues = append(issue.RelatedIssues, rel) + } + sort.Ints(issue.RelatedIssues) + return nil +} + +func dumpIssues(params TemplateParams, out io.Writer) { + funcs := template.FuncMap{ + "id": func(s string) string { + return strings.TrimPrefix(s, ">") + }, + } + tpl := template.Must(template.New("release_notes").Funcs(funcs).Parse(releaseNoteTemplate)) + err := tpl.Execute(out, params) + if err != nil { + println(err) + } +} + +func main() { + labels, err := fetchVersionLabels() + if err != nil { + panic(err) + } + + if len(os.Args) != 2 { + usage(labels) + } + + version := os.Args[1] + found := false + for _, l := range labels { + if l == version { + found = true + } + } + if !found { + usage(labels) + } + + groupedIssues, err := fetchIssues(version) + if err != nil { + panic(err) + } + dumpIssues(TemplateParams{ + Version: strings.TrimPrefix(version, "v"), + Repo: repo, + GroupLabels: groupLabels, + Groups: groupedIssues, + }, os.Stdout) + +} + +func usage(labels []string) { + println(fmt.Sprintf("USAGE: %s version > outfile", os.Args[0])) + println("Known versions:") + sort.Strings(labels) + for _, l := range labels { + println(l) + } + os.Exit(1) +} diff --git a/operators/hack/release_notes_test.go b/operators/hack/release_notes_test.go new file mode 100644 index 0000000000..e7bf002206 --- /dev/null +++ b/operators/hack/release_notes_test.go @@ -0,0 +1,263 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +package main + +import ( + "bytes" + "net/http" + "reflect" + "testing" +) + +func Test_dumpIssues(t *testing.T) { + type args struct { + params TemplateParams + } + tests := []struct { + name string + args args + wantOut string + }{ + { + name: "two issues--no related", + args: args{ + params: TemplateParams{ + Version: "0.9.0", + Repo: "me/my-repo/", + GroupLabels: map[string]string{ + ">bugs": "Bug Fixes", + }, + Groups: GroupedIssues{ + ">bugs": []Issue{ + { + Labels: nil, + Body: "body", + Title: "title", + Number: 123, + PullRequest: nil, + RelatedIssues: nil, + }, + { + Labels: nil, + Body: "body2", + Title: "title2", + Number: 456, + PullRequest: nil, + RelatedIssues: nil, + }, + }, + }, + }, + }, + wantOut: `:issue: https://github.com/me/my-repo/issues/ +:pull: https://github.com/me/my-repo/pull/ + +[[release-notes-0.9.0]] +== {n} version 0.9.0 + +[[bugs-0.9.0]] +[float] +=== Bug Fixes + +* title {pull}123[#123] +* title2 {pull}456[#456] + +`, + }, + { + name: "single issue with related", + args: args{ + params: TemplateParams{ + Version: "0.9.0", + Repo: "me/my-repo/", + GroupLabels: map[string]string{ + ">bugs": "Bug Fixes", + }, + Groups: GroupedIssues{ + ">bugs": []Issue{ + { + Labels: nil, + Body: "body", + Title: "title", + Number: 123, + PullRequest: nil, + RelatedIssues: []int{456}, + }, + }, + }, + }, + }, + wantOut: `:issue: https://github.com/me/my-repo/issues/ +:pull: https://github.com/me/my-repo/pull/ + +[[release-notes-0.9.0]] +== {n} version 0.9.0 + +[[bugs-0.9.0]] +[float] +=== Bug Fixes + +* title {pull}123[#123] (issue: {issue}456[#456]) + +`, + }, + { + name: "single issue--two related", + args: args{ + params: TemplateParams{ + Version: "0.9.0", + Repo: "me/my-repo/", + GroupLabels: map[string]string{ + ">bugs": "Bug Fixes", + }, + Groups: GroupedIssues{ + ">bugs": []Issue{ + { + Labels: nil, + Body: "body", + Title: "title", + Number: 123, + PullRequest: nil, + RelatedIssues: []int{456, 789}, + }, + }, + }, + }, + }, + wantOut: `:issue: https://github.com/me/my-repo/issues/ +:pull: https://github.com/me/my-repo/pull/ + +[[release-notes-0.9.0]] +== {n} version 0.9.0 + +[[bugs-0.9.0]] +[float] +=== Bug Fixes + +* title {pull}123[#123] (issues: {issue}456[#456], {issue}789[#789]) + +`, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + out := &bytes.Buffer{} + dumpIssues(tt.args.params, out) + if gotOut := out.String(); gotOut != tt.wantOut { + t.Errorf("dumpIssues() = %v, want %v", gotOut, tt.wantOut) + } + }) + } +} + +func Test_extractRelatedIssues(t *testing.T) { + type args struct { + issue *Issue + } + tests := []struct { + name string + args args + want []int + wantErr bool + }{ + { + name: "single issue", + args: args{ + issue: &Issue{ + Body: "Resolves https://github.com/elastic/cloud-on-k8s/issues/1241\r\n\r\n* If there is no existing annotation on a resource", + }, + }, + want: []int{1241}, + wantErr: false, + }, + { + name: "multi issue", + args: args{ + issue: &Issue{ + Body: "Resolves https://github.com/elastic/cloud-on-k8s/issues/1241\r\n\r\nRelated https://github.com/elastic/cloud-on-k8s/issues/1245\r\n\r\n", + }, + }, + want: []int{1241, 1245}, + wantErr: false, + }, + { + name: "non issue", + args: args{ + issue: &Issue{ + Body: "Resolves https://github.com/elastic/cloud-on-k8s/issues/1241\r\n\r\nSee all issues https://github.com/elastic/cloud-on-k8s/issues/\r\n\r\n", + }, + }, + want: []int{1241}, + wantErr: false, + }, + { + name: "duplicate issue", + args: args{ + issue: &Issue{ + Body: "Resolves 
https://github.com/elastic/cloud-on-k8s/issues/1241\r\n\r\nRelated https://github.com/elastic/cloud-on-k8s/issues/1241\r\n\r\n", + }, + }, + want: []int{1241}, + wantErr: false, + }, + { + name: "ordered", + args: args{ + issue: &Issue{ + Body: "Resolves https://github.com/elastic/cloud-on-k8s/issues/1245\r\n\r\nRelated https://github.com/elastic/cloud-on-k8s/issues/1241\r\n\r\n", + }, + }, + want: []int{1241, 1245}, + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if err := extractRelatedIssues(tt.args.issue); (err != nil) != tt.wantErr { + t.Errorf("extractRelatedIssues() error = %v, wantErr %v", err, tt.wantErr) + } + if !reflect.DeepEqual(tt.want, tt.args.issue.RelatedIssues) { + t.Errorf("extractRelatedIssues() got = %v, want %v", tt.args.issue.RelatedIssues, tt.want) + } + }) + } +} + +func Test_extractNextLink(t *testing.T) { + type args struct { + headers http.Header + } + tests := []struct { + name string + args args + want string + }{ + { + name: "no link", + args: args{ + headers: http.Header{}, + }, + want: "", + }, + { + name: "with next link", + args: args{ + headers: http.Header{ + "Link": []string{ + `; rel="next", ; rel="last"`, + }, + }, + }, + want: "https://api.github.com/repositories/155368246/issues?page=2", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := extractNextLink(tt.args.headers); got != tt.want { + t.Errorf("extractNextLink() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/operators/pkg/apis/apm/v1alpha1/apmserver_types.go b/operators/pkg/apis/apm/v1alpha1/apmserver_types.go index f6be41f9cd..dd202fce2b 100644 --- a/operators/pkg/apis/apm/v1alpha1/apmserver_types.go +++ b/operators/pkg/apis/apm/v1alpha1/apmserver_types.go @@ -10,7 +10,10 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -const APMServerContainerName = "apm-server" +const ( + APMServerContainerName = "apm-server" + Kind = "ApmServer" +) // ApmServerSpec defines the desired state of ApmServer type ApmServerSpec struct { @@ -29,8 +32,13 @@ type ApmServerSpec struct { // HTTP contains settings for HTTP. HTTP commonv1alpha1.HTTPConfig `json:"http,omitempty"` + // ElasticsearchRef references an Elasticsearch resource in the Kubernetes cluster. + // If the namespace is not specified, the current resource namespace will be used. + ElasticsearchRef commonv1alpha1.ObjectSelector `json:"elasticsearchRef,omitempty"` + + // Elasticsearch configures how the APM server connects to Elasticsearch // +optional - Output Output `json:"output,omitempty"` + Elasticsearch ElasticsearchOutput `json:"elasticsearch,omitempty"` // PodTemplate can be used to propagate configuration to APM Server pods. // This allows specifying custom annotations, labels, environment variables, @@ -49,17 +57,8 @@ type ApmServerSpec struct { FeatureFlags commonv1alpha1.FeatureFlags `json:"featureFlags,omitempty"` } -// Output contains output configuration for supported outputs -type Output struct { - // Elasticsearch configures the Elasticsearch output - // +optional - Elasticsearch ElasticsearchOutput `json:"elasticsearch,omitempty"` -} - // Elasticsearch contains configuration for the Elasticsearch output type ElasticsearchOutput struct { - // ElasticsearchRef allows users to reference a Elasticsearch cluster inside k8s to automatically derive the other fields. - ElasticsearchRef *commonv1alpha1.ObjectSelector `json:"ref,omitempty"` // Hosts are the URLs of the output Elasticsearch nodes. 
Hosts []string `json:"hosts,omitempty"` @@ -148,9 +147,15 @@ func (as *ApmServer) IsMarkedForDeletion() bool { } func (as *ApmServer) ElasticsearchAuth() commonv1alpha1.ElasticsearchAuth { - return as.Spec.Output.Elasticsearch.Auth + return as.Spec.Elasticsearch.Auth } func (as *ApmServer) SecureSettings() *commonv1alpha1.SecretRef { return as.Spec.SecureSettings } + +// Kind can technically be retrieved from metav1.Object, but there is a bug preventing us to retrieve it +// see https://github.com/kubernetes-sigs/controller-runtime/issues/406 +func (as *ApmServer) Kind() string { + return Kind +} diff --git a/operators/pkg/apis/apm/v1alpha1/zz_generated.deepcopy.go b/operators/pkg/apis/apm/v1alpha1/zz_generated.deepcopy.go index d9a2ed652b..90db926ef0 100644 --- a/operators/pkg/apis/apm/v1alpha1/zz_generated.deepcopy.go +++ b/operators/pkg/apis/apm/v1alpha1/zz_generated.deepcopy.go @@ -82,7 +82,8 @@ func (in *ApmServerSpec) DeepCopyInto(out *ApmServerSpec) { *out = (*in).DeepCopy() } in.HTTP.DeepCopyInto(&out.HTTP) - in.Output.DeepCopyInto(&out.Output) + out.ElasticsearchRef = in.ElasticsearchRef + in.Elasticsearch.DeepCopyInto(&out.Elasticsearch) in.PodTemplate.DeepCopyInto(&out.PodTemplate) if in.SecureSettings != nil { in, out := &in.SecureSettings, &out.SecureSettings @@ -129,11 +130,6 @@ func (in *ApmServerStatus) DeepCopy() *ApmServerStatus { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ElasticsearchOutput) DeepCopyInto(out *ElasticsearchOutput) { *out = *in - if in.ElasticsearchRef != nil { - in, out := &in.ElasticsearchRef, &out.ElasticsearchRef - *out = new(commonv1alpha1.ObjectSelector) - **out = **in - } if in.Hosts != nil { in, out := &in.Hosts, &out.Hosts *out = make([]string, len(*in)) @@ -170,20 +166,3 @@ func (in *ElasticsearchOutputSSL) DeepCopy() *ElasticsearchOutputSSL { in.DeepCopyInto(out) return out } - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Output) DeepCopyInto(out *Output) { - *out = *in - in.Elasticsearch.DeepCopyInto(&out.Elasticsearch) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Output. 
-func (in *Output) DeepCopy() *Output { - if in == nil { - return nil - } - out := new(Output) - in.DeepCopyInto(out) - return out -} diff --git a/operators/pkg/apis/elasticsearch/v1alpha1/elasticsearch_types.go b/operators/pkg/apis/elasticsearch/v1alpha1/elasticsearch_types.go index 53e5f39a62..55877db6a5 100644 --- a/operators/pkg/apis/elasticsearch/v1alpha1/elasticsearch_types.go +++ b/operators/pkg/apis/elasticsearch/v1alpha1/elasticsearch_types.go @@ -10,7 +10,10 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -const ElasticsearchContainerName = "elasticsearch" +const ( + ElasticsearchContainerName = "elasticsearch" + Kind = "Elasticsearch" +) // ElasticsearchSpec defines the desired state of Elasticsearch type ElasticsearchSpec struct { @@ -260,6 +263,12 @@ func (e Elasticsearch) SecureSettings() *commonv1alpha1.SecretRef { return e.Spec.SecureSettings } +// Kind can technically be retrieved from metav1.Object, but there is a bug preventing us to retrieve it +// see https://github.com/kubernetes-sigs/controller-runtime/issues/406 +func (e Elasticsearch) Kind() string { + return Kind +} + // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // ElasticsearchList contains a list of Elasticsearch clusters diff --git a/operators/pkg/apis/kibana/v1alpha1/kibana_types.go b/operators/pkg/apis/kibana/v1alpha1/kibana_types.go index 8acfe3e511..8b926cdfd9 100644 --- a/operators/pkg/apis/kibana/v1alpha1/kibana_types.go +++ b/operators/pkg/apis/kibana/v1alpha1/kibana_types.go @@ -11,7 +11,10 @@ import ( commonv1alpha1 "github.com/elastic/cloud-on-k8s/operators/pkg/apis/common/v1alpha1" ) -const KibanaContainerName = "kibana" +const ( + KibanaContainerName = "kibana" + Kind = "Kibana" +) // KibanaSpec defines the desired state of Kibana type KibanaSpec struct { @@ -107,6 +110,12 @@ func (k *Kibana) SecureSettings() *commonv1alpha1.SecretRef { return k.Spec.SecureSettings } +// Kind can technically be retrieved from metav1.Object, but there is a bug preventing us to retrieve it +// see https://github.com/kubernetes-sigs/controller-runtime/issues/406 +func (k *Kibana) Kind() string { + return Kind +} + // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/operators/pkg/controller/apmserver/apmserver_controller.go b/operators/pkg/controller/apmserver/apmserver_controller.go index 5f3797a8fd..e970bccb3e 100644 --- a/operators/pkg/controller/apmserver/apmserver_controller.go +++ b/operators/pkg/controller/apmserver/apmserver_controller.go @@ -35,6 +35,7 @@ import ( corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + k8slabels "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/rand" @@ -196,6 +197,16 @@ func (r *ReconcileApmServer) Reconcile(request reconcile.Request) (reconcile.Res return reconcile.Result{}, nil } + selector := k8slabels.Set(map[string]string{labels.ApmServerNameLabelName: as.Name}).AsSelector() + compat, err := annotation.ReconcileCompatibility(r.Client, as, selector, r.OperatorInfo.BuildInfo.Version) + if err != nil { + return reconcile.Result{}, err + } + if !compat { + // this resource is not able to be reconciled by this version of the controller, so we will skip it and not requeue + return reconcile.Result{}, nil + } + err = annotation.UpdateControllerVersion(r.Client, as, r.OperatorInfo.BuildInfo.Version) if err != nil { return reconcile.Result{}, err @@ -296,7 +307,7 
@@ func (r *ReconcileApmServer) deploymentParams( _, _ = configChecksum.Write([]byte(params.keystoreResources.Version)) } - esCASecretName := as.Spec.Output.Elasticsearch.SSL.CertificateAuthorities.SecretName + esCASecretName := as.Spec.Elasticsearch.SSL.CertificateAuthorities.SecretName if esCASecretName != "" { // TODO: use apmServerCa to generate cert for deployment @@ -439,6 +450,6 @@ func (r *ReconcileApmServer) updateStatus(state State) (reconcile.Result, error) // finalizersFor returns the list of finalizers applying to a given APM deployment func (r *ReconcileApmServer) finalizersFor(as apmv1alpha1.ApmServer) []finalizer.Finalizer { return []finalizer.Finalizer{ - keystore.Finalizer(k8s.ExtractNamespacedName(&as), r.dynamicWatches, "apmserver"), + keystore.Finalizer(k8s.ExtractNamespacedName(&as), r.dynamicWatches, as.Kind()), } } diff --git a/operators/pkg/controller/apmserver/config/config.go b/operators/pkg/controller/apmserver/config/config.go index b17f9c5096..8921aa9466 100644 --- a/operators/pkg/controller/apmserver/config/config.go +++ b/operators/pkg/controller/apmserver/config/config.go @@ -45,7 +45,7 @@ func NewConfigFromSpec(c k8s.Client, as v1alpha1.ApmServer) (*settings.Canonical } outputCfg := settings.NewCanonicalConfig() - if as.Spec.Output.Elasticsearch.IsConfigured() { + if as.Spec.Elasticsearch.IsConfigured() { // Get username and password username, password, err := association.ElasticsearchAuthSettings(c, &as) if err != nil { @@ -53,7 +53,7 @@ func NewConfigFromSpec(c k8s.Client, as v1alpha1.ApmServer) (*settings.Canonical } outputCfg = settings.MustCanonicalConfig( map[string]interface{}{ - "output.elasticsearch.hosts": as.Spec.Output.Elasticsearch.Hosts, + "output.elasticsearch.hosts": as.Spec.Elasticsearch.Hosts, "output.elasticsearch.username": username, "output.elasticsearch.password": password, "output.elasticsearch.ssl.certificate_authorities": []string{filepath.Join(CertificatesDir, certificates.CertFileName)}, diff --git a/operators/pkg/controller/apmserver/pod.go b/operators/pkg/controller/apmserver/pod.go index b40f94c0a4..7f483fb904 100644 --- a/operators/pkg/controller/apmserver/pod.go +++ b/operators/pkg/controller/apmserver/pod.go @@ -117,7 +117,7 @@ func newPodSpec(as *v1alpha1.ApmServer, p PodSpecParams) corev1.PodTemplateSpec if p.keystoreResources != nil { dataVolume := keystore.DataVolume( - strings.ToLower(as.Kind), + strings.ToLower(as.Kind()), DataVolumePath, ) builder.WithInitContainers(p.keystoreResources.InitContainer). 
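The Kind() methods introduced above all follow the same workaround: pin the kind behind a package constant instead of reading it from TypeMeta, which the controller-runtime client can come back with empty (issue #406 linked in the comments). Below is a minimal, self-contained sketch of the pattern with illustrative names only, not the operator's actual types; it shows why names derived from the fixed Kind() stay stable where GetObjectKind()-derived names could silently change.

package main

import (
	"fmt"
	"strings"
)

// Kind is fixed per API package, mirroring the constants added in this patch.
const Kind = "ApmServer"

type ApmServer struct{}

// Kind returns the constant rather than trusting TypeMeta, which the
// controller-runtime client may clear on reads.
func (as *ApmServer) Kind() string { return Kind }

func main() {
	as := &ApmServer{}
	// e.g. a finalizer name derived from the lowercased kind, in the spirit of
	// the keystore finalizer and volume names used above (illustrative only)
	fmt.Println("secure-settings.finalizers." + strings.ToLower(as.Kind()) + ".k8s.elastic.co")
}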
diff --git a/operators/pkg/controller/apmserverelasticsearchassociation/apmserverelasticsearchassociation_controller.go b/operators/pkg/controller/apmserverelasticsearchassociation/apmserverelasticsearchassociation_controller.go index 9f6e0a3238..ed249dd1af 100644 --- a/operators/pkg/controller/apmserverelasticsearchassociation/apmserverelasticsearchassociation_controller.go +++ b/operators/pkg/controller/apmserverelasticsearchassociation/apmserverelasticsearchassociation_controller.go @@ -12,7 +12,9 @@ import ( apmtype "github.com/elastic/cloud-on-k8s/operators/pkg/apis/apm/v1alpha1" commonv1alpha1 "github.com/elastic/cloud-on-k8s/operators/pkg/apis/common/v1alpha1" estype "github.com/elastic/cloud-on-k8s/operators/pkg/apis/elasticsearch/v1alpha1" + "github.com/elastic/cloud-on-k8s/operators/pkg/controller/apmserver/labels" "github.com/elastic/cloud-on-k8s/operators/pkg/controller/common" + "github.com/elastic/cloud-on-k8s/operators/pkg/controller/common/annotation" "github.com/elastic/cloud-on-k8s/operators/pkg/controller/common/certificates/http" "github.com/elastic/cloud-on-k8s/operators/pkg/controller/common/finalizer" "github.com/elastic/cloud-on-k8s/operators/pkg/controller/common/operator" @@ -24,6 +26,7 @@ import ( corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + k8slabels "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/record" @@ -45,8 +48,8 @@ var ( // Add creates a new ApmServerElasticsearchAssociation Controller and adds it to the Manager with default RBAC. The Manager will set fields on the Controller // and Start it when the Manager is Started. -func Add(mgr manager.Manager, _ operator.Parameters) error { - r := newReconciler(mgr) +func Add(mgr manager.Manager, params operator.Parameters) error { + r := newReconciler(mgr, params) c, err := add(mgr, r) if err != nil { return err @@ -55,13 +58,14 @@ func Add(mgr manager.Manager, _ operator.Parameters) error { } // newReconciler returns a new reconcile.Reconciler -func newReconciler(mgr manager.Manager) *ReconcileApmServerElasticsearchAssociation { +func newReconciler(mgr manager.Manager, params operator.Parameters) *ReconcileApmServerElasticsearchAssociation { client := k8s.WrapClient(mgr.GetClient()) return &ReconcileApmServerElasticsearchAssociation{ - Client: client, - scheme: mgr.GetScheme(), - watches: watches.NewDynamicWatches(), - recorder: mgr.GetRecorder(name), + Client: client, + scheme: mgr.GetScheme(), + watches: watches.NewDynamicWatches(), + recorder: mgr.GetRecorder(name), + Parameters: params, } } @@ -97,7 +101,7 @@ type ReconcileApmServerElasticsearchAssociation struct { scheme *runtime.Scheme recorder record.EventRecorder watches watches.DynamicWatches - + operator.Parameters // iteration is the number of times this controller has run its Reconcile method iteration int64 } @@ -144,6 +148,21 @@ func (r *ReconcileApmServerElasticsearchAssociation) Reconcile(request reconcile return reconcile.Result{}, nil } + selector := k8slabels.Set(map[string]string{labels.ApmServerNameLabelName: apmServer.Name}).AsSelector() + compat, err := annotation.ReconcileCompatibility(r.Client, &apmServer, selector, r.OperatorInfo.BuildInfo.Version) + if err != nil { + return reconcile.Result{}, err + } + if !compat { + // this resource is not able to be reconciled by this version of the controller, so we will skip it and not requeue + return reconcile.Result{}, nil + } + + err = 
annotation.UpdateControllerVersion(r.Client, &apmServer, r.OperatorInfo.BuildInfo.Version) + if err != nil { + return reconcile.Result{}, err + } + newStatus, err := r.reconcileInternal(apmServer) // maybe update status origStatus := apmServer.Status.DeepCopy() @@ -185,13 +204,16 @@ func resultFromStatus(status commonv1alpha1.AssociationStatus) reconcile.Result } func (r *ReconcileApmServerElasticsearchAssociation) reconcileInternal(apmServer apmtype.ApmServer) (commonv1alpha1.AssociationStatus, error) { - assocKey := k8s.ExtractNamespacedName(&apmServer) // no auto-association, nothing to do - elasticsearchRef := apmServer.Spec.Output.Elasticsearch.ElasticsearchRef - if elasticsearchRef == nil { + elasticsearchRef := apmServer.Spec.ElasticsearchRef + if !elasticsearchRef.IsDefined() { return "", nil } - + if elasticsearchRef.Namespace == "" { + // no namespace provided: default to the APM server namespace + elasticsearchRef.Namespace = apmServer.Namespace + } + assocKey := k8s.ExtractNamespacedName(&apmServer) // Make sure we see events from Elasticsearch using a dynamic watch // will become more relevant once we refactor user handling to CRDs and implement // syncing of user credentials across namespaces @@ -221,8 +243,6 @@ func (r *ReconcileApmServerElasticsearchAssociation) reconcileInternal(apmServer } var expectedEsConfig apmtype.ElasticsearchOutput - expectedEsConfig.ElasticsearchRef = apmServer.Spec.Output.Elasticsearch.ElasticsearchRef - // TODO: look up public certs secret name from the ES cluster resource instead of relying on naming convention var publicCertsSecret corev1.Secret publicCertsSecretKey := http.PublicCertsSecretRef( @@ -238,8 +258,8 @@ func (r *ReconcileApmServerElasticsearchAssociation) reconcileInternal(apmServer expectedEsConfig.Auth.SecretKeyRef = clearTextSecretKeySelector(apmServer) // TODO: this is a bit rough - if !reflect.DeepEqual(apmServer.Spec.Output.Elasticsearch, expectedEsConfig) { - apmServer.Spec.Output.Elasticsearch = expectedEsConfig + if !reflect.DeepEqual(apmServer.Spec.Elasticsearch, expectedEsConfig) { + apmServer.Spec.Elasticsearch = expectedEsConfig log.Info("Updating Apm Server spec with Elasticsearch output configuration", "namespace", apmServer.Namespace, "as_name", apmServer.Name) if err := r.Update(&apmServer); err != nil { return commonv1alpha1.AssociationPending, err @@ -266,7 +286,7 @@ func deleteOrphanedResources(c k8s.Client, apm apmtype.ApmServer) error { for _, s := range secrets.Items { controlledBy := metav1.IsControlledBy(&s, &apm) - if controlledBy && !apm.Spec.Output.Elasticsearch.ElasticsearchRef.IsDefined() { + if controlledBy && !apm.Spec.ElasticsearchRef.IsDefined() { log.Info("Deleting secret", "namespace", s.Namespace, "secret_name", s.Name, "as_name", apm.Name) if err := c.Delete(&s); err != nil { return err diff --git a/operators/pkg/controller/apmserverelasticsearchassociation/apmserverelasticsearchassociation_controller_test.go b/operators/pkg/controller/apmserverelasticsearchassociation/apmserverelasticsearchassociation_controller_test.go index d54e11beef..b3d0a330d6 100644 --- a/operators/pkg/controller/apmserverelasticsearchassociation/apmserverelasticsearchassociation_controller_test.go +++ b/operators/pkg/controller/apmserverelasticsearchassociation/apmserverelasticsearchassociation_controller_test.go @@ -77,13 +77,7 @@ func Test_deleteOrphanedResources(t *testing.T) { Name: "as", Namespace: "default", }, - Spec: apmtype.ApmServerSpec{ - Output: apmtype.Output{ - Elasticsearch: apmtype.ElasticsearchOutput{ - 
ElasticsearchRef: nil, - }, - }, - }, + Spec: apmtype.ApmServerSpec{}, }, initialObjects: []runtime.Object{ &corev1.Secret{ diff --git a/operators/pkg/controller/apmserverelasticsearchassociation/user.go b/operators/pkg/controller/apmserverelasticsearchassociation/user.go index e349c2bbe6..fdcba2fe13 100644 --- a/operators/pkg/controller/apmserverelasticsearchassociation/user.go +++ b/operators/pkg/controller/apmserverelasticsearchassociation/user.go @@ -34,13 +34,18 @@ func apmUserObjectName(assocName string) string { // userKey is the namespaced name to identify the customer user resource created by the controller. func userKey(apm apmtype.ApmServer) *types.NamespacedName { - - ref := apm.Spec.Output.Elasticsearch.ElasticsearchRef - if ref == nil { + esRef := apm.Spec.ElasticsearchRef + if !esRef.IsDefined() { return nil } + + esNamespace := esRef.Namespace + if esNamespace == "" { + // no namespace given, default to the APM Server's namespace + esNamespace = apm.Namespace + } return &types.NamespacedName{ - Namespace: ref.Namespace, + Namespace: esNamespace, Name: userName(apm), } } @@ -76,7 +81,7 @@ func reconcileEsUser(c k8s.Client, s *runtime.Scheme, apm apmtype.ApmServer, es secretLabels := labels.NewLabels(apm.Name) secretLabels[AssociationLabelName] = apm.Name // add ES labels - for k, v := range label.NewLabels(apm.Spec.Output.Elasticsearch.ElasticsearchRef.NamespacedName()) { + for k, v := range label.NewLabels(apm.Spec.ElasticsearchRef.NamespacedName()) { secretLabels[k] = v } secKey := secretKey(apm) @@ -120,7 +125,7 @@ func reconcileEsUser(c k8s.Client, s *runtime.Scheme, apm apmtype.ApmServer, es } // analogous to the secret: the user goes on the Elasticsearch side of the association, so we apply the ES labels for visibility - userLabels := common.NewLabels(apm.Spec.Output.Elasticsearch.ElasticsearchRef.NamespacedName()) + userLabels := common.NewLabels(apm.Spec.ElasticsearchRef.NamespacedName()) userLabels[AssociationLabelName] = apm.Name userLabels[AssociationLabelNamespace] = apm.Namespace expectedEsUser := &corev1.Secret{ diff --git a/operators/pkg/controller/apmserverelasticsearchassociation/user_test.go b/operators/pkg/controller/apmserverelasticsearchassociation/user_test.go index 842c8870e7..1704ec408d 100644 --- a/operators/pkg/controller/apmserverelasticsearchassociation/user_test.go +++ b/operators/pkg/controller/apmserverelasticsearchassociation/user_test.go @@ -38,14 +38,11 @@ var apmFixture = apmtype.ApmServer{ Namespace: "default", }, Spec: apmtype.ApmServerSpec{ - Output: apmtype.Output{ - Elasticsearch: apmtype.ElasticsearchOutput{ - ElasticsearchRef: &commonv1alpha1.ObjectSelector{ - Name: "es", - Namespace: "default", - }, - }, + ElasticsearchRef: commonv1alpha1.ObjectSelector{ + Name: "es", + Namespace: "default", }, + Elasticsearch: apmtype.ElasticsearchOutput{}, }, } @@ -186,14 +183,11 @@ func Test_reconcileEsUser(t *testing.T) { Namespace: "ns-2", }, Spec: apmtype.ApmServerSpec{ - Output: apmtype.Output{ - Elasticsearch: apmtype.ElasticsearchOutput{ - ElasticsearchRef: &commonv1alpha1.ObjectSelector{ - Name: "es", - Namespace: "ns-1", - }, - }, + ElasticsearchRef: commonv1alpha1.ObjectSelector{ + Name: "es", + Namespace: "ns-1", }, + Elasticsearch: apmtype.ElasticsearchOutput{}, }, }, }, diff --git a/operators/pkg/controller/common/annotation/controller_version.go b/operators/pkg/controller/common/annotation/controller_version.go index 33f1c5dfb4..37e1677f51 100644 --- a/operators/pkg/controller/common/annotation/controller_version.go +++ 
b/operators/pkg/controller/common/annotation/controller_version.go @@ -5,9 +5,14 @@ package annotation import ( + "github.com/elastic/cloud-on-k8s/operators/pkg/controller/common/version" "github.com/elastic/cloud-on-k8s/operators/pkg/utils/k8s" + "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" + ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" ) // ControllerVersionAnnotation is the annotation name that indicates the last controller version to update a resource @@ -50,3 +55,93 @@ func UpdateControllerVersion(client k8s.Client, obj runtime.Object, version stri log.V(1).Info("updating controller version annotation", "namespace", namespace, "name", name, "kind", obj.GetObjectKind()) return client.Update(obj) } + +// ReconcileCompatibility determines whether this controller is compatible with a given resource by examining the controller version annotation. +// Controller versions 0.9.0+ cannot reconcile resources created with earlier controllers, so this lets our controller skip those resources until they can be manually recreated. +// If an object does not have the annotation, it determines whether this is a new object or one previously reconciled by an older controller version (which did not apply this annotation), +// and updates the object's annotations to mark it as incompatible if so +func ReconcileCompatibility(client k8s.Client, obj runtime.Object, selector labels.Selector, controllerVersion string) (bool, error) { + accessor := meta.NewAccessor() + namespace, err := accessor.Namespace(obj) + if err != nil { + log.Error(err, "error getting namespace", "kind", obj.GetObjectKind().GroupVersionKind().Kind) + return false, err + } + name, err := accessor.Name(obj) + if err != nil { + log.Error(err, "error getting name", "namespace", namespace, "kind", obj.GetObjectKind().GroupVersionKind().Kind) + return false, err + } + annotations, err := accessor.Annotations(obj) + if err != nil { + log.Error(err, "error getting annotations", "namespace", namespace, "name", name, "kind", obj.GetObjectKind().GroupVersionKind().Kind) + return false, err + } + + annExists := annotations != nil && annotations[ControllerVersionAnnotation] != "" + + // if the annotation does not exist, it might indicate it was reconciled by an older controller version that did not add the version annotation, + // in which case it is incompatible with the current controller, or it is a brand new resource that has not been reconciled by any controller yet + if !annExists { + exist, err := checkExistingResources(client, obj, selector) + if err != nil { + return false, err + } + if exist { + log.Info("Resource was previously reconciled by incompatible controller version and missing annotation, adding annotation", "controller_version", controllerVersion, "namespace", namespace, "name", name, "kind", obj.GetObjectKind().GroupVersionKind().Kind) + err = UpdateControllerVersion(client, obj, "0.8.0-UNKNOWN") + return false, err + } + // no annotation exists and there are no existing resources, so this has not previously been reconciled + err = UpdateControllerVersion(client, obj, controllerVersion) + return true, err + } + + currentVersion, err := version.Parse(annotations[ControllerVersionAnnotation]) + if err != nil { + return false, errors.Wrap(err, "Error parsing current version on resource") + } + minVersion, err := version.Parse("0.9.0-ALPHA") + if err != nil { + return false, 
errors.Wrap(err, "Error parsing minimum compatible version") + } + ctrlVersion, err := version.Parse(controllerVersion) + if err != nil { + return false, errors.Wrap(err, "Error parsing controller version") + } + + // if the current version is gte the minimum version then they are compatible + if currentVersion.IsSameOrAfter(*minVersion) { + log.V(1).Info("Current controller version on resource is compatible with running controller version", "controller_version", ctrlVersion, + "resource_controller_version", currentVersion, "namespace", namespace, "name", name) + return true, nil + } + + log.Info("Resource was created with older version of operator, will not take action", "controller_version", ctrlVersion, + "resource_controller_version", currentVersion, "namespace", namespace, "name", name) + return false, nil +} + +// checkExistingResources returns a bool indicating if there are existing resources created for a given resource +func checkExistingResources(client k8s.Client, obj runtime.Object, selector labels.Selector) (bool, error) { + + accessor := meta.NewAccessor() + namespace, err := accessor.Namespace(obj) + if err != nil { + log.Error(err, "error getting namespace", "kind", obj.GetObjectKind().GroupVersionKind().Kind) + return false, err + } + // if there's no controller version annotation on the object, then we need to see maybe the object has been reconciled by an older, incompatible controller version + opts := ctrlclient.ListOptions{ + LabelSelector: selector, + Namespace: namespace, + } + var svcs corev1.ServiceList + err = client.List(&opts, &svcs) + if err != nil { + return false, err + } + // if we listed any services successfully, then we know this cluster was reconciled by an old version since any objects reconciled by a 0.9.0+ operator would have a label + return len(svcs.Items) != 0, nil + +} diff --git a/operators/pkg/controller/common/annotation/controller_version_test.go b/operators/pkg/controller/common/annotation/controller_version_test.go index d844e25a2a..23598a175a 100644 --- a/operators/pkg/controller/common/annotation/controller_version_test.go +++ b/operators/pkg/controller/common/annotation/controller_version_test.go @@ -8,17 +8,21 @@ import ( "testing" kibanav1alpha1 "github.com/elastic/cloud-on-k8s/operators/pkg/apis/kibana/v1alpha1" + "github.com/elastic/cloud-on-k8s/operators/pkg/controller/elasticsearch/label" "github.com/elastic/cloud-on-k8s/operators/pkg/utils/k8s" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client/fake" apmtype "github.com/elastic/cloud-on-k8s/operators/pkg/apis/apm/v1alpha1" assoctype "github.com/elastic/cloud-on-k8s/operators/pkg/apis/associations/v1alpha1" + "github.com/elastic/cloud-on-k8s/operators/pkg/apis/elasticsearch/v1alpha1" estype "github.com/elastic/cloud-on-k8s/operators/pkg/apis/elasticsearch/v1alpha1" "k8s.io/client-go/kubernetes/scheme" ) @@ -66,21 +70,145 @@ func TestAnnotationCreated(t *testing.T) { assert.Equal(t, actualKibana.GetAnnotations()[ControllerVersionAnnotation], "newversion") } -// setupScheme creates a scheme to use for our fake clients so they know about our custom resources -// TODO move this into one of the upper level common packages and make public, refactor out this code that's in a lot of our tests -func setupScheme(t *testing.T) *runtime.Scheme { - sc := 
scheme.Scheme - if err := assoctype.SchemeBuilder.AddToScheme(sc); err != nil { - assert.Fail(t, "failed to add Association types") +// TestMissingAnnotationOldVersion tests that we skip reconciling an object missing annotations that has already been reconciled by +// a previous operator version, and add an annotation indicating an old controller version +func TestMissingAnnotationOldVersion(t *testing.T) { + + es := &v1alpha1.Elasticsearch{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns", + Name: "es", + }, } - if err := apmtype.SchemeBuilder.AddToScheme(sc); err != nil { - assert.Fail(t, "failed to add APM types") + svc := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns", + Name: "svc", + Labels: map[string]string{ + label.ClusterNameLabelName: "es", + }, + }, } - if err := estype.SchemeBuilder.AddToScheme(sc); err != nil { - assert.Fail(t, "failed to add ES types") + sc := setupScheme(t) + client := k8s.WrapClient(fake.NewFakeClientWithScheme(sc, es, svc)) + selector := getElasticsearchSelector(es) + compat, err := ReconcileCompatibility(client, es, selector, "0.9.0-SNAPSHOT") + require.NoError(t, err) + assert.False(t, compat) + + // check old version annotation was added + require.NotNil(t, es.Annotations) + assert.Equal(t, "0.8.0-UNKNOWN", es.Annotations[ControllerVersionAnnotation]) +} + +// TestMissingAnnotationNewObject tests that we add an annotation for new objects +func TestMissingAnnotationNewObject(t *testing.T) { + es := &v1alpha1.Elasticsearch{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns", + Name: "es", + }, } - if err := kibanav1alpha1.SchemeBuilder.AddToScheme(sc); err != nil { - assert.Fail(t, "failed to add Kibana types") + // TODO this is currently broken due to an upstream bug in the fake client. When we upgrade controller-runtime + // to a version that contains this PR, we can uncomment this and add the service to the client + + // add existing svc that is not part of cluster to make sure we have label selectors correct + // https://github.com/kubernetes-sigs/controller-runtime/pull/311 + // svc := &corev1.Service{ + // ObjectMeta: metav1.ObjectMeta{ + // Namespace: "ns", + // Name: "svc", + // Labels: map[string]string{ + // label.ClusterNameLabelName: "literallyanything", + // }, + // }, + // } + + sc := setupScheme(t) + // client := k8s.WrapClient(fake.NewFakeClientWithScheme(sc, es, svc)) + client := k8s.WrapClient(fake.NewFakeClientWithScheme(sc, es)) + selector := getElasticsearchSelector(es) + compat, err := ReconcileCompatibility(client, es, selector, "0.9.0-SNAPSHOT") + require.NoError(t, err) + assert.True(t, compat) + + // check version annotation was added + require.NotNil(t, es.Annotations) + assert.Equal(t, "0.9.0-SNAPSHOT", es.Annotations[ControllerVersionAnnotation]) +} + +// TestSameAnnotation tests that a resource whose annotation matches the running controller version is considered compatible +func TestSameAnnotation(t *testing.T) { + es := &v1alpha1.Elasticsearch{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns", + Name: "es", + Annotations: map[string]string{ + ControllerVersionAnnotation: "0.9.0-SNAPSHOT", + }, + }, } + sc := setupScheme(t) + client := k8s.WrapClient(fake.NewFakeClientWithScheme(sc, es)) + selector := getElasticsearchSelector(es) + compat, err := ReconcileCompatibility(client, es, selector, "0.9.0-SNAPSHOT") + require.NoError(t, err) + assert.True(t, compat) + assert.Equal(t, "0.9.0-SNAPSHOT", es.Annotations[ControllerVersionAnnotation]) +} + +func TestIncompatibleAnnotation(t *testing.T) { + es := &v1alpha1.Elasticsearch{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns", + Name: "es", + Annotations: 
map[string]string{ + ControllerVersionAnnotation: "0.8.0-FOOBAR", + }, + }, + } + sc := setupScheme(t) + client := k8s.WrapClient(fake.NewFakeClientWithScheme(sc, es)) + selector := getElasticsearchSelector(es) + compat, err := ReconcileCompatibility(client, es, selector, "0.9.0-SNAPSHOT") + require.NoError(t, err) + assert.False(t, compat) + // check we did not update the annotation + assert.Equal(t, "0.8.0-FOOBAR", es.Annotations[ControllerVersionAnnotation]) +} + +func TestNewerAnnotation(t *testing.T) { + es := &v1alpha1.Elasticsearch{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns", + Name: "es", + Annotations: map[string]string{ + ControllerVersionAnnotation: "2.0.0", + }, + }, + } + sc := setupScheme(t) + client := k8s.WrapClient(fake.NewFakeClientWithScheme(sc, es)) + selector := getElasticsearchSelector(es) + compat, err := ReconcileCompatibility(client, es, selector, "0.9.0-SNAPSHOT") + assert.NoError(t, err) + assert.True(t, compat) +} + +// setupScheme creates a scheme to use for our fake clients so they know about our custom resources +func setupScheme(t *testing.T) *runtime.Scheme { + sc := scheme.Scheme + err := assoctype.SchemeBuilder.AddToScheme(sc) + require.NoError(t, err) + err = apmtype.SchemeBuilder.AddToScheme(sc) + require.NoError(t, err) + err = estype.SchemeBuilder.AddToScheme(sc) + require.NoError(t, err) + err = kibanav1alpha1.SchemeBuilder.AddToScheme(sc) + require.NoError(t, err) return sc } + +func getElasticsearchSelector(es *v1alpha1.Elasticsearch) labels.Selector { + return labels.Set(map[string]string{label.ClusterNameLabelName: es.Name}).AsSelector() +} diff --git a/operators/pkg/controller/common/association/association_test.go b/operators/pkg/controller/common/association/association_test.go index 82d34803fb..98302c6b57 100644 --- a/operators/pkg/controller/common/association/association_test.go +++ b/operators/pkg/controller/common/association/association_test.go @@ -48,15 +48,13 @@ func Test_getCredentials(t *testing.T) { Namespace: "default", }, Spec: v1alpha1.ApmServerSpec{ - Output: v1alpha1.Output{ - Elasticsearch: v1alpha1.ElasticsearchOutput{ - Hosts: []string{"https://elasticsearch-sample-es-http.default.svc:9200"}, - Auth: commonv1alpha1.ElasticsearchAuth{ - SecretKeyRef: &corev1.SecretKeySelector{ - Key: "elastic-internal-apm", - LocalObjectReference: corev1.LocalObjectReference{ - Name: "apmelasticsearchassociation-sample-elastic-internal-apm", - }, + Elasticsearch: v1alpha1.ElasticsearchOutput{ + Hosts: []string{"https://elasticsearch-sample-es-http.default.svc:9200"}, + Auth: commonv1alpha1.ElasticsearchAuth{ + SecretKeyRef: &corev1.SecretKeySelector{ + Key: "elastic-internal-apm", + LocalObjectReference: corev1.LocalObjectReference{ + Name: "apmelasticsearchassociation-sample-elastic-internal-apm", }, }, }, @@ -83,12 +81,10 @@ func Test_getCredentials(t *testing.T) { Namespace: "default", }, Spec: v1alpha1.ApmServerSpec{ - Output: v1alpha1.Output{ - Elasticsearch: v1alpha1.ElasticsearchOutput{ - Hosts: []string{"https://elasticsearch-sample-es-http.default.svc:9200"}, - Auth: commonv1alpha1.ElasticsearchAuth{ - Inline: &elasticsearhInlineAuth, - }, + Elasticsearch: v1alpha1.ElasticsearchOutput{ + Hosts: []string{"https://elasticsearch-sample-es-http.default.svc:9200"}, + Auth: commonv1alpha1.ElasticsearchAuth{ + Inline: &elasticsearhInlineAuth, }, }, }, diff --git a/operators/pkg/controller/common/keystore/resources.go b/operators/pkg/controller/common/keystore/resources.go index 3f0b0c4a94..7c1fd81d5d 100644 --- 
a/operators/pkg/controller/common/keystore/resources.go +++ b/operators/pkg/controller/common/keystore/resources.go @@ -7,14 +7,15 @@ package keystore import ( "strings" - commonv1alpha1 "github.com/elastic/cloud-on-k8s/operators/pkg/apis/common/v1alpha1" - "github.com/elastic/cloud-on-k8s/operators/pkg/controller/common/watches" - "github.com/elastic/cloud-on-k8s/operators/pkg/utils/k8s" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/tools/record" logf "sigs.k8s.io/controller-runtime/pkg/runtime/log" + + commonv1alpha1 "github.com/elastic/cloud-on-k8s/operators/pkg/apis/common/v1alpha1" + "github.com/elastic/cloud-on-k8s/operators/pkg/controller/common/watches" + "github.com/elastic/cloud-on-k8s/operators/pkg/utils/k8s" ) var log = logf.Log.WithName("keystore") @@ -35,6 +36,9 @@ type HasKeystore interface { metav1.Object runtime.Object SecureSettings() *commonv1alpha1.SecretRef + // Kind can technically be retrieved from metav1.Object, but there is a bug preventing us from retrieving it + // see https://github.com/kubernetes-sigs/controller-runtime/issues/406 + Kind() string } // NewResources optionally returns a volume and init container to include in pods, @@ -60,7 +64,7 @@ func NewResources( // build an init container to create the keystore from the secure settings volume initContainer, err := initContainer( *secretVolume, - strings.ToLower(hasKeystore.GetObjectKind().GroupVersionKind().Kind), + strings.ToLower(hasKeystore.Kind()), initContainerParams, ) if err != nil { diff --git a/operators/pkg/controller/common/keystore/user_secret.go b/operators/pkg/controller/common/keystore/user_secret.go index 7e857b596a..ee44f4a434 100644 --- a/operators/pkg/controller/common/keystore/user_secret.go +++ b/operators/pkg/controller/common/keystore/user_secret.go @@ -6,6 +6,7 @@ package keystore import ( "fmt" + "strings" commonv1alpha1 "github.com/elastic/cloud-on-k8s/operators/pkg/apis/common/v1alpha1" "github.com/elastic/cloud-on-k8s/operators/pkg/controller/common/events" @@ -116,7 +117,7 @@ func watchSecureSettings(watched watches.DynamicWatches, secureSettingsRef *comm // to be reliable with controller-runtime < v0.2.0-beta.4 func Finalizer(namespacedName types.NamespacedName, watched watches.DynamicWatches, kind string) finalizer.Finalizer { return finalizer.Finalizer{ - Name: "secure-settings.finalizers." + kind + ".k8s.elastic.co", + Name: "secure-settings.finalizers." 
+ strings.ToLower(kind) + ".k8s.elastic.co", Execute: func() error { watched.Secrets.RemoveHandlerForKey(secureSettingsWatchName(namespacedName)) return nil diff --git a/operators/pkg/controller/elasticsearch/elasticsearch_controller.go b/operators/pkg/controller/elasticsearch/elasticsearch_controller.go index 9db9f6e235..6ccd561da2 100644 --- a/operators/pkg/controller/elasticsearch/elasticsearch_controller.go +++ b/operators/pkg/controller/elasticsearch/elasticsearch_controller.go @@ -11,6 +11,7 @@ import ( appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/record" @@ -201,6 +202,16 @@ func (r *ReconcileElasticsearch) Reconcile(request reconcile.Request) (reconcile return common.PauseRequeue, nil } + selector := labels.Set(map[string]string{label.ClusterNameLabelName: es.Name}).AsSelector() + compat, err := annotation.ReconcileCompatibility(r.Client, &es, selector, r.OperatorInfo.BuildInfo.Version) + if err != nil { + return reconcile.Result{}, err + } + if !compat { + // this resource is not able to be reconciled by this version of the controller, so we will skip it and not requeue + return reconcile.Result{}, nil + } + err = annotation.UpdateControllerVersion(r.Client, &es, r.OperatorInfo.BuildInfo.Version) if err != nil { return reconcile.Result{}, err @@ -288,7 +299,7 @@ func (r *ReconcileElasticsearch) finalizersFor( clusterName := k8s.ExtractNamespacedName(&es) return []finalizer.Finalizer{ r.esObservers.Finalizer(clusterName), - keystore.Finalizer(k8s.ExtractNamespacedName(&es), r.dynamicWatches, "elasticsearch"), + keystore.Finalizer(k8s.ExtractNamespacedName(&es), r.dynamicWatches, es.Kind()), http.DynamicWatchesFinalizer(r.dynamicWatches, es.Name, esname.ESNamer), } } diff --git a/operators/pkg/controller/kibana/driver_test.go b/operators/pkg/controller/kibana/driver_test.go index 2244030331..021b055a0c 100644 --- a/operators/pkg/controller/kibana/driver_test.go +++ b/operators/pkg/controller/kibana/driver_test.go @@ -116,7 +116,7 @@ func expectedDeploymentParams() *DeploymentParams { Handler: corev1.Handler{ HTTPGet: &corev1.HTTPGetAction{ Port: intstr.FromInt(5601), - Path: "/", + Path: "/login", Scheme: corev1.URISchemeHTTPS, }, }, diff --git a/operators/pkg/controller/kibana/kibana_controller.go b/operators/pkg/controller/kibana/kibana_controller.go index 0e7f841a17..fecf36cec0 100644 --- a/operators/pkg/controller/kibana/kibana_controller.go +++ b/operators/pkg/controller/kibana/kibana_controller.go @@ -18,10 +18,12 @@ import ( "github.com/elastic/cloud-on-k8s/operators/pkg/controller/common/operator" "github.com/elastic/cloud-on-k8s/operators/pkg/controller/common/version" "github.com/elastic/cloud-on-k8s/operators/pkg/controller/common/watches" + "github.com/elastic/cloud-on-k8s/operators/pkg/controller/kibana/label" "github.com/elastic/cloud-on-k8s/operators/pkg/utils/k8s" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/controller" @@ -153,6 +155,16 @@ func (r *ReconcileKibana) Reconcile(request reconcile.Request) (reconcile.Result return common.PauseRequeue, nil } + selector := labels.Set(map[string]string{label.KibanaNameLabelName: kb.Name}).AsSelector() + compat, err := annotation.ReconcileCompatibility(r.Client, 
kb, selector, r.params.OperatorInfo.BuildInfo.Version) + if err != nil { + return reconcile.Result{}, err + } + if !compat { + // this resource is not able to be reconciled by this version of the controller, so we will skip it and not requeue + return reconcile.Result{}, nil + } + if err := r.finalizers.Handle(kb, r.finalizersFor(*kb)...); err != nil { if errors.IsConflict(err) { // Conflicts are expected and should be resolved on next loop @@ -210,6 +222,6 @@ func (r *ReconcileKibana) updateStatus(state State) error { func (r *ReconcileKibana) finalizersFor(kb kibanav1alpha1.Kibana) []finalizer.Finalizer { return []finalizer.Finalizer{ secretWatchFinalizer(kb, r.dynamicWatches), - keystore.Finalizer(k8s.ExtractNamespacedName(&kb), r.dynamicWatches, "kibana"), + keystore.Finalizer(k8s.ExtractNamespacedName(&kb), r.dynamicWatches, kb.Kind()), } } diff --git a/operators/pkg/controller/kibana/pod/pod.go b/operators/pkg/controller/kibana/pod/pod.go index 98b1d3eb4c..f397b15504 100644 --- a/operators/pkg/controller/kibana/pod/pod.go +++ b/operators/pkg/controller/kibana/pod/pod.go @@ -43,7 +43,7 @@ func readinessProbe(useTLS bool) corev1.Probe { Handler: corev1.Handler{ HTTPGet: &corev1.HTTPGetAction{ Port: intstr.FromInt(HTTPPort), - Path: "/", + Path: "/login", Scheme: scheme, }, }, diff --git a/operators/pkg/controller/kibanaassociation/association_controller.go b/operators/pkg/controller/kibanaassociation/association_controller.go index 5d954af2ff..083d685803 100644 --- a/operators/pkg/controller/kibanaassociation/association_controller.go +++ b/operators/pkg/controller/kibanaassociation/association_controller.go @@ -13,15 +13,18 @@ import ( estype "github.com/elastic/cloud-on-k8s/operators/pkg/apis/elasticsearch/v1alpha1" kbtype "github.com/elastic/cloud-on-k8s/operators/pkg/apis/kibana/v1alpha1" "github.com/elastic/cloud-on-k8s/operators/pkg/controller/common" + "github.com/elastic/cloud-on-k8s/operators/pkg/controller/common/annotation" "github.com/elastic/cloud-on-k8s/operators/pkg/controller/common/finalizer" "github.com/elastic/cloud-on-k8s/operators/pkg/controller/common/operator" "github.com/elastic/cloud-on-k8s/operators/pkg/controller/common/user" "github.com/elastic/cloud-on-k8s/operators/pkg/controller/common/watches" "github.com/elastic/cloud-on-k8s/operators/pkg/controller/elasticsearch/services" + "github.com/elastic/cloud-on-k8s/operators/pkg/controller/kibana/label" "github.com/elastic/cloud-on-k8s/operators/pkg/utils/k8s" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" @@ -57,8 +60,8 @@ var ( // Add creates a new Association Controller and adds it to the Manager with default RBAC. The Manager will set fields on the Controller // and Start it when the Manager is Started. 
-func Add(mgr manager.Manager, _ operator.Parameters) error { - r := newReconciler(mgr) +func Add(mgr manager.Manager, params operator.Parameters) error { + r := newReconciler(mgr, params) c, err := add(mgr, r) if err != nil { return err @@ -67,13 +70,14 @@ func Add(mgr manager.Manager, _ operator.Parameters) error { } // newReconciler returns a new reconcile.Reconciler -func newReconciler(mgr manager.Manager) *ReconcileAssociation { +func newReconciler(mgr manager.Manager, params operator.Parameters) *ReconcileAssociation { client := k8s.WrapClient(mgr.GetClient()) return &ReconcileAssociation{ - Client: client, - scheme: mgr.GetScheme(), - watches: watches.NewDynamicWatches(), - recorder: mgr.GetRecorder(name), + Client: client, + scheme: mgr.GetScheme(), + watches: watches.NewDynamicWatches(), + recorder: mgr.GetRecorder(name), + Parameters: params, } } @@ -95,7 +99,7 @@ type ReconcileAssociation struct { scheme *runtime.Scheme recorder record.EventRecorder watches watches.DynamicWatches - + operator.Parameters // iteration is the number of times this controller has run its Reconcile method iteration int64 } @@ -149,6 +153,16 @@ func (r *ReconcileAssociation) Reconcile(request reconcile.Request) (reconcile.R return common.PauseRequeue, nil } + selector := labels.Set(map[string]string{label.KibanaNameLabelName: kibana.Name}).AsSelector() + compat, err := annotation.ReconcileCompatibility(r.Client, &kibana, selector, r.OperatorInfo.BuildInfo.Version) + if err != nil { + return reconcile.Result{}, err + } + if !compat { + // this resource is not able to be reconciled by this version of the controller, so we will skip it and not requeue + return reconcile.Result{}, nil + } + newStatus, err := r.reconcileInternal(kibana) // maybe update status if !reflect.DeepEqual(kibana.Status.AssociationStatus, newStatus) { diff --git a/operators/pkg/dev/portforward/pod_forwarder.go b/operators/pkg/dev/portforward/pod_forwarder.go index c733bd87a7..2856b36bec 100644 --- a/operators/pkg/dev/portforward/pod_forwarder.go +++ b/operators/pkg/dev/portforward/pod_forwarder.go @@ -98,7 +98,7 @@ func newDefaultKubernetesClientset() (*kubernetes.Clientset, error) { } // podDNSRegex matches pods FQDN such as {name}.{namespace}.pod -var podDNSRegex = regexp.MustCompile(`^.+\..+\..*$`) +var podDNSRegex = regexp.MustCompile(`^.+\..+$`) // podIPRegex matches any ipv4 address. var podIPv4Regex = regexp.MustCompile(`^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$`) @@ -118,7 +118,7 @@ func parsePodAddr(addr string, clientSet *kubernetes.Clientset) (*types.Namespac // retrieve pod name and namespace from addr // TODO: subdomains in pod names would change this. 
parts := strings.SplitN(host, ".", 3) - if len(parts) <= 2 { + if len(parts) <= 1 { return nil, fmt.Errorf("unsupported pod address format: %s", host) } return &types.NamespacedName{Namespace: parts[1], Name: parts[0]}, nil diff --git a/operators/pkg/dev/portforward/pod_forwarder_test.go b/operators/pkg/dev/portforward/pod_forwarder_test.go index 555aea5230..866322fc11 100644 --- a/operators/pkg/dev/portforward/pod_forwarder_test.go +++ b/operators/pkg/dev/portforward/pod_forwarder_test.go @@ -143,10 +143,15 @@ func Test_parsePodAddr(t *testing.T) { args: args{addr: "foo.bar.pod:1234"}, want: types.NamespacedName{Namespace: "bar", Name: "foo"}, }, + { + name: "pod DNS with pod and namespace only", + args: args{addr: "foopod.barnamespace:1234"}, + want: types.NamespacedName{Namespace: "barnamespace", Name: "foopod"}, + }, { name: "invalid", - args: args{addr: "example.com:1234"}, - wantErr: errors.New("unsupported pod address format: example.com"), + args: args{addr: "foobar:1234"}, + wantErr: errors.New("unsupported pod address format: foobar"), }, } for _, tt := range tests { diff --git a/operators/pkg/webhook/server.go b/operators/pkg/webhook/server.go index 0493a75317..6032aeafcb 100644 --- a/operators/pkg/webhook/server.go +++ b/operators/pkg/webhook/server.go @@ -5,11 +5,14 @@ package webhook import ( + "context" + "github.com/elastic/cloud-on-k8s/operators/pkg/apis/elasticsearch/v1alpha1" "github.com/elastic/cloud-on-k8s/operators/pkg/webhook/elasticsearch" "github.com/elastic/cloud-on-k8s/operators/pkg/webhook/license" admission "k8s.io/api/admissionregistration/v1beta1" corev1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/webhook" @@ -41,7 +44,7 @@ func RegisterValidations(mgr manager.Manager, params Parameters) error { licWh, err := builder.NewWebhookBuilder(). Name("validation.license.elastic.co"). Validating(). - FailurePolicy(admission.Fail). + FailurePolicy(admission.Ignore). ForType(&corev1.Secret{}). Handlers(&license.ValidationHandler{}). WithManager(mgr). @@ -51,6 +54,17 @@ func RegisterValidations(mgr manager.Manager, params Parameters) error { } disabled := !params.AutoInstall + if params.AutoInstall { + // nasty side effect in register function + webhookSvc := corev1.Service{ + ObjectMeta: v1.ObjectMeta{ + Name: params.Bootstrap.Service.Name, + Namespace: params.Bootstrap.Service.Namespace, + }, + } + // best effort deletion attempt to handle incompatible services from previous versions + _ = mgr.GetClient().Delete(context.Background(), &webhookSvc) + } svr, err := webhook.NewServer(admissionServerName, mgr, webhook.ServerOptions{ Port: serverPort, CertDir: "/tmp/cert", diff --git a/operators/test/e2e/apm/standalone_test.go b/operators/test/e2e/apm/standalone_test.go index 2e66d6539a..7ce2175595 100644 --- a/operators/test/e2e/apm/standalone_test.go +++ b/operators/test/e2e/apm/standalone_test.go @@ -7,7 +7,6 @@ package apm import ( "testing" - apmtype "github.com/elastic/cloud-on-k8s/operators/pkg/apis/apm/v1alpha1" "github.com/elastic/cloud-on-k8s/operators/pkg/apis/common/v1alpha1" "github.com/elastic/cloud-on-k8s/operators/test/e2e/test" "github.com/elastic/cloud-on-k8s/operators/test/e2e/test/apmserver" @@ -16,7 +15,7 @@ import ( // TestApmStandalone runs a test suite on an APM server that is not outputting to Elasticsearch func TestApmStandalone(t *testing.T) { apmBuilder := apmserver.NewBuilder("standalone"). - WithOutput(apmtype.Output{}). 
+ WithElasticsearchRef(v1alpha1.ObjectSelector{}). WithConfig(map[string]interface{}{ "output.console": map[string]interface{}{ "pretty": true, @@ -29,7 +28,7 @@ func TestApmStandalone(t *testing.T) { func TestApmStandaloneNoTLS(t *testing.T) { apmBuilder := apmserver.NewBuilder("standalone-no-tls"). - WithOutput(apmtype.Output{}). + WithElasticsearchRef(v1alpha1.ObjectSelector{}). WithConfig(map[string]interface{}{ "output.console": map[string]interface{}{ "pretty": true, diff --git a/operators/test/e2e/test/apmserver/builder.go b/operators/test/e2e/test/apmserver/builder.go index 370f2001fa..9173bf7005 100644 --- a/operators/test/e2e/test/apmserver/builder.go +++ b/operators/test/e2e/test/apmserver/builder.go @@ -6,7 +6,6 @@ package apmserver import ( apmtype "github.com/elastic/cloud-on-k8s/operators/pkg/apis/apm/v1alpha1" - common "github.com/elastic/cloud-on-k8s/operators/pkg/apis/common/v1alpha1" commonv1alpha1 "github.com/elastic/cloud-on-k8s/operators/pkg/apis/common/v1alpha1" "github.com/elastic/cloud-on-k8s/operators/test/e2e/test" corev1 "k8s.io/api/core/v1" @@ -30,13 +29,9 @@ func NewBuilder(name string) Builder { Spec: apmtype.ApmServerSpec{ NodeCount: 1, Version: test.ElasticStackVersion, - Output: apmtype.Output{ - Elasticsearch: apmtype.ElasticsearchOutput{ - ElasticsearchRef: &commonv1alpha1.ObjectSelector{ - Name: name, - Namespace: test.Namespace, - }, - }, + ElasticsearchRef: commonv1alpha1.ObjectSelector{ + Name: name, + Namespace: test.Namespace, }, PodTemplate: corev1.PodTemplateSpec{ Spec: corev1.PodSpec{ @@ -55,12 +50,7 @@ func (b Builder) WithRestrictedSecurityContext() Builder { func (b Builder) WithNamespace(namespace string) Builder { b.ApmServer.ObjectMeta.Namespace = namespace - ref := b.ApmServer.Spec.Output.Elasticsearch.ElasticsearchRef - if ref == nil { - ref = &common.ObjectSelector{} - } - ref.Namespace = namespace - b.ApmServer.Spec.Output.Elasticsearch.ElasticsearchRef = ref + b.ApmServer.Spec.ElasticsearchRef.Namespace = namespace return b } @@ -74,8 +64,8 @@ func (b Builder) WithNodeCount(count int) Builder { return b } -func (b Builder) WithOutput(out apmtype.Output) Builder { - b.ApmServer.Spec.Output = out +func (b Builder) WithElasticsearchRef(ref commonv1alpha1.ObjectSelector) Builder { + b.ApmServer.Spec.ElasticsearchRef = ref return b } diff --git a/operators/test/e2e/test/elasticsearch/http_client.go b/operators/test/e2e/test/elasticsearch/http_client.go index 154dac2409..dae9d32b37 100644 --- a/operators/test/e2e/test/elasticsearch/http_client.go +++ b/operators/test/e2e/test/elasticsearch/http_client.go @@ -28,7 +28,7 @@ func NewElasticsearchClient(es v1alpha1.Elasticsearch, k *test.K8sClient) (clien if err != nil { return nil, err } - inClusterURL := fmt.Sprintf("https://%s:9200", name.HTTPService(es.Name)) + inClusterURL := fmt.Sprintf("https://%s.%s.svc:9200", name.HTTPService(es.Name), es.Namespace) var dialer net.Dialer if test.AutoPortForward { dialer = portforward.NewForwardingDialer()
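As a closing note on the port-forwarding change above: parsePodAddr now accepts two-part {pod}.{namespace} addresses as well as longer forms such as {pod}.{namespace}.pod, matching the shorter in-cluster DNS names the e2e http_client now produces. A self-contained sketch of that parsing logic (a hypothetical helper for illustration, not code from this patch):

package main

import (
	"fmt"
	"strings"
)

// parsePod mirrors the relaxed check in parsePodAddr: at least two DNS labels
// are required, the first two are taken as pod name and namespace, and any
// further suffix (".pod", ".svc", ...) is ignored.
func parsePod(host string) (name, namespace string, err error) {
	parts := strings.SplitN(host, ".", 3)
	if len(parts) <= 1 {
		return "", "", fmt.Errorf("unsupported pod address format: %s", host)
	}
	return parts[0], parts[1], nil
}

func main() {
	// two-part and three-part forms both resolve; single labels are rejected
	for _, h := range []string{"foopod.barnamespace", "foo.bar.pod", "foobar"} {
		name, ns, err := parsePod(h)
		fmt.Println(name, ns, err)
	}
}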