diff --git a/charts/elastic/elasticsearch/.helmignore b/charts/elastic/elasticsearch/.helmignore
new file mode 100644
index 0000000..e12c0b4
--- /dev/null
+++ b/charts/elastic/elasticsearch/.helmignore
@@ -0,0 +1,2 @@
+tests/
+.pytest_cache/
diff --git a/charts/elastic/elasticsearch/Chart.yaml b/charts/elastic/elasticsearch/Chart.yaml
new file mode 100644
index 0000000..15cbabc
--- /dev/null
+++ b/charts/elastic/elasticsearch/Chart.yaml
@@ -0,0 +1,12 @@
+apiVersion: v1
+appVersion: 7.4.1
+description: Official Elastic helm chart for Elasticsearch
+home: https://github.com/elastic/helm-charts
+icon: https://helm.elastic.co/icons/elasticsearch.png
+maintainers:
+- email: helm-charts@elastic.co
+  name: Elastic
+name: elasticsearch
+sources:
+- https://github.com/elastic/elasticsearch
+version: 7.4.1
diff --git a/charts/elastic/elasticsearch/Makefile b/charts/elastic/elasticsearch/Makefile
new file mode 100644
index 0000000..22218a1
--- /dev/null
+++ b/charts/elastic/elasticsearch/Makefile
@@ -0,0 +1 @@
+include ../helpers/common.mk
diff --git a/charts/elastic/elasticsearch/README.md b/charts/elastic/elasticsearch/README.md
new file mode 100644
index 0000000..cfc2a01
--- /dev/null
+++ b/charts/elastic/elasticsearch/README.md
@@ -0,0 +1,315 @@
+# Elasticsearch Helm Chart
+
+This functionality is in beta and is subject to change. The design and code are less mature than official GA features and are provided as-is with no warranties. Beta features are not subject to the support SLA of official GA features.
+
+This helm chart is a lightweight way to configure and run our official [Elasticsearch docker image](https://www.elastic.co/guide/en/elasticsearch/reference/current/docker.html).
+
+## Requirements
+
+* [Helm](https://helm.sh/) >= 2.8.0 (see parent [README](../README.md) for more details)
+* Kubernetes >= 1.8
+* Minimum cluster requirements include the following to run this chart with default settings. All of these settings are configurable.
+  * Three Kubernetes nodes to respect the default "hard" affinity settings
+  * 1GB of RAM for the JVM heap
+
+## Usage notes and getting started
+
+* This repo includes a number of [example](./examples) configurations which can be used as a reference. They are also used in the automated testing of this chart.
+* Automated testing of this chart is currently only run against GKE (Google Kubernetes Engine).
+* The chart deploys a statefulset and by default will do an automated rolling update of your cluster. It does this by waiting for the cluster health to become green after each instance is updated. If you prefer to update manually you can set [`updateStrategy: OnDelete`](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#on-delete).
+* It is important to set the JVM heap size in `esJavaOpts` and the CPU/Memory `resources` to something suitable for your cluster.
+* To simplify the chart and its maintenance, each set of node groups is deployed as a separate helm release. Take a look at the [multi](./examples/multi) example to get an idea for how this works. Without doing this it isn't possible to resize persistent volumes in a statefulset. Setting it up this way makes it possible to add more nodes with a new storage size and then drain the old ones. It also solves the problem of allowing the user to determine which node groups to update first when doing upgrades or changes.
+* We have designed this chart to be very un-opinionated about how to configure Elasticsearch.
It exposes ways to set environment variables and mount secrets inside of the container. Doing this makes it much easier for this chart to support multiple versions with minimal changes.
+
+## Migration from helm/charts stable
+
+If you currently have a cluster deployed with the [helm/charts stable](https://github.com/helm/charts/tree/master/stable/elasticsearch) chart you can follow the [migration guide](/elasticsearch/examples/migration/README.md).
+
+## Installing
+
+* Add the elastic helm charts repo
+  ```
+  helm repo add elastic https://helm.elastic.co
+  ```
+* Install it
+  ```
+  helm install --name elasticsearch elastic/elasticsearch
+  ```
+
+## Compatibility
+
+This chart is tested with the latest supported versions. The currently tested versions are:
+
+| 6.x   | 7.x   |
+| ----- | ----- |
+| 6.8.4 | 7.4.1 |
+
+Examples of installing older major versions can be found in the [examples](./examples) directory.
+
+While only the latest releases are tested, it is possible to easily install old or new releases by overriding the `imageTag`. To install version `7.4.1` of Elasticsearch it would look like this:
+
+```
+helm install --name elasticsearch elastic/elasticsearch --set imageTag=7.4.1
+```
+
+## Configuration
+
+| Parameter | Description | Default |
+| --------- | ----------- | ------- |
+| `clusterName` | This will be used as the Elasticsearch [cluster.name](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster.name.html) and should be unique per cluster in the namespace | `elasticsearch` |
+| `nodeGroup` | This is the name that will be used for each group of nodes in the cluster. The name will be `clusterName-nodeGroup-X` | `master` |
+| `masterService` | Optional. The service name used to connect to the masters. You only need to set this if your master `nodeGroup` is set to something other than `master`. See [Clustering and Node Discovery](#clustering-and-node-discovery) for more information. | `` |
+| `roles` | A hash map with the [specific roles](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-node.html) for the node group | `master: true`<br>
`data: true`
`ingest: true` |
+| `replicas` | Kubernetes replica count for the statefulset (i.e. how many pods) | `3` |
+| `minimumMasterNodes` | The value for [discovery.zen.minimum_master_nodes](https://www.elastic.co/guide/en/elasticsearch/reference/6.7/discovery-settings.html#minimum_master_nodes). Should be set to `(master_eligible_nodes / 2) + 1`. Ignored in Elasticsearch versions >= 7. | `2` |
+| `esMajorVersion` | Used to set major version specific configuration. If you are using a custom image and not running the default Elasticsearch version you will need to set this to the version you are running (e.g. `esMajorVersion: 6`) | `""` |
+| `esConfig` | Allows you to add any config files in `/usr/share/elasticsearch/config/` such as `elasticsearch.yml` and `log4j2.properties`. See [values.yaml](./values.yaml) for an example of the formatting. | `{}` |
+| `extraEnvs` | Extra [environment variables](https://kubernetes.io/docs/tasks/inject-data-application/define-environment-variable-container/#using-environment-variables-inside-of-your-config) which will be appended to the `env:` definition for the container | `[]` |
+| `extraVolumes` | Templatable string of additional volumes to be passed to the `tpl` function | `""` |
+| `extraVolumeMounts` | Templatable string of additional volumeMounts to be passed to the `tpl` function | `""` |
+| `extraInitContainers` | Templatable string of additional init containers to be passed to the `tpl` function | `""` |
+| `secretMounts` | Allows you to easily mount a secret as a file inside the statefulset. Useful for mounting certificates and other secrets. See [values.yaml](./values.yaml) for an example | `[]` |
+| `image` | The Elasticsearch docker image | `docker.elastic.co/elasticsearch/elasticsearch` |
+| `imageTag` | The Elasticsearch docker image tag | `7.4.1` |
+| `imagePullPolicy` | The Kubernetes [imagePullPolicy](https://kubernetes.io/docs/concepts/containers/images/#updating-images) value | `IfNotPresent` |
+| `podAnnotations` | Configurable [annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/) applied to all Elasticsearch pods | `{}` |
+| `labels` | Configurable [labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/) applied to all Elasticsearch pods | `{}` |
+| `esJavaOpts` | [Java options](https://www.elastic.co/guide/en/elasticsearch/reference/current/jvm-options.html) for Elasticsearch. This is where you should configure the [jvm heap size](https://www.elastic.co/guide/en/elasticsearch/reference/current/heap-size.html) | `-Xmx1g -Xms1g` |
+| `resources` | Allows you to set the [resources](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/) for the statefulset | `requests.cpu: 100m`<br>
`requests.memory: 2Gi`
`limits.cpu: 1000m`
`limits.memory: 2Gi` | +| `initResources` | Allows you to set the [resources](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/) for the initContainer in the statefulset | {} | +| `sidecarResources` | Allows you to set the [resources](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/) for the sidecar containers in the statefulset | {} | +| `networkHost` | Value for the [network.host Elasticsearch setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/network.host.html) | `0.0.0.0` | +| `volumeClaimTemplate` | Configuration for the [volumeClaimTemplate for statefulsets](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#stable-storage). You will want to adjust the storage (default `30Gi`) and the `storageClassName` if you are using a different storage class | `accessModes: [ "ReadWriteOnce" ]`
`resources.requests.storage: 30Gi` |
+| `persistence.annotations` | Additional persistence annotations for the `volumeClaimTemplate` | `{}` |
+| `persistence.enabled` | Enables a persistent volume for Elasticsearch data. Can be disabled for nodes that only have [roles](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-node.html) which don't require persistent data. | `true` |
+| `priorityClassName` | The [name of the PriorityClass](https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass). No default is supplied as the PriorityClass must be created first. | `""` |
+| `antiAffinityTopologyKey` | The [anti-affinity topology key](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity). By default this will prevent multiple Elasticsearch nodes from running on the same Kubernetes node | `kubernetes.io/hostname` |
+| `antiAffinity` | Setting this to hard enforces the [anti-affinity rules](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity). If it is set to soft it will be done "best effort". Other values will be ignored. | `hard` |
+| `nodeAffinity` | Value for the [node affinity settings](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#node-affinity-beta-feature) | `{}` |
+| `podManagementPolicy` | By default Kubernetes [deploys statefulsets serially](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-management-policies). This deploys them in parallel so that they can discover each other | `Parallel` |
+| `protocol` | The protocol that will be used for the readinessProbe. Change this to `https` if you have `xpack.security.http.ssl.enabled` set | `http` |
+| `httpPort` | The http port that Kubernetes will use for the healthchecks and the service. If you change this you will also need to set [http.port](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-http.html#_settings) in `extraEnvs` | `9200` |
+| `transportPort` | The transport port that Kubernetes will use for the service. If you change this you will also need to set [transport port configuration](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-transport.html#_transport_settings) in `extraEnvs` | `9300` |
+| `service.type` | Type of Elasticsearch service. [Service Types](https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types) | `ClusterIP` |
+| `service.nodePort` | Custom [nodePort](https://kubernetes.io/docs/concepts/services-networking/service/#nodeport) port that can be set if you are using `service.type: NodePort`. | `` |
+| `service.annotations` | Annotations that Kubernetes will use for the service. This will configure the load balancer if `service.type` is `LoadBalancer`. [Annotations](https://kubernetes.io/docs/concepts/services-networking/service/#ssl-support-on-aws) | `{}` |
+| `service.httpPortName` | The name of the http port within the service | `http` |
+| `service.transportPortName` | The name of the transport port within the service | `transport` |
+| `updateStrategy` | The [updateStrategy](https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets) for the statefulset. By default Kubernetes will wait for the cluster to be green after upgrading each pod.
Setting this to `OnDelete` will allow you to manually delete each pod during upgrades | `RollingUpdate` | +| `maxUnavailable` | The [maxUnavailable](https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget) value for the pod disruption budget. By default this will prevent Kubernetes from having more than 1 unhealthy pod in the node group | `1` | +| `fsGroup (DEPRECATED)` | The Group ID (GID) for [securityContext.fsGroup](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/) so that the Elasticsearch user can read from the persistent volume | `` | +| `podSecurityContext` | Allows you to set the [securityContext](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod) for the pod | `fsGroup: 1000`
`runAsUser: 1000` | +| `securityContext` | Allows you to set the [securityContext](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container) for the container | `capabilities.drop:[ALL]`
`runAsNonRoot: true`
`runAsUser: 1000` | +| `terminationGracePeriod` | The [terminationGracePeriod](https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods) in seconds used when trying to stop the pod | `120` | +| `sysctlInitContainer.enabled` | Allows you to disable the sysctlInitContainer if you are setting vm.max_map_count with another method | `true` | +| `sysctlVmMaxMapCount` | Sets the [sysctl vm.max_map_count](https://www.elastic.co/guide/en/elasticsearch/reference/current/vm-max-map-count.html#vm-max-map-count) needed for Elasticsearch | `262144` | +| `readinessProbe` | Configuration fields for the [readinessProbe](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/) | `failureThreshold: 3`
`initialDelaySeconds: 10`
`periodSeconds: 10`
`successThreshold: 3`
`timeoutSeconds: 5` |
+| `clusterHealthCheckParams` | The [Elasticsearch cluster health status params](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html#request-params) that will be used by the readinessProbe command | `wait_for_status=green&timeout=1s` |
+| `imagePullSecrets` | Configuration for [imagePullSecrets](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/#create-a-pod-that-uses-your-secret) so that you can use a private registry for your image | `[]` |
+| `nodeSelector` | Configurable [nodeSelector](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector) so that you can target specific nodes for your Elasticsearch cluster | `{}` |
+| `tolerations` | Configurable [tolerations](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/) | `[]` |
+| `ingress` | Configurable [ingress](https://kubernetes.io/docs/concepts/services-networking/ingress/) to expose the Elasticsearch service. See [`values.yaml`](./values.yaml) for an example | `enabled: false` |
+| `schedulerName` | Name of the [alternate scheduler](https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/#specify-schedulers-for-pods) | `nil` |
+| `masterTerminationFix` | A workaround needed for Elasticsearch < 7.2 to prevent master status being lost during restarts [#63](https://github.com/elastic/helm-charts/issues/63) | `false` |
+| `lifecycle` | Allows you to add lifecycle configuration. See [values.yaml](./values.yaml) for an example of the formatting. | `{}` |
+| `keystore` | Allows you to map Kubernetes secrets into the keystore. See the [config example](/elasticsearch/examples/config/values.yaml) and [how to use the keystore](#how-to-use-the-keystore) | `[]` |
+| `rbac` | Configuration for creating a role, role binding and service account as part of this helm chart with `create: true`. Can also be used to reference an external service account with `serviceAccountName: "externalServiceAccountName"`. | `create: false`<br>
`serviceAccountName: ""` |
+| `podSecurityPolicy` | Configuration for creating a pod security policy with minimal permissions to run this Helm chart with `create: true`. Can also be used to reference an external pod security policy with `name: "externalPodSecurityPolicy"` | `create: false`<br>
`name: ""` |
+
+## Try it out
+
+In [examples/](./examples) you will find some example configurations. These examples are used for the automated testing of this helm chart.
+
+### Default
+
+To deploy a cluster with all default values and run the integration tests:
+
+```
+cd examples/default
+make
+```
+
+### Multi
+
+A cluster with dedicated node types:
+
+```
+cd examples/multi
+make
+```
+
+### Security
+
+A cluster with node to node security and https enabled. This example uses autogenerated certificates and password. For a production deployment you should generate SSL certificates following the [official docs](https://www.elastic.co/guide/en/elasticsearch/reference/current/configuring-tls.html#node-certificates).
+
+* Generate the certificates and install Elasticsearch
+  ```
+  cd examples/security
+  make
+
+  # Run a curl command to interact with the cluster
+  kubectl exec -ti security-master-0 -- sh -c 'curl -u $ELASTIC_USERNAME:$ELASTIC_PASSWORD -k https://localhost:9200/_cluster/health?pretty'
+  ```
+
+### FAQ
+
+#### How to install plugins?
+
+The [recommended](https://www.elastic.co/guide/en/elasticsearch/reference/current/docker.html#_c_customized_image) way to install plugins into our docker images is to create a custom docker image.
+
+The Dockerfile would look something like:
+
+```
+ARG elasticsearch_version
+FROM docker.elastic.co/elasticsearch/elasticsearch:${elasticsearch_version}
+
+RUN bin/elasticsearch-plugin install --batch repository-gcs
+```
+
+Then update the `image` in your values to point to your custom image.
+
+There are a couple of reasons why we recommend this:
+
+1. Tying the availability of Elasticsearch to the download service used to install plugins is not a great idea or something that we recommend. This is especially true in Kubernetes, where it is normal and expected for a container to be moved to another host at random times.
+2. Mutating the state of a running docker image (by installing plugins) goes against the best practices of containers and immutable infrastructure.
+
+#### How to use the keystore?
+
+##### Basic example
+
+Create the secret. The key name needs to be the keystore key path. In this example we will create a secret from a file and from a literal string.
+
+```
+kubectl create secret generic encryption-key --from-file=xpack.watcher.encryption_key=./watcher_encryption_key
+kubectl create secret generic slack-hook --from-literal=xpack.notification.slack.account.monitoring.secure_url='https://hooks.slack.com/services/asdasdasd/asdasdas/asdasd'
+```
+
+To add these secrets to the keystore:
+
+```
+keystore:
+  - secretName: encryption-key
+  - secretName: slack-hook
+```
+
+##### Multiple keys
+
+All keys in the secret will be added to the keystore. To create the previous example in one secret you could also do:
+
+```
+kubectl create secret generic keystore-secrets --from-file=xpack.watcher.encryption_key=./watcher_encryption_key --from-literal=xpack.notification.slack.account.monitoring.secure_url='https://hooks.slack.com/services/asdasdasd/asdasdas/asdasd'
+```
+
+```
+keystore:
+  - secretName: keystore-secrets
+```
+
+##### Custom paths and keys
+
+If you are using these secrets for other applications (besides the Elasticsearch keystore) then it is also possible to specify the keystore path and which keys you want to add. Everything specified under each `keystore` item will be passed through to the `volumeMounts` section for [mounting the secret](https://kubernetes.io/docs/concepts/configuration/secret/#using-secrets).
In this example we will only add the `slack_hook` key from a secret that also has other keys. Our secret looks like this:
+
+```
+kubectl create secret generic slack-secrets --from-literal=slack_channel='#general' --from-literal=slack_hook='https://hooks.slack.com/services/asdasdasd/asdasdas/asdasd'
+```
+
+We only want to add the `slack_hook` key to the keystore at path `xpack.notification.slack.account.monitoring.secure_url`:
+
+```
+keystore:
+  - secretName: slack-secrets
+    items:
+      - key: slack_hook
+        path: xpack.notification.slack.account.monitoring.secure_url
+```
+
+You can also take a look at the [config example](/elasticsearch/examples/config/) which is used as part of the automated testing pipeline.
+
+#### How to enable snapshotting?
+
+1. Install your [snapshot plugin](https://www.elastic.co/guide/en/elasticsearch/plugins/current/repository.html) into a custom docker image following the [how to install plugins guide](/elasticsearch/README.md#how-to-install-plugins)
+2. Add any required secrets or credentials into an Elasticsearch keystore following the [how to use the keystore guide](/elasticsearch/README.md#how-to-use-the-keystore)
+3. Configure the [snapshot repository](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html) as you normally would.
+4. To automate snapshots you can use a tool like [curator](https://www.elastic.co/guide/en/elasticsearch/client/curator/current/snapshot.html). In the future there are plans to have Elasticsearch manage automated snapshots with [Snapshot Lifecycle Management](https://github.com/elastic/elasticsearch/issues/38461).
+
+### Local development environments
+
+This chart is designed to run on production scale Kubernetes clusters with multiple nodes, lots of memory and persistent storage. For that reason it can be a bit tricky to run it against local Kubernetes environments such as minikube. Below are some examples of how to get this working locally.
+
+#### Minikube
+
+This chart also works successfully on [minikube](https://kubernetes.io/docs/setup/minikube/) in addition to typical hosted Kubernetes environments.
+An example `values.yaml` file for minikube is provided under `examples/`.
+
+In order to properly support the required persistent volume claims for the Elasticsearch `StatefulSet`, the `default-storageclass` and `storage-provisioner` minikube addons must be enabled.
+
+```
+minikube addons enable default-storageclass
+minikube addons enable storage-provisioner
+cd examples/minikube
+make
+```
+
+Note that if `helm` or `kubectl` timeouts occur, you may consider creating a minikube VM with more CPU cores or memory allocated.
+
+#### Docker for Mac - Kubernetes
+
+It is also possible to run this chart with the built-in Kubernetes cluster that comes with [docker-for-mac](https://docs.docker.com/docker-for-mac/kubernetes/).
+
+```
+cd examples/docker-for-mac
+make
+```
+
+#### KIND - Kubernetes
+
+It is also possible to run this chart using a Kubernetes [KIND (Kubernetes in Docker)](https://github.com/kubernetes-sigs/kind) cluster:
+
+```
+cd examples/kubernetes-kind
+make
+```
+
+## Clustering and Node Discovery
+
+This chart facilitates Elasticsearch node discovery and services by creating two `Service` definitions in Kubernetes, one with the name `$clusterName-$nodeGroup` and another named `$clusterName-$nodeGroup-headless`.
+Only `Ready` pods are a part of the `$clusterName-$nodeGroup` service, while all pods (`Ready` or not) are a part of `$clusterName-$nodeGroup-headless`.
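+
+For example, with the default `clusterName: elasticsearch` and `nodeGroup: master`, both services can be listed as follows (a quick sketch; the names simply follow the pattern described above):
+
+```
+kubectl get service elasticsearch-master elasticsearch-master-headless
+```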
+
+If your group of master nodes has the default `nodeGroup: master` then you can just add new groups of nodes with a different `nodeGroup` and they will automatically discover the correct master. If your master nodes have a different `nodeGroup` name then you will need to set `masterService` to `$clusterName-$masterNodeGroup`.
+
+The chart value for `masterService` is used to populate `discovery.zen.ping.unicast.hosts`, which Elasticsearch nodes will use to contact master nodes and form a cluster.
+Therefore, to add a group of nodes to an existing cluster, setting `masterService` to the desired `Service` name of the related cluster is sufficient.
+
+For an example of deploying both a group of master nodes and data nodes using multiple releases of this chart, see the accompanying values files in `examples/multi`.
+
+## Testing
+
+This chart uses [pytest](https://docs.pytest.org/en/latest/) to test the templating logic. The dependencies for testing can be installed from the [`requirements.txt`](../requirements.txt) in the parent directory.
+
+```
+pip install -r ../requirements.txt
+make pytest
+```
+
+You can also use `helm template` to look at the YAML being generated:
+
+```
+make template
+```
+
+It is possible to run all of the tests and linting inside of a docker container:
+
+```
+make test
+```
+
+## Integration Testing
+
+Integration tests are run using [goss](https://github.com/aelsabbahy/goss/blob/master/docs/manual.md), which is a serverspec-like tool written in golang. See [goss.yaml](examples/default/test/goss.yaml) for an example of what the tests look like.
+
+To run the goss tests against the default example:
+
+```
+cd examples/default
+make goss
+```
diff --git a/charts/elastic/elasticsearch/examples/6.x/Makefile b/charts/elastic/elasticsearch/examples/6.x/Makefile
new file mode 100644
index 0000000..2020d4a
--- /dev/null
+++ b/charts/elastic/elasticsearch/examples/6.x/Makefile
@@ -0,0 +1,15 @@
+default: test
+include ../../../helpers/examples.mk
+
+RELEASE := helm-es-six
+
+install:
+	helm upgrade --wait --timeout=600 --install $(RELEASE) --values ./values.yaml ../../
+
+restart:
+	helm upgrade --set terminationGracePeriod=121 --wait --timeout=600 --install $(RELEASE) --values ./values.yaml ../../
+
+test: install goss
+
+purge:
+	helm del --purge $(RELEASE)
diff --git a/charts/elastic/elasticsearch/examples/6.x/test/goss.yaml b/charts/elastic/elasticsearch/examples/6.x/test/goss.yaml
new file mode 100644
index 0000000..be15386
--- /dev/null
+++ b/charts/elastic/elasticsearch/examples/6.x/test/goss.yaml
@@ -0,0 +1,17 @@
+http:
+  http://localhost:9200/_cluster/health:
+    status: 200
+    timeout: 2000
+    body:
+      - 'green'
+      - '"number_of_nodes":3'
+      - '"number_of_data_nodes":3'
+
+  http://localhost:9200:
+    status: 200
+    timeout: 2000
+    body:
+      - '"number" : "6.8.4"'
+      - '"cluster_name" : "six"'
+      - '"name" : "six-master-0"'
+      - 'You Know, for Search'
diff --git a/charts/elastic/elasticsearch/examples/6.x/values.yaml b/charts/elastic/elasticsearch/examples/6.x/values.yaml
new file mode 100644
index 0000000..aef2284
--- /dev/null
+++ b/charts/elastic/elasticsearch/examples/6.x/values.yaml
@@ -0,0 +1,4 @@
+---
+
+clusterName: "six"
+imageTag: "6.8.4"
diff --git a/charts/elastic/elasticsearch/examples/config/Makefile b/charts/elastic/elasticsearch/examples/config/Makefile
new file mode 100644
index 0000000..a3f9617
--- /dev/null
+++ b/charts/elastic/elasticsearch/examples/config/Makefile
@@ -0,0 +1,19 @@
+default: test
+include ../../../helpers/examples.mk
+
+RELEASE := helm-es-config
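+
+# The `secrets` target below recreates the Kubernetes secrets consumed by
+# values.yaml: the login credentials used via extraEnvs plus the entries
+# mapped into the Elasticsearch keystore, so each test run starts clean.
+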
+install:
+	helm upgrade --wait --timeout=600 --install $(RELEASE) --values ./values.yaml ../../
+
+secrets:
+	kubectl delete secret elastic-config-credentials elastic-config-secret elastic-config-slack elastic-config-custom-path || true
+	kubectl create secret generic elastic-config-credentials --from-literal=password=changeme --from-literal=username=elastic
+	kubectl create secret generic elastic-config-slack --from-literal=xpack.notification.slack.account.monitoring.secure_url='https://hooks.slack.com/services/asdasdasd/asdasdas/asdasd'
+	kubectl create secret generic elastic-config-secret --from-file=xpack.watcher.encryption_key=./watcher_encryption_key
+	kubectl create secret generic elastic-config-custom-path --from-literal=slack_url='https://hooks.slack.com/services/asdasdasd/asdasdas/asdasd' --from-literal=thing_i_don_tcare_about=test
+
+test: secrets install goss
+
+purge:
+	helm del --purge $(RELEASE)
diff --git a/charts/elastic/elasticsearch/examples/config/README.md b/charts/elastic/elasticsearch/examples/config/README.md
new file mode 100644
index 0000000..d98d836
--- /dev/null
+++ b/charts/elastic/elasticsearch/examples/config/README.md
@@ -0,0 +1,3 @@
+# Config
+
+An example test suite exercising some of the optional features of this chart.
diff --git a/charts/elastic/elasticsearch/examples/config/test/goss.yaml b/charts/elastic/elasticsearch/examples/config/test/goss.yaml
new file mode 100644
index 0000000..8487013
--- /dev/null
+++ b/charts/elastic/elasticsearch/examples/config/test/goss.yaml
@@ -0,0 +1,26 @@
+http:
+  http://localhost:9200/_cluster/health:
+    status: 200
+    timeout: 2000
+    body:
+      - 'green'
+      - '"number_of_nodes":1'
+      - '"number_of_data_nodes":1'
+
+  http://localhost:9200:
+    status: 200
+    timeout: 2000
+    body:
+      - '"cluster_name" : "config"'
+      - '"name" : "config-master-0"'
+      - 'You Know, for Search'
+
+command:
+  "elasticsearch-keystore list":
+    exit-status: 0
+    stdout:
+      - keystore.seed
+      - bootstrap.password
+      - xpack.notification.slack.account.monitoring.secure_url
+      - xpack.notification.slack.account.otheraccount.secure_url
+      - xpack.watcher.encryption_key
diff --git a/charts/elastic/elasticsearch/examples/config/values.yaml b/charts/elastic/elasticsearch/examples/config/values.yaml
new file mode 100644
index 0000000..ebde4f4
--- /dev/null
+++ b/charts/elastic/elasticsearch/examples/config/values.yaml
@@ -0,0 +1,31 @@
+---
+
+clusterName: "config"
+replicas: 1
+
+extraEnvs:
+  - name: ELASTIC_PASSWORD
+    valueFrom:
+      secretKeyRef:
+        name: elastic-config-credentials
+        key: password
+  - name: ELASTIC_USERNAME
+    valueFrom:
+      secretKeyRef:
+        name: elastic-config-credentials
+        key: username
+
+# This is just a dummy file to make sure that
+# the keystore can be mounted at the same time
+# as a custom elasticsearch.yml
+esConfig:
+  elasticsearch.yml: |
+    path.data: /usr/share/elasticsearch/data
+
+keystore:
+  - secretName: elastic-config-secret
+  - secretName: elastic-config-slack
+  - secretName: elastic-config-custom-path
+    items:
+      - key: slack_url
+        path: xpack.notification.slack.account.otheraccount.secure_url
diff --git a/charts/elastic/elasticsearch/examples/config/watcher_encryption_key b/charts/elastic/elasticsearch/examples/config/watcher_encryption_key
new file mode 100644
index 0000000..b5f9078
--- /dev/null
+++ b/charts/elastic/elasticsearch/examples/config/watcher_encryption_key
@@ -0,0 +1 @@
+supersecret
diff --git a/charts/elastic/elasticsearch/examples/default/Makefile b/charts/elastic/elasticsearch/examples/default/Makefile
new file mode 100644
index
0000000..5f5215c --- /dev/null +++ b/charts/elastic/elasticsearch/examples/default/Makefile @@ -0,0 +1,16 @@ +default: test + +include ../../../helpers/examples.mk + +RELEASE := helm-es-default + +install: + helm upgrade --wait --timeout=600 --install $(RELEASE) ../../ + +restart: + helm upgrade --set terminationGracePeriod=121 --wait --timeout=600 --install $(RELEASE) ../../ + +test: install goss + +purge: + helm del --purge $(RELEASE) diff --git a/charts/elastic/elasticsearch/examples/default/rolling_upgrade.sh b/charts/elastic/elasticsearch/examples/default/rolling_upgrade.sh new file mode 100644 index 0000000..c5a2a88 --- /dev/null +++ b/charts/elastic/elasticsearch/examples/default/rolling_upgrade.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash -x + +kubectl proxy || true & + +make & +PROC_ID=$! + +while kill -0 "$PROC_ID" >/dev/null 2>&1; do + echo "PROCESS IS RUNNING" + if curl --fail 'http://localhost:8001/api/v1/proxy/namespaces/default/services/elasticsearch-master:9200/_search' ; then + echo "cluster is healthy" + else + echo "cluster not healthy!" + exit 1 + fi + sleep 1 +done +echo "PROCESS TERMINATED" +exit 0 diff --git a/charts/elastic/elasticsearch/examples/default/test/goss.yaml b/charts/elastic/elasticsearch/examples/default/test/goss.yaml new file mode 100644 index 0000000..f5c4056 --- /dev/null +++ b/charts/elastic/elasticsearch/examples/default/test/goss.yaml @@ -0,0 +1,39 @@ +kernel-param: + vm.max_map_count: + value: '262144' + +http: + http://elasticsearch-master:9200/_cluster/health: + status: 200 + timeout: 2000 + body: + - 'green' + - '"number_of_nodes":3' + - '"number_of_data_nodes":3' + + http://localhost:9200: + status: 200 + timeout: 2000 + body: + - '"number" : "7.4.1"' + - '"cluster_name" : "elasticsearch"' + - '"name" : "elasticsearch-master-0"' + - 'You Know, for Search' + +file: + /usr/share/elasticsearch/data: + exists: true + mode: "2775" + owner: root + group: elasticsearch + filetype: directory + +mount: + /usr/share/elasticsearch/data: + exists: true + +user: + elasticsearch: + exists: true + uid: 1000 + gid: 1000 diff --git a/charts/elastic/elasticsearch/examples/docker-for-mac/Makefile b/charts/elastic/elasticsearch/examples/docker-for-mac/Makefile new file mode 100644 index 0000000..398545e --- /dev/null +++ b/charts/elastic/elasticsearch/examples/docker-for-mac/Makefile @@ -0,0 +1,12 @@ +default: test + +RELEASE := helm-es-docker-for-mac + +install: + helm upgrade --wait --timeout=900 --install --values values.yaml $(RELEASE) ../../ + +test: install + helm test $(RELEASE) + +purge: + helm del --purge $(RELEASE) diff --git a/charts/elastic/elasticsearch/examples/docker-for-mac/values.yaml b/charts/elastic/elasticsearch/examples/docker-for-mac/values.yaml new file mode 100644 index 0000000..f7deba6 --- /dev/null +++ b/charts/elastic/elasticsearch/examples/docker-for-mac/values.yaml @@ -0,0 +1,23 @@ +--- +# Permit co-located instances for solitary minikube virtual machines. +antiAffinity: "soft" + +# Shrink default JVM heap. +esJavaOpts: "-Xmx128m -Xms128m" + +# Allocate smaller chunks of memory per pod. +resources: + requests: + cpu: "100m" + memory: "512M" + limits: + cpu: "1000m" + memory: "512M" + +# Request smaller persistent volumes. 
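+# The "hostpath" storage class below is the one bundled with
+# Docker for Mac's built-in Kubernetes distribution.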
+volumeClaimTemplate:
+  accessModes: [ "ReadWriteOnce" ]
+  storageClassName: "hostpath"
+  resources:
+    requests:
+      storage: 100M
diff --git a/charts/elastic/elasticsearch/examples/kubernetes-kind/Makefile b/charts/elastic/elasticsearch/examples/kubernetes-kind/Makefile
new file mode 100644
index 0000000..c0a0f8d
--- /dev/null
+++ b/charts/elastic/elasticsearch/examples/kubernetes-kind/Makefile
@@ -0,0 +1,12 @@
+default: test
+
+RELEASE := helm-es-kind
+
+install:
+	helm upgrade --wait --timeout=900 --install --values values.yaml $(RELEASE) ../../
+
+test: install
+	helm test $(RELEASE)
+
+purge:
+	helm del --purge $(RELEASE)
diff --git a/charts/elastic/elasticsearch/examples/kubernetes-kind/values.yaml b/charts/elastic/elasticsearch/examples/kubernetes-kind/values.yaml
new file mode 100644
index 0000000..ffa7b62
--- /dev/null
+++ b/charts/elastic/elasticsearch/examples/kubernetes-kind/values.yaml
@@ -0,0 +1,36 @@
+---
+# Permit co-located instances for solitary minikube virtual machines.
+antiAffinity: "soft"
+
+# Shrink default JVM heap.
+esJavaOpts: "-Xmx128m -Xms128m"
+
+# Allocate smaller chunks of memory per pod.
+resources:
+  requests:
+    cpu: "100m"
+    memory: "512M"
+  limits:
+    cpu: "1000m"
+    memory: "512M"
+
+# Request smaller persistent volumes.
+volumeClaimTemplate:
+  accessModes: [ "ReadWriteOnce" ]
+  resources:
+    requests:
+      storage: 100M
+
+extraInitContainers: |
+  - name: create
+    image: busybox:1.28
+    command: ['mkdir', '/usr/share/elasticsearch/data/nodes/']
+    volumeMounts:
+      - mountPath: /usr/share/elasticsearch/data
+        name: elasticsearch-master
+  - name: file-permissions
+    image: busybox:1.28
+    command: ['chown', '-R', '1000:1000', '/usr/share/elasticsearch/']
+    volumeMounts:
+      - mountPath: /usr/share/elasticsearch/data
+        name: elasticsearch-master
+
diff --git a/charts/elastic/elasticsearch/examples/migration/Makefile b/charts/elastic/elasticsearch/examples/migration/Makefile
new file mode 100644
index 0000000..3b1dac1
--- /dev/null
+++ b/charts/elastic/elasticsearch/examples/migration/Makefile
@@ -0,0 +1,10 @@
+PREFIX := helm-es-migration
+
+data:
+	helm upgrade --wait --timeout=600 --install --values ./data.yml $(PREFIX)-data ../../
+
+master:
+	helm upgrade --wait --timeout=600 --install --values ./master.yml $(PREFIX)-master ../../
+
+client:
+	helm upgrade --wait --timeout=600 --install --values ./client.yml $(PREFIX)-client ../../
diff --git a/charts/elastic/elasticsearch/examples/migration/README.md b/charts/elastic/elasticsearch/examples/migration/README.md
new file mode 100644
index 0000000..e5f4b1a
--- /dev/null
+++ b/charts/elastic/elasticsearch/examples/migration/README.md
@@ -0,0 +1,86 @@
+# Migration Guide from helm/charts
+
+There are two viable options for migrating from the community Elasticsearch helm chart in the [helm/charts](https://github.com/helm/charts/tree/master/stable/elasticsearch) repo:
+
+1. Restoring from Snapshot to a fresh cluster.
+2. Live migration by joining a new cluster to the existing cluster.
+
+## Restoring from Snapshot
+
+This is the recommended and preferred option. The downside is that it will involve a period of write downtime during the migration. If you have a way to temporarily stop writes to your cluster then this is the way to go. This is also a lot simpler as it just involves launching a fresh cluster and restoring a snapshot following the [restoring to a different cluster guide](https://www.elastic.co/guide/en/elasticsearch/reference/6.6/modules-snapshots.html#_restoring_to_a_different_cluster).
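+
+As a rough sketch, assuming a snapshot repository named `my_backup` containing a snapshot named `snapshot_1` is registered on both clusters (both names are placeholders for this example), the restore on the fresh cluster boils down to:
+
+```
+curl -X POST "localhost:9200/_snapshot/my_backup/snapshot_1/_restore?wait_for_completion=true"
+```
+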
+## Live migration
+
+If restoring from a snapshot is not possible due to the write downtime then a live migration is also possible. It is very important to first test this in a testing environment to make sure you are comfortable with the process and fully understand what is happening.
+
+This process will involve joining a new set of master, data and client nodes to an existing cluster that has been deployed using the [helm/charts](https://github.com/helm/charts/tree/master/stable/elasticsearch) community chart. Nodes will then be replaced one by one in a controlled fashion to decommission the old cluster.
+
+This example will be using the default values for the existing helm/charts release and for the elastic helm-charts release. If you have changed any of the default values then you will need to first make sure that your values are configured in a compatible way before starting the migration.
+
+The process will involve a re-sync and a rolling restart of all of your data nodes. Therefore it is important to disable shard allocation and perform a synced flush like you normally would during any other rolling upgrade. See the [rolling upgrades guide](https://www.elastic.co/guide/en/elasticsearch/reference/6.6/rolling-upgrades.html) for more information.
+
+* The default image for this chart is `docker.elastic.co/elasticsearch/elasticsearch` which contains the default distribution of Elasticsearch with a [basic license](https://www.elastic.co/subscriptions). Make sure to update the `image` and `imageTag` values to the correct Docker image and Elasticsearch version that you currently have deployed.
+* Convert your current helm/charts configuration into something that is compatible with this chart.
+* Take a fresh snapshot of your cluster. If something goes wrong you want to be able to restore your data no matter what.
+* Check that your cluster's health is green. If not, abort and make sure your cluster is healthy before continuing.
+  ```
+  curl localhost:9200/_cluster/health
+  ```
+* Deploy new data nodes which will join the existing cluster. Take a look at the configuration in [data.yml](./data.yml).
+  ```
+  make data
+  ```
+* Check that the new nodes have joined the cluster (run this and any other curl commands from within one of your pods).
+  ```
+  curl localhost:9200/_cat/nodes
+  ```
+* Check that your cluster is still green. If so, we can now start to scale down the existing data nodes. Assuming you have the default number of data nodes (2), we now want to scale it down to 1.
+  ```
+  kubectl scale statefulsets my-release-elasticsearch-data --replicas=1
+  ```
+* Wait for your cluster to become green again.
+  ```
+  watch 'curl -s localhost:9200/_cluster/health'
+  ```
+* Once the cluster is green we can scale down again.
+  ```
+  kubectl scale statefulsets my-release-elasticsearch-data --replicas=0
+  ```
+* Wait for the cluster to be green again.
+* OK. We now have all data nodes running in the new cluster. Time to replace the masters by first scaling the masters down from 3 to 2. Between each step make sure to wait for the cluster to become green again, and check with `curl localhost:9200/_cat/nodes` that you see the correct number of master nodes. During this process we will always make sure to keep at least 2 master nodes running so as not to lose quorum.
+  ```
+  kubectl scale statefulsets my-release-elasticsearch-master --replicas=2
+  ```
+* Now deploy a single new master so that we have 3 masters again. See [master.yml](./master.yml) for the configuration.
+ ``` + make master + ``` +* Scale down old masters to 1 + ``` + kubectl scale statefulsets my-release-elasticsearch-master --replicas=1 + ``` +* Edit the masters in [masters.yml](./masters.yml) to 2 and redeploy + ``` + make master + ``` +* Scale down the old masters to 0 + ``` + kubectl scale statefulsets my-release-elasticsearch-master --replicas=0 + ``` +* Edit the [masters.yml](./masters.yml) to have 3 replicas and remove the `discovery.zen.ping.unicast.hosts` entry from `extraEnvs` then redeploy the masters. This will make sure all 3 masters are running in the new cluster and are pointing at each other for discovery. + ``` + make master + ``` +* Remove the `discovery.zen.ping.unicast.hosts` entry from `extraEnvs` then redeploy the data nodes to make sure they are pointing at the new masters. + ``` + make data + ``` +* Deploy the client nodes + ``` + make client + ``` +* Update any processes that are talking to the existing client nodes and point them to the new client nodes. Once this is done you can scale down the old client nodes + ``` + kubectl scale deployment my-release-elasticsearch-client --replicas=0 + ``` +* The migration should now be complete. After verifying that everything is working correctly you can cleanup leftover resources from your old cluster. diff --git a/charts/elastic/elasticsearch/examples/migration/client.yml b/charts/elastic/elasticsearch/examples/migration/client.yml new file mode 100644 index 0000000..30ee700 --- /dev/null +++ b/charts/elastic/elasticsearch/examples/migration/client.yml @@ -0,0 +1,23 @@ +--- + +replicas: 2 + +clusterName: "elasticsearch" +nodeGroup: "client" + +esMajorVersion: 6 + +roles: + master: "false" + ingest: "false" + data: "false" + +volumeClaimTemplate: + accessModes: [ "ReadWriteOnce" ] + storageClassName: "standard" + resources: + requests: + storage: 1Gi # Currently needed till pvcs are made optional + +persistence: + enabled: false diff --git a/charts/elastic/elasticsearch/examples/migration/data.yml b/charts/elastic/elasticsearch/examples/migration/data.yml new file mode 100644 index 0000000..eedcbb0 --- /dev/null +++ b/charts/elastic/elasticsearch/examples/migration/data.yml @@ -0,0 +1,17 @@ +--- + +replicas: 2 + +esMajorVersion: 6 + +extraEnvs: + - name: discovery.zen.ping.unicast.hosts + value: "my-release-elasticsearch-discovery" + +clusterName: "elasticsearch" +nodeGroup: "data" + +roles: + master: "false" + ingest: "false" + data: "true" diff --git a/charts/elastic/elasticsearch/examples/migration/master.yml b/charts/elastic/elasticsearch/examples/migration/master.yml new file mode 100644 index 0000000..3e3a2f1 --- /dev/null +++ b/charts/elastic/elasticsearch/examples/migration/master.yml @@ -0,0 +1,26 @@ +--- + +# Temporarily set to 3 so we can scale up/down the old a new cluster +# one at a time whilst always keeping 3 masters running +replicas: 1 + +esMajorVersion: 6 + +extraEnvs: + - name: discovery.zen.ping.unicast.hosts + value: "my-release-elasticsearch-discovery" + +clusterName: "elasticsearch" +nodeGroup: "master" + +roles: + master: "true" + ingest: "false" + data: "false" + +volumeClaimTemplate: + accessModes: [ "ReadWriteOnce" ] + storageClassName: "standard" + resources: + requests: + storage: 4Gi diff --git a/charts/elastic/elasticsearch/examples/minikube/Makefile b/charts/elastic/elasticsearch/examples/minikube/Makefile new file mode 100644 index 0000000..97109ce --- /dev/null +++ b/charts/elastic/elasticsearch/examples/minikube/Makefile @@ -0,0 +1,12 @@ +default: test + +RELEASE := helm-es-minikube + 
+install: + helm upgrade --wait --timeout=900 --install --values values.yaml $(RELEASE) ../../ + +test: install + helm test $(RELEASE) + +purge: + helm del --purge $(RELEASE) diff --git a/charts/elastic/elasticsearch/examples/minikube/values.yaml b/charts/elastic/elasticsearch/examples/minikube/values.yaml new file mode 100644 index 0000000..ccceb3a --- /dev/null +++ b/charts/elastic/elasticsearch/examples/minikube/values.yaml @@ -0,0 +1,23 @@ +--- +# Permit co-located instances for solitary minikube virtual machines. +antiAffinity: "soft" + +# Shrink default JVM heap. +esJavaOpts: "-Xmx128m -Xms128m" + +# Allocate smaller chunks of memory per pod. +resources: + requests: + cpu: "100m" + memory: "512M" + limits: + cpu: "1000m" + memory: "512M" + +# Request smaller persistent volumes. +volumeClaimTemplate: + accessModes: [ "ReadWriteOnce" ] + storageClassName: "standard" + resources: + requests: + storage: 100M diff --git a/charts/elastic/elasticsearch/examples/multi/Makefile b/charts/elastic/elasticsearch/examples/multi/Makefile new file mode 100644 index 0000000..836ec2e --- /dev/null +++ b/charts/elastic/elasticsearch/examples/multi/Makefile @@ -0,0 +1,16 @@ +default: test + +include ../../../helpers/examples.mk + +PREFIX := helm-es-multi +RELEASE := helm-es-multi-master + +install: + helm upgrade --wait --timeout=600 --install --values ./master.yml $(PREFIX)-master ../../ + helm upgrade --wait --timeout=600 --install --values ./data.yml $(PREFIX)-data ../../ + +test: install goss + +purge: + helm del --purge $(PREFIX)-master + helm del --purge $(PREFIX)-data diff --git a/charts/elastic/elasticsearch/examples/multi/data.yml b/charts/elastic/elasticsearch/examples/multi/data.yml new file mode 100644 index 0000000..ecc6893 --- /dev/null +++ b/charts/elastic/elasticsearch/examples/multi/data.yml @@ -0,0 +1,9 @@ +--- + +clusterName: "multi" +nodeGroup: "data" + +roles: + master: "false" + ingest: "true" + data: "true" diff --git a/charts/elastic/elasticsearch/examples/multi/master.yml b/charts/elastic/elasticsearch/examples/multi/master.yml new file mode 100644 index 0000000..2ca4cca --- /dev/null +++ b/charts/elastic/elasticsearch/examples/multi/master.yml @@ -0,0 +1,9 @@ +--- + +clusterName: "multi" +nodeGroup: "master" + +roles: + master: "true" + ingest: "false" + data: "false" diff --git a/charts/elastic/elasticsearch/examples/multi/test/goss.yaml b/charts/elastic/elasticsearch/examples/multi/test/goss.yaml new file mode 100644 index 0000000..18cb250 --- /dev/null +++ b/charts/elastic/elasticsearch/examples/multi/test/goss.yaml @@ -0,0 +1,9 @@ +http: + http://localhost:9200/_cluster/health: + status: 200 + timeout: 2000 + body: + - 'green' + - '"cluster_name":"multi"' + - '"number_of_nodes":6' + - '"number_of_data_nodes":3' diff --git a/charts/elastic/elasticsearch/examples/openshift/Makefile b/charts/elastic/elasticsearch/examples/openshift/Makefile new file mode 100644 index 0000000..6e49591 --- /dev/null +++ b/charts/elastic/elasticsearch/examples/openshift/Makefile @@ -0,0 +1,15 @@ +default: test +include ../../../helpers/examples.mk + +RELEASE := elasticsearch + +template: + helm template --values ./values.yaml ../../ + +install: + helm upgrade --wait --timeout=600 --install $(RELEASE) --values ./values.yaml ../../ + +test: install goss + +purge: + helm del --purge $(RELEASE) diff --git a/charts/elastic/elasticsearch/examples/openshift/test/goss.yaml b/charts/elastic/elasticsearch/examples/openshift/test/goss.yaml new file mode 100644 index 0000000..67a3920 --- /dev/null +++ 
b/charts/elastic/elasticsearch/examples/openshift/test/goss.yaml @@ -0,0 +1,17 @@ +http: + http://localhost:9200/_cluster/health: + status: 200 + timeout: 2000 + body: + - 'green' + - '"number_of_nodes":3' + - '"number_of_data_nodes":3' + + http://localhost:9200: + status: 200 + timeout: 2000 + body: + - '"number" : "7.4.1"' + - '"cluster_name" : "elasticsearch"' + - '"name" : "elasticsearch-master-0"' + - 'You Know, for Search' diff --git a/charts/elastic/elasticsearch/examples/openshift/values.yaml b/charts/elastic/elasticsearch/examples/openshift/values.yaml new file mode 100644 index 0000000..7f5cd84 --- /dev/null +++ b/charts/elastic/elasticsearch/examples/openshift/values.yaml @@ -0,0 +1,10 @@ +--- + +securityContext: + runAsUser: null + +podSecurityContext: + fsGroup: null + +sysctlInitContainer: + enabled: false diff --git a/charts/elastic/elasticsearch/examples/oss/Makefile b/charts/elastic/elasticsearch/examples/oss/Makefile new file mode 100644 index 0000000..e274659 --- /dev/null +++ b/charts/elastic/elasticsearch/examples/oss/Makefile @@ -0,0 +1,12 @@ +default: test +include ../../../helpers/examples.mk + +RELEASE := helm-es-oss + +install: + helm upgrade --wait --timeout=600 --install $(RELEASE) --values ./values.yaml ../../ + +test: install goss + +purge: + helm del --purge $(RELEASE) diff --git a/charts/elastic/elasticsearch/examples/oss/test/goss.yaml b/charts/elastic/elasticsearch/examples/oss/test/goss.yaml new file mode 100644 index 0000000..888a41e --- /dev/null +++ b/charts/elastic/elasticsearch/examples/oss/test/goss.yaml @@ -0,0 +1,17 @@ +http: + http://localhost:9200/_cluster/health: + status: 200 + timeout: 2000 + body: + - 'green' + - '"number_of_nodes":3' + - '"number_of_data_nodes":3' + + http://localhost:9200: + status: 200 + timeout: 2000 + body: + - '"number" : "7.4.1"' + - '"cluster_name" : "oss"' + - '"name" : "oss-master-0"' + - 'You Know, for Search' diff --git a/charts/elastic/elasticsearch/examples/oss/values.yaml b/charts/elastic/elasticsearch/examples/oss/values.yaml new file mode 100644 index 0000000..adcb7df --- /dev/null +++ b/charts/elastic/elasticsearch/examples/oss/values.yaml @@ -0,0 +1,4 @@ +--- + +clusterName: "oss" +image: "docker.elastic.co/elasticsearch/elasticsearch-oss" diff --git a/charts/elastic/elasticsearch/examples/security/Makefile b/charts/elastic/elasticsearch/examples/security/Makefile new file mode 100644 index 0000000..1c7e7c2 --- /dev/null +++ b/charts/elastic/elasticsearch/examples/security/Makefile @@ -0,0 +1,31 @@ +default: test + +include ../../../helpers/examples.mk + +RELEASE := helm-es-security + +install: + helm upgrade --wait --timeout=600 --install --values ./security.yml $(RELEASE) ../../ + +purge: + kubectl delete secrets elastic-credentials elastic-certificates elastic-certificate-pem || true + helm del --purge $(RELEASE) + +test: secrets install goss + +secrets: + docker rm -f elastic-helm-charts-certs || true + rm -f elastic-certificates.p12 elastic-certificate.pem elastic-stack-ca.p12 || true + password=$$([ ! 
-z "$$ELASTIC_PASSWORD" ] && echo $$ELASTIC_PASSWORD || echo $$(docker run --rm docker.elastic.co/elasticsearch/elasticsearch:$(STACK_VERSION) /bin/sh -c "< /dev/urandom tr -cd '[:alnum:]' | head -c20")) && \ + docker run --name elastic-helm-charts-certs -i -w /app \ + docker.elastic.co/elasticsearch/elasticsearch:$(STACK_VERSION) \ + /bin/sh -c " \ + elasticsearch-certutil ca --out /app/elastic-stack-ca.p12 --pass '' && \ + elasticsearch-certutil cert --name security-master --dns security-master --ca /app/elastic-stack-ca.p12 --pass '' --ca-pass '' --out /app/elastic-certificates.p12" && \ + docker cp elastic-helm-charts-certs:/app/elastic-certificates.p12 ./ && \ + docker rm -f elastic-helm-charts-certs && \ + openssl pkcs12 -nodes -passin pass:'' -in elastic-certificates.p12 -out elastic-certificate.pem && \ + kubectl create secret generic elastic-certificates --from-file=elastic-certificates.p12 && \ + kubectl create secret generic elastic-certificate-pem --from-file=elastic-certificate.pem && \ + kubectl create secret generic elastic-credentials --from-literal=password=$$password --from-literal=username=elastic && \ + rm -f elastic-certificates.p12 elastic-certificate.pem elastic-stack-ca.p12 diff --git a/charts/elastic/elasticsearch/examples/security/security.yml b/charts/elastic/elasticsearch/examples/security/security.yml new file mode 100644 index 0000000..04d932c --- /dev/null +++ b/charts/elastic/elasticsearch/examples/security/security.yml @@ -0,0 +1,38 @@ +--- +clusterName: "security" +nodeGroup: "master" + +roles: + master: "true" + ingest: "true" + data: "true" + +protocol: https + +esConfig: + elasticsearch.yml: | + xpack.security.enabled: true + xpack.security.transport.ssl.enabled: true + xpack.security.transport.ssl.verification_mode: certificate + xpack.security.transport.ssl.keystore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12 + xpack.security.transport.ssl.truststore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12 + xpack.security.http.ssl.enabled: true + xpack.security.http.ssl.truststore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12 + xpack.security.http.ssl.keystore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12 + +extraEnvs: + - name: ELASTIC_PASSWORD + valueFrom: + secretKeyRef: + name: elastic-credentials + key: password + - name: ELASTIC_USERNAME + valueFrom: + secretKeyRef: + name: elastic-credentials + key: username + +secretMounts: + - name: elastic-certificates + secretName: elastic-certificates + path: /usr/share/elasticsearch/config/certs diff --git a/charts/elastic/elasticsearch/examples/security/test/goss.yaml b/charts/elastic/elasticsearch/examples/security/test/goss.yaml new file mode 100644 index 0000000..c6d4b98 --- /dev/null +++ b/charts/elastic/elasticsearch/examples/security/test/goss.yaml @@ -0,0 +1,45 @@ +http: + https://security-master:9200/_cluster/health: + status: 200 + timeout: 2000 + allow-insecure: true + username: '{{ .Env.ELASTIC_USERNAME }}' + password: '{{ .Env.ELASTIC_PASSWORD }}' + body: + - 'green' + - '"number_of_nodes":3' + - '"number_of_data_nodes":3' + + https://localhost:9200/: + status: 200 + timeout: 2000 + allow-insecure: true + username: '{{ .Env.ELASTIC_USERNAME }}' + password: '{{ .Env.ELASTIC_PASSWORD }}' + body: + - '"cluster_name" : "security"' + - '"name" : "security-master-0"' + - 'You Know, for Search' + + https://localhost:9200/_xpack/license: + status: 200 + timeout: 2000 + allow-insecure: true + username: '{{ 
.Env.ELASTIC_USERNAME }}' + password: '{{ .Env.ELASTIC_PASSWORD }}' + body: + - 'active' + - 'basic' + +file: + /usr/share/elasticsearch/config/elasticsearch.yml: + exists: true + contains: + - 'xpack.security.enabled: true' + - 'xpack.security.transport.ssl.enabled: true' + - 'xpack.security.transport.ssl.verification_mode: certificate' + - 'xpack.security.transport.ssl.keystore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12' + - 'xpack.security.transport.ssl.truststore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12' + - 'xpack.security.http.ssl.enabled: true' + - 'xpack.security.http.ssl.truststore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12' + - 'xpack.security.http.ssl.keystore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12' diff --git a/charts/elastic/elasticsearch/examples/upgrade/Makefile b/charts/elastic/elasticsearch/examples/upgrade/Makefile new file mode 100644 index 0000000..9e1e6fd --- /dev/null +++ b/charts/elastic/elasticsearch/examples/upgrade/Makefile @@ -0,0 +1,25 @@ +default: test + +include ../../../helpers/examples.mk + +RELEASE := helm-es-upgrade + +# Right now the version is hardcoded because helm install will ignore +# anything with an alpha tag when trying to install the latest release +# This hardcoding can be removed once we drop the alpha tag +# The "--set terminationGracePeriod=121" always makes sure that a rolling +# upgrade is forced for this test +install: + helm repo add elastic https://helm.elastic.co && \ + helm upgrade --wait --timeout=600 --install $(RELEASE) elastic/elasticsearch --version 7.0.0-alpha1 --set clusterName=upgrade ; \ + kubectl rollout status sts/upgrade-master --timeout=600s + helm upgrade --wait --timeout=600 --set terminationGracePeriod=121 --install $(RELEASE) ../../ --set clusterName=upgrade ; \ + kubectl rollout status sts/upgrade-master --timeout=600s + +init: + helm init --client-only + +test: init install goss + +purge: + helm del --purge $(RELEASE) diff --git a/charts/elastic/elasticsearch/examples/upgrade/test/goss.yaml b/charts/elastic/elasticsearch/examples/upgrade/test/goss.yaml new file mode 100644 index 0000000..9a0d086 --- /dev/null +++ b/charts/elastic/elasticsearch/examples/upgrade/test/goss.yaml @@ -0,0 +1,17 @@ +http: + http://localhost:9200/_cluster/health: + status: 200 + timeout: 2000 + body: + - 'green' + - '"number_of_nodes":3' + - '"number_of_data_nodes":3' + + http://localhost:9200: + status: 200 + timeout: 2000 + body: + - '"number" : "7.4.1"' + - '"cluster_name" : "upgrade"' + - '"name" : "upgrade-master-0"' + - 'You Know, for Search' diff --git a/charts/elastic/elasticsearch/templates/NOTES.txt b/charts/elastic/elasticsearch/templates/NOTES.txt new file mode 100644 index 0000000..3600c6b --- /dev/null +++ b/charts/elastic/elasticsearch/templates/NOTES.txt @@ -0,0 +1,4 @@ +1. Watch all cluster members come up. + $ kubectl get pods --namespace={{ .Release.Namespace }} -l app={{ template "uname" . }} -w +2. Test cluster health using Helm test. + $ helm test {{ .Release.Name }} diff --git a/charts/elastic/elasticsearch/templates/_helpers.tpl b/charts/elastic/elasticsearch/templates/_helpers.tpl new file mode 100644 index 0000000..7866bca --- /dev/null +++ b/charts/elastic/elasticsearch/templates/_helpers.tpl @@ -0,0 +1,71 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. 
+*/}} +{{- define "name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "fullname" -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "uname" -}} +{{ .Values.clusterName }}-{{ .Values.nodeGroup }} +{{- end -}} + +{{- define "masterService" -}} +{{- if empty .Values.masterService -}} +{{ .Values.clusterName }}-master +{{- else -}} +{{ .Values.masterService }} +{{- end -}} +{{- end -}} + +{{- define "endpoints" -}} +{{- $replicas := int (toString (.Values.replicas)) }} +{{- $uname := printf "%s-%s" .Values.clusterName .Values.nodeGroup }} + {{- range $i, $e := untilStep 0 $replicas 1 -}} +{{ $uname }}-{{ $i }}, + {{- end -}} +{{- end -}} + +{{- define "esMajorVersion" -}} +{{- if .Values.esMajorVersion -}} +{{ .Values.esMajorVersion }} +{{- else -}} +{{- $version := int (index (.Values.imageTag | splitList ".") 0) -}} + {{- if and (contains "docker.elastic.co/elasticsearch/elasticsearch" .Values.image) (not (eq $version 0)) -}} +{{ $version }} + {{- else -}} +7 + {{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for statefulset. +*/}} +{{- define "elasticsearch.statefulset.apiVersion" -}} +{{- if semverCompare "<1.9-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "apps/v1beta2" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for ingress. +*/}} +{{- define "elasticsearch.ingress.apiVersion" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- end -}} +{{- end -}} diff --git a/charts/elastic/elasticsearch/templates/configmap.yaml b/charts/elastic/elasticsearch/templates/configmap.yaml new file mode 100644 index 0000000..7754178 --- /dev/null +++ b/charts/elastic/elasticsearch/templates/configmap.yaml @@ -0,0 +1,16 @@ +{{- if .Values.esConfig }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "uname" . }}-config + labels: + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}" + app: "{{ template "uname" . }}" +data: +{{- range $path, $config := .Values.esConfig }} + {{ $path }}: | +{{ $config | indent 4 -}} +{{- end -}} +{{- end -}} diff --git a/charts/elastic/elasticsearch/templates/ingress.yaml b/charts/elastic/elasticsearch/templates/ingress.yaml new file mode 100644 index 0000000..1715b97 --- /dev/null +++ b/charts/elastic/elasticsearch/templates/ingress.yaml @@ -0,0 +1,38 @@ +{{- if .Values.ingress.enabled -}} +{{- $fullName := include "uname" . -}} +{{- $servicePort := .Values.httpPort -}} +{{- $ingressPath := .Values.ingress.path -}} +apiVersion: {{ template "elasticsearch.ingress.apiVersion" . }} +kind: Ingress +metadata: + name: {{ $fullName }} + labels: + app: {{ .Chart.Name }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- with .Values.ingress.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} +spec: +{{- if .Values.ingress.tls }} + tls: + {{- range .Values.ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . 
}} + {{- end }} + secretName: {{ .secretName }} + {{- end }} +{{- end }} + rules: + {{- range .Values.ingress.hosts }} + - host: {{ . }} + http: + paths: + - path: {{ $ingressPath }} + backend: + serviceName: {{ $fullName }} + servicePort: {{ $servicePort }} + {{- end }} +{{- end }} diff --git a/charts/elastic/elasticsearch/templates/poddisruptionbudget.yaml b/charts/elastic/elasticsearch/templates/poddisruptionbudget.yaml new file mode 100644 index 0000000..e118f2c --- /dev/null +++ b/charts/elastic/elasticsearch/templates/poddisruptionbudget.yaml @@ -0,0 +1,12 @@ +--- +{{- if .Values.maxUnavailable }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: "{{ template "uname" . }}-pdb" +spec: + maxUnavailable: {{ .Values.maxUnavailable }} + selector: + matchLabels: + app: "{{ template "uname" . }}" +{{- end }} diff --git a/charts/elastic/elasticsearch/templates/podsecuritypolicy.yaml b/charts/elastic/elasticsearch/templates/podsecuritypolicy.yaml new file mode 100644 index 0000000..1e65b28 --- /dev/null +++ b/charts/elastic/elasticsearch/templates/podsecuritypolicy.yaml @@ -0,0 +1,14 @@ +{{- if .Values.podSecurityPolicy.create -}} +{{- $fullName := include "uname" . -}} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ default $fullName .Values.podSecurityPolicy.name | quote }} + labels: + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + app: {{ $fullName | quote }} +spec: +{{ toYaml .Values.podSecurityPolicy.spec | indent 2 }} +{{- end -}} diff --git a/charts/elastic/elasticsearch/templates/role.yaml b/charts/elastic/elasticsearch/templates/role.yaml new file mode 100644 index 0000000..d616e80 --- /dev/null +++ b/charts/elastic/elasticsearch/templates/role.yaml @@ -0,0 +1,25 @@ +{{- if .Values.rbac.create -}} +{{- $fullName := include "uname" . -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ $fullName | quote }} + labels: + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + app: {{ $fullName | quote }} +rules: + - apiGroups: + - extensions + resources: + - podsecuritypolicies + resourceNames: + {{- if eq .Values.podSecurityPolicy.name "" }} + - {{ $fullName | quote }} + {{- else }} + - {{ .Values.podSecurityPolicy.name | quote }} + {{- end }} + verbs: + - use +{{- end -}} diff --git a/charts/elastic/elasticsearch/templates/rolebinding.yaml b/charts/elastic/elasticsearch/templates/rolebinding.yaml new file mode 100644 index 0000000..c2b6070 --- /dev/null +++ b/charts/elastic/elasticsearch/templates/rolebinding.yaml @@ -0,0 +1,24 @@ +{{- if .Values.rbac.create -}} +{{- $fullName := include "uname" . 
-}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ $fullName | quote }} + labels: + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + app: {{ $fullName | quote }} +subjects: + - kind: ServiceAccount + {{- if eq .Values.rbac.serviceAccountName "" }} + name: {{ $fullName | quote }} + {{- else }} + name: {{ .Values.rbac.serviceAccountName | quote }} + {{- end }} + namespace: {{ .Release.Namespace | quote }} +roleRef: + kind: Role + name: {{ $fullName | quote }} + apiGroup: rbac.authorization.k8s.io +{{- end -}} diff --git a/charts/elastic/elasticsearch/templates/service.yaml b/charts/elastic/elasticsearch/templates/service.yaml new file mode 100644 index 0000000..0a89edb --- /dev/null +++ b/charts/elastic/elasticsearch/templates/service.yaml @@ -0,0 +1,52 @@ +--- +kind: Service +apiVersion: v1 +metadata: + name: {{ template "uname" . }} + labels: + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}" + app: "{{ template "uname" . }}" + annotations: +{{ toYaml .Values.service.annotations | indent 4 }} +spec: + type: {{ .Values.service.type }} + selector: + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}" + app: "{{ template "uname" . }}" + ports: + - name: {{ .Values.service.httpPortName | default "http" }} + protocol: TCP + port: {{ .Values.httpPort }} +{{- if .Values.service.nodePort }} + nodePort: {{ .Values.service.nodePort }} +{{- end }} + - name: {{ .Values.service.transportPortName | default "transport" }} + protocol: TCP + port: {{ .Values.transportPort }} +--- +kind: Service +apiVersion: v1 +metadata: + name: {{ template "uname" . }}-headless + labels: + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}" + app: "{{ template "uname" . }}" + annotations: + service.alpha.kubernetes.io/tolerate-unready-endpoints: "true" +spec: + clusterIP: None # This is needed for statefulset hostnames like elasticsearch-0 to resolve + # Create endpoints also if the related pod isn't ready + publishNotReadyAddresses: true + selector: + app: "{{ template "uname" . }}" + ports: + - name: {{ .Values.service.httpPortName | default "http" }} + port: {{ .Values.httpPort }} + - name: {{ .Values.service.transportPortName | default "transport" }} + port: {{ .Values.transportPort }} diff --git a/charts/elastic/elasticsearch/templates/serviceaccount.yaml b/charts/elastic/elasticsearch/templates/serviceaccount.yaml new file mode 100644 index 0000000..59bbd53 --- /dev/null +++ b/charts/elastic/elasticsearch/templates/serviceaccount.yaml @@ -0,0 +1,16 @@ +{{- if .Values.rbac.create -}} +{{- $fullName := include "uname" . 
-}} +apiVersion: v1 +kind: ServiceAccount +metadata: + {{- if eq .Values.rbac.serviceAccountName "" }} + name: {{ $fullName | quote }} + {{- else }} + name: {{ .Values.rbac.serviceAccountName | quote }} + {{- end }} + labels: + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + app: {{ $fullName | quote }} +{{- end -}} diff --git a/charts/elastic/elasticsearch/templates/statefulset.yaml b/charts/elastic/elasticsearch/templates/statefulset.yaml new file mode 100644 index 0000000..175f6a8 --- /dev/null +++ b/charts/elastic/elasticsearch/templates/statefulset.yaml @@ -0,0 +1,347 @@ +--- +apiVersion: {{ template "elasticsearch.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: {{ template "uname" . }} + labels: + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}" + app: "{{ template "uname" . }}" + {{- range $key, $value := .Values.labels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + annotations: + esMajorVersion: "{{ include "esMajorVersion" . }}" +spec: + serviceName: {{ template "uname" . }}-headless + selector: + matchLabels: + app: "{{ template "uname" . }}" + replicas: {{ .Values.replicas }} + podManagementPolicy: {{ .Values.podManagementPolicy }} + updateStrategy: + type: {{ .Values.updateStrategy }} + {{- if .Values.persistence.enabled }} + volumeClaimTemplates: + - metadata: + name: {{ template "uname" . }} + {{- with .Values.persistence.annotations }} + annotations: +{{ toYaml . | indent 8 }} + {{- end }} + spec: +{{ toYaml .Values.volumeClaimTemplate | indent 6 }} + {{- end }} + template: + metadata: + name: "{{ template "uname" . }}" + labels: + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}" + app: "{{ template "uname" . }}" + annotations: + {{- range $key, $value := .Values.podAnnotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} + {{/* This forces a restart if the configmap has changed */}} + {{- if .Values.esConfig }} + configchecksum: {{ include (print .Template.BasePath "/configmap.yaml") . | sha256sum | trunc 63 }} + {{- end }} + spec: + {{- if .Values.schedulerName }} + schedulerName: "{{ .Values.schedulerName }}" + {{- end }} + securityContext: +{{ toYaml .Values.podSecurityContext | indent 8 }} + {{- if .Values.fsGroup }} + fsGroup: {{ .Values.fsGroup }} # Deprecated value, please use .Values.podSecurityContext.fsGroup + {{- end }} + {{- if .Values.rbac.create }} + serviceAccountName: "{{ template "uname" . }}" + {{- else if not (eq .Values.rbac.serviceAccountName "") }} + serviceAccountName: {{ .Values.rbac.serviceAccountName | quote }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: +{{ toYaml . | indent 6 }} + {{- end }} + {{- with .Values.nodeSelector }} + nodeSelector: +{{ toYaml . 
| indent 8 }} + {{- end }} + {{- if or (eq .Values.antiAffinity "hard") (eq .Values.antiAffinity "soft") .Values.nodeAffinity }} + {{- if .Values.priorityClassName }} + priorityClassName: {{ .Values.priorityClassName }} + {{- end }} + affinity: + {{- end }} + {{- if eq .Values.antiAffinity "hard" }} + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: app + operator: In + values: + - "{{ template "uname" .}}" + topologyKey: {{ .Values.antiAffinityTopologyKey }} + {{- else if eq .Values.antiAffinity "soft" }} + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 1 + podAffinityTerm: + topologyKey: {{ .Values.antiAffinityTopologyKey }} + labelSelector: + matchExpressions: + - key: app + operator: In + values: + - "{{ template "uname" . }}" + {{- end }} + {{- with .Values.nodeAffinity }} + nodeAffinity: +{{ toYaml . | indent 10 }} + {{- end }} + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriod }} + volumes: + {{- range .Values.secretMounts }} + - name: {{ .name }} + secret: + secretName: {{ .secretName }} + {{- end }} + {{- if .Values.esConfig }} + - name: esconfig + configMap: + name: {{ template "uname" . }}-config + {{- end }} +{{- if .Values.keystore }} + - name: keystore + emptyDir: {} + {{- range .Values.keystore }} + - name: keystore-{{ .secretName }} + secret: {{ toYaml . | nindent 12 }} + {{- end }} +{{ end }} + {{- if .Values.extraVolumes }} +{{ tpl .Values.extraVolumes . | indent 8 }} + {{- end }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 8 }} + {{- end }} + initContainers: + {{- if .Values.sysctlInitContainer.enabled }} + - name: configure-sysctl + securityContext: + runAsUser: 0 + privileged: true + image: "{{ .Values.image }}:{{ .Values.imageTag }}" + command: ["sysctl", "-w", "vm.max_map_count={{ .Values.sysctlVmMaxMapCount}}"] + resources: +{{ toYaml .Values.initResources | indent 10 }} + {{- end }} +{{ if .Values.keystore }} + - name: keystore + image: "{{ .Values.image }}:{{ .Values.imageTag }}" + command: + - sh + - -c + - | + #!/usr/bin/env bash + set -euo pipefail + + elasticsearch-keystore create + + for i in /tmp/keystoreSecrets/*/*; do + key=$(basename $i) + echo "Adding file $i to keystore key $key" + elasticsearch-keystore add-file "$key" "$i" + done + + # Add the bootstrap password since otherwise the Elasticsearch entrypoint tries to do this on startup + if [ ! -z ${ELASTIC_PASSWORD+x} ]; then + echo 'Adding env $ELASTIC_PASSWORD to keystore as key bootstrap.password' + echo "$ELASTIC_PASSWORD" | elasticsearch-keystore add -x bootstrap.password + fi + + cp -a /usr/share/elasticsearch/config/elasticsearch.keystore /tmp/keystore/ + env: {{ toYaml .Values.extraEnvs | nindent 10 }} + resources: {{ toYaml .Values.initResources | nindent 10 }} + volumeMounts: + - name: keystore + mountPath: /tmp/keystore + {{- range .Values.keystore }} + - name: keystore-{{ .secretName }} + mountPath: /tmp/keystoreSecrets/{{ .secretName }} + {{- end }} +{{ end }} + {{- if .Values.extraInitContainers }} +{{ tpl .Values.extraInitContainers . | indent 6 }} + {{- end }} + containers: + - name: "{{ template "name" . 
}}" + securityContext: +{{ toYaml .Values.securityContext | indent 10 }} + image: "{{ .Values.image }}:{{ .Values.imageTag }}" + imagePullPolicy: "{{ .Values.imagePullPolicy }}" + readinessProbe: +{{ toYaml .Values.readinessProbe | indent 10 }} + exec: + command: + - sh + - -c + - | + #!/usr/bin/env bash -e + # If the node is starting up wait for the cluster to be ready (request params: '{{ .Values.clusterHealthCheckParams }}' ) + # Once it has started only check that the node itself is responding + START_FILE=/tmp/.es_start_file + + http () { + local path="${1}" + if [ -n "${ELASTIC_USERNAME}" ] && [ -n "${ELASTIC_PASSWORD}" ]; then + BASIC_AUTH="-u ${ELASTIC_USERNAME}:${ELASTIC_PASSWORD}" + else + BASIC_AUTH='' + fi + curl -XGET -s -k --fail ${BASIC_AUTH} {{ .Values.protocol }}://127.0.0.1:{{ .Values.httpPort }}${path} + } + + if [ -f "${START_FILE}" ]; then + echo 'Elasticsearch is already running, lets check the node is healthy' + http "/" + else + echo 'Waiting for elasticsearch cluster to become cluster to be ready (request params: "{{ .Values.clusterHealthCheckParams }}" )' + if http "/_cluster/health?{{ .Values.clusterHealthCheckParams }}" ; then + touch ${START_FILE} + exit 0 + else + echo 'Cluster is not yet ready (request params: "{{ .Values.clusterHealthCheckParams }}" )' + exit 1 + fi + fi + ports: + - name: http + containerPort: {{ .Values.httpPort }} + - name: transport + containerPort: {{ .Values.transportPort }} + resources: +{{ toYaml .Values.resources | indent 10 }} + env: + - name: node.name + valueFrom: + fieldRef: + fieldPath: metadata.name + {{- if eq .Values.roles.master "true" }} + {{- if ge (int (include "esMajorVersion" .)) 7 }} + - name: cluster.initial_master_nodes + value: "{{ template "endpoints" . }}" + {{- else }} + - name: discovery.zen.minimum_master_nodes + value: "{{ .Values.minimumMasterNodes }}" + {{- end }} + {{- end }} + {{- if lt (int (include "esMajorVersion" .)) 7 }} + - name: discovery.zen.ping.unicast.hosts + value: "{{ template "masterService" . }}-headless" + {{- else }} + - name: discovery.seed_hosts + value: "{{ template "masterService" . }}-headless" + {{- end }} + - name: cluster.name + value: "{{ .Values.clusterName }}" + - name: network.host + value: "{{ .Values.networkHost }}" + - name: ES_JAVA_OPTS + value: "{{ .Values.esJavaOpts }}" + {{- range $role, $enabled := .Values.roles }} + - name: node.{{ $role }} + value: "{{ $enabled }}" + {{- end }} +{{- if .Values.extraEnvs }} +{{ toYaml .Values.extraEnvs | indent 10 }} +{{- end }} + volumeMounts: + {{- if .Values.persistence.enabled }} + - name: "{{ template "uname" . }}" + mountPath: /usr/share/elasticsearch/data + {{- end }} +{{ if .Values.keystore }} + - name: keystore + mountPath: /usr/share/elasticsearch/config/elasticsearch.keystore + subPath: elasticsearch.keystore +{{ end }} + {{- range .Values.secretMounts }} + - name: {{ .name }} + mountPath: {{ .path }} + {{- if .subPath }} + subPath: {{ .subPath }} + {{- end }} + {{- end }} + {{- range $path, $config := .Values.esConfig }} + - name: esconfig + mountPath: /usr/share/elasticsearch/config/{{ $path }} + subPath: {{ $path }} + {{- end -}} + {{- if .Values.extraVolumeMounts }} +{{ tpl .Values.extraVolumeMounts . 
| indent 10 }} + {{- end }} + {{- if .Values.masterTerminationFix }} + {{- if eq .Values.roles.master "true" }} + # This sidecar will prevent slow master re-election + # https://github.com/elastic/helm-charts/issues/63 + - name: elasticsearch-master-graceful-termination-handler + image: "{{ .Values.image }}:{{ .Values.imageTag }}" + imagePullPolicy: "{{ .Values.imagePullPolicy }}" + command: + - "sh" + - -c + - | + #!/usr/bin/env bash + set -eo pipefail + + http () { + local path="${1}" + if [ -n "${ELASTIC_USERNAME}" ] && [ -n "${ELASTIC_PASSWORD}" ]; then + BASIC_AUTH="-u ${ELASTIC_USERNAME}:${ELASTIC_PASSWORD}" + else + BASIC_AUTH='' + fi + curl -XGET -s -k --fail ${BASIC_AUTH} {{ .Values.protocol }}://{{ template "masterService" . }}:{{ .Values.httpPort }}${path} + } + + cleanup () { + while true ; do + local master="$(http "/_cat/master?h=node" || echo "")" + if [[ $master == "{{ template "masterService" . }}"* && $master != "${NODE_NAME}" ]]; then + echo "This node is not master." + break + fi + echo "This node is still master, waiting gracefully for it to step down" + sleep 1 + done + + exit 0 + } + + trap cleanup SIGTERM + + sleep infinity & + wait $! + resources: +{{ toYaml .Values.sidecarResources | indent 10 }} + env: + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + {{- if .Values.extraEnvs }} +{{ toYaml .Values.extraEnvs | indent 10 }} + {{- end }} + {{- end }} + {{- end }} +{{- if .Values.lifecycle }} + lifecycle: +{{ toYaml .Values.lifecycle | indent 10 }} +{{- end }} diff --git a/charts/elastic/elasticsearch/templates/test/test-elasticsearch-health.yaml b/charts/elastic/elasticsearch/templates/test/test-elasticsearch-health.yaml new file mode 100644 index 0000000..1454115 --- /dev/null +++ b/charts/elastic/elasticsearch/templates/test/test-elasticsearch-health.yaml @@ -0,0 +1,18 @@ +--- +apiVersion: v1 +kind: Pod +metadata: + name: "{{ .Release.Name }}-{{ randAlpha 5 | lower }}-test" + annotations: + "helm.sh/hook": test-success +spec: + containers: + - name: "{{ .Release.Name }}-{{ randAlpha 5 | lower }}-test" + image: "{{ .Values.image }}:{{ .Values.imageTag }}" + command: + - "sh" + - "-c" + - | + #!/usr/bin/env bash -e + curl -XGET --fail '{{ template "uname" . }}:{{ .Values.httpPort }}/_cluster/health?{{ .Values.clusterHealthCheckParams }}' + restartPolicy: Never diff --git a/charts/elastic/elasticsearch/values.yaml b/charts/elastic/elasticsearch/values.yaml new file mode 100644 index 0000000..72bf2b4 --- /dev/null +++ b/charts/elastic/elasticsearch/values.yaml @@ -0,0 +1,239 @@ +--- +clusterName: "elasticsearch" +nodeGroup: "master" + +# The service that non master groups will try to connect to when joining the cluster +# This should be set to clusterName + "-" + nodeGroup for your master group +masterService: "" + +# Elasticsearch roles that will be applied to this nodeGroup +# These will be set as environment variables. E.g. node.master=true +roles: + master: "true" + ingest: "true" + data: "true" + +replicas: 3 +minimumMasterNodes: 2 + +esMajorVersion: "" + +# Allows you to add any config files in /usr/share/elasticsearch/config/ +# such as elasticsearch.yml and log4j2.properties +esConfig: {} +# elasticsearch.yml: | +# key: +# nestedkey: value +# log4j2.properties: | +# key = value + +# Extra environment variables to append to this nodeGroup +# This will be appended to the current 'env:' key. 
You can use any of the Kubernetes env
+# syntax here
+extraEnvs: []
+#  - name: MY_ENVIRONMENT_VAR
+#    value: the_value_goes_here
+
+# A list of secrets and their paths to mount inside the pod
+# This is useful for mounting certificates for security and for mounting
+# the X-Pack license
+secretMounts: []
+#  - name: elastic-certificates
+#    secretName: elastic-certificates
+#    path: /usr/share/elasticsearch/config/certs
+
+image: "docker.elastic.co/elasticsearch/elasticsearch"
+imageTag: "7.4.1"
+imagePullPolicy: "IfNotPresent"
+
+podAnnotations: {}
+  # iam.amazonaws.com/role: es-cluster
+
+# additional labels
+labels: {}
+
+esJavaOpts: "-Xmx1g -Xms1g"
+
+resources:
+  requests:
+    cpu: "100m"
+    memory: "2Gi"
+  limits:
+    cpu: "1000m"
+    memory: "2Gi"
+
+initResources: {}
+  # limits:
+  #   cpu: "25m"
+  #   # memory: "128Mi"
+  # requests:
+  #   cpu: "25m"
+  #   memory: "128Mi"
+
+sidecarResources: {}
+  # limits:
+  #   cpu: "25m"
+  #   # memory: "128Mi"
+  # requests:
+  #   cpu: "25m"
+  #   memory: "128Mi"
+
+networkHost: "0.0.0.0"
+
+volumeClaimTemplate:
+  accessModes: [ "ReadWriteOnce" ]
+  resources:
+    requests:
+      storage: 30Gi
+
+rbac:
+  create: false
+  serviceAccountName: ""
+
+podSecurityPolicy:
+  create: false
+  name: ""
+  spec:
+    privileged: true
+    fsGroup:
+      rule: RunAsAny
+    runAsUser:
+      rule: RunAsAny
+    seLinux:
+      rule: RunAsAny
+    supplementalGroups:
+      rule: RunAsAny
+    volumes:
+      - secret
+      - configMap
+      - persistentVolumeClaim
+
+persistence:
+  enabled: true
+  annotations: {}
+
+extraVolumes: ""
+  # - name: extras
+  #   emptyDir: {}
+
+extraVolumeMounts: ""
+  # - name: extras
+  #   mountPath: /usr/share/extras
+  #   readOnly: true
+
+extraInitContainers: ""
+  # - name: do-something
+  #   image: busybox
+  #   command: ['do', 'something']
+
+# These are the PriorityClass settings as defined in
+# https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass
+priorityClassName: ""
+
+# By default this will make sure two pods don't end up on the same node
+# Changing this to a region would allow you to spread pods across regions
+antiAffinityTopologyKey: "kubernetes.io/hostname"
+
+# Hard means that by default pods will only be scheduled if there are enough nodes for them
+# and that they will never end up on the same node. Setting this to soft makes this "best effort"
+antiAffinity: "hard"
+
+# These are the node affinity settings as defined in
+# https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#node-affinity-beta-feature
+nodeAffinity: {}
+
+# The default is to deploy all pods serially. 
By setting this to parallel, all pods are started at
+# the same time when bootstrapping the cluster
+podManagementPolicy: "Parallel"
+
+protocol: http
+httpPort: 9200
+transportPort: 9300
+
+service:
+  type: ClusterIP
+  nodePort: ""
+  annotations: {}
+  httpPortName: http
+  transportPortName: transport
+
+updateStrategy: RollingUpdate
+
+# This is the max unavailable setting for the pod disruption budget
+# The default value of 1 will make sure that Kubernetes won't allow more than 1
+# of your pods to be unavailable during maintenance
+maxUnavailable: 1
+
+podSecurityContext:
+  fsGroup: 1000
+  runAsUser: 1000
+
+# The following value is deprecated,
+# please use the above podSecurityContext.fsGroup instead
+fsGroup: ""
+
+securityContext:
+  capabilities:
+    drop:
+    - ALL
+  # readOnlyRootFilesystem: true
+  runAsNonRoot: true
+  runAsUser: 1000
+
+# How long to wait for Elasticsearch to stop gracefully
+terminationGracePeriod: 120
+
+sysctlVmMaxMapCount: 262144
+
+readinessProbe:
+  failureThreshold: 3
+  initialDelaySeconds: 10
+  periodSeconds: 10
+  successThreshold: 3
+  timeoutSeconds: 5
+
+# https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html#request-params wait_for_status
+clusterHealthCheckParams: "wait_for_status=green&timeout=1s"
+
+## Use an alternate scheduler.
+## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
+##
+schedulerName: ""
+
+imagePullSecrets: []
+nodeSelector: {}
+tolerations: []
+
+# Enabling this will publicly expose your Elasticsearch instance.
+# Only enable this if you have security enabled on your cluster
+ingress:
+  enabled: false
+  annotations: {}
+    # kubernetes.io/ingress.class: nginx
+    # kubernetes.io/tls-acme: "true"
+  path: /
+  hosts:
+    - chart-example.local
+  tls: []
+  #  - secretName: chart-example-tls
+  #    hosts:
+  #      - chart-example.local
+
+nameOverride: ""
+fullnameOverride: ""
+
+# https://github.com/elastic/helm-charts/issues/63
+masterTerminationFix: false
+
+lifecycle: {}
+  # preStop:
+  #   exec:
+  #     command: ["/bin/sh", "-c", "echo Hello from the preStop handler > /usr/share/message"]
+  # postStart:
+  #   exec:
+  #     command: ["/bin/sh", "-c", "echo Hello from the postStart handler > /usr/share/message"]
+
+sysctlInitContainer:
+  enabled: true
+
+keystore: []
diff --git a/charts/elastic/kibana/.helmignore b/charts/elastic/kibana/.helmignore
new file mode 100644
index 0000000..e12c0b4
--- /dev/null
+++ b/charts/elastic/kibana/.helmignore
@@ -0,0 +1,2 @@
+tests/
+.pytest_cache/
diff --git a/charts/elastic/kibana/Chart.yaml b/charts/elastic/kibana/Chart.yaml
new file mode 100644
index 0000000..6f53e00
--- /dev/null
+++ b/charts/elastic/kibana/Chart.yaml
@@ -0,0 +1,12 @@
+apiVersion: v1
+appVersion: 7.4.1
+description: Official Elastic helm chart for Kibana
+home: https://github.com/elastic/helm-charts
+icon: https://helm.elastic.co/icons/kibana.png
+maintainers:
+- email: helm-charts@elastic.co
+  name: Elastic
+name: kibana
+sources:
+- https://github.com/elastic/kibana
+version: 7.4.1
diff --git a/charts/elastic/kibana/Makefile b/charts/elastic/kibana/Makefile
new file mode 100644
index 0000000..22218a1
--- /dev/null
+++ b/charts/elastic/kibana/Makefile
@@ -0,0 +1 @@
+include ../helpers/common.mk
diff --git a/charts/elastic/kibana/README.md b/charts/elastic/kibana/README.md
new file mode 100644
index 0000000..cbeedfe
--- /dev/null
+++ b/charts/elastic/kibana/README.md
@@ -0,0 +1,133 @@
+# Kibana Helm Chart
+
+This functionality is in beta and is subject to change. 
The design and code are less mature than official GA features and are being provided as-is with no warranties. Beta features are not subject to the support SLA of official GA features.
+
+This helm chart is a lightweight way to configure and run our official [Kibana docker image](https://www.elastic.co/guide/en/kibana/current/docker.html).
+
+## Requirements
+
+* Kubernetes >= 1.9
+* [Helm](https://helm.sh/) >= 2.8.0 (see parent [README](../README.md) for more details)
+
+## Installing
+
+* Add the elastic helm charts repo
+  ```
+  helm repo add elastic https://helm.elastic.co
+  ```
+* Install it
+  ```
+  helm install --name kibana elastic/kibana
+  ```
+
+## Compatibility
+
+This chart is tested with the latest supported versions. The currently tested versions are:
+
+| 6.x   | 7.x   |
+| ----- | ----- |
+| 6.8.4 | 7.4.1 |
+
+Examples of installing older major versions can be found in the [examples](./examples) directory.
+
+While only the latest releases are tested, it is possible to easily install old or new releases by overriding the `imageTag`. To install version `7.4.1` of Kibana it would look like this:
+
+```
+helm install --name kibana elastic/kibana --set imageTag=7.4.1
+```
+
+## Configuration
+
+| Parameter | Description | Default |
+| ------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------- |
+| `elasticsearchHosts` | The URLs used to connect to Elasticsearch. | `http://elasticsearch-master:9200` |
+| `elasticsearchURL` | The URL used to connect to Elasticsearch. Deprecated: only needed for Kibana versions < 6.6 | |
+| `replicas` | Kubernetes replica count for the deployment (i.e. how many pods) | `1` |
+| `extraEnvs` | Extra [environment variables](https://kubernetes.io/docs/tasks/inject-data-application/define-environment-variable-container/#using-environment-variables-inside-of-your-config) which will be appended to the `env:` definition for the container | `[]` |
+| `secretMounts` | Allows you to easily mount a secret as a file inside the deployment. Useful for mounting certificates and other secrets. See [values.yaml](./values.yaml) for an example | `[]` |
+| `image` | The Kibana docker image | `docker.elastic.co/kibana/kibana` |
+| `imageTag` | The Kibana docker image tag | `7.4.1` |
+| `imagePullPolicy` | The Kubernetes [imagePullPolicy](https://kubernetes.io/docs/concepts/containers/images/#updating-images) value | `IfNotPresent` |
+| `podAnnotations` | Configurable [annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/) applied to all Kibana pods | `{}` |
+| `resources` | Allows you to set the [resources](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/) for the deployment | `requests.cpu: 100m`<br>
`requests.memory: 500Mi`
`limits.cpu: 1000m`
`limits.memory: 2Gi` | +| `protocol` | The protocol that will be used for the readinessProbe. Change this to `https` if you have `server.ssl.enabled: true` set | `http` | +| `serverHost` | The [`server.host`](https://www.elastic.co/guide/en/kibana/current/settings.html) Kibana setting. This is set explicitly so that the default always matches what comes with the docker image. | `0.0.0.0` | +| `healthCheckPath` | The path used for the readinessProbe to check that Kibana is ready. If you are setting `server.basePath` you will also need to update this to `/${basePath}/app/kibana` | `/app/kibana` | +| `kibanaConfig` | Allows you to add any config files in `/usr/share/kibana/config/` such as `kibana.yml`. See [values.yaml](./values.yaml) for an example of the formatting. | `{}` | +| `podSecurityContext` | Allows you to set the [securityContext](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod) for the pod | `fsGroup: 1000` | +| `securityContext` | Allows you to set the [securityContext](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container) for the container | `capabilities.drop:[ALL]`
`runAsNonRoot: true`
`runAsUser: 1000` | +| `serviceAccount` | Allows you to overwrite the "default" [serviceAccount](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/) for the pod | `[]` | +| `priorityClassName` | The [name of the PriorityClass](https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass). No default is supplied as the PriorityClass must be created first. | `""` | +| `httpPort` | The http port that Kubernetes will use for the healthchecks and the service. | `5601` | +| `maxUnavailable` | The [maxUnavailable](https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget) value for the pod disruption budget. By default this will prevent Kubernetes from having more than 1 unhealthy pod | `1` | +| `updateStrategy` | Allows you to change the default update [strategy](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#updating-a-deployment) for the deployment. A [standard upgrade](https://www.elastic.co/guide/en/kibana/current/upgrade-standard.html) of Kibana requires a full stop and start which is why the default strategy is set to `Recreate` | `Recreate` | +| `readinessProbe` | Configuration for the [readinessProbe](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/) | `failureThreshold: 3`
`initialDelaySeconds: 10`
`periodSeconds: 10`
`successThreshold: 3`
`timeoutSeconds: 5` | +| `imagePullSecrets` | Configuration for [imagePullSecrets](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/#create-a-pod-that-uses-your-secret) so that you can use a private registry for your image | `[]` | +| `nodeSelector` | Configurable [nodeSelector](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector) so that you can target specific nodes for your Kibana instances | `{}` | +| `tolerations` | Configurable [tolerations](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/) | `[]` | +| `ingress` | Configurable [ingress](https://kubernetes.io/docs/concepts/services-networking/ingress/) to expose the Kibana service. See [`values.yaml`](./values.yaml) for an example | `enabled: false` | +| `service` | Configurable [service](https://kubernetes.io/docs/concepts/services-networking/service/) to expose the Kibana service. See [`values.yaml`](./values.yaml) for an example | `type: ClusterIP`
`port: 5601`
`nodePort:`
`annotations: {}` |
+| `labels` | Configurable [label](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/) applied to all Kibana pods | `{}` |
+| `lifecycle` | Allows you to add lifecycle configuration. See [values.yaml](./values.yaml) for an example of the formatting. | `{}` |
+
+## Examples
+
+In [examples/](./examples) you will find some example configurations. These examples are also used for the automated testing of this helm chart.
+
+### Default
+
+* Deploy the [default Elasticsearch helm chart](../elasticsearch/README.md#default)
+* Deploy Kibana with the default values
+  ```
+  cd examples/default
+  make
+  ```
+* You can now set up a port forward and access Kibana at http://localhost:5601
+  ```
+  kubectl port-forward deployment/helm-kibana-default-kibana 5601
+  ```
+
+### Security
+
+* Deploy a [security enabled Elasticsearch cluster](../elasticsearch/README.md#security)
+* Deploy Kibana with the security example
+  ```
+  cd examples/security
+  make
+  ```
+* Set up a port forward and access Kibana at https://localhost:5601
+  ```
+  # Set up the port forward
+  kubectl port-forward deployment/helm-kibana-security-kibana 5601
+
+  # Run this in a separate terminal
+  # Get the auto-generated password
+  password=$(kubectl get secret elastic-credentials -o jsonpath='{.data.password}' | base64 --decode)
+  echo $password
+
+  # Test that Kibana is working with curl, or access it with your browser at https://localhost:5601
+  # The example certificate is self-signed, so you may see a warning about the certificate
+  curl -I -k -u elastic:$password https://localhost:5601/app/kibana
+  ```
+
+## Testing
+
+This chart uses [pytest](https://docs.pytest.org/en/latest/) to test the templating logic. The dependencies for testing can be installed from the [`requirements.txt`](../requirements.txt) in the parent directory.
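+
+Note that these are unit tests of the rendered templates rather than tests against a live
+cluster; the cluster-level checks are the `examples/*/test/goss.yaml` files shown above. If you
+prefer to invoke pytest directly instead of going through make, pointing it at the chart's
+`tests/` directory (the directory excluded by `.helmignore`) should work -- a sketch, assuming
+standard pytest discovery:
+
+```
+pip install -r ../requirements.txt
+pytest -v tests/
+```
+
+The equivalent flow through the provided make targets: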
+ +``` +pip install -r ../requirements.txt +make test +``` + + +You can also use `helm template` to look at the YAML being generated + +``` +make template +``` + +It is possible to run all of the tests and linting inside of a docker container + +``` +make test +``` diff --git a/charts/elastic/kibana/examples/6.x/Makefile b/charts/elastic/kibana/examples/6.x/Makefile new file mode 100644 index 0000000..468e2b8 --- /dev/null +++ b/charts/elastic/kibana/examples/6.x/Makefile @@ -0,0 +1,12 @@ +default: test +include ../../../helpers/examples.mk + +RELEASE := helm-kibana-six + +install: + helm upgrade --wait --timeout=600 --install --values ./values.yml $(RELEASE) ../../ + +purge: + helm del --purge $(RELEASE) + +test: install goss diff --git a/charts/elastic/kibana/examples/6.x/test/goss.yaml b/charts/elastic/kibana/examples/6.x/test/goss.yaml new file mode 100644 index 0000000..50bdc31 --- /dev/null +++ b/charts/elastic/kibana/examples/6.x/test/goss.yaml @@ -0,0 +1,10 @@ +http: + http://localhost:5601/api/status: + status: 200 + timeout: 2000 + body: + - '"number":"6.8.4"' + + http://localhost:5601/app/kibana: + status: 200 + timeout: 2000 diff --git a/charts/elastic/kibana/examples/6.x/values.yml b/charts/elastic/kibana/examples/6.x/values.yml new file mode 100644 index 0000000..2c59f76 --- /dev/null +++ b/charts/elastic/kibana/examples/6.x/values.yml @@ -0,0 +1,4 @@ +--- + +imageTag: 6.8.4 +elasticsearchHosts: "http://six-master:9200" diff --git a/charts/elastic/kibana/examples/default/Makefile b/charts/elastic/kibana/examples/default/Makefile new file mode 100644 index 0000000..39caa30 --- /dev/null +++ b/charts/elastic/kibana/examples/default/Makefile @@ -0,0 +1,13 @@ +default: test +include ../../../helpers/examples.mk + +RELEASE := helm-kibana-default + +install: + echo "Goss container: $(GOSS_CONTAINER)" + helm upgrade --wait --timeout=600 --install $(RELEASE) ../../ + +test: install goss + +purge: + helm del --purge $(RELEASE) diff --git a/charts/elastic/kibana/examples/default/test/goss.yaml b/charts/elastic/kibana/examples/default/test/goss.yaml new file mode 100644 index 0000000..4bd704c --- /dev/null +++ b/charts/elastic/kibana/examples/default/test/goss.yaml @@ -0,0 +1,14 @@ +http: + http://localhost:5601/api/status: + status: 200 + timeout: 2000 + body: + - '"number":"7.4.1"' + + http://localhost:5601/app/kibana: + status: 200 + timeout: 2000 + + http://helm-kibana-default-kibana:5601/app/kibana: + status: 200 + timeout: 2000 diff --git a/charts/elastic/kibana/examples/openshift/Makefile b/charts/elastic/kibana/examples/openshift/Makefile new file mode 100644 index 0000000..9dccc65 --- /dev/null +++ b/charts/elastic/kibana/examples/openshift/Makefile @@ -0,0 +1,15 @@ +default: test +include ../../../helpers/examples.mk + +RELEASE := kibana + +template: + helm template --values ./values.yml ../../ + +install: + helm upgrade --wait --timeout=600 --install --values ./values.yml $(RELEASE) ../../ + +test: install goss + +purge: + helm del --purge $(RELEASE) diff --git a/charts/elastic/kibana/examples/openshift/test/goss.yaml b/charts/elastic/kibana/examples/openshift/test/goss.yaml new file mode 100644 index 0000000..35aee7d --- /dev/null +++ b/charts/elastic/kibana/examples/openshift/test/goss.yaml @@ -0,0 +1,4 @@ +http: + http://localhost:5601/app/kibana: + status: 200 + timeout: 2000 diff --git a/charts/elastic/kibana/examples/openshift/values.yml b/charts/elastic/kibana/examples/openshift/values.yml new file mode 100644 index 0000000..558306f --- /dev/null +++ 
b/charts/elastic/kibana/examples/openshift/values.yml @@ -0,0 +1,7 @@ +--- + +podSecurityContext: + fsGroup: null + +securityContext: + runAsUser: null diff --git a/charts/elastic/kibana/examples/oss/Makefile b/charts/elastic/kibana/examples/oss/Makefile new file mode 100644 index 0000000..77c6412 --- /dev/null +++ b/charts/elastic/kibana/examples/oss/Makefile @@ -0,0 +1,12 @@ +default: test +include ../../../helpers/examples.mk + +RELEASE := helm-kibana-oss + +install: + helm upgrade --wait --timeout=600 --install --values ./values.yml $(RELEASE) ../../ + +test: install goss + +purge: + helm del --purge $(RELEASE) diff --git a/charts/elastic/kibana/examples/oss/test/goss.yaml b/charts/elastic/kibana/examples/oss/test/goss.yaml new file mode 100644 index 0000000..35aee7d --- /dev/null +++ b/charts/elastic/kibana/examples/oss/test/goss.yaml @@ -0,0 +1,4 @@ +http: + http://localhost:5601/app/kibana: + status: 200 + timeout: 2000 diff --git a/charts/elastic/kibana/examples/oss/values.yml b/charts/elastic/kibana/examples/oss/values.yml new file mode 100644 index 0000000..eb0203c --- /dev/null +++ b/charts/elastic/kibana/examples/oss/values.yml @@ -0,0 +1,4 @@ +--- + +image: "docker.elastic.co/kibana/kibana-oss" +elasticsearchHosts: "http://oss-master:9200" diff --git a/charts/elastic/kibana/examples/security/Makefile b/charts/elastic/kibana/examples/security/Makefile new file mode 100644 index 0000000..a54769d --- /dev/null +++ b/charts/elastic/kibana/examples/security/Makefile @@ -0,0 +1,17 @@ +default: test +include ../../../helpers/examples.mk + +RELEASE := helm-kibana-security + +install: + helm upgrade --wait --timeout=600 --install --values ./security.yml $(RELEASE) ../../ + +test: secrets install goss + +purge: + kubectl delete secret kibana || true + helm del --purge $(RELEASE) + +secrets: + encryptionkey=$$(echo $$(docker run --rm docker.elastic.co/elasticsearch/elasticsearch:$(STACK_VERSION) /bin/sh -c "< /dev/urandom tr -dc _A-Z-a-z-0-9 | head -c50")) && \ + kubectl create secret generic kibana --from-literal=encryptionkey=$$encryptionkey diff --git a/charts/elastic/kibana/examples/security/security.yml b/charts/elastic/kibana/examples/security/security.yml new file mode 100644 index 0000000..dfc9d5d --- /dev/null +++ b/charts/elastic/kibana/examples/security/security.yml @@ -0,0 +1,38 @@ +--- + +elasticsearchHosts: "https://security-master:9200" + +extraEnvs: + - name: 'ELASTICSEARCH_USERNAME' + valueFrom: + secretKeyRef: + name: elastic-credentials + key: username + - name: 'ELASTICSEARCH_PASSWORD' + valueFrom: + secretKeyRef: + name: elastic-credentials + key: password + - name: 'KIBANA_ENCRYPTION_KEY' + valueFrom: + secretKeyRef: + name: kibana + key: encryptionkey + +kibanaConfig: + kibana.yml: | + server.ssl: + enabled: true + key: /usr/share/kibana/config/certs/elastic-certificate.pem + certificate: /usr/share/kibana/config/certs/elastic-certificate.pem + xpack.security.encryptionKey: ${KIBANA_ENCRYPTION_KEY} + elasticsearch.ssl: + certificateAuthorities: /usr/share/kibana/config/certs/elastic-certificate.pem + verificationMode: certificate + +protocol: https + +secretMounts: + - name: elastic-certificate-pem + secretName: elastic-certificate-pem + path: /usr/share/kibana/config/certs diff --git a/charts/elastic/kibana/examples/security/test/goss.yaml b/charts/elastic/kibana/examples/security/test/goss.yaml new file mode 100644 index 0000000..39e9a48 --- /dev/null +++ b/charts/elastic/kibana/examples/security/test/goss.yaml @@ -0,0 +1,27 @@ +http: + 
https://localhost:5601/app/kibana: + status: 200 + timeout: 2000 + allow-insecure: true + username: '{{ .Env.ELASTICSEARCH_USERNAME }}' + password: '{{ .Env.ELASTICSEARCH_PASSWORD }}' + + https://helm-kibana-security-kibana:5601/app/kibana: + status: 200 + timeout: 2000 + allow-insecure: true + username: '{{ .Env.ELASTICSEARCH_USERNAME }}' + password: '{{ .Env.ELASTICSEARCH_PASSWORD }}' + +file: + /usr/share/kibana/config/kibana.yml: + exists: true + contains: + - 'server.ssl:' + - ' enabled: true' + - ' key: /usr/share/kibana/config/certs/elastic-certificate.pem' + - ' certificate: /usr/share/kibana/config/certs/elastic-certificate.pem' + - 'xpack.security.encryptionKey:' + - 'elasticsearch.ssl:' + - ' certificateAuthorities: /usr/share/kibana/config/certs/elastic-certificate.pem' + - ' verificationMode: certificate' diff --git a/charts/elastic/kibana/templates/_helpers.tpl b/charts/elastic/kibana/templates/_helpers.tpl new file mode 100644 index 0000000..a5254f4 --- /dev/null +++ b/charts/elastic/kibana/templates/_helpers.tpl @@ -0,0 +1,27 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "fullname" -}} +{{- $name := default .Release.Name .Values.nameOverride -}} +{{- printf "%s-%s" $name .Chart.Name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for ingress. +*/}} +{{- define "kibana.ingress.apiVersion" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- end -}} +{{- end -}} diff --git a/charts/elastic/kibana/templates/configmap.yaml b/charts/elastic/kibana/templates/configmap.yaml new file mode 100644 index 0000000..24869a1 --- /dev/null +++ b/charts/elastic/kibana/templates/configmap.yaml @@ -0,0 +1,15 @@ +{{- if .Values.kibanaConfig }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "fullname" . }}-config + labels: + app: {{ .Chart.Name }} + release: {{ .Release.Name | quote }} +data: +{{- range $path, $config := .Values.kibanaConfig }} + {{ $path }}: | +{{ $config | indent 4 -}} +{{- end -}} +{{- end -}} diff --git a/charts/elastic/kibana/templates/deployment.yaml b/charts/elastic/kibana/templates/deployment.yaml new file mode 100644 index 0000000..6ae36ae --- /dev/null +++ b/charts/elastic/kibana/templates/deployment.yaml @@ -0,0 +1,133 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "fullname" . }} + labels: + app: {{ .Chart.Name }} + release: {{ .Release.Name | quote }} + {{- range $key, $value := .Values.labels }} + {{ $key }}: {{ $value | quote }} + {{- end }} +spec: + replicas: {{ .Values.replicas }} + strategy: +{{ toYaml .Values.updateStrategy | indent 4 }} + selector: + matchLabels: + app: kibana + release: {{ .Release.Name | quote }} + template: + metadata: + labels: + app: kibana + release: {{ .Release.Name | quote }} + annotations: + {{- range $key, $value := .Values.podAnnotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} + {{/* This forces a restart if the configmap has changed */}} + {{- if .Values.kibanaConfig }} + configchecksum: {{ include (print .Template.BasePath "/configmap.yaml") . 
| sha256sum | trunc 63 }} + {{- end }} + spec: +{{- if .Values.priorityClassName }} + priorityClassName: {{ .Values.priorityClassName }} +{{- end }} + securityContext: +{{ toYaml .Values.podSecurityContext | indent 8 }} + {{- if .Values.serviceAccount }} + serviceAccount: {{ .Values.serviceAccount }} + {{- end }} + volumes: + {{- range .Values.secretMounts }} + - name: {{ .name }} + secret: + secretName: {{ .secretName }} + {{- end }} + {{- if .Values.kibanaConfig }} + - name: kibanaconfig + configMap: + name: {{ template "fullname" . }}-config + {{- end }} + {{- with .Values.nodeSelector }} + nodeSelector: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} + {{- end }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 8 }} + {{- end }} + containers: + - name: kibana + securityContext: +{{ toYaml .Values.securityContext | indent 10 }} + image: "{{ .Values.image }}:{{ .Values.imageTag }}" + imagePullPolicy: "{{ .Values.imagePullPolicy }}" + env: + {{- if .Values.elasticsearchURL }} + - name: ELASTICSEARCH_URL + value: "{{ .Values.elasticsearchURL }}" + {{- else if .Values.elasticsearchHosts }} + - name: ELASTICSEARCH_HOSTS + value: "{{ .Values.elasticsearchHosts }}" + {{- end }} + - name: SERVER_HOST + value: "{{ .Values.serverHost }}" +{{- if .Values.extraEnvs }} +{{ toYaml .Values.extraEnvs | indent 10 }} +{{- end }} + readinessProbe: +{{ toYaml .Values.readinessProbe | indent 10 }} + exec: + command: + - sh + - -c + - | + #!/usr/bin/env bash -e + http () { + local path="${1}" + set -- -XGET -s --fail + + if [ -n "${ELASTICSEARCH_USERNAME}" ] && [ -n "${ELASTICSEARCH_PASSWORD}" ]; then + set -- "$@" -u "${ELASTICSEARCH_USERNAME}:${ELASTICSEARCH_PASSWORD}" + fi + + STATUS=$(curl --output /dev/null --write-out "%{http_code}" -k "$@" "{{ .Values.protocol }}://localhost:{{ .Values.httpPort }}${path}") + if [[ "${STATUS}" -eq 200 ]]; then + exit 0 + fi + + echo "Error: Got HTTP code ${STATUS} but expected a 200" + exit 1 + } + + http "{{ .Values.healthCheckPath }}" + ports: + - containerPort: {{ .Values.httpPort }} +{{- if .Values.lifecycle }} + lifecycle: +{{ toYaml .Values.lifecycle | indent 10 }} +{{- end }} + resources: +{{ toYaml .Values.resources | indent 10 }} + volumeMounts: + {{- range .Values.secretMounts }} + - name: {{ .name }} + mountPath: {{ .path }} + {{- if .subPath }} + subPath: {{ .subPath }} + {{- end }} + {{- end }} + {{- range $path, $config := .Values.kibanaConfig }} + - name: kibanaconfig + mountPath: /usr/share/kibana/config/{{ $path }} + subPath: {{ $path }} + {{- end -}} diff --git a/charts/elastic/kibana/templates/ingress.yaml b/charts/elastic/kibana/templates/ingress.yaml new file mode 100644 index 0000000..690b0c7 --- /dev/null +++ b/charts/elastic/kibana/templates/ingress.yaml @@ -0,0 +1,32 @@ +{{- if .Values.ingress.enabled -}} +{{- $fullName := include "fullname" . -}} +{{- $servicePort := .Values.service.port -}} +{{- $ingressPath := .Values.ingress.path -}} +apiVersion: {{ template "kibana.ingress.apiVersion" . }} +kind: Ingress +metadata: + name: {{ $fullName }} + labels: + app: {{ .Chart.Name }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- with .Values.ingress.annotations }} + annotations: +{{ toYaml . 
| indent 4 }} +{{- end }} +spec: +{{- if .Values.ingress.tls }} + tls: +{{ toYaml .Values.ingress.tls | indent 4 }} +{{- end }} + rules: + {{- range .Values.ingress.hosts }} + - host: {{ . }} + http: + paths: + - path: {{ $ingressPath }} + backend: + serviceName: {{ $fullName }} + servicePort: {{ $servicePort }} + {{- end }} +{{- end }} diff --git a/charts/elastic/kibana/templates/service.yaml b/charts/elastic/kibana/templates/service.yaml new file mode 100644 index 0000000..60f47eb --- /dev/null +++ b/charts/elastic/kibana/templates/service.yaml @@ -0,0 +1,26 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ template "fullname" . }} + labels: + app: {{ .Chart.Name }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service }} +{{- with .Values.service.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.port }} +{{- if .Values.service.nodePort }} + nodePort: {{ .Values.service.nodePort }} +{{- end }} + protocol: TCP + name: http + targetPort: {{ .Values.httpPort }} + selector: + app: {{ .Chart.Name }} + release: {{ .Release.Name | quote }} diff --git a/charts/elastic/kibana/values.yaml b/charts/elastic/kibana/values.yaml new file mode 100644 index 0000000..77ed94f --- /dev/null +++ b/charts/elastic/kibana/values.yaml @@ -0,0 +1,129 @@ +--- + +elasticsearchURL: "" # "http://elasticsearch-master:9200" +elasticsearchHosts: "http://elasticsearch-master:9200" + +replicas: 1 + +# Extra environment variables to append to this nodeGroup +# This will be appended to the current 'env:' key. You can use any of the kubernetes env +# syntax here +extraEnvs: [] +# - name: MY_ENVIRONMENT_VAR +# value: the_value_goes_here + +# A list of secrets and their paths to mount inside the pod +# This is useful for mounting certificates for security and for mounting +# the X-Pack license +secretMounts: [] +# - name: kibana-keystore +# secretName: kibana-keystore +# path: /usr/share/kibana/data/kibana.keystore +# subPath: kibana.keystore # optional + +image: "docker.elastic.co/kibana/kibana" +imageTag: "7.4.1" +imagePullPolicy: "IfNotPresent" + +# additionals labels +labels: {} + +podAnnotations: {} + # iam.amazonaws.com/role: es-cluster + +resources: + requests: + cpu: "100m" + memory: "500Mi" + limits: + cpu: "1000m" + memory: "1Gi" + +protocol: http + +serverHost: "0.0.0.0" + +healthCheckPath: "/app/kibana" + +# Allows you to add any config files in /usr/share/kibana/config/ +# such as kibana.yml +kibanaConfig: {} +# kibana.yml: | +# key: +# nestedkey: value + +# If Pod Security Policy in use it may be required to specify security context as well as service account + +podSecurityContext: + fsGroup: 1000 + +securityContext: + capabilities: + drop: + - ALL + # readOnlyRootFilesystem: true + runAsNonRoot: true + runAsUser: 1000 + +serviceAccount: "" + +# This is the PriorityClass settings as defined in +# https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass +priorityClassName: "" + +httpPort: 5601 + +# This is the max unavailable setting for the pod disruption budget +# The default value of 1 will make sure that kubernetes won't allow more than 1 +# of your pods to be unavailable during maintenance +maxUnavailable: 1 + +updateStrategy: + type: "Recreate" + +service: + type: ClusterIP + port: 5601 + nodePort: "" + annotations: {} + # cloud.google.com/load-balancer-type: "Internal" + # service.beta.kubernetes.io/aws-load-balancer-internal: 0.0.0.0/0 + # 
service.beta.kubernetes.io/azure-load-balancer-internal: "true"
+  # service.beta.kubernetes.io/openstack-internal-load-balancer: "true"
+  # service.beta.kubernetes.io/cce-load-balancer-internal-vpc: "true"
+
+ingress:
+  enabled: false
+  annotations: {}
+  # kubernetes.io/ingress.class: nginx
+  # kubernetes.io/tls-acme: "true"
+  path: /
+  hosts:
+    - chart-example.local
+  tls: []
+  #  - secretName: chart-example-tls
+  #    hosts:
+  #      - chart-example.local
+
+readinessProbe:
+  failureThreshold: 3
+  initialDelaySeconds: 10
+  periodSeconds: 10
+  successThreshold: 3
+  timeoutSeconds: 5
+
+imagePullSecrets: []
+nodeSelector: {}
+tolerations: []
+affinity: {}
+
+nameOverride: ""
+fullnameOverride: ""
+
+lifecycle: {}
+  # preStop:
+  #   exec:
+  #     command: ["/bin/sh", "-c", "echo Hello from the preStop handler > /usr/share/message"]
+  # postStart:
+  #   exec:
+  #     command: ["/bin/sh", "-c", "echo Hello from the postStart handler > /usr/share/message"]
diff --git a/charts/stable/fluent-bit/Chart.yaml b/charts/stable/fluent-bit/Chart.yaml
new file mode 100644
index 0000000..f17d532
--- /dev/null
+++ b/charts/stable/fluent-bit/Chart.yaml
@@ -0,0 +1,21 @@
+apiVersion: v1
+appVersion: 1.3.2
+description: Fast and Lightweight Log/Data Forwarder for Linux, BSD and OSX
+home: https://fluentbit.io
+icon: https://fluentbit.io/assets/img/logo1-default.png
+keywords:
+- logging
+- monitoring
+- fluent
+- fluentd
+maintainers:
+- email: Kevin.Fox@pnnl.gov
+  name: kfox1111
+- email: eduardo@treasure-data.com
+  name: edsiper
+- email: hfernandez@mesosphere.com
+  name: hectorj2f
+name: fluent-bit
+sources:
+- https://fluentbit.io
+version: 2.8.0
diff --git a/charts/stable/fluent-bit/OWNERS b/charts/stable/fluent-bit/OWNERS
new file mode 100644
index 0000000..fa1f012
--- /dev/null
+++ b/charts/stable/fluent-bit/OWNERS
@@ -0,0 +1,10 @@
+approvers:
+- kfox1111
+- edsiper
+- hectorj2f
+- Towmeykaw
+reviewers:
+- kfox1111
+- edsiper
+- hectorj2f
+- Towmeykaw
diff --git a/charts/stable/fluent-bit/README.md b/charts/stable/fluent-bit/README.md
new file mode 100644
index 0000000..b81f548
--- /dev/null
+++ b/charts/stable/fluent-bit/README.md
@@ -0,0 +1,176 @@
+# Fluent-Bit Chart
+
+[Fluent Bit](http://fluentbit.io/) is an open source and multi-platform Log Forwarder.
+
+## Chart Details
+
+This chart will do the following:
+
+* Install a configmap for Fluent Bit
+* Install a daemonset that provisions Fluent Bit [per-host architecture]
+
+## Installing the Chart
+
+To install the chart with the release name `my-release`:
+
+```bash
+$ helm install --name my-release stable/fluent-bit
+```
+
+When installing this chart on [Minikube](https://kubernetes.io/docs/getting-started-guides/minikube/), you must tell the chart so that the DaemonSet can mount the log files properly. Make sure to append the _--set on\_minikube=true_ option at the end of the _helm_ command, e.g.:
+
+```bash
+$ helm install --name my-release stable/fluent-bit --set on_minikube=true
+```
+
+## Configuration
+
+The following table lists the configurable parameters of the Fluent-Bit chart and their default values.
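+
+Before the table itself, here is a hypothetical values override that switches the backend to
+Elasticsearch, as a concrete illustration of how these parameters combine (a sketch using only
+keys documented in the table below; host, port and index are placeholders for your cluster):
+
+```yaml
+# my-values.yaml (hypothetical)
+backend:
+  type: es
+  es:
+    host: elasticsearch-master
+    port: 9200
+    index: kubernetes_cluster
+```
+
+It would be installed with `helm install --name my-release stable/fluent-bit -f my-values.yaml`.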
+
+| Parameter | Description | Default |
+| ----------------------- | ---------------------------------- | ----------------------- |
+| **Backend Selection** |
+| `backend.type` | Set the backend to which Fluent-Bit should flush the information it gathers | `forward` |
+| **Forward Backend** |
+| `backend.forward.host` | Target host where Fluent-Bit or Fluentd are listening for Forward messages | `fluentd` |
+| `backend.forward.port` | TCP Port of the target service | `24284` |
+| `backend.forward.shared_key` | A key string known by the remote Fluentd used for authorization. | `` |
+| `backend.forward.tls` | Enable or disable TLS support | `off` |
+| `backend.forward.tls_verify` | Force certificate validation | `on` |
+| `backend.forward.tls_debug` | Set TLS debug verbosity level. It accepts the following values: 0-4 | `1` |
+| **ElasticSearch Backend** |
+| `backend.es.host` | IP address or hostname of the target Elasticsearch instance | `elasticsearch` |
+| `backend.es.port` | TCP port of the target Elasticsearch instance. | `9200` |
+| `backend.es.index` | Elastic Index name | `kubernetes_cluster` |
+| `backend.es.type` | Elastic Type name | `flb_type` |
+| `backend.es.time_key` | Elastic Time Key | `@timestamp` |
+| `backend.es.logstash_format` | Enable Logstash format compatibility. | `On` |
+| `backend.es.logstash_prefix` | Index Prefix. If Logstash_Prefix equals 'mydata', your index becomes 'mydata-YYYY.MM.DD'. | `kubernetes_cluster` |
+| `backend.es.replace_dots` | Enable/Disable the Replace_Dots option. | `On` |
+| `backend.es.http_user` | Optional username credential for Elastic X-Pack access. | `` |
+| `backend.es.http_passwd` | Password for the user defined in HTTP_User. | `` |
+| `backend.es.http_passwd_secret` | Secret name for the password of the user defined in HTTP_User. | `` |
+| `backend.es.http_passwd_secret_key` | Secret key for the password of the user defined in HTTP_User. | `` |
+| `backend.es.tls` | Enable or disable TLS support | `off` |
+| `backend.es.tls_verify` | Force certificate validation | `on` |
+| `backend.es.tls_secret` | Existing secret storing the TLS CA certificate for the Elastic instance. Specify if tls: on. Overrides `backend.es.tls_ca` | `` |
+| `backend.es.tls_secret_ca_key` | Existing secret key storing the TLS CA certificate for the Elastic instance. Specify if tls: on. | `` |
+| `backend.es.tls_ca` | TLS CA certificate for the Elastic instance (in PEM format). Specify if tls: on. | `` |
+| `backend.es.tls_debug` | Set TLS debug verbosity level. It accepts the following values: 0-4 | `1` |
+| **HTTP Backend** |
+| `backend.http.host` | IP address or hostname of the target HTTP Server | `127.0.0.1` |
+| `backend.http.port` | TCP port of the target HTTP Server | `80` |
+| `backend.http.uri` | Specify an optional HTTP URI for the target web server, e.g. /something | `"/"` |
+| `backend.http.http_user` | Optional username credential for Basic Authentication. | `` |
+| `backend.http.http_passwd` | Password for the user defined in HTTP_User. | `` |
+| `backend.http.format` | Specify the data format to be used in the HTTP request body; by default it uses msgpack, optionally it can be set to json. | `msgpack` |
+| `backend.http.headers` | HTTP Headers | `[]` |
+| `backend.http.tls` | Enable or disable TLS support | `off` |
+| `backend.http.tls_verify` | Force certificate validation | `on` |
+| `backend.http.tls_debug` | Set TLS debug verbosity level. It accepts the following values: 0-4 | `1` |
+| **Splunk Backend** |
+| `backend.splunk.host` | IP address or hostname of the target Splunk Server | `127.0.0.1` |
+| `backend.splunk.port` | TCP port of the target Splunk Server | `8088` |
+| `backend.splunk.token` | Specify the Authentication Token for the HTTP Event Collector interface. | `` |
+| `backend.splunk.send_raw` | If enabled, record keys and values are set in the main map. | `off` |
+| `backend.splunk.tls` | Enable or disable TLS support | `on` |
+| `backend.splunk.tls_verify` | Force TLS certificate validation | `off` |
+| `backend.splunk.tls_debug` | Set TLS debug verbosity level. It accepts the following values: 0-4 | `1` |
+| `backend.splunk.message_key` | Tag applied to all incoming logs | `kubernetes` |
+| **Stackdriver Backend** |
+| `backend.stackdriver.google_service_credentials` | Contents of a Google Cloud credentials JSON file. | `` |
+| `backend.stackdriver.service_account_email` | Account email associated with the service. Only available if no credentials file has been provided. | `` |
+| `backend.stackdriver.service_account_secret` | Private key content associated with the service account. Only available if no credentials file has been provided. | `` |
+| **Parsers** |
+| `parsers.enabled` | Enable custom parsers | `false` |
+| `parsers.regex` | List of regex parsers | `NULL` |
+| `parsers.json` | List of json parsers | `NULL` |
+| `parsers.logfmt` | List of logfmt parsers | `NULL` |
+| **General** |
+| `annotations` | Optional daemonset annotations | `NULL` |
+| `podAnnotations` | Optional pod annotations | `NULL` |
+| `podLabels` | Optional pod labels | `NULL` |
+| `fullConfigMap` | User has provided the entire config (parsers + system) | `false` |
+| `existingConfigMap` | ConfigMap override | `` |
+| `extraEntries.input` | Extra entries for the existing [INPUT] section | `` |
+| `extraEntries.filter` | Extra entries for the existing [FILTER] section | `` |
+| `extraEntries.output` | Extra entries for the existing [OUTPUT] section | `` |
+| `extraPorts` | List of extra ports | |
+| `extraVolumeMounts` | Mount an extra volume, required to mount SSL certificates when Elasticsearch has TLS enabled | |
+| `extraVolume` | Extra volume | |
+| `service.flush` | Interval to flush output (seconds) | `1` |
+| `service.logLevel` | Diagnostic level (error/warning/info/debug/trace) | `info` |
+| `filter.enableExclude` | Enable support for the pod annotation `fluentbit.io/exclude: true`. If present, discard logs from that pod. | `true` |
+| `filter.enableParser` | Enable support for the pod annotation `fluentbit.io/parser: parser_name`. parser_name must be the name of a parser contained within parsers.conf | `true` |
+| `filter.kubeURL` | Kubernetes API server endpoint used by the kubernetes filter | `https://kubernetes.default.svc:443` |
+| `filter.kubeCAFile` | CA certificate used to validate the Kubernetes API server | `/var/run/secrets/kubernetes.io/serviceaccount/ca.crt` |
+| `filter.kubeTokenFile` | Token file used to authenticate against the Kubernetes API server | `/var/run/secrets/kubernetes.io/serviceaccount/token` |
+| `filter.kubeTag` | Optional top-level tag for matching in filter | `kube` |
+| `filter.kubeTagPrefix` | Optional tag prefix used by Tail | `kube.var.log.containers.` |
+| `filter.mergeJSONLog` | If the log field content is a JSON string map, append the map fields as part of the log structure | `true` |
+| `filter.mergeLogKey` | If set, append the processed log keys under a new root key specified by this variable. | `nil` |
+| `image.fluent_bit.repository` | Image | `fluent/fluent-bit` |
+| `image.fluent_bit.tag` | Image tag | `1.3.2` |
+| `image.pullPolicy` | Image pull policy | `Always` |
+| `nameOverride` | Override name of app | `nil` |
+| `fullnameOverride` | Override full name of app | `nil` |
+| `image.pullSecrets` | Specify image pull secrets | `nil` |
+| `input.tail.memBufLimit` | Specify Mem_Buf_Limit in tail input | `5MB` |
+| `input.tail.parser` | Specify Parser in tail input. | `docker` |
+| `input.tail.path` | Specify log file(s) through the use of common wildcards. | `/var/log/containers/*.log` |
+| `input.tail.ignore_older` | Ignores files that were last modified before this time in seconds. Supports m, h, d (minutes, hours, days) syntax. | `` |
+| `input.systemd.enabled` | [Enable systemd input](https://docs.fluentbit.io/manual/input/systemd) | `false` |
+| `input.systemd.filters.systemdUnit` | Please see https://docs.fluentbit.io/manual/input/systemd | `[docker.service, kubelet.service, node-problem-detector.service]` |
+| `input.systemd.maxEntries` | Please see https://docs.fluentbit.io/manual/input/systemd | `1000` |
+| `input.systemd.readFromTail` | Please see https://docs.fluentbit.io/manual/input/systemd | `true` |
+| `input.systemd.tag` | Please see https://docs.fluentbit.io/manual/input/systemd | `host.*` |
+| `rbac.create` | Specifies whether RBAC resources should be created. | `true` |
+| `rbac.pspEnabled` | Specifies whether a PodSecurityPolicy should be created. | `false` |
+| `serviceAccount.create` | Specifies whether a ServiceAccount should be created. | `true` |
+| `serviceAccount.name` | The name of the ServiceAccount to use. | `NULL` |
+| `rawConfig` | Raw contents of fluent-bit.conf | `@INCLUDE fluent-bit-service.conf`<br>`@INCLUDE fluent-bit-input.conf`<br>`@INCLUDE fluent-bit-filter.conf`<br>`@INCLUDE fluent-bit-output.conf` |
+| `resources` | Pod resource requests & limits | `{}` |
+| `securityContext` | [Security settings for a container](https://kubernetes.io/docs/concepts/policy/security-context) | `{}` |
+| `podSecurityContext` | [Security settings for a pod](https://kubernetes.io/docs/concepts/policy/security-context) | `{}` |
+| `hostNetwork` | Use host's network | `false` |
+| `dnsPolicy` | Specifies the dnsPolicy to use | `ClusterFirst` |
+| `priorityClassName` | Specifies the priorityClassName to use | `NULL` |
+| `tolerations` | Optional daemonset tolerations | `NULL` |
+| `nodeSelector` | Node labels for fluent-bit pod assignment | `NULL` |
+| `affinity` | Expressions for affinity | `NULL` |
+| `metrics.enabled` | Specifies whether a service for metrics should be exposed | `false` |
+| `metrics.service.annotations` | Optional metrics service annotations | `NULL` |
+| `metrics.service.labels` | Additional labels for the fluent-bit metrics service definition, specified as a map. | None |
+| `metrics.service.port` | Port on which metrics should be exposed | `2020` |
+| `metrics.service.type` | Service type for metrics | `ClusterIP` |
+| `metrics.serviceMonitor.enabled` | Set this to `true` to create a ServiceMonitor for the Prometheus operator | `false` |
+| `metrics.serviceMonitor.additionalLabels` | Additional labels that can be used so the ServiceMonitor will be discovered by Prometheus | `{}` |
+| `metrics.serviceMonitor.namespace` | Optional namespace in which to create the ServiceMonitor | `nil` |
+| `metrics.serviceMonitor.interval` | Scrape interval. If not set, the Prometheus default scrape interval is used | `nil` |
+| `metrics.serviceMonitor.scrapeTimeout` | Scrape timeout. If not set, the Prometheus default scrape timeout is used | `nil` |
+| `trackOffsets` | Specify whether to track the file offsets for tailing docker logs. This allows fluent-bit to pick up where it left off after pod restarts, but requires access to a `hostPath` | `false` |
+| `testFramework.image` | `test-framework` image repository. | `dduportal/bats` |
+| `testFramework.tag` | `test-framework` image tag. | `0.4.0` |
+
+
+Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`.
+
+Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example,
+
+```bash
+$ helm install --name my-release -f values.yaml stable/fluent-bit
+```
+
+> **Tip**: You can use the default [values.yaml](values.yaml)
+
+## Upgrading
+
+### From < 1.0.0 to >= 1.0.0
+
+Values `extraInputs`, `extraFilters` and `extraOutputs` have been removed in version `1.0.0` of the fluent-bit chart.
+To add additional entries to the existing sections, please use the `extraEntries.input`, `extraEntries.filter` and `extraEntries.output` values (see the sketch at the end of this README).
+For entire sections, please use the `rawConfig` value, inserting blocks of text as desired.
+
+### From < 1.8.0 to >= 1.8.0
+
+Version `1.8.0` introduces the use of the release name as the full name if it contains the chart name (fluent-bit in this case). E.g. with a release name of `fluent-bit`, this renames the DaemonSet from `fluent-bit-fluent-bit` to `fluent-bit`. The suggested approach is to delete the release and reinstall it.
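+
+To illustrate the `extraEntries` migration described above, here is a minimal sketch of a values-file snippet that appends one extra key to the existing [INPUT] section (the `Exclude_Path` pattern is only an illustrative tail-input option, not a chart default):
+
+```yaml
+extraEntries:
+  input: |-
+    # appended verbatim to the generated [INPUT] section
+    Exclude_Path /var/log/containers/*_kube-system_*.log
+```
diff --git a/charts/stable/fluent-bit/templates/NOTES.txt b/charts/stable/fluent-bit/templates/NOTES.txt
new file mode 100644
index 0000000..bbfcc0b
--- /dev/null
+++ b/charts/stable/fluent-bit/templates/NOTES.txt
@@ -0,0 +1,15 @@
+fluent-bit is now running.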
+ +{{- if eq .Values.backend.type "forward" }} + +It will forward all container logs to the svc named {{ .Values.backend.forward.host }} on port: {{ .Values.backend.forward.port }} +{{- else if eq .Values.backend.type "es" }} + +It will forward all container logs to the svc named {{ .Values.backend.es.host }} on port: {{ .Values.backend.es.port }} +{{- else if eq .Values.backend.type "http" }} + +It will forward all container logs to the svc named {{ .Values.backend.http.host }} on port: {{ .Values.backend.http.port }} +{{- else if eq .Values.backend.type "splunk" }} + +It will forward all container logs to the svc named {{ .Values.backend.splunk.host }} on port: {{ .Values.backend.splunk.port }} +{{- end }} diff --git a/charts/stable/fluent-bit/templates/_helpers.tpl b/charts/stable/fluent-bit/templates/_helpers.tpl new file mode 100644 index 0000000..e9159a0 --- /dev/null +++ b/charts/stable/fluent-bit/templates/_helpers.tpl @@ -0,0 +1,78 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "fluent-bit.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "fluent-bit.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "fluent-bit.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for RBAC APIs. +*/}} +{{- define "rbac.apiVersion" -}} +{{- if .Capabilities.APIVersions.Has "rbac.authorization.k8s.io/v1" -}} +rbac.authorization.k8s.io/v1 +{{- else if .Capabilities.APIVersions.Has "rbac.authorization.k8s.io/v1beta1" -}} +rbac.authorization.k8s.io/v1beta1 +{{- else -}} +rbac.authorization.k8s.io/v1alpha1 +{{- end -}} +{{- end -}} + +{{/* +Create the name of the service account to use +*/}} +{{- define "fluent-bit.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "fluent-bit.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiGroup for PodSecurityPolicy. +*/}} +{{- define "rbac.pspApiGroup" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions" -}} +{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "policy" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for DaemonSet. 
+*/}} +{{- define "daemonSet.apiVersion" -}} +{{- if semverCompare "<1.9-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} diff --git a/charts/stable/fluent-bit/templates/cluster-role.yaml b/charts/stable/fluent-bit/templates/cluster-role.yaml new file mode 100644 index 0000000..69c7989 --- /dev/null +++ b/charts/stable/fluent-bit/templates/cluster-role.yaml @@ -0,0 +1,28 @@ +{{- if .Values.rbac.create -}} +apiVersion: {{ template "rbac.apiVersion" . }} +kind: ClusterRole +metadata: + labels: + app: {{ template "fluent-bit.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "fluent-bit.fullname" . }} +rules: + - apiGroups: + - "" + resources: + - pods + verbs: + - get +{{- if .Values.rbac.pspEnabled }} + - apiGroups: + - {{ template "rbac.pspApiGroup" . }} + resources: + - podsecuritypolicies + resourceNames: + - {{ template "fluent-bit.fullname" . }} + verbs: + - use +{{- end }} +{{- end -}} diff --git a/charts/stable/fluent-bit/templates/cluster-rolebinding.yaml b/charts/stable/fluent-bit/templates/cluster-rolebinding.yaml new file mode 100644 index 0000000..140e527 --- /dev/null +++ b/charts/stable/fluent-bit/templates/cluster-rolebinding.yaml @@ -0,0 +1,19 @@ +{{- if .Values.rbac.create -}} +apiVersion: {{ template "rbac.apiVersion" . }} +kind: ClusterRoleBinding +metadata: + labels: + app: {{ template "fluent-bit.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "fluent-bit.fullname" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "fluent-bit.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ template "fluent-bit.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +{{- end -}} diff --git a/charts/stable/fluent-bit/templates/config.yaml b/charts/stable/fluent-bit/templates/config.yaml new file mode 100644 index 0000000..936b5f2 --- /dev/null +++ b/charts/stable/fluent-bit/templates/config.yaml @@ -0,0 +1,236 @@ +{{- if (empty .Values.existingConfigMap) -}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "fluent-bit.fullname" . }}-config + labels: + app: {{ template "fluent-bit.name" . 
}} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: + fluent-bit-service.conf: | + [SERVICE] + Flush {{ .Values.service.flush }} + Daemon Off + Log_Level {{ .Values.service.logLevel }} + Parsers_File parsers.conf +{{- if .Values.parsers.enabled }} + Parsers_File parsers_custom.conf +{{- end }} +{{- if .Values.metrics.enabled }} + HTTP_Server On + HTTP_Listen 0.0.0.0 + HTTP_Port 2020 +{{- end }} + + fluent-bit-input.conf: | + [INPUT] + Name tail + Path {{ .Values.input.tail.path }} + Parser {{ .Values.input.tail.parser }} + Tag {{ .Values.filter.kubeTag }}.* + Refresh_Interval 5 + Mem_Buf_Limit {{ .Values.input.tail.memBufLimit }} + Skip_Long_Lines On +{{- if .Values.input.tail.ignore_older }} + Ignore_Older {{ .Values.input.tail.ignore_older }} +{{- end }} +{{- if .Values.trackOffsets }} + DB /tail-db/tail-containers-state.db + DB.Sync Normal +{{- end }} +{{- if .Values.input.systemd.enabled }} + [INPUT] + Name systemd + Tag {{ .Values.input.systemd.tag }} +{{- range $value := .Values.input.systemd.filters.systemdUnit }} + Systemd_Filter _SYSTEMD_UNIT={{ $value }} +{{- end }} + Max_Entries {{ .Values.input.systemd.maxEntries }} + Read_From_Tail {{ .Values.input.systemd.readFromTail }} +{{- end }} +{{ .Values.extraEntries.input | indent 8 }} + + fluent-bit-filter.conf: | + [FILTER] + Name kubernetes + Match {{ .Values.filter.kubeTag }}.* + Kube_Tag_Prefix {{ .Values.filter.kubeTagPrefix }} + Kube_URL {{ .Values.filter.kubeURL }} + Kube_CA_File {{ .Values.filter.kubeCAFile }} + Kube_Token_File {{ .Values.filter.kubeTokenFile }} +{{- if .Values.filter.mergeJSONLog }} + Merge_Log On +{{- end }} + +{{- if .Values.filter.mergeLogKey }} + Merge_Log_Key {{ .Values.filter.mergeLogKey }} +{{- end }} + +{{- if .Values.filter.enableParser }} + K8S-Logging.Parser On +{{- end }} +{{- if .Values.filter.enableExclude }} + K8S-Logging.Exclude On +{{- end }} +{{ .Values.extraEntries.filter | indent 8 }} + + fluent-bit-output.conf: | +{{ if eq .Values.backend.type "test" }} + [OUTPUT] + Name file + Match * + Path /tmp/fluent-bit.log +{{ else if eq .Values.backend.type "forward" }} + [OUTPUT] + Name forward + Match * + Host {{ .Values.backend.forward.host }} + Port {{ .Values.backend.forward.port }} + Retry_Limit False +{{- if .Values.backend.forward.shared_key }} + Shared_Key {{ .Values.backend.forward.shared_key }} +{{- end }} +{{ else if eq .Values.backend.type "es" }} + [OUTPUT] + Name es + Match * + Host {{ .Values.backend.es.host }} + Port {{ .Values.backend.es.port }} + Logstash_Format {{ default "On" .Values.backend.es.logstash_format }} + Retry_Limit False + Type {{ .Values.backend.es.type }} +{{- if .Values.backend.es.time_key }} + Time_Key {{ .Values.backend.es.time_key }} +{{- end }} +{{- if .Values.backend.es.replace_dots }} + Replace_Dots {{ .Values.backend.es.replace_dots }} +{{- end }} +{{- if .Values.backend.es.logstash_prefix }} + Logstash_Prefix {{ .Values.backend.es.logstash_prefix }} +{{ else if .Values.backend.es.index }} + Index {{ .Values.backend.es.index }} +{{- end }} +{{- if .Values.backend.es.http_user }} + HTTP_User {{ .Values.backend.es.http_user }} +{{- if .Values.backend.es.http_passwd }} + HTTP_Passwd {{ .Values.backend.es.http_passwd }} +{{- else }} + HTTP_Passwd ${HTTP_PASSWORD} +{{- end }} +{{- end }} +{{if eq .Values.backend.es.tls "on" }} + tls {{ .Values.backend.es.tls }} + tls.verify {{ .Values.backend.es.tls_verify }} + tls.debug {{ .Values.backend.es.tls_debug }} +{{- if .Values.backend.es.tls_ca 
}} + tls.ca_file /secure/es-tls-ca.crt +{{- end }} +{{- end }} +{{ else if eq .Values.backend.type "splunk" }} + [OUTPUT] + Name splunk + Match * + Host {{ .Values.backend.splunk.host }} + Port {{ .Values.backend.splunk.port }} + Splunk_Token {{ .Values.backend.splunk.token }} + Splunk_Send_Raw {{ .Values.backend.splunk.send_raw}} + TLS {{ .Values.backend.splunk.tls }} + TLS.Verify {{ .Values.backend.splunk.tls_verify }} + tls.debug {{ .Values.backend.splunk.tls_debug }} + Message_Key {{ .Values.backend.splunk.message_key }} +{{ else if eq .Values.backend.type "stackdriver" }} + [OUTPUT] + Name stackdriver + Match * + resource global +{{- if .Values.backend.stackdriver.google_service_credentials }} + google_service_credentials /secure/google_service_credentials.json +{{- else }} + service_account_email {{ .Values.backend.stackdriver.service_account_email }} + service_account_secret {{ .Values.backend.stackdriver.service_account_secret }} +{{- end }} +{{ else if eq .Values.backend.type "http" }} + [OUTPUT] + Name http + Match * + Host {{ .Values.backend.http.host }} + Port {{ .Values.backend.http.port }} + URI {{ .Values.backend.http.uri }} +{{- if .Values.backend.http.http_user }} + HTTP_User {{ .Values.backend.http.http_user }} + HTTP_Passwd {{ .Values.backend.http.http_passwd }} +{{- end }} + tls {{ .Values.backend.http.tls }} + tls.verify {{ .Values.backend.http.tls_verify }} + tls.debug {{ .Values.backend.http.tls_debug }} +{{- if .Values.backend.http.proxy }} + Proxy {{ .Values.backend.http.proxy }} +{{- end }} + Format {{ .Values.backend.http.format }} +{{- end }} +{{- range .Values.backend.http.headers }} + Header {{ . }} +{{- end }} +{{ .Values.extraEntries.output | indent 8 }} + + + fluent-bit.conf: | +{{ .Values.rawConfig | indent 4 }} + + parsers.conf: | +{{- if .Values.parsers.regex }} +{{- range .Values.parsers.regex }} + [PARSER] + Name {{ .name }} + Format regex + Regex {{ .regex }} +{{- if .timeKey }} + Time_Key {{ .timeKey }} +{{- end }} +{{- if .timeFormat }} + Time_Format {{ .timeFormat }} +{{- end }} +{{ end }} +{{- end }} +{{- if .Values.parsers.json }} +{{- range .Values.parsers.json }} + [PARSER] + Name {{ .name }} + Format json +{{- if .timeKeep }} + Time_Keep {{ .timeKeep }} +{{- end }} +{{- if .timeKey }} + Time_Key {{ .timeKey }} +{{- end }} +{{- if .timeFormat }} + Time_Format {{ .timeFormat }} +{{- end }} +{{- if .decodeFieldAs }} + Decode_Field_As {{ .decodeFieldAs }} {{ .decodeField | default "log" }} +{{- end}} +{{- if .extraEntries }} +{{ .extraEntries | indent 8 }} +{{- end }} +{{ end }} +{{- end }} +{{- if .Values.parsers.logfmt }} +{{- range .Values.parsers.logfmt }} + [PARSER] + Name {{ .name }} + Format logfmt +{{- if .timeKey }} + Time_Key {{ .timeKey }} +{{- end }} +{{- if .timeFormat }} + Time_Format {{ .timeFormat }} +{{- end }} +{{- if .extraEntries }} +{{ .extraEntries | indent 8 }} +{{- end }} +{{ end }} +{{- end }} + +{{- end -}} diff --git a/charts/stable/fluent-bit/templates/daemonset.yaml b/charts/stable/fluent-bit/templates/daemonset.yaml new file mode 100644 index 0000000..e9cbfe1 --- /dev/null +++ b/charts/stable/fluent-bit/templates/daemonset.yaml @@ -0,0 +1,192 @@ +apiVersion: {{ template "daemonSet.apiVersion" . }} +kind: DaemonSet +metadata: + name: {{ template "fluent-bit.fullname" . }} + labels: + app: {{ template "fluent-bit.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + selector: + matchLabels: + app: {{ template "fluent-bit.name" . 
}} + release: {{ .Release.Name }} +{{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 6 }} +{{- end }} + updateStrategy: + type: RollingUpdate + template: + metadata: + labels: + app: {{ template "fluent-bit.name" . }} + release: {{ .Release.Name }} +{{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 8 }} +{{- end }} + annotations: + checksum/config: {{ include (print $.Template.BasePath "/config.yaml") . | sha256sum }} +{{- if .Values.podAnnotations }} +{{ toYaml .Values.podAnnotations | indent 8 }} +{{- end }} + spec: +{{- if .Values.podSecurityContext }} + securityContext: +{{ toYaml .Values.podSecurityContext | indent 8 }} +{{- end }} +{{- if .Values.priorityClassName }} + priorityClassName: "{{ .Values.priorityClassName }}" +{{- end }} +{{- if .Values.image.pullSecrets }} + imagePullSecrets: +{{ toYaml .Values.image.pullSecrets | indent 8 }} +{{- end }} + hostNetwork: {{ .Values.hostNetwork }} + dnsPolicy: {{ .Values.dnsPolicy }} + serviceAccountName: {{ template "fluent-bit.serviceAccountName" . }} + containers: + - name: fluent-bit + image: "{{ .Values.image.fluent_bit.repository }}:{{ .Values.image.fluent_bit.tag }}" + imagePullPolicy: "{{ .Values.image.pullPolicy }}" + env: +{{- if .Values.backend.es.http_passwd_secret }} + - name: HTTP_PASSWORD + valueFrom: + secretKeyRef: + name: {{ .Values.backend.es.http_passwd_secret }} + key: {{ .Values.backend.es.http_passwd_secret_key }} +{{- end }} +{{- /* Only render empty array when no HTTP_PASSWORD */ -}} +{{- if or .Values.env (not .Values.backend.es.http_passwd_secret) }} +{{ toYaml .Values.env | indent 10 }} +{{- end }} + resources: +{{ toYaml .Values.resources | indent 10 }} +{{- if or .Values.metrics.enabled .Values.extraPorts }} + ports: +{{- if .Values.metrics.enabled }} + - name: metrics + containerPort: 2020 + protocol: TCP +{{- end -}} +{{- if .Values.extraPorts }} +{{ toYaml .Values.extraPorts | indent 8 }} +{{- end }} +{{- end }} +{{- if .Values.securityContext }} + securityContext: +{{ toYaml .Values.securityContext | indent 10 }} +{{- end }} + volumeMounts: + - name: varlog + mountPath: /var/log + - name: varlibdockercontainers + mountPath: /var/lib/docker/containers + readOnly: true + {{- if .Values.input.systemd.enabled }} + - name: etcmachineid + mountPath: /etc/machine-id + readOnly: true + {{- end }} +{{- if .Values.fullConfigMap }} + - name: config + mountPath: /fluent-bit/etc +{{- else }} + - name: config + mountPath: /fluent-bit/etc/fluent-bit.conf + subPath: fluent-bit.conf + - name: config + mountPath: /fluent-bit/etc/fluent-bit-service.conf + subPath: fluent-bit-service.conf + - name: config + mountPath: /fluent-bit/etc/fluent-bit-input.conf + subPath: fluent-bit-input.conf + - name: config + mountPath: /fluent-bit/etc/fluent-bit-filter.conf + subPath: fluent-bit-filter.conf + - name: config + mountPath: /fluent-bit/etc/fluent-bit-output.conf + subPath: fluent-bit-output.conf + +{{- if .Values.parsers.enabled }} + - name: config + mountPath: /fluent-bit/etc/parsers_custom.conf + subPath: parsers.conf +{{- end }} +{{- if and .Values.backend.type "stackdriver" .Values.backend.stackdriver.google_service_credentials }} + - name: gcp-stackdriver-secret + mountPath: /secure/google_service_credentials.json + subPath: google_service_credentials.json +{{- end }} +{{- end }} +{{- if or .Values.backend.es.tls_ca .Values.backend.es.tls_secret }} + - name: es-tls-secret + mountPath: /secure/es-tls-ca.crt + subPath: {{ .Values.backend.es.tls_secret_ca_key | default "es-tls-ca.crt" | quote }} 
+{{- end }} +{{- if .Values.trackOffsets }} + - name: tail-db + mountPath: /tail-db +{{- end }} +{{- if .Values.extraVolumeMounts }} +{{ toYaml .Values.extraVolumeMounts | indent 8 }} +{{- end }} +{{ if .Values.on_minikube }} + - name: mnt + mountPath: /mnt + readOnly: true +{{ end }} + terminationGracePeriodSeconds: 10 + {{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + {{- end }} + {{ if .Values.affinity }} + affinity: +{{ toYaml .Values.affinity | indent 8 }} + {{- end }} + {{- if .Values.tolerations }} + tolerations: +{{ toYaml .Values.tolerations | indent 8 }} + {{- end }} + volumes: + - name: varlog + hostPath: + path: /var/log + - name: varlibdockercontainers + hostPath: + path: /var/lib/docker/containers + {{- if .Values.input.systemd.enabled }} + - name: etcmachineid + hostPath: + path: /etc/machine-id + type: File + {{- end }} +{{- if or .Values.backend.es.tls_ca .Values.backend.es.tls_secret }} + - name: es-tls-secret + secret: + secretName: {{ .Values.backend.es.tls_secret | default ( print ( include "fluent-bit.fullname" . ) "-es-tls-secret" ) | quote }} +{{- end }} +{{- if and .Values.backend.type "stackdriver" .Values.backend.stackdriver.google_service_credentials }} + - name: gcp-stackdriver-secret + secret: + secretName: "{{ template "fluent-bit.fullname" . }}-gcp-stackdriver-secret" +{{- end }} +{{- if .Values.trackOffsets }} + - name: tail-db + hostPath: + path: {{ .Values.taildb.directory }} + type: DirectoryOrCreate +{{- end }} + - name: config + configMap: + name: {{ if .Values.existingConfigMap }}{{ .Values.existingConfigMap }}{{- else }}{{ template "fluent-bit.fullname" . }}-config{{- end }} +{{- if .Values.extraVolumes }} +{{ toYaml .Values.extraVolumes | indent 6 }} +{{- end }} +{{ if .Values.on_minikube }} + - name: mnt + hostPath: + path: /mnt +{{ end }} diff --git a/charts/stable/fluent-bit/templates/psp.yaml b/charts/stable/fluent-bit/templates/psp.yaml new file mode 100644 index 0000000..67065b6 --- /dev/null +++ b/charts/stable/fluent-bit/templates/psp.yaml @@ -0,0 +1,60 @@ +{{- if .Values.rbac.pspEnabled }} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "fluent-bit.fullname" . }} +spec: + # Prevents running in privileged mode + privileged: false + # Required to prevent escalations to root. + allowPrivilegeEscalation: false + # This is redundant with non-root + disallow privilege escalation, + # but we can provide it for defense in depth. + requiredDropCapabilities: + - ALL + volumes: + - 'configMap' + - 'secret' + - 'hostPath' + allowedHostPaths: + - pathPrefix: "/var/log" + - pathPrefix: "/var/lib/docker/containers" + readOnly: true +{{- if .Values.input.systemd.enabled }} + - pathPrefix: "/etc/machine-id" + readOnly: true +{{- end }} + - pathPrefix: "/fluent-bit/etc" +{{- if .Values.trackOffsets }} + - pathPrefix: {{ .Values.taildb.directory }} +{{- end }} +{{- if .Values.on_minikube }} + - pathPrefix: "/mnt" +{{- end }} +{{- range .Values.extraVolumes }} + {{- if .hostPath }} + - pathPrefix: {{ .hostPath.path }} + {{- end }} +{{- end }} + hostNetwork: {{ .Values.hostNetwork }} + hostIPC: false + hostPID: false + runAsUser: + rule: 'RunAsAny' + seLinux: + # This policy assumes the nodes are using AppArmor rather than SELinux. + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. 
+ - min: 1 + max: 65535 + readOnlyRootFilesystem: false +{{- end }} diff --git a/charts/stable/fluent-bit/templates/secret.yaml b/charts/stable/fluent-bit/templates/secret.yaml new file mode 100644 index 0000000..890747d --- /dev/null +++ b/charts/stable/fluent-bit/templates/secret.yaml @@ -0,0 +1,30 @@ +{{- if not .Values.backend.es.tls_secret }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: "{{ template "fluent-bit.fullname" . }}-es-tls-secret" + labels: + app: {{ template "fluent-bit.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +type: Opaque +data: + es-tls-ca.crt: {{ .Values.backend.es.tls_ca | b64enc | quote }} +{{- end }} +{{- if eq .Values.backend.type "stackdriver" }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: "{{ template "fluent-bit.fullname" . }}-gcp-stackdriver-secret" + labels: + app: {{ template "fluent-bit.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +type: Opaque +data: + google_service_credentials.json: {{ .Values.backend.stackdriver.google_service_credentials | b64enc | quote }} +{{- end }} diff --git a/charts/stable/fluent-bit/templates/service.yaml b/charts/stable/fluent-bit/templates/service.yaml new file mode 100644 index 0000000..8bbc88f --- /dev/null +++ b/charts/stable/fluent-bit/templates/service.yaml @@ -0,0 +1,30 @@ +{{- if .Values.metrics.enabled }} +apiVersion: v1 +kind: Service +metadata: +{{- if .Values.metrics.service.annotations }} + annotations: +{{ toYaml .Values.metrics.service.annotations | indent 4 }} +{{- end }} + name: {{ template "fluent-bit.fullname" . }}-metrics + labels: + app: {{ template "fluent-bit.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + {{- if .Values.metrics.service }} + {{- range $key, $value := .Values.metrics.service.labels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + {{- end }} +spec: + type: {{ .Values.metrics.service.type}} + sessionAffinity: None + ports: + - port: {{ .Values.metrics.service.port }} + targetPort: metrics + name: metrics + selector: + app: {{ template "fluent-bit.name" . }} + release: {{ .Release.Name }} +{{- end }} diff --git a/charts/stable/fluent-bit/templates/serviceaccount.yaml b/charts/stable/fluent-bit/templates/serviceaccount.yaml new file mode 100644 index 0000000..c7f4307 --- /dev/null +++ b/charts/stable/fluent-bit/templates/serviceaccount.yaml @@ -0,0 +1,11 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app: {{ template "fluent-bit.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "fluent-bit.serviceAccountName" . }} +{{- end -}} diff --git a/charts/stable/fluent-bit/templates/servicemonitor.yaml b/charts/stable/fluent-bit/templates/servicemonitor.yaml new file mode 100644 index 0000000..df0a03c --- /dev/null +++ b/charts/stable/fluent-bit/templates/servicemonitor.yaml @@ -0,0 +1,34 @@ +{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled (.Capabilities.APIVersions.Has "monitoring.coreos.com/v1") }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "fluent-bit.fullname" . 
}}
+  {{- if .Values.metrics.serviceMonitor.namespace }}
+  namespace: {{ .Values.metrics.serviceMonitor.namespace }}
+  {{- end }}
+  labels:
+    app: {{ template "fluent-bit.name" . }}
+    chart: {{ .Chart.Name }}-{{ .Chart.Version }}
+    heritage: {{ .Release.Service }}
+    release: {{ .Release.Name }}
+  {{- if .Values.metrics.serviceMonitor.additionalLabels }}
+{{ toYaml .Values.metrics.serviceMonitor.additionalLabels | indent 4 }}
+  {{- end }}
+spec:
+  endpoints:
+  - port: metrics
+    path: /api/v1/metrics/prometheus
+    {{- if .Values.metrics.serviceMonitor.interval }}
+    interval: {{ .Values.metrics.serviceMonitor.interval }}
+    {{- end }}
+    {{- if .Values.metrics.serviceMonitor.scrapeTimeout }}
+    scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }}
+    {{- end }}
+  namespaceSelector:
+    matchNames:
+    - {{ .Release.Namespace }}
+  selector:
+    matchLabels:
+      app: {{ include "fluent-bit.name" . }}
+      release: {{ .Release.Name }}
+{{- end }}
diff --git a/charts/stable/fluent-bit/templates/tests/test-configmap.yaml b/charts/stable/fluent-bit/templates/tests/test-configmap.yaml
new file mode 100644
index 0000000..49f0a00
--- /dev/null
+++ b/charts/stable/fluent-bit/templates/tests/test-configmap.yaml
@@ -0,0 +1,48 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ template "fluent-bit.fullname" . }}-test
+  labels:
+    app: {{ template "fluent-bit.fullname" . }}
+    chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+    heritage: "{{ .Release.Service }}"
+    release: "{{ .Release.Name }}"
+data:
+  run.sh: |-
+    {{- if eq .Values.backend.type "forward"}}
+    {{- if eq .Values.backend.forward.tls "on"}}
+    fluent-gem install fluent-plugin-secure-forward
+    {{- end }}
+    @test "Test fluentd" {
+      fluentd -c /tests/fluentd.conf --dry-run
+    }
+    {{- else if eq .Values.backend.type "es"}}
+    @test "Test Elasticsearch Indices" {
+      url="http://{{ .Values.backend.es.host }}:{{ .Values.backend.es.port }}/_cat/indices?format=json"
+      body=$(curl $url)
+
+      result=$(echo $body | jq -cr '.[] | select(.index | contains("{{ .Values.backend.es.index }}"))')
+      [ "$result" != "" ]
+
+      result=$(echo $body | jq -cr '.[] | select((.index | contains("{{ .Values.backend.es.index }}")) and (.health != "green"))')
+      [ "$result" == "" ]
+    }
+    {{- end }}
+
+  fluentd.conf: |-
+    <source>
+      {{- if eq .Values.backend.forward.tls "off" }}
+      @type forward
+      bind 0.0.0.0
+      port {{ .Values.backend.forward.port }}
+      {{- else }}
+      @type secure_forward
+      self_hostname myserver.local
+      secure no
+      {{- end }}
+      shared_key {{ .Values.backend.forward.shared_key }}
+    </source>
+
+    <match **>
+      @type stdout
+    </match>
diff --git a/charts/stable/fluent-bit/templates/tests/test.yaml b/charts/stable/fluent-bit/templates/tests/test.yaml
new file mode 100644
index 0000000..c030875
--- /dev/null
+++ b/charts/stable/fluent-bit/templates/tests/test.yaml
@@ -0,0 +1,53 @@
+{{- if or (eq .Values.backend.type "forward") (and (eq .Values.backend.type "es") (eq .Values.backend.es.tls "off")) }}
+apiVersion: v1
+kind: Pod
+metadata:
+  name: {{ template "fluent-bit.fullname" . }}-test
+  labels:
+    app: {{ template "fluent-bit.fullname" . 
}} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + heritage: "{{ .Release.Service }}" + release: "{{ .Release.Name }}" + annotations: + "helm.sh/hook": test-success +spec: + initContainers: + - name: test-framework + image: "{{ .Values.testFramework.image}}:{{ .Values.testFramework.tag }}" + command: + - "bash" + - "-c" + - | + set -ex + # copy bats to tools dir + cp -R /usr/local/libexec/ /tools/bats/ + volumeMounts: + - mountPath: /tools + name: tools + containers: + - name: {{ .Release.Name }}-test + {{- if eq .Values.backend.type "forward"}} + image: "fluent/fluentd:v1.4-debian-1" + {{- else }} + image: "dwdraju/alpine-curl-jq" + {{- end }} + command: ["/tools/bats/bats", "-t", "/tests/run.sh"] + {{- if and (eq .Values.backend.forward.tls "on") (eq .Values.backend.type "forward") }} + securityContext: + # run as root to install fluent gems + runAsUser: 0 + {{- end }} + volumeMounts: + - mountPath: /tests + name: tests + readOnly: true + - mountPath: /tools + name: tools + volumes: + - name: tests + configMap: + name: {{ template "fluent-bit.fullname" . }}-test + - name: tools + emptyDir: {} + restartPolicy: Never +{{- end }} diff --git a/charts/stable/fluent-bit/values.yaml b/charts/stable/fluent-bit/values.yaml new file mode 100644 index 0000000..27f4dc6 --- /dev/null +++ b/charts/stable/fluent-bit/values.yaml @@ -0,0 +1,287 @@ +# Minikube stores its logs in a separate directory. +# Enable if you install the chart in minikube. +on_minikube: false + +image: + fluent_bit: + repository: fluent/fluent-bit + tag: 1.3.2 + pullPolicy: Always + +testFramework: + image: "dduportal/bats" + tag: "0.4.0" + +nameOverride: "" +fullnameOverride: "" + +# When enabled, exposes json and prometheus metrics on {{ .Release.Name }}-metrics service +metrics: + enabled: false + service: + # labels: + # key: value + annotations: {} + # In order for Prometheus to consume metrics automatically use the following annotations: + # prometheus.io/path: "/api/v1/metrics/prometheus" + # prometheus.io/port: "2020" + # prometheus.io/scrape: "true" + port: 2020 + type: ClusterIP + serviceMonitor: + enabled: false + additionalLabels: {} + # namespace: monitoring + # interval: 30s + # scrapeTimeout: 10s + +# When enabled, fluent-bit will keep track of tailing offsets across pod restarts. +trackOffsets: false + +## PriorityClassName +## Ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass +priorityClassName: "" + +backend: + type: forward + forward: + host: fluentd + port: 24284 + tls: "off" + tls_verify: "on" + tls_debug: 1 + shared_key: + es: + host: elasticsearch + port: 9200 + # Elastic Index Name + index: kubernetes_cluster + type: flb_type + logstash_prefix: kubernetes_cluster + replace_dots: "On" + logstash_format: "On" + time_key: "@timestamp" + # Optional username credential for Elastic X-Pack access + http_user: + # Password for user defined in HTTP_User + http_passwd: + # Optional TLS encryption to ElasticSearch instance + tls: "off" + tls_verify: "on" + # TLS certificate for the Elastic (in PEM format). Use if tls=on and tls_verify=on. 
tls_ca: ""
+    # TLS debugging levels = 1-4
+    tls_debug: 1
+  splunk:
+    host: 127.0.0.1
+    port: 8088
+    token: ""
+    send_raw: "on"
+    tls: "on"
+    tls_verify: "off"
+    tls_debug: 1
+    message_key: "kubernetes"
+  stackdriver: {}
+
+  ##
+  ## Ref: http://fluentbit.io/documentation/current/output/http.html
+  ##
+  http:
+    host: 127.0.0.1
+    port: 80
+    uri: "/"
+    http_user:
+    http_passwd:
+    tls: "off"
+    tls_verify: "on"
+    tls_debug: 1
+    ## Specify the data format to be used in the HTTP request body
+    ## Can be either 'msgpack' or 'json'
+    format: msgpack
+    headers: []
+
+parsers:
+  enabled: false
+  ## List the respective parsers in key: value format per entry
+  ## Regex required fields are name and regex. JSON and Logfmt required field
+  ## is name.
+  regex: []
+  logfmt: []
+  ## json parser config can be defined by providing an extraEntries field.
+  ## The following entry:
+  ## json:
+  ##   - extraEntries: |
+  ##       Decode_Field_As escaped log do_next
+  ##       Decode_Field_As json log
+  ##
+  ## translates into
+  ##
+  ##   Command          | Decoder | Field | Optional Action   |
+  ##   =================|=========|=======|===================|
+  ##   Decode_Field_As    escaped   log     do_next
+  ##   Decode_Field_As    json      log
+  ##
+  json: []
+
+env: []
+
+## Annotations to add to the DaemonSet's Pods
+podAnnotations: {}
+
+## By default, different config 'files' are provided
+## (fluent-bit.conf, custom_parsers.conf). This defeats live changes
+## to the ConfigMap (since it uses subPath). If this
+## variable is set, the user is assumed to have provided,
+## in 'existingConfigMap', the entire config (etc/*) of fluent-bit,
+## parsers and system config. In this case, no subPath is
+## used
+fullConfigMap: false
+
+## ConfigMap override where fullname is {{.Release.Name}}-{{.Values.existingConfigMap}}
+## Defining existingConfigMap will cause templates/config.yaml
+## to NOT generate a ConfigMap resource
+##
+existingConfigMap: ""
+
+
+# NOTE If you want to add extra sections, add them here, in between the includes,
+# wherever they need to go. Section order matters.
+
+rawConfig: |-
+  @INCLUDE fluent-bit-service.conf
+  @INCLUDE fluent-bit-input.conf
+  @INCLUDE fluent-bit-filter.conf
+  @INCLUDE fluent-bit-output.conf
+
+
+# WARNING!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+# This is to add extra entries to an existing section, NOT for adding new sections
+# Do not submit bugs against indent being wrong. Add your new sections to rawConfig
+# instead.
+#
+extraEntries:
+  input: |-
+#     # >=1 additional Key/Value entries for the existing Input section
+  filter: |-
+#     # >=1 additional Key/Value entries for the existing Filter section
+  output: |-
+#     # >=1 additional Key/Value entries for the existing Output section
+# WARNING!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+
+## Extra ports to add to the daemonset ports section
+extraPorts: []
+
+## Extra volumes containing additional files required for fluent-bit to work
+## (eg. CA certificates)
+## Ref: https://kubernetes.io/docs/concepts/storage/volumes/
+##
+extraVolumes: []
+
+## Extra volume mounts for the fluent-bit pod.
+## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-volume-storage/
+##
+extraVolumeMounts: []
+
+resources: {}
+  # limits:
+  #   cpu: 100m
+  #   memory: 128Mi
+  # requests:
+  #   cpu: 10m
+  #   memory: 8Mi
+
+# When enabled, pods will bind to the node's network namespace.
+hostNetwork: false
+
+# Which DNS policy to use for the pod.
+# Consider switching to 'ClusterFirstWithHostNet' when 'hostNetwork' is enabled.
+dnsPolicy: ClusterFirst
+
+## Node tolerations for fluent-bit scheduling to nodes with taints
+## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+##
+tolerations: []
+# - key: "key"
+#   operator: "Equal|Exists"
+#   value: "value"
+#   effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"
+
+## Node labels for fluent-bit pod assignment
+## Ref: https://kubernetes.io/docs/user-guide/node-selection/
+##
+nodeSelector: {}
+affinity: {}
+
+service:
+  flush: 1
+  logLevel: info
+
+input:
+  tail:
+    memBufLimit: 5MB
+    parser: docker
+    path: /var/log/containers/*.log
+    ignore_older: ""
+  systemd:
+    enabled: false
+    filters:
+      systemdUnit:
+        - docker.service
+        - kubelet.service
+        - node-problem-detector.service
+    maxEntries: 1000
+    readFromTail: true
+    tag: host.*
+
+filter:
+  kubeURL: https://kubernetes.default.svc:443
+  kubeCAFile: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
+  kubeTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
+  kubeTag: kube
+  kubeTagPrefix: kube.var.log.containers.
+
+# If true, check whether the log field content is a JSON string map; if so,
+# append the map fields as part of the log structure.
+  mergeJSONLog: true
+
+# If set, all unpacked keys from mergeJSONLog (Merge_Log) will be packed under
+# the key name specified on mergeLogKey (Merge_Log_Key)
+  mergeLogKey: ""
+
+# If true, enable support for a pod annotation of
+# fluentbit.io/parser: parser_name. parser_name must be the name
+# of a parser contained within parsers.conf
+  enableParser: true
+
+# If true, enable support for a pod annotation of
+# fluentbit.io/exclude: true. If present, discard logs from that pod.
+  enableExclude: true
+
+rbac:
+  # Specifies whether RBAC resources should be created
+  create: true
+  # Specifies whether a PodSecurityPolicy should be created
+  pspEnabled: false
+
+taildb:
+  directory: /var/lib/fluent-bit
+
+serviceAccount:
+  # Specifies whether a ServiceAccount should be created
+  create: true
+  # The name of the ServiceAccount to use.
+ # If not set and create is true, a name is generated using the fullname template + name: + +## Specifies security settings for a container +## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container +securityContext: {} + # securityContext: + # privileged: true + +## Specifies security settings for a pod +## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod +podSecurityContext: {} + # podSecurityContext: + # runAsUser: 1000 diff --git a/fetch.sh b/fetch.sh index e630c7b..4a99f55 100755 --- a/fetch.sh +++ b/fetch.sh @@ -24,6 +24,7 @@ helm init --client-only # add upstream repositories helm repo add fluxcd https://charts.fluxcd.io helm repo add istio https://storage.googleapis.com/istio-release/releases/1.3.3/charts/ +helm repo add elastic https://helm.elastic.co # reset and redownload latest stable charts rm -rf $ROOT_PATH/charts/stable/* @@ -33,6 +34,7 @@ helm fetch --untar --untardir $ROOT_PATH/charts/stable stable/grafana helm fetch --untar --untardir $ROOT_PATH/charts/stable stable/sealed-secrets helm fetch --untar --untardir $ROOT_PATH/charts/stable stable/cert-manager helm fetch --untar --untardir $ROOT_PATH/charts/stable stable/velero +helm fetch --untar --untardir $ROOT_PATH/charts/stable stable/fluent-bit # reset and redownload flux charts rm -rf $ROOT_PATH/charts/fluxcd/* @@ -43,3 +45,8 @@ helm fetch --untar --untardir $ROOT_PATH/charts/fluxcd fluxcd/helm-operator rm -rf $ROOT_PATH/charts/istio/* helm fetch --untar --untardir $ROOT_PATH/charts/istio istio/istio-init helm fetch --untar --untardir $ROOT_PATH/charts/istio istio/istio + +# reset and redownload elastic charts +rm -rf $ROOT_PATH/charts/elastic/* +helm fetch --untar --untardir $ROOT_PATH/charts/elastic elastic/elasticsearch +helm fetch --untar --untardir $ROOT_PATH/charts/elastic elastic/kibana diff --git a/platform/global/helm/gitops-system/helm-operator.yaml b/platform/global/helm/gitops-system/helm-operator.yaml index a8d0de1..c033a43 100644 --- a/platform/global/helm/gitops-system/helm-operator.yaml +++ b/platform/global/helm/gitops-system/helm-operator.yaml @@ -5,7 +5,7 @@ image: repository: docker.io/fluxcd/helm-operator tag: 1.0.0-rc3 -createCRD: false +createCRD: true updateChartDeps: true logFormat: fmt