From c59d4555faa7645a1f18ef05d70c9a26c51f2d90 Mon Sep 17 00:00:00 2001 From: Thibault Richard Date: Mon, 22 Jul 2019 16:12:54 +0200 Subject: [PATCH] Update doc (#1319) (#1329) * Update persistent storage section * Update kibana localhost url to use https * Update k8s resources names in accessing-services doc * Mention SSL browser warning * Fix bulleted list --- docs/accessing-services.asciidoc | 17 +++++++++-------- docs/k8s-quickstart.asciidoc | 8 ++++---- 2 files changed, 13 insertions(+), 12 deletions(-) diff --git a/docs/accessing-services.asciidoc b/docs/accessing-services.asciidoc index edcf2da913..8a040bccb9 100644 --- a/docs/accessing-services.asciidoc +++ b/docs/accessing-services.asciidoc @@ -25,7 +25,7 @@ To access Elasticsearch and Kibana, the operator manages a default user named `e [source,sh] ---- -> kubectl get secret hulk-elastic-user -o go-template='{{.data.elastic | base64decode }}' +> kubectl get secret hulk-es-elastic-user -o go-template='{{.data.elastic | base64decode }}' 42xyz42citsale42xyz42 ---- @@ -141,8 +141,9 @@ spec: You can bring your own certificate to configure TLS to ensure that communication between HTTP clients and the cluster is encrypted. Create a Kubernetes secret with: -. tls.crt: the certificate (or a chain). -. tls.key: the private key to the first certificate in the certificate chain. + +- tls.crt: the certificate (or a chain). +- tls.key: the private key to the first certificate in the certificate chain. 
[source,sh] ---- @@ -178,7 +179,7 @@ NAME=hulk kubectl get secret "$NAME-ca" -o go-template='{{index .data "ca.pem" | base64decode }}' > ca.pem PW=$(kubectl get secret "$NAME-elastic-user" -o go-template='{{.data.elastic | base64decode }}') -curl --cacert ca.pem -u elastic:$PW https://$NAME-es:9200/ +curl --cacert ca.pem -u elastic:$PW https://$NAME-es-http:9200/ ---- *Outside the Kubernetes cluster* @@ -191,11 +192,11 @@ curl --cacert ca.pem -u elastic:$PW https://$NAME-es:9200/ ---- NAME=hulk -kubectl get secret "$NAME-ca" -o go-template='{{index .data "ca.pem" | base64decode }}' > ca.pem -IP=$(kubectl get svc "$NAME-es" -o jsonpath='{.status.loadBalancer.ingress[].ip}') -PW=$(kubectl get secret "$NAME-elastic-user" -o go-template='{{.data.elastic | base64decode }}') +kubectl get secret "$NAME-es-http-certs-public" -o go-template='{{index .data "tls.crt" | base64decode }}' > tls.crt +IP=$(kubectl get svc "$NAME-es-http" -o jsonpath='{.status.loadBalancer.ingress[].ip}') +PW=$(kubectl get secret "$NAME-es-elastic-user" -o go-template='{{.data.elastic | base64decode }}') -curl --cacert ca.pem -u elastic:$PW https://$IP:9200/ +curl --cacert tls.crt -u elastic:$PW https://$IP:9200/ ---- Now you should get this message: diff --git a/docs/k8s-quickstart.asciidoc b/docs/k8s-quickstart.asciidoc index 5fa9d21aa0..e741a43bd6 100644 --- a/docs/k8s-quickstart.asciidoc +++ b/docs/k8s-quickstart.asciidoc @@ -230,7 +230,7 @@ Use `kubectl port-forward` to access Kibana from your local workstation: kubectl port-forward service/quickstart-kb-http 5601 ---- + -Open `http://localhost:5601` in your browser. +Open `https://localhost:5601` in your browser. Your browser will show a warning because the self-signed certificate configured by default is not verified by a third-party certificate authority and is not trusted by your browser. 
You can either configure a link:k8s-accessing-elastic-services.html#k8s-setting-up-your-own-certificate[valid certificate] or acknowledge the warning for the purposes of this quickstart. + Login with the `elastic` user. Retrieve its password with: + @@ -267,11 +267,11 @@ EOF [float] [id="{p}-persistent-storage"] -=== Use persistent storage +=== Update persistent storage -Now that you have completed the quickstart, you can try out more features like using persistent storage. The cluster that you deployed in this quickstart uses a default persistent volume claim of 1GiB, without a storage class set. This means that the default storage class defined in the Kubernetes cluster is the one that will be provisioned. +Now that you have completed the quickstart, you can try out more features like tweaking persistent storage. The cluster that you deployed in this quickstart uses a default persistent volume claim of 1GiB, without a storage class set. This means that the default storage class defined in the Kubernetes cluster is the one that will be provisioned. -You can request a `PersistentVolumeClaim` in the cluster specification, to target any `PersistentVolume` class available in your Kubernetes cluster: +You can request a `PersistentVolumeClaim` with a larger size in the Elasticsearch specification or target any `PersistentVolume` class available in your Kubernetes cluster: [source,yaml] ----