From 304969136b4a78585ebb3b15454b4293305b838d Mon Sep 17 00:00:00 2001 From: Ashleigh Brennan Date: Thu, 2 Nov 2023 10:26:55 -0500 Subject: [PATCH] OBSDOCS-135: Update pod names --- modules/cluster-logging-deploy-console.adoc | 50 +++++++++++---------- 1 file changed, 26 insertions(+), 24 deletions(-) diff --git a/modules/cluster-logging-deploy-console.adoc b/modules/cluster-logging-deploy-console.adoc index e63b858d27a8..e37b34d81c56 100644 --- a/modules/cluster-logging-deploy-console.adoc +++ b/modules/cluster-logging-deploy-console.adoc @@ -93,15 +93,15 @@ configuring {logging} components for information on modifications you can make t + [source,yaml] ---- -apiVersion: "logging.openshift.io/v1" -kind: "ClusterLogging" +apiVersion: logging.openshift.io/v1 +kind: ClusterLogging metadata: - name: "instance" <1> - namespace: "openshift-logging" + name: instance <1> + namespace: openshift-logging spec: - managementState: "Managed" <2> + managementState: Managed <2> logStore: - type: "elasticsearch" <3> + type: elasticsearch <3> retentionPolicy: <4> application: maxAge: 1d @@ -112,27 +112,27 @@ spec: elasticsearch: nodeCount: 3 <5> storage: - storageClassName: "" <6> + storageClassName: "" <6> size: 200G resources: <7> limits: - memory: "16Gi" + memory: 16Gi requests: - memory: "16Gi" + memory: 16Gi proxy: <8> resources: limits: memory: 256Mi requests: memory: 256Mi - redundancyPolicy: "SingleRedundancy" + redundancyPolicy: SingleRedundancy visualization: - type: "kibana" <9> + type: kibana <9> kibana: replicas: 1 collection: logs: - type: "fluentd" <10> + type: fluentd <10> fluentd: {} ---- <1> The name must be `instance`. @@ -180,19 +180,21 @@ The number of primary shards for the index templates is equal to the number of E .. Select the *openshift-logging* project. 
+ -You should see several pods for OpenShift Logging, Elasticsearch, Fluentd, and Kibana similar to the following list: +Confirm that pods exist for the Operator and the Elasticsearch, collector, and Kibana components: + -* cluster-logging-operator-cb795f8dc-xkckc -* collector-pb2f8 -* elasticsearch-cdm-b3nqzchd-1-5c6797-67kfz -* elasticsearch-cdm-b3nqzchd-2-6657f4-wtprv -* elasticsearch-cdm-b3nqzchd-3-588c65-clg7g -* fluentd-2c7dg -* fluentd-9z7kk -* fluentd-br7r2 -* fluentd-fn2sb -* fluentd-zqgqx -* kibana-7fb4fd4cc9-bvt4p +* cluster-logging-operator-595f9bf9c4-txrp4 +* collector-29bw8 +* collector-4kvnl +* collector-7rr7w +* collector-9m2xp +* collector-xt45j +* elasticsearch-cdm-g559ha9u-1-659fd594bf-pcm2f +* elasticsearch-cdm-g559ha9u-2-66455f68db-v46n6 +* elasticsearch-cdm-g559ha9u-3-85696bcf55-g7tf8 +* elasticsearch-im-app-27934020-9ltxl +* elasticsearch-im-audit-27934020-86cdt +* elasticsearch-im-infra-27934020-6lrgm +* kibana-5c6b7cd56-66c9l .Troubleshooting * If Alertmanager logs alerts such as `Prometheus could not scrape fluentd for more than 10m`, make sure that `openshift.io/cluster-monitoring` is set to `"true"` for the OpenShift Elasticsearch Operator and OpenShift Logging Operator. See the Red Hat KnowledgeBase for more information: link:https://access.redhat.com/solutions/5692801[Prometheus could not scrape fluentd for more than 10m alert in Alertmanager]