Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Automated Prometheus version update 0.66.0-48.1.2 #4634

Merged
merged 1 commit into from
Jul 24, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
9 changes: 9 additions & 0 deletions addons/prometheus/0.66.0-48.1.2/Manifest
Original file line number Diff line number Diff line change
@@ -0,0 +1,9 @@
image alertmanager quay.io/prometheus/alertmanager:v0.25.0
image grafana docker.io/grafana/grafana:10.0.2
image k8s-sidecar quay.io/kiwigrid/k8s-sidecar:1.24.6
image kube-state-metrics registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.9.2
image node-exporter quay.io/prometheus/node-exporter:v1.6.0
image prometheus quay.io/prometheus/prometheus:v2.45.0
image prometheus-adapter registry.k8s.io/prometheus-adapter/prometheus-adapter:v0.10.0
image prometheus-config-reloader quay.io/prometheus-operator/prometheus-config-reloader:v0.66.0
image prometheus-operator quay.io/prometheus-operator/prometheus-operator:v0.66.0
39,052 changes: 39,052 additions & 0 deletions addons/prometheus/0.66.0-48.1.2/crds/crds.yaml

Large diffs are not rendered by default.

28 changes: 28 additions & 0 deletions addons/prometheus/0.66.0-48.1.2/crds/kustomization.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,28 @@
# Kustomization for the prometheus-operator CRDs.
resources:
- crds.yaml

# Disable preserveUnknownFields on the CRDs that may carry the field from
# older apiextensions versions (see preserveUnknown.yaml for the patch body).
# NOTE(review): patchesJson6902 is deprecated in recent kustomize releases in
# favor of 'patches'; kept for compatibility with the kustomize version in use.
patchesJson6902:
- target:
    group: "apiextensions.k8s.io"
    version: v1 # apiVersion
    kind: CustomResourceDefinition
    name: alertmanagers.monitoring.coreos.com
  path: preserveUnknown.yaml
- target:
    group: "apiextensions.k8s.io"
    version: v1 # apiVersion
    kind: CustomResourceDefinition
    name: prometheuses.monitoring.coreos.com
  path: preserveUnknown.yaml
- target:
    group: "apiextensions.k8s.io"
    version: v1 # apiVersion
    kind: CustomResourceDefinition
    name: podmonitors.monitoring.coreos.com
  path: preserveUnknown.yaml
- target:
    group: "apiextensions.k8s.io"
    version: v1 # apiVersion
    kind: CustomResourceDefinition
    name: servicemonitors.monitoring.coreos.com
  path: preserveUnknown.yaml
3 changes: 3 additions & 0 deletions addons/prometheus/0.66.0-48.1.2/crds/preserveUnknown.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
# JSON 6902 patch applied to each monitoring.coreos.com CRD (see
# kustomization.yaml): apiextensions.k8s.io/v1 requires
# spec.preserveUnknownFields to be false so unknown fields are pruned.
- op: add
  path: "/spec/preserveUnknownFields"
  value: false
34 changes: 34 additions & 0 deletions addons/prometheus/0.66.0-48.1.2/host-preflight.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,34 @@
# Host preflight: verifies TCP port 9100 is usable for the node-exporter
# daemonset before installation. Skipped on upgrades (the port is then
# legitimately occupied by the running node-exporter).
apiVersion: troubleshoot.sh/v1beta2
kind: HostPreflight
metadata:
  name: prometheus
spec:
  collectors:
    - tcpPortStatus:
        collectorName: "Node Exporter Metrics Server TCP Port Status"
        port: 9100
        exclude: '{{kurl .IsUpgrade }}'

  analyzers:
    - tcpPortStatus:
        checkName: "Node Exporter Metrics Server TCP Port Status"
        collectorName: "Node Exporter Metrics Server TCP Port Status"
        exclude: '{{kurl .IsUpgrade }}'
        outcomes:
          - fail:
              when: "connection-refused"
              message: Connection to port 9100 was refused. This is likely to be a routing problem since this preflight configures a test server to listen on this port.
          - warn:
              when: "address-in-use"
              message: Another process was already listening on port 9100.
          - fail:
              when: "connection-timeout"
              message: Timed out connecting to port 9100. Check your firewall.
          - fail:
              when: "error"
              message: Unexpected port status
          - pass:
              when: "connected"
              message: Port 9100 is available
          # catch-all for any status not matched above
          - warn:
              message: Unexpected port status
146 changes: 146 additions & 0 deletions addons/prometheus/0.66.0-48.1.2/install.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,146 @@
# shellcheck disable=SC2148

function prometheus() {
    # Installs or upgrades the Prometheus addon (operator, CRDs, exporters,
    # grafana, alertmanager) into the "monitoring" namespace.
    # Globals read: DIR, PROMETHEUS_SERVICE_TYPE
    local src="$DIR/addons/prometheus/0.66.0-48.1.2"
    local dst="$DIR/kustomize/prometheus"

    local operatorsrc="$src/operator"
    local operatordst="$dst/operator"

    local crdssrc="$src/crds"
    local crdsdst="$dst/crds"

    cp -r "$operatorsrc/" "$operatordst/"
    cp -r "$crdssrc/" "$crdsdst/"

    grafana_admin_secret "$src" "$operatordst"

    # Server-side apply is needed here because the CRDs are too large to keep in metadata
    # https://github.com/prometheus-community/helm-charts/issues/1500
    # Also delete any existing last-applied-configuration annotations for pre-122 clusters
    kubectl get crd | grep coreos.com | awk '{ print $1 }' | xargs -I {} kubectl patch crd {} --type=json -p='[{"op": "remove", "path": "/metadata/annotations/kubectl.kubernetes.io~1last-applied-configuration"}]' 2>/dev/null || true
    kubectl apply --server-side --force-conflicts -k "$crdsdst/"
    spinner_until -1 prometheus_crd_ready

    prometheus_rook_ceph "$operatordst"
    prometheus_longhorn "$operatordst"

    # remove deployments and daemonsets that had labelselectors change (as those are immutable)
    kubectl delete deployment -n monitoring kube-state-metrics || true
    kubectl delete daemonset -n monitoring node-exporter || true
    kubectl delete deployment -n monitoring grafana || true
    kubectl delete deployment -n monitoring prometheus-adapter || true

    # remove things that had names change during upgrades
    kubectl delete alertmanager -n monitoring main || true

    # remove services that had a clusterip change
    kubectl delete service -n monitoring kube-state-metrics || true
    kubectl delete service -n monitoring prometheus-operator || true

    # remove nodeport services that had names change
    kubectl delete service -n monitoring grafana || true
    kubectl delete service -n monitoring alertmanager-main || true
    kubectl delete service -n monitoring prometheus-k8s || true

    # if the prometheus-node-exporter daemonset exists and has a release labelSelector set, delete it
    if kubernetes_resource_exists monitoring daemonset prometheus-node-exporter; then
        # SC2155: declare and assign separately so the kubectl exit status is not masked
        local promNodeExporterLabelSelector
        promNodeExporterLabelSelector=$(kubectl get daemonset -n monitoring prometheus-node-exporter --output="jsonpath={.spec.selector.matchLabels.release}") || true
        if [ -n "$promNodeExporterLabelSelector" ]; then
            kubectl delete daemonset -n monitoring prometheus-node-exporter || true
        fi
    fi

    # if the prometheus-operator deployment exists and has the wrong labelSelectors set, delete it
    if kubernetes_resource_exists monitoring deployment prometheus-operator; then
        local promOperatorLabelSelector
        promOperatorLabelSelector=$(kubectl get deployment -n monitoring prometheus-operator --output="jsonpath={.spec.selector.matchLabels.release}") || true
        if [ -n "$promOperatorLabelSelector" ]; then
            kubectl delete deployment -n monitoring prometheus-operator || true
        fi

        promOperatorLabelSelector=$(kubectl get deployment -n monitoring prometheus-operator --output="jsonpath={.spec.selector.matchLabels.app\.kubernetes\.io/component}") || true
        if [ -n "$promOperatorLabelSelector" ]; then
            kubectl delete deployment -n monitoring prometheus-operator || true
        fi
    fi

    # the metrics service has been renamed to v1beta1.custom.metrics.k8s.io, delete the old
    if kubectl get --no-headers apiservice v1beta1.metrics.k8s.io 2>/dev/null | grep -q 'monitoring/prometheus-adapter' ; then
        kubectl delete apiservice v1beta1.metrics.k8s.io
    fi

    # change ClusterIP services to NodePorts if required (NodePort is the default)
    if [ -z "$PROMETHEUS_SERVICE_TYPE" ] || [ "$PROMETHEUS_SERVICE_TYPE" = "NodePort" ] ; then
        cp "$src/nodeport-services.yaml" "$operatordst"
        insert_patches_strategic_merge "$operatordst/kustomization.yaml" nodeport-services.yaml
    fi

    kubectl apply -k "$operatordst/"
}

# Populated by grafana_admin_secret() when a new secret is generated; read by
# prometheus_outro() to display the credentials.
GRAFANA_ADMIN_USER=
GRAFANA_ADMIN_PASS=
function grafana_admin_secret() {
    # Generates a random grafana admin password and renders the grafana-admin
    # secret into the kustomize directory, unless the secret already exists.
    # $1 - addon source directory (contains tmpl-grafana-secret.yaml)
    # $2 - kustomize destination directory
    if kubernetes_resource_exists monitoring secret grafana-admin; then
        return 0
    fi

    local src="$1"
    local grafanadst="$2"

    GRAFANA_ADMIN_USER=admin
    # 'head -c9' closes the pipe early, so 'tr' exits via SIGPIPE (status 141);
    # guard with '|| true' so this cannot trip 'set -e'/'set -o pipefail'.
    GRAFANA_ADMIN_PASS=$(< /dev/urandom tr -dc A-Za-z0-9 | head -c9 || true)

    insert_resources "$grafanadst/kustomization.yaml" grafana-secret.yaml

    render_yaml_file "$src/tmpl-grafana-secret.yaml" > "$grafanadst/grafana-secret.yaml"
}

function prometheus_outro() {
    # Prints post-install access information for the Prometheus, Grafana and
    # Alertmanager UIs, plus generated Grafana credentials when applicable.
    # Globals read: PROMETHEUS_SERVICE_TYPE, GRAFANA_ADMIN_USER,
    # GRAFANA_ADMIN_PASS, GREEN, NC
    printf "\n"
    printf "\n"
    if [ -z "$PROMETHEUS_SERVICE_TYPE" ] || [ "$PROMETHEUS_SERVICE_TYPE" = "NodePort" ] ; then
        # SC2059: keep variables out of the printf format string
        printf "The UIs of Prometheus, Grafana and Alertmanager have been exposed on NodePorts %s30900%s, %s30902%s and %s30903%s respectively.\n" \
            "$GREEN" "$NC" "$GREEN" "$NC" "$GREEN" "$NC"
    else
        printf "The UIs of Prometheus, Grafana and Alertmanager have been exposed on internal ClusterIP services.\n"
    fi
    if [ -n "$GRAFANA_ADMIN_PASS" ]; then
        printf "\n"
        printf "To access Grafana use the generated user:password of %s%s:%s .%s\n" \
            "$GREEN" "${GRAFANA_ADMIN_USER:-admin}" "$GRAFANA_ADMIN_PASS" "$NC"
    fi
    printf "\n"
    printf "\n"
}

function prometheus_crd_ready() {
    # Readiness probe used by spinner_until: succeeds once the operator CRDs
    # are registered and their resource endpoints are served.
    # https://github.com/coreos/kube-prometheus#quickstart
    kubectl get customresourcedefinitions servicemonitors.monitoring.coreos.com &>/dev/null || return 1
    kubectl get servicemonitors --all-namespaces &>/dev/null || return 1
    kubectl get customresourcedefinitions prometheuses.monitoring.coreos.com &>/dev/null || return 1
    kubectl get prometheuses --all-namespaces &>/dev/null || return 1
    return 0
}

function prometheus_rook_ceph() {
    # Adds the rook-ceph monitoring rolebindings to the kustomization when the
    # rook-ceph namespace exists.
    # $1 - kustomize destination directory
    local dst="$1"

    # Query the namespace directly instead of 'get ns | grep -q rook-ceph',
    # which substring-matches any namespace merely containing "rook-ceph".
    if kubectl get namespace rook-ceph &>/dev/null; then
        insert_resources "$dst/kustomization.yaml" rook-ceph-rolebindings.yaml
    fi
}

function prometheus_longhorn() {
    # Adds the Longhorn monitoring resources to the kustomization when the
    # longhorn-system namespace exists.
    # $1 - kustomize destination directory
    local kustomize_dst="$1"

    kubectl get ns | grep -q longhorn-system || return 0
    insert_resources "$kustomize_dst/kustomization.yaml" longhorn.yaml
}
37 changes: 37 additions & 0 deletions addons/prometheus/0.66.0-48.1.2/nodeport-services.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,37 @@
# Strategic-merge patch converting the monitoring UIs' ClusterIP services to
# NodePorts (applied when PROMETHEUS_SERVICE_TYPE is unset or "NodePort").
apiVersion: v1
kind: Service
metadata:
  name: prometheus-alertmanager
  namespace: monitoring
spec:
  ports:
    - name: web
      port: 9093
      protocol: TCP
      nodePort: 30903
  type: "NodePort"
---
apiVersion: v1
kind: Service
metadata:
  name: prometheus-k8s
  namespace: monitoring
spec:
  ports:
    - name: web
      port: 9090
      protocol: TCP # explicit for consistency with the other services (TCP is the default)
      nodePort: 30900
  type: "NodePort"
---
apiVersion: v1
kind: Service
metadata:
  name: grafana
  namespace: monitoring
spec:
  type: "NodePort"
  ports:
    - name: service
      port: 80
      protocol: TCP
      nodePort: 30902
Loading
Loading