diff --git a/.chainsaw.yaml b/.chainsaw.yaml new file mode 100644 index 00000000..e3644593 --- /dev/null +++ b/.chainsaw.yaml @@ -0,0 +1,12 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/configuration-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Configuration +metadata: + name: configuration +spec: + timeouts: + assert: 5m0s + cleanup: 5m0s + delete: 5m0s + error: 5m0s + exec: 5m0s diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 75ee545b..13a46e0e 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -1,3 +1,16 @@ + + ### General: * [ ] Have you removed all sensitive information, including but not limited to access keys and passwords? diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 00000000..70f9fe7f --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,38 @@ +version: 2 +updates: + +# Go - root directory + - package-ecosystem: "gomod" + directory: "/" + schedule: + interval: "weekly" + ## group all dependencies with a k8s.io prefix into a single PR. + groups: + kubernetes: + patterns: [ "k8s.io/*", "sigs.k8s.io/*" ] + otel: + patterns: ["go.opentelemetry.io/*"] + commit-message: + prefix: ":seedling:" + labels: + - "dependencies" + +# Docker + - package-ecosystem: "docker" + directory: "/" + schedule: + interval: "weekly" + commit-message: + prefix: ":seedling:" + labels: + - "dependencies" + +# github-actions + - package-ecosystem: "github-actions" + directory: "/" + schedule: + interval: "weekly" + commit-message: + prefix: ":seedling:" + labels: + - "dependencies" diff --git a/.github/filters.yml b/.github/filters.yml new file mode 100644 index 00000000..a790d19d --- /dev/null +++ b/.github/filters.yml @@ -0,0 +1,3 @@ +# Any file that is not a doc *.md file +src: + - "!**/*.md" diff --git a/.github/labels.yml b/.github/labels.yml index f9b89f11..7b91ed41 100644 --- a/.github/labels.yml +++ b/.github/labels.yml @@ -1,24 +1,35 @@ -- name: added-feature +# PR Labels +- name: new-feature description: for new features in the changelog. - color: a2eeef -- name: changed - description: for changes in existing functionality in the changelog. - color: a2eeef -- name: deprecated - description: for soon-to-be removed features in the changelog. - color: e4e669 -- name: removed - description: for now removed features in the changelog. - color: e4e669 + color: 225fee +- name: improvement + description: for improvements in existing functionality in the changelog. + color: 22ee47 +- name: repo-ci-improvement + description: for improvements in the repository or CI workflow in the changelog. + color: c922ee - name: bugfix description: for any bug fixes in the changelog. - color: d73a4a -- name: security - description: for vulnerabilities in the changelog. - color: dd4739 -- name: bug - description: Something isn't working in this issue. - color: d73a4a + color: ed8e21 +- name: documentation + description: for updates to the documentation in the changelog. + color: d3e1e6 +- name: dependencies + description: dependency updates including security fixes + color: 5c9dff +- name: testing + description: for updates to the testing suite in the changelog. + color: 933ac9 +- name: breaking-change + description: for breaking changes in the changelog. + color: ff0000 +- name: ignore-for-release + description: PRs you do not want to render in the changelog. 
+ color: 7b8eac +# Issue Labels - name: enhancement - description: New feature request in this issue. - color: a2eeef + description: issues that request an enhancement. + color: 22ee47 +- name: bug + description: issues that report a bug. + color: ed8e21 diff --git a/.github/release-drafter.yml b/.github/release-drafter.yml new file mode 100644 index 00000000..d880f84a --- /dev/null +++ b/.github/release-drafter.yml @@ -0,0 +1,67 @@ +name-template: 'v$NEXT_PATCH_VERSION' +tag-template: 'v$NEXT_PATCH_VERSION' +exclude-labels: + - ignore-for-release +categories: + - title: ⚠️ Breaking Change + labels: + - breaking-change + - title: 🐛 Bug Fixes + labels: + - bugfix + - title: 🚀 New Features + labels: + - new-feature + - title: 💡 Improvements + labels: + - improvement + - title: 🧪 Testing Improvements + labels: + - testing + - title: ⚙️ Repo/CI Improvements + labels: + - repo-ci-improvement + - title: 📖 Documentation + labels: + - documentation + - title: 📦 Dependency Updates + labels: + - dependencies + - title: Other Changes + labels: + - "*" +autolabeler: + - label: 'breaking-change' + title: + - '/.*\[breaking\].+/' + - label: 'deprecation' + title: + - '/.*\[deprecation\].+/' + - label: 'bugfix' + title: + - '/.*\[fix\].+/' + - label: 'new-feature' + title: + - '/.*\[feat\].+/' + - label: 'improvement' + title: + - '/.*\[improvement\].+/' + - label: 'testing' + title: + - '/.*\[test\].+/' + - label: 'repo-ci-improvement' + title: + - '/.*\[CI\].+/' + - '/.*\[ci\].+/' + - label: 'documentation' + title: + - '/.*\[docs\].+/' + - label: 'dependencies' + title: + - '/.*\[deps\].+/' + +change-template: '- $TITLE by @$AUTHOR in #$NUMBER' +no-changes-template: "- No changes" +template: | + ## What's Changed + $CHANGES diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 9cbc9be7..78394bd1 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -6,41 +6,97 @@ on: - main pull_request: null +permissions: + contents: read + pull-requests: read + actions: read + +concurrency: + group: ci-${{ github.ref }} + cancel-in-progress: true + jobs: - ci: + changes: + runs-on: ubuntu-latest + outputs: + paths: ${{ steps.filter.outputs.changes }} + steps: + - uses: actions/checkout@v4.2.2 + - name: Harden Runner + uses: step-security/harden-runner@v2 + with: + disable-sudo: true + egress-policy: block + allowed-endpoints: > + api.github.com:443 + github.com:443 + - uses: dorny/paths-filter@v3 + id: filter + with: + base: ${{ github.ref }} + filters: .github/filters.yml + + build-test: runs-on: ubuntu-latest - strategy: - matrix: - go-version: [ 'stable', '1.22' ] + needs: changes + if: ${{ contains(fromJSON(needs.changes.outputs.paths), 'src') }} steps: - - uses: actions/checkout@v4 + - name: Harden Runner + uses: step-security/harden-runner@v2 + with: + disable-sudo: true + egress-policy: block + allowed-endpoints: > + api.github.com:443 + github.com:443 + golang.org:443 + proxy.golang.org:443 + sum.golang.org:443 + objects.githubusercontent.com:443 + storage.googleapis.com:443 + cli.codecov.io:443 + api.codecov.io:443 + ingest.codecov.io:443 + raw.githubusercontent.com:443 + get.helm.sh:443 + + - uses: actions/checkout@v4.2.2 with: fetch-depth: 0 - - uses: actions/setup-go@v4 + - uses: actions/setup-go@v5 with: - go-version: ${{ matrix.go-version }} + go-version-file: go.mod + check-latest: true + - name: Vet run: make vet + + - name: Lint + uses: golangci/golangci-lint-action@v6 + with: + version: v1.62.2 + - name: 
Helm Lint run: make helm-lint + - name: Test run: make test + - name: Upload coverage reports to Codecov - uses: codecov/codecov-action@v4 + uses: codecov/codecov-action@v5 with: files: ./coverage.out - fail_ci_if_error: true verbose: true token: ${{ secrets.CODECOV_TOKEN }} slug: linode/linode-cloud-controller-manager + - name: Build run: make build + docker-build: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v4.2.2 with: fetch-depth: 0 - name: Docker Meta @@ -53,7 +109,7 @@ jobs: type=raw,value=latest,enable=${{ github.ref == format('refs/heads/{0}', 'main') }} type=semver,pattern={{raw}},value=${{ github.ref_name }} - name: Build Dockerfile - uses: docker/build-push-action@v5 + uses: docker/build-push-action@v6 with: context: . push: false @@ -61,3 +117,48 @@ jobs: labels: ${{ steps.meta.outputs.labels }} build-args: | REV=${{ github.ref_name }} + + e2e-tests: + runs-on: ubuntu-latest + needs: changes + if: ${{ contains(fromJSON(needs.changes.outputs.paths), 'src') }} + env: + GITHUB_TOKEN: ${{ secrets.github_token }} + LINODE_TOKEN: ${{ secrets.LINODE_TOKEN }} + IMG: linode/linode-cloud-controller-manager:${{ github.ref == 'refs/heads/main' && 'latest' || format('pr-{0}', github.event.number) || github.ref_name }} + LINODE_REGION: us-lax + LINODE_CONTROL_PLANE_MACHINE_TYPE: g6-standard-2 + LINODE_MACHINE_TYPE: g6-standard-2 + WORKER_NODES: '2' + steps: + - uses: actions/checkout@v4.2.2 + with: + fetch-depth: 0 + + - name: Set up Go + uses: actions/setup-go@v5 + with: + go-version-file: 'go.mod' + check-latest: true + + - name: Login to Docker Hub + uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKER_USERNAME }} + password: ${{ secrets.DOCKER_PASSWORD }} + + - name: Install devbox + uses: jetify-com/devbox-install-action@v0.11.0 + + - name: Setup CAPL Management Kind Cluster and CAPL Child Cluster For Testing + run: devbox run mgmt-and-capl-cluster + + - name: Run E2E Tests + run: devbox run e2e-test + + - name: Run Cilium BGP e2e test + run: devbox run e2e-test-bgp + + - name: Cleanup Resources + if: always() + run: devbox run cleanup-cluster diff --git a/.github/workflows/helm.yml b/.github/workflows/helm.yml index e213c747..e321378a 100644 --- a/.github/workflows/helm.yml +++ b/.github/workflows/helm.yml @@ -15,7 +15,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v4 + uses: actions/checkout@v4.2.2 with: fetch-depth: 0 @@ -26,15 +26,15 @@ jobs: sed -ie "s/version: 0.0.0/version: ${TAG#helm-}/g" deploy/chart/Chart.yaml - name: Set up Helm - uses: azure/setup-helm@v3 + uses: azure/setup-helm@v4 - - uses: actions/setup-python@v4 + - uses: actions/setup-python@v5 with: python-version: '3.10' check-latest: true - name: Set up chart-testing - uses: helm/chart-testing-action@v2.6.0 + uses: helm/chart-testing-action@v2.6.1 - name: Run chart-testing (lint) run: ct lint --check-version-increment=false --chart-dirs deploy --target-branch ${{ github.event.repository.default_branch }} @@ -54,7 +54,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v4 + uses: actions/checkout@v4.2.2 with: fetch-depth: 0 @@ -70,7 +70,7 @@ jobs: git config user.email "$GITHUB_ACTOR@users.noreply.github.com" - name: Set up Helm - uses: azure/setup-helm@v3 + uses: azure/setup-helm@v4 - name: Run chart-releaser uses: helm/chart-releaser-action@v1.6.0 diff --git a/.github/workflows/label-sync.yml b/.github/workflows/label-sync.yml index 9b097ecf..f502b3e6 100644 --- 
a/.github/workflows/label-sync.yml +++ b/.github/workflows/label-sync.yml @@ -9,7 +9,7 @@ jobs: build: runs-on: ubuntu-latest steps: - - uses: actions/checkout@5a4ac9002d0be2fb38bd78e4b4dbde5606d7042f # pin@v2 + - uses: actions/checkout@cbb722410c2e876e24abbe8de2cc27693e501dcb # pin@v2 - uses: micnncim/action-label-syncer@3abd5ab72fda571e69fffd97bd4e0033dd5f495c # pin@v1 env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/pr-labeler.yml b/.github/workflows/pr-labeler.yml new file mode 100644 index 00000000..89a24f7c --- /dev/null +++ b/.github/workflows/pr-labeler.yml @@ -0,0 +1,25 @@ +name: PR labeler + +on: + workflow_dispatch: + pull_request_target: + types: [opened, reopened, synchronize] + +jobs: + label-pr: + name: Update PR labels + permissions: + contents: write + pull-requests: write + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v4.2.2 + with: + fetch-depth: 0 + - name: Label PR + uses: release-drafter/release-drafter@v6 + with: + disable-releaser: github.ref != 'refs/heads/main' + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/release-drafter.yml b/.github/workflows/release-drafter.yml new file mode 100644 index 00000000..cf85d7e3 --- /dev/null +++ b/.github/workflows/release-drafter.yml @@ -0,0 +1,21 @@ +name: Release Drafter + +on: + workflow_dispatch: + push: + branches: + - main + +permissions: + contents: read + +jobs: + update_release_draft: + permissions: + contents: write + pull-requests: write + runs-on: ubuntu-latest + steps: + - uses: release-drafter/release-drafter@v6 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index bb27f50d..f3bf0096 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -8,7 +8,7 @@ jobs: release: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v4.2.2 with: fetch-depth: 0 - name: Create Release Artifacts @@ -16,7 +16,7 @@ jobs: env: IMAGE_VERSION: ${{ github.ref_name }} - name: Upload Release Artifacts - uses: softprops/action-gh-release@v1 + uses: softprops/action-gh-release@v2 with: files: | ./release/helm-chart-${{ github.ref_name }}.tgz @@ -35,7 +35,7 @@ jobs: username: ${{ secrets.DOCKER_USERNAME }} password: ${{ secrets.DOCKER_PASSWORD }} - name: Build and Push to Docker Hub - uses: docker/build-push-action@v5 + uses: docker/build-push-action@v6 with: context: . 
push: true diff --git a/.golangci.yml b/.golangci.yml index 19df20d2..fcb5072d 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -15,7 +15,7 @@ linters: # cherry picked from https://golangci-lint.run/usage/linters/ # - ginkgolinter # to be enabled once #158 is merged - bodyclose - - exportloopref + - copyloopvar - gocheckcompilerdirectives - gofmt - goimports diff --git a/Dockerfile b/Dockerfile index a98114af..24ed3827 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.22-alpine as builder +FROM golang:1.23-alpine AS builder RUN mkdir -p /linode WORKDIR /linode @@ -11,7 +11,7 @@ COPY sentry ./sentry RUN go mod download RUN go build -a -ldflags '-extldflags "-static"' -o /bin/linode-cloud-controller-manager-linux /linode -FROM alpine:3.20.3 +FROM alpine:3.21.2 RUN apk add --update --no-cache ca-certificates LABEL maintainers="Linode" LABEL description="Linode Cloud Controller Manager" diff --git a/Makefile b/Makefile index a64209ab..42a96e2a 100644 --- a/Makefile +++ b/Makefile @@ -1,12 +1,30 @@ -IMG ?= linode/linode-cloud-controller-manager:canary -RELEASE_DIR ?= release -PLATFORM ?= linux/amd64 +IMG ?= linode/linode-cloud-controller-manager:canary +RELEASE_DIR ?= release +PLATFORM ?= linux/amd64 # Use CACHE_BIN for tools that cannot use devbox and LOCALBIN for tools that can use either method -CACHE_BIN ?= $(CURDIR)/bin -LOCALBIN ?= $(CACHE_BIN) - -DEVBOX_BIN ?= $(DEVBOX_PACKAGES_DIR)/bin +CACHE_BIN ?= $(CURDIR)/bin +LOCALBIN ?= $(CACHE_BIN) + +DEVBOX_BIN ?= $(DEVBOX_PACKAGES_DIR)/bin +HELM ?= $(LOCALBIN)/helm +HELM_VERSION ?= v3.16.3 + +##################################################################### +# Dev Setup +##################################################################### +CLUSTER_NAME ?= ccm-$(shell git rev-parse --short HEAD) +K8S_VERSION ?= "v1.31.2" +CAPI_VERSION ?= "v1.8.5" +CAAPH_VERSION ?= "v0.2.1" +CAPL_VERSION ?= "v0.7.1" +CONTROLPLANE_NODES ?= 1 +WORKER_NODES ?= 1 +LINODE_FIREWALL_ENABLED ?= true +LINODE_REGION ?= us-lax +LINODE_OS ?= linode/ubuntu22.04 +KUBECONFIG_PATH ?= $(CURDIR)/test-cluster-kubeconfig.yaml +MGMT_KUBECONFIG_PATH ?= $(CURDIR)/mgmt-cluster-kubeconfig.yaml # if the $DEVBOX_PACKAGES_DIR env variable exists that means we are within a devbox shell and can safely # use devbox's bin for our tools @@ -41,10 +59,13 @@ vet: fmt .PHONY: lint lint: - docker run --rm -v "$(shell pwd):/var/work:ro" -w /var/work \ - golangci/golangci-lint:v1.57.2 golangci-lint run -v --timeout=5m - docker run --rm -v "$(shell pwd):/var/work:ro" -w /var/work/e2e \ - golangci/golangci-lint:v1.57.2 golangci-lint run -v --timeout=5m + docker run --rm -v "$(PWD):/var/work:ro" -w /var/work \ + golangci/golangci-lint:latest golangci-lint run -c .golangci.yml + +.PHONY: gosec +gosec: ## Run gosec against code. + docker run --rm -v "$(PWD):/var/work:ro" -w /var/work securego/gosec:2.19.0 \ + -exclude-dir=bin -exclude-generated ./... .PHONY: fmt fmt: @@ -53,7 +74,7 @@ fmt: .PHONY: test # we say code is not worth testing unless it's formatted test: fmt codegen - go test -v -cover -coverprofile ./coverage.out ./cloud/... $(TEST_ARGS) + go test -v -cover -coverprofile ./coverage.out ./cloud/... ./sentry/... $(TEST_ARGS) .PHONY: build-linux build-linux: codegen @@ -88,9 +109,11 @@ docker-build: build-linux .PHONY: docker-push # must run the docker build before pushing the image docker-push: - echo "[reminder] Did you run `make docker-build`?" 
docker push ${IMG} +.PHONY: docker-setup +docker-setup: docker-build docker-push + .PHONY: run # run the ccm locally, really only makes sense on linux anyway run: build @@ -108,6 +131,85 @@ run-debug: build --kubeconfig=${KUBECONFIG} \ --linodego-debug +##################################################################### +# E2E Test Setup +##################################################################### + +.PHONY: mgmt-and-capl-cluster +mgmt-and-capl-cluster: docker-setup mgmt-cluster capl-cluster + +.PHONY: capl-cluster +capl-cluster: generate-capl-cluster-manifests create-capl-cluster patch-linode-ccm + +.PHONY: generate-capl-cluster-manifests +generate-capl-cluster-manifests: + # Create the CAPL cluster manifests without any CSI driver stuff + LINODE_FIREWALL_ENABLED=$(LINODE_FIREWALL_ENABLED) LINODE_OS=$(LINODE_OS) clusterctl generate cluster $(CLUSTER_NAME) \ + --kubernetes-version $(K8S_VERSION) --infrastructure linode-linode:$(CAPL_VERSION) \ + --control-plane-machine-count $(CONTROLPLANE_NODES) --worker-machine-count $(WORKER_NODES) > capl-cluster-manifests.yaml + +.PHONY: create-capl-cluster +create-capl-cluster: + # Create a CAPL cluster with updated CCM and wait for it to be ready + kubectl apply -f capl-cluster-manifests.yaml + kubectl wait --for=condition=ControlPlaneReady cluster/$(CLUSTER_NAME) --timeout=600s || (kubectl get cluster -o yaml; kubectl get linodecluster -o yaml; kubectl get linodemachines -o yaml) + kubectl wait --for=condition=NodeHealthy=true machines -l cluster.x-k8s.io/cluster-name=$(CLUSTER_NAME) --timeout=900s + clusterctl get kubeconfig $(CLUSTER_NAME) > $(KUBECONFIG_PATH) + KUBECONFIG=$(KUBECONFIG_PATH) kubectl wait --for=condition=Ready nodes --all --timeout=600s + # Remove all taints from control plane node so that pods scheduled on it by tests can run (without this, some tests fail) + KUBECONFIG=$(KUBECONFIG_PATH) kubectl taint nodes -l node-role.kubernetes.io/control-plane node-role.kubernetes.io/control-plane- + +.PHONY: patch-linode-ccm +patch-linode-ccm: + KUBECONFIG=$(KUBECONFIG_PATH) kubectl patch -n kube-system daemonset ccm-linode --type='json' -p="[{'op': 'replace', 'path': '/spec/template/spec/containers/0/image', 'value': '${IMG}'}]" + KUBECONFIG=$(KUBECONFIG_PATH) kubectl rollout status -n kube-system daemonset/ccm-linode --timeout=600s + KUBECONFIG=$(KUBECONFIG_PATH) kubectl -n kube-system get daemonset/ccm-linode -o yaml + +.PHONY: mgmt-cluster +mgmt-cluster: + # Create a mgmt cluster + ctlptl apply -f e2e/setup/ctlptl-config.yaml + clusterctl init \ + --wait-providers \ + --wait-provider-timeout 600 \ + --core cluster-api:$(CAPI_VERSION) \ + --bootstrap kubeadm:$(CAPI_VERSION) \ + --control-plane kubeadm:$(CAPI_VERSION) \ + --addon helm:$(CAAPH_VERSION) \ + --infrastructure linode-linode:$(CAPL_VERSION) + kind get kubeconfig --name=caplccm > $(MGMT_KUBECONFIG_PATH) + +.PHONY: cleanup-cluster +cleanup-cluster: + kubectl delete cluster -A --all --timeout=180s + kubectl delete linodefirewalls -A --all --timeout=60s + kubectl delete lvpc -A --all --timeout=60s + kind delete cluster -n caplccm + +.PHONY: e2e-test +e2e-test: + CLUSTER_NAME=$(CLUSTER_NAME) \ + MGMT_KUBECONFIG=$(MGMT_KUBECONFIG_PATH) \ + KUBECONFIG=$(KUBECONFIG_PATH) \ + REGION=$(LINODE_REGION) \ + LINODE_TOKEN=$(LINODE_TOKEN) \ + chainsaw test e2e/test --parallel 2 + +.PHONY: e2e-test-bgp +e2e-test-bgp: + KUBECONFIG=$(KUBECONFIG_PATH) CLUSTER_SUFFIX=$(CLUSTER_NAME) ./e2e/setup/cilium-setup.sh + KUBECONFIG=$(KUBECONFIG_PATH) kubectl -n kube-system rollout status 
daemonset/ccm-linode --timeout=300s + CLUSTER_NAME=$(CLUSTER_NAME) \ + MGMT_KUBECONFIG=$(MGMT_KUBECONFIG_PATH) \ + KUBECONFIG=$(KUBECONFIG_PATH) \ + REGION=$(LINODE_REGION) \ + LINODE_TOKEN=$(LINODE_TOKEN) \ + chainsaw test e2e/bgp-test/lb-cilium-bgp + +##################################################################### +# OS / ARCH +##################################################################### + # Set the host's OS. Only linux and darwin supported for now HOSTOS := $(shell uname -s | tr '[:upper:]' '[:lower:]') ifeq ($(filter darwin linux,$(HOSTOS)),) @@ -121,9 +223,6 @@ else ifeq ($(ARCH_SHORT),aarch64) ARCH_SHORT := arm64 endif -HELM ?= $(LOCALBIN)/helm -HELM_VERSION ?= v3.9.1 - .PHONY: helm helm: $(HELM) ## Download helm locally if necessary $(HELM): $(LOCALBIN) diff --git a/README.md b/README.md index 93aefe00..9d8a521a 100644 --- a/README.md +++ b/README.md @@ -4,444 +4,104 @@ [![Continuous Integration](https://github.com/linode/linode-cloud-controller-manager/actions/workflows/ci.yml/badge.svg)](https://github.com/linode/linode-cloud-controller-manager/actions/workflows/ci.yml) [![codecov](https://codecov.io/gh/linode/linode-cloud-controller-manager/graph/badge.svg?token=GSRnqHUmCk)](https://codecov.io/gh/linode/linode-cloud-controller-manager) [![Docker Pulls](https://img.shields.io/docker/pulls/linode/linode-cloud-controller-manager.svg)](https://hub.docker.com/r/linode/linode-cloud-controller-manager/) - [![Twitter](https://img.shields.io/twitter/follow/linode.svg?style=social&logo=twitter&label=Follow)](https://twitter.com/intent/follow?screen_name=linode) -## The purpose of the CCM -The Linode Cloud Controller Manager (CCM) creates a fully supported -Kubernetes experience on Linode. - -* Load balancers, Linode NodeBalancers, are automatically deployed when a -[Kubernetes Service of type "LoadBalancer"](https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer) is deployed. This is the most -reliable way to allow services running in your cluster to be reachable from -the Internet. -* Linode hostnames and network addresses (private/public IPs) are automatically -associated with their corresponding Kubernetes resources, forming the basis for -a variety of Kubernetes features. -* Nodes resources are put into the correct state when Linodes are shut down, -allowing pods to be appropriately rescheduled. -* Nodes are annotated with the Linode region, which is the basis for scheduling based on -failure domains. - -## Kubernetes Supported Versions -Kubernetes 1.9+ - -## Usage - -### LoadBalancer Services -Kubernetes Services of type `LoadBalancer` will be served through a [Linode NodeBalancer](https://www.linode.com/nodebalancers) by default which the Cloud Controller Manager will provision on demand. -For general feature and usage notes, refer to the [Getting Started with Linode NodeBalancers](https://www.linode.com/docs/platform/nodebalancer/getting-started-with-nodebalancers/) guide. - -#### Using IP Sharing instead of NodeBalancers -Alternatively, the Linode CCM can integrate with [Cilium's BGP Control Plane](https://docs.cilium.io/en/stable/network/bgp-control-plane/) -to perform load-balancing via IP sharing on labeled Nodes. This option does not create a backing NodeBalancer and instead -provisions a new IP on an ip-holder Nanode to share for the desired region. See [Shared IP LoadBalancing](#shared-ip-load-balancing). - -#### Annotations -The Linode CCM accepts several annotations which affect the properties of the underlying NodeBalancer deployment. 
- -All of the Service annotation names listed below have been shortened for readability. The values, such as `http`, are case-sensitive. - -Each *Service* annotation **MUST** be prefixed with:
-**`service.beta.kubernetes.io/linode-loadbalancer-`** - -Annotation (Suffix) | Values | Default | Description ----|---|---|--- -`throttle` | `0`-`20` (`0` to disable) | `0` | Client Connection Throttle, which limits the number of subsequent new connections per second from the same client IP -`default-protocol` | `tcp`, `http`, `https` | `tcp` | This annotation is used to specify the default protocol for Linode NodeBalancer. -`default-proxy-protocol` | `none`, `v1`, `v2` | `none` | Specifies whether to use a version of Proxy Protocol on the underlying NodeBalancer. -`port-*` | json (e.g. `{ "tls-secret-name": "prod-app-tls", "protocol": "https", "proxy-protocol": "v2"}`) | | Specifies port specific NodeBalancer configuration. See [Port Specific Configuration](#port-specific-configuration). `*` is the port being configured, e.g. `linode-loadbalancer-port-443` -`check-type` | `none`, `connection`, `http`, `http_body` | | The type of health check to perform against back-ends to ensure they are serving requests -`check-path` | string | | The URL path to check on each back-end during health checks -`check-body` | string | | Text which must be present in the response body to pass the NodeBalancer health check -`check-interval` | int | | Duration, in seconds, to wait between health checks -`check-timeout` | int (1-30) | | Duration, in seconds, to wait for a health check to succeed before considering it a failure -`check-attempts` | int (1-30) | | Number of health check failures necessary to remove a back-end from the service -`check-passive` | [bool](#annotation-bool-values) | `false` | When `true`, `5xx` status codes will cause the health check to fail -`preserve` | [bool](#annotation-bool-values) | `false` | When `true`, deleting a `LoadBalancer` service does not delete the underlying NodeBalancer. This will also prevent deletion of the former LoadBalancer when another one is specified with the `nodebalancer-id` annotation. -`nodebalancer-id` | string | | The ID of the NodeBalancer to front the service. When not specified, a new NodeBalancer will be created. This can be configured on service creation or patching -`hostname-only-ingress` | [bool](#annotation-bool-values) | `false` | When `true`, the LoadBalancerStatus for the service will only contain the Hostname. This is useful for bypassing kube-proxy's rerouting of in-cluster requests originally intended for the external LoadBalancer to the service's constituent pod IPs. -`tags` | string | | A comma seperated list of tags to be applied to the createad NodeBalancer instance -`firewall-id` | string | | An existing Cloud Firewall ID to be attached to the NodeBalancer instance. See [Firewalls](#firewalls). -`firewall-acl` | string | | The Firewall rules to be applied to the NodeBalancer. Adding this annotation creates a new CCM managed Linode CloudFirewall instance. See [Firewalls](#firewalls). - -#### Deprecated Annotations -These annotations are deprecated, and will be removed in a future release. - -Annotation (Suffix) | Values | Default | Description | Scheduled Removal ----|---|---|---|--- -`proxy-protcol` | `none`, `v1`, `v2` | `none` | Specifies whether to use a version of Proxy Protocol on the underlying NodeBalancer | Q4 2021 - -#### Annotation bool values -For annotations with bool value types, `"1"`, `"t"`, `"T"`, `"True"`, `"true"` and `"True"` are valid string representations of `true`. Any other values will be interpreted as false. For more details, see [strconv.ParseBool](https://golang.org/pkg/strconv/#ParseBool). 
- -#### Port Specific Configuration -These configuration options can be specified via the `port-*` annotation, encoded in JSON. - -Key | Values | Default | Description ----|---|---|--- -`protocol` | `tcp`, `http`, `https` | `tcp` | Specifies protocol of the NodeBalancer port. Overwrites `default-protocol`. -`proxy-protocol` | `none`, `v1`, `v2` | `none` | Specifies whether to use a version of Proxy Protocol on the underlying NodeBalancer. Overwrites `default-proxy-protocol`. -`tls-secret-name` | string | | Specifies a secret to use for TLS. The secret type should be `kubernetes.io/tls`. - -#### Shared IP Load-Balancing -**NOTE:** This feature requires contacting [Customer Support](https://www.linode.com/support/contact/) to enable provisioning additional IPs. - -Services of `type: LoadBalancer` can receive an external IP not backed by a NodeBalancer if `--bgp-node-selector` is set on the Linode CCM and `--load-balancer-type` is set to `cilium-bgp`. Additionally, the `LINODE_URL` environment variable in the linode CCM needs to be set to "https://api.linode.com/v4beta" for IP sharing to work. - -This feature requires the Kubernetes cluster to be using [Cilium](https://cilium.io/) as the CNI with the `bgp-control-plane` feature enabled. - -##### Example Daemonset configuration: - -``` -apiVersion: apps/v1 -kind: DaemonSet -metadata: - name: ccm-linode - namespace: kube-system -spec: - template: - spec: - containers: - - image: linode/linode-cloud-controller-manager:latest - name: ccm-linode - env: - - name: LINODE_URL - value: https://api.linode.com/v4beta - args: - - --bgp-node-selector=cilium-bgp-peering=true - - --load-balancer-type=cilium-bgp -... -``` - -##### Example Helm chart configuration: - -``` -sharedIPLoadBalancing: - loadBalancerType: cilium-bgp - bgpNodeSelector: cilium-bgp-peering=true -``` - -#### Firewalls -Firewall rules can be applied to the CCM Managed NodeBalancers in two distinct ways. - -##### CCM Managed Firewall -To use this feature, ensure that the linode api token used with the ccm has the `add_firewalls` grant. - -The CCM accepts firewall ACLs in json form. The ACL can either be an `allowList` or a `denyList`. Supplying both is not supported. Supplying neither is not supported. The `allowList` sets up a CloudFirewall that `ACCEPT`s traffic only from the specified IPs/CIDRs and `DROP`s everything else. The `denyList` sets up a CloudFirewall that `DROP`s traffic only from the specified IPs/CIDRs and `ACCEPT`s everything else. Ports are automatically inferred from the service configuration. - -See [Firewall rules](https://www.linode.com/docs/api/networking/#firewall-create__request-body-schema) for more details on how to specify the IPs/CIDRs - -Example usage of an ACL to allow traffic from a specific set of addresses - -```yaml -kind: Service -apiVersion: v1 -metadata: - name: https-lb - annotations: - service.beta.kubernetes.io/linode-loadbalancer-firewall-acl: | - { - "allowList": { - "ipv4": ["192.166.0.0/16", "172.23.41.0/24"], - "ipv6": ["2001:DB8::/128"] - } - } -spec: - type: LoadBalancer - selector: - app: nginx-https-example - ports: - - name: http - protocol: TCP - port: 80 - targetPort: http - - name: https - protocol: TCP - port: 443 - targetPort: https -``` - - -##### User Managed Firewall -Users can create CloudFirewall instances, supply their own rules and attach them to the NodeBalancer. To do so, set the -`service.beta.kubernetes.io/linode-loadbalancer-firewall-id` annotation to the ID of the cloud firewall. 
The CCM does not manage the lifecycle of the CloudFirewall Instance in this case. Users are responsible for ensuring the policies are correct. - -**Note**
-If the user supplies a firewall-id, and later switches to using an ACL, the CCM will take over the CloudFirewall Instance. To avoid this, delete the service, and re-create it so the original CloudFirewall is left undisturbed. - -#### Routes -When running k8s clusters within VPC, node specific podCIDRs need to be allowed on the VPC interface. Linode CCM comes with route-controller functionality which can be enabled for automatically adding/deleting routes on VPC interfaces. When installing CCM with helm, make sure to specify routeController settings. - -##### Example usage in values.yaml -```yaml -routeController: - vpcNames: - clusterCIDR: 10.0.0.0/8 - configureCloudRoutes: true -``` - -### Nodes -Kubernetes Nodes can be configured with the following annotations. - -Each *Node* annotation **MUST** be prefixed with:
-**`node.k8s.linode.com/`** - -Key | Values | Default | Description ----|---|---|--- -`private-ip` | `IPv4` | `none` | Specifies the Linode Private IP overriding default detection of the Node InternalIP.
When using a [VLAN] or [VPC], the Node InternalIP may not be a Linode Private IP as [required for NodeBalancers] and should be specified. - - -[required for NodeBalancers]: https://www.linode.com/docs/api/nodebalancers/#nodebalancer-create__request-body-schema -[VLAN]: https://www.linode.com/products/vlan/ -[VPC]: https://www.linode.com/blog/linode/new-betas-coming-to-green-light/ - -### Example usage -```yaml -kind: Service -apiVersion: v1 -metadata: - name: https-lb - annotations: - service.beta.kubernetes.io/linode-loadbalancer-throttle: "4" - service.beta.kubernetes.io/linode-loadbalancer-default-protocol: "http" - service.beta.kubernetes.io/linode-loadbalancer-port-443: | - { - "tls-secret-name": "example-secret", - "protocol": "https" - } -spec: - type: LoadBalancer - selector: - app: nginx-https-example - ports: - - name: http - protocol: TCP - port: 80 - targetPort: http - - name: https - protocol: TCP - port: 443 - targetPort: https - ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: nginx-https-deployment -spec: - replicas: 2 - selector: - matchLabels: - app: nginx-https-example - template: - metadata: - labels: - app: nginx-https-example - spec: - containers: - - name: nginx - image: nginx - ports: - - name: http - containerPort: 80 - protocol: TCP - - name: https - containerPort: 80 - protocol: TCP - -``` - -See more in the [examples directory](examples) - -## Why `stickiness` and `algorithm` annotations don't exist -As kube-proxy will simply double-hop the traffic to a random backend Pod anyway, it doesn't matter which backend Node traffic is forwarded-to for the sake of session stickiness. -These annotations are not necessary to implement session stickiness, as kube-proxy will simply double-hop the packets to a random backend Pod. It would not make a difference to set a backend Node that would receive the network traffic in an attempt to set session stickiness. - -## How to use sessionAffinity -In Kubernetes, sessionAffinity refers to a mechanism that allows a client always to be redirected to the same pod when the client hits a service. - -To enable sessionAffinity `service.spec.sessionAffinity` field must be set to `ClientIP` as the following service yaml: - -```yaml -apiVersion: v1 -kind: Service -metadata: - name: wordpress-lsmnl-wordpress - namespace: wordpress-lsmnl - labels: - app: wordpress-lsmnl-wordpress -spec: - type: LoadBalancer - selector: - app: wordpress-lsmnl-wordpress - sessionAffinity: ClientIP -``` - -The max session sticky time can be set by setting the field `service.spec.sessionAffinityConfig.clientIP.timeoutSeconds` as below: - -```yaml -sessionAffinityConfig: - clientIP: - timeoutSeconds: 100 -``` - -## Additional environment variables -To tweak CCM based on needs, one can overwrite the default values set for caches and requests by setting appropriate environment variables when applying the manifest or helm chart. - -Environment Variable | Default | Description ----|---|--- -`LINODE_INSTANCE_CACHE_TTL` | `15` | Default timeout of instance cache in seconds -`LINODE_ROUTES_CACHE_TTL_SECONDS` | `60` | Default timeout of route cache in seconds -`LINODE_REQUEST_TIMEOUT_SECONDS` | `120` | Default timeout in seconds for http requests to linode API -`LINODE_EXTERNAL_SUBNET` | | Mark private network as external. Example - `172.24.0.0/16` - -## Generating a Manifest for Deployment -Use the script located at `./deploy/generate-manifest.sh` to generate a self-contained deployment manifest for the Linode CCM. Two arguments are required. 
- -The first argument must be a Linode APIv4 Personal Access Token with all permissions. -(https://cloud.linode.com/profile/tokens) - -The second argument must be a Linode region. -(https://api.linode.com/v4/regions) - -Example: - -```sh -./deploy/generate-manifest.sh $LINODE_API_TOKEN us-east -``` - -This will create a file `ccm-linode.yaml` which you can use to deploy the CCM. - -`kubectl apply -f ccm-linode.yaml` - -Note: Your kubelets, controller-manager, and apiserver must be started with `--cloud-provider=external` as noted in the following documentation. - -## Deployment Through Helm Chart -LINODE_API_TOKEN must be a Linode APIv4 [Personal Access Token](https://cloud.linode.com/profile/tokens) with all permissions. - -REGION must be a Linode [region](https://api.linode.com/v4/regions). -### Install the ccm-linode repo -```shell -helm repo add ccm-linode https://linode.github.io/linode-cloud-controller-manager/ -helm repo update ccm-linode -``` - -### To deploy ccm-linode. Run the following command: - -```sh -export VERSION=v0.4.8 -export LINODE_API_TOKEN= -export REGION= -helm install ccm-linode --set apiToken=$LINODE_API_TOKEN,region=$REGION ccm-linode/ccm-linode -``` -_See [helm install](https://helm.sh/docs/helm/helm_install/) for command documentation._ - -### To uninstall ccm-linode from kubernetes cluster. Run the following command: -```sh -helm uninstall ccm-linode -``` -_See [helm uninstall](https://helm.sh/docs/helm/helm_uninstall/) for command documentation._ - -### To upgrade when new changes are made to the helm chart. Run the following command: -```sh -export VERSION=v0.4.8 -export LINODE_API_TOKEN= -export REGION= - -helm upgrade ccm-linode --install --set apiToken=$LINODE_API_TOKEN,region=$REGION ccm-linode/ccm-linode -``` -_See [helm upgrade](https://helm.sh/docs/helm/helm_upgrade/) for command documentation._ - -### Configurations -There are other variables that can be set to a different value. For list of all the modifiable variables/values, take a look at './deploy/chart/values.yaml'. - -Values can be set/overrided by using the '--set var=value,...' flag or by passing in a custom-values.yaml using '-f custom-values.yaml'. - -Recommendation: Use custom-values.yaml to override the variables to avoid any errors with template rendering - -### Upstream Documentation Including Deployment Instructions - -[Kubernetes Cloud Controller Manager](https://kubernetes.io/docs/tasks/administer-cluster/running-cloud-controller/). - -## Upstream Developer Documentation - -[Developing a Cloud Controller Manager](https://kubernetes.io/docs/tasks/administer-cluster/developing-cloud-controller-manager/). - -## Development Guide - -### Building the Linode Cloud Controller Manager - -Some of the Linode Cloud Controller Manager development helper scripts rely -on a fairly up-to-date GNU tools environment, so most recent Linux distros -should work just fine out-of-the-box. - -#### Setup Go - -The Linode Cloud Controller Manager is written in Google's Go programming -language. Currently, the Linode Cloud Controller Manager is developed and -tested on **Go 1.8.3**. If you haven't set up a Go development environment, -please follow [these instructions](https://golang.org/doc/install) to -install Go. 
- -On macOS, Homebrew has a nice package - -```bash -brew install golang -``` - -#### Download Source - -```bash -go get github.com/linode/linode-cloud-controller-manager -cd $(go env GOPATH)/src/github.com/linode/linode-cloud-controller-manager -``` - -#### Install Dev tools -To install various dev tools for Pharm Controller Manager, run the following command: - -```bash -./hack/builddeps.sh -``` - -#### Build Binary -Use the following Make targets to build and run a local binary - -```bash -$ make build -$ make run -# You can also run the binary directly to pass additional args -$ dist/linode-cloud-controller-manager -``` - -#### Dependency management -Linode Cloud Controller Manager uses [Go Modules](https://blog.golang.org/using-go-modules) to manage dependencies. -If you want to update/add dependencies, run: - -```bash -go mod tidy -``` - -#### Building Docker images -To build and push a Docker image, use the following make targets. - -```bash -# Set the repo/image:tag with the TAG environment variable -# Then run the docker-build make target -$ IMG=linode/linode-cloud-controller-manager:canary make docker-build - -# Push Image -$ IMG=linode/linode-cloud-controller-manager:canary make docker-push -``` - -Then, to run the image - -```bash -docker run -ti linode/linode-cloud-controller-manager:canary -``` - -## Contribution Guidelines -Want to improve the linode-cloud-controller-manager? Please start [here](.github/CONTRIBUTING.md). - -## Join the Kubernetes Community -For general help or discussion, join us in #linode on the [Kubernetes Slack](https://kubernetes.slack.com/messages/CD4B15LUR/details/). To sign up, use the [Kubernetes Slack inviter](http://slack.kubernetes.io/). +## Overview + +The Linode Cloud Controller Manager (CCM) is a crucial component that integrates Kubernetes with Linode's infrastructure services. It implements the cloud-controller-manager binary, running cloud-specific control loops that are essential for cluster operation. + +A Cloud Controller Manager (CCM) is a Kubernetes control plane component that embeds cloud-specific control logic. It lets you link your cluster to your cloud provider's API, separating out the components that interact with that cloud platform from components that only interact with your cluster. 
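The paragraph above describes the CCM linking a cluster to the cloud provider's API. As a rough orientation, the sketch below shows the registration pattern such a provider uses with the upstream `k8s.io/cloud-provider` framework; `RegisterCloudProvider` and `cloudprovider.Interface` are the real upstream entry points, but the stub `cloud` type and the disabled hooks are illustrative placeholders, not this repository's actual `cloud/linode` code.

```go
package linode

import (
	"io"

	cloudprovider "k8s.io/cloud-provider"
)

// providerName is the value the cloud-controller-manager binary selects via
// its own --cloud-provider flag (kubelets, by contrast, run with
// --cloud-provider=external and defer to this process).
const providerName = "linode"

// cloud is a placeholder for the real provider type, which carries a Linode
// API client plus the controller implementations described below.
type cloud struct{}

func init() {
	// The shared controller framework looks this factory up by name at startup.
	cloudprovider.RegisterCloudProvider(providerName, func(io.Reader) (cloudprovider.Interface, error) {
		return &cloud{}, nil
	})
}

// Each hook gates one control loop; returning false disables that controller.
func (c *cloud) Initialize(_ cloudprovider.ControllerClientBuilder, _ <-chan struct{}) {}
func (c *cloud) LoadBalancer() (cloudprovider.LoadBalancer, bool) { return nil, false }
func (c *cloud) Instances() (cloudprovider.Instances, bool)       { return nil, false }
func (c *cloud) InstancesV2() (cloudprovider.InstancesV2, bool)   { return nil, false }
func (c *cloud) Zones() (cloudprovider.Zones, bool)               { return nil, false }
func (c *cloud) Clusters() (cloudprovider.Clusters, bool)         { return nil, false }
func (c *cloud) Routes() (cloudprovider.Routes, bool)             { return nil, false }
func (c *cloud) ProviderName() string                             { return providerName }
func (c *cloud) HasClusterID() bool                               { return true }
```

In the real provider these hooks return the working Node, Service, and Route controller implementations summarized in the next section.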
+ +### Core Components + +#### Node Controller +- Initializes node configuration with Linode-specific information + - Sets node addresses (public/private IPs) + - Labels nodes with region/zone information + - Configures node hostnames +- Monitors node health and lifecycle + - Detects node termination + - Updates node status + - Manages node cleanup + +#### Service Controller +- Manages LoadBalancer service implementations using Linode NodeBalancers + - Creates and configures NodeBalancers + - Updates backend pools + - Manages SSL/TLS certificates +- Handles automatic provisioning and configuration + - Health checks + - Session affinity + - Protocol configuration +- Supports multiple load balancing approaches + - Traditional NodeBalancer deployment + - BGP-based IP sharing for cost optimization + - Custom firewall rules and security configurations + +#### Route Controller +- Manages VPC and private network integration + - Configures routes for pod CIDR ranges + - Handles cross-node pod communication +- Ensures proper network connectivity + - Sets up pod-to-pod networking + - Manages network policies + - Configures network routes for optimal communication + +## Requirements + +- Kubernetes 1.22+ +- Kubelets, controller-manager, and apiserver with `--cloud-provider=external` +- Linode APIv4 Token +- Supported Linode region + +## Documentation + +### Quick Start +- [Getting Started Guide](docs/getting-started/README.md) - Start here for installation and setup + - [Overview](docs/getting-started/overview.md) - Learn about CCM basics + - [Requirements](docs/getting-started/requirements.md) - Check prerequisites + - [Installation](docs/getting-started/installation.md) - Install the CCM + - [Helm Installation](docs/getting-started/helm-installation.md) - Install using Helm + - [Manual Installation](docs/getting-started/manual-installation.md) - Manual setup instructions + - [Verification](docs/getting-started/verification.md) - Verify your installation + - [Troubleshooting](docs/getting-started/troubleshooting.md) - Common issues and solutions + +### Configuration +- [Configuration Guide](docs/configuration/README.md) - Detailed configuration options + - [LoadBalancer Services](docs/configuration/loadbalancer.md) + - [Service Annotations](docs/configuration/annotations.md) + - [Node Configuration](docs/configuration/nodes.md) + - [Environment Variables](docs/configuration/environment.md) + - [Firewall Setup](docs/configuration/firewall.md) + - [Route Configuration](docs/configuration/routes.md) + - [Session Affinity](docs/configuration/session-affinity.md) + +### Examples and Development +- [Examples](docs/examples/README.md) - Real-world usage examples + - [Basic Services](docs/examples/basic.md) + - [Advanced Configuration](docs/examples/advanced.md) +- [Development Guide](docs/development/README.md) - Contributing to CCM + +## Getting Help + +### Community Support + +For general help or discussion, join us in #linode on the [Kubernetes Slack](https://kubernetes.slack.com/messages/CD4B15LUR/details/). + +To sign up for Kubernetes Slack, use the [Kubernetes Slack inviter](http://slack.kubernetes.io/). 
+ +### Issue Tracking + +If you've found a bug or want to request a feature: +- Check the [GitHub Issues](https://github.com/linode/linode-cloud-controller-manager/issues) +- Submit a [Pull Request](https://github.com/linode/linode-cloud-controller-manager/pulls) + +### Additional Resources + +- [Official Linode Documentation](https://www.linode.com/docs/) +- [Kubernetes Cloud Controller Manager Documentation](https://kubernetes.io/docs/tasks/administer-cluster/running-cloud-controller/) +- [API Documentation](https://www.linode.com/docs/api) + +## Contributing + +Want to improve the Linode Cloud Controller Manager? Please see our [contributing guidelines](.github/CONTRIBUTING.md). diff --git a/cloud/linode/cilium_loadbalancers.go b/cloud/linode/cilium_loadbalancers.go index 3a5590b7..71dc5632 100644 --- a/cloud/linode/cilium_loadbalancers.go +++ b/cloud/linode/cilium_loadbalancers.go @@ -5,6 +5,7 @@ import ( "encoding/json" "fmt" "net/http" + "os" "slices" "strings" @@ -28,8 +29,8 @@ const ( ciliumLBClass = "io.cilium/bgp-control-plane" ipHolderLabelPrefix = "linode-ccm-ip-holder" ciliumBGPPeeringPolicyName = "linode-ccm-bgp-peering" - - commonControlPlaneLabel = "node-role.kubernetes.io/control-plane" + defaultBGPPeerPrefix = "2600:3c0f" + commonControlPlaneLabel = "node-role.kubernetes.io/control-plane" ) // This mapping is unfortunately necessary since there is no way to get the @@ -148,7 +149,7 @@ func (l *loadbalancers) shareIPs(ctx context.Context, addrs []string, node *v1.N // perform IP sharing (via a specified node selector) have the expected IPs shared // in the event that a Node joins the cluster after the LoadBalancer Service already // exists -func (l *loadbalancers) handleIPSharing(ctx context.Context, node *v1.Node) error { +func (l *loadbalancers) handleIPSharing(ctx context.Context, node *v1.Node, ipHolderSuffix string) error { // ignore cases where the provider ID has not been set yet if node.Spec.ProviderID == "" { klog.Info("skipping IP while providerID is unset") @@ -182,7 +183,7 @@ // if any of the addrs don't exist on the ip-holder (e.g. someone manually deleted it outside the CCM), // we need to exclude that from the list // TODO: also clean up the CiliumLoadBalancerIPPool for that missing IP if that happens - ipHolder, err := l.getIPHolder(ctx) + ipHolder, err := l.getIPHolder(ctx, ipHolderSuffix) if err != nil { return err } @@ -207,8 +208,8 @@ // createSharedIP requests an additional IP that can be shared on Nodes to support // loadbalancing via Cilium LB IPAM + BGP Control Plane. 
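// The ipHolderSuffix parameter introduced below carries Options.IpHolderSuffix (the
// optional `--ip-holder-suffix` flag), which scopes the ip-holder Nanode's label per
// cluster so that clusters sharing a region do not contend for the same holder instance.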
-func (l *loadbalancers) createSharedIP(ctx context.Context, nodes []*v1.Node) (string, error) { - ipHolder, err := l.ensureIPHolder(ctx) +func (l *loadbalancers) createSharedIP(ctx context.Context, nodes []*v1.Node, ipHolderSuffix string) (string, error) { + ipHolder, err := l.ensureIPHolder(ctx, ipHolderSuffix) if err != nil { return "", err } @@ -276,7 +277,15 @@ func (l *loadbalancers) deleteSharedIP(ctx context.Context, service *v1.Service) return err } bgpNodes := nodeList.Items - ipHolder, err := l.getIPHolder(ctx) + + serviceNn := getServiceNn(service) + var ipHolderSuffix string + if Options.IpHolderSuffix != "" { + ipHolderSuffix = Options.IpHolderSuffix + klog.V(3).Infof("using parameter-based IP Holder suffix %s for Service %s", ipHolderSuffix, serviceNn) + } + + ipHolder, err := l.getIPHolder(ctx, ipHolderSuffix) if err != nil { // return error or nil if not found since no IP holder means there // is no IP to reclaim @@ -310,48 +319,90 @@ func (l *loadbalancers) deleteSharedIP(ctx context.Context, service *v1.Service) // To hold the IP in lieu of a proper IP reservation system, a special Nanode is // created but not booted and used to hold all shared IPs. -func (l *loadbalancers) ensureIPHolder(ctx context.Context) (*linodego.Instance, error) { - ipHolder, err := l.getIPHolder(ctx) +func (l *loadbalancers) ensureIPHolder(ctx context.Context, suffix string) (*linodego.Instance, error) { + ipHolder, err := l.getIPHolder(ctx, suffix) if err != nil { return nil, err } if ipHolder != nil { return ipHolder, nil } - + label := generateClusterScopedIPHolderLinodeName(l.zone, suffix) ipHolder, err = l.client.CreateInstance(ctx, linodego.InstanceCreateOptions{ Region: l.zone, Type: "g6-nanode-1", - Label: fmt.Sprintf("%s-%s", ipHolderLabelPrefix, l.zone), + Label: label, RootPass: uuid.NewString(), Image: "linode/ubuntu22.04", Booted: ptr.To(false), }) if err != nil { + if linodego.ErrHasStatus(err, http.StatusBadRequest) && strings.Contains(err.Error(), "Label must be unique") { + // TODO (rk): should we handle more status codes on error? + klog.Errorf("failed to create new IP Holder instance %s since it already exists: %s", label, err.Error()) + return nil, err + } return nil, err } + klog.Infof("created new IP Holder instance %s", label) return ipHolder, nil } -func (l *loadbalancers) getIPHolder(ctx context.Context) (*linodego.Instance, error) { +func (l *loadbalancers) getIPHolder(ctx context.Context, suffix string) (*linodego.Instance, error) { + // even though we have updated the naming convention, leaving this in ensures we have backwards compatibility filter := map[string]string{"label": fmt.Sprintf("%s-%s", ipHolderLabelPrefix, l.zone)} rawFilter, err := json.Marshal(filter) if err != nil { panic("this should not have failed") } var ipHolder *linodego.Instance + // TODO (rk): should we switch to using GET instead of LIST? 
we would be able to wrap logic around errors linodes, err := l.client.ListInstances(ctx, linodego.NewListOptions(1, string(rawFilter))) if err != nil { return nil, err } + if len(linodes) == 0 { + // since a list that returns 0 results has a 200/OK status code (no error) + // we assume that either + // a) an ip holder instance does not exist yet + // or + // b) another cluster already holds the linode grant to an ip holder using the old naming convention + filter = map[string]string{"label": generateClusterScopedIPHolderLinodeName(l.zone, suffix)} + rawFilter, err = json.Marshal(filter) + if err != nil { + panic("this should not have failed") + } + linodes, err = l.client.ListInstances(ctx, linodego.NewListOptions(1, string(rawFilter))) + if err != nil { + return nil, err + } + } if len(linodes) > 0 { ipHolder = &linodes[0] } - return ipHolder, nil } +// generateClusterScopedIPHolderLinodeName attempts to generate a unique name for the IP Holder +// instance used alongside Cilium LoadBalancers and Shared IPs for Kubernetes Services. +// If the `--ip-holder-suffix` arg is passed when running Linode CCM, `suffix` is set to that value. +func generateClusterScopedIPHolderLinodeName(zone, suffix string) (label string) { + // since Linode CCM consumers are varied, we require a method of providing a + // suffix that does not rely on the use of a specific product (ex. LKE) to + // have a specific piece of metadata (ex. annotation(s), label(s)) present to key off of. + + if suffix == "" { + // this avoids a trailing hyphen if suffix is empty (ex. linode-ccm-ip-holder-us-ord-) + label = fmt.Sprintf("%s-%s", ipHolderLabelPrefix, zone) + } else { + label = fmt.Sprintf("%s-%s-%s", ipHolderLabelPrefix, zone, suffix) + } + klog.V(5).Infof("generated IP Holder Linode label: %s", label) + return label +} + func (l *loadbalancers) retrieveCiliumClientset() error { if l.ciliumClient != nil { return nil @@ -431,6 +482,12 @@ func (l *loadbalancers) getCiliumLBIPPool(ctx context.Context, service *v1.Servi // NOTE: Cilium CRDs must be installed for this to work func (l *loadbalancers) ensureCiliumBGPPeeringPolicy(ctx context.Context) error { + if raw, ok := os.LookupEnv("BGP_CUSTOM_ID_MAP"); ok && raw != "" { + klog.Info("BGP_CUSTOM_ID_MAP env variable specified, using it instead of the default region map") + if err := json.Unmarshal([]byte(raw), &regionIDMap); err != nil { + return err + } + } regionID, ok := regionIDMap[l.zone] if !ok { return fmt.Errorf("unsupported region for BGP: %s", l.zone) @@ -493,10 +550,15 @@ func (l *loadbalancers) ensureCiliumBGPPeeringPolicy(ctx context.Context) error }}, }, } + bgpPeerPrefix := defaultBGPPeerPrefix + if raw, ok := os.LookupEnv("BGP_PEER_PREFIX"); ok && raw != "" { + klog.Info("BGP_PEER_PREFIX env variable specified, using it instead of the default bgpPeer prefix") + bgpPeerPrefix = raw + } // As in https://github.com/linode/lelastic, there are 4 peers per DC for i := 1; i <= 4; i++ { neighbor := v2alpha1.CiliumBGPNeighbor{ - PeerAddress: fmt.Sprintf("2600:3c0f:%d:34::%d/64", regionID, i), + PeerAddress: fmt.Sprintf("%s:%d:34::%d/64", bgpPeerPrefix, regionID, i), PeerASN: 65000, EBGPMultihopTTL: ptr.To(int32(10)), ConnectRetryTimeSeconds: ptr.To(int32(5)), diff --git a/cloud/linode/cilium_loadbalancers_test.go b/cloud/linode/cilium_loadbalancers_test.go index a4226241..f03bfaeb 100644 --- a/cloud/linode/cilium_loadbalancers_test.go +++ b/cloud/linode/cilium_loadbalancers_test.go @@ -69,14 +69,15 @@ var ( }, }, } - publicIPv4 = net.ParseIP("45.76.101.25") - 
ipHolderInstance = linodego.Instance{ + publicIPv4 = net.ParseIP("45.76.101.25") + oldIpHolderInstance = linodego.Instance{ ID: 12345, Label: fmt.Sprintf("%s-%s", ipHolderLabelPrefix, zone), Type: "g6-standard-1", Region: "us-west", IPv4: []*net.IP{&publicIPv4}, } + newIpHolderInstance = linodego.Instance{} ) func TestCiliumCCMLoadBalancers(t *testing.T) { @@ -93,20 +94,44 @@ func TestCiliumCCMLoadBalancers(t *testing.T) { f: testUnsupportedRegion, }, { - name: "Create Cilium Load Balancer With explicit loadBalancerClass and existing IP holder nanode", - f: testCreateWithExistingIPHolder, + name: "Create Cilium Load Balancer With explicit loadBalancerClass and existing IP holder nanode with old IP Holder naming convention", + f: testCreateWithExistingIPHolderWithOldIpHolderNamingConvention, }, { - name: "Create Cilium Load Balancer With no existing IP holder nanode", - f: testCreateWithNoExistingIPHolder, + name: "Create Cilium Load Balancer With explicit loadBalancerClass and existing IP holder nanode with new IP Holder naming convention", + f: testCreateWithExistingIPHolderWithNewIpHolderNamingConvention, }, { - name: "Delete Cilium Load Balancer", - f: testEnsureCiliumLoadBalancerDeleted, + name: "Create Cilium Load Balancer With explicit loadBalancerClass and existing IP holder nanode with new IP Holder naming convention and 63 char long suffix", + f: testCreateWithExistingIPHolderWithNewIpHolderNamingConventionUsingLongSuffix, }, { - name: "Add node to existing Cilium Load Balancer", - f: testCiliumUpdateLoadBalancerAddNode, + name: "Create Cilium Load Balancer With no existing IP holder nanode and short suffix", + f: testCreateWithNoExistingIPHolderUsingShortSuffix, + }, + { + name: "Create Cilium Load Balancer With no existing IP holder nanode and no suffix", + f: testCreateWithNoExistingIPHolderUsingNoSuffix, + }, + { + name: "Create Cilium Load Balancer With no existing IP holder nanode and 63 char long suffix", + f: testCreateWithNoExistingIPHolderUsingLongSuffix, + }, + { + name: "Delete Cilium Load Balancer With Old IP Holder Naming Convention", + f: testEnsureCiliumLoadBalancerDeletedWithOldIpHolderNamingConvention, + }, + { + name: "Delete Cilium Load Balancer With New IP Holder Naming Convention", + f: testEnsureCiliumLoadBalancerDeletedWithNewIpHolderNamingConvention, + }, + { + name: "Add node to existing Cilium Load Balancer With Old IP Holder Naming Convention", + f: testCiliumUpdateLoadBalancerAddNodeWithOldIpHolderNamingConvention, + }, + { + name: "Add node to existing Cilium Load Balancer With New IP Holder Naming Convention", + f: testCiliumUpdateLoadBalancerAddNodeWithNewIpHolderNamingConvention, }, } for _, tc := range testCases { @@ -163,9 +188,22 @@ func addNodes(t *testing.T, kubeClient kubernetes.Interface, nodes []*v1.Node) { } } +func createNewIpHolderInstance() linodego.Instance { + return linodego.Instance{ + ID: 123456, + Label: generateClusterScopedIPHolderLinodeName(zone, Options.IpHolderSuffix), + Type: "g6-standard-1", + Region: "us-west", + IPv4: []*net.IP{&publicIPv4}, + } +} + func testNoBGPNodeLabel(t *testing.T, mc *mocks.MockClient) { Options.BGPNodeSelector = "" + Options.IpHolderSuffix = "linodelb" + t.Setenv("BGP_PEER_PREFIX", "2600:3cef") svc := createTestService() + newIpHolderInstance = createNewIpHolderInstance() kubeClient, _ := k8sClient.NewFakeClientset() ciliumClient := &fakev2alpha1.FakeCiliumV2alpha1{Fake: &kubeClient.CiliumFakeClientset.Fake} @@ -176,14 +214,17 @@ func testNoBGPNodeLabel(t *testing.T, mc *mocks.MockClient) { filter 
:= map[string]string{"label": fmt.Sprintf("%s-%s", ipHolderLabelPrefix, zone)}
 	rawFilter, _ := json.Marshal(filter)
 	mc.EXPECT().ListInstances(gomock.Any(), linodego.NewListOptions(1, string(rawFilter))).Times(1).Return([]linodego.Instance{}, nil)
+	filter = map[string]string{"label": generateClusterScopedIPHolderLinodeName(zone, Options.IpHolderSuffix)}
+	rawFilter, _ = json.Marshal(filter)
+	mc.EXPECT().ListInstances(gomock.Any(), linodego.NewListOptions(1, string(rawFilter))).Times(1).Return([]linodego.Instance{}, nil)
 	dummySharedIP := "45.76.101.26"
-	mc.EXPECT().CreateInstance(gomock.Any(), gomock.Any()).Times(1).Return(&ipHolderInstance, nil)
-	mc.EXPECT().GetInstanceIPAddresses(gomock.Any(), ipHolderInstance.ID).Times(1).Return(&linodego.InstanceIPAddressResponse{
+	mc.EXPECT().CreateInstance(gomock.Any(), gomock.Any()).Times(1).Return(&newIpHolderInstance, nil)
+	mc.EXPECT().GetInstanceIPAddresses(gomock.Any(), newIpHolderInstance.ID).Times(1).Return(&linodego.InstanceIPAddressResponse{
 		IPv4: &linodego.InstanceIPv4Response{
 			Public: []*linodego.InstanceIP{{Address: publicIPv4.String()}, {Address: dummySharedIP}},
 		},
 	}, nil)
-	mc.EXPECT().AddInstanceIPAddress(gomock.Any(), ipHolderInstance.ID, true).Times(1).Return(&linodego.InstanceIP{Address: dummySharedIP}, nil)
+	mc.EXPECT().AddInstanceIPAddress(gomock.Any(), newIpHolderInstance.ID, true).Times(1).Return(&linodego.InstanceIP{Address: dummySharedIP}, nil)
 	mc.EXPECT().ShareIPAddresses(gomock.Any(), linodego.IPAddressesShareOptions{
 		IPs:      []string{dummySharedIP},
 		LinodeID: 11111,
@@ -217,16 +258,108 @@ func testUnsupportedRegion(t *testing.T, mc *mocks.MockClient) {
 	lbStatus, err := lb.EnsureLoadBalancer(context.TODO(), "linodelb", svc, nodes)
 	if err == nil {
-		t.Fatal("expected nil error")
+		t.Fatal("expected a non-nil error")
 	}
 	if lbStatus != nil {
 		t.Fatalf("expected a nil lbStatus, got %v", lbStatus)
 	}
+
+	// Use BGP custom ID map
+	t.Setenv("BGP_CUSTOM_ID_MAP", "{'us-foobar': 2}")
+	lb = &loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType}
+	lbStatus, err = lb.EnsureLoadBalancer(context.TODO(), "linodelb", svc, nodes)
+	if err == nil {
+		t.Fatal("expected a non-nil error")
+	}
+	if lbStatus != nil {
+		t.Fatalf("expected a nil lbStatus, got %v", lbStatus)
+	}
+}
+
+func testCreateWithExistingIPHolderWithOldIpHolderNamingConvention(t *testing.T, mc *mocks.MockClient) {
+	Options.BGPNodeSelector = "cilium-bgp-peering=true"
+	svc := createTestService()
+	newIpHolderInstance = createNewIpHolderInstance()
+
+	kubeClient, _ := k8sClient.NewFakeClientset()
+	ciliumClient := &fakev2alpha1.FakeCiliumV2alpha1{Fake: &kubeClient.CiliumFakeClientset.Fake}
+	addService(t, kubeClient, svc)
+	addNodes(t, kubeClient, nodes)
+	lb := &loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType}
+
+	filter := map[string]string{"label": fmt.Sprintf("%s-%s", ipHolderLabelPrefix, zone)}
+	rawFilter, _ := json.Marshal(filter)
+	mc.EXPECT().ListInstances(gomock.Any(), linodego.NewListOptions(1, string(rawFilter))).Times(1).Return([]linodego.Instance{oldIpHolderInstance}, nil)
+	dummySharedIP := "45.76.101.26"
+	mc.EXPECT().AddInstanceIPAddress(gomock.Any(), oldIpHolderInstance.ID, true).Times(1).Return(&linodego.InstanceIP{Address: dummySharedIP}, nil)
+	mc.EXPECT().GetInstanceIPAddresses(gomock.Any(), oldIpHolderInstance.ID).Times(1).Return(&linodego.InstanceIPAddressResponse{
+		IPv4: &linodego.InstanceIPv4Response{
+			Public: []*linodego.InstanceIP{{Address: publicIPv4.String()}, {Address: dummySharedIP}},
+		},
+	}, nil)
+	
mc.EXPECT().ShareIPAddresses(gomock.Any(), linodego.IPAddressesShareOptions{ + IPs: []string{dummySharedIP}, + LinodeID: 11111, + }).Times(1) + mc.EXPECT().ShareIPAddresses(gomock.Any(), linodego.IPAddressesShareOptions{ + IPs: []string{dummySharedIP}, + LinodeID: 22222, + }).Times(1) + + lbStatus, err := lb.EnsureLoadBalancer(context.TODO(), "linodelb", svc, nodes) + if err != nil { + t.Fatalf("expected a nil error, got %v", err) + } + if lbStatus == nil { + t.Fatal("expected non-nil lbStatus") + } +} + +func testCreateWithExistingIPHolderWithNewIpHolderNamingConvention(t *testing.T, mc *mocks.MockClient) { + Options.BGPNodeSelector = "cilium-bgp-peering=true" + Options.IpHolderSuffix = "linodelb" + svc := createTestService() + newIpHolderInstance = createNewIpHolderInstance() + + kubeClient, _ := k8sClient.NewFakeClientset() + ciliumClient := &fakev2alpha1.FakeCiliumV2alpha1{Fake: &kubeClient.CiliumFakeClientset.Fake} + addService(t, kubeClient, svc) + addNodes(t, kubeClient, nodes) + lb := &loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType} + + filter := map[string]string{"label": fmt.Sprintf("%s-%s", ipHolderLabelPrefix, zone)} + rawFilter, _ := json.Marshal(filter) + mc.EXPECT().ListInstances(gomock.Any(), linodego.NewListOptions(1, string(rawFilter))).Times(1).Return([]linodego.Instance{oldIpHolderInstance}, nil) + dummySharedIP := "45.76.101.26" + mc.EXPECT().AddInstanceIPAddress(gomock.Any(), oldIpHolderInstance.ID, true).Times(1).Return(&linodego.InstanceIP{Address: dummySharedIP}, nil) + mc.EXPECT().GetInstanceIPAddresses(gomock.Any(), oldIpHolderInstance.ID).Times(1).Return(&linodego.InstanceIPAddressResponse{ + IPv4: &linodego.InstanceIPv4Response{ + Public: []*linodego.InstanceIP{{Address: publicIPv4.String()}, {Address: dummySharedIP}}, + }, + }, nil) + mc.EXPECT().ShareIPAddresses(gomock.Any(), linodego.IPAddressesShareOptions{ + IPs: []string{dummySharedIP}, + LinodeID: 11111, + }).Times(1) + mc.EXPECT().ShareIPAddresses(gomock.Any(), linodego.IPAddressesShareOptions{ + IPs: []string{dummySharedIP}, + LinodeID: 22222, + }).Times(1) + + lbStatus, err := lb.EnsureLoadBalancer(context.TODO(), "linodelb", svc, nodes) + if err != nil { + t.Fatalf("expected a nil error, got %v", err) + } + if lbStatus == nil { + t.Fatal("expected non-nil lbStatus") + } } -func testCreateWithExistingIPHolder(t *testing.T, mc *mocks.MockClient) { +func testCreateWithExistingIPHolderWithNewIpHolderNamingConventionUsingLongSuffix(t *testing.T, mc *mocks.MockClient) { Options.BGPNodeSelector = "cilium-bgp-peering=true" + Options.IpHolderSuffix = "OaTJrRuufacHVougjwkpBpmstiqvswvBNEMWXsRYfMBTCkKIUTXpbGIcIbDWSQp" svc := createTestService() + newIpHolderInstance = createNewIpHolderInstance() kubeClient, _ := k8sClient.NewFakeClientset() ciliumClient := &fakev2alpha1.FakeCiliumV2alpha1{Fake: &kubeClient.CiliumFakeClientset.Fake} @@ -236,10 +369,10 @@ func testCreateWithExistingIPHolder(t *testing.T, mc *mocks.MockClient) { filter := map[string]string{"label": fmt.Sprintf("%s-%s", ipHolderLabelPrefix, zone)} rawFilter, _ := json.Marshal(filter) - mc.EXPECT().ListInstances(gomock.Any(), linodego.NewListOptions(1, string(rawFilter))).Times(1).Return([]linodego.Instance{ipHolderInstance}, nil) + mc.EXPECT().ListInstances(gomock.Any(), linodego.NewListOptions(1, string(rawFilter))).Times(1).Return([]linodego.Instance{oldIpHolderInstance}, nil) dummySharedIP := "45.76.101.26" - mc.EXPECT().AddInstanceIPAddress(gomock.Any(), ipHolderInstance.ID, true).Times(1).Return(&linodego.InstanceIP{Address: 
dummySharedIP}, nil) - mc.EXPECT().GetInstanceIPAddresses(gomock.Any(), ipHolderInstance.ID).Times(1).Return(&linodego.InstanceIPAddressResponse{ + mc.EXPECT().AddInstanceIPAddress(gomock.Any(), oldIpHolderInstance.ID, true).Times(1).Return(&linodego.InstanceIP{Address: dummySharedIP}, nil) + mc.EXPECT().GetInstanceIPAddresses(gomock.Any(), oldIpHolderInstance.ID).Times(1).Return(&linodego.InstanceIPAddressResponse{ IPv4: &linodego.InstanceIPv4Response{ Public: []*linodego.InstanceIP{{Address: publicIPv4.String()}, {Address: dummySharedIP}}, }, @@ -262,9 +395,11 @@ func testCreateWithExistingIPHolder(t *testing.T, mc *mocks.MockClient) { } } -func testCreateWithNoExistingIPHolder(t *testing.T, mc *mocks.MockClient) { +func testCreateWithNoExistingIPHolderUsingNoSuffix(t *testing.T, mc *mocks.MockClient) { Options.BGPNodeSelector = "cilium-bgp-peering=true" + Options.IpHolderSuffix = "" svc := createTestService() + newIpHolderInstance = createNewIpHolderInstance() kubeClient, _ := k8sClient.NewFakeClientset() ciliumClient := &fakev2alpha1.FakeCiliumV2alpha1{Fake: &kubeClient.CiliumFakeClientset.Fake} @@ -275,14 +410,17 @@ func testCreateWithNoExistingIPHolder(t *testing.T, mc *mocks.MockClient) { filter := map[string]string{"label": fmt.Sprintf("%s-%s", ipHolderLabelPrefix, zone)} rawFilter, _ := json.Marshal(filter) mc.EXPECT().ListInstances(gomock.Any(), linodego.NewListOptions(1, string(rawFilter))).Times(1).Return([]linodego.Instance{}, nil) + filter = map[string]string{"label": generateClusterScopedIPHolderLinodeName(zone, Options.IpHolderSuffix)} + rawFilter, _ = json.Marshal(filter) + mc.EXPECT().ListInstances(gomock.Any(), linodego.NewListOptions(1, string(rawFilter))).Times(1).Return([]linodego.Instance{}, nil) dummySharedIP := "45.76.101.26" - mc.EXPECT().CreateInstance(gomock.Any(), gomock.Any()).Times(1).Return(&ipHolderInstance, nil) - mc.EXPECT().GetInstanceIPAddresses(gomock.Any(), ipHolderInstance.ID).Times(1).Return(&linodego.InstanceIPAddressResponse{ + mc.EXPECT().CreateInstance(gomock.Any(), gomock.Any()).Times(1).Return(&newIpHolderInstance, nil) + mc.EXPECT().GetInstanceIPAddresses(gomock.Any(), newIpHolderInstance.ID).Times(1).Return(&linodego.InstanceIPAddressResponse{ IPv4: &linodego.InstanceIPv4Response{ Public: []*linodego.InstanceIP{{Address: publicIPv4.String()}, {Address: dummySharedIP}}, }, }, nil) - mc.EXPECT().AddInstanceIPAddress(gomock.Any(), ipHolderInstance.ID, true).Times(1).Return(&linodego.InstanceIP{Address: dummySharedIP}, nil) + mc.EXPECT().AddInstanceIPAddress(gomock.Any(), newIpHolderInstance.ID, true).Times(1).Return(&linodego.InstanceIP{Address: dummySharedIP}, nil) mc.EXPECT().ShareIPAddresses(gomock.Any(), linodego.IPAddressesShareOptions{ IPs: []string{dummySharedIP}, LinodeID: 11111, @@ -301,7 +439,95 @@ func testCreateWithNoExistingIPHolder(t *testing.T, mc *mocks.MockClient) { } } -func testEnsureCiliumLoadBalancerDeleted(t *testing.T, mc *mocks.MockClient) { +func testCreateWithNoExistingIPHolderUsingShortSuffix(t *testing.T, mc *mocks.MockClient) { + Options.BGPNodeSelector = "cilium-bgp-peering=true" + Options.IpHolderSuffix = "linodelb" + svc := createTestService() + newIpHolderInstance = createNewIpHolderInstance() + + kubeClient, _ := k8sClient.NewFakeClientset() + ciliumClient := &fakev2alpha1.FakeCiliumV2alpha1{Fake: &kubeClient.CiliumFakeClientset.Fake} + addService(t, kubeClient, svc) + addNodes(t, kubeClient, nodes) + lb := &loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType} + + filter := map[string]string{"label": 
fmt.Sprintf("%s-%s", ipHolderLabelPrefix, zone)} + rawFilter, _ := json.Marshal(filter) + mc.EXPECT().ListInstances(gomock.Any(), linodego.NewListOptions(1, string(rawFilter))).Times(1).Return([]linodego.Instance{}, nil) + filter = map[string]string{"label": generateClusterScopedIPHolderLinodeName(zone, Options.IpHolderSuffix)} + rawFilter, _ = json.Marshal(filter) + mc.EXPECT().ListInstances(gomock.Any(), linodego.NewListOptions(1, string(rawFilter))).Times(1).Return([]linodego.Instance{}, nil) + dummySharedIP := "45.76.101.26" + mc.EXPECT().CreateInstance(gomock.Any(), gomock.Any()).Times(1).Return(&newIpHolderInstance, nil) + mc.EXPECT().GetInstanceIPAddresses(gomock.Any(), newIpHolderInstance.ID).Times(1).Return(&linodego.InstanceIPAddressResponse{ + IPv4: &linodego.InstanceIPv4Response{ + Public: []*linodego.InstanceIP{{Address: publicIPv4.String()}, {Address: dummySharedIP}}, + }, + }, nil) + mc.EXPECT().AddInstanceIPAddress(gomock.Any(), newIpHolderInstance.ID, true).Times(1).Return(&linodego.InstanceIP{Address: dummySharedIP}, nil) + mc.EXPECT().ShareIPAddresses(gomock.Any(), linodego.IPAddressesShareOptions{ + IPs: []string{dummySharedIP}, + LinodeID: 11111, + }).Times(1) + mc.EXPECT().ShareIPAddresses(gomock.Any(), linodego.IPAddressesShareOptions{ + IPs: []string{dummySharedIP}, + LinodeID: 22222, + }).Times(1) + + lbStatus, err := lb.EnsureLoadBalancer(context.TODO(), "linodelb", svc, nodes) + if err != nil { + t.Fatalf("expected a nil error, got %v", err) + } + if lbStatus == nil { + t.Fatal("expected non-nil lbStatus") + } +} + +func testCreateWithNoExistingIPHolderUsingLongSuffix(t *testing.T, mc *mocks.MockClient) { + Options.BGPNodeSelector = "cilium-bgp-peering=true" + Options.IpHolderSuffix = "OaTJrRuufacHVougjwkpBpmstiqvswvBNEMWXsRYfMBTCkKIUTXpbGIcIbDWSQp" + svc := createTestService() + newIpHolderInstance = createNewIpHolderInstance() + + kubeClient, _ := k8sClient.NewFakeClientset() + ciliumClient := &fakev2alpha1.FakeCiliumV2alpha1{Fake: &kubeClient.CiliumFakeClientset.Fake} + addService(t, kubeClient, svc) + addNodes(t, kubeClient, nodes) + lb := &loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType} + + filter := map[string]string{"label": fmt.Sprintf("%s-%s", ipHolderLabelPrefix, zone)} + rawFilter, _ := json.Marshal(filter) + mc.EXPECT().ListInstances(gomock.Any(), linodego.NewListOptions(1, string(rawFilter))).Times(1).Return([]linodego.Instance{}, nil) + filter = map[string]string{"label": generateClusterScopedIPHolderLinodeName(zone, Options.IpHolderSuffix)} + rawFilter, _ = json.Marshal(filter) + mc.EXPECT().ListInstances(gomock.Any(), linodego.NewListOptions(1, string(rawFilter))).Times(1).Return([]linodego.Instance{}, nil) + dummySharedIP := "45.76.101.26" + mc.EXPECT().CreateInstance(gomock.Any(), gomock.Any()).Times(1).Return(&newIpHolderInstance, nil) + mc.EXPECT().GetInstanceIPAddresses(gomock.Any(), newIpHolderInstance.ID).Times(1).Return(&linodego.InstanceIPAddressResponse{ + IPv4: &linodego.InstanceIPv4Response{ + Public: []*linodego.InstanceIP{{Address: publicIPv4.String()}, {Address: dummySharedIP}}, + }, + }, nil) + mc.EXPECT().AddInstanceIPAddress(gomock.Any(), newIpHolderInstance.ID, true).Times(1).Return(&linodego.InstanceIP{Address: dummySharedIP}, nil) + mc.EXPECT().ShareIPAddresses(gomock.Any(), linodego.IPAddressesShareOptions{ + IPs: []string{dummySharedIP}, + LinodeID: 11111, + }).Times(1) + mc.EXPECT().ShareIPAddresses(gomock.Any(), linodego.IPAddressesShareOptions{ + IPs: []string{dummySharedIP}, + LinodeID: 22222, + 
}).Times(1) + + lbStatus, err := lb.EnsureLoadBalancer(context.TODO(), "linodelb", svc, nodes) + if err != nil { + t.Fatalf("expected a nil error, got %v", err) + } + if lbStatus == nil { + t.Fatal("expected non-nil lbStatus") + } +} + +func testEnsureCiliumLoadBalancerDeletedWithOldIpHolderNamingConvention(t *testing.T, mc *mocks.MockClient) { Options.BGPNodeSelector = "cilium-bgp-peering=true" svc := createTestService() @@ -316,10 +542,10 @@ func testEnsureCiliumLoadBalancerDeleted(t *testing.T, mc *mocks.MockClient) { filter := map[string]string{"label": fmt.Sprintf("%s-%s", ipHolderLabelPrefix, zone)} rawFilter, _ := json.Marshal(filter) - mc.EXPECT().ListInstances(gomock.Any(), linodego.NewListOptions(1, string(rawFilter))).Times(1).Return([]linodego.Instance{ipHolderInstance}, nil) + mc.EXPECT().ListInstances(gomock.Any(), linodego.NewListOptions(1, string(rawFilter))).Times(1).Return([]linodego.Instance{oldIpHolderInstance}, nil) mc.EXPECT().DeleteInstanceIPAddress(gomock.Any(), 11111, dummySharedIP).Times(1).Return(nil) mc.EXPECT().DeleteInstanceIPAddress(gomock.Any(), 22222, dummySharedIP).Times(1).Return(nil) - mc.EXPECT().DeleteInstanceIPAddress(gomock.Any(), ipHolderInstance.ID, dummySharedIP).Times(1).Return(nil) + mc.EXPECT().DeleteInstanceIPAddress(gomock.Any(), oldIpHolderInstance.ID, dummySharedIP).Times(1).Return(nil) err := lb.EnsureLoadBalancerDeleted(context.TODO(), "linodelb", svc) if err != nil { @@ -327,9 +553,11 @@ func testEnsureCiliumLoadBalancerDeleted(t *testing.T, mc *mocks.MockClient) { } } -func testCiliumUpdateLoadBalancerAddNode(t *testing.T, mc *mocks.MockClient) { +func testEnsureCiliumLoadBalancerDeletedWithNewIpHolderNamingConvention(t *testing.T, mc *mocks.MockClient) { Options.BGPNodeSelector = "cilium-bgp-peering=true" + Options.IpHolderSuffix = "linodelb" svc := createTestService() + newIpHolderInstance = createNewIpHolderInstance() kubeClient, _ := k8sClient.NewFakeClientset() ciliumClient := &fakev2alpha1.FakeCiliumV2alpha1{Fake: &kubeClient.CiliumFakeClientset.Fake} @@ -337,12 +565,102 @@ func testCiliumUpdateLoadBalancerAddNode(t *testing.T, mc *mocks.MockClient) { addNodes(t, kubeClient, nodes) lb := &loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType} + dummySharedIP := "45.76.101.26" + svc.Status.LoadBalancer = v1.LoadBalancerStatus{Ingress: []v1.LoadBalancerIngress{{IP: dummySharedIP}}} + filter := map[string]string{"label": fmt.Sprintf("%s-%s", ipHolderLabelPrefix, zone)} rawFilter, _ := json.Marshal(filter) - mc.EXPECT().ListInstances(gomock.Any(), linodego.NewListOptions(1, string(rawFilter))).Times(1).Return([]linodego.Instance{ipHolderInstance}, nil) + mc.EXPECT().ListInstances(gomock.Any(), linodego.NewListOptions(1, string(rawFilter))).Times(1).Return([]linodego.Instance{}, nil) + filter = map[string]string{"label": generateClusterScopedIPHolderLinodeName(zone, Options.IpHolderSuffix)} + rawFilter, _ = json.Marshal(filter) + mc.EXPECT().ListInstances(gomock.Any(), linodego.NewListOptions(1, string(rawFilter))).Times(1).Return([]linodego.Instance{newIpHolderInstance}, nil) + mc.EXPECT().DeleteInstanceIPAddress(gomock.Any(), 11111, dummySharedIP).Times(1).Return(nil) + mc.EXPECT().DeleteInstanceIPAddress(gomock.Any(), 22222, dummySharedIP).Times(1).Return(nil) + mc.EXPECT().DeleteInstanceIPAddress(gomock.Any(), newIpHolderInstance.ID, dummySharedIP).Times(1).Return(nil) + + err := lb.EnsureLoadBalancerDeleted(context.TODO(), "linodelb", svc) + if err != nil { + t.Fatalf("expected a nil error, got %v", err) + } +} + +func 
testCiliumUpdateLoadBalancerAddNodeWithOldIpHolderNamingConvention(t *testing.T, mc *mocks.MockClient) { + Options.BGPNodeSelector = "cilium-bgp-peering=true" + svc := createTestService() + + kubeClient, _ := k8sClient.NewFakeClientset() + ciliumClient := &fakev2alpha1.FakeCiliumV2alpha1{Fake: &kubeClient.CiliumFakeClientset.Fake} + addService(t, kubeClient, svc) + addNodes(t, kubeClient, nodes) + lb := &loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType} + + filter := map[string]string{"label": fmt.Sprintf("%s-%s", ipHolderLabelPrefix, zone)} + rawFilter, _ := json.Marshal(filter) + mc.EXPECT().ListInstances(gomock.Any(), linodego.NewListOptions(1, string(rawFilter))).Times(1).Return([]linodego.Instance{oldIpHolderInstance}, nil) + dummySharedIP := "45.76.101.26" + mc.EXPECT().AddInstanceIPAddress(gomock.Any(), oldIpHolderInstance.ID, true).Times(1).Return(&linodego.InstanceIP{Address: dummySharedIP}, nil) + mc.EXPECT().GetInstanceIPAddresses(gomock.Any(), oldIpHolderInstance.ID).Times(1).Return(&linodego.InstanceIPAddressResponse{ + IPv4: &linodego.InstanceIPv4Response{ + Public: []*linodego.InstanceIP{{Address: publicIPv4.String()}, {Address: dummySharedIP}}, + }, + }, nil) + mc.EXPECT().ShareIPAddresses(gomock.Any(), linodego.IPAddressesShareOptions{ + IPs: []string{dummySharedIP}, + LinodeID: 11111, + }).Times(1) + mc.EXPECT().ShareIPAddresses(gomock.Any(), linodego.IPAddressesShareOptions{ + IPs: []string{dummySharedIP}, + LinodeID: 22222, + }).Times(1) + + lbStatus, err := lb.EnsureLoadBalancer(context.TODO(), "linodelb", svc, nodes) + if err != nil { + t.Fatalf("expected a nil error, got %v", err) + } + if lbStatus == nil { + t.Fatal("expected non-nil lbStatus") + } + + // Now add another node to the cluster and assert that it gets the shared IP + mc.EXPECT().ListInstances(gomock.Any(), linodego.NewListOptions(1, string(rawFilter))).Times(1).Return([]linodego.Instance{oldIpHolderInstance}, nil) + mc.EXPECT().GetInstanceIPAddresses(gomock.Any(), oldIpHolderInstance.ID).Times(1).Return(&linodego.InstanceIPAddressResponse{ + IPv4: &linodego.InstanceIPv4Response{ + Public: []*linodego.InstanceIP{{Address: publicIPv4.String()}, {Address: dummySharedIP}}, + }, + }, nil) + mc.EXPECT().ShareIPAddresses(gomock.Any(), linodego.IPAddressesShareOptions{ + IPs: []string{dummySharedIP}, + LinodeID: 55555, + }).Times(1) + addNodes(t, kubeClient, additionalNodes) + + err = lb.UpdateLoadBalancer(context.TODO(), "linodelb", svc, additionalNodes) + if err != nil { + t.Fatalf("expected a nil error, got %v", err) + } +} + +func testCiliumUpdateLoadBalancerAddNodeWithNewIpHolderNamingConvention(t *testing.T, mc *mocks.MockClient) { + Options.BGPNodeSelector = "cilium-bgp-peering=true" + Options.IpHolderSuffix = "linodelb" + svc := createTestService() + newIpHolderInstance = createNewIpHolderInstance() + + kubeClient, _ := k8sClient.NewFakeClientset() + ciliumClient := &fakev2alpha1.FakeCiliumV2alpha1{Fake: &kubeClient.CiliumFakeClientset.Fake} + addService(t, kubeClient, svc) + addNodes(t, kubeClient, nodes) + lb := &loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType} + + filter := map[string]string{"label": fmt.Sprintf("%s-%s", ipHolderLabelPrefix, zone)} + rawFilter, _ := json.Marshal(filter) + mc.EXPECT().ListInstances(gomock.Any(), linodego.NewListOptions(1, string(rawFilter))).Times(1).Return([]linodego.Instance{}, nil) + filter = map[string]string{"label": generateClusterScopedIPHolderLinodeName(zone, Options.IpHolderSuffix)} + rawFilter, _ = json.Marshal(filter) + 
mc.EXPECT().ListInstances(gomock.Any(), linodego.NewListOptions(1, string(rawFilter))).Times(1).Return([]linodego.Instance{newIpHolderInstance}, nil) dummySharedIP := "45.76.101.26" - mc.EXPECT().AddInstanceIPAddress(gomock.Any(), ipHolderInstance.ID, true).Times(1).Return(&linodego.InstanceIP{Address: dummySharedIP}, nil) - mc.EXPECT().GetInstanceIPAddresses(gomock.Any(), ipHolderInstance.ID).Times(1).Return(&linodego.InstanceIPAddressResponse{ + mc.EXPECT().AddInstanceIPAddress(gomock.Any(), newIpHolderInstance.ID, true).Times(1).Return(&linodego.InstanceIP{Address: dummySharedIP}, nil) + mc.EXPECT().GetInstanceIPAddresses(gomock.Any(), newIpHolderInstance.ID).Times(1).Return(&linodego.InstanceIPAddressResponse{ IPv4: &linodego.InstanceIPv4Response{ Public: []*linodego.InstanceIP{{Address: publicIPv4.String()}, {Address: dummySharedIP}}, }, @@ -365,8 +683,14 @@ func testCiliumUpdateLoadBalancerAddNode(t *testing.T, mc *mocks.MockClient) { } // Now add another node to the cluster and assert that it gets the shared IP - mc.EXPECT().ListInstances(gomock.Any(), linodego.NewListOptions(1, string(rawFilter))).Times(1).Return([]linodego.Instance{ipHolderInstance}, nil) - mc.EXPECT().GetInstanceIPAddresses(gomock.Any(), ipHolderInstance.ID).Times(1).Return(&linodego.InstanceIPAddressResponse{ + filter = map[string]string{"label": fmt.Sprintf("%s-%s", ipHolderLabelPrefix, zone)} + rawFilter, _ = json.Marshal(filter) + mc.EXPECT().ListInstances(gomock.Any(), linodego.NewListOptions(1, string(rawFilter))).Times(1).Return([]linodego.Instance{}, nil) + filter = map[string]string{"label": generateClusterScopedIPHolderLinodeName(zone, Options.IpHolderSuffix)} + rawFilter, _ = json.Marshal(filter) + mc.EXPECT().ListInstances(gomock.Any(), linodego.NewListOptions(1, string(rawFilter))).Times(1).Return([]linodego.Instance{newIpHolderInstance}, nil) + + mc.EXPECT().GetInstanceIPAddresses(gomock.Any(), newIpHolderInstance.ID).Times(1).Return(&linodego.InstanceIPAddressResponse{ IPv4: &linodego.InstanceIPv4Response{ Public: []*linodego.InstanceIP{{Address: publicIPv4.String()}, {Address: dummySharedIP}}, }, diff --git a/cloud/linode/cloud.go b/cloud/linode/cloud.go index f6ec2f7c..ed9b21ed 100644 --- a/cloud/linode/cloud.go +++ b/cloud/linode/cloud.go @@ -40,7 +40,9 @@ var Options struct { VPCNames string LoadBalancerType string BGPNodeSelector string + IpHolderSuffix string LinodeExternalNetwork *net.IPNet + NodeBalancerTags []string } type linodeCloud struct { @@ -50,6 +52,8 @@ type linodeCloud struct { routes cloudprovider.Routes } +var instanceCache *instances + func init() { cloudprovider.RegisterCloudProvider( ProviderName, @@ -96,7 +100,8 @@ func newCloud() (cloudprovider.Interface, error) { Options.VPCNames = Options.VPCName } - routes, err := newRoutes(linodeClient) + instanceCache = newInstances(linodeClient) + routes, err := newRoutes(linodeClient, instanceCache) if err != nil { return nil, fmt.Errorf("routes client was not created successfully: %w", err) } @@ -109,10 +114,20 @@ func newCloud() (cloudprovider.Interface, error) { ) } + if Options.IpHolderSuffix != "" { + klog.Infof("Using IP holder suffix '%s'\n", Options.IpHolderSuffix) + } + + if len(Options.IpHolderSuffix) > 23 { + msg := fmt.Sprintf("ip-holder-suffix must be 23 characters or less: %s is %d characters\n", Options.IpHolderSuffix, len(Options.IpHolderSuffix)) + klog.Error(msg) + return nil, fmt.Errorf("%s", msg) + } + // create struct that satisfies cloudprovider.Interface lcloud := &linodeCloud{ client: linodeClient, - instances: 
newInstances(linodeClient), + instances: instanceCache, loadbalancers: newLoadbalancers(linodeClient, region), routes: routes, } @@ -128,7 +143,7 @@ func (c *linodeCloud) Initialize(clientBuilder cloudprovider.ControllerClientBui serviceController := newServiceController(c.loadbalancers.(*loadbalancers), serviceInformer) go serviceController.Run(stopCh) - nodeController := newNodeController(kubeclient, c.client, nodeInformer) + nodeController := newNodeController(kubeclient, c.client, nodeInformer, instanceCache) go nodeController.Run(stopCh) } diff --git a/cloud/linode/cloud_test.go b/cloud/linode/cloud_test.go index 7f46fadf..c6f2c97d 100644 --- a/cloud/linode/cloud_test.go +++ b/cloud/linode/cloud_test.go @@ -1,10 +1,14 @@ package linode import ( + "reflect" + "strings" "testing" "github.com/golang/mock/gomock" + "github.com/linode/linode-cloud-controller-manager/cloud/linode/client/mocks" "github.com/stretchr/testify/assert" + cloudprovider "k8s.io/cloud-provider" ) func TestNewCloudRouteControllerDisabled(t *testing.T) { @@ -13,6 +17,7 @@ func TestNewCloudRouteControllerDisabled(t *testing.T) { t.Setenv("LINODE_API_TOKEN", "dummyapitoken") t.Setenv("LINODE_REGION", "us-east") + t.Setenv("LINODE_REQUEST_TIMEOUT_SECONDS", "10") t.Run("should not fail if vpc is empty and routecontroller is disabled", func(t *testing.T) { Options.VPCName = "" @@ -28,3 +33,498 @@ func TestNewCloudRouteControllerDisabled(t *testing.T) { assert.Error(t, err) }) } + +func TestNewCloud(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + t.Setenv("LINODE_API_TOKEN", "dummyapitoken") + t.Setenv("LINODE_REGION", "us-east") + t.Setenv("LINODE_REQUEST_TIMEOUT_SECONDS", "10") + t.Setenv("LINODE_ROUTES_CACHE_TTL_SECONDS", "60") + Options.LinodeGoDebug = true + + t.Run("should fail if api token is empty", func(t *testing.T) { + t.Setenv("LINODE_API_TOKEN", "") + _, err := newCloud() + assert.Error(t, err, "expected error when api token is empty") + }) + + t.Run("should fail if region is empty", func(t *testing.T) { + t.Setenv("LINODE_REGION", "") + _, err := newCloud() + assert.Error(t, err, "expected error when linode region is empty") + }) + + t.Run("should fail if both vpcname and vpcnames are set", func(t *testing.T) { + Options.VPCName = "tt" + Options.VPCNames = "tt" + defer func() { + Options.VPCName = "" + Options.VPCNames = "" + }() + _, err := newCloud() + assert.Error(t, err, "expected error when both vpcname and vpcnames are set") + }) + + t.Run("should not fail if deprecated vpcname is set", func(t *testing.T) { + Options.VPCName = "tt" + defer func() { + Options.VPCName = "" + Options.VPCNames = "" + }() + _, err := newCloud() + assert.NoError(t, err, "expected no error if deprecated flag vpcname is set") + assert.Equal(t, Options.VPCNames, "tt", "expected vpcnames to be set to vpcname") + }) + + t.Run("should fail if incorrect loadbalancertype is set", func(t *testing.T) { + rtEnabled := Options.EnableRouteController + Options.EnableRouteController = false + Options.LoadBalancerType = "test" + defer func() { + Options.LoadBalancerType = "" + Options.EnableRouteController = rtEnabled + }() + _, err := newCloud() + assert.Error(t, err, "expected error if incorrect loadbalancertype is set") + }) + + t.Run("should fail if ipholdersuffix is longer than 23 chars", func(t *testing.T) { + suffix := Options.IpHolderSuffix + Options.IpHolderSuffix = strings.Repeat("a", 24) + rtEnabled := Options.EnableRouteController + Options.EnableRouteController = false + defer func() { + 
Options.IpHolderSuffix = suffix + Options.EnableRouteController = rtEnabled + }() + _, err := newCloud() + assert.Error(t, err, "expected error if ipholdersuffix is longer than 23 chars") + }) +} + +func Test_linodeCloud_LoadBalancer(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + client := mocks.NewMockClient(ctrl) + type fields struct { + client *mocks.MockClient + instances cloudprovider.InstancesV2 + loadbalancers cloudprovider.LoadBalancer + routes cloudprovider.Routes + } + tests := []struct { + name string + fields fields + want cloudprovider.LoadBalancer + want1 bool + }{ + { + name: "should return loadbalancer interface", + fields: fields{ + client: client, + instances: newInstances(client), + loadbalancers: newLoadbalancers(client, "us-east"), + routes: nil, + }, + want: newLoadbalancers(client, "us-east"), + want1: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := &linodeCloud{ + client: tt.fields.client, + instances: tt.fields.instances, + loadbalancers: tt.fields.loadbalancers, + routes: tt.fields.routes, + } + got, got1 := c.LoadBalancer() + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("linodeCloud.LoadBalancer() got = %v, want %v", got, tt.want) + } + if got1 != tt.want1 { + t.Errorf("linodeCloud.LoadBalancer() got1 = %v, want %v", got1, tt.want1) + } + }) + } +} + +func Test_linodeCloud_InstancesV2(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + client := mocks.NewMockClient(ctrl) + type fields struct { + client *mocks.MockClient + instances cloudprovider.InstancesV2 + loadbalancers cloudprovider.LoadBalancer + routes cloudprovider.Routes + } + tests := []struct { + name string + fields fields + want cloudprovider.InstancesV2 + want1 bool + }{ + { + name: "should return instances interface", + fields: fields{ + client: client, + instances: newInstances(client), + loadbalancers: newLoadbalancers(client, "us-east"), + routes: nil, + }, + want: newInstances(client), + want1: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := &linodeCloud{ + client: tt.fields.client, + instances: tt.fields.instances, + loadbalancers: tt.fields.loadbalancers, + routes: tt.fields.routes, + } + got, got1 := c.InstancesV2() + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("linodeCloud.InstancesV2() got = %v, want %v", got, tt.want) + } + if got1 != tt.want1 { + t.Errorf("linodeCloud.InstancesV2() got1 = %v, want %v", got1, tt.want1) + } + }) + } +} + +func Test_linodeCloud_Instances(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + client := mocks.NewMockClient(ctrl) + type fields struct { + client *mocks.MockClient + instances cloudprovider.InstancesV2 + loadbalancers cloudprovider.LoadBalancer + routes cloudprovider.Routes + } + tests := []struct { + name string + fields fields + want cloudprovider.Instances + want1 bool + }{ + { + name: "should return nil", + fields: fields{ + client: client, + instances: newInstances(client), + loadbalancers: newLoadbalancers(client, "us-east"), + routes: nil, + }, + want: nil, + want1: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := &linodeCloud{ + client: tt.fields.client, + instances: tt.fields.instances, + loadbalancers: tt.fields.loadbalancers, + routes: tt.fields.routes, + } + got, got1 := c.Instances() + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("linodeCloud.Instances() got = %v, want %v", got, tt.want) + } + if got1 != tt.want1 { + 
t.Errorf("linodeCloud.Instances() got1 = %v, want %v", got1, tt.want1) + } + }) + } +} + +func Test_linodeCloud_Zones(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + client := mocks.NewMockClient(ctrl) + type fields struct { + client *mocks.MockClient + instances cloudprovider.InstancesV2 + loadbalancers cloudprovider.LoadBalancer + routes cloudprovider.Routes + } + tests := []struct { + name string + fields fields + want cloudprovider.Zones + want1 bool + }{ + { + name: "should return nil", + fields: fields{ + client: client, + instances: newInstances(client), + loadbalancers: newLoadbalancers(client, "us-east"), + routes: nil, + }, + want: nil, + want1: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := &linodeCloud{ + client: tt.fields.client, + instances: tt.fields.instances, + loadbalancers: tt.fields.loadbalancers, + routes: tt.fields.routes, + } + got, got1 := c.Zones() + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("linodeCloud.Zones() got = %v, want %v", got, tt.want) + } + if got1 != tt.want1 { + t.Errorf("linodeCloud.Zones() got1 = %v, want %v", got1, tt.want1) + } + }) + } +} + +func Test_linodeCloud_Clusters(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + client := mocks.NewMockClient(ctrl) + type fields struct { + client *mocks.MockClient + instances cloudprovider.InstancesV2 + loadbalancers cloudprovider.LoadBalancer + routes cloudprovider.Routes + } + tests := []struct { + name string + fields fields + want cloudprovider.Clusters + want1 bool + }{ + { + name: "should return nil", + fields: fields{ + client: client, + instances: newInstances(client), + loadbalancers: newLoadbalancers(client, "us-east"), + routes: nil, + }, + want: nil, + want1: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := &linodeCloud{ + client: tt.fields.client, + instances: tt.fields.instances, + loadbalancers: tt.fields.loadbalancers, + routes: tt.fields.routes, + } + got, got1 := c.Clusters() + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("linodeCloud.Clusters() got = %v, want %v", got, tt.want) + } + if got1 != tt.want1 { + t.Errorf("linodeCloud.Clusters() got1 = %v, want %v", got1, tt.want1) + } + }) + } +} + +func Test_linodeCloud_Routes(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + client := mocks.NewMockClient(ctrl) + r := &routes{} + type fields struct { + client *mocks.MockClient + instances cloudprovider.InstancesV2 + loadbalancers cloudprovider.LoadBalancer + routes cloudprovider.Routes + EnableRouteController bool + } + tests := []struct { + name string + fields fields + want cloudprovider.Routes + want1 bool + }{ + { + name: "should return nil", + fields: fields{ + client: client, + instances: newInstances(client), + loadbalancers: newLoadbalancers(client, "us-east"), + routes: r, + EnableRouteController: false, + }, + want: nil, + want1: false, + }, + { + name: "should return routes interface", + fields: fields{ + client: client, + instances: newInstances(client), + loadbalancers: newLoadbalancers(client, "us-east"), + routes: r, + EnableRouteController: true, + }, + want: r, + want1: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := &linodeCloud{ + client: tt.fields.client, + instances: tt.fields.instances, + loadbalancers: tt.fields.loadbalancers, + routes: tt.fields.routes, + } + rt := Options.EnableRouteController + defer func() { Options.EnableRouteController = rt }() + 
Options.EnableRouteController = tt.fields.EnableRouteController + got, got1 := c.Routes() + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("linodeCloud.Routes() got = %v, want %v", got, tt.want) + } + if got1 != tt.want1 { + t.Errorf("linodeCloud.Routes() got1 = %v, want %v", got1, tt.want1) + } + }) + } +} + +func Test_linodeCloud_ProviderName(t *testing.T) { + type fields struct { + client *mocks.MockClient + instances cloudprovider.InstancesV2 + loadbalancers cloudprovider.LoadBalancer + routes cloudprovider.Routes + } + tests := []struct { + name string + fields fields + want string + }{ + { + name: "should return linode", + fields: fields{ + client: nil, + instances: nil, + loadbalancers: nil, + routes: nil, + }, + want: ProviderName, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := &linodeCloud{ + client: tt.fields.client, + instances: tt.fields.instances, + loadbalancers: tt.fields.loadbalancers, + routes: tt.fields.routes, + } + if got := c.ProviderName(); got != tt.want { + t.Errorf("linodeCloud.ProviderName() = %v, want %v", got, tt.want) + } + }) + } +} + +func Test_linodeCloud_ScrubDNS(t *testing.T) { + type fields struct { + client *mocks.MockClient + instances cloudprovider.InstancesV2 + loadbalancers cloudprovider.LoadBalancer + routes cloudprovider.Routes + } + type args struct { + in0 []string + in1 []string + } + tests := []struct { + name string + fields fields + args args + wantNsOut []string + wantSrchOut []string + }{ + { + name: "should return linode", + fields: fields{ + client: nil, + instances: nil, + loadbalancers: nil, + routes: nil, + }, + wantNsOut: nil, + wantSrchOut: nil, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := &linodeCloud{ + client: tt.fields.client, + instances: tt.fields.instances, + loadbalancers: tt.fields.loadbalancers, + routes: tt.fields.routes, + } + gotNsOut, gotSrchOut := c.ScrubDNS(tt.args.in0, tt.args.in1) + if !reflect.DeepEqual(gotNsOut, tt.wantNsOut) { + t.Errorf("linodeCloud.ScrubDNS() gotNsOut = %v, want %v", gotNsOut, tt.wantNsOut) + } + if !reflect.DeepEqual(gotSrchOut, tt.wantSrchOut) { + t.Errorf("linodeCloud.ScrubDNS() gotSrchOut = %v, want %v", gotSrchOut, tt.wantSrchOut) + } + }) + } +} + +func Test_linodeCloud_HasClusterID(t *testing.T) { + type fields struct { + client *mocks.MockClient + instances cloudprovider.InstancesV2 + loadbalancers cloudprovider.LoadBalancer + routes cloudprovider.Routes + } + tests := []struct { + name string + fields fields + want bool + }{ + { + name: "should return true", + fields: fields{ + client: nil, + instances: nil, + loadbalancers: nil, + routes: nil, + }, + want: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := &linodeCloud{ + client: tt.fields.client, + instances: tt.fields.instances, + loadbalancers: tt.fields.loadbalancers, + routes: tt.fields.routes, + } + if got := c.HasClusterID(); got != tt.want { + t.Errorf("linodeCloud.HasClusterID() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/cloud/linode/common.go b/cloud/linode/common.go index 67b8b976..9c4aed9f 100644 --- a/cloud/linode/common.go +++ b/cloud/linode/common.go @@ -9,7 +9,10 @@ import ( "github.com/linode/linodego" ) -const providerIDPrefix = "linode://" +const ( + providerIDPrefix = "linode://" + DNS1123LabelMaxLength int = 63 +) type invalidProviderIDError struct { value string diff --git a/cloud/linode/fake_linode_test.go b/cloud/linode/fake_linode_test.go index 6dab73fe..aeb069d8 100644 --- 
a/cloud/linode/fake_linode_test.go +++ b/cloud/linode/fake_linode_test.go @@ -141,13 +141,11 @@ func (f *fakeAPI) setupRoutes() { f.t.Fatal(err) } - data := linodego.NodeBalancerFirewallsPagedResponse{ - Data: []linodego.Firewall{}, - PageOptions: &linodego.PageOptions{ - Page: 1, - Pages: 1, - Results: 0, - }, + data := paginatedResponse[linodego.Firewall]{ + Page: 1, + Pages: 1, + Results: 0, + Data: []linodego.Firewall{}, } out: @@ -155,7 +153,7 @@ func (f *fakeAPI) setupRoutes() { for _, device := range devices { if device.Entity.ID == nodebalancerID { data.Data = append(data.Data, *f.fw[fwid]) - data.PageOptions.Results = 1 + data.Results = 1 break out } } diff --git a/cloud/linode/loadbalancers.go b/cloud/linode/loadbalancers.go index 4a396ccf..eb80aad8 100644 --- a/cloud/linode/loadbalancers.go +++ b/cloud/linode/loadbalancers.go @@ -222,9 +222,15 @@ func (l *loadbalancers) EnsureLoadBalancer(ctx context.Context, clusterName stri }, nil } + var ipHolderSuffix string + if Options.IpHolderSuffix != "" { + ipHolderSuffix = Options.IpHolderSuffix + klog.Infof("using parameter-based IP Holder suffix %s for Service %s", ipHolderSuffix, serviceNn) + } + // CiliumLoadBalancerIPPool does not yet exist for the service var sharedIP string - if sharedIP, err = l.createSharedIP(ctx, nodes); err != nil { + if sharedIP, err = l.createSharedIP(ctx, nodes, ipHolderSuffix); err != nil { klog.Errorf("Failed to request shared instance IP: %s", err.Error()) return nil, err } @@ -428,9 +434,16 @@ func (l *loadbalancers) UpdateLoadBalancer(ctx context.Context, clusterName stri // handle LoadBalancers backed by Cilium if l.loadBalancerType == ciliumLBType { klog.Infof("handling update for LoadBalancer Service %s/%s as %s", service.Namespace, service.Name, ciliumLBClass) + serviceNn := getServiceNn(service) + var ipHolderSuffix string + if Options.IpHolderSuffix != "" { + ipHolderSuffix = Options.IpHolderSuffix + klog.V(3).Infof("using parameter-based IP Holder suffix %s for Service %s", ipHolderSuffix, serviceNn) + } + // make sure that IPs are shared properly on the Node if using load-balancers not backed by NodeBalancers for _, node := range nodes { - if err := l.handleIPSharing(ctx, node); err != nil { + if err := l.handleIPSharing(ctx, node, ipHolderSuffix); err != nil { return err } } @@ -599,6 +612,8 @@ func (l *loadbalancers) GetLoadBalancerTags(_ context.Context, clusterName strin tags = append(tags, clusterName) } + tags = append(tags, Options.NodeBalancerTags...) + tagStr, ok := service.GetAnnotations()[annotations.AnnLinodeLoadBalancerTags] if ok { return append(tags, strings.Split(tagStr, ",")...) 
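The Cilium tests in this change repeatedly build label filters with generateClusterScopedIPHolderLinodeName(zone, Options.IpHolderSuffix), and common.go adds DNS1123LabelMaxLength = 63, but the helper's body is not part of the hunks shown here. Below is a minimal sketch of what such a helper could look like, assuming it joins the existing ip-holder prefix, the zone, and the per-cluster suffix with dashes and clamps the result to the 63-character DNS1123 label limit. The function name, ipHolderLabelPrefix, and DNS1123LabelMaxLength all come from this diff; the joining and truncation logic is an assumption, not necessarily the implementation merged in this PR.

func generateClusterScopedIPHolderLinodeName(zone, suffix string) string {
	// Hypothetical sketch, not the code merged in this PR.
	// The legacy convention is "<ipHolderLabelPrefix>-<zone>"; the
	// cluster-scoped convention appends a per-cluster suffix so that
	// multiple clusters in one region and account do not collide.
	label := fmt.Sprintf("%s-%s", ipHolderLabelPrefix, zone)
	if suffix != "" {
		label = fmt.Sprintf("%s-%s", label, suffix)
	}
	// Clamp to a valid DNS1123 label length; the "63 char long suffix"
	// tests above exercise exactly this boundary.
	if len(label) > DNS1123LabelMaxLength {
		label = label[:DNS1123LabelMaxLength]
	}
	return label
}

A scheme like this would also be consistent with cloud.go rejecting ip-holder-suffix values longer than 23 characters up front: the prefix and zone already consume part of the 63-character budget, so a longer suffix would only ever be truncated.
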
diff --git a/cloud/linode/loadbalancers_test.go b/cloud/linode/loadbalancers_test.go index 806ce25b..2cce0575 100644 --- a/cloud/linode/loadbalancers_test.go +++ b/cloud/linode/loadbalancers_test.go @@ -150,6 +150,10 @@ func TestCCMLoadBalancers(t *testing.T) { name: "Create Load Balancer With Invalid Firewall ACL - NO Allow Or Deny", f: testCreateNodeBalanceWithNoAllowOrDenyList, }, + { + name: "Create Load Balancer With Global Tags set", + f: testCreateNodeBalancerWithGlobalTags, + }, { name: "Update Load Balancer - Add Node", f: testUpdateLoadBalancerAddNode, @@ -274,7 +278,7 @@ func stubService(fake *fake.Clientset, service *v1.Service) { _, _ = fake.CoreV1().Services("").Create(context.TODO(), service, metav1.CreateOptions{}) } -func testCreateNodeBalancer(t *testing.T, client *linodego.Client, _ *fakeAPI, annMap map[string]string) error { +func testCreateNodeBalancer(t *testing.T, client *linodego.Client, _ *fakeAPI, annMap map[string]string, expectedTags []string) error { svc := &v1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: randString(), @@ -341,7 +345,9 @@ func testCreateNodeBalancer(t *testing.T, client *linodego.Client, _ *fakeAPI, a t.Logf("actual: %v", nb.ClientConnThrottle) } - expectedTags := []string{"linodelb", "fake", "test", "yolo"} + if len(expectedTags) == 0 { + expectedTags = []string{"linodelb", "fake", "test", "yolo"} + } if !reflect.DeepEqual(nb.Tags, expectedTags) { t.Error("unexpected Tags") t.Logf("expected: %v", expectedTags) @@ -366,7 +372,7 @@ func testCreateNodeBalancer(t *testing.T, client *linodego.Client, _ *fakeAPI, a } func testCreateNodeBalancerWithOutFirewall(t *testing.T, client *linodego.Client, f *fakeAPI) { - err := testCreateNodeBalancer(t, client, f, nil) + err := testCreateNodeBalancer(t, client, f, nil, nil) if err != nil { t.Fatalf("expected a nil error, got %v", err) } @@ -377,7 +383,7 @@ func testCreateNodeBalanceWithNoAllowOrDenyList(t *testing.T, client *linodego.C annotations.AnnLinodeCloudFirewallACL: `{}`, } - err := testCreateNodeBalancer(t, client, f, annotations) + err := testCreateNodeBalancer(t, client, f, annotations, nil) if err == nil || !stderrors.Is(err, firewall.ErrInvalidFWConfig) { t.Fatalf("expected a %v error, got %v", firewall.ErrInvalidFWConfig, err) } @@ -395,7 +401,7 @@ func testCreateNodeBalanceWithBothAllowOrDenyList(t *testing.T, client *linodego }`, } - err := testCreateNodeBalancer(t, client, f, annotations) + err := testCreateNodeBalancer(t, client, f, annotations, nil) if err == nil || !stderrors.Is(err, firewall.ErrInvalidFWConfig) { t.Fatalf("expected a %v error, got %v", firewall.ErrInvalidFWConfig, err) } @@ -410,7 +416,7 @@ func testCreateNodeBalancerWithAllowList(t *testing.T, client *linodego.Client, }`, } - err := testCreateNodeBalancer(t, client, f, annotations) + err := testCreateNodeBalancer(t, client, f, annotations, nil) if err != nil { t.Fatalf("expected a non-nil error, got %v", err) } @@ -425,7 +431,7 @@ func testCreateNodeBalancerWithDenyList(t *testing.T, client *linodego.Client, f }`, } - err := testCreateNodeBalancer(t, client, f, annotations) + err := testCreateNodeBalancer(t, client, f, annotations, nil) if err != nil { t.Fatalf("expected a non-nil error, got %v", err) } @@ -435,7 +441,7 @@ func testCreateNodeBalancerWithFirewall(t *testing.T, client *linodego.Client, f annotations := map[string]string{ annotations.AnnLinodeCloudFirewallID: "123", } - err := testCreateNodeBalancer(t, client, f, annotations) + err := testCreateNodeBalancer(t, client, f, annotations, nil) if err != nil { 
t.Fatalf("expected a nil error, got %v", err) } @@ -446,12 +452,25 @@ func testCreateNodeBalancerWithInvalidFirewall(t *testing.T, client *linodego.Cl annotations.AnnLinodeCloudFirewallID: "qwerty", } expectedError := "strconv.Atoi: parsing \"qwerty\": invalid syntax" - err := testCreateNodeBalancer(t, client, f, annotations) + err := testCreateNodeBalancer(t, client, f, annotations, nil) if err.Error() != expectedError { t.Fatalf("expected a %s error, got %v", expectedError, err) } } +func testCreateNodeBalancerWithGlobalTags(t *testing.T, client *linodego.Client, f *fakeAPI) { + original := Options.NodeBalancerTags + defer func() { + Options.NodeBalancerTags = original + }() + Options.NodeBalancerTags = []string{"foobar"} + expectedTags := []string{"linodelb", "foobar", "fake", "test", "yolo"} + err := testCreateNodeBalancer(t, client, f, nil, expectedTags) + if err != nil { + t.Fatalf("expected a nil error, got %v", err) + } +} + func testUpdateLoadBalancerAddNode(t *testing.T, client *linodego.Client, f *fakeAPI) { svc := &v1.Service{ ObjectMeta: metav1.ObjectMeta{ @@ -543,7 +562,6 @@ func testUpdateLoadBalancerAddNode(t *testing.T, client *linodego.Client, f *fak checkIDs := func() (int, int) { var req *fakeRequest for request := range f.requests { - request := request if rx.MatchString(request.Path) { req = &request break diff --git a/cloud/linode/node_controller.go b/cloud/linode/node_controller.go index fe502a6a..9b390043 100644 --- a/cloud/linode/node_controller.go +++ b/cloud/linode/node_controller.go @@ -39,10 +39,10 @@ type nodeController struct { metadataLastUpdate map[string]time.Time ttl time.Duration - queue workqueue.DelayingInterface + queue workqueue.TypedDelayingInterface[any] } -func newNodeController(kubeclient kubernetes.Interface, client client.Client, informer v1informers.NodeInformer) *nodeController { +func newNodeController(kubeclient kubernetes.Interface, client client.Client, informer v1informers.NodeInformer, instanceCache *instances) *nodeController { timeout := defaultMetadataTTL if raw, ok := os.LookupEnv("LINODE_METADATA_TTL"); ok { if t, _ := strconv.Atoi(raw); t > 0 { @@ -52,12 +52,12 @@ func newNodeController(kubeclient kubernetes.Interface, client client.Client, in return &nodeController{ client: client, - instances: newInstances(client), + instances: instanceCache, kubeclient: kubeclient, informer: informer, ttl: timeout, metadataLastUpdate: make(map[string]time.Time), - queue: workqueue.NewDelayingQueue(), + queue: workqueue.NewTypedDelayingQueue[any](), } } diff --git a/cloud/linode/node_controller_test.go b/cloud/linode/node_controller_test.go new file mode 100644 index 00000000..409bc24d --- /dev/null +++ b/cloud/linode/node_controller_test.go @@ -0,0 +1,229 @@ +package linode + +import ( + "context" + "errors" + "net" + "net/http" + "testing" + "time" + + "github.com/golang/mock/gomock" + "github.com/linode/linode-cloud-controller-manager/cloud/annotations" + "github.com/linode/linode-cloud-controller-manager/cloud/linode/client/mocks" + "github.com/linode/linodego" + "github.com/stretchr/testify/assert" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes/fake" + "k8s.io/client-go/util/workqueue" +) + +func TestNodeController_Run(t *testing.T) { + // Mock dependencies + ctrl := gomock.NewController(t) + defer ctrl.Finish() + client := mocks.NewMockClient(ctrl) + kubeClient := fake.NewSimpleClientset() + informer := informers.NewSharedInformerFactory(kubeClient, 
0).Core().V1().Nodes() + mockQueue := workqueue.NewTypedDelayingQueueWithConfig(workqueue.TypedDelayingQueueConfig[any]{Name: "test"}) + + nodeCtrl := newNodeController(kubeClient, client, informer, newInstances(client)) + nodeCtrl.queue = mockQueue + nodeCtrl.ttl = 1 * time.Second + + // Add test node + node := &v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "nodeA", + Labels: map[string]string{}, + Annotations: map[string]string{}, + }, + Spec: v1.NodeSpec{}, + } + _, err := kubeClient.CoreV1().Nodes().Create(context.TODO(), node, metav1.CreateOptions{}) + assert.NoError(t, err, "expected no error during node creation") + + // Start the controller + stopCh := make(chan struct{}) + go nodeCtrl.Run(stopCh) + + client.EXPECT().ListInstances(gomock.Any(), nil).AnyTimes().Return([]linodego.Instance{}, &linodego.Error{Code: http.StatusTooManyRequests, Message: "Too many requests"}) + // Add the node to the informer + err = nodeCtrl.informer.Informer().GetStore().Add(node) + assert.NoError(t, err, "expected no error when adding node to informer") + + // Allow some time for the queue to process + time.Sleep(1 * time.Second) + + // Stop the controller + close(stopCh) +} + +func TestNodeController_processNext(t *testing.T) { + // Mock dependencies + ctrl := gomock.NewController(t) + defer ctrl.Finish() + client := mocks.NewMockClient(ctrl) + kubeClient := fake.NewSimpleClientset() + queue := workqueue.NewTypedDelayingQueueWithConfig(workqueue.TypedDelayingQueueConfig[any]{Name: "testQueue"}) + node := &v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Labels: map[string]string{}, + Annotations: map[string]string{}, + }, + Spec: v1.NodeSpec{}, + } + + _, err := kubeClient.CoreV1().Nodes().Create(context.TODO(), node, metav1.CreateOptions{}) + assert.NoError(t, err, "expected no error during node creation") + + controller := &nodeController{ + kubeclient: kubeClient, + instances: newInstances(client), + queue: queue, + metadataLastUpdate: make(map[string]time.Time), + ttl: defaultMetadataTTL, + } + + t.Run("should return no error on unknown errors", func(t *testing.T) { + queue.Add(node) + client.EXPECT().ListInstances(gomock.Any(), nil).Times(1).Return([]linodego.Instance{}, errors.New("lookup failed")) + result := controller.processNext() + assert.True(t, result, "processNext should return true") + if queue.Len() != 0 { + t.Errorf("expected queue to be empty, got %d items", queue.Len()) + } + }) + + t.Run("should return no error if node exists", func(t *testing.T) { + queue.Add(node) + publicIP := net.ParseIP("172.234.31.123") + privateIP := net.ParseIP("192.168.159.135") + client.EXPECT().ListInstances(gomock.Any(), nil).Times(1).Return([]linodego.Instance{ + {ID: 111, Label: "test", IPv4: []*net.IP{&publicIP, &privateIP}, HostUUID: "111"}, + }, nil) + result := controller.processNext() + assert.True(t, result, "processNext should return true") + if queue.Len() != 0 { + t.Errorf("expected queue to be empty, got %d items", queue.Len()) + } + }) + + t.Run("should return no error if queued object is not of type Node", func(t *testing.T) { + queue.Add("abc") + result := controller.processNext() + assert.True(t, result, "processNext should return true") + if queue.Len() != 0 { + t.Errorf("expected queue to be empty, got %d items", queue.Len()) + } + }) + + t.Run("should return no error if node in k8s doesn't exist", func(t *testing.T) { + queue.Add(node) + controller.kubeclient = fake.NewSimpleClientset() + defer func() { controller.kubeclient = kubeClient }() + result := 
controller.processNext()
+		assert.True(t, result, "processNext should return true")
+		if queue.Len() != 0 {
+			t.Errorf("expected queue to be empty, got %d items", queue.Len())
+		}
+	})
+
+	t.Run("should return error and requeue when it gets 429 from linode API", func(t *testing.T) {
+		queue = workqueue.NewTypedDelayingQueueWithConfig(workqueue.TypedDelayingQueueConfig[any]{Name: "testQueue1"})
+		queue.Add(node)
+		controller.queue = queue
+		client := mocks.NewMockClient(ctrl)
+		controller.instances = newInstances(client)
+		retryInterval = 1 * time.Nanosecond
+		client.EXPECT().ListInstances(gomock.Any(), nil).Times(1).Return([]linodego.Instance{}, &linodego.Error{Code: http.StatusTooManyRequests, Message: "Too many requests"})
+		result := controller.processNext()
+		time.Sleep(1 * time.Second)
+		assert.True(t, result, "processNext should return true")
+		if queue.Len() == 0 {
+			t.Errorf("expected queue to not be empty, but it was empty")
+		}
+	})
+
+	t.Run("should return error and requeue when it gets error >= 500 from linode API", func(t *testing.T) {
+		queue = workqueue.NewTypedDelayingQueueWithConfig(workqueue.TypedDelayingQueueConfig[any]{Name: "testQueue2"})
+		queue.Add(node)
+		controller.queue = queue
+		client := mocks.NewMockClient(ctrl)
+		controller.instances = newInstances(client)
+		retryInterval = 1 * time.Nanosecond
+		client.EXPECT().ListInstances(gomock.Any(), nil).Times(1).Return([]linodego.Instance{}, &linodego.Error{Code: http.StatusInternalServerError, Message: "Internal server error"})
+		result := controller.processNext()
+		time.Sleep(1 * time.Second)
+		assert.True(t, result, "processNext should return true")
+		if queue.Len() == 0 {
+			t.Errorf("expected queue to not be empty, but it was empty")
+		}
+	})
+}
+
+func TestNodeController_handleNode(t *testing.T) {
+	// Mock dependencies
+	ctrl := gomock.NewController(t)
+	defer ctrl.Finish()
+	client := mocks.NewMockClient(ctrl)
+	kubeClient := fake.NewSimpleClientset()
+	node := &v1.Node{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:        "test-node",
+			Labels:      map[string]string{},
+			Annotations: map[string]string{},
+		},
+		Spec: v1.NodeSpec{ProviderID: "linode://123"},
+	}
+	_, err := kubeClient.CoreV1().Nodes().Create(context.TODO(), node, metav1.CreateOptions{})
+	assert.NoError(t, err, "expected no error during node creation")
+
+	instCache := newInstances(client)
+
+	t.Setenv("LINODE_METADATA_TTL", "30")
+	nodeCtrl := newNodeController(kubeClient, client, nil, instCache)
+	assert.Equal(t, 30*time.Second, nodeCtrl.ttl, "expected ttl to be 30 seconds")
+
+	// Test: Successful metadata update
+	publicIP := net.ParseIP("172.234.31.123")
+	privateIP := net.ParseIP("192.168.159.135")
+	client.EXPECT().ListInstances(gomock.Any(), nil).Times(1).Return([]linodego.Instance{
+		{ID: 123, Label: "test-node", IPv4: []*net.IP{&publicIP, &privateIP}, HostUUID: "123"},
+	}, nil)
+	err = nodeCtrl.handleNode(context.TODO(), node)
+	assert.NoError(t, err, "expected no error during handleNode")
+
+	// Check metadataLastUpdate
+	lastUpdate := nodeCtrl.LastMetadataUpdate("test-node")
+	if time.Since(lastUpdate) > 5*time.Second {
+		t.Errorf("metadataLastUpdate was not updated correctly")
+	}
+
+	// Annotations set, no update needed as ttl not reached
+	node.Labels[annotations.AnnLinodeHostUUID] = "123"
+	node.Annotations[annotations.AnnLinodeNodePrivateIP] = privateIP.String()
+	err = nodeCtrl.handleNode(context.TODO(), node)
+	assert.NoError(t, err, "expected no error during handleNode")
+
+	// Lookup failure for linode instance
+	client = mocks.NewMockClient(ctrl)
+	
nodeCtrl.instances = newInstances(client) + nodeCtrl.metadataLastUpdate["test-node"] = time.Now().Add(-2 * nodeCtrl.ttl) + client.EXPECT().ListInstances(gomock.Any(), nil).Times(1).Return([]linodego.Instance{}, errors.New("lookup failed")) + err = nodeCtrl.handleNode(context.TODO(), node) + assert.Error(t, err, "expected error during handleNode, got nil") + + // All fields already set + client = mocks.NewMockClient(ctrl) + nodeCtrl.instances = newInstances(client) + nodeCtrl.metadataLastUpdate["test-node"] = time.Now().Add(-2 * nodeCtrl.ttl) + client.EXPECT().ListInstances(gomock.Any(), nil).Times(1).Return([]linodego.Instance{ + {ID: 123, Label: "test-node", IPv4: []*net.IP{&publicIP, &privateIP}, HostUUID: "123"}, + }, nil) + err = nodeCtrl.handleNode(context.TODO(), node) + assert.NoError(t, err, "expected no error during handleNode") +} diff --git a/cloud/linode/route_controller.go b/cloud/linode/route_controller.go index 8b3bdf47..b1aa112b 100644 --- a/cloud/linode/route_controller.go +++ b/cloud/linode/route_controller.go @@ -62,7 +62,7 @@ type routes struct { routeCache *routeCache } -func newRoutes(client client.Client) (cloudprovider.Routes, error) { +func newRoutes(client client.Client, instanceCache *instances) (cloudprovider.Routes, error) { timeout := 60 if raw, ok := os.LookupEnv("LINODE_ROUTES_CACHE_TTL_SECONDS"); ok { if t, _ := strconv.Atoi(raw); t > 0 { @@ -77,7 +77,7 @@ func newRoutes(client client.Client) (cloudprovider.Routes, error) { return &routes{ client: client, - instances: newInstances(client), + instances: instanceCache, routeCache: &routeCache{ routes: make(map[int][]linodego.VPCIP, 0), ttl: time.Duration(timeout) * time.Second, diff --git a/cloud/linode/route_controller_test.go b/cloud/linode/route_controller_test.go index 6b2efc64..e6f2bff0 100644 --- a/cloud/linode/route_controller_test.go +++ b/cloud/linode/route_controller_test.go @@ -33,7 +33,8 @@ func TestListRoutes(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() client := mocks.NewMockClient(ctrl) - routeController, err := newRoutes(client) + instanceCache := newInstances(client) + routeController, err := newRoutes(client, instanceCache) assert.NoError(t, err) client.EXPECT().ListInstances(gomock.Any(), gomock.Any()).Times(1).Return([]linodego.Instance{}, nil) @@ -56,7 +57,8 @@ func TestListRoutes(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() client := mocks.NewMockClient(ctrl) - routeController, err := newRoutes(client) + instanceCache := newInstances(client) + routeController, err := newRoutes(client, instanceCache) assert.NoError(t, err) client.EXPECT().ListInstances(gomock.Any(), nil).Times(1).Return([]linodego.Instance{validInstance}, nil) @@ -82,7 +84,8 @@ func TestListRoutes(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() client := mocks.NewMockClient(ctrl) - routeController, err := newRoutes(client) + instanceCache := newInstances(client) + routeController, err := newRoutes(client, instanceCache) assert.NoError(t, err) client.EXPECT().ListInstances(gomock.Any(), nil).Times(1).Return([]linodego.Instance{validInstance}, nil) @@ -123,7 +126,8 @@ func TestListRoutes(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() client := mocks.NewMockClient(ctrl) - routeController, err := newRoutes(client) + instanceCache := newInstances(client) + routeController, err := newRoutes(client, instanceCache) assert.NoError(t, err) client.EXPECT().ListInstances(gomock.Any(), nil).Times(1).Return([]linodego.Instance{validInstance}, nil) @@ 
-164,7 +168,8 @@ func TestListRoutes(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() client := mocks.NewMockClient(ctrl) - routeController, err := newRoutes(client) + instanceCache := newInstances(client) + routeController, err := newRoutes(client, instanceCache) assert.NoError(t, err) client.EXPECT().ListInstances(gomock.Any(), nil).Times(1).Return([]linodego.Instance{validInstance}, nil) @@ -179,7 +184,8 @@ func TestListRoutes(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() client := mocks.NewMockClient(ctrl) - routeController, err := newRoutes(client) + instanceCache := newInstances(client) + routeController, err := newRoutes(client, instanceCache) assert.NoError(t, err) vpcIP2 := "10.0.0.3" @@ -283,7 +289,8 @@ func TestCreateRoute(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() client := mocks.NewMockClient(ctrl) - routeController, err := newRoutes(client) + instanceCache := newInstances(client) + routeController, err := newRoutes(client, instanceCache) assert.NoError(t, err) client.EXPECT().ListInstances(gomock.Any(), nil).Times(1).Return([]linodego.Instance{validInstance}, nil) @@ -315,7 +322,8 @@ func TestCreateRoute(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() client := mocks.NewMockClient(ctrl) - routeController, err := newRoutes(client) + instanceCache := newInstances(client) + routeController, err := newRoutes(client, instanceCache) assert.NoError(t, err) client.EXPECT().ListInstances(gomock.Any(), nil).Times(1).Return([]linodego.Instance{validInstance}, nil) @@ -328,7 +336,8 @@ func TestCreateRoute(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() client := mocks.NewMockClient(ctrl) - routeController, err := newRoutes(client) + instanceCache := newInstances(client) + routeController, err := newRoutes(client, instanceCache) assert.NoError(t, err) client.EXPECT().ListInstances(gomock.Any(), nil).Times(1).Return([]linodego.Instance{}, nil) @@ -370,7 +379,8 @@ func TestDeleteRoute(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() client := mocks.NewMockClient(ctrl) - routeController, err := newRoutes(client) + instanceCache := newInstances(client) + routeController, err := newRoutes(client, instanceCache) assert.NoError(t, err) client.EXPECT().ListInstances(gomock.Any(), nil).Times(1).Return([]linodego.Instance{}, nil) @@ -400,7 +410,8 @@ func TestDeleteRoute(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() client := mocks.NewMockClient(ctrl) - routeController, err := newRoutes(client) + instanceCache := newInstances(client) + routeController, err := newRoutes(client, instanceCache) assert.NoError(t, err) client.EXPECT().ListInstances(gomock.Any(), nil).Times(1).Return([]linodego.Instance{validInstance}, nil) @@ -431,7 +442,8 @@ func TestDeleteRoute(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() client := mocks.NewMockClient(ctrl) - routeController, err := newRoutes(client) + instanceCache := newInstances(client) + routeController, err := newRoutes(client, instanceCache) assert.NoError(t, err) client.EXPECT().ListInstances(gomock.Any(), nil).Times(1).Return([]linodego.Instance{validInstance}, nil) diff --git a/cloud/linode/service_controller.go b/cloud/linode/service_controller.go index f04167e9..684cac7e 100644 --- a/cloud/linode/service_controller.go +++ b/cloud/linode/service_controller.go @@ -15,20 +15,20 @@ import ( "k8s.io/klog/v2" ) -const retryInterval = time.Minute * 1 +var retryInterval = time.Minute * 1 type 
serviceController struct { loadbalancers *loadbalancers informer v1informers.ServiceInformer - queue workqueue.DelayingInterface + queue workqueue.TypedDelayingInterface[any] } func newServiceController(loadbalancers *loadbalancers, informer v1informers.ServiceInformer) *serviceController { return &serviceController{ loadbalancers: loadbalancers, informer: informer, - queue: workqueue.NewDelayingQueue(), + queue: workqueue.NewTypedDelayingQueue[any](), } } diff --git a/cloud/linode/service_controller_test.go b/cloud/linode/service_controller_test.go new file mode 100644 index 00000000..8d90d9ea --- /dev/null +++ b/cloud/linode/service_controller_test.go @@ -0,0 +1,111 @@ +package linode + +import ( + "context" + "testing" + "time" + + "github.com/golang/mock/gomock" + "github.com/linode/linode-cloud-controller-manager/cloud/linode/client/mocks" + "github.com/stretchr/testify/assert" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes/fake" + "k8s.io/client-go/util/workqueue" +) + +func Test_serviceController_Run(t *testing.T) { + // Mock dependencies + ctrl := gomock.NewController(t) + defer ctrl.Finish() + client := mocks.NewMockClient(ctrl) + kubeClient := fake.NewSimpleClientset() + informer := informers.NewSharedInformerFactory(kubeClient, 0).Core().V1().Services() + mockQueue := workqueue.NewTypedDelayingQueueWithConfig(workqueue.TypedDelayingQueueConfig[any]{Name: "test"}) + + loadbalancers := newLoadbalancers(client, "us-east").(*loadbalancers) + svcCtrl := newServiceController(loadbalancers, informer) + svcCtrl.queue = mockQueue + + svc := createTestService() + svc.Spec.Type = "LoadBalancer" + _, err := kubeClient.CoreV1().Services("test-ns").Create(context.TODO(), svc, metav1.CreateOptions{}) + assert.NoError(t, err, "expected no error during svc creation") + + // Start the controller + stopCh := make(chan struct{}) + go svcCtrl.Run(stopCh) + + // Add svc to the informer + err = svcCtrl.informer.Informer().GetStore().Add(svc) + assert.NoError(t, err, "expected no error when adding svc to informer") + + // Allow some time for the queue to process + time.Sleep(1 * time.Second) + + // Stop the controller + close(stopCh) +} + +func Test_serviceController_processNextDeletion(t *testing.T) { + type fields struct { + loadbalancers *loadbalancers + queue workqueue.TypedDelayingInterface[any] + Client *mocks.MockClient + } + tests := []struct { + name string + fields fields + Setup func(*fields) + want bool + queueLen int + }{ + { + name: "Invalid service type", + fields: fields{ + loadbalancers: nil, + }, + Setup: func(f *fields) { + f.loadbalancers = &loadbalancers{client: f.Client, zone: "test", loadBalancerType: Options.LoadBalancerType} + f.queue = workqueue.NewTypedDelayingQueueWithConfig(workqueue.TypedDelayingQueueConfig[any]{Name: "testQueue"}) + f.queue.Add("test") + }, + want: true, + queueLen: 0, + }, + { + name: "Valid service type", + fields: fields{ + loadbalancers: nil, + }, + Setup: func(f *fields) { + f.loadbalancers = &loadbalancers{client: f.Client, zone: "test", loadBalancerType: Options.LoadBalancerType} + f.queue = workqueue.NewTypedDelayingQueueWithConfig(workqueue.TypedDelayingQueueConfig[any]{Name: "testQueue"}) + svc := createTestService() + f.queue.Add(svc) + }, + want: true, + queueLen: 0, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + s := &serviceController{ + loadbalancers: tt.fields.loadbalancers, + queue: tt.fields.queue, + } + ctrl := gomock.NewController(t) + defer 
ctrl.Finish() + client := mocks.NewMockClient(ctrl) + tt.fields.Client = client + tt.Setup(&tt.fields) + s.loadbalancers = tt.fields.loadbalancers + s.queue = tt.fields.queue + s.loadbalancers.client = tt.fields.Client + if got := s.processNextDeletion(); got != tt.want { + t.Errorf("serviceController.processNextDeletion() = %v, want %v", got, tt.want) + } + assert.Equal(t, tt.queueLen, tt.fields.queue.Len()) + }) + } +} diff --git a/cloud/linode/vpc_test.go b/cloud/linode/vpc_test.go new file mode 100644 index 00000000..9e99b675 --- /dev/null +++ b/cloud/linode/vpc_test.go @@ -0,0 +1,149 @@ +package linode + +import ( + "context" + "errors" + "net/http" + "reflect" + "sort" + "testing" + + "github.com/golang/mock/gomock" + "github.com/linode/linode-cloud-controller-manager/cloud/linode/client/mocks" + "github.com/linode/linodego" + "github.com/stretchr/testify/assert" +) + +func TestGetAllVPCIDs(t *testing.T) { + tests := []struct { + name string + vpcIDs map[string]int + want []int + }{ + { + name: "multiple vpcs present", + vpcIDs: map[string]int{"test1": 1, "test2": 2, "test3": 3}, + want: []int{1, 2, 3}, + }, + { + name: "no vpc present", + vpcIDs: map[string]int{}, + want: []int{}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + vpcIDs = tt.vpcIDs + got := GetAllVPCIDs() + sort.Ints(got) + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("GetAllVPCIDs() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestGetVPCID(t *testing.T) { + t.Run("vpcID in cache", func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + client := mocks.NewMockClient(ctrl) + vpcIDs = map[string]int{"test1": 1, "test2": 2, "test3": 3} + got, err := GetVPCID(context.TODO(), client, "test3") + if err != nil { + t.Errorf("GetVPCID() error = %v", err) + return + } + if got != vpcIDs["test3"] { + t.Errorf("GetVPCID() = %v, want %v", got, vpcIDs["test3"]) + } + }) + + t.Run("vpcID not in cache and listVPCs return error", func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + client := mocks.NewMockClient(ctrl) + vpcIDs = map[string]int{"test1": 1, "test2": 2, "test3": 3} + client.EXPECT().ListVPCs(gomock.Any(), gomock.Any()).Times(1).Return([]linodego.VPC{}, errors.New("error")) + got, err := GetVPCID(context.TODO(), client, "test4") + assert.Error(t, err) + if got != 0 { + t.Errorf("GetVPCID() = %v, want %v", got, 0) + } + }) + + t.Run("vpcID not in cache and listVPCs return nothing", func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + client := mocks.NewMockClient(ctrl) + vpcIDs = map[string]int{"test1": 1, "test2": 2, "test3": 3} + client.EXPECT().ListVPCs(gomock.Any(), gomock.Any()).Times(1).Return([]linodego.VPC{}, nil) + got, err := GetVPCID(context.TODO(), client, "test4") + assert.ErrorIs(t, err, vpcLookupError{"test4"}) + if got != 0 { + t.Errorf("GetVPCID() = %v, want %v", got, 0) + } + }) + + t.Run("vpcID not in cache and listVPCs return vpc info", func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + client := mocks.NewMockClient(ctrl) + vpcIDs = map[string]int{"test1": 1, "test2": 2, "test3": 3} + client.EXPECT().ListVPCs(gomock.Any(), gomock.Any()).Times(1).Return([]linodego.VPC{{ID: 4, Label: "test4"}}, nil) + got, err := GetVPCID(context.TODO(), client, "test4") + assert.NoError(t, err) + if got != 4 { + t.Errorf("GetVPCID() = %v, want %v", got, 4) + } + }) +} + +func TestGetVPCIPAddresses(t *testing.T) { + t.Run("vpc id not found", func(t *testing.T) { + ctrl := 
gomock.NewController(t) + defer ctrl.Finish() + client := mocks.NewMockClient(ctrl) + vpcIDs = map[string]int{"test1": 1, "test2": 2, "test3": 3} + client.EXPECT().ListVPCs(gomock.Any(), gomock.Any()).Times(1).Return([]linodego.VPC{}, nil) + _, err := GetVPCIPAddresses(context.TODO(), client, "test4") + assert.Error(t, err) + }) + + t.Run("vpc id found but listing ip addresses fails with 404 error", func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + client := mocks.NewMockClient(ctrl) + vpcIDs = map[string]int{"test1": 1, "test2": 2, "test3": 3} + client.EXPECT().ListVPCIPAddresses(gomock.Any(), gomock.Any(), gomock.Any()).Times(1).Return([]linodego.VPCIP{}, &linodego.Error{Code: http.StatusNotFound, Message: "[404] [label] VPC not found"}) + _, err := GetVPCIPAddresses(context.TODO(), client, "test3") + assert.Error(t, err) + _, exists := vpcIDs["test3"] + assert.False(t, exists, "test3 key should get deleted from vpcIDs map") + }) + + t.Run("vpc id found but listing ip addresses fails with 500 error", func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + client := mocks.NewMockClient(ctrl) + vpcIDs = map[string]int{"test1": 1, "test2": 2, "test3": 3} + client.EXPECT().ListVPCIPAddresses(gomock.Any(), gomock.Any(), gomock.Any()).Times(1).Return([]linodego.VPCIP{}, &linodego.Error{Code: http.StatusInternalServerError, Message: "[500] [label] Internal Server Error"}) + _, err := GetVPCIPAddresses(context.TODO(), client, "test1") + assert.Error(t, err) + _, exists := vpcIDs["test1"] + assert.True(t, exists, "test1 key should not get deleted from vpcIDs map") + }) + + t.Run("vpc id found and listing vpc ipaddresses succeeds", func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + client := mocks.NewMockClient(ctrl) + vpcIDs = map[string]int{"test1": 1, "test2": 2, "test3": 3} + client.EXPECT().ListVPCs(gomock.Any(), gomock.Any()).Times(1).Return([]linodego.VPC{{ID: 10, Label: "test10"}}, nil) + client.EXPECT().ListVPCIPAddresses(gomock.Any(), gomock.Any(), gomock.Any()).Times(1).Return([]linodego.VPCIP{}, nil) + _, err := GetVPCIPAddresses(context.TODO(), client, "test10") + assert.NoError(t, err) + _, exists := vpcIDs["test10"] + assert.True(t, exists, "test10 key should be present in vpcIDs map") + }) +} diff --git a/codecov.yml b/codecov.yml new file mode 100644 index 00000000..458b455c --- /dev/null +++ b/codecov.yml @@ -0,0 +1,2 @@ +ignore: + - "cloud/linode/client/mocks" diff --git a/deploy/ccm-linode-template.yaml b/deploy/ccm-linode-template.yaml index 6d4c53ce..4f0048b1 100644 --- a/deploy/ccm-linode-template.yaml +++ b/deploy/ccm-linode-template.yaml @@ -23,6 +23,9 @@ rules: - apiGroups: [""] resources: ["endpoints"] verbs: ["get", "watch", "list", "update", "create"] +- apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "update", "create"] - apiGroups: [""] resources: ["nodes"] verbs: ["get", "watch", "list", "update", "delete", "patch"] @@ -101,10 +104,10 @@ spec: imagePullPolicy: Always name: ccm-linode args: - - --leader-elect-resource-lock=endpoints + - --leader-elect-resource-lock=leases - --v=3 - - --port=0 - --secure-port=10253 + - --webhook-secure-port=0 volumeMounts: - mountPath: /etc/kubernetes name: k8s diff --git a/deploy/chart/templates/daemonset.yaml b/deploy/chart/templates/daemonset.yaml index 0a46c517..68176b74 100644 --- a/deploy/chart/templates/daemonset.yaml +++ b/deploy/chart/templates/daemonset.yaml @@ -60,8 +60,14 @@ spec: {{- if 
.Values.sharedIPLoadBalancing.bgpNodeSelector }} - --bgp-node-selector={{ .Values.sharedIPLoadBalancing.bgpNodeSelector }} {{- end }} + {{- if .Values.sharedIPLoadBalancing.ipHolderSuffix }} + - --ip-holder-suffix={{ .Values.sharedIPLoadBalancing.ipHolderSuffix }} + {{- end}} - --load-balancer-type={{ required "A valid .Values.sharedIPLoadBalancing.loadBalancerType is required for shared IP load-balancing" .Values.sharedIPLoadBalancing.loadBalancerType }} {{- end }} + {{- if .Values.nodeBalancerTags }} + - --nodebalancer-tags={{ join " " .Values.nodeBalancerTags }} + {{- end }} volumeMounts: - mountPath: /etc/kubernetes name: k8s diff --git a/deploy/chart/values.yaml b/deploy/chart/values.yaml index 5bd4546c..c8e296bd 100644 --- a/deploy/chart/values.yaml +++ b/deploy/chart/values.yaml @@ -48,6 +48,7 @@ tolerations: # sharedIPLoadBalancing: # loadBalancerType: cilium-bgp # bgpNodeSelector: +# ipHolderSuffix: # This section adds ability to enable route-controller for ccm # routeController: @@ -63,6 +64,9 @@ env: # - name: EXAMPLE_ENV_VAR # value: "true" +# Linode tags to apply to all NodeBalancers +nodeBalancerTags: [] + # This section adds the ability to pass volumes to the CCM DaemonSet volumes: # - name: test-volume diff --git a/deploy/generate-manifest.sh b/deploy/generate-manifest.sh index 75994adc..d61ea290 100755 --- a/deploy/generate-manifest.sh +++ b/deploy/generate-manifest.sh @@ -4,6 +4,11 @@ set -o pipefail -o noclobber -o nounset die() { echo "$*" 1>&2; exit 1; } +echo -e "\n********************************************************************" +echo -e "WARNING: This script is deprecated and may be removed in the future." +echo -e "Please use Helm for installs, or refer to the docs for alternatives." +echo -e "********************************************************************\n" + [ "$#" -eq 2 ] || die "First argument must be a Linode APIv4 Personal Access Token with all permissions. 
(https://cloud.linode.com/profile/tokens) diff --git a/devbox.json b/devbox.json index 481ecf54..ff3d96fa 100644 --- a/devbox.json +++ b/devbox.json @@ -4,7 +4,7 @@ "clusterctl@latest", "docker@latest", "envsubst@latest", - "go@1.22.2", + "go@1.23.3", "golangci-lint@latest", "jq@latest", "kind@latest", @@ -18,6 +18,14 @@ "init_hook": [ "export \"GOROOT=$(go env GOROOT)\"" ], - "scripts": {} + "scripts": { + "mgmt-and-capl-cluster": "make mgmt-and-capl-cluster", + "e2e-test": "make e2e-test", + "e2e-test-bgp": "make e2e-test-bgp", + "cleanup-cluster": "make cleanup-cluster" + } + }, + "env": { + "EXP_CLUSTER_RESOURCE_SET": "true" } } diff --git a/devbox.lock b/devbox.lock index 5dc1a796..cb767c68 100644 --- a/devbox.lock +++ b/devbox.lock @@ -193,51 +193,51 @@ } } }, - "go@1.22.2": { - "last_modified": "2024-05-12T16:19:40Z", - "resolved": "github:NixOS/nixpkgs/3281bec7174f679eabf584591e75979a258d8c40#go", + "go@1.23.3": { + "last_modified": "2024-11-28T07:51:56Z", + "resolved": "github:NixOS/nixpkgs/226216574ada4c3ecefcbbec41f39ce4655f78ef#go", "source": "devbox-search", - "version": "1.22.2", + "version": "1.23.3", "systems": { "aarch64-darwin": { "outputs": [ { "name": "out", - "path": "/nix/store/460vdyz0ghxh8n5ibq3fgc3s63is68cd-go-1.22.2", + "path": "/nix/store/qrj2wp6vzfpjfrrlcmr22818zg83fb73-go-1.23.3", "default": true } ], - "store_path": "/nix/store/460vdyz0ghxh8n5ibq3fgc3s63is68cd-go-1.22.2" + "store_path": "/nix/store/qrj2wp6vzfpjfrrlcmr22818zg83fb73-go-1.23.3" }, "aarch64-linux": { "outputs": [ { "name": "out", - "path": "/nix/store/c732580an83by9405c5j2fmn04hp6ry6-go-1.22.2", + "path": "/nix/store/dm66qyl19skrwcmk4rb9xcs64xc1d071-go-1.23.3", "default": true } ], - "store_path": "/nix/store/c732580an83by9405c5j2fmn04hp6ry6-go-1.22.2" + "store_path": "/nix/store/dm66qyl19skrwcmk4rb9xcs64xc1d071-go-1.23.3" }, "x86_64-darwin": { "outputs": [ { "name": "out", - "path": "/nix/store/9cim6f30wrzdbiaw8wa45kvffns73dgz-go-1.22.2", + "path": "/nix/store/vkjn6njpz4gy5ma763vh8hh93bgjwycr-go-1.23.3", "default": true } ], - "store_path": "/nix/store/9cim6f30wrzdbiaw8wa45kvffns73dgz-go-1.22.2" + "store_path": "/nix/store/vkjn6njpz4gy5ma763vh8hh93bgjwycr-go-1.23.3" }, "x86_64-linux": { "outputs": [ { "name": "out", - "path": "/nix/store/6bvndddvxaypc42x6x4ari20gv3vfdgd-go-1.22.2", + "path": "/nix/store/bavnchxi7v6xs077jxv7fl5rrqc3y87w-go-1.23.3", "default": true } ], - "store_path": "/nix/store/6bvndddvxaypc42x6x4ari20gv3vfdgd-go-1.22.2" + "store_path": "/nix/store/bavnchxi7v6xs077jxv7fl5rrqc3y87w-go-1.23.3" } } }, diff --git a/docs/configuration/README.md b/docs/configuration/README.md new file mode 100644 index 00000000..9d8f2531 --- /dev/null +++ b/docs/configuration/README.md @@ -0,0 +1,57 @@ +# Configuration Guide + +The Linode Cloud Controller Manager (CCM) offers extensive configuration options to customize its behavior. This section covers all available configuration methods and options. + +## Configuration Areas + +1. **[LoadBalancer Services](loadbalancer.md)** + - NodeBalancer implementation + - BGP-based IP sharing + - Protocol configuration + - Health checks + - SSL/TLS setup + - Connection throttling + - [See examples](../examples/basic.md#loadbalancer-services) + +2. **[Service Annotations](annotations.md)** + - NodeBalancer configuration + - Protocol settings + - Health check options + - Port configuration + - Firewall settings + - [See annotation reference](annotations.md#available-annotations) + +3. 
**[Node Configuration](nodes.md)** + - Node labels and topology + - Private networking setup + - VPC configuration + - Node controller behavior + - [See node management](nodes.md#node-controller-behavior) + +4. **[Environment Variables](environment.md)** + - Cache settings + - API configuration + - Network settings + - BGP configuration + - [See environment reference](environment.md#available-variables) + +5. **[Firewall Setup](firewall.md)** + - CCM-managed firewalls + - User-managed firewalls + - Allow/deny lists + - [See firewall options](firewall.md#ccm-managed-firewalls) + +6. **[Route Configuration](routes.md)** + - VPC routing + - Pod CIDR management + - Route controller setup + - [See route management](routes.md#route-management) + +7. **[Session Affinity](session-affinity.md)** + - Client IP affinity + - Timeout configuration + - Service configuration + - [See affinity setup](session-affinity.md#configuration) + +For installation instructions, see the [Installation Guide](../getting-started/installation.md). +For troubleshooting help, see the [Troubleshooting Guide](../getting-started/troubleshooting.md). diff --git a/docs/configuration/annotations.md b/docs/configuration/annotations.md new file mode 100644 index 00000000..185627a3 --- /dev/null +++ b/docs/configuration/annotations.md @@ -0,0 +1,117 @@ +# Service Annotations + +## Overview + +Service annotations allow you to customize the behavior of your LoadBalancer services. All Service annotations must be prefixed with: `service.beta.kubernetes.io/linode-loadbalancer-` + +For implementation details, see: +- [LoadBalancer Configuration](loadbalancer.md) +- [Basic Service Examples](../examples/basic.md) +- [Advanced Configuration Examples](../examples/advanced.md) + +## Available Annotations + +### Basic Configuration + +| Annotation (Suffix) | Values | Default | Description | +|--------------------|--------|---------|-------------| +| `throttle` | `0`-`20` (`0` to disable) | `0` | Client Connection Throttle, which limits the number of subsequent new connections per second from the same client IP | +| `default-protocol` | `tcp`, `http`, `https` | `tcp` | This annotation is used to specify the default protocol for Linode NodeBalancer | +| `default-proxy-protocol` | `none`, `v1`, `v2` | `none` | Specifies whether to use a version of Proxy Protocol on the underlying NodeBalancer | +| `port-*` | json object | | Specifies port specific NodeBalancer configuration. See [Port Configuration](#port-specific-configuration) | +| `check-type` | `none`, `connection`, `http`, `http_body` | | The type of health check to perform against back-ends. 
See [Health Checks](loadbalancer.md#health-checks) | +| `check-path` | string | | The URL path to check on each back-end during health checks | +| `check-body` | string | | Text which must be present in the response body to pass the health check | +| `check-interval` | int | | Duration, in seconds, to wait between health checks | +| `check-timeout` | int (1-30) | | Duration, in seconds, to wait for a health check to succeed | +| `check-attempts` | int (1-30) | | Number of health check failures necessary to remove a back-end | +| `check-passive` | bool | `false` | When `true`, `5xx` status codes will cause the health check to fail | +| `preserve` | bool | `false` | When `true`, deleting a `LoadBalancer` service does not delete the underlying NodeBalancer | +| `nodebalancer-id` | string | | The ID of the NodeBalancer to front the service | +| `hostname-only-ingress` | bool | `false` | When `true`, the LoadBalancerStatus will only contain the Hostname | +| `tags` | string | | A comma-separated list of tags to be applied to the NodeBalancer instance | +| `firewall-id` | string | | An existing Cloud Firewall ID to be attached to the NodeBalancer instance. See [Firewall Setup](firewall.md) | +| `firewall-acl` | string | | The Firewall rules to be applied to the NodeBalancer. See [Firewall Configuration](#firewall-configuration) | + +### Port Specific Configuration + +The `port-*` annotation allows per-port configuration, encoded in JSON. For detailed examples, see [LoadBalancer SSL/TLS Setup](loadbalancer.md#ssltls-configuration). + +```yaml +metadata: + annotations: + service.beta.kubernetes.io/linode-loadbalancer-port-443: | + { + "protocol": "https", + "tls-secret-name": "my-tls-secret", + "proxy-protocol": "v2" + } +``` + +Available port options: +- `protocol`: Protocol for this port (tcp, http, https) +- `tls-secret-name`: Name of TLS secret for HTTPS. The secret type should be `kubernetes.io/tls` +- `proxy-protocol`: Proxy protocol version for this port + +### Deprecated Annotations + +| Annotation (Suffix) | Values | Default | Description | Scheduled Removal | +|--------------------|--------|---------|-------------|-------------------| +| `proxy-protocol` | `none`, `v1`, `v2` | `none` | Specifies whether to use a version of Proxy Protocol on the underlying NodeBalancer | Q4 2021 | + +### Annotation Boolean Values +For annotations with bool value types, the following string values are interpreted as `true`: +- `"1"` +- `"t"` +- `"T"` +- `"true"` +- `"True"` +- `"TRUE"` + +Any other values will be interpreted as `false`. For more details, see [strconv.ParseBool](https://golang.org/pkg/strconv/#ParseBool). 
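To make the fallback behavior concrete, here is a minimal, self-contained Go sketch of such parsing; the helper name `parseBoolAnnotation` is illustrative, not the CCM's actual function:

```go
package main

import (
	"fmt"
	"strconv"
)

// parseBoolAnnotation interprets an annotation value as described above:
// values accepted by strconv.ParseBool keep their meaning, and any value
// it rejects falls back to false.
func parseBoolAnnotation(value string) bool {
	b, err := strconv.ParseBool(value)
	if err != nil {
		return false
	}
	return b
}

func main() {
	fmt.Println(parseBoolAnnotation("TRUE")) // true
	fmt.Println(parseBoolAnnotation("yes"))  // false: rejected by ParseBool
}
```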
+ +## Examples + +### Basic HTTP Service +```yaml +metadata: + annotations: + service.beta.kubernetes.io/linode-loadbalancer-default-protocol: "http" + service.beta.kubernetes.io/linode-loadbalancer-check-type: "http" + service.beta.kubernetes.io/linode-loadbalancer-check-path: "/healthz" +``` + +### HTTPS Service with TLS +```yaml +metadata: + annotations: + service.beta.kubernetes.io/linode-loadbalancer-port-443: | + { + "protocol": "https", + "tls-secret-name": "my-tls-secret" + } +``` + +### Firewall Configuration +```yaml +metadata: + annotations: + service.beta.kubernetes.io/linode-loadbalancer-firewall-acl: | + { + "allowList": { + "ipv4": ["192.168.0.0/16"], + "ipv6": ["2001:db8::/32"] + } + } +``` + +For more examples and detailed configuration options, see: +- [LoadBalancer Configuration](loadbalancer.md) +- [Firewall Configuration](firewall.md) +- [Basic Service Examples](../examples/basic.md) +- [Advanced Configuration Examples](../examples/advanced.md) +- [Complete Stack Example](../examples/complete-stack.md) + +See also: +- [Environment Variables](environment.md) +- [Route Configuration](routes.md) +- [Session Affinity](session-affinity.md) diff --git a/docs/configuration/environment.md b/docs/configuration/environment.md new file mode 100644 index 00000000..15ad47f0 --- /dev/null +++ b/docs/configuration/environment.md @@ -0,0 +1,87 @@ +# Environment Variables + +## Overview + +Environment variables provide global configuration options for the CCM. These settings affect caching, API behavior, and networking configurations. + +## Available Variables + +### Cache Configuration + +| Variable | Default | Description | +|----------|---------|-------------| +| `LINODE_INSTANCE_CACHE_TTL` | `15` | Default timeout of instance cache in seconds | +| `LINODE_ROUTES_CACHE_TTL_SECONDS` | `60` | Default timeout of route cache in seconds | + +### API Configuration + +| Variable | Default | Description | +|----------|---------|-------------| +| `LINODE_REQUEST_TIMEOUT_SECONDS` | `120` | Default timeout in seconds for http requests to linode API | +| `LINODE_URL` | `https://api.linode.com/v4` | Linode API endpoint | + +### Network Configuration + +| Variable | Default | Description | +|----------|---------|-------------| +| `LINODE_EXTERNAL_SUBNET` | "" | Mark private network as external. 
Example - `172.24.0.0/16` | +| `BGP_CUSTOM_ID_MAP` | "" | Use your own map instead of the default region map for BGP | +| `BGP_PEER_PREFIX` | `2600:3c0f` | Use your own BGP peer prefix instead of the default one | + +## Configuration Methods + +### Helm Chart +Configure via `values.yaml`: +```yaml +env: + - name: LINODE_INSTANCE_CACHE_TTL + value: "30" +``` + +### Manual Deployment +Add to the CCM DaemonSet: +```yaml +spec: + template: + spec: + containers: + - name: ccm-linode + env: + - name: LINODE_INSTANCE_CACHE_TTL + value: "30" +``` + +## Usage Guidelines + +### Cache Settings +- Adjust cache TTL based on cluster size and update frequency +- Monitor memory usage when modifying cache settings +- Consider API rate limits when decreasing TTL (see [Linode API Rate Limits](https://techdocs.akamai.com/linode-api/reference/rate-limits)) + +### API Settings +- Increase timeout for slower network conditions +- Use default API URL unless testing/development required +- Consider regional latency when adjusting timeouts + +### Network Settings +- Configure external subnet for custom networking needs +- Use BGP settings only when implementing IP sharing +- Document any custom network configurations + +## Troubleshooting + +### Common Issues + +1. **API Timeouts** + - Check network connectivity + - Verify API endpoint accessibility + - Consider increasing timeout value + +2. **Cache Issues** + - Monitor memory usage + - Verify cache TTL settings + - Check for stale data + +For more details, see: +- [Installation Guide](../getting-started/installation.md) +- [Troubleshooting Guide](../getting-started/troubleshooting.md) diff --git a/docs/configuration/firewall.md b/docs/configuration/firewall.md new file mode 100644 index 00000000..c5d011d4 --- /dev/null +++ b/docs/configuration/firewall.md @@ -0,0 +1,83 @@ +# Firewall Setup + +## Overview + +The CCM provides two methods for securing NodeBalancers with firewalls: +1. CCM-managed Cloud Firewalls (using `firewall-acl` annotation) +2. User-managed Cloud Firewalls (using `firewall-id` annotation) + +## CCM-Managed Firewalls + +### Configuration + +Use the `firewall-acl` annotation to specify firewall rules. The rules should be provided as a JSON object with either an `allowList` or `denyList` (but not both). + +#### Allow List Configuration +```yaml +apiVersion: v1 +kind: Service +metadata: + name: restricted-service + annotations: + service.beta.kubernetes.io/linode-loadbalancer-firewall-acl: | + { + "allowList": { + "ipv4": ["192.168.0.0/16", "10.0.0.0/8"], + "ipv6": ["2001:db8::/32"] + } + } +``` + +#### Deny List Configuration +```yaml +metadata: + annotations: + service.beta.kubernetes.io/linode-loadbalancer-firewall-acl: | + { + "denyList": { + "ipv4": ["203.0.113.0/24"], + "ipv6": ["2001:db8:1234::/48"] + } + } +``` + +### Behavior +- Only one type of list (allow or deny) can be used per service +- Rules are automatically created and managed by the CCM +- Rules are updated when the annotation changes +- Firewall is deleted when the service is deleted (unless preserved) + +## User-Managed Firewalls + +### Configuration + +1. Create a Cloud Firewall in Linode Cloud Manager +2. Attach it to the service using the `firewall-id` annotation: + +```yaml +metadata: + annotations: + service.beta.kubernetes.io/linode-loadbalancer-firewall-id: "12345" +``` + +### Management +- User maintains full control over firewall rules +- Firewall persists after service deletion +- Manual updates required for rule changes + +## Best Practices + +1. 
**Rule Management** + - Use descriptive rule labels + - Document rule changes + - Regular security audits + +2. **IP Range Planning** + - Plan CIDR ranges carefully + - Document allowed/denied ranges + - Consider future expansion + +For more information: +- [Service Annotations](annotations.md#firewall-configuration) +- [LoadBalancer Configuration](loadbalancer.md) +- [Linode Cloud Firewall Documentation](https://www.linode.com/docs/products/networking/cloud-firewall/) diff --git a/docs/configuration/loadbalancer.md b/docs/configuration/loadbalancer.md new file mode 100644 index 00000000..c0781fe2 --- /dev/null +++ b/docs/configuration/loadbalancer.md @@ -0,0 +1,206 @@ +# LoadBalancer Services Configuration + +## Overview + +The CCM supports two types of LoadBalancer implementations: +1. Linode NodeBalancers (default) +2. BGP-based IP sharing + +For implementation examples, see [Basic Service Examples](../examples/basic.md#loadbalancer-services). + +## NodeBalancer Implementation + +When using NodeBalancers, the CCM automatically: +1. Creates and configures a NodeBalancer +2. Sets up backend nodes +3. Manages health checks +4. Handles SSL/TLS configuration + +For more details, see [Linode NodeBalancer Documentation](https://www.linode.com/docs/products/networking/nodebalancers/). + +### Basic Configuration + +Create a LoadBalancer service: +```yaml +apiVersion: v1 +kind: Service +metadata: + name: my-service +spec: + type: LoadBalancer + ports: + - port: 80 + targetPort: 8080 + selector: + app: my-app +``` + +See [Advanced Configuration Examples](../examples/advanced.md#loadbalancer-services) for more complex setups. + +### NodeBalancer Settings + +#### Protocol Configuration +Available protocols: +- `tcp` (default) +- `http` +- `https` + +Set the default protocol: +```yaml +metadata: + annotations: + service.beta.kubernetes.io/linode-loadbalancer-default-protocol: "http" +``` + +See [Service Annotations](annotations.md#basic-configuration) for all protocol options. + +### Health Checks + +Configure health checks using annotations: +```yaml +metadata: + annotations: + service.beta.kubernetes.io/linode-loadbalancer-check-type: "http" + service.beta.kubernetes.io/linode-loadbalancer-check-path: "/healthz" + service.beta.kubernetes.io/linode-loadbalancer-check-interval: "5" + service.beta.kubernetes.io/linode-loadbalancer-check-timeout: "3" + service.beta.kubernetes.io/linode-loadbalancer-check-attempts: "2" +``` + +Available check types: +- `none`: No health check +- `connection`: TCP connection check +- `http`: HTTP status check +- `http_body`: HTTP response body check + +For more details, see [Health Check Configuration](annotations.md#health-check-configuration). + +### SSL/TLS Configuration + +1. Create a TLS secret: +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: my-tls-secret +type: kubernetes.io/tls +data: + tls.crt: <base64-encoded certificate> + tls.key: <base64-encoded key> +``` + +2. 
Reference in service annotation: +```yaml +metadata: + annotations: + service.beta.kubernetes.io/linode-loadbalancer-port-443: | + { + "protocol": "https", + "tls-secret-name": "my-tls-secret" + } +``` + +### Connection Throttling + +Limit connections from the same client IP: +```yaml +metadata: + annotations: + service.beta.kubernetes.io/linode-loadbalancer-throttle: "5" +``` + +### Proxy Protocol + +Enable proxy protocol for client IP preservation: +```yaml +metadata: + annotations: + service.beta.kubernetes.io/linode-loadbalancer-default-proxy-protocol: "v2" +``` + +## BGP-based IP Sharing Implementation + +BGP-based IP sharing provides a more cost-effective solution for multiple LoadBalancer services. For detailed setup instructions, see [Cilium BGP Documentation](https://docs.cilium.io/en/stable/network/bgp-control-plane/). + +### Prerequisites +- [Cilium CNI](https://docs.cilium.io/en/stable/network/bgp-control-plane/) with BGP control plane enabled +- Additional IP provisioning enabled on your account (contact [Linode Support](https://www.linode.com/support/)) +- Nodes labeled for BGP peering + +### Configuration + +1. Enable BGP in CCM deployment: +```yaml +args: + - --load-balancer-type=cilium-bgp + - --bgp-node-selector=cilium-bgp-peering=true + - --ip-holder-suffix=mycluster +``` + +2. Label nodes that should participate in BGP peering: +```bash +kubectl label node my-node cilium-bgp-peering=true +``` + +3. Create LoadBalancer services as normal - the CCM will automatically use BGP-based IP sharing instead of creating NodeBalancers. + +### Environment Variables +- `BGP_CUSTOM_ID_MAP`: Use your own map instead of default region map for BGP +- `BGP_PEER_PREFIX`: Use your own BGP peer prefix instead of default one + +For more details, see [Environment Variables](environment.md#network-configuration). 
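Before enabling this, it can help to confirm which nodes actually match the BGP selector; a quick check, assuming the `cilium-bgp-peering=true` label from step 2 above:

```bash
# List the nodes that will participate in BGP peering, per the selector above
kubectl get nodes -l cilium-bgp-peering=true
```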
+ +## Advanced Configuration + +### Using Existing NodeBalancers + +Specify an existing NodeBalancer: +```yaml +metadata: + annotations: + service.beta.kubernetes.io/linode-loadbalancer-nodebalancer-id: "12345" +``` + +### NodeBalancer Preservation + +Prevent NodeBalancer deletion when service is deleted: +```yaml +metadata: + annotations: + service.beta.kubernetes.io/linode-loadbalancer-preserve: "true" +``` + +### Port Configuration + +Configure individual ports: +```yaml +metadata: + annotations: + service.beta.kubernetes.io/linode-loadbalancer-port-443: | + { + "protocol": "https", + "tls-secret-name": "my-tls-secret", + "proxy-protocol": "v2" + } +``` + +### Tags + +Add tags to the NodeBalancer: +```yaml +metadata: + annotations: + service.beta.kubernetes.io/linode-loadbalancer-tags: "production,web-tier" +``` + +## Related Documentation + +- [Service Annotations](annotations.md) +- [Firewall Configuration](firewall.md) +- [Session Affinity](session-affinity.md) +- [Environment Variables](environment.md) +- [Route Configuration](routes.md) +- [Linode NodeBalancer Documentation](https://www.linode.com/docs/products/networking/nodebalancers/) +- [Cilium BGP Documentation](https://docs.cilium.io/en/stable/network/bgp-control-plane/) +- [Basic Service Examples](../examples/basic.md) +- [Advanced Configuration Examples](../examples/advanced.md) diff --git a/docs/configuration/nodes.md b/docs/configuration/nodes.md new file mode 100644 index 00000000..62e308ef --- /dev/null +++ b/docs/configuration/nodes.md @@ -0,0 +1,96 @@ +# Node Configuration + +## Overview + +The Node Controller in CCM manages node-specific configurations and lifecycle operations for Kubernetes nodes running on Linode instances. + +## Node Labels + +The CCM automatically adds the following labels to nodes: + +### Topology Labels +Current: +- `topology.kubernetes.io/region`: Linode region (e.g., "us-east") +- `topology.kubernetes.io/zone`: Linode availability zone + +Legacy (deprecated): +- `failure-domain.beta.kubernetes.io/region`: Linode region +- `failure-domain.beta.kubernetes.io/zone`: Linode availability zone + +### Provider Labels +- `node.kubernetes.io/instance-type`: Linode instance type (e.g., "g6-standard-4") + +## Node Annotations + +All node annotations must be prefixed with: `node.k8s.linode.com/` + +### Available Annotations + +| Annotation | Type | Default | Description | +|------------|------|---------|-------------| +| `private-ip` | IPv4 | none | Overrides default detection of Node InternalIP | + +### Use Cases + +#### Private Network Configuration +```yaml +apiVersion: v1 +kind: Node +metadata: + name: my-node + annotations: + node.k8s.linode.com/private-ip: "192.168.1.100" +``` + +#### VPC Configuration +When using the CCM with [Linode VPC](https://www.linode.com/docs/products/networking/vpc/), the node's internal IP will be set to its VPC IP. To use a different IP address as the internal IP, you may need to manually configure the node's InternalIP: +```yaml +apiVersion: v1 +kind: Node +metadata: + name: vpc-node + annotations: + node.k8s.linode.com/private-ip: "10.0.0.5" +``` + +## Node Networking + +### Private Network Requirements +- NodeBalancers require nodes to have Linode-specific [private IP addresses](https://techdocs.akamai.com/cloud-computing/docs/managing-ip-addresses-on-a-compute-instance#types-of-ip-addresses) +- Private IPs must be configured in the Linode Cloud Manager or via the API +- The CCM will use private IPs for inter-node communication + +### VPC Configuration +When using VPC: +1. 
Configure network interfaces in Linode Cloud Manager +2. Add appropriate node annotations for private IPs +3. Ensure proper routing configuration +4. Configure Cloud Firewall rules if needed + +For VPC routing setup, see [Route Configuration](routes.md). + +## Node Controller Behavior + +### Node Initialization +- Configures node with Linode-specific information +- Sets node addresses (public/private IPs) +- Applies region/zone labels +- Configures node hostnames + +### Node Lifecycle Management +- Monitors node health +- Updates node status +- Handles node termination +- Manages node cleanup + +### Node Updates +- Updates node labels when region/zone changes +- Updates node addresses when IP configuration changes +- Maintains node conditions based on Linode instance status + +For more information: +- [Linode Instance Types](https://www.linode.com/docs/products/compute/compute-instances/plans/) +- [Private Networking](https://www.linode.com/docs/products/networking/private-networking/) +- [VPC Documentation](https://www.linode.com/docs/products/networking/vpc/) +- [Route Configuration](routes.md) +- [Environment Variables](environment.md) diff --git a/docs/configuration/routes.md b/docs/configuration/routes.md new file mode 100644 index 00000000..0f8b8a26 --- /dev/null +++ b/docs/configuration/routes.md @@ -0,0 +1,100 @@ +# Route Configuration + +## Overview + +The Route Controller manages network routes for pod communication in VPC environments. It ensures proper connectivity between nodes and pods across the cluster by configuring routes in Linode VPC. + +## Prerequisites + +- Kubernetes cluster running in Linode VPC +- CCM with route controller enabled +- Proper API permissions + +## Configuration + +### Enable Route Controller + +1. Via Helm chart in `values.yaml`: +```yaml +routeController: + vpcNames: "vpc-prod,vpc-staging" # Comma separated names of VPCs managed by CCM + clusterCIDR: "10.0.0.0/8" # Pod CIDR range + configureCloudRoutes: true # Enable route controller +``` + +2. Via command line flags in CCM deployment: +```yaml +spec: + template: + spec: + containers: + - name: ccm-linode + args: + - --configure-cloud-routes=true + - --vpc-names=vpc-prod,vpc-staging + - --cluster-cidr=10.0.0.0/8 +``` + +### Environment Variables + +| Variable | Default | Description | +|----------|---------|-------------| +| `LINODE_ROUTES_CACHE_TTL_SECONDS` | `60` | Default timeout of route cache in seconds | + +## Route Management + +### Automatic Operations + +The Route Controller: +- Creates routes for pod CIDR ranges assigned to nodes +- Updates routes when nodes are added/removed +- Manages route tables in specified VPCs +- Handles route cleanup during node removal +- Maintains route cache for performance + +### Route Types + +1. **Pod CIDR Routes** + - Created for each node's pod CIDR allocation + - Target is node's private IP address + - Automatically managed based on node lifecycle + +2. **VPC Routes** + - Managed within specified VPCs + - Enables cross-node pod communication + - Automatically updated with topology changes + +## Best Practices + +### CIDR Planning +- Ensure the pod CIDR range doesn't overlap with the node's VPC IP addresses +- Plan for future cluster growth +- Document CIDR allocations + +### VPC Configuration +- Use clear, descriptive VPC names +- Configure proper VPC security settings +- Ensure proper API permissions + +## Troubleshooting + +### Common Issues + +1. 
**Route Creation Failures** + - Verify API permissions + - Check for CIDR conflicts + - Validate VPC configuration + - Ensure node private IPs are configured + +2. **Pod Communication Issues** + - Verify route table entries + - Check VPC network ACLs + - Validate node networking + - Confirm pod CIDR assignments + +## Related Documentation + +- [VPC Configuration](https://www.linode.com/docs/products/networking/vpc/) +- [Node Configuration](nodes.md) +- [Environment Variables](environment.md) +- [Kubernetes Network Policies](https://kubernetes.io/docs/concepts/services-networking/network-policies/) diff --git a/docs/configuration/session-affinity.md b/docs/configuration/session-affinity.md new file mode 100644 index 00000000..78683c90 --- /dev/null +++ b/docs/configuration/session-affinity.md @@ -0,0 +1,60 @@ +# Session Affinity + +## Overview + +Session affinity (also known as sticky sessions) ensures that requests from the same client are consistently routed to the same backend pod. In Kubernetes, sessionAffinity refers to a mechanism that allows a client to always be redirected to the same pod when the client hits a service. + +## Configuration + +### Basic Setup + +Enable session affinity by setting `service.spec.sessionAffinity` to `ClientIP`: +```yaml +apiVersion: v1 +kind: Service +metadata: + name: wordpress-lsmnl-wordpress + namespace: wordpress-lsmnl + labels: + app: wordpress-lsmnl-wordpress +spec: + type: LoadBalancer + selector: + app: wordpress-lsmnl-wordpress + sessionAffinity: ClientIP +``` + +### Setting Timeout + +Configure the maximum session sticky time using `sessionAffinityConfig`: +```yaml +apiVersion: v1 +kind: Service +metadata: + name: my-service +spec: + type: LoadBalancer + sessionAffinity: ClientIP + sessionAffinityConfig: + clientIP: + timeoutSeconds: 10800 # 3 hours +``` + +## Configuration Options + +### Session Affinity Types +- `None`: No session affinity (default) +- `ClientIP`: Route based on client's IP address. All requests from the same client IP will be directed to the same pod. + +### Timeout Configuration +- `timeoutSeconds`: Duration to maintain affinity +- Default: 10800 seconds (3 hours) +- Valid range: 1 to 86400 seconds (24 hours) +- After the timeout period, client requests may be routed to a different pod + +## Related Documentation + +- [Service Configuration](annotations.md) +- [LoadBalancer Configuration](loadbalancer.md) +- [Kubernetes Services Documentation](https://kubernetes.io/docs/concepts/services-networking/service/#session-affinity) +- [Service Selectors](https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service) diff --git a/docs/development/README.md b/docs/development/README.md new file mode 100644 index 00000000..552c1fb6 --- /dev/null +++ b/docs/development/README.md @@ -0,0 +1,121 @@ +# Development Guide + +## Prerequisites + +The Linode Cloud Controller Manager development requires: +- A fairly up-to-date GNU tools environment +- Go 1.23 or higher + +### Setting Up Development Environment + +#### Option 1: Using Devbox (Recommended) +The simplest way to set up your development environment is using [Devbox](https://www.jetpack.io/devbox/): + +1. Install Devbox by following the instructions at [jetpack.io/devbox/docs/installing_devbox/](https://www.jetpack.io/devbox/docs/installing_devbox/) + +2. Start the development environment: +```bash +devbox shell +``` + +This will automatically set up all required dependencies and tools for development. + +#### Option 2: Manual Setup + +1. 
If you haven't set up a Go development environment, follow [these instructions](https://golang.org/doc/install) to install Go. + +On macOS, you can use Homebrew: +```bash +brew install golang +``` + +## Getting Started + +### Download Source +```bash +go get github.com/linode/linode-cloud-controller-manager +cd $(go env GOPATH)/src/github.com/linode/linode-cloud-controller-manager +``` + +### Building the Project + +#### Build Binary +Use the following Make targets to build and run a local binary: + +```bash +# Build the binary +make build + +# Run the binary +make run + +# You can also run the binary directly to pass additional args +dist/linode-cloud-controller-manager +``` + +#### Building Docker Images +To build and push a Docker image: + +```bash +# Set the repo/image:tag with the IMG environment variable +# Then run the docker-build make target +IMG=linode/linode-cloud-controller-manager:canary make docker-build + +# Push Image +IMG=linode/linode-cloud-controller-manager:canary make docker-push +``` + +To run the Docker image: +```bash +docker run -ti linode/linode-cloud-controller-manager:canary +``` + +### Managing Dependencies +The Linode Cloud Controller Manager uses [Go Modules](https://blog.golang.org/using-go-modules) to manage dependencies. + +To update or add dependencies: +```bash +go mod tidy +``` + +## Development Guidelines + +### Code Quality Standards +- Write correct, up-to-date, bug-free, fully functional, secure, and efficient code +- Use the latest stable version of Go +- Follow Go idioms and best practices +- Implement proper error handling with custom error types when beneficial +- Include comprehensive input validation +- Utilize built-in language features for performance optimization +- Follow relevant design patterns and principles +- Leave NO todos, placeholders, or incomplete implementations + +### Code Structure +- Include necessary imports and declarations +- Implement proper logging using appropriate logging mechanisms +- Consider implementing middleware or interceptors for cross-cutting concerns +- Structure code in a modular and maintainable way +- Use appropriate naming conventions and code organization + +### Security & Performance +- Implement security best practices +- Consider rate limiting when appropriate +- Include authentication/authorization where needed +- Optimize for performance while maintaining readability +- Consider scalability in design decisions + +### Documentation & Testing +- Provide brief comments for complex logic or language-specific idioms +- Include clear documentation for public interfaces +- Write tests using appropriate testing frameworks +- Document any assumptions or limitations + +### Pull Request Process
1. Ensure your code follows the project's coding standards +2. Update documentation as needed +3. Add or update tests as appropriate +4. Make sure all tests pass locally +5. Submit the PR with a clear description of the changes + +## Getting Help +For development-related questions or discussions, join us in #linode on the [Kubernetes Slack](https://kubernetes.slack.com/messages/CD4B15LUR/details/). \ No newline at end of file diff --git a/docs/examples/README.md b/docs/examples/README.md new file mode 100644 index 00000000..606e1589 --- /dev/null +++ b/docs/examples/README.md @@ -0,0 +1,23 @@ +# Examples + +This section provides working examples of common CCM configurations. Each example includes a complete service and deployment configuration. + +## Available Examples + +1. 
**[Basic Services](basic.md)** + - HTTP LoadBalancer + - HTTPS LoadBalancer with TLS termination + +2. **[Advanced Configuration](advanced.md)** + - Custom Health Checks + - Firewalled Services + - Session Affinity + - Shared IP Load-Balancing + - Custom Node Selection + +For testing these examples, see the [test script](https://github.com/linode/linode-cloud-controller-manager/blob/master/examples/test.sh). + +For more configuration options, see: +- [Service Annotations](../configuration/annotations.md) +- [LoadBalancer Configuration](../configuration/loadbalancer.md) +- [Firewall Configuration](../configuration/firewall.md) diff --git a/docs/examples/advanced.md b/docs/examples/advanced.md new file mode 100644 index 00000000..42fd8e64 --- /dev/null +++ b/docs/examples/advanced.md @@ -0,0 +1,140 @@ +# Advanced Configuration + +## Custom Health Checks + +Service with custom health check configuration: + +```yaml +apiVersion: v1 +kind: Service +metadata: + name: web-healthcheck + annotations: + service.beta.kubernetes.io/linode-loadbalancer-check-type: "http" + service.beta.kubernetes.io/linode-loadbalancer-check-path: "/healthz" + service.beta.kubernetes.io/linode-loadbalancer-check-interval: "5" + service.beta.kubernetes.io/linode-loadbalancer-check-timeout: "3" + service.beta.kubernetes.io/linode-loadbalancer-check-attempts: "2" + service.beta.kubernetes.io/linode-loadbalancer-check-passive: "true" +spec: + type: LoadBalancer + ports: + - port: 80 + selector: + app: web +``` + +## Firewalled Services + +Service with firewall rules: + +```yaml +kind: Service +apiVersion: v1 +metadata: + name: restricted-access + annotations: + service.beta.kubernetes.io/linode-loadbalancer-firewall-acl: | + { + "allowList": { + "ipv4": ["192.166.0.0/16", "172.23.41.0/24"], + "ipv6": ["2001:DB8::/128"] + } + } +spec: + type: LoadBalancer + selector: + app: restricted-app + ports: + - name: http + port: 80 + targetPort: 8080 +``` + +## Session Affinity + +Service with sticky sessions: + +```yaml +apiVersion: v1 +kind: Service +metadata: + name: sticky-service +spec: + type: LoadBalancer + sessionAffinity: ClientIP + sessionAffinityConfig: + clientIP: + timeoutSeconds: 100 + selector: + app: sticky-app + ports: + - port: 80 + targetPort: 8080 +``` + +## Shared IP Load-Balancing + +```yaml +apiVersion: v1 +kind: Service +metadata: + name: shared-ip-service +spec: + type: LoadBalancer + selector: + app: web + ports: + - port: 80 + targetPort: 8080 +--- +# Required DaemonSet configuration for shared IP +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: ccm-linode + namespace: kube-system +spec: + template: + spec: + containers: + - image: linode/linode-cloud-controller-manager:latest + name: ccm-linode + env: + - name: LINODE_URL + value: https://api.linode.com/v4 + args: + - --bgp-node-selector=cilium-bgp-peering=true + - --load-balancer-type=cilium-bgp + - --ip-holder-suffix=myclustername1 +``` + +## Custom Node Selection + +```yaml +apiVersion: v1 +kind: Service +metadata: + name: custom-nodes +spec: + type: LoadBalancer + selector: + app: custom-app + ports: + - port: 80 + # Only use nodes with specific labels + externalTrafficPolicy: Local +--- +# Example node with custom annotation +apiVersion: v1 +kind: Node +metadata: + name: custom-node + annotations: + node.k8s.linode.com/private-ip: "192.168.1.100" +``` + +For more examples, see: +- [Service Annotations](../configuration/annotations.md) +- [Firewall Configuration](../configuration/firewall.md) +- [LoadBalancer 
Configuration](../configuration/loadbalancer.md) diff --git a/docs/examples/basic.md b/docs/examples/basic.md new file mode 100644 index 00000000..d15ff070 --- /dev/null +++ b/docs/examples/basic.md @@ -0,0 +1,107 @@ +# Basic Services + +## HTTP LoadBalancer + +Basic HTTP LoadBalancer service with nginx: + +```yaml +kind: Service +apiVersion: v1 +metadata: + name: http-lb + annotations: + service.beta.kubernetes.io/linode-loadbalancer-default-protocol: "http" +spec: + type: LoadBalancer + selector: + app: nginx-http-example + ports: + - name: http + protocol: TCP + port: 80 + targetPort: 80 + +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: nginx-http-deployment +spec: + replicas: 2 + selector: + matchLabels: + app: nginx-http-example + template: + metadata: + labels: + app: nginx-http-example + spec: + containers: + - name: nginx + image: nginx + ports: + - containerPort: 80 + protocol: TCP +``` + +## HTTPS LoadBalancer + +HTTPS LoadBalancer with TLS termination: + +```yaml +kind: Service +apiVersion: v1 +metadata: + name: https-lb + annotations: + service.beta.kubernetes.io/linode-loadbalancer-throttle: "4" + service.beta.kubernetes.io/linode-loadbalancer-default-protocol: "http" + service.beta.kubernetes.io/linode-loadbalancer-port-443: | + { + "tls-secret-name": "example-secret", + "protocol": "https" + } +spec: + type: LoadBalancer + selector: + app: nginx-https-example + ports: + - name: http + protocol: TCP + port: 80 + targetPort: http + - name: https + protocol: TCP + port: 443 + targetPort: https + +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: nginx-https-deployment +spec: + replicas: 2 + selector: + matchLabels: + app: nginx-https-example + template: + metadata: + labels: + app: nginx-https-example + spec: + containers: + - name: nginx + image: nginx + ports: + - name: http + containerPort: 80 + protocol: TCP + - name: https + containerPort: 443 + protocol: TCP +``` + +For more configuration options, see: +- [Service Annotations](../configuration/annotations.md) +- [LoadBalancer Configuration](../configuration/loadbalancer.md) diff --git a/docs/getting-started/README.md b/docs/getting-started/README.md new file mode 100644 index 00000000..9850e6d3 --- /dev/null +++ b/docs/getting-started/README.md @@ -0,0 +1,13 @@ +# Getting Started + +This section will guide you through: +- Understanding the CCM's requirements +- Installing the CCM using either Helm or manual installation +- Verifying your installation +- Troubleshooting common issues + +Choose the installation method that best suits your needs: +- **Helm Installation**: Recommended for most users, provides easier upgrades and configuration +- **Manual Installation**: Offers more control over the deployment process + +Before proceeding with installation, make sure to review the requirements section to ensure your environment is properly configured. diff --git a/docs/getting-started/helm-installation.md b/docs/getting-started/helm-installation.md new file mode 100644 index 00000000..979bd868 --- /dev/null +++ b/docs/getting-started/helm-installation.md @@ -0,0 +1,58 @@ +# Helm Installation + +## Prerequisites +- Helm 3.x installed +- kubectl configured to access your cluster +- Linode API token +- Target region identified + +## Installation Steps + +1. Add the CCM Helm repository: +```bash +helm repo add ccm-linode https://linode.github.io/linode-cloud-controller-manager/ +helm repo update ccm-linode +``` + +2. 
Create a values file (values.yaml): +```yaml +apiToken: "your-api-token" +region: "us-east" + +# Optional: Configure route controller +routeController: + vpcNames: "" # Comma separated VPC names + clusterCIDR: "10.0.0.0/8" + configureCloudRoutes: true + +# Optional: Configure shared IP load balancing instead of NodeBalancers (requires Cilium CNI and BGP Control Plane enabled) +sharedIPLoadBalancing: + loadBalancerType: cilium-bgp + bgpNodeSelector: cilium-bgp-peering=true + ipHolderSuffix: "" +``` + +3. Install the CCM: +```bash +helm install ccm-linode \ + --namespace kube-system \ + -f values.yaml \ + ccm-linode/ccm-linode +``` + +## Upgrading + +To upgrade an existing installation: +```bash +helm upgrade ccm-linode \ + --namespace kube-system \ + -f values.yaml \ + ccm-linode/ccm-linode +``` + +## Uninstalling + +To remove the CCM: +```bash +helm uninstall ccm-linode -n kube-system +``` diff --git a/docs/getting-started/installation.md b/docs/getting-started/installation.md new file mode 100644 index 00000000..ea47f6df --- /dev/null +++ b/docs/getting-started/installation.md @@ -0,0 +1,20 @@ +# Installation + +The CCM can be installed using either Helm (recommended) or by manually applying manifests. Choose the method that best suits your needs: + +## Installation Methods + +### [Helm Installation](helm-installation.md) +- Easier to manage and upgrade +- Configurable through values.yaml +- Supports templating for different environments + +### [Manual Installation](manual-installation.md) +- More control over the deployment +- Better for customized setups +- Useful for understanding the components + +## Post-Installation +After installing the CCM, proceed to the [Verification](verification.md) section to ensure everything is working correctly. + +If you encounter any issues, check the [Troubleshooting](troubleshooting.md) guide. diff --git a/docs/getting-started/manual-installation.md b/docs/getting-started/manual-installation.md new file mode 100644 index 00000000..d8f8ef9a --- /dev/null +++ b/docs/getting-started/manual-installation.md @@ -0,0 +1,56 @@ +# Manual Installation + +## Prerequisites +- kubectl configured to access your cluster +- Linode API token +- Target region identified + +## Installation Steps + +1. Generate the manifest: +```bash +./deploy/generate-manifest.sh $LINODE_API_TOKEN $REGION +``` + +2. Review the generated manifest: +The script creates `ccm-linode.yaml` containing: +- ServiceAccount +- ClusterRole and ClusterRoleBinding +- Secret with API token +- DaemonSet for the CCM + +3. Apply the manifest: +```bash +kubectl apply -f ccm-linode.yaml +``` + +## Customization + +### Environment Variables +You can modify the DaemonSet to include custom environment variables: +```yaml +env: + - name: LINODE_INSTANCE_CACHE_TTL + value: "15" + - name: LINODE_ROUTES_CACHE_TTL_SECONDS + value: "60" +``` + +### Resource Limits +Adjust compute resources as needed: +```yaml +resources: + requests: + cpu: 100m + memory: 128Mi + limits: + cpu: 200m + memory: 256Mi +``` + +## Uninstalling + +To remove the CCM: +```bash +kubectl delete -f ccm-linode.yaml +``` diff --git a/docs/getting-started/overview.md b/docs/getting-started/overview.md new file mode 100644 index 00000000..39fe3f0e --- /dev/null +++ b/docs/getting-started/overview.md @@ -0,0 +1,34 @@ +# Overview + +The Linode Cloud Controller Manager provides several key features that enable a fully supported Kubernetes experience on Linode infrastructure. 
+ +## Features + +### LoadBalancer Services +- Automatic deployment and configuration of Linode NodeBalancers +- Support for HTTP, HTTPS, and TCP traffic +- SSL/TLS termination +- Custom health checks and session affinity + +### Node Management +- Automatic configuration of node hostnames and network addresses +- Proper node state management for Linode shutdowns +- Region-based node annotation for failure domain scheduling + +### Network Integration +- Support for private networking +- VPC and VLAN compatibility +- BGP-based IP sharing capabilities + +### Security +- Integrated firewall management +- Support for TLS termination +- Custom security rules and ACLs + +## When to Use CCM + +The Linode CCM is essential when: +- Running Kubernetes clusters on Linode infrastructure +- Requiring automated load balancer provisioning +- Needing integrated cloud provider features +- Managing multi-node clusters with complex networking requirements \ No newline at end of file diff --git a/docs/getting-started/requirements.md b/docs/getting-started/requirements.md new file mode 100644 index 00000000..463d4bc3 --- /dev/null +++ b/docs/getting-started/requirements.md @@ -0,0 +1,54 @@ +# Requirements + +Before installing the Linode Cloud Controller Manager, ensure your environment meets the following requirements. + +## Kubernetes Cluster Requirements + +### Version Compatibility +- Kubernetes version 1.22 or higher +- Kubernetes cluster running on Linode infrastructure + +### Kubernetes Components Configuration +The following Kubernetes components must be started with the `--cloud-provider=external` flag: +- Kubelet +- Kube Controller Manager +- Kube API Server + +## Linode Requirements + +### API Token +You need a Linode APIv4 Personal Access Token with the following scopes: +- Linodes - Read/Write +- NodeBalancers - Read/Write +- IPs - Read/Write +- Volumes - Read/Write +- Firewalls - Read/Write (if using firewall features) +- VPCs - Read/Write (if using VPC features) +- VLANs - Read/Write (if using VLAN features) + +To create a token: +1. Log into the [Linode Cloud Manager](https://cloud.linode.com) +2. Go to your profile +3. Select the "API Tokens" tab +4. Click "Create a Personal Access Token" +5. Select the required scopes +6. Set an expiry (optional) + +### Region Support +Your cluster must be in a [supported Linode region](https://api.linode.com/v4/regions). 
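+
+To confirm which regions are currently available, you can query the same public endpoint directly (a minimal sketch using `curl` and `jq`; this endpoint does not require authentication):
+
+```bash
+# List the IDs of all available Linode regions.
+curl -s https://api.linode.com/v4/regions | jq -r '.data[].id'
+```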
+
+## Network Requirements
+
+### Private Networking
+- If using NodeBalancers, nodes must have private IP addresses
+- VPC or VLAN configurations require additional network configuration
+
+### Firewall Considerations
+- Ensure required ports are open for Kubernetes components
+- If using Cloud Firewalls, ensure the API token has firewall management permissions
+
+## Resource Quotas
+Ensure your Linode account has sufficient quota for:
+- NodeBalancers (if using LoadBalancer services)
+- Additional IP addresses (if using shared IP features)
+- Cloud Firewalls (if using firewall features)
diff --git a/docs/getting-started/troubleshooting.md b/docs/getting-started/troubleshooting.md
new file mode 100644
index 00000000..5e613f8d
--- /dev/null
+++ b/docs/getting-started/troubleshooting.md
@@ -0,0 +1,96 @@
+# Troubleshooting
+
+## Common Issues and Solutions
+
+### CCM Pod Issues
+
+#### Pod Won't Start
+```bash
+kubectl get pods -n kube-system -l app=ccm-linode
+kubectl describe pod -n kube-system -l app=ccm-linode
+```
+
+Common causes:
+- Invalid API token
+- Missing RBAC permissions
+- Resource constraints
+
+#### Pod Crashes
+Check the logs:
+```bash
+kubectl logs -n kube-system -l app=ccm-linode
+```
+
+Common causes:
+- API rate limiting
+- Network connectivity issues
+- Configuration errors
+
+### LoadBalancer Service Issues
+
+#### Service Stuck in Pending
+```bash
+kubectl describe service <service-name>
+```
+
+Check for:
+- API token permissions
+- NodeBalancer quota limits
+- Network configuration
+
+#### Health Checks Failing
+Verify:
+- Backend pod health
+- Service port configuration
+- Health check path configuration
+
+### Node Issues
+
+#### Missing Node Labels
+```bash
+kubectl get nodes --show-labels
+```
+
+Verify:
+- CCM node controller logs
+- Node annotations
+- API permissions
+
+#### Network Problems
+Check:
+- Private IP configuration
+- VPC/VLAN setup
+- Firewall rules
+
+## Gathering Information
+
+### Useful Commands
+```bash
+# Get CCM version
+kubectl get pods -n kube-system -l app=ccm-linode -o jsonpath='{.items[0].spec.containers[0].image}'
+
+# Check events
+kubectl get events -n kube-system
+
+# Get CCM logs with timestamps
+kubectl logs -n kube-system -l app=ccm-linode --timestamps
+```
+
+### Debug Mode
+Set the following environment variable in the CCM deployment:
+```yaml
+env:
+  - name: LINODE_DEBUG
+    value: "1"
+```
+
+## Getting Help
+
+If issues persist:
+1. Join #linode on [Kubernetes Slack](https://kubernetes.slack.com)
+2. Check [GitHub Issues](https://github.com/linode/linode-cloud-controller-manager/issues)
3. Submit a new issue with:
+   - CCM version
+   - Kubernetes version
+   - Relevant logs
+   - Steps to reproduce
diff --git a/docs/getting-started/verification.md b/docs/getting-started/verification.md
new file mode 100644
index 00000000..22e7c046
--- /dev/null
+++ b/docs/getting-started/verification.md
@@ -0,0 +1,63 @@
+# Verification
+
+After installing the CCM, follow these steps to verify it's working correctly.
+
+## Check CCM Pod Status
+
+1. Verify the CCM pods are running:
+```bash
+kubectl get pods -n kube-system -l app=ccm-linode
+```
+
+Expected output:
+```
+NAME               READY   STATUS    RESTARTS   AGE
+ccm-linode-xxxxx   1/1     Running   0          2m
+```
+
+2. Check CCM logs:
+```bash
+kubectl logs -n kube-system -l app=ccm-linode
+```
+
+Look for successful initialization messages and no errors.
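+
+If the logs are verbose, filtering for error-level lines is a quick first pass (a sketch assuming the default `app=ccm-linode` label used above; adjust the pattern as needed):
+
+```bash
+# Surface suspicious lines only; --timestamps makes it easier to correlate with cluster events.
+kubectl logs -n kube-system -l app=ccm-linode --timestamps | grep -iE 'error|fail'
+```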
+
+## Verify Node Configuration
+
+1. Check node labels and annotations:
+```bash
+kubectl get nodes -o yaml
+```
+
+Look for:
+- Proper region labels
+- Node addresses
+- Provider ID
+
+## Test LoadBalancer Service
+
+1. Create a test service:
+```yaml
+apiVersion: v1
+kind: Service
+metadata:
+  name: test-lb
+spec:
+  type: LoadBalancer
+  ports:
+    - port: 80
+  selector:
+    app: test
+```
+
+2. Verify NodeBalancer creation:
+```bash
+kubectl get svc test-lb
+```
+
+The service should receive an external IP address.
+
+## Common Issues
+- Pods in CrashLoopBackOff: Check logs for API token or permissions issues
+- Service stuck in 'Pending': Verify API token has NodeBalancer permissions
+- Missing node annotations: Check CCM logs for node controller issues
diff --git a/e2e/Makefile b/e2e/Makefile
deleted file mode 100644
index a7218b20..00000000
--- a/e2e/Makefile
+++ /dev/null
@@ -1,34 +0,0 @@
-IMG ?= linode/linode-cloud-controller-manager:latest
-GINKGO_PATH := $(shell go env GOPATH)/bin/ginkgo
-
-REUSED_KUBECONFIG := test/ccm-linode-for-reuse.conf
-
-ifneq ("$(wildcard $(REUSED_KUBECONFIG))","")
-	CONTROL_PLANE_IP := $(shell grep -E -o "([0-9]{1,3}[\.]){3}[0-9]{1,3}" $(REUSED_KUBECONFIG))
-endif
-
-.PHONY: test reuse-and-test clean
-
-${GINKGO_PATH}:
-	go install github.com/onsi/ginkgo/v2/ginkgo@v2.13.2
-
-reuse-and-test: SUITE_ARGS='--reuse'
-
-test reuse-and-test: ${GINKGO_PATH} check-token
-	go list -m; \
-	ginkgo -r --vv --trace $(TEST_ARGS) -- --image=${IMG} $(SUITE_ARGS)
-
-clean: check-token
-	cd test; \
-	./scripts/delete_cluster.sh ccm-linode-for-reuse; \
-	rm terraform.tfstate; \
-	rm -rf terraform.tfstate.d
-
-check-token:
-	@if test "$(LINODE_API_TOKEN)" = "" ; then \
-		echo "LINODE_API_TOKEN must be set"; \
-		exit 1; \
-	fi
-
-control-plane-ssh: $(REUSED_KUBECONFIG)
-	ssh root@$(CONTROL_PLANE_IP)
diff --git a/e2e/README.md b/e2e/README.md
deleted file mode 100644
index 15080aa0..00000000
--- a/e2e/README.md
+++ /dev/null
@@ -1,4 +0,0 @@
-## How to run these End-to-end (e2e) tests
-
-TBD: the way we run these e2e tests has recently changed, there is currently
-no convenient way to run these with no pre-existing clusters.
diff --git a/e2e/bgp-test/lb-cilium-bgp/chainsaw-test.yaml b/e2e/bgp-test/lb-cilium-bgp/chainsaw-test.yaml new file mode 100644 index 00000000..979bcac6 --- /dev/null +++ b/e2e/bgp-test/lb-cilium-bgp/chainsaw-test.yaml @@ -0,0 +1,139 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + name: cilium-bgp-test +spec: + namespace: "cilium-bgp-test" + steps: + - name: Check if CCM is deployed + try: + - assert: + file: ../../test/assert-ccm-resources.yaml + - name: Create a pod and service with load balancer type cilium-bgp + try: + - apply: + file: create-pod-service.yaml + catch: + - describe: + apiVersion: v1 + kind: Pod + - describe: + apiVersion: v1 + kind: Service + - name: Verify CiliumLoadBalancerIPPool creation + try: + - assert: + resource: + apiVersion: cilium.io/v2alpha1 + kind: CiliumLoadBalancerIPPool + metadata: + name: cilium-bgp-test-test-bgp-svc-pool + spec: + disabled: false + - name: Verify CiliumBGPPeeringPolicy + try: + - assert: + resource: + apiVersion: cilium.io/v2alpha1 + kind: CiliumBGPPeeringPolicy + metadata: + name: linode-ccm-bgp-peering + spec: + nodeSelector: + matchLabels: + cilium-bgp-peering: "true" + - name: Check LoadBalancer IP assignment + try: + - assert: + resource: + apiVersion: v1 + kind: Service + metadata: + name: test-bgp-svc + status: + conditions: + - status: "True" + type: cilium.io/IPAMRequestSatisfied + - name: Verify IP sharing on labeled nodes + try: + - script: + content: | + set -e + + delete_nanode() { + local NANODE_RESPONSE=$(curl -s -H "Authorization: Bearer $LINODE_TOKEN" \ + "https://api.linode.com/v4/linode/instances") + + local NANODE_ID=$(echo "$NANODE_RESPONSE" | \ + jq -r --arg cluster "$CLUSTER_NAME" '.data[] | select(.label | endswith($cluster)) | .id') + + if [ -n "$NANODE_ID" ]; then + curl -s -X DELETE -H "Authorization: Bearer $LINODE_TOKEN" \ + "https://api.linode.com/v4/linode/instances/$NANODE_ID" || true + fi + } + + # Get the LoadBalancer IP + LB_IP=$(kubectl get svc test-bgp-svc -n cilium-bgp-test -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + + # Get nodes with BGP label + BGP_NODES=$(kubectl get nodes -l cilium-bgp-peering=true -o name) + + if [ -z "$BGP_NODES" ]; then + echo "No nodes found with label cilium-bgp-peering=true" + delete_nanode + exit 1 + fi + + # Check if IP is shared on each BGP node + for node in $BGP_NODES; do + NODE_ID=$(kubectl get $node -o jsonpath='{.spec.providerID}' | sed 's|linode://||') + echo "Node ID: $NODE_ID" + + NODE_IP_RESPONSE=$(curl -s -H "Authorization: Bearer $LINODE_TOKEN" \ + "https://api.linode.com/v4/linode/instances/$NODE_ID/ips") + + SHARED_IPS=$(echo "$NODE_IP_RESPONSE" | jq -r '.ipv4.shared[]?.address // empty') + echo "shared IPs: $SHARED_IPS" + + if [ -n "$SHARED_IPS" ] && ! 
echo "$SHARED_IPS" | grep -q "$LB_IP"; then + echo "LoadBalancer IP $LB_IP not found in shared IPs of node $node" + delete_nanode + exit 1 + fi + done + + # Check if the nanode has the shared IP + NANODE_RESPONSE=$(curl -s -H "Authorization: Bearer $LINODE_TOKEN" \ + "https://api.linode.com/v4/linode/instances") + + NANODE_ID=$(echo "$NANODE_RESPONSE" | \ + jq -r --arg cluster "$CLUSTER_NAME" '.data[] | select(.label | endswith($cluster)) | .id') + + if [ -z "$NANODE_ID" ]; then + echo "No nanode found for cluster $CLUSTER_NAME" + exit 0 + fi + + NANODE_IP_RESPONSE=$(curl -s -H "Authorization: Bearer $LINODE_TOKEN" \ + "https://api.linode.com/v4/linode/instances/$NANODE_ID/ips") + + NANODE_IPS=$(echo "$NANODE_IP_RESPONSE" | jq -r '.ipv4.public[]?.address // empty') + + if [ -n "$NANODE_IPS" ] && ! echo "$NANODE_IPS" | grep -q "$LB_IP"; then + echo "LoadBalancer IP not found in nanode IPs" + delete_nanode + exit 1 + fi + + echo "Successfully found LoadBalancer IP in nanode IPs" + + # Delete the nanode on success + delete_nanode + check: + ($error == null): true + (contains($stdout, 'LoadBalancer IP not found in shared IPs of node')): false + (contains($stdout, 'LoadBalancer IP not found in nanode IPs')): false + (contains($stdout, 'Successfully found LoadBalancer IP in nanode IPs')): true + diff --git a/e2e/bgp-test/lb-cilium-bgp/create-pod-service.yaml b/e2e/bgp-test/lb-cilium-bgp/create-pod-service.yaml new file mode 100644 index 00000000..b2f96238 --- /dev/null +++ b/e2e/bgp-test/lb-cilium-bgp/create-pod-service.yaml @@ -0,0 +1,24 @@ +apiVersion: v1 +kind: Pod +metadata: + name: test-pod-1 + labels: + app: test-bgp +spec: + containers: + - name: nginx + image: nginx:latest + ports: + - containerPort: 80 +--- +apiVersion: v1 +kind: Service +metadata: + name: test-bgp-svc +spec: + type: LoadBalancer + ports: + - port: 80 + targetPort: 80 + selector: + app: test-bgp diff --git a/e2e/go.mod b/e2e/go.mod deleted file mode 100644 index eef1ec8a..00000000 --- a/e2e/go.mod +++ /dev/null @@ -1,77 +0,0 @@ -module e2e_test - -go 1.22 - -toolchain go1.22.2 - -require ( - github.com/appscode/go v0.0.0-20200323182826-54e98e09185a - github.com/linode/linodego v1.34.0 - github.com/onsi/ginkgo/v2 v2.17.1 - github.com/onsi/gomega v1.33.0 - k8s.io/api v0.23.17 - k8s.io/apimachinery v0.23.17 - k8s.io/client-go v1.5.2 -) - -require ( - github.com/davecgh/go-spew v1.1.1 // indirect - github.com/go-logr/logr v1.4.1 // indirect - github.com/go-resty/resty/v2 v2.13.1 // indirect - github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect - github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/protobuf v1.5.3 // indirect - github.com/google/go-cmp v0.6.0 // indirect - github.com/google/gofuzz v1.1.0 // indirect - github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38 // indirect - github.com/googleapis/gnostic v0.5.5 // indirect - github.com/imdario/mergo v0.3.5 // indirect - github.com/json-iterator/go v1.1.12 // indirect - github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect - github.com/modern-go/reflect2 v1.0.2 // indirect - github.com/onsi/ginkgo v1.16.5 // indirect - github.com/spf13/pflag v1.0.5 // indirect - golang.org/x/net v0.25.0 // indirect - golang.org/x/oauth2 v0.20.0 // indirect - golang.org/x/sys v0.20.0 // indirect - golang.org/x/term v0.20.0 // indirect - golang.org/x/text v0.15.0 // indirect - golang.org/x/time v0.5.0 // indirect - golang.org/x/tools v0.17.0 // indirect - google.golang.org/protobuf v1.33.0 // indirect - gopkg.in/inf.v0 
v0.9.1 // indirect - gopkg.in/ini.v1 v1.67.0 // indirect - gopkg.in/yaml.v2 v2.4.0 // indirect - gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/klog/v2 v2.30.0 // indirect - k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 // indirect - k8s.io/utils v0.0.0-20211116205334-6203023598ed // indirect - sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect - sigs.k8s.io/yaml v1.2.0 // indirect -) - -replace ( - k8s.io/api => k8s.io/api v0.23.17 - k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.23.17 - k8s.io/apimachinery => k8s.io/apimachinery v0.23.17 - k8s.io/apiserver => k8s.io/apiserver v0.23.17 - k8s.io/cli-runtime => k8s.io/cli-runtime v0.23.17 - k8s.io/client-go => k8s.io/client-go v0.23.17 - k8s.io/cloud-provider => k8s.io/cloud-provider v0.23.17 - k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.23.17 - k8s.io/code-generator => k8s.io/code-generator v0.23.17 - k8s.io/component-base => k8s.io/component-base v0.23.17 - k8s.io/cri-api => k8s.io/cri-api v0.23.17 - k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.23.17 - k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.23.17 - k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.23.17 - k8s.io/kube-proxy => k8s.io/kube-proxy v0.23.17 - k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.23.17 - k8s.io/kubectl => k8s.io/kubectl v0.23.17 - k8s.io/kubelet => k8s.io/kubelet v0.23.17 - k8s.io/kubernetes => k8s.io/kubernetes v0.23.17 - k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.23.17 - k8s.io/metrics => k8s.io/metrics v0.23.17 - k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.23.17 -) diff --git a/e2e/go.sum b/e2e/go.sum deleted file mode 100644 index a02b7c44..00000000 --- a/e2e/go.sum +++ /dev/null @@ -1,337 +0,0 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= -github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/appscode/go v0.0.0-20200323182826-54e98e09185a h1:cZ80NKoLRaW1PVCWXAJE+YFkBAmLZ8BnrJmH0ClY1Gs= -github.com/appscode/go v0.0.0-20200323182826-54e98e09185a/go.mod h1:lIcm8Z6VPuvcw/a3EeOWcG6R3I13iHMLYbtVP7TKufY= -github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/beevik/ntp v0.2.0/go.mod h1:hIHWr+l3+/clUnF44zdK+CWW7fO8dR5cIylAQ76NRpg= -github.com/cenkalti/backoff v2.1.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM= 
-github.com/codeskyblue/go-sh v0.0.0-20190412065543-76bd3d59ff27/go.mod h1:VQx0hjo2oUeQkQUET7wRwradO6f+fN5jzXgB/zROxxE= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= -github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= -github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/getkin/kin-openapi v0.76.0/go.mod h1:660oXbgy5JFMKreazJaQTw7o+X00qeSyhcnluiMv+Xg= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= -github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= -github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= -github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= -github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= -github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-resty/resty/v2 v2.13.1 h1:x+LHXBI2nMB1vqndymf26quycC4aggYJ7DECYbiz03g= -github.com/go-resty/resty/v2 v2.13.1/go.mod h1:GznXlLxkq6Nh4sU59rPmUw3VtgpO3aS96ORAI6Q7d+0= -github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= -github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= -github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf 
v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= -github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= -github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= -github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38 h1:yAJXTCF9TqKcTiHJAE8dj7HMvPfh66eeA2JYW7eFpSE= -github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= -github.com/googleapis/gnostic v0.5.5 h1:9fHAtK0uDfpveeqqo1hkEZJcFvYXAiCN3UutL8F9xHw= -github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= -github.com/howeyc/gopass v0.0.0-20170109162249-bf9dde6d0d2c/go.mod h1:lADxMC39cJJqL93Duh1xhAs4I2Zs8mKS89XWXFGp9cs= -github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/imdario/mergo v0.3.5 h1:JboBksRwiiAJWvIYJVo46AfV+IAIKZpfrSzVKj42R4Q= -github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/jarcoal/httpmock v1.3.1 h1:iUx3whfZWVf3jT01hQTO/Eo5sAYtB2/rqaUuOtpInww= -github.com/jarcoal/httpmock v1.3.1/go.mod h1:3yb8rc4BI7TCBhFY8ng0gjuLKJNquuDNiPaZjnENuYg= -github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.12 
h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= -github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= -github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= -github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= -github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= -github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/linode/linodego v1.34.0 h1:tBCwZzJTNh6Sr5xImkq/KQ/1rvUbH3aXGve5VuHEspQ= -github.com/linode/linodego v1.34.0/go.mod h1:JxuhOEAMfSxun6RU5/MgTKH2GGTmFrhKRj3wL1NFin0= -github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= -github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= -github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/moul/http2curl v1.0.0/go.mod h1:8UbvGypXm98wA/IqH45anm5Y2Z6ep6O31QGOAZ3H0fQ= -github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= -github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= -github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= -github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= -github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= -github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= -github.com/onsi/ginkgo/v2 v2.17.1 h1:V++EzdbhI4ZV4ev0UTIj0PzhzOcReJFyJaLjtSF55M8= -github.com/onsi/ginkgo/v2 v2.17.1/go.mod h1:llBI3WDLL9Z6taip6f33H76YcWtJv+7R3HigUjbIBOs= 
-github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= -github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= -github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.33.0 h1:snPCflnZrpMsy94p4lXVEkHo12lmPnc3vY5XBbreexE= -github.com/onsi/gomega v1.33.0/go.mod h1:+925n5YtiFsLzzafLUHzVMBpvvRAzrydIBiSIxjX3wY= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= -github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= -github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= -github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190422183909-d864b10871cd/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= -golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= 
-golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= -golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= -golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac= -golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.20.0 h1:4mQdhULixXKP1rwYBW0vAijoXnkTG0BLCDRzfe1idMo= -golang.org/x/oauth2 v0.20.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= -golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= -golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= -golang.org/x/term v0.20.0 h1:VnkxpohqXaOBYJtBmEppKUG6mXpi+4O6purfc2+sMhw= -golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.9.0/go.mod 
h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk= -golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= -golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.17.0 h1:FvmRgNOcs3kOa+T20R1uhfP9F6HgG2mfxDv1vrx1Htc= -golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -gomodules.xyz/version v0.1.0/go.mod h1:Y8xuV02mL/45psyPKG3NCVOwvAOy6T5Kx0l3rCjKSjU= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/protobuf 
v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= -google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= -gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= -gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= -gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= -gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 
-honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -k8s.io/api v0.23.17 h1:gC11V5AIsNXUUa/xd5RQo7djukvl5O1ZDQKwEYu0H7g= -k8s.io/api v0.23.17/go.mod h1:upM9VIzXUjEyLTmGGi0KnH8kdlPnvgv+fEJ3tggDHfE= -k8s.io/apimachinery v0.23.17 h1:ipJ0SrpI6EzH8zVw0WhCBldgJhzIamiYIumSGTdFExY= -k8s.io/apimachinery v0.23.17/go.mod h1:87v5Wl9qpHbnapX1PSNgln4oO3dlyjAU3NSIwNhT4Lo= -k8s.io/client-go v0.23.17 h1:MbW05RO5sy+TFw2ds36SDdNSkJbr8DFVaaVrClSA8Vs= -k8s.io/client-go v0.23.17/go.mod h1:X5yz7nbJHS7q8977AKn8BWKgxeAXjl1sFsgstczUsCM= -k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= -k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= -k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= -k8s.io/klog/v2 v2.30.0 h1:bUO6drIvCIsvZ/XFgfxoGFQU/a4Qkh0iAlvUR7vlHJw= -k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 h1:E3J9oCLlaobFUqsjG9DfKbP2BmgwBL2p7pn0A3dG9W4= -k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk= -k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20211116205334-6203023598ed h1:ck1fRPWPJWsMd8ZRFsWc6mh/zHp5fZ/shhbrgPUxDAE= -k8s.io/utils v0.0.0-20211116205334-6203023598ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 h1:fD1pz4yfdADVNfFmcP2aBEtudwUQ1AlLnRBALr33v3s= -sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6/go.mod h1:p4QtZmO4uMYipTQNzagwnNoseA6OxSUutVw05NhYDRs= -sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= -sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= -sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= -sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= -sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= -sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= diff --git a/e2e/setup/cilium-setup.sh b/e2e/setup/cilium-setup.sh new file mode 100755 index 00000000..9e8a7afb --- /dev/null +++ b/e2e/setup/cilium-setup.sh @@ -0,0 +1,32 @@ +#!/bin/bash +set -euo pipefail + +# Add bgp peering label to non control plane nodes. 
Needed to update the shared IP on the nodes +kubectl get nodes --no-headers | grep -v control-plane |\ + awk '{print $1}' | xargs -I {} kubectl label nodes {} cilium-bgp-peering=true --overwrite + +# Add RBAC permissions +kubectl patch clusterrole ccm-linode-clusterrole --type='json' -p='[{ + "op": "add", + "path": "/rules/-", + "value": { + "apiGroups": ["cilium.io"], + "resources": ["ciliumloadbalancerippools", "ciliumbgppeeringpolicies"], + "verbs": ["get", "list", "watch", "create", "update", "patch", "delete"] + } +}]' + +# Patch DaemonSet +kubectl patch daemonset ccm-linode -n kube-system --type='json' -p='[{ + "op": "add", + "path": "/spec/template/spec/containers/0/args/-", + "value": "--bgp-node-selector=cilium-bgp-peering=true" +}, { + "op": "add", + "path": "/spec/template/spec/containers/0/args/-", + "value": "--load-balancer-type=cilium-bgp" +}, { + "op": "add", + "path": "/spec/template/spec/containers/0/args/-", + "value": "--ip-holder-suffix='"${CLUSTER_SUFFIX}"'" +}]' diff --git a/e2e/setup/ctlptl-config.yaml b/e2e/setup/ctlptl-config.yaml new file mode 100644 index 00000000..6d9570a2 --- /dev/null +++ b/e2e/setup/ctlptl-config.yaml @@ -0,0 +1,9 @@ +--- +apiVersion: ctlptl.dev/v1alpha1 +kind: Cluster +product: kind +kindV1Alpha4Cluster: + name: caplccm + nodes: + - role: control-plane + image: kindest/node:v1.31.2 diff --git a/e2e/test/.gitignore b/e2e/test/.gitignore deleted file mode 100644 index ac582049..00000000 --- a/e2e/test/.gitignore +++ /dev/null @@ -1,5 +0,0 @@ -# dont commit temporary files written here by Terraform -*.conf -cluster.tf -terraform* -.terraform diff --git a/e2e/test/assert-ccm-resources.yaml b/e2e/test/assert-ccm-resources.yaml new file mode 100644 index 00000000..4d7d87d6 --- /dev/null +++ b/e2e/test/assert-ccm-resources.yaml @@ -0,0 +1,8 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: ccm-linode + namespace: kube-system +status: + numberAvailable: 1 + numberReady: 1 diff --git a/e2e/test/ccm_e2e_test.go b/e2e/test/ccm_e2e_test.go deleted file mode 100644 index d5ce41ba..00000000 --- a/e2e/test/ccm_e2e_test.go +++ /dev/null @@ -1,1398 +0,0 @@ -package test - -import ( - "context" - "e2e_test/test/framework" - "fmt" - "os/exec" - "strconv" - "time" - - "k8s.io/apimachinery/pkg/api/errors" - - "github.com/linode/linodego" - . "github.com/onsi/ginkgo/v2" - . 
"github.com/onsi/gomega" - "github.com/onsi/gomega/types" - core "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/apimachinery/pkg/watch" -) - -func EnsuredService() types.GomegaMatcher { - return And( - WithTransform(func(e watch.Event) (string, error) { - event, ok := e.Object.(*core.Event) - if !ok { - return "", fmt.Errorf("failed to poll event") - } - return event.Reason, nil - }, Equal("EnsuredLoadBalancer")), - ) -} - -var _ = Describe("e2e tests", func() { - var ( - err error - f *framework.Invocation - workers []string - ) - - const ( - annLinodeProxyProtocolDeprecated = "service.beta.kubernetes.io/linode-loadbalancer-proxy-protocol" - annLinodeDefaultProxyProtocol = "service.beta.kubernetes.io/linode-loadbalancer-default-proxy-protocol" - annLinodeDefaultProtocol = "service.beta.kubernetes.io/linode-loadbalancer-default-protocol" - annLinodePortConfigPrefix = "service.beta.kubernetes.io/linode-loadbalancer-port-" - annLinodeLoadBalancerPreserve = "service.beta.kubernetes.io/linode-loadbalancer-preserve" - annLinodeHealthCheckType = "service.beta.kubernetes.io/linode-loadbalancer-check-type" - annLinodeCheckBody = "service.beta.kubernetes.io/linode-loadbalancer-check-body" - annLinodeCheckPath = "service.beta.kubernetes.io/linode-loadbalancer-check-path" - annLinodeHealthCheckInterval = "service.beta.kubernetes.io/linode-loadbalancer-check-interval" - annLinodeHealthCheckTimeout = "service.beta.kubernetes.io/linode-loadbalancer-check-timeout" - annLinodeHealthCheckAttempts = "service.beta.kubernetes.io/linode-loadbalancer-check-attempts" - annLinodeHealthCheckPassive = "service.beta.kubernetes.io/linode-loadbalancer-check-passive" - annLinodeNodeBalancerID = "service.beta.kubernetes.io/linode-loadbalancer-nodebalancer-id" - annLinodeHostnameOnlyIngress = "service.beta.kubernetes.io/linode-loadbalancer-hostname-only-ingress" - ) - - BeforeEach(func() { - f = root.Invoke() - workers, err = f.GetNodeList() - Expect(err).NotTo(HaveOccurred()) - Expect(len(workers)).Should(BeNumerically(">=", 2)) - }) - - createPodWithLabel := func(pods []string, ports []core.ContainerPort, image string, labels map[string]string, selectNode bool) { - for i, pod := range pods { - p := f.LoadBalancer.GetPodObject(pod, image, ports, labels) - if selectNode { - p = f.LoadBalancer.SetNodeSelector(p, workers[i]) - } - Expect(f.LoadBalancer.CreatePod(p)).ToNot(BeNil()) - Eventually(f.LoadBalancer.GetPod).WithArguments(p.ObjectMeta.Name, f.LoadBalancer.Namespace()).Should(HaveField("Status.Phase", Equal(core.PodRunning))) - } - } - - deletePods := func(pods []string) { - for _, pod := range pods { - Expect(f.LoadBalancer.DeletePod(pod)).NotTo(HaveOccurred()) - } - } - - deleteService := func() { - Expect(f.LoadBalancer.DeleteService()).NotTo(HaveOccurred()) - } - - deleteSecret := func(name string) { - Expect(f.LoadBalancer.DeleteSecret(name)).NotTo(HaveOccurred()) - } - - ensureServiceLoadBalancer := func() { - watcher, err := f.LoadBalancer.GetServiceWatcher() - Expect(err).NotTo(HaveOccurred()) - Eventually(watcher.ResultChan()).Should(Receive(EnsuredService())) - } - - ensureServiceWasDeleted := func() { - err := func() error { - _, err := f.LoadBalancer.GetService() - return err - } - Eventually(err).WithTimeout(10 * time.Second).Should(MatchError(errors.IsNotFound, "IsNotFound")) - } - - createServiceWithSelector := func(selector map[string]string, ports []core.ServicePort, isSessionAffinityClientIP bool) { - Expect(f.LoadBalancer.CreateService(selector, nil, ports, 
isSessionAffinityClientIP)).NotTo(HaveOccurred()) - Eventually(f.LoadBalancer.GetServiceEndpoints).Should(Not(BeEmpty())) - ensureServiceLoadBalancer() - } - - createServiceWithAnnotations := func(labels, annotations map[string]string, ports []core.ServicePort, isSessionAffinityClientIP bool) { - Expect(f.LoadBalancer.CreateService(labels, annotations, ports, isSessionAffinityClientIP)).NotTo(HaveOccurred()) - Eventually(f.LoadBalancer.GetServiceEndpoints).Should(Not(BeEmpty())) - ensureServiceLoadBalancer() - } - - updateServiceWithAnnotations := func(labels, annotations map[string]string, ports []core.ServicePort, isSessionAffinityClientIP bool) { - Expect(f.LoadBalancer.UpdateService(labels, annotations, ports, isSessionAffinityClientIP)).NotTo(HaveOccurred()) - Eventually(f.LoadBalancer.GetServiceEndpoints).Should(Not(BeEmpty())) - ensureServiceLoadBalancer() - } - - deleteNodeBalancer := func(id int) { - Expect(getLinodeClient().DeleteNodeBalancer(context.Background(), id)).NotTo(HaveOccurred()) - } - - createNodeBalancer := func() int { - var nb *linodego.NodeBalancer - nb, err = getLinodeClient().CreateNodeBalancer(context.TODO(), linodego.NodeBalancerCreateOptions{ - Region: region, - }) - Expect(err).NotTo(HaveOccurred()) - Expect(nb).NotTo(BeNil()) - return nb.ID - } - - checkNumberOfWorkerNodes := func(numNodes int) { - Eventually(f.GetNodeList).Should(HaveLen(numNodes)) - } - - checkNumberOfUpNodes := func(numNodes int) { - By("Checking the Number of Up Nodes") - Eventually(f.LoadBalancer.GetNodeBalancerUpNodes).WithArguments(framework.TestServerResourceName).Should(BeNumerically(">=", numNodes)) - } - - checkNodeBalancerExists := func(id int) { - By("Checking if the NodeBalancer exists") - Eventually(getLinodeClient().GetNodeBalancer).WithArguments(context.Background(), id).Should(HaveField("ID", Equal(id))) - } - - checkNodeBalancerNotExists := func(id int) { - Eventually(func() int { - _, err := getLinodeClient().GetNodeBalancer(context.Background(), id) - if err == nil { - return 0 - } - linodeErr, _ := err.(*linodego.Error) - return linodeErr.Code - }).Should(Equal(404)) - } - - type checkArgs struct { - checkType, path, body, interval, timeout, attempts, checkPassive, protocol, proxyProtocol string - checkNodes bool - } - - checkNodeBalancerID := func(service string, expectedID int) { - Eventually(f.LoadBalancer.GetNodeBalancerID).WithArguments(service).Should(Equal(expectedID)) - } - - checkLBStatus := func(service string, hasIP bool) { - Eventually(f.LoadBalancer.GetNodeBalancerFromService).WithArguments(service, hasIP).Should(Not(BeNil())) - } - - checkNodeBalancerConfigForPort := func(port int, args checkArgs) { - By("Getting NodeBalancer Configuration for port " + strconv.Itoa(port)) - var nbConfig *linodego.NodeBalancerConfig - Eventually(func() error { - nbConfig, err = f.LoadBalancer.GetNodeBalancerConfigForPort(framework.TestServerResourceName, port) - return err - }).Should(BeNil()) - - if args.checkType != "" { - By("Checking Health Check Type") - Expect(string(nbConfig.Check)).To(Equal(args.checkType)) - } - - if args.path != "" { - By("Checking Health Check Path") - Expect(nbConfig.CheckPath).To(Equal(args.path)) - } - - if args.body != "" { - By("Checking Health Check Body") - Expect(nbConfig.CheckBody).To(Equal(args.body)) - } - - if args.interval != "" { - By("Checking TCP Connection Health Check Body") - intInterval, err := strconv.Atoi(args.interval) - Expect(err).NotTo(HaveOccurred()) - - Expect(nbConfig.CheckInterval).To(Equal(intInterval)) - } - - if 
args.timeout != "" { - By("Checking TCP Connection Health Check Timeout") - intTimeout, err := strconv.Atoi(args.timeout) - Expect(err).NotTo(HaveOccurred()) - - Expect(nbConfig.CheckTimeout).To(Equal(intTimeout)) - } - - if args.attempts != "" { - By("Checking TCP Connection Health Check Attempts") - intAttempts, err := strconv.Atoi(args.attempts) - Expect(err).NotTo(HaveOccurred()) - - Expect(nbConfig.CheckAttempts).To(Equal(intAttempts)) - } - - if args.checkPassive != "" { - By("Checking for Passive Health Check") - boolCheckPassive, err := strconv.ParseBool(args.checkPassive) - Expect(err).NotTo(HaveOccurred()) - - Expect(nbConfig.CheckPassive).To(Equal(boolCheckPassive)) - } - - if args.protocol != "" { - By("Checking for Protocol") - Expect(string(nbConfig.Protocol)).To(Equal(args.protocol)) - } - - if args.proxyProtocol != "" { - By("Checking for Proxy Protocol") - Expect(string(nbConfig.ProxyProtocol)).To(Equal(args.proxyProtocol)) - } - - if args.checkNodes { - checkNumberOfUpNodes(2) - } - } - - addNewNode := func() { - err := exec.Command("terraform", "apply", "-var", "nodes=3", "-auto-approve").Run() - Expect(err).NotTo(HaveOccurred()) - } - - deleteNewNode := func() { - err := exec.Command("terraform", "apply", "-var", "nodes=2", "-auto-approve").Run() - Expect(err).NotTo(HaveOccurred()) - } - - waitForNodeAddition := func() { - checkNumberOfUpNodes(3) - } - - Describe("Test", func() { - Context("Simple", func() { - Context("Load Balancer", func() { - var ( - pods []string - labels map[string]string - ) - - BeforeEach(func() { - pods = []string{"test-pod-1", "test-pod-2"} - ports := []core.ContainerPort{ - { - Name: "http-1", - ContainerPort: 8080, - }, - } - servicePorts := []core.ServicePort{ - { - Name: "http-1", - Port: 80, - TargetPort: intstr.FromInt(8080), - Protocol: "TCP", - }, - } - labels = map[string]string{ - "app": "test-loadbalancer", - } - - By("Creating Pods") - createPodWithLabel(pods, ports, framework.TestServerImage, labels, true) - - By("Creating Service") - createServiceWithSelector(labels, servicePorts, false) - }) - - AfterEach(func() { - By("Deleting the Pods") - deletePods(pods) - - By("Deleting the Service") - deleteService() - }) - - It("should reach all pods", func() { - By("Checking TCP Response") - var eps []string - Eventually(func() error { - eps, err = f.LoadBalancer.GetLoadBalancerIps() - return err - }).Should(BeNil()) - Eventually(framework.GetResponseFromCurl).WithArguments(eps[0]).Should(ContainSubstring(pods[0])) - Eventually(framework.GetResponseFromCurl).WithArguments(eps[0]).Should(ContainSubstring(pods[1])) - }) - }) - }) - }) - - Describe("Test", func() { - Context("LoadBalancer", func() { - AfterEach(func() { - err := root.Recycle() - Expect(err).NotTo(HaveOccurred()) - }) - - Context("With single TLS port", func() { - var ( - pods []string - labels map[string]string - annotations map[string]string - secretName string - ) - BeforeEach(func() { - pods = []string{"test-single-port-pod"} - ports := []core.ContainerPort{ - { - Name: "https", - ContainerPort: 8080, - }, - } - servicePorts := []core.ServicePort{ - { - Name: "https", - Port: 80, - TargetPort: intstr.FromInt(8080), - Protocol: "TCP", - }, - } - secretName = "tls-secret" - labels = map[string]string{ - "app": "test-loadbalancer", - } - annotations = map[string]string{ - annLinodePortConfigPrefix + "80": `{ "tls-secret-name": "` + secretName + `" }`, - annLinodeDefaultProtocol: "https", - } - - By("Creating Pod") - createPodWithLabel(pods, ports, framework.TestServerImage, 
labels, false) - - By("Creating Secret") - Expect(f.LoadBalancer.CreateTLSSecret("tls-secret")).NotTo(HaveOccurred()) - - By("Creating Service") - createServiceWithAnnotations(labels, annotations, servicePorts, false) - }) - - AfterEach(func() { - By("Deleting the Secrets") - deletePods(pods) - - By("Deleting the Service") - deleteService() - - By("Deleting the Secret") - deleteSecret(secretName) - }) - - It("should reach the pod via tls", func() { - By("Checking TCP Response") - var eps []string - Eventually(func() error { - eps, err = f.LoadBalancer.GetLoadBalancerIps() - return err - }).Should(BeNil()) - - By("Waiting for Response from the LoadBalancer url: " + eps[0]) - Eventually(framework.WaitForHTTPSResponse).WithArguments(eps[0]).Should(ContainSubstring(pods[0])) - }) - }) - - Context("With Hostname only ingress", func() { - var ( - pods []string - labels map[string]string - servicePorts []core.ServicePort - - annotations = map[string]string{} - ) - - BeforeEach(func() { - pods = []string{"test-pod-1"} - ports := []core.ContainerPort{ - { - Name: "http-1", - ContainerPort: 80, - }, - } - servicePorts = []core.ServicePort{ - { - Name: "http-1", - Port: 80, - TargetPort: intstr.FromInt(80), - Protocol: "TCP", - }, - } - - labels = map[string]string{ - "app": "test-loadbalancer-with-hostname-only-ingress", - } - - By("Creating Pod") - createPodWithLabel(pods, ports, framework.TestServerImage, labels, false) - - By("Creating Service") - createServiceWithAnnotations(labels, map[string]string{}, servicePorts, false) - }) - - AfterEach(func() { - By("Deleting the Pods") - deletePods(pods) - - By("Deleting the Service") - deleteService() - }) - - It("can update service to only use Hostname in ingress", func() { - By("Checking LB Status has IP") - checkLBStatus(framework.TestServerResourceName, true) - - By("Annotating service with " + annLinodeHostnameOnlyIngress) - updateServiceWithAnnotations(labels, map[string]string{ - annLinodeHostnameOnlyIngress: "true", - }, servicePorts, false) - - By("Checking LB Status does not have IP") - checkLBStatus(framework.TestServerResourceName, false) - }) - - annotations[annLinodeHostnameOnlyIngress] = "true" - - It("can create a service that only uses Hostname in ingress", func() { - By("Creating a service annotated with " + annLinodeHostnameOnlyIngress) - checkLBStatus(framework.TestServerResourceName, true) - }) - }) - - Context("With ProxyProtocol", func() { - var ( - pods []string - labels map[string]string - servicePorts []core.ServicePort - - proxyProtocolV1 = string(linodego.ProxyProtocolV1) - proxyProtocolV2 = string(linodego.ProxyProtocolV2) - proxyProtocolNone = string(linodego.ProxyProtocolNone) - ) - BeforeEach(func() { - pods = []string{"test-pod-1"} - ports := []core.ContainerPort{ - { - Name: "http-1", - ContainerPort: 80, - }, - { - Name: "http-2", - ContainerPort: 8080, - }, - } - servicePorts = []core.ServicePort{ - { - Name: "http-1", - Port: 80, - TargetPort: intstr.FromInt(80), - Protocol: "TCP", - }, - { - Name: "http-2", - Port: 8080, - TargetPort: intstr.FromInt(8080), - Protocol: "TCP", - }, - } - - labels = map[string]string{ - "app": "test-loadbalancer-with-proxyprotocol", - } - - By("Creating Pod") - createPodWithLabel(pods, ports, framework.TestServerImage, labels, false) - - By("Creating Service") - createServiceWithAnnotations(labels, map[string]string{}, servicePorts, false) - }) - - AfterEach(func() { - By("Deleting the Pods") - deletePods(pods) - - By("Deleting the Service") - deleteService() - }) - - It("can set 
proxy-protocol on each port", func() { - By("Annotating port 80 with v1 and 8080 with v2") - updateServiceWithAnnotations(labels, map[string]string{ - annLinodePortConfigPrefix + "80": fmt.Sprintf(`{"proxy-protocol": "%s"}`, proxyProtocolV1), - annLinodePortConfigPrefix + "8080": fmt.Sprintf(`{"proxy-protocol": "%s"}`, proxyProtocolV2), - }, servicePorts, false) - - By("Checking NodeBalancerConfig for port 80 should have ProxyProtocol v1") - checkNodeBalancerConfigForPort(80, checkArgs{proxyProtocol: proxyProtocolV1}) - - By("Checking NodeBalancerConfig for port 8080 should have ProxyProtocol v2") - checkNodeBalancerConfigForPort(8080, checkArgs{proxyProtocol: proxyProtocolV2}) - }) - - It("should override default proxy-protocol annotation when a port configuration is specified", func() { - By("Annotating a default version of ProxyProtocol v2 and v1 for port 8080") - updateServiceWithAnnotations(labels, map[string]string{ - annLinodeDefaultProxyProtocol: proxyProtocolV2, - annLinodePortConfigPrefix + "8080": fmt.Sprintf(`{"proxy-protocol": "%s"}`, proxyProtocolV1), - }, servicePorts, false) - - By("Checking NodeBalancerConfig for port 80 should have the default ProxyProtocol v2") - checkNodeBalancerConfigForPort(80, checkArgs{proxyProtocol: proxyProtocolV2}) - - By("Checking NodeBalancerConfig for port 8080 should have ProxyProtocol v1") - checkNodeBalancerConfigForPort(8080, checkArgs{proxyProtocol: proxyProtocolV1}) - }) - - It("port specific configuration should not effect other ports", func() { - By("Annotating ProxyProtocol v2 on port 8080") - updateServiceWithAnnotations(labels, map[string]string{ - annLinodePortConfigPrefix + "8080": fmt.Sprintf(`{"proxy-protocol": "%s"}`, proxyProtocolV2), - }, servicePorts, false) - - By("Checking NodeBalancerConfig for port 8080 should have ProxyProtocolv2") - checkNodeBalancerConfigForPort(8080, checkArgs{proxyProtocol: proxyProtocolV2}) - - By("Checking NodeBalancerConfig for port 80 should not have ProxyProtocol enabled") - checkNodeBalancerConfigForPort(80, checkArgs{proxyProtocol: proxyProtocolNone}) - }) - - It("default annotations can be used to apply ProxyProtocol to all NodeBalancerConfigs", func() { - annotations := make(map[string]string) - - By("By specifying ProxyProtocol v2 using the deprecated annotation " + annLinodeProxyProtocolDeprecated) - annotations[annLinodeProxyProtocolDeprecated] = proxyProtocolV2 - updateServiceWithAnnotations(labels, annotations, servicePorts, false) - - By("Checking NodeBalancerConfig for port 80 should have default ProxyProtocol v2") - checkNodeBalancerConfigForPort(80, checkArgs{proxyProtocol: proxyProtocolV2}) - By("Checking NodeBalancerConfig for port 8080 should have ProxyProtocol v2") - checkNodeBalancerConfigForPort(8080, checkArgs{proxyProtocol: proxyProtocolV2}) - - By("specifying ProxyProtocol v1 using the annotation " + annLinodeDefaultProtocol) - annotations[annLinodeDefaultProxyProtocol] = proxyProtocolV1 - updateServiceWithAnnotations(labels, annotations, servicePorts, false) - - By("Checking NodeBalancerConfig for port 80 should have default ProxyProtocol v1") - checkNodeBalancerConfigForPort(80, checkArgs{proxyProtocol: proxyProtocolV1}) - By("Checking NodeBalancerConfig for port 8080 should have ProxyProtocol v1") - checkNodeBalancerConfigForPort(8080, checkArgs{proxyProtocol: proxyProtocolV1}) - }) - }) - - Context("With Multiple HTTP and HTTPS Ports", func() { - var ( - pods []string - labels map[string]string - annotations map[string]string - secretName1 string - secretName2 string - 
) - BeforeEach(func() { - pods = []string{"tls-multi-port-pod"} - secretName1 = "tls-secret-1" - secretName2 = "tls-secret-2" - labels = map[string]string{ - "app": "test-loadbalancer", - } - annotations = map[string]string{ - annLinodeDefaultProtocol: "https", - annLinodePortConfigPrefix + "80": `{"protocol": "http"}`, - annLinodePortConfigPrefix + "8080": `{"protocol": "http"}`, - annLinodePortConfigPrefix + "443": `{"tls-secret-name": "` + secretName1 + `"}`, - annLinodePortConfigPrefix + "8443": `{"tls-secret-name": "` + secretName2 + `", "protocol": "https"}`, - } - ports := []core.ContainerPort{ - { - Name: "alpha", - ContainerPort: 8080, - }, - { - Name: "beta", - ContainerPort: 8989, - }, - } - servicePorts := []core.ServicePort{ - { - Name: "http-1", - Port: 80, - TargetPort: intstr.FromInt(8989), - Protocol: "TCP", - }, - { - Name: "http-2", - Port: 8080, - TargetPort: intstr.FromInt(8080), - Protocol: "TCP", - }, - { - Name: "https-1", - Port: 443, - TargetPort: intstr.FromInt(8080), - Protocol: "TCP", - }, - { - Name: "https-2", - Port: 8443, - TargetPort: intstr.FromInt(8989), - Protocol: "TCP", - }, - } - - By("Creating Pod") - createPodWithLabel(pods, ports, framework.TestServerImage, labels, false) - - By("Creating Secret") - err = f.LoadBalancer.CreateTLSSecret(secretName1) - Expect(err).NotTo(HaveOccurred()) - err = f.LoadBalancer.CreateTLSSecret(secretName2) - Expect(err).NotTo(HaveOccurred()) - - By("Creating Service") - createServiceWithAnnotations(labels, annotations, servicePorts, false) - }) - - AfterEach(func() { - By("Deleting the Secrets") - deletePods(pods) - - By("Deleting the Service") - deleteService() - - By("Deleting the Secret") - deleteSecret(secretName1) - deleteSecret(secretName2) - }) - - It("should reach the pods", func() { - By("Checking TCP Response") - var eps []string - Eventually(func() error { - eps, err = f.LoadBalancer.GetLoadBalancerIps() - return err - }).Should(BeNil()) - Expect(eps).Should(HaveLen(4)) - - // in order of the spec - http80, http8080, https443, https8443 := eps[0], eps[1], eps[2], eps[3] - Eventually(framework.WaitForHTTPResponse).WithArguments(http80).Should(ContainSubstring(pods[0])) - Eventually(framework.WaitForHTTPResponse).WithArguments(http8080).Should(ContainSubstring(pods[0])) - Eventually(framework.WaitForHTTPSResponse).WithArguments(https443).Should(ContainSubstring(pods[0])) - Eventually(framework.WaitForHTTPSResponse).WithArguments(https8443).Should(ContainSubstring(pods[0])) - }) - }) - - Context("With HTTP updating to have HTTPS", Serial, func() { - var ( - pods []string - labels map[string]string - annotations map[string]string - secretName string - ) - BeforeEach(func() { - pods = []string{"tls-pod"} - secretName = "tls-secret-1" - labels = map[string]string{ - "app": "test-loadbalancer", - } - annotations = map[string]string{ - annLinodeDefaultProtocol: "https", - annLinodePortConfigPrefix + "80": `{"protocol": "http"}`, - } - ports := []core.ContainerPort{ - { - Name: "alpha", - ContainerPort: 8080, - }, - } - servicePorts := []core.ServicePort{ - { - Name: "http", - Port: 80, - TargetPort: intstr.FromInt(8080), - Protocol: "TCP", - }, - } - - By("Creating Pod") - createPodWithLabel(pods, ports, framework.TestServerImage, labels, false) - - By("Creating Service") - createServiceWithAnnotations(labels, annotations, servicePorts, false) - - By("Creating Secret") - err = f.LoadBalancer.CreateTLSSecret(secretName) - Expect(err).NotTo(HaveOccurred()) - - By("Updating the Service") - updateAnnotations := 
map[string]string{ - annLinodeDefaultProtocol: "https", - annLinodePortConfigPrefix + "80": `{"protocol": "http"}`, - annLinodePortConfigPrefix + "443": `{"tls-secret-name": "` + secretName + `", "protocol": "https"}`, - } - updateServicePorts := []core.ServicePort{ - { - Name: "http", - Port: 80, - TargetPort: intstr.FromInt(8080), - Protocol: "TCP", - }, - { - Name: "https", - Port: 443, - TargetPort: intstr.FromInt(8080), - Protocol: "TCP", - }, - } - updateServiceWithAnnotations(labels, updateAnnotations, updateServicePorts, false) - }) - - AfterEach(func() { - By("Deleting the Secrets") - deletePods(pods) - - By("Deleting the Service") - deleteService() - - By("Deleting the Secret") - deleteSecret(secretName) - }) - - It("should reach the pods", func() { - By("Checking TCP Response") - var eps []string - Eventually(func() error { - eps, err = f.LoadBalancer.GetLoadBalancerIps() - return err - }).Should(BeNil()) - Expect(eps).Should(HaveLen(2)) - http80, https443 := eps[0], eps[1] - By("Waiting for Response from the LoadBalancer url: " + http80) - Eventually(framework.WaitForHTTPResponse).WithArguments(http80).Should(ContainSubstring(pods[0])) - - By("Waiting for Response from the LoadBalancer url: " + https443) - Eventually(framework.WaitForHTTPSResponse).WithArguments(https443).Should(ContainSubstring(pods[0])) - }) - }) - - Context("For HTTP body health check", func() { - var ( - pods []string - labels map[string]string - annotations map[string]string - - checkType = "http_body" - path = "/" - body = "nginx" - protocol = "http" - ) - BeforeEach(func() { - pods = []string{"test-pod-http-body"} - ports := []core.ContainerPort{ - { - Name: "http", - ContainerPort: 80, - }, - } - servicePorts := []core.ServicePort{ - { - Name: "http", - Port: 80, - TargetPort: intstr.FromInt(80), - Protocol: "TCP", - }, - } - - labels = map[string]string{ - "app": "test-loadbalancer", - } - annotations = map[string]string{ - annLinodeHealthCheckType: checkType, - annLinodeCheckPath: path, - annLinodeCheckBody: body, - annLinodeDefaultProtocol: protocol, - } - - By("Creating Pod") - createPodWithLabel(pods, ports, "nginx", labels, false) - - By("Creating Service") - createServiceWithAnnotations(labels, annotations, servicePorts, false) - }) - - AfterEach(func() { - By("Deleting the Pods") - deletePods(pods) - - By("Deleting the Service") - deleteService() - }) - - It("should successfully check the health of 2 nodes", func() { - By("Checking NodeBalancer Configurations") - checkNodeBalancerConfigForPort(80, checkArgs{ - checkType: checkType, - path: path, - body: body, - protocol: protocol, - checkNodes: true, - }) - }) - }) - - Context("Updated with NodeBalancerID", func() { - var ( - pods []string - labels map[string]string - servicePorts []core.ServicePort - - annotations = map[string]string{} - ) - BeforeEach(func() { - pods = []string{"test-pod-1"} - ports := []core.ContainerPort{ - { - Name: "http-1", - ContainerPort: 8080, - }, - } - servicePorts = []core.ServicePort{ - { - Name: "http-1", - Port: 80, - TargetPort: intstr.FromInt(8080), - Protocol: "TCP", - }, - } - - labels = map[string]string{ - "app": "test-loadbalancer-with-nodebalancer-id", - } - - By("Creating Pod") - createPodWithLabel(pods, ports, framework.TestServerImage, labels, false) - - By("Creating Service") - createServiceWithAnnotations(labels, annotations, servicePorts, false) - }) - - AfterEach(func() { - By("Deleting the Pods") - deletePods(pods) - - By("Deleting the Service") - deleteService() - }) - - It("should use the 
specified NodeBalancer", func() { - By("Creating new NodeBalancer") - nbID := createNodeBalancer() - - By("Annotating service with new NodeBalancer ID") - annotations[annLinodeNodeBalancerID] = strconv.Itoa(nbID) - updateServiceWithAnnotations(labels, annotations, servicePorts, false) - - By("Checking the NodeBalancer ID") - checkNodeBalancerID(framework.TestServerResourceName, nbID) - }) - }) - - Context("Created with NodeBalancerID", func() { - var ( - pods []string - labels map[string]string - annotations map[string]string - servicePorts []core.ServicePort - - nodeBalancerID int - ) - - BeforeEach(func() { - pods = []string{"test-pod-1"} - ports := []core.ContainerPort{ - { - Name: "http-1", - ContainerPort: 8080, - }, - } - servicePorts = []core.ServicePort{ - { - Name: "http-1", - Port: 80, - TargetPort: intstr.FromInt(8080), - Protocol: "TCP", - }, - } - - labels = map[string]string{ - "app": "test-loadbalancer-with-nodebalancer-id", - } - - By("Creating NodeBalancer") - nodeBalancerID = createNodeBalancer() - - annotations = map[string]string{ - annLinodeNodeBalancerID: strconv.Itoa(nodeBalancerID), - } - - By("Creating Pod") - createPodWithLabel(pods, ports, framework.TestServerImage, labels, false) - - By("Creating Service") - createServiceWithAnnotations(labels, annotations, servicePorts, false) - }) - - AfterEach(func() { - By("Deleting the Pods") - deletePods(pods) - - By("Deleting the Service") - deleteService() - - err := root.Recycle() - Expect(err).NotTo(HaveOccurred()) - }) - - It("should use the specified NodeBalancer", func() { - By("Checking the NodeBalancerID") - checkNodeBalancerID(framework.TestServerResourceName, nodeBalancerID) - }) - - It("should use the newly specified NodeBalancer ID", func() { - By("Creating new NodeBalancer") - nbID := createNodeBalancer() - - By("Waiting for current NodeBalancer to be ready") - checkNodeBalancerID(framework.TestServerResourceName, nodeBalancerID) - - By("Annotating service with new NodeBalancer ID") - annotations[annLinodeNodeBalancerID] = strconv.Itoa(nbID) - updateServiceWithAnnotations(labels, annotations, servicePorts, false) - - By("Checking the NodeBalancer ID") - checkNodeBalancerID(framework.TestServerResourceName, nbID) - - By("Checking old NodeBalancer was deleted") - checkNodeBalancerNotExists(nodeBalancerID) - }) - }) - - Context("Deleted Service when NodeBalancer not present", func() { - var ( - pods []string - labels map[string]string - annotations map[string]string - servicePorts []core.ServicePort - - nodeBalancerID int - ) - - BeforeEach(func() { - pods = []string{"test-pod-1"} - ports := []core.ContainerPort{ - { - Name: "http-1", - ContainerPort: 8080, - }, - } - servicePorts = []core.ServicePort{ - { - Name: "http-1", - Port: 80, - TargetPort: intstr.FromInt(8080), - Protocol: "TCP", - }, - } - - labels = map[string]string{ - "app": "test-loadbalancer-with-nodebalancer-id", - } - - By("Creating NodeBalancer") - nodeBalancerID = createNodeBalancer() - - annotations = map[string]string{ - annLinodeNodeBalancerID: strconv.Itoa(nodeBalancerID), - } - - By("Creating Pod") - createPodWithLabel(pods, ports, framework.TestServerImage, labels, false) - - By("Creating Service") - createServiceWithAnnotations(labels, annotations, servicePorts, false) - }) - - AfterEach(func() { - By("Deleting the Pods") - deletePods(pods) - - err := root.Recycle() - Expect(err).NotTo(HaveOccurred()) - }) - - It("should use the specified NodeBalancer", func() { - By("Checking the NodeBalancerID") - 
checkNodeBalancerID(framework.TestServerResourceName, nodeBalancerID) - }) - - It("should use the newly specified NodeBalancer ID", func() { - By("Creating new NodeBalancer") - nbID := createNodeBalancer() - - By("Waiting for current NodeBalancer to be ready") - checkNodeBalancerID(framework.TestServerResourceName, nodeBalancerID) - - By("Annotating service with new NodeBalancer ID") - annotations[annLinodeNodeBalancerID] = strconv.Itoa(nbID) - updateServiceWithAnnotations(labels, annotations, servicePorts, false) - - By("Checking the NodeBalancer ID") - checkNodeBalancerID(framework.TestServerResourceName, nbID) - - By("Checking old NodeBalancer was deleted") - checkNodeBalancerNotExists(nodeBalancerID) - }) - - It("should delete the service with no NodeBalancer present", func() { - By("Deleting the NodeBalancer") - deleteNodeBalancer(nodeBalancerID) - - By("Checking old NodeBalancer was deleted") - checkNodeBalancerNotExists(nodeBalancerID) - - By("Deleting the Service") - deleteService() - - By("Checking if the service was deleted") - ensureServiceWasDeleted() - }) - }) - - Context("With Preserve Annotation", func() { - var ( - pods []string - servicePorts []core.ServicePort - labels map[string]string - annotations map[string]string - nodeBalancerID int - ) - - BeforeEach(func() { - pods = []string{"test-pod-1"} - ports := []core.ContainerPort{ - { - Name: "http-1", - ContainerPort: 8080, - }, - } - servicePorts = []core.ServicePort{ - { - Name: "http-1", - Port: 80, - TargetPort: intstr.FromInt(8080), - Protocol: "TCP", - }, - } - - labels = map[string]string{ - "app": "test-loadbalancer", - } - annotations = map[string]string{ - annLinodeLoadBalancerPreserve: "true", - } - - By("Creating Pod") - createPodWithLabel(pods, ports, framework.TestServerImage, labels, false) - - By("Creating Service") - createServiceWithAnnotations(labels, annotations, servicePorts, false) - - By("Getting NodeBalancer ID") - nodeBalancerID, err = f.LoadBalancer.GetNodeBalancerID(framework.TestServerResourceName) - Expect(err).NotTo(HaveOccurred()) - }) - - AfterEach(func() { - By("Deleting the NodeBalancer") - deleteNodeBalancer(nodeBalancerID) - - err := root.Recycle() - Expect(err).NotTo(HaveOccurred()) - }) - - It("should preserve the underlying nodebalancer after service deletion", func() { - By("Deleting the Pods") - deletePods(pods) - - By("Deleting the Service") - deleteService() - - By("Checking if the NodeBalancer exists") - checkNodeBalancerExists(nodeBalancerID) - }) - - It("should preserve the underlying nodebalancer after a new one is specified", func() { - defer func() { - By("Deleting the Pods") - deletePods(pods) - - By("Deleting the Service") - deleteService() - }() - - By("Creating new NodeBalancer") - newID := createNodeBalancer() - defer func() { - By("Deleting new NodeBalancer") - deleteNodeBalancer(newID) - }() - - By("Annotating service with new NodeBalancer ID") - annotations[annLinodeNodeBalancerID] = strconv.Itoa(newID) - updateServiceWithAnnotations(labels, annotations, servicePorts, false) - - By("Checking the service's NodeBalancer ID") - checkNodeBalancerID(framework.TestServerResourceName, newID) - - By("Checking the old NodeBalancer exists") - checkNodeBalancerExists(nodeBalancerID) - }) - }) - - Context("With Node Addition", func() { - var ( - pods []string - labels map[string]string - ) - - BeforeEach(func() { - Skip("skip until rewritten to drop terraform") - pods = []string{"test-pod-node-add"} - ports := []core.ContainerPort{ - { - Name: "http-1", - ContainerPort: 8080, - 
}, - } - servicePorts := []core.ServicePort{ - { - Name: "http-1", - Port: 80, - TargetPort: intstr.FromInt(8080), - Protocol: "TCP", - }, - } - labels = map[string]string{ - "app": "test-loadbalancer", - } - - By("Creating Pods") - createPodWithLabel(pods, ports, framework.TestServerImage, labels, false) - - By("Creating Service") - createServiceWithSelector(labels, servicePorts, false) - }) - - AfterEach(func() { - By("Deleting the Pods") - deletePods(pods) - - By("Deleting the Service") - deleteService() - - By("Deleting the Newly Created Nodes") - deleteNewNode() - - By("Waiting for the Node to be removed") - checkNumberOfWorkerNodes(2) - }) - - It("should reach the same pod every time it requests", func() { - By("Adding a New Node") - addNewNode() - - By("Waiting for the Node to be Added to the NodeBalancer") - waitForNodeAddition() - }) - }) - - Context("For TCP Connection health check", func() { - var ( - pods []string - labels map[string]string - annotations map[string]string - - checkType = "connection" - interval = "10" - timeout = "5" - attempts = "4" - protocol = "tcp" - ) - BeforeEach(func() { - pods = []string{"test-pod-tcp"} - ports := []core.ContainerPort{ - { - Name: "http", - ContainerPort: 80, - }, - } - servicePorts := []core.ServicePort{ - { - Name: "http", - Port: 80, - TargetPort: intstr.FromInt(80), - Protocol: "TCP", - }, - } - - labels = map[string]string{ - "app": "test-loadbalancer", - } - annotations = map[string]string{ - annLinodeHealthCheckType: checkType, - annLinodeDefaultProtocol: protocol, - annLinodeHealthCheckInterval: interval, - annLinodeHealthCheckTimeout: timeout, - annLinodeHealthCheckAttempts: attempts, - } - - By("Creating Pod") - createPodWithLabel(pods, ports, "nginx", labels, false) - - By("Creating Service") - createServiceWithAnnotations(labels, annotations, servicePorts, false) - }) - - AfterEach(func() { - By("Deleting the Pods") - deletePods(pods) - - By("Deleting the Service") - deleteService() - }) - - It("should successfully check the health of 2 nodes", func() { - By("Checking NodeBalancer Configurations") - checkNodeBalancerConfigForPort(80, checkArgs{ - checkType: checkType, - interval: interval, - timeout: timeout, - attempts: attempts, - protocol: protocol, - checkNodes: true, - }) - }) - }) - - Context("For Passive Health Check", func() { - var ( - pods []string - labels map[string]string - annotations map[string]string - - checkType = "none" - checkPassive = "true" - ) - BeforeEach(func() { - pods = []string{"test-pod-passive-hc"} - ports := []core.ContainerPort{ - { - Name: "http", - ContainerPort: 80, - }, - } - servicePorts := []core.ServicePort{ - { - Name: "http", - Port: 80, - TargetPort: intstr.FromInt(80), - Protocol: "TCP", - }, - } - - labels = map[string]string{ - "app": "test-loadbalancer", - } - annotations = map[string]string{ - annLinodeHealthCheckPassive: checkPassive, - annLinodeHealthCheckType: checkType, - } - - By("Creating Pod") - createPodWithLabel(pods, ports, "nginx", labels, false) - - By("Creating Service") - createServiceWithAnnotations(labels, annotations, servicePorts, false) - }) - - AfterEach(func() { - By("Deleting the Pods") - deletePods(pods) - - By("Deleting the Service") - deleteService() - }) - - It("should successfully check the health of 2 nodes", func() { - By("Checking NodeBalancer Configurations") - checkNodeBalancerConfigForPort(80, checkArgs{ - checkType: checkType, - checkPassive: checkPassive, - checkNodes: true, - }) - }) - }) - - Context("For HTTP Status Health Check", func() { - 
var ( - pods []string - labels map[string]string - annotations map[string]string - - checkType = "http" - path = "/" - ) - BeforeEach(func() { - pods = []string{"test-pod-http-status"} - ports := []core.ContainerPort{ - { - Name: "http", - ContainerPort: 80, - }, - } - servicePorts := []core.ServicePort{ - { - Name: "http", - Port: 80, - TargetPort: intstr.FromInt(80), - Protocol: "TCP", - }, - } - - labels = map[string]string{ - "app": "test-loadbalancer", - } - annotations = map[string]string{ - annLinodeHealthCheckType: checkType, - annLinodeCheckPath: path, - annLinodeDefaultProtocol: "http", - } - - By("Creating Pod") - createPodWithLabel(pods, ports, "nginx", labels, false) - - By("Creating Service") - createServiceWithAnnotations(labels, annotations, servicePorts, false) - }) - - AfterEach(func() { - By("Deleting the Pods") - deletePods(pods) - - By("Deleting the Service") - deleteService() - }) - - It("should successfully check the health of 2 nodes", func() { - By("Checking NodeBalancer Configurations") - checkNodeBalancerConfigForPort(80, checkArgs{ - checkType: checkType, - path: path, - checkNodes: true, - }) - }) - }) - }) - }) -}) diff --git a/e2e/test/ccm_suite_test.go b/e2e/test/ccm_suite_test.go deleted file mode 100644 index 8f5c9ca8..00000000 --- a/e2e/test/ccm_suite_test.go +++ /dev/null @@ -1,113 +0,0 @@ -package test - -import ( - "e2e_test/test/framework" - "flag" - "os" - "path/filepath" - "testing" - "time" - - "github.com/linode/linodego" - - "github.com/appscode/go/crypto/rand" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/tools/clientcmd" - - . "github.com/onsi/ginkgo/v2" - . "github.com/onsi/gomega" -) - -var ( - useExisting = false - reuse = false - clusterName string - region = "us-east" - k8s_version string - linodeURL = "https://api.linode.com" -) - -func init() { - flag.StringVar(&framework.Image, "image", framework.Image, "registry/repository:tag") - flag.StringVar(&framework.ApiToken, "api-token", os.Getenv("LINODE_API_TOKEN"), "linode api token") - flag.BoolVar(&reuse, "reuse", reuse, "Create a cluster and continue to use it") - flag.BoolVar(&useExisting, "use-existing", useExisting, "Use an existing kubernetes cluster") - flag.StringVar(&framework.KubeConfigFile, "kubeconfig", os.Getenv("TEST_KUBECONFIG"), "To use existing cluster provide kubeconfig file") - flag.StringVar(®ion, "region", region, "Region to create load balancers") - flag.StringVar(&k8s_version, "k8s_version", k8s_version, "k8s_version for child cluster") - flag.DurationVar(&framework.Timeout, "timeout", 5*time.Minute, "Timeout for a test to complete successfully") - flag.StringVar(&linodeURL, "linode-url", linodeURL, "The Linode API URL to send requests to") -} - -const ( - TIMEOUT = 5 * time.Minute -) - -var root *framework.Framework - -func TestE2e(t *testing.T) { - RegisterFailHandler(Fail) - SetDefaultEventuallyTimeout(framework.Timeout) - RunSpecs(t, "e2e Suite") -} - -var getLinodeClient = func() *linodego.Client { - linodeClient := linodego.NewClient(nil) - linodeClient.SetToken(framework.ApiToken) - linodeClient.SetBaseURL(linodeURL) - return &linodeClient -} - -var _ = BeforeSuite(func() { - if reuse { - clusterName = "ccm-linode-for-reuse" - } else { - clusterName = rand.WithUniqSuffix("ccm-linode") - } - - dir, err := os.Getwd() - Expect(err).NotTo(HaveOccurred()) - kubeConfigFile := filepath.Join(dir, clusterName+".conf") - - if reuse { - if _, err := os.Stat(kubeConfigFile); !os.IsNotExist(err) { - useExisting = true - framework.KubeConfigFile = kubeConfigFile - } - 
} - - if !useExisting { - err := framework.CreateCluster(clusterName, region, k8s_version) - Expect(err).NotTo(HaveOccurred()) - framework.KubeConfigFile = kubeConfigFile - } - - By("Using kubeconfig from " + framework.KubeConfigFile) - config, err := clientcmd.BuildConfigFromFlags("", framework.KubeConfigFile) - Expect(err).NotTo(HaveOccurred()) - - // Clients - kubeClient := kubernetes.NewForConfigOrDie(config) - linodeClient := getLinodeClient() - - // Framework - root = framework.New(config, kubeClient, *linodeClient) - - By("Using Namespace " + root.Namespace()) - err = root.CreateNamespace() - Expect(err).NotTo(HaveOccurred()) -}) - -var _ = AfterSuite(func() { - if !(useExisting || reuse) { - By("Deleting cluster") - err := framework.DeleteCluster(clusterName) - Expect(err).NotTo(HaveOccurred()) - } else { - By("Deleting Namespace " + root.Namespace()) - err := root.DeleteNamespace() - Expect(err).NotTo(HaveOccurred()) - - By("Not deleting cluster") - } -}) diff --git a/e2e/test/framework/cluster.go b/e2e/test/framework/cluster.go deleted file mode 100644 index e40676d2..00000000 --- a/e2e/test/framework/cluster.go +++ /dev/null @@ -1,9 +0,0 @@ -package framework - -func CreateCluster(cluster, region, k8s_version string) error { - return RunScript("create_cluster.sh", ApiToken, cluster, Image, k8s_version, region) -} - -func DeleteCluster(clusterName string) error { - return RunScript("delete_cluster.sh", clusterName) -} diff --git a/e2e/test/framework/framework.go b/e2e/test/framework/framework.go deleted file mode 100644 index a54491e2..00000000 --- a/e2e/test/framework/framework.go +++ /dev/null @@ -1,90 +0,0 @@ -package framework - -import ( - "fmt" - "time" - - "github.com/appscode/go/crypto/rand" - "github.com/linode/linodego" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/rest" -) - -var ( - Image = "linode/linode-cloud-controller-manager:latest" - ApiToken = "" - Timeout time.Duration - - KubeConfigFile = "" - TestServerResourceName = "e2e-test-server-" + rand.Characters(5) -) - -const ( - MaxRetry = 100 - TestServerImage = "appscode/test-server:2.3" -) - -type Framework struct { - restConfig *rest.Config - kubeClient kubernetes.Interface - namespace string - name string - - linodeClient linodego.Client -} - -func generateNamespaceName() string { - return rand.WithUniqSuffix("ccm") -} - -func New( - restConfig *rest.Config, - kubeClient kubernetes.Interface, - linodeClient linodego.Client, -) *Framework { - return &Framework{ - restConfig: restConfig, - kubeClient: kubeClient, - linodeClient: linodeClient, - - name: "cloud-controller-manager", - namespace: generateNamespaceName(), - } -} - -func (f *Framework) Invoke() *Invocation { - r := &rootInvocation{ - Framework: f, - app: rand.WithUniqSuffix("csi-driver-e2e"), - } - return &Invocation{ - rootInvocation: r, - LoadBalancer: &lbInvocation{rootInvocation: r}, - } -} - -func (f *Framework) Recycle() error { - if err := f.DeleteNamespace(); err != nil { - return fmt.Errorf("failed to delete namespace (%s)", f.namespace) - } - - f.namespace = generateNamespaceName() - if err := f.CreateNamespace(); err != nil { - return fmt.Errorf("failed to create namespace (%s)", f.namespace) - } - return nil -} - -type Invocation struct { - *rootInvocation - LoadBalancer *lbInvocation -} - -type rootInvocation struct { - *Framework - app string -} - -type lbInvocation struct { - *rootInvocation -} diff --git a/e2e/test/framework/loadbalancer_suite.go b/e2e/test/framework/loadbalancer_suite.go deleted file mode 100644 index 
d5a6d186..00000000 --- a/e2e/test/framework/loadbalancer_suite.go +++ /dev/null @@ -1,86 +0,0 @@ -package framework - -import ( - "context" - "fmt" - - "github.com/linode/linodego" -) - -func (i *lbInvocation) GetNodeBalancerFromService(svcName string, checkIP bool) (*linodego.NodeBalancer, error) { - ingress, err := i.getServiceIngress(svcName, i.Namespace()) - if err != nil { - return nil, err - } - hostname := ingress[0].Hostname - ip := ingress[0].IP - nbList, errListNodeBalancers := i.linodeClient.ListNodeBalancers(context.Background(), nil) - if errListNodeBalancers != nil { - return nil, fmt.Errorf("Error listingNodeBalancer for hostname %s: %s", hostname, errListNodeBalancers.Error()) - } - - for _, nb := range nbList { - if *nb.Hostname == hostname { - if checkIP { - if *nb.IPv4 == ip { - return &nb, nil - } else { - return nil, fmt.Errorf("IPv4 for Nodebalancer (%s) does not match IP (%s) for service %v", *nb.IPv4, ip, svcName) - } - } - return &nb, nil - } - } - return nil, fmt.Errorf("no NodeBalancer Found for service %v", svcName) -} - -func (i *lbInvocation) GetNodeBalancerID(svcName string) (int, error) { - nb, err := i.GetNodeBalancerFromService(svcName, false) - if err != nil { - return -1, err - } - return nb.ID, nil -} - -func (i *lbInvocation) GetNodeBalancerConfig(svcName string) (*linodego.NodeBalancerConfig, error) { - id, err := i.GetNodeBalancerID(svcName) - if err != nil { - return nil, err - } - nbcList, err := i.linodeClient.ListNodeBalancerConfigs(context.Background(), id, nil) - if err != nil { - return nil, err - } - return &nbcList[0], nil -} - -func (i *lbInvocation) GetNodeBalancerConfigForPort(svcName string, port int) (*linodego.NodeBalancerConfig, error) { - id, err := i.GetNodeBalancerID(svcName) - if err != nil { - return nil, err - } - nbConfigs, err := i.linodeClient.ListNodeBalancerConfigs(context.Background(), id, nil) - if err != nil { - return nil, err - } - - for _, config := range nbConfigs { - if config.Port == port { - return &config, nil - } - } - return nil, fmt.Errorf("NodeBalancerConfig for port %d was not found", port) -} - -func (i *lbInvocation) GetNodeBalancerUpNodes(svcName string) (int, error) { - id, err := i.GetNodeBalancerID(svcName) - if err != nil { - return 0, err - } - nbcList, err := i.linodeClient.ListNodeBalancerConfigs(context.Background(), id, nil) - if err != nil { - return 0, err - } - nb := &nbcList[0] - return nb.NodesStatus.Up, nil -} diff --git a/e2e/test/framework/namespace.go b/e2e/test/framework/namespace.go deleted file mode 100644 index c95207d6..00000000 --- a/e2e/test/framework/namespace.go +++ /dev/null @@ -1,26 +0,0 @@ -package framework - -import ( - "context" - - core "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -func (f *Framework) Namespace() string { - return f.namespace -} - -func (f *Framework) CreateNamespace() error { - obj := &core.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: f.namespace, - }, - } - _, err := f.kubeClient.CoreV1().Namespaces().Create(context.TODO(), obj, metav1.CreateOptions{}) - return err -} - -func (f *Framework) DeleteNamespace() error { - return f.kubeClient.CoreV1().Namespaces().Delete(context.TODO(), f.namespace, deleteInForeground()) -} diff --git a/e2e/test/framework/node.go b/e2e/test/framework/node.go deleted file mode 100644 index 2ac0ad55..00000000 --- a/e2e/test/framework/node.go +++ /dev/null @@ -1,26 +0,0 @@ -package framework - -import ( - "context" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -const ( - masterLabel 
= "node-role.kubernetes.io/master" -) - -func (i *Invocation) GetNodeList() ([]string, error) { - workers := make([]string, 0) - nodes, err := i.kubeClient.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) - if err != nil { - return nil, err - } - - for _, node := range nodes.Items { - if _, found := node.ObjectMeta.Labels[masterLabel]; !found { - workers = append(workers, node.Name) - } - } - return workers, nil -} diff --git a/e2e/test/framework/pod.go b/e2e/test/framework/pod.go deleted file mode 100644 index 46f307d7..00000000 --- a/e2e/test/framework/pod.go +++ /dev/null @@ -1,56 +0,0 @@ -package framework - -import ( - "context" - - core "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -func (i *lbInvocation) GetPodObject(podName, image string, ports []core.ContainerPort, labels map[string]string) *core.Pod { - return &core.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: podName, - Namespace: i.Namespace(), - Labels: labels, - }, - Spec: core.PodSpec{ - Containers: []core.Container{ - { - Name: "server", - Image: image, - Env: []core.EnvVar{ - { - Name: "POD_NAME", - ValueFrom: &core.EnvVarSource{ - FieldRef: &core.ObjectFieldSelector{ - FieldPath: "metadata.name", - }, - }, - }, - }, - Ports: ports, - }, - }, - }, - } -} - -func (i *lbInvocation) SetNodeSelector(pod *core.Pod, nodeName string) *core.Pod { - pod.Spec.NodeSelector = map[string]string{ - "kubernetes.io/hostname": nodeName, - } - return pod -} - -func (i *lbInvocation) CreatePod(pod *core.Pod) (*core.Pod, error) { - return i.kubeClient.CoreV1().Pods(i.Namespace()).Create(context.TODO(), pod, metav1.CreateOptions{}) -} - -func (i *lbInvocation) DeletePod(name string) error { - return i.kubeClient.CoreV1().Pods(i.Namespace()).Delete(context.TODO(), name, deleteInForeground()) -} - -func (i *lbInvocation) GetPod(name, ns string) (*core.Pod, error) { - return i.kubeClient.CoreV1().Pods(ns).Get(context.TODO(), name, metav1.GetOptions{}) -} diff --git a/e2e/test/framework/secret.go b/e2e/test/framework/secret.go deleted file mode 100644 index 1d761aa3..00000000 --- a/e2e/test/framework/secret.go +++ /dev/null @@ -1,117 +0,0 @@ -package framework - -import ( - "context" - - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -const ( - serverCert = `-----BEGIN CERTIFICATE----- -MIIFvTCCA6WgAwIBAgIUBpS47ArkUC0MXYK3LvXU3eRh/CowDQYJKoZIhvcNAQEL -BQAwUjELMAkGA1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoM -GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDELMAkGA1UEAwwCY2EwHhcNMjQxMTE1 -MTcxNjI1WhcNMjcxMTE1MTcxNjI1WjByMQswCQYDVQQGEwJVUzELMAkGA1UECAwC -UEExFTATBgNVBAcMDFBoaWxhZGVscGhpYTETMBEGA1UECgwKTGlub2RlIExMQzEU -MBIGA1UECwwLTGlub2RlIFRlc3QxFDASBgNVBAMMC2xpbm9kZS50ZXN0MIICIjAN -BgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA27JxXbiT+0aptSvE2uFakJQf+qwT -5mGFCNaQFRBDdxxLTUF6UyglZflT9KdVtJ9kmsyZj9vhFnxszWnoUK1Y/runOYTf -JlNBVp28fO43HrUtaHFCZncvu0C4Tdc09721p2pP5nhgXv8BtZeDAVY/hjSIGgP1 -1WNLSWP2eZn4+q4hr7iUqVqLRYVz5e489b1sEXpCiSDWuq6GWRzvEBquHX0F82mW -84DMfa2TrcG4bw0i0r4nKWcgB3at7sR32DvEPFsFiEreFgNsx7b1KcG+ngzA3ZKL -9bviQKSLjjn48VPoV/w5lT3PYGIAjwu2tbNY8J6dUcni4aHnIwhwBFVb5299eIEC -nccueVExw8LtXBYOUKT4A8doKy3ZBq4B+WY8N0QhE6H8tuLrAl6IUh8rduuvJc38 -+QIDD6IKr58zuest6q0/lNvjruOfUMa+EsBPX795wyDuqL4tUyfySyUyYNXcQ4ip -2nFTBYXoB75jLsXHULhOC+7AbxzWeM76mjeNgKzUJaz+1EUMLYOSsfiYFMlWfoiL -ilf7WMdR3bLHccFAA/Qg3CZETU/B20amYDI/+0TvY1td01gzoUx3UjDPB6mpntgr -DoTISDNAvZgPOt9ebs7AEM6/iHgIQtAnCQULTzQ48i3WZlpPYb2IeWOsNCXiOZPN -+STXedL5M3IUwUcCAwEAAaNrMGkwJwYDVR0RBCAwHoILbGlub2RlLnRlc3SCD3d3 
-dy5saW5vZGUudGVzdDAdBgNVHQ4EFgQUgNqzhL/JpxllvFu18qvlg/usDrEwHwYD -VR0jBBgwFoAUC2AMOf90/zpuQ588rPLfe7EukIUwDQYJKoZIhvcNAQELBQADggIB -AL38v8A0Yfi3Qcr7JtMJ+EOgiHo+W1PW05CAKrswqIZGb9pLwcc46N1ICX4/wItH -DfOmiLHEJ+eEaf07XWy1G+orvqsz6FLh2lfr1cne2DH1udiBXw2VyHDeaighgqTX -rHPcV9lLPcRgQgE8AC2WSn3Rmjd4eU+twlqYcJTLt3cy+TulwXxGBjn7CSmRamRA -AaURnVpsMhw9baINrN6+3zbjw1LKpMO3JfPx9NPw0iUYYbUWFMli2RTEwdR0o9Fu -Om6ogyYHHLTUDv2+cHYY4TKJ0LGz9PGB3iwdGbSSpLadjV7xkFERio5B4o/FedLB -CuECSIoWqjScSrVWjpIpG6b7LVkuDI7ZrZ6Rvkwcv4Zezx5TkynQUw9EezEgGRQf -RiBSKoPGKJfRGiYGNXDjqENX3kxqt5cuVe/Z0czrb+2zOMfaTZwJtp2rrJqckxBh -CK4CXQz2nsfGRW/lyJ1Jyc+ul0obXXhynDBA9dE5woCIwgTCRL9M0ZOHjoQi1tDh -27i0j4YzIvlIDIi6iex/XVZi9mhuRvDR7f7c5RVpHsu38znCLyQetFnwOQOmIVZI -lEUQvU1Jnk+e5+RqvOcZ0ZcLppBa71XjUdYm56mzY1ph04n1VUO4rmaI3wNBETGd -jJ3K7XuBBL/YT+02AzsZR/0fiHLdA9DbLUdhtRs0mb5u ------END CERTIFICATE-----` - serverKey = `-----BEGIN RSA PRIVATE KEY----- -MIIJKAIBAAKCAgEA27JxXbiT+0aptSvE2uFakJQf+qwT5mGFCNaQFRBDdxxLTUF6 -UyglZflT9KdVtJ9kmsyZj9vhFnxszWnoUK1Y/runOYTfJlNBVp28fO43HrUtaHFC -Zncvu0C4Tdc09721p2pP5nhgXv8BtZeDAVY/hjSIGgP11WNLSWP2eZn4+q4hr7iU -qVqLRYVz5e489b1sEXpCiSDWuq6GWRzvEBquHX0F82mW84DMfa2TrcG4bw0i0r4n -KWcgB3at7sR32DvEPFsFiEreFgNsx7b1KcG+ngzA3ZKL9bviQKSLjjn48VPoV/w5 -lT3PYGIAjwu2tbNY8J6dUcni4aHnIwhwBFVb5299eIECnccueVExw8LtXBYOUKT4 -A8doKy3ZBq4B+WY8N0QhE6H8tuLrAl6IUh8rduuvJc38+QIDD6IKr58zuest6q0/ -lNvjruOfUMa+EsBPX795wyDuqL4tUyfySyUyYNXcQ4ip2nFTBYXoB75jLsXHULhO -C+7AbxzWeM76mjeNgKzUJaz+1EUMLYOSsfiYFMlWfoiLilf7WMdR3bLHccFAA/Qg -3CZETU/B20amYDI/+0TvY1td01gzoUx3UjDPB6mpntgrDoTISDNAvZgPOt9ebs7A -EM6/iHgIQtAnCQULTzQ48i3WZlpPYb2IeWOsNCXiOZPN+STXedL5M3IUwUcCAwEA -AQKCAgBgau3p7cm0K4zrX+wjC2fNr9RhFQgewYm7GT9enyacraQ2oZfnyuSu3j+E -TbQFczaZ4VU7l4ovbifp9qLoVUuLcBux2Kh+j2dLdip0wa8bIPRus9YqVgBys7Kv -JtWuLGn+sV+jjAzvZAcCBR6PhaSXZ5KbqEVJgyxVZzOSpopoqedK0T0dHgmlVy5I -KMhEKP+2o+tzdyAGCfYYQeSBMtRbSLVF4H9JGqukNHttdGlXA3LW/nD9cK7T17f5 -4+uc0I4M1v2UlRbmnlYtSBRMYSUhBAPYuioGjJB9QjmlD7g7YVHE24MCBoBuklQg -c0macL2FzHbKoEmcMIvaCifvHu8X0J5qjZghmi7Zozh/Skg9B4XINdHpX7vX7INZ -A7z2nx5x4xaNPO3hJJJkbpCcpSIEQkuqe8a/GYcn0tTMTqoGXr/OFz+ut1ZzZThs -YL8YWh2SqVOzR8xJE3cR9qd/ISTl1CPrxWyWm3eOZ0WGOKZTzUIN3p8gcDIDucs4 -kXGDCh7tj7EsYWpa0fnEp5n8kupLWPY050aal898xPP4RDNQFx/VdDBfa/PVKKMy -OzXFq801UoOdF9d6FR3p3YS5O0Zd8UILJQui3s2dpY6/BzuWa2ch9PwvEFI8rsT6 -8VxRCEG9gJxA/GSV/ZNU4hH3Tiv7fSG/aED/uUSvI/t7AWgQgQKCAQEA+Xrshwnt -Cp0cDdkHde/0WnT3DUEvYM0tlJY6z1YR5Kx0GL4zR+yhBuTfmgCMsbkNLvHsc3Us -UbwM4OSAD0oHMa6LCYer6fiYWfv4c19gCtLCZhjBPYHSwXGaQxdjiEE4N6J+mnPW -n39DCjXhl//WlatbLkZRbGYnbORfcE2Kx72OAJt2ujp0Jr/Loi1px6KMbKnzhEhy -mI6FPejx1h8KC5xlCq6faUnal1ZvdNc5WkxtZ1YOCzaKbVuGEok3bFK986aSYYlP -AI4SMo0M/Sy/5tlb9CL5H8s4Dbz35CRyKmXYMQYeGtJ/7HTSdrU7qcp4EZTu5RVX -1xtq6S+w4/V3JwKCAQEA4XBDaxw2B5ica9xxTAzzq7H9QtGgtYaBIQmkBVqVvoDs -ywGbe7ueJFY7id2rWdeDB7Nxt6feoTuoyXmA3YYAeUBQZGtLKc3MZfdIFJt6yM1D -6FZyITwo0Zl6ShPxIYsc94BRA7YzmQWaucByrRFLX+y463u2UGqD9s3aPZm921mb -oweIkEQiD2lJNqhx0gRphN+Le+0z7Gh+1ZxI8XikSIkuQ+nvuh5zQA/lqmWr4E9m -EICTP6D5lvJj3EpKZ1pUgHvPEy/fyUq+i7nu0hS394blI6amv2iwmrLhe2NafCHu -+Nux305uO8jqHzEl+l1CvGf0BqNXCM3x5CgLMJW44QKCAQBpmRpc3lqzT2T8h4yc -4wBu+WtI9Pp04uQULLKf6DKStFw/zOIv430VSfNLYEgtQcLOyB/pjwM/ZXWeC5oY -3qDE6rh3RDIESvFRxVGYpBom+qbGSFwjCLyInOlK1K+QkOqWwfUMs1N5F4js3Xmr -uOK/X1Ss9Z6pX2P4t4GeK3Q+r4FXyHYsxWk8rZon/0jy81608ArfRzsaT9keJ2eV -1nWODJjIOLnI+zXHMRLkReVEz2zPfKFdJazaNQ8+8U3AUBWO+EalelUySvBw7Ts+ -Pp7Lu90sLVF9n6sORZo3uyWHxKwJtCkx+T+kep5LGNM0PzsrVfr4hFw19KkAIuug -0dmpAoIBAQCbbix9b+DskdLfJwjSV2e1bC1iYWe9YDQtlBkLO+5cf0VDniMWRz/8 -a5v3LOdUNRt5NsZjypDbd2ejKWuo0BgJgUcsRTF4bBTOBJUk6CHaynNUgC2GLpUy 
-FfBTnLY221QobMbumTOwAEYyZbZrDq56P5sreIs1nIrJohojOJnG31xIJgyI8wDM -wVmiHrcDBtm9q+belaekClPQcUV1fyk9fZ9xYZxQJWhutccyGZFMQVHsdMmRKCqN -YSdqnan44jW6tCIMZ4iSnz8K1TIMlA5W0iGv19nFxKdmsYh26wRa64Z4+/gCL3Af -NiH9SYSWvrAheEauQPXj8yIgnV9BqyjhAoIBAA0NGugiXqloQD4tKFYROZ2rm1kx -IlbC5rVePSeMz59Qty79dODAvGuJxOb/vKOlQqcULfgidpctBdtZJ/oencwOf/49 -e0R5uYpvsxyvAro5OKxk0SD2YSgkdBf8gF5+opG6ZjcBcRk3jp8cdYDTIpViJco5 -IJwbMqoWpJxuilj0imxDNQPPoN6yf3mkD2tyYp2YL9X5bgSB58l1LCBJDdJDC4tR -rrXq0Btn9jpwwW/AJ6mIFWWGQKDpkGhLRHxOOK4dC+XgbkEogDSOlZDOEALLvFI9 -OVIIxvytGW/Qy6AEzsMnsTPUJMyPsktCQ2YI628dytmqXOniZe1QQ2R7dzw= ------END RSA PRIVATE KEY-----` -) - -func (i *lbInvocation) CreateTLSSecret(secretName string) (err error) { - secret := &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: secretName, - }, - Data: map[string][]byte{ - corev1.TLSCertKey: []byte(serverCert), - corev1.TLSPrivateKeyKey: []byte(serverKey), - }, - Type: corev1.SecretTypeTLS, - } - - _, err = i.kubeClient.CoreV1().Secrets(i.Namespace()).Create(context.TODO(), secret, metav1.CreateOptions{}) - - return err -} - -func (i *lbInvocation) DeleteSecret(name string) error { - err := i.kubeClient.CoreV1().Secrets(i.Namespace()).Delete(context.TODO(), name, metav1.DeleteOptions{}) - return err -} diff --git a/e2e/test/framework/service.go b/e2e/test/framework/service.go deleted file mode 100644 index e1c1d8be..00000000 --- a/e2e/test/framework/service.go +++ /dev/null @@ -1,137 +0,0 @@ -package framework - -import ( - "context" - "fmt" - - core "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/watch" - "k8s.io/client-go/util/retry" -) - -func (i *lbInvocation) createOrUpdateService(selector, annotations map[string]string, ports []core.ServicePort, isSessionAffinityClientIP, isCreate bool) error { - var sessionAffinity core.ServiceAffinity = "None" - if isSessionAffinityClientIP { - sessionAffinity = "ClientIP" - } - svc := &core.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: TestServerResourceName, - Namespace: i.Namespace(), - Annotations: annotations, - Labels: map[string]string{ - "app": "test-server-" + i.app, - }, - }, - Spec: core.ServiceSpec{ - Ports: ports, - Selector: selector, - Type: core.ServiceTypeLoadBalancer, - SessionAffinity: sessionAffinity, - }, - } - - service := i.kubeClient.CoreV1().Services(i.Namespace()) - if isCreate { - _, err := service.Create(context.TODO(), svc, metav1.CreateOptions{}) - if err != nil { - return err - } - } else { - if err := retry.RetryOnConflict(retry.DefaultRetry, func() error { - options := metav1.GetOptions{} - resource, err := service.Get(context.TODO(), TestServerResourceName, options) - if err != nil { - return err - } - svc.ObjectMeta.ResourceVersion = resource.ResourceVersion - svc.Spec.ClusterIP = resource.Spec.ClusterIP - _, err = service.Update(context.TODO(), svc, metav1.UpdateOptions{}) - return err - }); err != nil { - return err - } - } - return nil -} - -func (i *lbInvocation) GetServiceWatcher() (watch.Interface, error) { - var timeoutSeconds int64 = 30 - watcher, err := i.kubeClient.CoreV1().Events(i.Namespace()).Watch(context.TODO(), metav1.ListOptions{ - FieldSelector: "involvedObject.kind=Service", - Watch: true, - TimeoutSeconds: &timeoutSeconds, - }) - if err != nil { - return nil, err - } - return watcher, nil -} - -func (i *lbInvocation) GetService() (*core.Service, error) { - return i.kubeClient.CoreV1().Services(i.Namespace()).Get(context.TODO(), TestServerResourceName, metav1.GetOptions{}) -} - -func (i *lbInvocation) 
CreateService(selector, annotations map[string]string, ports []core.ServicePort, isSessionAffinityClientIP bool) error { - return i.createOrUpdateService(selector, annotations, ports, isSessionAffinityClientIP, true) -} - -func (i *lbInvocation) UpdateService(selector, annotations map[string]string, ports []core.ServicePort, isSessionAffinityClientIP bool) error { - err := i.deleteEvents() - if err != nil { - return err - } - return i.createOrUpdateService(selector, annotations, ports, isSessionAffinityClientIP, false) -} - -func (i *lbInvocation) DeleteService() error { - return i.kubeClient.CoreV1().Services(i.Namespace()).Delete(context.TODO(), TestServerResourceName, metav1.DeleteOptions{}) -} - -func (i *lbInvocation) GetServiceEndpoints() ([]core.EndpointAddress, error) { - ep, err := i.kubeClient.CoreV1().Endpoints(i.Namespace()).Get(context.TODO(), TestServerResourceName, metav1.GetOptions{}) - if err != nil { - return nil, err - } - if len(ep.Subsets) == 0 { - return nil, fmt.Errorf("No service endpoints found for %s", TestServerResourceName) - } - return ep.Subsets[0].Addresses, err -} - -func (i *lbInvocation) deleteEvents() error { - return i.kubeClient.CoreV1().Events(i.Namespace()).DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{FieldSelector: "involvedObject.kind=Service"}) -} - -func (i *lbInvocation) GetLoadBalancerIps() ([]string, error) { - svc, err := i.kubeClient.CoreV1().Services(i.Namespace()).Get(context.TODO(), TestServerResourceName, metav1.GetOptions{}) - if err != nil { - return nil, err - } - var serverAddr []string - for _, ingress := range svc.Status.LoadBalancer.Ingress { - if len(svc.Spec.Ports) > 0 { - for _, port := range svc.Spec.Ports { - if port.NodePort > 0 { - serverAddr = append(serverAddr, fmt.Sprintf("%s:%d", ingress.IP, port.Port)) - } - } - } - } - if serverAddr == nil { - return nil, fmt.Errorf("failed to get Status.LoadBalancer.Ingress for service %s/%s", TestServerResourceName, i.Namespace()) - } - return serverAddr, nil -} - -func (i *lbInvocation) getServiceIngress(name, namespace string) ([]core.LoadBalancerIngress, error) { - svc, err := i.kubeClient.CoreV1().Services(namespace).Get(context.TODO(), name, metav1.GetOptions{}) - if err != nil { - return nil, err - } - if svc.Status.LoadBalancer.Ingress == nil { - return nil, fmt.Errorf("Status.LoadBalancer.Ingress is empty for %s", name) - } - return svc.Status.LoadBalancer.Ingress, nil -} diff --git a/e2e/test/framework/util.go b/e2e/test/framework/util.go deleted file mode 100644 index 31379256..00000000 --- a/e2e/test/framework/util.go +++ /dev/null @@ -1,180 +0,0 @@ -package framework - -import ( - "context" - "crypto/tls" - "crypto/x509" - "io" - "log" - "net" - "net/http" - "os" - "os/exec" - "path" - "strings" - "time" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -const ( - scriptDirectory = "scripts" - RetryInterval = 5 * time.Second - RetryTimeout = 15 * time.Minute - caCert = `-----BEGIN CERTIFICATE----- -MIIFejCCA2KgAwIBAgIJAN7D2Ju254yUMA0GCSqGSIb3DQEBCwUAMFIxCzAJBgNV -BAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEwHwYDVQQKDBhJbnRlcm5ldCBX -aWRnaXRzIFB0eSBMdGQxCzAJBgNVBAMMAmNhMB4XDTE5MDQwOTA5MzYxNFoXDTI5 -MDQwNjA5MzYxNFowUjELMAkGA1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUx -ITAfBgNVBAoMGEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDELMAkGA1UEAwwCY2Ew -ggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDoTwE1kijjrhCcGXSPyHlf -7NngxPCFuFqVdRvG4DrrdL7YW3iEovAXTbuoyiPpF/U9T5BfDVs2dCEHGlpiOADR -tA/Z5mFbVcefOCBL+rL2sTN2o19U7eimcZjH1xN1L5j2RkYmRAoI+nwG/g5NehOu 
-YM930oPqe3vOYevOHBCebHuKc7zaM31AtKcDG0IjIJ1ZdJy91+rx8Prb+IxTIKZl -Ca/e0e6iZWCPp5kaJyNUGZkjjcRVzFM79xVf34DEuS+N1RZP7EevM0bfHehJfSpU -M6gfsrL9WctD0nGJd2YsH9hLCub2G7emgiV7dvN1R0QW9ijguwZ9aBemiat5AnGs -QHSR+WRijZNjHTWY4DEaTNWecDd2Tz37RNN9Ow8FThERwZVnpji1kcijEg4g7Ppy -9P6tdavjkFVW0xOieInjS/m5Bxj2a44UT1JshNr1M4HGXvqUcCFS4vhytIc05lOv -X20NR+C+RgNy7G14Hz/3+qRo9hlkonyTJAoU++2vgsaNmmhcU6fGgYpARHm1Y675 -pGrgZAcjFcsG84q0dSdr6AeY+6+1UyS6pktBobXIiciSPmseHJ24dRd06OYQMxQ3 -ccOZhZ3cNy8OMT9eUwcjnif36BVmZdCObJexqXq/cSVX3IhhaQhLLfN9ZyGDkxWl -N5ehRMCabgv3mQCDd/9HMwIDAQABo1MwUTAdBgNVHQ4EFgQUC2AMOf90/zpuQ588 -rPLfe7EukIUwHwYDVR0jBBgwFoAUC2AMOf90/zpuQ588rPLfe7EukIUwDwYDVR0T -AQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAgEAHopjHkeIciVtlAyAPEfh/pnf -r91H1aQMPmHisqlveM3Bz9MOIa9a26YO+ZzCPozALxkJIjdp7L3L8Q8CuLmkC4YV -6nHvSLaC/82UGoiRGyjdFh30puqekWMZ62ZrQLpCr0DzOJrarslLM0fONqpjDTWP -8OXyRcnVSbFB1n5XUoviMTTxYOQ3HQe8b3Tt7GO/9w6dWkkSX1Vy4RmzNt7fb9K5 -mxu/n+SVu+2iQX9oEWq2rpvsD3RGnhewCPlZU8NQYKb72K00kEcG/J+WU1IPtkq0 -JaU5TDMMzfp3PMYxCzYD9pdM8J0N0zJac2t9hkx7H83jy/TfLrmDvB6nCK8N3+6j -8In6RwYw4XJ41AWsJpGXBpvYCq5GJjdogEi9IaBXSmtVPYm0NURYbephk+Wg0oyk -ESk4cyWUhYG8mcMyORc8lzOQ79YT6A5QnitTGCVQGTlnNRjevtfhAFEXr9e8UZFq -oWtfEdltH6ElGDpivwuOERAN9v3GoPlifpo1UDElnPJft+C0cRv0YpPwvwJTy1MU -q1op/4Z/7SHzFWTSyRZqvI41AsLImylzfZ0w9U8sogd4pHv30kGc9+LhqrsfLDvK -9XedVoWJx/x3i8BUhVDyd4FyVWHCf9N/6a9HzbFWT8QZTBk5pErTaFiTi5TQxoi7 -ER4ILjvRX7mLWUGhN58= ------END CERTIFICATE-----` - Domain = "linode.test" -) - -func RunScript(script string, args ...string) error { - wd, err := os.Getwd() - if err != nil { - return err - } - - return runCommand(path.Join(wd, scriptDirectory, script), args...) -} - -func runCommand(cmd string, args ...string) error { - c := exec.Command(cmd, args...) - c.Stdout = os.Stdout - c.Stderr = os.Stderr - log.Printf("Running command %q\n", cmd) - return c.Run() -} - -func deleteInForeground() metav1.DeleteOptions { - policy := metav1.DeletePropagationForeground - graceSeconds := int64(0) - return metav1.DeleteOptions{ - PropagationPolicy: &policy, - GracePeriodSeconds: &graceSeconds, - } -} - -func getHTTPSResponse(domain, ip, port string) (string, error) { - rootCAs, _ := x509.SystemCertPool() - if rootCAs == nil { - rootCAs = x509.NewCertPool() - } - - if ok := rootCAs.AppendCertsFromPEM([]byte(caCert)); !ok { - log.Println("No certs appended, using system certs only") - } - - config := &tls.Config{ - RootCAs: rootCAs, - } - - dialer := &net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - DualStack: true, - } - dialContext := func(ctx context.Context, network, addr string) (net.Conn, error) { - if addr == domain+":"+port { - addr = ip + ":" + port - } - return dialer.DialContext(ctx, network, addr) - } - - tr := &http.Transport{ - TLSClientConfig: config, - DialContext: dialContext, - } - client := &http.Client{Transport: tr} - - log.Println("Waiting for response from https://" + ip + ":" + port) - u := "https://" + domain + ":" + port - req, err := http.NewRequest(http.MethodGet, u, nil) - if err != nil { - return "", err - } - resp, err := client.Do(req) - if err != nil { - return "", err - } - defer resp.Body.Close() - - bodyBytes, err := io.ReadAll(resp.Body) - if err != nil { - return "", err - } - bodyString := string(bodyBytes) - - return bodyString, nil -} - -func WaitForHTTPSResponse(link string) (string, error) { - hostPort := strings.Split(link, ":") - host, port := hostPort[0], hostPort[1] - - resp, err := getHTTPSResponse(Domain, host, port) - if err != nil { - return "", err - } - return resp, nil 
-} - -func getHTTPResponse(link string) (bool, string, error) { - resp, err := http.Get("http://" + link) - if err != nil { - return false, "", err - } - defer resp.Body.Close() - - bodyBytes, err := io.ReadAll(resp.Body) - if err != nil { - return false, "", err - } - return resp.StatusCode == 200, string(bodyBytes), nil -} - -func WaitForHTTPResponse(link string) (string, error) { - ok, resp, err := getHTTPResponse(link) - if err != nil { - return "", err - } - if ok { - return resp, nil - } - return "", nil -} - -func GetResponseFromCurl(endpoint string) string { - resp, err := exec.Command("curl", "--max-time", "5", "-s", endpoint).Output() - if err != nil { - return "" - } - return string(resp) -} diff --git a/e2e/test/fw-use-specified-nb/chainsaw-test.yaml b/e2e/test/fw-use-specified-nb/chainsaw-test.yaml new file mode 100644 index 00000000..fb0a0148 --- /dev/null +++ b/e2e/test/fw-use-specified-nb/chainsaw-test.yaml @@ -0,0 +1,124 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + name: fw-use-specified-nb +spec: + bindings: + - name: fwname + value: (join('-', ['ccm-fwtest', env('CLUSTER_NAME')])) + namespace: "fw-use-specified-nb" + steps: + - name: Check if CCM is deployed + try: + - assert: + file: ../assert-ccm-resources.yaml + - name: Create firewall, create pods and services + try: + - script: + env: + - name: FWLABEL + value: ($fwname) + content: | + set -e + + create_fw=$(curl -s --write-out "%{http_code}\n" --output /dev/null --request POST \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + -H "Content-Type: application/json" \ + -H "accept: application/json" \ + "https://api.linode.com/v4/networking/firewalls" \ + --data " + { + \"label\": \"$FWLABEL\", + \"rules\": { + \"inbound\": [{ + \"action\": \"ACCEPT\", + \"label\": \"inbound-rule123\", + \"description\": \"inbound rule123\", + \"ports\": \"4321\", + \"protocol\": \"TCP\", + \"addresses\": { + \"ipv4\": [\"0.0.0.0/0\"] + } + }], + \"inbound_policy\": \"ACCEPT\", + \"outbound_policy\": \"ACCEPT\" + } + } + " + ) + + if [[ $create_fw == "200" ]]; then + echo "fw created" + fi check: + ($error == null): true + (contains($stdout, 'fw created')): true + - apply: + file: create-pods-services.yaml + catch: + - describe: + apiVersion: v1 + kind: Pod + - describe: + apiVersion: v1 + kind: Service + - name: Check that loadbalancer ip is assigned + try: + - assert: + resource: + apiVersion: v1 + kind: Service + metadata: + name: svc-test + status: + (loadBalancer.ingress[0].ip != null): true + - name: Annotate service with firewall id + try: + - script: + env: + - name: FWLABEL + value: ($fwname) + content: | + set -e + re='^[0-9]+$' + + fwid=$(curl -s \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + -H "Content-Type: application/json" \ + -H "X-Filter: {\"label\": \"$FWLABEL\"}" \ + "https://api.linode.com/v4/networking/firewalls" | jq .data[].id) + + if ! 
[[ $fwid =~ $re ]]; then
+                echo "Firewall id [$fwid] is incorrect, failed to fetch firewall"
+                exit 1
+              fi
+
+              kubectl annotate svc svc-test -n $NAMESPACE service.beta.kubernetes.io/linode-loadbalancer-firewall-id=$fwid
+              sleep 5
+
+              for i in {1..10}; do
+                nbid=$(KUBECONFIG=$KUBECONFIG NAMESPACE=$NAMESPACE LINODE_TOKEN=$LINODE_TOKEN ../scripts/get-nb-id.sh)
+
+                fwconfig=$(curl -s \
+                  -H "Authorization: Bearer $LINODE_TOKEN" \
+                  -H "Content-Type: application/json" \
+                  "https://api.linode.com/v4/networking/firewalls/$fwid" || true)
+
+                fw_attached_to_nb=$(echo $fwconfig | jq ".entities[] | select(.id == $nbid) | .id == $nbid")
+
+                if [[ $fw_attached_to_nb == "true" ]]; then
+                  echo "Conditions met"
+                  break
+                fi
+
+                sleep 10
+              done
+
+              curl -s -X DELETE \
+                -H "Authorization: Bearer $LINODE_TOKEN" \
+                -H "Content-Type: application/json" \
+                "https://api.linode.com/v4/networking/firewalls/$fwid"
+            check:
+              ($error == null): true
+              (contains($stdout, 'Conditions met')): true
diff --git a/e2e/test/fw-use-specified-nb/create-pods-services.yaml b/e2e/test/fw-use-specified-nb/create-pods-services.yaml
new file mode 100644
index 00000000..00113a2f
--- /dev/null
+++ b/e2e/test/fw-use-specified-nb/create-pods-services.yaml
@@ -0,0 +1,47 @@
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  labels:
+    app: fw-use-specified-nb
+  name: test
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: fw-use-specified-nb
+  template:
+    metadata:
+      labels:
+        app: fw-use-specified-nb
+    spec:
+      containers:
+        - image: appscode/test-server:2.3
+          name: test
+          ports:
+            - name: http-1
+              containerPort: 8080
+              protocol: TCP
+          env:
+            - name: POD_NAME
+              valueFrom:
+                fieldRef:
+                  apiVersion: v1
+                  fieldPath: metadata.name
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: svc-test
+  labels:
+    app: fw-use-specified-nb
+spec:
+  type: LoadBalancer
+  selector:
+    app: fw-use-specified-nb
+  ports:
+    - name: http-1
+      protocol: TCP
+      port: 80
+      targetPort: 8080
+  sessionAffinity: None
diff --git a/e2e/test/lb-created-with-new-nb-id/chainsaw-test.yaml b/e2e/test/lb-created-with-new-nb-id/chainsaw-test.yaml
new file mode 100644
index 00000000..df1a0952
--- /dev/null
+++ b/e2e/test/lb-created-with-new-nb-id/chainsaw-test.yaml
@@ -0,0 +1,121 @@
+# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json
+apiVersion: chainsaw.kyverno.io/v1alpha1
+kind: Test
+metadata:
+  name: lb-created-with-new-nb-id
+spec:
+  namespace: "lb-created-with-new-nb-id"
+  steps:
+    - name: Check if CCM is deployed
+      try:
+        - assert:
+            file: ../assert-ccm-resources.yaml
+    - name: Create nodebalancer and create resources
+      try:
+        - script:
+            outputs:
+              - name: nbconf
+                value: (json_parse($stdout))
+            content: |
+              set -e
+
+              re='^[0-9]+$'
+              LABEL="ccm-$(head /dev/urandom | tr -dc 'a-z0-9' | head -c 5)"
+
+              nbid=$(curl -s --request POST \
+                -H "Authorization: Bearer $LINODE_TOKEN" \
+                -H "Content-Type: application/json" \
+                -H "accept: application/json" \
+                "https://api.linode.com/v4/nodebalancers" \
+                --data "{\"label\": \"$LABEL\", \"region\": \"$REGION\"}" | jq .id)
+
+              if !
[[ $nbid =~ $re ]]; then + echo "Nodebalancer id [$nbid] is incorrect, failed to create nodebalancer" + exit 1 + fi + + echo "{\"id\": \"$nbid\"}" + check: + ($error == null): true + - apply: + file: create-pods-services.yaml + catch: + - describe: + apiVersion: v1 + kind: Pod + - describe: + apiVersion: v1 + kind: Service + - name: Check that loadbalancer ip is assigned + try: + - assert: + resource: + apiVersion: v1 + kind: Service + metadata: + name: svc-test + status: + (loadBalancer.ingress[0].ip != null): true + - name: Validate nodebalancer id + try: + - script: + content: | + set -e + + expectedId=$(kubectl get svc svc-test -n $NAMESPACE -o json | jq -r .metadata.annotations[]) + nbid=$(KUBECONFIG=$KUBECONFIG NAMESPACE=$NAMESPACE LINODE_TOKEN=$LINODE_TOKEN ../scripts/get-nb-id.sh) + + if [[ $nbid == $expectedId ]]; then + echo "Condition met" + fi + check: + ($error == null): true + (contains($stdout, 'Condition met')): true + - name: Create new nodebalancer and update service to use it + try: + - script: + content: | + set -e + + re='^[0-9]+$' + + old_nbid=$(kubectl get svc svc-test -n $NAMESPACE -o json | jq -r .metadata.annotations[]) + + LABEL="ccm-$(head /dev/urandom | tr -dc 'a-z0-9' | head -c 5)" + + nbid=$(curl -s --request POST \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + -H "Content-Type: application/json" \ + -H "accept: application/json" \ + "https://api.linode.com/v4/nodebalancers" \ + --data "{\"label\": \"$LABEL\", \"region\": \"$REGION\"}" | jq .id) + + if ! [[ $nbid =~ $re ]]; then + echo "Nodebalancer id [$nbid] is incorrect, failed to create nodebalancer" + exit 1 + fi + + kubectl annotate --overwrite svc svc-test -n $NAMESPACE service.beta.kubernetes.io/linode-loadbalancer-nodebalancer-id=$nbid + + for i in {1..10}; do + nbid2=$(KUBECONFIG=$KUBECONFIG NAMESPACE=$NAMESPACE LINODE_TOKEN=$LINODE_TOKEN ../scripts/get-nb-id.sh) + + if [[ $nbid == $nbid2 ]]; then + echo "Condition met" + break + fi + sleep 5 + done + + old_nb_resp=$(curl --write-out "%{http_code}\n" \ + --silent --output /dev/null \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + "https://api.linode.com/v4/nodebalancers/$old_nbid") + + if [[ $old_nb_resp == "404" ]]; then + echo "old nodebalancer not found" + fi + check: + ($error == null): true + (contains($stdout, 'Condition met')): true + (contains($stdout, 'old nodebalancer not found')): true diff --git a/e2e/test/lb-created-with-new-nb-id/create-pods-services.yaml b/e2e/test/lb-created-with-new-nb-id/create-pods-services.yaml new file mode 100644 index 00000000..c37615c7 --- /dev/null +++ b/e2e/test/lb-created-with-new-nb-id/create-pods-services.yaml @@ -0,0 +1,49 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: created-with-new-nb-id + name: test +spec: + replicas: 1 + selector: + matchLabels: + app: created-with-new-nb-id + template: + metadata: + labels: + app: created-with-new-nb-id + spec: + containers: + - image: appscode/test-server:2.3 + name: test + ports: + - name: http-1 + containerPort: 8080 + protocol: TCP + env: + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name +--- +apiVersion: v1 +kind: Service +metadata: + name: svc-test + annotations: + service.beta.kubernetes.io/linode-loadbalancer-nodebalancer-id: ($nbconf.id) + labels: + app: created-with-new-nb-id +spec: + type: LoadBalancer + selector: + app: created-with-new-nb-id + ports: + - name: http-1 + protocol: TCP + port: 80 + targetPort: 8080 + sessionAffinity: None diff --git 
a/e2e/test/lb-created-with-specified-nb-id/chainsaw-test.yaml b/e2e/test/lb-created-with-specified-nb-id/chainsaw-test.yaml new file mode 100644 index 00000000..0b77dbe9 --- /dev/null +++ b/e2e/test/lb-created-with-specified-nb-id/chainsaw-test.yaml @@ -0,0 +1,73 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + name: lb-created-with-specified-nb-id +spec: + namespace: "lb-created-with-specified-nb-id" + steps: + - name: Check if CCM is deployed + try: + - assert: + file: ../assert-ccm-resources.yaml + - name: Create nodebalancer and create resources + try: + - script: + outputs: + - name: nbconf + value: (json_parse($stdout)) + content: | + set -e + + re='^[0-9]+$' + LABEL="ccm-$(head /dev/urandom | tr -dc 'a-z0-9' | head -c 5)" + + nbid=$(curl -s --request POST \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + -H "Content-Type: application/json" \ + -H "accept: application/json" \ + "https://api.linode.com/v4/nodebalancers" \ + --data "{\"label\": \"$LABEL\", \"region\": \"$REGION\"}" | jq .id) + + if ! [[ $nbid =~ $re ]]; then + echo "Nodebalancer id [$nbid] is incorrect, failed to create nodebalancer" + exit 1 + fi + + echo "{\"id\": \"$nbid\"}" + check: + ($error == null): true + - apply: + file: create-pods-services.yaml + catch: + - describe: + apiVersion: v1 + kind: Pod + - describe: + apiVersion: v1 + kind: Service + - name: Check that loadbalancer ip is assigned + try: + - assert: + resource: + apiVersion: v1 + kind: Service + metadata: + name: svc-test + status: + (loadBalancer.ingress[0].ip != null): true + - name: Validate nodebalancer id + try: + - script: + content: | + set -e + + expectedId=$(kubectl get svc svc-test -n $NAMESPACE -o json | jq -r .metadata.annotations[]) + nbid=$(KUBECONFIG=$KUBECONFIG NAMESPACE=$NAMESPACE LINODE_TOKEN=$LINODE_TOKEN ../scripts/get-nb-id.sh) + + if [[ $nbid == $expectedId ]]; then + echo "Condition met" + fi + check: + ($error == null): true + (contains($stdout, 'Condition met')): true diff --git a/e2e/test/lb-created-with-specified-nb-id/create-pods-services.yaml b/e2e/test/lb-created-with-specified-nb-id/create-pods-services.yaml new file mode 100644 index 00000000..1d286209 --- /dev/null +++ b/e2e/test/lb-created-with-specified-nb-id/create-pods-services.yaml @@ -0,0 +1,49 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: created-with-specified-nb-id + name: test +spec: + replicas: 1 + selector: + matchLabels: + app: created-with-specified-nb-id + template: + metadata: + labels: + app: created-with-specified-nb-id + spec: + containers: + - image: appscode/test-server:2.3 + name: test + ports: + - name: http-1 + containerPort: 8080 + protocol: TCP + env: + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name +--- +apiVersion: v1 +kind: Service +metadata: + name: svc-test + annotations: + service.beta.kubernetes.io/linode-loadbalancer-nodebalancer-id: ($nbconf.id) + labels: + app: created-with-specified-nb-id +spec: + type: LoadBalancer + selector: + app: created-with-specified-nb-id + ports: + - name: http-1 + protocol: TCP + port: 80 + targetPort: 8080 + sessionAffinity: None diff --git a/e2e/test/lb-delete-svc-no-nb/chainsaw-test.yaml b/e2e/test/lb-delete-svc-no-nb/chainsaw-test.yaml new file mode 100644 index 00000000..723a5d35 --- /dev/null +++ b/e2e/test/lb-delete-svc-no-nb/chainsaw-test.yaml @@ -0,0 +1,124 @@ +# 
yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + name: lb-delete-svc-no-nb +spec: + namespace: "lb-delete-svc-no-nb" + steps: + - name: Check if CCM is deployed + try: + - assert: + file: ../assert-ccm-resources.yaml + - name: Create nodebalancer and create resources + try: + - script: + outputs: + - name: nbconf + value: (json_parse($stdout)) + content: | + set -e + + re='^[0-9]+$' + LABEL="ccm-$(head /dev/urandom | tr -dc 'a-z0-9' | head -c 5)" + + nbid=$(curl -s --request POST \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + -H "Content-Type: application/json" \ + -H "accept: application/json" \ + "https://api.linode.com/v4/nodebalancers" \ + --data "{\"label\": \"$LABEL\", \"region\": \"$REGION\"}" | jq .id) + + if ! [[ $nbid =~ $re ]]; then + echo "Nodebalancer id [$nbid] is incorrect, failed to create nodebalancer" + exit 1 + fi + + echo "{\"id\": \"$nbid\"}" + check: + ($error == null): true + - apply: + file: create-pods-services.yaml + catch: + - describe: + apiVersion: v1 + kind: Pod + - describe: + apiVersion: v1 + kind: Service + - name: Check that loadbalancer ip is assigned + try: + - assert: + resource: + apiVersion: v1 + kind: Service + metadata: + name: svc-test + status: + (loadBalancer.ingress[0].ip != null): true + - name: Validate nodebalancer id + try: + - script: + content: | + set -e + + expectedId=$(kubectl get svc svc-test -n $NAMESPACE -o json | jq -r .metadata.annotations[]) + nbid=$(KUBECONFIG=$KUBECONFIG NAMESPACE=$NAMESPACE LINODE_TOKEN=$LINODE_TOKEN ../scripts/get-nb-id.sh) + + if [[ $nbid == $expectedId ]]; then + echo "Condition met" + fi + check: + ($error == null): true + (contains($stdout, 'Condition met')): true + - name: Delete nodebalancer, delete service and make sure its deleted + try: + - script: + content: | + set -e + + re='^[0-9]+$' + nbid=$(kubectl get svc svc-test -n $NAMESPACE -o json | jq -r .metadata.annotations[]) + + if ! 
[[ $nbid =~ $re ]]; then + echo "Nodebalancer id [$nbid] is incorrect" + exit 1 + fi + + # Delete nodebalancer + delete_resp=$(curl --write-out "%{http_code}\n" \ + --silent --output /dev/null \ + -X DELETE \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + "https://api.linode.com/v4/nodebalancers/$nbid") + + if [[ $delete_resp == "200" ]]; then + echo "nodebalancer deleted" + fi + + # Check to make sure nodebalancer is deleted + old_nb_resp=$(curl --write-out "%{http_code}\n" \ + --silent --output /dev/null \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + "https://api.linode.com/v4/nodebalancers/$nbid") + + if [[ $old_nb_resp == "404" ]]; then + echo "old nodebalancer not found" + fi + + # Delete service and make sure its deleted + kubectl --timeout=60s delete svc svc-test -n $NAMESPACE + + for i in {1..10}; do + if kubectl get svc svc-test -n $NAMESPACE > /dev/null 2>&1; then + sleep 5 + else + echo "service is deleted" + break + fi + done + check: + ($error == null): true + (contains($stdout, 'nodebalancer deleted')): true + (contains($stdout, 'old nodebalancer not found')): true + (contains($stdout, 'service is deleted')): true diff --git a/e2e/test/lb-delete-svc-no-nb/create-pods-services.yaml b/e2e/test/lb-delete-svc-no-nb/create-pods-services.yaml new file mode 100644 index 00000000..55ea60f9 --- /dev/null +++ b/e2e/test/lb-delete-svc-no-nb/create-pods-services.yaml @@ -0,0 +1,49 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: delete-svc-no-nb + name: test +spec: + replicas: 1 + selector: + matchLabels: + app: delete-svc-no-nb + template: + metadata: + labels: + app: delete-svc-no-nb + spec: + containers: + - image: appscode/test-server:2.3 + name: test + ports: + - name: http-1 + containerPort: 8080 + protocol: TCP + env: + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name +--- +apiVersion: v1 +kind: Service +metadata: + name: svc-test + annotations: + service.beta.kubernetes.io/linode-loadbalancer-nodebalancer-id: ($nbconf.id) + labels: + app: delete-svc-no-nb +spec: + type: LoadBalancer + selector: + app: delete-svc-no-nb + ports: + - name: http-1 + protocol: TCP + port: 80 + targetPort: 8080 + sessionAffinity: None diff --git a/e2e/test/lb-delete-svc-use-new-nbid/chainsaw-test.yaml b/e2e/test/lb-delete-svc-use-new-nbid/chainsaw-test.yaml new file mode 100644 index 00000000..7369d478 --- /dev/null +++ b/e2e/test/lb-delete-svc-use-new-nbid/chainsaw-test.yaml @@ -0,0 +1,121 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + name: lb-delete-svc-use-new-nbid +spec: + namespace: "lb-delete-svc-use-new-nbid" + steps: + - name: Check if CCM is deployed + try: + - assert: + file: ../assert-ccm-resources.yaml + - name: Create nodebalancer and create resources + try: + - script: + outputs: + - name: nbconf + value: (json_parse($stdout)) + content: | + set -e + + re='^[0-9]+$' + LABEL="ccm-$(head /dev/urandom | tr -dc 'a-z0-9' | head -c 5)" + + nbid=$(curl -s --request POST \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + -H "Content-Type: application/json" \ + -H "accept: application/json" \ + "https://api.linode.com/v4/nodebalancers" \ + --data "{\"label\": \"$LABEL\", \"region\": \"$REGION\"}" | jq .id) + + if ! 
[[ $nbid =~ $re ]]; then + echo "Nodebalancer id [$nbid] is incorrect, failed to create nodebalancer" + exit 1 + fi + + echo "{\"id\": \"$nbid\"}" + check: + ($error == null): true + - apply: + file: create-pods-services.yaml + catch: + - describe: + apiVersion: v1 + kind: Pod + - describe: + apiVersion: v1 + kind: Service + - name: Check that loadbalancer ip is assigned + try: + - assert: + resource: + apiVersion: v1 + kind: Service + metadata: + name: svc-test + status: + (loadBalancer.ingress[0].ip != null): true + - name: Validate nodebalancer id + try: + - script: + content: | + set -e + + expectedId=$(kubectl get svc svc-test -n $NAMESPACE -o json | jq -r .metadata.annotations[]) + nbid=$(KUBECONFIG=$KUBECONFIG NAMESPACE=$NAMESPACE LINODE_TOKEN=$LINODE_TOKEN ../scripts/get-nb-id.sh) + + if [[ $nbid == $expectedId ]]; then + echo "Condition met" + fi + check: + ($error == null): true + (contains($stdout, 'Condition met')): true + - name: Create new nodebalancer and update service to use it + try: + - script: + content: | + set -e + + re='^[0-9]+$' + + old_nbid=$(kubectl get svc svc-test -n $NAMESPACE -o json | jq -r .metadata.annotations[]) + + LABEL="ccm-$(head /dev/urandom | tr -dc 'a-z0-9' | head -c 5)" + + nbid=$(curl -s --request POST \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + -H "Content-Type: application/json" \ + -H "accept: application/json" \ + "https://api.linode.com/v4/nodebalancers" \ + --data "{\"label\": \"$LABEL\", \"region\": \"$REGION\"}" | jq .id) + + if ! [[ $nbid =~ $re ]]; then + echo "Nodebalancer id [$nbid] is incorrect, failed to create nodebalancer" + exit 1 + fi + + kubectl annotate --overwrite svc svc-test -n $NAMESPACE service.beta.kubernetes.io/linode-loadbalancer-nodebalancer-id=$nbid + + for i in {1..10}; do + nbid2=$(KUBECONFIG=$KUBECONFIG NAMESPACE=$NAMESPACE LINODE_TOKEN=$LINODE_TOKEN ../scripts/get-nb-id.sh) + + if [[ $nbid == $nbid2 ]]; then + echo "Condition met" + break + fi + sleep 5 + done + + old_nb_resp=$(curl --write-out "%{http_code}\n" \ + --silent --output /dev/null \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + "https://api.linode.com/v4/nodebalancers/$old_nbid") + + if [[ $old_nb_resp == "404" ]]; then + echo "old nodebalancer not found" + fi + check: + ($error == null): true + (contains($stdout, 'Condition met')): true + (contains($stdout, 'old nodebalancer not found')): true diff --git a/e2e/test/lb-delete-svc-use-new-nbid/create-pods-services.yaml b/e2e/test/lb-delete-svc-use-new-nbid/create-pods-services.yaml new file mode 100644 index 00000000..58815cf6 --- /dev/null +++ b/e2e/test/lb-delete-svc-use-new-nbid/create-pods-services.yaml @@ -0,0 +1,49 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: delete-svc-use-new-nbid + name: test +spec: + replicas: 1 + selector: + matchLabels: + app: delete-svc-use-new-nbid + template: + metadata: + labels: + app: delete-svc-use-new-nbid + spec: + containers: + - image: appscode/test-server:2.3 + name: test + ports: + - name: http-1 + containerPort: 8080 + protocol: TCP + env: + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name +--- +apiVersion: v1 +kind: Service +metadata: + name: svc-test + annotations: + service.beta.kubernetes.io/linode-loadbalancer-nodebalancer-id: ($nbconf.id) + labels: + app: delete-svc-use-new-nbid +spec: + type: LoadBalancer + selector: + app: delete-svc-use-new-nbid + ports: + - name: http-1 + protocol: TCP + port: 80 + targetPort: 8080 + sessionAffinity: None diff --git 
a/e2e/test/lb-delete-svc-use-specified-nb/chainsaw-test.yaml b/e2e/test/lb-delete-svc-use-specified-nb/chainsaw-test.yaml new file mode 100644 index 00000000..99ceb8e4 --- /dev/null +++ b/e2e/test/lb-delete-svc-use-specified-nb/chainsaw-test.yaml @@ -0,0 +1,73 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + name: lb-delete-svc-use-specified-nb +spec: + namespace: "lb-delete-svc-use-specified-nb" + steps: + - name: Check if CCM is deployed + try: + - assert: + file: ../assert-ccm-resources.yaml + - name: Create nodebalancer and create resources + try: + - script: + outputs: + - name: nbconf + value: (json_parse($stdout)) + content: | + set -e + + re='^[0-9]+$' + LABEL="ccm-$(head /dev/urandom | tr -dc 'a-z0-9' | head -c 5)" + + nbid=$(curl -s --request POST \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + -H "Content-Type: application/json" \ + -H "accept: application/json" \ + "https://api.linode.com/v4/nodebalancers" \ + --data "{\"label\": \"$LABEL\", \"region\": \"$REGION\"}" | jq .id) + + if ! [[ $nbid =~ $re ]]; then + echo "Nodebalancer id [$nbid] is incorrect, failed to create nodebalancer" + exit 1 + fi + + echo "{\"id\": \"$nbid\"}" + check: + ($error == null): true + - apply: + file: create-pods-services.yaml + catch: + - describe: + apiVersion: v1 + kind: Pod + - describe: + apiVersion: v1 + kind: Service + - name: Check that loadbalancer ip is assigned + try: + - assert: + resource: + apiVersion: v1 + kind: Service + metadata: + name: svc-test + status: + (loadBalancer.ingress[0].ip != null): true + - name: Validate nodebalancer id + try: + - script: + content: | + set -e + + expectedId=$(kubectl get svc svc-test -n $NAMESPACE -o json | jq -r .metadata.annotations[]) + nbid=$(KUBECONFIG=$KUBECONFIG NAMESPACE=$NAMESPACE LINODE_TOKEN=$LINODE_TOKEN ../scripts/get-nb-id.sh) + + if [[ $nbid == $expectedId ]]; then + echo "Condition met" + fi + check: + ($error == null): true + (contains($stdout, 'Condition met')): true diff --git a/e2e/test/lb-delete-svc-use-specified-nb/create-pods-services.yaml b/e2e/test/lb-delete-svc-use-specified-nb/create-pods-services.yaml new file mode 100644 index 00000000..87461401 --- /dev/null +++ b/e2e/test/lb-delete-svc-use-specified-nb/create-pods-services.yaml @@ -0,0 +1,49 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: delete-svc-use-specified-nb + name: test +spec: + replicas: 1 + selector: + matchLabels: + app: delete-svc-use-specified-nb + template: + metadata: + labels: + app: delete-svc-use-specified-nb + spec: + containers: + - image: appscode/test-server:2.3 + name: test + ports: + - name: http-1 + containerPort: 8080 + protocol: TCP + env: + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name +--- +apiVersion: v1 +kind: Service +metadata: + name: svc-test + annotations: + service.beta.kubernetes.io/linode-loadbalancer-nodebalancer-id: ($nbconf.id) + labels: + app: delete-svc-use-specified-nb +spec: + type: LoadBalancer + selector: + app: delete-svc-use-specified-nb + ports: + - name: http-1 + protocol: TCP + port: 80 + targetPort: 8080 + sessionAffinity: None diff --git a/e2e/test/lb-hostname-only-ingress/chainsaw-test.yaml b/e2e/test/lb-hostname-only-ingress/chainsaw-test.yaml new file mode 100644 index 00000000..69c7cd0e --- /dev/null +++ b/e2e/test/lb-hostname-only-ingress/chainsaw-test.yaml @@ -0,0 +1,64 @@ +# 
yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + name: lb-hostname-only-ingress +spec: + namespace: "lb-hostname-only-ingress" + steps: + - name: Check if CCM is deployed + try: + - assert: + file: ../assert-ccm-resources.yaml + - name: Create pods and services + try: + - apply: + file: create-pods-services.yaml + catch: + - describe: + apiVersion: v1 + kind: Pod + - describe: + apiVersion: v1 + kind: Service + - name: Check that svc-test-1 loadbalancer ingress contains only hostname + try: + - assert: + resource: + apiVersion: v1 + kind: Service + metadata: + name: svc-test-1 + status: + (loadBalancer.ingress[0].ip != null): false + (loadBalancer.ingress[0].hostname != null): true + - name: Check that svc-test-2 loadbalancer ingress contains ip + try: + - assert: + resource: + apiVersion: v1 + kind: Service + metadata: + name: svc-test-2 + status: + (loadBalancer.ingress[0].ip != null): true + (loadBalancer.ingress[0].hostname != null): true + - name: Annotate service + try: + - script: + content: | + set -e + kubectl annotate svc svc-test-2 -n $NAMESPACE service.beta.kubernetes.io/linode-loadbalancer-hostname-only-ingress=true + check: + ($error == null): true + - name: Check and make sure svc-test-2 ingress only contains hostname + try: + - assert: + resource: + apiVersion: v1 + kind: Service + metadata: + name: svc-test-2 + status: + (loadBalancer.ingress[0].ip != null): false + (loadBalancer.ingress[0].hostname != null): true diff --git a/e2e/test/lb-hostname-only-ingress/create-pods-services.yaml b/e2e/test/lb-hostname-only-ingress/create-pods-services.yaml new file mode 100644 index 00000000..59d52fe6 --- /dev/null +++ b/e2e/test/lb-hostname-only-ingress/create-pods-services.yaml @@ -0,0 +1,66 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: hostname-ingress + name: test +spec: + replicas: 1 + selector: + matchLabels: + app: hostname-ingress + template: + metadata: + labels: + app: hostname-ingress + spec: + containers: + - image: appscode/test-server:2.3 + name: test + ports: + - name: http-1 + containerPort: 80 + protocol: TCP + env: + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name +--- +apiVersion: v1 +kind: Service +metadata: + name: svc-test-1 + annotations: + service.beta.kubernetes.io/linode-loadbalancer-hostname-only-ingress: "true" + labels: + app: hostname-ingress +spec: + type: LoadBalancer + selector: + app: hostname-ingress + ports: + - name: http + protocol: TCP + port: 80 + targetPort: 80 + sessionAffinity: None +--- +apiVersion: v1 +kind: Service +metadata: + name: svc-test-2 + labels: + app: hostname-ingress +spec: + type: LoadBalancer + selector: + app: hostname-ingress + ports: + - name: http + protocol: TCP + port: 80 + targetPort: 80 + sessionAffinity: None diff --git a/e2e/test/lb-http-body-health-check/chainsaw-test.yaml b/e2e/test/lb-http-body-health-check/chainsaw-test.yaml new file mode 100644 index 00000000..b6246b55 --- /dev/null +++ b/e2e/test/lb-http-body-health-check/chainsaw-test.yaml @@ -0,0 +1,66 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + name: lb-http-body-health-check +spec: + namespace: "lb-http-body-health-check" + steps: + - name: Check if CCM is deployed + try: + - assert: + file: 
../assert-ccm-resources.yaml + - name: Create pods and services + try: + - apply: + file: create-pods-services.yaml + catch: + - describe: + apiVersion: v1 + kind: Pod + - describe: + apiVersion: v1 + kind: Service + - name: Check that loadbalancer ip is assigned + try: + - assert: + resource: + apiVersion: v1 + kind: Service + metadata: + name: svc-test + status: + (loadBalancer.ingress[0].ip != null): true + - name: Fetch nodebalancer config for port 80 + try: + - script: + content: | + set -e + + nbid=$(KUBECONFIG=$KUBECONFIG NAMESPACE=$NAMESPACE LINODE_TOKEN=$LINODE_TOKEN ../scripts/get-nb-id.sh) + + for i in {1..10}; do + nbconfig=$(curl -s \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + -H "Content-Type: application/json" \ + "https://api.linode.com/v4/nodebalancers/$nbid/configs" | jq '.data[] | select(.port == 80)' || true) + + if [[ -z $nbconfig ]]; then + echo "Failed fetching nodebalancer config for port 80" + fi + + port_80_check=$(echo $nbconfig | jq '.check == "http_body"') + port_80_path=$(echo $nbconfig | jq '.check_path == "/"') + port_80_body=$(echo $nbconfig | jq '.check_body == "nginx"') + port_80_protocol=$(echo $nbconfig | jq '.protocol == "http"') + port_80_up_nodes=$(echo $nbconfig | jq '(.nodes_status.up)|tonumber >= 2') + + if [[ $port_80_check == "true" && $port_80_path == "true" && $port_80_body == "true" && $port_80_protocol == "true" && $port_80_up_nodes == "true" ]]; then + echo "All conditions met" + break + fi + sleep 15 + done + check: + ($error == null): true + (contains($stdout, 'All conditions met')): true diff --git a/e2e/test/lb-http-body-health-check/create-pods-services.yaml b/e2e/test/lb-http-body-health-check/create-pods-services.yaml new file mode 100644 index 00000000..1e93bd31 --- /dev/null +++ b/e2e/test/lb-http-body-health-check/create-pods-services.yaml @@ -0,0 +1,52 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: http-body-health-check + name: test +spec: + replicas: 1 + selector: + matchLabels: + app: http-body-health-check + template: + metadata: + labels: + app: http-body-health-check + spec: + containers: + - image: nginx + name: test + ports: + - name: http + containerPort: 80 + protocol: TCP + env: + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name +--- +apiVersion: v1 +kind: Service +metadata: + annotations: + service.beta.kubernetes.io/linode-loadbalancer-check-body: nginx + service.beta.kubernetes.io/linode-loadbalancer-check-path: / + service.beta.kubernetes.io/linode-loadbalancer-check-type: http_body + service.beta.kubernetes.io/linode-loadbalancer-default-protocol: http + name: svc-test + labels: + app: http-body-health-check +spec: + type: LoadBalancer + selector: + app: http-body-health-check + ports: + - name: http + protocol: TCP + port: 80 + targetPort: 80 + sessionAffinity: None diff --git a/e2e/test/lb-http-status-health-check/chainsaw-test.yaml b/e2e/test/lb-http-status-health-check/chainsaw-test.yaml new file mode 100644 index 00000000..16f5b728 --- /dev/null +++ b/e2e/test/lb-http-status-health-check/chainsaw-test.yaml @@ -0,0 +1,65 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + name: lb-http-status-health-check +spec: + namespace: "lb-http-status-health-check" + steps: + - name: Check if CCM is deployed + try: + - assert: + file: ../assert-ccm-resources.yaml + - name: Create pods and services + try: + 
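+        # Apply the test workload; on step failure the catch below dumps
+        # `kubectl describe` output for the Pods and Services to aid debugging.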
- apply: + file: create-pods-services.yaml + catch: + - describe: + apiVersion: v1 + kind: Pod + - describe: + apiVersion: v1 + kind: Service + - name: Check that loadbalancer ip is assigned + try: + - assert: + resource: + apiVersion: v1 + kind: Service + metadata: + name: svc-test + status: + (loadBalancer.ingress[0].ip != null): true + - name: Fetch nodebalancer config for port 80 + try: + - script: + content: | + set -e + + nbid=$(KUBECONFIG=$KUBECONFIG NAMESPACE=$NAMESPACE LINODE_TOKEN=$LINODE_TOKEN ../scripts/get-nb-id.sh) + + for i in {1..10}; do + nbconfig=$(curl -s \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + -H "Content-Type: application/json" \ + "https://api.linode.com/v4/nodebalancers/$nbid/configs" | jq '.data[] | select(.port == 80)' || true) + + if [[ -z $nbconfig ]]; then + echo "Failed fetching nodebalancer config for port 80" + fi + + port_80_check=$(echo $nbconfig | jq '.check == "http"') + port_80_path=$(echo $nbconfig | jq '.check_path == "/"') + port_80_protocol=$(echo $nbconfig | jq '.protocol == "http"') + port_80_up_nodes=$(echo $nbconfig | jq '(.nodes_status.up)|tonumber >= 2') + + if [[ $port_80_check == "true" && $port_80_path == "true" && $port_80_protocol == "true" && $port_80_up_nodes == "true" ]]; then + echo "All conditions met" + break + fi + sleep 15 + done + check: + ($error == null): true + (contains($stdout, 'All conditions met')): true diff --git a/e2e/test/lb-http-status-health-check/create-pods-services.yaml b/e2e/test/lb-http-status-health-check/create-pods-services.yaml new file mode 100644 index 00000000..ab76db96 --- /dev/null +++ b/e2e/test/lb-http-status-health-check/create-pods-services.yaml @@ -0,0 +1,51 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: http-status-health-check + name: test +spec: + replicas: 1 + selector: + matchLabels: + app: http-status-health-check + template: + metadata: + labels: + app: http-status-health-check + spec: + containers: + - image: nginx + name: test + ports: + - name: http + containerPort: 80 + protocol: TCP + env: + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name +--- +apiVersion: v1 +kind: Service +metadata: + annotations: + service.beta.kubernetes.io/linode-loadbalancer-check-path: "/" + service.beta.kubernetes.io/linode-loadbalancer-check-type: "http" + service.beta.kubernetes.io/linode-loadbalancer-default-protocol: "http" + name: svc-test + labels: + app: http-status-health-check +spec: + type: LoadBalancer + selector: + app: http-status-health-check + ports: + - name: http + protocol: TCP + port: 80 + targetPort: 80 + sessionAffinity: None diff --git a/e2e/test/lb-passive-health-check/chainsaw-test.yaml b/e2e/test/lb-passive-health-check/chainsaw-test.yaml new file mode 100644 index 00000000..d7479d88 --- /dev/null +++ b/e2e/test/lb-passive-health-check/chainsaw-test.yaml @@ -0,0 +1,64 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + name: lb-passive-health-check +spec: + namespace: "lb-passive-health-check" + steps: + - name: Check if CCM is deployed + try: + - assert: + file: ../assert-ccm-resources.yaml + - name: Create pods and services + try: + - apply: + file: create-pods-services.yaml + catch: + - describe: + apiVersion: v1 + kind: Pod + - describe: + apiVersion: v1 + kind: Service + - name: Check that loadbalancer ip is assigned + try: + - assert: + resource: + apiVersion: v1 
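+              # chainsaw retries this assertion until it holds or the configured
+              # assert timeout expires, so no explicit sleep/poll loop is needed.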
+ kind: Service + metadata: + name: svc-test + status: + (loadBalancer.ingress[0].ip != null): true + - name: Fetch nodebalancer config for port 80 + try: + - script: + content: | + set -e + + nbid=$(KUBECONFIG=$KUBECONFIG NAMESPACE=$NAMESPACE LINODE_TOKEN=$LINODE_TOKEN ../scripts/get-nb-id.sh) + + for i in {1..10}; do + nbconfig=$(curl -s \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + -H "Content-Type: application/json" \ + "https://api.linode.com/v4/nodebalancers/$nbid/configs" | jq '.data[] | select(.port == 80)' || true) + + if [[ -z $nbconfig ]]; then + echo "Failed fetching nodebalancer config for port 80" + fi + + port_80_check=$(echo $nbconfig | jq '.check == "none"') + port_80_passive=$(echo $nbconfig | jq '.check_passive == true') + port_80_up_nodes=$(echo $nbconfig | jq '(.nodes_status.up)|tonumber >= 2') + + if [[ $port_80_check == "true" && $port_80_passive == "true" && $port_80_up_nodes == "true" ]]; then + echo "All conditions met" + break + fi + sleep 15 + done + check: + ($error == null): true + (contains($stdout, 'All conditions met')): true diff --git a/e2e/test/lb-passive-health-check/create-pods-services.yaml b/e2e/test/lb-passive-health-check/create-pods-services.yaml new file mode 100644 index 00000000..daf4f6fd --- /dev/null +++ b/e2e/test/lb-passive-health-check/create-pods-services.yaml @@ -0,0 +1,50 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: passive-health-check + name: test +spec: + replicas: 1 + selector: + matchLabels: + app: passive-health-check + template: + metadata: + labels: + app: passive-health-check + spec: + containers: + - image: nginx + name: test + ports: + - name: http + containerPort: 80 + protocol: TCP + env: + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name +--- +apiVersion: v1 +kind: Service +metadata: + annotations: + service.beta.kubernetes.io/linode-loadbalancer-check-passive: "true" + service.beta.kubernetes.io/linode-loadbalancer-check-type: none + name: svc-test + labels: + app: passive-health-check +spec: + type: LoadBalancer + selector: + app: passive-health-check + ports: + - name: http + protocol: TCP + port: 80 + targetPort: 80 + sessionAffinity: None diff --git a/e2e/test/lb-preserve-annotation-new-nb-specified/chainsaw-test.yaml b/e2e/test/lb-preserve-annotation-new-nb-specified/chainsaw-test.yaml new file mode 100644 index 00000000..d7f2661d --- /dev/null +++ b/e2e/test/lb-preserve-annotation-new-nb-specified/chainsaw-test.yaml @@ -0,0 +1,106 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + name: lb-preserve-annotation-new-nb-specified +spec: + namespace: "lb-preserve-annotation-new-nb-specified" + steps: + - name: Check if CCM is deployed + try: + - assert: + file: ../assert-ccm-resources.yaml + - name: Create resources + try: + - apply: + file: create-pods-services.yaml + catch: + - describe: + apiVersion: v1 + kind: Pod + - describe: + apiVersion: v1 + kind: Service + - name: Check that loadbalancer ip is assigned + try: + - assert: + resource: + apiVersion: v1 + kind: Service + metadata: + name: svc-test + status: + (loadBalancer.ingress[0].ip != null): true + - name: Create new nodebalancer and update service to use it + try: + - script: + content: | + set -e + + re='^[0-9]+$' + + # Get existing nodebalancer id + old_nbid=$(KUBECONFIG=$KUBECONFIG NAMESPACE=$NAMESPACE LINODE_TOKEN=$LINODE_TOKEN 
../scripts/get-nb-id.sh) + + # Create new nodebalancer and use it + LABEL="ccm-$(head /dev/urandom | tr -dc 'a-z0-9' | head -c 5)" + + nbid=$(curl -s --request POST \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + -H "Content-Type: application/json" \ + -H "accept: application/json" \ + "https://api.linode.com/v4/nodebalancers" \ + --data "{\"label\": \"$LABEL\", \"region\": \"$REGION\"}" | jq .id) + + if ! [[ $nbid =~ $re ]]; then + echo "Nodebalancer id [$nbid] is incorrect, failed to create nodebalancer" + exit 1 + fi + + kubectl annotate svc svc-test -n $NAMESPACE service.beta.kubernetes.io/linode-loadbalancer-nodebalancer-id=$nbid + + for i in {1..10}; do + nbid2=$(KUBECONFIG=$KUBECONFIG NAMESPACE=$NAMESPACE LINODE_TOKEN=$LINODE_TOKEN ../scripts/get-nb-id.sh) + + if [[ $nbid == $nbid2 ]]; then + echo "updated nodebalancer used" + break + fi + sleep 5 + done + + # Check old nodebalancer still exists + old_nb_resp=$(curl --write-out "%{http_code}\n" \ + --silent --output /dev/null \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + "https://api.linode.com/v4/nodebalancers/$old_nbid") + + if [[ $old_nb_resp == "200" ]]; then + echo "old nodebalancer found" + fi + + # cleanup old nodebalancer + delete_resp=$(curl --write-out "%{http_code}\n" \ + --silent --output /dev/null \ + -X DELETE \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + "https://api.linode.com/v4/nodebalancers/$old_nbid") + + if [[ $delete_resp != "200" ]]; then + echo "failed deleting nodebalancer" + fi + + # cleanup new nodebalancer + delete_resp=$(curl --write-out "%{http_code}\n" \ + --silent --output /dev/null \ + -X DELETE \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + "https://api.linode.com/v4/nodebalancers/$nbid") + + if [[ $delete_resp != "200" ]]; then + echo "failed deleting nodebalancer" + fi + check: + ($error == null): true + (contains($stdout, 'updated nodebalancer used')): true + (contains($stdout, 'old nodebalancer found')): true diff --git a/e2e/test/lb-preserve-annotation-new-nb-specified/create-pods-services.yaml b/e2e/test/lb-preserve-annotation-new-nb-specified/create-pods-services.yaml new file mode 100644 index 00000000..f0b9bc1c --- /dev/null +++ b/e2e/test/lb-preserve-annotation-new-nb-specified/create-pods-services.yaml @@ -0,0 +1,49 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: preserve-annotation-new-nb-specified + name: test +spec: + replicas: 1 + selector: + matchLabels: + app: preserve-annotation-new-nb-specified + template: + metadata: + labels: + app: preserve-annotation-new-nb-specified + spec: + containers: + - image: appscode/test-server:2.3 + name: test + ports: + - name: http-1 + containerPort: 8080 + protocol: TCP + env: + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name +--- +apiVersion: v1 +kind: Service +metadata: + name: svc-test + annotations: + service.beta.kubernetes.io/linode-loadbalancer-preserve: "true" + labels: + app: preserve-annotation-new-nb-specified +spec: + type: LoadBalancer + selector: + app: preserve-annotation-new-nb-specified + ports: + - name: http-1 + protocol: TCP + port: 80 + targetPort: 8080 + sessionAffinity: None diff --git a/e2e/test/lb-preserve-annotation-svc-delete/chainsaw-test.yaml b/e2e/test/lb-preserve-annotation-svc-delete/chainsaw-test.yaml new file mode 100644 index 00000000..2e33d401 --- /dev/null +++ b/e2e/test/lb-preserve-annotation-svc-delete/chainsaw-test.yaml @@ -0,0 +1,68 @@ +# yaml-language-server: 
$schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + name: lb-preserve-annotation-svc-delete +spec: + namespace: "lb-preserve-annotation-svc-delete" + steps: + - name: Check if CCM is deployed + try: + - assert: + file: ../assert-ccm-resources.yaml + - name: Create resources + try: + - apply: + file: create-pods-services.yaml + catch: + - describe: + apiVersion: v1 + kind: Pod + - describe: + apiVersion: v1 + kind: Service + - name: Check that loadbalancer ip is assigned + try: + - assert: + resource: + apiVersion: v1 + kind: Service + metadata: + name: svc-test + status: + (loadBalancer.ingress[0].ip != null): true + - name: Delete pods, delete service and validate nb still exists + try: + - script: + content: | + set -e + + nbid=$(KUBECONFIG=$KUBECONFIG NAMESPACE=$NAMESPACE LINODE_TOKEN=$LINODE_TOKEN ../scripts/get-nb-id.sh) + + kubectl --timeout=60s -n $NAMESPACE delete deploy test + kubectl --timeout=60s -n $NAMESPACE delete svc svc-test + sleep 20 + + get_resp=$(curl --write-out "%{http_code}\n" \ + --silent --output /dev/null \ + -X GET \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + "https://api.linode.com/v4/nodebalancers/$nbid") + + if [[ $get_resp == "200" ]]; then + echo "nodebalancer exists" + fi + + # cleanup remaining nodebalancer + delete_resp=$(curl --write-out "%{http_code}\n" \ + --silent --output /dev/null \ + -X DELETE \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + "https://api.linode.com/v4/nodebalancers/$nbid") + + if ! [[ $delete_resp == "200" ]]; then + echo "failed deleting nodebalancer" + fi + check: + ($error == null): true + (contains($stdout, 'nodebalancer exists')): true diff --git a/e2e/test/lb-preserve-annotation-svc-delete/create-pods-services.yaml b/e2e/test/lb-preserve-annotation-svc-delete/create-pods-services.yaml new file mode 100644 index 00000000..3888da4a --- /dev/null +++ b/e2e/test/lb-preserve-annotation-svc-delete/create-pods-services.yaml @@ -0,0 +1,49 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: preserve-annotation-svc-delete + name: test +spec: + replicas: 1 + selector: + matchLabels: + app: preserve-annotation-svc-delete + template: + metadata: + labels: + app: preserve-annotation-svc-delete + spec: + containers: + - image: appscode/test-server:2.3 + name: test + ports: + - name: http-1 + containerPort: 8080 + protocol: TCP + env: + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name +--- +apiVersion: v1 +kind: Service +metadata: + name: svc-test + annotations: + service.beta.kubernetes.io/linode-loadbalancer-preserve: "true" + labels: + app: preserve-annotation-svc-delete +spec: + type: LoadBalancer + selector: + app: preserve-annotation-svc-delete + ports: + - name: http-1 + protocol: TCP + port: 80 + targetPort: 8080 + sessionAffinity: None diff --git a/e2e/test/lb-simple/chainsaw-test.yaml b/e2e/test/lb-simple/chainsaw-test.yaml new file mode 100644 index 00000000..2661961a --- /dev/null +++ b/e2e/test/lb-simple/chainsaw-test.yaml @@ -0,0 +1,84 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + name: lb-simple +spec: + namespace: "lb-simple" + steps: + - name: Check if CCM is deployed + try: + - assert: + file: ../assert-ccm-resources.yaml + - name: Create pods and services + try: + - apply: + file: 
create-pods-services.yaml
+      catch:
+        - describe:
+            apiVersion: v1
+            kind: Pod
+        - describe:
+            apiVersion: v1
+            kind: Service
+    - name: Check that loadbalancer ip is assigned
+      try:
+        - assert:
+            resource:
+              apiVersion: v1
+              kind: Service
+              metadata:
+                name: svc-test
+              status:
+                (loadBalancer.ingress[0].ip != null): true
+    - name: Fetch loadbalancer ip and check both pods reachable
+      try:
+        - script:
+            content: |
+              set -e
+              IP=$(kubectl get svc svc-test -n $NAMESPACE -o json | jq -r .status.loadBalancer.ingress[0].ip)
+
+              podnames=()
+
+              for i in {1..10}; do
+                if [[ ${#podnames[@]} -lt 2 ]]; then
+                  output=$(curl -s $IP:80 | jq -e .podName || true)
+
+                  if [[ "$output" == *"test-"* ]]; then
+                    # Only record pod names we have not seen yet; the test
+                    # passes once both replicas have answered.
+                    unique=true
+                    for p in "${podnames[@]}"; do
+                      if [[ "$p" == "$output" ]]; then
+                        unique=false
+                        break
+                      fi
+                    done
+                    if [[ "$unique" == true ]]; then
+                      podnames+=("$output")
+                    fi
+                  fi
+                else
+                  break
+                fi
+                sleep 10
+              done
+
+              if [[ ${#podnames[@]} -lt 2 ]]; then
+                echo "all pods failed to respond"
+              else
+                echo "all pods responded"
+              fi
+            check:
+              ($error == null): true
+              (contains($stdout, 'all pods responded')): true
+    - name: Delete Pods
+      try:
+        - delete:
+            ref:
+              apiVersion: v1
+              kind: Pod
+    - name: Delete Service
+      try:
+        - delete:
+            ref:
+              apiVersion: v1
+              kind: Service
diff --git a/e2e/test/lb-simple/create-pods-services.yaml b/e2e/test/lb-simple/create-pods-services.yaml
new file mode 100644
index 00000000..0f503d9a
--- /dev/null
+++ b/e2e/test/lb-simple/create-pods-services.yaml
@@ -0,0 +1,59 @@
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  labels:
+    app: lb-simple
+  name: test
+spec:
+  replicas: 2
+  selector:
+    matchLabels:
+      app: lb-simple
+  template:
+    metadata:
+      labels:
+        app: lb-simple
+    spec:
+      affinity:
+        podAntiAffinity:
+          preferredDuringSchedulingIgnoredDuringExecution:
+            - podAffinityTerm:
+                labelSelector:
+                  matchExpressions:
+                    - key: app
+                      operator: In
+                      values:
+                        - lb-simple
+                topologyKey: kubernetes.io/hostname
+              weight: 100
+      containers:
+        - image: appscode/test-server:2.3
+          name: test
+          ports:
+            - name: http-1
+              containerPort: 8080
+              protocol: TCP
+          env:
+            - name: POD_NAME
+              valueFrom:
+                fieldRef:
+                  apiVersion: v1
+                  fieldPath: metadata.name
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: svc-test
+  labels:
+    app: lb-simple
+spec:
+  type: LoadBalancer
+  selector:
+    app: lb-simple
+  ports:
+    - name: http-1
+      protocol: TCP
+      port: 80
+      targetPort: 8080
+  sessionAffinity: None
diff --git a/e2e/test/lb-single-tls/chainsaw-test.yaml b/e2e/test/lb-single-tls/chainsaw-test.yaml
new file mode 100644
index 00000000..a75e4964
--- /dev/null
+++ b/e2e/test/lb-single-tls/chainsaw-test.yaml
@@ -0,0 +1,92 @@
+# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json
+apiVersion: chainsaw.kyverno.io/v1alpha1
+kind: Test
+metadata:
+  name: lb-single-tls
+spec:
+  namespace: "lb-single-tls"
+  steps:
+    - name: Check if CCM is deployed
+      try:
+        - assert:
+            file: ../assert-ccm-resources.yaml
+    - name: Create secret
+      try:
+        - script:
+            content: |
+              set -e
+              kubectl -n $NAMESPACE create secret tls tls-secret --cert=../certificates/server.crt --key=../certificates/server.key
+            check:
+              ($error == null): true
+    - name: Create pods and services
+      try:
+        - apply:
+            file: create-pods-services.yaml
+      catch:
+        - describe:
+            apiVersion: v1
+            kind: Pod
+        - describe:
+            apiVersion: v1
+            kind: Service
+    - name: Check that loadbalancer ip is assigned
+      try:
+        - assert:
+            resource:
+              apiVersion: v1
+              kind: Service
+              metadata:
+                name: svc-test
+              status:
+                (loadBalancer.ingress[0].ip != null): true
+    - name: Fetch loadbalancer ip and check if pod is reachable
+      try:
+        - script:
+            content: |
+              set -e
+              IP=$(kubectl get svc svc-test -n $NAMESPACE -o json | jq -r .status.loadBalancer.ingress[0].ip)
+
+              podnames=()
+
+              for i in {1..10}; do
+                if [[ ${#podnames[@]} -lt 1 ]]; then
+                  output=$(curl --resolve linode.test:80:$IP --cacert ../certificates/ca.crt -s https://linode.test:80 | jq -e .podName || true)
+
+                  if [[ "$output" == *"test-"* ]]; then
+                    unique=true
+                    for p in "${podnames[@]}"; do
+                      if [[ "$p" == "$output" ]]; then
+                        unique=false
+                        break
+                      fi
+                    done
+                    if [[ "$unique" == true ]]; then
+                      podnames+=("$output")
+                    fi
+                  fi
+                else
+                  break
+                fi
+                sleep 10
+              done
+
+              if [[ ${#podnames[@]} -lt 1 ]]; then
+                echo "all pods failed to respond"
+              else
+                echo "all pods responded"
+              fi
+            check:
+              ($error == null): true
+              (contains($stdout, 'all pods responded')): true
+    - name: Delete Pods
+      try:
+        - delete:
+            ref:
+              apiVersion: v1
+              kind: Pod
+    - name: Delete Service
+      try:
+        - delete:
+            ref:
+              apiVersion: v1
+              kind: Service
diff --git a/e2e/test/lb-single-tls/create-pods-services.yaml b/e2e/test/lb-single-tls/create-pods-services.yaml
new file mode 100644
index 00000000..d749a6b6
--- /dev/null
+++ b/e2e/test/lb-single-tls/create-pods-services.yaml
@@ -0,0 +1,50 @@
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  labels:
+    app: lb-single-tls
+  name: test
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: lb-single-tls
+  template:
+    metadata:
+      labels:
+        app: lb-single-tls
+    spec:
+      containers:
+        - image: appscode/test-server:2.3
+          name: test
+          ports:
+            - name: https
+              containerPort: 8080
+              protocol: TCP
+          env:
+            - name: POD_NAME
+              valueFrom:
+                fieldRef:
+                  apiVersion: v1
+                  fieldPath: metadata.name
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: svc-test
+  annotations:
+    service.beta.kubernetes.io/linode-loadbalancer-default-protocol: https
+    service.beta.kubernetes.io/linode-loadbalancer-port-80: '{ "tls-secret-name": "tls-secret" }'
+  labels:
+    app: lb-single-tls
+spec:
+  type: LoadBalancer
+  selector:
+    app: lb-single-tls
+  ports:
+    - name: https
+      protocol: TCP
+      port: 80
+      targetPort: 8080
+  sessionAffinity: None
diff --git a/e2e/test/lb-tcp-connection-health-check/chainsaw-test.yaml b/e2e/test/lb-tcp-connection-health-check/chainsaw-test.yaml
new file mode 100644
index 00000000..f59f14e2
--- /dev/null
+++ b/e2e/test/lb-tcp-connection-health-check/chainsaw-test.yaml
@@ -0,0 +1,67 @@
+# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json
+apiVersion: chainsaw.kyverno.io/v1alpha1
+kind: Test
+metadata:
+  name: lb-tcp-connection-health-check
+spec:
+  namespace: "lb-tcp-connection-health-check"
+  steps:
+    - name: Check if CCM is deployed
+      try:
+        - assert:
+            file: ../assert-ccm-resources.yaml
+    - name: Create pods and services
+      try:
+        - apply:
+            file: create-pods-services.yaml
+      catch:
+        - describe:
+            apiVersion: v1
+            kind: Pod
+        - describe:
+            apiVersion: v1
+            kind: Service
+    - name: Check that loadbalancer ip is assigned
+      try:
+        - assert:
+            resource:
+              apiVersion: v1
+              kind: Service
+              metadata:
+                name: svc-test
+              status:
+                (loadBalancer.ingress[0].ip != null): true
+    - name: Fetch nodebalancer config for port 80
+      try:
+        - script:
+            content: |
+              set -e
+
+              nbid=$(KUBECONFIG=$KUBECONFIG NAMESPACE=$NAMESPACE LINODE_TOKEN=$LINODE_TOKEN ../scripts/get-nb-id.sh)
+
+              for i in {1..10}; do
+                nbconfig=$(curl -s \
+                  -H "Authorization: Bearer 
$LINODE_TOKEN" \ + -H "Content-Type: application/json" \ + "https://api.linode.com/v4/nodebalancers/$nbid/configs" | jq '.data[] | select(.port == 80)' || true) + + if [[ -z $nbconfig ]]; then + echo "Failed fetching nodebalancer config for port 80" + fi + + port_80_check=$(echo $nbconfig | jq '.check == "connection"') + port_80_interval=$(echo $nbconfig | jq '.check_interval == 10') + port_80_timeout=$(echo $nbconfig | jq '.check_timeout == 5') + port_80_attempts=$(echo $nbconfig | jq '.check_attempts == 4') + port_80_protocol=$(echo $nbconfig | jq '.protocol == "tcp"') + port_80_up_nodes=$(echo $nbconfig | jq '(.nodes_status.up)|tonumber >= 2') + + if [[ $port_80_check == "true" && $port_80_interval == "true" && $port_80_timeout == "true" && $port_80_attempts == "true" && $port_80_protocol == "true" && $port_80_up_nodes == "true" ]]; then + echo "All conditions met" + break + fi + sleep 15 + done + check: + ($error == null): true + (contains($stdout, 'All conditions met')): true diff --git a/e2e/test/lb-tcp-connection-health-check/create-pods-services.yaml b/e2e/test/lb-tcp-connection-health-check/create-pods-services.yaml new file mode 100644 index 00000000..0eae0673 --- /dev/null +++ b/e2e/test/lb-tcp-connection-health-check/create-pods-services.yaml @@ -0,0 +1,53 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: tcp-connection-health-check + name: test +spec: + replicas: 1 + selector: + matchLabels: + app: tcp-connection-health-check + template: + metadata: + labels: + app: tcp-connection-health-check + spec: + containers: + - image: nginx + name: test + ports: + - name: http + containerPort: 80 + protocol: TCP + env: + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name +--- +apiVersion: v1 +kind: Service +metadata: + annotations: + service.beta.kubernetes.io/linode-loadbalancer-check-attempts: "4" + service.beta.kubernetes.io/linode-loadbalancer-check-interval: "10" + service.beta.kubernetes.io/linode-loadbalancer-check-timeout: "5" + service.beta.kubernetes.io/linode-loadbalancer-check-type: connection + service.beta.kubernetes.io/linode-loadbalancer-default-protocol: tcp + name: svc-test + labels: + app: tcp-connection-health-check +spec: + type: LoadBalancer + selector: + app: tcp-connection-health-check + ports: + - name: http + protocol: TCP + port: 80 + targetPort: 80 + sessionAffinity: None diff --git a/e2e/test/lb-updated-with-nb-id/chainsaw-test.yaml b/e2e/test/lb-updated-with-nb-id/chainsaw-test.yaml new file mode 100644 index 00000000..c897979b --- /dev/null +++ b/e2e/test/lb-updated-with-nb-id/chainsaw-test.yaml @@ -0,0 +1,69 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + name: lb-updated-with-nb-id +spec: + namespace: "lb-updated-with-nb-id" + steps: + - name: Check if CCM is deployed + try: + - assert: + file: ../assert-ccm-resources.yaml + - name: Create pods and services + try: + - apply: + file: create-pods-services.yaml + catch: + - describe: + apiVersion: v1 + kind: Pod + - describe: + apiVersion: v1 + kind: Service + - name: Check that loadbalancer ip is assigned + try: + - assert: + resource: + apiVersion: v1 + kind: Service + metadata: + name: svc-test + status: + (loadBalancer.ingress[0].ip != null): true + - name: Create nodebalancer, annotate svc with nodebalancer id and validate + try: + - script: + content: | + set -e + + re='^[0-9]+$' + 
LABEL="ccm-$(head /dev/urandom | tr -dc 'a-z0-9' | head -c 5)" + + nbid=$(curl -s --request POST \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + -H "Content-Type: application/json" \ + -H "accept: application/json" \ + "https://api.linode.com/v4/nodebalancers" \ + --data "{\"label\": \"$LABEL\", \"region\": \"$REGION\"}" | jq .id) + + if ! [[ $nbid =~ $re ]]; then + echo "Nodebalancer id [$nbid] for label [$lABEL] is incorrect, failed to create nodebalancer" + exit 1 + fi + + kubectl annotate svc svc-test -n $NAMESPACE service.beta.kubernetes.io/linode-loadbalancer-nodebalancer-id=$nbid + sleep 5 + + for i in {1..10}; do + nbid2=$(KUBECONFIG=$KUBECONFIG NAMESPACE=$NAMESPACE LINODE_TOKEN=$LINODE_TOKEN ../scripts/get-nb-id.sh) + + if [[ $nbid == $nbid2 ]]; then + echo "Condition met" + break + fi + sleep 10 + done + check: + ($error == null): true + (contains($stdout, 'Condition met')): true diff --git a/e2e/test/lb-updated-with-nb-id/create-pods-services.yaml b/e2e/test/lb-updated-with-nb-id/create-pods-services.yaml new file mode 100644 index 00000000..41b75aab --- /dev/null +++ b/e2e/test/lb-updated-with-nb-id/create-pods-services.yaml @@ -0,0 +1,47 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: updated-with-nb-id + name: test +spec: + replicas: 1 + selector: + matchLabels: + app: updated-with-nb-id + template: + metadata: + labels: + app: updated-with-nb-id + spec: + containers: + - image: appscode/test-server:2.3 + name: test + ports: + - name: http-1 + containerPort: 8080 + protocol: TCP + env: + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name +--- +apiVersion: v1 +kind: Service +metadata: + name: svc-test + labels: + app: updated-with-nb-id +spec: + type: LoadBalancer + selector: + app: updated-with-nb-id + ports: + - name: http-1 + protocol: TCP + port: 80 + targetPort: 8080 + sessionAffinity: None diff --git a/e2e/test/lb-with-http-to-https/chainsaw-test.yaml b/e2e/test/lb-with-http-to-https/chainsaw-test.yaml new file mode 100644 index 00000000..745b77ad --- /dev/null +++ b/e2e/test/lb-with-http-to-https/chainsaw-test.yaml @@ -0,0 +1,90 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + name: lb-with-http-to-https +spec: + namespace: "lb-with-http-to-https" + steps: + - name: Check if CCM is deployed + try: + - assert: + file: ../assert-ccm-resources.yaml + - name: Create pods and services + try: + - apply: + file: create-pods-services.yaml + catch: + - describe: + apiVersion: v1 + kind: Pod + - describe: + apiVersion: v1 + kind: Service + - name: Create secrets + try: + - script: + content: | + set -e + kubectl -n $NAMESPACE create secret tls tls-secret-1 --cert=../certificates/server.crt --key=../certificates/server.key + check: + ($error == null): true + - name: Update service to have another annotation and port + try: + - script: + content: | + set -e + kubectl annotate svc svc-test -n $NAMESPACE service.beta.kubernetes.io/linode-loadbalancer-port-443='{"tls-secret-name": "tls-secret-1", "protocol": "https"}' + kubectl patch svc svc-test -n $NAMESPACE --type='json' -p='[{"op": "add", "path": "/spec/ports/-", "value": {"name": "https", "port": 443, "targetPort": 8080, "protocol": "TCP"}}]' + sleep 10 + check: + ($error == null): true + - name: Check endpoints + try: + - assert: + resource: + apiVersion: v1 + kind: Endpoints + metadata: + name: svc-test + 
(subsets[0].addresses != null): true + (subsets[0].ports != null): true + (length(subsets[0].ports)): 2 + catch: + - describe: + apiVersion: v1 + kind: Pod + - describe: + apiVersion: v1 + kind: Service + - name: Check that loadbalancer ip is assigned + try: + - assert: + resource: + apiVersion: v1 + kind: Service + metadata: + name: svc-test + status: + (loadBalancer.ingress[0].ip != null): true + - name: Fetch loadbalancer ip and check if pod reachable on different ports with different protocols + try: + - script: + content: | + set -e + IP=$(kubectl get svc svc-test -n $NAMESPACE -o json | jq -r .status.loadBalancer.ingress[0].ip) + + for i in {1..10}; do + port_80=$(curl -s $IP:80 | grep "test-" || true) + port_443=$(curl --resolve linode.test:443:$IP --cacert ../certificates/ca.crt -s https://linode.test:443 | grep "test-" || true) + + if [[ -z $port_80 || -z $port_443 ]]; then + sleep 20 + else + echo "all pods responded" + break + fi + done + check: + ($error == null): true + (contains($stdout, 'all pods responded')): true diff --git a/e2e/test/lb-with-http-to-https/create-pods-services.yaml b/e2e/test/lb-with-http-to-https/create-pods-services.yaml new file mode 100644 index 00000000..775db623 --- /dev/null +++ b/e2e/test/lb-with-http-to-https/create-pods-services.yaml @@ -0,0 +1,50 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: http-to-https + name: test +spec: + replicas: 1 + selector: + matchLabels: + app: http-to-https + template: + metadata: + labels: + app: http-to-https + spec: + containers: + - image: appscode/test-server:2.3 + name: test + ports: + - name: alpha + containerPort: 8080 + protocol: TCP + env: + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name +--- +apiVersion: v1 +kind: Service +metadata: + annotations: + service.beta.kubernetes.io/linode-loadbalancer-default-protocol: https + service.beta.kubernetes.io/linode-loadbalancer-port-80: '{"protocol": "http"}' + name: svc-test + labels: + app: http-to-https +spec: + type: LoadBalancer + selector: + app: http-to-https + ports: + - name: http + protocol: TCP + port: 80 + targetPort: 8080 + sessionAffinity: None diff --git a/e2e/test/lb-with-multiple-http-https-ports/chainsaw-test.yaml b/e2e/test/lb-with-multiple-http-https-ports/chainsaw-test.yaml new file mode 100644 index 00000000..da73d113 --- /dev/null +++ b/e2e/test/lb-with-multiple-http-https-ports/chainsaw-test.yaml @@ -0,0 +1,84 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + name: lb-with-multiple-http-https-ports +spec: + namespace: "lb-with-multiple-http-https-ports" + steps: + - name: Check if CCM is deployed + try: + - assert: + file: ../assert-ccm-resources.yaml + - name: Create pods and services + try: + - apply: + file: create-pods-services.yaml + catch: + - describe: + apiVersion: v1 + kind: Pod + - describe: + apiVersion: v1 + kind: Service + - name: Create secrets + try: + - script: + content: | + set -e + kubectl -n $NAMESPACE create secret tls tls-secret-1 --cert=../certificates/server.crt --key=../certificates/server.key + kubectl -n $NAMESPACE create secret tls tls-secret-2 --cert=../certificates/server.crt --key=../certificates/server.key + sleep 2 + check: + ($error == null): true + - name: Check endpoints exist + try: + - assert: + resource: + apiVersion: v1 + kind: Endpoints + metadata: + name: svc-test + (subsets[0].addresses 
!= null): true + (subsets[0].ports != null): true + (length(subsets[0].ports)): 4 + catch: + - describe: + apiVersion: v1 + kind: Pod + - describe: + apiVersion: v1 + kind: Service + - name: Check that loadbalancer ip is assigned + try: + - assert: + resource: + apiVersion: v1 + kind: Service + metadata: + name: svc-test + status: + (loadBalancer.ingress[0].ip != null): true + - name: Fetch loadbalancer ip and check if pod reachable on different ports with different protocols + try: + - script: + content: | + set -e + IP=$(kubectl get svc svc-test -n $NAMESPACE -o json | jq -r .status.loadBalancer.ingress[0].ip) + + for i in {1..10}; do + port_80=$(curl -s $IP:80 | grep "test-" || true) + port_8080=$(curl -s $IP:8080 | grep "test-" || true) + port_443=$(curl --resolve linode.test:443:$IP --cacert ../certificates/ca.crt -s https://linode.test:443 | grep "test-" || true) + port_8443=$(curl --resolve linode.test:8443:$IP --cacert ../certificates/ca.crt -s https://linode.test:8443 | grep "test-" || true) + + if [[ -z $port_80 || -z $port_8080 || -z $port_443 || -z $port_8443 ]]; then + sleep 15 + else + echo "all pods responded" + break + fi + done + check: + ($error == null): true + (contains($stdout, 'all pods responded')): true diff --git a/e2e/test/lb-with-multiple-http-https-ports/create-pods-services.yaml b/e2e/test/lb-with-multiple-http-https-ports/create-pods-services.yaml new file mode 100644 index 00000000..c29dc014 --- /dev/null +++ b/e2e/test/lb-with-multiple-http-https-ports/create-pods-services.yaml @@ -0,0 +1,68 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: multiple-http-https-ports + name: test +spec: + replicas: 1 + selector: + matchLabels: + app: multiple-http-https-ports + template: + metadata: + labels: + app: multiple-http-https-ports + spec: + containers: + - image: appscode/test-server:2.3 + name: test + ports: + - name: alpha + containerPort: 8080 + protocol: TCP + - name: beta + containerPort: 8989 + protocol: TCP + env: + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name +--- +apiVersion: v1 +kind: Service +metadata: + annotations: + service.beta.kubernetes.io/linode-loadbalancer-default-protocol: https + service.beta.kubernetes.io/linode-loadbalancer-port-80: '{"protocol": "http"}' + service.beta.kubernetes.io/linode-loadbalancer-port-443: '{"tls-secret-name": "tls-secret-1"}' + service.beta.kubernetes.io/linode-loadbalancer-port-8080: '{"protocol": "http"}' + service.beta.kubernetes.io/linode-loadbalancer-port-8443: '{"tls-secret-name": "tls-secret-2", "protocol": "https"}' + name: svc-test + labels: + app: multiple-http-https-ports +spec: + type: LoadBalancer + selector: + app: multiple-http-https-ports + ports: + - name: http-1 + protocol: TCP + port: 80 + targetPort: 8989 + - name: http-2 + protocol: TCP + port: 8080 + targetPort: 8080 + - name: https-1 + protocol: TCP + port: 443 + targetPort: 8080 + - name: https-2 + protocol: TCP + port: 8443 + targetPort: 8989 + sessionAffinity: None diff --git a/e2e/test/lb-with-node-addition/chainsaw-test.yaml b/e2e/test/lb-with-node-addition/chainsaw-test.yaml new file mode 100644 index 00000000..62f17873 --- /dev/null +++ b/e2e/test/lb-with-node-addition/chainsaw-test.yaml @@ -0,0 +1,99 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + name: lb-with-node-addition +spec: + namespace: "lb-with-node-addition" + 
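# NOTE: the final step scales the workload cluster; this assumes a CAPI management cluster +
# reachable via $MGMT_KUBECONFIG and a MachineDeployment named ${CLUSTER_NAME}-md-0 +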
steps: + - name: Check if CCM is deployed + try: + - assert: + file: ../assert-ccm-resources.yaml + - name: Create resources + try: + - apply: + file: create-pods-services.yaml + catch: + - describe: + apiVersion: v1 + kind: Pod + - describe: + apiVersion: v1 + kind: Service + - name: Check that loadbalancer ip is assigned + try: + - assert: + resource: + apiVersion: v1 + kind: Service + metadata: + name: svc-test + status: + (loadBalancer.ingress[0].ip != null): true + - name: Validate nodebalancer has 2 nodes + try: + - script: + content: | + set -e + + nbid=$(KUBECONFIG=$KUBECONFIG NAMESPACE=$NAMESPACE LINODE_TOKEN=$LINODE_TOKEN ../scripts/get-nb-id.sh) + + for i in {1..10}; do + nbconfig=$(curl -s \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + -H "Content-Type: application/json" \ + "https://api.linode.com/v4/nodebalancers/$nbid/configs" | jq '.data[]? | select(.port == 80)') + + if [[ -z $nbconfig ]]; then + echo "Failed fetching nodebalancer config for port 80" + fi + + port_80_up_nodes=$(echo $nbconfig | jq '(.nodes_status.up)|tonumber == 2') + + if [[ $port_80_up_nodes == "true" ]]; then + echo "all nodes up" + break + fi + sleep 15 + done + check: + ($error == null): true + (contains($stdout, 'all nodes up')): true + - name: Add new node and check nodebalancer gets updated + try: + - script: + content: | + set -e + + current_replicas=$(KUBECONFIG=$MGMT_KUBECONFIG kubectl get machinedeployment ${CLUSTER_NAME}-md-0 -o=jsonpath='{.spec.replicas}') + required_replicas=$((current_replicas + 1)) + KUBECONFIG=$MGMT_KUBECONFIG kubectl patch machinedeployment ${CLUSTER_NAME}-md-0 --type='merge' -p "{\"spec\":{\"replicas\":$required_replicas}}" + + sleep 180 + + nbid=$(KUBECONFIG=$KUBECONFIG NAMESPACE=$NAMESPACE LINODE_TOKEN=$LINODE_TOKEN ../scripts/get-nb-id.sh) + + for i in {1..10}; do + nbconfig=$(curl -s \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + -H "Content-Type: application/json" \ + "https://api.linode.com/v4/nodebalancers/$nbid/configs" | jq '.data[]? 
| select(.port == 80)' || true) + + if [[ -z $nbconfig ]]; then + echo "Failed fetching nodebalancer config for port 80" + fi + + port_80_up_nodes=$(echo $nbconfig | jq '(.nodes_status.up)|tonumber == 3') + + if [[ $port_80_up_nodes == "true" ]]; then + echo "all nodes up" + break + fi + sleep 20 + done + + #KUBECONFIG=$MGMT_KUBECONFIG kubectl patch machinedeployment ${CLUSTER_NAME}-md-0 --type='merge' -p "{\"spec\":{\"replicas\":$current_replicas}}" + check: + ($error == null): true + (contains($stdout, 'all nodes up')): true diff --git a/e2e/test/lb-with-node-addition/create-pods-services.yaml b/e2e/test/lb-with-node-addition/create-pods-services.yaml new file mode 100644 index 00000000..39a55b9d --- /dev/null +++ b/e2e/test/lb-with-node-addition/create-pods-services.yaml @@ -0,0 +1,47 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: with-node-addition + name: test +spec: + replicas: 1 + selector: + matchLabels: + app: with-node-addition + template: + metadata: + labels: + app: with-node-addition + spec: + containers: + - image: appscode/test-server:2.3 + name: test + ports: + - name: http-1 + containerPort: 8080 + protocol: TCP + env: + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name +--- +apiVersion: v1 +kind: Service +metadata: + name: svc-test + labels: + app: with-node-addition +spec: + type: LoadBalancer + selector: + app: with-node-addition + ports: + - name: http-1 + protocol: TCP + port: 80 + targetPort: 8080 + sessionAffinity: None diff --git a/e2e/test/lb-with-proxyprotocol-default-annotation/chainsaw-test.yaml b/e2e/test/lb-with-proxyprotocol-default-annotation/chainsaw-test.yaml new file mode 100644 index 00000000..e8e07665 --- /dev/null +++ b/e2e/test/lb-with-proxyprotocol-default-annotation/chainsaw-test.yaml @@ -0,0 +1,112 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + name: lb-with-proxyprotocol-default-annotation +spec: + namespace: "lb-with-proxyprotocol-default-annotation" + steps: + - name: Check if CCM is deployed + try: + - assert: + file: ../assert-ccm-resources.yaml + - name: Create pods and services + try: + - apply: + file: create-pods-services.yaml + catch: + - describe: + apiVersion: v1 + kind: Pod + - describe: + apiVersion: v1 + kind: Service + - name: Check endpoints exist + try: + - assert: + resource: + apiVersion: v1 + kind: Endpoints + metadata: + name: svc-test + (subsets[0].addresses != null): true + (subsets[0].ports != null): true + - name: Add ProxyProtocol v2 using deprecated annotation + try: + - script: + content: | + set -e + kubectl annotate svc svc-test -n $NAMESPACE service.beta.kubernetes.io/linode-loadbalancer-proxy-protocol=v2 + sleep 10 + check: + ($error == null): true + - name: Check NodeBalancerConfig for port 80 and 8080 have ProxyProtocol v2 + try: + - script: + content: | + set -e + + nbid=$(KUBECONFIG=$KUBECONFIG NAMESPACE=$NAMESPACE LINODE_TOKEN=$LINODE_TOKEN ../scripts/get-nb-id.sh) + + nbconfig=$(curl -s \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + -H "Content-Type: application/json" \ + "https://api.linode.com/v4/nodebalancers/$nbid/configs") + + port_80_v2=$(echo $nbconfig | jq -r '.data[] | select(.port == 80) | .proxy_protocol == "v2"') + port_8080_v2=$(echo $nbconfig | jq -r '.data[] | select(.port == 8080) | .proxy_protocol == "v2"') + + if [[ $port_80_v2 == "true" && $port_8080_v2 == "true" ]]; then + 
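# both port configs inherit v2 because the deprecated annotation applies service-wide +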
echo "Conditions met" + else + echo "Conditions not met" + fi + check: + ($error): ~ + (contains($stdout, 'Conditions met')): true + - name: Add default annotation for ProxyProtocol v1 + try: + - script: + content: | + set -e + kubectl annotate svc svc-test -n $NAMESPACE service.beta.kubernetes.io/linode-loadbalancer-default-proxy-protocol=v1 + sleep 10 + check: + ($error == null): true + - name: Check NodeBalancerConfig for port 80 and 8080 have ProxyProtocol v1 + try: + - script: + content: | + set -e + + re='^[0-9]+$' + + hostname=$(kubectl get svc svc-test -n $NAMESPACE -o json | jq -r .status.loadBalancer.ingress[0].hostname) + ip=$(echo $hostname | awk -F'.' '{gsub("-", ".", $1); print $1}') + nbid=$(curl -s \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + -H "Content-Type: application/json" \ + -H "X-Filter: {\"ipv4\": \"$ip\"}" \ + "https://api.linode.com/v4/nodebalancers" | jq .data[].id) + + if ! [[ $nbid =~ $re ]]; then + echo "Nodebalancer id [$nbid] is incorrect, doesn't meet regex requirements" + exit 1 + fi + + nbconfig=$(curl -s \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + -H "Content-Type: application/json" \ + "https://api.linode.com/v4/nodebalancers/$nbid/configs") + + port_80_v1=$(echo $nbconfig | jq -r '.data[] | select(.port == 80) | .proxy_protocol == "v1"') + port_8080_v1=$(echo $nbconfig | jq -r '.data[] | select(.port == 8080) | .proxy_protocol == "v1"') + + if [[ $port_80_v1 == "true" && $port_8080_v1 == "true" ]]; then + echo "Conditions met" + else + echo "Conditions not met" + fi + check: + ($error): ~ + (contains($stdout, 'Conditions met')): true diff --git a/e2e/test/lb-with-proxyprotocol-default-annotation/create-pods-services.yaml b/e2e/test/lb-with-proxyprotocol-default-annotation/create-pods-services.yaml new file mode 100644 index 00000000..4ac2edc2 --- /dev/null +++ b/e2e/test/lb-with-proxyprotocol-default-annotation/create-pods-services.yaml @@ -0,0 +1,54 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: proxyprotocol-default-annotation + name: test +spec: + replicas: 1 + selector: + matchLabels: + app: proxyprotocol-default-annotation + template: + metadata: + labels: + app: proxyprotocol-default-annotation + spec: + containers: + - image: appscode/test-server:2.3 + name: test + ports: + - name: http-1 + containerPort: 80 + protocol: TCP + - name: http-2 + containerPort: 8080 + protocol: TCP + env: + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name +--- +apiVersion: v1 +kind: Service +metadata: + name: svc-test + labels: + app: proxyprotocol-default-annotation +spec: + type: LoadBalancer + selector: + app: proxyprotocol-default-annotation + ports: + - name: http-1 + protocol: TCP + port: 80 + targetPort: 80 + - name: http-2 + protocol: TCP + port: 8080 + targetPort: 8080 + sessionAffinity: None diff --git a/e2e/test/lb-with-proxyprotocol-override/chainsaw-test.yaml b/e2e/test/lb-with-proxyprotocol-override/chainsaw-test.yaml new file mode 100644 index 00000000..384fdc4a --- /dev/null +++ b/e2e/test/lb-with-proxyprotocol-override/chainsaw-test.yaml @@ -0,0 +1,100 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + name: lb-with-proxyprotocol-override +spec: + namespace: "lb-with-proxyprotocol-override" + steps: + - name: Check if CCM is deployed + try: + - assert: + file: ../assert-ccm-resources.yaml + - name: Create pods and services + 
try: + - apply: + file: create-pods-services.yaml + catch: + - describe: + apiVersion: v1 + kind: Pod + - describe: + apiVersion: v1 + kind: Service + - name: Check endpoints exist + try: + - assert: + resource: + apiVersion: v1 + kind: Endpoints + metadata: + name: svc-test + (subsets[0].addresses != null): true + (subsets[0].ports != null): true + - name: Annotate service port 80 with v1 and 8080 with v2 + try: + - script: + content: | + set -e + kubectl annotate svc svc-test -n $NAMESPACE service.beta.kubernetes.io/linode-loadbalancer-port-80='{"proxy-protocol": "v1"}' + kubectl annotate svc svc-test -n $NAMESPACE service.beta.kubernetes.io/linode-loadbalancer-port-8080='{"proxy-protocol": "v2"}' + sleep 10 + - name: Check NodeBalancerConfig for port 80 to have ProxyProtocol v1 and port 8080 to have ProxyProtocol v2 + try: + - script: + content: | + set -e + + nbid=$(KUBECONFIG=$KUBECONFIG NAMESPACE=$NAMESPACE LINODE_TOKEN=$LINODE_TOKEN ../scripts/get-nb-id.sh) + + nbconfig=$(curl -s \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + -H "Content-Type: application/json" \ + "https://api.linode.com/v4/nodebalancers/$nbid/configs") + + port_80_v1=$(echo $nbconfig | jq -r '.data[] | select(.port == 80) | .proxy_protocol == "v1"') + port_8080_v2=$(echo $nbconfig | jq -r '.data[] | select(.port == 8080) | .proxy_protocol == "v2"') + + if [[ $port_80_v1 == "true" && $port_8080_v2 == "true" ]]; then + echo "Conditions met" + else + echo "Conditions not met" + fi + check: + ($error): ~ + (contains($stdout, 'Conditions met')): true + - name: Update service annotation for port 80 to v2 and 8080 with v1 + try: + - script: + content: | + set -e + kubectl annotate svc svc-test -n $NAMESPACE service.beta.kubernetes.io/linode-loadbalancer-default-proxy-protocol=v2 + kubectl annotate svc svc-test -n $NAMESPACE service.beta.kubernetes.io/linode-loadbalancer-port-80- + kubectl annotate --overwrite svc svc-test -n $NAMESPACE service.beta.kubernetes.io/linode-loadbalancer-port-8080='{"proxy-protocol": "v1"}' + sleep 10 + check: + ($error == null): true + - name: Check NodeBalancerConfig for port 80 to have ProxyProtocol v2 and port 8080 to have ProxyProtocol v1 + try: + - script: + content: | + set -e + + nbid=$(KUBECONFIG=$KUBECONFIG NAMESPACE=$NAMESPACE LINODE_TOKEN=$LINODE_TOKEN ../scripts/get-nb-id.sh) + + nbconfig=$(curl -s \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + -H "Content-Type: application/json" \ + "https://api.linode.com/v4/nodebalancers/$nbid/configs") + + port_80_v2=$(echo $nbconfig | jq -r '.data[] | select(.port == 80) | .proxy_protocol == "v2"') + port_8080_v1=$(echo $nbconfig | jq -r '.data[] | select(.port == 8080) | .proxy_protocol == "v1"') + + if [[ $port_80_v2 == "true" && $port_8080_v1 == "true" ]]; then + echo "Conditions met" + else + echo "Conditions not met" + fi + check: + ($error): ~ + (contains($stdout, 'Conditions met')): true diff --git a/e2e/test/lb-with-proxyprotocol-override/create-pods-services.yaml b/e2e/test/lb-with-proxyprotocol-override/create-pods-services.yaml new file mode 100644 index 00000000..a6247c4d --- /dev/null +++ b/e2e/test/lb-with-proxyprotocol-override/create-pods-services.yaml @@ -0,0 +1,54 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: proxyprotocol-override + name: test +spec: + replicas: 1 + selector: + matchLabels: + app: proxyprotocol-override + template: + metadata: + labels: + app: proxyprotocol-override + spec: + containers: + - image: appscode/test-server:2.3 + name: test + ports: + - name: http-1 + 
containerPort: 80 + protocol: TCP + - name: http-2 + containerPort: 8080 + protocol: TCP + env: + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name +--- +apiVersion: v1 +kind: Service +metadata: + name: svc-test + labels: + app: proxyprotocol-override +spec: + type: LoadBalancer + selector: + app: proxyprotocol-override + ports: + - name: http-1 + protocol: TCP + port: 80 + targetPort: 80 + - name: http-2 + protocol: TCP + port: 8080 + targetPort: 8080 + sessionAffinity: None diff --git a/e2e/test/lb-with-proxyprotocol-port-specific/chainsaw-test.yaml b/e2e/test/lb-with-proxyprotocol-port-specific/chainsaw-test.yaml new file mode 100644 index 00000000..61cc3d25 --- /dev/null +++ b/e2e/test/lb-with-proxyprotocol-port-specific/chainsaw-test.yaml @@ -0,0 +1,66 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + name: lb-with-proxyprotocol-port-specific +spec: + namespace: "lb-with-proxyprotocol-port-specific" + steps: + - name: Check if CCM is deployed + try: + - assert: + file: ../assert-ccm-resources.yaml + - name: Create pods and services + try: + - apply: + file: create-pods-services.yaml + catch: + - describe: + apiVersion: v1 + kind: Pod + - describe: + apiVersion: v1 + kind: Service + - name: Check endpoints exist + try: + - assert: + resource: + apiVersion: v1 + kind: Endpoints + metadata: + name: svc-test + (subsets[0].addresses != null): true + (subsets[0].ports != null): true + - name: Annotate service port 8080 with v2 + try: + - script: + content: | + set -e + kubectl annotate svc svc-test -n $NAMESPACE service.beta.kubernetes.io/linode-loadbalancer-port-8080='{"proxy-protocol": "v2"}' + sleep 10 + check: + ($error == null): true + - name: Check NodeBalancerConfig for port 80 to not have ProxyProtocol and port 8080 to have ProxyProtocol v2 + try: + - script: + content: | + set -e + + nbid=$(KUBECONFIG=$KUBECONFIG NAMESPACE=$NAMESPACE LINODE_TOKEN=$LINODE_TOKEN ../scripts/get-nb-id.sh) + + nbconfig=$(curl -s \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + -H "Content-Type: application/json" \ + "https://api.linode.com/v4/nodebalancers/$nbid/configs") + + port_80_none=$(echo $nbconfig | jq -r '.data[] | select(.port == 80) | .proxy_protocol == "none"') + port_8080_v2=$(echo $nbconfig | jq -r '.data[] | select(.port == 8080) | .proxy_protocol == "v2"') + + if [[ $port_80_none == "true" && $port_8080_v2 == "true" ]]; then + echo "Conditions met" + else + echo "Conditions not met" + fi + check: + ($error): ~ + (contains($stdout, 'Conditions met')): true diff --git a/e2e/test/lb-with-proxyprotocol-port-specific/create-pods-services.yaml b/e2e/test/lb-with-proxyprotocol-port-specific/create-pods-services.yaml new file mode 100644 index 00000000..95c0a822 --- /dev/null +++ b/e2e/test/lb-with-proxyprotocol-port-specific/create-pods-services.yaml @@ -0,0 +1,54 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: proxyprotocol-port-specific + name: test +spec: + replicas: 1 + selector: + matchLabels: + app: proxyprotocol-port-specific + template: + metadata: + labels: + app: proxyprotocol-port-specific + spec: + containers: + - image: appscode/test-server:2.3 + name: test + ports: + - name: http-1 + containerPort: 80 + protocol: TCP + - name: http-2 + containerPort: 8080 + protocol: TCP + env: + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: 
metadata.name +--- +apiVersion: v1 +kind: Service +metadata: + name: svc-test + labels: + app: proxyprotocol-port-specific +spec: + type: LoadBalancer + selector: + app: proxyprotocol-port-specific + ports: + - name: http-1 + protocol: TCP + port: 80 + targetPort: 80 + - name: http-2 + protocol: TCP + port: 8080 + targetPort: 8080 + sessionAffinity: None diff --git a/e2e/test/lb-with-proxyprotocol-set/chainsaw-test.yaml b/e2e/test/lb-with-proxyprotocol-set/chainsaw-test.yaml new file mode 100644 index 00000000..c4a43b2d --- /dev/null +++ b/e2e/test/lb-with-proxyprotocol-set/chainsaw-test.yaml @@ -0,0 +1,77 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + name: lb-with-proxyprotocol-set +spec: + namespace: "lb-with-proxyprotocol-set" + steps: + - name: Check if CCM is deployed + try: + - assert: + file: ../assert-ccm-resources.yaml + - name: Create pods and services + try: + - apply: + file: create-pods-services.yaml + catch: + - describe: + apiVersion: v1 + kind: Pod + - describe: + apiVersion: v1 + kind: Service + - name: Check endpoints exist + try: + - assert: + resource: + apiVersion: v1 + kind: Endpoints + metadata: + name: svc-test + (subsets[0].addresses != null): true + (subsets[0].ports != null): true + - name: Check that loadbalancer ip is assigned + try: + - assert: + resource: + apiVersion: v1 + kind: Service + metadata: + name: svc-test + status: + (loadBalancer.ingress[0].ip != null): true + - name: Annotate service port 80 with v1 and 8080 with v2 + try: + - script: + content: | + set -e + kubectl annotate svc svc-test -n $NAMESPACE service.beta.kubernetes.io/linode-loadbalancer-port-80='{"proxy-protocol": "v1"}' + kubectl annotate svc svc-test -n $NAMESPACE service.beta.kubernetes.io/linode-loadbalancer-port-8080='{"proxy-protocol": "v2"}' + sleep 10 + check: + ($error == null): true + - name: Check NodeBalancerConfig for port 80 to have ProxyProtocol v1 and port 8080 to have ProxyProtocol v2 + try: + - script: + content: | + set -e + + nbid=$(KUBECONFIG=$KUBECONFIG NAMESPACE=$NAMESPACE LINODE_TOKEN=$LINODE_TOKEN ../scripts/get-nb-id.sh) + + nbconfig=$(curl -s \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + -H "Content-Type: application/json" \ + "https://api.linode.com/v4/nodebalancers/$nbid/configs") + + port_80_v1=$(echo $nbconfig | jq -r '.data[] | select(.port == 80) | .proxy_protocol == "v1"') + port_8080_v2=$(echo $nbconfig | jq -r '.data[] | select(.port == 8080) | .proxy_protocol == "v2"') + + if [[ $port_80_v1 == "true" && $port_8080_v2 == "true" ]]; then + echo "Conditions met" + else + echo "Conditions not met" + fi + check: + ($error): ~ + (contains($stdout, 'Conditions met')): true diff --git a/e2e/test/lb-with-proxyprotocol-set/create-pods-services.yaml b/e2e/test/lb-with-proxyprotocol-set/create-pods-services.yaml new file mode 100644 index 00000000..80b96d86 --- /dev/null +++ b/e2e/test/lb-with-proxyprotocol-set/create-pods-services.yaml @@ -0,0 +1,54 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: proxyprotocol-set + name: test +spec: + replicas: 1 + selector: + matchLabels: + app: proxyprotocol-set + template: + metadata: + labels: + app: proxyprotocol-set + spec: + containers: + - image: appscode/test-server:2.3 + name: test + ports: + - name: http-1 + containerPort: 80 + protocol: TCP + - name: http-2 + containerPort: 8080 + protocol: TCP + env: + - name: POD_NAME + valueFrom: 
+ fieldRef: + apiVersion: v1 + fieldPath: metadata.name +--- +apiVersion: v1 +kind: Service +metadata: + name: svc-test + labels: + app: proxyprotocol-set +spec: + type: LoadBalancer + selector: + app: proxyprotocol-set + ports: + - name: http-1 + protocol: TCP + port: 80 + targetPort: 80 + - name: http-2 + protocol: TCP + port: 8080 + targetPort: 8080 + sessionAffinity: None diff --git a/e2e/test/route-controller-test/chainsaw-test.yaml b/e2e/test/route-controller-test/chainsaw-test.yaml new file mode 100644 index 00000000..236f13df --- /dev/null +++ b/e2e/test/route-controller-test/chainsaw-test.yaml @@ -0,0 +1,65 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + name: route-controller-test +spec: + bindings: + - name: fwname + value: (join('-', ['ccm-fwtest', env('CLUSTER_NAME')])) + namespace: "route-controller-test" + steps: + - name: Check if CCM is deployed + try: + - assert: + file: ../assert-ccm-resources.yaml + - name: Check if the route controller updated the config for the linode + try: + - script: + content: | + set -e + + if [ -z "$KUBECONFIG" ] || [ -z "$LINODE_TOKEN" ]; then + echo "Error: KUBECONFIG and LINODE_TOKEN environment variables must be set" + exit 1 + fi + + # Get all node names + nodes=$(kubectl get nodes -o jsonpath='{.items[*].metadata.name}') + if [ -z "$nodes" ]; then + echo "Error: No nodes found in cluster" + exit 1 + fi + + # Process each node + for node in $nodes; do + echo "Checking node: $node" + + # Get pod CIDR and instance ID + pod_cidr=$(kubectl get node "$node" -o jsonpath='{.spec.podCIDR}') + instance_id=$(kubectl get node "$node" -o jsonpath='{.spec.providerID}' | sed 's/linode:\/\///') + + echo " Pod CIDR: $pod_cidr" + echo " Instance ID: $instance_id" + + # Get interface details for this config + interfaces=$(curl -s \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + "https://api.linode.com/v4/linode/instances/$instance_id/configs" \ + | jq -r '.data[0].interfaces') + + # Check if pod CIDR is in the VPC interface IP ranges + if echo "$interfaces" | jq -e --arg cidr "$pod_cidr" '.[] | select(.purpose == "vpc") | .ip_ranges[] | select(. 
== $cidr)' > /dev/null; then + echo "Pod CIDR found in VPC interface configuration" + else + echo "Pod CIDR not found in VPC interface configuration" + echo "Current VPC interface configuration:" + echo "$interfaces" | jq '.[] | select(.purpose == "vpc")' + fi + + echo "---" + done + + check: + ($error == null): true + (contains($stdout, 'Pod CIDR not found in VPC interface configuration')): false diff --git a/e2e/test/scripts/create_cluster.sh b/e2e/test/scripts/create_cluster.sh deleted file mode 100755 index 63f0bbb8..00000000 --- a/e2e/test/scripts/create_cluster.sh +++ /dev/null @@ -1,41 +0,0 @@ -#!/usr/bin/env bash - -set -o errexit -set -o pipefail -set -o nounset - -export LINODE_API_TOKEN="$1" -export CLUSTER_NAME="$2" -export IMAGE="$3" -export K8S_VERSION="$4" - -if [[ -z "$5" ]] -then - export REGION="eu-west" -else - export REGION="$5" -fi - -cat > cluster.tf < k8s.io/api v0.29.3 - k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.29.3 - k8s.io/apimachinery => k8s.io/apimachinery v0.29.3 - k8s.io/apiserver => k8s.io/apiserver v0.29.3 - k8s.io/cli-runtime => k8s.io/cli-runtime v0.29.3 - k8s.io/client-go => k8s.io/client-go v0.29.3 - k8s.io/cloud-provider => k8s.io/cloud-provider v0.29.3 - k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.29.3 - k8s.io/code-generator => k8s.io/code-generator v0.29.3 - k8s.io/component-base => k8s.io/component-base v0.29.3 - k8s.io/cri-api => k8s.io/cri-api v0.29.3 - k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.29.3 - k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.29.3 - k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.29.3 - k8s.io/kube-proxy => k8s.io/kube-proxy v0.29.3 - k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.29.3 - k8s.io/kubectl => k8s.io/kubectl v0.29.3 - k8s.io/kubelet => k8s.io/kubelet v0.29.3 - k8s.io/kubernetes => k8s.io/kubernetes v1.21.0 - k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.29.3 - k8s.io/metrics => k8s.io/metrics v0.29.3 - k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.29.3 -) diff --git a/go.sum b/go.sum index a365ff47..d45e7511 100644 --- a/go.sum +++ b/go.sum @@ -1,63 +1,50 @@ -cloud.google.com/go v0.110.10 h1:LXy9GEO+timppncPIAZoOj3l58LIU9k+kn48AN7IO3Y= -cloud.google.com/go/compute v1.23.3 h1:6sVlXXBmbd7jNX0Ipq0trII3e4n1/MsADLK6a+aiVlk= -cloud.google.com/go/compute/metadata v0.3.0 h1:Tz+eQXMEqDIKRsmY3cHTL6FVaynIjX2QxYC4trgAKZc= -cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= +cel.dev/expr v0.19.1 h1:NciYrtDRIR0lNCnH1LFJegdjspNx9fI59O7TWcua/W4= +cel.dev/expr v0.19.1/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw= github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU= github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8= -github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= -github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= -github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/CloudyKit/fastprinter v0.0.0-20170127035650-74b38d55f37a/go.mod h1:EFZQ978U7x8IRnstaskI3IysnWY5Ao3QgZUKOXlsAdw= -github.com/CloudyKit/jet 
v2.1.3-0.20180809161101-62edd43e4f88+incompatible/go.mod h1:HPYO+50pSWkPoj9Q/eq0aRGByCL6ScRlUmiEX5Zgm+w= -github.com/Joker/hpp v1.0.0/go.mod h1:8x5n+M1Hp5hC0g8okX3sR3vFQwynaX/UgSOM9MeBKzY= -github.com/Joker/jade v1.0.1-0.20190614124447-d475f43051e7/go.mod h1:6E6s8o2AE4KhCrqr6GRJjdC/gNfTdxkIXvuGZZda2VM= +github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= +github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I= github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= -github.com/Shopify/goreferrer v0.0.0-20181106222321-ec9c9a553398/go.mod h1:a1uqRtAwp2Xwc6WNPJEufxJ7fx3npB4UV/JOLmbu5I0= -github.com/ajg/form v1.5.1/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY= -github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df h1:7RFfzj4SSt6nnvCPbCqijJi1nWCd+TqAT3bYCStRC18= -github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df/go.mod h1:pSwJ0fSY5KhvocuWSx4fz3BA8OrA1bQn+K1Eli3BRwM= -github.com/appscode/go v0.0.0-20200323182826-54e98e09185a h1:cZ80NKoLRaW1PVCWXAJE+YFkBAmLZ8BnrJmH0ClY1Gs= -github.com/appscode/go v0.0.0-20200323182826-54e98e09185a/go.mod h1:lIcm8Z6VPuvcw/a3EeOWcG6R3I13iHMLYbtVP7TKufY= -github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= -github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= +github.com/antlr4-go/antlr/v4 v4.13.1 h1:SqQKkuVZ+zWkMMNkjy5FZe5mr5WURWnlpmOuzYWrPrQ= +github.com/antlr4-go/antlr/v4 v4.13.1/go.mod h1:GKmUxMtwp6ZgGwZSva4eWPC5mS6vUAmOABFgjdkM7Nw= +github.com/appscode/go v0.0.0-20201105063637-5613f3b8169f h1:heDuWjdnY2rJIgLwIQjWPgOc0BUWWX6OGOeB+0t8v/s= +github.com/appscode/go v0.0.0-20201105063637-5613f3b8169f/go.mod h1:piHRpQ9+NTTuV3V98INxjU7o2KlAJMznaxvB6wHKkfU= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= -github.com/aymerick/raymond v2.0.3-0.20180322193309-b565731e1464+incompatible/go.mod h1:osfaiScAUVup+UC9Nfq76eWqDhXlp+4UYaA8uhTBO6g= -github.com/beevik/ntp v0.2.0/go.mod h1:hIHWr+l3+/clUnF44zdK+CWW7fO8dR5cIylAQ76NRpg= +github.com/beevik/ntp v0.3.0/go.mod h1:hIHWr+l3+/clUnF44zdK+CWW7fO8dR5cIylAQ76NRpg= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= github.com/cenkalti/backoff v2.1.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= -github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= -github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.4.1 h1:iKLQ0xPNFxR/2hzXZMrBo8f1j86j5WHzznCCQxV/b8g= github.com/census-instrumentation/opencensus-proto 
v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= -github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= -github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cilium/checkmate v1.0.3 h1:CQC5eOmlAZeEjPrVZY3ZwEBH64lHlx9mXYdUehEwI5w= -github.com/cilium/checkmate v1.0.3/go.mod h1:KiBTasf39/F2hf2yAmHw21YFl3hcEyP4Yk6filxc12A= -github.com/cilium/cilium v1.15.5 h1:AFhWniiqVyQXYfpaPZTRfKdS0pLx+8lCDPp7JpAZqfo= -github.com/cilium/cilium v1.15.5/go.mod h1:hsruyj1KCncND7AyIlbKgHUlk7V+ONxTn3EbrOu39dI= -github.com/cilium/ebpf v0.12.3 h1:8ht6F9MquybnY97at+VDZb3eQQr8ev79RueWeVaEcG4= -github.com/cilium/ebpf v0.12.3/go.mod h1:TctK1ivibvI3znr66ljgi4hqOT8EYQjz1KWBfb1UVgM= -github.com/cilium/proxy v0.0.0-20231031145409-f19708f3d018 h1:R/QlThqx099hS6req1k2Q87fvLSRgCEicQGate9vxO4= -github.com/cilium/proxy v0.0.0-20231031145409-f19708f3d018/go.mod h1:p044XccCmONGIUbx3bJ7qvHXK0RcrdvIvbTGiu/RjUA= -github.com/cncf/xds/go v0.0.0-20231109132714-523115ebc101 h1:7To3pQ+pZo0i3dsWEbinPNFs5gPSBOsJtx3wTT94VBY= -github.com/cncf/xds/go v0.0.0-20231109132714-523115ebc101/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cilium/cilium v1.16.6 h1:KRQn5knO48ERxB6SusQo02nYmE0NO0qiLlvqhwBTXbI= +github.com/cilium/cilium v1.16.6/go.mod h1:NnDWQiYmPef24+pX2U/V85uL8eUTJSFUUjMEy41lGPA= +github.com/cilium/ebpf v0.16.0 h1:+BiEnHL6Z7lXnlGUsXQPPAE7+kenAd4ES8MQ5min0Ok= +github.com/cilium/ebpf v0.16.0/go.mod h1:L7u2Blt2jMM/vLAVgjxluxtBKlz3/GWjB0dMOEngfwE= +github.com/cilium/hive v0.0.0-20241213121623-605c1412b9b3 h1:RfmUH1ouzj0LzORYJRhp43e1rlGpx6GNv4NIRUakU2w= +github.com/cilium/hive v0.0.0-20241213121623-605c1412b9b3/go.mod h1:pI2GJ1n3SLKIQVFrKF7W6A6gb6BQkZ+3Hp4PAEo5SuI= +github.com/cilium/proxy v0.0.0-20241216122539-268a44ec93e9 h1:3m0eujK8+y8cKqkQsLSulES72gFayNgcaGXlpwc6bKY= +github.com/cilium/proxy v0.0.0-20241216122539-268a44ec93e9/go.mod h1:1jlssjN+8AsZeex4+7ERavw5vRa/lce/ybVRamfeQSU= +github.com/cilium/statedb v0.2.4 h1:jCyXGcsiXgpJSfpfRRGKd+TD3U1teeDtOnqCyErsHsI= +github.com/cilium/statedb v0.2.4/go.mod h1:KPwsudjhZ90zoBguYMtssKpstR74jVKd/D+73PZy+sg= +github.com/cilium/stream v0.0.0-20240226091623-f979d32855f8 h1:j6VF1s6gz3etRH5ObCr0UUyJblP9cK5fbgkQTz8fTRA= +github.com/cilium/stream v0.0.0-20240226091623-f979d32855f8/go.mod h1:/e83AwqvNKpyg4n3C41qmnmj1x2G9DwzI+jb7GkF4lI= +github.com/cncf/xds/go v0.0.0-20241213214725-57cfbe6fad57 h1:put7Je9ZyxbHtwr7IqGrW4LLVUupJQ2gbsDshKISSgU= +github.com/cncf/xds/go v0.0.0-20241213214725-57cfbe6fad57/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM= -github.com/codeskyblue/go-sh v0.0.0-20190412065543-76bd3d59ff27/go.mod h1:VQx0hjo2oUeQkQUET7wRwradO6f+fN5jzXgB/zROxxE= -github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= -github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/codeskyblue/go-sh v0.0.0-20200712050446-30169cf553fe/go.mod h1:VQx0hjo2oUeQkQUET7wRwradO6f+fN5jzXgB/zROxxE= github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4= github.com/coreos/go-semver v0.3.1/go.mod 
h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= -github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= @@ -65,128 +52,96 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dgraph-io/badger v1.6.0/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4= -github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= -github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= -github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385/go.mod h1:0vRUJqYpeSZifjYj7uP3BG/gKcuzL9xWVV/Y+cK33KM= -github.com/emicklei/go-restful/v3 v3.11.2 h1:1onLa9DcsMYO9P+CXaL0dStDqQ2EHHXLiz+BtnqkLAU= -github.com/emicklei/go-restful/v3 v3.11.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= -github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA= -github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= -github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw= -github.com/evanphx/json-patch v5.7.0+incompatible h1:vgGkfT/9f8zE6tvSCe74nfpAVDQ2tG6yudJd8LBksgI= -github.com/evanphx/json-patch v5.7.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/fasthttp-contrib/websocket v0.0.0-20160511215533-1f3b11f56072/go.mod h1:duJ4Jxv5lDcvg4QuQr0oowTf7dz4/CR8NtyCooz9HL8= +github.com/emicklei/go-restful/v3 v3.12.1 h1:PJMDIM/ak7btuL8Ex0iYET9hxM3CI2sjZtzpL63nKAU= +github.com/emicklei/go-restful/v3 v3.12.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/envoyproxy/protoc-gen-validate v1.1.0 h1:tntQDh69XqOCOZsDz0lVJQez/2L6Uu2PdjCQwWCJ3bM= +github.com/envoyproxy/protoc-gen-validate v1.1.0/go.mod h1:sXRDRVmzEbkM7CVcM06s9shE/m23dg3wzjl0UWqJ2q4= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= -github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= -github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/flosch/pongo2 v0.0.0-20190707114632-bbf5a6c351f4/go.mod 
h1:T9YF2M40nIgbVgp3rreNmTged+9HrbNTIQf1PsaIiTA= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= -github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= -github.com/gavv/httpexpect v2.0.0+incompatible/go.mod h1:x+9tiU1YnrOvnB725RkpoLv1M62hOWzwo5OXotisrKc= -github.com/getsentry/sentry-go v0.4.0 h1:WqRI2/7EiALbdG9qGB47c0Aks1tdznG5DZd6GSQ1y/8= -github.com/getsentry/sentry-go v0.4.0/go.mod h1:xkGcb82SipKQloDNa5b7hTV4VdEyc2bhwd1/UczP52k= -github.com/gin-contrib/sse v0.0.0-20190301062529-5545eab6dad3/go.mod h1:VJ0WA2NBN22VlZ2dKZQPAPnyWw5XTlK1KymzLKsr59s= -github.com/gin-gonic/gin v1.4.0/go.mod h1:OW2EZn3DO8Ln9oIKOvM++LBO+5UPHJJDH72/q/3rZdM= -github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98= -github.com/go-errors/errors v1.0.1 h1:LUHzmkK3GUKUrL/1gfBUxAHzcev3apQlezX/+O7ma6w= -github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= +github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M= +github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= +github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= +github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= +github.com/getsentry/sentry-go v0.31.1 h1:ELVc0h7gwyhnXHDouXkhqTFSO5oslsRDk0++eyE0KJ4= +github.com/getsentry/sentry-go v0.31.1/go.mod h1:CYNcMMz73YigoHljQRG+qPF+eMq8gG72XcGN/p71BAY= +github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= +github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= -github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/go-logr/zapr v1.2.3 h1:a9vnzlIBPQBBkeaR9IuMUfmVOrQlkoC4YfPoFkX3T7A= -github.com/go-logr/zapr v1.2.3/go.mod h1:eIauM6P8qSvTw5o2ez6UEAfGjQKrxQTl5EoK+Qa2oG4= -github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AEU963A2AYjv4d1V5eVL1CQbEJq6aCNHDDjibzu8= -github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= -github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= -github.com/go-openapi/analysis v0.21.4 h1:ZDFLvSNxpDaomuCueM0BlSXxpANBlFYiBvr+GXrvIHc= -github.com/go-openapi/analysis v0.21.4/go.mod h1:4zQ35W4neeZTqh3ol0rv/O8JBbka9QyAgQRPp9y3pfo= -github.com/go-openapi/errors v0.20.2/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= -github.com/go-openapi/errors v0.20.4 
h1:unTcVm6PispJsMECE3zWgvG4xTiKda1LIR5rCRWLG6M= -github.com/go-openapi/errors v0.20.4/go.mod h1:Z3FlZ4I8jEGxjUK+bugx3on2mIAk4txuAOhlsB1FSgk= -github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonpointer v0.20.2 h1:mQc3nmndL8ZBzStEo3JYF8wzmeWffDH4VbXz58sAx6Q= -github.com/go-openapi/jsonpointer v0.20.2/go.mod h1:bHen+N0u1KEO3YlmqOjTT9Adn1RfD91Ar825/PuiRVs= -github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo= -github.com/go-openapi/jsonreference v0.20.4 h1:bKlDxQxQJgwpUSgOENiMPzCTBVuc7vTdXSSgNeAhojU= -github.com/go-openapi/jsonreference v0.20.4/go.mod h1:5pZJyJP2MnYCpoeoMAql78cCHauHj0V9Lhc506VOpw4= -github.com/go-openapi/loads v0.21.2 h1:r2a/xFIYeZ4Qd2TnGpWDIQNcP80dIaZgf704za8enro= -github.com/go-openapi/loads v0.21.2/go.mod h1:Jq58Os6SSGz0rzh62ptiu8Z31I+OTHqmULx5e/gJbNw= -github.com/go-openapi/runtime v0.26.2 h1:elWyB9MacRzvIVgAZCBJmqTi7hBzU0hlKD4IvfX0Zl0= -github.com/go-openapi/runtime v0.26.2/go.mod h1:O034jyRZ557uJKzngbMDJXkcKJVzXJiymdSfgejrcRw= -github.com/go-openapi/spec v0.20.6/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA= -github.com/go-openapi/spec v0.20.11 h1:J/TzFDLTt4Rcl/l1PmyErvkqlJDncGvPTMnCI39I4gY= -github.com/go-openapi/spec v0.20.11/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA= -github.com/go-openapi/strfmt v0.21.3/go.mod h1:k+RzNO0Da+k3FrrynSNN8F7n/peCmQQqbbXjtDfvmGg= -github.com/go-openapi/strfmt v0.21.9 h1:LnEGOO9qyEC1v22Bzr323M98G13paIUGPU7yeJtG9Xs= -github.com/go-openapi/strfmt v0.21.9/go.mod h1:0k3v301mglEaZRJdDDGSlN6Npq4VMVU69DE0LUyf7uA= -github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= -github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= -github.com/go-openapi/swag v0.22.7 h1:JWrc1uc/P9cSomxfnsFSVWoE1FW6bNbrVPmpQYpCcR8= -github.com/go-openapi/swag v0.22.7/go.mod h1:Gl91UqO+btAM0plGGxHqJcQZ1ZTy6jbmridBTsDy8A0= -github.com/go-openapi/validate v0.22.3 h1:KxG9mu5HBRYbecRb37KRCihvGGtND2aXziBAv0NNfyI= -github.com/go-openapi/validate v0.22.3/go.mod h1:kVxh31KbfsxU8ZyoHaDbLBWU5CnMdqBUEtadQ2G4d5M= -github.com/go-resty/resty/v2 v2.13.1 h1:x+LHXBI2nMB1vqndymf26quycC4aggYJ7DECYbiz03g= -github.com/go-resty/resty/v2 v2.13.1/go.mod h1:GznXlLxkq6Nh4sU59rPmUw3VtgpO3aS96ORAI6Q7d+0= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= -github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo= -github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= -github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM= +github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= +github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= +github.com/go-openapi/analysis v0.23.0 h1:aGday7OWupfMs+LbmLZG4k0MYXIANxcuBTYUC03zFCU= +github.com/go-openapi/analysis v0.23.0/go.mod h1:9mz9ZWaSlV8TvjQHLl2mUW2PbZtemkE8yA5v22ohupo= +github.com/go-openapi/errors v0.22.0 h1:c4xY/OLxUBSTiepAg3j/MHuAv5mJhnf53LLMWFB+u/w= +github.com/go-openapi/errors v0.22.0/go.mod 
h1:J3DmZScxCDufmIMsdOuDHxJbdOGC0xtUynjIx092vXE= +github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= +github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= +github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ= +github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4= +github.com/go-openapi/loads v0.22.0 h1:ECPGd4jX1U6NApCGG1We+uEozOAvXvJSF4nnwHZ8Aco= +github.com/go-openapi/loads v0.22.0/go.mod h1:yLsaTCS92mnSAZX5WWoxszLj0u+Ojl+Zs5Stn1oF+rs= +github.com/go-openapi/runtime v0.28.0 h1:gpPPmWSNGo214l6n8hzdXYhPuJcGtziTOgUpvsFWGIQ= +github.com/go-openapi/runtime v0.28.0/go.mod h1:QN7OzcS+XuYmkQLw05akXk0jRH/eZ3kb18+1KwW9gyc= +github.com/go-openapi/spec v0.21.0 h1:LTVzPc3p/RzRnkQqLRndbAzjY0d0BCL72A6j3CdL9ZY= +github.com/go-openapi/spec v0.21.0/go.mod h1:78u6VdPw81XU44qEWGhtr982gJ5BWg2c0I5XwVMotYk= +github.com/go-openapi/strfmt v0.23.0 h1:nlUS6BCqcnAk0pyhi9Y+kdDVZdZMHfEKQiS4HaMgO/c= +github.com/go-openapi/strfmt v0.23.0/go.mod h1:NrtIpfKtWIygRkKVsxh7XQMDQW5HKQl6S5ik2elW+K4= +github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= +github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= +github.com/go-openapi/validate v0.24.0 h1:LdfDKwNbpB6Vn40xhTdNZAnfLECL81w+VX3BumrGD58= +github.com/go-openapi/validate v0.24.0/go.mod h1:iyeX1sEufmv3nPbBdX3ieNviWnOZaJ1+zquzJEf2BAQ= +github.com/go-quicktest/qt v1.101.0 h1:O1K29Txy5P2OK0dGo59b7b0LR6wKfIhttaAhHUyn7eI= +github.com/go-quicktest/qt v1.101.0/go.mod h1:14Bz/f7NwaXPtdYEgzsx46kqSxVwTbzVZsDC26tQJow= +github.com/go-resty/resty/v2 v2.16.3 h1:zacNT7lt4b8M/io2Ahj6yPypL7bqx9n1iprfQuodV+E= +github.com/go-resty/resty/v2 v2.16.3/go.mod h1:hkJtXbA2iKHzJheXYvQ8snQES5ZLGKMwQ07xAwp/fiA= +github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo= -github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8JmEHVZIycC7hBoQxHH9pNKQORJNozsQ= +github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8/go.mod h1:wcDNUvekVysuuOpQKo3191zZyTpiI6se1N1ULghS0sw= github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1/go.mod 
h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
 github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
-github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/gomodule/redigo v1.7.1-0.20190724094224-574c33c3df38/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4=
-github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
-github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
-github.com/google/cel-go v0.17.7 h1:6ebJFzu1xO2n7TLtN+UBqShGBhlD85bhvglh5DpcfqQ=
-github.com/google/cel-go v0.17.7/go.mod h1:HXZKzB0LXqer5lHHgfWAnlYwJaQBDKMjxjulNQzhwhY=
-github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
-github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
-github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg=
+github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
+github.com/google/cel-go v0.22.1 h1:AfVXx3chM2qwoSbM7Da8g8hX8OVSkBFwX+rz2+PcK40=
+github.com/google/cel-go v0.22.1/go.mod h1:BuznPXXfQDpXKWQ9sPW3TzlAJN5zzFe+i9tIs0yC4s8=
+github.com/google/gnostic-models v0.6.9 h1:MU/8wDLif2qCXZmzncUQ/BOfxWfthHi63KqpoNbWqVw=
+github.com/google/gnostic-models v0.6.9/go.mod h1:CiWsm0s6BSQd1hRn8/QmxqB6BesYcbSZxsz9b0KuDBw=
 github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
 github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
 github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
 github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
+github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8=
+github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU=
 github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
 github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
 github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
 github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8=
 github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo=
-github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec=
-github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo=
+github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
 github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
 github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
-github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
 github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY=
 github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY=
 github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw=
@@ -195,119 +150,79 @@ github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92Bcuy
 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
 github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
 github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg=
-github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
-github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
-github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
-github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
-github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 h1:TmHmbvxPmaegwhDubVz0lICL0J5Ka2vwTzhoePEXsGE=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0/go.mod h1:qztMSjm835F2bXf+5HKAPIS5qsmQDqZna/PgVt4rWtI=
+github.com/hashicorp/hcl v1.0.1-vault-7 h1:ag5OxFVy3QYTFTJODRzTKVZ6xvdfLLCA1cy/Y6xGI0I=
+github.com/hashicorp/hcl v1.0.1-vault-7/go.mod h1:XYhtn6ijBSAj6n4YqAaf7RBPS4I06AItNorpy+MoQNM=
 github.com/howeyc/gopass v0.0.0-20170109162249-bf9dde6d0d2c/go.mod h1:lADxMC39cJJqL93Duh1xhAs4I2Zs8mKS89XWXFGp9cs=
-github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
-github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
-github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
-github.com/imkira/go-interpol v1.1.0/go.mod h1:z0h2/2T3XF8kyEPpRgJ3kmNv+C43p+I/CoI+jC3w2iA=
 github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
 github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
 github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
-github.com/iris-contrib/blackfriday v2.0.0+incompatible/go.mod h1:UzZ2bDEoaSGPbkg6SAB4att1aAwTmVIx/5gCVqeyUdI=
-github.com/iris-contrib/go.uuid v2.0.0+incompatible/go.mod h1:iz2lgM/1UnEf1kP0L/+fafWORmlnuysV2EMP8MW+qe0=
-github.com/iris-contrib/i18n v0.0.0-20171121225848-987a633949d0/go.mod h1:pMCz62A0xJL6I+umB2YTlFRwWXaDFA0jy+5HzGiJjqI=
-github.com/iris-contrib/schema v0.0.1/go.mod h1:urYA3uvUNG1TIIjOSCzHr9/LmbQo8LrOcOqfqxa4hXw=
 github.com/jarcoal/httpmock v1.3.1 h1:iUx3whfZWVf3jT01hQTO/Eo5sAYtB2/rqaUuOtpInww=
 github.com/jarcoal/httpmock v1.3.1/go.mod h1:3yb8rc4BI7TCBhFY8ng0gjuLKJNquuDNiPaZjnENuYg=
-github.com/jonboulle/clockwork v0.2.2 h1:UOGuzwb1PwsrDAObMuhUnj0p5ULPj8V/xJ7Kx9qUBdQ=
-github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8=
+github.com/jonboulle/clockwork v0.4.0 h1:p4Cf1aMWXnXAUh8lVfewRBx1zaTSYKrKMF2g3ST4RZ4=
+github.com/jonboulle/clockwork v0.4.0/go.mod h1:xgRqUGwRcjKCO1vbZUEtSLrqKoPSsUpK7fnezOII0kc=
 github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
 github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
+github.com/josharian/native v1.1.0 h1:uuaP0hAbW7Y4l0ZRQ6C9zfb7Mg1mbFKry/xzDAfmtLA=
+github.com/josharian/native v1.1.0/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w=
+github.com/jsimonetti/rtnetlink/v2 v2.0.1 h1:xda7qaHDSVOsADNouv7ukSuicKZO7GgVUCXxpaIEIlM=
+github.com/jsimonetti/rtnetlink/v2 v2.0.1/go.mod h1:7MoNYNbb3UaDHtF8udiJo/RH6VsTKP1pqKLUTVCvToE=
 github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
 github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
 github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
 github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
-github.com/juju/errors v0.0.0-20181118221551-089d3ea4e4d5/go.mod h1:W54LbzXuIE0boCoNJfwqpmkKJ1O4TCTZMetAt6jGk7Q=
-github.com/juju/loggo v0.0.0-20180524022052-584905176618/go.mod h1:vgyd7OREkbtVEN/8IXZe5Ooef3LQePvuBm9UWj6ZL8U=
-github.com/juju/testing v0.0.0-20180920084828-472a3e8b2073/go.mod h1:63prj8cnj0tU0S9OHjGJn+b1h0ZghCndfnbQolrYTwA=
-github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k=
-github.com/kataras/golog v0.0.9/go.mod h1:12HJgwBIZFNGL0EJnMRhmvGA0PQGx8VFwrZtM4CqbAk=
-github.com/kataras/iris/v12 v12.0.1/go.mod h1:udK4vLQKkdDqMGJJVd/msuMtN6hpYJhg/lSzuxjhO+U=
-github.com/kataras/neffos v0.0.10/go.mod h1:ZYmJC07hQPW67eKuzlfY7SO3bC0mw83A3j6im82hfqw=
-github.com/kataras/pio v0.0.0-20190103105442-ea782b38602d/go.mod h1:NV88laa9UiiDuX9AhMbDPkGYSPugBOV6yTZB1l2K9Z0=
 github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.8.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
-github.com/klauspost/compress v1.9.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
-github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
-github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
-github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
+github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
 github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
 github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
-github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
-github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
 github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
 github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
-github.com/labstack/echo/v4 v4.1.11/go.mod h1:i541M3Fj6f76NZtHSj7TXnyM8n2gaodfvfxNnFqi74g=
-github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k=
-github.com/linode/linodego v1.36.2 h1:F5ZIXjLq0Zyyy0Kk9UgD/OffUTBx4TQuCkjMSYbqeVA=
-github.com/linode/linodego v1.36.2/go.mod h1:KyV4OO/9/tAxaLSjyjFyOQBcS9bYUdei1hwk3nl0UjI=
-github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4=
-github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I=
-github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
-github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY=
-github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
-github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
-github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
-github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
-github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
-github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
+github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
+github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
+github.com/linode/linodego v1.46.0 h1:+uOG4SD2MIrhbrLrvOD5HrbdLN3D19Wgn3MgdUNQjeU=
+github.com/linode/linodego v1.46.0/go.mod h1:vyklQRzZUWhFVBZdYx4dcYJU/gG9yKB9VUcUs6ub0Lk=
+github.com/mackerelio/go-osstat v0.2.5 h1:+MqTbZUhoIt4m8qzkVoXUJg1EuifwlAJSk4Yl2GXh+o=
+github.com/mackerelio/go-osstat v0.2.5/go.mod h1:atxwWF+POUZcdtR1wnsUcQxTytoHG4uhl2AKKzrOajY=
+github.com/magiconair/properties v1.8.9 h1:nWcCbLq1N2v/cpNsy5WvQ37Fb+YElfq20WJ/a8RkpQM=
+github.com/magiconair/properties v1.8.9/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
+github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4=
+github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU=
 github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
-github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
 github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
-github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
-github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw=
-github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg=
-github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k=
-github.com/mediocregopher/mediocre-go-lib v0.0.0-20181029021733-cb65787f37ed/go.mod h1:dSsfyI2zABAdhcbvkXqgxOxrCsbYeHCPgrZkku60dSg=
-github.com/mediocregopher/radix/v3 v3.3.0/go.mod h1:EmfVyvspXz1uZEyPBMyGK+kjWiKQGvsUt6O3Pj+LDCQ=
-github.com/microcosm-cc/bluemonday v1.0.2/go.mod h1:iVP4YcDBq+n/5fb23BhYFvIMq/leAFZyRl6bYmGDlGc=
-github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
-github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
-github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
-github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/mdlayher/netlink v1.7.2 h1:/UtM3ofJap7Vl4QWCPDGXY8d3GIY2UGSDbK+QWmY8/g=
+github.com/mdlayher/netlink v1.7.2/go.mod h1:xraEF7uJbxLhc5fpHL4cPe221LI2bdttWlU+ZGLfQSw=
+github.com/mdlayher/socket v0.4.1 h1:eM9y2/jlbs1M615oshPQOHZzj6R6wMT7bX5NPiQvn2U=
+github.com/mdlayher/socket v0.4.1/go.mod h1:cAqeGjoufqdxWkD7DkpyS+wcefOtmu5OQ8KuoJGIReA=
 github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
 github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
-github.com/moby/term v0.0.0-20221205130635-1aeaba878587 h1:HfkjXDfhgVaN5rmueG8cL8KKeFNecRCXFhaJ2qZ5SKA=
-github.com/moby/term v0.0.0-20221205130635-1aeaba878587/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=
+github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=
+github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=
 github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
 github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
 github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
 github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
 github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
 github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
-github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc=
 github.com/moul/http2curl v1.0.0/go.mod h1:8UbvGypXm98wA/IqH45anm5Y2Z6ep6O31QGOAZ3H0fQ=
 github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
 github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
-github.com/nats-io/nats.go v1.8.1/go.mod h1:BrFz9vVn0fU3AcH9Vn4Kd7W0NpJ651tD5omQ3M8LwxM=
-github.com/nats-io/nkeys v0.0.2/go.mod h1:dab7URMsZm6Z/jp9Z5UGa87Uutgc2mVpXLC4B7TDb/4=
-github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
-github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
 github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=
 github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
-github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
 github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
-github.com/onsi/ginkgo/v2 v2.13.0 h1:0jY9lJquiL8fcf3M4LAXN5aMlS/b2BV86HFFPCPMgE4=
-github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o=
-github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
-github.com/onsi/gomega v1.30.0 h1:hvMK7xYz4D3HapigLTeGdId/NcfQx1VHMJc60ew99+8=
-github.com/onsi/gomega v1.30.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ=
+github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM=
+github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
+github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4=
+github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog=
 github.com/opentracing/opentracing-go v1.2.1-0.20220228012449-10b1cf09e00b h1:FfH+VrHHk6Lxt9HdVS0PXzSXFyS2NbZKXv33FYPol0A=
 github.com/opentracing/opentracing-go v1.2.1-0.20220228012449-10b1cf09e00b/go.mod h1:AC62GU6hc0BrNm+9RK9VSiwa/EUe1bkIeFORAMcHvJU=
-github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
-github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4=
-github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc=
-github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 h1:q2e307iGHPdTGp0hoxKjt1H5pDo6utceo3dQVK3I5XQ=
-github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5/go.mod h1:jvVRKCrJTQWu0XVbaOlby/2lO20uSCHEMzzplHXte1o=
+github.com/pelletier/go-toml/v2 v2.2.3 h1:YmeHyLY8mFWbdkNWwpr+qIL2bEqT0o95WSdkNHvL12M=
+github.com/pelletier/go-toml/v2 v2.2.3/go.mod h1:MfCQTFTvCcUyyvvwm1+G6H/jORL20Xlb6rzQu9GuUkc=
+github.com/petermattis/goid v0.0.0-20240813172612-4fcff4a6cae7/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4=
+github.com/petermattis/goid v0.0.0-20241211131331-93ee7e083c43 h1:ah1dvbqPMN5+ocrg/ZSgZ6k8bOk+kcZQ7fnyx6UvOm4=
+github.com/petermattis/goid v0.0.0-20241211131331-93ee7e083c43/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4=
 github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4=
 github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8=
 github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
@@ -317,357 +232,248 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
 github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw=
-github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
-github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q=
-github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY=
-github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw=
-github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI=
-github.com/prometheus/common v0.45.0 h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lneoxM=
-github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY=
-github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
-github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
+github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y=
+github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
+github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
+github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
+github.com/prometheus/common v0.61.0 h1:3gv/GThfX0cV2lpO7gkTUwZru38mxevy90Bj8YFSRQQ=
+github.com/prometheus/common v0.61.0/go.mod h1:zr29OCN/2BsJRaFwG8QOBr41D6kkchKbpeNH7pAjb/s=
+github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
+github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
 github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
-github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
-github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA=
-github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
+github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
+github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
-github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ=
-github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4=
+github.com/sagikazarmark/locafero v0.6.0 h1:ON7AQg37yzcRPU69mt7gwhFEBwxI6P9T4Qu3N51bwOk=
+github.com/sagikazarmark/locafero v0.6.0/go.mod h1:77OmuIc6VTraTXKXIs/uvUxKGUXjE1GbemJYHqdNjX0=
 github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE=
 github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ=
-github.com/sasha-s/go-deadlock v0.3.1 h1:sqv7fDNShgjcaxkO0JNcOAlr8B9+cV5Ey/OB71efZx0=
-github.com/sasha-s/go-deadlock v0.3.1/go.mod h1:F73l+cr82YSh10GxyRI6qZiCgK64VaZjwesgfQ1/iLM=
-github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
-github.com/shirou/gopsutil/v3 v3.23.2 h1:PAWSuiAszn7IhPMBtXsbSCafej7PqUOvY6YywlQUExU=
-github.com/shirou/gopsutil/v3 v3.23.2/go.mod h1:gv0aQw33GLo3pG8SiWKiQrbDzbRY1K80RyZJ7V4Th1M=
-github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
+github.com/sasha-s/go-deadlock v0.3.5 h1:tNCOEEDG6tBqrNDOX35j/7hL5FcFViG6awUGROb2NsU=
+github.com/sasha-s/go-deadlock v0.3.5/go.mod h1:bugP6EGbdGYObIlx7pUZtWqlvo8k9H6vCBBsiChJQ5U=
 github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
 github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
 github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
 github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
-github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
 github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js=
 github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0=
 github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=
 github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0=
-github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
 github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8=
 github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY=
-github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
-github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0=
-github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
+github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w=
+github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
 github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
-github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
-github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0=
-github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho=
-github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
+github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
+github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
 github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
-github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
 github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
-github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
-github.com/spf13/viper v1.18.1 h1:rmuU42rScKWlhhJDyXZRKJQHXFX02chSVW1IvkPGiVM=
-github.com/spf13/viper v1.18.1/go.mod h1:EKmWIqdnk5lOcmR72yw6hS+8OPYcwD0jteitLMVB+yk=
-github.com/stoewer/go-strcase v1.2.0 h1:Z2iHWqGXH00XYgqDmNgQbIBxf3wrNq0F3feEy0ainaU=
-github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
+github.com/spf13/pflag v1.0.6-0.20210604193023-d5e0c0615ace h1:9PNP1jnUjRhfmGMlkXHjYPishpcw4jpSt/V/xYY3FMA=
+github.com/spf13/pflag v1.0.6-0.20210604193023-d5e0c0615ace/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/spf13/viper v1.19.0 h1:RWq5SEjt8o25SROyN3z2OrDB9l7RPd3lwTWU8EcEdcI=
+github.com/spf13/viper v1.19.0/go.mod h1:GQUN9bilAbhU/jgc1bKs99f/suXKeUMct8Adx5+Ntkg=
+github.com/stoewer/go-strcase v1.3.0 h1:g0eASXYtp+yvN9fK8sH94oCIk0fau9uV1/ZdJ0AVEzs=
+github.com/stoewer/go-strcase v1.3.0/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo=
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
 github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
-github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
 github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
-github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
-github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
-github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
 github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
 github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
 github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
-github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
-github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
-github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
-github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
+github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
+github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
 github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
 github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
-github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
-github.com/tklauser/go-sysconf v0.3.11 h1:89WgdJhk5SNwJfu+GKyYveZ4IaJ7xAkecBo+KdJV0CM=
-github.com/tklauser/go-sysconf v0.3.11/go.mod h1:GqXfhXY3kiPa0nAXPDIQIWzJbMCB7AmcWpGR8lSZfqI=
-github.com/tklauser/numcpus v0.6.0 h1:kebhY2Qt+3U6RNK7UqpYNA+tJ23IBEGKkB7JQBfDYms=
-github.com/tklauser/numcpus v0.6.0/go.mod h1:FEZLMke0lhOUG6w2JadTzp0a+Nl8PF/GFkQ5UVIcaL4=
 github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 h1:6fotK7otjonDflCTK0BCfls4SPy3NcCVb5dqqmbRknE=
 github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75/go.mod h1:KO6IkyS8Y3j8OdNO85qEYBsRPuteD+YciPomcXdrMnk=
-github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
-github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw=
-github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
-github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY=
-github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4=
-github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
-github.com/valyala/fasthttp v1.6.0/go.mod h1:FstJa9V+Pj9vQ7OJie2qMHdwemEDaDiSdBnvPM1Su9w=
-github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8=
-github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio=
-github.com/vishvananda/netlink v1.2.1-beta.2.0.20231127184239-0ced8385386a h1:PdKmLjqKUM8AfjGqDbrF/C56RvuGFDMYB0Z+8TMmGpU=
-github.com/vishvananda/netlink v1.2.1-beta.2.0.20231127184239-0ced8385386a/go.mod h1:whJevzBpTrid75eZy99s3DqCmy05NfibNaF2Ol5Ox5A=
-github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
-github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1YX8=
+github.com/vishvananda/netlink v1.3.1-0.20241022031324-976bd8de7d81 h1:9fkQcQYvtTr9ayFXuMfDMVuDt4+BYG9FwsGLnrBde0M=
+github.com/vishvananda/netlink v1.3.1-0.20241022031324-976bd8de7d81/go.mod h1:i6NetklAujEcC6fK0JPjT8qSwWyO0HLn4UKG+hGqeJs=
 github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM=
-github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
-github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g=
-github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4=
-github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8=
-github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM=
-github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
-github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
-github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
-github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8=
-github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
-github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
-github.com/yalp/jsonpath v0.0.0-20180802001716-5cc68e5049a0/go.mod h1:/LWChgwKmvncFJFHJ7Gvn9wZArjbV5/FppcK2fKk/tI=
-github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA=
-github.com/yudai/gojsondiff v1.0.0/go.mod h1:AY32+k2cwILAkW1fbgxQ5mUmMiZFgLIV+FBNExI05xg=
-github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82/go.mod h1:lgjkn3NuSvDfVJdfcVVdX+jpBxNmX4rDAzaS45IcYoM=
-github.com/yudai/pp v2.0.1+incompatible/go.mod h1:PuxR/8QJ7cyCkFp/aUDS+JY727OFEZkTdatxwunjIkc=
+github.com/vishvananda/netns v0.0.5 h1:DfiHV+j8bA32MFM7bfEunvT8IAqQ/NzSJHtcmW5zdEY=
+github.com/vishvananda/netns v0.0.5/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM=
+github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
+github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
+github.com/xiang90/probing v0.0.0-20221125231312-a49e3df8f510 h1:S2dVYn90KE98chqDkyE9Z4N61UnQd+KOfgp5Iu53llk=
+github.com/xiang90/probing v0.0.0-20221125231312-a49e3df8f510/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
 github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
-github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
-github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
-github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw=
-github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
-go.etcd.io/bbolt v1.3.8 h1:xs88BrvEv273UsB79e0hcVrlUWmS0a8upikMFhSyAtA=
-go.etcd.io/bbolt v1.3.8/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw=
-go.etcd.io/etcd/api/v3 v3.5.11 h1:B54KwXbWDHyD3XYAwprxNzTe7vlhR69LuBgZnMVvS7E=
-go.etcd.io/etcd/api/v3 v3.5.11/go.mod h1:Ot+o0SWSyT6uHhA56al1oCED0JImsRiU9Dc26+C2a+4=
-go.etcd.io/etcd/client/pkg/v3 v3.5.11 h1:bT2xVspdiCj2910T0V+/KHcVKjkUrCZVtk8J2JF2z1A=
-go.etcd.io/etcd/client/pkg/v3 v3.5.11/go.mod h1:seTzl2d9APP8R5Y2hFL3NVlD6qC/dOT+3kvrqPyTas4=
-go.etcd.io/etcd/client/v2 v2.305.10 h1:MrmRktzv/XF8CvtQt+P6wLUlURaNpSDJHFZhe//2QE4=
-go.etcd.io/etcd/client/v2 v2.305.10/go.mod h1:m3CKZi69HzilhVqtPDcjhSGp+kA1OmbNn0qamH80xjA=
-go.etcd.io/etcd/client/v3 v3.5.11 h1:ajWtgoNSZJ1gmS8k+icvPtqsqEav+iUorF7b0qozgUU=
-go.etcd.io/etcd/client/v3 v3.5.11/go.mod h1:a6xQUEqFJ8vztO1agJh/KQKOMfFI8og52ZconzcDJwE=
-go.etcd.io/etcd/pkg/v3 v3.5.10 h1:WPR8K0e9kWl1gAhB5A7gEa5ZBTNkT9NdNWrR8Qpo1CM=
-go.etcd.io/etcd/pkg/v3 v3.5.10/go.mod h1:TKTuCKKcF1zxmfKWDkfz5qqYaE3JncKKZPFf8c1nFUs=
-go.etcd.io/etcd/raft/v3 v3.5.10 h1:cgNAYe7xrsrn/5kXMSaH8kM/Ky8mAdMqGOxyYwpP0LA=
-go.etcd.io/etcd/raft/v3 v3.5.10/go.mod h1:odD6kr8XQXTy9oQnyMPBOr0TVe+gT0neQhElQ6jbGRc=
-go.etcd.io/etcd/server/v3 v3.5.10 h1:4NOGyOwD5sUZ22PiWYKmfxqoeh72z6EhYjNosKGLmZg=
-go.etcd.io/etcd/server/v3 v3.5.10/go.mod h1:gBplPHfs6YI0L+RpGkTQO7buDbHv5HJGG/Bst0/zIPo=
-go.mongodb.org/mongo-driver v1.10.0/go.mod h1:wsihk0Kdgv8Kqu1Anit4sfK+22vSFbUrAVEYRhCXrA8=
-go.mongodb.org/mongo-driver v1.13.1 h1:YIc7HTYsKndGK4RFzJ3covLz1byri52x0IoMB0Pt/vk=
-go.mongodb.org/mongo-driver v1.13.1/go.mod h1:wcDf1JBCXy2mOW0bWHwO/IOYqdca1MPCwDtFu/Z9+eo=
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.42.0 h1:ZOLJc06r4CB42laIXg/7udr0pbZyuAihN10A/XuiQRY=
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.42.0/go.mod h1:5z+/ZWJQKXa9YT34fQNx5K8Hd1EoIhvtUygUQPqEOgQ=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0 h1:KfYpVmrjI7JuToy5k8XV3nkapjWx48k4E4JOtVstzQI=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0/go.mod h1:SeQhzAEccGVZVEy7aH87Nh0km+utSpo1pTv6eMMop48=
-go.opentelemetry.io/otel v1.21.0 h1:hzLeKBZEL7Okw2mGzZ0cc4k/A7Fta0uoPgaJCr8fsFc=
-go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 h1:Mne5On7VWdx7omSrSSZvM4Kw7cS7NQkOOmLcgscI51U=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 h1:3d+S281UTjM+AbF31XSOYn1qXn3BgIdWl8HNEpx08Jk=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0/go.mod h1:0+KuTDyKL4gjKCF75pHOX4wuzYDUZYfAQdSu43o+Z2I=
-go.opentelemetry.io/otel/metric v1.21.0 h1:tlYWfeo+Bocx5kLEloTjbcDwBuELRrIFxwdQ36PlJu4=
-go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM=
-go.opentelemetry.io/otel/sdk v1.21.0 h1:FTt8qirL1EysG6sTQRZ5TokkU8d0ugCj8htOgThZXQ8=
-go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E=
-go.opentelemetry.io/otel/trace v1.21.0 h1:WD9i5gzvoUPuXIXH24ZNBudiarZDKuekPqi/E8fpfLc=
-go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ=
-go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I=
-go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM=
-go.uber.org/dig v1.17.1 h1:Tga8Lz8PcYNsWsyHMZ1Vm0OQOUaJNDyvPImgbAu9YSc=
-go.uber.org/dig v1.17.1/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE=
+go.etcd.io/bbolt v1.3.11 h1:yGEzV1wPz2yVCLsD8ZAiGHhHVlczyC9d1rP43/VCRJ0=
+go.etcd.io/bbolt v1.3.11/go.mod h1:dksAq7YMXoljX0xu6VF5DMZGbhYYoLUalEiSySYAS4I=
+go.etcd.io/etcd/api/v3 v3.5.17 h1:cQB8eb8bxwuxOilBpMJAEo8fAONyrdXTHUNcMd8yT1w=
+go.etcd.io/etcd/api/v3 v3.5.17/go.mod h1:d1hvkRuXkts6PmaYk2Vrgqbv7H4ADfAKhyJqHNLJCB4=
+go.etcd.io/etcd/client/pkg/v3 v3.5.17 h1:XxnDXAWq2pnxqx76ljWwiQ9jylbpC4rvkAeRVOUKKVw=
+go.etcd.io/etcd/client/pkg/v3 v3.5.17/go.mod h1:4DqK1TKacp/86nJk4FLQqo6Mn2vvQFBmruW3pP14H/w=
+go.etcd.io/etcd/client/v2 v2.305.16 h1:kQrn9o5czVNaukf2A2At43cE9ZtWauOtf9vRZuiKXow=
+go.etcd.io/etcd/client/v2 v2.305.16/go.mod h1:h9YxWCzcdvZENbfzBTFCnoNumr2ax3F19sKMqHFmXHE=
+go.etcd.io/etcd/client/v3 v3.5.17 h1:o48sINNeWz5+pjy/Z0+HKpj/xSnBkuVhVvXkjEXbqZY=
+go.etcd.io/etcd/client/v3 v3.5.17/go.mod h1:j2d4eXTHWkT2ClBgnnEPm/Wuu7jsqku41v9DZ3OtjQo=
+go.etcd.io/etcd/pkg/v3 v3.5.16 h1:cnavs5WSPWeK4TYwPYfmcr3Joz9BH+TZ6qoUtz6/+mc=
+go.etcd.io/etcd/pkg/v3 v3.5.16/go.mod h1:+lutCZHG5MBBFI/U4eYT5yL7sJfnexsoM20Y0t2uNuY=
+go.etcd.io/etcd/raft/v3 v3.5.16 h1:zBXA3ZUpYs1AwiLGPafYAKKl/CORn/uaxYDwlNwndAk=
+go.etcd.io/etcd/raft/v3 v3.5.16/go.mod h1:P4UP14AxofMJ/54boWilabqqWoW9eLodl6I5GdGzazI=
+go.etcd.io/etcd/server/v3 v3.5.16 h1:d0/SAdJ3vVsZvF8IFVb1k8zqMZ+heGcNfft71ul9GWE=
+go.etcd.io/etcd/server/v3 v3.5.16/go.mod h1:ynhyZZpdDp1Gq49jkUg5mfkDWZwXnn3eIqCqtJnrD/s=
+go.mongodb.org/mongo-driver v1.17.1 h1:Wic5cJIwJgSpBhe3lx3+/RybR5PiYRMpVFgO7cOHyIM=
+go.mongodb.org/mongo-driver v1.17.1/go.mod h1:wwWm/+BuOddhcq3n68LKRmgk2wXzmF6s0SFOa0GINL4=
+go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
+go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0 h1:PS8wXpbyaDJQ2VDHHncMe9Vct0Zn1fEjpsjrLxGJoSc=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0/go.mod h1:HDBUsEjOuRC0EzKZ1bSaRGZWUBAzo+MhAcUUORSr4D0=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 h1:yd02MEjBdJkG3uabWP9apV+OuWRIXGDuJEUJbOHmCFU=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0/go.mod h1:umTcuxiv1n/s/S6/c2AT/g2CQ7u5C59sHDNmfSwgz7Q=
+go.opentelemetry.io/otel v1.33.0 h1:/FerN9bax5LoK51X/sI0SVYrjSE0/yUL7DpxW4K3FWw=
+go.opentelemetry.io/otel v1.33.0/go.mod h1:SUUkR6csvUQl+yjReHu5uM3EtVV7MBm5FHKRlNx4I8I=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 h1:Vh5HayB/0HHfOQA7Ctx69E/Y/DcQSMPpKANYVMQ7fBA=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0/go.mod h1:cpgtDBaqD/6ok/UG0jT15/uKjAY8mRA53diogHBg3UI=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0 h1:5pojmb1U1AogINhN3SurB+zm/nIcusopeBNp42f45QM=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0/go.mod h1:57gTHJSE5S1tqg+EKsLPlTWhpHMsWlVmer+LA926XiA=
+go.opentelemetry.io/otel/metric v1.33.0 h1:r+JOocAyeRVXD8lZpjdQjzMadVZp2M4WmQ+5WtEnklQ=
+go.opentelemetry.io/otel/metric v1.33.0/go.mod h1:L9+Fyctbp6HFTddIxClbQkjtubW6O9QS3Ann/M82u6M=
+go.opentelemetry.io/otel/sdk v1.33.0 h1:iax7M131HuAm9QkZotNHEfstof92xM+N8sr3uHXc2IM=
+go.opentelemetry.io/otel/sdk v1.33.0/go.mod h1:A1Q5oi7/9XaMlIWzPSxLRWOI8nG3FnzHJNbiENQuihM=
+go.opentelemetry.io/otel/sdk/metric v1.31.0 h1:i9hxxLJF/9kkvfHppyLL55aW7iIJz4JjxTeYusH7zMc=
+go.opentelemetry.io/otel/sdk/metric v1.31.0/go.mod h1:CRInTMVvNhUKgSAMbKyTMxqOBC0zgyxzW55lZzX43Y8=
+go.opentelemetry.io/otel/trace v1.33.0 h1:cCJuF7LRjUFso9LPnEAHJDB2pqzp+hbO8eu1qqW2d/s=
+go.opentelemetry.io/otel/trace v1.33.0/go.mod h1:uIcdVUZMpTAmz0tI1z04GoVSezK37CbGV4fr1f2nBck=
+go.opentelemetry.io/proto/otlp v1.4.0 h1:TA9WRvW6zMwP+Ssb6fLoUIuirti1gGbP28GcKG1jgeg=
+go.opentelemetry.io/proto/otlp v1.4.0/go.mod h1:PPBWZIP98o2ElSqI35IHfu7hIhSwvc5N38Jw8pXuGFY=
+go.uber.org/dig v1.18.0 h1:imUL1UiY0Mg4bqbFfsRQO5G4CGRBec/ZujWTvSVp3pw=
+go.uber.org/dig v1.18.0/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE=
 go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
 go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
 go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
 go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
-go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo=
-go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so=
+go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
+go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
 go4.org/netipx v0.0.0-20231129151722-fdeea329fbba h1:0b9z3AuHCjxk0x/opv64kcgZLBseWJUpBw5I82+2U4M=
 go4.org/netipx v0.0.0-20231129151722-fdeea329fbba/go.mod h1:PLyyIXexvUFg3Owu6p/WfdlivPbZJsZdgWZlrGope/Y=
-golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
 golang.org/x/crypto v0.0.0-20190422183909-d864b10871cd/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
-golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
-golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
-golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
-golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI=
-golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM=
-golang.org/x/exp v0.0.0-20240119083558-1b970713d09a h1:Q8/wZp0KX97QFTc2ywcOE0YRjZPVIx+MXInMzdvQqcA=
-golang.org/x/exp v0.0.0-20240119083558-1b970713d09a/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08=
+golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc=
+golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc=
+golang.org/x/exp v0.0.0-20241215155358-4a5509556b9e h1:4qufH0hlUYs6AO6XmZC3GqfDPGSXHVXUFR6OND+iJX4=
+golang.org/x/exp v0.0.0-20241215155358-4a5509556b9e/go.mod h1:qj5a5QZpwLU2NLQudwIN5koi3beDhSAlJwa67PuM98c=
 golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
 golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
 golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
-golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
-golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA=
-golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
-golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4=
+golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
 golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190327091125-710a502c58a2/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
 golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
 golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
 golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
-golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
-golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
-golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
-golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
-golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
-golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ=
-golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE=
-golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs=
-golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
-golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0=
+golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k=
+golang.org/x/oauth2 v0.25.0 h1:CY4y7XT9v0cRI9oupztF8AgiIu99L/ksR/Xp/6jrZ70=
+golang.org/x/oauth2 v0.25.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
 golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
-golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
-golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
+golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws=
-golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU=
+golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
-golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
-golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
-golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
-golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY=
-golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA=
-golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0=
+golang.org/x/term v0.28.0 h1:/Ts8HFuMR2E6IP/jlo7QVLZHggjKQbhu/7H0LJFr3Gg=
+golang.org/x/term v0.28.0/go.mod h1:Sw/lC2IAUZ92udQNf3WodGtn4k/XoLyZoh8v/8uiwek=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
-golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
-golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
-golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
-golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
-golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
-golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
-golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
-golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
-golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
+golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
+golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
+golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg=
+golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20181221001348-537d06c36207/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190327201419-c70d86f8b7cf/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
 golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
 golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
 golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
 golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
 golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
 golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
-golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
-golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
-golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg=
-golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
+golang.org/x/tools v0.28.0 h1:WuB6qZ4RPCQo5aP3WdKZS7i595EdWqWR8vqJTlwTVK8=
+golang.org/x/tools v0.28.0/go.mod h1:dcIOrVd3mfQKTgrDVQHqCPMWy6lnhfhtX3hLXYVLfRw=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+gomodules.xyz/password-generator v0.2.4/go.mod h1:TvwYYTx9+P1pPwKQKfZgB/wr2Id9MqAQ3B5auY7reNg=
 gomodules.xyz/version v0.1.0/go.mod h1:Y8xuV02mL/45psyPKG3NCVOwvAOy6T5Kx0l3rCjKSjU=
-google.golang.org/genproto v0.0.0-20231120223509-83a465c0220f h1:Vn+VyHU5guc9KjB5KrjI2q0wCOWEOIh0OEsleqakHJg=
-google.golang.org/genproto v0.0.0-20231120223509-83a465c0220f/go.mod h1:nWSwAFPb+qfNJXsoeO3Io7zf4tMSfN8EA8RlDA04GhY=
-google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17 h1:JpwMPBpFN3uKhdaekDpiNlImDdkUAyiJ6ez/uxGaUSo=
-google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:0xJLfVdJqpAPl8tDg1ujOCGzx6LFLttXT5NhllGOXY4=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4 h1:DC7wcm+i+P1rN3Ff07vL+OndGg5OhNddHyTA+ocPqYE=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4/go.mod h1:eJVxU6o+4G1PSczBr85xmyvSNYAKvAYgkub40YGomFM=
-google.golang.org/grpc v1.61.0 h1:TOvOcuXn30kRao+gfcvsebNEa5iZIiLkisYEkf7R7o0=
-google.golang.org/grpc v1.61.0/go.mod h1:VUbo7IFqmF1QtCAstipjG0GIoq49KvMe9+h1jFLBNJs=
-google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
-google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
+google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9 h1:9+tzLLstTlPTRyJTh+ah5wIMsBW5c4tQwGTN3thOW9Y=
+google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9/go.mod h1:mqHbVIp48Muh7Ywss/AD6I5kNVKZMmAa/QEW58Gxp2s=
+google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 h1:CkkIfIt50+lT6NHAVoRYEyAvQGFM7xEwXUUywFvEb3Q=
+google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576/go.mod h1:1R3kvZ1dtP3+4p4d3G8uJ8rFk/fWlScl38vanWACI08=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576 h1:8ZmaLZE4XWrtU3MyClkYqqtl6Oegr3235h7jxsDyqCY=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576/go.mod h1:5uTbfoYQed2U9p3KIj2/Zzm02PYhndfdmML0qC3q3FU=
+google.golang.org/grpc v1.69.0 h1:quSiOM1GJPmPH5XtU+BCoVXcDVJJAzNcoyfC2cCjGkI=
+google.golang.org/grpc v1.69.0/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4=
+google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io=
+google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
-gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
-gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE=
-gopkg.in/go-playground/validator.v8 v8.18.2/go.mod h1:RX2a/7Ha8BgOhfk7j780h4/u/RRjR0eouCJSH80/M2Y=
+gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4=
+gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=
 gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
 gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
 gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
 gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
-gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA=
 gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc=
 gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc=
-gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
 gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
-gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
 gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
 gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-k8s.io/api v0.29.3 h1:2ORfZ7+bGC3YJqGpV0KSDDEVf8hdGQ6A03/50vj8pmw=
-k8s.io/api v0.29.3/go.mod h1:y2yg2NTyHUUkIoTC+phinTnEa3KFM6RZ3szxt014a80=
-k8s.io/apiextensions-apiserver v0.29.3 h1:9HF+EtZaVpFjStakF4yVufnXGPRppWFEQ87qnO91YeI=
-k8s.io/apiextensions-apiserver v0.29.3/go.mod h1:po0XiY5scnpJfFizNGo6puNU6Fq6D70UJY2Cb2KwAVc=
-k8s.io/apimachinery v0.29.3 h1:2tbx+5L7RNvqJjn7RIuIKu9XTsIZ9Z5wX2G22XAa5EU=
-k8s.io/apimachinery v0.29.3/go.mod h1:hx/S4V2PNW4OMg3WizRrHutyB5la0iCUbZym+W0EQIU=
-k8s.io/apiserver v0.29.3 h1:xR7ELlJ/BZSr2n4CnD3lfA4gzFivh0wwfNfz9L0WZcE=
-k8s.io/apiserver v0.29.3/go.mod h1:hrvXlwfRulbMbBgmWRQlFru2b/JySDpmzvQwwk4GUOs=
-k8s.io/client-go v0.29.3 h1:R/zaZbEAxqComZ9FHeQwOh3Y1ZUs7FaHKZdQtIc2WZg=
-k8s.io/client-go v0.29.3/go.mod h1:tkDisCvgPfiRpxGnOORfkljmS+UrW+WtXAy2fTvXJB0=
-k8s.io/cloud-provider v0.29.3 h1:y39hNq0lrPD1qmqQ2ykwMJGeWF9LsepVkR2a4wskwLc=
-k8s.io/cloud-provider v0.29.3/go.mod h1:daDV1WkAO6pTrdsn7v8TpN/q9n75ExUC4RJDl7vlPKk=
-k8s.io/component-base v0.29.3 h1:Oq9/nddUxlnrCuuR2K/jp6aflVvc0uDvxMzAWxnGzAo=
-k8s.io/component-base v0.29.3/go.mod h1:Yuj33XXjuOk2BAaHsIGHhCKZQAgYKhqIxIjIr2UXYio=
-k8s.io/component-helpers v0.29.3 h1:1dqZswuZgT2ZMixYeORyCUOAApXxgsvjVSgfoUT+P4o=
-k8s.io/component-helpers v0.29.3/go.mod h1:yiDqbRQrnQY+sPju/bL7EkwDJb6LVOots53uZNMZBos=
-k8s.io/controller-manager v0.29.3 h1:pvm3mirypgW7kM6dHRk6O5ANZj4bZTWirfk5gO6RlCo=
-k8s.io/controller-manager v0.29.3/go.mod h1:RNxpf0d1WAo59sOLd32isWJP0oZ7Zxr+q4VEEaSq4gk=
-k8s.io/klog/v2 v2.120.0 h1:z+q5mfovBj1fKFxiRzsa2DsJLPIVMk/KFL81LMOfK+8=
-k8s.io/klog/v2 v2.120.0/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
-k8s.io/kms v0.29.3 h1:ReljsAUhYlm2spdT4yXmY+9a8x8dc/OT4mXvwQPPteQ=
-k8s.io/kms v0.29.3/go.mod h1:TBGbJKpRUMk59neTMDMddjIDL+D4HuFUbpuiuzmOPg0=
-k8s.io/kube-openapi v0.0.0-20240105020646-a37d4de58910 h1:1Rp/XEKP5uxPs6QrsngEHAxBjaAR78iJRiJq5Fi7LSU=
-k8s.io/kube-openapi v0.0.0-20240105020646-a37d4de58910/go.mod h1:Pa1PvrP7ACSkuX6I7KYomY6cmMA0Tx86waBhDUgoKPw=
-k8s.io/utils v0.0.0-20240102154912-e7106e64919e h1:eQ/4ljkx21sObifjzXwlPKpdGLrCfRziVtos3ofG/sQ=
-k8s.io/utils v0.0.0-20240102154912-e7106e64919e/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
-sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 h1:TgtAeesdhpm2SGwkQasmbeqDo8th5wOBA5h/AjTKA4I=
-sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0/go.mod h1:VHVDI/KrK4fjnV61bE2g3sA7tiETLn8sooImelsCx3Y=
-sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
-sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
-sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4=
-sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08=
+k8s.io/api v0.32.1 h1:f562zw9cy+GvXzXf0CKlVQ7yHJVYzLfL6JAS4kOAaOc=
+k8s.io/api v0.32.1/go.mod h1:/Yi/BqkuueW1BgpoePYBRdDYfjPF5sgTr5+YqDZra5k=
+k8s.io/apiextensions-apiserver v0.30.2 h1:l7Eue2t6QiLHErfn2vwK4KgF4NeDgjQkCXtEbOocKIE=
+k8s.io/apiextensions-apiserver v0.30.2/go.mod h1:lsJFLYyK40iguuinsb3nt+Sj6CmodSI4ACDLep1rgjw=
+k8s.io/apimachinery v0.32.1 h1:683ENpaCBjma4CYqsmZyhEzrGz6cjn1MY/X2jB2hkZs=
+k8s.io/apimachinery v0.32.1/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE=
+k8s.io/apiserver v0.32.1 h1:oo0OozRos66WFq87Zc5tclUX2r0mymoVHRq8JmR7Aak=
+k8s.io/apiserver v0.32.1/go.mod h1:UcB9tWjBY7aryeI5zAgzVJB/6k7E97bkr1RgqDz0jPw=
+k8s.io/client-go v0.32.1 h1:otM0AxdhdBIaQh7l1Q0jQpmo7WOFIk5FFa4bg6YMdUU=
+k8s.io/client-go v0.32.1/go.mod h1:aTTKZY7MdxUaJ/KiUs8D+GssR9zJZi77ZqtzcGXIiDg=
+k8s.io/cloud-provider v0.32.1 h1:74rRhnfca3o4CsjjnIp/C3ARVuSmyNsxgWPtH0yc9Z0= +k8s.io/cloud-provider v0.32.1/go.mod h1:GECSanFT+EeZ/ToX3xlasjETzMUI+VFu92zHUDUsGHw= +k8s.io/component-base v0.32.1 h1:/5IfJ0dHIKBWysGV0yKTFfacZ5yNV1sulPh3ilJjRZk= +k8s.io/component-base v0.32.1/go.mod h1:j1iMMHi/sqAHeG5z+O9BFNCF698a1u0186zkjMZQ28w= +k8s.io/component-helpers v0.32.1 h1:TwdsSM1vW9GjnfX18lkrZbwE5G9psCIS2/rhenTDXd8= +k8s.io/component-helpers v0.32.1/go.mod h1:1JT1Ei3FD29yFQ18F3laj1WyvxYdHIhyxx6adKMFQXI= +k8s.io/controller-manager v0.32.1 h1:z3oQp1O5l0cSzM/MKf8V4olhJ9TmnELoJRPcV/v1s+Y= +k8s.io/controller-manager v0.32.1/go.mod h1:dVA1UZPbqHH4hEhrrnLvQ4d5qVQCklNB8GEzYV59v/4= +k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kms v0.32.1 h1:TW6cswRI/fawoQRFGWLmEceO37rZXupdoRdmO019jCc= +k8s.io/kms v0.32.1/go.mod h1:Bk2evz/Yvk0oVrvm4MvZbgq8BD34Ksxs2SRHn4/UiOM= +k8s.io/kube-openapi v0.0.0-20241212222426-2c72e554b1e7 h1:hcha5B1kVACrLujCKLbr8XWMxCxzQx42DY8QKYJrDLg= +k8s.io/kube-openapi v0.0.0-20241212222426-2c72e554b1e7/go.mod h1:GewRfANuJ70iYzvn+i4lezLDAFzvjxZYK1gn1lWcfas= +k8s.io/utils v0.0.0-20241210054802-24370beab758 h1:sdbE21q2nlQtFh65saZY+rRM6x6aJJI8IUa1AmH/qa0= +k8s.io/utils v0.0.0-20241210054802-24370beab758/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.1 h1:uOuSLOMBWkJH0TWa9X6l+mj5nZdm6Ay6Bli8HL8rNfk= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.1/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw= +sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE= +sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= +sigs.k8s.io/structured-merge-diff/v4 v4.5.0 h1:nbCitCK2hfnhyiKo6uf2HxUPTCodY6Qaf85SbDIaMBk= +sigs.k8s.io/structured-merge-diff/v4 v4.5.0/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/main.go b/main.go index 2ef42d02..593755c8 100644 --- a/main.go +++ b/main.go @@ -85,6 +85,8 @@ func main() { command.Flags().StringVar(&linode.Options.VPCNames, "vpc-names", "", "comma separated vpc names whose routes will be managed by route-controller") command.Flags().StringVar(&linode.Options.LoadBalancerType, "load-balancer-type", "nodebalancer", "configures which type of load-balancing to use for LoadBalancer Services (options: nodebalancer, cilium-bgp)") command.Flags().StringVar(&linode.Options.BGPNodeSelector, "bgp-node-selector", "", "node selector to use to perform shared IP fail-over with BGP (e.g. cilium-bgp-peering=true") + command.Flags().StringVar(&linode.Options.IpHolderSuffix, "ip-holder-suffix", "", "suffix to append to the ip holder name when using shared IP fail-over with BGP (e.g. 
ip-holder-suffix=my-cluster-name)")
+	command.Flags().StringSliceVar(&linode.Options.NodeBalancerTags, "nodebalancer-tags", []string{}, "Linode tags to apply to all NodeBalancers")
 
 	// Set static flags
 	command.Flags().VisitAll(func(fl *pflag.Flag) {
@@ -112,7 +114,7 @@ func main() {
 	linode.Options.KubeconfigFlag = command.Flags().Lookup("kubeconfig")
 	if linode.Options.KubeconfigFlag == nil {
 		msg := "kubeconfig missing from CCM flag set"
-		sentry.CaptureError(ctx, fmt.Errorf(msg))
+		sentry.CaptureError(ctx, fmt.Errorf("%s", msg))
 		fmt.Fprintf(os.Stderr, "kubeconfig missing from CCM flag set"+"\n")
 		os.Exit(1)
 	}
@@ -121,7 +123,7 @@ func main() {
 	_, network, err := net.ParseCIDR(externalSubnet)
 	if err != nil {
 		msg := fmt.Sprintf("Unable to parse %s as network subnet: %v", externalSubnet, err)
-		sentry.CaptureError(ctx, fmt.Errorf(msg))
+		sentry.CaptureError(ctx, fmt.Errorf("%s", msg))
 		fmt.Fprintf(os.Stderr, "%v\n", msg)
 		os.Exit(1)
 	}
diff --git a/sentry/sentry_test.go b/sentry/sentry_test.go
new file mode 100644
index 00000000..b26dc9d7
--- /dev/null
+++ b/sentry/sentry_test.go
@@ -0,0 +1,195 @@
+package sentry
+
+import (
+	"context"
+	"testing"
+
+	"github.com/getsentry/sentry-go"
+	"github.com/stretchr/testify/assert"
+)
+
+func TestInitialize(t *testing.T) {
+	// Reset the initialized flag once before the subtests; they run in order, and "double initialization" relies on the first case having already initialized the SDK.
+	initialized = false
+
+	tests := []struct {
+		name        string
+		dsn         string
+		environment string
+		release     string
+		wantErr     bool
+	}{
+		{
+			name:        "successful initialization",
+			dsn:         "https://test@sentry.io/123",
+			environment: "test",
+			release:     "1.0.0",
+			wantErr:     false,
+		},
+		{
+			name:        "empty DSN",
+			dsn:         "",
+			environment: "test",
+			release:     "1.0.0",
+			wantErr:     true,
+		},
+		{
+			name:        "double initialization",
+			dsn:         "https://test@sentry.io/123",
+			environment: "test",
+			release:     "1.0.0",
+			wantErr:     true,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			err := Initialize(tt.dsn, tt.environment, tt.release)
+			if tt.wantErr {
+				assert.Error(t, err)
+			} else {
+				assert.NoError(t, err)
+				assert.True(t, initialized)
+			}
+		})
+	}
+}
+
+func TestSetHubOnContext(t *testing.T) {
+	// Reset the initialized flag
+	initialized = false
+	_ = Initialize("https://test@sentry.io/123", "test", "1.0.0")
+
+	ctx := context.Background()
+	newCtx := SetHubOnContext(ctx)
+
+	assert.True(t, sentry.HasHubOnContext(newCtx))
+	assert.NotNil(t, sentry.GetHubFromContext(newCtx))
+}
+
+func TestGetHubFromContext(t *testing.T) {
+	tests := []struct {
+		name        string
+		setupFunc   func() context.Context
+		initialized bool
+		wantNil     bool
+	}{
+		{
+			name: "valid hub in context",
+			setupFunc: func() context.Context {
+				ctx := context.Background()
+				return SetHubOnContext(ctx)
+			},
+			initialized: true,
+			wantNil:     false,
+		},
+		{
+			name: "no hub in context",
+			setupFunc: func() context.Context {
+				return context.Background()
+			},
+			initialized: true,
+			wantNil:     true,
+		},
+		{
+			name: "sentry not initialized",
+			setupFunc: func() context.Context {
+				return context.Background()
+			},
+			initialized: false,
+			wantNil:     true,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			// Reset the initialized flag
+			initialized = false
+			if tt.initialized {
+				_ = Initialize("https://test@sentry.io/123", "test", "1.0.0")
+			}
+
+			ctx := tt.setupFunc()
+			hub := getHubFromContext(ctx)
+
+			if tt.wantNil {
+				assert.Nil(t, hub)
+			} else {
+				assert.NotNil(t, hub)
+			}
+		})
+	}
+}
+
+func TestSetTag(t *testing.T) {
+	// Reset the initialized flag
+	initialized = false
+	_ = 
Initialize("https://test@sentry.io/123", "test", "1.0.0") + + tests := []struct { + name string + setupFunc func() context.Context + key string + value string + }{ + { + name: "set tag with valid hub", + setupFunc: func() context.Context { + return SetHubOnContext(context.Background()) + }, + key: "test-key", + value: "test-value", + }, + { + name: "set tag with no hub", + setupFunc: func() context.Context { + return context.Background() + }, + key: "test-key", + value: "test-value", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctx := tt.setupFunc() + // This should not panic + SetTag(ctx, tt.key, tt.value) + }) + } +} + +func TestCaptureError(t *testing.T) { + // Reset the initialized flag + initialized = false + _ = Initialize("https://test@sentry.io/123", "test", "1.0.0") + + tests := []struct { + name string + setupFunc func() context.Context + err error + }{ + { + name: "capture error with valid hub", + setupFunc: func() context.Context { + return SetHubOnContext(context.Background()) + }, + err: assert.AnError, + }, + { + name: "capture error with no hub", + setupFunc: func() context.Context { + return context.Background() + }, + err: assert.AnError, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctx := tt.setupFunc() + // This should not panic + CaptureError(ctx, tt.err) + }) + } +}