From 016724850646930243cb612fcb8f91178ab88793 Mon Sep 17 00:00:00 2001 From: Xiangjing Li <55890329+xiangjingli@users.noreply.github.com> Date: Tue, 19 Dec 2023 11:43:42 -0500 Subject: [PATCH] refactor ansible integration (#376) Signed-off-by: Xiangjing Li --- build/e2e-kc.sh | 34 +- cmd/appsubsummary/exec/manager.go | 3 + cmd/manager/exec/manager.go | 3 + cmd/placementrule/exec/manager.go | 3 + deploy/hub/subscription-report-operator.yaml | 47 +++ go.mod | 7 +- go.sum | 47 +-- pkg/apis/apps/v1/subscription_types.go | 3 + pkg/controller/mcmhub/ansiblejob.go | 347 +++++++++++------- pkg/controller/mcmhub/gitrepo_sync.go | 2 +- pkg/controller/mcmhub/hook.go | 176 +++++++-- pkg/controller/mcmhub/hook_test.go | 99 +++-- pkg/controller/mcmhub/hub.go | 9 +- pkg/controller/mcmhub/hub_git.go | 19 +- pkg/controller/mcmhub/mcmhub_controller.go | 78 ++-- .../mcmhub_controller_propagation_test.go | 42 +-- .../mcmhub/mcmhub_controller_suite_test.go | 111 +++++- .../mcmhub/mcmhub_controller_test.go | 55 +-- pkg/controller/mcmhub/placement.go | 2 +- .../subscription/subscription_controller.go | 6 + .../controller/placementrule/placement.go | 7 +- .../placementrule/placementrule_controller.go | 16 + pkg/utils/subscription.go | 22 +- .../failed/failed-appsub-manifestwork.yaml | 16 +- 24 files changed, 782 insertions(+), 372 deletions(-) create mode 100644 deploy/hub/subscription-report-operator.yaml diff --git a/build/e2e-kc.sh b/build/e2e-kc.sh index 1fbdcfae..85a39e4a 100755 --- a/build/e2e-kc.sh +++ b/build/e2e-kc.sh @@ -190,7 +190,7 @@ echo "STARTING test case 06-ansiblejob-post" kubectl config use-context kind-hub kubectl apply -f hack/test/tower.ansible.com_ansiblejobs_crd.yaml kubectl apply -f test/e2e/cases/06-ansiblejob-post/ -sleep 30 +sleep 70 if kubectl get subscriptions.apps.open-cluster-management.io ansible-hook -o yaml | grep lastposthookjob | grep posthook-test; then echo "06-ansiblejob-post: found ansiblejob CR name in subscription output" @@ -198,6 +198,16 @@ else echo "06-ansiblejob-post: FAILED: ansiblejob CR name is not in the subscription output" exit 1 fi + + +kubectl get pods -n open-cluster-management +kubectl logs -n open-cluster-management -l app=multicluster-operators-hub-subscription + +kubectl get placementdecisions -n default -o yaml +kubectl get appsub -n default ansible-hook -o yaml +kubectl get appsubreport -n default ansible-hook -o yaml +kubectl get appsubreport -n cluster1 cluster1 -o yaml + if kubectl get ansiblejobs.tower.ansible.com | grep posthook-test; then echo "06-ansiblejob-post: found ansiblejobs.tower.ansible.com" else @@ -473,7 +483,7 @@ echo "STARTING test case 17-ansiblejob-pre-workflow" kubectl config use-context kind-hub kubectl apply -f hack/test/tower.ansible.com_ansiblejobs_crd.yaml kubectl apply -f test/e2e/cases/17-ansiblejob-pre-workflow/ -sleep 10 +sleep 40 if kubectl get subscriptions.apps.open-cluster-management.io ansible-hook -o yaml | grep lastprehookjob | grep prehook-workflow-test; then echo "17-ansiblejob-pre-workflow: found ansiblejob CR name in subscription output" @@ -548,18 +558,26 @@ fi echo "19-verify-git-pull-time-metric: patching successful subscription and expeting failed metrics" kubectl config use-context kind-hub kubectl apply -f test/e2e/cases/19-verify-git-pull-time-metric/failed -# with high reconcile rate, the updated appsub is handled every 2 minutes. 
Wait for over 2 minutes to make sure the updated appsub is handled -sleep 140 +# deliver an appsub with invalid channel info, it is expected to get the failure once the appsub is deployed on the managed cluster +sleep 30 echo "19-verify-git-pull-time-metric: fetching failed managed cluster metrics" kubectl config use-context kind-cluster1 collectedFailedMcMetrics=`kubectl exec -n open-cluster-management-agent-addon deploy/application-manager -- curl http://localhost:8388/metrics` # FAILED metrics test -IFS=' ' read -a failedPullTimeCount <<< $(echo "$collectedFailedMcMetrics" | grep "subscription_name=\"git-pull-time-metric-sub\"" | grep git_failed_pull_time_count) -IFS=' ' read -a failedPullTimeSum <<< $(echo "$collectedFailedMcMetrics" | grep "subscription_name=\"git-pull-time-metric-sub\"" | grep git_failed_pull_time_sum) +IFS=' ' read -a failedPullTimeCount <<< $(echo "$collectedFailedMcMetrics" | grep "subscription_name=\"git-pull-time-metric-sub-failed\"" | grep git_failed_pull_time_count) +echo "============" echo "19-verify-git-pull-time-metric: verifying expected git_failed_pull_time metrics for succesful subscription" -if [ "${failedPullTimeCount[1]}" \> 0 ] && [ "${failedPullTimeSum[1]}" \> 100 ] ; then + +echo "$collectedFailedMcMetrics" | grep "subscription_name=\"git-pull-time-metric-sub-failed\"" | grep git_failed_pull_time_count + +kubectl get appsub -n git-pull-time-metric-test git-pull-time-metric-sub-failed -o yaml + +kubectl get pods -n open-cluster-management-agent-addon -l component=application-manager +kubectl logs -n open-cluster-management-agent-addon -l component=application-manager + +if [ "${failedPullTimeCount[1]}" \> 0 ]; then echo "19-verify-git-pull-time-metric: git_failed_pull_time metrics collected by the managed cluster's metrics service" else echo "19-verify-git-pull-time-metric: FAILED: git_failed_pull_time metrics not collected by the managed cluster's metrics service" @@ -664,7 +682,7 @@ echo "STARTING test case 22-ansiblejob-tags" kubectl config use-context kind-hub kubectl apply -f hack/test/tower.ansible.com_ansiblejobs_crd.yaml kubectl apply -f test/e2e/cases/22-ansiblejob-tags/ -sleep 10 +sleep 40 if kubectl get subscriptions.apps.open-cluster-management.io ansible-hook -o yaml | grep lastposthookjob | grep posthook-tags-test; then echo "22-ansiblejob-tags: found ansiblejob CR name in subscription output" diff --git a/cmd/appsubsummary/exec/manager.go b/cmd/appsubsummary/exec/manager.go index 0fc9e1a2..de907acd 100644 --- a/cmd/appsubsummary/exec/manager.go +++ b/cmd/appsubsummary/exec/manager.go @@ -28,6 +28,7 @@ import ( "open-cluster-management.io/multicloud-operators-subscription/pkg/utils" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log/zap" "sigs.k8s.io/controller-runtime/pkg/manager/signals" k8swebhook "sigs.k8s.io/controller-runtime/pkg/webhook" ) @@ -41,6 +42,8 @@ var ( // RunManager starts the actual manager. 
func RunManager() { + ctrl.SetLogger(zap.New(zap.UseDevMode(true))) + enableLeaderElection := false if _, err := rest.InClusterConfig(); err == nil { diff --git a/cmd/manager/exec/manager.go b/cmd/manager/exec/manager.go index 383f7772..1a21e49b 100644 --- a/cmd/manager/exec/manager.go +++ b/cmd/manager/exec/manager.go @@ -34,6 +34,7 @@ import ( ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/healthz" + "sigs.k8s.io/controller-runtime/pkg/log/zap" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/manager/signals" @@ -67,6 +68,8 @@ const ( ) func RunManager() { + ctrl.SetLogger(zap.New(zap.UseDevMode(true))) + enableLeaderElection := false if _, err := rest.InClusterConfig(); err == nil { diff --git a/cmd/placementrule/exec/manager.go b/cmd/placementrule/exec/manager.go index 15d98df0..3d2ab628 100644 --- a/cmd/placementrule/exec/manager.go +++ b/cmd/placementrule/exec/manager.go @@ -27,6 +27,7 @@ import ( "k8s.io/client-go/rest" "k8s.io/klog/v2" ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/log/zap" "sigs.k8s.io/controller-runtime/pkg/manager/signals" k8swebhook "sigs.k8s.io/controller-runtime/pkg/webhook" ) @@ -40,6 +41,8 @@ var ( // RunManager starts the actual manager func RunManager() { + ctrl.SetLogger(zap.New(zap.UseDevMode(true))) + enableLeaderElection := false if _, err := rest.InClusterConfig(); err == nil { diff --git a/deploy/hub/subscription-report-operator.yaml b/deploy/hub/subscription-report-operator.yaml new file mode 100644 index 00000000..df364f6a --- /dev/null +++ b/deploy/hub/subscription-report-operator.yaml @@ -0,0 +1,47 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: multicluster-operators-subscription-report + name: multicluster-operators-subscription-report + namespace: open-cluster-management +spec: + replicas: 1 + selector: + matchLabels: + app: multicluster-operators-subscription-report + template: + metadata: + labels: + app: multicluster-operators-subscription-report + spec: + serviceAccountName: multicluster-operators + containers: + - command: + - /usr/local/bin/appsubsummary + env: + - name: WATCH_NAMESPACE + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: DEPLOYMENT_LABEL + value: multicluster-operators-subscription-report + - name: OPERATOR_NAME + value: multicluster-operators-subscription-report + image: quay.io/open-cluster-management/multicloud-operators-subscription:latest + imagePullPolicy: IfNotPresent + name: multicluster-operators-subscription-report + resources: + limits: + cpu: 750m + memory: 2Gi + requests: + cpu: 150m + memory: 128Mi diff --git a/go.mod b/go.mod index db704efd..93cea8bf 100644 --- a/go.mod +++ b/go.mod @@ -9,7 +9,7 @@ require ( github.com/aws/aws-sdk-go-v2/service/s3 v1.27.1 github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 github.com/go-git/go-git/v5 v5.4.2 - github.com/go-logr/logr v1.2.4 + github.com/go-logr/logr v1.3.0 github.com/google/go-github/v42 v42.0.0 github.com/johannesboyne/gofakes3 v0.0.0-20210819161434-5c8dfcfe5310 github.com/onsi/ginkgo/v2 v2.9.5 @@ -21,7 +21,7 @@ require ( github.com/prometheus/client_golang v1.15.1 github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06 github.com/spf13/pflag v1.0.5 - github.com/stretchr/testify v1.8.1 + github.com/stretchr/testify v1.8.4 golang.org/x/crypto 
v0.5.0 golang.org/x/net v0.10.0 gomodules.xyz/jsonpatch/v3 v3.0.1 @@ -95,6 +95,7 @@ require ( github.com/go-git/go-billy/v5 v5.3.1 // indirect github.com/go-git/go-git-fixtures/v4 v4.3.1 // indirect github.com/go-gorp/gorp/v3 v3.0.2 // indirect + github.com/go-logr/zapr v1.3.0 // indirect github.com/go-openapi/jsonpointer v0.19.6 // indirect github.com/go-openapi/jsonreference v0.20.1 // indirect github.com/go-openapi/swag v0.22.3 // indirect @@ -169,6 +170,8 @@ require ( github.com/xeipuuv/gojsonschema v1.2.0 // indirect github.com/xlab/treeprint v1.1.0 // indirect go.starlark.net v0.0.0-20220714194419-4cadf0a12139 // indirect + go.uber.org/multierr v1.10.0 // indirect + go.uber.org/zap v1.26.0 // indirect golang.org/x/oauth2 v0.5.0 // indirect golang.org/x/sync v0.2.0 // indirect golang.org/x/sys v0.8.0 // indirect diff --git a/go.sum b/go.sum index 930c6f01..b85c7043 100644 --- a/go.sum +++ b/go.sum @@ -63,11 +63,8 @@ github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpz github.com/Microsoft/go-winio v0.5.2 h1:a9IhgEQBCUEk6QCdml9CiJGhAws+YwffDHEMp1VMrpA= github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= github.com/Microsoft/hcsshim v0.9.6 h1:VwnDOgLeoi2du6dAznfmspNqTiwczvjv4K7NxuY9jsY= -github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7/go.mod h1:z4/9nQmJSSwwds7ejkxaJwO37dru3geImFUdJlaLzQo= -github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d h1:UrqY+r/OJnIp5u0s1SbQ8dVfLCZJsnvazdBP5hS4iRs= github.com/acomagu/bufpipe v1.0.3 h1:fxAGrHZTgQ9w5QqVItgzwj235/uYZYgbXitB+dLupOk= github.com/acomagu/bufpipe v1.0.3/go.mod h1:mxdxdup/WdsKVreO5GpW4+M/1CE2sMG4jeGJ2sYmHc4= @@ -129,7 +126,6 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM= -github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= github.com/bshuster-repo/logrus-logstash-hook v1.0.2 h1:JYRWo+QGnQdedgshosug9hxpPYTB9oJ1ZZD3fY31alU= @@ -149,13 +145,10 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go 
v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/containerd/cgroups v1.0.4 h1:jN/mbWBEaz+T1pi5OFtnkQ+8qnmEbAr1Oo1FRm5B0dA= github.com/containerd/containerd v1.6.15 h1:4wWexxzLNHNE46aIETc6ge4TofO550v+BlLoANrbses= github.com/containerd/containerd v1.6.15/go.mod h1:U2NnBPIhzJDm59xF7xB2MMHnKtggpZ+phKg8o2TKj2c= -github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= @@ -187,8 +180,6 @@ github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 h1:UhxFibDNY/bfvqU5CAUmr9zpesgbU6SWc8/B4mflAE4= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= -github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful/v3 v3.9.0 h1:XwGDlfxEnQZzuopoqxwSEllNcCOM9DhhFyhFIIGKwxE= github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o= @@ -201,7 +192,6 @@ github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5y github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= -github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= @@ -249,12 +239,12 @@ github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2 github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= -github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= -github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/zapr v1.2.4 h1:QHVo+6stLbfJmYGkQ7uGHUCu5hnAFAj6mDe6Ea0SeOo= +github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= 
+github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= +github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= github.com/go-openapi/jsonreference v0.20.1 h1:FBLnyygC4/IZZr893oiomc9XaghoveYTrLC1F86HID8= @@ -268,7 +258,6 @@ github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LB github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= -github.com/gobuffalo/flect v0.2.0/go.mod h1:W3K3X9ksuZfir8f/LrfVtWmCDQFfayuylOJ7sz/Fj80= github.com/gobuffalo/logger v1.0.6 h1:nnZNpxYo0zx+Aj9RfMPBm+x9zAU2OayFh/xrAWi34HU= github.com/gobuffalo/logger v1.0.6/go.mod h1:J31TBEHR1QLV2683OXTAItYIg8pv2JMHnF/quuAbMjs= github.com/gobuffalo/packd v1.0.1 h1:U2wXfRr4E9DH8IdsDLlRFwTZTK7hLfq9qT/QHXGVe/0= @@ -379,8 +368,6 @@ github.com/gosuri/uitable v0.0.4 h1:IG2xLKRvErL3uhY6e1BylFzG+aJiwQviDDTfOKeKTpY= github.com/gosuri/uitable v0.0.4/go.mod h1:tKR86bXuXPZazfOTG1FIzvjIdXzd0mo4Vtn16vt0PJo= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJrOjhax5N+uePQ0Fh1Z7PheYoUI/0nzkPA= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= @@ -428,10 +415,8 @@ github.com/jmoiron/sqlx v1.3.5 h1:vFFPA71p1o5gAeqtEAwLU4dnX2napprKtHr7PYIcN3g= github.com/jmoiron/sqlx v1.3.5/go.mod h1:nRVWtLre0KfCLJvgxzCsLVMogSvQ1zNJtpYr2Ccp0mQ= github.com/johannesboyne/gofakes3 v0.0.0-20210819161434-5c8dfcfe5310 h1:CwSccv4SFVzhShEoWx3W4dyiWHwvLEL3lJbw+YIrQYg= github.com/johannesboyne/gofakes3 v0.0.0-20210819161434-5c8dfcfe5310/go.mod h1:LIAXxPvcUXwOcTIj9LSNSUpE9/eMHalTWxsP/kmWxQI= -github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= -github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= @@ -441,7 +426,6 @@ github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1 github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jtolds/gls v4.20.0+incompatible/go.mod 
h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 h1:iQTw/8FWTuc7uiaSepXwyf3o52HaUYcV+Tu66S3F5GA= github.com/karrick/godirwalk v1.16.1 h1:DynhcF+bztK8gooS0+NDJFrdNZjJ3gzVzC545UNA9iw= github.com/karrick/godirwalk v1.16.1/go.mod h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1qeJ3RV7ULlk= @@ -548,11 +532,8 @@ github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7P github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= -github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo/v2 v2.9.5 h1:+6Hr4uxzP4XIUyAkg61dWBw8lb/gc4/X5luuxN/EC+Q= @@ -614,7 +595,6 @@ github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6po github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= github.com/rubenv/sql-migrate v1.2.0 h1:fOXMPLMd41sK7Tg75SXDec15k3zg5WNV6SjuDRiNfcU= github.com/rubenv/sql-migrate v1.2.0/go.mod h1:Z5uVnq7vrIrPmHbVFfR4YLHRZquxeHpckCnRq0P/K9Y= -github.com/russross/blackfriday v1.6.0/go.mod h1:ti0ldHuxg49ri4ksnFxlkCfN+hvslNlmVHqNRXXJNAY= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= @@ -643,7 +623,6 @@ github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0 github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.2.1/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= @@ -674,8 +653,9 @@ github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod 
h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/xanzy/ssh-agent v0.2.1/go.mod h1:mLlQY/MoOhWBj+gOGMQkOeiEvkx+8pJSI+0Bx9h2kr4= github.com/xanzy/ssh-agent v0.3.0/go.mod h1:3s9xbODqPuuhK9JV1R321M/FlMZSBvE5aY6eAcqrDh0= @@ -688,7 +668,6 @@ github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHo github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= -github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xlab/treeprint v1.1.0 h1:G/1DjNkPpfZCFt9CSh6b5/nY4VimlbHF3Rh4obvtzDk= github.com/xlab/treeprint v1.1.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -704,7 +683,6 @@ github.com/yvasiyarov/newrelic_platform_go v0.0.0-20160601141957-9c099fbc30e9 h1 github.com/ziutek/mymysql v1.5.4 h1:GB0qdRGsTwQSBVYuVShFBKaXSnSnYYC2d9knnE1LHFs= github.com/ziutek/mymysql v1.5.4/go.mod h1:LMSpPZ6DbqWFxNCHW77HeMg9I646SAhApZ/wKdgO/C0= go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= -go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ= @@ -720,12 +698,13 @@ go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqe go.starlark.net v0.0.0-20220714194419-4cadf0a12139 h1:zMemyQYZSyEdPaUFixYICrXf/0Rfnil7+jiQRf5IBZ0= go.starlark.net v0.0.0-20220714194419-4cadf0a12139/go.mod h1:t3mmBBPzAVvK0L0n1drDmrQsJ8FoIx4INCqVMTr/Zo0= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= -go.uber.org/multierr v1.8.0 h1:dg6GjLku4EH+249NNmoIciG9N/jURbDG+pFlTkhzIC8= +go.uber.org/multierr v1.10.0 h1:S0h4aNzvfcFsC3dRF1jLoaov7oRaKqRGC/pUEJ2yvPQ= +go.uber.org/multierr v1.10.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= -go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60= +go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= +go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto 
v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -1136,7 +1115,6 @@ gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA= -gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= gopkg.in/src-d/go-billy.v4 v4.3.2 h1:0SQA1pRztfTFx2miS8sA97XvooFeNOmvUenF4o0EcVg= gopkg.in/src-d/go-billy.v4 v4.3.2/go.mod h1:nDjArDMp+XMs1aFAESLRjfGSgfvoYN0hDfzEk0GjC98= gopkg.in/src-d/go-git-fixtures.v3 v3.5.0 h1:ivZFOIltbce2Mo8IjzUHAFoq/IylO9WHhNOAJK+LsJg= @@ -1144,7 +1122,6 @@ gopkg.in/src-d/go-git-fixtures.v3 v3.5.0/go.mod h1:dLBcvytrw/TYZsNTWCnkNF2DSIlzW gopkg.in/src-d/go-git.v4 v4.13.1 h1:SRtFyV8Kxc0UP7aCHcijOMQGPxHSmMOPrzulQWolkYE= gopkg.in/src-d/go-git.v4 v4.13.1/go.mod h1:nx5NYcxdKxq5fpltdHnPa2Exj4Sx0EclMWZQbYDu2z8= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -1197,12 +1174,8 @@ k8s.io/kubectl v0.26.0 h1:xmrzoKR9CyNdzxBmXV7jW9Ln8WMrwRK6hGbbf69o4T0= k8s.io/kubectl v0.26.0/go.mod h1:eInP0b+U9XUJWSYeU9XZnTA+cVYuWyl3iYPGtru0qhQ= k8s.io/utils v0.0.0-20230406110748-d93618cff8a2 h1:qY1Ad8PODbnymg2pRbkyMT/ylpTrCM8P2RJ0yroCyIk= k8s.io/utils v0.0.0-20230406110748-d93618cff8a2/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -open-cluster-management.io/addon-framework v0.7.1-0.20230801094803-822eeca5fb8a h1:Qzf0wmDUz5bB6WHz3xb7KE7r6tilJp32XL0Tm3Del78= -open-cluster-management.io/addon-framework v0.7.1-0.20230801094803-822eeca5fb8a/go.mod h1:gLGpXkdwAzzV+JB5eQPNHbZFJwp7HsKSSwgqOxGNVCw= open-cluster-management.io/addon-framework v0.7.1-0.20230906065628-5497e73d86ad h1:TKGCLoO8o0myk8x7ONJ8scFk68xWqfPb7AlgHS/65CQ= open-cluster-management.io/addon-framework v0.7.1-0.20230906065628-5497e73d86ad/go.mod h1:8ESgg9EzyUZ2n5/Qgl8E2jnMmnd02YxXn92K5+Egedc= -open-cluster-management.io/api v0.11.1-0.20230727093131-915f5826cff9 h1:P5yjl8w09JYsTE1D6JV6y1vY9X2bBN8m494ZYg9HoyY= -open-cluster-management.io/api v0.11.1-0.20230727093131-915f5826cff9/go.mod h1:WgKUCJ7+Bf40DsOmH1Gdkpyj3joco+QLzrlM6Ak39zE= open-cluster-management.io/api v0.11.1-0.20230905055724-cf1ead467a83 h1:3zbT3sT/tEAQbpjIk6uRiTQGknQ3kQlfd11ElVuXyyQ= open-cluster-management.io/api v0.11.1-0.20230905055724-cf1ead467a83/go.mod h1:nsQ/G5JpfjQUg7dHpblyywWC6BRqklNaF6fIswVCHyY= open-cluster-management.io/multicloud-operators-channel v0.10.1-0.20230316173315-10f48e51f3aa h1:ptIK8Gt9JakTCV1aKjTgFz4Yy6P+c8KEOdv0hbIJ8A4= diff --git a/pkg/apis/apps/v1/subscription_types.go b/pkg/apis/apps/v1/subscription_types.go index 1d4d478d..2b36faee 100644 --- a/pkg/apis/apps/v1/subscription_types.go +++ b/pkg/apis/apps/v1/subscription_types.go @@ -86,6 +86,8 @@ var ( LabelSubscriptionName = SchemeGroupVersion.Group + "/subscription" // AnnotationHookType defines ansible hook job type - prehook/posthook AnnotationHookType = 
SchemeGroupVersion.Group + "/hook-type" + // AnnotationHookTemplate defines ansible hook job template namespaced name + AnnotationHookTemplate = SchemeGroupVersion.Group + "/hook-template" // AnnotationBucketPath defines s3 object bucket subfolder path AnnotationBucketPath = SchemeGroupVersion.Group + "/bucket-path" // AnnotationManagedCluster identifies this is a deployable for managed cluster @@ -220,6 +222,7 @@ const ( SubscriptionFailed SubscriptionPhase = "Failed" // SubscriptionPropagationFailed means this subscription is the "parent" sitting in hub SubscriptionPropagationFailed SubscriptionPhase = "PropagationFailed" + PreHookSucessful SubscriptionPhase = "PreHookSucessful" ) // SubscriptionUnitStatus defines status of a unit (subscription or package) diff --git a/pkg/controller/mcmhub/ansiblejob.go b/pkg/controller/mcmhub/ansiblejob.go index 31b8c94c..199c895b 100644 --- a/pkg/controller/mcmhub/ansiblejob.go +++ b/pkg/controller/mcmhub/ansiblejob.go @@ -21,12 +21,14 @@ import ( "encoding/json" "fmt" "reflect" + "sort" "strings" "sync" "github.com/go-logr/logr" kerr "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/types" + "k8s.io/klog/v2" ansiblejob "open-cluster-management.io/multicloud-operators-subscription/pkg/apis/apps/ansible/v1alpha1" subv1 "open-cluster-management.io/multicloud-operators-subscription/pkg/apis/apps/v1" "open-cluster-management.io/multicloud-operators-subscription/pkg/utils" @@ -51,6 +53,196 @@ type appliedJobs struct { lastAppliedJobs []string } +func findLastAnsibleJob(clt client.Client, subIns *subv1.Subscription, hookType string, jobKey types.NamespacedName) (*ansiblejob.AnsibleJob, error) { + // List all AnsibleJob resources under the appsub NS + ansibleJobList := &ansiblejob.AnsibleJobList{} + + err := clt.List(context.TODO(), ansibleJobList, &client.ListOptions{ + Namespace: subIns.Namespace, + }) + + if err != nil { + klog.Infof("failed to list ansible jobs. 
Namespace: %v, err: %v", subIns.Namespace, err) + return nil, err + } + + // the list is sorted by CreationTimestamp desc, ansibleJobList.Items[0] is the ansible job applied lastly + sort.Slice(ansibleJobList.Items, func(i, j int) bool { + return ansibleJobList.Items[i].ObjectMeta.CreationTimestamp.Time.After(ansibleJobList.Items[j].ObjectMeta.CreationTimestamp.Time) + }) + + klog.Infof("total prehook/posthook ansible jobs num: %v", len(ansibleJobList.Items)) + + for i := 0; i < len(ansibleJobList.Items); i++ { + hostingAppsub, ok := ansibleJobList.Items[i].Annotations[subv1.AnnotationHosting] + + if !ok { + continue + } + + if hostingAppsub != subIns.Namespace+"/"+subIns.Name { + continue + } + + curHookType, ok := ansibleJobList.Items[i].Annotations[subv1.AnnotationHookType] + + if !ok { + continue + } + + if curHookType != hookType { + continue + } + + hookTpl, ok := ansibleJobList.Items[i].Annotations[subv1.AnnotationHookTemplate] + + if !ok { + continue + } + + if hookTpl != jobKey.String() { + continue + } + + lastAnsibleJob := ansibleJobList.Items[i].DeepCopy() + + klog.Infof("last ansible job: %v/%v, hookType: %v, hookTemplate: %v", lastAnsibleJob.Namespace, lastAnsibleJob.Name, hookType, jobKey.String()) + + return lastAnsibleJob, nil + } + + return nil, nil +} + +func isEqualClusterList(logger logr.Logger, lastAnsibleJob, newAnsibleJob *ansiblejob.AnsibleJob) (bool, error) { + if lastAnsibleJob == nil || lastAnsibleJob.Spec.ExtraVars == nil { + return false, nil + } + + newJobMap := make(map[string]interface{}) + lastJobMap := make(map[string]interface{}) + + err := json.Unmarshal(newAnsibleJob.Spec.ExtraVars, &newJobMap) + if err != nil { + return false, err + } + + err = json.Unmarshal(lastAnsibleJob.Spec.ExtraVars, &lastJobMap) + if err != nil { + return false, err + } + + targetClusters := newJobMap["target_clusters"] + lastJobTargetClusters := lastJobMap["target_clusters"] + + if reflect.DeepEqual(targetClusters, lastJobTargetClusters) { + logger.Info("Both last and new ansible job target cluster list are equal") + return true, nil + } + + logger.Info("Both last and new ansible job target cluster list are NOT equal") + + return false, nil +} + +// register single prehook/posthook ansible job +func (jIns *JobInstances) registryAnsibleJob(clt client.Client, logger logr.Logger, subIns *subv1.Subscription, + jobKey types.NamespacedName, newAnsibleJob *ansiblejob.AnsibleJob, hookType string) { + jobRecords := (*jIns)[jobKey] + + if jobRecords == nil { + klog.Infof("invalid ansible job key: %v", jobKey) + return + } + + // if there is appsub manual sync, rename the new ansible job + syncTimeSuffix := getSyncTimeHash(subIns.GetAnnotations()[subv1.AnnotationManualReconcileTime]) + + // reset the ansible job instance list + jobRecords.Instance = []ansiblejob.AnsibleJob{} + jobRecords.Instance = append(jobRecords.Instance, ansiblejob.AnsibleJob{}) + + // 1. reload the last existing ansibleJob as the last pre/post hook ansible job + lastAnsibleJob, err := findLastAnsibleJob(clt, subIns, hookType, jobKey) + if err != nil { + return + } + + // 2. if there is no last ansible job found, register a new one + if lastAnsibleJob == nil { + klog.Infof("register a new ansible job as there is no existing ansible job found. ansilbe job: %v/%v, hookType: %v, hookTemplate: %v", + newAnsibleJob.Namespace, newAnsibleJob.Name, hookType, jobKey.String()) + + jobRecords.Instance[0] = *newAnsibleJob + + return + } + + // 3. 
if last ansible job is found and it is not complete yet, register the same last ansible job
+	if !isJobRunSuccessful(lastAnsibleJob, logger) {
+		klog.Infof("skip the job registration as the last ansible job is still running. ansible job: %v/%v, status: %v, hookType: %v, hookTemplate: %v",
+			lastAnsibleJob.Namespace, lastAnsibleJob.Name, lastAnsibleJob.Status.AnsibleJobResult.Status, hookType, jobKey.String())
+
+		jobRecords.Instance[0] = *lastAnsibleJob
+
+		return
+	}
+
+	// 4. if the new ansible job name remains the same as the last done one, register the same last ansible job
+	if lastAnsibleJob.Name == newAnsibleJob.Name {
+		klog.Infof("skip the job registration as the ansible job name remains the same. ansible job: %v/%v, status: %v, hookType: %v, hookTemplate: %v",
+			lastAnsibleJob.Namespace, lastAnsibleJob.Name, lastAnsibleJob.Status.AnsibleJobResult.Status, hookType, jobKey.String())
+
+		jobRecords.Instance[0] = *lastAnsibleJob
+
+		return
+	}
+
+	// 5. if there is appsub manual sync, register a new ansible job since the last ansible job is done
+	if syncTimeSuffix != "" && lastAnsibleJob.Name != newAnsibleJob.Name {
+		klog.Infof("register a new ansible job as the last ansible job is done and there is a new manual sync. "+
+			"ansible job: %v/%v, status: %v, hookType: %v, hookTemplate: %v",
+			newAnsibleJob.Namespace, newAnsibleJob.Name, newAnsibleJob.Status.AnsibleJobResult.Status, hookType, jobKey.String())
+
+		jobRecords.Instance[0] = *newAnsibleJob
+
+		return
+	}
+
+	equalClusterList, err := isEqualClusterList(logger, lastAnsibleJob, newAnsibleJob)
+	if err != nil {
+		klog.Infof("failed to compare cluster list. err: %v", err)
+
+		jobRecords.Instance[0] = *lastAnsibleJob
+
+		return
+	}
+
+	// 6. if there is a change in the cluster decision list, register a new ansible job since the last ansible job is done
+	if !equalClusterList {
+		klog.Infof("register a new ansible job as the last ansible job is done and the cluster decision list changed. "+
+			"ansible job: %v/%v, status: %v, hookType: %v, hookTemplate: %v",
+			newAnsibleJob.Namespace, newAnsibleJob.Name, newAnsibleJob.Status.AnsibleJobResult.Status, hookType, jobKey.String())
+
+		jobRecords.Instance[0] = *newAnsibleJob
+
+		return
+	}
+
+	// 7. if there is no change in the cluster decision list, still register the last DONE ansible job
+	klog.Infof("register the last Done ansible job as there is no change in the cluster list. 
ansilbe job: %v/%v, status: %v, hookType: %v, hookTemplate: %v", + lastAnsibleJob.Namespace, lastAnsibleJob.Name, lastAnsibleJob.Status.AnsibleJobResult.Status, hookType, jobKey.String()) + + jobRecords.Instance[0] = *lastAnsibleJob + + return +} + +// jIns - the original ansible job templates fetched from the git repo, where +// key : appsub NS + ansilbeJob Name +// jIns[key].Instance: The actual ansible Jobs populated from original ansilbe job template +// jIns[key].InstanceSet: the actual ansible job namespaced name +// jobs - the original prehook/posthook ansible job templates from git repo func (jIns *JobInstances) registryJobs(gClt GitOps, subIns *subv1.Subscription, suffixFunc SuffixFunc, jobs []ansiblejob.AnsibleJob, kubeclient client.Client, logger logr.Logger, placementDecisionUpdated bool, placementRuleRv string, hookType string, @@ -58,19 +250,20 @@ func (jIns *JobInstances) registryJobs(gClt GitOps, subIns *subv1.Subscription, logger.Info(fmt.Sprintf("In registryJobs, placementDecisionUpdated = %v, commitIDChanged = %v", placementDecisionUpdated, commitIDChanged)) for _, job := range jobs { - logger.Info("registering " + job.GetNamespace() + "/" + job.GetName()) - - jobKey := types.NamespacedName{Name: job.GetName(), Namespace: job.GetNamespace()} - ins, err := overrideAnsibleInstance(subIns, job, kubeclient, logger, hookType) if err != nil { return err } + logger.Info("registering " + job.GetNamespace() + "/" + job.GetName()) + + jobKey := types.NamespacedName{Name: job.GetName(), Namespace: job.GetNamespace()} + if _, ok := (*jIns)[jobKey]; !ok { (*jIns)[jobKey] = &Job{ mux: sync.Mutex{}, + Instance: []ansiblejob.AnsibleJob{}, InstanceSet: make(map[types.NamespacedName]struct{}), } } @@ -91,7 +284,7 @@ func (jIns *JobInstances) registryJobs(gClt GitOps, subIns *subv1.Subscription, jobRecords.mux.Lock() jobRecords.Original = ins - if placementDecisionUpdated && len(jobRecords.Instance) != 0 { + if placementDecisionUpdated { plrSuffixFunc := func() string { return fmt.Sprintf("-%v-%v", subIns.GetGeneration(), placementRuleRv) } @@ -101,105 +294,18 @@ func (jIns *JobInstances) registryJobs(gClt GitOps, subIns *subv1.Subscription, logger.Info("placementDecisionUpdated suffix is: " + suffix) } - nx.SetName(fmt.Sprintf("%s%s", nx.GetName(), suffix)) - - // The key name is the job name + suffix. - // The suffix can be commit id or placement rule resource version. - // So the same job can have multiple key names -> multiple jobRecords.InstanceSet[nxKey]. - // Why multiple jobRecords.InstanceSet? 
- nxKey := types.NamespacedName{Name: nx.GetName(), Namespace: nx.GetNamespace()} - - logger.Info("nxKeyWithCommitHash = " + nxKey.String()) - - _, jobWithCommitHashAlreadyExists := jobRecords.InstanceSet[nxKey] - - jobWithSyncTimeHashAlreadyExists := false - syncTimeSuffix := getSyncTimeHash(subIns.GetAnnotations()[subv1.AnnotationManualReconcileTime]) - - // If ansible job with commit prefix already exists AND manual application sync was triggered - if syncTimeSuffix != "" && jobWithCommitHashAlreadyExists { - jobName := fmt.Sprintf("%s%s", ins.GetName(), fmt.Sprintf("-%v-%v", subIns.GetGeneration(), syncTimeSuffix)) - - nxKeyWithSyncTime := types.NamespacedName{Name: jobName, Namespace: nx.GetNamespace()} - - logger.Info("nxKeyWithSyncTime = " + nxKeyWithSyncTime.String()) - - _, jobWithSyncTimeHashAlreadyExists = jobRecords.InstanceSet[nxKeyWithSyncTime] - - nxKey = nxKeyWithSyncTime - - nx.SetName(fmt.Sprintf("%s%s", ins.GetName(), fmt.Sprintf("-%v-%v", subIns.GetGeneration(), syncTimeSuffix))) + if syncTimeSuffix != "" { + suffix = fmt.Sprintf("-%v-%v", subIns.GetGeneration(), syncTimeSuffix) + logger.Info("manual sync suffix is: " + suffix) } - // jobRecords.InstanceSet[nxKey] is to prevent creating the same ansibleJob CR with the same name. - // jobRecords.Instance is an array of ansibleJob CRs that have been created so far. - if !jobWithCommitHashAlreadyExists || !jobWithSyncTimeHashAlreadyExists { - // If there is no instance set, - logger.Info("there is no jobRecords.InstanceSet for " + nxKey.String()) - - jobRecordsInstancePopulated := len(jobRecords.Instance) > 0 - - if jobRecordsInstancePopulated { - if placementDecisionUpdated { - // If placement decision is updated, then see if the previously run ansible job - // has the same target cluster. If so, skip creating a new ansible job. Otherwise, - // re-create the ansible job since the target clusters are different now. - lastJob := jobRecords.Instance[len(jobRecords.Instance)-1] - - // No need to check this because an ansiblejob will not be created if Spec.ExtraVars == nil. - //if nx.Spec.ExtraVars != nil && lastJob.Spec.ExtraVars != nil { - jobDoneOrRunning := isJobDoneOrRunning(lastJob, logger) - - if jobDoneOrRunning { - jobMap := make(map[string]interface{}) - lastJobMap := make(map[string]interface{}) - - err := json.Unmarshal(nx.Spec.ExtraVars, &jobMap) - if err != nil { - jobRecords.mux.Unlock() - - return err - } - - err = json.Unmarshal(lastJob.Spec.ExtraVars, &lastJobMap) - if err != nil { - jobRecords.mux.Unlock() - - return err - } - - targetClusters := jobMap["target_clusters"] - lastJobTargetClusters := lastJobMap["target_clusters"] - - if reflect.DeepEqual(targetClusters, lastJobTargetClusters) { - logger.Info("Both last and new ansible job target cluster list are equal") - jobRecords.mux.Unlock() - - continue - } - } - } else if commitIDChanged { - // Commit ID has changed. Re-run all pre/post ansible jobs - logger.Info("Skipping duplicated AnsibleJob creation check because commit ID has changed") - } else { - // Commit ID hasn't changed and placement decision hasn't been updated. Don't create ansible job. - logger.Info("Commit ID and placement decision are the same.") - jobRecords.mux.Unlock() - - continue - } - } else { - // If there is no jobRecordsInstance, this is the first time to create this ansiblejob CR. Just create it. 
- logger.Info("Skipping duplicated AnsibleJob creation check because no job has been created yet") - } - - jobRecords.InstanceSet[nxKey] = struct{}{} + nx.SetName(fmt.Sprintf("%s%s", nx.GetName(), suffix)) - logger.Info(fmt.Sprintf("registered ansiblejob %s", nxKey)) + // The suffix can be commit id or placement rule resource version or manu sync timestamp. + // So the actual ansible job name could be the original anisble job template name with different suffix - jobRecords.Instance = append(jobRecords.Instance, *nx) - } + jIns.registryAnsibleJob(kubeclient, logger, subIns, jobKey, nx, hookType) jobRecords.mux.Unlock() } @@ -233,7 +339,7 @@ func (jIns *JobInstances) applyJobs(clt client.Client, subIns *subv1.Subscriptio return nil } - for k, j := range *jIns { + for _, j := range *jIns { if len(j.Instance) == 0 { continue } @@ -243,8 +349,11 @@ func (jIns *JobInstances) applyJobs(clt client.Client, subIns *subv1.Subscriptio logger.Info("locked") n := len(j.Instance) + if n < 1 { + continue + } - nx := j.Instance[n-1] + nx := j.Instance[0] j.mux.Unlock() logger.Info("released lock") @@ -261,11 +370,13 @@ func (jIns *JobInstances) applyJobs(clt client.Client, subIns *subv1.Subscriptio if err := clt.Create(context.TODO(), &nx); err != nil { if !kerr.IsAlreadyExists(err) { - return fmt.Errorf("failed to apply job %v, err: %v", k.String(), err) + return fmt.Errorf("failed to apply job %v, err: %v", jKey, err) } } logger.Info(fmt.Sprintf("applied ansiblejob %s/%s", nx.GetNamespace(), nx.GetName())) + } else { + logger.Info(fmt.Sprintf("no need to apply existing ansiblejob: %s/%s", nx.GetNamespace(), nx.GetName())) } } @@ -275,9 +386,7 @@ func (jIns *JobInstances) applyJobs(clt client.Client, subIns *subv1.Subscriptio // check the last instance of the ansiblejobs to see if it's applied and // completed or not func (jIns *JobInstances) isJobsCompleted(clt client.Client, logger logr.Logger) (bool, error) { - for k, job := range *jIns { - logger.V(DebugLog).Info(fmt.Sprintf("checking if%v job for completed or not", k.String())) - + for _, job := range *jIns { n := len(job.Instance) if n == 0 { return true, nil @@ -286,6 +395,8 @@ func (jIns *JobInstances) isJobsCompleted(clt client.Client, logger logr.Logger) j := job.Instance[n-1] jKey := types.NamespacedName{Name: j.GetName(), Namespace: j.GetNamespace()} + logger.Info(fmt.Sprintf("checking if %v job for completed or not", jKey.String())) + if ok, err := isJobDone(clt, jKey, logger); err != nil || !ok { return ok, err } @@ -300,39 +411,29 @@ func isJobDone(clt client.Client, key types.NamespacedName, logger logr.Logger) if err := clt.Get(context.TODO(), key, job); err != nil { // it might not be created by the k8s side yet if kerr.IsNotFound(err) { + logger.Info(fmt.Sprintf("ansible job not found, job: %v, err: %v", key.String(), err)) + return false, nil } + logger.Info(fmt.Sprintf("faild to get ansible job, job: %v, err: %v", key.String(), err)) + return false, err } if isJobRunSuccessful(job, logger) { + logger.Info(fmt.Sprintf("ansible job done, job: %v", key.String())) return true, nil } + logger.Info(fmt.Sprintf("ansible job NOT done, job: %v", key.String())) + return false, nil } // Check if last job is running or already done // The last job could have not been created in k8s. e.g. posthook job will be created only after prehook jobs // and main subscription are done. But the posthook jobs have been created in memory ansible job list. 
-func isJobDoneOrRunning(lastJob ansiblejob.AnsibleJob, logger logr.Logger) bool { - job := &lastJob - - if job == nil { - return false - } - - if isJobRunning(job, logger) { - return true - } - - if isJobRunSuccessful(job, logger) { - return true - } - - return false -} type FormatFunc func(ansiblejob.AnsibleJob) string @@ -368,7 +469,7 @@ func getJobsString(jobs []ansiblejob.AnsibleJob, format FormatFunc) []string { return res } -//merge multiple hook string +// merge multiple hook string func (jIns *JobInstances) outputAppliedJobs(format FormatFunc) appliedJobs { res := appliedJobs{} diff --git a/pkg/controller/mcmhub/gitrepo_sync.go b/pkg/controller/mcmhub/gitrepo_sync.go index 8f654910..8a130dd3 100644 --- a/pkg/controller/mcmhub/gitrepo_sync.go +++ b/pkg/controller/mcmhub/gitrepo_sync.go @@ -153,7 +153,7 @@ func (r *ReconcileSubscription) processRepo(chn *chnv1.Channel, sub *appv1.Subsc chartDirs, kustomizeDirs, crdsAndNamespaceFiles, rbacFiles, otherFiles, err := utils.SortResources(localRepoRoot, subPath) if err != nil { - klog.Error(err, "Failed to sort kubernetes resources and helm charts.") + klog.Error(err, " Failed to sort kubernetes resources and helm charts.") return nil, err } diff --git a/pkg/controller/mcmhub/hook.go b/pkg/controller/mcmhub/hook.go index 86c1e913..19bafbe0 100644 --- a/pkg/controller/mcmhub/hook.go +++ b/pkg/controller/mcmhub/hook.go @@ -18,6 +18,8 @@ import ( "context" "encoding/json" "fmt" + "reflect" + "sort" "strings" "sync" "time" @@ -27,9 +29,13 @@ import ( "github.com/go-logr/logr" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/klog/v2" + clusterapi "open-cluster-management.io/api/cluster/v1beta1" ansiblejob "open-cluster-management.io/multicloud-operators-subscription/pkg/apis/apps/ansible/v1alpha1" + placementrulev1 "open-cluster-management.io/multicloud-operators-subscription/pkg/apis/apps/placementrule/v1" subv1 "open-cluster-management.io/multicloud-operators-subscription/pkg/apis/apps/v1" placementutils "open-cluster-management.io/multicloud-operators-subscription/pkg/placementrule/utils" + "open-cluster-management.io/multicloud-operators-subscription/pkg/utils" "k8s.io/client-go/kubernetes/scheme" @@ -91,16 +97,26 @@ type Hooks struct { lastSub *subv1.Subscription } +type AnsibleHooks struct { + gitClt GitOps + clt client.Client + // subscription namespacedName will points to hooks + mtx sync.Mutex + registry map[types.NamespacedName]*Hooks + suffixFunc SuffixFunc + //logger + logger logr.Logger + hookInterval time.Duration +} + func (h *Hooks) ConstructStatus() subv1.AnsibleJobsStatus { st := subv1.AnsibleJobsStatus{} preSt := h.constructPrehookStatus() st.LastPrehookJob = preSt.LastPrehookJob - st.PrehookJobsHistory = preSt.PrehookJobsHistory postSt := h.constructPosthookStatus() st.LastPosthookJob = postSt.LastPosthookJob - st.PosthookJobsHistory = postSt.PosthookJobsHistory return st } @@ -111,7 +127,6 @@ func (h *Hooks) constructPrehookStatus() subv1.AnsibleJobsStatus { if h.preHooks != nil { jobRecords := h.preHooks.outputAppliedJobs(ansiblestatusFormat) st.LastPrehookJob = jobRecords.lastApplied - st.PrehookJobsHistory = jobRecords.lastAppliedJobs } return st @@ -123,24 +138,11 @@ func (h *Hooks) constructPosthookStatus() subv1.AnsibleJobsStatus { if h.postHooks != nil { jobRecords := h.postHooks.outputAppliedJobs(ansiblestatusFormat) st.LastPosthookJob = jobRecords.lastApplied - st.PosthookJobsHistory = jobRecords.lastAppliedJobs } return st } -type AnsibleHooks struct { - gitClt GitOps - clt client.Client - // 
subscription namespacedName will points to hooks - mtx sync.Mutex - registry map[types.NamespacedName]*Hooks - suffixFunc SuffixFunc - //logger - logger logr.Logger - hookInterval time.Duration -} - // make sure the AnsibleHooks implementate the HookProcessor var _ HookProcessor = &AnsibleHooks{} @@ -245,9 +247,80 @@ func (a *AnsibleHooks) DeregisterSubscription(subKey types.NamespacedName) error return nil } +func (a *AnsibleHooks) IsReadyPlacementDecisionList(appsub *subv1.Subscription) (bool, error) { + // get all clusters from all the placementDecisions resources + placementDecisionclusters, err := GetClustersByPlacement(appsub, a.clt, a.logger) + + if err != nil { + klog.Infof("faile to get clusters from placementDecisions, err:%v", err) + return false, err + } + + if len(placementDecisionclusters) == 0 { + klog.Infof("No clusters found from placementDecisions") + return false, fmt.Errorf("no clusters found. sub: %v/%v", appsub.Namespace, appsub.Name) + } + + clusters1 := []string{} + for _, cl := range placementDecisionclusters { + clusters1 = append(clusters1, cl.Name) + } + + sort.Slice(clusters1, func(i, j int) bool { + return clusters1[i] < clusters1[j] + }) + + pref := appsub.Spec.Placement.PlacementRef + + if pref != nil && pref.Kind == "PlacementRule" { + placementRule := &placementrulev1.PlacementRule{} + prKey := types.NamespacedName{Name: pref.Name, Namespace: appsub.GetNamespace()} + + if err := a.clt.Get(context.TODO(), prKey, placementRule); err != nil { + klog.Infof("failed to get placementRule, err:%v", err) + return false, err + } + + placementRuleStatusClusters := placementRule.Status.Decisions + + clusters2 := []string{} + for _, cl := range placementRuleStatusClusters { + clusters2 = append(clusters2, cl.ClusterName) + } + + sort.Slice(clusters2, func(i, j int) bool { + return clusters2[i] < clusters2[j] + }) + + if reflect.DeepEqual(clusters1, clusters2) { + klog.Infof("placementRule cluster decision list is ready, appsub: %v/%v, placementref: %v", appsub.Namespace, appsub.Name, pref.Name) + return true, nil + } + } + + if pref != nil && pref.Kind == "Placement" { + placement := &clusterapi.Placement{} + pKey := types.NamespacedName{Name: pref.Name, Namespace: appsub.GetNamespace()} + + if err := a.clt.Get(context.TODO(), pKey, placement); err != nil { + klog.Infof("failed to get placement, err:%v", err) + return false, err + } + + if int(placement.Status.NumberOfSelectedClusters) == len(placementDecisionclusters) { + klog.Infof("placement cluster decision list is ready, appsub: %v/%v, placementref: %v", appsub.Namespace, appsub.Name, pref.Name) + return true, nil + } + } + + klog.Infof("placement cluster decision list is NOT ready, appsub: %v/%v", appsub.Namespace, appsub.Name) + + return false, fmt.Errorf("placement cluster decision list is NOT ready, appsub: %v/%v", appsub.Namespace, appsub.Name) +} + func (a *AnsibleHooks) RegisterSubscription(subIns *subv1.Subscription, placementDecisionUpdated bool, placementRuleRv string) error { - a.logger.Info("entry register subscription") - defer a.logger.Info("exit register subscription") + a.logger.Info(fmt.Sprintf("entry register subscription, appsub: %v/%v", subIns.Namespace, subIns.Name)) + defer a.logger.Info(fmt.Sprintf("exit register subscription, appsub: %v/%v", subIns.Namespace, subIns.Name)) chn := &chnv1.Channel{} chnkey := utils.NamespacedNameFormat(subIns.Spec.Channel) @@ -264,14 +337,22 @@ func (a *AnsibleHooks) RegisterSubscription(subIns *subv1.Subscription, placemen } if 
!a.gitClt.HasHookFolders(subIns) { - a.logger.V(DebugLog).Info(fmt.Sprintf("%s doesn't have hook folder(s), skip", PrintHelper(subIns))) + a.logger.Info(fmt.Sprintf("%s doesn't have hook folder(s) yet, skip", PrintHelper(subIns))) return nil } + + // Skip the ansibleJob hook registration if the placement decision list is not ready + isReadyPlacementDecision, err := a.IsReadyPlacementDecisionList(subIns) + if !isReadyPlacementDecision { + return err + } + //if not forcing a register and the subIns has not being changed compare to the hook registry //then skip hook processing commitIDChanged := a.isSubscriptionUpdate(subIns, a.isSubscriptionSpecChange, a.isDesiredStateChanged) if getCommitID(subIns) != "" && !placementDecisionUpdated && !commitIDChanged { + a.logger.Info(fmt.Sprintf("skip hook registry, commitIDChanged: %v, placementDecisionUpdated: %v ", commitIDChanged, placementDecisionUpdated)) return nil } @@ -341,6 +422,32 @@ func (a *AnsibleHooks) registerHook(subIns *subv1.Subscription, hookFlag string, return err } +func (a *AnsibleHooks) printAllHooks() { + for subkey, hook := range a.registry { + klog.Infof("================") + + klog.Infof("subkey: %v", subkey.String()) + + for prehook, prehookJobs := range *hook.preHooks { + klog.Infof("========") + klog.Infof("prehook Ansible job template: %v", prehook.String()) + + for _, prehookJob := range prehookJobs.Instance { + klog.Infof("prehook Ansible Job instance: %v/%v", prehookJob.Namespace, prehookJob.Name) + } + } + + for posthook, posthookJobs := range *hook.postHooks { + klog.Infof("========") + klog.Infof("posthook Ansible job template: %v", posthook.String()) + + for _, posthookJob := range posthookJobs.Instance { + klog.Infof("posthook Ansible Job instance: %v/%v", posthookJob.Namespace, posthookJob.Name) + } + } + } +} + func getHookPath(subIns *subv1.Subscription) (string, string) { annotations := subIns.GetAnnotations() @@ -358,8 +465,8 @@ func getHookPath(subIns *subv1.Subscription) (string, string) { func (a *AnsibleHooks) addHookToRegisitry(subIns *subv1.Subscription, placementDecisionUpdated bool, placementRuleRv string, commitIDChanged bool) error { - a.logger.V(2).Info("entry addNewHook subscription") - defer a.logger.V(2).Info("exit addNewHook subscription") + a.logger.Info("entry addNewHook subscription") + defer a.logger.Info("exit addNewHook subscription") preHookPath, postHookPath := getHookPath(subIns) @@ -390,6 +497,8 @@ func (a *AnsibleHooks) addHookToRegisitry(subIns *subv1.Subscription, placementD } } + a.printAllHooks() + return nil } @@ -405,6 +514,7 @@ func addingHostingSubscriptionAnno(job ansiblejob.AnsibleJob, subKey types.Names a[subv1.AnnotationHosting] = subKey.String() a[subv1.AnnotationHookType] = hookType + a[subv1.AnnotationHookTemplate] = job.Namespace + "/" + job.Name job.SetAnnotations(a) @@ -627,21 +737,11 @@ func (a *AnsibleHooks) IsPostHooksCompleted(subKey types.NamespacedName) (bool, func isJobRunSuccessful(job *ansiblejob.AnsibleJob, logger logr.Logger) bool { curStatus := job.Status.AnsibleJobResult.Status - logger.Info(fmt.Sprintf("job %s status: %v", job.Status.AnsibleJobResult.URL, curStatus)) - logger.V(1).Info(fmt.Sprintf("job %s status: %v", PrintHelper(job), curStatus)) + logger.Info(fmt.Sprintf("job: %v, job url: %v status: %#v", PrintHelper(job), job.Status.AnsibleJobResult.URL, job.Status.AnsibleJobResult)) return strings.EqualFold(curStatus, JobCompleted) } -func isJobRunning(job *ansiblejob.AnsibleJob, logger logr.Logger) bool { - curStatus := 
job.Status.AnsibleJobResult.Status - logger.Info(fmt.Sprintf("job %s status: %v", job.Status.AnsibleJobResult.URL, curStatus)) - logger.V(3).Info(fmt.Sprintf("job status: %v", curStatus)) - - return curStatus == "" || curStatus == "pending" || curStatus == "new" || - curStatus == "waiting" || curStatus == "running" -} - // Top priority: placementRef, ignore others // Next priority: clusterNames, ignore selector // Bottomline: Use label selector @@ -673,7 +773,15 @@ func GetClustersByPlacement(instance *subv1.Subscription, kubeclient client.Clie } } - logger.V(10).Info(fmt.Sprintln("clusters", clusters)) + sort.Slice(clusters, func(i, j int) bool { + return clusters[i].Name < clusters[j].Name + }) + + if len(clusters) > 20 { + logger.V(1).Info(fmt.Sprintln("The first 20 clusters: ", clusters[:20])) + } else { + logger.V(1).Info(fmt.Sprintln("clusters: ", clusters)) + } return clusters, nil } @@ -693,7 +801,7 @@ func getClustersFromPlacementRef(instance *subv1.Subscription, kubeclient client return nil, nil } - logger.V(10).Info(fmt.Sprintln("Referencing placement: ", pref, " in ", instance.GetNamespace())) + logger.Info(fmt.Sprintln("Referencing placement: ", pref, " in ", instance.GetNamespace())) ns := instance.GetNamespace() diff --git a/pkg/controller/mcmhub/hook_test.go b/pkg/controller/mcmhub/hook_test.go index a1b27efc..08405e91 100644 --- a/pkg/controller/mcmhub/hook_test.go +++ b/pkg/controller/mcmhub/hook_test.go @@ -31,6 +31,7 @@ import ( k8serrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/klog/v2" spokeClusterV1 "open-cluster-management.io/api/cluster/v1" ansiblejob "open-cluster-management.io/multicloud-operators-subscription/pkg/apis/apps/ansible/v1alpha1" plrv1alpha1 "open-cluster-management.io/multicloud-operators-subscription/pkg/apis/apps/placementrule/v1" @@ -70,17 +71,17 @@ metadata: labels: cloud: "Amazon" local-cluster: "true" - name: "test-cluster" - name: test-cluster + name: "cluster-1" + name: cluster-1 spec: hubAcceptsClient: true leaseDurationSeconds: 60` func newHookTest() *hookTest { - testNs := "ansible" + testNs := successfulPlacementRuleKey.Namespace dSubKey := types.NamespacedName{Name: "t-sub", Namespace: testNs} chnKey := types.NamespacedName{Name: "t-chn", Namespace: testNs} - hookSecretRef := corev1.ObjectReference{Name: "hook-secret", Namespace: "test"} + hookSecretRef := corev1.ObjectReference{Name: "hook-secret", Namespace: testNs} preAnsibleKey := types.NamespacedName{Name: "prehook-test", Namespace: testNs} postAnsibleKey := types.NamespacedName{Name: "posthook-test", Namespace: testNs} @@ -108,10 +109,9 @@ func newHookTest() *hookTest { Spec: subv1.SubscriptionSpec{ Channel: chnKey.String(), Placement: &plrv1alpha1.Placement{ - GenericPlacementFields: plrv1alpha1.GenericPlacementFields{ - Clusters: []plrv1alpha1.GenericClusterReference{ - {Name: "test-cluster"}, - }, + PlacementRef: &corev1.ObjectReference{ + Name: successfulPlacementRuleKey.Name, + Kind: "PlacementRule", }, }, }, @@ -148,19 +148,13 @@ var _ = Describe("multiple reconcile signal of the same subscription instance sp subIns := testPath.subIns.DeepCopy() chnIns := testPath.chnIns.DeepCopy() - chnIns.SetNamespace(fmt.Sprintf("%s-reconcile-1", chnIns.GetNamespace())) - chnKey := types.NamespacedName{Name: chnIns.GetName(), Namespace: chnIns.GetNamespace()} - subIns.Spec.Channel = chnKey.String() - - subIns.SetNamespace(fmt.Sprintf("%s-reconcile-1", subIns.GetNamespace())) subIns.Spec.HookSecretRef = 
testPath.hookSecretRef.DeepCopy() - Expect(k8sClt.Create(ctx, chnIns.DeepCopy())).Should(Succeed()) - + Expect(k8sClt.Create(ctx, chnIns)).Should(Succeed()) Expect(k8sClt.Create(ctx, subIns)).Should(Succeed()) defer func() { - Expect(k8sClt.Delete(ctx, chnIns.DeepCopy())).Should(Succeed()) + Expect(k8sClt.Delete(ctx, chnIns)).Should(Succeed()) Expect(k8sClt.Delete(ctx, subIns)).Should(Succeed()) }() @@ -211,7 +205,7 @@ func forceUpdatePrehook(clt client.Client, preKey types.NamespacedName) func() e var _ = Describe("given a subscription pointing to a git path without hook folders", func() { var ( ctx = context.TODO() - testNs = "normal-sub" + testNs = successfulPlacementRuleKey.Namespace subKey = types.NamespacedName{Name: "t-sub", Namespace: testNs} chnKey = types.NamespacedName{Name: "t-chn", Namespace: testNs} appsubReportKey = types.NamespacedName{Name: subKey.Name, Namespace: testNs} @@ -239,10 +233,9 @@ var _ = Describe("given a subscription pointing to a git path without hook folde Spec: subv1.SubscriptionSpec{ Channel: chnKey.String(), Placement: &plrv1alpha1.Placement{ - GenericPlacementFields: plrv1alpha1.GenericPlacementFields{ - Clusters: []plrv1alpha1.GenericClusterReference{ - {Name: "test-cluster"}, - }, + PlacementRef: &corev1.ObjectReference{ + Name: successfulPlacementRuleKey.Name, + Kind: "PlacementRule", }, }, }, @@ -250,11 +243,26 @@ var _ = Describe("given a subscription pointing to a git path without hook folde ) It("should download the git to local and create app AppsubReport", func() { - Expect(k8sClt.Create(ctx, chnIns.DeepCopy())).Should(Succeed()) + subIns := subIns.DeepCopy() + chnIns := chnIns.DeepCopy() + + Expect(k8sClt.Create(ctx, chnIns)).Should(Succeed()) Expect(k8sClt.Create(ctx, subIns)).Should(Succeed()) + newPlacementRule := &plrv1alpha1.PlacementRule{} + err := k8sClt.Get(ctx, successfulPlacementRuleKey, newPlacementRule) + klog.Infof("newPlacementRule: %#v, err: %v", newPlacementRule, err) + + newChannel := &chnv1.Channel{} + err = k8sClt.Get(ctx, chnKey, newChannel) + klog.Infof("newChannel: %#v, err: %v", newChannel, err) + + newSub := &subv1.Subscription{} + err = k8sClt.Get(ctx, subKey, newSub) + klog.Infof("newSub: %#v, err: %v", newSub, err) + defer func() { - Expect(k8sClt.Delete(ctx, chnIns.DeepCopy())).Should(Succeed()) + Expect(k8sClt.Delete(ctx, chnIns)).Should(Succeed()) Expect(k8sClt.Delete(ctx, subIns)).Should(Succeed()) }() @@ -291,13 +299,7 @@ var _ = Describe("given a subscription pointing to a git path,where pre hook fol subIns := testPath.subIns.DeepCopy() chnIns := testPath.chnIns.DeepCopy() - chnIns.SetNamespace(fmt.Sprintf("%s-pre-0", chnIns.GetNamespace())) - chnKey := types.NamespacedName{Name: chnIns.GetName(), Namespace: chnIns.GetNamespace()} - subIns.Spec.Channel = chnKey.String() - - subIns.SetNamespace(fmt.Sprintf("%s-pre-0", subIns.GetNamespace())) subKey := types.NamespacedName{Name: subIns.GetName(), Namespace: subIns.GetNamespace()} - subIns.Spec.HookSecretRef = testPath.hookSecretRef.DeepCopy() testManagedCluster := &spokeClusterV1.ManagedCluster{} @@ -309,13 +311,13 @@ var _ = Describe("given a subscription pointing to a git path,where pre hook fol Expect(k8sClt.Create(ctx, subIns)).Should(Succeed()) defer func() { - Expect(k8sClt.Delete(ctx, chnIns.DeepCopy())).Should(Succeed()) + Expect(k8sClt.Delete(ctx, chnIns)).Should(Succeed()) Expect(k8sClt.Delete(ctx, subIns)).Should(Succeed()) Expect(k8sClt.Delete(ctx, testManagedCluster)).Should(Succeed()) }() chtestManagedCluster := &spokeClusterV1.ManagedCluster{} - 
Expect(k8sClt.Get(context.TODO(), types.NamespacedName{Name: "test-cluster"}, chtestManagedCluster)).Should(Succeed()) + Expect(k8sClt.Get(context.TODO(), types.NamespacedName{Name: "cluster-1"}, chtestManagedCluster)).Should(Succeed()) ansibleIns := &ansiblejob.AnsibleJob{} @@ -383,13 +385,7 @@ var _ = Describe("given a subscription pointing to a git path,where pre hook fol subIns := testPath.subIns.DeepCopy() chnIns := testPath.chnIns.DeepCopy() - chnIns.SetNamespace(fmt.Sprintf("%s-pre-1", chnIns.GetNamespace())) - chnKey := types.NamespacedName{Name: chnIns.GetName(), Namespace: chnIns.GetNamespace()} - subIns.Spec.Channel = chnKey.String() - - subIns.SetNamespace(fmt.Sprintf("%s-pre-1", subIns.GetNamespace())) subKey := types.NamespacedName{Name: subIns.GetName(), Namespace: subIns.GetNamespace()} - subIns.Spec.HookSecretRef = testPath.hookSecretRef.DeepCopy() testManagedCluster := &spokeClusterV1.ManagedCluster{} @@ -397,17 +393,17 @@ var _ = Describe("given a subscription pointing to a git path,where pre hook fol Expect(err).NotTo(HaveOccurred()) Expect(k8sClt.Create(ctx, testManagedCluster)).Should(Succeed()) - Expect(k8sClt.Create(ctx, chnIns.DeepCopy())).Should(Succeed()) + Expect(k8sClt.Create(ctx, chnIns)).Should(Succeed()) Expect(k8sClt.Create(ctx, subIns)).Should(Succeed()) defer func() { - Expect(k8sClt.Delete(ctx, chnIns.DeepCopy())).Should(Succeed()) + Expect(k8sClt.Delete(ctx, chnIns)).Should(Succeed()) Expect(k8sClt.Delete(ctx, subIns)).Should(Succeed()) Expect(k8sClt.Delete(ctx, testManagedCluster)).Should(Succeed()) }() chtestManagedCluster := &spokeClusterV1.ManagedCluster{} - Expect(k8sClt.Get(context.TODO(), types.NamespacedName{Name: "test-cluster"}, chtestManagedCluster)).Should(Succeed()) + Expect(k8sClt.Get(context.TODO(), types.NamespacedName{Name: "cluster-1"}, chtestManagedCluster)).Should(Succeed()) ansibleIns := &ansiblejob.AnsibleJob{} @@ -464,13 +460,12 @@ var _ = Describe("given a subscription pointing to a git path,where pre hook fol return err } - if updateSub.Status.AnsibleJobsStatus.LastPrehookJob != foundKey.String() || - len(updateSub.Status.AnsibleJobsStatus.PrehookJobsHistory) == 0 { - + if updateSub.Status.AnsibleJobsStatus.LastPrehookJob != foundKey.String() { u := &ansiblejob.AnsibleJob{} - _ = k8sClt.Get(context.TODO(), foundKey, u) + err = k8sClt.Get(context.TODO(), foundKey, u) - return fmt.Errorf("failed to find the prehook %s in status", foundKey) + return fmt.Errorf("failed to find the prehook in status, subkey: %v, ansibleJob: %v", + subKey, u) } return nil @@ -483,27 +478,21 @@ var _ = Describe("given a subscription pointing to a git path,where pre hook fol subIns := testPath.subIns.DeepCopy() chnIns := testPath.chnIns.DeepCopy() - chnIns.SetNamespace(fmt.Sprintf("%s-pre-2", chnIns.GetNamespace())) - chnKey := types.NamespacedName{Name: chnIns.GetName(), Namespace: chnIns.GetNamespace()} - subIns.Spec.Channel = chnKey.String() - - subIns.SetNamespace(fmt.Sprintf("%s-pre-2", subIns.GetNamespace())) - subKey := types.NamespacedName{Name: subIns.GetName(), Namespace: subIns.GetNamespace()} - - Expect(k8sClt.Create(ctx, chnIns.DeepCopy())).Should(Succeed()) + Expect(k8sClt.Create(ctx, chnIns)).Should(Succeed()) a := subIns.GetAnnotations() a[subv1.AnnotationGitPath] = "git-ops/ansible/resources-nonexit" subIns.SetAnnotations(a) // tells the subscription operator to process the hooks + subKey := types.NamespacedName{Name: subIns.GetName(), Namespace: subIns.GetNamespace()} subIns.Spec.HookSecretRef = testPath.hookSecretRef.DeepCopy() 
Expect(k8sClt.Create(ctx, subIns)).Should(Succeed()) defer func() { - Expect(k8sClt.Delete(ctx, chnIns.DeepCopy())).Should(Succeed()) - Expect(k8sClt.Delete(ctx, subIns.DeepCopy())).Should(Succeed()) + Expect(k8sClt.Delete(ctx, chnIns)).Should(Succeed()) + Expect(k8sClt.Delete(ctx, subIns)).Should(Succeed()) }() nSub := &subv1.Subscription{} diff --git a/pkg/controller/mcmhub/hub.go b/pkg/controller/mcmhub/hub.go index 6b99e211..f4e78c13 100644 --- a/pkg/controller/mcmhub/hub.go +++ b/pkg/controller/mcmhub/hub.go @@ -85,7 +85,7 @@ func (r *ReconcileSubscription) doMCMHubReconcile(sub *appv1.Subscription) error sub.SetLabels(sublabels) } - klog.Infof("subscription: %v/%v", sub.GetNamespace(), sub.GetName()) + klog.Infof("subscribing subscription: %v/%v", sub.GetNamespace(), sub.GetName()) // Check and add cluster-admin annotation for multi-namepsace application isAdmin := r.AddClusterAdminAnnotation(sub) @@ -117,6 +117,13 @@ func (r *ReconcileSubscription) doMCMHubReconcile(sub *appv1.Subscription) error return err } + // if app resource list is empty, we simply regard the appsub status as successful + if len(resources) == 0 { + klog.Infof("empty app resource list, appsub: %v/%v", sub.Namespace, sub.Name) + + return nil + } + // get all managed clusters clusters, err := r.getClustersByPlacement(sub) diff --git a/pkg/controller/mcmhub/hub_git.go b/pkg/controller/mcmhub/hub_git.go index 3c9f231e..6241ff65 100644 --- a/pkg/controller/mcmhub/hub_git.go +++ b/pkg/controller/mcmhub/hub_git.go @@ -340,6 +340,9 @@ func (h *HubGitOps) ResolveLocalGitFolder(subIns *subv1.Subscription) string { } func (h *HubGitOps) RegisterBranch(subIns *subv1.Subscription) error { + h.logger.Info("entry register branch for appsub " + subIns.Namespace + "/" + subIns.Name) + defer h.logger.Info("exit register branch for appsub " + subIns.Namespace + "/" + subIns.Name) + subKey := types.NamespacedName{Name: subIns.GetName(), Namespace: subIns.GetNamespace()} // This does not pick up new changes to channel configuration @@ -763,5 +766,19 @@ func (h *HubGitOps) GetHooks(subIns *subv1.Subscription, hookPath string) ([]ans return parseFromKutomizedAsAnsibleJobs(sortedRes.kustomized, parseAnsibleJobResoures, h.logger) } - return parseAsAnsibleJobs(sortedRes.kubRes, parseAnsibleJobResoures, h.logger) + ansibleJobs, err := parseAsAnsibleJobs(sortedRes.kubRes, parseAnsibleJobResoures, h.logger) + + if err != nil { + return []ansiblejob.AnsibleJob{}, err + } + + // apply appsub NS to each ansible job + newAnsibleJobs := []ansiblejob.AnsibleJob{} + + for _, ansibleJob := range ansibleJobs { + ansibleJob.Namespace = subIns.Namespace + newAnsibleJobs = append(newAnsibleJobs, ansibleJob) + } + + return newAnsibleJobs, nil } diff --git a/pkg/controller/mcmhub/mcmhub_controller.go b/pkg/controller/mcmhub/mcmhub_controller.go index 69d2b834..2e1f10ca 100644 --- a/pkg/controller/mcmhub/mcmhub_controller.go +++ b/pkg/controller/mcmhub/mcmhub_controller.go @@ -99,14 +99,14 @@ rules: const ( reconcileName = "subscription-hub-reconciler" - defaultHookRequeueInterval = time.Second * 15 + defaultHookRequeueInterval = time.Second * 30 INFOLevel = 1 placementDecisionFlag = "--fired-by-placementdecision" subscriptionActive string = "Active" subscriptionBlock string = "Blocked" ) -var defaulRequeueInterval = time.Second * 3 +var defaulRequeueInterval = time.Second * 15 // Add creates a new Subscription Controller and adds it to the Manager. The Manager will set fields on the Controller // and Start it when the Manager is Started. 
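For context on the hub_git.go change above: after parsing hook resources from git, GetHooks now forces every AnsibleJob into the subscription's namespace by copying each job and overriding its namespace. The following is a minimal, self-contained sketch of that copy-and-override pattern; the AnsibleJob struct here is a simplified stand-in for illustration, not the real tower.ansible.com CRD type.

package main

import "fmt"

// AnsibleJob is a stand-in with only the fields needed for the illustration.
type AnsibleJob struct {
	Name      string
	Namespace string
}

// applyNamespace copies each job and overrides its namespace, mirroring the
// loop GetHooks runs after parsing hook resources from the git repo.
func applyNamespace(jobs []AnsibleJob, ns string) []AnsibleJob {
	out := make([]AnsibleJob, 0, len(jobs))

	for _, j := range jobs { // j is a copy, so the input slice is left untouched
		j.Namespace = ns
		out = append(out, j)
	}

	return out
}

func main() {
	jobs := []AnsibleJob{{Name: "prehook-test"}, {Name: "posthook-test"}}
	fmt.Println(applyNamespace(jobs, "app-ns"))
}

Because the range variable is a value copy, mutating it and appending to a new slice keeps the parsed list intact while the returned jobs all land in the appsub namespace.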
@@ -164,12 +164,8 @@ type subscriptionMapper struct { } func (mapper *subscriptionMapper) Map(ctx context.Context, obj client.Object) []reconcile.Request { - if klog.V(utils.QuiteLogLel).Enabled() { - fnName := utils.GetFnName() - klog.Infof("Entering: %v()", fnName) - - defer klog.Infof("Exiting: %v()", fnName) - } + klog.Info("Entering subscription mapper") + defer klog.Info("Exiting subscription mapper") // rolling target subscription changed, need to update the source subscription var requests []reconcile.Request @@ -221,7 +217,7 @@ func (mapper *subscriptionMapper) Map(ctx context.Context, obj client.Object) [] requests = append(requests, reconcile.Request{NamespacedName: *hdplkey}) } - klog.V(5).Info("Out subscription mapper with requests:", requests) + klog.V(1).Info("Out subscription mapper with requests:", requests) return requests } @@ -231,12 +227,8 @@ type channelMapper struct { } func (mapper *channelMapper) Map(ctx context.Context, obj client.Object) []reconcile.Request { - if klog.V(utils.QuiteLogLel).Enabled() { - fnName := utils.GetFnName() - klog.Infof("Entering: %v()", fnName) - - defer klog.Infof("Exiting: %v()", fnName) - } + klog.Info("Entering channel mapper") + defer klog.Info("Exiting channel mapper") // if channel is created/updated/deleted, its relative subscriptions should be reconciled. @@ -273,12 +265,8 @@ type placementDecisionMapper struct { } func (mapper *placementDecisionMapper) Map(ctx context.Context, obj client.Object) []reconcile.Request { - if klog.V(utils.QuiteLogLel).Enabled() { - fnName := utils.GetFnName() - klog.Infof("Entering: %v()", fnName) - - defer klog.Infof("Exiting: %v()", fnName) - } + klog.Info("Entering placementdecision mapper") + defer klog.Info("Exiting placementdecision mapper") // if placementdecision is created/updated/deleted, its relative subscriptions should be reconciled. @@ -526,11 +514,13 @@ func (r *ReconcileSubscription) Reconcile(ctx context.Context, request reconcile var preErr error + localPlacement := false + instance := &appv1.Subscription{} oins := &appv1.Subscription{} defer func() { - r.finalCommit(passedBranchRegistration, passedPrehook, preErr, oins, instance, request, &result) + r.finalCommit(passedBranchRegistration, passedPrehook, preErr, oins, instance, request, &result, localPlacement) }() err := r.Get(context.TODO(), request.NamespacedName, instance) @@ -564,7 +554,7 @@ func (r *ReconcileSubscription) Reconcile(ctx context.Context, request reconcile // process as hub subscription, generate deployable to propagate pl := instance.Spec.Placement - klog.V(2).Infof("Subscription: %v with placement %#v", request.NamespacedName.String(), pl) + klog.Infof("Subscription: %v with placement %#v", request.NamespacedName.String(), pl) //status changes below show override the prehook status if pl == nil { @@ -658,8 +648,15 @@ func (r *ReconcileSubscription) Reconcile(ctx context.Context, request reconcile WithLabelValues(instance.Namespace, instance.Name). 
Observe(0) + klog.Infof("prehooks not complete, appsub: %v, err: %v", request.NamespacedName.String(), err) return result, nil } + klog.Infof("prehooks complete, appsub: %v", request.NamespacedName.String()) + + instance.Status.Phase = appv1.PreHookSucessful + instance.Status.Reason = "" + instance.Status.LastUpdateTime = metav1.Now() + instance.Status.Statuses = appv1.SubscriptionClusterStatusMap{} } } @@ -676,6 +673,7 @@ func (r *ReconcileSubscription) Reconcile(ctx context.Context, request reconcile instance.Status.Phase = appv1.SubscriptionPropagationFailed instance.Status.Reason = err.Error() instance.Status.Statuses = nil + preErr = err returnErr = err } else { metrics.PropagationSuccessfulPullTime. @@ -688,6 +686,9 @@ func (r *ReconcileSubscription) Reconcile(ctx context.Context, request reconcile } } else { //local: true and handle change true to false // no longer hub subscription + + localPlacement = true + if !utils.IsHostingAppsub(instance) { klog.Infof("Clean up all the manifestWorks owned by appsub: %v/%v", instance.GetNamespace(), instance.GetName()) @@ -782,6 +783,10 @@ func (r *ReconcileSubscription) IsSubscriptionCompleted(subKey types.NamespacedN return false, nil } + if numInProgress > 0 { + return false, nil + } + return true, nil } @@ -794,9 +799,17 @@ func (r *ReconcileSubscription) IsSubscriptionCompleted(subKey types.NamespacedN // reconciel.Result func (r *ReconcileSubscription) finalCommit(passedBranchRegistration bool, passedPrehook bool, preErr error, oIns, nIns *appv1.Subscription, - request reconcile.Request, res *reconcile.Result) { + request reconcile.Request, res *reconcile.Result, localPlacement bool) { r.logger.Info("Enter finalCommit...") defer r.logger.Info("Exit finalCommit...") + + if localPlacement { + r.logger.Info(fmt.Sprintf("skip finalCommit for local subscription, appsub: %v/%v", nIns.Namespace, nIns.Name)) + + res.RequeueAfter = time.Duration(0) + + return + } // meaning the subscription is deleted if nIns.GetName() == "" || !oIns.GetDeletionTimestamp().IsZero() { r.logger.Info("instace is delete, don't run update logic") @@ -856,7 +869,7 @@ func (r *ReconcileSubscription) finalCommit(passedBranchRegistration bool, passe return } - r.logger.Info(fmt.Sprintf("spec or metadata of %s is updated", PrintHelper(nIns))) + r.logger.Info(fmt.Sprintf("spec or metadata of %s is updated, passedPrehook: %v", PrintHelper(nIns), passedPrehook)) //update status early to make sure the status is ready for post hook to //consume if !passedPrehook { @@ -868,6 +881,9 @@ func (r *ReconcileSubscription) finalCommit(passedBranchRegistration bool, passe nIns.Status = r.hooks.AppendStatusToSubscription(nIns) } + klog.Infof("oIns status reason: %v", oIns.Status.Reason) + klog.Infof("nIns status reason: %v", nIns.Status.Reason) + if utils.IsHubRelatedStatusChanged(oIns.Status.DeepCopy(), nIns.Status.DeepCopy()) { nIns.Status.LastUpdateTime = metav1.Now() @@ -885,12 +901,20 @@ func (r *ReconcileSubscription) finalCommit(passedBranchRegistration bool, passe if res.RequeueAfter == time.Duration(0) { res.RequeueAfter = defaulRequeueInterval - r.logger.Info(fmt.Sprintf("only update status, will retry %s for possible posthook", res.RequeueAfter)) + r.logger.Info(fmt.Sprintf("appsub status updated, will retry %v for possible posthooks. appsub: %v", res.RequeueAfter, PrintHelper(nIns))) } return } + if !passedPrehook { + res.RequeueAfter = r.hookRequeueInterval + + r.logger.Info(fmt.Sprintf("prehooks not complete yet. 
appsub: %v", PrintHelper(nIns))) + + return + } + //if not post hook, quit the reconcile if !r.hooks.HasHooks(PostHookType, request.NamespacedName) { r.logger.Info("no post hooks, exit the reconcile.") @@ -901,7 +925,9 @@ func (r *ReconcileSubscription) finalCommit(passedBranchRegistration bool, passe //wait till the subscription is propagated f, err := r.IsSubscriptionCompleted(request.NamespacedName) if !f || err != nil { + r.logger.Info(fmt.Sprintf("appsub not complete yet, appsub: %v", request.NamespacedName)) res.RequeueAfter = r.hookRequeueInterval + return } diff --git a/pkg/controller/mcmhub/mcmhub_controller_propagation_test.go b/pkg/controller/mcmhub/mcmhub_controller_propagation_test.go index 236a1efc..96fec43c 100644 --- a/pkg/controller/mcmhub/mcmhub_controller_propagation_test.go +++ b/pkg/controller/mcmhub/mcmhub_controller_propagation_test.go @@ -331,35 +331,11 @@ var _ = Describe("test propagation statuses set by the hub reconciler", func() { Namespace: successfulChannelKey.Namespace, }, Spec: channelV1.ChannelSpec{ - Type: channelV1.ChannelTypeNamespace, - Pathname: "propagation-test-cases", - }, - } - successfulPlacementRuleKey := types.NamespacedName{ - Name: "test-propagation-successful-placement", - Namespace: "propagation-test-cases", - } - successfulPlacementRule := &placementv1.PlacementRule{ - ObjectMeta: metav1.ObjectMeta{ - Name: successfulPlacementRuleKey.Name, - Namespace: successfulPlacementRuleKey.Namespace, - }, - Spec: placementv1.PlacementRuleSpec{ - GenericPlacementFields: placementv1.GenericPlacementFields{ - ClusterSelector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"name": "cluster-1"}, - }, - }, - }, - Status: placementv1.PlacementRuleStatus{ - Decisions: []placementv1.PlacementDecision{ - { - ClusterName: "cluster-1", - ClusterNamespace: "cluster1-ns", - }, - }, + Type: channelV1.ChannelTypeGit, + Pathname: "https://github.com/open-cluster-management-io/multicloud-operators-subscription", }, } + successfulSubscriptionKey := types.NamespacedName{ Name: "test-propagation-successful-sub", Namespace: "propagation-test-cases", @@ -377,13 +353,17 @@ var _ = Describe("test propagation statuses set by the hub reconciler", func() { "app": successfulSubscriptionKey.Name, "app.kubernetes.io/part-of": successfulSubscriptionKey.Name, }, + Annotations: map[string]string{ + "apps.open-cluster-management.io/github-branch": "main", + "apps.open-cluster-management.io/github-path": "examples/git-simple-sub", + }, }, Spec: appsv1.SubscriptionSpec{ Channel: successfulChannelKey.String(), Placement: &placementv1.Placement{ PlacementRef: &corev1.ObjectReference{ - Name: successfulPlacementRuleKey.Name, - Namespace: successfulPlacementRuleKey.Namespace, + Name: successfulPlacementRuleKey.Name, + Kind: "PlacementRule", }, }, }, @@ -395,9 +375,6 @@ var _ = Describe("test propagation statuses set by the hub reconciler", func() { }, } - Expect(sutPropagationTestClient.Create(context.TODO(), successfulPlacementRule)).NotTo(HaveOccurred()) - defer sutPropagationTestClient.Delete(context.TODO(), successfulPlacementRule) - Expect(sutPropagationTestClient.Create(context.TODO(), successfulChannel)).NotTo(HaveOccurred()) defer sutPropagationTestClient.Delete(context.TODO(), successfulChannel) @@ -414,7 +391,6 @@ var _ = Describe("test propagation statuses set by the hub reconciler", func() { Expect(reconciledSubscription.Status.Phase).To(Equal(appsv1.SubscriptionPropagated)) - Expect(promTestUtils.CollectAndCount(metrics.PropagationFailedPullTime)).To(BeZero()) 
Expect(promTestUtils.CollectAndCount(metrics.PropagationSuccessfulPullTime)).To(Equal(1)) }) }) diff --git a/pkg/controller/mcmhub/mcmhub_controller_suite_test.go b/pkg/controller/mcmhub/mcmhub_controller_suite_test.go index 87ae4b2e..0ebc15cc 100644 --- a/pkg/controller/mcmhub/mcmhub_controller_suite_test.go +++ b/pkg/controller/mcmhub/mcmhub_controller_suite_test.go @@ -22,9 +22,11 @@ import ( "sync" "testing" - "github.com/onsi/gomega" + . "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" "sigs.k8s.io/controller-runtime/pkg/client" @@ -32,16 +34,26 @@ import ( "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/reconcile" + addonV1alpha1 "open-cluster-management.io/api/addon/v1alpha1" spokeClusterV1 "open-cluster-management.io/api/cluster/v1" + clusterapi "open-cluster-management.io/api/cluster/v1beta1" workV1 "open-cluster-management.io/api/work/v1" "open-cluster-management.io/multicloud-operators-subscription/pkg/apis" ansiblejob "open-cluster-management.io/multicloud-operators-subscription/pkg/apis/apps/ansible/v1alpha1" + placementv1 "open-cluster-management.io/multicloud-operators-subscription/pkg/apis/apps/placementrule/v1" appSubStatusV1alpha1 "open-cluster-management.io/multicloud-operators-subscription/pkg/apis/apps/v1alpha1" ) var cfg *rest.Config var c client.Client +var ( + successfulPlacementRuleKey types.NamespacedName + successfulPlacementDecisionKey types.NamespacedName + successfulPlacementRule *placementv1.PlacementRule + successfulPlacementDecision *clusterapi.PlacementDecision +) + func TestMain(m *testing.M) { t := &envtest.Environment{ CRDDirectoryPaths: []string{ @@ -55,6 +67,7 @@ func TestMain(m *testing.M) { spokeClusterV1.AddToScheme(scheme.Scheme) appSubStatusV1alpha1.AddToScheme(scheme.Scheme) workV1.AddToScheme(scheme.Scheme) + addonV1alpha1.AddToScheme(scheme.Scheme) var err error if cfg, err = t.Start(); err != nil { @@ -123,6 +136,13 @@ func TestMain(m *testing.M) { log.Fatal(err) } + err = c.Create(context.Background(), &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{Name: "cluster-1"}, + }) + if err != nil { + log.Fatal(err) + } + err = c.Create(context.Background(), &corev1.Namespace{ ObjectMeta: metav1.ObjectMeta{Name: "propagation-test-cases"}, }) @@ -130,6 +150,93 @@ func TestMain(m *testing.M) { log.Fatal(err) } + successfulPlacementRuleKey = types.NamespacedName{ + Name: "test-propagation-successful-placement", + Namespace: "propagation-test-cases", + } + successfulPlacementRule = &placementv1.PlacementRule{ + ObjectMeta: metav1.ObjectMeta{ + Name: successfulPlacementRuleKey.Name, + Namespace: successfulPlacementRuleKey.Namespace, + }, + Spec: placementv1.PlacementRuleSpec{ + GenericPlacementFields: placementv1.GenericPlacementFields{ + ClusterSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"name": "cluster-1"}, + }, + }, + }, + } + + successfulPlacementDecisionKey = types.NamespacedName{ + Name: "test-propagation-successful-placement", + Namespace: "propagation-test-cases", + } + successfulPlacementDecision = &clusterapi.PlacementDecision{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "cluster.open-cluster-management.io/placementrule": successfulPlacementRuleKey.Name, + }, + Name: successfulPlacementDecisionKey.Name, + Namespace: successfulPlacementDecisionKey.Namespace, + }, + } + + // create placementRule, update placementRule cluster 
decision + err = c.Create(context.TODO(), successfulPlacementRule) + if err != nil { + log.Fatal(err) + } + + placementRule := &placementv1.PlacementRule{} + err = c.Get(context.TODO(), successfulPlacementRuleKey, placementRule) + + if err != nil { + log.Fatal(err) + } + + placementRule.Status = placementv1.PlacementRuleStatus{ + Decisions: []placementv1.PlacementDecision{ + { + ClusterName: "cluster-1", + ClusterNamespace: "cluster-1", + }, + }, + } + + err = c.Status().Update(context.TODO(), placementRule) + if err != nil { + log.Fatal(err) + } + + // create placementrule's placementDecision, update placementDecision cluster decision + err = c.Create(context.TODO(), successfulPlacementDecision) + if err != nil { + log.Fatal(err) + } + + placementDecision := &clusterapi.PlacementDecision{} + err = c.Get(context.TODO(), successfulPlacementDecisionKey, placementDecision) + + if err != nil { + log.Fatal(err) + } + + placementDecision.Status = clusterapi.PlacementDecisionStatus{ + Decisions: []clusterapi.ClusterDecision{ + { + ClusterName: "cluster-1", + Reason: "", + }, + }, + } + + err = c.Status().Update(context.TODO(), placementDecision) + + if err != nil { + log.Fatal(err) + } + code := m.Run() t.Stop() @@ -151,7 +258,7 @@ func SetupTestReconcile(inner reconcile.Reconciler) (reconcile.Reconciler, chan } // StartTestManager adds recFn -func StartTestManager(ctx context.Context, mgr manager.Manager, g *gomega.GomegaWithT) *sync.WaitGroup { +func StartTestManager(ctx context.Context, mgr manager.Manager, g *GomegaWithT) *sync.WaitGroup { wg := &sync.WaitGroup{} wg.Add(1) diff --git a/pkg/controller/mcmhub/mcmhub_controller_test.go b/pkg/controller/mcmhub/mcmhub_controller_test.go index 9e5ed339..4c616664 100644 --- a/pkg/controller/mcmhub/mcmhub_controller_test.go +++ b/pkg/controller/mcmhub/mcmhub_controller_test.go @@ -83,15 +83,14 @@ var ( ) var ( - labeltest1subkey = types.NamespacedName{ - Name: "labeltest1sub", - Namespace: "labeltest1namespace", + PlacementRuleKey = types.NamespacedName{ + Name: "test-propagation-successful-placement", + Namespace: "propagation-test-cases", } - labeltest1Namespace = &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: "labeltest1namespace", - }, + labeltest1subkey = types.NamespacedName{ + Name: "labeltest1sub", + Namespace: PlacementRuleKey.Namespace, } labeltest1Channel = &chnv1alpha1.Channel{ @@ -101,7 +100,7 @@ var ( }, ObjectMeta: metav1.ObjectMeta{ Name: "labeltest1channel", - Namespace: "labeltest1namespace", + Namespace: PlacementRuleKey.Namespace, }, Spec: chnv1alpha1.ChannelSpec{ Type: chnv1alpha1.ChannelTypeNamespace, @@ -115,14 +114,14 @@ var ( }, ObjectMeta: metav1.ObjectMeta{ Name: "labeltest1sub", - Namespace: "labeltest1namespace", + Namespace: PlacementRuleKey.Namespace, }, Spec: appv1alpha1.SubscriptionSpec{ - Channel: "labeltest1namespace/labeltest1channel", + Channel: PlacementRuleKey.Namespace + "/labeltest1channel", Placement: &placement.Placement{ PlacementRef: &corev1.ObjectReference{ - Name: "labeltest1Placement", - Kind: "Placement", + Name: PlacementRuleKey.Name, + Kind: "PlacementRule", }, }, }, @@ -134,13 +133,7 @@ var ( var ( labeltest2subkey = types.NamespacedName{ Name: "labeltest2sub", - Namespace: "labeltest2namespace", - } - - labeltest2Namespace = &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: "labeltest2namespace", - }, + Namespace: PlacementRuleKey.Namespace, } labeltest2Channel = &chnv1alpha1.Channel{ @@ -150,7 +143,7 @@ var ( }, ObjectMeta: metav1.ObjectMeta{ Name: "labeltest2channel", - 
Namespace: "labeltest2namespace", + Namespace: PlacementRuleKey.Namespace, }, Spec: chnv1alpha1.ChannelSpec{ Type: chnv1alpha1.ChannelTypeNamespace, @@ -164,14 +157,14 @@ var ( }, ObjectMeta: metav1.ObjectMeta{ Name: "labeltest2sub", - Namespace: "labeltest2namespace", + Namespace: PlacementRuleKey.Namespace, }, Spec: appv1alpha1.SubscriptionSpec{ - Channel: "labeltest2namespace/labeltest2channel", + Channel: PlacementRuleKey.Namespace + "/labeltest2channel", Placement: &placement.Placement{ PlacementRef: &corev1.ObjectReference{ - Name: "labeltest2Placement", - Kind: "Placement", + Name: PlacementRuleKey.Name, + Kind: "PlacementRule", }, }, }, @@ -281,12 +274,8 @@ func TestNewAppLabels(t *testing.T) { mgrStopped.Wait() }() - // Create the app label test namespace. - g.Expect(c.Create(context.TODO(), labeltest1Namespace)).NotTo(gomega.HaveOccurred()) - defer c.Delete(context.TODO(), labeltest1Namespace) - // Create a channel - g.Expect(c.Create(context.TODO(), labeltest1Channel.DeepCopy())).NotTo(gomega.HaveOccurred()) + g.Expect(c.Create(context.TODO(), labeltest1Channel)).NotTo(gomega.HaveOccurred()) defer c.Delete(context.TODO(), labeltest1Channel) // Create a subscription @@ -328,12 +317,8 @@ func TestSyncAppLabels(t *testing.T) { mgrStopped.Wait() }() - // Create the app label test namespace. - g.Expect(c.Create(context.TODO(), labeltest2Namespace.DeepCopy())).NotTo(gomega.HaveOccurred()) - defer c.Delete(context.TODO(), labeltest2Namespace) - // Create a channel - g.Expect(c.Create(context.TODO(), labeltest2Channel.DeepCopy())).NotTo(gomega.HaveOccurred()) + g.Expect(c.Create(context.TODO(), labeltest2Channel)).NotTo(gomega.HaveOccurred()) defer c.Delete(context.TODO(), labeltest2Channel) labels := make(map[string]string) @@ -343,7 +328,7 @@ func TestSyncAppLabels(t *testing.T) { labeltest2Subscription.SetLabels(labels) // Create a subscription - g.Expect(c.Create(context.TODO(), labeltest2Subscription.DeepCopy())).NotTo(gomega.HaveOccurred()) + g.Expect(c.Create(context.TODO(), labeltest2Subscription)).NotTo(gomega.HaveOccurred()) defer c.Delete(context.TODO(), labeltest2Subscription) time.Sleep(time.Second * 2) diff --git a/pkg/controller/mcmhub/placement.go b/pkg/controller/mcmhub/placement.go index 669ebda7..08474fb0 100644 --- a/pkg/controller/mcmhub/placement.go +++ b/pkg/controller/mcmhub/placement.go @@ -84,7 +84,7 @@ func (r *ReconcileSubscription) getClustersByPlacement(instance *appSubV1.Subscr } func getDecisionsFromPlacementRef(pref *corev1.ObjectReference, namespace string, kubeClient client.Client) ([]string, error) { - klog.V(1).Info("Preparing cluster names from ", pref.Name) + klog.Info("Preparing cluster names from ", pref.Name) label := placementRuleLabel diff --git a/pkg/controller/subscription/subscription_controller.go b/pkg/controller/subscription/subscription_controller.go index e2b72801..bda6039f 100644 --- a/pkg/controller/subscription/subscription_controller.go +++ b/pkg/controller/subscription/subscription_controller.go @@ -31,6 +31,7 @@ import ( "k8s.io/klog/v2" chnv1 "open-cluster-management.io/multicloud-operators-channel/pkg/apis/apps/v1" appv1 "open-cluster-management.io/multicloud-operators-subscription/pkg/apis/apps/v1" + "open-cluster-management.io/multicloud-operators-subscription/pkg/metrics" ghsub "open-cluster-management.io/multicloud-operators-subscription/pkg/subscriber/git" hrsub "open-cluster-management.io/multicloud-operators-subscription/pkg/subscriber/helmrepo" ossub 
"open-cluster-management.io/multicloud-operators-subscription/pkg/subscriber/objectbucket" @@ -248,6 +249,11 @@ func (r *ReconcileSubscription) Reconcile(ctx context.Context, request reconcile instance.Status.Statuses = emptyStatuses klog.Errorf("doReconcile got error %v", reconcileErr) + + // if there is appsub reconcile error on the managed cluster such as channel error, one git_failed_pull_time_count is collected + metrics.GitFailedPullTime. + WithLabelValues(instance.Namespace, instance.Name). + Observe(float64(0)) } // Update AppstatusReference diff --git a/pkg/placementrule/controller/placementrule/placement.go b/pkg/placementrule/controller/placementrule/placement.go index 50008795..4fcb87ba 100644 --- a/pkg/placementrule/controller/placementrule/placement.go +++ b/pkg/placementrule/controller/placementrule/placement.go @@ -234,8 +234,13 @@ func (r *ReconcilePlacementRule) pickClustersByReplicas(instance *appv1alpha1.Pl break } } + + // If no ResourceHints is specified, sort the cluster decision list alphabetically by ClusterName + sort.Slice(newpd, func(i, j int) bool { + return newpd[i].ClusterName < newpd[j].ClusterName + }) } else { - // sort by something + // sort by placementrule spec.ResourceHints for _, cli := range clidx.Clusters { if _, ok := clmap[cli.Name]; !ok { continue diff --git a/pkg/placementrule/controller/placementrule/placementrule_controller.go b/pkg/placementrule/controller/placementrule/placementrule_controller.go index ce5a407c..4cc6a1c5 100644 --- a/pkg/placementrule/controller/placementrule/placementrule_controller.go +++ b/pkg/placementrule/controller/placementrule/placementrule_controller.go @@ -21,6 +21,7 @@ import ( appv1alpha1 "open-cluster-management.io/multicloud-operators-subscription/pkg/apis/apps/placementrule/v1" "open-cluster-management.io/multicloud-operators-subscription/pkg/placementrule/utils" + "k8s.io/apimachinery/pkg/api/equality" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" @@ -161,6 +162,8 @@ func (r *ReconcilePlacementRule) Reconcile(ctx context.Context, request reconcil return reconcile.Result{}, err } + orgDecisions := instance.Status.Decisions + orgclmap := make(map[string]string) for _, cl := range instance.Status.Decisions { orgclmap[cl.ClusterName] = cl.ClusterNamespace @@ -179,6 +182,17 @@ func (r *ReconcilePlacementRule) Reconcile(ctx context.Context, request reconcil updated := false + klog.Infof("orgDecisions: %v", orgDecisions) + klog.Infof("newDecisions: %v", instance.Status.Decisions) + + // The new placement decision list will be sorted. 
+ // When the placementRule controller is restarted, the placement decision list for all placementRules will be sorted + if !equality.Semantic.DeepEqual(orgDecisions, instance.Status.Decisions) { + klog.Infof("original decision list is different from the new decision list") + + updated = true + } + for _, cl := range instance.Status.Decisions { ns, ok := orgclmap[cl.ClusterName] if !ok || ns != cl.ClusterNamespace { @@ -190,6 +204,8 @@ func (r *ReconcilePlacementRule) Reconcile(ctx context.Context, request reconcil } if !updated && len(orgclmap) > 0 { + klog.Infof("original decision map is different from the new decision map") + updated = true } diff --git a/pkg/utils/subscription.go b/pkg/utils/subscription.go index 74879534..f7a50715 100644 --- a/pkg/utils/subscription.go +++ b/pkg/utils/subscription.go @@ -73,10 +73,20 @@ const ( // PlacementDecisionPredicateFunctions filters PlacementDecision status decisions update var PlacementDecisionPredicateFunctions = predicate.Funcs{ UpdateFunc: func(e event.UpdateEvent) bool { - newPd := e.ObjectNew.(*clusterapi.PlacementDecision) - oldPd := e.ObjectOld.(*clusterapi.PlacementDecision) + newPd, newOK := e.ObjectNew.(*clusterapi.PlacementDecision) + oldPd, oldOK := e.ObjectOld.(*clusterapi.PlacementDecision) - return !reflect.DeepEqual(newPd.Status.Decisions, oldPd.Status.Decisions) + if !newOK || !oldOK { + klog.Infof("not placementDecision object, skip....") + return false + } + + if !reflect.DeepEqual(newPd.Status.Decisions, oldPd.Status.Decisions) { + klog.Infof("cluster Decision list updated old: %v, new:%v", oldPd.Status.Decisions, newPd.Status.Decisions) + return true + } + + return false }, CreateFunc: func(e event.CreateEvent) bool { return true @@ -179,7 +189,11 @@ func IsSubscriptionBasicChanged(o, n *appv1.Subscription) bool { } // we care label change, pass it down + klog.Infof("fOsub_labels: %v", fOsub.GetLabels()) + klog.Infof("fNSub_labels: %v", fNSub.GetLabels()) + if !reflect.DeepEqual(fOsub.GetLabels(), fNSub.GetLabels()) { + klog.Info("different labels found") return true } @@ -317,7 +331,7 @@ func IsHubRelatedStatusChanged(old, nnew *appv1.SubscriptionStatus) bool { return true } - if old.Phase != nnew.Phase || !isSameMessage(old.Message, nnew.Message) { + if old.Phase != nnew.Phase || old.Reason != nnew.Reason || !isSameMessage(old.Message, nnew.Message) { return true } diff --git a/test/e2e/cases/19-verify-git-pull-time-metric/failed/failed-appsub-manifestwork.yaml b/test/e2e/cases/19-verify-git-pull-time-metric/failed/failed-appsub-manifestwork.yaml index 533e30b4..75728d41 100644 --- a/test/e2e/cases/19-verify-git-pull-time-metric/failed/failed-appsub-manifestwork.yaml +++ b/test/e2e/cases/19-verify-git-pull-time-metric/failed/failed-appsub-manifestwork.yaml @@ -3,8 +3,8 @@ apiVersion: work.open-cluster-management.io/v1 kind: ManifestWork metadata: labels: - apps.open-cluster-management.io/hosting-subscription: git-pull-time-metric-test.git-pull-time-metric-sub - name: git-pull-time-metric-test-git-pull-time-metric-sub + apps.open-cluster-management.io/hosting-subscription: git-pull-time-metric-test.git-pull-time-metric-sub-failed + name: git-pull-time-metric-test-git-pull-time-metric-sub-failed namespace: cluster1 spec: deleteOption: @@ -21,7 +21,7 @@ spec: kind: Namespace metadata: annotations: - apps.open-cluster-management.io/hosting-subscription: git-pull-time-metric-test/git-pull-time-metric-sub + apps.open-cluster-management.io/hosting-subscription: git-pull-time-metric-test/git-pull-time-metric-sub-failed name: 
git-pull-time-metric-test spec: {} status: {} @@ -31,17 +31,17 @@ spec: annotations: apps.open-cluster-management.io/git-branch: main1 apps.open-cluster-management.io/git-path: examples/git-simple-sub - apps.open-cluster-management.io/hosting-subscription: git-pull-time-metric-test/git-pull-time-metric-sub + apps.open-cluster-management.io/hosting-subscription: git-pull-time-metric-test/git-pull-time-metric-sub-failed apps.open-cluster-management.io/reconcile-rate: high open-cluster-management.io/user-group: "" open-cluster-management.io/user-identity: "" labels: - app: git-pull-time-metric-sub - app.kubernetes.io/part-of: git-pull-time-metric-sub + app: git-pull-time-metric-sub-failed + app.kubernetes.io/part-of: git-pull-time-metric-sub-failed apps.open-cluster-management.io/reconcile-rate: high - name: git-pull-time-metric-sub + name: git-pull-time-metric-sub-failed namespace: git-pull-time-metric-test spec: - channel: git-pull-time-metric-test/gitops + channel: git-pull-time-metric-test/empty-channel placement: local: true
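The PlacementDecision predicate added in pkg/utils/subscription.go only lets an update event through when Status.Decisions actually differs between the old and new objects, so updates that do not change the cluster decision list no longer re-trigger hub reconciles. Below is a minimal sketch of that filter using simplified stand-in types rather than the real clusterapi structs; it is illustrative only and assumes nothing beyond what the diff shows.

package main

import (
	"fmt"
	"reflect"
)

// clusterDecision and placementDecision are simplified stand-ins for the
// open-cluster-management PlacementDecision API, used only for illustration.
type clusterDecision struct {
	ClusterName string
}

type placementDecision struct {
	Decisions []clusterDecision
}

// decisionsChanged mirrors the update predicate: an update event is only worth
// reconciling when the decision list itself differs.
func decisionsChanged(oldPd, newPd placementDecision) bool {
	return !reflect.DeepEqual(oldPd.Decisions, newPd.Decisions)
}

func main() {
	oldPd := placementDecision{Decisions: []clusterDecision{{ClusterName: "cluster-1"}}}
	newPd := placementDecision{Decisions: []clusterDecision{{ClusterName: "cluster-1"}, {ClusterName: "cluster-2"}}}

	fmt.Println(decisionsChanged(oldPd, oldPd)) // false: identical decision lists, skip reconcile
	fmt.Println(decisionsChanged(oldPd, newPd)) // true: decision list changed, reconcile
}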