diff --git a/rollout/canary_test.go b/rollout/canary_test.go
index 56445f3978..1811f6a4c4 100644
--- a/rollout/canary_test.go
+++ b/rollout/canary_test.go
@@ -427,7 +427,6 @@ func TestResetCurrentStepIndexOnStepChange(t *testing.T) {
 	newConditions := generateConditionsPatch(true, conditions.ReplicaSetUpdatedReason, r2, false, "", false)
 	expectedPatch := fmt.Sprintf(expectedPatchWithoutPodHash, expectedCurrentPodHash, expectedCurrentStepHash, newConditions)
 	assert.Equal(t, calculatePatch(r2, expectedPatch), patch)
-
 }
 
 func TestResetCurrentStepIndexOnPodSpecChange(t *testing.T) {
@@ -469,7 +468,6 @@ func TestResetCurrentStepIndexOnPodSpecChange(t *testing.T) {
 	expectedPatch := fmt.Sprintf(expectedPatchWithoutPodHash, expectedCurrentPodHash, newConditions)
 
 	assert.Equal(t, calculatePatch(r2, expectedPatch), patch)
-
 }
 
 func TestCanaryRolloutCreateFirstReplicasetNoSteps(t *testing.T) {
@@ -708,7 +706,6 @@ func TestCanaryRolloutScaleDownOldRsDontScaleDownTooMuch(t *testing.T) {
 	assert.Equal(t, int32(0), *updatedRS1.Spec.Replicas)
 	updatedRS2 := f.getUpdatedReplicaSet(updatedRS2Index)
 	assert.Equal(t, int32(4), *updatedRS2.Spec.Replicas)
-
 }
 
 // TestCanaryDontScaleDownOldRsDuringInterruptedUpdate tests when we need to prevent scale down an
@@ -1019,9 +1016,8 @@ func TestSyncRolloutWaitAddToQueue(t *testing.T) {
 	c, i, k8sI := f.newController(func() time.Duration { return 30 * time.Minute })
 	f.runController(key, true, false, c, i, k8sI)
 
-	//When the controller starts, it will enqueue the rollout while syncing the informer and during the reconciliation step
+	// When the controller starts, it will enqueue the rollout while syncing the informer and during the reconciliation step
 	assert.Equal(t, 2, f.enqueuedObjects[key])
-
 }
 
 func TestSyncRolloutIgnoreWaitOutsideOfReconciliationPeriod(t *testing.T) {
@@ -1034,7 +1030,7 @@ func TestSyncRolloutIgnoreWaitOutsideOfReconciliationPeriod(t *testing.T) {
 		},
 		{
 			Pause: &v1alpha1.RolloutPause{
-				Duration: v1alpha1.DurationFromInt(3600), //1 hour
+				Duration: v1alpha1.DurationFromInt(3600), // 1 hour
 			},
 		},
 	}
@@ -1068,9 +1064,8 @@ func TestSyncRolloutIgnoreWaitOutsideOfReconciliationPeriod(t *testing.T) {
 	key := fmt.Sprintf("%s/%s", r2.Namespace, r2.Name)
 	c, i, k8sI := f.newController(func() time.Duration { return 30 * time.Minute })
 	f.runController(key, true, false, c, i, k8sI)
-	//When the controller starts, it will enqueue the rollout so we expect the rollout to enqueue at least once.
+	// When the controller starts, it will enqueue the rollout, so we expect the rollout to be enqueued at least once.
 	assert.Equal(t, 1, f.enqueuedObjects[key])
-
 }
 
 func TestSyncRolloutWaitIncrementStepIndex(t *testing.T) {
@@ -1084,7 +1079,8 @@ func TestSyncRolloutWaitIncrementStepIndex(t *testing.T) {
 			Pause: &v1alpha1.RolloutPause{
 				Duration: v1alpha1.DurationFromInt(5),
 			},
-		}, {
+		},
+		{
 			Pause: &v1alpha1.RolloutPause{},
 		},
 	}
@@ -1236,6 +1232,7 @@ func TestCanarySVCSelectors(t *testing.T) {
 			},
 		},
 	}
+
 	rc := rolloutContext{
 		log: logutil.WithRollout(rollout),
 		reconcilerBase: reconcilerBase{
@@ -1266,6 +1263,7 @@ func TestCanarySVCSelectors(t *testing.T) {
 			},
 		},
 	}
+
 	stopchan := make(chan struct{})
 	defer close(stopchan)
 	informers.Start(stopchan)
@@ -1286,6 +1284,124 @@ func TestCanarySVCSelectors(t *testing.T) {
 	}
 }
 
+func TestCanarySVCSelectorsBasicCanaryAbortServiceSwitchBack(t *testing.T) {
+	for _, tc := range []struct {
+		canaryReplicas      int32
+		canaryAvailReplicas int32
+		shouldAbortRollout  bool
+		shouldTargetNewRS   bool
+	}{
+		{2, 2, false, true}, // Rollout not aborted, canaryService should point at the canary RS
+		{2, 2, true, false}, // Rollout aborted, canaryService should point at the stable RS
+	} {
+		namespace := "namespace"
+		selectorLabel := "selector-labels-test"
+		selectorNewRSVal := "new-rs-xxx"
+		selectorStableRSVal := "stable-rs-xxx"
+		stableService := &corev1.Service{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:        "stable",
+				Namespace:   namespace,
+				Annotations: map[string]string{v1alpha1.ManagedByRolloutsKey: selectorLabel},
+				Labels: map[string]string{
+					v1alpha1.DefaultRolloutUniqueLabelKey: selectorStableRSVal,
+				},
+			},
+		}
+		canaryService := &corev1.Service{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:        "canary",
+				Namespace:   namespace,
+				Annotations: map[string]string{v1alpha1.ManagedByRolloutsKey: selectorLabel},
+			},
+		}
+		kubeclient := k8sfake.NewSimpleClientset(stableService, canaryService)
+		informers := k8sinformers.NewSharedInformerFactory(kubeclient, 0)
+		servicesLister := informers.Core().V1().Services().Lister()
+
+		rollout := &v1alpha1.Rollout{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      selectorLabel,
+				Namespace: namespace,
+			},
+			Spec: v1alpha1.RolloutSpec{
+				Strategy: v1alpha1.RolloutStrategy{
+					Canary: &v1alpha1.CanaryStrategy{
+						StableService: stableService.Name,
+						CanaryService: canaryService.Name,
+					},
+				},
+			},
+		}
+
+		pc := pauseContext{
+			rollout: rollout,
+		}
+		if tc.shouldAbortRollout {
+			pc.AddAbort("Add Abort")
+		}
+
+		rc := rolloutContext{
+			log:          logutil.WithRollout(rollout),
+			pauseContext: &pc,
+			reconcilerBase: reconcilerBase{
+				servicesLister: servicesLister,
+				kubeclientset:  kubeclient,
+				recorder:       record.NewFakeEventRecorder(),
+			},
+			rollout: rollout,
+			newRS: &v1.ReplicaSet{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      "canary",
+					Namespace: namespace,
+					Labels: map[string]string{
+						v1alpha1.DefaultRolloutUniqueLabelKey: selectorNewRSVal,
+					},
+				},
+				Spec: v1.ReplicaSetSpec{
+					Replicas: pointer.Int32Ptr(tc.canaryReplicas),
+				},
+				Status: v1.ReplicaSetStatus{
+					AvailableReplicas: tc.canaryAvailReplicas,
+				},
+			},
+			stableRS: &v1.ReplicaSet{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      "stable",
+					Namespace: namespace,
+					Labels: map[string]string{
+						v1alpha1.DefaultRolloutUniqueLabelKey: selectorStableRSVal,
+					},
+				},
+				Spec: v1.ReplicaSetSpec{
+					Replicas: pointer.Int32Ptr(tc.canaryReplicas),
+				},
+				Status: v1.ReplicaSetStatus{
+					AvailableReplicas: tc.canaryAvailReplicas,
+				},
+			},
+		}
+
+		stopchan := make(chan struct{})
+		defer close(stopchan)
+		informers.Start(stopchan)
+		informers.WaitForCacheSync(stopchan)
+		err := rc.reconcileStableAndCanaryService()
+		assert.NoError(t, err, "unable to reconcileStableAndCanaryService")
+		updatedCanarySVC, err := servicesLister.Services(rc.rollout.Namespace).Get(canaryService.Name)
+		assert.NoError(t, err, "unable to get updated canary service")
+		if tc.shouldTargetNewRS {
+			assert.Equal(t, selectorNewRSVal, updatedCanarySVC.Spec.Selector[v1alpha1.DefaultRolloutUniqueLabelKey],
+				"canary SVC should have newRS selector label when newRS has %d replicas and %d AvailableReplicas",
+				tc.canaryReplicas, tc.canaryAvailReplicas)
+		} else {
+			assert.Equal(t, selectorStableRSVal, updatedCanarySVC.Spec.Selector[v1alpha1.DefaultRolloutUniqueLabelKey],
+				"canary SVC should have stableRS selector label when newRS has %d replicas and %d AvailableReplicas",
+				tc.canaryReplicas, tc.canaryAvailReplicas)
+		}
+	}
+}
+
 func TestCanaryRolloutWithInvalidCanaryServiceName(t *testing.T) {
 	f := newFixture(t)
 	defer f.Close()
diff --git a/rollout/service.go b/rollout/service.go
index de63f527b3..f808cb55fc 100644
--- a/rollout/service.go
+++ b/rollout/service.go
@@ -257,6 +257,16 @@ func (c *rolloutContext) reconcileStableAndCanaryService() error {
 	if err != nil {
 		return err
 	}
+
+	// If the rollout is aborted and not using traffic routing, switch the canary service back to the stable ReplicaSet
+	if c.pauseContext != nil && c.pauseContext.IsAborted() && c.rollout.Spec.Strategy.Canary.TrafficRouting == nil {
+		err = c.ensureSVCTargets(c.rollout.Spec.Strategy.Canary.CanaryService, c.stableRS, true)
+		if err != nil {
+			return err
+		}
+		return nil
+	}
+
 	err = c.ensureSVCTargets(c.rollout.Spec.Strategy.Canary.CanaryService, c.newRS, true)
 	if err != nil {
 		return err
diff --git a/rollout/trafficrouting.go b/rollout/trafficrouting.go
index c00eb66e1b..1b694d517a 100644
--- a/rollout/trafficrouting.go
+++ b/rollout/trafficrouting.go
@@ -143,11 +143,7 @@ func (c *rolloutContext) reconcileTrafficRouting() error {
 		c.newStatus.Canary.Weights = nil
 		return nil
 	}
-	if reconcilers == nil {
-		// Not using traffic routing
-		c.newStatus.Canary.Weights = nil
-		return nil
-	}
+
 	c.log.Infof("Found %d TrafficRouting Reconcilers", len(reconcilers))
 	// iterate over the list of trafficReconcilers
 	for _, reconciler := range reconcilers {
diff --git a/test/e2e/canary_test.go b/test/e2e/canary_test.go
index 00b588c598..fe22175074 100644
--- a/test/e2e/canary_test.go
+++ b/test/e2e/canary_test.go
@@ -443,7 +443,7 @@ spec:
         port: 80
     periodSeconds: 30
   strategy:
-    canary: 
+    canary:
       steps:
       - setWeight: 20
       - pause: {}
@@ -539,7 +539,24 @@ func (s *CanarySuite) TestCanaryScaleDownOnAbort() {
 		AbortRollout().
 		WaitForRolloutStatus("Degraded").
 		Then().
-		//Expect that the canary service selector has been moved back to stable
+		// Expect that the canary service selector has been moved back to stable
 		ExpectServiceSelector("canary-scaledowndelay-canary", map[string]string{"app": "canary-scaledowndelay", "rollouts-pod-template-hash": "66597877b7"}, false).
 		When().
 		Sleep(3*time.Second).
 		Then().
 		ExpectRevisionPodCount("2", 0)
 }
+
+func (s *CanarySuite) TestCanaryScaleDownOnAbortNoTrafficRouting() {
+	s.Given().
+		HealthyRollout(`@functional/canary-scaledownonabortnotrafficrouting.yaml`).
+		When().
+		UpdateSpec(). // update to revision 2
+		WaitForRolloutStatus("Paused").
+		AbortRollout().
+		WaitForRolloutStatus("Degraded").
+		Then().
+		// Expect that the canary service selector has been moved back to stable
+		ExpectServiceSelector("canary-scaledowndelay-canary", map[string]string{"app": "canary-scaledowndelay", "rollouts-pod-template-hash": "66597877b7"}, false).
+		When().
+		Sleep(3*time.Second).
+		Then().
+		ExpectRevisionPodCount("2", 0)
+}
@@ -590,7 +607,7 @@ func (s *CanarySuite) TestCanaryDynamicStableScale() {
 		WaitForRevisionPodCount("2", 1).
 		Then().
 		ExpectRevisionPodCount("1", 4).
-		//Assert that the canary service selector is still not set to stable rs because of dynamic stable scale still in progress
+		// Assert that the canary service selector is still not set to the stable RS because dynamic stable scale is still in progress
 		Assert(func(t *fixtures.Then) {
 			canarySvc, stableSvc := t.GetServices()
 			assert.NotEqual(s.T(), canarySvc.Spec.Selector["rollouts-pod-template-hash"], stableSvc.Spec.Selector["rollouts-pod-template-hash"])
 		}).
@@ -599,7 +616,7 @@ func (s *CanarySuite) TestCanaryDynamicStableScale() {
 		MarkPodsReady("1", 1). // mark last remaining stable pod as ready (4/4 stable are ready)
 		WaitForRevisionPodCount("2", 0).
 		Then().
-		//Expect that the canary service selector is now set to stable because of dynamic stable scale is over and we have all pods up on stable rs
+		// Expect that the canary service selector is now set to stable because dynamic stable scale is over and all pods are up on the stable RS
 		ExpectServiceSelector("dynamic-stable-scale-canary", map[string]string{"app": "dynamic-stable-scale", "rollouts-pod-template-hash": "868d98995b"}, false).
 		ExpectRevisionPodCount("1", 4)
 }
diff --git a/test/e2e/functional/canary-scaledownonabortnotrafficrouting.yaml b/test/e2e/functional/canary-scaledownonabortnotrafficrouting.yaml
new file mode 100644
index 0000000000..0c42735db3
--- /dev/null
+++ b/test/e2e/functional/canary-scaledownonabortnotrafficrouting.yaml
@@ -0,0 +1,91 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: canary-scaledowndelay-root
+spec:
+  type: NodePort
+  ports:
+  - port: 80
+    targetPort: http
+    protocol: TCP
+    name: http
+  selector:
+    app: canary-scaledowndelay
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: canary-scaledowndelay-canary
+spec:
+  type: NodePort
+  ports:
+  - port: 80
+    targetPort: http
+    protocol: TCP
+    name: http
+  selector:
+    app: canary-scaledowndelay
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: canary-scaledowndelay-stable
+spec:
+  type: NodePort
+  ports:
+  - port: 80
+    targetPort: http
+    protocol: TCP
+    name: http
+  selector:
+    app: canary-scaledowndelay
+---
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: canary-scaledowndelay-ingress
+  annotations:
+    kubernetes.io/ingress.class: alb
+spec:
+  rules:
+  - http:
+      paths:
+      - path: /*
+        pathType: Prefix
+        backend:
+          service:
+            name: canary-scaledowndelay-root
+            port:
+              name: use-annotation
+---
+apiVersion: argoproj.io/v1alpha1
+kind: Rollout
+metadata:
+  name: canary-scaledown-on-abort
+spec:
+  selector:
+    matchLabels:
+      app: canary-scaledowndelay
+  template:
+    metadata:
+      labels:
+        app: canary-scaledowndelay
+    spec:
+      containers:
+      - name: canary-scaledowndelay
+        image: nginx:1.19-alpine
+        ports:
+        - name: http
+          containerPort: 80
+          protocol: TCP
+        resources:
+          requests:
+            memory: 16Mi
+            cpu: 5m
+  strategy:
+    canary:
+      canaryService: canary-scaledowndelay-canary
+      stableService: canary-scaledowndelay-stable
+      steps:
+      - setWeight: 50
+      - pause: {}