From b33bbd6162c85387de65e98564ef1960c64fe183 Mon Sep 17 00:00:00 2001 From: Claudia Beresford Date: Thu, 1 Dec 2022 15:31:28 +0000 Subject: [PATCH] test: Add some microvmreplicaset_controller tests These are not comprehensive. We should move to use envtest and cover everything properly. --- api/v1alpha1/condition_consts.go | 6 + ...re.liquid-metal.io_microvmreplicasets.yaml | 6 +- controllers/helpers_test.go | 107 +++++++++- controllers/microvmreplicaset_controller.go | 52 ++--- .../microvmreplicaset_controller_test.go | 192 ++++++++++++++++++ 5 files changed, 327 insertions(+), 36 deletions(-) create mode 100644 controllers/microvmreplicaset_controller_test.go diff --git a/api/v1alpha1/condition_consts.go b/api/v1alpha1/condition_consts.go index 8547dc7..18e658d 100644 --- a/api/v1alpha1/condition_consts.go +++ b/api/v1alpha1/condition_consts.go @@ -31,9 +31,15 @@ const ( // MicrovmReplicaSetIncompleteReason indicates the microvmreplicaset does not have all replicas yet. MicrovmReplicaSetIncompleteReason = "MicrovmReplicaSetIncomplete" + // MicrovmReplicaSetProvisionFailedReason indicates that the microvm failed to provision. + MicrovmReplicaSetProvisionFailedReason = "MicrovmReplicaSetProvisionFailed" + // MicrovmReplicaSetDeletingReason indicates the microvmreplicaset is in a deleted state. MicrovmReplicaSetDeletingReason = "MicrovmReplicaSetDeleting" // MicrovmReplicaSetDeletedFailedReason indicates the microvmreplicaset failed to deleted cleanly. MicrovmReplicaSetDeleteFailedReason = "MicrovmReplicaSetDeleteFailed" + + // MicrovmReplicaSetUpdatingReason indicates the microvm is in a pending state. 
+ MicrovmReplicaSetUpdatingReason = "MicrovmReplicaSetUpdating" ) diff --git a/config/crd/bases/infrastructure.liquid-metal.io_microvmreplicasets.yaml b/config/crd/bases/infrastructure.liquid-metal.io_microvmreplicasets.yaml index f036bdc..eb76709 100644 --- a/config/crd/bases/infrastructure.liquid-metal.io_microvmreplicasets.yaml +++ b/config/crd/bases/infrastructure.liquid-metal.io_microvmreplicasets.yaml @@ -346,13 +346,13 @@ spec: description: Ready is true when Replicas is Equal to ReadyReplicas. type: boolean readyReplicas: - description: readyReplicas is the number of pods targeted by this + description: ReadyReplicas is the number of pods targeted by this ReplicaSet with a Ready Condition. format: int32 type: integer replicas: - description: 'Replicas is the most recently observed number of replicas. - More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller' + description: Replicas is the most recently observed number of replicas + which have been created. 
format: int32 type: integer type: object diff --git a/controllers/helpers_test.go b/controllers/helpers_test.go index e6db214..9801819 100644 --- a/controllers/helpers_test.go +++ b/controllers/helpers_test.go @@ -34,10 +34,11 @@ import ( ) const ( - testNamespace = "ns1" - testMicrovmName = "mvm1" - testMicrovmUID = "ABCDEF123456" - testBootstrapData = "somesamplebootstrapsdata" + testNamespace = "ns1" + testMicrovmName = "mvm1" + testMicrovmReplicaSetName = "rs1" + testMicrovmUID = "ABCDEF123456" + testBootstrapData = "somesamplebootstrapsdata" ) func asRuntimeObject(microvm *infrav1.Microvm) []runtime.Object { @@ -68,6 +69,34 @@ func reconcileMicrovm(client client.Client, mockAPIClient flclient.Client) (ctrl return mvmController.Reconcile(context.TODO(), request) } +func reconcileMicrovmReplicaSet(client client.Client) (ctrl.Result, error) { + mvmRSController := &controllers.MicrovmReplicaSetReconciler{ + Client: client, + Scheme: client.Scheme(), + } + + request := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: testMicrovmReplicaSetName, + Namespace: testNamespace, + }, + } + + return mvmRSController.Reconcile(context.TODO(), request) +} + +func reconcileMicrovmReplicaSetNTimes(g *WithT, client client.Client, count int32) error { + for count > 0 { + ensureMicrovmState(g, client) + if _, err := reconcileMicrovmReplicaSet(client); err != nil { + return err + } + count-- + } + + return nil +} + func getMicrovm(c client.Client, name, namespace string) (*infrav1.Microvm, error) { key := client.ObjectKey{ Name: name, @@ -79,6 +108,23 @@ func getMicrovm(c client.Client, name, namespace string) (*infrav1.Microvm, erro return mvm, err } +func listMicrovm(c client.Client) (*infrav1.MicrovmList, error) { + mvm := &infrav1.MicrovmList{} + err := c.List(context.TODO(), mvm) + return mvm, err +} + +func getMicrovmReplicaSet(c client.Client, name, namespace string) (*infrav1.MicrovmReplicaSet, error) { + key := client.ObjectKey{ + Name: name, + Namespace: 
namespace, + } + + mvmRS := &infrav1.MicrovmReplicaSet{} + err := c.Get(context.TODO(), key, mvmRS) + return mvmRS, err +} + func createFakeClient(g *WithT, objects []runtime.Object) client.Client { scheme := runtime.NewScheme() @@ -127,6 +173,27 @@ func createMicrovm() *infrav1.Microvm { } } +func createMicrovmReplicaSet(reps int32) *infrav1.MicrovmReplicaSet { + mvm := createMicrovm() + mvm.Spec.Host = microvm.Host{} + + return &infrav1.MicrovmReplicaSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: testMicrovmReplicaSetName, + Namespace: testNamespace, + }, + Spec: infrav1.MicrovmReplicaSetSpec{ + Host: microvm.Host{ + Endpoint: "127.0.0.1:9090", + }, + Replicas: pointer.Int32(reps), + Template: infrav1.MicrovmTemplateSpec{ + Spec: mvm.Spec, + }, + }, + } +} + func withExistingMicrovm(fc *fakes.FakeClient, mvmState flintlocktypes.MicroVMStatus_MicroVMState) { fc.GetMicroVMReturns(&flintlockv1.GetMicroVMResponse{ Microvm: &flintlocktypes.MicroVM{ @@ -185,18 +252,40 @@ func assertMicrovmReconciled(g *WithT, reconciled *infrav1.Microvm) { g.Expect(reconciled.Status.Ready).To(BeTrue(), "The Ready property must be true when the mvm has been reconciled") } +func microvmsCreated(g *WithT, c client.Client) int32 { + mvmList, err := listMicrovm(c) + g.Expect(err).NotTo(HaveOccurred()) + return int32(len(mvmList.Items)) +} + +func ensureMicrovmState(g *WithT, c client.Client) { + // update the microvms so they report as ready to move the replicaset reconciliation along + mvmList, err := listMicrovm(c) + g.Expect(err).NotTo(HaveOccurred()) + + for _, mvm := range mvmList.Items { + mvm.Status.Ready = true + g.Expect(c.Update(context.TODO(), &mvm)).To(Succeed()) + } +} + func assertFinalizer(g *WithT, reconciled *infrav1.Microvm) { g.Expect(reconciled.ObjectMeta.Finalizers).NotTo(BeEmpty(), "Expected at least one finalizer to be set") - g.Expect(hasMicrovmFinalizer(reconciled)).To(BeTrue(), "Expect the mvm finalizer") + g.Expect(hasMicrovmFinalizer(&reconciled.ObjectMeta, 
infrav1.MvmFinalizer)).To(BeTrue(), "Expect the mvm finalizer") +} + +func assertMRSFinalizer(g *WithT, reconciled *infrav1.MicrovmReplicaSet) { + g.Expect(reconciled.ObjectMeta.Finalizers).NotTo(BeEmpty(), "Expected at least one finalizer to be set") + g.Expect(hasMicrovmFinalizer(&reconciled.ObjectMeta, infrav1.MvmRSFinalizer)).To(BeTrue(), "Expect the mvmrs finalizer") } -func hasMicrovmFinalizer(mvm *infrav1.Microvm) bool { - if len(mvm.ObjectMeta.Finalizers) == 0 { +func hasMicrovmFinalizer(meta *metav1.ObjectMeta, finalizer string) bool { + if len(meta.Finalizers) == 0 { return false } - for _, f := range mvm.ObjectMeta.Finalizers { - if f == infrav1.MvmFinalizer { + for _, f := range meta.Finalizers { + if f == finalizer { return true } } diff --git a/controllers/microvmreplicaset_controller.go b/controllers/microvmreplicaset_controller.go index 92f3f09..e6be0f6 100644 --- a/controllers/microvmreplicaset_controller.go +++ b/controllers/microvmreplicaset_controller.go @@ -123,15 +123,17 @@ func (r *MicrovmReplicaSetReconciler) reconcileDelete( for _, mvm := range mvmList { // if the object is already being deleted, skip this - if mvm.DeletionTimestamp == nil { - // otherwise send a delete call - if err := r.Delete(ctx, &mvm); err != nil { - mvmReplicaSetScope.Error(err, "failed deleting microvm") - mvmReplicaSetScope.SetNotReady(infrav1.MicrovmReplicaSetDeleteFailedReason, "Error", "") + if !mvm.DeletionTimestamp.IsZero() { + continue + } - return ctrl.Result{}, err + // otherwise send a delete call + go func(m infrav1.Microvm) { + if err := r.Delete(ctx, &m); err != nil { + mvmReplicaSetScope.Error(err, "failed deleting microvm", "microvm", m.Name) + mvmReplicaSetScope.SetNotReady(infrav1.MicrovmReplicaSetDeleteFailedReason, "Error", "") } - } + }(mvm) } // reset the number of created replicas. 
@@ -184,13 +186,6 @@ func (r *MicrovmReplicaSetReconciler) reconcileNormal( mvmReplicaSetScope.SetReady() return reconcile.Result{}, nil - // if all desired microvms have been created, but are not quite ready yet, - // set the condition and requeue - case mvmReplicaSetScope.CreatedReplicas() == mvmReplicaSetScope.DesiredReplicas(): - mvmReplicaSetScope.Info("MicrovmReplicaSet creating: waiting for microvms to become ready") - mvmReplicaSetScope.SetNotReady(infrav1.MicrovmReplicaSetIncompleteReason, "Info", "") - - return ctrl.Result{RequeueAfter: requeuePeriod}, nil // if we are in this branch then not all desired microvms have been created. // create a new one and set the ownerref to this controller. case mvmReplicaSetScope.CreatedReplicas() < mvmReplicaSetScope.DesiredReplicas(): @@ -198,27 +193,36 @@ func (r *MicrovmReplicaSetReconciler) reconcileNormal( if err := r.createMicrovm(ctx, mvmReplicaSetScope); err != nil { mvmReplicaSetScope.Error(err, "failed creating owned microvm") + mvmReplicaSetScope.SetNotReady(infrav1.MicrovmReplicaSetProvisionFailedReason, "Error", "") + return reconcile.Result{}, fmt.Errorf("failed to create new microvm for replicaset: %w", err) } mvmReplicaSetScope.SetNotReady(infrav1.MicrovmReplicaSetIncompleteReason, "Info", "") // if we are here then a scale down has been requested. // we delete the first found until the numbers balance out. 
- // TODO this is not ideal, find a better way + // TODO the way this works is very naive and often ends up deleting everything + // if the timing is wrong/right, find a better way https://github.com/weaveworks-liquidmetal/microvm-operator/issues/17 case mvmReplicaSetScope.CreatedReplicas() > mvmReplicaSetScope.DesiredReplicas(): - mvmReplicaSetScope.Info("MicrovmReplicaSet deleting: delete microvm") + mvmReplicaSetScope.Info("MicrovmReplicaSet updating: delete microvm") + mvmReplicaSetScope.SetNotReady(infrav1.MicrovmReplicaSetUpdatingReason, "Info", "") mvm := mvmList[0] - if mvm.DeletionTimestamp == nil { - if err := r.Delete(ctx, &mvm); err != nil { - mvmReplicaSetScope.Error(err, "failed deleting microvm") - mvmReplicaSetScope.SetNotReady(infrav1.MicrovmReplicaSetDeleteFailedReason, "Error", "") - - return ctrl.Result{}, err - } + if !mvm.DeletionTimestamp.IsZero() { + return ctrl.Result{}, nil } - mvmReplicaSetScope.SetNotReady(infrav1.MicrovmReplicaSetUpdatingReason, "Info", "") + if err := r.Delete(ctx, &mvm); err != nil { + mvmReplicaSetScope.Error(err, "failed deleting microvm") + mvmReplicaSetScope.SetNotReady(infrav1.MicrovmReplicaSetDeleteFailedReason, "Error", "") + + return ctrl.Result{}, err + } + // if all desired microvms have been created, but are not quite ready yet, + // set the condition and requeue + default: + mvmReplicaSetScope.Info("MicrovmReplicaSet creating: waiting for microvms to become ready") + mvmReplicaSetScope.SetNotReady(infrav1.MicrovmReplicaSetIncompleteReason, "Info", "") } controllerutil.AddFinalizer(mvmReplicaSetScope.MicrovmReplicaSet, infrav1.MvmRSFinalizer) diff --git a/controllers/microvmreplicaset_controller_test.go b/controllers/microvmreplicaset_controller_test.go new file mode 100644 index 0000000..ff935dc --- /dev/null +++ b/controllers/microvmreplicaset_controller_test.go @@ -0,0 +1,192 @@ +package controllers_test + +import ( + "context" + "testing" + + . 
"github.com/onsi/gomega" + infrav1 "github.com/weaveworks-liquidmetal/microvm-operator/api/v1alpha1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/utils/pointer" +) + +func TestMicrovmRS_Reconcile_MissingObject(t *testing.T) { + g := NewWithT(t) + + mvmRS := &infrav1.MicrovmReplicaSet{} + objects := []runtime.Object{mvmRS} + + client := createFakeClient(g, objects) + result, err := reconcileMicrovmReplicaSet(client) + g.Expect(err).NotTo(HaveOccurred(), "Reconciling when microvmreplicaset doesn't exist should not error") + g.Expect(result.IsZero()).To(BeTrue(), "Expect no requeue to be requested") +} + +func TestMicrovmRS_ReconcileNormal_CreateSucceeds(t *testing.T) { + g := NewWithT(t) + + // creating a replicaset with 2 replicas + var expectedReplicas int32 = 2 + mvmRS := createMicrovmReplicaSet(expectedReplicas) + objects := []runtime.Object{mvmRS} + client := createFakeClient(g, objects) + + // first reconciliation + result, err := reconcileMicrovmReplicaSet(client) + g.Expect(err).NotTo(HaveOccurred(), "Reconciling microvmreplicaset the first time should not error") + g.Expect(result.IsZero()).To(BeFalse(), "Expect requeue to be requested after create") + + reconciled, err := getMicrovmReplicaSet(client, testMicrovmReplicaSetName, testNamespace) + g.Expect(err).NotTo(HaveOccurred(), "Getting microvm should not fail") + assertMRSFinalizer(g, reconciled) + + assertConditionFalse(g, reconciled, infrav1.MicrovmReplicaSetReadyCondition, infrav1.MicrovmReplicaSetIncompleteReason) + g.Expect(reconciled.Status.Ready).To(BeFalse(), "MicrovmReplicaSet should not be ready yet") + g.Expect(reconciled.Status.Replicas).To(Equal(int32(0)), "Expected the record to not have been updated yet") + g.Expect(microvmsCreated(g, client)).To(Equal(expectedReplicas-1), "Expected only one Microvm to have been created after one reconciliation") + + // second reconciliation + result, err = reconcileMicrovmReplicaSet(client) + g.Expect(err).NotTo(HaveOccurred(), "Reconciling 
microvmreplicaset the second time should not error") + g.Expect(result.IsZero()).To(BeFalse(), "Expect requeue to be requested after create") + + reconciled, err = getMicrovmReplicaSet(client, testMicrovmReplicaSetName, testNamespace) + g.Expect(err).NotTo(HaveOccurred(), "Getting microvm should not fail") + + assertConditionFalse(g, reconciled, infrav1.MicrovmReplicaSetReadyCondition, infrav1.MicrovmReplicaSetIncompleteReason) + g.Expect(reconciled.Status.Ready).To(BeFalse(), "MicrovmReplicaSet should not be ready yet") + g.Expect(reconciled.Status.Replicas).To(Equal(expectedReplicas-1), "Expected the record to contain 1 replica") + g.Expect(microvmsCreated(g, client)).To(Equal(expectedReplicas), "Expected all Microvms to have been created after two reconciliations") + + // final reconciliation + ensureMicrovmState(g, client) + result, err = reconcileMicrovmReplicaSet(client) + g.Expect(err).NotTo(HaveOccurred(), "Reconciling microvmreplicaset the third time should not error") + g.Expect(result.IsZero()).To(BeTrue(), "Expect requeue to be not requested after create") + + reconciled, err = getMicrovmReplicaSet(client, testMicrovmReplicaSetName, testNamespace) + g.Expect(err).NotTo(HaveOccurred(), "Getting microvm should not fail") + + assertConditionTrue(g, reconciled, infrav1.MicrovmReplicaSetReadyCondition) + g.Expect(reconciled.Status.Ready).To(BeTrue(), "MicrovmReplicaSet should be ready now") + g.Expect(reconciled.Status.Replicas).To(Equal(expectedReplicas), "Expected the record to contain 2 replicas") + g.Expect(reconciled.Status.ReadyReplicas).To(Equal(expectedReplicas), "Expected all replicas to be ready") +} + +func TestMicrovmRS_ReconcileNormal_UpdateSucceeds(t *testing.T) { + g := NewWithT(t) + + // updating a replicaset with 2 replicas + var ( + initialReplicaCount int32 = 2 + scaledReplicaCount int32 = 1 + ) + + mvmRS := createMicrovmReplicaSet(initialReplicaCount) + objects := []runtime.Object{mvmRS} + client := createFakeClient(g, objects) + + // 
create + g.Expect(reconcileMicrovmReplicaSetNTimes(g, client, initialReplicaCount+1)).To(Succeed()) + + reconciled, err := getMicrovmReplicaSet(client, testMicrovmReplicaSetName, testNamespace) + g.Expect(err).NotTo(HaveOccurred(), "Getting microvm should not fail") + + assertMRSFinalizer(g, reconciled) + assertConditionTrue(g, reconciled, infrav1.MicrovmReplicaSetReadyCondition) + g.Expect(reconciled.Status.Ready).To(BeTrue(), "MicrovmReplicaSet should be ready now") + g.Expect(reconciled.Status.Replicas).To(Equal(initialReplicaCount), "Expected the record to contain 2 replicas") + g.Expect(reconciled.Status.ReadyReplicas).To(Equal(initialReplicaCount), "Expected all replicas to be ready") + + // update, scale down to 1 + reconciled.Spec.Replicas = pointer.Int32(scaledReplicaCount) + g.Expect(client.Update(context.TODO(), reconciled)).To(Succeed()) + + // first reconciliation + result, err := reconcileMicrovmReplicaSet(client) + g.Expect(err).NotTo(HaveOccurred(), "Reconciling microvmreplicaset the first time should not error") + g.Expect(result.IsZero()).To(BeFalse(), "Expect requeue to be requested after update") + + reconciled, err = getMicrovmReplicaSet(client, testMicrovmReplicaSetName, testNamespace) + g.Expect(err).NotTo(HaveOccurred(), "Getting microvm should not fail") + + assertConditionFalse(g, reconciled, infrav1.MicrovmReplicaSetReadyCondition, infrav1.MicrovmReplicaSetUpdatingReason) + g.Expect(reconciled.Status.Ready).To(BeFalse(), "MicrovmReplicaSet should not be ready") + g.Expect(reconciled.Status.Replicas).To(Equal(initialReplicaCount), "Expected the record to contain 2 replicas") + g.Expect(reconciled.Status.ReadyReplicas).To(Equal(initialReplicaCount), "Expected all replicas to be ready") + + // second reconciliation + result, err = reconcileMicrovmReplicaSet(client) + g.Expect(err).NotTo(HaveOccurred(), "Reconciling microvmreplicaset the second time should not error") + g.Expect(result.IsZero()).To(BeTrue(), "Expect requeue to not be 
requested after reconcile") + + reconciled, err = getMicrovmReplicaSet(client, testMicrovmReplicaSetName, testNamespace) + g.Expect(err).NotTo(HaveOccurred(), "Getting microvm should not fail") + + assertConditionTrue(g, reconciled, infrav1.MicrovmReplicaSetReadyCondition) + g.Expect(reconciled.Status.Ready).To(BeTrue(), "MicrovmReplicaSet should be ready") + g.Expect(reconciled.Status.Replicas).To(Equal(scaledReplicaCount), "Expected the record to contain 1 replicas") + g.Expect(reconciled.Status.ReadyReplicas).To(Equal(scaledReplicaCount), "Expected all replicas to be ready") + g.Expect(microvmsCreated(g, client)).To(Equal(scaledReplicaCount), "Expected Microvms to have been scaled down after two reconciliations") +} + +func TestMicrovmRS_ReconcileDelete_DeleteSucceeds(t *testing.T) { + g := NewWithT(t) + + // deleting a replicaset with 2 replicas + var initialReplicaCount int32 = 2 + + mvmRS := createMicrovmReplicaSet(initialReplicaCount) + objects := []runtime.Object{mvmRS} + client := createFakeClient(g, objects) + + // create + g.Expect(reconcileMicrovmReplicaSetNTimes(g, client, initialReplicaCount+1)).To(Succeed()) + + reconciled, err := getMicrovmReplicaSet(client, testMicrovmReplicaSetName, testNamespace) + g.Expect(err).NotTo(HaveOccurred(), "Getting microvm should not fail") + + assertMRSFinalizer(g, reconciled) + assertConditionTrue(g, reconciled, infrav1.MicrovmReplicaSetReadyCondition) + g.Expect(reconciled.Status.Ready).To(BeTrue(), "MicrovmReplicaSet should be ready now") + g.Expect(reconciled.Status.Replicas).To(Equal(initialReplicaCount), "Expected the record to contain 2 replicas") + g.Expect(reconciled.Status.ReadyReplicas).To(Equal(initialReplicaCount), "Expected all replicas to be ready") + + // delete + g.Expect(client.Delete(context.TODO(), reconciled)).To(Succeed()) + + // first reconciliation + result, err := reconcileMicrovmReplicaSet(client) + g.Expect(err).NotTo(HaveOccurred(), "Reconciling microvmreplicaset the first time should not 
error") + g.Expect(result.IsZero()).To(BeFalse(), "Expect requeue to be requested after update") + + reconciled, err = getMicrovmReplicaSet(client, testMicrovmReplicaSetName, testNamespace) + g.Expect(err).NotTo(HaveOccurred(), "Getting microvm should not fail") + + assertConditionFalse(g, reconciled, infrav1.MicrovmReplicaSetReadyCondition, infrav1.MicrovmReplicaSetDeletingReason) + g.Expect(reconciled.Status.Ready).To(BeFalse(), "MicrovmReplicaSet should not be ready") + g.Expect(reconciled.Status.Replicas).To(Equal(initialReplicaCount), "Expected the record to contain 2 replicas") + g.Expect(reconciled.Status.ReadyReplicas).To(Equal(int32(0)), "Expected no replicas to be ready") + + // second reconciliation + result, err = reconcileMicrovmReplicaSet(client) + g.Expect(err).NotTo(HaveOccurred(), "Reconciling microvmreplicaset the third time should not error") + g.Expect(result.IsZero()).To(BeFalse(), "Expect requeue to be requested after reconcile") + + reconciled, err = getMicrovmReplicaSet(client, testMicrovmReplicaSetName, testNamespace) + g.Expect(err).NotTo(HaveOccurred(), "Getting microvm should not fail") + + assertConditionFalse(g, reconciled, infrav1.MicrovmReplicaSetReadyCondition, infrav1.MicrovmReplicaSetDeletingReason) + g.Expect(reconciled.Status.Ready).To(BeFalse(), "MicrovmReplicaSet should not be ready") + g.Expect(reconciled.Status.Replicas).To(Equal(int32(0)), "Expected the record to contain 0 replicas") + g.Expect(reconciled.Status.ReadyReplicas).To(Equal(int32(0)), "Expected all no replicas to be ready") + g.Expect(microvmsCreated(g, client)).To(Equal(int32(0)), "Expected Microvms to have been deleted after two reconciliations") + + // third reconciliation + result, err = reconcileMicrovmReplicaSet(client) + g.Expect(err).NotTo(HaveOccurred(), "Reconciling microvmreplicaset the third time should not error") + g.Expect(result.IsZero()).To(BeTrue(), "Expect requeue to not be requested after reconcile") + + reconciled, err = 
getMicrovmReplicaSet(client, testMicrovmReplicaSetName, testNamespace)
+	g.Expect(err).To(HaveOccurred(), "Getting microvmreplicaset after delete should fail")
+}