diff --git a/api/v1alpha1/condition_consts.go b/api/v1alpha1/condition_consts.go
index 18e658d..bbf96b8 100644
--- a/api/v1alpha1/condition_consts.go
+++ b/api/v1alpha1/condition_consts.go
@@ -42,4 +42,13 @@ const (
 	// MicrovmReplicaSetUpdatingReason indicates the microvm is in a pending state.
 	MicrovmReplicaSetUpdatingReason = "MicrovmReplicaSetUpdating"
+
+	// MicrovmDeploymentReadyCondition indicates that the microvmdeployment is in a complete state.
+	MicrovmDeploymentReadyCondition clusterv1.ConditionType = "MicrovmDeploymentReady"
+
+	// MicrovmDeploymentIncompleteReason indicates the microvmdeployment does not have all replicas yet.
+	MicrovmDeploymentIncompleteReason = "MicrovmDeploymentIncomplete"
+
+	// MicrovmDeploymentProvisionFailedReason indicates that the microvm deployment failed to provision.
+	MicrovmDeploymentProvisionFailedReason = "MicrovmDeploymentProvisionFailed"
 )
diff --git a/api/v1alpha1/microvmdeployment_types.go b/api/v1alpha1/microvmdeployment_types.go
index 05eff47..c8db06d 100644
--- a/api/v1alpha1/microvmdeployment_types.go
+++ b/api/v1alpha1/microvmdeployment_types.go
@@ -22,6 +22,14 @@ import (
 	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
 )
 
+const (
+	// MvmDeploymentFinalizer allows ReconcileMicrovmDeployment to clean up resources associated with the MicrovmDeployment
+	// before removing it from the apiserver.
+	MvmDeploymentFinalizer = "microvmdeployment.infrastructure.microvm.x-k8s.io"
+)
+
+// HostMap records the host endpoints which already have a MicrovmReplicaSet.
+type HostMap map[string]struct{}
+
 // MicrovmDeploymentSpec defines the desired state of MicrovmDeployment
 type MicrovmDeploymentSpec struct {
 	// Replicas is the number of Microvms to create on the given Host with the given
@@ -49,7 +57,7 @@ type MicrovmDeploymentStatus struct {
 	// +optional
 	Replicas int32 `json:"replicas"`
 
-	// ReadyReplicas is the number of pods targeted by this ReplicaSet with a Ready Condition.
+	// ReadyReplicas is the number of microvms controlled by this Deployment with a Ready Condition.
 	// +optional
 	ReadyReplicas int32 `json:"readyReplicas,omitempty"`
 
diff --git a/api/v1alpha1/microvmreplicaset_types.go b/api/v1alpha1/microvmreplicaset_types.go
index b418c99..4a8c0c1 100644
--- a/api/v1alpha1/microvmreplicaset_types.go
+++ b/api/v1alpha1/microvmreplicaset_types.go
@@ -55,7 +55,7 @@ type MicrovmReplicaSetStatus struct {
 	// +optional
 	Replicas int32 `json:"replicas"`
 
-	// ReadyReplicas is the number of pods targeted by this ReplicaSet with a Ready Condition.
+	// ReadyReplicas is the number of microvms targeted by this ReplicaSet with a Ready Condition.
 	// +optional
 	ReadyReplicas int32 `json:"readyReplicas,omitempty"`
 
diff --git a/controllers/helpers_test.go b/controllers/helpers_test.go
index 9801819..68af332 100644
--- a/controllers/helpers_test.go
+++ b/controllers/helpers_test.go
@@ -37,6 +37,7 @@ const (
 	testNamespace             = "ns1"
 	testMicrovmName           = "mvm1"
 	testMicrovmReplicaSetName = "rs1"
+	testMicrovmDeploymentName = "d1"
 	testMicrovmUID            = "ABCDEF123456"
 	testBootstrapData         = "somesamplebootstrapsdata"
 )
@@ -97,6 +98,22 @@ func reconcileMicrovmReplicaSetNTimes(g *WithT, client client.Client, count int3
 	return nil
 }
 
+func reconcileMicrovmDeployment(client client.Client) (ctrl.Result, error) {
+	mvmDepController := &controllers.MicrovmDeploymentReconciler{
+		Client: client,
+		Scheme: client.Scheme(),
+	}
+
+	request := ctrl.Request{
+		NamespacedName: types.NamespacedName{
+			Name:      testMicrovmDeploymentName,
+			Namespace: testNamespace,
+		},
+	}
+
+	return mvmDepController.Reconcile(context.TODO(), request)
+}
+
 func getMicrovm(c client.Client, name, namespace string) (*infrav1.Microvm, error) {
 	key := client.ObjectKey{
 		Name:      name,
 		Namespace: namespace,
@@ -114,6 +131,12 @@ func listMicrovm(c client.Client) (*infrav1.MicrovmList, error) {
 	return mvm, err
 }
 
+func listMicrovmReplicaSet(c client.Client) (*infrav1.MicrovmReplicaSetList, error) {
+	mvmRS := &infrav1.MicrovmReplicaSetList{}
+	err := c.List(context.TODO(), mvmRS)
+	return mvmRS, err
+}
+
 func getMicrovmReplicaSet(c client.Client, name, namespace string) (*infrav1.MicrovmReplicaSet, error) {
 	key := client.ObjectKey{
 		Name:      name,
 		Namespace: namespace,
 	}
@@ -125,6 +148,17 @@ func getMicrovmReplicaSet(c client.Client, name, namespace string) (*infrav1.Mic
 	mvmRS := &infrav1.MicrovmReplicaSet{}
 	err := c.Get(context.TODO(), key, mvmRS)
 	return mvmRS, err
 }
 
+func getMicrovmDeployment(c client.Client, name, namespace string) (*infrav1.MicrovmDeployment, error) {
+	key := client.ObjectKey{
+		Name:      name,
+		Namespace: namespace,
+	}
+
+	mvmD := &infrav1.MicrovmDeployment{}
+	err := c.Get(context.TODO(), key, mvmD)
+	return mvmD, err
+}
+
 func createFakeClient(g *WithT, objects []runtime.Object) client.Client {
 	scheme := runtime.NewScheme()
 
@@ -194,6 +228,33 @@ func createMicrovmReplicaSet(reps int32) *infrav1.MicrovmReplicaSet {
 	}
 }
 
+func createMicrovmDeployment(reps int32, hostCount int) *infrav1.MicrovmDeployment {
+	mvm := createMicrovm()
+	mvm.Spec.Host = microvm.Host{}
+
+	var hosts []microvm.Host
+
+	for i := 0; i < hostCount; i++ {
+		hosts = append(hosts, microvm.Host{
+			Endpoint: fmt.Sprintf("1.2.3.4:909%d", i),
+		})
+	}
+
+	return &infrav1.MicrovmDeployment{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      testMicrovmDeploymentName,
+			Namespace: testNamespace,
+		},
+		Spec: infrav1.MicrovmDeploymentSpec{
+			Hosts:    hosts,
+			Replicas: pointer.Int32(reps),
+			Template: infrav1.MicrovmTemplateSpec{
+				Spec: mvm.Spec,
+			},
+		},
+	}
+}
+
 func withExistingMicrovm(fc *fakes.FakeClient, mvmState flintlocktypes.MicroVMStatus_MicroVMState) {
 	fc.GetMicroVMReturns(&flintlockv1.GetMicroVMResponse{
 		Microvm: &flintlocktypes.MicroVM{
@@ -252,12 +313,34 @@ func assertMicrovmReconciled(g *WithT, reconciled *infrav1.Microvm) {
 	g.Expect(reconciled.Status.Ready).To(BeTrue(), "The Ready property must be true when the mvm has been reconciled")
 }
 
+func assertOneSetPerHost(g *WithT, reconciled *infrav1.MicrovmDeployment, c client.Client) {
+	hosts := reconciled.Spec.Hosts
+	sets, err := listMicrovmReplicaSet(c)
+	g.Expect(err).NotTo(HaveOccurred())
+
+	g.Expect(len(hosts)).To(Equal(len(sets.Items)))
+
+	seen := map[string]struct{}{}
+
+	for _, rs := range sets.Items {
+		seen[rs.Spec.Host.Endpoint] = struct{}{}
+	}
+
+	g.Expect(seen).To(HaveLen(len(hosts)))
+}
+
 func microvmsCreated(g *WithT, c client.Client) int32 {
 	mvmList, err := listMicrovm(c)
 	g.Expect(err).NotTo(HaveOccurred())
 	return int32(len(mvmList.Items))
 }
 
+func microvmReplicaSetsCreated(g *WithT, c client.Client) int {
+	mvmList, err := listMicrovmReplicaSet(c)
+	g.Expect(err).NotTo(HaveOccurred())
+	return len(mvmList.Items)
+}
+
 func ensureMicrovmState(g *WithT, c client.Client) {
 	// update the microvms so they report as ready to move the replicaset reconciliation along
 	mvmList, err := listMicrovm(c)
@@ -269,6 +352,19 @@
 	}
 }
 
+func ensureMicrovmReplicaSetState(g *WithT, c client.Client, r, rr int32) {
+	// update the microvmreplicasets so they report as ready to move the deployment reconciliation along
+	mvmList, err := listMicrovmReplicaSet(c)
+	g.Expect(err).NotTo(HaveOccurred())
+
+	for _, mvm := range mvmList.Items {
+		mvm.Status.Ready = true
+		mvm.Status.ReadyReplicas = rr
+		mvm.Status.Replicas = r
+		g.Expect(c.Update(context.TODO(), &mvm)).To(Succeed())
+	}
+}
+
 func assertFinalizer(g *WithT, reconciled *infrav1.Microvm) {
 	g.Expect(reconciled.ObjectMeta.Finalizers).NotTo(BeEmpty(), "Expected at least one finalizer to be set")
 	g.Expect(hasMicrovmFinalizer(&reconciled.ObjectMeta, infrav1.MvmFinalizer)).To(BeTrue(), "Expect the mvm finalizer")
@@ -279,6 +375,11 @@
 	g.Expect(hasMicrovmFinalizer(&reconciled.ObjectMeta, infrav1.MvmRSFinalizer)).To(BeTrue(), "Expect the mvmrs finalizer")
 }
 
+func assertMDFinalizer(g *WithT, reconciled *infrav1.MicrovmDeployment) {
+	g.Expect(reconciled.ObjectMeta.Finalizers).NotTo(BeEmpty(), "Expected at least one finalizer to be set")
+	g.Expect(hasMicrovmFinalizer(&reconciled.ObjectMeta, infrav1.MvmDeploymentFinalizer)).To(BeTrue(), "Expect the mvmd finalizer")
+}
+
 func hasMicrovmFinalizer(meta *metav1.ObjectMeta, finalizer string) bool {
 	if len(meta.Finalizers) == 0 {
 		return false
diff --git a/controllers/microvmdeployment_controller.go b/controllers/microvmdeployment_controller.go
index a7c526d..828416b 100644
--- a/controllers/microvmdeployment_controller.go
+++ b/controllers/microvmdeployment_controller.go
@@ -18,14 +18,23 @@ package controllers
 
 import (
 	"context"
+	"fmt"
 
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/utils/pointer"
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
 	"sigs.k8s.io/controller-runtime/pkg/log"
+	"sigs.k8s.io/controller-runtime/pkg/reconcile"
 
+	"github.com/weaveworks-liquidmetal/controller-pkg/types/microvm"
 	infrastructurev1alpha1 "github.com/weaveworks-liquidmetal/microvm-operator/api/v1alpha1"
 	infrav1 "github.com/weaveworks-liquidmetal/microvm-operator/api/v1alpha1"
+	"github.com/weaveworks-liquidmetal/microvm-operator/internal/scope"
 )
 
 // MicrovmDeploymentReconciler reconciles a MicrovmDeployment object
@@ -40,13 +49,188 @@ type MicrovmDeploymentReconciler struct {
 //+kubebuilder:rbac:groups=infrastructure.liquid-metal.io,resources=microvmreplicasets,verbs=get;list;watch;create;update;patch;delete
 
 func (r *MicrovmDeploymentReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
-	_ = log.FromContext(ctx)
+	log := log.FromContext(ctx)
 
-	// TODO(user): your logic here
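+	// fetch the MicrovmDeployment being reconciled; a NotFound error means it
+	// was already deleted and there is nothing to do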
+	mvmD := &infrav1.MicrovmDeployment{}
+	if err := r.Get(ctx, req.NamespacedName, mvmD); err != nil {
+		if apierrors.IsNotFound(err) {
+			return ctrl.Result{}, nil
+		}
+		log.Error(err, "error getting microvmdeployment", "id", req.NamespacedName)
+
+		return ctrl.Result{}, fmt.Errorf("unable to reconcile: %w", err)
+	}
+
+	mvmDeploymentScope, err := scope.NewMicrovmDeploymentScope(scope.MicrovmDeploymentScopeParams{
+		MicrovmDeployment: mvmD,
+		Client:            r.Client,
+		Context:           ctx,
+		Logger:            log,
+	})
+	if err != nil {
+		log.Error(err, "failed to create mvm-deployment scope")
+
+		return ctrl.Result{}, fmt.Errorf("failed to create mvm-deployment scope: %w", err)
+	}
+
+	defer func() {
+		if err := mvmDeploymentScope.Patch(); err != nil {
+			log.Error(err, "failed to patch microvmdeployment")
+		}
+	}()
+
+	if !mvmD.ObjectMeta.DeletionTimestamp.IsZero() {
+		log.Info("Deleting microvmdeployment")
+
+		return r.reconcileDelete(ctx, mvmDeploymentScope)
+	}
+
+	return r.reconcileNormal(ctx, mvmDeploymentScope)
+}
+
+func (r *MicrovmDeploymentReconciler) reconcileDelete(
+	ctx context.Context,
+	mvmDeploymentScope *scope.MicrovmDeploymentScope,
+) (reconcile.Result, error) {
 	return ctrl.Result{}, nil
 }
 
+func (r *MicrovmDeploymentReconciler) reconcileNormal(
+	ctx context.Context,
+	mvmDeploymentScope *scope.MicrovmDeploymentScope,
+) (reconcile.Result, error) {
+	mvmDeploymentScope.Info("Reconciling MicrovmDeployment update")
+
+	// fetch all existing replicasets in this namespace
+	rsList, err := r.getOwnedReplicaSets(ctx, mvmDeploymentScope)
+	if err != nil {
+		mvmDeploymentScope.Error(err, "failed getting owned microvmreplicasets")
+
+		return ctrl.Result{}, fmt.Errorf("failed to list microvmreplicasets: %w", err)
+	}
+
+	// record the microvms per set which have been created and are ready
+	// and create a map to record which host already has a replicaset
+
+	// we always get a fresh count rather than rely on the status in case
+	// something was removed
+	var (
+		ready   int32 = 0
+		created int32 = 0
+
+		hostMap = infrav1.HostMap{}
+	)
+
+	for _, rs := range rsList {
+		created += rs.Status.Replicas
+		ready += rs.Status.ReadyReplicas
+
+		hostMap[rs.Spec.Host.Endpoint] = struct{}{}
+	}
+
+	mvmDeploymentScope.SetCreatedReplicas(created)
+	mvmDeploymentScope.SetReadyReplicas(ready)
+
+	// get a count of the replicasets created
+	createdSets := len(hostMap)
+
+	switch {
+	// if all desired microvms are ready, mark the deployment ready.
+	// we are done here
+	case mvmDeploymentScope.ReadyReplicas() == mvmDeploymentScope.DesiredTotalReplicas():
+		mvmDeploymentScope.Info("MicrovmDeployment created: ready")
+		mvmDeploymentScope.SetReady()
+
+		return reconcile.Result{}, nil
+	// if we are in this branch then not all desired replicasets have been created.
+	// create a new one and set the ownerref to this controller.
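+	// note that only one new replicaset is created per pass; the requeue at
+	// the end of this function drives creation of the remaining sets.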
+	case createdSets < mvmDeploymentScope.RequiredSets():
+		mvmDeploymentScope.Info("MicrovmDeployment creating: create new microvmreplicaset")
+
+		host, err := mvmDeploymentScope.DetermineHost(hostMap)
+		if err != nil {
+			mvmDeploymentScope.Error(err, "failed creating owned microvmreplicaset")
+			mvmDeploymentScope.SetNotReady(infrav1.MicrovmDeploymentProvisionFailedReason, "Error", "")
+
+			return reconcile.Result{}, fmt.Errorf("failed to create new replicaset for deployment: %w", err)
+		}
+
+		if err := r.createReplicaSet(ctx, mvmDeploymentScope, host); err != nil {
+			mvmDeploymentScope.Error(err, "failed creating owned microvmreplicaset")
+			mvmDeploymentScope.SetNotReady(infrav1.MicrovmDeploymentProvisionFailedReason, "Error", "")
+
+			return reconcile.Result{}, fmt.Errorf("failed to create new replicaset for deployment: %w", err)
+		}
+
+		mvmDeploymentScope.SetNotReady(infrav1.MicrovmDeploymentIncompleteReason, "Info", "")
+	// if all desired objects have been created, but are not quite ready yet,
+	// set the condition and requeue
+	default:
+		mvmDeploymentScope.Info("MicrovmDeployment creating: waiting for microvmreplicasets to become ready")
+		mvmDeploymentScope.SetNotReady(infrav1.MicrovmDeploymentIncompleteReason, "Info", "")
+	}
+
+	controllerutil.AddFinalizer(mvmDeploymentScope.MicrovmDeployment, infrav1.MvmDeploymentFinalizer)
+
+	return ctrl.Result{RequeueAfter: requeuePeriod}, nil
+}
+
+func (r *MicrovmDeploymentReconciler) createReplicaSet(
+	ctx context.Context,
+	mvmDeploymentScope *scope.MicrovmDeploymentScope,
+	host microvm.Host,
+) error {
+	newRs := &infrav1.MicrovmReplicaSet{
+		ObjectMeta: metav1.ObjectMeta{
+			Namespace:    mvmDeploymentScope.Namespace(),
+			GenerateName: "microvmreplicaset-",
+		},
+		Spec: infrav1.MicrovmReplicaSetSpec{
+			Host:     host,
+			Replicas: pointer.Int32(mvmDeploymentScope.DesiredReplicas()),
+			Template: infrav1.MicrovmTemplateSpec{
+				Spec: mvmDeploymentScope.MicrovmSpec(),
+			},
+		},
+	}
+
+	if err := controllerutil.SetControllerReference(mvmDeploymentScope.MicrovmDeployment, newRs, r.Scheme); err != nil {
+		return err
+	}
+
+	return r.Create(ctx, newRs)
+}
+
+func (r *MicrovmDeploymentReconciler) getOwnedReplicaSets(
+	ctx context.Context,
+	mvmDeploymentScope *scope.MicrovmDeploymentScope,
+) ([]infrav1.MicrovmReplicaSet, error) {
+	rsList := &infrav1.MicrovmReplicaSetList{}
+	opts := []client.ListOption{
+		client.InNamespace(mvmDeploymentScope.Namespace()),
+	}
+	if err := r.List(ctx, rsList, opts...); err != nil {
+		return nil, err
+	}
+
+	owned := []infrav1.MicrovmReplicaSet{}
+
+	for _, rs := range rsList.Items {
+		if metav1.IsControlledBy(&rs, mvmDeploymentScope.MicrovmDeployment) {
+			owned = append(owned, rs)
+		}
+	}
+
+	return owned, nil
+}
+
 // SetupWithManager sets up the controller with the Manager.
 func (r *MicrovmDeploymentReconciler) SetupWithManager(mgr ctrl.Manager) error {
 	return ctrl.NewControllerManagedBy(mgr).
diff --git a/controllers/microvmdeployment_controller_test.go b/controllers/microvmdeployment_controller_test.go
new file mode 100644
index 0000000..0743e94
--- /dev/null
+++ b/controllers/microvmdeployment_controller_test.go
@@ -0,0 +1,81 @@
+package controllers_test
+
+import (
+	"testing"
+
+	. "github.com/onsi/gomega"
+	infrav1 "github.com/weaveworks-liquidmetal/microvm-operator/api/v1alpha1"
+	"k8s.io/apimachinery/pkg/runtime"
+)
+
+func TestMicrovmDep_Reconcile_MissingObject(t *testing.T) {
+	g := NewWithT(t)
+
+	mvmDep := &infrav1.MicrovmDeployment{}
+	objects := []runtime.Object{mvmDep}
+
+	client := createFakeClient(g, objects)
+	result, err := reconcileMicrovmDeployment(client)
+	g.Expect(err).NotTo(HaveOccurred(), "Reconciling when microvmdeployment doesn't exist should not error")
+	g.Expect(result.IsZero()).To(BeTrue(), "Expect no requeue to be requested")
+}
+
+func TestMicrovmDep_ReconcileNormal_CreateSucceeds(t *testing.T) {
+	g := NewWithT(t)
+
+	// creating a deployment with 2 hosts and 2 microvms per host
+	var (
+		expectedReplicas      int32 = 2
+		expectedReplicaSets   int   = 2
+		expectedTotalMicrovms int32 = 4
+	)
+
+	mvmD := createMicrovmDeployment(expectedReplicas, expectedReplicaSets)
+	objects := []runtime.Object{mvmD}
+	client := createFakeClient(g, objects)
+
+	// first reconciliation
+	result, err := reconcileMicrovmDeployment(client)
+	g.Expect(err).NotTo(HaveOccurred(), "Reconciling microvmdeployment the first time should not error")
+	g.Expect(result.IsZero()).To(BeFalse(), "Expect requeue to be requested after create")
+
+	reconciled, err := getMicrovmDeployment(client, testMicrovmDeploymentName, testNamespace)
+	g.Expect(err).NotTo(HaveOccurred(), "Getting microvmdeployment should not fail")
+	assertMDFinalizer(g, reconciled)
+
+	assertConditionFalse(g, reconciled, infrav1.MicrovmDeploymentReadyCondition, infrav1.MicrovmDeploymentIncompleteReason)
+	g.Expect(reconciled.Status.Ready).To(BeFalse(), "MicrovmDeployment should not be ready yet")
+	g.Expect(reconciled.Status.Replicas).To(Equal(int32(0)), "Expected the record to not have been updated yet")
+	g.Expect(microvmReplicaSetsCreated(g, client)).To(Equal(expectedReplicaSets-1), "Expected only one replicaset to have been created after one reconciliation")
+
+	// second reconciliation
+	ensureMicrovmReplicaSetState(g, client, expectedReplicas, expectedReplicas-1)
+	result, err = reconcileMicrovmDeployment(client)
+	g.Expect(err).NotTo(HaveOccurred(), "Reconciling microvmdeployment the second time should not error")
+	g.Expect(result.IsZero()).To(BeFalse(), "Expect requeue to be requested after create")
+
+	reconciled, err = getMicrovmDeployment(client, testMicrovmDeploymentName, testNamespace)
+	g.Expect(err).NotTo(HaveOccurred(), "Getting microvmdeployment should not fail")
+
+	assertConditionFalse(g, reconciled, infrav1.MicrovmDeploymentReadyCondition, infrav1.MicrovmDeploymentIncompleteReason)
+	g.Expect(reconciled.Status.Ready).To(BeFalse(), "MicrovmDeployment should not be ready yet")
+	g.Expect(reconciled.Status.Replicas).To(Equal(expectedTotalMicrovms-2), "Expected the record to contain 2 replicas")
+	g.Expect(microvmReplicaSetsCreated(g, client)).To(Equal(expectedReplicaSets), "Expected all MicrovmReplicaSets to have been created after two reconciliations")
+
+	// final reconciliation
+	ensureMicrovmReplicaSetState(g, client, expectedReplicas, expectedReplicas)
+	result, err = reconcileMicrovmDeployment(client)
+	g.Expect(err).NotTo(HaveOccurred(), "Reconciling microvmdeployment the third time should not error")
+	g.Expect(result.IsZero()).To(BeTrue(), "Expect requeue to not be requested after create")
+
+	reconciled, err = getMicrovmDeployment(client, testMicrovmDeploymentName, testNamespace)
+	g.Expect(err).NotTo(HaveOccurred(), "Getting microvmdeployment should not fail")
+
+	assertConditionTrue(g, reconciled, infrav1.MicrovmDeploymentReadyCondition)
+	g.Expect(reconciled.Status.Ready).To(BeTrue(), "MicrovmDeployment should be ready now")
+	g.Expect(reconciled.Status.Replicas).To(Equal(expectedTotalMicrovms), "Expected the record to contain 4 replicas")
+	g.Expect(reconciled.Status.ReadyReplicas).To(Equal(expectedTotalMicrovms), "Expected all replicas to be ready")
+	g.Expect(microvmReplicaSetsCreated(g, client)).To(Equal(expectedReplicaSets), "Expected no further MicrovmReplicaSets to have been created")
+	assertOneSetPerHost(g, reconciled, client)
+}
diff --git a/internal/scope/mvm.go b/internal/scope/mvm.go
index 8827642..212fbec 100644
--- a/internal/scope/mvm.go
+++ b/internal/scope/mvm.go
@@ -159,7 +159,6 @@ func (m *MicrovmScope) GetBasicAuthToken() (string, error) {
 	// If it's not there, that's fine; we will log and return an empty string
 	token := string(tokenSecret.Data["token"])
 
-	fmt.Println(tokenSecret.Data)
 	if token == "" {
 		m.Info(
diff --git a/internal/scope/mvmd.go b/internal/scope/mvmd.go
new file mode 100644
index 0000000..c22fa58
--- /dev/null
+++ b/internal/scope/mvmd.go
@@ -0,0 +1,167 @@
+// Copyright 2022 Weaveworks or its affiliates. All Rights Reserved.
+// SPDX-License-Identifier: MPL-2.0
+
+package scope
+
+import (
+	"context"
+	"errors"
+	"fmt"
+
+	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+	"sigs.k8s.io/cluster-api/util/conditions"
+	"sigs.k8s.io/cluster-api/util/patch"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+
+	"github.com/go-logr/logr"
+	microvm "github.com/weaveworks-liquidmetal/controller-pkg/types/microvm"
+	infrav1 "github.com/weaveworks-liquidmetal/microvm-operator/api/v1alpha1"
+	"github.com/weaveworks-liquidmetal/microvm-operator/internal/defaults"
+)
+
+type MicrovmDeploymentScopeParams struct {
+	Logger            logr.Logger
+	MicrovmDeployment *infrav1.MicrovmDeployment
+
+	Client  client.Client
+	Context context.Context //nolint: containedctx // don't care
+}
+
+type MicrovmDeploymentScope struct {
+	logr.Logger
+
+	MicrovmDeployment *infrav1.MicrovmDeployment
+
+	client         client.Client
+	patchHelper    *patch.Helper
+	controllerName string
+	ctx            context.Context
+}
+
+func NewMicrovmDeploymentScope(params MicrovmDeploymentScopeParams) (*MicrovmDeploymentScope, error) {
+	if params.MicrovmDeployment == nil {
+		return nil, errMicrovmRequired
+	}
+
+	if params.Client == nil {
+		return nil, errClientRequired
+	}
+
+	patchHelper, err := patch.NewHelper(params.MicrovmDeployment, params.Client)
+	if err != nil {
+		return nil, fmt.Errorf("creating patch helper for microvmdeployment: %w", err)
+	}
+
+	scope := &MicrovmDeploymentScope{
+		MicrovmDeployment: params.MicrovmDeployment,
+		client:            params.Client,
+		controllerName:    defaults.ManagerName,
+		Logger:            params.Logger,
+		patchHelper:       patchHelper,
+		ctx:               params.Context,
+	}
+
+	return scope, nil
+}
+
+// Name returns the MicrovmDeployment name.
+func (m *MicrovmDeploymentScope) Name() string {
+	return m.MicrovmDeployment.Name
+}
+
+// Namespace returns the namespace name.
+func (m *MicrovmDeploymentScope) Namespace() string {
+	return m.MicrovmDeployment.Namespace
+}
+
+// HasAllSets returns true if all required sets have been created.
+func (m *MicrovmDeploymentScope) HasAllSets(count int) bool {
+	return count == len(m.MicrovmDeployment.Spec.Hosts)
+}
+
+// RequiredSets returns the number of sets which should be created.
+func (m *MicrovmDeploymentScope) RequiredSets() int {
+	return len(m.MicrovmDeployment.Spec.Hosts)
+}
+
+// DesiredTotalReplicas returns the total requested replicas set on the spec.
+func (m *MicrovmDeploymentScope) DesiredTotalReplicas() int32 {
+	return m.DesiredReplicas() * int32(m.RequiredSets())
+}
+
+// DesiredReplicas returns the requested replicas set per set on the spec.
+func (m *MicrovmDeploymentScope) DesiredReplicas() int32 {
+	return *m.MicrovmDeployment.Spec.Replicas
+}
+
+// ReadyReplicas returns the number of replicas which are ready.
+func (m *MicrovmDeploymentScope) ReadyReplicas() int32 {
+	return m.MicrovmDeployment.Status.ReadyReplicas
+}
+
+// CreatedReplicas returns the number of replicas which have been created.
+func (m *MicrovmDeploymentScope) CreatedReplicas() int32 {
+	return m.MicrovmDeployment.Status.Replicas
+}
+
+// MicrovmSpec returns the spec for the child MicroVMs.
+func (m *MicrovmDeploymentScope) MicrovmSpec() infrav1.MicrovmSpec {
+	return m.MicrovmDeployment.Spec.Template.Spec
+}
+
+// Hosts returns the list of hosts for created microvms.
+func (m *MicrovmDeploymentScope) Hosts() []microvm.Host {
+	return m.MicrovmDeployment.Spec.Hosts
+}
+
+// DetermineHost returns a host which does not yet have a replicaset.
+func (m *MicrovmDeploymentScope) DetermineHost(setHosts infrav1.HostMap) (microvm.Host, error) {
+	for _, host := range m.Hosts() {
+		if _, ok := setHosts[host.Endpoint]; !ok {
+			return host, nil
+		}
+	}
+
+	return microvm.Host{}, errors.New("could not find free host")
+}
+
+// SetCreatedReplicas records the number of microvms which have been created;
+// this does not give information about whether the microvms are ready.
+func (m *MicrovmDeploymentScope) SetCreatedReplicas(count int32) {
+	m.MicrovmDeployment.Status.Replicas = count
+}
+
+// SetReadyReplicas saves the number of ready MicroVMs to the status.
+func (m *MicrovmDeploymentScope) SetReadyReplicas(count int32) {
+	m.MicrovmDeployment.Status.ReadyReplicas = count
+}
+
+// SetReady sets any properties/conditions that are used to indicate that the MicrovmDeployment is 'Ready'.
+func (m *MicrovmDeploymentScope) SetReady() {
+	conditions.MarkTrue(m.MicrovmDeployment, infrav1.MicrovmDeploymentReadyCondition)
+	m.MicrovmDeployment.Status.Ready = true
+}
+
+// SetNotReady sets any properties/conditions that are used to indicate that the MicrovmDeployment is NOT 'Ready'.
+func (m *MicrovmDeploymentScope) SetNotReady(
+	reason string,
+	severity clusterv1.ConditionSeverity,
+	message string,
+	messageArgs ...interface{},
+) {
+	conditions.MarkFalse(m.MicrovmDeployment, infrav1.MicrovmDeploymentReadyCondition, reason, severity, message, messageArgs...)
+	m.MicrovmDeployment.Status.Ready = false
+}
+
+// Patch persists the resource and status.
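+// It is expected to be called via a deferred call once per reconcile pass.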
+func (m *MicrovmDeploymentScope) Patch() error {
+	err := m.patchHelper.Patch(
+		m.ctx,
+		m.MicrovmDeployment,
+	)
+	if err != nil {
+		return fmt.Errorf("unable to patch microvmdeployment: %w", err)
+	}
+
+	return nil
+}
diff --git a/internal/scope/mvmd_test.go b/internal/scope/mvmd_test.go
new file mode 100644
index 0000000..f1936e3
--- /dev/null
+++ b/internal/scope/mvmd_test.go
@@ -0,0 +1,108 @@
+package scope_test
+
+import (
+	"fmt"
+	"testing"
+
+	. "github.com/onsi/gomega"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+	"sigs.k8s.io/controller-runtime/pkg/client/fake"
+
+	microvm "github.com/weaveworks-liquidmetal/controller-pkg/types/microvm"
+	infrav1 "github.com/weaveworks-liquidmetal/microvm-operator/api/v1alpha1"
+	"github.com/weaveworks-liquidmetal/microvm-operator/internal/scope"
+)
+
+func TestDetermineHost(t *testing.T) {
+	g := NewWithT(t)
+
+	scheme, err := setupScheme()
+	g.Expect(err).NotTo(HaveOccurred())
+
+	mvmDepName := "md-1"
+
+	tt := []struct {
+		name      string
+		expected  func(*WithT, string, string, error)
+		hostCount int
+		mapCount  int
+	}{
+		{
+			name:      "when a host is not yet recorded in the map, should return that host",
+			hostCount: 5,
+			mapCount:  3,
+			expected: func(g *WithT, wantHost, gotHost string, err error) {
+				g.Expect(err).NotTo(HaveOccurred())
+				g.Expect(gotHost).To(Equal(wantHost))
+			},
+		},
+		{
+			name:      "when a different number of hosts is mapped, should still return the first unmapped host",
+			hostCount: 10,
+			mapCount:  4,
+			expected: func(g *WithT, wantHost, gotHost string, err error) {
+				g.Expect(err).NotTo(HaveOccurred())
+				g.Expect(gotHost).To(Equal(wantHost))
+			},
+		},
+		{
+			name:      "when there is no unmapped host to return, return error",
+			hostCount: 2,
+			mapCount:  2,
+			expected: func(g *WithT, _, _ string, err error) {
+				g.Expect(err).To(MatchError("could not find free host"))
+			},
+		},
+	}
+
+	for _, tc := range tt {
+		t.Run(tc.name, func(t *testing.T) {
+			mvmDep := newDeployment(mvmDepName, tc.hostCount)
+
+			client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(mvmDep).Build()
+			mvmScope, err := scope.NewMicrovmDeploymentScope(scope.MicrovmDeploymentScopeParams{
+				Client:            client,
+				MicrovmDeployment: mvmDep,
+			})
+			g.Expect(err).NotTo(HaveOccurred())
+
+			hostMap := newHostMap(tc.mapCount)
+
+			host, err := mvmScope.DetermineHost(hostMap)
+			tc.expected(g, fmt.Sprintf("%d", tc.mapCount), host.Endpoint, err)
+		})
+	}
+}
+
+func newHostMap(hostCount int) infrav1.HostMap {
+	hostMap := infrav1.HostMap{}
+	for i := 0; i < hostCount; i++ {
+		hostMap[fmt.Sprintf("%d", i)] = struct{}{}
+	}
+
+	return hostMap
+}
+
+func newDeployment(name string, hostCount int) *infrav1.MicrovmDeployment {
+	var hosts []microvm.Host
+
+	for i := 0; i < hostCount; i++ {
+		hosts = append(hosts, microvm.Host{
+			Endpoint: fmt.Sprintf("%d", i),
+		})
+	}
+
+	md := &infrav1.MicrovmDeployment{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      name,
+			Namespace: "default",
+		},
+		Spec: infrav1.MicrovmDeploymentSpec{
+			Hosts: hosts,
+		},
+	}
+
+	return md
+}
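
A minimal usage sketch of the API introduced above (the name, namespace, and endpoints are illustrative assumptions, not values from the repo): the controller fans a MicrovmDeployment out into one MicrovmReplicaSet per entry in Spec.Hosts, each running Spec.Replicas microvms, so a deployment becomes Ready once replicas * hosts microvms report Ready.

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/utils/pointer"

	microvm "github.com/weaveworks-liquidmetal/controller-pkg/types/microvm"
	infrav1 "github.com/weaveworks-liquidmetal/microvm-operator/api/v1alpha1"
)

func main() {
	// Two hosts with two replicas each: the controller converges on two
	// MicrovmReplicaSets and four Microvms in total.
	md := &infrav1.MicrovmDeployment{
		ObjectMeta: metav1.ObjectMeta{Name: "md-example", Namespace: "default"},
		Spec: infrav1.MicrovmDeploymentSpec{
			Hosts: []microvm.Host{
				{Endpoint: "1.2.3.4:9090"}, // hypothetical flintlock endpoints
				{Endpoint: "5.6.7.8:9090"},
			},
			Replicas: pointer.Int32(2),
			// Template would carry the MicrovmSpec shared by every replica.
		},
	}

	total := *md.Spec.Replicas * int32(len(md.Spec.Hosts))
	fmt.Printf("expect %d replicasets and %d microvms\n", len(md.Spec.Hosts), total)
}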