diff --git a/pkg/api/vm/schema.go b/pkg/api/vm/schema.go
index 84f44e165d..d767344e01 100644
--- a/pkg/api/vm/schema.go
+++ b/pkg/api/vm/schema.go
@@ -8,6 +8,7 @@ import (
 	"github.com/rancher/harvester/pkg/generated/clientset/versioned/scheme"
 	"github.com/rancher/steve/pkg/schema"
 	"github.com/rancher/steve/pkg/server"
+	"github.com/rancher/steve/pkg/stores/proxy"
 	"github.com/rancher/wrangler/pkg/schemas"
 	k8sschema "k8s.io/apimachinery/pkg/runtime/schema"
 
@@ -55,6 +56,12 @@ func RegisterSchema(scaled *config.Scaled, server *server.Server) error {
 		vmiCache: vmis.Cache(),
 	}
 
+	vmStore := &vmStore{
+		Store:       proxy.NewProxyStore(server.ClientFactory, server.AccessSetLookup),
+		dataVolumes: scaled.CDIFactory.Cdi().V1beta1().DataVolume(),
+		vmCache:     scaled.VirtFactory.Kubevirt().V1alpha3().VirtualMachine().Cache(),
+	}
+
 	t := schema.Template{
 		ID: vmSchemaID,
 		Customize: func(apiSchema *types.APISchema) {
@@ -82,6 +89,7 @@ func RegisterSchema(scaled *config.Scaled, server *server.Server) error {
 			}
 		},
 		Formatter: vmformatter.formatter,
+		Store:     vmStore,
 	}
 
 	server.SchemaTemplates = append(server.SchemaTemplates, t)
diff --git a/pkg/api/vm/store.go b/pkg/api/vm/store.go
new file mode 100644
index 0000000000..7d58d8961e
--- /dev/null
+++ b/pkg/api/vm/store.go
@@ -0,0 +1,96 @@
+package vm
+
+import (
+	"fmt"
+
+	"github.com/rancher/apiserver/pkg/apierror"
+	"github.com/rancher/apiserver/pkg/types"
+	"github.com/rancher/wrangler/pkg/schemas/validation"
+	"github.com/rancher/wrangler/pkg/slice"
+	k8sapierrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+	ctlcdiv1beta1 "github.com/rancher/harvester/pkg/generated/controllers/cdi.kubevirt.io/v1beta1"
+	ctlkubevirtv1alpha3 "github.com/rancher/harvester/pkg/generated/controllers/kubevirt.io/v1alpha3"
+)
+
+type vmStore struct {
+	types.Store
+
+	vmCache     ctlkubevirtv1alpha3.VirtualMachineCache
+	dataVolumes ctlcdiv1beta1.DataVolumeClient
+}
+
+func (s *vmStore) Delete(request *types.APIRequest, schema *types.APISchema, id string) (types.APIObject, error) {
+	removedDisks := request.Query["removedDisks"]
+	vm, err := s.vmCache.Get(request.Namespace, request.Name)
+	if err != nil {
+		return types.APIObject{}, apierror.NewAPIError(validation.ServerError, fmt.Sprintf("Failed to get vm %s/%s, %v", request.Namespace, request.Name, err))
+	}
+
+	var savedDataVolumes []string
+	var removedDataVolume []string
+	if vm.Spec.Template != nil {
+		for _, vol := range vm.Spec.Template.Spec.Volumes {
+			if vol.DataVolume == nil {
+				continue
+			}
+
+			if slice.ContainsString(removedDisks, vol.Name) {
+				removedDataVolume = append(removedDataVolume, vol.DataVolume.Name)
+			} else {
+				savedDataVolumes = append(savedDataVolumes, vol.DataVolume.Name)
+			}
+		}
+	}
+
+	if err := s.removeVMDataVolumeOwnerRef(vm.Namespace, vm.Name, savedDataVolumes); err != nil {
+		return types.APIObject{}, apierror.NewAPIError(validation.ServerError, fmt.Sprintf("Failed to remove virtualMachine %s/%s from dataVolume's OwnerReferences, %v", request.Namespace, request.Name, err))
+	}
+
+	apiObj, err := s.Store.Delete(request, request.Schema, id)
+	if err != nil {
+		return types.APIObject{}, apierror.NewAPIError(validation.ServerError, fmt.Sprintf("Failed to remove vm %s/%s, %v", request.Namespace, request.Name, err))
+	}
+
+	if err := s.deleteDataVolumes(vm.Namespace, removedDataVolume); err != nil {
+		return types.APIObject{}, apierror.NewAPIError(validation.ServerError, fmt.Sprintf("Failed to remove dataVolume, %v", err))
+	}
+	return apiObj, nil
+}
+
+func (s *vmStore) removeVMDataVolumeOwnerRef(vmNamespace, vmName string, savedDataVolumes []string) error {
+	for _, dv := range savedDataVolumes {
+		dv, err := s.dataVolumes.Get(vmNamespace, dv, metav1.GetOptions{})
+		if err != nil {
+			return err
+		}
+
+		var updatedOwnerRefs []metav1.OwnerReference
+		for _, owner := range dv.OwnerReferences {
+			if owner.Name == vmName && owner.Kind == "VirtualMachine" {
+				continue
+			}
+			updatedOwnerRefs = append(updatedOwnerRefs, owner)
+		}
+
+		if len(updatedOwnerRefs) != len(dv.OwnerReferences) {
+			copyDv := dv.DeepCopy()
+			copyDv.OwnerReferences = updatedOwnerRefs
+			if _, err = s.dataVolumes.Update(copyDv); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+func (s *vmStore) deleteDataVolumes(namespace string, names []string) error {
+	for _, v := range names {
+		if err := s.dataVolumes.Delete(namespace, v, &metav1.DeleteOptions{}); err != nil && !k8sapierrors.IsNotFound(err) {
+			return err
+		}
+	}
+	return nil
+}