diff --git a/api/openapi-spec/swagger.json b/api/openapi-spec/swagger.json
index 71b01850fabb..e4ac118a06b6 100644
--- a/api/openapi-spec/swagger.json
+++ b/api/openapi-spec/swagger.json
@@ -14986,6 +14986,10 @@
     "kind": {
      "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
      "type": "string"
+    },
+    "nodeName": {
+     "description": "NodeName is a request to try to migrate this VMI to a specific node. If it is non-empty, the migration controller simply tries to configure the target VMI pod to be started on that node, assuming that it fits resource limits and other node placement constraints; it will override nodeSelector and affinity and anti-affinity rules set on the VM. If it is empty (recommended), the scheduler becomes responsible for finding the best Node to migrate the VMI to.",
+     "type": "string"
     }
    }
   },
@@ -16717,6 +16721,10 @@
   "v1.VirtualMachineInstanceMigrationSpec": {
    "type": "object",
    "properties": {
+    "nodeName": {
+     "description": "NodeName is a request to try to migrate this VMI to a specific node. If it is non-empty, the migration controller simply tries to configure the target VMI pod to be started on that node, assuming that it fits resource limits and other node placement constraints; it will override nodeSelector and affinity and anti-affinity rules set on the VM. If it is empty (recommended), the scheduler becomes responsible for finding the best Node to migrate the VMI to.",
+     "type": "string"
+    },
     "vmiName": {
      "description": "The name of the VMI to perform the migration on. VMI must exist in the migration objects namespace",
      "type": "string"
diff --git a/pkg/virt-api/rest/subresource.go b/pkg/virt-api/rest/subresource.go
index b5d62f5af581..e35587effc09 100644
--- a/pkg/virt-api/rest/subresource.go
+++ b/pkg/virt-api/rest/subresource.go
@@ -303,7 +303,8 @@ func (app *SubresourceAPIApp) MigrateVMRequestHandler(request *restful.Request,
 			GenerateName: "kubevirt-migrate-vm-",
 		},
 		Spec: v1.VirtualMachineInstanceMigrationSpec{
-			VMIName: name,
+			VMIName:  name,
+			NodeName: bodyStruct.NodeName,
 		},
 	}, k8smetav1.CreateOptions{DryRun: bodyStruct.DryRun})
 	if err != nil {
diff --git a/pkg/virt-api/webhooks/validating-webhook/admitters/migration-create-admitter.go b/pkg/virt-api/webhooks/validating-webhook/admitters/migration-create-admitter.go
index 2f62bed30485..76cb050f8888 100644
--- a/pkg/virt-api/webhooks/validating-webhook/admitters/migration-create-admitter.go
+++ b/pkg/virt-api/webhooks/validating-webhook/admitters/migration-create-admitter.go
@@ -116,6 +116,19 @@ func (admitter *MigrationCreateAdmitter) Admit(ctx context.Context, ar *admissio
 		return webhookutils.ToAdmissionResponseError(err)
 	}
 
+	nodeName := migration.Spec.NodeName
+	if nodeName != "" {
+		// Reject early if the requested destination node does not exist.
+		_, err = admitter.VirtClient.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{})
+		if err != nil {
+			return webhookutils.ToAdmissionResponseError(err)
+		}
+
+		if nodeName == vmi.Status.NodeName {
+			return webhookutils.ToAdmissionResponseError(fmt.Errorf("the source and destination node are the same, so there is no need to migrate"))
+		}
+	}
+
 	reviewResponse := admissionv1.AdmissionResponse{}
 	reviewResponse.Allowed = true
 	return &reviewResponse
diff --git a/pkg/virt-api/webhooks/validating-webhook/admitters/migration-create-admitter_test.go b/pkg/virt-api/webhooks/validating-webhook/admitters/migration-create-admitter_test.go
index be00209d0a75..13b52655126c 100644
--- a/pkg/virt-api/webhooks/validating-webhook/admitters/migration-create-admitter_test.go
+++ b/pkg/virt-api/webhooks/validating-webhook/admitters/migration-create-admitter_test.go
@@ -22,6 +22,7 @@ package admitters
 import (
 	"context"
 	"encoding/json"
+	"fmt"
 
 	"github.com/golang/mock/gomock"
 	. "github.com/onsi/ginkgo/v2"
@@ -30,10 +31,11 @@ import (
 	k8sv1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
-
-	"kubevirt.io/client-go/api"
+	"k8s.io/client-go/kubernetes/fake"
+	"k8s.io/client-go/testing"
 
 	v1 "kubevirt.io/api/core/v1"
+	"kubevirt.io/client-go/api"
 	"kubevirt.io/client-go/kubecli"
 
 	"kubevirt.io/kubevirt/pkg/virt-api/webhooks"
@@ -97,7 +99,6 @@ var _ = Describe("Validating MigrationCreate Admitter", func() {
 
 	BeforeEach(func() {
 		migrationInterface.EXPECT().List(gomock.Any(), gomock.Any()).Return(&v1.VirtualMachineInstanceMigrationList{}, nil).MaxTimes(1)
-
 	})
 
 	It("should reject invalid Migration spec on create", func() {
@@ -258,6 +259,153 @@ var _ = Describe("Validating MigrationCreate Admitter", func() {
 		Expect(resp.Result.Message).To(ContainSubstring("DisksNotLiveMigratable"))
 	})
 
+	It("should reject Migration spec on create when the destination node does not exist", func() {
+		vmiName := "testmigratevmi6"
+		nodeName := "destmigratenode6"
+		vmi := api.NewMinimalVMI(vmiName)
+		vmi.Status.Phase = v1.Running
+		vmi.Status.Conditions = []v1.VirtualMachineInstanceCondition{
+			{
+				Type:   v1.VirtualMachineInstanceReady,
+				Status: k8sv1.ConditionTrue,
+			},
+		}
+
+		mockVMIClient.EXPECT().Get(context.Background(), vmi.Name, gomock.Any()).Return(vmi, nil)
+
+		kubeClient := fake.NewSimpleClientset()
+		virtClient.EXPECT().CoreV1().Return(kubeClient.CoreV1()).AnyTimes()
+
+		kubeClient.Fake.PrependReactor("get", "nodes", func(action testing.Action) (handled bool, obj runtime.Object, err error) {
+			return true, nil, fmt.Errorf("not found")
+		})
+
+		migration := v1.VirtualMachineInstanceMigration{
+			ObjectMeta: metav1.ObjectMeta{
+				Namespace: "default",
+			},
+			Spec: v1.VirtualMachineInstanceMigrationSpec{
+				VMIName:  vmiName,
+				NodeName: nodeName,
+			},
+		}
+		migrationBytes, _ := json.Marshal(&migration)
+
+		ar := &admissionv1.AdmissionReview{
+			Request: &admissionv1.AdmissionRequest{
+				Resource: webhooks.MigrationGroupVersionResource,
+				Object: runtime.RawExtension{
+					Raw: migrationBytes,
+				},
+			},
+		}
+
+		resp := migrationCreateAdmitter.Admit(context.Background(), ar)
+		Expect(resp.Allowed).To(BeFalse())
+		Expect(resp.Result.Message).To(ContainSubstring("not found"))
+	})
+
+	It("should reject Migration spec on create when the source and destination are the same node", func() {
+		vmiName := "testmigratevmi7"
+		nodeName := "destmigratenode7"
+		vmi := api.NewMinimalVMI(vmiName)
+		vmi.Status.Phase = v1.Running
+		vmi.Status.Conditions = []v1.VirtualMachineInstanceCondition{
+			{
+				Type:   v1.VirtualMachineInstanceReady,
+				Status: k8sv1.ConditionTrue,
+			},
+		}
+		vmi.Status.NodeName = nodeName
+
+		mockVMIClient.EXPECT().Get(context.Background(), vmi.Name, gomock.Any()).Return(vmi, nil)
+
+		kubeClient := fake.NewSimpleClientset()
+		virtClient.EXPECT().CoreV1().Return(kubeClient.CoreV1()).AnyTimes()
+
+		kubeClient.Fake.PrependReactor("get", "nodes", func(action testing.Action) (handled bool, obj runtime.Object, err error) {
+			return true, &k8sv1.Node{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: nodeName,
+				},
+			}, nil
+		})
+
+		migration := v1.VirtualMachineInstanceMigration{
+			ObjectMeta: metav1.ObjectMeta{
+				Namespace: "default",
+			},
+			Spec: v1.VirtualMachineInstanceMigrationSpec{
+				VMIName:  vmiName,
+				NodeName: nodeName,
+			},
+		}
+		migrationBytes, _ := json.Marshal(&migration)
+
+		ar := &admissionv1.AdmissionReview{
+			Request: &admissionv1.AdmissionRequest{
+				Resource: webhooks.MigrationGroupVersionResource,
+				Object: runtime.RawExtension{
+					Raw: migrationBytes,
+				},
+			},
+		}
+
+		resp := migrationCreateAdmitter.Admit(context.Background(), ar)
+		Expect(resp.Allowed).To(BeFalse())
+		Expect(resp.Result.Message).To(ContainSubstring("source and destination node are the same"))
+	})
+
+	It("should accept Migration spec on create when the source and destination are not the same node", func() {
+		vmiName := "testmigratevmi8"
+		nodeName := "destmigratenode8"
+		vmi := api.NewMinimalVMI(vmiName)
+		vmi.Status.Phase = v1.Running
+		vmi.Status.Conditions = []v1.VirtualMachineInstanceCondition{
+			{
+				Type:   v1.VirtualMachineInstanceReady,
+				Status: k8sv1.ConditionTrue,
+			},
+		}
+		vmi.Status.NodeName = "sourcemigratenode"
+
+		mockVMIClient.EXPECT().Get(context.Background(), vmi.Name, gomock.Any()).Return(vmi, nil)
+
+		kubeClient := fake.NewSimpleClientset()
+		virtClient.EXPECT().CoreV1().Return(kubeClient.CoreV1()).AnyTimes()
+
+		kubeClient.Fake.PrependReactor("get", "nodes", func(action testing.Action) (handled bool, obj runtime.Object, err error) {
+			return true, &k8sv1.Node{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: nodeName,
+				},
+			}, nil
+		})
+
+		migration := v1.VirtualMachineInstanceMigration{
+			ObjectMeta: metav1.ObjectMeta{
+				Namespace: "default",
+			},
+			Spec: v1.VirtualMachineInstanceMigrationSpec{
+				VMIName:  vmiName,
+				NodeName: nodeName,
+			},
+		}
+		migrationBytes, _ := json.Marshal(&migration)
+
+		ar := &admissionv1.AdmissionReview{
+			Request: &admissionv1.AdmissionRequest{
+				Resource: webhooks.MigrationGroupVersionResource,
+				Object: runtime.RawExtension{
+					Raw: migrationBytes,
+				},
+			},
+		}
+
+		resp := migrationCreateAdmitter.Admit(context.Background(), ar)
+		Expect(resp.Allowed).To(BeTrue())
+	})
+
 	DescribeTable("should reject documents containing unknown or missing fields for", func(data string, validationResult string, gvr metav1.GroupVersionResource, review func(ctx context.Context, ar *admissionv1.AdmissionReview) *admissionv1.AdmissionResponse) {
 		input := map[string]interface{}{}
 		json.Unmarshal([]byte(data), &input)
diff --git a/pkg/virt-controller/watch/migration.go b/pkg/virt-controller/watch/migration.go
index 28d6636b364d..51406cfc4028 100644
--- a/pkg/virt-controller/watch/migration.go
+++ b/pkg/virt-controller/watch/migration.go
@@ -678,6 +678,7 @@ func (c *MigrationController) createTargetPod(migration *virtv1.VirtualMachineIn
 
 	templatePod.ObjectMeta.Labels[virtv1.MigrationJobLabel] = string(migration.UID)
 	templatePod.ObjectMeta.Annotations[virtv1.MigrationJobNameAnnotation] = migration.Name
+	templatePod.Spec.NodeName = migration.Spec.NodeName
 
 	// If cpu model is "host model" allow migration only to nodes that supports this cpu model
 	if cpu := vmi.Spec.Domain.CPU; cpu != nil && cpu.Model == virtv1.CPUModeHostModel {
diff --git a/pkg/virt-operator/resource/generate/components/crds.go b/pkg/virt-operator/resource/generate/components/crds.go
index e9b4ec271472..e38455c4f0f7 100644
--- a/pkg/virt-operator/resource/generate/components/crds.go
+++ b/pkg/virt-operator/resource/generate/components/crds.go
@@ -330,6 +330,8 @@ func NewVirtualMachineInstanceMigrationCrd() (*extv1.CustomResourceDefinition, e
 			Description: "The current phase of VM instance migration"},
 		{Name: "VMI", Type: "string", JSONPath: ".spec.vmiName",
 			Description: "The name of the VMI to perform the migration on"},
+		{Name: "NodeName", Type: "string", JSONPath: ".spec.nodeName",
+			Description: "The name of the destination node to migrate the VMI to"},
 	}, &extv1.CustomResourceSubresources{
 		Status: &extv1.CustomResourceSubresourceStatus{},
 	})
diff --git a/pkg/virt-operator/resource/generate/components/validations_generated.go b/pkg/virt-operator/resource/generate/components/validations_generated.go
index b946c34ef823..dfc1bb537054 100644
--- a/pkg/virt-operator/resource/generate/components/validations_generated.go
+++ b/pkg/virt-operator/resource/generate/components/validations_generated.go
@@ -13774,6 +13774,14 @@ var CRDsValidation map[string]string = map[string]string{
           type: object
         spec:
          properties:
+            nodeName:
+              description: |-
+                NodeName is a request to try to migrate this VMI to a specific node.
+                If it is non-empty, the migration controller simply tries to configure the target VMI pod to be started on that node,
+                assuming that it fits resource limits and other node placement constraints; it will override nodeSelector and affinity
+                and anti-affinity rules set on the VM.
+                If it is empty (recommended), the scheduler becomes responsible for finding the best Node to migrate the VMI to.
+              type: string
             vmiName:
               description: The name of the VMI to perform the migration on. VMI
                 must exist in the migration objects namespace
diff --git a/pkg/virtctl/vm/migrate.go b/pkg/virtctl/vm/migrate.go
index 9bc34c1bab72..2d3399295f3a 100644
--- a/pkg/virtctl/vm/migrate.go
+++ b/pkg/virtctl/vm/migrate.go
@@ -32,7 +32,9 @@ import (
 
 const COMMAND_MIGRATE = "migrate"
 
+var nodeName string
+
 func NewMigrateCommand(clientConfig clientcmd.ClientConfig) *cobra.Command {
 	cmd := &cobra.Command{
 		Use:   "migrate (VM)",
 		Short: "Migrate a virtual machine.",
@@ -43,6 +45,8 @@ func NewMigrateCommand(clientConfig clientcmd.ClientConfig) *cobra.Command {
 			return c.migrateRun(args)
 		},
 	}
+
+	cmd.Flags().StringVar(&nodeName, "nodeName", "", "Migrate the VM to a specific node, regardless of its affinity rules. If omitted (recommended), the scheduler becomes responsible for finding the best node to migrate the VM to.")
 	cmd.Flags().BoolVar(&dryRun, dryRunArg, false, dryRunCommandUsage)
 	cmd.SetUsageTemplate(templates.UsageTemplate())
 	return cmd
@@ -58,7 +62,7 @@ func (o *Command) migrateRun(args []string) error {
 
 	dryRunOption := setDryRunOption(dryRun)
 
-	err = virtClient.VirtualMachine(namespace).Migrate(context.Background(), vmiName, &v1.MigrateOptions{DryRun: dryRunOption})
+	err = virtClient.VirtualMachine(namespace).Migrate(context.Background(), vmiName, &v1.MigrateOptions{DryRun: dryRunOption, NodeName: nodeName})
 	if err != nil {
 		return fmt.Errorf("Error migrating VirtualMachine %v", err)
 	}
diff --git a/staging/src/kubevirt.io/api/core/v1/types.go b/staging/src/kubevirt.io/api/core/v1/types.go
index 1d8bdcc56744..e9521fcd5700 100644
--- a/staging/src/kubevirt.io/api/core/v1/types.go
+++ b/staging/src/kubevirt.io/api/core/v1/types.go
@@ -1361,6 +1361,14 @@ type VirtualMachineInstanceMigrationList struct {
 type VirtualMachineInstanceMigrationSpec struct {
 	// The name of the VMI to perform the migration on. VMI must exist in the migration objects namespace
 	VMIName string `json:"vmiName,omitempty" valid:"required"`
+
+	// NodeName is a request to try to migrate this VMI to a specific node.
+	// If it is non-empty, the migration controller simply tries to configure the target VMI pod to be started on that node,
+	// assuming that it fits resource limits and other node placement constraints; it will override nodeSelector and affinity
+	// and anti-affinity rules set on the VM.
+	// If it is empty (recommended), the scheduler becomes responsible for finding the best Node to migrate the VMI to.
+	// +optional
+	NodeName string `json:"nodeName,omitempty"`
 }
 
 // VirtualMachineInstanceMigrationPhaseTransitionTimestamp gives a timestamp in relation to when a phase is set on a vmi
@@ -2236,6 +2244,13 @@ type MigrateOptions struct {
 	// +optional
 	// +listType=atomic
 	DryRun []string `json:"dryRun,omitempty" protobuf:"bytes,1,rep,name=dryRun"`
+	// NodeName is a request to try to migrate this VMI to a specific node.
+	// If it is non-empty, the migration controller simply tries to configure the target VMI pod to be started on that node,
+	// assuming that it fits resource limits and other node placement constraints; it will override nodeSelector and affinity
+	// and anti-affinity rules set on the VM.
+	// If it is empty (recommended), the scheduler becomes responsible for finding the best Node to migrate the VMI to.
+	// +optional
+	NodeName string `json:"nodeName,omitempty"`
 }
 
 // VirtualMachineInstanceGuestAgentInfo represents information from the installed guest agent
diff --git a/staging/src/kubevirt.io/api/core/v1/types_swagger_generated.go b/staging/src/kubevirt.io/api/core/v1/types_swagger_generated.go
index b40fe118f634..8e33ce0b7ded 100644
--- a/staging/src/kubevirt.io/api/core/v1/types_swagger_generated.go
+++ b/staging/src/kubevirt.io/api/core/v1/types_swagger_generated.go
@@ -313,7 +313,8 @@ func (VirtualMachineInstanceMigrationList) SwaggerDoc() map[string]string {
 
 func (VirtualMachineInstanceMigrationSpec) SwaggerDoc() map[string]string {
 	return map[string]string{
-		"vmiName": "The name of the VMI to perform the migration on. VMI must exist in the migration objects namespace",
+		"vmiName":  "The name of the VMI to perform the migration on. VMI must exist in the migration objects namespace",
+		"nodeName": "NodeName is a request to try to migrate this VMI to a specific node.\nIf it is non-empty, the migration controller simply tries to configure the target VMI pod to be started on that node,\nassuming that it fits resource limits and other node placement constraints; it will override nodeSelector and affinity\nand anti-affinity rules set on the VM.\nIf it is empty (recommended), the scheduler becomes responsible for finding the best Node to migrate the VMI to.\n+optional",
 	}
 }
 
@@ -608,8 +609,9 @@ func (StopOptions) SwaggerDoc() map[string]string {
 
 func (MigrateOptions) SwaggerDoc() map[string]string {
 	return map[string]string{
-		"":       "MigrateOptions may be provided on migrate request.",
-		"dryRun": "When present, indicates that modifications should not be\npersisted. An invalid or unrecognized dryRun directive will\nresult in an error response and no further processing of the\nrequest. Valid values are:\n- All: all dry run stages will be processed\n+optional\n+listType=atomic",
+		"":         "MigrateOptions may be provided on migrate request.",
+		"dryRun":   "When present, indicates that modifications should not be\npersisted. An invalid or unrecognized dryRun directive will\nresult in an error response and no further processing of the\nrequest. Valid values are:\n- All: all dry run stages will be processed\n+optional\n+listType=atomic",
+		"nodeName": "NodeName is a request to try to migrate this VMI to a specific node.\nIf it is non-empty, the migration controller simply tries to configure the target VMI pod to be started on that node,\nassuming that it fits resource limits and other node placement constraints; it will override nodeSelector and affinity\nand anti-affinity rules set on the VM.\nIf it is empty (recommended), the scheduler becomes responsible for finding the best Node to migrate the VMI to.\n+optional",
 	}
 }
diff --git a/staging/src/kubevirt.io/client-go/api/openapi_generated.go b/staging/src/kubevirt.io/client-go/api/openapi_generated.go
index ffffc501aeaa..1d981dd8126b 100644
--- a/staging/src/kubevirt.io/client-go/api/openapi_generated.go
+++ b/staging/src/kubevirt.io/client-go/api/openapi_generated.go
@@ -21511,6 +21511,13 @@ func schema_kubevirtio_api_core_v1_MigrateOptions(ref common.ReferenceCallback)
 						},
 					},
 				},
+				"nodeName": {
+					SchemaProps: spec.SchemaProps{
+						Description: "NodeName is a request to try to migrate this VMI to a specific node. If it is non-empty, the migration controller simply tries to configure the target VMI pod to be started on that node, assuming that it fits resource limits and other node placement constraints; it will override nodeSelector and affinity and anti-affinity rules set on the VM. If it is empty (recommended), the scheduler becomes responsible for finding the best Node to migrate the VMI to.",
+						Type:        []string{"string"},
+						Format:      "",
+					},
+				},
 			},
 		},
 	},
@@ -24785,6 +24792,13 @@ func schema_kubevirtio_api_core_v1_VirtualMachineInstanceMigrationSpec(ref commo
 						Format: "",
 					},
 				},
+				"nodeName": {
+					SchemaProps: spec.SchemaProps{
+						Description: "NodeName is a request to try to migrate this VMI to a specific node. If it is non-empty, the migration controller simply tries to configure the target VMI pod to be started on that node, assuming that it fits resource limits and other node placement constraints; it will override nodeSelector and affinity and anti-affinity rules set on the VM. If it is empty (recommended), the scheduler becomes responsible for finding the best Node to migrate the VMI to.",
+						Type:        []string{"string"},
+						Format:      "",
+					},
+				},
 			},
 		},
 	},
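
For illustration, a VirtualMachineInstanceMigration that uses the new field might look like the manifest below. This is a sketch, not part of the change itself: the VMI and node names are invented, and it assumes the CRD and admitter changes above are deployed.

apiVersion: kubevirt.io/v1
kind: VirtualMachineInstanceMigration
metadata:
  generateName: kubevirt-migrate-vm-
  namespace: default
spec:
  vmiName: testvmi   # must name a live-migratable VMI in this namespace
  nodeName: node02   # requested destination; the admitter requires it to exist and to differ from the VMI's current node

An equivalent request can be issued through the new virtctl flag, e.g. virtctl migrate testvm --nodeName=node02; omitting the flag (the recommended default) leaves node selection to the scheduler.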