WIP: live migration to a named node
Follow-up and derived from:
kubevirt#10712
Implements:
kubevirt/community#320

TODO: add functional tests

Signed-off-by: zhonglin6666 <zhangzl06@chinatelecom.cn>
Signed-off-by: Simone Tiraboschi <stirabos@redhat.com>
张忠琳 authored and tiraboschi committed Sep 3, 2024
1 parent b20b06b commit c8413fd
Showing 11 changed files with 226 additions and 8 deletions.
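For orientation, a VirtualMachineInstanceMigration using the new spec.nodeName field could look like the following sketch (the VMI and node names are placeholders, not taken from this commit):

  apiVersion: kubevirt.io/v1
  kind: VirtualMachineInstanceMigration
  metadata:
    generateName: kubevirt-migrate-vm-
    namespace: default
  spec:
    vmiName: testvmi
    nodeName: node01

The admission webhook changed below rejects such a request when the named node does not exist or when it is the node the VMI is already running on.
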
8 changes: 8 additions & 0 deletions api/openapi-spec/swagger.json
@@ -14986,6 +14986,10 @@
"kind": {
"description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
"type": "string"
},
"nodeName": {
"description": "NodeName is a request to try to migrate this VMI to a specific node. If it is non-empty, the migration controller simply try to configure the target VMI pod to be started onto that node, assuming that it fits resource, limits and other node placement constraints; it will override nodeSelector and affinity and anti-affinity rules set on the VM. If it is empty, recommended, the scheduler becomes responsible for finding the best Node to migrate the VMI to.",
"type": "string"
}
}
},
@@ -16717,6 +16721,10 @@
"v1.VirtualMachineInstanceMigrationSpec": {
"type": "object",
"properties": {
"nodeName": {
"description": "NodeName is a request to try to migrate this VMI to a specific node. If it is non-empty, the migration controller simply try to configure the target VMI pod to be started onto that node, assuming that it fits resource, limits and other node placement constraints; it will override nodeSelector and affinity and anti-affinity rules set on the VM. If it is empty, recommended, the scheduler becomes responsible for finding the best Node to migrate the VMI to.",
"type": "string"
},
"vmiName": {
"description": "The name of the VMI to perform the migration on. VMI must exist in the migration objects namespace",
"type": "string"
3 changes: 2 additions & 1 deletion pkg/virt-api/rest/subresource.go
@@ -303,7 +303,8 @@ func (app *SubresourceAPIApp) MigrateVMRequestHandler(request *restful.Request,
GenerateName: "kubevirt-migrate-vm-",
},
Spec: v1.VirtualMachineInstanceMigrationSpec{
VMIName: name,
VMIName: name,
NodeName: bodyStruct.NodeName,
},
}, k8smetav1.CreateOptions{DryRun: bodyStruct.DryRun})
if err != nil {
@@ -116,6 +116,18 @@ func (admitter *MigrationCreateAdmitter) Admit(ctx context.Context, ar *admissio
return webhookutils.ToAdmissionResponseError(err)
}

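// If a specific destination node was requested, verify that it exists and that it differs from the node the VMI is currently running on.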
nodeName := migration.Spec.NodeName
if nodeName != "" {
_, err = admitter.VirtClient.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{})
if err != nil {
return webhookutils.ToAdmissionResponseError(err)
}

if nodeName == vmi.Status.NodeName {
return webhookutils.ToAdmissionResponseError(fmt.Errorf("the source and destination nodes are the same, there is no need to migrate"))
}
}

reviewResponse := admissionv1.AdmissionResponse{}
reviewResponse.Allowed = true
return &reviewResponse
@@ -22,6 +22,7 @@ package admitters
import (
"context"
"encoding/json"
"fmt"

"github.com/golang/mock/gomock"
. "github.com/onsi/ginkgo/v2"
@@ -30,10 +31,11 @@ import (
k8sv1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"

"kubevirt.io/client-go/api"
"k8s.io/client-go/kubernetes/fake"
"k8s.io/client-go/testing"

v1 "kubevirt.io/api/core/v1"
"kubevirt.io/client-go/api"
"kubevirt.io/client-go/kubecli"

"kubevirt.io/kubevirt/pkg/virt-api/webhooks"
@@ -46,6 +48,8 @@ var _ = Describe("Validating MigrationCreate Admitter", func() {
var migrationCreateAdmitter *MigrationCreateAdmitter
var migrationInterface *kubecli.MockVirtualMachineInstanceMigrationInterface
var mockVMIClient *kubecli.MockVirtualMachineInstanceInterface

BeforeEach(func() {
ctrl = gomock.NewController(GinkgoT())
@@ -97,7 +101,6 @@ var _ = Describe("Validating MigrationCreate Admitter", func() {

BeforeEach(func() {
migrationInterface.EXPECT().List(gomock.Any(), gomock.Any()).Return(&v1.VirtualMachineInstanceMigrationList{}, nil).MaxTimes(1)

})

It("should reject invalid Migration spec on create", func() {
@@ -258,6 +261,153 @@ var _ = Describe("Validating MigrationCreate Admitter", func() {
Expect(resp.Result.Message).To(ContainSubstring("DisksNotLiveMigratable"))
})

It("accept Migration spec on create when destination node exists", func() {
vmiName := "testmigratevmi6"
nodeName := "destmigratenode6"
vmi := api.NewMinimalVMI(vmiName)
vmi.Status.Phase = v1.Running
vmi.Status.Conditions = []v1.VirtualMachineInstanceCondition{
{
Type: v1.VirtualMachineInstanceReady,
Status: k8sv1.ConditionTrue,
},
}

mockVMIClient.EXPECT().Get(context.Background(), vmi.Name, gomock.Any()).Return(vmi, nil)

kubeClient := fake.NewSimpleClientset()
virtClient.EXPECT().CoreV1().Return(kubeClient.CoreV1()).AnyTimes()

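// Simulate a destination node that does not exist: any get on nodes returns an error.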
kubeClient.Fake.PrependReactor("get", "nodes", func(action testing.Action) (handled bool, obj runtime.Object, err error) {
return true, nil, fmt.Errorf("not found")
})

migration := v1.VirtualMachineInstanceMigration{
ObjectMeta: metav1.ObjectMeta{
Namespace: "default",
},
Spec: v1.VirtualMachineInstanceMigrationSpec{
VMIName: vmiName,
NodeName: nodeName,
},
}
migrationBytes, _ := json.Marshal(&migration)

ar := &admissionv1.AdmissionReview{
Request: &admissionv1.AdmissionRequest{
Resource: webhooks.MigrationGroupVersionResource,
Object: runtime.RawExtension{
Raw: migrationBytes,
},
},
}

resp := migrationCreateAdmitter.Admit(context.Background(), ar)
Expect(resp.Allowed).To(BeFalse())
Expect(resp.Result.Message).To(ContainSubstring("not found"))
})

It("should reject Migration spec on create when the source and destination are the same node", func() {
vmiName := "testmigratevmi7"
nodeName := "destmigratenode7"
vmi := api.NewMinimalVMI(vmiName)
vmi.Status.Phase = v1.Running
vmi.Status.Conditions = []v1.VirtualMachineInstanceCondition{
{
Type: v1.VirtualMachineInstanceReady,
Status: k8sv1.ConditionTrue,
},
}
vmi.Status.NodeName = nodeName

mockVMIClient.EXPECT().Get(context.Background(), vmi.Name, gomock.Any()).Return(vmi, nil)

kubeClient := fake.NewSimpleClientset()
virtClient.EXPECT().CoreV1().Return(kubeClient.CoreV1()).AnyTimes()

kubeClient.Fake.PrependReactor("get", "nodes", func(action testing.Action) (handled bool, obj runtime.Object, err error) {
return true, &k8sv1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: nodeName,
},
}, nil
})

migration := v1.VirtualMachineInstanceMigration{
ObjectMeta: metav1.ObjectMeta{
Namespace: "default",
},
Spec: v1.VirtualMachineInstanceMigrationSpec{
VMIName: vmiName,
NodeName: nodeName,
},
}
migrationBytes, _ := json.Marshal(&migration)

ar := &admissionv1.AdmissionReview{
Request: &admissionv1.AdmissionRequest{
Resource: webhooks.MigrationGroupVersionResource,
Object: runtime.RawExtension{
Raw: migrationBytes,
},
},
}

resp := migrationCreateAdmitter.Admit(context.Background(), ar)
Expect(resp.Allowed).To(BeFalse())
Expect(resp.Result.Message).To(ContainSubstring("source and destination nodes are the same"))
})

It("should accept Migration spec on create when the source and destination are not the same node", func() {
vmiName := "testmigratevmi7"
nodeName := "destmigratenode7"
vmi := api.NewMinimalVMI(vmiName)
vmi.Status.Phase = v1.Running
vmi.Status.Conditions = []v1.VirtualMachineInstanceCondition{
{
Type: v1.VirtualMachineInstanceReady,
Status: k8sv1.ConditionTrue,
},
}
vmi.Status.NodeName = "sourcemigratenode"

mockVMIClient.EXPECT().Get(context.Background(), vmi.Name, gomock.Any()).Return(vmi, nil)

kubeClient := fake.NewSimpleClientset()
virtClient.EXPECT().CoreV1().Return(kubeClient.CoreV1()).AnyTimes()

kubeClient.Fake.PrependReactor("get", "nodes", func(action testing.Action) (handled bool, obj runtime.Object, err error) {
return true, &k8sv1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: nodeName,
},
}, nil
})

migration := v1.VirtualMachineInstanceMigration{
ObjectMeta: metav1.ObjectMeta{
Namespace: "default",
},
Spec: v1.VirtualMachineInstanceMigrationSpec{
VMIName: vmiName,
NodeName: nodeName,
},
}
migrationBytes, _ := json.Marshal(&migration)

ar := &admissionv1.AdmissionReview{
Request: &admissionv1.AdmissionRequest{
Resource: webhooks.MigrationGroupVersionResource,
Object: runtime.RawExtension{
Raw: migrationBytes,
},
},
}

resp := migrationCreateAdmitter.Admit(context.Background(), ar)
Expect(resp.Allowed).To(BeTrue())
})

DescribeTable("should reject documents containing unknown or missing fields for", func(data string, validationResult string, gvr metav1.GroupVersionResource, review func(ctx context.Context, ar *admissionv1.AdmissionReview) *admissionv1.AdmissionResponse) {
input := map[string]interface{}{}
json.Unmarshal([]byte(data), &input)
1 change: 1 addition & 0 deletions pkg/virt-controller/watch/migration.go
@@ -678,6 +678,7 @@ func (c *MigrationController) createTargetPod(migration *virtv1.VirtualMachineIn

templatePod.ObjectMeta.Labels[virtv1.MigrationJobLabel] = string(migration.UID)
templatePod.ObjectMeta.Annotations[virtv1.MigrationJobNameAnnotation] = migration.Name
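// If the migration requests a specific destination, pin the target pod to that node: with Spec.NodeName set,
// the pod skips the scheduler and is handled directly by the kubelet on that node. An empty value leaves placement to the scheduler as before.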
templatePod.Spec.NodeName = migration.Spec.NodeName

// If cpu model is "host model" allow migration only to nodes that supports this cpu model
if cpu := vmi.Spec.Domain.CPU; cpu != nil && cpu.Model == virtv1.CPUModeHostModel {
2 changes: 2 additions & 0 deletions pkg/virt-operator/resource/generate/components/crds.go
@@ -330,6 +330,8 @@ func NewVirtualMachineInstanceMigrationCrd() (*extv1.CustomResourceDefinition, e
Description: "The current phase of VM instance migration"},
{Name: "VMI", Type: "string", JSONPath: ".spec.vmiName",
Description: "The name of the VMI to perform the migration on"},
{Name: "NodeName", Type: "string", JSONPath: ".spec.nodeName",
Description: "The name of the the destination node to perform the migration on"},
}, &extv1.CustomResourceSubresources{
Status: &extv1.CustomResourceSubresourceStatus{},
})
@@ -13774,6 +13774,14 @@ var CRDsValidation map[string]string = map[string]string{
type: object
spec:
properties:
nodeName:
description: |-
NodeName is a request to try to migrate this VMI to a specific node.
If it is non-empty, the migration controller simply tries to configure the target VMI pod to be started on that node,
assuming that it fits the resource limits and other node placement constraints; it overrides nodeSelector and affinity
and anti-affinity rules set on the VM.
If it is empty (recommended), the scheduler becomes responsible for finding the best node to migrate the VMI to.
type: string
vmiName:
description: The name of the VMI to perform the migration on. VMI must exist
in the migration objects namespace
7 changes: 6 additions & 1 deletion pkg/virtctl/vm/migrate.go
@@ -32,7 +32,10 @@ import (

const COMMAND_MIGRATE = "migrate"

var nodeName string

func NewMigrateCommand(clientConfig clientcmd.ClientConfig) *cobra.Command {

cmd := &cobra.Command{
Use: "migrate (VM)",
Short: "Migrate a virtual machine.",
@@ -43,6 +46,8 @@ func NewMigrateCommand(clientConfig clientcmd.ClientConfig) *cobra.Command {
return c.migrateRun(args)
},
}

cmd.Flags().StringVar(&nodeName, "nodeName", nodeName, "--nodeName=<nodeName>: Migrate this VM to a specific node, overriding its nodeSelector, affinity and anti-affinity rules. If it is omitted (recommended), the scheduler becomes responsible for finding the best node to migrate the VM to.")
cmd.Flags().BoolVar(&dryRun, dryRunArg, false, dryRunCommandUsage)
cmd.SetUsageTemplate(templates.UsageTemplate())
return cmd
@@ -58,7 +63,7 @@ func (o *Command) migrateRun(args []string) error {

dryRunOption := setDryRunOption(dryRun)

err = virtClient.VirtualMachine(namespace).Migrate(context.Background(), vmiName, &v1.MigrateOptions{DryRun: dryRunOption})
err = virtClient.VirtualMachine(namespace).Migrate(context.Background(), vmiName, &v1.MigrateOptions{DryRun: dryRunOption, NodeName: nodeName})
if err != nil {
return fmt.Errorf("Error migrating VirtualMachine %v", err)
}
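
Assuming the flag keeps the name used above, an invocation could look like the following sketch (VM and node names are placeholders):

  virtctl migrate testvm --nodeName=node01

The flag value ends up in MigrateOptions.NodeName, which MigrateVMRequestHandler in subresource.go copies into the spec of the VirtualMachineInstanceMigration it creates.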
15 changes: 15 additions & 0 deletions staging/src/kubevirt.io/api/core/v1/types.go
@@ -1361,6 +1361,14 @@ type VirtualMachineInstanceMigrationList struct {
type VirtualMachineInstanceMigrationSpec struct {
// The name of the VMI to perform the migration on. VMI must exist in the migration objects namespace
VMIName string `json:"vmiName,omitempty" valid:"required"`

// NodeName is a request to try to migrate this VMI to a specific node.
// If it is non-empty, the migration controller simply tries to configure the target VMI pod to be started on that node,
// assuming that it fits the resource limits and other node placement constraints; it overrides nodeSelector and affinity
// and anti-affinity rules set on the VM.
// If it is empty (recommended), the scheduler becomes responsible for finding the best node to migrate the VMI to.
// +optional
NodeName string `json:"nodeName,omitempty"`
}

// VirtualMachineInstanceMigrationPhaseTransitionTimestamp gives a timestamp in relation to when a phase is set on a vmi
@@ -2236,6 +2244,13 @@ type MigrateOptions struct {
// +optional
// +listType=atomic
DryRun []string `json:"dryRun,omitempty" protobuf:"bytes,1,rep,name=dryRun"`
// NodeName is a request to try to migrate this VMI to a specific node.
// If it is non-empty, the migration controller simply tries to configure the target VMI pod to be started on that node,
// assuming that it fits the resource limits and other node placement constraints; it overrides nodeSelector and affinity
// and anti-affinity rules set on the VM.
// If it is empty (recommended), the scheduler becomes responsible for finding the best node to migrate the VMI to.
// +optional
NodeName string `json:"nodeName,omitempty"`
}

// VirtualMachineInstanceGuestAgentInfo represents information from the installed guest agent
14 changes: 14 additions & 0 deletions staging/src/kubevirt.io/client-go/api/openapi_generated.go

Some generated files are not rendered by default.
