Merge pull request kubevirt#2666 from slintes/backport-ocs-tests
[release-0.20] Backport of kubevirt#2617 and kubevirt#2636: VM tests with OCS disks and Block DV fix
slintes authored Sep 5, 2019
2 parents 8279f01 + e2bf505 commit 5b0ba3b
Showing 21 changed files with 575 additions and 344 deletions.
2 changes: 1 addition & 1 deletion WORKSPACE
@@ -102,7 +102,7 @@ http_file(
name = "virtio_win_image",
sha256 = "7bf7f53e30c69a360f89abb3d2cc19cc978f533766b1b2270c2d8344edf9b3ef",
urls = [
"https://fedorapeople.org/groups/virt/virtio-win/direct-downloads/latest-virtio/virtio-win.iso",
"https://fedorapeople.org/groups/virt/virtio-win/direct-downloads/archive-virtio/virtio-win-0.1.171-1/virtio-win-0.1.171.iso",
],
)

2 changes: 1 addition & 1 deletion cluster-up-sha.txt
@@ -1 +1 @@
851d37c4db3aac525847656472b5f23a5c90a79b
3a398e0bd5bf61d9f42968a75dfc4066d0b18820
45 changes: 45 additions & 0 deletions cluster-up/cluster/k8s-1.15.1/README.md
@@ -0,0 +1,45 @@
# Kubernetes 1.15.1 in ephemeral containers

Provides a pre-deployed Kubernetes 1.15.1 cluster running purely in docker
containers with qemu. The provided VMs are completely ephemeral and are
recreated on every cluster restart. The KubeVirt containers are built on the
local machine and then pushed to a registry which is exposed at
`localhost:5000`.

## Bringing the cluster up

```bash
export KUBEVIRT_PROVIDER=k8s-1.15.1
export KUBEVIRT_NUM_NODES=2 # master + one node
make cluster-up
```

The cluster can be accessed as usual:

```bash
$ cluster/kubectl.sh get nodes
NAME STATUS ROLES AGE VERSION
node01 NotReady master 31s v1.15.1
node02 NotReady <none> 5s v1.15.1
```

## Bringing the cluster down

```bash
export KUBEVIRT_PROVIDER=k8s-1.15.1
make cluster-down
```

This destroys the whole cluster. Recreating the cluster is fast, since k8s is
already pre-deployed. The only state which is kept is that of the local
docker registry.

## Destroying the docker registry state

The docker registry survives a `make cluster-down`. Its state is stored in a
docker volume called `kubevirt_registry`. If the volume grows too big or
contains corrupt data, it can be deleted with:

```bash
docker volume rm kubevirt_registry
```
24 changes: 24 additions & 0 deletions cluster-up/cluster/k8s-1.15.1/provider.sh
@@ -0,0 +1,24 @@
#!/usr/bin/env bash

set -e

image="k8s-1.15.1@sha256:14d7b1806f24e527167d2913deafd910ea46e69b830bf0b094dde35ba961b159"

source ${KUBEVIRTCI_PATH}/cluster/ephemeral-provider-common.sh

function up() {
${_cli} run $(_add_common_params)

# Copy k8s config and kubectl
${_cli} scp --prefix $provider_prefix /usr/bin/kubectl - >${KUBEVIRTCI_CONFIG_PATH}/$KUBEVIRT_PROVIDER/.kubectl
chmod u+x ${KUBEVIRTCI_CONFIG_PATH}/$KUBEVIRT_PROVIDER/.kubectl
${_cli} scp --prefix $provider_prefix /etc/kubernetes/admin.conf - >${KUBEVIRTCI_CONFIG_PATH}/$KUBEVIRT_PROVIDER/.kubeconfig

# Set server and disable tls check
export KUBECONFIG=${KUBEVIRTCI_CONFIG_PATH}/$KUBEVIRT_PROVIDER/.kubeconfig
${KUBEVIRTCI_CONFIG_PATH}/$KUBEVIRT_PROVIDER/.kubectl config set-cluster kubernetes --server=https://$(_main_ip):$(_port k8s)
${KUBEVIRTCI_CONFIG_PATH}/$KUBEVIRT_PROVIDER/.kubectl config set-cluster kubernetes --insecure-skip-tls-verify=true

# Make sure that local config is correct
prepare_config
}
4 changes: 2 additions & 2 deletions cluster-up/cluster/kind/common.sh
@@ -14,12 +14,12 @@ function _wait_kind_up {
sleep 10
done
echo "Waiting for dns to be ready ..."
kubectl wait -n kube-system --timeout=12m --for=condition=Ready -l k8s-app=kube-dns pods
_kubectl wait -n kube-system --timeout=12m --for=condition=Ready -l k8s-app=kube-dns pods
}

function _wait_containers_ready {
echo "Waiting for all containers to become ready ..."
kubectl wait --for=condition=Ready pod --all -n kube-system --timeout 12m
_kubectl wait --for=condition=Ready pod --all -n kube-system --timeout 12m
}

function _fetch_kind() {
2 changes: 1 addition & 1 deletion cluster-up/version.txt
@@ -1 +1 @@
7ba5a0fca723a32aca204f778f6776811b4d94fa
d749b29f7a2fa6cb7b46c3590040f2a23e5704e0
2 changes: 1 addition & 1 deletion hack/config-default.sh
@@ -9,7 +9,7 @@ image_pull_policy=${IMAGE_PULL_POLICY:-IfNotPresent}
verbosity=${VERBOSITY:-2}
package_name=${PACKAGE_NAME:-kubevirt-dev}
push_log_file=${PUSH_LOG_FILE:-_out/imagePush.log}
kubevirtci_git_hash="7ba5a0fca723a32aca204f778f6776811b4d94fa"
kubevirtci_git_hash="d749b29f7a2fa6cb7b46c3590040f2a23e5704e0"

# try to derive csv_version from docker tag. But it must start with x.y.z, without leading v
default_csv_version="${docker_tag/latest/0.0.0}"
15 changes: 15 additions & 0 deletions images/cdi-http-import-server/BUILD.bazel
@@ -49,6 +49,20 @@ pkg_tar(
package_dir = "usr/share/nginx/html/images",
)

genrule(
name = "fedora-img",
srcs = ["@fedora_image//file"],
outs = ["usr/share/nginx/html/images/fedora.img"],
cmd = "mkdir disk && cat $(location @fedora_image//file) > $@",
)

pkg_tar(
name = "fedora-img-tar",
srcs = [":fedora-img"],
mode = "644",
package_dir = "usr/share/nginx/html/images",
)

pkg_tar(
name = "nginx-config-tar",
srcs = [
@@ -71,6 +85,7 @@ container_image(
tars = [
":alpine-tar",
":cirros-img-tar",
":fedora-img-tar",
":nginx-config-tar",
],
visibility = ["//visibility:public"],
2 changes: 2 additions & 0 deletions images/cdi-http-import-server/entrypoint.sh
@@ -24,12 +24,14 @@ trap 'echo "Graceful exit"; exit 0' SIGINT SIGQUIT SIGTERM

ALPINE_IMAGE_PATH=/usr/share/nginx/html/images/alpine.iso
CIRROS_IMAGE_PATH=/usr/share/nginx/html/images/cirros.img
FEDORA_IMAGE_PATH=/usr/share/nginx/html/images/fedora.img
IMAGE_PATH=/images
IMAGE_NAME=${IMAGE_NAME:-cirros}

case "$IMAGE_NAME" in
cirros) CONVERT_PATH=$CIRROS_IMAGE_PATH ;;
alpine) CONVERT_PATH=$ALPINE_IMAGE_PATH ;;
fedora-cloud) CONVERT_PATH=$FEDORA_IMAGE_PATH ;;
*)
echo "failed to find image $IMAGE_NAME"
;;
15 changes: 6 additions & 9 deletions pkg/util/types/pvc.go
@@ -59,22 +59,19 @@ func isPVCBlock(pvc *k8sv1.PersistentVolumeClaim) bool {
return pvc.Spec.VolumeMode != nil && *pvc.Spec.VolumeMode == k8sv1.PersistentVolumeBlock
}

func IsPVCShared(pvc *k8sv1.PersistentVolumeClaim) (isShared bool) {
func IsPVCShared(pvc *k8sv1.PersistentVolumeClaim) bool {
for _, accessMode := range pvc.Spec.AccessModes {
if accessMode == k8sv1.ReadWriteMany {
isShared = true
break
return true
}
}
return
return false
}

func IsSharedPVCFromClient(client kubecli.KubevirtClient, namespace string, claimName string) (pvc *k8sv1.PersistentVolumeClaim, isShared bool, err error) {
pvc, err = client.CoreV1().PersistentVolumeClaims(namespace).Get(claimName, v1.GetOptions{})
if err != nil {
return nil, false, err
if err == nil {
isShared = IsPVCShared(pvc)
}

isShared = IsPVCShared(pvc)
return pvc, isShared, nil
return
}
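
For readers skimming the hunk, a minimal self-contained sketch of the simplified helper and its behavior; the package scaffolding and `main` are illustrative only:

```go
package main

import (
	"fmt"

	k8sv1 "k8s.io/api/core/v1"
)

// isPVCShared mirrors the refactored IsPVCShared above: a claim counts
// as shared iff any of its access modes is ReadWriteMany.
func isPVCShared(pvc *k8sv1.PersistentVolumeClaim) bool {
	for _, mode := range pvc.Spec.AccessModes {
		if mode == k8sv1.ReadWriteMany {
			return true
		}
	}
	return false
}

func main() {
	rwx := &k8sv1.PersistentVolumeClaim{Spec: k8sv1.PersistentVolumeClaimSpec{
		AccessModes: []k8sv1.PersistentVolumeAccessMode{k8sv1.ReadWriteMany},
	}}
	rwo := &k8sv1.PersistentVolumeClaim{Spec: k8sv1.PersistentVolumeClaimSpec{
		AccessModes: []k8sv1.PersistentVolumeAccessMode{k8sv1.ReadWriteOnce},
	}}
	fmt.Println(isPVCShared(rwx), isPVCShared(rwo)) // true false
}
```

Note that `IsSharedPVCFromClient` now relies on its named return values: on a lookup error it returns the zero values along with the error, rather than the explicit `nil, false, err` triple.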
23 changes: 21 additions & 2 deletions pkg/virt-controller/services/template.go
@@ -442,12 +442,31 @@ func (t *templateService) RenderLaunchManifest(vmi *v1.VirtualMachineInstance) (
})
}
if volume.DataVolume != nil {
volumeMounts = append(volumeMounts, volumeMount)
logger := log.DefaultLogger()
claimName := volume.DataVolume.Name
_, exists, isBlock, err := types.IsPVCBlockFromStore(t.persistentVolumeClaimStore, namespace, claimName)
if err != nil {
logger.Errorf("error getting PVC associated with DataVolume: %v", claimName)
return nil, err
} else if !exists {
logger.Errorf("didn't find PVC associated with DataVolume: %v", claimName)
return nil, PvcNotFoundError(fmt.Errorf("didn't find PVC associated with DataVolume: %v", claimName))
} else if isBlock {
devicePath := filepath.Join(string(filepath.Separator), "dev", volume.Name)
device := k8sv1.VolumeDevice{
Name: volume.Name,
DevicePath: devicePath,
}
volumeDevices = append(volumeDevices, device)
} else {
volumeMounts = append(volumeMounts, volumeMount)
}

volumes = append(volumes, k8sv1.Volume{
Name: volume.Name,
VolumeSource: k8sv1.VolumeSource{
PersistentVolumeClaim: &k8sv1.PersistentVolumeClaimVolumeSource{
ClaimName: volume.DataVolume.Name,
ClaimName: claimName,
},
},
})
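The hunk above is the core of the Block DV fix: the PVC backing a DataVolume is looked up in the informer store, and a block-mode PVC is attached as a `volumeDevice` under `/dev/<volume name>` rather than as a `volumeMount`. A hedged sketch of just that branching, reduced to a standalone helper (the function and mount path below are illustrative, not the actual KubeVirt API):

```go
package main

import (
	"fmt"
	"path/filepath"

	k8sv1 "k8s.io/api/core/v1"
)

// placeDisk mirrors the branch above: block PVCs surface in the
// launcher pod as raw devices, filesystem PVCs as mounts.
func placeDisk(name string, isBlock bool,
	devices []k8sv1.VolumeDevice, mounts []k8sv1.VolumeMount,
) ([]k8sv1.VolumeDevice, []k8sv1.VolumeMount) {
	if isBlock {
		devices = append(devices, k8sv1.VolumeDevice{
			Name:       name,
			DevicePath: filepath.Join(string(filepath.Separator), "dev", name),
		})
	} else {
		mounts = append(mounts, k8sv1.VolumeMount{
			Name:      name,
			MountPath: filepath.Join("/illustrative/mount/point", name), // placeholder path
		})
	}
	return devices, mounts
}

func main() {
	devices, mounts := placeDisk("dv-block", true, nil, nil)
	devices, mounts = placeDisk("dv-fs", false, devices, mounts)
	fmt.Printf("%d device(s), %d mount(s)\n", len(devices), len(mounts)) // 1 device(s), 1 mount(s)
}
```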
14 changes: 14 additions & 0 deletions pkg/virt-controller/watch/vmi_test.go
@@ -142,11 +142,13 @@ var _ = Describe("VirtualMachineInstance watcher", func() {
syncCaches := func(stop chan struct{}) {
go vmiInformer.Run(stop)
go podInformer.Run(stop)
go pvcInformer.Run(stop)

go dataVolumeInformer.Run(stop)
Expect(cache.WaitForCacheSync(stop,
vmiInformer.HasSynced,
podInformer.HasSynced,
pvcInformer.HasSynced,
dataVolumeInformer.HasSynced)).To(BeTrue())
}

@@ -217,6 +219,18 @@
},
})

dvPVC := &k8sv1.PersistentVolumeClaim{
TypeMeta: metav1.TypeMeta{
Kind: "PersistentVolumeClaim",
APIVersion: "v1"},
ObjectMeta: metav1.ObjectMeta{
Namespace: vmi.Namespace,
Name: "test1"},
}
// we are mocking a successful DataVolume. we expect the PVC to
// be available in the store if DV is successful.
pvcInformer.GetIndexer().Add(dvPVC)

dataVolume := &cdiv1.DataVolume{
ObjectMeta: metav1.ObjectMeta{
Name: "test1",
11 changes: 3 additions & 8 deletions pkg/virt-handler/vm.go
@@ -1248,18 +1248,13 @@ func (d *VirtualMachineController) checkVolumesForMigration(vmi *v1.VirtualMachi
} else if err != nil {
return blockMigrate, err
}
blockMigrate = blockMigrate || !shared
if !shared {
return blockMigrate, fmt.Errorf("cannot migrate VMI with non-shared PVCs")
return true, fmt.Errorf("cannot migrate VMI with non-shared PVCs")
}
} else if volSrc.HostDisk != nil {
shared := false
if volSrc.HostDisk.Shared != nil {
shared = *volSrc.HostDisk.Shared
}
blockMigrate = blockMigrate || !shared
shared := volSrc.HostDisk.Shared != nil && *volSrc.HostDisk.Shared
if !shared {
return blockMigrate, fmt.Errorf("cannot migrate VMI with non-shared HostDisk")
return true, fmt.Errorf("cannot migrate VMI with non-shared HostDisk")
}
} else {
blockMigrate = true
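Two things change in this hunk: the accumulated `blockMigrate = blockMigrate || !shared` bookkeeping gives way to an immediate `return true, err` once a non-migratable volume is found, and the nil-guarded read of the optional `HostDisk.Shared` flag collapses into a single expression. The latter is a common Go idiom for optional `*bool` fields; a tiny self-contained sketch:

```go
package main

import "fmt"

// boolOrFalse treats a nil *bool the same as an explicit false, matching
// `shared := volSrc.HostDisk.Shared != nil && *volSrc.HostDisk.Shared` above.
// The right operand is only evaluated when the pointer is non-nil, so the
// dereference is safe.
func boolOrFalse(b *bool) bool {
	return b != nil && *b
}

func main() {
	t := true
	fmt.Println(boolOrFalse(nil), boolOrFalse(&t)) // false true
}
```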
10 changes: 9 additions & 1 deletion pkg/virt-launcher/virtwrap/api/converter.go
@@ -62,6 +62,7 @@ type ConverterContext struct {
VirtualMachine *v1.VirtualMachineInstance
CPUSet []int
IsBlockPVC map[string]bool
IsBlockDV map[string]bool
DiskType map[string]*containerdisk.DiskInfo
SRIOVDevices map[string][]string
}
@@ -241,7 +242,7 @@ func Convert_v1_Volume_To_api_Disk(source *v1.Volume, disk *Disk, c *ConverterCo
}

if source.DataVolume != nil {
return Convert_v1_FilesystemVolumeSource_To_api_Disk(source.Name, disk, c)
return Convert_v1_DataVolume_To_api_Disk(source.Name, disk, c)
}

if source.Ephemeral != nil {
@@ -298,6 +299,13 @@ func Convert_v1_PersistentVolumeClaim_To_api_Disk(name string, disk *Disk, c *Co
return Convert_v1_FilesystemVolumeSource_To_api_Disk(name, disk, c)
}

func Convert_v1_DataVolume_To_api_Disk(name string, disk *Disk, c *ConverterContext) error {
if c.IsBlockDV[name] {
return Convert_v1_BlockVolumeSource_To_api_Disk(name, disk, c)
}
return Convert_v1_FilesystemVolumeSource_To_api_Disk(name, disk, c)
}

// Convert_v1_FilesystemVolumeSource_To_api_Disk takes a FS source and builds the KVM Disk representation
func Convert_v1_FilesystemVolumeSource_To_api_Disk(volumeName string, disk *Disk, c *ConverterContext) error {
disk.Type = "file"
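The new `Convert_v1_DataVolume_To_api_Disk` dispatches on the `IsBlockDV` map, keyed by volume name, just as PVC disks already dispatch on `IsBlockPVC`. A hedged, self-contained sketch of that dispatch (the helper below is illustrative, not the KubeVirt API):

```go
package main

import "fmt"

// diskType mirrors Convert_v1_DataVolume_To_api_Disk above: a DataVolume
// whose backing PVC is block-mode becomes a <disk type="block"> backed by
// /dev/<name>; everything else stays a <disk type="file">.
func diskType(isBlockDV map[string]bool, name string) string {
	if isBlockDV[name] {
		return "block"
	}
	return "file"
}

func main() {
	isBlockDV := map[string]bool{"dv_block_test": true}
	fmt.Println(diskType(isBlockDV, "dv_block_test")) // block
	fmt.Println(diskType(isBlockDV, "dv_fs_test"))    // file
}
```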
23 changes: 22 additions & 1 deletion pkg/virt-launcher/virtwrap/api/converter_test.go
@@ -233,6 +233,10 @@ var _ = Describe("Converter", func() {
Name: "pvc_block_test",
Cache: "writethrough",
},
{
Name: "dv_block_test",
Cache: "writethrough",
},
{
Name: "serviceaccount_test",
},
@@ -342,6 +346,14 @@
},
},
},
{
Name: "dv_block_test",
VolumeSource: v1.VolumeSource{
DataVolume: &v1.DataVolumeSource{
Name: "dv_block_test",
},
},
},
{
Name: "serviceaccount_test",
VolumeSource: v1.VolumeSource{
@@ -471,9 +483,15 @@
<driver cache="writethrough" name="qemu" type="raw" iothread="1"></driver>
<alias name="ua-pvc_block_test"></alias>
</disk>
<disk device="disk" type="block">
<source dev="/dev/dv_block_test"></source>
<target bus="sata" dev="sdh"></target>
<driver cache="writethrough" name="qemu" type="raw" iothread="1"></driver>
<alias name="ua-dv_block_test"></alias>
</disk>
<disk device="disk" type="file">
<source file="/var/run/kubevirt-private/service-account-disk/service-account.iso"></source>
<target bus="sata" dev="sdh"></target>
<target bus="sata" dev="sdi"></target>
<driver name="qemu" type="raw" iothread="1"></driver>
<alias name="ua-serviceaccount_test"></alias>
</disk>
@@ -541,6 +559,8 @@

isBlockPVCMap := make(map[string]bool)
isBlockPVCMap["pvc_block_test"] = true
isBlockDVMap := make(map[string]bool)
isBlockDVMap["dv_block_test"] = true
BeforeEach(func() {
c = &ConverterContext{
VirtualMachine: vmi,
@@ -553,6 +573,7 @@
},
UseEmulation: true,
IsBlockPVC: isBlockPVCMap,
IsBlockDV: isBlockDVMap,
SRIOVDevices: map[string][]string{},
}
})