ci: add single disk and hotplug test
    - SingleDisk: autoprovision, unprovision, provision
    - Hotplug: add, remove

Signed-off-by: Vicente Cheng <vicente.cheng@suse.com>
Vicente-Cheng committed Mar 28, 2023
1 parent 94d449c commit 7311c01
Showing 7 changed files with 321 additions and 109 deletions.
6 changes: 4 additions & 2 deletions .github/workflows/basic-ci.yaml
@@ -59,14 +59,16 @@ jobs:
run: |
pushd ndm-vagrant-rancherd
./scripts/attach-disk.sh node1
sleep 30
vagrant ssh-config node1 > ../ssh-config
cp kubeconfig ../kubeconfig
popd
make ci-integration
echo Running integration tests
NDM_HOME=`pwd` go test -v ./tests/...
- name: The Test failed
if: ${{ failure() && steps.basic-test.conclusion == 'failure' }}
run: |
./ci/scripts/get-ndm-log.sh
./ci/scripts/get-debug-info.sh
teardown:
needs: tests
runs-on:
6 changes: 5 additions & 1 deletion ci/scripts/get-ndm-log.sh → ci/scripts/get-debug-info.sh
@@ -7,4 +7,8 @@ export KUBECONFIG=kubeconfig
NDMPOD=$(kubectl get pods -n harvester-system --field-selector spec.nodeName=$TARGETNODE |grep ^harvester-node-disk-manager |awk '{print $1}')

# filter out the redundant Skip log
kubectl logs $NDMPOD -n harvester-system |grep -v Skip
kubectl logs $NDMPOD -n harvester-system |grep -v Skip

# get blockdevices info
echo "========== Dump blockdevices =========="
kubectl get blockdevice -n longhorn-system -o yaml
@@ -30,6 +30,7 @@ spec:
imagePullPolicy: {{ .Values.image.pullPolicy }}
command:
- node-disk-manager
- --debug
env:
{{- with .Values.vendorFilter }}
- name: NDM_VENDOR_FILTER
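(The new --debug flag is presumably what raises the log level so that the Debugf trace added in the controller change below actually shows up in the logs collected by get-debug-info.sh.)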
2 changes: 1 addition & 1 deletion pkg/controller/blockdevice/controller.go
@@ -437,7 +437,7 @@ func (c *Controller) unprovisionDeviceFromNode(device *diskv1.BlockDevice) error
} else {
// Still unprovisioning
c.Blockdevices.EnqueueAfter(c.Namespace, device.Name, jitterEnqueueDelay())
logrus.Debugf("device %s is unprovisioning", device.Name)
logrus.Debugf("device %s is unprovisioning, status: %+v, ScheduledReplica: %d", device.Name, node.Status.DiskStatus[device.Name], len(status.ScheduledReplica))
}
} else {
// Start unprovisioning
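The requeue above uses a jittered delay helper that this commit does not touch. For orientation, here is a minimal sketch of what jitterEnqueueDelay might look like, assuming the 7~13 second window quoted in the new test comments (a 10-second base with up to ±3 seconds of jitter); the helper's real implementation may differ:

package blockdevice

import (
	"math/rand"
	"time"
)

// jitterEnqueueDelay returns a randomized requeue delay so repeated
// unprovision checks do not fire in lockstep. Sketch only: a 10s base
// with ±3s of jitter, matching the 7~13s range the tests rely on.
func jitterEnqueueDelay() time.Duration {
	jitterMs := rand.Int63n(6001) - 3000 // uniform in [-3000, 3000] ms
	return 10*time.Second + time.Duration(jitterMs)*time.Millisecond
}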
105 changes: 0 additions & 105 deletions tests/integration/add_disk_test.go

This file was deleted.

150 changes: 150 additions & 0 deletions tests/integration/test_0_single_disk_test.go
@@ -0,0 +1,150 @@
package integration

import (
"context"
"os"
"path/filepath"
"strings"
"testing"
"time"

"github.com/kevinburke/ssh_config"
"github.com/melbahja/goph"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/tools/clientcmd"

diskv1 "github.com/harvester/node-disk-manager/pkg/apis/harvesterhci.io/v1beta1"
clientset "github.com/harvester/node-disk-manager/pkg/generated/clientset/versioned"
)

type SingleDiskSuite struct {
suite.Suite
SSHClient *goph.Client
clientSet *clientset.Clientset
targetNodeName string
targetDiskName string
}

type ProvisionedDisk struct {
devPath string
UUID string
}

func (s *SingleDiskSuite) SetupSuite() {
nodeName := ""
f, _ := os.Open(filepath.Join(os.Getenv("NDM_HOME"), "ssh-config"))
cfg, _ := ssh_config.Decode(f)
// consider wildcard, so length should be 2
require.Equal(s.T(), len(cfg.Hosts), 2, "number of Hosts on SSH-config should be 2 (wildcard + target node)")
for _, host := range cfg.Hosts {
if host.String() == "" {
// wildcard, continue
continue
}
nodeName = host.Patterns[0].String()
break
}
require.NotEqual(s.T(), nodeName, "", "nodeName should not be empty.")
s.targetNodeName = nodeName
targetHost, _ := cfg.Get(nodeName, "HostName")
targetUser, _ := cfg.Get(nodeName, "User")
targetPrivateKey, _ := cfg.Get(nodeName, "IdentityFile")
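// Note: vagrant ssh-config writes an absolute IdentityFile path; the split
// below rebases it onto NDM_HOME so the key resolves inside this checkout.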
splitResult := strings.Split(targetPrivateKey, "node-disk-manager/")
privateKey := filepath.Join(os.Getenv("NDM_HOME"), splitResult[len(splitResult)-1])
// Start new ssh connection with private key.
auth, err := goph.Key(privateKey, "")
require.Equal(s.T(), err, nil, "generate ssh auth key should not get error")

s.SSHClient, err = goph.NewUnknown(targetUser, targetHost, auth)
require.Equal(s.T(), err, nil, "New ssh connection should not get error")

kubeconfig := filepath.Join(os.Getenv("NDM_HOME"), "kubeconfig")
config, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
require.Equal(s.T(), err, nil, "Generate kubeconfig should not get error")

s.clientSet, err = clientset.NewForConfig(config)
require.Equal(s.T(), err, nil, "New clientset should not get error")
}

func (s *SingleDiskSuite) AfterTest(_, _ string) {
if s.SSHClient != nil {
s.SSHClient.Close()
}
}

func TestSingleDiskOperation(t *testing.T) {
suite.Run(t, new(SingleDiskSuite))
}
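// Note: testify executes a suite's Test methods in lexical order, so the
// Test_0/Test_1/Test_2 prefixes pin the sequence: auto-provision first,
// then unprovision, then manual provision.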

func (s *SingleDiskSuite) Test_0_AutoProvisionSingleDisk() {
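// The CI workflow attaches an extra disk (scripts/attach-disk.sh) and sleeps
// before the tests run, so NDM should already have auto-provisioned it;
// this test only verifies the outcome.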
// prepare to check the added disk
var provisionedDisk ProvisionedDisk
bdi := s.clientSet.HarvesterhciV1beta1().BlockDevices("longhorn-system")
bdList, err := bdi.List(context.TODO(), v1.ListOptions{})
require.Equal(s.T(), err, nil, "Get BlockdevicesList should not get error")
for _, blockdevice := range bdList.Items {
if blockdevice.Spec.NodeName != s.targetNodeName {
// focus on the target node
continue
}
bdStatus := blockdevice.Status
if bdStatus.State == "Active" && bdStatus.ProvisionPhase == "Provisioned" {
s.targetDiskName = blockdevice.Name
// get from blockdevice resource
provisionedDisk.devPath = bdStatus.DeviceStatus.DevPath
provisionedDisk.UUID = bdStatus.DeviceStatus.Details.UUID

// checking with the device on the host
cmd := "sudo blkid -s UUID name -o value " + provisionedDisk.devPath
out, err := s.SSHClient.Run(cmd)
require.Equal(s.T(), err, nil, "Running command `blkid` should not get error")
require.NotEqual(s.T(), "", string(out), "blkid command should not return empty, ", provisionedDisk.devPath)
convertedOutput := strings.Split(string(out), "\n")[0]
require.Equal(s.T(), provisionedDisk.UUID, convertedOutput, "Provisioned disk UUID should be the same")
}
}
}

func (s *SingleDiskSuite) Test_1_UnprovisionSingleDisk() {
require.NotEqual(s.T(), s.targetDiskName, "", "target disk name should not be empty before we run the unprovision test")
bdi := s.clientSet.HarvesterhciV1beta1().BlockDevices("longhorn-system")
curBlockdevice, err := bdi.Get(context.TODO(), s.targetDiskName, v1.GetOptions{})
require.Equal(s.T(), err, nil, "Get Blockdevices should not get error")

newBlockdevice := curBlockdevice.DeepCopy()
newBlockdevice.Spec.FileSystem.Provisioned = false
bdi.Update(context.TODO(), newBlockdevice, v1.UpdateOptions{})

// sleep 30 seconds to wait for the controller to handle the update; the requeue jitter is 7~13 seconds, so 30 seconds is enough for it to run twice
time.Sleep(30 * time.Second)

// check for the removed status
curBlockdevice, err = bdi.Get(context.TODO(), s.targetDiskName, v1.GetOptions{})
require.Equal(s.T(), err, nil, "Get BlockdevicesList should not get error before we want to check remove")
require.Equal(s.T(), curBlockdevice.Status.DeviceStatus.FileSystem.MountPoint, "", "Mountpoint should be empty after we remove disk!")
require.Equal(s.T(), diskv1.ProvisionPhaseUnprovisioned, curBlockdevice.Status.ProvisionPhase, "Block device provisionPhase should be Provisioned")

}

func (s *SingleDiskSuite) Test_2_ManuallyProvisionSingleDisk() {
require.NotEqual(s.T(), s.targetDiskName, "", "target disk name should not be empty before we run the provision test")
bdi := s.clientSet.HarvesterhciV1beta1().BlockDevices("longhorn-system")
curBlockdevice, err := bdi.Get(context.TODO(), s.targetDiskName, v1.GetOptions{})
require.Equal(s.T(), err, nil, "Get Blockdevices should not get error")

newBlockdevice := curBlockdevice.DeepCopy()
newBlockdevice.Spec.FileSystem.Provisioned = true
bdi.Update(context.TODO(), newBlockdevice, v1.UpdateOptions{})

// sleep 3 seconds to wait for the controller to handle the update
time.Sleep(3 * time.Second)

// check for the added status
curBlockdevice, err = bdi.Get(context.TODO(), s.targetDiskName, v1.GetOptions{})
require.Equal(s.T(), err, nil, "Get BlockdevicesList should not get error before we want to check remove")
require.NotEqual(s.T(), curBlockdevice.Status.DeviceStatus.FileSystem.MountPoint, "", "Mountpoint should be empty after we remove disk!")
require.Equal(s.T(), diskv1.ProvisionPhaseProvisioned, curBlockdevice.Status.ProvisionPhase, "Block device provisionPhase should be Provisioned")
require.Equal(s.T(), diskv1.BlockDeviceActive, curBlockdevice.Status.State, "Block device State should be Active")
}
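As the workflow change above wires up, the suite expects ssh-config and kubeconfig to sit under NDM_HOME and is run with: NDM_HOME=`pwd` go test -v ./tests/...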