Skip to content

Commit

Permalink
ci: add upgrade
Browse files Browse the repository at this point in the history
    - Add upgrade test on CI. The detailed steps are as below

      1. Install latest version
      2. Add disk
      3. Upgrade to the developing image
      4. Run integration test

Signed-off-by: Vicente Cheng <vicente.cheng@suse.com>
  • Loading branch information
Vicente-Cheng committed Jul 3, 2023
1 parent e6c1457 commit c0b0b7f
Show file tree
Hide file tree
Showing 5 changed files with 105 additions and 23 deletions.
14 changes: 12 additions & 2 deletions .github/workflows/basic-ci.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -51,12 +51,22 @@ jobs:
cp ../ci/scripts/deploy_ndm.sh ./deploy_ndm.sh
./deploy_ndm.sh
popd
- name: "Basic Test"
id: basic-test
- name: "Add disk"
run: |
pushd ndm-vagrant-rancherd
./scripts/attach-disk.sh node1
sleep 30
popd
- name: "Upgrade NDM"
run: |
pushd ndm-vagrant-rancherd
cp ../ci/scripts/upgrade_ndm.sh ./upgrade_ndm.sh
./upgrade_ndm.sh
popd
- name: "Run Basic Test"
id: basic-test
run: |
pushd ndm-vagrant-rancherd
vagrant ssh-config node1 > ../ssh-config
cp kubeconfig ../kubeconfig
popd
Expand Down
20 changes: 5 additions & 15 deletions ci/scripts/deploy_ndm.sh
Original file line number Diff line number Diff line change
Expand Up @@ -83,24 +83,14 @@ echo "cluster nodes: $cluster_nodes"
ensure_longhorn_ready

pushd $TOP_DIR
# cleanup first
rm -rf harvester-node-disk-manager

cp -r ../deploy/charts/harvester-node-disk-manager harvester-node-disk-manager
cp ../ci/charts/ndm-override.yaml ndm-override.yaml
cat >> ndm-override.yaml << 'EOF'
autoProvisionFilter: [/dev/sd*]
EOF

$HELM install -f $TOP_DIR/ndm-override.yaml harvester-node-disk-manager harvester-node-disk-manager/ --create-namespace -n harvester-system
$HELM pull harvester-node-disk-manager --repo https://charts.harvesterhci.io --untar
$HELM install -f $TOP_DIR/ndm-override.yaml harvester-node-disk-manager ./harvester-node-disk-manager --create-namespace -n harvester-system

wait_ndm_ready
# check image
pod_name=$(kubectl get pods -n harvester-system |grep ^harvester-node-disk-manager|head -n1 |awk '{print $1}')
container_img=$(kubectl get pods ${pod_name} -n harvester-system -o yaml |yq -e .spec.containers[0].image |tr ":" \n)
yaml_img=$(yq -e .image.repository ndm-override.yaml)
if grep -q ${yaml_img} <<< ${container_img}; then
echo "Image is equal: ${yaml_img}"
else
echo "Image is non-equal, container: ${container_img}, yaml file: ${yaml_img}"
exit 1
fi
echo "harvester-node-disk-manager is ready"
popd
73 changes: 73 additions & 0 deletions ci/scripts/upgrade_ndm.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,73 @@
#!/bin/bash -e

# Upgrade the harvester-node-disk-manager Helm release on an existing
# test cluster to the in-repo (development) chart, then verify the
# respawned pods run the expected image.
#
# Re-assert errexit with `set` because options in the shebang line are
# lost when the script is invoked as `bash upgrade_ndm.sh`.
set -e

# Absolute path of the directory containing this script, so relative
# paths (kubeconfig, settings.yaml, charts) work from any CWD.
TOP_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )/" &> /dev/null && pwd )"

#######################################
# Check whether a command is available on PATH.
# Arguments: $1 - command name to look up
# Outputs:   "0" to stdout if found, "1" if missing
#######################################
ensure_command() {
  local cmd=$1
  # `command -v` is the portable builtin check; `which` is an external
  # tool with inconsistent behavior across distributions.
  if ! command -v "$cmd" &> /dev/null; then
    echo 1
    return
  fi
  echo 0
}

#######################################
# Block until the harvester-node-disk-manager DaemonSet reports one
# ready pod per cluster node, polling every 10 seconds.
# Globals:   cluster_nodes (read) - expected ready count, set by caller
# Outputs:   progress messages to stdout
#######################################
wait_ndm_ready() {
  # `while true` replaces `while [ true ]`, which only tested that the
  # literal string "true" is non-empty — same effect, clearer intent.
  while true; do
    running_num=$(kubectl get ds harvester-node-disk-manager -n harvester-system -o 'jsonpath={.status.numberReady}')
    if [[ "$running_num" -eq "$cluster_nodes" ]]; then
      echo "harvester-node-disk-manager pods are ready!"
      break
    fi
    echo "harvester-node-disk-manager pods are not ready (${running_num}/${cluster_nodes}), sleep 10 seconds."
    sleep 10
  done
}

# --- Preconditions ----------------------------------------------------
# The cluster must already exist; its kubeconfig is expected next to us.
if [ ! -f "$TOP_DIR/kubeconfig" ]; then
  echo "kubeconfig does not exist. Please create cluster first."
  echo "Maybe try new_cluster.sh"
  exit 1
fi
echo "$TOP_DIR/kubeconfig"
export KUBECONFIG=$TOP_DIR/kubeconfig

# Locate helm, downloading a pinned release when it is not installed.
if [[ $(ensure_command helm) -eq 1 ]]; then
  echo "no helm, try to curl..."
  curl -O https://get.helm.sh/helm-v3.9.4-linux-amd64.tar.gz
  tar -zxvf helm-v3.9.4-linux-amd64.tar.gz
  HELM=$TOP_DIR/linux-amd64/helm
  "$HELM" version
else
  echo "Get helm, version info as below"
  # `command -v` instead of `which`: portable builtin, same output here.
  HELM=$(command -v helm)
  "$HELM" version
fi

# Expected node count drives the readiness check in wait_ndm_ready.
cluster_nodes=$(yq -e e '.cluster_size' "$TOP_DIR/settings.yaml")
echo "cluster nodes: $cluster_nodes"

pushd "$TOP_DIR"
# Clean up leftovers from any previous run so we start from a known state.
rm -rf harvester-node-disk-manager*
rm -rf ndm-override.yaml

# Use the in-repo (development) chart plus the CI override values.
cp -r ../deploy/charts/harvester-node-disk-manager harvester-node-disk-manager
cp ../ci/charts/ndm-override.yaml ndm-override.yaml

"$HELM" upgrade -f "$TOP_DIR/ndm-override.yaml" harvester-node-disk-manager harvester-node-disk-manager/ -n harvester-system

sleep 10 # wait 10 seconds for ndm start to respawn pods

wait_ndm_ready
# Verify the running pod's image repository matches the override file.
pod_name=$(kubectl get pods -n harvester-system | grep ^harvester-node-disk-manager | head -n1 | awk '{print $1}')
# Split "repo:tag" onto separate lines. NOTE: the newline must be quoted
# ("\n"); an unquoted \n reaches tr as the literal letter "n" and would
# translate every colon to "n" instead of splitting.
container_img=$(kubectl get pods "${pod_name}" -n harvester-system -o yaml | yq -e .spec.containers[0].image | tr ":" "\n")
yaml_img=$(yq -e .image.repository ndm-override.yaml)
# `--` guards grep against a pattern that starts with a dash.
if grep -q -- "${yaml_img}" <<< "${container_img}"; then
  echo "Image is equal: ${yaml_img}"
else
  echo "Image is non-equal, container: ${container_img}, yaml file: ${yaml_img}"
  exit 1
fi
echo "harvester-node-disk-manager upgrade successfully!"
popd
4 changes: 2 additions & 2 deletions deploy/charts/harvester-node-disk-manager/Chart.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -15,13 +15,13 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.4.8
version: 0.4.11

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: v0.4.8
appVersion: 0.4.11

maintainers:
- name: harvester
Original file line number Diff line number Diff line change
Expand Up @@ -32,8 +32,8 @@ spec:
- jsonPath: .spec.nodeName
name: NodeName
type: string
- jsonPath: .status.state
name: Status
- jsonPath: .status.provisionPhase
name: ProvisionPhase
type: string
- jsonPath: .metadata.creationTimestamp
name: Age
Expand Down Expand Up @@ -66,13 +66,18 @@ spec:
overwrite the existing one
type: boolean
mountPoint:
description: a string with the partition's mount point, or ""
if no mount point was discovered
description: 'DEPRECATED: no longer use and has no effect. a string
with the partition''s mount point, or "" if no mount point was
discovered'
type: string
provisioned:
description: a bool indicating whether the filesystem can be provisioned
as a disk for the node to store data.
type: boolean
repaired:
description: a bool indicating whether the filesystem is manually
repaired or not
type: boolean
required:
- mountPoint
type: object
Expand Down Expand Up @@ -223,6 +228,10 @@ spec:
when user operate device formatting through the CRD controller
format: date-time
type: string
corrupted:
description: indicating whether the filesystem is corrupted
or not
type: boolean
isReadOnly:
description: a bool indicating the partition is read-only
type: boolean
Expand Down

0 comments on commit c0b0b7f

Please sign in to comment.