name: Test K3s, Kubernetes, and MicroK8s
on:
  pull_request:
    branches: [main]
    paths:
      - test/e2e/**
      - .github/workflows/run-test-cases.yml
      - build/containers/Dockerfile.agent
      - build/containers/Dockerfile.controller
      - deployment/helm/**
      - agent/**
      - controller/**
      - shared/**
      - version.txt
      - build/akri-containers.mk
      - Makefile
  push:
    branches: [main]
    paths:
      - test/e2e/**
      - .github/workflows/run-test-cases.yml
      - build/containers/Dockerfile.agent
      - build/containers/Dockerfile.controller
      - deployment/helm/**
      - agent/**
      - controller/**
      - shared/**
      - version.txt
      - build/akri-containers.mk
      - Makefile
  release:
    types:
      - published
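
# Two jobs: `build-containers` builds the Akri images once per pull request,
# and `test-cases` installs each Kubernetes distribution from the matrix and
# runs the end-to-end test suite against it.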
jobs:
  build-containers:
    runs-on: ubuntu-latest
    timeout-minutes: 60
    steps:
      - name: Checkout the head commit of the branch
        uses: actions/checkout@v3
        with:
          persist-credentials: false
      - name: Rust install
        uses: dtolnay/rust-toolchain@master
        with:
          toolchain: 1.68.1
          components: clippy, rustfmt
      - name: Build local containers for PR tests
        if: startsWith(github.event_name, 'pull_request')
        env:
          BUILD_AMD64: 1
          BUILD_ARM32: 0
          BUILD_ARM64: 0
          BUILD_SLIM_AGENT: 1
          PREFIX: ghcr.io/project-akri/akri
          LABEL_PREFIX: pr
          CARGO_INCREMENTAL: 0
        run: |
          make akri-build
          make controller-build-amd64
          make agent-build-amd64
          make webhook-configuration-build-amd64
          make debug-echo-discovery-build-amd64
          make udev-discovery-build-amd64
          make onvif-discovery-build-amd64
          make opcua-discovery-build-amd64
          docker save ${PREFIX}/agent:${LABEL_PREFIX}-amd64 > agent.tar
          docker save ${PREFIX}/controller:${LABEL_PREFIX}-amd64 > controller.tar
          docker save ${PREFIX}/webhook-configuration:${LABEL_PREFIX}-amd64 > webhook-configuration.tar
          docker save ${PREFIX}/debug-echo-discovery:${LABEL_PREFIX}-amd64 > debug-echo-discovery.tar
          docker save ${PREFIX}/udev-discovery:${LABEL_PREFIX}-amd64 > udev-discovery.tar
          docker save ${PREFIX}/opcua-discovery:${LABEL_PREFIX}-amd64 > opcua-discovery.tar
          docker save ${PREFIX}/onvif-discovery:${LABEL_PREFIX}-amd64 > onvif-discovery.tar
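      # Each image is exported with `docker save` and uploaded as a workflow
      # artifact so the `test-cases` job can import it into the cluster under
      # test instead of pulling it from a registry.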
      - name: Upload Agent container as artifact
        if: startsWith(github.event_name, 'pull_request')
        uses: actions/upload-artifact@v3
        with:
          name: agent.tar
          path: agent.tar
      - name: Upload Controller container as artifact
        if: startsWith(github.event_name, 'pull_request')
        uses: actions/upload-artifact@v3
        with:
          name: controller.tar
          path: controller.tar
      - name: Upload Webhook-Configuration container as artifact
        if: startsWith(github.event_name, 'pull_request')
        uses: actions/upload-artifact@v3
        with:
          name: webhook-configuration.tar
          path: webhook-configuration.tar
      - name: Upload DebugEcho discovery container as artifact
        if: startsWith(github.event_name, 'pull_request')
        uses: actions/upload-artifact@v3
        with:
          name: debug-echo-discovery.tar
          path: debug-echo-discovery.tar
      - name: Upload UDEV discovery container as artifact
        if: startsWith(github.event_name, 'pull_request')
        uses: actions/upload-artifact@v3
        with:
          name: udev-discovery.tar
          path: udev-discovery.tar
      - name: Upload ONVIF discovery container as artifact
        if: startsWith(github.event_name, 'pull_request')
        uses: actions/upload-artifact@v3
        with:
          name: onvif-discovery.tar
          path: onvif-discovery.tar
      - name: Upload OPCUA discovery container as artifact
        if: startsWith(github.event_name, 'pull_request')
        uses: actions/upload-artifact@v3
        with:
          name: opcua-discovery.tar
          path: opcua-discovery.tar
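
  # Runs the e2e suite on every runtime/version combination from the matrix
  # below; `needs: build-containers` ensures the PR images exist before any
  # cluster is created.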
  test-cases:
    needs: build-containers
    runs-on: ubuntu-latest
    timeout-minutes: 90
    strategy:
      fail-fast: false
      matrix:
        kube:
          - runtime: microk8s
            version: 1.25/stable
          - runtime: microk8s
            version: 1.26/stable
          - runtime: microk8s
            version: 1.27/stable
          - runtime: microk8s
            version: 1.28/stable
          - runtime: k3s
            version: v1.25.13+k3s1
          - runtime: k3s
            version: v1.26.8+k3s1
          - runtime: k3s
            version: v1.27.5+k3s1
          - runtime: k3s
            version: v1.28.1+k3s1
          - runtime: k8s
            version: 1.25.13
          - runtime: k8s
            version: 1.26.8
          - runtime: k8s
            version: 1.27.5
          - runtime: k8s
            version: 1.28.1
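    # The matrix covers MicroK8s, K3s, and kubeadm-based Kubernetes ("k8s")
    # across the 1.25-1.28 minor versions; fail-fast is disabled so one
    # failing combination does not cancel the others.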
    steps:
      - name: Checkout the head commit of the branch
        uses: actions/checkout@v3
        with:
          persist-credentials: false
      - name: Setup Python
        uses: actions/setup-python@v4
        with:
          python-version: "3.10"
      - name: Install Poetry and dependencies
        working-directory: ./test/e2e
        run: |
          POETRY_HOME=/opt/poetry
          python3 -m venv $POETRY_HOME
          $POETRY_HOME/bin/pip install poetry==1.5.1
          $POETRY_HOME/bin/poetry --version
          $POETRY_HOME/bin/poetry install --no-root
      - name: Download container artifacts
        if: startsWith(github.event_name, 'pull_request')
        uses: actions/download-artifact@v3
        with:
          path: /tmp/images
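      # With no `name` specified, download-artifact@v3 fetches every artifact
      # from the build job, each into its own subdirectory under /tmp/images.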
      - if: startsWith(matrix.kube.runtime, 'k3s')
        name: Install K3s
        env:
          INSTALL_K3S_VERSION: ${{ matrix.kube.version }}
        run: |
          sudo curl -sfL https://get.k3s.io -o install.sh
          sudo chmod +x install.sh
          ./install.sh server --kubelet-arg=eviction-hard="imagefs.available<1%,nodefs.available<1%" --kubelet-arg=eviction-minimum-reclaim="imagefs.available=1%,nodefs.available=1%"
          mkdir -p $HOME/.kube
          sudo cp -i /etc/rancher/k3s/k3s.yaml $HOME/.kube/config
          sudo chown $(id -u):$(id -g) $HOME/.kube/config
          until kubectl get node ${HOSTNAME,,} -o jsonpath='{@.metadata.name}:{range @.status.conditions[*]}{@.type}={@.status}' | grep 'Ready=True'; do echo "waiting for k3s to become ready"; sleep 10; done
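      # K3s ships its own containerd, so the PR-built image tarballs are
      # imported with the bundled `k3s ctr` below rather than pulled from a registry.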
      - if: (startsWith(github.event_name, 'pull_request')) && (startsWith(matrix.kube.runtime, 'k3s'))
        name: Import local agent and controller to K3s
        working-directory: /tmp/images
        run: |
          sudo find -name "*.tar" -type f -exec k3s ctr image import {} \;
      - if: startsWith(matrix.kube.runtime, 'k8s')
        name: Install Kubernetes
        run: |
          SHORTVERSION=$(cut -d '.' -f 1,2 <<< "${{ matrix.kube.version }}")
          sudo apt-get update -y
          sudo apt-get install -y apt-transport-https ca-certificates curl
          curl -fsSL https://pkgs.k8s.io/core:/stable:/v${SHORTVERSION}/deb/Release.key | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg
          echo "deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v${SHORTVERSION}/deb/ /" | sudo tee /etc/apt/sources.list.d/kubernetes.list
          sudo curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker.gpg
          echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list
          sudo apt-get update
          sudo apt-get install -o Dpkg::Options::="--force-overwrite" -y --allow-downgrades kubelet=${{ matrix.kube.version }}-* kubeadm=${{ matrix.kube.version }}-* kubectl=${{ matrix.kube.version }}-* containerd.io
          kubectl version && echo "kubectl return code: $?" || echo "kubectl return code: $?"
          kubeadm version && echo "kubeadm return code: $?" || echo "kubeadm return code: $?"
          kubelet --version && echo "kubelet return code: $?" || echo "kubelet return code: $?"
          cat <<EOF | sudo tee /etc/modules-load.d/containerd.conf
          overlay
          br_netfilter
          EOF
          sudo modprobe overlay
          sudo modprobe br_netfilter
          cat <<EOF | sudo tee /etc/sysctl.d/99-kubernetes-cri.conf
          net.bridge.bridge-nf-call-iptables = 1
          net.bridge.bridge-nf-call-ip6tables = 1
          net.ipv4.ip_forward = 1
          EOF
          sudo sysctl --system
          sudo mkdir -p /etc/containerd
          containerd config default | sudo tee /etc/containerd/config.toml
          sudo sed -i "s/SystemdCgroup = false/SystemdCgroup = true/g" /etc/containerd/config.toml
          sudo systemctl restart containerd
          sudo swapoff -a
          sudo kubeadm init --pod-network-cidr=10.244.0.0/16 --cri-socket=unix:///run/containerd/containerd.sock
          mkdir -p $HOME/.kube
          sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
          sudo chown $(id -u):$(id -g) $HOME/.kube/config
          kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
          verlte() { [ "$1" = "`echo -e "$1\n$2" | sort -V | head -n1`" ] ; }
          verlt() { [ "$1" = "$2" ] && return 1 || verlte $1 $2 ; }
          # Remove the control-plane taint so pods can schedule on this single node
          kubectl taint nodes --all node-role.kubernetes.io/control-plane-
          until kubectl get node ${HOSTNAME,,} -o jsonpath='{@.metadata.name}:{range @.status.conditions[*]}{@.type}={@.status}' | grep 'Ready=True'; do echo "waiting for kubernetes to become ready"; sleep 10; done
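      # Dump the kubelet and runtime journals so cluster bring-up failures on
      # the kubeadm path can be diagnosed from the workflow logs.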
      - if: startsWith(matrix.kube.runtime, 'k8s')
        name: Output Kubelet, Containerd, Docker Logs
        run: |
          echo "Kubelet Logs:"
          sudo journalctl -xeu kubelet --no-pager
          echo -e "\nContainerd Logs:"
          sudo journalctl -xeu containerd --no-pager
          echo -e "\nDocker Logs:"
          sudo journalctl -xeu docker --no-pager
      - if: (startsWith(github.event_name, 'pull_request')) && (startsWith(matrix.kube.runtime, 'k8s'))
        name: Import local agent and controller to Kubernetes
        working-directory: /tmp/images
        run: |
          # Need to load to containerd with ctr
          # -n allows kubernetes to see the image
          sudo find -name "*.tar" -type f -exec ctr -n=k8s.io image import {} \;
          sudo crictl --runtime-endpoint unix:///var/run/containerd/containerd.sock images
      - if: startsWith(matrix.kube.runtime, 'microk8s')
        name: Install MicroK8s
        run: |
          set -x
          sudo snap install microk8s --classic --channel=${{ matrix.kube.version }}
          sudo microk8s status --wait-ready
          sudo usermod -a -G microk8s $USER
          mkdir -p $HOME/.kube
          sudo microk8s.kubectl config view --raw > $HOME/.kube/config
          sudo chown $(id -u):$(id -g) $HOME/.kube/config
          sudo microk8s.enable rbac dns
          sudo sed -i 's/memory.available<100Mi,nodefs.available<1Gi,imagefs.available<1Gi/memory.available<25Mi,nodefs.available<50Mi,imagefs.available<50Mi/' /var/snap/microk8s/current/args/kubelet
          sudo systemctl restart snap.microk8s.daemon-kubelite
          until sudo microk8s.status --wait-ready; do sleep 5s; echo "Try again"; done
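      # MicroK8s also embeds containerd; the retry around `microk8s ctr images ls`
      # below waits for its containerd socket to respond before importing images.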
      - if: (startsWith(github.event_name, 'pull_request')) && (startsWith(matrix.kube.runtime, 'microk8s'))
        name: Import local agent and controller to MicroK8s
        working-directory: /tmp/images
        run: |
          sudo microk8s.status --wait-ready
          until sudo microk8s ctr images ls; do sleep 5s; echo "Try again"; done
          sudo microk8s ctr images ls
          sudo find -name "*.tar" -type f -exec microk8s ctr --debug --timeout 60s images import {} \;
          sudo microk8s ctr images ls
      - name: Add Akri Helm Chart
        run: helm repo add akri-helm-charts https://project-akri.github.io/akri/
      # For push and release events, wait for the Helm chart and associated
      # containers to be built and published before running the tests.
      - if: github.event_name == 'push' || github.event_name == 'release'
        name: Wait 2700s for published Helm chart and containers
        run: sleep 2700
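      # The case statement below picks pytest flags per trigger: pull requests
      # pass --use-local (presumably to test the locally imported PR images),
      # releases pass --release, and pushes pass no extra flag.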
      - name: Execute test script
        working-directory: ./test/e2e
        run: |
          case "${{ github.event_name }}" in "push");; "release") extra_param="--release";; *) extra_param="--use-local";; esac
          /opt/poetry/bin/poetry run pytest -v --distribution ${{ matrix.kube.runtime }} --test-version $(cat ../../version.txt) $extra_param
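      # Matrix versions such as "1.25/stable" contain characters that are not
      # valid in artifact names, so "/" and ":" are mapped to "_" below.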
      - name: Sanitize logs artifact name
        id: sanitize-artifact-name
        if: always()
        run: echo "name=${{ matrix.kube.runtime }}-${{ matrix.kube.version }}-logs" | tr "/:" "_" >> $GITHUB_OUTPUT
      - name: Upload logs as artifact
        if: always()
        uses: actions/upload-artifact@v3
        with:
          name: ${{ steps.sanitize-artifact-name.outputs.name }}
          path: /tmp/logs/