Skip to content

Commit

Permalink
Fix ClusterSet scoped policy rule not compatible with namespaces field (antrea-io#4571)
Browse files Browse the repository at this point in the history

Signed-off-by: Dyanngg <dingyang@vmware.com>
  • Loading branch information
Dyanngg authored Mar 15, 2023
1 parent c770205 commit 910511d
Show file tree
Hide file tree
Showing 17 changed files with 394 additions and 145 deletions.
22 changes: 19 additions & 3 deletions ci/jenkins/test-mc.sh
Original file line number Diff line number Diff line change
Expand Up @@ -190,6 +190,9 @@ function wait_for_antrea_multicluster_pods_ready {

function wait_for_multicluster_controller_ready {
echo "====== Deploying Antrea Multicluster Leader Cluster with ${LEADER_CLUSTER_CONFIG} ======"
leader_cluster_pod_cidr="10.244.0.0/20"
export leader_cluster_pod_cidr
perl -0777 -pi -e 's| podCIDRs\:\n - \"\"| podCIDRs\:\n - $ENV{leader_cluster_pod_cidr}|g' ./multicluster/test/yamls/leader-manifest.yml
kubectl create ns antrea-multicluster "${LEADER_CLUSTER_CONFIG}" || true
kubectl apply -f ./multicluster/build/yamls/antrea-multicluster-leader-global.yml "${LEADER_CLUSTER_CONFIG}"
kubectl apply -f ./multicluster/test/yamls/leader-manifest.yml "${LEADER_CLUSTER_CONFIG}"
Expand All @@ -206,10 +209,17 @@ function wait_for_multicluster_controller_ready {
sed -i 's/antrea-multicluster/kube-system/g' ./multicluster/test/yamls/leader-access-token.yml
echo "type: Opaque" >> ./multicluster/test/yamls/leader-access-token.yml

for config in "${membercluster_kubeconfigs[@]}";
member_cluster_pod_cidrs=("10.244.16.0/20" "10.244.32.0/20")
for i in "${!membercluster_kubeconfigs[@]}";
do
pod_cidr=${member_cluster_pod_cidrs[$i]}
export pod_cidr
cp ./multicluster/test/yamls/member-manifest.yml ./multicluster/test/yamls/member-manifest-$i.yml
perl -0777 -pi -e 's| podCIDRs\:\n - \"\"| podCIDRs\:\n - $ENV{pod_cidr}|g' ./multicluster/test/yamls/member-manifest-$i.yml

config=${membercluster_kubeconfigs[$i]}
echo "====== Deploying Antrea Multicluster Member Cluster with ${config} ======"
kubectl apply -f ./multicluster/test/yamls/member-manifest.yml ${config}
kubectl apply -f ./multicluster/test/yamls/member-manifest-$i.yml ${config}
kubectl rollout status deployment/antrea-mc-controller -n kube-system ${config}
kubectl apply -f ./multicluster/test/yamls/leader-access-token.yml ${config}
done
Expand Down Expand Up @@ -254,6 +264,7 @@ function modify_config {
multicluster:
enableGateway: true
enableStretchedNetworkPolicy: true
enablePodToPodConnectivity: true
featureGates: {
Multicluster: true
}
Expand Down Expand Up @@ -349,8 +360,10 @@ function deliver_multicluster_controller {
sed -i "s|<LEADER_CLUSTER_IP>|${leader_ip}|" ./multicluster/test/yamls/west-member-cluster.yml
if [[ ${KIND} == "true" ]]; then
docker cp ./multicluster/test/yamls/test-acnp-copy-span-ns-isolation.yml leader-control-plane:/root/test-acnp-copy-span-ns-isolation.yml
docker cp ./multicluster/test/yamls/test-acnp-cross-cluster-ns-isolation.yml leader-control-plane:/root/test-acnp-cross-cluster-ns-isolation.yml
else
rsync -avr --progress --inplace -e "ssh -o StrictHostKeyChecking=no" ./multicluster/test/yamls/test-acnp-copy-span-ns-isolation.yml jenkins@["${leader_ip}"]:"${WORKDIR}"/test-acnp-copy-span-ns-isolation.yml
rsync -avr --progress --inplace -e "ssh -o StrictHostKeyChecking=no" ./multicluster/test/yamls/test-acnp-cross-cluster-ns-isolation.yml jenkins@["${leader_ip}"]:"${WORKDIR}"/test-acnp-cross-cluster-ns-isolation.yml
fi

for kubeconfig in "${membercluster_kubeconfigs[@]}"
Expand Down Expand Up @@ -430,6 +443,8 @@ function run_multicluster_e2e {
fi
set +x
set -e

tar -zcf antrea-test-logs.tar.gz antrea-multicluster-test-logs
}

function collect_coverage {
Expand All @@ -455,8 +470,9 @@ clean_images
if [[ ${KIND} == "true" ]]; then
# Preparing a ClusterSet that contains three Kind clusters.
SERVICE_CIDRS=("10.96.10.0/24" "10.96.20.0/24" "10.96.30.0/24")
POD_CIDRS=("10.244.0.0/20" "10.244.16.0/20" "10.244.32.0/20")
for i in {0..2}; do
./ci/kind/kind-setup.sh create ${CLUSTER_NAMES[$i]} --service-cidr ${SERVICE_CIDRS[$i]} --num-workers 1
./ci/kind/kind-setup.sh create ${CLUSTER_NAMES[$i]} --service-cidr ${SERVICE_CIDRS[$i]} --pod-cidr ${POD_CIDRS[$i]} --num-workers 1
done

for name in ${CLUSTER_NAMES[*]}; do
Expand Down

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

100 changes: 68 additions & 32 deletions multicluster/test/e2e/antreapolicy_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -24,10 +24,12 @@ import (
)

const (
// Provide enough time for policies to be enforced & deleted by the CNI plugin.
networkPolicyDelay = 2 * time.Second
acnpIsolationResourceExport = "test-acnp-copy-span-ns-isolation.yml"
acnpName = "antrea-mc-strict-namespace-isolation"
// Provide enough time for policies to be imported and enforced by the CNI plugin.
policyRealizedTimeout = 6 * time.Second
acnpIsolationResourceExport = "test-acnp-copy-span-ns-isolation.yml"
acnpIsolationName = "antrea-mc-strict-namespace-isolation"
acnpCrossClusterIsolationResExport = "test-acnp-cross-cluster-ns-isolation.yml"
acnpCrossClusterIsolationName = "antrea-mc-strict-namespace-isolation-cross-cluster"
)

var (
Expand Down Expand Up @@ -70,8 +72,10 @@ func initializeForPolicyTest(t *testing.T, data *MCTestData) {
d := data.clusterTestDataMap[clusterName]
k8sUtils, err := antreae2e.NewKubernetesUtils(&d)
failOnError(err, t)
_, err = k8sUtils.Bootstrap(perClusterNamespaces, perNamespacePods, true)
failOnError(err, t)
if clusterName != leaderCluster {
_, err = k8sUtils.Bootstrap(perClusterNamespaces, perNamespacePods, true)
failOnError(err, t)
}
clusterK8sUtilsMap[clusterName] = k8sUtils
}
}
Expand All @@ -83,18 +87,12 @@ func tearDownForPolicyTest() {
}
}

func testMCAntreaPolicy(t *testing.T, data *MCTestData) {
data.testAntreaPolicyCopySpanNSIsolation(t)
}

// testAntreaPolicyCopySpanNSIsolation tests that after applying a ResourceExport of an ACNP
// for Namespace isolation, strict Namespace isolation is enforced in each of the member clusters.
func (data *MCTestData) testAntreaPolicyCopySpanNSIsolation(t *testing.T) {
func testAntreaPolicyCopySpanNSIsolation(t *testing.T, data *MCTestData) {
setup := func() {
err := data.deployACNPResourceExport(t, acnpIsolationResourceExport)
failOnError(err, t)
// Sleep 5s to wait resource export/import process to finish resource exchange.
time.Sleep(5 * time.Second)
}
teardown := func() {
err := data.deleteACNPResourceExport(acnpIsolationResourceExport)
Expand All @@ -114,33 +112,71 @@ func (data *MCTestData) testAntreaPolicyCopySpanNSIsolation(t *testing.T) {
Steps: []*antreae2e.TestStep{testStep},
},
}
executeTestsOnAllMemberClusters(t, testCaseList, setup, teardown)
executeTestsOnAllMemberClusters(t, testCaseList, acnpIsolationName, setup, teardown, false)
}

// testAntreaPolicyCrossClusterNSIsolation tests that after applying a ResourceExport of an ACNP
// whose rules are scoped to the ClusterSet, strict Namespace isolation is enforced not only within
// each member cluster but also for cross-cluster traffic between member clusters: only traffic
// from a Pod's own Namespace (match: Self) is allowed, everything else is dropped.
func testAntreaPolicyCrossClusterNSIsolation(t *testing.T, data *MCTestData) {
	// setup deploys the ResourceExport in the leader cluster; the MC controller is
	// expected to replicate the contained ACNP to every member cluster.
	setup := func() {
		err := data.deployACNPResourceExport(t, acnpCrossClusterIsolationResExport)
		failOnError(err, t)
	}
	// teardown removes the ResourceExport so the replicated ACNP is cleaned up.
	teardown := func() {
		err := data.deleteACNPResourceExport(acnpCrossClusterIsolationResExport)
		failOnError(err, t)
	}
	// Default expectation is Dropped for all Pod pairs; only same-Namespace
	// connectivity is expected to succeed.
	reachability := antreae2e.NewReachability(allPodsPerCluster, antreae2e.Dropped)
	reachability.ExpectAllSelfNamespace(antreae2e.Connected)
	testStep := &antreae2e.TestStep{
		Name:         "Port 80",
		Reachability: reachability,
		Ports:        []int32{80},
		Protocol:     utils.ProtocolTCP,
	}
	testCaseList := []*antreae2e.TestCase{
		{
			Name:  "ACNP strict cross-cluster Namespace isolation",
			Steps: []*antreae2e.TestStep{testStep},
		},
	}
	// testCrossCluster=true: probe connectivity between Pods in different member
	// clusters as well, not just within each cluster.
	executeTestsOnAllMemberClusters(t, testCaseList, acnpCrossClusterIsolationName, setup, teardown, true)
}

func executeTestsOnAllMemberClusters(t *testing.T, testList []*antreae2e.TestCase, setup, teardown func()) {
func executeTestsOnAllMemberClusters(t *testing.T, testList []*antreae2e.TestCase, acnpName string, setup, teardown func(), testCrossCluster bool) {
setup()
time.Sleep(networkPolicyDelay)
for _, testCase := range testList {
t.Logf("Running test case %s", testCase.Name)
for _, step := range testCase.Steps {
t.Logf("Running step %s of test case %s", step.Name, testCase.Name)
reachability := step.Reachability
if reachability != nil {
for clusterName, k8sUtils := range clusterK8sUtilsMap {
if clusterName == leaderCluster {
// skip traffic test for the leader cluster
continue
}
if _, err := k8sUtils.GetACNP(acnpName); err != nil {
t.Errorf("Failed to get ACNP to be replicated in cluster %s", clusterName)
}
start := time.Now()
k8sUtils.Validate(allPodsPerCluster, reachability, step.Ports, step.Protocol)
step.Duration = time.Since(start)
_, wrong, _ := step.Reachability.Summary()
if wrong != 0 {
t.Errorf("Failure in cluster %s -- %d wrong results", clusterName, wrong)
reachability.PrintSummary(true, true, true)
for clusterName, k8sUtils := range clusterK8sUtilsMap {
if clusterName == leaderCluster {
// skip verification for the leader cluster
continue
}
if err := k8sUtils.WaitForACNPCreationAndRealization(t, acnpName, policyRealizedTimeout); err != nil {
t.Errorf("Failed to get ACNP to be replicated in cluster %s", clusterName)
failOnError(err, t)
}
start := time.Now()
k8sUtils.Validate(allPodsPerCluster, reachability, step.Ports, step.Protocol)
step.Duration = time.Since(start)
_, wrong, _ := step.Reachability.Summary()
if wrong != 0 {
t.Errorf("Failure in cluster %s -- %d wrong results", clusterName, wrong)
reachability.PrintSummary(true, true, true)
}
if testCrossCluster {
for remoteClusterName, remoteClusterK8s := range clusterK8sUtilsMap {
if remoteClusterName == leaderCluster || remoteClusterName == clusterName {
continue
}
newReachability := reachability.NewReachabilityWithSameExpectations()
k8sUtils.ValidateRemoteCluster(remoteClusterK8s, allPodsPerCluster, newReachability, step.Ports[0], step.Protocol)
_, wrong, _ = newReachability.Summary()
if wrong != 0 {
t.Errorf("Failure from cluster %s to cluster %s -- %d wrong results", clusterName, remoteClusterName, wrong)
newReachability.PrintSummary(true, true, true)
}
}
}
}
Expand Down
2 changes: 1 addition & 1 deletion multicluster/test/e2e/framework.go
Original file line number Diff line number Diff line change
Expand Up @@ -89,7 +89,7 @@ func (data *MCTestData) createClients() error {
}
data.clusterTestDataMap = map[string]antreae2e.TestData{}
for i, cluster := range data.clusters {
testData := antreae2e.TestData{}
testData := antreae2e.TestData{ClusterName: cluster}
if err := testData.CreateClient(kubeConfigPaths[i]); err != nil {
return fmt.Errorf("error initializing clients for cluster %s: %v", cluster, err)
}
Expand Down
5 changes: 3 additions & 2 deletions multicluster/test/e2e/main_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -122,9 +122,10 @@ func TestConnectivity(t *testing.T) {
t.Run("TestAntreaPolicy", func(t *testing.T) {
defer tearDownForPolicyTest()
initializeForPolicyTest(t, data)
testMCAntreaPolicy(t, data)
t.Run("Case=CopySpanNSIsolation", func(t *testing.T) { testAntreaPolicyCopySpanNSIsolation(t, data) })
t.Run("Case=CrossClusterNSIsolation", func(t *testing.T) { testAntreaPolicyCrossClusterNSIsolation(t, data) })
})
// Wait 5 seconds to let both member and leader controllers clean up all resources,
// otherwise, Namespace deletion may stuck into termininating status.
// otherwise, Namespace deletion may be stuck in terminating status.
time.Sleep(5 * time.Second)
}
23 changes: 23 additions & 0 deletions multicluster/test/e2e/service_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -191,6 +191,11 @@ func (data *MCTestData) testANPToServices(t *testing.T) {
if _, err := data.createOrUpdateANP(eastCluster, anpBuilder1.Get()); err != nil {
t.Fatalf("Error creating ANP %s: %v", anpBuilder1.Name, err)
}
eastClusterData := data.clusterTestDataMap[eastCluster]
if err := eastClusterData.WaitForANPCreationAndRealization(t, anpBuilder1.Namespace, anpBuilder1.Name, policyRealizedTimeout); err != nil {
t.Errorf("Failed to wait for ANP %s/%s to be realized in cluster %s", anpBuilder1.Namespace, anpBuilder1.Name, eastCluster)
failOnError(err, t)
}

connectivity := data.probeFromPodInCluster(eastCluster, multiClusterTestNamespace, eastGwClientName, "client", eastIP, mcWestClusterTestService, 80, corev1.ProtocolTCP)
assert.Equal(t, antreae2e.Dropped, connectivity, "Failure -- wrong result from probing exported Service from gateway clientPod after applying toServices AntreaNetworkPolicy")
Expand All @@ -213,6 +218,10 @@ func (data *MCTestData) testANPToServices(t *testing.T) {
if _, err := data.createOrUpdateANP(eastCluster, anpBuilder2.Get()); err != nil {
t.Fatalf("Error creating ANP %s: %v", anpBuilder2.Name, err)
}
if err := eastClusterData.WaitForANPCreationAndRealization(t, anpBuilder2.Namespace, anpBuilder2.Name, policyRealizedTimeout); err != nil {
t.Errorf("Failed to wait for ANP %s/%s to be realized in cluster %s", anpBuilder2.Namespace, anpBuilder2.Name, eastCluster)
failOnError(err, t)
}
defer data.deleteANP(eastCluster, multiClusterTestNamespace, anpBuilder2.Name)

connectivity = data.probeFromPodInCluster(eastCluster, multiClusterTestNamespace, eastGwClientName, "client", eastIP, mcWestClusterTestService, 80, corev1.ProtocolTCP)
Expand Down Expand Up @@ -242,6 +251,11 @@ func (data *MCTestData) testStretchedNetworkPolicy(t *testing.T) {
if _, err := data.createOrUpdateACNP(westCluster, acnpBuilder1.Get()); err != nil {
t.Fatalf("Error creating ACNP %s: %v", acnpBuilder1.Name, err)
}
westClusterData := data.clusterTestDataMap[westCluster]
if err := westClusterData.WaitForACNPCreationAndRealization(t, acnpBuilder1.Name, policyRealizedTimeout); err != nil {
t.Errorf("Failed to wait for ACNP %s to be realized in cluster %s", acnpBuilder1.Name, westCluster)
failOnError(err, t)
}

connectivity := data.probeFromPodInCluster(eastCluster, multiClusterTestNamespace, eastGwClientName, "client", westExpSvcIP, mcWestClusterTestService, 80, corev1.ProtocolTCP)
assert.Equal(t, antreae2e.Dropped, connectivity, getStretchedNetworkPolicyErrorMessage(eastGwClientName))
Expand All @@ -261,6 +275,10 @@ func (data *MCTestData) testStretchedNetworkPolicy(t *testing.T) {
t.Fatalf("Error creating ACNP %s: %v", acnpBuilder2.Name, err)
}
defer data.deleteACNP(westCluster, acnpBuilder2.Name)
if err := westClusterData.WaitForACNPCreationAndRealization(t, acnpBuilder2.Name, policyRealizedTimeout); err != nil {
t.Errorf("Failed to wait for ACNP %s to be realized in cluster %s", acnpBuilder2.Name, westCluster)
failOnError(err, t)
}

connectivity = data.probeFromPodInCluster(eastCluster, multiClusterTestNamespace, eastGwClientName, "client", westExpSvcIP, mcWestClusterTestService, 80, corev1.ProtocolTCP)
assert.Equal(t, antreae2e.Dropped, connectivity, getStretchedNetworkPolicyErrorMessage(eastGwClientName))
Expand All @@ -287,6 +305,11 @@ func (data *MCTestData) testStretchedNetworkPolicyReject(t *testing.T) {
if _, err := data.createOrUpdateACNP(westCluster, acnpBuilder.Get()); err != nil {
t.Fatalf("Error creating ACNP %s: %v", acnpBuilder.Name, err)
}
westClusterData := data.clusterTestDataMap[westCluster]
if err := westClusterData.WaitForACNPCreationAndRealization(t, acnpBuilder.Name, policyRealizedTimeout); err != nil {
t.Errorf("Failed to wait for ACNP %s to be realized in cluster %s", acnpBuilder.Name, westCluster)
failOnError(err, t)
}
defer data.deleteACNP(westCluster, acnpBuilder.Name)

testConnectivity := func() {
Expand Down
25 changes: 25 additions & 0 deletions multicluster/test/yamls/test-acnp-cross-cluster-ns-isolation.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,25 @@
# ResourceExport that distributes an AntreaClusterNetworkPolicy to all member
# clusters of the ClusterSet. The contained policy enforces strict Namespace
# isolation with ClusterSet scope: ingress is allowed (Pass) only from the
# workload's own Namespace across the ClusterSet, and dropped from every other
# Namespace in any member cluster.
# NOTE(review): indentation reconstructed from the ResourceExport/ACNP schema —
# the extracted source had its YAML structure flattened; verify against the
# upstream file.
apiVersion: multicluster.crd.antrea.io/v1alpha1
kind: ResourceExport
metadata:
  name: strict-namespace-isolation-cross-cluster
  namespace: antrea-multicluster
spec:
  kind: AntreaClusterNetworkPolicy
  name: strict-namespace-isolation-cross-cluster
  clusterNetworkPolicy:
    priority: 1
    tier: securityops
    appliedTo:
      - namespaceSelector: # Selects all non-system Namespaces in the cluster
          matchExpressions:
            - {key: kubernetes.io/metadata.name, operator: NotIn, values: [kube-system]}
    ingress:
      - action: Pass
        from:
          - namespaces:
              match: Self
            scope: ClusterSet
      - action: Drop
        from:
          - namespaceSelector: {}
            scope: ClusterSet
Loading

0 comments on commit 910511d

Please sign in to comment.