From 6445ee1a3e5df2b3338b5225acc105074610933c Mon Sep 17 00:00:00 2001
From: pulkit_jain <42600268+jainpulkit22@users.noreply.github.com>
Date: Thu, 26 May 2022 15:23:43 +0530
Subject: [PATCH] Create different namespaces for different e2e tests (#3506)

Create different Namespaces for different e2e tests, so that a test is
not delayed waiting for the Namespaces created by prior tests to be
deleted.

Time Statistics:

Name             old time     new time
antrea-e2e test  2692.584s    2316.626s
antrea-e2e test  2640.578s    2309.422s

Also remove a stale TODO comment from agent.go.

Signed-off-by: Pulkit Jain
Co-authored-by: Zhengsheng Zhou
---
 cmd/antrea-agent/agent.go                  |   1 -
 multicluster/test/e2e/antreapolicy_test.go |  14 +-
 test/e2e/antreaipam_service_test.go        |  16 +-
 test/e2e/antreaipam_test.go                |  12 +-
 test/e2e/antreapolicy_test.go              | 761 +++++++++++----------
 test/e2e/bandwidth_test.go                 |  37 +-
 test/e2e/basic_test.go                     |  14 +-
 test/e2e/batch_test.go                     |   2 +-
 test/e2e/clustergroup_test.go              |  13 +-
 test/e2e/connectivity_test.go              |  24 +-
 test/e2e/egress_test.go                    |  26 +-
 test/e2e/fixtures.go                       |  13 +-
 test/e2e/flowaggregator_test.go            | 136 ++--
 test/e2e/framework.go                      |  46 +-
 test/e2e/k8s_util.go                       |   8 +-
 test/e2e/multicast_test.go                 |  16 +-
 test/e2e/networkpolicy_test.go             | 116 ++--
 test/e2e/nodeportlocal_test.go             |  80 +--
 test/e2e/performance_test.go               |  16 +-
 test/e2e/providers/exec/docker.go          |  15 +-
 test/e2e/proxy_test.go                     | 104 +--
 test/e2e/service_externalip_test.go        |  22 +-
 test/e2e/service_test.go                   |   9 +-
 test/e2e/traceflow_test.go                 | 218 +++---
 test/e2e/upgrade_test.go                   |   6 +-
 test/e2e/wireguard_test.go                 |  14 +-
 26 files changed, 873 insertions(+), 866 deletions(-)

diff --git a/cmd/antrea-agent/agent.go b/cmd/antrea-agent/agent.go
index 3e9124e7dae..79780c7cd64 100644
--- a/cmd/antrea-agent/agent.go
+++ b/cmd/antrea-agent/agent.go
@@ -138,7 +138,6 @@ func run(o *Options) error {
     _, serviceCIDRNet, _ := net.ParseCIDR(o.config.ServiceCIDR)
     var serviceCIDRNetv6 *net.IPNet
-    // Todo: use FeatureGate to check if IPv6 is enabled and then read configuration item "ServiceCIDRv6".
     if o.config.ServiceCIDRv6 != "" {
         _, serviceCIDRNetv6, _ = net.ParseCIDR(o.config.ServiceCIDRv6)
     }
diff --git a/multicluster/test/e2e/antreapolicy_test.go b/multicluster/test/e2e/antreapolicy_test.go
index 11d50b3f8c3..59ad973a561 100644
--- a/multicluster/test/e2e/antreapolicy_test.go
+++ b/multicluster/test/e2e/antreapolicy_test.go
@@ -33,10 +33,11 @@ const (
 )
 
 var (
-    allPodsPerCluster []antreae2e.Pod
-    perNamespacePods, perClusterNamespaces []string
-    podsByNamespace map[string][]antreae2e.Pod
-    clusterK8sUtilsMap map[string]*antreae2e.KubernetesUtils
+    allPodsPerCluster []antreae2e.Pod
+    perNamespacePods []string
+    perClusterNamespaces map[string]string
+    podsByNamespace map[string][]antreae2e.Pod
+    clusterK8sUtilsMap map[string]*antreae2e.KubernetesUtils
 )
 
 func failOnError(err error, t *testing.T) {
@@ -52,7 +53,10 @@ func failOnError(err error, t *testing.T) {
 
 // initializeForPolicyTest creates three Pods in three test Namespaces for each test cluster.
 func initializeForPolicyTest(t *testing.T, data *MCTestData) {
     perNamespacePods = []string{"a", "b", "c"}
-    perClusterNamespaces = []string{"x", "y", "z"}
+    perClusterNamespaces = make(map[string]string)
+    perClusterNamespaces["x"] = "x"
+    perClusterNamespaces["y"] = "y"
+    perClusterNamespaces["z"] = "z"
 
     allPodsPerCluster = []antreae2e.Pod{}
     podsByNamespace = make(map[string][]antreae2e.Pod)
diff --git a/test/e2e/antreaipam_service_test.go b/test/e2e/antreaipam_service_test.go
index cee0c1fc0d4..2ccc3cc5ba8 100644
--- a/test/e2e/antreaipam_service_test.go
+++ b/test/e2e/antreaipam_service_test.go
@@ -54,12 +54,12 @@ func TestAntreaIPAMService(t *testing.T) {
     })
     t.Run("testAntreaIPAMClusterIPv4", func(t *testing.T) {
         skipIfNotIPv4Cluster(t)
-        data.testClusterIP(t, false, testNamespace, testAntreaIPAMNamespace)
+        data.testClusterIP(t, false, data.testNamespace, testAntreaIPAMNamespace)
         checkIPPoolsEmpty(t, data, ipPools)
     })
     t.Run("testAntreaIPAMPodToClusterIPv4", func(t *testing.T) {
         skipIfNotIPv4Cluster(t)
-        data.testClusterIP(t, false, testAntreaIPAMNamespace, testNamespace)
+        data.testClusterIP(t, false, testAntreaIPAMNamespace, data.testNamespace)
         checkIPPoolsEmpty(t, data, ipPools)
     })
     t.Run("testAntreaIPAMVLAN11PodToAntreaIPAMVLAN11ClusterIPv4", func(t *testing.T) {
@@ -84,12 +84,12 @@ func TestAntreaIPAMService(t *testing.T) {
     })
     t.Run("testAntreaIPAMVLAN11ClusterIPv4", func(t *testing.T) {
         skipIfNotIPv4Cluster(t)
-        data.testClusterIP(t, false, testNamespace, testAntreaIPAMNamespace11)
+        data.testClusterIP(t, false, data.testNamespace, testAntreaIPAMNamespace11)
         checkIPPoolsEmpty(t, data, ipPools)
     })
     t.Run("testAntreaIPAMVLAN11PodToClusterIPv4", func(t *testing.T) {
         skipIfNotIPv4Cluster(t)
-        data.testClusterIP(t, false, testAntreaIPAMNamespace11, testNamespace)
+        data.testClusterIP(t, false, testAntreaIPAMNamespace11, data.testNamespace)
         checkIPPoolsEmpty(t, data, ipPools)
     })
 
@@ -100,12 +100,12 @@ func TestAntreaIPAMService(t *testing.T) {
     })
     t.Run("testAntreaIPAMNodePort", func(t *testing.T) {
         skipIfHasWindowsNodes(t)
-        data.testNodePort(t, false, testNamespace, testAntreaIPAMNamespace)
+        data.testNodePort(t, false, data.testNamespace, testAntreaIPAMNamespace)
         checkIPPoolsEmpty(t, data, ipPools)
     })
     t.Run("testAntreaIPAMPodToNodePort", func(t *testing.T) {
         skipIfHasWindowsNodes(t)
-        data.testNodePort(t, false, testAntreaIPAMNamespace, testNamespace)
+        data.testNodePort(t, false, testAntreaIPAMNamespace, data.testNamespace)
         checkIPPoolsEmpty(t, data, ipPools)
     })
     t.Run("testAntreaIPAMVLAN11PodToAntreaIPAMVLAN11NodePort", func(t *testing.T) {
@@ -130,12 +130,12 @@ func TestAntreaIPAMService(t *testing.T) {
     })
     t.Run("testAntreaIPAMVLAN11NodePort", func(t *testing.T) {
         skipIfHasWindowsNodes(t)
-        data.testNodePort(t, false, testNamespace, testAntreaIPAMNamespace11)
+        data.testNodePort(t, false, data.testNamespace, testAntreaIPAMNamespace11)
         checkIPPoolsEmpty(t, data, ipPools)
     })
     t.Run("testAntreaIPAMVLAN11PodToNodePort", func(t *testing.T) {
         skipIfHasWindowsNodes(t)
-        data.testNodePort(t, false, testAntreaIPAMNamespace11, testNamespace)
+        data.testNodePort(t, false, testAntreaIPAMNamespace11, data.testNamespace)
         checkIPPoolsEmpty(t, data, ipPools)
     })
 }
diff --git a/test/e2e/antreaipam_test.go b/test/e2e/antreaipam_test.go
index ffa520905d8..0a1780a9ac5 100644
--- a/test/e2e/antreaipam_test.go
+++ b/test/e2e/antreaipam_test.go
@@ -186,12 +186,12 @@ func TestAntreaIPAM(t *testing.T) {
     })
     t.Run("testAntreaIPAMHostPortPodConnectivity", func(t *testing.T) {
         skipIfHasWindowsNodes(t)
-        data.testHostPortPodConnectivity(t, testNamespace, testAntreaIPAMNamespace)
+        data.testHostPortPodConnectivity(t, data.testNamespace, testAntreaIPAMNamespace)
         checkIPPoolsEmpty(t, data, ipPools)
     })
     t.Run("testAntreaIPAMPodToHostPortPodConnectivity", func(t *testing.T) {
         skipIfHasWindowsNodes(t)
-        data.testHostPortPodConnectivity(t, testAntreaIPAMNamespace, testNamespace)
+        data.testHostPortPodConnectivity(t, testAntreaIPAMNamespace, data.testNamespace)
         checkIPPoolsEmpty(t, data, ipPools)
     })
     t.Run("testAntreaIPAMVLAN11PodToAntreaIPAMVLAN11HostPortPodConnectivity", func(t *testing.T) {
@@ -216,12 +216,12 @@ func TestAntreaIPAM(t *testing.T) {
     })
     t.Run("testAntreaIPAMVLAN11HostPortPodConnectivity", func(t *testing.T) {
         skipIfHasWindowsNodes(t)
-        data.testHostPortPodConnectivity(t, testNamespace, testAntreaIPAMNamespace11)
+        data.testHostPortPodConnectivity(t, data.testNamespace, testAntreaIPAMNamespace11)
         checkIPPoolsEmpty(t, data, ipPools)
     })
     t.Run("testAntreaIPAMVLAN11PodToHostPortPodConnectivity", func(t *testing.T) {
         skipIfHasWindowsNodes(t)
-        data.testHostPortPodConnectivity(t, testAntreaIPAMNamespace11, testNamespace)
+        data.testHostPortPodConnectivity(t, testAntreaIPAMNamespace11, data.testNamespace)
         checkIPPoolsEmpty(t, data, ipPools)
     })
     t.Run("testAntreaIPAMOVSRestartSameNode", func(t *testing.T) {
@@ -260,7 +260,7 @@ func testAntreaIPAMPodConnectivitySameNode(t *testing.T, data *TestData) {
     // One Per-Node IPAM Pod
     podInfos = append(podInfos, podInfo{
         name:      randName("test-pod-0-"),
-        namespace: testNamespace,
+        namespace: data.testNamespace,
     })
     workerNode := workerNodeName(1)
@@ -279,7 +279,7 @@ func testAntreaIPAMPodConnectivityDifferentNodes(t *testing.T, data *TestData) {
     maxNodes := 3
     var podInfos []podInfo
-    for _, namespace := range []string{testNamespace, testAntreaIPAMNamespace, testAntreaIPAMNamespace11, testAntreaIPAMNamespace12} {
+    for _, namespace := range []string{data.testNamespace, testAntreaIPAMNamespace, testAntreaIPAMNamespace11, testAntreaIPAMNamespace12} {
         createdPodInfos, deletePods := createPodsOnDifferentNodes(t, data, namespace, "differentnodes")
         defer deletePods()
         if len(createdPodInfos) > maxNodes {
diff --git a/test/e2e/antreapolicy_test.go b/test/e2e/antreapolicy_test.go
index a2767bbcdba..d4b7bfc5784 100644
--- a/test/e2e/antreapolicy_test.go
+++ b/test/e2e/antreapolicy_test.go
@@ -48,7 +48,8 @@ var (
     podsByNamespace map[string][]Pod
     k8sUtils *KubernetesUtils
     allTestList []*TestCase
-    pods, namespaces []string
+    pods []string
+    namespaces map[string]string
     podIPs map[string][]string
     p80, p81, p8080, p8081, p8082, p8085, p6443 int32
 )
@@ -121,7 +122,11 @@ func initialize(t *testing.T, data *TestData) {
     p8082 = 8082
     p8085 = 8085
     pods = []string{"a", "b", "c"}
-    namespaces = []string{"x", "y", "z"}
+    namespaces = make(map[string]string)
+    suffix := randName("")
+    namespaces["x"] = "x-" + suffix
+    namespaces["y"] = "y-" + suffix
+    namespaces["z"] = "z-" + suffix
     // This function "initialize" will be used more than once, and variable "allPods" is global.
     // It should be empty every time when "initialize" is performed, otherwise there will be unexpected
     // results.
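The uniqueness of the new Namespace names above hinges on randName. As a rough sketch of the idea (the real helper lives in the e2e framework and its exact details may differ), a suffix generator could look like this:

package main

import (
	"fmt"
	"math/rand"
)

const nameSuffixLength = 8

var lettersAndDigits = []rune("abcdefghijklmnopqrstuvwxyz0123456789")

// randSeq returns a random lowercase alphanumeric string of length n.
func randSeq(n int) string {
	b := make([]rune, n)
	for i := range b {
		b[i] = lettersAndDigits[rand.Intn(len(lettersAndDigits))]
	}
	return string(b)
}

// randName appends a random suffix to prefix, e.g. randName("x-") -> "x-4fq0z9sk".
func randName(prefix string) string {
	return prefix + randSeq(nameSuffixLength)
}

func main() {
	suffix := randName("")
	// Tests keep addressing Namespaces by their short alias ("x", "y", "z");
	// only the actual cluster object carries the unique suffix.
	namespaces := map[string]string{
		"x": "x-" + suffix,
		"y": "y-" + suffix,
		"z": "z-" + suffix,
	}
	fmt.Println(namespaces)
}

Because every run of initialize produces a fresh suffix, a new test never has to wait for a terminating Namespace left behind by the previous one.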
@@ -149,7 +154,7 @@ func skipIfAntreaPolicyDisabled(tb testing.TB) {
     skipIfFeatureDisabled(tb, features.AntreaPolicy, true, true)
 }
 
-func applyDefaultDenyToAllNamespaces(k8s *KubernetesUtils, namespaces []string) error {
+func applyDefaultDenyToAllNamespaces(k8s *KubernetesUtils, namespaces map[string]string) error {
     if err := k8s.CleanNetworkPolicies(namespaces); err != nil {
         return err
     }
@@ -171,7 +176,7 @@ func applyDefaultDenyToAllNamespaces(k8s *KubernetesUtils, namespaces []string)
     return nil
 }
 
-func cleanupDefaultDenyNPs(k8s *KubernetesUtils, namespaces []string) error {
+func cleanupDefaultDenyNPs(k8s *KubernetesUtils, namespaces map[string]string) error {
     if err := k8s.CleanNetworkPolicies(namespaces); err != nil {
         return err
     }
@@ -206,7 +211,7 @@ func testMutateACNPNoTier(t *testing.T) {
 func testMutateANPNoTier(t *testing.T) {
     invalidNpErr := fmt.Errorf("ANP tier not mutated to default tier")
     builder := &AntreaNetworkPolicySpecBuilder{}
-    builder = builder.SetName("x", "anp-no-tier").
+    builder = builder.SetName(namespaces["x"], "anp-no-tier").
         SetAppliedToGroup([]ANPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}}).
         SetPriority(10.0)
     anp := builder.Get()
@@ -227,7 +232,7 @@ func testMutateACNPNoRuleName(t *testing.T) {
     builder = builder.SetName("acnp-no-rule-name").
         SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}}).
         SetPriority(10.0).
-        AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": "x"},
+        AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": namespaces["x"]},
             nil, nil, false, nil, crdv1alpha1.RuleActionAllow, "", "", nil)
     acnp := builder.Get()
     log.Debugf("creating ACNP %v", acnp.Name)
@@ -249,10 +254,10 @@ func testMutateACNPNoRuleName(t *testing.T) {
 func testMutateANPNoRuleName(t *testing.T) {
     mutateErr := fmt.Errorf("ANP Rule name not mutated automatically")
     builder := &AntreaNetworkPolicySpecBuilder{}
-    builder = builder.SetName("x", "anp-no-rule-name").
+    builder = builder.SetName(namespaces["x"], "anp-no-rule-name").
         SetAppliedToGroup([]ANPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}}).
         SetPriority(10.0).
-        AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": "x"},
+        AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": namespaces["x"]},
             nil, nil, nil, crdv1alpha1.RuleActionAllow, "")
     anp := builder.Get()
     log.Debugf("creating ANP %v", anp.Name)
@@ -287,7 +292,7 @@ func testInvalidACNPNoPriority(t *testing.T) {
 func testInvalidANPNoPriority(t *testing.T) {
     invalidNpErr := fmt.Errorf("invalid Antrea NetworkPolicy without a priority accepted")
     builder := &AntreaNetworkPolicySpecBuilder{}
-    builder = builder.SetName("x", "anp-no-priority").
+    builder = builder.SetName(namespaces["x"], "anp-no-priority").
         SetAppliedToGroup([]ANPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}})
     anp := builder.Get()
     log.Debugf("creating ANP %v", anp.Name)
@@ -300,11 +305,11 @@ func testInvalidANPNoPriority(t *testing.T) {
 func testInvalidANPRuleNameNotUnique(t *testing.T) {
     invalidNpErr := fmt.Errorf("invalid Antrea NetworkPolicy without unique rule names accepted")
     builder := &AntreaNetworkPolicySpecBuilder{}
-    builder = builder.SetName("x", "anp-rule-name-not-unique").
+    builder = builder.SetName(namespaces["x"], "anp-rule-name-not-unique").
         SetAppliedToGroup([]ANPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}}).
-        AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": "x"},
+        AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": namespaces["x"]},
             nil, nil, nil, crdv1alpha1.RuleActionAllow, "not-unique").
-        AddIngress(ProtocolTCP, &p81, nil, nil, nil, nil, nil, map[string]string{"pod": "c"}, map[string]string{"ns": "x"},
+        AddIngress(ProtocolTCP, &p81, nil, nil, nil, nil, nil, map[string]string{"pod": "c"}, map[string]string{"ns": namespaces["x"]},
             nil, nil, nil, crdv1alpha1.RuleActionAllow, "not-unique")
     anp := builder.Get()
     log.Debugf("creating ANP %v", anp.Name)
@@ -317,7 +322,7 @@ func testInvalidANPRuleNameNotUnique(t *testing.T) {
 func testInvalidANPTierDoesNotExist(t *testing.T) {
     invalidNpErr := fmt.Errorf("invalid Antrea NetworkPolicy without existing Tier accepted")
     builder := &AntreaNetworkPolicySpecBuilder{}
-    builder = builder.SetName("x", "anp-tier-not-exist").
+    builder = builder.SetName(namespaces["x"], "anp-tier-not-exist").
         SetAppliedToGroup([]ANPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}}).
         SetTier("i-dont-exist")
     anp := builder.Get()
@@ -331,10 +336,10 @@ func testInvalidANPTierDoesNotExist(t *testing.T) {
 func testInvalidANPPortRangePortUnset(t *testing.T) {
     invalidNpErr := fmt.Errorf("invalid Antrea NetworkPolicy egress rule with endPort but no port accepted")
     builder := &AntreaNetworkPolicySpecBuilder{}
-    builder = builder.SetName("y", "anp-egress-port-range-port-unset").
+    builder = builder.SetName(namespaces["y"], "anp-egress-port-range-port-unset").
         SetPriority(1.0).
         SetAppliedToGroup([]ANPAppliedToSpec{{PodSelector: map[string]string{"pod": "b"}}})
-    builder.AddEgress(ProtocolTCP, nil, nil, &p8085, nil, nil, nil, map[string]string{"pod": "c"}, map[string]string{"ns": "x"},
+    builder.AddEgress(ProtocolTCP, nil, nil, &p8085, nil, nil, nil, map[string]string{"pod": "c"}, map[string]string{"ns": namespaces["x"]},
         nil, nil, nil, crdv1alpha1.RuleActionDrop, "anp-port-range")
 
     anp := builder.Get()
@@ -348,10 +353,10 @@ func testInvalidANPPortRangePortUnset(t *testing.T) {
 func testInvalidANPPortRangeEndPortSmall(t *testing.T) {
     invalidNpErr := fmt.Errorf("invalid Antrea NetworkPolicy egress rule with endPort smaller than port accepted")
     builder := &AntreaNetworkPolicySpecBuilder{}
-    builder = builder.SetName("y", "anp-egress-port-range-endport-small").
+    builder = builder.SetName(namespaces["y"], "anp-egress-port-range-endport-small").
         SetPriority(1.0).
         SetAppliedToGroup([]ANPAppliedToSpec{{PodSelector: map[string]string{"pod": "b"}}})
-    builder.AddEgress(ProtocolTCP, &p8082, nil, &p8081, nil, nil, nil, map[string]string{"pod": "c"}, map[string]string{"ns": "x"},
+    builder.AddEgress(ProtocolTCP, &p8082, nil, &p8081, nil, nil, nil, map[string]string{"pod": "c"}, map[string]string{"ns": namespaces["x"]},
         nil, nil, nil, crdv1alpha1.RuleActionDrop, "anp-port-range")
 
     anp := builder.Get()
@@ -443,7 +448,7 @@ func testInvalidTierANPRefDelete(t *testing.T) {
         failOnError(fmt.Errorf("create Tier failed for tier tier-anp: %v", err), t)
     }
     builder := &AntreaNetworkPolicySpecBuilder{}
-    builder = builder.SetName("x", "anp-for-tier").
+    builder = builder.SetName(namespaces["x"], "anp-for-tier").
         SetAppliedToGroup([]ANPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}}).
         SetTier("tier-anp").
         SetPriority(13.0)
@@ -461,7 +466,7 @@ func testInvalidTierANPRefDelete(t *testing.T) {
 }
 
 // testInvalidACNPPodSelectorNsSelectorMatchExpressions tests creating a ClusterNetworkPolicy with invalid LabelSelector(MatchExpressions)
-func testInvalidACNPPodSelectorNsSelectorMatchExpressions(t *testing.T) {
+func testInvalidACNPPodSelectorNsSelectorMatchExpressions(t *testing.T, data *TestData) {
     invalidLSErr := fmt.Errorf("create Antrea NetworkPolicy with namespaceSelector but matchExpressions invalid")
 
     allowAction := crdv1alpha1.RuleActionAllow
@@ -470,7 +475,7 @@ func testInvalidACNPPodSelectorNsSelectorMatchExpressions(t *testing.T) {
 
     var acnp = &crdv1alpha1.ClusterNetworkPolicy{
         ObjectMeta: metav1.ObjectMeta{
-            Namespace: testNamespace, Name: "cnptest", Labels: map[string]string{"antrea-e2e": "cnp1"}},
+            Namespace: data.testNamespace, Name: "cnptest", Labels: map[string]string{"antrea-e2e": "cnp1"}},
         Spec: crdv1alpha1.ClusterNetworkPolicySpec{
             AppliedTo: []crdv1alpha1.NetworkPolicyPeer{
                 {PodSelector: &selectorA},
@@ -497,13 +502,13 @@ func testACNPAllowXBtoA(t *testing.T) {
     builder = builder.SetName("acnp-allow-xb-to-a").
         SetPriority(1.0).
         SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}})
-    builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": "x"},
+    builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": namespaces["x"]},
         nil, nil, false, nil, crdv1alpha1.RuleActionAllow, "", "", nil)
 
     reachability := NewReachability(allPods, Dropped)
-    reachability.Expect(Pod("x/b"), Pod("x/a"), Connected)
-    reachability.Expect(Pod("x/b"), Pod("y/a"), Connected)
-    reachability.Expect(Pod("x/b"), Pod("z/a"), Connected)
+    reachability.Expect(Pod(namespaces["x"]+"/b"), Pod(namespaces["x"]+"/a"), Connected)
+    reachability.Expect(Pod(namespaces["x"]+"/b"), Pod(namespaces["y"]+"/a"), Connected)
+    reachability.Expect(Pod(namespaces["x"]+"/b"), Pod(namespaces["z"]+"/a"), Connected)
     reachability.ExpectSelf(allPods, Connected)
 
     testStep := []*TestStep{
@@ -530,12 +535,12 @@ func testACNPAllowXBtoYA(t *testing.T) {
     builder := &ClusterNetworkPolicySpecBuilder{}
     builder = builder.SetName("acnp-allow-xb-to-ya").
         SetPriority(2.0).
-        SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}, NSSelector: map[string]string{"ns": "y"}}})
-    builder.AddIngress(ProtocolTCP, nil, &port81Name, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": "x"},
+        SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}, NSSelector: map[string]string{"ns": namespaces["y"]}}})
+    builder.AddIngress(ProtocolTCP, nil, &port81Name, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": namespaces["x"]},
         nil, nil, false, nil, crdv1alpha1.RuleActionAllow, "", "", nil)
 
     reachability := NewReachability(allPods, Dropped)
-    reachability.Expect(Pod("x/b"), Pod("y/a"), Connected)
+    reachability.Expect(Pod(namespaces["x"]+"/b"), Pod(namespaces["y"]+"/a"), Connected)
     reachability.ExpectSelf(allPods, Connected)
 
     testStep := []*TestStep{
@@ -562,25 +567,25 @@ func testACNPPriorityOverrideDefaultDeny(t *testing.T) {
     builder1 := &ClusterNetworkPolicySpecBuilder{}
     builder1 = builder1.SetName("acnp-priority2").
         SetPriority(2).
-        SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": "x"}}})
-    builder1.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, map[string]string{"ns": "z"},
+        SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": namespaces["x"]}}})
+    builder1.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]},
         nil, nil, false, nil, crdv1alpha1.RuleActionAllow, "", "", nil)
 
     builder2 := &ClusterNetworkPolicySpecBuilder{}
     builder2 = builder2.SetName("acnp-priority1").
         SetPriority(1).
-        SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}, NSSelector: map[string]string{"ns": "x"}}})
-    builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, map[string]string{"ns": "z"},
+        SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}, NSSelector: map[string]string{"ns": namespaces["x"]}}})
+    builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]},
         nil, nil, false, nil, crdv1alpha1.RuleActionDrop, "", "", nil)
 
     // Ingress from ns:z to x/a will be dropped since acnp-priority1 has higher precedence.
     reachabilityBothACNP := NewReachability(allPods, Dropped)
-    reachabilityBothACNP.Expect(Pod("z/a"), Pod("x/b"), Connected)
-    reachabilityBothACNP.Expect(Pod("z/a"), Pod("x/c"), Connected)
-    reachabilityBothACNP.Expect(Pod("z/b"), Pod("x/b"), Connected)
-    reachabilityBothACNP.Expect(Pod("z/b"), Pod("x/c"), Connected)
-    reachabilityBothACNP.Expect(Pod("z/c"), Pod("x/b"), Connected)
-    reachabilityBothACNP.Expect(Pod("z/c"), Pod("x/c"), Connected)
+    reachabilityBothACNP.Expect(Pod(namespaces["z"]+"/a"), Pod(namespaces["x"]+"/b"), Connected)
+    reachabilityBothACNP.Expect(Pod(namespaces["z"]+"/a"), Pod(namespaces["x"]+"/c"), Connected)
+    reachabilityBothACNP.Expect(Pod(namespaces["z"]+"/b"), Pod(namespaces["x"]+"/b"), Connected)
+    reachabilityBothACNP.Expect(Pod(namespaces["z"]+"/b"), Pod(namespaces["x"]+"/c"), Connected)
+    reachabilityBothACNP.Expect(Pod(namespaces["z"]+"/c"), Pod(namespaces["x"]+"/b"), Connected)
+    reachabilityBothACNP.Expect(Pod(namespaces["z"]+"/c"), Pod(namespaces["x"]+"/c"), Connected)
     reachabilityBothACNP.ExpectSelf(allPods, Connected)
 
     testStep := []*TestStep{
@@ -613,10 +618,10 @@ func testACNPAllowNoDefaultIsolation(t *testing.T, protocol AntreaPolicyProtocol) {
     builder := &ClusterNetworkPolicySpecBuilder{}
     builder = builder.SetName("acnp-allow-x-ingress-y-egress-z").
        SetPriority(1.1).
-        SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": "x"}}})
-    builder.AddIngress(protocol, &p81, nil, nil, nil, nil, nil, nil, map[string]string{"ns": "y"},
+        SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": namespaces["x"]}}})
+    builder.AddIngress(protocol, &p81, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["y"]},
         nil, nil, false, nil, crdv1alpha1.RuleActionAllow, "", "", nil)
-    builder.AddEgress(protocol, &p81, nil, nil, nil, nil, nil, nil, map[string]string{"ns": "z"},
+    builder.AddEgress(protocol, &p81, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]},
         nil, nil, false, nil, crdv1alpha1.RuleActionAllow, "", "", nil)
 
     reachability := NewReachability(allPods, Connected)
@@ -651,14 +656,14 @@ func testACNPDropEgress(t *testing.T, protocol AntreaPolicyProtocol) {
     builder = builder.SetName("acnp-deny-a-to-z-egress").
         SetPriority(1.0).
         SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}})
-    builder.AddEgress(protocol, &p80, nil, nil, nil, nil, nil, nil, map[string]string{"ns": "z"},
+    builder.AddEgress(protocol, &p80, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]},
         nil, nil, false, nil, crdv1alpha1.RuleActionDrop, "", "", nil)
 
     reachability := NewReachability(allPods, Connected)
-    reachability.ExpectEgressToNamespace(Pod("x/a"), "z", Dropped)
-    reachability.ExpectEgressToNamespace(Pod("y/a"), "z", Dropped)
-    reachability.Expect(Pod("z/a"), Pod("z/b"), Dropped)
-    reachability.Expect(Pod("z/a"), Pod("z/c"), Dropped)
+    reachability.ExpectEgressToNamespace(Pod(namespaces["x"]+"/a"), namespaces["z"], Dropped)
+    reachability.ExpectEgressToNamespace(Pod(namespaces["y"]+"/a"), namespaces["z"], Dropped)
+    reachability.Expect(Pod(namespaces["z"]+"/a"), Pod(namespaces["z"]+"/b"), Dropped)
+    reachability.Expect(Pod(namespaces["z"]+"/a"), Pod(namespaces["z"]+"/c"), Dropped)
     testStep := []*TestStep{
         {
             "Port 80",
@@ -683,14 +688,14 @@ func testACNPDropIngressInSelectedNamespace(t *testing.T) {
     builder := &ClusterNetworkPolicySpecBuilder{}
     builder = builder.SetName("acnp-deny-ingress-to-x").
         SetPriority(1.0).
-        SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": "x"}}})
+        SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": namespaces["x"]}}})
     builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil,
         nil, false, nil, crdv1alpha1.RuleActionDrop, "", "drop-all-ingress", nil)
 
     reachability := NewReachability(allPods, Connected)
-    reachability.ExpectAllIngress("x/a", Dropped)
-    reachability.ExpectAllIngress("x/b", Dropped)
-    reachability.ExpectAllIngress("x/c", Dropped)
+    reachability.ExpectAllIngress(Pod(namespaces["x"]+"/a"), Dropped)
+    reachability.ExpectAllIngress(Pod(namespaces["x"]+"/b"), Dropped)
+    reachability.ExpectAllIngress(Pod(namespaces["x"]+"/c"), Dropped)
     reachability.ExpectSelf(allPods, Connected)
     testStep := []*TestStep{
         {
@@ -715,18 +720,18 @@ func testACNPNoEffectOnOtherProtocols(t *testing.T) {
     builder = builder.SetName("acnp-deny-a-to-z-ingress").
         SetPriority(1.0).
         SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}})
-    builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, map[string]string{"ns": "z"},
+    builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]},
         nil, nil, false, nil, crdv1alpha1.RuleActionDrop, "", "", nil)
 
     reachability1 := NewReachability(allPods, Connected)
-    reachability1.Expect(Pod("z/a"), Pod("x/a"), Dropped)
-    reachability1.Expect(Pod("z/b"), Pod("x/a"), Dropped)
-    reachability1.Expect(Pod("z/c"), Pod("x/a"), Dropped)
-    reachability1.Expect(Pod("z/a"), Pod("y/a"), Dropped)
-    reachability1.Expect(Pod("z/b"), Pod("y/a"), Dropped)
-    reachability1.Expect(Pod("z/c"), Pod("y/a"), Dropped)
-    reachability1.Expect(Pod("z/b"), Pod("z/a"), Dropped)
-    reachability1.Expect(Pod("z/c"), Pod("z/a"), Dropped)
+    reachability1.Expect(Pod(namespaces["z"]+"/a"), Pod(namespaces["x"]+"/a"), Dropped)
+    reachability1.Expect(Pod(namespaces["z"]+"/b"), Pod(namespaces["x"]+"/a"), Dropped)
+    reachability1.Expect(Pod(namespaces["z"]+"/c"), Pod(namespaces["x"]+"/a"), Dropped)
+    reachability1.Expect(Pod(namespaces["z"]+"/a"), Pod(namespaces["y"]+"/a"), Dropped)
+    reachability1.Expect(Pod(namespaces["z"]+"/b"), Pod(namespaces["y"]+"/a"), Dropped)
+    reachability1.Expect(Pod(namespaces["z"]+"/c"), Pod(namespaces["y"]+"/a"), Dropped)
+    reachability1.Expect(Pod(namespaces["z"]+"/b"), Pod(namespaces["z"]+"/a"), Dropped)
+    reachability1.Expect(Pod(namespaces["z"]+"/c"), Pod(namespaces["z"]+"/a"), Dropped)
 
     reachability2 := NewReachability(allPods, Connected)
 
@@ -761,18 +766,18 @@ func testACNPAppliedToDenyXBtoCGWithYA(t *testing.T) {
     cgName := "cg-pods-ya"
     cgBuilder := &ClusterGroupV1Alpha2SpecBuilder{}
     cgBuilder = cgBuilder.SetName(cgName).
-        SetNamespaceSelector(map[string]string{"ns": "y"}, nil).
+        SetNamespaceSelector(map[string]string{"ns": namespaces["y"]}, nil).
         SetPodSelector(map[string]string{"pod": "a"}, nil)
     port81Name := "serve-81"
     builder := &ClusterNetworkPolicySpecBuilder{}
     builder = builder.SetName("acnp-deny-cg-with-ya-from-xb").
         SetPriority(2.0).
         SetAppliedToGroup([]ACNPAppliedToSpec{{Group: cgName}})
-    builder.AddIngress(ProtocolTCP, nil, &port81Name, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": "x"},
+    builder.AddIngress(ProtocolTCP, nil, &port81Name, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": namespaces["x"]},
         nil, nil, false, nil, crdv1alpha1.RuleActionDrop, "", "", nil)
 
     reachability := NewReachability(allPods, Connected)
-    reachability.Expect(Pod("x/b"), Pod("y/a"), Dropped)
+    reachability.Expect(Pod(namespaces["x"]+"/b"), Pod(namespaces["y"]+"/a"), Dropped)
     reachability.ExpectSelf(allPods, Connected)
 
     testStep := []*TestStep{
@@ -798,18 +803,18 @@ func testACNPIngressRuleDenyCGWithXBtoYA(t *testing.T) {
     cgName := "cg-pods-xb"
     cgBuilder := &ClusterGroupV1Alpha2SpecBuilder{}
     cgBuilder = cgBuilder.SetName(cgName).
-        SetNamespaceSelector(map[string]string{"ns": "x"}, nil).
+        SetNamespaceSelector(map[string]string{"ns": namespaces["x"]}, nil).
         SetPodSelector(map[string]string{"pod": "b"}, nil)
     port81Name := "serve-81"
     builder := &ClusterNetworkPolicySpecBuilder{}
     builder = builder.SetName("acnp-deny-cg-with-xb-to-ya").
         SetPriority(2.0).
-        SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}, NSSelector: map[string]string{"ns": "y"}}})
+        SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}, NSSelector: map[string]string{"ns": namespaces["y"]}}})
     builder.AddIngress(ProtocolTCP, nil, &port81Name, nil, nil, nil, nil, nil, nil,
         nil, nil, false, nil, crdv1alpha1.RuleActionDrop, cgName, "", nil)
 
     reachability := NewReachability(allPods, Connected)
-    reachability.Expect(Pod("x/b"), Pod("y/a"), Dropped)
+    reachability.Expect(Pod(namespaces["x"]+"/b"), Pod(namespaces["y"]+"/a"), Dropped)
     reachability.ExpectSelf(allPods, Connected)
 
     testStep := []*TestStep{
@@ -837,14 +842,14 @@ func testACNPAppliedToRuleCGWithPodsAToNsZ(t *testing.T) {
     builder := &ClusterNetworkPolicySpecBuilder{}
     builder = builder.SetName("acnp-deny-cg-with-a-to-z").
         SetPriority(1.0)
-    builder.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, map[string]string{"ns": "z"},
+    builder.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]},
         nil, nil, false, []ACNPAppliedToSpec{{Group: cgName}}, crdv1alpha1.RuleActionDrop, "", "", nil)
 
     reachability := NewReachability(allPods, Connected)
-    reachability.ExpectEgressToNamespace(Pod("x/a"), "z", Dropped)
-    reachability.ExpectEgressToNamespace(Pod("y/a"), "z", Dropped)
-    reachability.Expect(Pod("z/a"), Pod("z/b"), Dropped)
-    reachability.Expect(Pod("z/a"), Pod("z/c"), Dropped)
+    reachability.ExpectEgressToNamespace(Pod(namespaces["x"]+"/a"), namespaces["z"], Dropped)
+    reachability.ExpectEgressToNamespace(Pod(namespaces["y"]+"/a"), namespaces["z"], Dropped)
+    reachability.Expect(Pod(namespaces["z"]+"/a"), Pod(namespaces["z"]+"/b"), Dropped)
+    reachability.Expect(Pod(namespaces["z"]+"/a"), Pod(namespaces["z"]+"/c"), Dropped)
     testStep := []*TestStep{
         {
             "Port 80",
@@ -867,7 +872,7 @@ func testACNPAppliedToRuleCGWithPodsAToNsZ(t *testing.T) {
 func testACNPEgressRulePodsAToCGWithNsZ(t *testing.T) {
     cgName := "cg-ns-z"
     cgBuilder := &ClusterGroupV1Alpha3SpecBuilder{}
-    cgBuilder = cgBuilder.SetName(cgName).SetNamespaceSelector(map[string]string{"ns": "z"}, nil)
+    cgBuilder = cgBuilder.SetName(cgName).SetNamespaceSelector(map[string]string{"ns": namespaces["z"]}, nil)
     builder := &ClusterNetworkPolicySpecBuilder{}
     builder = builder.SetName("acnp-deny-a-to-cg-with-z-egress").
         SetPriority(1.0).
@@ -876,10 +881,10 @@ func testACNPEgressRulePodsAToCGWithNsZ(t *testing.T) {
         nil, nil, false, nil, crdv1alpha1.RuleActionDrop, cgName, "", nil)
 
     reachability := NewReachability(allPods, Connected)
-    reachability.ExpectEgressToNamespace(Pod("x/a"), "z", Dropped)
-    reachability.ExpectEgressToNamespace(Pod("y/a"), "z", Dropped)
-    reachability.Expect(Pod("z/a"), Pod("z/b"), Dropped)
-    reachability.Expect(Pod("z/a"), Pod("z/c"), Dropped)
+    reachability.ExpectEgressToNamespace(Pod(namespaces["x"]+"/a"), namespaces["z"], Dropped)
+    reachability.ExpectEgressToNamespace(Pod(namespaces["y"]+"/a"), namespaces["z"], Dropped)
+    reachability.Expect(Pod(namespaces["z"]+"/a"), Pod(namespaces["z"]+"/b"), Dropped)
+    reachability.Expect(Pod(namespaces["z"]+"/a"), Pod(namespaces["z"]+"/c"), Dropped)
     testStep := []*TestStep{
         {
             "Port 80",
@@ -909,20 +914,20 @@ func testACNPClusterGroupUpdateAppliedTo(t *testing.T) {
     builder = builder.SetName("acnp-deny-cg-with-a-to-z-egress").
         SetPriority(1.0).
         SetAppliedToGroup([]ACNPAppliedToSpec{{Group: cgName}})
-    builder.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, map[string]string{"ns": "z"},
+    builder.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]},
         nil, nil, false, nil, crdv1alpha1.RuleActionDrop, "", "", nil)
 
     reachability := NewReachability(allPods, Connected)
-    reachability.ExpectEgressToNamespace(Pod("x/a"), "z", Dropped)
-    reachability.ExpectEgressToNamespace(Pod("y/a"), "z", Dropped)
-    reachability.Expect(Pod("z/a"), Pod("z/b"), Dropped)
-    reachability.Expect(Pod("z/a"), Pod("z/c"), Dropped)
+    reachability.ExpectEgressToNamespace(Pod(namespaces["x"]+"/a"), namespaces["z"], Dropped)
+    reachability.ExpectEgressToNamespace(Pod(namespaces["y"]+"/a"), namespaces["z"], Dropped)
+    reachability.Expect(Pod(namespaces["z"]+"/a"), Pod(namespaces["z"]+"/b"), Dropped)
+    reachability.Expect(Pod(namespaces["z"]+"/a"), Pod(namespaces["z"]+"/c"), Dropped)
 
     updatedReachability := NewReachability(allPods, Connected)
-    updatedReachability.ExpectEgressToNamespace(Pod("x/c"), "z", Dropped)
-    updatedReachability.ExpectEgressToNamespace(Pod("y/c"), "z", Dropped)
-    updatedReachability.Expect(Pod("z/c"), Pod("z/a"), Dropped)
-    updatedReachability.Expect(Pod("z/c"), Pod("z/b"), Dropped)
+    updatedReachability.ExpectEgressToNamespace(Pod(namespaces["x"]+"/c"), namespaces["z"], Dropped)
+    updatedReachability.ExpectEgressToNamespace(Pod(namespaces["y"]+"/c"), namespaces["z"], Dropped)
+    updatedReachability.Expect(Pod(namespaces["z"]+"/c"), Pod(namespaces["z"]+"/a"), Dropped)
+    updatedReachability.Expect(Pod(namespaces["z"]+"/c"), Pod(namespaces["z"]+"/b"), Dropped)
     testStep := []*TestStep{
         {
             "CG Pods A",
@@ -952,10 +957,10 @@ func testACNPClusterGroupUpdate(t *testing.T) {
     cgName := "cg-ns-z-then-y"
     cgBuilder := &ClusterGroupV1Alpha3SpecBuilder{}
-    cgBuilder = cgBuilder.SetName(cgName).SetNamespaceSelector(map[string]string{"ns": "z"}, nil)
+    cgBuilder = cgBuilder.SetName(cgName).SetNamespaceSelector(map[string]string{"ns": namespaces["z"]}, nil)
     // Update CG NS selector to group Pods from Namespace Y
     updatedCgBuilder := &ClusterGroupV1Alpha3SpecBuilder{}
-    updatedCgBuilder = updatedCgBuilder.SetName(cgName).SetNamespaceSelector(map[string]string{"ns": "y"}, nil)
+    updatedCgBuilder = updatedCgBuilder.SetName(cgName).SetNamespaceSelector(map[string]string{"ns": namespaces["y"]}, nil)
     builder := &ClusterNetworkPolicySpecBuilder{}
     builder = builder.SetName("acnp-deny-a-to-cg-with-z-egress").
         SetPriority(1.0).
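For helpers such as applyDefaultDenyToAllNamespaces and cleanupDefaultDenyNPs earlier in this file, switching the parameter from []string to map[string]string means they now iterate over the map values, i.e. the real (suffixed) Namespace names. A minimal, self-contained sketch of that pattern (applyToEachNamespace is a hypothetical stand-in, not a helper from this patch):

package main

import "fmt"

// applyToEachNamespace is a hypothetical stand-in for helpers like
// applyDefaultDenyToAllNamespaces: callers pass the alias->name map and the
// helper consumes only the values, i.e. the actual suffixed Namespace names.
func applyToEachNamespace(namespaces map[string]string, apply func(ns string) error) error {
	for alias, ns := range namespaces {
		if err := apply(ns); err != nil {
			return fmt.Errorf("failed for Namespace %s (alias %s): %w", ns, alias, err)
		}
	}
	return nil
}

func main() {
	namespaces := map[string]string{"x": "x-abc12", "y": "y-abc12", "z": "z-abc12"}
	_ = applyToEachNamespace(namespaces, func(ns string) error {
		fmt.Println("applying default-deny policy in", ns)
		return nil
	})
}

Map iteration order is nondeterministic in Go, which is fine here since each Namespace is handled independently.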
@@ -964,16 +969,16 @@ func testACNPClusterGroupUpdate(t *testing.T) {
         nil, nil, false, nil, crdv1alpha1.RuleActionDrop, cgName, "", nil)
 
     reachability := NewReachability(allPods, Connected)
-    reachability.ExpectEgressToNamespace(Pod("x/a"), "z", Dropped)
-    reachability.ExpectEgressToNamespace(Pod("y/a"), "z", Dropped)
-    reachability.Expect(Pod("z/a"), Pod("z/b"), Dropped)
-    reachability.Expect(Pod("z/a"), Pod("z/c"), Dropped)
+    reachability.ExpectEgressToNamespace(Pod(namespaces["x"]+"/a"), namespaces["z"], Dropped)
+    reachability.ExpectEgressToNamespace(Pod(namespaces["y"]+"/a"), namespaces["z"], Dropped)
+    reachability.Expect(Pod(namespaces["z"]+"/a"), Pod(namespaces["z"]+"/b"), Dropped)
+    reachability.Expect(Pod(namespaces["z"]+"/a"), Pod(namespaces["z"]+"/c"), Dropped)
 
     updatedReachability := NewReachability(allPods, Connected)
-    updatedReachability.ExpectEgressToNamespace(Pod("x/a"), "y", Dropped)
-    updatedReachability.ExpectEgressToNamespace(Pod("z/a"), "y", Dropped)
-    updatedReachability.Expect(Pod("y/a"), Pod("y/b"), Dropped)
-    updatedReachability.Expect(Pod("y/a"), Pod("y/c"), Dropped)
+    updatedReachability.ExpectEgressToNamespace(Pod(namespaces["x"]+"/a"), namespaces["y"], Dropped)
+    updatedReachability.ExpectEgressToNamespace(Pod(namespaces["z"]+"/a"), namespaces["y"], Dropped)
+    updatedReachability.Expect(Pod(namespaces["y"]+"/a"), Pod(namespaces["y"]+"/b"), Dropped)
+    updatedReachability.Expect(Pod(namespaces["y"]+"/a"), Pod(namespaces["y"]+"/c"), Dropped)
     testStep := []*TestStep{
         {
             "Port 80",
@@ -1004,22 +1009,22 @@ func testACNPClusterGroupAppliedToPodAdd(t *testing.T, data *TestData) {
     cgName := "cg-pod-custom-pod-zj"
     cgBuilder := &ClusterGroupV1Alpha3SpecBuilder{}
     cgBuilder = cgBuilder.SetName(cgName).
-        SetNamespaceSelector(map[string]string{"ns": "z"}, nil).
+        SetNamespaceSelector(map[string]string{"ns": namespaces["z"]}, nil).
         SetPodSelector(map[string]string{"pod": "j"}, nil)
     builder := &ClusterNetworkPolicySpecBuilder{}
     builder = builder.SetName("acnp-deny-cg-with-zj-to-xj-egress").
         SetPriority(1.0).
         SetAppliedToGroup([]ACNPAppliedToSpec{{Group: cgName}})
-    builder.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, map[string]string{"pod": "j"}, map[string]string{"ns": "x"},
+    builder.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, map[string]string{"pod": "j"}, map[string]string{"ns": namespaces["x"]},
         nil, nil, false, nil, crdv1alpha1.RuleActionDrop, "", "", nil)
     cp := []*CustomProbe{
         {
             SourcePod: CustomPod{
-                Pod: NewPod("z", "j"),
+                Pod: NewPod(namespaces["z"], "j"),
                 Labels: map[string]string{"pod": "j"},
             },
             DestPod: CustomPod{
-                Pod: NewPod("x", "j"),
+                Pod: NewPod(namespaces["x"], "j"),
                 Labels: map[string]string{"pod": "j"},
             },
             ExpectConnectivity: Dropped,
@@ -1047,7 +1052,7 @@ func testACNPClusterGroupRefRulePodAdd(t *testing.T, data *TestData) {
     cgName := "cg-pod-custom-pod-zk"
     cgBuilder := &ClusterGroupV1Alpha3SpecBuilder{}
     cgBuilder = cgBuilder.SetName(cgName).
-        SetNamespaceSelector(map[string]string{"ns": "z"}, nil).
+        SetNamespaceSelector(map[string]string{"ns": namespaces["z"]}, nil).
         SetPodSelector(map[string]string{"pod": "k"}, nil)
     builder := &ClusterNetworkPolicySpecBuilder{}
     builder = builder.SetName("acnp-deny-xk-to-cg-with-zk-egress").
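The same idea drives the testNamespace -> data.testNamespace substitutions throughout this patch: each TestData instance carries the Namespace its test run created, so a new test can start immediately instead of waiting for the previous test's Namespace to finish terminating. A sketch of the shape this enables (the testNamespace field and randName appear in this patch; the setup function and the "antrea-test-" prefix are illustrative only):

package main

import (
	"fmt"
	"math/rand"
)

// TestData, in this sketch, owns the Namespace its test run created.
type TestData struct {
	testNamespace string
}

// setupTest hands every caller a fresh, uniquely named Namespace, so teardown
// of one test never blocks setup of the next.
func setupTest() *TestData {
	return &TestData{testNamespace: randName("antrea-test-")}
}

// randName is a simplified stand-in for the framework helper.
func randName(prefix string) string {
	const letters = "abcdefghijklmnopqrstuvwxyz0123456789"
	b := make([]byte, 8)
	for i := range b {
		b[i] = letters[rand.Intn(len(letters))]
	}
	return prefix + string(b)
}

func main() {
	data := setupTest()
	fmt.Println("running e2e test in", data.testNamespace)
}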
@@ -1055,7 +1060,7 @@ func testACNPClusterGroupRefRulePodAdd(t *testing.T, data *TestData) {
         SetAppliedToGroup([]ACNPAppliedToSpec{
             {
                 PodSelector: map[string]string{"pod": "k"},
-                NSSelector: map[string]string{"ns": "x"},
+                NSSelector: map[string]string{"ns": namespaces["x"]},
             },
         })
     builder.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil,
@@ -1063,11 +1068,11 @@ func testACNPClusterGroupRefRulePodAdd(t *testing.T, data *TestData) {
     cp := []*CustomProbe{
         {
             SourcePod: CustomPod{
-                Pod: NewPod("x", "k"),
+                Pod: NewPod(namespaces["x"], "k"),
                 Labels: map[string]string{"pod": "k"},
             },
             DestPod: CustomPod{
-                Pod: NewPod("z", "k"),
+                Pod: NewPod(namespaces["z"], "k"),
                 Labels: map[string]string{"pod": "k"},
             },
             ExpectConnectivity: Dropped,
@@ -1093,10 +1098,10 @@ func testACNPClusterGroupRefRulePodAdd(t *testing.T, data *TestData) {
 }
 
 func testACNPClusterGroupRefRuleIPBlocks(t *testing.T) {
-    podXAIP, _ := podIPs["x/a"]
-    podXBIP, _ := podIPs["x/b"]
-    podXCIP, _ := podIPs["x/c"]
-    podZAIP, _ := podIPs["z/a"]
+    podXAIP, _ := podIPs[namespaces["x"]+"/a"]
+    podXBIP, _ := podIPs[namespaces["x"]+"/b"]
+    podXCIP, _ := podIPs[namespaces["x"]+"/c"]
+    podZAIP, _ := podIPs[namespaces["z"]+"/a"]
     // There are three situations of a Pod's IP(s):
     // 1. Only one IPv4 address.
     // 2. Only one IPv6 address.
@@ -1132,7 +1137,7 @@ func testACNPClusterGroupRefRuleIPBlocks(t *testing.T) {
         SetAppliedToGroup([]ACNPAppliedToSpec{
             {
                 PodSelector: map[string]string{"pod": "a"},
-                NSSelector: map[string]string{"ns": "y"},
+                NSSelector: map[string]string{"ns": namespaces["y"]},
             },
         })
     builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil,
@@ -1141,10 +1146,10 @@ func testACNPClusterGroupRefRuleIPBlocks(t *testing.T) {
         nil, nil, false, nil, crdv1alpha1.RuleActionDrop, cgv1a2Name, "", nil)
 
     reachability := NewReachability(allPods, Connected)
-    reachability.Expect(Pod("x/a"), Pod("y/a"), Dropped)
-    reachability.Expect(Pod("x/b"), Pod("y/a"), Dropped)
-    reachability.Expect(Pod("x/c"), Pod("y/a"), Dropped)
-    reachability.Expect(Pod("z/a"), Pod("y/a"), Dropped)
+    reachability.Expect(Pod(namespaces["x"]+"/a"), Pod(namespaces["y"]+"/a"), Dropped)
+    reachability.Expect(Pod(namespaces["x"]+"/b"), Pod(namespaces["y"]+"/a"), Dropped)
+    reachability.Expect(Pod(namespaces["x"]+"/c"), Pod(namespaces["y"]+"/a"), Dropped)
+    reachability.Expect(Pod(namespaces["z"]+"/a"), Pod(namespaces["y"]+"/a"), Dropped)
     testStep := []*TestStep{
         {
             "Port 80",
@@ -1169,12 +1174,12 @@ func testBaselineNamespaceIsolation(t *testing.T) {
     nsExpOtherThanX := metav1.LabelSelectorRequirement{
         Key: "ns",
         Operator: metav1.LabelSelectorOpNotIn,
-        Values: []string{"x"},
+        Values: []string{namespaces["x"]},
     }
     builder = builder.SetName("acnp-baseline-isolate-ns-x").
         SetTier("baseline").
         SetPriority(1.0).
-        SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": "x"}}})
+        SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": namespaces["x"]}}})
     builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil,
         nil, []metav1.LabelSelectorRequirement{nsExpOtherThanX}, false, nil,
         crdv1alpha1.RuleActionDrop, "", "", nil)
 
     // create a K8s NetworkPolicy for Pods in namespace x to allow ingress traffic from Pods in the same namespace,
     // as well as from the y/a Pod. It should open up ingress from y/a since it's evaluated before the baseline tier.
     k8sNPBuilder := &NetworkPolicySpecBuilder{}
-    k8sNPBuilder = k8sNPBuilder.SetName("x", "allow-ns-x-and-y-a").
+    k8sNPBuilder = k8sNPBuilder.SetName(namespaces["x"], "allow-ns-x-and-y-a").
         SetTypeIngress().
         AddIngress(v1.ProtocolTCP, &p80, nil, nil, nil,
-            nil, map[string]string{"ns": "x"}, nil, nil).
+            nil, map[string]string{"ns": namespaces["x"]}, nil, nil).
         AddIngress(v1.ProtocolTCP, &p80, nil, nil, nil,
-            map[string]string{"pod": "a"}, map[string]string{"ns": "y"}, nil, nil)
+            map[string]string{"pod": "a"}, map[string]string{"ns": namespaces["y"]}, nil, nil)
 
     reachability := NewReachability(allPods, Connected)
-    reachability.Expect(Pod("y/b"), Pod("x/a"), Dropped)
-    reachability.Expect(Pod("y/c"), Pod("x/a"), Dropped)
-    reachability.ExpectIngressFromNamespace(Pod("x/a"), "z", Dropped)
-    reachability.Expect(Pod("y/b"), Pod("x/b"), Dropped)
-    reachability.Expect(Pod("y/c"), Pod("x/b"), Dropped)
-    reachability.ExpectIngressFromNamespace(Pod("x/b"), "z", Dropped)
-    reachability.Expect(Pod("y/b"), Pod("x/c"), Dropped)
-    reachability.Expect(Pod("y/c"), Pod("x/c"), Dropped)
-    reachability.ExpectIngressFromNamespace(Pod("x/c"), "z", Dropped)
+    reachability.Expect(Pod(namespaces["y"]+"/b"), Pod(namespaces["x"]+"/a"), Dropped)
+    reachability.Expect(Pod(namespaces["y"]+"/c"), Pod(namespaces["x"]+"/a"), Dropped)
+    reachability.ExpectIngressFromNamespace(Pod(namespaces["x"]+"/a"), namespaces["z"], Dropped)
+    reachability.Expect(Pod(namespaces["y"]+"/b"), Pod(namespaces["x"]+"/b"), Dropped)
+    reachability.Expect(Pod(namespaces["y"]+"/c"), Pod(namespaces["x"]+"/b"), Dropped)
+    reachability.ExpectIngressFromNamespace(Pod(namespaces["x"]+"/b"), namespaces["z"], Dropped)
+    reachability.Expect(Pod(namespaces["y"]+"/b"), Pod(namespaces["x"]+"/c"), Dropped)
+    reachability.Expect(Pod(namespaces["y"]+"/c"), Pod(namespaces["x"]+"/c"), Dropped)
+    reachability.ExpectIngressFromNamespace(Pod(namespaces["x"]+"/c"), namespaces["z"], Dropped)
     testStep := []*TestStep{
         {
             "Port 80",
@@ -1215,7 +1220,7 @@ func testBaselineNamespaceIsolation(t *testing.T) {
     }
     executeTests(t, testCase)
     // Cleanup the K8s NetworkPolicy created for this test.
-    failOnError(k8sUtils.CleanNetworkPolicies([]string{"x"}), t)
+    failOnError(k8sUtils.CleanNetworkPolicies(map[string]string{"x": namespaces["x"]}), t)
     time.Sleep(networkPolicyDelay)
 }
 
@@ -1225,43 +1230,43 @@ func testACNPPriorityOverride(t *testing.T) {
     builder1 := &ClusterNetworkPolicySpecBuilder{}
     builder1 = builder1.SetName("acnp-priority1").
         SetPriority(1.001).
-        SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}, NSSelector: map[string]string{"ns": "x"}}})
+        SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}, NSSelector: map[string]string{"ns": namespaces["x"]}}})
     // Highest priority. Drops traffic from z/b to x/a.
-    builder1.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": "z"},
+    builder1.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": namespaces["z"]},
         nil, nil, false, nil, crdv1alpha1.RuleActionDrop, "", "", nil)
 
     builder2 := &ClusterNetworkPolicySpecBuilder{}
     builder2 = builder2.SetName("acnp-priority2").
         SetPriority(1.002).
-        SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}, NSSelector: map[string]string{"ns": "x"}}})
+        SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}, NSSelector: map[string]string{"ns": namespaces["x"]}}})
     // Medium priority. Allows traffic from z to x/a.
-    builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, map[string]string{"ns": "z"},
+    builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]},
         nil, nil, false, nil, crdv1alpha1.RuleActionAllow, "", "", nil)
 
     builder3 := &ClusterNetworkPolicySpecBuilder{}
     builder3 = builder3.SetName("acnp-priority3").
         SetPriority(1.003).
-        SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": "x"}}})
+        SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": namespaces["x"]}}})
     // Lowest priority. Drops traffic from z to x.
-    builder3.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, map[string]string{"ns": "z"},
+    builder3.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]},
         nil, nil, false, nil, crdv1alpha1.RuleActionDrop, "", "", nil)
 
     reachabilityTwoACNPs := NewReachability(allPods, Connected)
-    reachabilityTwoACNPs.Expect(Pod("z/a"), Pod("x/b"), Dropped)
-    reachabilityTwoACNPs.Expect(Pod("z/a"), Pod("x/c"), Dropped)
-    reachabilityTwoACNPs.Expect(Pod("z/b"), Pod("x/b"), Dropped)
-    reachabilityTwoACNPs.Expect(Pod("z/b"), Pod("x/c"), Dropped)
-    reachabilityTwoACNPs.Expect(Pod("z/c"), Pod("x/b"), Dropped)
-    reachabilityTwoACNPs.Expect(Pod("z/c"), Pod("x/c"), Dropped)
+    reachabilityTwoACNPs.Expect(Pod(namespaces["z"]+"/a"), Pod(namespaces["x"]+"/b"), Dropped)
+    reachabilityTwoACNPs.Expect(Pod(namespaces["z"]+"/a"), Pod(namespaces["x"]+"/c"), Dropped)
+    reachabilityTwoACNPs.Expect(Pod(namespaces["z"]+"/b"), Pod(namespaces["x"]+"/b"), Dropped)
+    reachabilityTwoACNPs.Expect(Pod(namespaces["z"]+"/b"), Pod(namespaces["x"]+"/c"), Dropped)
+    reachabilityTwoACNPs.Expect(Pod(namespaces["z"]+"/c"), Pod(namespaces["x"]+"/b"), Dropped)
+    reachabilityTwoACNPs.Expect(Pod(namespaces["z"]+"/c"), Pod(namespaces["x"]+"/c"), Dropped)
 
     reachabilityAllACNPs := NewReachability(allPods, Connected)
-    reachabilityAllACNPs.Expect(Pod("z/a"), Pod("x/b"), Dropped)
-    reachabilityAllACNPs.Expect(Pod("z/a"), Pod("x/c"), Dropped)
-    reachabilityAllACNPs.Expect(Pod("z/b"), Pod("x/a"), Dropped)
-    reachabilityAllACNPs.Expect(Pod("z/b"), Pod("x/b"), Dropped)
-    reachabilityAllACNPs.Expect(Pod("z/b"), Pod("x/c"), Dropped)
-    reachabilityAllACNPs.Expect(Pod("z/c"), Pod("x/b"), Dropped)
-    reachabilityAllACNPs.Expect(Pod("z/c"), Pod("x/c"), Dropped)
+    reachabilityAllACNPs.Expect(Pod(namespaces["z"]+"/a"), Pod(namespaces["x"]+"/b"), Dropped)
+    reachabilityAllACNPs.Expect(Pod(namespaces["z"]+"/a"), Pod(namespaces["x"]+"/c"), Dropped)
+    reachabilityAllACNPs.Expect(Pod(namespaces["z"]+"/b"), Pod(namespaces["x"]+"/a"), Dropped)
+    reachabilityAllACNPs.Expect(Pod(namespaces["z"]+"/b"), Pod(namespaces["x"]+"/b"), Dropped)
+    reachabilityAllACNPs.Expect(Pod(namespaces["z"]+"/b"), Pod(namespaces["x"]+"/c"), Dropped)
+    reachabilityAllACNPs.Expect(Pod(namespaces["z"]+"/c"), Pod(namespaces["x"]+"/b"), Dropped)
+    reachabilityAllACNPs.Expect(Pod(namespaces["z"]+"/c"), Pod(namespaces["x"]+"/c"), Dropped)
 
     testStepTwoACNP := []*TestStep{
         {
@@ -1300,45 +1305,45 @@ func testACNPTierOverride(t *testing.T) {
     builder1 = builder1.SetName("acnp-tier-emergency").
         SetTier("emergency").
         SetPriority(100).
-        SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}, NSSelector: map[string]string{"ns": "x"}}})
+        SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}, NSSelector: map[string]string{"ns": namespaces["x"]}}})
     // Highest priority tier. Drops traffic from z/b to x/a.
-    builder1.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": "z"},
+    builder1.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": namespaces["z"]},
         nil, nil, false, nil, crdv1alpha1.RuleActionDrop, "", "", nil)
 
     builder2 := &ClusterNetworkPolicySpecBuilder{}
     builder2 = builder2.SetName("acnp-tier-securityops").
         SetTier("securityops").
         SetPriority(10).
-        SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}, NSSelector: map[string]string{"ns": "x"}}})
+        SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}, NSSelector: map[string]string{"ns": namespaces["x"]}}})
     // Medium priority tier. Allows traffic from z to x/a.
-    builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, map[string]string{"ns": "z"},
+    builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]},
         nil, nil, false, nil, crdv1alpha1.RuleActionAllow, "", "", nil)
 
     builder3 := &ClusterNetworkPolicySpecBuilder{}
     builder3 = builder3.SetName("acnp-tier-application").
         SetTier("application").
         SetPriority(1).
-        SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": "x"}}})
+        SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": namespaces["x"]}}})
     // Lowest priority tier. Drops traffic from z to x.
-    builder3.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, map[string]string{"ns": "z"},
+    builder3.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]},
         nil, nil, false, nil, crdv1alpha1.RuleActionDrop, "", "", nil)
 
     reachabilityTwoACNPs := NewReachability(allPods, Connected)
-    reachabilityTwoACNPs.Expect(Pod("z/a"), Pod("x/b"), Dropped)
-    reachabilityTwoACNPs.Expect(Pod("z/a"), Pod("x/c"), Dropped)
-    reachabilityTwoACNPs.Expect(Pod("z/b"), Pod("x/b"), Dropped)
-    reachabilityTwoACNPs.Expect(Pod("z/b"), Pod("x/c"), Dropped)
-    reachabilityTwoACNPs.Expect(Pod("z/c"), Pod("x/b"), Dropped)
-    reachabilityTwoACNPs.Expect(Pod("z/c"), Pod("x/c"), Dropped)
+    reachabilityTwoACNPs.Expect(Pod(namespaces["z"]+"/a"), Pod(namespaces["x"]+"/b"), Dropped)
+    reachabilityTwoACNPs.Expect(Pod(namespaces["z"]+"/a"), Pod(namespaces["x"]+"/c"), Dropped)
+    reachabilityTwoACNPs.Expect(Pod(namespaces["z"]+"/b"), Pod(namespaces["x"]+"/b"), Dropped)
+    reachabilityTwoACNPs.Expect(Pod(namespaces["z"]+"/b"), Pod(namespaces["x"]+"/c"), Dropped)
+    reachabilityTwoACNPs.Expect(Pod(namespaces["z"]+"/c"), Pod(namespaces["x"]+"/b"), Dropped)
+    reachabilityTwoACNPs.Expect(Pod(namespaces["z"]+"/c"), Pod(namespaces["x"]+"/c"), Dropped)
 
     reachabilityAllACNPs := NewReachability(allPods, Connected)
-    reachabilityAllACNPs.Expect(Pod("z/a"), Pod("x/b"), Dropped)
-    reachabilityAllACNPs.Expect(Pod("z/a"), Pod("x/c"), Dropped)
-    reachabilityAllACNPs.Expect(Pod("z/b"), Pod("x/a"), Dropped)
-    reachabilityAllACNPs.Expect(Pod("z/b"), Pod("x/b"), Dropped)
-    reachabilityAllACNPs.Expect(Pod("z/b"), Pod("x/c"), Dropped)
-    reachabilityAllACNPs.Expect(Pod("z/c"), Pod("x/b"), Dropped)
-    reachabilityAllACNPs.Expect(Pod("z/c"), Pod("x/c"), Dropped)
+    reachabilityAllACNPs.Expect(Pod(namespaces["z"]+"/a"), Pod(namespaces["x"]+"/b"), Dropped)
+    reachabilityAllACNPs.Expect(Pod(namespaces["z"]+"/a"), Pod(namespaces["x"]+"/c"), Dropped)
+    reachabilityAllACNPs.Expect(Pod(namespaces["z"]+"/b"), Pod(namespaces["x"]+"/a"), Dropped)
+    reachabilityAllACNPs.Expect(Pod(namespaces["z"]+"/b"), Pod(namespaces["x"]+"/b"), Dropped)
+    reachabilityAllACNPs.Expect(Pod(namespaces["z"]+"/b"), Pod(namespaces["x"]+"/c"), Dropped)
+    reachabilityAllACNPs.Expect(Pod(namespaces["z"]+"/c"), Pod(namespaces["x"]+"/b"), Dropped)
+    reachabilityAllACNPs.Expect(Pod(namespaces["z"]+"/c"), Pod(namespaces["x"]+"/c"), Dropped)
 
     testStepTwoACNP := []*TestStep{
         {
@@ -1384,27 +1389,27 @@ func testACNPCustomTiers(t *testing.T) {
     builder1 = builder1.SetName("acnp-tier-high").
         SetTier("high-priority").
         SetPriority(100).
-        SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}, NSSelector: map[string]string{"ns": "x"}}})
+        SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}, NSSelector: map[string]string{"ns": namespaces["x"]}}})
     // Medium priority tier. Allows traffic from z to x/a.
-    builder1.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, map[string]string{"ns": "z"},
+    builder1.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]},
         nil, nil, false, nil, crdv1alpha1.RuleActionAllow, "", "", nil)
 
     builder2 := &ClusterNetworkPolicySpecBuilder{}
     builder2 = builder2.SetName("acnp-tier-low").
         SetTier("low-priority").
         SetPriority(1).
-        SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": "x"}}})
+        SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": namespaces["x"]}}})
     // Lowest priority tier. Drops traffic from z to x.
-    builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, map[string]string{"ns": "z"},
+    builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]},
         nil, nil, false, nil, crdv1alpha1.RuleActionDrop, "", "", nil)
 
     reachabilityTwoACNPs := NewReachability(allPods, Connected)
-    reachabilityTwoACNPs.Expect(Pod("z/a"), Pod("x/b"), Dropped)
-    reachabilityTwoACNPs.Expect(Pod("z/a"), Pod("x/c"), Dropped)
-    reachabilityTwoACNPs.Expect(Pod("z/b"), Pod("x/b"), Dropped)
-    reachabilityTwoACNPs.Expect(Pod("z/b"), Pod("x/c"), Dropped)
-    reachabilityTwoACNPs.Expect(Pod("z/c"), Pod("x/b"), Dropped)
-    reachabilityTwoACNPs.Expect(Pod("z/c"), Pod("x/c"), Dropped)
+    reachabilityTwoACNPs.Expect(Pod(namespaces["z"]+"/a"), Pod(namespaces["x"]+"/b"), Dropped)
+    reachabilityTwoACNPs.Expect(Pod(namespaces["z"]+"/a"), Pod(namespaces["x"]+"/c"), Dropped)
+    reachabilityTwoACNPs.Expect(Pod(namespaces["z"]+"/b"), Pod(namespaces["x"]+"/b"), Dropped)
+    reachabilityTwoACNPs.Expect(Pod(namespaces["z"]+"/b"), Pod(namespaces["x"]+"/c"), Dropped)
+    reachabilityTwoACNPs.Expect(Pod(namespaces["z"]+"/c"), Pod(namespaces["x"]+"/b"), Dropped)
+    reachabilityTwoACNPs.Expect(Pod(namespaces["z"]+"/c"), Pod(namespaces["x"]+"/c"), Dropped)
     testStepTwoACNP := []*TestStep{
         {
             "Two Policies in different tiers",
@@ -1433,23 +1438,23 @@ func testACNPPriorityConflictingRule(t *testing.T) {
     builder1 := &ClusterNetworkPolicySpecBuilder{}
     builder1 = builder1.SetName("acnp-drop").
         SetPriority(1).
-        SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": "x"}}})
-    builder1.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, map[string]string{"ns": "z"},
+        SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": namespaces["x"]}}})
+    builder1.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]},
         nil, nil, false, nil, crdv1alpha1.RuleActionDrop, "", "", nil)
 
     builder2 := &ClusterNetworkPolicySpecBuilder{}
     builder2 = builder2.SetName("acnp-allow").
         SetPriority(2).
-        SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": "x"}}})
+        SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": namespaces["x"]}}})
     // The following ingress rule will take no effect as it is exactly the same as ingress rule of cnp-drop,
     // but cnp-allow has lower priority.
-    builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, map[string]string{"ns": "z"},
+    builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]},
         nil, nil, false, nil, crdv1alpha1.RuleActionAllow, "", "", nil)
 
     reachabilityBothACNP := NewReachability(allPods, Connected)
-    reachabilityBothACNP.ExpectEgressToNamespace(Pod("z/a"), "x", Dropped)
-    reachabilityBothACNP.ExpectEgressToNamespace(Pod("z/b"), "x", Dropped)
-    reachabilityBothACNP.ExpectEgressToNamespace(Pod("z/c"), "x", Dropped)
+    reachabilityBothACNP.ExpectEgressToNamespace(Pod(namespaces["z"]+"/a"), namespaces["x"], Dropped)
+    reachabilityBothACNP.ExpectEgressToNamespace(Pod(namespaces["z"]+"/b"), namespaces["x"], Dropped)
+    reachabilityBothACNP.ExpectEgressToNamespace(Pod(namespaces["z"]+"/c"), namespaces["x"], Dropped)
     testStep := []*TestStep{
         {
             "Both ACNP",
@@ -1474,29 +1479,29 @@ func testACNPRulePriority(t *testing.T) {
     // acnp-deny will apply to all pods in namespace x
     builder1 = builder1.SetName("acnp-deny").
         SetPriority(5).
-        SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": "x"}}})
-    builder1.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, map[string]string{"ns": "y"},
+        SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": namespaces["x"]}}})
+    builder1.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["y"]},
         nil, nil, false, nil, crdv1alpha1.RuleActionDrop, "", "", nil)
     // This rule should take no effect as it will be overridden by the first rule of cnp-allow
-    builder1.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, map[string]string{"ns": "z"},
+    builder1.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]},
         nil, nil, false, nil, crdv1alpha1.RuleActionDrop, "", "", nil)
 
     builder2 := &ClusterNetworkPolicySpecBuilder{}
     // acnp-allow will also apply to all pods in namespace x
     builder2 = builder2.SetName("acnp-allow").
         SetPriority(5).
-		SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": "x"}}})
-	builder2.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, map[string]string{"ns": "z"},
+		SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": namespaces["x"]}}})
+	builder2.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]},
 		nil, nil, false, nil, crdv1alpha1.RuleActionAllow, "", "", nil)
 	// This rule should have no effect, as it will be overridden by the first rule of acnp-deny
-	builder2.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, map[string]string{"ns": "y"},
+	builder2.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["y"]},
 		nil, nil, false, nil, crdv1alpha1.RuleActionAllow, "", "", nil)

 	// Only egress from pods in namespace x to namespace y should be denied
 	reachabilityBothACNP := NewReachability(allPods, Connected)
-	reachabilityBothACNP.ExpectIngressFromNamespace("y/a", "x", Dropped)
-	reachabilityBothACNP.ExpectIngressFromNamespace("y/b", "x", Dropped)
-	reachabilityBothACNP.ExpectIngressFromNamespace("y/c", "x", Dropped)
+	reachabilityBothACNP.ExpectIngressFromNamespace(Pod(namespaces["y"]+"/a"), namespaces["x"], Dropped)
+	reachabilityBothACNP.ExpectIngressFromNamespace(Pod(namespaces["y"]+"/b"), namespaces["x"], Dropped)
+	reachabilityBothACNP.ExpectIngressFromNamespace(Pod(namespaces["y"]+"/c"), namespaces["x"], Dropped)
 	testStep := []*TestStep{
 		{
 			"Both ACNP",
@@ -1520,14 +1525,14 @@ func testACNPPortRange(t *testing.T) {
 	builder = builder.SetName("acnp-deny-a-to-z-egress-port-range").
 		SetPriority(1.0).
 		SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}})
-	builder.AddEgress(ProtocolTCP, &p8080, nil, &p8085, nil, nil, nil, nil, map[string]string{"ns": "z"},
+	builder.AddEgress(ProtocolTCP, &p8080, nil, &p8085, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]},
 		nil, nil, false, nil, crdv1alpha1.RuleActionDrop, "", "acnp-port-range", nil)

 	reachability := NewReachability(allPods, Connected)
-	reachability.ExpectEgressToNamespace(Pod("x/a"), "z", Dropped)
-	reachability.ExpectEgressToNamespace(Pod("y/a"), "z", Dropped)
-	reachability.Expect(Pod("z/a"), Pod("z/b"), Dropped)
-	reachability.Expect(Pod("z/a"), Pod("z/c"), Dropped)
+	reachability.ExpectEgressToNamespace(Pod(namespaces["x"]+"/a"), namespaces["z"], Dropped)
+	reachability.ExpectEgressToNamespace(Pod(namespaces["y"]+"/a"), namespaces["z"], Dropped)
+	reachability.Expect(Pod(namespaces["z"]+"/a"), Pod(namespaces["z"]+"/b"), Dropped)
+	reachability.Expect(Pod(namespaces["z"]+"/a"), Pod(namespaces["z"]+"/c"), Dropped)
 	testSteps := []*TestStep{
 		{
 			fmt.Sprintf("ACNP Drop Ports 8080:8085"),
@@ -1552,14 +1557,14 @@ func testACNPRejectEgress(t *testing.T) {
 	builder = builder.SetName("acnp-reject-a-to-z-egress").
 		SetPriority(1.0).
SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}}) - builder.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, map[string]string{"ns": "z"}, + builder.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]}, nil, nil, false, nil, crdv1alpha1.RuleActionReject, "", "", nil) reachability := NewReachability(allPods, Connected) - reachability.ExpectEgressToNamespace(Pod("x/a"), "z", Rejected) - reachability.ExpectEgressToNamespace(Pod("y/a"), "z", Rejected) - reachability.Expect(Pod("z/a"), Pod("z/b"), Rejected) - reachability.Expect(Pod("z/a"), Pod("z/c"), Rejected) + reachability.ExpectEgressToNamespace(Pod(namespaces["x"]+"/a"), namespaces["z"], Rejected) + reachability.ExpectEgressToNamespace(Pod(namespaces["y"]+"/a"), namespaces["z"], Rejected) + reachability.Expect(Pod(namespaces["z"]+"/a"), Pod(namespaces["z"]+"/b"), Rejected) + reachability.Expect(Pod(namespaces["z"]+"/a"), Pod(namespaces["z"]+"/c"), Rejected) testStep := []*TestStep{ { "Port 80", @@ -1583,14 +1588,14 @@ func testACNPRejectIngress(t *testing.T, protocol AntreaPolicyProtocol) { builder = builder.SetName("acnp-reject-a-from-z-ingress"). SetPriority(1.0). SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}}) - builder.AddIngress(protocol, &p80, nil, nil, nil, nil, nil, nil, map[string]string{"ns": "z"}, + builder.AddIngress(protocol, &p80, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]}, nil, nil, false, nil, crdv1alpha1.RuleActionReject, "", "", nil) reachability := NewReachability(allPods, Connected) - reachability.ExpectIngressFromNamespace(Pod("x/a"), "z", Rejected) - reachability.ExpectIngressFromNamespace(Pod("y/a"), "z", Rejected) - reachability.Expect(Pod("z/b"), Pod("z/a"), Rejected) - reachability.Expect(Pod("z/c"), Pod("z/a"), Rejected) + reachability.ExpectIngressFromNamespace(Pod(namespaces["x"]+"/a"), namespaces["z"], Rejected) + reachability.ExpectIngressFromNamespace(Pod(namespaces["y"]+"/a"), namespaces["z"], Rejected) + reachability.Expect(Pod(namespaces["z"]+"/b"), Pod(namespaces["z"]+"/a"), Rejected) + reachability.Expect(Pod(namespaces["z"]+"/c"), Pod(namespaces["z"]+"/a"), Rejected) testStep := []*TestStep{ { "Port 80", @@ -1610,26 +1615,26 @@ func testACNPRejectIngress(t *testing.T, protocol AntreaPolicyProtocol) { func testRejectServiceTraffic(t *testing.T, data *TestData) { clientName := "agnhost-client" - require.NoError(t, data.createAgnhostPodOnNode(clientName, testNamespace, nodeName(0), false)) - defer data.deletePodAndWait(defaultTimeout, clientName, testNamespace) - _, err := data.podWaitForIPs(defaultTimeout, clientName, testNamespace) + require.NoError(t, data.createAgnhostPodOnNode(clientName, data.testNamespace, nodeName(0), false)) + defer data.deletePodAndWait(defaultTimeout, clientName, data.testNamespace) + _, err := data.podWaitForIPs(defaultTimeout, clientName, data.testNamespace) require.NoError(t, err) - svc1, cleanup1 := data.createAgnhostServiceAndBackendPods(t, "s1", testNamespace, nodeName(0), v1.ServiceTypeClusterIP) + svc1, cleanup1 := data.createAgnhostServiceAndBackendPods(t, "s1", data.testNamespace, nodeName(0), v1.ServiceTypeClusterIP) defer cleanup1() - svc2, cleanup2 := data.createAgnhostServiceAndBackendPods(t, "s2", testNamespace, nodeName(1), v1.ServiceTypeClusterIP) + svc2, cleanup2 := data.createAgnhostServiceAndBackendPods(t, "s2", data.testNamespace, nodeName(1), v1.ServiceTypeClusterIP) defer cleanup2() testcases := 
[]podToAddrTestStep{
 		{
-			"antrea-test/agnhost-client",
+			Pod(data.testNamespace + "/agnhost-client"),
 			svc1.Spec.ClusterIP,
 			80,
 			Rejected,
 		},
 		{
-			"antrea-test/agnhost-client",
+			Pod(data.testNamespace + "/agnhost-client"),
 			svc2.Spec.ClusterIP,
 			80,
 			Rejected,
@@ -1698,28 +1703,28 @@ func testRejectServiceTraffic(t *testing.T, data *TestData) {

 // testRejectNoInfiniteLoop tests that a reject action in both traffic directions won't cause an infinite rejection loop.
 func testRejectNoInfiniteLoop(t *testing.T, data *TestData) {
 	clientName := "agnhost-client"
-	require.NoError(t, data.createAgnhostPodOnNode(clientName, testNamespace, nodeName(0), false))
-	defer data.deletePodAndWait(defaultTimeout, clientName, testNamespace)
-	_, err := data.podWaitForIPs(defaultTimeout, clientName, testNamespace)
+	require.NoError(t, data.createAgnhostPodOnNode(clientName, data.testNamespace, nodeName(0), false))
+	defer data.deletePodAndWait(defaultTimeout, clientName, data.testNamespace)
+	_, err := data.podWaitForIPs(defaultTimeout, clientName, data.testNamespace)
 	require.NoError(t, err)

-	_, server0IP, cleanupFunc := createAndWaitForPod(t, data, data.createNginxPodOnNode, "server", nodeName(0), testNamespace, false)
+	_, server0IP, cleanupFunc := createAndWaitForPod(t, data, data.createNginxPodOnNode, "server", nodeName(0), data.testNamespace, false)
 	defer cleanupFunc()

-	_, server1IP, cleanupFunc := createAndWaitForPod(t, data, data.createNginxPodOnNode, "server", nodeName(1), testNamespace, false)
+	_, server1IP, cleanupFunc := createAndWaitForPod(t, data, data.createNginxPodOnNode, "server", nodeName(1), data.testNamespace, false)
 	defer cleanupFunc()

 	var testcases []podToAddrTestStep
 	if clusterInfo.podV4NetworkCIDR != "" {
 		testcases = append(testcases, []podToAddrTestStep{
 			{
-				"antrea-test/agnhost-client",
+				Pod(data.testNamespace + "/agnhost-client"),
 				server0IP.ipv4.String(),
 				80,
 				Rejected,
 			},
 			{
-				"antrea-test/agnhost-client",
+				Pod(data.testNamespace + "/agnhost-client"),
 				server1IP.ipv4.String(),
 				80,
 				Rejected,
@@ -1729,13 +1734,13 @@ func testRejectNoInfiniteLoop(t *testing.T, data *TestData) {
 	if clusterInfo.podV6NetworkCIDR != "" {
 		testcases = append(testcases, []podToAddrTestStep{
 			{
-				"antrea-test/agnhost-client",
+				Pod(data.testNamespace + "/agnhost-client"),
 				server0IP.ipv6.String(),
 				80,
 				Rejected,
 			},
 			{
-				"antrea-test/agnhost-client",
+				Pod(data.testNamespace + "/agnhost-client"),
 				server1IP.ipv6.String(),
 				80,
 				Rejected,
@@ -1814,14 +1819,14 @@ func testRejectNoInfiniteLoop(t *testing.T, data *TestData) {

 // testANPPortRange tests that the port range in an ANP can work.
 func testANPPortRange(t *testing.T) {
 	builder := &AntreaNetworkPolicySpecBuilder{}
-	builder = builder.SetName("y", "anp-deny-yb-to-xc-egress-port-range").
+	builder = builder.SetName(namespaces["y"], "anp-deny-yb-to-xc-egress-port-range").
 		SetPriority(1.0).
SetAppliedToGroup([]ANPAppliedToSpec{{PodSelector: map[string]string{"pod": "b"}}})
-	builder.AddEgress(ProtocolTCP, &p8080, nil, &p8085, nil, nil, nil, map[string]string{"pod": "c"}, map[string]string{"ns": "x"},
+	builder.AddEgress(ProtocolTCP, &p8080, nil, &p8085, nil, nil, nil, map[string]string{"pod": "c"}, map[string]string{"ns": namespaces["x"]},
 		nil, nil, nil, crdv1alpha1.RuleActionDrop, "anp-port-range")

 	reachability := NewReachability(allPods, Connected)
-	reachability.Expect(Pod("y/b"), Pod("x/c"), Dropped)
+	reachability.Expect(Pod(namespaces["y"]+"/b"), Pod(namespaces["x"]+"/c"), Dropped)

 	var testSteps []*TestStep
 	testSteps = append(testSteps, &TestStep{
@@ -1844,14 +1849,14 @@
 // that specifies that. Also, it tests that a K8s NetworkPolicy with the same appliedTo will not affect its behavior.
 func testANPBasic(t *testing.T) {
 	builder := &AntreaNetworkPolicySpecBuilder{}
-	builder = builder.SetName("y", "np-same-name").
+	builder = builder.SetName(namespaces["y"], "np-same-name").
 		SetPriority(1.0).
 		SetAppliedToGroup([]ANPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}})
-	builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": "x"},
+	builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": namespaces["x"]},
 		nil, nil, nil, crdv1alpha1.RuleActionDrop, "")

 	reachability := NewReachability(allPods, Connected)
-	reachability.Expect(Pod("x/b"), Pod("y/a"), Dropped)
+	reachability.Expect(Pod(namespaces["x"]+"/b"), Pod(namespaces["y"]+"/a"), Dropped)
 	testStep := []*TestStep{
 		{
 			"Port 80",
@@ -1865,7 +1870,7 @@ func testANPBasic(t *testing.T) {
 	}
 	// build a K8s NetworkPolicy that has the same appliedTo but allows all traffic.
 	k8sNPBuilder := &NetworkPolicySpecBuilder{}
-	k8sNPBuilder = k8sNPBuilder.SetName("y", "np-same-name").
+	k8sNPBuilder = k8sNPBuilder.SetName(namespaces["y"], "np-same-name").
 		SetPodSelector(map[string]string{"pod": "a"})
 	k8sNPBuilder.AddIngress(v1.ProtocolTCP, &p80, nil, nil, nil,
 		nil, nil, nil, nil)
@@ -1893,22 +1898,22 @@ func testANPBasic(t *testing.T) {
 func testANPMultipleAppliedTo(t *testing.T, data *TestData, singleRule bool) {
 	tempLabel := randName("temp-")
 	builder := &AntreaNetworkPolicySpecBuilder{}
-	builder = builder.SetName("y", "np-multiple-appliedto").SetPriority(1.0)
+	builder = builder.SetName(namespaces["y"], "np-multiple-appliedto").SetPriority(1.0)
 	// Make it apply to an extra dummy AppliedTo to ensure it handles multiple AppliedToGroups correctly.
 	// See https://github.com/antrea-io/antrea/issues/2083.
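The dummy AppliedTo referenced above works through ordinary matchLabels semantics: the selector {tempLabel: ""} only matches Pods that carry the temporary label, so a Pod can be moved in and out of the policy's scope at runtime just by labeling it, as the test does below. A self-contained illustration of that matching rule (not the Antrea implementation):

package main

import "fmt"

// matches implements equality-based matchLabels: every selector key must be
// present on the Pod with exactly the selector's value (here, the empty string).
func matches(selector, podLabels map[string]string) bool {
	for k, v := range selector {
		if got, ok := podLabels[k]; !ok || got != v {
			return false
		}
	}
	return true
}

func main() {
	tempLabel := "temp-abcde" // stands in for a randName("temp-") value
	selector := map[string]string{tempLabel: ""}
	podLabels := map[string]string{"pod": "c"}
	fmt.Println(matches(selector, podLabels)) // false: Pod not yet in scope
	podLabels[tempLabel] = ""                 // label the Pod, as the test does below
	fmt.Println(matches(selector, podLabels)) // true: dummy AppliedTo now selects it
}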
if singleRule { builder.SetAppliedToGroup([]ANPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}, {PodSelector: map[string]string{tempLabel: ""}}}) - builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": "x"}, + builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": namespaces["x"]}, nil, nil, nil, crdv1alpha1.RuleActionDrop, "") } else { - builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": "x"}, + builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": namespaces["x"]}, nil, nil, []ANPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}}, crdv1alpha1.RuleActionDrop, "") - builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": "x"}, + builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": namespaces["x"]}, nil, nil, []ANPAppliedToSpec{{PodSelector: map[string]string{tempLabel: ""}}}, crdv1alpha1.RuleActionDrop, "") } reachability := NewReachability(allPods, Connected) - reachability.Expect(Pod("x/b"), Pod("y/a"), Dropped) + reachability.Expect(Pod(namespaces["x"]+"/b"), Pod(namespaces["y"]+"/a"), Dropped) anp, err := k8sUtils.CreateOrUpdateANP(builder.Get()) failOnError(err, t) @@ -1921,7 +1926,7 @@ func testANPMultipleAppliedTo(t *testing.T, data *TestData, singleRule bool) { } t.Logf("Making the Policy apply to y/c by labeling it with the temporary label that matches the dummy AppliedTo") - podYC, err := k8sUtils.GetPodByLabel("y", "c") + podYC, err := k8sUtils.GetPodByLabel(namespaces["y"], "c") if err != nil { t.Errorf("Failed to get Pod in Namespace y with label 'pod=c': %v", err) } @@ -1929,8 +1934,8 @@ func testANPMultipleAppliedTo(t *testing.T, data *TestData, singleRule bool) { podYC, err = k8sUtils.clientset.CoreV1().Pods(podYC.Namespace).Update(context.TODO(), podYC, metav1.UpdateOptions{}) assert.NoError(t, err) reachability = NewReachability(allPods, Connected) - reachability.Expect(Pod("x/b"), Pod("y/a"), Dropped) - reachability.Expect(Pod("x/b"), Pod("y/c"), Dropped) + reachability.Expect(Pod(namespaces["x"]+"/b"), Pod(namespaces["y"]+"/a"), Dropped) + reachability.Expect(Pod(namespaces["x"]+"/b"), Pod(namespaces["y"]+"/c"), Dropped) time.Sleep(networkPolicyDelay) k8sUtils.Validate(allPods, reachability, []int32{80}, ProtocolTCP) _, wrong, _ = reachability.Summary() @@ -1944,7 +1949,7 @@ func testANPMultipleAppliedTo(t *testing.T, data *TestData, singleRule bool) { _, err = k8sUtils.clientset.CoreV1().Pods(podYC.Namespace).Update(context.TODO(), podYC, metav1.UpdateOptions{}) assert.NoError(t, err) reachability = NewReachability(allPods, Connected) - reachability.Expect(Pod("x/b"), Pod("y/a"), Dropped) + reachability.Expect(Pod(namespaces["x"]+"/b"), Pod(namespaces["y"]+"/a"), Dropped) time.Sleep(networkPolicyDelay) k8sUtils.Validate(allPods, reachability, []int32{80}, ProtocolTCP) _, wrong, _ = reachability.Summary() @@ -1961,8 +1966,8 @@ func testAuditLoggingBasic(t *testing.T, data *TestData) { builder := &ClusterNetworkPolicySpecBuilder{} builder = builder.SetName("test-log-acnp-deny"). SetPriority(1.0). 
- SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}, NSSelector: map[string]string{"ns": "x"}}}) - builder.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, map[string]string{"ns": "z"}, + SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}, NSSelector: map[string]string{"ns": namespaces["x"]}}}) + builder.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]}, nil, nil, false, nil, crdv1alpha1.RuleActionDrop, "", "", nil) builder.AddEgressLogging() @@ -1979,12 +1984,12 @@ func testAuditLoggingBasic(t *testing.T, data *TestData) { k8sUtils.Probe(ns1, pod1, ns2, pod2, p80, ProtocolTCP) }() } - oneProbe("x", "a", "z", "a") - oneProbe("x", "a", "z", "b") - oneProbe("x", "a", "z", "c") + oneProbe(namespaces["x"], "a", namespaces["z"], "a") + oneProbe(namespaces["x"], "a", namespaces["z"], "b") + oneProbe(namespaces["x"], "a", namespaces["z"], "c") wg.Wait() - podXA, err := k8sUtils.GetPodByLabel("x", "a") + podXA, err := k8sUtils.GetPodByLabel(namespaces["x"], "a") if err != nil { t.Errorf("Failed to get Pod in Namespace x with label 'pod=a': %v", err) } @@ -2008,8 +2013,8 @@ func testAuditLoggingBasic(t *testing.T, data *TestData) { return false, nil } - destinations := []string{"z/a", "z/b", "z/c"} - srcIPs, _ := podIPs["x/a"] + destinations := []string{namespaces["z"] + "/a", namespaces["z"] + "/b", namespaces["z"] + "/c"} + srcIPs, _ := podIPs[namespaces["x"]+"/a"] var expectedNumEntries, actualNumEntries int for _, d := range destinations { dstIPs, _ := podIPs[d] @@ -2046,17 +2051,17 @@ func testAuditLoggingBasic(t *testing.T, data *TestData) { func testAppliedToPerRule(t *testing.T) { builder := &AntreaNetworkPolicySpecBuilder{} - builder = builder.SetName("y", "np1").SetPriority(1.0) + builder = builder.SetName(namespaces["y"], "np1").SetPriority(1.0) anpATGrp1 := ANPAppliedToSpec{PodSelector: map[string]string{"pod": "a"}, PodSelectorMatchExp: nil} anpATGrp2 := ANPAppliedToSpec{PodSelector: map[string]string{"pod": "b"}, PodSelectorMatchExp: nil} - builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": "x"}, + builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": namespaces["x"]}, nil, nil, []ANPAppliedToSpec{anpATGrp1}, crdv1alpha1.RuleActionDrop, "") - builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": "z"}, + builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": namespaces["z"]}, nil, nil, []ANPAppliedToSpec{anpATGrp2}, crdv1alpha1.RuleActionDrop, "") reachability := NewReachability(allPods, Connected) - reachability.Expect(Pod("x/b"), Pod("y/a"), Dropped) - reachability.Expect(Pod("z/b"), Pod("y/b"), Dropped) + reachability.Expect(Pod(namespaces["x"]+"/b"), Pod(namespaces["y"]+"/a"), Dropped) + reachability.Expect(Pod(namespaces["z"]+"/b"), Pod(namespaces["y"]+"/b"), Dropped) testStep := []*TestStep{ { "Port 80", @@ -2073,18 +2078,18 @@ func testAppliedToPerRule(t *testing.T) { builder2 = builder2.SetName("cnp1").SetPriority(1.0) cnpATGrp1 := ACNPAppliedToSpec{PodSelector: map[string]string{"pod": "a"}, PodSelectorMatchExp: nil} cnpATGrp2 := ACNPAppliedToSpec{ - PodSelector: map[string]string{"pod": "b"}, NSSelector: map[string]string{"ns": "y"}, + PodSelector: map[string]string{"pod": "b"}, NSSelector: 
map[string]string{"ns": namespaces["y"]}, PodSelectorMatchExp: nil, NSSelectorMatchExp: nil} - builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": "x"}, + builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": namespaces["x"]}, nil, nil, false, []ACNPAppliedToSpec{cnpATGrp1}, crdv1alpha1.RuleActionDrop, "", "", nil) - builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": "z"}, + builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": namespaces["z"]}, nil, nil, false, []ACNPAppliedToSpec{cnpATGrp2}, crdv1alpha1.RuleActionDrop, "", "", nil) reachability2 := NewReachability(allPods, Connected) - reachability2.Expect(Pod("x/b"), Pod("x/a"), Dropped) - reachability2.Expect(Pod("x/b"), Pod("y/a"), Dropped) - reachability2.Expect(Pod("x/b"), Pod("z/a"), Dropped) - reachability2.Expect(Pod("z/b"), Pod("y/b"), Dropped) + reachability2.Expect(Pod(namespaces["x"]+"/b"), Pod(namespaces["x"]+"/a"), Dropped) + reachability2.Expect(Pod(namespaces["x"]+"/b"), Pod(namespaces["y"]+"/a"), Dropped) + reachability2.Expect(Pod(namespaces["x"]+"/b"), Pod(namespaces["z"]+"/a"), Dropped) + reachability2.Expect(Pod(namespaces["z"]+"/b"), Pod(namespaces["y"]+"/b"), Dropped) testStep2 := []*TestStep{ { "Port 80", @@ -2105,14 +2110,14 @@ func testAppliedToPerRule(t *testing.T) { } func testACNPClusterGroupServiceRefCreateAndUpdate(t *testing.T, data *TestData) { - svc1 := k8sUtils.BuildService("svc1", "x", 80, 80, map[string]string{"app": "a"}, nil) - svc2 := k8sUtils.BuildService("svc2", "y", 80, 80, map[string]string{"app": "b"}, nil) + svc1 := k8sUtils.BuildService("svc1", namespaces["x"], 80, 80, map[string]string{"app": "a"}, nil) + svc2 := k8sUtils.BuildService("svc2", namespaces["y"], 80, 80, map[string]string{"app": "b"}, nil) cg1Name, cg2Name := "cg-svc1", "cg-svc2" cgBuilder1 := &ClusterGroupV1Alpha3SpecBuilder{} - cgBuilder1 = cgBuilder1.SetName(cg1Name).SetServiceReference("x", "svc1") + cgBuilder1 = cgBuilder1.SetName(cg1Name).SetServiceReference(namespaces["x"], "svc1") cgBuilder2 := &ClusterGroupV1Alpha3SpecBuilder{} - cgBuilder2 = cgBuilder2.SetName(cg2Name).SetServiceReference("y", "svc2") + cgBuilder2 = cgBuilder2.SetName(cg2Name).SetServiceReference(namespaces["y"], "svc2") builder := &ClusterNetworkPolicySpecBuilder{} builder = builder.SetName("cnp-cg-svc-ref").SetPriority(1.0).SetAppliedToGroup([]ACNPAppliedToSpec{{Group: cg1Name}}) @@ -2121,7 +2126,7 @@ func testACNPClusterGroupServiceRefCreateAndUpdate(t *testing.T, data *TestData) // Pods backing svc1 (label pod=a) in Namespace x should not allow ingress from Pods backing svc2 (label pod=b) in Namespace y. reachability := NewReachability(allPods, Connected) - reachability.Expect(Pod("y/b"), Pod("x/a"), Dropped) + reachability.Expect(Pod(namespaces["y"]+"/b"), Pod(namespaces["x"]+"/a"), Dropped) testStep1 := &TestStep{ "Port 80", reachability, @@ -2133,17 +2138,17 @@ func testACNPClusterGroupServiceRefCreateAndUpdate(t *testing.T, data *TestData) } // Test update selector of Service referred in cg-svc1, and update serviceReference of cg-svc2. 
- svc1Updated := k8sUtils.BuildService("svc1", "x", 80, 80, map[string]string{"app": "b"}, nil) - svc3 := k8sUtils.BuildService("svc3", "y", 80, 80, map[string]string{"app": "a"}, nil) - cgBuilder2Updated := cgBuilder2.SetServiceReference("y", "svc3") + svc1Updated := k8sUtils.BuildService("svc1", namespaces["x"], 80, 80, map[string]string{"app": "b"}, nil) + svc3 := k8sUtils.BuildService("svc3", namespaces["y"], 80, 80, map[string]string{"app": "a"}, nil) + cgBuilder2Updated := cgBuilder2.SetServiceReference(namespaces["y"], "svc3") cp := []*CustomProbe{ { SourcePod: CustomPod{ - Pod: NewPod("y", "test-add-pod-svc3"), + Pod: NewPod(namespaces["y"], "test-add-pod-svc3"), Labels: map[string]string{"pod": "test-add-pod-svc3", "app": "a"}, }, DestPod: CustomPod{ - Pod: NewPod("x", "test-add-pod-svc1"), + Pod: NewPod(namespaces["x"], "test-add-pod-svc1"), Labels: map[string]string{"pod": "test-add-pod-svc1", "app": "b"}, }, ExpectConnectivity: Dropped, @@ -2153,7 +2158,7 @@ func testACNPClusterGroupServiceRefCreateAndUpdate(t *testing.T, data *TestData) // Pods backing svc1 (label pod=b) in namespace x should not allow ingress from Pods backing svc3 (label pod=a) in namespace y. reachability2 := NewReachability(allPods, Connected) - reachability2.Expect(Pod("y/a"), Pod("x/b"), Dropped) + reachability2.Expect(Pod(namespaces["y"]+"/a"), Pod(namespaces["x"]+"/b"), Dropped) testStep2 := &TestStep{ "Port 80 updated", reachability2, @@ -2166,8 +2171,8 @@ func testACNPClusterGroupServiceRefCreateAndUpdate(t *testing.T, data *TestData) builderUpdated := &ClusterNetworkPolicySpecBuilder{} builderUpdated = builderUpdated.SetName("cnp-cg-svc-ref").SetPriority(1.0) - builderUpdated.SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}, NSSelector: map[string]string{"ns": "x"}}}) - builderUpdated.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": "y"}, + builderUpdated.SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}, NSSelector: map[string]string{"ns": namespaces["x"]}}}) + builderUpdated.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": namespaces["y"]}, nil, nil, false, nil, crdv1alpha1.RuleActionDrop, "", "", nil) // Pod x/a should not allow ingress from y/b per the updated ACNP spec. @@ -2189,17 +2194,17 @@ func testACNPClusterGroupServiceRefCreateAndUpdate(t *testing.T, data *TestData) } func testACNPNestedClusterGroupCreateAndUpdate(t *testing.T, data *TestData) { - svc1 := k8sUtils.BuildService("svc1", "x", 80, 80, map[string]string{"app": "a"}, nil) + svc1 := k8sUtils.BuildService("svc1", namespaces["x"], 80, 80, map[string]string{"app": "a"}, nil) cg1Name, cg2Name, cg3Name := "cg-svc-x-a", "cg-select-y-b", "cg-select-y-c" cgBuilder1 := &ClusterGroupV1Alpha3SpecBuilder{} - cgBuilder1 = cgBuilder1.SetName(cg1Name).SetServiceReference("x", "svc1") + cgBuilder1 = cgBuilder1.SetName(cg1Name).SetServiceReference(namespaces["x"], "svc1") cgBuilder2 := &ClusterGroupV1Alpha3SpecBuilder{} cgBuilder2 = cgBuilder2.SetName(cg2Name). - SetNamespaceSelector(map[string]string{"ns": "y"}, nil). + SetNamespaceSelector(map[string]string{"ns": namespaces["y"]}, nil). SetPodSelector(map[string]string{"pod": "b"}, nil) cgBuilder3 := &ClusterGroupV1Alpha3SpecBuilder{} cgBuilder3 = cgBuilder3.SetName(cg3Name). - SetNamespaceSelector(map[string]string{"ns": "y"}, nil). + SetNamespaceSelector(map[string]string{"ns": namespaces["y"]}, nil). 
SetPodSelector(map[string]string{"pod": "c"}, nil)
 	cgNestedName := "cg-nested"
 	cgBuilderNested := &ClusterGroupV1Alpha3SpecBuilder{}
@@ -2207,7 +2212,7 @@ func testACNPNestedClusterGroupCreateAndUpdate(t *testing.T, data *TestData) {

 	builder := &ClusterNetworkPolicySpecBuilder{}
 	builder = builder.SetName("cnp-nested-cg").SetPriority(1.0).
-		SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": "z"}}}).
+		SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": namespaces["z"]}}}).
 		AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil,
 			false, nil, crdv1alpha1.RuleActionDrop, cgNestedName, "", nil)
@@ -2215,7 +2220,7 @@ func testACNPNestedClusterGroupCreateAndUpdate(t *testing.T, data *TestData) {
 	// Note that in this testStep cg3 will not be created yet, so even though cg-nested selects cg1 and
 	// cg3 as childGroups, only members of cg1 will be included at this time.
 	reachability := NewReachability(allPods, Connected)
-	reachability.ExpectEgressToNamespace("x/a", "z", Dropped)
+	reachability.ExpectEgressToNamespace(Pod(namespaces["x"]+"/a"), namespaces["z"], Dropped)

 	testStep1 := &TestStep{
 		"Port 80",
@@ -2232,17 +2237,17 @@ func testACNPNestedClusterGroupCreateAndUpdate(t *testing.T, data *TestData) {
 	cgBuilderNested = cgBuilderNested.SetChildGroups([]string{cg1Name, cg2Name, cg3Name})
 	// In addition to x/a, all traffic from y/b to Namespace z should also be denied.
 	reachability2 := NewReachability(allPods, Connected)
-	reachability2.ExpectEgressToNamespace("x/a", "z", Dropped)
-	reachability2.ExpectEgressToNamespace("y/b", "z", Dropped)
+	reachability2.ExpectEgressToNamespace(Pod(namespaces["x"]+"/a"), namespaces["z"], Dropped)
+	reachability2.ExpectEgressToNamespace(Pod(namespaces["y"]+"/b"), namespaces["z"], Dropped)
 	// New member in cg-svc-x-a should be reflected in cg-nested as well.
 	cp := []*CustomProbe{
 		{
 			SourcePod: CustomPod{
-				Pod:    NewPod("x", "test-add-pod-svc1"),
+				Pod:    NewPod(namespaces["x"], "test-add-pod-svc1"),
 				Labels: map[string]string{"pod": "test-add-pod-svc1", "app": "a"},
 			},
 			DestPod: CustomPod{
-				Pod:    NewPod("z", "test-add-pod-ns-z"),
+				Pod:    NewPod(namespaces["z"], "test-add-pod-ns-z"),
 				Labels: map[string]string{"pod": "test-add-pod-ns-z"},
 			},
 			ExpectConnectivity: Dropped,
@@ -2262,9 +2267,9 @@ func testACNPNestedClusterGroupCreateAndUpdate(t *testing.T, data *TestData) {
 	// In this testStep cg3 is created. Its members should be reflected in cg-nested
 	// and as a result, all traffic from y/c to Namespace z should be denied as well.
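The incremental widening described above is plain set union: a parent group's effective members are the union of its child groups' members, re-evaluated whenever a child group is created or updated, which is what the reachability3 expectations in the next hunk assert once cg3 exists. An illustrative sketch (not the Antrea controller code):

package main

import "fmt"

type group struct {
	name    string
	members []string
}

// union flattens child groups into the parent's effective member set,
// de-duplicating Pods that appear in more than one child.
func union(children ...group) []string {
	seen := make(map[string]bool)
	var out []string
	for _, g := range children {
		for _, m := range g.members {
			if !seen[m] {
				seen[m] = true
				out = append(out, m)
			}
		}
	}
	return out
}

func main() {
	cg1 := group{"cg-svc-x-a", []string{"x/a"}}
	cg2 := group{"cg-select-y-b", []string{"y/b"}}
	fmt.Println(union(cg1, cg2)) // before cg3 exists: [x/a y/b]
	cg3 := group{"cg-select-y-c", []string{"y/c"}}
	fmt.Println(union(cg1, cg2, cg3)) // after cg3 is created: [x/a y/b y/c]
}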
reachability3 := NewReachability(allPods, Connected) - reachability3.ExpectEgressToNamespace("x/a", "z", Dropped) - reachability3.ExpectEgressToNamespace("y/b", "z", Dropped) - reachability3.ExpectEgressToNamespace("y/c", "z", Dropped) + reachability3.ExpectEgressToNamespace(Pod(namespaces["x"]+"/a"), namespaces["z"], Dropped) + reachability3.ExpectEgressToNamespace(Pod(namespaces["y"]+"/b"), namespaces["z"], Dropped) + reachability3.ExpectEgressToNamespace(Pod(namespaces["y"]+"/c"), namespaces["z"], Dropped) testStep3 := &TestStep{ "Port 80 updated", reachability3, @@ -2283,8 +2288,8 @@ func testACNPNestedClusterGroupCreateAndUpdate(t *testing.T, data *TestData) { } func testACNPNestedIPBlockClusterGroupCreateAndUpdate(t *testing.T) { - podXAIP, _ := podIPs["x/a"] - podXBIP, _ := podIPs["x/b"] + podXAIP, _ := podIPs[namespaces["x"]+"/a"] + podXBIP, _ := podIPs[namespaces["x"]+"/b"] genCIDR := func(ip string) string { if strings.Contains(ip, ".") { return ip + "/32" @@ -2311,15 +2316,15 @@ func testACNPNestedIPBlockClusterGroupCreateAndUpdate(t *testing.T) { SetAppliedToGroup([]ACNPAppliedToSpec{ { PodSelector: map[string]string{"pod": "a"}, - NSSelector: map[string]string{"ns": "y"}, + NSSelector: map[string]string{"ns": namespaces["y"]}, }, }) builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, false, nil, crdv1alpha1.RuleActionDrop, cgParentName, "", nil) reachability := NewReachability(allPods, Connected) - reachability.Expect("x/a", "y/a", Dropped) - reachability.Expect("x/b", "y/a", Dropped) + reachability.Expect(Pod(namespaces["x"]+"/a"), Pod(namespaces["y"]+"/a"), Dropped) + reachability.Expect(Pod(namespaces["x"]+"/b"), Pod(namespaces["y"]+"/a"), Dropped) testStep := &TestStep{ "Port 80", reachability, @@ -2332,14 +2337,14 @@ func testACNPNestedIPBlockClusterGroupCreateAndUpdate(t *testing.T) { cgBuilder3 := &ClusterGroupV1Alpha3SpecBuilder{} cgBuilder3 = cgBuilder3.SetName(cg3Name). - SetNamespaceSelector(map[string]string{"ns": "x"}, nil). + SetNamespaceSelector(map[string]string{"ns": namespaces["x"]}, nil). SetPodSelector(map[string]string{"pod": "c"}, nil) updatedCGParent := &ClusterGroupV1Alpha3SpecBuilder{} updatedCGParent = updatedCGParent.SetName(cgParentName).SetChildGroups([]string{cg1Name, cg3Name}) reachability2 := NewReachability(allPods, Connected) - reachability2.Expect("x/a", "y/a", Dropped) - reachability2.Expect("x/c", "y/a", Dropped) + reachability2.Expect(Pod(namespaces["x"]+"/a"), Pod(namespaces["y"]+"/a"), Dropped) + reachability2.Expect(Pod(namespaces["x"]+"/c"), Pod(namespaces["y"]+"/a"), Dropped) testStep2 := &TestStep{ "Port 80, updated", reachability2, @@ -2385,17 +2390,17 @@ func testACNPNamespaceIsolation(t *testing.T) { SetTier("baseline"). 
SetPriority(1.0) builder2.AddEgress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, - true, []ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": "x"}}}, crdv1alpha1.RuleActionAllow, "", "", nil) + true, []ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": namespaces["x"]}}}, crdv1alpha1.RuleActionAllow, "", "", nil) builder2.AddEgress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, map[string]string{}, nil, nil, - false, []ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": "x"}}}, crdv1alpha1.RuleActionDrop, "", "", nil) + false, []ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": namespaces["x"]}}}, crdv1alpha1.RuleActionDrop, "", "", nil) reachability2 := NewReachability(allPods, Connected) - reachability2.ExpectEgressToNamespace(Pod("x/a"), "y", Dropped) - reachability2.ExpectEgressToNamespace(Pod("x/a"), "z", Dropped) - reachability2.ExpectEgressToNamespace(Pod("x/b"), "y", Dropped) - reachability2.ExpectEgressToNamespace(Pod("x/b"), "z", Dropped) - reachability2.ExpectEgressToNamespace(Pod("x/c"), "y", Dropped) - reachability2.ExpectEgressToNamespace(Pod("x/c"), "z", Dropped) + reachability2.ExpectEgressToNamespace(Pod(namespaces["x"]+"/a"), namespaces["y"], Dropped) + reachability2.ExpectEgressToNamespace(Pod(namespaces["x"]+"/a"), namespaces["z"], Dropped) + reachability2.ExpectEgressToNamespace(Pod(namespaces["x"]+"/b"), namespaces["y"], Dropped) + reachability2.ExpectEgressToNamespace(Pod(namespaces["x"]+"/b"), namespaces["z"], Dropped) + reachability2.ExpectEgressToNamespace(Pod(namespaces["x"]+"/c"), namespaces["y"], Dropped) + reachability2.ExpectEgressToNamespace(Pod(namespaces["x"]+"/c"), namespaces["z"], Dropped) testStep2 := &TestStep{ "Port 80", reachability2, @@ -2439,11 +2444,11 @@ func testACNPStrictNamespacesIsolation(t *testing.T) { // Add a K8s namespaced NetworkPolicy in ns x that isolates all Pods in that namespace. 
builder2 := &NetworkPolicySpecBuilder{} - builder2 = builder2.SetName("x", "default-deny-in-namespace-x") + builder2 = builder2.SetName(namespaces["x"], "default-deny-in-namespace-x") builder2.SetTypeIngress() reachability2 := NewReachability(allPods, Dropped) reachability2.ExpectAllSelfNamespace(Connected) - reachability2.ExpectSelfNamespace("x", Dropped) + reachability2.ExpectSelfNamespace(namespaces["x"], Dropped) reachability2.ExpectSelf(allPods, Connected) testStep2 := &TestStep{ "Namespace isolation with K8s NP, Port 80", @@ -2478,25 +2483,25 @@ func testFQDNPolicy(t *testing.T) { testcases := []podToAddrTestStep{ { - "x/a", + Pod(namespaces["x"] + "/a"), "drive.google.com", 80, Rejected, }, { - "x/b", + Pod(namespaces["x"] + "/b"), "maps.google.com", 80, Rejected, }, { - "y/a", + Pod(namespaces["y"] + "/a"), "wayfair.com", 80, Dropped, }, { - "y/b", + Pod(namespaces["y"] + "/b"), "facebook.com", 80, Connected, @@ -2537,13 +2542,13 @@ func testFQDNPolicyInClusterService(t *testing.T) { defer log.SetLevel(logLevel) var services []*v1.Service if clusterInfo.podV4NetworkCIDR != "" { - ipv4Svc := k8sUtils.BuildService("ipv4-svc", "x", 80, 80, map[string]string{"pod": "a"}, nil) + ipv4Svc := k8sUtils.BuildService("ipv4-svc", namespaces["x"], 80, 80, map[string]string{"pod": "a"}, nil) ipv4Svc.Spec.ClusterIP = "None" ipv4Svc.Spec.IPFamilies = []v1.IPFamily{v1.IPv4Protocol} services = append(services, ipv4Svc) } if clusterInfo.podV6NetworkCIDR != "" { - ipv6Svc := k8sUtils.BuildService("ipv6-svc", "x", 80, 80, map[string]string{"pod": "b"}, nil) + ipv6Svc := k8sUtils.BuildService("ipv6-svc", namespaces["x"], 80, 80, map[string]string{"pod": "b"}, nil) ipv6Svc.Spec.ClusterIP = "None" ipv6Svc.Spec.IPFamilies = []v1.IPFamily{v1.IPv6Protocol} services = append(services, ipv6Svc) @@ -2563,8 +2568,8 @@ func testFQDNPolicyInClusterService(t *testing.T) { SetTier("application"). 
SetPriority(1.0) for idx, service := range services { - builder.AddFQDNRule(svcDNSName(service), ProtocolTCP, nil, nil, nil, fmt.Sprintf("r%d", idx*2), []ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": "y"}, PodSelector: map[string]string{"pod": "b"}}}, crdv1alpha1.RuleActionReject) - builder.AddFQDNRule(svcDNSName(service), ProtocolTCP, nil, nil, nil, fmt.Sprintf("r%d", idx*2+1), []ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": "z"}, PodSelector: map[string]string{"pod": "c"}}}, crdv1alpha1.RuleActionDrop) + builder.AddFQDNRule(svcDNSName(service), ProtocolTCP, nil, nil, nil, fmt.Sprintf("r%d", idx*2), []ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": namespaces["y"]}, PodSelector: map[string]string{"pod": "b"}}}, crdv1alpha1.RuleActionReject) + builder.AddFQDNRule(svcDNSName(service), ProtocolTCP, nil, nil, nil, fmt.Sprintf("r%d", idx*2+1), []ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": namespaces["z"]}, PodSelector: map[string]string{"pod": "c"}}}, crdv1alpha1.RuleActionDrop) } acnp := builder.Get() k8sUtils.CreateOrUpdateACNP(acnp) @@ -2574,19 +2579,19 @@ func testFQDNPolicyInClusterService(t *testing.T) { for _, service := range services { eachServiceCases := []podToAddrTestStep{ { - "y/b", + Pod(namespaces["y"] + "/b"), svcDNSName(service), 80, Rejected, }, { - "z/c", + Pod(namespaces["z"] + "/c"), svcDNSName(service), 80, Dropped, }, { - "x/c", + Pod(namespaces["x"] + "/c"), svcDNSName(service), 80, Connected, @@ -2620,12 +2625,12 @@ func testToServices(t *testing.T) { skipIfProxyDisabled(t) var services []*v1.Service if clusterInfo.podV4NetworkCIDR != "" { - ipv4Svc := k8sUtils.BuildService("ipv4-svc", "x", 81, 81, map[string]string{"pod": "a"}, nil) + ipv4Svc := k8sUtils.BuildService("ipv4-svc", namespaces["x"], 81, 81, map[string]string{"pod": "a"}, nil) ipv4Svc.Spec.IPFamilies = []v1.IPFamily{v1.IPv4Protocol} services = append(services, ipv4Svc) } if clusterInfo.podV6NetworkCIDR != "" { - ipv6Svc := k8sUtils.BuildService("ipv6-svc", "x", 80, 80, map[string]string{"pod": "b"}, nil) + ipv6Svc := k8sUtils.BuildService("ipv6-svc", namespaces["x"], 80, 80, map[string]string{"pod": "b"}, nil) ipv6Svc.Spec.IPFamilies = []v1.IPFamily{v1.IPv6Protocol} services = append(services, ipv6Svc) } @@ -2646,7 +2651,7 @@ func testToServices(t *testing.T) { builder = builder.SetName("test-acnp-to-services"). SetTier("application"). 
SetPriority(1.0) - builder.AddToServicesRule(svcRefs, "svc", []ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": "y"}}}, crdv1alpha1.RuleActionDrop) + builder.AddToServicesRule(svcRefs, "svc", []ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": namespaces["y"]}}}, crdv1alpha1.RuleActionDrop) time.Sleep(networkPolicyDelay) acnp := builder.Get() @@ -2657,13 +2662,13 @@ func testToServices(t *testing.T) { for _, service := range builtSvcs { eachServiceCases := []podToAddrTestStep{ { - "y/b", + Pod(namespaces["y"] + "/b"), service.Spec.ClusterIP, service.Spec.Ports[0].Port, Dropped, }, { - "z/c", + Pod(namespaces["z"] + "/c"), service.Spec.ClusterIP, service.Spec.Ports[0].Port, Connected, @@ -2694,21 +2699,21 @@ func testToServices(t *testing.T) { } func testServiceAccountSelector(t *testing.T, data *TestData) { - k8sUtils.CreateOrUpdateServiceAccount(k8sUtils.BuildServiceAccount("test-sa", "x", nil)) - defer k8sUtils.DeleteServiceAccount("x", "test-sa") + k8sUtils.CreateOrUpdateServiceAccount(k8sUtils.BuildServiceAccount("test-sa", namespaces["x"], nil)) + defer k8sUtils.DeleteServiceAccount(namespaces["x"], "test-sa") - serverName, serverIP, cleanupFunc := createAndWaitForPod(t, data, data.createNginxPodOnNode, "server", controlPlaneNodeName(), testNamespace, false) + serverName, serverIP, cleanupFunc := createAndWaitForPod(t, data, data.createNginxPodOnNode, "server", controlPlaneNodeName(), data.testNamespace, false) defer cleanupFunc() - client0Name, _, cleanupFunc := createAndWaitForPodWithServiceAccount(t, data, data.createAgnhostPodWithSAOnNode, "client", controlPlaneNodeName(), "x", false, "test-sa") + client0Name, _, cleanupFunc := createAndWaitForPodWithServiceAccount(t, data, data.createAgnhostPodWithSAOnNode, "client", controlPlaneNodeName(), namespaces["x"], false, "test-sa") defer cleanupFunc() - client1Name, _, cleanupFunc := createAndWaitForPodWithServiceAccount(t, data, data.createAgnhostPodWithSAOnNode, "client", controlPlaneNodeName(), "x", false, "default") + client1Name, _, cleanupFunc := createAndWaitForPodWithServiceAccount(t, data, data.createAgnhostPodWithSAOnNode, "client", controlPlaneNodeName(), namespaces["x"], false, "default") defer cleanupFunc() sa := &crdv1alpha1.NamespacedName{ Name: "test-sa", - Namespace: "x", + Namespace: namespaces["x"], } builder := &ClusterNetworkPolicySpecBuilder{} @@ -2730,13 +2735,13 @@ func testServiceAccountSelector(t *testing.T, data *TestData) { if clusterInfo.podV4NetworkCIDR != "" { ipv4Testcases := []podToAddrTestStep{ { - Pod("x/" + client0Name), + Pod(namespaces["x"] + "/" + client0Name), serverIP.ipv4.String(), 80, Dropped, }, { - Pod("x/" + client1Name), + Pod(namespaces["x"] + "/" + client1Name), serverIP.ipv4.String(), 80, Connected, @@ -2748,13 +2753,13 @@ func testServiceAccountSelector(t *testing.T, data *TestData) { if clusterInfo.podV6NetworkCIDR != "" { ipv6Testcases := []podToAddrTestStep{ { - Pod("x/" + client0Name), + Pod(namespaces["x"] + "/" + client0Name), serverIP.ipv6.String(), 80, Dropped, }, { - Pod("x/" + client1Name), + Pod(namespaces["x"] + "/" + client1Name), serverIP.ipv6.String(), 80, Connected, @@ -2785,20 +2790,20 @@ func testACNPNodeSelectorEgress(t *testing.T) { SetPriority(1.0) nodeSelector := metav1.LabelSelector{MatchLabels: map[string]string{"kubernetes.io/hostname": controlPlaneNodeName()}} builder.AddNodeSelectorRule(&nodeSelector, ProtocolTCP, &p6443, "egress-control-plane-drop", - []ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": "x"}, PodSelector: 
map[string]string{"pod": "a"}}}, + []ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": namespaces["x"]}, PodSelector: map[string]string{"pod": "a"}}}, crdv1alpha1.RuleActionDrop, true) var testcases []podToAddrTestStep if clusterInfo.podV4NetworkCIDR != "" { ipv4Testcases := []podToAddrTestStep{ { - "x/a", + Pod(namespaces["x"] + "/a"), controlPlaneNodeIPv4(), 6443, Dropped, }, { - "x/b", + Pod(namespaces["x"] + "/b"), controlPlaneNodeIPv4(), 6443, Connected, @@ -2810,13 +2815,13 @@ func testACNPNodeSelectorEgress(t *testing.T) { if clusterInfo.podV6NetworkCIDR != "" { ipv6Testcases := []podToAddrTestStep{ { - "x/a", + Pod(namespaces["x"] + "/a"), controlPlaneNodeIPv6(), 6443, Dropped, }, { - "x/b", + Pod(namespaces["x"] + "/b"), controlPlaneNodeIPv6(), 6443, Connected, @@ -2845,16 +2850,16 @@ func testACNPNodeSelectorEgress(t *testing.T) { } func testACNPNodeSelectorIngress(t *testing.T, data *TestData) { - _, serverIP0, cleanupFunc := createAndWaitForPod(t, data, data.createNginxPodOnNode, "server0", nodeName(1), "x", false) + _, serverIP0, cleanupFunc := createAndWaitForPod(t, data, data.createNginxPodOnNode, "server0", nodeName(1), namespaces["x"], false) defer cleanupFunc() - _, serverIP1, cleanupFunc := createAndWaitForPod(t, data, data.createNginxPodOnNode, "server1", nodeName(1), "y", false) + _, serverIP1, cleanupFunc := createAndWaitForPod(t, data, data.createNginxPodOnNode, "server1", nodeName(1), namespaces["y"], false) defer cleanupFunc() clientName := "agnhost-client" - require.NoError(t, data.createAgnhostPodOnNode(clientName, "z", controlPlaneNodeName(), true)) - defer data.deletePodAndWait(defaultTimeout, clientName, "z") - _, err := data.podWaitForIPs(defaultTimeout, clientName, "z") + require.NoError(t, data.createAgnhostPodOnNode(clientName, namespaces["z"], controlPlaneNodeName(), true)) + defer data.deletePodAndWait(defaultTimeout, clientName, namespaces["z"]) + _, err := data.podWaitForIPs(defaultTimeout, clientName, namespaces["z"]) require.NoError(t, err) builder := &ClusterNetworkPolicySpecBuilder{} @@ -2862,20 +2867,20 @@ func testACNPNodeSelectorIngress(t *testing.T, data *TestData) { SetPriority(1.0) nodeSelector := metav1.LabelSelector{MatchLabels: map[string]string{"kubernetes.io/hostname": controlPlaneNodeName()}} builder.AddNodeSelectorRule(&nodeSelector, ProtocolTCP, &p80, "ingress-control-plane-drop", - []ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": "x"}}}, + []ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": namespaces["x"]}}}, crdv1alpha1.RuleActionDrop, false) testcases := []podToAddrTestStep{} if clusterInfo.podV4NetworkCIDR != "" { ipv4TestCases := []podToAddrTestStep{ { - Pod("z/" + clientName), + Pod(namespaces["z"] + "/" + clientName), serverIP0.ipv4.String(), 80, Dropped, }, { - Pod("z/" + clientName), + Pod(namespaces["z"] + "/" + clientName), serverIP1.ipv4.String(), 80, Connected, @@ -2886,13 +2891,13 @@ func testACNPNodeSelectorIngress(t *testing.T, data *TestData) { if clusterInfo.podV6NetworkCIDR != "" { ipv6TestCases := []podToAddrTestStep{ { - Pod("z/" + clientName), + Pod(namespaces["z"] + "/" + clientName), serverIP0.ipv6.String(), 80, Dropped, }, { - Pod("z/" + clientName), + Pod(namespaces["z"] + "/" + clientName), serverIP1.ipv6.String(), 80, Connected, @@ -2922,13 +2927,13 @@ func testACNPNodeSelectorIngress(t *testing.T, data *TestData) { } func testACNPICMPSupport(t *testing.T, data *TestData) { - clientName, _, cleanupFunc := createAndWaitForPod(t, data, data.createNetshootPodOnNode, "client", nodeName(1), 
testNamespace, false) + clientName, _, cleanupFunc := createAndWaitForPod(t, data, data.createNetshootPodOnNode, "client", nodeName(1), data.testNamespace, false) defer cleanupFunc() - server0Name, server0IP, cleanupFunc := createAndWaitForPod(t, data, data.createNginxPodOnNode, "server0", nodeName(0), testNamespace, false) + server0Name, server0IP, cleanupFunc := createAndWaitForPod(t, data, data.createNginxPodOnNode, "server0", nodeName(0), data.testNamespace, false) defer cleanupFunc() - server1Name, server1IP, cleanupFunc := createAndWaitForPod(t, data, data.createNginxPodOnNode, "server1", nodeName(1), testNamespace, false) + server1Name, server1IP, cleanupFunc := createAndWaitForPod(t, data, data.createNginxPodOnNode, "server1", nodeName(1), data.testNamespace, false) defer cleanupFunc() icmpType := int32(8) @@ -2945,13 +2950,13 @@ func testACNPICMPSupport(t *testing.T, data *TestData) { if clusterInfo.podV4NetworkCIDR != "" { testcases = append(testcases, []podToAddrTestStep{ { - Pod(fmt.Sprintf("%s/%s", testNamespace, clientName)), + Pod(fmt.Sprintf("%s/%s", data.testNamespace, clientName)), server0IP.ipv4.String(), -1, Rejected, }, { - Pod(fmt.Sprintf("%s/%s", testNamespace, clientName)), + Pod(fmt.Sprintf("%s/%s", data.testNamespace, clientName)), server1IP.ipv4.String(), -1, Dropped, @@ -2961,13 +2966,13 @@ func testACNPICMPSupport(t *testing.T, data *TestData) { if clusterInfo.podV6NetworkCIDR != "" { testcases = append(testcases, []podToAddrTestStep{ { - Pod(fmt.Sprintf("%s/%s", testNamespace, clientName)), + Pod(fmt.Sprintf("%s/%s", data.testNamespace, clientName)), server0IP.ipv6.String(), -1, Rejected, }, { - Pod(fmt.Sprintf("%s/%s", testNamespace, clientName)), + Pod(fmt.Sprintf("%s/%s", data.testNamespace, clientName)), server1IP.ipv6.String(), -1, Dropped, @@ -3254,7 +3259,7 @@ func TestAntreaPolicy(t *testing.T) { t.Run("Case=ANPTierDoesNotExistDenied", func(t *testing.T) { testInvalidANPTierDoesNotExist(t) }) t.Run("Case=ANPPortRangePortUnsetDenied", func(t *testing.T) { testInvalidANPPortRangePortUnset(t) }) t.Run("Case=ANPPortRangePortEndPortSmallDenied", func(t *testing.T) { testInvalidANPPortRangeEndPortSmall(t) }) - t.Run("Case=ACNPInvalidPodSelectorNsSelectorMatchExpressions", func(t *testing.T) { testInvalidACNPPodSelectorNsSelectorMatchExpressions(t) }) + t.Run("Case=ACNPInvalidPodSelectorNsSelectorMatchExpressions", func(t *testing.T) { testInvalidACNPPodSelectorNsSelectorMatchExpressions(t, data) }) }) t.Run("TestGroupValidateTiers", func(t *testing.T) { @@ -3350,16 +3355,16 @@ func TestAntreaPolicyStatus(t *testing.T) { } defer teardownTest(t, data) - _, _, cleanupFunc := createAndWaitForPod(t, data, data.createNginxPodOnNode, "server-0", controlPlaneNodeName(), testNamespace, false) + _, _, cleanupFunc := createAndWaitForPod(t, data, data.createNginxPodOnNode, "server-0", controlPlaneNodeName(), data.testNamespace, false) defer cleanupFunc() - _, _, cleanupFunc = createAndWaitForPod(t, data, data.createNginxPodOnNode, "server-1", workerNodeName(1), testNamespace, false) + _, _, cleanupFunc = createAndWaitForPod(t, data, data.createNginxPodOnNode, "server-1", workerNodeName(1), data.testNamespace, false) defer cleanupFunc() anpBuilder := &AntreaNetworkPolicySpecBuilder{} - anpBuilder = anpBuilder.SetName(testNamespace, "anp-applied-to-two-nodes"). + anpBuilder = anpBuilder.SetName(data.testNamespace, "anp-applied-to-two-nodes"). SetPriority(1.0). 
SetAppliedToGroup([]ANPAppliedToSpec{{PodSelector: map[string]string{"app": "nginx"}}}) - anpBuilder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": "x"}, + anpBuilder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": namespaces["x"]}, nil, nil, nil, crdv1alpha1.RuleActionAllow, "") anp := anpBuilder.Get() log.Debugf("creating ANP %v", anp.Name) @@ -3371,7 +3376,7 @@ func TestAntreaPolicyStatus(t *testing.T) { acnpBuilder = acnpBuilder.SetName("acnp-applied-to-two-nodes"). SetPriority(1.0). SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"app": "nginx"}}}) - acnpBuilder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": "x"}, + acnpBuilder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": namespaces["x"]}, nil, nil, false, nil, crdv1alpha1.RuleActionAllow, "", "", nil) acnp := acnpBuilder.Get() log.Debugf("creating ACNP %v", acnp.Name) @@ -3399,17 +3404,17 @@ func TestAntreaPolicyStatusWithAppliedToPerRule(t *testing.T) { } defer teardownTest(t, data) - server0Name, _, cleanupFunc := createAndWaitForPod(t, data, data.createNginxPodOnNode, "server-0", controlPlaneNodeName(), testNamespace, false) + server0Name, _, cleanupFunc := createAndWaitForPod(t, data, data.createNginxPodOnNode, "server-0", controlPlaneNodeName(), data.testNamespace, false) defer cleanupFunc() - server1Name, _, cleanupFunc := createAndWaitForPod(t, data, data.createNginxPodOnNode, "server-1", workerNodeName(1), testNamespace, false) + server1Name, _, cleanupFunc := createAndWaitForPod(t, data, data.createNginxPodOnNode, "server-1", workerNodeName(1), data.testNamespace, false) defer cleanupFunc() anpBuilder := &AntreaNetworkPolicySpecBuilder{} - anpBuilder = anpBuilder.SetName(testNamespace, "anp-applied-to-per-rule"). + anpBuilder = anpBuilder.SetName(data.testNamespace, "anp-applied-to-per-rule"). 
SetPriority(1.0) - anpBuilder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": "x"}, + anpBuilder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": namespaces["x"]}, nil, nil, []ANPAppliedToSpec{{PodSelector: map[string]string{"antrea-e2e": server0Name}}}, crdv1alpha1.RuleActionAllow, "") - anpBuilder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": "x"}, + anpBuilder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": namespaces["x"]}, nil, nil, []ANPAppliedToSpec{{PodSelector: map[string]string{"antrea-e2e": server1Name}}}, crdv1alpha1.RuleActionAllow, "") anp := anpBuilder.Get() log.Debugf("creating ANP %v", anp.Name) @@ -3499,10 +3504,10 @@ func (data *TestData) waitForACNPRealized(t *testing.T, name string) error { // testANPNetworkPolicyStatsWithDropAction tests antreanetworkpolicystats can correctly collect dropped packets stats from ANP if // networkpolicystats feature is enabled func testANPNetworkPolicyStatsWithDropAction(t *testing.T, data *TestData) { - serverName, serverIPs, cleanupFunc := createAndWaitForPod(t, data, data.createNginxPodOnNode, "test-server-", "", testNamespace, false) + serverName, serverIPs, cleanupFunc := createAndWaitForPod(t, data, data.createNginxPodOnNode, "test-server-", "", data.testNamespace, false) defer cleanupFunc() - clientName, _, cleanupFunc := createAndWaitForPod(t, data, data.createBusyboxPodOnNode, "test-client-", "", testNamespace, false) + clientName, _, cleanupFunc := createAndWaitForPod(t, data, data.createBusyboxPodOnNode, "test-client-", "", data.testNamespace, false) defer cleanupFunc() var err error k8sUtils, err = NewKubernetesUtils(data) @@ -3521,14 +3526,14 @@ func testANPNetworkPolicyStatsWithDropAction(t *testing.T, data *TestData) { // So we need to "warm-up" the tunnel. 
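Losing that first packet to tunnel setup would be indistinguishable from a policy drop in the counters, which is also why the stats assertions further down poll rather than check once: the per-policy statistics are aggregated asynchronously. The tests use wait.Poll from k8s.io/apimachinery for this; a self-contained sketch of the same contract:

package main

import (
	"errors"
	"fmt"
	"time"
)

// poll retries cond every interval until it reports done, returns an error,
// or the timeout elapses -- the contract the tests rely on via wait.Poll.
func poll(interval, timeout time.Duration, cond func() (bool, error)) error {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		done, err := cond()
		if err != nil {
			return err
		}
		if done {
			return nil
		}
		time.Sleep(interval)
	}
	return errors.New("timed out waiting for condition")
}

func main() {
	attempts := 0
	err := poll(10*time.Millisecond, time.Second, func() (bool, error) {
		attempts++
		return attempts >= 3, nil // e.g. the stats finally reflect every session
	})
	fmt.Println(attempts, err) // 3 <nil>
}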
if clusterInfo.podV4NetworkCIDR != "" { cmd := []string{"/bin/sh", "-c", fmt.Sprintf("nc -vz -w 4 %s 80", serverIPs.ipv4.String())} - data.RunCommandFromPod(testNamespace, clientName, busyboxContainerName, cmd) + data.RunCommandFromPod(data.testNamespace, clientName, busyboxContainerName, cmd) } if clusterInfo.podV6NetworkCIDR != "" { cmd := []string{"/bin/sh", "-c", fmt.Sprintf("nc -vz -w 4 %s 80", serverIPs.ipv6.String())} - data.RunCommandFromPod(testNamespace, clientName, busyboxContainerName, cmd) + data.RunCommandFromPod(data.testNamespace, clientName, busyboxContainerName, cmd) } var anp = &crdv1alpha1.NetworkPolicy{ - ObjectMeta: metav1.ObjectMeta{Namespace: testNamespace, Name: "np1", Labels: map[string]string{"antrea-e2e": "np1"}}, + ObjectMeta: metav1.ObjectMeta{Namespace: data.testNamespace, Name: "np1", Labels: map[string]string{"antrea-e2e": "np1"}}, Spec: crdv1alpha1.NetworkPolicySpec{ AppliedTo: []crdv1alpha1.NetworkPolicyPeer{ {PodSelector: &selectorC}, @@ -3583,14 +3588,14 @@ func testANPNetworkPolicyStatsWithDropAction(t *testing.T, data *TestData) { if clusterInfo.podV4NetworkCIDR != "" { cmd := []string{"/bin/sh", "-c", fmt.Sprintf("echo test | nc -w 4 -u %s 80", serverIPs.ipv4.String())} cmd2 := []string{"/bin/sh", "-c", fmt.Sprintf("echo test | nc -w 4 -u %s 443", serverIPs.ipv4.String())} - data.RunCommandFromPod(testNamespace, clientName, busyboxContainerName, cmd) - data.RunCommandFromPod(testNamespace, clientName, busyboxContainerName, cmd2) + data.RunCommandFromPod(data.testNamespace, clientName, busyboxContainerName, cmd) + data.RunCommandFromPod(data.testNamespace, clientName, busyboxContainerName, cmd2) } if clusterInfo.podV6NetworkCIDR != "" { cmd := []string{"/bin/sh", "-c", fmt.Sprintf("echo test | nc -w 4 -u %s 80", serverIPs.ipv6.String())} cmd2 := []string{"/bin/sh", "-c", fmt.Sprintf("echo test | nc -w 4 -u %s 443", serverIPs.ipv6.String())} - data.RunCommandFromPod(testNamespace, clientName, busyboxContainerName, cmd) - data.RunCommandFromPod(testNamespace, clientName, busyboxContainerName, cmd2) + data.RunCommandFromPod(data.testNamespace, clientName, busyboxContainerName, cmd) + data.RunCommandFromPod(data.testNamespace, clientName, busyboxContainerName, cmd2) } wg.Done() }() @@ -3606,7 +3611,7 @@ func testANPNetworkPolicyStatsWithDropAction(t *testing.T, data *TestData) { } if err := wait.Poll(5*time.Second, defaultTimeout, func() (bool, error) { - stats, err := data.crdClient.StatsV1alpha1().AntreaNetworkPolicyStats(testNamespace).Get(context.TODO(), "np1", metav1.GetOptions{}) + stats, err := data.crdClient.StatsV1alpha1().AntreaNetworkPolicyStats(data.testNamespace).Get(context.TODO(), "np1", metav1.GetOptions{}) if err != nil { return false, err } @@ -3634,10 +3639,10 @@ func testANPNetworkPolicyStatsWithDropAction(t *testing.T, data *TestData) { } func testAntreaClusterNetworkPolicyStats(t *testing.T, data *TestData) { - serverName, serverIPs, cleanupFunc := createAndWaitForPod(t, data, data.createNginxPodOnNode, "test-server-", "", testNamespace, false) + serverName, serverIPs, cleanupFunc := createAndWaitForPod(t, data, data.createNginxPodOnNode, "test-server-", "", data.testNamespace, false) defer cleanupFunc() - clientName, _, cleanupFunc := createAndWaitForPod(t, data, data.createBusyboxPodOnNode, "test-client-", "", testNamespace, false) + clientName, _, cleanupFunc := createAndWaitForPod(t, data, data.createBusyboxPodOnNode, "test-client-", "", data.testNamespace, false) defer cleanupFunc() var err error k8sUtils, err = 
NewKubernetesUtils(data) @@ -3656,14 +3661,14 @@ func testAntreaClusterNetworkPolicyStats(t *testing.T, data *TestData) { // So we need to "warm-up" the tunnel. if clusterInfo.podV4NetworkCIDR != "" { cmd := []string{"/bin/sh", "-c", fmt.Sprintf("nc -vz -w 4 %s 80", serverIPs.ipv4.String())} - data.RunCommandFromPod(testNamespace, clientName, busyboxContainerName, cmd) + data.RunCommandFromPod(data.testNamespace, clientName, busyboxContainerName, cmd) } if clusterInfo.podV6NetworkCIDR != "" { cmd := []string{"/bin/sh", "-c", fmt.Sprintf("nc -vz -w 4 %s 80", serverIPs.ipv6.String())} - data.RunCommandFromPod(testNamespace, clientName, busyboxContainerName, cmd) + data.RunCommandFromPod(data.testNamespace, clientName, busyboxContainerName, cmd) } var acnp = &crdv1alpha1.ClusterNetworkPolicy{ - ObjectMeta: metav1.ObjectMeta{Namespace: testNamespace, Name: "cnp1", Labels: map[string]string{"antrea-e2e": "cnp1"}}, + ObjectMeta: metav1.ObjectMeta{Namespace: data.testNamespace, Name: "cnp1", Labels: map[string]string{"antrea-e2e": "cnp1"}}, Spec: crdv1alpha1.ClusterNetworkPolicySpec{ AppliedTo: []crdv1alpha1.NetworkPolicyPeer{ {PodSelector: &selectorC}, @@ -3718,14 +3723,14 @@ func testAntreaClusterNetworkPolicyStats(t *testing.T, data *TestData) { if clusterInfo.podV4NetworkCIDR != "" { cmd := []string{"/bin/sh", "-c", fmt.Sprintf("echo test | nc -w 4 -u %s 800", serverIPs.ipv4.String())} cmd2 := []string{"/bin/sh", "-c", fmt.Sprintf("echo test | nc -w 4 -u %s 4430", serverIPs.ipv4.String())} - data.RunCommandFromPod(testNamespace, clientName, busyboxContainerName, cmd) - data.RunCommandFromPod(testNamespace, clientName, busyboxContainerName, cmd2) + data.RunCommandFromPod(data.testNamespace, clientName, busyboxContainerName, cmd) + data.RunCommandFromPod(data.testNamespace, clientName, busyboxContainerName, cmd2) } if clusterInfo.podV6NetworkCIDR != "" { cmd := []string{"/bin/sh", "-c", fmt.Sprintf("echo test | nc -w 4 -u %s 800", serverIPs.ipv6.String())} cmd2 := []string{"/bin/sh", "-c", fmt.Sprintf("echo test | nc -w 4 -u %s 4430", serverIPs.ipv6.String())} - data.RunCommandFromPod(testNamespace, clientName, busyboxContainerName, cmd) - data.RunCommandFromPod(testNamespace, clientName, busyboxContainerName, cmd2) + data.RunCommandFromPod(data.testNamespace, clientName, busyboxContainerName, cmd) + data.RunCommandFromPod(data.testNamespace, clientName, busyboxContainerName, cmd2) } wg.Done() }() diff --git a/test/e2e/bandwidth_test.go b/test/e2e/bandwidth_test.go index 9ea79dd4704..a48aa867caa 100644 --- a/test/e2e/bandwidth_test.go +++ b/test/e2e/bandwidth_test.go @@ -46,6 +46,7 @@ func TestBandwidth(t *testing.T) { func TestBenchmarkBandwidth(t *testing.T) { skipIfNotBenchmarkTest(t) skipIfHasWindowsNodes(t) + data, err := setupTest(t) if err != nil { t.Fatalf("Error when setting up test: %v", err) @@ -68,21 +69,21 @@ func TestBenchmarkBandwidth(t *testing.T) { // testBenchmarkBandwidthIntraNode runs the bandwidth benchmark between Pods on same node. 
func testBenchmarkBandwidthIntraNode(t *testing.T, data *TestData) { - if err := data.createPodOnNode("perftest-a", testNamespace, controlPlaneNodeName(), perftoolImage, nil, nil, nil, nil, false, nil); err != nil { + if err := data.createPodOnNode("perftest-a", data.testNamespace, controlPlaneNodeName(), perftoolImage, nil, nil, nil, nil, false, nil); err != nil { t.Fatalf("Error when creating the perftest client Pod: %v", err) } - if err := data.podWaitForRunning(defaultTimeout, "perftest-a", testNamespace); err != nil { + if err := data.podWaitForRunning(defaultTimeout, "perftest-a", data.testNamespace); err != nil { t.Fatalf("Error when waiting for the perftest client Pod: %v", err) } - if err := data.createPodOnNode("perftest-b", testNamespace, controlPlaneNodeName(), perftoolImage, nil, nil, nil, []v1.ContainerPort{{Protocol: v1.ProtocolTCP, ContainerPort: iperfPort}}, false, nil); err != nil { + if err := data.createPodOnNode("perftest-b", data.testNamespace, controlPlaneNodeName(), perftoolImage, nil, nil, nil, []v1.ContainerPort{{Protocol: v1.ProtocolTCP, ContainerPort: iperfPort}}, false, nil); err != nil { t.Fatalf("Error when creating the perftest server Pod: %v", err) } - podBIPs, err := data.podWaitForIPs(defaultTimeout, "perftest-b", testNamespace) + podBIPs, err := data.podWaitForIPs(defaultTimeout, "perftest-b", data.testNamespace) if err != nil { t.Fatalf("Error when getting the perftest server Pod's IP: %v", err) } podBIP := podBIPs.ipv4.String() - stdout, _, err := data.RunCommandFromPod(testNamespace, "perftest-a", "perftool", []string{"bash", "-c", fmt.Sprintf("iperf3 -c %s|grep sender|awk '{print $7,$8}'", podBIP)}) + stdout, _, err := data.RunCommandFromPod(data.testNamespace, "perftest-a", "perftool", []string{"bash", "-c", fmt.Sprintf("iperf3 -c %s|grep sender|awk '{print $7,$8}'", podBIP)}) if err != nil { t.Fatalf("Error when running iperf3 client: %v", err) } @@ -91,23 +92,23 @@ func testBenchmarkBandwidthIntraNode(t *testing.T, data *TestData) { } func benchmarkBandwidthService(t *testing.T, endpointNode, clientNode string, data *TestData) { - svc, err := data.CreateService("perftest-b", testNamespace, iperfPort, iperfPort, map[string]string{"antrea-e2e": "perftest-b"}, false, false, v1.ServiceTypeClusterIP, nil) + svc, err := data.CreateService("perftest-b", data.testNamespace, iperfPort, iperfPort, map[string]string{"antrea-e2e": "perftest-b"}, false, false, v1.ServiceTypeClusterIP, nil) if err != nil { t.Fatalf("Error when creating perftest service: %v", err) } - if err := data.createPodOnNode("perftest-a", testNamespace, clientNode, perftoolImage, nil, nil, nil, nil, false, nil); err != nil { + if err := data.createPodOnNode("perftest-a", data.testNamespace, clientNode, perftoolImage, nil, nil, nil, nil, false, nil); err != nil { t.Fatalf("Error when creating the perftest client Pod: %v", err) } - if err := data.podWaitForRunning(defaultTimeout, "perftest-a", testNamespace); err != nil { + if err := data.podWaitForRunning(defaultTimeout, "perftest-a", data.testNamespace); err != nil { t.Fatalf("Error when waiting for the perftest client Pod: %v", err) } - if err := data.createPodOnNode("perftest-b", testNamespace, endpointNode, perftoolImage, nil, nil, nil, []v1.ContainerPort{{Protocol: v1.ProtocolTCP, ContainerPort: iperfPort}}, false, nil); err != nil { + if err := data.createPodOnNode("perftest-b", data.testNamespace, endpointNode, perftoolImage, nil, nil, nil, []v1.ContainerPort{{Protocol: v1.ProtocolTCP, ContainerPort: iperfPort}}, false, nil); err != 
nil { t.Fatalf("Error when creating the perftest server Pod: %v", err) } - if err := data.podWaitForRunning(defaultTimeout, "perftest-b", testNamespace); err != nil { + if err := data.podWaitForRunning(defaultTimeout, "perftest-b", data.testNamespace); err != nil { t.Fatalf("Error when getting the perftest server Pod's IP: %v", err) } - stdout, stderr, err := data.RunCommandFromPod(testNamespace, "perftest-a", perftoolContainerName, []string{"bash", "-c", fmt.Sprintf("iperf3 -c %s|grep sender|awk '{print $7,$8}'", svc.Spec.ClusterIP)}) + stdout, stderr, err := data.RunCommandFromPod(data.testNamespace, "perftest-a", perftoolContainerName, []string{"bash", "-c", fmt.Sprintf("iperf3 -c %s|grep sender|awk '{print $7,$8}'", svc.Spec.ClusterIP)}) if err != nil { t.Fatalf("Error when running iperf3 client: %v, stderr: %s", err, stderr) } @@ -158,32 +159,32 @@ func testPodTrafficShaping(t *testing.T, data *TestData) { t.Run(tt.name, func(t *testing.T) { clientPodName := fmt.Sprintf("client-a-%d", i) serverPodName := fmt.Sprintf("server-a-%d", i) - if err := data.createPodOnNode(clientPodName, testNamespace, nodeName, perftoolImage, nil, nil, nil, nil, false, func(pod *v1.Pod) { + if err := data.createPodOnNode(clientPodName, data.testNamespace, nodeName, perftoolImage, nil, nil, nil, nil, false, func(pod *v1.Pod) { pod.Annotations = map[string]string{ "kubernetes.io/egress-bandwidth": fmt.Sprintf("%dM", tt.clientEgressBandwidth), } }); err != nil { t.Fatalf("Error when creating the perftest client Pod: %v", err) } - defer deletePodWrapper(t, data, testNamespace, clientPodName) - if err := data.podWaitForRunning(defaultTimeout, clientPodName, testNamespace); err != nil { + defer deletePodWrapper(t, data, data.testNamespace, clientPodName) + if err := data.podWaitForRunning(defaultTimeout, clientPodName, data.testNamespace); err != nil { t.Fatalf("Error when waiting for the perftest client Pod: %v", err) } - if err := data.createPodOnNode(serverPodName, testNamespace, nodeName, perftoolImage, nil, nil, nil, []v1.ContainerPort{{Protocol: v1.ProtocolTCP, ContainerPort: iperfPort}}, false, func(pod *v1.Pod) { + if err := data.createPodOnNode(serverPodName, data.testNamespace, nodeName, perftoolImage, nil, nil, nil, []v1.ContainerPort{{Protocol: v1.ProtocolTCP, ContainerPort: iperfPort}}, false, func(pod *v1.Pod) { pod.Annotations = map[string]string{ "kubernetes.io/ingress-bandwidth": fmt.Sprintf("%dM", tt.serverIngressBandwidth), } }); err != nil { t.Fatalf("Error when creating the perftest server Pod: %v", err) } - defer deletePodWrapper(t, data, testNamespace, serverPodName) - podIPs, err := data.podWaitForIPs(defaultTimeout, serverPodName, testNamespace) + defer deletePodWrapper(t, data, data.testNamespace, serverPodName) + podIPs, err := data.podWaitForIPs(defaultTimeout, serverPodName, data.testNamespace) if err != nil { t.Fatalf("Error when getting the perftest server Pod's IP: %v", err) } runIperf := func(cmd []string) { - stdout, _, err := data.RunCommandFromPod(testNamespace, clientPodName, "perftool", cmd) + stdout, _, err := data.RunCommandFromPod(data.testNamespace, clientPodName, "perftool", cmd) if err != nil { t.Fatalf("Error when running iperf3 client: %v", err) } diff --git a/test/e2e/basic_test.go b/test/e2e/basic_test.go index d795f979474..99e20da54b4 100644 --- a/test/e2e/basic_test.go +++ b/test/e2e/basic_test.go @@ -49,12 +49,12 @@ func TestBasic(t *testing.T) { } defer teardownTest(t, data) - t.Run("testPodAssignIP", func(t *testing.T) { testPodAssignIP(t, data, testNamespace, 
"", "") }) - t.Run("testDeletePod", func(t *testing.T) { testDeletePod(t, data, testNamespace) }) + t.Run("testPodAssignIP", func(t *testing.T) { testPodAssignIP(t, data, data.testNamespace, "", "") }) + t.Run("testDeletePod", func(t *testing.T) { testDeletePod(t, data, data.testNamespace) }) t.Run("testAntreaGracefulExit", func(t *testing.T) { testAntreaGracefulExit(t, data) }) - t.Run("testIPAMRestart", func(t *testing.T) { testIPAMRestart(t, data, testNamespace) }) + t.Run("testIPAMRestart", func(t *testing.T) { testIPAMRestart(t, data, data.testNamespace) }) t.Run("testDeletePreviousRoundFlowsOnStartup", func(t *testing.T) { testDeletePreviousRoundFlowsOnStartup(t, data) }) - t.Run("testGratuitousARP", func(t *testing.T) { testGratuitousARP(t, data, testNamespace) }) + t.Run("testGratuitousARP", func(t *testing.T) { testGratuitousARP(t, data, data.testNamespace) }) t.Run("testClusterIdentity", func(t *testing.T) { testClusterIdentity(t, data) }) } @@ -522,7 +522,7 @@ func TestCleanStaleClusterIPRoutes(t *testing.T) { skipIfProxyAllDisabled(t, data) // Create a backend Pod for test Service: if a Service has no backend Pod, no ClusterIP route will be installed. - createAndWaitForPod(t, data, data.createNginxPodOnNode, "test-clean-stale-route-pod", nodeName(0), testNamespace, false) + createAndWaitForPod(t, data, data.createNginxPodOnNode, "test-clean-stale-route-pod", nodeName(0), data.testNamespace, false) if len(clusterInfo.podV4NetworkCIDR) != 0 { t.Logf("Running IPv4 test") @@ -540,10 +540,10 @@ func testCleanStaleClusterIPRoutes(t *testing.T, data *TestData, isIPv6 bool) { ipProtocol = corev1.IPv6Protocol } // Create two test ClusterIPs. - svc, err := data.createNginxClusterIPService(fmt.Sprintf("test-clean-stale-route-svc1-%v", isIPv6), testNamespace, false, &ipProtocol) + svc, err := data.createNginxClusterIPService(fmt.Sprintf("test-clean-stale-route-svc1-%v", isIPv6), data.testNamespace, false, &ipProtocol) require.NoError(t, err) require.NotEqual(t, "", svc.Spec.ClusterIP, "ClusterIP should not be empty") - svc, err = data.createNginxClusterIPService(fmt.Sprintf("test-clean-stale-route-svc2-%v", isIPv6), testNamespace, false, &ipProtocol) + svc, err = data.createNginxClusterIPService(fmt.Sprintf("test-clean-stale-route-svc2-%v", isIPv6), data.testNamespace, false, &ipProtocol) require.NoError(t, err) require.NotEqual(t, "", svc.Spec.ClusterIP, "ClusterIP should not be empty") time.Sleep(time.Second) diff --git a/test/e2e/batch_test.go b/test/e2e/batch_test.go index f66121f5e87..1ddac2e2090 100644 --- a/test/e2e/batch_test.go +++ b/test/e2e/batch_test.go @@ -54,7 +54,7 @@ func TestBatchCreatePods(t *testing.T) { oldFDs := getFDs() - _, _, cleanupFn := createTestBusyboxPods(t, data, batchNum, testNamespace, node1) + _, _, cleanupFn := createTestBusyboxPods(t, data, batchNum, data.testNamespace, node1) defer cleanupFn() newFDs := getFDs() diff --git a/test/e2e/clustergroup_test.go b/test/e2e/clustergroup_test.go index 723fb3c8755..ab37691c789 100644 --- a/test/e2e/clustergroup_test.go +++ b/test/e2e/clustergroup_test.go @@ -52,7 +52,7 @@ func testInvalidCGIPBlockWithPodSelector(t *testing.T) { func testInvalidCGIPBlockWithNSSelector(t *testing.T) { invalidErr := fmt.Errorf("clustergroup created with ipblock and namespaceSelector") cgName := "ipb-ns" - nSel := &metav1.LabelSelector{MatchLabels: map[string]string{"ns": "y"}} + nSel := &metav1.LabelSelector{MatchLabels: map[string]string{"ns": namespaces["y"]}} cidr := "10.0.0.10/32" ipb := []crdv1alpha1.IPBlock{{CIDR: cidr}} cg := 
&crdv1alpha3.ClusterGroup{ @@ -97,7 +97,7 @@ func testInvalidCGServiceRefWithPodSelector(t *testing.T) { cgName := "svcref-pod-selector" pSel := &metav1.LabelSelector{MatchLabels: map[string]string{"pod": "x"}} svcRef := &crdv1alpha1.NamespacedName{ - Namespace: "y", + Namespace: namespaces["y"], Name: "test-svc", } cg := &crdv1alpha3.ClusterGroup{ @@ -118,9 +118,9 @@ func testInvalidCGServiceRefWithPodSelector(t *testing.T) { func testInvalidCGServiceRefWithNSSelector(t *testing.T) { invalidErr := fmt.Errorf("clustergroup created with serviceReference and namespaceSelector") cgName := "svcref-ns-selector" - nSel := &metav1.LabelSelector{MatchLabels: map[string]string{"ns": "y"}} + nSel := &metav1.LabelSelector{MatchLabels: map[string]string{"ns": namespaces["y"]}} svcRef := &crdv1alpha1.NamespacedName{ - Namespace: "y", + Namespace: namespaces["y"], Name: "test-svc", } cg := &crdv1alpha3.ClusterGroup{ @@ -144,7 +144,7 @@ func testInvalidCGServiceRefWithIPBlock(t *testing.T) { cidr := "10.0.0.10/32" ipb := []crdv1alpha1.IPBlock{{CIDR: cidr}} svcRef := &crdv1alpha1.NamespacedName{ - Namespace: "y", + Namespace: namespaces["y"], Name: "test-svc", } cg := &crdv1alpha3.ClusterGroup{ @@ -207,7 +207,7 @@ func testInvalidCGChildGroupWithServiceReference(t *testing.T) { invalidErr := fmt.Errorf("clustergroup created with childGroups and ServiceReference") cgName := "child-group-svcref" svcRef := &crdv1alpha1.NamespacedName{ - Namespace: "y", + Namespace: namespaces["y"], Name: "test-svc", } cg := &crdv1alpha3.ClusterGroup{ @@ -389,6 +389,7 @@ func TestClusterGroup(t *testing.T) { t.Fatalf("Error when setting up test: %v", err) } defer teardownTest(t, data) + initialize(t, data) t.Run("TestGroupClusterGroupValidate", func(t *testing.T) { diff --git a/test/e2e/connectivity_test.go b/test/e2e/connectivity_test.go index 7b76846549b..6210cfabc56 100644 --- a/test/e2e/connectivity_test.go +++ b/test/e2e/connectivity_test.go @@ -54,16 +54,16 @@ func TestConnectivity(t *testing.T) { }) t.Run("testPodConnectivityAfterAntreaRestart", func(t *testing.T) { skipIfHasWindowsNodes(t) - testPodConnectivityAfterAntreaRestart(t, data, testNamespace) + testPodConnectivityAfterAntreaRestart(t, data, data.testNamespace) }) t.Run("testOVSRestartSameNode", func(t *testing.T) { skipIfNotIPv4Cluster(t) skipIfHasWindowsNodes(t) - testOVSRestartSameNode(t, data, testNamespace) + testOVSRestartSameNode(t, data, data.testNamespace) }) t.Run("testOVSFlowReplay", func(t *testing.T) { skipIfHasWindowsNodes(t) - testOVSFlowReplay(t, data, testNamespace) + testOVSFlowReplay(t, data, data.testNamespace) }) t.Run("testPingLargeMTU", func(t *testing.T) { skipIfNumNodesLessThan(t, 2) @@ -76,7 +76,7 @@ func waitForPodIPs(t *testing.T, data *TestData, podInfos []podInfo) map[string] podIPs := make(map[string]*PodIPs) for _, pi := range podInfos { podName := pi.name - podNamespace := testNamespace + podNamespace := data.testNamespace if pi.namespace != "" { podNamespace = pi.namespace } @@ -101,11 +101,11 @@ func (data *TestData) runPingMesh(t *testing.T, podInfos []podInfo, ctrname stri if pi1.name == pi2.name { continue } - podNamespace := testNamespace + podNamespace := data.testNamespace if pi1.namespace != "" { podNamespace = pi1.namespace } - pod2Namespace := testNamespace + pod2Namespace := data.testNamespace if pi2.namespace != "" { pod2Namespace = pi2.namespace } @@ -133,10 +133,10 @@ func (data *TestData) testPodConnectivitySameNode(t *testing.T) { t.Logf("Creating %d agnhost Pods on '%s'", numPods, workerNode) for i := range 
podInfos { podInfos[i].os = clusterInfo.nodesOS[workerNode] - if err := data.createAgnhostPodOnNode(podInfos[i].name, testNamespace, workerNode, false); err != nil { + if err := data.createAgnhostPodOnNode(podInfos[i].name, data.testNamespace, workerNode, false); err != nil { t.Fatalf("Error when creating agnhost test Pod '%s': %v", podInfos[i], err) } - defer deletePodWrapper(t, data, testNamespace, podInfos[i].name) + defer deletePodWrapper(t, data, data.testNamespace, podInfos[i].name) } data.runPingMesh(t, podInfos, agnhostContainerName) @@ -181,7 +181,7 @@ func (data *TestData) testHostPortPodConnectivity(t *testing.T, clientNamespace, // testHostPortPodConnectivity checks that a Pod with hostPort set is reachable. func testHostPortPodConnectivity(t *testing.T, data *TestData) { - data.testHostPortPodConnectivity(t, testNamespace, testNamespace) + data.testHostPortPodConnectivity(t, data.testNamespace, data.testNamespace) } // createPodsOnDifferentNodes creates agnhost Pods through a DaemonSet. This function returns information of the created @@ -238,7 +238,7 @@ func (data *TestData) testPodConnectivityDifferentNodes(t *testing.T) { // subnet, all Nodes should have a Pod. numPods = maxPods } - podInfos, deletePods := createPodsOnDifferentNodes(t, data, testNamespace, "differentnodes") + podInfos, deletePods := createPodsOnDifferentNodes(t, data, data.testNamespace, "differentnodes") defer deletePods() if len(podInfos) > maxPods { @@ -489,13 +489,13 @@ func testOVSFlowReplay(t *testing.T, data *TestData, namespace string) { func testPingLargeMTU(t *testing.T, data *TestData) { skipIfNumNodesLessThan(t, 2) - podInfos, deletePods := createPodsOnDifferentNodes(t, data, testNamespace, "largemtu") + podInfos, deletePods := createPodsOnDifferentNodes(t, data, data.testNamespace, "largemtu") defer deletePods() podIPs := waitForPodIPs(t, data, podInfos) pingSize := 2000 t.Logf("Running ping with size %d between Pods %s and %s", pingSize, podInfos[0].name, podInfos[1].name) - if err := data.runPingCommandFromTestPod(podInfos[0], testNamespace, podIPs[podInfos[1].name], agnhostContainerName, pingCount, pingSize); err != nil { + if err := data.runPingCommandFromTestPod(podInfos[0], data.testNamespace, podIPs[podInfos[1].name], agnhostContainerName, pingCount, pingSize); err != nil { t.Error(err) } } diff --git a/test/e2e/egress_test.go b/test/e2e/egress_test.go index 01f808e3f70..70f9ed86f5b 100644 --- a/test/e2e/egress_test.go +++ b/test/e2e/egress_test.go @@ -125,31 +125,31 @@ ip netns exec %[1]s ip link set dev %[1]s-a up && \ ip netns exec %[1]s ip route replace default via %[3]s && \ ip netns exec %[1]s /agnhost netexec `, tt.fakeServer, tt.serverIP, tt.localIP0, tt.localIP1, tt.ipMaskLen) - if err := data.createPodOnNode(tt.fakeServer, testNamespace, egressNode, agnhostImage, []string{"sh", "-c", cmd}, nil, nil, nil, true, func(pod *v1.Pod) { + if err := data.createPodOnNode(tt.fakeServer, data.testNamespace, egressNode, agnhostImage, []string{"sh", "-c", cmd}, nil, nil, nil, true, func(pod *v1.Pod) { privileged := true pod.Spec.Containers[0].SecurityContext = &v1.SecurityContext{Privileged: &privileged} }); err != nil { t.Fatalf("Failed to create server Pod: %v", err) } - defer deletePodWrapper(t, data, testNamespace, tt.fakeServer) - if err := data.podWaitForRunning(defaultTimeout, tt.fakeServer, testNamespace); err != nil { + defer deletePodWrapper(t, data, data.testNamespace, tt.fakeServer) + if err := data.podWaitForRunning(defaultTimeout, tt.fakeServer, data.testNamespace); err != nil 
{ t.Fatalf("Error when waiting for Pod '%s' to be in the Running state", tt.fakeServer) } localPod := fmt.Sprintf("localpod%s", tt.name) remotePod := fmt.Sprintf("remotepod%s", tt.name) - if err := data.createBusyboxPodOnNode(localPod, testNamespace, egressNode, false); err != nil { + if err := data.createBusyboxPodOnNode(localPod, data.testNamespace, egressNode, false); err != nil { t.Fatalf("Failed to create local Pod: %v", err) } - defer deletePodWrapper(t, data, testNamespace, localPod) - if err := data.podWaitForRunning(defaultTimeout, localPod, testNamespace); err != nil { + defer deletePodWrapper(t, data, data.testNamespace, localPod) + if err := data.podWaitForRunning(defaultTimeout, localPod, data.testNamespace); err != nil { t.Fatalf("Error when waiting for Pod '%s' to be in the Running state", localPod) } - if err := data.createBusyboxPodOnNode(remotePod, testNamespace, workerNodeName(1), false); err != nil { + if err := data.createBusyboxPodOnNode(remotePod, data.testNamespace, workerNodeName(1), false); err != nil { t.Fatalf("Failed to create remote Pod: %v", err) } - defer deletePodWrapper(t, data, testNamespace, remotePod) - if err := data.podWaitForRunning(defaultTimeout, remotePod, testNamespace); err != nil { + defer deletePodWrapper(t, data, data.testNamespace, remotePod) + if err := data.podWaitForRunning(defaultTimeout, remotePod, data.testNamespace); err != nil { t.Fatalf("Error when waiting for Pod '%s' to be in the Running state", remotePod) } @@ -161,7 +161,7 @@ ip netns exec %[1]s /agnhost netexec // getClientIP gets the translated client IP by accessing the API that replies the request's client IP. getClientIP := func(pod string) (string, string, error) { url := fmt.Sprintf("%s:8080/clientip", serverIPStr) - return data.runWgetCommandOnBusyboxWithRetry(pod, testNamespace, url, 5) + return data.runWgetCommandOnBusyboxWithRetry(pod, data.testNamespace, url, 5) } // assertClientIP asserts the Pod is translated to the provided client IP. 
@@ -239,13 +239,13 @@ ip netns exec %[1]s /agnhost netexec clientIPStr = fmt.Sprintf("[%s]", clientIPStr) } cmd = fmt.Sprintf("wget -T 3 -O - %s:8080/clientip | grep %s:", serverIPStr, clientIPStr) - if err := data.createPodOnNode(initialIPChecker, testNamespace, egressNode, busyboxImage, []string{"sh", "-c", cmd}, nil, nil, nil, false, func(pod *v1.Pod) { + if err := data.createPodOnNode(initialIPChecker, data.testNamespace, egressNode, busyboxImage, []string{"sh", "-c", cmd}, nil, nil, nil, false, func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyNever }); err != nil { t.Fatalf("Failed to create Pod initial-ip-checker: %v", err) } - defer data.DeletePod(testNamespace, initialIPChecker) - _, err = data.PodWaitFor(timeout, initialIPChecker, testNamespace, func(pod *v1.Pod) (bool, error) { + defer data.DeletePod(data.testNamespace, initialIPChecker) + _, err = data.PodWaitFor(timeout, initialIPChecker, data.testNamespace, func(pod *v1.Pod) (bool, error) { if pod.Status.Phase == v1.PodFailed { return false, fmt.Errorf("Pod terminated with failure") } diff --git a/test/e2e/fixtures.go b/test/e2e/fixtures.go index acb764b5ba1..a8596f1eec3 100644 --- a/test/e2e/fixtures.go +++ b/test/e2e/fixtures.go @@ -208,11 +208,12 @@ func setupTest(tb testing.TB) (*TestData, error) { exportLogs(tb, testData, "afterSetupTest", true) } }() - tb.Logf("Creating '%s' K8s Namespace", testNamespace) + testData.testNamespace = randName(strings.ToLower(tb.Name()) + "-") + tb.Logf("Creating '%s' K8s Namespace", testData.testNamespace) if err := ensureAntreaRunning(testData); err != nil { return nil, err } - if err := testData.createTestNamespace(); err != nil { + if err := testData.CreateNamespace(testData.testNamespace, nil); err != nil { return nil, err } success = true @@ -227,10 +228,10 @@ func setupTestForFlowAggregator(tb testing.TB) (*TestData, bool, bool, error) { return testData, v4Enabled, v6Enabled, err } // Create pod using ipfix collector image - if err = testData.createPodOnNode("ipfix-collector", testNamespace, "", ipfixCollectorImage, nil, nil, nil, nil, true, nil); err != nil { + if err = testData.createPodOnNode("ipfix-collector", testData.testNamespace, "", ipfixCollectorImage, nil, nil, nil, nil, true, nil); err != nil { tb.Errorf("Error when creating the ipfix collector Pod: %v", err) } - ipfixCollectorIP, err := testData.podWaitForIPs(defaultTimeout, "ipfix-collector", testNamespace) + ipfixCollectorIP, err := testData.podWaitForIPs(defaultTimeout, "ipfix-collector", testData.testNamespace) if err != nil || len(ipfixCollectorIP.ipStrings) == 0 { tb.Errorf("Error when waiting to get ipfix collector Pod IP: %v", err) return nil, v4Enabled, v6Enabled, err @@ -415,8 +416,8 @@ func teardownTest(tb testing.TB, data *TestData) { if empty, _ := IsDirEmpty(data.logsDirForTestCase); empty { _ = os.Remove(data.logsDirForTestCase) } - tb.Logf("Deleting '%s' K8s Namespace", testNamespace) - if err := data.deleteTestNamespace(defaultTimeout); err != nil { + tb.Logf("Deleting '%s' K8s Namespace", testData.testNamespace) + if err := data.DeleteNamespace(testData.testNamespace, defaultTimeout); err != nil { tb.Logf("Error when tearing down test: %v", err) } } diff --git a/test/e2e/flowaggregator_test.go b/test/e2e/flowaggregator_test.go index 27f85672538..dc1e5abfe9c 100644 --- a/test/e2e/flowaggregator_test.go +++ b/test/e2e/flowaggregator_test.go @@ -333,10 +333,10 @@ func testHelper(t *testing.T, data *TestData, podAIPs, podBIPs, podCIPs, podDIPs anp1, anp2 := deployAntreaNetworkPolicies(t, data, 
"perftest-a", "perftest-c") defer func() { if anp1 != nil { - k8sUtils.DeleteANP(testNamespace, anp1.Name) + k8sUtils.DeleteANP(data.testNamespace, anp1.Name) } if anp2 != nil { - k8sUtils.DeleteANP(testNamespace, anp2.Name) + k8sUtils.DeleteANP(data.testNamespace, anp2.Name) } }() if !isIPv6 { @@ -457,11 +457,11 @@ func testHelper(t *testing.T, data *TestData, podAIPs, podBIPs, podCIPs, podDIPs // Creating an agnhost server as a host network Pod serverPodPort := int32(80) _, serverIPs, cleanupFunc := createAndWaitForPod(t, data, func(name string, ns string, nodeName string, hostNetwork bool) error { - return data.createServerPod(name, testNamespace, "", serverPodPort, false, true) - }, "test-server-", "", testNamespace, false) + return data.createServerPod(name, data.testNamespace, "", serverPodPort, false, true) + }, "test-server-", "", data.testNamespace, false) defer cleanupFunc() - clientName, clientIPs, cleanupFunc := createAndWaitForPod(t, data, data.createBusyboxPodOnNode, "test-client-", nodeName(0), testNamespace, false) + clientName, clientIPs, cleanupFunc := createAndWaitForPod(t, data, data.createBusyboxPodOnNode, "test-client-", nodeName(0), data.testNamespace, false) defer cleanupFunc() if !isIPv6 { @@ -547,7 +547,7 @@ func checkAntctlGetFlowRecordsJson(t *testing.T, data *TestData, podName string, dstIP = podBIPs.ipv6.String() cmdStr = fmt.Sprintf("iperf3 -6 -c %s -t %d", dstIP, iperfTimeSecShort) } - stdout, _, err := data.RunCommandFromPod(testNamespace, "perftest-a", "perftool", []string{"bash", "-c", cmdStr}) + stdout, _, err := data.RunCommandFromPod(data.testNamespace, "perftest-a", "perftool", []string{"bash", "-c", cmdStr}) require.NoErrorf(t, err, "Error when running iperf3 client: %v", err) _, srcPort, dstPort := getBandwidthAndPorts(stdout) @@ -569,10 +569,10 @@ func checkAntctlGetFlowRecordsJson(t *testing.T, data *TestData, podName string, require.NoErrorf(t, err, "Error when parsing flow records from antctl: %v", err) require.Len(t, records, 1) - checkAntctlRecord(t, records[0], srcIP, dstIP, srcPort, dstPort, isIPv6) + checkAntctlRecord(t, records[0], srcIP, dstIP, srcPort, dstPort, isIPv6, data.testNamespace) } -func checkAntctlRecord(t *testing.T, record map[string]interface{}, srcIP, dstIP, srcPort, dstPort string, isIPv6 bool) { +func checkAntctlRecord(t *testing.T, record map[string]interface{}, srcIP, dstIP, srcPort, dstPort string, isIPv6 bool, namespace string) { assert := assert.New(t) if isIPv6 { assert.Equal(srcIP, record["sourceIPv6Address"], "The record from antctl does not have correct sourceIPv6Address") @@ -585,14 +585,14 @@ func checkAntctlRecord(t *testing.T, record map[string]interface{}, srcIP, dstIP require.NoErrorf(t, err, "error when converting the iperf srcPort to int type: %s", srcPort) assert.EqualValues(srcPortNum, record["sourceTransportPort"], "The record from antctl does not have correct sourceTransportPort") assert.Equal("perftest-a", record["sourcePodName"], "The record from antctl does not have correct sourcePodName") - assert.Equal("antrea-test", record["sourcePodNamespace"], "The record from antctl does not have correct sourcePodNamespace") + assert.Equal(namespace, record["sourcePodNamespace"], "The record from antctl does not have correct sourcePodNamespace") assert.Equal(controlPlaneNodeName(), record["sourceNodeName"], "The record from antctl does not have correct sourceNodeName") dstPortNum, err := strconv.Atoi(dstPort) require.NoErrorf(t, err, "error when converting the iperf dstPort to int type: %s", dstPort) 
assert.EqualValues(dstPortNum, record["destinationTransportPort"], "The record from antctl does not have correct destinationTransportPort") assert.Equal("perftest-b", record["destinationPodName"], "The record from antctl does not have correct destinationPodName") - assert.Equal("antrea-test", record["destinationPodNamespace"], "The record from antctl does not have correct destinationPodNamespace") + assert.Equal(namespace, record["destinationPodNamespace"], "The record from antctl does not have correct destinationPodNamespace") assert.Equal(controlPlaneNodeName(), record["destinationNodeName"], "The record from antctl does not have correct destinationNodeName") assert.EqualValues(ipfixregistry.FlowTypeIntraNode, record["flowType"], "The record from antctl does not have correct flowType") @@ -606,7 +606,7 @@ func checkRecordsForFlows(t *testing.T, data *TestData, srcIP string, dstIP stri } else { cmdStr = fmt.Sprintf("iperf3 -6 -c %s -t %d -b %s", dstIP, iperfTimeSec, iperfBandwidth) } - stdout, _, err := data.RunCommandFromPod(testNamespace, "perftest-a", "perftool", []string{"bash", "-c", cmdStr}) + stdout, _, err := data.RunCommandFromPod(data.testNamespace, "perftest-a", "perftool", []string{"bash", "-c", cmdStr}) require.NoErrorf(t, err, "Error when running iperf3 client: %v", err) bwSlice, srcPort, _ := getBandwidthAndPorts(stdout) require.Equal(t, 2, len(bwSlice), "bandwidth value and / or bandwidth unit are not available") @@ -637,38 +637,38 @@ func checkRecordsForFlowsCollector(t *testing.T, data *TestData, srcIP, dstIP, s dataRecordsCount = dataRecordsCount + 1 // Check if record has both Pod name of source and destination Pod. if isIntraNode { - checkPodAndNodeData(t, record, "perftest-a", controlPlaneNodeName(), "perftest-b", controlPlaneNodeName()) + checkPodAndNodeData(t, record, "perftest-a", controlPlaneNodeName(), "perftest-b", controlPlaneNodeName(), data.testNamespace) checkFlowType(t, record, ipfixregistry.FlowTypeIntraNode) } else { - checkPodAndNodeData(t, record, "perftest-a", controlPlaneNodeName(), "perftest-c", workerNodeName(1)) + checkPodAndNodeData(t, record, "perftest-a", controlPlaneNodeName(), "perftest-c", workerNodeName(1), data.testNamespace) checkFlowType(t, record, ipfixregistry.FlowTypeInterNode) } assert := assert.New(t) if checkService { if isIntraNode { - assert.Contains(record, "antrea-test/perftest-b", "Record with ServiceIP does not have Service name") + assert.Contains(record, data.testNamespace+"/perftest-b", "Record with ServiceIP does not have Service name") } else { - assert.Contains(record, "antrea-test/perftest-c", "Record with ServiceIP does not have Service name") + assert.Contains(record, data.testNamespace+"/perftest-c", "Record with ServiceIP does not have Service name") } } if checkK8sNetworkPolicy { // Check if records have both ingress and egress network policies. 
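// Each assertion below performs a substring match against the raw text of the
// IPFIX record, so every expected "field: value" pair must be formatted
// exactly as the collector prints it.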
assert.Contains(record, fmt.Sprintf("ingressNetworkPolicyName: %s", ingressAllowNetworkPolicyName), "Record does not have the correct NetworkPolicy name with the ingress rule") - assert.Contains(record, fmt.Sprintf("ingressNetworkPolicyNamespace: %s", testNamespace), "Record does not have the correct NetworkPolicy Namespace with the ingress rule") + assert.Contains(record, fmt.Sprintf("ingressNetworkPolicyNamespace: %s", data.testNamespace), "Record does not have the correct NetworkPolicy Namespace with the ingress rule") assert.Contains(record, fmt.Sprintf("ingressNetworkPolicyType: %d", ipfixregistry.PolicyTypeK8sNetworkPolicy), "Record does not have the correct NetworkPolicy Type with the ingress rule") assert.Contains(record, fmt.Sprintf("egressNetworkPolicyName: %s", egressAllowNetworkPolicyName), "Record does not have the correct NetworkPolicy name with the egress rule") - assert.Contains(record, fmt.Sprintf("egressNetworkPolicyNamespace: %s", testNamespace), "Record does not have the correct NetworkPolicy Namespace with the egress rule") + assert.Contains(record, fmt.Sprintf("egressNetworkPolicyNamespace: %s", data.testNamespace), "Record does not have the correct NetworkPolicy Namespace with the egress rule") assert.Contains(record, fmt.Sprintf("egressNetworkPolicyType: %d", ipfixregistry.PolicyTypeK8sNetworkPolicy), "Record does not have the correct NetworkPolicy Type with the egress rule") } if checkAntreaNetworkPolicy { // Check if records have both ingress and egress network policies. assert.Contains(record, fmt.Sprintf("ingressNetworkPolicyName: %s", ingressAntreaNetworkPolicyName), "Record does not have the correct NetworkPolicy name with the ingress rule") - assert.Contains(record, fmt.Sprintf("ingressNetworkPolicyNamespace: %s", testNamespace), "Record does not have the correct NetworkPolicy Namespace with the ingress rule") + assert.Contains(record, fmt.Sprintf("ingressNetworkPolicyNamespace: %s", data.testNamespace), "Record does not have the correct NetworkPolicy Namespace with the ingress rule") assert.Contains(record, fmt.Sprintf("ingressNetworkPolicyType: %d", ipfixregistry.PolicyTypeAntreaNetworkPolicy), "Record does not have the correct NetworkPolicy Type with the ingress rule") assert.Contains(record, fmt.Sprintf("ingressNetworkPolicyRuleName: %s", testIngressRuleName), "Record does not have the correct NetworkPolicy RuleName with the ingress rule") assert.Contains(record, fmt.Sprintf("ingressNetworkPolicyRuleAction: %d", ipfixregistry.NetworkPolicyRuleActionAllow), "Record does not have the correct NetworkPolicy RuleAction with the ingress rule") assert.Contains(record, fmt.Sprintf("egressNetworkPolicyName: %s", egressAntreaNetworkPolicyName), "Record does not have the correct NetworkPolicy name with the egress rule") - assert.Contains(record, fmt.Sprintf("egressNetworkPolicyNamespace: %s", testNamespace), "Record does not have the correct NetworkPolicy Namespace with the egress rule") + assert.Contains(record, fmt.Sprintf("egressNetworkPolicyNamespace: %s", data.testNamespace), "Record does not have the correct NetworkPolicy Namespace with the egress rule") assert.Contains(record, fmt.Sprintf("egressNetworkPolicyType: %d", ipfixregistry.PolicyTypeAntreaNetworkPolicy), "Record does not have the correct NetworkPolicy Type with the egress rule") assert.Contains(record, fmt.Sprintf("egressNetworkPolicyRuleName: %s", testEgressRuleName), "Record does not have the correct NetworkPolicy RuleName with the egress rule") assert.Contains(record, 
fmt.Sprintf("egressNetworkPolicyRuleAction: %d", ipfixregistry.NetworkPolicyRuleActionAllow), "Record does not have the correct NetworkPolicy RuleAction with the egress rule") @@ -710,10 +710,10 @@ func checkRecordsForFlowsClickHouse(t *testing.T, data *TestData, srcIP, dstIP, for _, record := range clickHouseRecords { // Check if record has both Pod name of source and destination Pod. if isIntraNode { - checkPodAndNodeDataClickHouse(t, record, "perftest-a", controlPlaneNodeName(), "perftest-b", controlPlaneNodeName()) + checkPodAndNodeDataClickHouse(data, t, record, "perftest-a", controlPlaneNodeName(), "perftest-b", controlPlaneNodeName()) checkFlowTypeClickHouse(t, record, ipfixregistry.FlowTypeIntraNode) } else { - checkPodAndNodeDataClickHouse(t, record, "perftest-a", controlPlaneNodeName(), "perftest-c", workerNodeName(1)) + checkPodAndNodeDataClickHouse(data, t, record, "perftest-a", controlPlaneNodeName(), "perftest-c", workerNodeName(1)) checkFlowTypeClickHouse(t, record, ipfixregistry.FlowTypeInterNode) } assert := assert.New(t) @@ -727,21 +727,21 @@ func checkRecordsForFlowsClickHouse(t *testing.T, data *TestData, srcIP, dstIP, if checkK8sNetworkPolicy { // Check if records have both ingress and egress network policies. assert.Equal(record.IngressNetworkPolicyName, ingressAllowNetworkPolicyName, "Record does not have the correct NetworkPolicy name with the ingress rule") - assert.Equal(record.IngressNetworkPolicyNamespace, testNamespace, "Record does not have the correct NetworkPolicy Namespace with the ingress rule") + assert.Equal(record.IngressNetworkPolicyNamespace, data.testNamespace, "Record does not have the correct NetworkPolicy Namespace with the ingress rule") assert.Equal(record.IngressNetworkPolicyType, ipfixregistry.PolicyTypeK8sNetworkPolicy, "Record does not have the correct NetworkPolicy Type with the ingress rule") assert.Equal(record.EgressNetworkPolicyName, egressAllowNetworkPolicyName, "Record does not have the correct NetworkPolicy name with the egress rule") - assert.Equal(record.EgressNetworkPolicyNamespace, testNamespace, "Record does not have the correct NetworkPolicy Namespace with the egress rule") + assert.Equal(record.EgressNetworkPolicyNamespace, data.testNamespace, "Record does not have the correct NetworkPolicy Namespace with the egress rule") assert.Equal(record.EgressNetworkPolicyType, ipfixregistry.PolicyTypeK8sNetworkPolicy, "Record does not have the correct NetworkPolicy Type with the egress rule") } if checkAntreaNetworkPolicy { // Check if records have both ingress and egress network policies. 
assert.Equal(record.IngressNetworkPolicyName, ingressAntreaNetworkPolicyName, "Record does not have the correct NetworkPolicy name with the ingress rule") - assert.Equal(record.IngressNetworkPolicyNamespace, testNamespace, "Record does not have the correct NetworkPolicy Namespace with the ingress rule") + assert.Equal(record.IngressNetworkPolicyNamespace, data.testNamespace, "Record does not have the correct NetworkPolicy Namespace with the ingress rule") assert.Equal(record.IngressNetworkPolicyType, ipfixregistry.PolicyTypeAntreaNetworkPolicy, "Record does not have the correct NetworkPolicy Type with the ingress rule") assert.Equal(record.IngressNetworkPolicyRuleName, testIngressRuleName, "Record does not have the correct NetworkPolicy RuleName with the ingress rule") assert.Equal(record.IngressNetworkPolicyRuleAction, ipfixregistry.NetworkPolicyRuleActionAllow, "Record does not have the correct NetworkPolicy RuleAction with the ingress rule") assert.Equal(record.EgressNetworkPolicyName, egressAntreaNetworkPolicyName, "Record does not have the correct NetworkPolicy name with the egress rule") - assert.Equal(record.EgressNetworkPolicyNamespace, testNamespace, "Record does not have the correct NetworkPolicy Namespace with the egress rule") + assert.Equal(record.EgressNetworkPolicyNamespace, data.testNamespace, "Record does not have the correct NetworkPolicy Namespace with the egress rule") assert.Equal(record.EgressNetworkPolicyType, ipfixregistry.PolicyTypeAntreaNetworkPolicy, "Record does not have the correct NetworkPolicy Type with the egress rule") assert.Equal(record.EgressNetworkPolicyRuleName, testEgressRuleName, "Record does not have the correct NetworkPolicy RuleName with the egress rule") assert.Equal(record.EgressNetworkPolicyRuleAction, ipfixregistry.NetworkPolicyRuleActionAllow, "Record does not have the correct NetworkPolicy RuleAction with the egress rule") @@ -778,13 +778,13 @@ func checkRecordsForToExternalFlows(t *testing.T, data *TestData, srcNodeName st } else { cmd = fmt.Sprintf("wget -O- [%s]:%d", dstIP, dstPort) } - stdout, stderr, err := data.RunCommandFromPod(testNamespace, srcPodName, busyboxContainerName, strings.Fields(cmd)) + stdout, stderr, err := data.RunCommandFromPod(data.testNamespace, srcPodName, busyboxContainerName, strings.Fields(cmd)) require.NoErrorf(t, err, "Error when running wget command, stdout: %s, stderr: %s", stdout, stderr) _, recordSlices := getCollectorOutput(t, srcIP, dstIP, "", false, false, isIPv6, data) for _, record := range recordSlices { if strings.Contains(record, srcIP) && strings.Contains(record, dstIP) { - checkPodAndNodeData(t, record, srcPodName, srcNodeName, "", "") + checkPodAndNodeData(t, record, srcPodName, srcNodeName, "", "", data.testNamespace) checkFlowType(t, record, ipfixregistry.FlowTypeToExternal) assert.NotContains(t, record, "octetDeltaCount: 0", "octetDeltaCount should be non-zero") } @@ -792,7 +792,7 @@ func checkRecordsForToExternalFlows(t *testing.T, data *TestData, srcNodeName st clickHouseRecords := getClickHouseOutput(t, data, srcIP, dstIP, "", false, false) for _, record := range clickHouseRecords { - checkPodAndNodeDataClickHouse(t, record, srcPodName, srcNodeName, "", "") + checkPodAndNodeDataClickHouse(data, t, record, srcPodName, srcNodeName, "", "") checkFlowTypeClickHouse(t, record, ipfixregistry.FlowTypeToExternal) // Since the OVS userspace conntrack implementation doesn't maintain // packet or byte counter statistics, skip the check for Kind clusters @@ -811,9 +811,9 @@ func 
checkRecordsForDenyFlows(t *testing.T, data *TestData, testFlow1, testFlow2 cmdStr1 = fmt.Sprintf("iperf3 -6 -c %s -n 1", testFlow1.dstIP) cmdStr2 = fmt.Sprintf("iperf3 -6 -c %s -n 1", testFlow2.dstIP) } - _, _, err := data.RunCommandFromPod(testNamespace, testFlow1.srcPodName, "", []string{"timeout", "2", "bash", "-c", cmdStr1}) + _, _, err := data.RunCommandFromPod(data.testNamespace, testFlow1.srcPodName, "", []string{"timeout", "2", "bash", "-c", cmdStr1}) assert.Error(t, err) - _, _, err = data.RunCommandFromPod(testNamespace, testFlow2.srcPodName, "", []string{"timeout", "2", "bash", "-c", cmdStr2}) + _, _, err = data.RunCommandFromPod(data.testNamespace, testFlow2.srcPodName, "", []string{"timeout", "2", "bash", "-c", cmdStr2}) assert.Error(t, err) checkRecordsForDenyFlowsCollector(t, data, testFlow1, testFlow2, isIPv6, isIntraNode, isANP) @@ -843,10 +843,10 @@ func checkRecordsForDenyFlowsCollector(t *testing.T, data *TestData, testFlow1, egressDropStr := fmt.Sprintf("egressNetworkPolicyRuleAction: %d", ipfixregistry.NetworkPolicyRuleActionDrop) if isIntraNode { - checkPodAndNodeData(t, record, srcPodName, controlPlaneNodeName(), dstPodName, controlPlaneNodeName()) + checkPodAndNodeData(t, record, srcPodName, controlPlaneNodeName(), dstPodName, controlPlaneNodeName(), data.testNamespace) checkFlowType(t, record, ipfixregistry.FlowTypeIntraNode) } else { - checkPodAndNodeData(t, record, srcPodName, controlPlaneNodeName(), dstPodName, workerNodeName(1)) + checkPodAndNodeData(t, record, srcPodName, controlPlaneNodeName(), dstPodName, workerNodeName(1), data.testNamespace) checkFlowType(t, record, ipfixregistry.FlowTypeInterNode) } assert := assert.New(t) @@ -859,22 +859,22 @@ func checkRecordsForDenyFlowsCollector(t *testing.T, data *TestData, testFlow1, } else { // Antrea Network Policies if strings.Contains(record, ingressRejectStr) { assert.Contains(record, ingressRejectANPName, "Record does not have Antrea NetworkPolicy name with ingress reject rule") - assert.Contains(record, fmt.Sprintf("ingressNetworkPolicyNamespace: %s", testNamespace), "Record does not have correct ingressNetworkPolicyNamespace") + assert.Contains(record, fmt.Sprintf("ingressNetworkPolicyNamespace: %s", data.testNamespace), "Record does not have correct ingressNetworkPolicyNamespace") assert.Contains(record, fmt.Sprintf("ingressNetworkPolicyType: %d", ipfixregistry.PolicyTypeAntreaNetworkPolicy), "Record does not have the correct NetworkPolicy Type with the ingress reject rule") assert.Contains(record, fmt.Sprintf("ingressNetworkPolicyRuleName: %s", testIngressRuleName), "Record does not have the correct NetworkPolicy RuleName with the ingress reject rule") } else if strings.Contains(record, ingressDropStr) { assert.Contains(record, ingressDropANPName, "Record does not have Antrea NetworkPolicy name with ingress drop rule") - assert.Contains(record, fmt.Sprintf("ingressNetworkPolicyNamespace: %s", testNamespace), "Record does not have correct ingressNetworkPolicyNamespace") + assert.Contains(record, fmt.Sprintf("ingressNetworkPolicyNamespace: %s", data.testNamespace), "Record does not have correct ingressNetworkPolicyNamespace") assert.Contains(record, fmt.Sprintf("ingressNetworkPolicyType: %d", ipfixregistry.PolicyTypeAntreaNetworkPolicy), "Record does not have the correct NetworkPolicy Type with the ingress drop rule") assert.Contains(record, fmt.Sprintf("ingressNetworkPolicyRuleName: %s", testIngressRuleName), "Record does not have the correct NetworkPolicy RuleName with the ingress drop rule") } else if 
strings.Contains(record, egressRejectStr) { assert.Contains(record, egressRejectANPName, "Record does not have Antrea NetworkPolicy name with egress reject rule") - assert.Contains(record, fmt.Sprintf("egressNetworkPolicyNamespace: %s", testNamespace), "Record does not have correct egressNetworkPolicyNamespace") + assert.Contains(record, fmt.Sprintf("egressNetworkPolicyNamespace: %s", data.testNamespace), "Record does not have correct egressNetworkPolicyNamespace") assert.Contains(record, fmt.Sprintf("egressNetworkPolicyType: %d", ipfixregistry.PolicyTypeAntreaNetworkPolicy), "Record does not have the correct NetworkPolicy Type with the egress reject rule") assert.Contains(record, fmt.Sprintf("egressNetworkPolicyRuleName: %s", testEgressRuleName), "Record does not have the correct NetworkPolicy RuleName with the egress reject rule") } else if strings.Contains(record, egressDropStr) { assert.Contains(record, egressDropANPName, "Record does not have Antrea NetworkPolicy name with egress drop rule") - assert.Contains(record, fmt.Sprintf("egressNetworkPolicyNamespace: %s", testNamespace), "Record does not have correct egressNetworkPolicyNamespace") + assert.Contains(record, fmt.Sprintf("egressNetworkPolicyNamespace: %s", data.testNamespace), "Record does not have correct egressNetworkPolicyNamespace") assert.Contains(record, fmt.Sprintf("egressNetworkPolicyType: %d", ipfixregistry.PolicyTypeAntreaNetworkPolicy), "Record does not have the correct NetworkPolicy Type with the egress drop rule") assert.Contains(record, fmt.Sprintf("egressNetworkPolicyRuleName: %s", testEgressRuleName), "Record does not have the correct NetworkPolicy RuleName with the egress drop rule") } @@ -900,10 +900,10 @@ func checkRecordsForDenyFlowsClickHouse(t *testing.T, data *TestData, testFlow1, } if isIntraNode { - checkPodAndNodeDataClickHouse(t, record, srcPodName, controlPlaneNodeName(), dstPodName, controlPlaneNodeName()) + checkPodAndNodeDataClickHouse(data, t, record, srcPodName, controlPlaneNodeName(), dstPodName, controlPlaneNodeName()) checkFlowTypeClickHouse(t, record, ipfixregistry.FlowTypeIntraNode) } else { - checkPodAndNodeDataClickHouse(t, record, srcPodName, controlPlaneNodeName(), dstPodName, workerNodeName(1)) + checkPodAndNodeDataClickHouse(data, t, record, srcPodName, controlPlaneNodeName(), dstPodName, workerNodeName(1)) checkFlowTypeClickHouse(t, record, ipfixregistry.FlowTypeInterNode) } assert := assert.New(t) @@ -916,22 +916,22 @@ func checkRecordsForDenyFlowsClickHouse(t *testing.T, data *TestData, testFlow1, } else { // Antrea Network Policies if record.IngressNetworkPolicyRuleAction == ipfixregistry.NetworkPolicyRuleActionReject { assert.Equal(record.IngressNetworkPolicyName, ingressRejectANPName, "Record does not have Antrea NetworkPolicy name with ingress reject rule") - assert.Equal(record.IngressNetworkPolicyNamespace, testNamespace, "Record does not have correct ingressNetworkPolicyNamespace") + assert.Equal(record.IngressNetworkPolicyNamespace, data.testNamespace, "Record does not have correct ingressNetworkPolicyNamespace") assert.Equal(record.IngressNetworkPolicyType, ipfixregistry.PolicyTypeAntreaNetworkPolicy, "Record does not have the correct NetworkPolicy Type with the ingress reject rule") assert.Equal(record.IngressNetworkPolicyRuleName, testIngressRuleName, "Record does not have the correct NetworkPolicy RuleName with the ingress reject rule") } else if record.IngressNetworkPolicyRuleAction == ipfixregistry.NetworkPolicyRuleActionDrop { 
assert.Equal(record.IngressNetworkPolicyName, ingressDropANPName, "Record does not have Antrea NetworkPolicy name with ingress drop rule") - assert.Equal(record.IngressNetworkPolicyNamespace, testNamespace, "Record does not have correct ingressNetworkPolicyNamespace") + assert.Equal(record.IngressNetworkPolicyNamespace, data.testNamespace, "Record does not have correct ingressNetworkPolicyNamespace") assert.Equal(record.IngressNetworkPolicyType, ipfixregistry.PolicyTypeAntreaNetworkPolicy, "Record does not have the correct NetworkPolicy Type with the ingress drop rule") assert.Equal(record.IngressNetworkPolicyRuleName, testIngressRuleName, "Record does not have the correct NetworkPolicy RuleName with the ingress drop rule") } else if record.EgressNetworkPolicyRuleAction == ipfixregistry.NetworkPolicyRuleActionReject { assert.Equal(record.EgressNetworkPolicyName, egressRejectANPName, "Record does not have Antrea NetworkPolicy name with egress reject rule") - assert.Equal(record.EgressNetworkPolicyNamespace, testNamespace, "Record does not have correct egressNetworkPolicyNamespace") + assert.Equal(record.EgressNetworkPolicyNamespace, data.testNamespace, "Record does not have correct egressNetworkPolicyNamespace") assert.Equal(record.EgressNetworkPolicyType, ipfixregistry.PolicyTypeAntreaNetworkPolicy, "Record does not have the correct NetworkPolicy Type with the egress reject rule") assert.Equal(record.EgressNetworkPolicyRuleName, testEgressRuleName, "Record does not have the correct NetworkPolicy RuleName with the egress reject rule") } else if record.EgressNetworkPolicyRuleAction == ipfixregistry.NetworkPolicyRuleActionDrop { assert.Equal(record.EgressNetworkPolicyName, egressDropANPName, "Record does not have Antrea NetworkPolicy name with egress drop rule") - assert.Equal(record.EgressNetworkPolicyNamespace, testNamespace, "Record does not have correct egressNetworkPolicyNamespace") + assert.Equal(record.EgressNetworkPolicyNamespace, data.testNamespace, "Record does not have correct egressNetworkPolicyNamespace") assert.Equal(record.EgressNetworkPolicyType, ipfixregistry.PolicyTypeAntreaNetworkPolicy, "Record does not have the correct NetworkPolicy Type with the egress drop rule") assert.Equal(record.EgressNetworkPolicyRuleName, testEgressRuleName, "Record does not have the correct NetworkPolicy RuleName with the egress drop rule") } @@ -939,17 +939,17 @@ func checkRecordsForDenyFlowsClickHouse(t *testing.T, data *TestData, testFlow1, } } -func checkPodAndNodeData(t *testing.T, record, srcPod, srcNode, dstPod, dstNode string) { +func checkPodAndNodeData(t *testing.T, record, srcPod, srcNode, dstPod, dstNode string, namespace string) { assert := assert.New(t) assert.Contains(record, srcPod, "Record with srcIP does not have Pod name: %s", srcPod) - assert.Contains(record, fmt.Sprintf("sourcePodNamespace: %s", testNamespace), "Record does not have correct sourcePodNamespace: %s", testNamespace) + assert.Contains(record, fmt.Sprintf("sourcePodNamespace: %s", namespace), "Record does not have correct sourcePodNamespace: %s", namespace) assert.Contains(record, fmt.Sprintf("sourceNodeName: %s", srcNode), "Record does not have correct sourceNodeName: %s", srcNode) // For Pod-To-External flow type, we send traffic to an external address, // so we skip the verification of destination Pod info. // Also, source Pod labels are different for Pod-To-External flow test. 
if dstPod != "" { assert.Contains(record, dstPod, "Record with dstIP does not have Pod name: %s", dstPod) - assert.Contains(record, fmt.Sprintf("destinationPodNamespace: %s", testNamespace), "Record does not have correct destinationPodNamespace: %s", testNamespace) + assert.Contains(record, fmt.Sprintf("destinationPodNamespace: %s", namespace), "Record does not have correct destinationPodNamespace: %s", namespace) assert.Contains(record, fmt.Sprintf("destinationNodeName: %s", dstNode), "Record does not have correct destinationNodeName: %s", dstNode) assert.Contains(record, fmt.Sprintf("{\"antrea-e2e\":\"%s\",\"app\":\"perftool\"}", srcPod), "Record does not have correct label for source Pod") assert.Contains(record, fmt.Sprintf("{\"antrea-e2e\":\"%s\",\"app\":\"perftool\"}", dstPod), "Record does not have correct label for destination Pod") @@ -958,17 +958,17 @@ func checkPodAndNodeData(t *testing.T, record, srcPod, srcNode, dstPod, dstNode } } -func checkPodAndNodeDataClickHouse(t *testing.T, record *ClickHouseFullRow, srcPod, srcNode, dstPod, dstNode string) { +func checkPodAndNodeDataClickHouse(data *TestData, t *testing.T, record *ClickHouseFullRow, srcPod, srcNode, dstPod, dstNode string) { assert := assert.New(t) assert.Equal(record.SourcePodName, srcPod, "Record with srcIP does not have Pod name: %s", srcPod) - assert.Equal(record.SourcePodNamespace, testNamespace, "Record does not have correct sourcePodNamespace: %s", testNamespace) + assert.Equal(record.SourcePodNamespace, data.testNamespace, "Record does not have correct sourcePodNamespace: %s", data.testNamespace) assert.Equal(record.SourceNodeName, srcNode, "Record does not have correct sourceNodeName: %s", srcNode) // For Pod-To-External flow type, we send traffic to an external address, // so we skip the verification of destination Pod info. // Also, source Pod labels are different for Pod-To-External flow test. 
if dstPod != "" { assert.Equal(record.DestinationPodName, dstPod, "Record with dstIP does not have Pod name: %s", dstPod) - assert.Equal(record.DestinationPodNamespace, testNamespace, "Record does not have correct destinationPodNamespace: %s", testNamespace) + assert.Equal(record.DestinationPodNamespace, data.testNamespace, "Record does not have correct destinationPodNamespace: %s", data.testNamespace) assert.Equal(record.DestinationNodeName, dstNode, "Record does not have correct destinationNodeName: %s", dstNode) assert.Equal(record.SourcePodLabels, fmt.Sprintf("{\"antrea-e2e\":\"%s\",\"app\":\"perftool\"}", srcPod), "Record does not have correct label for source Pod") assert.Equal(record.DestinationPodLabels, fmt.Sprintf("{\"antrea-e2e\":\"%s\",\"app\":\"perftool\"}", dstPod), "Record does not have correct label for destination Pod") @@ -1014,7 +1014,7 @@ func getCollectorOutput(t *testing.T, srcIP, dstIP, srcPort string, isDstService var rc int var err error // `pod-running-timeout` option is added to cover scenarios where ipfix flow-collector has crashed after being deployed - rc, collectorOutput, _, err = data.RunCommandOnNode(controlPlaneNodeName(), fmt.Sprintf("kubectl logs --pod-running-timeout=%v ipfix-collector -n antrea-test", aggregatorInactiveFlowRecordTimeout.String())) + rc, collectorOutput, _, err = data.RunCommandOnNode(controlPlaneNodeName(), fmt.Sprintf("kubectl logs --pod-running-timeout=%v ipfix-collector -n %s", aggregatorInactiveFlowRecordTimeout.String(), data.testNamespace)) if err != nil || rc != 0 { return false, err } @@ -1156,7 +1156,7 @@ func deployK8sNetworkPolicies(t *testing.T, data *TestData, srcPod, dstPod strin func deployAntreaNetworkPolicies(t *testing.T, data *TestData, srcPod, dstPod string) (anp1 *secv1alpha1.NetworkPolicy, anp2 *secv1alpha1.NetworkPolicy) { builder1 := &utils.AntreaNetworkPolicySpecBuilder{} // apply anp to dstPod, allow ingress from srcPod - builder1 = builder1.SetName(testNamespace, ingressAntreaNetworkPolicyName). + builder1 = builder1.SetName(data.testNamespace, ingressAntreaNetworkPolicyName). SetPriority(2.0). SetAppliedToGroup([]utils.ANPAppliedToSpec{{PodSelector: map[string]string{"antrea-e2e": dstPod}}}) builder1 = builder1.AddIngress(utils.ProtocolTCP, nil, nil, nil, nil, nil, nil, map[string]string{"antrea-e2e": srcPod}, map[string]string{}, @@ -1169,7 +1169,7 @@ func deployAntreaNetworkPolicies(t *testing.T, data *TestData, srcPod, dstPod st builder2 := &utils.AntreaNetworkPolicySpecBuilder{} // apply anp to srcPod, allow egress to dstPod - builder2 = builder2.SetName(testNamespace, egressAntreaNetworkPolicyName). + builder2 = builder2.SetName(data.testNamespace, egressAntreaNetworkPolicyName). SetPriority(2.0). SetAppliedToGroup([]utils.ANPAppliedToSpec{{PodSelector: map[string]string{"antrea-e2e": srcPod}}}) builder2 = builder2.AddEgress(utils.ProtocolTCP, nil, nil, nil, nil, nil, nil, map[string]string{"antrea-e2e": dstPod}, map[string]string{}, @@ -1194,24 +1194,24 @@ func deployDenyAntreaNetworkPolicies(t *testing.T, data *TestData, srcPod, podRe builder2 := &utils.AntreaNetworkPolicySpecBuilder{} if isIngress { // apply reject and drop ingress rule to destination pods - builder1 = builder1.SetName(testNamespace, ingressRejectANPName). + builder1 = builder1.SetName(data.testNamespace, ingressRejectANPName). SetPriority(2.0). 
SetAppliedToGroup([]utils.ANPAppliedToSpec{{PodSelector: map[string]string{"antrea-e2e": podReject}}}) builder1 = builder1.AddIngress(utils.ProtocolTCP, nil, nil, nil, nil, nil, nil, map[string]string{"antrea-e2e": srcPod}, map[string]string{}, nil, nil, nil, secv1alpha1.RuleActionReject, testIngressRuleName) - builder2 = builder2.SetName(testNamespace, ingressDropANPName). + builder2 = builder2.SetName(data.testNamespace, ingressDropANPName). SetPriority(2.0). SetAppliedToGroup([]utils.ANPAppliedToSpec{{PodSelector: map[string]string{"antrea-e2e": podDrop}}}) builder2 = builder2.AddIngress(utils.ProtocolTCP, nil, nil, nil, nil, nil, nil, map[string]string{"antrea-e2e": srcPod}, map[string]string{}, nil, nil, nil, secv1alpha1.RuleActionDrop, testIngressRuleName) } else { // apply reject and drop egress rule to source pod - builder1 = builder1.SetName(testNamespace, egressRejectANPName). + builder1 = builder1.SetName(data.testNamespace, egressRejectANPName). SetPriority(2.0). SetAppliedToGroup([]utils.ANPAppliedToSpec{{PodSelector: map[string]string{"antrea-e2e": srcPod}}}) builder1 = builder1.AddEgress(utils.ProtocolTCP, nil, nil, nil, nil, nil, nil, map[string]string{"antrea-e2e": podReject}, map[string]string{}, nil, nil, nil, secv1alpha1.RuleActionReject, testEgressRuleName) - builder2 = builder2.SetName(testNamespace, egressDropANPName). + builder2 = builder2.SetName(data.testNamespace, egressDropANPName). SetPriority(2.0). SetAppliedToGroup([]utils.ANPAppliedToSpec{{PodSelector: map[string]string{"antrea-e2e": srcPod}}}) builder2 = builder2.AddEgress(utils.ProtocolTCP, nil, nil, nil, nil, nil, nil, map[string]string{"antrea-e2e": podDrop}, map[string]string{}, @@ -1269,42 +1269,42 @@ func deployDenyNetworkPolicies(t *testing.T, data *TestData, pod1, pod2 string) } func createPerftestPods(data *TestData) (podAIPs *PodIPs, podBIPs *PodIPs, podCIPs *PodIPs, podDIPs *PodIPs, podEIPs *PodIPs, err error) { - if err := data.createPodOnNode("perftest-a", testNamespace, controlPlaneNodeName(), perftoolImage, nil, nil, nil, nil, false, nil); err != nil { + if err := data.createPodOnNode("perftest-a", data.testNamespace, controlPlaneNodeName(), perftoolImage, nil, nil, nil, nil, false, nil); err != nil { return nil, nil, nil, nil, nil, fmt.Errorf("Error when creating the perftest client Pod: %v", err) } - podAIPs, err = data.podWaitForIPs(defaultTimeout, "perftest-a", testNamespace) + podAIPs, err = data.podWaitForIPs(defaultTimeout, "perftest-a", data.testNamespace) if err != nil { return nil, nil, nil, nil, nil, fmt.Errorf("Error when waiting for the perftest client Pod: %v", err) } - if err := data.createPodOnNode("perftest-b", testNamespace, controlPlaneNodeName(), perftoolImage, nil, nil, nil, []corev1.ContainerPort{{Protocol: corev1.ProtocolTCP, ContainerPort: iperfPort}}, false, nil); err != nil { + if err := data.createPodOnNode("perftest-b", data.testNamespace, controlPlaneNodeName(), perftoolImage, nil, nil, nil, []corev1.ContainerPort{{Protocol: corev1.ProtocolTCP, ContainerPort: iperfPort}}, false, nil); err != nil { return nil, nil, nil, nil, nil, fmt.Errorf("Error when creating the perftest server Pod: %v", err) } - podBIPs, err = data.podWaitForIPs(defaultTimeout, "perftest-b", testNamespace) + podBIPs, err = data.podWaitForIPs(defaultTimeout, "perftest-b", data.testNamespace) if err != nil { return nil, nil, nil, nil, nil, fmt.Errorf("Error when getting the perftest server Pod's IPs: %v", err) } - if err := data.createPodOnNode("perftest-c", testNamespace, workerNodeName(1), 
perftoolImage, nil, nil, nil, []corev1.ContainerPort{{Protocol: corev1.ProtocolTCP, ContainerPort: iperfPort}}, false, nil); err != nil { + if err := data.createPodOnNode("perftest-c", data.testNamespace, workerNodeName(1), perftoolImage, nil, nil, nil, []corev1.ContainerPort{{Protocol: corev1.ProtocolTCP, ContainerPort: iperfPort}}, false, nil); err != nil { return nil, nil, nil, nil, nil, fmt.Errorf("Error when creating the perftest server Pod: %v", err) } - podCIPs, err = data.podWaitForIPs(defaultTimeout, "perftest-c", testNamespace) + podCIPs, err = data.podWaitForIPs(defaultTimeout, "perftest-c", data.testNamespace) if err != nil { return nil, nil, nil, nil, nil, fmt.Errorf("Error when getting the perftest server Pod's IPs: %v", err) } - if err := data.createPodOnNode("perftest-d", testNamespace, controlPlaneNodeName(), perftoolImage, nil, nil, nil, []corev1.ContainerPort{{Protocol: corev1.ProtocolTCP, ContainerPort: iperfPort}}, false, nil); err != nil { + if err := data.createPodOnNode("perftest-d", data.testNamespace, controlPlaneNodeName(), perftoolImage, nil, nil, nil, []corev1.ContainerPort{{Protocol: corev1.ProtocolTCP, ContainerPort: iperfPort}}, false, nil); err != nil { return nil, nil, nil, nil, nil, fmt.Errorf("Error when creating the perftest server Pod: %v", err) } - podDIPs, err = data.podWaitForIPs(defaultTimeout, "perftest-d", testNamespace) + podDIPs, err = data.podWaitForIPs(defaultTimeout, "perftest-d", data.testNamespace) if err != nil { return nil, nil, nil, nil, nil, fmt.Errorf("Error when getting the perftest server Pod's IPs: %v", err) } - if err := data.createPodOnNode("perftest-e", testNamespace, workerNodeName(1), perftoolImage, nil, nil, nil, []corev1.ContainerPort{{Protocol: corev1.ProtocolTCP, ContainerPort: iperfPort}}, false, nil); err != nil { + if err := data.createPodOnNode("perftest-e", data.testNamespace, workerNodeName(1), perftoolImage, nil, nil, nil, []corev1.ContainerPort{{Protocol: corev1.ProtocolTCP, ContainerPort: iperfPort}}, false, nil); err != nil { return nil, nil, nil, nil, nil, fmt.Errorf("Error when creating the perftest server Pod: %v", err) } - podEIPs, err = data.podWaitForIPs(defaultTimeout, "perftest-e", testNamespace) + podEIPs, err = data.podWaitForIPs(defaultTimeout, "perftest-e", data.testNamespace) if err != nil { return nil, nil, nil, nil, nil, fmt.Errorf("Error when getting the perftest server Pod's IPs: %v", err) } @@ -1318,12 +1318,12 @@ func createPerftestServices(data *TestData, isIPv6 bool) (svcB *corev1.Service, svcIPFamily = corev1.IPv6Protocol } - svcB, err = data.CreateService("perftest-b", testNamespace, iperfPort, iperfPort, map[string]string{"antrea-e2e": "perftest-b"}, false, false, corev1.ServiceTypeClusterIP, &svcIPFamily) + svcB, err = data.CreateService("perftest-b", data.testNamespace, iperfPort, iperfPort, map[string]string{"antrea-e2e": "perftest-b"}, false, false, corev1.ServiceTypeClusterIP, &svcIPFamily) if err != nil { return nil, nil, fmt.Errorf("Error when creating perftest-b Service: %v", err) } - svcC, err = data.CreateService("perftest-c", testNamespace, iperfPort, iperfPort, map[string]string{"antrea-e2e": "perftest-c"}, false, false, corev1.ServiceTypeClusterIP, &svcIPFamily) + svcC, err = data.CreateService("perftest-c", data.testNamespace, iperfPort, iperfPort, map[string]string{"antrea-e2e": "perftest-c"}, false, false, corev1.ServiceTypeClusterIP, &svcIPFamily) if err != nil { return nil, nil, fmt.Errorf("Error when creating perftest-c Service: %v", err) } @@ -1333,7 +1333,7 @@ func 
createPerftestServices(data *TestData, isIPv6 bool) (svcB *corev1.Service, func deletePerftestServices(t *testing.T, data *TestData) { for _, serviceName := range []string{"perftest-b", "perftest-c"} { - err := data.deleteService(testNamespace, serviceName) + err := data.deleteService(data.testNamespace, serviceName) if err != nil { t.Logf("Error when deleting %s Service: %v", serviceName, err) } diff --git a/test/e2e/framework.go b/test/e2e/framework.go index 83590118b69..27b193fff21 100644 --- a/test/e2e/framework.go +++ b/test/e2e/framework.go @@ -82,7 +82,6 @@ const ( antreaDeployment string = "antrea-controller" flowAggregatorDeployment string = "flow-aggregator" antreaDefaultGW string = "antrea-gw0" - testNamespace string = "antrea-test" testAntreaIPAMNamespace string = "antrea-ipam-test" testAntreaIPAMNamespace11 string = "antrea-ipam-test-11" testAntreaIPAMNamespace12 string = "antrea-ipam-test-12" @@ -210,6 +209,7 @@ type TestData struct { aggregatorClient aggregatorclientset.Interface crdClient crdclientset.Interface logsDirForTestCase string + testNamespace string } var testData *TestData @@ -610,11 +610,6 @@ func (data *TestData) CreateNamespace(namespace string, mutateFunc func(*corev1. return nil } -// createTestNamespace creates the namespace used for tests. -func (data *TestData) createTestNamespace() error { - return data.CreateNamespace(testNamespace, nil) -} - // createNamespaceWithAnnotations creates the namespace with Annotations. func (data *TestData) createNamespaceWithAnnotations(namespace string, annotations map[string]string) error { var mutateFunc func(*corev1.Namespace) @@ -653,26 +648,8 @@ func (data *TestData) DeleteNamespace(namespace string, timeout time.Duration) e } return fmt.Errorf("error when deleting '%s' Namespace: %v", namespace, err) } - err := wait.Poll(defaultInterval, timeout, func() (bool, error) { - if ns, err := data.clientset.CoreV1().Namespaces().Get(context.TODO(), namespace, metav1.GetOptions{}); err != nil { - if errors.IsNotFound(err) { - // Success - return true, nil - } - return false, fmt.Errorf("error when getting Namespace '%s' after delete: %v", namespace, err) - } else if ns.Status.Phase != corev1.NamespaceTerminating { - return false, fmt.Errorf("deleted Namespace '%s' should be in 'Terminating' phase", namespace) - } - // Keep trying - return false, nil - }) - return err -} - -// deleteTestNamespace deletes test namespace and waits for deletion to actually complete. -func (data *TestData) deleteTestNamespace(timeout time.Duration) error { - return data.DeleteNamespace(testNamespace, timeout) + return nil } // deployAntreaCommon deploys Antrea using kubectl on the control-plane Node. @@ -1605,7 +1582,7 @@ func (data *TestData) CreateServiceWithAnnotations(serviceName, namespace string // createNginxClusterIPServiceWithAnnotations creates nginx service with Annotation func (data *TestData) createNginxClusterIPServiceWithAnnotations(affinity bool, ipFamily *corev1.IPFamily, annotation map[string]string) (*corev1.Service, error) { - return data.CreateServiceWithAnnotations("nginx", testNamespace, 80, 80, corev1.ProtocolTCP, map[string]string{"app": "nginx"}, affinity, false, corev1.ServiceTypeClusterIP, ipFamily, annotation) + return data.CreateServiceWithAnnotations("nginx", data.testNamespace, 80, 80, corev1.ProtocolTCP, map[string]string{"app": "nginx"}, affinity, false, corev1.ServiceTypeClusterIP, ipFamily, annotation) } // createNginxClusterIPService creates a nginx service with the given name. 
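Note: the framework.go hunks above are the crux of this change. The fixed "antrea-test" Namespace constant is gone, each TestData instance now carries its own testNamespace, and DeleteNamespace no longer polls for the Namespace to finish terminating, since no later test reuses the name. A minimal sketch of how a per-test Namespace can be derived, assuming the randName and CreateNamespace helpers shown in this file; the setupTestNamespace name and the exact wiring (which lives in this patch's fixtures.go changes) are illustrative assumptions, not part of the diff:

    // Sketch only: give every test its own Namespace so one test's
    // teardown never delays the next test on Namespace deletion.
    func (data *TestData) setupTestNamespace() error {
        // randName appends a random alphanumeric suffix, e.g. "antrea-test-x7b2k".
        data.testNamespace = randName("antrea-test-")
        return data.CreateNamespace(data.testNamespace, nil)
    }

With unique names, cleanup becomes fire-and-forget: deletion is issued and the next test proceeds immediately in a fresh Namespace, which is where the time savings in the commit message come from.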
@@ -1618,21 +1595,21 @@ func (data *TestData) createNginxClusterIPService(name, namespace string, affini // createAgnhostClusterIPService creates a ClusterIP agnhost service with the given name. func (data *TestData) createAgnhostClusterIPService(serviceName string, affinity bool, ipFamily *corev1.IPFamily) (*corev1.Service, error) { - return data.CreateService(serviceName, testNamespace, 8080, 8080, map[string]string{"app": "agnhost"}, affinity, false, corev1.ServiceTypeClusterIP, ipFamily) + return data.CreateService(serviceName, data.testNamespace, 8080, 8080, map[string]string{"app": "agnhost"}, affinity, false, corev1.ServiceTypeClusterIP, ipFamily) } // createAgnhostNodePortService creates a NodePort agnhost service with the given name. func (data *TestData) createAgnhostNodePortService(serviceName string, affinity, nodeLocalExternal bool, ipFamily *corev1.IPFamily) (*corev1.Service, error) { - return data.CreateService(serviceName, testNamespace, 8080, 8080, map[string]string{"app": "agnhost"}, affinity, nodeLocalExternal, corev1.ServiceTypeNodePort, ipFamily) + return data.CreateService(serviceName, data.testNamespace, 8080, 8080, map[string]string{"app": "agnhost"}, affinity, nodeLocalExternal, corev1.ServiceTypeNodePort, ipFamily) } // createNginxNodePortService creates a NodePort nginx service with the given name. func (data *TestData) createNginxNodePortService(serviceName string, affinity, nodeLocalExternal bool, ipFamily *corev1.IPFamily) (*corev1.Service, error) { - return data.CreateService(serviceName, testNamespace, 80, 80, map[string]string{"app": "nginx"}, affinity, nodeLocalExternal, corev1.ServiceTypeNodePort, ipFamily) + return data.CreateService(serviceName, data.testNamespace, 80, 80, map[string]string{"app": "nginx"}, affinity, nodeLocalExternal, corev1.ServiceTypeNodePort, ipFamily) } func (data *TestData) updateServiceExternalTrafficPolicy(serviceName string, nodeLocalExternal bool) (*corev1.Service, error) { - svc, err := data.clientset.CoreV1().Services(testNamespace).Get(context.TODO(), serviceName, metav1.GetOptions{}) + svc, err := data.clientset.CoreV1().Services(data.testNamespace).Get(context.TODO(), serviceName, metav1.GetOptions{}) if err != nil { return svc, err } @@ -1642,12 +1619,12 @@ func (data *TestData) updateServiceExternalTrafficPolicy(serviceName string, nod svc.Spec.ExternalTrafficPolicy = corev1.ServiceExternalTrafficPolicyTypeCluster } - return data.clientset.CoreV1().Services(testNamespace).Update(context.TODO(), svc, metav1.UpdateOptions{}) + return data.clientset.CoreV1().Services(data.testNamespace).Update(context.TODO(), svc, metav1.UpdateOptions{}) } // createAgnhostLoadBalancerService creates a LoadBalancer agnhost service with the given name. 
 func (data *TestData) createAgnhostLoadBalancerService(serviceName string, affinity, nodeLocalExternal bool, ingressIPs []string, ipFamily *corev1.IPFamily) (*corev1.Service, error) {
-	svc, err := data.CreateService(serviceName, testNamespace, 8080, 8080, map[string]string{"app": "agnhost"}, affinity, nodeLocalExternal, corev1.ServiceTypeLoadBalancer, ipFamily)
+	svc, err := data.CreateService(serviceName, data.testNamespace, 8080, 8080, map[string]string{"app": "agnhost"}, affinity, nodeLocalExternal, corev1.ServiceTypeLoadBalancer, ipFamily)
 	if err != nil {
 		return svc, err
 	}
@@ -1666,7 +1643,7 @@ func (data *TestData) createAgnhostLoadBalancerService(serviceName string, affin
 }
 
 func (data *TestData) createNginxLoadBalancerService(affinity bool, ingressIPs []string, ipFamily *corev1.IPFamily) (*corev1.Service, error) {
-	svc, err := data.CreateService(nginxLBService, testNamespace, 80, 80, map[string]string{"app": "nginx"}, affinity, false, corev1.ServiceTypeLoadBalancer, ipFamily)
+	svc, err := data.CreateService(nginxLBService, data.testNamespace, 80, 80, map[string]string{"app": "nginx"}, affinity, false, corev1.ServiceTypeLoadBalancer, ipFamily)
 	if err != nil {
 		return svc, err
 	}
@@ -1724,7 +1701,7 @@ func (data *TestData) createNetworkPolicy(name string, spec *networkingv1.Networ
 		},
 		Spec: *spec,
 	}
-	return data.clientset.NetworkingV1().NetworkPolicies(testNamespace).Create(context.TODO(), policy, metav1.CreateOptions{})
+	return data.clientset.NetworkingV1().NetworkPolicies(data.testNamespace).Create(context.TODO(), policy, metav1.CreateOptions{})
 }
 
 // deleteNetworkpolicy deletes the network policy.
@@ -1748,6 +1725,7 @@ func randSeq(n int) string {
 	return string(b)
 }
 
+// randName generates a DNS-1123 subdomain name by appending a random suffix to the given prefix.
 func randName(prefix string) string {
 	return prefix + randSeq(nameSuffixLength)
 }
diff --git a/test/e2e/k8s_util.go b/test/e2e/k8s_util.go
index ab6cb5d1e3f..8e4250af97a 100644
--- a/test/e2e/k8s_util.go
+++ b/test/e2e/k8s_util.go
@@ -484,7 +484,7 @@ func (data *TestData) DeleteService(ns, name string) error {
 }
 
 // CleanServices is a convenience function for deleting Services in the cluster.
-func (data *TestData) CleanServices(namespaces []string) error {
+func (data *TestData) CleanServices(namespaces map[string]string) error {
 	for _, ns := range namespaces {
 		l, err := data.clientset.CoreV1().Services(ns).List(context.TODO(), metav1.ListOptions{})
 		if err != nil {
@@ -577,7 +577,7 @@ func (data *TestData) DeleteNetworkPolicy(ns, name string) error {
 }
 
 // CleanNetworkPolicies is a convenience function for deleting NetworkPolicies in the provided namespaces.
-func (data *TestData) CleanNetworkPolicies(namespaces []string) error { +func (data *TestData) CleanNetworkPolicies(namespaces map[string]string) error { for _, ns := range namespaces { l, err := data.clientset.NetworkingV1().NetworkPolicies(ns).List(context.TODO(), metav1.ListOptions{}) if err != nil { @@ -977,7 +977,7 @@ func (k *KubernetesUtils) Validate(allPods []Pod, reachability *Reachability, po } } -func (k *KubernetesUtils) Bootstrap(namespaces, pods []string) (*map[string][]string, error) { +func (k *KubernetesUtils) Bootstrap(namespaces map[string]string, pods []string) (*map[string][]string, error) { for _, ns := range namespaces { _, err := k.CreateOrUpdateNamespace(ns, map[string]string{"ns": ns}) if err != nil { @@ -1016,7 +1016,7 @@ func (k *KubernetesUtils) Bootstrap(namespaces, pods []string) (*map[string][]st return &podIPs, nil } -func (k *KubernetesUtils) Cleanup(namespaces []string) { +func (k *KubernetesUtils) Cleanup(namespaces map[string]string) { // Cleanup any cluster-scoped resources. if err := k.CleanACNPs(); err != nil { log.Errorf("Error when cleaning up ACNPs: %v", err) diff --git a/test/e2e/multicast_test.go b/test/e2e/multicast_test.go index 0d7755550ff..0d04c7acbde 100644 --- a/test/e2e/multicast_test.go +++ b/test/e2e/multicast_test.go @@ -160,15 +160,15 @@ type multicastTestcase struct { func runTestMulticastForwardToMultipleInterfaces(t *testing.T, data *TestData, senderIdx int, senderPort int, senderGroup string, senderMulticastInterfaces []string) { mcjoinWaitTimeout := defaultTimeout / time.Second - senderName, _, cleanupFunc := createAndWaitForPod(t, data, data.createMcJoinPodOnNode, "test-sender-", nodeName(senderIdx), testNamespace, false) + senderName, _, cleanupFunc := createAndWaitForPod(t, data, data.createMcJoinPodOnNode, "test-sender-", nodeName(senderIdx), data.testNamespace, false) defer cleanupFunc() - tcpdumpName, _, cleanupFunc := createAndWaitForPod(t, data, data.createNetshootPodOnNode, "test-tcpdump-", nodeName(senderIdx), testNamespace, true) + tcpdumpName, _, cleanupFunc := createAndWaitForPod(t, data, data.createNetshootPodOnNode, "test-tcpdump-", nodeName(senderIdx), data.testNamespace, true) defer cleanupFunc() // Wait 2 seconds(-w 2) before sending multicast traffic. // It sends two multicast packets for every second(-f 500 means it takes 500 milliseconds for sending one packet). sendMulticastCommand := []string{"/bin/sh", "-c", fmt.Sprintf("timeout 90s mcjoin -f 500 -o -p %d -s -t 3 -w 2 -W %d %s", senderPort, mcjoinWaitTimeout, senderGroup)} go func() { - data.RunCommandFromPod(testNamespace, senderName, mcjoinContainerName, sendMulticastCommand) + data.RunCommandFromPod(data.testNamespace, senderName, mcjoinContainerName, sendMulticastCommand) }() if err := wait.Poll(5*time.Second, defaultTimeout, func() (bool, error) { @@ -177,7 +177,7 @@ func runTestMulticastForwardToMultipleInterfaces(t *testing.T, data *TestData, s // If multicast traffic is sent from non-HostNetwork pods, all multicast interfaces in senders should receive multicast traffic. 
 		for _, multicastInterface := range senderMulticastInterfaces {
 			tcpdumpReceiveMulticastCommand := []string{"/bin/sh", "-c", fmt.Sprintf("timeout 5s tcpdump -q -i %s -c 1 -W 90 host %s", multicastInterface, senderGroup)}
-			_, stderr, err := data.RunCommandFromPod(testNamespace, tcpdumpName, tcpdumpContainerName, tcpdumpReceiveMulticastCommand)
+			_, stderr, err := data.RunCommandFromPod(data.testNamespace, tcpdumpName, tcpdumpContainerName, tcpdumpReceiveMulticastCommand)
 			if err != nil {
 				return false, err
 			}
@@ -195,11 +195,11 @@ func runTestMulticastBetweenPods(t *testing.T, data *TestData, mc multicastTestc
 	mcjoinWaitTimeout := defaultTimeout / time.Second
 	gatewayInterface, err := data.GetGatewayInterfaceName(antreaNamespace)
 	failOnError(err, t)
-	senderName, _, cleanupFunc := createAndWaitForPod(t, data, data.createMcJoinPodOnNode, "test-sender-", nodeName(mc.senderConfig.nodeIdx), testNamespace, mc.senderConfig.isHostNetwork)
+	senderName, _, cleanupFunc := createAndWaitForPod(t, data, data.createMcJoinPodOnNode, "test-sender-", nodeName(mc.senderConfig.nodeIdx), data.testNamespace, mc.senderConfig.isHostNetwork)
 	defer cleanupFunc()
 	receiverNames := make([]string, 0)
 	for _, receiver := range mc.receiverConfigs {
-		receiverName, _, cleanupFunc := createAndWaitForPod(t, data, data.createMcJoinPodOnNode, "test-receiver-", nodeName(receiver.nodeIdx), testNamespace, receiver.isHostNetwork)
+		receiverName, _, cleanupFunc := createAndWaitForPod(t, data, data.createMcJoinPodOnNode, "test-receiver-", nodeName(receiver.nodeIdx), data.testNamespace, receiver.isHostNetwork)
 		receiverNames = append(receiverNames, receiverName)
 		defer cleanupFunc()
 	}
@@ -212,7 +212,7 @@ func runTestMulticastBetweenPods(t *testing.T, data *TestData, mc multicastTestc
 			// The following command joins a multicast group and sets the timeout (-W, mcjoinWaitTimeout seconds) before exit.
 			// The command will return after receiving 10 packets (-c 10).
 			receiveMulticastCommand := []string{"/bin/sh", "-c", fmt.Sprintf("mcjoin -c 10 -o -p %d -W %d %s", mc.port, mcjoinWaitTimeout, mc.group.String())}
-			res, _, err := data.RunCommandFromPod(testNamespace, r, mcjoinContainerName, receiveMulticastCommand)
+			res, _, err := data.RunCommandFromPod(data.testNamespace, r, mcjoinContainerName, receiveMulticastCommand)
 			failOnError(err, t)
 			assert.Contains(t, res, "Total: 10 packets")
 		}()
@@ -221,7 +221,7 @@ func runTestMulticastBetweenPods(t *testing.T, data *TestData, mc multicastTestc
 	// It sends two multicast packets for every second(-f 500 means it takes 500 milliseconds for sending one packet).
sendMulticastCommand := []string{"/bin/sh", "-c", fmt.Sprintf("mcjoin -f 500 -o -p %d -s -t 3 -w 2 -W %d %s", mc.port, mcjoinWaitTimeout, mc.group.String())} go func() { - data.RunCommandFromPod(testNamespace, senderName, mcjoinContainerName, sendMulticastCommand) + data.RunCommandFromPod(data.testNamespace, senderName, mcjoinContainerName, sendMulticastCommand) }() readyReceivers := sets.NewInt() diff --git a/test/e2e/networkpolicy_test.go b/test/e2e/networkpolicy_test.go index c4d3939f418..736acf65cca 100644 --- a/test/e2e/networkpolicy_test.go +++ b/test/e2e/networkpolicy_test.go @@ -84,10 +84,10 @@ func TestNetworkPolicy(t *testing.T) { } func testNetworkPolicyStats(t *testing.T, data *TestData) { - serverName, serverIPs, cleanupFunc := createAndWaitForPod(t, data, data.createNginxPodOnNode, "test-server-", "", testNamespace, false) + serverName, serverIPs, cleanupFunc := createAndWaitForPod(t, data, data.createNginxPodOnNode, "test-server-", "", data.testNamespace, false) defer cleanupFunc() - clientName, _, cleanupFunc := createAndWaitForPod(t, data, data.createBusyboxPodOnNode, "test-client-", "", testNamespace, false) + clientName, _, cleanupFunc := createAndWaitForPod(t, data, data.createBusyboxPodOnNode, "test-client-", "", data.testNamespace, false) defer cleanupFunc() // When using the userspace OVS datapath and tunneling, @@ -95,11 +95,11 @@ func testNetworkPolicyStats(t *testing.T, data *TestData) { // So we need to "warm-up" the tunnel. if clusterInfo.podV4NetworkCIDR != "" { cmd := []string{"/bin/sh", "-c", fmt.Sprintf("nc -vz -w 4 %s 80", serverIPs.ipv4.String())} - data.RunCommandFromPod(testNamespace, clientName, busyboxContainerName, cmd) + data.RunCommandFromPod(data.testNamespace, clientName, busyboxContainerName, cmd) } if clusterInfo.podV6NetworkCIDR != "" { cmd := []string{"/bin/sh", "-c", fmt.Sprintf("nc -vz -w 4 %s 80", serverIPs.ipv6.String())} - data.RunCommandFromPod(testNamespace, clientName, busyboxContainerName, cmd) + data.RunCommandFromPod(data.testNamespace, clientName, busyboxContainerName, cmd) } np1, err := data.createNetworkPolicy("test-networkpolicy-ingress", &networkingv1.NetworkPolicySpec{ @@ -155,11 +155,11 @@ func testNetworkPolicyStats(t *testing.T, data *TestData) { go func() { if clusterInfo.podV4NetworkCIDR != "" { cmd := []string{"/bin/sh", "-c", fmt.Sprintf("nc -vz -w 4 %s 80", serverIPs.ipv4.String())} - data.RunCommandFromPod(testNamespace, clientName, busyboxContainerName, cmd) + data.RunCommandFromPod(data.testNamespace, clientName, busyboxContainerName, cmd) } if clusterInfo.podV6NetworkCIDR != "" { cmd := []string{"/bin/sh", "-c", fmt.Sprintf("nc -vz -w 4 %s 80", serverIPs.ipv6.String())} - data.RunCommandFromPod(testNamespace, clientName, busyboxContainerName, cmd) + data.RunCommandFromPod(data.testNamespace, clientName, busyboxContainerName, cmd) } wg.Done() }() @@ -177,7 +177,7 @@ func testNetworkPolicyStats(t *testing.T, data *TestData) { if err := wait.Poll(5*time.Second, defaultTimeout, func() (bool, error) { var ingressStats *v1alpha1.NetworkPolicyStats for _, np := range []string{"test-networkpolicy-ingress", "test-networkpolicy-egress"} { - stats, err := data.crdClient.StatsV1alpha1().NetworkPolicyStats(testNamespace).Get(context.TODO(), np, metav1.GetOptions{}) + stats, err := data.crdClient.StatsV1alpha1().NetworkPolicyStats(data.testNamespace).Get(context.TODO(), np, metav1.GetOptions{}) if err != nil { return false, err } @@ -228,29 +228,29 @@ func (data *TestData) setupDifferentNamedPorts(t *testing.T) (checkFn func(), 
cl server0Port := int32(80) server0Name, server0IPs, cleanupFunc := createAndWaitForPod(t, data, func(name string, ns string, nodeName string, hostNetwork bool) error { - return data.createServerPod(name, testNamespace, "http", server0Port, false, false) - }, "test-server-", "", testNamespace, false) + return data.createServerPod(name, data.testNamespace, "http", server0Port, false, false) + }, "test-server-", "", data.testNamespace, false) cleanupFuncs = append(cleanupFuncs, cleanupFunc) server1Port := int32(8080) server1Name, server1IPs, cleanupFunc := createAndWaitForPod(t, data, func(name string, ns string, nodeName string, hostNetwork bool) error { - return data.createServerPod(name, testNamespace, "http", server1Port, false, false) - }, "test-server-", "", testNamespace, false) + return data.createServerPod(name, data.testNamespace, "http", server1Port, false, false) + }, "test-server-", "", data.testNamespace, false) cleanupFuncs = append(cleanupFuncs, cleanupFunc) - client0Name, _, cleanupFunc := createAndWaitForPod(t, data, data.createBusyboxPodOnNode, "test-client-", "", testNamespace, false) + client0Name, _, cleanupFunc := createAndWaitForPod(t, data, data.createBusyboxPodOnNode, "test-client-", "", data.testNamespace, false) cleanupFuncs = append(cleanupFuncs, cleanupFunc) - client1Name, _, cleanupFunc := createAndWaitForPod(t, data, data.createBusyboxPodOnNode, "test-client-", "", testNamespace, false) + client1Name, _, cleanupFunc := createAndWaitForPod(t, data, data.createBusyboxPodOnNode, "test-client-", "", data.testNamespace, false) cleanupFuncs = append(cleanupFuncs, cleanupFunc) preCheckFunc := func(server0IP, server1IP string) { // Both clients can connect to both servers. for _, clientName := range []string{client0Name, client1Name} { - if err := data.runNetcatCommandFromTestPod(clientName, testNamespace, server0IP, server0Port); err != nil { + if err := data.runNetcatCommandFromTestPod(clientName, data.testNamespace, server0IP, server0Port); err != nil { t.Fatalf("Pod %s should be able to connect %s, but was not able to connect", clientName, net.JoinHostPort(server0IP, fmt.Sprint(server0Port))) } - if err := data.runNetcatCommandFromTestPod(clientName, testNamespace, server1IP, server1Port); err != nil { + if err := data.runNetcatCommandFromTestPod(clientName, data.testNamespace, server1IP, server1Port); err != nil { t.Fatalf("Pod %s should be able to connect %s, but was not able to connect", clientName, net.JoinHostPort(server1IP, fmt.Sprint(server1Port))) } } @@ -302,17 +302,17 @@ func (data *TestData) setupDifferentNamedPorts(t *testing.T) (checkFn func(), cl server0Address := net.JoinHostPort(server0IP, fmt.Sprint(server0Port)) server1Address := net.JoinHostPort(server1IP, fmt.Sprint(server1Port)) // client0 can connect to both servers. - if err = data.runNetcatCommandFromTestPod(client0Name, testNamespace, server0IP, server0Port); err != nil { + if err = data.runNetcatCommandFromTestPod(client0Name, data.testNamespace, server0IP, server0Port); err != nil { t.Fatalf("Pod %s should be able to connect %s, but was not able to connect", client0Name, server0Address) } - if err = data.runNetcatCommandFromTestPod(client0Name, testNamespace, server1IP, server1Port); err != nil { + if err = data.runNetcatCommandFromTestPod(client0Name, data.testNamespace, server1IP, server1Port); err != nil { t.Fatalf("Pod %s should be able to connect %s, but was not able to connect", client0Name, server1Address) } // client1 cannot connect to both servers. 
- if err = data.runNetcatCommandFromTestPod(client1Name, testNamespace, server0IP, server0Port); err == nil { + if err = data.runNetcatCommandFromTestPod(client1Name, data.testNamespace, server0IP, server0Port); err == nil { t.Fatalf("Pod %s should not be able to connect %s, but was able to connect", client1Name, server0Address) } - if err = data.runNetcatCommandFromTestPod(client1Name, testNamespace, server1IP, server1Port); err == nil { + if err = data.runNetcatCommandFromTestPod(client1Name, data.testNamespace, server1IP, server1Port); err == nil { t.Fatalf("Pod %s should not be able to connect %s, but was able to connect", client1Name, server1Address) } } @@ -338,21 +338,21 @@ func testDefaultDenyIngressPolicy(t *testing.T, data *TestData) { serverNode := workerNodeName(1) serverNodeIP := workerNodeIP(1) serverPort := int32(80) - _, serverIPs, cleanupFunc := createAndWaitForPod(t, data, data.createNginxPodOnNode, "test-server-", serverNode, testNamespace, false) + _, serverIPs, cleanupFunc := createAndWaitForPod(t, data, data.createNginxPodOnNode, "test-server-", serverNode, data.testNamespace, false) defer cleanupFunc() - service, err := data.CreateService("nginx", testNamespace, serverPort, serverPort, map[string]string{"app": "nginx"}, false, false, corev1.ServiceTypeNodePort, nil) + service, err := data.CreateService("nginx", data.testNamespace, serverPort, serverPort, map[string]string{"app": "nginx"}, false, false, corev1.ServiceTypeNodePort, nil) if err != nil { t.Fatalf("Error when creating nginx NodePort service: %v", err) } defer data.deleteService(service.Namespace, service.Name) // client1 is a host network Pod and is on the same node as the server Pod, simulating kubelet probe traffic. - client1Name, _, cleanupFunc := createAndWaitForPod(t, data, data.createBusyboxPodOnNode, "test-hostnetwork-client-can-connect-", serverNode, testNamespace, true) + client1Name, _, cleanupFunc := createAndWaitForPod(t, data, data.createBusyboxPodOnNode, "test-hostnetwork-client-can-connect-", serverNode, data.testNamespace, true) defer cleanupFunc() // client2 is a host network Pod and is on a different node from the server Pod, accessing the server Pod via the NodePort service. 
- client2Name, _, cleanupFunc := createAndWaitForPod(t, data, data.createBusyboxPodOnNode, "test-hostnetwork-client-cannot-connect-", controlPlaneNodeName(), testNamespace, true) + client2Name, _, cleanupFunc := createAndWaitForPod(t, data, data.createBusyboxPodOnNode, "test-hostnetwork-client-cannot-connect-", controlPlaneNodeName(), data.testNamespace, true) defer cleanupFunc() spec := &networkingv1.NetworkPolicySpec{ @@ -371,7 +371,7 @@ func testDefaultDenyIngressPolicy(t *testing.T, data *TestData) { }() npCheck := func(clientName, serverIP string, serverPort int32, wantErr bool) { - if err = data.runNetcatCommandFromTestPod(clientName, testNamespace, serverIP, serverPort); wantErr && err == nil { + if err = data.runNetcatCommandFromTestPod(clientName, data.testNamespace, serverIP, serverPort); wantErr && err == nil { t.Fatalf("Pod %s should not be able to connect %s, but was able to connect", clientName, net.JoinHostPort(serverIP, fmt.Sprint(serverPort))) } else if !wantErr && err != nil { t.Fatalf("Pod %s should be able to connect %s, but was not able to connect", clientName, net.JoinHostPort(serverIP, fmt.Sprint(serverPort))) @@ -397,14 +397,14 @@ func testDefaultDenyIngressPolicy(t *testing.T, data *TestData) { func testDefaultDenyEgressPolicy(t *testing.T, data *TestData) { serverPort := int32(80) - _, serverIPs, cleanupFunc := createAndWaitForPod(t, data, data.createNginxPodOnNode, "test-server-", "", testNamespace, false) + _, serverIPs, cleanupFunc := createAndWaitForPod(t, data, data.createNginxPodOnNode, "test-server-", "", data.testNamespace, false) defer cleanupFunc() - clientName, _, cleanupFunc := createAndWaitForPod(t, data, data.createBusyboxPodOnNode, "test-client-", "", testNamespace, false) + clientName, _, cleanupFunc := createAndWaitForPod(t, data, data.createBusyboxPodOnNode, "test-client-", "", data.testNamespace, false) defer cleanupFunc() preCheckFunc := func(serverIP string) { - if err := data.runNetcatCommandFromTestPod(clientName, testNamespace, serverIP, serverPort); err != nil { + if err := data.runNetcatCommandFromTestPod(clientName, data.testNamespace, serverIP, serverPort); err != nil { t.Fatalf("Pod %s should be able to connect %s, but was not able to connect", clientName, net.JoinHostPort(serverIP, fmt.Sprint(serverPort))) } } @@ -431,7 +431,7 @@ func testDefaultDenyEgressPolicy(t *testing.T, data *TestData) { }() npCheck := func(serverIP string) { - if err = data.runNetcatCommandFromTestPod(clientName, testNamespace, serverIP, serverPort); err == nil { + if err = data.runNetcatCommandFromTestPod(clientName, data.testNamespace, serverIP, serverPort); err == nil { t.Fatalf("Pod %s should not be able to connect %s, but was able to connect", clientName, net.JoinHostPort(serverIP, fmt.Sprint(serverPort))) } } @@ -451,12 +451,12 @@ func testDefaultDenyEgressPolicy(t *testing.T, data *TestData) { // https://github.com/kubernetes/kubernetes/pull/93583 func testEgressToServerInCIDRBlock(t *testing.T, data *TestData) { workerNode := workerNodeName(1) - serverAName, serverAIPs, cleanupFunc := createAndWaitForPod(t, data, data.createNginxPodOnNode, "test-server-", workerNode, testNamespace, false) + serverAName, serverAIPs, cleanupFunc := createAndWaitForPod(t, data, data.createNginxPodOnNode, "test-server-", workerNode, data.testNamespace, false) defer cleanupFunc() - serverBName, serverBIPs, cleanupFunc := createAndWaitForPod(t, data, data.createNginxPodOnNode, "test-server-", workerNode, testNamespace, false) + serverBName, serverBIPs, cleanupFunc := 
createAndWaitForPod(t, data, data.createNginxPodOnNode, "test-server-", workerNode, data.testNamespace, false) defer cleanupFunc() - clientA, _, cleanupFunc := createAndWaitForPod(t, data, data.createBusyboxPodOnNode, "test-client-", workerNode, testNamespace, false) + clientA, _, cleanupFunc := createAndWaitForPod(t, data, data.createBusyboxPodOnNode, "test-client-", workerNode, data.testNamespace, false) defer cleanupFunc() var serverCIDR string var serverAIP, serverBIP string @@ -467,10 +467,10 @@ func testEgressToServerInCIDRBlock(t *testing.T, data *TestData) { serverAIP = serverAIPs.ipv6.String() serverBIP = serverBIPs.ipv6.String() - if err := data.runNetcatCommandFromTestPod(clientA, testNamespace, serverAIP, 80); err != nil { + if err := data.runNetcatCommandFromTestPod(clientA, data.testNamespace, serverAIP, 80); err != nil { t.Fatalf("%s should be able to netcat %s", clientA, serverAName) } - if err := data.runNetcatCommandFromTestPod(clientA, testNamespace, serverBIP, 80); err != nil { + if err := data.runNetcatCommandFromTestPod(clientA, data.testNamespace, serverBIP, 80); err != nil { t.Fatalf("%s should be able to netcat %s", clientA, serverBName) } @@ -503,10 +503,10 @@ func testEgressToServerInCIDRBlock(t *testing.T, data *TestData) { } defer cleanupNP() - if err := data.runNetcatCommandFromTestPod(clientA, testNamespace, serverAIP, 80); err != nil { + if err := data.runNetcatCommandFromTestPod(clientA, data.testNamespace, serverAIP, 80); err != nil { t.Fatalf("%s should be able to netcat %s", clientA, serverAName) } - if err := data.runNetcatCommandFromTestPod(clientA, testNamespace, serverBIP, 80); err == nil { + if err := data.runNetcatCommandFromTestPod(clientA, data.testNamespace, serverBIP, 80); err == nil { t.Fatalf("%s should not be able to netcat %s", clientA, serverBName) } } @@ -518,10 +518,10 @@ func testEgressToServerInCIDRBlock(t *testing.T, data *TestData) { // https://github.com/kubernetes/kubernetes/pull/93583 func testEgressToServerInCIDRBlockWithException(t *testing.T, data *TestData) { workerNode := workerNodeName(1) - serverAName, serverAIPs, cleanupFunc := createAndWaitForPod(t, data, data.createNginxPodOnNode, "test-server-", workerNode, testNamespace, false) + serverAName, serverAIPs, cleanupFunc := createAndWaitForPod(t, data, data.createNginxPodOnNode, "test-server-", workerNode, data.testNamespace, false) defer cleanupFunc() - clientA, _, cleanupFunc := createAndWaitForPod(t, data, data.createBusyboxPodOnNode, "test-client-", workerNode, testNamespace, false) + clientA, _, cleanupFunc := createAndWaitForPod(t, data, data.createBusyboxPodOnNode, "test-client-", workerNode, data.testNamespace, false) defer cleanupFunc() var serverAAllowCIDR string var serverAExceptList []string @@ -537,7 +537,7 @@ func testEgressToServerInCIDRBlockWithException(t *testing.T, data *TestData) { serverAExceptList = []string{fmt.Sprintf("%s/%d", serverAIPs.ipv6.String(), 128)} serverAIP = serverAIPs.ipv6.String() - if err := data.runNetcatCommandFromTestPod(clientA, testNamespace, serverAIP, 80); err != nil { + if err := data.runNetcatCommandFromTestPod(clientA, data.testNamespace, serverAIP, 80); err != nil { t.Fatalf("%s should be able to netcat %s", clientA, serverAName) } @@ -571,7 +571,7 @@ func testEgressToServerInCIDRBlockWithException(t *testing.T, data *TestData) { } defer cleanupNP() - if err := data.runNetcatCommandFromTestPod(clientA, testNamespace, serverAIP, 80); err == nil { + if err := data.runNetcatCommandFromTestPod(clientA, data.testNamespace, 
serverAIP, 80); err == nil { t.Fatalf("%s should not be able to netcat %s", clientA, serverAName) } } @@ -583,16 +583,16 @@ func testNetworkPolicyResyncAfterRestart(t *testing.T, data *TestData) { t.Fatalf("Error when getting antrea-agent pod name: %v", err) } - server0Name, server0IPs, cleanupFunc := createAndWaitForPod(t, data, data.createNginxPodOnNode, "test-server-", workerNode, testNamespace, false) + server0Name, server0IPs, cleanupFunc := createAndWaitForPod(t, data, data.createNginxPodOnNode, "test-server-", workerNode, data.testNamespace, false) defer cleanupFunc() - server1Name, server1IPs, cleanupFunc := createAndWaitForPod(t, data, data.createNginxPodOnNode, "test-server-", workerNode, testNamespace, false) + server1Name, server1IPs, cleanupFunc := createAndWaitForPod(t, data, data.createNginxPodOnNode, "test-server-", workerNode, data.testNamespace, false) defer cleanupFunc() - client0Name, _, cleanupFunc := createAndWaitForPod(t, data, data.createBusyboxPodOnNode, "test-client-", workerNode, testNamespace, false) + client0Name, _, cleanupFunc := createAndWaitForPod(t, data, data.createBusyboxPodOnNode, "test-client-", workerNode, data.testNamespace, false) defer cleanupFunc() - client1Name, _, cleanupFunc := createAndWaitForPod(t, data, data.createBusyboxPodOnNode, "test-client-", workerNode, testNamespace, false) + client1Name, _, cleanupFunc := createAndWaitForPod(t, data, data.createBusyboxPodOnNode, "test-client-", workerNode, data.testNamespace, false) defer cleanupFunc() netpol0, err := data.createNetworkPolicy("test-isolate-server0", &networkingv1.NetworkPolicySpec{ @@ -617,10 +617,10 @@ func testNetworkPolicyResyncAfterRestart(t *testing.T, data *TestData) { defer cleanupNetpol0() preCheckFunc := func(server0IP, server1IP string) { - if err = data.runNetcatCommandFromTestPod(client0Name, testNamespace, server0IP, 80); err == nil { + if err = data.runNetcatCommandFromTestPod(client0Name, data.testNamespace, server0IP, 80); err == nil { t.Fatalf("Pod %s should not be able to connect %s, but was able to connect", client0Name, server0Name) } - if err = data.runNetcatCommandFromTestPod(client1Name, testNamespace, server1IP, 80); err != nil { + if err = data.runNetcatCommandFromTestPod(client1Name, data.testNamespace, server1IP, 80); err != nil { t.Fatalf("Pod %s should be able to connect %s, but was not able to connect", client1Name, server1Name) } } @@ -673,10 +673,10 @@ func testNetworkPolicyResyncAfterRestart(t *testing.T, data *TestData) { waitForAgentCondition(t, data, antreaPod, v1beta1.ControllerConnectionUp, corev1.ConditionTrue) npCheck := func(server0IP, server1IP string) { - if err = data.runNetcatCommandFromTestPod(client0Name, testNamespace, server0IP, 80); err != nil { + if err = data.runNetcatCommandFromTestPod(client0Name, data.testNamespace, server0IP, 80); err != nil { t.Fatalf("Pod %s should be able to connect %s, but was not able to connect", client0Name, server0Name) } - if err = data.runNetcatCommandFromTestPod(client1Name, testNamespace, server1IP, 80); err == nil { + if err = data.runNetcatCommandFromTestPod(client1Name, data.testNamespace, server1IP, 80); err == nil { t.Fatalf("Pod %s should not be able to connect %s, but was able to connect", client1Name, server1Name) } } @@ -691,19 +691,19 @@ func testNetworkPolicyResyncAfterRestart(t *testing.T, data *TestData) { func testIngressPolicyWithoutPortNumber(t *testing.T, data *TestData) { serverPort := int32(80) - _, serverIPs, cleanupFunc := createAndWaitForPod(t, data, data.createNginxPodOnNode, 
"test-server-", "", testNamespace, false) + _, serverIPs, cleanupFunc := createAndWaitForPod(t, data, data.createNginxPodOnNode, "test-server-", "", data.testNamespace, false) defer cleanupFunc() - client0Name, _, cleanupFunc := createAndWaitForPod(t, data, data.createBusyboxPodOnNode, "test-client-", "", testNamespace, false) + client0Name, _, cleanupFunc := createAndWaitForPod(t, data, data.createBusyboxPodOnNode, "test-client-", "", data.testNamespace, false) defer cleanupFunc() - client1Name, _, cleanupFunc := createAndWaitForPod(t, data, data.createBusyboxPodOnNode, "test-client-", "", testNamespace, false) + client1Name, _, cleanupFunc := createAndWaitForPod(t, data, data.createBusyboxPodOnNode, "test-client-", "", data.testNamespace, false) defer cleanupFunc() preCheckFunc := func(serverIP string) { // Both clients can connect to server. for _, clientName := range []string{client0Name, client1Name} { - if err := data.runNetcatCommandFromTestPod(clientName, testNamespace, serverIP, serverPort); err != nil { + if err := data.runNetcatCommandFromTestPod(clientName, data.testNamespace, serverIP, serverPort); err != nil { t.Fatalf("Pod %s should be able to connect %s, but was not able to connect", clientName, net.JoinHostPort(serverIP, fmt.Sprint(serverPort))) } } @@ -750,11 +750,11 @@ func testIngressPolicyWithoutPortNumber(t *testing.T, data *TestData) { npCheck := func(serverIP string) { serverAddress := net.JoinHostPort(serverIP, fmt.Sprint(serverPort)) // Client0 can access server. - if err = data.runNetcatCommandFromTestPod(client0Name, testNamespace, serverIP, serverPort); err != nil { + if err = data.runNetcatCommandFromTestPod(client0Name, data.testNamespace, serverIP, serverPort); err != nil { t.Fatalf("Pod %s should be able to connect %s, but was not able to connect", client0Name, serverAddress) } // Client1 can't access server. - if err = data.runNetcatCommandFromTestPod(client1Name, testNamespace, serverIP, serverPort); err == nil { + if err = data.runNetcatCommandFromTestPod(client1Name, data.testNamespace, serverIP, serverPort); err == nil { t.Fatalf("Pod %s should not be able to connect %s, but was able to connect", client1Name, serverAddress) } } @@ -834,16 +834,16 @@ func testIngressPolicyWithEndPort(t *testing.T, data *TestData) { return nil } - serverName, serverIPs, cleanupFunc := createAndWaitForPod(t, data, createAgnhostPodOnNodeWithMultiPort, "test-server-", "", testNamespace, false) + serverName, serverIPs, cleanupFunc := createAndWaitForPod(t, data, createAgnhostPodOnNodeWithMultiPort, "test-server-", "", data.testNamespace, false) defer cleanupFunc() - clientName, _, cleanupFunc := createAndWaitForPod(t, data, data.createBusyboxPodOnNode, "test-client-", "", testNamespace, false) + clientName, _, cleanupFunc := createAndWaitForPod(t, data, data.createBusyboxPodOnNode, "test-client-", "", data.testNamespace, false) defer cleanupFunc() preCheck := func(serverIP string) { // The client can connect to server on all ports. 
for _, port := range serverPorts { - if err := data.runNetcatCommandFromTestPod(clientName, testNamespace, serverIP, port); err != nil { + if err := data.runNetcatCommandFromTestPod(clientName, data.testNamespace, serverIP, port); err != nil { t.Fatalf("Pod %s should be able to connect %s, but was not able to connect", clientName, net.JoinHostPort(serverIP, fmt.Sprint(port))) } } @@ -900,7 +900,7 @@ func testIngressPolicyWithEndPort(t *testing.T, data *TestData) { npCheck := func(serverIP string) { for _, port := range serverPorts { - err = data.runNetcatCommandFromTestPod(clientName, testNamespace, serverIP, port) + err = data.runNetcatCommandFromTestPod(clientName, data.testNamespace, serverIP, port) if port >= policyPort && port <= policyEndPort { if err != nil { t.Errorf("Pod %s should be able to connect %s, but was not able to connect", clientName, net.JoinHostPort(serverIP, fmt.Sprint(port))) @@ -941,7 +941,7 @@ func createAndWaitForPodWithServiceAccount(t *testing.T, data *TestData, createF t.Fatalf("Error when creating busybox test Pod: %v", err) } cleanupFunc := func() { - deletePodWrapper(t, data, testNamespace, name) + deletePodWrapper(t, data, data.testNamespace, name) } podIP, err := data.podWaitForIPs(defaultTimeout, name, ns) if err != nil { diff --git a/test/e2e/nodeportlocal_test.go b/test/e2e/nodeportlocal_test.go index fede7abbf32..2f4a328639a 100644 --- a/test/e2e/nodeportlocal_test.go +++ b/test/e2e/nodeportlocal_test.go @@ -103,7 +103,7 @@ func getNPLAnnotation(t *testing.T, data *TestData, r *require.Assertions, testP podTimeout = 18 * time.Second } for i := 0; i <= maxRetries; i++ { - _, err = data.PodWaitFor(podTimeout, testPodName, testNamespace, func(pod *corev1.Pod) (bool, error) { + _, err = data.PodWaitFor(podTimeout, testPodName, data.testNamespace, func(pod *corev1.Pod) (bool, error) { var err error if pod.Status.Phase != corev1.PodRunning { return false, nil @@ -248,17 +248,17 @@ func deleteNPLRuleFromIPTables(t *testing.T, data *TestData, r *require.Assertio func checkTrafficForNPL(data *TestData, r *require.Assertions, nplAnnotations []k8s.NPLAnnotation, clientName string) { for i := range nplAnnotations { for j := range nplAnnotations[i].Protocols { - err := data.runNetcatCommandFromTestPodWithProtocol(clientName, testNamespace, nplAnnotations[i].NodeIP, int32(nplAnnotations[i].NodePort), nplAnnotations[i].Protocols[j]) + err := data.runNetcatCommandFromTestPodWithProtocol(clientName, data.testNamespace, nplAnnotations[i].NodeIP, int32(nplAnnotations[i].NodePort), nplAnnotations[i].Protocols[j]) r.NoError(err, "Traffic test failed for NodeIP: %s, NodePort: %d, Protocol: %s", nplAnnotations[i].NodeIP, nplAnnotations[i].NodePort, nplAnnotations[i].Protocols[j]) } } } func testNPLAddPod(t *testing.T, data *TestData) { - t.Run("NPLTestMultiplePods", NPLTestMultiplePods) - t.Run("NPLTestPodAddMultiPort", NPLTestPodAddMultiPort) - t.Run("NPLTestPodAddMultiProtocol", NPLTestPodAddMultiProtocol) - t.Run("NPLTestLocalAccess", NPLTestLocalAccess) + t.Run("NPLTestMultiplePods", func(t *testing.T) { NPLTestMultiplePods(t, data) }) + t.Run("NPLTestPodAddMultiPort", func(t *testing.T) { NPLTestPodAddMultiPort(t, data) }) + t.Run("NPLTestPodAddMultiProtocol", func(t *testing.T) { NPLTestPodAddMultiProtocol(t, data) }) + t.Run("NPLTestLocalAccess", func(t *testing.T) { NPLTestLocalAccess(t, data) }) } // NPLTestMultiplePods tests NodePortLocal functionalities after adding multiple Pods. 
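Note: the next hunks replace bare function references in t.Run with closures, because the NPL subtests now take the shared *TestData (and with it data.testNamespace) as an explicit parameter instead of reading package-level state. The same adapter pattern in isolation, as a sketch; runWithData is a hypothetical helper, not part of this patch:

    // Sketch: t.Run expects a func(*testing.T), so close over the shared
    // state and forward it to the parameterized subtest.
    func runWithData(t *testing.T, data *TestData, name string, subtest func(*testing.T, *TestData)) {
        t.Run(name, func(t *testing.T) { subtest(t, data) })
    }

    // usage: runWithData(t, data, "NPLTestLocalAccess", NPLTestLocalAccess)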
@@ -267,7 +267,7 @@ func testNPLAddPod(t *testing.T, data *TestData) { // - Make sure iptables rules are correctly added in the Node from Antrea Agent Pod. // - Create a client Pod and test traffic through netcat. // - Delete the nginx test Pods and verify that the iptables rules are deleted. -func NPLTestMultiplePods(t *testing.T) { +func NPLTestMultiplePods(t *testing.T, data *TestData) { r := require.New(t) annotation := make(map[string]string) @@ -280,15 +280,15 @@ func NPLTestMultiplePods(t *testing.T) { for i := 0; i < 4; i++ { testPodName := randName("test-pod-") testPods = append(testPods, testPodName) - err := testData.createNginxPodOnNode(testPodName, testNamespace, node, false) + err := testData.createNginxPodOnNode(testPodName, data.testNamespace, node, false) r.NoError(err, "Error creating test Pod: %v", err) } clientName := randName("test-client-") - err := testData.createBusyboxPodOnNode(clientName, testNamespace, node, false) + err := testData.createBusyboxPodOnNode(clientName, data.testNamespace, node, false) r.NoError(err, "Error creating Pod %s: %v", clientName) - err = testData.podWaitForRunning(defaultTimeout, clientName, testNamespace) + err = testData.podWaitForRunning(defaultTimeout, clientName, data.testNamespace) r.NoError(err, "Error when waiting for Pod %s to be running", clientName) antreaPod, err := testData.getAntreaPodOnNode(node) @@ -302,13 +302,13 @@ func NPLTestMultiplePods(t *testing.T) { expectedAnnotations.Check(t, nplAnnotations) checkTrafficForNPL(testData, r, nplAnnotations, clientName) - testData.DeletePod(testNamespace, testPodName) + testData.DeletePod(data.testNamespace, testPodName) checkNPLRulesForPod(t, testData, r, nplAnnotations, antreaPod, testPodIP, false) } } // NPLTestPodAddMultiPort tests NodePortLocal functionalities for a Pod with multiple ports. -func NPLTestPodAddMultiPort(t *testing.T) { +func NPLTestPodAddMultiPort(t *testing.T, data *TestData) { r := require.New(t) node := nodeName(0) @@ -319,15 +319,15 @@ func NPLTestPodAddMultiPort(t *testing.T) { selector := make(map[string]string) selector["app"] = "agnhost" ipFamily := corev1.IPv4Protocol - testData.CreateServiceWithAnnotations("agnhost1", testNamespace, 80, 80, corev1.ProtocolTCP, selector, false, false, corev1.ServiceTypeClusterIP, &ipFamily, annotation) - testData.CreateServiceWithAnnotations("agnhost2", testNamespace, 80, 8080, corev1.ProtocolTCP, selector, false, false, corev1.ServiceTypeClusterIP, &ipFamily, annotation) + testData.CreateServiceWithAnnotations("agnhost1", data.testNamespace, 80, 80, corev1.ProtocolTCP, selector, false, false, corev1.ServiceTypeClusterIP, &ipFamily, annotation) + testData.CreateServiceWithAnnotations("agnhost2", data.testNamespace, 80, 8080, corev1.ProtocolTCP, selector, false, false, corev1.ServiceTypeClusterIP, &ipFamily, annotation) expectedAnnotations := newExpectedNPLAnnotations(defaultStartPort, defaultEndPort). Add(nil, 80, "tcp").Add(nil, 8080, "tcp") podcmd := "porter" // Creating a Pod using agnhost image to support multiple ports, instead of nginx. 
- err := testData.createPodOnNode(testPodName, testNamespace, node, agnhostImage, nil, []string{podcmd}, []corev1.EnvVar{ + err := testData.createPodOnNode(testPodName, data.testNamespace, node, agnhostImage, nil, []string{podcmd}, []corev1.EnvVar{ { Name: fmt.Sprintf("SERVE_PORT_%d", 80), Value: "foo", }, @@ -352,10 +352,10 @@ func NPLTestPodAddMultiPort(t *testing.T) { nplAnnotations, testPodIP := getNPLAnnotations(t, testData, r, testPodName) clientName := randName("test-client-") - err = testData.createBusyboxPodOnNode(clientName, testNamespace, node, false) + err = testData.createBusyboxPodOnNode(clientName, data.testNamespace, node, false) r.NoError(err, "Error when creating Pod %s", clientName) - err = testData.podWaitForRunning(defaultTimeout, clientName, testNamespace) + err = testData.podWaitForRunning(defaultTimeout, clientName, data.testNamespace) r.NoError(err, "Error when waiting for Pod %s to be running", clientName) antreaPod, err := testData.getAntreaPodOnNode(node) @@ -365,14 +365,14 @@ func NPLTestPodAddMultiPort(t *testing.T) { expectedAnnotations.Check(t, nplAnnotations) checkTrafficForNPL(testData, r, nplAnnotations, clientName) - testData.DeletePod(testNamespace, testPodName) - testData.DeleteService(testNamespace, "agnhost1") - testData.DeleteService(testNamespace, "agnhost2") + testData.DeletePod(data.testNamespace, testPodName) + testData.DeleteService(data.testNamespace, "agnhost1") + testData.DeleteService(data.testNamespace, "agnhost2") checkNPLRulesForPod(t, testData, r, nplAnnotations, antreaPod, testPodIP, false) } // NPLTestPodAddMultiProtocol tests NodePortLocal functionalities for a Pod using a single port with multiple protocols. -func NPLTestPodAddMultiProtocol(t *testing.T) { +func NPLTestPodAddMultiProtocol(t *testing.T, data *TestData) { r := require.New(t) node := nodeName(0) @@ -383,8 +383,8 @@ func NPLTestPodAddMultiProtocol(t *testing.T) { selector := make(map[string]string) selector["app"] = "agnhost" ipFamily := corev1.IPv4Protocol - testData.CreateServiceWithAnnotations("agnhost1", testNamespace, 80, 8080, corev1.ProtocolTCP, selector, false, false, corev1.ServiceTypeClusterIP, &ipFamily, annotation) - testData.CreateServiceWithAnnotations("agnhost2", testNamespace, 80, 8080, corev1.ProtocolUDP, selector, false, false, corev1.ServiceTypeClusterIP, &ipFamily, annotation) + testData.CreateServiceWithAnnotations("agnhost1", data.testNamespace, 80, 8080, corev1.ProtocolTCP, selector, false, false, corev1.ServiceTypeClusterIP, &ipFamily, annotation) + testData.CreateServiceWithAnnotations("agnhost2", data.testNamespace, 80, 8080, corev1.ProtocolUDP, selector, false, false, corev1.ServiceTypeClusterIP, &ipFamily, annotation) expectedAnnotations := newExpectedNPLAnnotations(defaultStartPort, defaultEndPort). 
Add(nil, 8080, "tcp").Add(nil, 8080, "udp") @@ -400,17 +400,17 @@ func NPLTestPodAddMultiProtocol(t *testing.T) { pod.Labels[k] = v } } - err := testData.CreatePodOnNodeInNamespace(testPodName, testNamespace, node, containerName, agnhostImage, cmd, args, []corev1.EnvVar{}, []corev1.ContainerPort{port}, false, mutateLabels) + err := testData.CreatePodOnNodeInNamespace(testPodName, data.testNamespace, node, containerName, agnhostImage, cmd, args, []corev1.EnvVar{}, []corev1.ContainerPort{port}, false, mutateLabels) r.NoError(err, "Error creating test Pod: %v", err) nplAnnotations, testPodIP := getNPLAnnotations(t, testData, r, testPodName) clientName := randName("test-client-") - err = testData.createBusyboxPodOnNode(clientName, testNamespace, node, false) + err = testData.createBusyboxPodOnNode(clientName, data.testNamespace, node, false) r.NoError(err, "Error when creating Pod %s", clientName) - err = testData.podWaitForRunning(defaultTimeout, clientName, testNamespace) + err = testData.podWaitForRunning(defaultTimeout, clientName, data.testNamespace) r.NoError(err, "Error when waiting for Pod %s to be running", clientName) antreaPod, err := testData.getAntreaPodOnNode(node) @@ -421,15 +421,15 @@ func NPLTestPodAddMultiProtocol(t *testing.T) { expectedAnnotations.Check(t, nplAnnotations) checkTrafficForNPL(testData, r, nplAnnotations, clientName) - testData.DeletePod(testNamespace, testPodName) - testData.DeleteService(testNamespace, "agnhost1") - testData.DeleteService(testNamespace, "agnhost2") + testData.DeletePod(data.testNamespace, testPodName) + testData.DeleteService(data.testNamespace, "agnhost1") + testData.DeleteService(data.testNamespace, "agnhost2") checkNPLRulesForPod(t, testData, r, nplAnnotations, antreaPod, testPodIP, false) } // NPLTestLocalAccess validates that a NodePortLocal Pod can be accessed locally // from the host network namespace. 
-func NPLTestLocalAccess(t *testing.T) { +func NPLTestLocalAccess(t *testing.T, data *TestData) { r := require.New(t) annotation := make(map[string]string) @@ -441,14 +441,14 @@ func NPLTestLocalAccess(t *testing.T) { node := nodeName(0) testPodName := randName("test-pod-") - err := testData.createNginxPodOnNode(testPodName, testNamespace, node, false) + err := testData.createNginxPodOnNode(testPodName, data.testNamespace, node, false) r.NoError(err, "Error creating test Pod: %v", err) clientName := randName("test-client-") - err = testData.createBusyboxPodOnNode(clientName, testNamespace, node, true) + err = testData.createBusyboxPodOnNode(clientName, data.testNamespace, node, true) r.NoError(err, "Error creating hostNetwork Pod %s: %v", clientName) - err = testData.podWaitForRunning(defaultTimeout, clientName, testNamespace) + err = testData.podWaitForRunning(defaultTimeout, clientName, data.testNamespace) r.NoError(err, "Error when waiting for Pod %s to be running", clientName) antreaPod, err := testData.getAntreaPodOnNode(node) @@ -460,7 +460,7 @@ func NPLTestLocalAccess(t *testing.T) { expectedAnnotations.Check(t, nplAnnotations) checkTrafficForNPL(testData, r, nplAnnotations, clientName) - testData.DeletePod(testNamespace, testPodName) + testData.DeletePod(data.testNamespace, testPodName) checkNPLRulesForPod(t, testData, r, nplAnnotations, antreaPod, testPodIP, false) } @@ -483,15 +483,15 @@ func testNPLMultiplePodsAgentRestart(t *testing.T, data *TestData) { for i := 0; i < 4; i++ { testPodName := randName("test-pod-") testPods = append(testPods, testPodName) - err = data.createNginxPodOnNode(testPodName, testNamespace, node, false) + err = data.createNginxPodOnNode(testPodName, data.testNamespace, node, false) r.NoError(err, "Error creating test Pod: %v", err) } clientName := randName("test-client-") - err = data.createBusyboxPodOnNode(clientName, testNamespace, node, false) + err = data.createBusyboxPodOnNode(clientName, data.testNamespace, node, false) r.NoError(err, "Error when creating Pod %s", clientName) - err = data.podWaitForRunning(defaultTimeout, clientName, testNamespace) + err = data.podWaitForRunning(defaultTimeout, clientName, data.testNamespace) r.NoError(err, "Error when waiting for Pod %s to be running", clientName) antreaPod, err := data.getAntreaPodOnNode(node) @@ -547,15 +547,15 @@ func testNPLChangePortRangeAgentRestart(t *testing.T, data *TestData) { for i := 0; i < 4; i++ { testPodName := randName("test-pod-") testPods = append(testPods, testPodName) - err = data.createNginxPodOnNode(testPodName, testNamespace, node, false) + err = data.createNginxPodOnNode(testPodName, data.testNamespace, node, false) r.NoError(err, "Error Creating test Pod: %v", err) } clientName := randName("test-client-") - err = data.createBusyboxPodOnNode(clientName, testNamespace, node, false) + err = data.createBusyboxPodOnNode(clientName, data.testNamespace, node, false) r.NoError(err, "Error when creating Pod %s", clientName) - err = data.podWaitForRunning(defaultTimeout, clientName, testNamespace) + err = data.podWaitForRunning(defaultTimeout, clientName, data.testNamespace) r.NoError(err, "Error when waiting for Pod %s to be running", clientName) var rules []nplRuleData diff --git a/test/e2e/performance_test.go b/test/e2e/performance_test.go index c8b2c61de32..37aaf6e4930 100644 --- a/test/e2e/performance_test.go +++ b/test/e2e/performance_test.go @@ -142,7 +142,7 @@ func setupTestPodsConnection(data *TestData) error { ObjectMeta: metav1.ObjectMeta{Name: 
podsConnectionNetworkPolicyName},
        Spec: npSpec,
    }
-    _, err := data.clientset.NetworkingV1().NetworkPolicies(testNamespace).Create(context.TODO(), np, metav1.CreateOptions{})
+    _, err := data.clientset.NetworkingV1().NetworkPolicies(data.testNamespace).Create(context.TODO(), np, metav1.CreateOptions{})
     return err
 }
 
@@ -170,31 +170,31 @@ func generateWorkloadNetworkPolicy(policyRules int) *networkv1.NetworkPolicy {
 }
 
 func populateWorkloadNetworkPolicy(np *networkv1.NetworkPolicy, data *TestData) error {
-    _, err := data.clientset.NetworkingV1().NetworkPolicies(testNamespace).Create(context.TODO(), np, metav1.CreateOptions{})
+    _, err := data.clientset.NetworkingV1().NetworkPolicies(data.testNamespace).Create(context.TODO(), np, metav1.CreateOptions{})
     return err
 }
 
 func setupTestPods(data *TestData, b *testing.B) (nginxPodIP, perfPodIP *PodIPs) {
     b.Logf("Creating a nginx test Pod")
     nginxPod := createPerfTestPodDefinition(benchNginxPodName, nginxContainerName, nginxImage)
-    _, err := data.clientset.CoreV1().Pods(testNamespace).Create(context.TODO(), nginxPod, metav1.CreateOptions{})
+    _, err := data.clientset.CoreV1().Pods(data.testNamespace).Create(context.TODO(), nginxPod, metav1.CreateOptions{})
     if err != nil {
         b.Fatalf("Error when creating nginx test pod: %v", err)
     }
     b.Logf("Waiting for IP assignment of the nginx test Pod")
-    nginxPodIP, err = data.podWaitForIPs(defaultTimeout, benchNginxPodName, testNamespace)
+    nginxPodIP, err = data.podWaitForIPs(defaultTimeout, benchNginxPodName, data.testNamespace)
     if err != nil {
         b.Fatalf("Error when waiting for IP assignment of nginx test Pod: %v", err)
     }
 
     b.Logf("Creating a perftool test Pod")
     perfPod := createPerfTestPodDefinition(perftoolPodName, perftoolContainerName, perftoolImage)
-    _, err = data.clientset.CoreV1().Pods(testNamespace).Create(context.TODO(), perfPod, metav1.CreateOptions{})
+    _, err = data.clientset.CoreV1().Pods(data.testNamespace).Create(context.TODO(), perfPod, metav1.CreateOptions{})
     if err != nil {
         b.Fatalf("Error when creating perftool test Pod: %v", err)
     }
     b.Logf("Waiting for IP assignment of the perftool test Pod")
-    perfPodIP, err = data.podWaitForIPs(defaultTimeout, perftoolPodName, testNamespace)
+    perfPodIP, err = data.podWaitForIPs(defaultTimeout, perftoolPodName, data.testNamespace)
     if err != nil {
         b.Fatalf("Error when waiting for IP assignment of perftool test Pod: %v", err)
     }
 
@@ -235,7 +235,7 @@ func httpRequest(requests, policyRules int, data *TestData, b *testing.B) {
     for i := 0; i < b.N; i++ {
         b.Logf("Running http request bench %d/%d", i+1, b.N)
         cmd := []string{"ab", "-n", fmt.Sprint(requests), "-c", fmt.Sprint(*httpConcurrency), serverURL.String()}
-        stdout, stderr, err := data.RunCommandFromPod(testNamespace, perftoolPodName, perftoolContainerName, cmd)
+        stdout, stderr, err := data.RunCommandFromPod(data.testNamespace, perftoolPodName, perftoolContainerName, cmd)
         if err != nil {
             b.Errorf("Error when running http request %dx: %v, stdout: %s, stderr: %s\n", requests, err, stdout, stderr)
         }
@@ -268,7 +268,7 @@ func networkPolicyRealize(policyRules int, data *TestData, b *testing.B) {
     b.StopTimer()
     b.Log("Network policy realized")
 
-    err = data.clientset.NetworkingV1().NetworkPolicies(testNamespace).Delete(context.TODO(), workloadNetworkPolicyName, metav1.DeleteOptions{})
+    err = data.clientset.NetworkingV1().NetworkPolicies(data.testNamespace).Delete(context.TODO(), workloadNetworkPolicyName, metav1.DeleteOptions{})
     if err != nil {
         b.Fatalf("Error when cleaning up network policies after running one bench iteration: %v", err)
     }
diff --git a/test/e2e/providers/exec/docker.go b/test/e2e/providers/exec/docker.go
index 917adb43461..5f5fc59193c 100644
--- a/test/e2e/providers/exec/docker.go
+++ b/test/e2e/providers/exec/docker.go
@@ -20,6 +20,7 @@ import (
     "io/ioutil"
     "os/exec"
     "strings"
+    "sync"
 )
 
 // TODO: we could use the Docker Go SDK for this, but it seems like a big dependency to pull in just
@@ -75,8 +76,21 @@ func RunDockerExecCommand(container, cmd, workdir string, envs map[string]string
     if err := dockerCmd.Start(); err != nil {
         return 0, "", "", fmt.Errorf("error when starting command: %v", err)
     }
-    stdoutBytes, _ := ioutil.ReadAll(stdoutPipe)
-    stderrBytes, _ := ioutil.ReadAll(stderrPipe)
+    // Drain stdout and stderr concurrently before calling Wait: reading the
+    // pipes one after the other can block indefinitely if the command fills
+    // one pipe's buffer while the other is still being read.
+    var stdoutBytes, stderrBytes []byte
+    var wg sync.WaitGroup
+    wg.Add(2)
+    go func() {
+        defer wg.Done()
+        stdoutBytes, _ = ioutil.ReadAll(stdoutPipe)
+    }()
+    go func() {
+        defer wg.Done()
+        stderrBytes, _ = ioutil.ReadAll(stderrPipe)
+    }()
+    wg.Wait()
 
     if err := dockerCmd.Wait(); err != nil {
         if e, ok := err.(*exec.ExitError); ok {
diff --git a/test/e2e/proxy_test.go b/test/e2e/proxy_test.go
index 4a495a18845..4289b1558a5 100644
--- a/test/e2e/proxy_test.go
+++ b/test/e2e/proxy_test.go
@@ -110,9 +110,9 @@ func probeClientIPFromNode(node string, baseUrl string, data *TestData) (string,
 func probeFromPod(data *TestData, pod, container string, url string) error {
     var err error
     if container == busyboxContainerName {
-        _, _, err = data.runWgetCommandOnBusyboxWithRetry(pod, testNamespace, url, 5)
+        _, _, err = data.runWgetCommandOnBusyboxWithRetry(pod, data.testNamespace, url, 5)
     } else {
-        _, _, err = data.RunCommandFromPod(testNamespace, pod, container, []string{"wget", "-O", "-", url, "-T", "5"})
+        _, _, err = data.RunCommandFromPod(data.testNamespace, pod, container, []string{"wget", "-O", "-", url, "-T", "5"})
     }
     return err
 }
@@ -122,9 +122,9 @@ func probeHostnameFromPod(data *TestData, pod, container string, baseUrl string)
     var err error
     var hostname string
     if container == busyboxContainerName {
-        hostname, _, err = data.runWgetCommandOnBusyboxWithRetry(pod, testNamespace, url, 5)
+        hostname, _, err = data.runWgetCommandOnBusyboxWithRetry(pod, data.testNamespace, url, 5)
     } else {
-        hostname, _, err = data.RunCommandFromPod(testNamespace, pod, container, []string{"wget", "-O", "-", url, "-T", "5"})
+        hostname, _, err = data.RunCommandFromPod(data.testNamespace, pod, container, []string{"wget", "-O", "-", url, "-T", "5"})
     }
     return hostname, err
 }
@@ -134,9 +134,9 @@ func probeClientIPFromPod(data *TestData, pod, container string, baseUrl string)
     var err error
     var hostPort string
     if container == busyboxContainerName {
-        hostPort, _, err = data.runWgetCommandOnBusyboxWithRetry(pod, testNamespace, url, 5)
+        hostPort, _, err = data.runWgetCommandOnBusyboxWithRetry(pod, data.testNamespace, url, 5)
     } else {
-        hostPort, _, err = data.RunCommandFromPod(testNamespace, pod, container, []string{"wget", "-O", "-", url, "-T", "5"})
+        hostPort, _, err = data.RunCommandFromPod(data.testNamespace, pod, container, []string{"wget", "-O", "-", url, "-T", "5"})
     }
     if err != nil {
         return "", err
     }
@@ -167,6 +167,7 @@ func testProxyLoadBalancerService(t *testing.T, isIPv6 bool) {
     skipIfProxyDisabled(t)
     skipIfHasWindowsNodes(t)
     skipIfNumNodesLessThan(t, 2)
+
     data, err := setupTest(t)
     if err != nil {
         t.Fatalf("Error when setting up test: %v", err)
@@ -178,7 +179,7 @@ func testProxyLoadBalancerService(t *testing.T, isIPv6 bool) {
     nodes := []string{nodeName(0), nodeName(1)}
     var busyboxes, busyboxIPs []string
     for idx, node := range nodes {
-        podName, ips, _ := createAndWaitForPod(t, data, data.createBusyboxPodOnNode, fmt.Sprintf("busybox-%d-", idx), node, testNamespace, false)
+        podName, ips, _ := createAndWaitForPod(t, data, data.createBusyboxPodOnNode, fmt.Sprintf("busybox-%d-", idx), node, data.testNamespace, false)
         busyboxes = append(busyboxes, podName)
         if !isIPv6 {
             busyboxIPs = append(busyboxIPs, ips.ipv4.String())
@@ -219,7 +220,7 @@ func testProxyLoadBalancerService(t *testing.T, isIPv6 bool) {
     // Delete agnhost Pods which are not on host network and create new agnhost Pods which are on host network.
     hostAgnhosts := []string{"agnhost-host-0", "agnhost-host-1"}
     for idx, node := range nodes {
-        require.NoError(t, data.DeletePod(testNamespace, agnhosts[idx]))
+        require.NoError(t, data.DeletePod(data.testNamespace, agnhosts[idx]))
         createAgnhostPod(t, data, hostAgnhosts[idx], node, true)
     }
     t.Run("HostNetwork Endpoints", func(t *testing.T) {
@@ -291,6 +292,7 @@ func testProxyNodePortService(t *testing.T, isIPv6 bool) {
     skipIfHasWindowsNodes(t)
     skipIfNumNodesLessThan(t, 2)
     skipIfProxyDisabled(t)
+
     data, err := setupTest(t)
     if err != nil {
         t.Fatalf("Error when setting up test: %v", err)
@@ -309,7 +311,7 @@ func testProxyNodePortService(t *testing.T, isIPv6 bool) {
     // Create a busybox Pod on every Node. The busybox Pod is used as a client.
     var busyboxes, busyboxIPs []string
     for idx, node := range nodes {
-        podName, ips, _ := createAndWaitForPod(t, data, data.createBusyboxPodOnNode, fmt.Sprintf("busybox-%d-", idx), node, testNamespace, false)
+        podName, ips, _ := createAndWaitForPod(t, data, data.createBusyboxPodOnNode, fmt.Sprintf("busybox-%d-", idx), node, data.testNamespace, false)
         busyboxes = append(busyboxes, podName)
         if !isIPv6 {
             busyboxIPs = append(busyboxIPs, ips.ipv4.String())
@@ -352,7 +354,7 @@ func testProxyNodePortService(t *testing.T, isIPv6 bool) {
     // Delete agnhost Pods which are not on host network and create new agnhost Pods which are on host network.
     hostAgnhosts := []string{"agnhost-host-0", "agnhost-host-1"}
     for idx, node := range nodes {
-        require.NoError(t, data.DeletePod(testNamespace, agnhosts[idx]))
+        require.NoError(t, data.DeletePod(data.testNamespace, agnhosts[idx]))
         createAgnhostPod(t, data, hostAgnhosts[idx], node, true)
     }
     t.Run("HostNetwork Endpoints", func(t *testing.T) {
@@ -427,9 +429,9 @@ func TestNodePortAndEgressWithTheSameBackendPod(t *testing.T) {
     // Create the backend Pod on control plane Node.
     backendPodName := "test-nodeport-egress-backend-pod"
-    require.NoError(t, data.createNginxPodOnNode(backendPodName, testNamespace, controlPlaneNodeName(), false))
-    defer deletePodWrapper(t, data, testNamespace, backendPodName)
-    if err := data.podWaitForRunning(defaultTimeout, backendPodName, testNamespace); err != nil {
+    require.NoError(t, data.createNginxPodOnNode(backendPodName, data.testNamespace, controlPlaneNodeName(), false))
+    defer deletePodWrapper(t, data, data.testNamespace, backendPodName)
+    if err := data.podWaitForRunning(defaultTimeout, backendPodName, data.testNamespace); err != nil {
         t.Fatalf("Error when waiting for Pod '%s' to be in the Running state", backendPodName)
     }
@@ -446,19 +448,19 @@ ip netns exec %[1]s ip link set dev %[1]s-a up && \
 ip netns exec %[1]s ip route replace default via %[3]s && \
 sleep 3600
 `, testNetns, "1.1.1.1", "1.1.1.254", 24)
-    if err := data.createPodOnNode(testPod, testNamespace, controlPlaneNodeName(), agnhostImage, []string{"sh", "-c", cmd}, nil, nil, nil, true, func(pod *corev1.Pod) {
+    if err := data.createPodOnNode(testPod, data.testNamespace, controlPlaneNodeName(), agnhostImage, []string{"sh", "-c", cmd}, nil, nil, nil, true, func(pod *corev1.Pod) {
         privileged := true
         pod.Spec.Containers[0].SecurityContext = &corev1.SecurityContext{Privileged: &privileged}
     }); err != nil {
         t.Fatalf("Failed to create client Pod: %v", err)
     }
-    defer deletePodWrapper(t, data, testNamespace, testPod)
-    if err := data.podWaitForRunning(defaultTimeout, testPod, testNamespace); err != nil {
+    defer deletePodWrapper(t, data, data.testNamespace, testPod)
+    if err := data.podWaitForRunning(defaultTimeout, testPod, data.testNamespace); err != nil {
         t.Fatalf("Error when waiting for Pod '%s' to be in the Running state", testPod)
     }
 
     // Connect to NodePort on control plane Node in the fake external network.
     cmd = fmt.Sprintf("ip netns exec %s curl --connect-timeout 1 --retry 5 --retry-connrefused %s", testNetns, testNodePortURL)
-    _, _, err = data.RunCommandFromPod(testNamespace, testPod, agnhostContainerName, []string{"sh", "-c", cmd})
+    _, _, err = data.RunCommandFromPod(data.testNamespace, testPod, agnhostContainerName, []string{"sh", "-c", cmd})
     require.NoError(t, err, "Service NodePort should be able to be connected from external network when Egress is enabled")
 }
@@ -472,10 +474,10 @@ func createAgnhostPod(t *testing.T, data *TestData, podName string, node string,
         },
     }
 
-    require.NoError(t, data.createPodOnNode(podName, testNamespace, node, agnhostImage, []string{}, args, nil, ports, hostNetwork, nil))
-    _, err := data.podWaitForIPs(defaultTimeout, podName, testNamespace)
+    require.NoError(t, data.createPodOnNode(podName, data.testNamespace, node, agnhostImage, []string{}, args, nil, ports, hostNetwork, nil))
+    _, err := data.podWaitForIPs(defaultTimeout, podName, data.testNamespace)
     require.NoError(t, err)
-    require.NoError(t, data.podWaitForRunning(defaultTimeout, podName, testNamespace))
+    require.NoError(t, data.podWaitForRunning(defaultTimeout, podName, data.testNamespace))
 }
 
 func testNodePortClusterFromRemote(t *testing.T, data *TestData, nodes, urls []string) {
@@ -539,6 +541,7 @@ func testNodePortLocalFromPod(t *testing.T, data *TestData, pods, urls, expected
 func TestProxyServiceSessionAffinity(t *testing.T) {
     skipIfHasWindowsNodes(t)
     skipIfProxyDisabled(t)
+
     data, err := setupTest(t)
     if err != nil {
         t.Fatalf("Error when setting up test: %v", err)
@@ -569,6 +572,7 @@ func testProxyExternalTrafficPolicy(t *testing.T, isIPv6 bool) {
     skipIfHasWindowsNodes(t)
     skipIfNumNodesLessThan(t, 2)
     skipIfProxyDisabled(t)
+
     data, err := setupTest(t)
     if err != nil {
         t.Fatalf("Error when setting up test: %v", err)
@@ -628,26 +632,26 @@ func testProxyServiceSessionAffinity(ipFamily *corev1.IPFamily, ingressIPs []str
     nodeName := nodeName(1)
     nginx := randName("nginx-")
 
-    require.NoError(t, data.createNginxPodOnNode(nginx, testNamespace, nodeName, false))
-    nginxIP, err := data.podWaitForIPs(defaultTimeout, nginx, testNamespace)
-    defer data.deletePodAndWait(defaultTimeout, nginx, testNamespace)
+    require.NoError(t, data.createNginxPodOnNode(nginx, data.testNamespace, nodeName, false))
+    nginxIP, err := data.podWaitForIPs(defaultTimeout, nginx, data.testNamespace)
+    defer data.deletePodAndWait(defaultTimeout, nginx, data.testNamespace)
     require.NoError(t, err)
-    require.NoError(t, data.podWaitForRunning(defaultTimeout, nginx, testNamespace))
-    svc, err := data.createNginxClusterIPService(nginx, testNamespace, true, ipFamily)
-    defer data.deleteServiceAndWait(defaultTimeout, nginx, testNamespace)
+    require.NoError(t, data.podWaitForRunning(defaultTimeout, nginx, data.testNamespace))
+    svc, err := data.createNginxClusterIPService(nginx, data.testNamespace, true, ipFamily)
+    defer data.deleteServiceAndWait(defaultTimeout, nginx, data.testNamespace)
     require.NoError(t, err)
     _, err = data.createNginxLoadBalancerService(true, ingressIPs, ipFamily)
-    defer data.deleteServiceAndWait(defaultTimeout, nginxLBService, testNamespace)
+    defer data.deleteServiceAndWait(defaultTimeout, nginxLBService, data.testNamespace)
     require.NoError(t, err)
 
     busyboxPod := randName("busybox-")
-    require.NoError(t, data.createBusyboxPodOnNode(busyboxPod, testNamespace, nodeName, false))
-    defer data.deletePodAndWait(defaultTimeout, busyboxPod, testNamespace)
-    require.NoError(t, data.podWaitForRunning(defaultTimeout, busyboxPod, testNamespace))
-    stdout, stderr, err := data.runWgetCommandOnBusyboxWithRetry(busyboxPod, testNamespace, svc.Spec.ClusterIP, 5)
+    require.NoError(t, data.createBusyboxPodOnNode(busyboxPod, data.testNamespace, nodeName, false))
+    defer data.deletePodAndWait(defaultTimeout, busyboxPod, data.testNamespace)
+    require.NoError(t, data.podWaitForRunning(defaultTimeout, busyboxPod, data.testNamespace))
+    stdout, stderr, err := data.runWgetCommandOnBusyboxWithRetry(busyboxPod, data.testNamespace, svc.Spec.ClusterIP, 5)
     require.NoError(t, err, fmt.Sprintf("ipFamily: %v\nstdout: %s\nstderr: %s\n", *ipFamily, stdout, stderr))
     for _, ingressIP := range ingressIPs {
-        stdout, stderr, err := data.runWgetCommandOnBusyboxWithRetry(busyboxPod, testNamespace, ingressIP, 5)
+        stdout, stderr, err := data.runWgetCommandOnBusyboxWithRetry(busyboxPod, data.testNamespace, ingressIP, 5)
         require.NoError(t, err, fmt.Sprintf("ipFamily: %v\nstdout: %s\nstderr: %s\n", *ipFamily, stdout, stderr))
     }
@@ -713,7 +717,7 @@ func testProxyHairpin(t *testing.T, isIPv6 bool) {
     // Create a ClusterIP Service.
     serviceClusterIP := fmt.Sprintf("clusterip-%v", isIPv6)
     clusterIPSvc, err := data.createAgnhostClusterIPService(serviceClusterIP, true, &ipProtocol)
-    defer data.deleteServiceAndWait(defaultTimeout, serviceClusterIP, testNamespace)
+    defer data.deleteServiceAndWait(defaultTimeout, serviceClusterIP, data.testNamespace)
     require.NoError(t, err)
 
     // Create two NodePort Services. The externalTrafficPolicy of one Service is Cluster, and the externalTrafficPolicy
@@ -722,7 +726,7 @@ func testProxyHairpin(t *testing.T, isIPv6 bool) {
     serviceNodePortCluster := fmt.Sprintf("nodeport-cluster-%v", isIPv6)
     serviceNodePortLocal := fmt.Sprintf("nodeport-local-%v", isIPv6)
     nodePortSvc, err := data.createAgnhostNodePortService(serviceNodePortCluster, true, false, &ipProtocol)
-    defer data.deleteServiceAndWait(defaultTimeout, serviceNodePortCluster, testNamespace)
+    defer data.deleteServiceAndWait(defaultTimeout, serviceNodePortCluster, data.testNamespace)
     require.NoError(t, err)
     for _, port := range nodePortSvc.Spec.Ports {
         if port.NodePort != 0 {
@@ -733,7 +737,7 @@ func testProxyHairpin(t *testing.T, isIPv6 bool) {
     require.NotEqual(t, "", nodePortCluster, "NodePort port number should not be empty")
     nodePortSvc, err = data.createAgnhostNodePortService(serviceNodePortLocal, true, true, &ipProtocol)
     require.NoError(t, err)
-    defer data.deleteServiceAndWait(defaultTimeout, serviceNodePortLocal, testNamespace)
+    defer data.deleteServiceAndWait(defaultTimeout, serviceNodePortLocal, data.testNamespace)
     for _, port := range nodePortSvc.Spec.Ports {
         if port.NodePort != 0 {
             nodePortLocal = fmt.Sprint(port.NodePort)
@@ -775,7 +779,7 @@ func testProxyHairpin(t *testing.T, isIPv6 bool) {
         testProxyIntraNodeHairpinCases(data, t, expectedGatewayIP, agnhost, clusterIPUrl, workerNodePortClusterUrl, workerNodePortLocalUrl, lbClusterUrl, lbLocalUrl)
         testProxyInterNodeHairpinCases(data, t, false, expectedControllerIP, nodeName(0), clusterIPUrl, controllerNodePortClusterUrl, lbClusterUrl)
     })
-    require.NoError(t, data.DeletePod(testNamespace, agnhost))
+    require.NoError(t, data.DeletePod(data.testNamespace, agnhost))
 
     agnhostHost := fmt.Sprintf("agnhost-host-%v", isIPv6)
     createAgnhostPod(t, data, agnhostHost, node, true)
@@ -896,6 +900,7 @@ func testProxyEndpointLifeCycleCase(t *testing.T, data *TestData) {
 func TestProxyEndpointLifeCycle(t *testing.T) {
     skipIfHasWindowsNodes(t)
     skipIfProxyDisabled(t)
+
     data, err := setupTest(t)
     if err != nil {
         t.Fatalf("Error when setting up test: %v", err)
@@ -915,11 +920,11 @@ func TestProxyEndpointLifeCycle(t *testing.T) {
 func testProxyEndpointLifeCycle(ipFamily *corev1.IPFamily, data *TestData, t *testing.T) {
     nodeName := nodeName(1)
     nginx := randName("nginx-")
-    require.NoError(t, data.createNginxPodOnNode(nginx, testNamespace, nodeName, false))
-    nginxIPs, err := data.podWaitForIPs(defaultTimeout, nginx, testNamespace)
+    require.NoError(t, data.createNginxPodOnNode(nginx, data.testNamespace, nodeName, false))
+    nginxIPs, err := data.podWaitForIPs(defaultTimeout, nginx, data.testNamespace)
     require.NoError(t, err)
-    _, err = data.createNginxClusterIPService(nginx, testNamespace, false, ipFamily)
-    defer data.deleteServiceAndWait(defaultTimeout, nginx, testNamespace)
+    _, err = data.createNginxClusterIPService(nginx, data.testNamespace, false, ipFamily)
+    defer data.deleteServiceAndWait(defaultTimeout, nginx, data.testNamespace)
     require.NoError(t, err)
 
     // Hold on to make sure that the Service is realized.
@@ -956,7 +961,7 @@ func testProxyEndpointLifeCycle(ipFamily *corev1.IPFamily, data *TestData, t *te
         require.Contains(t, groupOutput, k)
     }
 
-    require.NoError(t, data.deletePodAndWait(defaultTimeout, nginx, testNamespace))
+    require.NoError(t, data.deletePodAndWait(defaultTimeout, nginx, data.testNamespace))
 
     // Wait for one second to make sure the pipeline is updated.
     time.Sleep(time.Second)
@@ -988,6 +993,7 @@ func testProxyServiceLifeCycleCase(t *testing.T, data *TestData) {
 func TestProxyServiceLifeCycle(t *testing.T) {
     skipIfHasWindowsNodes(t)
     skipIfProxyDisabled(t)
+
     data, err := setupTest(t)
     if err != nil {
         t.Fatalf("Error when setting up test: %v", err)
@@ -1008,9 +1014,9 @@ func testProxyServiceLifeCycle(ipFamily *corev1.IPFamily, ingressIPs []string, d
     nodeName := nodeName(1)
     nginx := randName("nginx-")
 
-    require.NoError(t, data.createNginxPodOnNode(nginx, testNamespace, nodeName, false))
-    defer data.deletePodAndWait(defaultTimeout, nginx, testNamespace)
-    nginxIPs, err := data.podWaitForIPs(defaultTimeout, nginx, testNamespace)
+    require.NoError(t, data.createNginxPodOnNode(nginx, data.testNamespace, nodeName, false))
+    defer data.deletePodAndWait(defaultTimeout, nginx, data.testNamespace)
+    nginxIPs, err := data.podWaitForIPs(defaultTimeout, nginx, data.testNamespace)
     require.NoError(t, err)
     var nginxIP string
     if *ipFamily == corev1.IPv6Protocol {
@@ -1018,11 +1024,11 @@ func testProxyServiceLifeCycle(ipFamily *corev1.IPFamily, ingressIPs []string, d
     } else {
         nginxIP = nginxIPs.ipv4.String()
     }
-    svc, err := data.createNginxClusterIPService(nginx, testNamespace, false, ipFamily)
-    defer data.deleteServiceAndWait(defaultTimeout, nginx, testNamespace)
+    svc, err := data.createNginxClusterIPService(nginx, data.testNamespace, false, ipFamily)
+    defer data.deleteServiceAndWait(defaultTimeout, nginx, data.testNamespace)
     require.NoError(t, err)
     _, err = data.createNginxLoadBalancerService(false, ingressIPs, ipFamily)
-    defer data.deleteServiceAndWait(defaultTimeout, nginxLBService, testNamespace)
+    defer data.deleteServiceAndWait(defaultTimeout, nginxLBService, data.testNamespace)
     require.NoError(t, err)
     agentName, err := data.getAntreaPodOnNode(nodeName)
     require.NoError(t, err)
@@ -1075,8 +1081,8 @@ func testProxyServiceLifeCycle(ipFamily *corev1.IPFamily, ingressIPs []string, d
         }
     }
 
-    require.NoError(t, data.deleteService(testNamespace, nginx))
-    require.NoError(t, data.deleteService(testNamespace, nginxLBService))
+    require.NoError(t, data.deleteService(data.testNamespace, nginx))
+    require.NoError(t, data.deleteService(data.testNamespace, nginxLBService))
 
     // Hold on to make sure that the Service is realized.
     time.Sleep(3 * time.Second)
diff --git a/test/e2e/service_externalip_test.go b/test/e2e/service_externalip_test.go
index ccc8561b41c..9d1bc4f0391 100644
--- a/test/e2e/service_externalip_test.go
+++ b/test/e2e/service_externalip_test.go
@@ -226,7 +226,7 @@ func testServiceExternalTrafficPolicyLocal(t *testing.T, data *TestData) {
                 antreaagenttypes.ServiceExternalIPPoolAnnotationKey: ipPool.Name,
             }
             service, err = data.CreateServiceWithAnnotations(fmt.Sprintf("test-svc-local-%d", idx),
-                testNamespace, 80, 80, corev1.ProtocolTCP, nil, false, true, v1.ServiceTypeLoadBalancer, nil, annotation)
+                data.testNamespace, 80, 80, corev1.ProtocolTCP, nil, false, true, v1.ServiceTypeLoadBalancer, nil, annotation)
             require.NoError(t, err)
             defer data.clientset.CoreV1().Services(service.Namespace).Delete(context.TODO(), service.Name, metav1.DeleteOptions{})
 
@@ -343,7 +343,7 @@ func testServiceWithExternalIPCRUD(t *testing.T, data *TestData) {
                 antreaagenttypes.ServiceExternalIPPoolAnnotationKey: ipPool.Name,
             }
             service, err = data.CreateServiceWithAnnotations(fmt.Sprintf("test-svc-eip-%d", idx),
-                testNamespace, 80, 80, corev1.ProtocolTCP, nil, false, false, v1.ServiceTypeLoadBalancer, nil, annotation)
+                data.testNamespace, 80, 80, corev1.ProtocolTCP, nil, false, false, v1.ServiceTypeLoadBalancer, nil, annotation)
             require.NoError(t, err)
             defer data.clientset.CoreV1().Services(service.Namespace).Delete(context.TODO(), service.Name, metav1.DeleteOptions{})
 
@@ -435,7 +435,7 @@ func testServiceUpdateExternalIP(t *testing.T, data *TestData) {
                 antreaagenttypes.ServiceExternalIPPoolAnnotationKey: originalPool.Name,
             }
             service, err := data.CreateServiceWithAnnotations(fmt.Sprintf("test-update-eip-%d", idx),
-                testNamespace, 80, 80, corev1.ProtocolTCP, nil, false, false, v1.ServiceTypeLoadBalancer, nil, annotation)
+                data.testNamespace, 80, 80, corev1.ProtocolTCP, nil, false, false, v1.ServiceTypeLoadBalancer, nil, annotation)
             require.NoError(t, err)
             defer data.clientset.CoreV1().Services(service.Namespace).Delete(context.TODO(), service.Name, metav1.DeleteOptions{})
 
@@ -516,7 +516,7 @@ func testServiceNodeFailure(t *testing.T, data *TestData) {
     annotation := map[string]string{
         antreaagenttypes.ServiceExternalIPPoolAnnotationKey: externalIPPoolTwoNodes.Name,
     }
-    service, err := data.CreateServiceWithAnnotations("test-service-node-failure", testNamespace, 80, 80,
+    service, err := data.CreateServiceWithAnnotations("test-service-node-failure", data.testNamespace, 80, 80,
         corev1.ProtocolTCP, nil, false, false, v1.ServiceTypeLoadBalancer, nil, annotation)
     require.NoError(t, err)
     defer data.clientset.CoreV1().Services(service.Namespace).Delete(context.TODO(), service.Name, metav1.DeleteOptions{})
@@ -591,7 +591,7 @@ func testExternalIPAccess(t *testing.T, data *TestData) {
     // Create agnhost Pods on each Node.
     for idx, node := range nodes {
         createAgnhostPod(t, data, agnhosts[idx], node, false)
-        defer data.deletePodAndWait(defaultTimeout, agnhosts[idx], testNamespace)
+        defer data.deletePodAndWait(defaultTimeout, agnhosts[idx], data.testNamespace)
     }
     var port int32 = 8080
     externalIPTestCases := []struct {
@@ -635,7 +635,7 @@ func testExternalIPAccess(t *testing.T, data *TestData) {
             annotations := map[string]string{
                 antreaagenttypes.ServiceExternalIPPoolAnnotationKey: ipPool.Name,
             }
-            service, err := data.CreateServiceWithAnnotations(et.serviceName, testNamespace, port, port, corev1.ProtocolTCP, map[string]string{"app": "agnhost"}, false, et.externalTrafficPolicyLocal, corev1.ServiceTypeLoadBalancer, &ipFamily, annotations)
+            service, err := data.CreateServiceWithAnnotations(et.serviceName, data.testNamespace, port, port, corev1.ProtocolTCP, map[string]string{"app": "agnhost"}, false, et.externalTrafficPolicyLocal, corev1.ServiceTypeLoadBalancer, &ipFamily, annotations)
             require.NoError(t, err)
             defer data.deleteService(service.Namespace, service.Name)
 
@@ -656,7 +656,7 @@ sleep 3600`, tt.clientName, tt.clientIP, tt.localIP, tt.clientIPMaskLen)
 
                 baseUrl := net.JoinHostPort(externalIP, strconv.FormatInt(int64(port), 10))
 
-                require.NoError(t, data.createPodOnNode(tt.clientName, testNamespace, host, agnhostImage, []string{"sh", "-c", cmd}, nil, nil, nil, true, func(pod *v1.Pod) {
+                require.NoError(t, data.createPodOnNode(tt.clientName, data.testNamespace, host, agnhostImage, []string{"sh", "-c", cmd}, nil, nil, nil, true, func(pod *v1.Pod) {
                     privileged := true
                     pod.Spec.Containers[0].SecurityContext = &v1.SecurityContext{Privileged: &privileged}
                     delete(pod.Labels, "app")
@@ -675,7 +675,7 @@ sleep 3600`, tt.clientName, tt.clientIP, tt.localIP, tt.clientIPMaskLen)
                     }
                 }))
 
-                _, err = data.PodWaitFor(defaultTimeout, tt.clientName, testNamespace, func(p *v1.Pod) (bool, error) {
+                _, err = data.PodWaitFor(defaultTimeout, tt.clientName, data.testNamespace, func(p *v1.Pod) (bool, error) {
                     for _, condition := range p.Status.Conditions {
                         if condition.Type == corev1.PodReady {
                             return condition.Status == corev1.ConditionTrue, nil
@@ -684,11 +684,11 @@ sleep 3600`, tt.clientName, tt.clientIP, tt.localIP, tt.clientIPMaskLen)
                     return false, nil
                 })
                 require.NoError(t, err)
-                defer data.deletePodAndWait(defaultTimeout, tt.clientName, testNamespace)
+                defer data.deletePodAndWait(defaultTimeout, tt.clientName, data.testNamespace)
 
                 hostNameUrl := fmt.Sprintf("%s/%s", baseUrl, "hostname")
                 probeCmd := fmt.Sprintf("ip netns exec %s curl --connect-timeout 1 --retry 5 --retry-connrefused %s", tt.clientName, hostNameUrl)
-                hostname, stderr, err := data.RunCommandFromPod(testNamespace, tt.clientName, "", []string{"sh", "-c", probeCmd})
+                hostname, stderr, err := data.RunCommandFromPod(data.testNamespace, tt.clientName, "", []string{"sh", "-c", probeCmd})
                 assert.NoError(t, err, "External IP should be able to be connected from remote: %s", stderr)
 
                 if et.externalTrafficPolicyLocal {
@@ -699,7 +699,7 @@ sleep 3600`, tt.clientName, tt.clientIP, tt.localIP, tt.clientIPMaskLen)
                 }
                 clientIPUrl := fmt.Sprintf("%s/clientip", baseUrl)
                 probeClientIPCmd := fmt.Sprintf("ip netns exec %s curl --connect-timeout 1 --retry 5 --retry-connrefused %s", tt.clientName, clientIPUrl)
-                clientIPPort, stderr, err := data.RunCommandFromPod(testNamespace, tt.clientName, "", []string{"sh", "-c", probeClientIPCmd})
+                clientIPPort, stderr, err := data.RunCommandFromPod(data.testNamespace, tt.clientName, "", []string{"sh", "-c", probeClientIPCmd})
                 assert.NoError(t, err, "External IP should be able to be connected from remote: %s", stderr)
                 clientIP, _, err := net.SplitHostPort(clientIPPort)
                 assert.NoError(t, err)
diff --git a/test/e2e/service_test.go b/test/e2e/service_test.go
index 6887a8f4325..267a6d732be 100644
--- a/test/e2e/service_test.go
+++ b/test/e2e/service_test.go
@@ -38,13 +38,14 @@ func TestClusterIPv6(t *testing.T) {
 
 func testClusterIP(t *testing.T, isIPv6 bool) {
     skipIfNumNodesLessThan(t, 2)
+
     data, err := setupTest(t)
     if err != nil {
         t.Fatalf("Error when setting up test: %v", err)
     }
     defer teardownTest(t, data)
 
-    data.testClusterIP(t, isIPv6, testNamespace, testNamespace)
+    data.testClusterIP(t, isIPv6, data.testNamespace, data.testNamespace)
 }
 
 func (data *TestData) testClusterIP(t *testing.T, isIPv6 bool, clientNamespace, serverNamespace string) {
@@ -84,7 +85,7 @@ func (data *TestData) testClusterIP(t *testing.T, isIPv6 bool, clientNamespace,
     _, _, cleanupFunc = createAndWaitForPod(t, data, data.createNginxPodOnNode, hostNginx, nodeName(0), serverNamespace, true)
     defer cleanupFunc()
     t.Run("HostNetwork Endpoints", func(t *testing.T) {
-        skipIfNamespaceIsNotEqual(t, serverNamespace, testNamespace)
+        skipIfNamespaceIsNotEqual(t, serverNamespace, data.testNamespace)
         testClusterIPCases(t, data, url, clients, hostNetworkClients, clientNamespace)
     })
 }
@@ -93,7 +94,7 @@ func testClusterIPCases(t *testing.T, data *TestData, url string, clients, hostN
     t.Run("All Nodes can access Service ClusterIP", func(t *testing.T) {
         skipIfProxyAllDisabled(t, data)
         skipIfKubeProxyEnabled(t, data)
-        skipIfNamespaceIsNotEqual(t, namespace, testNamespace)
+        skipIfNamespaceIsNotEqual(t, namespace, data.testNamespace)
         for node, pod := range hostNetworkClients {
             testClusterIPFromPod(t, data, url, node, pod, true, namespace)
         }
@@ -136,7 +137,7 @@ func TestNodePortWindows(t *testing.T) {
     }
     defer teardownTest(t, data)
 
-    data.testNodePort(t, true, testNamespace, testNamespace)
+    data.testNodePort(t, true, data.testNamespace, data.testNamespace)
 }
 
 func (data *TestData) testNodePort(t *testing.T, isWindows bool, clientNamespace, serverNamespace string) {
diff --git a/test/e2e/traceflow_test.go b/test/e2e/traceflow_test.go
index b2cc0235ed7..2531dc946c6 100644
--- a/test/e2e/traceflow_test.go
+++ b/test/e2e/traceflow_test.go
@@ -102,7 +102,7 @@ func testTraceflowIntraNodeANP(t *testing.T, data *TestData) {
         nodeIdx = clusterInfo.windowsNodes[0]
     }
     node1 := nodeName(nodeIdx)
-    node1Pods, _, node1CleanupFn := createTestAgnhostPods(t, data, 3, testNamespace, node1)
+    node1Pods, _, node1CleanupFn := createTestAgnhostPods(t, data, 3, data.testNamespace, node1)
     defer node1CleanupFn()
 
     var denyIngress *v1alpha1.NetworkPolicy
@@ -115,7 +115,7 @@ func testTraceflowIntraNodeANP(t *testing.T, data *TestData) {
             t.Errorf("Error when deleting Antrea NetworkPolicy: %v", err)
         }
     }()
-    if err = data.waitForANPRealized(t, testNamespace, denyIngressName); err != nil {
+    if err = data.waitForANPRealized(t, data.testNamespace, denyIngressName); err != nil {
         t.Fatal(err)
     }
     var rejectIngress *v1alpha1.NetworkPolicy
@@ -128,7 +128,7 @@ func testTraceflowIntraNodeANP(t *testing.T, data *TestData) {
             t.Errorf("Error when deleting Antrea NetworkPolicy: %v", err)
         }
     }()
-    if err = data.waitForANPRealized(t, testNamespace, rejectIngressName); err != nil {
+    if err = data.waitForANPRealized(t, data.testNamespace, rejectIngressName); err != nil {
         t.Fatal(err)
     }
 
@@ -138,15 +138,15 @@ func testTraceflowIntraNodeANP(t *testing.T, data *TestData) {
             ipVersion: 4,
             tf: &v1alpha1.Traceflow{
                 ObjectMeta: metav1.ObjectMeta{
-                    Name: randName(fmt.Sprintf("%s-%s-to-%s-%s-", testNamespace, node1Pods[0], testNamespace, node1Pods[1])),
+                    Name: randName(fmt.Sprintf("%s-%s-to-%s-%s-", data.testNamespace, node1Pods[0], data.testNamespace, node1Pods[1])),
                 },
                 Spec: v1alpha1.TraceflowSpec{
                     Source: v1alpha1.Source{
-                        Namespace: testNamespace,
+                        Namespace: data.testNamespace,
                         Pod:       node1Pods[0],
                     },
                     Destination: v1alpha1.Destination{
-                        Namespace: testNamespace,
+                        Namespace: data.testNamespace,
                         Pod:       node1Pods[1],
                     },
                     Packet: v1alpha1.Packet{
@@ -186,15 +186,15 @@ func testTraceflowIntraNodeANP(t *testing.T, data *TestData) {
             ipVersion: 4,
             tf: &v1alpha1.Traceflow{
                 ObjectMeta: metav1.ObjectMeta{
-                    Name: randName(fmt.Sprintf("%s-%s-to-%s-%s-", testNamespace, node1Pods[0], testNamespace, node1Pods[2])),
+                    Name: randName(fmt.Sprintf("%s-%s-to-%s-%s-", data.testNamespace, node1Pods[0], data.testNamespace, node1Pods[2])),
                 },
                 Spec: v1alpha1.TraceflowSpec{
                     Source: v1alpha1.Source{
-                        Namespace: testNamespace,
+                        Namespace: data.testNamespace,
                         Pod:       node1Pods[0],
                     },
                     Destination: v1alpha1.Destination{
-                        Namespace: testNamespace,
+                        Namespace: data.testNamespace,
                         Pod:       node1Pods[2],
                     },
                     Packet: v1alpha1.Packet{
@@ -234,15 +234,15 @@ func testTraceflowIntraNodeANP(t *testing.T, data *TestData) {
             ipVersion: 6,
             tf: &v1alpha1.Traceflow{
                 ObjectMeta: metav1.ObjectMeta{
-                    Name: randName(fmt.Sprintf("%s-%s-to-%s-%s-", testNamespace, node1Pods[0], testNamespace, node1Pods[1])),
+                    Name: randName(fmt.Sprintf("%s-%s-to-%s-%s-", data.testNamespace, node1Pods[0], data.testNamespace, node1Pods[1])),
                 },
                 Spec: v1alpha1.TraceflowSpec{
                     Source: v1alpha1.Source{
-                        Namespace: testNamespace,
+                        Namespace: data.testNamespace,
                         Pod:       node1Pods[0],
                     },
                     Destination: v1alpha1.Destination{
-                        Namespace: testNamespace,
+                        Namespace: data.testNamespace,
                         Pod:       node1Pods[1],
                     },
                     Packet: v1alpha1.Packet{
@@ -299,7 +299,7 @@ func testTraceflowIntraNode(t *testing.T, data *TestData) {
     node1 := nodeName(nodeIdx)
     agentPod, _ := data.getAntreaPodOnNode(node1)
 
-    node1Pods, node1IPs, node1CleanupFn := createTestAgnhostPods(t, data, 3, testNamespace, node1)
+    node1Pods, node1IPs, node1CleanupFn := createTestAgnhostPods(t, data, 3, data.testNamespace, node1)
     defer node1CleanupFn()
     var pod0IPv4Str, pod1IPv4Str, dstPodIPv4Str, dstPodIPv6Str string
     if node1IPs[0].ipv4 != nil {
@@ -366,15 +366,15 @@ func testTraceflowIntraNode(t *testing.T, data *TestData) {
             ipVersion: 4,
             tf: &v1alpha1.Traceflow{
                 ObjectMeta: metav1.ObjectMeta{
-                    Name: randName(fmt.Sprintf("%s-%s-to-%s-%s-", testNamespace, node1Pods[0], testNamespace, node1Pods[1])),
+                    Name: randName(fmt.Sprintf("%s-%s-to-%s-%s-", data.testNamespace, node1Pods[0], data.testNamespace, node1Pods[1])),
                 },
                 Spec: v1alpha1.TraceflowSpec{
                     Source: v1alpha1.Source{
-                        Namespace: testNamespace,
+                        Namespace: data.testNamespace,
                         Pod:       node1Pods[0],
                     },
                     Destination: v1alpha1.Destination{
-                        Namespace: testNamespace,
+                        Namespace: data.testNamespace,
                         Pod:       node1Pods[1],
                     },
                     Packet: v1alpha1.Packet{
@@ -419,15 +419,15 @@ func testTraceflowIntraNode(t *testing.T, data *TestData) {
             ipVersion: 4,
             tf: &v1alpha1.Traceflow{
                 ObjectMeta: metav1.ObjectMeta{
-                    Name: randName(fmt.Sprintf("%s-%s-to-%s-", testNamespace, node1Pods[0], node1Pods[2])),
+                    Name: randName(fmt.Sprintf("%s-%s-to-%s-", data.testNamespace, node1Pods[0], node1Pods[2])),
                 },
                 Spec: v1alpha1.TraceflowSpec{
                     Source: v1alpha1.Source{
-                        Namespace: testNamespace,
+                        Namespace: data.testNamespace,
                         Pod:       node1Pods[0],
                     },
                     Destination: v1alpha1.Destination{
-                        Namespace: testNamespace,
+                        Namespace: data.testNamespace,
                         Pod:       node1Pods[2],
                     },
                     Packet: v1alpha1.Packet{
@@ -471,11 +471,11 @@ func testTraceflowIntraNode(t *testing.T, data *TestData) {
             ipVersion: 4,
             tf: &v1alpha1.Traceflow{
                 ObjectMeta: metav1.ObjectMeta{
-                    Name: randName(fmt.Sprintf("%s-%s-to-%s-", testNamespace, node1Pods[0], dstPodIPv4Str)),
+                    Name: randName(fmt.Sprintf("%s-%s-to-%s-", data.testNamespace, node1Pods[0], dstPodIPv4Str)),
                 },
                 Spec: v1alpha1.TraceflowSpec{
                     Source: v1alpha1.Source{
-                        Namespace: testNamespace,
+                        Namespace: data.testNamespace,
                         Pod:       node1Pods[0],
                     },
                     Destination: v1alpha1.Destination{
@@ -522,11 +522,11 @@ func testTraceflowIntraNode(t *testing.T, data *TestData) {
             ipVersion: 4,
             tf: &v1alpha1.Traceflow{
                 ObjectMeta: metav1.ObjectMeta{
-                    Name: randName(fmt.Sprintf("%s-%s-to-%s-", testNamespace, node1Pods[0], dstPodIPv4Str)),
+                    Name: randName(fmt.Sprintf("%s-%s-to-%s-", data.testNamespace, node1Pods[0], dstPodIPv4Str)),
                 },
                 Spec: v1alpha1.TraceflowSpec{
                     Source: v1alpha1.Source{
-                        Namespace: testNamespace,
+                        Namespace: data.testNamespace,
                         Pod:       node1Pods[0],
                     },
                     Destination: v1alpha1.Destination{
@@ -567,15 +567,15 @@ func testTraceflowIntraNode(t *testing.T, data *TestData) {
             ipVersion: 4,
             tf: &v1alpha1.Traceflow{
                 ObjectMeta: metav1.ObjectMeta{
-                    Name: randName(fmt.Sprintf("%s-%s-to-%s-%s-", testNamespace, node1Pods[0], testNamespace, "non-existing-pod")),
+                    Name: randName(fmt.Sprintf("%s-%s-to-%s-%s-", data.testNamespace, node1Pods[0], data.testNamespace, "non-existing-pod")),
                 },
                 Spec: v1alpha1.TraceflowSpec{
                     Source: v1alpha1.Source{
-                        Namespace: testNamespace,
+                        Namespace: data.testNamespace,
                         Pod:       node1Pods[0],
                     },
                     Destination: v1alpha1.Destination{
-                        Namespace: testNamespace,
+                        Namespace: data.testNamespace,
                         Pod:       "non-existing-pod",
                     },
                 },
@@ -588,28 +588,28 @@ func testTraceflowIntraNode(t *testing.T, data *TestData) {
             ipVersion: 4,
             tf: &v1alpha1.Traceflow{
                 ObjectMeta: metav1.ObjectMeta{
-                    Name: randName(fmt.Sprintf("%s-%s-to-%s-%s-", testNamespace, "non-existing-pod", testNamespace, node1Pods[1])),
+                    Name: randName(fmt.Sprintf("%s-%s-to-%s-%s-", data.testNamespace, "non-existing-pod", data.testNamespace, node1Pods[1])),
                 },
                 Spec: v1alpha1.TraceflowSpec{
                     Source: v1alpha1.Source{
-                        Namespace: testNamespace,
+                        Namespace: data.testNamespace,
                         Pod:       "non-existing-pod",
                     },
                     Destination: v1alpha1.Destination{
-                        Namespace: testNamespace,
+                        Namespace: data.testNamespace,
                         Pod:       node1Pods[1],
                     },
                 },
             },
             expectedPhase:   v1alpha1.Failed,
-            expectedReasons: []string{fmt.Sprintf("Invalid Traceflow request, err: %+v", fmt.Errorf("requested source Pod %s not found", k8s.NamespacedName(testNamespace, "non-existing-pod")))},
+            expectedReasons: []string{fmt.Sprintf("Invalid Traceflow request, err: %+v", fmt.Errorf("requested source Pod %s not found", k8s.NamespacedName(data.testNamespace, "non-existing-pod")))},
         },
         {
             name:      "hostNetworkSrcPodIPv4",
             ipVersion: 4,
             tf: &v1alpha1.Traceflow{
                 ObjectMeta: metav1.ObjectMeta{
-                    Name: randName(fmt.Sprintf("%s-%s-to-%s-%s-", antreaNamespace, agentPod, testNamespace, node1Pods[1])),
+                    Name: randName(fmt.Sprintf("%s-%s-to-%s-%s-", antreaNamespace, agentPod, data.testNamespace, node1Pods[1])),
                 },
                 Spec: v1alpha1.TraceflowSpec{
                     Source: v1alpha1.Source{
@@ -617,7 +617,7 @@ func testTraceflowIntraNode(t *testing.T, data *TestData) {
                         Pod:       agentPod,
                     },
                     Destination: v1alpha1.Destination{
-                        Namespace: testNamespace,
+                        Namespace: data.testNamespace,
                         Pod:       node1Pods[1],
                     },
                 },
@@ -630,11 +630,11 @@ func testTraceflowIntraNode(t *testing.T, data *TestData) {
             ipVersion: 4,
             tf: &v1alpha1.Traceflow{
                 ObjectMeta: metav1.ObjectMeta{
-                    Name: randName(fmt.Sprintf("%s-%s-to-%s-", testNamespace, node1Pods[0], dstPodIPv4Str)),
+                    Name: randName(fmt.Sprintf("%s-%s-to-%s-", data.testNamespace, node1Pods[0], dstPodIPv4Str)),
                 },
                 Spec: v1alpha1.TraceflowSpec{
                     Source: v1alpha1.Source{
-                        Namespace: testNamespace,
+                        Namespace: data.testNamespace,
                         Pod:       node1Pods[0],
                     },
                     Destination: v1alpha1.Destination{
@@ -681,14 +681,14 @@ func testTraceflowIntraNode(t *testing.T, data *TestData) {
             ipVersion: 4,
             tf: &v1alpha1.Traceflow{
                 ObjectMeta: metav1.ObjectMeta{
-                    Name: randName(fmt.Sprintf("%s-%s-to-%s-", testNamespace, pod0IPv4Str, node1Pods[1])),
+                    Name: randName(fmt.Sprintf("%s-%s-to-%s-", data.testNamespace, pod0IPv4Str, node1Pods[1])),
                 },
                 Spec: v1alpha1.TraceflowSpec{
                     Source: v1alpha1.Source{
                         IP: pod0IPv4Str,
                     },
                     Destination: v1alpha1.Destination{
-                        Namespace: testNamespace,
+                        Namespace: data.testNamespace,
                         Pod:       node1Pods[1],
                     },
                     Packet: v1alpha1.Packet{
@@ -733,15 +733,15 @@ func testTraceflowIntraNode(t *testing.T, data *TestData) {
             ipVersion: 6,
             tf: &v1alpha1.Traceflow{
                 ObjectMeta: metav1.ObjectMeta{
-                    Name: randName(fmt.Sprintf("%s-%s-to-%s-%s-", testNamespace, node1Pods[0], testNamespace, node1Pods[1])),
+                    Name: randName(fmt.Sprintf("%s-%s-to-%s-%s-", data.testNamespace, node1Pods[0], data.testNamespace, node1Pods[1])),
                 },
                 Spec: v1alpha1.TraceflowSpec{
                     Source: v1alpha1.Source{
-                        Namespace: testNamespace,
+                        Namespace: data.testNamespace,
                         Pod:       node1Pods[0],
                     },
                     Destination: v1alpha1.Destination{
-                        Namespace: testNamespace,
+                        Namespace: data.testNamespace,
                         Pod:       node1Pods[1],
                     },
                     Packet: v1alpha1.Packet{
@@ -786,15 +786,15 @@ func testTraceflowIntraNode(t *testing.T, data *TestData) {
             ipVersion: 6,
             tf: &v1alpha1.Traceflow{
                 ObjectMeta: metav1.ObjectMeta{
-                    Name: randName(fmt.Sprintf("%s-%s-to-%s-", testNamespace, node1Pods[0], node1Pods[2])),
+                    Name: randName(fmt.Sprintf("%s-%s-to-%s-", data.testNamespace, node1Pods[0], node1Pods[2])),
                 },
                 Spec: v1alpha1.TraceflowSpec{
                     Source: v1alpha1.Source{
-                        Namespace: testNamespace,
+                        Namespace: data.testNamespace,
                         Pod:       node1Pods[0],
                     },
                     Destination: v1alpha1.Destination{
-                        Namespace: testNamespace,
+                        Namespace: data.testNamespace,
                         Pod:       node1Pods[2],
                     },
                     Packet: v1alpha1.Packet{
@@ -838,11 +838,11 @@ func testTraceflowIntraNode(t *testing.T, data *TestData) {
             ipVersion: 6,
             tf: &v1alpha1.Traceflow{
                 ObjectMeta: metav1.ObjectMeta{
-                    Name: randName(fmt.Sprintf("%s-%s-to-%s-", testNamespace, node1Pods[0], strings.ReplaceAll(dstPodIPv6Str, ":", "--"))),
+                    Name: randName(fmt.Sprintf("%s-%s-to-%s-", data.testNamespace, node1Pods[0], strings.ReplaceAll(dstPodIPv6Str, ":", "--"))),
                 },
                 Spec: v1alpha1.TraceflowSpec{
                     Source: v1alpha1.Source{
-                        Namespace: testNamespace,
+                        Namespace: data.testNamespace,
                         Pod:       node1Pods[0],
                     },
                     Destination: v1alpha1.Destination{
@@ -889,11 +889,11 @@ func testTraceflowIntraNode(t *testing.T, data *TestData) {
             ipVersion: 6,
             tf: &v1alpha1.Traceflow{
                 ObjectMeta: metav1.ObjectMeta{
-                    Name: randName(fmt.Sprintf("%s-%s-to-%s-", testNamespace, node1Pods[0], strings.ReplaceAll(dstPodIPv6Str, ":", "--"))),
+                    Name: randName(fmt.Sprintf("%s-%s-to-%s-", data.testNamespace, node1Pods[0], strings.ReplaceAll(dstPodIPv6Str, ":", "--"))),
                 },
                 Spec: v1alpha1.TraceflowSpec{
                     Source: v1alpha1.Source{
-                        Namespace: testNamespace,
+                        Namespace: data.testNamespace,
                         Pod:       node1Pods[0],
                     },
                     Destination: v1alpha1.Destination{
@@ -934,15 +934,15 @@ func testTraceflowIntraNode(t *testing.T, data *TestData) {
             ipVersion: 6,
             tf: &v1alpha1.Traceflow{
                 ObjectMeta: metav1.ObjectMeta{
-                    Name: randName(fmt.Sprintf("%s-%s-to-%s-%s-", testNamespace, node1Pods[0], testNamespace, "non-existing-pod")),
+                    Name: randName(fmt.Sprintf("%s-%s-to-%s-%s-", data.testNamespace, node1Pods[0], data.testNamespace, "non-existing-pod")),
                 },
                 Spec: v1alpha1.TraceflowSpec{
                     Source: v1alpha1.Source{
-                        Namespace: testNamespace,
+                        Namespace: data.testNamespace,
                         Pod:       node1Pods[0],
                     },
                     Destination: v1alpha1.Destination{
-                        Namespace: testNamespace,
+                        Namespace: data.testNamespace,
                         Pod:       "non-existing-pod",
                     },
                     Packet: v1alpha1.Packet{
@@ -960,11 +960,11 @@ func testTraceflowIntraNode(t *testing.T, data *TestData) {
             ipVersion: 6,
             tf: &v1alpha1.Traceflow{
                 ObjectMeta: metav1.ObjectMeta{
-                    Name: randName(fmt.Sprintf("%s-%s-to-%s-", testNamespace, node1Pods[0], strings.ReplaceAll(dstPodIPv6Str, ":", "--"))),
+                    Name: randName(fmt.Sprintf("%s-%s-to-%s-", data.testNamespace, node1Pods[0], strings.ReplaceAll(dstPodIPv6Str, ":", "--"))),
                 },
                 Spec: v1alpha1.TraceflowSpec{
                     Source: v1alpha1.Source{
-                        Namespace: testNamespace,
+                        Namespace: data.testNamespace,
                         Pod:       node1Pods[0],
                     },
                     Destination: v1alpha1.Destination{
@@ -1010,11 +1010,11 @@ func testTraceflowIntraNode(t *testing.T, data *TestData) {
             ipVersion: 4,
             tf: &v1alpha1.Traceflow{
                 ObjectMeta: metav1.ObjectMeta{
-                    Name: randName(fmt.Sprintf("%s-%s-to-%s-", testNamespace, node1Pods[0], gwIPv4Str)),
+                    Name: randName(fmt.Sprintf("%s-%s-to-%s-", data.testNamespace, node1Pods[0], gwIPv4Str)),
                 },
                 Spec: v1alpha1.TraceflowSpec{
                     Source: v1alpha1.Source{
-                        Namespace: testNamespace,
+                        Namespace: data.testNamespace,
                         Pod:       node1Pods[0],
                     },
                     Destination: v1alpha1.Destination{
@@ -1058,11 +1058,11 @@ func testTraceflowIntraNode(t *testing.T, data *TestData) {
             ipVersion: 6,
             tf: &v1alpha1.Traceflow{
                 ObjectMeta: metav1.ObjectMeta{
-                    Name: randName(fmt.Sprintf("%s-%s-to-%s-", testNamespace, node1Pods[0], strings.ReplaceAll(gwIPv6Str, ":", "--"))),
+                    Name: randName(fmt.Sprintf("%s-%s-to-%s-", data.testNamespace, node1Pods[0], strings.ReplaceAll(gwIPv6Str, ":", "--"))),
                 },
                 Spec: v1alpha1.TraceflowSpec{
                     Source: v1alpha1.Source{
-                        Namespace: testNamespace,
+                        Namespace: data.testNamespace,
                         Pod:       node1Pods[0],
                     },
                     Destination: v1alpha1.Destination{
@@ -1124,8 +1124,8 @@ func testTraceflowInterNode(t *testing.T, data *TestData) {
     node1 := nodeName(nodeIdx0)
     node2 := nodeName(nodeIdx1)
 
-    node1Pods, _, node1CleanupFn := createTestAgnhostPods(t, data, 1, testNamespace, node1)
-    node2Pods, node2IPs, node2CleanupFn := createTestAgnhostPods(t, data, 3, testNamespace, node2)
+    node1Pods, _, node1CleanupFn := createTestAgnhostPods(t, data, 1, data.testNamespace, node1)
+    node2Pods, node2IPs, node2CleanupFn := createTestAgnhostPods(t, data, 3, data.testNamespace, node2)
     gatewayIPv4, gatewayIPv6 := nodeGatewayIPs(1)
     defer node1CleanupFn()
     defer node2CleanupFn()
@@ -1143,22 +1143,22 @@ func testTraceflowInterNode(t *testing.T, data *TestData) {
     mutateFunc := func(pod *corev1.Pod) {
         pod.Labels["app"] = "agnhost-server"
     }
-    require.NoError(t, data.createPodOnNode(agnhostPodName, testNamespace, node2, agnhostImage, []string{"sleep", strconv.Itoa(3600)}, nil, nil, nil, false, mutateFunc))
-    agnhostIP, err := data.podWaitForIPs(defaultTimeout, agnhostPodName, testNamespace)
+    require.NoError(t, data.createPodOnNode(agnhostPodName, data.testNamespace, node2, agnhostImage, []string{"sleep", strconv.Itoa(3600)}, nil, nil, nil, false, mutateFunc))
+    agnhostIP, err := data.podWaitForIPs(defaultTimeout, agnhostPodName, data.testNamespace)
     require.NoError(t, err)
 
     var agnhostIPv4Str, agnhostIPv6Str, svcIPv4Name, svcIPv6Name string
     if agnhostIP.ipv4 != nil {
         agnhostIPv4Str = agnhostIP.ipv4.String()
         ipv4Protocol := corev1.IPv4Protocol
-        svcIPv4, err := data.CreateService("agnhost-ipv4", testNamespace, 80, 8080, map[string]string{"app": "agnhost-server"}, false, false, corev1.ServiceTypeClusterIP, &ipv4Protocol)
+        svcIPv4, err := data.CreateService("agnhost-ipv4", data.testNamespace, 80, 8080, map[string]string{"app": "agnhost-server"}, false, false, corev1.ServiceTypeClusterIP, &ipv4Protocol)
         require.NoError(t, err)
         svcIPv4Name = svcIPv4.Name
     }
     if agnhostIP.ipv6 != nil {
         agnhostIPv6Str = agnhostIP.ipv6.String()
         ipv6Protocol := corev1.IPv6Protocol
-        svcIPv6, err := data.CreateService("agnhost-ipv6", testNamespace, 80, 8080, map[string]string{"app": "agnhost-server"}, false, false, corev1.ServiceTypeClusterIP, &ipv6Protocol)
+        svcIPv6, err := data.CreateService("agnhost-ipv6", data.testNamespace, 80, 8080, map[string]string{"app": "agnhost-server"}, false, false, corev1.ServiceTypeClusterIP, &ipv6Protocol)
         require.NoError(t, err)
         svcIPv6Name = svcIPv6.Name
     }
@@ -1170,10 +1170,10 @@ func testTraceflowInterNode(t *testing.T, data *TestData) {
     if isWindows {
         podInfos := make([]podInfo, 2)
         podInfos[0].name = node1Pods[0]
-        podInfos[0].namespace = testNamespace
+        podInfos[0].namespace = data.testNamespace
         podInfos[0].os = "windows"
         podInfos[1].name = node2Pods[2]
-        podInfos[1].namespace = testNamespace
+        podInfos[1].namespace = data.testNamespace
         podInfos[1].os = "windows"
         data.runPingMesh(t, podInfos, agnhostContainerName)
     }
@@ -1217,15 +1217,15 @@ func testTraceflowInterNode(t *testing.T, data *TestData) {
             ipVersion: 4,
             tf: &v1alpha1.Traceflow{
                 ObjectMeta: metav1.ObjectMeta{
-                    Name: randName(fmt.Sprintf("%s-%s-to-%s-%s-", testNamespace, node1Pods[0], testNamespace, node2Pods[0])),
+                    Name: randName(fmt.Sprintf("%s-%s-to-%s-%s-", data.testNamespace, node1Pods[0], data.testNamespace, node2Pods[0])),
                 },
                 Spec: v1alpha1.TraceflowSpec{
                     Source: v1alpha1.Source{
-                        Namespace: testNamespace,
+                        Namespace: data.testNamespace,
                         Pod:       node1Pods[0],
                     },
                     Destination: v1alpha1.Destination{
-                        Namespace: testNamespace,
+                        Namespace: data.testNamespace,
                         Pod:       node2Pods[0],
                     },
                     Packet: v1alpha1.Packet{
@@ -1284,11 +1284,11 @@ func testTraceflowInterNode(t *testing.T, data *TestData) {
             ipVersion: 4,
             tf: &v1alpha1.Traceflow{
                 ObjectMeta: metav1.ObjectMeta{
-                    Name: randName(fmt.Sprintf("%s-%s-to-%s-", testNamespace, node1Pods[0], dstPodIPv4Str)),
+                    Name: randName(fmt.Sprintf("%s-%s-to-%s-", data.testNamespace, node1Pods[0], dstPodIPv4Str)),
                 },
                 Spec: v1alpha1.TraceflowSpec{
                     Source: v1alpha1.Source{
-                        Namespace: testNamespace,
+                        Namespace: data.testNamespace,
                         Pod:       node1Pods[0],
                     },
                     Destination: v1alpha1.Destination{
@@ -1349,15 +1349,15 @@ func testTraceflowInterNode(t *testing.T, data *TestData) {
             ipVersion: 4,
             tf: &v1alpha1.Traceflow{
                 ObjectMeta: metav1.ObjectMeta{
-                    Name: randName(fmt.Sprintf("%s-%s-to-%s-", testNamespace, node1Pods[0], node2Pods[1])),
+                    Name: randName(fmt.Sprintf("%s-%s-to-%s-", data.testNamespace, node1Pods[0], node2Pods[1])),
                 },
                 Spec: v1alpha1.TraceflowSpec{
                     Source: v1alpha1.Source{
-                        Namespace: testNamespace,
+                        Namespace: data.testNamespace,
                         Pod:       node1Pods[0],
                     },
                     Destination: v1alpha1.Destination{
-                        Namespace: testNamespace,
+                        Namespace: data.testNamespace,
                         Pod:       node2Pods[1],
                     },
                     Packet: v1alpha1.Packet{
@@ -1412,15 +1412,15 @@ func testTraceflowInterNode(t *testing.T, data *TestData) {
             ipVersion: 4,
             tf: &v1alpha1.Traceflow{
                 ObjectMeta: metav1.ObjectMeta{
-                    Name: randName(fmt.Sprintf("%s-%s-to-svc-%s-", testNamespace, node1Pods[0], svcIPv4Name)),
+                    Name: randName(fmt.Sprintf("%s-%s-to-svc-%s-", data.testNamespace, node1Pods[0], svcIPv4Name)),
                 },
                 Spec: v1alpha1.TraceflowSpec{
                     Source: v1alpha1.Source{
-                        Namespace: testNamespace,
+                        Namespace: data.testNamespace,
                         Pod:       node1Pods[0],
                     },
                     Destination: v1alpha1.Destination{
-                        Namespace: testNamespace,
+                        Namespace: data.testNamespace,
                         Service:   svcIPv4Name,
                     },
                     Packet: v1alpha1.Packet{
@@ -1448,7 +1448,7 @@ func testTraceflowInterNode(t *testing.T, data *TestData) {
                     },
                     {
                         Component:       v1alpha1.ComponentLB,
-                        Pod:             fmt.Sprintf("%s/%s", testNamespace, agnhostPodName),
+                        Pod:             fmt.Sprintf("%s/%s", data.testNamespace, agnhostPodName),
                         TranslatedDstIP: agnhostIPv4Str,
                         Action:          v1alpha1.ActionForwarded,
                     },
@@ -1488,15 +1488,15 @@ func testTraceflowInterNode(t *testing.T, data *TestData) {
             ipVersion: 4,
             tf: &v1alpha1.Traceflow{
                 ObjectMeta: metav1.ObjectMeta{
-                    Name: randName(fmt.Sprintf("%s-%s-to-svc-%s-", testNamespace, agnhostPodName, svcIPv4Name)),
+                    Name: randName(fmt.Sprintf("%s-%s-to-svc-%s-", data.testNamespace, agnhostPodName, svcIPv4Name)),
                 },
                 Spec: v1alpha1.TraceflowSpec{
                     Source: v1alpha1.Source{
-                        Namespace: testNamespace,
+                        Namespace: data.testNamespace,
                         Pod:       agnhostPodName,
                     },
                     Destination: v1alpha1.Destination{
-                        Namespace: testNamespace,
+                        Namespace: data.testNamespace,
                         Service:   svcIPv4Name,
                     },
                     Packet: v1alpha1.Packet{
@@ -1524,7 +1524,7 @@ func testTraceflowInterNode(t *testing.T, data *TestData) {
                     },
                     {
                         Component:       v1alpha1.ComponentLB,
-                        Pod:             fmt.Sprintf("%s/%s", testNamespace, agnhostPodName),
+                        Pod:             fmt.Sprintf("%s/%s", data.testNamespace, agnhostPodName),
                         TranslatedSrcIP: gatewayIPv4,
                         TranslatedDstIP: agnhostIPv4Str,
                         Action:          v1alpha1.ActionForwarded,
@@ -1548,15 +1548,15 @@ func testTraceflowInterNode(t *testing.T, data *TestData) {
             ipVersion: 4,
             tf: &v1alpha1.Traceflow{
                 ObjectMeta: metav1.ObjectMeta{
-                    Name: randName(fmt.Sprintf("%s-%s-to-%s-", testNamespace, node1Pods[0], node2Pods[0])),
+                    Name: randName(fmt.Sprintf("%s-%s-to-%s-", data.testNamespace, node1Pods[0], node2Pods[0])),
                 },
                 Spec: v1alpha1.TraceflowSpec{
                     Source: v1alpha1.Source{
-                        Namespace: testNamespace,
+                        Namespace: data.testNamespace,
                         Pod:       node1Pods[0],
                     },
                     Destination: v1alpha1.Destination{
-                        Namespace: testNamespace,
+                        Namespace: data.testNamespace,
                         Pod:       node2Pods[0],
                     },
                     LiveTraffic: true,
@@ -1605,15 +1605,15 @@ func testTraceflowInterNode(t *testing.T, data *TestData) {
             ipVersion: 6,
             tf: &v1alpha1.Traceflow{
                 ObjectMeta: metav1.ObjectMeta{
-                    Name: randName(fmt.Sprintf("%s-%s-to-%s-%s-", testNamespace, node1Pods[0], testNamespace, node2Pods[0])),
+                    Name: randName(fmt.Sprintf("%s-%s-to-%s-%s-", data.testNamespace, node1Pods[0], data.testNamespace, node2Pods[0])),
                 },
                 Spec: v1alpha1.TraceflowSpec{
                     Source: v1alpha1.Source{
-                        Namespace: testNamespace,
+                        Namespace: data.testNamespace,
                         Pod:       node1Pods[0],
                     },
                     Destination: v1alpha1.Destination{
-                        Namespace: testNamespace,
+                        Namespace: data.testNamespace,
                         Pod:       node2Pods[0],
                     },
                     Packet: v1alpha1.Packet{
@@ -1675,11 +1675,11 @@ func testTraceflowInterNode(t *testing.T, data *TestData) {
             ipVersion: 6,
             tf: &v1alpha1.Traceflow{
                 ObjectMeta: metav1.ObjectMeta{
-                    Name: randName(fmt.Sprintf("%s-%s-to-%s-", testNamespace, node1Pods[0], strings.ReplaceAll(dstPodIPv6Str, ":", "--"))),
+                    Name: randName(fmt.Sprintf("%s-%s-to-%s-", data.testNamespace, node1Pods[0], strings.ReplaceAll(dstPodIPv6Str, ":", "--"))),
                 },
                 Spec: v1alpha1.TraceflowSpec{
                     Source: v1alpha1.Source{
-                        Namespace: testNamespace,
+                        Namespace: data.testNamespace,
                         Pod:       node1Pods[0],
                     },
                     Destination: v1alpha1.Destination{
@@ -1740,11 +1740,11 @@ func testTraceflowInterNode(t *testing.T, data *TestData) {
             ipVersion: 6,
             tf: &v1alpha1.Traceflow{
                 ObjectMeta: metav1.ObjectMeta{
-                    Name: randName(fmt.Sprintf("%s-%s-to-%s-", testNamespace, node1Pods[0], strings.ReplaceAll(dstPodIPv6Str, ":", "--"))),
+                    Name: randName(fmt.Sprintf("%s-%s-to-%s-", data.testNamespace, node1Pods[0], strings.ReplaceAll(dstPodIPv6Str, ":", "--"))),
                 },
                 Spec: v1alpha1.TraceflowSpec{
                     Source: v1alpha1.Source{
-                        Namespace: testNamespace,
+                        Namespace: data.testNamespace,
                         Pod:       node1Pods[0],
                     },
                     Destination: v1alpha1.Destination{
@@ -1799,15 +1799,15 @@ func testTraceflowInterNode(t *testing.T, data *TestData) {
             ipVersion: 6,
             tf: &v1alpha1.Traceflow{
                 ObjectMeta: metav1.ObjectMeta{
-                    Name: randName(fmt.Sprintf("%s-%s-to-svc-%s-", testNamespace, node1Pods[0], svcIPv6Name)),
+                    Name: randName(fmt.Sprintf("%s-%s-to-svc-%s-", data.testNamespace, node1Pods[0], svcIPv6Name)),
                 },
                 Spec: v1alpha1.TraceflowSpec{
                     Source: v1alpha1.Source{
-                        Namespace: testNamespace,
+                        Namespace: data.testNamespace,
                         Pod:       node1Pods[0],
                     },
                     Destination: v1alpha1.Destination{
-                        Namespace: testNamespace,
+                        Namespace: data.testNamespace,
                         Service:   svcIPv6Name,
                     },
                     Packet: v1alpha1.Packet{
@@ -1835,7 +1835,7 @@ func testTraceflowInterNode(t *testing.T, data *TestData) {
                     },
                     {
                         Component:       v1alpha1.ComponentLB,
-                        Pod:             fmt.Sprintf("%s/%s", testNamespace, agnhostPodName),
+                        Pod:             fmt.Sprintf("%s/%s", data.testNamespace, agnhostPodName),
                         TranslatedDstIP: agnhostIPv6Str,
                         Action:          v1alpha1.ActionForwarded,
                     },
@@ -1872,15 +1872,15 @@ func testTraceflowInterNode(t *testing.T, data *TestData) {
             ipVersion: 6,
             tf: &v1alpha1.Traceflow{
                 ObjectMeta: metav1.ObjectMeta{
-                    Name: randName(fmt.Sprintf("%s-%s-to-svc-%s-", testNamespace, agnhostPodName, svcIPv6Name)),
+                    Name: randName(fmt.Sprintf("%s-%s-to-svc-%s-", data.testNamespace, agnhostPodName, svcIPv6Name)),
                 },
                 Spec: v1alpha1.TraceflowSpec{
                     Source: v1alpha1.Source{
-                        Namespace: testNamespace,
+                        Namespace: data.testNamespace,
                         Pod:       agnhostPodName,
                     },
                     Destination: v1alpha1.Destination{
-                        Namespace: testNamespace,
+                        Namespace: data.testNamespace,
                         Service:   svcIPv6Name,
                     },
                     Packet: v1alpha1.Packet{
@@ -1908,7 +1908,7 @@ func testTraceflowInterNode(t *testing.T, data *TestData) {
                     },
                     {
                         Component:       v1alpha1.ComponentLB,
-                        Pod:             fmt.Sprintf("%s/%s", testNamespace, agnhostPodName),
+                        Pod:             fmt.Sprintf("%s/%s", data.testNamespace, agnhostPodName),
                         TranslatedSrcIP: gatewayIPv6,
                         TranslatedDstIP: agnhostIPv6Str,
                         Action:          v1alpha1.ActionForwarded,
@@ -1932,15 +1932,15 @@ func testTraceflowInterNode(t *testing.T, data *TestData) {
             ipVersion: 6,
             tf: &v1alpha1.Traceflow{
                 ObjectMeta: metav1.ObjectMeta{
-                    Name: randName(fmt.Sprintf("%s-%s-to-%s-", testNamespace, node1Pods[0], node2Pods[0])),
+                    Name: randName(fmt.Sprintf("%s-%s-to-%s-", data.testNamespace, node1Pods[0], node2Pods[0])),
                 },
                 Spec: v1alpha1.TraceflowSpec{
                     Source: v1alpha1.Source{
-                        Namespace: testNamespace,
+                        Namespace: data.testNamespace,
                         Pod:       node1Pods[0],
                     },
                     Destination: v1alpha1.Destination{
-                        Namespace: testNamespace,
+                        Namespace: data.testNamespace,
                         Pod:       node2Pods[0],
                     },
                     Packet: v1alpha1.Packet{
@@ -2014,7 +2014,7 @@ func testTraceflowExternalIP(t *testing.T, data *TestData) {
     }
     node := nodeName(nodeIdx)
     nodeIP := nodeIP(nodeIdx)
-    podNames, _, cleanupFn := createTestAgnhostPods(t, data, 1, testNamespace, node)
+    podNames, _, cleanupFn := createTestAgnhostPods(t, data, 1, data.testNamespace, node)
     defer cleanupFn()
 
     testcase := testcase{
@@ -2022,11 +2022,11 @@ func testTraceflowExternalIP(t *testing.T, data *TestData) {
         ipVersion: 4,
         tf: &v1alpha1.Traceflow{
             ObjectMeta: metav1.ObjectMeta{
-                Name: randName(fmt.Sprintf("%s-%s-to-%s-%s-", testNamespace, podNames[0], testNamespace, strings.ReplaceAll(nodeIP, ":", "--"))),
+                Name: randName(fmt.Sprintf("%s-%s-to-%s-%s-", data.testNamespace, podNames[0], data.testNamespace, strings.ReplaceAll(nodeIP, ":", "--"))),
             },
             Spec: v1alpha1.TraceflowSpec{
                 Source: v1alpha1.Source{
-                    Namespace: testNamespace,
+                    Namespace: data.testNamespace,
                     Pod:       podNames[0],
                 },
                 Destination: v1alpha1.Destination{
@@ -2138,7 +2138,7 @@ func (data *TestData) createANPDenyIngress(key string, value string, name string
             Egress: []v1alpha1.Rule{},
         },
     }
-    anpCreated, err := k8sUtils.crdClient.CrdV1alpha1().NetworkPolicies(testNamespace).Create(context.TODO(), &anp, metav1.CreateOptions{})
+    anpCreated, err := k8sUtils.crdClient.CrdV1alpha1().NetworkPolicies(data.testNamespace).Create(context.TODO(), &anp, metav1.CreateOptions{})
     if err != nil {
         return nil, err
     }
@@ -2147,7 +2147,7 @@ func (data *TestData) createANPDenyIngress(key string, value string, name string
 
 // deleteAntreaNetworkpolicy deletes an Antrea NetworkPolicy.
 func (data *TestData) deleteAntreaNetworkpolicy(policy *v1alpha1.NetworkPolicy) error {
-    if err := k8sUtils.crdClient.CrdV1alpha1().NetworkPolicies(testNamespace).Delete(context.TODO(), policy.Name, metav1.DeleteOptions{}); err != nil {
+    if err := k8sUtils.crdClient.CrdV1alpha1().NetworkPolicies(data.testNamespace).Delete(context.TODO(), policy.Name, metav1.DeleteOptions{}); err != nil {
         return fmt.Errorf("unable to cleanup policy %v: %v", policy.Name, err)
     }
     return nil
@@ -2188,18 +2188,18 @@ func (data *TestData) waitForNetworkpolicyRealized(pod string, node string, isWi
         var stdout, stderr string
         var err error
         if isWindows {
-            antctlCmd := fmt.Sprintf("C:/k/antrea/bin/antctl.exe get networkpolicy -S %s -n %s -T %s", networkpolicy, testNamespace, npOption)
+            antctlCmd := fmt.Sprintf("C:/k/antrea/bin/antctl.exe get networkpolicy -S %s -n %s -T %s", networkpolicy, data.testNamespace, npOption)
             envCmd := fmt.Sprintf("export POD_NAME=antrea-agent;export KUBERNETES_SERVICE_HOST=%s;export KUBERNETES_SERVICE_PORT=%d", clusterInfo.k8sServiceHost, clusterInfo.k8sServicePort)
             cmd := fmt.Sprintf("%s && %s", envCmd, antctlCmd)
             _, stdout, stderr, err = data.RunCommandOnNode(node, cmd)
         } else {
-            cmds := []string{"antctl", "get", "networkpolicy", "-S", networkpolicy, "-n", testNamespace, "-T", npOption}
+            cmds := []string{"antctl", "get", "networkpolicy", "-S", networkpolicy, "-n", data.testNamespace, "-T", npOption}
             stdout, stderr, err = runAntctl(pod, cmds, data)
         }
         if err != nil {
             return false, fmt.Errorf("Error when executing antctl get NetworkPolicy, stdout: %s, stderr: %s, err: %v", stdout, stderr, err)
         }
-        return strings.Contains(stdout, fmt.Sprintf("%s:%s/%s", npType, testNamespace, networkpolicy)), nil
+        return strings.Contains(stdout, fmt.Sprintf("%s:%s/%s", npType, data.testNamespace, networkpolicy)), nil
     }); err == wait.ErrWaitTimeout {
         return fmt.Errorf("NetworkPolicy %s isn't realized in time", networkpolicy)
     } else if err != nil {
@@ -2251,7 +2251,7 @@ func runTestTraceflow(t *testing.T, data *TestData, tc testcase) {
     // Give a little time for Nodes to install OVS flows.
     time.Sleep(time.Second * 2)
     // Send an ICMP echo packet from the source Pod to the destination.
-    if err := data.runPingCommandFromTestPod(podInfo{srcPod, osString, "", ""}, testNamespace, dstPodIPs, agnhostContainerName, 2, 0); err != nil {
+    if err := data.runPingCommandFromTestPod(podInfo{srcPod, osString, "", ""}, data.testNamespace, dstPodIPs, agnhostContainerName, 2, 0); err != nil {
         t.Logf("Ping '%s' -> '%v' failed: ERROR (%v)", srcPod, *dstPodIPs, err)
     }
 }
diff --git a/test/e2e/upgrade_test.go b/test/e2e/upgrade_test.go
index d03b162de5c..4a6fbd63099 100644
--- a/test/e2e/upgrade_test.go
+++ b/test/e2e/upgrade_test.go
@@ -54,10 +54,10 @@ func TestUpgrade(t *testing.T) {
     podName := randName("test-pod-")
 
     t.Logf("Creating a busybox test Pod on '%s'", nodeName)
-    if err := data.createBusyboxPodOnNode(podName, testNamespace, nodeName, false); err != nil {
+    if err := data.createBusyboxPodOnNode(podName, data.testNamespace, nodeName, false); err != nil {
         t.Fatalf("Error when creating busybox test Pod: %v", err)
     }
-    if err := data.podWaitForRunning(defaultTimeout, podName, testNamespace); err != nil {
+    if err := data.podWaitForRunning(defaultTimeout, podName, data.testNamespace); err != nil {
         t.Fatalf("Error when waiting for Pod '%s' to be in the Running state", podName)
     }
 
@@ -113,5 +113,5 @@ func TestUpgrade(t *testing.T) {
         t.Errorf("Namespace deletion failed: %v", err)
     }
 
-    data.testDeletePod(t, podName, nodeName, testNamespace, false)
+    data.testDeletePod(t, podName, nodeName, data.testNamespace, false)
 }
diff --git a/test/e2e/wireguard_test.go b/test/e2e/wireguard_test.go
index 3199f4e8a35..b6667ab7d81 100644
--- a/test/e2e/wireguard_test.go
+++ b/test/e2e/wireguard_test.go
@@ -99,7 +99,7 @@ func (data *TestData) getWireGuardPeerEndpointsWithHandshake(nodeName string) ([
 }
 
 func testPodConnectivity(t *testing.T, data *TestData) {
-    podInfos, deletePods := createPodsOnDifferentNodes(t, data, testNamespace, "differentnodes")
+    podInfos, deletePods := createPodsOnDifferentNodes(t, data, data.testNamespace, "differentnodes")
     defer deletePods()
     numPods := 2
     data.runPingMesh(t, podInfos[:numPods], agnhostContainerName)
@@ -113,17 +113,17 @@ func testServiceConnectivity(t *testing.T, data *TestData) {
     // nodeIP() returns an IPv6 address if this is an IPv6 cluster.
     clientPodNodeIP := nodeIP(0)
     serverPodNode := nodeName(1)
-    svc, cleanup := data.createAgnhostServiceAndBackendPods(t, svcName, testNamespace, serverPodNode, corev1.ServiceTypeNodePort)
+    svc, cleanup := data.createAgnhostServiceAndBackendPods(t, svcName, data.testNamespace, serverPodNode, corev1.ServiceTypeNodePort)
     defer cleanup()
 
     // Create a hostNetwork Pod on a Node different from the service's backend Pod, so the service traffic will be transferred across the tunnel.
-    require.NoError(t, data.createPodOnNode(clientPodName, testNamespace, clientPodNode, busyboxImage, []string{"sleep", strconv.Itoa(3600)}, nil, nil, nil, true, nil))
-    defer data.deletePodAndWait(defaultTimeout, clientPodName, testNamespace)
-    require.NoError(t, data.podWaitForRunning(defaultTimeout, clientPodName, testNamespace))
+    require.NoError(t, data.createPodOnNode(clientPodName, data.testNamespace, clientPodNode, busyboxImage, []string{"sleep", strconv.Itoa(3600)}, nil, nil, nil, true, nil))
+    defer data.deletePodAndWait(defaultTimeout, clientPodName, data.testNamespace)
+    require.NoError(t, data.podWaitForRunning(defaultTimeout, clientPodName, data.testNamespace))
 
-    err := data.runNetcatCommandFromTestPod(clientPodName, testNamespace, svc.Spec.ClusterIP, 80)
+    err := data.runNetcatCommandFromTestPod(clientPodName, data.testNamespace, svc.Spec.ClusterIP, 80)
     require.NoError(t, err, "Pod %s should be able to connect to the service's ClusterIP %s, but was not able to connect", clientPodName, net.JoinHostPort(svc.Spec.ClusterIP, fmt.Sprint(80)))
 
-    err = data.runNetcatCommandFromTestPod(clientPodName, testNamespace, clientPodNodeIP, svc.Spec.Ports[0].NodePort)
+    err = data.runNetcatCommandFromTestPod(clientPodName, data.testNamespace, clientPodNodeIP, svc.Spec.Ports[0].NodePort)
    require.NoError(t, err, "Pod %s should be able to connect to the service's NodePort %s:%d, but was not able to connect", clientPodName, clientPodNodeIP, svc.Spec.Ports[0].NodePort)
 }
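
A note on the test/e2e/providers/exec/docker.go change above: draining an exec.Cmd's stdout and stderr concurrently before calling Wait is the standard Go pattern for this situation, since reading the pipes one after the other can stall if the child writes more than a pipe buffer's worth of data to the stream that is not currently being read (and os/exec requires all pipe reads to complete before Wait). The following is a minimal, self-contained sketch of the same pattern, not code from this patch; it uses io.ReadAll (the current spelling of the ioutil.ReadAll call used above) and a hypothetical shell command chosen only for illustration:

package main

import (
    "fmt"
    "io"
    "os/exec"
    "sync"
)

// runAndCapture starts cmd, drains both pipes concurrently, and only then
// calls Wait. Reading the pipes sequentially could block indefinitely: if
// the child fills the stderr pipe buffer while we are still blocked reading
// stdout, neither side can make progress.
func runAndCapture(cmd *exec.Cmd) (stdout, stderr []byte, err error) {
    outPipe, err := cmd.StdoutPipe()
    if err != nil {
        return nil, nil, err
    }
    errPipe, err := cmd.StderrPipe()
    if err != nil {
        return nil, nil, err
    }
    if err := cmd.Start(); err != nil {
        return nil, nil, err
    }
    var wg sync.WaitGroup
    wg.Add(2)
    go func() { defer wg.Done(); stdout, _ = io.ReadAll(outPipe) }()
    go func() { defer wg.Done(); stderr, _ = io.ReadAll(errPipe) }()
    wg.Wait()
    // Wait must only be called after all reads from the pipes have completed.
    return stdout, stderr, cmd.Wait()
}

func main() {
    // Hypothetical command that writes to both streams.
    out, errOut, err := runAndCapture(exec.Command("sh", "-c", "echo out; echo err >&2"))
    fmt.Printf("stdout=%q stderr=%q err=%v\n", out, errOut, err)
}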