diff --git a/.github/workflows/kind.yml b/.github/workflows/kind.yml
index 55c20c395a3..8d4230d7c3d 100755
--- a/.github/workflows/kind.yml
+++ b/.github/workflows/kind.yml
@@ -10,7 +10,7 @@ on:
     - release-*
 
 env:
-  KIND_VERSION: v0.9.0
+  KIND_VERSION: v0.11.0
 
 jobs:
   check-changes:
diff --git a/.github/workflows/kind_upgrade.yml b/.github/workflows/kind_upgrade.yml
index c3190d12df0..10bc4d72085 100644
--- a/.github/workflows/kind_upgrade.yml
+++ b/.github/workflows/kind_upgrade.yml
@@ -10,7 +10,7 @@ on:
     - release-*
 
 env:
-  KIND_VERSION: v0.9.0
+  KIND_VERSION: v0.11.0
 
 jobs:
   check-changes:
diff --git a/.github/workflows/netpol_cyclonus.yml b/.github/workflows/netpol_cyclonus.yml
index 630574ac193..010f5979539 100644
--- a/.github/workflows/netpol_cyclonus.yml
+++ b/.github/workflows/netpol_cyclonus.yml
@@ -5,7 +5,7 @@ on:
     - cron: '0 0 * * *'
 
 env:
-  KIND_VERSION: v0.9.0
+  KIND_VERSION: v0.11.0
 
 jobs:
 
diff --git a/ci/kind/kind-setup.sh b/ci/kind/kind-setup.sh
index 0c49b47dabb..61afe8c9051 100755
--- a/ci/kind/kind-setup.sh
+++ b/ci/kind/kind-setup.sh
@@ -233,6 +233,8 @@ function create {
   cat <<EOF > $config_file
 kind: Cluster
 apiVersion: kind.x-k8s.io/v1alpha4
+featureGates:
+  "NetworkPolicyEndPort": true
 networking:
   disableDefaultCNI: true
   podSubnet: $POD_CIDR
diff --git a/pkg/controller/networkpolicy/networkpolicy_controller.go b/pkg/controller/networkpolicy/networkpolicy_controller.go
index 0bb29fc3c12..5965fc1c360 100644
--- a/pkg/controller/networkpolicy/networkpolicy_controller.go
+++ b/pkg/controller/networkpolicy/networkpolicy_controller.go
@@ -529,11 +529,11 @@ func toAntreaServices(npPorts []networkingv1.NetworkPolicyPort) ([]controlplane.
 		if npPort.Port != nil && npPort.Port.Type == intstr.String {
 			namedPortExists = true
 		}
-		antreaService := controlplane.Service{
+		antreaServices = append(antreaServices, controlplane.Service{
 			Protocol: toAntreaProtocol(npPort.Protocol),
 			Port:     npPort.Port,
-		}
-		antreaServices = append(antreaServices, antreaService)
+			EndPort:  npPort.EndPort,
+		})
 	}
 	return antreaServices, namedPortExists
 }
diff --git a/pkg/controller/networkpolicy/networkpolicy_controller_test.go b/pkg/controller/networkpolicy/networkpolicy_controller_test.go
index 7237691d04f..0d2bdeb4afa 100644
--- a/pkg/controller/networkpolicy/networkpolicy_controller_test.go
+++ b/pkg/controller/networkpolicy/networkpolicy_controller_test.go
@@ -532,6 +532,61 @@ func TestAddNetworkPolicy(t *testing.T) {
 			expAppliedToGroups: 1,
 			expAddressGroups:   2,
 		},
+		{
+			name: "rule-with-end-port",
+			inputPolicy: &networkingv1.NetworkPolicy{
+				ObjectMeta: metav1.ObjectMeta{Namespace: "nsA", Name: "npG", UID: "uidG"},
+				Spec: networkingv1.NetworkPolicySpec{
+					PodSelector: selectorA,
+					Ingress: []networkingv1.NetworkPolicyIngressRule{
+						{
+							Ports: []networkingv1.NetworkPolicyPort{
+								{
+									Protocol: &k8sProtocolTCP,
+									Port:     &int1000,
+									EndPort:  &int32For1999,
+								},
+							},
+							From: []networkingv1.NetworkPolicyPeer{
+								{
+									PodSelector: &selectorB,
+								},
+							},
+						},
+					},
+				},
+			},
+			expPolicy: &antreatypes.NetworkPolicy{
+				UID:  "uidG",
+				Name: "uidG",
+				SourceRef: &controlplane.NetworkPolicyReference{
+					Type:      controlplane.K8sNetworkPolicy,
+					Namespace: "nsA",
+					Name:      "npG",
+					UID:       "uidG",
+				},
+				Rules: []controlplane.NetworkPolicyRule{
+					{
+						Direction: controlplane.DirectionIn,
+						From: controlplane.NetworkPolicyPeer{
+							AddressGroups: []string{getNormalizedUID(toGroupSelector("nsA", &selectorB, nil, nil).NormalizedName)},
+						},
+						Services: []controlplane.Service{
+							{
+								Protocol: &protocolTCP,
+								Port:     &int1000,
+								EndPort:  &int32For1999,
+							},
+						},
+						Priority: defaultRulePriority,
+						Action:   &defaultAction,
+					},
+				},
+				AppliedToGroups: []string{getNormalizedUID(toGroupSelector("nsA", &selectorA, nil, nil).NormalizedName)},
+			},
+			expAppliedToGroups: 1,
+			expAddressGroups:   1,
+		},
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
@@ -549,7 +604,7 @@ func TestAddNetworkPolicy(t *testing.T) {
 	for _, tt := range tests {
 		npc.addNetworkPolicy(tt.inputPolicy)
 	}
-	assert.Equal(t, 6, npc.GetNetworkPolicyNum(), "expected networkPolicy number is 6")
+	assert.Equal(t, 7, npc.GetNetworkPolicyNum(), "expected networkPolicy number is 7")
 	assert.Equal(t, 4, npc.GetAddressGroupNum(), "expected addressGroup number is 4")
 	assert.Equal(t, 2, npc.GetAppliedToGroupNum(), "appliedToGroup number is 2")
 }
diff --git a/test/e2e/framework.go b/test/e2e/framework.go
index 37e82fa62db..e8b3d54374d 100644
--- a/test/e2e/framework.go
+++ b/test/e2e/framework.go
@@ -972,6 +972,24 @@ func (data *TestData) createNginxPodOnNode(name string, nodeName string) error {
 	}, false, nil)
 }
 
+// createAgnhostPodOnNodeWithMultiPort creates a Pod in the test namespace with
+// a single agnhost container listening on multiple ports.
+// The Pod will be scheduled on the specified Node (if nodeName is not empty).
+func (data *TestData) createAgnhostPodOnNodeWithMultiPort(name string, nodeName string, ports []int32) error {
+	var args []string
+	var containerPorts []corev1.ContainerPort
+	for _, port := range ports {
+		args = append(args, fmt.Sprintf("/agnhost serve-hostname --tcp --http=false --port=%d", port))
+		containerPorts = append(containerPorts, corev1.ContainerPort{
+			Name:          fmt.Sprintf("c%d", port),
+			ContainerPort: port,
+			Protocol:      corev1.ProtocolTCP,
+		})
+	}
+
+	return data.createPodOnNode(name, nodeName, agnhostImage, []string{"/bin/bash", "-c"}, []string{strings.Join(args, " & ")}, nil, containerPorts, false, nil)
+}
+
 // createNginxPod creates a Pod in the test namespace with a single nginx container.
 func (data *TestData) createNginxPod(name, nodeName string) error {
 	return data.createNginxPodOnNode(name, nodeName)
diff --git a/test/e2e/infra/vagrant/playbook/roles/control-plane/templates/kubeadm.conf.j2 b/test/e2e/infra/vagrant/playbook/roles/control-plane/templates/kubeadm.conf.j2
index ccfe7e42e72..04b9aee6181 100644
--- a/test/e2e/infra/vagrant/playbook/roles/control-plane/templates/kubeadm.conf.j2
+++ b/test/e2e/infra/vagrant/playbook/roles/control-plane/templates/kubeadm.conf.j2
@@ -13,3 +13,5 @@ networking:
 apiServer:
   certSANs:
   - "{{ k8s_api_server_ip }}"
+  extraArgs:
+    feature-gates: "NetworkPolicyEndPort=true"
diff --git a/test/e2e/networkpolicy_test.go b/test/e2e/networkpolicy_test.go
index dadee9e6a6d..2eba15a4db7 100644
--- a/test/e2e/networkpolicy_test.go
+++ b/test/e2e/networkpolicy_test.go
@@ -806,6 +806,107 @@ func TestIngressPolicyWithoutPortNumber(t *testing.T) {
 	}
 }
 
+func TestIngressPolicyWithEndPort(t *testing.T) {
+	skipIfHasWindowsNodes(t)
+
+	data, err := setupTest(t)
+	if err != nil {
+		t.Fatalf("Error when setting up test: %v", err)
+	}
+	defer teardownTest(t, data)
+
+	serverPort := int32(80)
+	serverEndPort := int32(84)
+	policyPort := int32(81)
+	policyEndPort := int32(83)
+
+	var serverPorts []int32
+	for i := serverPort; i <= serverEndPort; i++ {
+		serverPorts = append(serverPorts, i)
+	}
+
+	serverName, serverIPs, cleanupFunc := createAndWaitForPodWithMultiPorts(t, data, data.createAgnhostPodOnNodeWithMultiPort, "test-server-", "", serverPorts)
+	defer cleanupFunc()
+
+	clientName, _, cleanupFunc := createAndWaitForPod(t, data, data.createBusyboxPodOnNode, "test-client-", "")
+	defer cleanupFunc()
+
+	preCheckFunc := func(serverIP string) {
+		// The client can connect to server on all ports.
+		for _, port := range serverPorts {
+			if err = data.runNetcatCommandFromTestPod(clientName, serverIP, port); err != nil {
+				t.Fatalf("Pod %s should be able to connect %s, but was not able to connect", clientName, net.JoinHostPort(serverIP, fmt.Sprint(port)))
+			}
+		}
+
+	}
+
+	if clusterInfo.podV4NetworkCIDR != "" {
+		preCheckFunc(serverIPs.ipv4.String())
+	}
+	if clusterInfo.podV6NetworkCIDR != "" {
+		preCheckFunc(serverIPs.ipv6.String())
+	}
+
+	protocol := corev1.ProtocolTCP
+	spec := &networkingv1.NetworkPolicySpec{
+		PodSelector: metav1.LabelSelector{
+			MatchLabels: map[string]string{
+				"antrea-e2e": serverName,
+			},
+		},
+		PolicyTypes: []networkingv1.PolicyType{networkingv1.PolicyTypeIngress},
+		Ingress: []networkingv1.NetworkPolicyIngressRule{
+			{
+				Ports: []networkingv1.NetworkPolicyPort{
+					{
+						Protocol: &protocol,
+						Port:     &intstr.IntOrString{Type: intstr.Int, IntVal: policyPort},
+						EndPort:  &policyEndPort,
+					},
+				},
+				From: []networkingv1.NetworkPolicyPeer{{
+					PodSelector: &metav1.LabelSelector{
+						MatchLabels: map[string]string{
+							"antrea-e2e": clientName,
+						},
+					}},
+				},
+			},
+		},
+	}
+	np, err := data.createNetworkPolicy("test-networkpolicy-ingress-with-endport", spec)
+	if err != nil {
+		t.Fatalf("Error when creating network policy: %v", err)
+	}
+	defer func() {
+		if err = data.deleteNetworkpolicy(np); err != nil {
+			t.Fatalf("Error when deleting network policy: %v", err)
+		}
+	}()
+
+	npCheck := func(serverIP string) {
+		for _, port := range serverPorts {
+			serverAddress := net.JoinHostPort(serverIP, fmt.Sprint(port))
+			err = data.runNetcatCommandFromTestPod(clientName, serverIP, port)
+			if port >= policyPort && port <= policyEndPort {
+				if err != nil {
+					t.Fatalf("Pod %s should be able to connect %s, but was not able to connect", clientName, serverAddress)
+				}
+			} else if err == nil {
+				t.Fatalf("Pod %s should not be able to connect %s, but was able to connect", clientName, serverAddress)
+			}
+		}
+	}
+
+	if clusterInfo.podV4NetworkCIDR != "" {
+		npCheck(serverIPs.ipv4.String())
+	}
+	if clusterInfo.podV6NetworkCIDR != "" {
+		npCheck(serverIPs.ipv6.String())
+	}
+}
+
 func createAndWaitForPod(t *testing.T, data *TestData, createFunc func(name string, nodeName string) error, namePrefix string, nodeName string) (string, *PodIPs, func()) {
 	name := randName(namePrefix)
 	if err := createFunc(name, nodeName); err != nil {
@@ -822,6 +923,22 @@ func createAndWaitForPod(t *testing.T, data *TestData, createFunc func(name stri
 	return name, podIP, cleanupFunc
 }
 
+func createAndWaitForPodWithMultiPorts(t *testing.T, data *TestData, createFunc func(name string, nodeName string, ports []int32) error, namePrefix string, nodeName string, ports []int32) (string, *PodIPs, func()) {
+	name := randName(namePrefix)
+	if err := createFunc(name, nodeName, ports); err != nil {
+		t.Fatalf("Error when creating test Pod: %v", err)
+	}
+	cleanupFunc := func() {
+		deletePodWrapper(t, data, name)
+	}
+	podIP, err := data.podWaitForIPs(defaultTimeout, name, testNamespace)
+	if err != nil {
+		cleanupFunc()
+		t.Fatalf("Error when waiting for IP for Pod '%s': %v", name, err)
+	}
+	return name, podIP, cleanupFunc
+}
+
 func createAndWaitForPodWithLabels(t *testing.T, data *TestData, createFunc func(name, ns string, portNum int32, labels map[string]string) error, name, ns string, portNum int32, labels map[string]string) (string, *PodIPs, func() error) {
 	if err := createFunc(name, ns, portNum, labels); err != nil {
 		t.Fatalf("Error when creating busybox test Pod: %v", err)