Automated cherry pick of #5687: Fix NP not working on Hairpin #5699

Merged
7 changes: 7 additions & 0 deletions docs/antrea-network-policy.md
@@ -1776,3 +1776,10 @@ Similar RBAC is applied to the ClusterGroup resource.
matched at the same priority with conflicting actions. It will be the policy writer's
responsibility to identify such ambiguities in rule definitions and avoid potential
non-deterministic rule enforcement results.
- For hairpin service traffic, when a Pod initiates traffic towards the service it provides,
and the same Pod is selected as the Endpoint, NetworkPolicies will consistently permit
this traffic during ingress enforcement if AntreaProxy is enabled. However, when AntreaProxy
is disabled, NetworkPolicies may not function as expected for hairpin service traffic.
This is due to kube-proxy performing SNAT, which conceals the original source IP from Antrea.
Consequently, NetworkPolicies are unable to differentiate between hairpin service traffic and
external traffic in this scenario.
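For illustration only (not part of this change), the documented limitation can be pictured with a minimal, hypothetical manifest: a NetworkPolicy that denies all ingress to the Pods backing a Service. With AntreaProxy enabled, a selected backend Pod can still reach the Service it backs (hairpin traffic is permitted at ingress); with AntreaProxy disabled, kube-proxy's SNAT makes that same connection look like external traffic, so it may be dropped. The name, namespace, and app=web label below are assumptions made for this sketch.

```yaml
# Hypothetical default-deny ingress policy for the Pods backing a Service.
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: web-deny-all-ingress   # example name, not from this PR
  namespace: default
spec:
  podSelector:
    matchLabels:
      app: web                 # assumed label on the Service's backend Pods
  policyTypes:
    - Ingress                  # no ingress rules listed: all ingress is denied
```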
1 change: 1 addition & 0 deletions pkg/agent/openflow/network_policy_test.go
@@ -1387,6 +1387,7 @@ func networkPolicyInitFlows(ovsMeterSupported, externalNodeEnabled, l7NetworkPol
"cookie=0x1020000000000, table=IngressSecurityClassifier, priority=200,reg0=0x20/0xf0 actions=goto_table:IngressMetric",
"cookie=0x1020000000000, table=IngressSecurityClassifier, priority=200,reg0=0x10/0xf0 actions=goto_table:IngressMetric",
"cookie=0x1020000000000, table=IngressSecurityClassifier, priority=200,reg0=0x40/0xf0 actions=goto_table:IngressMetric",
"cookie=0x1020000000000, table=IngressSecurityClassifier, priority=200,ct_mark=0x40/0x40 actions=goto_table:ConntrackCommit",
"cookie=0x1020000000000, table=AntreaPolicyEgressRule, priority=64990,ct_state=-new+est,ip actions=goto_table:EgressMetric",
"cookie=0x1020000000000, table=AntreaPolicyEgressRule, priority=64990,ct_state=-new+rel,ip actions=goto_table:EgressMetric",
"cookie=0x1020000000000, table=AntreaPolicyIngressRule, priority=64990,ct_state=-new+est,ip actions=goto_table:IngressMetric",
6 changes: 6 additions & 0 deletions pkg/agent/openflow/pipeline.go
@@ -2170,6 +2170,12 @@ func (f *featureNetworkPolicy) ingressClassifierFlows() []binding.Flow {
MatchRegMark(ToUplinkRegMark).
Action().GotoTable(IngressMetricTable.GetID()).
Done(),
// This generates the flow to match the hairpin service packets and forward them to stageConntrack.
IngressSecurityClassifierTable.ofTable.BuildFlow(priorityNormal).
Cookie(cookieID).
MatchCTMark(HairpinCTMark).
Action().GotoStage(stageConntrack).
Done(),
}
if f.enableAntreaPolicy && f.proxyAll {
// This generates the flow to match the NodePort Service packets and forward them to AntreaPolicyIngressRuleTable.
11 changes: 9 additions & 2 deletions test/e2e/antreapolicy_test.go
@@ -3445,7 +3445,7 @@ func testToServices(t *testing.T) {
services = append(services, ipv4Svc)
}
if clusterInfo.podV6NetworkCIDR != "" {
- ipv6Svc := k8sUtils.BuildService("ipv6-svc", namespaces["x"], 80, 80, map[string]string{"pod": "b"}, nil)
+ ipv6Svc := k8sUtils.BuildService("ipv6-svc", namespaces["x"], 80, 80, map[string]string{"pod": "a"}, nil)
ipv6Svc.Spec.IPFamilies = []v1.IPFamily{v1.IPv6Protocol}
services = append(services, ipv6Svc)
}
@@ -3466,7 +3466,8 @@ func testToServices(t *testing.T) {
builder = builder.SetName("test-acnp-to-services").
SetTier("application").
SetPriority(1.0)
builder.AddToServicesRule(svcRefs, "svc", []ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": namespaces["y"]}}}, crdv1alpha1.RuleActionDrop)
builder.AddToServicesRule(svcRefs, "x-to-svc", []ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": namespaces["x"]}}}, crdv1alpha1.RuleActionDrop)
builder.AddToServicesRule(svcRefs, "y-to-svc", []ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": namespaces["y"]}}}, crdv1alpha1.RuleActionDrop)
time.Sleep(networkPolicyDelay)

acnp := builder.Get()
@@ -3476,6 +3477,12 @@
var testcases []podToAddrTestStep
for _, service := range builtSvcs {
eachServiceCases := []podToAddrTestStep{
{
Pod(namespaces["x"] + "/a"),
service.Spec.ClusterIP,
service.Spec.Ports[0].Port,
Dropped,
},
{
Pod(namespaces["y"] + "/b"),
service.Spec.ClusterIP,
47 changes: 47 additions & 0 deletions test/e2e/networkpolicy_test.go
@@ -81,6 +81,10 @@ func TestNetworkPolicy(t *testing.T) {
t.Run("testIngressPolicyWithEndPort", func(t *testing.T) {
testIngressPolicyWithEndPort(t, data)
})
t.Run("testAllowHairpinService", func(t *testing.T) {
skipIfProxyDisabled(t)
testAllowHairpinService(t, data)
})
}

func testNetworkPolicyStats(t *testing.T, data *TestData) {
@@ -919,6 +923,49 @@ func testIngressPolicyWithEndPort(t *testing.T, data *TestData) {
}
}

func testAllowHairpinService(t *testing.T, data *TestData) {
serverNode := workerNodeName(1)
serverPort := int32(80)
serverName, _, cleanupFunc := createAndWaitForPod(t, data, data.createNginxPodOnNode, "test-server-", serverNode, data.testNamespace, false)
defer cleanupFunc()

service, err := data.CreateService("nginx", data.testNamespace, serverPort, serverPort, map[string]string{"app": "nginx"}, false, false, corev1.ServiceTypeClusterIP, nil)
if err != nil {
t.Fatalf("Error when creating nginx service: %v", err)
}
defer data.deleteService(service.Namespace, service.Name)

clientName, _, cleanupFunc := createAndWaitForPod(t, data, data.createBusyboxPodOnNode, "test-client-", serverNode, data.testNamespace, false)
defer cleanupFunc()

spec := &networkingv1.NetworkPolicySpec{
PodSelector: metav1.LabelSelector{},
PolicyTypes: []networkingv1.PolicyType{networkingv1.PolicyTypeIngress},
}
np, err := data.createNetworkPolicy("test-networkpolicy-ns-iso", spec)
if err != nil {
t.Fatalf("Error when creating network policy: %v", err)
}
defer func() {
if err = data.deleteNetworkpolicy(np); err != nil {
t.Fatalf("Error when deleting network policy: %v", err)
}
}()

npCheck := func(clientName, serverIP, containerName string, serverPort int32, wantErr bool) {
if err = data.runNetcatCommandFromTestPodWithProtocol(clientName, data.testNamespace, containerName, serverIP, serverPort, "tcp"); wantErr && err == nil {
t.Fatalf("Pod %s should not be able to connect %s, but was able to connect", clientName, net.JoinHostPort(serverIP, fmt.Sprint(serverPort)))
} else if !wantErr && err != nil {
t.Fatalf("Pod %s should be able to connect %s, but was not able to connect", clientName, net.JoinHostPort(serverIP, fmt.Sprint(serverPort)))
}
}

for _, clusterIP := range service.Spec.ClusterIPs {
npCheck(serverName, clusterIP, nginxContainerName, serverPort, false)
npCheck(clientName, clusterIP, busyboxContainerName, serverPort, true)
}
}

func createAndWaitForPod(t *testing.T, data *TestData, createFunc func(name string, ns string, nodeName string, hostNetwork bool) error, namePrefix string, nodeName string, ns string, hostNetwork bool) (string, *PodIPs, func()) {
name := randName(namePrefix)
return createAndWaitForPodWithExactName(t, data, createFunc, name, nodeName, ns, hostNetwork)