 #include <bpf/bpf_helpers.h>
 #include <bpf/bpf_endian.h>
 
+/* Pin map under /sys/fs/bpf/tc/globals/<map name> */
+#define PIN_GLOBAL_NS 2
+
+/* Must match struct bpf_elf_map layout from iproute2 */
+struct {
+	__u32 type;
+	__u32 size_key;
+	__u32 size_value;
+	__u32 max_elem;
+	__u32 flags;
+	__u32 id;
+	__u32 pinning;
+} server_map SEC("maps") = {
+	.type = BPF_MAP_TYPE_SOCKMAP,
+	.size_key = sizeof(int),
+	.size_value = sizeof(__u64),
+	.max_elem = 1,
+	.pinning = PIN_GLOBAL_NS,
+};
+
 int _version SEC("version") = 1;
 char _license[] SEC("license") = "GPL";
 
@@ -72,7 +92,9 @@ handle_udp(struct __sk_buff *skb, struct bpf_sock_tuple *tuple, bool ipv4)
 {
 	struct bpf_sock_tuple ln = {0};
 	struct bpf_sock *sk;
+	const int zero = 0;
 	size_t tuple_len;
+	__be16 dport;
 	int ret;
 
 	tuple_len = ipv4 ? sizeof(tuple->ipv4) : sizeof(tuple->ipv6);
@@ -83,32 +105,11 @@ handle_udp(struct __sk_buff *skb, struct bpf_sock_tuple *tuple, bool ipv4)
 	if (sk)
 		goto assign;
 
-	if (ipv4) {
-		if (tuple->ipv4.dport != bpf_htons(4321))
-			return TC_ACT_OK;
-
-		ln.ipv4.daddr = bpf_htonl(0x7f000001);
-		ln.ipv4.dport = bpf_htons(1234);
-
-		sk = bpf_sk_lookup_udp(skb, &ln, sizeof(ln.ipv4),
-				       BPF_F_CURRENT_NETNS, 0);
-	} else {
-		if (tuple->ipv6.dport != bpf_htons(4321))
-			return TC_ACT_OK;
-
-		/* Upper parts of daddr are already zero. */
-		ln.ipv6.daddr[3] = bpf_htonl(0x1);
-		ln.ipv6.dport = bpf_htons(1234);
-
-		sk = bpf_sk_lookup_udp(skb, &ln, sizeof(ln.ipv6),
-				       BPF_F_CURRENT_NETNS, 0);
-	}
+	dport = ipv4 ? tuple->ipv4.dport : tuple->ipv6.dport;
+	if (dport != bpf_htons(4321))
+		return TC_ACT_OK;
 
-	/* workaround: We can't do a single socket lookup here, because then
-	 * the compiler will likely spill tuple_len to the stack. This makes it
-	 * lose all bounds information in the verifier, which then rejects the
-	 * call as unsafe.
-	 */
+	sk = bpf_map_lookup_elem(&server_map, &zero);
 	if (!sk)
 		return TC_ACT_SHOT;
 
@@ -123,7 +124,9 @@ handle_tcp(struct __sk_buff *skb, struct bpf_sock_tuple *tuple, bool ipv4)
 {
 	struct bpf_sock_tuple ln = {0};
 	struct bpf_sock *sk;
+	const int zero = 0;
 	size_t tuple_len;
+	__be16 dport;
 	int ret;
 
 	tuple_len = ipv4 ? sizeof(tuple->ipv4) : sizeof(tuple->ipv6);
@@ -137,32 +140,11 @@ handle_tcp(struct __sk_buff *skb, struct bpf_sock_tuple *tuple, bool ipv4)
 		bpf_sk_release(sk);
 	}
 
-	if (ipv4) {
-		if (tuple->ipv4.dport != bpf_htons(4321))
-			return TC_ACT_OK;
+	dport = ipv4 ? tuple->ipv4.dport : tuple->ipv6.dport;
+	if (dport != bpf_htons(4321))
+		return TC_ACT_OK;
 
-		ln.ipv4.daddr = bpf_htonl(0x7f000001);
-		ln.ipv4.dport = bpf_htons(1234);
-
-		sk = bpf_skc_lookup_tcp(skb, &ln, sizeof(ln.ipv4),
-					BPF_F_CURRENT_NETNS, 0);
-	} else {
-		if (tuple->ipv6.dport != bpf_htons(4321))
-			return TC_ACT_OK;
-
-		/* Upper parts of daddr are already zero. */
-		ln.ipv6.daddr[3] = bpf_htonl(0x1);
-		ln.ipv6.dport = bpf_htons(1234);
-
-		sk = bpf_skc_lookup_tcp(skb, &ln, sizeof(ln.ipv6),
-					BPF_F_CURRENT_NETNS, 0);
-	}
-
-	/* workaround: We can't do a single socket lookup here, because then
-	 * the compiler will likely spill tuple_len to the stack. This makes it
-	 * lose all bounds information in the verifier, which then rejects the
-	 * call as unsafe.
-	 */
+	sk = bpf_map_lookup_elem(&server_map, &zero);
 	if (!sk)
 		return TC_ACT_SHOT;
 