tcp_bpf: update the rmem scheduling for ingress redirection #7904

Open · wants to merge 2 commits into base: bpf_base
include/linux/skmsg.h (8 additions, 3 deletions)

@@ -317,17 +317,22 @@ static inline void sock_drop(struct sock *sk, struct sk_buff *skb)
 	kfree_skb(skb);
 }
 
-static inline void sk_psock_queue_msg(struct sk_psock *psock,
+static inline bool sk_psock_queue_msg(struct sk_psock *psock,
 				      struct sk_msg *msg)
 {
+	bool ret;
+
 	spin_lock_bh(&psock->ingress_lock);
-	if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
+	if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
 		list_add_tail(&msg->list, &psock->ingress_msg);
-	else {
+		ret = true;
+	} else {
 		sk_msg_free(psock->sk, msg);
 		kfree(msg);
+		ret = false;
 	}
 	spin_unlock_bh(&psock->ingress_lock);
+	return ret;
 }
 
 static inline struct sk_msg *sk_psock_dequeue_msg(struct sk_psock *psock)
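For context on the new return value: sk_msg_free() inside sk_psock_queue_msg() already returns the forward-alloc charge when the message is dropped, but it cannot undo a charge the caller made against sk->sk_rmem_alloc, so the boolean lets the caller roll that back itself. A minimal sketch of the resulting calling convention (the helper queue_charged_msg() and its arguments are illustrative, not part of the patch; the real caller is the bpf_tcp_ingress() hunk further down):

/* Illustrative only: charge receive memory up front, then undo the
 * sk_rmem_alloc part if the psock was already being torn down and
 * sk_psock_queue_msg() dropped the message instead of queueing it.
 */
static void queue_charged_msg(struct sock *sk, struct sk_psock *psock,
			      struct sk_msg *msg, u32 size)
{
	sk_mem_charge(sk, size);
	atomic_add(size, &sk->sk_rmem_alloc);
	if (!sk_psock_queue_msg(psock, msg))
		atomic_sub(size, &sk->sk_rmem_alloc);
}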
include/net/sock.h (8 additions, 2 deletions)

@@ -1519,15 +1519,21 @@ static inline bool sk_wmem_schedule(struct sock *sk, int size)
 }
 
 static inline bool
-sk_rmem_schedule(struct sock *sk, struct sk_buff *skb, int size)
+__sk_rmem_schedule(struct sock *sk, int size, bool pfmemalloc)
 {
 	int delta;
 
 	if (!sk_has_account(sk))
 		return true;
 	delta = size - sk->sk_forward_alloc;
 	return delta <= 0 || __sk_mem_schedule(sk, delta, SK_MEM_RECV) ||
-	       skb_pfmemalloc(skb);
+	       pfmemalloc;
+}
+
+static inline bool
+sk_rmem_schedule(struct sock *sk, struct sk_buff *skb, int size)
+{
+	return __sk_rmem_schedule(sk, size, skb_pfmemalloc(skb));
 }
 
 static inline int sk_unused_reserved_mem(const struct sock *sk)
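The split leaves sk_rmem_schedule() behaviour unchanged and only turns the pfmemalloc test into a parameter, so a caller with no skb to derive it from can still schedule receive memory. A rough sketch of the two call forms (charge_rmem() is an illustrative wrapper, not something the patch adds):

/* Illustrative only: the skb path still folds in skb_pfmemalloc(),
 * while an skb-less caller (e.g. sk_msg based ingress) passes false.
 */
static bool charge_rmem(struct sock *sk, struct sk_buff *skb, int size)
{
	if (skb)
		return sk_rmem_schedule(sk, skb, size);
	return __sk_rmem_schedule(sk, size, false);
}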
net/core/skmsg.c (5 additions, 1 deletion)

@@ -445,8 +445,10 @@ int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
 		if (likely(!peek)) {
 			sge->offset += copy;
 			sge->length -= copy;
-			if (!msg_rx->skb)
+			if (!msg_rx->skb) {
 				sk_mem_uncharge(sk, copy);
+				atomic_sub(copy, &sk->sk_rmem_alloc);
+			}
 			msg_rx->sg.size -= copy;
 
 			if (!sge->length) {
@@ -772,6 +774,8 @@ static void __sk_psock_purge_ingress_msg(struct sk_psock *psock)
 
 	list_for_each_entry_safe(msg, tmp, &psock->ingress_msg, list) {
 		list_del(&msg->list);
+		if (!msg->skb)
+			atomic_sub(msg->sg.size, &psock->sk->sk_rmem_alloc);
 		sk_msg_free(psock->sk, msg);
 		kfree(msg);
 	}
net/ipv4/tcp_bpf.c (4 additions, 2 deletions)

@@ -49,13 +49,14 @@ static int bpf_tcp_ingress(struct sock *sk, struct sk_psock *psock,
 		sge = sk_msg_elem(msg, i);
 		size = (apply && apply_bytes < sge->length) ?
 			apply_bytes : sge->length;
-		if (!sk_wmem_schedule(sk, size)) {
+		if (!__sk_rmem_schedule(sk, size, false)) {
 			if (!copied)
 				ret = -ENOMEM;
 			break;
 		}
 
 		sk_mem_charge(sk, size);
+		atomic_add(size, &sk->sk_rmem_alloc);
 		sk_msg_xfer(tmp, msg, i, size);
 		copied += size;
 		if (sge->length)
@@ -74,7 +75,8 @@ static int bpf_tcp_ingress(struct sock *sk, struct sk_psock *psock,
 
 	if (!ret) {
 		msg->sg.start = i;
-		sk_psock_queue_msg(psock, tmp);
+		if (!sk_psock_queue_msg(psock, tmp))
+			atomic_sub(copied, &sk->sk_rmem_alloc);
 		sk_psock_data_ready(sk, psock);
 	} else {
 		sk_msg_free(sk, tmp);
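Taken together, the charge made in bpf_tcp_ingress() is paired with an uncharge on both exit paths for queued data. A condensed, comment-only sketch of that lifecycle as it looks after this series (paraphrased from the hunks above, not code from the patch):

/*
 * bpf_tcp_ingress()                          redirect to own ingress queue
 *     __sk_rmem_schedule(sk, size, false)    reserve budget without an skb
 *     sk_mem_charge(sk, size)
 *     atomic_add(size, &sk->sk_rmem_alloc)   charge now visible in rmem
 *     sk_psock_queue_msg(psock, tmp)         on failure: atomic_sub(copied, ...)
 *
 * sk_msg_recvmsg()                           data copied to userspace
 *     sk_mem_uncharge(sk, copy)
 *     atomic_sub(copy, &sk->sk_rmem_alloc)
 *
 * __sk_psock_purge_ingress_msg()             psock torn down, queue dropped
 *     atomic_sub(msg->sg.size, &psock->sk->sk_rmem_alloc)
 *     sk_msg_free(psock->sk, msg)
 */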