sctp: signal sk_data_ready earlier on data chunks reception
Dave Miller pointed out that fb586f2 ("sctp: delay calls to
sk_data_ready() as much as possible") may introduce latency, especially
if the receiving application is running on another CPU, and that it
would be better if we signalled as early as possible.

This patch thus basically inverts the logic of fb586f2 and signals
sk_data_ready() as early as possible, similar to what we had before.

Fixes: fb586f2 ("sctp: delay calls to sk_data_ready() as much as possible")
Reported-by: Dave Miller <davem@davemloft.net>
Signed-off-by: Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
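
To make the new behaviour easier to follow, here is a minimal userland C sketch of the pattern the patch moves to, assuming nothing beyond the commit itself: signal the reader as soon as the first event is queued, and use a data_ready_signalled flag so that a burst of chunks handled in one side-effect run produces a single wakeup, re-armed once the run completes. The fake_sock struct and the enqueue_event/side_effect_done helpers below are illustrative names only, not kernel APIs.

/*
 * Minimal userland sketch of the signalling pattern used after this patch
 * (illustrative only; fake_sock, enqueue_event and side_effect_done are not
 * kernel names): the first event queued in a run wakes the reader at once,
 * further events in the same run do not, and the flag is re-armed when the
 * run ends.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_sock {
	int  queued;                 /* events sitting on the receive queue */
	bool data_ready_signalled;   /* wakeup already issued for this run? */
};

/* Stand-in for sk->sk_data_ready(): wake up whoever is blocked in recv(). */
static void data_ready(struct fake_sock *sk)
{
	printf("wake up reader (%d event(s) queued)\n", sk->queued);
}

/* Called for every event delivered to the receive queue. */
static void enqueue_event(struct fake_sock *sk)
{
	sk->queued++;
	if (!sk->data_ready_signalled) {	/* first event of the run: signal now */
		sk->data_ready_signalled = true;
		data_ready(sk);
	}
}

/* Called once a whole run of side effects has been processed. */
static void side_effect_done(struct fake_sock *sk)
{
	sk->data_ready_signalled = false;	/* re-arm for the next run */
}

int main(void)
{
	struct fake_sock sk = { 0 };

	/* Three chunks arrive in one run: only the first wakes the reader. */
	enqueue_event(&sk);
	enqueue_event(&sk);
	enqueue_event(&sk);
	side_effect_done(&sk);

	/* The next run signals again. */
	enqueue_event(&sk);
	side_effect_done(&sk);
	return 0;
}

The guard keeps the redundant-wakeup suppression that fb586f2 introduced while restoring the early signal it had removed.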
marceloleitner authored and davem330 committed May 2, 2016
1 parent 70e927b commit 0970f5b
Showing 3 changed files with 20 additions and 14 deletions.
2 changes: 1 addition & 1 deletion include/net/sctp/structs.h
@@ -218,7 +218,7 @@ struct sctp_sock {
 		frag_interleave:1,
 		recvrcvinfo:1,
 		recvnxtinfo:1,
-		pending_data_ready:1;
+		data_ready_signalled:1;
 
 	atomic_t pd_mode;
 	/* Receive to here while partial delivery is in effect. */
7 changes: 3 additions & 4 deletions net/sctp/sm_sideeffect.c
@@ -1741,10 +1741,9 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
 	} else if (local_cork)
 		error = sctp_outq_uncork(&asoc->outqueue, gfp);
 
-	if (sp->pending_data_ready) {
-		sk->sk_data_ready(sk);
-		sp->pending_data_ready = 0;
-	}
+	if (sp->data_ready_signalled)
+		sp->data_ready_signalled = 0;
+
 	return error;
 nomem:
 	error = -ENOMEM;
25 changes: 16 additions & 9 deletions net/sctp/ulpqueue.c
@@ -194,6 +194,7 @@ static int sctp_ulpq_clear_pd(struct sctp_ulpq *ulpq)
 int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
 {
 	struct sock *sk = ulpq->asoc->base.sk;
+	struct sctp_sock *sp = sctp_sk(sk);
 	struct sk_buff_head *queue, *skb_list;
 	struct sk_buff *skb = sctp_event2skb(event);
 	int clear_pd = 0;
@@ -211,15 +212,15 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
 		sk_incoming_cpu_update(sk);
 	}
 	/* Check if the user wishes to receive this event. */
-	if (!sctp_ulpevent_is_enabled(event, &sctp_sk(sk)->subscribe))
+	if (!sctp_ulpevent_is_enabled(event, &sp->subscribe))
 		goto out_free;
 
 	/* If we are in partial delivery mode, post to the lobby until
 	 * partial delivery is cleared, unless, of course _this_ is
 	 * the association the cause of the partial delivery.
 	 */
 
-	if (atomic_read(&sctp_sk(sk)->pd_mode) == 0) {
+	if (atomic_read(&sp->pd_mode) == 0) {
 		queue = &sk->sk_receive_queue;
 	} else {
 		if (ulpq->pd_mode) {
@@ -231,7 +232,7 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
 		if ((event->msg_flags & MSG_NOTIFICATION) ||
 		    (SCTP_DATA_NOT_FRAG ==
 		    (event->msg_flags & SCTP_DATA_FRAG_MASK)))
-			queue = &sctp_sk(sk)->pd_lobby;
+			queue = &sp->pd_lobby;
 		else {
 			clear_pd = event->msg_flags & MSG_EOR;
 			queue = &sk->sk_receive_queue;
@@ -242,10 +243,10 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
 			 * can queue this to the receive queue instead
 			 * of the lobby.
 			 */
-			if (sctp_sk(sk)->frag_interleave)
+			if (sp->frag_interleave)
 				queue = &sk->sk_receive_queue;
 			else
-				queue = &sctp_sk(sk)->pd_lobby;
+				queue = &sp->pd_lobby;
 		}
 	}
 
@@ -264,8 +265,10 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
 	if (clear_pd)
 		sctp_ulpq_clear_pd(ulpq);
 
-	if (queue == &sk->sk_receive_queue)
-		sctp_sk(sk)->pending_data_ready = 1;
+	if (queue == &sk->sk_receive_queue && !sp->data_ready_signalled) {
+		sp->data_ready_signalled = 1;
+		sk->sk_data_ready(sk);
+	}
 	return 1;
 
 out_free:
@@ -1126,11 +1129,13 @@ void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
 {
 	struct sctp_ulpevent *ev = NULL;
 	struct sock *sk;
+	struct sctp_sock *sp;
 
 	if (!ulpq->pd_mode)
 		return;
 
 	sk = ulpq->asoc->base.sk;
+	sp = sctp_sk(sk);
 	if (sctp_ulpevent_type_enabled(SCTP_PARTIAL_DELIVERY_EVENT,
 				       &sctp_sk(sk)->subscribe))
 		ev = sctp_ulpevent_make_pdapi(ulpq->asoc,
@@ -1140,6 +1145,8 @@ void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
 		__skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev));
 
 	/* If there is data waiting, send it up the socket now. */
-	if (sctp_ulpq_clear_pd(ulpq) || ev)
-		sctp_sk(sk)->pending_data_ready = 1;
+	if ((sctp_ulpq_clear_pd(ulpq) || ev) && !sp->data_ready_signalled) {
+		sp->data_ready_signalled = 1;
+		sk->sk_data_ready(sk);
+	}
 }
