Commit 2ae4513

edumazet authored and Paolo Abeni committed
net_sched: sch_fq: remove q->ktime_cache
Now that both enqueue() and dequeue() need to use ktime_get_ns(), there is no point wasting 8 bytes in struct fq_sched_data. This makes room for future fields. ;)

Signed-off-by: Eric Dumazet <edumazet@google.com>
Acked-by: Dave Taht <dave.taht@gmail.com>
Reviewed-by: Willem de Bruijn <willemb@google.com>
Reviewed-by: Toke Høiland-Jørgensen <toke@redhat.com>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
1 parent 93e7eca commit 2ae4513
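
The pattern behind the diff is simple: instead of every helper reading a timestamp cached in the shared qdisc state, the caller samples the clock once per enqueue/dequeue and passes the value down as a parameter. Below is a minimal userspace sketch of that refactor, not kernel code; the sched_data, beyond_horizon() and enqueue_one() names are invented for illustration.

#include <stdbool.h>
#include <stdint.h>
#include <time.h>

/* Simplified stand-in for struct fq_sched_data: after the patch it no
 * longer carries a cached copy of the last clock read. */
struct sched_data {
	uint64_t horizon;	/* how far in the future a timestamp may be */
};

/* Stand-in for ktime_get_ns(): monotonic clock in nanoseconds. */
static uint64_t clock_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

/* Helpers now take 'now' as an argument instead of reading a cached
 * field out of the shared structure. */
static bool beyond_horizon(const struct sched_data *q, uint64_t tstamp,
			   uint64_t now)
{
	return tstamp > now + q->horizon;
}

static void enqueue_one(struct sched_data *q, uint64_t tstamp)
{
	uint64_t now = clock_ns();	/* sample the clock once per call */

	if (beyond_horizon(q, tstamp, now)) {
		/* cap or drop the packet, as fq_enqueue() does */
	}
	/* ... classify and queue using the same 'now' value ... */
}

As the commit message notes, dropping the cached field frees 8 bytes in struct fq_sched_data for future fields.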

File tree: 1 file changed (+15, -13)

net/sched/sch_fq.c

Lines changed: 15 additions & 13 deletions
@@ -106,7 +106,6 @@ struct fq_sched_data {
 
 	struct rb_root	delayed;	/* for rate limited flows */
 	u64		time_next_delayed_flow;
-	u64		ktime_cache;	/* copy of last ktime_get_ns() */
 	unsigned long	unthrottle_latency_ns;
 
 	struct fq_flow	internal;	/* for non classified or high prio packets */
@@ -282,12 +281,13 @@ static void fq_gc(struct fq_sched_data *q,
  *
  * FQ can not use generic TCQ_F_CAN_BYPASS infrastructure.
  */
-static bool fq_fastpath_check(const struct Qdisc *sch, struct sk_buff *skb)
+static bool fq_fastpath_check(const struct Qdisc *sch, struct sk_buff *skb,
+			      u64 now)
 {
 	const struct fq_sched_data *q = qdisc_priv(sch);
 	const struct sock *sk;
 
-	if (fq_skb_cb(skb)->time_to_send > q->ktime_cache)
+	if (fq_skb_cb(skb)->time_to_send > now)
 		return false;
 
 	if (sch->q.qlen != 0) {
@@ -317,7 +317,8 @@ static bool fq_fastpath_check(const struct Qdisc *sch, struct sk_buff *skb)
 	return true;
 }
 
-static struct fq_flow *fq_classify(struct Qdisc *sch, struct sk_buff *skb)
+static struct fq_flow *fq_classify(struct Qdisc *sch, struct sk_buff *skb,
+				   u64 now)
 {
 	struct fq_sched_data *q = qdisc_priv(sch);
 	struct rb_node **p, *parent;
@@ -360,7 +361,7 @@ static struct fq_flow *fq_classify(struct Qdisc *sch, struct sk_buff *skb)
 		sk = (struct sock *)((hash << 1) | 1UL);
 	}
 
-	if (fq_fastpath_check(sch, skb)) {
+	if (fq_fastpath_check(sch, skb, now)) {
 		q->internal.stat_fastpath_packets++;
 		return &q->internal;
 	}
@@ -497,37 +498,38 @@ static void flow_queue_add(struct fq_flow *flow, struct sk_buff *skb)
 }
 
 static bool fq_packet_beyond_horizon(const struct sk_buff *skb,
-				     const struct fq_sched_data *q)
+				     const struct fq_sched_data *q, u64 now)
 {
-	return unlikely((s64)skb->tstamp > (s64)(q->ktime_cache + q->horizon));
+	return unlikely((s64)skb->tstamp > (s64)(now + q->horizon));
 }
 
 static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 		      struct sk_buff **to_free)
 {
 	struct fq_sched_data *q = qdisc_priv(sch);
 	struct fq_flow *f;
+	u64 now;
 
 	if (unlikely(sch->q.qlen >= sch->limit))
 		return qdisc_drop(skb, sch, to_free);
 
-	q->ktime_cache = ktime_get_ns();
+	now = ktime_get_ns();
 	if (!skb->tstamp) {
-		fq_skb_cb(skb)->time_to_send = q->ktime_cache;
+		fq_skb_cb(skb)->time_to_send = now;
 	} else {
 		/* Check if packet timestamp is too far in the future. */
-		if (fq_packet_beyond_horizon(skb, q)) {
+		if (fq_packet_beyond_horizon(skb, q, now)) {
 			if (q->horizon_drop) {
 				q->stat_horizon_drops++;
 				return qdisc_drop(skb, sch, to_free);
 			}
 			q->stat_horizon_caps++;
-			skb->tstamp = q->ktime_cache + q->horizon;
+			skb->tstamp = now + q->horizon;
 		}
 		fq_skb_cb(skb)->time_to_send = skb->tstamp;
 	}
 
-	f = fq_classify(sch, skb);
+	f = fq_classify(sch, skb, now);
 
 	if (f != &q->internal) {
 		if (unlikely(f->qlen >= q->flow_plimit)) {
@@ -602,7 +604,7 @@ static struct sk_buff *fq_dequeue(struct Qdisc *sch)
 		goto out;
 	}
 
-	q->ktime_cache = now = ktime_get_ns();
+	now = ktime_get_ns();
 	fq_check_throttled(q, now);
 begin:
 	head = &q->new_flows;
