FIX: from CodeAurora - cherry picked from intersactRaven
sched: match mainline commit
    f26f9aff6aaf67e9a430d16c266f91b13a5bff64 (Sched: fix skip_clock_update optimization)

    This is needed for the following two commits, and is related to
    https://bugzilla.kernel.org/show_bug.cgi?id=23902, a regression
    introduced in 2.6.35.12. I don't know if we were really impacted by
    this issue, but it's best to fix the logic. Hopefully it will be
    properly fixed in 2.6.35.14.

sched: Fix the irqtime code to deal with u64 wraps

    Some ARM systems have a short sched_clock() [ which needs to be fixed
    too ], but this exposed a bug in the irq_time code as well: it doesn't
    deal with wraps at all.

    Fix the irq_time code to deal with u64 wraps by rewriting the code to
    only use delta increments, which avoids the whole issue.
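
    To see why accumulating unsigned deltas sidesteps the wrap problem,
    here is a minimal standalone C sketch (not kernel code; account_delta()
    is a made-up helper and the per-CPU machinery is reduced to plain
    globals): with unsigned arithmetic, now - prev is well defined modulo
    2^64, so the computed delta stays correct even when the clock value
    wraps past zero.

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint64_t irq_start_time;    /* last clock sample */
    static uint64_t cpu_hardirq_time;  /* accumulated hardirq time */

    static void account_delta(uint64_t now)
    {
        /* Unsigned subtraction is exact modulo 2^64, wrap or not. */
        uint64_t delta = now - irq_start_time;

        irq_start_time += delta;      /* i.e. irq_start_time = now */
        cpu_hardirq_time += delta;    /* only ever moves forward   */
    }

    int main(void)
    {
        /* Clock sits 100 ticks before wrapping through zero ... */
        irq_start_time = UINT64_MAX - 99;

        /* ... and the next sample has already wrapped around to 50. */
        account_delta(50);

        /* Prints 150: 100 ticks to wrap through zero, 50 after it. */
        printf("accumulated: %" PRIu64 "\n", cpu_hardirq_time);
        return 0;
    }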

    Reviewed-by: Venkatesh Pallipadi <venki@google.com>
    Reported-by: Mikael Pettersson <mikpe@it.uu.se>
    Tested-by: Mikael Pettersson <mikpe@it.uu.se>
    Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
    LKML-Reference: <1292242433.6803.199.camel@twins>
    Signed-off-by: Ingo Molnar <mingo@elte.hu>

sched: Fix the irqtime code for 32bit

    Since the irqtime accounting uses non-atomic u64 values and can be read
    from remote CPUs (writes are strictly CPU-local, reads are not), we
    have to deal with observing partial updates.

    When we do observe partial updates, the clock movement (in particular,
    ->clock_task movement) will go funny (in either direction); a
    subsequent clock update (observing the full update) will make it go
    funny in the opposite direction.

    Since we rely on these clocks being strictly monotonic, we cannot
    suffer backwards motion. One possible solution would be to simply
    ignore all backwards deltas, but that would lead to accounting
    artefacts, most notably clock_task + irq_time != clock; this
    inaccuracy would end up in user-visible stats.

    Therefore serialize the reads using a seqcount.
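
    A minimal userspace sketch of that read/retry protocol (C11, single
    translation unit; the memory ordering is simplified compared with the
    kernel's seqcount_t, smp_wmb() and read_seqcount_begin()/retry()
    helpers, and irq_time_write()/irq_time_read() here are illustrative
    stand-ins): the writer makes the sequence odd, updates both u64
    counters, then makes it even again; a reader retries until it sees an
    even, unchanged sequence, so it never acts on a half-written pair.

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    static atomic_uint irq_time_seq;  /* even: stable, odd: write in progress */
    static uint64_t cpu_hardirq_time;
    static uint64_t cpu_softirq_time;

    static void irq_time_write(uint64_t hard_delta, uint64_t soft_delta)
    {
        atomic_fetch_add_explicit(&irq_time_seq, 1, memory_order_release);
        /* sequence is now odd: concurrent readers will retry */
        cpu_hardirq_time += hard_delta;
        cpu_softirq_time += soft_delta;
        atomic_fetch_add_explicit(&irq_time_seq, 1, memory_order_release);
    }

    static uint64_t irq_time_read(void)
    {
        unsigned int seq;
        uint64_t sum;

        do {
            seq = atomic_load_explicit(&irq_time_seq, memory_order_acquire);
            sum = cpu_softirq_time + cpu_hardirq_time;
            /* retry while a write is in flight or one completed meanwhile */
        } while ((seq & 1) ||
                 seq != atomic_load_explicit(&irq_time_seq, memory_order_acquire));

        return sum;
    }

    int main(void)
    {
        irq_time_write(100, 25);
        printf("irq time: %llu\n", (unsigned long long)irq_time_read());
        return 0;
    }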

    Reviewed-by: Venkatesh Pallipadi <venki@google.com>
    Reported-by: Mikael Pettersson <mikpe@it.uu.se>
    Tested-by: Mikael Pettersson <mikpe@it.uu.se>
    Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
    LKML-Reference: <1292242434.6803.200.camel@twins>
    Signed-off-by: Ingo Molnar <mingo@elte.hu>
LorDClockaN authored and LeeDrOiD committed Sep 17, 2011
1 parent 85a55e9 commit ebc6028
kernel/sched.c: 131 changes (94 additions & 37 deletions)
@@ -647,21 +647,17 @@ static inline struct task_group *task_group(struct task_struct *p)
 
 #endif /* CONFIG_CGROUP_SCHED */
 
-static u64 irq_time_cpu(int cpu);
-static void sched_irq_time_avg_update(struct rq *rq, u64 irq_time);
+static void update_rq_clock_task(struct rq *rq, s64 delta);
 
-inline void update_rq_clock(struct rq *rq)
+static void update_rq_clock(struct rq *rq)
 {
-        int cpu = cpu_of(rq);
-        u64 irq_time;
+        s64 delta;
 
-        if (!rq->skip_clock_update)
-                rq->clock = sched_clock_cpu(cpu_of(rq));
-        irq_time = irq_time_cpu(cpu);
-        if (rq->clock - irq_time > rq->clock_task)
-                rq->clock_task = rq->clock - irq_time;
+        if (rq->skip_clock_update)
+                return;
 
-        sched_irq_time_avg_update(rq, irq_time);
+        delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
+        rq->clock += delta;
+        update_rq_clock_task(rq, delta);
 }
 
 /*
@@ -1845,10 +1841,9 @@ static const struct sched_class rt_sched_class;
  * They are read and saved off onto struct rq in update_rq_clock().
  * This may result in other CPU reading this CPU's irq time and can
  * race with irq/account_system_vtime on this CPU. We would either get old
- * or new value (or semi updated value on 32 bit) with a side effect of
- * accounting a slice of irq time to wrong task when irq is in progress
- * while we read rq->clock. That is a worthy compromise in place of having
- * locks on each irq in account_system_time.
+ * or new value with a side effect of accounting a slice of irq time to wrong
+ * task when irq is in progress while we read rq->clock. That is a worthy
+ * compromise in place of having locks on each irq in account_system_time.
  */
 static DEFINE_PER_CPU(u64, cpu_hardirq_time);
 static DEFINE_PER_CPU(u64, cpu_softirq_time);
@@ -1866,63 +1861,125 @@ void disable_sched_clock_irqtime(void)
         sched_clock_irqtime = 0;
 }
 
-static u64 irq_time_cpu(int cpu)
+#ifndef CONFIG_64BIT
+static DEFINE_PER_CPU(seqcount_t, irq_time_seq);
+
+static inline void irq_time_write_begin(void)
 {
-        if (!sched_clock_irqtime)
-                return 0;
+        __this_cpu_inc(irq_time_seq.sequence);
+        smp_wmb();
+}
 
+static inline void irq_time_write_end(void)
+{
+        smp_wmb();
+        __this_cpu_inc(irq_time_seq.sequence);
+}
+
+static inline u64 irq_time_read(int cpu)
+{
+        u64 irq_time;
+        unsigned seq;
+
+        do {
+                seq = read_seqcount_begin(&per_cpu(irq_time_seq, cpu));
+                irq_time = per_cpu(cpu_softirq_time, cpu) +
+                           per_cpu(cpu_hardirq_time, cpu);
+        } while (read_seqcount_retry(&per_cpu(irq_time_seq, cpu), seq));
+
+        return irq_time;
+}
+#else /* CONFIG_64BIT */
+static inline void irq_time_write_begin(void)
+{
+}
+
+static inline void irq_time_write_end(void)
+{
+}
+
+static inline u64 irq_time_read(int cpu)
+{
         return per_cpu(cpu_softirq_time, cpu) + per_cpu(cpu_hardirq_time, cpu);
 }
+#endif /* CONFIG_64BIT */
 
+/*
+ * Called before incrementing preempt_count on {soft,}irq_enter
+ * and before decrementing preempt_count on {soft,}irq_exit.
+ */
 void account_system_vtime(struct task_struct *curr)
 {
         unsigned long flags;
+        s64 delta;
         int cpu;
-        u64 now, delta;
 
         if (!sched_clock_irqtime)
                 return;
 
         local_irq_save(flags);
 
         cpu = smp_processor_id();
-        now = sched_clock_cpu(cpu);
-        delta = now - per_cpu(irq_start_time, cpu);
-        per_cpu(irq_start_time, cpu) = now;
+        delta = sched_clock_cpu(cpu) - __this_cpu_read(irq_start_time);
+        __this_cpu_add(irq_start_time, delta);
 
+        irq_time_write_begin();
         /*
          * We do not account for softirq time from ksoftirqd here.
          * We want to continue accounting softirq time to ksoftirqd thread
         * in that case, so as not to confuse scheduler with a special task
          * that do not consume any time, but still wants to run.
          */
         if (hardirq_count())
-                per_cpu(cpu_hardirq_time, cpu) += delta;
+                __this_cpu_add(cpu_hardirq_time, delta);
         else if (in_serving_softirq() && !(curr->flags & PF_KSOFTIRQD))
-                per_cpu(cpu_softirq_time, cpu) += delta;
+                __this_cpu_add(cpu_softirq_time, delta);
 
+        irq_time_write_end();
         local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(account_system_vtime);
 
-static void sched_irq_time_avg_update(struct rq *rq, u64 curr_irq_time)
+static void update_rq_clock_task(struct rq *rq, s64 delta)
 {
-        if (sched_clock_irqtime && sched_feat(NONIRQ_POWER)) {
-                u64 delta_irq = curr_irq_time - rq->prev_irq_time;
-                rq->prev_irq_time = curr_irq_time;
-                sched_rt_avg_update(rq, delta_irq);
-        }
+        s64 irq_delta;
+
+        irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;
+
+        /*
+         * Since irq_time is only updated on {soft,}irq_exit, we might run into
+         * this case when a previous update_rq_clock() happened inside a
+         * {soft,}irq region.
+         *
+         * When this happens, we stop ->clock_task and only update the
+         * prev_irq_time stamp to account for the part that fit, so that a next
+         * update will consume the rest. This ensures ->clock_task is
+         * monotonic.
+         *
+         * It does however cause some slight miss-attribution of {soft,}irq
+         * time, a more accurate solution would be to update the irq_time using
+         * the current rq->clock timestamp, except that would require using
+         * atomic ops.
+         */
+        if (irq_delta > delta)
+                irq_delta = delta;
+
+        rq->prev_irq_time += irq_delta;
+        delta -= irq_delta;
+        rq->clock_task += delta;
+
+        if (irq_delta && sched_feat(NONIRQ_POWER))
+                sched_rt_avg_update(rq, irq_delta);
 }
 
-#else
+#else /* CONFIG_IRQ_TIME_ACCOUNTING */
 
-static u64 irq_time_cpu(int cpu)
+static void update_rq_clock_task(struct rq *rq, s64 delta)
 {
-        return 0;
+        rq->clock_task += delta;
 }
 
-static void sched_irq_time_avg_update(struct rq *rq, u64 curr_irq_time) { }
-
-#endif
+#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
 
 #include "sched_stats.h"
 

