cputime: Consolidate cputime adjustment code
task_cputime_adjusted() and thread_group_cputime_adjusted()
essentially share the same code. They just don't use the same
source:

* The first function uses the cputime in the task struct and the
previous adjusted snapshot that ensures monotonicity.

* The second adds the cputime of all tasks in the group and the
previous adjusted snapshot of the whole group from the signal
structure.

Consolidate the common code that does the adjustment. These
functions then only need to fetch the values from the appropriate
source.
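
[Editor's note: the following is a minimal standalone C sketch of the resulting
call pattern, added for illustration. Every type and struct here is a simplified
stand-in (plain integers, a toy "struct task"), not the kernel definitions; the
real change is in the kernel/sched/cputime.c hunks further down.]

#include <stdio.h>

typedef unsigned long long cputime_t;		/* stand-in for the kernel type */

struct cputime {				/* previous adjusted snapshot */
	cputime_t utime;
	cputime_t stime;
};

struct task_cputime {				/* raw snapshot, whatever its source */
	cputime_t utime;
	cputime_t stime;
	unsigned long long sum_exec_runtime;
};

struct task {					/* toy stand-in for task_struct */
	cputime_t utime, stime;
	unsigned long long sum_exec_runtime;
	struct cputime prev_cputime;		/* per-task previous snapshot */
	struct cputime signal_prev_cputime;	/* stand-in for signal->prev_cputime */
};

/* Common part: adjust one snapshot against one previous snapshot. */
static void cputime_adjust(const struct task_cputime *curr, struct cputime *prev,
			   cputime_t *ut, cputime_t *st)
{
	/* The real scaling/monotonicity logic lives here (see the diff below);
	 * this stub only clamps so repeated calls never go backwards. */
	prev->utime = curr->utime > prev->utime ? curr->utime : prev->utime;
	prev->stime = curr->stime > prev->stime ? curr->stime : prev->stime;
	*ut = prev->utime;
	*st = prev->stime;
}

/* Wrapper 1: values come from the task itself. */
static void task_cputime_adjusted(struct task *p, cputime_t *ut, cputime_t *st)
{
	struct task_cputime cputime = {
		.utime = p->utime,
		.stime = p->stime,
		.sum_exec_runtime = p->sum_exec_runtime,
	};
	cputime_adjust(&cputime, &p->prev_cputime, ut, st);
}

/* Wrapper 2: values for the whole thread group; the group sum is faked here. */
static void thread_group_cputime_adjusted(struct task *p, cputime_t *ut, cputime_t *st)
{
	struct task_cputime cputime = {		/* the kernel sums every group member here */
		.utime = p->utime,
		.stime = p->stime,
		.sum_exec_runtime = p->sum_exec_runtime,
	};
	cputime_adjust(&cputime, &p->signal_prev_cputime, ut, st);
}

int main(void)
{
	struct task t = { .utime = 3, .stime = 2, .sum_exec_runtime = 5000000 };
	cputime_t ut, st;

	task_cputime_adjusted(&t, &ut, &st);
	printf("task:  utime=%llu stime=%llu\n", ut, st);
	thread_group_cputime_adjusted(&t, &ut, &st);
	printf("group: utime=%llu stime=%llu\n", ut, st);
	return 0;
}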

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Paul Gortmaker <paul.gortmaker@windriver.com>
fweisbec committed Nov 28, 2012
1 parent e80d0a1 commit d37f761
Showing 3 changed files with 43 additions and 28 deletions.
include/linux/sched.h: 23 changes (19 additions, 4 deletions)
@@ -433,14 +433,29 @@ struct cpu_itimer {
 	u32 incr_error;
 };
 
 /**
+ * struct cputime - snaphsot of system and user cputime
+ * @utime: time spent in user mode
+ * @stime: time spent in system mode
+ *
+ * Gathers a generic snapshot of user and system time.
+ */
+struct cputime {
+	cputime_t utime;
+	cputime_t stime;
+};
+
+/**
  * struct task_cputime - collected CPU time counts
  * @utime: time spent in user mode, in &cputime_t units
  * @stime: time spent in kernel mode, in &cputime_t units
  * @sum_exec_runtime: total time spent on the CPU, in nanoseconds
  *
- * This structure groups together three kinds of CPU time that are
- * tracked for threads and thread groups. Most things considering
+ * This is an extension of struct cputime that includes the total runtime
+ * spent by the task from the scheduler point of view.
+ *
+ * As a result, this structure groups together three kinds of CPU time
+ * that are tracked for threads and thread groups. Most things considering
  * CPU time want to group these counts together and treat all three
  * of them in parallel.
  */
@@ -581,7 +596,7 @@ struct signal_struct {
 	cputime_t gtime;
 	cputime_t cgtime;
 #ifndef CONFIG_VIRT_CPU_ACCOUNTING
-	cputime_t prev_utime, prev_stime;
+	struct cputime prev_cputime;
 #endif
 	unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
 	unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
@@ -1340,7 +1355,7 @@ struct task_struct {
 	cputime_t utime, stime, utimescaled, stimescaled;
 	cputime_t gtime;
 #ifndef CONFIG_VIRT_CPU_ACCOUNTING
-	cputime_t prev_utime, prev_stime;
+	struct cputime prev_cputime;
 #endif
 	unsigned long nvcsw, nivcsw; /* context switch counts */
 	struct timespec start_time; /* monotonic time */
kernel/fork.c: 2 changes (1 addition, 1 deletion)
@@ -1222,7 +1222,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	p->utime = p->stime = p->gtime = 0;
 	p->utimescaled = p->stimescaled = 0;
 #ifndef CONFIG_VIRT_CPU_ACCOUNTING
-	p->prev_utime = p->prev_stime = 0;
+	p->prev_cputime.utime = p->prev_cputime.stime = 0;
 #endif
 #if defined(SPLIT_RSS_COUNTING)
 	memset(&p->rss_stat, 0, sizeof(p->rss_stat));
kernel/sched/cputime.c: 46 changes (23 additions, 23 deletions)
@@ -516,14 +516,18 @@ static cputime_t scale_utime(cputime_t utime, cputime_t rtime, cputime_t total)
 	return (__force cputime_t) temp;
 }
 
-void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
+static void cputime_adjust(struct task_cputime *curr,
+			   struct cputime *prev,
+			   cputime_t *ut, cputime_t *st)
 {
-	cputime_t rtime, utime = p->utime, total = utime + p->stime;
+	cputime_t rtime, utime, total;
 
+	utime = curr->utime;
+	total = utime + curr->stime;
 	/*
 	 * Use CFS's precise accounting:
 	 */
-	rtime = nsecs_to_cputime(p->se.sum_exec_runtime);
+	rtime = nsecs_to_cputime(curr->sum_exec_runtime);
 
 	if (total)
 		utime = scale_utime(utime, rtime, total);
@@ -533,36 +537,32 @@ void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
 	/*
 	 * Compare with previous values, to keep monotonicity:
 	 */
-	p->prev_utime = max(p->prev_utime, utime);
-	p->prev_stime = max(p->prev_stime, rtime - p->prev_utime);
+	prev->utime = max(prev->utime, utime);
+	prev->stime = max(prev->stime, rtime - prev->utime);
+
+	*ut = prev->utime;
+	*st = prev->stime;
+}
 
-	*ut = p->prev_utime;
-	*st = p->prev_stime;
+void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
+{
+	struct task_cputime cputime = {
+		.utime = p->utime,
+		.stime = p->stime,
+		.sum_exec_runtime = p->se.sum_exec_runtime,
+	};
+
+	cputime_adjust(&cputime, &p->prev_cputime, ut, st);
 }
 
 /*
  * Must be called with siglock held.
  */
 void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
 {
-	struct signal_struct *sig = p->signal;
 	struct task_cputime cputime;
-	cputime_t rtime, utime, total;
 
 	thread_group_cputime(p, &cputime);
-
-	total = cputime.utime + cputime.stime;
-	rtime = nsecs_to_cputime(cputime.sum_exec_runtime);
-
-	if (total)
-		utime = scale_utime(cputime.utime, rtime, total);
-	else
-		utime = rtime;
-
-	sig->prev_utime = max(sig->prev_utime, utime);
-	sig->prev_stime = max(sig->prev_stime, rtime - sig->prev_utime);
-
-	*ut = sig->prev_utime;
-	*st = sig->prev_stime;
+	cputime_adjust(&cputime, &p->signal->prev_cputime, ut, st);
 }
 #endif
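
[Editor's note: the adjustment that cputime_adjust() now centralizes is unchanged by this
patch, only relocated. The tick-sampled utime/stime pair is rescaled so it sums to the
precise scheduler runtime while keeping the observed utime:stime ratio, and the previous
snapshot is only ever raised (max()), so successive readings stay monotonic. A tiny
self-contained check with made-up numbers, treating cputime_t as a plain tick count
(illustrative only, not part of the patch):]

#include <assert.h>

int main(void)
{
	unsigned long long utime = 2, stime = 2;	/* tick-based samples */
	unsigned long long rtime = 6;			/* precise runtime from the scheduler */
	unsigned long long total = utime + stime;

	/* rescale: keep the 1:1 utime:stime ratio, but make the sum equal rtime */
	unsigned long long scaled_utime = total ? rtime * utime / total : rtime;
	unsigned long long scaled_stime = rtime - scaled_utime;
	assert(scaled_utime == 3 && scaled_stime == 3);

	/* monotonicity: never report less than the previous adjusted snapshot */
	unsigned long long prev_utime = 4, prev_stime = 1;
	prev_utime = prev_utime > scaled_utime ? prev_utime : scaled_utime;		/* max() */
	prev_stime = prev_stime > (rtime - prev_utime) ? prev_stime : (rtime - prev_utime);
	assert(prev_utime == 4 && prev_stime == 2);

	return 0;
}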
