Skip to content

Commit d274cb3

Browse files
Peter Zijlstra, Ingo Molnar
Peter Zijlstra
authored and
Ingo Molnar
committed
sched: Simplify ->cpu_power initialization
The code in update_group_power() does what init_sched_groups_power() does and more, so remove the special init_ code and call the generic code instead. Also move the sd->span_weight initialization because update_group_power() needs it. Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl> Cc: Mike Galbraith <efault@gmx.de> Cc: Nick Piggin <npiggin@kernel.dk> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Andrew Morton <akpm@linux-foundation.org> Link: http://lkml.kernel.org/r/20110407122941.875856012@chello.nl Signed-off-by: Ingo Molnar <mingo@elte.hu>
1 parent c4a8849 commit d274cb3

File tree

1 file changed

+5
-39
lines changed

1 file changed

+5
-39
lines changed

kernel/sched.c

+5-39
Original file line number | Diff line number | Diff line change
@@ -6679,9 +6679,6 @@ cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
66796679
struct rq *rq = cpu_rq(cpu);
66806680
struct sched_domain *tmp;
66816681

6682-
for (tmp = sd; tmp; tmp = tmp->parent)
6683-
tmp->span_weight = cpumask_weight(sched_domain_span(tmp));
6684-
66856682
/* Remove the sched domains which do not contribute to scheduling. */
66866683
for (tmp = sd; tmp; ) {
66876684
struct sched_domain *parent = tmp->parent;
@@ -7159,48 +7156,14 @@ static void free_sched_groups(const struct cpumask *cpu_map,
71597156
*/
71607157
static void init_sched_groups_power(int cpu, struct sched_domain *sd)
71617158
{
7162-
struct sched_domain *child;
7163-
struct sched_group *group;
7164-
long power;
7165-
int weight;
7166-
71677159
WARN_ON(!sd || !sd->groups);
71687160

71697161
if (cpu != group_first_cpu(sd->groups))
71707162
return;
71717163

71727164
sd->groups->group_weight = cpumask_weight(sched_group_cpus(sd->groups));
71737165

7174-
child = sd->child;
7175-
7176-
sd->groups->cpu_power = 0;
7177-
7178-
if (!child) {
7179-
power = SCHED_LOAD_SCALE;
7180-
weight = cpumask_weight(sched_domain_span(sd));
7181-
/*
7182-
* SMT siblings share the power of a single core.
7183-
* Usually multiple threads get a better yield out of
7184-
* that one core than a single thread would have,
7185-
* reflect that in sd->smt_gain.
7186-
*/
7187-
if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1) {
7188-
power *= sd->smt_gain;
7189-
power /= weight;
7190-
power >>= SCHED_LOAD_SHIFT;
7191-
}
7192-
sd->groups->cpu_power += power;
7193-
return;
7194-
}
7195-
7196-
/*
7197-
* Add cpu_power of each child group to this groups cpu_power.
7198-
*/
7199-
group = child->groups;
7200-
do {
7201-
sd->groups->cpu_power += group->cpu_power;
7202-
group = group->next;
7203-
} while (group != child->groups);
7166+
update_group_power(sd, cpu);
72047167
}
72057168

72067169
/*
@@ -7507,7 +7470,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
75077470
{
75087471
enum s_alloc alloc_state = sa_none;
75097472
struct s_data d;
7510-
struct sched_domain *sd;
7473+
struct sched_domain *sd, *tmp;
75117474
int i;
75127475
#ifdef CONFIG_NUMA
75137476
d.sd_allnodes = 0;
@@ -7530,6 +7493,9 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
75307493
sd = __build_book_sched_domain(&d, cpu_map, attr, sd, i);
75317494
sd = __build_mc_sched_domain(&d, cpu_map, attr, sd, i);
75327495
sd = __build_smt_sched_domain(&d, cpu_map, attr, sd, i);
7496+
7497+
for (tmp = sd; tmp; tmp = tmp->parent)
7498+
tmp->span_weight = cpumask_weight(sched_domain_span(tmp));
75337499
}
75347500

75357501
for_each_cpu(i, cpu_map) {

0 commit comments

Comments
 (0)