Skip to content

Commit

Permalink
sched: dynamically update the root-domain span/online maps
Browse files Browse the repository at this point in the history
The baseline code statically builds the span maps when the domain is formed.
Previous attempts at dynamically updating the maps caused a suspend-to-RAM
regression, which should now be fixed.

Signed-off-by: Gregory Haskins <ghaskins@novell.com>
Cc: Gautham R Shenoy <ego@in.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
  • Loading branch information
Gregory Haskins authored and Ingo Molnar committed Jan 25, 2008
1 parent f85d6c7 commit dc93852
Showing 1 changed file with 19 additions and 12 deletions.
31 changes: 19 additions & 12 deletions kernel/sched.c
Original file line number Diff line number Diff line change
Expand Up @@ -359,8 +359,6 @@ struct rt_rq {
* exclusive cpuset is created, we also create and attach a new root-domain
* object.
*
* By default the system creates a single root-domain with all cpus as
* members (mimicking the global state we have today).
*/
struct root_domain {
atomic_t refcount;
Expand All @@ -375,6 +373,10 @@ struct root_domain {
atomic_t rto_count;
};

/*
* By default the system creates a single root-domain with all cpus as
* members (mimicking the global state we have today).
*/
static struct root_domain def_root_domain;

#endif
Expand Down Expand Up @@ -5859,13 +5861,20 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd)
class->leave_domain(rq);
}

cpu_clear(rq->cpu, old_rd->span);
cpu_clear(rq->cpu, old_rd->online);

if (atomic_dec_and_test(&old_rd->refcount))
kfree(old_rd);
}

atomic_inc(&rd->refcount);
rq->rd = rd;

cpu_set(rq->cpu, rd->span);
if (cpu_isset(rq->cpu, cpu_online_map))
cpu_set(rq->cpu, rd->online);

for (class = sched_class_highest; class; class = class->next) {
if (class->join_domain)
class->join_domain(rq);
Expand All @@ -5874,31 +5883,29 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd)
spin_unlock_irqrestore(&rq->lock, flags);
}

static void init_rootdomain(struct root_domain *rd, const cpumask_t *map)
static void init_rootdomain(struct root_domain *rd)
{
memset(rd, 0, sizeof(*rd));

rd->span = *map;
cpus_and(rd->online, rd->span, cpu_online_map);
cpus_clear(rd->span);
cpus_clear(rd->online);
}

static void init_defrootdomain(void)
{
cpumask_t cpus = CPU_MASK_ALL;

init_rootdomain(&def_root_domain, &cpus);
init_rootdomain(&def_root_domain);
atomic_set(&def_root_domain.refcount, 1);
}

static struct root_domain *alloc_rootdomain(const cpumask_t *map)
static struct root_domain *alloc_rootdomain(void)
{
struct root_domain *rd;

rd = kmalloc(sizeof(*rd), GFP_KERNEL);
if (!rd)
return NULL;

init_rootdomain(rd, map);
init_rootdomain(rd);

return rd;
}
Expand Down Expand Up @@ -6319,7 +6326,7 @@ static int build_sched_domains(const cpumask_t *cpu_map)
sched_group_nodes_bycpu[first_cpu(*cpu_map)] = sched_group_nodes;
#endif

rd = alloc_rootdomain(cpu_map);
rd = alloc_rootdomain();
if (!rd) {
printk(KERN_WARNING "Cannot alloc root domain\n");
return -ENOMEM;
Expand Down Expand Up @@ -6894,7 +6901,6 @@ void __init sched_init(void)
#ifdef CONFIG_SMP
rq->sd = NULL;
rq->rd = NULL;
rq_attach_root(rq, &def_root_domain);
rq->active_balance = 0;
rq->next_balance = jiffies;
rq->push_cpu = 0;
Expand All @@ -6903,6 +6909,7 @@ void __init sched_init(void)
INIT_LIST_HEAD(&rq->migration_queue);
rq->rt.highest_prio = MAX_RT_PRIO;
rq->rt.overloaded = 0;
rq_attach_root(rq, &def_root_domain);
#endif
atomic_set(&rq->nr_iowait, 0);

Expand Down

0 comments on commit dc93852

Please sign in to comment.