Merge git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched

* git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched:
  sched: proper prototype for kernel/sched.c:migration_init()
  sched: avoid large irq-latencies in smp-balancing
  sched: fix copy_namespace() <-> sched_fork() dependency in do_fork
  sched: clean up the wakeup preempt check, #2
  sched: clean up the wakeup preempt check
  sched: wakeup preemption fix
  sched: remove PREEMPT_RESTRICT
  sched: turn off PREEMPT_RESTRICT
  KVM: fix !SMP build error
  x86: make nmi_cpu_busy() always defined
  x86: make ipi_handler() always defined
  sched: cleanup, use NSEC_PER_MSEC and NSEC_PER_SEC
  sched: reintroduce SMP tunings again
  sched: restore deterministic CPU accounting on powerpc
  sched: fix delay accounting regression
  sched: reintroduce the sched_min_granularity tunable
  sched: documentation: place_entity() comments
  sched: fix vslice
Linus Torvalds committed Nov 9, 2007
2 parents a80b824 + e6fe664 commit a70a932
Showing 16 changed files with 172 additions and 123 deletions.
2 changes: 1 addition & 1 deletion arch/powerpc/kernel/process.c
@@ -350,7 +350,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
local_irq_save(flags);

account_system_vtime(current);
account_process_vtime(current);
account_process_tick(current, 0);
calculate_steal_time();

last = _switch(old_thread, new_thread);
25 changes: 1 addition & 24 deletions arch/powerpc/kernel/time.c
@@ -259,7 +259,7 @@ void account_system_vtime(struct task_struct *tsk)
* user and system time records.
* Must be called with interrupts disabled.
*/
void account_process_vtime(struct task_struct *tsk)
void account_process_tick(struct task_struct *tsk, int user_tick)
{
cputime_t utime, utimescaled;

@@ -274,18 +274,6 @@ void account_process_vtime(struct task_struct *tsk)
account_user_time_scaled(tsk, utimescaled);
}

static void account_process_time(struct pt_regs *regs)
{
int cpu = smp_processor_id();

account_process_vtime(current);
run_local_timers();
if (rcu_pending(cpu))
rcu_check_callbacks(cpu, user_mode(regs));
scheduler_tick();
run_posix_cpu_timers(current);
}

/*
* Stuff for accounting stolen time.
*/
@@ -375,7 +363,6 @@ static void snapshot_purr(void)

#else /* ! CONFIG_VIRT_CPU_ACCOUNTING */
#define calc_cputime_factors()
#define account_process_time(regs) update_process_times(user_mode(regs))
#define calculate_steal_time() do { } while (0)
#endif

@@ -599,16 +586,6 @@ void timer_interrupt(struct pt_regs * regs)
get_lppaca()->int_dword.fields.decr_int = 0;
#endif

/*
* We cannot disable the decrementer, so in the period
* between this cpu's being marked offline in cpu_online_map
* and calling stop-self, it is taking timer interrupts.
* Avoid calling into the scheduler rebalancing code if this
* is the case.
*/
if (!cpu_is_offline(cpu))
account_process_time(regs);

if (evt->event_handler)
evt->event_handler(evt);

4 changes: 0 additions & 4 deletions arch/s390/kernel/time.c
@@ -145,12 +145,8 @@ void account_ticks(u64 time)
do_timer(ticks);
#endif

#ifdef CONFIG_VIRT_CPU_ACCOUNTING
account_tick_vtime(current);
#else
while (ticks--)
update_process_times(user_mode(get_irq_regs()));
#endif

s390_do_profile();
}
8 changes: 1 addition & 7 deletions arch/s390/kernel/vtime.c
@@ -32,7 +32,7 @@ static DEFINE_PER_CPU(struct vtimer_queue, virt_cpu_timer);
* Update process times based on virtual cpu times stored by entry.S
* to the lowcore fields user_timer, system_timer & steal_clock.
*/
void account_tick_vtime(struct task_struct *tsk)
void account_process_tick(struct task_struct *tsk, int user_tick)
{
cputime_t cputime;
__u64 timer, clock;
@@ -64,12 +64,6 @@ void account_tick_vtime(struct task_struct *tsk)
S390_lowcore.steal_clock -= cputime << 12;
account_steal_time(tsk, cputime);
}

run_local_timers();
if (rcu_pending(smp_processor_id()))
rcu_check_callbacks(smp_processor_id(), rcu_user_flag);
scheduler_tick();
run_posix_cpu_timers(tsk);
}

/*
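The powerpc and s390 hunks above make the same change: the arch-private per-tick entry points (account_process_vtime() on powerpc, account_tick_vtime() on s390) become implementations of a common account_process_tick(tsk, user_tick) hook, and their hand-rolled calls to run_local_timers(), rcu_check_callbacks(), scheduler_tick() and run_posix_cpu_timers() go away because the generic tick path performs that work and invokes the hook. The generic timer code is not among the files expanded on this page, so the following is only a sketch of the expected call sequence, not the merged source:

/* Sketch of the generic per-tick path the two architectures now rely on;
 * the real function lives in the generic timer code, not expanded above. */
void update_process_times(int user_tick)
{
	struct task_struct *p = current;
	int cpu = smp_processor_id();

	account_process_tick(p, user_tick);	/* arch hook, see hunks above */
	run_local_timers();
	if (rcu_pending(cpu))
		rcu_check_callbacks(cpu, user_tick);
	scheduler_tick();
	run_posix_cpu_timers(p);
}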
6 changes: 2 additions & 4 deletions arch/x86/kernel/cpu/mtrr/main.c
@@ -139,13 +139,12 @@ struct set_mtrr_data {
mtrr_type smp_type;
};

#ifdef CONFIG_SMP

static void ipi_handler(void *info)
/* [SUMMARY] Synchronisation handler. Executed by "other" CPUs.
[RETURNS] Nothing.
*/
{
#ifdef CONFIG_SMP
struct set_mtrr_data *data = info;
unsigned long flags;

@@ -168,9 +167,8 @@ static void ipi_handler(void *info)

atomic_dec(&data->count);
local_irq_restore(flags);
}

#endif
}

static inline int types_compatible(mtrr_type type1, mtrr_type type2) {
return type1 == MTRR_TYPE_UNCACHABLE ||
4 changes: 2 additions & 2 deletions arch/x86/kernel/nmi_32.c
@@ -51,13 +51,13 @@ static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu);

static int endflag __initdata = 0;

#ifdef CONFIG_SMP
/* The performance counters used by NMI_LOCAL_APIC don't trigger when
* the CPU is idle. To make sure the NMI watchdog really ticks on all
* CPUs during the test make them busy.
*/
static __init void nmi_cpu_busy(void *data)
{
#ifdef CONFIG_SMP
local_irq_enable_in_hardirq();
/* Intentionally don't use cpu_relax here. This is
to make sure that the performance counter really ticks,
@@ -67,8 +67,8 @@ static __init void nmi_cpu_busy(void *data)
care if they get somewhat less cycles. */
while (endflag == 0)
mb();
}
#endif
}

static int __init check_nmi_watchdog(void)
{
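Both x86 hunks above follow one pattern: ipi_handler() and nmi_cpu_busy() stay defined in every configuration and only their bodies are guarded by #ifdef CONFIG_SMP. This pairs with the reworked UP stubs in include/linux/smp.h (next file), which now evaluate the function argument they are handed, so the handler symbols must still resolve on !SMP builds. A minimal standalone illustration of the pattern (not kernel code; the ipi_handler name is reused here purely for illustration):

#include <stdio.h>

/* Keep the function defined in all configurations and compile out only
 * its body, so macros and callers may reference the symbol unconditionally. */
static void ipi_handler(void *info)
{
#ifdef CONFIG_SMP
	printf("SMP build: handling IPI, info=%p\n", info);
#else
	(void)info;	/* UP build: nothing to do, but the symbol still exists */
#endif
}

int main(void)
{
	ipi_handler(NULL);
	return 0;
}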
17 changes: 15 additions & 2 deletions include/linux/sched.h
@@ -254,6 +254,7 @@ long io_schedule_timeout(long timeout);

extern void cpu_init (void);
extern void trap_init(void);
extern void account_process_tick(struct task_struct *task, int user);
extern void update_process_times(int user);
extern void scheduler_tick(void);

@@ -862,7 +863,6 @@ struct sched_entity {
struct load_weight load; /* for load-balancing */
struct rb_node run_node;
unsigned int on_rq;
int peer_preempt;

u64 exec_start;
u64 sum_exec_runtime;
@@ -1460,12 +1460,17 @@ extern void sched_idle_next(void);

#ifdef CONFIG_SCHED_DEBUG
extern unsigned int sysctl_sched_latency;
extern unsigned int sysctl_sched_nr_latency;
extern unsigned int sysctl_sched_min_granularity;
extern unsigned int sysctl_sched_wakeup_granularity;
extern unsigned int sysctl_sched_batch_wakeup_granularity;
extern unsigned int sysctl_sched_child_runs_first;
extern unsigned int sysctl_sched_features;
extern unsigned int sysctl_sched_migration_cost;
extern unsigned int sysctl_sched_nr_migrate;

int sched_nr_latency_handler(struct ctl_table *table, int write,
struct file *file, void __user *buffer, size_t *length,
loff_t *ppos);
#endif

extern unsigned int sysctl_sched_compat_yield;
@@ -1983,6 +1988,14 @@ static inline void inc_syscw(struct task_struct *tsk)
}
#endif

#ifdef CONFIG_SMP
void migration_init(void);
#else
static inline void migration_init(void)
{
}
#endif

#endif /* __KERNEL__ */

#endif
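Two additions in this header are worth noting. migration_init() gains a static-inline no-op for !SMP, so init/main.c (below) can call it unconditionally instead of wrapping the call in #ifdef CONFIG_SMP. The CONFIG_SCHED_DEBUG block reintroduces sysctl_sched_min_granularity, adds sysctl_sched_nr_migrate, and exports sched_nr_latency_handler(), the proc handler that keeps the derived slice count consistent when a latency tunable is written. Its implementation lives in scheduler files not expanded on this page; the sketch below shows what such a handler plausibly looks like, with an internal sched_nr_latency variable assumed for illustration:

/* Sketch only -- the real handler is in scheduler code not expanded on
 * this page.  Assumes an internal sched_nr_latency count derived from
 * the two exported tunables. */
int sched_nr_latency_handler(struct ctl_table *table, int write,
		struct file *file, void __user *buffer, size_t *length,
		loff_t *ppos)
{
	int ret = proc_dointvec_minmax(table, write, file, buffer, length, ppos);

	if (ret || !write)
		return ret;

	/* how many minimum-granularity slices fit in one latency period */
	sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency,
					sysctl_sched_min_granularity);
	return 0;
}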
7 changes: 5 additions & 2 deletions include/linux/smp.h
@@ -84,11 +84,12 @@ void smp_prepare_boot_cpu(void);
* These macros fold the SMP functionality into a single CPU system
*/
#define raw_smp_processor_id() 0
static inline int up_smp_call_function(void)
static inline int up_smp_call_function(void (*func)(void *), void *info)
{
return 0;
}
#define smp_call_function(func,info,retry,wait) (up_smp_call_function())
#define smp_call_function(func, info, retry, wait) \
(up_smp_call_function(func, info))
#define on_each_cpu(func,info,retry,wait) \
({ \
local_irq_disable(); \
@@ -107,6 +108,8 @@ static inline void smp_send_reschedule(int cpu) { }
local_irq_enable(); \
0; \
})
#define smp_call_function_mask(mask, func, info, wait) \
(up_smp_call_function(func, info))

#endif /* !SMP */

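The UP half of this header changes in two ways: up_smp_call_function() now takes the func/info pair, so the fold-down macros reference the arguments they are given, and smp_call_function_mask() gets a UP definition at all, which is presumably what the "KVM: fix !SMP build error" commit in this merge relies on. A hedged usage sketch, with hypothetical names (do_flush(), flush_others()) that are not part of the diff:

/* On a !SMP kernel this now compiles: smp_call_function_mask() expands
 * to up_smp_call_function(do_flush, info), which references do_flush and
 * returns 0 without sending any IPIs. */
static void do_flush(void *info)
{
	/* per-CPU work would go here */
}

static int flush_others(cpumask_t mask, void *info)
{
	return smp_call_function_mask(mask, do_flush, info, 1);
}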
4 changes: 1 addition & 3 deletions init/main.c
@@ -56,6 +56,7 @@
#include <linux/pid_namespace.h>
#include <linux/device.h>
#include <linux/kthread.h>
#include <linux/sched.h>

#include <asm/io.h>
#include <asm/bugs.h>
@@ -747,11 +748,8 @@ __setup("nosoftlockup", nosoftlockup_setup);
static void __init do_pre_smp_initcalls(void)
{
extern int spawn_ksoftirqd(void);
#ifdef CONFIG_SMP
extern int migration_init(void);

migration_init();
#endif
spawn_ksoftirqd();
if (!nosoftlockup)
spawn_softlockup_task();
6 changes: 3 additions & 3 deletions kernel/fork.c
@@ -1123,6 +1123,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
p->blocked_on = NULL; /* not blocked yet */
#endif

/* Perform scheduler related setup. Assign this task to a CPU. */
sched_fork(p, clone_flags);

if ((retval = security_task_alloc(p)))
goto bad_fork_cleanup_policy;
if ((retval = audit_alloc(p)))
@@ -1212,9 +1215,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
INIT_LIST_HEAD(&p->ptrace_children);
INIT_LIST_HEAD(&p->ptrace_list);

/* Perform scheduler related setup. Assign this task to a CPU. */
sched_fork(p, clone_flags);

/* Now that the task is set up, run cgroup callbacks if
* necessary. We need to run them before the task is visible
* on the tasklist. */
(Diffs for the remaining 6 changed files are not expanded on this page.)
