[PATCH] rtmutex: Propagate priority settings into PI lock chains
When the priority of a task which is blocked on a lock changes, we must
propagate this change into the PI lock chain.  The chain walk code is
therefore changed to get rid of its references to current, to avoid false
positives in the deadlock detector: setscheduler might be called by a task
which itself holds the lock on which the task whose priority is being
changed is blocked.

Also add some comments about the get/put_task_struct usage to avoid
confusion.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Cc: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
KAGA-KOKO authored and Linus Torvalds committed Jun 28, 2006
1 parent 0bafd21 commit 95e02ca
Showing 3 changed files with 40 additions and 6 deletions.
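To illustrate the scenario the chain-walk change addresses, here is a hypothetical userspace trigger (not part of this commit; it assumes PI futex support, which glibc exposes via PTHREAD_PRIO_INHERIT, and SCHED_FIFO requires root): the thread that owns a PI mutex boosts a waiter blocked on that same mutex, so the chain walk started from sched_setscheduler() runs in the lock owner's context, where an owner check against current would falsely indicate a deadlock.

/*
 * Hypothetical illustration, not part of this commit.  The task that
 * owns a PI lock changes the priority of a task blocked on that lock;
 * pthread_setschedparam() ends up in sched_setscheduler(), which after
 * this patch calls rt_mutex_adjust_pi() to re-walk the waiter's PI
 * chain.  Build with -lpthread; SCHED_FIFO needs root privileges.
 */
#include <pthread.h>
#include <sched.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t pi_mutex;

static void *waiter_fn(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&pi_mutex);	/* blocks; main() owns the lock */
	pthread_mutex_unlock(&pi_mutex);
	return NULL;
}

int main(void)
{
	struct sched_param sp = { .sched_priority = 50 };
	pthread_mutexattr_t attr;
	pthread_t waiter;
	int err;

	pthread_mutexattr_init(&attr);
	pthread_mutexattr_setprotocol(&attr, PTHREAD_PRIO_INHERIT);
	pthread_mutex_init(&pi_mutex, &attr);

	pthread_mutex_lock(&pi_mutex);		/* we are the lock owner */
	pthread_create(&waiter, NULL, waiter_fn, NULL);
	sleep(1);				/* let the waiter block */

	/* Boost the blocked waiter while still holding the lock. */
	err = pthread_setschedparam(waiter, SCHED_FIFO, &sp);
	if (err)
		fprintf(stderr, "pthread_setschedparam: error %d\n", err);

	pthread_mutex_unlock(&pi_mutex);
	pthread_join(waiter, NULL);
	return 0;
}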
2 changes: 2 additions & 0 deletions include/linux/sched.h
@@ -1044,11 +1044,13 @@ extern void sched_idle_next(void);
 #ifdef CONFIG_RT_MUTEXES
 extern int rt_mutex_getprio(task_t *p);
 extern void rt_mutex_setprio(task_t *p, int prio);
+extern void rt_mutex_adjust_pi(task_t *p);
 #else
 static inline int rt_mutex_getprio(task_t *p)
 {
 	return p->normal_prio;
 }
+# define rt_mutex_adjust_pi(p)	do { } while (0)
 #endif
 
 extern void set_user_nice(task_t *p, long nice);
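The !CONFIG_RT_MUTEXES stub uses the usual do { } while (0) idiom, so the call compiles away while still parsing as a single statement and callers such as sched_setscheduler() need no #ifdef. A standalone sketch of the idiom (hypothetical, not kernel code):

#include <stdio.h>

/* Comment this out to mimic a kernel built without CONFIG_RT_MUTEXES. */
#define CONFIG_RT_MUTEXES

#ifdef CONFIG_RT_MUTEXES
static void rt_mutex_adjust_pi(int *p)
{
	printf("re-walking PI chain, prio %d\n", *p);
}
#else
/*
 * do { } while (0) makes the no-op behave like one statement, so an
 * un-braced 'if (x) rt_mutex_adjust_pi(p); else ...' still parses.
 */
# define rt_mutex_adjust_pi(p)	do { } while (0)
#endif

int main(void)
{
	int prio = 50;

	if (prio > 0)
		rt_mutex_adjust_pi(&prio);	/* safe with either definition */
	else
		puts("no boost needed");
	return 0;
}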
42 changes: 36 additions & 6 deletions kernel/rtmutex.c
@@ -160,7 +160,8 @@ int max_lock_depth = 1024;
 static int rt_mutex_adjust_prio_chain(task_t *task,
 				      int deadlock_detect,
 				      struct rt_mutex *orig_lock,
-				      struct rt_mutex_waiter *orig_waiter
+				      struct rt_mutex_waiter *orig_waiter,
+				      struct task_struct *top_task
 				      __IP_DECL__)
 {
 	struct rt_mutex *lock;
@@ -189,7 +190,7 @@ static int rt_mutex_adjust_prio_chain(task_t *task,
 			prev_max = max_lock_depth;
 			printk(KERN_WARNING "Maximum lock depth %d reached "
 			       "task: %s (%d)\n", max_lock_depth,
-			       current->comm, current->pid);
+			       top_task->comm, top_task->pid);
 		}
 		put_task_struct(task);
 
@@ -229,7 +230,7 @@ static int rt_mutex_adjust_prio_chain(task_t *task,
 	}
 
 	/* Deadlock detection */
-	if (lock == orig_lock || rt_mutex_owner(lock) == current) {
+	if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
 		debug_rt_mutex_deadlock(deadlock_detect, orig_waiter, lock);
 		spin_unlock(&lock->wait_lock);
 		ret = deadlock_detect ? -EDEADLK : 0;
@@ -433,6 +434,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
 		__rt_mutex_adjust_prio(owner);
 		if (owner->pi_blocked_on) {
 			boost = 1;
+			/* gets dropped in rt_mutex_adjust_prio_chain()! */
 			get_task_struct(owner);
 		}
 		spin_unlock_irqrestore(&owner->pi_lock, flags);
@@ -441,6 +443,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
 		spin_lock_irqsave(&owner->pi_lock, flags);
 		if (owner->pi_blocked_on) {
 			boost = 1;
+			/* gets dropped in rt_mutex_adjust_prio_chain()! */
 			get_task_struct(owner);
 		}
 		spin_unlock_irqrestore(&owner->pi_lock, flags);
@@ -450,8 +453,8 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
 
 	spin_unlock(&lock->wait_lock);
 
-	res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock,
-					 waiter __IP__);
+	res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock, waiter,
+					 current __IP__);
 
 	spin_lock(&lock->wait_lock);
@@ -552,6 +555,7 @@ static void remove_waiter(struct rt_mutex *lock,
 
 	if (owner->pi_blocked_on) {
 		boost = 1;
+		/* gets dropped in rt_mutex_adjust_prio_chain()! */
 		get_task_struct(owner);
 	}
 	spin_unlock_irqrestore(&owner->pi_lock, flags);
@@ -564,11 +568,36 @@ static void remove_waiter(struct rt_mutex *lock,
 
 	spin_unlock(&lock->wait_lock);
 
-	rt_mutex_adjust_prio_chain(owner, 0, lock, NULL __IP__);
+	rt_mutex_adjust_prio_chain(owner, 0, lock, NULL, current __IP__);
 
 	spin_lock(&lock->wait_lock);
 }
 
+/*
+ * Recheck the pi chain, in case we got a priority setting
+ *
+ * Called from sched_setscheduler
+ */
+void rt_mutex_adjust_pi(struct task_struct *task)
+{
+	struct rt_mutex_waiter *waiter;
+	unsigned long flags;
+
+	spin_lock_irqsave(&task->pi_lock, flags);
+
+	waiter = task->pi_blocked_on;
+	if (!waiter || waiter->list_entry.prio == task->prio) {
+		spin_unlock_irqrestore(&task->pi_lock, flags);
+		return;
+	}
+
+	/* gets dropped in rt_mutex_adjust_prio_chain()! */
+	get_task_struct(task);
+	spin_unlock_irqrestore(&task->pi_lock, flags);
+
+	rt_mutex_adjust_prio_chain(task, 0, NULL, NULL, task __RET_IP__);
+}
+
 /*
  * Slow path lock function:
  */
@@ -636,6 +665,7 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
 		if (unlikely(ret))
 			break;
 	}
+
 	spin_unlock(&lock->wait_lock);
 
 	debug_rt_mutex_print_deadlock(&waiter);
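The three "gets dropped in rt_mutex_adjust_prio_chain()!" comments added above all document the same hand-off: the caller takes a task reference under pi_lock, and rt_mutex_adjust_prio_chain() is responsible for dropping it (the put_task_struct(task) visible in the second hunk), since the chain walk runs after all locks have been released and the task could otherwise exit underneath it. A standalone sketch of the pattern, with hypothetical stand-in types:

#include <stdio.h>

/* Hypothetical stand-in for task_struct's usage count. */
struct task {
	const char *comm;
	int usage;
};

static void get_task_ref(struct task *t) { t->usage++; }
static void put_task_ref(struct task *t) { t->usage--; }

/*
 * Stand-in for rt_mutex_adjust_prio_chain(): it consumes the
 * reference its caller took, which is what the added comments warn
 * about.
 */
static void adjust_prio_chain(struct task *owner)
{
	/* ... walk one step of the PI chain ... */
	put_task_ref(owner);	/* the caller's reference is dropped here */
}

int main(void)
{
	struct task owner = { .comm = "owner", .usage = 1 };

	/*
	 * Caller side, as in task_blocks_on_rt_mutex()/remove_waiter():
	 * pin the owner before releasing its pi_lock, because the chain
	 * walk runs with no locks held.
	 */
	get_task_ref(&owner);
	adjust_prio_chain(&owner);

	printf("%s: usage back to %d\n", owner.comm, owner.usage);
	return 0;
}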
2 changes: 2 additions & 0 deletions kernel/sched.c
@@ -4070,6 +4070,8 @@ int sched_setscheduler(struct task_struct *p, int policy,
 	__task_rq_unlock(rq);
 	spin_unlock_irqrestore(&p->pi_lock, flags);
 
+	rt_mutex_adjust_pi(p);
+
 	return 0;
 }
 EXPORT_SYMBOL_GPL(sched_setscheduler);
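Note where the hook lands: after __task_rq_unlock() and the release of p->pi_lock. rt_mutex_adjust_pi() retakes task->pi_lock itself and the chain walk takes lock->wait_lock, so invoking it with either lock still held would self-deadlock. A minimal sketch of that release-then-rewalk ordering (hypothetical names, not kernel code):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t pi_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for rt_mutex_adjust_pi(): retakes pi_lock on its own. */
static void adjust_pi(int *prio)
{
	pthread_mutex_lock(&pi_lock);
	printf("rechecking PI chain for prio %d\n", *prio);
	pthread_mutex_unlock(&pi_lock);
}

/* Stand-in for sched_setscheduler(): update, unlock, then re-walk. */
static void set_scheduler(int *prio, int new_prio)
{
	pthread_mutex_lock(&pi_lock);
	*prio = new_prio;		/* priority update under pi_lock */
	pthread_mutex_unlock(&pi_lock);	/* must drop before the re-walk */

	adjust_pi(prio);		/* safe: no locks held here */
}

int main(void)
{
	int prio = 120;

	set_scheduler(&prio, 50);
	return 0;
}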
