Skip to content

Commit

Permalink
memcg: mm_update_next_owner: kill the "retry" logic
Browse files Browse the repository at this point in the history
Add the new helper, try_to_set_owner(), which tries to update mm->owner
once we see c->mm == mm.  This way mm_update_next_owner() doesn't need to
restart the list_for_each_entry/for_each_process loops from the very
beginning if it races with exit/exec, it can just continue.

Unlike the current code, try_to_set_owner() re-checks tsk->mm == mm before
it drops tasklist_lock, so it doesn't need get/put_task_struct().

Link: https://lkml.kernel.org/r/20240626152924.GA17933@redhat.com
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Christian Brauner <brauner@kernel.org>
Cc: Eric W. Biederman <ebiederm@xmission.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Jinliang Zheng <alexjlzheng@tencent.com>
Cc: Mateusz Guzik <mjguzik@gmail.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Tycho Andersen <tandersen@netflix.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
  • Loading branch information
oleg-nesterov authored and akpm00 committed Jul 5, 2024
1 parent 1419ff9 commit 2a22b77
Showing 1 changed file with 27 additions and 30 deletions.
57 changes: 27 additions & 30 deletions kernel/exit.c
Original file line number Diff line number Diff line change
Expand Up @@ -439,14 +439,30 @@ static void coredump_task_exit(struct task_struct *tsk)
}

#ifdef CONFIG_MEMCG
/*
 * try_to_set_owner - try to make @tsk the new owner of @mm.
 *
 * Caller holds tasklist_lock for reading and has already observed
 * tsk->mm == mm without task_lock. Re-checks that under task_lock(tsk);
 * on success drops tasklist_lock and returns true, on failure leaves
 * tasklist_lock held and returns false so the caller can keep scanning.
 *
 * NOTE(review): per the commit message, holding task_lock(tsk) across the
 * re-check is what makes get/put_task_struct() unnecessary — exit_mm() /
 * exec_mmap() take task_lock before clearing ->mm, so tsk cannot drop @mm
 * (and thus cannot exit) while we own its task_lock.
 */
static bool try_to_set_owner(struct task_struct *tsk, struct mm_struct *mm)
{
bool ret = false;

task_lock(tsk);
if (likely(tsk->mm == mm)) {
/* tsk can't pass exit_mm/exec_mmap and exit */
read_unlock(&tasklist_lock);
/* WRITE_ONCE pairs with lockless readers of mm->owner */
WRITE_ONCE(mm->owner, tsk);
lru_gen_migrate_mm(mm);
ret = true;
}
task_unlock(tsk);
return ret;
}

/*
* A task is exiting. If it owned this mm, find a new owner for the mm.
*/
void mm_update_next_owner(struct mm_struct *mm)
{
struct task_struct *c, *g, *p = current;

retry:
/*
* If the exiting or execing task is not the owner, it's
* someone else's problem.
Expand All @@ -468,16 +484,16 @@ void mm_update_next_owner(struct mm_struct *mm)
* Search in the children
*/
list_for_each_entry(c, &p->children, sibling) {
if (c->mm == mm)
goto assign_new_owner;
if (c->mm == mm && try_to_set_owner(c, mm))
goto ret;
}

/*
* Search in the siblings
*/
list_for_each_entry(c, &p->real_parent->children, sibling) {
if (c->mm == mm)
goto assign_new_owner;
if (c->mm == mm && try_to_set_owner(c, mm))
goto ret;
}

/*
Expand All @@ -489,9 +505,11 @@ void mm_update_next_owner(struct mm_struct *mm)
if (g->flags & PF_KTHREAD)
continue;
for_each_thread(g, c) {
if (c->mm == mm)
goto assign_new_owner;
if (c->mm)
struct mm_struct *c_mm = READ_ONCE(c->mm);
if (c_mm == mm) {
if (try_to_set_owner(c, mm))
goto ret;
} else if (c_mm)
break;
}
}
Expand All @@ -502,30 +520,9 @@ void mm_update_next_owner(struct mm_struct *mm)
* ptrace or page migration (get_task_mm()). Mark owner as NULL.
*/
WRITE_ONCE(mm->owner, NULL);
ret:
return;

assign_new_owner:
BUG_ON(c == p);
get_task_struct(c);
/*
* The task_lock protects c->mm from changing.
* We always want mm->owner->mm == mm
*/
task_lock(c);
/*
* Delay read_unlock() till we have the task_lock()
* to ensure that c does not slip away underneath us
*/
read_unlock(&tasklist_lock);
if (c->mm != mm) {
task_unlock(c);
put_task_struct(c);
goto retry;
}
WRITE_ONCE(mm->owner, c);
lru_gen_migrate_mm(mm);
task_unlock(c);
put_task_struct(c);
}
#endif /* CONFIG_MEMCG */

Expand Down

0 comments on commit 2a22b77

Please sign in to comment.