mm: add CONFIG_DEBUG_VM_RB build option
Add a CONFIG_DEBUG_VM_RB build option for the previously existing
DEBUG_MM_RB code.  Now that Andi Kleen modified it to avoid using
recursive algorithms, we can expose it a bit more.

Also extend this code to validate_mm() after stack expansion, and to check
that the vma's start and last pgoffs have not changed since the nodes were
inserted on the anon vma interval tree (as it is important that the nodes
be reindexed after each such update).
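
To make the invariant concrete, here is a minimal userspace C sketch of the check being added (names and structure are illustrative, not the kernel's): the insert path snapshots the node's interval, and a later verify pass complains if the live values have drifted, i.e. the node was re-indexed without a remove/re-insert cycle.

/* Illustrative userspace model of the cached-pgoff check; not kernel code. */
#include <stdio.h>

struct node {
        unsigned long start, last;               /* live interval */
        unsigned long cached_start, cached_last; /* snapshot taken at insert */
};

static void tree_insert(struct node *n)
{
        n->cached_start = n->start;  /* mirrors what the debug wrapper does */
        n->cached_last = n->last;
        /* actual rb-tree insertion elided */
}

static void tree_verify(const struct node *n)
{
        if (n->cached_start != n->start || n->cached_last != n->last)
                fprintf(stderr, "stale interval-tree node\n");
}

int main(void)
{
        struct node n = { .start = 4, .last = 8 };
        tree_insert(&n);
        n.start = 5;     /* interval changed behind the tree's back */
        tree_verify(&n); /* reports the stale node */
        return 0;
}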

Signed-off-by: Michel Lespinasse <walken@google.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Daniel Santos <daniel.santos@pobox.com>
Cc: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
walken-google authored and torvalds committed Oct 9, 2012
1 parent 86c2ad1 · commit ed8ea81
Showing 5 changed files with 64 additions and 11 deletions.
3 changes: 3 additions & 0 deletions include/linux/mm.h
@@ -1386,6 +1386,9 @@ struct anon_vma_chain *anon_vma_interval_tree_iter_first(
                 struct rb_root *root, unsigned long start, unsigned long last);
 struct anon_vma_chain *anon_vma_interval_tree_iter_next(
         struct anon_vma_chain *node, unsigned long start, unsigned long last);
+#ifdef CONFIG_DEBUG_VM_RB
+void anon_vma_interval_tree_verify(struct anon_vma_chain *node);
+#endif
 
 #define anon_vma_interval_tree_foreach(avc, root, start, last)  \
         for (avc = anon_vma_interval_tree_iter_first(root, start, last); \
3 changes: 3 additions & 0 deletions include/linux/rmap.h
@@ -66,6 +66,9 @@ struct anon_vma_chain {
         struct list_head same_vma;     /* locked by mmap_sem & page_table_lock */
         struct rb_node rb;             /* locked by anon_vma->mutex */
         unsigned long rb_subtree_last;
+#ifdef CONFIG_DEBUG_VM_RB
+        unsigned long cached_vma_start, cached_vma_last;
+#endif
 };
 
 #ifdef CONFIG_MMU
9 changes: 9 additions & 0 deletions lib/Kconfig.debug
@@ -798,6 +798,15 @@ config DEBUG_VM
 
           If unsure, say N.
 
+config DEBUG_VM_RB
+        bool "Debug VM red-black trees"
+        depends on DEBUG_VM
+        help
+          Enable this to turn on more extended checks in the virtual-memory
+          system that may impact performance.
+
+          If unsure, say N.
+
 config DEBUG_VIRTUAL
         bool "Debug VM translations"
         depends on DEBUG_KERNEL && X86
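
To actually exercise the new checks, one would enable the option together with its dependencies; assuming a standard debug build, a .config fragment along these lines should do (DEBUG_VM itself depends on DEBUG_KERNEL):

CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_VM=y
CONFIG_DEBUG_VM_RB=y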
41 changes: 40 additions & 1 deletion mm/interval_tree.c
@@ -70,4 +70,43 @@ static inline unsigned long avc_last_pgoff(struct anon_vma_chain *avc)
 }
 
 INTERVAL_TREE_DEFINE(struct anon_vma_chain, rb, unsigned long, rb_subtree_last,
-                     avc_start_pgoff, avc_last_pgoff,, anon_vma_interval_tree)
+                     avc_start_pgoff, avc_last_pgoff,
+                     static inline, __anon_vma_interval_tree)
+
+void anon_vma_interval_tree_insert(struct anon_vma_chain *node,
+                                   struct rb_root *root)
+{
+#ifdef CONFIG_DEBUG_VM_RB
+        node->cached_vma_start = avc_start_pgoff(node);
+        node->cached_vma_last = avc_last_pgoff(node);
+#endif
+        __anon_vma_interval_tree_insert(node, root);
+}
+
+void anon_vma_interval_tree_remove(struct anon_vma_chain *node,
+                                   struct rb_root *root)
+{
+        __anon_vma_interval_tree_remove(node, root);
+}
+
+struct anon_vma_chain *
+anon_vma_interval_tree_iter_first(struct rb_root *root,
+                                  unsigned long first, unsigned long last)
+{
+        return __anon_vma_interval_tree_iter_first(root, first, last);
+}
+
+struct anon_vma_chain *
+anon_vma_interval_tree_iter_next(struct anon_vma_chain *node,
+                                 unsigned long first, unsigned long last)
+{
+        return __anon_vma_interval_tree_iter_next(node, first, last);
+}
+
+#ifdef CONFIG_DEBUG_VM_RB
+void anon_vma_interval_tree_verify(struct anon_vma_chain *node)
+{
+        WARN_ON_ONCE(node->cached_vma_start != avc_start_pgoff(node));
+        WARN_ON_ONCE(node->cached_vma_last != avc_last_pgoff(node));
+}
+#endif
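
For context on the INTERVAL_TREE_DEFINE change above: the macro's last two parameters select the storage class and name prefix of the generated functions (see include/linux/interval_tree_generic.h). Passing "static inline, __anon_vma_interval_tree" should therefore emit private helpers with signatures roughly like the sketch below, which the exported wrappers above forward to after recording the debug snapshot:

static inline void __anon_vma_interval_tree_insert(struct anon_vma_chain *node,
                                                   struct rb_root *root);
static inline void __anon_vma_interval_tree_remove(struct anon_vma_chain *node,
                                                   struct rb_root *root);
/* __anon_vma_interval_tree_iter_first/_iter_next are generated likewise */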
19 changes: 9 additions & 10 deletions mm/mmap.c
@@ -51,12 +51,6 @@ static void unmap_region(struct mm_struct *mm,
                 struct vm_area_struct *vma, struct vm_area_struct *prev,
                 unsigned long start, unsigned long end);
 
-/*
- * WARNING: the debugging will use recursive algorithms so never enable this
- * unless you know what you are doing.
- */
-#undef DEBUG_MM_RB
-
 /* description of effects of mapping type and prot in current implementation.
  * this is due to the limited x86 page protection hardware.  The expected
  * behavior is in parens:
@@ -303,7 +297,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
         return retval;
 }
 
-#ifdef DEBUG_MM_RB
+#ifdef CONFIG_DEBUG_VM_RB
 static int browse_rb(struct rb_root *root)
 {
         int i = 0, j;
@@ -337,9 +331,12 @@ void validate_mm(struct mm_struct *mm)
 {
         int bug = 0;
         int i = 0;
-        struct vm_area_struct *tmp = mm->mmap;
-        while (tmp) {
-                tmp = tmp->vm_next;
+        struct vm_area_struct *vma = mm->mmap;
+        while (vma) {
+                struct anon_vma_chain *avc;
+                list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
+                        anon_vma_interval_tree_verify(avc);
+                vma = vma->vm_next;
                 i++;
         }
         if (i != mm->map_count)
@@ -1790,6 +1787,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
         }
         vma_unlock_anon_vma(vma);
         khugepaged_enter_vma_merge(vma);
+        validate_mm(vma->vm_mm);
         return error;
 }
 #endif /* CONFIG_STACK_GROWSUP || CONFIG_IA64 */
@@ -1843,6 +1841,7 @@ int expand_downwards(struct vm_area_struct *vma,
         }
         vma_unlock_anon_vma(vma);
         khugepaged_enter_vma_merge(vma);
+        validate_mm(vma->vm_mm);
         return error;
 }
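
Worth noting about the two validate_mm() calls added above: when CONFIG_DEBUG_VM_RB is disabled, mm/mmap.c compiles validate_mm() away to a no-op (that part of the file is outside the hunks shown, so this is from the surrounding source rather than this diff), meaning the stack-expansion paths pay nothing in a production configuration:

#define validate_mm(mm) do { } while (0)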
