Skip to content

Commit 50a28ba

Browse files
Hugh Dickins authored and gregkh committed
mm: fix crashes from mbind() merging vmas
commit d05f0cd upstream.

In v2.6.34 commit 9d8cebd ("mm: fix mbind vma merge problem") introduced vma merging to mbind(), but it should have also changed the convention of passing start vma from queue_pages_range() (formerly check_range()) to new_vma_page(): vma merging may have already freed that structure, resulting in BUG at mm/mempolicy.c:1738 and probably worse crashes.

Fixes: 9d8cebd ("mm: fix mbind vma merge problem")
Reported-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Tested-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Christoph Lameter <cl@linux.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Minchan Kim <minchan.kim@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
1 parent 2bcdd49 commit 50a28ba

File tree

1 file changed

+24
-29
lines changed

1 file changed

+24
-29
lines changed

mm/mempolicy.c

Lines changed: 24 additions & 29 deletions
Original file line numberDiff line numberDiff line change
@@ -566,24 +566,24 @@ static inline int check_pgd_range(struct vm_area_struct *vma,
566566
* If pagelist != NULL then isolate pages from the LRU and
567567
* put them on the pagelist.
568568
*/
569-
static struct vm_area_struct *
569+
static int
570570
check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
571571
const nodemask_t *nodes, unsigned long flags, void *private)
572572
{
573-
int err;
574-
struct vm_area_struct *first, *vma, *prev;
573+
int err = 0;
574+
struct vm_area_struct *vma, *prev;
575575

576576

577-
first = find_vma(mm, start);
578-
if (!first)
579-
return ERR_PTR(-EFAULT);
577+
vma = find_vma(mm, start);
578+
if (!vma)
579+
return -EFAULT;
580580
prev = NULL;
581-
for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) {
581+
for (; vma && vma->vm_start < end; vma = vma->vm_next) {
582582
if (!(flags & MPOL_MF_DISCONTIG_OK)) {
583583
if (!vma->vm_next && vma->vm_end < end)
584-
return ERR_PTR(-EFAULT);
584+
return -EFAULT;
585585
if (prev && prev->vm_end < vma->vm_start)
586-
return ERR_PTR(-EFAULT);
586+
return -EFAULT;
587587
}
588588
if (!is_vm_hugetlb_page(vma) &&
589589
((flags & MPOL_MF_STRICT) ||
@@ -597,14 +597,12 @@ check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
597597
start = vma->vm_start;
598598
err = check_pgd_range(vma, start, endvma, nodes,
599599
flags, private);
600-
if (err) {
601-
first = ERR_PTR(err);
600+
if (err)
602601
break;
603-
}
604602
}
605603
prev = vma;
606604
}
607-
return first;
605+
return err;
608606
}
609607

610608
/*
@@ -945,16 +943,15 @@ static int migrate_to_node(struct mm_struct *mm, int source, int dest,
945943
{
946944
nodemask_t nmask;
947945
LIST_HEAD(pagelist);
948-
int err = 0;
949-
struct vm_area_struct *vma;
946+
int err;
950947

951948
nodes_clear(nmask);
952949
node_set(source, nmask);
953950

954-
vma = check_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
951+
err = check_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
955952
flags | MPOL_MF_DISCONTIG_OK, &pagelist);
956-
if (IS_ERR(vma))
957-
return PTR_ERR(vma);
953+
if (err)
954+
return err;
958955

959956
if (!list_empty(&pagelist)) {
960957
err = migrate_pages(&pagelist, new_node_page, dest,
@@ -1058,16 +1055,17 @@ int do_migrate_pages(struct mm_struct *mm,
10581055

10591056
/*
10601057
* Allocate a new page for page migration based on vma policy.
1061-
* Start assuming that page is mapped by vma pointed to by @private.
1058+
* Start by assuming the page is mapped by the same vma as contains @start.
10621059
* Search forward from there, if not. N.B., this assumes that the
10631060
* list of pages handed to migrate_pages()--which is how we get here--
10641061
* is in virtual address order.
10651062
*/
1066-
static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
1063+
static struct page *new_page(struct page *page, unsigned long start, int **x)
10671064
{
1068-
struct vm_area_struct *vma = (struct vm_area_struct *)private;
1065+
struct vm_area_struct *vma;
10691066
unsigned long uninitialized_var(address);
10701067

1068+
vma = find_vma(current->mm, start);
10711069
while (vma) {
10721070
address = page_address_in_vma(page, vma);
10731071
if (address != -EFAULT)
@@ -1093,7 +1091,7 @@ int do_migrate_pages(struct mm_struct *mm,
10931091
return -ENOSYS;
10941092
}
10951093

1096-
static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
1094+
static struct page *new_page(struct page *page, unsigned long start, int **x)
10971095
{
10981096
return NULL;
10991097
}
@@ -1103,7 +1101,6 @@ static long do_mbind(unsigned long start, unsigned long len,
11031101
unsigned short mode, unsigned short mode_flags,
11041102
nodemask_t *nmask, unsigned long flags)
11051103
{
1106-
struct vm_area_struct *vma;
11071104
struct mm_struct *mm = current->mm;
11081105
struct mempolicy *new;
11091106
unsigned long end;
@@ -1167,19 +1164,17 @@ static long do_mbind(unsigned long start, unsigned long len,
11671164
if (err)
11681165
goto mpol_out;
11691166

1170-
vma = check_range(mm, start, end, nmask,
1167+
err = check_range(mm, start, end, nmask,
11711168
flags | MPOL_MF_INVERT, &pagelist);
11721169

1173-
err = PTR_ERR(vma);
1174-
if (!IS_ERR(vma)) {
1170+
if (!err) {
11751171
int nr_failed = 0;
11761172

11771173
err = mbind_range(mm, start, end, new);
11781174

11791175
if (!list_empty(&pagelist)) {
1180-
nr_failed = migrate_pages(&pagelist, new_vma_page,
1181-
(unsigned long)vma,
1182-
false, true);
1176+
nr_failed = migrate_pages(&pagelist, new_page,
1177+
start, false, true);
11831178
if (nr_failed)
11841179
putback_lru_pages(&pagelist);
11851180
}

0 commit comments

Comments (0)