Skip to content

Commit

Permalink
mm: prevent get_user_pages() from overflowing page refcount
Browse files Browse the repository at this point in the history
commit 8fde12c upstream.

If the page refcount wraps around past zero, it will be freed while
there are still four billion references to it.  One of the possible
avenues for an attacker to try to make this happen is by doing direct IO
on a page multiple times.  This patch makes get_user_pages() refuse to
take a new page reference if there are already more than two billion
references to the page.

Reported-by: Jann Horn <jannh@google.com>
Acked-by: Matthew Wilcox <willy@infradead.org>
Cc: stable@kernel.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
  • Loading branch information
torvalds authored and gregkh committed May 4, 2019
1 parent 0612cae commit d972ebb
Show file tree
Hide file tree
Showing 2 changed files with 47 additions and 11 deletions.
45 changes: 34 additions & 11 deletions mm/gup.c
Original file line number Diff line number Diff line change
Expand Up @@ -153,7 +153,10 @@ static struct page *follow_page_pte(struct vm_area_struct *vma,
}

if (flags & FOLL_GET) {
get_page(page);
if (unlikely(!try_get_page(page))) {
page = ERR_PTR(-ENOMEM);
goto out;
}

/* drop the pgmap reference now that we hold the page */
if (pgmap) {
Expand Down Expand Up @@ -296,7 +299,10 @@ static struct page *follow_pmd_mask(struct vm_area_struct *vma,
if (pmd_trans_unstable(pmd))
ret = -EBUSY;
} else {
get_page(page);
if (unlikely(!try_get_page(page))) {
spin_unlock(ptl);
return ERR_PTR(-ENOMEM);
}
spin_unlock(ptl);
lock_page(page);
ret = split_huge_page(page);
Expand Down Expand Up @@ -480,7 +486,10 @@ static int get_gate_page(struct mm_struct *mm, unsigned long address,
if (is_device_public_page(*page))
goto unmap;
}
get_page(*page);
if (unlikely(!try_get_page(*page))) {
ret = -ENOMEM;
goto unmap;
}
out:
ret = 0;
unmap:
Expand Down Expand Up @@ -1368,6 +1377,20 @@ static void undo_dev_pagemap(int *nr, int nr_start, struct page **pages)
}
}

/*
 * Return the compound head page with its reference count incremented
 * by @refs, or NULL if that failed.
 *
 * The page_ref_count() check rejects a head page whose refcount has
 * already gone negative (i.e. wrapped past zero) before any new
 * references are taken; page_cache_add_speculative() then atomically
 * adds @refs references only if the page is still live.  Callers must
 * treat a NULL return as "do not touch this page".
 */
static inline struct page *try_get_compound_head(struct page *page, int refs)
{
	struct page *head = compound_head(page);
	/* refcount < 0 means it wrapped or the page is being freed */
	if (WARN_ON_ONCE(page_ref_count(head) < 0))
		return NULL;
	if (unlikely(!page_cache_add_speculative(head, refs)))
		return NULL;
	return head;
}

#ifdef CONFIG_ARCH_HAS_PTE_SPECIAL
static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
int write, struct page **pages, int *nr)
Expand Down Expand Up @@ -1402,9 +1425,9 @@ static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,

VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
page = pte_page(pte);
head = compound_head(page);

if (!page_cache_get_speculative(head))
head = try_get_compound_head(page, 1);
if (!head)
goto pte_unmap;

if (unlikely(pte_val(pte) != pte_val(*ptep))) {
Expand Down Expand Up @@ -1543,8 +1566,8 @@ static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
refs++;
} while (addr += PAGE_SIZE, addr != end);

head = compound_head(pmd_page(orig));
if (!page_cache_add_speculative(head, refs)) {
head = try_get_compound_head(pmd_page(orig), refs);
if (!head) {
*nr -= refs;
return 0;
}
Expand Down Expand Up @@ -1581,8 +1604,8 @@ static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
refs++;
} while (addr += PAGE_SIZE, addr != end);

head = compound_head(pud_page(orig));
if (!page_cache_add_speculative(head, refs)) {
head = try_get_compound_head(pud_page(orig), refs);
if (!head) {
*nr -= refs;
return 0;
}
Expand Down Expand Up @@ -1618,8 +1641,8 @@ static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr,
refs++;
} while (addr += PAGE_SIZE, addr != end);

head = compound_head(pgd_page(orig));
if (!page_cache_add_speculative(head, refs)) {
head = try_get_compound_head(pgd_page(orig), refs);
if (!head) {
*nr -= refs;
return 0;
}
Expand Down
13 changes: 13 additions & 0 deletions mm/hugetlb.c
Original file line number Diff line number Diff line change
Expand Up @@ -4299,6 +4299,19 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,

pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
page = pte_page(huge_ptep_get(pte));

/*
* Instead of doing 'try_get_page()' below in the same_page
* loop, just check the count once here.
*/
if (unlikely(page_count(page) <= 0)) {
if (pages) {
spin_unlock(ptl);
remainder = 0;
err = -ENOMEM;
break;
}
}
same_page:
if (pages) {
pages[i] = mem_map_offset(page, pfn_offset);
Expand Down

0 comments on commit d972ebb

Please sign in to comment.