Skip to content

Commit 2bcdd49

Browse files
Naoya Horiguchi authored and gregkh committed
hugetlb: fix copy_hugetlb_page_range() to handle migration/hwpoisoned entry
commit 4a705fe upstream. There's a race between fork() and hugepage migration, as a result we try to "dereference" a swap entry as a normal pte, causing kernel panic. The cause of the problem is that copy_hugetlb_page_range() can't handle "swap entry" family (migration entry and hwpoisoned entry) so let's fix it. [akpm@linux-foundation.org: coding-style fixes] Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com> Acked-by: Hugh Dickins <hughd@google.com> Cc: Christoph Lameter <cl@linux.com> Cc: <stable@vger.kernel.org> [2.6.37+] Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
1 parent 08ccce4 commit 2bcdd49

File tree

1 file changed

+43
-28
lines changed

1 file changed

+43
-28
lines changed

mm/hugetlb.c

Lines changed: 43 additions & 28 deletions
Original file line number | Diff line number | Diff line change
@@ -2276,6 +2276,31 @@ static void set_huge_ptep_writable(struct vm_area_struct *vma,
22762276
update_mmu_cache(vma, address, ptep);
22772277
}
22782278

2279+
static int is_hugetlb_entry_migration(pte_t pte)
2280+
{
2281+
swp_entry_t swp;
2282+
2283+
if (huge_pte_none(pte) || pte_present(pte))
2284+
return 0;
2285+
swp = pte_to_swp_entry(pte);
2286+
if (non_swap_entry(swp) && is_migration_entry(swp))
2287+
return 1;
2288+
else
2289+
return 0;
2290+
}
2291+
2292+
static int is_hugetlb_entry_hwpoisoned(pte_t pte)
2293+
{
2294+
swp_entry_t swp;
2295+
2296+
if (huge_pte_none(pte) || pte_present(pte))
2297+
return 0;
2298+
swp = pte_to_swp_entry(pte);
2299+
if (non_swap_entry(swp) && is_hwpoison_entry(swp))
2300+
return 1;
2301+
else
2302+
return 0;
2303+
}
22792304

22802305
int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
22812306
struct vm_area_struct *vma)
@@ -2303,10 +2328,26 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
23032328

23042329
spin_lock(&dst->page_table_lock);
23052330
spin_lock_nested(&src->page_table_lock, SINGLE_DEPTH_NESTING);
2306-
if (!huge_pte_none(huge_ptep_get(src_pte))) {
2331+
entry = huge_ptep_get(src_pte);
2332+
if (huge_pte_none(entry)) { /* skip none entry */
2333+
;
2334+
} else if (unlikely(is_hugetlb_entry_migration(entry) ||
2335+
is_hugetlb_entry_hwpoisoned(entry))) {
2336+
swp_entry_t swp_entry = pte_to_swp_entry(entry);
2337+
2338+
if (is_write_migration_entry(swp_entry) && cow) {
2339+
/*
2340+
* COW mappings require pages in both
2341+
* parent and child to be set to read.
2342+
*/
2343+
make_migration_entry_read(&swp_entry);
2344+
entry = swp_entry_to_pte(swp_entry);
2345+
set_huge_pte_at(src, addr, src_pte, entry);
2346+
}
2347+
set_huge_pte_at(dst, addr, dst_pte, entry);
2348+
} else {
23072349
if (cow)
23082350
huge_ptep_set_wrprotect(src, addr, src_pte);
2309-
entry = huge_ptep_get(src_pte);
23102351
ptepage = pte_page(entry);
23112352
get_page(ptepage);
23122353
page_dup_rmap(ptepage);
@@ -2321,32 +2362,6 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
23212362
return -ENOMEM;
23222363
}
23232364

2324-
static int is_hugetlb_entry_migration(pte_t pte)
2325-
{
2326-
swp_entry_t swp;
2327-
2328-
if (huge_pte_none(pte) || pte_present(pte))
2329-
return 0;
2330-
swp = pte_to_swp_entry(pte);
2331-
if (non_swap_entry(swp) && is_migration_entry(swp))
2332-
return 1;
2333-
else
2334-
return 0;
2335-
}
2336-
2337-
static int is_hugetlb_entry_hwpoisoned(pte_t pte)
2338-
{
2339-
swp_entry_t swp;
2340-
2341-
if (huge_pte_none(pte) || pte_present(pte))
2342-
return 0;
2343-
swp = pte_to_swp_entry(pte);
2344-
if (non_swap_entry(swp) && is_hwpoison_entry(swp))
2345-
return 1;
2346-
else
2347-
return 0;
2348-
}
2349-
23502365
void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
23512366
unsigned long end, struct page *ref_page)
23522367
{

0 commit comments

Comments (0)