Commit 59d9094

Liu Shixin authored and akpm00 committed
mm: hugetlb: independent PMD page table shared count
The folio refcount may be increased unexpectedly through try_get_folio()
by callers such as split_huge_pages. In huge_pmd_unshare(), we use the
refcount to check whether a PMD page table is shared. That check is
wrong if the refcount has been raised by such a caller, and the page
table can then be leaked:

  BUG: Bad page state in process sh pfn:109324
  page: refcount:0 mapcount:0 mapping:0000000000000000 index:0x66 pfn:0x109324
  flags: 0x17ffff800000000(node=0|zone=2|lastcpupid=0xfffff)
  page_type: f2(table)
  raw: 017ffff800000000 0000000000000000 0000000000000000 0000000000000000
  raw: 0000000000000066 0000000000000000 00000000f2000000 0000000000000000
  page dumped because: nonzero mapcount
  ...
  CPU: 31 UID: 0 PID: 7515 Comm: sh Kdump: loaded Tainted: G B 6.13.0-rc2master+ #7
  Tainted: [B]=BAD_PAGE
  Hardware name: QEMU KVM Virtual Machine, BIOS 0.0.0 02/06/2015
  Call trace:
   show_stack+0x20/0x38 (C)
   dump_stack_lvl+0x80/0xf8
   dump_stack+0x18/0x28
   bad_page+0x8c/0x130
   free_page_is_bad_report+0xa4/0xb0
   free_unref_page+0x3cc/0x620
   __folio_put+0xf4/0x158
   split_huge_pages_all+0x1e0/0x3e8
   split_huge_pages_write+0x25c/0x2d8
   full_proxy_write+0x64/0xd8
   vfs_write+0xcc/0x280
   ksys_write+0x70/0x110
   __arm64_sys_write+0x24/0x38
   invoke_syscall+0x50/0x120
   el0_svc_common.constprop.0+0xc8/0xf0
   do_el0_svc+0x24/0x38
   el0_svc+0x34/0x128
   el0t_64_sync_handler+0xc8/0xd0
   el0t_64_sync+0x190/0x198

The issue may be triggered by damon, offline_page, page_idle, etc., all
of which take extra references on page table pages. Two things then go
wrong:

1. The page table itself is discarded after the "nonzero mapcount"
   report above.
2. The HugeTLB page mapped by that page table is never freed, because
   we wrongly treat the table as shared, and a shared page table is not
   unmapped.

Fix it by introducing an independent PMD page table share count. As the
struct comment describes, pt_index/pt_mm/pt_frag_refcount are used for
s390 gmap, x86 pgds and powerpc respectively, while pt_share_count is
only needed for x86/arm64/riscv PMDs, so the same field space can be
reused as pt_share_count.

Link: https://lkml.kernel.org/r/20241216071147.3984217-1-liushixin2@huawei.com
Fixes: 39dde65 ("[PATCH] shared page table for hugetlb page")
Signed-off-by: Liu Shixin <liushixin2@huawei.com>
Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: Ken Chen <kenneth.w.chen@intel.com>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Nanyong Sun <sunnanyong@huawei.com>
Cc: Jane Chu <jane.chu@oracle.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
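To make the failure mode concrete, here is a minimal userspace model of the bug described above. This is hypothetical illustration code, not part of the patch: struct pmd_table and its two counters merely mimic the page refcount and the new pt_share_count.

/* Hypothetical model: why "refcount > 1" is a bad test for sharing. */
#include <stdatomic.h>
#include <stdio.h>

struct pmd_table {
	atomic_int refcount;    /* mapping + any transient pins      */
	atomic_int share_count; /* sharers only, like pt_share_count */
};

int main(void)
{
	/* Freshly allocated PMD table, mapped by exactly one mm. */
	struct pmd_table pt = { .refcount = 1, .share_count = 0 };

	/* A scanner (think try_get_folio() from split_huge_pages)
	 * takes a short-lived pin on the page. */
	atomic_fetch_add(&pt.refcount, 1);

	/* Old check: page_count() > 1 wrongly reports "shared", so
	 * huge_pmd_unshare() would skip the real unmap. */
	printf("refcount check:    %s\n",
	       atomic_load(&pt.refcount) > 1 ? "shared (WRONG)" : "exclusive");

	/* New check: the dedicated counter ignores transient pins. */
	printf("share-count check: %s\n",
	       atomic_load(&pt.share_count) ? "shared" : "exclusive (correct)");
	return 0;
}

Under the old scheme, that skipped unmap is exactly what leaked both the page table and the HugeTLB page it mapped.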
Parent: 1fd8bc7

3 files changed (+38, -9 lines)

include/linux/mm.h (+1 line)

@@ -3125,6 +3125,7 @@ static inline bool pagetable_pmd_ctor(struct ptdesc *ptdesc)
 	if (!pmd_ptlock_init(ptdesc))
 		return false;
 	__folio_set_pgtable(folio);
+	ptdesc_pmd_pts_init(ptdesc);
 	lruvec_stat_add_folio(folio, NR_PAGETABLE);
 	return true;
 }

include/linux/mm_types.h (+30 lines)

@@ -445,6 +445,7 @@ FOLIO_MATCH(compound_head, _head_2a);
  * @pt_index: Used for s390 gmap.
  * @pt_mm: Used for x86 pgds.
  * @pt_frag_refcount: For fragmented page table tracking. Powerpc only.
+ * @pt_share_count: Used for HugeTLB PMD page table share count.
  * @_pt_pad_2: Padding to ensure proper alignment.
  * @ptl: Lock for the page table.
  * @__page_type: Same as page->page_type. Unused for page tables.
@@ -471,6 +472,9 @@ struct ptdesc {
 		pgoff_t pt_index;
 		struct mm_struct *pt_mm;
 		atomic_t pt_frag_refcount;
+#ifdef CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING
+		atomic_t pt_share_count;
+#endif
 	};
 
 	union {
@@ -516,6 +520,32 @@ static_assert(sizeof(struct ptdesc) <= sizeof(struct page));
 	const struct page *: (const struct ptdesc *)(p), \
 	struct page *: (struct ptdesc *)(p)))
 
+#ifdef CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING
+static inline void ptdesc_pmd_pts_init(struct ptdesc *ptdesc)
+{
+	atomic_set(&ptdesc->pt_share_count, 0);
+}
+
+static inline void ptdesc_pmd_pts_inc(struct ptdesc *ptdesc)
+{
+	atomic_inc(&ptdesc->pt_share_count);
+}
+
+static inline void ptdesc_pmd_pts_dec(struct ptdesc *ptdesc)
+{
+	atomic_dec(&ptdesc->pt_share_count);
+}
+
+static inline int ptdesc_pmd_pts_count(struct ptdesc *ptdesc)
+{
+	return atomic_read(&ptdesc->pt_share_count);
+}
+#else
+static inline void ptdesc_pmd_pts_init(struct ptdesc *ptdesc)
+{
+}
+#endif
+
 /*
  * Used for sizing the vmemmap region on some architectures
  */
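As the commit message notes, pt_share_count reuses the union slot occupied by pt_index, pt_mm and pt_frag_refcount, which works because the s390 gmap, x86 pgd, powerpc and x86/arm64/riscv hugetlb-PMD users never coexist on the same table, and the static_assert keeps struct ptdesc within struct page. Below is a stand-alone sketch of the same overlay idea; the fake_page/fake_ptdesc types are hypothetical stand-ins, not the kernel definitions:

/* Hypothetical sketch of the ptdesc overlay trick, not kernel code. */
#include <stdatomic.h>
#include <stdio.h>

struct fake_page { unsigned long words[8]; }; /* stand-in for struct page */

struct fake_ptdesc {
	unsigned long flags;
	union {                               /* one slot, disjoint users: */
		unsigned long pt_index;       /*   s390 gmap               */
		void *pt_mm;                  /*   x86 pgds                */
		atomic_int pt_frag_refcount;  /*   powerpc fragments       */
		atomic_int pt_share_count;    /*   x86/arm64/riscv PMDs    */
	};
};

/* Same shape of guarantee as the kernel's static_assert. */
_Static_assert(sizeof(struct fake_ptdesc) <= sizeof(struct fake_page),
	       "descriptor must fit in the page it describes");

int main(void)
{
	printf("ptdesc model: %zu bytes <= page model: %zu bytes\n",
	       sizeof(struct fake_ptdesc), sizeof(struct fake_page));
	return 0;
}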

mm/hugetlb.c (+7, -9 lines)

@@ -7211,7 +7211,7 @@ pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
 		spte = hugetlb_walk(svma, saddr,
 				    vma_mmu_pagesize(svma));
 		if (spte) {
-			get_page(virt_to_page(spte));
+			ptdesc_pmd_pts_inc(virt_to_ptdesc(spte));
 			break;
 		}
 	}
@@ -7226,7 +7226,7 @@ pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
 			     (pmd_t *)((unsigned long)spte & PAGE_MASK));
 		mm_inc_nr_pmds(mm);
 	} else {
-		put_page(virt_to_page(spte));
+		ptdesc_pmd_pts_dec(virt_to_ptdesc(spte));
 	}
 	spin_unlock(&mm->page_table_lock);
 out:
@@ -7238,10 +7238,6 @@ pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
 /*
  * unmap huge page backed by shared pte.
  *
- * Hugetlb pte page is ref counted at the time of mapping. If pte is shared
- * indicated by page_count > 1, unmap is achieved by clearing pud and
- * decrementing the ref count. If count == 1, the pte page is not shared.
- *
  * Called with page table lock held.
  *
  * returns: 1 successfully unmapped a shared pte page
@@ -7250,18 +7246,20 @@ pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
 int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
 		     unsigned long addr, pte_t *ptep)
 {
+	unsigned long sz = huge_page_size(hstate_vma(vma));
 	pgd_t *pgd = pgd_offset(mm, addr);
 	p4d_t *p4d = p4d_offset(pgd, addr);
 	pud_t *pud = pud_offset(p4d, addr);
 
 	i_mmap_assert_write_locked(vma->vm_file->f_mapping);
 	hugetlb_vma_assert_locked(vma);
-	BUG_ON(page_count(virt_to_page(ptep)) == 0);
-	if (page_count(virt_to_page(ptep)) == 1)
+	if (sz != PMD_SIZE)
+		return 0;
+	if (!ptdesc_pmd_pts_count(virt_to_ptdesc(ptep)))
 		return 0;
 
 	pud_clear(pud);
-	put_page(virt_to_page(ptep));
+	ptdesc_pmd_pts_dec(virt_to_ptdesc(ptep));
 	mm_dec_nr_pmds(mm);
 	return 1;
 }
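The resulting lifecycle: ptdesc_pmd_pts_init() zeroes the count when the table is constructed, huge_pmd_share() increments it for every additional mm that attaches, and huge_pmd_unshare() treats a count of zero as "exclusive, nothing to unshare". A compact userspace model of that flow follows; the model_* helpers are hypothetical, with all locking and pud manipulation stripped out:

/* Hypothetical model of the new share/unshare flow, not kernel code. */
#include <assert.h>
#include <stdatomic.h>

static atomic_int pt_share_count; /* ptdesc_pmd_pts_init(): starts at 0 */

static void model_huge_pmd_share(void)
{
	/* Another mm reuses the existing PMD table. */
	atomic_fetch_add(&pt_share_count, 1);
}

static int model_huge_pmd_unshare(void)
{
	/* New check: count 0 means exclusive, nothing to unshare. */
	if (!atomic_load(&pt_share_count))
		return 0;
	/* Shared: detach one user ("pud_clear"), drop one count. */
	atomic_fetch_sub(&pt_share_count, 1);
	return 1;
}

int main(void)
{
	model_huge_pmd_share();            /* a second mm attaches      */
	assert(model_huge_pmd_unshare());  /* it detaches: 1 = unmapped */
	assert(!model_huge_pmd_unshare()); /* owner alone: not shared   */
	return 0;
}

Because the counter only ever reflects real sharers, a transient folio pin can no longer make huge_pmd_unshare() skip the unmap.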
