
Commit 8b88176

mm/migrate: make isolate_movable_page() skip slab pages
In the next commit we want to rearrange struct slab fields to allow a larger rcu_head. Afterwards, the page->mapping field will overlap with SLUB's "struct list_head slab_list", where the value of the prev pointer can become LIST_POISON2, which is 0x122 + POISON_POINTER_DELTA. Unfortunately, with bit 1 set, that value can make PageMovable() return a false positive and cause a GPF, as reported by lkp [1].

To fix this, make isolate_movable_page() skip pages with the PageSlab flag set. This is a bit tricky, as we need to add memory barriers to SLAB's and SLUB's page allocation and freeing, and their counterparts in isolate_movable_page().

Based on my RFC from [2]. Added a comment update from Matthew's variant in [3] and, as done there, moved the PageSlab checks to happen before trying to take the page lock.

[1] https://lore.kernel.org/all/208c1757-5edd-fd42-67d4-1940cc43b50f@intel.com/
[2] https://lore.kernel.org/all/aec59f53-0e53-1736-5932-25407125d4d4@suse.cz/
[3] https://lore.kernel.org/all/YzsVM8eToHUeTP75@casper.infradead.org/

Reported-by: kernel test robot <yujie.liu@intel.com>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
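For context, here is a minimal user-space sketch (not kernel code) of why the poison value trips the movable-page check. The constants mirror the usual kernel defaults; POISON_POINTER_DELTA is assumed to be 0, its value when CONFIG_ILLEGAL_POINTER_VALUE is unset.

#include <stdio.h>

#define POISON_POINTER_DELTA 0x0UL
#define LIST_POISON2         (0x122UL + POISON_POINTER_DELTA)
#define PAGE_MAPPING_MOVABLE 0x2UL
#define PAGE_MAPPING_FLAGS   0x3UL   /* anon and movable bits of page->mapping */

int main(void)
{
	/* After the slab rearrangement, this word aliases slab_list.prev. */
	unsigned long mapping = LIST_POISON2;

	/* The __PageMovable()-style test that misfires: bit 1 of the poison
	 * value is set, so the freed slab page looks like a movable page. */
	if ((mapping & PAGE_MAPPING_FLAGS) == PAGE_MAPPING_MOVABLE)
		printf("false positive: 0x%lx looks movable\n", mapping);
	return 0;
}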
Parent: bc29d5b

3 files changed: +22, -5 lines

mm/migrate.c

12 additions, 3 deletions

@@ -74,13 +74,22 @@ int isolate_movable_page(struct page *page, isolate_mode_t mode)
 	if (unlikely(!get_page_unless_zero(page)))
 		goto out;
 
+	if (unlikely(PageSlab(page)))
+		goto out_putpage;
+	/* Pairs with smp_wmb() in slab freeing, e.g. SLUB's __free_slab() */
+	smp_rmb();
 	/*
-	 * Check PageMovable before holding a PG_lock because page's owner
-	 * assumes anybody doesn't touch PG_lock of newly allocated page
-	 * so unconditionally grabbing the lock ruins page's owner side.
+	 * Check movable flag before taking the page lock because
+	 * we use non-atomic bitops on newly allocated page flags so
+	 * unconditionally grabbing the lock ruins page's owner side.
 	 */
 	if (unlikely(!__PageMovable(page)))
 		goto out_putpage;
+	/* Pairs with smp_wmb() in slab allocation, e.g. SLUB's alloc_slab_page() */
+	smp_rmb();
+	if (unlikely(PageSlab(page)))
+		goto out_putpage;
+
 	/*
 	 * As movable pages are not isolated from LRU lists, concurrent
 	 * compaction threads can race against page migration functions

mm/slab.c

5 additions, 1 deletion

@@ -1370,6 +1370,8 @@ static struct slab *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
 
 	account_slab(slab, cachep->gfporder, cachep, flags);
 	__folio_set_slab(folio);
+	/* Make the flag visible before any changes to folio->mapping */
+	smp_wmb();
 	/* Record if ALLOC_NO_WATERMARKS was set when allocating the slab */
 	if (sk_memalloc_socks() && page_is_pfmemalloc(folio_page(folio, 0)))
 		slab_set_pfmemalloc(slab);
@@ -1387,9 +1389,11 @@ static void kmem_freepages(struct kmem_cache *cachep, struct slab *slab)
 
 	BUG_ON(!folio_test_slab(folio));
 	__slab_clear_pfmemalloc(slab);
-	__folio_clear_slab(folio);
 	page_mapcount_reset(folio_page(folio, 0));
 	folio->mapping = NULL;
+	/* Make the mapping reset visible before clearing the flag */
+	smp_wmb();
+	__folio_clear_slab(folio);
 
 	if (current->reclaim_state)
 		current->reclaim_state->reclaimed_slab += 1 << order;

mm/slub.c

5 additions, 1 deletion

@@ -1800,6 +1800,8 @@ static inline struct slab *alloc_slab_page(gfp_t flags, int node,
 
 	slab = folio_slab(folio);
 	__folio_set_slab(folio);
+	/* Make the flag visible before any changes to folio->mapping */
+	smp_wmb();
 	if (page_is_pfmemalloc(folio_page(folio, 0)))
 		slab_set_pfmemalloc(slab);
 
@@ -2000,8 +2002,10 @@ static void __free_slab(struct kmem_cache *s, struct slab *slab)
 	int pages = 1 << order;
 
 	__slab_clear_pfmemalloc(slab);
-	__folio_clear_slab(folio);
 	folio->mapping = NULL;
+	/* Make the mapping reset visible before clearing the flag */
+	smp_wmb();
+	__folio_clear_slab(folio);
 	if (current->reclaim_state)
 		current->reclaim_state->reclaimed_slab += pages;
 	unaccount_slab(slab, order, s);
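
To illustrate how the barriers above pair up, here is a hedged user-space sketch of the allocation-side case: the smp_wmb() after __folio_set_slab() against the second smp_rmb() in isolate_movable_page(). All names (flag_slab, mapping, MOVABLE_BIT, POISONED) are invented for the example, and C11 release/acquire fences stand in for the kernel's weaker smp_wmb()/smp_rmb(); this is a sketch of the ordering argument, not the kernel implementation.

#include <assert.h>
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define MOVABLE_BIT 0x2UL       /* stands in for PAGE_MAPPING_MOVABLE */
#define POISONED    0x122UL     /* stands in for LIST_POISON2; bit 1 is set */

static atomic_bool  flag_slab;  /* stands in for the PG_slab flag */
static atomic_ulong mapping;    /* stands in for the word aliasing page->mapping */

/* Slab allocation side: publish the flag, then reuse the mapping word. */
static void *alloc_side(void *arg)
{
	(void)arg;
	atomic_store_explicit(&flag_slab, true, memory_order_relaxed);
	atomic_thread_fence(memory_order_release);      /* ~ smp_wmb() */
	atomic_store_explicit(&mapping, POISONED, memory_order_relaxed);
	return NULL;
}

/* Compaction side: the movable-looking check followed by the PageSlab re-check. */
static void *isolate_side(void *arg)
{
	unsigned long m;

	(void)arg;
	m = atomic_load_explicit(&mapping, memory_order_relaxed);
	if (!(m & MOVABLE_BIT))
		return NULL;                            /* does not look movable */
	atomic_thread_fence(memory_order_acquire);      /* ~ the second smp_rmb() */
	/* If the poisoned mapping was visible, the flag store ordered before it
	 * by the release fence must be visible too, so the re-check fires. */
	assert(atomic_load_explicit(&flag_slab, memory_order_relaxed));
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, alloc_side, NULL);
	pthread_create(&b, NULL, isolate_side, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	puts("ordering held");
	return 0;
}

The freeing side works the same way in reverse: folio->mapping is reset before the smp_wmb() and the slab flag is cleared after it, so a reader whose first PageSlab() check sees the flag cleared is guaranteed, after the first smp_rmb(), to also see the NULL mapping and fail the __PageMovable() test.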
