Skip to content

Commit cb9f753

Browse files
yhuang-intel authored and torvalds committed
mm: fix races between swapoff and flush dcache
Thanks to commit 4b3ef9d ("mm/swap: split swap cache into 64MB trunks"), after swapoff the address_space associated with the swap device will be freed. So page_mapping() users which may touch the address_space need some kind of mechanism to prevent the address_space from being freed while it is being accessed. The dcache flushing functions (flush_dcache_page(), etc.) in architecture-specific code may access the address_space of the swap device for anonymous pages in the swap cache via the page_mapping() function. But in some cases there is no mechanism to prevent the swap device from being swapped off, for example:

    CPU1                                CPU2
    __get_user_pages()                  swapoff()
      flush_dcache_page()
        mapping = page_mapping()
          ...                             exit_swap_address_space()
          ...                               kvfree(spaces)
          mapping_mapped(mapping)

The address space may therefore be accessed after being freed. But per cachetlb.txt and Russell King, flush_dcache_page() only cares about file cache pages; for anonymous pages, flush_anon_page() should be used instead. The implementation of flush_dcache_page() in all architectures follows this too: they check whether page_mapping() is NULL and whether mapping_mapped() is true to determine whether to flush the dcache immediately, and they use an interval tree (mapping->i_mmap) to find all user space mappings. Neither mapping_mapped() nor mapping->i_mmap is used by anonymous pages in the swap cache at all. So, to fix the race between swapoff and dcache flushing, page_mapping_file() is added; it returns the address_space for file cache pages and NULL otherwise. All page_mapping() invocations in the dcache flushing functions are replaced with page_mapping_file().
[akpm@linux-foundation.org: simplify page_mapping_file(), per Mike] Link: http://lkml.kernel.org/r/20180305083634.15174-1-ying.huang@intel.com Signed-off-by: "Huang, Ying" <ying.huang@intel.com> Reviewed-by: Andrew Morton <akpm@linux-foundation.org> Cc: Minchan Kim <minchan@kernel.org> Cc: Michal Hocko <mhocko@suse.com> Cc: Johannes Weiner <hannes@cmpxchg.org> Cc: Mel Gorman <mgorman@techsingularity.net> Cc: Dave Hansen <dave.hansen@intel.com> Cc: Chen Liqin <liqin.linux@gmail.com> Cc: Russell King <linux@armlinux.org.uk> Cc: Yoshinori Sato <ysato@users.sourceforge.jp> Cc: "James E.J. Bottomley" <jejb@parisc-linux.org> Cc: Guan Xuetao <gxt@mprc.pku.edu.cn> Cc: "David S. Miller" <davem@davemloft.net> Cc: Chris Zankel <chris@zankel.net> Cc: Vineet Gupta <vgupta@synopsys.com> Cc: Ley Foon Tan <lftan@altera.com> Cc: Ralf Baechle <ralf@linux-mips.org> Cc: Andi Kleen <ak@linux.intel.com> Cc: Mike Rapoport <rppt@linux.vnet.ibm.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
1 parent 1c0ff0f commit cb9f753

File tree

19 files changed

+38
-26
lines changed

19 files changed

+38
-26
lines changed

arch/arc/mm/cache.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -833,7 +833,7 @@ void flush_dcache_page(struct page *page)
833833
}
834834

835835
/* don't handle anon pages here */
836-
mapping = page_mapping(page);
836+
mapping = page_mapping_file(page);
837837
if (!mapping)
838838
return;
839839

arch/arm/mm/copypage-v4mc.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -70,7 +70,7 @@ void v4_mc_copy_user_highpage(struct page *to, struct page *from,
7070
void *kto = kmap_atomic(to);
7171

7272
if (!test_and_set_bit(PG_dcache_clean, &from->flags))
73-
__flush_dcache_page(page_mapping(from), from);
73+
__flush_dcache_page(page_mapping_file(from), from);
7474

7575
raw_spin_lock(&minicache_lock);
7676

arch/arm/mm/copypage-v6.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -76,7 +76,7 @@ static void v6_copy_user_highpage_aliasing(struct page *to,
7676
unsigned long kfrom, kto;
7777

7878
if (!test_and_set_bit(PG_dcache_clean, &from->flags))
79-
__flush_dcache_page(page_mapping(from), from);
79+
__flush_dcache_page(page_mapping_file(from), from);
8080

8181
/* FIXME: not highmem safe */
8282
discard_old_kernel_data(page_address(to));

arch/arm/mm/copypage-xscale.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -90,7 +90,7 @@ void xscale_mc_copy_user_highpage(struct page *to, struct page *from,
9090
void *kto = kmap_atomic(to);
9191

9292
if (!test_and_set_bit(PG_dcache_clean, &from->flags))
93-
__flush_dcache_page(page_mapping(from), from);
93+
__flush_dcache_page(page_mapping_file(from), from);
9494

9595
raw_spin_lock(&minicache_lock);
9696

arch/arm/mm/fault-armv.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -195,7 +195,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
195195
if (page == ZERO_PAGE(0))
196196
return;
197197

198-
mapping = page_mapping(page);
198+
mapping = page_mapping_file(page);
199199
if (!test_and_set_bit(PG_dcache_clean, &page->flags))
200200
__flush_dcache_page(mapping, page);
201201
if (mapping) {

arch/arm/mm/flush.c

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -285,7 +285,7 @@ void __sync_icache_dcache(pte_t pteval)
285285

286286
page = pfn_to_page(pfn);
287287
if (cache_is_vipt_aliasing())
288-
mapping = page_mapping(page);
288+
mapping = page_mapping_file(page);
289289
else
290290
mapping = NULL;
291291

@@ -333,7 +333,7 @@ void flush_dcache_page(struct page *page)
333333
return;
334334
}
335335

336-
mapping = page_mapping(page);
336+
mapping = page_mapping_file(page);
337337

338338
if (!cache_ops_need_broadcast() &&
339339
mapping && !page_mapcount(page))
@@ -363,7 +363,7 @@ void flush_kernel_dcache_page(struct page *page)
363363
if (cache_is_vivt() || cache_is_vipt_aliasing()) {
364364
struct address_space *mapping;
365365

366-
mapping = page_mapping(page);
366+
mapping = page_mapping_file(page);
367367

368368
if (!mapping || mapping_mapped(mapping)) {
369369
void *addr;

arch/mips/mm/cache.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -86,7 +86,7 @@ SYSCALL_DEFINE3(cacheflush, unsigned long, addr, unsigned long, bytes,
8686

8787
void __flush_dcache_page(struct page *page)
8888
{
89-
struct address_space *mapping = page_mapping(page);
89+
struct address_space *mapping = page_mapping_file(page);
9090
unsigned long addr;
9191

9292
if (mapping && !mapping_mapped(mapping)) {

arch/nios2/mm/cacheflush.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -180,7 +180,7 @@ void flush_dcache_page(struct page *page)
180180
if (page == ZERO_PAGE(0))
181181
return;
182182

183-
mapping = page_mapping(page);
183+
mapping = page_mapping_file(page);
184184

185185
/* Flush this page if there are aliases. */
186186
if (mapping && !mapping_mapped(mapping)) {
@@ -215,7 +215,7 @@ void update_mmu_cache(struct vm_area_struct *vma,
215215
if (page == ZERO_PAGE(0))
216216
return;
217217

218-
mapping = page_mapping(page);
218+
mapping = page_mapping_file(page);
219219
if (!test_and_set_bit(PG_dcache_clean, &page->flags))
220220
__flush_dcache_page(mapping, page);
221221

arch/parisc/kernel/cache.c

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -88,7 +88,8 @@ update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
8888
return;
8989

9090
page = pfn_to_page(pfn);
91-
if (page_mapping(page) && test_bit(PG_dcache_dirty, &page->flags)) {
91+
if (page_mapping_file(page) &&
92+
test_bit(PG_dcache_dirty, &page->flags)) {
9293
flush_kernel_dcache_page_addr(pfn_va(pfn));
9394
clear_bit(PG_dcache_dirty, &page->flags);
9495
} else if (parisc_requires_coherency())
@@ -304,7 +305,7 @@ __flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
304305

305306
void flush_dcache_page(struct page *page)
306307
{
307-
struct address_space *mapping = page_mapping(page);
308+
struct address_space *mapping = page_mapping_file(page);
308309
struct vm_area_struct *mpnt;
309310
unsigned long offset;
310311
unsigned long addr, old_addr = 0;

arch/sh/mm/cache-sh4.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -112,7 +112,7 @@ static void sh4_flush_dcache_page(void *arg)
112112
struct page *page = arg;
113113
unsigned long addr = (unsigned long)page_address(page);
114114
#ifndef CONFIG_SMP
115-
struct address_space *mapping = page_mapping(page);
115+
struct address_space *mapping = page_mapping_file(page);
116116

117117
if (mapping && !mapping_mapped(mapping))
118118
clear_bit(PG_dcache_clean, &page->flags);

arch/sh/mm/cache-sh7705.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -136,7 +136,7 @@ static void __flush_dcache_page(unsigned long phys)
136136
static void sh7705_flush_dcache_page(void *arg)
137137
{
138138
struct page *page = arg;
139-
struct address_space *mapping = page_mapping(page);
139+
struct address_space *mapping = page_mapping_file(page);
140140

141141
if (mapping && !mapping_mapped(mapping))
142142
clear_bit(PG_dcache_clean, &page->flags);

arch/sparc/kernel/smp_64.c

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -929,9 +929,9 @@ static inline void __local_flush_dcache_page(struct page *page)
929929
#ifdef DCACHE_ALIASING_POSSIBLE
930930
__flush_dcache_page(page_address(page),
931931
((tlb_type == spitfire) &&
932-
page_mapping(page) != NULL));
932+
page_mapping_file(page) != NULL));
933933
#else
934-
if (page_mapping(page) != NULL &&
934+
if (page_mapping_file(page) != NULL &&
935935
tlb_type == spitfire)
936936
__flush_icache_page(__pa(page_address(page)));
937937
#endif
@@ -958,7 +958,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
958958

959959
if (tlb_type == spitfire) {
960960
data0 = ((u64)&xcall_flush_dcache_page_spitfire);
961-
if (page_mapping(page) != NULL)
961+
if (page_mapping_file(page) != NULL)
962962
data0 |= ((u64)1 << 32);
963963
} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
964964
#ifdef DCACHE_ALIASING_POSSIBLE
@@ -994,7 +994,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
994994
pg_addr = page_address(page);
995995
if (tlb_type == spitfire) {
996996
data0 = ((u64)&xcall_flush_dcache_page_spitfire);
997-
if (page_mapping(page) != NULL)
997+
if (page_mapping_file(page) != NULL)
998998
data0 |= ((u64)1 << 32);
999999
} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
10001000
#ifdef DCACHE_ALIASING_POSSIBLE

arch/sparc/mm/init_64.c

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -206,9 +206,9 @@ inline void flush_dcache_page_impl(struct page *page)
206206
#ifdef DCACHE_ALIASING_POSSIBLE
207207
__flush_dcache_page(page_address(page),
208208
((tlb_type == spitfire) &&
209-
page_mapping(page) != NULL));
209+
page_mapping_file(page) != NULL));
210210
#else
211-
if (page_mapping(page) != NULL &&
211+
if (page_mapping_file(page) != NULL &&
212212
tlb_type == spitfire)
213213
__flush_icache_page(__pa(page_address(page)));
214214
#endif
@@ -490,7 +490,7 @@ void flush_dcache_page(struct page *page)
490490

491491
this_cpu = get_cpu();
492492

493-
mapping = page_mapping(page);
493+
mapping = page_mapping_file(page);
494494
if (mapping && !mapping_mapped(mapping)) {
495495
int dirty = test_bit(PG_dcache_dirty, &page->flags);
496496
if (dirty) {

arch/sparc/mm/tlb.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -128,7 +128,7 @@ void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
128128
goto no_cache_flush;
129129

130130
/* A real file page? */
131-
mapping = page_mapping(page);
131+
mapping = page_mapping_file(page);
132132
if (!mapping)
133133
goto no_cache_flush;
134134

arch/unicore32/mm/flush.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -83,7 +83,7 @@ void flush_dcache_page(struct page *page)
8383
if (page == ZERO_PAGE(0))
8484
return;
8585

86-
mapping = page_mapping(page);
86+
mapping = page_mapping_file(page);
8787

8888
if (mapping && !mapping_mapped(mapping))
8989
clear_bit(PG_dcache_clean, &page->flags);

arch/unicore32/mm/mmu.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -503,7 +503,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
503503
if (page == ZERO_PAGE(0))
504504
return;
505505

506-
mapping = page_mapping(page);
506+
mapping = page_mapping_file(page);
507507
if (!test_and_set_bit(PG_dcache_clean, &page->flags))
508508
__flush_dcache_page(mapping, page);
509509
if (mapping)

arch/xtensa/mm/cache.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -127,7 +127,7 @@ EXPORT_SYMBOL(copy_user_highpage);
127127

128128
void flush_dcache_page(struct page *page)
129129
{
130-
struct address_space *mapping = page_mapping(page);
130+
struct address_space *mapping = page_mapping_file(page);
131131

132132
/*
133133
* If we have a mapping but the page is not mapped to user-space

include/linux/mm.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1155,6 +1155,7 @@ static inline pgoff_t page_index(struct page *page)
11551155

11561156
bool page_mapped(struct page *page);
11571157
struct address_space *page_mapping(struct page *page);
1158+
struct address_space *page_mapping_file(struct page *page);
11581159

11591160
/*
11601161
* Return true only if the page has been allocated with

mm/util.c

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -515,6 +515,16 @@ struct address_space *page_mapping(struct page *page)
515515
}
516516
EXPORT_SYMBOL(page_mapping);
517517

518+
/*
519+
* For file cache pages, return the address_space, otherwise return NULL
520+
*/
521+
struct address_space *page_mapping_file(struct page *page)
522+
{
523+
if (unlikely(PageSwapCache(page)))
524+
return NULL;
525+
return page_mapping(page);
526+
}
527+
518528
/* Slow path of page_mapcount() for compound pages */
519529
int __page_mapcount(struct page *page)
520530
{

0 commit comments

Comments
 (0)