Skip to content

Commit f45840b

Browse files
Nick Piggin authored and torvalds committed
mm: pagecache insertion fewer atomics
Setting and clearing the page locked when inserting it into swapcache / pagecache when it has no other references can use non-atomic page flags operations because no other CPU may be operating on it at this time. This saves one atomic operation when inserting a page into pagecache.

Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
1 parent 9978ad5 commit f45840b

File tree

2 files changed

+9
-9
lines changed

2 files changed

+9
-9
lines changed

include/linux/pagemap.h

Lines changed: 7 additions & 7 deletions
Original file line number | Diff line number | Diff line change
@@ -299,14 +299,14 @@ extern int __lock_page_killable(struct page *page);
299299
extern void __lock_page_nosync(struct page *page);
300300
extern void unlock_page(struct page *page);
301301

302-
static inline void set_page_locked(struct page *page)
302+
static inline void __set_page_locked(struct page *page)
303303
{
304-
set_bit(PG_locked, &page->flags);
304+
__set_bit(PG_locked, &page->flags);
305305
}
306306

307-
static inline void clear_page_locked(struct page *page)
307+
static inline void __clear_page_locked(struct page *page)
308308
{
309-
clear_bit(PG_locked, &page->flags);
309+
__clear_bit(PG_locked, &page->flags);
310310
}
311311

312312
static inline int trylock_page(struct page *page)
@@ -438,17 +438,17 @@ extern void __remove_from_page_cache(struct page *page);
438438

439439
/*
440440
* Like add_to_page_cache_locked, but used to add newly allocated pages:
441-
* the page is new, so we can just run set_page_locked() against it.
441+
* the page is new, so we can just run __set_page_locked() against it.
442442
*/
443443
static inline int add_to_page_cache(struct page *page,
444444
struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
445445
{
446446
int error;
447447

448-
set_page_locked(page);
448+
__set_page_locked(page);
449449
error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
450450
if (unlikely(error))
451-
clear_page_locked(page);
451+
__clear_page_locked(page);
452452
return error;
453453
}
454454

mm/swap_state.c

Lines changed: 2 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -303,7 +303,7 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
303303
* re-using the just freed swap entry for an existing page.
304304
* May fail (-ENOMEM) if radix-tree node allocation failed.
305305
*/
306-
set_page_locked(new_page);
306+
__set_page_locked(new_page);
307307
SetPageSwapBacked(new_page);
308308
err = add_to_swap_cache(new_page, entry, gfp_mask & GFP_KERNEL);
309309
if (likely(!err)) {
@@ -315,7 +315,7 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
315315
return new_page;
316316
}
317317
ClearPageSwapBacked(new_page);
318-
clear_page_locked(new_page);
318+
__clear_page_locked(new_page);
319319
swap_free(entry);
320320
} while (err != -ENOMEM);
321321

0 commit comments

Comments (0)