
Commit 2bcf887

Hugh Dickins authored and torvalds committed
mm: take pagevecs off reclaim stack
Replace the pagevecs in putback_lru_pages() and move_active_pages_to_lru() by lists of pages_to_free, then apply Konstantin Khlebnikov's free_hot_cold_page_list() to them instead of pagevec_release(). This simplifies the flow (no need to drop and retake the lock whenever a pagevec fills up) and reduces stale addresses in stack backtraces (which often showed through the pagevecs); but more importantly, it removes another 120 bytes from the deepest stacks in page reclaim. Although I've not recently seen an actual stack overflow here with a vanilla kernel, move_active_pages_to_lru() has often featured in deep backtraces.

However, free_hot_cold_page_list() does not handle compound pages (nor need it: a Transparent HugePage would have been split by the time it reaches the call in shrink_page_list()); but it is possible for putback_lru_pages() or move_active_pages_to_lru() to be left holding the last reference on a THP, so the unlikely compound case must be excluded before a page is put on pages_to_free.

Remove pagevec_strip(); its work is now done in move_active_pages_to_lru(). The pagevec in scan_mapping_unevictable_pages() remains in mm/vmscan.c, but that is never on the reclaim path and cannot be replaced by a list.

Signed-off-by: Hugh Dickins <hughd@google.com>
Reviewed-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Reviewed-by: Konstantin Khlebnikov <khlebnikov@openvz.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
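The core change is a pattern swap: instead of batching pages into a fixed-size pagevec and periodically dropping the lock to release it, pages whose last reference is dropped are collected on a local list while the lock is held and freed in one pass after the lock is released. The sketch below is a simplified userspace analogue of that pattern, not the kernel code in the diff: the item struct, refcount field, lru_lock mutex, and putback_items() helper are all invented for illustration.

/*
 * Userspace analogue of the pages_to_free pattern (illustration only; the
 * struct, lock and helpers are invented, not kernel interfaces).  While the
 * lock is held, items whose refcount drops to zero are moved to a local
 * to_free list; the whole batch is freed once, after the lock is dropped,
 * instead of unlocking/relocking each time a fixed-size vector (the old
 * pagevec) fills up.
 */
#include <pthread.h>
#include <stdlib.h>

struct item {
	struct item *next;
	int refcount;
};

static pthread_mutex_t lru_lock = PTHREAD_MUTEX_INITIALIZER;
static struct item *lru_list;		/* protected by lru_lock */

static void putback_items(struct item *batch)
{
	struct item *to_free = NULL;	/* plays the role of pages_to_free */

	pthread_mutex_lock(&lru_lock);
	while (batch) {
		struct item *it = batch;

		batch = it->next;
		if (--it->refcount > 0) {
			/* still referenced: put it back on the LRU list */
			it->next = lru_list;
			lru_list = it;
		} else {
			/* last reference: defer the free until after unlock */
			it->next = to_free;
			to_free = it;
		}
	}
	pthread_mutex_unlock(&lru_lock);

	/* free the whole batch without holding the lock */
	while (to_free) {
		struct item *it = to_free;

		to_free = it->next;
		free(it);
	}
}

The compound-page special case in the real patch has no analogue here: free_hot_cold_page_list() cannot free compound pages, so the diff routes those through the compound destructor instead of pages_to_free.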
1 parent 90b3fea commit 2bcf887

3 files changed: +40 −39 lines changed

include/linux/pagevec.h

Lines changed: 0 additions & 2 deletions
@@ -22,7 +22,6 @@ struct pagevec {
 
 void __pagevec_release(struct pagevec *pvec);
 void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru);
-void pagevec_strip(struct pagevec *pvec);
 unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping,
 		pgoff_t start, unsigned nr_pages);
 unsigned pagevec_lookup_tag(struct pagevec *pvec,
@@ -59,7 +58,6 @@ static inline unsigned pagevec_add(struct pagevec *pvec, struct page *page)
 	return pagevec_space(pvec);
 }
 
-
 static inline void pagevec_release(struct pagevec *pvec)
 {
 	if (pagevec_count(pvec))

mm/swap.c

Lines changed: 0 additions & 19 deletions
@@ -23,7 +23,6 @@
 #include <linux/init.h>
 #include <linux/export.h>
 #include <linux/mm_inline.h>
-#include <linux/buffer_head.h>	/* for try_to_release_page() */
 #include <linux/percpu_counter.h>
 #include <linux/percpu.h>
 #include <linux/cpu.h>
@@ -730,24 +729,6 @@ void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru)
 
 EXPORT_SYMBOL(____pagevec_lru_add);
 
-/*
- * Try to drop buffers from the pages in a pagevec
- */
-void pagevec_strip(struct pagevec *pvec)
-{
-	int i;
-
-	for (i = 0; i < pagevec_count(pvec); i++) {
-		struct page *page = pvec->pages[i];
-
-		if (page_has_private(page) && trylock_page(page)) {
-			if (page_has_private(page))
-				try_to_release_page(page, 0);
-			unlock_page(page);
-		}
-	}
-}
-
 /**
  * pagevec_lookup - gang pagecache lookup
  * @pvec: Where the resulting pages are placed

mm/vmscan.c

Lines changed: 40 additions & 18 deletions
@@ -1398,12 +1398,10 @@ putback_lru_pages(struct mem_cgroup_zone *mz, struct scan_control *sc,
 			 struct list_head *page_list)
 {
 	struct page *page;
-	struct pagevec pvec;
+	LIST_HEAD(pages_to_free);
 	struct zone *zone = mz->zone;
 	struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(mz);
 
-	pagevec_init(&pvec, 1);
-
 	/*
 	 * Put back any unfreeable pages.
 	 */
@@ -1427,17 +1425,24 @@ putback_lru_pages(struct mem_cgroup_zone *mz, struct scan_control *sc,
 			int numpages = hpage_nr_pages(page);
 			reclaim_stat->recent_rotated[file] += numpages;
 		}
-		if (!pagevec_add(&pvec, page)) {
-			spin_unlock_irq(&zone->lru_lock);
-			__pagevec_release(&pvec);
-			spin_lock_irq(&zone->lru_lock);
+		if (put_page_testzero(page)) {
+			__ClearPageLRU(page);
+			__ClearPageActive(page);
+			del_page_from_lru_list(zone, page, lru);
+
+			if (unlikely(PageCompound(page))) {
+				spin_unlock_irq(&zone->lru_lock);
+				(*get_compound_page_dtor(page))(page);
+				spin_lock_irq(&zone->lru_lock);
+			} else
+				list_add(&page->lru, &pages_to_free);
 		}
 	}
 	__mod_zone_page_state(zone, NR_ISOLATED_ANON, -nr_anon);
 	__mod_zone_page_state(zone, NR_ISOLATED_FILE, -nr_file);
 
 	spin_unlock_irq(&zone->lru_lock);
-	pagevec_release(&pvec);
+	free_hot_cold_page_list(&pages_to_free, 1);
 }
 
 static noinline_for_stack void
@@ -1647,13 +1652,23 @@ shrink_inactive_list(unsigned long nr_to_scan, struct mem_cgroup_zone *mz,
 
 static void move_active_pages_to_lru(struct zone *zone,
 				     struct list_head *list,
+				     struct list_head *pages_to_free,
 				     enum lru_list lru)
 {
 	unsigned long pgmoved = 0;
-	struct pagevec pvec;
 	struct page *page;
 
-	pagevec_init(&pvec, 1);
+	if (buffer_heads_over_limit) {
+		spin_unlock_irq(&zone->lru_lock);
+		list_for_each_entry(page, list, lru) {
+			if (page_has_private(page) && trylock_page(page)) {
+				if (page_has_private(page))
+					try_to_release_page(page, 0);
+				unlock_page(page);
+			}
+		}
+		spin_lock_irq(&zone->lru_lock);
+	}
 
 	while (!list_empty(list)) {
 		struct lruvec *lruvec;
@@ -1667,12 +1682,17 @@ static void move_active_pages_to_lru(struct zone *zone,
 		list_move(&page->lru, &lruvec->lists[lru]);
 		pgmoved += hpage_nr_pages(page);
 
-		if (!pagevec_add(&pvec, page) || list_empty(list)) {
-			spin_unlock_irq(&zone->lru_lock);
-			if (buffer_heads_over_limit)
-				pagevec_strip(&pvec);
-			__pagevec_release(&pvec);
-			spin_lock_irq(&zone->lru_lock);
+		if (put_page_testzero(page)) {
+			__ClearPageLRU(page);
+			__ClearPageActive(page);
+			del_page_from_lru_list(zone, page, lru);
+
+			if (unlikely(PageCompound(page))) {
+				spin_unlock_irq(&zone->lru_lock);
+				(*get_compound_page_dtor(page))(page);
+				spin_lock_irq(&zone->lru_lock);
+			} else
+				list_add(&page->lru, pages_to_free);
 		}
 	}
 	__mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved);
@@ -1766,12 +1786,14 @@ static void shrink_active_list(unsigned long nr_pages,
 	 */
 	reclaim_stat->recent_rotated[file] += nr_rotated;
 
-	move_active_pages_to_lru(zone, &l_active,
+	move_active_pages_to_lru(zone, &l_active, &l_hold,
 						LRU_ACTIVE + file * LRU_FILE);
-	move_active_pages_to_lru(zone, &l_inactive,
+	move_active_pages_to_lru(zone, &l_inactive, &l_hold,
 						LRU_BASE + file * LRU_FILE);
 	__mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken);
 	spin_unlock_irq(&zone->lru_lock);
+
+	free_hot_cold_page_list(&l_hold, 1);
 }
 
 #ifdef CONFIG_SWAP
