vmscan: remove isolate_pages callback scan control
For now we have only global isolation versus memory control group
isolation; do not allow the reclaim entry function to set an arbitrary
page isolation callback, as we do not need that flexibility.

And since we already pass around the group descriptor for the memory
control group isolation case, just use it to decide which of the two
isolator functions to use.

The decisions can be merged into nearby branches, so there is no extra
cost there.  In fact, we save the indirect calls.

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
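
To illustrate the shape of the change outside the diff, here is a minimal,
self-contained C sketch of the pattern: a direct branch on the group
descriptor replaces the pluggable callback. The function names echo the
kernel ones, but the types, signatures, and the isolate() helper are
simplified stand-ins, not the real mm/vmscan.c code.

#include <stddef.h>
#include <stdio.h>

/* Stand-in types; not the kernel's definitions. */
struct mem_cgroup {
	const char *name;
};

struct scan_control {
	struct mem_cgroup *mem_cgroup;	/* NULL means global reclaim */
	/*
	 * Before this patch a pluggable callback lived here:
	 *   unsigned long (*isolate_pages)(...);
	 */
};

static unsigned long isolate_pages_global(unsigned long nr)
{
	printf("global LRU: isolate %lu pages\n", nr);
	return nr;
}

static unsigned long mem_cgroup_isolate_pages(unsigned long nr,
					      struct mem_cgroup *mem)
{
	printf("memcg %s: isolate %lu pages\n", mem->name, nr);
	return nr;
}

/*
 * The group descriptor already carried in the scan control selects the
 * isolator; no indirect call is needed.
 */
static unsigned long isolate(struct scan_control *sc, unsigned long nr)
{
	if (!sc->mem_cgroup)	/* the scanning_global_lru() case */
		return isolate_pages_global(nr);
	return mem_cgroup_isolate_pages(nr, sc->mem_cgroup);
}

int main(void)
{
	struct mem_cgroup memcg = { .name = "demo" };
	struct scan_control global_sc = { .mem_cgroup = NULL };
	struct scan_control memcg_sc = { .mem_cgroup = &memcg };

	isolate(&global_sc, 32);	/* takes the global branch */
	isolate(&memcg_sc, 32);		/* takes the memcg branch */
	return 0;
}

The branches this folds into already exist at both call sites for the
statistics accounting, which is why the commit message can say there is no
extra cost beyond dropping the indirect call.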
hnaz authored and torvalds committed May 25, 2010
1 parent 0aeb233 commit 8b25c6d
Showing 2 changed files with 35 additions and 30 deletions.
include/linux/memcontrol.h (13 changes: 7 additions & 6 deletions)

@@ -25,6 +25,13 @@ struct page_cgroup;
 struct page;
 struct mm_struct;
 
+extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
+					struct list_head *dst,
+					unsigned long *scanned, int order,
+					int mode, struct zone *z,
+					struct mem_cgroup *mem_cont,
+					int active, int file);
+
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR
 /*
  * All "charge" functions with gfp_mask should use GFP_KERNEL or
@@ -64,12 +71,6 @@ extern void mem_cgroup_uncharge_cache_page(struct page *page);
 extern int mem_cgroup_shmem_charge_fallback(struct page *page,
 			struct mm_struct *mm, gfp_t gfp_mask);
 
-extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
-					struct list_head *dst,
-					unsigned long *scanned, int order,
-					int mode, struct zone *z,
-					struct mem_cgroup *mem_cont,
-					int active, int file);
 extern void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask);
 int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem);
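
The declaration moves above the CONFIG_CGROUP_MEM_RES_CTLR block because
mm/vmscan.c now names mem_cgroup_isolate_pages() directly at its call
sites, so the prototype must be visible even when the memory controller is
compiled out. A sketch of that mechanism follows, assuming
scanning_global_lru() collapses to a constant 1 without memcg (which is how
it reads in the vmscan.c of this period); the types are stand-ins and the
comments are mine, not kernel text.

/* Sketch, not kernel code: stand-in type so the macro below makes sense. */
struct mem_cgroup;
struct scan_control {
	struct mem_cgroup *mem_cgroup;	/* NULL for global reclaim */
};

/* The predicate the new branches test, per mm/vmscan.c of this era: */
#ifdef CONFIG_CGROUP_MEM_RES_CTLR
#define scanning_global_lru(sc)	(!(sc)->mem_cgroup)
#else
#define scanning_global_lru(sc)	(1)
#endif

/*
 * With the controller compiled out the condition is the constant 1, so
 * the else branches added by this patch are dead code and their calls
 * to mem_cgroup_isolate_pages() are dropped by the compiler; no memcg
 * object code needs to exist.  The call still has to parse, though,
 * which is why the prototype now sits above the #ifdef in memcontrol.h.
 */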
mm/vmscan.c (52 changes: 28 additions & 24 deletions)

@@ -89,12 +89,6 @@ struct scan_control {
 	 * are scanned.
 	 */
 	nodemask_t *nodemask;
-
-	/* Pluggable isolate pages callback */
-	unsigned long (*isolate_pages)(unsigned long nr, struct list_head *dst,
-			unsigned long *scanned, int order, int mode,
-			struct zone *z, struct mem_cgroup *mem_cont,
-			int active, int file);
 };
 
 #define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))
@@ -1010,7 +1004,6 @@ static unsigned long isolate_pages_global(unsigned long nr,
 					struct list_head *dst,
 					unsigned long *scanned, int order,
 					int mode, struct zone *z,
-					struct mem_cgroup *mem_cont,
 					int active, int file)
 {
 	int lru = LRU_BASE;
@@ -1154,18 +1147,28 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
 		unsigned long nr_anon;
 		unsigned long nr_file;
 
-		nr_taken = sc->isolate_pages(SWAP_CLUSTER_MAX,
-				&page_list, &nr_scan, sc->order, mode,
-				zone, sc->mem_cgroup, 0, file);
-
 		if (scanning_global_lru(sc)) {
+			nr_taken = isolate_pages_global(SWAP_CLUSTER_MAX,
+							&page_list, &nr_scan,
+							sc->order, mode,
+							zone, 0, file);
 			zone->pages_scanned += nr_scan;
 			if (current_is_kswapd())
 				__count_zone_vm_events(PGSCAN_KSWAPD, zone,
 						       nr_scan);
 			else
 				__count_zone_vm_events(PGSCAN_DIRECT, zone,
 						       nr_scan);
+		} else {
+			nr_taken = mem_cgroup_isolate_pages(SWAP_CLUSTER_MAX,
+							&page_list, &nr_scan,
+							sc->order, mode,
+							zone, sc->mem_cgroup,
+							0, file);
+			/*
+			 * mem_cgroup_isolate_pages() keeps track of
+			 * scanned pages on its own.
+			 */
 		}
 
 		if (nr_taken == 0)
@@ -1343,16 +1346,23 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 
 	lru_add_drain();
 	spin_lock_irq(&zone->lru_lock);
-	nr_taken = sc->isolate_pages(nr_pages, &l_hold, &pgscanned, sc->order,
-					ISOLATE_ACTIVE, zone,
-					sc->mem_cgroup, 1, file);
-	/*
-	 * zone->pages_scanned is used for detect zone's oom
-	 * mem_cgroup remembers nr_scan by itself.
-	 */
 	if (scanning_global_lru(sc)) {
+		nr_taken = isolate_pages_global(nr_pages, &l_hold,
+						&pgscanned, sc->order,
+						ISOLATE_ACTIVE, zone,
+						1, file);
 		zone->pages_scanned += pgscanned;
+	} else {
+		nr_taken = mem_cgroup_isolate_pages(nr_pages, &l_hold,
+						&pgscanned, sc->order,
+						ISOLATE_ACTIVE, zone,
+						sc->mem_cgroup, 1, file);
+		/*
+		 * mem_cgroup_isolate_pages() keeps track of
+		 * scanned pages on its own.
+		 */
 	}
+
 	reclaim_stat->recent_scanned[file] += nr_taken;
 
 	__count_zone_vm_events(PGREFILL, zone, pgscanned);
@@ -1882,7 +1892,6 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
 		.swappiness = vm_swappiness,
 		.order = order,
 		.mem_cgroup = NULL,
-		.isolate_pages = isolate_pages_global,
 		.nodemask = nodemask,
 	};
 
@@ -1903,7 +1912,6 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
 		.swappiness = swappiness,
 		.order = 0,
 		.mem_cgroup = mem,
-		.isolate_pages = mem_cgroup_isolate_pages,
 	};
 	nodemask_t nm = nodemask_of_node(nid);
 
@@ -1937,7+1945,6 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
 		.swappiness = swappiness,
 		.order = 0,
 		.mem_cgroup = mem_cont,
-		.isolate_pages = mem_cgroup_isolate_pages,
 		.nodemask = NULL, /* we don't care the placement */
 	};
 
@@ -2015,7 +2022,6 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order)
 		.swappiness = vm_swappiness,
 		.order = order,
 		.mem_cgroup = NULL,
-		.isolate_pages = isolate_pages_global,
 	};
 	/*
 	 * temp_priority is used to remember the scanning priority at which
@@ -2394,7 +2400,6 @@ unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
 		.hibernation_mode = 1,
 		.swappiness = vm_swappiness,
 		.order = 0,
-		.isolate_pages = isolate_pages_global,
 	};
 	struct zonelist * zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);
 	struct task_struct *p = current;
@@ -2579,7 +2584,6 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 		.gfp_mask = gfp_mask,
 		.swappiness = vm_swappiness,
 		.order = order,
-		.isolate_pages = isolate_pages_global,
 	};
 	unsigned long slab_reclaimable;
 
