Skip to content

Commit 64f2199

Browse files
hnaz authored and torvalds committed
mm: memcontrol: remove obsolete kmemcg pinning tricks
As charges now pin the css explicitely, there is no more need for kmemcg to acquire a proxy reference for outstanding pages during offlining, or maintain state to identify such "dead" groups. This was the last user of the uncharge functions' return values, so remove them as well. Signed-off-by: Johannes Weiner <hannes@cmpxchg.org> Reviewed-by: Vladimir Davydov <vdavydov@parallels.com> Acked-by: Michal Hocko <mhocko@suse.cz> Cc: David Rientjes <rientjes@google.com> Cc: Tejun Heo <tj@kernel.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
1 parent e8ea14c commit 64f2199

File tree

3 files changed

+7
-94
lines changed

3 files changed

+7
-94
lines changed

include/linux/page_counter.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -34,12 +34,12 @@ static inline unsigned long page_counter_read(struct page_counter *counter)
3434
return atomic_long_read(&counter->count);
3535
}
3636

37-
int page_counter_cancel(struct page_counter *counter, unsigned long nr_pages);
37+
void page_counter_cancel(struct page_counter *counter, unsigned long nr_pages);
3838
void page_counter_charge(struct page_counter *counter, unsigned long nr_pages);
3939
int page_counter_try_charge(struct page_counter *counter,
4040
unsigned long nr_pages,
4141
struct page_counter **fail);
42-
int page_counter_uncharge(struct page_counter *counter, unsigned long nr_pages);
42+
void page_counter_uncharge(struct page_counter *counter, unsigned long nr_pages);
4343
int page_counter_limit(struct page_counter *counter, unsigned long limit);
4444
int page_counter_memparse(const char *buf, unsigned long *nr_pages);
4545

mm/memcontrol.c

Lines changed: 1 addition & 73 deletions
Original file line numberDiff line numberDiff line change
@@ -369,7 +369,6 @@ struct mem_cgroup {
369369
/* internal only representation about the status of kmem accounting. */
370370
enum {
371371
KMEM_ACCOUNTED_ACTIVE, /* accounted by this cgroup itself */
372-
KMEM_ACCOUNTED_DEAD, /* dead memcg with pending kmem charges */
373372
};
374373

375374
#ifdef CONFIG_MEMCG_KMEM
@@ -383,22 +382,6 @@ static bool memcg_kmem_is_active(struct mem_cgroup *memcg)
383382
return test_bit(KMEM_ACCOUNTED_ACTIVE, &memcg->kmem_account_flags);
384383
}
385384

386-
static void memcg_kmem_mark_dead(struct mem_cgroup *memcg)
387-
{
388-
/*
389-
* Our caller must use css_get() first, because memcg_uncharge_kmem()
390-
* will call css_put() if it sees the memcg is dead.
391-
*/
392-
smp_wmb();
393-
if (test_bit(KMEM_ACCOUNTED_ACTIVE, &memcg->kmem_account_flags))
394-
set_bit(KMEM_ACCOUNTED_DEAD, &memcg->kmem_account_flags);
395-
}
396-
397-
static bool memcg_kmem_test_and_clear_dead(struct mem_cgroup *memcg)
398-
{
399-
return test_and_clear_bit(KMEM_ACCOUNTED_DEAD,
400-
&memcg->kmem_account_flags);
401-
}
402385
#endif
403386

404387
/* Stuffs for move charges at task migration. */
@@ -2758,22 +2741,7 @@ static void memcg_uncharge_kmem(struct mem_cgroup *memcg,
27582741
if (do_swap_account)
27592742
page_counter_uncharge(&memcg->memsw, nr_pages);
27602743

2761-
/* Not down to 0 */
2762-
if (page_counter_uncharge(&memcg->kmem, nr_pages)) {
2763-
css_put_many(&memcg->css, nr_pages);
2764-
return;
2765-
}
2766-
2767-
/*
2768-
* Releases a reference taken in kmem_cgroup_css_offline in case
2769-
* this last uncharge is racing with the offlining code or it is
2770-
* outliving the memcg existence.
2771-
*
2772-
* The memory barrier imposed by test&clear is paired with the
2773-
* explicit one in memcg_kmem_mark_dead().
2774-
*/
2775-
if (memcg_kmem_test_and_clear_dead(memcg))
2776-
css_put(&memcg->css);
2744+
page_counter_uncharge(&memcg->kmem, nr_pages);
27772745

27782746
css_put_many(&memcg->css, nr_pages);
27792747
}
@@ -4757,40 +4725,6 @@ static void memcg_destroy_kmem(struct mem_cgroup *memcg)
47574725
{
47584726
mem_cgroup_sockets_destroy(memcg);
47594727
}
4760-
4761-
static void kmem_cgroup_css_offline(struct mem_cgroup *memcg)
4762-
{
4763-
if (!memcg_kmem_is_active(memcg))
4764-
return;
4765-
4766-
/*
4767-
* kmem charges can outlive the cgroup. In the case of slab
4768-
* pages, for instance, a page contain objects from various
4769-
* processes. As we prevent from taking a reference for every
4770-
* such allocation we have to be careful when doing uncharge
4771-
* (see memcg_uncharge_kmem) and here during offlining.
4772-
*
4773-
* The idea is that that only the _last_ uncharge which sees
4774-
* the dead memcg will drop the last reference. An additional
4775-
* reference is taken here before the group is marked dead
4776-
* which is then paired with css_put during uncharge resp. here.
4777-
*
4778-
* Although this might sound strange as this path is called from
4779-
* css_offline() when the reference might have dropped down to 0 and
4780-
* shouldn't be incremented anymore (css_tryget_online() would
4781-
* fail) we do not have other options because of the kmem
4782-
* allocations lifetime.
4783-
*/
4784-
css_get(&memcg->css);
4785-
4786-
memcg_kmem_mark_dead(memcg);
4787-
4788-
if (page_counter_read(&memcg->kmem))
4789-
return;
4790-
4791-
if (memcg_kmem_test_and_clear_dead(memcg))
4792-
css_put(&memcg->css);
4793-
}
47944728
#else
47954729
static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
47964730
{
@@ -4800,10 +4734,6 @@ static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
48004734
static void memcg_destroy_kmem(struct mem_cgroup *memcg)
48014735
{
48024736
}
4803-
4804-
static void kmem_cgroup_css_offline(struct mem_cgroup *memcg)
4805-
{
4806-
}
48074737
#endif
48084738

48094739
/*
@@ -5407,8 +5337,6 @@ static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
54075337
}
54085338
spin_unlock(&memcg->event_list_lock);
54095339

5410-
kmem_cgroup_css_offline(memcg);
5411-
54125340
/*
54135341
* This requires that offlining is serialized. Right now that is
54145342
* guaranteed because css_killed_work_fn() holds the cgroup_mutex.

mm/page_counter.c

Lines changed: 4 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -16,19 +16,14 @@
1616
* page_counter_cancel - take pages out of the local counter
1717
* @counter: counter
1818
* @nr_pages: number of pages to cancel
19-
*
20-
* Returns whether there are remaining pages in the counter.
2119
*/
22-
int page_counter_cancel(struct page_counter *counter, unsigned long nr_pages)
20+
void page_counter_cancel(struct page_counter *counter, unsigned long nr_pages)
2321
{
2422
long new;
2523

2624
new = atomic_long_sub_return(nr_pages, &counter->count);
27-
2825
/* More uncharges than charges? */
2926
WARN_ON_ONCE(new < 0);
30-
31-
return new > 0;
3227
}
3328

3429
/**
@@ -117,23 +112,13 @@ int page_counter_try_charge(struct page_counter *counter,
117112
* page_counter_uncharge - hierarchically uncharge pages
118113
* @counter: counter
119114
* @nr_pages: number of pages to uncharge
120-
*
121-
* Returns whether there are remaining charges in @counter.
122115
*/
123-
int page_counter_uncharge(struct page_counter *counter, unsigned long nr_pages)
116+
void page_counter_uncharge(struct page_counter *counter, unsigned long nr_pages)
124117
{
125118
struct page_counter *c;
126-
int ret = 1;
127119

128-
for (c = counter; c; c = c->parent) {
129-
int remainder;
130-
131-
remainder = page_counter_cancel(c, nr_pages);
132-
if (c == counter && !remainder)
133-
ret = 0;
134-
}
135-
136-
return ret;
120+
for (c = counter; c; c = c->parent)
121+
page_counter_cancel(c, nr_pages);
137122
}
138123

139124
/**

0 commit comments

Comments
 (0)