@@ -369,7 +369,6 @@ struct mem_cgroup {
369369/* internal only representation about the status of kmem accounting. */
370370enum {
371371 KMEM_ACCOUNTED_ACTIVE , /* accounted by this cgroup itself */
372- KMEM_ACCOUNTED_DEAD , /* dead memcg with pending kmem charges */
373372};
374373
375374#ifdef CONFIG_MEMCG_KMEM
@@ -383,22 +382,6 @@ static bool memcg_kmem_is_active(struct mem_cgroup *memcg)
383382 return test_bit (KMEM_ACCOUNTED_ACTIVE , & memcg -> kmem_account_flags );
384383}
385384
386- static void memcg_kmem_mark_dead (struct mem_cgroup * memcg )
387- {
388- /*
389- * Our caller must use css_get() first, because memcg_uncharge_kmem()
390- * will call css_put() if it sees the memcg is dead.
391- */
392- smp_wmb ();
393- if (test_bit (KMEM_ACCOUNTED_ACTIVE , & memcg -> kmem_account_flags ))
394- set_bit (KMEM_ACCOUNTED_DEAD , & memcg -> kmem_account_flags );
395- }
396-
397- static bool memcg_kmem_test_and_clear_dead (struct mem_cgroup * memcg )
398- {
399- return test_and_clear_bit (KMEM_ACCOUNTED_DEAD ,
400- & memcg -> kmem_account_flags );
401- }
402385#endif
403386
404387/* Stuffs for move charges at task migration. */
@@ -2758,22 +2741,7 @@ static void memcg_uncharge_kmem(struct mem_cgroup *memcg,
27582741 if (do_swap_account )
27592742 page_counter_uncharge (& memcg -> memsw , nr_pages );
27602743
2761- /* Not down to 0 */
2762- if (page_counter_uncharge (& memcg -> kmem , nr_pages )) {
2763- css_put_many (& memcg -> css , nr_pages );
2764- return ;
2765- }
2766-
2767- /*
2768- * Releases a reference taken in kmem_cgroup_css_offline in case
2769- * this last uncharge is racing with the offlining code or it is
2770- * outliving the memcg existence.
2771- *
2772- * The memory barrier imposed by test&clear is paired with the
2773- * explicit one in memcg_kmem_mark_dead().
2774- */
2775- if (memcg_kmem_test_and_clear_dead (memcg ))
2776- css_put (& memcg -> css );
2744+ page_counter_uncharge (& memcg -> kmem , nr_pages );
27772745
27782746 css_put_many (& memcg -> css , nr_pages );
27792747}
@@ -4757,40 +4725,6 @@ static void memcg_destroy_kmem(struct mem_cgroup *memcg)
47574725{
47584726 mem_cgroup_sockets_destroy (memcg );
47594727}
4760-
4761- static void kmem_cgroup_css_offline (struct mem_cgroup * memcg )
4762- {
4763- if (!memcg_kmem_is_active (memcg ))
4764- return ;
4765-
4766- /*
4767- * kmem charges can outlive the cgroup. In the case of slab
4768- * pages, for instance, a page can contain objects from various
4769- * processes. As we prevent from taking a reference for every
4770- * such allocation we have to be careful when doing uncharge
4771- * (see memcg_uncharge_kmem) and here during offlining.
4772- *
4773- * The idea is that only the _last_ uncharge which sees
4774- * the dead memcg will drop the last reference. An additional
4775- * reference is taken here before the group is marked dead
4776- * which is then paired with css_put during uncharge resp. here.
4777- *
4778- * Although this might sound strange as this path is called from
4779- * css_offline() when the reference might have dropped down to 0 and
4780- * shouldn't be incremented anymore (css_tryget_online() would
4781- * fail) we do not have other options because of the kmem
4782- * allocations lifetime.
4783- */
4784- css_get (& memcg -> css );
4785-
4786- memcg_kmem_mark_dead (memcg );
4787-
4788- if (page_counter_read (& memcg -> kmem ))
4789- return ;
4790-
4791- if (memcg_kmem_test_and_clear_dead (memcg ))
4792- css_put (& memcg -> css );
4793- }
47944728#else
47954729static int memcg_init_kmem (struct mem_cgroup * memcg , struct cgroup_subsys * ss )
47964730{
@@ -4800,10 +4734,6 @@ static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
48004734static void memcg_destroy_kmem (struct mem_cgroup * memcg )
48014735{
48024736}
4803-
4804- static void kmem_cgroup_css_offline (struct mem_cgroup * memcg )
4805- {
4806- }
48074737#endif
48084738
48094739/*
@@ -5407,8 +5337,6 @@ static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
54075337 }
54085338 spin_unlock (& memcg -> event_list_lock );
54095339
5410- kmem_cgroup_css_offline (memcg );
5411-
54125340 /*
54135341 * This requires that offlining is serialized. Right now that is
54145342 * guaranteed because css_killed_work_fn() holds the cgroup_mutex.
0 commit comments