mm, sl[ou]b: improve memory accounting
Patch series "guarantee natural alignment for kmalloc()", v2.

This patch (of 2):

SLOB currently doesn't account its pages at all, so in /proc/meminfo the
Slab field shows zero.  Modifying a counter on page allocation and
freeing should be acceptable even for the small system scenarios SLOB is
intended for.  Since reclaimable caches are not separated in SLOB,
account everything as unreclaimable.
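
The accounting pattern, as a condensed sketch (the helper name is
hypothetical; the actual hunks below open-code these calls at each
allocation and free site):

	/* Hypothetical helper summarizing the accounting added below:
	 * bump the per-node counter when pages come from the page
	 * allocator, subtract it when they go back. */
	static void account_slob_page(struct page *page, int order, bool alloc)
	{
		/* SLOB cannot tell reclaimable caches apart, so count
		 * everything as unreclaimable. */
		mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE,
				    alloc ? (1 << order) : -(1 << order));
	}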

SLUB currently doesn't account kmalloc() and kmalloc_node() allocations
larger than an order-1 page, which are passed directly to the page
allocator.  As they also don't appear in /proc/slabinfo, it might look
like a memory leak.  For consistency, account them as well.  (SLAB
doesn't actually use the page allocator directly, so no change there.)
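
For context, a simplified sketch of the SLUB-side kmalloc() dispatch
(an approximation of the include/linux/slab.h inline of this era, not
part of the patch): sizes above two pages bypass the kmalloc caches and
land in the shared kmalloc_order() path patched below.

	/* Simplified sketch; the real inline also special-cases
	 * compile-time-constant sizes and the cache-index lookup. */
	static __always_inline void *kmalloc_sketch(size_t size, gfp_t flags)
	{
		if (size > KMALLOC_MAX_CACHE_SIZE)	/* > order-1 page */
			return kmalloc_large(size, flags); /* page allocator */
		return __kmalloc(size, flags);		/* kmalloc caches */
	}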

Ideally SLOB and SLUB would be handled in separate patches, but due to
the shared kmalloc_order() function and the different kfree()
implementations, it's easier to patch both at once to prevent
inconsistencies: accounting added only in the shared allocation path
would otherwise be left unbalanced by whichever allocator's kfree() had
not yet been updated.

Link: http://lkml.kernel.org/r/20190826111627.7505-2-vbabka@suse.cz
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Ming Lei <ming.lei@redhat.com>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: "Darrick J . Wong" <darrick.wong@oracle.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: James Bottomley <James.Bottomley@HansenPartnership.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
tehcaster authored and torvalds committed Oct 7, 2019
1 parent 1bc63fb commit 6a486c0
Showing 3 changed files with 33 additions and 9 deletions.
mm/slab_common.c: 6 additions & 2 deletions
@@ -1287,12 +1287,16 @@ void __init create_kmalloc_caches(slab_flags_t flags)
  */
 void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
 {
-	void *ret;
+	void *ret = NULL;
 	struct page *page;
 
 	flags |= __GFP_COMP;
 	page = alloc_pages(flags, order);
-	ret = page ? page_address(page) : NULL;
+	if (likely(page)) {
+		ret = page_address(page);
+		mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE,
+				    1 << order);
+	}
 	ret = kasan_kmalloc_large(ret, size, flags);
 	/* As ret might get tagged, call kmemleak hook after KASAN. */
 	kmemleak_alloc(ret, size, 1, flags);
mm/slob.c: 16 additions & 4 deletions
@@ -190,7 +190,7 @@ static int slob_last(slob_t *s)
 
 static void *slob_new_pages(gfp_t gfp, int order, int node)
 {
-	void *page;
+	struct page *page;
 
 #ifdef CONFIG_NUMA
 	if (node != NUMA_NO_NODE)
@@ -202,14 +202,21 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
 	if (!page)
 		return NULL;
 
+	mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE,
+			    1 << order);
 	return page_address(page);
 }
 
 static void slob_free_pages(void *b, int order)
 {
+	struct page *sp = virt_to_page(b);
+
 	if (current->reclaim_state)
 		current->reclaim_state->reclaimed_slab += 1 << order;
-	free_pages((unsigned long)b, order);
+
+	mod_node_page_state(page_pgdat(sp), NR_SLAB_UNRECLAIMABLE,
+			    -(1 << order));
+	__free_pages(sp, order);
 }
 
 /*
@@ -521,8 +528,13 @@ void kfree(const void *block)
 		int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
 		unsigned int *m = (unsigned int *)(block - align);
 		slob_free(m, *m + align);
-	} else
-		__free_pages(sp, compound_order(sp));
+	} else {
+		unsigned int order = compound_order(sp);
+		mod_node_page_state(page_pgdat(sp), NR_SLAB_UNRECLAIMABLE,
+				    -(1 << order));
+		__free_pages(sp, order);
+
+	}
 }
 EXPORT_SYMBOL(kfree);
 
mm/slub.c: 11 additions & 3 deletions
@@ -3821,11 +3821,15 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
 {
 	struct page *page;
 	void *ptr = NULL;
+	unsigned int order = get_order(size);
 
 	flags |= __GFP_COMP;
-	page = alloc_pages_node(node, flags, get_order(size));
-	if (page)
+	page = alloc_pages_node(node, flags, order);
+	if (page) {
 		ptr = page_address(page);
+		mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE,
+				    1 << order);
+	}
 
 	return kmalloc_large_node_hook(ptr, size, flags);
 }
@@ -3951,9 +3955,13 @@ void kfree(const void *x)
 
 	page = virt_to_head_page(x);
 	if (unlikely(!PageSlab(page))) {
+		unsigned int order = compound_order(page);
+
 		BUG_ON(!PageCompound(page));
 		kfree_hook(object);
-		__free_pages(page, compound_order(page));
+		mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE,
+				    -(1 << order));
+		__free_pages(page, order);
 		return;
 	}
 	slab_free(page->slab_cache, page, object, NULL, 1, _RET_IP_);
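
To observe the effect, the relevant /proc/meminfo fields can be compared
before and after a workload that does large kmalloc() allocations; a
minimal userspace reader (assuming only the standard /proc/meminfo
format):

	/* Print the slab counters that this patch makes accurate for SLOB
	 * and for page-allocator-backed SLUB allocations. */
	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		char line[128];
		FILE *f = fopen("/proc/meminfo", "r");

		if (!f)
			return 1;
		while (fgets(line, sizeof(line), f))
			if (!strncmp(line, "Slab:", 5) ||
			    !strncmp(line, "SReclaimable:", 13) ||
			    !strncmp(line, "SUnreclaim:", 11))
				fputs(line, stdout);
		fclose(f);
		return 0;
	}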
