mm/slub.c: wrap cpu_slab->partial in CONFIG_SLUB_CPU_PARTIAL
cpu_slab's field partial is only used when CONFIG_SLUB_CPU_PARTIAL is set,
so wrapping it in that option saves a pointer's worth of space per cpu for
every slab cache whenever the option is disabled.

This patch wraps cpu_slab->partial in CONFIG_SLUB_CPU_PARTIAL and wraps
its sysfs use too.

[akpm@linux-foundation.org: avoid strange 80-col tricks]
Link: http://lkml.kernel.org/r/20170502144533.10729-3-richard.weiyang@gmail.com
Signed-off-by: Wei Yang <richard.weiyang@gmail.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
RichardWeiYang authored and torvalds committed Jul 6, 2017
1 parent d3111e6 commit a93cf07
Showing 2 changed files with 30 additions and 7 deletions.
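
As a rough aid to reading the diffs below, here is a minimal standalone sketch of the pattern this commit introduces: the field only exists when CONFIG_SLUB_CPU_PARTIAL is defined, and the accessor macros collapse to NULL or to nothing otherwise, so call sites need no #ifdefs of their own. The toy struct page, main(), and the userspace build are illustrative assumptions, not kernel code; the READ_ONCE accessor is omitted, and the set helper uses a GNU statement expression as in the patch, so it builds with gcc/clang.

#include <stdio.h>

#define CONFIG_SLUB_CPU_PARTIAL		/* comment out to drop the field */

struct page { struct page *next; };	/* stand-in for the kernel's struct page */

struct kmem_cache_cpu {
	void **freelist;
#ifdef CONFIG_SLUB_CPU_PARTIAL
	struct page *partial;		/* partially allocated frozen slabs */
#endif
};

/* The accessors as added by the patch (READ_ONCE variant omitted here). */
#ifdef CONFIG_SLUB_CPU_PARTIAL
#define slub_percpu_partial(c)		((c)->partial)

#define slub_set_percpu_partial(c, p)		\
({						\
	slub_percpu_partial(c) = (p)->next;	\
})
#else
#define slub_percpu_partial(c)		NULL

#define slub_set_percpu_partial(c, p)
#endif

int main(void)
{
	struct kmem_cache_cpu c = { 0 };
	struct page *page;

	/* Call sites need no #ifdef of their own. */
	page = slub_percpu_partial(&c);
	if (page)
		slub_set_percpu_partial(&c, page);	/* pop the first frozen slab */

	/* With the option disabled, one pointer per cpu is saved here. */
	printf("sizeof(struct kmem_cache_cpu) = %zu\n",
	       sizeof(struct kmem_cache_cpu));
	return 0;
}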
19 changes: 19 additions & 0 deletions include/linux/slub_def.h
@@ -41,12 +41,31 @@ struct kmem_cache_cpu {
 	void **freelist;	/* Pointer to next available object */
 	unsigned long tid;	/* Globally unique transaction id */
 	struct page *page;	/* The slab from which we are allocating */
+#ifdef CONFIG_SLUB_CPU_PARTIAL
 	struct page *partial;	/* Partially allocated frozen slabs */
+#endif
 #ifdef CONFIG_SLUB_STATS
 	unsigned stat[NR_SLUB_STAT_ITEMS];
 #endif
 };

+#ifdef CONFIG_SLUB_CPU_PARTIAL
+#define slub_percpu_partial(c)		((c)->partial)
+
+#define slub_set_percpu_partial(c, p)		\
+({						\
+	slub_percpu_partial(c) = (p)->next;	\
+})
+
+#define slub_percpu_partial_read_once(c)	READ_ONCE(slub_percpu_partial(c))
+#else
+#define slub_percpu_partial(c)			NULL
+
+#define slub_set_percpu_partial(c, p)
+
+#define slub_percpu_partial_read_once(c)	NULL
+#endif // CONFIG_SLUB_CPU_PARTIAL

 /*
  * Word size structure that can be atomically updated or read and that
  * contains both the order and the number of objects that a slab of the
18 changes: 11 additions & 7 deletions mm/slub.c
@@ -2303,7 +2303,7 @@ static bool has_cpu_slab(int cpu, void *info)
 	struct kmem_cache *s = info;
 	struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);

-	return c->page || c->partial;
+	return c->page || slub_percpu_partial(c);
 }

 static void flush_all(struct kmem_cache *s)
@@ -2565,9 +2565,9 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,

 new_slab:

-	if (c->partial) {
-		page = c->page = c->partial;
-		c->partial = page->next;
+	if (slub_percpu_partial(c)) {
+		page = c->page = slub_percpu_partial(c);
+		slub_set_percpu_partial(c, page);
 		stat(s, CPU_PARTIAL_ALLOC);
 		goto redo;
 	}
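
With CONFIG_SLUB_CPU_PARTIAL enabled, the replacement lines in the hunk above expand back to the code they replace, so the first frozen slab is still popped off the per-cpu partial list the same way; roughly:

	page = c->page = slub_percpu_partial(c);	/* ((c)->partial) */
	slub_set_percpu_partial(c, page);		/* (c)->partial = (page)->next */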
@@ -4754,7 +4754,7 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
 			total += x;
 			nodes[node] += x;

-			page = READ_ONCE(c->partial);
+			page = slub_percpu_partial_read_once(c);
 			if (page) {
 				node = page_to_nid(page);
 				if (flags & SO_TOTAL)
@@ -4982,7 +4982,9 @@ static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf)
 	int len;

 	for_each_online_cpu(cpu) {
-		struct page *page = per_cpu_ptr(s->cpu_slab, cpu)->partial;
+		struct page *page;
+
+		page = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu));

 		if (page) {
 			pages += page->pages;
@@ -4994,7 +4996,9 @@ static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf)

 #ifdef CONFIG_SMP
 	for_each_online_cpu(cpu) {
-		struct page *page = per_cpu_ptr(s->cpu_slab, cpu) ->partial;
+		struct page *page;
+
+		page = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu));

 		if (page && len < PAGE_SIZE - 20)
 			len += sprintf(buf + len, " C%d=%d(%d)", cpu,
