mm/sl[aou]b: Extract a common function for kmem_cache_destroy
kmem_cache_destroy does basically the same thing in all allocators.

Extract the common code, which is easy since we already have common mutex
handling.

Reviewed-by: Glauber Costa <glommer@parallels.com>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
Christoph Lameter authored and penberg committed Sep 5, 2012
1 parent 7c9adf5 commit 945cf2b
Showing 5 changed files with 49 additions and 75 deletions.
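
The shape of the refactoring, before reading the per-file diffs: kmem_cache_destroy() now exists once, in mm/slab_common.c, and drives two per-allocator hooks. A condensed sketch of that common flow (paraphrased from the mm/slab_common.c hunk below, with the error reporting trimmed; not a verbatim copy):

void kmem_cache_destroy(struct kmem_cache *s)
{
	get_online_cpus();
	mutex_lock(&slab_mutex);
	if (!--s->refcount) {			/* last user of this cache? */
		list_del(&s->list);
		if (!__kmem_cache_shutdown(s)) {	/* per-allocator: drain */
			if (s->flags & SLAB_DESTROY_BY_RCU)
				rcu_barrier();	/* wait out RCU-freed slabs */
			__kmem_cache_destroy(s);	/* per-allocator: release */
		} else {
			/* Shutdown failed: objects remain, put the cache back. */
			list_add(&s->list, &slab_caches);
		}
	}
	mutex_unlock(&slab_mutex);
	put_online_cpus();
}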
45 changes: 3 additions & 42 deletions mm/slab.c
@@ -2206,7 +2206,7 @@ static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp)
 	}
 }
 
-static void __kmem_cache_destroy(struct kmem_cache *cachep)
+void __kmem_cache_destroy(struct kmem_cache *cachep)
 {
 	int i;
 	struct kmem_list3 *l3;
@@ -2763,49 +2763,10 @@ int kmem_cache_shrink(struct kmem_cache *cachep)
 }
 EXPORT_SYMBOL(kmem_cache_shrink);
 
-/**
- * kmem_cache_destroy - delete a cache
- * @cachep: the cache to destroy
- *
- * Remove a &struct kmem_cache object from the slab cache.
- *
- * It is expected this function will be called by a module when it is
- * unloaded. This will remove the cache completely, and avoid a duplicate
- * cache being allocated each time a module is loaded and unloaded, if the
- * module doesn't have persistent in-kernel storage across loads and unloads.
- *
- * The cache must be empty before calling this function.
- *
- * The caller must guarantee that no one will allocate memory from the cache
- * during the kmem_cache_destroy().
- */
-void kmem_cache_destroy(struct kmem_cache *cachep)
+int __kmem_cache_shutdown(struct kmem_cache *cachep)
 {
-	BUG_ON(!cachep || in_interrupt());
-
-	/* Find the cache in the chain of caches. */
-	get_online_cpus();
-	mutex_lock(&slab_mutex);
-	/*
-	 * the chain is never empty, cache_cache is never destroyed
-	 */
-	list_del(&cachep->list);
-	if (__cache_shrink(cachep)) {
-		slab_error(cachep, "Can't free all objects");
-		list_add(&cachep->list, &slab_caches);
-		mutex_unlock(&slab_mutex);
-		put_online_cpus();
-		return;
-	}
-
-	if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU))
-		rcu_barrier();
-
-	__kmem_cache_destroy(cachep);
-	mutex_unlock(&slab_mutex);
-	put_online_cpus();
+	return __cache_shrink(cachep);
 }
-EXPORT_SYMBOL(kmem_cache_destroy);
 
 /*
  * Get the memory for a slab management obj.
3 changes: 3 additions & 0 deletions mm/slab.h
@@ -30,4 +30,7 @@ extern struct list_head slab_caches;
 struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
 	size_t align, unsigned long flags, void (*ctor)(void *));
 
+int __kmem_cache_shutdown(struct kmem_cache *);
+void __kmem_cache_destroy(struct kmem_cache *);
+
 #endif
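
These two declarations are the entire interface an allocator must implement for the common destroy path. A hedged gloss of each hook, inferred from the three implementations below (the comments are editorial, not part of the commit):

/* Editorial gloss -- not from the commit itself. */

/* Drain the cache; a nonzero return means objects remain and the destroy
 * must be aborted (the common code puts the cache back on slab_caches). */
int __kmem_cache_shutdown(struct kmem_cache *);

/* Release allocator-private state (per-node data, sysfs entry, the
 * kmem_cache itself) once shutdown and any RCU grace period are done. */
void __kmem_cache_destroy(struct kmem_cache *);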
25 changes: 25 additions & 0 deletions mm/slab_common.c
@@ -140,6 +140,31 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align
 }
 EXPORT_SYMBOL(kmem_cache_create);
 
+void kmem_cache_destroy(struct kmem_cache *s)
+{
+	get_online_cpus();
+	mutex_lock(&slab_mutex);
+	s->refcount--;
+	if (!s->refcount) {
+		list_del(&s->list);
+
+		if (!__kmem_cache_shutdown(s)) {
+			if (s->flags & SLAB_DESTROY_BY_RCU)
+				rcu_barrier();
+
+			__kmem_cache_destroy(s);
+		} else {
+			list_add(&s->list, &slab_caches);
+			printk(KERN_ERR "kmem_cache_destroy %s: Slab cache still has objects\n",
+				s->name);
+			dump_stack();
+		}
+	}
+	mutex_unlock(&slab_mutex);
+	put_online_cpus();
+}
+EXPORT_SYMBOL(kmem_cache_destroy);
+
 int slab_is_available(void)
 {
 	return slab_state >= UP;
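
Nothing changes for callers; the destroy contract is simply enforced in one place instead of three. A minimal, hypothetical module showing that caller-visible contract (the names demo_cache, struct demo, and the init/exit functions are illustrative, not from this commit):

#include <linux/module.h>
#include <linux/slab.h>

struct demo { int value; };
static struct kmem_cache *demo_cache;

static int __init demo_init(void)
{
	demo_cache = kmem_cache_create("demo_cache", sizeof(struct demo),
				       0, 0, NULL);
	return demo_cache ? 0 : -ENOMEM;
}

static void __exit demo_exit(void)
{
	/* Every object must already have been kmem_cache_free()d here;
	 * otherwise the common code above logs "Slab cache still has
	 * objects" with a stack dump and keeps the cache alive. */
	kmem_cache_destroy(demo_cache);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");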
15 changes: 7 additions & 8 deletions mm/slob.c
@@ -538,18 +538,11 @@ struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
 	return c;
 }
 
-void kmem_cache_destroy(struct kmem_cache *c)
+void __kmem_cache_destroy(struct kmem_cache *c)
 {
-	mutex_lock(&slab_mutex);
-	list_del(&c->list);
-	mutex_unlock(&slab_mutex);
-
 	kmemleak_free(c);
-	if (c->flags & SLAB_DESTROY_BY_RCU)
-		rcu_barrier();
 	slob_free(c, sizeof(struct kmem_cache));
 }
-EXPORT_SYMBOL(kmem_cache_destroy);
 
 void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
 {
@@ -617,6 +610,12 @@ unsigned int kmem_cache_size(struct kmem_cache *c)
 }
 EXPORT_SYMBOL(kmem_cache_size);
 
+int __kmem_cache_shutdown(struct kmem_cache *c)
+{
+	/* No way to check for remaining objects */
+	return 0;
+}
+
 int kmem_cache_shrink(struct kmem_cache *d)
 {
 	return 0;
36 changes: 11 additions & 25 deletions mm/slub.c
@@ -624,7 +624,7 @@ static void object_err(struct kmem_cache *s, struct page *page,
 	print_trailer(s, page, object);
 }
 
-static void slab_err(struct kmem_cache *s, struct page *page, char *fmt, ...)
+static void slab_err(struct kmem_cache *s, struct page *page, const char *fmt, ...)
 {
 	va_list args;
 	char buf[100];
@@ -3146,7 +3146,7 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
 			     sizeof(long), GFP_ATOMIC);
 	if (!map)
 		return;
-	slab_err(s, page, "%s", text);
+	slab_err(s, page, text, s->name);
 	slab_lock(page);
 
 	get_map(s, page, map);
@@ -3178,7 +3178,7 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
 			discard_slab(s, page);
 		} else {
 			list_slab_objects(s, page,
-			"Objects remaining on kmem_cache_close()");
+			"Objects remaining in %s on kmem_cache_close()");
 		}
 	}
 }
@@ -3191,7 +3191,6 @@ static inline int kmem_cache_close(struct kmem_cache *s)
 	int node;
 
 	flush_all(s);
-	free_percpu(s->cpu_slab);
 	/* Attempt to free all objects */
 	for_each_node_state(node, N_NORMAL_MEMORY) {
 		struct kmem_cache_node *n = get_node(s, node);
@@ -3200,33 +3199,20 @@ static inline int kmem_cache_close(struct kmem_cache *s)
 		if (n->nr_partial || slabs_node(s, node))
 			return 1;
 	}
+	free_percpu(s->cpu_slab);
 	free_kmem_cache_nodes(s);
 	return 0;
 }
 
-/*
- * Close a cache and release the kmem_cache structure
- * (must be used for caches created using kmem_cache_create)
- */
-void kmem_cache_destroy(struct kmem_cache *s)
+int __kmem_cache_shutdown(struct kmem_cache *s)
 {
-	mutex_lock(&slab_mutex);
-	s->refcount--;
-	if (!s->refcount) {
-		list_del(&s->list);
-		mutex_unlock(&slab_mutex);
-		if (kmem_cache_close(s)) {
-			printk(KERN_ERR "SLUB %s: %s called for cache that "
-				"still has objects.\n", s->name, __func__);
-			dump_stack();
-		}
-		if (s->flags & SLAB_DESTROY_BY_RCU)
-			rcu_barrier();
-		sysfs_slab_remove(s);
-	} else
-		mutex_unlock(&slab_mutex);
+	return kmem_cache_close(s);
+}
+
+void __kmem_cache_destroy(struct kmem_cache *s)
+{
+	sysfs_slab_remove(s);
 }
-EXPORT_SYMBOL(kmem_cache_destroy);
 
 /********************************************************************
  *		Kmalloc subsystem
