
Commit 4138fdf

rgushchin authored and torvalds committed
mm: slub: implement SLUB version of obj_to_index()
This commit implements the SLUB version of the obj_to_index() function, which will be required to calculate the offset of obj_cgroup in the obj_cgroups vector to store/obtain the objcg ownership data.

To make it faster, let's repeat the SLAB trick introduced by commit 6a2d7a9 ("SLAB: use a multiply instead of a divide in obj_to_index()") and avoid an expensive division.

Vlastimil Babka noticed that SLUB already has a similar function, slab_index(), which is defined only if SLUB_DEBUG is enabled. It does similar math, but with a division, and it takes a page address instead of a page pointer. Let's remove slab_index() and replace it with the new helper __obj_to_index(), which takes a page address. obj_to_index() will be a simple wrapper taking a page pointer and passing page_address(page) into __obj_to_index().

Signed-off-by: Roman Gushchin <guro@fb.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Shakeel Butt <shakeelb@google.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Christoph Lameter <cl@linux.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Tejun Heo <tj@kernel.org>
Link: http://lkml.kernel.org/r/20200623174037.3951353-5-guro@fb.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
1 parent d42f324 commit 4138fdf
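
As an aside (not part of the patch), here is a minimal, self-contained C sketch of the multiply-instead-of-a-divide trick the commit message refers to: precompute a fixed-point reciprocal of the object size once, then each object-index calculation becomes a multiply and a shift. This is a simplified form; the kernel's reciprocal_value()/reciprocal_divide() in <linux/reciprocal_div.h> implement a more careful general-purpose algorithm, and the size and offset below are made-up example values.

/*
 * Standalone userspace illustration (not kernel code) of replacing
 * "offset / size" with a multiply by a precomputed reciprocal.
 */
#include <stdint.h>
#include <stdio.h>

struct recip {
	uint64_t m;	/* roughly 2^32 / size, rounded up */
};

/* Done once, when the cache is set up (cf. calculate_sizes() in the diff). */
static struct recip recip_value(uint32_t size)
{
	struct recip r = { .m = ((1ULL << 32) + size - 1) / size };
	return r;
}

/* offset / size without a divide: (offset * m) >> 32 */
static uint32_t recip_divide(uint32_t offset, struct recip r)
{
	return (uint32_t)(((uint64_t)offset * r.m) >> 32);
}

int main(void)
{
	uint32_t size = 192;			/* hypothetical object size */
	struct recip r = recip_value(size);	/* precomputed reciprocal */

	/* The object at byte offset 5 * size from the slab base has index 5. */
	uint32_t offset = 5 * size;
	printf("index = %u (plain divide gives %u)\n",
	       recip_divide(offset, r), offset / size);
	return 0;
}

In the patch itself, the precomputed reciprocal is stored in kmem_cache::reciprocal_size by calculate_sizes() and consumed on the hot path by __obj_to_index().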

File tree

2 files changed: +21 −10 lines

include/linux/slub_def.h

Lines changed: 16 additions & 0 deletions
@@ -8,6 +8,7 @@
  * (C) 2007 SGI, Christoph Lameter
  */
 #include <linux/kobject.h>
+#include <linux/reciprocal_div.h>
 
 enum stat_item {
 	ALLOC_FASTPATH,		/* Allocation from cpu slab */
@@ -86,6 +87,7 @@ struct kmem_cache {
 	unsigned long min_partial;
 	unsigned int size;	/* The size of an object including metadata */
 	unsigned int object_size;/* The size of an object without metadata */
+	struct reciprocal_value reciprocal_size;
 	unsigned int offset;	/* Free pointer offset */
 #ifdef CONFIG_SLUB_CPU_PARTIAL
 	/* Number of per cpu partial objects to keep around */
@@ -182,4 +184,18 @@ static inline void *nearest_obj(struct kmem_cache *cache, struct page *page,
 	return result;
 }
 
+/* Determine object index from a given position */
+static inline unsigned int __obj_to_index(const struct kmem_cache *cache,
+					  void *addr, void *obj)
+{
+	return reciprocal_divide(kasan_reset_tag(obj) - addr,
+				 cache->reciprocal_size);
+}
+
+static inline unsigned int obj_to_index(const struct kmem_cache *cache,
+					const struct page *page, void *obj)
+{
+	return __obj_to_index(cache, page_address(page), obj);
+}
+
 #endif /* _LINUX_SLUB_DEF_H */

mm/slub.c

Lines changed: 5 additions & 10 deletions
@@ -317,12 +317,6 @@ static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
 		__p < (__addr) + (__objects) * (__s)->size; \
 		__p += (__s)->size)
 
-/* Determine object index from a given position */
-static inline unsigned int slab_index(void *p, struct kmem_cache *s, void *addr)
-{
-	return (kasan_reset_tag(p) - addr) / s->size;
-}
-
 static inline unsigned int order_objects(unsigned int order, unsigned int size)
 {
 	return ((unsigned int)PAGE_SIZE << order) / size;
@@ -465,7 +459,7 @@ static unsigned long *get_map(struct kmem_cache *s, struct page *page)
 	bitmap_zero(object_map, page->objects);
 
 	for (p = page->freelist; p; p = get_freepointer(s, p))
-		set_bit(slab_index(p, s, addr), object_map);
+		set_bit(__obj_to_index(s, addr, p), object_map);
 
 	return object_map;
 }
@@ -3754,6 +3748,7 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
 	 */
 	size = ALIGN(size, s->align);
 	s->size = size;
+	s->reciprocal_size = reciprocal_value(size);
 	if (forced_order >= 0)
 		order = forced_order;
 	else
@@ -3858,7 +3853,7 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
 	map = get_map(s, page);
 	for_each_object(p, s, addr, page->objects) {
 
-		if (!test_bit(slab_index(p, s, addr), map)) {
+		if (!test_bit(__obj_to_index(s, addr, p), map)) {
 			pr_err("INFO: Object 0x%p @offset=%tu\n", p, p - addr);
 			print_tracking(s, p);
 		}
@@ -4574,7 +4569,7 @@ static void validate_slab(struct kmem_cache *s, struct page *page)
 	/* Now we know that a valid freelist exists */
 	map = get_map(s, page);
 	for_each_object(p, s, addr, page->objects) {
-		u8 val = test_bit(slab_index(p, s, addr), map) ?
+		u8 val = test_bit(__obj_to_index(s, addr, p), map) ?
 			 SLUB_RED_INACTIVE : SLUB_RED_ACTIVE;
 
 		if (!check_object(s, page, p, val))
@@ -4765,7 +4760,7 @@ static void process_slab(struct loc_track *t, struct kmem_cache *s,
 
 	map = get_map(s, page);
 	for_each_object(p, s, addr, page->objects)
-		if (!test_bit(slab_index(p, s, addr), map))
+		if (!test_bit(__obj_to_index(s, addr, p), map))
 			add_location(t, s, get_track(s, p, alloc));
 	put_map(map);
 }
