@@ -290,6 +290,12 @@ struct arena_s {
 
 	uint64_t		prof_accumbytes;
 
+	/*
+	 * PRNG state for cache index randomization of large allocation base
+	 * pointers.
+	 */
+	uint64_t		offset_state;
+
 	dss_prec_t		dss_prec;
 
 	/*
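
The new offset_state field is per-arena PRNG state used to randomize where a large allocation's base pointer lands within its padding page. The sketch below shows the general technique; the LCG constants and helper names are illustrative assumptions, not the code this commit adds elsewhere:

	#include <stdint.h>
	#include <stddef.h>

	#define CACHELINE	64	/* assumed cache line size */
	#define PAGE		4096	/* assumed page size */

	/* Advance a 64-bit LCG (Knuth's MMIX constants) and return it. */
	static uint64_t
	prng_next(uint64_t *state)
	{

		*state = *state * 6364136223846793005ULL +
		    1442695040888963407ULL;
		return (*state);
	}

	/*
	 * Pick a cacheline-aligned offset in [0, PAGE) to add to a large
	 * run's base pointer, so that successive large allocations do not
	 * all collide on the same cache indices.
	 */
	static size_t
	random_offset(uint64_t *offset_state)
	{

		return ((size_t)(prng_next(offset_state) %
		    (PAGE / CACHELINE)) * CACHELINE);
	}
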
@@ -394,7 +400,15 @@ struct arena_s {
 /******************************************************************************/
 #ifdef JEMALLOC_H_EXTERNS
 
-extern ssize_t	opt_lg_dirty_mult;
+static const size_t	large_pad =
+#ifdef JEMALLOC_CACHE_OBLIVIOUS
+    PAGE
+#else
+    0
+#endif
+    ;
+
+extern ssize_t	opt_lg_dirty_mult;
 
 extern arena_bin_info_t	arena_bin_info[NBINS];
 
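
large_pad is the slack that makes the randomization possible: in a JEMALLOC_CACHE_OBLIVIOUS build every large run is mapped with one extra page, and in a plain build it collapses to zero so the arithmetic below is unchanged. A worked example of the bookkeeping the later hunks depend on, assuming 4 KiB pages:

	/*
	 * A 5000-byte large request, 4 KiB pages (illustrative):
	 *   payload pages:  PAGE_CEILING(5000)      = 8192
	 *   mapped (CO):    8192 + large_pad (PAGE) = 12288
	 *   mapped (plain): 8192 + large_pad (0)    = 8192
	 * The chunk map records the mapped size including large_pad, so
	 * readers such as arena_salloc() must subtract it back out.
	 */
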
@@ -475,7 +489,7 @@ void arena_stats_merge(arena_t *arena, const char **dss,
     arena_stats_t *astats, malloc_bin_stats_t *bstats,
     malloc_large_stats_t *lstats, malloc_huge_stats_t *hstats);
 arena_t	*arena_new(unsigned ind);
-void	arena_boot(void);
+bool	arena_boot(void);
 void	arena_prefork(arena_t *arena);
 void	arena_postfork_parent(arena_t *arena);
 void	arena_postfork_child(arena_t *arena);
@@ -721,7 +735,7 @@ arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind, size_t size,
 {
 	size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
 
-	assert((size & PAGE_MASK) == 0);
+	assert(size == PAGE_CEILING(size));
 	assert((flags & ~CHUNK_MAP_FLAGS_MASK) == 0);
 	assert((flags & (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == flags);
 	arena_mapbitsp_write(mapbitsp, size | CHUNK_MAP_BININD_INVALID | flags);
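
The rewritten assertion is behavior-preserving; it just states the intent ("size is page-aligned") directly. A standalone check of the equivalence, with PAGE_CEILING defined the conventional way it appears in jemalloc's internal headers (the LG_PAGE value is an assumption):

	#include <assert.h>
	#include <stddef.h>

	#define LG_PAGE		12	/* assumed 4 KiB pages */
	#define PAGE		((size_t)1 << LG_PAGE)
	#define PAGE_MASK	(PAGE - 1)
	#define PAGE_CEILING(s)	(((s) + PAGE_MASK) & ~PAGE_MASK)

	int
	main(void)
	{
		size_t size;

		/* Rounding up is a no-op exactly when the low bits are 0. */
		for (size = 0; size <= 4 * PAGE; size += 512) {
			assert((size == PAGE_CEILING(size)) ==
			    ((size & PAGE_MASK) == 0));
		}
		return (0);
	}
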
@@ -734,7 +748,7 @@ arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind,
 	size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
 	size_t mapbits = arena_mapbitsp_read(mapbitsp);
 
-	assert((size & PAGE_MASK) == 0);
+	assert(size == PAGE_CEILING(size));
 	assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0);
 	arena_mapbitsp_write(mapbitsp, size | (mapbits & PAGE_MASK));
 }
@@ -747,7 +761,7 @@ arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind, size_t size,
 	size_t mapbits = arena_mapbitsp_read(mapbitsp);
 	size_t unzeroed;
 
-	assert((size & PAGE_MASK) == 0);
+	assert(size == PAGE_CEILING(size));
 	assert((flags & CHUNK_MAP_DIRTY) == flags);
 	unzeroed = mapbits & CHUNK_MAP_UNZEROED; /* Preserve unzeroed. */
 	arena_mapbitsp_write(mapbitsp, size | CHUNK_MAP_BININD_INVALID | flags
@@ -762,7 +776,8 @@ arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind,
 	size_t mapbits = arena_mapbitsp_read(mapbitsp);
 
 	assert(binind <= BININD_INVALID);
-	assert(arena_mapbits_large_size_get(chunk, pageind) == LARGE_MINCLASS);
+	assert(arena_mapbits_large_size_get(chunk, pageind) == LARGE_MINCLASS +
+	    large_pad);
 	arena_mapbitsp_write(mapbitsp, (mapbits & ~CHUNK_MAP_BININD_MASK) |
 	    (binind << CHUNK_MAP_BININD_SHIFT));
 }
@@ -1107,13 +1122,16 @@ arena_salloc(const void *ptr, bool demote)
 		 * end up looking at binind to determine that ptr is a
 		 * small allocation.
 		 */
-		assert(((uintptr_t)ptr & PAGE_MASK) == 0);
-		ret = arena_mapbits_large_size_get(chunk, pageind);
+		assert(config_cache_oblivious || ((uintptr_t)ptr &
+		    PAGE_MASK) == 0);
+		ret = arena_mapbits_large_size_get(chunk, pageind) -
+		    large_pad;
 		assert(ret != 0);
-		assert(pageind + (ret>>LG_PAGE) <= chunk_npages);
+		assert(pageind + ((ret+large_pad)>>LG_PAGE) <=
+		    chunk_npages);
 		assert(arena_mapbits_dirty_get(chunk, pageind) ==
 		    arena_mapbits_dirty_get(chunk,
-		    pageind+(ret>>LG_PAGE)-1));
+		    pageind+((ret+large_pad)>>LG_PAGE)-1));
 	} else {
 		/*
 		 * Small allocation (possibly promoted to a large
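
arena_salloc() reports usable bytes, so it subtracts large_pad from the padded size stored in the chunk map; the range assertions must add the pad back when converting to page counts. Arithmetic under the same 4 KiB-page assumption (values are hypothetical):

	#include <assert.h>
	#include <stddef.h>

	int
	main(void)
	{
		size_t lg_page = 12, large_pad = 4096;
		size_t mapped = 12288, pageind = 10;	/* hypothetical map entry */
		size_t ret = mapped - large_pad;

		assert(ret == 8192);	/* size reported to the caller */
		/* The run still spans three pages, not two... */
		assert(((ret + large_pad) >> lg_page) == 3);
		/* ...so the dirty-bit check must look at page 10 + 3 - 1 = 12. */
		assert(pageind + ((ret + large_pad) >> lg_page) - 1 == 12);
		return (0);
	}
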
@@ -1157,11 +1175,13 @@ arena_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache)
 			size_t size = arena_mapbits_large_size_get(chunk,
 			    pageind);
 
-			assert(((uintptr_t)ptr & PAGE_MASK) == 0);
+			assert(config_cache_oblivious || ((uintptr_t)ptr &
+			    PAGE_MASK) == 0);
 
-			if (likely(tcache != NULL) && size <= tcache_maxclass)
-				tcache_dalloc_large(tsd, tcache, ptr, size);
-			else {
+			if (likely(tcache != NULL) && size <= tcache_maxclass) {
+				tcache_dalloc_large(tsd, tcache, ptr, size -
+				    large_pad);
+			} else {
 				arena_dalloc_large(extent_node_arena_get(
 				    &chunk->node), chunk, ptr);
 			}
@@ -1188,7 +1208,7 @@ arena_sdalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache)
 			 */
 			assert(((uintptr_t)ptr & PAGE_MASK) == 0);
 			size = arena_mapbits_large_size_get(chunk,
-			    pageind);
+			    pageind) - large_pad;
 		}
 	}
 	assert(s2u(size) == s2u(arena_salloc(ptr, false)));
@@ -1205,7 +1225,8 @@ arena_sdalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache)
 			    &chunk->node), chunk, ptr, pageind);
 		}
 	} else {
-		assert(((uintptr_t)ptr & PAGE_MASK) == 0);
+		assert(config_cache_oblivious || ((uintptr_t)ptr &
+		    PAGE_MASK) == 0);
 
 		if (likely(tcache != NULL) && size <= tcache_maxclass)
 			tcache_dalloc_large(tsd, tcache, ptr, size);
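
Every page-alignment assertion in the deallocation paths is relaxed the same way: once base pointers are randomized, a large pointer is page-aligned only in non-cache-oblivious builds. Assuming the random offset is quantized to cache lines (as in the sketch after the first hunk), the invariant that survives in all builds can be stated as:

	/* Page alignment only holds when randomization is compiled out... */
	assert(config_cache_oblivious || ((uintptr_t)ptr & PAGE_MASK) == 0);
	/* ...but cacheline alignment holds either way (CACHELINE assumed). */
	assert(((uintptr_t)ptr & (CACHELINE - 1)) == 0);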