#pragma once

#include <folly/logging/xlog.h>
-
#include <memory>
-
#include "cachelib/allocator/memory/Slab.h"

+ #include <array>
+ #include <deque>
+ #include <forward_list>
+ #include <list>
+ #include <map>
+ #include <queue>
+ #include <set>
+ #include <stack>
+ #include <string>
+ #include <tuple>
+ #include <type_traits>
+ #include <unordered_map>
+ #include <unordered_set>
+ #include <utility>
+ #include <vector>
+
+ // Specialize a type trait for all of the STL containers.
+ namespace IsContainerImpl {
+ template <typename T> struct IsContainer : std::false_type {};
+ template <typename T, std::size_t N> struct IsContainer<std::array<T, N>> : std::true_type {};
+ template <typename... Args> struct IsContainer<std::vector<Args...>> : std::true_type {};
+ template <typename... Args> struct IsContainer<std::deque<Args...>> : std::true_type {};
+ template <typename... Args> struct IsContainer<std::list<Args...>> : std::true_type {};
+ template <typename... Args> struct IsContainer<std::forward_list<Args...>> : std::true_type {};
+ template <typename... Args> struct IsContainer<std::set<Args...>> : std::true_type {};
+ template <typename... Args> struct IsContainer<std::multiset<Args...>> : std::true_type {};
+ template <typename... Args> struct IsContainer<std::map<Args...>> : std::true_type {};
+ template <typename... Args> struct IsContainer<std::multimap<Args...>> : std::true_type {};
+ template <typename... Args> struct IsContainer<std::unordered_set<Args...>> : std::true_type {};
+ template <typename... Args> struct IsContainer<std::unordered_multiset<Args...>> : std::true_type {};
+ template <typename... Args> struct IsContainer<std::unordered_map<Args...>> : std::true_type {};
+ template <typename... Args> struct IsContainer<std::unordered_multimap<Args...>> : std::true_type {};
+ template <typename... Args> struct IsContainer<std::stack<Args...>> : std::true_type {};
+ template <typename... Args> struct IsContainer<std::queue<Args...>> : std::true_type {};
+ template <typename... Args> struct IsContainer<std::priority_queue<Args...>> : std::true_type {};
+ } // namespace IsContainerImpl
+
+ // Type trait that applies the implementation trait to the decayed type.
+ template <typename T> struct IsContainer {
+   static constexpr bool value =
+       IsContainerImpl::IsContainer<std::decay_t<T>>::value;
+ };
+
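
A quick sanity check of how the trait above behaves; a minimal sketch, not part of the change, assuming only the definitions introduced in this diff:

// Illustration: IsContainer decays cv-qualifiers and references before
// matching against the specializations in IsContainerImpl.
static_assert(IsContainer<std::vector<int>>::value, "container");
static_assert(IsContainer<const std::vector<int>&>::value, "decays to container");
static_assert(!IsContainer<int>::value, "not a container");
static_assert(!IsContainer<std::unique_ptr<int>>::value, "smart pointer, not a container");
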
namespace facebook {
namespace cachelib {

@@ -31,20 +71,22 @@ template <typename PtrType, typename AllocatorContainer>
class PtrCompressor;

// the following are for pointer compression for the memory allocator. We
- // compress pointers by storing the slab index and the alloc index of the
- // allocation inside the slab. With slab worth kNumSlabBits of data, if we
- // have the min allocation size as 64 bytes, that requires kNumSlabBits - 6
- // bits for storing the alloc index. This leaves the remaining (32 -
- // (kNumSlabBits - 6)) bits for the slab index. Hence we can index 256 GiB
- // of memory in slabs and index anything more than 64 byte allocations inside
- // the slab using a 32 bit representation.
+ // compress pointers by storing the tier index, slab index and alloc index
+ // of the allocation inside the slab. With a slab worth kNumSlabBits (22 bits)
+ // of data, if we have the min allocation size as 64 bytes, we need
+ // kNumSlabBits - 6 = 16 bits for storing the alloc index. The tier id
+ // occupies only the topmost (32nd) bit, since its value cannot exceed
+ // kMaxTiers (2). This leaves the remaining 32 - (kNumSlabBits - 6) - 1 =
+ // 15 bits for the slab index. Hence we can index 128 GiB of memory in slabs
+ // per tier and index anything more than 64 byte allocations inside the slab
+ // using a 32 bit representation.
//
// This CompressedPtr makes decompression fast by staying away from division and
// modulo arithmetic and doing those during the compression time. We most often
// decompress a CompressedPtr than compress a pointer while creating one.
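
To make the comment's arithmetic concrete, a small sketch (values follow from kNumSlabBits = 22 and a minimum allocation size of 64 bytes, as stated above):

// Bit budget of the new 32-bit compressed pointer:
//   alloc index : bits  0..15  (kNumSlabBits - 6 = 16 bits)
//   slab index  : bits 16..30  (15 bits)
//   tier id     : bit  31      (1 bit, since kMaxTiers == 2)
// Slab-addressable memory per tier: 2^15 slabs * 4 MiB per slab = 128 GiB.
static_assert(16 + 15 + 1 == 32, "fields fill the 32-bit pointer exactly");
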
class CACHELIB_PACKED_ATTR CompressedPtr {
 public:
-  using PtrType = uint64_t;
+  using PtrType = uint32_t;
  // Thrift doesn't support unsigned type
  using SerializedPtrType = int64_t;

@@ -103,26 +145,28 @@ class CACHELIB_PACKED_ATTR CompressedPtr {
  static constexpr unsigned int kNumAllocIdxBits =
      Slab::kNumSlabBits - Slab::kMinAllocPower;

-  // Use topmost 32 bits for TierId
-  // XXX: optimize
-  static constexpr unsigned int kNumTierIdxOffset = 32;
+  // Use the top bit for the tier id
+  static constexpr unsigned int kNumTierIdxOffset = 31;

  static constexpr PtrType kAllocIdxMask = ((PtrType)1 << kNumAllocIdxBits) - 1;

-  // kNumTierIdxBits most significant bits
-  static constexpr PtrType kTierIdxMask = (((PtrType)1 << kNumTierIdxOffset) - 1) << (NumBits<PtrType>::value - kNumTierIdxOffset);
+  // The single most significant bit carries the tier id
+  static constexpr PtrType kTierIdxMask = (PtrType)1 << kNumTierIdxOffset;

-  // Number of bits for the slab index. This will be the top 16 bits of the
+  // Number of bits for the slab index. These are bits 16 through 30 of the
  // compressed ptr.
  static constexpr unsigned int kNumSlabIdxBits =
-      NumBits<PtrType>::value - kNumTierIdxOffset - kNumAllocIdxBits;
+      kNumTierIdxOffset - kNumAllocIdxBits;

-  // Compress the given slabIdx and allocIdx into a 64-bit compressed
-  // pointer.
+  // Compress the given tier id, slabIdx and allocIdx into a 32-bit compressed
+  // pointer.
-  static PtrType compress(uint32_t slabIdx, uint32_t allocIdx, TierId tid) noexcept {
+  static PtrType compress(uint32_t slabIdx,
+                          uint32_t allocIdx,
+                          TierId tid) noexcept {
    XDCHECK_LE(allocIdx, kAllocIdxMask);
    XDCHECK_LT(slabIdx, (1u << kNumSlabIdxBits) - 1);
-    return (static_cast<uint64_t>(tid) << kNumTierIdxOffset) + (slabIdx << kNumAllocIdxBits) + allocIdx;
+    return (static_cast<uint64_t>(tid) << kNumTierIdxOffset) +
+           (slabIdx << kNumAllocIdxBits) + allocIdx;
  }
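
As a standalone sketch of the compression arithmetic (hypothetical field values; the hard-coded masks mirror kAllocIdxMask and kTierIdxMask defined above):

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t tid = 1, slabIdx = 5, allocIdx = 3;
  // Pack: tier id in bit 31, slab index in bits 16..30, alloc index below.
  const uint32_t ptr = (tid << 31) + (slabIdx << 16) + allocIdx;
  assert((ptr & 0xFFFFu) == allocIdx);        // low 16 bits
  assert(((ptr >> 16) & 0x7FFFu) == slabIdx); // next 15 bits
  assert((ptr >> 31) == tid);                 // top bit
  return 0;
}
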

  // Get the slab index of the compressed ptr
@@ -153,62 +197,43 @@ class CACHELIB_PACKED_ATTR CompressedPtr {
  friend class PtrCompressor;
};

- template <typename PtrType, typename AllocatorT>
- class SingleTierPtrCompressor {
-  public:
-   explicit SingleTierPtrCompressor(const AllocatorT& allocator) noexcept
-       : allocator_(allocator) {}
-
-   const CompressedPtr compress(const PtrType* uncompressed) const {
-     return allocator_.compress(uncompressed);
-   }
-
-   PtrType* unCompress(const CompressedPtr compressed) const {
-     return static_cast<PtrType*>(allocator_.unCompress(compressed));
-   }
-
-   bool operator==(const SingleTierPtrCompressor& rhs) const noexcept {
-     return &allocator_ == &rhs.allocator_;
-   }
-
-   bool operator!=(const SingleTierPtrCompressor& rhs) const noexcept {
-     return !(*this == rhs);
-   }
-
-  private:
-   // memory allocator that does the pointer compression.
-   const AllocatorT& allocator_;
- };
-
template <typename PtrType, typename AllocatorContainer>
class PtrCompressor {
 public:
  explicit PtrCompressor(const AllocatorContainer& allocators) noexcept
      : allocators_(allocators) {}

  const CompressedPtr compress(const PtrType* uncompressed) const {
-    if (uncompressed == nullptr)
+    if (uncompressed == nullptr) {
      return CompressedPtr{};
-
-    TierId tid;
-    for (tid = 0; tid < allocators_.size(); tid++) {
-      if (allocators_[tid]->isMemoryInAllocator(static_cast<const void*>(uncompressed)))
-        break;
    }
-
-    auto cptr = allocators_[tid]->compress(uncompressed);
-    cptr.setTierId(tid);
-
-    return cptr;
+    if constexpr (IsContainer<decltype(allocators_)>::value) {
+      TierId tid;
+      for (tid = 0; tid < allocators_.size(); tid++) {
+        if (allocators_[tid]->isMemoryInAllocator(
+                static_cast<const void*>(uncompressed)))
+          break;
+      }
+      auto cptr = allocators_[tid]->compress(uncompressed);
+      cptr.setTierId(tid);
+      return cptr;
+    } else {
+      return allocators_.compress(uncompressed);
+    }
  }

  PtrType* unCompress(const CompressedPtr compressed) const {
    if (compressed.isNull()) {
      return nullptr;
    }
-    auto& allocator = *allocators_[compressed.getTierId()];
-    return static_cast<PtrType*>(allocator.unCompress(compressed));
+    if constexpr (IsContainer<decltype(allocators_)>::value) {
+      auto& allocator = *allocators_[compressed.getTierId()];
+      return static_cast<PtrType*>(allocator.unCompress(compressed));
+    } else {
+      return static_cast<PtrType*>(allocators_.unCompress(compressed));
+    }
  }
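
For context on the dispatch, a sketch of how the unified template would be instantiated for each layout (the Item and allocator container types are illustrative, not taken from this diff):

// Multi-tier: the container branch scans tiers on compress and stamps the
// tier id into the pointer.
using MultiTierCompressor =
    PtrCompressor<Item, std::vector<std::unique_ptr<MemoryAllocator>>>;
// Single tier: the non-container branch forwards directly to the allocator,
// which is what the deleted SingleTierPtrCompressor used to do.
using SingleTierCompressor = PtrCompressor<Item, MemoryAllocator>;
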

  bool operator==(const PtrCompressor& rhs) const noexcept {