@@ -27,18 +27,28 @@ namespace cachelib {
class SlabAllocator;

- // the following are for pointer compression for the memory allocator. We
- // compress pointers by storing the slab index and the alloc index of the
- // allocation inside the slab. With slab worth kNumSlabBits of data, if we
- // have the min allocation size as 64 bytes, that requires kNumSlabBits - 6
- // bits for storing the alloc index. This leaves the remaining (32 -
- // (kNumSlabBits - 6)) bits for the slab index. Hence we can index 256 GiB
- // of memory in slabs and index anything more than 64 byte allocations inside
- // the slab using a 32 bit representation.
- //
// This CompressedPtr makes decompression fast by staying away from division and
// modulo arithmetic and doing those during the compression time. We most often
- // decompress a CompressedPtr than compress a pointer while creating one.
+ // decompress a CompressedPtr than compress a pointer while creating one. This
+ // is used for pointer compression by the memory allocator.
+
+ // We compress pointers by storing the tier index, slab index and alloc index
+ // of the allocation inside the slab.
+
+ // In the original design (without memory tiers):
+ // Each slab addresses 22 bits of memory (kNumSlabBits), split between the
+ // allocation index and the allocation size. With the minimum allocation size
+ // of 64 bytes (kMinAllocPower = 6 bits), we need kNumSlabBits(22) -
+ // kMinAllocPower(6) = 16 bits to store the alloc index. This leaves the
+ // remaining 32 - (kNumSlabBits - kMinAllocPower) = 16 bits for the slab
+ // index. Hence we can index 256 GiB of memory.
+
+ // In the multi-tier design:
+ // kNumSlabBits and kMinAllocPower remain unchanged. The tier id occupies only
+ // the 32nd bit, since its value cannot exceed kMaxTiers(2). This leaves
+ // 32 - (kNumSlabBits - kMinAllocPower) - 1 (tier id) = 15 bits for the slab
+ // index. Hence we can index 128 GiB of memory per tier in the multi-tier
+ // configuration.
+
class CACHELIB_PACKED_ATTR CompressedPtr {
public:
using PtrType = uint32_t;
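To make the bit budget in the new comment block concrete, here is a small standalone sketch. It is not part of the diff; the constants mirror Slab::kNumSlabBits and Slab::kMinAllocPower but are redefined locally, and it only assumes the values stated above (22, 6, and a single tier bit). It reproduces the 16/15-bit slab-index split and the 256 GiB / 128 GiB addressable sizes.

```cpp
#include <cstdint>
#include <cstdio>

// Local stand-ins for Slab::kNumSlabBits and Slab::kMinAllocPower so the
// snippet compiles on its own.
constexpr unsigned kNumSlabBits = 22;   // each slab spans 4 MiB
constexpr unsigned kMinAllocPower = 6;  // minimum allocation is 64 bytes

constexpr unsigned kNumAllocIdxBits = kNumSlabBits - kMinAllocPower;  // 16

// Slab-index bits left in a 32-bit compressed pointer, without and with the
// single tier bit.
constexpr unsigned kSlabIdxBitsSingleTier = 32 - kNumAllocIdxBits;     // 16
constexpr unsigned kSlabIdxBitsMultiTier = 32 - kNumAllocIdxBits - 1;  // 15

// Addressable bytes = number of indexable slabs * slab size.
constexpr uint64_t kSingleTierBytes =
    (uint64_t{1} << kSlabIdxBitsSingleTier) << kNumSlabBits;  // 256 GiB
constexpr uint64_t kMultiTierBytes =
    (uint64_t{1} << kSlabIdxBitsMultiTier) << kNumSlabBits;   // 128 GiB / tier

static_assert(kSingleTierBytes == 256ULL << 30, "256 GiB without tiers");
static_assert(kMultiTierBytes == 128ULL << 30, "128 GiB per tier");

int main() {
  std::printf("alloc idx bits: %u, slab idx bits: %u (single) / %u (multi)\n",
              kNumAllocIdxBits, kSlabIdxBitsSingleTier, kSlabIdxBitsMultiTier);
}
```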
@@ -62,9 +72,10 @@ class CACHELIB_PACKED_ATTR CompressedPtr {
return static_cast<uint32_t>(1) << (Slab::kMinAllocPower);
}

- // maximum adressable memory for pointer compression to work.
+ // maximum addressable memory for pointer compression to work.
static constexpr size_t getMaxAddressableSize() noexcept {
- return static_cast<size_t>(1) << (kNumSlabIdxBits + Slab::kNumSlabBits);
+ return static_cast<size_t>(1)
+ << (numSlabIdxBits(false) + Slab::kNumSlabBits);
}

// default construct to nullptr.
@@ -89,8 +100,11 @@ class CACHELIB_PACKED_ATTR CompressedPtr {
PtrType ptr_{kNull};

// create a compressed pointer for a valid memory allocation.
- CompressedPtr(uint32_t slabIdx, uint32_t allocIdx)
- : ptr_(compress(slabIdx, allocIdx)) {}
+ CompressedPtr(uint32_t slabIdx,
+ uint32_t allocIdx,
+ bool isMultiTiered,
+ TierId tid = 0)
+ : ptr_(compress(slabIdx, allocIdx, isMultiTiered, tid)) {}

constexpr explicit CompressedPtr(PtrType ptr) noexcept : ptr_{ptr} {}
@@ -100,31 +114,56 @@ class CACHELIB_PACKED_ATTR CompressedPtr {
static constexpr unsigned int kNumAllocIdxBits =
Slab::kNumSlabBits - Slab::kMinAllocPower;

+ // Use the 32nd bit position for the TierId
+ static constexpr unsigned int kNumTierIdxOffset = 31;
+
static constexpr PtrType kAllocIdxMask = ((PtrType)1 << kNumAllocIdxBits) - 1;

+ // Mask for the tier id, i.e. the single most significant bit
+ static constexpr PtrType kTierIdxMask = (PtrType)1 << kNumTierIdxOffset;
+
// Number of bits for the slab index. This will be the top 16 bits of the
// compressed ptr.
- static constexpr unsigned int kNumSlabIdxBits =
- NumBits<PtrType>::value - kNumAllocIdxBits;
+ static constexpr unsigned int numSlabIdxBits(bool isMultiTiered) {
+ return kNumTierIdxOffset - kNumAllocIdxBits + (!isMultiTiered);
+ }

// Compress the given slabIdx and allocIdx into a 32-bit compressed
// pointer.
- static PtrType compress(uint32_t slabIdx, uint32_t allocIdx) noexcept {
+ static PtrType compress(uint32_t slabIdx,
+ uint32_t allocIdx,
+ bool isMultiTiered,
+ TierId tid) noexcept {
XDCHECK_LE(allocIdx, kAllocIdxMask);
- XDCHECK_LT(slabIdx, (1u << kNumSlabIdxBits) - 1);
- return (slabIdx << kNumAllocIdxBits) + allocIdx;
+ XDCHECK_LT(slabIdx, (1u << numSlabIdxBits(isMultiTiered)) - 1);
+ if (!isMultiTiered) {
+ return (slabIdx << kNumAllocIdxBits) + allocIdx;
+ }
+ return (static_cast<uint32_t>(tid) << kNumTierIdxOffset) +
+ (slabIdx << kNumAllocIdxBits) + allocIdx;
}
// Get the slab index of the compressed ptr
- uint32_t getSlabIdx() const noexcept {
+ uint32_t getSlabIdx(bool isMultiTiered) const noexcept {
XDCHECK(!isNull());
- return static_cast<uint32_t>(ptr_ >> kNumAllocIdxBits);
+ auto noTierIdPtr = isMultiTiered ? ptr_ & ~kTierIdxMask : ptr_;
+ return static_cast<uint32_t>(noTierIdPtr >> kNumAllocIdxBits);
}

// Get the allocation index of the compressed ptr
- uint32_t getAllocIdx() const noexcept {
+ uint32_t getAllocIdx(bool isMultiTiered) const noexcept {
XDCHECK(!isNull());
- return static_cast<uint32_t>(ptr_ & kAllocIdxMask);
+ auto noTierIdPtr = isMultiTiered ? ptr_ & ~kTierIdxMask : ptr_;
+ return static_cast<uint32_t>(noTierIdPtr & kAllocIdxMask);
+ }
+
+ // Get the tier id of the compressed ptr (always 0 when not multi-tiered)
+ uint32_t getTierId(bool isMultiTiered) const noexcept {
+ XDCHECK(!isNull());
+ return isMultiTiered ? static_cast<uint32_t>(ptr_ >> kNumTierIdxOffset) : 0;
+ }
+
+ // Set the tier id; assumes the tier bit has not been set yet
+ void setTierId(TierId tid) noexcept {
+ ptr_ += static_cast<uint32_t>(tid) << kNumTierIdxOffset;
}

friend SlabAllocator;
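As a sanity check on the mask arithmetic in compress(), getSlabIdx(), getAllocIdx() and getTierId(), here is a free-standing round-trip sketch. It is illustrative only: the constants are re-derived locally rather than taken from the header, and it packs with + exactly as compress() does, which is equivalent to | here because the three fields occupy disjoint bits.

```cpp
#include <cassert>
#include <cstdint>

// Free-standing mirror of the packing layout above: [tier:1][slab:15][alloc:16].
constexpr unsigned kNumAllocIdxBits = 16;   // kNumSlabBits - kMinAllocPower
constexpr unsigned kNumTierIdxOffset = 31;  // tier id lives in the top bit
constexpr uint32_t kAllocIdxMask = (uint32_t{1} << kNumAllocIdxBits) - 1;
constexpr uint32_t kTierIdxMask = uint32_t{1} << kNumTierIdxOffset;

// Packs like compress() with isMultiTiered == true.
uint32_t pack(uint32_t tid, uint32_t slabIdx, uint32_t allocIdx) {
  return (tid << kNumTierIdxOffset) + (slabIdx << kNumAllocIdxBits) + allocIdx;
}

int main() {
  const uint32_t ptr = pack(/*tid=*/1, /*slabIdx=*/12345, /*allocIdx=*/678);

  // Unpacking mirrors getTierId / getSlabIdx / getAllocIdx.
  const uint32_t tid = ptr >> kNumTierIdxOffset;
  const uint32_t noTierIdPtr = ptr & ~kTierIdxMask;
  assert(tid == 1);
  assert((noTierIdPtr >> kNumAllocIdxBits) == 12345);
  assert((noTierIdPtr & kAllocIdxMask) == 678);
  return 0;
}
```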
@@ -137,11 +176,12 @@ class PtrCompressor {
: allocator_(allocator) {}

const CompressedPtr compress(const PtrType* uncompressed) const {
- return allocator_.compress(uncompressed);
+ return allocator_.compress(uncompressed, false /* isMultiTiered */);
}

PtrType* unCompress(const CompressedPtr compressed) const {
- return static_cast<PtrType*>(allocator_.unCompress(compressed));
+ return static_cast<PtrType*>(
+ allocator_.unCompress(compressed, false /* isMultiTiered */));
}

bool operator==(const PtrCompressor& rhs) const noexcept {
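The PtrCompressor change only threads the new flag through: callers keep the old two-argument interface while the single-tier compressor always passes false. A hypothetical sketch of that forwarding pattern follows; MockAllocator and SingleTierCompressor are made-up names, not CacheLib types, and the "compression" is a dummy offset-from-base scheme purely for illustration.

```cpp
#include <cassert>
#include <cstdint>

// Mock allocator exposing only the two calls the wrapper forwards to, with the
// new trailing isMultiTiered flag.
struct MockAllocator {
  explicit MockAllocator(int* base) : base_(base) {}
  uint32_t compress(const int* p, bool /*isMultiTiered*/) const {
    return static_cast<uint32_t>(p - base_);
  }
  int* unCompress(uint32_t c, bool /*isMultiTiered*/) const { return base_ + c; }
  int* base_;
};

// Sketch of the wrapper pattern from the diff: callers never see the flag.
class SingleTierCompressor {
 public:
  explicit SingleTierCompressor(const MockAllocator& a) : allocator_(a) {}
  uint32_t compress(const int* p) const {
    return allocator_.compress(p, false /* isMultiTiered */);
  }
  int* unCompress(uint32_t c) const {
    return allocator_.unCompress(c, false /* isMultiTiered */);
  }

 private:
  const MockAllocator& allocator_;
};

int main() {
  int pool[8] = {};
  MockAllocator alloc(pool);
  SingleTierCompressor compressor(alloc);
  const uint32_t c = compressor.compress(&pool[3]);
  assert(compressor.unCompress(c) == &pool[3]);
  return 0;
}
```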