author     Kostya Serebryany <kcc@google.com>   2013-05-20 07:29:21 +0000
committer  Kostya Serebryany <kcc@google.com>   2013-05-20 07:29:21 +0000
commit     1b54cbfbd12d6302838a6d4d6c42d90bd9c6b9d5 (patch)
tree       b9728afcd388dcf1602ab04cb25ca82861636198 /lib/sanitizer_common/sanitizer_allocator.h
parent     b877cb56b8ae051cab995704e3eec48119cffc64 (diff)
[sanitizer] factor out ByteMap from SizeClassAllocator32 so that it can be later replaced with a more memory-efficient implementation on 64-bit.
git-svn-id: https://llvm.org/svn/llvm-project/compiler-rt/trunk@182234 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'lib/sanitizer_common/sanitizer_allocator.h')
-rw-r--r--  lib/sanitizer_common/sanitizer_allocator.h  57
1 file changed, 39 insertions, 18 deletions
diff --git a/lib/sanitizer_common/sanitizer_allocator.h b/lib/sanitizer_common/sanitizer_allocator.h
index 0472edf2..0542addb 100644
--- a/lib/sanitizer_common/sanitizer_allocator.h
+++ b/lib/sanitizer_common/sanitizer_allocator.h
@@ -561,6 +561,30 @@ class SizeClassAllocator64 {
}
};
+// Maps integers in range [0, kSize) to u8 values.
+template<u64 kSize>
+class FlatByteMap {
+ public:
+ void TestOnlyInit() {
+ internal_memset(map_, 0, sizeof(map_));
+ }
+
+ void set(uptr idx, u8 val) {
+ CHECK_LT(idx, kSize);
+ CHECK_EQ(0U, map_[idx]);
+ map_[idx] = val;
+ }
+ u8 operator[] (uptr idx) {
+ CHECK_LT(idx, kSize);
+ // FIXME: CHECK may be too expensive here.
+ return map_[idx];
+ }
+ private:
+ u8 map_[kSize];
+};
+
+// FIXME: Also implement TwoLevelByteMap.
+
// SizeClassAllocator32 -- allocator for 32-bit address space.
// This allocator can theoretically be used on 64-bit arch, but there it is less
// efficient than SizeClassAllocator64.
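
Note: the FIXME above names a TwoLevelByteMap that this patch does not yet add. A plausible shape, sketched here as an assumption rather than committed code: a first-level array of kSize1 atomic pointers whose kSize2-byte second-level arrays are mapped on first use, so an untouched index range costs one null pointer instead of kSize2 bytes. The sketch only uses primitives this header's neighbors already provide (internal_memset, MmapOrDie, the sanitizer atomics, StaticSpinMutex).

// Sketch only: maps [0, kSize1*kSize2) to u8, allocating second-level
// chunks lazily. Keeps FlatByteMap's interface (TestOnlyInit/set/[]).
template <u64 kSize1, u64 kSize2>
class TwoLevelByteMap {
 public:
  void TestOnlyInit() {
    internal_memset(map1_, 0, sizeof(map1_));
    mu_.Init();
  }
  void set(uptr idx, u8 val) {
    CHECK_LT(idx, kSize1 * kSize2);
    CHECK_EQ(0U, Get(idx));  // Same write-once contract as FlatByteMap.
    *AddressOf(idx) = val;
  }
  u8 operator[](uptr idx) const {
    CHECK_LT(idx, kSize1 * kSize2);
    return Get(idx);
  }
 private:
  u8 Get(uptr idx) const {
    // An unmapped second level means "never set", i.e. 0.
    uptr map2 = atomic_load(&map1_[idx / kSize2], memory_order_acquire);
    return map2 ? reinterpret_cast<u8 *>(map2)[idx % kSize2] : 0;
  }
  // Returns the address of the byte for idx, mapping the second-level
  // array on first use (double-checked under the mutex).
  u8 *AddressOf(uptr idx) {
    uptr idx1 = idx / kSize2;
    uptr map2 = atomic_load(&map1_[idx1], memory_order_acquire);
    if (!map2) {
      SpinMutexLock l(&mu_);
      map2 = atomic_load(&map1_[idx1], memory_order_relaxed);
      if (!map2) {
        map2 = reinterpret_cast<uptr>(MmapOrDie(kSize2, "TwoLevelByteMap"));
        atomic_store(&map1_[idx1], map2, memory_order_release);
      }
    }
    return &reinterpret_cast<u8 *>(map2)[idx % kSize2];
  }
  atomic_uintptr_t map1_[kSize1];
  StaticSpinMutex mu_;
};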
@@ -572,7 +596,7 @@ class SizeClassAllocator64 {
// a result of a single call to MmapAlignedOrDie(kRegionSize, kRegionSize).
// Since the regions are aligned by kRegionSize, there are exactly
// kNumPossibleRegions possible regions in the address space and so we keep
-// an u8 array possible_regions[kNumPossibleRegions] to store the size classes.
+// a ByteMap possible_regions to store the size class of each region.
// 0 size class means the region is not used by the allocator.
//
// One Region is used to allocate chunks of a single size class.
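
To put illustrative numbers on the comment above: with the 32-bit value of the constant this patch removes below (kRegionSizeLog = 20, i.e. 1 MiB regions) and a space of, say, 2^32 bytes, kNumPossibleRegions = 2^32 / 2^20 = 4096, so a FlatByteMap is a fixed 4 KiB. With the 64-bit values (kRegionSizeLog = 24, i.e. 16 MiB regions) over an assumed 2^47-byte space, a flat array would need 2^47 / 2^24 = 2^23 bytes = 8 MiB up front, which is the cost a lazily populated TwoLevelByteMap is meant to avoid.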
@@ -583,16 +607,19 @@ class SizeClassAllocator64 {
// cache-line aligned.
template <const uptr kSpaceBeg, const u64 kSpaceSize,
const uptr kMetadataSize, class SizeClassMap,
+ const uptr kRegionSizeLog,
+ class ByteMap,
class MapUnmapCallback = NoOpMapUnmapCallback>
class SizeClassAllocator32 {
public:
typedef typename SizeClassMap::TransferBatch Batch;
typedef SizeClassAllocator32<kSpaceBeg, kSpaceSize, kMetadataSize,
- SizeClassMap, MapUnmapCallback> ThisT;
+ SizeClassMap, kRegionSizeLog, ByteMap, MapUnmapCallback> ThisT;
typedef SizeClassAllocatorLocalCache<ThisT> AllocatorCache;
void Init() {
- state_ = reinterpret_cast<State *>(MapWithCallback(sizeof(State)));
+ possible_regions.TestOnlyInit();
+ internal_memset(size_class_info_array, 0, sizeof(size_class_info_array));
}
void *MapWithCallback(uptr size) {
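
With the widened parameter list, callers now supply kRegionSizeLog and a ByteMap type explicitly. A hypothetical instantiation for a 32-bit address space (the concrete values and the PrimaryAllocator name are illustrative, not taken from this patch):

// Hypothetical 32-bit instantiation: 1 MiB regions over a 4 GiB space,
// so the flat map needs 2^32 / 2^20 = 4096 entries.
static const uptr kMyRegionSizeLog = 20;
typedef FlatByteMap<((1ULL << 32) >> kMyRegionSizeLog)> MyByteMap;
typedef SizeClassAllocator32<0, 1ULL << 32, 16, CompactSizeClassMap,
    kMyRegionSizeLog, MyByteMap> PrimaryAllocator;

Passing the former defaults (kRegionSizeLog of 24 on 64-bit, 20 on 32-bit, matching the removed constant) keeps existing behavior unchanged.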
@@ -649,7 +676,7 @@ class SizeClassAllocator32 {
}
uptr GetSizeClass(void *p) {
- return state_->possible_regions[ComputeRegionId(reinterpret_cast<uptr>(p))];
+ return possible_regions[ComputeRegionId(reinterpret_cast<uptr>(p))];
}
void *GetBlockBegin(void *p) {
@@ -674,16 +701,15 @@ class SizeClassAllocator32 {
// No need to lock here.
uptr res = 0;
for (uptr i = 0; i < kNumPossibleRegions; i++)
- if (state_->possible_regions[i])
+ if (possible_regions[i])
res += kRegionSize;
return res;
}
void TestOnlyUnmap() {
for (uptr i = 0; i < kNumPossibleRegions; i++)
- if (state_->possible_regions[i])
+ if (possible_regions[i])
UnmapWithCallback((i * kRegionSize), kRegionSize);
- UnmapWithCallback(reinterpret_cast<uptr>(state_), sizeof(State));
}
// ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
@@ -706,8 +732,8 @@ class SizeClassAllocator32 {
template<typename Callable>
void ForEachChunk(const Callable &callback) {
for (uptr region = 0; region < kNumPossibleRegions; region++)
- if (state_->possible_regions[region]) {
- uptr chunk_size = SizeClassMap::Size(state_->possible_regions[region]);
+ if (possible_regions[region]) {
+ uptr chunk_size = SizeClassMap::Size(possible_regions[region]);
uptr max_chunks_in_region = kRegionSize / (chunk_size + kMetadataSize);
uptr region_beg = region * kRegionSize;
for (uptr p = region_beg;
@@ -726,7 +752,6 @@ class SizeClassAllocator32 {
static const uptr kNumClasses = SizeClassMap::kNumClasses;
private:
- static const uptr kRegionSizeLog = SANITIZER_WORDSIZE == 64 ? 24 : 20;
static const uptr kRegionSize = 1 << kRegionSizeLog;
static const uptr kNumPossibleRegions = kSpaceSize / kRegionSize;
@@ -754,14 +779,13 @@ class SizeClassAllocator32 {
MapUnmapCallback().OnMap(res, kRegionSize);
stat->Add(AllocatorStatMmapped, kRegionSize);
CHECK_EQ(0U, (res & (kRegionSize - 1)));
- CHECK_EQ(0U, state_->possible_regions[ComputeRegionId(res)]);
- state_->possible_regions[ComputeRegionId(res)] = class_id;
+ possible_regions.set(ComputeRegionId(res), class_id);
return res;
}
SizeClassInfo *GetSizeClassInfo(uptr class_id) {
CHECK_LT(class_id, kNumClasses);
- return &state_->size_class_info_array[class_id];
+ return &size_class_info_array[class_id];
}
void PopulateFreeList(AllocatorStats *stat, AllocatorCache *c,
@@ -792,11 +816,8 @@ class SizeClassAllocator32 {
}
}
- struct State {
- u8 possible_regions[kNumPossibleRegions];
- SizeClassInfo size_class_info_array[kNumClasses];
- };
- State *state_;
+ ByteMap possible_regions;
+ SizeClassInfo size_class_info_array[kNumClasses];
};
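
Taken together, the class now embeds the ByteMap directly instead of an mmapped State. A minimal illustration of the contract the allocator relies on, set-once per index and unset indices reading as 0 (the size 4096 and the values are hypothetical):

// Hypothetical demo of the ByteMap contract used above.
void ByteMapContractDemo() {
  FlatByteMap<4096> m;   // one byte per possible region (size illustrative)
  m.TestOnlyInit();      // zero-fill: every region starts as "not used" (0)
  m.set(42, 3);          // AllocateRegion-style claim: region 42 -> class 3
  CHECK_EQ(3U, m[42]);   // GetSizeClass-style lookup
  CHECK_EQ(0U, m[7]);    // never set: reads 0, i.e. not owned by the allocator
  // m.set(42, 5);       // would CHECK-fail: set() asserts the slot is 0
}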
// Objects of this type should be used as local caches for SizeClassAllocator64