//===--- Allocator.cpp - Simple memory allocation abstraction -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the BumpPtrAllocator interface.
//
//===----------------------------------------------------------------------===//

#include "llvm/Support/Allocator.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/Memory.h"
#include "llvm/Support/Recycler.h"
#include "llvm/Support/raw_ostream.h"
#include <cstring>

namespace llvm {

BumpPtrAllocator::BumpPtrAllocator(size_t size, size_t threshold,
                                   SlabAllocator &allocator)
    : SlabSize(size), SizeThreshold(std::min(size, threshold)),
      Allocator(allocator), CurSlab(0), BytesAllocated(0) { }

BumpPtrAllocator::BumpPtrAllocator(size_t size, size_t threshold)
    : SlabSize(size), SizeThreshold(std::min(size, threshold)),
      Allocator(DefaultSlabAllocator), CurSlab(0), BytesAllocated(0) { }

BumpPtrAllocator::~BumpPtrAllocator() {
  DeallocateSlabs(CurSlab);
}

/// AlignPtr - Align Ptr to Alignment bytes, rounding up.  Alignment should
/// be a power of two.  This method rounds up, so AlignPtr(7, 4) == 8 and
/// AlignPtr(8, 4) == 8.
char *BumpPtrAllocator::AlignPtr(char *Ptr, size_t Alignment) {
  assert(Alignment && (Alignment & (Alignment - 1)) == 0 &&
         "Alignment is not a power of two!");

  // Do the alignment.
  return (char*)(((uintptr_t)Ptr + Alignment - 1) &
                 ~(uintptr_t)(Alignment - 1));
}
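
// For example, with Alignment == 8 the "+ Alignment - 1" round-up followed by
// the "& ~(Alignment - 1)" mask clears the low three bits, so (illustrative
// addresses, not taken from real runs):
//
//   AlignPtr((char*)0x1001, 8) == (char*)0x1008
//   AlignPtr((char*)0x1008, 8) == (char*)0x1008   // already aligned: unchanged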

/// StartNewSlab - Allocate a new slab and move the bump pointers over into
/// the new slab.  Modifies CurPtr and End.
void BumpPtrAllocator::StartNewSlab() {
  // If we have already allocated a large number of slabs, it's likely that we
  // are going to allocate more. Increase the slab size to reduce the number of
  // mallocs and possibly the memory overhead. The factors are chosen
  // conservatively to avoid overallocation.
  if (BytesAllocated >= SlabSize * 128)
    SlabSize *= 2;

  MemSlab *NewSlab = Allocator.Allocate(SlabSize);
  NewSlab->NextPtr = CurSlab;
  CurSlab = NewSlab;
  CurPtr = (char*)(CurSlab + 1);
  End = ((char*)CurSlab) + CurSlab->Size;
}
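
// Worked example of the growth policy above, assuming an illustrative 4096-byte
// initial slab size (the actual value is whatever was passed to the ctor):
//
//   BytesAllocated >= 128 * 4096 (512 KiB)  -> SlabSize becomes 8192
//   BytesAllocated >= 128 * 8192 (1 MiB)    -> SlabSize becomes 16384
//
// so the number of slab allocations stays small even for large working sets.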

/// DeallocateSlabs - Deallocate all memory slabs after and including this
/// one.
void BumpPtrAllocator::DeallocateSlabs(MemSlab *Slab) {
  while (Slab) {
    MemSlab *NextSlab = Slab->NextPtr;
#ifndef NDEBUG
    // Poison the memory so stale pointers crash sooner.  Note we must
    // preserve the Size and NextPtr fields at the beginning.
    sys::Memory::setRangeWritable(Slab + 1, Slab->Size - sizeof(MemSlab));
    memset(Slab + 1, 0xCD, Slab->Size - sizeof(MemSlab));
#endif
    Allocator.Deallocate(Slab);
    Slab = NextSlab;
  }
}

/// Reset - Deallocate all but the current slab and reset the current pointer
/// to the beginning of it, making all memory allocated so far available for
/// reuse.
void BumpPtrAllocator::Reset() {
  if (!CurSlab)
    return;
  DeallocateSlabs(CurSlab->NextPtr);
  CurSlab->NextPtr = 0;
  CurPtr = (char*)(CurSlab + 1);
  End = ((char*)CurSlab) + CurSlab->Size;
  BytesAllocated = 0;
}
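
// A common pattern is to reuse one allocator across independent units of work,
// calling Reset() between them; a minimal sketch (NumUnits and processUnit are
// hypothetical names, not part of this file):
//
//   BumpPtrAllocator Scratch;
//   for (unsigned i = 0; i != NumUnits; ++i) {
//     processUnit(i, Scratch);  // allocate freely out of Scratch
//     Scratch.Reset();          // keep the current slab, free the rest
//   }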

/// Allocate - Allocate space at the specified alignment.
///
void *BumpPtrAllocator::Allocate(size_t Size, size_t Alignment) {
  if (!CurSlab) // Start a new slab if we haven't allocated one already.
    StartNewSlab();

  // Keep track of how many bytes we've allocated.
  BytesAllocated += Size;

  // 0-byte alignment means 1-byte alignment.
  if (Alignment == 0) Alignment = 1;

  // Allocate the aligned space, going forwards from CurPtr.
  char *Ptr = AlignPtr(CurPtr, Alignment);

  // Check if we can hold it.
  if (Ptr + Size <= End) {
    CurPtr = Ptr + Size;
    // Update the allocation point of this memory block in MemorySanitizer.
    // Without this, MemorySanitizer messages for values originating here will
    // point to the allocation of the entire slab.
    __msan_allocated_memory(Ptr, Size);
    return Ptr;
  }

  // If the request, padded with the slab header and worst-case alignment, is
  // larger than the size threshold, allocate a separate slab just for it.
  size_t PaddedSize = Size + sizeof(MemSlab) + Alignment - 1;
  if (PaddedSize > SizeThreshold) {
    MemSlab *NewSlab = Allocator.Allocate(PaddedSize);

    // Put the new slab after the current slab, since we are not allocating
    // into it.
    NewSlab->NextPtr = CurSlab->NextPtr;
    CurSlab->NextPtr = NewSlab;

    Ptr = AlignPtr((char*)(NewSlab + 1), Alignment);
    assert((uintptr_t)Ptr + Size <= (uintptr_t)NewSlab + NewSlab->Size);
    __msan_allocated_memory(Ptr, Size);
    return Ptr;
  }

  // Otherwise, start a new slab and try again.
  StartNewSlab();
  Ptr = AlignPtr(CurPtr, Alignment);
  CurPtr = Ptr + Size;
  assert(CurPtr <= End && "Unable to allocate memory!");
  __msan_allocated_memory(Ptr, Size);
  return Ptr;
}
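
// Allocate() hands back raw, uninitialized storage; callers typically construct
// objects into it with placement new and never free them individually. A
// minimal sketch (MyNode is a hypothetical type, 8 an assumed alignment):
//
//   BumpPtrAllocator A;
//   void *Mem = A.Allocate(sizeof(MyNode), 8);
//   MyNode *N = new (Mem) MyNode();
//   // ... N stays valid until A.Reset() or A's destruction; note that the
//   // allocator never runs destructors.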

unsigned BumpPtrAllocator::GetNumSlabs() const {
  unsigned NumSlabs = 0;
  for (MemSlab *Slab = CurSlab; Slab != 0; Slab = Slab->NextPtr) {
    ++NumSlabs;
  }
  return NumSlabs;
}

size_t BumpPtrAllocator::getTotalMemory() const {
  size_t TotalMemory = 0;
  for (MemSlab *Slab = CurSlab; Slab != 0; Slab = Slab->NextPtr) {
    TotalMemory += Slab->Size;
  }
  return TotalMemory;
}

void BumpPtrAllocator::PrintStats() const {
  unsigned NumSlabs = 0;
  size_t TotalMemory = 0;
  for (MemSlab *Slab = CurSlab; Slab != 0; Slab = Slab->NextPtr) {
    TotalMemory += Slab->Size;
    ++NumSlabs;
  }

  errs() << "\nNumber of memory regions: " << NumSlabs << '\n'
         << "Bytes used: " << BytesAllocated << '\n'
         << "Bytes allocated: " << TotalMemory << '\n'
         << "Bytes wasted: " << (TotalMemory - BytesAllocated)
         << " (includes alignment, etc)\n";
}

SlabAllocator::~SlabAllocator() { }

MallocSlabAllocator::~MallocSlabAllocator() { }

MemSlab *MallocSlabAllocator::Allocate(size_t Size) {
  MemSlab *Slab = (MemSlab*)Allocator.Allocate(Size, 0);
  Slab->Size = Size;
  Slab->NextPtr = 0;
  return Slab;
}

void MallocSlabAllocator::Deallocate(MemSlab *Slab) {
  Allocator.Deallocate(Slab);
}
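
// A custom slab source can be substituted for the malloc-based default by
// subclassing SlabAllocator and handing it to the three-argument
// BumpPtrAllocator constructor above. A minimal sketch (MMapSlabAllocator is a
// hypothetical name; its mmap-backed implementation is elided):
//
//   class MMapSlabAllocator : public SlabAllocator {
//   public:
//     virtual ~MMapSlabAllocator();
//     virtual MemSlab *Allocate(size_t Size);   // must fill in Slab->Size and
//     virtual void Deallocate(MemSlab *Slab);   //   Slab->NextPtr, as above
//   };
//
//   MMapSlabAllocator MMapAlloc;
//   BumpPtrAllocator A(4096, 4096, MMapAlloc);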

void PrintRecyclerStats(size_t Size,
                        size_t Align,
                        size_t FreeListSize) {
  errs() << "Recycler element size: " << Size << '\n'
         << "Recycler element alignment: " << Align << '\n'
         << "Number of elements free for recycling: " << FreeListSize << '\n';
}

}