Line data Source code
1 : //
2 : // Copyright (c) 2002-2010 The ANGLE Project Authors. All rights reserved.
3 : // Use of this source code is governed by a BSD-style license that can be
4 : // found in the LICENSE file.
5 : //
6 :
7 : #ifndef COMPILER_TRANSLATOR_POOLALLOC_H_
8 : #define COMPILER_TRANSLATOR_POOLALLOC_H_
9 :
10 : #ifdef _DEBUG
11 : #define GUARD_BLOCKS // define to enable guard block sanity checking
12 : #endif
13 :
14 : //
15 : // This header defines an allocator that can be used to efficiently
16 : // allocate a large number of small requests for heap memory, with the
17 : // intention that they are not individually deallocated, but rather
18 : // collectively deallocated at one time.
19 : //
20 : // This simultaneously
21 : //
22 : // * Makes each individual allocation much more efficient; the
23 : // typical allocation is trivial.
24 : // * Completely avoids the cost of doing individual deallocation.
25 : // * Saves the trouble of tracking down and plugging a large class of leaks.
26 : //
27 : // Individual classes can use this allocator by supplying their own
28 : // new and delete methods.
29 : //
30 : // STL containers can use this allocator by using the pool_allocator
31 : // class as the allocator (second) template argument.
32 : //
33 :
#include <stddef.h>
#include <string.h>

#include <new>
#include <vector>
37 :
// If we are using guard blocks, we must track each individual
// allocation. If we aren't using guard blocks, these
// never get instantiated, so won't have any impact.
//
42 :
//
// Bookkeeping record for one guarded allocation. The managed memory is
// laid out as:
//   [allocationHeader][initialGuardBlock][userData][finalGuardBlock]
//
class TAllocation {
public:
    TAllocation(size_t size, unsigned char* mem, TAllocation* prev = 0)
        : size(size), mem(mem), prevAlloc(prev) {
        // Paint the guard bands and the user area with known byte patterns
        // so overruns and uninitialized reads are detectable later.
        // This would be cleaner with if (guardBlockSize)..., but that
        // makes the compiler print warnings about 0 length memsets,
        // even with the if() protecting them.
#ifdef GUARD_BLOCKS
        memset(preGuard(), guardBlockBeginVal, guardBlockSize);
        memset(data(), userDataFill, size);
        memset(postGuard(), guardBlockEndVal, guardBlockSize);
#endif
    }

    // Verify that neither guard band around the user data was overwritten.
    void check() const {
        checkGuardBlock(preGuard(), guardBlockBeginVal, "before");
        checkGuardBlock(postGuard(), guardBlockEndVal, "after");
    }

    // Walk the prevAlloc chain, checking every allocation (defined in .cpp).
    void checkAllocList() const;

    // Return total size needed to accommodate a user buffer of 'size',
    // plus our tracking data.
    inline static size_t allocationSize(size_t size) {
        return size + 2 * guardBlockSize + headerSize();
    }

    // Offset from the surrounding buffer to the user data buffer.
    inline static unsigned char* offsetAllocation(unsigned char* m) {
        return m + guardBlockSize + headerSize();
    }

private:
    void checkGuardBlock(unsigned char* blockMem, unsigned char val, const char* locText) const;

    // Locate the pre-guard, user-data, and post-guard regions.
    unsigned char* preGuard() const { return mem + headerSize(); }
    unsigned char* data() const { return preGuard() + guardBlockSize; }
    unsigned char* postGuard() const { return data() + size; }

    size_t size;            // size of the user data area
    unsigned char* mem;     // beginning of our allocation (pts to header)
    TAllocation* prevAlloc; // prior allocation in the chain

    // Support MSVC++ 6.0
    const static unsigned char guardBlockBeginVal;
    const static unsigned char guardBlockEndVal;
    const static unsigned char userDataFill;

    const static size_t guardBlockSize;
#ifdef GUARD_BLOCKS
    inline static size_t headerSize() { return sizeof(TAllocation); }
#else
    inline static size_t headerSize() { return 0; }
#endif
};
101 :
//
// There are several stacks. One is to track the pushing and popping
// of the user, and not yet implemented. The others are simply
// repositories of free pages or used pages.
//
107 : // Page stacks are linked together with a simple header at the beginning
108 : // of each allocation obtained from the underlying OS. Multi-page allocations
109 : // are returned to the OS. Individual page allocations are kept for future
110 : // re-use.
111 : //
112 : // The "page size" used is not, nor must it match, the underlying OS
113 : // page size. But, having it be about that size or equal to a set of
114 : // pages is likely most optimal.
115 : //
116 : class TPoolAllocator {
117 : public:
118 : TPoolAllocator(int growthIncrement = 8*1024, int allocationAlignment = 16);
119 :
120 : //
121 : // Don't call the destructor just to free up the memory, call pop()
122 : //
123 : ~TPoolAllocator();
124 :
125 : //
126 : // Call push() to establish a new place to pop memory too. Does not
127 : // have to be called to get things started.
128 : //
129 : void push();
130 :
131 : //
132 : // Call pop() to free all memory allocated since the last call to push(),
133 : // or if no last call to push, frees all memory since first allocation.
134 : //
135 : void pop();
136 :
137 : //
138 : // Call popAll() to free all memory allocated.
139 : //
140 : void popAll();
141 :
142 : //
143 : // Call allocate() to actually acquire memory. Returns 0 if no memory
144 : // available, otherwise a properly aligned pointer to 'numBytes' of memory.
145 : //
146 : void* allocate(size_t numBytes);
147 :
148 : //
149 : // There is no deallocate. The point of this class is that
150 : // deallocation can be skipped by the user of it, as the model
151 : // of use is to simultaneously deallocate everything at once
152 : // by calling pop(), and to not have to solve memory leak problems.
153 : //
154 :
155 : // Catch unwanted allocations.
156 : // TODO(jmadill): Remove this when we remove the global allocator.
157 : void lock();
158 : void unlock();
159 :
160 : private:
161 : size_t alignment; // all returned allocations will be aligned at
162 : // this granularity, which will be a power of 2
163 : size_t alignmentMask;
164 :
165 : #if !defined(ANGLE_TRANSLATOR_DISABLE_POOL_ALLOC)
166 : friend struct tHeader;
167 :
168 : struct tHeader {
169 0 : tHeader(tHeader* nextPage, size_t pageCount) :
170 : nextPage(nextPage),
171 0 : pageCount(pageCount)
172 : #ifdef GUARD_BLOCKS
173 : , lastAllocation(0)
174 : #endif
175 0 : { }
176 :
177 0 : ~tHeader() {
178 : #ifdef GUARD_BLOCKS
179 : if (lastAllocation)
180 : lastAllocation->checkAllocList();
181 : #endif
182 0 : }
183 :
184 : tHeader* nextPage;
185 : size_t pageCount;
186 : #ifdef GUARD_BLOCKS
187 : TAllocation* lastAllocation;
188 : #endif
189 : };
190 :
191 : struct tAllocState {
192 : size_t offset;
193 : tHeader* page;
194 : };
195 : typedef std::vector<tAllocState> tAllocStack;
196 :
197 : // Track allocations if and only if we're using guard blocks
198 0 : void* initializeAllocation(tHeader* block, unsigned char* memory, size_t numBytes) {
199 : #ifdef GUARD_BLOCKS
200 : new(memory) TAllocation(numBytes, memory, block->lastAllocation);
201 : block->lastAllocation = reinterpret_cast<TAllocation*>(memory);
202 : #endif
203 : // This is optimized entirely away if GUARD_BLOCKS is not defined.
204 0 : return TAllocation::offsetAllocation(memory);
205 : }
206 :
207 : size_t pageSize; // granularity of allocation from the OS
208 : size_t headerSkip; // amount of memory to skip to make room for the
209 : // header (basically, size of header, rounded
210 : // up to make it aligned
211 : size_t currentPageOffset; // next offset in top of inUseList to allocate from
212 : tHeader* freeList; // list of popped memory
213 : tHeader* inUseList; // list of all memory currently being used
214 : tAllocStack mStack; // stack of where to allocate from, to partition pool
215 :
216 : int numCalls; // just an interesting statistic
217 : size_t totalBytes; // just an interesting statistic
218 :
219 : #else // !defined(ANGLE_TRANSLATOR_DISABLE_POOL_ALLOC)
220 : std::vector<std::vector<void *>> mStack;
221 : #endif
222 :
223 : TPoolAllocator& operator=(const TPoolAllocator&); // dont allow assignment operator
224 : TPoolAllocator(const TPoolAllocator&); // dont allow default copy constructor
225 : bool mLocked;
226 : };
227 :
228 :
229 : //
230 : // There could potentially be many pools with pops happening at
231 : // different times. But a simple use is to have a global pop
232 : // with everyone using the same global allocator.
233 : //
234 : extern TPoolAllocator* GetGlobalPoolAllocator();
235 : extern void SetGlobalPoolAllocator(TPoolAllocator* poolAllocator);
236 :
237 : //
238 : // This STL compatible allocator is intended to be used as the allocator
239 : // parameter to templatized STL containers, like vector and map.
240 : //
241 : // It will use the pools for allocation, and not
242 : // do any deallocation, but will still do destruction.
243 : //
244 : template<class T>
245 : class pool_allocator {
246 : public:
247 : typedef size_t size_type;
248 : typedef ptrdiff_t difference_type;
249 : typedef T* pointer;
250 : typedef const T* const_pointer;
251 : typedef T& reference;
252 : typedef const T& const_reference;
253 : typedef T value_type;
254 :
255 : template<class Other>
256 : struct rebind {
257 : typedef pool_allocator<Other> other;
258 : };
259 : pointer address(reference x) const { return &x; }
260 : const_pointer address(const_reference x) const { return &x; }
261 :
262 0 : pool_allocator() { }
263 :
264 : template<class Other>
265 : pool_allocator(const pool_allocator<Other>& p) { }
266 :
267 : template <class Other>
268 : pool_allocator<T>& operator=(const pool_allocator<Other>& p) { return *this; }
269 :
270 : #if defined(__SUNPRO_CC) && !defined(_RWSTD_ALLOCATOR)
271 : // libCStd on some platforms have a different allocate/deallocate interface.
272 : // Caller pre-bakes sizeof(T) into 'n' which is the number of bytes to be
273 : // allocated, not the number of elements.
274 : void* allocate(size_type n) {
275 : return getAllocator().allocate(n);
276 : }
277 : void* allocate(size_type n, const void*) {
278 : return getAllocator().allocate(n);
279 : }
280 : void deallocate(void*, size_type) {}
281 : #else
282 0 : pointer allocate(size_type n) {
283 0 : return reinterpret_cast<pointer>(getAllocator().allocate(n * sizeof(T)));
284 : }
285 : pointer allocate(size_type n, const void*) {
286 : return reinterpret_cast<pointer>(getAllocator().allocate(n * sizeof(T)));
287 : }
288 0 : void deallocate(pointer, size_type) {}
289 : #endif // _RWSTD_ALLOCATOR
290 :
291 0 : void construct(pointer p, const T& val) { new ((void *)p) T(val); }
292 0 : void destroy(pointer p) { p->T::~T(); }
293 :
294 : bool operator==(const pool_allocator& rhs) const { return true; }
295 0 : bool operator!=(const pool_allocator& rhs) const { return false; }
296 :
297 0 : size_type max_size() const { return static_cast<size_type>(-1) / sizeof(T); }
298 : size_type max_size(int size) const { return static_cast<size_type>(-1) / size; }
299 :
300 0 : TPoolAllocator& getAllocator() const { return *GetGlobalPoolAllocator(); }
301 : };
302 :
303 : #endif // COMPILER_TRANSLATOR_POOLALLOC_H_
|