Line data Source code
1 : //
2 : // Copyright (c) 2002-2010 The ANGLE Project Authors. All rights reserved.
3 : // Use of this source code is governed by a BSD-style license that can be
4 : // found in the LICENSE file.
5 : //
6 :
7 : #include "compiler/translator/PoolAlloc.h"
8 :
9 : #include <stdint.h>
10 : #include <stdio.h>
11 : #include <assert.h>
12 :
13 : #include "common/angleutils.h"
14 : #include "common/debug.h"
15 : #include "common/platform.h"
16 : #include "common/tls.h"
17 : #include "compiler/translator/InitializeGlobals.h"
18 :
// Process-wide TLS slot used to store the current thread's TPoolAllocator*
// (see Get/SetGlobalPoolAllocator below). Stays TLS_INVALID_INDEX until
// InitializePoolIndex() succeeds.
TLSIndex PoolIndex = TLS_INVALID_INDEX;
20 :
21 0 : bool InitializePoolIndex()
22 : {
23 0 : assert(PoolIndex == TLS_INVALID_INDEX);
24 :
25 0 : PoolIndex = CreateTLSIndex();
26 0 : return PoolIndex != TLS_INVALID_INDEX;
27 : }
28 :
29 0 : void FreePoolIndex()
30 : {
31 0 : assert(PoolIndex != TLS_INVALID_INDEX);
32 :
33 0 : DestroyTLSIndex(PoolIndex);
34 0 : PoolIndex = TLS_INVALID_INDEX;
35 0 : }
36 :
37 0 : TPoolAllocator* GetGlobalPoolAllocator()
38 : {
39 0 : assert(PoolIndex != TLS_INVALID_INDEX);
40 0 : return static_cast<TPoolAllocator*>(GetTLSValue(PoolIndex));
41 : }
42 :
43 0 : void SetGlobalPoolAllocator(TPoolAllocator* poolAllocator)
44 : {
45 0 : assert(PoolIndex != TLS_INVALID_INDEX);
46 0 : SetTLSValue(PoolIndex, poolAllocator);
47 0 : }
48 :
49 : //
50 : // Implement the functionality of the TPoolAllocator class, which
51 : // is documented in PoolAlloc.h.
52 : //
// Construct a pool allocator.
//   growthIncrement    - requested page size in bytes (clamped to >= 4 KiB
//                        when real pooling is enabled).
//   allocationAlignment - requested alignment; normalized below to a power
//                         of two that is at least pointer-sized.
TPoolAllocator::TPoolAllocator(int growthIncrement, int allocationAlignment)
    : alignment(allocationAlignment),
#if !defined(ANGLE_TRANSLATOR_DISABLE_POOL_ALLOC)
      pageSize(growthIncrement),
      freeList(0),
      inUseList(0),
      numCalls(0),
      totalBytes(0),
#endif
      mLocked(false)
{
    //
    // Adjust alignment to be at least pointer aligned and
    // power of 2.
    //
    size_t minAlign = sizeof(void*);
    // Drop bits below pointer alignment first (rounds DOWN), then clamp up
    // to minAlign, so e.g. a request of 12 becomes 8, and 1..7 become 8.
    alignment &= ~(minAlign - 1);
    if (alignment < minAlign)
        alignment = minAlign;
    // Round up to the next power of two; alignmentMask is the matching
    // low-bit mask used for "(x + mask) & ~mask" rounding everywhere else.
    size_t a = 1;
    while (a < alignment)
        a <<= 1;
    alignment = a;
    alignmentMask = a - 1;

#if !defined(ANGLE_TRANSLATOR_DISABLE_POOL_ALLOC)
    //
    // Don't allow page sizes we know are smaller than all common
    // OS page sizes.
    //
    if (pageSize < 4 * 1024)
        pageSize = 4 * 1024;

    //
    // A large currentPageOffset indicates a new page needs to
    // be obtained to allocate memory.
    //
    currentPageOffset = pageSize;

    //
    // Align header skip
    //
    // headerSkip is the offset of the first usable byte in a page: the
    // tHeader size rounded up to the chosen alignment.
    headerSkip = minAlign;
    if (headerSkip < sizeof(tHeader)) {
        headerSkip = (sizeof(tHeader) + alignmentMask) & ~alignmentMask;
    }
#else  // !defined(ANGLE_TRANSLATOR_DISABLE_POOL_ALLOC)
    // Pool disabled: mStack tracks raw malloc'd pointers per push() frame;
    // start with one implicit frame so allocate() always has a back().
    mStack.push_back({});
#endif
}
103 :
// Release every page (or raw allocation) still owned by this pool.
TPoolAllocator::~TPoolAllocator()
{
#if !defined(ANGLE_TRANSLATOR_DISABLE_POOL_ALLOC)
    // Pages were allocated as char[] buffers with a placement-new'd tHeader
    // at the front, so the header destructor must be invoked explicitly
    // before the buffer is returned with delete[].
    while (inUseList) {
        tHeader* next = inUseList->nextPage;
        inUseList->~tHeader();
        delete [] reinterpret_cast<char*>(inUseList);
        inUseList = next;
    }

    // We should not check the guard blocks
    // here, because we did it already when the block was
    // placed into the free list.
    //
    // (pop() already ran ~tHeader() on these pages before recycling them,
    // so only the raw buffer needs freeing here.)
    while (freeList) {
        tHeader* next = freeList->nextPage;
        delete [] reinterpret_cast<char*>(freeList);
        freeList = next;
    }
#else  // !defined(ANGLE_TRANSLATOR_DISABLE_POOL_ALLOC)
    // Pool disabled: every allocation was an individual malloc recorded in
    // a stack frame; free them all.
    for (auto &allocs : mStack)
    {
        for (auto alloc : allocs)
        {
            free(alloc);
        }
    }
    mStack.clear();
#endif
}
134 :
// Support MSVC++ 6.0
// Sentinel byte patterns written around each allocation when GUARD_BLOCKS
// is enabled, and later verified by checkGuardBlock() to detect
// out-of-bounds writes. userDataFill is presumably used to pre-fill fresh
// user memory (the fill itself happens outside this file — see PoolAlloc.h).
const unsigned char TAllocation::guardBlockBeginVal = 0xfb;
const unsigned char TAllocation::guardBlockEndVal = 0xfe;
const unsigned char TAllocation::userDataFill = 0xcd;

#ifdef GUARD_BLOCKS
const size_t TAllocation::guardBlockSize = 16;
#else
// Zero-sized guards let the guard arithmetic compile away in normal builds.
const size_t TAllocation::guardBlockSize = 0;
#endif
145 :
146 : //
147 : // Check a single guard block for damage
148 : //
//
// Check a single guard block for damage
//
//   blockMem - start of the guard region to verify
//   val      - expected fill byte (guardBlockBeginVal or guardBlockEndVal)
//   locText  - human-readable location ("before"/"after") for the message
//
// No-op unless compiled with GUARD_BLOCKS defined.
void TAllocation::checkGuardBlock(unsigned char* blockMem, unsigned char val, const char* locText) const
{
#ifdef GUARD_BLOCKS
    // Any byte differing from the fill value means something wrote outside
    // its allocation.
    for (size_t x = 0; x < guardBlockSize; x++) {
        if (blockMem[x] != val) {
            char assertMsg[80];

            // We don't print the assert message. It's here just to be helpful.
#if defined(_MSC_VER)
            // %Iu is the legacy MSVC length modifier for size_t.
            snprintf(assertMsg, sizeof(assertMsg), "PoolAlloc: Damage %s %Iu byte allocation at 0x%p\n",
                     locText, size, data());
#else
            snprintf(assertMsg, sizeof(assertMsg), "PoolAlloc: Damage %s %zu byte allocation at 0x%p\n",
                     locText, size, data());
#endif
            assert(0 && "PoolAlloc: Damage in guard block");
        }
    }
#endif
}
169 :
170 :
// Open a new allocation scope: everything allocated after this call is
// released as a unit by the matching pop().
void TPoolAllocator::push()
{
#if !defined(ANGLE_TRANSLATOR_DISABLE_POOL_ALLOC)
    // Snapshot the current allocation point (page offset + head of the
    // in-use page list) so pop() can unwind back to it.
    tAllocState state = { currentPageOffset, inUseList };

    mStack.push_back(state);

    //
    // Indicate there is no current page to allocate from.
    //
    currentPageOffset = pageSize;
#else  // !defined(ANGLE_TRANSLATOR_DISABLE_POOL_ALLOC)
    // Pool disabled: start an empty list of raw allocations for this frame.
    mStack.push_back({});
#endif
}
186 :
187 : //
188 : // Do a mass-deallocation of all the individual allocations
189 : // that have occurred since the last push(), or since the
190 : // last pop(), or since the object's creation.
191 : //
192 : // The deallocated pages are saved for future allocations.
193 : //
void TPoolAllocator::pop()
{
    // Tolerate an unmatched pop() — nothing to unwind.
    if (mStack.size() < 1)
        return;

#if !defined(ANGLE_TRANSLATOR_DISABLE_POOL_ALLOC)
    // Restore the allocation point saved by the matching push(); every page
    // ahead of |page| on the in-use list was allocated inside this scope.
    tHeader *page = mStack.back().page;
    currentPageOffset = mStack.back().offset;

    while (inUseList != page) {
        // invoke destructor to free allocation list
        inUseList->~tHeader();

        tHeader* nextInUse = inUseList->nextPage;
        // Multi-page (oversized) allocations go straight back to the OS;
        // single pages are recycled onto freeList for reuse.
        if (inUseList->pageCount > 1)
            delete [] reinterpret_cast<char*>(inUseList);
        else {
            inUseList->nextPage = freeList;
            freeList = inUseList;
        }
        inUseList = nextInUse;
    }

    mStack.pop_back();
#else  // !defined(ANGLE_TRANSLATOR_DISABLE_POOL_ALLOC)
    // Pool disabled: free every raw allocation recorded in this frame.
    for (auto &alloc : mStack.back())
    {
        free(alloc);
    }
    mStack.pop_back();
#endif
}
226 :
227 : //
228 : // Do a mass-deallocation of all the individual allocations
229 : // that have occurred.
230 : //
231 0 : void TPoolAllocator::popAll()
232 : {
233 0 : while (mStack.size() > 0)
234 0 : pop();
235 0 : }
236 :
// Allocate numBytes of (alignment-respecting) storage from the pool.
// Returns 0 on arithmetic overflow or allocation failure. Individual
// allocations are never freed; memory is reclaimed en masse by pop().
void* TPoolAllocator::allocate(size_t numBytes)
{
    // Allocation from a locked pool is a caller bug.
    ASSERT(!mLocked);

#if !defined(ANGLE_TRANSLATOR_DISABLE_POOL_ALLOC)
    //
    // Just keep some interesting statistics.
    //
    ++numCalls;
    totalBytes += numBytes;

    // If we are using guard blocks, all allocations are bracketed by
    // them: [guardblock][allocation][guardblock]. numBytes is how
    // much memory the caller asked for. allocationSize is the total
    // size including guard blocks. In release build,
    // guardBlockSize=0 and this all gets optimized away.
    size_t allocationSize = TAllocation::allocationSize(numBytes);
    // Detect integer overflow.
    if (allocationSize < numBytes)
        return 0;

    //
    // Do the allocation, most likely case first, for efficiency.
    // This step could be moved to be inline sometime.
    //
    if (allocationSize <= pageSize - currentPageOffset) {
        //
        // Safe to allocate from currentPageOffset.
        //
        // inUseList is the current page; carve the block out at the current
        // offset, then round the new offset up to the alignment boundary.
        unsigned char* memory = reinterpret_cast<unsigned char *>(inUseList) + currentPageOffset;
        currentPageOffset += allocationSize;
        currentPageOffset = (currentPageOffset + alignmentMask) & ~alignmentMask;

        return initializeAllocation(inUseList, memory, numBytes);
    }

    if (allocationSize > pageSize - headerSkip) {
        //
        // Do a multi-page allocation. Don't mix these with the others.
        // The OS is efficient and allocating and free-ing multiple pages.
        //
        size_t numBytesToAlloc = allocationSize + headerSkip;
        // Detect integer overflow.
        if (numBytesToAlloc < allocationSize)
            return 0;

        tHeader* memory = reinterpret_cast<tHeader*>(::new char[numBytesToAlloc]);
        if (memory == 0)
            return 0;

        // Use placement-new to initialize header
        // pageCount > 1 marks this as an oversized block so pop() returns
        // it to the OS instead of recycling it on freeList.
        new(memory) tHeader(inUseList, (numBytesToAlloc + pageSize - 1) / pageSize);
        inUseList = memory;

        currentPageOffset = pageSize; // make next allocation come from a new page

        // No guard blocks for multi-page allocations (yet)
        return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(memory) + headerSkip);
    }

    //
    // Need a simple page to allocate from.
    //
    // Prefer a page recycled by pop(); otherwise get a fresh one.
    tHeader* memory;
    if (freeList) {
        memory = freeList;
        freeList = freeList->nextPage;
    } else {
        memory = reinterpret_cast<tHeader*>(::new char[pageSize]);
        if (memory == 0)
            return 0;
    }

    // Use placement-new to initialize header
    new(memory) tHeader(inUseList, 1);
    inUseList = memory;

    // First usable byte is after the (aligned) header.
    unsigned char* ret = reinterpret_cast<unsigned char *>(inUseList) + headerSkip;
    currentPageOffset = (headerSkip + allocationSize + alignmentMask) & ~alignmentMask;

    return initializeAllocation(inUseList, ret, numBytes);
#else  // !defined(ANGLE_TRANSLATOR_DISABLE_POOL_ALLOC)
    // Pool disabled: over-allocate by alignmentMask so the returned pointer
    // can be rounded up to the alignment boundary; record the raw pointer
    // for the current frame so pop() can free it.
    void *alloc = malloc(numBytes + alignmentMask);
    mStack.back().push_back(alloc);

    intptr_t intAlloc = reinterpret_cast<intptr_t>(alloc);
    intAlloc = (intAlloc + alignmentMask) & ~alignmentMask;
    return reinterpret_cast<void *>(intAlloc);
#endif
}
327 :
328 0 : void TPoolAllocator::lock()
329 : {
330 0 : ASSERT(!mLocked);
331 0 : mLocked = true;
332 0 : }
333 :
334 0 : void TPoolAllocator::unlock()
335 : {
336 0 : ASSERT(mLocked);
337 0 : mLocked = false;
338 0 : }
339 :
340 : //
341 : // Check all allocations in a list for damage by calling check on each.
342 : //
343 0 : void TAllocation::checkAllocList() const
344 : {
345 0 : for (const TAllocation* alloc = this; alloc != 0; alloc = alloc->prevAlloc)
346 0 : alloc->check();
347 0 : }
|