/*
 * Copyright 2010 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */


#include "GrBufferAllocPool.h"
#include "GrBuffer.h"
#include "GrCaps.h"
#include "GrContext.h"
#include "GrGpu.h"
#include "GrResourceProvider.h"
#include "GrTypes.h"

#include "SkTraceEvent.h"

#ifdef SK_DEBUG
    #define VALIDATE validate
#else
    static void VALIDATE(bool = false) {}
#endif

static const size_t MIN_VERTEX_BUFFER_SIZE = 1 << 15;
static const size_t MIN_INDEX_BUFFER_SIZE = 1 << 12;

// Minimum size (32KB) of any block allocated by a pool.
#define GrBufferAllocPool_MIN_BLOCK_SIZE ((size_t)1 << 15)

#define UNMAP_BUFFER(block)                                                          \
    do {                                                                             \
        TRACE_EVENT_INSTANT1(TRACE_DISABLED_BY_DEFAULT("skia.gpu"),                  \
                             "GrBufferAllocPool Unmapping Buffer",                   \
                             TRACE_EVENT_SCOPE_THREAD,                               \
                             "percent_unwritten",                                    \
                             (float)((block).fBytesFree) / (block).fBuffer->gpuMemorySize()); \
        (block).fBuffer->unmap();                                                    \
    } while (false)
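
// A pool suballocates from GPU buffers ("blocks") that are allocated on
// demand. The current block is written either through a mapping of the
// buffer or via a CPU-side staging allocation that is flushed when the block
// fills up or unmap() is called. 'blockSize' is raised to
// GrBufferAllocPool_MIN_BLOCK_SIZE if smaller.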
GrBufferAllocPool::GrBufferAllocPool(GrGpu* gpu,
                                     GrBufferType bufferType,
                                     size_t blockSize)
    : fBlocks(8) {

    fGpu = SkRef(gpu);
    fCpuData = nullptr;
    fBufferType = bufferType;
    fBufferPtr = nullptr;
    fMinBlockSize = SkTMax(GrBufferAllocPool_MIN_BLOCK_SIZE, blockSize);

    fBytesInUse = 0;

    fBufferMapThreshold = gpu->caps()->bufferMapThreshold();
}
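
// Unmaps the most recently allocated block if it is still mapped, then
// destroys every block, unreffing each block's GrBuffer.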
void GrBufferAllocPool::deleteBlocks() {
    if (fBlocks.count()) {
        GrBuffer* buffer = fBlocks.back().fBuffer;
        if (buffer->isMapped()) {
            UNMAP_BUFFER(fBlocks.back());
        }
    }
    while (!fBlocks.empty()) {
        this->destroyBlock();
    }
    SkASSERT(!fBufferPtr);
}

GrBufferAllocPool::~GrBufferAllocPool() {
    VALIDATE();
    this->deleteBlocks();
    sk_free(fCpuData);
    fGpu->unref();
}
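
// Returns the pool to its initial state: all blocks are destroyed and the
// CPU-side staging allocation is freed.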
void GrBufferAllocPool::reset() {
    VALIDATE();
    fBytesInUse = 0;
    this->deleteBlocks();
    this->resetCpuData(0);  // delete all the cpu-side memory
    VALIDATE();
}
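
// Makes any data written through fBufferPtr visible to the GPU: unmaps the
// active block if it is mapped, or flushes the CPU-side staging data to it
// otherwise. After this call no further writes may go through fBufferPtr.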
void GrBufferAllocPool::unmap() {
    VALIDATE();

    if (fBufferPtr) {
        BufferBlock& block = fBlocks.back();
        if (block.fBuffer->isMapped()) {
            UNMAP_BUFFER(block);
        } else {
            size_t flushSize = block.fBuffer->gpuMemorySize() - block.fBytesFree;
            this->flushCpuData(fBlocks.back(), flushSize);
        }
        fBufferPtr = nullptr;
    }
    VALIDATE();
}

#ifdef SK_DEBUG
void GrBufferAllocPool::validate(bool unusedBlockAllowed) const {
    bool wasDestroyed = false;
    if (fBufferPtr) {
        SkASSERT(!fBlocks.empty());
        if (fBlocks.back().fBuffer->isMapped()) {
            GrBuffer* buf = fBlocks.back().fBuffer;
            SkASSERT(buf->mapPtr() == fBufferPtr);
        } else {
            SkASSERT(fCpuData == fBufferPtr);
        }
    } else {
        SkASSERT(fBlocks.empty() || !fBlocks.back().fBuffer->isMapped());
    }
    size_t bytesInUse = 0;
    for (int i = 0; i < fBlocks.count() - 1; ++i) {
        SkASSERT(!fBlocks[i].fBuffer->isMapped());
    }
    for (int i = 0; !wasDestroyed && i < fBlocks.count(); ++i) {
        if (fBlocks[i].fBuffer->wasDestroyed()) {
            wasDestroyed = true;
        } else {
            size_t bytes = fBlocks[i].fBuffer->gpuMemorySize() - fBlocks[i].fBytesFree;
            bytesInUse += bytes;
            SkASSERT(bytes || unusedBlockAllowed);
        }
    }

    if (!wasDestroyed) {
        SkASSERT(bytesInUse == fBytesInUse);
        if (unusedBlockAllowed) {
            SkASSERT((fBytesInUse && !fBlocks.empty()) ||
                     (!fBytesInUse && (fBlocks.count() < 2)));
        } else {
            SkASSERT((0 == fBytesInUse) == fBlocks.empty());
        }
    }
}
#endif
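
// Returns a pointer to 'size' bytes of writable space aligned to 'alignment',
// along with the backing buffer and the offset of the space within it. Tries
// to suballocate from the current block (zero-filling any alignment padding);
// otherwise starts a new block. Returns nullptr on failure.
//
// A hypothetical caller (assuming access to this method; names are
// illustrative only) might use it like:
//
//     const GrBuffer* buffer;
//     size_t offset;
//     void* ptr = pool->makeSpace(byteCount, 4, &buffer, &offset);
//     if (ptr) {
//         memcpy(ptr, srcData, byteCount);  // write through the returned pointer
//     }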
void* GrBufferAllocPool::makeSpace(size_t size,
                                   size_t alignment,
                                   const GrBuffer** buffer,
                                   size_t* offset) {
    VALIDATE();

    SkASSERT(buffer);
    SkASSERT(offset);

    if (fBufferPtr) {
        BufferBlock& back = fBlocks.back();
        size_t usedBytes = back.fBuffer->gpuMemorySize() - back.fBytesFree;
        size_t pad = GrSizeAlignUpPad(usedBytes, alignment);
        if ((size + pad) <= back.fBytesFree) {
            memset((void*)(reinterpret_cast<intptr_t>(fBufferPtr) + usedBytes), 0, pad);
            usedBytes += pad;
            *offset = usedBytes;
            *buffer = back.fBuffer;
            back.fBytesFree -= size + pad;
            fBytesInUse += size + pad;
            VALIDATE();
            return (void*)(reinterpret_cast<intptr_t>(fBufferPtr) + usedBytes);
        }
    }

    // We could honor the space request with a partial update of the current
    // VB (if there is room). But we don't currently use draw calls to GL that
    // allow the driver to know that previously issued draws won't read from
    // the part of the buffer we update. Also, the GL buffer implementation
    // may be cheating on the actual buffer size by shrinking the buffer on
    // updateData() if the amount of data passed is less than the full buffer
    // size.

    if (!this->createBlock(size)) {
        return nullptr;
    }
    SkASSERT(fBufferPtr);

    *offset = 0;
    BufferBlock& back = fBlocks.back();
    *buffer = back.fBuffer;
    back.fBytesFree -= size;
    fBytesInUse += size;
    VALIDATE();
    return fBufferPtr;
}
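
// Returns the most recently allocated 'bytes' to the pool, destroying any
// blocks that become entirely unused. The caller must not put back more than
// it has taken.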
void GrBufferAllocPool::putBack(size_t bytes) {
    VALIDATE();

    while (bytes) {
        // caller shouldn't try to put back more than they've taken
        SkASSERT(!fBlocks.empty());
        BufferBlock& block = fBlocks.back();
        size_t bytesUsed = block.fBuffer->gpuMemorySize() - block.fBytesFree;
        if (bytes >= bytesUsed) {
            bytes -= bytesUsed;
            fBytesInUse -= bytesUsed;
            // if we mapped a buffer to satisfy the makeSpace and we're
            // releasing beyond it, then unmap it.
            if (block.fBuffer->isMapped()) {
                UNMAP_BUFFER(block);
            }
            this->destroyBlock();
        } else {
            block.fBytesFree += bytes;
            fBytesInUse -= bytes;
            bytes = 0;
            break;
        }
    }

    VALIDATE();
}
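
// Allocates a new block of at least 'requestSize' bytes (clamped up to
// fMinBlockSize), flushes and unmaps the previous block, and makes the new
// block current. Afterwards fBufferPtr points either at the mapped buffer or
// at a CPU-side staging allocation that will be flushed later.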
bool GrBufferAllocPool::createBlock(size_t requestSize) {

    size_t size = SkTMax(requestSize, fMinBlockSize);
    SkASSERT(size >= GrBufferAllocPool_MIN_BLOCK_SIZE);

    VALIDATE();

    BufferBlock& block = fBlocks.push_back();

    block.fBuffer = this->getBuffer(size);
    if (!block.fBuffer) {
        fBlocks.pop_back();
        return false;
    }

    block.fBytesFree = block.fBuffer->gpuMemorySize();
    if (fBufferPtr) {
        SkASSERT(fBlocks.count() > 1);
        BufferBlock& prev = fBlocks.fromBack(1);
        if (prev.fBuffer->isMapped()) {
            UNMAP_BUFFER(prev);
        } else {
            this->flushCpuData(prev, prev.fBuffer->gpuMemorySize() - prev.fBytesFree);
        }
        fBufferPtr = nullptr;
    }

    SkASSERT(!fBufferPtr);

    // If the buffer is CPU-backed we map it because it is free to do so and saves a copy.
    // Otherwise when buffer mapping is supported we map if the buffer size is greater than the
    // threshold.
    bool attemptMap = block.fBuffer->isCPUBacked();
    if (!attemptMap && GrCaps::kNone_MapFlags != fGpu->caps()->mapBufferFlags()) {
        attemptMap = size > fBufferMapThreshold;
    }

    if (attemptMap) {
        fBufferPtr = block.fBuffer->map();
    }

    if (!fBufferPtr) {
        fBufferPtr = this->resetCpuData(block.fBytesFree);
    }

    VALIDATE(true);

    return true;
}
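
// Unrefs the most recent block's buffer and removes it from the stack. The
// buffer must already be unmapped.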
void GrBufferAllocPool::destroyBlock() {
    SkASSERT(!fBlocks.empty());

    BufferBlock& block = fBlocks.back();

    SkASSERT(!block.fBuffer->isMapped());
    block.fBuffer->unref();
    fBlocks.pop_back();
    fBufferPtr = nullptr;
}
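
// Replaces the CPU-side staging allocation with one of 'newSize' bytes
// (zeroed when the GPU requires cleared upload data), or frees it when
// 'newSize' is zero. Returns the new allocation.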
void* GrBufferAllocPool::resetCpuData(size_t newSize) {
    sk_free(fCpuData);
    if (newSize) {
        if (fGpu->caps()->mustClearUploadedBufferData()) {
            fCpuData = sk_calloc_throw(newSize);
        } else {
            fCpuData = sk_malloc_throw(newSize);
        }
    } else {
        fCpuData = nullptr;
    }
    return fCpuData;
}
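
// Copies 'flushSize' bytes of CPU-side staging data into the block's GPU
// buffer, preferring map/memcpy/unmap for large transfers and falling back to
// updateData() otherwise.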
void GrBufferAllocPool::flushCpuData(const BufferBlock& block, size_t flushSize) {
    GrBuffer* buffer = block.fBuffer;
    SkASSERT(buffer);
    SkASSERT(!buffer->isMapped());
    SkASSERT(fCpuData == fBufferPtr);
    SkASSERT(flushSize <= buffer->gpuMemorySize());
    VALIDATE(true);

    if (GrCaps::kNone_MapFlags != fGpu->caps()->mapBufferFlags() &&
        flushSize > fBufferMapThreshold) {
        void* data = buffer->map();
        if (data) {
            memcpy(data, fBufferPtr, flushSize);
            UNMAP_BUFFER(block);
            return;
        }
    }
    buffer->updateData(fBufferPtr, flushSize);
    VALIDATE(true);
}
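
// Creates a new dynamic GrBuffer of 'size' bytes via the resource provider.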
GrBuffer* GrBufferAllocPool::getBuffer(size_t size) {

    GrResourceProvider* rp = fGpu->getContext()->resourceProvider();

    // Shouldn't have to use this flag (https://bug.skia.org/4156)
    static const uint32_t kFlags = GrResourceProvider::kNoPendingIO_Flag;
    return rp->createBuffer(size, fBufferType, kDynamic_GrAccessPattern, kFlags);
}

////////////////////////////////////////////////////////////////////////////////

GrVertexBufferAllocPool::GrVertexBufferAllocPool(GrGpu* gpu)
    : GrBufferAllocPool(gpu, kVertex_GrBufferType, MIN_VERTEX_BUFFER_SIZE) {
}
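
// Returns space for 'vertexCount' vertices of 'vertexSize' bytes each, plus
// the index of the first vertex within the returned buffer. For example, a
// hypothetical caller writing two vertices of a 12-byte layout (three floats
// per vertex; names are illustrative only):
//
//     const GrBuffer* vb;
//     int firstVertex;
//     void* verts = vertexPool->makeSpace(12, 2, &vb, &firstVertex);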
void* GrVertexBufferAllocPool::makeSpace(size_t vertexSize,
                                         int vertexCount,
                                         const GrBuffer** buffer,
                                         int* startVertex) {

    SkASSERT(vertexCount >= 0);
    SkASSERT(buffer);
    SkASSERT(startVertex);

    size_t offset SK_INIT_TO_AVOID_WARNING;
    void* ptr = INHERITED::makeSpace(vertexSize * vertexCount,
                                     vertexSize,
                                     buffer,
                                     &offset);

    SkASSERT(0 == offset % vertexSize);
    *startVertex = static_cast<int>(offset / vertexSize);
    return ptr;
}

////////////////////////////////////////////////////////////////////////////////

GrIndexBufferAllocPool::GrIndexBufferAllocPool(GrGpu* gpu)
    : GrBufferAllocPool(gpu, kIndex_GrBufferType, MIN_INDEX_BUFFER_SIZE) {
}
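
// Returns space for 'indexCount' 16-bit indices, plus the position of the
// first index within the returned buffer.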
void* GrIndexBufferAllocPool::makeSpace(int indexCount,
                                        const GrBuffer** buffer,
                                        int* startIndex) {

    SkASSERT(indexCount >= 0);
    SkASSERT(buffer);
    SkASSERT(startIndex);

    size_t offset SK_INIT_TO_AVOID_WARNING;
    void* ptr = INHERITED::makeSpace(indexCount * sizeof(uint16_t),
                                     sizeof(uint16_t),
                                     buffer,
                                     &offset);

    SkASSERT(0 == offset % sizeof(uint16_t));
    *startIndex = static_cast<int>(offset / sizeof(uint16_t));
    return ptr;
}