Line data Source code
1 : /*
2 : * Copyright 2015 Google Inc.
3 : *
4 : * Use of this source code is governed by a BSD-style license that can be
5 : * found in the LICENSE file.
6 : */
7 :
#include "GrResourceProvider.h"

#include "GrBuffer.h"
#include "GrCaps.h"
#include "GrContext.h"
#include "GrContextPriv.h"
#include "GrGpu.h"
#include "GrPathRendering.h"
#include "GrRenderTarget.h"
#include "GrRenderTargetPriv.h"
#include "GrResourceCache.h"
#include "GrResourceKey.h"
#include "GrSemaphore.h"
#include "GrStencilAttachment.h"
#include "GrSurfaceProxyPriv.h"
#include "GrTexturePriv.h"
#include "../private/GrSingleOwner.h"
#include "SkMathPriv.h"

#include <memory>
26 :
// Unique key for the shared quad index buffer (see createQuadIndexBuffer()).
GR_DECLARE_STATIC_UNIQUE_KEY(gQuadIndexBufferKey);

// Scratch textures are binned by pow2 dimensions; never bin below this size.
const int GrResourceProvider::kMinScratchTextureSize = 16;

// In debug builds, asserts that the provider entry point is being used from
// the single owning thread (fSingleOwner is only present under SK_DEBUG).
#define ASSERT_SINGLE_OWNER \
    SkDEBUGCODE(GrSingleOwner::AutoEnforce debug_SingleOwner(fSingleOwner);)
33 :
// The provider borrows the GrGpu and GrResourceCache; it does not own them.
// It does take a ref on the caps object so caps queries stay valid.
GrResourceProvider::GrResourceProvider(GrGpu* gpu, GrResourceCache* cache, GrSingleOwner* owner)
    : fCache(cache)
    , fGpu(gpu)
#ifdef SK_DEBUG
    , fSingleOwner(owner)
#endif
{
    fCaps = sk_ref_sp(fGpu->caps());

    // Materialize the process-wide static key and cache a copy so
    // createQuadIndexBuffer() can tag the shared quad index buffer with it.
    GR_DEFINE_STATIC_UNIQUE_KEY(gQuadIndexBufferKey);
    fQuadIndexBufferKey = gQuadIndexBufferKey;
}
46 :
47 0 : bool GrResourceProvider::IsFunctionallyExact(GrSurfaceProxy* proxy) {
48 0 : return proxy->priv().isExact() || (SkIsPow2(proxy->width()) && SkIsPow2(proxy->height()));
49 : }
50 :
51 : // MDB TODO: this should probably be a factory on GrSurfaceProxy
52 0 : sk_sp<GrTextureProxy> GrResourceProvider::createMipMappedTexture(
53 : const GrSurfaceDesc& desc,
54 : SkBudgeted budgeted,
55 : const GrMipLevel* texels,
56 : int mipLevelCount,
57 : uint32_t flags,
58 : SkDestinationSurfaceColorMode mipColorMode) {
59 0 : ASSERT_SINGLE_OWNER
60 :
61 0 : if (!mipLevelCount) {
62 0 : if (texels) {
63 0 : return nullptr;
64 : }
65 0 : return GrSurfaceProxy::MakeDeferred(this, desc, budgeted, nullptr, 0);
66 : }
67 :
68 0 : if (this->isAbandoned()) {
69 0 : return nullptr;
70 : }
71 :
72 0 : for (int i = 0; i < mipLevelCount; ++i) {
73 0 : if (!texels[i].fPixels) {
74 0 : return nullptr;
75 : }
76 : }
77 0 : if (mipLevelCount > 1 && GrPixelConfigIsSint(desc.fConfig)) {
78 0 : return nullptr;
79 : }
80 0 : if ((desc.fFlags & kRenderTarget_GrSurfaceFlag) &&
81 0 : !fGpu->caps()->isConfigRenderable(desc.fConfig, desc.fSampleCnt > 0)) {
82 0 : return nullptr;
83 : }
84 0 : if (!GrPixelConfigIsCompressed(desc.fConfig)) {
85 0 : if (mipLevelCount < 2) {
86 0 : flags |= kExact_Flag | kNoCreate_Flag;
87 0 : sk_sp<GrTexture> tex(this->refScratchTexture(desc, flags));
88 0 : if (tex) {
89 0 : sk_sp<GrTextureProxy> proxy = GrSurfaceProxy::MakeWrapped(tex);
90 :
91 0 : if (fGpu->getContext()->contextPriv().writeSurfacePixels(
92 0 : proxy.get(), nullptr, 0, 0, desc.fWidth, desc.fHeight, desc.fConfig,
93 0 : nullptr, texels[0].fPixels, texels[0].fRowBytes)) {
94 0 : if (SkBudgeted::kNo == budgeted) {
95 0 : tex->resourcePriv().makeUnbudgeted();
96 : }
97 0 : tex->texturePriv().setMipColorMode(mipColorMode);
98 0 : return proxy;
99 : }
100 : }
101 : }
102 : }
103 :
104 0 : SkTArray<GrMipLevel> texelsShallowCopy(mipLevelCount);
105 0 : for (int i = 0; i < mipLevelCount; ++i) {
106 0 : texelsShallowCopy.push_back(texels[i]);
107 : }
108 0 : sk_sp<GrTexture> tex(fGpu->createTexture(desc, budgeted, texelsShallowCopy));
109 0 : if (tex) {
110 0 : tex->texturePriv().setMipColorMode(mipColorMode);
111 : }
112 :
113 0 : return GrSurfaceProxy::MakeWrapped(std::move(tex));
114 : }
115 :
116 0 : sk_sp<GrTexture> GrResourceProvider::createTexture(const GrSurfaceDesc& desc, SkBudgeted budgeted,
117 : uint32_t flags) {
118 0 : ASSERT_SINGLE_OWNER
119 :
120 0 : if (this->isAbandoned()) {
121 0 : return nullptr;
122 : }
123 :
124 0 : if ((desc.fFlags & kRenderTarget_GrSurfaceFlag) &&
125 0 : !fGpu->caps()->isConfigRenderable(desc.fConfig, desc.fSampleCnt > 0)) {
126 0 : return nullptr;
127 : }
128 :
129 0 : if (!GrPixelConfigIsCompressed(desc.fConfig)) {
130 0 : flags |= kExact_Flag | kNoCreate_Flag;
131 0 : sk_sp<GrTexture> tex(this->refScratchTexture(desc, flags));
132 0 : if (tex) {
133 0 : if (SkBudgeted::kNo == budgeted) {
134 0 : tex->resourcePriv().makeUnbudgeted();
135 : }
136 0 : return tex;
137 : }
138 : }
139 :
140 0 : sk_sp<GrTexture> tex(fGpu->createTexture(desc, budgeted));
141 0 : return tex;
142 : }
143 :
144 0 : GrTexture* GrResourceProvider::createApproxTexture(const GrSurfaceDesc& desc, uint32_t flags) {
145 0 : ASSERT_SINGLE_OWNER
146 0 : SkASSERT(0 == flags || kNoPendingIO_Flag == flags);
147 :
148 0 : if (this->isAbandoned()) {
149 0 : return nullptr;
150 : }
151 :
152 : // Currently we don't recycle compressed textures as scratch.
153 0 : if (GrPixelConfigIsCompressed(desc.fConfig)) {
154 0 : return nullptr;
155 : }
156 :
157 0 : return this->refScratchTexture(desc, flags);
158 : }
159 :
// Finds or creates a scratch texture matching inDesc. Flags:
//  - kExact_Flag: do not round dimensions up for pow2 binning.
//  - kNoPendingIO_Flag: only return a cached resource with no pending IO.
//  - kNoCreate_Flag: cache lookup only; never allocate a new texture.
GrTexture* GrResourceProvider::refScratchTexture(const GrSurfaceDesc& inDesc,
                                                 uint32_t flags) {
    ASSERT_SINGLE_OWNER
    SkASSERT(!this->isAbandoned());
    SkASSERT(!GrPixelConfigIsCompressed(inDesc.fConfig));

    // Copy-on-first-write: the caller's desc is only duplicated if we round
    // its dimensions up below.
    SkTCopyOnFirstWrite<GrSurfaceDesc> desc(inDesc);

    // Only consult the cache when the caps allow scratch reuse; render targets
    // are always eligible.
    if (fGpu->caps()->reuseScratchTextures() || (desc->fFlags & kRenderTarget_GrSurfaceFlag)) {
        if (!(kExact_Flag & flags)) {
            // bin by pow2 with a reasonable min
            GrSurfaceDesc* wdesc = desc.writable();
            wdesc->fWidth = SkTMax(kMinScratchTextureSize, GrNextPow2(desc->fWidth));
            wdesc->fHeight = SkTMax(kMinScratchTextureSize, GrNextPow2(desc->fHeight));
        }

        GrScratchKey key;
        GrTexturePriv::ComputeScratchKey(*desc, &key);
        uint32_t scratchFlags = 0;
        if (kNoPendingIO_Flag & flags) {
            scratchFlags = GrResourceCache::kRequireNoPendingIO_ScratchFlag;
        } else if (!(desc->fFlags & kRenderTarget_GrSurfaceFlag)) {
            // If it is not a render target then it will most likely be populated by
            // writePixels() which will trigger a flush if the texture has pending IO.
            scratchFlags = GrResourceCache::kPreferNoPendingIO_ScratchFlag;
        }
        // The cache hands back a ref'ed resource; ownership passes to the caller.
        GrGpuResource* resource = fCache->findAndRefScratchResource(key,
                                                                    GrSurface::WorstCaseSize(*desc),
                                                                    scratchFlags);
        if (resource) {
            GrSurface* surface = static_cast<GrSurface*>(resource);
            return surface->asTexture();
        }
    }

    if (!(kNoCreate_Flag & flags)) {
        return fGpu->createTexture(*desc, SkBudgeted::kYes);
    }

    return nullptr;
}
201 :
202 0 : sk_sp<GrTexture> GrResourceProvider::wrapBackendTexture(const GrBackendTextureDesc& desc,
203 : GrWrapOwnership ownership) {
204 0 : ASSERT_SINGLE_OWNER
205 0 : if (this->isAbandoned()) {
206 0 : return nullptr;
207 : }
208 0 : return fGpu->wrapBackendTexture(desc, ownership);
209 : }
210 :
211 0 : sk_sp<GrRenderTarget> GrResourceProvider::wrapBackendRenderTarget(
212 : const GrBackendRenderTargetDesc& desc)
213 : {
214 0 : ASSERT_SINGLE_OWNER
215 0 : return this->isAbandoned() ? nullptr : fGpu->wrapBackendRenderTarget(desc);
216 : }
217 :
218 0 : void GrResourceProvider::assignUniqueKeyToResource(const GrUniqueKey& key,
219 : GrGpuResource* resource) {
220 0 : ASSERT_SINGLE_OWNER
221 0 : if (this->isAbandoned() || !resource) {
222 0 : return;
223 : }
224 0 : resource->resourcePriv().setUniqueKey(key);
225 : }
226 :
227 0 : GrGpuResource* GrResourceProvider::findAndRefResourceByUniqueKey(const GrUniqueKey& key) {
228 0 : ASSERT_SINGLE_OWNER
229 0 : return this->isAbandoned() ? nullptr : fCache->findAndRefUniqueResource(key);
230 : }
231 :
232 0 : GrTexture* GrResourceProvider::findAndRefTextureByUniqueKey(const GrUniqueKey& key) {
233 0 : ASSERT_SINGLE_OWNER
234 0 : GrGpuResource* resource = this->findAndRefResourceByUniqueKey(key);
235 0 : if (resource) {
236 0 : GrTexture* texture = static_cast<GrSurface*>(resource)->asTexture();
237 0 : SkASSERT(texture);
238 0 : return texture;
239 : }
240 0 : return NULL;
241 : }
242 :
243 : // MDB TODO (caching): this side-steps the issue of texture proxies with unique IDs
244 0 : void GrResourceProvider::assignUniqueKeyToProxy(const GrUniqueKey& key, GrTextureProxy* proxy) {
245 0 : ASSERT_SINGLE_OWNER
246 0 : SkASSERT(key.isValid());
247 0 : if (this->isAbandoned() || !proxy) {
248 0 : return;
249 : }
250 :
251 0 : GrTexture* texture = proxy->instantiate(this);
252 0 : if (!texture) {
253 0 : return;
254 : }
255 :
256 0 : this->assignUniqueKeyToResource(key, texture);
257 : }
258 :
259 : // MDB TODO (caching): this side-steps the issue of texture proxies with unique IDs
260 0 : sk_sp<GrTextureProxy> GrResourceProvider::findProxyByUniqueKey(const GrUniqueKey& key) {
261 0 : ASSERT_SINGLE_OWNER
262 :
263 0 : sk_sp<GrTexture> texture(this->findAndRefTextureByUniqueKey(key));
264 0 : if (!texture) {
265 0 : return nullptr;
266 : }
267 :
268 0 : return GrSurfaceProxy::MakeWrapped(std::move(texture));
269 : }
270 :
271 0 : const GrBuffer* GrResourceProvider::createInstancedIndexBuffer(const uint16_t* pattern,
272 : int patternSize,
273 : int reps,
274 : int vertCount,
275 : const GrUniqueKey& key) {
276 0 : size_t bufferSize = patternSize * reps * sizeof(uint16_t);
277 :
278 : // This is typically used in GrMeshDrawOps, so we assume kNoPendingIO.
279 : GrBuffer* buffer = this->createBuffer(bufferSize, kIndex_GrBufferType, kStatic_GrAccessPattern,
280 0 : kNoPendingIO_Flag);
281 0 : if (!buffer) {
282 0 : return nullptr;
283 : }
284 0 : uint16_t* data = (uint16_t*) buffer->map();
285 0 : bool useTempData = (nullptr == data);
286 0 : if (useTempData) {
287 0 : data = new uint16_t[reps * patternSize];
288 : }
289 0 : for (int i = 0; i < reps; ++i) {
290 0 : int baseIdx = i * patternSize;
291 0 : uint16_t baseVert = (uint16_t)(i * vertCount);
292 0 : for (int j = 0; j < patternSize; ++j) {
293 0 : data[baseIdx+j] = baseVert + pattern[j];
294 : }
295 : }
296 0 : if (useTempData) {
297 0 : if (!buffer->updateData(data, bufferSize)) {
298 0 : buffer->unref();
299 0 : return nullptr;
300 : }
301 0 : delete[] data;
302 : } else {
303 0 : buffer->unmap();
304 : }
305 0 : this->assignUniqueKeyToResource(key, buffer);
306 0 : return buffer;
307 : }
308 :
// Creates the shared index buffer for drawing quads: kMaxQuads repetitions of
// the two-triangle pattern (0,1,2)(0,2,3), 4 vertices per quad.
const GrBuffer* GrResourceProvider::createQuadIndexBuffer() {
    static const int kMaxQuads = 1 << 12; // max possible: (1 << 14) - 1;
    // 16-bit indices: the largest index (4 * kMaxQuads - 1) must fit in uint16_t.
    GR_STATIC_ASSERT(4 * kMaxQuads <= 65535);
    static const uint16_t kPattern[] = { 0, 1, 2, 0, 2, 3 };

    return this->createInstancedIndexBuffer(kPattern, 6, kMaxQuads, 4, fQuadIndexBufferKey);
}
316 :
317 0 : GrPath* GrResourceProvider::createPath(const SkPath& path, const GrStyle& style) {
318 0 : SkASSERT(this->gpu()->pathRendering());
319 0 : return this->gpu()->pathRendering()->createPath(path, style);
320 : }
321 :
322 0 : GrPathRange* GrResourceProvider::createPathRange(GrPathRange::PathGenerator* gen,
323 : const GrStyle& style) {
324 0 : SkASSERT(this->gpu()->pathRendering());
325 0 : return this->gpu()->pathRendering()->createPathRange(gen, style);
326 : }
327 :
328 0 : GrPathRange* GrResourceProvider::createGlyphs(const SkTypeface* tf,
329 : const SkScalerContextEffects& effects,
330 : const SkDescriptor* desc,
331 : const GrStyle& style) {
332 :
333 0 : SkASSERT(this->gpu()->pathRendering());
334 0 : return this->gpu()->pathRendering()->createGlyphs(tf, effects, desc, style);
335 : }
336 :
337 0 : GrBuffer* GrResourceProvider::createBuffer(size_t size, GrBufferType intendedType,
338 : GrAccessPattern accessPattern, uint32_t flags,
339 : const void* data) {
340 0 : if (this->isAbandoned()) {
341 0 : return nullptr;
342 : }
343 0 : if (kDynamic_GrAccessPattern != accessPattern) {
344 0 : return this->gpu()->createBuffer(size, intendedType, accessPattern, data);
345 : }
346 0 : if (!(flags & kRequireGpuMemory_Flag) &&
347 0 : this->gpu()->caps()->preferClientSideDynamicBuffers() &&
348 0 : GrBufferTypeIsVertexOrIndex(intendedType) &&
349 : kDynamic_GrAccessPattern == accessPattern) {
350 0 : return GrBuffer::CreateCPUBacked(this->gpu(), size, intendedType, data);
351 : }
352 :
353 : // bin by pow2 with a reasonable min
354 : static const size_t MIN_SIZE = 1 << 12;
355 0 : size_t allocSize = SkTMax(MIN_SIZE, GrNextSizePow2(size));
356 :
357 0 : GrScratchKey key;
358 0 : GrBuffer::ComputeScratchKeyForDynamicVBO(allocSize, intendedType, &key);
359 0 : uint32_t scratchFlags = 0;
360 0 : if (flags & kNoPendingIO_Flag) {
361 0 : scratchFlags = GrResourceCache::kRequireNoPendingIO_ScratchFlag;
362 : } else {
363 0 : scratchFlags = GrResourceCache::kPreferNoPendingIO_ScratchFlag;
364 : }
365 : GrBuffer* buffer = static_cast<GrBuffer*>(
366 0 : this->cache()->findAndRefScratchResource(key, allocSize, scratchFlags));
367 0 : if (!buffer) {
368 0 : buffer = this->gpu()->createBuffer(allocSize, intendedType, kDynamic_GrAccessPattern);
369 0 : if (!buffer) {
370 0 : return nullptr;
371 : }
372 : }
373 0 : if (data) {
374 0 : buffer->updateData(data, size);
375 : }
376 0 : SkASSERT(!buffer->isCPUBacked()); // We should only cache real VBOs.
377 0 : return buffer;
378 : }
379 :
// Returns rt's stencil attachment, attaching one first if needed. Stencil
// buffers are shared between render targets via a unique key computed from
// (width, height, sample count). Returns null when no attachment exists and
// none could be created.
GrStencilAttachment* GrResourceProvider::attachStencilAttachment(GrRenderTarget* rt) {
    SkASSERT(rt);
    if (rt->renderTargetPriv().getStencilAttachment()) {
        return rt->renderTargetPriv().getStencilAttachment();
    }

    if (!rt->wasDestroyed() && rt->canAttemptStencilAttachment()) {
        GrUniqueKey sbKey;

        int width = rt->width();
        int height = rt->height();
#if 0
        if (this->caps()->oversizedStencilSupport()) {
            width = SkNextPow2(width);
            height = SkNextPow2(height);
        }
#endif
        bool newStencil = false;
        GrStencilAttachment::ComputeSharedStencilAttachmentKey(width, height,
                                                               rt->numStencilSamples(), &sbKey);
        // First try to share an existing stencil buffer with matching key.
        GrStencilAttachment* stencil = static_cast<GrStencilAttachment*>(
            this->findAndRefResourceByUniqueKey(sbKey));
        if (!stencil) {
            // Need to try and create a new stencil
            stencil = this->gpu()->createStencilAttachmentForRenderTarget(rt, width, height);
            if (stencil) {
                this->assignUniqueKeyToResource(sbKey, stencil);
                newStencil = true;
            }
        }
        // Note: stencil may be null here; attachStencilAttachment decides
        // whether the (possibly null) attachment is accepted.
        if (rt->renderTargetPriv().attachStencilAttachment(stencil)) {
            if (newStencil) {
                // Right now we're clearing the stencil attachment here after it is
                // attached to a RT for the first time. When we start matching
                // stencil buffers with smaller color targets this will no longer
                // be correct because it won't be guaranteed to clear the entire
                // sb.
                // We used to clear down in the GL subclass using a special purpose
                // FBO. But iOS doesn't allow a stencil-only FBO. It reports unsupported
                // FBO status.
                this->gpu()->clearStencil(rt);
            }
        }
    }
    return rt->renderTargetPriv().getStencilAttachment();
}
426 :
427 0 : sk_sp<GrRenderTarget> GrResourceProvider::wrapBackendTextureAsRenderTarget(
428 : const GrBackendTextureDesc& desc)
429 : {
430 0 : if (this->isAbandoned()) {
431 0 : return nullptr;
432 : }
433 0 : return this->gpu()->wrapBackendTextureAsRenderTarget(desc);
434 : }
435 :
// Creates a new semaphore by delegating to the GPU backend.
sk_sp<GrSemaphore> SK_WARN_UNUSED_RESULT GrResourceProvider::makeSemaphore() {
    return fGpu->makeSemaphore();
}
439 :
// Re-associates the semaphore with this provider's GrGpu.
void GrResourceProvider::takeOwnershipOfSemaphore(sk_sp<GrSemaphore> semaphore) {
    semaphore->resetGpu(fGpu);
}
443 :
// Dissociates the semaphore from any GrGpu.
void GrResourceProvider::releaseOwnershipOfSemaphore(sk_sp<GrSemaphore> semaphore) {
    semaphore->resetGpu(nullptr);
}
|