Line data Source code
1 : /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
2 : /* This Source Code Form is subject to the terms of the Mozilla Public
3 : * License, v. 2.0. If a copy of the MPL was not distributed with this
4 : * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
5 :
6 : #include "SourceBuffer.h"
7 :
8 : #include <algorithm>
9 : #include <cmath>
10 : #include <cstring>
11 : #include "mozilla/Likely.h"
12 : #include "nsIInputStream.h"
13 : #include "MainThreadUtils.h"
14 : #include "SurfaceCache.h"
15 :
16 : using std::max;
17 : using std::min;
18 :
19 : namespace mozilla {
20 : namespace image {
21 :
22 : //////////////////////////////////////////////////////////////////////////////
23 : // SourceBufferIterator implementation.
24 : //////////////////////////////////////////////////////////////////////////////
25 :
SourceBufferIterator::~SourceBufferIterator()
{
  // Notify the owning SourceBuffer that this consumer is going away; once the
  // last consumer is released (and the buffer is complete), the owner compacts
  // its storage in OnIteratorRelease(). mOwner may be null if this iterator
  // was moved from.
  if (mOwner) {
    mOwner->OnIteratorRelease();
  }
}
32 :
33 : SourceBufferIterator&
34 0 : SourceBufferIterator::operator=(SourceBufferIterator&& aOther)
35 : {
36 0 : if (mOwner) {
37 0 : mOwner->OnIteratorRelease();
38 : }
39 :
40 0 : mOwner = Move(aOther.mOwner);
41 0 : mState = aOther.mState;
42 0 : mData = aOther.mData;
43 0 : mChunkCount = aOther.mChunkCount;
44 0 : mByteCount = aOther.mByteCount;
45 :
46 0 : return *this;
47 : }
48 :
/**
 * Consumes the bytes the caller just finished reading and advances the
 * iterator: first out of the local (lock-free) view of the current chunk, and
 * only if that is exhausted by asking the owning SourceBuffer (which takes the
 * lock and may leave us READY, WAITING, or COMPLETE).
 */
SourceBufferIterator::State
SourceBufferIterator::AdvanceOrScheduleResume(size_t aRequestedBytes,
                                              IResumable* aConsumer)
{
  MOZ_ASSERT(mOwner);

  if (MOZ_UNLIKELY(!HasMore())) {
    MOZ_ASSERT_UNREACHABLE("Should not advance a completed iterator");
    return COMPLETE;
  }

  // The range of data [mOffset, mOffset + mNextReadLength) has just been read
  // by the caller (or at least they don't have any interest in it), so consume
  // that data.
  MOZ_ASSERT(mData.mIterating.mNextReadLength <= mData.mIterating.mAvailableLength);
  mData.mIterating.mOffset += mData.mIterating.mNextReadLength;
  mData.mIterating.mAvailableLength -= mData.mIterating.mNextReadLength;
  mData.mIterating.mNextReadLength = 0;

  if (MOZ_LIKELY(mState == READY)) {
    // If the caller wants zero bytes of data, that's easy enough; we just
    // configured ourselves for a zero-byte read above! In theory we could do
    // this even in the START state, but it's not important for performance and
    // breaking the ability of callers to assert that the pointer returned by
    // Data() is non-null doesn't seem worth it.
    if (aRequestedBytes == 0) {
      MOZ_ASSERT(mData.mIterating.mNextReadLength == 0);
      return READY;
    }

    // Try to satisfy the request out of our local buffer. This is potentially
    // much faster than requesting data from our owning SourceBuffer because we
    // don't have to take the lock. Note that if we have anything at all in our
    // local buffer, we use it to satisfy the request; @aRequestedBytes is just
    // the *maximum* number of bytes we can return.
    if (mData.mIterating.mAvailableLength > 0) {
      return AdvanceFromLocalBuffer(aRequestedBytes);
    }
  }

  // Our local buffer is empty, so we'll have to request data from our owning
  // SourceBuffer. This may move us to the next chunk, schedule @aConsumer to
  // be resumed later (WAITING), or mark us COMPLETE.
  return mOwner->AdvanceIteratorOrScheduleResume(*this,
                                                 aRequestedBytes,
                                                 aConsumer);
}
95 :
// Returns whether the data remaining past this iterator's position totals no
// more than @aBytes. Delegates to the owning SourceBuffer, which holds the
// chunk list (and the lock) needed to answer.
bool
SourceBufferIterator::RemainingBytesIsNoMoreThan(size_t aBytes) const
{
  MOZ_ASSERT(mOwner);
  return mOwner->RemainingBytesIsNoMoreThan(*this, aBytes);
}
102 :
103 :
104 : //////////////////////////////////////////////////////////////////////////////
105 : // SourceBuffer implementation.
106 : //////////////////////////////////////////////////////////////////////////////
107 :
108 : const size_t SourceBuffer::MIN_CHUNK_CAPACITY;
109 :
110 20 : SourceBuffer::SourceBuffer()
111 : : mMutex("image::SourceBuffer")
112 20 : , mConsumerCount(0)
113 20 : { }
114 :
SourceBuffer::~SourceBuffer()
{
  // Every SourceBufferIterator handed out by Iterator() must have been
  // released (see OnIteratorRelease) before the buffer itself is destroyed.
  MOZ_ASSERT(mConsumerCount == 0,
             "SourceBuffer destroyed with active consumers");
}
120 :
121 : nsresult
122 38 : SourceBuffer::AppendChunk(Maybe<Chunk>&& aChunk)
123 : {
124 38 : mMutex.AssertCurrentThreadOwns();
125 :
126 : #ifdef DEBUG
127 38 : if (mChunks.Length() > 0) {
128 0 : NS_WARNING("Appending an extra chunk for SourceBuffer");
129 : }
130 : #endif
131 :
132 38 : if (MOZ_UNLIKELY(!aChunk)) {
133 0 : return NS_ERROR_OUT_OF_MEMORY;
134 : }
135 :
136 38 : if (MOZ_UNLIKELY(aChunk->AllocationFailed())) {
137 0 : return NS_ERROR_OUT_OF_MEMORY;
138 : }
139 :
140 38 : if (MOZ_UNLIKELY(!mChunks.AppendElement(Move(*aChunk), fallible))) {
141 0 : return NS_ERROR_OUT_OF_MEMORY;
142 : }
143 :
144 38 : return NS_OK;
145 : }
146 :
147 : Maybe<SourceBuffer::Chunk>
148 38 : SourceBuffer::CreateChunk(size_t aCapacity, bool aRoundUp /* = true */)
149 : {
150 38 : if (MOZ_UNLIKELY(aCapacity == 0)) {
151 0 : MOZ_ASSERT_UNREACHABLE("Appending a chunk of zero size?");
152 : return Nothing();
153 : }
154 :
155 : // Round up if requested.
156 38 : size_t finalCapacity = aRoundUp ? RoundedUpCapacity(aCapacity)
157 38 : : aCapacity;
158 :
159 : // Use the size of the SurfaceCache as an additional heuristic to avoid
160 : // allocating huge buffers. Generally images do not get smaller when decoded,
161 : // so if we could store the source data in the SurfaceCache, we assume that
162 : // there's no way we'll be able to store the decoded version.
163 38 : if (MOZ_UNLIKELY(!SurfaceCache::CanHold(finalCapacity))) {
164 0 : NS_WARNING("SourceBuffer refused to create chunk too large for SurfaceCache");
165 0 : return Nothing();
166 : }
167 :
168 38 : return Some(Chunk(finalCapacity));
169 : }
170 :
/**
 * Consolidates all chunks into a single, exactly-sized chunk. Only legal once
 * the buffer is complete and no consumers remain (asserted below), since it
 * invalidates the chunk data pointers that iterators hold.
 * Must be called with mMutex held.
 */
nsresult
SourceBuffer::Compact()
{
  mMutex.AssertCurrentThreadOwns();

  MOZ_ASSERT(mConsumerCount == 0, "Should have no consumers here");
  MOZ_ASSERT(mWaitingConsumers.Length() == 0, "Shouldn't have waiters");
  MOZ_ASSERT(mStatus, "Should be complete here");

  // Compact our waiting consumers list, since we're complete and no future
  // consumer will ever have to wait.
  mWaitingConsumers.Compact();

  // If we have no chunks, then there's nothing to compact.
  if (mChunks.Length() < 1) {
    return NS_OK;
  }

  // If we have one chunk, then we can compact if it has excess capacity.
  if (mChunks.Length() == 1 && mChunks[0].Length() == mChunks[0].Capacity()) {
    return NS_OK;
  }

  // We can compact our buffer. Determine the total length.
  size_t length = 0;
  for (uint32_t i = 0 ; i < mChunks.Length() ; ++i) {
    length += mChunks[i].Length();
  }

  // If our total length is zero (which means ExpectLength() got called, but no
  // data ever actually got written) then just empty our chunk list.
  if (MOZ_UNLIKELY(length == 0)) {
    mChunks.Clear();
    return NS_OK;
  }

  // Allocate the replacement chunk with the exact total length (no rounding),
  // so the compacted buffer carries no slack space.
  Maybe<Chunk> newChunk = CreateChunk(length, /* aRoundUp = */ false);
  if (MOZ_UNLIKELY(!newChunk || newChunk->AllocationFailed())) {
    // Compaction is an optimization, not a requirement: on OOM we keep the
    // existing chunks and report success.
    NS_WARNING("Failed to allocate chunk for SourceBuffer compacting - OOM?");
    return NS_OK;
  }

  // Copy our old chunks into the new chunk.
  for (uint32_t i = 0 ; i < mChunks.Length() ; ++i) {
    size_t offset = newChunk->Length();
    MOZ_ASSERT(offset < newChunk->Capacity());
    MOZ_ASSERT(offset + mChunks[i].Length() <= newChunk->Capacity());

    memcpy(newChunk->Data() + offset, mChunks[i].Data(), mChunks[i].Length());
    newChunk->AddLength(mChunks[i].Length());
  }

  MOZ_ASSERT(newChunk->Length() == newChunk->Capacity(),
             "Compacted chunk has slack space");

  // Replace the old chunks with the new, compact chunk.
  mChunks.Clear();
  if (MOZ_UNLIKELY(NS_FAILED(AppendChunk(Move(newChunk))))) {
    return HandleError(NS_ERROR_OUT_OF_MEMORY);
  }
  mChunks.Compact();

  return NS_OK;
}
235 :
236 : /* static */ size_t
237 19 : SourceBuffer::RoundedUpCapacity(size_t aCapacity)
238 : {
239 : // Protect against overflow.
240 19 : if (MOZ_UNLIKELY(SIZE_MAX - aCapacity < MIN_CHUNK_CAPACITY)) {
241 0 : return aCapacity;
242 : }
243 :
244 : // Round up to the next multiple of MIN_CHUNK_CAPACITY (which should be the
245 : // size of a page).
246 : size_t roundedCapacity =
247 19 : (aCapacity + MIN_CHUNK_CAPACITY - 1) & ~(MIN_CHUNK_CAPACITY - 1);
248 19 : MOZ_ASSERT(roundedCapacity >= aCapacity, "Bad math?");
249 19 : MOZ_ASSERT(roundedCapacity - aCapacity < MIN_CHUNK_CAPACITY, "Bad math?");
250 :
251 19 : return roundedCapacity;
252 : }
253 :
254 : size_t
255 0 : SourceBuffer::FibonacciCapacityWithMinimum(size_t aMinCapacity)
256 : {
257 0 : mMutex.AssertCurrentThreadOwns();
258 :
259 : // We grow the source buffer using a Fibonacci growth rate.
260 :
261 0 : size_t length = mChunks.Length();
262 :
263 0 : if (length == 0) {
264 0 : return aMinCapacity;
265 : }
266 :
267 0 : if (length == 1) {
268 0 : return max(2 * mChunks[0].Capacity(), aMinCapacity);
269 : }
270 :
271 0 : return max(mChunks[length - 1].Capacity() + mChunks[length - 2].Capacity(),
272 0 : aMinCapacity);
273 : }
274 :
275 : void
276 0 : SourceBuffer::AddWaitingConsumer(IResumable* aConsumer)
277 : {
278 0 : mMutex.AssertCurrentThreadOwns();
279 :
280 0 : MOZ_ASSERT(!mStatus, "Waiting when we're complete?");
281 :
282 0 : if (aConsumer) {
283 0 : mWaitingConsumers.AppendElement(aConsumer);
284 : }
285 0 : }
286 :
287 : void
288 40 : SourceBuffer::ResumeWaitingConsumers()
289 : {
290 40 : mMutex.AssertCurrentThreadOwns();
291 :
292 40 : if (mWaitingConsumers.Length() == 0) {
293 40 : return;
294 : }
295 :
296 0 : for (uint32_t i = 0 ; i < mWaitingConsumers.Length() ; ++i) {
297 0 : mWaitingConsumers[i]->Resume();
298 : }
299 :
300 0 : mWaitingConsumers.Clear();
301 : }
302 :
303 : nsresult
304 19 : SourceBuffer::ExpectLength(size_t aExpectedLength)
305 : {
306 19 : MOZ_ASSERT(aExpectedLength > 0, "Zero expected size?");
307 :
308 38 : MutexAutoLock lock(mMutex);
309 :
310 19 : if (MOZ_UNLIKELY(mStatus)) {
311 0 : MOZ_ASSERT_UNREACHABLE("ExpectLength after SourceBuffer is complete");
312 : return NS_OK;
313 : }
314 :
315 19 : if (MOZ_UNLIKELY(mChunks.Length() > 0)) {
316 0 : MOZ_ASSERT_UNREACHABLE("Duplicate or post-Append call to ExpectLength");
317 : return NS_OK;
318 : }
319 :
320 19 : if (MOZ_UNLIKELY(NS_FAILED(AppendChunk(CreateChunk(aExpectedLength))))) {
321 0 : return HandleError(NS_ERROR_OUT_OF_MEMORY);
322 : }
323 :
324 19 : return NS_OK;
325 : }
326 :
/**
 * Appends @aLength bytes from @aData to the buffer. The bulk memcpy happens
 * *outside* the lock — safe only because there is a single producer (see the
 * comments and asserts below). The lock is taken briefly before the copy to
 * snapshot chunk bookkeeping, and again afterwards to publish the new lengths
 * and wake waiting consumers.
 */
nsresult
SourceBuffer::Append(const char* aData, size_t aLength)
{
  MOZ_ASSERT(aData, "Should have a buffer");
  MOZ_ASSERT(aLength > 0, "Writing a zero-sized chunk");

  // Snapshot of the current chunk's state, captured under the lock below so
  // the copy can proceed without it.
  size_t currentChunkCapacity = 0;
  size_t currentChunkLength = 0;
  char* currentChunkData = nullptr;
  size_t currentChunkRemaining = 0;
  size_t forCurrentChunk = 0;
  size_t forNextChunk = 0;
  size_t nextChunkCapacity = 0;

  {
    MutexAutoLock lock(mMutex);

    if (MOZ_UNLIKELY(mStatus)) {
      // This SourceBuffer is already complete; ignore further data.
      return NS_ERROR_FAILURE;
    }

    // First write with no ExpectLength(): lazily allocate an initial chunk
    // big enough for this data.
    if (MOZ_UNLIKELY(mChunks.Length() == 0)) {
      if (MOZ_UNLIKELY(NS_FAILED(AppendChunk(CreateChunk(aLength))))) {
        return HandleError(NS_ERROR_OUT_OF_MEMORY);
      }
    }

    // Copy out the current chunk's information so we can release the lock.
    // Note that this wouldn't be safe if multiple producers were allowed!
    Chunk& currentChunk = mChunks.LastElement();
    currentChunkCapacity = currentChunk.Capacity();
    currentChunkLength = currentChunk.Length();
    currentChunkData = currentChunk.Data();

    // Partition this data between the current chunk and the next chunk.
    // (Because we always allocate a chunk big enough to fit everything passed
    // to Append, we'll never need more than those two chunks to store
    // everything.)
    currentChunkRemaining = currentChunkCapacity - currentChunkLength;
    forCurrentChunk = min(aLength, currentChunkRemaining);
    forNextChunk = aLength - forCurrentChunk;

    // If we'll need another chunk, determine what its capacity should be while
    // we still hold the lock.
    nextChunkCapacity = forNextChunk > 0
                      ? FibonacciCapacityWithMinimum(forNextChunk)
                      : 0;
  }

  // Write everything we can fit into the current chunk.
  MOZ_ASSERT(currentChunkLength + forCurrentChunk <= currentChunkCapacity);
  memcpy(currentChunkData + currentChunkLength, aData, forCurrentChunk);

  // If there's something left, create a new chunk and write it there.
  // Allocation failure is detected later, under the lock.
  Maybe<Chunk> nextChunk;
  if (forNextChunk > 0) {
    MOZ_ASSERT(nextChunkCapacity >= forNextChunk, "Next chunk too small?");
    nextChunk = CreateChunk(nextChunkCapacity);
    if (MOZ_LIKELY(nextChunk && !nextChunk->AllocationFailed())) {
      memcpy(nextChunk->Data(), aData + forCurrentChunk, forNextChunk);
      nextChunk->AddLength(forNextChunk);
    }
  }

  // Update shared data structures.
  {
    MutexAutoLock lock(mMutex);

    // Update the length of the current chunk.
    Chunk& currentChunk = mChunks.LastElement();
    MOZ_ASSERT(currentChunk.Data() == currentChunkData, "Multiple producers?");
    MOZ_ASSERT(currentChunk.Length() == currentChunkLength,
               "Multiple producers?");

    currentChunk.AddLength(forCurrentChunk);

    // If we created a new chunk, add it to the series.
    if (forNextChunk > 0) {
      if (MOZ_UNLIKELY(!nextChunk)) {
        return HandleError(NS_ERROR_OUT_OF_MEMORY);
      }

      if (MOZ_UNLIKELY(NS_FAILED(AppendChunk(Move(nextChunk))))) {
        return HandleError(NS_ERROR_OUT_OF_MEMORY);
      }
    }

    // Resume any waiting readers now that there's new data.
    ResumeWaitingConsumers();
  }

  return NS_OK;
}
421 :
422 : static nsresult
423 20 : AppendToSourceBuffer(nsIInputStream*,
424 : void* aClosure,
425 : const char* aFromRawSegment,
426 : uint32_t,
427 : uint32_t aCount,
428 : uint32_t* aWriteCount)
429 : {
430 20 : SourceBuffer* sourceBuffer = static_cast<SourceBuffer*>(aClosure);
431 :
432 : // Copy the source data. Unless we hit OOM, we squelch the return value here,
433 : // because returning an error means that ReadSegments stops reading data, and
434 : // we want to ensure that we read everything we get. If we hit OOM then we
435 : // return a failed status to the caller.
436 20 : nsresult rv = sourceBuffer->Append(aFromRawSegment, aCount);
437 20 : if (rv == NS_ERROR_OUT_OF_MEMORY) {
438 0 : return rv;
439 : }
440 :
441 : // Report that we wrote everything we got.
442 20 : *aWriteCount = aCount;
443 :
444 20 : return NS_OK;
445 : }
446 :
447 : nsresult
448 19 : SourceBuffer::AppendFromInputStream(nsIInputStream* aInputStream,
449 : uint32_t aCount)
450 : {
451 : uint32_t bytesRead;
452 : nsresult rv = aInputStream->ReadSegments(AppendToSourceBuffer, this,
453 19 : aCount, &bytesRead);
454 19 : if (!NS_WARN_IF(NS_FAILED(rv))) {
455 19 : MOZ_ASSERT(bytesRead == aCount,
456 : "AppendToSourceBuffer should consume everything");
457 : }
458 19 : return rv;
459 : }
460 :
461 : void
462 20 : SourceBuffer::Complete(nsresult aStatus)
463 : {
464 21 : MutexAutoLock lock(mMutex);
465 :
466 20 : if (MOZ_UNLIKELY(mStatus)) {
467 0 : MOZ_ASSERT_UNREACHABLE("Called Complete more than once");
468 : return;
469 : }
470 :
471 20 : if (MOZ_UNLIKELY(NS_SUCCEEDED(aStatus) && IsEmpty())) {
472 : // It's illegal to succeed without writing anything.
473 0 : aStatus = NS_ERROR_FAILURE;
474 : }
475 :
476 20 : mStatus = Some(aStatus);
477 :
478 : // Resume any waiting consumers now that we're complete.
479 20 : ResumeWaitingConsumers();
480 :
481 : // If we still have active consumers, just return.
482 20 : if (mConsumerCount > 0) {
483 19 : return;
484 : }
485 :
486 : // Attempt to compact our buffer down to a single chunk.
487 1 : Compact();
488 : }
489 :
490 : bool
491 1 : SourceBuffer::IsComplete()
492 : {
493 2 : MutexAutoLock lock(mMutex);
494 2 : return bool(mStatus);
495 : }
496 :
497 : size_t
498 19 : SourceBuffer::SizeOfIncludingThisWithComputedFallback(MallocSizeOf
499 : aMallocSizeOf) const
500 : {
501 38 : MutexAutoLock lock(mMutex);
502 :
503 19 : size_t n = aMallocSizeOf(this);
504 19 : n += mChunks.ShallowSizeOfExcludingThis(aMallocSizeOf);
505 :
506 38 : for (uint32_t i = 0 ; i < mChunks.Length() ; ++i) {
507 19 : size_t chunkSize = aMallocSizeOf(mChunks[i].Data());
508 :
509 19 : if (chunkSize == 0) {
510 : // We're on a platform where moz_malloc_size_of always returns 0.
511 0 : chunkSize = mChunks[i].Capacity();
512 : }
513 :
514 19 : n += chunkSize;
515 : }
516 :
517 38 : return n;
518 : }
519 :
520 : SourceBufferIterator
521 33 : SourceBuffer::Iterator()
522 : {
523 : {
524 66 : MutexAutoLock lock(mMutex);
525 33 : mConsumerCount++;
526 : }
527 :
528 33 : return SourceBufferIterator(this);
529 : }
530 :
531 : void
532 33 : SourceBuffer::OnIteratorRelease()
533 : {
534 66 : MutexAutoLock lock(mMutex);
535 :
536 33 : MOZ_ASSERT(mConsumerCount > 0, "Consumer count doesn't add up");
537 33 : mConsumerCount--;
538 :
539 : // If we still have active consumers, or we're not complete yet, then return.
540 33 : if (mConsumerCount > 0 || !mStatus) {
541 0 : return;
542 : }
543 :
544 : // Attempt to compact our buffer down to a single chunk.
545 33 : Compact();
546 : }
547 :
/**
 * Returns true if the data remaining beyond @aIterator's current position
 * totals no more than @aBytes. Conservatively answers false whenever the
 * buffer isn't complete yet (more data could still arrive).
 */
bool
SourceBuffer::RemainingBytesIsNoMoreThan(const SourceBufferIterator& aIterator,
                                         size_t aBytes) const
{
  MutexAutoLock lock(mMutex);

  // If we're not complete, we always say no.
  if (!mStatus) {
    return false;
  }

  // If the iterator's at the end, the answer is trivial.
  if (!aIterator.HasMore()) {
    return true;
  }

  uint32_t iteratorChunk = aIterator.mData.mIterating.mChunk;
  size_t iteratorOffset = aIterator.mData.mIterating.mOffset;
  size_t iteratorLength = aIterator.mData.mIterating.mAvailableLength;

  // Include the bytes the iterator is currently pointing to in the limit, so
  // that the current chunk doesn't have to be a special case.
  size_t bytes = aBytes + iteratorOffset + iteratorLength;

  // Count the length over all of our chunks, starting with the one that the
  // iterator is currently pointing to. (This is O(N), but N is expected to be
  // ~1, so it doesn't seem worth caching the length separately.)
  size_t lengthSoFar = 0;
  for (uint32_t i = iteratorChunk ; i < mChunks.Length() ; ++i) {
    lengthSoFar += mChunks[i].Length();
    if (lengthSoFar > bytes) {
      return false;
    }
  }

  return true;
}
585 :
/**
 * Advances @aIterator under the lock. Outcomes, in order of checking:
 * error-complete (all reads fail), no chunks yet (WAITING), more data in the
 * current chunk (READY), a full current chunk with a successor (READY on the
 * next chunk), success-complete (COMPLETE), or no data yet (WAITING, with
 * @aConsumer recorded for a wakeup).
 */
SourceBufferIterator::State
SourceBuffer::AdvanceIteratorOrScheduleResume(SourceBufferIterator& aIterator,
                                              size_t aRequestedBytes,
                                              IResumable* aConsumer)
{
  MutexAutoLock lock(mMutex);

  MOZ_ASSERT(aIterator.HasMore(), "Advancing a completed iterator and "
                                  "AdvanceOrScheduleResume didn't catch it");

  if (MOZ_UNLIKELY(mStatus && NS_FAILED(*mStatus))) {
    // This SourceBuffer is complete due to an error; all reads fail.
    return aIterator.SetComplete(*mStatus);
  }

  if (MOZ_UNLIKELY(mChunks.Length() == 0)) {
    // We haven't gotten an initial chunk yet.
    AddWaitingConsumer(aConsumer);
    return aIterator.SetWaiting();
  }

  uint32_t iteratorChunkIdx = aIterator.mData.mIterating.mChunk;
  MOZ_ASSERT(iteratorChunkIdx < mChunks.Length());

  // iteratorEnd is the first byte of the current chunk the iterator has *not*
  // yet been given access to.
  const Chunk& currentChunk = mChunks[iteratorChunkIdx];
  size_t iteratorEnd = aIterator.mData.mIterating.mOffset +
                       aIterator.mData.mIterating.mAvailableLength;
  MOZ_ASSERT(iteratorEnd <= currentChunk.Length());
  MOZ_ASSERT(iteratorEnd <= currentChunk.Capacity());

  if (iteratorEnd < currentChunk.Length()) {
    // There's more data in the current chunk.
    return aIterator.SetReady(iteratorChunkIdx, currentChunk.Data(),
                              iteratorEnd, currentChunk.Length() - iteratorEnd,
                              aRequestedBytes);
  }

  if (iteratorEnd == currentChunk.Capacity() &&
      !IsLastChunk(iteratorChunkIdx)) {
    // Advance to the next chunk.
    const Chunk& nextChunk = mChunks[iteratorChunkIdx + 1];
    return aIterator.SetReady(iteratorChunkIdx + 1, nextChunk.Data(), 0,
                              nextChunk.Length(), aRequestedBytes);
  }

  MOZ_ASSERT(IsLastChunk(iteratorChunkIdx), "Should've advanced");

  if (mStatus) {
    // There's no more data and this SourceBuffer completed successfully.
    MOZ_ASSERT(NS_SUCCEEDED(*mStatus), "Handled failures earlier");
    return aIterator.SetComplete(*mStatus);
  }

  // We're not complete, but there's no more data right now. Arrange to wake up
  // the consumer when we get more data.
  AddWaitingConsumer(aConsumer);
  return aIterator.SetWaiting();
}
644 :
645 : nsresult
646 0 : SourceBuffer::HandleError(nsresult aError)
647 : {
648 0 : MOZ_ASSERT(NS_FAILED(aError), "Should have an error here");
649 0 : MOZ_ASSERT(aError == NS_ERROR_OUT_OF_MEMORY,
650 : "Unexpected error; may want to notify waiting readers, which "
651 : "HandleError currently doesn't do");
652 :
653 0 : mMutex.AssertCurrentThreadOwns();
654 :
655 0 : NS_WARNING("SourceBuffer encountered an unrecoverable error");
656 :
657 : // Record the error.
658 0 : mStatus = Some(aError);
659 :
660 : // Drop our references to waiting readers.
661 0 : mWaitingConsumers.Clear();
662 :
663 0 : return *mStatus;
664 : }
665 :
666 : bool
667 19 : SourceBuffer::IsEmpty()
668 : {
669 19 : mMutex.AssertCurrentThreadOwns();
670 38 : return mChunks.Length() == 0 ||
671 38 : mChunks[0].Length() == 0;
672 : }
673 :
674 : bool
675 0 : SourceBuffer::IsLastChunk(uint32_t aChunk)
676 : {
677 0 : mMutex.AssertCurrentThreadOwns();
678 0 : return aChunk + 1 == mChunks.Length();
679 : }
680 :
681 : } // namespace image
682 : } // namespace mozilla
|