Line data Source code
1 : /* This Source Code Form is subject to the terms of the Mozilla Public
2 : * License, v. 2.0. If a copy of the MPL was not distributed with this
3 : * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
4 :
5 : #include "CacheLog.h"
6 : #include "CacheFile.h"
7 :
8 : #include "CacheFileChunk.h"
9 : #include "CacheFileInputStream.h"
10 : #include "CacheFileOutputStream.h"
11 : #include "nsThreadUtils.h"
12 : #include "mozilla/DebugOnly.h"
13 : #include "mozilla/Move.h"
14 : #include <algorithm>
15 : #include "nsComponentManagerUtils.h"
16 : #include "nsProxyRelease.h"
17 : #include "mozilla/Telemetry.h"
18 :
19 : // When CACHE_CHUNKS is defined we always cache unused chunks in mCachedChunks.
20 : // When it is not defined, we release unused chunks as soon as possible, i.e. we
21 : // cache unused chunks only when:
22 : // - CacheFile is memory-only
23 : // - CacheFile is still waiting for the handle
24 : // - the chunk is preloaded
25 :
26 : //#define CACHE_CHUNKS
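// Note (added for orientation, not in the upstream comment): the resulting
// caching policy is implemented by ShouldCacheChunk() and MustKeepCachedChunk()
// further below in this file.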
27 :
28 : namespace mozilla {
29 : namespace net {
30 :
31 : class NotifyCacheFileListenerEvent : public Runnable {
32 : public:
33 0 : NotifyCacheFileListenerEvent(CacheFileListener* aCallback,
34 : nsresult aResult,
35 : bool aIsNew)
36 0 : : Runnable("net::NotifyCacheFileListenerEvent")
37 : , mCallback(aCallback)
38 : , mRV(aResult)
39 0 : , mIsNew(aIsNew)
40 : {
41 0 : LOG(("NotifyCacheFileListenerEvent::NotifyCacheFileListenerEvent() "
42 : "[this=%p]", this));
43 0 : }
44 :
45 : protected:
46 0 : ~NotifyCacheFileListenerEvent()
47 0 : {
48 0 : LOG(("NotifyCacheFileListenerEvent::~NotifyCacheFileListenerEvent() "
49 : "[this=%p]", this));
50 0 : }
51 :
52 : public:
53 0 : NS_IMETHOD Run() override
54 : {
55 0 : LOG(("NotifyCacheFileListenerEvent::Run() [this=%p]", this));
56 :
57 0 : mCallback->OnFileReady(mRV, mIsNew);
58 0 : return NS_OK;
59 : }
60 :
61 : protected:
62 : nsCOMPtr<CacheFileListener> mCallback;
63 : nsresult mRV;
64 : bool mIsNew;
65 : };
66 :
67 : class NotifyChunkListenerEvent : public Runnable {
68 : public:
69 2 : NotifyChunkListenerEvent(CacheFileChunkListener* aCallback,
70 : nsresult aResult,
71 : uint32_t aChunkIdx,
72 : CacheFileChunk* aChunk)
73 2 : : Runnable("net::NotifyChunkListenerEvent")
74 : , mCallback(aCallback)
75 : , mRV(aResult)
76 : , mChunkIdx(aChunkIdx)
77 2 : , mChunk(aChunk)
78 : {
79 2 : LOG(("NotifyChunkListenerEvent::NotifyChunkListenerEvent() [this=%p]",
80 : this));
81 2 : }
82 :
83 : protected:
84 4 : ~NotifyChunkListenerEvent()
85 4 : {
86 2 : LOG(("NotifyChunkListenerEvent::~NotifyChunkListenerEvent() [this=%p]",
87 : this));
88 6 : }
89 :
90 : public:
91 2 : NS_IMETHOD Run() override
92 : {
93 2 : LOG(("NotifyChunkListenerEvent::Run() [this=%p]", this));
94 :
95 2 : mCallback->OnChunkAvailable(mRV, mChunkIdx, mChunk);
96 2 : return NS_OK;
97 : }
98 :
99 : protected:
100 : nsCOMPtr<CacheFileChunkListener> mCallback;
101 : nsresult mRV;
102 : uint32_t mChunkIdx;
103 : RefPtr<CacheFileChunk> mChunk;
104 : };
105 :
106 :
107 : class DoomFileHelper : public CacheFileIOListener
108 : {
109 : public:
110 : NS_DECL_THREADSAFE_ISUPPORTS
111 :
112 0 : explicit DoomFileHelper(CacheFileListener *aListener)
113 0 : : mListener(aListener)
114 : {
115 0 : }
116 :
117 :
118 0 : NS_IMETHOD OnFileOpened(CacheFileHandle *aHandle, nsresult aResult) override
119 : {
120 0 : MOZ_CRASH("DoomFileHelper::OnFileOpened should not be called!");
121 : return NS_ERROR_UNEXPECTED;
122 : }
123 :
124 0 : NS_IMETHOD OnDataWritten(CacheFileHandle *aHandle, const char *aBuf,
125 : nsresult aResult) override
126 : {
127 0 : MOZ_CRASH("DoomFileHelper::OnDataWritten should not be called!");
128 : return NS_ERROR_UNEXPECTED;
129 : }
130 :
131 0 : NS_IMETHOD OnDataRead(CacheFileHandle *aHandle, char *aBuf, nsresult aResult) override
132 : {
133 0 : MOZ_CRASH("DoomFileHelper::OnDataRead should not be called!");
134 : return NS_ERROR_UNEXPECTED;
135 : }
136 :
137 0 : NS_IMETHOD OnFileDoomed(CacheFileHandle *aHandle, nsresult aResult) override
138 : {
139 0 : if (mListener)
140 0 : mListener->OnFileDoomed(aResult);
141 0 : return NS_OK;
142 : }
143 :
144 0 : NS_IMETHOD OnEOFSet(CacheFileHandle *aHandle, nsresult aResult) override
145 : {
146 0 : MOZ_CRASH("DoomFileHelper::OnEOFSet should not be called!");
147 : return NS_ERROR_UNEXPECTED;
148 : }
149 :
150 0 : NS_IMETHOD OnFileRenamed(CacheFileHandle *aHandle, nsresult aResult) override
151 : {
152 0 : MOZ_CRASH("DoomFileHelper::OnFileRenamed should not be called!");
153 : return NS_ERROR_UNEXPECTED;
154 : }
155 :
156 : private:
157 0 : virtual ~DoomFileHelper()
158 0 : {
159 0 : }
160 :
161 : nsCOMPtr<CacheFileListener> mListener;
162 : };
163 :
164 0 : NS_IMPL_ISUPPORTS(DoomFileHelper, CacheFileIOListener)
165 :
166 :
167 275 : NS_IMPL_ADDREF(CacheFile)
168 268 : NS_IMPL_RELEASE(CacheFile)
169 16 : NS_INTERFACE_MAP_BEGIN(CacheFile)
170 16 : NS_INTERFACE_MAP_ENTRY(mozilla::net::CacheFileChunkListener)
171 12 : NS_INTERFACE_MAP_ENTRY(mozilla::net::CacheFileIOListener)
172 7 : NS_INTERFACE_MAP_ENTRY(mozilla::net::CacheFileMetadataListener)
173 0 : NS_INTERFACE_MAP_ENTRY_AMBIGUOUS(nsISupports,
174 : mozilla::net::CacheFileChunkListener)
175 0 : NS_INTERFACE_MAP_END_THREADSAFE
176 :
177 5 : CacheFile::CacheFile()
178 : : mLock("CacheFile.mLock")
179 : , mOpeningFile(false)
180 : , mReady(false)
181 : , mMemoryOnly(false)
182 : , mSkipSizeCheck(false)
183 : , mOpenAsMemoryOnly(false)
184 : , mPinned(false)
185 : , mPriority(false)
186 : , mDataAccessed(false)
187 : , mDataIsDirty(false)
188 : , mWritingMetadata(false)
189 : , mPreloadWithoutInputStreams(true)
190 : , mPreloadChunkCount(0)
191 : , mStatus(NS_OK)
192 : , mDataSize(-1)
193 : , mAltDataOffset(-1)
194 : , mKill(false)
195 5 : , mOutput(nullptr)
196 : {
197 5 : LOG(("CacheFile::CacheFile() [this=%p]", this));
198 5 : }
199 :
200 0 : CacheFile::~CacheFile()
201 : {
202 0 : LOG(("CacheFile::~CacheFile() [this=%p]", this));
203 :
204 0 : MutexAutoLock lock(mLock);
205 0 : if (!mMemoryOnly && mReady && !mKill) {
206 :     // The mReady flag indicates we have the metadata and are in a valid state.
207 0 : WriteMetadataIfNeededLocked(true);
208 : }
209 0 : }
210 :
211 : nsresult
212 5 : CacheFile::Init(const nsACString &aKey,
213 : bool aCreateNew,
214 : bool aMemoryOnly,
215 : bool aSkipSizeCheck,
216 : bool aPriority,
217 : bool aPinned,
218 : CacheFileListener *aCallback)
219 : {
220 5 : MOZ_ASSERT(!mListener);
221 5 : MOZ_ASSERT(!mHandle);
222 :
223 5 : MOZ_ASSERT(!(aMemoryOnly && aPinned));
224 :
225 : nsresult rv;
226 :
227 5 : mKey = aKey;
228 5 : mOpenAsMemoryOnly = mMemoryOnly = aMemoryOnly;
229 5 : mSkipSizeCheck = aSkipSizeCheck;
230 5 : mPriority = aPriority;
231 5 : mPinned = aPinned;
232 :
233 :   // Some consumers (at least nsHTTPCompressConv) assume that Read() can read
234 :   // the amount of data that was announced by Available().
235 :   // CacheFileInputStream::Available() also uses preloaded chunks to compute the
236 :   // number of available bytes in the input stream, so we have to make sure
237 :   // mPreloadChunkCount won't change during the CacheFile's lifetime. Otherwise
238 :   // we could release some cached chunks that were used to calculate the
239 :   // available bytes but would no longer be available during a later call to
240 :   // CacheFileInputStream::Read().
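  // For example, if Available() counted two preloaded chunks, shrinking the
  // preload count and releasing one of those chunks before the subsequent
  // Read() could make Read() return less data than Available() announced.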
241 5 : mPreloadChunkCount = CacheObserver::PreloadChunkCount();
242 :
243 5 : LOG(("CacheFile::Init() [this=%p, key=%s, createNew=%d, memoryOnly=%d, "
244 : "priority=%d, listener=%p]", this, mKey.get(), aCreateNew, aMemoryOnly,
245 : aPriority, aCallback));
246 :
247 5 : if (mMemoryOnly) {
248 0 : MOZ_ASSERT(!aCallback);
249 :
250 0 : mMetadata = new CacheFileMetadata(mOpenAsMemoryOnly, false, mKey);
251 0 : mReady = true;
252 0 : mDataSize = mMetadata->Offset();
253 0 : return NS_OK;
254 : }
255 : else {
256 : uint32_t flags;
257 5 : if (aCreateNew) {
258 0 : MOZ_ASSERT(!aCallback);
259 0 : flags = CacheFileIOManager::CREATE_NEW;
260 :
261 : // make sure we can use this entry immediately
262 0 : mMetadata = new CacheFileMetadata(mOpenAsMemoryOnly, mPinned, mKey);
263 0 : mReady = true;
264 0 : mDataSize = mMetadata->Offset();
265 : } else {
266 5 : flags = CacheFileIOManager::CREATE;
267 : }
268 :
269 5 : if (mPriority) {
270 3 : flags |= CacheFileIOManager::PRIORITY;
271 : }
272 :
273 5 : if (mPinned) {
274 0 : flags |= CacheFileIOManager::PINNED;
275 : }
276 :
277 5 : mOpeningFile = true;
278 5 : mListener = aCallback;
279 5 : rv = CacheFileIOManager::OpenFile(mKey, flags, this);
280 5 : if (NS_FAILED(rv)) {
281 0 : mListener = nullptr;
282 0 : mOpeningFile = false;
283 :
284 0 : if (mPinned) {
285 0 : LOG(("CacheFile::Init() - CacheFileIOManager::OpenFile() failed "
286 : "but we want to pin, fail the file opening. [this=%p]", this));
287 0 : return NS_ERROR_NOT_AVAILABLE;
288 : }
289 :
290 0 : if (aCreateNew) {
291 0 : NS_WARNING("Forcing memory-only entry since OpenFile failed");
292 0 : LOG(("CacheFile::Init() - CacheFileIOManager::OpenFile() failed "
293 : "synchronously. We can continue in memory-only mode since "
294 : "aCreateNew == true. [this=%p]", this));
295 :
296 0 : mMemoryOnly = true;
297 : }
298 0 : else if (rv == NS_ERROR_NOT_INITIALIZED) {
299 : NS_WARNING("Forcing memory-only entry since CacheIOManager isn't "
300 0 : "initialized.");
301 0 : LOG(("CacheFile::Init() - CacheFileIOManager isn't initialized, "
302 : "initializing entry as memory-only. [this=%p]", this));
303 :
304 0 : mMemoryOnly = true;
305 0 : mMetadata = new CacheFileMetadata(mOpenAsMemoryOnly, mPinned, mKey);
306 0 : mReady = true;
307 0 : mDataSize = mMetadata->Offset();
308 :
309 0 : RefPtr<NotifyCacheFileListenerEvent> ev;
310 0 : ev = new NotifyCacheFileListenerEvent(aCallback, NS_OK, true);
311 0 : rv = NS_DispatchToCurrentThread(ev);
312 0 : NS_ENSURE_SUCCESS(rv, rv);
313 : }
314 : else {
315 0 : NS_ENSURE_SUCCESS(rv, rv);
316 : }
317 : }
318 : }
319 :
320 5 : return NS_OK;
321 : }
322 :
323 : nsresult
324 2 : CacheFile::OnChunkRead(nsresult aResult, CacheFileChunk *aChunk)
325 : {
326 4 : CacheFileAutoLock lock(this);
327 :
328 : nsresult rv;
329 :
330 2 : uint32_t index = aChunk->Index();
331 :
332 2 : LOG(("CacheFile::OnChunkRead() [this=%p, rv=0x%08" PRIx32 ", chunk=%p, idx=%u]",
333 : this, static_cast<uint32_t>(aResult), aChunk, index));
334 :
335 2 : if (aChunk->mDiscardedChunk) {
336 :     // We discard only unused chunks, so it must still be unused when reading
337 :     // the data finishes.
338 0 : MOZ_ASSERT(aChunk->mRefCnt == 2);
339 0 : aChunk->mActiveChunk = false;
340 0 : ReleaseOutsideLock(RefPtr<CacheFileChunkListener>(aChunk->mFile.forget()).forget());
341 :
342 0 : DebugOnly<bool> removed = mDiscardedChunks.RemoveElement(aChunk);
343 0 : MOZ_ASSERT(removed);
344 0 : return NS_OK;
345 : }
346 :
347 2 : if (NS_FAILED(aResult)) {
348 0 : SetError(aResult);
349 : }
350 :
351 2 : if (HaveChunkListeners(index)) {
352 2 : rv = NotifyChunkListeners(index, aResult, aChunk);
353 2 : NS_ENSURE_SUCCESS(rv, rv);
354 : }
355 :
356 2 : return NS_OK;
357 : }
358 :
359 : nsresult
360 2 : CacheFile::OnChunkWritten(nsresult aResult, CacheFileChunk *aChunk)
361 : {
362 :   // In case the chunk was reused, made dirty and released between the calls to
363 :   // CacheFileChunk::Write() and CacheFile::OnChunkWritten(), we must write
364 :   // the chunk to the disk again. If the chunk is unused but dirty, we simply
365 :   // addref and release the chunk (outside the lock), which ensures that
366 :   // CacheFile::DeactivateChunk() will be called again.
367 4 : RefPtr<CacheFileChunk> deactivateChunkAgain;
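  // Note: deactivateChunkAgain is declared before the CacheFileAutoLock below on
  // purpose - if it is set, its destructor runs only after the lock has been
  // released, which re-triggers CacheFile::DeactivateChunk() for the dirty chunk.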
368 :
369 4 : CacheFileAutoLock lock(this);
370 :
371 : nsresult rv;
372 :
373 2 : LOG(("CacheFile::OnChunkWritten() [this=%p, rv=0x%08" PRIx32 ", chunk=%p, idx=%u]",
374 : this, static_cast<uint32_t>(aResult), aChunk, aChunk->Index()));
375 :
376 2 : MOZ_ASSERT(!mMemoryOnly);
377 2 : MOZ_ASSERT(!mOpeningFile);
378 2 : MOZ_ASSERT(mHandle);
379 :
380 2 : if (aChunk->mDiscardedChunk) {
381 :     // We discard only unused chunks, so it must still be unused when writing
382 :     // the data finishes.
383 0 : MOZ_ASSERT(aChunk->mRefCnt == 2);
384 0 : aChunk->mActiveChunk = false;
385 0 : ReleaseOutsideLock(RefPtr<CacheFileChunkListener>(aChunk->mFile.forget()).forget());
386 :
387 0 : DebugOnly<bool> removed = mDiscardedChunks.RemoveElement(aChunk);
388 0 : MOZ_ASSERT(removed);
389 0 : return NS_OK;
390 : }
391 :
392 2 : if (NS_FAILED(aResult)) {
393 0 : SetError(aResult);
394 : }
395 :
396 2 : if (NS_SUCCEEDED(aResult) && !aChunk->IsDirty()) {
397 : // update hash value in metadata
398 2 : mMetadata->SetHash(aChunk->Index(), aChunk->Hash());
399 : }
400 :
401 :   // notify listeners if there are any
402 2 : if (HaveChunkListeners(aChunk->Index())) {
403 : // don't release the chunk since there are some listeners queued
404 0 : rv = NotifyChunkListeners(aChunk->Index(), aResult, aChunk);
405 0 : if (NS_SUCCEEDED(rv)) {
406 0 : MOZ_ASSERT(aChunk->mRefCnt != 2);
407 0 : return NS_OK;
408 : }
409 : }
410 :
411 2 : if (aChunk->mRefCnt != 2) {
412 0 : LOG(("CacheFile::OnChunkWritten() - Chunk is still used [this=%p, chunk=%p,"
413 : " refcnt=%" PRIuPTR "]", this, aChunk, aChunk->mRefCnt.get()));
414 :
415 0 : return NS_OK;
416 : }
417 :
418 2 : if (aChunk->IsDirty()) {
419 0 : LOG(("CacheFile::OnChunkWritten() - Unused chunk is dirty. We must go "
420 : "through deactivation again. [this=%p, chunk=%p]", this, aChunk));
421 :
422 0 : deactivateChunkAgain = aChunk;
423 0 : return NS_OK;
424 : }
425 :
426 2 : bool keepChunk = false;
427 2 : if (NS_SUCCEEDED(aResult)) {
428 2 : keepChunk = ShouldCacheChunk(aChunk->Index());
429 2 : LOG(("CacheFile::OnChunkWritten() - %s unused chunk [this=%p, chunk=%p]",
430 : keepChunk ? "Caching" : "Releasing", this, aChunk));
431 : } else {
432 0 : LOG(("CacheFile::OnChunkWritten() - Releasing failed chunk [this=%p, "
433 : "chunk=%p]", this, aChunk));
434 : }
435 :
436 2 : RemoveChunkInternal(aChunk, keepChunk);
437 :
438 2 : WriteMetadataIfNeededLocked();
439 :
440 2 : return NS_OK;
441 : }
442 :
443 : nsresult
444 0 : CacheFile::OnChunkAvailable(nsresult aResult, uint32_t aChunkIdx,
445 : CacheFileChunk *aChunk)
446 : {
447 0 : MOZ_CRASH("CacheFile::OnChunkAvailable should not be called!");
448 : return NS_ERROR_UNEXPECTED;
449 : }
450 :
451 : nsresult
452 0 : CacheFile::OnChunkUpdated(CacheFileChunk *aChunk)
453 : {
454 0 : MOZ_CRASH("CacheFile::OnChunkUpdated should not be called!");
455 : return NS_ERROR_UNEXPECTED;
456 : }
457 :
458 : nsresult
459 5 : CacheFile::OnFileOpened(CacheFileHandle *aHandle, nsresult aResult)
460 : {
461 : nsresult rv;
462 :
463 :   // Use an 'auto' (RAII) class to doom the file or fail the listener
464 :   // outside the CacheFile's lock.
465 : class AutoFailDoomListener
466 : {
467 : public:
468 5 : explicit AutoFailDoomListener(CacheFileHandle *aHandle)
469 5 : : mHandle(aHandle)
470 5 : , mAlreadyDoomed(false)
471 5 : {}
472 5 : ~AutoFailDoomListener()
473 5 : {
474 5 : if (!mListener)
475 5 : return;
476 :
477 0 : if (mHandle) {
478 0 : if (mAlreadyDoomed) {
479 0 : mListener->OnFileDoomed(mHandle, NS_OK);
480 : } else {
481 0 : CacheFileIOManager::DoomFile(mHandle, mListener);
482 : }
483 : } else {
484 0 : mListener->OnFileDoomed(nullptr, NS_ERROR_NOT_AVAILABLE);
485 : }
486 5 : }
487 :
488 : CacheFileHandle* mHandle;
489 : nsCOMPtr<CacheFileIOListener> mListener;
490 : bool mAlreadyDoomed;
491 10 : } autoDoom(aHandle);
492 :
493 10 : nsCOMPtr<CacheFileListener> listener;
494 5 : bool isNew = false;
495 5 : nsresult retval = NS_OK;
496 :
497 : {
498 10 : CacheFileAutoLock lock(this);
499 :
500 5 : MOZ_ASSERT(mOpeningFile);
501 5 : MOZ_ASSERT((NS_SUCCEEDED(aResult) && aHandle) ||
502 : (NS_FAILED(aResult) && !aHandle));
503 5 : MOZ_ASSERT((mListener && !mMetadata) || // !createNew
504 : (!mListener && mMetadata)); // createNew
505 5 : MOZ_ASSERT(!mMemoryOnly || mMetadata); // memory-only was set on new entry
506 :
507 5 : LOG(("CacheFile::OnFileOpened() [this=%p, rv=0x%08" PRIx32 ", handle=%p]",
508 : this, static_cast<uint32_t>(aResult), aHandle));
509 :
510 5 : mOpeningFile = false;
511 :
512 5 : autoDoom.mListener.swap(mDoomAfterOpenListener);
513 :
514 5 : if (mMemoryOnly) {
515 :       // We can be here only if the entry was initialized as createNew and
516 : // SetMemoryOnly() was called.
517 :
518 : // Just don't store the handle into mHandle and exit
519 0 : autoDoom.mAlreadyDoomed = true;
520 0 : return NS_OK;
521 : }
522 :
523 5 : if (NS_FAILED(aResult)) {
524 0 : if (mMetadata) {
525 : // This entry was initialized as createNew, just switch to memory-only
526 : // mode.
527 0 : NS_WARNING("Forcing memory-only entry since OpenFile failed");
528 0 : LOG(("CacheFile::OnFileOpened() - CacheFileIOManager::OpenFile() "
529 : "failed asynchronously. We can continue in memory-only mode since "
530 : "aCreateNew == true. [this=%p]", this));
531 :
532 0 : mMemoryOnly = true;
533 0 : return NS_OK;
534 : }
535 :
536 0 : if (aResult == NS_ERROR_FILE_INVALID_PATH) {
537 : // CacheFileIOManager doesn't have mCacheDirectory, switch to
538 : // memory-only mode.
539 : NS_WARNING("Forcing memory-only entry since CacheFileIOManager doesn't "
540 0 : "have mCacheDirectory.");
541 0 : LOG(("CacheFile::OnFileOpened() - CacheFileIOManager doesn't have "
542 : "mCacheDirectory, initializing entry as memory-only. [this=%p]",
543 : this));
544 :
545 0 : mMemoryOnly = true;
546 0 : mMetadata = new CacheFileMetadata(mOpenAsMemoryOnly, mPinned, mKey);
547 0 : mReady = true;
548 0 : mDataSize = mMetadata->Offset();
549 :
550 0 : isNew = true;
551 0 : retval = NS_OK;
552 : } else {
553 : // CacheFileIOManager::OpenFile() failed for another reason.
554 0 : isNew = false;
555 0 : retval = aResult;
556 : }
557 :
558 0 : mListener.swap(listener);
559 : } else {
560 5 : mHandle = aHandle;
561 5 : if (NS_FAILED(mStatus)) {
562 0 : CacheFileIOManager::DoomFile(mHandle, nullptr);
563 : }
564 :
565 5 : if (mMetadata) {
566 0 : InitIndexEntry();
567 :
568 : // The entry was initialized as createNew, don't try to read metadata.
569 0 : mMetadata->SetHandle(mHandle);
570 :
571 : // Write all cached chunks, otherwise they may stay unwritten.
572 0 : for (auto iter = mCachedChunks.Iter(); !iter.Done(); iter.Next()) {
573 0 : uint32_t idx = iter.Key();
574 0 : const RefPtr<CacheFileChunk>& chunk = iter.Data();
575 :
576 0 : LOG(("CacheFile::OnFileOpened() - write [this=%p, idx=%u, chunk=%p]",
577 : this, idx, chunk.get()));
578 :
579 0 : mChunks.Put(idx, chunk);
580 0 : chunk->mFile = this;
581 0 : chunk->mActiveChunk = true;
582 :
583 0 : MOZ_ASSERT(chunk->IsReady());
584 :
585 : // This would be cleaner if we had an nsRefPtr constructor that took
586 : // a RefPtr<Derived>.
587 0 : ReleaseOutsideLock(RefPtr<nsISupports>(chunk));
588 :
589 0 : iter.Remove();
590 : }
591 :
592 0 : return NS_OK;
593 : }
594 : }
595 : }
596 :
597 5 : if (listener) {
598 0 : listener->OnFileReady(retval, isNew);
599 0 : return NS_OK;
600 : }
601 :
602 5 : MOZ_ASSERT(NS_SUCCEEDED(aResult));
603 5 : MOZ_ASSERT(!mMetadata);
604 5 : MOZ_ASSERT(mListener);
605 :
606 10 : mMetadata = new CacheFileMetadata(mHandle, mKey);
607 :
608 5 : rv = mMetadata->ReadMetadata(this);
609 5 : if (NS_FAILED(rv)) {
610 0 : mListener.swap(listener);
611 0 : listener->OnFileReady(rv, false);
612 : }
613 :
614 5 : return NS_OK;
615 : }
616 :
617 : nsresult
618 0 : CacheFile::OnDataWritten(CacheFileHandle *aHandle, const char *aBuf,
619 : nsresult aResult)
620 : {
621 0 : MOZ_CRASH("CacheFile::OnDataWritten should not be called!");
622 : return NS_ERROR_UNEXPECTED;
623 : }
624 :
625 : nsresult
626 0 : CacheFile::OnDataRead(CacheFileHandle *aHandle, char *aBuf, nsresult aResult)
627 : {
628 0 : MOZ_CRASH("CacheFile::OnDataRead should not be called!");
629 : return NS_ERROR_UNEXPECTED;
630 : }
631 :
632 : nsresult
633 5 : CacheFile::OnMetadataRead(nsresult aResult)
634 : {
635 5 : MOZ_ASSERT(mListener);
636 :
637 5 : LOG(("CacheFile::OnMetadataRead() [this=%p, rv=0x%08" PRIx32 "]",
638 : this, static_cast<uint32_t>(aResult)));
639 :
640 5 : bool isNew = false;
641 5 : if (NS_SUCCEEDED(aResult)) {
642 5 : mPinned = mMetadata->Pinned();
643 5 : mReady = true;
644 5 : mDataSize = mMetadata->Offset();
645 5 : if (mDataSize == 0 && mMetadata->ElementsSize() == 0) {
646 2 : isNew = true;
647 2 : mMetadata->MarkDirty();
648 : } else {
649 3 : const char *altData = mMetadata->GetElement(CacheFileUtils::kAltDataKey);
650 3 : if (altData &&
651 0 : (NS_FAILED(CacheFileUtils::ParseAlternativeDataInfo(
652 0 : altData, &mAltDataOffset, nullptr)) ||
653 0 : (mAltDataOffset > mDataSize))) {
654 : // alt-metadata cannot be parsed or alt-data offset is invalid
655 0 : mMetadata->InitEmptyMetadata();
656 0 : isNew = true;
657 0 : mAltDataOffset = -1;
658 0 : mDataSize = 0;
659 : } else {
660 6 : CacheFileAutoLock lock(this);
661 3 : PreloadChunks(0);
662 : }
663 : }
664 :
665 5 : InitIndexEntry();
666 : }
667 :
668 10 : nsCOMPtr<CacheFileListener> listener;
669 5 : mListener.swap(listener);
670 5 : listener->OnFileReady(aResult, isNew);
671 10 : return NS_OK;
672 : }
673 :
674 : nsresult
675 4 : CacheFile::OnMetadataWritten(nsresult aResult)
676 : {
677 8 : CacheFileAutoLock lock(this);
678 :
679 4 : LOG(("CacheFile::OnMetadataWritten() [this=%p, rv=0x%08" PRIx32 "]",
680 : this, static_cast<uint32_t>(aResult)));
681 :
682 4 : MOZ_ASSERT(mWritingMetadata);
683 4 : mWritingMetadata = false;
684 :
685 4 : MOZ_ASSERT(!mMemoryOnly);
686 4 : MOZ_ASSERT(!mOpeningFile);
687 :
688 4 : if (NS_WARN_IF(NS_FAILED(aResult))) {
689 : // TODO close streams with an error ???
690 0 : SetError(aResult);
691 : }
692 :
693 4 : if (mOutput || mInputs.Length() || mChunks.Count())
694 0 : return NS_OK;
695 :
696 4 : if (IsDirty())
697 0 : WriteMetadataIfNeededLocked();
698 :
699 4 : if (!mWritingMetadata) {
700 4 : LOG(("CacheFile::OnMetadataWritten() - Releasing file handle [this=%p]",
701 : this));
702 4 : CacheFileIOManager::ReleaseNSPRHandle(mHandle);
703 : }
704 :
705 4 : return NS_OK;
706 : }
707 :
708 : nsresult
709 0 : CacheFile::OnFileDoomed(CacheFileHandle *aHandle, nsresult aResult)
710 : {
711 0 : nsCOMPtr<CacheFileListener> listener;
712 :
713 : {
714 0 : CacheFileAutoLock lock(this);
715 :
716 0 : MOZ_ASSERT(mListener);
717 :
718 0 : LOG(("CacheFile::OnFileDoomed() [this=%p, rv=0x%08" PRIx32 ", handle=%p]",
719 : this, static_cast<uint32_t>(aResult), aHandle));
720 :
721 0 : mListener.swap(listener);
722 : }
723 :
724 0 : listener->OnFileDoomed(aResult);
725 0 : return NS_OK;
726 : }
727 :
728 : nsresult
729 0 : CacheFile::OnEOFSet(CacheFileHandle *aHandle, nsresult aResult)
730 : {
731 0 : MOZ_CRASH("CacheFile::OnEOFSet should not be called!");
732 : return NS_ERROR_UNEXPECTED;
733 : }
734 :
735 : nsresult
736 0 : CacheFile::OnFileRenamed(CacheFileHandle *aHandle, nsresult aResult)
737 : {
738 0 : MOZ_CRASH("CacheFile::OnFileRenamed should not be called!");
739 : return NS_ERROR_UNEXPECTED;
740 : }
741 :
742 17 : bool CacheFile::IsKilled()
743 : {
744 17 : bool killed = mKill;
745 17 : if (killed) {
746 0 : LOG(("CacheFile is killed, this=%p", this));
747 : }
748 :
749 17 : return killed;
750 : }
751 :
752 : nsresult
753 4 : CacheFile::OpenInputStream(nsICacheEntry *aEntryHandle, nsIInputStream **_retval)
754 : {
755 8 : CacheFileAutoLock lock(this);
756 :
757 4 : MOZ_ASSERT(mHandle || mMemoryOnly || mOpeningFile);
758 :
759 4 : if (!mReady) {
760 0 : LOG(("CacheFile::OpenInputStream() - CacheFile is not ready [this=%p]",
761 : this));
762 :
763 0 : return NS_ERROR_NOT_AVAILABLE;
764 : }
765 :
766 4 : if (NS_FAILED(mStatus)) {
767 0 : LOG(("CacheFile::OpenInputStream() - CacheFile is in a failure state "
768 : "[this=%p, status=0x%08" PRIx32 "]", this, static_cast<uint32_t>(mStatus)));
769 :
770 : // Don't allow opening the input stream when this CacheFile is in
771 : // a failed state. This is the only way to protect consumers correctly
772 : // from reading a broken entry. When the file is in the failed state,
773 :     // it's also doomed, so reopening the entry won't make any difference -
774 :     // the data will still be inaccessible. Note that for files that are merely
775 :     // doomed, we must allow reading the data.
776 0 : return mStatus;
777 : }
778 :
779 :   // Once we open an input stream we no longer allow preloading of chunks
780 :   // without an input stream, i.e. we will no longer keep the first few chunks
781 :   // preloaded when the last input stream is closed.
782 4 : mPreloadWithoutInputStreams = false;
783 :
784 : CacheFileInputStream *input = new CacheFileInputStream(this, aEntryHandle,
785 4 : false);
786 4 : LOG(("CacheFile::OpenInputStream() - Creating new input stream %p [this=%p]",
787 : input, this));
788 :
789 4 : mInputs.AppendElement(input);
790 4 : NS_ADDREF(input);
791 :
792 4 : mDataAccessed = true;
793 4 : NS_ADDREF(*_retval = input);
794 4 : return NS_OK;
795 : }
796 :
797 : nsresult
798 0 : CacheFile::OpenAlternativeInputStream(nsICacheEntry *aEntryHandle,
799 : const char *aAltDataType,
800 : nsIInputStream **_retval)
801 : {
802 0 : CacheFileAutoLock lock(this);
803 :
804 0 : MOZ_ASSERT(mHandle || mMemoryOnly || mOpeningFile);
805 :
806 : nsresult rv;
807 :
808 0 : if (NS_WARN_IF(!mReady)) {
809 0 : LOG(("CacheFile::OpenAlternativeInputStream() - CacheFile is not ready "
810 : "[this=%p]", this));
811 0 : return NS_ERROR_NOT_AVAILABLE;
812 : }
813 :
814 0 : if (mAltDataOffset == -1) {
815 0 : LOG(("CacheFile::OpenAlternativeInputStream() - Alternative data is not "
816 : "available [this=%p]", this));
817 0 : return NS_ERROR_NOT_AVAILABLE;
818 : }
819 :
820 0 : if (NS_FAILED(mStatus)) {
821 0 : LOG(("CacheFile::OpenAlternativeInputStream() - CacheFile is in a failure "
822 : "state [this=%p, status=0x%08" PRIx32 "]", this, static_cast<uint32_t>(mStatus)));
823 :
824 : // Don't allow opening the input stream when this CacheFile is in
825 : // a failed state. This is the only way to protect consumers correctly
826 : // from reading a broken entry. When the file is in the failed state,
827 :     // it's also doomed, so reopening the entry won't make any difference -
828 :     // the data will still be inaccessible. Note that for files that are merely
829 :     // doomed, we must allow reading the data.
830 0 : return mStatus;
831 : }
832 :
833 0 : const char *altData = mMetadata->GetElement(CacheFileUtils::kAltDataKey);
834 0 : MOZ_ASSERT(altData, "alt-metadata should exist but was not found!");
835 0 : if (NS_WARN_IF(!altData)) {
836 0 : LOG(("CacheFile::OpenAlternativeInputStream() - alt-metadata not found but "
837 : "alt-data exists according to mAltDataOffset! [this=%p, ]", this));
838 0 : return NS_ERROR_NOT_AVAILABLE;
839 : }
840 :
841 : int64_t offset;
842 0 : nsCString availableAltData;
843 : rv = CacheFileUtils::ParseAlternativeDataInfo(altData, &offset,
844 0 : &availableAltData);
845 0 : if (NS_WARN_IF(NS_FAILED(rv))) {
846 0 : MOZ_ASSERT(false, "alt-metadata unexpectedly failed to parse");
847 : LOG(("CacheFile::OpenAlternativeInputStream() - Cannot parse alternative "
848 : "metadata! [this=%p]", this));
849 : return rv;
850 : }
851 :
852 0 : if (availableAltData != aAltDataType) {
853 0 : LOG(("CacheFile::OpenAlternativeInputStream() - Alternative data is of a "
854 : "different type than requested [this=%p, availableType=%s, "
855 : "requestedType=%s]", this, availableAltData.get(), aAltDataType));
856 0 : return NS_ERROR_NOT_AVAILABLE;
857 : }
858 :
859 : // mAltDataOffset must be in sync with what is stored in metadata
860 0 : MOZ_ASSERT(mAltDataOffset == offset);
861 :
862 :   // Once we open an input stream we no longer allow preloading of chunks
863 :   // without an input stream, i.e. we will no longer keep the first few chunks
864 :   // preloaded when the last input stream is closed.
865 0 : mPreloadWithoutInputStreams = false;
866 :
867 0 : CacheFileInputStream *input = new CacheFileInputStream(this, aEntryHandle, true);
868 :
869 0 : LOG(("CacheFile::OpenAlternativeInputStream() - Creating new input stream %p "
870 : "[this=%p]", input, this));
871 :
872 0 : mInputs.AppendElement(input);
873 0 : NS_ADDREF(input);
874 :
875 0 : mDataAccessed = true;
876 0 : NS_ADDREF(*_retval = input);
877 0 : return NS_OK;
878 : }
879 :
880 : nsresult
881 2 : CacheFile::OpenOutputStream(CacheOutputCloseListener *aCloseListener, nsIOutputStream **_retval)
882 : {
883 4 : CacheFileAutoLock lock(this);
884 :
885 2 : MOZ_ASSERT(mHandle || mMemoryOnly || mOpeningFile);
886 :
887 : nsresult rv;
888 :
889 2 : if (!mReady) {
890 0 : LOG(("CacheFile::OpenOutputStream() - CacheFile is not ready [this=%p]",
891 : this));
892 :
893 0 : return NS_ERROR_NOT_AVAILABLE;
894 : }
895 :
896 2 : if (mOutput) {
897 0 : LOG(("CacheFile::OpenOutputStream() - We already have output stream %p "
898 : "[this=%p]", mOutput, this));
899 :
900 0 : return NS_ERROR_NOT_AVAILABLE;
901 : }
902 :
903 2 : if (NS_FAILED(mStatus)) {
904 0 : LOG(("CacheFile::OpenOutputStream() - CacheFile is in a failure state "
905 : "[this=%p, status=0x%08" PRIx32 "]", this,
906 : static_cast<uint32_t>(mStatus)));
907 :
908 :     // The CacheFile is already doomed. It makes no sense to allow writing any
909 :     // data to such an entry.
910 0 : return mStatus;
911 : }
912 :
913 : // Fail if there is any input stream opened for alternative data
914 3 : for (uint32_t i = 0; i < mInputs.Length(); ++i) {
915 1 : if (mInputs[i]->IsAlternativeData()) {
916 0 : return NS_ERROR_NOT_AVAILABLE;
917 : }
918 : }
919 :
920 2 : if (mAltDataOffset != -1) {
921 : // Remove alt-data
922 0 : rv = Truncate(mAltDataOffset);
923 0 : if (NS_FAILED(rv)) {
924 0 : LOG(("CacheFile::OpenOutputStream() - Truncating alt-data failed "
925 : "[rv=0x%08" PRIx32 "]", static_cast<uint32_t>(rv)));
926 0 : return rv;
927 : }
928 0 : SetAltMetadata(nullptr);
929 0 : mAltDataOffset = -1;
930 : }
931 :
932 :   // Once we open an output stream we no longer allow preloading of chunks
933 :   // without an input stream. There is no reason to believe that an input
934 :   // stream will be opened soon. Otherwise we would cache unused chunks of all
935 :   // newly created entries until the CacheFile is destroyed.
936 2 : mPreloadWithoutInputStreams = false;
937 :
938 2 : mOutput = new CacheFileOutputStream(this, aCloseListener, false);
939 :
940 2 : LOG(("CacheFile::OpenOutputStream() - Creating new output stream %p "
941 : "[this=%p]", mOutput, this));
942 :
943 2 : mDataAccessed = true;
944 2 : NS_ADDREF(*_retval = mOutput);
945 2 : return NS_OK;
946 : }
947 :
948 : nsresult
949 0 : CacheFile::OpenAlternativeOutputStream(CacheOutputCloseListener *aCloseListener,
950 : const char *aAltDataType,
951 : nsIOutputStream **_retval)
952 : {
953 0 : CacheFileAutoLock lock(this);
954 :
955 0 : MOZ_ASSERT(mHandle || mMemoryOnly || mOpeningFile);
956 :
957 0 : if (!mReady) {
958 0 : LOG(("CacheFile::OpenAlternativeOutputStream() - CacheFile is not ready "
959 : "[this=%p]", this));
960 :
961 0 : return NS_ERROR_NOT_AVAILABLE;
962 : }
963 :
964 0 : if (mOutput) {
965 0 : LOG(("CacheFile::OpenAlternativeOutputStream() - We already have output "
966 : "stream %p [this=%p]", mOutput, this));
967 :
968 0 : return NS_ERROR_NOT_AVAILABLE;
969 : }
970 :
971 0 : if (NS_FAILED(mStatus)) {
972 0 : LOG(("CacheFile::OpenAlternativeOutputStream() - CacheFile is in a failure "
973 : "state [this=%p, status=0x%08" PRIx32 "]", this,
974 : static_cast<uint32_t>(mStatus)));
975 :
976 :     // The CacheFile is already doomed. It makes no sense to allow writing any
977 :     // data to such an entry.
978 0 : return mStatus;
979 : }
980 :
981 : // Fail if there is any input stream opened for alternative data
982 0 : for (uint32_t i = 0; i < mInputs.Length(); ++i) {
983 0 : if (mInputs[i]->IsAlternativeData()) {
984 0 : return NS_ERROR_NOT_AVAILABLE;
985 : }
986 : }
987 :
988 : nsresult rv;
989 :
990 0 : if (mAltDataOffset != -1) {
991 : // Truncate old alt-data
992 0 : rv = Truncate(mAltDataOffset);
993 0 : if (NS_FAILED(rv)) {
994 0 : LOG(("CacheFile::OpenAlternativeOutputStream() - Truncating old alt-data "
995 : "failed [rv=0x%08" PRIx32 "]", static_cast<uint32_t>(rv)));
996 0 : return rv;
997 : }
998 : } else {
999 0 : mAltDataOffset = mDataSize;
1000 : }
1001 :
1002 0 : nsAutoCString altMetadata;
1003 0 : CacheFileUtils::BuildAlternativeDataInfo(aAltDataType, mAltDataOffset,
1004 0 : altMetadata);
1005 0 : rv = SetAltMetadata(altMetadata.get());
1006 0 : if (NS_FAILED(rv)) {
1007 0 : LOG(("CacheFile::OpenAlternativeOutputStream() - Set Metadata for alt-data"
1008 : "failed [rv=0x%08" PRIx32 "]", static_cast<uint32_t>(rv)));
1009 0 : return rv;
1010 : }
1011 :
1012 :   // Once we open an output stream we no longer allow preloading of chunks
1013 :   // without an input stream. There is no reason to believe that an input
1014 :   // stream will be opened soon. Otherwise we would cache unused chunks of all
1015 :   // newly created entries until the CacheFile is destroyed.
1016 0 : mPreloadWithoutInputStreams = false;
1017 :
1018 0 : mOutput = new CacheFileOutputStream(this, aCloseListener, true);
1019 :
1020 0 : LOG(("CacheFile::OpenAlternativeOutputStream() - Creating new output stream "
1021 : "%p [this=%p]", mOutput, this));
1022 :
1023 0 : mDataAccessed = true;
1024 0 : NS_ADDREF(*_retval = mOutput);
1025 0 : return NS_OK;
1026 : }
1027 :
1028 : nsresult
1029 0 : CacheFile::SetMemoryOnly()
1030 : {
1031 0 : LOG(("CacheFile::SetMemoryOnly() mMemoryOnly=%d [this=%p]",
1032 : mMemoryOnly, this));
1033 :
1034 0 : if (mMemoryOnly)
1035 0 : return NS_OK;
1036 :
1037 0 : MOZ_ASSERT(mReady);
1038 :
1039 0 : if (!mReady) {
1040 0 : LOG(("CacheFile::SetMemoryOnly() - CacheFile is not ready [this=%p]",
1041 : this));
1042 :
1043 0 : return NS_ERROR_NOT_AVAILABLE;
1044 : }
1045 :
1046 0 : if (mDataAccessed) {
1047 0 : LOG(("CacheFile::SetMemoryOnly() - Data was already accessed [this=%p]", this));
1048 0 : return NS_ERROR_NOT_AVAILABLE;
1049 : }
1050 :
1051 : // TODO what to do when this isn't a new entry and has an existing metadata???
1052 0 : mMemoryOnly = true;
1053 0 : return NS_OK;
1054 : }
1055 :
1056 : nsresult
1057 0 : CacheFile::Doom(CacheFileListener *aCallback)
1058 : {
1059 0 : LOG(("CacheFile::Doom() [this=%p, listener=%p]", this, aCallback));
1060 :
1061 0 : CacheFileAutoLock lock(this);
1062 :
1063 0 : return DoomLocked(aCallback);
1064 : }
1065 :
1066 : nsresult
1067 0 : CacheFile::DoomLocked(CacheFileListener *aCallback)
1068 : {
1069 0 : MOZ_ASSERT(mHandle || mMemoryOnly || mOpeningFile);
1070 :
1071 0 : LOG(("CacheFile::DoomLocked() [this=%p, listener=%p]", this, aCallback));
1072 :
1073 0 : nsresult rv = NS_OK;
1074 :
1075 0 : if (mMemoryOnly) {
1076 0 : return NS_ERROR_FILE_NOT_FOUND;
1077 : }
1078 :
1079 0 : if (mHandle && mHandle->IsDoomed()) {
1080 0 : return NS_ERROR_FILE_NOT_FOUND;
1081 : }
1082 :
1083 0 : nsCOMPtr<CacheFileIOListener> listener;
1084 0 : if (aCallback || !mHandle) {
1085 0 : listener = new DoomFileHelper(aCallback);
1086 : }
1087 0 : if (mHandle) {
1088 0 : rv = CacheFileIOManager::DoomFile(mHandle, listener);
1089 0 : } else if (mOpeningFile) {
1090 0 : mDoomAfterOpenListener = listener;
1091 : }
1092 :
1093 0 : return rv;
1094 : }
1095 :
1096 : nsresult
1097 0 : CacheFile::ThrowMemoryCachedData()
1098 : {
1099 0 : CacheFileAutoLock lock(this);
1100 :
1101 0 : LOG(("CacheFile::ThrowMemoryCachedData() [this=%p]", this));
1102 :
1103 0 : if (mMemoryOnly) {
1104 : // This method should not be called when the CacheFile was initialized as
1105 :     // memory-only, but it can be called when the CacheFile ends up memory-only
1106 :     // due to e.g. an IO failure, since CacheEntry doesn't know about it.
1107 0 : LOG(("CacheFile::ThrowMemoryCachedData() - Ignoring request because the "
1108 : "entry is memory-only. [this=%p]", this));
1109 :
1110 0 : return NS_ERROR_NOT_AVAILABLE;
1111 : }
1112 :
1113 0 : if (mOpeningFile) {
1114 : // mayhemer, note: we shouldn't get here, since CacheEntry prevents loading
1115 : // entries from being purged.
1116 :
1117 0 : LOG(("CacheFile::ThrowMemoryCachedData() - Ignoring request because the "
1118 : "entry is still opening the file [this=%p]", this));
1119 :
1120 0 : return NS_ERROR_ABORT;
1121 : }
1122 :
1123 : // We cannot release all cached chunks since we need to keep preloaded chunks
1124 : // in memory. See initialization of mPreloadChunkCount for explanation.
1125 0 : CleanUpCachedChunks();
1126 :
1127 0 : return NS_OK;
1128 : }
1129 :
1130 : nsresult
1131 30 : CacheFile::GetElement(const char *aKey, char **_retval)
1132 : {
1133 60 : CacheFileAutoLock lock(this);
1134 30 : MOZ_ASSERT(mMetadata);
1135 30 : NS_ENSURE_TRUE(mMetadata, NS_ERROR_UNEXPECTED);
1136 :
1137 : const char *value;
1138 30 : value = mMetadata->GetElement(aKey);
1139 30 : if (!value)
1140 10 : return NS_ERROR_NOT_AVAILABLE;
1141 :
1142 20 : *_retval = NS_strdup(value);
1143 20 : return NS_OK;
1144 : }
1145 :
1146 : nsresult
1147 17 : CacheFile::SetElement(const char *aKey, const char *aValue)
1148 : {
1149 34 : CacheFileAutoLock lock(this);
1150 :
1151 17 : LOG(("CacheFile::SetElement() this=%p", this));
1152 :
1153 17 : MOZ_ASSERT(mMetadata);
1154 17 : NS_ENSURE_TRUE(mMetadata, NS_ERROR_UNEXPECTED);
1155 :
1156 17 : if (!strcmp(aKey, CacheFileUtils::kAltDataKey)) {
1157 0 : NS_ERROR("alt-data element is reserved for internal use and must not be "
1158 : "changed via CacheFile::SetElement()");
1159 0 : return NS_ERROR_FAILURE;
1160 : }
1161 :
1162 17 : PostWriteTimer();
1163 17 : return mMetadata->SetElement(aKey, aValue);
1164 : }
1165 :
1166 : nsresult
1167 1 : CacheFile::VisitMetaData(nsICacheEntryMetaDataVisitor *aVisitor)
1168 : {
1169 2 : CacheFileAutoLock lock(this);
1170 1 : MOZ_ASSERT(mMetadata);
1171 1 : MOZ_ASSERT(mReady);
1172 1 : NS_ENSURE_TRUE(mMetadata, NS_ERROR_UNEXPECTED);
1173 :
1174 1 : return mMetadata->Visit(aVisitor);
1175 : }
1176 :
1177 : nsresult
1178 0 : CacheFile::ElementsSize(uint32_t *_retval)
1179 : {
1180 0 : CacheFileAutoLock lock(this);
1181 :
1182 0 : if (!mMetadata)
1183 0 : return NS_ERROR_NOT_AVAILABLE;
1184 :
1185 0 : *_retval = mMetadata->ElementsSize();
1186 0 : return NS_OK;
1187 : }
1188 :
1189 : nsresult
1190 2 : CacheFile::SetExpirationTime(uint32_t aExpirationTime)
1191 : {
1192 4 : CacheFileAutoLock lock(this);
1193 :
1194 2 : LOG(("CacheFile::SetExpirationTime() this=%p, expiration=%u",
1195 : this, aExpirationTime));
1196 :
1197 2 : MOZ_ASSERT(mMetadata);
1198 2 : NS_ENSURE_TRUE(mMetadata, NS_ERROR_UNEXPECTED);
1199 :
1200 2 : PostWriteTimer();
1201 :
1202 2 : if (mHandle && !mHandle->IsDoomed())
1203 2 : CacheFileIOManager::UpdateIndexEntry(mHandle, nullptr, &aExpirationTime, nullptr, nullptr, nullptr);
1204 :
1205 2 : return mMetadata->SetExpirationTime(aExpirationTime);
1206 : }
1207 :
1208 : nsresult
1209 6 : CacheFile::GetExpirationTime(uint32_t *_retval)
1210 : {
1211 12 : CacheFileAutoLock lock(this);
1212 6 : MOZ_ASSERT(mMetadata);
1213 6 : NS_ENSURE_TRUE(mMetadata, NS_ERROR_UNEXPECTED);
1214 :
1215 6 : return mMetadata->GetExpirationTime(_retval);
1216 : }
1217 :
1218 : nsresult
1219 6 : CacheFile::SetFrecency(uint32_t aFrecency)
1220 : {
1221 12 : CacheFileAutoLock lock(this);
1222 :
1223 6 : LOG(("CacheFile::SetFrecency() this=%p, frecency=%u",
1224 : this, aFrecency));
1225 :
1226 6 : MOZ_ASSERT(mMetadata);
1227 6 : NS_ENSURE_TRUE(mMetadata, NS_ERROR_UNEXPECTED);
1228 :
1229 6 : PostWriteTimer();
1230 :
1231 6 : if (mHandle && !mHandle->IsDoomed())
1232 6 : CacheFileIOManager::UpdateIndexEntry(mHandle, &aFrecency, nullptr, nullptr, nullptr, nullptr);
1233 :
1234 6 : return mMetadata->SetFrecency(aFrecency);
1235 : }
1236 :
1237 : nsresult
1238 3 : CacheFile::GetFrecency(uint32_t *_retval)
1239 : {
1240 6 : CacheFileAutoLock lock(this);
1241 3 : MOZ_ASSERT(mMetadata);
1242 3 : NS_ENSURE_TRUE(mMetadata, NS_ERROR_UNEXPECTED);
1243 :
1244 3 : return mMetadata->GetFrecency(_retval);
1245 : }
1246 :
1247 1 : nsresult CacheFile::SetNetworkTimes(uint64_t aOnStartTime, uint64_t aOnStopTime)
1248 : {
1249 2 : CacheFileAutoLock lock(this);
1250 :
1251 1 : LOG(("CacheFile::SetNetworkTimes() this=%p, aOnStartTime=%" PRIu64
1252 : ", aOnStopTime=%" PRIu64 "", this, aOnStartTime, aOnStopTime));
1253 :
1254 1 : MOZ_ASSERT(mMetadata);
1255 1 : NS_ENSURE_TRUE(mMetadata, NS_ERROR_UNEXPECTED);
1256 1 : MOZ_ASSERT(aOnStartTime != kIndexTimeNotAvailable);
1257 1 : MOZ_ASSERT(aOnStopTime != kIndexTimeNotAvailable);
1258 :
1259 1 : PostWriteTimer();
1260 :
1261 2 : nsAutoCString onStartTime;
1262 1 : onStartTime.AppendInt(aOnStartTime);
1263 1 : nsresult rv = mMetadata->SetElement("net-response-time-onstart", onStartTime.get());
1264 1 : if (NS_WARN_IF(NS_FAILED(rv))) {
1265 0 : return rv;
1266 : }
1267 :
1268 2 : nsAutoCString onStopTime;
1269 1 : onStopTime.AppendInt(aOnStopTime);
1270 1 : rv = mMetadata->SetElement("net-response-time-onstop", onStopTime.get());
1271 1 : if (NS_WARN_IF(NS_FAILED(rv))) {
1272 0 : return rv;
1273 : }
1274 :
1275 1 : uint16_t onStartTime16 = aOnStartTime <= kIndexTimeOutOfBound ? aOnStartTime : kIndexTimeOutOfBound;
1276 1 : uint16_t onStopTime16 = aOnStopTime <= kIndexTimeOutOfBound ? aOnStopTime : kIndexTimeOutOfBound;
1277 :
1278 1 : if (mHandle && !mHandle->IsDoomed()) {
1279 1 : CacheFileIOManager::UpdateIndexEntry(mHandle, nullptr, nullptr, nullptr,
1280 1 : &onStartTime16, &onStopTime16);
1281 : }
1282 1 : return NS_OK;
1283 : }
1284 :
1285 3 : nsresult CacheFile::GetOnStartTime(uint64_t *_retval)
1286 : {
1287 6 : CacheFileAutoLock lock(this);
1288 :
1289 3 : MOZ_ASSERT(mMetadata);
1290 3 : const char *onStartTimeStr = mMetadata->GetElement("net-response-time-onstart");
1291 3 : if (!onStartTimeStr) {
1292 1 : return NS_ERROR_NOT_AVAILABLE;
1293 : }
1294 : nsresult rv;
1295 2 : *_retval = nsCString(onStartTimeStr).ToInteger64(&rv);
1296 2 : MOZ_ASSERT(NS_SUCCEEDED(rv));
1297 2 : return NS_OK;
1298 : }
1299 :
1300 2 : nsresult CacheFile::GetOnStopTime(uint64_t *_retval)
1301 : {
1302 4 : CacheFileAutoLock lock(this);
1303 :
1304 2 : MOZ_ASSERT(mMetadata);
1305 2 : const char *onStopTimeStr = mMetadata->GetElement("net-response-time-onstop");
1306 2 : if (!onStopTimeStr) {
1307 0 : return NS_ERROR_NOT_AVAILABLE;
1308 : }
1309 : nsresult rv;
1310 2 : *_retval = nsCString(onStopTimeStr).ToInteger64(&rv);
1311 2 : MOZ_ASSERT(NS_SUCCEEDED(rv));
1312 2 : return NS_OK;
1313 : }
1314 :
1315 : nsresult
1316 0 : CacheFile::SetAltMetadata(const char* aAltMetadata)
1317 : {
1318 0 : AssertOwnsLock();
1319 0 : LOG(("CacheFile::SetAltMetadata() this=%p, aAltMetadata=%s",
1320 : this, aAltMetadata ? aAltMetadata : ""));
1321 :
1322 0 : MOZ_ASSERT(mMetadata);
1323 0 : NS_ENSURE_TRUE(mMetadata, NS_ERROR_UNEXPECTED);
1324 :
1325 0 : PostWriteTimer();
1326 :
1327 0 : nsresult rv = mMetadata->SetElement(CacheFileUtils::kAltDataKey, aAltMetadata);
1328 0 : bool hasAltData = aAltMetadata ? true : false;
1329 :
1330 0 : if (NS_FAILED(rv)) {
1331 : // Removing element shouldn't fail because it doesn't allocate memory.
1332 0 : mMetadata->SetElement(CacheFileUtils::kAltDataKey, nullptr);
1333 :
1334 0 : mAltDataOffset = -1;
1335 0 : hasAltData = false;
1336 : }
1337 :
1338 0 : if (mHandle && !mHandle->IsDoomed()) {
1339 0 : CacheFileIOManager::UpdateIndexEntry(mHandle, nullptr, nullptr, &hasAltData, nullptr, nullptr);
1340 : }
1341 0 : return rv;
1342 : }
1343 :
1344 : nsresult
1345 4 : CacheFile::GetLastModified(uint32_t *_retval)
1346 : {
1347 8 : CacheFileAutoLock lock(this);
1348 4 : MOZ_ASSERT(mMetadata);
1349 4 : NS_ENSURE_TRUE(mMetadata, NS_ERROR_UNEXPECTED);
1350 :
1351 4 : return mMetadata->GetLastModified(_retval);
1352 : }
1353 :
1354 : nsresult
1355 8 : CacheFile::GetLastFetched(uint32_t *_retval)
1356 : {
1357 16 : CacheFileAutoLock lock(this);
1358 8 : MOZ_ASSERT(mMetadata);
1359 8 : NS_ENSURE_TRUE(mMetadata, NS_ERROR_UNEXPECTED);
1360 :
1361 8 : return mMetadata->GetLastFetched(_retval);
1362 : }
1363 :
1364 : nsresult
1365 8 : CacheFile::GetFetchCount(uint32_t *_retval)
1366 : {
1367 16 : CacheFileAutoLock lock(this);
1368 8 : MOZ_ASSERT(mMetadata);
1369 8 : NS_ENSURE_TRUE(mMetadata, NS_ERROR_UNEXPECTED);
1370 :
1371 8 : return mMetadata->GetFetchCount(_retval);
1372 : }
1373 :
1374 : nsresult
1375 2 : CacheFile::GetDiskStorageSizeInKB(uint32_t *aDiskStorageSize)
1376 : {
1377 2 : if (!mHandle) {
1378 0 : return NS_ERROR_NOT_AVAILABLE;
1379 : }
1380 :
1381 2 : *aDiskStorageSize = mHandle->FileSizeInK();
1382 2 : return NS_OK;
1383 : }
1384 :
1385 : nsresult
1386 6 : CacheFile::OnFetched()
1387 : {
1388 12 : CacheFileAutoLock lock(this);
1389 :
1390 6 : LOG(("CacheFile::OnFetched() this=%p", this));
1391 :
1392 6 : MOZ_ASSERT(mMetadata);
1393 6 : NS_ENSURE_TRUE(mMetadata, NS_ERROR_UNEXPECTED);
1394 :
1395 6 : PostWriteTimer();
1396 :
1397 6 : return mMetadata->OnFetched();
1398 : }
1399 :
1400 : void
1401 191 : CacheFile::Lock()
1402 : {
1403 191 : mLock.Lock();
1404 191 : }
1405 :
1406 : void
1407 191 : CacheFile::Unlock()
1408 : {
1409 : // move the elements out of mObjsToRelease
1410 : // so that they can be released after we unlock
1411 382 : nsTArray<RefPtr<nsISupports>> objs;
1412 191 : objs.SwapElements(mObjsToRelease);
1413 :
1414 191 : mLock.Unlock();
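  // When 'objs' goes out of scope below, the swapped-out objects are released
  // without mLock being held.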
1415 :
1416 191 : }
1417 :
1418 : void
1419 244 : CacheFile::AssertOwnsLock() const
1420 : {
1421 244 : mLock.AssertCurrentThreadOwns();
1422 244 : }
1423 :
1424 : void
1425 16 : CacheFile::ReleaseOutsideLock(RefPtr<nsISupports> aObject)
1426 : {
1427 16 : AssertOwnsLock();
1428 :
1429 16 : mObjsToRelease.AppendElement(Move(aObject));
1430 16 : }
1431 :
1432 : nsresult
1433 8 : CacheFile::GetChunkLocked(uint32_t aIndex, ECallerType aCaller,
1434 : CacheFileChunkListener *aCallback,
1435 : CacheFileChunk **_retval)
1436 : {
1437 8 : AssertOwnsLock();
1438 :
1439 8 : LOG(("CacheFile::GetChunkLocked() [this=%p, idx=%u, caller=%d, listener=%p]",
1440 : this, aIndex, aCaller, aCallback));
1441 :
1442 8 : MOZ_ASSERT(mReady);
1443 8 : MOZ_ASSERT(mHandle || mMemoryOnly || mOpeningFile);
1444 8 : MOZ_ASSERT((aCaller == READER && aCallback) ||
1445 : (aCaller == WRITER && !aCallback) ||
1446 : (aCaller == PRELOADER && !aCallback));
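  // Summary of the caller types handled below (added note): a READER passes a
  // callback and may be notified asynchronously once the chunk is loaded, a
  // WRITER gets the chunk synchronously (creating or padding it if needed), and
  // a PRELOADER only kicks off a background read of an existing, not-yet-loaded
  // chunk and gets no chunk back.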
1447 :
1448 :   // Preload chunks from the disk when this is a disk-backed entry and the
1449 :   // caller is a reader.
1450 8 : bool preload = !mMemoryOnly && (aCaller == READER);
1451 :
1452 : nsresult rv;
1453 :
1454 16 : RefPtr<CacheFileChunk> chunk;
1455 8 : if (mChunks.Get(aIndex, getter_AddRefs(chunk))) {
1456 2 : LOG(("CacheFile::GetChunkLocked() - Found chunk %p in mChunks [this=%p]",
1457 : chunk.get(), this));
1458 :
1459 : // Preloader calls this method to preload only non-loaded chunks.
1460 2 : MOZ_ASSERT(aCaller != PRELOADER, "Unexpected!");
1461 :
1462 :     // We might get a failed chunk between releasing the lock in
1463 : // CacheFileChunk::OnDataWritten/Read and CacheFile::OnChunkWritten/Read
1464 2 : rv = chunk->GetStatus();
1465 2 : if (NS_FAILED(rv)) {
1466 0 : SetError(rv);
1467 0 : LOG(("CacheFile::GetChunkLocked() - Found failed chunk in mChunks "
1468 : "[this=%p]", this));
1469 0 : return rv;
1470 : }
1471 :
1472 2 : if (chunk->IsReady() || aCaller == WRITER) {
1473 0 : chunk.swap(*_retval);
1474 : } else {
1475 2 : rv = QueueChunkListener(aIndex, aCallback);
1476 2 : NS_ENSURE_SUCCESS(rv, rv);
1477 : }
1478 :
1479 2 : if (preload) {
1480 2 : PreloadChunks(aIndex + 1);
1481 : }
1482 :
1483 2 : return NS_OK;
1484 : }
1485 :
1486 6 : if (mCachedChunks.Get(aIndex, getter_AddRefs(chunk))) {
1487 1 : LOG(("CacheFile::GetChunkLocked() - Reusing cached chunk %p [this=%p]",
1488 : chunk.get(), this));
1489 :
1490 : // Preloader calls this method to preload only non-loaded chunks.
1491 1 : MOZ_ASSERT(aCaller != PRELOADER, "Unexpected!");
1492 :
1493 1 : mChunks.Put(aIndex, chunk);
1494 1 : mCachedChunks.Remove(aIndex);
1495 1 : chunk->mFile = this;
1496 1 : chunk->mActiveChunk = true;
1497 :
1498 1 : MOZ_ASSERT(chunk->IsReady());
1499 :
1500 1 : chunk.swap(*_retval);
1501 :
1502 1 : if (preload) {
1503 1 : PreloadChunks(aIndex + 1);
1504 : }
1505 :
1506 1 : return NS_OK;
1507 : }
1508 :
1509 5 : int64_t off = aIndex * static_cast<int64_t>(kChunkSize);
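  // Three cases follow: off < mDataSize means the chunk already exists and must
  // be read from the disk, off == mDataSize means a writer may append a brand
  // new empty chunk, and off > mDataSize means a writer first has to fill the
  // gap (with zeroes) up to the requested index.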
1510 :
1511 5 : if (off < mDataSize) {
1512 :     // We cannot be here if this is a memory-only entry since the chunk must exist
1513 2 : MOZ_ASSERT(!mMemoryOnly);
1514 2 : if (mMemoryOnly) {
1515 :       // If this ever really happens it is better to fail rather than crash on
1516 :       // a null handle.
1517 0 : LOG(("CacheFile::GetChunkLocked() - Unexpected state! Offset < mDataSize "
1518 : "for memory-only entry. [this=%p, off=%" PRId64 ", mDataSize=%" PRId64 "]",
1519 : this, off, mDataSize));
1520 :
1521 0 : return NS_ERROR_UNEXPECTED;
1522 : }
1523 :
1524 2 : chunk = new CacheFileChunk(this, aIndex, aCaller == WRITER);
1525 2 : mChunks.Put(aIndex, chunk);
1526 2 : chunk->mActiveChunk = true;
1527 :
1528 2 : LOG(("CacheFile::GetChunkLocked() - Reading newly created chunk %p from "
1529 : "the disk [this=%p]", chunk.get(), this));
1530 :
1531 : // Read the chunk from the disk
1532 10 : rv = chunk->Read(mHandle, std::min(static_cast<uint32_t>(mDataSize - off),
1533 6 : static_cast<uint32_t>(kChunkSize)),
1534 4 : mMetadata->GetHash(aIndex), this);
1535 2 : if (NS_WARN_IF(NS_FAILED(rv))) {
1536 0 : RemoveChunkInternal(chunk, false);
1537 0 : return rv;
1538 : }
1539 :
1540 2 : if (aCaller == WRITER) {
1541 0 : chunk.swap(*_retval);
1542 2 : } else if (aCaller != PRELOADER) {
1543 0 : rv = QueueChunkListener(aIndex, aCallback);
1544 0 : NS_ENSURE_SUCCESS(rv, rv);
1545 : }
1546 :
1547 2 : if (preload) {
1548 0 : PreloadChunks(aIndex + 1);
1549 : }
1550 :
1551 2 : return NS_OK;
1552 3 : } else if (off == mDataSize) {
1553 3 : if (aCaller == WRITER) {
1554 : // this listener is going to write to the chunk
1555 2 : chunk = new CacheFileChunk(this, aIndex, true);
1556 2 : mChunks.Put(aIndex, chunk);
1557 2 : chunk->mActiveChunk = true;
1558 :
1559 2 : LOG(("CacheFile::GetChunkLocked() - Created new empty chunk %p [this=%p]",
1560 : chunk.get(), this));
1561 :
1562 2 : chunk->InitNew();
1563 2 : mMetadata->SetHash(aIndex, chunk->Hash());
1564 :
1565 2 : if (HaveChunkListeners(aIndex)) {
1566 0 : rv = NotifyChunkListeners(aIndex, NS_OK, chunk);
1567 0 : NS_ENSURE_SUCCESS(rv, rv);
1568 : }
1569 :
1570 2 : chunk.swap(*_retval);
1571 2 : return NS_OK;
1572 : }
1573 : } else {
1574 0 : if (aCaller == WRITER) {
1575 :       // this chunk was requested by the writer, but we need to fill the gap first
1576 :
1577 :       // Fill the last chunk with zeroes if it is incomplete
1578 0 : if (mDataSize % kChunkSize) {
1579 0 : rv = PadChunkWithZeroes(mDataSize / kChunkSize);
1580 0 : NS_ENSURE_SUCCESS(rv, rv);
1581 :
1582 0 : MOZ_ASSERT(!(mDataSize % kChunkSize));
1583 : }
1584 :
1585 0 : uint32_t startChunk = mDataSize / kChunkSize;
1586 :
1587 0 : if (mMemoryOnly) {
1588 :         // We need to create all missing CacheFileChunks if this is a memory-only
1589 :         // entry
1590 0 : for (uint32_t i = startChunk ; i < aIndex ; i++) {
1591 0 : rv = PadChunkWithZeroes(i);
1592 0 : NS_ENSURE_SUCCESS(rv, rv);
1593 : }
1594 : } else {
1595 :         // We don't need to create a CacheFileChunk for other empty chunks unless
1596 : // there is some input stream waiting for this chunk.
1597 :
1598 0 : if (startChunk != aIndex) {
1599 : // Make sure the file contains zeroes at the end of the file
1600 0 : rv = CacheFileIOManager::TruncateSeekSetEOF(mHandle,
1601 0 : startChunk * kChunkSize,
1602 0 : aIndex * kChunkSize,
1603 0 : nullptr);
1604 0 : NS_ENSURE_SUCCESS(rv, rv);
1605 : }
1606 :
1607 0 : for (uint32_t i = startChunk ; i < aIndex ; i++) {
1608 0 : if (HaveChunkListeners(i)) {
1609 0 : rv = PadChunkWithZeroes(i);
1610 0 : NS_ENSURE_SUCCESS(rv, rv);
1611 : } else {
1612 0 : mMetadata->SetHash(i, kEmptyChunkHash);
1613 0 : mDataSize = (i + 1) * kChunkSize;
1614 : }
1615 : }
1616 : }
1617 :
1618 0 : MOZ_ASSERT(mDataSize == off);
1619 0 : rv = GetChunkLocked(aIndex, WRITER, nullptr, getter_AddRefs(chunk));
1620 0 : NS_ENSURE_SUCCESS(rv, rv);
1621 :
1622 0 : chunk.swap(*_retval);
1623 0 : return NS_OK;
1624 : }
1625 : }
1626 :
1627 :   // We can be here only if the caller is a reader, since a writer always
1628 :   // creates a new chunk above and the preloader calls this method only for
1629 :   // chunks that are not loaded but do exist.
1630 1 : MOZ_ASSERT(aCaller == READER, "Unexpected!");
1631 :
1632 1 : if (mOutput) {
1633 : // the chunk doesn't exist but mOutput may create it
1634 0 : rv = QueueChunkListener(aIndex, aCallback);
1635 0 : NS_ENSURE_SUCCESS(rv, rv);
1636 : } else {
1637 1 : return NS_ERROR_NOT_AVAILABLE;
1638 : }
1639 :
1640 0 : return NS_OK;
1641 : }
1642 :
1643 : void
1644 6 : CacheFile::PreloadChunks(uint32_t aIndex)
1645 : {
1646 6 : AssertOwnsLock();
1647 :
1648 6 : uint32_t limit = aIndex + mPreloadChunkCount;
1649 :
1650 8 : for (uint32_t i = aIndex; i < limit; ++i) {
1651 8 : int64_t off = i * static_cast<int64_t>(kChunkSize);
1652 :
1653 8 : if (off >= mDataSize) {
1654 : // This chunk is beyond EOF.
1655 6 : return;
1656 : }
1657 :
1658 2 : if (mChunks.GetWeak(i) || mCachedChunks.GetWeak(i)) {
1659 : // This chunk is already in memory or is being read right now.
1660 0 : continue;
1661 : }
1662 :
1663 2 : LOG(("CacheFile::PreloadChunks() - Preloading chunk [this=%p, idx=%u]",
1664 : this, i));
1665 :
1666 4 : RefPtr<CacheFileChunk> chunk;
1667 2 : GetChunkLocked(i, PRELOADER, nullptr, getter_AddRefs(chunk));
1668 : // We've checked that we don't have this chunk, so no chunk must be
1669 : // returned.
1670 2 : MOZ_ASSERT(!chunk);
1671 : }
1672 : }
1673 :
1674 : bool
1675 5 : CacheFile::ShouldCacheChunk(uint32_t aIndex)
1676 : {
1677 5 : AssertOwnsLock();
1678 :
1679 : #ifdef CACHE_CHUNKS
1680 : // We cache all chunks.
1681 : return true;
1682 : #else
1683 :
1684 16 : if (mPreloadChunkCount != 0 && mInputs.Length() == 0 &&
1685 6 : mPreloadWithoutInputStreams && aIndex < mPreloadChunkCount) {
1686 : // We don't have any input stream yet, but it is likely that some will be
1687 :     // opened soon. Keep the first mPreloadChunkCount chunks in memory. The
1688 : // condition is here instead of in MustKeepCachedChunk() since these
1689 : // chunks should be preloaded and can be kept in memory as an optimization,
1690 : // but they can be released at any time until they are considered as
1691 : // preloaded chunks for any input stream.
1692 0 : return true;
1693 : }
1694 :
1695 : // Cache only chunks that we really need to keep.
1696 5 : return MustKeepCachedChunk(aIndex);
1697 : #endif
1698 : }
1699 :
1700 : bool
1701 8 : CacheFile::MustKeepCachedChunk(uint32_t aIndex)
1702 : {
1703 8 : AssertOwnsLock();
1704 :
1705 :   // We must keep the chunk when this is a memory-only entry or we don't have
1706 : // a handle yet.
1707 8 : if (mMemoryOnly || mOpeningFile) {
1708 0 : return true;
1709 : }
1710 :
1711 8 : if (mPreloadChunkCount == 0) {
1712 : // Preloading of chunks is disabled
1713 0 : return false;
1714 : }
1715 :
1716 : // Check whether this chunk should be considered as preloaded chunk for any
1717 : // existing input stream.
1718 :
1719 : // maxPos is the position of the last byte in the given chunk
1720 8 : int64_t maxPos = static_cast<int64_t>(aIndex + 1) * kChunkSize - 1;
1721 :
1722 : // minPos is the position of the first byte in a chunk that precedes the given
1723 : // chunk by mPreloadChunkCount chunks
1724 : int64_t minPos;
1725 8 : if (mPreloadChunkCount >= aIndex) {
1726 8 : minPos = 0;
1727 : } else {
1728 0 : minPos = static_cast<int64_t>(aIndex - mPreloadChunkCount) * kChunkSize;
1729 : }
1730 :
1731 8 : for (uint32_t i = 0; i < mInputs.Length(); ++i) {
1732 4 : int64_t inputPos = mInputs[i]->GetPosition();
1733 4 : if (inputPos >= minPos && inputPos <= maxPos) {
1734 4 : return true;
1735 : }
1736 : }
1737 :
1738 4 : return false;
1739 : }
1740 :
1741 : nsresult
1742 5 : CacheFile::DeactivateChunk(CacheFileChunk *aChunk)
1743 : {
1744 : nsresult rv;
1745 :
1746 : // Avoid lock reentrancy by increasing the RefCnt
1747 10 : RefPtr<CacheFileChunk> chunk = aChunk;
1748 :
1749 : {
1750 8 : CacheFileAutoLock lock(this);
1751 :
1752 5 : LOG(("CacheFile::DeactivateChunk() [this=%p, chunk=%p, idx=%u]",
1753 : this, aChunk, aChunk->Index()));
1754 :
1755 5 : MOZ_ASSERT(mReady);
1756 5 : MOZ_ASSERT((mHandle && !mMemoryOnly && !mOpeningFile) ||
1757 : (!mHandle && mMemoryOnly && !mOpeningFile) ||
1758 : (!mHandle && !mMemoryOnly && mOpeningFile));
1759 :
1760 5 : if (aChunk->mRefCnt != 2) {
1761 0 : LOG(("CacheFile::DeactivateChunk() - Chunk is still used [this=%p, "
1762 : "chunk=%p, refcnt=%" PRIuPTR "]", this, aChunk, aChunk->mRefCnt.get()));
1763 :
1764 : // somebody got the reference before the lock was acquired
1765 0 : return NS_OK;
1766 : }
1767 :
1768 5 : if (aChunk->mDiscardedChunk) {
1769 0 : aChunk->mActiveChunk = false;
1770 0 : ReleaseOutsideLock(RefPtr<CacheFileChunkListener>(aChunk->mFile.forget()).forget());
1771 :
1772 0 : DebugOnly<bool> removed = mDiscardedChunks.RemoveElement(aChunk);
1773 0 : MOZ_ASSERT(removed);
1774 0 : return NS_OK;
1775 : }
1776 :
1777 : #ifdef DEBUG
1778 : {
1779 : // We can be here iff the chunk is in the hash table
1780 10 : RefPtr<CacheFileChunk> chunkCheck;
1781 5 : mChunks.Get(chunk->Index(), getter_AddRefs(chunkCheck));
1782 5 : MOZ_ASSERT(chunkCheck == chunk);
1783 :
1784 : // We also shouldn't have any queued listener for this chunk
1785 : ChunkListeners *listeners;
1786 5 : mChunkListeners.Get(chunk->Index(), &listeners);
1787 5 : MOZ_ASSERT(!listeners);
1788 : }
1789 : #endif
1790 :
1791 5 : if (NS_FAILED(chunk->GetStatus())) {
1792 0 : SetError(chunk->GetStatus());
1793 : }
1794 :
1795 5 : if (NS_FAILED(mStatus)) {
1796 : // Don't write any chunk to disk since this entry will be doomed
1797 0 : LOG(("CacheFile::DeactivateChunk() - Releasing chunk because of status "
1798 : "[this=%p, chunk=%p, mStatus=0x%08" PRIx32 "]",
1799 : this, chunk.get(), static_cast<uint32_t>(mStatus)));
1800 :
1801 0 : RemoveChunkInternal(chunk, false);
1802 0 : return mStatus;
1803 : }
1804 :
1805 5 : if (chunk->IsDirty() && !mMemoryOnly && !mOpeningFile) {
1806 2 : LOG(("CacheFile::DeactivateChunk() - Writing dirty chunk to the disk "
1807 : "[this=%p]", this));
1808 :
1809 2 : mDataIsDirty = true;
1810 :
1811 2 : rv = chunk->Write(mHandle, this);
1812 2 : if (NS_FAILED(rv)) {
1813 0 : LOG(("CacheFile::DeactivateChunk() - CacheFileChunk::Write() failed "
1814 : "synchronously. Removing it. [this=%p, chunk=%p, rv=0x%08" PRIx32 "]",
1815 : this, chunk.get(), static_cast<uint32_t>(rv)));
1816 :
1817 0 : RemoveChunkInternal(chunk, false);
1818 :
1819 0 : SetError(rv);
1820 0 : return rv;
1821 : }
1822 :
1823 : // Chunk will be removed in OnChunkWritten if it is still unused
1824 :
1825 : // chunk needs to be released under the lock to be able to rely on
1826 : // CacheFileChunk::mRefCnt in CacheFile::OnChunkWritten()
1827 2 : chunk = nullptr;
1828 2 : return NS_OK;
1829 : }
1830 :
1831 3 : bool keepChunk = ShouldCacheChunk(aChunk->Index());
1832 3 : LOG(("CacheFile::DeactivateChunk() - %s unused chunk [this=%p, chunk=%p]",
1833 : keepChunk ? "Caching" : "Releasing", this, chunk.get()));
1834 :
1835 3 : RemoveChunkInternal(chunk, keepChunk);
1836 :
1837 3 : if (!mMemoryOnly)
1838 3 : WriteMetadataIfNeededLocked();
1839 : }
1840 :
1841 3 : return NS_OK;
1842 : }
1843 :
1844 : void
1845 5 : CacheFile::RemoveChunkInternal(CacheFileChunk *aChunk, bool aCacheChunk)
1846 : {
1847 5 : AssertOwnsLock();
1848 :
1849 5 : aChunk->mActiveChunk = false;
1850 5 : ReleaseOutsideLock(RefPtr<CacheFileChunkListener>(aChunk->mFile.forget()).forget());
1851 :
1852 5 : if (aCacheChunk) {
1853 4 : mCachedChunks.Put(aChunk->Index(), aChunk);
1854 : }
1855 :
1856 5 : mChunks.Remove(aChunk->Index());
1857 5 : }
1858 :
1859 : bool
1860 10 : CacheFile::OutputStreamExists(bool aAlternativeData)
1861 : {
1862 10 : AssertOwnsLock();
1863 :
1864 10 : if (!mOutput) {
1865 8 : return false;
1866 : }
1867 :
1868 2 : return mOutput->IsAlternativeData() == aAlternativeData;
1869 : }
1870 :
1871 : int64_t
1872 8 : CacheFile::BytesFromChunk(uint32_t aIndex, bool aAlternativeData)
1873 : {
1874 8 : AssertOwnsLock();
1875 :
1876 : int64_t dataSize;
1877 :
1878 8 : if (mAltDataOffset != -1) {
1879 0 : if (aAlternativeData) {
1880 0 : dataSize = mDataSize;
1881 : } else {
1882 0 : dataSize = mAltDataOffset;
1883 : }
1884 : } else {
1885 8 : MOZ_ASSERT(!aAlternativeData);
1886 8 : dataSize = mDataSize;
1887 : }
1888 :
1889 8 : if (!dataSize) {
1890 0 : return 0;
1891 : }
1892 :
1893 : // Index of the last existing chunk.
1894 8 : uint32_t lastChunk = (dataSize - 1) / kChunkSize;
1895 8 : if (aIndex > lastChunk) {
1896 0 : return 0;
1897 : }
1898 :
1899 : // We can use only preloaded chunks for the given stream to calculate
1900 : // available bytes if this is an entry stored on disk, since only those
1901 : // chunks are guaranteed not to be released.
1902 : uint32_t maxPreloadedChunk;
1903 8 : if (mMemoryOnly) {
1904 0 : maxPreloadedChunk = lastChunk;
1905 : } else {
1906 8 : maxPreloadedChunk = std::min(aIndex + mPreloadChunkCount, lastChunk);
1907 : }
1908 :
1909 : uint32_t i;
1910 16 : for (i = aIndex; i <= maxPreloadedChunk; ++i) {
1911 : CacheFileChunk * chunk;
1912 :
1913 8 : chunk = mChunks.GetWeak(i);
1914 8 : if (chunk) {
1915 8 : MOZ_ASSERT(i == lastChunk || chunk->DataSize() == kChunkSize);
1916 8 : if (chunk->IsReady()) {
1917 8 : continue;
1918 : }
1919 :
1920 : // The chunk is in mChunks but isn't ready; don't look for it in mCachedChunks.
1921 0 : break;
1922 : }
1923 :
1924 0 : chunk = mCachedChunks.GetWeak(i);
1925 0 : if (chunk) {
1926 0 : MOZ_ASSERT(i == lastChunk || chunk->DataSize() == kChunkSize);
1927 0 : continue;
1928 : }
1929 :
1930 0 : break;
1931 : }
1932 :
1933 : // Theoretical number of bytes available in advance of aIndex.
1934 8 : int64_t advance = int64_t(i - aIndex) * kChunkSize;
1935 : // Actual number of bytes remaining up to the end of the data.
1936 8 : int64_t tail = dataSize - (aIndex * kChunkSize);
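// Worked example (added for clarity, not part of the original source): when
// aIndex is the last chunk and that chunk is loaded, advance is a full
// kChunkSize while tail is only the number of bytes actually present in the
// last chunk, so the smaller value, tail, is returned.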
1937 :
1938 8 : return std::min(advance, tail);
1939 : }
1940 :
1941 : nsresult
1942 0 : CacheFile::Truncate(int64_t aOffset)
1943 : {
1944 0 : AssertOwnsLock();
1945 :
1946 0 : LOG(("CacheFile::Truncate() [this=%p, offset=%" PRId64 "]", this, aOffset));
1947 :
1948 : nsresult rv;
1949 :
1950 : // If we ever need to truncate at a position other than the alt-data
1951 : // boundary, we need to handle existing input streams.
1952 0 : MOZ_ASSERT(aOffset == mAltDataOffset, "Truncating normal data not implemented");
1953 0 : MOZ_ASSERT(mReady);
1954 0 : MOZ_ASSERT(!mOutput);
1955 :
1956 0 : uint32_t lastChunk = 0;
1957 0 : if (mDataSize > 0) {
1958 0 : lastChunk = (mDataSize - 1) / kChunkSize;
1959 : }
1960 :
1961 0 : uint32_t newLastChunk = 0;
1962 0 : if (aOffset > 0) {
1963 0 : newLastChunk = (aOffset - 1) / kChunkSize;
1964 : }
1965 :
1966 0 : uint32_t bytesInNewLastChunk = aOffset - newLastChunk * kChunkSize;
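// Worked example (added for clarity, not part of the original source):
// truncating at aOffset == 1.5 * kChunkSize with mDataSize == 3 * kChunkSize
// gives lastChunk == 2, newLastChunk == 1 and bytesInNewLastChunk ==
// 0.5 * kChunkSize, i.e. chunk 2 is discarded entirely and chunk 1 is
// truncated to half of its size below.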
1967 :
1968 0 : LOG(("CacheFile::Truncate() - lastChunk=%u, newLastChunk=%u, "
1969 : "bytesInNewLastChunk=%u", lastChunk, newLastChunk, bytesInNewLastChunk));
1970 :
1971 : // Remove all truncated chunks from mCachedChunks
1972 0 : for (auto iter = mCachedChunks.Iter(); !iter.Done(); iter.Next()) {
1973 0 : uint32_t idx = iter.Key();
1974 :
1975 0 : if (idx > newLastChunk) {
1976 : // This is an unused chunk; simply remove it.
1977 0 : LOG(("CacheFile::Truncate() - removing cached chunk [idx=%u]", idx));
1978 0 : iter.Remove();
1979 : }
1980 : }
1981 :
1982 : // We need to make sure no input stream holds a reference to a chunk we're
1983 : // going to discard. In theory, if alt-data begins at a chunk boundary, an
1984 : // input stream for normal data can get the chunk containing only alt-data
1985 : // via an EnsureCorrectChunk() call. The input stream won't read data from
1986 : // such a chunk, but it will keep the reference until the stream is closed,
1987 : // so we cannot simply discard this chunk.
1988 0 : int64_t maxInputChunk = -1;
1989 0 : for (uint32_t i = 0; i < mInputs.Length(); ++i) {
1990 0 : int64_t inputChunk = mInputs[i]->GetChunkIdx();
1991 :
1992 0 : if (maxInputChunk < inputChunk) {
1993 0 : maxInputChunk = inputChunk;
1994 : }
1995 :
1996 0 : MOZ_RELEASE_ASSERT(mInputs[i]->GetPosition() <= aOffset);
1997 : }
1998 :
1999 0 : MOZ_RELEASE_ASSERT(maxInputChunk <= newLastChunk + 1);
2000 0 : if (maxInputChunk == newLastChunk + 1) {
2001 : // Truncating must be done at a chunk boundary
2002 0 : MOZ_RELEASE_ASSERT(bytesInNewLastChunk == kChunkSize);
2003 0 : newLastChunk++;
2004 0 : bytesInNewLastChunk = 0;
2005 0 : LOG(("CacheFile::Truncate() - chunk %p is still in use, using "
2006 : "newLastChunk=%u and bytesInNewLastChunk=%u",
2007 : mChunks.GetWeak(newLastChunk), newLastChunk, bytesInNewLastChunk));
2008 : }
2009 :
2010 : // Discard all truncated chunks in mChunks
2011 0 : for (auto iter = mChunks.Iter(); !iter.Done(); iter.Next()) {
2012 0 : uint32_t idx = iter.Key();
2013 :
2014 0 : if (idx > newLastChunk) {
2015 0 : RefPtr<CacheFileChunk>& chunk = iter.Data();
2016 0 : LOG(("CacheFile::Truncate() - discarding chunk [idx=%u, chunk=%p]",
2017 : idx, chunk.get()));
2018 :
2019 0 : if (HaveChunkListeners(idx)) {
2020 0 : NotifyChunkListeners(idx, NS_ERROR_NOT_AVAILABLE, chunk);
2021 : }
2022 :
2023 0 : chunk->mDiscardedChunk = true;
2024 0 : mDiscardedChunks.AppendElement(chunk);
2025 0 : iter.Remove();
2026 : }
2027 : }
2028 :
2029 : // Remove hashes of all removed chunks from the metadata
2030 0 : for (uint32_t i = lastChunk; i > newLastChunk; --i) {
2031 0 : mMetadata->RemoveHash(i);
2032 : }
2033 :
2034 : // Truncate new last chunk
2035 0 : if (bytesInNewLastChunk == kChunkSize) {
2036 0 : LOG(("CacheFile::Truncate() - not truncating last chunk."));
2037 : } else {
2038 0 : RefPtr<CacheFileChunk> chunk;
2039 0 : if (mChunks.Get(newLastChunk, getter_AddRefs(chunk))) {
2040 0 : LOG(("CacheFile::Truncate() - New last chunk %p got from mChunks.",
2041 : chunk.get()));
2042 0 : } else if (mCachedChunks.Get(newLastChunk, getter_AddRefs(chunk))) {
2043 0 : LOG(("CacheFile::Truncate() - New last chunk %p got from mCachedChunks.",
2044 : chunk.get()));
2045 : } else {
2046 : // New last chunk isn't loaded but we need to update the hash.
2047 0 : MOZ_ASSERT(!mMemoryOnly);
2048 0 : MOZ_ASSERT(mHandle);
2049 :
2050 0 : rv = GetChunkLocked(newLastChunk, PRELOADER, nullptr,
2051 0 : getter_AddRefs(chunk));
2052 0 : if (NS_FAILED(rv)) {
2053 0 : return rv;
2054 : }
2055 : // We've already checked that we don't have this chunk loaded, so no
2056 : // chunk should be returned here.
2057 0 : MOZ_ASSERT(!chunk);
2058 :
2059 0 : if (!mChunks.Get(newLastChunk, getter_AddRefs(chunk))) {
2060 0 : return NS_ERROR_UNEXPECTED;
2061 : }
2062 :
2063 0 : LOG(("CacheFile::Truncate() - New last chunk %p got from preloader.",
2064 : chunk.get()));
2065 : }
2066 :
2067 0 : rv = chunk->Truncate(bytesInNewLastChunk);
2068 0 : if (NS_FAILED(rv)) {
2069 0 : return rv;
2070 : }
2071 :
2072 : // If the chunk is ready, set the new hash now. If it's still being loaded,
2073 : // CacheFileChunk::Truncate() made the chunk dirty and the hash will be
2074 : // updated in OnChunkWritten().
2075 0 : if (chunk->IsReady()) {
2076 0 : mMetadata->SetHash(newLastChunk, chunk->Hash());
2077 : }
2078 : }
2079 :
2080 0 : if (mHandle) {
2081 0 : rv = CacheFileIOManager::TruncateSeekSetEOF(mHandle, aOffset, aOffset, nullptr);
2082 0 : if (NS_FAILED(rv)) {
2083 0 : return rv;
2084 : }
2085 : }
2086 :
2087 0 : mDataSize = aOffset;
2088 :
2089 0 : return NS_OK;
2090 : }
2091 :
2092 : static uint32_t
2093 6 : StatusToTelemetryEnum(nsresult aStatus)
2094 : {
2095 6 : if (NS_SUCCEEDED(aStatus)) {
2096 2 : return 0;
2097 : }
2098 :
2099 4 : switch (aStatus) {
2100 : case NS_BASE_STREAM_CLOSED:
2101 3 : return 0; // Log this as a success
2102 : case NS_ERROR_OUT_OF_MEMORY:
2103 0 : return 2;
2104 : case NS_ERROR_FILE_DISK_FULL:
2105 0 : return 3;
2106 : case NS_ERROR_FILE_CORRUPTED:
2107 0 : return 4;
2108 : case NS_ERROR_FILE_NOT_FOUND:
2109 0 : return 5;
2110 : case NS_BINDING_ABORTED:
2111 1 : return 6;
2112 : default:
2113 0 : return 1; // other error
2114 : }
2115 :
2116 : NS_NOTREACHED("We should never get here");
2117 : }
2118 :
2119 : nsresult
2120 4 : CacheFile::RemoveInput(CacheFileInputStream *aInput, nsresult aStatus)
2121 : {
2122 8 : CacheFileAutoLock lock(this);
2123 :
2124 4 : LOG(("CacheFile::RemoveInput() [this=%p, input=%p, status=0x%08" PRIx32 "]", this,
2125 : aInput, static_cast<uint32_t>(aStatus)));
2126 :
2127 8 : DebugOnly<bool> found;
2128 4 : found = mInputs.RemoveElement(aInput);
2129 4 : MOZ_ASSERT(found);
2130 :
2131 4 : ReleaseOutsideLock(already_AddRefed<nsIInputStream>(static_cast<nsIInputStream*>(aInput)));
2132 :
2133 4 : if (!mMemoryOnly)
2134 4 : WriteMetadataIfNeededLocked();
2135 :
2136 : // If the input didn't read all the data, some preloaded chunks that won't
2137 : // be used anymore might be left behind.
2138 4 : CleanUpCachedChunks();
2139 :
2140 4 : Telemetry::Accumulate(Telemetry::NETWORK_CACHE_V2_INPUT_STREAM_STATUS,
2141 4 : StatusToTelemetryEnum(aStatus));
2142 :
2143 8 : return NS_OK;
2144 : }
2145 :
2146 : nsresult
2147 2 : CacheFile::RemoveOutput(CacheFileOutputStream *aOutput, nsresult aStatus)
2148 : {
2149 2 : AssertOwnsLock();
2150 :
2151 2 : LOG(("CacheFile::RemoveOutput() [this=%p, output=%p, status=0x%08" PRIx32 "]", this,
2152 : aOutput, static_cast<uint32_t>(aStatus)));
2153 :
2154 2 : if (mOutput != aOutput) {
2155 0 : LOG(("CacheFile::RemoveOutput() - This output was already removed, ignoring"
2156 : " call [this=%p]", this));
2157 0 : return NS_OK;
2158 : }
2159 :
2160 2 : mOutput = nullptr;
2161 :
2162 : // Cancel all queued chunk listeners and update listeners that cannot be satisfied
2163 2 : NotifyListenersAboutOutputRemoval();
2164 :
2165 2 : if (!mMemoryOnly)
2166 2 : WriteMetadataIfNeededLocked();
2167 :
2168 : // Make sure the CacheFile status is set to a failure when the output stream
2169 : // is closed with a fatal error. This way we correctly propagate the failure
2170 : // state of this entry to end consumers, without any window.
2171 2 : if (NS_SUCCEEDED(mStatus) && NS_FAILED(aStatus) && aStatus != NS_BASE_STREAM_CLOSED) {
2172 0 : mStatus = aStatus;
2173 : }
2174 :
2175 : // Notify close listener as the last action
2176 2 : aOutput->NotifyCloseListener();
2177 :
2178 2 : Telemetry::Accumulate(Telemetry::NETWORK_CACHE_V2_OUTPUT_STREAM_STATUS,
2179 2 : StatusToTelemetryEnum(aStatus));
2180 :
2181 2 : return NS_OK;
2182 : }
2183 :
2184 : nsresult
2185 2 : CacheFile::NotifyChunkListener(CacheFileChunkListener *aCallback,
2186 : nsIEventTarget *aTarget,
2187 : nsresult aResult,
2188 : uint32_t aChunkIdx,
2189 : CacheFileChunk *aChunk)
2190 : {
2191 2 : LOG(("CacheFile::NotifyChunkListener() [this=%p, listener=%p, target=%p, "
2192 : "rv=0x%08" PRIx32 ", idx=%u, chunk=%p]", this, aCallback, aTarget,
2193 : static_cast<uint32_t>(aResult), aChunkIdx, aChunk));
2194 :
2195 : nsresult rv;
2196 4 : RefPtr<NotifyChunkListenerEvent> ev;
2197 2 : ev = new NotifyChunkListenerEvent(aCallback, aResult, aChunkIdx, aChunk);
2198 2 : if (aTarget)
2199 2 : rv = aTarget->Dispatch(ev, NS_DISPATCH_NORMAL);
2200 : else
2201 0 : rv = NS_DispatchToCurrentThread(ev);
2202 2 : NS_ENSURE_SUCCESS(rv, rv);
2203 :
2204 2 : return NS_OK;
2205 : }
2206 :
2207 : nsresult
2208 2 : CacheFile::QueueChunkListener(uint32_t aIndex,
2209 : CacheFileChunkListener *aCallback)
2210 : {
2211 2 : LOG(("CacheFile::QueueChunkListener() [this=%p, idx=%u, listener=%p]",
2212 : this, aIndex, aCallback));
2213 :
2214 2 : AssertOwnsLock();
2215 :
2216 2 : MOZ_ASSERT(aCallback);
2217 :
2218 2 : ChunkListenerItem *item = new ChunkListenerItem();
2219 2 : item->mTarget = CacheFileIOManager::IOTarget();
2220 2 : if (!item->mTarget) {
2221 0 : LOG(("CacheFile::QueueChunkListener() - Cannot get Cache I/O thread! Using "
2222 : "main thread for callback."));
2223 0 : item->mTarget = GetMainThreadEventTarget();
2224 : }
2225 2 : item->mCallback = aCallback;
2226 :
2227 : ChunkListeners *listeners;
2228 2 : if (!mChunkListeners.Get(aIndex, &listeners)) {
2229 2 : listeners = new ChunkListeners();
2230 2 : mChunkListeners.Put(aIndex, listeners);
2231 : }
2232 :
2233 2 : listeners->mItems.AppendElement(item);
2234 2 : return NS_OK;
2235 : }
2236 :
2237 : nsresult
2238 2 : CacheFile::NotifyChunkListeners(uint32_t aIndex, nsresult aResult,
2239 : CacheFileChunk *aChunk)
2240 : {
2241 2 : LOG(("CacheFile::NotifyChunkListeners() [this=%p, idx=%u, rv=0x%08" PRIx32 ", "
2242 : "chunk=%p]", this, aIndex, static_cast<uint32_t>(aResult), aChunk));
2243 :
2244 2 : AssertOwnsLock();
2245 :
2246 : nsresult rv, rv2;
2247 :
2248 : ChunkListeners *listeners;
2249 2 : mChunkListeners.Get(aIndex, &listeners);
2250 2 : MOZ_ASSERT(listeners);
2251 :
2252 2 : rv = NS_OK;
2253 4 : for (uint32_t i = 0 ; i < listeners->mItems.Length() ; i++) {
2254 2 : ChunkListenerItem *item = listeners->mItems[i];
2255 2 : rv2 = NotifyChunkListener(item->mCallback, item->mTarget, aResult, aIndex,
2256 2 : aChunk);
2257 2 : if (NS_FAILED(rv2) && NS_SUCCEEDED(rv))
2258 0 : rv = rv2;
2259 2 : delete item;
2260 : }
2261 :
2262 2 : mChunkListeners.Remove(aIndex);
2263 :
2264 2 : return rv;
2265 : }
2266 :
2267 : bool
2268 6 : CacheFile::HaveChunkListeners(uint32_t aIndex)
2269 : {
2270 : ChunkListeners *listeners;
2271 6 : mChunkListeners.Get(aIndex, &listeners);
2272 6 : return !!listeners;
2273 : }
2274 :
2275 : void
2276 2 : CacheFile::NotifyListenersAboutOutputRemoval()
2277 : {
2278 2 : LOG(("CacheFile::NotifyListenersAboutOutputRemoval() [this=%p]", this));
2279 :
2280 2 : AssertOwnsLock();
2281 :
2282 : // First fail all chunk listeners that wait for a non-existent chunk
2283 2 : for (auto iter = mChunkListeners.Iter(); !iter.Done(); iter.Next()) {
2284 0 : uint32_t idx = iter.Key();
2285 0 : nsAutoPtr<ChunkListeners>& listeners = iter.Data();
2286 :
2287 0 : LOG(("CacheFile::NotifyListenersAboutOutputRemoval() - fail "
2288 : "[this=%p, idx=%u]", this, idx));
2289 :
2290 0 : RefPtr<CacheFileChunk> chunk;
2291 0 : mChunks.Get(idx, getter_AddRefs(chunk));
2292 0 : if (chunk) {
2293 0 : MOZ_ASSERT(!chunk->IsReady());
2294 0 : continue;
2295 : }
2296 :
2297 0 : for (uint32_t i = 0 ; i < listeners->mItems.Length() ; i++) {
2298 0 : ChunkListenerItem *item = listeners->mItems[i];
2299 0 : NotifyChunkListener(item->mCallback, item->mTarget,
2300 0 : NS_ERROR_NOT_AVAILABLE, idx, nullptr);
2301 0 : delete item;
2302 : }
2303 :
2304 0 : iter.Remove();
2305 : }
2306 :
2307 : // Fail all update listeners
2308 4 : for (auto iter = mChunks.Iter(); !iter.Done(); iter.Next()) {
2309 2 : const RefPtr<CacheFileChunk>& chunk = iter.Data();
2310 2 : LOG(("CacheFile::NotifyListenersAboutOutputRemoval() - fail2 "
2311 : "[this=%p, idx=%u]", this, iter.Key()));
2312 :
2313 2 : if (chunk->IsReady()) {
2314 2 : chunk->NotifyUpdateListeners();
2315 : }
2316 : }
2317 2 : }
2318 :
2319 : bool
2320 8 : CacheFile::DataSize(int64_t* aSize)
2321 : {
2322 16 : CacheFileAutoLock lock(this);
2323 :
2324 8 : if (OutputStreamExists(false)) {
2325 2 : return false;
2326 : }
2327 :
2328 6 : if (mAltDataOffset == -1) {
2329 6 : *aSize = mDataSize;
2330 : } else {
2331 0 : *aSize = mAltDataOffset;
2332 : }
2333 :
2334 6 : return true;
2335 : }
2336 :
2337 : nsresult
2338 0 : CacheFile::GetAltDataSize(int64_t *aSize)
2339 : {
2340 0 : CacheFileAutoLock lock(this);
2341 0 : if (mOutput) {
2342 0 : return NS_ERROR_IN_PROGRESS;
2343 : }
2344 :
2345 0 : if (mAltDataOffset == -1) {
2346 0 : return NS_ERROR_NOT_AVAILABLE;
2347 : }
2348 :
2349 0 : *aSize = mDataSize - mAltDataOffset;
2350 0 : return NS_OK;
2351 : }
2352 :
2353 : bool
2354 0 : CacheFile::IsDoomed()
2355 : {
2356 0 : CacheFileAutoLock lock(this);
2357 :
2358 0 : if (!mHandle)
2359 0 : return false;
2360 :
2361 0 : return mHandle->IsDoomed();
2362 : }
2363 :
2364 : bool
2365 0 : CacheFile::IsWriteInProgress()
2366 : {
2367 : // Returns true when there is a potentially unfinished write operation.
2368 : // Not using the lock for performance reasons. mMetadata is never released
2369 : // during the lifetime of the CacheFile.
2370 :
2371 0 : bool result = false;
2372 :
2373 0 : if (!mMemoryOnly) {
2374 0 : result = mDataIsDirty ||
2375 0 : (mMetadata && mMetadata->IsDirty()) ||
2376 0 : mWritingMetadata;
2377 : }
2378 :
2379 0 : result = result ||
2380 0 : mOpeningFile ||
2381 0 : mOutput ||
2382 0 : mChunks.Count();
2383 :
2384 0 : return result;
2385 : }
2386 :
2387 : bool
2388 15 : CacheFile::IsDirty()
2389 : {
2390 15 : return mDataIsDirty || mMetadata->IsDirty();
2391 : }
2392 :
2393 : void
2394 0 : CacheFile::WriteMetadataIfNeeded()
2395 : {
2396 0 : LOG(("CacheFile::WriteMetadataIfNeeded() [this=%p]", this));
2397 :
2398 0 : CacheFileAutoLock lock(this);
2399 :
2400 0 : if (!mMemoryOnly)
2401 0 : WriteMetadataIfNeededLocked();
2402 0 : }
2403 :
2404 : void
2405 11 : CacheFile::WriteMetadataIfNeededLocked(bool aFireAndForget)
2406 : {
2407 : // When aFireAndForget is set to true, we are called from the destructor.
2408 : // |this| must not be referenced after this method returns!
2409 :
2410 11 : LOG(("CacheFile::WriteMetadataIfNeededLocked() [this=%p]", this));
2411 :
2412 : nsresult rv;
2413 :
2414 11 : AssertOwnsLock();
2415 11 : MOZ_ASSERT(!mMemoryOnly);
2416 :
2417 11 : if (!mMetadata) {
2418 0 : MOZ_CRASH("Must have metadata here");
2419 : return;
2420 : }
2421 :
2422 11 : if (NS_FAILED(mStatus))
2423 0 : return;
2424 :
2425 37 : if (!IsDirty() || mOutput || mInputs.Length() || mChunks.Count() ||
2426 19 : mWritingMetadata || mOpeningFile || mKill)
2427 7 : return;
2428 :
2429 4 : if (!aFireAndForget) {
2430 : // If aFireAndForget is set, we are called from the destructor; the write
2431 : // scheduler otherwise hard-refers the CacheFile, so we could not be here.
2432 4 : CacheFileIOManager::UnscheduleMetadataWrite(this);
2433 : }
2434 :
2435 4 : LOG(("CacheFile::WriteMetadataIfNeededLocked() - Writing metadata [this=%p]",
2436 : this));
2437 :
2438 4 : rv = mMetadata->WriteMetadata(mDataSize, aFireAndForget ? nullptr : this);
2439 4 : if (NS_SUCCEEDED(rv)) {
2440 4 : mWritingMetadata = true;
2441 4 : mDataIsDirty = false;
2442 : } else {
2443 0 : LOG(("CacheFile::WriteMetadataIfNeededLocked() - Writing synchronously "
2444 : "failed [this=%p]", this));
2445 : // TODO: close streams with error
2446 0 : SetError(rv);
2447 : }
2448 : }
2449 :
2450 : void
2451 32 : CacheFile::PostWriteTimer()
2452 : {
2453 32 : if (mMemoryOnly)
2454 0 : return;
2455 :
2456 32 : LOG(("CacheFile::PostWriteTimer() [this=%p]", this));
2457 :
2458 32 : CacheFileIOManager::ScheduleMetadataWrite(this);
2459 : }
2460 :
2461 : void
2462 4 : CacheFile::CleanUpCachedChunks()
2463 : {
2464 7 : for (auto iter = mCachedChunks.Iter(); !iter.Done(); iter.Next()) {
2465 3 : uint32_t idx = iter.Key();
2466 3 : const RefPtr<CacheFileChunk>& chunk = iter.Data();
2467 :
2468 3 : LOG(("CacheFile::CleanUpCachedChunks() [this=%p, idx=%u, chunk=%p]", this,
2469 : idx, chunk.get()));
2470 :
2471 3 : if (MustKeepCachedChunk(idx)) {
2472 0 : LOG(("CacheFile::CleanUpCachedChunks() - Keeping chunk"));
2473 0 : continue;
2474 : }
2475 :
2476 3 : LOG(("CacheFile::CleanUpCachedChunks() - Removing chunk"));
2477 3 : iter.Remove();
2478 : }
2479 4 : }
2480 :
2481 : nsresult
2482 0 : CacheFile::PadChunkWithZeroes(uint32_t aChunkIdx)
2483 : {
2484 0 : AssertOwnsLock();
2485 :
2486 : // This method is used to pad the last incomplete chunk with zeroes or to
2487 : // create a new chunk full of zeroes.
2488 0 : MOZ_ASSERT(mDataSize / kChunkSize == aChunkIdx);
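// Note (added for clarity, not part of the original source): aChunkIdx is
// the chunk containing the current end of data (mDataSize), so only the
// tail of that chunk, from its current data size up to kChunkSize, is
// zero-filled below.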
2489 :
2490 : nsresult rv;
2491 0 : RefPtr<CacheFileChunk> chunk;
2492 0 : rv = GetChunkLocked(aChunkIdx, WRITER, nullptr, getter_AddRefs(chunk));
2493 0 : NS_ENSURE_SUCCESS(rv, rv);
2494 :
2495 0 : LOG(("CacheFile::PadChunkWithZeroes() - Zeroing hole in chunk %d, range %d-%d"
2496 : " [this=%p]", aChunkIdx, chunk->DataSize(), kChunkSize - 1, this));
2497 :
2498 0 : CacheFileChunkWriteHandle hnd = chunk->GetWriteHandle(kChunkSize);
2499 0 : if (!hnd.Buf()) {
2500 0 : ReleaseOutsideLock(chunk.forget());
2501 0 : SetError(NS_ERROR_OUT_OF_MEMORY);
2502 0 : return NS_ERROR_OUT_OF_MEMORY;
2503 : }
2504 :
2505 0 : uint32_t offset = hnd.DataSize();
2506 0 : memset(hnd.Buf() + offset, 0, kChunkSize - offset);
2507 0 : hnd.UpdateDataSize(offset, kChunkSize - offset);
2508 :
2509 0 : ReleaseOutsideLock(chunk.forget());
2510 :
2511 0 : return NS_OK;
2512 : }
2513 :
2514 : void
2515 0 : CacheFile::SetError(nsresult aStatus)
2516 : {
2517 0 : AssertOwnsLock();
2518 :
2519 0 : if (NS_SUCCEEDED(mStatus)) {
2520 0 : mStatus = aStatus;
2521 0 : if (mHandle) {
2522 0 : CacheFileIOManager::DoomFile(mHandle, nullptr);
2523 : }
2524 : }
2525 0 : }
2526 :
2527 : nsresult
2528 5 : CacheFile::InitIndexEntry()
2529 : {
2530 5 : MOZ_ASSERT(mHandle);
2531 :
2532 5 : if (mHandle->IsDoomed())
2533 0 : return NS_OK;
2534 :
2535 : nsresult rv;
2536 :
2537 10 : rv = CacheFileIOManager::InitIndexEntry(
2538 : mHandle, GetOriginAttrsHash(mMetadata->OriginAttributes()),
2539 15 : mMetadata->IsAnonymous(), mPinned);
2540 5 : NS_ENSURE_SUCCESS(rv, rv);
2541 :
2542 : uint32_t expTime;
2543 5 : mMetadata->GetExpirationTime(&expTime);
2544 :
2545 : uint32_t frecency;
2546 5 : mMetadata->GetFrecency(&frecency);
2547 :
2548 5 : bool hasAltData = mMetadata->GetElement(CacheFileUtils::kAltDataKey) ? true : false;
2549 :
2550 10 : static auto toUint16 = [](const char* s) -> uint16_t {
2551 10 : if (s) {
2552 : nsresult rv;
2553 4 : uint64_t n64 = nsCString(s).ToInteger64(&rv);
2554 4 : MOZ_ASSERT(NS_SUCCEEDED(rv));
2555 4 : return n64 <= kIndexTimeOutOfBound ? n64 : kIndexTimeOutOfBound ;
2556 : }
2557 6 : return kIndexTimeNotAvailable;
2558 : };
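// Illustrative example (added for clarity, not part of the original source):
// "123" maps to 123, a value exceeding kIndexTimeOutOfBound is clamped to
// kIndexTimeOutOfBound, and a missing metadata element maps to
// kIndexTimeNotAvailable.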
2559 :
2560 5 : const char *onStartTimeStr = mMetadata->GetElement("net-response-time-onstart");
2561 5 : uint16_t onStartTime = toUint16(onStartTimeStr);
2562 :
2563 5 : const char *onStopTimeStr = mMetadata->GetElement("net-response-time-onstop");
2564 5 : uint16_t onStopTime = toUint16(onStopTimeStr);
2565 :
2566 5 : rv = CacheFileIOManager::UpdateIndexEntry(mHandle, &frecency, &expTime, &hasAltData, &onStartTime, &onStopTime);
2567 5 : NS_ENSURE_SUCCESS(rv, rv);
2568 :
2569 5 : return NS_OK;
2570 : }
2571 :
2572 : size_t
2573 0 : CacheFile::SizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const
2574 : {
2575 0 : CacheFileAutoLock lock(const_cast<CacheFile*>(this));
2576 :
2577 0 : size_t n = 0;
2578 0 : n += mKey.SizeOfExcludingThisIfUnshared(mallocSizeOf);
2579 0 : n += mChunks.ShallowSizeOfExcludingThis(mallocSizeOf);
2580 0 : for (auto iter = mChunks.ConstIter(); !iter.Done(); iter.Next()) {
2581 0 : n += iter.Data()->SizeOfIncludingThis(mallocSizeOf);
2582 : }
2583 0 : n += mCachedChunks.ShallowSizeOfExcludingThis(mallocSizeOf);
2584 0 : for (auto iter = mCachedChunks.ConstIter(); !iter.Done(); iter.Next()) {
2585 0 : n += iter.Data()->SizeOfIncludingThis(mallocSizeOf);
2586 : }
2587 0 : if (mMetadata) {
2588 0 : n += mMetadata->SizeOfIncludingThis(mallocSizeOf);
2589 : }
2590 :
2591 : // Input streams are not elsewhere reported.
2592 0 : n += mInputs.ShallowSizeOfExcludingThis(mallocSizeOf);
2593 0 : for (uint32_t i = 0; i < mInputs.Length(); ++i) {
2594 0 : n += mInputs[i]->SizeOfIncludingThis(mallocSizeOf);
2595 : }
2596 :
2597 : // Output streams are not elsewhere reported.
2598 0 : if (mOutput) {
2599 0 : n += mOutput->SizeOfIncludingThis(mallocSizeOf);
2600 : }
2601 :
2602 : // The listeners are usually classes reported just above.
2603 0 : n += mChunkListeners.ShallowSizeOfExcludingThis(mallocSizeOf);
2604 0 : n += mObjsToRelease.ShallowSizeOfExcludingThis(mallocSizeOf);
2605 :
2606 : // mHandle reported directly from CacheFileIOManager.
2607 :
2608 0 : return n;
2609 : }
2610 :
2611 : size_t
2612 0 : CacheFile::SizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const
2613 : {
2614 0 : return mallocSizeOf(this) + SizeOfExcludingThis(mallocSizeOf);
2615 : }
2616 :
2617 : } // namespace net
2618 : } // namespace mozilla