Line data Source code
1 : /* This Source Code Form is subject to the terms of the Mozilla Public
2 : * License, v. 2.0. If a copy of the MPL was not distributed with this
3 : * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
4 :
5 : #include "CacheIOThread.h"
6 : #include "CacheFileIOManager.h"
7 :
8 : #include "nsIRunnable.h"
9 : #include "nsISupportsImpl.h"
10 : #include "nsPrintfCString.h"
11 : #include "nsThreadUtils.h"
12 : #include "mozilla/IOInterposer.h"
13 : #include "GeckoProfiler.h"
14 :
15 : #ifdef XP_WIN
16 : #include <windows.h>
17 : #endif
18 :
19 : #ifdef MOZ_TASK_TRACER
20 : #include "GeckoTaskTracer.h"
21 : #include "TracedTaskCommon.h"
22 : #endif
23 :
24 : namespace mozilla {
25 : namespace net {
26 :
27 : namespace { // anon
28 :
// Throttled reporter of CacheIOThread event-queue depths into telemetry.
// All state is static; NOTE(review): appears to be touched only from
// LoopOneLevel() on the IO thread — confirm before adding other callers.
class CacheIOTelemetry
{
public:
  typedef CacheIOThread::EventQueue::size_type size_type;
  // Per-level queue length threshold; Report() is a no-op until the level's
  // queue grows to at least this length (see Report for how it advances).
  static size_type mMinLengthToReport[CacheIOThread::LAST_LEVEL];
  static void Report(uint32_t aLevel, size_type aLength);
};
36 :
37 : static CacheIOTelemetry::size_type const kGranularity = 30;
38 :
39 : CacheIOTelemetry::size_type
40 : CacheIOTelemetry::mMinLengthToReport[CacheIOThread::LAST_LEVEL] = {
41 : kGranularity, kGranularity, kGranularity, kGranularity,
42 : kGranularity, kGranularity, kGranularity, kGranularity
43 : };
44 :
// static
// Accumulates the queue length of the given level into the matching
// HTTP_CACHE_IO_QUEUE_2_* probe.  Reports are throttled: after each report
// the per-level threshold is advanced by one kGranularity bucket, so a level
// only reports again once its queue has grown past the next bucket boundary.
void CacheIOTelemetry::Report(uint32_t aLevel, CacheIOTelemetry::size_type aLength)
{
  if (mMinLengthToReport[aLevel] > aLength) {
    // Queue not long enough (yet) to be worth reporting.
    return;
  }

  // Indexed directly by aLevel; the order must match the queue level order
  // used by CacheIOThread.
  static Telemetry::HistogramID telemetryID[] = {
    Telemetry::HTTP_CACHE_IO_QUEUE_2_OPEN_PRIORITY,
    Telemetry::HTTP_CACHE_IO_QUEUE_2_READ_PRIORITY,
    Telemetry::HTTP_CACHE_IO_QUEUE_2_MANAGEMENT,
    Telemetry::HTTP_CACHE_IO_QUEUE_2_OPEN,
    Telemetry::HTTP_CACHE_IO_QUEUE_2_READ,
    Telemetry::HTTP_CACHE_IO_QUEUE_2_WRITE_PRIORITY,
    Telemetry::HTTP_CACHE_IO_QUEUE_2_WRITE,
    Telemetry::HTTP_CACHE_IO_QUEUE_2_INDEX,
    Telemetry::HTTP_CACHE_IO_QUEUE_2_EVICT
  };

  // Each bucket is a multiply of kGranularity (30, 60, 90..., 300+)
  aLength = (aLength / kGranularity);
  // Next time report only when over the current length + kGranularity
  mMinLengthToReport[aLevel] = (aLength + 1) * kGranularity;

  // 10 is number of buckets we have in each probe
  aLength = std::min<size_type>(aLength, 10);

  // NOTE(review): the subtraction assumes aLength >= 1 here, i.e. that
  // mMinLengthToReport[aLevel] never drops below kGranularity — verify the
  // table is fully initialized.
  Telemetry::Accumulate(telemetryID[aLevel], aLength - 1); // counted from 0
}
74 :
75 : } // anon
76 :
77 : namespace detail {
78 :
/**
 * Helper class encapsulating platform-specific code to cancel
 * any pending IO operation taking too long. Solely used during
 * shutdown to prevent any IO shutdown hangs.
 * Mainly designed for using Win32 CancelSynchronousIo function.
 * On non-Windows platforms every method is a no-op stub (see below).
 */
class BlockingIOWatcher
{
#ifdef XP_WIN
  // Pointer to CancelSynchronousIo, resolved dynamically from kernel32 in
  // the constructor rather than linked statically.
  typedef BOOL(WINAPI* TCancelSynchronousIo)(HANDLE hThread);
  TCancelSynchronousIo mCancelSynchronousIo;
  // The native handle to the thread
  HANDLE mThread;
  // Event signaling back to the main thread, see NotifyOperationDone.
  HANDLE mEvent;
#endif

public:
  // Created and destroyed on the main thread only
  BlockingIOWatcher();
  ~BlockingIOWatcher();

  // Called on the IO thread to grab the platform specific
  // reference to it.
  void InitThread();
  // If there is a blocking operation being handled on the IO
  // thread, this is called on the main thread during shutdown.
  // Waits for notification from the IO thread for up to two seconds.
  // If that times out, it attempts to cancel the IO operation.
  void WatchAndCancel(Monitor& aMonitor);
  // Called by the IO thread after each operation has been
  // finished (after each Run() call). This wakes the main
  // thread up and makes WatchAndCancel() early exit and become
  // a no-op.
  void NotifyOperationDone();
};
115 :
116 : #ifdef XP_WIN
117 :
// Main thread only.  Resolves CancelSynchronousIo at runtime — presumably
// so the code still loads on Windows versions lacking the export (TODO
// confirm the minimum supported version).  When either the lookup or the
// event creation fails, the relevant members stay NULL and WatchAndCancel()
// degrades to a no-op.
BlockingIOWatcher::BlockingIOWatcher()
  : mCancelSynchronousIo(NULL)
  , mThread(NULL)
  , mEvent(NULL)
{
  HMODULE kernel32_dll = GetModuleHandle("kernel32.dll");
  if (!kernel32_dll) {
    return;
  }

  FARPROC ptr = GetProcAddress(kernel32_dll, "CancelSynchronousIo");
  if (!ptr) {
    return;
  }

  mCancelSynchronousIo = reinterpret_cast<TCancelSynchronousIo>(ptr);

  // Manual-reset event (2nd arg TRUE), initially non-signaled; reset in
  // WatchAndCancel() and signaled by NotifyOperationDone().
  mEvent = ::CreateEvent(NULL, TRUE, FALSE, NULL);
}
137 :
138 : BlockingIOWatcher::~BlockingIOWatcher()
139 : {
140 : if (mEvent) {
141 : CloseHandle(mEvent);
142 : }
143 : if (mThread) {
144 : CloseHandle(mThread);
145 : }
146 : }
147 :
148 : void BlockingIOWatcher::InitThread()
149 : {
150 : // GetCurrentThread() only returns a pseudo handle, hence DuplicateHandle
151 : BOOL result = ::DuplicateHandle(
152 : GetCurrentProcess(),
153 : GetCurrentThread(),
154 : GetCurrentProcess(),
155 : &mThread,
156 : 0,
157 : FALSE,
158 : DUPLICATE_SAME_ACCESS);
159 : }
160 :
161 : void BlockingIOWatcher::WatchAndCancel(Monitor& aMonitor)
162 : {
163 : if (!mEvent) {
164 : return;
165 : }
166 :
167 : // Reset before we enter the monitor to raise the chance we catch
168 : // the currently pending IO op completion.
169 : ::ResetEvent(mEvent);
170 :
171 : HANDLE thread;
172 : {
173 : MonitorAutoLock lock(aMonitor);
174 : thread = mThread;
175 :
176 : if (!thread) {
177 : return;
178 : }
179 : }
180 :
181 : LOG(("Blocking IO operation pending on IO thread, waiting..."));
182 :
183 : // It seems wise to use the I/O lag time as a maximum time to wait
184 : // for an operation to finish. When that times out and cancelation
185 : // succeeds, there will be no other IO operation permitted. By default
186 : // this is two seconds.
187 : uint32_t maxLag = std::min<uint32_t>(5, CacheObserver::MaxShutdownIOLag()) * 1000;
188 :
189 : DWORD result = ::WaitForSingleObject(mEvent, maxLag);
190 : if (result == WAIT_TIMEOUT) {
191 : LOG(("CacheIOThread: Attempting to cancel a long blocking IO operation"));
192 : BOOL result = mCancelSynchronousIo(thread);
193 : if (result) {
194 : LOG((" cancelation signal succeeded"));
195 : } else {
196 : DWORD error = GetLastError();
197 : LOG((" cancelation signal failed with GetLastError=%u", error));
198 : }
199 : }
200 : }
201 :
202 : void BlockingIOWatcher::NotifyOperationDone()
203 : {
204 : if (mEvent) {
205 : ::SetEvent(mEvent);
206 : }
207 : }
208 :
209 : #else // WIN
210 :
// Stub code only (we don't implement IO cancelation for this platform)
// All members are no-ops so CacheIOThread can call them unconditionally.

BlockingIOWatcher::BlockingIOWatcher() { }
BlockingIOWatcher::~BlockingIOWatcher() { }
void BlockingIOWatcher::InitThread() { }
void BlockingIOWatcher::WatchAndCancel(Monitor&) { }
void BlockingIOWatcher::NotifyOperationDone() { }
218 :
219 : #endif
220 :
221 : } // detail
222 :
// Raw (non-owning) pointer to the singleton instance; set in the
// constructor, cleared in the destructor.  Used by Cancelable to reach the
// thread without holding a reference.
CacheIOThread* CacheIOThread::sSelf = nullptr;

NS_IMPL_ISUPPORTS(CacheIOThread, nsIThreadObserver)
226 :
227 1 : CacheIOThread::CacheIOThread()
228 : : mMonitor("CacheIOThread")
229 : , mThread(nullptr)
230 : , mXPCOMThread(nullptr)
231 : , mLowestLevelWaiting(LAST_LEVEL)
232 : , mCurrentlyExecutingLevel(0)
233 : , mHasXPCOMEvents(false)
234 : , mRerunCurrentEvent(false)
235 : , mShutdown(false)
236 : , mIOCancelableEvents(0)
237 : , mEventCounter(0)
238 : #ifdef DEBUG
239 1 : , mInsideLoop(true)
240 : #endif
241 : {
242 10 : for (uint32_t i = 0; i < LAST_LEVEL; ++i) {
243 9 : mQueueLength[i] = 0;
244 : }
245 :
246 1 : sSelf = this;
247 1 : }
248 :
CacheIOThread::~CacheIOThread()
{
  if (mXPCOMThread) {
    // Balances the manual reference taken via forget().take() in
    // ThreadFunc(), where mXPCOMThread was published as a raw pointer.
    nsIThread *thread = mXPCOMThread;
    thread->Release();
  }

  sSelf = nullptr;
#ifdef DEBUG
  // All queues must have been drained before destruction.
  for (uint32_t level = 0; level < LAST_LEVEL; ++level) {
    MOZ_ASSERT(!mEventQueue[level].Length());
  }
#endif
}
263 :
264 1 : nsresult CacheIOThread::Init()
265 : {
266 : {
267 2 : MonitorAutoLock lock(mMonitor);
268 : // Yeah, there is not a thread yet, but we want to make sure
269 : // the sequencing is correct.
270 1 : mBlockingIOWatcher = MakeUnique<detail::BlockingIOWatcher>();
271 : }
272 :
273 1 : mThread = PR_CreateThread(PR_USER_THREAD, ThreadFunc, this,
274 : PR_PRIORITY_NORMAL, PR_GLOBAL_THREAD,
275 : PR_JOINABLE_THREAD, 128 * 1024);
276 1 : if (!mThread) {
277 0 : return NS_ERROR_FAILURE;
278 : }
279 :
280 1 : return NS_OK;
281 : }
282 :
283 56 : nsresult CacheIOThread::Dispatch(nsIRunnable* aRunnable, uint32_t aLevel)
284 : {
285 56 : return Dispatch(do_AddRef(aRunnable), aLevel);
286 : }
287 :
// Queues aRunnable on the given level.  Callable from any thread; after
// shutdown has started only the IO thread itself may still dispatch —
// presumably for events posted by handlers draining during shutdown
// (verify against callers).
nsresult CacheIOThread::Dispatch(already_AddRefed<nsIRunnable> aRunnable,
                                 uint32_t aLevel)
{
  NS_ENSURE_ARG(aLevel < LAST_LEVEL);

  nsCOMPtr<nsIRunnable> runnable(aRunnable);

  // Runnable is always expected to be non-null, hard null-check bellow.
  MOZ_ASSERT(runnable);

  MonitorAutoLock lock(mMonitor);

  if (mShutdown && (PR_GetCurrentThread() != mThread))
    return NS_ERROR_UNEXPECTED;

  return DispatchInternal(runnable.forget(), aLevel);
}
305 :
// Dispatches aRunnable at OPEN_PRIORITY, but only after every currently
// queued OPEN event: all pending OPEN events are promoted to OPEN_PRIORITY
// first, then the runnable is appended behind them.
nsresult CacheIOThread::DispatchAfterPendingOpens(nsIRunnable* aRunnable)
{
  // Runnable is always expected to be non-null, hard null-check bellow.
  MOZ_ASSERT(aRunnable);

  MonitorAutoLock lock(mMonitor);

  if (mShutdown && (PR_GetCurrentThread() != mThread))
    return NS_ERROR_UNEXPECTED;

  // Move everything from later executed OPEN level to the OPEN_PRIORITY level
  // where we post the (eviction) runnable.
  mQueueLength[OPEN_PRIORITY] += mEventQueue[OPEN].Length();
  mQueueLength[OPEN] -= mEventQueue[OPEN].Length();
  mEventQueue[OPEN_PRIORITY].AppendElements(mEventQueue[OPEN]);
  mEventQueue[OPEN].Clear();

  return DispatchInternal(do_AddRef(aRunnable), OPEN_PRIORITY);
}
325 :
// Appends the runnable to the given level's queue and wakes the loop.
// The caller must hold mMonitor (asserted below).
nsresult CacheIOThread::DispatchInternal(already_AddRefed<nsIRunnable> aRunnable,
                                         uint32_t aLevel)
{
  nsCOMPtr<nsIRunnable> runnable(aRunnable);
#ifdef MOZ_TASK_TRACER
  // Wrap the runnable for task-tracer builds so the dispatch is logged.
  if (tasktracer::IsStartLogging()) {
    runnable = tasktracer::CreateTracedRunnable(runnable.forget());
    (static_cast<tasktracer::TracedRunnable*>(runnable.get()))->DispatchTask();
  }
#endif

  if (NS_WARN_IF(!runnable))
    return NS_ERROR_NULL_POINTER;

  mMonitor.AssertCurrentThreadOwns();

  ++mQueueLength[aLevel];
  mEventQueue[aLevel].AppendElement(runnable.forget());
  // Lower level == higher priority; remember the most urgent level with
  // pending work so ThreadFunc/LoopOneLevel can preempt for it.
  if (mLowestLevelWaiting > aLevel)
    mLowestLevelWaiting = aLevel;

  // Wake the IO thread loop (and anything waiting on the monitor).
  mMonitor.NotifyAll();

  return NS_OK;
}
351 :
352 233 : bool CacheIOThread::IsCurrentThread()
353 : {
354 233 : return mThread == PR_GetCurrentThread();
355 : }
356 :
357 5 : uint32_t CacheIOThread::QueueSize(bool highPriority)
358 : {
359 10 : MonitorAutoLock lock(mMonitor);
360 5 : if (highPriority) {
361 3 : return mQueueLength[OPEN_PRIORITY] + mQueueLength[READ_PRIORITY];
362 : }
363 :
364 4 : return mQueueLength[OPEN_PRIORITY] + mQueueLength[READ_PRIORITY] +
365 4 : mQueueLength[MANAGEMENT] + mQueueLength[OPEN] + mQueueLength[READ];
366 : }
367 :
// Called from an event running on the IO thread.  Returns true when the
// event should stop and let higher-priority work run; in that case
// mRerunCurrentEvent makes LoopOneLevel() re-queue the current event.
bool CacheIOThread::YieldInternal()
{
  if (!IsCurrentThread()) {
    NS_WARNING("Trying to yield to priority events on non-cache2 I/O thread? "
               "You probably do something wrong.");
    return false;
  }

  if (mCurrentlyExecutingLevel == XPCOM_LEVEL) {
    // Doesn't make any sense, since this handler is the one
    // that would be executed as the next one.
    return false;
  }

  // Nothing more urgent than the current level is waiting.
  if (!EventsPending(mCurrentlyExecutingLevel))
    return false;

  mRerunCurrentEvent = true;
  return true;
}
388 :
// Flags the loop to exit and joins the IO thread.  Must not be called on
// the IO thread itself (joins it).  Safe to call when Init() failed.
void CacheIOThread::Shutdown()
{
  if (!mThread) {
    return;
  }

  {
    MonitorAutoLock lock(mMonitor);
    mShutdown = true;
    // Wake the loop out of lock.Wait() so it observes mShutdown.
    mMonitor.NotifyAll();
  }

  // Blocks until ThreadFunc() returns.
  PR_JoinThread(mThread);
  mThread = nullptr;
}
404 :
// Shutdown-time attempt to cancel an IO operation blocking the IO thread.
// Only effective on Windows (see BlockingIOWatcher); elsewhere a no-op.
void CacheIOThread::CancelBlockingIO()
{
  // This is an attempt to cancel any blocking I/O operation taking
  // too long time.
  if (!mBlockingIOWatcher) {
    return;
  }

  // NOTE(review): mIOCancelableEvents is read without the monitor here;
  // presumably it is an atomic counter maintained by Cancelable — confirm
  // in the header.
  if (!mIOCancelableEvents) {
    LOG(("CacheIOThread::CancelBlockingIO, no blocking operation to cancel"));
    return;
  }

  // OK, when we are here, we are processing an IO on the thread that
  // can be cancelled.
  mBlockingIOWatcher->WatchAndCancel(mMonitor);
}
422 :
// Returns the wrapped XPCOM thread as an event target.  When called between
// Init() and the moment ThreadFunc() publishes mXPCOMThread, blocks on the
// monitor until the publication is notified (see lock.NotifyAll there).
already_AddRefed<nsIEventTarget> CacheIOThread::Target()
{
  nsCOMPtr<nsIEventTarget> target;

  target = mXPCOMThread;
  if (!target && mThread)
  {
    MonitorAutoLock lock(mMonitor);
    while (!mXPCOMThread) {
      lock.Wait();
    }

    target = mXPCOMThread;
  }

  return target.forget();
}
440 :
// static
// PR_CreateThread entry point: names the thread, registers it with the IO
// interposer, and runs the member loop until shutdown.
void CacheIOThread::ThreadFunc(void* aClosure)
{
  // XXXmstange We'd like to register this thread with the profiler, but doing
  // so causes leaks, see bug 1323100.
  NS_SetCurrentThreadName("Cache2 I/O");

  mozilla::IOInterposer::RegisterCurrentThread();
  CacheIOThread* thread = static_cast<CacheIOThread*>(aClosure);
  // Blocks here for the thread's whole lifetime.
  thread->ThreadFunc();
  mozilla::IOInterposer::UnregisterCurrentThread();
}
453 :
// The IO thread main loop.  Registers this thread with XPCOM, publishes
// mXPCOMThread, then repeatedly: drain XPCOM events first, then walk the
// cache level queues in priority order (lower level == higher priority),
// restarting from the top whenever any level executed so newly arrived
// higher-priority work preempts lower levels.  Exits when Shutdown() sets
// mShutdown and no events remain.  Runs with mMonitor held except while
// actually executing events.
void CacheIOThread::ThreadFunc()
{
  nsCOMPtr<nsIThreadInternal> threadInternal;

  {
    MonitorAutoLock lock(mMonitor);

    MOZ_ASSERT(mBlockingIOWatcher);
    mBlockingIOWatcher->InitThread();

    // This creates nsThread for this PRThread
    nsCOMPtr<nsIThread> xpcomThread = NS_GetCurrentThread();

    threadInternal = do_QueryInterface(xpcomThread);
    if (threadInternal)
      threadInternal->SetObserver(this);

    // Publish as a raw, manually add-ref'ed pointer; released in the dtor.
    mXPCOMThread = xpcomThread.forget().take();

    // Wake Target(), which may be blocked waiting for mXPCOMThread.
    lock.NotifyAll();

    do {
    loopStart:
      // Reset the lowest level now, so that we can detect a new event on
      // a lower level (i.e. higher priority) has been scheduled while
      // executing any previously scheduled event.
      mLowestLevelWaiting = LAST_LEVEL;

      // Process xpcom events first
      while (mHasXPCOMEvents) {
        mHasXPCOMEvents = false;
        mCurrentlyExecutingLevel = XPCOM_LEVEL;

        // Events run unlocked; OnDispatchedEvent() may retake the monitor.
        MonitorAutoUnlock unlock(mMonitor);

        bool processedEvent;
        nsresult rv;
        do {
          nsIThread *thread = mXPCOMThread;
          rv = thread->ProcessNextEvent(false, &processedEvent);

          ++mEventCounter;
          MOZ_ASSERT(mBlockingIOWatcher);
          mBlockingIOWatcher->NotifyOperationDone();
        } while (NS_SUCCEEDED(rv) && processedEvent);
      }

      uint32_t level;
      for (level = 0; level < LAST_LEVEL; ++level) {
        if (!mEventQueue[level].Length()) {
          // no events on this level, go to the next level
          continue;
        }

        LoopOneLevel(level);

        // Go to the first (lowest) level again
        goto loopStart;
      }

      if (EventsPending()) {
        continue;
      }

      if (mShutdown) {
        break;
      }

      // Idle: sleep until Dispatch*/OnDispatchedEvent/Shutdown notifies.
      lock.Wait(PR_INTERVAL_NO_TIMEOUT);

    } while (true);

    MOZ_ASSERT(!EventsPending());

#ifdef DEBUG
    // This is for correct assertion on XPCOM events dispatch.
    mInsideLoop = false;
#endif
  } // lock

  if (threadInternal)
    threadInternal->SetObserver(nullptr);
}
537 :
// Executes the events queued at aLevel.  Called with mMonitor held: the
// whole level queue is swapped out, then events run with the monitor
// unlocked.  If a higher-priority event arrives (EventsPending) or the
// current event yields (mRerunCurrentEvent via YieldInternal), the
// unexecuted tail — including the yielding event — is put back at the
// front of the level's queue.
void CacheIOThread::LoopOneLevel(uint32_t aLevel)
{
  EventQueue events;
  events.SwapElements(mEventQueue[aLevel]);
  EventQueue::size_type length = events.Length();

  mCurrentlyExecutingLevel = aLevel;

  bool returnEvents = false;
  bool reportTelemetry = true;

  EventQueue::size_type index;
  {
    MonitorAutoUnlock unlock(mMonitor);

    for (index = 0; index < length; ++index) {
      if (EventsPending(aLevel)) {
        // Somebody scheduled a new event on a lower level, break and harry
        // to execute it! Don't forget to return what we haven't exec.
        returnEvents = true;
        break;
      }

      if (reportTelemetry) {
        // Report the queue length at most once per run of this level.
        reportTelemetry = false;
        CacheIOTelemetry::Report(aLevel, length);
      }

      // Drop any previous flagging, only an event on the current level may set
      // this flag.
      mRerunCurrentEvent = false;

      events[index]->Run();

      MOZ_ASSERT(mBlockingIOWatcher);
      mBlockingIOWatcher->NotifyOperationDone();

      if (mRerunCurrentEvent) {
        // The event handler yields to higher priority events and wants to rerun.
        // Note: mQueueLength/mEventCounter are deliberately not updated, the
        // event stays accounted as queued.
        returnEvents = true;
        break;
      }

      ++mEventCounter;
      --mQueueLength[aLevel];

      // Release outside the lock.
      events[index] = nullptr;
    }
  }

  if (returnEvents)
    mEventQueue[aLevel].InsertElementsAt(0, events.Elements() + index, length - index);
}
592 :
593 102 : bool CacheIOThread::EventsPending(uint32_t aLastLevel)
594 : {
595 102 : return mLowestLevelWaiting < aLastLevel || mHasXPCOMEvents;
596 : }
597 :
// nsIThreadObserver: invoked when an event is dispatched to the wrapped
// XPCOM thread (from any thread).  Flags the loop and wakes it so it drains
// the XPCOM queue first.
NS_IMETHODIMP CacheIOThread::OnDispatchedEvent(nsIThreadInternal *thread)
{
  MonitorAutoLock lock(mMonitor);
  mHasXPCOMEvents = true;
  // Dispatching after ThreadFunc() left its loop would be lost.
  MOZ_ASSERT(mInsideLoop);
  lock.Notify();
  return NS_OK;
}
606 :
// nsIThreadObserver: no-op, implemented only to satisfy the interface.
NS_IMETHODIMP CacheIOThread::OnProcessNextEvent(nsIThreadInternal *thread, bool mayWait)
{
  return NS_OK;
}
611 :
// nsIThreadObserver: no-op, implemented only to satisfy the interface.
NS_IMETHODIMP CacheIOThread::AfterProcessNextEvent(nsIThreadInternal *thread,
                                                   bool eventWasProcessed)
{
  return NS_OK;
}
617 :
618 : // Memory reporting
619 :
// Memory-reporter callback: shallow size of the queues (not the queued
// events, see below).  The const_cast is needed only to lock the monitor
// from this const method.
size_t CacheIOThread::SizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const
{
  MonitorAutoLock lock(const_cast<CacheIOThread*>(this)->mMonitor);

  size_t n = 0;
  n += mallocSizeOf(mThread);
  for (uint32_t level = 0; level < LAST_LEVEL; ++level) {
    n += mEventQueue[level].ShallowSizeOfExcludingThis(mallocSizeOf);
    // Events referenced by the queues are arbitrary objects we cannot be sure
    // are reported elsewhere as well as probably not implementing nsISizeOf
    // interface. Deliberatly omitting them from reporting here.
  }

  return n;
}
635 :
636 0 : size_t CacheIOThread::SizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const
637 : {
638 0 : return mallocSizeOf(this) + SizeOfExcludingThis(mallocSizeOf);
639 : }
640 :
// RAII marker scoping a (possibly) cancelable blocking IO operation on the
// IO thread: bumps the cancelable-operation counter so CancelBlockingIO()
// knows whether there is anything worth canceling.
CacheIOThread::Cancelable::Cancelable(bool aCancelable)
  : mCancelable(aCancelable)
{
  // This will only ever be used on the I/O thread,
  // which is expected to be alive longer than this class.
  MOZ_ASSERT(CacheIOThread::sSelf);
  MOZ_ASSERT(CacheIOThread::sSelf->IsCurrentThread());

  if (mCancelable) {
    ++CacheIOThread::sSelf->mIOCancelableEvents;
  }
}
653 :
// Balances the increment done in the constructor (when aCancelable held).
CacheIOThread::Cancelable::~Cancelable()
{
  MOZ_ASSERT(CacheIOThread::sSelf);

  if (mCancelable) {
    --CacheIOThread::sSelf->mIOCancelableEvents;
  }
}
662 :
663 : } // namespace net
664 : } // namespace mozilla
|