LCOV - code coverage report
Current view: top level - netwerk/ipc - ChannelEventQueue.h (source / functions)
Test:       output.info
Date:       2017-07-14 16:53:18
                          Hit    Total    Coverage
Lines:                     86      101      85.1 %
Functions:                 17       32      53.1 %
Legend:     Lines: hit | not hit

          Line data    Source code
       1             : /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
       2             :  * vim: set sw=2 ts=8 et tw=80 :
       3             :  */
       4             : /* This Source Code Form is subject to the terms of the Mozilla Public
       5             :  * License, v. 2.0. If a copy of the MPL was not distributed with this
       6             :  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
       7             : 
       8             : #ifndef mozilla_net_ChannelEventQueue_h
       9             : #define mozilla_net_ChannelEventQueue_h
      10             : 
      11             : #include "nsTArray.h"
      12             : #include "nsAutoPtr.h"
      13             : #include "nsIEventTarget.h"
      14             : #include "nsThreadUtils.h"
      15             : #include "nsXULAppAPI.h"
      16             : #include "mozilla/DebugOnly.h"
      17             : #include "mozilla/Mutex.h"
      18             : #include "mozilla/ReentrantMonitor.h"
      19             : #include "mozilla/UniquePtr.h"
      20             : #include "mozilla/Unused.h"
      21             : 
      22             : class nsISupports;
      23             : 
      24             : namespace mozilla {
      25             : namespace net {
      26             : 
      27             : class ChannelEvent
      28             : {
      29             :  public:
      30          16 :   ChannelEvent() { MOZ_COUNT_CTOR(ChannelEvent); }
      31          16 :   virtual ~ChannelEvent() { MOZ_COUNT_DTOR(ChannelEvent); }
      32             :   virtual void Run() = 0;
      33             :   virtual already_AddRefed<nsIEventTarget> GetEventTarget() = 0;
      34             : };
      35             : 
       36             : // Note that MainThreadChannelEvent should not be used in the child process,
       37             : // since GetEventTarget() directly returns an unlabeled event target.
      38             : class MainThreadChannelEvent : public ChannelEvent
      39             : {
      40             :  public:
      41           0 :   MainThreadChannelEvent() { MOZ_COUNT_CTOR(MainThreadChannelEvent); }
      42           0 :   virtual ~MainThreadChannelEvent() { MOZ_COUNT_DTOR(MainThreadChannelEvent); }
      43             : 
      44             :   already_AddRefed<nsIEventTarget>
      45           0 :   GetEventTarget() override
      46             :   {
      47           0 :     MOZ_ASSERT(XRE_IsParentProcess());
      48             : 
      49           0 :     return do_AddRef(GetMainThreadEventTarget());
      50             :   }
      51             : };
      52             : 
       53             : // This event is designed to be used only for e10s child channels.
       54             : // The goal is to force the child channel to implement GetNeckoTarget(),
       55             : // which should return a labeled main thread event target so that this
       56             : // channel event can be dispatched correctly.
      57             : template<typename T>
      58             : class NeckoTargetChannelEvent : public ChannelEvent
      59             : {
      60             : public:
      61          13 :   explicit NeckoTargetChannelEvent(T *aChild)
      62          13 :     : mChild(aChild)
      63             :   {
      64          13 :     MOZ_COUNT_CTOR(NeckoTargetChannelEvent);
      65          13 :   }
      66          13 :   virtual ~NeckoTargetChannelEvent()
      67             :   {
      68          13 :     MOZ_COUNT_DTOR(NeckoTargetChannelEvent);
      69          26 :   }
      70             : 
      71             :   already_AddRefed<nsIEventTarget>
      72          17 :   GetEventTarget() override
      73             :   {
      74          17 :     MOZ_ASSERT(mChild);
      75             : 
      76          17 :     return mChild->GetNeckoTarget();
      77             :   }
      78             : 
      79             : protected:
      80             :   T *mChild;
      81             : };
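
The template above is only a thin adapter; a concrete event supplies Run(). A
minimal sketch of the intended pattern, using hypothetical names
(MyChannelChild, StopRequestEvent, DoOnStopRequest) that are not part of this
header:

  // Hypothetical e10s channel child, reduced to what the event needs.
  // GetNeckoTarget() is assumed to return the labeled main-thread event
  // target associated with this channel.
  class MyChannelChild
  {
   public:
    already_AddRefed<nsIEventTarget> GetNeckoTarget();
    void DoOnStopRequest(nsresult aStatus);
  };

  // NeckoTargetChannelEvent<T> already implements GetEventTarget() through
  // mChild->GetNeckoTarget(), so the subclass only has to implement Run().
  class StopRequestEvent final : public NeckoTargetChannelEvent<MyChannelChild>
  {
   public:
    StopRequestEvent(MyChannelChild* aChild, nsresult aStatus)
      : NeckoTargetChannelEvent<MyChannelChild>(aChild)
      , mStatus(aStatus)
    {}

    void Run() override { mChild->DoOnStopRequest(mStatus); }

   private:
    nsresult mStatus;
  };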
      82             : 
       83             : // Workaround for Necko re-entrancy dangers. We buffer IPDL messages in a
       84             : // queue if we are still dispatching previous one(s) to listeners/observers.
       85             : // Otherwise synchronous XMLHttpRequests and/or other code that spins the
       86             : // event loop (e.g. IPDL RPC) could cause listener->OnDataAvailable (for
       87             : // instance) to be dispatched and called before mListener->OnStartRequest has
       88             : // completed.
      89             : 
      90             : class ChannelEventQueue final
      91             : {
      92          52 :   NS_INLINE_DECL_THREADSAFE_REFCOUNTING(ChannelEventQueue)
      93             : 
      94             :  public:
      95           6 :   explicit ChannelEventQueue(nsISupports *owner)
      96           6 :     : mSuspendCount(0)
      97             :     , mSuspended(false)
      98             :     , mForcedCount(0)
      99             :     , mFlushing(false)
     100             :     , mOwner(owner)
     101             :     , mMutex("ChannelEventQueue::mMutex")
     102           6 :     , mRunningMonitor("ChannelEventQueue::mRunningMonitor")
     103           6 :   {}
     104             : 
      105             :   // Puts an IPDL-generated channel event into the queue, to be run later
      106             :   // automatically when EndForcedQueueing and/or Resume is called.
      107             :   //
      108             :   // @param aCallback - the ChannelEvent
      109             :   // @param aAssertionWhenNotQueued - if true, a release assertion fires when
      110             :   //   the event is executed directly instead of being queued.
     111             :   inline void RunOrEnqueue(ChannelEvent* aCallback,
     112             :                            bool aAssertionWhenNotQueued = false);
     113             : 
      114             :   // Prepend ChannelEvent(s) at the front of the event queue.
     115             :   inline nsresult PrependEvent(UniquePtr<ChannelEvent>& aEvent);
     116             :   inline nsresult PrependEvents(nsTArray<UniquePtr<ChannelEvent>>& aEvents);
     117             : 
     118             :   // After StartForcedQueueing is called, RunOrEnqueue() will start enqueuing
     119             :   // events that will be run/flushed when EndForcedQueueing is called.
     120             :   // - Note: queueing may still be required after EndForcedQueueing() (if the
     121             :   //   queue is suspended, etc):  always call RunOrEnqueue() to avoid race
     122             :   //   conditions.
     123             :   inline void StartForcedQueueing();
     124             :   inline void EndForcedQueueing();
     125             : 
     126             :   // Suspend/resume event queue.  RunOrEnqueue() will start enqueuing
     127             :   // events and they will be run/flushed when resume is called.  These should be
     128             :   // called when the channel owning the event queue is suspended/resumed.
     129             :   void Suspend();
     130             :   // Resume flushes the queue asynchronously, i.e. items in queue will be
     131             :   // dispatched in a new event on the current thread.
     132             :   void Resume();
     133             : 
     134             :  private:
     135             :   // Private destructor, to discourage deletion outside of Release():
     136           4 :   ~ChannelEventQueue()
     137           4 :   {
     138           4 :   }
     139             : 
     140             :   void SuspendInternal();
     141             :   void ResumeInternal();
     142             : 
     143             :   inline void MaybeFlushQueue();
     144             :   void FlushQueue();
     145             :   inline void CompleteResume();
     146             : 
     147             :   ChannelEvent* TakeEvent();
     148             : 
     149             :   nsTArray<UniquePtr<ChannelEvent>> mEventQueue;
     150             : 
     151             :   uint32_t mSuspendCount;
     152             :   bool mSuspended;
      153             :   uint32_t mForcedCount; // Support ForcedQueueing on multiple threads.
     154             :   bool mFlushing;
     155             : 
     156             :   // Keep ptr to avoid refcount cycle: only grab ref during flushing.
     157             :   nsISupports *mOwner;
     158             : 
      159             :   // For atomic mEventQueue operations and state updates.
     160             :   Mutex mMutex;
     161             : 
     162             :   // To guarantee event execution order among threads
     163             :   ReentrantMonitor mRunningMonitor;
     164             : 
     165             :   friend class AutoEventEnqueuer;
     166             : };
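
A sketch of how a channel typically owns and drives the queue, assuming the
hypothetical MyChannelChild from the earlier sketch is an nsISupports-derived
channel (e.g. it also implements nsIRequest) holding a
RefPtr<ChannelEventQueue> mEventQ member. The channel passes itself as the
owner so the queue can keep it alive while flushing, and forwards its own
Suspend()/Resume() to the queue:

  MyChannelChild::MyChannelChild()
    : mEventQ(new ChannelEventQueue(static_cast<nsISupports*>(this)))
  {
  }

  NS_IMETHODIMP
  MyChannelChild::Suspend()
  {
    // ... suspend the underlying request/IPC traffic ...
    mEventQ->Suspend();
    return NS_OK;
  }

  NS_IMETHODIMP
  MyChannelChild::Resume()
  {
    // ... resume the underlying request/IPC traffic ...
    // The flush is asynchronous: queued events run from a new event
    // dispatched on the current thread once the suspend count reaches zero.
    mEventQ->Resume();
    return NS_OK;
  }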
     167             : 
     168             : inline void
     169          16 : ChannelEventQueue::RunOrEnqueue(ChannelEvent* aCallback,
     170             :                                 bool aAssertionWhenNotQueued)
     171             : {
     172          16 :   MOZ_ASSERT(aCallback);
     173             : 
      174             :   // Executing the events could destroy the channel (and run our own
      175             :   // destructor) unless we make sure its refcount doesn't drop to 0 while this
      176             :   // method is running.
     177          21 :   nsCOMPtr<nsISupports> kungFuDeathGrip(mOwner);
     178             :   Unused << kungFuDeathGrip; // Not used in this function
     179             : 
     180             :   // To avoid leaks.
     181          21 :   UniquePtr<ChannelEvent> event(aCallback);
     182             : 
      183             :   // To guarantee that the running event and all the events generated within
      184             :   // it will be finished before events on other threads run.
     185          21 :   ReentrantMonitorAutoEnter monitor(mRunningMonitor);
     186             : 
     187             :   {
     188          21 :     MutexAutoLock lock(mMutex);
     189             : 
     190          16 :     bool enqueue =  !!mForcedCount || mSuspended || mFlushing || !mEventQueue.IsEmpty();
     191             : 
     192          16 :     if (enqueue) {
     193           7 :       mEventQueue.AppendElement(Move(event));
     194           7 :       return;
     195             :     }
     196             : 
     197          14 :     nsCOMPtr<nsIEventTarget> target = event->GetEventTarget();
     198           9 :     MOZ_ASSERT(target);
     199             : 
     200           9 :     bool isCurrentThread = false;
     201          14 :     DebugOnly<nsresult> rv = target->IsOnCurrentThread(&isCurrentThread);
     202           9 :     MOZ_ASSERT(NS_SUCCEEDED(rv));
     203             : 
     204           9 :     if (!isCurrentThread) {
      205             :       // Leverage the Suspend/Resume mechanism to trigger the flush procedure
      206             :       // without creating a new one.
     207           4 :       SuspendInternal();
     208           4 :       mEventQueue.AppendElement(Move(event));
     209           4 :       ResumeInternal();
     210           4 :       return;
     211             :     }
     212             :   }
     213             : 
     214           5 :   MOZ_RELEASE_ASSERT(!aAssertionWhenNotQueued);
     215           5 :   event->Run();
     216             : }
     217             : 
     218             : inline void
     219          14 : ChannelEventQueue::StartForcedQueueing()
     220             : {
     221          28 :   MutexAutoLock lock(mMutex);
     222          14 :   ++mForcedCount;
     223          14 : }
     224             : 
     225             : inline void
     226          14 : ChannelEventQueue::EndForcedQueueing()
     227             : {
     228          14 :   bool tryFlush = false;
     229             :   {
     230          28 :     MutexAutoLock lock(mMutex);
     231          14 :     MOZ_ASSERT(mForcedCount > 0);
     232          14 :     if(!--mForcedCount) {
     233          14 :       tryFlush = true;
     234             :     }
     235             :   }
     236             : 
     237          14 :   if (tryFlush) {
     238          14 :     MaybeFlushQueue();
     239             :   }
     240          14 : }
     241             : 
     242             : inline nsresult
     243           1 : ChannelEventQueue::PrependEvent(UniquePtr<ChannelEvent>& aEvent)
     244             : {
     245           2 :   MutexAutoLock lock(mMutex);
     246             : 
      247             :   // Prepending an event while no queue flush is foreseen might cause the
      248             :   // following channel events to never run. This assertion guarantees there
      249             :   // will be a queue flush, triggered by either Resume or EndForcedQueueing,
      250             :   // to execute the added event.
     251           1 :   MOZ_ASSERT(mSuspended || !!mForcedCount);
     252             : 
     253             :   UniquePtr<ChannelEvent>* newEvent =
     254           1 :     mEventQueue.InsertElementAt(0, Move(aEvent));
     255             : 
     256           1 :   if (!newEvent) {
     257           0 :     return NS_ERROR_OUT_OF_MEMORY;
     258             :   }
     259             : 
     260           1 :   return NS_OK;
     261             : }
     262             : 
     263             : inline nsresult
     264           0 : ChannelEventQueue::PrependEvents(nsTArray<UniquePtr<ChannelEvent>>& aEvents)
     265             : {
     266           0 :   MutexAutoLock lock(mMutex);
     267             : 
      268             :   // Prepending events while no queue flush is foreseen might cause the
      269             :   // following channel events to never run. This assertion guarantees there
      270             :   // will be a queue flush, triggered by either Resume or EndForcedQueueing,
      271             :   // to execute the added events.
     272           0 :   MOZ_ASSERT(mSuspended || !!mForcedCount);
     273             : 
     274             :   UniquePtr<ChannelEvent>* newEvents =
     275           0 :     mEventQueue.InsertElementsAt(0, aEvents.Length());
     276           0 :   if (!newEvents) {
     277           0 :     return NS_ERROR_OUT_OF_MEMORY;
     278             :   }
     279             : 
     280           0 :   for (uint32_t i = 0; i < aEvents.Length(); i++) {
     281           0 :     newEvents[i] = Move(aEvents[i]);
     282             :   }
     283             : 
     284           0 :   return NS_OK;
     285             : }
     286             : 
     287             : inline void
     288           5 : ChannelEventQueue::CompleteResume()
     289             : {
     290           5 :   bool tryFlush = false;
     291             :   {
     292          10 :     MutexAutoLock lock(mMutex);
     293             : 
      294             :     // The channel may have been suspended again since Resume fired the event
      295             :     // that calls this.
     296           5 :     if (!mSuspendCount) {
      297             :       // We need to remain logically suspended (for purposes of queuing
      298             :       // incoming messages) until this point, or else new incoming messages
      299             :       // could run before queued ones.
     300           5 :       mSuspended = false;
     301           5 :       tryFlush = true;
     302             :     }
     303             :   }
     304             : 
     305           5 :   if (tryFlush) {
     306           5 :     MaybeFlushQueue();
     307             :   }
     308           5 : }
     309             : 
     310             : inline void
     311          19 : ChannelEventQueue::MaybeFlushQueue()
     312             : {
      313             :   // Don't flush if forced queueing is on, we're already flushing, we're
      314             :   // suspended, or there's nothing to flush.
     315          19 :   bool flushQueue = false;
     316             : 
     317             :   {
     318          38 :     MutexAutoLock lock(mMutex);
     319          27 :     flushQueue = !mForcedCount && !mFlushing && !mSuspended &&
     320           8 :                  !mEventQueue.IsEmpty();
     321             :   }
     322             : 
     323          19 :   if (flushQueue) {
     324           5 :     FlushQueue();
     325             :   }
     326          19 : }
     327             : 
      328             : // Ensures that RunOrEnqueue() will be collecting events during its lifetime
      329             : // (letting the caller know incoming IPDL messages should be queued). Flushes
      330             : // the queue when it goes out of scope.
     331             : class MOZ_STACK_CLASS AutoEventEnqueuer
     332             : {
     333             :  public:
     334          14 :   explicit AutoEventEnqueuer(ChannelEventQueue *queue)
     335          14 :     : mEventQueue(queue)
     336          14 :     , mOwner(queue->mOwner)
     337             :   {
     338          14 :     mEventQueue->StartForcedQueueing();
     339          14 :   }
     340          28 :   ~AutoEventEnqueuer() {
     341          14 :     mEventQueue->EndForcedQueueing();
     342          14 :   }
     343             :  private:
     344             :   RefPtr<ChannelEventQueue> mEventQueue;
     345             :   // Ensure channel object lives longer than ChannelEventQueue.
     346             :   nsCOMPtr<nsISupports> mOwner;
     347             : };
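
Putting the pieces together, a sketch of the dispatch pattern on the child
side, again with the hypothetical MyChannelChild/StopRequestEvent names and
assuming an IPDL-generated RecvOnStopRequest handler plus an
nsCOMPtr<nsIStreamListener> mListener member (the exact Recv and listener
signatures depend on the protocol and interfaces actually implemented):

  mozilla::ipc::IPCResult
  MyChannelChild::RecvOnStopRequest(const nsresult& aStatus)
  {
    // Runs the event right away when nothing is queued, forced, suspended or
    // flushing; otherwise the event is appended to the queue and run later.
    mEventQ->RunOrEnqueue(new StopRequestEvent(this, aStatus));
    return IPC_OK();
  }

  void
  MyChannelChild::DoOnStopRequest(nsresult aStatus)
  {
    // Force queueing while the listener callback runs. ~AutoEventEnqueuer
    // flushes whatever arrived re-entrantly (e.g. via sync XHR or IPDL RPC
    // spinning the event loop), preserving OnStartRequest/OnDataAvailable/
    // OnStopRequest ordering.
    AutoEventEnqueuer ensureSerialDispatch(mEventQ);

    mListener->OnStopRequest(this, nullptr, aStatus);
  }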
     348             : 
     349             : } // namespace net
     350             : } // namespace mozilla
     351             : 
     352             : #endif

Generated by: LCOV version 1.13