LCOV - code coverage report
Current view: top level - gfx/skia/skia/src/core - SkSharedMutex.cpp (source / functions)
Test:      output.info
Date:      2017-07-14 16:53:18
Lines:     0 hit / 85 total (0.0 % coverage)
Functions: 0 hit / 14 total (0.0 % coverage)
Legend:    Lines: hit | not hit

          Line data    Source code
       1             : /*
       2             :  * Copyright 2015 Google Inc.
       3             :  *
       4             :  * Use of this source code is governed by a BSD-style license that can be
       5             :  * found in the LICENSE file.
       6             :  */
       7             : 
       8             : #include "SkSharedMutex.h"
       9             : 
      10             : #include "SkAtomics.h"
      11             : #include "SkTypes.h"
      12             : #include "SkSemaphore.h"
      13             : 
      14             : #if !defined(__has_feature)
      15             :     #define __has_feature(x) 0
      16             : #endif
      17             : 
      18             : #if __has_feature(thread_sanitizer)
      19             : 
      20             :     /* Report that a lock has been created at address "lock". */
      21             :     #define ANNOTATE_RWLOCK_CREATE(lock) \
      22             :         AnnotateRWLockCreate(__FILE__, __LINE__, lock)
      23             : 
      24             :     /* Report that the lock at address "lock" is about to be destroyed. */
      25             :     #define ANNOTATE_RWLOCK_DESTROY(lock) \
      26             :         AnnotateRWLockDestroy(__FILE__, __LINE__, lock)
      27             : 
      28             :     /* Report that the lock at address "lock" has been acquired.
      29             :        is_w=1 for writer lock, is_w=0 for reader lock. */
      30             :     #define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) \
      31             :         AnnotateRWLockAcquired(__FILE__, __LINE__, lock, is_w)
      32             : 
      33             :     /* Report that the lock at address "lock" is about to be released. */
      34             :     #define ANNOTATE_RWLOCK_RELEASED(lock, is_w) \
      35             :       AnnotateRWLockReleased(__FILE__, __LINE__, lock, is_w)
      36             : 
      37             :     #if defined(DYNAMIC_ANNOTATIONS_WANT_ATTRIBUTE_WEAK)
      38             :         #if defined(__GNUC__)
      39             :             #define DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK __attribute__((weak))
      40             :         #else
      41             :             /* TODO(glider): for Windows support we may want to change this macro in order
      42             :                to prepend __declspec(selectany) to the annotations' declarations. */
      43             :             #error weak annotations are not supported for your compiler
      44             :         #endif
      45             :     #else
      46             :         #define DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK
      47             :     #endif
      48             : 
      49             :     extern "C" {
      50             :     void AnnotateRWLockCreate(
      51             :         const char *file, int line,
      52             :         const volatile void *lock) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
      53             :     void AnnotateRWLockDestroy(
      54             :         const char *file, int line,
      55             :         const volatile void *lock) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
      56             :     void AnnotateRWLockAcquired(
      57             :         const char *file, int line,
      58             :         const volatile void *lock, long is_w) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
      59             :     void AnnotateRWLockReleased(
      60             :         const char *file, int line,
      61             :         const volatile void *lock, long is_w) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
      62             :     }
      63             : 
      64             : #else
      65             : 
      66             :     #define ANNOTATE_RWLOCK_CREATE(lock)
      67             :     #define ANNOTATE_RWLOCK_DESTROY(lock)
      68             :     #define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w)
      69             :     #define ANNOTATE_RWLOCK_RELEASED(lock, is_w)
      70             : 
      71             : #endif
      72             : 
      73             : #ifdef SK_DEBUG
      74             : 
      75             :     #include "SkThreadID.h"
      76             :     #include "SkTDArray.h"
      77             : 
      78           0 :     class SkSharedMutex::ThreadIDSet {
      79             :     public:
      80             :         // Returns true if threadID is in the set.
      81           0 :         bool find(SkThreadID threadID) const {
      82           0 :             for (auto& t : fThreadIDs) {
      83           0 :                 if (t == threadID) return true;
      84             :             }
      85           0 :             return false;
      86             :         }
      87             : 
       88             :         // Adds threadID; returns true if it was not already in the set.
      89           0 :         bool tryAdd(SkThreadID threadID) {
      90           0 :             for (auto& t : fThreadIDs) {
      91           0 :                 if (t == threadID) return false;
      92             :             }
      93           0 :             fThreadIDs.append(1, &threadID);
      94           0 :             return true;
      95             :         }
       96             :         // Removes threadID if present; returns true if it was in the set.
      97           0 :         bool tryRemove(SkThreadID threadID) {
      98           0 :             for (int i = 0; i < fThreadIDs.count(); ++i) {
      99           0 :                 if (fThreadIDs[i] == threadID) {
     100           0 :                     fThreadIDs.remove(i);
     101           0 :                     return true;
     102             :                 }
     103             :             }
     104           0 :             return false;
     105             :         }
     106             : 
     107             :         void swap(ThreadIDSet& other) {
     108             :             fThreadIDs.swap(other.fThreadIDs);
     109             :         }
     110             : 
     111           0 :         int count() const {
     112           0 :             return fThreadIDs.count();
     113             :         }
     114             : 
     115             :     private:
     116             :         SkTDArray<SkThreadID> fThreadIDs;
     117             :     };
     118             : 
     119           0 :     SkSharedMutex::SkSharedMutex()
     120           0 :         : fCurrentShared(new ThreadIDSet)
     121           0 :         , fWaitingExclusive(new ThreadIDSet)
      122           0 :         , fWaitingShared(new ThreadIDSet) {
     123             :         ANNOTATE_RWLOCK_CREATE(this);
     124           0 :     }
     125             : 
     126           0 :     SkSharedMutex::~SkSharedMutex() {  ANNOTATE_RWLOCK_DESTROY(this); }
     127             : 
     128           0 :     void SkSharedMutex::acquire() {
     129           0 :         SkThreadID threadID(SkGetThreadID());
     130             :         int currentSharedCount;
     131             :         int waitingExclusiveCount;
     132             :         {
     133           0 :             SkAutoMutexAcquire l(&fMu);
     134             : 
     135           0 :             if (!fWaitingExclusive->tryAdd(threadID)) {
     136           0 :                 SkDEBUGFAILF("Thread %lx already has an exclusive lock\n", threadID);
     137             :             }
     138             : 
     139           0 :             currentSharedCount = fCurrentShared->count();
     140           0 :             waitingExclusiveCount = fWaitingExclusive->count();
     141             :         }
     142             : 
     143           0 :         if (currentSharedCount > 0 || waitingExclusiveCount > 1) {
     144           0 :             fExclusiveQueue.wait();
     145             :         }
     146             : 
     147             :         ANNOTATE_RWLOCK_ACQUIRED(this, 1);
     148           0 :     }
     149             : 
     150             :     // Implementation Detail:
      151             :     // The shared threads need two separate queues to keep the threads that were added after the
      152             :     // exclusive lock separate from the threads that were added before it.
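                           //
                           // An illustrative scenario (added sketch, not part of the original source): suppose
                           // readers R1 and R2 are blocked on fSharedQueue[0] behind a writer W1, with
                           // fSharedQueueSelect == 0. When W1 calls release(), it swaps fWaitingShared into
                           // fCurrentShared, flips fSharedQueueSelect to 1, and signals fSharedQueue[0] with a
                           // count of 2, waking exactly R1 and R2. A reader R3 that arrives afterwards, while
                           // another writer W2 is still waiting, blocks on fSharedQueue[1], so a signal meant
                           // for the earlier batch can never wake a reader from the later batch.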
     153           0 :     void SkSharedMutex::release() {
     154             :         ANNOTATE_RWLOCK_RELEASED(this, 1);
     155           0 :         SkThreadID threadID(SkGetThreadID());
     156             :         int sharedWaitingCount;
     157             :         int exclusiveWaitingCount;
     158             :         int sharedQueueSelect;
     159             :         {
     160           0 :             SkAutoMutexAcquire l(&fMu);
     161           0 :             SkASSERT(0 == fCurrentShared->count());
     162           0 :             if (!fWaitingExclusive->tryRemove(threadID)) {
     163           0 :                 SkDEBUGFAILF("Thread %lx did not have the lock held.\n", threadID);
     164             :             }
     165           0 :             exclusiveWaitingCount = fWaitingExclusive->count();
     166           0 :             sharedWaitingCount = fWaitingShared->count();
     167           0 :             fWaitingShared.swap(fCurrentShared);
     168           0 :             sharedQueueSelect = fSharedQueueSelect;
     169           0 :             if (sharedWaitingCount > 0) {
     170           0 :                 fSharedQueueSelect = 1 - fSharedQueueSelect;
     171             :             }
     172             :         }
     173             : 
     174           0 :         if (sharedWaitingCount > 0) {
     175           0 :             fSharedQueue[sharedQueueSelect].signal(sharedWaitingCount);
     176           0 :         } else if (exclusiveWaitingCount > 0) {
     177           0 :             fExclusiveQueue.signal();
     178             :         }
     179           0 :     }
     180             : 
     181           0 :     void SkSharedMutex::assertHeld() const {
     182           0 :         SkThreadID threadID(SkGetThreadID());
     183           0 :         SkAutoMutexAcquire l(&fMu);
     184           0 :         SkASSERT(0 == fCurrentShared->count());
     185           0 :         SkASSERT(fWaitingExclusive->find(threadID));
     186           0 :     }
     187             : 
     188           0 :     void SkSharedMutex::acquireShared() {
     189           0 :         SkThreadID threadID(SkGetThreadID());
     190             :         int exclusiveWaitingCount;
     191             :         int sharedQueueSelect;
     192             :         {
     193           0 :             SkAutoMutexAcquire l(&fMu);
     194           0 :             exclusiveWaitingCount = fWaitingExclusive->count();
     195           0 :             if (exclusiveWaitingCount > 0) {
     196           0 :                 if (!fWaitingShared->tryAdd(threadID)) {
     197           0 :                     SkDEBUGFAILF("Thread %lx was already waiting!\n", threadID);
     198             :                 }
     199             :             } else {
     200           0 :                 if (!fCurrentShared->tryAdd(threadID)) {
     201           0 :                     SkDEBUGFAILF("Thread %lx already holds a shared lock!\n", threadID);
     202             :                 }
     203             :             }
     204           0 :             sharedQueueSelect = fSharedQueueSelect;
     205             :         }
     206             : 
     207           0 :         if (exclusiveWaitingCount > 0) {
     208           0 :             fSharedQueue[sharedQueueSelect].wait();
     209             :         }
     210             : 
     211             :         ANNOTATE_RWLOCK_ACQUIRED(this, 0);
     212           0 :     }
     213             : 
     214           0 :     void SkSharedMutex::releaseShared() {
     215             :         ANNOTATE_RWLOCK_RELEASED(this, 0);
     216           0 :         SkThreadID threadID(SkGetThreadID());
     217             : 
     218             :         int currentSharedCount;
     219             :         int waitingExclusiveCount;
     220             :         {
     221           0 :             SkAutoMutexAcquire l(&fMu);
     222           0 :             if (!fCurrentShared->tryRemove(threadID)) {
     223           0 :                 SkDEBUGFAILF("Thread %lx does not hold a shared lock.\n", threadID);
     224             :             }
     225           0 :             currentSharedCount = fCurrentShared->count();
     226           0 :             waitingExclusiveCount = fWaitingExclusive->count();
     227             :         }
     228             : 
     229           0 :         if (0 == currentSharedCount && waitingExclusiveCount > 0) {
     230           0 :             fExclusiveQueue.signal();
     231             :         }
     232           0 :     }
     233             : 
     234           0 :     void SkSharedMutex::assertHeldShared() const {
     235           0 :         SkThreadID threadID(SkGetThreadID());
     236           0 :         SkAutoMutexAcquire l(&fMu);
     237           0 :         SkASSERT(fCurrentShared->find(threadID));
     238           0 :     }
     239             : 
     240             : #else
     241             : 
      242             :     // The fQueueCounts field holds three counts packed into a single int32_t so that they can be
      243             :     // managed atomically. The three counts must be the same size, so each gets 10 bits, allowing
      244             :     // a maximum count of 2^10 = 1024 threads.
     245             :     //
     246             :     // The three counts held in fQueueCounts are:
     247             :     // * Shared - the number of shared lock holders currently running.
     248             :     // * WaitingExclusive - the number of threads waiting for an exclusive lock.
     249             :     // * WaitingShared - the number of threads waiting to run while waiting for an exclusive thread
     250             :     //   to finish.
     251             :     static const int kLogThreadCount = 10;
     252             : 
     253             :     enum {
     254             :         kSharedOffset          = (0 * kLogThreadCount),
     255             :         kWaitingExlusiveOffset = (1 * kLogThreadCount),
     256             :         kWaitingSharedOffset   = (2 * kLogThreadCount),
     257             :         kSharedMask            = ((1 << kLogThreadCount) - 1) << kSharedOffset,
     258             :         kWaitingExclusiveMask  = ((1 << kLogThreadCount) - 1) << kWaitingExlusiveOffset,
     259             :         kWaitingSharedMask     = ((1 << kLogThreadCount) - 1) << kWaitingSharedOffset,
     260             :     };
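                           // An illustrative packing example (added sketch, not part of the original source):
                           // with kLogThreadCount = 10 the fields occupy bits [0,10) for Shared, [10,20) for
                           // WaitingExclusive, and [20,30) for WaitingShared. A state with three running
                           // readers and one waiting writer is therefore encoded as
                           //     (1 << kWaitingExlusiveOffset) | (3 << kSharedOffset)    // == 0x00000403
                           // and each count is recovered with its mask and offset, e.g.
                           //     (counts & kSharedMask) >> kSharedOffset                 // == 3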
     261             : 
     262             :     SkSharedMutex::SkSharedMutex() : fQueueCounts(0) { ANNOTATE_RWLOCK_CREATE(this); }
     263             :     SkSharedMutex::~SkSharedMutex() {  ANNOTATE_RWLOCK_DESTROY(this); }
     264             :     void SkSharedMutex::acquire() {
     265             :         // Increment the count of exclusive queue waiters.
     266             :         int32_t oldQueueCounts = fQueueCounts.fetch_add(1 << kWaitingExlusiveOffset,
     267             :                                                         sk_memory_order_acquire);
     268             : 
      269             :         // If there are no other exclusive waiters and no shared threads are running, then run;
      270             :         // otherwise, wait.
     271             :         if ((oldQueueCounts & kWaitingExclusiveMask) > 0 || (oldQueueCounts & kSharedMask) > 0) {
     272             :             fExclusiveQueue.wait();
     273             :         }
     274             :         ANNOTATE_RWLOCK_ACQUIRED(this, 1);
     275             :     }
     276             : 
     277             :     void SkSharedMutex::release() {
     278             :         ANNOTATE_RWLOCK_RELEASED(this, 1);
     279             : 
     280             :         int32_t oldQueueCounts = fQueueCounts.load(sk_memory_order_relaxed);
     281             :         int32_t waitingShared;
     282             :         int32_t newQueueCounts;
     283             :         do {
     284             :             newQueueCounts = oldQueueCounts;
     285             : 
     286             :             // Decrement exclusive waiters.
     287             :             newQueueCounts -= 1 << kWaitingExlusiveOffset;
     288             : 
     289             :             // The number of threads waiting to acquire a shared lock.
     290             :             waitingShared = (oldQueueCounts & kWaitingSharedMask) >> kWaitingSharedOffset;
     291             : 
      292             :             // If there are any, move the count of shared waiters over to the shared count; they
      293             :             // are going to run next.
     294             :             if (waitingShared > 0) {
     295             : 
     296             :                 // Set waiting shared to zero.
     297             :                 newQueueCounts &= ~kWaitingSharedMask;
     298             : 
     299             :                 // Because this is the exclusive release, then there are zero readers. So, the bits
     300             :                 // for shared locks should be zero. Since those bits are zero, we can just |= in the
     301             :                 // waitingShared count instead of clearing with an &= and then |= the count.
     302             :                 newQueueCounts |= waitingShared << kSharedOffset;
     303             :             }
     304             : 
     305             :         } while (!fQueueCounts.compare_exchange(&oldQueueCounts, newQueueCounts,
     306             :                                                 sk_memory_order_release, sk_memory_order_relaxed));
     307             : 
     308             :         if (waitingShared > 0) {
     309             :             // Run all the shared.
     310             :             fSharedQueue.signal(waitingShared);
     311             :         } else if ((newQueueCounts & kWaitingExclusiveMask) > 0) {
     312             :             // Run a single exclusive waiter.
     313             :             fExclusiveQueue.signal();
     314             :         }
     315             :     }
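                           // A worked example of the transition above (added sketch, not part of the original
                           // source): suppose this writer releases while one other writer and two readers are
                           // queued, so
                           //     oldQueueCounts == (2 << kWaitingSharedOffset) | (2 << kWaitingExlusiveOffset)   (== 0x00200800)
                           // The loop decrements the exclusive-waiter count, clears the waiting-shared field,
                           // and moves its value into the shared field, giving
                           //     newQueueCounts == (1 << kWaitingExlusiveOffset) | (2 << kSharedOffset)          (== 0x00000402)
                           // fSharedQueue.signal(2) then wakes both readers; the remaining writer is woken
                           // later, when the last reader calls releaseShared().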
     316             : 
     317             :     void SkSharedMutex::acquireShared() {
     318             :         int32_t oldQueueCounts = fQueueCounts.load(sk_memory_order_relaxed);
     319             :         int32_t newQueueCounts;
     320             :         do {
     321             :             newQueueCounts = oldQueueCounts;
     322             :             // If there are waiting exclusives then this shared lock waits else it runs.
     323             :             if ((newQueueCounts & kWaitingExclusiveMask) > 0) {
     324             :                 newQueueCounts += 1 << kWaitingSharedOffset;
     325             :             } else {
     326             :                 newQueueCounts += 1 << kSharedOffset;
     327             :             }
     328             :         } while (!fQueueCounts.compare_exchange(&oldQueueCounts, newQueueCounts,
     329             :                                                 sk_memory_order_acquire, sk_memory_order_relaxed));
     330             : 
      331             :         // If there are waiting exclusives, then this shared thread waits until an exclusive release wakes it.
     332             :         if ((newQueueCounts & kWaitingExclusiveMask) > 0) {
     333             :             fSharedQueue.wait();
     334             :         }
     335             :         ANNOTATE_RWLOCK_ACQUIRED(this, 0);
     336             : 
     337             :     }
     338             : 
     339             :     void SkSharedMutex::releaseShared() {
     340             :         ANNOTATE_RWLOCK_RELEASED(this, 0);
     341             : 
     342             :         // Decrement the shared count.
     343             :         int32_t oldQueueCounts = fQueueCounts.fetch_sub(1 << kSharedOffset,
     344             :                                                         sk_memory_order_release);
     345             : 
     346             :         // If shared count is going to zero (because the old count == 1) and there are exclusive
     347             :         // waiters, then run a single exclusive waiter.
     348             :         if (((oldQueueCounts & kSharedMask) >> kSharedOffset) == 1
     349             :             && (oldQueueCounts & kWaitingExclusiveMask) > 0) {
     350             :             fExclusiveQueue.signal();
     351             :         }
     352             :     }
     353             : 
     354             : #endif
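
                       // A minimal usage sketch (added for illustration; gConfigMutex, readConfig, and
                       // writeConfig are hypothetical names, and only the SkSharedMutex calls come from this
                       // file):
                       //
                       //     #include "SkSharedMutex.h"
                       //
                       //     SkSharedMutex gConfigMutex;
                       //
                       //     void readConfig() {                  // many readers may hold the lock at once
                       //         gConfigMutex.acquireShared();
                       //         // ... read the shared state ...
                       //         gConfigMutex.releaseShared();
                       //     }
                       //
                       //     void writeConfig() {                 // a writer gets exclusive access
                       //         gConfigMutex.acquire();
                       //         // ... mutate the shared state ...
                       //         gConfigMutex.release();
                       //     }
                       //
                       // In SK_DEBUG builds, assertHeld() and assertHeldShared() (defined above) can be used
                       // to verify that the calling thread actually holds the expected lock.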

Generated by: LCOV version 1.13