/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef jit_AtomicOperations_h
#define jit_AtomicOperations_h

#include "mozilla/Types.h"

#include "vm/SharedMem.h"

namespace js {
namespace jit {

class RegionLock;

/*
 * The atomic operations layer defines types and functions for
 * JIT-compatible atomic operations.
 *
 * The fundamental constraints on the functions are:
 *
 * - That their realization here MUST be compatible with code the JIT
 *   generates for its Atomics operations, so that an atomic access
 *   from the interpreter or runtime - from any C++ code - really is
 *   atomic relative to a concurrent, compatible atomic access from
 *   jitted code.  That is, these primitives expose JIT-compatible
 *   atomicity functionality to C++.
 *
 * - That accesses may race without creating C++ undefined behavior:
 *   atomic accesses (marked "SeqCst") may race with non-atomic
 *   accesses (marked "SafeWhenRacy"); overlapping but non-matching,
 *   and hence incompatible, atomic accesses may race; and non-atomic
 *   accesses may race.  The effects of races need not be predictable,
 *   so garbage can be produced by a read or written by a write, but
 *   the effects must be benign: the program must continue to run, and
 *   only the memory in the union of addresses named in the racing
 *   accesses may be affected.
 *
 * The compatibility constraint means that if the JIT makes dynamic
 * decisions about how to implement atomic operations then
 * corresponding dynamic decisions MUST be made in the implementations
 * of the functions below.
 *
 * The safe-for-races constraint means that by and large, it is hard
 * to implement these primitives in C++.  See "Implementation notes"
 * below.
 *
 * The "SeqCst" suffix on operations means "sequentially consistent"
 * and means such a function's operation must have "sequentially
 * consistent" memory ordering.  See mfbt/Atomics.h for an explanation
 * of this memory ordering.
 *
 * Note that a "SafeWhenRacy" access does not provide the atomicity of
 * a "relaxed atomic" access: it can read or write garbage if there's
 * a race.
 *
 *
 * Implementation notes.
 *
 * It's not a requirement that these functions be inlined; performance
 * is not a great concern.  On some platforms these functions may call
 * out to code that's generated at run time.
 *
 * In principle these functions will not be written in C++, thus
 * making races defined behavior if all racy accesses from C++ go via
 * these functions.  (Jitted code will always be safe for races and
 * provides the same guarantees as these functions.)
 *
 * The appropriate implementations will be platform-specific and
 * there are some obvious implementation strategies to choose
 * from; sometimes a combination is appropriate:
 *
 *  - generating the code at run-time with the JIT;
 *  - hand-written assembler (maybe inline); or
 *  - using special compiler intrinsics or directives.
 *
 * Trusting the compiler not to generate code that blows up on a
 * race definitely won't work in the presence of TSan, or even of
 * optimizing compilers in seemingly-"innocuous" conditions.  (See
 * https://www.usenix.org/legacy/event/hotpar11/tech/final_files/Boehm.pdf
 * for details.)
 */
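
// For illustration only: the "compiler intrinsics" strategy mentioned
// above could realize a couple of the SeqCst primitives declared below
// with the GCC/Clang __atomic builtins, roughly as in the following
// sketch.  This is not the code of any actual platform header, and
// whether such definitions are truly safe in the presence of races
// depends on the compiler and platform, as discussed above.
//
//   template<typename T>
//   inline T
//   js::jit::AtomicOperations::loadSeqCst(T* addr)
//   {
//       T v;
//       __atomic_load(addr, &v, __ATOMIC_SEQ_CST);
//       return v;
//   }
//
//   template<typename T>
//   inline T
//   js::jit::AtomicOperations::compareExchangeSeqCst(T* addr, T oldval, T newval)
//   {
//       // On failure __atomic_compare_exchange writes the observed value
//       // into oldval, so oldval always ends up holding the old contents.
//       __atomic_compare_exchange(addr, &oldval, &newval, false,
//                                 __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
//       return oldval;
//   }
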
class AtomicOperations
{
    friend class RegionLock;

  private:
    // The following functions are defined for T = int8_t, uint8_t,
    // int16_t, uint16_t, int32_t, uint32_t, int64_t, and uint64_t.

    // Atomically read *addr.
    template<typename T>
    static inline T loadSeqCst(T* addr);

    // Atomically store val in *addr.
    template<typename T>
    static inline void storeSeqCst(T* addr, T val);

    // Atomically store val in *addr and return the old value of *addr.
    template<typename T>
    static inline T exchangeSeqCst(T* addr, T val);

    // Atomically check that *addr contains oldval and if so replace it
    // with newval, in any case returning the old contents of *addr.
    template<typename T>
    static inline T compareExchangeSeqCst(T* addr, T oldval, T newval);

    // The following functions are defined for T = int8_t, uint8_t,
    // int16_t, uint16_t, int32_t, uint32_t only.

    // Atomically add, subtract, bitwise-AND, bitwise-OR, or bitwise-XOR
    // val into *addr and return the old value of *addr.
    template<typename T>
    static inline T fetchAddSeqCst(T* addr, T val);

    template<typename T>
    static inline T fetchSubSeqCst(T* addr, T val);

    template<typename T>
    static inline T fetchAndSeqCst(T* addr, T val);

    template<typename T>
    static inline T fetchOrSeqCst(T* addr, T val);

    template<typename T>
    static inline T fetchXorSeqCst(T* addr, T val);
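
    // Illustrative note, not part of any platform header: on a target
    // without a native read-modify-write instruction for some size, the
    // fetch-ops above could be built as a loop around
    // compareExchangeSeqCst, along these lines (a sketch, with fetchAdd
    // as the example):
    //
    //   template<typename T>
    //   inline T
    //   AtomicOperations::fetchAddSeqCst(T* addr, T val)
    //   {
    //       T old;
    //       do {
    //           old = loadSeqCst(addr);
    //       } while (compareExchangeSeqCst(addr, old, T(old + val)) != old);
    //       return old;
    //   }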

    // The SafeWhenRacy functions are to be used when C++ code has to access
    // memory without synchronization and can't guarantee that there
    // won't be a race on the access.

    // Defined for all the integral types as well as for float32 and float64.
    template<typename T>
    static inline T loadSafeWhenRacy(T* addr);

    // Defined for all the integral types as well as for float32 and float64.
    template<typename T>
    static inline void storeSafeWhenRacy(T* addr, T val);

    // Replacement for memcpy().
    static inline void memcpySafeWhenRacy(void* dest, const void* src, size_t nbytes);

    // Replacement for memmove().
    static inline void memmoveSafeWhenRacy(void* dest, const void* src, size_t nbytes);

  public:
    // Test lock-freedom for any int32 value.  This implements the
    // Atomics::isLockFree() operation in the Shared Memory and
    // Atomics specification, as follows:
    //
    // 1, 2, and 4 bytes are always lock free (in SpiderMonkey).
    //
    // Lock-freedom for 8 bytes is determined by the platform's
    // isLockfree8().  However, the spec stipulates that isLockFree(8)
    // is true only if there is an integer array that admits atomic
    // operations whose BYTES_PER_ELEMENT=8; at the moment (February
    // 2016) there are no such arrays.
    //
    // There is no lock-freedom for any other values on any platform.
    static inline bool isLockfree(int32_t n);

    // If the return value is true then a call to the 64-bit (8-byte)
    // routines below will work; otherwise those functions will assert in
    // debug builds and may crash in release builds.  (See the code in
    // ../arm for an example.)  The value of this call does not change
    // during execution.
    static inline bool isLockfree8();

    // Execute a full memory barrier (LoadLoad+LoadStore+StoreLoad+StoreStore).
    static inline void fenceSeqCst();

    // All clients should use the APIs that take SharedMem pointers.
    // See above for semantics and acceptable types.

    template<typename T>
    static T loadSeqCst(SharedMem<T*> addr) {
        return loadSeqCst(addr.unwrap());
    }

    template<typename T>
    static void storeSeqCst(SharedMem<T*> addr, T val) {
        return storeSeqCst(addr.unwrap(), val);
    }

    template<typename T>
    static T exchangeSeqCst(SharedMem<T*> addr, T val) {
        return exchangeSeqCst(addr.unwrap(), val);
    }

    template<typename T>
    static T compareExchangeSeqCst(SharedMem<T*> addr, T oldval, T newval) {
        return compareExchangeSeqCst(addr.unwrap(), oldval, newval);
    }

    template<typename T>
    static T fetchAddSeqCst(SharedMem<T*> addr, T val) {
        return fetchAddSeqCst(addr.unwrap(), val);
    }

    template<typename T>
    static T fetchSubSeqCst(SharedMem<T*> addr, T val) {
        return fetchSubSeqCst(addr.unwrap(), val);
    }

    template<typename T>
    static T fetchAndSeqCst(SharedMem<T*> addr, T val) {
        return fetchAndSeqCst(addr.unwrap(), val);
    }

    template<typename T>
    static T fetchOrSeqCst(SharedMem<T*> addr, T val) {
        return fetchOrSeqCst(addr.unwrap(), val);
    }

    template<typename T>
    static T fetchXorSeqCst(SharedMem<T*> addr, T val) {
        return fetchXorSeqCst(addr.unwrap(), val);
    }

    template<typename T>
    static T loadSafeWhenRacy(SharedMem<T*> addr) {
        return loadSafeWhenRacy(addr.unwrap());
    }

    template<typename T>
    static void storeSafeWhenRacy(SharedMem<T*> addr, T val) {
        return storeSafeWhenRacy(addr.unwrap(), val);
    }

    template<typename T>
    static void memcpySafeWhenRacy(SharedMem<T*> dest, SharedMem<T*> src, size_t nbytes) {
        memcpySafeWhenRacy(dest.template cast<void*>().unwrap(),
                           src.template cast<void*>().unwrap(), nbytes);
    }

    template<typename T>
    static void memcpySafeWhenRacy(SharedMem<T*> dest, T* src, size_t nbytes) {
        memcpySafeWhenRacy(dest.template cast<void*>().unwrap(), static_cast<void*>(src), nbytes);
    }

    template<typename T>
    static void memcpySafeWhenRacy(T* dest, SharedMem<T*> src, size_t nbytes) {
        memcpySafeWhenRacy(static_cast<void*>(dest), src.template cast<void*>().unwrap(), nbytes);
    }

    template<typename T>
    static void memmoveSafeWhenRacy(SharedMem<T*> dest, SharedMem<T*> src, size_t nbytes) {
        memmoveSafeWhenRacy(dest.template cast<void*>().unwrap(),
                            src.template cast<void*>().unwrap(), nbytes);
    }

    template<typename T>
    static void podCopySafeWhenRacy(SharedMem<T*> dest, SharedMem<T*> src, size_t nelem) {
        memcpySafeWhenRacy(dest, src, nelem * sizeof(T));
    }

    template<typename T>
    static void podMoveSafeWhenRacy(SharedMem<T*> dest, SharedMem<T*> src, size_t nelem) {
        memmoveSafeWhenRacy(dest, src, nelem * sizeof(T));
    }
};
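
/* Illustrative usage from C++ runtime code (a sketch; the function and
 * variable names here are invented for the example and do not exist in
 * the engine).  All accesses go through the SharedMem-based public API
 * above, so they remain compatible with concurrent accesses from jitted
 * code:
 *
 *   void
 *   BumpSharedCounter(SharedMem<uint32_t*> counter)
 *   {
 *       // A racy snapshot is acceptable where garbage can be tolerated.
 *       uint32_t racyPeek = AtomicOperations::loadSafeWhenRacy(counter);
 *       (void)racyPeek;
 *
 *       // The update itself must be atomic and sequentially consistent.
 *       AtomicOperations::fetchAddSeqCst(counter, uint32_t(1));
 *   }
 */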

/* A data type representing a lock on some region of a
 * SharedArrayRawBuffer's memory, to be used only when the hardware
 * does not provide necessary atomicity (e.g., float64 access on ARMv6
 * and some ARMv7 systems).
 */
class RegionLock
{
  public:
    RegionLock() : spinlock(0) {}

    /* Addr is the address to be locked, nbytes the number of bytes we
     * need to lock.  The lock that is taken may cover a larger range
     * of bytes.
     */
    template<size_t nbytes>
    void acquire(void* addr);

    /* Addr is the address to be unlocked, nbytes the number of bytes
     * we need to unlock.  The lock must be held by the calling thread,
     * at the given address and for the number of bytes.
     */
    template<size_t nbytes>
    void release(void* addr);

  private:
    /* For now, a simple spinlock that covers the entire buffer. */
    uint32_t spinlock;
};
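
/* For illustration, a platform header could implement the spinlock above
 * in terms of the sequentially consistent primitives.  This is only a
 * sketch of one possible realization, not the code of any particular
 * platform; addr is ignored here because the single spinlock covers the
 * whole buffer:
 *
 *   template<size_t nbytes>
 *   inline void
 *   js::jit::RegionLock::acquire(void* addr)
 *   {
 *       // Spin until the lock word is flipped from 0 (free) to 1 (held).
 *       while (AtomicOperations::compareExchangeSeqCst(&spinlock, uint32_t(0), uint32_t(1)) != 0) {
 *           // Busy-wait.
 *       }
 *   }
 *
 *   template<size_t nbytes>
 *   inline void
 *   js::jit::RegionLock::release(void* addr)
 *   {
 *       // A SeqCst store of 0 publishes all writes made under the lock.
 *       AtomicOperations::storeSeqCst(&spinlock, uint32_t(0));
 *   }
 */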

inline bool
AtomicOperations::isLockfree(int32_t size)
{
    // Keep this in sync with visitAtomicIsLockFree() in jit/CodeGenerator.cpp.

    switch (size) {
      case 1:
        return true;
      case 2:
        return true;
      case 4:
        // The spec requires Atomics.isLockFree(4) to return true.
        return true;
      case 8:
        // The spec requires Atomics.isLockFree(n) to return false
        // unless n is the BYTES_PER_ELEMENT value of some integer
        // TypedArray that admits atomic operations.  At the time of
        // writing (February 2016) there is no such array with n=8.
        // return AtomicOperations::isLockfree8();
        return false;
      default:
        return false;
    }
}
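
// For example, given the definition above (and the state of the spec in
// February 2016):
//
//   AtomicOperations::isLockfree(1);   // true
//   AtomicOperations::isLockfree(4);   // true
//   AtomicOperations::isLockfree(8);   // false: no 8-byte integer array yet
//   AtomicOperations::isLockfree(3);   // false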

} // namespace jit
} // namespace js

#if defined(JS_CODEGEN_ARM)
# include "jit/arm/AtomicOperations-arm.h"
#elif defined(JS_CODEGEN_ARM64)
# include "jit/arm64/AtomicOperations-arm64.h"
#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
# include "jit/mips-shared/AtomicOperations-mips-shared.h"
#elif defined(__ppc__) || defined(__PPC__)
# include "jit/none/AtomicOperations-feeling-lucky.h"
#elif defined(__sparc__)
# include "jit/none/AtomicOperations-feeling-lucky.h"
#elif defined(JS_CODEGEN_NONE)
  // You can disable the JIT with --disable-ion but you must still
  // provide the atomic operations that will be used by the JS engine.
  // When the JIT is disabled the operations are simply safe-for-races
  // C++ realizations of atomics.  These operations cannot be written
  // in portable C++, hence the default here is to crash.  See the
  // top of the file for more guidance.
# if defined(__ppc64__) || defined(__PPC64__) || defined(__ppc64le__) || defined(__PPC64LE__)
#  include "jit/none/AtomicOperations-feeling-lucky.h"
# elif defined(__aarch64__)
#  include "jit/arm64/AtomicOperations-arm64.h"
# elif defined(__alpha__)
#  include "jit/none/AtomicOperations-feeling-lucky.h"
# elif defined(__hppa__)
#  include "jit/none/AtomicOperations-feeling-lucky.h"
# elif defined(__sh__)
#  include "jit/none/AtomicOperations-feeling-lucky.h"
# else
#  include "jit/none/AtomicOperations-none.h" // These always MOZ_CRASH()
# endif
#elif defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
# include "jit/x86-shared/AtomicOperations-x86-shared.h"
#else
# error "Atomic operations must be defined for this platform"
#endif

#endif // jit_AtomicOperations_h