LCOV - code coverage report
Current view: top level - js/src/builtin - AtomicsObject.cpp (source / functions) Hit Total Coverage
Test: output.info Lines: 37 521 7.1 %
Date: 2017-07-14 16:53:18 Functions: 8 83 9.6 %
Legend: Lines: hit not hit

          Line data    Source code
       1             : /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
       2             :  * vim: set ts=8 sts=4 et sw=4 tw=99:
       3             :  * This Source Code Form is subject to the terms of the Mozilla Public
       4             :  * License, v. 2.0. If a copy of the MPL was not distributed with this
       5             :  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
       6             : 
       7             : /*
       8             :  * JS Atomics pseudo-module.
       9             :  *
      10             :  * See "Spec: JavaScript Shared Memory, Atomics, and Locks" for the
      11             :  * full specification.
      12             :  *
      13             :  * In addition to what is specified there, we throw an Error object if
      14             :  * the futex API hooks have not been installed on the runtime.
      15             :  * Essentially that is an implementation error at a higher level.
      16             :  *
      17             :  *
      18             :  * Note on the current implementation of atomic operations.
      19             :  *
      20             :  * The Mozilla atomics are not sufficient to implement these APIs
      21             :  * because we need to support 8-bit, 16-bit, and 32-bit data: the
      22             :  * Mozilla atomics only support 32-bit data.
      23             :  *
      24             :  * At the moment we include mozilla/Atomics.h, which will define
      25             :  * MOZ_HAVE_CXX11_ATOMICS and include <atomic> if we have C++11
      26             :  * atomics.
      27             :  *
      28             :  * If MOZ_HAVE_CXX11_ATOMICS is set we'll use C++11 atomics.
      29             :  *
      30             :  * Otherwise, if the compiler has them we'll fall back on gcc/Clang
      31             :  * intrinsics.
      32             :  *
      33             :  * Otherwise, if we're on VC++2012, we'll use C++11 atomics even if
      34             :  * MOZ_HAVE_CXX11_ATOMICS is not defined.  The compiler has the
      35             :  * atomics but they are disabled in Mozilla due to a performance bug.
      36             :  * That performance bug does not affect the Atomics code.  See
      37             :  * mozilla/Atomics.h for further comments on that bug.
      38             :  *
      39             :  * Otherwise, if we're on VC++2010 or VC++2008, we'll emulate the
      40             :  * gcc/Clang intrinsics with simple code below using the VC++
      41             :  * intrinsics, like the VC++2012 solution this is a stopgap since
      42             :  * we're about to start using VC++2013 anyway.
      43             :  *
      44             :  * If none of those options are available then the build must disable
      45             :  * shared memory, or compilation will fail with a predictable error.
      46             :  */
      47             : 
      48             : #include "builtin/AtomicsObject.h"
      49             : 
      50             : #include "mozilla/Atomics.h"
      51             : #include "mozilla/FloatingPoint.h"
      52             : #include "mozilla/Maybe.h"
      53             : #include "mozilla/ScopeExit.h"
      54             : #include "mozilla/Unused.h"
      55             : 
      56             : #include "jsapi.h"
      57             : #include "jsfriendapi.h"
      58             : #include "jsnum.h"
      59             : 
      60             : #include "jit/AtomicOperations.h"
      61             : #include "jit/InlinableNatives.h"
      62             : #include "js/Class.h"
      63             : #include "vm/GlobalObject.h"
      64             : #include "vm/Time.h"
      65             : #include "vm/TypedArrayObject.h"
      66             : #include "wasm/WasmInstance.h"
      67             : 
      68             : #include "jsobjinlines.h"
      69             : 
      70             : using namespace js;
      71             : 
// Class descriptor for the `Atomics` singleton object.  It is a plain
// namespace-like object (no constructor); the cached-proto slot lets the
// VM lazily create and locate the global's Atomics object.
const Class AtomicsObject::class_ = {
    "Atomics",
    JSCLASS_HAS_CACHED_PROTO(JSProto_Atomics)
};
      76             : 
// Report that the first Atomics argument was not an integer TypedArray
// mapping shared memory.  Always returns false so callers can `return` it.
static bool
ReportBadArrayType(JSContext* cx)
{
    JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, JSMSG_ATOMICS_BAD_ARRAY);
    return false;
}
      83             : 
// Report an out-of-range element index.  Always returns false so callers
// can `return` it.
static bool
ReportOutOfRange(JSContext* cx)
{
    // Use JSMSG_BAD_INDEX here, it is what ToIndex uses for some cases that it
    // reports directly.
    JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, JSMSG_BAD_INDEX);
    return false;
}
      92             : 
// Report that blocking (Atomics.wait) is not allowed on this thread/agent.
// Always returns false so callers can `return` it.
static bool
ReportCannotWait(JSContext* cx)
{
    JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, JSMSG_ATOMICS_WAIT_NOT_ALLOWED);
    return false;
}
      99             : 
     100             : static bool
     101           0 : GetSharedTypedArray(JSContext* cx, HandleValue v,
     102             :                     MutableHandle<TypedArrayObject*> viewp)
     103             : {
     104           0 :     if (!v.isObject())
     105           0 :         return ReportBadArrayType(cx);
     106           0 :     if (!v.toObject().is<TypedArrayObject>())
     107           0 :         return ReportBadArrayType(cx);
     108           0 :     viewp.set(&v.toObject().as<TypedArrayObject>());
     109           0 :     if (!viewp->isSharedMemory())
     110           0 :         return ReportBadArrayType(cx);
     111           0 :     return true;
     112             : }
     113             : 
     114             : static bool
     115           0 : GetTypedArrayIndex(JSContext* cx, HandleValue v, Handle<TypedArrayObject*> view, uint32_t* offset)
     116             : {
     117             :     uint64_t index;
     118           0 :     if (!ToIndex(cx, v, &index))
     119           0 :         return false;
     120           0 :     if (index >= view->length())
     121           0 :         return ReportOutOfRange(cx);
     122           0 :     *offset = uint32_t(index);
     123           0 :     return true;
     124             : }
     125             : 
     126             : static int32_t
     127           0 : CompareExchange(Scalar::Type viewType, int32_t oldCandidate, int32_t newCandidate,
     128             :                 SharedMem<void*> viewData, uint32_t offset, bool* badArrayType = nullptr)
     129             : {
     130           0 :     switch (viewType) {
     131             :       case Scalar::Int8: {
     132           0 :         int8_t oldval = (int8_t)oldCandidate;
     133           0 :         int8_t newval = (int8_t)newCandidate;
     134           0 :         oldval = jit::AtomicOperations::compareExchangeSeqCst(viewData.cast<int8_t*>() + offset,
     135           0 :                                                               oldval, newval);
     136           0 :         return oldval;
     137             :       }
     138             :       case Scalar::Uint8: {
     139           0 :         uint8_t oldval = (uint8_t)oldCandidate;
     140           0 :         uint8_t newval = (uint8_t)newCandidate;
     141           0 :         oldval = jit::AtomicOperations::compareExchangeSeqCst(viewData.cast<uint8_t*>() + offset,
     142           0 :                                                               oldval, newval);
     143           0 :         return oldval;
     144             :       }
     145             :       case Scalar::Int16: {
     146           0 :         int16_t oldval = (int16_t)oldCandidate;
     147           0 :         int16_t newval = (int16_t)newCandidate;
     148           0 :         oldval = jit::AtomicOperations::compareExchangeSeqCst(viewData.cast<int16_t*>() + offset,
     149           0 :                                                               oldval, newval);
     150           0 :         return oldval;
     151             :       }
     152             :       case Scalar::Uint16: {
     153           0 :         uint16_t oldval = (uint16_t)oldCandidate;
     154           0 :         uint16_t newval = (uint16_t)newCandidate;
     155           0 :         oldval = jit::AtomicOperations::compareExchangeSeqCst(viewData.cast<uint16_t*>() + offset,
     156           0 :                                                               oldval, newval);
     157           0 :         return oldval;
     158             :       }
     159             :       case Scalar::Int32: {
     160           0 :         int32_t oldval = oldCandidate;
     161           0 :         int32_t newval = newCandidate;
     162           0 :         oldval = jit::AtomicOperations::compareExchangeSeqCst(viewData.cast<int32_t*>() + offset,
     163           0 :                                                               oldval, newval);
     164           0 :         return oldval;
     165             :       }
     166             :       case Scalar::Uint32: {
     167           0 :         uint32_t oldval = (uint32_t)oldCandidate;
     168           0 :         uint32_t newval = (uint32_t)newCandidate;
     169           0 :         oldval = jit::AtomicOperations::compareExchangeSeqCst(viewData.cast<uint32_t*>() + offset,
     170           0 :                                                               oldval, newval);
     171           0 :         return (int32_t)oldval;
     172             :       }
     173             :       default:
     174           0 :         if (badArrayType)
     175           0 :             *badArrayType = true;
     176           0 :         return 0;
     177             :     }
     178             : }
     179             : 
// Atomics.compareExchange(view, index, expected, replacement)
//
// Atomically compares the element at `index` with `expected` and, if they
// are equal, stores `replacement`.  The result is the value that was in the
// cell before the operation, whether or not the swap happened.
bool
js::atomics_compareExchange(JSContext* cx, unsigned argc, Value* vp)
{
    CallArgs args = CallArgsFromVp(argc, vp);
    HandleValue objv = args.get(0);
    HandleValue idxv = args.get(1);
    HandleValue oldv = args.get(2);
    HandleValue newv = args.get(3);
    MutableHandleValue r = args.rval();

    // Argument validation/coercion can run arbitrary JS (valueOf etc.), so
    // it all happens before the atomic operation itself.
    Rooted<TypedArrayObject*> view(cx, nullptr);
    if (!GetSharedTypedArray(cx, objv, &view))
        return false;
    uint32_t offset;
    if (!GetTypedArrayIndex(cx, idxv, view, &offset))
        return false;
    int32_t oldCandidate;
    if (!ToInt32(cx, oldv, &oldCandidate))
        return false;
    int32_t newCandidate;
    if (!ToInt32(cx, newv, &newCandidate))
        return false;

    bool badType = false;
    int32_t result = CompareExchange(view->type(), oldCandidate, newCandidate,
                                     view->viewDataShared(), offset, &badType);

    if (badType)
        return ReportBadArrayType(cx);

    // A Uint32 old value may exceed INT32_MAX, so box it as a double.
    if (view->type() == Scalar::Uint32)
        r.setNumber((double)(uint32_t)result);
    else
        r.setInt32(result);
    return true;
}
     216             : 
     217             : bool
     218           0 : js::atomics_load(JSContext* cx, unsigned argc, Value* vp)
     219             : {
     220           0 :     CallArgs args = CallArgsFromVp(argc, vp);
     221           0 :     HandleValue objv = args.get(0);
     222           0 :     HandleValue idxv = args.get(1);
     223           0 :     MutableHandleValue r = args.rval();
     224             : 
     225           0 :     Rooted<TypedArrayObject*> view(cx, nullptr);
     226           0 :     if (!GetSharedTypedArray(cx, objv, &view))
     227           0 :         return false;
     228             :     uint32_t offset;
     229           0 :     if (!GetTypedArrayIndex(cx, idxv, view, &offset))
     230           0 :         return false;
     231             : 
     232           0 :     SharedMem<void*> viewData = view->viewDataShared();
     233           0 :     switch (view->type()) {
     234             :       case Scalar::Uint8: {
     235           0 :         uint8_t v = jit::AtomicOperations::loadSeqCst(viewData.cast<uint8_t*>() + offset);
     236           0 :         r.setInt32(v);
     237           0 :         return true;
     238             :       }
     239             :       case Scalar::Int8: {
     240           0 :         int8_t v = jit::AtomicOperations::loadSeqCst(viewData.cast<uint8_t*>() + offset);
     241           0 :         r.setInt32(v);
     242           0 :         return true;
     243             :       }
     244             :       case Scalar::Int16: {
     245           0 :         int16_t v = jit::AtomicOperations::loadSeqCst(viewData.cast<int16_t*>() + offset);
     246           0 :         r.setInt32(v);
     247           0 :         return true;
     248             :       }
     249             :       case Scalar::Uint16: {
     250           0 :         uint16_t v = jit::AtomicOperations::loadSeqCst(viewData.cast<uint16_t*>() + offset);
     251           0 :         r.setInt32(v);
     252           0 :         return true;
     253             :       }
     254             :       case Scalar::Int32: {
     255           0 :         int32_t v = jit::AtomicOperations::loadSeqCst(viewData.cast<int32_t*>() + offset);
     256           0 :         r.setInt32(v);
     257           0 :         return true;
     258             :       }
     259             :       case Scalar::Uint32: {
     260           0 :         uint32_t v = jit::AtomicOperations::loadSeqCst(viewData.cast<uint32_t*>() + offset);
     261           0 :         r.setNumber(v);
     262           0 :         return true;
     263             :       }
     264             :       default:
     265           0 :         return ReportBadArrayType(cx);
     266             :     }
     267             : }
     268             : 
// Selects the behavior of the shared ExchangeOrStore template:
// DoExchange implements Atomics.exchange (result is the old cell value),
// DoStore implements Atomics.store (result is the stored value).
enum XchgStoreOp {
    DoExchange,
    DoStore
};
     273             : 
// Perform a sequentially-consistent store or exchange of `numberValue`
// (truncated to the element type) at `offset` in `viewData`.  For DoStore
// the input value is returned unchanged; for DoExchange the previous cell
// value is returned, widened to int32.  On an unsupported element type,
// *badArrayType is set (if provided) and 0 is returned.
template<XchgStoreOp op>
static int32_t
ExchangeOrStore(Scalar::Type viewType, int32_t numberValue, SharedMem<void*> viewData,
                uint32_t offset, bool* badArrayType = nullptr)
{
// Dispatch on the compile-time `op`: plain store, or exchange that
// overwrites `value` with the old cell contents.
#define INT_OP(ptr, value)                                         \
    JS_BEGIN_MACRO                                                 \
    if (op == DoStore)                                             \
        jit::AtomicOperations::storeSeqCst(ptr, value);            \
    else                                                           \
        value = jit::AtomicOperations::exchangeSeqCst(ptr, value); \
    JS_END_MACRO

    switch (viewType) {
      case Scalar::Int8: {
        int8_t value = (int8_t)numberValue;
        INT_OP(viewData.cast<int8_t*>() + offset, value);
        return value;
      }
      case Scalar::Uint8: {
        uint8_t value = (uint8_t)numberValue;
        INT_OP(viewData.cast<uint8_t*>() + offset, value);
        return value;
      }
      case Scalar::Int16: {
        int16_t value = (int16_t)numberValue;
        INT_OP(viewData.cast<int16_t*>() + offset, value);
        return value;
      }
      case Scalar::Uint16: {
        uint16_t value = (uint16_t)numberValue;
        INT_OP(viewData.cast<uint16_t*>() + offset, value);
        return value;
      }
      case Scalar::Int32: {
        int32_t value = numberValue;
        INT_OP(viewData.cast<int32_t*>() + offset, value);
        return value;
      }
      case Scalar::Uint32: {
        uint32_t value = (uint32_t)numberValue;
        INT_OP(viewData.cast<uint32_t*>() + offset, value);
        // Reinterpret as int32; the JS-visible caller re-widens to double.
        return (int32_t)value;
      }
      default:
        if (badArrayType)
            *badArrayType = true;
        return 0;
    }
#undef INT_OP
}
     325             : 
// Shared JS entry point for Atomics.store and Atomics.exchange.
// Validates the view and index, coerces the operand with ToInteger, then
// performs the typed atomic operation.  Per spec, store returns the
// ToInteger-converted input, while exchange returns the old cell value.
template<XchgStoreOp op>
static bool
ExchangeOrStore(JSContext* cx, unsigned argc, Value* vp)
{
    CallArgs args = CallArgsFromVp(argc, vp);
    HandleValue objv = args.get(0);
    HandleValue idxv = args.get(1);
    HandleValue valv = args.get(2);
    MutableHandleValue r = args.rval();

    Rooted<TypedArrayObject*> view(cx, nullptr);
    if (!GetSharedTypedArray(cx, objv, &view))
        return false;
    uint32_t offset;
    if (!GetTypedArrayIndex(cx, idxv, view, &offset))
        return false;
    double integerValue;
    if (!ToInteger(cx, valv, &integerValue))
        return false;

    bool badType = false;
    int32_t result = ExchangeOrStore<op>(view->type(), JS::ToInt32(integerValue),
                                         view->viewDataShared(), offset, &badType);

    if (badType)
        return ReportBadArrayType(cx);

    // `op` is a compile-time constant, so only one of these branches
    // survives instantiation.
    if (op == DoStore)
        r.setNumber(integerValue);
    else if (view->type() == Scalar::Uint32)
        r.setNumber((double)(uint32_t)result);
    else
        r.setInt32(result);
    return true;
}
     361             : 
// Atomics.store(view, index, value): store and return the converted value.
bool
js::atomics_store(JSContext* cx, unsigned argc, Value* vp)
{
    return ExchangeOrStore<DoStore>(cx, argc, vp);
}
     367             : 
// Atomics.exchange(view, index, value): swap in `value`, return old value.
bool
js::atomics_exchange(JSContext* cx, unsigned argc, Value* vp)
{
    return ExchangeOrStore<DoExchange>(cx, argc, vp);
}
     373             : 
     374             : template<typename T>
     375             : static bool
     376           0 : AtomicsBinop(JSContext* cx, HandleValue objv, HandleValue idxv, HandleValue valv,
     377             :              MutableHandleValue r)
     378             : {
     379           0 :     Rooted<TypedArrayObject*> view(cx, nullptr);
     380           0 :     if (!GetSharedTypedArray(cx, objv, &view))
     381           0 :         return false;
     382             :     uint32_t offset;
     383           0 :     if (!GetTypedArrayIndex(cx, idxv, view, &offset))
     384           0 :         return false;
     385             :     int32_t numberValue;
     386           0 :     if (!ToInt32(cx, valv, &numberValue))
     387           0 :         return false;
     388             : 
     389           0 :     SharedMem<void*> viewData = view->viewDataShared();
     390           0 :     switch (view->type()) {
     391             :       case Scalar::Int8: {
     392           0 :         int8_t v = (int8_t)numberValue;
     393           0 :         r.setInt32(T::operate(viewData.cast<int8_t*>() + offset, v));
     394           0 :         return true;
     395             :       }
     396             :       case Scalar::Uint8: {
     397           0 :         uint8_t v = (uint8_t)numberValue;
     398           0 :         r.setInt32(T::operate(viewData.cast<uint8_t*>() + offset, v));
     399           0 :         return true;
     400             :       }
     401             :       case Scalar::Int16: {
     402           0 :         int16_t v = (int16_t)numberValue;
     403           0 :         r.setInt32(T::operate(viewData.cast<int16_t*>() + offset, v));
     404           0 :         return true;
     405             :       }
     406             :       case Scalar::Uint16: {
     407           0 :         uint16_t v = (uint16_t)numberValue;
     408           0 :         r.setInt32(T::operate(viewData.cast<uint16_t*>() + offset, v));
     409           0 :         return true;
     410             :       }
     411             :       case Scalar::Int32: {
     412           0 :         int32_t v = numberValue;
     413           0 :         r.setInt32(T::operate(viewData.cast<int32_t*>() + offset, v));
     414           0 :         return true;
     415             :       }
     416             :       case Scalar::Uint32: {
     417           0 :         uint32_t v = (uint32_t)numberValue;
     418           0 :         r.setNumber((double)T::operate(viewData.cast<uint32_t*>() + offset, v));
     419           0 :         return true;
     420             :       }
     421             :       default:
     422           0 :         return ReportBadArrayType(cx);
     423             :     }
     424             : }
     425             : 
// Expands, inside a Perform* class body, to one static `operate` overload
// per integral element type.  Each overload forwards to the given atomic
// fetch-op NAME and returns the value previously stored at `addr`.
#define INTEGRAL_TYPES_FOR_EACH(NAME) \
    static int8_t operate(SharedMem<int8_t*> addr, int8_t v) { return NAME(addr, v); } \
    static uint8_t operate(SharedMem<uint8_t*> addr, uint8_t v) { return NAME(addr, v); } \
    static int16_t operate(SharedMem<int16_t*> addr, int16_t v) { return NAME(addr, v); } \
    static uint16_t operate(SharedMem<uint16_t*> addr, uint16_t v) { return NAME(addr, v); } \
    static int32_t operate(SharedMem<int32_t*> addr, int32_t v) { return NAME(addr, v); } \
    static uint32_t operate(SharedMem<uint32_t*> addr, uint32_t v) { return NAME(addr, v); }
     433             : 
// Fetch-and-add operand class for AtomicsBinop and the asm.js callouts.
// `operate` does the atomic RMW and returns the old value; `perform` is the
// plain scalar combination (presumably used by JIT/inline paths -- its
// caller is not visible in this file).
class PerformAdd
{
public:
    INTEGRAL_TYPES_FOR_EACH(jit::AtomicOperations::fetchAddSeqCst)
    static int32_t perform(int32_t x, int32_t y) { return x + y; }
};
     440             : 
     441             : bool
     442           0 : js::atomics_add(JSContext* cx, unsigned argc, Value* vp)
     443             : {
     444           0 :     CallArgs args = CallArgsFromVp(argc, vp);
     445           0 :     return AtomicsBinop<PerformAdd>(cx, args.get(0), args.get(1), args.get(2), args.rval());
     446             : }
     447             : 
// Fetch-and-subtract operand class for AtomicsBinop and the asm.js callouts.
// `operate` does the atomic RMW and returns the old value; `perform` is the
// plain scalar combination.
class PerformSub
{
public:
    INTEGRAL_TYPES_FOR_EACH(jit::AtomicOperations::fetchSubSeqCst)
    static int32_t perform(int32_t x, int32_t y) { return x - y; }
};
     454             : 
     455             : bool
     456           0 : js::atomics_sub(JSContext* cx, unsigned argc, Value* vp)
     457             : {
     458           0 :     CallArgs args = CallArgsFromVp(argc, vp);
     459           0 :     return AtomicsBinop<PerformSub>(cx, args.get(0), args.get(1), args.get(2), args.rval());
     460             : }
     461             : 
// Fetch-and-AND operand class for AtomicsBinop and the asm.js callouts.
// `operate` does the atomic RMW and returns the old value; `perform` is the
// plain scalar combination.
class PerformAnd
{
public:
    INTEGRAL_TYPES_FOR_EACH(jit::AtomicOperations::fetchAndSeqCst)
    static int32_t perform(int32_t x, int32_t y) { return x & y; }
};
     468             : 
     469             : bool
     470           0 : js::atomics_and(JSContext* cx, unsigned argc, Value* vp)
     471             : {
     472           0 :     CallArgs args = CallArgsFromVp(argc, vp);
     473           0 :     return AtomicsBinop<PerformAnd>(cx, args.get(0), args.get(1), args.get(2), args.rval());
     474             : }
     475             : 
// Fetch-and-OR operand class for AtomicsBinop and the asm.js callouts.
// `operate` does the atomic RMW and returns the old value; `perform` is the
// plain scalar combination.
class PerformOr
{
public:
    INTEGRAL_TYPES_FOR_EACH(jit::AtomicOperations::fetchOrSeqCst)
    static int32_t perform(int32_t x, int32_t y) { return x | y; }
};
     482             : 
     483             : bool
     484           0 : js::atomics_or(JSContext* cx, unsigned argc, Value* vp)
     485             : {
     486           0 :     CallArgs args = CallArgsFromVp(argc, vp);
     487           0 :     return AtomicsBinop<PerformOr>(cx, args.get(0), args.get(1), args.get(2), args.rval());
     488             : }
     489             : 
// Fetch-and-XOR operand class for AtomicsBinop and the asm.js callouts.
// `operate` does the atomic RMW and returns the old value; `perform` is the
// plain scalar combination.
class PerformXor
{
public:
    INTEGRAL_TYPES_FOR_EACH(jit::AtomicOperations::fetchXorSeqCst)
    static int32_t perform(int32_t x, int32_t y) { return x ^ y; }
};
     496             : 
     497             : bool
     498           0 : js::atomics_xor(JSContext* cx, unsigned argc, Value* vp)
     499             : {
     500           0 :     CallArgs args = CallArgsFromVp(argc, vp);
     501           0 :     return AtomicsBinop<PerformXor>(cx, args.get(0), args.get(1), args.get(2), args.rval());
     502             : }
     503             : 
     504             : bool
     505           0 : js::atomics_isLockFree(JSContext* cx, unsigned argc, Value* vp)
     506             : {
     507           0 :     CallArgs args = CallArgsFromVp(argc, vp);
     508           0 :     HandleValue v = args.get(0);
     509             :     int32_t size;
     510           0 :     if (v.isInt32()) {
     511           0 :         size = v.toInt32();
     512             :     } else {
     513             :         double dsize;
     514           0 :         if (!ToInteger(cx, v, &dsize))
     515           0 :             return false;
     516           0 :         if (!mozilla::NumberIsInt32(dsize, &size)) {
     517           0 :             args.rval().setBoolean(false);
     518           0 :             return true;
     519             :         }
     520             :     }
     521           0 :     args.rval().setBoolean(jit::AtomicOperations::isLockfree(size));
     522           0 :     return true;
     523             : }
     524             : 
     525             : // asm.js callouts for platforms that do not have non-word-sized
     526             : // atomics where we don't want to inline the logic for the atomics.
     527             : //
     528             : // Memory will always be shared since the callouts are only called from
     529             : // code that checks that the memory is shared.
     530             : //
     531             : // To test this, either run on eg Raspberry Pi Model 1, or invoke the ARM
     532             : // simulator build with ARMHWCAP=vfp set.  Do not set any other flags; other
     533             : // vfp/neon flags force ARMv7 to be set.
     534             : 
// asm.js callout implementing atomic add on sub-word data (see the block
// comment above for when these callouts are used).
//
// NOTE(review): `offset` appears to be a byte offset into the heap — the
// bounds check compares it to the byte length and the `>> 1` below scales
// it to a 16-bit element index; confirm against the JIT caller.
int32_t
js::atomics_add_asm_callout(wasm::Instance* instance, int32_t vt, int32_t offset, int32_t value)
{
    SharedMem<void*> heap = instance->memoryBase().cast<void*>();
    size_t heapLength = instance->memoryLength();

    // Out-of-bounds accesses do not report an error here; they just yield 0.
    if (size_t(offset) >= heapLength)
        return 0;

    switch (Scalar::Type(vt)) {
      case Scalar::Int8:
        return PerformAdd::operate(heap.cast<int8_t*>() + offset, value);
      case Scalar::Uint8:
        return PerformAdd::operate(heap.cast<uint8_t*>() + offset, value);
      case Scalar::Int16:
        return PerformAdd::operate(heap.cast<int16_t*>() + (offset >> 1), value);
      case Scalar::Uint16:
        return PerformAdd::operate(heap.cast<uint16_t*>() + (offset >> 1), value);
      default:
        MOZ_CRASH("Invalid size");
    }
}
     557             : 
// asm.js callout implementing atomic subtract on sub-word data; mirrors
// atomics_add_asm_callout above.
int32_t
js::atomics_sub_asm_callout(wasm::Instance* instance, int32_t vt, int32_t offset, int32_t value)
{
    SharedMem<void*> heap = instance->memoryBase().cast<void*>();
    size_t heapLength = instance->memoryLength();

    // Out-of-bounds accesses do not report an error here; they just yield 0.
    if (size_t(offset) >= heapLength)
        return 0;

    switch (Scalar::Type(vt)) {
      case Scalar::Int8:
        return PerformSub::operate(heap.cast<int8_t*>() + offset, value);
      case Scalar::Uint8:
        return PerformSub::operate(heap.cast<uint8_t*>() + offset, value);
      case Scalar::Int16:
        // `offset` is a byte offset; >>1 converts to a 16-bit element index.
        return PerformSub::operate(heap.cast<int16_t*>() + (offset >> 1), value);
      case Scalar::Uint16:
        return PerformSub::operate(heap.cast<uint16_t*>() + (offset >> 1), value);
      default:
        MOZ_CRASH("Invalid size");
    }
}
     580             : 
     581             : int32_t
     582           0 : js::atomics_and_asm_callout(wasm::Instance* instance, int32_t vt, int32_t offset, int32_t value)
     583             : {
     584           0 :     SharedMem<void*> heap = instance->memoryBase().cast<void*>();
     585           0 :     size_t heapLength = instance->memoryLength();
     586             : 
     587           0 :     if (size_t(offset) >= heapLength)
     588           0 :         return 0;
     589             : 
     590           0 :     switch (Scalar::Type(vt)) {
     591             :       case Scalar::Int8:
     592           0 :         return PerformAnd::operate(heap.cast<int8_t*>() + offset, value);
     593             :       case Scalar::Uint8:
     594           0 :         return PerformAnd::operate(heap.cast<uint8_t*>() + offset, value);
     595             :       case Scalar::Int16:
     596           0 :         return PerformAnd::operate(heap.cast<int16_t*>() + (offset >> 1), value);
     597             :       case Scalar::Uint16:
     598           0 :         return PerformAnd::operate(heap.cast<uint16_t*>() + (offset >> 1), value);
     599             :       default:
     600           0 :         MOZ_CRASH("Invalid size");
     601             :     }
     602             : }
     603             : 
     604             : int32_t
     605           0 : js::atomics_or_asm_callout(wasm::Instance* instance, int32_t vt, int32_t offset, int32_t value)
     606             : {
     607           0 :     SharedMem<void*> heap = instance->memoryBase().cast<void*>();
     608           0 :     size_t heapLength = instance->memoryLength();
     609             : 
     610           0 :     if (size_t(offset) >= heapLength)
     611           0 :         return 0;
     612             : 
     613           0 :     switch (Scalar::Type(vt)) {
     614             :       case Scalar::Int8:
     615           0 :         return PerformOr::operate(heap.cast<int8_t*>() + offset, value);
     616             :       case Scalar::Uint8:
     617           0 :         return PerformOr::operate(heap.cast<uint8_t*>() + offset, value);
     618             :       case Scalar::Int16:
     619           0 :         return PerformOr::operate(heap.cast<int16_t*>() + (offset >> 1), value);
     620             :       case Scalar::Uint16:
     621           0 :         return PerformOr::operate(heap.cast<uint16_t*>() + (offset >> 1), value);
     622             :       default:
     623           0 :         MOZ_CRASH("Invalid size");
     624             :     }
     625             : }
     626             : 
     627             : int32_t
     628           0 : js::atomics_xor_asm_callout(wasm::Instance* instance, int32_t vt, int32_t offset, int32_t value)
     629             : {
     630           0 :     SharedMem<void*> heap = instance->memoryBase().cast<void*>();
     631           0 :     size_t heapLength = instance->memoryLength();
     632             : 
     633           0 :     if (size_t(offset) >= heapLength)
     634           0 :         return 0;
     635             : 
     636           0 :     switch (Scalar::Type(vt)) {
     637             :       case Scalar::Int8:
     638           0 :         return PerformXor::operate(heap.cast<int8_t*>() + offset, value);
     639             :       case Scalar::Uint8:
     640           0 :         return PerformXor::operate(heap.cast<uint8_t*>() + offset, value);
     641             :       case Scalar::Int16:
     642           0 :         return PerformXor::operate(heap.cast<int16_t*>() + (offset >> 1), value);
     643             :       case Scalar::Uint16:
     644           0 :         return PerformXor::operate(heap.cast<uint16_t*>() + (offset >> 1), value);
     645             :       default:
     646           0 :         MOZ_CRASH("Invalid size");
     647             :     }
     648             : }
     649             : 
     650             : int32_t
     651           0 : js::atomics_xchg_asm_callout(wasm::Instance* instance, int32_t vt, int32_t offset, int32_t value)
     652             : {
     653           0 :     SharedMem<void*> heap = instance->memoryBase().cast<void*>();
     654           0 :     size_t heapLength = instance->memoryLength();
     655             : 
     656           0 :     if (size_t(offset) >= heapLength)
     657           0 :         return 0;
     658             : 
     659           0 :     switch (Scalar::Type(vt)) {
     660             :       case Scalar::Int8:
     661           0 :         return ExchangeOrStore<DoExchange>(Scalar::Int8, value, heap, offset);
     662             :       case Scalar::Uint8:
     663           0 :         return ExchangeOrStore<DoExchange>(Scalar::Uint8, value, heap, offset);
     664             :       case Scalar::Int16:
     665           0 :         return ExchangeOrStore<DoExchange>(Scalar::Int16, value, heap, offset>>1);
     666             :       case Scalar::Uint16:
     667           0 :         return ExchangeOrStore<DoExchange>(Scalar::Uint16, value, heap, offset>>1);
     668             :       default:
     669           0 :         MOZ_CRASH("Invalid size");
     670             :     }
     671             : }
     672             : 
     673             : int32_t
     674           0 : js::atomics_cmpxchg_asm_callout(wasm::Instance* instance, int32_t vt, int32_t offset, int32_t oldval, int32_t newval)
     675             : {
     676           0 :     SharedMem<void*> heap = instance->memoryBase().cast<void*>();
     677           0 :     size_t heapLength = instance->memoryLength();
     678             : 
     679           0 :     if (size_t(offset) >= heapLength)
     680           0 :         return 0;
     681             : 
     682           0 :     switch (Scalar::Type(vt)) {
     683             :       case Scalar::Int8:
     684           0 :         return CompareExchange(Scalar::Int8, oldval, newval, heap, offset);
     685             :       case Scalar::Uint8:
     686           0 :         return CompareExchange(Scalar::Uint8, oldval, newval, heap, offset);
     687             :       case Scalar::Int16:
     688           0 :         return CompareExchange(Scalar::Int16, oldval, newval, heap, offset>>1);
     689             :       case Scalar::Uint16:
     690           0 :         return CompareExchange(Scalar::Uint16, oldval, newval, heap, offset>>1);
     691             :       default:
     692           0 :         MOZ_CRASH("Invalid size");
     693             :     }
     694             : }
     695             : 
namespace js {

// Represents one waiting worker.
//
// The type is declared opaque in SharedArrayObject.h.  Instances of
// js::FutexWaiter are stack-allocated and linked onto a list across a
// call to FutexThread::wait().
//
// The 'waiters' field of the SharedArrayRawBuffer points to the highest
// priority waiter in the list, and lower priority nodes are linked through
// the 'lower_pri' field.  The 'back' field goes the other direction.
// The list is circular, so the 'lower_pri' field of the lowest priority
// node points to the first node in the list.  The list has no dedicated
// header node.
//
// The list links are manipulated only by atomics_wait() and traversed by
// atomics_wake(); both do so while holding AutoLockFutexAPI (see below).

class FutexWaiter
{
  public:
    // A freshly constructed waiter is not yet on any list; atomics_wait()
    // links it in (or makes it a one-element circular list).
    FutexWaiter(uint32_t offset, JSContext* cx)
      : offset(offset),
        cx(cx),
        lower_pri(nullptr),
        back(nullptr)
    {
    }

    uint32_t    offset;                 // int32 element index within the SharedArrayBuffer
    JSContext* cx;                      // The waiting thread
    FutexWaiter* lower_pri;             // Lower priority nodes in circular doubly-linked list of waiters
    FutexWaiter* back;                  // Other direction
};

// RAII guard for the process-wide futex lock (FutexThread::lock_).
// Held across all waiter-list manipulation and the condition-variable
// wait itself; unique() exposes the underlying UniqueLock so it can be
// handed to FutexThread::wait().
class AutoLockFutexAPI
{
    // We have to wrap this in a Maybe because of the way loading
    // mozilla::Atomic pointers works.
    mozilla::Maybe<js::UniqueLock<js::Mutex>> unique_;

  public:
    AutoLockFutexAPI() {
        // Load the atomic pointer once, then lock through the plain pointer.
        js::Mutex* lock = FutexThread::lock_;
        unique_.emplace(*lock);
    }

    ~AutoLockFutexAPI() {
        unique_.reset();
    }

    js::UniqueLock<js::Mutex>& unique() { return *unique_; }
};

} // namespace js
     748             : 
     749             : bool
     750           0 : js::atomics_wait(JSContext* cx, unsigned argc, Value* vp)
     751             : {
     752           0 :     CallArgs args = CallArgsFromVp(argc, vp);
     753           0 :     HandleValue objv = args.get(0);
     754           0 :     HandleValue idxv = args.get(1);
     755           0 :     HandleValue valv = args.get(2);
     756           0 :     HandleValue timeoutv = args.get(3);
     757           0 :     MutableHandleValue r = args.rval();
     758             : 
     759           0 :     Rooted<TypedArrayObject*> view(cx, nullptr);
     760           0 :     if (!GetSharedTypedArray(cx, objv, &view))
     761           0 :         return false;
     762           0 :     if (view->type() != Scalar::Int32)
     763           0 :         return ReportBadArrayType(cx);
     764             :     uint32_t offset;
     765           0 :     if (!GetTypedArrayIndex(cx, idxv, view, &offset))
     766           0 :         return false;
     767             :     int32_t value;
     768           0 :     if (!ToInt32(cx, valv, &value))
     769           0 :         return false;
     770           0 :     mozilla::Maybe<mozilla::TimeDuration> timeout;
     771           0 :     if (!timeoutv.isUndefined()) {
     772             :         double timeout_ms;
     773           0 :         if (!ToNumber(cx, timeoutv, &timeout_ms))
     774           0 :             return false;
     775           0 :         if (!mozilla::IsNaN(timeout_ms)) {
     776           0 :             if (timeout_ms < 0)
     777           0 :                 timeout = mozilla::Some(mozilla::TimeDuration::FromSeconds(0.0));
     778           0 :             else if (!mozilla::IsInfinite(timeout_ms))
     779           0 :                 timeout = mozilla::Some(mozilla::TimeDuration::FromMilliseconds(timeout_ms));
     780             :         }
     781             :     }
     782             : 
     783           0 :     if (!cx->fx.canWait())
     784           0 :         return ReportCannotWait(cx);
     785             : 
     786             :     // This lock also protects the "waiters" field on SharedArrayRawBuffer,
     787             :     // and it provides the necessary memory fence.
     788           0 :     AutoLockFutexAPI lock;
     789             : 
     790           0 :     SharedMem<int32_t*>(addr) = view->viewDataShared().cast<int32_t*>() + offset;
     791           0 :     if (jit::AtomicOperations::loadSafeWhenRacy(addr) != value) {
     792           0 :         r.setString(cx->names().futexNotEqual);
     793           0 :         return true;
     794             :     }
     795             : 
     796           0 :     Rooted<SharedArrayBufferObject*> sab(cx, view->bufferShared());
     797           0 :     SharedArrayRawBuffer* sarb = sab->rawBufferObject();
     798             : 
     799           0 :     FutexWaiter w(offset, cx);
     800           0 :     if (FutexWaiter* waiters = sarb->waiters()) {
     801           0 :         w.lower_pri = waiters;
     802           0 :         w.back = waiters->back;
     803           0 :         waiters->back->lower_pri = &w;
     804           0 :         waiters->back = &w;
     805             :     } else {
     806           0 :         w.lower_pri = w.back = &w;
     807           0 :         sarb->setWaiters(&w);
     808             :     }
     809             : 
     810           0 :     FutexThread::WaitResult result = FutexThread::FutexOK;
     811           0 :     bool retval = cx->fx.wait(cx, lock.unique(), timeout, &result);
     812           0 :     if (retval) {
     813           0 :         switch (result) {
     814             :           case FutexThread::FutexOK:
     815           0 :             r.setString(cx->names().futexOK);
     816           0 :             break;
     817             :           case FutexThread::FutexTimedOut:
     818           0 :             r.setString(cx->names().futexTimedOut);
     819           0 :             break;
     820             :         }
     821             :     }
     822             : 
     823           0 :     if (w.lower_pri == &w) {
     824           0 :         sarb->setWaiters(nullptr);
     825             :     } else {
     826           0 :         w.lower_pri->back = w.back;
     827           0 :         w.back->lower_pri = w.lower_pri;
     828           0 :         if (sarb->waiters() == &w)
     829           0 :             sarb->setWaiters(w.lower_pri);
     830             :     }
     831           0 :     return retval;
     832             : }
     833             : 
     834             : bool
     835           0 : js::atomics_wake(JSContext* cx, unsigned argc, Value* vp)
     836             : {
     837           0 :     CallArgs args = CallArgsFromVp(argc, vp);
     838           0 :     HandleValue objv = args.get(0);
     839           0 :     HandleValue idxv = args.get(1);
     840           0 :     HandleValue countv = args.get(2);
     841           0 :     MutableHandleValue r = args.rval();
     842             : 
     843           0 :     Rooted<TypedArrayObject*> view(cx, nullptr);
     844           0 :     if (!GetSharedTypedArray(cx, objv, &view))
     845           0 :         return false;
     846           0 :     if (view->type() != Scalar::Int32)
     847           0 :         return ReportBadArrayType(cx);
     848             :     uint32_t offset;
     849           0 :     if (!GetTypedArrayIndex(cx, idxv, view, &offset))
     850           0 :         return false;
     851             :     double count;
     852           0 :     if (countv.isUndefined()) {
     853           0 :         count = mozilla::PositiveInfinity<double>();
     854             :     } else {
     855           0 :         if (!ToInteger(cx, countv, &count))
     856           0 :             return false;
     857           0 :         if (count < 0.0)
     858           0 :             count = 0.0;
     859             :     }
     860             : 
     861           0 :     AutoLockFutexAPI lock;
     862             : 
     863           0 :     Rooted<SharedArrayBufferObject*> sab(cx, view->bufferShared());
     864           0 :     SharedArrayRawBuffer* sarb = sab->rawBufferObject();
     865           0 :     int32_t woken = 0;
     866             : 
     867           0 :     FutexWaiter* waiters = sarb->waiters();
     868           0 :     if (waiters && count > 0) {
     869           0 :         FutexWaiter* iter = waiters;
     870           0 :         do {
     871           0 :             FutexWaiter* c = iter;
     872           0 :             iter = iter->lower_pri;
     873           0 :             if (c->offset != offset || !c->cx->fx.isWaiting())
     874           0 :                 continue;
     875           0 :             c->cx->fx.wake(FutexThread::WakeExplicit);
     876           0 :             ++woken;
     877           0 :             --count;
     878           0 :         } while (count > 0 && iter != waiters);
     879             :     }
     880             : 
     881           0 :     r.setInt32(woken);
     882           0 :     return true;
     883             : }
     884             : 
     885             : /* static */ bool
     886           3 : js::FutexThread::initialize()
     887             : {
     888           3 :     MOZ_ASSERT(!lock_);
     889           3 :     lock_ = js_new<js::Mutex>(mutexid::FutexThread);
     890           3 :     return lock_ != nullptr;
     891             : }
     892             : 
     893             : /* static */ void
     894           0 : js::FutexThread::destroy()
     895             : {
     896           0 :     if (lock_) {
     897           0 :         js::Mutex* lock = lock_;
     898           0 :         js_delete(lock);
     899           0 :         lock_ = nullptr;
     900             :     }
     901           0 : }
     902             : 
     903             : /* static */ void
     904           2 : js::FutexThread::lock()
     905             : {
     906             :     // Load the atomic pointer.
     907           2 :     js::Mutex* lock = lock_;
     908             : 
     909           2 :     lock->lock();
     910           2 : }
     911             : 
// Definition of the static futex mutex pointer.  It is a mozilla::Atomic
// pointer; lock()/unlock() and AutoLockFutexAPI load it into a plain
// local before use.
/* static */ mozilla::Atomic<js::Mutex*> FutexThread::lock_;
     913             : 
     914             : /* static */ void
     915           2 : js::FutexThread::unlock()
     916             : {
     917             :     // Load the atomic pointer.
     918           2 :     js::Mutex* lock = lock_;
     919             : 
     920           2 :     lock->unlock();
     921           2 : }
     922             : 
// A FutexThread starts idle, with waiting disallowed and no condition
// variable; the condition variable is allocated later in initInstance().
js::FutexThread::FutexThread()
  : cond_(nullptr),
    state_(Idle),
    canWait_(false)
{
}
     929             : 
     930             : bool
     931           4 : js::FutexThread::initInstance()
     932             : {
     933           4 :     MOZ_ASSERT(lock_);
     934           4 :     cond_ = js_new<js::ConditionVariable>();
     935           4 :     return cond_ != nullptr;
     936             : }
     937             : 
     938             : void
     939           0 : js::FutexThread::destroyInstance()
     940             : {
     941           0 :     if (cond_)
     942           0 :         js_delete(cond_);
     943           0 : }
     944             : 
     945             : bool
     946           2 : js::FutexThread::isWaiting()
     947             : {
     948             :     // When a worker is awoken for an interrupt it goes into state
     949             :     // WaitingNotifiedForInterrupt for a short time before it actually
     950             :     // wakes up and goes into WaitingInterrupted.  In those states the
     951             :     // worker is still waiting, and if an explicit wake arrives the
     952             :     // worker transitions to Woken.  See further comments in
     953             :     // FutexThread::wait().
     954           2 :     return state_ == Waiting || state_ == WaitingInterrupted || state_ == WaitingNotifiedForInterrupt;
     955             : }
     956             : 
// Block this thread on its condition variable until it is explicitly
// woken, the optional |timeout| elapses, or an interrupt is handled.
//
// |locked| is the held futex lock; it is released while the interrupt
// handler runs and while blocked in the condition variable, and re-held
// on return.  On success, *result is FutexOK or FutexTimedOut.  Returns
// false if waiting is disallowed (reentrant wait from an interrupt
// handler) or if the interrupt handler itself fails.
bool
js::FutexThread::wait(JSContext* cx, js::UniqueLock<js::Mutex>& locked,
                       mozilla::Maybe<mozilla::TimeDuration>& timeout, WaitResult* result)
{
    MOZ_ASSERT(&cx->fx == this);
    MOZ_ASSERT(cx->fx.canWait());
    MOZ_ASSERT(state_ == Idle || state_ == WaitingInterrupted);

    // Disallow waiting when a runtime is processing an interrupt.
    // See explanation below.

    if (state_ == WaitingInterrupted) {
        // Drop the lock while reporting, per the usual error-reporting
        // protocol.
        UnlockGuard<Mutex> unlock(locked);
        JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, JSMSG_ATOMICS_WAIT_NOT_ALLOWED);
        return false;
    }

    // Go back to Idle after returning.
    auto onFinish = mozilla::MakeScopeExit([&] {
        state_ = Idle;
    });

    const bool isTimed = timeout.isSome();

    // Absolute deadline for the whole wait (Nothing => wait forever).
    auto finalEnd = timeout.map([](mozilla::TimeDuration& timeout) {
        return mozilla::TimeStamp::Now() + timeout;
    });


    // 4000s is about the longest timeout slice that is guaranteed to
    // work cross-platform.
    auto maxSlice = mozilla::TimeDuration::FromSeconds(4000.0);

    for (;;) {
        // If we are doing a timed wait, calculate the end time for this wait
        // slice.
        auto sliceEnd = finalEnd.map([&](mozilla::TimeStamp& finalEnd) {
            auto sliceEnd = mozilla::TimeStamp::Now() + maxSlice;
            if (finalEnd < sliceEnd)
                sliceEnd = finalEnd;
            return sliceEnd;
        });

        state_ = Waiting;

        if (isTimed) {
            mozilla::Unused << cond_->wait_until(locked, *sliceEnd);
        } else {
            cond_->wait(locked);
        }

        // The wake-up reason is encoded in state_, which wake() updates
        // under the same lock we now hold again.
        switch (state_) {
          case FutexThread::Waiting:
            // Timeout or spurious wakeup.
            if (isTimed) {
                auto now = mozilla::TimeStamp::Now();
                if (now >= *finalEnd) {
                    *result = FutexTimedOut;
                    return true;
                }
            }
            break;

          case FutexThread::Woken:
            *result = FutexOK;
            return true;

          case FutexThread::WaitingNotifiedForInterrupt:
            // The interrupt handler may reenter the engine.  In that case
            // there are two complications:
            //
            // - The waiting thread is not actually waiting on the
            //   condition variable so we have to record that it
            //   should be woken when the interrupt handler returns.
            //   To that end, we flag the thread as interrupted around
            //   the interrupt and check state_ when the interrupt
            //   handler returns.  A wake() call that reaches the
            //   runtime during the interrupt sets state_ to Woken.
            //
            // - It is in principle possible for wait() to be
            //   reentered on the same thread/runtime and waiting on the
            //   same location and to yet again be interrupted and enter
            //   the interrupt handler.  In this case, it is important
            //   that when another agent wakes waiters, all waiters using
            //   the same runtime on the same location are woken in LIFO
            //   order; FIFO may be the required order, but FIFO would
            //   fail to wake up the innermost call.  Interrupts are
            //   outside any spec anyway.  Also, several such suspended
            //   waiters may be woken at a time.
            //
            //   For the time being we disallow waiting from within code
            //   that runs from within an interrupt handler; this may
            //   occasionally (very rarely) be surprising but is
            //   expedient.  Other solutions exist, see bug #1131943.  The
            //   code that performs the check is above, at the head of
            //   this function.

            state_ = WaitingInterrupted;
            {
                UnlockGuard<Mutex> unlock(locked);
                if (!cx->handleInterrupt())
                    return false;
            }
            if (state_ == Woken) {
                *result = FutexOK;
                return true;
            }
            break;

          default:
            MOZ_CRASH("Bad FutexState in wait()");
        }
    }
}
    1071             : 
// Transition this (waiting) thread's futex state according to |reason|
// and, if the thread may actually be blocked on the condition variable,
// signal it.  Caller must hold the futex lock (see atomics_wake).
void
js::FutexThread::wake(WakeReason reason)
{
    MOZ_ASSERT(isWaiting());

    // If the thread is in (or has been notified of) an interrupt, it is
    // not blocked on the condition variable; an explicit wake only needs
    // to record Woken — wait() checks state_ when the interrupt handler
    // returns.  No notify_all() in this case.
    if ((state_ == WaitingInterrupted || state_ == WaitingNotifiedForInterrupt) && reason == WakeExplicit) {
        state_ = Woken;
        return;
    }
    switch (reason) {
      case WakeExplicit:
        state_ = Woken;
        break;
      case WakeForJSInterrupt:
        // Already notified of this interrupt; nothing more to do.
        if (state_ == WaitingNotifiedForInterrupt)
            return;
        state_ = WaitingNotifiedForInterrupt;
        break;
      default:
        MOZ_CRASH("bad WakeReason in FutexThread::wake()");
    }
    cond_->notify_all();
}
    1095             : 
// Method table for the Atomics object.  Most operations have JIT-inlined
// fast paths (JS_INLINABLE_FN); wait/wake always call into the runtime.
const JSFunctionSpec AtomicsMethods[] = {
    JS_INLINABLE_FN("compareExchange",    atomics_compareExchange,    4,0, AtomicsCompareExchange),
    JS_INLINABLE_FN("load",               atomics_load,               2,0, AtomicsLoad),
    JS_INLINABLE_FN("store",              atomics_store,              3,0, AtomicsStore),
    JS_INLINABLE_FN("exchange",           atomics_exchange,           3,0, AtomicsExchange),
    JS_INLINABLE_FN("add",                atomics_add,                3,0, AtomicsAdd),
    JS_INLINABLE_FN("sub",                atomics_sub,                3,0, AtomicsSub),
    JS_INLINABLE_FN("and",                atomics_and,                3,0, AtomicsAnd),
    JS_INLINABLE_FN("or",                 atomics_or,                 3,0, AtomicsOr),
    JS_INLINABLE_FN("xor",                atomics_xor,                3,0, AtomicsXor),
    JS_INLINABLE_FN("isLockFree",         atomics_isLockFree,         1,0, AtomicsIsLockFree),
    JS_FN("wait",                         atomics_wait,               4,0),
    JS_FN("wake",                         atomics_wake,               3,0),
    JS_FS_END
};
    1111             : 
    1112             : JSObject*
    1113           6 : AtomicsObject::initClass(JSContext* cx, Handle<GlobalObject*> global)
    1114             : {
    1115             :     // Create Atomics Object.
    1116          12 :     RootedObject objProto(cx, GlobalObject::getOrCreateObjectPrototype(cx, global));
    1117           6 :     if (!objProto)
    1118           0 :         return nullptr;
    1119          12 :     RootedObject Atomics(cx, NewObjectWithGivenProto(cx, &AtomicsObject::class_, objProto,
    1120          12 :                                                      SingletonObject));
    1121           6 :     if (!Atomics)
    1122           0 :         return nullptr;
    1123             : 
    1124           6 :     if (!JS_DefineFunctions(cx, Atomics, AtomicsMethods))
    1125           0 :         return nullptr;
    1126           6 :     if (!DefineToStringTag(cx, Atomics, cx->names().Atomics))
    1127           0 :         return nullptr;
    1128             : 
    1129          12 :     RootedValue AtomicsValue(cx, ObjectValue(*Atomics));
    1130             : 
    1131             :     // Everything is set up, install Atomics on the global object.
    1132           6 :     if (!DefineProperty(cx, global, cx->names().Atomics, AtomicsValue, nullptr, nullptr,
    1133             :                         JSPROP_RESOLVING))
    1134             :     {
    1135           0 :         return nullptr;
    1136             :     }
    1137             : 
    1138           6 :     global->setConstructor(JSProto_Atomics, AtomicsValue);
    1139           6 :     return Atomics;
    1140             : }
    1141             : 
    1142             : JSObject*
    1143           6 : js::InitAtomicsClass(JSContext* cx, HandleObject obj)
    1144             : {
    1145           6 :     MOZ_ASSERT(obj->is<GlobalObject>());
    1146          12 :     Rooted<GlobalObject*> global(cx, &obj->as<GlobalObject>());
    1147          12 :     return AtomicsObject::initClass(cx, global);
    1148             : }
    1149             : 
    1150             : #undef CXX11_ATOMICS
    1151             : #undef GNU_ATOMICS

Generated by: LCOV version 1.13