LCOV - code coverage report
Current view:  top level - js/src/gc - Heap.h (source / functions)
Test:          output.info
Date:          2017-07-14 16:53:18

                 Hit     Total    Coverage
Lines:           285       397      71.8 %
Functions:        84       114      73.7 %

Legend:        Lines: hit / not hit

          Line data    Source code
       1             : /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
       2             :  * vim: set ts=8 sts=4 et sw=4 tw=99:
       3             :  * This Source Code Form is subject to the terms of the Mozilla Public
       4             :  * License, v. 2.0. If a copy of the MPL was not distributed with this
       5             :  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
       6             : 
       7             : #ifndef gc_Heap_h
       8             : #define gc_Heap_h
       9             : 
      10             : #include "mozilla/ArrayUtils.h"
      11             : #include "mozilla/Atomics.h"
      12             : #include "mozilla/Attributes.h"
      13             : #include "mozilla/DebugOnly.h"
      14             : #include "mozilla/EnumeratedArray.h"
      15             : #include "mozilla/EnumeratedRange.h"
      16             : #include "mozilla/PodOperations.h"
      17             : 
      18             : #include <stddef.h>
      19             : #include <stdint.h>
      20             : 
      21             : #include "jsfriendapi.h"
      22             : #include "jspubtd.h"
      23             : #include "jstypes.h"
      24             : #include "jsutil.h"
      25             : 
      26             : #include "ds/BitArray.h"
      27             : #include "gc/Memory.h"
      28             : #include "js/GCAPI.h"
      29             : #include "js/HeapAPI.h"
      30             : #include "js/RootingAPI.h"
      31             : #include "js/TracingAPI.h"
      32             : 
      33             : struct JSRuntime;
      34             : 
      35             : namespace js {
      36             : 
      37             : class AutoLockGC;
      38             : class FreeOp;
      39             : 
      40             : extern bool
      41             : RuntimeFromActiveCooperatingThreadIsHeapMajorCollecting(JS::shadow::Zone* shadowZone);
      42             : 
      43             : #ifdef DEBUG
      44             : 
      45             : // Barriers can't be triggered during backend Ion compilation, which may run on
      46             : // a helper thread.
      47             : extern bool
      48             : CurrentThreadIsIonCompiling();
      49             : #endif
      50             : 
      51             : // The return value indicates if anything was unmarked.
      52             : extern bool
      53             : UnmarkGrayCellRecursively(gc::Cell* cell, JS::TraceKind kind);
      54             : 
      55             : extern void
      56             : TraceManuallyBarrieredGenericPointerEdge(JSTracer* trc, gc::Cell** thingp, const char* name);
      57             : 
      58             : namespace gc {
      59             : 
      60             : class Arena;
      61             : class ArenaCellSet;
      62             : class ArenaList;
      63             : class SortedArenaList;
      64             : struct Chunk;
      65             : 
      66             : /*
      67             :  * This flag allows an allocation site to request a specific heap based upon the
      68             :  * estimated lifetime or lifetime requirements of objects allocated from that
      69             :  * site.
      70             :  */
      71             : enum InitialHeap {
      72             :     DefaultHeap,
      73             :     TenuredHeap
      74             : };
      75             : 
      76             : /* The GC allocation kinds. */
      77             : // FIXME: uint8_t would make more sense for the underlying type, but causes
      78             : // miscompilations in GCC (fixed in 4.8.5 and 4.9.3). See also bug 1143966.
      79             : enum class AllocKind {
      80             :     FIRST,
      81             :     OBJECT_FIRST = FIRST,
      82             :     FUNCTION = FIRST,
      83             :     FUNCTION_EXTENDED,
      84             :     OBJECT0,
      85             :     OBJECT0_BACKGROUND,
      86             :     OBJECT2,
      87             :     OBJECT2_BACKGROUND,
      88             :     OBJECT4,
      89             :     OBJECT4_BACKGROUND,
      90             :     OBJECT8,
      91             :     OBJECT8_BACKGROUND,
      92             :     OBJECT12,
      93             :     OBJECT12_BACKGROUND,
      94             :     OBJECT16,
      95             :     OBJECT16_BACKGROUND,
      96             :     OBJECT_LIMIT,
      97             :     OBJECT_LAST = OBJECT_LIMIT - 1,
      98             :     SCRIPT,
      99             :     LAZY_SCRIPT,
     100             :     SHAPE,
     101             :     ACCESSOR_SHAPE,
     102             :     BASE_SHAPE,
     103             :     OBJECT_GROUP,
     104             :     FAT_INLINE_STRING,
     105             :     STRING,
     106             :     EXTERNAL_STRING,
     107             :     FAT_INLINE_ATOM,
     108             :     ATOM,
     109             :     SYMBOL,
     110             :     JITCODE,
     111             :     SCOPE,
     112             :     REGEXP_SHARED,
     113             :     LIMIT,
     114             :     LAST = LIMIT - 1
     115             : };
     116             : 
      117             : // Macro to enumerate the different allocation kinds, supplying information about
      118             : // the trace kind, C++ type, and allocation size.
     119             : #define FOR_EACH_OBJECT_ALLOCKIND(D) \
     120             :  /* AllocKind              TraceKind      TypeName           SizedType */ \
     121             :     D(FUNCTION,            Object,        JSObject,          JSFunction) \
     122             :     D(FUNCTION_EXTENDED,   Object,        JSObject,          FunctionExtended) \
     123             :     D(OBJECT0,             Object,        JSObject,          JSObject_Slots0) \
     124             :     D(OBJECT0_BACKGROUND,  Object,        JSObject,          JSObject_Slots0) \
     125             :     D(OBJECT2,             Object,        JSObject,          JSObject_Slots2) \
     126             :     D(OBJECT2_BACKGROUND,  Object,        JSObject,          JSObject_Slots2) \
     127             :     D(OBJECT4,             Object,        JSObject,          JSObject_Slots4) \
     128             :     D(OBJECT4_BACKGROUND,  Object,        JSObject,          JSObject_Slots4) \
     129             :     D(OBJECT8,             Object,        JSObject,          JSObject_Slots8) \
     130             :     D(OBJECT8_BACKGROUND,  Object,        JSObject,          JSObject_Slots8) \
     131             :     D(OBJECT12,            Object,        JSObject,          JSObject_Slots12) \
     132             :     D(OBJECT12_BACKGROUND, Object,        JSObject,          JSObject_Slots12) \
     133             :     D(OBJECT16,            Object,        JSObject,          JSObject_Slots16) \
     134             :     D(OBJECT16_BACKGROUND, Object,        JSObject,          JSObject_Slots16)
     135             : 
     136             : #define FOR_EACH_NONOBJECT_ALLOCKIND(D) \
     137             :  /* AllocKind              TraceKind      TypeName           SizedType */ \
     138             :     D(SCRIPT,              Script,        JSScript,          JSScript) \
     139             :     D(LAZY_SCRIPT,         LazyScript,    js::LazyScript,    js::LazyScript) \
     140             :     D(SHAPE,               Shape,         js::Shape,         js::Shape) \
     141             :     D(ACCESSOR_SHAPE,      Shape,         js::AccessorShape, js::AccessorShape) \
     142             :     D(BASE_SHAPE,          BaseShape,     js::BaseShape,     js::BaseShape) \
     143             :     D(OBJECT_GROUP,        ObjectGroup,   js::ObjectGroup,   js::ObjectGroup) \
     144             :     D(FAT_INLINE_STRING,   String,        JSFatInlineString, JSFatInlineString) \
     145             :     D(STRING,              String,        JSString,          JSString) \
     146             :     D(EXTERNAL_STRING,     String,        JSExternalString,  JSExternalString) \
     147             :     D(FAT_INLINE_ATOM,     String,        js::FatInlineAtom, js::FatInlineAtom) \
     148             :     D(ATOM,                String,        js::NormalAtom,    js::NormalAtom) \
     149             :     D(SYMBOL,              Symbol,        JS::Symbol,        JS::Symbol) \
     150             :     D(JITCODE,             JitCode,       js::jit::JitCode,  js::jit::JitCode) \
     151             :     D(SCOPE,               Scope,         js::Scope,         js::Scope) \
     152             :     D(REGEXP_SHARED,       RegExpShared,  js::RegExpShared,  js::RegExpShared)
     153             : 
     154             : #define FOR_EACH_ALLOCKIND(D) \
     155             :     FOR_EACH_OBJECT_ALLOCKIND(D) \
     156             :     FOR_EACH_NONOBJECT_ALLOCKIND(D)
     157             : 
     158             : static_assert(int(AllocKind::FIRST) == 0, "Various places depend on AllocKind starting at 0, "
     159             :                                           "please audit them carefully!");
     160             : static_assert(int(AllocKind::OBJECT_FIRST) == 0, "Various places depend on AllocKind::OBJECT_FIRST "
     161             :                                                  "being 0, please audit them carefully!");
     162             : 
     163             : inline bool
     164     7101617 : IsAllocKind(AllocKind kind)
     165             : {
     166     7101617 :     return kind >= AllocKind::FIRST && kind <= AllocKind::LIMIT;
     167             : }
     168             : 
     169             : inline bool
     170     7357848 : IsValidAllocKind(AllocKind kind)
     171             : {
     172     7357848 :     return kind >= AllocKind::FIRST && kind <= AllocKind::LAST;
     173             : }
     174             : 
     175             : inline bool
     176      397949 : IsObjectAllocKind(AllocKind kind)
     177             : {
     178      397949 :     return kind >= AllocKind::OBJECT_FIRST && kind <= AllocKind::OBJECT_LAST;
     179             : }
     180             : 
     181             : inline bool
     182           0 : IsShapeAllocKind(AllocKind kind)
     183             : {
     184           0 :     return kind == AllocKind::SHAPE || kind == AllocKind::ACCESSOR_SHAPE;
     185             : }
     186             : 
     187             : // Returns a sequence for use in a range-based for loop,
     188             : // to iterate over all alloc kinds.
     189             : inline decltype(mozilla::MakeEnumeratedRange(AllocKind::FIRST, AllocKind::LIMIT))
     190         622 : AllAllocKinds()
     191             : {
     192         622 :     return mozilla::MakeEnumeratedRange(AllocKind::FIRST, AllocKind::LIMIT);
     193             : }
     194             : 
     195             : // Returns a sequence for use in a range-based for loop,
     196             : // to iterate over all object alloc kinds.
     197             : inline decltype(mozilla::MakeEnumeratedRange(AllocKind::OBJECT_FIRST, AllocKind::OBJECT_LIMIT))
     198           0 : ObjectAllocKinds()
     199             : {
     200           0 :     return mozilla::MakeEnumeratedRange(AllocKind::OBJECT_FIRST, AllocKind::OBJECT_LIMIT);
     201             : }
     202             : 
     203             : // Returns a sequence for use in a range-based for loop,
     204             : // to iterate over alloc kinds from |first| to |limit|, exclusive.
     205             : inline decltype(mozilla::MakeEnumeratedRange(AllocKind::FIRST, AllocKind::LIMIT))
     206             : SomeAllocKinds(AllocKind first = AllocKind::FIRST, AllocKind limit = AllocKind::LIMIT)
     207             : {
     208             :     MOZ_ASSERT(IsAllocKind(first), "|first| is not a valid AllocKind!");
     209             :     MOZ_ASSERT(IsAllocKind(limit), "|limit| is not a valid AllocKind!");
     210             :     return mozilla::MakeEnumeratedRange(first, limit);
     211             : }
     212             : 
     213             : // AllAllocKindArray<ValueType> gives an enumerated array of ValueTypes,
     214             : // with each index corresponding to a particular alloc kind.
     215             : template<typename ValueType> using AllAllocKindArray =
     216             :     mozilla::EnumeratedArray<AllocKind, AllocKind::LIMIT, ValueType>;
     217             : 
     218             : // ObjectAllocKindArray<ValueType> gives an enumerated array of ValueTypes,
     219             : // with each index corresponding to a particular object alloc kind.
     220             : template<typename ValueType> using ObjectAllocKindArray =
     221             :     mozilla::EnumeratedArray<AllocKind, AllocKind::OBJECT_LIMIT, ValueType>;
     222             : 
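// Illustrative sketch (not part of the original header): the iteration
// helpers and array aliases above are meant to be used together; this
// function and |counts| are hypothetical.
//
//   static void SketchPerKindCounts() {
//       AllAllocKindArray<size_t> counts;
//       for (AllocKind kind : AllAllocKinds())
//           counts[kind] = 0;      // one slot per kind, indexed by the enum
//       for (AllocKind kind : ObjectAllocKinds())
//           counts[kind]++;        // touches only the object kinds
//   }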
     223             : static inline JS::TraceKind
     224     1748862 : MapAllocToTraceKind(AllocKind kind)
     225             : {
     226             :     static const JS::TraceKind map[] = {
     227             : #define EXPAND_ELEMENT(allocKind, traceKind, type, sizedType) \
     228             :         JS::TraceKind::traceKind,
     229             : FOR_EACH_ALLOCKIND(EXPAND_ELEMENT)
     230             : #undef EXPAND_ELEMENT
     231             :     };
     232             : 
     233             :     static_assert(MOZ_ARRAY_LENGTH(map) == size_t(AllocKind::LIMIT),
     234             :                   "AllocKind-to-TraceKind mapping must be in sync");
     235     1748862 :     return map[size_t(kind)];
     236             : }
     237             : 
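// Example (illustrative): per the FOR_EACH_ALLOCKIND table above, every
// object kind maps to JS::TraceKind::Object, while e.g. SCRIPT maps to
// JS::TraceKind::Script:
//
//   MOZ_ASSERT(MapAllocToTraceKind(AllocKind::FUNCTION) == JS::TraceKind::Object);
//   MOZ_ASSERT(MapAllocToTraceKind(AllocKind::SCRIPT) == JS::TraceKind::Script);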
     238             : /*
     239             :  * This must be an upper bound, but we do not need the least upper bound, so
     240             :  * we just exclude non-background objects.
     241             :  */
     242             : static const size_t MAX_BACKGROUND_FINALIZE_KINDS =
     243             :     size_t(AllocKind::LIMIT) - size_t(AllocKind::OBJECT_LIMIT) / 2;
     244             : 
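// Worked example, counting the enumerators above: there are 14 object kinds
// (7 foreground/background pairs), so size_t(AllocKind::OBJECT_LIMIT) == 14
// and size_t(AllocKind::LIMIT) == 29, giving 29 - 14 / 2 == 22: the 15
// non-object kinds plus the 7 *_BACKGROUND object kinds.
//
//   static_assert(MAX_BACKGROUND_FINALIZE_KINDS == 22,
//                 "sanity check of the arithmetic above");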
     245             : /* Mark colors to pass to markIfUnmarked. */
     246             : enum class MarkColor : uint32_t
     247             : {
     248             :     Black = 0,
     249             :     Gray
     250             : };
     251             : 
     252             : class TenuredCell;
     253             : 
     254             : // A GC cell is the base class for all GC things.
     255      205980 : struct Cell
     256             : {
     257             :   public:
     258    20521754 :     MOZ_ALWAYS_INLINE bool isTenured() const { return !IsInsideNursery(this); }
     259             :     MOZ_ALWAYS_INLINE const TenuredCell& asTenured() const;
     260             :     MOZ_ALWAYS_INLINE TenuredCell& asTenured();
     261             : 
     262             :     MOZ_ALWAYS_INLINE bool isMarkedAny() const;
     263             :     MOZ_ALWAYS_INLINE bool isMarkedBlack() const;
     264             :     MOZ_ALWAYS_INLINE bool isMarkedGray() const;
     265             : 
     266             :     inline JSRuntime* runtimeFromActiveCooperatingThread() const;
     267             : 
     268             :     // Note: Unrestricted access to the runtime of a GC thing from an arbitrary
     269             :     // thread can easily lead to races. Use this method very carefully.
     270             :     inline JSRuntime* runtimeFromAnyThread() const;
     271             : 
     272             :     // May be overridden by GC thing kinds that have a compartment pointer.
     273       20777 :     inline JSCompartment* maybeCompartment() const { return nullptr; }
     274             : 
     275             :     // The StoreBuffer used to record incoming pointers from the tenured heap.
     276             :     // This will return nullptr for a tenured cell.
     277             :     inline StoreBuffer* storeBuffer() const;
     278             : 
     279             :     inline JS::TraceKind getTraceKind() const;
     280             : 
     281             :     static MOZ_ALWAYS_INLINE bool needWriteBarrierPre(JS::Zone* zone);
     282             : 
     283             : #ifdef DEBUG
     284             :     inline bool isAligned() const;
     285             :     void dump(FILE* fp) const;
     286             :     void dump() const;
     287             : #endif
     288             : 
     289             :   protected:
     290             :     inline uintptr_t address() const;
     291             :     inline Chunk* chunk() const;
     292             : } JS_HAZ_GC_THING;
     293             : 
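// A hedged usage sketch of the Cell interface above (|thing| is a
// hypothetical pointer to a live GC thing):
//
//   void inspect(js::gc::Cell* thing) {
//       if (thing->isTenured()) {
//           js::gc::TenuredCell& t = thing->asTenured();
//           MOZ_ASSERT(!t.storeBuffer());     // tenured cells have no store buffer
//       } else {
//           MOZ_ASSERT(thing->storeBuffer()); // nursery cells record one
//       }
//   }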
     294             : // A GC TenuredCell gets behaviors that are valid for things in the Tenured
     295             : // heap, such as access to the arena and mark bits.
     296      205980 : class TenuredCell : public Cell
     297             : {
     298             :   public:
     299             :     // Construct a TenuredCell from a void*, making various sanity assertions.
     300             :     static MOZ_ALWAYS_INLINE TenuredCell* fromPointer(void* ptr);
     301             :     static MOZ_ALWAYS_INLINE const TenuredCell* fromPointer(const void* ptr);
     302             : 
     303             :     // Mark bit management.
     304             :     MOZ_ALWAYS_INLINE bool isMarkedAny() const;
     305             :     MOZ_ALWAYS_INLINE bool isMarkedBlack() const;
     306             :     MOZ_ALWAYS_INLINE bool isMarkedGray() const;
     307             : 
     308             :     // The return value indicates if the cell went from unmarked to marked.
     309             :     MOZ_ALWAYS_INLINE bool markIfUnmarked(MarkColor color = MarkColor::Black) const;
     310             :     MOZ_ALWAYS_INLINE void markBlack() const;
     311             :     MOZ_ALWAYS_INLINE void copyMarkBitsFrom(const TenuredCell* src);
     312             : 
     313             :     // Access to the arena.
     314             :     inline Arena* arena() const;
     315             :     inline AllocKind getAllocKind() const;
     316             :     inline JS::TraceKind getTraceKind() const;
     317             :     inline JS::Zone* zone() const;
     318             :     inline JS::Zone* zoneFromAnyThread() const;
     319             :     inline bool isInsideZone(JS::Zone* zone) const;
     320             : 
     321             :     MOZ_ALWAYS_INLINE JS::shadow::Zone* shadowZone() const {
     322             :         return JS::shadow::Zone::asShadowZone(zone());
     323             :     }
     324      996888 :     MOZ_ALWAYS_INLINE JS::shadow::Zone* shadowZoneFromAnyThread() const {
     325      996888 :         return JS::shadow::Zone::asShadowZone(zoneFromAnyThread());
     326             :     }
     327             : 
     328             :     static MOZ_ALWAYS_INLINE void readBarrier(TenuredCell* thing);
     329             :     static MOZ_ALWAYS_INLINE void writeBarrierPre(TenuredCell* thing);
     330             : 
     331             :     static MOZ_ALWAYS_INLINE void writeBarrierPost(void* cellp, TenuredCell* prior,
     332             :                                                    TenuredCell* next);
     333             : 
     334             :     // Default implementation for kinds that don't require fixup.
     335           0 :     void fixupAfterMovingGC() {}
     336             : 
     337             : #ifdef DEBUG
     338             :     inline bool isAligned() const;
     339             : #endif
     340             : };
     341             : 
      342             : /* Cells are aligned to 1 << CellAlignShift bytes, so the largest tagged null pointer is: */
     343             : const uintptr_t LargestTaggedNullCellPointer = (1 << CellAlignShift) - 1;
     344             : 
     345             : /*
     346             :  * The minimum cell size ends up as twice the cell alignment because the mark
     347             :  * bitmap contains one bit per CellBytesPerMarkBit bytes (which is equal to
     348             :  * CellAlignBytes) and we need two mark bits per cell.
     349             :  */
     350             : const size_t MarkBitsPerCell = 2;
     351             : const size_t MinCellSize = CellBytesPerMarkBit * MarkBitsPerCell;
     352             : 
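// Worked values (a sketch assuming the usual CellAlignShift == 3, so
// CellAlignBytes == CellBytesPerMarkBit == 8; those constants live in
// js/HeapAPI.h):
//   LargestTaggedNullCellPointer == (1 << 3) - 1 == 7
//   MinCellSize == 8 * 2 == 16
// i.e. the smallest GC thing is 16 bytes and owns two mark bits.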
     353             : constexpr size_t
     354             : DivideAndRoundUp(size_t numerator, size_t divisor) {
     355             :     return (numerator + divisor - 1) / divisor;
     356             : }
     357             : 
     358             : static_assert(ArenaSize % CellAlignBytes == 0,
     359             :               "Arena size must be a multiple of cell alignment");
     360             : 
     361             : /*
     362             :  * We sometimes use an index to refer to a cell in an arena. The index for a
      363             :  * cell is found by dividing by the cell alignment, so not all indices refer to
     364             :  * valid cells.
     365             :  */
     366             : const size_t ArenaCellIndexBytes = CellAlignBytes;
     367             : const size_t MaxArenaCellIndex = ArenaSize / CellAlignBytes;
     368             : 
     369             : /*
      370             :  * The mark bitmap has one bit for each possible cell start position. This
     371             :  * wastes some space for larger GC things but allows us to avoid division by the
     372             :  * cell's size when accessing the bitmap.
     373             :  */
     374             : const size_t ArenaBitmapBits = ArenaSize / CellBytesPerMarkBit;
     375             : const size_t ArenaBitmapBytes = DivideAndRoundUp(ArenaBitmapBits, 8);
     376             : const size_t ArenaBitmapWords = DivideAndRoundUp(ArenaBitmapBits, JS_BITS_PER_WORD);
     377             : 
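// Under the same assumptions (ArenaSize == 4096, CellBytesPerMarkBit == 8):
//   ArenaBitmapBits  == 4096 / 8 == 512
//   ArenaBitmapBytes == DivideAndRoundUp(512, 8)  == 64
//   ArenaBitmapWords == DivideAndRoundUp(512, 64) == 8   (with 64-bit words)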
     378             : /*
     379             :  * A FreeSpan represents a contiguous sequence of free cells in an Arena. It
     380             :  * can take two forms.
     381             :  *
     382             :  * - In an empty span, |first| and |last| are both zero.
     383             :  *
     384             :  * - In a non-empty span, |first| is the address of the first free thing in the
     385             :  *   span, and |last| is the address of the last free thing in the span.
     386             :  *   Furthermore, the memory pointed to by |last| holds a FreeSpan structure
     387             :  *   that points to the next span (which may be empty); this works because
     388             :  *   sizeof(FreeSpan) is less than the smallest thingSize.
     389             :  */
     390             : class FreeSpan
     391             : {
     392             :     friend class Arena;
     393             :     friend class ArenaCellIterImpl;
     394             : 
     395             :     uint16_t first;
     396             :     uint16_t last;
     397             : 
     398             :   public:
     399             :     // This inits just |first| and |last|; if the span is non-empty it doesn't
     400             :     // do anything with the next span stored at |last|.
     401           0 :     void initBounds(uintptr_t firstArg, uintptr_t lastArg, const Arena* arena) {
     402           0 :         checkRange(firstArg, lastArg, arena);
     403           0 :         first = firstArg;
     404           0 :         last = lastArg;
     405           0 :     }
     406             : 
     407       13324 :     void initAsEmpty() {
     408       13324 :         first = 0;
     409       13324 :         last = 0;
     410       13324 :     }
     411             : 
     412             :     // This sets |first| and |last|, and also sets the next span stored at
     413             :     // |last| as empty. (As a result, |firstArg| and |lastArg| cannot represent
     414             :     // an empty span.)
     415           0 :     void initFinal(uintptr_t firstArg, uintptr_t lastArg, const Arena* arena) {
     416           0 :         initBounds(firstArg, lastArg, arena);
     417           0 :         FreeSpan* last = nextSpanUnchecked(arena);
     418           0 :         last->initAsEmpty();
     419           0 :         checkSpan(arena);
     420           0 :     }
     421             : 
     422      976662 :     bool isEmpty() const {
     423      976662 :         return !first;
     424             :     }
     425             : 
     426      480377 :     Arena* getArenaUnchecked() { return reinterpret_cast<Arena*>(this); }
     427             :     inline Arena* getArena();
     428             : 
     429          60 :     static size_t offsetOfFirst() {
     430          60 :         return offsetof(FreeSpan, first);
     431             :     }
     432             : 
     433          20 :     static size_t offsetOfLast() {
     434          20 :         return offsetof(FreeSpan, last);
     435             :     }
     436             : 
     437             :     // Like nextSpan(), but no checking of the following span is done.
     438      961387 :     FreeSpan* nextSpanUnchecked(const Arena* arena) const {
     439      961387 :         MOZ_ASSERT(arena && !isEmpty());
     440      961386 :         return reinterpret_cast<FreeSpan*>(uintptr_t(arena) + last);
     441             :     }
     442             : 
     443        6381 :     const FreeSpan* nextSpan(const Arena* arena) const {
     444        6381 :         checkSpan(arena);
     445        6381 :         return nextSpanUnchecked(arena);
     446             :     }
     447             : 
     448      480377 :     MOZ_ALWAYS_INLINE TenuredCell* allocate(size_t thingSize) {
     449             :         // Eschew the usual checks, because this might be the placeholder span.
     450             :         // If this is somehow an invalid, non-empty span, checkSpan() will catch it.
     451      480377 :         Arena* arena = getArenaUnchecked();
     452      480378 :         checkSpan(arena);
     453      480391 :         uintptr_t thing = uintptr_t(arena) + first;
     454      480391 :         if (first < last) {
     455             :             // We have space for at least two more things, so do a simple bump-allocate.
     456      467188 :             first += thingSize;
     457       13203 :         } else if (MOZ_LIKELY(first)) {
     458             :             // The last space points to the next free span (which may be empty).
     459        6321 :             const FreeSpan* next = nextSpan(arena);
     460        6321 :             first = next->first;
     461        6321 :             last = next->last;
     462             :         } else {
     463        6882 :             return nullptr; // The span is empty.
     464             :         }
     465      473509 :         checkSpan(arena);
     466      473489 :         JS_EXTRA_POISON(reinterpret_cast<void*>(thing), JS_ALLOCATED_TENURED_PATTERN, thingSize);
     467      473489 :         MemProfiler::SampleTenured(reinterpret_cast<void*>(thing), thingSize);
     468      473494 :         return reinterpret_cast<TenuredCell*>(thing);
     469             :     }
     470             : 
     471             :     inline void checkSpan(const Arena* arena) const;
     472             :     inline void checkRange(uintptr_t first, uintptr_t last, const Arena* arena) const;
     473             : };
     474             : 
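// A hedged walkthrough of allocate() above, with made-up offsets. Suppose an
// arena of 16-byte things whose free cells sit at offsets 48, 64 and 4080:
//
//   span A (in Arena::firstFreeSpan):      first = 48,   last = 64
//   span B (stored in the cell at 64):     first = 4080, last = 4080
//   span C (stored in the cell at 4080):   first = 0,    last = 0 (empty)
//
// Successive calls to allocate(16) then:
//   1. see first(48) < last(64): bump-allocate offset 48; first becomes 64.
//   2. see first == last == 64: return offset 64 and load span B from it.
//   3. see first == last == 4080: return offset 4080 and load the empty span C.
//   4. see first == 0: the span is empty, so return nullptr.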
     475             : /*
     476             :  * Arenas are the allocation units of the tenured heap in the GC. An arena
     477             :  * is 4kiB in size and 4kiB-aligned. It starts with several header fields
     478             :  * followed by some bytes of padding. The remainder of the arena is filled
     479             :  * with GC things of a particular AllocKind. The padding ensures that the
     480             :  * GC thing array ends exactly at the end of the arena:
     481             :  *
     482             :  * <----------------------------------------------> = ArenaSize bytes
     483             :  * +---------------+---------+----+----+-----+----+
     484             :  * | header fields | padding | T0 | T1 | ... | Tn |
     485             :  * +---------------+---------+----+----+-----+----+
     486             :  * <-------------------------> = first thing offset
     487             :  */
     488             : class Arena
     489             : {
     490             :     static JS_FRIEND_DATA(const uint32_t) ThingSizes[];
     491             :     static JS_FRIEND_DATA(const uint32_t) FirstThingOffsets[];
     492             :     static JS_FRIEND_DATA(const uint32_t) ThingsPerArena[];
     493             : 
     494             :     /*
     495             :      * The first span of free things in the arena. Most of these spans are
     496             :      * stored as offsets in free regions of the data array, and most operations
     497             :      * on FreeSpans take an Arena pointer for safety. However, the FreeSpans
     498             :      * used for allocation are stored here, at the start of an Arena, and use
     499             :      * their own address to grab the next span within the same Arena.
     500             :      */
     501             :     FreeSpan firstFreeSpan;
     502             : 
     503             :   public:
     504             :     /*
     505             :      * The zone that this Arena is contained within, when allocated. The offset
     506             :      * of this field must match the ArenaZoneOffset stored in js/HeapAPI.h,
     507             :      * as is statically asserted below.
     508             :      */
     509             :     JS::Zone* zone;
     510             : 
     511             :     /*
     512             :      * Arena::next has two purposes: when unallocated, it points to the next
     513             :      * available Arena. When allocated, it points to the next Arena in the same
     514             :      * zone and with the same alloc kind.
     515             :      */
     516             :     Arena* next;
     517             : 
     518             :   private:
     519             :     /*
     520             :      * One of the AllocKind constants or AllocKind::LIMIT when the arena does
     521             :      * not contain any GC things and is on the list of empty arenas in the GC
     522             :      * chunk.
     523             :      *
     524             :      * We use 8 bits for the alloc kind so the compiler can use byte-level
     525             :      * memory instructions to access it.
     526             :      */
     527             :     size_t allocKind : 8;
     528             : 
     529             :   public:
     530             :     /*
      531             :      * When collecting we sometimes need to keep an auxiliary list of arenas,
     532             :      * for which we use the following fields. This happens for several reasons:
     533             :      *
     534             :      * When recursive marking uses too much stack, the marking is delayed and
     535             :      * the corresponding arenas are put into a stack. To distinguish the bottom
     536             :      * of the stack from the arenas not present in the stack we use the
     537             :      * markOverflow flag to tag arenas on the stack.
     538             :      *
     539             :      * Delayed marking is also used for arenas that we allocate into during an
     540             :      * incremental GC. In this case, we intend to mark all the objects in the
     541             :      * arena, and it's faster to do this marking in bulk.
     542             :      *
     543             :      * When sweeping we keep track of which arenas have been allocated since
     544             :      * the end of the mark phase. This allows us to tell whether a pointer to
     545             :      * an unmarked object is yet to be finalized or has already been
     546             :      * reallocated. We set the allocatedDuringIncremental flag for this and
     547             :      * clear it at the end of the sweep phase.
     548             :      *
     549             :      * To minimize the size of the header fields we record the next linkage as
     550             :      * address() >> ArenaShift and pack it with the allocKind and the flags.
     551             :      */
     552             :     size_t hasDelayedMarking : 1;
     553             :     size_t allocatedDuringIncremental : 1;
     554             :     size_t markOverflow : 1;
     555             :     size_t auxNextLink : JS_BITS_PER_WORD - 8 - 1 - 1 - 1;
     556             :     static_assert(ArenaShift >= 8 + 1 + 1 + 1,
     557             :                   "Arena::auxNextLink packing assumes that ArenaShift has "
     558             :                   "enough bits to cover allocKind and hasDelayedMarking.");
     559             : 
     560             :   private:
     561             :     union {
     562             :         /*
     563             :          * For arenas in zones other than the atoms zone, if non-null, points
     564             :          * to an ArenaCellSet that represents the set of cells in this arena
     565             :          * that are in the nursery's store buffer.
     566             :          */
     567             :         ArenaCellSet* bufferedCells_;
     568             : 
     569             :         /*
     570             :          * For arenas in the atoms zone, the starting index into zone atom
      571             :          * marking bitmaps (see AtomMarking.h) of the things in this arena.
     572             :          * Atoms never refer to nursery things, so no store buffer index is
     573             :          * needed.
     574             :          */
     575             :         size_t atomBitmapStart_;
     576             :     };
     577             :   public:
     578             : 
     579             :     /*
     580             :      * The size of data should be |ArenaSize - offsetof(data)|, but the offset
     581             :      * is not yet known to the compiler, so we do it by hand. |firstFreeSpan|
     582             :      * takes up 8 bytes on 64-bit due to alignment requirements; the rest are
     583             :      * obvious. This constant is stored in js/HeapAPI.h.
     584             :      */
     585             :     uint8_t data[ArenaSize - ArenaHeaderSize];
     586             : 
     587             :     void init(JS::Zone* zoneArg, AllocKind kind);
     588             : 
     589             :     // Sets |firstFreeSpan| to the Arena's entire valid range, and
     590             :     // also sets the next span stored at |firstFreeSpan.last| as empty.
     591        6662 :     void setAsFullyUnused() {
     592        6662 :         AllocKind kind = getAllocKind();
     593        6662 :         firstFreeSpan.first = firstThingOffset(kind);
     594        6662 :         firstFreeSpan.last = lastThingOffset(kind);
     595        6662 :         FreeSpan* last = firstFreeSpan.nextSpanUnchecked(this);
     596        6662 :         last->initAsEmpty();
     597        6662 :     }
     598             : 
     599             :     // Initialize an arena to its unallocated state. For arenas that were
     600             :     // previously allocated for some zone, use release() instead.
     601        6662 :     void setAsNotAllocated() {
     602        6662 :         firstFreeSpan.initAsEmpty();
     603        6662 :         zone = nullptr;
     604        6662 :         allocKind = size_t(AllocKind::LIMIT);
     605        6662 :         hasDelayedMarking = 0;
     606        6662 :         allocatedDuringIncremental = 0;
     607        6662 :         markOverflow = 0;
     608        6662 :         auxNextLink = 0;
     609        6662 :         bufferedCells_ = nullptr;
     610        6662 :     }
     611             : 
     612             :     // Return an allocated arena to its unallocated state.
     613             :     inline void release();
     614             : 
     615      580731 :     uintptr_t address() const {
     616      580731 :         checkAddress();
     617      580730 :         return uintptr_t(this);
     618             :     }
     619             : 
     620             :     inline void checkAddress() const;
     621             : 
     622             :     inline Chunk* chunk() const;
     623             : 
     624     7101543 :     bool allocated() const {
     625     7101543 :         MOZ_ASSERT(IsAllocKind(AllocKind(allocKind)));
     626     7101659 :         return IsValidAllocKind(AllocKind(allocKind));
     627             :     }
     628             : 
     629     5863601 :     AllocKind getAllocKind() const {
     630     5863601 :         MOZ_ASSERT(allocated());
     631     5864042 :         return AllocKind(allocKind);
     632             :     }
     633             : 
     634        8924 :     FreeSpan* getFirstFreeSpan() { return &firstFreeSpan; }
     635             : 
     636     5097843 :     static size_t thingSize(AllocKind kind) { return ThingSizes[size_t(kind)]; }
     637           0 :     static size_t thingsPerArena(AllocKind kind) { return ThingsPerArena[size_t(kind)]; }
     638           0 :     static size_t thingsSpan(AllocKind kind) { return thingsPerArena(kind) * thingSize(kind); }
     639             : 
     640      956094 :     static size_t firstThingOffset(AllocKind kind) { return FirstThingOffsets[size_t(kind)]; }
     641      955008 :     static size_t lastThingOffset(AllocKind kind) { return ArenaSize - thingSize(kind); }
     642             : 
     643     2456910 :     size_t getThingSize() const { return thingSize(getAllocKind()); }
     644           0 :     size_t getThingsPerArena() const { return thingsPerArena(getAllocKind()); }
     645           0 :     size_t getThingsSpan() const { return getThingsPerArena() * getThingSize(); }
     646           0 :     size_t getFirstThingOffset() const { return firstThingOffset(getAllocKind()); }
     647             : 
     648           0 :     uintptr_t thingsStart() const { return address() + getFirstThingOffset(); }
     649           0 :     uintptr_t thingsEnd() const { return address() + ArenaSize; }
     650             : 
     651        1030 :     bool isEmpty() const {
     652             :         // Arena is empty if its first span covers the whole arena.
     653        1030 :         firstFreeSpan.checkSpan(this);
     654        1030 :         AllocKind kind = getAllocKind();
     655        1030 :         return firstFreeSpan.first == firstThingOffset(kind) &&
     656        1030 :                firstFreeSpan.last == lastThingOffset(kind);
     657             :     }
     658             : 
     659        1726 :     bool hasFreeThings() const { return !firstFreeSpan.isEmpty(); }
     660             : 
     661           0 :     size_t numFreeThings(size_t thingSize) const {
     662           0 :         firstFreeSpan.checkSpan(this);
     663           0 :         size_t numFree = 0;
     664           0 :         const FreeSpan* span = &firstFreeSpan;
     665           0 :         for (; !span->isEmpty(); span = span->nextSpan(this))
     666           0 :             numFree += (span->last - span->first) / thingSize + 1;
     667           0 :         return numFree;
     668             :     }
     669             : 
     670           0 :     size_t countFreeCells() { return numFreeThings(getThingSize()); }
     671           0 :     size_t countUsedCells() { return getThingsPerArena() - countFreeCells(); }
     672             : 
     673           0 :     bool inFreeList(uintptr_t thing) {
     674           0 :         uintptr_t base = address();
     675           0 :         const FreeSpan* span = &firstFreeSpan;
     676           0 :         for (; !span->isEmpty(); span = span->nextSpan(this)) {
     677             :             /* If the thing comes before the current span, it's not free. */
     678           0 :             if (thing < base + span->first)
     679           0 :                 return false;
     680             : 
     681             :             /* If we find it before the end of the span, it's free. */
     682           0 :             if (thing <= base + span->last)
     683           0 :                 return true;
     684             :         }
     685           0 :         return false;
     686             :     }
     687             : 
     688       55823 :     static bool isAligned(uintptr_t thing, size_t thingSize) {
      689             :         /* Things end at the arena end. */
     690       55823 :         uintptr_t tailOffset = ArenaSize - (thing & ArenaMask);
     691       55823 :         return tailOffset % thingSize == 0;
     692             :     }
     693             : 
     694           0 :     Arena* getNextDelayedMarking() const {
     695           0 :         MOZ_ASSERT(hasDelayedMarking);
     696           0 :         return reinterpret_cast<Arena*>(auxNextLink << ArenaShift);
     697             :     }
     698             : 
     699         130 :     void setNextDelayedMarking(Arena* arena) {
     700         130 :         MOZ_ASSERT(!(uintptr_t(arena) & ArenaMask));
     701         130 :         MOZ_ASSERT(!auxNextLink && !hasDelayedMarking);
     702         130 :         hasDelayedMarking = 1;
     703         130 :         if (arena)
     704         129 :             auxNextLink = arena->address() >> ArenaShift;
     705         130 :     }
     706             : 
     707           0 :     void unsetDelayedMarking() {
     708           0 :         MOZ_ASSERT(hasDelayedMarking);
     709           0 :         hasDelayedMarking = 0;
     710           0 :         auxNextLink = 0;
     711           0 :     }
     712             : 
     713           0 :     Arena* getNextAllocDuringSweep() const {
     714           0 :         MOZ_ASSERT(allocatedDuringIncremental);
     715           0 :         return reinterpret_cast<Arena*>(auxNextLink << ArenaShift);
     716             :     }
     717             : 
     718           0 :     void setNextAllocDuringSweep(Arena* arena) {
     719           0 :         MOZ_ASSERT(!(uintptr_t(arena) & ArenaMask));
     720           0 :         MOZ_ASSERT(!auxNextLink && !allocatedDuringIncremental);
     721           0 :         allocatedDuringIncremental = 1;
     722           0 :         if (arena)
     723           0 :             auxNextLink = arena->address() >> ArenaShift;
     724           0 :     }
     725             : 
     726           0 :     void unsetAllocDuringSweep() {
     727           0 :         MOZ_ASSERT(allocatedDuringIncremental);
     728           0 :         allocatedDuringIncremental = 0;
     729           0 :         auxNextLink = 0;
     730           0 :     }
     731             : 
     732             :     inline ArenaCellSet*& bufferedCells();
     733             :     inline size_t& atomBitmapStart();
     734             : 
     735             :     template <typename T>
     736             :     size_t finalize(FreeOp* fop, AllocKind thingKind, size_t thingSize);
     737             : 
     738             :     static void staticAsserts();
     739             : 
     740             :     void unmarkAll();
     741             : };
     742             : 
     743             : static_assert(ArenaZoneOffset == offsetof(Arena, zone),
     744             :               "The hardcoded API zone offset must match the actual offset.");
     745             : 
     746             : static_assert(sizeof(Arena) == ArenaSize,
     747             :               "ArenaSize must match the actual size of the Arena structure.");
     748             : 
     749             : static_assert(offsetof(Arena, data) == ArenaHeaderSize,
     750             :               "ArenaHeaderSize must match the actual size of the header fields.");
     751             : 
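// Worked layout example (a sketch; ArenaHeaderSize is defined in js/HeapAPI.h
// and is assumed here to be 40 bytes on 64-bit). For a kind with
// thingSize == 16:
//   usable bytes     = 4096 - 40       = 4056
//   thingsPerArena   = 4056 / 16       = 253   (the remaining 8 bytes pad)
//   firstThingOffset = 4096 - 253 * 16 = 48
// so the padding after the header makes the thing array end exactly at the
// arena's end, as the diagram above the Arena class shows.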
     752             : inline Arena*
     753             : FreeSpan::getArena()
     754             : {
     755             :     Arena* arena = getArenaUnchecked();
     756             :     arena->checkAddress();
     757             :     return arena;
     758             : }
     759             : 
     760             : inline void
     761      962074 : FreeSpan::checkSpan(const Arena* arena) const
     762             : {
     763             : #ifdef DEBUG
     764      962074 :     if (!first) {
     765       13793 :         MOZ_ASSERT(!first && !last);
     766       13793 :         return;
     767             :     }
     768             : 
     769      948281 :     arena->checkAddress();
     770      948292 :     checkRange(first, last, arena);
     771             : 
     772             :     // If there's a following span, it must have a higher address,
     773             :     // and the gap must be at least 2 * thingSize.
     774      948345 :     const FreeSpan* next = nextSpanUnchecked(arena);
     775      948343 :     if (next->first) {
     776           0 :         checkRange(next->first, next->last, arena);
     777           0 :         size_t thingSize = arena->getThingSize();
     778           0 :         MOZ_ASSERT(last + 2 * thingSize <= next->first);
     779             :     }
     780             : #endif
     781             : }
     782             : 
     783             : inline void
     784      948305 : FreeSpan::checkRange(uintptr_t first, uintptr_t last, const Arena* arena) const
     785             : {
     786             : #ifdef DEBUG
     787      948305 :     MOZ_ASSERT(arena);
     788      948305 :     MOZ_ASSERT(first <= last);
     789      948305 :     AllocKind thingKind = arena->getAllocKind();
     790      948345 :     MOZ_ASSERT(first >= Arena::firstThingOffset(thingKind));
     791      948345 :     MOZ_ASSERT(last <= Arena::lastThingOffset(thingKind));
     792      948347 :     MOZ_ASSERT((last - first) % Arena::thingSize(thingKind) == 0);
     793             : #endif
     794      948341 : }
     795             : 
     796             : /*
     797             :  * The tail of the chunk info is shared between all chunks in the system, both
     798             :  * nursery and tenured. This structure is locatable from any GC pointer by
     799             :  * aligning to 1MiB.
     800             :  */
     801             : struct ChunkTrailer
     802             : {
     803             :     // Construct a Nursery ChunkTrailer.
     804         159 :     ChunkTrailer(JSRuntime* rt, StoreBuffer* sb)
     805         159 :       : location(ChunkLocation::Nursery), storeBuffer(sb), runtime(rt)
     806         159 :     {}
     807             : 
     808             :     // Construct a Tenured heap ChunkTrailer.
     809          50 :     explicit ChunkTrailer(JSRuntime* rt)
     810          50 :       : location(ChunkLocation::TenuredHeap), storeBuffer(nullptr), runtime(rt)
     811          50 :     {}
     812             : 
     813             :   public:
      814             :     // The location of the chunk: ChunkLocation::Nursery or ChunkLocation::TenuredHeap.
     815             :     ChunkLocation   location;
     816             :     uint32_t        padding;
     817             : 
     818             :     // The store buffer for pointers from tenured things to things in this
     819             :     // chunk. Will be non-null only for nursery chunks.
     820             :     StoreBuffer*    storeBuffer;
     821             : 
     822             :     // Provide quick access to the runtime from absolutely anywhere.
     823             :     JSRuntime*      runtime;
     824             : };
     825             : 
     826             : static_assert(sizeof(ChunkTrailer) == ChunkTrailerSize,
     827             :               "ChunkTrailer size must match the API defined size.");
     828             : 
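// Illustrative: because chunks are ChunkSize-aligned, the trailer (and hence
// the runtime) is reachable from any GC pointer by masking. A sketch using
// the ChunkMask constant from js/HeapAPI.h:
//
//   uintptr_t chunkBase = uintptr_t(cellPtr) & ~ChunkMask;
//   auto* trailer = reinterpret_cast<ChunkTrailer*>(
//       chunkBase + ChunkSize - sizeof(ChunkTrailer));
//   JSRuntime* rt = trailer->runtime;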
     829             : /* The chunk header (located at the end of the chunk to preserve arena alignment). */
     830             : struct ChunkInfo
     831             : {
     832          50 :     void init() {
     833          50 :         next = prev = nullptr;
     834          50 :     }
     835             : 
     836             :   private:
     837             :     friend class ChunkPool;
     838             :     Chunk*          next;
     839             :     Chunk*          prev;
     840             : 
     841             :   public:
     842             :     /* Free arenas are linked together with arena.next. */
     843             :     Arena*          freeArenasHead;
     844             : 
     845             : #if JS_BITS_PER_WORD == 32
     846             :     /*
     847             :      * Calculating sizes and offsets is simpler if sizeof(ChunkInfo) is
     848             :      * architecture-independent.
     849             :      */
     850             :     char            padding[24];
     851             : #endif
     852             : 
     853             :     /*
     854             :      * Decommitted arenas are tracked by a bitmap in the chunk header. We use
     855             :      * this offset to start our search iteration close to a decommitted arena
     856             :      * that we can allocate.
     857             :      */
     858             :     uint32_t        lastDecommittedArenaOffset;
     859             : 
     860             :     /* Number of free arenas, either committed or decommitted. */
     861             :     uint32_t        numArenasFree;
     862             : 
     863             :     /* Number of free, committed arenas. */
     864             :     uint32_t        numArenasFreeCommitted;
     865             : };
     866             : 
     867             : /*
     868             :  * Calculating ArenasPerChunk:
     869             :  *
     870             :  * In order to figure out how many Arenas will fit in a chunk, we need to know
     871             :  * how much extra space is available after we allocate the header data. This
     872             :  * is a problem because the header size depends on the number of arenas in the
     873             :  * chunk. The two dependent fields are bitmap and decommittedArenas.
     874             :  *
     875             :  * For the mark bitmap, we know that each arena will use a fixed number of full
     876             :  * bytes: ArenaBitmapBytes. The full size of the header data is this number
     877             :  * multiplied by the eventual number of arenas we have in the header. We,
     878             :  * conceptually, distribute this header data among the individual arenas and do
     879             :  * not include it in the header. This way we do not have to worry about its
     880             :  * variable size: it gets attached to the variable number we are computing.
     881             :  *
     882             :  * For the decommitted arena bitmap, we only have 1 bit per arena, so this
     883             :  * technique will not work. Instead, we observe that we do not have enough
     884             :  * header info to fill 8 full arenas: it is currently 4 on 64bit, less on
     885             :  * 32bit. Thus, with current numbers, we need 64 bytes for decommittedArenas.
     886             :  * This will not become 63 bytes unless we double the data required in the
     887             :  * header. Therefore, we just compute the number of bytes required to track
     888             :  * every possible arena and do not worry about slop bits, since there are too
     889             :  * few to usefully allocate.
     890             :  *
     891             :  * To actually compute the number of arenas we can allocate in a chunk, we
     892             :  * divide the amount of available space less the header info (not including
     893             :  * the mark bitmap which is distributed into the arena size) by the size of
     894             :  * the arena (with the mark bitmap bytes it uses).
     895             :  */
     896             : const size_t BytesPerArenaWithHeader = ArenaSize + ArenaBitmapBytes;
     897             : const size_t ChunkDecommitBitmapBytes = ChunkSize / ArenaSize / JS_BITS_PER_BYTE;
     898             : const size_t ChunkBytesAvailable = ChunkSize - sizeof(ChunkTrailer) - sizeof(ChunkInfo) - ChunkDecommitBitmapBytes;
     899             : const size_t ArenasPerChunk = ChunkBytesAvailable / BytesPerArenaWithHeader;
     900             : 
     901             : #ifdef JS_GC_SMALL_CHUNK_SIZE
     902             : static_assert(ArenasPerChunk == 62, "Do not accidentally change our heap's density.");
     903             : #else
     904             : static_assert(ArenasPerChunk == 252, "Do not accidentally change our heap's density.");
     905             : #endif
     906             : 
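// Worked calculation for the default (non-small-chunk) 64-bit case, with
// assumed struct sizes (ChunkSize == 1 MiB as the ChunkTrailer comment above
// notes, sizeof(ChunkTrailer) == 24, sizeof(ChunkInfo) == 40):
//   BytesPerArenaWithHeader  = 4096 + 64           = 4160
//   ChunkDecommitBitmapBytes = 2^20 / 4096 / 8     = 32
//   ChunkBytesAvailable      = 2^20 - 24 - 40 - 32 = 1048480
//   ArenasPerChunk           = 1048480 / 4160      = 252  (integer division)
// matching the static_assert above.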
     907             : static inline void
     908     2399762 : AssertValidColorBit(const TenuredCell* thing, ColorBit colorBit)
     909             : {
     910             : #ifdef DEBUG
     911     2399762 :     Arena* arena = thing->arena();
     912     2399778 :     MOZ_ASSERT(unsigned(colorBit) < arena->getThingSize() / CellBytesPerMarkBit);
     913             : #endif
     914     2399864 : }
     915             : 
     916             : /* A chunk bitmap contains enough mark bits for all the cells in a chunk. */
     917             : struct ChunkBitmap
     918             : {
     919             :     volatile uintptr_t bitmap[ArenaBitmapWords * ArenasPerChunk];
     920             : 
     921             :   public:
     922           0 :     ChunkBitmap() { }
     923             : 
     924     2465178 :     MOZ_ALWAYS_INLINE void getMarkWordAndMask(const TenuredCell* cell, ColorBit colorBit,
     925             :                                               uintptr_t** wordp, uintptr_t* maskp)
     926             :     {
     927     2465178 :         detail::GetGCThingMarkWordAndMask(uintptr_t(cell), colorBit, wordp, maskp);
     928     2465190 :     }
     929             : 
     930     2399738 :     MOZ_ALWAYS_INLINE MOZ_TSAN_BLACKLIST bool markBit(const TenuredCell* cell, ColorBit colorBit) {
     931     2399738 :         AssertValidColorBit(cell, colorBit);
     932             :         uintptr_t* word, mask;
     933     2399886 :         getMarkWordAndMask(cell, colorBit, &word, &mask);
     934     2399898 :         return *word & mask;
     935             :     }
     936             : 
     937      433151 :     MOZ_ALWAYS_INLINE MOZ_TSAN_BLACKLIST bool isMarkedAny(const TenuredCell* cell) {
     938      433151 :         return markBit(cell, ColorBit::BlackBit) || markBit(cell, ColorBit::GrayOrBlackBit);
     939             :     }
     940             : 
     941       38926 :     MOZ_ALWAYS_INLINE MOZ_TSAN_BLACKLIST bool isMarkedBlack(const TenuredCell* cell) {
     942       38926 :         return markBit(cell, ColorBit::BlackBit);
     943             :     }
     944             : 
     945      760448 :     MOZ_ALWAYS_INLINE MOZ_TSAN_BLACKLIST bool isMarkedGray(const TenuredCell* cell) {
     946      760448 :         return !markBit(cell, ColorBit::BlackBit) && markBit(cell, ColorBit::GrayOrBlackBit);
     947             :     }
     948             : 
     949             :     // The return value indicates if the cell went from unmarked to marked.
     950       60368 :     MOZ_ALWAYS_INLINE bool markIfUnmarked(const TenuredCell* cell, MarkColor color) {
     951             :         uintptr_t* word, mask;
     952       60368 :         getMarkWordAndMask(cell, ColorBit::BlackBit, &word, &mask);
     953       60368 :         if (*word & mask)
     954       34702 :             return false;
     955       25666 :         if (color == MarkColor::Black) {
     956       25666 :             *word |= mask;
     957             :         } else {
     958             :             /*
     959             :              * We use getMarkWordAndMask to recalculate both mask and word as
     960             :              * doing just mask << color may overflow the mask.
     961             :              */
     962           0 :             getMarkWordAndMask(cell, ColorBit::GrayOrBlackBit, &word, &mask);
     963           0 :             if (*word & mask)
     964           0 :                 return false;
     965           0 :             *word |= mask;
     966             :         }
     967       25666 :         return true;
     968             :     }
     969             : 
     970           0 :     MOZ_ALWAYS_INLINE void markBlack(const TenuredCell* cell) {
     971             :         uintptr_t* word, mask;
     972           0 :         getMarkWordAndMask(cell, ColorBit::BlackBit, &word, &mask);
     973           0 :         *word |= mask;
     974           0 :     }
     975             : 
     976           0 :     MOZ_ALWAYS_INLINE void copyMarkBit(TenuredCell* dst, const TenuredCell* src,
     977             :                                        ColorBit colorBit) {
     978             :         uintptr_t* srcWord, srcMask;
     979           0 :         getMarkWordAndMask(src, colorBit, &srcWord, &srcMask);
     980             : 
     981             :         uintptr_t* dstWord, dstMask;
     982           0 :         getMarkWordAndMask(dst, colorBit, &dstWord, &dstMask);
     983             : 
     984           0 :         *dstWord &= ~dstMask;
     985           0 :         if (*srcWord & srcMask)
     986           0 :             *dstWord |= dstMask;
     987           0 :     }
     988             : 
     989          50 :     void clear() {
     990          50 :         memset((void*)bitmap, 0, sizeof(bitmap));
     991          50 :     }
     992             : 
     993        4928 :     uintptr_t* arenaBits(Arena* arena) {
     994             :         static_assert(ArenaBitmapBits == ArenaBitmapWords * JS_BITS_PER_WORD,
      995             :                       "We assume that the part of the bitmap corresponding to an arena "
      996             :                       "occupies a whole number of words, so we never need to deal with "
      997             :                       "a word that covers bits from two arenas.");
     998             : 
     999             :         uintptr_t* word, unused;
    1000        4928 :         getMarkWordAndMask(reinterpret_cast<TenuredCell*>(arena->address()),
    1001        4928 :                            ColorBit::BlackBit, &word, &unused);
    1002        4928 :         return word;
    1003             :     }
    1004             : };
    1005             : 
    1006             : static_assert(ArenaBitmapBytes * ArenasPerChunk == sizeof(ChunkBitmap),
    1007             :               "Ensure our ChunkBitmap actually covers all arenas.");
    1008             : static_assert(js::gc::ChunkMarkBitmapBits == ArenaBitmapBits * ArenasPerChunk,
    1009             :               "Ensure that the mark bitmap has the right number of bits.");
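
For orientation: the two mark bits per cell encode three states. Both bits
clear means unmarked, the black bit set means marked black, and only the
gray-or-black bit set means marked gray. Below is a minimal sketch of the
word/mask lookup that detail::GetGCThingMarkWordAndMask performs (the real
helper lives in js/HeapAPI.h; this version is illustrative and assumes the
layout pinned down by the static_asserts above):

    // Illustrative only: locate the mark word and bit mask for a cell.
    static void
    ExampleGetMarkWordAndMask(uintptr_t cellAddr, ColorBit colorBit,
                              uintptr_t** wordp, uintptr_t* maskp)
    {
        // One mark bit per CellBytesPerMarkBit bytes of chunk memory; the
        // color selects between the adjacent black and gray-or-black bits.
        const size_t bit = (cellAddr & ChunkMask) / CellBytesPerMarkBit +
                           static_cast<size_t>(colorBit);
        // The chunk's mark bitmap sits at a fixed offset from the chunk base.
        uintptr_t* bitmap = reinterpret_cast<uintptr_t*>((cellAddr & ~ChunkMask) +
                                                         ChunkMarkBitmapOffset);
        *wordp = &bitmap[bit / JS_BITS_PER_WORD];
        *maskp = uintptr_t(1) << (bit % JS_BITS_PER_WORD);
    }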
    1010             : 
    1011             : typedef BitArray<ArenasPerChunk> PerArenaBitmap;
    1012             : 
    1013             : const size_t ChunkPadSize = ChunkSize
    1014             :                             - (sizeof(Arena) * ArenasPerChunk)
    1015             :                             - sizeof(ChunkBitmap)
    1016             :                             - sizeof(PerArenaBitmap)
    1017             :                             - sizeof(ChunkInfo)
    1018             :                             - sizeof(ChunkTrailer);
    1019             : static_assert(ChunkPadSize < BytesPerArenaWithHeader,
    1020             :               "If the chunk padding is larger than an arena, we should have one more arena.");
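
A worked example of this accounting, assuming the common 64-bit configuration
(1 MiB chunks, 4 KiB arenas, one mark bit per 8 cell bytes, hence 64 bitmap
bytes per arena); the exact sizes of ChunkInfo and ChunkTrailer vary, so the
numbers are illustrative:

    //   arenas:       252 * 4096 = 1032192 bytes
    //   mark bitmap:  252 *   64 =   16128 bytes
    //   remainder:    1048576 - 1032192 - 16128 = 256 bytes
    // The remaining 256 bytes hold the PerArenaBitmap, ChunkInfo, ChunkTrailer
    // and the ChunkPadSize padding, which is far less than
    // BytesPerArenaWithHeader, so no further arena could fit -- exactly what
    // the static_assert above verifies.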
    1021             : 
    1022             : /*
    1023             :  * Chunks contain arenas and associated data structures (mark bitmap, delayed
    1024             :  * marking state).
    1025             :  */
    1026             : struct Chunk
    1027             : {
    1028             :     Arena           arenas[ArenasPerChunk];
    1029             : 
    1030             :     /* Pad to full size to ensure cache alignment of ChunkInfo. */
    1031             :     uint8_t         padding[ChunkPadSize];
    1032             : 
    1033             :     ChunkBitmap     bitmap;
    1034             :     PerArenaBitmap  decommittedArenas;
    1035             :     ChunkInfo       info;
    1036             :     ChunkTrailer    trailer;
    1037             : 
    1038    16575917 :     static Chunk* fromAddress(uintptr_t addr) {
    1039    16575917 :         addr &= ~ChunkMask;
    1040    16575917 :         return reinterpret_cast<Chunk*>(addr);
    1041             :     }
    1042             : 
    1043    16570410 :     static bool withinValidRange(uintptr_t addr) {
    1044    16570410 :         uintptr_t offset = addr & ChunkMask;
    1045    16570410 :         return Chunk::fromAddress(addr)->isNurseryChunk()
    1046    16572027 :                ? offset < ChunkSize - sizeof(ChunkTrailer)
    1047    16572027 :                : offset < ArenasPerChunk * ArenaSize;
    1048             :     }
    1049             : 
    1050           0 :     static size_t arenaIndex(uintptr_t addr) {
    1051           0 :         MOZ_ASSERT(!Chunk::fromAddress(addr)->isNurseryChunk());
    1052           0 :         MOZ_ASSERT(withinValidRange(addr));
    1053           0 :         return (addr & ChunkMask) >> ArenaShift;
    1054             :     }
    1055             : 
    1056             :     uintptr_t address() const {
    1057             :         uintptr_t addr = reinterpret_cast<uintptr_t>(this);
    1058             :         MOZ_ASSERT(!(addr & ChunkMask));
    1059             :         return addr;
    1060             :     }
    1061             : 
    1062          29 :     bool unused() const {
    1063          29 :         return info.numArenasFree == ArenasPerChunk;
    1064             :     }
    1065             : 
    1066       13324 :     bool hasAvailableArenas() const {
    1067       13324 :         return info.numArenasFree != 0;
    1068             :     }
    1069             : 
    1070    16571175 :     bool isNurseryChunk() const {
    1071    16571175 :         return trailer.storeBuffer;
    1072             :     }
    1073             : 
    1074             :     Arena* allocateArena(JSRuntime* rt, JS::Zone* zone, AllocKind kind, const AutoLockGC& lock);
    1075             : 
    1076             :     void releaseArena(JSRuntime* rt, Arena* arena, const AutoLockGC& lock);
    1077             :     void recycleArena(Arena* arena, SortedArenaList& dest, size_t thingsPerArena);
    1078             : 
    1079             :     MOZ_MUST_USE bool decommitOneFreeArena(JSRuntime* rt, AutoLockGC& lock);
    1080             :     void decommitAllArenasWithoutUnlocking(const AutoLockGC& lock);
    1081             : 
    1082             :     static Chunk* allocate(JSRuntime* rt);
    1083             :     void init(JSRuntime* rt);
    1084             : 
    1085             :   private:
    1086             :     void decommitAllArenas(JSRuntime* rt);
    1087             : 
    1088             :     /* Search for a decommitted arena to allocate. */
    1089             :     unsigned findDecommittedArenaOffset();
    1090             :     Arena* fetchNextDecommittedArena();
    1091             : 
    1092             :     void addArenaToFreeList(JSRuntime* rt, Arena* arena);
    1093             :     void addArenaToDecommittedList(JSRuntime* rt, const Arena* arena);
    1094             : 
    1095             :     void updateChunkListAfterAlloc(JSRuntime* rt, const AutoLockGC& lock);
    1096             :     void updateChunkListAfterFree(JSRuntime* rt, const AutoLockGC& lock);
    1097             : 
    1098             :   public:
    1099             :     /* Unlink and return the freeArenasHead. */
    1100             :     Arena* fetchNextFreeArena(JSRuntime* rt);
    1101             : };
    1102             : 
    1103             : static_assert(sizeof(Chunk) == ChunkSize,
    1104             :               "Ensure the hardcoded chunk size definition actually matches the struct.");
    1105             : static_assert(js::gc::ChunkMarkBitmapOffset == offsetof(Chunk, bitmap),
    1106             :               "The hardcoded API bitmap offset must match the actual offset.");
    1107             : static_assert(js::gc::ChunkRuntimeOffset == offsetof(Chunk, trailer) +
    1108             :                                             offsetof(ChunkTrailer, runtime),
    1109             :               "The hardcoded API runtime offset must match the actual offset.");
    1110             : static_assert(js::gc::ChunkLocationOffset == offsetof(Chunk, trailer) +
    1111             :                                              offsetof(ChunkTrailer, location),
    1112             :               "The hardcoded API location offset must match the actual offset.");
    1113             : 
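Because chunks live at ChunkSize-aligned addresses, a cell pointer maps back
to its chunk and arena with nothing but masking. A minimal sketch (the helper
name is illustrative; it uses only the accessors defined above and applies to
tenured cells, since arenaIndex asserts against nursery chunks):

    // Illustrative only: recover a tenured cell's chunk and arena index.
    static void
    ExampleLocateTenuredCell(uintptr_t cellAddr, Chunk** chunkOut, size_t* arenaOut)
    {
        *chunkOut = Chunk::fromAddress(cellAddr); // cellAddr & ~ChunkMask
        *arenaOut = Chunk::arenaIndex(cellAddr);  // (cellAddr & ChunkMask) >> ArenaShift
    }
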
    1114             : /*
    1115             :  * Tracks the used sizes for owned heap data and automatically maintains the
    1116             :  * memory usage relationship between GCRuntime and Zones.
    1117             :  */
    1118             : class HeapUsage
    1119             : {
    1120             :     /*
    1121             :      * A heap usage that contains our parent's heap usage, or null if this is
    1122             :      * the top-level usage container.
    1123             :      */
    1124             :     HeapUsage* const parent_;
    1125             : 
    1126             :     /*
    1127             :      * The approximate number of bytes in use on the GC heap, to the nearest
     1128             :      * ArenaSize. This does not include any malloc data. It also does not
     1129             :      * include addresses that are reserved at the OS level for GC use but
     1130             :      * are not actively in use. It is atomic because it is updated by both
     1131             :      * the active and GC helper threads.
    1132             :      */
    1133             :     mozilla::Atomic<size_t, mozilla::ReleaseAcquire> gcBytes_;
    1134             : 
    1135             :   public:
    1136          35 :     explicit HeapUsage(HeapUsage* parent)
    1137          35 :       : parent_(parent),
    1138          35 :         gcBytes_(0)
    1139          35 :     {}
    1140             : 
    1141       21437 :     size_t gcBytes() const { return gcBytes_; }
    1142             : 
    1143       13314 :     void addGCArena() {
    1144       13314 :         gcBytes_ += ArenaSize;
    1145       13314 :         if (parent_)
    1146        6657 :             parent_->addGCArena();
    1147       13314 :     }
    1148           0 :     void removeGCArena() {
    1149           0 :         MOZ_ASSERT(gcBytes_ >= ArenaSize);
    1150           0 :         gcBytes_ -= ArenaSize;
    1151           0 :         if (parent_)
    1152           0 :             parent_->removeGCArena();
    1153           0 :     }
    1154             : 
    1155             :     /* Pair to adoptArenas. Adopts the attendant usage statistics. */
    1156          15 :     void adopt(HeapUsage& other) {
    1157          15 :         gcBytes_ += other.gcBytes_;
    1158          15 :         other.gcBytes_ = 0;
    1159          15 :     }
    1160             : };
    1161             : 
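A small usage sketch of the parent chaining (variable names are illustrative):
the top-level runtime usage is constructed with a null parent, each zone's
usage is constructed with the runtime usage as its parent, and per-zone
updates propagate upward automatically:

    HeapUsage runtimeUsage(nullptr);    // top-level container, no parent
    HeapUsage zoneUsage(&runtimeUsage); // per-zone usage reports to the runtime

    // Allocating one arena in the zone bumps both counters by ArenaSize.
    zoneUsage.addGCArena();
    MOZ_ASSERT(zoneUsage.gcBytes() == ArenaSize);
    MOZ_ASSERT(runtimeUsage.gcBytes() == ArenaSize);
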
    1162             : inline void
    1163     1528954 : Arena::checkAddress() const
    1164             : {
    1165     3057900 :     mozilla::DebugOnly<uintptr_t> addr = uintptr_t(this);
    1166     1528974 :     MOZ_ASSERT(addr);
    1167     1528970 :     MOZ_ASSERT(!(addr & ArenaMask));
    1168     1528970 :     MOZ_ASSERT(Chunk::withinValidRange(addr));
    1169     1528947 : }
    1170             : 
    1171             : inline Chunk*
    1172        4928 : Arena::chunk() const
    1173             : {
    1174        4928 :     return Chunk::fromAddress(address());
    1175             : }
    1176             : 
    1177             : MOZ_ALWAYS_INLINE const TenuredCell&
    1178     1410632 : Cell::asTenured() const
    1179             : {
    1180     1410632 :     MOZ_ASSERT(isTenured());
    1181     1410626 :     return *static_cast<const TenuredCell*>(this);
    1182             : }
    1183             : 
    1184             : MOZ_ALWAYS_INLINE TenuredCell&
    1185     1538123 : Cell::asTenured()
    1186             : {
    1187     1538123 :     MOZ_ASSERT(isTenured());
    1188     1538120 :     return *static_cast<TenuredCell*>(this);
    1189             : }
    1190             : 
    1191             : MOZ_ALWAYS_INLINE bool
    1192             : Cell::isMarkedAny() const
    1193             : {
    1194             :     return !isTenured() || asTenured().isMarkedAny();
    1195             : }
    1196             : 
    1197             : MOZ_ALWAYS_INLINE bool
    1198       49503 : Cell::isMarkedBlack() const
    1199             : {
    1200       49503 :     return !isTenured() || asTenured().isMarkedBlack();
    1201             : }
    1202             : 
    1203             : MOZ_ALWAYS_INLINE bool
    1204       59139 : Cell::isMarkedGray() const
    1205             : {
    1206       59139 :     return isTenured() && asTenured().isMarkedGray();
    1207             : }
    1208             : 
    1209             : inline JSRuntime*
    1210       17652 : Cell::runtimeFromActiveCooperatingThread() const
    1211             : {
    1212       17652 :     JSRuntime* rt = chunk()->trailer.runtime;
    1213       17652 :     MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt));
    1214       17652 :     return rt;
    1215             : }
    1216             : 
    1217             : inline JSRuntime*
    1218     2832368 : Cell::runtimeFromAnyThread() const
    1219             : {
    1220     2832368 :     return chunk()->trailer.runtime;
    1221             : }
    1222             : 
    1223             : inline uintptr_t
    1224    15050242 : Cell::address() const
    1225             : {
    1226    15050242 :     uintptr_t addr = uintptr_t(this);
    1227    15050242 :     MOZ_ASSERT(addr % CellAlignBytes == 0);
    1228    15050242 :     MOZ_ASSERT(Chunk::withinValidRange(addr));
    1229    15051368 :     return addr;
    1230             : }
    1231             : 
    1232             : Chunk*
    1233     4670692 : Cell::chunk() const
    1234             : {
    1235     4670692 :     uintptr_t addr = uintptr_t(this);
    1236     4670692 :     MOZ_ASSERT(addr % CellAlignBytes == 0);
    1237     4670692 :     addr &= ~ChunkMask;
    1238     4670692 :     return reinterpret_cast<Chunk*>(addr);
    1239             : }
    1240             : 
    1241             : inline StoreBuffer*
    1242      528155 : Cell::storeBuffer() const
    1243             : {
    1244      528155 :     return chunk()->trailer.storeBuffer;
    1245             : }
    1246             : 
    1247             : inline JS::TraceKind
    1248       58331 : Cell::getTraceKind() const
    1249             : {
    1250       58331 :     return isTenured() ? asTenured().getTraceKind() : JS::TraceKind::Object;
    1251             : }
    1252             : 
    1253             : inline bool
    1254           0 : InFreeList(Arena* arena, void* thing)
    1255             : {
    1256           0 :     uintptr_t addr = reinterpret_cast<uintptr_t>(thing);
    1257           0 :     MOZ_ASSERT(Arena::isAligned(addr, arena->getThingSize()));
    1258           0 :     return arena->inFreeList(addr);
    1259             : }
    1260             : 
    1261             : /* static */ MOZ_ALWAYS_INLINE bool
    1262         122 : Cell::needWriteBarrierPre(JS::Zone* zone) {
    1263         122 :     return JS::shadow::Zone::asShadowZone(zone)->needsIncrementalBarrier();
    1264             : }
    1265             : 
    1266             : /* static */ MOZ_ALWAYS_INLINE TenuredCell*
    1267      105719 : TenuredCell::fromPointer(void* ptr)
    1268             : {
    1269      105719 :     MOZ_ASSERT(static_cast<TenuredCell*>(ptr)->isTenured());
    1270      105719 :     return static_cast<TenuredCell*>(ptr);
    1271             : }
    1272             : 
    1273             : /* static */ MOZ_ALWAYS_INLINE const TenuredCell*
    1274           0 : TenuredCell::fromPointer(const void* ptr)
    1275             : {
    1276           0 :     MOZ_ASSERT(static_cast<const TenuredCell*>(ptr)->isTenured());
    1277           0 :     return static_cast<const TenuredCell*>(ptr);
    1278             : }
    1279             : 
    1280             : bool
    1281      433149 : TenuredCell::isMarkedAny() const
    1282             : {
    1283      433149 :     MOZ_ASSERT(arena()->allocated());
    1284      433151 :     return chunk()->bitmap.isMarkedAny(this);
    1285             : }
    1286             : 
    1287             : bool
    1288       38926 : TenuredCell::isMarkedBlack() const
    1289             : {
    1290       38926 :     MOZ_ASSERT(arena()->allocated());
    1291       38926 :     return chunk()->bitmap.isMarkedBlack(this);
    1292             : }
    1293             : 
    1294             : bool
    1295      760436 : TenuredCell::isMarkedGray() const
    1296             : {
    1297      760436 :     MOZ_ASSERT(arena()->allocated());
    1298      760448 :     return chunk()->bitmap.isMarkedGray(this);
    1299             : }
    1300             : 
    1301             : bool
    1302       60368 : TenuredCell::markIfUnmarked(MarkColor color /* = Black */) const
    1303             : {
    1304       60368 :     return chunk()->bitmap.markIfUnmarked(this, color);
    1305             : }
    1306             : 
    1307             : void
    1308           0 : TenuredCell::markBlack() const
    1309             : {
    1310           0 :     chunk()->bitmap.markBlack(this);
    1311           0 : }
    1312             : 
    1313             : void
    1314           0 : TenuredCell::copyMarkBitsFrom(const TenuredCell* src)
    1315             : {
    1316           0 :     ChunkBitmap& bitmap = chunk()->bitmap;
    1317           0 :     bitmap.copyMarkBit(this, src, ColorBit::BlackBit);
    1318           0 :     bitmap.copyMarkBit(this, src, ColorBit::GrayOrBlackBit);
    1319           0 : }
    1320             : 
    1321             : inline Arena*
    1322    14992845 : TenuredCell::arena() const
    1323             : {
    1324    14992845 :     MOZ_ASSERT(isTenured());
    1325    14994540 :     uintptr_t addr = address();
    1326    14995226 :     addr &= ~ArenaMask;
    1327    14995226 :     return reinterpret_cast<Arena*>(addr);
    1328             : }
    1329             : 
    1330             : AllocKind
    1331     2452969 : TenuredCell::getAllocKind() const
    1332             : {
    1333     2452969 :     return arena()->getAllocKind();
    1334             : }
    1335             : 
    1336             : JS::TraceKind
    1337       96235 : TenuredCell::getTraceKind() const
    1338             : {
    1339       96235 :     return MapAllocToTraceKind(getAllocKind());
    1340             : }
    1341             : 
    1342             : JS::Zone*
    1343     1146259 : TenuredCell::zone() const
    1344             : {
    1345     1146259 :     JS::Zone* zone = arena()->zone;
    1346     1146266 :     MOZ_ASSERT(CurrentThreadCanAccessZone(zone));
    1347     1146222 :     return zone;
    1348             : }
    1349             : 
    1350             : JS::Zone*
    1351     6726828 : TenuredCell::zoneFromAnyThread() const
    1352             : {
    1353     6726828 :     return arena()->zone;
    1354             : }
    1355             : 
    1356             : bool
    1357             : TenuredCell::isInsideZone(JS::Zone* zone) const
    1358             : {
    1359             :     return zone == arena()->zone;
    1360             : }
    1361             : 
    1362             : /* static */ MOZ_ALWAYS_INLINE void
    1363      638991 : TenuredCell::readBarrier(TenuredCell* thing)
    1364             : {
    1365      638991 :     MOZ_ASSERT(!CurrentThreadIsIonCompiling());
    1366      639001 :     MOZ_ASSERT(thing);
    1367      639001 :     MOZ_ASSERT(CurrentThreadCanAccessZone(thing->zoneFromAnyThread()));
    1368             : 
    1369             :     // It would be good if barriers were never triggered during collection, but
    1370             :     // at the moment this can happen e.g. when rekeying tables containing
    1371             :     // read-barriered GC things after a moving GC.
    1372             :     //
    1373             :     // TODO: Fix this and assert we're not collecting if we're on the active
    1374             :     // thread.
    1375             : 
    1376      638985 :     JS::shadow::Zone* shadowZone = thing->shadowZoneFromAnyThread();
    1377      638991 :     if (shadowZone->needsIncrementalBarrier()) {
    1378             :         // Barriers are only enabled on the active thread and are disabled while collecting.
    1379       16480 :         MOZ_ASSERT(!RuntimeFromActiveCooperatingThreadIsHeapMajorCollecting(shadowZone));
    1380       16480 :         Cell* tmp = thing;
    1381       16480 :         TraceManuallyBarrieredGenericPointerEdge(shadowZone->barrierTracer(), &tmp, "read barrier");
    1382       16480 :         MOZ_ASSERT(tmp == thing);
    1383             :     }
    1384             : 
    1385      638993 :     if (thing->isMarkedGray()) {
     1386             :         // There shouldn't be anything marked gray unless we're on the active thread.
    1387           0 :         MOZ_ASSERT(CurrentThreadCanAccessRuntime(thing->runtimeFromAnyThread()));
    1388           0 :         if (!RuntimeFromActiveCooperatingThreadIsHeapMajorCollecting(shadowZone))
    1389           0 :             UnmarkGrayCellRecursively(thing, thing->getTraceKind());
    1390             :     }
    1391      639002 : }
    1392             : 
    1393             : void
    1394             : AssertSafeToSkipBarrier(TenuredCell* thing);
    1395             : 
    1396             : /* static */ MOZ_ALWAYS_INLINE void
    1397      491086 : TenuredCell::writeBarrierPre(TenuredCell* thing)
    1398             : {
    1399      491086 :     MOZ_ASSERT(!CurrentThreadIsIonCompiling());
    1400      491096 :     if (!thing)
    1401      122852 :         return;
    1402             : 
    1403             : #ifdef JS_GC_ZEAL
     1404             :     // When verifying pre-barriers we need to switch on all barriers, even
     1405             :     // those on the atoms zone. Normally we never enter a parse task while
     1406             :     // collecting in the atoms zone, so we filter out atoms below.
     1407             :     // Unfortunately, if we tried that while verifying pre-barriers, we would
     1408             :     // never be able to handle off-thread parse tasks at all, because we
     1409             :     // switch on the verifier any time we're not doing GC. That would cause a
     1410             :     // deadlock, as off-thread parsing is meant to resume after GC work
     1411             :     // completes. Instead we filter out any off-thread barriers that reach us
     1412             :     // and assert that they would normally not be possible.
    1413      368244 :     if (!CurrentThreadCanAccessRuntime(thing->runtimeFromAnyThread())) {
    1414       10318 :         AssertSafeToSkipBarrier(thing);
    1415       10318 :         return;
    1416             :     }
    1417             : #endif
    1418             : 
    1419      357930 :     JS::shadow::Zone* shadowZone = thing->shadowZoneFromAnyThread();
    1420      357931 :     if (shadowZone->needsIncrementalBarrier()) {
    1421        4232 :         MOZ_ASSERT(!RuntimeFromActiveCooperatingThreadIsHeapMajorCollecting(shadowZone));
    1422        4232 :         Cell* tmp = thing;
    1423        4232 :         TraceManuallyBarrieredGenericPointerEdge(shadowZone->barrierTracer(), &tmp, "pre barrier");
    1424        4232 :         MOZ_ASSERT(tmp == thing);
    1425             :     }
    1426             : }
    1427             : 
    1428             : static MOZ_ALWAYS_INLINE void
    1429     1771034 : AssertValidToSkipBarrier(TenuredCell* thing)
    1430             : {
    1431     1771034 :     MOZ_ASSERT(!IsInsideNursery(thing));
    1432     1771053 :     MOZ_ASSERT_IF(thing, MapAllocToTraceKind(thing->getAllocKind()) != JS::TraceKind::Object);
    1433     1771095 : }
    1434             : 
    1435             : /* static */ MOZ_ALWAYS_INLINE void
    1436     1771049 : TenuredCell::writeBarrierPost(void* cellp, TenuredCell* prior, TenuredCell* next)
    1437             : {
    1438     1771049 :     AssertValidToSkipBarrier(next);
    1439     1771089 : }
    1440             : 
    1441             : #ifdef DEBUG
    1442             : bool
    1443       16001 : Cell::isAligned() const
    1444             : {
    1445       16001 :     if (!isTenured())
    1446           0 :         return true;
    1447       16001 :     return asTenured().isAligned();
    1448             : }
    1449             : 
    1450             : bool
    1451       55823 : TenuredCell::isAligned() const
    1452             : {
    1453       55823 :     return Arena::isAligned(address(), arena()->getThingSize());
    1454             : }
    1455             : #endif
    1456             : 
    1457             : static const int32_t ChunkLocationOffsetFromLastByte =
    1458             :     int32_t(gc::ChunkLocationOffset) - int32_t(gc::ChunkMask);
    1459             : 
    1460             : } /* namespace gc */
    1461             : 
    1462             : namespace debug {
    1463             : 
    1464             : // Utility functions meant to be called from an interactive debugger.
    1465             : enum class MarkInfo : int {
    1466             :     BLACK = 0,
    1467             :     GRAY = 1,
    1468             :     UNMARKED = -1,
    1469             :     NURSERY = -2,
    1470             : };
    1471             : 
    1472             : // Get the mark color for a cell, in a way easily usable from a debugger.
    1473             : MOZ_NEVER_INLINE MarkInfo
    1474             : GetMarkInfo(js::gc::Cell* cell);
    1475             : 
    1476             : // Sample usage from gdb:
    1477             : //
    1478             : //   (gdb) p $word = js::debug::GetMarkWordAddress(obj)
    1479             : //   $1 = (uintptr_t *) 0x7fa56d5fe360
    1480             : //   (gdb) p/x $mask = js::debug::GetMarkMask(obj, js::gc::GRAY)
    1481             : //   $2 = 0x200000000
    1482             : //   (gdb) watch *$word
    1483             : //   Hardware watchpoint 7: *$word
    1484             : //   (gdb) cond 7 *$word & $mask
    1485             : //   (gdb) cont
    1486             : //
    1487             : // Note that this is *not* a watchpoint on a single bit. It is a watchpoint on
    1488             : // the whole word, which will trigger whenever the word changes and the
    1489             : // selected bit is set after the change.
    1490             : //
    1491             : // So if the bit changing is the desired one, this is exactly what you want.
    1492             : // But if a different bit changes (either set or cleared), you may still stop
    1493             : // execution if the $mask bit happened to already be set. gdb does not expose
    1494             : // enough information to restrict the watchpoint to just a single bit.
    1495             : 
    1496             : // Return the address of the word containing the mark bits for the given cell,
    1497             : // or nullptr if the cell is in the nursery.
    1498             : MOZ_NEVER_INLINE uintptr_t*
    1499             : GetMarkWordAddress(js::gc::Cell* cell);
    1500             : 
    1501             : // Return the mask for the given cell and color bit, or 0 if the cell is in the
    1502             : // nursery.
    1503             : MOZ_NEVER_INLINE uintptr_t
    1504             : GetMarkMask(js::gc::Cell* cell, uint32_t colorBit);
    1505             : 
    1506             : } /* namespace debug */
    1507             : } /* namespace js */
    1508             : 
    1509             : #endif /* gc_Heap_h */

Generated by: LCOV version 1.13