LCOV - code coverage report
Current view: top level - js/src/gc - GCRuntime.h (source / functions)
Test: output.info
Date: 2017-07-14 16:53:18
                  Hit    Total    Coverage
  Lines:          121      213      56.8 %
  Functions:       81      128      63.3 %
Legend: Lines: hit | not hit

          Line data    Source code
       1             : /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
       2             :  * vim: set ts=8 sts=4 et sw=4 tw=99:
       3             :  * This Source Code Form is subject to the terms of the Mozilla Public
       4             :  * License, v. 2.0. If a copy of the MPL was not distributed with this
       5             :  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
       6             : 
       7             : #ifndef gc_GCRuntime_h
       8             : #define gc_GCRuntime_h
       9             : 
      10             : #include "mozilla/Atomics.h"
      11             : #include "mozilla/EnumSet.h"
      12             : #include "mozilla/Maybe.h"
      13             : 
      14             : #include "jsfriendapi.h"
      15             : #include "jsgc.h"
      16             : 
      17             : #include "gc/AtomMarking.h"
      18             : #include "gc/Heap.h"
      19             : #include "gc/Nursery.h"
      20             : #include "gc/Statistics.h"
      21             : #include "gc/StoreBuffer.h"
      22             : #include "gc/Tracer.h"
      23             : #include "js/GCAnnotations.h"
      24             : 
      25             : namespace js {
      26             : 
      27             : class AutoLockGC;
      28             : class AutoLockHelperThreadState;
      29             : class VerifyPreTracer;
      30             : 
      31             : namespace gc {
      32             : 
      33             : typedef Vector<ZoneGroup*, 4, SystemAllocPolicy> ZoneGroupVector;
      34             : using BlackGrayEdgeVector = Vector<TenuredCell*, 0, SystemAllocPolicy>;
      35             : 
      36             : class AutoMaybeStartBackgroundAllocation;
      37             : class AutoRunParallelTask;
      38             : class AutoTraceSession;
      39             : class MarkingValidator;
      40             : struct MovingTracer;
      41             : class WeakCacheSweepIterator;
      42             : 
      43             : enum IncrementalProgress
      44             : {
      45             :     NotFinished = 0,
      46             :     Finished
      47             : };
      48             : 
      49             : enum SweepActionList
      50             : {
      51             :     PerSweepGroupActionList,
      52             :     PerZoneActionList,
      53             :     SweepActionListCount
      54             : };
      55             : 
      56             : class ChunkPool
      57             : {
      58             :     Chunk* head_;
      59             :     size_t count_;
      60             : 
      61             :   public:
      62          12 :     ChunkPool() : head_(nullptr), count_(0) {}
      63           0 :     ~ChunkPool() {
      64             :         // TODO: We should be able to assert that the chunk pool is empty but
      65             :         // this causes XPCShell test failures on Windows 2012. See bug 1379232.
      66           0 :     }
      67             : 
      68             :     bool empty() const { return !head_; }
      69        6848 :     size_t count() const { return count_; }
      70             : 
      71        6628 :     Chunk* head() { MOZ_ASSERT(head_); return head_; }
      72             :     Chunk* pop();
      73             :     void push(Chunk* chunk);
      74             :     Chunk* remove(Chunk* chunk);
      75             : 
      76             : #ifdef DEBUG
      77             :     bool contains(Chunk* chunk) const;
      78             :     bool verify() const;
      79             : #endif
      80             : 
      81             :     // Pool mutation does not invalidate an Iter unless the mutation
      82             :     // is of the Chunk currently being visited by the Iter.
      83             :     class Iter {
      84             :       public:
      85           0 :         explicit Iter(ChunkPool& pool) : current_(pool.head_) {}
      86           0 :         bool done() const { return !current_; }
      87             :         void next();
      88           0 :         Chunk* get() const { return current_; }
      89             :         operator Chunk*() const { return get(); }
      90           0 :         Chunk* operator->() const { return get(); }
      91             :       private:
      92             :         Chunk* current_;
      93             :     };
      94             : };
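                     :
                     : // Example use of ChunkPool::Iter; a minimal sketch, where
                     : // 'inspectChunk' stands in for whatever per-chunk work the caller
                     : // does, and the GC lock is assumed to be held where required:
                     : //
                     : //     for (ChunkPool::Iter iter(pool); !iter.done(); iter.next())
                     : //         inspectChunk(iter.get());
                     : //
                     : // Per the invalidation rule above, the loop body may mutate the pool
                     : // as long as it does not touch the chunk currently being visited.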
      95             : 
      96             : // Performs extra allocation off thread so that when memory is required on the
      97             : // active thread it will already be available and waiting.
      98           0 : class BackgroundAllocTask : public GCParallelTask
      99             : {
     100             :     // Guarded by the GC lock.
     101             :     GCLockData<ChunkPool&> chunkPool_;
     102             : 
     103             :     const bool enabled_;
     104             : 
     105             :   public:
     106             :     BackgroundAllocTask(JSRuntime* rt, ChunkPool& pool);
     107          74 :     bool enabled() const { return enabled_; }
     108             : 
     109             :   protected:
     110             :     void run() override;
     111             : };
     112             : 
     113             : // Search the provided Chunks for free arenas and decommit them.
     114           0 : class BackgroundDecommitTask : public GCParallelTask
     115             : {
     116             :   public:
     117             :     using ChunkVector = mozilla::Vector<Chunk*>;
     118             : 
     119           4 :     explicit BackgroundDecommitTask(JSRuntime *rt) : GCParallelTask(rt) {}
     120             :     void setChunksToScan(ChunkVector &chunks);
     121             : 
     122             :   protected:
     123             :     void run() override;
     124             : 
     125             :   private:
     126             :     ActiveThreadOrGCTaskData<ChunkVector> toDecommit;
     127             : };
     128             : 
     129             : /*
     130             :  * Encapsulates all of the GC tunables. These are effectively constant and
     131             :  * should only be modified by setParameter.
     132             :  */
     133             : class GCSchedulingTunables
     134             : {
     135             :     /*
     136             :      * Soft limit on the number of bytes we are allowed to allocate in the GC
     137             :      * heap. Attempts to allocate gcthings over this limit will return null and
     138             :      * subsequently invoke the standard OOM machinery, independent of available
     139             :      * physical memory.
     140             :      */
     141             :     UnprotectedData<size_t> gcMaxBytes_;
     142             : 
     143             :     /* Maximum nursery size for each zone group. */
     144             :     ActiveThreadData<size_t> gcMaxNurseryBytes_;
     145             : 
     146             :     /*
      147             :      * The base value used to compute zone->threshold.gcBytes(). When
      148             :      * usage.gcBytes() surpasses threshold.gcBytes() for a zone, the zone may
      149             :      * be scheduled for a GC, depending on the exact circumstances.
     150             :      */
     151             :     ActiveThreadOrGCTaskData<size_t> gcZoneAllocThresholdBase_;
     152             : 
     153             :     /* Fraction of threshold.gcBytes() which triggers an incremental GC. */
     154             :     UnprotectedData<double> zoneAllocThresholdFactor_;
     155             :     /* The same except when doing so would interrupt an already running GC. */
     156             :     UnprotectedData<double> zoneAllocThresholdFactorAvoidInterrupt_;
     157             : 
     158             :     /*
     159             :      * Number of bytes to allocate between incremental slices in GCs triggered
     160             :      * by the zone allocation threshold.
     161             :      */
     162             :     UnprotectedData<size_t> zoneAllocDelayBytes_;
     163             : 
      164             :     /*
      165             :      * When false, totally disables |highFrequencyGC|, the HeapGrowthFactor,
      166             :      * and other tunables that make GC non-deterministic.
      167             :      */
     168             :     ActiveThreadData<bool> dynamicHeapGrowthEnabled_;
     169             : 
     170             :     /*
      171             :      * We enter high-frequency mode if we GC twice within this many
     172             :      * microseconds. This value is stored directly in microseconds.
     173             :      */
     174             :     ActiveThreadData<uint64_t> highFrequencyThresholdUsec_;
     175             : 
     176             :     /*
     177             :      * When in the |highFrequencyGC| mode, these parameterize the per-zone
     178             :      * "HeapGrowthFactor" computation.
     179             :      */
     180             :     ActiveThreadData<uint64_t> highFrequencyLowLimitBytes_;
     181             :     ActiveThreadData<uint64_t> highFrequencyHighLimitBytes_;
     182             :     ActiveThreadData<double> highFrequencyHeapGrowthMax_;
     183             :     ActiveThreadData<double> highFrequencyHeapGrowthMin_;
     184             : 
     185             :     /*
     186             :      * When not in |highFrequencyGC| mode, this is the global (stored per-zone)
     187             :      * "HeapGrowthFactor".
     188             :      */
     189             :     ActiveThreadData<double> lowFrequencyHeapGrowth_;
     190             : 
     191             :     /*
     192             :      * Doubles the length of IGC slices when in the |highFrequencyGC| mode.
     193             :      */
     194             :     ActiveThreadData<bool> dynamicMarkSliceEnabled_;
     195             : 
     196             :     /*
     197             :      * Controls whether painting can trigger IGC slices.
     198             :      */
     199             :     ActiveThreadData<bool> refreshFrameSlicesEnabled_;
     200             : 
     201             :     /*
     202             :      * Controls the number of empty chunks reserved for future allocation.
     203             :      */
     204             :     UnprotectedData<uint32_t> minEmptyChunkCount_;
     205             :     UnprotectedData<uint32_t> maxEmptyChunkCount_;
     206             : 
     207             :   public:
     208           4 :     GCSchedulingTunables()
     209           4 :       : gcMaxBytes_(0),
     210             :         gcMaxNurseryBytes_(0),
     211           8 :         gcZoneAllocThresholdBase_(30 * 1024 * 1024),
     212             :         zoneAllocThresholdFactor_(0.9),
     213             :         zoneAllocThresholdFactorAvoidInterrupt_(0.95),
     214           8 :         zoneAllocDelayBytes_(1024 * 1024),
     215             :         dynamicHeapGrowthEnabled_(false),
     216           8 :         highFrequencyThresholdUsec_(1000 * 1000),
     217           8 :         highFrequencyLowLimitBytes_(100 * 1024 * 1024),
     218           8 :         highFrequencyHighLimitBytes_(500 * 1024 * 1024),
     219             :         highFrequencyHeapGrowthMax_(3.0),
     220             :         highFrequencyHeapGrowthMin_(1.5),
     221             :         lowFrequencyHeapGrowth_(1.5),
     222             :         dynamicMarkSliceEnabled_(false),
     223             :         refreshFrameSlicesEnabled_(true),
     224             :         minEmptyChunkCount_(1),
     225          36 :         maxEmptyChunkCount_(30)
     226           4 :     {}
     227             : 
     228        6573 :     size_t gcMaxBytes() const { return gcMaxBytes_; }
     229             :     size_t gcMaxNurseryBytes() const { return gcMaxNurseryBytes_; }
     230         290 :     size_t gcZoneAllocThresholdBase() const { return gcZoneAllocThresholdBase_; }
     231        5807 :     double zoneAllocThresholdFactor() const { return zoneAllocThresholdFactor_; }
     232           0 :     double zoneAllocThresholdFactorAvoidInterrupt() const { return zoneAllocThresholdFactorAvoidInterrupt_; }
     233           0 :     size_t zoneAllocDelayBytes() const { return zoneAllocDelayBytes_; }
     234         290 :     bool isDynamicHeapGrowthEnabled() const { return dynamicHeapGrowthEnabled_; }
     235           0 :     uint64_t highFrequencyThresholdUsec() const { return highFrequencyThresholdUsec_; }
     236           0 :     uint64_t highFrequencyLowLimitBytes() const { return highFrequencyLowLimitBytes_; }
     237           0 :     uint64_t highFrequencyHighLimitBytes() const { return highFrequencyHighLimitBytes_; }
     238           0 :     double highFrequencyHeapGrowthMax() const { return highFrequencyHeapGrowthMax_; }
     239           0 :     double highFrequencyHeapGrowthMin() const { return highFrequencyHeapGrowthMin_; }
     240         175 :     double lowFrequencyHeapGrowth() const { return lowFrequencyHeapGrowth_; }
     241           0 :     bool isDynamicMarkSliceEnabled() const { return dynamicMarkSliceEnabled_; }
     242           0 :     bool areRefreshFrameSlicesEnabled() const { return refreshFrameSlicesEnabled_; }
     243          74 :     unsigned minEmptyChunkCount(const AutoLockGC&) const { return minEmptyChunkCount_; }
     244           0 :     unsigned maxEmptyChunkCount() const { return maxEmptyChunkCount_; }
     245             : 
     246             :     MOZ_MUST_USE bool setParameter(JSGCParamKey key, uint32_t value, const AutoLockGC& lock);
     247             : };
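                     :
                     : // Embedders normally reach these tunables through the public JSAPI
                     : // rather than this class directly; a minimal sketch, assuming an
                     : // initialized JSContext* cx:
                     : //
                     : //     JS_SetGCParameter(cx, JSGC_MAX_BYTES, 64 * 1024 * 1024);
                     : //     JS_SetGCParameter(cx, JSGC_MODE, JSGC_MODE_INCREMENTAL);
                     : //
                     : // Such calls funnel into the setParameter method above, under the GC
                     : // lock.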
     248             : 
     249             : /*
     250             :  * GC Scheduling Overview
     251             :  * ======================
     252             :  *
      253             :  * Scheduling GCs in SpiderMonkey/Firefox is tremendously complicated because
     254             :  * of the large number of subtle, cross-cutting, and widely dispersed factors
     255             :  * that must be taken into account. A summary of some of the more important
     256             :  * factors follows.
     257             :  *
     258             :  * Cost factors:
     259             :  *
     260             :  *   * GC too soon and we'll revisit an object graph almost identical to the
     261             :  *     one we just visited; since we are unlikely to find new garbage, the
     262             :  *     traversal will be largely overhead. We rely heavily on external factors
     263             :  *     to signal us that we are likely to find lots of garbage: e.g. "a tab
     264             :  *     just got closed".
     265             :  *
      266             :  *   * GC too late and we'll run out of memory to allocate (i.e. Out-Of-Memory,
     267             :  *     hereafter simply abbreviated to OOM). If this happens inside
     268             :  *     SpiderMonkey we may be able to recover, but most embedder allocations
     269             :  *     will simply crash on OOM, even if the GC has plenty of free memory it
     270             :  *     could surrender.
     271             :  *
     272             :  *   * Memory fragmentation: if we fill the process with GC allocations, a
     273             :  *     request for a large block of contiguous memory may fail because no
     274             :  *     contiguous block is free, despite having enough memory available to
     275             :  *     service the request.
     276             :  *
     277             :  *   * Management overhead: if our GC heap becomes large, we create extra
     278             :  *     overhead when managing the GC's structures, even if the allocations are
     279             :  *     mostly unused.
     280             :  *
     281             :  * Heap Management Factors:
     282             :  *
     283             :  *   * GC memory: The GC has its own allocator that it uses to make fixed size
     284             :  *     allocations for GC managed things. In cases where the GC thing requires
     285             :  *     larger or variable sized memory to implement itself, it is responsible
     286             :  *     for using the system heap.
     287             :  *
     288             :  *   * C Heap Memory: Rather than allowing for large or variable allocations,
     289             :  *     the SpiderMonkey GC allows GC things to hold pointers to C heap memory.
     290             :  *     It is the responsibility of the thing to free this memory with a custom
     291             :  *     finalizer (with the sole exception of NativeObject, which knows about
     292             :  *     slots and elements for performance reasons). C heap memory has different
      293             :  *     performance and overhead tradeoffs than GC internal memory, which must
      294             :  *     be considered when scheduling a GC.
     295             :  *
     296             :  * Application Factors:
     297             :  *
     298             :  *   * Most applications allocate heavily at startup, then enter a processing
     299             :  *     stage where memory utilization remains roughly fixed with a slower
     300             :  *     allocation rate. This is not always the case, however, so while we may
     301             :  *     optimize for this pattern, we must be able to handle arbitrary
     302             :  *     allocation patterns.
     303             :  *
     304             :  * Other factors:
     305             :  *
     306             :  *   * Other memory: This is memory allocated outside the purview of the GC.
     307             :  *     Data mapped by the system for code libraries, data allocated by those
     308             :  *     libraries, data in the JSRuntime that is used to manage the engine,
     309             :  *     memory used by the embedding that is not attached to a GC thing, memory
     310             :  *     used by unrelated processes running on the hardware that use space we
     311             :  *     could otherwise use for allocation, etc. While we don't have to manage
     312             :  *     it, we do have to take it into account when scheduling since it affects
     313             :  *     when we will OOM.
     314             :  *
     315             :  *   * Physical Reality: All real machines have limits on the number of bits
     316             :  *     that they are physically able to store. While modern operating systems
     317             :  *     can generally make additional space available with swapping, at some
     318             :  *     point there are simply no more bits to allocate. There is also the
      319             :  *     factor of address space limitations, particularly on 32-bit machines.
     320             :  *
     321             :  *   * Platform Factors: Each OS makes use of wildly different memory
     322             :  *     management techniques. These differences result in different performance
     323             :  *     tradeoffs, different fragmentation patterns, and different hard limits
     324             :  *     on the amount of physical and/or virtual memory that we can use before
     325             :  *     OOMing.
     326             :  *
     327             :  *
     328             :  * Reasons for scheduling GC
     329             :  * -------------------------
     330             :  *
     331             :  *  While code generally takes the above factors into account in only an ad-hoc
     332             :  *  fashion, the API forces the user to pick a "reason" for the GC. We have a
     333             :  *  bunch of JS::gcreason reasons in GCAPI.h. These fall into a few categories
     334             :  *  that generally coincide with one or more of the above factors.
     335             :  *
     336             :  *  Embedding reasons:
     337             :  *
     338             :  *   1) Do a GC now because the embedding knows something useful about the
     339             :  *      zone's memory retention state. These are gcreasons like LOAD_END,
     340             :  *      PAGE_HIDE, SET_NEW_DOCUMENT, DOM_UTILS. Mostly, Gecko uses these to
     341             :  *      indicate that a significant fraction of the scheduled zone's memory is
     342             :  *      probably reclaimable.
     343             :  *
     344             :  *   2) Do some known amount of GC work now because the embedding knows now is
     345             :  *      a good time to do a long, unblockable operation of a known duration.
     346             :  *      These are INTER_SLICE_GC and REFRESH_FRAME.
     347             :  *
     348             :  *  Correctness reasons:
     349             :  *
     350             :  *   3) Do a GC now because correctness depends on some GC property. For
      351             :  *      example, CC_WAITING is where the embedding requires the mark bits
      352             :  *      to be set correctly. Also, EVICT_NURSERY, where we need to work on
      353             :  *      the tenured heap.
     354             :  *
     355             :  *   4) Do a GC because we are shutting down: e.g. SHUTDOWN_CC or DESTROY_*.
     356             :  *
     357             :  *   5) Do a GC because a compartment was accessed between GC slices when we
     358             :  *      would have otherwise discarded it. We have to do a second GC to clean
     359             :  *      it up: e.g. COMPARTMENT_REVIVED.
     360             :  *
     361             :  *  Emergency Reasons:
     362             :  *
     363             :  *   6) Do an all-zones, non-incremental GC now because the embedding knows it
     364             :  *      cannot wait: e.g. MEM_PRESSURE.
     365             :  *
     366             :  *   7) OOM when fetching a new Chunk results in a LAST_DITCH GC.
     367             :  *
     368             :  *  Heap Size Limitation Reasons:
     369             :  *
     370             :  *   8) Do an incremental, zonal GC with reason MAYBEGC when we discover that
      371             :  *      the GC's allocated size is approaching the current trigger. This is
     372             :  *      called MAYBEGC because we make this check in the MaybeGC function.
     373             :  *      MaybeGC gets called at the top of the main event loop. Normally, it is
     374             :  *      expected that this callback will keep the heap size limited. It is
     375             :  *      relatively inexpensive, because it is invoked with no JS running and
      376             :  *      thus few stack roots to scan. For this reason, the GC's "trigger"
      377             :  *      bytes value is less than the GC's "max" bytes as used by the trigger below.
     378             :  *
     379             :  *   9) Do an incremental, zonal GC with reason MAYBEGC when we go to allocate
     380             :  *      a new GC thing and find that the GC heap size has grown beyond the
     381             :  *      configured maximum (JSGC_MAX_BYTES). We trigger this GC by returning
     382             :  *      nullptr and then calling maybeGC at the top level of the allocator.
     383             :  *      This is then guaranteed to fail the "size greater than trigger" check
     384             :  *      above, since trigger is always less than max. After performing the GC,
      385             :  *      the allocator unconditionally returns nullptr to force an OOM exception
      386             :  *      to be raised in the script.
     387             :  *
     388             :  *      Note that this differs from a LAST_DITCH GC where we actually run out
     389             :  *      of memory (i.e., a call to a system allocator fails) when trying to
     390             :  *      allocate. Unlike above, LAST_DITCH GC only happens when we are really
     391             :  *      out of memory, not just when we cross an arbitrary trigger; despite
     392             :  *      this, it may still return an allocation at the end and allow the script
     393             :  *      to continue, if the LAST_DITCH GC was able to free up enough memory.
     394             :  *
     395             :  *  10) Do a GC under reason ALLOC_TRIGGER when we are over the GC heap trigger
     396             :  *      limit, but in the allocator rather than in a random call to maybeGC.
     397             :  *      This occurs if we allocate too much before returning to the event loop
     398             :  *      and calling maybeGC; this is extremely common in benchmarks and
     399             :  *      long-running Worker computations. Note that this uses a wildly
     400             :  *      different mechanism from the above in that it sets the interrupt flag
     401             :  *      and does the GC at the next loop head, before the next alloc, or
     402             :  *      maybeGC. The reason for this is that this check is made after the
     403             :  *      allocation and we cannot GC with an uninitialized thing in the heap.
     404             :  *
     405             :  *  11) Do an incremental, zonal GC with reason TOO_MUCH_MALLOC when we have
     406             :  *      malloced more than JSGC_MAX_MALLOC_BYTES in a zone since the last GC.
     407             :  *
     408             :  *
     409             :  * Size Limitation Triggers Explanation
     410             :  * ------------------------------------
     411             :  *
     412             :  *  The GC internally is entirely unaware of the context of the execution of
     413             :  *  the mutator. It sees only:
     414             :  *
     415             :  *   A) Allocated size: this is the amount of memory currently requested by the
     416             :  *      mutator. This quantity is monotonically increasing: i.e. the allocation
     417             :  *      rate is always >= 0. It is also easy for the system to track.
     418             :  *
     419             :  *   B) Retained size: this is the amount of memory that the mutator can
     420             :  *      currently reach. Said another way, it is the size of the heap
     421             :  *      immediately after a GC (modulo background sweeping). This size is very
     422             :  *      costly to know exactly and also extremely hard to estimate with any
     423             :  *      fidelity.
     424             :  *
     425             :  *   For reference, a common allocated vs. retained graph might look like:
     426             :  *
     427             :  *       |                                  **         **
     428             :  *       |                       **       ** *       **
     429             :  *       |                     ** *     **   *     **
     430             :  *       |           *       **   *   **     *   **
     431             :  *       |          **     **     * **       * **
     432             :  *      s|         * *   **       ** +  +    **
     433             :  *      i|        *  *  *      +  +       +  +     +
     434             :  *      z|       *   * * +  +                   +     +  +
     435             :  *      e|      *    **+
     436             :  *       |     *     +
     437             :  *       |    *    +
     438             :  *       |   *   +
     439             :  *       |  *  +
     440             :  *       | * +
     441             :  *       |*+
     442             :  *       +--------------------------------------------------
     443             :  *                               time
     444             :  *                                           *** = allocated
     445             :  *                                           +++ = retained
     446             :  *
     447             :  *           Note that this is a bit of a simplification
     448             :  *           because in reality we track malloc and GC heap
     449             :  *           sizes separately and have a different level of
     450             :  *           granularity and accuracy on each heap.
     451             :  *
     452             :  *   This presents some obvious implications for Mark-and-Sweep collectors.
     453             :  *   Namely:
     454             :  *       -> t[marking] ~= size[retained]
     455             :  *       -> t[sweeping] ~= size[allocated] - size[retained]
     456             :  *
     457             :  *   In a non-incremental collector, maintaining low latency and high
     458             :  *   responsiveness requires that total GC times be as low as possible. Thus,
     459             :  *   in order to stay responsive when we did not have a fully incremental
     460             :  *   collector, our GC triggers were focused on minimizing collection time.
     461             :  *   Furthermore, since size[retained] is not under control of the GC, all the
     462             :  *   GC could do to control collection times was reduce sweep times by
     463             :  *   minimizing size[allocated], per the equation above.
     464             :  *
     465             :  *   The result of the above is GC triggers that focus on size[allocated] to
     466             :  *   the exclusion of other important factors and default heuristics that are
     467             :  *   not optimal for a fully incremental collector. On the other hand, this is
     468             :  *   not all bad: minimizing size[allocated] also minimizes the chance of OOM
     469             :  *   and sweeping remains one of the hardest areas to further incrementalize.
     470             :  *
     471             :  *      EAGER_ALLOC_TRIGGER
     472             :  *      -------------------
     473             :  *      Occurs when we return to the event loop and find our heap is getting
     474             :  *      largish, but before t[marking] OR t[sweeping] is too large for a
     475             :  *      responsive non-incremental GC. This is intended to be the common case
     476             :  *      in normal web applications: e.g. we just finished an event handler and
     477             :  *      the few objects we allocated when computing the new whatzitz have
     478             :  *      pushed us slightly over the limit. After this GC we rescale the new
     479             :  *      EAGER_ALLOC_TRIGGER trigger to 150% of size[retained] so that our
     480             :  *      non-incremental GC times will always be proportional to this size
     481             :  *      rather than being dominated by sweeping.
     482             :  *
     483             :  *      As a concession to mutators that allocate heavily during their startup
     484             :  *      phase, we have a highFrequencyGCMode that ups the growth rate to 300%
      485             :  *      of the current size[retained] so that we'll do fewer, longer GCs at the
      486             :  *      end of the mutator startup rather than more, smaller ones.
     487             :  *
     488             :  *          Assumptions:
     489             :  *            -> Responsiveness is proportional to t[marking] + t[sweeping].
     490             :  *            -> size[retained] is proportional only to GC allocations.
     491             :  *
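                     :  *
                     :  *          Worked example (illustrative numbers): if a GC leaves
                     :  *          size[retained] at 40MB, the rescaled EAGER_ALLOC_TRIGGER
                     :  *          fires near 1.5 * 40MB = 60MB; in highFrequencyGCMode the
                     :  *          300% growth rate moves that to 3.0 * 40MB = 120MB.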
     492             :  *      ALLOC_TRIGGER (non-incremental)
     493             :  *      -------------------------------
     494             :  *      If we do not return to the event loop before getting all the way to our
     495             :  *      gc trigger bytes then MAYBEGC will never fire. To avoid OOMing, we
     496             :  *      succeed the current allocation and set the script interrupt so that we
     497             :  *      will (hopefully) do a GC before we overflow our max and have to raise
     498             :  *      an OOM exception for the script.
     499             :  *
     500             :  *          Assumptions:
     501             :  *            -> Common web scripts will return to the event loop before using
     502             :  *               10% of the current gcTriggerBytes worth of GC memory.
     503             :  *
     504             :  *      ALLOC_TRIGGER (incremental)
     505             :  *      ---------------------------
     506             :  *      In practice the above trigger is rough: if a website is just on the
     507             :  *      cusp, sometimes it will trigger a non-incremental GC moments before
     508             :  *      returning to the event loop, where it could have done an incremental
     509             :  *      GC. Thus, we recently added an incremental version of the above with a
     510             :  *      substantially lower threshold, so that we have a soft limit here. If
     511             :  *      IGC can collect faster than the allocator generates garbage, even if
     512             :  *      the allocator does not return to the event loop frequently, we should
     513             :  *      not have to fall back to a non-incremental GC.
     514             :  *
     515             :  *      INCREMENTAL_TOO_SLOW
     516             :  *      --------------------
     517             :  *      Do a full, non-incremental GC if we overflow ALLOC_TRIGGER during an
     518             :  *      incremental GC. When in the middle of an incremental GC, we suppress
     519             :  *      our other triggers, so we need a way to backstop the IGC if the
     520             :  *      mutator allocates faster than the IGC can clean things up.
     521             :  *
     522             :  *      TOO_MUCH_MALLOC
     523             :  *      ---------------
     524             :  *      Performs a GC before size[allocated] - size[retained] gets too large
     525             :  *      for non-incremental sweeping to be fast in the case that we have
     526             :  *      significantly more malloc allocation than GC allocation. This is meant
     527             :  *      to complement MAYBEGC triggers. We track this by counting malloced
     528             :  *      bytes; the counter gets reset at every GC since we do not always have a
      529             :  *      size at the time we call free. Because of this, the malloc counter
      530             :  *      cannot usefully augment our other GC heap triggers and is limited to
      531             :  *      this single heuristic.
     532             :  *
     533             :  *          Assumptions:
     534             :  *            -> EITHER size[allocated_by_malloc] ~= size[allocated_by_GC]
     535             :  *                 OR   time[sweeping] ~= size[allocated_by_malloc]
     536             :  *            -> size[retained] @ t0 ~= size[retained] @ t1
      537             :  *               i.e. that the mutator is in steady-state operation.
     538             :  *
     539             :  *      LAST_DITCH_GC
     540             :  *      -------------
     541             :  *      Does a GC because we are out of memory.
     542             :  *
     543             :  *          Assumptions:
     544             :  *            -> size[retained] < size[available_memory]
     545             :  */
     546             : class GCSchedulingState
     547             : {
     548             :     /*
      549             :      * Influences how we schedule and run GCs in several subtle ways. The most
     550             :      * important factor is in how it controls the "HeapGrowthFactor". The
     551             :      * growth factor is a measure of how large (as a percentage of the last GC)
     552             :      * the heap is allowed to grow before we try to schedule another GC.
     553             :      */
     554             :     ActiveThreadData<bool> inHighFrequencyGCMode_;
     555             : 
     556             :   public:
     557           4 :     GCSchedulingState()
     558           4 :       : inHighFrequencyGCMode_(false)
     559           4 :     {}
     560             : 
     561        3944 :     bool inHighFrequencyGCMode() const { return inHighFrequencyGCMode_; }
     562             : 
     563           0 :     void updateHighFrequencyMode(uint64_t lastGCTime, uint64_t currentTime,
     564             :                                  const GCSchedulingTunables& tunables) {
     565             :         inHighFrequencyGCMode_ =
     566           0 :             tunables.isDynamicHeapGrowthEnabled() && lastGCTime &&
     567           0 :             lastGCTime + tunables.highFrequencyThresholdUsec() > currentTime;
     568           0 :     }
     569             : };
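                     :
                     : // Worked example for updateHighFrequencyMode, assuming dynamic heap
                     : // growth is enabled and the default highFrequencyThresholdUsec of
                     : // 1000 * 1000 (one second): a GC starting 0.4s after the last GC
                     : // time enters high-frequency mode, while a 2s gap leaves it.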
     570             : 
     571             : template<typename F>
     572             : struct Callback {
     573             :     ActiveThreadOrGCTaskData<F> op;
     574             :     ActiveThreadOrGCTaskData<void*> data;
     575             : 
     576          16 :     Callback()
     577          16 :       : op(nullptr), data(nullptr)
     578          16 :     {}
     579          25 :     Callback(F op, void* data)
     580          25 :       : op(op), data(data)
     581          25 :     {}
     582             : };
     583             : 
     584             : template<typename F>
     585             : using CallbackVector = ActiveThreadData<Vector<Callback<F>, 4, SystemAllocPolicy>>;
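                     :
                     : // Shape of a stored callback; a minimal sketch, assuming the public
                     : // JSGCCallback typedef and hypothetical myCallback/myData values:
                     : //
                     : //     Callback<JSGCCallback> cb(myCallback, myData);
                     : //     if (cb.op)
                     : //         cb.op(cx, status, cb.data);  // invocation pattern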
     586             : 
     587             : template <typename T, typename Iter0, typename Iter1>
     588             : class ChainedIter
     589             : {
     590             :     Iter0 iter0_;
     591             :     Iter1 iter1_;
     592             : 
     593             :   public:
     594           0 :     ChainedIter(const Iter0& iter0, const Iter1& iter1)
     595           0 :       : iter0_(iter0), iter1_(iter1)
     596           0 :     {}
     597             : 
     598           0 :     bool done() const { return iter0_.done() && iter1_.done(); }
     599           0 :     void next() {
     600           0 :         MOZ_ASSERT(!done());
     601           0 :         if (!iter0_.done()) {
     602           0 :             iter0_.next();
     603             :         } else {
     604           0 :             MOZ_ASSERT(!iter1_.done());
     605           0 :             iter1_.next();
     606             :         }
     607           0 :     }
     608           0 :     T get() const {
     609           0 :         MOZ_ASSERT(!done());
     610           0 :         if (!iter0_.done())
     611           0 :             return iter0_.get();
     612           0 :         MOZ_ASSERT(!iter1_.done());
     613           0 :         return iter1_.get();
     614             :     }
     615             : 
     616           0 :     operator T() const { return get(); }
     617           0 :     T operator->() const { return get(); }
     618             : };
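                     :
                     : // ChainedIter drains iter0_ before moving on to iter1_.
                     : // GCRuntime::allNonEmptyChunks() below uses it to present the
                     : // available and full chunk pools as a single sequence:
                     : //
                     : //     for (auto iter = allNonEmptyChunks(); !iter.done(); iter.next())
                     : //         use(iter.get());  // 'use' is a hypothetical consumer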
     619             : 
     620             : typedef HashMap<Value*, const char*, DefaultHasher<Value*>, SystemAllocPolicy> RootedValueMap;
     621             : 
     622             : using AllocKinds = mozilla::EnumSet<AllocKind>;
     623             : 
     624             : template <typename T>
     625             : class MemoryCounter
     626             : {
     627             :     // Bytes counter to measure memory pressure for GC scheduling. It runs
     628             :     // from maxBytes down to zero.
     629             :     mozilla::Atomic<ptrdiff_t, mozilla::ReleaseAcquire> bytes_;
     630             : 
     631             :     // GC trigger threshold for memory allocations.
     632             :     js::ActiveThreadData<size_t> maxBytes_;
     633             : 
     634             :     // Whether a GC has been triggered as a result of bytes falling below
     635             :     // zero.
     636             :     //
     637             :     // This should be a bool, but Atomic only supports 32-bit and pointer-sized
     638             :     // types.
     639             :     mozilla::Atomic<uint32_t, mozilla::ReleaseAcquire> triggered_;
     640             : 
     641             :   public:
     642          66 :     MemoryCounter()
     643             :       : bytes_(0),
     644             :         maxBytes_(0),
     645          66 :         triggered_(false)
     646          66 :     { }
     647             : 
     648         208 :     void reset() {
     649         208 :         bytes_ = maxBytes_;
     650         208 :         triggered_ = false;
     651         208 :     }
     652             : 
     653         109 :     void setMax(size_t newMax) {
      654             :         // For compatibility, clamp any value that exceeds PTRDIFF_T_MAX to
      655             :         // PTRDIFF_T_MAX (the largest positive ptrdiff_t).
     656         109 :         maxBytes_ = (ptrdiff_t(newMax) >= 0) ? newMax : size_t(-1) >> 1;
     657         109 :         reset();
     658         109 :     }
     659             : 
     660      212880 :     bool update(T* owner, size_t bytes) {
     661      212880 :         bytes_ -= ptrdiff_t(bytes);
     662      212883 :         if (MOZ_UNLIKELY(isTooMuchMalloc())) {
     663           0 :             if (!triggered_)
     664           0 :                 triggered_ = owner->triggerGCForTooMuchMalloc();
     665             :         }
     666      212883 :         return triggered_;
     667             :     }
     668             : 
     669           0 :     ptrdiff_t bytes() const { return bytes_; }
     670          51 :     size_t maxBytes() const { return maxBytes_; }
     671      212982 :     bool isTooMuchMalloc() const { return bytes_ <= 0; }
     672             : };
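                     :
                     : // Worked example: after setMax(64 * 1024 * 1024), bytes() starts at
                     : // 64MB and counts down; each update(owner, nbytes) subtracts nbytes,
                     : // and the first update to see the counter at or below zero asks the
                     : // owner to trigger a TOO_MUCH_MALLOC GC (triggerGCForTooMuchMalloc
                     : // below). reset(), called at each GC, restarts the countdown.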
     673             : 
     674           0 : class GCRuntime
     675             : {
     676             :   public:
     677             :     explicit GCRuntime(JSRuntime* rt);
     678             :     MOZ_MUST_USE bool init(uint32_t maxbytes, uint32_t maxNurseryBytes);
     679             :     void finishRoots();
     680             :     void finish();
     681             : 
     682             :     inline bool hasZealMode(ZealMode mode);
     683             :     inline void clearZealMode(ZealMode mode);
     684             :     inline bool upcomingZealousGC();
     685             :     inline bool needZealousGC();
     686             : 
     687             :     MOZ_MUST_USE bool addRoot(Value* vp, const char* name);
     688             :     void removeRoot(Value* vp);
     689             :     void setMarkStackLimit(size_t limit, AutoLockGC& lock);
     690             : 
     691             :     MOZ_MUST_USE bool setParameter(JSGCParamKey key, uint32_t value, AutoLockGC& lock);
     692             :     uint32_t getParameter(JSGCParamKey key, const AutoLockGC& lock);
     693             : 
     694             :     MOZ_MUST_USE bool triggerGC(JS::gcreason::Reason reason);
     695             :     void maybeAllocTriggerZoneGC(Zone* zone, const AutoLockGC& lock);
     696             :     // The return value indicates if we were able to do the GC.
     697             :     bool triggerZoneGC(Zone* zone, JS::gcreason::Reason reason,
     698             :                        size_t usedBytes, size_t thresholdBytes);
     699             :     void maybeGC(Zone* zone);
     700             :     // The return value indicates whether a major GC was performed.
     701             :     bool gcIfRequested();
     702             :     void gc(JSGCInvocationKind gckind, JS::gcreason::Reason reason);
     703             :     void startGC(JSGCInvocationKind gckind, JS::gcreason::Reason reason, int64_t millis = 0);
     704             :     void gcSlice(JS::gcreason::Reason reason, int64_t millis = 0);
     705             :     void finishGC(JS::gcreason::Reason reason);
     706             :     void abortGC();
     707             :     void startDebugGC(JSGCInvocationKind gckind, SliceBudget& budget);
     708             :     void debugGCSlice(SliceBudget& budget);
     709             : 
     710             :     bool canChangeActiveContext(JSContext* cx);
     711             : 
     712           0 :     void triggerFullGCForAtoms() {
     713           0 :         MOZ_ASSERT(fullGCForAtomsRequested_);
     714           0 :         fullGCForAtomsRequested_ = false;
     715           0 :         MOZ_RELEASE_ASSERT(triggerGC(JS::gcreason::ALLOC_TRIGGER));
     716           0 :     }
     717             : 
     718             :     void runDebugGC();
     719             :     void notifyRootsRemoved();
     720             : 
     721             :     enum TraceOrMarkRuntime {
     722             :         TraceRuntime,
     723             :         MarkRuntime
     724             :     };
     725             :     void traceRuntime(JSTracer* trc, AutoLockForExclusiveAccess& lock);
     726             :     void traceRuntimeForMinorGC(JSTracer* trc, AutoLockForExclusiveAccess& lock);
     727             : 
     728             :     void notifyDidPaint();
     729             :     void shrinkBuffers();
     730             :     void onOutOfMallocMemory();
     731             :     void onOutOfMallocMemory(const AutoLockGC& lock);
     732             : 
     733             : #ifdef JS_GC_ZEAL
     734         115 :     const void* addressOfZealModeBits() { return &zealModeBits; }
     735             :     void getZealBits(uint32_t* zealBits, uint32_t* frequency, uint32_t* nextScheduled);
     736             :     void setZeal(uint8_t zeal, uint32_t frequency);
     737             :     bool parseAndSetZeal(const char* str);
     738             :     void setNextScheduled(uint32_t count);
     739             :     void verifyPreBarriers();
     740             :     void maybeVerifyPreBarriers(bool always);
     741             :     bool selectForMarking(JSObject* object);
     742             :     void clearSelectedForMarking();
     743             :     void setDeterministic(bool enable);
     744             : #endif
     745             : 
     746        6693 :     uint64_t nextCellUniqueId() {
     747        6693 :         MOZ_ASSERT(nextCellUniqueId_ > 0);
     748        6693 :         uint64_t uid = ++nextCellUniqueId_;
     749        6693 :         return uid;
     750             :     }
     751             : 
     752             : #ifdef DEBUG
     753           0 :     bool shutdownCollectedEverything() const {
     754           0 :         return arenasEmptyAtShutdown;
     755             :     }
     756             : #endif
     757             : 
     758             :   public:
     759             :     // Internal public interface
     760      404287 :     State state() const { return incrementalState; }
     761       24490 :     bool isHeapCompacting() const { return state() == State::Compact; }
     762           0 :     bool isForegroundSweeping() const { return state() == State::Sweep; }
     763       24490 :     bool isBackgroundSweeping() { return helperState.isBackgroundSweeping(); }
     764          50 :     void waitBackgroundSweepEnd() { helperState.waitBackgroundSweepEnd(); }
     765           0 :     void waitBackgroundSweepOrAllocEnd() {
     766           0 :         helperState.waitBackgroundSweepEnd();
     767           0 :         allocTask.cancel(GCParallelTask::CancelAndWait);
     768           0 :     }
     769             : 
     770             : #ifdef DEBUG
     771     5462332 :     bool onBackgroundThread() { return helperState.onBackgroundThread(); }
     772             : #endif // DEBUG
     773             : 
     774             :     void lockGC() {
     775             :         lock.lock();
     776             :     }
     777             : 
     778             :     void unlockGC() {
     779             :         lock.unlock();
     780             :     }
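                     :
                     :     // Callers normally take this lock through the RAII AutoLockGC
                     :     // helper (forward-declared at the top of this file) rather than
                     :     // pairing lockGC()/unlockGC() by hand; the AutoLockGC& parameters
                     :     // threaded through APIs like minEmptyChunkCount() and
                     :     // availableChunks() show at the type level that the lock is held.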
     781             : 
     782             : #ifdef DEBUG
     783         678 :     bool currentThreadHasLockedGC() const {
     784         678 :         return lock.ownedByCurrentThread();
     785             :     }
     786             : #endif // DEBUG
     787             : 
     788           0 :     void setAlwaysPreserveCode() { alwaysPreserveCode = true; }
     789             : 
     790           3 :     bool isIncrementalGCAllowed() const { return incrementalAllowed; }
     791           0 :     void disallowIncrementalGC() { incrementalAllowed = false; }
     792             : 
     793           1 :     bool isIncrementalGCEnabled() const { return mode == JSGC_MODE_INCREMENTAL && incrementalAllowed; }
     794      353842 :     bool isIncrementalGCInProgress() const { return state() != State::NotActive; }
     795             : 
     796             :     bool isCompactingGCEnabled() const;
     797             : 
     798           0 :     bool isShrinkingGC() const { return invocationKind == GC_SHRINK; }
     799             : 
     800             :     static bool initializeSweepActions();
     801             : 
     802             :     void setGrayRootsTracer(JSTraceDataOp traceOp, void* data);
     803             :     MOZ_MUST_USE bool addBlackRootsTracer(JSTraceDataOp traceOp, void* data);
     804             :     void removeBlackRootsTracer(JSTraceDataOp traceOp, void* data);
     805             : 
     806           0 :     bool triggerGCForTooMuchMalloc() {
     807           0 :         stats().recordTrigger(mallocCounter.bytes(), mallocCounter.maxBytes());
     808           0 :         return triggerGC(JS::gcreason::TOO_MUCH_MALLOC);
     809             :     }
     810           0 :     int32_t getMallocBytes() const { return mallocCounter.bytes(); }
     811          51 :     size_t maxMallocBytesAllocated() const { return mallocCounter.maxBytes(); }
     812           3 :     bool isTooMuchMalloc() const { return mallocCounter.isTooMuchMalloc(); }
     813           3 :     void resetMallocBytes() { mallocCounter.reset(); }
     814             :     void setMaxMallocBytes(size_t value);
     815             :     void updateMallocCounter(JS::Zone* zone, size_t nbytes);
     816             : 
     817             :     void setGCCallback(JSGCCallback callback, void* data);
     818             :     void callGCCallback(JSGCStatus status) const;
     819             :     void setObjectsTenuredCallback(JSObjectsTenuredCallback callback,
     820             :                                    void* data);
     821             :     void callObjectsTenuredCallback();
     822             :     MOZ_MUST_USE bool addFinalizeCallback(JSFinalizeCallback callback, void* data);
     823             :     void removeFinalizeCallback(JSFinalizeCallback func);
     824             :     MOZ_MUST_USE bool addWeakPointerZonesCallback(JSWeakPointerZonesCallback callback,
     825             :                                                       void* data);
     826             :     void removeWeakPointerZonesCallback(JSWeakPointerZonesCallback callback);
     827             :     MOZ_MUST_USE bool addWeakPointerCompartmentCallback(JSWeakPointerCompartmentCallback callback,
     828             :                                                         void* data);
     829             :     void removeWeakPointerCompartmentCallback(JSWeakPointerCompartmentCallback callback);
     830             :     JS::GCSliceCallback setSliceCallback(JS::GCSliceCallback callback);
     831             :     JS::GCNurseryCollectionCallback setNurseryCollectionCallback(
     832             :         JS::GCNurseryCollectionCallback callback);
     833             :     JS::DoCycleCollectionCallback setDoCycleCollectionCallback(JS::DoCycleCollectionCallback callback);
     834             :     void callDoCycleCollectionCallback(JSContext* cx);
     835             : 
     836             :     void setFullCompartmentChecks(bool enable);
     837             : 
     838           0 :     JS::Zone* getCurrentSweepGroup() { return currentSweepGroup; }
     839           0 :     void setFoundBlackGrayEdges(TenuredCell& target) {
     840           0 :         AutoEnterOOMUnsafeRegion oomUnsafe;
     841           0 :         if (!foundBlackGrayEdges.ref().append(&target))
     842           0 :             oomUnsafe.crash("OOM|small: failed to insert into foundBlackGrayEdges");
     843           0 :     }
     844             : 
     845      185427 :     uint64_t gcNumber() const { return number; }
     846             : 
     847          24 :     uint64_t minorGCCount() const { return minorGCNumber; }
     848          24 :     void incMinorGcNumber() { ++minorGCNumber; ++number; }
     849             : 
     850         205 :     uint64_t majorGCCount() const { return majorGCNumber; }
     851           1 :     void incMajorGcNumber() { ++majorGCNumber; ++number; }
     852             : 
     853           3 :     int64_t defaultSliceBudget() const { return defaultTimeBudget_; }
     854             : 
     855           0 :     bool isIncrementalGc() const { return isIncremental; }
     856             :     bool isFullGc() const { return isFull; }
     857           0 :     bool isCompactingGc() const { return isCompacting; }
     858             : 
     859      873317 :     bool areGrayBitsValid() const { return grayBitsValid; }
     860           0 :     void setGrayBitsInvalid() { grayBitsValid = false; }
     861             : 
     862       54334 :     bool majorGCRequested() const { return majorGCTriggerReason != JS::gcreason::NO_REASON; }
     863             : 
     864        2202 :     bool fullGCForAtomsRequested() const { return fullGCForAtomsRequested_; }
     865             : 
     866             :     double computeHeapGrowthFactor(size_t lastBytes);
     867             :     size_t computeTriggerBytes(double growthFactor, size_t lastBytes);
     868             : 
     869          48 :     JSGCMode gcMode() const { return mode; }
     870           4 :     void setGCMode(JSGCMode m) {
     871           4 :         mode = m;
     872           4 :         marker.setGCMode(mode);
     873           4 :     }
     874             : 
     875             :     inline void updateOnFreeArenaAlloc(const ChunkInfo& info);
     876             :     inline void updateOnArenaFree(const ChunkInfo& info);
     877             : 
     878          54 :     ChunkPool& fullChunks(const AutoLockGC& lock) { return fullChunks_.ref(); }
     879       13378 :     ChunkPool& availableChunks(const AutoLockGC& lock) { return availableChunks_.ref(); }
     880          44 :     ChunkPool& emptyChunks(const AutoLockGC& lock) { return emptyChunks_.ref(); }
     881          56 :     const ChunkPool& fullChunks(const AutoLockGC& lock) const { return fullChunks_.ref(); }
     882          56 :     const ChunkPool& availableChunks(const AutoLockGC& lock) const { return availableChunks_.ref(); }
     883          74 :     const ChunkPool& emptyChunks(const AutoLockGC& lock) const { return emptyChunks_.ref(); }
     884             :     typedef ChainedIter<Chunk*, ChunkPool::Iter, ChunkPool::Iter> NonEmptyChunksIter;
     885           0 :     NonEmptyChunksIter allNonEmptyChunks() {
     886           0 :         return NonEmptyChunksIter(ChunkPool::Iter(availableChunks_.ref()), ChunkPool::Iter(fullChunks_.ref()));
     887             :     }
     888             : 
     889             :     Chunk* getOrAllocChunk(const AutoLockGC& lock,
     890             :                            AutoMaybeStartBackgroundAllocation& maybeStartBGAlloc);
     891             :     void recycleChunk(Chunk* chunk, const AutoLockGC& lock);
     892             : 
     893             : #ifdef JS_GC_ZEAL
     894             :     void startVerifyPreBarriers();
     895             :     void endVerifyPreBarriers();
     896             :     void finishVerifier();
     897          34 :     bool isVerifyPreBarriersEnabled() const { return !!verifyPreData; }
     898             : #else
     899             :     bool isVerifyPreBarriersEnabled() const { return false; }
     900             : #endif
     901             : 
     902             :     // Free certain LifoAlloc blocks when it is safe to do so.
     903             :     void freeUnusedLifoBlocksAfterSweeping(LifoAlloc* lifo);
     904             :     void freeAllLifoBlocksAfterSweeping(LifoAlloc* lifo);
     905             : 
     906             :     // Public here for ReleaseArenaLists and FinalizeTypedArenas.
     907             :     void releaseArena(Arena* arena, const AutoLockGC& lock);
     908             : 
     909             :     void releaseHeldRelocatedArenas();
     910             :     void releaseHeldRelocatedArenasWithoutUnlocking(const AutoLockGC& lock);
     911             : 
     912             :     // Allocator
     913             :     template <AllowGC allowGC>
     914             :     MOZ_MUST_USE bool checkAllocatorState(JSContext* cx, AllocKind kind);
     915             :     template <AllowGC allowGC>
     916             :     JSObject* tryNewNurseryObject(JSContext* cx, size_t thingSize, size_t nDynamicSlots,
     917             :                                   const Class* clasp);
     918             :     template <AllowGC allowGC>
     919             :     static JSObject* tryNewTenuredObject(JSContext* cx, AllocKind kind, size_t thingSize,
     920             :                                          size_t nDynamicSlots);
     921             :     template <typename T, AllowGC allowGC>
     922             :     static T* tryNewTenuredThing(JSContext* cx, AllocKind kind, size_t thingSize);
     923             :     static TenuredCell* refillFreeListInGC(Zone* zone, AllocKind thingKind);
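                     : 
                     :     // Rough shape of the object allocation slow path implied by the
                     :     // declarations above (a sketch, not the literal implementation):
                     :     // nursery-allocatable objects are tried in the nursery first and fall
                     :     // back to the tenured heap on failure.
                     :     //
                     :     //     if (!checkAllocatorState<allowGC>(cx, kind))
                     :     //         return nullptr;
                     :     //     if (JSObject* obj = tryNewNurseryObject<allowGC>(cx, size, nslots, clasp))
                     :     //         return obj;
                     :     //     return tryNewTenuredObject<allowGC>(cx, kind, size, nslots);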
     924             : 
     925             :     void bufferGrayRoots();
     926             : 
     927             :     /*
     928             :      * Concurrent sweep infrastructure.
     929             :      */
     930             :     void startTask(GCParallelTask& task, gcstats::PhaseKind phase, AutoLockHelperThreadState& locked);
     931             :     void joinTask(GCParallelTask& task, gcstats::PhaseKind phase, AutoLockHelperThreadState& locked);
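                     : 
                     :     // Usage pattern (illustrative; "task" is a stand-in name): a task
                     :     // started in a phase must later be joined in the same phase, with the
                     :     // helper-thread lock held for both calls:
                     :     //
                     :     //     AutoLockHelperThreadState lock;
                     :     //     startTask(task, gcstats::PhaseKind::SWEEP, lock);
                     :     //     /* ... other main-thread work ... */
                     :     //     joinTask(task, gcstats::PhaseKind::SWEEP, lock);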
     932             : 
     933             :   private:
     936             :     enum IncrementalResult
     937             :     {
     938             :         Reset = 0,
     939             :         Ok
     940             :     };
     941             : 
     942             :     // For ArenaLists::allocateFromArena()
     943             :     friend class ArenaLists;
     944             :     Chunk* pickChunk(const AutoLockGC& lock,
     945             :                      AutoMaybeStartBackgroundAllocation& maybeStartBGAlloc);
     946             :     Arena* allocateArena(Chunk* chunk, Zone* zone, AllocKind kind,
     947             :                          ShouldCheckThresholds checkThresholds, const AutoLockGC& lock);
     948             :     void arenaAllocatedDuringGC(JS::Zone* zone, Arena* arena);
     949             : 
     950             :     // Allocator internals
     951             :     MOZ_MUST_USE bool gcIfNeededAtAllocation(JSContext* cx);
     952             :     template <typename T>
     953             :     static void checkIncrementalZoneState(JSContext* cx, T* t);
     954             :     static TenuredCell* refillFreeListFromAnyThread(JSContext* cx, AllocKind thingKind,
     955             :                                                     size_t thingSize);
     956             :     static TenuredCell* refillFreeListFromActiveCooperatingThread(JSContext* cx, AllocKind thingKind,
     957             :                                                                   size_t thingSize);
     958             :     static TenuredCell* refillFreeListFromHelperThread(JSContext* cx, AllocKind thingKind);
     959             : 
     960             :     /*
     961             :      * Return the list of chunks that can be released outside the GC lock.
     962             :      * Must be called either during the GC or with the GC lock taken.
     963             :      */
     964             :     friend class BackgroundDecommitTask;
     965             :     ChunkPool expireEmptyChunkPool(const AutoLockGC& lock);
     966             :     void freeEmptyChunks(JSRuntime* rt, const AutoLockGC& lock);
     967             :     void prepareToFreeChunk(ChunkInfo& info);
     968             : 
     969             :     friend class BackgroundAllocTask;
     970             :     friend class AutoMaybeStartBackgroundAllocation;
     971             :     bool wantBackgroundAllocation(const AutoLockGC& lock) const;
     972             :     void startBackgroundAllocTaskIfIdle();
     973             : 
     974             :     void requestMajorGC(JS::gcreason::Reason reason);
     975             :     SliceBudget defaultBudget(JS::gcreason::Reason reason, int64_t millis);
     976             :     IncrementalResult budgetIncrementalGC(bool nonincrementalByAPI, JS::gcreason::Reason reason,
     977             :                                           SliceBudget& budget, AutoLockForExclusiveAccess& lock);
     978             :     IncrementalResult resetIncrementalGC(AbortReason reason, AutoLockForExclusiveAccess& lock);
     979             : 
     980             :     // Assert if the system state is such that we should never
     981             :     // receive a request to do GC work.
     982             :     void checkCanCallAPI();
     983             : 
     984             :     // Check if the system state is such that GC has been suppressed
     985             :     // or otherwise delayed.
     986             :     MOZ_MUST_USE bool checkIfGCAllowedInCurrentState(JS::gcreason::Reason reason);
     987             : 
     988             :     gcstats::ZoneGCStats scanZonesBeforeGC();
     989             :     void collect(bool nonincrementalByAPI, SliceBudget budget, JS::gcreason::Reason reason) JS_HAZ_GC_CALL;
     990             :     MOZ_MUST_USE IncrementalResult gcCycle(bool nonincrementalByAPI, SliceBudget& budget,
     991             :                                            JS::gcreason::Reason reason);
     992             :     bool shouldRepeatForDeadZone(JS::gcreason::Reason reason);
     993             :     void incrementalCollectSlice(SliceBudget& budget, JS::gcreason::Reason reason,
     994             :                                  AutoLockForExclusiveAccess& lock);
     995             : 
     996             :     void pushZealSelectedObjects();
     997             :     void purgeRuntime(AutoLockForExclusiveAccess& lock);
     998             :     MOZ_MUST_USE bool beginMarkPhase(JS::gcreason::Reason reason, AutoLockForExclusiveAccess& lock);
     999             :     bool prepareZonesForCollection(JS::gcreason::Reason reason, bool* isFullOut,
    1000             :                                    AutoLockForExclusiveAccess& lock);
    1001             :     bool shouldPreserveJITCode(JSCompartment* comp, int64_t currentTime,
    1002             :                                JS::gcreason::Reason reason, bool canAllocateMoreCode);
    1003             :     void traceRuntimeForMajorGC(JSTracer* trc, AutoLockForExclusiveAccess& lock);
    1004             :     void traceRuntimeAtoms(JSTracer* trc, AutoLockForExclusiveAccess& lock);
    1005             :     void traceRuntimeCommon(JSTracer* trc, TraceOrMarkRuntime traceOrMark,
    1006             :                             AutoLockForExclusiveAccess& lock);
    1007             :     void maybeDoCycleCollection();
    1008             :     void markCompartments();
    1009             :     IncrementalProgress drainMarkStack(SliceBudget& sliceBudget, gcstats::PhaseKind phase);
    1010             :     template <class CompartmentIterT> void markWeakReferences(gcstats::PhaseKind phase);
    1011             :     void markWeakReferencesInCurrentGroup(gcstats::PhaseKind phase);
    1012             :     template <class ZoneIterT, class CompartmentIterT> void markGrayReferences(gcstats::PhaseKind phase);
    1013             :     void markBufferedGrayRoots(JS::Zone* zone);
    1014             :     void markGrayReferencesInCurrentGroup(gcstats::PhaseKind phase);
    1015             :     void markAllWeakReferences(gcstats::PhaseKind phase);
    1016             :     void markAllGrayReferences(gcstats::PhaseKind phase);
    1017             : 
    1018             :     void beginSweepPhase(JS::gcreason::Reason reason, AutoLockForExclusiveAccess& lock);
    1019             :     void groupZonesForSweeping(JS::gcreason::Reason reason, AutoLockForExclusiveAccess& lock);
    1020             :     MOZ_MUST_USE bool findInterZoneEdges();
    1021             :     void getNextSweepGroup();
    1022             :     void endMarkingSweepGroup();
    1023             :     void beginSweepingSweepGroup();
    1024             :     bool shouldReleaseObservedTypes();
    1025             :     void sweepDebuggerOnMainThread(FreeOp* fop);
    1026             :     void sweepJitDataOnMainThread(FreeOp* fop);
    1027             :     void endSweepingSweepGroup();
    1028             :     IncrementalProgress performSweepActions(SliceBudget& sliceBudget,
    1029             :                                             AutoLockForExclusiveAccess& lock);
    1030             :     static IncrementalProgress sweepTypeInformation(GCRuntime* gc, FreeOp* fop, Zone* zone,
    1031             :                                                     SliceBudget& budget, AllocKind kind);
    1032             :     static IncrementalProgress mergeSweptObjectArenas(GCRuntime* gc, FreeOp* fop, Zone* zone,
    1033             :                                                       SliceBudget& budget, AllocKind kind);
    1034             :     static IncrementalProgress sweepAtomsTable(GCRuntime* gc, SliceBudget& budget);
    1035             :     void startSweepingAtomsTable();
    1036             :     IncrementalProgress sweepAtomsTable(SliceBudget& budget);
    1037             :     static IncrementalProgress sweepWeakCaches(GCRuntime* gc, SliceBudget& budget);
    1038             :     IncrementalProgress sweepWeakCaches(SliceBudget& budget);
    1039             :     static IncrementalProgress finalizeAllocKind(GCRuntime* gc, FreeOp* fop, Zone* zone,
    1040             :                                                  SliceBudget& budget, AllocKind kind);
    1041             :     static IncrementalProgress sweepShapeTree(GCRuntime* gc, FreeOp* fop, Zone* zone,
    1042             :                                               SliceBudget& budget, AllocKind kind);
    1043             :     void endSweepPhase(bool lastGC, AutoLockForExclusiveAccess& lock);
    1044             :     bool allCCVisibleZonesWereCollected() const;
    1045             :     void sweepZones(FreeOp* fop, ZoneGroup* group, bool lastGC);
    1046             :     void sweepZoneGroups(FreeOp* fop, bool destroyingRuntime);
    1047             :     void decommitAllWithoutUnlocking(const AutoLockGC& lock);
    1048             :     void startDecommit();
    1049             :     void queueZonesForBackgroundSweep(ZoneList& zones);
    1050             :     void sweepBackgroundThings(ZoneList& zones, LifoAlloc& freeBlocks);
    1051             :     void assertBackgroundSweepingFinished();
    1052             :     bool shouldCompact();
    1053             :     void beginCompactPhase();
    1054             :     IncrementalProgress compactPhase(JS::gcreason::Reason reason, SliceBudget& sliceBudget,
    1055             :                                      AutoLockForExclusiveAccess& lock);
    1056             :     void endCompactPhase(JS::gcreason::Reason reason);
    1057             :     void sweepTypesAfterCompacting(Zone* zone);
    1058             :     void sweepZoneAfterCompacting(Zone* zone);
    1059             :     MOZ_MUST_USE bool relocateArenas(Zone* zone, JS::gcreason::Reason reason,
    1060             :                                      Arena*& relocatedListOut, SliceBudget& sliceBudget);
    1061             :     void updateTypeDescrObjects(MovingTracer* trc, Zone* zone);
    1062             :     void updateCellPointers(MovingTracer* trc, Zone* zone, AllocKinds kinds, size_t bgTaskCount);
    1063             :     void updateAllCellPointers(MovingTracer* trc, Zone* zone);
    1064             :     void updateZonePointersToRelocatedCells(Zone* zone, AutoLockForExclusiveAccess& lock);
    1065             :     void updateRuntimePointersToRelocatedCells(AutoLockForExclusiveAccess& lock);
    1066             :     void protectAndHoldArenas(Arena* arenaList);
    1067             :     void unprotectHeldRelocatedArenas();
    1068             :     void releaseRelocatedArenas(Arena* arenaList);
    1069             :     void releaseRelocatedArenasWithoutUnlocking(Arena* arenaList, const AutoLockGC& lock);
    1070             :     void finishCollection(JS::gcreason::Reason reason);
    1071             : 
    1072             :     void computeNonIncrementalMarkingForValidation(AutoLockForExclusiveAccess& lock);
    1073             :     void validateIncrementalMarking();
    1074             :     void finishMarkingValidation();
    1075             : 
    1076             : #ifdef DEBUG
    1077             :     void checkForCompartmentMismatches();
    1078             : #endif
    1079             : 
    1080             :     void callFinalizeCallbacks(FreeOp* fop, JSFinalizeStatus status) const;
    1081             :     void callWeakPointerZonesCallbacks() const;
    1082             :     void callWeakPointerCompartmentCallbacks(JSCompartment* comp) const;
    1083             : 
    1084             :   public:
    1085             :     JSRuntime* const rt;
    1086             : 
    1087             :     /* Embedders can use this zone and group however they wish. */
    1088             :     UnprotectedData<JS::Zone*> systemZone;
    1089             :     UnprotectedData<ZoneGroup*> systemZoneGroup;
    1090             : 
    1091             :     // List of all zone groups (protected by the GC lock).
    1092             :     ActiveThreadOrGCTaskData<ZoneGroupVector> groups;
    1093             : 
    1094             :     // The unique atoms zone, which has no zone group.
    1095             :     WriteOnceData<Zone*> atomsZone;
    1096             : 
    1097             :   private:
    1098             :     UnprotectedData<gcstats::Statistics> stats_;
    1099             :   public:
    1100         337 :     gcstats::Statistics& stats() { return stats_.ref(); }
    1101             : 
    1102             :     GCMarker marker;
    1103             : 
    1104             :     Vector<JS::GCCellPtr, 0, SystemAllocPolicy> unmarkGrayStack;
    1105             : 
    1106             :     /* Track heap usage for this runtime. */
    1107             :     HeapUsage usage;
    1108             : 
    1109             :     /* GC scheduling state and parameters. */
    1110             :     GCSchedulingTunables tunables;
    1111             :     GCSchedulingState schedulingState;
    1112             : 
    1113             :     MemProfiler mMemProfiler;
    1114             : 
    1115             :     // State used for managing atom mark bitmaps in each zone. Protected by the
    1116             :     // exclusive access lock.
    1117             :     AtomMarkingRuntime atomMarking;
    1118             : 
    1119             :   private:
    1120             :     // When empty, chunks reside in the emptyChunks pool and are re-used as
    1121             :     // needed or eventually expired if not re-used. The emptyChunks pool gets
    1122             :     // refilled from the background allocation task heuristically so that empty
    1123             :     // chunks should always be available for immediate allocation without syscalls.
    1124             :     GCLockData<ChunkPool> emptyChunks_;
    1125             : 
    1126             :     // Chunks which have had some, but not all, of their arenas allocated live
    1127             :     // in the available chunk lists. When all available arenas in a chunk have
    1128             :     // been allocated, the chunk is removed from the available list and moved
    1129             :     // to the fullChunks pool. During a GC, if all arenas are free, the chunk
    1130             :     // is moved back to the emptyChunks pool and scheduled for eventual
    1131             :     // release.
    1132             :     UnprotectedData<ChunkPool> availableChunks_;
    1133             : 
    1134             :     // When all arenas in a chunk are used, it is moved to the fullChunks pool
    1135             :     // so as to reduce the cost of operations on the available lists.
    1136             :     UnprotectedData<ChunkPool> fullChunks_;
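                     : 
                     :     // Informal summary of the chunk life cycle described in the comments
                     :     // above:
                     :     //
                     :     //     allocate ─> emptyChunks_ ─> availableChunks_ ─> fullChunks_
                     :     //                      ^                 │                │
                     :     //                      └── all arenas freed during GC ────┘
                     :     //
                     :     // Empty chunks that are not re-used in time are expired and returned
                     :     // to the OS (see expireEmptyChunkPool() and freeEmptyChunks()).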
    1137             : 
    1138             :     ActiveThreadData<RootedValueMap> rootsHash;
    1139             : 
    1140             :     // An incrementing id used to assign unique ids to cells that require one.
    1141             :     mozilla::Atomic<uint64_t, mozilla::ReleaseAcquire> nextCellUniqueId_;
    1142             : 
    1143             :     /*
    1144             :      * Number of committed arenas in all GC chunks, including empty chunks.
    1145             :      */
    1146             :     mozilla::Atomic<uint32_t, mozilla::ReleaseAcquire> numArenasFreeCommitted;
    1147             :     ActiveThreadData<VerifyPreTracer*> verifyPreData;
    1148             : 
    1149             :   private:
    1150             :     UnprotectedData<bool> chunkAllocationSinceLastGC;
    1151             :     ActiveThreadData<int64_t> lastGCTime;
    1152             : 
    1153             :     ActiveThreadData<JSGCMode> mode;
    1154             : 
    1155             :     mozilla::Atomic<size_t, mozilla::ReleaseAcquire> numActiveZoneIters;
    1156             : 
    1157             :     /* During shutdown, the GC needs to clean up every possible object. */
    1158             :     ActiveThreadData<bool> cleanUpEverything;
    1159             : 
    1160             :     // Gray marking must be done after all black marking is complete. However,
    1161             :     // we do not have write barriers on XPConnect roots. Therefore, XPConnect
    1162             :     // roots must be accumulated in the first slice of incremental GC. We
    1163             :     // accumulate these roots in each zone's gcGrayRoots vector and then mark
    1164             :     // them later, after black marking is complete for each compartment. This
    1165             :     // accumulation can fail, but in that case we switch to non-incremental GC.
    1166             :     enum class GrayBufferState {
    1167             :         Unused,
    1168             :         Okay,
    1169             :         Failed
    1170             :     };
    1171             :     ActiveThreadOrGCTaskData<GrayBufferState> grayBufferState;
    1172           3 :     bool hasBufferedGrayRoots() const { return grayBufferState == GrayBufferState::Okay; }
    1173             : 
    1174             :     // Clear each zone's gray buffers, but do not change the current state.
    1175             :     void resetBufferedGrayRoots() const;
    1176             : 
    1177             :     // Reset the gray buffering state to Unused.
    1178           0 :     void clearBufferedGrayRoots() {
    1179           0 :         grayBufferState = GrayBufferState::Unused;
    1180           0 :         resetBufferedGrayRoots();
    1181           0 :     }
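                     : 
                     :     // State transitions (informal): bufferGrayRoots() moves the state from
                     :     // Unused to Okay, or to Failed if accumulating the roots runs out of
                     :     // memory; a Failed state forces the collection to finish
                     :     // non-incrementally. clearBufferedGrayRoots() returns the state to
                     :     // Unused.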
    1182             : 
    1183             :     /*
    1184             :      * The gray bits can become invalid if UnmarkGray overflows the stack. A
    1185             :      * full GC will reset this bit, since it fills in all the gray bits.
    1186             :      */
    1187             :     UnprotectedData<bool> grayBitsValid;
    1188             : 
    1189             :     mozilla::Atomic<JS::gcreason::Reason, mozilla::Relaxed> majorGCTriggerReason;
    1190             : 
    1191             :   private:
    1192             :     /* Perform full GC if rt->keepAtoms() becomes false. */
    1193             :     ActiveThreadData<bool> fullGCForAtomsRequested_;
    1194             : 
    1195             :     /* Incremented at the start of every minor GC. */
    1196             :     ActiveThreadData<uint64_t> minorGCNumber;
    1197             : 
    1198             :     /* Incremented at the start of every major GC. */
    1199             :     ActiveThreadData<uint64_t> majorGCNumber;
    1200             : 
    1201             :     /* The major GC number at which to release observed type information. */
    1202             :     ActiveThreadData<uint64_t> jitReleaseNumber;
    1203             : 
    1204             :     /* Incremented on every GC slice. */
    1205             :     ActiveThreadData<uint64_t> number;
    1206             : 
    1207             :     /* Whether the currently running GC can finish in multiple slices. */
    1208             :     ActiveThreadData<bool> isIncremental;
    1209             : 
    1210             :     /* Whether all zones are being collected in first GC slice. */
    1211             :     /* Whether all zones are being collected in the first GC slice. */
    1212             : 
    1213             :     /* Whether the heap will be compacted at the end of GC. */
    1214             :     ActiveThreadData<bool> isCompacting;
    1215             : 
    1216             :     /* The invocation kind of the current GC, taken from the first slice. */
    1217             :     ActiveThreadData<JSGCInvocationKind> invocationKind;
    1218             : 
    1219             :     /* The initial GC reason, taken from the first slice. */
    1220             :     ActiveThreadData<JS::gcreason::Reason> initialReason;
    1221             : 
    1222             :     /*
    1223             :      * The current incremental GC phase. This is also used internally in
    1224             :      * non-incremental GC.
    1225             :      */
    1226             :     ActiveThreadOrGCTaskData<State> incrementalState;
    1227             : 
    1228             :     /* Indicates that the last incremental slice exhausted the mark stack. */
    1229             :     ActiveThreadData<bool> lastMarkSlice;
    1230             : 
    1231             :     /* Whether any sweeping will take place in the separate GC helper thread. */
    1232             :     /* Whether any sweeping will take place on the separate GC helper thread. */
    1233             : 
    1234             :     /* Whether observed type information is being released in the current GC. */
    1235             :     ActiveThreadData<bool> releaseObservedTypes;
    1236             : 
    1237             :     /* Whether any black->gray edges were found during marking. */
    1238             :     ActiveThreadData<BlackGrayEdgeVector> foundBlackGrayEdges;
    1239             : 
    1240             :     /* Singly linked list of zones to be swept in the background. */
    1241             :     ActiveThreadOrGCTaskData<ZoneList> backgroundSweepZones;
    1242             : 
    1243             :     /*
    1244             :      * Free LIFO blocks are transferred to this allocator before being freed on
    1245             :      * the background GC thread after sweeping.
    1246             :      */
    1247             :     ActiveThreadOrGCTaskData<LifoAlloc> blocksToFreeAfterSweeping;
    1248             : 
    1249             :   private:
    1250             :     /* Index of current sweep group (for stats). */
    1251             :     ActiveThreadData<unsigned> sweepGroupIndex;
    1252             : 
    1253             :     /*
    1254             :      * Incremental sweep state.
    1255             :      */
    1256             : 
    1257             :     ActiveThreadData<JS::Zone*> sweepGroups;
    1258             :     ActiveThreadOrGCTaskData<JS::Zone*> currentSweepGroup;
    1259             :     ActiveThreadData<SweepActionList> sweepActionList;
    1260             :     ActiveThreadData<size_t> sweepPhaseIndex;
    1261             :     ActiveThreadOrGCTaskData<JS::Zone*> sweepZone;
    1262             :     ActiveThreadData<size_t> sweepActionIndex;
    1263             :     ActiveThreadData<mozilla::Maybe<AtomSet::Enum>> maybeAtomsToSweep;
    1264             :     ActiveThreadOrGCTaskData<JS::detail::WeakCacheBase*> sweepCache;
    1265             :     ActiveThreadData<bool> abortSweepAfterCurrentGroup;
    1266             : 
    1267             :     friend class WeakCacheSweepIterator;
    1268             : 
    1269             :     /*
    1270             :      * List head of arenas allocated during the sweep phase.
    1271             :      */
    1272             :     ActiveThreadData<Arena*> arenasAllocatedDuringSweep;
    1273             : 
    1274             :     /*
    1275             :      * Incremental compacting state.
    1276             :      */
    1277             :     ActiveThreadData<bool> startedCompacting;
    1278             :     ActiveThreadData<ZoneList> zonesToMaybeCompact;
    1279             :     ActiveThreadData<Arena*> relocatedArenasToRelease;
    1280             : 
    1281             : #ifdef JS_GC_ZEAL
    1282             :     ActiveThreadData<MarkingValidator*> markingValidator;
    1283             : #endif
    1284             : 
    1285             :     /*
    1286             :      * Indicates that a GC slice has taken place in the middle of an animation
    1287             :      * frame, rather than at the beginning. In this case, the next slice will be
    1288             :      * delayed so that we don't get back-to-back slices.
    1289             :      */
    1290             :     ActiveThreadData<bool> interFrameGC;
    1291             : 
    1292             :     /* Default budget for incremental GC slice. See js/SliceBudget.h. */
    1293             :     ActiveThreadData<int64_t> defaultTimeBudget_;
    1294             : 
    1295             :     /*
    1296             :      * We disable incremental GC if we encounter a Class with a trace hook
    1297             :      * that does not implement write barriers.
    1298             :      */
    1299             :     ActiveThreadData<bool> incrementalAllowed;
    1300             : 
    1301             :     /*
    1302             :      * Whether compacting GC is enabled globally.
    1303             :      */
    1304             :     ActiveThreadData<bool> compactingEnabled;
    1305             : 
    1306             :     ActiveThreadData<bool> rootsRemoved;
    1307             : 
    1308             :     /*
    1309             :      * These options control the zealousness of the GC. At every allocation,
    1310             :      * nextScheduled is decremented. When it reaches zero we do a full GC.
    1311             :      *
    1312             :      * At this point, if zeal_ is one of the types that trigger periodic
    1313             :      * collection, then nextScheduled is reset to the value of zealFrequency.
    1314             :      * Otherwise, no additional GCs take place.
    1315             :      *
    1316             :      * You can control these values in several ways:
    1317             :      *   - Set the JS_GC_ZEAL environment variable
    1318             :      *   - Call gczeal() or schedulegc() from inside shell-executed JS code
    1319             :      *     (see the help for details)
    1320             :      *
    1321             :      * If zeal_ == 1 then we perform GCs in select places (during MaybeGC and
    1322             :      * whenever we are notified that GC roots have been removed). This option is
    1323             :      * mainly useful to embedders.
    1324             :      *
    1325             :      * We use zeal_ == 4 to enable write barrier verification. See the comment
    1326             :      * in jsgc.cpp for more information about this.
    1327             :      *
    1328             :      * zeal_ values from 8 to 10 periodically run different types of
    1329             :      * incremental GC.
    1330             :      *
    1331             :      * zeal_ value 14 performs periodic shrinking collections.
    1332             :      */
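                     : 
                     :     // Example invocations (hedged; see the gczeal() shell help for the
                     :     // authoritative mode list):
                     :     //
                     :     //     JS_GC_ZEAL=4 ./js script.js       # verify pre write barriers
                     :     //     JS_GC_ZEAL=10,100 ./js script.js  # periodic incremental GC
                     :     //
                     :     //     gczeal(10, 100);  // the same, from shell-executed JS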
    1333             : #ifdef JS_GC_ZEAL
    1334             :     ActiveThreadData<uint32_t> zealModeBits;
    1335             :     ActiveThreadData<int> zealFrequency;
    1336             :     ActiveThreadData<int> nextScheduled;
    1337             :     ActiveThreadData<bool> deterministicOnly;
    1338             :     ActiveThreadData<int> incrementalLimit;
    1339             : 
    1340             :     ActiveThreadData<Vector<JSObject*, 0, SystemAllocPolicy>> selectedForMarking;
    1341             : #endif
    1342             : 
    1343             :     ActiveThreadData<bool> fullCompartmentChecks;
    1344             : 
    1345             :     Callback<JSGCCallback> gcCallback;
    1346             :     Callback<JS::DoCycleCollectionCallback> gcDoCycleCollectionCallback;
    1347             :     Callback<JSObjectsTenuredCallback> tenuredCallback;
    1348             :     CallbackVector<JSFinalizeCallback> finalizeCallbacks;
    1349             :     CallbackVector<JSWeakPointerZonesCallback> updateWeakPointerZonesCallbacks;
    1350             :     CallbackVector<JSWeakPointerCompartmentCallback> updateWeakPointerCompartmentCallbacks;
    1351             : 
    1352             :     MemoryCounter<GCRuntime> mallocCounter;
    1353             : 
    1354             :     /*
    1355             :      * The trace operations to trace embedding-specific GC roots. One is for
    1356             :      * tracing through black roots and the other is for tracing through gray
    1357             :      * roots. The black/gray distinction is only relevant to the cycle
    1358             :      * collector.
    1359             :      */
    1360             :     CallbackVector<JSTraceDataOp> blackRootTracers;
    1361             :     Callback<JSTraceDataOp> grayRootTracer;
    1362             : 
    1363             :     /* Always preserve JIT code during GCs, for testing. */
    1364             :     ActiveThreadData<bool> alwaysPreserveCode;
    1365             : 
    1366             : #ifdef DEBUG
    1367             :     ActiveThreadData<bool> arenasEmptyAtShutdown;
    1368             : #endif
    1369             : 
    1370             :     /* Synchronize GC heap access among GC helper threads and active threads. */
    1371             :     friend class js::AutoLockGC;
    1372             :     js::Mutex lock;
    1373             : 
    1374             :     BackgroundAllocTask allocTask;
    1375             :     BackgroundDecommitTask decommitTask;
    1376             : 
    1377             :     GCHelperState helperState;
    1378             : 
    1379             :     /*
    1380             :      * During incremental sweeping, this field temporarily holds the arenas of
    1381             :      * the current AllocKind being swept in order of increasing free space.
    1382             :      */
    1383             :     ActiveThreadData<SortedArenaList> incrementalSweepList;
    1384             : 
    1385             :   private:
    1386             :     ActiveThreadData<Nursery> nursery_;
    1387             :     ActiveThreadData<gc::StoreBuffer> storeBuffer_;
    1388             :   public:
    1389      274164 :     Nursery& nursery() { return nursery_.ref(); }
    1390        1025 :     gc::StoreBuffer& storeBuffer() { return storeBuffer_.ref(); }
    1391             : 
    1392             :     // Free LIFO blocks are transferred to this allocator before being freed
    1393             :     // after minor GC.
    1394             :     ActiveThreadData<LifoAlloc> blocksToFreeAfterMinorGC;
    1395             : 
    1396         190 :     const void* addressOfNurseryPosition() {
    1397         190 :         return nursery_.refNoCheck().addressOfPosition();
    1398             :     }
    1399          95 :     const void* addressOfNurseryCurrentEnd() {
    1400          95 :         return nursery_.refNoCheck().addressOfCurrentEnd();
    1401             :     }
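                     : 
                     :     // These addresses let JIT-compiled code inline the nursery bump
                     :     // allocation check. Conceptually (a sketch, not the actual generated
                     :     // code):
                     :     //
                     :     //     uintptr_t pos = *(uintptr_t*) addressOfNurseryPosition();
                     :     //     if (pos + thingSize <= *(uintptr_t*) addressOfNurseryCurrentEnd())
                     :     //         /* bump the position and use the old value */;
                     :     //     else
                     :     //         /* take the out-of-line allocation path */;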
    1402             : 
    1403             :     void minorGC(JS::gcreason::Reason reason,
    1404             :                  gcstats::PhaseKind phase = gcstats::PhaseKind::MINOR_GC) JS_HAZ_GC_CALL;
    1405           6 :     void evictNursery(JS::gcreason::Reason reason = JS::gcreason::EVICT_NURSERY) {
    1406           6 :         minorGC(reason, gcstats::PhaseKind::EVICT_NURSERY);
    1407           6 :     }
    1408             :     void freeAllLifoBlocksAfterMinorGC(LifoAlloc* lifo);
    1409             : 
    1410             :     friend class js::GCHelperState;
    1411             :     friend class MarkingValidator;
    1412             :     friend class AutoTraceSession;
    1413             :     friend class AutoEnterIteration;
    1414             : };
    1415             : 
    1416             : /* Prevent compartments and zones from being collected during iteration. */
    1417             : class MOZ_RAII AutoEnterIteration {
    1418             :     GCRuntime* gc;
    1419             : 
    1420             :   public:
    1421        4869 :     explicit AutoEnterIteration(GCRuntime* gc_) : gc(gc_) {
    1422        4869 :         ++gc->numActiveZoneIters;
    1423        4869 :     }
    1424             : 
    1425        9738 :     ~AutoEnterIteration() {
    1426        4869 :         MOZ_ASSERT(gc->numActiveZoneIters);
    1427        4869 :         --gc->numActiveZoneIters;
    1428        4869 :     }
    1429             : };
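                     : 
                     : // Typical use (illustrative): hold an AutoEnterIteration for the duration
                     : // of a zone walk so that a GC triggered in the meantime cannot delete the
                     : // zones being iterated:
                     : //
                     : //     {
                     : //         AutoEnterIteration iterGuard(&rt->gc);
                     : //         for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next())
                     : //             inspect(zone);   // inspect() is a hypothetical callback
                     : //     }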
    1430             : 
    1431             : // After pulling a Chunk out of the empty chunks pool, we want to run the
    1432             : // background allocator to refill it. The code that takes Chunks does so under
    1433             : // the GC lock. We need to start the background allocation under the helper
    1434             : // threads lock. To avoid lock inversion we have to delay the start until after
    1435             : // we are outside the GC lock. This class handles that delay automatically.
    1436             : class MOZ_RAII AutoMaybeStartBackgroundAllocation
    1437             : {
    1438             :     GCRuntime* gc;
    1439             : 
    1440             :   public:
    1441        6889 :     AutoMaybeStartBackgroundAllocation()
    1442        6889 :       : gc(nullptr)
    1443        6889 :     {}
    1444             : 
    1445          18 :     void tryToStartBackgroundAllocation(GCRuntime& gc) {
    1446          18 :         this->gc = &gc;
    1447          18 :     }
    1448             : 
    1449       13778 :     ~AutoMaybeStartBackgroundAllocation() {
    1450        6889 :         if (gc)
    1451          17 :             gc->startBackgroundAllocTaskIfIdle();
    1452        6889 :     }
    1453             : };
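                     : 
                     : // Typical use (illustrative): the RAII object is declared before the GC
                     : // lock is taken, so its destructor runs only after the lock is released:
                     : //
                     : //     AutoMaybeStartBackgroundAllocation maybeStartBGAlloc;
                     : //     {
                     : //         AutoLockGC lock(rt);
                     : //         chunk = gc->getOrAllocChunk(lock, maybeStartBGAlloc);
                     : //     }   // GC lock released here
                     : //     // ~AutoMaybeStartBackgroundAllocation may now start the task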
    1454             : 
    1455             : #ifdef JS_GC_ZEAL
    1456             : 
    1457             : inline bool
    1458      613108 : GCRuntime::hasZealMode(ZealMode mode)
    1459             : {
    1460             :     static_assert(size_t(ZealMode::Limit) < sizeof(zealModeBits) * 8,
    1461             :                   "Zeal modes must fit in zealModeBits");
    1462      613108 :     return zealModeBits & (1 << uint32_t(mode));
    1463             : }
    1464             : 
    1465             : inline void
    1466           0 : GCRuntime::clearZealMode(ZealMode mode)
    1467             : {
    1468           0 :     zealModeBits &= ~(1 << uint32_t(mode));
    1469           0 :     MOZ_ASSERT(!hasZealMode(mode));
    1470           0 : }
    1471             : 
    1472             : inline bool
    1473       24321 : GCRuntime::upcomingZealousGC() {
    1474       24321 :     return nextScheduled == 1;
    1475             : }
    1476             : 
    1477             : inline bool
    1478      343608 : GCRuntime::needZealousGC() {
    1479      343608 :     if (nextScheduled > 0 && --nextScheduled == 0) {
    1480           0 :         if (hasZealMode(ZealMode::Alloc) ||
    1481           0 :             hasZealMode(ZealMode::GenerationalGC) ||
    1482           0 :             hasZealMode(ZealMode::IncrementalRootsThenFinish) ||
    1483           0 :             hasZealMode(ZealMode::IncrementalMarkAllThenFinish) ||
    1484           0 :             hasZealMode(ZealMode::IncrementalMultipleSlices) ||
    1485           0 :             hasZealMode(ZealMode::Compact) ||
    1486           0 :             hasZealMode(ZealMode::IncrementalSweepThenFinish))
    1487             :         {
    1488           0 :             nextScheduled = zealFrequency;
    1489             :         }
    1490           0 :         return true;
    1491             :     }
    1492      343610 :     return false;
    1493             : }
    1494             : #else
    1495             : inline bool GCRuntime::hasZealMode(ZealMode mode) { return false; }
    1496             : inline void GCRuntime::clearZealMode(ZealMode mode) { }
    1497             : inline bool GCRuntime::upcomingZealousGC() { return false; }
    1498             : inline bool GCRuntime::needZealousGC() { return false; }
    1499             : #endif
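                     : 
                     : // Allocation paths typically consult the zeal machinery along these lines
                     : // (illustrative; the real callers live in the allocator):
                     : //
                     : //     if (rt->gc.needZealousGC())
                     : //         rt->gc.gc(GC_NORMAL, JS::gcreason::DEBUG_GC);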
    1500             : 
    1501             : } /* namespace gc */
    1502             : 
    1503             : } /* namespace js */
    1504             : 
    1505             : #endif

Generated by: LCOV version 1.13