/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef gc_Zone_h
#define gc_Zone_h

#include "mozilla/Atomics.h"
#include "mozilla/MemoryReporting.h"

#include "jscntxt.h"

#include "ds/SplayTree.h"
#include "gc/FindSCCs.h"
#include "gc/GCRuntime.h"
#include "js/GCHashTable.h"
#include "js/TracingAPI.h"
#include "vm/MallocProvider.h"
#include "vm/RegExpShared.h"
#include "vm/TypeInference.h"

namespace js {

namespace jit {
class JitZone;
} // namespace jit

namespace gc {

// This class encapsulates the data that determines when we need to do a zone GC.
class ZoneHeapThreshold
{
    // The "growth factor" for computing our next thresholds after a GC.
    GCLockData<double> gcHeapGrowthFactor_;

    // GC trigger threshold for allocations on the GC heap.
    mozilla::Atomic<size_t, mozilla::Relaxed> gcTriggerBytes_;

  public:
    ZoneHeapThreshold()
      : gcHeapGrowthFactor_(3.0),
        gcTriggerBytes_(0)
    {}

    double gcHeapGrowthFactor() const { return gcHeapGrowthFactor_; }
    size_t gcTriggerBytes() const { return gcTriggerBytes_; }
    double allocTrigger(bool highFrequencyGC) const;

    void updateAfterGC(size_t lastBytes, JSGCInvocationKind gckind,
                       const GCSchedulingTunables& tunables, const GCSchedulingState& state,
                       const AutoLockGC& lock);
    void updateForRemovedArena(const GCSchedulingTunables& tunables);

  private:
    static double computeZoneHeapGrowthFactorForHeapSize(size_t lastBytes,
                                                         const GCSchedulingTunables& tunables,
                                                         const GCSchedulingState& state);
    static size_t computeZoneTriggerBytes(double growthFactor, size_t lastBytes,
                                          JSGCInvocationKind gckind,
                                          const GCSchedulingTunables& tunables,
                                          const AutoLockGC& lock);
};
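
// Illustrative sketch (not part of this header): how a trigger check against
// these thresholds might look. The function name, reason code, and call site
// are assumptions for illustration; usage.gcBytes(), threshold.gcTriggerBytes(),
// and the four-argument triggerZoneGC() all appear elsewhere in this header.
//
//   void maybeTriggerZoneGC(JS::Zone* zone, js::gc::GCRuntime& gc) {
//       // Compare the zone's live GC-heap bytes against the trigger computed
//       // from the previous collection.
//       if (zone->usage.gcBytes() >= zone->threshold.gcTriggerBytes()) {
//           gc.triggerZoneGC(zone, JS::gcreason::ALLOC_TRIGGER,
//                            zone->usage.gcBytes(), zone->threshold.gcTriggerBytes());
//       }
//   }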

struct ZoneComponentFinder : public ComponentFinder<JS::Zone, ZoneComponentFinder>
{
    ZoneComponentFinder(uintptr_t sl, AutoLockForExclusiveAccess& lock)
      : ComponentFinder<JS::Zone, ZoneComponentFinder>(sl), lock(lock)
    {}

    AutoLockForExclusiveAccess& lock;
};

struct UniqueIdGCPolicy {
    static bool needsSweep(Cell** cell, uint64_t* value);
};

// Maps a Cell* to a unique, 64-bit id.
using UniqueIdMap = GCHashMap<Cell*,
                              uint64_t,
                              PointerHasher<Cell*, 3>,
                              SystemAllocPolicy,
                              UniqueIdGCPolicy>;

extern uint64_t NextCellUniqueId(JSRuntime* rt);

template <typename T>
class ZoneCellIter;

} // namespace gc

class MOZ_NON_TEMPORARY_CLASS ExternalStringCache
{
    static const size_t NumEntries = 4;
    mozilla::Array<JSString*, NumEntries> entries_;

    ExternalStringCache(const ExternalStringCache&) = delete;
    void operator=(const ExternalStringCache&) = delete;

  public:
    ExternalStringCache() { purge(); }
    void purge() { mozilla::PodArrayZero(entries_); }

    MOZ_ALWAYS_INLINE JSString* lookup(const char16_t* chars, size_t len) const;
    MOZ_ALWAYS_INLINE void put(JSString* s);
};
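
// Illustrative sketch (assumed usage; not part of this header): consult the
// cache before allocating a new external string, and fill it on a miss.
// createExternalString() is a hypothetical helper.
//
//   JSString* getOrCreateExternal(JSContext* cx, const char16_t* chars, size_t len) {
//       js::ExternalStringCache& cache = cx->zone()->externalStringCache();
//       if (JSString* str = cache.lookup(chars, len))
//           return str;                            // Cache hit: reuse.
//       JSString* str = createExternalString(cx, chars, len);
//       if (str)
//           cache.put(str);                        // Remember for next time.
//       return str;
//   }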

} // namespace js

namespace JS {

// A zone is a collection of compartments. Every compartment belongs to exactly
// one zone. In Firefox, there is roughly one zone per tab along with a system
// zone for everything else. Zones mainly serve as boundaries for garbage
// collection. Unlike compartments, they have no special security properties.
//
// Every GC thing belongs to exactly one zone. GC things from the same zone but
// different compartments can share an arena (4k page). GC things from different
// zones cannot be stored in the same arena. The garbage collector is capable of
// collecting one zone at a time; it cannot collect at the granularity of
// compartments.
//
// GC things are tied to zones and compartments as follows:
//
// - JSObjects belong to a compartment and cannot be shared between
//   compartments. If an object needs to point to a JSObject in a different
//   compartment, regardless of zone, it must go through a cross-compartment
//   wrapper. Each compartment keeps track of its outgoing wrappers in a table.
//   JSObjects find their compartment via their ObjectGroup.
//
// - JSStrings do not belong to any particular compartment, but they do belong
//   to a zone. Thus, two different compartments in the same zone can point to a
//   JSString. When a string needs to be wrapped, we copy it if it's in a
//   different zone and do nothing if it's in the same zone. Thus, transferring
//   strings within a zone is very efficient.
//
// - Shapes and base shapes belong to a zone and are shared between compartments
//   in that zone where possible. Accessor shapes store getter and setter
//   JSObjects which belong to a single compartment, so these shapes and all
//   their descendants can't be shared with other compartments.
//
// - Scripts are also compartment-local and cannot be shared. A script points to
//   its compartment.
//
// - ObjectGroup and JitCode objects belong to a compartment and cannot be
//   shared. There is no mechanism to obtain the compartment from a JitCode
//   object.
//
// A zone remains alive as long as any GC things in the zone are alive. A
// compartment remains alive as long as any JSObjects, scripts, shapes, or base
// shapes within it are alive.
//
// We always guarantee that a zone has at least one live compartment by refusing
// to delete the last compartment in a live zone.
struct Zone : public JS::shadow::Zone,
              public js::gc::GraphNodeBase<JS::Zone>,
              public js::MallocProvider<JS::Zone>
{
    explicit Zone(JSRuntime* rt, js::ZoneGroup* group);
    ~Zone();
    MOZ_MUST_USE bool init(bool isSystem);

  private:
    js::ZoneGroup* const group_;
  public:
    js::ZoneGroup* group() const {
        return group_;
    }

    // For JIT use.
    static size_t offsetOfGroup() {
        return offsetof(Zone, group_);
    }

    void findOutgoingEdges(js::gc::ZoneComponentFinder& finder);

    void discardJitCode(js::FreeOp* fop, bool discardBaselineCode = true);

    void addSizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf,
                                size_t* typePool,
                                size_t* regexpZone,
                                size_t* jitZone,
                                size_t* baselineStubsOptimized,
                                size_t* cachedCFG,
                                size_t* uniqueIdMap,
                                size_t* shapeTables,
                                size_t* atomsMarkBitmaps);

    // Iterate over all cells in the zone. See the definition of ZoneCellIter
    // in jsgcinlines.h for the possible arguments and documentation.
    template <typename T, typename... Args>
    js::gc::ZoneCellIter<T> cellIter(Args&&... args) {
        return js::gc::ZoneCellIter<T>(const_cast<Zone*>(this), mozilla::Forward<Args>(args)...);
    }
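
    // Illustrative sketch (assumed usage; not part of this header): walking
    // every JSScript in a zone with cellIter(). See ZoneCellIter in
    // jsgcinlines.h for the authoritative interface; processScript() is a
    // hypothetical callback.
    //
    //   for (auto iter = zone->cellIter<JSScript>(); !iter.done(); iter.next()) {
    //       JSScript* script = iter.get();
    //       processScript(script);
    //   }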

    MOZ_MUST_USE void* onOutOfMemory(js::AllocFunction allocFunc, size_t nbytes,
                                     void* reallocPtr = nullptr) {
        if (!js::CurrentThreadCanAccessRuntime(runtime_))
            return nullptr;
        return runtimeFromActiveCooperatingThread()->onOutOfMemory(allocFunc, nbytes, reallocPtr);
    }
    void reportAllocationOverflow() { js::ReportAllocationOverflow(nullptr); }

    void beginSweepTypes(js::FreeOp* fop, bool releaseTypes);

    bool hasMarkedCompartments();

    void scheduleGC() { MOZ_ASSERT(!CurrentThreadIsHeapBusy()); gcScheduled_ = true; }
    void unscheduleGC() { gcScheduled_ = false; }
    bool isGCScheduled() { return gcScheduled_ && canCollect(); }

    void setPreservingCode(bool preserving) { gcPreserveCode_ = preserving; }
    bool isPreservingCode() const { return gcPreserveCode_; }

    bool canCollect();

    void notifyObservingDebuggers();

    void setGCState(GCState state) {
        MOZ_ASSERT(CurrentThreadIsHeapBusy());
        MOZ_ASSERT_IF(state != NoGC, canCollect());
        gcState_ = state;
        if (state == Finished)
            notifyObservingDebuggers();
    }

    bool isCollecting() const {
        MOZ_ASSERT(CurrentThreadCanAccessRuntime(runtimeFromActiveCooperatingThread()));
        return isCollectingFromAnyThread();
    }

    bool isCollectingFromAnyThread() const {
        if (CurrentThreadIsHeapCollecting())
            return gcState_ != NoGC;
        else
            return needsIncrementalBarrier();
    }

    // If this returns true, all object tracing must be done with a GC marking
    // tracer.
    bool requireGCTracer() const {
        JSRuntime* rt = runtimeFromAnyThread();
        return CurrentThreadIsHeapMajorCollecting() && !rt->gc.isHeapCompacting() && gcState_ != NoGC;
    }

    bool shouldMarkInZone() const {
        return needsIncrementalBarrier() || isGCMarking();
    }

    // Get a number that is incremented whenever this zone is collected, and
    // possibly at other times too.
    uint64_t gcNumber();

    bool compileBarriers() const { return compileBarriers(needsIncrementalBarrier()); }
    bool compileBarriers(bool needsIncrementalBarrier) const {
        return needsIncrementalBarrier ||
               runtimeFromActiveCooperatingThread()->hasZealMode(js::gc::ZealMode::VerifierPre);
    }

    void setNeedsIncrementalBarrier(bool needs);
    const uint32_t* addressOfNeedsIncrementalBarrier() const { return &needsIncrementalBarrier_; }

    js::jit::JitZone* getJitZone(JSContext* cx) { return jitZone_ ? jitZone_ : createJitZone(cx); }
    js::jit::JitZone* jitZone() { return jitZone_; }

    bool isAtomsZone() const { return runtimeFromAnyThread()->isAtomsZone(this); }
    bool isSelfHostingZone() const { return runtimeFromAnyThread()->isSelfHostingZone(this); }

    void prepareForCompacting();

#ifdef DEBUG
    // For testing purposes, return the index of the sweep group in which this
    // zone was swept during the last GC.
    unsigned lastSweepGroupIndex() { return gcLastSweepGroupIndex; }
#endif

    void sweepBreakpoints(js::FreeOp* fop);
    void sweepUniqueIds(js::FreeOp* fop);
    void sweepWeakMaps();
    void sweepCompartments(js::FreeOp* fop, bool keepAtleastOne, bool lastGC);

    using DebuggerVector = js::Vector<js::Debugger*, 0, js::SystemAllocPolicy>;

  private:
    js::ZoneGroupData<DebuggerVector*> debuggers;

    js::jit::JitZone* createJitZone(JSContext* cx);

    bool isQueuedForBackgroundSweep() {
        return isOnList();
    }

    // Side map for storing unique ids for cells, independent of address.
    js::ZoneGroupOrGCTaskData<js::gc::UniqueIdMap> uniqueIds_;

    js::gc::UniqueIdMap& uniqueIds() { return uniqueIds_.ref(); }

  public:
    bool hasDebuggers() const { return debuggers && debuggers->length(); }
    DebuggerVector* getDebuggers() const { return debuggers; }
    DebuggerVector* getOrCreateDebuggers(JSContext* cx);

    void clearTables();

    /*
     * When true, skip calling the metadata callback. We use this:
     * - to avoid invoking the callback recursively;
     * - to avoid observing lazy prototype setup (which confuses callbacks that
     *   want to use the types being set up!);
     * - to avoid attaching allocation stacks to allocation stack nodes, which
     *   is silly
     * And so on.
     */
    js::ZoneGroupData<bool> suppressAllocationMetadataBuilder;

    js::gc::ArenaLists arenas;

    js::TypeZone types;

  private:
    /* Live weakmaps in this zone. */
    js::ZoneGroupOrGCTaskData<mozilla::LinkedList<js::WeakMapBase>> gcWeakMapList_;
  public:
    mozilla::LinkedList<js::WeakMapBase>& gcWeakMapList() { return gcWeakMapList_.ref(); }

    typedef js::Vector<JSCompartment*, 1, js::SystemAllocPolicy> CompartmentVector;

  private:
    // The set of compartments in this zone.
    js::ActiveThreadOrGCTaskData<CompartmentVector> compartments_;
  public:
    CompartmentVector& compartments() { return compartments_.ref(); }

    // This zone's gray roots.
    typedef js::Vector<js::gc::Cell*, 0, js::SystemAllocPolicy> GrayRootVector;
  private:
    js::ZoneGroupOrGCTaskData<GrayRootVector> gcGrayRoots_;
  public:
    GrayRootVector& gcGrayRoots() { return gcGrayRoots_.ref(); }

    // This zone's weak edges found via graph traversal during marking,
    // preserved for re-scanning during sweeping.
    using WeakEdges = js::Vector<js::gc::TenuredCell**, 0, js::SystemAllocPolicy>;
  private:
    js::ZoneGroupOrGCTaskData<WeakEdges> gcWeakRefs_;
  public:
    WeakEdges& gcWeakRefs() { return gcWeakRefs_.ref(); }

  private:
    // List of non-ephemeron weak containers to sweep during beginSweepingSweepGroup.
    js::ZoneGroupOrGCTaskData<mozilla::LinkedList<detail::WeakCacheBase>> weakCaches_;
  public:
    mozilla::LinkedList<detail::WeakCacheBase>& weakCaches() { return weakCaches_.ref(); }
    void registerWeakCache(detail::WeakCacheBase* cachep) {
        weakCaches().insertBack(cachep);
    }

  private:
    /*
     * Mapping from not yet marked keys to a vector of all values that the key
     * maps to in any live weak map.
     */
    js::ZoneGroupOrGCTaskData<js::gc::WeakKeyTable> gcWeakKeys_;
  public:
    js::gc::WeakKeyTable& gcWeakKeys() { return gcWeakKeys_.ref(); }

  private:
    // A set of edges from this zone to other zones.
    //
    // This is used during GC while calculating sweep groups to record edges
    // that can't be determined by examining this zone by itself.
    js::ZoneGroupData<ZoneSet> gcSweepGroupEdges_;

  public:
    ZoneSet& gcSweepGroupEdges() { return gcSweepGroupEdges_.ref(); }

    // Keep track of all TypeDescr and related objects in this compartment.
    // This is used by the GC to trace them all first when compacting, since the
    // TypedObject trace hook may access these objects.
    //
    // There are no barriers here - the set contains only tenured objects so no
    // post-barrier is required, and these are weak references so no pre-barrier
    // is required.
    using TypeDescrObjectSet = js::GCHashSet<JSObject*,
                                             js::MovableCellHasher<JSObject*>,
                                             js::SystemAllocPolicy>;
  private:
    js::ZoneGroupData<JS::WeakCache<TypeDescrObjectSet>> typeDescrObjects_;

    // Malloc counter to measure memory pressure for GC scheduling. This
    // counter should be used only when it's not possible to know the size of
    // a free.
    js::gc::MemoryCounter<Zone> gcMallocCounter;

    // Counter of JIT code executable memory for GC scheduling. Also imprecise,
    // since wasm can generate code that outlives a zone.
    js::gc::MemoryCounter<Zone> jitCodeCounter;

  public:
    js::RegExpZone regExps;

    JS::WeakCache<TypeDescrObjectSet>& typeDescrObjects() { return typeDescrObjects_.ref(); }

    bool addTypeDescrObject(JSContext* cx, HandleObject obj);

    bool triggerGCForTooMuchMalloc() {
        JSRuntime* rt = runtimeFromAnyThread();

        if (CurrentThreadCanAccessRuntime(rt)) {
            return rt->gc.triggerZoneGC(this, JS::gcreason::TOO_MUCH_MALLOC,
                                        gcMallocCounter.bytes(), gcMallocCounter.maxBytes());
        }
        return false;
    }

    void resetGCMallocBytes() { gcMallocCounter.reset(); }
    void setGCMaxMallocBytes(size_t value) { gcMallocCounter.setMax(value); }
    void updateMallocCounter(size_t nbytes) { gcMallocCounter.update(this, nbytes); }
    size_t GCMaxMallocBytes() const { return gcMallocCounter.maxBytes(); }
    size_t GCMallocBytes() const { return gcMallocCounter.bytes(); }

    void updateJitCodeMallocBytes(size_t size) { jitCodeCounter.update(this, size); }

    // Resets all the memory counters.
    void resetAllMallocBytes() {
        resetGCMallocBytes();
        jitCodeCounter.reset();
    }
    bool isTooMuchMalloc() const {
        return gcMallocCounter.isTooMuchMalloc() ||
               jitCodeCounter.isTooMuchMalloc();
    }
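
    // Illustrative sketch (assumed embedder pattern; not part of this header):
    // after allocating memory whose size the GC cannot observe on free, bump
    // the counter so the pressure feeds into GC scheduling.
    //
    //   void* data = js_malloc(nbytes);
    //   if (data)
    //       zone->updateMallocCounter(nbytes);  // may trigger a TOO_MUCH_MALLOC GC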

  private:
    // Bitmap of atoms marked by this zone.
    js::ZoneGroupOrGCTaskData<js::SparseBitmap> markedAtoms_;

    // Set of atoms recently used by this Zone. Purged on GC.
    js::ZoneGroupOrGCTaskData<js::AtomSet> atomCache_;

    // Cache storing allocated external strings. Purged on GC.
    js::ZoneGroupOrGCTaskData<js::ExternalStringCache> externalStringCache_;

  public:
    js::SparseBitmap& markedAtoms() { return markedAtoms_.ref(); }

    js::AtomSet& atomCache() { return atomCache_.ref(); }

    js::ExternalStringCache& externalStringCache() { return externalStringCache_.ref(); }

    // Track heap usage under this Zone.
    js::gc::HeapUsage usage;

    // Thresholds used to trigger GC.
    js::gc::ZoneHeapThreshold threshold;

    // Amount of data to allocate before triggering a new incremental slice for
    // the current GC.
    js::UnprotectedData<size_t> gcDelayBytes;

  private:
    // Shared Shape property tree.
    js::ZoneGroupData<js::PropertyTree> propertyTree_;
  public:
    js::PropertyTree& propertyTree() { return propertyTree_.ref(); }

  private:
    // Set of all unowned base shapes in the Zone.
    js::ZoneGroupData<js::BaseShapeSet> baseShapes_;
  public:
    js::BaseShapeSet& baseShapes() { return baseShapes_.ref(); }

  private:
    // Set of initial shapes in the Zone. For certain prototypes -- namely,
    // those of various builtin classes -- there are two entries: one for a
    // lookup via TaggedProto, and one for a lookup via JSProtoKey. See
    // InitialShapeProto.
    js::ZoneGroupData<js::InitialShapeSet> initialShapes_;
  public:
    js::InitialShapeSet& initialShapes() { return initialShapes_.ref(); }

#ifdef JSGC_HASH_TABLE_CHECKS
    void checkInitialShapesTableAfterMovingGC();
    void checkBaseShapeTableAfterMovingGC();
#endif
    void fixupInitialShapeTable();
    void fixupAfterMovingGC();

    // Per-zone data for use by an embedder.
    js::ZoneGroupData<void*> data;

    js::ZoneGroupData<bool> isSystem;

    bool usedByHelperThread() {
        return !isAtomsZone() && group()->usedByHelperThread;
    }

#ifdef DEBUG
    js::ZoneGroupData<unsigned> gcLastSweepGroupIndex;
#endif

    static js::HashNumber UniqueIdToHash(uint64_t uid) {
        return js::HashNumber(uid >> 32) ^ js::HashNumber(uid & 0xFFFFFFFF);
    }

    // Creates a HashNumber based on getUniqueId. Returns false on OOM.
    MOZ_MUST_USE bool getHashCode(js::gc::Cell* cell, js::HashNumber* hashp) {
        uint64_t uid;
        if (!getOrCreateUniqueId(cell, &uid))
            return false;
        *hashp = UniqueIdToHash(uid);
        return true;
    }

    // Gets an existing UID in |uidp| if one exists.
    MOZ_MUST_USE bool maybeGetUniqueId(js::gc::Cell* cell, uint64_t* uidp) {
        MOZ_ASSERT(uidp);
        MOZ_ASSERT(js::CurrentThreadCanAccessZone(this));

        // Get an existing uid, if one has been set.
        auto p = uniqueIds().lookup(cell);
        if (p)
            *uidp = p->value();

        return p.found();
    }

    // Puts an existing UID in |uidp|, or creates a new UID for this Cell and
    // puts that into |uidp|. Returns false on OOM.
    MOZ_MUST_USE bool getOrCreateUniqueId(js::gc::Cell* cell, uint64_t* uidp) {
        MOZ_ASSERT(uidp);
        MOZ_ASSERT(js::CurrentThreadCanAccessZone(this) || js::CurrentThreadIsPerformingGC());

        // Get an existing uid, if one has been set.
        auto p = uniqueIds().lookupForAdd(cell);
        if (p) {
            *uidp = p->value();
            return true;
        }

        MOZ_ASSERT(js::CurrentThreadCanAccessZone(this));

        // Set a new uid on the cell.
        *uidp = js::gc::NextCellUniqueId(runtimeFromAnyThread());
        if (!uniqueIds().add(p, cell, *uidp))
            return false;

        // If the cell was in the nursery, hopefully unlikely, then we need to
        // tell the nursery about it so that it can sweep the uid if the thing
        // does not get tenured.
        if (IsInsideNursery(cell) && !group()->nursery().addedUniqueIdToCell(cell)) {
            uniqueIds().remove(cell);
            return false;
        }

        return true;
    }
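
    // Illustrative sketch (assumed usage; not part of this header): hashing a
    // cell stably across moving GCs via the unique-id API, since a cell's
    // address can change when it is tenured or compacted.
    //
    //   js::HashNumber hash;
    //   if (!zone->getHashCode(cell, &hash))
    //       return false;  // OOM while creating the uid.
    //   // |hash| remains valid even if |cell| moves: the uniqueIds_ table is
    //   // rekeyed via transferUniqueId() when the GC moves the cell.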

    js::HashNumber getHashCodeInfallible(js::gc::Cell* cell) {
        return UniqueIdToHash(getUniqueIdInfallible(cell));
    }

    uint64_t getUniqueIdInfallible(js::gc::Cell* cell) {
        uint64_t uid;
        js::AutoEnterOOMUnsafeRegion oomUnsafe;
        if (!getOrCreateUniqueId(cell, &uid))
            oomUnsafe.crash("failed to allocate uid");
        return uid;
    }

    // Return true if this cell has a UID associated with it.
    MOZ_MUST_USE bool hasUniqueId(js::gc::Cell* cell) {
        MOZ_ASSERT(js::CurrentThreadCanAccessZone(this) || js::CurrentThreadIsPerformingGC());
        return uniqueIds().has(cell);
    }

    // Transfer an id from another cell. This must only be called on behalf of a
    // moving GC. This method is infallible.
    void transferUniqueId(js::gc::Cell* tgt, js::gc::Cell* src) {
        MOZ_ASSERT(src != tgt);
        MOZ_ASSERT(!IsInsideNursery(tgt));
        MOZ_ASSERT(CurrentThreadCanAccessRuntime(runtimeFromActiveCooperatingThread()));
        MOZ_ASSERT(js::CurrentThreadCanAccessZone(this));
        uniqueIds().rekeyIfMoved(src, tgt);
    }

    // Remove any unique id associated with this Cell.
    void removeUniqueId(js::gc::Cell* cell) {
        MOZ_ASSERT(js::CurrentThreadCanAccessZone(this));
        uniqueIds().remove(cell);
    }

    // When finished parsing off-thread, transfer any UIDs we created in the
    // off-thread zone into the target zone.
    void adoptUniqueIds(JS::Zone* source) {
        js::AutoEnterOOMUnsafeRegion oomUnsafe;
        for (js::gc::UniqueIdMap::Enum e(source->uniqueIds()); !e.empty(); e.popFront()) {
            MOZ_ASSERT(!uniqueIds().has(e.front().key()));
            if (!uniqueIds().put(e.front().key(), e.front().value()))
                oomUnsafe.crash("failed to transfer unique ids from off-thread");
        }
        source->uniqueIds().clear();
    }

#ifdef JSGC_HASH_TABLE_CHECKS
    // Assert that the UniqueId table has been redirected successfully.
    void checkUniqueIdTableAfterMovingGC();
#endif

    bool keepShapeTables() const {
        return keepShapeTables_;
    }
    void setKeepShapeTables(bool b) {
        keepShapeTables_ = b;
    }

  private:
    js::ZoneGroupData<js::jit::JitZone*> jitZone_;

    js::ActiveThreadData<bool> gcScheduled_;
    js::ZoneGroupData<bool> gcPreserveCode_;
    js::ZoneGroupData<bool> keepShapeTables_;

    // Allow zones to be linked into a list.
    friend class js::gc::ZoneList;
    static Zone* const NotOnList;
    js::ZoneGroupOrGCTaskData<Zone*> listNext_;
    bool isOnList() const;
    Zone* nextZone() const;

    friend bool js::CurrentThreadCanAccessZone(Zone* zone);
    friend class js::gc::GCRuntime;
};

} // namespace JS

namespace js {

// Iterate over all zone groups except those which may be in use by helper
// thread parse tasks.
class ZoneGroupsIter
{
    gc::AutoEnterIteration iterMarker;
    ZoneGroup** it;
    ZoneGroup** end;

  public:
    explicit ZoneGroupsIter(JSRuntime* rt) : iterMarker(&rt->gc) {
        it = rt->gc.groups.ref().begin();
        end = rt->gc.groups.ref().end();

        if (!done() && (*it)->usedByHelperThread)
            next();
    }

    bool done() const { return it == end; }

    void next() {
        MOZ_ASSERT(!done());
        do {
            it++;
        } while (!done() && (*it)->usedByHelperThread);
    }

    ZoneGroup* get() const {
        MOZ_ASSERT(!done());
        return *it;
    }

    operator ZoneGroup*() const { return get(); }
    ZoneGroup* operator->() const { return get(); }
};
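
// Illustrative sketch (assumed usage; not part of this header): the iterator
// classes in this file share a done()/next()/get() protocol plus implicit
// conversions. processGroup() is a hypothetical callback.
//
//   for (ZoneGroupsIter group(rt); !group.done(); group.next())
//       processGroup(group);  // converts to ZoneGroup*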

// Using the atoms zone without holding the exclusive access lock is dangerous
// because worker threads may be using it simultaneously. Therefore, it's
// better to skip the atoms zone when iterating over zones. If you need to
// iterate over the atoms zone, consider taking the exclusive access lock first.
enum ZoneSelector {
    WithAtoms,
    SkipAtoms
};

// Iterate over all zones in one zone group.
class ZonesInGroupIter
{
    gc::AutoEnterIteration iterMarker;
    JS::Zone** it;
    JS::Zone** end;

  public:
    explicit ZonesInGroupIter(ZoneGroup* group) : iterMarker(&group->runtime->gc) {
        it = group->zones().begin();
        end = group->zones().end();
    }

    bool done() const { return it == end; }

    void next() {
        MOZ_ASSERT(!done());
        it++;
    }

    JS::Zone* get() const {
        MOZ_ASSERT(!done());
        return *it;
    }

    operator JS::Zone*() const { return get(); }
    JS::Zone* operator->() const { return get(); }
};

// Iterate over all zones in the runtime, except those which may be in use by
// parse threads.
class ZonesIter
{
    ZoneGroupsIter group;
    Maybe<ZonesInGroupIter> zone;
    JS::Zone* atomsZone;

  public:
    ZonesIter(JSRuntime* rt, ZoneSelector selector)
      : group(rt), atomsZone(selector == WithAtoms ? rt->gc.atomsZone.ref() : nullptr)
    {
        if (!atomsZone && !done())
            next();
    }

    bool atAtomsZone(JSRuntime* rt) const {
        return !!atomsZone;
    }

    bool done() const { return !atomsZone && group.done(); }

    void next() {
        MOZ_ASSERT(!done());
        if (atomsZone)
            atomsZone = nullptr;
        while (!group.done()) {
            if (zone.isSome())
                zone.ref().next();
            else
                zone.emplace(group);
            if (zone.ref().done()) {
                zone.reset();
                group.next();
            } else {
                break;
            }
        }
    }

    JS::Zone* get() const {
        MOZ_ASSERT(!done());
        return atomsZone ? atomsZone : zone.ref().get();
    }

    operator JS::Zone*() const { return get(); }
    JS::Zone* operator->() const { return get(); }
};
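
// Illustrative sketch (assumed usage; not part of this header): visiting all
// zones while skipping the shared atoms zone, per the comment above.
// processZone() is a hypothetical callback.
//
//   for (ZonesIter zone(rt, SkipAtoms); !zone.done(); zone.next())
//       processZone(zone);  // converts to JS::Zone*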

struct CompartmentsInZoneIter
{
    explicit CompartmentsInZoneIter(JS::Zone* zone) : zone(zone) {
        it = zone->compartments().begin();
    }

    bool done() const {
        MOZ_ASSERT(it);
        return it < zone->compartments().begin() ||
               it >= zone->compartments().end();
    }
    void next() {
        MOZ_ASSERT(!done());
        it++;
    }

    JSCompartment* get() const {
        MOZ_ASSERT(it);
        return *it;
    }

    operator JSCompartment*() const { return get(); }
    JSCompartment* operator->() const { return get(); }

  private:
    JS::Zone* zone;
    JSCompartment** it;

    CompartmentsInZoneIter()
      : zone(nullptr), it(nullptr)
    {}

    // This is for the benefit of CompartmentsIterT::comp.
    friend class mozilla::Maybe<CompartmentsInZoneIter>;
};

// This iterator iterates over all the compartments in a given set of zones.
// The set of zones is determined by iterating ZonesIterT.
template<class ZonesIterT>
class CompartmentsIterT
{
    gc::AutoEnterIteration iterMarker;
    ZonesIterT zone;
    mozilla::Maybe<CompartmentsInZoneIter> comp;

  public:
    explicit CompartmentsIterT(JSRuntime* rt)
      : iterMarker(&rt->gc), zone(rt)
    {
        if (zone.done())
            comp.emplace();
        else
            comp.emplace(zone);
    }

    CompartmentsIterT(JSRuntime* rt, ZoneSelector selector)
      : iterMarker(&rt->gc), zone(rt, selector)
    {
        if (zone.done())
            comp.emplace();
        else
            comp.emplace(zone);
    }

    bool done() const { return zone.done(); }

    void next() {
        MOZ_ASSERT(!done());
        MOZ_ASSERT(!comp.ref().done());
        comp->next();
        if (comp->done()) {
            comp.reset();
            zone.next();
            if (!zone.done())
                comp.emplace(zone);
        }
    }

    JSCompartment* get() const {
        MOZ_ASSERT(!done());
        return *comp;
    }

    operator JSCompartment*() const { return get(); }
    JSCompartment* operator->() const { return get(); }
};

typedef CompartmentsIterT<ZonesIter> CompartmentsIter;
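
// Illustrative sketch (assumed usage; not part of this header): visiting every
// compartment in the runtime, excluding those in the atoms zone.
// processCompartment() is a hypothetical callback.
//
//   for (CompartmentsIter comp(rt, SkipAtoms); !comp.done(); comp.next())
//       processCompartment(comp);  // converts to JSCompartment*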

/*
 * Allocation policy that uses Zone::pod_malloc and friends, so that memory
 * pressure is accounted for on the zone. This is suitable for memory associated
 * with GC things allocated in the zone.
 *
 * Since it doesn't hold a JSContext (those may not live long enough), it can't
 * report out-of-memory conditions itself; the caller must check for OOM and
 * take the appropriate action.
 *
 * FIXME bug 647103 - replace these *AllocPolicy names.
 */
class ZoneAllocPolicy
{
    Zone* const zone;

  public:
    MOZ_IMPLICIT ZoneAllocPolicy(Zone* zone) : zone(zone) {}

    template <typename T>
    T* maybe_pod_malloc(size_t numElems) {
        return zone->maybe_pod_malloc<T>(numElems);
    }

    template <typename T>
    T* maybe_pod_calloc(size_t numElems) {
        return zone->maybe_pod_calloc<T>(numElems);
    }

    template <typename T>
    T* maybe_pod_realloc(T* p, size_t oldSize, size_t newSize) {
        return zone->maybe_pod_realloc<T>(p, oldSize, newSize);
    }

    template <typename T>
    T* pod_malloc(size_t numElems) {
        return zone->pod_malloc<T>(numElems);
    }

    template <typename T>
    T* pod_calloc(size_t numElems) {
        return zone->pod_calloc<T>(numElems);
    }

    template <typename T>
    T* pod_realloc(T* p, size_t oldSize, size_t newSize) {
        return zone->pod_realloc<T>(p, oldSize, newSize);
    }

    void free_(void* p) { js_free(p); }
    void reportAllocOverflow() const {}

    MOZ_MUST_USE bool checkSimulatedOOM() const {
        return !js::oom::ShouldFailWithOOM();
    }
};
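
/*
 * Illustrative sketch (assumed usage; not part of this header): a container
 * whose memory is charged against a zone via this policy. Remember that the
 * policy cannot report OOM itself; the caller must.
 *
 *   ZoneAllocPolicy policy(zone);
 *   js::Vector<uint32_t, 0, ZoneAllocPolicy> vec(policy);
 *   if (!vec.append(42))
 *       return false;  // Out of memory; handle it here.
 */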

/*
 * Provides a delete policy that can be used for objects which have their
 * lifetime managed by the GC so they can be safely destroyed outside of GC.
 *
 * This is necessary, for example, when initializing such an object may fail after
 * the initial allocation. The partially-initialized object must be destroyed,
 * but it may not be safe to do so at the current time as the store buffer may
 * contain pointers into it.
 *
 * This policy traces GC pointers in the object and clears them, making sure to
 * trigger barriers while doing so. This will remove any store buffer pointers
 * into the object and make it safe to delete.
 */
template <typename T>
struct GCManagedDeletePolicy
{
    struct ClearEdgesTracer : public JS::CallbackTracer
    {
        explicit ClearEdgesTracer(JSContext* cx) : CallbackTracer(cx, TraceWeakMapKeysValues) {}
#ifdef DEBUG
        TracerKind getTracerKind() const override { return TracerKind::ClearEdges; }
#endif

        template <typename S>
        void clearEdge(S** thingp) {
            InternalBarrierMethods<S*>::preBarrier(*thingp);
            InternalBarrierMethods<S*>::postBarrier(thingp, *thingp, nullptr);
            *thingp = nullptr;
        }

        void onObjectEdge(JSObject** objp) override { clearEdge(objp); }
        void onStringEdge(JSString** strp) override { clearEdge(strp); }
        void onSymbolEdge(JS::Symbol** symp) override { clearEdge(symp); }
        void onScriptEdge(JSScript** scriptp) override { clearEdge(scriptp); }
        void onShapeEdge(js::Shape** shapep) override { clearEdge(shapep); }
        void onObjectGroupEdge(js::ObjectGroup** groupp) override { clearEdge(groupp); }
        void onBaseShapeEdge(js::BaseShape** basep) override { clearEdge(basep); }
        void onJitCodeEdge(js::jit::JitCode** codep) override { clearEdge(codep); }
        void onLazyScriptEdge(js::LazyScript** lazyp) override { clearEdge(lazyp); }
        void onScopeEdge(js::Scope** scopep) override { clearEdge(scopep); }
        void onRegExpSharedEdge(js::RegExpShared** sharedp) override { clearEdge(sharedp); }
        void onChild(const JS::GCCellPtr& thing) override { MOZ_CRASH(); }
    };

    void operator()(const T* constPtr) {
        if (constPtr) {
            auto ptr = const_cast<T*>(constPtr);
            ClearEdgesTracer trc(TlsContext.get());
            ptr->trace(&trc);
            js_delete(ptr);
        }
    }
};
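
/*
 * Illustrative sketch (assumed usage; not part of this header): pairing a
 * GC-managed object with this policy, via the DeletePolicy specializations
 * at the end of this file, so a failed initialization path can destroy it
 * safely. allocateData() and initData() are hypothetical helpers.
 *
 *   js::UniquePtr<js::FunctionScope::Data> data(allocateData(cx));
 *   if (!data || !initData(cx, data.get()))
 *       return nullptr;  // On destruction the policy clears the object's GC
 *                        // edges (triggering barriers), then deletes it.
 */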

#ifdef DEBUG
inline bool
IsClearEdgesTracer(JSTracer* trc)
{
    return trc->isCallbackTracer() &&
           trc->asCallbackTracer()->getTracerKind() == JS::CallbackTracer::TracerKind::ClearEdges;
}
#endif

} // namespace js

namespace JS {

// Scope data that contain GCPtrs must use the correct DeletePolicy.
//
// This is defined here because vm/Scope.h cannot #include "vm/Runtime.h".

template <>
struct DeletePolicy<js::FunctionScope::Data>
  : public js::GCManagedDeletePolicy<js::FunctionScope::Data>
{ };

template <>
struct DeletePolicy<js::ModuleScope::Data>
  : public js::GCManagedDeletePolicy<js::ModuleScope::Data>
{ };

} // namespace JS

#endif // gc_Zone_h