Line data Source code
1 : /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
2 : * vim: set ts=8 sts=4 et sw=4 tw=99:
3 : * This Source Code Form is subject to the terms of the Mozilla Public
4 : * License, v. 2.0. If a copy of the MPL was not distributed with this
5 : * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
6 :
7 : #ifndef gc_Barrier_h
8 : #define gc_Barrier_h
9 :
10 : #include "NamespaceImports.h"
11 :
12 : #include "gc/Heap.h"
13 : #include "gc/StoreBuffer.h"
14 : #include "js/HeapAPI.h"
15 : #include "js/Id.h"
16 : #include "js/RootingAPI.h"
17 : #include "js/Value.h"
18 :
19 : /*
20 : * A write barrier is a mechanism used by incremental or generation GCs to
21 : * ensure that every value that needs to be marked is marked. In general, the
22 : * write barrier should be invoked whenever a write can cause the set of things
23 : * traced through by the GC to change. This includes:
24 : * - writes to object properties
25 : * - writes to array slots
26 : * - writes to fields like JSObject::shape_ that we trace through
27 : * - writes to fields in private data
28 : * - writes to non-markable fields like JSObject::private that point to
29 : * markable data
30 : * The last category is the trickiest. Even though the private pointers does not
31 : * point to a GC thing, changing the private pointer may change the set of
32 : * objects that are traced by the GC. Therefore it needs a write barrier.
33 : *
34 : * Every barriered write should have the following form:
35 : * <pre-barrier>
36 : * obj->field = value; // do the actual write
37 : * <post-barrier>
38 : * The pre-barrier is used for incremental GC and the post-barrier is for
39 : * generational GC.
40 : *
41 : * PRE-BARRIER
42 : *
43 : * To understand the pre-barrier, let's consider how incremental GC works. The
44 : * GC itself is divided into "slices". Between each slice, JS code is allowed to
45 : * run. Each slice should be short so that the user doesn't notice the
46 : * interruptions. In our GC, the structure of the slices is as follows:
47 : *
48 : * 1. ... JS work, which leads to a request to do GC ...
49 : * 2. [first GC slice, which performs all root marking and possibly more marking]
50 : * 3. ... more JS work is allowed to run ...
51 : * 4. [GC mark slice, which runs entirely in drainMarkStack]
52 : * 5. ... more JS work ...
53 : * 6. [GC mark slice, which runs entirely in drainMarkStack]
54 : * 7. ... more JS work ...
55 : * 8. [GC marking finishes; sweeping done non-incrementally; GC is done]
56 : * 9. ... JS continues uninterrupted now that GC is finishes ...
57 : *
58 : * Of course, there may be a different number of slices depending on how much
59 : * marking is to be done.
60 : *
61 : * The danger inherent in this scheme is that the JS code in steps 3, 5, and 7
62 : * might change the heap in a way that causes the GC to collect an object that
63 : * is actually reachable. The write barrier prevents this from happening. We use
64 : * a variant of incremental GC called "snapshot at the beginning." This approach
65 : * guarantees the invariant that if an object is reachable in step 2, then we
66 : * will mark it eventually. The name comes from the idea that we take a
67 : * theoretical "snapshot" of all reachable objects in step 2; all objects in
68 : * that snapshot should eventually be marked. (Note that the write barrier
69 : * verifier code takes an actual snapshot.)
70 : *
71 : * The basic correctness invariant of a snapshot-at-the-beginning collector is
72 : * that any object reachable at the end of the GC (step 9) must either:
73 : * (1) have been reachable at the beginning (step 2) and thus in the snapshot
74 : * (2) or must have been newly allocated, in steps 3, 5, or 7.
75 : * To deal with case (2), any objects allocated during an incremental GC are
76 : * automatically marked black.
77 : *
78 : * This strategy is actually somewhat conservative: if an object becomes
79 : * unreachable between steps 2 and 8, it would be safe to collect it. We won't,
80 : * mainly for simplicity. (Also, note that the snapshot is entirely
81 : * theoretical. We don't actually do anything special in step 2 that we wouldn't
82 : * do in a non-incremental GC.
83 : *
84 : * It's the pre-barrier's job to maintain the snapshot invariant. Consider the
85 : * write "obj->field = value". Let the prior value of obj->field be
86 : * value0. Since it's possible that value0 may have been what obj->field
87 : * contained in step 2, when the snapshot was taken, the barrier marks
88 : * value0. Note that it only does this if we're in the middle of an incremental
89 : * GC. Since this is rare, the cost of the write barrier is usually just an
90 : * extra branch.
91 : *
92 : * In practice, we implement the pre-barrier differently based on the type of
93 : * value0. E.g., see JSObject::writeBarrierPre, which is used if obj->field is
94 : * a JSObject*. It takes value0 as a parameter.
95 : *
96 : * READ-BARRIER
97 : *
98 : * Incremental GC requires that weak pointers have read barriers. The problem
99 : * happens when, during an incremental GC, some code reads a weak pointer and
100 : * writes it somewhere on the heap that has been marked black in a previous
101 : * slice. Since the weak pointer will not otherwise be marked and will be swept
102 : * and finalized in the last slice, this will leave the pointer just written
103 : * dangling after the GC. To solve this, we immediately mark black all weak
104 : * pointers that get read between slices so that it is safe to store them in an
105 : * already marked part of the heap, e.g. in Rooted.
106 : *
107 : * POST-BARRIER
108 : *
109 : * For generational GC, we want to be able to quickly collect the nursery in a
110 : * minor collection. Part of the way this is achieved is to only mark the
111 : * nursery itself; tenured things, which may form the majority of the heap, are
112 : * not traced through or marked. This leads to the problem of what to do about
113 : * tenured objects that have pointers into the nursery: if such things are not
114 : * marked, they may be discarded while there are still live objects which
115 : * reference them. The solution is to maintain information about these pointers,
116 : * and mark their targets when we start a minor collection.
117 : *
118 : * The pointers can be thought of as edges in object graph, and the set of edges
119 : * from the tenured generation into the nursery is know as the remembered set.
120 : * Post barriers are used to track this remembered set.
121 : *
122 : * Whenever a slot which could contain such a pointer is written, we use a write
123 : * barrier to check if the edge created is in the remembered set, and if so we
124 : * insert it into the store buffer, which is the collector's representation of
125 : * the remembered set. This means than when we come to do a minor collection we
126 : * can examine the contents of the store buffer and mark any edge targets that
127 : * are in the nursery.
128 : *
129 : * IMPLEMENTATION DETAILS
130 : *
131 : * Since it would be awkward to change every write to memory into a function
132 : * call, this file contains a bunch of C++ classes and templates that use
133 : * operator overloading to take care of barriers automatically. In many cases,
134 : * all that's necessary to make some field be barriered is to replace
135 : * Type* field;
136 : * with
137 : * GCPtr<Type> field;
138 : *
139 : * One additional note: not all object writes need to be pre-barriered. Writes
140 : * to newly allocated objects do not need a pre-barrier. In these cases, we use
141 : * the "obj->field.init(value)" method instead of "obj->field = value". We use
142 : * the init naming idiom in many places to signify that a field is being
143 : * assigned for the first time.
144 : *
145 : * This file implements four classes, illustrated here:
146 : *
147 : * BarrieredBase base class of all barriers
148 : * | |
149 : * | WriteBarrieredBase base class which provides common write operations
150 : * | | | | |
151 : * | | | | PreBarriered provides pre-barriers only
152 : * | | | |
153 : * | | | GCPtr provides pre- and post-barriers
154 : * | | |
155 : * | | HeapPtr provides pre- and post-barriers; is relocatable
156 : * | | and deletable for use inside C++ managed memory
157 : * | |
158 : * | HeapSlot similar to GCPtr, but tailored to slots storage
159 : * |
160 : * ReadBarrieredBase base class which provides common read operations
161 : * |
162 : * ReadBarriered provides read barriers only
163 : *
164 : *
165 : * The implementation of the barrier logic is implemented on T::writeBarrier.*,
166 : * via:
167 : *
168 : * WriteBarrieredBase<T>::pre
169 : * -> InternalBarrierMethods<T*>::preBarrier
170 : * -> T::writeBarrierPre
171 : * -> InternalBarrierMethods<Value>::preBarrier
172 : * -> InternalBarrierMethods<jsid>::preBarrier
173 : * -> InternalBarrierMethods<T*>::preBarrier
174 : * -> T::writeBarrierPre
175 : *
176 : * GCPtr<T>::post and HeapPtr<T>::post
177 : * -> InternalBarrierMethods<T*>::postBarrier
178 : * -> T::writeBarrierPost
179 : * -> InternalBarrierMethods<Value>::postBarrier
180 : * -> StoreBuffer::put
181 : *
182 : * These classes are designed to be used by the internals of the JS engine.
183 : * Barriers designed to be used externally are provided in js/RootingAPI.h.
184 : * These external barriers call into the same post-barrier implementations at
185 : * InternalBarrierMethods<T>::post via an indirect call to Heap(.+)Barrier.
186 : *
187 : * These clases are designed to be used to wrap GC thing pointers or values that
188 : * act like them (i.e. JS::Value and jsid). It is possible to use them for
189 : * other types by supplying the necessary barrier implementations but this
190 : * is not usually necessary and should be done with caution.
191 : */
192 :
193 : class JSAtom;
194 : struct JSCompartment;
195 : class JSFlatString;
196 : class JSLinearString;
197 :
198 : namespace JS {
199 : class Symbol;
200 : } // namespace JS
201 :
202 : namespace js {
203 :
204 : class AccessorShape;
205 : class ArrayObject;
206 : class ArgumentsObject;
207 : class ArrayBufferObjectMaybeShared;
208 : class ArrayBufferObject;
209 : class ArrayBufferViewObject;
210 : class SharedArrayBufferObject;
211 : class BaseShape;
212 : class DebugEnvironmentProxy;
213 : class GlobalObject;
214 : class LazyScript;
215 : class ModuleObject;
216 : class ModuleEnvironmentObject;
217 : class ModuleNamespaceObject;
218 : class NativeObject;
219 : class PlainObject;
220 : class PropertyName;
221 : class SavedFrame;
222 : class EnvironmentObject;
223 : class ScriptSourceObject;
224 : class Shape;
225 : class UnownedBaseShape;
226 : class ObjectGroup;
227 :
228 : namespace jit {
229 : class JitCode;
230 : } // namespace jit
231 :
232 : #ifdef DEBUG
233 : // Barriers can't be triggered during backend Ion compilation, which may run on
234 : // a helper thread.
235 : bool
236 : CurrentThreadIsIonCompiling();
237 :
238 : bool
239 : CurrentThreadIsIonCompilingSafeForMinorGC();
240 :
241 : bool
242 : CurrentThreadIsGCSweeping();
243 :
244 : bool
245 : IsMarkedBlack(JSObject* obj);
246 : #endif
247 :
248 : MOZ_ALWAYS_INLINE void
249 563089 : CheckEdgeIsNotBlackToGray(JSObject* src, const Value& dst)
250 : {
251 563089 : MOZ_ASSERT_IF(IsMarkedBlack(src), JS::ValueIsNotGray(dst));
252 563096 : }
253 :
254 : template <typename T>
255 : struct InternalBarrierMethods {};
256 :
257 : template <typename T>
258 : struct InternalBarrierMethods<T*>
259 : {
260 404331 : static bool isMarkable(T* v) { return v != nullptr; }
261 :
262 619076 : static void preBarrier(T* v) { T::writeBarrierPre(v); }
263 :
264 2107698 : static void postBarrier(T** vp, T* prev, T* next) { T::writeBarrierPost(vp, prev, next); }
265 :
266 392965 : static void readBarrier(T* v) { T::readBarrier(v); }
267 : };
268 :
269 : template <typename S> struct PreBarrierFunctor : public VoidDefaultAdaptor<S> {
270 : template <typename T> void operator()(T* t);
271 : };
272 :
273 : template <typename S> struct ReadBarrierFunctor : public VoidDefaultAdaptor<S> {
274 : template <typename T> void operator()(T* t);
275 : };
276 :
277 : template <>
278 : struct InternalBarrierMethods<Value>
279 : {
280 35285 : static bool isMarkable(const Value& v) { return v.isGCThing(); }
281 :
282 256699 : static void preBarrier(const Value& v) {
283 256699 : DispatchTyped(PreBarrierFunctor<Value>(), v);
284 256699 : }
285 :
286 149825 : static void postBarrier(Value* vp, const Value& prev, const Value& next) {
287 149825 : MOZ_ASSERT(!CurrentThreadIsIonCompiling());
288 149823 : MOZ_ASSERT(vp);
289 :
290 : // If the target needs an entry, add it.
291 : js::gc::StoreBuffer* sb;
292 149823 : if (next.isObject() && (sb = reinterpret_cast<gc::Cell*>(&next.toObject())->storeBuffer())) {
293 : // If we know that the prev has already inserted an entry, we can
294 : // skip doing the lookup to add the new entry. Note that we cannot
295 : // safely assert the presence of the entry because it may have been
296 : // added via a different store buffer.
297 5041 : if (prev.isObject() && reinterpret_cast<gc::Cell*>(&prev.toObject())->storeBuffer())
298 1 : return;
299 5040 : sb->putValue(vp);
300 5040 : return;
301 : }
302 : // Remove the prev entry if the new value does not need it.
303 144782 : if (prev.isObject() && (sb = reinterpret_cast<gc::Cell*>(&prev.toObject())->storeBuffer()))
304 536 : sb->unputValue(vp);
305 : }
306 :
307 20105 : static void readBarrier(const Value& v) {
308 20105 : DispatchTyped(ReadBarrierFunctor<Value>(), v);
309 20105 : }
310 : };
311 :
312 : template <>
313 : struct InternalBarrierMethods<jsid>
314 : {
315 117 : static bool isMarkable(jsid id) { return JSID_IS_GCTHING(id); }
316 1034 : static void preBarrier(jsid id) { DispatchTyped(PreBarrierFunctor<jsid>(), id); }
317 27104 : static void postBarrier(jsid* idp, jsid prev, jsid next) {}
318 : };
319 :
320 : // Base class of all barrier types.
321 : template <typename T>
322 : class BarrieredBase
323 : {
324 : protected:
325 : // BarrieredBase is not directly instantiable.
326 776040 : explicit BarrieredBase(const T& v) : value(v) {}
327 :
328 : // BarrieredBase subclasses cannot be copy constructed by default.
329 29191 : BarrieredBase(const BarrieredBase<T>& other) = default;
330 :
331 : // Storage for all barrier classes. |value| must be a GC thing reference
332 : // type: either a direct pointer to a GC thing or a supported tagged
333 : // pointer that can reference GC things, such as JS::Value or jsid. Nested
334 : // barrier types are NOT supported. See assertTypeConstraints.
335 : T value;
336 :
337 : public:
338 : // Note: this is public because C++ cannot friend to a specific template instantiation.
339 : // Friending to the generic template leads to a number of unintended consequences, including
340 : // template resolution ambiguity and a circular dependency with Tracing.h.
341 55619 : T* unsafeUnbarrieredForTracing() { return &value; }
342 : };
343 :
344 : // Base class for barriered pointer types that intercept only writes.
345 : template <class T>
346 : class WriteBarrieredBase : public BarrieredBase<T>,
347 : public WrappedPtrOperations<T, WriteBarrieredBase<T>>
348 : {
349 : protected:
350 : using BarrieredBase<T>::value;
351 :
352 : // WriteBarrieredBase is not directly instantiable.
353 695737 : explicit WriteBarrieredBase(const T& v) : BarrieredBase<T>(v) {}
354 :
355 : public:
356 : using ElementType = T;
357 :
358 69081333 : DECLARE_POINTER_CONSTREF_OPS(T);
359 :
360 : // Use this if the automatic coercion to T isn't working.
361 83091215 : const T& get() const { return this->value; }
362 :
363 : // Use this if you want to change the value without invoking barriers.
364 : // Obviously this is dangerous unless you know the barrier is not needed.
365 0 : void unsafeSet(const T& v) { this->value = v; }
366 :
367 : // For users who need to manually barrier the raw types.
368 53 : static void writeBarrierPre(const T& v) { InternalBarrierMethods<T>::preBarrier(v); }
369 :
370 : protected:
371 867490 : void pre() { InternalBarrierMethods<T>::preBarrier(this->value); }
372 1929503 : void post(const T& prev, const T& next) {
373 1929503 : InternalBarrierMethods<T>::postBarrier(&this->value, prev, next);
374 1929518 : }
375 : };
376 :
377 : /*
378 : * PreBarriered only automatically handles pre-barriers. Post-barriers must be
379 : * manually implemented when using this class. GCPtr and HeapPtr should be used
380 : * in all cases that do not require explicit low-level control of moving
381 : * behavior, e.g. for HashMap keys.
382 : */
383 : template <class T>
384 : class PreBarriered : public WriteBarrieredBase<T>
385 : {
386 : public:
387 : PreBarriered() : WriteBarrieredBase<T>(JS::GCPolicy<T>::initial()) {}
388 : /*
389 : * Allow implicit construction for use in generic contexts, such as
390 : * DebuggerWeakMap::markKeys.
391 : */
392 108139 : MOZ_IMPLICIT PreBarriered(const T& v) : WriteBarrieredBase<T>(v) {}
393 4271 : explicit PreBarriered(const PreBarriered<T>& v) : WriteBarrieredBase<T>(v.value) {}
394 5345 : ~PreBarriered() { this->pre(); }
395 :
396 311 : void init(const T& v) {
397 311 : this->value = v;
398 311 : }
399 :
400 : /* Use to set the pointer to nullptr. */
401 : void clear() {
402 : this->pre();
403 : this->value = nullptr;
404 : }
405 :
406 3392 : DECLARE_POINTER_ASSIGN_OPS(PreBarriered, T);
407 :
408 : private:
409 3392 : void set(const T& v) {
410 3392 : this->pre();
411 3392 : this->value = v;
412 3392 : }
413 : };
414 :
415 : /*
416 : * A pre- and post-barriered heap pointer, for use inside the JS engine.
417 : *
418 : * It must only be stored in memory that has GC lifetime. GCPtr must not be
419 : * used in contexts where it may be implicitly moved or deleted, e.g. most
420 : * containers.
421 : *
422 : * The post-barriers implemented by this class are faster than those
423 : * implemented by js::HeapPtr<T> or JS::Heap<T> at the cost of not
424 : * automatically handling deletion or movement.
425 : */
426 : template <class T>
427 : class GCPtr : public WriteBarrieredBase<T>
428 : {
429 : public:
430 197567 : GCPtr() : WriteBarrieredBase<T>(JS::GCPolicy<T>::initial()) {}
431 350770 : explicit GCPtr(const T& v) : WriteBarrieredBase<T>(v) {
432 350770 : this->post(JS::GCPolicy<T>::initial(), v);
433 350769 : }
434 14028 : explicit GCPtr(const GCPtr<T>& v) : WriteBarrieredBase<T>(v) {
435 14028 : this->post(JS::GCPolicy<T>::initial(), v);
436 14028 : }
437 : #ifdef DEBUG
438 0 : ~GCPtr() {
439 : // No barriers are necessary as this only happens when we are sweeping
440 : // or when after GCManagedDeletePolicy has triggered the barriers for us
441 : // and cleared the pointer.
442 : //
443 : // If you get a crash here, you may need to make the containing object
444 : // use GCManagedDeletePolicy and use JS::DeletePolicy to destroy it.
445 : //
446 : // Note that when sweeping the wrapped pointer may already have been
447 : // freed by this point.
448 0 : MOZ_ASSERT(CurrentThreadIsGCSweeping() || this->value == JS::GCPolicy<T>::initial());
449 0 : Poison(this, JS_FREED_HEAP_PTR_PATTERN, sizeof(*this));
450 0 : }
451 : #endif
452 :
453 858995 : void init(const T& v) {
454 858995 : this->value = v;
455 858995 : this->post(JS::GCPolicy<T>::initial(), v);
456 859005 : }
457 :
458 596059 : DECLARE_POINTER_ASSIGN_OPS(GCPtr, T);
459 :
460 : private:
461 596099 : void set(const T& v) {
462 596099 : this->pre();
463 596102 : T tmp = this->value;
464 596102 : this->value = v;
465 596102 : this->post(tmp, this->value);
466 596104 : }
467 :
468 : /*
469 : * Unlike HeapPtr<T>, GCPtr<T> must be managed with GC lifetimes.
470 : * Specifically, the memory used by the pointer itself must be live until
471 : * at least the next minor GC. For that reason, move semantics are invalid
472 : * and are deleted here. Please note that not all containers support move
473 : * semantics, so this does not completely prevent invalid uses.
474 : */
475 : GCPtr(GCPtr<T>&&) = delete;
476 : GCPtr<T>& operator=(GCPtr<T>&&) = delete;
477 : };
478 :
479 : /*
480 : * A pre- and post-barriered heap pointer, for use inside the JS engine. These
481 : * heap pointers can be stored in C++ containers like GCVector and GCHashMap.
482 : *
483 : * The GC sometimes keeps pointers to pointers to GC things --- for example, to
484 : * track references into the nursery. However, C++ containers like GCVector and
485 : * GCHashMap usually reserve the right to relocate their elements any time
486 : * they're modified, invalidating all pointers to the elements. HeapPtr
487 : * has a move constructor which knows how to keep the GC up to date if it is
488 : * moved to a new location.
489 : *
490 : * However, because of this additional communication with the GC, HeapPtr
491 : * is somewhat slower, so it should only be used in contexts where this ability
492 : * is necessary.
493 : *
494 : * Obviously, JSObjects, JSStrings, and the like get tenured and compacted, so
495 : * whatever pointers they contain get relocated, in the sense used here.
496 : * However, since the GC itself is moving those values, it takes care of its
497 : * internal pointers to those pointers itself. HeapPtr is only necessary
498 : * when the relocation would otherwise occur without the GC's knowledge.
499 : */
500 : template <class T>
501 : class HeapPtr : public WriteBarrieredBase<T>
502 : {
503 : public:
504 839 : HeapPtr() : WriteBarrieredBase<T>(JS::GCPolicy<T>::initial()) {}
505 :
506 : // Implicitly adding barriers is a reasonable default.
507 33653 : MOZ_IMPLICIT HeapPtr(const T& v) : WriteBarrieredBase<T>(v) {
508 33653 : this->post(JS::GCPolicy<T>::initial(), this->value);
509 33653 : }
510 :
511 : /*
512 : * For HeapPtr, move semantics are equivalent to copy semantics. In
513 : * C++, a copy constructor taking const-ref is the way to get a single
514 : * function that will be used for both lvalue and rvalue copies, so we can
515 : * simply omit the rvalue variant.
516 : */
517 16428 : MOZ_IMPLICIT HeapPtr(const HeapPtr<T>& v) : WriteBarrieredBase<T>(v) {
518 16428 : this->post(JS::GCPolicy<T>::initial(), this->value);
519 16428 : }
520 :
521 44510 : ~HeapPtr() {
522 44510 : this->pre();
523 44510 : this->post(this->value, JS::GCPolicy<T>::initial());
524 44510 : }
525 :
526 0 : void init(const T& v) {
527 0 : this->value = v;
528 0 : this->post(JS::GCPolicy<T>::initial(), this->value);
529 0 : }
530 :
531 14118 : DECLARE_POINTER_ASSIGN_OPS(HeapPtr, T);
532 :
533 : /* Make this friend so it can access pre() and post(). */
534 : template <class T1, class T2>
535 : friend inline void
536 : BarrieredSetPair(Zone* zone,
537 : HeapPtr<T1*>& v1, T1* val1,
538 : HeapPtr<T2*>& v2, T2* val2);
539 :
540 : protected:
541 14118 : void set(const T& v) {
542 14118 : this->pre();
543 14118 : postBarrieredSet(v);
544 14118 : }
545 :
546 14362 : void postBarrieredSet(const T& v) {
547 14362 : T tmp = this->value;
548 14362 : this->value = v;
549 14362 : this->post(tmp, this->value);
550 14362 : }
551 : };
552 :
553 : // Base class for barriered pointer types that intercept reads and writes.
554 : template <typename T>
555 29191 : class ReadBarrieredBase : public BarrieredBase<T>
556 : {
557 : protected:
558 : // ReadBarrieredBase is not directly instantiable.
559 80223 : explicit ReadBarrieredBase(const T& v) : BarrieredBase<T>(v) {}
560 :
561 : protected:
562 392967 : void read() const { InternalBarrierMethods<T>::readBarrier(this->value); }
563 322499 : void post(const T& prev, const T& next) {
564 322499 : InternalBarrierMethods<T>::postBarrier(&this->value, prev, next);
565 322500 : }
566 : };
567 :
568 : // Incremental GC requires that weak pointers have read barriers. See the block
569 : // comment at the top of Barrier.h for a complete discussion of why.
570 : //
571 : // Note that this class also has post-barriers, so is safe to use with nursery
572 : // pointers. However, when used as a hashtable key, care must still be taken to
573 : // insert manual post-barriers on the table for rekeying if the key is based in
574 : // any way on the address of the object.
575 : template <typename T>
576 : class ReadBarriered : public ReadBarrieredBase<T>,
577 : public WrappedPtrOperations<T, ReadBarriered<T>>
578 : {
579 : protected:
580 : using ReadBarrieredBase<T>::value;
581 :
582 : public:
583 9765 : ReadBarriered() : ReadBarrieredBase<T>(JS::GCPolicy<T>::initial()) {}
584 :
585 : // It is okay to add barriers implicitly.
586 60857 : MOZ_IMPLICIT ReadBarriered(const T& v) : ReadBarrieredBase<T>(v) {
587 60857 : this->post(JS::GCPolicy<T>::initial(), v);
588 60857 : }
589 :
590 : // The copy constructor creates a new weak edge but the wrapped pointer does
591 : // not escape, so no read barrier is necessary.
592 42399 : explicit ReadBarriered(const ReadBarriered& v) : ReadBarrieredBase<T>(v) {
593 42399 : this->post(JS::GCPolicy<T>::initial(), v.unbarrieredGet());
594 42399 : }
595 :
596 : // Move retains the lifetime status of the source edge, so does not fire
597 : // the read barrier of the defunct edge.
598 78728 : ReadBarriered(ReadBarriered&& v)
599 78728 : : ReadBarrieredBase<T>(mozilla::Move(v))
600 : {
601 78728 : this->post(JS::GCPolicy<T>::initial(), v.value);
602 78728 : }
603 :
604 131738 : ~ReadBarriered() {
605 131738 : this->post(this->value, JS::GCPolicy<T>::initial());
606 131738 : }
607 :
608 137 : ReadBarriered& operator=(const ReadBarriered& v) {
609 137 : T prior = this->value;
610 137 : this->value = v.value;
611 137 : this->post(prior, v.value);
612 137 : return *this;
613 : }
614 :
615 376189 : const T& get() const {
616 376189 : if (InternalBarrierMethods<T>::isMarkable(this->value))
617 372862 : this->read();
618 376191 : return this->value;
619 : }
620 :
621 587458 : const T& unbarrieredGet() const {
622 587458 : return this->value;
623 : }
624 :
625 31833 : explicit operator bool() const {
626 31833 : return bool(this->value);
627 : }
628 :
629 360506 : operator const T&() const { return get(); }
630 :
631 15613 : const T& operator->() const { return get(); }
632 :
633 522011 : T* unsafeGet() { return &this->value; }
634 124745 : T const* unsafeGet() const { return &this->value; }
635 :
636 8565 : void set(const T& v)
637 : {
638 8565 : T tmp = this->value;
639 8565 : this->value = v;
640 8565 : this->post(tmp, v);
641 8565 : }
642 : };
643 :
644 : // A WeakRef pointer does not hold its target live and is automatically nulled
645 : // out when the GC discovers that it is not reachable from any other path.
646 : template <typename T>
647 : using WeakRef = ReadBarriered<T>;
648 :
649 : // A pre- and post-barriered Value that is specialized to be aware that it
650 : // resides in a slots or elements vector. This allows it to be relocated in
651 : // memory, but with substantially less overhead than a HeapPtr.
652 : class HeapSlot : public WriteBarrieredBase<Value>
653 : {
654 : public:
655 : enum Kind {
656 : Slot = 0,
657 : Element = 1
658 : };
659 :
660 : explicit HeapSlot() = delete;
661 :
662 : explicit HeapSlot(NativeObject* obj, Kind kind, uint32_t slot, const Value& v)
663 : : WriteBarrieredBase<Value>(v)
664 : {
665 : post(obj, kind, slot, v);
666 : }
667 :
668 : explicit HeapSlot(NativeObject* obj, Kind kind, uint32_t slot, const HeapSlot& s)
669 : : WriteBarrieredBase<Value>(s.value)
670 : {
671 : post(obj, kind, slot, s);
672 : }
673 :
674 52 : ~HeapSlot() {
675 26 : pre();
676 26 : }
677 :
678 310272 : void init(NativeObject* owner, Kind kind, uint32_t slot, const Value& v) {
679 310272 : value = v;
680 310272 : post(owner, kind, slot, v);
681 310274 : }
682 :
683 : #ifdef DEBUG
684 : bool preconditionForSet(NativeObject* owner, Kind kind, uint32_t slot) const;
685 : void assertPreconditionForWriteBarrierPost(NativeObject* obj, Kind kind, uint32_t slot,
686 : const Value& target) const;
687 : #endif
688 :
689 204002 : MOZ_ALWAYS_INLINE void set(NativeObject* owner, Kind kind, uint32_t slot, const Value& v) {
690 204002 : MOZ_ASSERT(preconditionForSet(owner, kind, slot));
691 204002 : pre();
692 204001 : value = v;
693 204001 : post(owner, kind, slot, v);
694 204003 : }
695 :
696 : /* For users who need to manually barrier the raw types. */
697 : static void writeBarrierPost(NativeObject* owner, Kind kind, uint32_t slot, const Value& target) {
698 : reinterpret_cast<HeapSlot*>(const_cast<Value*>(&target))->post(owner, kind, slot, target);
699 : }
700 :
701 : private:
702 514266 : void post(NativeObject* owner, Kind kind, uint32_t slot, const Value& target) {
703 : #ifdef DEBUG
704 514266 : assertPreconditionForWriteBarrierPost(owner, kind, slot, target);
705 : #endif
706 514277 : if (this->value.isObject()) {
707 138432 : gc::Cell* cell = reinterpret_cast<gc::Cell*>(&this->value.toObject());
708 138432 : if (cell->storeBuffer())
709 24230 : cell->storeBuffer()->putSlot(owner, kind, slot, 1);
710 : }
711 514276 : }
712 : };
713 :
714 : class HeapSlotArray
715 : {
716 : HeapSlot* array;
717 :
718 : // Whether writes may be performed to the slots in this array. This helps
719 : // to control how object elements which may be copy on write are used.
720 : #ifdef DEBUG
721 : bool allowWrite_;
722 : #endif
723 :
724 : public:
725 41300 : explicit HeapSlotArray(HeapSlot* array, bool allowWrite)
726 41300 : : array(array)
727 : #ifdef DEBUG
728 41300 : , allowWrite_(allowWrite)
729 : #endif
730 41300 : {}
731 :
732 676 : operator const Value*() const {
733 : JS_STATIC_ASSERT(sizeof(GCPtr<Value>) == sizeof(Value));
734 : JS_STATIC_ASSERT(sizeof(HeapSlot) == sizeof(Value));
735 676 : return reinterpret_cast<const Value*>(array);
736 : }
737 21315 : operator HeapSlot*() const { MOZ_ASSERT(allowWrite()); return array; }
738 :
739 15 : HeapSlotArray operator +(int offset) const { return HeapSlotArray(array + offset, allowWrite()); }
740 19294 : HeapSlotArray operator +(uint32_t offset) const { return HeapSlotArray(array + offset, allowWrite()); }
741 :
742 : private:
743 40624 : bool allowWrite() const {
744 : #ifdef DEBUG
745 40624 : return allowWrite_;
746 : #else
747 : return true;
748 : #endif
749 : }
750 : };
751 :
752 : /*
753 : * This is a hack for RegExpStatics::updateFromMatch. It allows us to do two
754 : * barriers with only one branch to check if we're in an incremental GC.
755 : */
756 : template <class T1, class T2>
757 : static inline void
758 122 : BarrieredSetPair(Zone* zone,
759 : HeapPtr<T1*>& v1, T1* val1,
760 : HeapPtr<T2*>& v2, T2* val2)
761 : {
762 122 : if (T1::needWriteBarrierPre(zone)) {
763 0 : v1.pre();
764 0 : v2.pre();
765 : }
766 122 : v1.postBarrieredSet(val1);
767 122 : v2.postBarrieredSet(val2);
768 122 : }
769 :
770 : /*
771 : * ImmutableTenuredPtr is designed for one very narrow case: replacing
772 : * immutable raw pointers to GC-managed things, implicitly converting to a
773 : * handle type for ease of use. Pointers encapsulated by this type must:
774 : *
775 : * be immutable (no incremental write barriers),
776 : * never point into the nursery (no generational write barriers), and
777 : * be traced via MarkRuntime (we use fromMarkedLocation).
778 : *
779 : * In short: you *really* need to know what you're doing before you use this
780 : * class!
781 : */
782 : template <typename T>
783 : class ImmutableTenuredPtr
784 : {
785 : T value;
786 :
787 : public:
788 589113 : operator T() const { return value; }
789 : T operator->() const { return value; }
790 :
791 1220509 : operator Handle<T>() const {
792 1220509 : return Handle<T>::fromMarkedLocation(&value);
793 : }
794 :
795 1461 : void init(T ptr) {
796 1461 : MOZ_ASSERT(ptr->isTenured());
797 1461 : value = ptr;
798 1461 : }
799 :
800 12 : T get() const { return value; }
801 : const T* address() { return &value; }
802 : };
803 :
804 : template <typename T>
805 : struct MovableCellHasher<PreBarriered<T>>
806 : {
807 : using Key = PreBarriered<T>;
808 : using Lookup = T;
809 :
810 : static bool hasHash(const Lookup& l) { return MovableCellHasher<T>::hasHash(l); }
811 : static bool ensureHash(const Lookup& l) { return MovableCellHasher<T>::ensureHash(l); }
812 0 : static HashNumber hash(const Lookup& l) { return MovableCellHasher<T>::hash(l); }
813 0 : static bool match(const Key& k, const Lookup& l) { return MovableCellHasher<T>::match(k, l); }
814 : static void rekey(Key& k, const Key& newKey) { k.unsafeSet(newKey); }
815 : };
816 :
817 : template <typename T>
818 : struct MovableCellHasher<HeapPtr<T>>
819 : {
820 : using Key = HeapPtr<T>;
821 : using Lookup = T;
822 :
823 926 : static bool hasHash(const Lookup& l) { return MovableCellHasher<T>::hasHash(l); }
824 119 : static bool ensureHash(const Lookup& l) { return MovableCellHasher<T>::ensureHash(l); }
825 916 : static HashNumber hash(const Lookup& l) { return MovableCellHasher<T>::hash(l); }
826 764 : static bool match(const Key& k, const Lookup& l) { return MovableCellHasher<T>::match(k, l); }
827 0 : static void rekey(Key& k, const Key& newKey) { k.unsafeSet(newKey); }
828 : };
829 :
830 : template <typename T>
831 : struct MovableCellHasher<ReadBarriered<T>>
832 : {
833 : using Key = ReadBarriered<T>;
834 : using Lookup = T;
835 :
836 0 : static bool hasHash(const Lookup& l) { return MovableCellHasher<T>::hasHash(l); }
837 0 : static bool ensureHash(const Lookup& l) { return MovableCellHasher<T>::ensureHash(l); }
838 0 : static HashNumber hash(const Lookup& l) { return MovableCellHasher<T>::hash(l); }
839 0 : static bool match(const Key& k, const Lookup& l) {
840 0 : return MovableCellHasher<T>::match(k.unbarrieredGet(), l);
841 : }
842 : static void rekey(Key& k, const Key& newKey) { k.unsafeSet(newKey); }
843 : };
844 :
845 : /* Useful for hashtables with a GCPtr as key. */
846 : template <class T>
847 : struct GCPtrHasher
848 : {
849 : typedef GCPtr<T> Key;
850 : typedef T Lookup;
851 :
852 : static HashNumber hash(Lookup obj) { return DefaultHasher<T>::hash(obj); }
853 : static bool match(const Key& k, Lookup l) { return k.get() == l; }
854 : static void rekey(Key& k, const Key& newKey) { k.unsafeSet(newKey); }
855 : };
856 :
857 : /* Specialized hashing policy for GCPtrs. */
858 : template <class T>
859 : struct DefaultHasher<GCPtr<T>> : GCPtrHasher<T> {};
860 :
861 : template <class T>
862 : struct PreBarrieredHasher
863 : {
864 : typedef PreBarriered<T> Key;
865 : typedef T Lookup;
866 :
867 : static HashNumber hash(Lookup obj) { return DefaultHasher<T>::hash(obj); }
868 0 : static bool match(const Key& k, Lookup l) { return k.get() == l; }
869 : static void rekey(Key& k, const Key& newKey) { k.unsafeSet(newKey); }
870 : };
871 :
872 : template <class T>
873 : struct DefaultHasher<PreBarriered<T>> : PreBarrieredHasher<T> { };
874 :
875 : /* Useful for hashtables with a ReadBarriered as key. */
876 : template <class T>
877 22854 : struct ReadBarrieredHasher
878 : {
879 : typedef ReadBarriered<T> Key;
880 : typedef T Lookup;
881 :
882 : static HashNumber hash(Lookup obj) { return DefaultHasher<T>::hash(obj); }
883 : static bool match(const Key& k, Lookup l) { return k.unbarrieredGet() == l; }
884 : static void rekey(Key& k, const Key& newKey) { k.set(newKey.unbarrieredGet()); }
885 : };
886 :
887 : /* Specialized hashing policy for ReadBarriereds. */
888 : template <class T>
889 22854 : struct DefaultHasher<ReadBarriered<T>> : ReadBarrieredHasher<T> { };
890 :
891 : class ArrayObject;
892 : class ArrayBufferObject;
893 : class GlobalObject;
894 : class Scope;
895 : class ScriptSourceObject;
896 : class Shape;
897 : class BaseShape;
898 : class UnownedBaseShape;
899 : class WasmInstanceObject;
900 : class WasmTableObject;
901 : namespace jit {
902 : class JitCode;
903 : } // namespace jit
904 :
905 : typedef PreBarriered<JSObject*> PreBarrieredObject;
906 : typedef PreBarriered<JSScript*> PreBarrieredScript;
907 : typedef PreBarriered<jit::JitCode*> PreBarrieredJitCode;
908 : typedef PreBarriered<JSString*> PreBarrieredString;
909 : typedef PreBarriered<JSAtom*> PreBarrieredAtom;
910 :
911 : typedef GCPtr<NativeObject*> GCPtrNativeObject;
912 : typedef GCPtr<ArrayObject*> GCPtrArrayObject;
913 : typedef GCPtr<ArrayBufferObjectMaybeShared*> GCPtrArrayBufferObjectMaybeShared;
914 : typedef GCPtr<ArrayBufferObject*> GCPtrArrayBufferObject;
915 : typedef GCPtr<BaseShape*> GCPtrBaseShape;
916 : typedef GCPtr<JSAtom*> GCPtrAtom;
917 : typedef GCPtr<JSFlatString*> GCPtrFlatString;
918 : typedef GCPtr<JSFunction*> GCPtrFunction;
919 : typedef GCPtr<JSLinearString*> GCPtrLinearString;
920 : typedef GCPtr<JSObject*> GCPtrObject;
921 : typedef GCPtr<JSScript*> GCPtrScript;
922 : typedef GCPtr<JSString*> GCPtrString;
923 : typedef GCPtr<ModuleObject*> GCPtrModuleObject;
924 : typedef GCPtr<ModuleEnvironmentObject*> GCPtrModuleEnvironmentObject;
925 : typedef GCPtr<ModuleNamespaceObject*> GCPtrModuleNamespaceObject;
926 : typedef GCPtr<PlainObject*> GCPtrPlainObject;
927 : typedef GCPtr<PropertyName*> GCPtrPropertyName;
928 : typedef GCPtr<Shape*> GCPtrShape;
929 : typedef GCPtr<UnownedBaseShape*> GCPtrUnownedBaseShape;
930 : typedef GCPtr<jit::JitCode*> GCPtrJitCode;
931 : typedef GCPtr<ObjectGroup*> GCPtrObjectGroup;
932 : typedef GCPtr<Scope*> GCPtrScope;
933 :
934 : typedef PreBarriered<Value> PreBarrieredValue;
935 : typedef GCPtr<Value> GCPtrValue;
936 :
937 : typedef PreBarriered<jsid> PreBarrieredId;
938 : typedef GCPtr<jsid> GCPtrId;
939 :
940 : typedef ImmutableTenuredPtr<PropertyName*> ImmutablePropertyNamePtr;
941 : typedef ImmutableTenuredPtr<JS::Symbol*> ImmutableSymbolPtr;
942 :
943 : typedef ReadBarriered<DebugEnvironmentProxy*> ReadBarrieredDebugEnvironmentProxy;
944 : typedef ReadBarriered<GlobalObject*> ReadBarrieredGlobalObject;
945 : typedef ReadBarriered<JSObject*> ReadBarrieredObject;
946 : typedef ReadBarriered<JSFunction*> ReadBarrieredFunction;
947 : typedef ReadBarriered<JSScript*> ReadBarrieredScript;
948 : typedef ReadBarriered<ScriptSourceObject*> ReadBarrieredScriptSourceObject;
949 : typedef ReadBarriered<Shape*> ReadBarrieredShape;
950 : typedef ReadBarriered<jit::JitCode*> ReadBarrieredJitCode;
951 : typedef ReadBarriered<ObjectGroup*> ReadBarrieredObjectGroup;
952 : typedef ReadBarriered<JS::Symbol*> ReadBarrieredSymbol;
953 : typedef ReadBarriered<WasmInstanceObject*> ReadBarrieredWasmInstanceObject;
954 : typedef ReadBarriered<WasmTableObject*> ReadBarrieredWasmTableObject;
955 :
956 : typedef ReadBarriered<Value> ReadBarrieredValue;
957 :
958 : namespace detail {
959 :
960 : template <typename T>
961 : struct DefineComparisonOps<PreBarriered<T>> : mozilla::TrueType {
962 6745468 : static const T& get(const PreBarriered<T>& v) { return v.get(); }
963 : };
964 :
965 : template <typename T>
966 : struct DefineComparisonOps<GCPtr<T>> : mozilla::TrueType {
967 310539 : static const T& get(const GCPtr<T>& v) { return v.get(); }
968 : };
969 :
970 : template <typename T>
971 : struct DefineComparisonOps<HeapPtr<T>> : mozilla::TrueType {
972 0 : static const T& get(const HeapPtr<T>& v) { return v.get(); }
973 : };
974 :
975 : template <typename T>
976 : struct DefineComparisonOps<ReadBarriered<T>> : mozilla::TrueType {
977 80 : static const T& get(const ReadBarriered<T>& v) { return v.unbarrieredGet(); }
978 : };
979 :
980 : template <>
981 : struct DefineComparisonOps<HeapSlot> : mozilla::TrueType {
982 : static const Value& get(const HeapSlot& v) { return v.get(); }
983 : };
984 :
985 : } /* namespace detail */
986 :
987 : } /* namespace js */
988 :
989 : #endif /* gc_Barrier_h */
|