Line data Source code
1 : /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
2 : * vim: set ts=8 sts=4 et sw=4 tw=99:
3 : * This Source Code Form is subject to the terms of the Mozilla Public
4 : * License, v. 2.0. If a copy of the MPL was not distributed with this
5 : * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
6 :
7 : #ifndef jit_JitCompartment_h
8 : #define jit_JitCompartment_h
9 :
10 : #include "mozilla/Array.h"
11 : #include "mozilla/DebugOnly.h"
12 : #include "mozilla/MemoryReporting.h"
13 :
14 : #include "builtin/TypedObject.h"
15 : #include "jit/CompileInfo.h"
16 : #include "jit/ICStubSpace.h"
17 : #include "jit/IonCode.h"
18 : #include "jit/IonControlFlow.h"
19 : #include "jit/JitFrames.h"
20 : #include "jit/shared/Assembler-shared.h"
21 : #include "js/GCHashTable.h"
22 : #include "js/Value.h"
23 : #include "vm/Stack.h"
24 :
25 : namespace js {
26 : namespace jit {
27 :
28 : class FrameSizeClass;
29 :
// Selects which enter-JIT trampoline generateEnterJIT() builds: one for
// entering Baseline code, one for entering Ion (optimized) code.
enum EnterJitType {
    EnterJitBaseline = 0,
    EnterJitOptimized = 1
};
34 :
// Bundles everything needed to call into JIT code through an EnterJitCode
// trampoline (see the typedef below). The Rooted members keep their GC
// things alive while the struct is being populated on the stack.
struct EnterJitData
{
    explicit EnterJitData(JSContext* cx)
      : envChain(cx),
        result(cx)
    {}

    // Entry point of the compiled code to jump to.
    uint8_t* jitcode;

    // Interpreter frame used when entering via on-stack replacement.
    // NOTE(review): presumably null for a normal (non-OSR) entry — confirm
    // against the trampoline users.
    InterpreterFrame* osrFrame;

    void* calleeToken;

    // Argument vector and counts passed to the callee.
    Value* maxArgv;
    unsigned maxArgc;
    unsigned numActualArgs;
    unsigned osrNumStackValues;

    // Environment chain to run the code in, and the call's result slot.
    RootedObject envChain;
    RootedValue result;

    // Whether this entry is a constructing (|new|) call.
    bool constructing;
};
57 :
// Signature of the generated enter-JIT trampolines (see
// JitRuntime::enterIon / enterBaseline). The result is written through |vp|.
typedef void (*EnterJitCode)(void* code, unsigned argc, Value* argv, InterpreterFrame* fp,
                             CalleeToken calleeToken, JSObject* envChain,
                             size_t numStackValues, Value* vp);

class JitcodeGlobalTable;
63 :
// Information about a loop backedge in the runtime, which can be set to
// point to either the loop header or to an OOL interrupt checking stub,
// if signal handlers are being used to implement interrupts.
class PatchableBackedge : public InlineListNode<PatchableBackedge>
{
    // JitZoneGroup patches these fields asynchronously; see
    // JitZoneGroup::patchIonBackedges.
    friend class JitZoneGroup;

    // The jump instruction of the backedge itself.
    CodeLocationJump backedge;

    // The two possible targets the backedge can be patched to jump to.
    CodeLocationLabel loopHeader;
    CodeLocationLabel interruptCheck;

  public:
    PatchableBackedge(CodeLocationJump backedge,
                      CodeLocationLabel loopHeader,
                      CodeLocationLabel interruptCheck)
      : backedge(backedge), loopHeader(loopHeader), interruptCheck(interruptCheck)
    {}
};
82 :
// Per-JSRuntime JIT state: executable allocators, all runtime-wide shared
// trampolines/stubs (generated lazily or at initialize()), and the global
// jitcode address table used for profiling.
class JitRuntime
{
  private:
    friend class JitCompartment;

    // Executable allocator for all code except wasm code and Ion code with
    // patchable backedges (see below).
    ActiveThreadData<ExecutableAllocator> execAlloc_;

    // Executable allocator for Ion scripts with patchable backedges.
    ActiveThreadData<ExecutableAllocator> backedgeExecAlloc_;

    // Shared exception-handler tail.
    ExclusiveAccessLockWriteOnceData<JitCode*> exceptionTail_;

    // Shared post-bailout-handler tail.
    ExclusiveAccessLockWriteOnceData<JitCode*> bailoutTail_;

    // Shared profiler exit frame tail.
    ExclusiveAccessLockWriteOnceData<JitCode*> profilerExitFrameTail_;

    // Trampoline for entering JIT code. Contains OSR prologue.
    ExclusiveAccessLockWriteOnceData<JitCode*> enterJIT_;

    // Trampoline for entering baseline JIT code.
    ExclusiveAccessLockWriteOnceData<JitCode*> enterBaselineJIT_;

    // Vector mapping frame class sizes to bailout tables.
    typedef Vector<JitCode*, 4, SystemAllocPolicy> BailoutTableVector;
    ExclusiveAccessLockWriteOnceData<BailoutTableVector> bailoutTables_;

    // Generic bailout table; used if the bailout table overflows.
    ExclusiveAccessLockWriteOnceData<JitCode*> bailoutHandler_;

    // Argument-rectifying thunk, in the case of insufficient arguments passed
    // to a function call site.
    ExclusiveAccessLockWriteOnceData<JitCode*> argumentsRectifier_;
    ExclusiveAccessLockWriteOnceData<void*> argumentsRectifierReturnAddr_;

    // Thunk that invalidates an (Ion compiled) caller on the Ion stack.
    ExclusiveAccessLockWriteOnceData<JitCode*> invalidator_;

    // Thunks that call the GC pre barrier, one per barriered MIRType
    // (see preBarrier() below).
    ExclusiveAccessLockWriteOnceData<JitCode*> valuePreBarrier_;
    ExclusiveAccessLockWriteOnceData<JitCode*> stringPreBarrier_;
    ExclusiveAccessLockWriteOnceData<JitCode*> objectPreBarrier_;
    ExclusiveAccessLockWriteOnceData<JitCode*> shapePreBarrier_;
    ExclusiveAccessLockWriteOnceData<JitCode*> objectGroupPreBarrier_;

    // Thunk to call malloc/free.
    ExclusiveAccessLockWriteOnceData<JitCode*> mallocStub_;
    ExclusiveAccessLockWriteOnceData<JitCode*> freeStub_;

    // Thunk called to finish compilation of an IonScript.
    ExclusiveAccessLockWriteOnceData<JitCode*> lazyLinkStub_;

    // Thunk used by the debugger for breakpoint and step mode.
    ExclusiveAccessLockWriteOnceData<JitCode*> debugTrapHandler_;

    // Thunk used to fix up on-stack recompile of baseline scripts.
    ExclusiveAccessLockWriteOnceData<JitCode*> baselineDebugModeOSRHandler_;
    ExclusiveAccessLockWriteOnceData<void*> baselineDebugModeOSRHandlerNoFrameRegPopAddr_;

    // Map VMFunction addresses to the JitCode of the wrapper.
    using VMWrapperMap = HashMap<const VMFunction*, JitCode*>;
    ExclusiveAccessLockWriteOnceData<VMWrapperMap*> functionWrappers_;

    // If true, the signal handler to interrupt Ion code should not attempt to
    // patch backedges, as some thread is busy modifying data structures.
    mozilla::Atomic<bool> preventBackedgePatching_;

    // Global table of jitcode native address => bytecode address mappings.
    UnprotectedData<JitcodeGlobalTable*> jitcodeGlobalTable_;

  private:
    // Generators for the stubs/trampolines above; called from initialize()
    // or lazily (debugTrapHandler). Defined in the platform Trampoline code.
    JitCode* generateLazyLinkStub(JSContext* cx);
    JitCode* generateProfilerExitFrameTailStub(JSContext* cx);
    JitCode* generateExceptionTailStub(JSContext* cx, void* handler);
    JitCode* generateBailoutTailStub(JSContext* cx);
    JitCode* generateEnterJIT(JSContext* cx, EnterJitType type);
    JitCode* generateArgumentsRectifier(JSContext* cx, void** returnAddrOut);
    JitCode* generateBailoutTable(JSContext* cx, uint32_t frameClass);
    JitCode* generateBailoutHandler(JSContext* cx);
    JitCode* generateInvalidator(JSContext* cx);
    JitCode* generatePreBarrier(JSContext* cx, MIRType type);
    JitCode* generateMallocStub(JSContext* cx);
    JitCode* generateFreeStub(JSContext* cx);
    JitCode* generateDebugTrapHandler(JSContext* cx);
    JitCode* generateBaselineDebugModeOSRHandler(JSContext* cx, uint32_t* noFrameRegPopOffsetOut);
    JitCode* generateVMWrapper(JSContext* cx, const VMFunction& f);

    // Emits a TraceLogger enter/exit event for a VM call, depending on
    // |enter|; the two inline wrappers below fix that flag.
    bool generateTLEventVM(JSContext* cx, MacroAssembler& masm, const VMFunction& f, bool enter);

    inline bool generateTLEnterVM(JSContext* cx, MacroAssembler& masm, const VMFunction& f) {
        return generateTLEventVM(cx, masm, f, /* enter = */ true);
    }
    inline bool generateTLExitVM(JSContext* cx, MacroAssembler& masm, const VMFunction& f) {
        return generateTLEventVM(cx, masm, f, /* enter = */ false);
    }

  public:
    explicit JitRuntime(JSRuntime* rt);
    ~JitRuntime();

    // Generates the shared stubs/trampolines; must succeed before any JIT
    // code can run.
    MOZ_MUST_USE bool initialize(JSContext* cx, js::AutoLockForExclusiveAccess& lock);

    // GC integration for the runtime-wide JIT structures.
    static void Trace(JSTracer* trc, js::AutoLockForExclusiveAccess& lock);
    static void TraceJitcodeGlobalTableForMinorGC(JSTracer* trc);
    static MOZ_MUST_USE bool MarkJitcodeGlobalTableIteratively(GCMarker* marker);
    static void SweepJitcodeGlobalTable(JSRuntime* rt);

    ExecutableAllocator& execAlloc() {
        return execAlloc_.ref();
    }
    ExecutableAllocator& backedgeExecAlloc() {
        return backedgeExecAlloc_.ref();
    }

    // RAII guard that sets preventBackedgePatching_ for its scope, so the
    // interrupt signal handler will not patch backedges while data
    // structures are being mutated. Restores the previous value on exit,
    // so the guard nests safely.
    class AutoPreventBackedgePatching
    {
        mozilla::DebugOnly<JSRuntime*> rt_;
        JitRuntime* jrt_;
        bool prev_;

      public:
        // This two-arg constructor is provided for JSRuntime::createJitRuntime,
        // where we have a JitRuntime but didn't set rt->jitRuntime_ yet.
        AutoPreventBackedgePatching(JSRuntime* rt, JitRuntime* jrt)
          : rt_(rt),
            jrt_(jrt),
            prev_(false)  // silence GCC warning
        {
            if (jrt_) {
                prev_ = jrt_->preventBackedgePatching_;
                jrt_->preventBackedgePatching_ = true;
            }
        }
        explicit AutoPreventBackedgePatching(JSRuntime* rt)
          : AutoPreventBackedgePatching(rt, rt->jitRuntime())
        {}
        ~AutoPreventBackedgePatching() {
            MOZ_ASSERT(jrt_ == rt_->jitRuntime());
            if (jrt_) {
                MOZ_ASSERT(jrt_->preventBackedgePatching_);
                jrt_->preventBackedgePatching_ = prev_;
            }
        }
    };

    bool preventBackedgePatching() const {
        return preventBackedgePatching_;
    }

    JitCode* getVMWrapper(const VMFunction& f) const;
    JitCode* debugTrapHandler(JSContext* cx);
    JitCode* getBaselineDebugModeOSRHandler(JSContext* cx);
    void* getBaselineDebugModeOSRHandlerAddress(JSContext* cx, bool popFrameReg);

    JitCode* getGenericBailoutHandler() const {
        return bailoutHandler_;
    }

    JitCode* getExceptionTail() const {
        return exceptionTail_;
    }

    JitCode* getBailoutTail() const {
        return bailoutTail_;
    }

    JitCode* getProfilerExitFrameTail() const {
        return profilerExitFrameTail_;
    }

    JitCode* getBailoutTable(const FrameSizeClass& frameClass) const;

    JitCode* getArgumentsRectifier() const {
        return argumentsRectifier_;
    }

    void* getArgumentsRectifierReturnAddr() const {
        return argumentsRectifierReturnAddr_;
    }

    JitCode* getInvalidationThunk() const {
        return invalidator_;
    }

    EnterJitCode enterIon() const {
        return enterJIT_->as<EnterJitCode>();
    }

    EnterJitCode enterBaseline() const {
        return enterBaselineJIT_->as<EnterJitCode>();
    }

    // Returns the pre-barrier thunk for |type|; crashes on types that have
    // no barrier thunk.
    JitCode* preBarrier(MIRType type) const {
        switch (type) {
          case MIRType::Value: return valuePreBarrier_;
          case MIRType::String: return stringPreBarrier_;
          case MIRType::Object: return objectPreBarrier_;
          case MIRType::Shape: return shapePreBarrier_;
          case MIRType::ObjectGroup: return objectGroupPreBarrier_;
          default: MOZ_CRASH();
        }
    }

    JitCode* mallocStub() const {
        return mallocStub_;
    }

    JitCode* freeStub() const {
        return freeStub_;
    }

    JitCode* lazyLinkStub() const {
        return lazyLinkStub_;
    }

    bool hasJitcodeGlobalTable() const {
        return jitcodeGlobalTable_ != nullptr;
    }

    JitcodeGlobalTable* getJitcodeGlobalTable() {
        MOZ_ASSERT(hasJitcodeGlobalTable());
        return jitcodeGlobalTable_;
    }

    bool isProfilerInstrumentationEnabled(JSRuntime* rt) {
        return rt->geckoProfiler().enabled();
    }

    bool isOptimizationTrackingEnabled(ZoneGroup* group) {
        return isProfilerInstrumentationEnabled(group->runtime);
    }
};
318 :
// Per-ZoneGroup JIT state: tracks all patchable loop backedges in Ion code
// belonging to the group and where they currently jump.
class JitZoneGroup
{
  public:
    enum BackedgeTarget {
        BackedgeLoopHeader,
        BackedgeInterruptCheck
    };

  private:
    // Whether patchable backedges currently jump to the loop header or the
    // interrupt check.
    ZoneGroupData<BackedgeTarget> backedgeTarget_;

    // List of all backedges in all Ion code. The backedge list is accessed
    // asynchronously when the active thread is paused and preventBackedgePatching_
    // is false. Thus, the list must only be mutated while preventBackedgePatching_
    // is true.
    ZoneGroupData<InlineList<PatchableBackedge>> backedgeList_;
    InlineList<PatchableBackedge>& backedgeList() { return backedgeList_.ref(); }

  public:
    explicit JitZoneGroup(ZoneGroup* group);

    BackedgeTarget backedgeTarget() const {
        return backedgeTarget_;
    }
    // Both mutators assert the caller holds an AutoPreventBackedgePatching
    // scope (see the comment on backedgeList_).
    void addPatchableBackedge(JitRuntime* jrt, PatchableBackedge* backedge) {
        MOZ_ASSERT(jrt->preventBackedgePatching());
        backedgeList().pushFront(backedge);
    }
    void removePatchableBackedge(JitRuntime* jrt, PatchableBackedge* backedge) {
        MOZ_ASSERT(jrt->preventBackedgePatching());
        backedgeList().remove(backedge);
    }

    // Repoints every registered backedge at |target|.
    void patchIonBackedges(JSContext* cx, BackedgeTarget target);
};
356 :
// Forward declarations for the CacheIR stub machinery below.
enum class CacheKind : uint8_t;
class CacheIRStubInfo;

// Which IC engine a CacheIR stub was generated for; part of the stub's
// lookup key (see CacheIRStubKey).
enum class ICStubEngine : uint8_t {
    // Baseline IC, see SharedIC.h and BaselineIC.h.
    Baseline = 0,

    // Ion IC that reuses Baseline IC code, see SharedIC.h.
    IonSharedIC,

    // Ion IC, see IonIC.h.
    IonIC
};
370 :
371 545 : struct CacheIRStubKey : public DefaultHasher<CacheIRStubKey> {
372 : struct Lookup {
373 : CacheKind kind;
374 : ICStubEngine engine;
375 : const uint8_t* code;
376 : uint32_t length;
377 :
378 6382 : Lookup(CacheKind kind, ICStubEngine engine, const uint8_t* code, uint32_t length)
379 6382 : : kind(kind), engine(engine), code(code), length(length)
380 6382 : {}
381 : };
382 :
383 : static HashNumber hash(const Lookup& l);
384 : static bool match(const CacheIRStubKey& entry, const Lookup& l);
385 :
386 : UniquePtr<CacheIRStubInfo, JS::FreePolicy> stubInfo;
387 :
388 251 : explicit CacheIRStubKey(CacheIRStubInfo* info) : stubInfo(info) {}
389 539 : CacheIRStubKey(CacheIRStubKey&& other) : stubInfo(Move(other.stubInfo)) { }
390 :
391 0 : void operator=(CacheIRStubKey&& other) {
392 0 : stubInfo = Move(other.stubInfo);
393 0 : }
394 : };
395 :
// GC sweep policy for the IC stub-code maps below: an entry is dropped when
// its JitCode value is about to be finalized.
template<typename Key>
struct IcStubCodeMapGCPolicy
{
    static bool needsSweep(Key*, ReadBarrieredJitCode* value) {
        return IsAboutToBeFinalized(value);
    }
};
403 :
// Per-Zone JIT state: stub allocation spaces plus the Zone-wide caches of
// shared CacheIR stub code (Baseline) and stub info (Ion).
class JitZone
{
    // Allocated space for optimized baseline stubs.
    OptimizedICStubSpace optimizedStubSpace_;
    // Allocated space for cached cfg.
    CFGSpace cfgSpace_;

    // Set of CacheIRStubInfo instances used by Ion stubs in this Zone.
    using IonCacheIRStubInfoSet = HashSet<CacheIRStubKey, CacheIRStubKey, SystemAllocPolicy>;
    IonCacheIRStubInfoSet ionCacheIRStubInfoSet_;

    // Map CacheIRStubKey to shared JitCode objects.
    using BaselineCacheIRStubCodeMap = GCHashMap<CacheIRStubKey,
                                                 ReadBarrieredJitCode,
                                                 CacheIRStubKey,
                                                 SystemAllocPolicy,
                                                 IcStubCodeMapGCPolicy<CacheIRStubKey>>;
    BaselineCacheIRStubCodeMap baselineCacheIRStubCodes_;

  public:
    MOZ_MUST_USE bool init(JSContext* cx);
    void sweep(FreeOp* fop);

    // Memory-reporter accounting; each out-param accumulates the size of the
    // corresponding sub-structure.
    void addSizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf,
                                size_t* jitZone,
                                size_t* baselineStubsOptimized,
                                size_t* cachedCFG) const;

    OptimizedICStubSpace* optimizedStubSpace() {
        return &optimizedStubSpace_;
    }
    CFGSpace* cfgSpace() {
        return &cfgSpace_;
    }

    // Looks up shared Baseline stub code for |key|. On a hit, also returns
    // the matching stub info through |stubInfo|; on a miss both results are
    // null.
    JitCode* getBaselineCacheIRStubCode(const CacheIRStubKey::Lookup& key,
                                        CacheIRStubInfo** stubInfo) {
        auto p = baselineCacheIRStubCodes_.lookup(key);
        if (p) {
            *stubInfo = p->key().stubInfo.get();
            return p->value();
        }
        *stubInfo = nullptr;
        return nullptr;
    }
    // Adds a new entry; asserts the key is not already present.
    MOZ_MUST_USE bool putBaselineCacheIRStubCode(const CacheIRStubKey::Lookup& lookup,
                                                 CacheIRStubKey& key,
                                                 JitCode* stubCode)
    {
        auto p = baselineCacheIRStubCodes_.lookupForAdd(lookup);
        MOZ_ASSERT(!p);
        return baselineCacheIRStubCodes_.add(p, Move(key), stubCode);
    }

    // Returns the shared Ion stub info for |key|, or null if absent (or if
    // the set was never initialized).
    CacheIRStubInfo* getIonCacheIRStubInfo(const CacheIRStubKey::Lookup& key) {
        if (!ionCacheIRStubInfoSet_.initialized())
            return nullptr;
        IonCacheIRStubInfoSet::Ptr p = ionCacheIRStubInfoSet_.lookup(key);
        return p ? p->stubInfo.get() : nullptr;
    }
    // Adds a new entry, lazily initializing the set; asserts the key is not
    // already present.
    MOZ_MUST_USE bool putIonCacheIRStubInfo(const CacheIRStubKey::Lookup& lookup,
                                            CacheIRStubKey& key)
    {
        if (!ionCacheIRStubInfoSet_.initialized() && !ionCacheIRStubInfoSet_.init())
            return false;
        IonCacheIRStubInfoSet::AddPtr p = ionCacheIRStubInfoSet_.lookupForAdd(lookup);
        MOZ_ASSERT(!p);
        return ionCacheIRStubInfoSet_.add(p, Move(key));
    }
    void purgeIonCacheIRStubInfo() {
        ionCacheIRStubInfoSet_.finish();
    }
};
477 :
// Kinds of baseline stubs whose post-call return address must be recorded
// for bailouts (see JitCompartment::initBailoutReturnAddr). Count is the
// array-sizing sentinel, not a real kind.
enum class BailoutReturnStub {
    GetProp,
    GetPropSuper,
    SetProp,
    Call,
    New,
    Count
};
486 :
// Per-JSCompartment JIT state: shared IC stub code, bailout return
// addresses, compartment-specific string/RegExp stubs, and SIMD template
// objects.
class JitCompartment
{
    friend class JitActivation;

    // Map ICStub keys to ICStub shared code objects.
    using ICStubCodeMap = GCHashMap<uint32_t,
                                    ReadBarrieredJitCode,
                                    DefaultHasher<uint32_t>,
                                    RuntimeAllocPolicy,
                                    IcStubCodeMapGCPolicy<uint32_t>>;
    ICStubCodeMap* stubCodes_;

    // Keep track of offset into various baseline stubs' code at return
    // point from called script.
    struct BailoutReturnStubInfo
    {
        void* addr;
        uint32_t key;

        BailoutReturnStubInfo() : addr(nullptr), key(0) { }
        BailoutReturnStubInfo(void* addr_, uint32_t key_) : addr(addr_), key(key_) { }
    };
    mozilla::EnumeratedArray<BailoutReturnStub,
                             BailoutReturnStub::Count,
                             BailoutReturnStubInfo> bailoutReturnStubInfo_;

    // Stubs to concatenate two strings inline, or perform RegExp calls inline.
    // These bake in zone and compartment specific pointers and can't be stored
    // in JitRuntime. These are weak pointers, but are not declared as
    // ReadBarriered since they are only read from during Ion compilation,
    // which may occur off thread and whose barriers are captured during
    // CodeGenerator::link.
    JitCode* stringConcatStub_;
    JitCode* regExpMatcherStub_;
    JitCode* regExpSearcherStub_;
    JitCode* regExpTesterStub_;

    // Lazily-created template objects for SIMD types, one per SimdType.
    mozilla::EnumeratedArray<SimdType, SimdType::Count, ReadBarrieredObject> simdTemplateObjects_;

    JitCode* generateStringConcatStub(JSContext* cx);
    JitCode* generateRegExpMatcherStub(JSContext* cx);
    JitCode* generateRegExpSearcherStub(JSContext* cx);
    JitCode* generateRegExpTesterStub(JSContext* cx);

  public:
    // Returns the SIMD template object for |descr|, creating (and caching)
    // a zeroed tenured instance on first use. May return null on OOM.
    JSObject* getSimdTemplateObjectFor(JSContext* cx, Handle<SimdTypeDescr*> descr) {
        ReadBarrieredObject& tpl = simdTemplateObjects_[descr->type()];
        if (!tpl)
            tpl.set(TypedObject::createZeroed(cx, descr, 0, gc::TenuredHeap));
        return tpl.get();
    }

    JSObject* maybeGetSimdTemplateObjectFor(SimdType type) const {
        const ReadBarrieredObject& tpl = simdTemplateObjects_[type];

        // This function is used by Eager Simd Unbox phase, so we cannot use the
        // read barrier. For more information, see the comment above
        // CodeGenerator::simdRefreshTemplatesDuringLink_ .
        return tpl.unbarrieredGet();
    }

    // This function is used to call the read barrier, to mark the SIMD template
    // type as used. This function can only be called from the active thread.
    void registerSimdTemplateObjectFor(SimdType type) {
        ReadBarrieredObject& tpl = simdTemplateObjects_[type];
        MOZ_ASSERT(tpl.unbarrieredGet());
        tpl.get();
    }

    // Returns the shared IC stub code registered under |key|, or null.
    JitCode* getStubCode(uint32_t key) {
        ICStubCodeMap::Ptr p = stubCodes_->lookup(key);
        if (p)
            return p->value();
        return nullptr;
    }
    // Registers stub code under |key|; reports OOM on failure.
    MOZ_MUST_USE bool putStubCode(JSContext* cx, uint32_t key, Handle<JitCode*> stubCode) {
        MOZ_ASSERT(stubCode);
        if (!stubCodes_->putNew(key, stubCode.get())) {
            ReportOutOfMemory(cx);
            return false;
        }
        return true;
    }
    // Records the bailout return address for stub kind |kind|; must be set
    // at most once per kind.
    void initBailoutReturnAddr(void* addr, uint32_t key, BailoutReturnStub kind) {
        MOZ_ASSERT(bailoutReturnStubInfo_[kind].addr == nullptr);
        bailoutReturnStubInfo_[kind] = BailoutReturnStubInfo { addr, key };
    }
    void* bailoutReturnAddr(BailoutReturnStub kind) {
        MOZ_ASSERT(bailoutReturnStubInfo_[kind].addr);
        return bailoutReturnStubInfo_[kind].addr;
    }

    JitCompartment();
    ~JitCompartment();

    MOZ_MUST_USE bool initialize(JSContext* cx);

    // Initialize code stubs only used by Ion, not Baseline.
    MOZ_MUST_USE bool ensureIonStubsExist(JSContext* cx);

    void sweep(FreeOp* fop, JSCompartment* compartment);

    // The *NoBarrier accessors return the weak stub pointers without a read
    // barrier; see the comment on the members above.
    JitCode* stringConcatStubNoBarrier() const {
        return stringConcatStub_;
    }

    JitCode* regExpMatcherStubNoBarrier() const {
        return regExpMatcherStub_;
    }

    MOZ_MUST_USE bool ensureRegExpMatcherStubExists(JSContext* cx) {
        if (regExpMatcherStub_)
            return true;
        regExpMatcherStub_ = generateRegExpMatcherStub(cx);
        return regExpMatcherStub_ != nullptr;
    }

    JitCode* regExpSearcherStubNoBarrier() const {
        return regExpSearcherStub_;
    }

    MOZ_MUST_USE bool ensureRegExpSearcherStubExists(JSContext* cx) {
        if (regExpSearcherStub_)
            return true;
        regExpSearcherStub_ = generateRegExpSearcherStub(cx);
        return regExpSearcherStub_ != nullptr;
    }

    JitCode* regExpTesterStubNoBarrier() const {
        return regExpTesterStub_;
    }

    MOZ_MUST_USE bool ensureRegExpTesterStubExists(JSContext* cx) {
        if (regExpTesterStub_)
            return true;
        regExpTesterStub_ = generateRegExpTesterStub(cx);
        return regExpTesterStub_ != nullptr;
    }

    size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const;
};
628 :
// Called from JSCompartment::discardJitCode().
void InvalidateAll(FreeOp* fop, JS::Zone* zone);
void FinishInvalidation(FreeOp* fop, JSScript* script);

// On windows systems, really large frames need to be incrementally touched.
// The following constant defines the minimum increment of the touch.
#ifdef XP_WIN
const unsigned WINDOWS_BIG_FRAME_TOUCH_INCREMENT = 4096 - 1;
#endif
638 :
// If NON_WRITABLE_JIT_CODE is enabled, this class will ensure
// JIT code is writable (has RW permissions) in its scope.
// Otherwise it's a no-op.
class MOZ_STACK_CLASS AutoWritableJitCode
{
    // Backedge patching from the signal handler will change memory protection
    // flags, so don't allow it in a AutoWritableJitCode scope.
    JitRuntime::AutoPreventBackedgePatching preventPatching_;
    JSRuntime* rt_;
    void* addr_;
    size_t size_;

  public:
    // Makes [addr, addr + size) writable for the scope's lifetime; crashes
    // if the protection change fails.
    AutoWritableJitCode(JSRuntime* rt, void* addr, size_t size)
      : preventPatching_(rt), rt_(rt), addr_(addr), size_(size)
    {
        rt_->toggleAutoWritableJitCodeActive(true);
        if (!ExecutableAllocator::makeWritable(addr_, size_))
            MOZ_CRASH();
    }
    AutoWritableJitCode(void* addr, size_t size)
      : AutoWritableJitCode(TlsContext.get()->runtime(), addr, size)
    {}
    explicit AutoWritableJitCode(JitCode* code)
      : AutoWritableJitCode(code->runtimeFromActiveCooperatingThread(), code->raw(), code->bufferSize())
    {}
    // Restores executable (non-writable) permissions; crashes on failure so
    // writable JIT code is never left behind.
    ~AutoWritableJitCode() {
        if (!ExecutableAllocator::makeExecutable(addr_, size_))
            MOZ_CRASH();
        rt_->toggleAutoWritableJitCodeActive(false);
    }
};
671 :
672 23 : class MOZ_STACK_CLASS MaybeAutoWritableJitCode
673 : {
674 : mozilla::Maybe<AutoWritableJitCode> awjc_;
675 :
676 : public:
677 21 : MaybeAutoWritableJitCode(void* addr, size_t size, ReprotectCode reprotect) {
678 21 : if (reprotect)
679 0 : awjc_.emplace(addr, size);
680 21 : }
681 2 : MaybeAutoWritableJitCode(JitCode* code, ReprotectCode reprotect) {
682 2 : if (reprotect)
683 0 : awjc_.emplace(code);
684 2 : }
685 : };
686 :
687 : } // namespace jit
688 : } // namespace js
689 :
690 : #endif /* jit_JitCompartment_h */
|