Line data Source code
1 : /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
2 : * vim: set ts=8 sts=4 et sw=4 tw=99:
3 : * This Source Code Form is subject to the terms of the Mozilla Public
4 : * License, v. 2.0. If a copy of the MPL was not distributed with this
5 : * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
6 :
7 : #include "jit/JitcodeMap.h"
8 :
9 : #include "mozilla/DebugOnly.h"
10 : #include "mozilla/MathAlgorithms.h"
11 : #include "mozilla/Maybe.h"
12 : #include "mozilla/SizePrintfMacros.h"
13 : #include "mozilla/Sprintf.h"
14 :
15 : #include "jsprf.h"
16 :
17 : #include "gc/Marking.h"
18 : #include "gc/Statistics.h"
19 : #include "jit/BaselineJIT.h"
20 : #include "jit/JitSpewer.h"
21 : #include "js/Vector.h"
22 : #include "vm/GeckoProfiler.h"
23 :
24 : #include "jsscriptinlines.h"
25 :
26 : #include "vm/GeckoProfiler-inl.h"
27 : #include "vm/TypeInference-inl.h"
28 :
29 : using mozilla::Maybe;
30 :
31 : namespace js {
32 : namespace jit {
33 :
// Map a native code address inside |entry|'s code range to the
// JitcodeRegionEntry covering it. On return, *ptrOffset holds the byte
// offset of |ptr| from the entry's native start address.
static inline JitcodeRegionEntry
RegionAtAddr(const JitcodeGlobalEntry::IonEntry& entry, void* ptr,
             uint32_t* ptrOffset)
{
    MOZ_ASSERT(entry.containsPointer(ptr));
    *ptrOffset = reinterpret_cast<uint8_t*>(ptr) -
                 reinterpret_cast<uint8_t*>(entry.nativeStartAddr());

    uint32_t regionIdx = entry.regionTable()->findRegionEntry(*ptrOffset);
    MOZ_ASSERT(regionIdx < entry.regionTable()->numRegions());

    return entry.regionTable()->regionEntry(regionIdx);
}
47 :
// Normalize |ptr| to the start address of the region that contains it, so
// that all addresses within one region map to a single canonical address.
void*
JitcodeGlobalEntry::IonEntry::canonicalNativeAddrFor(JSRuntime* rt, void* ptr) const
{
    uint32_t ptrOffset;
    JitcodeRegionEntry region = RegionAtAddr(*this, ptr, &ptrOffset);
    return (void*)(((uint8_t*) nativeStartAddr()) + region.nativeOffset());
}
55 :
// Collect the (possibly inlined) call stack for native address |ptr| into
// |results|, innermost frame first. *depth receives the frame count.
// Returns false only on append (OOM) failure.
bool
JitcodeGlobalEntry::IonEntry::callStackAtAddr(JSRuntime* rt, void* ptr,
                                              BytecodeLocationVector& results,
                                              uint32_t* depth) const
{
    uint32_t ptrOffset;
    JitcodeRegionEntry region = RegionAtAddr(*this, ptr, &ptrOffset);
    *depth = region.scriptDepth();

    JitcodeRegionEntry::ScriptPcIterator locationIter = region.scriptPcIterator();
    MOZ_ASSERT(locationIter.hasMore());
    bool first = true;
    while (locationIter.hasMore()) {
        uint32_t scriptIdx, pcOffset;
        locationIter.readNext(&scriptIdx, &pcOffset);
        // For the first entry pushed (innermost frame), the pcOffset is obtained
        // from the delta-run encodings.
        if (first) {
            pcOffset = region.findPcOffset(ptrOffset, pcOffset);
            first = false;
        }
        JSScript* script = getScript(scriptIdx);
        jsbytecode* pc = script->offsetToPC(pcOffset);
        if (!results.append(BytecodeLocation(script, pc)))
            return false;
    }

    return true;
}
85 :
86 : uint32_t
87 0 : JitcodeGlobalEntry::IonEntry::callStackAtAddr(JSRuntime* rt, void* ptr,
88 : const char** results,
89 : uint32_t maxResults) const
90 : {
91 0 : MOZ_ASSERT(maxResults >= 1);
92 :
93 : uint32_t ptrOffset;
94 0 : JitcodeRegionEntry region = RegionAtAddr(*this, ptr, &ptrOffset);
95 :
96 0 : JitcodeRegionEntry::ScriptPcIterator locationIter = region.scriptPcIterator();
97 0 : MOZ_ASSERT(locationIter.hasMore());
98 0 : uint32_t count = 0;
99 0 : while (locationIter.hasMore()) {
100 : uint32_t scriptIdx, pcOffset;
101 :
102 0 : locationIter.readNext(&scriptIdx, &pcOffset);
103 0 : MOZ_ASSERT(getStr(scriptIdx));
104 :
105 0 : results[count++] = getStr(scriptIdx);
106 0 : if (count >= maxResults)
107 0 : break;
108 : }
109 :
110 0 : return count;
111 : }
112 :
// Report the innermost (youngest) script and bytecode pc for native
// address |ptr|, using the region's delta-run encoding for the pc.
void
JitcodeGlobalEntry::IonEntry::youngestFrameLocationAtAddr(JSRuntime* rt, void* ptr,
                                                          JSScript** script, jsbytecode** pc) const
{
    uint32_t ptrOffset;
    JitcodeRegionEntry region = RegionAtAddr(*this, ptr, &ptrOffset);

    JitcodeRegionEntry::ScriptPcIterator locationIter = region.scriptPcIterator();
    MOZ_ASSERT(locationIter.hasMore());
    uint32_t scriptIdx, pcOffset;
    // The first (innermost) location provides the base pc offset.
    locationIter.readNext(&scriptIdx, &pcOffset);
    pcOffset = region.findPcOffset(ptrOffset, pcOffset);

    *script = getScript(scriptIdx);
    *pc = (*script)->offsetToPC(pcOffset);
}
129 :
// Release all heap data owned by this entry: the compacted region table,
// the script list (including its strings), and the tracked-optimizations
// tables. Safe to call at most once; all pointers are nulled afterwards.
void
JitcodeGlobalEntry::IonEntry::destroy()
{
    // The region table is stored at the tail of the compacted data,
    // which means the start of the region table is a pointer to
    // the _middle_ of the memory space allocated for it.
    //
    // When freeing it, obtain the payload start pointer first.
    if (regionTable_)
        js_free((void*) (regionTable_->payloadStart()));
    regionTable_ = nullptr;

    // Free the scriptList strs.
    for (uint32_t i = 0; i < scriptList_->size; i++) {
        js_free(scriptList_->pairs[i].str);
        scriptList_->pairs[i].str = nullptr;
    }

    // Free the script list
    js_free(scriptList_);
    scriptList_ = nullptr;

    // The optimizations region and attempts table is in the same block of
    // memory, the beginning of which is pointed to by
    // optimizationsRegionTable_->payloadStart().
    if (optsRegionTable_) {
        MOZ_ASSERT(optsAttemptsTable_);
        js_free((void*) optsRegionTable_->payloadStart());
    }
    optsRegionTable_ = nullptr;
    optsTypesTable_ = nullptr;
    optsAttemptsTable_ = nullptr;
    js_delete(optsAllTypes_);
    optsAllTypes_ = nullptr;
}
165 :
// Baseline entries have no per-region normalization; the address itself is
// the canonical address.
void*
JitcodeGlobalEntry::BaselineEntry::canonicalNativeAddrFor(JSRuntime* rt, void* ptr) const
{
    // TODO: We can't yet normalize Baseline addresses until we unify
    // BaselineScript's PCMappingEntries with JitcodeGlobalMap.
    return ptr;
}
173 :
// Append the single (non-inlined) frame for native address |ptr|.
// *depth is always 1 for baseline code. Returns false on OOM.
bool
JitcodeGlobalEntry::BaselineEntry::callStackAtAddr(JSRuntime* rt, void* ptr,
                                                   BytecodeLocationVector& results,
                                                   uint32_t* depth) const
{
    MOZ_ASSERT(containsPointer(ptr));
    MOZ_ASSERT(script_->hasBaselineScript());

    uint8_t* addr = reinterpret_cast<uint8_t*>(ptr);
    // Baseline native->pc mapping is approximate by design.
    jsbytecode* pc = script_->baselineScript()->approximatePcForNativeAddress(script_, addr);
    if (!results.append(BytecodeLocation(script_, pc)))
        return false;

    *depth = 1;

    return true;
}
191 :
// A baseline entry represents exactly one frame; write its profile label
// into results[0] and report one result.
uint32_t
JitcodeGlobalEntry::BaselineEntry::callStackAtAddr(JSRuntime* rt, void* ptr,
                                                   const char** results,
                                                   uint32_t maxResults) const
{
    MOZ_ASSERT(containsPointer(ptr));
    MOZ_ASSERT(maxResults >= 1);

    results[0] = str();
    return 1;
}
203 :
// Report the script and (approximate) bytecode pc for native address |ptr|.
void
JitcodeGlobalEntry::BaselineEntry::youngestFrameLocationAtAddr(JSRuntime* rt, void* ptr,
                                                               JSScript** script,
                                                               jsbytecode** pc) const
{
    uint8_t* addr = reinterpret_cast<uint8_t*>(ptr);
    *script = script_;
    *pc = script_->baselineScript()->approximatePcForNativeAddress(script_, addr);
}
213 :
214 : void
215 0 : JitcodeGlobalEntry::BaselineEntry::destroy()
216 : {
217 0 : if (!str_)
218 0 : return;
219 0 : js_free((void*) str_);
220 0 : str_ = nullptr;
221 : }
222 :
// Look up the Ion entry that an IonCache entry rejoins into. The rejoin
// address is guaranteed to be registered while the cache entry exists.
static inline JitcodeGlobalEntry&
RejoinEntry(JSRuntime* rt, const JitcodeGlobalEntry::IonCacheEntry& cache, void* ptr)
{
    MOZ_ASSERT(cache.containsPointer(ptr));

    // There must exist an entry for the rejoin addr if this entry exists.
    JitRuntime* jitrt = rt->jitRuntime();
    JitcodeGlobalEntry& entry =
        jitrt->getJitcodeGlobalTable()->lookupInfallible(cache.rejoinAddr());
    MOZ_ASSERT(entry.isIon());
    return entry;
}
235 :
// All addresses within an IC stub canonicalize to the stub's start address.
void*
JitcodeGlobalEntry::IonCacheEntry::canonicalNativeAddrFor(JSRuntime* rt, void* ptr) const
{
    return nativeStartAddr_;
}
241 :
// Delegate the call-stack query to the Ion entry at the rejoin address.
bool
JitcodeGlobalEntry::IonCacheEntry::callStackAtAddr(JSRuntime* rt, void* ptr,
                                                   BytecodeLocationVector& results,
                                                   uint32_t* depth) const
{
    const JitcodeGlobalEntry& entry = RejoinEntry(rt, *this, ptr);
    return entry.callStackAtAddr(rt, rejoinAddr(), results, depth);
}
250 :
// Delegate the label query to the Ion entry at the rejoin address.
uint32_t
JitcodeGlobalEntry::IonCacheEntry::callStackAtAddr(JSRuntime* rt, void* ptr,
                                                   const char** results,
                                                   uint32_t maxResults) const
{
    const JitcodeGlobalEntry& entry = RejoinEntry(rt, *this, ptr);
    return entry.callStackAtAddr(rt, rejoinAddr(), results, maxResults);
}
259 :
// Delegate the youngest-frame query to the Ion entry at the rejoin address.
void
JitcodeGlobalEntry::IonCacheEntry::youngestFrameLocationAtAddr(JSRuntime* rt, void* ptr,
                                                               JSScript** script,
                                                               jsbytecode** pc) const
{
    const JitcodeGlobalEntry& entry = RejoinEntry(rt, *this, ptr);
    return entry.youngestFrameLocationAtAddr(rt, rejoinAddr(), script, pc);
}
268 :
269 :
// Three-way (qsort-style) comparison of two code addresses:
// negative if a < b, positive if a > b, zero if equal.
static int ComparePointers(const void* a, const void* b) {
    const uint8_t* lhs = reinterpret_cast<const uint8_t*>(a);
    const uint8_t* rhs = reinterpret_cast<const uint8_t*>(b);
    if (lhs == rhs)
        return 0;
    return (lhs < rhs) ? -1 : 1;
}
279 :
// Skiplist ordering. For two real entries, order by native start address.
// When one side is an address query, the query compares equal to any entry
// whose code range contains the query address; |flip| corrects the sign
// for whichever argument position the query occupies.
/* static */ int
JitcodeGlobalEntry::compare(const JitcodeGlobalEntry& ent1, const JitcodeGlobalEntry& ent2)
{
    // Both parts of compare cannot be a query.
    MOZ_ASSERT(!(ent1.isQuery() && ent2.isQuery()));

    // Ensure no overlaps for non-query lookups.
    MOZ_ASSERT_IF(!ent1.isQuery() && !ent2.isQuery(), !ent1.overlapsWith(ent2));

    // For two non-query entries, just compare the start addresses.
    if (!ent1.isQuery() && !ent2.isQuery())
        return ComparePointers(ent1.nativeStartAddr(), ent2.nativeStartAddr());

    void* ptr = ent1.isQuery() ? ent1.nativeStartAddr() : ent2.nativeStartAddr();
    const JitcodeGlobalEntry& ent = ent1.isQuery() ? ent2 : ent1;
    int flip = ent1.isQuery() ? 1 : -1;

    if (ent.startsBelowPointer(ptr)) {
        if (ent.endsAbovePointer(ptr))
            return 0;

        // query ptr > entry
        return flip * 1;
    }

    // query ptr < entry
    return flip * -1;
}
308 :
// Build the profile label string for |script|. Shapes (see below):
//   "FuncName (FileName:Lineno)", "FileName:Lineno", or "FileName".
// Returns a malloc'ed, NUL-terminated string (caller owns), or nullptr on
// OOM. If |length| is non-null it receives the string length sans NUL.
/* static */ char*
JitcodeGlobalEntry::createScriptString(JSContext* cx, JSScript* script, size_t* length)
{
    // If the script has a function, try calculating its name.
    bool hasName = false;
    size_t nameLength = 0;
    UniqueChars nameStr;
    JSFunction* func = script->functionDelazifying();
    if (func && func->displayAtom()) {
        nameStr = StringToNewUTF8CharsZ(cx, *func->displayAtom());
        if (!nameStr)
            return nullptr;

        nameLength = strlen(nameStr.get());
        hasName = true;
    }

    // Calculate filename length
    const char* filenameStr = script->filename() ? script->filename() : "(null)";
    size_t filenameLength = strlen(filenameStr);

    // Calculate lineno length
    bool hasLineno = false;
    size_t linenoLength = 0;
    char linenoStr[15];
    if (hasName || (script->functionNonDelazifying() || script->isForEval())) {
        linenoLength = SprintfLiteral(linenoStr, "%" PRIuSIZE, script->lineno());
        hasLineno = true;
    }

    // Full profile string for scripts with functions is:
    //      FuncName (FileName:Lineno)
    // Full profile string for scripts without functions is:
    //      FileName:Lineno
    // Full profile string for scripts without functions and without linenos is:
    //      FileName

    // Calculate full string length.
    size_t fullLength = 0;
    if (hasName) {
        MOZ_ASSERT(hasLineno);
        // Name + " (" + filename + ":" + lineno + ")".
        fullLength = nameLength + 2 + filenameLength + 1 + linenoLength + 1;
    } else if (hasLineno) {
        fullLength = filenameLength + 1 + linenoLength;
    } else {
        fullLength = filenameLength;
    }

    // Allocate string.
    char* str = cx->pod_malloc<char>(fullLength + 1);
    if (!str)
        return nullptr;

    size_t cur = 0;

    // Fill string with func name if needed.
    if (hasName) {
        memcpy(str + cur, nameStr.get(), nameLength);
        cur += nameLength;
        str[cur++] = ' ';
        str[cur++] = '(';
    }

    // Fill string with filename chars.
    memcpy(str + cur, filenameStr, filenameLength);
    cur += filenameLength;

    // Fill lineno chars.
    if (hasLineno) {
        str[cur++] = ':';
        memcpy(str + cur, linenoStr, linenoLength);
        cur += linenoLength;
    }

    // Terminal ')' if necessary.
    if (hasName)
        str[cur++] = ')';

    MOZ_ASSERT(cur == fullLength);
    str[cur] = 0;

    if (length)
        *length = fullLength;

    return str;
}
395 :
396 :
397 0 : JitcodeGlobalTable::Enum::Enum(JitcodeGlobalTable& table, JSRuntime* rt)
398 : : Range(table),
399 : rt_(rt),
400 0 : next_(cur_ ? cur_->tower_->next(0) : nullptr)
401 : {
402 0 : for (int level = JitcodeSkiplistTower::MAX_HEIGHT - 1; level >= 0; level--)
403 0 : prevTower_[level] = nullptr;
404 0 : }
405 :
// Advance the cursor. If the current entry was not just removed (i.e. it
// is not the head of the free list), update prevTower_ to keep tracking
// the predecessor at each level covered by the current entry's tower.
void
JitcodeGlobalTable::Enum::popFront()
{
    MOZ_ASSERT(!empty());

    // Did not remove current entry; advance prevTower_.
    if (cur_ != table_.freeEntries_) {
        for (int level = cur_->tower_->height() - 1; level >= 0; level--) {
            JitcodeGlobalEntry* prevTowerEntry = prevTower_[level];

            if (prevTowerEntry) {
                // cur_ becomes the new predecessor at this level only if
                // it is actually linked there.
                if (prevTowerEntry->tower_->next(level) == cur_)
                    prevTower_[level] = cur_;
            } else {
                // NOTE(review): with no predecessor recorded, the level's
                // start entry is taken as predecessor — presumably cur_
                // itself when it heads this level; verify against callers.
                prevTower_[level] = table_.startTower_[level];
            }
        }
    }

    cur_ = next_;
    if (!empty())
        next_ = cur_->tower_->next(0);
}
429 :
// Remove the current entry from the table using the tracked predecessors.
void
JitcodeGlobalTable::Enum::removeFront()
{
    MOZ_ASSERT(!empty());
    table_.releaseEntry(*cur_, prevTower_, rt_);
}
436 :
// Sampler-side lookup: the entry must exist. Stamps the entry (and, for IC
// entries, the Ion entry it rejoins to) with the current sample buffer
// generation so the GC keeps it alive while it may be referenced by samples.
const JitcodeGlobalEntry&
JitcodeGlobalTable::lookupForSamplerInfallible(void* ptr, JSRuntime* rt, uint32_t sampleBufferGen)
{
    JitcodeGlobalEntry* entry = lookupInternal(ptr);
    MOZ_ASSERT(entry);

    entry->setGeneration(sampleBufferGen);

    // IonCache entries must keep their corresponding Ion entries alive.
    if (entry->isIonCache()) {
        JitcodeGlobalEntry& rejoinEntry = RejoinEntry(rt, entry->ionCacheEntry(), ptr);
        rejoinEntry.setGeneration(sampleBufferGen);
    }

    // JitcodeGlobalEntries are marked at the end of the mark phase. A read
    // barrier is not needed. Any JS frames sampled during the sweep phase of
    // the GC must be on stack, and on-stack frames must already be marked at
    // the beginning of the sweep phase. It's not possible to assert this here
    // as we may be off main thread when called from the gecko profiler.

    return *entry;
}
459 :
460 : JitcodeGlobalEntry*
461 0 : JitcodeGlobalTable::lookupInternal(void* ptr)
462 : {
463 0 : JitcodeGlobalEntry query = JitcodeGlobalEntry::MakeQuery(ptr);
464 : JitcodeGlobalEntry* searchTower[JitcodeSkiplistTower::MAX_HEIGHT];
465 0 : searchInternal(query, searchTower);
466 :
467 0 : if (searchTower[0] == nullptr) {
468 : // Check startTower
469 0 : if (startTower_[0] == nullptr)
470 0 : return nullptr;
471 :
472 0 : MOZ_ASSERT(startTower_[0]->compareTo(query) >= 0);
473 0 : int cmp = startTower_[0]->compareTo(query);
474 0 : MOZ_ASSERT(cmp >= 0);
475 0 : return (cmp == 0) ? startTower_[0] : nullptr;
476 : }
477 :
478 0 : JitcodeGlobalEntry* bottom = searchTower[0];
479 0 : MOZ_ASSERT(bottom->compareTo(query) < 0);
480 :
481 0 : JitcodeGlobalEntry* bottomNext = bottom->tower_->next(0);
482 0 : if (bottomNext == nullptr)
483 0 : return nullptr;
484 :
485 0 : int cmp = bottomNext->compareTo(query);
486 0 : MOZ_ASSERT(cmp >= 0);
487 0 : return (cmp == 0) ? bottomNext : nullptr;
488 : }
489 :
// Insert |entry| into the skiplist. Allocates a copy of the entry plus a
// tower of random height and links it at every level, while profiler
// sampling is suppressed. Returns false on OOM.
bool
JitcodeGlobalTable::addEntry(const JitcodeGlobalEntry& entry, JSRuntime* rt)
{
    MOZ_ASSERT(entry.isIon() || entry.isBaseline() || entry.isIonCache() || entry.isDummy());

    JitcodeGlobalEntry* searchTower[JitcodeSkiplistTower::MAX_HEIGHT];
    searchInternal(entry, searchTower);

    // Allocate a new entry and tower.
    JitcodeSkiplistTower* newTower = allocateTower(generateTowerHeight());
    if (!newTower)
        return false;

    JitcodeGlobalEntry* newEntry = allocateEntry();
    if (!newEntry)
        return false;

    *newEntry = entry;
    newEntry->tower_ = newTower;

    // Suppress profiler sampling while skiplist is being mutated.
    AutoSuppressProfilerSampling suppressSampling(TlsContext.get());

    // Link up entry with forward entries taken from tower.
    for (int level = newTower->height() - 1; level >= 0; level--) {
        JitcodeGlobalEntry* searchTowerEntry = searchTower[level];
        if (searchTowerEntry) {
            // Splice between the predecessor and its old successor.
            MOZ_ASSERT(searchTowerEntry->compareTo(*newEntry) < 0);
            JitcodeGlobalEntry* searchTowerNextEntry = searchTowerEntry->tower_->next(level);

            MOZ_ASSERT_IF(searchTowerNextEntry, searchTowerNextEntry->compareTo(*newEntry) > 0);

            newTower->setNext(level, searchTowerNextEntry);
            searchTowerEntry->tower_->setNext(level, newEntry);
        } else {
            // No predecessor at this level: new entry becomes the level head.
            newTower->setNext(level, startTower_[level]);
            startTower_[level] = newEntry;
        }
    }
    skiplistSize_++;
    // verifySkiplist(); - disabled for release.

    // Any entries that may directly contain nursery pointers must be marked
    // during a minor GC to update those pointers.
    if (entry.canHoldNurseryPointers())
        addToNurseryList(&newEntry->ionEntry());

    return true;
}
539 :
// Unlink |entry| from every skiplist level using the caller-provided
// predecessor tower, destroy its payload, and recycle both its tower and
// the entry slot on the table's free lists.
void
JitcodeGlobalTable::removeEntry(JitcodeGlobalEntry& entry, JitcodeGlobalEntry** prevTower,
                                JSRuntime* rt)
{
    // Mutating the skiplist is only safe while sampling is disabled.
    MOZ_ASSERT(!TlsContext.get()->isProfilerSamplingEnabled());

    if (entry.canHoldNurseryPointers())
        removeFromNurseryList(&entry.ionEntry());

    // Unlink query entry.
    for (int level = entry.tower_->height() - 1; level >= 0; level--) {
        JitcodeGlobalEntry* prevTowerEntry = prevTower[level];
        if (prevTowerEntry) {
            MOZ_ASSERT(prevTowerEntry->tower_->next(level) == &entry);
            prevTowerEntry->tower_->setNext(level, entry.tower_->next(level));
        } else {
            startTower_[level] = entry.tower_->next(level);
        }
    }
    skiplistSize_--;
    // verifySkiplist(); - disabled for release.

    // Entry has been unlinked.
    entry.destroy();
    entry.tower_->addToFreeList(&(freeTowers_[entry.tower_->height() - 1]));
    entry.tower_ = nullptr;
    entry = JitcodeGlobalEntry();
    entry.addToFreeList(&freeEntries_);
}
569 :
// Remove |entry| after asserting that the profiler's sample buffer no
// longer references it (unless the profiler generation is invalid).
void
JitcodeGlobalTable::releaseEntry(JitcodeGlobalEntry& entry, JitcodeGlobalEntry** prevTower,
                                 JSRuntime* rt)
{
    mozilla::DebugOnly<uint32_t> gen = rt->profilerSampleBufferGen();
    mozilla::DebugOnly<uint32_t> lapCount = rt->profilerSampleBufferLapCount();
    MOZ_ASSERT_IF(gen != UINT32_MAX, !entry.isSampled(gen, lapCount));
    removeEntry(entry, prevTower, rt);
}
579 :
// Standard skiplist search: for each level (top down), record in
// |towerOut| the last entry sorting strictly below |query| at that level
// (nullptr if none). The result is the predecessor tower used by insert,
// remove and lookup.
void
JitcodeGlobalTable::searchInternal(const JitcodeGlobalEntry& query, JitcodeGlobalEntry** towerOut)
{
    JitcodeGlobalEntry* cur = nullptr;
    for (int level = JitcodeSkiplistTower::MAX_HEIGHT - 1; level >= 0; level--) {
        JitcodeGlobalEntry* entry = searchAtHeight(level, cur, query);
        MOZ_ASSERT_IF(entry == nullptr, cur == nullptr);
        towerOut[level] = entry;
        cur = entry;
    }

    // Validate the resulting tower.
#ifdef DEBUG
    for (int level = JitcodeSkiplistTower::MAX_HEIGHT - 1; level >= 0; level--) {
        if (towerOut[level] == nullptr) {
            // If we got NULL for a given level, then we should have gotten NULL
            // for the level above as well.
            MOZ_ASSERT_IF(unsigned(level) < (JitcodeSkiplistTower::MAX_HEIGHT - 1),
                          towerOut[level + 1] == nullptr);
            continue;
        }

        JitcodeGlobalEntry* cur = towerOut[level];

        // Non-null result at a given level must sort < query.
        MOZ_ASSERT(cur->compareTo(query) < 0);

        // The entry must have a tower height that accomodates level.
        if (!cur->tower_->next(level))
            continue;

        JitcodeGlobalEntry* next = cur->tower_->next(level);

        // Next entry must have tower height that accomodates level.
        MOZ_ASSERT(unsigned(level) < next->tower_->height());

        // Next entry must sort >= query.
        MOZ_ASSERT(next->compareTo(query) >= 0);
    }
#endif // DEBUG
}
621 :
// Walk forward at |level| from |start| (or the level's start tower when
// |start| is null) and return the last entry sorting strictly below
// |query|, or nullptr if even the first entry at this level sorts >= query.
JitcodeGlobalEntry*
JitcodeGlobalTable::searchAtHeight(unsigned level, JitcodeGlobalEntry* start,
                                   const JitcodeGlobalEntry& query)
{
    JitcodeGlobalEntry* cur = start;

    // If starting with nullptr, use the start tower.
    if (start == nullptr) {
        cur = startTower_[level];
        if (cur == nullptr || cur->compareTo(query) >= 0)
            return nullptr;
    }

    // Keep skipping at |level| until we reach an entry < query whose
    // successor is an entry >= query.
    for (;;) {
        JitcodeGlobalEntry* next = cur->tower_->next(level);
        if (next == nullptr || next->compareTo(query) >= 0)
            return cur;

        cur = next;
    }
}
645 :
// Pick a random tower height in [1, MAX_HEIGHT] with geometric
// distribution (P(height >= k) ~ 2^-(k-1)), as usual for skiplists.
unsigned
JitcodeGlobalTable::generateTowerHeight()
{
    // Implementation taken from Hars L. and Pteruska G.,
    // "Pseudorandom Recursions: Small and fast Pseudorandom number generators for
    // embedded applications."
    rand_ ^= mozilla::RotateLeft(rand_, 5) ^ mozilla::RotateLeft(rand_, 24);
    rand_ += 0x37798849;

    // Return 1 + number of lowbit zeros in new randval, capped at MAX_HEIGHT.
    unsigned result = 0;
    for (unsigned i = 0; i < JitcodeSkiplistTower::MAX_HEIGHT - 1; i++) {
        if ((rand_ >> i) & 0x1)
            break;
        result++;
    }
    return result + 1;
}
664 :
665 : JitcodeSkiplistTower*
666 632 : JitcodeGlobalTable::allocateTower(unsigned height)
667 : {
668 632 : MOZ_ASSERT(height >= 1);
669 632 : JitcodeSkiplistTower* tower = JitcodeSkiplistTower::PopFromFreeList(&freeTowers_[height - 1]);
670 632 : if (tower)
671 0 : return tower;
672 :
673 632 : size_t size = JitcodeSkiplistTower::CalculateSize(height);
674 632 : tower = (JitcodeSkiplistTower*) alloc_.alloc(size);
675 632 : if (!tower)
676 0 : return nullptr;
677 :
678 632 : return new (tower) JitcodeSkiplistTower(height);
679 : }
680 :
681 : JitcodeGlobalEntry*
682 632 : JitcodeGlobalTable::allocateEntry()
683 : {
684 632 : JitcodeGlobalEntry* entry = JitcodeGlobalEntry::PopFromFreeList(&freeEntries_);
685 632 : if (entry)
686 0 : return entry;
687 :
688 632 : return alloc_.new_<JitcodeGlobalEntry>();
689 : }
690 :
#ifdef DEBUG
// Debug-only consistency check: walks the level-0 chain and verifies that
// every entry is linked at exactly the levels its tower covers, that all
// levels are sorted, and that the entry count matches skiplistSize_.
void
JitcodeGlobalTable::verifySkiplist()
{
    JitcodeGlobalEntry* curTower[JitcodeSkiplistTower::MAX_HEIGHT];
    for (unsigned i = 0; i < JitcodeSkiplistTower::MAX_HEIGHT; i++)
        curTower[i] = startTower_[i];

    uint32_t count = 0;
    JitcodeGlobalEntry* curEntry = startTower_[0];
    while (curEntry) {
        count++;
        unsigned curHeight = curEntry->tower_->height();
        MOZ_ASSERT(curHeight >= 1);

        for (unsigned i = 0; i < JitcodeSkiplistTower::MAX_HEIGHT; i++) {
            if (i < curHeight) {
                // The walk at each covered level must arrive at this entry.
                MOZ_ASSERT(curTower[i] == curEntry);
                JitcodeGlobalEntry* nextEntry = curEntry->tower_->next(i);
                MOZ_ASSERT_IF(nextEntry, curEntry->compareTo(*nextEntry) < 0);
                curTower[i] = nextEntry;
            } else {
                // Levels above this entry's height must already be past it.
                MOZ_ASSERT_IF(curTower[i], curTower[i]->compareTo(*curEntry) > 0);
            }
        }
        curEntry = curEntry->tower_->next(0);
    }

    MOZ_ASSERT(count == skiplistSize_);
}
#endif // DEBUG
722 :
// Mark every entry as expired (no longer referenced by the sample buffer),
// dropping nursery-list membership, with sampling suppressed.
void
JitcodeGlobalTable::setAllEntriesAsExpired(JSRuntime* rt)
{
    AutoSuppressProfilerSampling suppressSampling(TlsContext.get());
    for (Range r(*this); !r.empty(); r.popFront()) {
        auto entry = r.front();
        if (entry->canHoldNurseryPointers())
            removeFromNurseryList(&entry->ionEntry());
        entry->setAsExpired();
    }
}
734 :
// Trace policy used with JitcodeGlobalEntry::trace<T>: trace every edge
// unconditionally (used by minor GC, which must update nursery pointers).
struct Unconditionally
{
    template <typename T>
    static bool ShouldTrace(JSRuntime* rt, T* thingp) { return true; }
};
740 :
// Minor-GC tracing: only entries on the nursery list can directly contain
// nursery pointers; trace each unconditionally and clear the list as we go.
void
JitcodeGlobalTable::traceForMinorGC(JSTracer* trc)
{
    // Trace only entries that can directly contain nursery pointers.

    MOZ_ASSERT(trc->runtime()->geckoProfiler().enabled());
    MOZ_ASSERT(JS::CurrentThreadIsHeapMinorCollecting());

    AutoSuppressProfilerSampling suppressSampling(TlsContext.get());
    JitcodeGlobalEntry::IonEntry* entry = nurseryEntries_;
    while (entry) {
        entry->trace<Unconditionally>(trc);
        // Grab the next link before unlinking the current entry.
        JitcodeGlobalEntry::IonEntry* prev = entry;
        entry = entry->nextNursery_;
        removeFromNurseryList(prev);
    }
}
758 :
// Trace policy used with JitcodeGlobalEntry::trace<T>: trace an edge only
// if its target is not already marked, so trace() can report whether any
// new marking happened.
struct IfUnmarked
{
    template <typename T>
    static bool ShouldTrace(JSRuntime* rt, T* thingp) { return !IsMarkedUnbarriered(rt, thingp); }
};

// TypeSet::Type values need their own mark-check entry point.
template <>
bool IfUnmarked::ShouldTrace<TypeSet::Type>(JSRuntime* rt, TypeSet::Type* type)
{
    return !TypeSet::IsTypeMarked(rt, type);
}
770 :
// Weak-style marking of the table during the sweep phase. Returns true if
// any new GC thing was marked (so the caller re-runs until fixpoint).
bool
JitcodeGlobalTable::markIteratively(GCMarker* marker)
{
    // JitcodeGlobalTable must keep entries that are in the sampler buffer
    // alive. This conditionality is akin to holding the entries weakly.
    //
    // If this table were marked at the beginning of the mark phase, then
    // sampling would require a read barrier for sampling in between
    // incremental GC slices. However, invoking read barriers from the sampler
    // is wildly unsafe. The sampler may run at any time, including during GC
    // itself.
    //
    // Instead, JitcodeGlobalTable is marked at the beginning of the sweep
    // phase, along with weak references. The key assumption is the
    // following. At the beginning of the sweep phase, any JS frames that the
    // sampler may put in its buffer that are not already there at the
    // beginning of the mark phase must have already been marked, as either 1)
    // the frame was on-stack at the beginning of the sweep phase, or 2) the
    // frame was pushed between incremental sweep slices. Frames of case 1)
    // are already marked. Frames of case 2) must have been reachable to have
    // been newly pushed, and thus are already marked.
    //
    // The approach above obviates the need for read barriers. The assumption
    // above is checked in JitcodeGlobalTable::lookupForSampler.

    MOZ_ASSERT(!JS::CurrentThreadIsHeapMinorCollecting());

    AutoSuppressProfilerSampling suppressSampling(TlsContext.get());
    uint32_t gen = marker->runtime()->profilerSampleBufferGen();
    uint32_t lapCount = marker->runtime()->profilerSampleBufferLapCount();

    // If the profiler is off, all entries are considered to be expired.
    if (!marker->runtime()->geckoProfiler().enabled())
        gen = UINT32_MAX;

    bool markedAny = false;
    for (Range r(*this); !r.empty(); r.popFront()) {
        JitcodeGlobalEntry* entry = r.front();

        // If an entry is not sampled, reset its generation to the invalid
        // generation, and conditionally mark the rest of the entry if its
        // JitCode is not already marked. This conditional marking ensures
        // that so long as the JitCode *may* be sampled, we keep any
        // information that may be handed out to the sampler, like tracked
        // types used by optimizations and scripts used for pc to line number
        // mapping, alive as well.
        if (!entry->isSampled(gen, lapCount)) {
            if (entry->canHoldNurseryPointers())
                removeFromNurseryList(&entry->ionEntry());
            entry->setAsExpired();
            if (!entry->baseEntry().isJitcodeMarkedFromAnyThread(marker->runtime()))
                continue;
        }

        // The table is runtime-wide. Not all zones may be participating in
        // the GC.
        if (!entry->zone()->isCollecting() || entry->zone()->isGCFinished())
            continue;

        markedAny |= entry->trace<IfUnmarked>(marker);
    }

    return markedAny;
}
835 :
// Sweep the table: remove entries whose JitCode is about to be finalized,
// and fix up the children (weak edges) of surviving entries.
void
JitcodeGlobalTable::sweep(JSRuntime* rt)
{
    AutoSuppressProfilerSampling suppressSampling(TlsContext.get());
    for (Enum e(*this, rt); !e.empty(); e.popFront()) {
        JitcodeGlobalEntry* entry = e.front();

        // Skip zones not participating in this GC.
        if (!entry->zone()->isCollecting() || entry->zone()->isGCFinished())
            continue;

        if (entry->baseEntry().isJitcodeAboutToBeFinalized())
            e.removeFront();
        else
            entry->sweepChildren(rt);
    }
}
852 :
// Trace this entry's JitCode edge according to the given policy; returns
// true if the edge was actually traced.
template <class ShouldTraceProvider>
bool
JitcodeGlobalEntry::BaseEntry::traceJitcode(JSTracer* trc)
{
    if (ShouldTraceProvider::ShouldTrace(trc->runtime(), &jitcode_)) {
        TraceManuallyBarrieredEdge(trc, &jitcode_, "jitcodglobaltable-baseentry-jitcode");
        return true;
    }
    return false;
}
863 :
// True if this entry's JitCode is already marked (any-thread check).
bool
JitcodeGlobalEntry::BaseEntry::isJitcodeMarkedFromAnyThread(JSRuntime* rt)
{
    return IsMarkedUnbarriered(rt, &jitcode_);
}
869 :
// True if this entry's JitCode did not survive the current GC.
bool
JitcodeGlobalEntry::BaseEntry::isJitcodeAboutToBeFinalized()
{
    return IsAboutToBeFinalizedUnbarriered(&jitcode_);
}
875 :
// Trace this entry's script edge according to the given policy; returns
// true if the edge was actually traced.
template <class ShouldTraceProvider>
bool
JitcodeGlobalEntry::BaselineEntry::trace(JSTracer* trc)
{
    if (ShouldTraceProvider::ShouldTrace(trc->runtime(), &script_)) {
        TraceManuallyBarrieredEdge(trc, &script_, "jitcodeglobaltable-baselineentry-script");
        return true;
    }
    return false;
}
886 :
// Assert (and fix up, via the unbarriered check) that the script survived;
// a surviving entry must never reference a dying script.
void
JitcodeGlobalEntry::BaselineEntry::sweepChildren()
{
    MOZ_ALWAYS_FALSE(IsAboutToBeFinalizedUnbarriered(&script_));
}
892 :
// True if this entry's script is already marked (any-thread check).
bool
JitcodeGlobalEntry::BaselineEntry::isMarkedFromAnyThread(JSRuntime* rt)
{
    return IsMarkedUnbarriered(rt, &script_);
}
898 :
// Trace all GC edges held by this Ion entry: every script in the script
// list, then (if present) the tracked-optimizations type edges and their
// allocation-site/constructor addenda. Returns true if anything was traced.
template <class ShouldTraceProvider>
bool
JitcodeGlobalEntry::IonEntry::trace(JSTracer* trc)
{
    bool tracedAny = false;

    JSRuntime* rt = trc->runtime();
    for (unsigned i = 0; i < numScripts(); i++) {
        if (ShouldTraceProvider::ShouldTrace(rt, &sizedScriptList()->pairs[i].script)) {
            TraceManuallyBarrieredEdge(trc, &sizedScriptList()->pairs[i].script,
                                       "jitcodeglobaltable-ionentry-script");
            tracedAny = true;
        }
    }

    if (!optsAllTypes_)
        return tracedAny;

    for (IonTrackedTypeWithAddendum* iter = optsAllTypes_->begin();
         iter != optsAllTypes_->end(); iter++)
    {
        if (ShouldTraceProvider::ShouldTrace(rt, &iter->type)) {
            iter->type.trace(trc);
            tracedAny = true;
        }
        // An addendum holds either an allocation site script or a
        // constructor, never both.
        if (iter->hasAllocationSite() && ShouldTraceProvider::ShouldTrace(rt, &iter->script)) {
            TraceManuallyBarrieredEdge(trc, &iter->script,
                                       "jitcodeglobaltable-ionentry-type-addendum-script");
            tracedAny = true;
        } else if (iter->hasConstructor() && ShouldTraceProvider::ShouldTrace(rt, &iter->constructor)) {
            TraceManuallyBarrieredEdge(trc, &iter->constructor,
                                       "jitcodeglobaltable-ionentry-type-addendum-constructor");
            tracedAny = true;
        }
    }

    return tracedAny;
}
937 :
938 : void
939 0 : JitcodeGlobalEntry::IonEntry::sweepChildren()
940 : {
941 0 : for (unsigned i = 0; i < numScripts(); i++)
942 0 : MOZ_ALWAYS_FALSE(IsAboutToBeFinalizedUnbarriered(&sizedScriptList()->pairs[i].script));
943 :
944 0 : if (!optsAllTypes_)
945 0 : return;
946 :
947 0 : for (IonTrackedTypeWithAddendum* iter = optsAllTypes_->begin();
948 0 : iter != optsAllTypes_->end(); iter++)
949 : {
950 : // Types may move under compacting GC. This method is only called on
951 : // entries that are sampled, and thus are not about to be finalized.
952 0 : MOZ_ALWAYS_FALSE(TypeSet::IsTypeAboutToBeFinalized(&iter->type));
953 0 : if (iter->hasAllocationSite())
954 0 : MOZ_ALWAYS_FALSE(IsAboutToBeFinalizedUnbarriered(&iter->script));
955 0 : else if (iter->hasConstructor())
956 0 : MOZ_ALWAYS_FALSE(IsAboutToBeFinalizedUnbarriered(&iter->constructor));
957 : }
958 : }
959 :
960 : bool
961 0 : JitcodeGlobalEntry::IonEntry::isMarkedFromAnyThread(JSRuntime* rt)
962 : {
963 0 : for (unsigned i = 0; i < numScripts(); i++) {
964 0 : if (!IsMarkedUnbarriered(rt, &sizedScriptList()->pairs[i].script))
965 0 : return false;
966 : }
967 :
968 0 : if (!optsAllTypes_)
969 0 : return true;
970 :
971 0 : for (IonTrackedTypeWithAddendum* iter = optsAllTypes_->begin();
972 0 : iter != optsAllTypes_->end(); iter++)
973 : {
974 0 : if (!TypeSet::IsTypeMarked(rt, &iter->type))
975 0 : return false;
976 : }
977 :
978 0 : return true;
979 : }
980 :
981 : template <class ShouldTraceProvider>
982 : bool
983 0 : JitcodeGlobalEntry::IonCacheEntry::trace(JSTracer* trc)
984 : {
985 0 : JitcodeGlobalEntry& entry = RejoinEntry(trc->runtime(), *this, nativeStartAddr());
986 0 : return entry.trace<ShouldTraceProvider>(trc);
987 : }
988 :
989 : void
990 0 : JitcodeGlobalEntry::IonCacheEntry::sweepChildren(JSRuntime* rt)
991 : {
992 0 : JitcodeGlobalEntry& entry = RejoinEntry(rt, *this, nativeStartAddr());
993 0 : entry.sweepChildren(rt);
994 0 : }
995 :
996 : bool
997 0 : JitcodeGlobalEntry::IonCacheEntry::isMarkedFromAnyThread(JSRuntime* rt)
998 : {
999 0 : JitcodeGlobalEntry& entry = RejoinEntry(rt, *this, nativeStartAddr());
1000 0 : return entry.isMarkedFromAnyThread(rt);
1001 : }
1002 :
1003 : Maybe<uint8_t>
1004 0 : JitcodeGlobalEntry::IonCacheEntry::trackedOptimizationIndexAtAddr(
1005 : JSRuntime *rt,
1006 : void* ptr,
1007 : uint32_t* entryOffsetOut)
1008 : {
1009 0 : MOZ_ASSERT(hasTrackedOptimizations());
1010 0 : MOZ_ASSERT(containsPointer(ptr));
1011 0 : JitcodeGlobalEntry& entry = RejoinEntry(rt, *this, ptr);
1012 :
1013 0 : if (!entry.hasTrackedOptimizations())
1014 0 : return mozilla::Nothing();
1015 :
1016 : uint32_t mainEntryOffsetOut;
1017 : Maybe<uint8_t> maybeIndex =
1018 0 : entry.trackedOptimizationIndexAtAddr(rt, rejoinAddr(), &mainEntryOffsetOut);
1019 0 : if (maybeIndex.isNothing())
1020 0 : return mozilla::Nothing();
1021 :
1022 : // For IonCache, the canonical address is just the start of the addr.
1023 0 : *entryOffsetOut = 0;
1024 0 : return maybeIndex;
1025 : }
1026 :
1027 : void
1028 0 : JitcodeGlobalEntry::IonCacheEntry::forEachOptimizationAttempt(
1029 : JSRuntime *rt, uint8_t index, JS::ForEachTrackedOptimizationAttemptOp& op)
1030 : {
1031 0 : JitcodeGlobalEntry& entry = RejoinEntry(rt, *this, nativeStartAddr());
1032 0 : if (!entry.hasTrackedOptimizations())
1033 0 : return;
1034 0 : entry.forEachOptimizationAttempt(rt, index, op);
1035 :
1036 : // Record the outcome associated with the stub.
1037 0 : op(JS::TrackedStrategy::InlineCache_OptimizedStub, trackedOutcome_);
1038 : }
1039 :
1040 : void
1041 0 : JitcodeGlobalEntry::IonCacheEntry::forEachOptimizationTypeInfo(
1042 : JSRuntime *rt, uint8_t index,
1043 : IonTrackedOptimizationsTypeInfo::ForEachOpAdapter& op)
1044 : {
1045 0 : JitcodeGlobalEntry& entry = RejoinEntry(rt, *this, nativeStartAddr());
1046 0 : if (!entry.hasTrackedOptimizations())
1047 0 : return;
1048 0 : entry.forEachOptimizationTypeInfo(rt, index, op);
1049 : }
1050 :
// Serialize a region header: the region's native-code offset (variable
// length unsigned) followed by the inline script-stack depth (one byte).
// ReadHead must decode in exactly this order.
/* static */ void
JitcodeRegionEntry::WriteHead(CompactBufferWriter& writer,
                              uint32_t nativeOffset, uint8_t scriptDepth)
{
    writer.writeUnsigned(nativeOffset);
    writer.writeByte(scriptDepth);
}
1058 :
// Deserialize a region header written by WriteHead: native offset first,
// then the script-stack depth byte.
/* static */ void
JitcodeRegionEntry::ReadHead(CompactBufferReader& reader,
                             uint32_t* nativeOffset, uint8_t* scriptDepth)
{
    *nativeOffset = reader.readUnsigned();
    *scriptDepth = reader.readByte();
}
1066 :
// Serialize one (script index, pc offset) pair of the inline script stack.
// The script index refers into the entry's script list.
/* static */ void
JitcodeRegionEntry::WriteScriptPc(CompactBufferWriter& writer,
                                  uint32_t scriptIdx, uint32_t pcOffset)
{
    writer.writeUnsigned(scriptIdx);
    writer.writeUnsigned(pcOffset);
}
1074 :
// Deserialize one (script index, pc offset) pair written by WriteScriptPc.
/* static */ void
JitcodeRegionEntry::ReadScriptPc(CompactBufferReader& reader,
                                 uint32_t* scriptIdx, uint32_t* pcOffset)
{
    *scriptIdx = reader.readUnsigned();
    *pcOffset = reader.readUnsigned();
}
1082 :
// Encode a (nativeDelta, pcDelta) pair using the smallest of four
// variable-length formats (1, 2, 3 or 4 bytes). The low bits of the first
// byte form the format tag; in the bit diagrams below, N bits hold the
// native delta, B bits the pc delta, and the trailing 0 / 01 / 011 / 111
// bits are the tag. Negative pc deltas fit only the 3- and 4-byte forms.
// Crashes if neither fits; callers are expected to pre-filter with
// IsDeltaEncodeable.
/* static */ void
JitcodeRegionEntry::WriteDelta(CompactBufferWriter& writer,
                               uint32_t nativeDelta, int32_t pcDelta)
{
    if (pcDelta >= 0) {
        // 1 and 2-byte formats possible.

        // NNNN-BBB0
        if (pcDelta <= ENC1_PC_DELTA_MAX && nativeDelta <= ENC1_NATIVE_DELTA_MAX) {
            uint8_t encVal = ENC1_MASK_VAL | (pcDelta << ENC1_PC_DELTA_SHIFT) |
                             (nativeDelta << ENC1_NATIVE_DELTA_SHIFT);
            writer.writeByte(encVal);
            return;
        }

        // NNNN-NNNN BBBB-BB01
        if (pcDelta <= ENC2_PC_DELTA_MAX && nativeDelta <= ENC2_NATIVE_DELTA_MAX) {
            uint16_t encVal = ENC2_MASK_VAL | (pcDelta << ENC2_PC_DELTA_SHIFT) |
                              (nativeDelta << ENC2_NATIVE_DELTA_SHIFT);
            // Multi-byte formats are emitted little-endian, low byte first.
            writer.writeByte(encVal & 0xff);
            writer.writeByte((encVal >> 8) & 0xff);
            return;
        }
    }

    // NNNN-NNNN NNNB-BBBB BBBB-B011
    if (pcDelta >= ENC3_PC_DELTA_MIN && pcDelta <= ENC3_PC_DELTA_MAX &&
        nativeDelta <= ENC3_NATIVE_DELTA_MAX)
    {
        // The pc delta is masked because it may be negative (stored in
        // two's complement within its bit field).
        uint32_t encVal = ENC3_MASK_VAL |
                          ((pcDelta << ENC3_PC_DELTA_SHIFT) & ENC3_PC_DELTA_MASK) |
                          (nativeDelta << ENC3_NATIVE_DELTA_SHIFT);
        writer.writeByte(encVal & 0xff);
        writer.writeByte((encVal >> 8) & 0xff);
        writer.writeByte((encVal >> 16) & 0xff);
        return;
    }

    // NNNN-NNNN NNNN-NNNN BBBB-BBBB BBBB-B111
    if (pcDelta >= ENC4_PC_DELTA_MIN && pcDelta <= ENC4_PC_DELTA_MAX &&
        nativeDelta <= ENC4_NATIVE_DELTA_MAX)
    {
        uint32_t encVal = ENC4_MASK_VAL |
                          ((pcDelta << ENC4_PC_DELTA_SHIFT) & ENC4_PC_DELTA_MASK) |
                          (nativeDelta << ENC4_NATIVE_DELTA_SHIFT);
        writer.writeByte(encVal & 0xff);
        writer.writeByte((encVal >> 8) & 0xff);
        writer.writeByte((encVal >> 16) & 0xff);
        writer.writeByte((encVal >> 24) & 0xff);
        return;
    }

    // Should never get here.
    MOZ_CRASH("pcDelta/nativeDelta values are too large to encode.");
}
1138 :
// Decode a delta pair written by WriteDelta. The tag in the low bits of the
// first byte selects one of the four encodings; bytes of multi-byte formats
// are read little-endian. Sign extension of the pc delta is performed
// manually for the 3- and 4-byte forms, which may carry negative values.
/* static */ void
JitcodeRegionEntry::ReadDelta(CompactBufferReader& reader,
                              uint32_t* nativeDelta, int32_t* pcDelta)
{
    // NB:
    // It's possible to get nativeDeltas with value 0 in two cases:
    //
    // 1. The last region's run. This is because the region table's start
    // must be 4-byte aligned, and we must insert padding bytes to align the
    // payload section before emitting the table.
    //
    // 2. A zero-offset nativeDelta with a negative pcDelta.
    //
    // So if nativeDelta is zero, then pcDelta must be <= 0.

    // NNNN-BBB0
    const uint32_t firstByte = reader.readByte();
    if ((firstByte & ENC1_MASK) == ENC1_MASK_VAL) {
        uint32_t encVal = firstByte;
        *nativeDelta = encVal >> ENC1_NATIVE_DELTA_SHIFT;
        *pcDelta = (encVal & ENC1_PC_DELTA_MASK) >> ENC1_PC_DELTA_SHIFT;
        MOZ_ASSERT_IF(*nativeDelta == 0, *pcDelta <= 0);
        return;
    }

    // NNNN-NNNN BBBB-BB01
    const uint32_t secondByte = reader.readByte();
    if ((firstByte & ENC2_MASK) == ENC2_MASK_VAL) {
        uint32_t encVal = firstByte | secondByte << 8;
        *nativeDelta = encVal >> ENC2_NATIVE_DELTA_SHIFT;
        *pcDelta = (encVal & ENC2_PC_DELTA_MASK) >> ENC2_PC_DELTA_SHIFT;
        // A zero delta pair would have used the 1-byte form.
        MOZ_ASSERT(*pcDelta != 0);
        MOZ_ASSERT_IF(*nativeDelta == 0, *pcDelta <= 0);
        return;
    }

    // NNNN-NNNN NNNB-BBBB BBBB-B011
    const uint32_t thirdByte = reader.readByte();
    if ((firstByte & ENC3_MASK) == ENC3_MASK_VAL) {
        uint32_t encVal = firstByte | secondByte << 8 | thirdByte << 16;
        *nativeDelta = encVal >> ENC3_NATIVE_DELTA_SHIFT;

        uint32_t pcDeltaU = (encVal & ENC3_PC_DELTA_MASK) >> ENC3_PC_DELTA_SHIFT;
        // Fix sign if necessary.
        if (pcDeltaU > static_cast<uint32_t>(ENC3_PC_DELTA_MAX))
            pcDeltaU |= ~ENC3_PC_DELTA_MAX;
        *pcDelta = pcDeltaU;
        MOZ_ASSERT(*pcDelta != 0);
        MOZ_ASSERT_IF(*nativeDelta == 0, *pcDelta <= 0);
        return;
    }

    // NNNN-NNNN NNNN-NNNN BBBB-BBBB BBBB-B111
    MOZ_ASSERT((firstByte & ENC4_MASK) == ENC4_MASK_VAL);
    const uint32_t fourthByte = reader.readByte();
    uint32_t encVal = firstByte | secondByte << 8 | thirdByte << 16 | fourthByte << 24;
    *nativeDelta = encVal >> ENC4_NATIVE_DELTA_SHIFT;

    uint32_t pcDeltaU = (encVal & ENC4_PC_DELTA_MASK) >> ENC4_PC_DELTA_SHIFT;
    // fix sign if necessary
    if (pcDeltaU > static_cast<uint32_t>(ENC4_PC_DELTA_MAX))
        pcDeltaU |= ~ENC4_PC_DELTA_MAX;
    *pcDelta = pcDeltaU;

    MOZ_ASSERT(*pcDelta != 0);
    MOZ_ASSERT_IF(*nativeDelta == 0, *pcDelta <= 0);
}
1206 :
// Count how many consecutive NativeToBytecode entries, starting at |entry|,
// can be folded into a single run: the run ends at |end|, at a change of
// inline site, at an unencodeable delta, or at MAX_RUN_LENGTH entries.
/* static */ uint32_t
JitcodeRegionEntry::ExpectedRunLength(const CodeGeneratorShared::NativeToBytecode* entry,
                                      const CodeGeneratorShared::NativeToBytecode* end)
{
    MOZ_ASSERT(entry < end);

    // The first entry is always part of the run.
    uint32_t runLength = 1;

    uint32_t prevNativeOffset = entry->nativeOffset.offset();
    uint32_t prevBytecodeOffset = entry->tree->script()->pcToOffset(entry->pc);

    for (const CodeGeneratorShared::NativeToBytecode* next = entry + 1; next != end; next++) {
        // A change of inline site terminates the run.
        if (next->tree != entry->tree)
            break;

        uint32_t nextNativeOffset = next->nativeOffset.offset();
        uint32_t nextBytecodeOffset = next->tree->script()->pcToOffset(next->pc);
        MOZ_ASSERT(nextNativeOffset >= prevNativeOffset);

        uint32_t nativeDelta = nextNativeOffset - prevNativeOffset;
        int32_t bytecodeDelta = int32_t(nextBytecodeOffset) - int32_t(prevBytecodeOffset);

        // Deltas too large to encode (very unlikely) also terminate the run.
        if (!IsDeltaEncodeable(nativeDelta, bytecodeDelta))
            break;

        runLength++;

        // Cap the run at its maximum length.
        if (runLength == MAX_RUN_LENGTH)
            break;

        prevNativeOffset = nextNativeOffset;
        prevBytecodeOffset = nextBytecodeOffset;
    }

    return runLength;
}
1247 :
// Debug-spew helper: after each chunk of writes to a CompactBufferWriter,
// spewAndAdvance() dumps the bytes appended since the last call as hex and
// advances its start position. Compiles to a no-op when JS_JITSPEW is off.
struct JitcodeMapBufferWriteSpewer
{
#ifdef JS_JITSPEW
    CompactBufferWriter* writer;
    // Buffer position at which the next dump starts.
    uint32_t startPos;

    // Maximum number of appended bytes dumped per call; longer spans are
    // truncated to this many bytes.
    static const uint32_t DumpMaxBytes = 50;

    explicit JitcodeMapBufferWriteSpewer(CompactBufferWriter& w)
      : writer(&w), startPos(writer->length())
    {}

    void spewAndAdvance(const char* name) {
        if (writer->oom())
            return;

        uint32_t curPos = writer->length();
        const uint8_t* start = writer->buffer() + startPos;
        const uint8_t* end = writer->buffer() + curPos;
        const char* MAP = "0123456789ABCDEF";
        uint32_t bytes = end - start;

        // Clamp to the dump buffer's capacity. The loop below writes 3
        // chars per byte into a DumpMaxBytes*3 buffer, so spewing more than
        // DumpMaxBytes bytes would otherwise overrun the stack buffer.
        if (bytes > DumpMaxBytes)
            bytes = DumpMaxBytes;

        char buffer[DumpMaxBytes * 3];
        for (uint32_t i = 0; i < bytes; i++) {
            buffer[i*3] = MAP[(start[i] >> 4) & 0xf];
            buffer[i*3 + 1] = MAP[(start[i] >> 0) & 0xf];
            buffer[i*3 + 2] = ' ';
        }

        // Null-terminate over the trailing space. Guard the zero-byte case,
        // which previously wrote to buffer[-1].
        if (bytes == 0)
            buffer[0] = '\0';
        else
            buffer[bytes*3 - 1] = '\0';

        JitSpew(JitSpew_Profiling, "%s@%d[%d bytes] - %s", name, int(startPos), int(bytes), buffer);

        // Move to the end of the current buffer.
        startPos = writer->length();
    }
#else // !JS_JITSPEW
    explicit JitcodeMapBufferWriteSpewer(CompactBufferWriter& w) {}
    void spewAndAdvance(const char* name) {}
#endif // JS_JITSPEW
};
1291 :
// Write a run, starting at the given NativeToBytecode entry, into the given
// buffer writer. The serialized layout is: the region header (native offset
// and script depth), then one (scriptIdx, pcOffset) pair per inline-stack
// level from innermost to outermost, then runLength-1 encoded delta pairs
// (the first entry is implicit in the header). Returns false only on writer
// OOM. All entries in the run must share the same inline tree (checked by
// assertion) — ExpectedRunLength guarantees this.
/* static */ bool
JitcodeRegionEntry::WriteRun(CompactBufferWriter& writer,
                             JSScript** scriptList, uint32_t scriptListSize,
                             uint32_t runLength, const CodeGeneratorShared::NativeToBytecode* entry)
{
    MOZ_ASSERT(runLength > 0);
    MOZ_ASSERT(runLength <= MAX_RUN_LENGTH);

    // Calculate script depth.
    MOZ_ASSERT(entry->tree->depth() <= 0xff);
    uint8_t scriptDepth = entry->tree->depth();
    uint32_t regionNativeOffset = entry->nativeOffset.offset();

    JitcodeMapBufferWriteSpewer spewer(writer);

    // Write the head info.
    JitSpew(JitSpew_Profiling, "      Head Info: nativeOffset=%d scriptDepth=%d",
            int(regionNativeOffset), int(scriptDepth));
    WriteHead(writer, regionNativeOffset, scriptDepth);
    spewer.spewAndAdvance("      ");

    // Write each script/pc pair.
    {
        InlineScriptTree* curTree = entry->tree;
        jsbytecode* curPc = entry->pc;
        for (uint8_t i = 0; i < scriptDepth; i++) {
            // Find the index of the script within the list.
            // NB: scriptList is guaranteed to contain curTree->script()
            uint32_t scriptIdx = 0;
            for (; scriptIdx < scriptListSize; scriptIdx++) {
                if (scriptList[scriptIdx] == curTree->script())
                    break;
            }
            MOZ_ASSERT(scriptIdx < scriptListSize);

            uint32_t pcOffset = curTree->script()->pcToOffset(curPc);

            JitSpew(JitSpew_Profiling, "      Script/PC %d: scriptIdx=%d pcOffset=%d",
                    int(i), int(scriptIdx), int(pcOffset));
            WriteScriptPc(writer, scriptIdx, pcOffset);
            spewer.spewAndAdvance("      ");

            // Walk one level out towards the outermost caller.
            MOZ_ASSERT_IF(i < scriptDepth - 1, curTree->hasCaller());
            curPc = curTree->callerPc();
            curTree = curTree->caller();
        }
    }

    // Start writing runs.
    uint32_t curNativeOffset = entry->nativeOffset.offset();
    uint32_t curBytecodeOffset = entry->tree->script()->pcToOffset(entry->pc);

    JitSpew(JitSpew_Profiling, "      Writing Delta Run from nativeOffset=%d bytecodeOffset=%d",
            int(curNativeOffset), int(curBytecodeOffset));

    // Skip first entry because it is implicit in the header. Start at subsequent entry.
    for (uint32_t i = 1; i < runLength; i++) {
        MOZ_ASSERT(entry[i].tree == entry->tree);

        uint32_t nextNativeOffset = entry[i].nativeOffset.offset();
        uint32_t nextBytecodeOffset = entry[i].tree->script()->pcToOffset(entry[i].pc);
        MOZ_ASSERT(nextNativeOffset >= curNativeOffset);

        uint32_t nativeDelta = nextNativeOffset - curNativeOffset;
        int32_t bytecodeDelta = int32_t(nextBytecodeOffset) - int32_t(curBytecodeOffset);
        MOZ_ASSERT(IsDeltaEncodeable(nativeDelta, bytecodeDelta));

        JitSpew(JitSpew_Profiling, "      RunEntry native: %d-%d [%d]  bytecode: %d-%d [%d]",
                int(curNativeOffset), int(nextNativeOffset), int(nativeDelta),
                int(curBytecodeOffset), int(nextBytecodeOffset), int(bytecodeDelta));
        WriteDelta(writer, nativeDelta, bytecodeDelta);

        // Spew the bytecode in these ranges.
        if (curBytecodeOffset < nextBytecodeOffset) {
            JitSpewStart(JitSpew_Profiling, "      OPS: ");
            uint32_t curBc = curBytecodeOffset;
            while (curBc < nextBytecodeOffset) {
                jsbytecode* pc = entry[i].tree->script()->offsetToPC(curBc);
#ifdef JS_JITSPEW
                JSOp op = JSOp(*pc);
                JitSpewCont(JitSpew_Profiling, "%s ", CodeName[op]);
#endif
                curBc += GetBytecodeLength(pc);
            }
            JitSpewFin(JitSpew_Profiling);
        }
        spewer.spewAndAdvance("      ");

        curNativeOffset = nextNativeOffset;
        curBytecodeOffset = nextBytecodeOffset;
    }

    if (writer.oom())
        return false;

    return true;
}
1390 :
// Decode this region's header and record where the script/pc stack and the
// delta run begin within the serialized payload.
void
JitcodeRegionEntry::unpack()
{
    CompactBufferReader reader(data_, end_);
    ReadHead(reader, &nativeOffset_, &scriptDepth_);
    MOZ_ASSERT(scriptDepth_ > 0);

    scriptPcStack_ = reader.currentPosition();

    // Advance the reader past the script/pc stack; the decoded values
    // themselves are not needed here.
    for (unsigned level = 0; level < scriptDepth_; level++) {
        uint32_t unusedScriptIdx, unusedPcOffset;
        ReadScriptPc(reader, &unusedScriptIdx, &unusedPcOffset);
    }

    deltaRun_ = reader.currentPosition();
}
1407 :
// Walk the delta run accumulating offsets until the queried native offset
// falls within the current delta-run entry, and return the pc offset
// reached at that point.
uint32_t
JitcodeRegionEntry::findPcOffset(uint32_t queryNativeOffset, uint32_t startPcOffset) const
{
    uint32_t curNativeOffset = nativeOffset();
    uint32_t curPcOffset = startPcOffset;

    for (DeltaIterator iter = deltaIterator(); iter.hasMore();) {
        uint32_t nativeDelta;
        int32_t pcDelta;
        iter.readNext(&nativeDelta, &pcDelta);

        // The start address of the next delta-run entry is counted towards
        // the current delta-run entry, because return addresses should
        // associate with the bytecode op prior (the call) not the op after.
        if (queryNativeOffset <= curNativeOffset + nativeDelta)
            break;

        curNativeOffset += nativeDelta;
        curPcOffset += pcDelta;
    }

    return curPcOffset;
}
1429 :
1430 : typedef js::Vector<char*, 32, SystemAllocPolicy> ProfilingStringVector;
1431 :
1432 : struct AutoFreeProfilingStrings {
1433 : ProfilingStringVector& profilingStrings_;
1434 : bool keep_;
1435 0 : explicit AutoFreeProfilingStrings(ProfilingStringVector& vec)
1436 0 : : profilingStrings_(vec),
1437 0 : keep_(false)
1438 0 : {}
1439 :
1440 0 : void keepStrings() { keep_ = true; }
1441 :
1442 0 : ~AutoFreeProfilingStrings() {
1443 0 : if (keep_)
1444 0 : return;
1445 0 : for (size_t i = 0; i < profilingStrings_.length(); i++)
1446 0 : js_free(profilingStrings_[i]);
1447 0 : }
1448 : };
1449 :
// Build an IonEntry for |code| covering |numScripts| scripts. Creates one
// profiling string per script, then a SizedScriptList holding the scripts
// and strings, and initializes |out| with them. On any failure before the
// SizedScriptList allocation succeeds, the AutoFreeProfilingStrings guard
// releases the strings created so far. NOTE(review): out.init is assumed to
// take ownership of the SizedScriptList allocation and the strings —
// confirm against IonEntry::init/destroy.
bool
JitcodeIonTable::makeIonEntry(JSContext* cx, JitCode* code,
                              uint32_t numScripts, JSScript** scripts,
                              JitcodeGlobalEntry::IonEntry& out)
{
    typedef JitcodeGlobalEntry::IonEntry::SizedScriptList SizedScriptList;

    MOZ_ASSERT(numScripts > 0);

    // Create profiling strings for script, within vector.
    typedef js::Vector<char*, 32, SystemAllocPolicy> ProfilingStringVector;

    ProfilingStringVector profilingStrings;
    if (!profilingStrings.reserve(numScripts))
        return false;

    // The guard frees the strings on early return; disarmed below once
    // ownership moves into the SizedScriptList.
    AutoFreeProfilingStrings autoFreeProfilingStrings(profilingStrings);
    for (uint32_t i = 0; i < numScripts; i++) {
        char* str = JitcodeGlobalEntry::createScriptString(cx, scripts[i]);
        if (!str)
            return false;
        if (!profilingStrings.append(str))
            return false;
    }

    // Create SizedScriptList
    void* mem = (void*)cx->pod_malloc<uint8_t>(SizedScriptList::AllocSizeFor(numScripts));
    if (!mem)
        return false;

    // Keep allocated profiling strings on destruct.
    autoFreeProfilingStrings.keepStrings();

    // Placement-new the variable-sized list into the raw allocation.
    SizedScriptList* scriptList = new (mem) SizedScriptList(numScripts, scripts,
                                                            &profilingStrings[0]);
    out.init(code, code->raw(), code->rawEnd(), scriptList, this);
    return true;
}
1488 :
// Find the index of the region containing |nativeOffset|. Regions are open
// at their starting address and closed at their ending address, so a query
// exactly at a region boundary maps to the earlier region (return addresses
// must associate with the call's bytecode, not the following op's).
uint32_t
JitcodeIonTable::findRegionEntry(uint32_t nativeOffset) const
{
    static const uint32_t LINEAR_SEARCH_THRESHOLD = 8;
    uint32_t numEntries = numRegions();
    MOZ_ASSERT(numEntries > 0);

    // For small region lists, just search linearly.
    if (numEntries <= LINEAR_SEARCH_THRESHOLD) {
        JitcodeRegionEntry prev = regionEntry(0);
        for (uint32_t i = 1; i < numEntries; i++) {
            JitcodeRegionEntry cur = regionEntry(i);
            MOZ_ASSERT(cur.nativeOffset() >= prev.nativeOffset());

            // '<=' (not '<') because regions are closed at their ending
            // addresses and open at their starting addresses; see the
            // comment on the binary search below.
            if (nativeOffset <= cur.nativeOffset())
                return i - 1;

            prev = cur;
        }
        // If nothing found, assume it falls within last region.
        return numEntries - 1;
    }

    // For larger lists, binary search the region table.
    uint32_t lo = 0;
    uint32_t span = numEntries;
    while (span > 1) {
        uint32_t half = span / 2;
        uint32_t mid = lo + half;
        JitcodeRegionEntry midEntry = regionEntry(mid);

        // A region memory range is closed at its ending address, not starting
        // address. This is because the return address for calls must associate
        // with the call's bytecode PC, not the PC of the bytecode operator after
        // the call.
        //
        // So a query is < an entry if the query nativeOffset is <= the start address
        // of the entry, and a query is >= an entry if the query nativeOffset is > the
        // start address of an entry.
        if (nativeOffset <= midEntry.nativeOffset()) {
            // Target entry is below midEntry.
            span = half;
        } else {
            // Target entry is at midEntry or above.
            lo = mid;
            span -= half;
        }
    }
    return lo;
}
1542 :
// Serialize the full native-to-bytecode map into |writer|. Layout: all run
// payloads back-to-back, zero padding up to 4-byte alignment, then the
// table itself — a native-endian uint32 region count followed by one uint32
// per run holding the run's backwards offset from the table start. On
// success *tableOffsetOut is the byte offset of the table and *numRegionsOut
// the number of runs. Returns false on OOM (writer or vector).
/* static */ bool
JitcodeIonTable::WriteIonTable(CompactBufferWriter& writer,
                               JSScript** scriptList, uint32_t scriptListSize,
                               const CodeGeneratorShared::NativeToBytecode* start,
                               const CodeGeneratorShared::NativeToBytecode* end,
                               uint32_t* tableOffsetOut, uint32_t* numRegionsOut)
{
    MOZ_ASSERT(tableOffsetOut != nullptr);
    MOZ_ASSERT(numRegionsOut != nullptr);
    MOZ_ASSERT(writer.length() == 0);
    MOZ_ASSERT(scriptListSize > 0);

    JitSpew(JitSpew_Profiling, "Writing native to bytecode map for %s:%" PRIuSIZE " (%" PRIuSIZE " entries)",
            scriptList[0]->filename(), scriptList[0]->lineno(),
            mozilla::PointerRangeSize(start, end));

    JitSpew(JitSpew_Profiling, "  ScriptList of size %d", int(scriptListSize));
    for (uint32_t i = 0; i < scriptListSize; i++) {
        JitSpew(JitSpew_Profiling, "  Script %d - %s:%" PRIuSIZE,
                int(i), scriptList[i]->filename(), scriptList[i]->lineno());
    }

    // Write out runs first. Keep a vector tracking the positive offsets from payload
    // start to the run.
    const CodeGeneratorShared::NativeToBytecode* curEntry = start;
    js::Vector<uint32_t, 32, SystemAllocPolicy> runOffsets;

    while (curEntry != end) {
        // Calculate the length of the next run.
        uint32_t runLength = JitcodeRegionEntry::ExpectedRunLength(curEntry, end);
        MOZ_ASSERT(runLength > 0);
        MOZ_ASSERT(runLength <= uintptr_t(end - curEntry));
        JitSpew(JitSpew_Profiling, "  Run at entry %d, length %d, buffer offset %d",
                int(curEntry - start), int(runLength), int(writer.length()));

        // Store the offset of the run.
        if (!runOffsets.append(writer.length()))
            return false;

        // Encode the run.
        if (!JitcodeRegionEntry::WriteRun(writer, scriptList, scriptListSize, runLength, curEntry))
            return false;

        curEntry += runLength;
    }

    // Done encoding regions. About to start table. Ensure we are aligned to 4 bytes
    // since table is composed of uint32_t values.
    uint32_t padding = sizeof(uint32_t) - (writer.length() % sizeof(uint32_t));
    if (padding == sizeof(uint32_t))
        padding = 0;
    JitSpew(JitSpew_Profiling, "  Padding %d bytes after run @%d",
            int(padding), int(writer.length()));
    for (uint32_t i = 0; i < padding; i++)
        writer.writeByte(0);

    // Now at start of table.
    uint32_t tableOffset = writer.length();

    // The table being written at this point will be accessed directly via uint32_t
    // pointers, so all writes below use native endianness.

    // Write out numRegions
    JitSpew(JitSpew_Profiling, "  Writing numRuns=%d", int(runOffsets.length()));
    writer.writeNativeEndianUint32_t(runOffsets.length());

    // Write out region offset table. The offsets in |runOffsets| are currently forward
    // offsets from the beginning of the buffer. We convert them to backwards offsets
    // from the start of the table before writing them into their table entries.
    for (uint32_t i = 0; i < runOffsets.length(); i++) {
        JitSpew(JitSpew_Profiling, "  Run %d offset=%d backOffset=%d @%d",
                int(i), int(runOffsets[i]), int(tableOffset - runOffsets[i]), int(writer.length()));
        writer.writeNativeEndianUint32_t(tableOffset - runOffsets[i]);
    }

    if (writer.oom())
        return false;

    *tableOffsetOut = tableOffset;
    *numRegionsOut = runOffsets.length();
    return true;
}
1625 :
1626 :
1627 : } // namespace jit
1628 : } // namespace js
1629 :
// Construct a handle describing one logical (possibly inlined) frame at
// |addr| within |entry|: |label| is the frame's profiling string and |depth|
// its position in the inline stack. updateHasTrackedOptimizations() runs
// first because, for entries with tracked optimizations, it also computes
// the canonical native address as a side effect; otherwise the canonical
// address is derived from the entry directly.
JS::ForEachProfiledFrameOp::FrameHandle::FrameHandle(JSRuntime* rt, js::jit::JitcodeGlobalEntry& entry,
                                                     void* addr, const char* label, uint32_t depth)
  : rt_(rt),
    entry_(entry),
    addr_(addr),
    canonicalAddr_(nullptr),
    label_(label),
    depth_(depth),
    optsIndex_()
{
    updateHasTrackedOptimizations();

    if (!canonicalAddr_) {
        // If the entry has tracked optimizations, updateHasTrackedOptimizations
        // would have updated the canonical address.
        MOZ_ASSERT_IF(entry_.isIon(), !hasTrackedOptimizations());
        canonicalAddr_ = entry_.canonicalNativeAddrFor(rt_, addr_);
    }
}
1649 :
1650 : JS_PUBLIC_API(JS::ProfilingFrameIterator::FrameKind)
1651 0 : JS::ForEachProfiledFrameOp::FrameHandle::frameKind() const
1652 : {
1653 0 : if (entry_.isBaseline())
1654 0 : return JS::ProfilingFrameIterator::Frame_Baseline;
1655 0 : return JS::ProfilingFrameIterator::Frame_Ion;
1656 : }
1657 :
// Look up the jitcode entry covering |addr| and invoke |op| once per
// logical (inlined) frame at that address, from the highest frame index
// down to 0.
JS_PUBLIC_API(void)
JS::ForEachProfiledFrame(JSContext* cx, void* addr, ForEachProfiledFrameOp& op)
{
    js::jit::JitcodeGlobalTable* table = cx->runtime()->jitRuntime()->getJitcodeGlobalTable();
    js::jit::JitcodeGlobalEntry& entry = table->lookupInfallible(addr);

    // Extract the stack for the entry. Assume maximum inlining depth is <64.
    const char* labels[64];
    uint32_t depth = entry.callStackAtAddr(cx->runtime(), addr, labels, 64);
    MOZ_ASSERT(depth < 64);

    uint32_t frame = depth;
    while (frame != 0) {
        frame--;
        JS::ForEachProfiledFrameOp::FrameHandle handle(cx->runtime(), entry, addr,
                                                       labels[frame], frame);
        op(handle);
    }
}
|