/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "gc/Allocator.h"

#include "jscntxt.h"

#include "gc/GCInternals.h"
#include "gc/GCTrace.h"
#include "gc/Nursery.h"
#include "jit/JitCompartment.h"
#include "threading/CpuCount.h"
#include "vm/Runtime.h"
#include "vm/String.h"

#include "jsobjinlines.h"

#include "gc/Heap-inl.h"

using namespace js;
using namespace gc;

template <typename T, AllowGC allowGC /* = CanGC */>
JSObject*
js::Allocate(JSContext* cx, AllocKind kind, size_t nDynamicSlots, InitialHeap heap,
             const Class* clasp)
{
    static_assert(mozilla::IsConvertible<T*, JSObject*>::value, "must be JSObject derived");
    MOZ_ASSERT(IsObjectAllocKind(kind));
    size_t thingSize = Arena::thingSize(kind);

    MOZ_ASSERT(thingSize == Arena::thingSize(kind));
    MOZ_ASSERT(thingSize >= sizeof(JSObject_Slots0));
    static_assert(sizeof(JSObject_Slots0) >= MinCellSize,
                  "All allocations must be at least the allocator-imposed minimum size.");

    MOZ_ASSERT_IF(nDynamicSlots != 0, clasp->isNative() || clasp->isProxy());

    // Off-thread alloc cannot trigger GC or make runtime assertions.
    if (cx->helperThread()) {
        JSObject* obj = GCRuntime::tryNewTenuredObject<NoGC>(cx, kind, thingSize, nDynamicSlots);
        if (MOZ_UNLIKELY(allowGC && !obj))
            ReportOutOfMemory(cx);
        return obj;
    }

    JSRuntime* rt = cx->runtime();
    if (!rt->gc.checkAllocatorState<allowGC>(cx, kind))
        return nullptr;

    if (cx->nursery().isEnabled() && heap != TenuredHeap) {
        JSObject* obj = rt->gc.tryNewNurseryObject<allowGC>(cx, thingSize, nDynamicSlots, clasp);
        if (obj)
            return obj;

        // Our most common non-jit allocation path is NoGC; thus, if we fail the
        // alloc and cannot GC, we *must* return nullptr here so that the caller
        // will do a CanGC allocation to clear the nursery. Failing to do so will
        // cause all allocations on this path to land in Tenured, and we will not
        // get the benefit of the nursery.
        if (!allowGC)
            return nullptr;
    }

    return GCRuntime::tryNewTenuredObject<allowGC>(cx, kind, thingSize, nDynamicSlots);
}
template JSObject* js::Allocate<JSObject, NoGC>(JSContext* cx, gc::AllocKind kind,
                                                size_t nDynamicSlots, gc::InitialHeap heap,
                                                const Class* clasp);
template JSObject* js::Allocate<JSObject, CanGC>(JSContext* cx, gc::AllocKind kind,
                                                 size_t nDynamicSlots, gc::InitialHeap heap,
                                                 const Class* clasp);
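// Example (illustrative only): with an AllocKind and a Class already in hand,
// a caller might invoke the object allocator along these lines:
//
//   JSObject* obj = js::Allocate<JSObject, CanGC>(cx, gc::AllocKind::OBJECT8,
//                                                 0, gc::DefaultHeap, clasp);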

// Attempt to allocate a new GC thing out of the nursery. If there is not enough
// room in the nursery or there is an OOM, this method will return nullptr.
template <AllowGC allowGC>
JSObject*
GCRuntime::tryNewNurseryObject(JSContext* cx, size_t thingSize, size_t nDynamicSlots, const Class* clasp)
{
    MOZ_ASSERT(cx->isNurseryAllocAllowed());
    MOZ_ASSERT(!cx->helperThread());
    MOZ_ASSERT(!IsAtomsCompartment(cx->compartment()));
    JSObject* obj = cx->nursery().allocateObject(cx, thingSize, nDynamicSlots, clasp);
    if (obj)
        return obj;

    if (allowGC && !cx->suppressGC) {
        cx->runtime()->gc.minorGC(JS::gcreason::OUT_OF_NURSERY);

        // Exceeding gcMaxBytes while tenuring can disable the Nursery.
        if (cx->nursery().isEnabled()) {
            JSObject* obj = cx->nursery().allocateObject(cx, thingSize, nDynamicSlots, clasp);
            MOZ_ASSERT(obj);
            return obj;
        }
    }
    return nullptr;
}

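// Allocate a tenured JSObject. Any dynamic slot storage is malloc'ed up front;
// if allocation of the GC thing itself then fails, the slots are freed again
// before returning nullptr.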
template <AllowGC allowGC>
JSObject*
GCRuntime::tryNewTenuredObject(JSContext* cx, AllocKind kind, size_t thingSize,
                               size_t nDynamicSlots)
{
    HeapSlot* slots = nullptr;
    if (nDynamicSlots) {
        slots = cx->zone()->pod_malloc<HeapSlot>(nDynamicSlots);
        if (MOZ_UNLIKELY(!slots)) {
            if (allowGC)
                ReportOutOfMemory(cx);
            return nullptr;
        }
        Debug_SetSlotRangeToCrashOnTouch(slots, nDynamicSlots);
    }

    JSObject* obj = tryNewTenuredThing<JSObject, allowGC>(cx, kind, thingSize);

    if (obj)
        obj->setInitialSlotsMaybeNonNative(slots);
    else
        js_free(slots);

    return obj;
}

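// Allocate a non-object GC thing (string, symbol, script, shape, and so on).
// These kinds are not nursery-allocated by this path; they go straight to the
// tenured heap via tryNewTenuredThing.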
template <typename T, AllowGC allowGC /* = CanGC */>
T*
js::Allocate(JSContext* cx)
{
    static_assert(!mozilla::IsConvertible<T*, JSObject*>::value, "must not be JSObject derived");
    static_assert(sizeof(T) >= MinCellSize,
                  "All allocations must be at least the allocator-imposed minimum size.");

    AllocKind kind = MapTypeToFinalizeKind<T>::kind;
    size_t thingSize = sizeof(T);
    MOZ_ASSERT(thingSize == Arena::thingSize(kind));

    if (!cx->helperThread()) {
        if (!cx->runtime()->gc.checkAllocatorState<allowGC>(cx, kind))
            return nullptr;
    }

    return GCRuntime::tryNewTenuredThing<T, allowGC>(cx, kind, thingSize);
}

#define DECL_ALLOCATOR_INSTANCES(allocKind, traceKind, type, sizedType) \
    template type* js::Allocate<type, NoGC>(JSContext* cx);\
    template type* js::Allocate<type, CanGC>(JSContext* cx);
FOR_EACH_NONOBJECT_ALLOCKIND(DECL_ALLOCATOR_INSTANCES)
#undef DECL_ALLOCATOR_INSTANCES

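// Tenured allocation: bump-allocate from the current free list; on failure
// refill the free list, and as a last resort run a shrinking GC before
// reporting OOM.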
template <typename T, AllowGC allowGC>
/* static */ T*
GCRuntime::tryNewTenuredThing(JSContext* cx, AllocKind kind, size_t thingSize)
{
    // Bump allocate in the arena's current free-list span.
    T* t = reinterpret_cast<T*>(cx->arenas()->allocateFromFreeList(kind, thingSize));
    if (MOZ_UNLIKELY(!t)) {
        // Get the next available free list and allocate out of it. This may
        // acquire a new arena, which will lock the chunk list. If there are no
        // chunks available it may also allocate new memory directly.
        t = reinterpret_cast<T*>(refillFreeListFromAnyThread(cx, kind, thingSize));

        if (MOZ_UNLIKELY(!t && allowGC && !cx->helperThread())) {
            // We have no memory available for a new chunk; perform an
            // all-compartments, non-incremental, shrinking GC and wait for
            // sweeping to finish.
            JS::PrepareForFullGC(cx);
            cx->runtime()->gc.gc(GC_SHRINK, JS::gcreason::LAST_DITCH);
            cx->runtime()->gc.waitBackgroundSweepOrAllocEnd();

            t = tryNewTenuredThing<T, NoGC>(cx, kind, thingSize);
            if (!t)
                ReportOutOfMemory(cx);
        }
    }

    checkIncrementalZoneState(cx, t);
    TraceTenuredAlloc(t, kind);
    return t;
}

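// Pre-allocation bookkeeping: run any GC that is due, assert that this kind
// may be allocated in the current compartment, and fail the request when the
// OOM-simulation hook asks for it.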
template <AllowGC allowGC>
bool
GCRuntime::checkAllocatorState(JSContext* cx, AllocKind kind)
{
    if (allowGC) {
        if (!gcIfNeededAtAllocation(cx))
            return false;
    }

#if defined(JS_GC_ZEAL) || defined(DEBUG)
    MOZ_ASSERT_IF(cx->compartment()->isAtomsCompartment(),
                  kind == AllocKind::ATOM ||
                  kind == AllocKind::FAT_INLINE_ATOM ||
                  kind == AllocKind::SYMBOL ||
                  kind == AllocKind::JITCODE ||
                  kind == AllocKind::SCOPE);
    MOZ_ASSERT_IF(!cx->compartment()->isAtomsCompartment(),
                  kind != AllocKind::ATOM &&
                  kind != AllocKind::FAT_INLINE_ATOM);
    MOZ_ASSERT(!JS::CurrentThreadIsHeapBusy());
    MOZ_ASSERT(cx->isAllocAllowed());
#endif

    // Crash if we perform a GC action when it is not safe.
    if (allowGC && !cx->suppressGC)
        cx->verifyIsSafeToGC();

    // For testing out-of-memory conditions.
    if (js::oom::ShouldFailWithOOM()) {
        // If we are doing a fallible allocation, percolate up the OOM
        // instead of reporting it.
        if (allowGC)
            ReportOutOfMemory(cx);
        return false;
    }

    return true;
}

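// Run GC work that is due before allocating: zeal-triggered collections,
// collections requested via the interrupt flag, and a full GC when an
// in-progress incremental GC has fallen behind the allocation rate.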
bool
GCRuntime::gcIfNeededAtAllocation(JSContext* cx)
{
#ifdef JS_GC_ZEAL
    if (needZealousGC())
        runDebugGC();
#endif

    // Invoking the interrupt callback can fail and we can't usefully
    // handle that here. Just check in case we need to collect instead.
    if (cx->hasPendingInterrupt())
        gcIfRequested();

    // If we have grown past our GC heap threshold while in the middle of
    // an incremental GC, we're growing faster than we're GCing, so stop
    // the world and do a full, non-incremental GC right now, if possible.
    if (isIncrementalGCInProgress() &&
        cx->zone()->usage.gcBytes() > cx->zone()->threshold.gcTriggerBytes())
    {
        PrepareZoneForGC(cx->zone());
        gc(GC_NORMAL, JS::gcreason::INCREMENTAL_TOO_SLOW);
    }

    return true;
}

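// Debug-only check: a thing allocated while its zone is being marked or swept
// must live in an arena flagged as allocatedDuringIncremental.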
template <typename T>
/* static */ void
GCRuntime::checkIncrementalZoneState(JSContext* cx, T* t)
{
#ifdef DEBUG
    if (cx->helperThread())
        return;

    Zone* zone = cx->zone();
    MOZ_ASSERT_IF(t && zone->wasGCStarted() && (zone->isGCMarking() || zone->isGCSweeping()),
                  t->asTenured().arena()->allocatedDuringIncremental);
#endif
}


// /////////// Arena -> Thing Allocator //////////////////////////////////////

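// Start the background chunk-allocation task if it is not already running,
// joining any previous invocation first.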
void
GCRuntime::startBackgroundAllocTaskIfIdle()
{
    AutoLockHelperThreadState helperLock;
    if (allocTask.isRunningWithLockHeld(helperLock))
        return;

    // Join the previous invocation of the task. This will return immediately
    // if the thread has never been started.
    allocTask.joinWithLockHeld(helperLock);
    allocTask.startWithLockHeld(helperLock);
}

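// Called when the free list for thingKind is empty: refill it via the path
// appropriate to the current thread (active cooperating thread vs. helper
// thread).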
/* static */ TenuredCell*
GCRuntime::refillFreeListFromAnyThread(JSContext* cx, AllocKind thingKind, size_t thingSize)
{
    cx->arenas()->checkEmptyFreeList(thingKind);

    if (!cx->helperThread())
        return refillFreeListFromActiveCooperatingThread(cx, thingKind, thingSize);

    return refillFreeListFromHelperThread(cx, thingKind);
}

/* static */ TenuredCell*
GCRuntime::refillFreeListFromActiveCooperatingThread(JSContext* cx, AllocKind thingKind, size_t thingSize)
{
    // It should not be possible to allocate on the active thread while we are
    // inside a GC.
    Zone *zone = cx->zone();
    MOZ_ASSERT(!JS::CurrentThreadIsHeapBusy(), "allocating while under GC");

    AutoMaybeStartBackgroundAllocation maybeStartBGAlloc;
    return cx->arenas()->allocateFromArena(zone, thingKind, CheckThresholds, maybeStartBGAlloc);
}

/* static */ TenuredCell*
GCRuntime::refillFreeListFromHelperThread(JSContext* cx, AllocKind thingKind)
{
    // A GC may be happening on the active thread, but zones used by off thread
    // tasks are never collected.
    Zone* zone = cx->zone();
    MOZ_ASSERT(!zone->wasGCStarted());

    AutoMaybeStartBackgroundAllocation maybeStartBGAlloc;
    return cx->arenas()->allocateFromArena(zone, thingKind, CheckThresholds, maybeStartBGAlloc);
}

/* static */ TenuredCell*
GCRuntime::refillFreeListInGC(Zone* zone, AllocKind thingKind)
{
    /*
     * Called by compacting GC to refill a free list while we are in a GC.
     */

    zone->arenas.checkEmptyFreeList(thingKind);
    mozilla::DebugOnly<JSRuntime*> rt = zone->runtimeFromActiveCooperatingThread();
    MOZ_ASSERT(JS::CurrentThreadIsHeapCollecting());
    MOZ_ASSERT_IF(!JS::CurrentThreadIsHeapMinorCollecting(), !rt->gc.isBackgroundSweeping());

    AutoMaybeStartBackgroundAllocation maybeStartBackgroundAllocation;
    return zone->arenas.allocateFromArena(zone, thingKind, DontCheckThresholds,
                                          maybeStartBackgroundAllocation);
}

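// Allocate a cell of the given kind from this zone's arenas: reuse the next
// arena on the list if it has free cells; otherwise take the GC lock, pick a
// chunk, and carve a fresh arena out of it.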
TenuredCell*
ArenaLists::allocateFromArena(JS::Zone* zone, AllocKind thingKind,
                              ShouldCheckThresholds checkThresholds,
                              AutoMaybeStartBackgroundAllocation& maybeStartBGAlloc)
{
    JSRuntime* rt = zone->runtimeFromAnyThread();

    mozilla::Maybe<AutoLockGC> maybeLock;

    // See if we can proceed without taking the GC lock.
    if (backgroundFinalizeState(thingKind) != BFS_DONE)
        maybeLock.emplace(rt);

    ArenaList& al = arenaLists(thingKind);
    Arena* arena = al.takeNextArena();
    if (arena) {
        // Empty arenas should be immediately freed.
        MOZ_ASSERT(!arena->isEmpty());

        return allocateFromArenaInner(zone, arena, thingKind);
    }

    // Parallel threads have their own ArenaLists, but chunks are shared;
    // if we haven't already, take the GC lock now to avoid racing.
    if (maybeLock.isNothing())
        maybeLock.emplace(rt);

    Chunk* chunk = rt->gc.pickChunk(maybeLock.ref(), maybeStartBGAlloc);
    if (!chunk)
        return nullptr;

    // Although our chunk should definitely have enough space for another arena,
    // there are other valid reasons why Chunk::allocateArena() may fail.
    arena = rt->gc.allocateArena(chunk, zone, thingKind, checkThresholds, maybeLock.ref());
    if (!arena)
        return nullptr;

    MOZ_ASSERT(al.isCursorAtEnd());
    al.insertBeforeCursor(arena);

    return allocateFromArenaInner(zone, arena, thingKind);
}

inline TenuredCell*
ArenaLists::allocateFromArenaInner(JS::Zone* zone, Arena* arena, AllocKind kind)
{
    size_t thingSize = Arena::thingSize(kind);

    freeLists(kind) = arena->getFirstFreeSpan();

    if (MOZ_UNLIKELY(zone->wasGCStarted()))
        zone->runtimeFromAnyThread()->gc.arenaAllocatedDuringGC(zone, arena);
    TenuredCell* thing = freeLists(kind)->allocate(thingSize);
    MOZ_ASSERT(thing); // This allocation is infallible.
    return thing;
}

void
GCRuntime::arenaAllocatedDuringGC(JS::Zone* zone, Arena* arena)
{
    if (zone->needsIncrementalBarrier()) {
        arena->allocatedDuringIncremental = true;
        marker.delayMarkingArena(arena);
    } else if (zone->isGCSweeping()) {
        arena->setNextAllocDuringSweep(arenasAllocatedDuringSweep);
        arenasAllocatedDuringSweep = arena;
    }
}


// /////////// Chunk -> Arena Allocator //////////////////////////////////////

bool
GCRuntime::wantBackgroundAllocation(const AutoLockGC& lock) const
{
    // To minimize memory waste, we do not want to run the background chunk
    // allocation if we already have some empty chunks or when the runtime has
    // a small heap size (and therefore likely has a small growth rate).
    return allocTask.enabled() &&
           emptyChunks(lock).count() < tunables.minEmptyChunkCount(lock) &&
           (fullChunks(lock).count() + availableChunks(lock).count()) >= 4;
}

Arena*
GCRuntime::allocateArena(Chunk* chunk, Zone* zone, AllocKind thingKind,
                         ShouldCheckThresholds checkThresholds, const AutoLockGC& lock)
{
    MOZ_ASSERT(chunk->hasAvailableArenas());

    // Fail the allocation if we are over our heap size limits.
    if (checkThresholds && usage.gcBytes() >= tunables.gcMaxBytes())
        return nullptr;

    Arena* arena = chunk->allocateArena(rt, zone, thingKind, lock);
    zone->usage.addGCArena();

    // Trigger an incremental slice if needed.
    if (checkThresholds)
        maybeAllocTriggerZoneGC(zone, lock);

    return arena;
}

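// Hand out an arena from this chunk, preferring an already-committed free
// arena and falling back to recommitting a decommitted one.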
Arena*
Chunk::allocateArena(JSRuntime* rt, Zone* zone, AllocKind thingKind, const AutoLockGC& lock)
{
    Arena* arena = info.numArenasFreeCommitted > 0
                   ? fetchNextFreeArena(rt)
                   : fetchNextDecommittedArena();
    arena->init(zone, thingKind);
    updateChunkListAfterAlloc(rt, lock);
    return arena;
}

inline void
GCRuntime::updateOnFreeArenaAlloc(const ChunkInfo& info)
{
    MOZ_ASSERT(info.numArenasFreeCommitted <= numArenasFreeCommitted);
    --numArenasFreeCommitted;
}

Arena*
Chunk::fetchNextFreeArena(JSRuntime* rt)
{
    MOZ_ASSERT(info.numArenasFreeCommitted > 0);
    MOZ_ASSERT(info.numArenasFreeCommitted <= info.numArenasFree);

    Arena* arena = info.freeArenasHead;
    info.freeArenasHead = arena->next;
    --info.numArenasFreeCommitted;
    --info.numArenasFree;
    rt->gc.updateOnFreeArenaAlloc(info);

    return arena;
}

Arena*
Chunk::fetchNextDecommittedArena()
{
    MOZ_ASSERT(info.numArenasFreeCommitted == 0);
    MOZ_ASSERT(info.numArenasFree > 0);

    unsigned offset = findDecommittedArenaOffset();
    info.lastDecommittedArenaOffset = offset + 1;
    --info.numArenasFree;
    decommittedArenas.unset(offset);

    Arena* arena = &arenas[offset];
    MarkPagesInUse(arena, ArenaSize);
    arena->setAsNotAllocated();

    return arena;
}

/*
 * Search for and return the next decommitted Arena. Our goal is to keep
 * lastDecommittedArenaOffset "close" to a free arena. We do this by setting
 * it to the most recently freed arena when we free, and forcing it to
 * the last alloc + 1 when we allocate.
 */
uint32_t
Chunk::findDecommittedArenaOffset()
{
    /* Note: lastDecommittedArenaOffset can be past the end of the list. */
    for (unsigned i = info.lastDecommittedArenaOffset; i < ArenasPerChunk; i++) {
        if (decommittedArenas.get(i))
            return i;
    }
    for (unsigned i = 0; i < info.lastDecommittedArenaOffset; i++) {
        if (decommittedArenas.get(i))
            return i;
    }
    MOZ_CRASH("No decommitted arenas found.");
}


// /////////// System -> Chunk Allocator /////////////////////////////////////

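// Return a cached empty chunk if we have one, otherwise map a new chunk from
// the system; either way, possibly kick off the background allocation task so
// more chunks are ready next time.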
Chunk*
GCRuntime::getOrAllocChunk(const AutoLockGC& lock,
                           AutoMaybeStartBackgroundAllocation& maybeStartBackgroundAllocation)
{
    Chunk* chunk = emptyChunks(lock).pop();
    if (!chunk) {
        chunk = Chunk::allocate(rt);
        if (!chunk)
            return nullptr;
        MOZ_ASSERT(chunk->info.numArenasFreeCommitted == 0);
    }

    if (wantBackgroundAllocation(lock))
        maybeStartBackgroundAllocation.tryToStartBackgroundAllocation(rt->gc);

    return chunk;
}

void
GCRuntime::recycleChunk(Chunk* chunk, const AutoLockGC& lock)
{
    emptyChunks(lock).push(chunk);
}

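// Pick the chunk the next arena will come from: use a chunk that already has
// available arenas if possible, otherwise obtain a new one, initialize it, and
// add it to the available list.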
Chunk*
GCRuntime::pickChunk(const AutoLockGC& lock,
                     AutoMaybeStartBackgroundAllocation& maybeStartBackgroundAllocation)
{
    if (availableChunks(lock).count())
        return availableChunks(lock).head();

    Chunk* chunk = getOrAllocChunk(lock, maybeStartBackgroundAllocation);
    if (!chunk)
        return nullptr;

    chunk->init(rt);
    MOZ_ASSERT(chunk->info.numArenasFreeCommitted == 0);
    MOZ_ASSERT(chunk->unused());
    MOZ_ASSERT(!fullChunks(lock).contains(chunk));
    MOZ_ASSERT(!availableChunks(lock).contains(chunk));

    chunkAllocationSinceLastGC = true;

    availableChunks(lock).push(chunk);

    return chunk;
}

BackgroundAllocTask::BackgroundAllocTask(JSRuntime* rt, ChunkPool& pool)
  : GCParallelTask(rt),
    chunkPool_(pool),
    enabled_(CanUseExtraThreads() && GetCPUCount() >= 2)
{
}

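// Body of the background chunk-allocation task: while more chunks are wanted,
// allocate and initialize them (releasing the GC lock around the system call)
// and push them onto the pool.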
/* virtual */ void
BackgroundAllocTask::run()
{
    TraceLoggerThread* logger = TraceLoggerForCurrentThread();
    AutoTraceLog logAllocation(logger, TraceLogger_GCAllocation);

    AutoLockGC lock(runtime());
    while (!cancel_ && runtime()->gc.wantBackgroundAllocation(lock)) {
        Chunk* chunk;
        {
            AutoUnlockGC unlock(lock);
            chunk = Chunk::allocate(runtime());
            if (!chunk)
                break;
            chunk->init(runtime());
        }
        chunkPool_.ref().push(chunk);
    }
}

/* static */ Chunk*
Chunk::allocate(JSRuntime* rt)
{
    Chunk* chunk = static_cast<Chunk*>(MapAlignedPages(ChunkSize, ChunkSize));
    if (!chunk)
        return nullptr;
    rt->gc.stats().count(gcstats::STAT_NEW_CHUNK);
    return chunk;
}

void
Chunk::init(JSRuntime* rt)
{
    JS_POISON(this, JS_FRESH_TENURED_PATTERN, ChunkSize);

    /*
     * We clear the bitmap to guard against JS::GCThingIsMarkedGray being called
     * on uninitialized data, which would happen before the first GC cycle.
     */
    bitmap.clear();

    /*
     * Decommit the arenas. We do this after poisoning so that if the OS does
     * not have to recycle the pages, we still get the benefit of poisoning.
     */
    decommitAllArenas(rt);

    /* Initialize the chunk info. */
    info.init();
    new (&trailer) ChunkTrailer(rt);

    /* The rest of the info fields are initialized in pickChunk. */
}

void Chunk::decommitAllArenas(JSRuntime* rt)
{
    decommittedArenas.clear(true);
    MarkPagesUnused(&arenas[0], ArenasPerChunk * ArenaSize);

    info.freeArenasHead = nullptr;
    info.lastDecommittedArenaOffset = 0;
    info.numArenasFree = ArenasPerChunk;
    info.numArenasFreeCommitted = 0;
}
|