/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 *
 * Copyright (C) 2008 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "jit/ExecutableAllocator.h"

#include "jit/JitCompartment.h"
#include "js/MemoryMetrics.h"

using namespace js::jit;

ExecutablePool::~ExecutablePool()
{
    MOZ_ASSERT(m_ionCodeBytes == 0);
    MOZ_ASSERT(m_baselineCodeBytes == 0);
    MOZ_ASSERT(m_regexpCodeBytes == 0);
    MOZ_ASSERT(m_otherCodeBytes == 0);

    MOZ_ASSERT(!isMarked());

    m_allocator->releasePoolPages(this);
}

void
ExecutablePool::release(bool willDestroy)
{
    MOZ_ASSERT(m_refCount != 0);
    MOZ_ASSERT_IF(willDestroy, m_refCount == 1);
    if (--m_refCount == 0)
        js_delete(this);
}

void
ExecutablePool::release(size_t n, CodeKind kind)
{
    switch (kind) {
      case ION_CODE:
        m_ionCodeBytes -= n;
        MOZ_ASSERT(m_ionCodeBytes < m_allocation.size); // Shouldn't underflow.
        break;
      case BASELINE_CODE:
        m_baselineCodeBytes -= n;
        MOZ_ASSERT(m_baselineCodeBytes < m_allocation.size);
        break;
      case REGEXP_CODE:
        m_regexpCodeBytes -= n;
        MOZ_ASSERT(m_regexpCodeBytes < m_allocation.size);
        break;
      case OTHER_CODE:
        m_otherCodeBytes -= n;
        MOZ_ASSERT(m_otherCodeBytes < m_allocation.size);
        break;
      default:
        MOZ_CRASH("bad code kind");
    }

    release();
}

void
ExecutablePool::addRef()
{
    // It should be impossible for us to roll over, because only small
    // pools have multiple holders, and they have one holder per chunk
    // of generated code, and they only hold 16KB or so of code.
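    // Illustrative bound (not from the original source): since each chunk
    // is at least word-sized, a ~16KB pool can hand out at most a few
    // thousand chunks, so m_refCount stays far below any integer limit.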
    MOZ_ASSERT(m_refCount);
    ++m_refCount;
    MOZ_ASSERT(m_refCount, "refcount overflow");
}

void*
ExecutablePool::alloc(size_t n, CodeKind kind)
{
    MOZ_ASSERT(n <= available());
    void* result = m_freePtr;
    m_freePtr += n;

    switch (kind) {
      case ION_CODE:      m_ionCodeBytes      += n; break;
      case BASELINE_CODE: m_baselineCodeBytes += n; break;
      case REGEXP_CODE:   m_regexpCodeBytes   += n; break;
      case OTHER_CODE:    m_otherCodeBytes    += n; break;
      default:            MOZ_CRASH("bad code kind");
    }

    return result;
}

size_t
ExecutablePool::available() const
{
    MOZ_ASSERT(m_end >= m_freePtr);
    return m_end - m_freePtr;
}

ExecutableAllocator::ExecutableAllocator(JSRuntime* rt)
  : rt_(rt)
{
    MOZ_ASSERT(m_smallPools.empty());
}

ExecutableAllocator::~ExecutableAllocator()
{
    for (size_t i = 0; i < m_smallPools.length(); i++)
        m_smallPools[i]->release(/* willDestroy = */ true);

    // If this asserts, we have a pool leak.
    MOZ_ASSERT_IF(m_pools.initialized(), m_pools.empty());
}

ExecutablePool*
ExecutableAllocator::poolForSize(size_t n)
{
    // Try to fit in an existing small allocator. Use the pool with the
    // least available space that is big enough (best-fit). This is the
    // best strategy because (a) it maximizes the chance of the next
    // allocation fitting in a small pool, and (b) it minimizes the
    // potential waste when a small pool is next abandoned.
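    // Sketch of the best-fit rule with hypothetical sizes: if pools offer
    // 64, 512, and 4096 free bytes and n == 60, the 64-byte pool is
    // chosen, keeping the roomier pools available for larger requests.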
    ExecutablePool* minPool = nullptr;
    for (size_t i = 0; i < m_smallPools.length(); i++) {
        ExecutablePool* pool = m_smallPools[i];
        if (n <= pool->available() && (!minPool || pool->available() < minPool->available()))
            minPool = pool;
    }
    if (minPool) {
        minPool->addRef();
        return minPool;
    }

    // If the request is large, we just provide an unshared allocator.
    if (n > ExecutableCodePageSize)
        return createPool(n);

    // Create a new allocator.
    ExecutablePool* pool = createPool(ExecutableCodePageSize);
    if (!pool)
        return nullptr;
    // At this point, local |pool| is the owner.

    if (m_smallPools.length() < maxSmallPools) {
        // We haven't hit the maximum number of live pools; add the new pool.
        // If append() OOMs, we just return an unshared allocator.
        if (m_smallPools.append(pool))
            pool->addRef();
    } else {
        // Find the pool with the least space.
        size_t iMin = 0;
        for (size_t i = 1; i < m_smallPools.length(); i++) {
            if (m_smallPools[i]->available() < m_smallPools[iMin]->available())
                iMin = i;
        }

        // If the new allocator will result in more free space than the small
        // pool with the least space, then we will use it instead.
        ExecutablePool* minPool = m_smallPools[iMin];
        if ((pool->available() - n) > minPool->available()) {
            minPool->release();
            m_smallPools[iMin] = pool;
            pool->addRef();
        }
    }

    // Pass ownership to the caller.
    return pool;
}

/* static */ size_t
ExecutableAllocator::roundUpAllocationSize(size_t request, size_t granularity)
{
    if ((std::numeric_limits<size_t>::max() - granularity) <= request)
        return OVERSIZE_ALLOCATION;

    // Round up to the next multiple of the granularity (a power of two).
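    // Worked example (illustrative, assuming a 4096-byte granularity):
    // request = 5000 gives size = 5000 + 4095 = 9095, and
    // 9095 & ~4095 == 8192, so the request rounds up to two pages. The
    // mask trick relies on |granularity| being a power of two.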
    size_t size = request + (granularity - 1);
    size = size & ~(granularity - 1);
    MOZ_ASSERT(size >= request);
    return size;
}

ExecutablePool*
ExecutableAllocator::createPool(size_t n)
{
    MOZ_ASSERT(rt_->jitRuntime()->preventBackedgePatching());

    size_t allocSize = roundUpAllocationSize(n, ExecutableCodePageSize);
    if (allocSize == OVERSIZE_ALLOCATION)
        return nullptr;

    if (!m_pools.initialized() && !m_pools.init())
        return nullptr;

    ExecutablePool::Allocation a = systemAlloc(allocSize);
    if (!a.pages)
        return nullptr;

    ExecutablePool* pool = js_new<ExecutablePool>(this, a);
    if (!pool) {
        systemRelease(a);
        return nullptr;
    }

    if (!m_pools.put(pool)) {
        // Note: this will call |systemRelease(a)|.
        js_delete(pool);
        return nullptr;
    }

    return pool;
}

void*
ExecutableAllocator::alloc(JSContext* cx, size_t n, ExecutablePool** poolp, CodeKind type)
{
    // Don't race with reprotectAll called from the signal handler.
    JitRuntime::AutoPreventBackedgePatching apbp(rt_);

    // Caller must ensure 'n' is word-size aligned. If all allocations are
    // of word-sized quantities, then all subsequent allocations will be
    // aligned.
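    // For example (illustrative, on a 64-bit target where
    // sizeof(void*) == 8): a caller wanting 13 bytes must round up to 16
    // before calling, so the assert below holds.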
    MOZ_ASSERT(roundUpAllocationSize(n, sizeof(void*)) == n);

    if (n == OVERSIZE_ALLOCATION) {
        *poolp = nullptr;
        return nullptr;
    }

    *poolp = poolForSize(n);
    if (!*poolp)
        return nullptr;

    // This alloc is infallible because poolForSize() just obtained
    // (found, or created if necessary) a pool that had enough space.
    void* result = (*poolp)->alloc(n, type);
    MOZ_ASSERT(result);

    cx->zone()->updateJitCodeMallocBytes(n);

    return result;
}
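
// Usage sketch (hypothetical caller, not part of the original file): a
// client allocates executable memory, emits code into it, and later
// returns the bytes and drops its pool reference in one call:
//
//   ExecutablePool* pool = nullptr;
//   void* code = execAlloc->alloc(cx, nbytes, &pool, BASELINE_CODE);
//   if (!code)
//       return false;              // |pool| is left null on failure.
//   ... emit machine code into |code| ...
//   // Teardown: give back the bytes and the reference.
//   pool->release(nbytes, BASELINE_CODE);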

void
ExecutableAllocator::releasePoolPages(ExecutablePool* pool)
{
    // Don't race with reprotectAll called from the signal handler.
    JitRuntime::AutoPreventBackedgePatching apbp(rt_);

    MOZ_ASSERT(pool->m_allocation.pages);
    systemRelease(pool->m_allocation);

    MOZ_ASSERT(m_pools.initialized());

    // Pool may not be present in m_pools if we hit OOM during creation.
    if (auto ptr = m_pools.lookup(pool))
        m_pools.remove(ptr);
}

void
ExecutableAllocator::purge()
{
    // Don't race with reprotectAll called from the signal handler.
    JitRuntime::AutoPreventBackedgePatching apbp(rt_);

    for (size_t i = 0; i < m_smallPools.length(); i++)
        m_smallPools[i]->release();
    m_smallPools.clear();
}

void
ExecutableAllocator::addSizeOfCode(JS::CodeSizes* sizes) const
{
    if (m_pools.initialized()) {
        for (ExecPoolHashSet::Range r = m_pools.all(); !r.empty(); r.popFront()) {
            ExecutablePool* pool = r.front();
            sizes->ion      += pool->m_ionCodeBytes;
            sizes->baseline += pool->m_baselineCodeBytes;
            sizes->regexp   += pool->m_regexpCodeBytes;
            sizes->other    += pool->m_otherCodeBytes;
            sizes->unused   += pool->m_allocation.size - pool->m_ionCodeBytes
                                                       - pool->m_baselineCodeBytes
                                                       - pool->m_regexpCodeBytes
                                                       - pool->m_otherCodeBytes;
        }
    }
}

void
ExecutableAllocator::reprotectAll(ProtectionSetting protection)
{
    if (!m_pools.initialized())
        return;

    for (ExecPoolHashSet::Range r = m_pools.all(); !r.empty(); r.popFront())
        reprotectPool(rt_, r.front(), protection);
}

/* static */ void
ExecutableAllocator::reprotectPool(JSRuntime* rt, ExecutablePool* pool, ProtectionSetting protection)
{
    // Don't race with reprotectAll called from the signal handler.
    MOZ_ASSERT(rt->jitRuntime()->preventBackedgePatching() ||
               rt->activeContext()->handlingJitInterrupt());

    char* start = pool->m_allocation.pages;
    if (!ReprotectRegion(start, pool->m_freePtr - start, protection))
        MOZ_CRASH();
}

/* static */ void
ExecutableAllocator::poisonCode(JSRuntime* rt, JitPoisonRangeVector& ranges)
{
    MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt));

    // Don't race with reprotectAll called from the signal handler.
    JitRuntime::AutoPreventBackedgePatching apbp(rt);

#ifdef DEBUG
    // Make sure no pools have the mark bit set.
    for (size_t i = 0; i < ranges.length(); i++)
        MOZ_ASSERT(!ranges[i].pool->isMarked());
#endif

    for (size_t i = 0; i < ranges.length(); i++) {
        ExecutablePool* pool = ranges[i].pool;
        if (pool->m_refCount == 1) {
            // This is the last reference so the release() call below will
            // unmap the memory. Don't bother poisoning it.
            continue;
        }

        MOZ_ASSERT(pool->m_refCount > 1);

        // Use the pool's mark bit to indicate we made the pool writable.
        // This avoids reprotecting a pool multiple times.
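        // Illustrative scenario: if pools A and B each back three of the
        // ranges, A and B are each made writable exactly once here and
        // flipped back to executable exactly once in the loop below.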
        if (!pool->isMarked()) {
            reprotectPool(rt, pool, ProtectionSetting::Writable);
            pool->mark();
        }

        memset(ranges[i].start, JS_SWEPT_CODE_PATTERN, ranges[i].size);
    }

    // Make the pools executable again and drop references.
    for (size_t i = 0; i < ranges.length(); i++) {
        ExecutablePool* pool = ranges[i].pool;
        if (pool->isMarked()) {
            reprotectPool(rt, pool, ProtectionSetting::Executable);
            pool->unmark();
        }
        pool->release();
    }
}

ExecutablePool::Allocation
ExecutableAllocator::systemAlloc(size_t n)
{
    void* allocation = AllocateExecutableMemory(n, ProtectionSetting::Executable);
    ExecutablePool::Allocation alloc = { reinterpret_cast<char*>(allocation), n };
    return alloc;
}

void
ExecutableAllocator::systemRelease(const ExecutablePool::Allocation& alloc)
{
    DeallocateExecutableMemory(alloc.pages, alloc.size);
}