/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/ProcessExecutableMemory.h"

#include "mozilla/Array.h"
#include "mozilla/Atomics.h"
#include "mozilla/DebugOnly.h"
#include "mozilla/Maybe.h"
#include "mozilla/TaggedAnonymousMemory.h"
#include "mozilla/XorShift128PlusRNG.h"

#include "jsfriendapi.h"
#include "jsmath.h"
#include "jsutil.h"
#include "jswin.h"

#include <errno.h>

#include "gc/Memory.h"
#include "threading/LockGuard.h"
#include "threading/Mutex.h"
#include "vm/MutexIDs.h"

#ifdef XP_WIN
# include "mozilla/StackWalk_windows.h"
# include "mozilla/WindowsVersion.h"
#else
# include <sys/mman.h>
# include <unistd.h>
#endif

#ifdef MOZ_VALGRIND
# include <valgrind/valgrind.h>
#endif

using namespace js;
using namespace js::jit;

#ifdef XP_WIN
static void*
ComputeRandomAllocationAddress()
{
    /*
     * Inspiration is V8's OS::Allocate in platform-win32.cc.
     *
     * VirtualAlloc takes 64K chunks out of the virtual address space, so we
     * keep the low 16 bits clear (64 KiB alignment).
     *
     * x86: V8 comments say that keeping addresses in the [64MiB, 1GiB) range
     * tries to avoid system default DLL mapping space. In the end, we get 13
     * bits of randomness in our selection.
     * x64: [2GiB, 4TiB), with 25 bits of randomness.
     */
# ifdef HAVE_64BIT_BUILD
    static const uintptr_t base = 0x0000000080000000;
    static const uintptr_t mask = 0x000003ffffff0000;
# elif defined(_M_IX86) || defined(__i386__)
    static const uintptr_t base = 0x04000000;
    static const uintptr_t mask = 0x3fff0000;
# else
#  error "Unsupported architecture"
# endif

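    // For example, on x64 (rand & mask) keeps bits [16, 41] and OR-ing in base
    // pins bit 31, so candidates land in [2 GiB, 4 TiB) with 64 KiB granularity
    // and 25 free bits of entropy; the x86 constants likewise give
    // [64 MiB, 1 GiB) with 13 free bits.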
    uint64_t rand = js::GenerateRandomSeed();
    return (void*) (base | (rand & mask));
}

# ifdef HAVE_64BIT_BUILD
static js::JitExceptionHandler sJitExceptionHandler;

JS_FRIEND_API(void)
js::SetJitExceptionHandler(JitExceptionHandler handler)
{
    MOZ_ASSERT(!sJitExceptionHandler);
    sJitExceptionHandler = handler;
}

// From documentation for UNWIND_INFO on
// http://msdn.microsoft.com/en-us/library/ddssxxy8.aspx
struct UnwindInfo
{
    uint8_t version : 3;
    uint8_t flags : 5;
    uint8_t sizeOfPrologue;
    uint8_t countOfUnwindCodes;
    uint8_t frameRegister : 4;
    uint8_t frameOffset : 4;
    ULONG exceptionHandler;
};

static const unsigned ThunkLength = 12;

struct ExceptionHandlerRecord
{
    RUNTIME_FUNCTION runtimeFunction;
    UnwindInfo unwindInfo;
    uint8_t thunk[ThunkLength];
};

// This function must match the function pointer type PEXCEPTION_HANDLER
// mentioned in:
//   http://msdn.microsoft.com/en-us/library/ssa62fwe.aspx.
// This type is rather elusive in documentation; Wine is the best I've found:
//   http://source.winehq.org/source/include/winnt.h
static DWORD
ExceptionHandler(PEXCEPTION_RECORD exceptionRecord, _EXCEPTION_REGISTRATION_RECORD*,
                 PCONTEXT context, _EXCEPTION_REGISTRATION_RECORD**)
{
    return sJitExceptionHandler(exceptionRecord, context);
}

// For an explanation of the problem being solved here, see
// SetJitExceptionFilter in jsfriendapi.h.
static bool
RegisterExecutableMemory(void* p, size_t bytes, size_t pageSize)
{
    if (!VirtualAlloc(p, pageSize, MEM_COMMIT, PAGE_READWRITE))
        MOZ_CRASH();

    ExceptionHandlerRecord* r = reinterpret_cast<ExceptionHandlerRecord*>(p);

    // All these fields are specified to be offsets from the base of the
    // executable code (which is 'p'), even if they have 'Address' in their
    // names. In particular, exceptionHandler is a ULONG offset which is a
    // 32-bit integer. Since 'p' can be farther than INT32_MAX away from
    // sJitExceptionHandler, we must generate a little thunk inside the
    // record. The record is put on its own page so that we can take away write
    // access to protect against accidental clobbering.

    r->runtimeFunction.BeginAddress = pageSize;
    r->runtimeFunction.EndAddress = (DWORD)bytes;
    r->runtimeFunction.UnwindData = offsetof(ExceptionHandlerRecord, unwindInfo);

    r->unwindInfo.version = 1;
    r->unwindInfo.flags = UNW_FLAG_EHANDLER;
    r->unwindInfo.sizeOfPrologue = 0;
    r->unwindInfo.countOfUnwindCodes = 0;
    r->unwindInfo.frameRegister = 0;
    r->unwindInfo.frameOffset = 0;
    r->unwindInfo.exceptionHandler = offsetof(ExceptionHandlerRecord, thunk);

    // mov imm64, rax
    r->thunk[0] = 0x48;
    r->thunk[1] = 0xb8;
    void* handler = JS_FUNC_TO_DATA_PTR(void*, ExceptionHandler);
    memcpy(&r->thunk[2], &handler, 8);

    // jmp rax
    r->thunk[10] = 0xff;
    r->thunk[11] = 0xe0;
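    // The 12-byte thunk is thus 48 B8 <8-byte handler address> FF E0
    // (movabs handler, %rax; jmp *%rax), letting the 32-bit exceptionHandler
    // offset reach a handler that may live more than INT32_MAX bytes away.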

    DWORD oldProtect;
    if (!VirtualProtect(p, pageSize, PAGE_EXECUTE_READ, &oldProtect))
        MOZ_CRASH();

    // XXX NB: The profiler believes this function is only called from the main
    // thread. If that ever becomes untrue, the profiler must be updated
    // immediately.
    AutoSuppressStackWalking suppress;
    return RtlAddFunctionTable(&r->runtimeFunction, 1, reinterpret_cast<DWORD64>(p));
}

static void
UnregisterExecutableMemory(void* p, size_t bytes, size_t pageSize)
{
    ExceptionHandlerRecord* r = reinterpret_cast<ExceptionHandlerRecord*>(p);

    // XXX NB: The profiler believes this function is only called from the main
    // thread. If that ever becomes untrue, the profiler must be updated
    // immediately.
    AutoSuppressStackWalking suppress;
    RtlDeleteFunctionTable(&r->runtimeFunction);
}
# endif

static void*
ReserveProcessExecutableMemory(size_t bytes)
{
# ifdef HAVE_64BIT_BUILD
    size_t pageSize = gc::SystemPageSize();
    if (sJitExceptionHandler)
        bytes += pageSize;
# endif

    void* p = nullptr;
    for (size_t i = 0; i < 10; i++) {
        void* randomAddr = ComputeRandomAllocationAddress();
        p = VirtualAlloc(randomAddr, bytes, MEM_RESERVE, PAGE_NOACCESS);
        if (p)
            break;
    }

    if (!p) {
        // Try again without randomization.
        p = VirtualAlloc(nullptr, bytes, MEM_RESERVE, PAGE_NOACCESS);
        if (!p)
            return nullptr;
    }

# ifdef HAVE_64BIT_BUILD
    if (sJitExceptionHandler) {
        if (!RegisterExecutableMemory(p, bytes, pageSize)) {
            VirtualFree(p, 0, MEM_RELEASE);
            return nullptr;
        }

        p = (uint8_t*)p + pageSize;
        bytes -= pageSize;
    }

    RegisterJitCodeRegion((uint8_t*)p, bytes);
# endif

    return p;
}

static void
DeallocateProcessExecutableMemory(void* addr, size_t bytes)
{
# ifdef HAVE_64BIT_BUILD
    UnregisterJitCodeRegion((uint8_t*)addr, bytes);

    if (sJitExceptionHandler) {
        size_t pageSize = gc::SystemPageSize();
        addr = (uint8_t*)addr - pageSize;
        UnregisterExecutableMemory(addr, bytes, pageSize);
    }
# endif

    VirtualFree(addr, 0, MEM_RELEASE);
}

static DWORD
ProtectionSettingToFlags(ProtectionSetting protection)
{
    switch (protection) {
      case ProtectionSetting::Protected: return PAGE_NOACCESS;
      case ProtectionSetting::Writable: return PAGE_READWRITE;
      case ProtectionSetting::Executable: return PAGE_EXECUTE_READ;
    }
    MOZ_CRASH();
}

static void
CommitPages(void* addr, size_t bytes, ProtectionSetting protection)
{
    if (!VirtualAlloc(addr, bytes, MEM_COMMIT, ProtectionSettingToFlags(protection)))
        MOZ_CRASH("CommitPages failed");
}

static void
DecommitPages(void* addr, size_t bytes)
{
    if (!VirtualFree(addr, bytes, MEM_DECOMMIT))
        MOZ_CRASH("DecommitPages failed");
}
#else // !XP_WIN
static void*
ComputeRandomAllocationAddress()
{
    uint64_t rand = js::GenerateRandomSeed();

# ifdef HAVE_64BIT_BUILD
    // x64 CPUs have a 48-bit address space and on some platforms the OS will
    // give us access to 47 bits, so to be safe we right shift by 18 to leave
    // 46 bits.
    rand >>= 18;
# else
    // On 32-bit, right shift by 34 to leave 30 bits, range [0, 1GiB). Then add
    // 512MiB to get range [512MiB, 1.5GiB), or [0x20000000, 0x60000000). This
    // is based on V8 comments in platform-posix.cc saying this range is
    // relatively unpopulated across a variety of kernels.
    rand >>= 34;
    rand += 512 * 1024 * 1024;
# endif
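    // For example, on 64-bit any rand shifted right by 18 is below 2^46
    // (64 TiB); the page mask below then clears the low-order bits.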

    // Ensure page alignment.
    uintptr_t mask = ~uintptr_t(gc::SystemPageSize() - 1);
    return (void*) uintptr_t(rand & mask);
}

static void*
ReserveProcessExecutableMemory(size_t bytes)
{
    // Note that randomAddr is just a hint: if the address is not available
    // mmap will pick a different address.
    void* randomAddr = ComputeRandomAllocationAddress();
    void* p = MozTaggedAnonymousMmap(randomAddr, bytes, PROT_NONE, MAP_PRIVATE | MAP_ANON,
                                     -1, 0, "js-executable-memory");
    if (p == MAP_FAILED)
        return nullptr;
    return p;
}

static void
DeallocateProcessExecutableMemory(void* addr, size_t bytes)
{
    mozilla::DebugOnly<int> result = munmap(addr, bytes);
    MOZ_ASSERT(!result || errno == ENOMEM);
}

static unsigned
ProtectionSettingToFlags(ProtectionSetting protection)
{
#ifdef MOZ_VALGRIND
    // If we're configured for Valgrind and running on it, use a slacker
    // scheme that doesn't change execute permissions, since doing so causes
    // Valgrind a lot of extra overhead re-JITting code that loses and later
    // regains execute permission. See bug 1338179.
    if (RUNNING_ON_VALGRIND) {
        switch (protection) {
          case ProtectionSetting::Protected: return PROT_NONE;
          case ProtectionSetting::Writable: return PROT_READ | PROT_WRITE | PROT_EXEC;
          case ProtectionSetting::Executable: return PROT_READ | PROT_EXEC;
        }
        MOZ_CRASH();
    }
    // If we get here, we're configured for Valgrind but not running on
    // it, so use the standard scheme.
#endif
    switch (protection) {
      case ProtectionSetting::Protected: return PROT_NONE;
      case ProtectionSetting::Writable: return PROT_READ | PROT_WRITE;
      case ProtectionSetting::Executable: return PROT_READ | PROT_EXEC;
    }
    MOZ_CRASH();
}

static void
CommitPages(void* addr, size_t bytes, ProtectionSetting protection)
{
    void* p = MozTaggedAnonymousMmap(addr, bytes, ProtectionSettingToFlags(protection),
                                     MAP_FIXED | MAP_PRIVATE | MAP_ANON,
                                     -1, 0, "js-executable-memory");
    MOZ_RELEASE_ASSERT(addr == p);
}

static void
DecommitPages(void* addr, size_t bytes)
{
    // Use mmap with MAP_FIXED and PROT_NONE. Inspired by jemalloc's
    // pages_decommit.
    void* p = MozTaggedAnonymousMmap(addr, bytes, PROT_NONE,
                                     MAP_FIXED | MAP_PRIVATE | MAP_ANON,
                                     -1, 0, "js-executable-memory");
    MOZ_RELEASE_ASSERT(addr == p);
}
#endif

template <size_t NumBits>
class PageBitSet
{
    using WordType = uint32_t;
    static const size_t BitsPerWord = sizeof(WordType) * 8;

    static_assert((NumBits % BitsPerWord) == 0,
                  "NumBits must be a multiple of BitsPerWord");
    static const size_t NumWords = NumBits / BitsPerWord;

    mozilla::Array<WordType, NumWords> words_;

    uint32_t indexToWord(uint32_t index) const {
        MOZ_ASSERT(index < NumBits);
        return index / BitsPerWord;
    }
    WordType indexToBit(uint32_t index) const {
        MOZ_ASSERT(index < NumBits);
        return WordType(1) << (index % BitsPerWord);
    }
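    // For example, with 32-bit words, page index 37 lives in words_[1]
    // (37 / 32) under the bit mask 1 << 5 (37 % 32).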

  public:
    void init() {
        mozilla::PodArrayZero(words_);
    }
    bool contains(size_t index) const {
        uint32_t word = indexToWord(index);
        return words_[word] & indexToBit(index);
    }
    void insert(size_t index) {
        MOZ_ASSERT(!contains(index));
        uint32_t word = indexToWord(index);
        words_[word] |= indexToBit(index);
    }
    void remove(size_t index) {
        MOZ_ASSERT(contains(index));
        uint32_t word = indexToWord(index);
        words_[word] &= ~indexToBit(index);
    }

#ifdef DEBUG
    bool empty() const {
        for (size_t i = 0; i < NumWords; i++) {
            if (words_[i] != 0)
                return false;
        }
        return true;
    }
#endif
};

// Per-process executable memory allocator. It reserves a block of memory of
// MaxCodeBytesPerProcess bytes, then allocates/deallocates pages from that.
//
// This has a number of benefits compared to raw mmap/VirtualAlloc:
//
// * More resilient against certain attacks.
//
// * Behaves more consistently across platforms: it avoids the 64K granularity
//   issues on Windows, for instance.
//
// * On x64, near jumps can be used for jumps to other JIT pages, because all
//   JIT code is carved out of a single contiguous reservation.
//
// * On Win64, we have to register the exception handler only once (at process
//   startup). This saves some memory and avoids RtlAddFunctionTable profiler
//   deadlocks.
class ProcessExecutableMemory
{
    static_assert((MaxCodeBytesPerProcess % ExecutableCodePageSize) == 0,
                  "MaxCodeBytesPerProcess must be a multiple of ExecutableCodePageSize");
    static const size_t MaxCodePages = MaxCodeBytesPerProcess / ExecutableCodePageSize;

    // Start of the MaxCodeBytesPerProcess memory block or nullptr if
    // uninitialized. Note that this is NOT guaranteed to be aligned to
    // ExecutableCodePageSize.
    uint8_t* base_;

    // The fields below should only be accessed while we hold the lock.
    Mutex lock_;

    // pagesAllocated_ is an Atomic so that bytesAllocated does not have to
    // take the lock.
    mozilla::Atomic<size_t, mozilla::ReleaseAcquire> pagesAllocated_;

    // Page where we should try to allocate next.
    size_t cursor_;

    mozilla::Maybe<mozilla::non_crypto::XorShift128PlusRNG> rng_;
    PageBitSet<MaxCodePages> pages_;

  public:
    ProcessExecutableMemory()
      : base_(nullptr),
        lock_(mutexid::ProcessExecutableRegion),
        pagesAllocated_(0),
        cursor_(0),
        rng_(),
        pages_()
    {}

    MOZ_MUST_USE bool init() {
        pages_.init();

        MOZ_RELEASE_ASSERT(!initialized());
        MOZ_RELEASE_ASSERT(gc::SystemPageSize() <= ExecutableCodePageSize);

        void* p = ReserveProcessExecutableMemory(MaxCodeBytesPerProcess);
        if (!p)
            return false;

        base_ = static_cast<uint8_t*>(p);

        mozilla::Array<uint64_t, 2> seed;
        GenerateXorShift128PlusSeed(seed);
        rng_.emplace(seed[0], seed[1]);
        return true;
    }

    bool initialized() const {
        return base_ != nullptr;
    }

    size_t bytesAllocated() const {
        MOZ_ASSERT(pagesAllocated_ <= MaxCodePages);
        return pagesAllocated_ * ExecutableCodePageSize;
    }

    void release() {
        MOZ_ASSERT(initialized());
        MOZ_ASSERT(pages_.empty());
        MOZ_ASSERT(pagesAllocated_ == 0);
        DeallocateProcessExecutableMemory(base_, MaxCodeBytesPerProcess);
        base_ = nullptr;
        rng_.reset();
        MOZ_ASSERT(!initialized());
    }

    void assertValidAddress(void* p, size_t bytes) const {
        MOZ_RELEASE_ASSERT(p >= base_ &&
                           uintptr_t(p) + bytes <= uintptr_t(base_) + MaxCodeBytesPerProcess);
    }

    void* allocate(size_t bytes, ProtectionSetting protection);
    void deallocate(void* addr, size_t bytes);
};

void*
ProcessExecutableMemory::allocate(size_t bytes, ProtectionSetting protection)
{
    MOZ_ASSERT(initialized());
    MOZ_ASSERT(bytes > 0);
    MOZ_ASSERT((bytes % ExecutableCodePageSize) == 0);

    size_t numPages = bytes / ExecutableCodePageSize;

    // Take the lock and try to allocate.
    void* p = nullptr;
    {
        LockGuard<Mutex> guard(lock_);
        MOZ_ASSERT(pagesAllocated_ <= MaxCodePages);

        // Check if we have enough pages available.
        if (pagesAllocated_ + numPages >= MaxCodePages)
            return nullptr;

        MOZ_ASSERT(bytes <= MaxCodeBytesPerProcess);

        // Maybe skip a page to make allocations less predictable.
        size_t page = cursor_ + (rng_.ref().next() % 2);

        for (size_t i = 0; i < MaxCodePages; i++) {
            // Make sure page + numPages - 1 is a valid index.
            if (page + numPages > MaxCodePages)
                page = 0;

            bool available = true;
            for (size_t j = 0; j < numPages; j++) {
                if (pages_.contains(page + j)) {
                    available = false;
                    break;
                }
            }
            if (!available) {
                page++;
                continue;
            }

            // Mark the pages as unavailable.
            for (size_t j = 0; j < numPages; j++)
                pages_.insert(page + j);

            pagesAllocated_ += numPages;
            MOZ_ASSERT(pagesAllocated_ <= MaxCodePages);

            // If we allocated a small number of pages, move cursor_ to the
            // next page. We don't do this for larger allocations to avoid
            // skipping a large number of small holes.
            if (numPages <= 2)
                cursor_ = page + numPages;

            p = base_ + page * ExecutableCodePageSize;
            break;
        }
        if (!p)
            return nullptr;
    }

    // Commit the pages after releasing the lock.
    CommitPages(p, bytes, protection);
    return p;
}

void
ProcessExecutableMemory::deallocate(void* addr, size_t bytes)
{
    MOZ_ASSERT(initialized());
    MOZ_ASSERT(addr);
    MOZ_ASSERT((uintptr_t(addr) % gc::SystemPageSize()) == 0);
    MOZ_ASSERT(bytes > 0);
    MOZ_ASSERT((bytes % ExecutableCodePageSize) == 0);

    assertValidAddress(addr, bytes);

    size_t firstPage = (static_cast<uint8_t*>(addr) - base_) / ExecutableCodePageSize;
    size_t numPages = bytes / ExecutableCodePageSize;

    // Decommit before taking the lock.
    DecommitPages(addr, bytes);

    LockGuard<Mutex> guard(lock_);
    MOZ_ASSERT(numPages <= pagesAllocated_);
    pagesAllocated_ -= numPages;

    for (size_t i = 0; i < numPages; i++)
        pages_.remove(firstPage + i);

    // Move the cursor back so we can reuse pages instead of fragmenting the
    // whole region.
    if (firstPage < cursor_)
        cursor_ = firstPage;
}

static ProcessExecutableMemory execMemory;

void*
js::jit::AllocateExecutableMemory(size_t bytes, ProtectionSetting protection)
{
    return execMemory.allocate(bytes, protection);
}

void
js::jit::DeallocateExecutableMemory(void* addr, size_t bytes)
{
    execMemory.deallocate(addr, bytes);
}

bool
js::jit::InitProcessExecutableMemory()
{
    return execMemory.init();
}

void
js::jit::ReleaseProcessExecutableMemory()
{
    execMemory.release();
}

bool
js::jit::CanLikelyAllocateMoreExecutableMemory()
{
    // Use an 8 MB buffer.
    static const size_t BufferSize = 8 * 1024 * 1024;

    MOZ_ASSERT(execMemory.bytesAllocated() <= MaxCodeBytesPerProcess);

    return execMemory.bytesAllocated() + BufferSize <= MaxCodeBytesPerProcess;
}

bool
js::jit::ReprotectRegion(void* start, size_t size, ProtectionSetting protection)
{
    // Calculate the start of the page containing this region,
    // and account for this extra memory within size.
    size_t pageSize = gc::SystemPageSize();
    intptr_t startPtr = reinterpret_cast<intptr_t>(start);
    intptr_t pageStartPtr = startPtr & ~(pageSize - 1);
    void* pageStart = reinterpret_cast<void*>(pageStartPtr);
    size += (startPtr - pageStartPtr);

    // Round size up
    size += (pageSize - 1);
    size &= ~(pageSize - 1);
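    // For example, with 4 KiB pages a request of start = 0x1234, size = 0x20
    // becomes pageStart = 0x1000 and size = 0x1000.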

    MOZ_ASSERT((uintptr_t(pageStart) % pageSize) == 0);

    execMemory.assertValidAddress(pageStart, size);

#ifdef XP_WIN
    DWORD oldProtect;
    DWORD flags = ProtectionSettingToFlags(protection);
    if (!VirtualProtect(pageStart, size, flags, &oldProtect))
        return false;
#else
    unsigned flags = ProtectionSettingToFlags(protection);
    if (mprotect(pageStart, size, flags))
        return false;
#endif

    execMemory.assertValidAddress(pageStart, size);
    return true;
}
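
// Illustrative caller flow (an assumed sketch of typical JIT usage, not code
// taken from this file): once InitProcessExecutableMemory() has succeeded, a
// compiler would allocate writable pages, emit machine code into them, and
// flip the region to executable before running it.
//
//     size_t len = ExecutableCodePageSize;
//     uint8_t* code = static_cast<uint8_t*>(
//         js::jit::AllocateExecutableMemory(len, ProtectionSetting::Writable));
//     if (code) {
//         // ... emit machine code into |code| ...
//         if (js::jit::ReprotectRegion(code, len, ProtectionSetting::Executable)) {
//             // ... run the code ...
//         }
//         js::jit::DeallocateExecutableMemory(code, len);
//     }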