Line data Source code
1 : /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
2 : * vim: set ts=8 sts=4 et sw=4 tw=99:
3 : *
4 : * Copyright 2014 Mozilla Foundation
5 : *
6 : * Licensed under the Apache License, Version 2.0 (the "License");
7 : * you may not use this file except in compliance with the License.
8 : * You may obtain a copy of the License at
9 : *
10 : * http://www.apache.org/licenses/LICENSE-2.0
11 : *
12 : * Unless required by applicable law or agreed to in writing, software
13 : * distributed under the License is distributed on an "AS IS" BASIS,
14 : * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 : * See the License for the specific language governing permissions and
16 : * limitations under the License.
17 : */
18 :
19 : #include "wasm/WasmSignalHandlers.h"
20 :
21 : #include "mozilla/DebugOnly.h"
22 : #include "mozilla/PodOperations.h"
23 : #include "mozilla/ScopeExit.h"
24 :
25 : #include "jit/AtomicOperations.h"
26 : #include "jit/Disassembler.h"
27 : #include "vm/Runtime.h"
28 : #include "wasm/WasmBuiltins.h"
29 : #include "wasm/WasmInstance.h"
30 :
31 : using namespace js;
32 : using namespace js::jit;
33 : using namespace js::wasm;
34 :
35 : using JS::GenericNaN;
36 : using mozilla::DebugOnly;
37 : using mozilla::PodArrayZero;
38 :
39 : #if defined(ANDROID)
40 : # include <sys/system_properties.h>
41 : # if defined(MOZ_LINKER)
42 : extern "C" MFBT_API bool IsSignalHandlingBroken();
43 : # endif
44 : #endif
45 :
46 : // Crashing inside the signal handler can cause the handler to be recursively
47 : // invoked, eventually blowing the stack without actually showing a crash
48 : // report dialog via Breakpad. To guard against this we watch for such
49 : // recursion and fall through to the next handler immediately rather than
50 : // trying to handle it.
51 : class AutoSetHandlingSegFault
52 : {
53 : JSContext* cx;
54 :
55 : public:
56 0 : explicit AutoSetHandlingSegFault(JSContext* cx)
57 0 : : cx(cx)
58 : {
59 0 : MOZ_ASSERT(!cx->handlingSegFault);
60 0 : cx->handlingSegFault = true;
61 0 : }
62 :
63 0 : ~AutoSetHandlingSegFault()
64 0 : {
65 0 : MOZ_ASSERT(cx->handlingSegFault);
66 0 : cx->handlingSegFault = false;
67 0 : }
68 : };
69 :
70 : #if defined(XP_WIN)
71 : # define XMM_sig(p,i) ((p)->Xmm##i)
72 : # define EIP_sig(p) ((p)->Eip)
73 : # define EBP_sig(p) ((p)->Ebp)
74 : # define ESP_sig(p) ((p)->Esp)
75 : # define RIP_sig(p) ((p)->Rip)
76 : # define RAX_sig(p) ((p)->Rax)
77 : # define RCX_sig(p) ((p)->Rcx)
78 : # define RDX_sig(p) ((p)->Rdx)
79 : # define RBX_sig(p) ((p)->Rbx)
80 : # define RSP_sig(p) ((p)->Rsp)
81 : # define RBP_sig(p) ((p)->Rbp)
82 : # define RSI_sig(p) ((p)->Rsi)
83 : # define RDI_sig(p) ((p)->Rdi)
84 : # define R8_sig(p) ((p)->R8)
85 : # define R9_sig(p) ((p)->R9)
86 : # define R10_sig(p) ((p)->R10)
87 : # define R11_sig(p) ((p)->R11)
88 : # define R12_sig(p) ((p)->R12)
89 : # define R13_sig(p) ((p)->R13)
90 : # define R14_sig(p) ((p)->R14)
91 : # define R15_sig(p) ((p)->R15)
92 : #elif defined(__OpenBSD__)
93 : # define XMM_sig(p,i) ((p)->sc_fpstate->fx_xmm[i])
94 : # define EIP_sig(p) ((p)->sc_eip)
95 : # define EBP_sig(p) ((p)->sc_ebp)
96 : # define ESP_sig(p) ((p)->sc_esp)
97 : # define RIP_sig(p) ((p)->sc_rip)
98 : # define RAX_sig(p) ((p)->sc_rax)
99 : # define RCX_sig(p) ((p)->sc_rcx)
100 : # define RDX_sig(p) ((p)->sc_rdx)
101 : # define RBX_sig(p) ((p)->sc_rbx)
102 : # define RSP_sig(p) ((p)->sc_rsp)
103 : # define RBP_sig(p) ((p)->sc_rbp)
104 : # define RSI_sig(p) ((p)->sc_rsi)
105 : # define RDI_sig(p) ((p)->sc_rdi)
106 : # define R8_sig(p) ((p)->sc_r8)
107 : # define R9_sig(p) ((p)->sc_r9)
108 : # define R10_sig(p) ((p)->sc_r10)
109 : # define R11_sig(p) ((p)->sc_r11)
110 : # define R12_sig(p) ((p)->sc_r12)
111 : # if defined(__arm__)
112 : # define R13_sig(p) ((p)->sc_usr_sp)
113 : # define R14_sig(p) ((p)->sc_usr_lr)
114 : # define R15_sig(p) ((p)->sc_pc)
115 : # else
116 : # define R13_sig(p) ((p)->sc_r13)
117 : # define R14_sig(p) ((p)->sc_r14)
118 : # define R15_sig(p) ((p)->sc_r15)
119 : # endif
120 : # if defined(__aarch64__)
121 : # define EPC_sig(p) ((p)->sc_elr)
122 : # define RFP_sig(p) ((p)->sc_x[29])
123 : # define RLR_sig(p) ((p)->sc_lr)
124 : # define R31_sig(p) ((p)->sc_sp)
125 : # endif
126 : # if defined(__mips__)
127 : # define EPC_sig(p) ((p)->sc_pc)
128 : # define RFP_sig(p) ((p)->sc_regs[30])
129 : # endif
130 : #elif defined(__linux__) || defined(__sun)
131 : # if defined(__linux__)
132 : # define XMM_sig(p,i) ((p)->uc_mcontext.fpregs->_xmm[i])
133 : # define EIP_sig(p) ((p)->uc_mcontext.gregs[REG_EIP])
134 : # define EBP_sig(p) ((p)->uc_mcontext.gregs[REG_EBP])
135 : # define ESP_sig(p) ((p)->uc_mcontext.gregs[REG_ESP])
136 : # else
137 : # define XMM_sig(p,i) ((p)->uc_mcontext.fpregs.fp_reg_set.fpchip_state.xmm[i])
138 : # define EIP_sig(p) ((p)->uc_mcontext.gregs[REG_PC])
139 : # define EBP_sig(p) ((p)->uc_mcontext.gregs[REG_EBP])
140 : # define ESP_sig(p) ((p)->uc_mcontext.gregs[REG_ESP])
141 : # endif
142 : # define RIP_sig(p) ((p)->uc_mcontext.gregs[REG_RIP])
143 : # define RAX_sig(p) ((p)->uc_mcontext.gregs[REG_RAX])
144 : # define RCX_sig(p) ((p)->uc_mcontext.gregs[REG_RCX])
145 : # define RDX_sig(p) ((p)->uc_mcontext.gregs[REG_RDX])
146 : # define RBX_sig(p) ((p)->uc_mcontext.gregs[REG_RBX])
147 : # define RSP_sig(p) ((p)->uc_mcontext.gregs[REG_RSP])
148 : # define RBP_sig(p) ((p)->uc_mcontext.gregs[REG_RBP])
149 : # define RSI_sig(p) ((p)->uc_mcontext.gregs[REG_RSI])
150 : # define RDI_sig(p) ((p)->uc_mcontext.gregs[REG_RDI])
151 : # define R8_sig(p) ((p)->uc_mcontext.gregs[REG_R8])
152 : # define R9_sig(p) ((p)->uc_mcontext.gregs[REG_R9])
153 : # define R10_sig(p) ((p)->uc_mcontext.gregs[REG_R10])
154 : # define R12_sig(p) ((p)->uc_mcontext.gregs[REG_R12])
155 : # if defined(__linux__) && defined(__arm__)
156 : # define R11_sig(p) ((p)->uc_mcontext.arm_fp)
157 : # define R13_sig(p) ((p)->uc_mcontext.arm_sp)
158 : # define R14_sig(p) ((p)->uc_mcontext.arm_lr)
159 : # define R15_sig(p) ((p)->uc_mcontext.arm_pc)
160 : # else
161 : # define R11_sig(p) ((p)->uc_mcontext.gregs[REG_R11])
162 : # define R13_sig(p) ((p)->uc_mcontext.gregs[REG_R13])
163 : # define R14_sig(p) ((p)->uc_mcontext.gregs[REG_R14])
164 : # define R15_sig(p) ((p)->uc_mcontext.gregs[REG_R15])
165 : # endif
166 : # if defined(__linux__) && defined(__aarch64__)
167 : # define EPC_sig(p) ((p)->uc_mcontext.pc)
168 : # define RFP_sig(p) ((p)->uc_mcontext.regs[29])
169 : # define RLR_sig(p) ((p)->uc_mcontext.regs[30])
170 : # define R31_sig(p) ((p)->uc_mcontext.regs[31])
171 : # endif
172 : # if defined(__linux__) && defined(__mips__)
173 : # define EPC_sig(p) ((p)->uc_mcontext.pc)
174 : # define RFP_sig(p) ((p)->uc_mcontext.gregs[30])
175 : # endif
176 : #elif defined(__NetBSD__)
177 : # define XMM_sig(p,i) (((struct fxsave64*)(p)->uc_mcontext.__fpregs)->fx_xmm[i])
178 : # define EIP_sig(p) ((p)->uc_mcontext.__gregs[_REG_EIP])
179 : # define EBP_sig(p) ((p)->uc_mcontext.__gregs[_REG_EBP])
180 : # define ESP_sig(p) ((p)->uc_mcontext.__gregs[_REG_ESP])
181 : # define RIP_sig(p) ((p)->uc_mcontext.__gregs[_REG_RIP])
182 : # define RAX_sig(p) ((p)->uc_mcontext.__gregs[_REG_RAX])
183 : # define RCX_sig(p) ((p)->uc_mcontext.__gregs[_REG_RCX])
184 : # define RDX_sig(p) ((p)->uc_mcontext.__gregs[_REG_RDX])
185 : # define RBX_sig(p) ((p)->uc_mcontext.__gregs[_REG_RBX])
186 : # define RSP_sig(p) ((p)->uc_mcontext.__gregs[_REG_RSP])
187 : # define RBP_sig(p) ((p)->uc_mcontext.__gregs[_REG_RBP])
188 : # define RSI_sig(p) ((p)->uc_mcontext.__gregs[_REG_RSI])
189 : # define RDI_sig(p) ((p)->uc_mcontext.__gregs[_REG_RDI])
190 : # define R8_sig(p) ((p)->uc_mcontext.__gregs[_REG_R8])
191 : # define R9_sig(p) ((p)->uc_mcontext.__gregs[_REG_R9])
192 : # define R10_sig(p) ((p)->uc_mcontext.__gregs[_REG_R10])
193 : # define R11_sig(p) ((p)->uc_mcontext.__gregs[_REG_R11])
194 : # define R12_sig(p) ((p)->uc_mcontext.__gregs[_REG_R12])
195 : # define R13_sig(p) ((p)->uc_mcontext.__gregs[_REG_R13])
196 : # define R14_sig(p) ((p)->uc_mcontext.__gregs[_REG_R14])
197 : # define R15_sig(p) ((p)->uc_mcontext.__gregs[_REG_R15])
198 : # if defined(__aarch64__)
199 : # define EPC_sig(p) ((p)->uc_mcontext.__gregs[_REG_PC])
200 : # define RFP_sig(p) ((p)->uc_mcontext.__gregs[_REG_X29])
201 : # define RLR_sig(p) ((p)->uc_mcontext.__gregs[_REG_X30])
202 : # define R31_sig(p) ((p)->uc_mcontext.__gregs[_REG_SP])
203 : # endif
204 : # if defined(__mips__)
205 : # define EPC_sig(p) ((p)->uc_mcontext.__gregs[_REG_EPC])
206 : # define RFP_sig(p) ((p)->uc_mcontext.__gregs[_REG_S8])
207 : # endif
208 : #elif defined(__DragonFly__) || defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
209 : # if defined(__DragonFly__)
210 : # define XMM_sig(p,i) (((union savefpu*)(p)->uc_mcontext.mc_fpregs)->sv_xmm.sv_xmm[i])
211 : # else
212 : # define XMM_sig(p,i) (((struct savefpu*)(p)->uc_mcontext.mc_fpstate)->sv_xmm[i])
213 : # endif
214 : # define EIP_sig(p) ((p)->uc_mcontext.mc_eip)
215 : # define EBP_sig(p) ((p)->uc_mcontext.mc_ebp)
216 : # define ESP_sig(p) ((p)->uc_mcontext.mc_esp)
217 : # define RIP_sig(p) ((p)->uc_mcontext.mc_rip)
218 : # define RAX_sig(p) ((p)->uc_mcontext.mc_rax)
219 : # define RCX_sig(p) ((p)->uc_mcontext.mc_rcx)
220 : # define RDX_sig(p) ((p)->uc_mcontext.mc_rdx)
221 : # define RBX_sig(p) ((p)->uc_mcontext.mc_rbx)
222 : # define RSP_sig(p) ((p)->uc_mcontext.mc_rsp)
223 : # define RBP_sig(p) ((p)->uc_mcontext.mc_rbp)
224 : # define RSI_sig(p) ((p)->uc_mcontext.mc_rsi)
225 : # define RDI_sig(p) ((p)->uc_mcontext.mc_rdi)
226 : # define R8_sig(p) ((p)->uc_mcontext.mc_r8)
227 : # define R9_sig(p) ((p)->uc_mcontext.mc_r9)
228 : # define R10_sig(p) ((p)->uc_mcontext.mc_r10)
229 : # define R12_sig(p) ((p)->uc_mcontext.mc_r12)
230 : # if defined(__FreeBSD__) && defined(__arm__)
231 : # define R11_sig(p) ((p)->uc_mcontext.__gregs[_REG_R11])
232 : # define R13_sig(p) ((p)->uc_mcontext.__gregs[_REG_R13])
233 : # define R14_sig(p) ((p)->uc_mcontext.__gregs[_REG_R14])
234 : # define R15_sig(p) ((p)->uc_mcontext.__gregs[_REG_R15])
235 : # else
236 : # define R11_sig(p) ((p)->uc_mcontext.mc_r11)
237 : # define R13_sig(p) ((p)->uc_mcontext.mc_r13)
238 : # define R14_sig(p) ((p)->uc_mcontext.mc_r14)
239 : # define R15_sig(p) ((p)->uc_mcontext.mc_r15)
240 : # endif
241 : # if defined(__FreeBSD__) && defined(__aarch64__)
242 : # define EPC_sig(p) ((p)->uc_mcontext.mc_gpregs.gp_elr)
243 : # define RFP_sig(p) ((p)->uc_mcontext.mc_gpregs.gp_x[29])
244 : # define RLR_sig(p) ((p)->uc_mcontext.mc_gpregs.gp_lr)
245 : # define R31_sig(p) ((p)->uc_mcontext.mc_gpregs.gp_sp)
246 : # endif
247 : # if defined(__FreeBSD__) && defined(__mips__)
248 : # define EPC_sig(p) ((p)->uc_mcontext.mc_pc)
249 : # define RFP_sig(p) ((p)->uc_mcontext.mc_regs[30])
250 : # endif
251 : #elif defined(XP_DARWIN)
252 : # define EIP_sig(p) ((p)->uc_mcontext->__ss.__eip)
253 : # define EBP_sig(p) ((p)->uc_mcontext->__ss.__ebp)
254 : # define ESP_sig(p) ((p)->uc_mcontext->__ss.__esp)
255 : # define RIP_sig(p) ((p)->uc_mcontext->__ss.__rip)
256 : # define RBP_sig(p) ((p)->uc_mcontext->__ss.__rbp)
257 : # define RSP_sig(p) ((p)->uc_mcontext->__ss.__rsp)
258 : # define R15_sig(p) ((p)->uc_mcontext->__ss.__pc)
259 : #else
260 : # error "Don't know how to read/write to the thread state via the mcontext_t."
261 : #endif
262 :
263 : #if defined(XP_WIN)
264 : # include "jswin.h"
265 : #else
266 : # include <signal.h>
267 : # include <sys/mman.h>
268 : #endif
269 :
270 : #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
271 : # include <sys/ucontext.h> // for ucontext_t, mcontext_t
272 : #endif
273 :
274 : #if defined(__x86_64__)
275 : # if defined(__DragonFly__)
276 : # include <machine/npx.h> // for union savefpu
277 : # elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || \
278 : defined(__NetBSD__) || defined(__OpenBSD__)
279 : # include <machine/fpu.h> // for struct savefpu/fxsave64
280 : # endif
281 : #endif
282 :
283 : #if defined(ANDROID)
284 : // Not all versions of the Android NDK define ucontext_t or mcontext_t.
285 : // Detect this and provide custom but compatible definitions. Note that these
286 : // follow the GLibc naming convention to access register values from
287 : // mcontext_t.
288 : //
289 : // See: https://chromiumcodereview.appspot.com/10829122/
290 : // See: http://code.google.com/p/android/issues/detail?id=34784
291 : # if !defined(__BIONIC_HAVE_UCONTEXT_T)
292 : # if defined(__arm__)
293 :
294 : // GLibc on ARM defines mcontext_t has a typedef for 'struct sigcontext'.
295 : // Old versions of the C library <signal.h> didn't define the type.
296 : # if !defined(__BIONIC_HAVE_STRUCT_SIGCONTEXT)
297 : # include <asm/sigcontext.h>
298 : # endif
299 :
300 : typedef struct sigcontext mcontext_t;
301 :
302 : typedef struct ucontext {
303 : uint32_t uc_flags;
304 : struct ucontext* uc_link;
305 : stack_t uc_stack;
306 : mcontext_t uc_mcontext;
307 : // Other fields are not used so don't define them here.
308 : } ucontext_t;
309 :
310 : # elif defined(__mips__)
311 :
312 : typedef struct {
313 : uint32_t regmask;
314 : uint32_t status;
315 : uint64_t pc;
316 : uint64_t gregs[32];
317 : uint64_t fpregs[32];
318 : uint32_t acx;
319 : uint32_t fpc_csr;
320 : uint32_t fpc_eir;
321 : uint32_t used_math;
322 : uint32_t dsp;
323 : uint64_t mdhi;
324 : uint64_t mdlo;
325 : uint32_t hi1;
326 : uint32_t lo1;
327 : uint32_t hi2;
328 : uint32_t lo2;
329 : uint32_t hi3;
330 : uint32_t lo3;
331 : } mcontext_t;
332 :
333 : typedef struct ucontext {
334 : uint32_t uc_flags;
335 : struct ucontext* uc_link;
336 : stack_t uc_stack;
337 : mcontext_t uc_mcontext;
338 : // Other fields are not used so don't define them here.
339 : } ucontext_t;
340 :
341 : # elif defined(__i386__)
342 : // x86 version for Android.
343 : typedef struct {
344 : uint32_t gregs[19];
345 : void* fpregs;
346 : uint32_t oldmask;
347 : uint32_t cr2;
348 : } mcontext_t;
349 :
350 : typedef uint32_t kernel_sigset_t[2]; // x86 kernel uses 64-bit signal masks
351 : typedef struct ucontext {
352 : uint32_t uc_flags;
353 : struct ucontext* uc_link;
354 : stack_t uc_stack;
355 : mcontext_t uc_mcontext;
356 : // Other fields are not used by V8, don't define them here.
357 : } ucontext_t;
358 : enum { REG_EIP = 14 };
359 : # endif // defined(__i386__)
360 : # endif // !defined(__BIONIC_HAVE_UCONTEXT_T)
361 : #endif // defined(ANDROID)
362 :
363 : #if !defined(XP_WIN)
364 : # define CONTEXT ucontext_t
365 : #endif
366 :
367 : // Define a context type for use in the emulator code. This is usually just
368 : // the same as CONTEXT, but on Mac we use a different structure since we call
369 : // into the emulator code from a Mach exception handler rather than a
370 : // sigaction-style signal handler.
371 : #if defined(XP_DARWIN)
372 : # if defined(__x86_64__)
373 : struct macos_x64_context {
374 : x86_thread_state64_t thread;
375 : x86_float_state64_t float_;
376 : };
377 : # define EMULATOR_CONTEXT macos_x64_context
378 : # elif defined(__i386__)
379 : struct macos_x86_context {
380 : x86_thread_state_t thread;
381 : x86_float_state_t float_;
382 : };
383 : # define EMULATOR_CONTEXT macos_x86_context
384 : # elif defined(__arm__)
385 : struct macos_arm_context {
386 : arm_thread_state_t thread;
387 : arm_neon_state_t float_;
388 : };
389 : # define EMULATOR_CONTEXT macos_arm_context
390 : # else
391 : # error Unsupported architecture
392 : # endif
393 : #else
394 : # define EMULATOR_CONTEXT CONTEXT
395 : #endif
396 :
397 : #if defined(_M_X64) || defined(__x86_64__)
398 : # define PC_sig(p) RIP_sig(p)
399 : # define FP_sig(p) RBP_sig(p)
400 : # define SP_sig(p) RSP_sig(p)
401 : #elif defined(_M_IX86) || defined(__i386__)
402 : # define PC_sig(p) EIP_sig(p)
403 : # define FP_sig(p) EBP_sig(p)
404 : # define SP_sig(p) ESP_sig(p)
405 : #elif defined(__arm__)
406 : # define FP_sig(p) R11_sig(p)
407 : # define SP_sig(p) R13_sig(p)
408 : # define LR_sig(p) R14_sig(p)
409 : # define PC_sig(p) R15_sig(p)
410 : #elif defined(__aarch64__)
411 : # define PC_sig(p) EPC_sig(p)
412 : # define FP_sig(p) RFP_sig(p)
413 : # define SP_sig(p) R31_sig(p)
414 : # define LR_sig(p) RLR_sig(p)
415 : #elif defined(__mips__)
416 : # define PC_sig(p) EPC_sig(p)
417 : # define FP_sig(p) RFP_sig(p)
418 : #endif
419 :
// Return the address of the saved program counter inside the signal context
// so the handler can both read the faulting pc and redirect execution.
static uint8_t**
ContextToPC(CONTEXT* context)
{
#ifdef JS_CODEGEN_NONE
    // No JIT backend: there is no machine state worth inspecting.
    MOZ_CRASH();
#else
    return reinterpret_cast<uint8_t**>(&PC_sig(context));
#endif
}
429 :
// Read the saved frame pointer register out of the signal context.
static uint8_t*
ContextToFP(CONTEXT* context)
{
#ifdef JS_CODEGEN_NONE
    // No JIT backend: there is no machine state worth inspecting.
    MOZ_CRASH();
#else
    return reinterpret_cast<uint8_t*>(FP_sig(context));
#endif
}
439 :
// Read the saved stack pointer register out of the signal context.
static uint8_t*
ContextToSP(CONTEXT* context)
{
#ifdef JS_CODEGEN_NONE
    // No JIT backend: there is no machine state worth inspecting.
    MOZ_CRASH();
#else
    return reinterpret_cast<uint8_t*>(SP_sig(context));
#endif
}
449 :
#if defined(__arm__) || defined(__aarch64__)
// Read the saved link register (return address) out of the signal context;
// only the ARM targets have one.
static uint8_t*
ContextToLR(CONTEXT* context)
{
    return reinterpret_cast<uint8_t*>(LR_sig(context));
}
#endif
457 :
458 : #if defined(XP_DARWIN)
459 :
// Mach-exception-handler flavor: return the address of the stored IP inside
// the captured thread state so the handler can read and redirect it.
static uint8_t**
ContextToPC(EMULATOR_CONTEXT* context)
{
# if defined(__x86_64__)
    static_assert(sizeof(context->thread.__rip) == sizeof(void*),
                  "stored IP should be compile-time pointer-sized");
    return reinterpret_cast<uint8_t**>(&context->thread.__rip);
# elif defined(__i386__)
    static_assert(sizeof(context->thread.uts.ts32.__eip) == sizeof(void*),
                  "stored IP should be compile-time pointer-sized");
    return reinterpret_cast<uint8_t**>(&context->thread.uts.ts32.__eip);
# elif defined(__arm__)
    static_assert(sizeof(context->thread.__pc) == sizeof(void*),
                  "stored IP should be compile-time pointer-sized");
    return reinterpret_cast<uint8_t**>(&context->thread.__pc);
# else
#  error Unsupported architecture
# endif
}
479 :
// Mach-exception-handler flavor: read the frame pointer from the captured
// thread state.
static uint8_t*
ContextToFP(EMULATOR_CONTEXT* context)
{
# if defined(__x86_64__)
    return (uint8_t*)context->thread.__rbp;
# elif defined(__i386__)
    return (uint8_t*)context->thread.uts.ts32.__ebp;
# elif defined(__arm__)
    return (uint8_t*)context->thread.__fp;
# else
#  error Unsupported architecture
# endif
}
493 :
// Mach-exception-handler flavor: read the stack pointer from the captured
// thread state.
static uint8_t*
ContextToSP(EMULATOR_CONTEXT* context)
{
# if defined(__x86_64__)
    return (uint8_t*)context->thread.__rsp;
# elif defined(__i386__)
    return (uint8_t*)context->thread.uts.ts32.__esp;
# elif defined(__arm__)
    return (uint8_t*)context->thread.__sp;
# else
#  error Unsupported architecture
# endif
}
507 :
// Capture pc/sp/fp from a Mach thread-state context for the profiling frame
// iterator.
static JS::ProfilingFrameIterator::RegisterState
ToRegisterState(EMULATOR_CONTEXT* context)
{
    JS::ProfilingFrameIterator::RegisterState state;
    state.fp = ContextToFP(context);
    state.pc = *ContextToPC(context);
    state.sp = ContextToSP(context);
    // no ARM on Darwin => don't fill state.lr.
    return state;
}
518 : #endif // XP_DARWIN
519 :
// Capture pc/sp/fp (and lr on ARM targets) from a signal context for the
// profiling frame iterator.
static JS::ProfilingFrameIterator::RegisterState
ToRegisterState(CONTEXT* context)
{
#ifdef JS_CODEGEN_NONE
    // No JIT backend: there is no machine state worth inspecting.
    MOZ_CRASH();
#else
    JS::ProfilingFrameIterator::RegisterState state;
    state.fp = ContextToFP(context);
    state.pc = *ContextToPC(context);
    state.sp = ContextToSP(context);
# if defined(__arm__) || defined(__aarch64__)
    state.lr = ContextToLR(context);
# endif
    return state;
#endif
}
536 :
537 : #if defined(WASM_HUGE_MEMORY)
538 : MOZ_COLD static void
539 0 : SetFPRegToNaN(size_t size, void* fp_reg)
540 : {
541 0 : MOZ_RELEASE_ASSERT(size <= Simd128DataSize);
542 0 : memset(fp_reg, 0, Simd128DataSize);
543 0 : switch (size) {
544 0 : case 4: *static_cast<float*>(fp_reg) = GenericNaN(); break;
545 0 : case 8: *static_cast<double*>(fp_reg) = GenericNaN(); break;
546 : default:
547 : // All SIMD accesses throw on OOB.
548 0 : MOZ_CRASH("unexpected size in SetFPRegToNaN");
549 : }
550 0 : }
551 :
// Zero a (pointer-sized) general purpose register slot.
MOZ_COLD static void
SetGPRegToZero(void* gp_reg)
{
    memset(gp_reg, 0, sizeof(intptr_t));
}
557 :
// Emulate an FP load: zero the whole 128-bit register slot, then copy `size`
// bytes from the (possibly shared) heap address into its low bytes.
MOZ_COLD static void
SetFPRegToLoadedValue(SharedMem<void*> addr, size_t size, void* fp_reg)
{
    MOZ_RELEASE_ASSERT(size <= Simd128DataSize);
    memset(fp_reg, 0, Simd128DataSize);
    AtomicOperations::memcpySafeWhenRacy(fp_reg, addr, size);
}
565 :
// Emulate a zero-extending load into a general purpose register: zero the
// whole pointer-sized slot, then copy `size` bytes from the heap address.
MOZ_COLD static void
SetGPRegToLoadedValue(SharedMem<void*> addr, size_t size, void* gp_reg)
{
    MOZ_RELEASE_ASSERT(size <= sizeof(void*));
    memset(gp_reg, 0, sizeof(void*));
    AtomicOperations::memcpySafeWhenRacy(gp_reg, addr, size);
}
573 :
// Emulate a sign-extending (<= 32-bit) load into a general purpose register:
// the low `size` bytes come from memory, the rest of the low 32 bits are
// filled with the loaded value's sign, and the upper half of the slot is
// zeroed. NOTE(review): the fill/copy order assumes the copied bytes land at
// the low end of the slot, i.e. a little-endian target (x86/x64 here).
MOZ_COLD static void
SetGPRegToLoadedValueSext32(SharedMem<void*> addr, size_t size, void* gp_reg)
{
    // Sign bit of the most significant loaded byte; msb >> 7 yields an
    // all-zeros or all-ones byte for the memset fill below.
    MOZ_RELEASE_ASSERT(size <= sizeof(int32_t));
    int8_t msb = AtomicOperations::loadSafeWhenRacy(addr.cast<uint8_t*>() + (size - 1));
    memset(gp_reg, 0, sizeof(void*));
    memset(gp_reg, msb >> 7, sizeof(int32_t));
    AtomicOperations::memcpySafeWhenRacy(gp_reg, addr, size);
}
583 :
// Copy `size` bytes from a floating point register slot out to the heap.
MOZ_COLD static void
StoreValueFromFPReg(SharedMem<void*> addr, size_t size, const void* fp_reg)
{
    MOZ_RELEASE_ASSERT(size <= Simd128DataSize);
    AtomicOperations::memcpySafeWhenRacy(addr, const_cast<void*>(fp_reg), size);
}
590 :
// Copy `size` bytes from a general purpose register slot out to the heap.
MOZ_COLD static void
StoreValueFromGPReg(SharedMem<void*> addr, size_t size, const void* gp_reg)
{
    MOZ_RELEASE_ASSERT(size <= sizeof(void*));
    AtomicOperations::memcpySafeWhenRacy(addr, const_cast<void*>(gp_reg), size);
}
597 :
// Copy the low `size` bytes of an immediate operand out to the heap.
MOZ_COLD static void
StoreValueFromGPImm(SharedMem<void*> addr, size_t size, int32_t imm)
{
    MOZ_RELEASE_ASSERT(size <= sizeof(imm));
    AtomicOperations::memcpySafeWhenRacy(addr, static_cast<void*>(&imm), size);
}
604 :
605 : # if !defined(XP_DARWIN)
606 : MOZ_COLD static void*
607 0 : AddressOfFPRegisterSlot(CONTEXT* context, FloatRegisters::Encoding encoding)
608 : {
609 0 : switch (encoding) {
610 0 : case X86Encoding::xmm0: return &XMM_sig(context, 0);
611 0 : case X86Encoding::xmm1: return &XMM_sig(context, 1);
612 0 : case X86Encoding::xmm2: return &XMM_sig(context, 2);
613 0 : case X86Encoding::xmm3: return &XMM_sig(context, 3);
614 0 : case X86Encoding::xmm4: return &XMM_sig(context, 4);
615 0 : case X86Encoding::xmm5: return &XMM_sig(context, 5);
616 0 : case X86Encoding::xmm6: return &XMM_sig(context, 6);
617 0 : case X86Encoding::xmm7: return &XMM_sig(context, 7);
618 0 : case X86Encoding::xmm8: return &XMM_sig(context, 8);
619 0 : case X86Encoding::xmm9: return &XMM_sig(context, 9);
620 0 : case X86Encoding::xmm10: return &XMM_sig(context, 10);
621 0 : case X86Encoding::xmm11: return &XMM_sig(context, 11);
622 0 : case X86Encoding::xmm12: return &XMM_sig(context, 12);
623 0 : case X86Encoding::xmm13: return &XMM_sig(context, 13);
624 0 : case X86Encoding::xmm14: return &XMM_sig(context, 14);
625 0 : case X86Encoding::xmm15: return &XMM_sig(context, 15);
626 0 : default: break;
627 : }
628 0 : MOZ_CRASH();
629 : }
630 :
// Map an x86-64 general purpose register code to the address of that
// register's saved slot in the signal context.
MOZ_COLD static void*
AddressOfGPRegisterSlot(EMULATOR_CONTEXT* context, Registers::Code code)
{
    switch (code) {
      case X86Encoding::rax: return &RAX_sig(context);
      case X86Encoding::rcx: return &RCX_sig(context);
      case X86Encoding::rdx: return &RDX_sig(context);
      case X86Encoding::rbx: return &RBX_sig(context);
      case X86Encoding::rsp: return &RSP_sig(context);
      case X86Encoding::rbp: return &RBP_sig(context);
      case X86Encoding::rsi: return &RSI_sig(context);
      case X86Encoding::rdi: return &RDI_sig(context);
      case X86Encoding::r8:  return &R8_sig(context);
      case X86Encoding::r9:  return &R9_sig(context);
      case X86Encoding::r10: return &R10_sig(context);
      case X86Encoding::r11: return &R11_sig(context);
      case X86Encoding::r12: return &R12_sig(context);
      case X86Encoding::r13: return &R13_sig(context);
      case X86Encoding::r14: return &R14_sig(context);
      case X86Encoding::r15: return &R15_sig(context);
      default: break;
    }
    MOZ_CRASH();
}
655 : # else
// Darwin flavor: map an XMM register encoding to its slot in the captured
// Mach float state.
MOZ_COLD static void*
AddressOfFPRegisterSlot(EMULATOR_CONTEXT* context, FloatRegisters::Encoding encoding)
{
    switch (encoding) {
      case X86Encoding::xmm0:  return &context->float_.__fpu_xmm0;
      case X86Encoding::xmm1:  return &context->float_.__fpu_xmm1;
      case X86Encoding::xmm2:  return &context->float_.__fpu_xmm2;
      case X86Encoding::xmm3:  return &context->float_.__fpu_xmm3;
      case X86Encoding::xmm4:  return &context->float_.__fpu_xmm4;
      case X86Encoding::xmm5:  return &context->float_.__fpu_xmm5;
      case X86Encoding::xmm6:  return &context->float_.__fpu_xmm6;
      case X86Encoding::xmm7:  return &context->float_.__fpu_xmm7;
      case X86Encoding::xmm8:  return &context->float_.__fpu_xmm8;
      case X86Encoding::xmm9:  return &context->float_.__fpu_xmm9;
      case X86Encoding::xmm10: return &context->float_.__fpu_xmm10;
      case X86Encoding::xmm11: return &context->float_.__fpu_xmm11;
      case X86Encoding::xmm12: return &context->float_.__fpu_xmm12;
      case X86Encoding::xmm13: return &context->float_.__fpu_xmm13;
      case X86Encoding::xmm14: return &context->float_.__fpu_xmm14;
      case X86Encoding::xmm15: return &context->float_.__fpu_xmm15;
      default: break;
    }
    MOZ_CRASH();
}
680 :
// Darwin flavor: map a general purpose register code to its slot in the
// captured Mach thread state.
MOZ_COLD static void*
AddressOfGPRegisterSlot(EMULATOR_CONTEXT* context, Registers::Code code)
{
    switch (code) {
      case X86Encoding::rax: return &context->thread.__rax;
      case X86Encoding::rcx: return &context->thread.__rcx;
      case X86Encoding::rdx: return &context->thread.__rdx;
      case X86Encoding::rbx: return &context->thread.__rbx;
      case X86Encoding::rsp: return &context->thread.__rsp;
      case X86Encoding::rbp: return &context->thread.__rbp;
      case X86Encoding::rsi: return &context->thread.__rsi;
      case X86Encoding::rdi: return &context->thread.__rdi;
      case X86Encoding::r8:  return &context->thread.__r8;
      case X86Encoding::r9:  return &context->thread.__r9;
      case X86Encoding::r10: return &context->thread.__r10;
      case X86Encoding::r11: return &context->thread.__r11;
      case X86Encoding::r12: return &context->thread.__r12;
      case X86Encoding::r13: return &context->thread.__r13;
      case X86Encoding::r14: return &context->thread.__r14;
      case X86Encoding::r15: return &context->thread.__r15;
      default: break;
    }
    MOZ_CRASH();
}
705 : # endif // !XP_DARWIN
706 :
707 : MOZ_COLD static void
708 0 : SetRegisterToCoercedUndefined(EMULATOR_CONTEXT* context, size_t size,
709 : const Disassembler::OtherOperand& value)
710 : {
711 0 : if (value.kind() == Disassembler::OtherOperand::FPR)
712 0 : SetFPRegToNaN(size, AddressOfFPRegisterSlot(context, value.fpr()));
713 : else
714 0 : SetGPRegToZero(AddressOfGPRegisterSlot(context, value.gpr()));
715 0 : }
716 :
// Dispatch an emulated load into the FP or GP register slot named by the
// disassembled destination operand.
MOZ_COLD static void
SetRegisterToLoadedValue(EMULATOR_CONTEXT* context, SharedMem<void*> addr, size_t size,
                         const Disassembler::OtherOperand& value)
{
    if (value.kind() == Disassembler::OtherOperand::FPR)
        SetFPRegToLoadedValue(addr, size, AddressOfFPRegisterSlot(context, value.fpr()));
    else
        SetGPRegToLoadedValue(addr, size, AddressOfGPRegisterSlot(context, value.gpr()));
}
726 :
// Dispatch an emulated sign-extending load; sign-extending loads only ever
// target general purpose registers.
MOZ_COLD static void
SetRegisterToLoadedValueSext32(EMULATOR_CONTEXT* context, SharedMem<void*> addr, size_t size,
                               const Disassembler::OtherOperand& value)
{
    SetGPRegToLoadedValueSext32(addr, size, AddressOfGPRegisterSlot(context, value.gpr()));
}
733 :
734 : MOZ_COLD static void
735 0 : StoreValueFromRegister(EMULATOR_CONTEXT* context, SharedMem<void*> addr, size_t size,
736 : const Disassembler::OtherOperand& value)
737 : {
738 0 : if (value.kind() == Disassembler::OtherOperand::FPR)
739 0 : StoreValueFromFPReg(addr, size, AddressOfFPRegisterSlot(context, value.fpr()));
740 0 : else if (value.kind() == Disassembler::OtherOperand::GPR)
741 0 : StoreValueFromGPReg(addr, size, AddressOfGPRegisterSlot(context, value.gpr()));
742 : else
743 0 : StoreValueFromGPImm(addr, size, value.imm());
744 0 : }
745 :
// Recompute the effective address of a disassembled heap access
// (disp + base + index * 2^scale) from the register values saved in the
// context. Needed because the OS-reported faulting address may point into
// the middle of the access rather than at its start.
MOZ_COLD static uint8_t*
ComputeAccessAddress(EMULATOR_CONTEXT* context, const Disassembler::ComplexAddress& address)
{
    MOZ_RELEASE_ASSERT(!address.isPCRelative(), "PC-relative addresses not supported yet");

    uintptr_t result = address.disp();

    if (address.hasBase()) {
        uintptr_t base;
        // Read the base register's saved value out of the context.
        StoreValueFromGPReg(SharedMem<void*>::unshared(&base), sizeof(uintptr_t),
                            AddressOfGPRegisterSlot(context, address.base()));
        result += base;
    }

    if (address.hasIndex()) {
        uintptr_t index;
        // Read the index register's saved value out of the context.
        StoreValueFromGPReg(SharedMem<void*>::unshared(&index), sizeof(uintptr_t),
                            AddressOfGPRegisterSlot(context, address.index()));
        MOZ_ASSERT(address.scale() < 32, "address shift overflow");
        result += index * (uintptr_t(1) << address.scale());
    }

    return reinterpret_cast<uint8_t*>(result);
}
770 :
771 : MOZ_COLD static void
772 0 : HandleMemoryAccess(EMULATOR_CONTEXT* context, uint8_t* pc, uint8_t* faultingAddress,
773 : const Instance& instance, WasmActivation* activation, uint8_t** ppc)
774 : {
775 0 : MOZ_RELEASE_ASSERT(instance.code().containsFunctionPC(pc));
776 :
777 : const CodeSegment* segment;
778 0 : const MemoryAccess* memoryAccess = instance.code().lookupMemoryAccess(pc, &segment);
779 0 : if (!memoryAccess) {
780 : // If there is no associated MemoryAccess for the faulting PC, this must be
781 : // experimental SIMD.js or Atomics. When these are converted to
782 : // non-experimental wasm features, this case, as well as outOfBoundsCode,
783 : // can be removed.
784 0 : activation->startInterrupt(ToRegisterState(context));
785 0 : if (!instance.code().containsCodePC(pc, &segment))
786 0 : MOZ_CRASH("Cannot map PC to trap handler");
787 0 : *ppc = segment->outOfBoundsCode();
788 0 : return;
789 : }
790 :
791 0 : MOZ_RELEASE_ASSERT(memoryAccess->insnOffset() == (pc - segment->base()));
792 :
793 : // On WASM_HUGE_MEMORY platforms, asm.js code may fault. asm.js does not
794 : // trap on fault and so has no trap out-of-line path. Instead, stores are
795 : // silently ignored (by advancing the pc past the store and resuming) and
796 : // loads silently succeed with a JS-semantics-determined value.
797 :
798 0 : if (memoryAccess->hasTrapOutOfLineCode()) {
799 0 : *ppc = memoryAccess->trapOutOfLineCode(segment->base());
800 0 : return;
801 : }
802 :
803 0 : MOZ_RELEASE_ASSERT(instance.isAsmJS());
804 :
805 : // Disassemble the instruction which caused the trap so that we can extract
806 : // information about it and decide what to do.
807 0 : Disassembler::HeapAccess access;
808 0 : uint8_t* end = Disassembler::DisassembleHeapAccess(pc, &access);
809 0 : const Disassembler::ComplexAddress& address = access.address();
810 0 : MOZ_RELEASE_ASSERT(end > pc);
811 0 : MOZ_RELEASE_ASSERT(segment->containsFunctionPC(end));
812 :
813 : // Check x64 asm.js heap access invariants.
814 0 : MOZ_RELEASE_ASSERT(address.disp() >= 0);
815 0 : MOZ_RELEASE_ASSERT(address.base() == HeapReg.code());
816 0 : MOZ_RELEASE_ASSERT(!address.hasIndex() || address.index() != HeapReg.code());
817 0 : MOZ_RELEASE_ASSERT(address.scale() == 0);
818 0 : if (address.hasBase()) {
819 : uintptr_t base;
820 0 : StoreValueFromGPReg(SharedMem<void*>::unshared(&base), sizeof(uintptr_t),
821 0 : AddressOfGPRegisterSlot(context, address.base()));
822 0 : MOZ_RELEASE_ASSERT(reinterpret_cast<uint8_t*>(base) == instance.memoryBase());
823 : }
824 0 : if (address.hasIndex()) {
825 : uintptr_t index;
826 0 : StoreValueFromGPReg(SharedMem<void*>::unshared(&index), sizeof(uintptr_t),
827 0 : AddressOfGPRegisterSlot(context, address.index()));
828 0 : MOZ_RELEASE_ASSERT(uint32_t(index) == index);
829 : }
830 :
831 : // Determine the actual effective address of the faulting access. We can't
832 : // rely on the faultingAddress given to us by the OS, because we need the
833 : // address of the start of the access, and the OS may sometimes give us an
834 : // address somewhere in the middle of the heap access.
835 0 : uint8_t* accessAddress = ComputeAccessAddress(context, address);
836 0 : MOZ_RELEASE_ASSERT(size_t(faultingAddress - accessAddress) < access.size(),
837 : "Given faulting address does not appear to be within computed "
838 : "faulting address range");
839 0 : MOZ_RELEASE_ASSERT(accessAddress >= instance.memoryBase(),
840 : "Access begins outside the asm.js heap");
841 0 : MOZ_RELEASE_ASSERT(accessAddress + access.size() <= instance.memoryBase() +
842 : instance.memoryMappedSize(),
843 : "Access extends beyond the asm.js heap guard region");
844 0 : MOZ_RELEASE_ASSERT(accessAddress + access.size() > instance.memoryBase() +
845 : instance.memoryLength(),
846 : "Computed access address is not actually out of bounds");
847 :
848 : // The basic sandbox model is that all heap accesses are a heap base
849 : // register plus an index, and the index is always computed with 32-bit
850 : // operations, so we know it can only be 4 GiB off of the heap base.
851 : //
852 : // However, we wish to support the optimization of folding immediates
853 : // and scaled indices into addresses, and any address arithmetic we fold
854 : // gets done at full pointer width, so it doesn't get properly wrapped.
855 : // We support this by extending HugeMappedSize to the greatest size that
856 : // could be reached by such an unwrapped address, and then when we arrive
857 : // here in the signal handler for such an access, we compute the fully
858 : // wrapped address, and perform the load or store on it.
859 : //
860 : // Taking a signal is really slow, but in theory programs really shouldn't
861 : // be hitting this anyway.
862 0 : intptr_t unwrappedOffset = accessAddress - instance.memoryBase().unwrap(/* for value */);
863 0 : uint32_t wrappedOffset = uint32_t(unwrappedOffset);
864 0 : size_t size = access.size();
865 0 : MOZ_RELEASE_ASSERT(wrappedOffset + size > wrappedOffset);
866 0 : bool inBounds = wrappedOffset + size < instance.memoryLength();
867 :
868 0 : if (inBounds) {
869 : // We now know that this is an access that is actually in bounds when
870 : // properly wrapped. Complete the load or store with the wrapped
871 : // address.
872 0 : SharedMem<uint8_t*> wrappedAddress = instance.memoryBase() + wrappedOffset;
873 0 : MOZ_RELEASE_ASSERT(wrappedAddress >= instance.memoryBase());
874 0 : MOZ_RELEASE_ASSERT(wrappedAddress + size > wrappedAddress);
875 0 : MOZ_RELEASE_ASSERT(wrappedAddress + size <= instance.memoryBase() + instance.memoryLength());
876 0 : switch (access.kind()) {
877 : case Disassembler::HeapAccess::Load:
878 0 : SetRegisterToLoadedValue(context, wrappedAddress.cast<void*>(), size, access.otherOperand());
879 0 : break;
880 : case Disassembler::HeapAccess::LoadSext32:
881 0 : SetRegisterToLoadedValueSext32(context, wrappedAddress.cast<void*>(), size, access.otherOperand());
882 0 : break;
883 : case Disassembler::HeapAccess::Store:
884 0 : StoreValueFromRegister(context, wrappedAddress.cast<void*>(), size, access.otherOperand());
885 0 : break;
886 : case Disassembler::HeapAccess::LoadSext64:
887 0 : MOZ_CRASH("no int64 accesses in asm.js");
888 : case Disassembler::HeapAccess::Unknown:
889 0 : MOZ_CRASH("Failed to disassemble instruction");
890 : }
891 : } else {
892 : // We now know that this is an out-of-bounds access made by an asm.js
893 : // load/store that we should handle.
894 0 : switch (access.kind()) {
895 : case Disassembler::HeapAccess::Load:
896 : case Disassembler::HeapAccess::LoadSext32:
897 : // Assign the JS-defined result value to the destination register
898 : // (ToInt32(undefined) or ToNumber(undefined), determined by the
899 : // type of the destination register). Very conveniently, we can
900 : // infer the type from the register class, since all SIMD accesses
901 : // throw on out of bounds (see above), so the only types using FP
902 : // registers are float32 and double.
903 0 : SetRegisterToCoercedUndefined(context, access.size(), access.otherOperand());
904 0 : break;
905 : case Disassembler::HeapAccess::Store:
906 : // Do nothing.
907 0 : break;
908 : case Disassembler::HeapAccess::LoadSext64:
909 0 : MOZ_CRASH("no int64 accesses in asm.js");
910 : case Disassembler::HeapAccess::Unknown:
911 0 : MOZ_CRASH("Failed to disassemble instruction");
912 : }
913 : }
914 :
915 0 : *ppc = end;
916 : }
917 :
918 : #else // WASM_HUGE_MEMORY
919 :
920 : MOZ_COLD static void
921 : HandleMemoryAccess(EMULATOR_CONTEXT* context, uint8_t* pc, uint8_t* faultingAddress,
922 : const Instance& instance, WasmActivation* activation, uint8_t** ppc)
923 : {
924 : MOZ_RELEASE_ASSERT(instance.code().containsFunctionPC(pc));
925 :
926 : const CodeSegment* segment;
927 : const MemoryAccess* memoryAccess = instance.code().lookupMemoryAccess(pc, &segment);
928 : if (!memoryAccess) {
929 : // See explanation in the WASM_HUGE_MEMORY HandleMemoryAccess.
930 : activation->startInterrupt(ToRegisterState(context));
931 : if (!instance.code().containsCodePC(pc, &segment))
932 : MOZ_CRASH("Cannot map PC to trap handler");
933 : *ppc = segment->outOfBoundsCode();
934 : return;
935 : }
936 :
937 : MOZ_RELEASE_ASSERT(memoryAccess->hasTrapOutOfLineCode());
938 : *ppc = memoryAccess->trapOutOfLineCode(segment->base());
939 : }
940 :
941 : #endif // WASM_HUGE_MEMORY
942 :
943 : MOZ_COLD static bool
944 0 : IsHeapAccessAddress(const Instance &instance, uint8_t* faultingAddress)
945 : {
946 0 : size_t accessLimit = instance.memoryMappedSize();
947 :
948 0 : return instance.metadata().usesMemory() &&
949 0 : faultingAddress >= instance.memoryBase() &&
950 0 : faultingAddress < instance.memoryBase() + accessLimit;
951 : }
952 :
953 : #if defined(XP_WIN)
954 :
// Windows fault-handling body: decide whether the access violation described
// by |exception| was raised by wasm/asm.js code and, if so, update the
// thread's CONTEXT so execution resumes in the appropriate trap stub.
// Returns true iff the fault was handled (caller resumes execution).
static bool
HandleFault(PEXCEPTION_POINTERS exception)
{
    EXCEPTION_RECORD* record = exception->ExceptionRecord;
    CONTEXT* context = exception->ContextRecord;

    if (record->ExceptionCode != EXCEPTION_ACCESS_VIOLATION)
        return false;

    uint8_t** ppc = ContextToPC(context);
    uint8_t* pc = *ppc;

    // ExceptionInformation[1] (read below) carries the faulting address;
    // bail out if the record doesn't include it.
    if (record->NumberParameters < 2)
        return false;

    // Don't allow recursive handling of signals, see AutoSetHandlingSegFault.
    JSContext* cx = TlsContext.get();
    if (!cx || cx->handlingSegFault)
        return false;
    AutoSetHandlingSegFault handling(cx);

    WasmActivation* activation = ActivationIfInnermost(cx);
    if (!activation)
        return false;

    const CodeSegment* codeSegment;
    const Code* code = activation->compartment()->wasm.lookupCode(pc, &codeSegment);
    if (!code)
        return false;

    if (!codeSegment->containsFunctionPC(pc)) {
        // On Windows, it is possible for InterruptRunningJitCode to execute
        // between a faulting heap access and the handling of the fault due
        // to InterruptRunningJitCode's use of SuspendThread. When this happens,
        // after ResumeThread, the exception handler is called with pc equal to
        // CodeSegment.interrupt, which is logically wrong. The Right Thing would
        // be for the OS to make fault-handling atomic (so that CONTEXT.pc was
        // always the logically-faulting pc). Fortunately, we can detect this
        // case and silence the exception ourselves (the exception will
        // retrigger after the interrupt jumps back to resumePC).

        for (auto t : code->tiers()) {
            if (pc == code->segment(t).interruptCode() &&
                activation->interrupted() &&
                code->segment(t).containsFunctionPC(activation->resumePC()))
            {
                return true;
            }
        }
        return false;
    }

    const Instance* instance = LookupFaultingInstance(activation, pc, ContextToFP(context));
    if (!instance)
        return false;

    uint8_t* faultingAddress = reinterpret_cast<uint8_t*>(record->ExceptionInformation[1]);

    // This check isn't necessary, but, since we can, check anyway to make
    // sure we aren't covering up a real bug.
    if (!IsHeapAccessAddress(*instance, faultingAddress))
        return false;

    // Similar to the non-atomic situation above, on Windows, an OOB fault at a
    // PC can trigger *after* an async interrupt observed that PC and attempted
    // to redirect to the async stub. In this unique case, interrupted() is
    // already true when the OOB handler is called. Since the point of the async
    // interrupt is to get out of an iloop and the OOB trap will do just that,
    // we can simply clear the interrupt. (The update to CONTEXT.pc made by
    // HandleMemoryAccess will clobber the interrupt's previous update.)
    if (activation->interrupted()) {
        MOZ_ASSERT(activation->resumePC() == pc);
        activation->finishInterrupt();
    }

    HandleMemoryAccess(context, pc, faultingAddress, *instance, activation, ppc);
    return true;
}
1033 :
1034 : static LONG WINAPI
1035 : WasmFaultHandler(LPEXCEPTION_POINTERS exception)
1036 : {
1037 : if (HandleFault(exception))
1038 : return EXCEPTION_CONTINUE_EXECUTION;
1039 :
1040 : // No need to worry about calling other handlers, the OS does this for us.
1041 : return EXCEPTION_CONTINUE_SEARCH;
1042 : }
1043 :
1044 : #elif defined(XP_DARWIN)
1045 : # include <mach/exc.h>
1046 :
1047 : // This definition was generated by mig (the Mach Interface Generator) for the
1048 : // routine 'exception_raise' (exc.defs).
// pack(4) reproduces the layout of the mig-generated message structure so the
// fields line up with what mach_msg() delivers — do not reorder or re-align.
#pragma pack(4)
typedef struct {
    mach_msg_header_t Head;
    /* start of the kernel processed data */
    mach_msg_body_t msgh_body;
    mach_msg_port_descriptor_t thread;  // port of the faulting thread
    mach_msg_port_descriptor_t task;
    /* end of the kernel processed data */
    NDR_record_t NDR;
    exception_type_t exception;         // e.g. EXC_BAD_ACCESS
    mach_msg_type_number_t codeCnt;     // number of valid entries in code[]
    int64_t code[2];                    // code[1] is the faulting address for EXC_BAD_ACCESS
} Request__mach_exception_raise_t;
#pragma pack()
1063 :
1064 : // The full Mach message also includes a trailer.
struct ExceptionRequest
{
    Request__mach_exception_raise_t body;
    mach_msg_trailer_t trailer;  // receive-side trailer space filled in by mach_msg()
};
1070 :
// Handle a Mach EXC_BAD_ACCESS exception raised by |cx|'s (kernel-suspended)
// thread. Runs on the dedicated handler thread: reads the faulting thread's
// register state, dispatches to HandleMemoryAccess, and writes the updated
// state (new pc) back. Returns true iff this was a wasm/asm.js fault that
// has been handled; false tells the caller to reply KERN_FAILURE.
static bool
HandleMachException(JSContext* cx, const ExceptionRequest& request)
{
    // Don't allow recursive handling of signals, see AutoSetHandlingSegFault.
    if (cx->handlingSegFault)
        return false;
    AutoSetHandlingSegFault handling(cx);

    // Get the port of the JSContext's thread from the message.
    mach_port_t cxThread = request.body.thread.name;

    // Read out the JSRuntime thread's register state.
    EMULATOR_CONTEXT context;
# if defined(__x86_64__)
    unsigned int thread_state_count = x86_THREAD_STATE64_COUNT;
    unsigned int float_state_count = x86_FLOAT_STATE64_COUNT;
    int thread_state = x86_THREAD_STATE64;
    int float_state = x86_FLOAT_STATE64;
# elif defined(__i386__)
    unsigned int thread_state_count = x86_THREAD_STATE_COUNT;
    unsigned int float_state_count = x86_FLOAT_STATE_COUNT;
    int thread_state = x86_THREAD_STATE;
    int float_state = x86_FLOAT_STATE;
# elif defined(__arm__)
    unsigned int thread_state_count = ARM_THREAD_STATE_COUNT;
    unsigned int float_state_count = ARM_NEON_STATE_COUNT;
    int thread_state = ARM_THREAD_STATE;
    int float_state = ARM_NEON_STATE;
# else
#  error Unsupported architecture
# endif
    kern_return_t kret;
    kret = thread_get_state(cxThread, thread_state,
                            (thread_state_t)&context.thread, &thread_state_count);
    if (kret != KERN_SUCCESS)
        return false;
    kret = thread_get_state(cxThread, float_state,
                            (thread_state_t)&context.float_, &float_state_count);
    if (kret != KERN_SUCCESS)
        return false;

    uint8_t** ppc = ContextToPC(&context);
    uint8_t* pc = *ppc;

    // Only handle bad-access exceptions that carry both code words
    // (code[1] holds the faulting address, read below).
    if (request.body.exception != EXC_BAD_ACCESS || request.body.codeCnt != 2)
        return false;

    // The faulting thread is suspended so we can access cx fields that can
    // normally only be accessed by the cx's active thread.
    AutoNoteSingleThreadedRegion anstr;

    WasmActivation* activation = ActivationIfInnermost(cx);
    if (!activation)
        return false;

    const Instance* instance = LookupFaultingInstance(activation, pc, ContextToFP(&context));
    if (!instance || !instance->code().containsFunctionPC(pc))
        return false;

    uint8_t* faultingAddress = reinterpret_cast<uint8_t*>(request.body.code[1]);

    // This check isn't necessary, but, since we can, check anyway to make
    // sure we aren't covering up a real bug.
    if (!IsHeapAccessAddress(*instance, faultingAddress))
        return false;

    HandleMemoryAccess(&context, pc, faultingAddress, *instance, activation, ppc);

    // Update the thread state with the new pc and register values.
    kret = thread_set_state(cxThread, float_state, (thread_state_t)&context.float_, float_state_count);
    if (kret != KERN_SUCCESS)
        return false;
    kret = thread_set_state(cxThread, thread_state, (thread_state_t)&context.thread, thread_state_count);
    if (kret != KERN_SUCCESS)
        return false;

    return true;
}
1149 :
// Taken from mach_exc in /usr/include/mach/mach_exc.defs.
// This is the msgh_id of a mach_exception_raise request message.
static const mach_msg_id_t sExceptionId = 2405;

// The choice of id here is arbitrary, the only constraint is that sQuitId != sExceptionId.
// Sent by MachExceptionHandler::uninstall() to shut down the handler thread.
static const mach_msg_id_t sQuitId = 42;
1155 :
1156 : static void
1157 : MachExceptionHandlerThread(JSContext* cx)
1158 : {
1159 : mach_port_t port = cx->wasmMachExceptionHandler.port();
1160 : kern_return_t kret;
1161 :
1162 : while(true) {
1163 : ExceptionRequest request;
1164 : kret = mach_msg(&request.body.Head, MACH_RCV_MSG, 0, sizeof(request),
1165 : port, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
1166 :
1167 : // If we fail even receiving the message, we can't even send a reply!
1168 : // Rather than hanging the faulting thread (hanging the browser), crash.
1169 : if (kret != KERN_SUCCESS) {
1170 : fprintf(stderr, "MachExceptionHandlerThread: mach_msg failed with %d\n", (int)kret);
1171 : MOZ_CRASH();
1172 : }
1173 :
1174 : // There are only two messages we should be receiving: an exception
1175 : // message that occurs when the runtime's thread faults and the quit
1176 : // message sent when the runtime is shutting down.
1177 : if (request.body.Head.msgh_id == sQuitId)
1178 : break;
1179 : if (request.body.Head.msgh_id != sExceptionId) {
1180 : fprintf(stderr, "Unexpected msg header id %d\n", (int)request.body.Head.msgh_bits);
1181 : MOZ_CRASH();
1182 : }
1183 :
1184 : // Some thread just commited an EXC_BAD_ACCESS and has been suspended by
1185 : // the kernel. The kernel is waiting for us to reply with instructions.
1186 : // Our default is the "not handled" reply (by setting the RetCode field
1187 : // of the reply to KERN_FAILURE) which tells the kernel to continue
1188 : // searching at the process and system level. If this is an asm.js
1189 : // expected exception, we handle it and return KERN_SUCCESS.
1190 : bool handled = HandleMachException(cx, request);
1191 : kern_return_t replyCode = handled ? KERN_SUCCESS : KERN_FAILURE;
1192 :
1193 : // This magic incantation to send a reply back to the kernel was derived
1194 : // from the exc_server generated by 'mig -v /usr/include/mach/mach_exc.defs'.
1195 : __Reply__exception_raise_t reply;
1196 : reply.Head.msgh_bits = MACH_MSGH_BITS(MACH_MSGH_BITS_REMOTE(request.body.Head.msgh_bits), 0);
1197 : reply.Head.msgh_size = sizeof(reply);
1198 : reply.Head.msgh_remote_port = request.body.Head.msgh_remote_port;
1199 : reply.Head.msgh_local_port = MACH_PORT_NULL;
1200 : reply.Head.msgh_id = request.body.Head.msgh_id + 100;
1201 : reply.NDR = NDR_record;
1202 : reply.RetCode = replyCode;
1203 : mach_msg(&reply.Head, MACH_SEND_MSG, sizeof(reply), 0, MACH_PORT_NULL,
1204 : MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
1205 : }
1206 : }
1207 :
// Construct in the uninstalled state; install() must be called before any
// exceptions are routed to this handler.
MachExceptionHandler::MachExceptionHandler()
  : installed_(false),
    thread_(),
    port_(MACH_PORT_NULL)
{}
1213 :
// Tear down in reverse order of install(): restore the thread's exception
// port to MACH_PORT_NULL, ask the handler thread to quit and join it, then
// destroy the port. Safe to call on a partially-installed handler (it is
// install()'s failure path via ScopeExit).
void
MachExceptionHandler::uninstall()
{
    if (installed_) {
        thread_port_t thread = mach_thread_self();
        kern_return_t kret = thread_set_exception_ports(thread,
                                                        EXC_MASK_BAD_ACCESS,
                                                        MACH_PORT_NULL,
                                                        EXCEPTION_DEFAULT | MACH_EXCEPTION_CODES,
                                                        THREAD_STATE_NONE);
        mach_port_deallocate(mach_task_self(), thread);
        if (kret != KERN_SUCCESS)
            MOZ_CRASH();
        installed_ = false;
    }
    if (thread_.joinable()) {
        // Break the handler thread out of the mach_msg loop.
        mach_msg_header_t msg;
        msg.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND, 0);
        msg.msgh_size = sizeof(msg);
        msg.msgh_remote_port = port_;
        msg.msgh_local_port = MACH_PORT_NULL;
        msg.msgh_reserved = 0;
        msg.msgh_id = sQuitId;
        kern_return_t kret = mach_msg(&msg, MACH_SEND_MSG, sizeof(msg), 0, MACH_PORT_NULL,
                                      MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
        if (kret != KERN_SUCCESS) {
            fprintf(stderr, "MachExceptionHandler: failed to send quit message: %d\n", (int)kret);
            MOZ_CRASH();
        }

        // Wait for the handler thread to complete before deallocating the port.
        thread_.join();
    }
    if (port_ != MACH_PORT_NULL) {
        DebugOnly<kern_return_t> kret = mach_port_destroy(mach_task_self(), port_);
        MOZ_ASSERT(kret == KERN_SUCCESS);
        port_ = MACH_PORT_NULL;
    }
}
1254 :
// Route EXC_BAD_ACCESS exceptions on the calling thread to a dedicated
// handler thread via a freshly allocated Mach port. Returns false on any
// failure, in which case the scope-exit uninstall() releases whatever was
// partially acquired.
bool
MachExceptionHandler::install(JSContext* cx)
{
    MOZ_ASSERT(!installed());
    kern_return_t kret;
    mach_port_t thread;

    auto onFailure = mozilla::MakeScopeExit([&] {
        uninstall();
    });

    // Get a port which can send and receive data.
    kret = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &port_);
    if (kret != KERN_SUCCESS)
        return false;
    kret = mach_port_insert_right(mach_task_self(), port_, port_, MACH_MSG_TYPE_MAKE_SEND);
    if (kret != KERN_SUCCESS)
        return false;

    // Create a thread to block on reading port_.
    if (!thread_.init(MachExceptionHandlerThread, cx))
        return false;

    // Direct exceptions on this thread to port_ (and thus our handler thread).
    // Note: we are totally clobbering any existing *thread* exception ports and
    // not even attempting to forward. Breakpad and gdb both use the *process*
    // exception ports which are only called if the thread doesn't handle the
    // exception, so we should be fine.
    thread = mach_thread_self();
    kret = thread_set_exception_ports(thread,
                                      EXC_MASK_BAD_ACCESS,
                                      port_,
                                      EXCEPTION_DEFAULT | MACH_EXCEPTION_CODES,
                                      THREAD_STATE_NONE);
    mach_port_deallocate(mach_task_self(), thread);
    if (kret != KERN_SUCCESS)
        return false;

    installed_ = true;
    onFailure.release();
    return true;
}
1297 :
1298 : #else // If not Windows or Mac, assume Unix
1299 :
// Selects, at template-instantiation time, which hardware fault signal a
// HandleFault/WasmFaultHandler instantiation services: SIGSEGV for SegFault,
// SIGBUS for BusError.
enum class Signal {
    SegFault,
    BusError
};
1304 :
1305 : // Be very cautious and default to not handling; we don't want to accidentally
1306 : // silence real crashes from real bugs.
// Unix SIGSEGV/SIGBUS handling body (templated on the expected signal).
// Returns true iff the fault was caused by wasm/asm.js code and has been
// handled by redirecting the context's pc to the appropriate trap stub.
// Any wrong "true" here would silence a real crash, hence the many bail-outs.
template<Signal signal>
static bool
HandleFault(int signum, siginfo_t* info, void* ctx)
{
    // The signals we're expecting come from access violations, accessing
    // mprotected memory. If the signal originates anywhere else, don't try
    // to handle it.
    if (signal == Signal::SegFault)
        MOZ_RELEASE_ASSERT(signum == SIGSEGV);
    else
        MOZ_RELEASE_ASSERT(signum == SIGBUS);

    CONTEXT* context = (CONTEXT*)ctx;
    uint8_t** ppc = ContextToPC(context);
    uint8_t* pc = *ppc;

    // Don't allow recursive handling of signals, see AutoSetHandlingSegFault.
    JSContext* cx = TlsContext.get();
    if (!cx || cx->handlingSegFault)
        return false;
    AutoSetHandlingSegFault handling(cx);

    WasmActivation* activation = ActivationIfInnermost(cx);
    if (!activation)
        return false;

    const CodeSegment* segment;
    const Instance* instance = LookupFaultingInstance(activation, pc, ContextToFP(context));
    if (!instance || !instance->code().containsFunctionPC(pc, &segment))
        return false;

    uint8_t* faultingAddress = reinterpret_cast<uint8_t*>(info->si_addr);

    // Although it's not strictly necessary, to make sure we're not covering up
    // any real bugs, check that the faulting address is indeed in the
    // instance's memory.
    if (!faultingAddress) {
        // On some Linux systems, the kernel apparently sometimes "gives up" and
        // passes a null faultingAddress with si_code set to SI_KERNEL.
        // This is observed on some automation machines for some out-of-bounds
        // atomic accesses on x86/64.
#ifdef SI_KERNEL
        if (info->si_code != SI_KERNEL)
            return false;
#else
        return false;
#endif
    } else {
        if (!IsHeapAccessAddress(*instance, faultingAddress))
            return false;
    }

#ifdef JS_CODEGEN_ARM
    if (signal == Signal::BusError) {
        // TODO: We may see a bus error for something that is an unaligned access that
        // partly overlaps the end of the heap. In this case, it is an out-of-bounds
        // error and we should signal that properly, but to do so we must inspect
        // the operand of the failed access.
        activation->startInterrupt(ToRegisterState(context));
        *ppc = segment->unalignedAccessCode();
        return true;
    }
#endif

    HandleMemoryAccess(context, pc, faultingAddress, *instance, activation, ppc);
    return true;
}
1374 :
// Dispositions that were in place before ProcessHasSignalHandlers() installed
// WasmFaultHandler; non-wasm faults are forwarded to these (see WasmFaultHandler).
static struct sigaction sPrevSEGVHandler;
static struct sigaction sPrevSIGBUSHandler;
1377 :
// SA_SIGINFO handler for SIGSEGV/SIGBUS installed by ProcessHasSignalHandlers().
// Tries the wasm fault path first; otherwise forwards to (or re-arms for)
// whatever handler was previously installed.
template<Signal signal>
static void
WasmFaultHandler(int signum, siginfo_t* info, void* context)
{
    if (HandleFault<signal>(signum, info, context))
        return;

    struct sigaction* previousSignal = signum == SIGSEGV
                                       ? &sPrevSEGVHandler
                                       : &sPrevSIGBUSHandler;

    // This signal is not for any asm.js code we expect, so we need to forward
    // the signal to the next handler. If there is no next handler (SIG_IGN or
    // SIG_DFL), then it's time to crash. To do this, we set the signal back to
    // its original disposition and return. This will cause the faulting op to
    // be re-executed which will crash in the normal way. The advantage of
    // doing this to calling _exit() is that we remove ourselves from the crash
    // stack which improves crash reports. If there is a next handler, call it.
    // It will either crash synchronously, fix up the instruction so that
    // execution can continue and return, or trigger a crash by returning the
    // signal to it's original disposition and returning.
    //
    // Note: the order of these tests matter.
    if (previousSignal->sa_flags & SA_SIGINFO)
        previousSignal->sa_sigaction(signum, info, context);
    else if (previousSignal->sa_handler == SIG_DFL || previousSignal->sa_handler == SIG_IGN)
        sigaction(signum, previousSignal, nullptr);
    else
        previousSignal->sa_handler(signum);
}
1408 : # endif // XP_WIN || XP_DARWIN || assume unix
1409 :
// Patch Ion loop backedges in the context's zone to jump to the interrupt
// check, so JIT code spinning in a loop notices a pending interrupt. Called
// with the target thread suspended (see RedirectJitCodeToInterruptCheck).
static void
RedirectIonBackedgesToInterruptCheck(JSContext* cx)
{
    if (!cx->runtime()->hasJitRuntime())
        return;
    jit::JitRuntime* jitRuntime = cx->runtime()->jitRuntime();
    Zone* zone = cx->zoneRaw();
    if (zone && !zone->isAtomsZone()) {
        // If the backedge list is being mutated, the pc must be in C++ code and
        // thus not in a JIT iloop. We assume that the interrupt flag will be
        // checked at least once before entering JIT code (if not, no big deal;
        // the browser will just request another interrupt in a second).
        if (!jitRuntime->preventBackedgePatching()) {
            jit::JitZoneGroup* jzg = zone->group()->jitZoneGroup;
            jzg->patchIonBackedges(cx, jit::JitZoneGroup::BackedgeInterruptCheck);
        }
    }
}
1428 :
1429 : bool
1430 2 : wasm::InInterruptibleCode(JSContext* cx, uint8_t* pc, const CodeSegment** cs)
1431 : {
1432 : // Only interrupt in function code so that the frame iterators have the
1433 : // invariant that resumePC always has a function CodeRange and we can't
1434 : // get into any weird interrupt-during-interrupt-stub cases.
1435 2 : if (!cx->compartment())
1436 2 : return false;
1437 :
1438 0 : const Code* code = cx->compartment()->wasm.lookupCode(pc, cs);
1439 0 : return code && (*cs)->containsFunctionPC(pc);
1440 : }
1441 :
1442 : // The return value indicates whether the PC was changed, not whether there was
1443 : // a failure.
static bool
RedirectJitCodeToInterruptCheck(JSContext* cx, CONTEXT* context)
{
    // Jitcode may only be modified on the runtime's active thread.
    if (cx != cx->runtime()->activeContext())
        return false;

    // The faulting thread is suspended so we can access cx fields that can
    // normally only be accessed by the cx's active thread.
    AutoNoteSingleThreadedRegion anstr;

    // Patch Ion backedges regardless of whether the pc is in wasm code.
    RedirectIonBackedgesToInterruptCheck(cx);

#ifdef JS_SIMULATOR
    uint8_t* pc = cx->simulator()->get_pc_as<uint8_t*>();
#else
    uint8_t* pc = *ContextToPC(context);
#endif

    const CodeSegment* codeSegment = nullptr;
    if (!InInterruptibleCode(cx, pc, &codeSegment))
        return false;

    // Only probe cx->activation() via ActivationIfInnermost after we know the
    // pc is in wasm code. This way we don't depend on signal-safe update of
    // cx->activation().
    WasmActivation* activation = ActivationIfInnermost(cx);
    MOZ_ASSERT(activation);

#ifdef JS_SIMULATOR
    // The checks performed by the !JS_SIMULATOR path happen in
    // Simulator::handleWasmInterrupt.
    cx->simulator()->trigger_wasm_interrupt();
#else
    // fp may be null when first entering wasm code from an entry stub.
    uint8_t* fp = ContextToFP(context);
    if (!fp)
        return false;

    // The out-of-bounds/unaligned trap paths which call startInterrupt() go
    // through function code, so test if already interrupted. These paths are
    // temporary though, so this case can be removed later.
    if (activation->interrupted())
        return false;

    // Record the interrupted register state and point the pc at the interrupt
    // stub so execution resumes there when the thread is resumed.
    activation->startInterrupt(ToRegisterState(context));
    *ContextToPC(context) = codeSegment->interruptCode();
#endif

    return true;
}
1495 :
1496 : #if !defined(XP_WIN)
// For the interrupt signal, pick a signal number that:
// - is not otherwise used by mozilla or standard libraries
// - defaults to nostop and noprint on gdb/lldb so that noone is bothered
// SIGVTALRM a relative of SIGALRM, so intended for user code, but, unlike
// SIGALRM, not used anywhere else in Mozilla.
// Delivered to a thread to run JitInterruptHandler (installed below).
static const int sInterruptSignal = SIGVTALRM;
1503 :
// Handler for sInterruptSignal: runs on the thread being interrupted and
// attempts to redirect its JIT/wasm pc to an interrupt check.
static void
JitInterruptHandler(int signum, siginfo_t* info, void* context)
{
    if (JSContext* cx = TlsContext.get()) {

#if defined(JS_SIMULATOR_ARM) || defined(JS_SIMULATOR_MIPS32) || defined(JS_SIMULATOR_MIPS64)
        // Simulator builds: suspend icache checking around the redirection
        // and record that the handler may have invalidated the cache.
        SimulatorProcess::ICacheCheckingDisableCount++;
#endif

        RedirectJitCodeToInterruptCheck(cx, (CONTEXT*)context);

#if defined(JS_SIMULATOR_ARM) || defined(JS_SIMULATOR_MIPS32) || defined(JS_SIMULATOR_MIPS64)
        SimulatorProcess::cacheInvalidatedBySignalHandler_ = true;
        SimulatorProcess::ICacheCheckingDisableCount--;
#endif

        // Mark the interrupt request as serviced. NOTE(review): presumably
        // pairs with a start-handling state set by the requesting thread —
        // confirm against JSContext.
        cx->finishHandlingJitInterrupt();
    }
}
1523 : #endif
1524 :
// Set by ProcessHasSignalHandlers(); installation is attempted at most once
// per process (assumed to run while effectively single-threaded).
static bool sTriedInstallSignalHandlers = false;
static bool sHaveSignalHandlers = false;
1527 :
// Install the process-wide interrupt and fault handlers the first time this
// is called; subsequent calls return the cached result. Returns false on
// platforms/configurations where signal handling is unusable.
static bool
ProcessHasSignalHandlers()
{
    // We assume that there are no races creating the first JSRuntime of the process.
    if (sTriedInstallSignalHandlers)
        return sHaveSignalHandlers;
    sTriedInstallSignalHandlers = true;

#if defined(ANDROID)
# if !defined(__aarch64__)
    // Before Android 4.4 (SDK version 19), there is a bug
    // https://android-review.googlesource.com/#/c/52333
    // in Bionic's pthread_join which causes pthread_join to return early when
    // pthread_kill is used (on any thread). Nobody expects the pthread_cond_wait
    // EINTRquisition.
    char version_string[PROP_VALUE_MAX];
    PodArrayZero(version_string);
    if (__system_property_get("ro.build.version.sdk", version_string) > 0) {
        if (atol(version_string) < 19)
            return false;
    }
# endif
# if defined(MOZ_LINKER)
    // Signal handling is broken on some android systems.
    if (IsSignalHandlingBroken())
        return false;
# endif
#endif

    // The interrupt handler allows the active thread to be paused from another
    // thread (see InterruptRunningJitCode).
#if defined(XP_WIN)
    // Windows uses SuspendThread to stop the active thread from another thread.
#else
    struct sigaction interruptHandler;
    interruptHandler.sa_flags = SA_SIGINFO;
    interruptHandler.sa_sigaction = &JitInterruptHandler;
    sigemptyset(&interruptHandler.sa_mask);
    struct sigaction prev;
    if (sigaction(sInterruptSignal, &interruptHandler, &prev))
        MOZ_CRASH("unable to install interrupt handler");

    // There shouldn't be any other handlers installed for sInterruptSignal. If
    // there are, we could always forward, but we need to understand what we're
    // doing to avoid problematic interference.
    if ((prev.sa_flags & SA_SIGINFO && prev.sa_sigaction) ||
        (prev.sa_handler != SIG_DFL && prev.sa_handler != SIG_IGN))
    {
        MOZ_CRASH("contention for interrupt signal");
    }
#endif // defined(XP_WIN)

    // Install a SIGSEGV handler to handle safely-out-of-bounds asm.js heap
    // access and/or unaligned accesses.
# if defined(XP_WIN)
#  if defined(MOZ_ASAN)
    // Under ASan we need to let the ASan runtime's ShadowExceptionHandler stay
    // in the first handler position. This requires some coordination with
    // MemoryProtectionExceptionHandler::isDisabled().
    const bool firstHandler = false;
#  else
    // Otherwise, WasmFaultHandler needs to go first, so that we can recover
    // from wasm faults and continue execution without triggering handlers
    // such as MemoryProtectionExceptionHandler that assume we are crashing.
    const bool firstHandler = true;
#  endif
    if (!AddVectoredExceptionHandler(firstHandler, WasmFaultHandler))
        return false;
# elif defined(XP_DARWIN)
    // OSX handles seg faults via the Mach exception handler above, so don't
    // install WasmFaultHandler.
# else
    // SA_NODEFER allows us to reenter the signal handler if we crash while
    // handling the signal, and fall through to the Breakpad handler by testing
    // handlingSegFault.

    // Allow handling OOB with signals on all architectures
    struct sigaction faultHandler;
    faultHandler.sa_flags = SA_SIGINFO | SA_NODEFER;
    faultHandler.sa_sigaction = WasmFaultHandler<Signal::SegFault>;
    sigemptyset(&faultHandler.sa_mask);
    if (sigaction(SIGSEGV, &faultHandler, &sPrevSEGVHandler))
        MOZ_CRASH("unable to install segv handler");

#  if defined(JS_CODEGEN_ARM)
    // On Arm Handle Unaligned Accesses
    struct sigaction busHandler;
    busHandler.sa_flags = SA_SIGINFO | SA_NODEFER;
    busHandler.sa_sigaction = WasmFaultHandler<Signal::BusError>;
    sigemptyset(&busHandler.sa_mask);
    if (sigaction(SIGBUS, &busHandler, &sPrevSIGBUSHandler))
        MOZ_CRASH("unable to install sigbus handler");
#  endif
# endif

    sHaveSignalHandlers = true;
    return true;
}
1626 :
1627 : bool
1628 4 : wasm::EnsureSignalHandlers(JSContext* cx)
1629 : {
1630 : // Nothing to do if the platform doesn't support it.
1631 4 : if (!ProcessHasSignalHandlers())
1632 0 : return true;
1633 :
1634 : #if defined(XP_DARWIN)
1635 : // On OSX, each JSContext which runs wasm gets its own handler thread.
1636 : if (!cx->wasmMachExceptionHandler.installed() && !cx->wasmMachExceptionHandler.install(cx))
1637 : return false;
1638 : #endif
1639 :
1640 4 : return true;
1641 : }
1642 :
1643 : bool
1644 19 : wasm::HaveSignalHandlers()
1645 : {
1646 19 : MOZ_ASSERT(sTriedInstallSignalHandlers);
1647 19 : return sHaveSignalHandlers;
1648 : }
1649 :
1650 : // JSRuntime::requestInterrupt sets interrupt_ (which is checked frequently by
1651 : // C++ code at every Baseline JIT loop backedge) and jitStackLimit_ (which is
1652 : // checked at every Baseline and Ion JIT function prologue). The remaining
1653 : // sources of potential iloops (Ion loop backedges and all wasm code) are
1654 : // handled by this function:
1655 : // 1. Ion loop backedges are patched to instead point to a stub that handles
1656 : // the interrupt;
1657 : // 2. if the active thread's pc is inside wasm code, the pc is updated to point
1658 : // to a stub that handles the interrupt.
void
js::InterruptRunningJitCode(JSContext* cx)
{
    // If signal handlers weren't installed, then Ion and wasm emit normal
    // interrupt checks and don't need asynchronous interruption.
    if (!HaveSignalHandlers())
        return;

    // Do nothing if we're already handling an interrupt here, to avoid races
    // below and in JitRuntime::patchIonBackedges.
    if (!cx->startHandlingJitInterrupt())
        return;

    // If we are on context's thread, then: pc is not in wasm code (so nothing
    // to do for wasm) and we can patch Ion backedges without any special
    // synchronization.
    if (cx == TlsContext.get()) {
        RedirectIonBackedgesToInterruptCheck(cx);
        cx->finishHandlingJitInterrupt();
        return;
    }

    // We are not on the runtime's active thread, so to do 1 and 2 above, we need
    // to halt the runtime's active thread first.
#if defined(XP_WIN)
    // On Windows, we can simply suspend the active thread and work directly on
    // its context from this thread. SuspendThread can sporadically fail if the
    // thread is in the middle of a syscall. Rather than retrying in a loop,
    // just wait for the next request for interrupt.
    HANDLE thread = (HANDLE)cx->threadNative();
    if (SuspendThread(thread) != (DWORD)-1) {
        // Request the full register set so pc/sp are valid for redirection.
        CONTEXT context;
        context.ContextFlags = CONTEXT_FULL;
        if (GetThreadContext(thread, &context)) {
            // Only write the context back if it was actually modified.
            if (RedirectJitCodeToInterruptCheck(cx, &context))
                SetThreadContext(thread, &context);
        }
        // Always resume, even if GetThreadContext or redirection failed.
        ResumeThread(thread);
    }
    cx->finishHandlingJitInterrupt();
#else
    // On Unix, we instead deliver an async signal to the active thread which
    // halts the thread and calls our JitInterruptHandler (which has already
    // been installed by EnsureSignalHandlersInstalled).
    // NOTE(review): unlike the Windows path above, finishHandlingJitInterrupt
    // is not called here; presumably JitInterruptHandler completes the
    // handshake on the interrupted thread — verify against that handler.
    pthread_t thread = (pthread_t)cx->threadNative();
    pthread_kill(thread, sInterruptSignal);
#endif
}
|