Line data Source code
1 : /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
2 : * vim: set ts=8 sts=4 et sw=4 tw=99:
3 : * This Source Code Form is subject to the terms of the Mozilla Public
4 : * License, v. 2.0. If a copy of the MPL was not distributed with this
5 : * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
6 :
7 : #ifndef jit_x64_BaseAssembler_x64_h
8 : #define jit_x64_BaseAssembler_x64_h
9 :
10 : #include "jit/x86-shared/BaseAssembler-x86-shared.h"
11 :
12 : namespace js {
13 : namespace jit {
14 :
15 : namespace X86Encoding {
16 :
17 9006 : class BaseAssemblerX64 : public BaseAssembler
18 : {
19 : public:
20 :
21 : // Arithmetic operations:
22 :
23 497 : void addq_rr(RegisterID src, RegisterID dst)
24 : {
25 497 : spew("addq %s, %s", GPReg64Name(src), GPReg64Name(dst));
26 497 : m_formatter.oneByteOp64(OP_ADD_GvEv, src, dst);
27 497 : }
28 :
29 22 : void addq_mr(int32_t offset, RegisterID base, RegisterID dst)
30 : {
31 22 : spew("addq " MEM_ob ", %s", ADDR_ob(offset, base), GPReg64Name(dst));
32 22 : m_formatter.oneByteOp64(OP_ADD_GvEv, offset, base, dst);
33 22 : }
34 :
35 0 : void addq_mr(const void* addr, RegisterID dst)
36 : {
37 0 : spew("addq %p, %s", addr, GPReg64Name(dst));
38 0 : m_formatter.oneByteOp64(OP_ADD_GvEv, addr, dst);
39 0 : }
40 :
41 35224 : void addq_ir(int32_t imm, RegisterID dst)
42 : {
43 35224 : spew("addq $%d, %s", imm, GPReg64Name(dst));
44 35224 : if (CAN_SIGN_EXTEND_8_32(imm)) {
45 24176 : m_formatter.oneByteOp64(OP_GROUP1_EvIb, dst, GROUP1_OP_ADD);
46 24176 : m_formatter.immediate8s(imm);
47 : } else {
48 11048 : if (dst == rax)
49 0 : m_formatter.oneByteOp64(OP_ADD_EAXIv);
50 : else
51 11048 : m_formatter.oneByteOp64(OP_GROUP1_EvIz, dst, GROUP1_OP_ADD);
52 11048 : m_formatter.immediate32(imm);
53 : }
54 35223 : }
55 :
56 0 : void addq_i32r(int32_t imm, RegisterID dst)
57 : {
58 : // 32-bit immediate always, for patching.
59 0 : spew("addq $0x%04x, %s", imm, GPReg64Name(dst));
60 0 : if (dst == rax)
61 0 : m_formatter.oneByteOp64(OP_ADD_EAXIv);
62 : else
63 0 : m_formatter.oneByteOp64(OP_GROUP1_EvIz, dst, GROUP1_OP_ADD);
64 0 : m_formatter.immediate32(imm);
65 0 : }
66 :
67 7508 : void addq_im(int32_t imm, int32_t offset, RegisterID base)
68 : {
69 7508 : spew("addq $%d, " MEM_ob, imm, ADDR_ob(offset, base));
70 7508 : if (CAN_SIGN_EXTEND_8_32(imm)) {
71 7508 : m_formatter.oneByteOp64(OP_GROUP1_EvIb, offset, base, GROUP1_OP_ADD);
72 7508 : m_formatter.immediate8s(imm);
73 : } else {
74 0 : m_formatter.oneByteOp64(OP_GROUP1_EvIz, offset, base, GROUP1_OP_ADD);
75 0 : m_formatter.immediate32(imm);
76 : }
77 7508 : }
78 :
79 0 : void addq_im(int32_t imm, const void* addr)
80 : {
81 0 : spew("addq $%d, %p", imm, addr);
82 0 : if (CAN_SIGN_EXTEND_8_32(imm)) {
83 0 : m_formatter.oneByteOp64(OP_GROUP1_EvIb, addr, GROUP1_OP_ADD);
84 0 : m_formatter.immediate8s(imm);
85 : } else {
86 0 : m_formatter.oneByteOp64(OP_GROUP1_EvIz, addr, GROUP1_OP_ADD);
87 0 : m_formatter.immediate32(imm);
88 : }
89 0 : }
90 :
91 1485 : void andq_rr(RegisterID src, RegisterID dst)
92 : {
93 1485 : spew("andq %s, %s", GPReg64Name(src), GPReg64Name(dst));
94 1485 : m_formatter.oneByteOp64(OP_AND_GvEv, src, dst);
95 1485 : }
96 :
97 162 : void andq_mr(int32_t offset, RegisterID base, RegisterID dst)
98 : {
99 162 : spew("andq " MEM_ob ", %s", ADDR_ob(offset, base), GPReg64Name(dst));
100 162 : m_formatter.oneByteOp64(OP_AND_GvEv, offset, base, dst);
101 162 : }
102 :
103 1 : void andq_mr(int32_t offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
104 : {
105 1 : spew("andq " MEM_obs ", %s", ADDR_obs(offset, base, index, scale), GPReg64Name(dst));
106 1 : m_formatter.oneByteOp64(OP_AND_GvEv, offset, base, index, scale, dst);
107 1 : }
108 :
109 0 : void andq_mr(const void* addr, RegisterID dst)
110 : {
111 0 : spew("andq %p, %s", addr, GPReg64Name(dst));
112 0 : m_formatter.oneByteOp64(OP_AND_GvEv, addr, dst);
113 0 : }
114 :
115 0 : void orq_mr(int32_t offset, RegisterID base, RegisterID dst)
116 : {
117 0 : spew("orq " MEM_ob ", %s", ADDR_ob(offset, base), GPReg64Name(dst));
118 0 : m_formatter.oneByteOp64(OP_OR_GvEv, offset, base, dst);
119 0 : }
120 :
121 0 : void orq_mr(const void* addr, RegisterID dst)
122 : {
123 0 : spew("orq %p, %s", addr, GPReg64Name(dst));
124 0 : m_formatter.oneByteOp64(OP_OR_GvEv, addr, dst);
125 0 : }
126 :
127 0 : void xorq_mr(int32_t offset, RegisterID base, RegisterID dst)
128 : {
129 0 : spew("xorq " MEM_ob ", %s", ADDR_ob(offset, base), GPReg64Name(dst));
130 0 : m_formatter.oneByteOp64(OP_XOR_GvEv, offset, base, dst);
131 0 : }
132 :
133 0 : void xorq_mr(const void* addr, RegisterID dst)
134 : {
135 0 : spew("xorq %p, %s", addr, GPReg64Name(dst));
136 0 : m_formatter.oneByteOp64(OP_XOR_GvEv, addr, dst);
137 0 : }
138 :
139 0 : void bsrq_rr(RegisterID src, RegisterID dst)
140 : {
141 0 : spew("bsrq %s, %s", GPReg64Name(src), GPReg64Name(dst));
142 0 : m_formatter.twoByteOp64(OP2_BSR_GvEv, src, dst);
143 0 : }
144 :
145 0 : void bsfq_rr(RegisterID src, RegisterID dst)
146 : {
147 0 : spew("bsfq %s, %s", GPReg64Name(src), GPReg64Name(dst));
148 0 : m_formatter.twoByteOp64(OP2_BSF_GvEv, src, dst);
149 0 : }
150 :
151 0 : void popcntq_rr(RegisterID src, RegisterID dst)
152 : {
153 0 : spew("popcntq %s, %s", GPReg64Name(src), GPReg64Name(dst));
154 0 : m_formatter.legacySSEPrefix(VEX_SS);
155 0 : m_formatter.twoByteOp64(OP2_POPCNT_GvEv, src, dst);
156 0 : }
157 :
158 14369 : void andq_ir(int32_t imm, RegisterID dst)
159 : {
160 14369 : spew("andq $0x%" PRIx64 ", %s", int64_t(imm), GPReg64Name(dst));
161 14369 : if (CAN_SIGN_EXTEND_8_32(imm)) {
162 14369 : m_formatter.oneByteOp64(OP_GROUP1_EvIb, dst, GROUP1_OP_AND);
163 14369 : m_formatter.immediate8s(imm);
164 : } else {
165 0 : if (dst == rax)
166 0 : m_formatter.oneByteOp64(OP_AND_EAXIv);
167 : else
168 0 : m_formatter.oneByteOp64(OP_GROUP1_EvIz, dst, GROUP1_OP_AND);
169 0 : m_formatter.immediate32(imm);
170 : }
171 14369 : }
172 :
173 0 : void negq_r(RegisterID dst)
174 : {
175 0 : spew("negq %s", GPReg64Name(dst));
176 0 : m_formatter.oneByteOp64(OP_GROUP3_Ev, dst, GROUP3_OP_NEG);
177 0 : }
178 :
179 1306 : void orq_rr(RegisterID src, RegisterID dst)
180 : {
181 1306 : spew("orq %s, %s", GPReg64Name(src), GPReg64Name(dst));
182 1306 : m_formatter.oneByteOp64(OP_OR_GvEv, src, dst);
183 1306 : }
184 :
185 2084 : void orq_ir(int32_t imm, RegisterID dst)
186 : {
187 2084 : spew("orq $0x%" PRIx64 ", %s", int64_t(imm), GPReg64Name(dst));
188 2084 : if (CAN_SIGN_EXTEND_8_32(imm)) {
189 1650 : m_formatter.oneByteOp64(OP_GROUP1_EvIb, dst, GROUP1_OP_OR);
190 1650 : m_formatter.immediate8s(imm);
191 : } else {
192 434 : if (dst == rax)
193 0 : m_formatter.oneByteOp64(OP_OR_EAXIv);
194 : else
195 434 : m_formatter.oneByteOp64(OP_GROUP1_EvIz, dst, GROUP1_OP_OR);
196 434 : m_formatter.immediate32(imm);
197 : }
198 2084 : }
199 :
200 : void notq_r(RegisterID dst)
201 : {
202 : spew("notq %s", GPReg64Name(dst));
203 : m_formatter.oneByteOp64(OP_GROUP3_Ev, dst, GROUP3_OP_NOT);
204 : }
205 :
206 1700 : void subq_rr(RegisterID src, RegisterID dst)
207 : {
208 1700 : spew("subq %s, %s", GPReg64Name(src), GPReg64Name(dst));
209 1700 : m_formatter.oneByteOp64(OP_SUB_GvEv, src, dst);
210 1700 : }
211 :
212 : void subq_rm(RegisterID src, int32_t offset, RegisterID base)
213 : {
214 : spew("subq %s, " MEM_ob, GPReg64Name(src), ADDR_ob(offset, base));
215 : m_formatter.oneByteOp64(OP_SUB_EvGv, offset, base, src);
216 : }
217 :
218 91 : void subq_mr(int32_t offset, RegisterID base, RegisterID dst)
219 : {
220 91 : spew("subq " MEM_ob ", %s", ADDR_ob(offset, base), GPReg64Name(dst));
221 91 : m_formatter.oneByteOp64(OP_SUB_GvEv, offset, base, dst);
222 91 : }
223 :
224 0 : void subq_mr(const void* addr, RegisterID dst)
225 : {
226 0 : spew("subq %p, %s", addr, GPReg64Name(dst));
227 0 : m_formatter.oneByteOp64(OP_SUB_GvEv, addr, dst);
228 0 : }
229 :
230 28836 : void subq_ir(int32_t imm, RegisterID dst)
231 : {
232 28836 : spew("subq $%d, %s", imm, GPReg64Name(dst));
233 28836 : if (CAN_SIGN_EXTEND_8_32(imm)) {
234 17783 : m_formatter.oneByteOp64(OP_GROUP1_EvIb, dst, GROUP1_OP_SUB);
235 17783 : m_formatter.immediate8s(imm);
236 : } else {
237 11053 : if (dst == rax)
238 0 : m_formatter.oneByteOp64(OP_SUB_EAXIv);
239 : else
240 11053 : m_formatter.oneByteOp64(OP_GROUP1_EvIz, dst, GROUP1_OP_SUB);
241 11053 : m_formatter.immediate32(imm);
242 : }
243 28836 : }
244 :
245 0 : void xorq_rr(RegisterID src, RegisterID dst)
246 : {
247 0 : spew("xorq %s, %s", GPReg64Name(src), GPReg64Name(dst));
248 0 : m_formatter.oneByteOp64(OP_XOR_GvEv, src, dst);
249 0 : }
250 :
251 473 : void xorq_ir(int32_t imm, RegisterID dst)
252 : {
253 473 : spew("xorq $0x%" PRIx64 ", %s", int64_t(imm), GPReg64Name(dst));
254 473 : if (CAN_SIGN_EXTEND_8_32(imm)) {
255 473 : m_formatter.oneByteOp64(OP_GROUP1_EvIb, dst, GROUP1_OP_XOR);
256 473 : m_formatter.immediate8s(imm);
257 : } else {
258 0 : if (dst == rax)
259 0 : m_formatter.oneByteOp64(OP_XOR_EAXIv);
260 : else
261 0 : m_formatter.oneByteOp64(OP_GROUP1_EvIz, dst, GROUP1_OP_XOR);
262 0 : m_formatter.immediate32(imm);
263 : }
264 473 : }
265 :
266 0 : void sarq_CLr(RegisterID dst)
267 : {
268 0 : spew("sarq %%cl, %s", GPReg64Name(dst));
269 0 : m_formatter.oneByteOp64(OP_GROUP2_EvCL, dst, GROUP2_OP_SAR);
270 0 : }
271 :
272 0 : void shlq_CLr(RegisterID dst)
273 : {
274 0 : spew("shlq %%cl, %s", GPReg64Name(dst));
275 0 : m_formatter.oneByteOp64(OP_GROUP2_EvCL, dst, GROUP2_OP_SHL);
276 0 : }
277 :
278 0 : void shrq_CLr(RegisterID dst)
279 : {
280 0 : spew("shrq %%cl, %s", GPReg64Name(dst));
281 0 : m_formatter.oneByteOp64(OP_GROUP2_EvCL, dst, GROUP2_OP_SHR);
282 0 : }
283 :
284 17 : void sarq_ir(int32_t imm, RegisterID dst)
285 : {
286 17 : MOZ_ASSERT(imm < 64);
287 17 : spew("sarq $%d, %s", imm, GPReg64Name(dst));
288 17 : if (imm == 1)
289 17 : m_formatter.oneByteOp64(OP_GROUP2_Ev1, dst, GROUP2_OP_SAR);
290 : else {
291 0 : m_formatter.oneByteOp64(OP_GROUP2_EvIb, dst, GROUP2_OP_SAR);
292 0 : m_formatter.immediate8u(imm);
293 : }
294 17 : }
295 :
296 1668 : void shlq_ir(int32_t imm, RegisterID dst)
297 : {
298 1668 : MOZ_ASSERT(imm < 64);
299 1668 : spew("shlq $%d, %s", imm, GPReg64Name(dst));
300 1668 : if (imm == 1)
301 22 : m_formatter.oneByteOp64(OP_GROUP2_Ev1, dst, GROUP2_OP_SHL);
302 : else {
303 1646 : m_formatter.oneByteOp64(OP_GROUP2_EvIb, dst, GROUP2_OP_SHL);
304 1646 : m_formatter.immediate8u(imm);
305 : }
306 1668 : }
307 :
308 5828 : void shrq_ir(int32_t imm, RegisterID dst)
309 : {
310 5828 : MOZ_ASSERT(imm < 64);
311 5828 : spew("shrq $%d, %s", imm, GPReg64Name(dst));
312 5828 : if (imm == 1)
313 0 : m_formatter.oneByteOp64(OP_GROUP2_Ev1, dst, GROUP2_OP_SHR);
314 : else {
315 5828 : m_formatter.oneByteOp64(OP_GROUP2_EvIb, dst, GROUP2_OP_SHR);
316 5828 : m_formatter.immediate8u(imm);
317 : }
318 5828 : }
319 :
320 0 : void rolq_ir(int32_t imm, RegisterID dst)
321 : {
322 0 : MOZ_ASSERT(imm < 64);
323 0 : spew("rolq $%d, %s", imm, GPReg64Name(dst));
324 0 : if (imm == 1)
325 0 : m_formatter.oneByteOp64(OP_GROUP2_Ev1, dst, GROUP2_OP_ROL);
326 : else {
327 0 : m_formatter.oneByteOp64(OP_GROUP2_EvIb, dst, GROUP2_OP_ROL);
328 0 : m_formatter.immediate8u(imm);
329 : }
330 0 : }
331 0 : void rolq_CLr(RegisterID dst)
332 : {
333 0 : spew("rolq %%cl, %s", GPReg64Name(dst));
334 0 : m_formatter.oneByteOp64(OP_GROUP2_EvCL, dst, GROUP2_OP_ROL);
335 0 : }
336 :
337 0 : void rorq_ir(int32_t imm, RegisterID dst)
338 : {
339 0 : MOZ_ASSERT(imm < 64);
340 0 : spew("rorq $%d, %s", imm, GPReg64Name(dst));
341 0 : if (imm == 1)
342 0 : m_formatter.oneByteOp64(OP_GROUP2_Ev1, dst, GROUP2_OP_ROR);
343 : else {
344 0 : m_formatter.oneByteOp64(OP_GROUP2_EvIb, dst, GROUP2_OP_ROR);
345 0 : m_formatter.immediate8u(imm);
346 : }
347 0 : }
348 0 : void rorq_CLr(RegisterID dst)
349 : {
350 0 : spew("rorq %%cl, %s", GPReg64Name(dst));
351 0 : m_formatter.oneByteOp64(OP_GROUP2_EvCL, dst, GROUP2_OP_ROR);
352 0 : }
353 :
354 0 : void imulq_rr(RegisterID src, RegisterID dst)
355 : {
356 0 : spew("imulq %s, %s", GPReg64Name(src), GPReg64Name(dst));
357 0 : m_formatter.twoByteOp64(OP2_IMUL_GvEv, src, dst);
358 0 : }
359 :
360 0 : void imulq_mr(int32_t offset, RegisterID base, RegisterID dst)
361 : {
362 0 : spew("imulq " MEM_ob ", %s", ADDR_ob(offset, base), GPReg64Name(dst));
363 0 : m_formatter.twoByteOp64(OP2_IMUL_GvEv, offset, base, dst);
364 0 : }
365 :
366 0 : void cqo()
367 : {
368 0 : spew("cqo ");
369 0 : m_formatter.oneByteOp64(OP_CDQ);
370 0 : }
371 :
372 0 : void idivq_r(RegisterID divisor)
373 : {
374 0 : spew("idivq %s", GPReg64Name(divisor));
375 0 : m_formatter.oneByteOp64(OP_GROUP3_Ev, divisor, GROUP3_OP_IDIV);
376 0 : }
377 :
378 0 : void divq_r(RegisterID divisor)
379 : {
380 0 : spew("divq %s", GPReg64Name(divisor));
381 0 : m_formatter.oneByteOp64(OP_GROUP3_Ev, divisor, GROUP3_OP_DIV);
382 0 : }
383 :
384 : // Comparisons:
385 :
386 530 : void cmpq_rr(RegisterID rhs, RegisterID lhs)
387 : {
388 530 : spew("cmpq %s, %s", GPReg64Name(rhs), GPReg64Name(lhs));
389 530 : m_formatter.oneByteOp64(OP_CMP_GvEv, rhs, lhs);
390 530 : }
391 :
392 1829 : void cmpq_rm(RegisterID rhs, int32_t offset, RegisterID base)
393 : {
394 1829 : spew("cmpq %s, " MEM_ob, GPReg64Name(rhs), ADDR_ob(offset, base));
395 1829 : m_formatter.oneByteOp64(OP_CMP_EvGv, offset, base, rhs);
396 1829 : }
397 :
398 0 : void cmpq_mr(int32_t offset, RegisterID base, RegisterID lhs)
399 : {
400 0 : spew("cmpq " MEM_ob ", %s", ADDR_ob(offset, base), GPReg64Name(lhs));
401 0 : m_formatter.oneByteOp64(OP_CMP_GvEv, offset, base, lhs);
402 0 : }
403 :
404 1694 : void cmpq_ir(int32_t rhs, RegisterID lhs)
405 : {
406 1694 : if (rhs == 0) {
407 857 : testq_rr(lhs, lhs);
408 857 : return;
409 : }
410 :
411 837 : spew("cmpq $0x%" PRIx64 ", %s", int64_t(rhs), GPReg64Name(lhs));
412 837 : if (CAN_SIGN_EXTEND_8_32(rhs)) {
413 836 : m_formatter.oneByteOp64(OP_GROUP1_EvIb, lhs, GROUP1_OP_CMP);
414 836 : m_formatter.immediate8s(rhs);
415 : } else {
416 1 : if (lhs == rax)
417 0 : m_formatter.oneByteOp64(OP_CMP_EAXIv);
418 : else
419 1 : m_formatter.oneByteOp64(OP_GROUP1_EvIz, lhs, GROUP1_OP_CMP);
420 1 : m_formatter.immediate32(rhs);
421 : }
422 : }
423 :
424 1047 : void cmpq_im(int32_t rhs, int32_t offset, RegisterID base)
425 : {
426 1047 : spew("cmpq $0x%" PRIx64 ", " MEM_ob, int64_t(rhs), ADDR_ob(offset, base));
427 1047 : if (CAN_SIGN_EXTEND_8_32(rhs)) {
428 1047 : m_formatter.oneByteOp64(OP_GROUP1_EvIb, offset, base, GROUP1_OP_CMP);
429 1047 : m_formatter.immediate8s(rhs);
430 : } else {
431 0 : m_formatter.oneByteOp64(OP_GROUP1_EvIz, offset, base, GROUP1_OP_CMP);
432 0 : m_formatter.immediate32(rhs);
433 : }
434 1047 : }
435 :
436 7 : void cmpq_im(int32_t rhs, int32_t offset, RegisterID base, RegisterID index, int scale)
437 : {
438 7 : spew("cmpq $0x%x, " MEM_obs, rhs, ADDR_obs(offset, base, index, scale));
439 7 : if (CAN_SIGN_EXTEND_8_32(rhs)) {
440 7 : m_formatter.oneByteOp64(OP_GROUP1_EvIb, offset, base, index, scale, GROUP1_OP_CMP);
441 7 : m_formatter.immediate8s(rhs);
442 : } else {
443 0 : m_formatter.oneByteOp64(OP_GROUP1_EvIz, offset, base, index, scale, GROUP1_OP_CMP);
444 0 : m_formatter.immediate32(rhs);
445 : }
446 7 : }
447 0 : void cmpq_im(int32_t rhs, const void* addr)
448 : {
449 0 : spew("cmpq $0x%" PRIx64 ", %p", int64_t(rhs), addr);
450 0 : if (CAN_SIGN_EXTEND_8_32(rhs)) {
451 0 : m_formatter.oneByteOp64(OP_GROUP1_EvIb, addr, GROUP1_OP_CMP);
452 0 : m_formatter.immediate8s(rhs);
453 : } else {
454 0 : m_formatter.oneByteOp64(OP_GROUP1_EvIz, addr, GROUP1_OP_CMP);
455 0 : m_formatter.immediate32(rhs);
456 : }
457 0 : }
458 0 : void cmpq_rm(RegisterID rhs, const void* addr)
459 : {
460 0 : spew("cmpq %s, %p", GPReg64Name(rhs), addr);
461 0 : m_formatter.oneByteOp64(OP_CMP_EvGv, addr, rhs);
462 0 : }
463 :
464 1214 : void testq_rr(RegisterID rhs, RegisterID lhs)
465 : {
466 1214 : spew("testq %s, %s", GPReg64Name(rhs), GPReg64Name(lhs));
467 1214 : m_formatter.oneByteOp64(OP_TEST_EvGv, lhs, rhs);
468 1214 : }
469 :
470 13390 : void testq_ir(int32_t rhs, RegisterID lhs)
471 : {
472 : // If the mask fits in a 32-bit immediate, we can use testl with a
473 : // 32-bit subreg.
474 13390 : if (CAN_ZERO_EXTEND_32_64(rhs)) {
475 13390 : testl_ir(rhs, lhs);
476 13390 : return;
477 : }
478 0 : spew("testq $0x%" PRIx64 ", %s", int64_t(rhs), GPReg64Name(lhs));
479 0 : if (lhs == rax)
480 0 : m_formatter.oneByteOp64(OP_TEST_EAXIv);
481 : else
482 0 : m_formatter.oneByteOp64(OP_GROUP3_EvIz, lhs, GROUP3_OP_TEST);
483 0 : m_formatter.immediate32(rhs);
484 : }
485 :
486 57 : void testq_i32m(int32_t rhs, int32_t offset, RegisterID base)
487 : {
488 57 : spew("testq $0x%" PRIx64 ", " MEM_ob, int64_t(rhs), ADDR_ob(offset, base));
489 57 : m_formatter.oneByteOp64(OP_GROUP3_EvIz, offset, base, GROUP3_OP_TEST);
490 57 : m_formatter.immediate32(rhs);
491 57 : }
492 :
493 : void testq_i32m(int32_t rhs, int32_t offset, RegisterID base, RegisterID index, int scale)
494 : {
495 : spew("testq $0x%4x, " MEM_obs, rhs, ADDR_obs(offset, base, index, scale));
496 : m_formatter.oneByteOp64(OP_GROUP3_EvIz, offset, base, index, scale, GROUP3_OP_TEST);
497 : m_formatter.immediate32(rhs);
498 : }
499 :
500 : // Various move ops:
501 :
502 0 : void cmovzq_rr(RegisterID src, RegisterID dst)
503 : {
504 0 : spew("cmovz %s, %s", GPReg16Name(src), GPReg32Name(dst));
505 0 : m_formatter.twoByteOp64(OP2_CMOVZ_GvEv, src, dst);
506 0 : }
507 0 : void cmovzq_mr(int32_t offset, RegisterID base, RegisterID dst)
508 : {
509 0 : spew("cmovz " MEM_ob ", %s", ADDR_ob(offset, base), GPReg32Name(dst));
510 0 : m_formatter.twoByteOp64(OP2_CMOVZ_GvEv, offset, base, dst);
511 0 : }
512 0 : void cmovzq_mr(int32_t offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
513 : {
514 0 : spew("cmovz " MEM_obs ", %s", ADDR_obs(offset, base, index, scale), GPReg32Name(dst));
515 0 : m_formatter.twoByteOp64(OP2_CMOVZ_GvEv, offset, base, index, scale, dst);
516 0 : }
517 :
518 5 : void xchgq_rr(RegisterID src, RegisterID dst)
519 : {
520 5 : spew("xchgq %s, %s", GPReg64Name(src), GPReg64Name(dst));
521 5 : m_formatter.oneByteOp64(OP_XCHG_GvEv, src, dst);
522 5 : }
523 : void xchgq_rm(RegisterID src, int32_t offset, RegisterID base)
524 : {
525 : spew("xchgq %s, " MEM_ob, GPReg64Name(src), ADDR_ob(offset, base));
526 : m_formatter.oneByteOp64(OP_XCHG_GvEv, offset, base, src);
527 : }
528 : void xchgq_rm(RegisterID src, int32_t offset, RegisterID base, RegisterID index, int scale)
529 : {
530 : spew("xchgq %s, " MEM_obs, GPReg64Name(src), ADDR_obs(offset, base, index, scale));
531 : m_formatter.oneByteOp64(OP_XCHG_GvEv, offset, base, index, scale, src);
532 : }
533 :
534 49962 : void movq_rr(RegisterID src, RegisterID dst)
535 : {
536 49962 : spew("movq %s, %s", GPReg64Name(src), GPReg64Name(dst));
537 49962 : m_formatter.oneByteOp64(OP_MOV_GvEv, src, dst);
538 49962 : }
539 :
540 9252 : void movq_rm(RegisterID src, int32_t offset, RegisterID base)
541 : {
542 9252 : spew("movq %s, " MEM_ob, GPReg64Name(src), ADDR_ob(offset, base));
543 9252 : m_formatter.oneByteOp64(OP_MOV_EvGv, offset, base, src);
544 9252 : }
545 :
546 : void movq_rm_disp32(RegisterID src, int32_t offset, RegisterID base)
547 : {
548 : spew("movq %s, " MEM_o32b, GPReg64Name(src), ADDR_o32b(offset, base));
549 : m_formatter.oneByteOp64_disp32(OP_MOV_EvGv, offset, base, src);
550 : }
551 :
552 65 : void movq_rm(RegisterID src, int32_t offset, RegisterID base, RegisterID index, int scale)
553 : {
554 65 : spew("movq %s, " MEM_obs, GPReg64Name(src), ADDR_obs(offset, base, index, scale));
555 65 : m_formatter.oneByteOp64(OP_MOV_EvGv, offset, base, index, scale, src);
556 65 : }
557 :
558 0 : void movq_rm(RegisterID src, const void* addr)
559 : {
560 0 : if (src == rax && !IsAddressImmediate(addr)) {
561 0 : movq_EAXm(addr);
562 0 : return;
563 : }
564 :
565 0 : spew("movq %s, %p", GPReg64Name(src), addr);
566 0 : m_formatter.oneByteOp64(OP_MOV_EvGv, addr, src);
567 : }
568 :
569 0 : void movq_mEAX(const void* addr)
570 : {
571 0 : if (IsAddressImmediate(addr)) {
572 0 : movq_mr(addr, rax);
573 0 : return;
574 : }
575 :
576 0 : spew("movq %p, %%rax", addr);
577 0 : m_formatter.oneByteOp64(OP_MOV_EAXOv);
578 0 : m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
579 : }
580 :
581 0 : void movq_EAXm(const void* addr)
582 : {
583 0 : if (IsAddressImmediate(addr)) {
584 0 : movq_rm(rax, addr);
585 0 : return;
586 : }
587 :
588 0 : spew("movq %%rax, %p", addr);
589 0 : m_formatter.oneByteOp64(OP_MOV_OvEAX);
590 0 : m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
591 : }
592 :
593 51409 : void movq_mr(int32_t offset, RegisterID base, RegisterID dst)
594 : {
595 51409 : spew("movq " MEM_ob ", %s", ADDR_ob(offset, base), GPReg64Name(dst));
596 51409 : m_formatter.oneByteOp64(OP_MOV_GvEv, offset, base, dst);
597 51409 : }
598 :
599 : void movq_mr_disp32(int32_t offset, RegisterID base, RegisterID dst)
600 : {
601 : spew("movq " MEM_o32b ", %s", ADDR_o32b(offset, base), GPReg64Name(dst));
602 : m_formatter.oneByteOp64_disp32(OP_MOV_GvEv, offset, base, dst);
603 : }
604 :
605 344 : void movq_mr(int32_t offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
606 : {
607 344 : spew("movq " MEM_obs ", %s", ADDR_obs(offset, base, index, scale), GPReg64Name(dst));
608 344 : m_formatter.oneByteOp64(OP_MOV_GvEv, offset, base, index, scale, dst);
609 344 : }
610 :
611 0 : void movq_mr(const void* addr, RegisterID dst)
612 : {
613 0 : if (dst == rax && !IsAddressImmediate(addr)) {
614 0 : movq_mEAX(addr);
615 0 : return;
616 : }
617 :
618 0 : spew("movq %p, %s", addr, GPReg64Name(dst));
619 0 : m_formatter.oneByteOp64(OP_MOV_GvEv, addr, dst);
620 : }
621 :
622 118 : void leaq_mr(int32_t offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
623 : {
624 118 : spew("leaq " MEM_obs ", %s", ADDR_obs(offset, base, index, scale), GPReg64Name(dst));
625 118 : m_formatter.oneByteOp64(OP_LEA, offset, base, index, scale, dst);
626 118 : }
627 :
628 1381 : void movq_i32m(int32_t imm, int32_t offset, RegisterID base)
629 : {
630 1381 : spew("movq $%d, " MEM_ob, imm, ADDR_ob(offset, base));
631 1381 : m_formatter.oneByteOp64(OP_GROUP11_EvIz, offset, base, GROUP11_MOV);
632 1381 : m_formatter.immediate32(imm);
633 1381 : }
634 0 : void movq_i32m(int32_t imm, int32_t offset, RegisterID base, RegisterID index, int scale)
635 : {
636 0 : spew("movq $%d, " MEM_obs, imm, ADDR_obs(offset, base, index, scale));
637 0 : m_formatter.oneByteOp64(OP_GROUP11_EvIz, offset, base, index, scale, GROUP11_MOV);
638 0 : m_formatter.immediate32(imm);
639 0 : }
640 0 : void movq_i32m(int32_t imm, const void* addr)
641 : {
642 0 : spew("movq $%d, %p", imm, addr);
643 0 : m_formatter.oneByteOp64(OP_GROUP11_EvIz, addr, GROUP11_MOV);
644 0 : m_formatter.immediate32(imm);
645 0 : }
646 :
647 : // Note that this instruction sign-extends its 32-bit immediate field to 64
648 : // bits and loads the 64-bit value into a 64-bit register.
649 : //
650 : // Note also that this is similar to the movl_i32r instruction, except that
651 : // movl_i32r *zero*-extends its 32-bit immediate, and it has smaller code
652 : // size, so it's preferred for values which could use either.
653 0 : void movq_i32r(int32_t imm, RegisterID dst)
654 : {
655 0 : spew("movq $%d, %s", imm, GPRegName(dst));
656 0 : m_formatter.oneByteOp64(OP_GROUP11_EvIz, dst, GROUP11_MOV);
657 0 : m_formatter.immediate32(imm);
658 0 : }
659 :
660 62253 : void movq_i64r(int64_t imm, RegisterID dst)
661 : {
662 62253 : spew("movabsq $0x%" PRIx64 ", %s", imm, GPReg64Name(dst));
663 62253 : m_formatter.oneByteOp64(OP_MOV_EAXIv, dst);
664 62251 : m_formatter.immediate64(imm);
665 62247 : }
666 :
667 0 : void movsbq_mr(int32_t offset, RegisterID base, RegisterID dst)
668 : {
669 0 : spew("movsbq " MEM_ob ", %s", ADDR_ob(offset, base), GPReg64Name(dst));
670 0 : m_formatter.twoByteOp64(OP2_MOVSX_GvEb, offset, base, dst);
671 0 : }
672 0 : void movsbq_mr(int32_t offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
673 : {
674 0 : spew("movsbq " MEM_obs ", %s", ADDR_obs(offset, base, index, scale), GPReg64Name(dst));
675 0 : m_formatter.twoByteOp64(OP2_MOVSX_GvEb, offset, base, index, scale, dst);
676 0 : }
677 :
678 0 : void movswq_mr(int32_t offset, RegisterID base, RegisterID dst)
679 : {
680 0 : spew("movswq " MEM_ob ", %s", ADDR_ob(offset, base), GPReg64Name(dst));
681 0 : m_formatter.twoByteOp64(OP2_MOVSX_GvEw, offset, base, dst);
682 0 : }
683 0 : void movswq_mr(int32_t offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
684 : {
685 0 : spew("movswq " MEM_obs ", %s", ADDR_obs(offset, base, index, scale), GPReg64Name(dst));
686 0 : m_formatter.twoByteOp64(OP2_MOVSX_GvEw, offset, base, index, scale, dst);
687 0 : }
688 :
689 0 : void movslq_rr(RegisterID src, RegisterID dst)
690 : {
691 0 : spew("movslq %s, %s", GPReg32Name(src), GPReg64Name(dst));
692 0 : m_formatter.oneByteOp64(OP_MOVSXD_GvEv, src, dst);
693 0 : }
694 0 : void movslq_mr(int32_t offset, RegisterID base, RegisterID dst)
695 : {
696 0 : spew("movslq " MEM_ob ", %s", ADDR_ob(offset, base), GPReg64Name(dst));
697 0 : m_formatter.oneByteOp64(OP_MOVSXD_GvEv, offset, base, dst);
698 0 : }
699 0 : void movslq_mr(int32_t offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
700 : {
701 0 : spew("movslq " MEM_obs ", %s", ADDR_obs(offset, base, index, scale), GPReg64Name(dst));
702 0 : m_formatter.oneByteOp64(OP_MOVSXD_GvEv, offset, base, index, scale, dst);
703 0 : }
704 :
705 : MOZ_MUST_USE JmpSrc
706 : movl_ripr(RegisterID dst)
707 : {
708 : m_formatter.oneByteRipOp(OP_MOV_GvEv, 0, (RegisterID)dst);
709 : JmpSrc label(m_formatter.size());
710 : spew("movl " MEM_o32r ", %s", ADDR_o32r(label.offset()), GPReg32Name(dst));
711 : return label;
712 : }
713 :
714 : MOZ_MUST_USE JmpSrc
715 : movl_rrip(RegisterID src)
716 : {
717 : m_formatter.oneByteRipOp(OP_MOV_EvGv, 0, (RegisterID)src);
718 : JmpSrc label(m_formatter.size());
719 : spew("movl %s, " MEM_o32r "", GPReg32Name(src), ADDR_o32r(label.offset()));
720 : return label;
721 : }
722 :
723 : MOZ_MUST_USE JmpSrc
724 : movq_ripr(RegisterID dst)
725 : {
726 : m_formatter.oneByteRipOp64(OP_MOV_GvEv, 0, dst);
727 : JmpSrc label(m_formatter.size());
728 : spew("movq " MEM_o32r ", %s", ADDR_o32r(label.offset()), GPRegName(dst));
729 : return label;
730 : }
731 :
732 : MOZ_MUST_USE JmpSrc
733 : movq_rrip(RegisterID src)
734 : {
735 : m_formatter.oneByteRipOp64(OP_MOV_EvGv, 0, (RegisterID)src);
736 : JmpSrc label(m_formatter.size());
737 : spew("movq %s, " MEM_o32r "", GPRegName(src), ADDR_o32r(label.offset()));
738 : return label;
739 : }
740 :
741 3087 : void leaq_mr(int32_t offset, RegisterID base, RegisterID dst)
742 : {
743 3087 : spew("leaq " MEM_ob ", %s", ADDR_ob(offset, base), GPReg64Name(dst));
744 3087 : m_formatter.oneByteOp64(OP_LEA, offset, base, dst);
745 3087 : }
746 :
747 : MOZ_MUST_USE JmpSrc
748 : leaq_rip(RegisterID dst)
749 : {
750 : m_formatter.oneByteRipOp64(OP_LEA, 0, dst);
751 : JmpSrc label(m_formatter.size());
752 : spew("leaq " MEM_o32r ", %s", ADDR_o32r(label.offset()), GPRegName(dst));
753 : return label;
754 : }
755 :
756 : // Flow control:
757 :
758 19127 : void jmp_rip(int ripOffset)
759 : {
760 : // rip-relative addressing.
761 19127 : spew("jmp *%d(%%rip)", ripOffset);
762 19127 : m_formatter.oneByteRipOp(OP_GROUP5_Ev, ripOffset, GROUP5_OP_JMPN);
763 19127 : }
764 :
765 19127 : void immediate64(int64_t imm)
766 : {
767 19127 : spew(".quad %lld", (long long)imm);
768 19127 : m_formatter.immediate64(imm);
769 19127 : }
770 :
771 : // SSE operations:
772 :
773 0 : void vcvtsq2sd_rr(RegisterID src1, XMMRegisterID src0, XMMRegisterID dst)
774 : {
775 0 : twoByteOpInt64Simd("vcvtsi2sd", VEX_SD, OP2_CVTSI2SD_VsdEd, src1, src0, dst);
776 0 : }
777 0 : void vcvtsq2ss_rr(RegisterID src1, XMMRegisterID src0, XMMRegisterID dst)
778 : {
779 0 : twoByteOpInt64Simd("vcvtsi2ss", VEX_SS, OP2_CVTSI2SD_VsdEd, src1, src0, dst);
780 0 : }
781 :
782 : void vcvtsi2sdq_rr(RegisterID src, XMMRegisterID dst)
783 : {
784 : twoByteOpInt64Simd("vcvtsi2sdq", VEX_SD, OP2_CVTSI2SD_VsdEd, src, invalid_xmm, dst);
785 : }
786 :
787 0 : void vcvttsd2sq_rr(XMMRegisterID src, RegisterID dst)
788 : {
789 0 : twoByteOpSimdInt64("vcvttsd2si", VEX_SD, OP2_CVTTSD2SI_GdWsd, src, dst);
790 0 : }
791 :
792 0 : void vcvttss2sq_rr(XMMRegisterID src, RegisterID dst)
793 : {
794 0 : twoByteOpSimdInt64("vcvttss2si", VEX_SS, OP2_CVTTSD2SI_GdWsd, src, dst);
795 0 : }
796 :
797 9 : void vmovq_rr(XMMRegisterID src, RegisterID dst)
798 : {
799 : // While this is called "vmovq", it actually uses the vmovd encoding
800 : // with a REX prefix modifying it to be 64-bit.
801 9 : twoByteOpSimdInt64("vmovq", VEX_PD, OP2_MOVD_EdVd, (XMMRegisterID)dst, (RegisterID)src);
802 9 : }
803 :
804 52 : void vmovq_rr(RegisterID src, XMMRegisterID dst)
805 : {
806 : // While this is called "vmovq", it actually uses the vmovd encoding
807 : // with a REX prefix modifying it to be 64-bit.
808 52 : twoByteOpInt64Simd("vmovq", VEX_PD, OP2_MOVD_VdEd, src, invalid_xmm, dst);
809 52 : }
810 :
811 : MOZ_MUST_USE JmpSrc
812 0 : vmovsd_ripr(XMMRegisterID dst)
813 : {
814 0 : return twoByteRipOpSimd("vmovsd", VEX_SD, OP2_MOVSD_VsdWsd, invalid_xmm, dst);
815 : }
816 : MOZ_MUST_USE JmpSrc
817 0 : vmovss_ripr(XMMRegisterID dst)
818 : {
819 0 : return twoByteRipOpSimd("vmovss", VEX_SS, OP2_MOVSD_VsdWsd, invalid_xmm, dst);
820 : }
821 : MOZ_MUST_USE JmpSrc
822 : vmovsd_rrip(XMMRegisterID src)
823 : {
824 : return twoByteRipOpSimd("vmovsd", VEX_SD, OP2_MOVSD_WsdVsd, invalid_xmm, src);
825 : }
826 : MOZ_MUST_USE JmpSrc
827 : vmovss_rrip(XMMRegisterID src)
828 : {
829 : return twoByteRipOpSimd("vmovss", VEX_SS, OP2_MOVSD_WsdVsd, invalid_xmm, src);
830 : }
831 : MOZ_MUST_USE JmpSrc
832 : vmovdqa_rrip(XMMRegisterID src)
833 : {
834 : return twoByteRipOpSimd("vmovdqa", VEX_PD, OP2_MOVDQ_WdqVdq, invalid_xmm, src);
835 : }
836 : MOZ_MUST_USE JmpSrc
837 : vmovaps_rrip(XMMRegisterID src)
838 : {
839 : return twoByteRipOpSimd("vmovdqa", VEX_PS, OP2_MOVAPS_WsdVsd, invalid_xmm, src);
840 : }
841 :
842 : MOZ_MUST_USE JmpSrc
843 0 : vmovaps_ripr(XMMRegisterID dst)
844 : {
845 0 : return twoByteRipOpSimd("vmovaps", VEX_PS, OP2_MOVAPS_VsdWsd, invalid_xmm, dst);
846 : }
847 :
848 : MOZ_MUST_USE JmpSrc
849 0 : vmovdqa_ripr(XMMRegisterID dst)
850 : {
851 0 : return twoByteRipOpSimd("vmovdqa", VEX_PD, OP2_MOVDQ_VdqWdq, invalid_xmm, dst);
852 : }
853 :
854 : private:
855 :
856 : MOZ_MUST_USE JmpSrc
857 0 : twoByteRipOpSimd(const char* name, VexOperandType ty, TwoByteOpcodeID opcode,
858 : XMMRegisterID src0, XMMRegisterID dst)
859 : {
860 0 : if (useLegacySSEEncoding(src0, dst)) {
861 0 : m_formatter.legacySSEPrefix(ty);
862 0 : m_formatter.twoByteRipOp(opcode, 0, dst);
863 0 : JmpSrc label(m_formatter.size());
864 0 : if (IsXMMReversedOperands(opcode))
865 0 : spew("%-11s%s, " MEM_o32r "", legacySSEOpName(name), XMMRegName(dst), ADDR_o32r(label.offset()));
866 : else
867 0 : spew("%-11s" MEM_o32r ", %s", legacySSEOpName(name), ADDR_o32r(label.offset()), XMMRegName(dst));
868 0 : return label;
869 : }
870 :
871 0 : m_formatter.twoByteRipOpVex(ty, opcode, 0, src0, dst);
872 0 : JmpSrc label(m_formatter.size());
873 0 : if (src0 == invalid_xmm) {
874 0 : if (IsXMMReversedOperands(opcode))
875 0 : spew("%-11s%s, " MEM_o32r "", name, XMMRegName(dst), ADDR_o32r(label.offset()));
876 : else
877 0 : spew("%-11s" MEM_o32r ", %s", name, ADDR_o32r(label.offset()), XMMRegName(dst));
878 : } else {
879 0 : spew("%-11s" MEM_o32r ", %s, %s", name, ADDR_o32r(label.offset()), XMMRegName(src0), XMMRegName(dst));
880 : }
881 0 : return label;
882 : }
883 :
884 52 : void twoByteOpInt64Simd(const char* name, VexOperandType ty, TwoByteOpcodeID opcode,
885 : RegisterID rm, XMMRegisterID src0, XMMRegisterID dst)
886 : {
887 52 : if (useLegacySSEEncoding(src0, dst)) {
888 52 : if (IsXMMReversedOperands(opcode))
889 0 : spew("%-11s%s, %s", legacySSEOpName(name), XMMRegName(dst), GPRegName(rm));
890 : else
891 52 : spew("%-11s%s, %s", legacySSEOpName(name), GPRegName(rm), XMMRegName(dst));
892 52 : m_formatter.legacySSEPrefix(ty);
893 52 : m_formatter.twoByteOp64(opcode, rm, dst);
894 52 : return;
895 : }
896 :
897 0 : if (src0 == invalid_xmm) {
898 0 : if (IsXMMReversedOperands(opcode))
899 0 : spew("%-11s%s, %s", name, XMMRegName(dst), GPRegName(rm));
900 : else
901 0 : spew("%-11s%s, %s", name, GPRegName(rm), XMMRegName(dst));
902 : } else {
903 0 : spew("%-11s%s, %s, %s", name, GPRegName(rm), XMMRegName(src0), XMMRegName(dst));
904 : }
905 0 : m_formatter.twoByteOpVex64(ty, opcode, rm, src0, dst);
906 : }
907 :
908 9 : void twoByteOpSimdInt64(const char* name, VexOperandType ty, TwoByteOpcodeID opcode,
909 : XMMRegisterID rm, RegisterID dst)
910 : {
911 9 : if (useLegacySSEEncodingForOtherOutput()) {
912 9 : if (IsXMMReversedOperands(opcode))
913 0 : spew("%-11s%s, %s", legacySSEOpName(name), GPRegName(dst), XMMRegName(rm));
914 9 : else if (opcode == OP2_MOVD_EdVd)
915 9 : spew("%-11s%s, %s", legacySSEOpName(name), XMMRegName((XMMRegisterID)dst), GPRegName((RegisterID)rm));
916 : else
917 0 : spew("%-11s%s, %s", legacySSEOpName(name), XMMRegName(rm), GPRegName(dst));
918 9 : m_formatter.legacySSEPrefix(ty);
919 9 : m_formatter.twoByteOp64(opcode, (RegisterID)rm, dst);
920 9 : return;
921 : }
922 :
923 0 : if (IsXMMReversedOperands(opcode))
924 0 : spew("%-11s%s, %s", name, GPRegName(dst), XMMRegName(rm));
925 0 : else if (opcode == OP2_MOVD_EdVd)
926 0 : spew("%-11s%s, %s", name, XMMRegName((XMMRegisterID)dst), GPRegName((RegisterID)rm));
927 : else
928 0 : spew("%-11s%s, %s", name, XMMRegName(rm), GPRegName(dst));
929 0 : m_formatter.twoByteOpVex64(ty, opcode, (RegisterID)rm, invalid_xmm, (XMMRegisterID)dst);
930 : }
931 : };
932 :
// Name under which architecture-independent jit code refers to the
// architecture-specific assembler implementation.
typedef BaseAssemblerX64 BaseAssemblerSpecific;
934 :
935 : } // namespace X86Encoding
936 :
937 : } // namespace jit
938 : } // namespace js
939 :
940 : #endif /* jit_x64_BaseAssembler_x64_h */
|