/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

/* For overall documentation, see jit/AtomicOperations.h */

#ifndef jit_shared_AtomicOperations_x86_shared_h
#define jit_shared_AtomicOperations_x86_shared_h

#include "mozilla/Assertions.h"
#include "mozilla/Types.h"

#include <string.h>             // for ::memcpy and ::memmove below

// Lock-freedom on x86 and x64:
//
// On x86 and x64 there are atomic instructions for 8-byte accesses:
//
// Loads and stores:
//  - Loads and stores are single-copy atomic for up to 8 bytes
//    starting with the Pentium; the store requires a post-fence for
//    sequential consistency
//
// CompareExchange:
//  - On x64 CMPXCHGQ can always be used
//  - On x86 CMPXCHG8B can be used starting with the first Pentium
//
// Exchange:
//  - On x64 XCHGQ can always be used
//  - On x86 one has to use a CompareExchange loop
//
// Observe also that the JIT will not be enabled unless we have SSE2,
// which was introduced with the Pentium 4. Ergo the JIT will be able
// to use atomic instructions for up to 8 bytes on all x86 platforms
// for the primitives we care about.
//
// However, C++ compilers and libraries may not provide access to
// those 8-byte instructions directly. Clang in 32-bit mode does not
// provide 8-byte atomic primitives at all (even with eg -arch i686
// specified). On Windows 32-bit, MSVC does not provide
// _InterlockedExchange64 since it does not map directly to an
// instruction.
//
// There are thus sundry workarounds below to handle known corner
// cases.

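// By way of illustration only: on 32-bit x86 an 8-byte Exchange can be
// synthesized from the CMPXCHG8B-style CompareExchange in a loop. A
// hypothetical sketch, spelled here with GCC's __sync builtin (not code
// this header relies on):
//
//   int64_t exchange64(int64_t* addr, int64_t val) {
//       int64_t old;
//       do {
//           old = *addr;
//       } while (!__sync_bool_compare_and_swap(addr, old, val));
//       return old;
//   }
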
#if defined(__clang__) || defined(__GNUC__)

// The default implementation tactic for gcc/clang is to use the newer
// __atomic intrinsics added for use in C++11 <atomic>. Where that
// isn't available, we use GCC's older __sync functions instead.
//
// ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS is kept as a backward
// compatible option for older compilers: enable this to use GCC's old
// __sync functions instead of the newer __atomic functions. This
// will be required for GCC 4.6.x and earlier, and probably for Clang
// 3.1, should we need to use those versions.

// #define ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS

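// A hypothetical sketch of how that fallback could be selected by compiler
// version instead of by hand (not used here; the hand-toggled macro above is
// what this file actually honors):
//
//   #if defined(__GNUC__) && !defined(__clang__) && \
//       (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ <= 6))
//   #  define ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
//   #endif
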
// Lock-free 8-byte atomics are assumed on x86 but must be disabled in
// corner cases; see comments below and in isLockfree8().

# define LOCKFREE8

// This pertains to Clang compiling with -m32: in this case the 64-bit
// __atomic builtins are not available (observed on various Mac OS X
// versions with Apple Clang and on Linux with Clang 3.5).
//
// For now just punt: disable lock-free 8-byte data. The JIT will
// call isLockfree8() to determine what to do and will stay in sync.
// (Bug 1146817 tracks the work to improve on this.)

# if defined(__clang__) && defined(__i386)
#  undef LOCKFREE8
# endif

inline bool
js::jit::AtomicOperations::isLockfree8()
{
# ifndef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
    MOZ_ASSERT(__atomic_always_lock_free(sizeof(int8_t), 0));
    MOZ_ASSERT(__atomic_always_lock_free(sizeof(int16_t), 0));
    MOZ_ASSERT(__atomic_always_lock_free(sizeof(int32_t), 0));
# endif
# ifdef LOCKFREE8
#  ifndef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
    MOZ_ASSERT(__atomic_always_lock_free(sizeof(int64_t), 0));
#  endif
    return true;
# else
    return false;
# endif
}
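
// Callers are expected to consult this predicate before relying on 8-byte
// atomics. An illustrative (hypothetical) usage pattern, not code from this
// header:
//
//   if (!js::jit::AtomicOperations::isLockfree8()) {
//       // Do not emit inline 64-bit atomics; take a fallback path instead.
//   }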

inline void
js::jit::AtomicOperations::fenceSeqCst()
{
# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
    __sync_synchronize();
# else
    __atomic_thread_fence(__ATOMIC_SEQ_CST);
# endif
}

template<typename T>
inline T
js::jit::AtomicOperations::loadSeqCst(T* addr)
{
    MOZ_ASSERT(sizeof(T) < 8 || isLockfree8());
# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
    // Inhibit compiler reordering with a volatile load. The x86 does
    // not reorder loads with respect to subsequent loads or stores,
    // so no ordering barrier is required here. See the more elaborate
    // comments in storeSeqCst.
    T v = *static_cast<T volatile*>(addr);
# else
    T v;
    __atomic_load(addr, &v, __ATOMIC_SEQ_CST);
# endif
    return v;
}

# ifndef LOCKFREE8
template<>
inline int64_t
js::jit::AtomicOperations::loadSeqCst(int64_t* addr)
{
    MOZ_CRASH();
}

template<>
inline uint64_t
js::jit::AtomicOperations::loadSeqCst(uint64_t* addr)
{
    MOZ_CRASH();
}
# endif // LOCKFREE8

template<typename T>
inline void
js::jit::AtomicOperations::storeSeqCst(T* addr, T val)
{
    MOZ_ASSERT(sizeof(T) < 8 || isLockfree8());
# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
    // Inhibit compiler reordering with a volatile store. The x86 may
    // reorder a store with respect to a subsequent load from a
    // different location, hence there is an ordering barrier here to
    // prevent that.
    //
    // By way of background, look to eg
    // http://bartoszmilewski.com/2008/11/05/who-ordered-memory-fences-on-an-x86/
    //
    // Consider:
    //
    //   uint8_t x = 0, y = 0; // to start
    //
    // thread1:
    //   sx: AtomicOperations::storeSeqCst(&x, 1);
    //   gy: uint8_t obs1 = AtomicOperations::loadSeqCst(&y);
    //
    // thread2:
    //   sy: AtomicOperations::storeSeqCst(&y, 1);
    //   gx: uint8_t obs2 = AtomicOperations::loadSeqCst(&x);
    //
    // Sequential consistency requires a total global ordering of
    // operations: sx-gy-sy-gx, sx-sy-gx-gy, sx-sy-gy-gx, sy-gx-sx-gy,
    // sy-sx-gy-gx, or sy-sx-gx-gy. In every ordering at least one of
    // sx-before-gx or sy-before-gy happens, so *at least one* of
    // obs1/obs2 is 1.
    //
    // If AtomicOperations::{load,store}SeqCst were just volatile
    // {load,store}, x86 could reorder gx/gy before each thread's
    // prior store. That would permit gx-gy-sx-sy: both loads would be
    // 0! Thus after a volatile store we must synchronize to ensure
    // the store happens before the load.
    *static_cast<T volatile*>(addr) = val;
    __sync_synchronize();
# else
    __atomic_store(addr, &val, __ATOMIC_SEQ_CST);
# endif
}
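
// By way of illustration only (not a statement about what any particular
// compiler emits): a SeqCst store on x86 is typically lowered either as a
// plain store followed by a full fence, or as an implicitly locked exchange:
//
//   mov   [addr], val
//   mfence                 ; order the store before later loads
//
// or
//
//   xchg  [addr], val      ; the LOCK prefix is implicit for XCHG with memory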

# ifndef LOCKFREE8
template<>
inline void
js::jit::AtomicOperations::storeSeqCst(int64_t* addr, int64_t val)
{
    MOZ_CRASH();
}

template<>
inline void
js::jit::AtomicOperations::storeSeqCst(uint64_t* addr, uint64_t val)
{
    MOZ_CRASH();
}
# endif // LOCKFREE8

template<typename T>
inline T
js::jit::AtomicOperations::exchangeSeqCst(T* addr, T val)
{
    MOZ_ASSERT(sizeof(T) < 8 || isLockfree8());
# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
    T v;
    do {
        // Here I assume the compiler will not hoist the load. It
        // shouldn't, because the CAS could affect *addr.
        v = *addr;
    } while (!__sync_bool_compare_and_swap(addr, v, val));
    return v;
# else
    T v;
    __atomic_exchange(addr, &val, &v, __ATOMIC_SEQ_CST);
    return v;
# endif
}

# ifndef LOCKFREE8
template<>
inline int64_t
js::jit::AtomicOperations::exchangeSeqCst(int64_t* addr, int64_t val)
{
    MOZ_CRASH();
}

template<>
inline uint64_t
js::jit::AtomicOperations::exchangeSeqCst(uint64_t* addr, uint64_t val)
{
    MOZ_CRASH();
}
# endif // LOCKFREE8

template<typename T>
inline T
js::jit::AtomicOperations::compareExchangeSeqCst(T* addr, T oldval, T newval)
{
    MOZ_ASSERT(sizeof(T) < 8 || isLockfree8());
# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
    return __sync_val_compare_and_swap(addr, oldval, newval);
# else
    __atomic_compare_exchange(addr, &oldval, &newval, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
    return oldval;
# endif
}

# ifndef LOCKFREE8
template<>
inline int64_t
js::jit::AtomicOperations::compareExchangeSeqCst(int64_t* addr, int64_t oldval, int64_t newval)
{
    MOZ_CRASH();
}

template<>
inline uint64_t
js::jit::AtomicOperations::compareExchangeSeqCst(uint64_t* addr, uint64_t oldval, uint64_t newval)
{
    MOZ_CRASH();
}
# endif // LOCKFREE8

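// By way of illustration only: any of the fetch-and-op primitives below could
// be expressed on top of compareExchangeSeqCst. A hypothetical sketch (not
// part of this header):
//
//   template<typename T>
//   T fetchOrViaCas(T* addr, T val) {
//       T old = js::jit::AtomicOperations::loadSeqCst(addr);
//       for (;;) {
//           T prev = js::jit::AtomicOperations::compareExchangeSeqCst(addr, old, T(old | val));
//           if (prev == old)
//               return prev;      // CAS succeeded; prev is the pre-update value
//           old = prev;           // CAS failed; retry with the freshly observed value
//       }
//   }
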
template<typename T>
inline T
js::jit::AtomicOperations::fetchAddSeqCst(T* addr, T val)
{
    static_assert(sizeof(T) <= 4, "not available for 8-byte values yet");
# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
    return __sync_fetch_and_add(addr, val);
# else
    return __atomic_fetch_add(addr, val, __ATOMIC_SEQ_CST);
# endif
}

template<typename T>
inline T
js::jit::AtomicOperations::fetchSubSeqCst(T* addr, T val)
{
    static_assert(sizeof(T) <= 4, "not available for 8-byte values yet");
# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
    return __sync_fetch_and_sub(addr, val);
# else
    return __atomic_fetch_sub(addr, val, __ATOMIC_SEQ_CST);
# endif
}

template<typename T>
inline T
js::jit::AtomicOperations::fetchAndSeqCst(T* addr, T val)
{
    static_assert(sizeof(T) <= 4, "not available for 8-byte values yet");
# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
    return __sync_fetch_and_and(addr, val);
# else
    return __atomic_fetch_and(addr, val, __ATOMIC_SEQ_CST);
# endif
}

template<typename T>
inline T
js::jit::AtomicOperations::fetchOrSeqCst(T* addr, T val)
{
    static_assert(sizeof(T) <= 4, "not available for 8-byte values yet");
# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
    return __sync_fetch_and_or(addr, val);
# else
    return __atomic_fetch_or(addr, val, __ATOMIC_SEQ_CST);
# endif
}

template<typename T>
inline T
js::jit::AtomicOperations::fetchXorSeqCst(T* addr, T val)
{
    static_assert(sizeof(T) <= 4, "not available for 8-byte values yet");
# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
    return __sync_fetch_and_xor(addr, val);
# else
    return __atomic_fetch_xor(addr, val, __ATOMIC_SEQ_CST);
# endif
}

template<typename T>
inline T
js::jit::AtomicOperations::loadSafeWhenRacy(T* addr)
{
    return *addr; // FIXME (1208663): not yet safe
}

template<typename T>
inline void
js::jit::AtomicOperations::storeSafeWhenRacy(T* addr, T val)
{
    *addr = val; // FIXME (1208663): not yet safe
}

inline void
js::jit::AtomicOperations::memcpySafeWhenRacy(void* dest, const void* src, size_t nbytes)
{
    MOZ_ASSERT(!((char*)dest <= (char*)src && (char*)src < (char*)dest+nbytes));
    MOZ_ASSERT(!((char*)src <= (char*)dest && (char*)dest < (char*)src+nbytes));
    ::memcpy(dest, src, nbytes); // FIXME (1208663): not yet safe
}

inline void
js::jit::AtomicOperations::memmoveSafeWhenRacy(void* dest, const void* src, size_t nbytes)
{
    ::memmove(dest, src, nbytes); // FIXME (1208663): not yet safe
}

template<size_t nbytes>
inline void
js::jit::RegionLock::acquire(void* addr)
{
# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
    while (!__sync_bool_compare_and_swap(&spinlock, 0, 1))
        continue;
# else
    uint32_t zero = 0;
    uint32_t one = 1;
    while (!__atomic_compare_exchange(&spinlock, &zero, &one, false, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE)) {
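        // On failure, __atomic_compare_exchange writes the value it observed
        // in the lock word into 'zero', so reset it before retrying.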
        zero = 0;
        continue;
    }
# endif
}

template<size_t nbytes>
inline void
js::jit::RegionLock::release(void* addr)
{
    MOZ_ASSERT(AtomicOperations::loadSeqCst(&spinlock) == 1, "releasing unlocked region lock");
# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
    __sync_sub_and_fetch(&spinlock, 1); // Should turn into LOCK XADD
# else
    uint32_t zero = 0;
    __atomic_store(&spinlock, &zero, __ATOMIC_SEQ_CST);
# endif
}
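
// An illustrative (hypothetical) sketch of how the region lock is intended to
// be used around an access that is not lock-free; the helper name is made up
// and is not part of this header:
//
//   int64_t lockedLoad64(js::jit::RegionLock* lock, int64_t* addr) {
//       lock->acquire<8>(addr);
//       int64_t val = *addr;
//       lock->release<8>(addr);
//       return val;
//   }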

# undef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
# undef LOCKFREE8

#elif defined(_MSC_VER)

// On 32-bit CPUs there is no 64-bit XCHG instruction; one must
// instead use a loop with CMPXCHG8B. Since MSVC provides
// _InterlockedExchange64 only if it maps directly to XCHG, the
// workaround must be manual.

# define HAVE_EXCHANGE64

# if !_WIN64
#  undef HAVE_EXCHANGE64
# endif

// Below, _ReadWriteBarrier is a compiler directive that prevents
// reordering of instructions and reuse of memory values across it.

inline bool
js::jit::AtomicOperations::isLockfree8()
{
    // See general comments at the start of this file.
    //
    // The MSDN docs suggest very strongly that if code is compiled for
    // Pentium or better the 64-bit primitives will be lock-free, see
    // eg the "Remarks" section of the page for _InterlockedCompareExchange64,
    // currently here:
    // https://msdn.microsoft.com/en-us/library/ttk2z1ws%28v=vs.85%29.aspx
    //
    // But I've found no way to assert that at compile time or run time;
    // there appears to be no WinAPI is_lock_free() test.
    return true;
}

inline void
js::jit::AtomicOperations::fenceSeqCst()
{
    _ReadWriteBarrier();
# if JS_BITS_PER_WORD == 32
    // If configured for SSE2+ we can use the MFENCE instruction, available
    // through the _mm_mfence intrinsic. But for non-SSE2 systems we have
    // to do something else. Linux uses "lock add [esp], 0", so why not?
    __asm lock add [esp], 0;
# else
    _mm_mfence();
# endif
}

template<typename T>
inline T
js::jit::AtomicOperations::loadSeqCst(T* addr)
{
    MOZ_ASSERT(sizeof(T) < 8 || isLockfree8());
    _ReadWriteBarrier();
    T v = *addr;
    _ReadWriteBarrier();
    return v;
}

template<typename T>
inline void
js::jit::AtomicOperations::storeSeqCst(T* addr, T val)
{
    MOZ_ASSERT(sizeof(T) < 8 || isLockfree8());
    _ReadWriteBarrier();
    *addr = val;
    fenceSeqCst();
}

# define MSC_EXCHANGEOP(T, U, xchgop) \
    template<> inline T \
    js::jit::AtomicOperations::exchangeSeqCst(T* addr, T val) { \
        MOZ_ASSERT(sizeof(T) < 8 || isLockfree8()); \
        return (T)xchgop((U volatile*)addr, (U)val); \
    }

# define MSC_EXCHANGEOP_CAS(T, U, cmpxchg) \
    template<> inline T \
    js::jit::AtomicOperations::exchangeSeqCst(T* addr, T newval) { \
        MOZ_ASSERT(sizeof(T) < 8 || isLockfree8()); \
        T oldval; \
        do { \
            _ReadWriteBarrier(); \
            oldval = *addr; \
        } while (cmpxchg((U volatile*)addr, (U)newval, (U)oldval) != (U)oldval); \
        return oldval; \
    }

MSC_EXCHANGEOP(int8_t, char, _InterlockedExchange8)
MSC_EXCHANGEOP(uint8_t, char, _InterlockedExchange8)
MSC_EXCHANGEOP(int16_t, short, _InterlockedExchange16)
MSC_EXCHANGEOP(uint16_t, short, _InterlockedExchange16)
MSC_EXCHANGEOP(int32_t, long, _InterlockedExchange)
MSC_EXCHANGEOP(uint32_t, long, _InterlockedExchange)
# ifdef HAVE_EXCHANGE64
MSC_EXCHANGEOP(int64_t, __int64, _InterlockedExchange64)
MSC_EXCHANGEOP(uint64_t, __int64, _InterlockedExchange64)
# else
MSC_EXCHANGEOP_CAS(int64_t, __int64, _InterlockedCompareExchange64)
MSC_EXCHANGEOP_CAS(uint64_t, __int64, _InterlockedCompareExchange64)
# endif

# undef MSC_EXCHANGEOP
# undef MSC_EXCHANGEOP_CAS

# define MSC_CAS(T, U, cmpxchg) \
    template<> inline T \
    js::jit::AtomicOperations::compareExchangeSeqCst(T* addr, T oldval, T newval) { \
        MOZ_ASSERT(sizeof(T) < 8 || isLockfree8()); \
        return (T)cmpxchg((U volatile*)addr, (U)newval, (U)oldval); \
    }

MSC_CAS(int8_t, char, _InterlockedCompareExchange8)
MSC_CAS(uint8_t, char, _InterlockedCompareExchange8)
MSC_CAS(int16_t, short, _InterlockedCompareExchange16)
MSC_CAS(uint16_t, short, _InterlockedCompareExchange16)
MSC_CAS(int32_t, long, _InterlockedCompareExchange)
MSC_CAS(uint32_t, long, _InterlockedCompareExchange)
MSC_CAS(int64_t, __int64, _InterlockedCompareExchange64)
MSC_CAS(uint64_t, __int64, _InterlockedCompareExchange64)

# undef MSC_CAS

# define MSC_FETCHADDOP(T, U, xadd) \
    template<> inline T \
    js::jit::AtomicOperations::fetchAddSeqCst(T* addr, T val) { \
        static_assert(sizeof(T) <= 4, "not available for 8-byte values yet"); \
        return (T)xadd((U volatile*)addr, (U)val); \
    } \
    template<> inline T \
    js::jit::AtomicOperations::fetchSubSeqCst(T* addr, T val) { \
        static_assert(sizeof(T) <= 4, "not available for 8-byte values yet"); \
        return (T)xadd((U volatile*)addr, -(U)val); \
    }

MSC_FETCHADDOP(int8_t, char, _InterlockedExchangeAdd8)
MSC_FETCHADDOP(uint8_t, char, _InterlockedExchangeAdd8)
MSC_FETCHADDOP(int16_t, short, _InterlockedExchangeAdd16)
MSC_FETCHADDOP(uint16_t, short, _InterlockedExchangeAdd16)
MSC_FETCHADDOP(int32_t, long, _InterlockedExchangeAdd)
MSC_FETCHADDOP(uint32_t, long, _InterlockedExchangeAdd)

# undef MSC_FETCHADDOP

# define MSC_FETCHBITOP(T, U, andop, orop, xorop) \
    template<> inline T \
    js::jit::AtomicOperations::fetchAndSeqCst(T* addr, T val) { \
        static_assert(sizeof(T) <= 4, "not available for 8-byte values yet"); \
        return (T)andop((U volatile*)addr, (U)val); \
    } \
    template<> inline T \
    js::jit::AtomicOperations::fetchOrSeqCst(T* addr, T val) { \
        static_assert(sizeof(T) <= 4, "not available for 8-byte values yet"); \
        return (T)orop((U volatile*)addr, (U)val); \
    } \
    template<> inline T \
    js::jit::AtomicOperations::fetchXorSeqCst(T* addr, T val) { \
        static_assert(sizeof(T) <= 4, "not available for 8-byte values yet"); \
        return (T)xorop((U volatile*)addr, (U)val); \
    }

MSC_FETCHBITOP(int8_t, char, _InterlockedAnd8, _InterlockedOr8, _InterlockedXor8)
MSC_FETCHBITOP(uint8_t, char, _InterlockedAnd8, _InterlockedOr8, _InterlockedXor8)
MSC_FETCHBITOP(int16_t, short, _InterlockedAnd16, _InterlockedOr16, _InterlockedXor16)
MSC_FETCHBITOP(uint16_t, short, _InterlockedAnd16, _InterlockedOr16, _InterlockedXor16)
MSC_FETCHBITOP(int32_t, long, _InterlockedAnd, _InterlockedOr, _InterlockedXor)
MSC_FETCHBITOP(uint32_t, long, _InterlockedAnd, _InterlockedOr, _InterlockedXor)

# undef MSC_FETCHBITOP

template<typename T>
inline T
js::jit::AtomicOperations::loadSafeWhenRacy(T* addr)
{
    return *addr; // FIXME (1208663): not yet safe
}

template<typename T>
inline void
js::jit::AtomicOperations::storeSafeWhenRacy(T* addr, T val)
{
    *addr = val; // FIXME (1208663): not yet safe
}

inline void
js::jit::AtomicOperations::memcpySafeWhenRacy(void* dest, const void* src, size_t nbytes)
{
    MOZ_ASSERT(!((char*)dest <= (char*)src && (char*)src < (char*)dest+nbytes));
    MOZ_ASSERT(!((char*)src <= (char*)dest && (char*)dest < (char*)src+nbytes));
    ::memcpy(dest, src, nbytes); // FIXME (1208663): not yet safe
}

inline void
js::jit::AtomicOperations::memmoveSafeWhenRacy(void* dest, const void* src, size_t nbytes)
{
    ::memmove(dest, src, nbytes); // FIXME (1208663): not yet safe
}

template<size_t nbytes>
inline void
js::jit::RegionLock::acquire(void* addr)
{
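    // _InterlockedCompareExchange(dest, exchange, comparand) returns the
    // original value of *dest, so spinning while it returns 1 retries until
    // the lock word is observed unlocked (0) and atomically set to 1.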
    while (_InterlockedCompareExchange((long*)&spinlock, /*newval=*/1, /*oldval=*/0) == 1)
        continue;
}

template<size_t nbytes>
inline void
js::jit::RegionLock::release(void* addr)
{
    MOZ_ASSERT(AtomicOperations::loadSeqCst(&spinlock) == 1, "releasing unlocked region lock");
    _InterlockedExchange((long*)&spinlock, 0);
}

# undef HAVE_EXCHANGE64

#elif defined(ENABLE_SHARED_ARRAY_BUFFER)

# error "Either disable JS shared memory at compile time, use GCC, Clang, or MSVC, or add code here"

#endif // platform

#endif // jit_shared_AtomicOperations_x86_shared_h