/*-
 * Copyright (c) 2009-2010 Brad Penoff
 * Copyright (c) 2009-2010 Humaira Kamal
 * Copyright (c) 2011-2012 Irene Ruengeler
 * Copyright (c) 2011-2012 Michael Tuexen
 *
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef _USER_ATOMIC_H_
#define _USER_ATOMIC_H_

/* __Userspace__ version of sys/i386/include/atomic.h goes here */

/* TODO In the future, we might want to stop using i386-specific assembly.
 * The options include:
 * - implement them generically (but maybe not truly atomic?) in userspace
 * - have ifdefs for __Userspace_arch_ perhaps (OS isn't enough...)
 */
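
/*
 * A minimal sketch of the first option above, assuming a C11 compiler
 * with <stdatomic.h>: a generic, architecture-independent implementation
 * of the same operations.  Illustrative only and not used; the casts from
 * plain int pointers to _Atomic int pointers assume the representations
 * match, which C11 does not guarantee.
 */
#if 0
#include <stdatomic.h>

#define atomic_add_int(P, V)      (void)atomic_fetch_add((_Atomic int *)(P), (V))
#define atomic_subtract_int(P, V) (void)atomic_fetch_sub((_Atomic int *)(P), (V))
#define atomic_fetchadd_int(P, V) atomic_fetch_add((_Atomic int *)(P), (V))

/* Returns 0 on failure, non-zero on success, matching the FreeBSD
 * atomic_cmpset_int() semantics documented further below.
 */
static inline int
atomic_cmpset_int(volatile int *dst, int exp, int src)
{
	return (atomic_compare_exchange_strong((_Atomic int *)dst, &exp, src));
}
#endif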

#include <stdio.h>
#include <sys/types.h>

#if defined(__Userspace_os_Darwin) || defined(__Userspace_os_Windows)
#if defined(__Userspace_os_Windows)
/* Note: InterlockedExchangeAdd() returns the *previous* value of *addr. */
#define atomic_add_int(addr, val) InterlockedExchangeAdd((LPLONG)addr, (LONG)val)
#define atomic_fetchadd_int(addr, val) InterlockedExchangeAdd((LPLONG)addr, (LONG)val)
#define atomic_subtract_int(addr, val) InterlockedExchangeAdd((LPLONG)addr, -((LONG)val))
/* InterlockedCompareExchange() returns the initial value of *dst, not a
 * success flag; compare it against exp so that this macro returns non-zero
 * on success, matching the FreeBSD atomic_cmpset_int() semantics.
 */
#define atomic_cmpset_int(dst, exp, src) (InterlockedCompareExchange((LPLONG)dst, src, exp) == (LONG)(exp))
#define SCTP_DECREMENT_AND_CHECK_REFCOUNT(addr) (InterlockedExchangeAdd((LPLONG)addr, (-1L)) == 1)
#else
#include <libkern/OSAtomic.h>
/* Note: OSAtomicAdd32Barrier() returns the *new* value of *addr, so on this
 * platform atomic_fetchadd_int() does not return the previous value as it
 * does elsewhere.
 */
#define atomic_add_int(addr, val) OSAtomicAdd32Barrier(val, (int32_t *)addr)
#define atomic_fetchadd_int(addr, val) OSAtomicAdd32Barrier(val, (int32_t *)addr)
#define atomic_subtract_int(addr, val) OSAtomicAdd32Barrier(-val, (int32_t *)addr)
#define atomic_cmpset_int(dst, exp, src) OSAtomicCompareAndSwapIntBarrier(exp, src, (int *)dst)
/* The new value is returned, so the counter was 1 iff it is now 0. */
#define SCTP_DECREMENT_AND_CHECK_REFCOUNT(addr) (atomic_fetchadd_int(addr, -1) == 0)
#endif

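/*
 * Illustrative sketch (hypothetical names, not part of this header): with
 * the comparison against exp in the Windows macro above, atomic_cmpset_int()
 * behaves like its FreeBSD counterpart on every platform:
 */
#if 0
static int example_flag = 0;

static int
example_try_claim(void)
{
	/* Returns non-zero only for the one thread that flips 0 -> 1. */
	return (atomic_cmpset_int(&example_flag, 0, 1));
}
#endif
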
/* Note: on Darwin atomic_fetchadd_int() returns the new value, so newval
 * below is the post-decrement count; on Windows it returns the old value,
 * so the negative check there only fires once the counter was already
 * negative before the decrement.
 */
#if defined(INVARIANTS)
#define SCTP_SAVE_ATOMIC_DECREMENT(addr, val) \
{ \
	int32_t newval; \
	newval = atomic_fetchadd_int(addr, -val); \
	if (newval < 0) { \
		panic("Counter goes negative"); \
	} \
}
#else
#define SCTP_SAVE_ATOMIC_DECREMENT(addr, val) \
{ \
	int32_t newval; \
	newval = atomic_fetchadd_int(addr, -val); \
	if (newval < 0) { \
		*addr = 0; \
	} \
}
#endif
/* atomic_init() is needed whether or not INVARIANTS is defined, so it
 * lives outside of the conditional above.
 */
#if defined(__Userspace_os_Windows)
static void atomic_init() {} /* empty when we are not using atomic_mtx */
#else
static inline void atomic_init() {} /* empty when we are not using atomic_mtx */
#endif

#else
/* Using gcc built-in functions for atomic memory operations.
 * Reference: http://gcc.gnu.org/onlinedocs/gcc-4.1.0/gcc/Atomic-Builtins.html
 * Requires gcc version 4.1.0 or later; compile with -march=i486 or higher.
 */

/* Atomically add V to *P. */
#define atomic_add_int(P, V) (void) __sync_fetch_and_add(P, V)

/* Atomically subtract V from *P. */
#define atomic_subtract_int(P, V) (void) __sync_fetch_and_sub(P, V)

/*
 * Atomically add the value of v to the integer pointed to by p and return
 * the previous value of *p.
 */
#define atomic_fetchadd_int(p, v) __sync_fetch_and_add(p, v)

/* Following explanation from src/sys/i386/include/atomic.h,
 * for atomic compare and set
 *
 * if (*dst == exp) *dst = src (all 32 bit words)
 *
 * Returns 0 on failure, non-zero on success
 */

#define atomic_cmpset_int(dst, exp, src) __sync_bool_compare_and_swap(dst, exp, src)

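/*
 * Illustrative sketch (hypothetical helper, not part of this header): the
 * usual retry loop built on atomic_cmpset_int(), here taking a reference
 * only while the counter is still non-zero:
 */
#if 0
static inline int
example_refcount_acquire_if_live(volatile u_int *count)
{
	u_int old;

	do {
		old = *count;
		if (old == 0)
			return (0);	/* object already released */
	} while (atomic_cmpset_int(count, old, old + 1) == 0);
	return (1);
}
#endif
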
/* __sync_fetch_and_add() returns the old value, so the count was 1 iff
 * this was the last reference.
 */
#define SCTP_DECREMENT_AND_CHECK_REFCOUNT(addr) (atomic_fetchadd_int(addr, -1) == 1)
#if defined(INVARIANTS)
#define SCTP_SAVE_ATOMIC_DECREMENT(addr, val) \
{ \
	int32_t oldval; \
	oldval = atomic_fetchadd_int(addr, -val); \
	if (oldval < val) { \
		panic("Counter goes negative"); \
	} \
}
#else
#define SCTP_SAVE_ATOMIC_DECREMENT(addr, val) \
{ \
	int32_t oldval; \
	oldval = atomic_fetchadd_int(addr, -val); \
	if (oldval < val) { \
		*addr = 0; \
	} \
}
#endif
static inline void atomic_init() {} /* empty when we are not using atomic_mtx */
#endif

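/*
 * Illustrative sketch (hypothetical names, not part of this header):
 * typical use of the reference-counting helpers above.  The object is
 * destroyed exactly once, by whichever thread drops the last reference.
 */
#if 0
#include <stdlib.h>	/* for free() */

struct example_obj {
	int obj_refcount;
	/* ... */
};

static void
example_obj_release(struct example_obj *obj)
{
	/* Decrement and free only if this was the last reference. */
	if (SCTP_DECREMENT_AND_CHECK_REFCOUNT(&obj->obj_refcount)) {
		free(obj);
	}
}
#endif
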
#if 0 /* using libatomic_ops */
#include "user_include/atomic_ops.h"

/* Atomically add V to *P, and return the original value of *P. */
#define atomic_add_int(P, V) AO_fetch_and_add((AO_t*)P, V)

#define atomic_subtract_int(P, V) AO_fetch_and_add((AO_t*)P, -(V))

/*
 * Atomically add the value of v to the integer pointed to by p and return
 * the previous value of *p.
 */
#define atomic_fetchadd_int(p, v) AO_fetch_and_add((AO_t*)p, v)

/* Atomically compare *addr to old_val, and replace *addr by new_val
 * if the first comparison succeeds.  Returns nonzero if the comparison
 * succeeded and *addr was updated.
 */
/* The following explanation from src/sys/i386/include/atomic.h matches
 * that of AO_compare_and_swap above.
 *
 * Atomic compare and set, used by the mutex functions
 *
 * if (*dst == exp) *dst = src (all 32 bit words)
 *
 * Returns 0 on failure, non-zero on success
 */

#define atomic_cmpset_int(dst, exp, src) AO_compare_and_swap((AO_t*)dst, exp, src)

static inline void atomic_init() {} /* empty when we are not using atomic_mtx */
#endif /* closing #if for libatomic */

#if 0 /* using atomic_mtx */

#if !defined(__Userspace_os_Windows)
#include <pthread.h>
#endif

extern userland_mutex_t atomic_mtx;

#if defined(__Userspace_os_Windows)
static inline void atomic_init() {
	InitializeCriticalSection(&atomic_mtx);
}
static inline void atomic_destroy() {
	DeleteCriticalSection(&atomic_mtx);
}
static inline void atomic_lock() {
	EnterCriticalSection(&atomic_mtx);
}
static inline void atomic_unlock() {
	LeaveCriticalSection(&atomic_mtx);
}
#else
static inline void atomic_init() {
	(void)pthread_mutex_init(&atomic_mtx, NULL);
}
static inline void atomic_destroy() {
	(void)pthread_mutex_destroy(&atomic_mtx);
}
static inline void atomic_lock() {
	(void)pthread_mutex_lock(&atomic_mtx);
}
static inline void atomic_unlock() {
	(void)pthread_mutex_unlock(&atomic_mtx);
}
#endif
/*
 * For userland, always use lock prefixes so that the binaries will run
 * on both SMP and !SMP systems.
 */

#define MPLOCKED "lock ; "

/*
 * Atomically add the value of v to the integer pointed to by p and return
 * the previous value of *p.
 */
static __inline u_int
atomic_fetchadd_int(volatile void *n, u_int v)
{
	int *p = (int *) n;

	atomic_lock();
	__asm __volatile(
	"	" MPLOCKED "		"
	"	xaddl	%0, %1 ;	"
	"#	atomic_fetchadd_int"
	: "+r" (v),			/* 0 (result) */
	  "=m" (*p)			/* 1 */
	: "m" (*p));			/* 2 */
	atomic_unlock();

	return (v);
}


#ifdef CPU_DISABLE_CMPXCHG

/* Inherited from the i386 kernel; note that cli/popfl require ring-0
 * privileges, so this variant cannot actually run in userspace.
 */
static __inline int
atomic_cmpset_int(volatile u_int *dst, u_int exp, u_int src)
{
	u_char res;

	atomic_lock();
	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	cmpl	%3,%4 ;		"
	"	jne	1f ;		"
	"	movl	%2,%1 ;		"
	"1:				"
	"	sete	%0 ;		"
	"	popfl ;			"
	"# atomic_cmpset_int"
	: "=q" (res),			/* 0 */
	  "=m" (*dst)			/* 1 */
	: "r" (src),			/* 2 */
	  "r" (exp),			/* 3 */
	  "m" (*dst)			/* 4 */
	: "memory");
	atomic_unlock();

	return (res);
}


#else /* !CPU_DISABLE_CMPXCHG */

static __inline int
atomic_cmpset_int(volatile u_int *dst, u_int exp, u_int src)
{
	u_char res;

	atomic_lock();
	__asm __volatile(
	"	" MPLOCKED "		"
	"	cmpxchgl %2,%1 ;	"
	"	sete	%0 ;		"
	"1:				"
	"# atomic_cmpset_int"
	: "=a" (res),			/* 0 */
	  "=m" (*dst)			/* 1 */
	: "r" (src),			/* 2 */
	  "a" (exp),			/* 3 */
	  "m" (*dst)			/* 4 */
	: "memory");
	atomic_unlock();

	return (res);
}

#endif /* CPU_DISABLE_CMPXCHG */

#define atomic_add_int(P, V) do { \
	atomic_lock(); \
	(*(u_int *)(P) += (V)); \
	atomic_unlock(); \
} while (0)
#define atomic_subtract_int(P, V) do { \
	atomic_lock(); \
	(*(u_int *)(P) -= (V)); \
	atomic_unlock(); \
} while (0)

#endif /* closing #if for atomic_mtx */
#endif /* _USER_ATOMIC_H_ */