glibc 2.9
atomic.h
/* Copyright (C) 2002, 2003, 2004, 2006, 2007 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */

#include <stdint.h>
#include <tls.h>  /* For tcbhead_t.  */


typedef int8_t atomic8_t;
typedef uint8_t uatomic8_t;
typedef int_fast8_t atomic_fast8_t;
typedef uint_fast8_t uatomic_fast8_t;

typedef int16_t atomic16_t;
typedef uint16_t uatomic16_t;
typedef int_fast16_t atomic_fast16_t;
typedef uint_fast16_t uatomic_fast16_t;

typedef int32_t atomic32_t;
typedef uint32_t uatomic32_t;
typedef int_fast32_t atomic_fast32_t;
typedef uint_fast32_t uatomic_fast32_t;

typedef int64_t atomic64_t;
typedef uint64_t uatomic64_t;
typedef int_fast64_t atomic_fast64_t;
typedef uint_fast64_t uatomic_fast64_t;

typedef intptr_t atomicptr_t;
typedef uintptr_t uatomicptr_t;
typedef intmax_t atomic_max_t;
typedef uintmax_t uatomic_max_t;


#ifndef LOCK_PREFIX
# ifdef UP
#  define LOCK_PREFIX  /* nothing */
# else
#  define LOCK_PREFIX "lock;"
# endif
#endif
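/* The "lock;" prefix above is needed only on SMP: in uniprocessor
   builds (UP) a single CPU observes its own read-modify-write
   instructions as atomic, since interrupts arrive only at
   instruction boundaries.  */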


#if __GNUC_PREREQ (4, 1)
# define atomic_compare_and_exchange_val_acq(mem, newval, oldval) \
  __sync_val_compare_and_swap (mem, oldval, newval)
# define atomic_compare_and_exchange_bool_acq(mem, newval, oldval) \
  (! __sync_bool_compare_and_swap (mem, oldval, newval))
#else
# define __arch_compare_and_exchange_val_8_acq(mem, newval, oldval) \
  ({ __typeof (*mem) ret; \
     __asm __volatile (LOCK_PREFIX "cmpxchgb %b2, %1" \
                       : "=a" (ret), "=m" (*mem) \
                       : "q" (newval), "m" (*mem), "0" (oldval)); \
     ret; })

# define __arch_compare_and_exchange_val_16_acq(mem, newval, oldval) \
  ({ __typeof (*mem) ret; \
     __asm __volatile (LOCK_PREFIX "cmpxchgw %w2, %1" \
                       : "=a" (ret), "=m" (*mem) \
                       : "r" (newval), "m" (*mem), "0" (oldval)); \
     ret; })

# define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \
  ({ __typeof (*mem) ret; \
     __asm __volatile (LOCK_PREFIX "cmpxchgl %2, %1" \
                       : "=a" (ret), "=m" (*mem) \
                       : "r" (newval), "m" (*mem), "0" (oldval)); \
     ret; })
#endif
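/* Illustrative sketch, not part of the original header: the usual
   CAS retry loop built on atomic_compare_and_exchange_val_acq, here
   storing the maximum of the current value and a candidate.  The
   function and variable names are hypothetical.  */
#if 0
static void
example_atomic_max (atomic32_t *mem, atomic32_t val)
{
  atomic32_t old = *mem;
  while (old < val)
    {
      /* Returns the value found in *mem; equality with OLD means
         the exchange happened and we are done.  */
      atomic32_t seen = atomic_compare_and_exchange_val_acq (mem, val, old);
      if (seen == old)
        break;
      old = seen;  /* Lost the race; retry against the fresh value.  */
    }
}
#endif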


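/* The catomic_* variants below check the multiple_threads flag in
   the thread control block (addressed via %gs) at run time and skip
   the expensive lock prefix while the process is single-threaded:
   the "cmpl $0, %%gs:%P<n>; je 0f; lock; 0:" sequence jumps over the
   standalone lock prefix byte, so the following instruction then
   executes unlocked.  */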
#define __arch_c_compare_and_exchange_val_8_acq(mem, newval, oldval) \
  ({ __typeof (*mem) ret; \
     __asm __volatile ("cmpl $0, %%gs:%P5\n\t" \
                       "je 0f\n\t" \
                       "lock\n" \
                       "0:\tcmpxchgb %b2, %1" \
                       : "=a" (ret), "=m" (*mem) \
                       : "q" (newval), "m" (*mem), "0" (oldval), \
                         "i" (offsetof (tcbhead_t, multiple_threads))); \
     ret; })

#define __arch_c_compare_and_exchange_val_16_acq(mem, newval, oldval) \
  ({ __typeof (*mem) ret; \
     __asm __volatile ("cmpl $0, %%gs:%P5\n\t" \
                       "je 0f\n\t" \
                       "lock\n" \
                       "0:\tcmpxchgw %w2, %1" \
                       : "=a" (ret), "=m" (*mem) \
                       : "r" (newval), "m" (*mem), "0" (oldval), \
                         "i" (offsetof (tcbhead_t, multiple_threads))); \
     ret; })

#define __arch_c_compare_and_exchange_val_32_acq(mem, newval, oldval) \
  ({ __typeof (*mem) ret; \
     __asm __volatile ("cmpl $0, %%gs:%P5\n\t" \
                       "je 0f\n\t" \
                       "lock\n" \
                       "0:\tcmpxchgl %2, %1" \
                       : "=a" (ret), "=m" (*mem) \
                       : "r" (newval), "m" (*mem), "0" (oldval), \
                         "i" (offsetof (tcbhead_t, multiple_threads))); \
     ret; })

/* XXX We do not really need 64-bit compare-and-exchange.  At least
   not at the moment.  Using it would mean causing portability
   problems since not many other 32-bit architectures have support for
   such an operation.  So don't define any code for now.  If it is
   really going to be used the code below can be used on Intel Pentium
   and later, but NOT on i486.  */
#if 1
# define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
  ({ __typeof (*mem) ret = *(mem); abort (); ret = (newval); ret = (oldval); })
# define __arch_c_compare_and_exchange_val_64_acq(mem, newval, oldval) \
  ({ __typeof (*mem) ret = *(mem); abort (); ret = (newval); ret = (oldval); })
#else
# ifdef __PIC__
#  define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
  ({ __typeof (*mem) ret; \
     __asm __volatile ("xchgl %2, %%ebx\n\t" \
                       LOCK_PREFIX "cmpxchg8b %1\n\t" \
                       "xchgl %2, %%ebx" \
                       : "=A" (ret), "=m" (*mem) \
                       : "DS" (((unsigned long long int) (newval)) \
                               & 0xffffffff), \
                         "c" (((unsigned long long int) (newval)) >> 32), \
                         "m" (*mem), "a" (((unsigned long long int) (oldval)) \
                                          & 0xffffffff), \
                         "d" (((unsigned long long int) (oldval)) >> 32)); \
     ret; })

#  define __arch_c_compare_and_exchange_val_64_acq(mem, newval, oldval) \
  ({ __typeof (*mem) ret; \
     __asm __volatile ("xchgl %2, %%ebx\n\t" \
                       "cmpl $0, %%gs:%P7\n\t" \
                       "je 0f\n\t" \
                       "lock\n" \
                       "0:\tcmpxchg8b %1\n\t" \
                       "xchgl %2, %%ebx" \
                       : "=A" (ret), "=m" (*mem) \
                       : "DS" (((unsigned long long int) (newval)) \
                               & 0xffffffff), \
                         "c" (((unsigned long long int) (newval)) >> 32), \
                         "m" (*mem), "a" (((unsigned long long int) (oldval)) \
                                          & 0xffffffff), \
                         "d" (((unsigned long long int) (oldval)) >> 32), \
                         "i" (offsetof (tcbhead_t, multiple_threads))); \
     ret; })
# else
#  define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
  ({ __typeof (*mem) ret; \
     __asm __volatile (LOCK_PREFIX "cmpxchg8b %1" \
                       : "=A" (ret), "=m" (*mem) \
                       : "b" (((unsigned long long int) (newval)) \
                              & 0xffffffff), \
                         "c" (((unsigned long long int) (newval)) >> 32), \
                         "m" (*mem), "a" (((unsigned long long int) (oldval)) \
                                          & 0xffffffff), \
                         "d" (((unsigned long long int) (oldval)) >> 32)); \
     ret; })

#  define __arch_c_compare_and_exchange_val_64_acq(mem, newval, oldval) \
  ({ __typeof (*mem) ret; \
     __asm __volatile ("cmpl $0, %%gs:%P7\n\t" \
                       "je 0f\n\t" \
                       "lock\n" \
                       "0:\tcmpxchg8b %1" \
                       : "=A" (ret), "=m" (*mem) \
                       : "b" (((unsigned long long int) (newval)) \
                              & 0xffffffff), \
                         "c" (((unsigned long long int) (newval)) >> 32), \
                         "m" (*mem), "a" (((unsigned long long int) (oldval)) \
                                          & 0xffffffff), \
                         "d" (((unsigned long long int) (oldval)) >> 32), \
                         "i" (offsetof (tcbhead_t, multiple_threads))); \
     ret; })
# endif
#endif
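/* CMPXCHG8B compares %edx:%eax against the 64-bit memory operand
   and, if equal, stores %ecx:%ebx into it; otherwise it loads the
   current contents into %edx:%eax.  Under __PIC__, %ebx holds the
   GOT pointer and may not be clobbered, hence the xchgl dance that
   swaps the low word in and out of %ebx around the instruction.  */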


/* Note that we need no lock prefix: XCHG with a memory operand
   always asserts the bus lock implicitly.  */
#define atomic_exchange_acq(mem, newvalue) \
  ({ __typeof (*mem) result; \
     if (sizeof (*mem) == 1) \
       __asm __volatile ("xchgb %b0, %1" \
                         : "=q" (result), "=m" (*mem) \
                         : "0" (newvalue), "m" (*mem)); \
     else if (sizeof (*mem) == 2) \
       __asm __volatile ("xchgw %w0, %1" \
                         : "=r" (result), "=m" (*mem) \
                         : "0" (newvalue), "m" (*mem)); \
     else if (sizeof (*mem) == 4) \
       __asm __volatile ("xchgl %0, %1" \
                         : "=r" (result), "=m" (*mem) \
                         : "0" (newvalue), "m" (*mem)); \
     else \
       { \
         result = 0; \
         abort (); \
       } \
     result; })
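/* Illustrative sketch, not part of the original header: XCHG is the
   classic test-and-set primitive, so atomic_exchange_acq alone gives
   a minimal spinlock.  Names are hypothetical; atomic_delay is
   defined further below.  */
#if 0
static volatile atomic32_t example_lock;  /* 0 = free, 1 = held */

static void
example_spin_lock (void)
{
  while (atomic_exchange_acq (&example_lock, 1) != 0)
    atomic_delay ();  /* PAUSE while spinning.  */
}

static void
example_spin_unlock (void)
{
  example_lock = 0;  /* A plain store suffices for release on x86.  */
}
#endif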


#define __arch_exchange_and_add_body(lock, pfx, mem, value) \
  ({ __typeof (*mem) __result; \
     __typeof (value) __addval = (value); \
     if (sizeof (*mem) == 1) \
       __asm __volatile (lock "xaddb %b0, %1" \
                         : "=q" (__result), "=m" (*mem) \
                         : "0" (__addval), "m" (*mem), \
                           "i" (offsetof (tcbhead_t, multiple_threads))); \
     else if (sizeof (*mem) == 2) \
       __asm __volatile (lock "xaddw %w0, %1" \
                         : "=r" (__result), "=m" (*mem) \
                         : "0" (__addval), "m" (*mem), \
                           "i" (offsetof (tcbhead_t, multiple_threads))); \
     else if (sizeof (*mem) == 4) \
       __asm __volatile (lock "xaddl %0, %1" \
                         : "=r" (__result), "=m" (*mem) \
                         : "0" (__addval), "m" (*mem), \
                           "i" (offsetof (tcbhead_t, multiple_threads))); \
     else \
       { \
         __typeof (mem) __memp = (mem); \
         __typeof (*mem) __tmpval; \
         __result = *__memp; \
         do \
           __tmpval = __result; \
         while ((__result = pfx##_compare_and_exchange_val_64_acq \
                 (__memp, __result + __addval, __result)) != __tmpval); \
       } \
     __result; })

#if __GNUC_PREREQ (4, 1)
# define atomic_exchange_and_add(mem, value) \
  __sync_fetch_and_add (mem, value)
#else
# define atomic_exchange_and_add(mem, value) \
  __arch_exchange_and_add_body (LOCK_PREFIX, __arch, mem, value)
#endif
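/* Illustrative sketch, not part of the original header:
   atomic_exchange_and_add returns the value *before* the addition,
   so it can hand out unique, monotonically increasing tickets.
   Names are hypothetical.  */
#if 0
static atomic32_t example_next_ticket;

static atomic32_t
example_take_ticket (void)
{
  /* One LOCK XADD (or __sync_fetch_and_add with GCC >= 4.1).  */
  return atomic_exchange_and_add (&example_next_ticket, 1);
}
#endif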

#define __arch_exchange_and_add_cprefix \
  "cmpl $0, %%gs:%P4\n\tje 0f\n\tlock\n0:\t"

#define catomic_exchange_and_add(mem, value) \
  __arch_exchange_and_add_body (__arch_exchange_and_add_cprefix, __arch_c, \
                                mem, value)


#define __arch_add_body(lock, pfx, mem, value) \
  do { \
    if (__builtin_constant_p (value) && (value) == 1) \
      atomic_increment (mem); \
    else if (__builtin_constant_p (value) && (value) == -1) \
      atomic_decrement (mem); \
    else if (sizeof (*mem) == 1) \
      __asm __volatile (lock "addb %b1, %0" \
                        : "=m" (*mem) \
                        : "iq" (value), "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
    else if (sizeof (*mem) == 2) \
      __asm __volatile (lock "addw %w1, %0" \
                        : "=m" (*mem) \
                        : "ir" (value), "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
    else if (sizeof (*mem) == 4) \
      __asm __volatile (lock "addl %1, %0" \
                        : "=m" (*mem) \
                        : "ir" (value), "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
    else \
      { \
        __typeof (value) __addval = (value); \
        __typeof (mem) __memp = (mem); \
        __typeof (*mem) __oldval = *__memp; \
        __typeof (*mem) __tmpval; \
        do \
          __tmpval = __oldval; \
        while ((__oldval = pfx##_compare_and_exchange_val_64_acq \
                (__memp, __oldval + __addval, __oldval)) != __tmpval); \
      } \
  } while (0)

#define atomic_add(mem, value) \
  __arch_add_body (LOCK_PREFIX, __arch, mem, value)

#define __arch_add_cprefix \
  "cmpl $0, %%gs:%P3\n\tje 0f\n\tlock\n0:\t"

#define catomic_add(mem, value) \
  __arch_add_body (__arch_add_cprefix, __arch_c, mem, value)
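/* Illustrative sketch, not part of the original header: atomic_add
   discards the old value, and for the compile-time constants 1 and
   -1 it folds into the shorter LOCK INC/LOCK DEC forms via
   atomic_increment and atomic_decrement.  Names are hypothetical.  */
#if 0
static atomic32_t example_bytes_used;

static void
example_account (int nbytes)
{
  atomic_add (&example_bytes_used, nbytes);  /* LOCK ADD */
  atomic_add (&example_bytes_used, 1);       /* becomes atomic_increment */
}
#endif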


#define atomic_add_negative(mem, value) \
  ({ unsigned char __result; \
     if (sizeof (*mem) == 1) \
       __asm __volatile (LOCK_PREFIX "addb %b2, %0; sets %1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : "iq" (value), "m" (*mem)); \
     else if (sizeof (*mem) == 2) \
       __asm __volatile (LOCK_PREFIX "addw %w2, %0; sets %1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : "ir" (value), "m" (*mem)); \
     else if (sizeof (*mem) == 4) \
       __asm __volatile (LOCK_PREFIX "addl %2, %0; sets %1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : "ir" (value), "m" (*mem)); \
     else \
       abort (); \
     __result; })
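/* Illustrative sketch, not part of the original header:
   atomic_add_negative reports, via the sign flag, whether the sum
   became negative, which suits counting-semaphore style fast paths.
   Names are hypothetical.  */
#if 0
static atomic32_t example_free_slots = 4;

static int
example_try_take_slot (void)
{
  if (atomic_add_negative (&example_free_slots, -1))
    {
      /* The count went negative: nothing was free; undo and fail.  */
      atomic_increment (&example_free_slots);
      return 0;
    }
  return 1;
}
#endif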


#define atomic_add_zero(mem, value) \
  ({ unsigned char __result; \
     if (sizeof (*mem) == 1) \
       __asm __volatile (LOCK_PREFIX "addb %b2, %0; setz %1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : "iq" (value), "m" (*mem)); \
     else if (sizeof (*mem) == 2) \
       __asm __volatile (LOCK_PREFIX "addw %w2, %0; setz %1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : "ir" (value), "m" (*mem)); \
     else if (sizeof (*mem) == 4) \
       __asm __volatile (LOCK_PREFIX "addl %2, %0; setz %1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : "ir" (value), "m" (*mem)); \
     else \
       abort (); \
     __result; })


#define __arch_increment_body(lock, pfx, mem) \
  do { \
    if (sizeof (*mem) == 1) \
      __asm __volatile (lock "incb %b0" \
                        : "=m" (*mem) \
                        : "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
    else if (sizeof (*mem) == 2) \
      __asm __volatile (lock "incw %w0" \
                        : "=m" (*mem) \
                        : "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
    else if (sizeof (*mem) == 4) \
      __asm __volatile (lock "incl %0" \
                        : "=m" (*mem) \
                        : "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
    else \
      { \
        __typeof (mem) __memp = (mem); \
        __typeof (*mem) __oldval = *__memp; \
        __typeof (*mem) __tmpval; \
        do \
          __tmpval = __oldval; \
        while ((__oldval = pfx##_compare_and_exchange_val_64_acq \
                (__memp, __oldval + 1, __oldval)) != __tmpval); \
      } \
  } while (0)

#define atomic_increment(mem) __arch_increment_body (LOCK_PREFIX, __arch, mem)

#define __arch_increment_cprefix \
  "cmpl $0, %%gs:%P2\n\tje 0f\n\tlock\n0:\t"

#define catomic_increment(mem) \
  __arch_increment_body (__arch_increment_cprefix, __arch_c, mem)


#define atomic_increment_and_test(mem) \
  ({ unsigned char __result; \
     if (sizeof (*mem) == 1) \
       __asm __volatile (LOCK_PREFIX "incb %b0; sete %1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : "m" (*mem)); \
     else if (sizeof (*mem) == 2) \
       __asm __volatile (LOCK_PREFIX "incw %w0; sete %1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : "m" (*mem)); \
     else if (sizeof (*mem) == 4) \
       __asm __volatile (LOCK_PREFIX "incl %0; sete %1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : "m" (*mem)); \
     else \
       abort (); \
     __result; })


#define __arch_decrement_body(lock, pfx, mem) \
  do { \
    if (sizeof (*mem) == 1) \
      __asm __volatile (lock "decb %b0" \
                        : "=m" (*mem) \
                        : "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
    else if (sizeof (*mem) == 2) \
      __asm __volatile (lock "decw %w0" \
                        : "=m" (*mem) \
                        : "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
    else if (sizeof (*mem) == 4) \
      __asm __volatile (lock "decl %0" \
                        : "=m" (*mem) \
                        : "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
    else \
      { \
        __typeof (mem) __memp = (mem); \
        __typeof (*mem) __oldval = *__memp; \
        __typeof (*mem) __tmpval; \
        do \
          __tmpval = __oldval; \
        while ((__oldval = pfx##_compare_and_exchange_val_64_acq \
                (__memp, __oldval - 1, __oldval)) != __tmpval); \
      } \
  } while (0)

#define atomic_decrement(mem) __arch_decrement_body (LOCK_PREFIX, __arch, mem)

#define __arch_decrement_cprefix \
  "cmpl $0, %%gs:%P2\n\tje 0f\n\tlock\n0:\t"

#define catomic_decrement(mem) \
  __arch_decrement_body (__arch_decrement_cprefix, __arch_c, mem)


#define atomic_decrement_and_test(mem) \
  ({ unsigned char __result; \
     if (sizeof (*mem) == 1) \
       __asm __volatile (LOCK_PREFIX "decb %b0; sete %1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : "m" (*mem)); \
     else if (sizeof (*mem) == 2) \
       __asm __volatile (LOCK_PREFIX "decw %w0; sete %1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : "m" (*mem)); \
     else if (sizeof (*mem) == 4) \
       __asm __volatile (LOCK_PREFIX "decl %0; sete %1" \
                         : "=m" (*mem), "=qm" (__result) \
                         : "m" (*mem)); \
     else \
       abort (); \
     __result; })
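/* Illustrative sketch, not part of the original header: the classic
   reference-count release, where LOCK DEC plus SETE tells exactly
   one thread that it dropped the last reference.  Names are
   hypothetical; free would need <stdlib.h>.  */
#if 0
struct example_obj
{
  atomic32_t refcnt;
  /* ... payload ... */
};

static void
example_obj_put (struct example_obj *obj)
{
  if (atomic_decrement_and_test (&obj->refcnt))
    free (obj);  /* We held the last reference.  */
}
#endif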


#define atomic_bit_set(mem, bit) \
  do { \
    if (sizeof (*mem) == 1) \
      __asm __volatile (LOCK_PREFIX "orb %b2, %0" \
                        : "=m" (*mem) \
                        : "m" (*mem), "iq" (1 << (bit))); \
    else if (sizeof (*mem) == 2) \
      __asm __volatile (LOCK_PREFIX "orw %w2, %0" \
                        : "=m" (*mem) \
                        : "m" (*mem), "ir" (1 << (bit))); \
    else if (sizeof (*mem) == 4) \
      __asm __volatile (LOCK_PREFIX "orl %2, %0" \
                        : "=m" (*mem) \
                        : "m" (*mem), "ir" (1 << (bit))); \
    else \
      abort (); \
  } while (0)


#define atomic_bit_test_set(mem, bit) \
  ({ unsigned char __result; \
     if (sizeof (*mem) == 1) \
       __asm __volatile (LOCK_PREFIX "btsb %3, %1; setc %0" \
                         : "=q" (__result), "=m" (*mem) \
                         : "m" (*mem), "ir" (bit)); \
     else if (sizeof (*mem) == 2) \
       __asm __volatile (LOCK_PREFIX "btsw %3, %1; setc %0" \
                         : "=q" (__result), "=m" (*mem) \
                         : "m" (*mem), "ir" (bit)); \
     else if (sizeof (*mem) == 4) \
       __asm __volatile (LOCK_PREFIX "btsl %3, %1; setc %0" \
                         : "=q" (__result), "=m" (*mem) \
                         : "m" (*mem), "ir" (bit)); \
     else \
       abort (); \
     __result; })
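/* Illustrative sketch, not part of the original header: LOCK BTS
   returns the previous state of the bit, giving a one-instruction
   try-lock on a single flag bit.  (Note that BTS has no byte form,
   so the 1-byte case above appears unable to assemble; only the 16-
   and 32-bit variants are usable.)  Names are hypothetical.  */
#if 0
static atomic32_t example_flags;
#define EXAMPLE_BUSY_BIT 0

static int
example_try_lock (void)
{
  /* A zero result means the bit was clear before: we got the lock.  */
  return atomic_bit_test_set (&example_flags, EXAMPLE_BUSY_BIT) == 0;
}
#endif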


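/* "rep; nop" is the encoding of the PAUSE instruction, a spin-wait
   hint that executes as a plain NOP on older processors.  */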
#define atomic_delay() asm ("rep; nop")


#define atomic_and(mem, mask) \
  do { \
    if (sizeof (*mem) == 1) \
      __asm __volatile (LOCK_PREFIX "andb %b1, %0" \
                        : "=m" (*mem) \
                        : "iq" (mask), "m" (*mem)); \
    else if (sizeof (*mem) == 2) \
      __asm __volatile (LOCK_PREFIX "andw %w1, %0" \
                        : "=m" (*mem) \
                        : "ir" (mask), "m" (*mem)); \
    else if (sizeof (*mem) == 4) \
      __asm __volatile (LOCK_PREFIX "andl %1, %0" \
                        : "=m" (*mem) \
                        : "ir" (mask), "m" (*mem)); \
    else \
      abort (); \
  } while (0)


#define __arch_or_body(lock, mem, mask) \
  do { \
    if (sizeof (*mem) == 1) \
      __asm __volatile (lock "orb %b1, %0" \
                        : "=m" (*mem) \
                        : "iq" (mask), "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
    else if (sizeof (*mem) == 2) \
      __asm __volatile (lock "orw %w1, %0" \
                        : "=m" (*mem) \
                        : "ir" (mask), "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
    else if (sizeof (*mem) == 4) \
      __asm __volatile (lock "orl %1, %0" \
                        : "=m" (*mem) \
                        : "ir" (mask), "m" (*mem), \
                          "i" (offsetof (tcbhead_t, multiple_threads))); \
    else \
      abort (); \
  } while (0)

#define atomic_or(mem, mask) __arch_or_body (LOCK_PREFIX, mem, mask)

#define __arch_or_cprefix \
  "cmpl $0, %%gs:%P3\n\tje 0f\n\tlock\n0:\t"

#define catomic_or(mem, mask) __arch_or_body (__arch_or_cprefix, mem, mask)