glibc 2.9 — x86-64 <bits/atomic.h>
(architecture-specific atomic-operation macros; source listing extracted from the
glibc source documentation)
00001 /* Copyright (C) 2002, 2003, 2004, 2006, 2007 Free Software Foundation, Inc.
00002    This file is part of the GNU C Library.
00003    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
00004 
00005    The GNU C Library is free software; you can redistribute it and/or
00006    modify it under the terms of the GNU Lesser General Public
00007    License as published by the Free Software Foundation; either
00008    version 2.1 of the License, or (at your option) any later version.
00009 
00010    The GNU C Library is distributed in the hope that it will be useful,
00011    but WITHOUT ANY WARRANTY; without even the implied warranty of
00012    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
00013    Lesser General Public License for more details.
00014 
00015    You should have received a copy of the GNU Lesser General Public
00016    License along with the GNU C Library; if not, write to the Free
00017    Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
00018    02111-1307 USA.  */
00019 
00020 #include <stdint.h>
00021 #include <tls.h>     /* For tcbhead_t.  */
00022 
00023 
/* Integer types used by the generic atomic_* machinery in <atomic.h>:
   signed/unsigned fixed-width (8/16/32/64), "fast" variants,
   pointer-sized, and maximum-width types.  On x86-64 every one of these
   widths has a native lock-prefixed read-modify-write instruction.  */
00024 typedef int8_t atomic8_t;
00025 typedef uint8_t uatomic8_t;
00026 typedef int_fast8_t atomic_fast8_t;
00027 typedef uint_fast8_t uatomic_fast8_t;
00028 
00029 typedef int16_t atomic16_t;
00030 typedef uint16_t uatomic16_t;
00031 typedef int_fast16_t atomic_fast16_t;
00032 typedef uint_fast16_t uatomic_fast16_t;
00033 
00034 typedef int32_t atomic32_t;
00035 typedef uint32_t uatomic32_t;
00036 typedef int_fast32_t atomic_fast32_t;
00037 typedef uint_fast32_t uatomic_fast32_t;
00038 
00039 typedef int64_t atomic64_t;
00040 typedef uint64_t uatomic64_t;
00041 typedef int_fast64_t atomic_fast64_t;
00042 typedef uint_fast64_t uatomic_fast64_t;
00043 
00044 typedef intptr_t atomicptr_t;
00045 typedef uintptr_t uatomicptr_t;
00046 typedef intmax_t atomic_max_t;
00047 typedef uintmax_t uatomic_max_t;
00048 
00049 
/* LOCK_PREFIX is prepended to read-modify-write instructions below to
   make them atomic with respect to other processors.  On uniprocessor
   (UP) builds it expands to nothing: a single CPU always sees its own
   instructions execute atomically, so the bus lock is pure overhead.  */
00050 #ifndef LOCK_PREFIX
00051 # ifdef UP
00052 #  define LOCK_PREFIX       /* nothing */
00053 # else
00054 #  define LOCK_PREFIX "lock;"
00055 # endif
00056 #endif
00057 
00058 
/* Compare-and-exchange with acquire semantics.
   With GCC >= 4.1 the compiler's __sync builtins are used directly.
   Note the argument-order difference: these macros take (mem, newval,
   oldval) while the __sync builtins take (mem, oldval, newval).
   The _val_ form returns the value *mem held before the operation;
   the _bool_ form returns zero on success (hence the negation of
   __sync_bool_compare_and_swap, which returns nonzero on success).

   For older compilers, per-width helpers are defined using CMPXCHG.
   The "0" (oldval) constraint ties oldval to operand 0 (%rax, via the
   "=a" output), matching CMPXCHG's implicit use of the accumulator;
   the old value of *mem comes back in ret.  The %b2/%w2/%q2 operand
   modifiers select the byte/word/quad register name for newval.  */
00059 #if __GNUC_PREREQ (4, 1)
00060 # define atomic_compare_and_exchange_val_acq(mem, newval, oldval) \
00061   __sync_val_compare_and_swap (mem, oldval, newval)
00062 #  define atomic_compare_and_exchange_bool_acq(mem, newval, oldval) \
00063   (! __sync_bool_compare_and_swap (mem, oldval, newval))
00064 #else
00065 # define __arch_compare_and_exchange_val_8_acq(mem, newval, oldval) \
00066   ({ __typeof (*mem) ret;                                            \
00067      __asm __volatile (LOCK_PREFIX "cmpxchgb %b2, %1"                       \
00068                      : "=a" (ret), "=m" (*mem)                       \
00069                      : "q" (newval), "m" (*mem), "0" (oldval));             \
00070      ret; })
00071 
00072 # define __arch_compare_and_exchange_val_16_acq(mem, newval, oldval) \
00073   ({ __typeof (*mem) ret;                                            \
00074      __asm __volatile (LOCK_PREFIX "cmpxchgw %w2, %1"                       \
00075                      : "=a" (ret), "=m" (*mem)                       \
00076                      : "r" (newval), "m" (*mem), "0" (oldval));             \
00077      ret; })
00078 
00079 # define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \
00080   ({ __typeof (*mem) ret;                                            \
00081      __asm __volatile (LOCK_PREFIX "cmpxchgl %2, %1"                        \
00082                      : "=a" (ret), "=m" (*mem)                       \
00083                      : "r" (newval), "m" (*mem), "0" (oldval));             \
00084      ret; })
00085 
00086 # define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
00087   ({ __typeof (*mem) ret;                                            \
00088      __asm __volatile (LOCK_PREFIX "cmpxchgq %q2, %1"                       \
00089                      : "=a" (ret), "=m" (*mem)                       \
00090                      : "r" ((long int) (newval)), "m" (*mem),               \
00091                       "0" ((long int) (oldval)));                           \
00092      ret; })
00093 #endif
00094 
00095 
/* "Conditional" (c-prefixed) compare-and-exchange variants: these decide
   at run time whether the LOCK prefix is needed.  The asm compares the
   multiple_threads field of the TCB (reached through the %fs thread
   register; %P5 prints operand 5, the offsetof immediate, as a bare
   constant) against zero.  If the process is single-threaded the "je 0f"
   jumps over the "lock" prefix straight to the CMPXCHG at label 0,
   avoiding the bus-lock cost.  Otherwise the CMPXCHG executes locked.
   Register constraints and operand tying are as in the unconditional
   versions above ("0"/"=a" ties oldval and ret to the accumulator).  */
00096 #define __arch_c_compare_and_exchange_val_8_acq(mem, newval, oldval) \
00097   ({ __typeof (*mem) ret;                                            \
00098     __asm __volatile ("cmpl $0, %%fs:%P5\n\t"                               \
00099                     "je 0f\n\t"                                      \
00100                     "lock\n"                                                \
00101                      "0:\tcmpxchgb %b2, %1"                                 \
00102                      : "=a" (ret), "=m" (*mem)                       \
00103                      : "q" (newval), "m" (*mem), "0" (oldval),       \
00104                       "i" (offsetof (tcbhead_t, multiple_threads)));        \
00105      ret; })
00106 
00107 #define __arch_c_compare_and_exchange_val_16_acq(mem, newval, oldval) \
00108   ({ __typeof (*mem) ret;                                            \
00109     __asm __volatile ("cmpl $0, %%fs:%P5\n\t"                               \
00110                     "je 0f\n\t"                                      \
00111                     "lock\n"                                                \
00112                      "0:\tcmpxchgw %w2, %1"                                 \
00113                      : "=a" (ret), "=m" (*mem)                       \
00114                      : "q" (newval), "m" (*mem), "0" (oldval),       \
00115                       "i" (offsetof (tcbhead_t, multiple_threads)));        \
00116      ret; })
00117 
00118 #define __arch_c_compare_and_exchange_val_32_acq(mem, newval, oldval) \
00119   ({ __typeof (*mem) ret;                                            \
00120     __asm __volatile ("cmpl $0, %%fs:%P5\n\t"                               \
00121                     "je 0f\n\t"                                      \
00122                     "lock\n"                                                \
00123                      "0:\tcmpxchgl %2, %1"                                  \
00124                      : "=a" (ret), "=m" (*mem)                       \
00125                      : "q" (newval), "m" (*mem), "0" (oldval),       \
00126                       "i" (offsetof (tcbhead_t, multiple_threads)));        \
00127      ret; })
00128 
00129 #define __arch_c_compare_and_exchange_val_64_acq(mem, newval, oldval) \
00130   ({ __typeof (*mem) ret;                                            \
00131      __asm __volatile ("cmpl $0, %%fs:%P5\n\t"                              \
00132                      "je 0f\n\t"                                     \
00133                      "lock\n"                                               \
00134                      "0:\tcmpxchgq %q2, %1"                                 \
00135                      : "=a" (ret), "=m" (*mem)                       \
00136                      : "q" ((long int) (newval)), "m" (*mem),               \
00137                       "0" ((long int)oldval),                        \
00138                       "i" (offsetof (tcbhead_t, multiple_threads)));        \
00139      ret; })
00140 
00141 
/* Atomically store NEWVALUE into *MEM and return the previous value.
   Dispatches on sizeof (*mem) to the matching XCHG width; the compiler
   folds the sizeof chain to a single branch at compile time.  The "0"
   input ties newvalue to the same register as the result, matching
   XCHG's swap-in-place semantics.  */
00142 /* Note that we need no lock prefix.  */
00143 #define atomic_exchange_acq(mem, newvalue) \
00144   ({ __typeof (*mem) result;                                                \
00145      if (sizeof (*mem) == 1)                                                \
00146        __asm __volatile ("xchgb %b0, %1"                             \
00147                       : "=q" (result), "=m" (*mem)                          \
00148                       : "0" (newvalue), "m" (*mem));                        \
00149      else if (sizeof (*mem) == 2)                                    \
00150        __asm __volatile ("xchgw %w0, %1"                             \
00151                       : "=r" (result), "=m" (*mem)                          \
00152                       : "0" (newvalue), "m" (*mem));                        \
00153      else if (sizeof (*mem) == 4)                                    \
00154        __asm __volatile ("xchgl %0, %1"                                     \
00155                       : "=r" (result), "=m" (*mem)                          \
00156                       : "0" (newvalue), "m" (*mem));                        \
00157      else                                                            \
00158        __asm __volatile ("xchgq %q0, %1"                             \
00159                       : "=r" (result), "=m" (*mem)                          \
00160                       : "0" ((long) (newvalue)), "m" (*mem));        \
00161      result; })
00162 
00163 
/* Fetch-and-add: atomically add VALUE to *MEM and return the value *MEM
   held before the addition, using XADD at the appropriate width.  The
   "i" (offsetof ...) operand is unused by the plain instruction; it is
   referenced (as %P4) only when the macro is instantiated with the
   conditional-lock prefix string below, which tests
   tcbhead_t.multiple_threads via %fs and skips the LOCK prefix in
   single-threaded processes.  With GCC >= 4.1 the public
   atomic_exchange_and_add uses __sync_fetch_and_add instead.  */
00164 #define __arch_exchange_and_add_body(lock, mem, value)                      \
00165   ({ __typeof (*mem) result;                                                \
00166      if (sizeof (*mem) == 1)                                                \
00167        __asm __volatile (lock "xaddb %b0, %1"                               \
00168                       : "=q" (result), "=m" (*mem)                          \
00169                       : "0" (value), "m" (*mem),                     \
00170                         "i" (offsetof (tcbhead_t, multiple_threads)));     \
00171      else if (sizeof (*mem) == 2)                                    \
00172        __asm __volatile (lock "xaddw %w0, %1"                               \
00173                       : "=r" (result), "=m" (*mem)                          \
00174                       : "0" (value), "m" (*mem),                     \
00175                         "i" (offsetof (tcbhead_t, multiple_threads)));     \
00176      else if (sizeof (*mem) == 4)                                    \
00177        __asm __volatile (lock "xaddl %0, %1"                                \
00178                       : "=r" (result), "=m" (*mem)                          \
00179                       : "0" (value), "m" (*mem),                     \
00180                         "i" (offsetof (tcbhead_t, multiple_threads)));     \
00181      else                                                            \
00182        __asm __volatile (lock "xaddq %q0, %1"                               \
00183                       : "=r" (result), "=m" (*mem)                          \
00184                       : "0" ((long) (value)), "m" (*mem),                   \
00185                         "i" (offsetof (tcbhead_t, multiple_threads)));     \
00186      result; })
00187 
00188 #if __GNUC_PREREQ (4, 1)
00189 # define atomic_exchange_and_add(mem, value) \
00190   __sync_fetch_and_add (mem, value)
00191 #else
00192 # define atomic_exchange_and_add(mem, value) \
00193   __arch_exchange_and_add_body (LOCK_PREFIX, mem, value)
00194 #endif
00195 
00196 #define __arch_exchange_and_add_cprefix \
00197   "cmpl $0, %%fs:%P4\n\tje 0f\n\tlock\n0:\t"
00198 
00199 #define catomic_exchange_and_add(mem, value) \
00200   __arch_exchange_and_add_body (__arch_exchange_and_add_cprefix, mem, value)
00201 
00202 
/* Atomically add VALUE to *MEM (no result returned).  Compile-time
   constants +1/-1 are routed to the pfx##_increment / pfx##_decrement
   macros, which use the shorter INC/DEC instructions; everything else
   uses ADD at the matching width.  As above, the offsetof immediate is
   consumed only by the conditional-lock (catomic_) prefix string (%P3
   here, since the add body has one fewer operand than the xadd body).  */
00203 #define __arch_add_body(lock, pfx, mem, value)                              \
00204   do {                                                               \
00205     if (__builtin_constant_p (value) && (value) == 1)                       \
00206       pfx##_increment (mem);                                                \
00207     else if (__builtin_constant_p (value) && (value) == -1)                 \
00208       pfx##_decrement (mem);                                                \
00209     else if (sizeof (*mem) == 1)                                     \
00210       __asm __volatile (lock "addb %b1, %0"                                 \
00211                      : "=m" (*mem)                                   \
00212                      : "iq" (value), "m" (*mem),                     \
00213                        "i" (offsetof (tcbhead_t, multiple_threads)));      \
00214     else if (sizeof (*mem) == 2)                                     \
00215       __asm __volatile (lock "addw %w1, %0"                                 \
00216                      : "=m" (*mem)                                   \
00217                      : "ir" (value), "m" (*mem),                     \
00218                        "i" (offsetof (tcbhead_t, multiple_threads)));      \
00219     else if (sizeof (*mem) == 4)                                     \
00220       __asm __volatile (lock "addl %1, %0"                                  \
00221                      : "=m" (*mem)                                   \
00222                      : "ir" (value), "m" (*mem),                     \
00223                        "i" (offsetof (tcbhead_t, multiple_threads)));      \
00224     else                                                             \
00225       __asm __volatile (lock "addq %q1, %0"                                 \
00226                      : "=m" (*mem)                                   \
00227                      : "ir" ((long) (value)), "m" (*mem),                   \
00228                        "i" (offsetof (tcbhead_t, multiple_threads)));      \
00229   } while (0)
00230 
00231 #define atomic_add(mem, value) \
00232   __arch_add_body (LOCK_PREFIX, atomic, mem, value)
00233 
00234 #define __arch_add_cprefix \
00235   "cmpl $0, %%fs:%P3\n\tje 0f\n\tlock\n0:\t"
00236 
00237 #define catomic_add(mem, value) \
00238   __arch_add_body (__arch_add_cprefix, catomic, mem, value)
00239 
00240 
/* Atomically add VALUE to *MEM; return nonzero iff the resulting value
   is negative.  Implemented as a locked ADD followed by SETS, which
   captures the sign flag the ADD just produced.  */
00241 #define atomic_add_negative(mem, value) \
00242   ({ unsigned char __result;                                                \
00243      if (sizeof (*mem) == 1)                                                \
00244        __asm __volatile (LOCK_PREFIX "addb %b2, %0; sets %1"                \
00245                       : "=m" (*mem), "=qm" (__result)                \
00246                       : "iq" (value), "m" (*mem));                          \
00247      else if (sizeof (*mem) == 2)                                    \
00248        __asm __volatile (LOCK_PREFIX "addw %w2, %0; sets %1"                \
00249                       : "=m" (*mem), "=qm" (__result)                \
00250                       : "ir" (value), "m" (*mem));                          \
00251      else if (sizeof (*mem) == 4)                                    \
00252        __asm __volatile (LOCK_PREFIX "addl %2, %0; sets %1"                 \
00253                       : "=m" (*mem), "=qm" (__result)                \
00254                       : "ir" (value), "m" (*mem));                          \
00255      else                                                            \
00256        __asm __volatile (LOCK_PREFIX "addq %q2, %0; sets %1"                \
00257                       : "=m" (*mem), "=qm" (__result)                \
00258                       : "ir" ((long) (value)), "m" (*mem));                 \
00259      __result; })
00260 
00261 
/* Atomically add VALUE to *MEM; return nonzero iff the resulting value
   is zero.  Same pattern as atomic_add_negative, but SETZ captures the
   zero flag instead of the sign flag.  */
00262 #define atomic_add_zero(mem, value) \
00263   ({ unsigned char __result;                                                \
00264      if (sizeof (*mem) == 1)                                                \
00265        __asm __volatile (LOCK_PREFIX "addb %b2, %0; setz %1"                \
00266                       : "=m" (*mem), "=qm" (__result)                \
00267                       : "iq" (value), "m" (*mem));                          \
00268      else if (sizeof (*mem) == 2)                                    \
00269        __asm __volatile (LOCK_PREFIX "addw %w2, %0; setz %1"                \
00270                       : "=m" (*mem), "=qm" (__result)                \
00271                       : "ir" (value), "m" (*mem));                          \
00272      else if (sizeof (*mem) == 4)                                    \
00273        __asm __volatile (LOCK_PREFIX "addl %2, %0; setz %1"                 \
00274                       : "=m" (*mem), "=qm" (__result)                \
00275                       : "ir" (value), "m" (*mem));                          \
00276      else                                                            \
00277        __asm __volatile (LOCK_PREFIX "addq %q2, %0; setz %1"                \
00278                       : "=m" (*mem), "=qm" (__result)                \
00279                       : "ir" ((long) (value)), "m" (*mem));                 \
00280      __result; })
00281 
00282 
/* Atomically increment *MEM by one using INC at the matching width.
   The offsetof immediate is consumed only by the conditional-lock
   (catomic_) prefix string, which references it as %P2 and skips the
   LOCK prefix when tcbhead_t.multiple_threads (via %fs) is zero.  */
00283 #define __arch_increment_body(lock, mem) \
00284   do {                                                               \
00285     if (sizeof (*mem) == 1)                                          \
00286       __asm __volatile (lock "incb %b0"                                     \
00287                      : "=m" (*mem)                                   \
00288                      : "m" (*mem),                                   \
00289                        "i" (offsetof (tcbhead_t, multiple_threads)));      \
00290     else if (sizeof (*mem) == 2)                                     \
00291       __asm __volatile (lock "incw %w0"                                     \
00292                      : "=m" (*mem)                                   \
00293                      : "m" (*mem),                                   \
00294                        "i" (offsetof (tcbhead_t, multiple_threads)));      \
00295     else if (sizeof (*mem) == 4)                                     \
00296       __asm __volatile (lock "incl %0"                                      \
00297                      : "=m" (*mem)                                   \
00298                      : "m" (*mem),                                   \
00299                        "i" (offsetof (tcbhead_t, multiple_threads)));      \
00300     else                                                             \
00301       __asm __volatile (lock "incq %q0"                                     \
00302                      : "=m" (*mem)                                   \
00303                      : "m" (*mem),                                   \
00304                        "i" (offsetof (tcbhead_t, multiple_threads)));      \
00305   } while (0)
00306 
00307 #define atomic_increment(mem) __arch_increment_body (LOCK_PREFIX, mem)
00308 
00309 #define __arch_increment_cprefix \
00310   "cmpl $0, %%fs:%P2\n\tje 0f\n\tlock\n0:\t"
00311 
00312 #define catomic_increment(mem) \
00313   __arch_increment_body (__arch_increment_cprefix, mem)
00314 
00315 
/* Atomically increment *MEM; return nonzero iff the new value is zero
   (SETE captures the zero flag produced by the locked INC).  */
00316 #define atomic_increment_and_test(mem) \
00317   ({ unsigned char __result;                                                \
00318      if (sizeof (*mem) == 1)                                                \
00319        __asm __volatile (LOCK_PREFIX "incb %b0; sete %1"                    \
00320                       : "=m" (*mem), "=qm" (__result)                \
00321                       : "m" (*mem));                                        \
00322      else if (sizeof (*mem) == 2)                                    \
00323        __asm __volatile (LOCK_PREFIX "incw %w0; sete %1"                    \
00324                       : "=m" (*mem), "=qm" (__result)                \
00325                       : "m" (*mem));                                        \
00326      else if (sizeof (*mem) == 4)                                    \
00327        __asm __volatile (LOCK_PREFIX "incl %0; sete %1"                     \
00328                       : "=m" (*mem), "=qm" (__result)                \
00329                       : "m" (*mem));                                        \
00330      else                                                            \
00331        __asm __volatile (LOCK_PREFIX "incq %q0; sete %1"                    \
00332                       : "=m" (*mem), "=qm" (__result)                \
00333                       : "m" (*mem));                                        \
00334      __result; })
00335 
00336 
/* Atomically decrement *MEM by one using DEC at the matching width.
   Mirror image of __arch_increment_body above; the offsetof immediate
   again feeds only the conditional-lock prefix (%P2).  */
00337 #define __arch_decrement_body(lock, mem) \
00338   do {                                                               \
00339     if (sizeof (*mem) == 1)                                          \
00340       __asm __volatile (lock "decb %b0"                                     \
00341                      : "=m" (*mem)                                   \
00342                      : "m" (*mem),                                   \
00343                        "i" (offsetof (tcbhead_t, multiple_threads)));      \
00344     else if (sizeof (*mem) == 2)                                     \
00345       __asm __volatile (lock "decw %w0"                                     \
00346                      : "=m" (*mem)                                   \
00347                      : "m" (*mem),                                   \
00348                        "i" (offsetof (tcbhead_t, multiple_threads)));      \
00349     else if (sizeof (*mem) == 4)                                     \
00350       __asm __volatile (lock "decl %0"                                      \
00351                      : "=m" (*mem)                                   \
00352                      : "m" (*mem),                                   \
00353                        "i" (offsetof (tcbhead_t, multiple_threads)));      \
00354     else                                                             \
00355       __asm __volatile (lock "decq %q0"                                     \
00356                      : "=m" (*mem)                                   \
00357                      : "m" (*mem),                                   \
00358                        "i" (offsetof (tcbhead_t, multiple_threads)));      \
00359   } while (0)
00360 
00361 #define atomic_decrement(mem) __arch_decrement_body (LOCK_PREFIX, mem)
00362 
00363 #define __arch_decrement_cprefix \
00364   "cmpl $0, %%fs:%P2\n\tje 0f\n\tlock\n0:\t"
00365 
00366 #define catomic_decrement(mem) \
00367   __arch_decrement_body (__arch_decrement_cprefix, mem)
00368 
00369 
/* Atomically decrement *MEM; return nonzero iff the new value is zero
   (SETE on the zero flag from the locked DEC) — the classic
   reference-count "last user" test.  */
00370 #define atomic_decrement_and_test(mem) \
00371   ({ unsigned char __result;                                                \
00372      if (sizeof (*mem) == 1)                                                \
00373        __asm __volatile (LOCK_PREFIX "decb %b0; sete %1"                    \
00374                       : "=m" (*mem), "=qm" (__result)                \
00375                       : "m" (*mem));                                        \
00376      else if (sizeof (*mem) == 2)                                    \
00377        __asm __volatile (LOCK_PREFIX "decw %w0; sete %1"                    \
00378                       : "=m" (*mem), "=qm" (__result)                \
00379                       : "m" (*mem));                                        \
00380      else if (sizeof (*mem) == 4)                                    \
00381        __asm __volatile (LOCK_PREFIX "decl %0; sete %1"                     \
00382                       : "=m" (*mem), "=qm" (__result)                \
00383                       : "m" (*mem));                                        \
00384      else                                                            \
00385        __asm __volatile (LOCK_PREFIX "decq %q0; sete %1"                    \
00386                       : "=m" (*mem), "=qm" (__result)                \
00387                       : "m" (*mem));                                        \
00388      __result; })
00389 
00390 
/* Atomically set bit number BIT in *MEM via a locked OR with 1 << bit.
   For 8-byte objects there are two cases: a compile-time-constant bit
   below 32 can be encoded as a sign-extendable immediate ("i"), while
   any other bit number must go through a register ("r") with the mask
   built as 1UL << bit.  */
00391 #define atomic_bit_set(mem, bit) \
00392   do {                                                               \
00393     if (sizeof (*mem) == 1)                                          \
00394       __asm __volatile (LOCK_PREFIX "orb %b2, %0"                           \
00395                      : "=m" (*mem)                                   \
00396                      : "m" (*mem), "iq" (1L << (bit)));              \
00397     else if (sizeof (*mem) == 2)                                     \
00398       __asm __volatile (LOCK_PREFIX "orw %w2, %0"                           \
00399                      : "=m" (*mem)                                   \
00400                      : "m" (*mem), "ir" (1L << (bit)));              \
00401     else if (sizeof (*mem) == 4)                                     \
00402       __asm __volatile (LOCK_PREFIX "orl %2, %0"                     \
00403                      : "=m" (*mem)                                   \
00404                      : "m" (*mem), "ir" (1L << (bit)));              \
00405     else if (__builtin_constant_p (bit) && (bit) < 32)                      \
00406       __asm __volatile (LOCK_PREFIX "orq %2, %0"                     \
00407                      : "=m" (*mem)                                   \
00408                      : "m" (*mem), "i" (1L << (bit)));               \
00409     else                                                             \
00410       __asm __volatile (LOCK_PREFIX "orq %q2, %0"                           \
00411                      : "=m" (*mem)                                   \
00412                      : "m" (*mem), "r" (1UL << (bit)));              \
00413   } while (0)
00414 
00415 
/* Atomically set bit BIT in *MEM and return its previous value
   (nonzero if it was already set).  Uses locked BTS, whose carry flag
   receives the old bit, captured with SETC.  */
00416 #define atomic_bit_test_set(mem, bit) \
00417   ({ unsigned char __result;                                                \
00418      if (sizeof (*mem) == 1)                                                \
00419        __asm __volatile (LOCK_PREFIX "btsb %3, %1; setc %0"                 \
00420                       : "=q" (__result), "=m" (*mem)                        \
00421                       : "m" (*mem), "iq" (bit));                     \
00422      else if (sizeof (*mem) == 2)                                    \
00423        __asm __volatile (LOCK_PREFIX "btsw %3, %1; setc %0"                 \
00424                       : "=q" (__result), "=m" (*mem)                        \
00425                       : "m" (*mem), "ir" (bit));                     \
00426      else if (sizeof (*mem) == 4)                                    \
00427        __asm __volatile (LOCK_PREFIX "btsl %3, %1; setc %0"                 \
00428                       : "=q" (__result), "=m" (*mem)                        \
00429                       : "m" (*mem), "ir" (bit));                     \
00430      else                                                            \
00431        __asm __volatile (LOCK_PREFIX "btsq %3, %1; setc %0"                 \
00432                       : "=q" (__result), "=m" (*mem)                        \
00433                       : "m" (*mem), "ir" (bit));                     \
00434      __result; })
00435 
00436 
/* Spin-wait hint: "rep; nop" is the encoding of the PAUSE instruction,
   which tells the CPU this is a busy-wait loop (reduces power and
   avoids memory-order pipeline flushes on loop exit).  */
00437 #define atomic_delay() asm ("rep; nop")
00438 
00439 
/* Atomically AND MASK into *MEM (no result) with a locked AND at the
   matching operand width.  Always uses LOCK_PREFIX; unlike atomic_or
   below there is no conditional (catomic_) single-thread variant.  */
00440 #define atomic_and(mem, mask) \
00441   do {                                                               \
00442     if (sizeof (*mem) == 1)                                          \
00443       __asm __volatile (LOCK_PREFIX "andb %b1, %0"                          \
00444                      : "=m" (*mem)                                   \
00445                      : "iq" (mask), "m" (*mem));                     \
00446     else if (sizeof (*mem) == 2)                                     \
00447       __asm __volatile (LOCK_PREFIX "andw %w1, %0"                          \
00448                      : "=m" (*mem)                                   \
00449                      : "ir" (mask), "m" (*mem));                     \
00450     else if (sizeof (*mem) == 4)                                     \
00451       __asm __volatile (LOCK_PREFIX "andl %1, %0"                           \
00452                      : "=m" (*mem)                                   \
00453                      : "ir" (mask), "m" (*mem));                     \
00454     else                                                             \
00455       __asm __volatile (LOCK_PREFIX "andq %q1, %0"                          \
00456                      : "=m" (*mem)                                   \
00457                      : "ir" (mask), "m" (*mem));                     \
00458   } while (0)
00459 
00460 
/* Atomically OR MASK into *MEM (no result).  atomic_or always uses the
   LOCK prefix; catomic_or instantiates the same body with the
   conditional prefix, which tests tcbhead_t.multiple_threads (operand
   %P3, via %fs) and branches over the "lock" when the process is
   single-threaded.  */
00461 #define __arch_or_body(lock, mem, mask)                                     \
00462   do {                                                               \
00463     if (sizeof (*mem) == 1)                                          \
00464       __asm __volatile (lock "orb %b1, %0"                                  \
00465                      : "=m" (*mem)                                   \
00466                      : "iq" (mask), "m" (*mem),                      \
00467                        "i" (offsetof (tcbhead_t, multiple_threads)));      \
00468     else if (sizeof (*mem) == 2)                                     \
00469       __asm __volatile (lock "orw %w1, %0"                                  \
00470                      : "=m" (*mem)                                   \
00471                      : "ir" (mask), "m" (*mem),                      \
00472                        "i" (offsetof (tcbhead_t, multiple_threads)));      \
00473     else if (sizeof (*mem) == 4)                                     \
00474       __asm __volatile (lock "orl %1, %0"                            \
00475                      : "=m" (*mem)                                   \
00476                      : "ir" (mask), "m" (*mem),                      \
00477                        "i" (offsetof (tcbhead_t, multiple_threads)));      \
00478     else                                                             \
00479       __asm __volatile (lock "orq %q1, %0"                                  \
00480                      : "=m" (*mem)                                   \
00481                      : "ir" (mask), "m" (*mem),                      \
00482                        "i" (offsetof (tcbhead_t, multiple_threads)));      \
00483   } while (0)
00484 
00485 #define atomic_or(mem, mask) __arch_or_body (LOCK_PREFIX, mem, mask)
00486 
00487 #define __arch_or_cprefix \
00488   "cmpl $0, %%fs:%P3\n\tje 0f\n\tlock\n0:\t"
00489 
00490 #define catomic_or(mem, mask) __arch_or_body (__arch_or_cprefix, mem, mask)