
glibc 2.9
atomic.h
/* Atomic operations.  PowerPC64 version.
   Copyright (C) 2003, 2004, 2007 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Paul Mackerras <paulus@au.ibm.com>, 2003.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */

/*  POWER6 adds a "Mutex Hint" to the Load and Reserve instruction.
    This is a hint to the hardware to expect additional updates adjacent
    to the lock word or not.  If we are acquiring a Mutex, the hint
    should be true.  Otherwise we are releasing a Mutex or doing a simple
    atomic operation.  In that case we don't expect additional updates
    adjacent to the lock word after the Store Conditional and the hint
    should be false.  */

#if defined _ARCH_PWR6 || defined _ARCH_PWR6X
# define MUTEX_HINT_ACQ ",1"
# define MUTEX_HINT_REL ",0"
#else
# define MUTEX_HINT_ACQ
# define MUTEX_HINT_REL
#endif

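/* Illustrative sketch, not part of the original glibc header.  Because
   MUTEX_HINT_ACQ is a string literal, plain C string concatenation
   splices the hint into the instruction: "lwarx   %0,0,%1"
   MUTEX_HINT_ACQ becomes "lwarx   %0,0,%1,1" (the hinted form) when
   built for POWER6 and the classic "lwarx   %0,0,%1" elsewhere.  The
   function below is a hypothetical minimal lock-acquire loop showing
   where the hint lands; the name is ours, not glibc's.  */
static inline void
__example_spin_acquire (volatile int *lock)
{
  int __tmp;
  __asm __volatile ("1: lwarx   %0,0,%1" MUTEX_HINT_ACQ "\n"
                    "   cmpwi   %0,0\n"        /* lock already held?  */
                    "   bne-    1b\n"          /* spin while nonzero  */
                    "   stwcx.  %2,0,%1\n"     /* try to claim it     */
                    "   bne-    1b\n"          /* reservation lost    */
                    "   isync"                 /* acquire barrier     */
                    : "=&r" (__tmp)
                    : "b" (lock), "r" (1)
                    : "cr0", "memory");
}
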
/* The 32-bit exchange_bool is different on powerpc64 because the subf
   does signed 64-bit arithmetic while the lwarx is a 32-bit unsigned
   (load word and zero the high 32 bits) load.
   On powerpc64 register values are 64 bits by default, including oldval.
   The sign extension of the value in oldval is unknown, while lwarx
   loads the 32-bit value as unsigned, so we explicitly clear the high
   32 bits in oldval.  */
#define __arch_compare_and_exchange_bool_32_acq(mem, newval, oldval) \
({                                                                  \
  unsigned int __tmp, __tmp2;                                       \
  __asm __volatile ("   clrldi  %1,%1,32\n"                         \
                    "1: lwarx   %0,0,%2" MUTEX_HINT_ACQ "\n"        \
                    "   subf.   %0,%1,%0\n"                         \
                    "   bne     2f\n"                               \
                    "   stwcx.  %4,0,%2\n"                          \
                    "   bne-    1b\n"                               \
                    "2: " __ARCH_ACQ_INSTR                          \
                    : "=&r" (__tmp), "=r" (__tmp2)                  \
                    : "b" (mem), "1" (oldval), "r" (newval)         \
                    : "cr0", "memory");                             \
  __tmp != 0;                                                       \
})

#define __arch_compare_and_exchange_bool_32_rel(mem, newval, oldval) \
({                                                                  \
  unsigned int __tmp, __tmp2;                                       \
  __asm __volatile (__ARCH_REL_INSTR "\n"                           \
                    "   clrldi  %1,%1,32\n"                         \
                    "1: lwarx   %0,0,%2" MUTEX_HINT_REL "\n"        \
                    "   subf.   %0,%1,%0\n"                         \
                    "   bne     2f\n"                               \
                    "   stwcx.  %4,0,%2\n"                          \
                    "   bne-    1b\n"                               \
                    "2: "                                           \
                    : "=&r" (__tmp), "=r" (__tmp2)                  \
                    : "b" (mem), "1" (oldval), "r" (newval)         \
                    : "cr0", "memory");                             \
  __tmp != 0;                                                       \
})

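/* Illustrative sketch, not part of the original header: why the clrldi
   above matters.  A negative 32-bit oldval reaches the asm sign
   extended in its 64-bit register, e.g. (int) 0x80000000 becomes
   0xffffffff80000000, while lwarx zero-extends the loaded word to
   0x0000000080000000.  Without clearing the high 32 bits, the 64-bit
   subf. would report a mismatch even though the low words are equal.
   The caller below is hypothetical:  */
static inline int
__example_bool_cas32 (volatile unsigned int *mem)
{
  int __old = (int) 0x80000000;   /* sign-extends in a 64-bit register */
  /* Evaluates to 0 iff *mem matched __old and 0 was stored.  */
  return __arch_compare_and_exchange_bool_32_acq (mem, 0u, __old);
}
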
/*
 * Only powerpc64 processors support Load doubleword and reserve index
 * (ldarx) and Store doubleword conditional indexed (stdcx)
 * instructions.  So here we define the 64-bit forms.
 */
#define __arch_compare_and_exchange_bool_64_acq(mem, newval, oldval) \
({                                                                  \
  unsigned long __tmp;                                              \
  __asm __volatile (                                                \
                    "1: ldarx   %0,0,%1" MUTEX_HINT_ACQ "\n"        \
                    "   subf.   %0,%2,%0\n"                         \
                    "   bne     2f\n"                               \
                    "   stdcx.  %3,0,%1\n"                          \
                    "   bne-    1b\n"                               \
                    "2: " __ARCH_ACQ_INSTR                          \
                    : "=&r" (__tmp)                                 \
                    : "b" (mem), "r" (oldval), "r" (newval)         \
                    : "cr0", "memory");                             \
  __tmp != 0;                                                       \
})

#define __arch_compare_and_exchange_bool_64_rel(mem, newval, oldval) \
({                                                                  \
  unsigned long __tmp;                                              \
  __asm __volatile (__ARCH_REL_INSTR "\n"                           \
                    "1: ldarx   %0,0,%1" MUTEX_HINT_REL "\n"        \
                    "   subf.   %0,%2,%0\n"                         \
                    "   bne     2f\n"                               \
                    "   stdcx.  %3,0,%1\n"                          \
                    "   bne-    1b\n"                               \
                    "2: "                                           \
                    : "=&r" (__tmp)                                 \
                    : "b" (mem), "r" (oldval), "r" (newval)         \
                    : "cr0", "memory");                             \
  __tmp != 0;                                                       \
})

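/* Illustrative sketch, not part of the original header.  The 64-bit
   forms need no high-bit cleanup: ldarx loads a full doubleword, so
   the register and memory widths already agree.  A hypothetical
   caller, returning 0 on a successful exchange:  */
static inline int
__example_bool_cas64 (volatile unsigned long *mem,
                      unsigned long oldval, unsigned long newval)
{
  return __arch_compare_and_exchange_bool_64_acq (mem, newval, oldval);
}
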
#define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
  ({                                                                \
    __typeof (*(mem)) __tmp;                                        \
    __typeof (mem) __memp = (mem);                                  \
    __asm __volatile (                                              \
                      "1: ldarx   %0,0,%1" MUTEX_HINT_ACQ "\n"      \
                      "   cmpd    %0,%2\n"                          \
                      "   bne     2f\n"                             \
                      "   stdcx.  %3,0,%1\n"                        \
                      "   bne-    1b\n"                             \
                      "2: " __ARCH_ACQ_INSTR                        \
                      : "=&r" (__tmp)                               \
                      : "b" (__memp), "r" (oldval), "r" (newval)    \
                      : "cr0", "memory");                           \
    __tmp;                                                          \
  })

#define __arch_compare_and_exchange_val_64_rel(mem, newval, oldval) \
  ({                                                                \
    __typeof (*(mem)) __tmp;                                        \
    __typeof (mem) __memp = (mem);                                  \
    __asm __volatile (__ARCH_REL_INSTR "\n"                         \
                      "1: ldarx   %0,0,%1" MUTEX_HINT_REL "\n"      \
                      "   cmpd    %0,%2\n"                          \
                      "   bne     2f\n"                             \
                      "   stdcx.  %3,0,%1\n"                        \
                      "   bne-    1b\n"                             \
                      "2: "                                         \
                      : "=&r" (__tmp)                               \
                      : "b" (__memp), "r" (oldval), "r" (newval)    \
                      : "cr0", "memory");                           \
    __tmp;                                                          \
  })

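/* Illustrative sketch, not part of the original header.  Unlike the
   _bool_ forms, the _val_ forms return the value observed in memory,
   so a failed attempt hands the caller fresh data for the retry loop
   without a separate load.  A hypothetical atomic-maximum built on
   the acquire form:  */
static inline unsigned long
__example_atomic_max64 (volatile unsigned long *mem, unsigned long val)
{
  unsigned long __old, __seen;
  do
    {
      __old = *mem;
      if (__old >= val)
        return __old;            /* already at least val; done */
      __seen = __arch_compare_and_exchange_val_64_acq (mem, val, __old);
    }
  while (__seen != __old);       /* retry if another thread intervened */
  return val;
}
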
#define __arch_atomic_exchange_64_acq(mem, value) \
  ({                                                                \
    __typeof (*mem) __val;                                          \
    __asm __volatile (__ARCH_REL_INSTR "\n"                         \
                      "1: ldarx   %0,0,%2" MUTEX_HINT_ACQ "\n"      \
                      "   stdcx.  %3,0,%2\n"                        \
                      "   bne-    1b\n"                             \
                      "   " __ARCH_ACQ_INSTR                        \
                      : "=&r" (__val), "=m" (*mem)                  \
                      : "b" (mem), "r" (value), "m" (*mem)          \
                      : "cr0", "memory");                           \
    __val;                                                          \
  })

#define __arch_atomic_exchange_64_rel(mem, value) \
  ({                                                                \
    __typeof (*mem) __val;                                          \
    __asm __volatile (__ARCH_REL_INSTR "\n"                         \
                      "1: ldarx   %0,0,%2" MUTEX_HINT_REL "\n"      \
                      "   stdcx.  %3,0,%2\n"                        \
                      "   bne-    1b"                               \
                      : "=&r" (__val), "=m" (*mem)                  \
                      : "b" (mem), "r" (value), "m" (*mem)          \
                      : "cr0", "memory");                           \
    __val;                                                          \
  })

#define __arch_atomic_exchange_and_add_64(mem, value) \
  ({                                                                \
    __typeof (*mem) __val, __tmp;                                   \
    __asm __volatile ("1: ldarx   %0,0,%3\n"                        \
                      "   add     %1,%0,%4\n"                       \
                      "   stdcx.  %1,0,%3\n"                        \
                      "   bne-    1b"                               \
                      : "=&b" (__val), "=&r" (__tmp), "=m" (*mem)   \
                      : "b" (mem), "r" (value), "m" (*mem)          \
                      : "cr0", "memory");                           \
    __val;                                                          \
  })

#define __arch_atomic_increment_val_64(mem) \
  ({                                                                \
    __typeof (*(mem)) __val;                                        \
    __asm __volatile ("1: ldarx   %0,0,%2\n"                        \
                      "   addi    %0,%0,1\n"                        \
                      "   stdcx.  %0,0,%2\n"                        \
                      "   bne-    1b"                               \
                      : "=&b" (__val), "=m" (*mem)                  \
                      : "b" (mem), "m" (*mem)                       \
                      : "cr0", "memory");                           \
    __val;                                                          \
  })

#define __arch_atomic_decrement_val_64(mem) \
  ({                                                                \
    __typeof (*(mem)) __val;                                        \
    __asm __volatile ("1: ldarx   %0,0,%2\n"                        \
                      "   subi    %0,%0,1\n"                        \
                      "   stdcx.  %0,0,%2\n"                        \
                      "   bne-    1b"                               \
                      : "=&b" (__val), "=m" (*mem)                  \
                      : "b" (mem), "m" (*mem)                       \
                      : "cr0", "memory");                           \
    __val;                                                          \
  })

#define __arch_atomic_decrement_if_positive_64(mem) \
  ({ int __val, __tmp;                                              \
     __asm __volatile ("1: ldarx   %0,0,%3\n"                       \
                       "   cmpdi   0,%0,0\n"                        \
                       "   addi    %1,%0,-1\n"                      \
                       "   ble     2f\n"                            \
                       "   stdcx.  %1,0,%3\n"                       \
                       "   bne-    1b\n"                            \
                       "2: " __ARCH_ACQ_INSTR                       \
                       : "=&b" (__val), "=&r" (__tmp), "=m" (*mem)  \
                       : "b" (mem), "m" (*mem)                      \
                       : "cr0", "memory");                          \
     __val;                                                         \
  })

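/* Illustrative sketch, not part of the original header.  The
   decrement-if-positive form returns the value seen *before* the
   conditional decrement, and skips the stdcx. entirely (via the ble)
   when that value was already zero or negative, which is exactly the
   shape a semaphore trywait wants.  A hypothetical wrapper:  */
static inline int
__example_sem_trywait (volatile long *count)
{
  /* A positive return from the macro means we took a token.  */
  return __arch_atomic_decrement_if_positive_64 (count) > 0 ? 0 : -1;
}
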
/*
 * All powerpc64 processors support the new "light weight" sync (lwsync).
 */
#define atomic_read_barrier()   __asm ("lwsync" ::: "memory")
/*
 * "light weight" sync can also be used for the release barrier.
 */
#ifndef UP
# define __ARCH_REL_INSTR   "lwsync"
#endif

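/* Illustrative sketch, not part of the original header: how these
   barriers pair up.  A writer publishes data before setting a flag
   with a release barrier (lwsync, as selected for __ARCH_REL_INSTR
   above); the reader checks the flag and applies the read barrier
   before touching the data.  Functions and variables here are
   hypothetical:  */
static inline void
__example_publish (volatile long *data, volatile long *flag)
{
  *data = 42;                       /* produce the payload */
  __asm ("lwsync" ::: "memory");    /* order data store before flag */
  *flag = 1;                        /* publish */
}

static inline long
__example_consume (volatile long *data, volatile long *flag)
{
  if (*flag == 0)
    return -1;                      /* nothing published yet */
  atomic_read_barrier ();           /* order flag load before data */
  return *data;
}
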
/*
 * Include the rest of the atomic ops macros which are common to both
 * powerpc32 and powerpc64.
 */
#include_next <bits/atomic.h>