glibc 2.9
atomic.h
/* Atomic operations used inside libc.  Linux/SH version.
   Copyright (C) 2003 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */

#include <stdint.h>


typedef int8_t atomic8_t;
typedef uint8_t uatomic8_t;
typedef int_fast8_t atomic_fast8_t;
typedef uint_fast8_t uatomic_fast8_t;

typedef int16_t atomic16_t;
typedef uint16_t uatomic16_t;
typedef int_fast16_t atomic_fast16_t;
typedef uint_fast16_t uatomic_fast16_t;

typedef int32_t atomic32_t;
typedef uint32_t uatomic32_t;
typedef int_fast32_t atomic_fast32_t;
typedef uint_fast32_t uatomic_fast32_t;

typedef int64_t atomic64_t;
typedef uint64_t uatomic64_t;
typedef int_fast64_t atomic_fast64_t;
typedef uint_fast64_t uatomic_fast64_t;

typedef intptr_t atomicptr_t;
typedef uintptr_t uatomicptr_t;
typedef intmax_t atomic_max_t;
typedef uintmax_t uatomic_max_t;

/* The SH kernel implements gUSA ("g" User Space Atomicity) support
   for user-space atomicity.  The atomicity macros below use this scheme.

  References:
    Niibe Yutaka, "gUSA: Simple and Efficient User Space Atomicity
    Emulation with Little Kernel Modification", Linux Conference 2002,
    Japan. http://lc.linux.or.jp/lc2002/papers/niibe0919h.pdf (in
    Japanese).

    B.N. Bershad, D. Redell, and J. Ellis, "Fast Mutual Exclusion for
    Uniprocessors", Proceedings of the Fifth Architectural Support for
    Programming Languages and Operating Systems (ASPLOS), pp. 223-233,
    October 1992. http://www.cs.washington.edu/homes/bershad/Papers/Rcs.ps

  SuperH ABI:
      r15:    -(size of the atomic instruction sequence) < 0
      r0:     end point of the atomic sequence
      r1:     saved stack pointer
*/
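
/* A minimal sketch of the common shape of the sequences below (illustrative
   only; SIZE stands for the byte length of the region between labels 0 and 1,
   and the restart behaviour described here is an assumption about the
   kernel's gUSA handling rather than something this header defines):

       .align 2
       mova   1f,r0       ! r0  <- address of the end label
       mov    r15,r1      ! r1  <- saved stack pointer
       mov    #-SIZE,r15  ! r15 <  0 marks "atomic sequence in progress"
     0:                   ! load / modify / store of the shared word
     1: mov    r1,r15     ! restore r15; the sequence is finished

   If the task is preempted while r15 is negative, the kernel is expected to
   restart it at the beginning of the region, so the load/modify/store appears
   atomic to other user-space code on a uniprocessor.  */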

#define __arch_compare_and_exchange_val_8_acq(mem, newval, oldval) \
  ({ __typeof (*(mem)) __result; \
     __asm __volatile ("\
       .align 2\n\
       mova 1f,r0\n\
       nop\n\
       mov r15,r1\n\
       mov #-8,r15\n\
     0: mov.b @%1,%0\n\
       cmp/eq %0,%3\n\
       bf 1f\n\
       mov.b %2,@%1\n\
     1: mov r1,r15"\
       : "=&r" (__result) : "r" (mem), "r" (newval), "r" (oldval) \
       : "r0", "r1", "t", "memory"); \
     __result; })

#define __arch_compare_and_exchange_val_16_acq(mem, newval, oldval) \
  ({ __typeof (*(mem)) __result; \
     __asm __volatile ("\
       .align 2\n\
       mova 1f,r0\n\
       nop\n\
       mov r15,r1\n\
       mov #-8,r15\n\
     0: mov.w @%1,%0\n\
       cmp/eq %0,%3\n\
       bf 1f\n\
       mov.w %2,@%1\n\
     1: mov r1,r15"\
       : "=&r" (__result) : "r" (mem), "r" (newval), "r" (oldval) \
       : "r0", "r1", "t", "memory"); \
     __result; })

#define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \
  ({ __typeof (*(mem)) __result; \
     __asm __volatile ("\
       .align 2\n\
       mova 1f,r0\n\
       nop\n\
       mov r15,r1\n\
       mov #-8,r15\n\
     0: mov.l @%1,%0\n\
       cmp/eq %0,%3\n\
       bf 1f\n\
       mov.l %2,@%1\n\
     1: mov r1,r15"\
       : "=&r" (__result) : "r" (mem), "r" (newval), "r" (oldval) \
       : "r0", "r1", "t", "memory"); \
     __result; })

/* XXX We do not really need a 64-bit compare-and-exchange, at least
   not at the moment.  Using it would also cause portability problems,
   since few other 32-bit architectures support such an operation.  So
   don't define any real code for now; the stub below just aborts.  */

# define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
  (abort (), (__typeof (*mem)) 0)

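/* Usage sketch (variable names are hypothetical): generic glibc code reaches
   these per-size macros through wrappers that dispatch on sizeof (*mem).
   Each macro returns the value observed in memory, so success is detected by
   comparing the result against the expected old value:

     int lock = 0;
     int seen = __arch_compare_and_exchange_val_32_acq (&lock, 1, 0);
     if (seen == 0)
       ;  // exchange happened: lock is now 1
     else
       ;  // exchange did not happen: seen holds the current value
*/
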
#define atomic_exchange_and_add(mem, value) \
  ({ __typeof (*(mem)) __result, __tmp, __value = (value); \
     if (sizeof (*(mem)) == 1) \
       __asm __volatile ("\
         .align 2\n\
         mova 1f,r0\n\
         mov r15,r1\n\
         mov #-6,r15\n\
       0: mov.b @%2,%0\n\
         add %0,%1\n\
         mov.b %1,@%2\n\
       1: mov r1,r15"\
       : "=&r" (__result), "=&r" (__tmp) : "r" (mem), "1" (__value) \
       : "r0", "r1", "memory"); \
     else if (sizeof (*(mem)) == 2) \
       __asm __volatile ("\
         .align 2\n\
         mova 1f,r0\n\
         mov r15,r1\n\
         mov #-6,r15\n\
       0: mov.w @%2,%0\n\
         add %0,%1\n\
         mov.w %1,@%2\n\
       1: mov r1,r15"\
       : "=&r" (__result), "=&r" (__tmp) : "r" (mem), "1" (__value) \
       : "r0", "r1", "memory"); \
     else if (sizeof (*(mem)) == 4) \
       __asm __volatile ("\
         .align 2\n\
         mova 1f,r0\n\
         mov r15,r1\n\
         mov #-6,r15\n\
       0: mov.l @%2,%0\n\
         add %0,%1\n\
         mov.l %1,@%2\n\
       1: mov r1,r15"\
       : "=&r" (__result), "=&r" (__tmp) : "r" (mem), "1" (__value) \
       : "r0", "r1", "memory"); \
     else \
       { \
        __typeof (mem) memp = (mem); \
        /* Retry until the CAS succeeds, i.e. returns the expected old value.  */ \
        do \
          __result = *memp; \
        while (__arch_compare_and_exchange_val_64_acq \
               (memp, __result + __value, __result) != __result); \
        (void) __value; \
       } \
     __result; })
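
/* Example (hypothetical counter): atomic_exchange_and_add yields the value
   the location held *before* the addition:

     int counter = 5;
     int before = atomic_exchange_and_add (&counter, 3);
     // before == 5, counter == 8 (absent concurrent updates)
*/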

#define atomic_add(mem, value) \
  (void) ({ __typeof (*(mem)) __tmp, __value = (value); \
           if (sizeof (*(mem)) == 1) \
             __asm __volatile ("\
              .align 2\n\
              mova 1f,r0\n\
              mov r15,r1\n\
              mov #-6,r15\n\
            0: mov.b @%1,r2\n\
              add r2,%0\n\
              mov.b %0,@%1\n\
            1: mov r1,r15"\
              : "=&r" (__tmp) : "r" (mem), "0" (__value) \
              : "r0", "r1", "r2", "memory"); \
           else if (sizeof (*(mem)) == 2) \
             __asm __volatile ("\
              .align 2\n\
              mova 1f,r0\n\
              mov r15,r1\n\
              mov #-6,r15\n\
            0: mov.w @%1,r2\n\
              add r2,%0\n\
              mov.w %0,@%1\n\
            1: mov r1,r15"\
              : "=&r" (__tmp) : "r" (mem), "0" (__value) \
              : "r0", "r1", "r2", "memory"); \
           else if (sizeof (*(mem)) == 4) \
             __asm __volatile ("\
              .align 2\n\
              mova 1f,r0\n\
              mov r15,r1\n\
              mov #-6,r15\n\
            0: mov.l @%1,r2\n\
              add r2,%0\n\
              mov.l %0,@%1\n\
            1: mov r1,r15"\
              : "=&r" (__tmp) : "r" (mem), "0" (__value) \
              : "r0", "r1", "r2", "memory"); \
           else \
             { \
              __typeof (*(mem)) oldval; \
              __typeof (mem) memp = (mem); \
              /* Retry until the CAS succeeds, i.e. returns the expected old value.  */ \
              do \
                oldval = *memp; \
              while (__arch_compare_and_exchange_val_64_acq \
                     (memp, oldval + __value, oldval) != oldval); \
              (void) __value; \
             } \
           })

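/* Example: atomic_add is the value-discarding variant; it updates the word
   in place and produces no result:

     atomic_add (&counter, -2);   // counter decreases by 2
*/
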
#define atomic_add_negative(mem, value) \
  ({ unsigned char __result; \
     __typeof (*(mem)) __tmp, __value = (value); \
     if (sizeof (*(mem)) == 1) \
       __asm __volatile ("\
         .align 2\n\
         mova 1f,r0\n\
         mov r15,r1\n\
         mov #-6,r15\n\
       0: mov.b @%2,r2\n\
         add r2,%1\n\
         mov.b %1,@%2\n\
       1: mov r1,r15\n\
         shal %1\n\
         movt %0"\
       : "=r" (__result), "=&r" (__tmp) : "r" (mem), "1" (__value) \
       : "r0", "r1", "r2", "t", "memory"); \
     else if (sizeof (*(mem)) == 2) \
       __asm __volatile ("\
         .align 2\n\
         mova 1f,r0\n\
         mov r15,r1\n\
         mov #-6,r15\n\
       0: mov.w @%2,r2\n\
         add r2,%1\n\
         mov.w %1,@%2\n\
       1: mov r1,r15\n\
         shal %1\n\
         movt %0"\
       : "=r" (__result), "=&r" (__tmp) : "r" (mem), "1" (__value) \
       : "r0", "r1", "r2", "t", "memory"); \
     else if (sizeof (*(mem)) == 4) \
       __asm __volatile ("\
         .align 2\n\
         mova 1f,r0\n\
         mov r15,r1\n\
         mov #-6,r15\n\
       0: mov.l @%2,r2\n\
         add r2,%1\n\
         mov.l %1,@%2\n\
       1: mov r1,r15\n\
         shal %1\n\
         movt %0"\
       : "=r" (__result), "=&r" (__tmp) : "r" (mem), "1" (__value) \
       : "r0", "r1", "r2", "t", "memory"); \
     else \
       abort (); \
     __result; })

#define atomic_add_zero(mem, value) \
  ({ unsigned char __result; \
     __typeof (*(mem)) __tmp, __value = (value); \
     if (sizeof (*(mem)) == 1) \
       __asm __volatile ("\
         .align 2\n\
         mova 1f,r0\n\
         mov r15,r1\n\
         mov #-6,r15\n\
       0: mov.b @%2,r2\n\
         add r2,%1\n\
         mov.b %1,@%2\n\
       1: mov r1,r15\n\
         tst %1,%1\n\
         movt %0"\
       : "=r" (__result), "=&r" (__tmp) : "r" (mem), "1" (__value) \
       : "r0", "r1", "r2", "t", "memory"); \
     else if (sizeof (*(mem)) == 2) \
       __asm __volatile ("\
         .align 2\n\
         mova 1f,r0\n\
         mov r15,r1\n\
         mov #-6,r15\n\
       0: mov.w @%2,r2\n\
         add r2,%1\n\
         mov.w %1,@%2\n\
       1: mov r1,r15\n\
         tst %1,%1\n\
         movt %0"\
       : "=r" (__result), "=&r" (__tmp) : "r" (mem), "1" (__value) \
       : "r0", "r1", "r2", "t", "memory"); \
     else if (sizeof (*(mem)) == 4) \
       __asm __volatile ("\
         .align 2\n\
         mova 1f,r0\n\
         mov r15,r1\n\
         mov #-6,r15\n\
       0: mov.l @%2,r2\n\
         add r2,%1\n\
         mov.l %1,@%2\n\
       1: mov r1,r15\n\
         tst %1,%1\n\
         movt %0"\
       : "=r" (__result), "=&r" (__tmp) : "r" (mem), "1" (__value) \
       : "r0", "r1", "r2", "t", "memory"); \
     else \
       abort (); \
     __result; })

#define atomic_increment_and_test(mem) atomic_add_zero((mem), 1)
#define atomic_decrement_and_test(mem) atomic_add_zero((mem), -1)

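/* Example (hypothetical reference count): atomic_add_negative and
   atomic_add_zero report a property of the *new* value (negative or zero,
   respectively), so atomic_decrement_and_test is true exactly when the count
   reaches zero:

     int refs = 1;
     if (atomic_decrement_and_test (&refs))
       ;  // refs is now 0: the last reference was dropped
*/
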
#define atomic_bit_set(mem, bit) \
  (void) ({ unsigned int __mask = 1 << (bit); \
           if (sizeof (*(mem)) == 1) \
             __asm __volatile ("\
              .align 2\n\
              mova 1f,r0\n\
              mov r15,r1\n\
              mov #-6,r15\n\
            0: mov.b @%0,r2\n\
              or %1,r2\n\
              mov.b r2,@%0\n\
            1: mov r1,r15"\
              : : "r" (mem), "r" (__mask) \
              : "r0", "r1", "r2", "memory"); \
           else if (sizeof (*(mem)) == 2) \
             __asm __volatile ("\
              .align 2\n\
              mova 1f,r0\n\
              mov r15,r1\n\
              mov #-6,r15\n\
            0: mov.w @%0,r2\n\
              or %1,r2\n\
              mov.w r2,@%0\n\
            1: mov r1,r15"\
              : : "r" (mem), "r" (__mask) \
              : "r0", "r1", "r2", "memory"); \
           else if (sizeof (*(mem)) == 4) \
             __asm __volatile ("\
              .align 2\n\
              mova 1f,r0\n\
              mov r15,r1\n\
              mov #-6,r15\n\
            0: mov.l @%0,r2\n\
              or %1,r2\n\
              mov.l r2,@%0\n\
            1: mov r1,r15"\
              : : "r" (mem), "r" (__mask) \
              : "r0", "r1", "r2", "memory"); \
           else \
             abort (); \
           })

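/* Example: atomic_bit_set ORs a single-bit mask into the word and discards
   any result:

     unsigned int flags = 0;
     atomic_bit_set (&flags, 3);   // flags now has bit 3 (value 8) set
*/
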
#define atomic_bit_test_set(mem, bit) \
  ({ unsigned int __mask = 1 << (bit); \
     unsigned int __result = __mask; \
     if (sizeof (*(mem)) == 1) \
       __asm __volatile ("\
         .align 2\n\
         mova 1f,r0\n\
         nop\n\
         mov r15,r1\n\
         mov #-8,r15\n\
       0: mov.b @%2,r2\n\
         or r2,%1\n\
         and r2,%0\n\
         mov.b %1,@%2\n\
       1: mov r1,r15"\
       : "=&r" (__result), "=&r" (__mask) \
       : "r" (mem), "0" (__result), "1" (__mask) \
       : "r0", "r1", "r2", "memory"); \
     else if (sizeof (*(mem)) == 2) \
       __asm __volatile ("\
         .align 2\n\
         mova 1f,r0\n\
         nop\n\
         mov r15,r1\n\
         mov #-8,r15\n\
       0: mov.w @%2,r2\n\
         or r2,%1\n\
         and r2,%0\n\
         mov.w %1,@%2\n\
       1: mov r1,r15"\
       : "=&r" (__result), "=&r" (__mask) \
       : "r" (mem), "0" (__result), "1" (__mask) \
       : "r0", "r1", "r2", "memory"); \
     else if (sizeof (*(mem)) == 4) \
       __asm __volatile ("\
         .align 2\n\
         mova 1f,r0\n\
         nop\n\
         mov r15,r1\n\
         mov #-8,r15\n\
       0: mov.l @%2,r2\n\
         or r2,%1\n\
         and r2,%0\n\
         mov.l %1,@%2\n\
       1: mov r1,r15"\
       : "=&r" (__result), "=&r" (__mask) \
       : "r" (mem), "0" (__result), "1" (__mask) \
       : "r0", "r1", "r2", "memory"); \
     else \
       abort (); \
     __result; })
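
/* Example: atomic_bit_test_set additionally reports whether the bit was
   already set before the operation (a nonzero result means it was):

     unsigned int flags = 0;
     if (atomic_bit_test_set (&flags, 3) == 0)
       ;  // this caller set bit 3 first; later callers see a nonzero result
*/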