Back to index

glibc  2.9
libc-lock.h
Go to the documentation of this file.
00001 /* libc-internal interface for mutex locks.  NPTL version.
00002    Copyright (C) 1996-2003, 2005, 2007 Free Software Foundation, Inc.
00003    This file is part of the GNU C Library.
00004 
00005    The GNU C Library is free software; you can redistribute it and/or
00006    modify it under the terms of the GNU Lesser General Public License as
00007    published by the Free Software Foundation; either version 2.1 of the
00008    License, or (at your option) any later version.
00009 
00010    The GNU C Library is distributed in the hope that it will be useful,
00011    but WITHOUT ANY WARRANTY; without even the implied warranty of
00012    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
00013    Lesser General Public License for more details.
00014 
00015    You should have received a copy of the GNU Lesser General Public
00016    License along with the GNU C Library; see the file COPYING.LIB.  If not,
00017    write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
00018    Boston, MA 02111-1307, USA.  */
00019 
00020 #ifndef _BITS_LIBC_LOCK_H
00021 #define _BITS_LIBC_LOCK_H 1
00022 
00023 #include <pthread.h>
00024 #define __need_NULL
00025 #include <stddef.h>
00026 
00027 
00028 /* Fortunately Linux now has a means to do locking which is realtime
00029    safe without the aid of the thread library.  We also need no fancy
00030    options like error checking mutexes etc.  We only need simple
00031    locks, maybe recursive.  This can be easily and cheaply implemented
00032    using futexes.  We will use them everywhere except in ld.so since
00033    ld.so might be used on old kernels with a different libc.so.  */
00034 #ifdef _LIBC
00035 # include <lowlevellock.h>
00036 # include <tls.h>
00037 # include <pthread-functions.h>
00038 #endif
00039 
00040 /* Mutex type.  Inside libc/libpthread locks are bare futex-based words/structs; code built outside sees real pthread objects or opaque structs.  */
00041 #if defined _LIBC || defined _IO_MTSAFE_IO
00042 # if (defined NOT_IN_libc && !defined IS_IN_libpthread) || !defined _LIBC
00043 typedef pthread_mutex_t __libc_lock_t;  /* Full pthread mutex outside libc/libpthread.  */
00044 typedef struct { pthread_mutex_t mutex; } __libc_lock_recursive_t;
00045 # else
00046 typedef int __libc_lock_t;  /* Low-level futex lock word.  */
00047 typedef struct { int lock; int cnt; void *owner; } __libc_lock_recursive_t;  /* Futex word, recursion depth, owning thread.  */
00048 # endif
00049 typedef struct { pthread_mutex_t mutex; } __rtld_lock_recursive_t;
00050 # ifdef __USE_UNIX98
00051 typedef pthread_rwlock_t __libc_rwlock_t;
00052 # else
00053 typedef struct __libc_rwlock_opaque__ __libc_rwlock_t;  /* Opaque; only pointers to it may be used.  */
00054 # endif
00055 #else
00056 typedef struct __libc_lock_opaque__ __libc_lock_t;  /* All opaque outside the C library.  */
00057 typedef struct __libc_lock_recursive_opaque__ __libc_lock_recursive_t;
00058 typedef struct __libc_rwlock_opaque__ __libc_rwlock_t;
00059 #endif
00060 
00061 /* Type for key to thread-specific data.  */
00062 typedef pthread_key_t __libc_key_t;
00063 
00064 /* Define a lock variable NAME with storage class CLASS.  The lock must be
00065    initialized with __libc_lock_init before it can be used (or define it
00066    with __libc_lock_define_initialized, below).  Use `extern' for CLASS to
00067    declare a lock defined in another module.  In public structure
00068    definitions you must use a pointer to the lock structure (i.e., NAME
00069    begins with a `*'), because its storage size will not be known outside
00070    of libc.  */
00071 #define __libc_lock_define(CLASS,NAME) \
00072   CLASS __libc_lock_t NAME;
00073 #define __libc_rwlock_define(CLASS,NAME) \
00074   CLASS __libc_rwlock_t NAME;
00075 #define __libc_lock_define_recursive(CLASS,NAME) \
00076   CLASS __libc_lock_recursive_t NAME;
00077 #define __rtld_lock_define_recursive(CLASS,NAME) \
00078   CLASS __rtld_lock_recursive_t NAME;  /* ld.so locks wrap a real pthread mutex.  */
00079 
00080 /* Define an initialized lock variable NAME with storage class CLASS.
00081 
00082    For the C library we take a deeper look at the initializer.  For
00083    this implementation all fields are initialized to zero.  Therefore
00084    we don't initialize the variable which allows putting it into the
00085    BSS section.  (Except on PA-RISC and other odd architectures, where
00086    initialized locks must be set to one due to the lack of normal
00087    atomic operations.) */
00088 
00089 #if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
00090 # if LLL_LOCK_INITIALIZER == 0  /* All-zero initializer: leave uninitialized so it lands in BSS.  */
00091 #  define __libc_lock_define_initialized(CLASS,NAME) \
00092   CLASS __libc_lock_t NAME;
00093 # else
00094 #  define __libc_lock_define_initialized(CLASS,NAME) \
00095   CLASS __libc_lock_t NAME = LLL_LOCK_INITIALIZER;
00096 # endif
00097 #else
00098 # if __LT_SPINLOCK_INIT == 0  /* Zero initializer: default zero-initialization suffices.  */
00099 #  define __libc_lock_define_initialized(CLASS,NAME) \
00100   CLASS __libc_lock_t NAME;
00101 # else
00102 #  define __libc_lock_define_initialized(CLASS,NAME) \
00103   CLASS __libc_lock_t NAME = PTHREAD_MUTEX_INITIALIZER;
00104 # endif
00105 #endif
00106 
00107 #define __libc_rwlock_define_initialized(CLASS,NAME) \
00108   CLASS __libc_rwlock_t NAME = PTHREAD_RWLOCK_INITIALIZER;
00109 
00110 /* Define an initialized recursive lock variable NAME with storage
00111    class CLASS.  */
00112 #if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
00113 # if LLL_LOCK_INITIALIZER == 0
00114 #  define __libc_lock_define_initialized_recursive(CLASS,NAME) \
00115   CLASS __libc_lock_recursive_t NAME;
00116 # else
00117 #  define __libc_lock_define_initialized_recursive(CLASS,NAME) \
00118   CLASS __libc_lock_recursive_t NAME = _LIBC_LOCK_RECURSIVE_INITIALIZER;
00119 # endif
00120 # define _LIBC_LOCK_RECURSIVE_INITIALIZER \
00121   { LLL_LOCK_INITIALIZER, 0, NULL }  /* lock, cnt, owner — matches __libc_lock_recursive_t above.  */
00122 #else
00123 # define __libc_lock_define_initialized_recursive(CLASS,NAME) \
00124   CLASS __libc_lock_recursive_t NAME = _LIBC_LOCK_RECURSIVE_INITIALIZER;
00125 # define _LIBC_LOCK_RECURSIVE_INITIALIZER \
00126   {PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP}
00127 #endif
00128 
00129 #define __rtld_lock_define_initialized_recursive(CLASS,NAME) \
00130   CLASS __rtld_lock_recursive_t NAME = _RTLD_LOCK_RECURSIVE_INITIALIZER;
00131 #define _RTLD_LOCK_RECURSIVE_INITIALIZER \
00132   {PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP}
00133 
00134 #define __rtld_lock_initialize(NAME) \
00135   (void) ((NAME) = (__rtld_lock_recursive_t) _RTLD_LOCK_RECURSIVE_INITIALIZER)
00136 
00137 /* If we check for a weakly referenced symbol and then perform a
00138    normal jump to it the code generated for some platforms in case of
00139    PIC is unnecessarily slow.  What would happen is that the function
00140    is first referenced as data and then it is called indirectly
00141    through the PLT.  We can make this a direct jump.  */
00142 #ifdef __PIC__  /* Load the weak address once, then call through the pointer.  */
00143 # define __libc_maybe_call(FUNC, ARGS, ELSE) \
00144   (__extension__ ({ __typeof (FUNC) *_fn = (FUNC); \
00145                     _fn != NULL ? (*_fn) ARGS : ELSE; }))
00146 #else
00147 # define __libc_maybe_call(FUNC, ARGS, ELSE) \
00148   (FUNC != NULL ? FUNC ARGS : ELSE)
00149 #endif
00150 
00151 /* Call thread functions through the function pointer table (ptr_* members); ELSE is returned while __libc_pthread_functions_init is still zero — the table is presumably filled in by libpthread via <pthread-functions.h> (TODO confirm).  */
00152 #if defined SHARED && !defined NOT_IN_libc
00153 # define PTFAVAIL(NAME) __libc_pthread_functions_init
00154 # define __libc_ptf_call(FUNC, ARGS, ELSE) \
00155   (__libc_pthread_functions_init ? PTHFCT_CALL (ptr_##FUNC, ARGS) : ELSE)
00156 # define __libc_ptf_call_always(FUNC, ARGS) \
00157   PTHFCT_CALL (ptr_##FUNC, ARGS)
00158 #else
00159 # define PTFAVAIL(NAME) (NAME != NULL)  /* Fall back to testing the weak symbol directly.  */
00160 # define __libc_ptf_call(FUNC, ARGS, ELSE) \
00161   __libc_maybe_call (FUNC, ARGS, ELSE)
00162 # define __libc_ptf_call_always(FUNC, ARGS) \
00163   FUNC ARGS
00164 #endif
00165 
00166 
00167 /* Initialize the named lock variable, leaving it in a consistent, unlocked
00168    state.  Each macro is an expression evaluating to 0 (success).  */
00169 #if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
00170 # define __libc_lock_init(NAME) ((NAME) = LLL_LOCK_INITIALIZER, 0)
00171 #else
00172 # define __libc_lock_init(NAME) \
00173   __libc_maybe_call (__pthread_mutex_init, (&(NAME), NULL), 0)
00174 #endif
00175 #if defined SHARED && !defined NOT_IN_libc
00176 /* ((NAME) = (__libc_rwlock_t) PTHREAD_RWLOCK_INITIALIZER, 0) is
00177    inefficient.  */
00178 # define __libc_rwlock_init(NAME) \
00179   (__builtin_memset (&(NAME), '\0', sizeof (NAME)), 0)
00180 #else
00181 # define __libc_rwlock_init(NAME) \
00182   __libc_maybe_call (__pthread_rwlock_init, (&(NAME), NULL), 0)
00183 #endif
00184 
00185 /* Same as last but this time we initialize a recursive mutex.  When the thread library is absent (weak __pthread_mutex_init is NULL) this is a no-op.  */
00186 #if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
00187 # define __libc_lock_init_recursive(NAME) \
00188   ((NAME) = (__libc_lock_recursive_t) _LIBC_LOCK_RECURSIVE_INITIALIZER, 0)
00189 #else
00190 # define __libc_lock_init_recursive(NAME) \
00191   do {                                                               \
00192     if (__pthread_mutex_init != NULL)                                       \
00193       {                                                                     \
00194        pthread_mutexattr_t __attr;                                   \
00195        __pthread_mutexattr_init (&__attr);                                  \
00196        __pthread_mutexattr_settype (&__attr, PTHREAD_MUTEX_RECURSIVE_NP);    \
00197        __pthread_mutex_init (&(NAME).mutex, &__attr);                       \
00198        __pthread_mutexattr_destroy (&__attr);                               \
00199       }                                                                     \
00200   } while (0)
00201 #endif
00202 
00203 /* Initialize the ld.so recursive lock the same way (attribute-based recursive mutex).  */
00204 #define __rtld_lock_init_recursive(NAME) \
00205   do {                                                               \
00206     if (__pthread_mutex_init != NULL)                                       \
00207       {                                                                     \
00208        pthread_mutexattr_t __attr;                                   \
00209        __pthread_mutexattr_init (&__attr);                                  \
00210        __pthread_mutexattr_settype (&__attr, PTHREAD_MUTEX_RECURSIVE_NP);    \
00211        __pthread_mutex_init (&(NAME).mutex, &__attr);                       \
00212        __pthread_mutexattr_destroy (&__attr);                               \
00213       }                                                                     \
00214   } while (0)
00214 
00215 /* Finalize the named lock variable, which must be locked.  It cannot be
00216    used again until __libc_lock_init is called again on it.  This must be
00217    called on a lock variable before the containing storage is reused.  */
00218 #if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
00219 # define __libc_lock_fini(NAME) ((void) 0)  /* Futex-based locks need no destruction.  */
00220 #else
00221 # define __libc_lock_fini(NAME) \
00222   __libc_maybe_call (__pthread_mutex_destroy, (&(NAME)), 0)
00223 #endif
00224 #if defined SHARED && !defined NOT_IN_libc
00225 # define __libc_rwlock_fini(NAME) ((void) 0)
00226 #else
00227 # define __libc_rwlock_fini(NAME) \
00228   __libc_maybe_call (__pthread_rwlock_destroy, (&(NAME)), 0)
00229 #endif
00238 
00239 /* Lock the named lock variable.  */
00240 #if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
00241 # define __libc_lock_lock(NAME) \
00242   ({ lll_lock (NAME, LLL_PRIVATE); 0; })  /* Statement expression; value 0 == success.  */
00243 #else
00244 # define __libc_lock_lock(NAME) \
00245   __libc_maybe_call (__pthread_mutex_lock, (&(NAME)), 0)
00246 #endif
00247 #define __libc_rwlock_rdlock(NAME) \
00248   __libc_ptf_call (__pthread_rwlock_rdlock, (&(NAME)), 0)
00249 #define __libc_rwlock_wrlock(NAME) \
00250   __libc_ptf_call (__pthread_rwlock_wrlock, (&(NAME)), 0)
00251 
00252 /* Lock the recursive named lock variable.  Recursion is implemented by hand: the futex is taken only on first acquisition by this thread, and a depth count is kept.  */
00253 #if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
00254 # define __libc_lock_lock_recursive(NAME) \
00255   do {                                                               \
00256     void *self = THREAD_SELF;                                               \
00257     if ((NAME).owner != self)                                               \
00258       {                                                                     \
00259        lll_lock ((NAME).lock, LLL_PRIVATE);                                 \
00260        (NAME).owner = self;                                          \
00261       }                                                                     \
00262     ++(NAME).cnt;                                                    \
00263   } while (0)
00264 #else
00265 # define __libc_lock_lock_recursive(NAME) \
00266   __libc_maybe_call (__pthread_mutex_lock, (&(NAME).mutex), 0)
00267 #endif
00268 
00269 /* Try to lock the named lock variable.  */
00270 #if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
00271 # define __libc_lock_trylock(NAME) \
00272   lll_trylock (NAME)  /* 0 means acquired (cf. the recursive variant below).  */
00273 #else
00274 # define __libc_lock_trylock(NAME) \
00275   __libc_maybe_call (__pthread_mutex_trylock, (&(NAME)), 0)
00276 #endif
00277 #define __libc_rwlock_tryrdlock(NAME) \
00278   __libc_maybe_call (__pthread_rwlock_tryrdlock, (&(NAME)), 0)
00279 #define __libc_rwlock_trywrlock(NAME) \
00280   __libc_maybe_call (__pthread_rwlock_trywrlock, (&(NAME)), 0)
00281 
00282 /* Try to lock the recursive named lock variable.  Evaluates to 0 on success or EBUSY if another thread holds the lock (EBUSY presumably reaches here via <errno.h> included by users — TODO confirm).  */
00283 #if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
00284 # define __libc_lock_trylock_recursive(NAME) \
00285   ({                                                                 \
00286     int result = 0;                                                  \
00287     void *self = THREAD_SELF;                                               \
00288     if ((NAME).owner != self)                                               \
00289       {                                                                     \
00290        if (lll_trylock ((NAME).lock) == 0)                                  \
00291          {                                                           \
00292            (NAME).owner = self;                                      \
00293            (NAME).cnt = 1;                                           \
00294          }                                                           \
00295        else                                                          \
00296          result = EBUSY;                                             \
00297       }                                                                     \
00298     else                                                             \
00299       ++(NAME).cnt;                                                  \
00300     result;                                                          \
00301   })
00302 #else
00303 # define __libc_lock_trylock_recursive(NAME) \
00304   __libc_maybe_call (__pthread_mutex_trylock, (&(NAME)), 0)
00305 #endif
00306 
00307 #define __rtld_lock_trylock_recursive(NAME) \
00308   __libc_maybe_call (__pthread_mutex_trylock, (&(NAME).mutex), 0)
00309 
00310 /* Unlock the named lock variable.  */
00311 #if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
00312 # define __libc_lock_unlock(NAME) \
00313   lll_unlock (NAME, LLL_PRIVATE)
00314 #else
00315 # define __libc_lock_unlock(NAME) \
00316   __libc_maybe_call (__pthread_mutex_unlock, (&(NAME)), 0)
00317 #endif
00318 #define __libc_rwlock_unlock(NAME) \
00319   __libc_ptf_call (__pthread_rwlock_unlock, (&(NAME)), 0)
00320 
00321 /* Unlock the recursive named lock variable.  */
00322 #if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
00323 /* We do no error checking here.  The owner is cleared and the futex released only when the outermost recursion level is dropped.  */
00324 # define __libc_lock_unlock_recursive(NAME) \
00325   do {                                                               \
00326     if (--(NAME).cnt == 0)                                           \
00327       {                                                                     \
00328        (NAME).owner = NULL;                                          \
00329        lll_unlock ((NAME).lock, LLL_PRIVATE);                               \
00330       }                                                                     \
00331   } while (0)
00332 #else
00333 # define __libc_lock_unlock_recursive(NAME) \
00334   __libc_maybe_call (__pthread_mutex_unlock, (&(NAME)), 0)
00335 #endif
00336 
00337 #if defined _LIBC && defined SHARED  /* ld.so dispatches through the GL() hooks; the defaults below only adjust __count without real locking — presumably adequate only while single-threaded, with libpthread expected to install real hooks (TODO confirm).  */
00338 # define __rtld_lock_default_lock_recursive(lock) \
00339   ++((pthread_mutex_t *)(lock))->__data.__count;
00340 
00341 # define __rtld_lock_default_unlock_recursive(lock) \
00342   --((pthread_mutex_t *)(lock))->__data.__count;
00343 
00344 # define __rtld_lock_lock_recursive(NAME) \
00345   GL(dl_rtld_lock_recursive) (&(NAME).mutex)
00346 
00347 # define __rtld_lock_unlock_recursive(NAME) \
00348   GL(dl_rtld_unlock_recursive) (&(NAME).mutex)
00349 #else
00350 # define __rtld_lock_lock_recursive(NAME) \
00351   __libc_maybe_call (__pthread_mutex_lock, (&(NAME).mutex), 0)
00352 
00353 # define __rtld_lock_unlock_recursive(NAME) \
00354   __libc_maybe_call (__pthread_mutex_unlock, (&(NAME).mutex), 0)
00355 #endif
00356 
00357 /* Define once control variable.  */
00358 #if PTHREAD_ONCE_INIT == 0
00359 /* Special case for static variables where we can avoid the initialization
00360    if it is zero.  */
00361 # define __libc_once_define(CLASS, NAME) \
00362   CLASS pthread_once_t NAME
00363 #else
00364 # define __libc_once_define(CLASS, NAME) \
00365   CLASS pthread_once_t NAME = PTHREAD_ONCE_INIT
00366 #endif
00367 
00368 /* Call handler iff the first call.  Without libpthread the state is tracked by hand: ONCE_CONTROL stays at PTHREAD_ONCE_INIT until run, then bit 2 is set (presumably matching __pthread_once's own encoding — TODO confirm).  */
00369 #define __libc_once(ONCE_CONTROL, INIT_FUNCTION) \
00370   do {                                                               \
00371     if (PTFAVAIL (__pthread_once))                                   \
00372       __libc_ptf_call_always (__pthread_once, (&(ONCE_CONTROL),                    \
00373                                           INIT_FUNCTION));                  \
00374     else if ((ONCE_CONTROL) == PTHREAD_ONCE_INIT) {                         \
00375       INIT_FUNCTION ();                                                     \
00376       (ONCE_CONTROL) |= 2;                                           \
00377     }                                                                \
00378   } while (0)
00379 
00380 
00381 /* Note that for I/O cleanup handling we are using the old-style
00382    cancel handling.  It does not have to be integrated with C++ since
00383    no C++ code is called in the middle.  The old-style handling is
00384    faster and the support is not going away.  */
00385 extern void _pthread_cleanup_push (struct _pthread_cleanup_buffer *buffer,
00386                                    void (*routine) (void *), void *arg);
00387 extern void _pthread_cleanup_pop (struct _pthread_cleanup_buffer *buffer,
00388                                   int execute);
00389 extern void _pthread_cleanup_push_defer (struct _pthread_cleanup_buffer *buffer,
00390                                          void (*routine) (void *), void *arg);
00391 extern void _pthread_cleanup_pop_restore (struct _pthread_cleanup_buffer *buffer,
00392                                           int execute);
00393 
00394 /* Start critical region with cleanup.  Opens a brace-enclosed block that MUST be closed by __libc_cleanup_region_end; _buffer and _avail are shared between the paired macros.  */
00395 #define __libc_cleanup_region_start(DOIT, FCT, ARG) \
00396   { struct _pthread_cleanup_buffer _buffer;                                 \
00397     int _avail;                                                             \
00398     if (DOIT) {                                                             \
00399       _avail = PTFAVAIL (_pthread_cleanup_push_defer);                      \
00400       if (_avail) {                                                  \
00401        __libc_ptf_call_always (_pthread_cleanup_push_defer, (&_buffer, FCT,  \
00402                                                        ARG));        \
00403       } else {                                                              \
00404        _buffer.__routine = (FCT);                                    \
00405        _buffer.__arg = (ARG);                                               \
00406       }                                                                     \
00407     } else {                                                         \
00408       _avail = 0;                                                    \
00409     }
00410 
00411 /* End critical region with cleanup.  Closes the block opened by __libc_cleanup_region_start.  */
00412 #define __libc_cleanup_region_end(DOIT) \
00413     if (_avail) {                                                    \
00414       __libc_ptf_call_always (_pthread_cleanup_pop_restore, (&_buffer, DOIT));\
00415     } else if (DOIT)                                                 \
00416       _buffer.__routine (_buffer.__arg);                             \
00417   }
00418 
00419 /* Sometimes we have to exit the block in the middle.  Same as region_end but does not close the enclosing block.  */
00420 #define __libc_cleanup_end(DOIT) \
00421     if (_avail) {                                                    \
00422       __libc_ptf_call_always (_pthread_cleanup_pop_restore, (&_buffer, DOIT));\
00423     } else if (DOIT)                                                 \
00424       _buffer.__routine (_buffer.__arg)
00425 
00426 
00427 /* Normal cleanup handling, based on C cleanup attribute.  __libc_cleanup_routine runs the recorded handler when the frame goes out of scope, unless __libc_cleanup_pop cleared __do_it.  */
00428 __extern_inline void
00429 __libc_cleanup_routine (struct __pthread_cleanup_frame *f)
00430 {
00431   if (f->__do_it)
00432     f->__cancel_routine (f->__cancel_arg);
00433 }
00434 
00435 #define __libc_cleanup_push(fct, arg) \
00436   do {                                                               \
00437     struct __pthread_cleanup_frame __clframe                                \
00438       __attribute__ ((__cleanup__ (__libc_cleanup_routine)))                \
00439       = { .__cancel_routine = (fct), .__cancel_arg = (arg),                 \
00440           .__do_it = 1 };  /* Opens a do{} that __libc_cleanup_pop closes.  */
00441 
00442 #define __libc_cleanup_pop(execute) \
00443     __clframe.__do_it = (execute);                                   \
00444   } while (0)
00445 
00446 
00447 /* Create thread-specific key.  ELSE value 1 presumably signals failure when no thread library is loaded — TODO confirm callers treat nonzero as error.  */
00448 #define __libc_key_create(KEY, DESTRUCTOR) \
00449   __libc_ptf_call (__pthread_key_create, (KEY, DESTRUCTOR), 1)
00450 
00451 /* Get thread-specific data.  NULL when single-threaded.  */
00452 #define __libc_getspecific(KEY) \
00453   __libc_ptf_call (__pthread_getspecific, (KEY), NULL)
00454 
00455 /* Set thread-specific data.  */
00456 #define __libc_setspecific(KEY, VALUE) \
00457   __libc_ptf_call (__pthread_setspecific, (KEY, VALUE), 0)
00458 
00459 
00460 /* Register handlers to execute before and after `fork'.  Note that the
00461    last parameter is NULL.  The handlers registered by the libc are
00462    never removed so this is OK.  */
00463 #define __libc_atfork(PREPARE, PARENT, CHILD) \
00464   __register_atfork (PREPARE, PARENT, CHILD, NULL)
00465 extern int __register_atfork (void (*__prepare) (void),
00466                            void (*__parent) (void),
00467                            void (*__child) (void),
00468                            void *__dso_handle);
00469 
00470 /* Functions that are used by this file and are internal to the GNU C
00471    library.  Declared here so the weak references/pragmas below have prototypes.  */
00472 
00473 extern int __pthread_mutex_init (pthread_mutex_t *__mutex,
00474                              __const pthread_mutexattr_t *__mutex_attr);
00475 
00476 extern int __pthread_mutex_destroy (pthread_mutex_t *__mutex);
00477 
00478 extern int __pthread_mutex_trylock (pthread_mutex_t *__mutex);
00479 
00480 extern int __pthread_mutex_lock (pthread_mutex_t *__mutex);
00481 
00482 extern int __pthread_mutex_unlock (pthread_mutex_t *__mutex);
00483 
00484 extern int __pthread_mutexattr_init (pthread_mutexattr_t *__attr);
00485 
00486 extern int __pthread_mutexattr_destroy (pthread_mutexattr_t *__attr);
00487 
00488 extern int __pthread_mutexattr_settype (pthread_mutexattr_t *__attr,
00489                                    int __kind);
00490 
00491 #ifdef __USE_UNIX98
00492 extern int __pthread_rwlock_init (pthread_rwlock_t *__rwlock,
00493                               __const pthread_rwlockattr_t *__attr);
00494 
00495 extern int __pthread_rwlock_destroy (pthread_rwlock_t *__rwlock);
00496 
00497 extern int __pthread_rwlock_rdlock (pthread_rwlock_t *__rwlock);
00498 
00499 extern int __pthread_rwlock_tryrdlock (pthread_rwlock_t *__rwlock);
00500 
00501 extern int __pthread_rwlock_wrlock (pthread_rwlock_t *__rwlock);
00502 
00503 extern int __pthread_rwlock_trywrlock (pthread_rwlock_t *__rwlock);
00504 
00505 extern int __pthread_rwlock_unlock (pthread_rwlock_t *__rwlock);
00506 #endif
00507 
00508 extern int __pthread_key_create (pthread_key_t *__key,
00509                              void (*__destr_function) (void *));
00510 
00511 extern int __pthread_setspecific (pthread_key_t __key,
00512                               __const void *__pointer);
00513 
00514 extern void *__pthread_getspecific (pthread_key_t __key);
00515 
00516 extern int __pthread_once (pthread_once_t *__once_control,
00517                         void (*__init_routine) (void));
00518 
00519 extern int __pthread_atfork (void (*__prepare) (void),
00520                           void (*__parent) (void),
00521                           void (*__child) (void));
00522 
00523 
00524 
00525 /* Make the pthread functions weak so that we can elide them from
00526    single-threaded processes.  A weak reference that stays unresolved is
00527    NULL, which the __libc_maybe_call/PTFAVAIL tests above rely on.  */
00528 #ifndef __NO_WEAK_PTHREAD_ALIASES
00529 # ifdef weak_extern
00530 #  if _LIBC
00531 #   include <bp-sym.h>
00532 #   else
00533 #   define BP_SYM(sym) sym  /* FIX: was "BP_SYM (sym) sym" — the stray space made this an object-like macro expanding to "(sym) sym", breaking every weak_extern use below.  */
00534 #  endif
00535 weak_extern (BP_SYM (__pthread_mutex_init))
00536 weak_extern (BP_SYM (__pthread_mutex_destroy))
00537 weak_extern (BP_SYM (__pthread_mutex_lock))
00538 weak_extern (BP_SYM (__pthread_mutex_trylock))
00539 weak_extern (BP_SYM (__pthread_mutex_unlock))
00540 weak_extern (BP_SYM (__pthread_mutexattr_init))
00541 weak_extern (BP_SYM (__pthread_mutexattr_destroy))
00542 weak_extern (BP_SYM (__pthread_mutexattr_settype))
00543 weak_extern (BP_SYM (__pthread_rwlock_init))
00544 weak_extern (BP_SYM (__pthread_rwlock_destroy))
00545 weak_extern (BP_SYM (__pthread_rwlock_rdlock))
00546 weak_extern (BP_SYM (__pthread_rwlock_tryrdlock))
00547 weak_extern (BP_SYM (__pthread_rwlock_wrlock))
00548 weak_extern (BP_SYM (__pthread_rwlock_trywrlock))
00549 weak_extern (BP_SYM (__pthread_rwlock_unlock))
00550 weak_extern (BP_SYM (__pthread_key_create))
00551 weak_extern (BP_SYM (__pthread_setspecific))
00552 weak_extern (BP_SYM (__pthread_getspecific))
00553 weak_extern (BP_SYM (__pthread_once))
00554 weak_extern (__pthread_initialize)
00555 weak_extern (__pthread_atfork)
00556 weak_extern (BP_SYM (_pthread_cleanup_push_defer))
00557 weak_extern (BP_SYM (_pthread_cleanup_pop_restore))
00558 weak_extern (BP_SYM (pthread_setcancelstate))
00559 # else
00560 #  pragma weak __pthread_mutex_init
00561 #  pragma weak __pthread_mutex_destroy
00562 #  pragma weak __pthread_mutex_lock
00563 #  pragma weak __pthread_mutex_trylock
00564 #  pragma weak __pthread_mutex_unlock
00565 #  pragma weak __pthread_mutexattr_init
00566 #  pragma weak __pthread_mutexattr_destroy
00567 #  pragma weak __pthread_mutexattr_settype
#  pragma weak __pthread_rwlock_init  /* FIX: was missing from this branch though present in the weak_extern list above and referenced weakly by __libc_rwlock_init.  */
00568 #  pragma weak __pthread_rwlock_destroy
00569 #  pragma weak __pthread_rwlock_rdlock
00570 #  pragma weak __pthread_rwlock_tryrdlock
00571 #  pragma weak __pthread_rwlock_wrlock
00572 #  pragma weak __pthread_rwlock_trywrlock
00573 #  pragma weak __pthread_rwlock_unlock
00574 #  pragma weak __pthread_key_create
00575 #  pragma weak __pthread_setspecific
00576 #  pragma weak __pthread_getspecific
00577 #  pragma weak __pthread_once
00578 #  pragma weak __pthread_initialize
00579 #  pragma weak __pthread_atfork
00580 #  pragma weak _pthread_cleanup_push_defer
00581 #  pragma weak _pthread_cleanup_pop_restore
00582 #  pragma weak pthread_setcancelstate
00583 # endif
00584 #endif
00584 
00585 #endif /* bits/libc-lock.h */