
glibc 2.9
Defines | Typedefs | Functions
libc-lock.h File Reference
#include <pthread.h>


Defines

#define __libc_lock_define(CLASS, NAME)   CLASS __libc_lock_t NAME;
#define __libc_rwlock_define(CLASS, NAME)   CLASS __libc_rwlock_t NAME;
#define __libc_lock_define_recursive(CLASS, NAME)   CLASS __libc_lock_recursive_t NAME;
#define __rtld_lock_define_recursive(CLASS, NAME)   CLASS __rtld_lock_recursive_t NAME;
#define __libc_lock_define_initialized(CLASS, NAME)   CLASS __libc_lock_t NAME;
#define __libc_rwlock_define_initialized(CLASS, NAME)   CLASS __libc_rwlock_t NAME = PTHREAD_RWLOCK_INITIALIZER;
#define __libc_lock_define_initialized_recursive(CLASS, NAME)   CLASS __libc_lock_recursive_t NAME = _LIBC_LOCK_RECURSIVE_INITIALIZER;
#define _LIBC_LOCK_RECURSIVE_INITIALIZER   {PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP}
#define __rtld_lock_define_initialized_recursive(CLASS, NAME)   CLASS __rtld_lock_recursive_t NAME = _RTLD_LOCK_RECURSIVE_INITIALIZER;
#define _RTLD_LOCK_RECURSIVE_INITIALIZER   {PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP}
#define __libc_maybe_call(FUNC, ARGS, ELSE)   (FUNC != NULL ? FUNC ARGS : ELSE)
#define __libc_maybe_call2(FUNC, ARGS, ELSE)   __libc_maybe_call (__##FUNC, ARGS, ELSE)
#define __libc_lock_init(NAME)   (__libc_maybe_call2 (pthread_mutex_init, (&(NAME), NULL), 0))
#define __libc_rwlock_init(NAME)   (__libc_maybe_call (__pthread_rwlock_init, (&(NAME), NULL), 0));
#define __libc_lock_init_recursive(NAME)
#define __rtld_lock_init_recursive(NAME)   __libc_lock_init_recursive (NAME)
#define __libc_lock_fini(NAME)   (__libc_maybe_call2 (pthread_mutex_destroy, (&(NAME)), 0));
#define __libc_rwlock_fini(NAME)   (__libc_maybe_call (__pthread_rwlock_destroy, (&(NAME)), 0));
#define __libc_lock_fini_recursive(NAME)   __libc_lock_fini ((NAME).mutex)
#define __rtld_lock_fini_recursive(NAME)   __libc_lock_fini_recursive (NAME)
#define __libc_lock_lock(NAME)   (__libc_maybe_call2 (pthread_mutex_lock, (&(NAME)), 0));
#define __libc_rwlock_rdlock(NAME)   (__libc_maybe_call (__pthread_rwlock_rdlock, (&(NAME)), 0));
#define __libc_rwlock_wrlock(NAME)   (__libc_maybe_call (__pthread_rwlock_wrlock, (&(NAME)), 0));
#define __libc_lock_lock_recursive(NAME)   __libc_lock_lock ((NAME).mutex)
#define __libc_lock_trylock(NAME)   (__libc_maybe_call2 (pthread_mutex_trylock, (&(NAME)), 0))
#define __libc_rwlock_tryrdlock(NAME)   (__libc_maybe_call (__pthread_rwlock_tryrdlock, (&(NAME)), 0))
#define __libc_rwlock_trywrlock(NAME)   (__libc_maybe_call (__pthread_rwlock_trywrlock, (&(NAME)), 0))
#define __libc_lock_trylock_recursive(NAME)   __libc_lock_trylock ((NAME).mutex)
#define __rtld_lock_trylock_recursive(NAME)   __libc_lock_trylock_recursive (NAME)
#define __libc_lock_unlock(NAME)   (__libc_maybe_call2 (pthread_mutex_unlock, (&(NAME)), 0));
#define __libc_rwlock_unlock(NAME)   (__libc_maybe_call (__pthread_rwlock_unlock, (&(NAME)), 0));
#define __libc_lock_unlock_recursive(NAME)   __libc_lock_unlock ((NAME).mutex)
#define __rtld_lock_lock_recursive(NAME)   __libc_lock_lock_recursive (NAME)
#define __rtld_lock_unlock_recursive(NAME)   __libc_lock_unlock_recursive (NAME)
#define __libc_once_define(CLASS, NAME)   CLASS pthread_once_t NAME
#define __libc_once(ONCE_CONTROL, INIT_FUNCTION)
#define __libc_cleanup_region_start(DOIT, FCT, ARG)
#define __libc_cleanup_region_end(DOIT)
#define __libc_cleanup_end(DOIT)
#define __libc_cleanup_push(fct, arg)
#define __libc_cleanup_pop(execute)
#define __libc_key_create(KEY, DESTRUCTOR)   (__libc_maybe_call (__pthread_key_create, (KEY, DESTRUCTOR), 1))
#define __libc_getspecific(KEY)   (__libc_maybe_call (__pthread_getspecific, (KEY), NULL))
#define __libc_setspecific(KEY, VALUE)   (__libc_maybe_call (__pthread_setspecific, (KEY, VALUE), 0))
#define __libc_atfork(PREPARE, PARENT, CHILD)   (__libc_maybe_call (__pthread_atfork, (PREPARE, PARENT, CHILD), 0))
#define __libc_mutex_unlock   __pthread_mutex_unlock

Typedefs

typedef struct __libc_lock_opaque__ __libc_lock_t
typedef struct __libc_lock_recursive_opaque__ __libc_lock_recursive_t
typedef struct __libc_rwlock_opaque__ __libc_rwlock_t
typedef pthread_key_t __libc_key_t

Functions

int __pthread_mutex_init (pthread_mutex_t *__mutex, __const pthread_mutexattr_t *__mutex_attr)
int __pthread_mutex_destroy (pthread_mutex_t *__mutex)
int __pthread_mutex_trylock (pthread_mutex_t *__mutex)
int __pthread_mutex_lock (pthread_mutex_t *__mutex)
int __pthread_mutex_unlock (pthread_mutex_t *__mutex)
int __pthread_mutexattr_init (pthread_mutexattr_t *__attr)
int __pthread_mutexattr_destroy (pthread_mutexattr_t *__attr)
int __pthread_mutexattr_settype (pthread_mutexattr_t *__attr, int __kind)
int __pthread_key_create (pthread_key_t *__key, void(*__destr_function)(void *))
int __pthread_setspecific (pthread_key_t __key, __const void *__pointer)
void * __pthread_getspecific (pthread_key_t __key)
int __pthread_once (pthread_once_t *__once_control, void(*__init_routine)(void))
int __pthread_atfork (void(*__prepare)(void), void(*__parent)(void), void(*__child)(void))

Define Documentation

#define __libc_atfork (   PREPARE,
  PARENT,
  CHILD 
)    (__libc_maybe_call (__pthread_atfork, (PREPARE, PARENT, CHILD), 0))

Definition at line 289 of file libc-lock.h.

#define __libc_cleanup_end (   DOIT)
Value:
if (_avail) {                                                 \
      _pthread_cleanup_pop_restore (&_buffer, (DOIT));                      \
    }

Definition at line 262 of file libc-lock.h.

#define __libc_cleanup_pop (   execute)
Value:
__libc_maybe_call (_pthread_cleanup_pop, (&_buffer, execute), 0);           \
    }

Definition at line 271 of file libc-lock.h.

#define __libc_cleanup_push (   fct,
  arg 
)
Value:
{ struct _pthread_cleanup_buffer _buffer;                                   \
    __libc_maybe_call (_pthread_cleanup_push, (&_buffer, (fct), (arg)), 0)

Definition at line 267 of file libc-lock.h.

#define __libc_cleanup_region_end (   DOIT)
Value:
if (_avail) {                                                 \
      _pthread_cleanup_pop_restore (&_buffer, (DOIT));                      \
    }                                                                \
  }

Definition at line 255 of file libc-lock.h.

#define __libc_cleanup_region_start (   DOIT,
  FCT,
  ARG 
)
Value:
{ struct _pthread_cleanup_buffer _buffer;                            \
    int _avail = (DOIT) && _pthread_cleanup_push_defer != NULL;                    \
    if (_avail) {                                                    \
      _pthread_cleanup_push_defer (&_buffer, (FCT), (ARG));                 \
    }

Definition at line 247 of file libc-lock.h.
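
These macros manage the internal deferred-cancellation cleanup buffer. As an illustrative, standalone sketch (not part of this header), the public pthread_cleanup_push / pthread_cleanup_pop pair follows the same pattern; release_buffer and worker are made-up names, and the program assumes a POSIX threads build (compile with -pthread):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static void
release_buffer (void *arg)
{
  free (arg);
}

static void *
worker (void *unused)
{
  (void) unused;
  char *buf = malloc (64);

  /* The handler runs if the region is left through cancellation, or
     immediately when the pop argument is nonzero.  */
  pthread_cleanup_push (release_buffer, buf);
  /* ... work that may contain cancellation points ... */
  pthread_cleanup_pop (1);   /* run release_buffer (buf) now */
  return NULL;
}

int
main (void)
{
  pthread_t t;
  pthread_create (&t, NULL, worker, NULL);
  pthread_join (t, NULL);
  puts ("done");
  return 0;
}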

#define __libc_getspecific (   KEY)    (__libc_maybe_call (__pthread_getspecific, (KEY), NULL))

Definition at line 280 of file libc-lock.h.

#define __libc_key_create (   KEY,
  DESTRUCTOR 
)    (__libc_maybe_call (__pthread_key_create, (KEY, DESTRUCTOR), 1))

Definition at line 276 of file libc-lock.h.

#define __libc_lock_define (   CLASS,
  NAME 
)    CLASS __libc_lock_t NAME;

Definition at line 56 of file libc-lock.h.

#define __libc_lock_define_initialized (   CLASS,
  NAME 
)    CLASS __libc_lock_t NAME;

Definition at line 75 of file libc-lock.h.

#define __libc_lock_define_initialized_recursive (   CLASS,
  NAME 
)    CLASS __libc_lock_recursive_t NAME = _LIBC_LOCK_RECURSIVE_INITIALIZER;

Definition at line 87 of file libc-lock.h.

#define __libc_lock_define_recursive (   CLASS,
  NAME 
)    CLASS __libc_lock_recursive_t NAME;

Definition at line 60 of file libc-lock.h.

#define __libc_lock_fini (   NAME)    (__libc_maybe_call2 (pthread_mutex_destroy, (&(NAME)), 0));

Definition at line 164 of file libc-lock.h.

#define __libc_lock_fini_recursive (   NAME)    __libc_lock_fini ((NAME).mutex)

Definition at line 170 of file libc-lock.h.

#define __libc_lock_init (   NAME)    (__libc_maybe_call2 (pthread_mutex_init, (&(NAME), NULL), 0))

Definition at line 129 of file libc-lock.h.

#define __libc_lock_init_recursive (   NAME)
Value:
do {                                                                        \
    if (__pthread_mutex_init != NULL)                                       \
      {                                                                     \
        pthread_mutexattr_t __attr;                                         \
        __pthread_mutexattr_init (&__attr);                                 \
        __pthread_mutexattr_settype (&__attr, PTHREAD_MUTEX_RECURSIVE_NP);  \
        __pthread_mutex_init (&(NAME).mutex, &__attr);                      \
        __pthread_mutexattr_destroy (&__attr);                              \
      }                                                                     \
  } while (0);

Definition at line 146 of file libc-lock.h.
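
Through the __pthread_* forwarders, this macro performs the same steps as the public API shown below. A minimal standalone sketch (not part of this header), assuming a POSIX threads build (compile with -pthread):

#include <pthread.h>
#include <stdio.h>

int
main (void)
{
  pthread_mutexattr_t attr;
  pthread_mutex_t lock;

  /* Same steps the macro performs: create an attribute object, mark it
     recursive, initialize the mutex, destroy the attribute.  */
  pthread_mutexattr_init (&attr);
  pthread_mutexattr_settype (&attr, PTHREAD_MUTEX_RECURSIVE);
  pthread_mutex_init (&lock, &attr);
  pthread_mutexattr_destroy (&attr);

  /* A recursive mutex may be re-acquired by the owning thread.  */
  pthread_mutex_lock (&lock);
  pthread_mutex_lock (&lock);
  pthread_mutex_unlock (&lock);
  pthread_mutex_unlock (&lock);

  pthread_mutex_destroy (&lock);
  puts ("ok");
  return 0;
}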

#define __libc_lock_lock (   NAME)    (__libc_maybe_call2 (pthread_mutex_lock, (&(NAME)), 0));

Definition at line 174 of file libc-lock.h.

#define __libc_lock_lock_recursive (   NAME)    __libc_lock_lock ((NAME).mutex)

Definition at line 182 of file libc-lock.h.

#define __libc_lock_trylock (   NAME)    (__libc_maybe_call2 (pthread_mutex_trylock, (&(NAME)), 0))

Definition at line 185 of file libc-lock.h.

#define __libc_lock_trylock_recursive (   NAME)    __libc_lock_trylock ((NAME).mutex)

Definition at line 193 of file libc-lock.h.

#define __libc_lock_unlock (   NAME)    (__libc_maybe_call2 (pthread_mutex_unlock, (&(NAME)), 0));

Definition at line 198 of file libc-lock.h.

#define __libc_lock_unlock_recursive (   NAME)    __libc_lock_unlock ((NAME).mutex)

Definition at line 204 of file libc-lock.h.

#define __libc_maybe_call (   FUNC,
  ARGS,
  ELSE 
)    (FUNC != NULL ? FUNC ARGS : ELSE)

Definition at line 105 of file libc-lock.h.
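
__libc_maybe_call relies on the fact that a weak, undefined symbol has a null address, so libc calls into libpthread only when it is actually linked in. A minimal standalone sketch of the same pattern (not part of this header; optional_hook and maybe_call are illustrative names, and the weak-reference behaviour assumes GCC on an ELF target):

#include <stdio.h>

/* Weak reference: the symbol may be absent at link time, in which case
   its address compares equal to NULL.  */
extern int optional_hook (int value) __attribute__ ((weak));

/* Same shape as __libc_maybe_call: call FUNC if it was linked in,
   otherwise evaluate the ELSE expression.  */
#define maybe_call(FUNC, ARGS, ELSE) \
  ((FUNC) != NULL ? (FUNC) ARGS : (ELSE))

int
main (void)
{
  /* With no definition of optional_hook linked in, this prints 0.  */
  printf ("%d\n", maybe_call (optional_hook, (42), 0));
  return 0;
}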

#define __libc_maybe_call2 (   FUNC,
  ARGS,
  ELSE 
)    __libc_maybe_call (__##FUNC, ARGS, ELSE)

Definition at line 114 of file libc-lock.h.

#define __libc_mutex_unlock   __pthread_mutex_unlock

Definition at line 411 of file libc-lock.h.

#define __libc_once (   ONCE_CONTROL,
  INIT_FUNCTION 
)
Value:
do {                                                                 \
    if (__pthread_once != NULL)                                             \
      __pthread_once (&(ONCE_CONTROL), (INIT_FUNCTION));                    \
    else if ((ONCE_CONTROL) == PTHREAD_ONCE_INIT) {                         \
      INIT_FUNCTION ();                                                     \
      (ONCE_CONTROL) = 2;                                            \
    }                                                                \
  } while (0)

Definition at line 235 of file libc-lock.h.
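
__libc_once forwards to __pthread_once when threads are available and otherwise runs INIT_FUNCTION inline, marking ONCE_CONTROL done. A minimal standalone sketch of the public pthread_once behaviour it wraps (illustrative names; compile with -pthread):

#include <pthread.h>
#include <stdio.h>

static pthread_once_t init_once = PTHREAD_ONCE_INIT;
static int initialized;

static void
do_init (void)
{
  /* Runs exactly once, no matter how many callers race here.  */
  initialized = 1;
}

int
main (void)
{
  pthread_once (&init_once, do_init);
  pthread_once (&init_once, do_init);   /* second call is a no-op */
  printf ("initialized = %d\n", initialized);
  return 0;
}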

#define __libc_once_define (   CLASS,
  NAME 
)    CLASS pthread_once_t NAME

Definition at line 227 of file libc-lock.h.

#define __libc_rwlock_define (   CLASS,
  NAME 
)    CLASS __libc_rwlock_t NAME;

Definition at line 58 of file libc-lock.h.

#define __libc_rwlock_define_initialized (   CLASS,
  NAME 
)    CLASS __libc_rwlock_t NAME = PTHREAD_RWLOCK_INITIALIZER;

Definition at line 82 of file libc-lock.h.

#define __libc_rwlock_fini (   NAME)    (__libc_maybe_call (__pthread_rwlock_destroy, (&(NAME)), 0));

Definition at line 166 of file libc-lock.h.

#define __libc_rwlock_init (   NAME)    (__libc_maybe_call (__pthread_rwlock_init, (&(NAME), NULL), 0));

Definition at line 132 of file libc-lock.h.

#define __libc_rwlock_rdlock (   NAME)    (__libc_maybe_call (__pthread_rwlock_rdlock, (&(NAME)), 0));

Definition at line 176 of file libc-lock.h.

#define __libc_rwlock_tryrdlock (   NAME)    (__libc_maybe_call (__pthread_rwlock_tryrdlock, (&(NAME)), 0))

Definition at line 187 of file libc-lock.h.

#define __libc_rwlock_trywrlock (   NAME)    (__libc_maybe_call (__pthread_rwlock_trywrlock, (&(NAME)), 0))

Definition at line 189 of file libc-lock.h.

#define __libc_rwlock_unlock (   NAME)    (__libc_maybe_call (__pthread_rwlock_unlock, (&(NAME)), 0));

Definition at line 200 of file libc-lock.h.

#define __libc_rwlock_wrlock (   NAME)    (__libc_maybe_call (__pthread_rwlock_wrlock, (&(NAME)), 0));

Definition at line 178 of file libc-lock.h.

#define __libc_setspecific (   KEY,
  VALUE 
)    (__libc_maybe_call (__pthread_setspecific, (KEY, VALUE), 0))

Definition at line 284 of file libc-lock.h.
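
__libc_key_create, __libc_getspecific and __libc_setspecific forward to the pthread thread-specific-data calls declared in this file. A minimal standalone sketch of that public API (illustrative names; assumes a POSIX threads build, compile with -pthread):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_key_t key;

static void
destructor (void *value)
{
  free (value);
}

static void *
thread_fn (void *arg)
{
  int *slot = malloc (sizeof *slot);
  *slot = (int) (long) arg;

  /* Public counterparts of __libc_setspecific / __libc_getspecific:
     each thread sees its own value for the same key.  */
  pthread_setspecific (key, slot);
  printf ("thread sees %d\n", *(int *) pthread_getspecific (key));
  return NULL;
}

int
main (void)
{
  pthread_t a, b;

  pthread_key_create (&key, destructor);
  pthread_create (&a, NULL, thread_fn, (void *) 1L);
  pthread_create (&b, NULL, thread_fn, (void *) 2L);
  pthread_join (a, NULL);
  pthread_join (b, NULL);
  pthread_key_delete (key);
  return 0;
}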

#define __rtld_lock_define_initialized_recursive (   CLASS,
  NAME 
)    CLASS __rtld_lock_recursive_t NAME = _RTLD_LOCK_RECURSIVE_INITIALIZER;

Definition at line 92 of file libc-lock.h.

#define __rtld_lock_define_recursive (   CLASS,
  NAME 
)    CLASS __rtld_lock_recursive_t NAME;

Definition at line 62 of file libc-lock.h.

#define __rtld_lock_fini_recursive (   NAME)    __libc_lock_fini_recursive (NAME)

Definition at line 171 of file libc-lock.h.

#define __rtld_lock_init_recursive (   NAME)    __libc_lock_init_recursive (NAME)

Definition at line 158 of file libc-lock.h.

#define __rtld_lock_lock_recursive (   NAME)    __libc_lock_lock_recursive (NAME)

Definition at line 219 of file libc-lock.h.

#define __rtld_lock_trylock_recursive (   NAME)    __libc_lock_trylock_recursive (NAME)

Definition at line 194 of file libc-lock.h.

#define __rtld_lock_unlock_recursive (   NAME)    __libc_lock_unlock_recursive (NAME)

Definition at line 220 of file libc-lock.h.

#define _LIBC_LOCK_RECURSIVE_INITIALIZER   {PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP}

Definition at line 89 of file libc-lock.h.

#define _RTLD_LOCK_RECURSIVE_INITIALIZER   {PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP}

Definition at line 94 of file libc-lock.h.


Typedef Documentation

typedef pthread_key_t __libc_key_t

Definition at line 47 of file libc-lock.h.

typedef struct __libc_lock_opaque__ __libc_lock_t

Definition at line 41 of file libc-lock.h.

typedef struct __libc_lock_recursive_opaque__ __libc_lock_recursive_t

Definition at line 42 of file libc-lock.h.

typedef struct __libc_rwlock_opaque__ __libc_rwlock_t

Definition at line 43 of file libc-lock.h.


Function Documentation

int __pthread_atfork ( void(*)(void)  __prepare,
void(*)(void)  __parent,
void(*)(void)  __child 
)

Definition at line 51 of file pthread_atfork.c.

void * __pthread_getspecific ( pthread_key_t  __key )

Definition at line 25 of file pthread_getspecific.c.

{
  struct pthread_key_data *data;

  /* Special case access to the first 2nd-level block.  This is the
     usual case.  */
  if (__builtin_expect (key < PTHREAD_KEY_2NDLEVEL_SIZE, 1))
    data = &THREAD_SELF->specific_1stblock[key];
  else
    {
      /* Verify the key is sane.  */
      if (key >= PTHREAD_KEYS_MAX)
       /* Not valid.  */
       return NULL;

      unsigned int idx1st = key / PTHREAD_KEY_2NDLEVEL_SIZE;
      unsigned int idx2nd = key % PTHREAD_KEY_2NDLEVEL_SIZE;

      /* If the sequence number doesn't match or the key cannot be defined
        for this thread since the second level array is not allocated
        return NULL, too.  */
      struct pthread_key_data *level2 = THREAD_GETMEM_NC (THREAD_SELF,
                                                   specific, idx1st);
      if (level2 == NULL)
       /* Not allocated, therefore no data.  */
       return NULL;

      /* There is data.  */
      data = &level2[idx2nd];
    }

  void *result = data->data;
  if (result != NULL)
    {
      uintptr_t seq = data->seq;

      if (__builtin_expect (seq != __pthread_keys[key].seq, 0))
       result = data->data = NULL;
    }

  return result;
}
int __pthread_key_create ( pthread_key_t *  __key,
void(*)(void *)  __destr_function 
)

Definition at line 42 of file specific.c.

{
  int i;

  pthread_mutex_lock(&pthread_keys_mutex);
  for (i = 0; i < PTHREAD_KEYS_MAX; i++) {
    if (! pthread_keys[i].in_use) {
      /* Mark key in use */
      pthread_keys[i].in_use = 1;
      pthread_keys[i].destr = destr;
      pthread_mutex_unlock(&pthread_keys_mutex);
      *key = i;
      return 0;
    }
  }
  pthread_mutex_unlock(&pthread_keys_mutex);
  return EAGAIN;
}

int __pthread_mutex_destroy ( pthread_mutex_t *  __mutex )

Definition at line 25 of file pthread_mutex_destroy.c.

{
  if ((mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP) == 0
      && mutex->__data.__nusers != 0)
    return EBUSY;

  /* Set to an invalid value.  */
  mutex->__data.__kind = -1;

  return 0;
}

int __pthread_mutex_lock ( pthread_mutex_t *  __mutex )

Definition at line 41 of file pthread_mutex_lock.c.

{
  assert (sizeof (mutex->__size) >= sizeof (mutex->__data));

  int oldval;
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);

  int retval = 0;
  switch (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex),
                         PTHREAD_MUTEX_TIMED_NP))
    {
      /* Recursive mutex.  */
    case PTHREAD_MUTEX_RECURSIVE_NP:
      /* Check whether we already hold the mutex.  */
      if (mutex->__data.__owner == id)
       {
         /* Just bump the counter.  */
         if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
           /* Overflow of the counter.  */
           return EAGAIN;

         ++mutex->__data.__count;

         return 0;
       }

      /* We have to get the mutex.  */
      LLL_MUTEX_LOCK (mutex);

      assert (mutex->__data.__owner == 0);
      mutex->__data.__count = 1;
      break;

      /* Error checking mutex.  */
    case PTHREAD_MUTEX_ERRORCHECK_NP:
      /* Check whether we already hold the mutex.  */
      if (__builtin_expect (mutex->__data.__owner == id, 0))
       return EDEADLK;

      /* FALLTHROUGH */

    case PTHREAD_MUTEX_TIMED_NP:
    simple:
      /* Normal mutex.  */
      LLL_MUTEX_LOCK (mutex);
      assert (mutex->__data.__owner == 0);
      break;

    case PTHREAD_MUTEX_ADAPTIVE_NP:
      if (! __is_smp)
       goto simple;

      if (LLL_MUTEX_TRYLOCK (mutex) != 0)
       {
         int cnt = 0;
         int max_cnt = MIN (MAX_ADAPTIVE_COUNT,
                          mutex->__data.__spins * 2 + 10);
         do
           {
             if (cnt++ >= max_cnt)
              {
                LLL_MUTEX_LOCK (mutex);
                break;
              }

#ifdef BUSY_WAIT_NOP
             BUSY_WAIT_NOP;
#endif
           }
         while (LLL_MUTEX_TRYLOCK (mutex) != 0);

         mutex->__data.__spins += (cnt - mutex->__data.__spins) / 8;
       }
      assert (mutex->__data.__owner == 0);
      break;

    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                   &mutex->__data.__list.__next);

      oldval = mutex->__data.__lock;
      do
       {
       again:
         if ((oldval & FUTEX_OWNER_DIED) != 0)
           {
             /* The previous owner died.  Try locking the mutex.  */
             int newval = id;
#ifdef NO_INCR
             newval |= FUTEX_WAITERS;
#else
             newval |= (oldval & FUTEX_WAITERS);
#endif

             newval
              = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                 newval, oldval);

             if (newval != oldval)
              {
                oldval = newval;
                goto again;
              }

             /* We got the mutex.  */
             mutex->__data.__count = 1;
             /* But it is inconsistent unless marked otherwise.  */
             mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

             ENQUEUE_MUTEX (mutex);
             THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

             /* Note that we deliberately exit here.  If we fall
               through to the end of the function __nusers would be
               incremented which is not correct because the old
               owner has to be discounted.  If we are not supposed
               to increment __nusers we actually have to decrement
               it here.  */
#ifdef NO_INCR
             --mutex->__data.__nusers;
#endif

             return EOWNERDEAD;
           }

         /* Check whether we already hold the mutex.  */
         if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
           {
             int kind = PTHREAD_MUTEX_TYPE (mutex);
             if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                             NULL);
                return EDEADLK;
              }

             if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                             NULL);

                /* Just bump the counter.  */
                if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
           }

         oldval = LLL_ROBUST_MUTEX_LOCK (mutex, id);

         if (__builtin_expect (mutex->__data.__owner
                            == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
           {
             /* This mutex is now not recoverable.  */
             mutex->__data.__count = 0;
             lll_unlock (mutex->__data.__lock,
                       PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
             THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
             return ENOTRECOVERABLE;
           }
       }
      while ((oldval & FUTEX_OWNER_DIED) != 0);

      mutex->__data.__count = 1;
      ENQUEUE_MUTEX (mutex);
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;

    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      {
       int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
       int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;

       if (robust)
         /* Note: robust PI futexes are signaled by setting bit 0.  */
         THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                      (void *) (((uintptr_t) &mutex->__data.__list.__next)
                               | 1));

       oldval = mutex->__data.__lock;

       /* Check whether we already hold the mutex.  */
       if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
         {
           if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
             {
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
              return EDEADLK;
             }

           if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
             {
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              /* Just bump the counter.  */
              if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                /* Overflow of the counter.  */
                return EAGAIN;

              ++mutex->__data.__count;

              return 0;
             }
         }

       int newval = id;
#ifdef NO_INCR
       newval |= FUTEX_WAITERS;
#endif
       oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                newval, 0);

       if (oldval != 0)
         {
           /* The mutex is locked.  The kernel will now take care of
              everything.  */
           int private = (robust
                        ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
                        : PTHREAD_MUTEX_PSHARED (mutex));
           INTERNAL_SYSCALL_DECL (__err);
           int e = INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                                  __lll_private_flag (FUTEX_LOCK_PI,
                                                   private), 1, 0);

           if (INTERNAL_SYSCALL_ERROR_P (e, __err)
              && (INTERNAL_SYSCALL_ERRNO (e, __err) == ESRCH
                  || INTERNAL_SYSCALL_ERRNO (e, __err) == EDEADLK))
             {
              assert (INTERNAL_SYSCALL_ERRNO (e, __err) != EDEADLK
                     || (kind != PTHREAD_MUTEX_ERRORCHECK_NP
                         && kind != PTHREAD_MUTEX_RECURSIVE_NP));
              /* ESRCH can happen only for non-robust PI mutexes where
                 the owner of the lock died.  */
              assert (INTERNAL_SYSCALL_ERRNO (e, __err) != ESRCH || !robust);

              /* Delay the thread indefinitely.  */
              while (1)
                pause_not_cancel ();
             }

           oldval = mutex->__data.__lock;

           assert (robust || (oldval & FUTEX_OWNER_DIED) == 0);
         }

       if (__builtin_expect (oldval & FUTEX_OWNER_DIED, 0))
         {
           atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);

           /* We got the mutex.  */
           mutex->__data.__count = 1;
           /* But it is inconsistent unless marked otherwise.  */
           mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

           ENQUEUE_MUTEX_PI (mutex);
           THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

           /* Note that we deliberately exit here.  If we fall
              through to the end of the function __nusers would be
              incremented which is not correct because the old owner
              has to be discounted.  If we are not supposed to
              increment __nusers we actually have to decrement it here.  */
#ifdef NO_INCR
           --mutex->__data.__nusers;
#endif

           return EOWNERDEAD;
         }

       if (robust
           && __builtin_expect (mutex->__data.__owner
                             == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
         {
           /* This mutex is now not recoverable.  */
           mutex->__data.__count = 0;

           INTERNAL_SYSCALL_DECL (__err);
           INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                           __lll_private_flag (FUTEX_UNLOCK_PI,
                                            PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
),
                           0, 0);

           THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
           return ENOTRECOVERABLE;
         }

       mutex->__data.__count = 1;
       if (robust)
         {
           ENQUEUE_MUTEX_PI (mutex);
           THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
         }
      }
      break;

    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      {
       int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;

       oldval = mutex->__data.__lock;

       /* Check whether we already hold the mutex.  */
       if (mutex->__data.__owner == id)
         {
           if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
             return EDEADLK;

           if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
             {
              /* Just bump the counter.  */
              if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                /* Overflow of the counter.  */
                return EAGAIN;

              ++mutex->__data.__count;

              return 0;
             }
         }

       int oldprio = -1, ceilval;
       do
         {
           int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
                       >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

           if (__pthread_current_priority () > ceiling)
             {
              if (oldprio != -1)
                __pthread_tpp_change_priority (oldprio, -1);
              return EINVAL;
             }

           retval = __pthread_tpp_change_priority (oldprio, ceiling);
           if (retval)
             return retval;

           ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
           oldprio = ceiling;

           oldval
             = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
#ifdef NO_INCR
                                               ceilval | 2,
#else
                                               ceilval | 1,
#endif
                                               ceilval);

           if (oldval == ceilval)
             break;

           do
             {
              oldval
                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                  ceilval | 2,
                                                  ceilval | 1);

              if ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval)
                break;

              if (oldval != ceilval)
                lll_futex_wait (&mutex->__data.__lock, ceilval | 2,
                              PTHREAD_MUTEX_PSHARED (mutex));
             }
           while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                 ceilval | 2, ceilval)
                 != ceilval);
         }
       while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);

       assert (mutex->__data.__owner == 0);
       mutex->__data.__count = 1;
      }
      break;

    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

  /* Record the ownership.  */
  mutex->__data.__owner = id;
#ifndef NO_INCR
  ++mutex->__data.__nusers;
#endif

  return retval;
}
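
The PTHREAD_MUTEX_ERRORCHECK_NP branch above returns EDEADLK when the owning thread tries to take the mutex again instead of deadlocking. A minimal standalone sketch of that behaviour through the public API (illustrative; compile with -pthread):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

int
main (void)
{
  pthread_mutexattr_t attr;
  pthread_mutex_t m;

  /* An error-checking mutex reports a relock attempt by its owner.  */
  pthread_mutexattr_init (&attr);
  pthread_mutexattr_settype (&attr, PTHREAD_MUTEX_ERRORCHECK);
  pthread_mutex_init (&m, &attr);
  pthread_mutexattr_destroy (&attr);

  pthread_mutex_lock (&m);
  int rc = pthread_mutex_lock (&m);
  printf ("relock -> %s\n", rc == EDEADLK ? "EDEADLK" : "other");

  pthread_mutex_unlock (&m);
  pthread_mutex_destroy (&m);
  return 0;
}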

int __pthread_mutex_trylock ( pthread_mutex_t *  __mutex )

Definition at line 28 of file pthread_mutex_trylock.c.

{
  int oldval;
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);

  switch (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex),
                         PTHREAD_MUTEX_TIMED_NP))
    {
      /* Recursive mutex.  */
    case PTHREAD_MUTEX_RECURSIVE_NP:
      /* Check whether we already hold the mutex.  */
      if (mutex->__data.__owner == id)
       {
         /* Just bump the counter.  */
         if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
           /* Overflow of the counter.  */
           return EAGAIN;

         ++mutex->__data.__count;
         return 0;
       }

      if (lll_trylock (mutex->__data.__lock) == 0)
       {
         /* Record the ownership.  */
         mutex->__data.__owner = id;
         mutex->__data.__count = 1;
         ++mutex->__data.__nusers;
         return 0;
       }
      break;

    case PTHREAD_MUTEX_ERRORCHECK_NP:
    case PTHREAD_MUTEX_TIMED_NP:
    case PTHREAD_MUTEX_ADAPTIVE_NP:
      /* Normal mutex.  */
      if (lll_trylock (mutex->__data.__lock) != 0)
       break;

      /* Record the ownership.  */
      mutex->__data.__owner = id;
      ++mutex->__data.__nusers;

      return 0;

    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                   &mutex->__data.__list.__next);

      oldval = mutex->__data.__lock;
      do
       {
       again:
         if ((oldval & FUTEX_OWNER_DIED) != 0)
           {
             /* The previous owner died.  Try locking the mutex.  */
             int newval = id | (oldval & FUTEX_WAITERS);

             newval
              = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                 newval, oldval);

             if (newval != oldval)
              {
                oldval = newval;
                goto again;
              }

             /* We got the mutex.  */
             mutex->__data.__count = 1;
             /* But it is inconsistent unless marked otherwise.  */
             mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

             ENQUEUE_MUTEX (mutex);
             THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

             /* Note that we deliberately exit here.  If we fall
               through to the end of the function __nusers would be
               incremented which is not correct because the old
               owner has to be discounted.  */
             return EOWNERDEAD;
           }

         /* Check whether we already hold the mutex.  */
         if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
           {
             int kind = PTHREAD_MUTEX_TYPE (mutex);
             if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                             NULL);
                return EDEADLK;
              }

             if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                             NULL);

                /* Just bump the counter.  */
                if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
           }

         oldval = lll_robust_trylock (mutex->__data.__lock, id);
         if (oldval != 0 && (oldval & FUTEX_OWNER_DIED) == 0)
           {
             THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

             return EBUSY;
           }

         if (__builtin_expect (mutex->__data.__owner
                            == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
           {
             /* This mutex is now not recoverable.  */
             mutex->__data.__count = 0;
             if (oldval == id)
              lll_unlock (mutex->__data.__lock,
                         PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
             THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
             return ENOTRECOVERABLE;
           }
       }
      while ((oldval & FUTEX_OWNER_DIED) != 0);

      ENQUEUE_MUTEX (mutex);
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

      mutex->__data.__owner = id;
      ++mutex->__data.__nusers;
      mutex->__data.__count = 1;

      return 0;

    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      {
       int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
       int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;

       if (robust)
         /* Note: robust PI futexes are signaled by setting bit 0.  */
         THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                      (void *) (((uintptr_t) &mutex->__data.__list.__next)
                               | 1));

       oldval = mutex->__data.__lock;

       /* Check whether we already hold the mutex.  */
       if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
         {
           if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
             {
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
              return EDEADLK;
             }

           if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
             {
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              /* Just bump the counter.  */
              if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                /* Overflow of the counter.  */
                return EAGAIN;

              ++mutex->__data.__count;

              return 0;
             }
         }

       oldval
         = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                           id, 0);

       if (oldval != 0)
         {
           if ((oldval & FUTEX_OWNER_DIED) == 0)
             {
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              return EBUSY;
             }

           assert (robust);

           /* The mutex owner died.  The kernel will now take care of
              everything.  */
           int private = (robust
                        ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
                        : PTHREAD_MUTEX_PSHARED (mutex));
           INTERNAL_SYSCALL_DECL (__err);
           int e = INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                                  __lll_private_flag (FUTEX_TRYLOCK_PI,
                                                   private), 0, 0);

           if (INTERNAL_SYSCALL_ERROR_P (e, __err)
              && INTERNAL_SYSCALL_ERRNO (e, __err) == EWOULDBLOCK)
             {
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              return EBUSY;
             }

           oldval = mutex->__data.__lock;
         }

       if (__builtin_expect (oldval & FUTEX_OWNER_DIED, 0))
         {
           atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);

           /* We got the mutex.  */
           mutex->__data.__count = 1;
           /* But it is inconsistent unless marked otherwise.  */
           mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

           ENQUEUE_MUTEX (mutex);
           THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

           /* Note that we deliberately exit here.  If we fall
              through to the end of the function __nusers would be
              incremented which is not correct because the old owner
              has to be discounted.  */
           return EOWNERDEAD;
         }

       if (robust
           && __builtin_expect (mutex->__data.__owner
                             == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
         {
           /* This mutex is now not recoverable.  */
           mutex->__data.__count = 0;

           INTERNAL_SYSCALL_DECL (__err);
           INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                           __lll_private_flag (FUTEX_UNLOCK_PI,
                                            PTHREAD_ROBUST_MUTEX_PSHARED (mutex)),
                           0, 0);

           THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
           return ENOTRECOVERABLE;
         }

       if (robust)
         {
           ENQUEUE_MUTEX_PI (mutex);
           THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
         }

       mutex->__data.__owner = id;
       ++mutex->__data.__nusers;
       mutex->__data.__count = 1;

       return 0;
      }

    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      {
       int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;

       oldval = mutex->__data.__lock;

       /* Check whether we already hold the mutex.  */
       if (mutex->__data.__owner == id)
         {
           if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
             return EDEADLK;

           if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
             {
              /* Just bump the counter.  */
              if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                /* Overflow of the counter.  */
                return EAGAIN;

              ++mutex->__data.__count;

              return 0;
             }
         }

       int oldprio = -1, ceilval;
       do
         {
           int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
                       >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

           if (__pthread_current_priority () > ceiling)
             {
              if (oldprio != -1)
                __pthread_tpp_change_priority (oldprio, -1);
              return EINVAL;
             }

           int retval = __pthread_tpp_change_priority (oldprio, ceiling);
           if (retval)
             return retval;

           ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
           oldprio = ceiling;

           oldval
             = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                               ceilval | 1, ceilval);

           if (oldval == ceilval)
             break;
         }
       while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);

       if (oldval != ceilval)
         {
           __pthread_tpp_change_priority (oldprio, -1);
           break;
         }

       assert (mutex->__data.__owner == 0);
       /* Record the ownership.  */
       mutex->__data.__owner = id;
       ++mutex->__data.__nusers;
       mutex->__data.__count = 1;

       return 0;
      }
      break;

    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

  return EBUSY;
}
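
For the plain mutex kinds, the code above returns EBUSY when the lock is already held rather than blocking. A minimal standalone sketch of that behaviour through the public API (illustrative; compile with -pthread):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

int
main (void)
{
  pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;

  pthread_mutex_lock (&m);
  /* For a normal mutex that is already locked, trylock fails with
     EBUSY instead of blocking.  */
  int rc = pthread_mutex_trylock (&m);
  printf ("trylock -> %s\n", rc == EBUSY ? "EBUSY" : "acquired");

  pthread_mutex_unlock (&m);
  return 0;
}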

int __pthread_mutex_unlock ( pthread_mutex_t *  __mutex )

Definition at line 265 of file pthread_mutex_unlock.c.

int __pthread_mutexattr_destroy ( pthread_mutexattr_t *  __attr )

Definition at line 24 of file pthread_mutexattr_destroy.c.

{
  return 0;
}


int __pthread_mutexattr_init ( pthread_mutexattr_t *  __attr )

Definition at line 25 of file pthread_mutexattr_init.c.

{
  if (sizeof (struct pthread_mutexattr) != sizeof (pthread_mutexattr_t))
    memset (attr, '\0', sizeof (*attr));

  /* We use bit 31 to signal whether the mutex is going to be
     process-shared or not.  By default it is zero, i.e., the mutex is
     not process-shared.  */
  ((struct pthread_mutexattr *) attr)->mutexkind = PTHREAD_MUTEX_NORMAL;

  return 0;
}


int __pthread_mutexattr_settype ( pthread_mutexattr_t *  __attr,
int  __kind 
)

Definition at line 25 of file pthread_mutexattr_settype.c.

{
  struct pthread_mutexattr *iattr;

  if (kind < PTHREAD_MUTEX_NORMAL || kind > PTHREAD_MUTEX_ADAPTIVE_NP)
    return EINVAL;

  iattr = (struct pthread_mutexattr *) attr;

  iattr->mutexkind = (iattr->mutexkind & PTHREAD_MUTEXATTR_FLAG_BITS) | kind;

  return 0;
}


int __pthread_once ( pthread_once_t *  __once_control,
void(*)(void)  __init_routine 
)

Definition at line 287 of file mutex.c.

{
  /* flag for doing the condition broadcast outside of mutex */
  int state_changed;

  /* Test without locking first for speed */
  if (*once_control == DONE) {
    READ_MEMORY_BARRIER();
    return 0;
  }
  /* Lock and test again */

  state_changed = 0;

  pthread_mutex_lock(&once_masterlock);

  /* If this object was left in an IN_PROGRESS state in a parent
     process (indicated by stale generation field), reset it to NEVER. */
  if ((*once_control & 3) == IN_PROGRESS && (*once_control & ~3) != fork_generation)
    *once_control = NEVER;

  /* If init_routine is being run by another thread, wait until
     it completes. */
  while ((*once_control & 3) == IN_PROGRESS) {
    pthread_cond_wait(&once_finished, &once_masterlock);
  }
  /* Here *once_control is stable and either NEVER or DONE. */
  if (*once_control == NEVER) {
    *once_control = IN_PROGRESS | fork_generation;
    pthread_mutex_unlock(&once_masterlock);
    pthread_cleanup_push(pthread_once_cancelhandler, once_control);
    init_routine();
    pthread_cleanup_pop(0);
    pthread_mutex_lock(&once_masterlock);
    WRITE_MEMORY_BARRIER();
    *once_control = DONE;
    state_changed = 1;
  }
  pthread_mutex_unlock(&once_masterlock);

  if (state_changed)
    pthread_cond_broadcast(&once_finished);

  return 0;
}
int __pthread_setspecific ( pthread_key_t  __key,
__const void *  __pointer 
)