glibc  2.9
pthreadP.h File Reference
#include <pthread.h>
#include <setjmp.h>
#include <stdbool.h>
#include <sys/syscall.h>
#include "descr.h"
#include <tls.h>
#include <lowlevellock.h>
#include <stackinfo.h>
#include <internaltypes.h>
#include <pthread-functions.h>
#include <atomic.h>
#include <kernel-features.h>

Defines

#define THREAD_ATOMIC_CMPXCHG_VAL(descr, member, new, old)   atomic_compare_and_exchange_val_acq (&(descr)->member, new, old)
#define THREAD_ATOMIC_BIT_SET(descr, member, bit)   atomic_bit_set (&(descr)->member, bit)
#define MAX_ADAPTIVE_COUNT   100
#define PTHREAD_MUTEX_INCONSISTENT   INT_MAX
#define PTHREAD_MUTEX_NOTRECOVERABLE   (INT_MAX - 1)
#define PTHREAD_MUTEX_PSHARED_BIT   128
#define PTHREAD_MUTEX_TYPE(m)   ((m)->__data.__kind & 127)
#define PTHREAD_MUTEX_PSHARED(m)   (((m)->__data.__kind & 128) ? LLL_SHARED : LLL_PRIVATE)
#define PTHREAD_ROBUST_MUTEX_PSHARED(m)   LLL_SHARED
#define PTHREAD_MUTEX_PRIO_CEILING_SHIFT   19
#define PTHREAD_MUTEX_PRIO_CEILING_MASK   0xfff80000
#define PTHREAD_MUTEXATTR_PROTOCOL_SHIFT   28
#define PTHREAD_MUTEXATTR_PROTOCOL_MASK   0x30000000
#define PTHREAD_MUTEXATTR_PRIO_CEILING_SHIFT   12
#define PTHREAD_MUTEXATTR_PRIO_CEILING_MASK   0x00fff000
#define PTHREAD_MUTEXATTR_FLAG_ROBUST   0x40000000
#define PTHREAD_MUTEXATTR_FLAG_PSHARED   0x80000000
#define PTHREAD_MUTEXATTR_FLAG_BITS
#define PTHREAD_RWLOCK_PREFER_READER_P(rwlock)   ((rwlock)->__data.__flags == 0)
#define FUTEX_WAITERS   0x80000000
#define FUTEX_OWNER_DIED   0x40000000
#define FUTEX_TID_MASK   0x3fffffff
#define DEBUGGING_P   0
 For now disable debugging support.
#define INVALID_TD_P(pd)   __builtin_expect ((pd)->tid <= 0, 0)
#define INVALID_NOT_TERMINATED_TD_P(pd)   __builtin_expect ((pd)->tid < 0, 0)
#define CANCELLATION_P(self)
#define CANCEL_ASYNC()   __pthread_enable_asynccancel ()
#define CANCEL_RESET(oldtype)   __pthread_disable_asynccancel (oldtype)
#define LIBC_CANCEL_ASYNC()   __libc_enable_asynccancel ()
#define LIBC_CANCEL_RESET(oldtype)   __libc_disable_asynccancel (oldtype)
#define LIBC_CANCEL_HANDLED()
#define SIGCANCEL   __SIGRTMIN
#define SIGTIMER   SIGCANCEL
#define SIGSETXID   (__SIGRTMIN + 1)
#define __find_thread_by_id(tid)   (__find_thread_by_id ? (__find_thread_by_id) (tid) : (struct pthread *) NULL)
#define PTHREAD_STATIC_FN_REQUIRE(name)   __asm (".globl " #name);

Enumerations

enum  {
  PTHREAD_MUTEX_KIND_MASK_NP = 3, PTHREAD_MUTEX_ROBUST_NORMAL_NP = 16, PTHREAD_MUTEX_ROBUST_RECURSIVE_NP = PTHREAD_MUTEX_ROBUST_NORMAL_NP | PTHREAD_MUTEX_RECURSIVE_NP, PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP = PTHREAD_MUTEX_ROBUST_NORMAL_NP | PTHREAD_MUTEX_ERRORCHECK_NP,
  PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP = PTHREAD_MUTEX_ROBUST_NORMAL_NP | PTHREAD_MUTEX_ADAPTIVE_NP, PTHREAD_MUTEX_PRIO_INHERIT_NP = 32, PTHREAD_MUTEX_PI_NORMAL_NP = PTHREAD_MUTEX_PRIO_INHERIT_NP | PTHREAD_MUTEX_NORMAL, PTHREAD_MUTEX_PI_RECURSIVE_NP = PTHREAD_MUTEX_PRIO_INHERIT_NP | PTHREAD_MUTEX_RECURSIVE_NP,
  PTHREAD_MUTEX_PI_ERRORCHECK_NP = PTHREAD_MUTEX_PRIO_INHERIT_NP | PTHREAD_MUTEX_ERRORCHECK_NP, PTHREAD_MUTEX_PI_ADAPTIVE_NP = PTHREAD_MUTEX_PRIO_INHERIT_NP | PTHREAD_MUTEX_ADAPTIVE_NP, PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP = PTHREAD_MUTEX_PRIO_INHERIT_NP | PTHREAD_MUTEX_ROBUST_NORMAL_NP, PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP = PTHREAD_MUTEX_PRIO_INHERIT_NP | PTHREAD_MUTEX_ROBUST_RECURSIVE_NP,
  PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP = PTHREAD_MUTEX_PRIO_INHERIT_NP | PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP, PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP = PTHREAD_MUTEX_PRIO_INHERIT_NP | PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP, PTHREAD_MUTEX_PRIO_PROTECT_NP = 64, PTHREAD_MUTEX_PP_NORMAL_NP = PTHREAD_MUTEX_PRIO_PROTECT_NP | PTHREAD_MUTEX_NORMAL,
  PTHREAD_MUTEX_PP_RECURSIVE_NP = PTHREAD_MUTEX_PRIO_PROTECT_NP | PTHREAD_MUTEX_RECURSIVE_NP, PTHREAD_MUTEX_PP_ERRORCHECK_NP = PTHREAD_MUTEX_PRIO_PROTECT_NP | PTHREAD_MUTEX_ERRORCHECK_NP, PTHREAD_MUTEX_PP_ADAPTIVE_NP = PTHREAD_MUTEX_PRIO_PROTECT_NP | PTHREAD_MUTEX_ADAPTIVE_NP
}

Functions

 hidden_proto (__stack_user) extern struct pthread_attr *__attr_list attribute_hidden
 hidden_proto (__pthread_keys) extern unsigned int __nptl_nthreads attribute_hidden
void __init_sched_fifo_prio (void) attribute_hidden
int __pthread_tpp_change_priority (int prev_prio, int new_prio) attribute_hidden
int __pthread_current_priority (void) attribute_hidden
void __pthread_unwind (__pthread_unwind_buf_t *__buf) __cleanup_fct_attribute __attribute((__noreturn__)) weak_function
void __pthread_unwind_next (__pthread_unwind_buf_t *__buf) __cleanup_fct_attribute __attribute((__noreturn__)) weak_function
void __pthread_register_cancel (__pthread_unwind_buf_t *__buf) __cleanup_fct_attribute
void __pthread_unregister_cancel (__pthread_unwind_buf_t *__buf) __cleanup_fct_attribute
static void __attribute ((noreturn, always_inline)) __do_cancel(void)
struct pthread *__find_in_stack_list (struct pthread *pd) attribute_hidden internal_function
void __free_tcb (struct pthread *pd) attribute_hidden internal_function
void __deallocate_stack (struct pthread *pd) attribute_hidden internal_function
void __reclaim_stacks (void) attribute_hidden
int __make_stacks_executable (void **stack_endp) internal_function attribute_hidden
void __pthread_cleanup_upto (__jmp_buf target, char *targetframe)
int __pthread_create_2_1 (pthread_t *newthread, const pthread_attr_t *attr, void *(*start_routine)(void *), void *arg)
int __pthread_create_2_0 (pthread_t *newthread, const pthread_attr_t *attr, void *(*start_routine)(void *), void *arg)
int __pthread_attr_init_2_1 (pthread_attr_t *attr)
int __pthread_attr_init_2_0 (pthread_attr_t *attr)
void __nptl_create_event (void)
void __nptl_death_event (void)
 hidden_proto (__nptl_create_event) hidden_proto(__nptl_death_event) extern int *__libc_pthread_init(unsigned long int *ptr
struct pthread *__find_thread_by_id (pid_t tid) attribute_hidden weak_function
void __pthread_init_static_tls (struct link_map *) attribute_hidden
int __pthread_getschedparam (pthread_t thread_id, int *policy, struct sched_param *param)
int __pthread_setschedparam (pthread_t thread_id, int policy, const struct sched_param *param)
int __pthread_setcancelstate (int state, int *oldstate)
int __pthread_mutex_init (pthread_mutex_t *__mutex, __const pthread_mutexattr_t *__mutexattr)
int __pthread_mutex_init_internal (pthread_mutex_t *__mutex, __const pthread_mutexattr_t *__mutexattr) attribute_hidden
int __pthread_mutex_destroy (pthread_mutex_t *__mutex)
int __pthread_mutex_destroy_internal (pthread_mutex_t *__mutex) attribute_hidden
int __pthread_mutex_trylock (pthread_mutex_t *_mutex)
int __pthread_mutex_lock (pthread_mutex_t *__mutex)
int __pthread_mutex_lock_internal (pthread_mutex_t *__mutex) attribute_hidden
int __pthread_mutex_cond_lock (pthread_mutex_t *__mutex) attribute_hidden internal_function
int __pthread_mutex_unlock (pthread_mutex_t *__mutex)
int __pthread_mutex_unlock_internal (pthread_mutex_t *__mutex) attribute_hidden
int __pthread_mutex_unlock_usercnt (pthread_mutex_t *__mutex, int __decr) attribute_hidden internal_function
int __pthread_mutexattr_init (pthread_mutexattr_t *attr)
int __pthread_mutexattr_destroy (pthread_mutexattr_t *attr)
int __pthread_mutexattr_settype (pthread_mutexattr_t *attr, int kind)
int __pthread_attr_destroy (pthread_attr_t *attr)
int __pthread_attr_getdetachstate (const pthread_attr_t *attr, int *detachstate)
int __pthread_attr_setdetachstate (pthread_attr_t *attr, int detachstate)
int __pthread_attr_getinheritsched (const pthread_attr_t *attr, int *inherit)
int __pthread_attr_setinheritsched (pthread_attr_t *attr, int inherit)
int __pthread_attr_getschedparam (const pthread_attr_t *attr, struct sched_param *param)
int __pthread_attr_setschedparam (pthread_attr_t *attr, const struct sched_param *param)
int __pthread_attr_getschedpolicy (const pthread_attr_t *attr, int *policy)
int __pthread_attr_setschedpolicy (pthread_attr_t *attr, int policy)
int __pthread_attr_getscope (const pthread_attr_t *attr, int *scope)
int __pthread_attr_setscope (pthread_attr_t *attr, int scope)
int __pthread_attr_getstackaddr (__const pthread_attr_t *__restrict __attr, void **__restrict __stackaddr)
int __pthread_attr_setstackaddr (pthread_attr_t *__attr, void *__stackaddr)
int __pthread_attr_getstacksize (__const pthread_attr_t *__restrict __attr, size_t *__restrict __stacksize)
int __pthread_attr_setstacksize (pthread_attr_t *__attr, size_t __stacksize)
int __pthread_attr_getstack (__const pthread_attr_t *__restrict __attr, void **__restrict __stackaddr, size_t *__restrict __stacksize)
int __pthread_attr_setstack (pthread_attr_t *__attr, void *__stackaddr, size_t __stacksize)
int __pthread_rwlock_init (pthread_rwlock_t *__restrict __rwlock, __const pthread_rwlockattr_t *__restrict __attr)
int __pthread_rwlock_destroy (pthread_rwlock_t *__rwlock)
int __pthread_rwlock_rdlock (pthread_rwlock_t *__rwlock)
int __pthread_rwlock_rdlock_internal (pthread_rwlock_t *__rwlock)
int __pthread_rwlock_tryrdlock (pthread_rwlock_t *__rwlock)
int __pthread_rwlock_wrlock (pthread_rwlock_t *__rwlock)
int __pthread_rwlock_wrlock_internal (pthread_rwlock_t *__rwlock)
int __pthread_rwlock_trywrlock (pthread_rwlock_t *__rwlock)
int __pthread_rwlock_unlock (pthread_rwlock_t *__rwlock)
int __pthread_rwlock_unlock_internal (pthread_rwlock_t *__rwlock)
int __pthread_cond_broadcast (pthread_cond_t *cond)
int __pthread_cond_destroy (pthread_cond_t *cond)
int __pthread_cond_init (pthread_cond_t *cond, const pthread_condattr_t *cond_attr)
int __pthread_cond_signal (pthread_cond_t *cond)
int __pthread_cond_wait (pthread_cond_t *cond, pthread_mutex_t *mutex)
int __pthread_cond_timedwait (pthread_cond_t *cond, pthread_mutex_t *mutex, const struct timespec *abstime)
int __pthread_condattr_destroy (pthread_condattr_t *attr)
int __pthread_condattr_init (pthread_condattr_t *attr)
int __pthread_key_create (pthread_key_t *key, void(*destr)(void *))
int __pthread_key_create_internal (pthread_key_t *key, void(*destr)(void *))
void * __pthread_getspecific (pthread_key_t key)
void * __pthread_getspecific_internal (pthread_key_t key)
int __pthread_setspecific (pthread_key_t key, const void *value)
int __pthread_setspecific_internal (pthread_key_t key, const void *value)
int __pthread_once (pthread_once_t *once_control, void(*init_routine)(void))
int __pthread_once_internal (pthread_once_t *once_control, void(*init_routine)(void))
int __pthread_atfork (void(*prepare)(void), void(*parent)(void), void(*child)(void))
pthread_t __pthread_self (void)
int __pthread_equal (pthread_t thread1, pthread_t thread2)
int __pthread_kill (pthread_t threadid, int signo)
void __pthread_exit (void *value)
int __pthread_setcanceltype (int type, int *oldtype)
int __pthread_enable_asynccancel (void) attribute_hidden
void __pthread_disable_asynccancel (int oldtype) internal_function attribute_hidden
int __pthread_cond_broadcast_2_0 (pthread_cond_2_0_t *cond)
int __pthread_cond_destroy_2_0 (pthread_cond_2_0_t *cond)
int __pthread_cond_init_2_0 (pthread_cond_2_0_t *cond, const pthread_condattr_t *cond_attr)
int __pthread_cond_signal_2_0 (pthread_cond_2_0_t *cond)
int __pthread_cond_timedwait_2_0 (pthread_cond_2_0_t *cond, pthread_mutex_t *mutex, const struct timespec *abstime)
int __pthread_cond_wait_2_0 (pthread_cond_2_0_t *cond, pthread_mutex_t *mutex)
int __pthread_getaffinity_np (pthread_t th, size_t cpusetsize, cpu_set_t *cpuset)
int __libc_enable_asynccancel (void) attribute_hidden
void __libc_disable_asynccancel (int oldtype) internal_function attribute_hidden
int __librt_enable_asynccancel (void) attribute_hidden
void __librt_disable_asynccancel (int oldtype) internal_function attribute_hidden
void __pthread_cleanup_push_defer (struct _pthread_cleanup_buffer *buffer, void(*routine)(void *), void *arg)
void __pthread_cleanup_pop_restore (struct _pthread_cleanup_buffer *buffer, int execute)
void _pthread_cleanup_push (struct _pthread_cleanup_buffer *buffer, void(*routine)(void *), void *arg)
void _pthread_cleanup_pop (struct _pthread_cleanup_buffer *buffer, int execute)
void _pthread_cleanup_push_defer (struct _pthread_cleanup_buffer *buffer, void(*routine)(void *), void *arg)
void _pthread_cleanup_pop_restore (struct _pthread_cleanup_buffer *buffer, int execute)
void __nptl_deallocate_tsd (void) attribute_hidden
int __nptl_setxid (struct xid_command *cmdp) attribute_hidden
void __free_stack_cache (void) attribute_hidden
void __wait_lookup_done (void) attribute_hidden

Variables

size_t __default_stacksize attribute_hidden
list_t __stack_user
void(* reclaim )(void)
void(*) struct pthread_functions *function internal_function )

Define Documentation

#define __find_thread_by_id (   tid)    (__find_thread_by_id ? (__find_thread_by_id) (tid) : (struct pthread *) NULL)

Definition at line 393 of file pthreadP.h.

#define CANCEL_ASYNC ( )    __pthread_enable_asynccancel ()

Definition at line 270 of file pthreadP.h.

#define CANCEL_RESET (   oldtype)    __pthread_disable_asynccancel (oldtype)

Definition at line 273 of file pthreadP.h.

#define CANCELLATION_P (   self)
Value:
do {                                                                 \
    int cancelhandling = THREAD_GETMEM (self, cancelhandling);              \
    if (CANCEL_ENABLED_AND_CANCELED (cancelhandling))                       \
      {                                                                     \
       THREAD_SETMEM (self, result, PTHREAD_CANCELED);                      \
       __do_cancel ();                                                      \
      }                                                                     \
  } while (0)

Definition at line 216 of file pthreadP.h.

#define DEBUGGING_P   0

For now disable debugging support.

Definition at line 206 of file pthreadP.h.

#define FUTEX_OWNER_DIED   0x40000000

Definition at line 144 of file pthreadP.h.

#define FUTEX_TID_MASK   0x3fffffff

Definition at line 145 of file pthreadP.h.

#define FUTEX_WAITERS   0x80000000

Definition at line 143 of file pthreadP.h.
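
The three futex bits above partition the 32-bit lock word of a robust mutex: the low 30 bits hold the owner's TID, and the top two bits carry the owner-died and waiters flags. A minimal standalone sketch (not part of pthreadP.h; constants copied from above) that decodes such a word:

#include <stdio.h>

#define FUTEX_WAITERS    0x80000000
#define FUTEX_OWNER_DIED 0x40000000
#define FUTEX_TID_MASK   0x3fffffff

/* Decode the owner word of a robust mutex futex.  */
static void decode_owner (unsigned int lockword)
{
  printf ("owner tid  : %u\n", lockword & FUTEX_TID_MASK);
  printf ("owner died : %s\n", (lockword & FUTEX_OWNER_DIED) ? "yes" : "no");
  printf ("has waiters: %s\n", (lockword & FUTEX_WAITERS) ? "yes" : "no");
}

int main (void)
{
  /* Hypothetical value: thread 1234 died while holding the lock and
     another thread is blocked on it.  */
  decode_owner (1234 | FUTEX_OWNER_DIED | FUTEX_WAITERS);
  return 0;
}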

#define INVALID_NOT_TERMINATED_TD_P (   pd)    __builtin_expect ((pd)->tid < 0, 0)

Definition at line 211 of file pthreadP.h.

#define INVALID_TD_P (   pd)    __builtin_expect ((pd)->tid <= 0, 0)

Definition at line 210 of file pthreadP.h.

#define LIBC_CANCEL_ASYNC ( )    __libc_enable_asynccancel ()

Definition at line 278 of file pthreadP.h.

#define LIBC_CANCEL_HANDLED ( )
Value:
__asm (".globl " __SYMBOL_PREFIX "__libc_enable_asynccancel"); \
  __asm (".globl " __SYMBOL_PREFIX "__libc_disable_asynccancel")

Definition at line 283 of file pthreadP.h.

#define LIBC_CANCEL_RESET (   oldtype)    __libc_disable_asynccancel (oldtype)

Definition at line 281 of file pthreadP.h.
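
The CANCEL_ASYNC/CANCEL_RESET and LIBC_CANCEL_ASYNC/LIBC_CANCEL_RESET pairs bracket blocking operations so that they act as cancellation points: asynchronous cancellation is enabled for the duration of the blocking call and the previous type is restored afterwards. A hedged user-level sketch of the same pattern using the public API (the wrapper name is hypothetical; inside glibc the internal macros above are used instead):

#include <pthread.h>
#include <unistd.h>

/* Mirror of the internal CANCEL_ASYNC/CANCEL_RESET pattern, expressed
   with the public pthread_setcanceltype interface.  */
ssize_t read_as_cancellation_point (int fd, void *buf, size_t n)
{
  int oldtype;
  pthread_setcanceltype (PTHREAD_CANCEL_ASYNCHRONOUS, &oldtype);
  ssize_t result = read (fd, buf, n);       /* the blocking operation */
  pthread_setcanceltype (oldtype, NULL);    /* restore previous type */
  return result;
}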

#define MAX_ADAPTIVE_COUNT   100

Definition at line 51 of file pthreadP.h.

#define PTHREAD_MUTEX_INCONSISTENT   INT_MAX

Definition at line 56 of file pthreadP.h.

#define PTHREAD_MUTEX_NOTRECOVERABLE   (INT_MAX - 1)

Definition at line 58 of file pthreadP.h.

#define PTHREAD_MUTEX_PRIO_CEILING_MASK   0xfff80000

Definition at line 122 of file pthreadP.h.

#define PTHREAD_MUTEX_PRIO_CEILING_SHIFT   19

Definition at line 121 of file pthreadP.h.

#define PTHREAD_MUTEX_PSHARED (   m)    (((m)->__data.__kind & 128) ? LLL_SHARED : LLL_PRIVATE)

Definition at line 108 of file pthreadP.h.

#define PTHREAD_MUTEX_PSHARED_BIT   128

Definition at line 99 of file pthreadP.h.

#define PTHREAD_MUTEX_TYPE (   m)    ((m)->__data.__kind & 127)

Definition at line 101 of file pthreadP.h.
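
Taken together, these macros split the mutex's __data.__kind field: bit 7 (PTHREAD_MUTEX_PSHARED_BIT, value 128) records whether the mutex is process-shared, while the low seven bits hold the type plus the robust/PI/PP flag bits listed in the enumeration below. A standalone sketch of the same arithmetic applied to a plain int (constants copied from this header; the sample value is hypothetical):

#include <stdio.h>

#define PTHREAD_MUTEX_PSHARED_BIT 128

/* Same arithmetic as PTHREAD_MUTEX_TYPE and PTHREAD_MUTEX_PSHARED.  */
static void decode_kind (int kind)
{
  printf ("type bits      : %d\n", kind & 127);
  printf ("process shared : %s\n",
          (kind & PTHREAD_MUTEX_PSHARED_BIT) ? "yes (LLL_SHARED)"
                                             : "no (LLL_PRIVATE)");
}

int main (void)
{
  /* Hypothetical __kind value: robust (16), recursive (1), process shared.  */
  decode_kind (16 | 1 | PTHREAD_MUTEX_PSHARED_BIT);
  return 0;
}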

#define PTHREAD_MUTEXATTR_FLAG_PSHARED   0x80000000

Definition at line 131 of file pthreadP.h.

#define PTHREAD_MUTEXATTR_FLAG_ROBUST   0x40000000

Definition at line 130 of file pthreadP.h.

#define PTHREAD_MUTEXATTR_PRIO_CEILING_MASK   0x00fff000

Definition at line 129 of file pthreadP.h.

#define PTHREAD_MUTEXATTR_PRIO_CEILING_SHIFT   12

Definition at line 128 of file pthreadP.h.

#define PTHREAD_MUTEXATTR_PROTOCOL_MASK   0x30000000

Definition at line 127 of file pthreadP.h.

#define PTHREAD_MUTEXATTR_PROTOCOL_SHIFT   28

Definition at line 126 of file pthreadP.h.
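
The PTHREAD_MUTEXATTR_* shift/mask pairs pack the protocol, the priority ceiling, and the robust and pshared flags into the single int stored in a pthread_mutexattr_t. A standalone sketch of how the fields are extracted (constants copied from this header; the attribute word is a hypothetical example):

#include <stdio.h>

#define PTHREAD_MUTEXATTR_PROTOCOL_SHIFT      28
#define PTHREAD_MUTEXATTR_PROTOCOL_MASK       0x30000000
#define PTHREAD_MUTEXATTR_PRIO_CEILING_SHIFT  12
#define PTHREAD_MUTEXATTR_PRIO_CEILING_MASK   0x00fff000
#define PTHREAD_MUTEXATTR_FLAG_ROBUST         0x40000000
#define PTHREAD_MUTEXATTR_FLAG_PSHARED        0x80000000

static void decode_mutexattr (unsigned int a)
{
  printf ("protocol     : %u\n",
          (a & PTHREAD_MUTEXATTR_PROTOCOL_MASK) >> PTHREAD_MUTEXATTR_PROTOCOL_SHIFT);
  printf ("prio ceiling : %u\n",
          (a & PTHREAD_MUTEXATTR_PRIO_CEILING_MASK) >> PTHREAD_MUTEXATTR_PRIO_CEILING_SHIFT);
  printf ("robust       : %s\n", (a & PTHREAD_MUTEXATTR_FLAG_ROBUST) ? "yes" : "no");
  printf ("pshared      : %s\n", (a & PTHREAD_MUTEXATTR_FLAG_PSHARED) ? "yes" : "no");
}

int main (void)
{
  /* Hypothetical attribute word: protocol value 2, ceiling 10, robust flag set.  */
  decode_mutexattr ((2u << PTHREAD_MUTEXATTR_PROTOCOL_SHIFT)
                    | (10u << PTHREAD_MUTEXATTR_PRIO_CEILING_SHIFT)
                    | PTHREAD_MUTEXATTR_FLAG_ROBUST);
  return 0;
}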

#define PTHREAD_ROBUST_MUTEX_PSHARED (   m)    LLL_SHARED

Definition at line 114 of file pthreadP.h.

#define PTHREAD_RWLOCK_PREFER_READER_P (   rwlock)    ((rwlock)->__data.__flags == 0)

Definition at line 138 of file pthreadP.h.

#define PTHREAD_STATIC_FN_REQUIRE (   name)    __asm (".globl " #name);

Definition at line 574 of file pthreadP.h.
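
PTHREAD_STATIC_FN_REQUIRE simply emits an undefined .globl reference so that the named function is pulled into statically linked programs. For example, PTHREAD_STATIC_FN_REQUIRE (pthread_mutex_lock) expands (by direct substitution of the macro above) to:

__asm (".globl " "pthread_mutex_lock");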

#define SIGCANCEL   __SIGRTMIN

Definition at line 307 of file pthreadP.h.

#define SIGSETXID   (__SIGRTMIN + 1)

Definition at line 317 of file pthreadP.h.

#define SIGTIMER   SIGCANCEL

Definition at line 313 of file pthreadP.h.

#define THREAD_ATOMIC_BIT_SET (   descr,
  member,
  bit 
)    atomic_bit_set (&(descr)->member, bit)

Definition at line 44 of file pthreadP.h.

#define THREAD_ATOMIC_CMPXCHG_VAL (   descr,
  member,
  new,
  old 
)    atomic_compare_and_exchange_val_acq (&(descr)->member, new, old)

Definition at line 39 of file pthreadP.h.


Enumeration Type Documentation

anonymous enum
Enumerator:
PTHREAD_MUTEX_KIND_MASK_NP 
PTHREAD_MUTEX_ROBUST_NORMAL_NP 
PTHREAD_MUTEX_ROBUST_RECURSIVE_NP 
PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP 
PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP 
PTHREAD_MUTEX_PRIO_INHERIT_NP 
PTHREAD_MUTEX_PI_NORMAL_NP 
PTHREAD_MUTEX_PI_RECURSIVE_NP 
PTHREAD_MUTEX_PI_ERRORCHECK_NP 
PTHREAD_MUTEX_PI_ADAPTIVE_NP 
PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP 
PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP 
PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP 
PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP 
PTHREAD_MUTEX_PRIO_PROTECT_NP 
PTHREAD_MUTEX_PP_NORMAL_NP 
PTHREAD_MUTEX_PP_RECURSIVE_NP 
PTHREAD_MUTEX_PP_ERRORCHECK_NP 
PTHREAD_MUTEX_PP_ADAPTIVE_NP 

Definition at line 62 of file pthreadP.h.

{
  PTHREAD_MUTEX_KIND_MASK_NP = 3,
  PTHREAD_MUTEX_ROBUST_NORMAL_NP = 16,
  PTHREAD_MUTEX_ROBUST_RECURSIVE_NP
  = PTHREAD_MUTEX_ROBUST_NORMAL_NP | PTHREAD_MUTEX_RECURSIVE_NP,
  PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP
  = PTHREAD_MUTEX_ROBUST_NORMAL_NP | PTHREAD_MUTEX_ERRORCHECK_NP,
  PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP
  = PTHREAD_MUTEX_ROBUST_NORMAL_NP | PTHREAD_MUTEX_ADAPTIVE_NP,
  PTHREAD_MUTEX_PRIO_INHERIT_NP = 32,
  PTHREAD_MUTEX_PI_NORMAL_NP
  = PTHREAD_MUTEX_PRIO_INHERIT_NP | PTHREAD_MUTEX_NORMAL,
  PTHREAD_MUTEX_PI_RECURSIVE_NP
  = PTHREAD_MUTEX_PRIO_INHERIT_NP | PTHREAD_MUTEX_RECURSIVE_NP,
  PTHREAD_MUTEX_PI_ERRORCHECK_NP
  = PTHREAD_MUTEX_PRIO_INHERIT_NP | PTHREAD_MUTEX_ERRORCHECK_NP,
  PTHREAD_MUTEX_PI_ADAPTIVE_NP
  = PTHREAD_MUTEX_PRIO_INHERIT_NP | PTHREAD_MUTEX_ADAPTIVE_NP,
  PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP
  = PTHREAD_MUTEX_PRIO_INHERIT_NP | PTHREAD_MUTEX_ROBUST_NORMAL_NP,
  PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP
  = PTHREAD_MUTEX_PRIO_INHERIT_NP | PTHREAD_MUTEX_ROBUST_RECURSIVE_NP,
  PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP
  = PTHREAD_MUTEX_PRIO_INHERIT_NP | PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP,
  PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP
  = PTHREAD_MUTEX_PRIO_INHERIT_NP | PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP,
  PTHREAD_MUTEX_PRIO_PROTECT_NP = 64,
  PTHREAD_MUTEX_PP_NORMAL_NP
  = PTHREAD_MUTEX_PRIO_PROTECT_NP | PTHREAD_MUTEX_NORMAL,
  PTHREAD_MUTEX_PP_RECURSIVE_NP
  = PTHREAD_MUTEX_PRIO_PROTECT_NP | PTHREAD_MUTEX_RECURSIVE_NP,
  PTHREAD_MUTEX_PP_ERRORCHECK_NP
  = PTHREAD_MUTEX_PRIO_PROTECT_NP | PTHREAD_MUTEX_ERRORCHECK_NP,
  PTHREAD_MUTEX_PP_ADAPTIVE_NP
  = PTHREAD_MUTEX_PRIO_PROTECT_NP | PTHREAD_MUTEX_ADAPTIVE_NP
};
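
Each internal kind above is simply the base POSIX mutex type ORed with the robust (16), priority-inheritance (32), or priority-protection (64) flag, so the numeric values can be read off directly. For instance, assuming the usual base values PTHREAD_MUTEX_NORMAL = 0 and PTHREAD_MUTEX_RECURSIVE_NP = 1:

  PTHREAD_MUTEX_ROBUST_RECURSIVE_NP    = 16 | 1      = 17
  PTHREAD_MUTEX_PI_RECURSIVE_NP        = 32 | 1      = 33
  PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP = 32 | 16 | 1 = 49
  PTHREAD_MUTEX_PP_RECURSIVE_NP        = 64 | 1      = 65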

Function Documentation

static void __attribute ( (noreturn, always_inline)  ) [inline, static]

Definition at line 256 of file pthreadP.h.

{
  struct pthread *self = THREAD_SELF;

  /* Make sure we get no more cancellations.  */
  THREAD_ATOMIC_BIT_SET (self, cancelhandling, EXITING_BIT);

  __pthread_unwind ((__pthread_unwind_buf_t *)
                  THREAD_GETMEM (self, cleanup_jmp_buf));
}

void __deallocate_stack ( struct pthread *  pd)

Definition at line 700 of file allocatestack.c.

{
  lll_lock (stack_cache_lock, LLL_PRIVATE);

  /* Remove the thread from the list of threads with user defined
     stacks.  */
  list_del (&pd->list);

  /* Not much to do.  Just free the mmap()ed memory.  Note that we do
     not reset the 'used' flag in the 'tid' field.  This is done by
     the kernel.  If no thread has been created yet this field is
     still zero.  */
  if (__builtin_expect (! pd->user_stack, 1))
    (void) queue_stack (pd);
  else
    /* Free the memory associated with the ELF TLS.  */
    _dl_deallocate_tls (TLS_TPADJ (pd), false);

  lll_unlock (stack_cache_lock, LLL_PRIVATE);
}

struct pthread* __find_in_stack_list ( struct pthread *  pd) [read]

Definition at line 61 of file pthread_create.c.

{
  list_t *entry;
  struct pthread *result = NULL;

  lll_lock (stack_cache_lock, LLL_PRIVATE);

  list_for_each (entry, &stack_used)
    {
      struct pthread *curp;

      curp = list_entry (entry, struct pthread, list);
      if (curp == pd)
       {
         result = curp;
         break;
       }
    }

  if (result == NULL)
    list_for_each (entry, &__stack_user)
      {
       struct pthread *curp;

       curp = list_entry (entry, struct pthread, list);
       if (curp == pd)
         {
           result = curp;
           break;
         }
      }

  lll_unlock (stack_cache_lock, LLL_PRIVATE);

  return result;
}

struct pthread* __find_thread_by_id ( pid_t  tid) [read]
void __free_stack_cache ( void  )

Definition at line 273 of file allocatestack.c.

{
  free_stacks (0);
}

void __free_tcb ( struct pthread *  pd)

Definition at line 198 of file pthread_create.c.

{
  /* The thread is exiting now.  */
  if (__builtin_expect (atomic_bit_test_set (&pd->cancelhandling,
                                        TERMINATED_BIT) == 0, 1))
    {
      /* Remove the descriptor from the list.  */
      if (DEBUGGING_P && __find_in_stack_list (pd) == NULL)
       /* Something is really wrong.  The descriptor for a still
          running thread is gone.  */
       abort ();

      /* Free TPP data.  */
      if (__builtin_expect (pd->tpp != NULL, 0))
       {
         struct priority_protection_data *tpp = pd->tpp;

         pd->tpp = NULL;
         free (tpp);
       }

      /* Queue the stack memory block for reuse and exit the process.  The
        kernel will signal via writing to the address returned by
        QUEUE-STACK when the stack is available.  */
      __deallocate_stack (pd);
    }
}

void __init_sched_fifo_prio ( void  )

Definition at line 33 of file tpp.c.

void __libc_disable_asynccancel ( int  oldtype)

Definition at line 81 of file libc-cancellation.c.

{
  /* If asynchronous cancellation was enabled before we do not have
     anything to do.  */
  if (oldtype & CANCELTYPE_BITMASK)
    return;

  struct pthread *self = THREAD_SELF;
  int oldval = THREAD_GETMEM (self, cancelhandling);

  while (1)
    {
      int newval = oldval & ~CANCELTYPE_BITMASK;

      if (newval == oldval)
       break;

      int curval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling, newval,
                                         oldval);
      if (__builtin_expect (curval == oldval, 1))
       break;

      /* Prepare the next round.  */
      oldval = curval;
    }
}

int __libc_enable_asynccancel ( void  )

Definition at line 34 of file libc-cancellation.c.

{
  struct pthread *self = THREAD_SELF;
  int oldval = THREAD_GETMEM (self, cancelhandling);

  while (1)
    {
      int newval = oldval | CANCELTYPE_BITMASK;

      if (__builtin_expect ((oldval & CANCELED_BITMASK) != 0, 0))
       {
         /* If we are already exiting or if PTHREAD_CANCEL_DISABLED,
            stop right here.  */
         if ((oldval & (EXITING_BITMASK | CANCELSTATE_BITMASK)) != 0)
           break;

         int curval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling,
                                            newval, oldval);
         if (__builtin_expect (curval != oldval, 0))
           {
             /* Somebody else modified the word, try again.  */
             oldval = curval;
             continue;
           }

         THREAD_SETMEM (self, result, PTHREAD_CANCELED);

         __do_cancel ();

         /* NOTREACHED */
       }

      int curval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling, newval,
                                         oldval);
      if (__builtin_expect (curval == oldval, 1))
       break;

      /* Prepare the next round.  */
      oldval = curval;
    }

  return oldval;
}
void __librt_disable_asynccancel ( int  oldtype)
int __make_stacks_executable ( void **  stack_endp)

Definition at line 724 of file allocatestack.c.

{
  /* First the main thread's stack.  */
  int err = _dl_make_stack_executable (stack_endp);
  if (err != 0)
    return err;

#ifdef NEED_SEPARATE_REGISTER_STACK
  const size_t pagemask = ~(__getpagesize () - 1);
#endif

  lll_lock (stack_cache_lock, LLL_PRIVATE);

  list_t *runp;
  list_for_each (runp, &stack_used)
    {
      err = change_stack_perm (list_entry (runp, struct pthread, list)
#ifdef NEED_SEPARATE_REGISTER_STACK
                            , pagemask
#endif
                            );
      if (err != 0)
       break;
    }

  /* Also change the permission for the currently unused stacks.  This
     might be wasted time but better spend it here than adding a check
     in the fast path.  */
  if (err == 0)
    list_for_each (runp, &stack_cache)
      {
       err = change_stack_perm (list_entry (runp, struct pthread, list)
#ifdef NEED_SEPARATE_REGISTER_STACK
                             , pagemask
#endif
                             );
       if (err != 0)
         break;
      }

  lll_unlock (stack_cache_lock, LLL_PRIVATE);

  return err;
}

void __nptl_create_event ( void  )

Definition at line 25 of file events.c.

{
}

void __nptl_deallocate_tsd ( void  )

Definition at line 103 of file pthread_create.c.

{
  struct pthread *self = THREAD_SELF;

  /* Maybe no data was ever allocated.  This happens often so we have
     a flag for this.  */
  if (THREAD_GETMEM (self, specific_used))
    {
      size_t round;
      size_t cnt;

      round = 0;
      do
       {
         size_t idx;

         /* So far no new nonzero data entry.  */
         THREAD_SETMEM (self, specific_used, false);

         for (cnt = idx = 0; cnt < PTHREAD_KEY_1STLEVEL_SIZE; ++cnt)
           {
             struct pthread_key_data *level2;

             level2 = THREAD_GETMEM_NC (self, specific, cnt);

             if (level2 != NULL)
              {
                size_t inner;

                for (inner = 0; inner < PTHREAD_KEY_2NDLEVEL_SIZE;
                     ++inner, ++idx)
                  {
                    void *data = level2[inner].data;

                    if (data != NULL)
                     {
                       /* Always clear the data.  */
                       level2[inner].data = NULL;

                       /* Make sure the data corresponds to a valid
                          key.  This test fails if the key was
                          deallocated and also if it was
                          re-allocated.  It is the user's
                          responsibility to free the memory in this
                          case.  */
                       if (level2[inner].seq
                           == __pthread_keys[idx].seq
                           /* It is not necessary to register a destructor
                             function.  */
                           && __pthread_keys[idx].destr != NULL)
                         /* Call the user-provided destructor.  */
                         __pthread_keys[idx].destr (data);
                     }
                  }
              }
             else
              idx += PTHREAD_KEY_1STLEVEL_SIZE;
           }

         if (THREAD_GETMEM (self, specific_used) == 0)
           /* No data has been modified.  */
           goto just_free;
       }
      /* We only repeat the process a fixed number of times.  */
      while (__builtin_expect (++round < PTHREAD_DESTRUCTOR_ITERATIONS, 0));

      /* Just clear the memory of the first block for reuse.  */
      memset (&THREAD_SELF->specific_1stblock, '\0',
             sizeof (self->specific_1stblock));

    just_free:
      /* Free the memory for the other blocks.  */
      for (cnt = 1; cnt < PTHREAD_KEY_1STLEVEL_SIZE; ++cnt)
       {
         struct pthread_key_data *level2;

         level2 = THREAD_GETMEM_NC (self, specific, cnt);
         if (level2 != NULL)
           {
             /* The first block is allocated as part of the thread
               descriptor.  */
             free (level2);
             THREAD_SETMEM_NC (self, specific, cnt, NULL);
           }
       }

      THREAD_SETMEM (self, specific_used, false);
    }
}

void __nptl_death_event ( void  )

int __nptl_setxid ( struct xid_command *  cmdp)

Definition at line 940 of file allocatestack.c.

{
  int result;
  lll_lock (stack_cache_lock, LLL_PRIVATE);

  __xidcmd = cmdp;
  cmdp->cntr = 0;

  struct pthread *self = THREAD_SELF;

  /* Iterate over the list with system-allocated threads first.  */
  list_t *runp;
  list_for_each (runp, &stack_used)
    {
      struct pthread *t = list_entry (runp, struct pthread, list);
      if (t == self)
       continue;

      setxid_signal_thread (cmdp, t);
    }

  /* Now the list with threads using user-allocated stacks.  */
  list_for_each (runp, &__stack_user)
    {
      struct pthread *t = list_entry (runp, struct pthread, list);
      if (t == self)
       continue;

      setxid_signal_thread (cmdp, t);
    }

  int cur = cmdp->cntr;
  while (cur != 0)
    {
      lll_futex_wait (&cmdp->cntr, cur, LLL_PRIVATE);
      cur = cmdp->cntr;
    }

  /* This must be last, otherwise the current thread might not have
     permissions to send SIGSETXID syscall to the other threads.  */
  INTERNAL_SYSCALL_DECL (err);
  result = INTERNAL_SYSCALL_NCS (cmdp->syscall_no, err, 3,
                             cmdp->id[0], cmdp->id[1], cmdp->id[2]);
  if (INTERNAL_SYSCALL_ERROR_P (result, err))
    {
      __set_errno (INTERNAL_SYSCALL_ERRNO (result, err));
      result = -1;
    }

  lll_unlock (stack_cache_lock, LLL_PRIVATE);
  return result;
}

int __pthread_atfork ( void(*)(void)  prepare,
void(*)(void)  parent,
void(*)(void)  child 
)

Definition at line 51 of file pthread_atfork.c.

int __pthread_attr_destroy ( pthread_attr_t *  attr)

Definition at line 69 of file attr.c.

{
  return 0;
}
int __pthread_attr_getdetachstate ( const pthread_attr_t *  attr,
int *  detachstate 
)

Definition at line 85 of file attr.c.

{
  *detachstate = attr->__detachstate;
  return 0;
}

int __pthread_attr_getinheritsched ( const pthread_attr_t *  attr, int *  inherit )

Definition at line 138 of file attr.c.

{
  *inherit = attr->__inheritsched;
  return 0;
}

int __pthread_attr_getschedparam ( const pthread_attr_t *  attr, struct sched_param *  param )

Definition at line 105 of file attr.c.

{
  memcpy (param, &attr->__schedparam, sizeof (struct sched_param));
  return 0;
}

int __pthread_attr_getschedpolicy ( const pthread_attr_t *  attr, int *  policy )

Definition at line 122 of file attr.c.

{
  *policy = attr->__schedpolicy;
  return 0;
}
int __pthread_attr_getscope ( const pthread_attr_t *  attr,
int *  scope 
)

Definition at line 159 of file attr.c.

{
  *scope = attr->__scope;
  return 0;
}
int __pthread_attr_getstack ( __const pthread_attr_t *__restrict  __attr,
void **__restrict  __stackaddr,
size_t *__restrict  __stacksize 
)

int __pthread_attr_init_2_1 ( pthread_attr_t *  attr)

Definition at line 32 of file attr.c.

{
  size_t ps = __getpagesize ();

  attr->__detachstate = PTHREAD_CREATE_JOINABLE;
  attr->__schedpolicy = SCHED_OTHER;
  attr->__schedparam.sched_priority = 0;
  attr->__inheritsched = PTHREAD_EXPLICIT_SCHED;
  attr->__scope = PTHREAD_SCOPE_SYSTEM;
#ifdef NEED_SEPARATE_REGISTER_STACK
  attr->__guardsize = ps + ps;
#else
  attr->__guardsize = ps;
#endif
  attr->__stackaddr = NULL;
  attr->__stackaddr_set = 0;
  attr->__stacksize = STACK_SIZE - ps;
  return 0;
}

int __pthread_attr_setdetachstate ( pthread_attr_t *  attr,
int  detachstate 
)

Definition at line 75 of file attr.c.

{
  if (detachstate < PTHREAD_CREATE_JOINABLE ||
      detachstate > PTHREAD_CREATE_DETACHED)
    return EINVAL;
  attr->__detachstate = detachstate;
  return 0;
}

int __pthread_attr_setinheritsched ( pthread_attr_t *  attr, int  inherit )

Definition at line 129 of file attr.c.

{
  if (inherit != PTHREAD_INHERIT_SCHED && inherit != PTHREAD_EXPLICIT_SCHED)
    return EINVAL;
  attr->__inheritsched = inherit;
  return 0;
}

int __pthread_attr_setschedparam ( pthread_attr_t *  attr, const struct sched_param *  param )

Definition at line 92 of file attr.c.

{
  int max_prio = __sched_get_priority_max(attr->__schedpolicy);
  int min_prio = __sched_get_priority_min(attr->__schedpolicy);

  if (param->sched_priority < min_prio || param->sched_priority > max_prio)
    return EINVAL;
  memcpy (&attr->__schedparam, param, sizeof (struct sched_param));
  return 0;
}

int __pthread_attr_setschedpolicy ( pthread_attr_t *  attr, int  policy )

Definition at line 113 of file attr.c.

{
  if (policy != SCHED_OTHER && policy != SCHED_FIFO && policy != SCHED_RR)
    return EINVAL;
  attr->__schedpolicy = policy;
  return 0;
}
int __pthread_attr_setscope ( pthread_attr_t *  attr,
int  scope 
)

Definition at line 145 of file attr.c.

{
  switch (scope) {
  case PTHREAD_SCOPE_SYSTEM:
    attr->__scope = scope;
    return 0;
  case PTHREAD_SCOPE_PROCESS:
    return ENOTSUP;
  default:
    return EINVAL;
  }
}
int __pthread_attr_setstack ( pthread_attr_t *  __attr,
void *  __stackaddr,
size_t  __stacksize 
)

Definition at line 32 of file pthread_attr_setstack.c.

{
  struct pthread_attr *iattr;

  assert (sizeof (*attr) >= sizeof (struct pthread_attr));
  iattr = (struct pthread_attr *) attr;

  /* Catch invalid sizes.  */
  if (stacksize < PTHREAD_STACK_MIN)
    return EINVAL;

#ifdef EXTRA_PARAM_CHECKS
  EXTRA_PARAM_CHECKS;
#endif

  iattr->stacksize = stacksize;
  iattr->stackaddr = (char *) stackaddr + stacksize;
  iattr->flags |= ATTR_FLAG_STACKADDR;

  return 0;
}
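
Note that the attribute records the highest usable address (stackaddr + stacksize), which is the convention the stack allocator expects. A hedged user-level usage sketch over the public API (the wrapper name and the 1 MiB size are hypothetical):

#include <errno.h>
#include <pthread.h>
#include <stdlib.h>

/* Run FN on a caller-provided stack.  */
int spawn_with_own_stack (pthread_t *thr, void *(*fn) (void *), void *arg)
{
  enum { STACKSIZE = 1024 * 1024 };
  void *stack = malloc (STACKSIZE);
  if (stack == NULL)
    return ENOMEM;

  pthread_attr_t attr;
  pthread_attr_init (&attr);
  /* Pass the lowest address; the implementation stores addr + size.  */
  pthread_attr_setstack (&attr, stack, STACKSIZE);

  int err = pthread_create (thr, &attr, fn, arg);
  pthread_attr_destroy (&attr);
  return err;
}
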
int __pthread_attr_setstackaddr ( pthread_attr_t *  __attr,
void *  __stackaddr 
)

Definition at line 26 of file pthread_attr_setstackaddr.c.

{
  struct pthread_attr *iattr;

#ifdef EXTRA_PARAM_CHECKS
  EXTRA_PARAM_CHECKS;
#endif

  assert (sizeof (*attr) >= sizeof (struct pthread_attr));
  iattr = (struct pthread_attr *) attr;

  iattr->stackaddr = stackaddr;
  iattr->flags |= ATTR_FLAG_STACKADDR;

  return 0;
}
int __pthread_attr_setstacksize ( pthread_attr_t *  __attr,
size_t  __stacksize 
)

Definition at line 31 of file pthread_attr_setstacksize.c.

{
  struct pthread_attr *iattr;

  assert (sizeof (*attr) >= sizeof (struct pthread_attr));
  iattr = (struct pthread_attr *) attr;

  /* Catch invalid sizes.  */
  if (stacksize < PTHREAD_STACK_MIN)
    return EINVAL;

  iattr->stacksize = stacksize;

  return 0;
}
void __pthread_cleanup_pop_restore ( struct _pthread_cleanup_buffer *  buffer,
int  execute 
)
void __pthread_cleanup_push_defer ( struct _pthread_cleanup_buffer *  buffer,
void(*)(void *)  routine,
void *  arg 
)
void __pthread_cleanup_upto ( __jmp_buf  target,
char *  targetframe 
)

Definition at line 33 of file ptcleanup.c.

{
  pthread_descr self = thread_self();
  struct _pthread_cleanup_buffer * c;

  for (c = THREAD_GETMEM(self, p_cleanup);
       c != NULL && _JMPBUF_UNWINDS(target, c, demangle_ptr);
       c = c->__prev)
    {
#if _STACK_GROWS_DOWN
      if ((char *) c <= targetframe)
       {
         c = NULL;
         break;
       }
#elif _STACK_GROWS_UP
      if ((char *) c >= targetframe)
       {
         c = NULL;
         break;
       }
#else
# error "Define either _STACK_GROWS_DOWN or _STACK_GROWS_UP"
#endif
      c->__routine(c->__arg);
    }
  THREAD_SETMEM(self, p_cleanup, c);
  if (THREAD_GETMEM(self, p_in_sighandler)
      && _JMPBUF_UNWINDS(target, THREAD_GETMEM(self, p_in_sighandler),
                      demangle_ptr))
    THREAD_SETMEM(self, p_in_sighandler, NULL);
}

int __pthread_cond_broadcast ( pthread_cond_t *  cond)

Definition at line 288 of file condvar.c.

{
  pthread_descr tosignal, th;

  __pthread_lock(&cond->__c_lock, NULL);
  /* Copy the current state of the waiting queue and empty it */
  tosignal = cond->__c_waiting;
  cond->__c_waiting = NULL;
  __pthread_unlock(&cond->__c_lock);
  /* Now signal each process in the queue */
  while ((th = dequeue(&tosignal)) != NULL) {
    th->p_condvar_avail = 1;
    WRITE_MEMORY_BARRIER();
    restart(th);
  }
  return 0;
}

int __pthread_cond_destroy ( pthread_cond_t *  cond)

Definition at line 44 of file condvar.c.

{
  if (cond->__c_waiting != NULL) return EBUSY;
  return 0;
}

int __pthread_cond_init ( pthread_cond_t *  cond, const pthread_condattr_t *  cond_attr )

Definition at line 29 of file condvar.c.

{
  __pthread_init_lock(&cond->__c_lock);
  cond->__c_waiting = NULL;
  return 0;
}

int __pthread_cond_signal ( pthread_cond_t *  cond)

Definition at line 266 of file condvar.c.

{
  pthread_descr th;

  __pthread_lock(&cond->__c_lock, NULL);
  th = dequeue(&cond->__c_waiting);
  __pthread_unlock(&cond->__c_lock);
  if (th != NULL) {
    th->p_condvar_avail = 1;
    WRITE_MEMORY_BARRIER();
    restart(th);
  }
  return 0;
}

int __pthread_cond_timedwait ( pthread_cond_t *  cond,
pthread_mutex_t *  mutex,
const struct timespec *  abstime 
)

Definition at line 252 of file condvar.c.

{
  /* Indirect call through pointer! */
  return pthread_cond_timedwait_relative(cond, mutex, abstime);
}

int __pthread_cond_timedwait_2_0 ( pthread_cond_2_0_t *  cond,
pthread_mutex_t *  mutex,
const struct timespec *  abstime 
)

int __pthread_cond_wait ( pthread_cond_t *  cond, pthread_mutex_t *  mutex )

Definition at line 73 of file condvar.c.

{
  volatile pthread_descr self = thread_self();
  pthread_extricate_if extr;
  int already_canceled = 0;
  int spurious_wakeup_count;

  /* Check whether the mutex is locked and owned by this thread.  */
  if (mutex->__m_kind != PTHREAD_MUTEX_TIMED_NP
      && mutex->__m_kind != PTHREAD_MUTEX_ADAPTIVE_NP
      && mutex->__m_owner != self)
    return EINVAL;

  /* Set up extrication interface */
  extr.pu_object = cond;
  extr.pu_extricate_func = cond_extricate_func;

  /* Register extrication interface */
  THREAD_SETMEM(self, p_condvar_avail, 0);
  __pthread_set_own_extricate_if(self, &extr);

  /* Atomically enqueue thread for waiting, but only if it is not
     canceled. If the thread is canceled, then it will fall through the
     suspend call below, and then call pthread_exit without
     having to worry about whether it is still on the condition variable queue.
     This depends on pthread_cancel setting p_canceled before calling the
     extricate function. */

  __pthread_lock(&cond->__c_lock, self);
  if (!(THREAD_GETMEM(self, p_canceled)
      && THREAD_GETMEM(self, p_cancelstate) == PTHREAD_CANCEL_ENABLE))
    enqueue(&cond->__c_waiting, self);
  else
    already_canceled = 1;
  __pthread_unlock(&cond->__c_lock);

  if (already_canceled) {
    __pthread_set_own_extricate_if(self, 0);
    __pthread_do_exit(PTHREAD_CANCELED, CURRENT_STACK_FRAME);
  }

  pthread_mutex_unlock(mutex);

  spurious_wakeup_count = 0;
  while (1)
    {
      suspend(self);
      if (THREAD_GETMEM(self, p_condvar_avail) == 0
         && (THREAD_GETMEM(self, p_woken_by_cancel) == 0
             || THREAD_GETMEM(self, p_cancelstate) != PTHREAD_CANCEL_ENABLE))
       {
         /* Count resumes that don't belong to us. */
         spurious_wakeup_count++;
         continue;
       }
      break;
    }

  __pthread_set_own_extricate_if(self, 0);

  /* Check for cancellation again, to provide correct cancellation
     point behavior */

  if (THREAD_GETMEM(self, p_woken_by_cancel)
      && THREAD_GETMEM(self, p_cancelstate) == PTHREAD_CANCEL_ENABLE) {
    THREAD_SETMEM(self, p_woken_by_cancel, 0);
    pthread_mutex_lock(mutex);
    __pthread_do_exit(PTHREAD_CANCELED, CURRENT_STACK_FRAME);
  }

  /* Put back any resumes we caught that don't belong to us. */
  while (spurious_wakeup_count--)
    restart(self);

  pthread_mutex_lock(mutex);
  return 0;
}
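
The loop above also absorbs wakeups that do not belong to this waiter, which is why callers must always re-check their predicate after pthread_cond_wait returns. The canonical usage pattern on the public API (a generic sketch, not taken from the glibc sources):

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static bool ready;

void wait_until_ready (void)
{
  pthread_mutex_lock (&lock);
  while (!ready)                  /* re-check: wakeups may be spurious */
    pthread_cond_wait (&cond, &lock);
  pthread_mutex_unlock (&lock);
}

void announce_ready (void)
{
  pthread_mutex_lock (&lock);
  ready = true;
  pthread_cond_signal (&cond);
  pthread_mutex_unlock (&lock);
}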

int __pthread_condattr_destroy ( pthread_condattr_t *  attr)

Definition at line 24 of file pthread_condattr_destroy.c.

{
  /* Nothing to be done.  */
  return 0;
}

int __pthread_condattr_init ( pthread_condattr_t *  attr)

Definition at line 313 of file condvar.c.

{
  return 0;
}
int __pthread_create_2_0 ( pthread_t *  newthread,
const pthread_attr_t *  attr,
void *(*)(void *)  start_routine,
void *  arg 
)
int __pthread_create_2_1 ( pthread_t *  newthread,
const pthread_attr_t *  attr,
void *(*)(void *)  start_routine,
void *  arg 
)

Definition at line 834 of file pthread.c.

{
  pthread_descr self = thread_self();
  struct pthread_request request;
  int retval;
  if (__builtin_expect (__pthread_manager_request, 0) < 0) {
    if (__pthread_initialize_manager() < 0) return EAGAIN;
  }
  request.req_thread = self;
  request.req_kind = REQ_CREATE;
  request.req_args.create.attr = attr;
  request.req_args.create.fn = start_routine;
  request.req_args.create.arg = arg;
  sigprocmask(SIG_SETMASK, (const sigset_t *) NULL,
              &request.req_args.create.mask);
  TEMP_FAILURE_RETRY(write_not_cancel(__pthread_manager_request,
                                  (char *) &request, sizeof(request)));
  suspend(self);
  retval = THREAD_GETMEM(self, p_retcode);
  if (__builtin_expect (retval, 0) == 0)
    *thread = (pthread_t) THREAD_GETMEM(self, p_retval);
  return retval;
}

int __pthread_current_priority ( void  )

Definition at line 138 of file tpp.c.

{
  struct pthread *self = THREAD_SELF;
  if ((self->flags & (ATTR_FLAG_POLICY_SET | ATTR_FLAG_SCHED_SET))
      == (ATTR_FLAG_POLICY_SET | ATTR_FLAG_SCHED_SET))
    return self->schedparam.sched_priority;

  int result = 0;

  lll_lock (self->lock, LLL_PRIVATE);

  if ((self->flags & ATTR_FLAG_SCHED_SET) == 0)
    {
      if (__sched_getparam (self->tid, &self->schedparam) != 0)
       result = -1;
      else
       self->flags |= ATTR_FLAG_SCHED_SET;
    }

  if ((self->flags & ATTR_FLAG_POLICY_SET) == 0)
    {
      self->schedpolicy = __sched_getscheduler (self->tid);
      if (self->schedpolicy == -1)
       result = -1;
      else
       self->flags |= ATTR_FLAG_POLICY_SET;
    }

  if (result != -1)
    result = self->schedparam.sched_priority;

  lll_unlock (self->lock, LLL_PRIVATE);

  return result;
}

void __pthread_disable_asynccancel ( int  oldtype)

Definition at line 82 of file cancel.c.

{
  pthread_descr self = thread_self();
  THREAD_SETMEM(self, p_canceltype, oldtype);
}

int __pthread_enable_asynccancel ( void  )

Definition at line 69 of file cancel.c.

int __pthread_equal ( pthread_t  thread1,
pthread_t  thread2 
)

Definition at line 902 of file pthread.c.

{
  return thread1 == thread2;
}
void __pthread_exit ( void *  value)

Definition at line 27 of file join.c.

int __pthread_getaffinity_np ( pthread_t  th,
size_t  cpusetsize,
cpu_set_t *  cpuset 
)

int __pthread_getschedparam ( pthread_t  thread_id,
int *  policy,
struct sched_param *  param 
)

Definition at line 989 of file pthread.c.

{
  pthread_handle handle = thread_handle(thread);
  int pid, pol;

  __pthread_lock(&handle->h_lock, NULL);
  if (__builtin_expect (invalid_handle(handle, thread), 0)) {
    __pthread_unlock(&handle->h_lock);
    return ESRCH;
  }
  pid = handle->h_descr->p_pid;
  __pthread_unlock(&handle->h_lock);
  pol = __sched_getscheduler(pid);
  if (__builtin_expect (pol, 0) == -1) return errno;
  if (__sched_getparam(pid, param) == -1) return errno;
  *policy = pol;
  return 0;
}

void * __pthread_getspecific ( pthread_key_t  key)

Definition at line 25 of file pthread_getspecific.c.

{
  struct pthread_key_data *data;

  /* Special case access to the first 2nd-level block.  This is the
     usual case.  */
  if (__builtin_expect (key < PTHREAD_KEY_2NDLEVEL_SIZE, 1))
    data = &THREAD_SELF->specific_1stblock[key];
  else
    {
      /* Verify the key is sane.  */
      if (key >= PTHREAD_KEYS_MAX)
       /* Not valid.  */
       return NULL;

      unsigned int idx1st = key / PTHREAD_KEY_2NDLEVEL_SIZE;
      unsigned int idx2nd = key % PTHREAD_KEY_2NDLEVEL_SIZE;

      /* If the sequence number doesn't match or the key cannot be defined
        for this thread since the second level array is not allocated
        return NULL, too.  */
      struct pthread_key_data *level2 = THREAD_GETMEM_NC (THREAD_SELF,
                                                   specific, idx1st);
      if (level2 == NULL)
       /* Not allocated, therefore no data.  */
       return NULL;

      /* There is data.  */
      data = &level2[idx2nd];
    }

  void *result = data->data;
  if (result != NULL)
    {
      uintptr_t seq = data->seq;

      if (__builtin_expect (seq != __pthread_keys[key].seq, 0))
       result = data->data = NULL;
    }

  return result;
}
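
The two-level table read here is populated by __pthread_setspecific; destructors registered with the key run from __nptl_deallocate_tsd at thread exit. A minimal user-level round trip over the public API (key name and payload are hypothetical):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static pthread_key_t key;

static void destructor (void *val)
{
  free (val);                      /* runs at thread exit for non-NULL values */
}

static void *thread_fn (void *arg)
{
  (void) arg;
  pthread_setspecific (key, strdup ("per-thread data"));
  printf ("%s\n", (char *) pthread_getspecific (key));
  return NULL;
}

int main (void)
{
  pthread_key_create (&key, destructor);
  pthread_t t;
  pthread_create (&t, NULL, thread_fn, NULL);
  pthread_join (t, NULL);
  pthread_key_delete (key);
  return 0;
}
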
void __pthread_init_static_tls ( struct link_map * )

Definition at line 1016 of file allocatestack.c.

{
  lll_lock (stack_cache_lock, LLL_PRIVATE);

  /* Iterate over the list with system-allocated threads first.  */
  list_t *runp;
  list_for_each (runp, &stack_used)
    init_one_static_tls (list_entry (runp, struct pthread, list), map);

  /* Now the list with threads using user-allocated stacks.  */
  list_for_each (runp, &__stack_user)
    init_one_static_tls (list_entry (runp, struct pthread, list), map);

  lll_unlock (stack_cache_lock, LLL_PRIVATE);
}

int __pthread_key_create ( pthread_key_t *  key,
void(*)(void *)  destr 
)

Definition at line 42 of file specific.c.

{
  int i;

  pthread_mutex_lock(&pthread_keys_mutex);
  for (i = 0; i < PTHREAD_KEYS_MAX; i++) {
    if (! pthread_keys[i].in_use) {
      /* Mark key in use */
      pthread_keys[i].in_use = 1;
      pthread_keys[i].destr = destr;
      pthread_mutex_unlock(&pthread_keys_mutex);
      *key = i;
      return 0;
    }
  }
  pthread_mutex_unlock(&pthread_keys_mutex);
  return EAGAIN;
}
int __pthread_key_create_internal ( pthread_key_t *  key,
void(*)(void *)  destr 
)
int __pthread_kill ( pthread_t  threadid,
int  signo 
)

Definition at line 29 of file pthread_kill.c.

{
  struct pthread *pd = (struct pthread *) threadid;

  /* Make sure the descriptor is valid.  */
  if (DEBUGGING_P && INVALID_TD_P (pd))
    /* Not a valid thread handle.  */
    return ESRCH;

  /* Force load of pd->tid into local variable or register.  Otherwise
     if a thread exits between ESRCH test and tgkill, we might return
     EINVAL, because pd->tid would be cleared by the kernel.  */
  pid_t tid = atomic_forced_read (pd->tid);
  if (__builtin_expect (tid <= 0, 0))
    /* Not a valid thread handle.  */
    return ESRCH;

  /* Disallow sending the signal we use for cancellation, timers, and
     for the setxid implementation.  */
  if (signo == SIGCANCEL || signo == SIGTIMER || signo == SIGSETXID)
    return EINVAL;

  /* We have a special syscall to do the work.  */
  INTERNAL_SYSCALL_DECL (err);

  /* One comment: The PID field in the TCB can temporarily be changed
     (in fork).  But this must not affect this code here.  Since this
     function would have to be called while the thread is executing
     fork, it would have to happen in a signal handler.  But this is
     not allowed, pthread_kill is not guaranteed to be async-safe.  */
  int val;
#if __ASSUME_TGKILL
  val = INTERNAL_SYSCALL (tgkill, err, 3, THREAD_GETMEM (THREAD_SELF, pid),
                       tid, signo);
#else
# ifdef __NR_tgkill
  val = INTERNAL_SYSCALL (tgkill, err, 3, THREAD_GETMEM (THREAD_SELF, pid),
                       tid, signo);
  if (INTERNAL_SYSCALL_ERROR_P (val, err)
      && INTERNAL_SYSCALL_ERRNO (val, err) == ENOSYS)
# endif
    val = INTERNAL_SYSCALL (tkill, err, 2, tid, signo);
#endif

  return (INTERNAL_SYSCALL_ERROR_P (val, err)
         ? INTERNAL_SYSCALL_ERRNO (val, err) : 0);
}

int __pthread_mutex_destroy ( pthread_mutex_t *  __mutex)

Definition at line 25 of file pthread_mutex_destroy.c.

{
  if ((mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP) == 0
      && mutex->__data.__nusers != 0)
    return EBUSY;

  /* Set to an invalid value.  */
  mutex->__data.__kind = -1;

  return 0;
}

int __pthread_mutex_lock ( pthread_mutex_t *  __mutex)

Definition at line 41 of file pthread_mutex_lock.c.

{
  assert (sizeof (mutex->__size) >= sizeof (mutex->__data));

  int oldval;
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);

  int retval = 0;
  switch (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex),
                         PTHREAD_MUTEX_TIMED_NP))
    {
      /* Recursive mutex.  */
    case PTHREAD_MUTEX_RECURSIVE_NP:
      /* Check whether we already hold the mutex.  */
      if (mutex->__data.__owner == id)
       {
         /* Just bump the counter.  */
         if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
           /* Overflow of the counter.  */
           return EAGAIN;

         ++mutex->__data.__count;

         return 0;
       }

      /* We have to get the mutex.  */
      LLL_MUTEX_LOCK (mutex);

      assert (mutex->__data.__owner == 0);
      mutex->__data.__count = 1;
      break;

      /* Error checking mutex.  */
    case PTHREAD_MUTEX_ERRORCHECK_NP:
      /* Check whether we already hold the mutex.  */
      if (__builtin_expect (mutex->__data.__owner == id, 0))
       return EDEADLK;

      /* FALLTHROUGH */

    case PTHREAD_MUTEX_TIMED_NP:
    simple:
      /* Normal mutex.  */
      LLL_MUTEX_LOCK (mutex);
      assert (mutex->__data.__owner == 0);
      break;

    case PTHREAD_MUTEX_ADAPTIVE_NP:
      if (! __is_smp)
       goto simple;

      if (LLL_MUTEX_TRYLOCK (mutex) != 0)
       {
         int cnt = 0;
         int max_cnt = MIN (MAX_ADAPTIVE_COUNT,
                          mutex->__data.__spins * 2 + 10);
         do
           {
             if (cnt++ >= max_cnt)
              {
                LLL_MUTEX_LOCK (mutex);
                break;
              }

#ifdef BUSY_WAIT_NOP
             BUSY_WAIT_NOP;
#endif
           }
         while (LLL_MUTEX_TRYLOCK (mutex) != 0);

         mutex->__data.__spins += (cnt - mutex->__data.__spins) / 8;
       }
      assert (mutex->__data.__owner == 0);
      break;

    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                   &mutex->__data.__list.__next);

      oldval = mutex->__data.__lock;
      do
       {
       again:
         if ((oldval & FUTEX_OWNER_DIED) != 0)
           {
             /* The previous owner died.  Try locking the mutex.  */
             int newval = id;
#ifdef NO_INCR
             newval |= FUTEX_WAITERS;
#else
             newval |= (oldval & FUTEX_WAITERS);
#endif

             newval
              = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                 newval, oldval);

             if (newval != oldval)
              {
                oldval = newval;
                goto again;
              }

             /* We got the mutex.  */
             mutex->__data.__count = 1;
             /* But it is inconsistent unless marked otherwise.  */
             mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

             ENQUEUE_MUTEX (mutex);
             THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

             /* Note that we deliberately exit here.  If we fall
               through to the end of the function __nusers would be
               incremented which is not correct because the old
               owner has to be discounted.  If we are not supposed
               to increment __nusers we actually have to decrement
               it here.  */
#ifdef NO_INCR
             --mutex->__data.__nusers;
#endif

             return EOWNERDEAD;
           }

         /* Check whether we already hold the mutex.  */
         if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
           {
             int kind = PTHREAD_MUTEX_TYPE (mutex);
             if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                             NULL);
                return EDEADLK;
              }

             if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                             NULL);

                /* Just bump the counter.  */
                if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
           }

         oldval = LLL_ROBUST_MUTEX_LOCK (mutex, id);

         if (__builtin_expect (mutex->__data.__owner
                            == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
           {
             /* This mutex is now not recoverable.  */
             mutex->__data.__count = 0;
             lll_unlock (mutex->__data.__lock,
                       PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
             THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
             return ENOTRECOVERABLE;
           }
       }
      while ((oldval & FUTEX_OWNER_DIED) != 0);

      mutex->__data.__count = 1;
      ENQUEUE_MUTEX (mutex);
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;

    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      {
       int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
       int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;

       if (robust)
         /* Note: robust PI futexes are signaled by setting bit 0.  */
         THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                      (void *) (((uintptr_t) &mutex->__data.__list.__next)
                               | 1));

       oldval = mutex->__data.__lock;

       /* Check whether we already hold the mutex.  */
       if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
         {
           if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
             {
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
              return EDEADLK;
             }

           if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
             {
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              /* Just bump the counter.  */
              if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                /* Overflow of the counter.  */
                return EAGAIN;

              ++mutex->__data.__count;

              return 0;
             }
         }

       int newval = id;
#ifdef NO_INCR
       newval |= FUTEX_WAITERS;
#endif
       oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                newval, 0);

       if (oldval != 0)
         {
           /* The mutex is locked.  The kernel will now take care of
              everything.  */
           int private = (robust
                        ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
                        : PTHREAD_MUTEX_PSHARED (mutex));
           INTERNAL_SYSCALL_DECL (__err);
           int e = INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                                  __lll_private_flag (FUTEX_LOCK_PI,
                                                   private), 1, 0);

           if (INTERNAL_SYSCALL_ERROR_P (e, __err)
              && (INTERNAL_SYSCALL_ERRNO (e, __err) == ESRCH
                  || INTERNAL_SYSCALL_ERRNO (e, __err) == EDEADLK))
             {
              assert (INTERNAL_SYSCALL_ERRNO (e, __err) != EDEADLK
                     || (kind != PTHREAD_MUTEX_ERRORCHECK_NP
                         && kind != PTHREAD_MUTEX_RECURSIVE_NP));
              /* ESRCH can happen only for non-robust PI mutexes where
                 the owner of the lock died.  */
              assert (INTERNAL_SYSCALL_ERRNO (e, __err) != ESRCH || !robust);

              /* Delay the thread indefinitely.  */
              while (1)
                pause_not_cancel ();
             }

           oldval = mutex->__data.__lock;

           assert (robust || (oldval & FUTEX_OWNER_DIED) == 0);
         }

       if (__builtin_expect (oldval & FUTEX_OWNER_DIED, 0))
         {
           atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);

           /* We got the mutex.  */
           mutex->__data.__count = 1;
           /* But it is inconsistent unless marked otherwise.  */
           mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

           ENQUEUE_MUTEX_PI (mutex);
           THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

           /* Note that we deliberately exit here.  If we fall
              through to the end of the function __nusers would be
              incremented which is not correct because the old owner
              has to be discounted.  If we are not supposed to
              increment __nusers we actually have to decrement it here.  */
#ifdef NO_INCR
           --mutex->__data.__nusers;
#endif

           return EOWNERDEAD;
         }

       if (robust
           && __builtin_expect (mutex->__data.__owner
                             == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
         {
           /* This mutex is now not recoverable.  */
           mutex->__data.__count = 0;

           INTERNAL_SYSCALL_DECL (__err);
           INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                           __lll_private_flag (FUTEX_UNLOCK_PI,
                                            PTHREAD_ROBUST_MUTEX_PSHARED (mutex)),
                           0, 0);

           THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
           return ENOTRECOVERABLE;
         }

       mutex->__data.__count = 1;
       if (robust)
         {
           ENQUEUE_MUTEX_PI (mutex);
           THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
         }
      }
      break;

    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      {
       int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;

       oldval = mutex->__data.__lock;

       /* Check whether we already hold the mutex.  */
       if (mutex->__data.__owner == id)
         {
           if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
             return EDEADLK;

           if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
             {
              /* Just bump the counter.  */
              if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                /* Overflow of the counter.  */
                return EAGAIN;

              ++mutex->__data.__count;

              return 0;
             }
         }

       int oldprio = -1, ceilval;
       do
         {
           int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
                       >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

           if (__pthread_current_priority () > ceiling)
             {
              if (oldprio != -1)
                __pthread_tpp_change_priority (oldprio, -1);
              return EINVAL;
             }

           retval = __pthread_tpp_change_priority (oldprio, ceiling);
           if (retval)
             return retval;

           ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
           oldprio = ceiling;

           oldval
             = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
#ifdef NO_INCR
                                               ceilval | 2,
#else
                                               ceilval | 1,
#endif
                                               ceilval);

           if (oldval == ceilval)
             break;

           do
             {
              oldval
                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                  ceilval | 2,
                                                  ceilval | 1);

              if ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval)
                break;

              if (oldval != ceilval)
                lll_futex_wait (&mutex->__data.__lock, ceilval | 2,
                              PTHREAD_MUTEX_PSHARED (mutex));
             }
           while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                 ceilval | 2, ceilval)
                 != ceilval);
         }
       while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);

       assert (mutex->__data.__owner == 0);
       mutex->__data.__count = 1;
      }
      break;

    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

  /* Record the ownership.  */
  mutex->__data.__owner = id;
#ifndef NO_INCR
  ++mutex->__data.__nusers;
#endif

  return retval;
}
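The PTHREAD_MUTEX_PP_* cases above keep the priority ceiling packed into the upper bits of __lock, using the PTHREAD_MUTEX_PRIO_CEILING_SHIFT and PTHREAD_MUTEX_PRIO_CEILING_MASK values listed in the Defines section. A minimal standalone sketch of that encoding (the lockword variable and the ceiling value are hypothetical, chosen only for illustration):

#include <assert.h>
#include <stdio.h>

/* Same numeric values as the pthreadP.h defines shown above.  */
#define PRIO_CEILING_SHIFT  19
#define PRIO_CEILING_MASK   0xfff80000

int
main (void)
{
  unsigned int lockword = 0;
  int ceiling = 42;             /* hypothetical ceiling priority */

  /* Encode the ceiling into the upper bits, as the PP lock path does.  */
  lockword = (lockword & ~PRIO_CEILING_MASK)
             | ((unsigned int) ceiling << PRIO_CEILING_SHIFT);

  /* Decode it again, mirroring the mask-and-shift in the code above.  */
  int decoded = (int) ((lockword & PRIO_CEILING_MASK) >> PRIO_CEILING_SHIFT);

  assert (decoded == ceiling);
  printf ("ceiling %d packed as 0x%08x\n", decoded, lockword);
  return 0;
}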

Definition at line 28 of file pthread_mutex_trylock.c.

{
  int oldval;
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);

  switch (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex),
                         PTHREAD_MUTEX_TIMED_NP))
    {
      /* Recursive mutex.  */
    case PTHREAD_MUTEX_RECURSIVE_NP:
      /* Check whether we already hold the mutex.  */
      if (mutex->__data.__owner == id)
       {
         /* Just bump the counter.  */
         if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
           /* Overflow of the counter.  */
           return EAGAIN;

         ++mutex->__data.__count;
         return 0;
       }

      if (lll_trylock (mutex->__data.__lock) == 0)
       {
         /* Record the ownership.  */
         mutex->__data.__owner = id;
         mutex->__data.__count = 1;
         ++mutex->__data.__nusers;
         return 0;
       }
      break;

    case PTHREAD_MUTEX_ERRORCHECK_NP:
    case PTHREAD_MUTEX_TIMED_NP:
    case PTHREAD_MUTEX_ADAPTIVE_NP:
      /* Normal mutex.  */
      if (lll_trylock (mutex->__data.__lock) != 0)
       break;

      /* Record the ownership.  */
      mutex->__data.__owner = id;
      ++mutex->__data.__nusers;

      return 0;

    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                   &mutex->__data.__list.__next);

      oldval = mutex->__data.__lock;
      do
       {
       again:
         if ((oldval & FUTEX_OWNER_DIED) != 0)
           {
             /* The previous owner died.  Try locking the mutex.  */
             int newval = id | (oldval & FUTEX_WAITERS);

             newval
              = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                 newval, oldval);

             if (newval != oldval)
              {
                oldval = newval;
                goto again;
              }

             /* We got the mutex.  */
             mutex->__data.__count = 1;
             /* But it is inconsistent unless marked otherwise.  */
             mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

             ENQUEUE_MUTEX (mutex);
             THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

             /* Note that we deliberately exit here.  If we fall
               through to the end of the function __nusers would be
               incremented which is not correct because the old
               owner has to be discounted.  */
             return EOWNERDEAD;
           }

         /* Check whether we already hold the mutex.  */
         if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
           {
             int kind = PTHREAD_MUTEX_TYPE (mutex);
             if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                             NULL);
                return EDEADLK;
              }

             if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                             NULL);

                /* Just bump the counter.  */
                if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
           }

         oldval = lll_robust_trylock (mutex->__data.__lock, id);
         if (oldval != 0 && (oldval & FUTEX_OWNER_DIED) == 0)
           {
             THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

             return EBUSY;
           }

         if (__builtin_expect (mutex->__data.__owner
                            == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
           {
             /* This mutex is now not recoverable.  */
             mutex->__data.__count = 0;
             if (oldval == id)
              lll_unlock (mutex->__data.__lock,
                         PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
             THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
             return ENOTRECOVERABLE;
           }
       }
      while ((oldval & FUTEX_OWNER_DIED) != 0);

      ENQUEUE_MUTEX (mutex);
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

      mutex->__data.__owner = id;
      ++mutex->__data.__nusers;
      mutex->__data.__count = 1;

      return 0;

    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      {
       int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
       int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;

       if (robust)
         /* Note: robust PI futexes are signaled by setting bit 0.  */
         THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                      (void *) (((uintptr_t) &mutex->__data.__list.__next)
                               | 1));

       oldval = mutex->__data.__lock;

       /* Check whether we already hold the mutex.  */
       if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
         {
           if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
             {
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
              return EDEADLK;
             }

           if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
             {
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              /* Just bump the counter.  */
              if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                /* Overflow of the counter.  */
                return EAGAIN;

              ++mutex->__data.__count;

              return 0;
             }
         }

       oldval
         = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                           id, 0);

       if (oldval != 0)
         {
           if ((oldval & FUTEX_OWNER_DIED) == 0)
             {
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              return EBUSY;
             }

           assert (robust);

           /* The mutex owner died.  The kernel will now take care of
              everything.  */
           int private = (robust
                        ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
                        : PTHREAD_MUTEX_PSHARED (mutex));
           INTERNAL_SYSCALL_DECL (__err);
           int e = INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                                  __lll_private_flag (FUTEX_TRYLOCK_PI,
                                                   private), 0, 0);

           if (INTERNAL_SYSCALL_ERROR_P (e, __err)
              && INTERNAL_SYSCALL_ERRNO (e, __err) == EWOULDBLOCK)
             {
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              return EBUSY;
             }

           oldval = mutex->__data.__lock;
         }

       if (__builtin_expect (oldval & FUTEX_OWNER_DIED, 0))
         {
           atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);

           /* We got the mutex.  */
           mutex->__data.__count = 1;
           /* But it is inconsistent unless marked otherwise.  */
           mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

           ENQUEUE_MUTEX (mutex);
           THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

           /* Note that we deliberately exit here.  If we fall
              through to the end of the function __nusers would be
              incremented which is not correct because the old owner
              has to be discounted.  */
           return EOWNERDEAD;
         }

       if (robust
           && __builtin_expect (mutex->__data.__owner
                             == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
         {
           /* This mutex is now not recoverable.  */
           mutex->__data.__count = 0;

           INTERNAL_SYSCALL_DECL (__err);
           INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                           __lll_private_flag (FUTEX_UNLOCK_PI,
                                            PTHREAD_ROBUST_MUTEX_PSHARED (mutex)),
                           0, 0);

           THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
           return ENOTRECOVERABLE;
         }

       if (robust)
         {
           ENQUEUE_MUTEX_PI (mutex);
           THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
         }

       mutex->__data.__owner = id;
       ++mutex->__data.__nusers;
       mutex->__data.__count = 1;

       return 0;
      }

    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      {
       int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;

       oldval = mutex->__data.__lock;

       /* Check whether we already hold the mutex.  */
       if (mutex->__data.__owner == id)
         {
           if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
             return EDEADLK;

           if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
             {
              /* Just bump the counter.  */
              if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                /* Overflow of the counter.  */
                return EAGAIN;

              ++mutex->__data.__count;

              return 0;
             }
         }

       int oldprio = -1, ceilval;
       do
         {
           int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
                       >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

           if (__pthread_current_priority () > ceiling)
             {
              if (oldprio != -1)
                __pthread_tpp_change_priority (oldprio, -1);
              return EINVAL;
             }

           int retval = __pthread_tpp_change_priority (oldprio, ceiling);
           if (retval)
             return retval;

           ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
           oldprio = ceiling;

           oldval
             = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                               ceilval | 1, ceilval);

           if (oldval == ceilval)
             break;
         }
       while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);

       if (oldval != ceilval)
         {
           __pthread_tpp_change_priority (oldprio, -1);
           break;
         }

       assert (mutex->__data.__owner == 0);
       /* Record the ownership.  */
       mutex->__data.__owner = id;
       ++mutex->__data.__nusers;
       mutex->__data.__count = 1;

       return 0;
      }
      break;

    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

  return EBUSY;
}
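The robust cases above return EOWNERDEAD when the previous owner died while holding the lock. A minimal usage sketch of the caller-side protocol (not part of the library source; the mutex name and the repair step are hypothetical), using the pthread_mutexattr_setrobust_np and pthread_mutex_consistent_np interfaces:

#define _GNU_SOURCE
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t robust_mutex;

static void
init_robust_mutex (void)
{
  pthread_mutexattr_t attr;
  pthread_mutexattr_init (&attr);
  pthread_mutexattr_setrobust_np (&attr, PTHREAD_MUTEX_ROBUST_NP);
  pthread_mutex_init (&robust_mutex, &attr);
  pthread_mutexattr_destroy (&attr);
}

static int
try_use_shared_state (void)
{
  int err = pthread_mutex_trylock (&robust_mutex);
  if (err == EOWNERDEAD)
    {
      /* The previous owner died; repair the protected data, then mark the
         mutex consistent so later lockers do not get ENOTRECOVERABLE.  */
      pthread_mutex_consistent_np (&robust_mutex);
      err = 0;
    }
  if (err != 0)
    return err;                 /* EBUSY, EDEADLK, ...  */

  /* ... critical section ...  */
  pthread_mutex_unlock (&robust_mutex);
  return 0;
}

int
main (void)
{
  init_robust_mutex ();
  printf ("trylock returned %d\n", try_use_shared_state ());
  return 0;
}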

Definition at line 265 of file pthread_mutex_unlock.c.

Definition at line 28 of file pthread_mutex_unlock.c.

{
  int newowner = 0;

  switch (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex),
                         PTHREAD_MUTEX_TIMED_NP))
    {
    case PTHREAD_MUTEX_RECURSIVE_NP:
      /* Recursive mutex.  */
      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
       return EPERM;

      if (--mutex->__data.__count != 0)
       /* We still hold the mutex.  */
       return 0;
      goto normal;

    case PTHREAD_MUTEX_ERRORCHECK_NP:
      /* Error checking mutex.  */
      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid)
         || ! lll_islocked (mutex->__data.__lock))
       return EPERM;
      /* FALLTHROUGH */

    case PTHREAD_MUTEX_TIMED_NP:
    case PTHREAD_MUTEX_ADAPTIVE_NP:
      /* Always reset the owner field.  */
    normal:
      mutex->__data.__owner = 0;
      if (decr)
       /* One less user.  */
       --mutex->__data.__nusers;

      /* Unlock.  */
      lll_unlock (mutex->__data.__lock, PTHREAD_MUTEX_PSHARED (mutex));
      break;

    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
      /* Recursive mutex.  */
      if ((mutex->__data.__lock & FUTEX_TID_MASK)
         == THREAD_GETMEM (THREAD_SELF, tid)
         && __builtin_expect (mutex->__data.__owner
                            == PTHREAD_MUTEX_INCONSISTENT, 0))
       {
         if (--mutex->__data.__count != 0)
           /* We still hold the mutex.  */
           return ENOTRECOVERABLE;

         goto notrecoverable;
       }

      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
       return EPERM;

      if (--mutex->__data.__count != 0)
       /* We still hold the mutex.  */
       return 0;

      goto robust;

    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      if ((mutex->__data.__lock & FUTEX_TID_MASK)
         != THREAD_GETMEM (THREAD_SELF, tid)
         || ! lll_islocked (mutex->__data.__lock))
       return EPERM;

      /* If the previous owner died and the caller did not succeed in
        making the state consistent, mark the mutex as unrecoverable
         and make all waiters aware.  */
      if (__builtin_expect (mutex->__data.__owner
                         == PTHREAD_MUTEX_INCONSISTENT, 0))
      notrecoverable:
       newowner = PTHREAD_MUTEX_NOTRECOVERABLE;

    robust:
      /* Remove mutex from the list.  */
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                   &mutex->__data.__list.__next);
      DEQUEUE_MUTEX (mutex);

      mutex->__data.__owner = newowner;
      if (decr)
       /* One less user.  */
       --mutex->__data.__nusers;

      /* Unlock.  */
      lll_robust_unlock (mutex->__data.__lock,
                      PTHREAD_ROBUST_MUTEX_PSHARED (mutex));

      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;

    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
      /* Recursive mutex.  */
      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
       return EPERM;

      if (--mutex->__data.__count != 0)
       /* We still hold the mutex.  */
       return 0;
      goto continue_pi;

    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
      /* Recursive mutex.  */
      if ((mutex->__data.__lock & FUTEX_TID_MASK)
         == THREAD_GETMEM (THREAD_SELF, tid)
         && __builtin_expect (mutex->__data.__owner
                            == PTHREAD_MUTEX_INCONSISTENT, 0))
       {
         if (--mutex->__data.__count != 0)
           /* We still hold the mutex.  */
           return ENOTRECOVERABLE;

         goto pi_notrecoverable;
       }

      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
       return EPERM;

      if (--mutex->__data.__count != 0)
       /* We still hold the mutex.  */
       return 0;

      goto continue_pi;

    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      if ((mutex->__data.__lock & FUTEX_TID_MASK)
         != THREAD_GETMEM (THREAD_SELF, tid)
         || ! lll_islocked (mutex->__data.__lock))
       return EPERM;

      /* If the previous owner died and the caller did not succeed in
        making the state consistent, mark the mutex as unrecoverable
         and make all waiters aware.  */
      if ((mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP) != 0
         && __builtin_expect (mutex->__data.__owner
                            == PTHREAD_MUTEX_INCONSISTENT, 0))
      pi_notrecoverable:
       newowner = PTHREAD_MUTEX_NOTRECOVERABLE;

    continue_pi:
      if ((mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP) != 0)
       {
         /* Remove mutex from the list.
            Note: robust PI futexes are signaled by setting bit 0.  */
         THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                      (void *) (((uintptr_t) &mutex->__data.__list.__next)
                               | 1));
         DEQUEUE_MUTEX (mutex);
       }

      mutex->__data.__owner = newowner;
      if (decr)
       /* One less user.  */
       --mutex->__data.__nusers;

      /* Unlock.  */
      if ((mutex->__data.__lock & FUTEX_WAITERS) != 0
         || atomic_compare_and_exchange_bool_acq (&mutex->__data.__lock, 0,
                                             THREAD_GETMEM (THREAD_SELF,
                                                          tid)))
       {
         int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;
         int private = (robust
                      ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
                      : PTHREAD_MUTEX_PSHARED (mutex));
         INTERNAL_SYSCALL_DECL (__err);
         INTERNAL_SYSCALL (futex, __err, 2, &mutex->__data.__lock,
                         __lll_private_flag (FUTEX_UNLOCK_PI, private));
       }

      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;

    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
      /* Recursive mutex.  */
      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
       return EPERM;

      if (--mutex->__data.__count != 0)
       /* We still hold the mutex.  */
       return 0;
      goto pp;

    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
      /* Error checking mutex.  */
      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid)
         || (mutex->__data.__lock & ~ PTHREAD_MUTEX_PRIO_CEILING_MASK) == 0)
       return EPERM;
      /* FALLTHROUGH */

    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      /* Always reset the owner field.  */
    pp:
      mutex->__data.__owner = 0;

      if (decr)
       /* One less user.  */
       --mutex->__data.__nusers;

      /* Unlock.  */
      int newval, oldval;
      do
       {
         oldval = mutex->__data.__lock;
         newval = oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK;
       }
      while (atomic_compare_and_exchange_bool_acq (&mutex->__data.__lock,
                                             newval, oldval));

      if ((oldval & ~PTHREAD_MUTEX_PRIO_CEILING_MASK) > 1)
       lll_futex_wake (&mutex->__data.__lock, 1,
                     PTHREAD_MUTEX_PSHARED (mutex));

      int oldprio = newval >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
      return __pthread_tpp_change_priority (oldprio, -1);

    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

  return 0;
}
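The PTHREAD_MUTEX_PI_* unlock path above hands contended wake-ups to the kernel via FUTEX_UNLOCK_PI. A minimal sketch of how such a mutex is requested through the public API (illustrative only; whether PTHREAD_PRIO_INHERIT is available depends on the kernel and configuration):

#define _GNU_SOURCE
#include <pthread.h>
#include <stdio.h>

int
main (void)
{
  pthread_mutexattr_t attr;
  pthread_mutex_t m;

  pthread_mutexattr_init (&attr);
  if (pthread_mutexattr_setprotocol (&attr, PTHREAD_PRIO_INHERIT) != 0)
    {
      puts ("priority inheritance not supported here");
      return 1;
    }
  pthread_mutex_init (&m, &attr);
  pthread_mutexattr_destroy (&attr);

  pthread_mutex_lock (&m);      /* uncontended: a plain CAS on __lock */
  pthread_mutex_unlock (&m);    /* contended unlocks go through FUTEX_UNLOCK_PI */

  pthread_mutex_destroy (&m);
  return 0;
}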

Definition at line 24 of file pthread_mutexattr_destroy.c.

{
  return 0;
}

Definition at line 25 of file pthread_mutexattr_init.c.

{
  if (sizeof (struct pthread_mutexattr) != sizeof (pthread_mutexattr_t))
    memset (attr, '\0', sizeof (*attr));

  /* We use bit 31 to signal whether the mutex is going to be
     process-shared or not.  By default it is zero, i.e., the mutex is
     not process-shared.  */
  ((struct pthread_mutexattr *) attr)->mutexkind = PTHREAD_MUTEX_NORMAL;

  return 0;
}

Definition at line 25 of file pthread_mutexattr_settype.c.

{
  struct pthread_mutexattr *iattr;

  if (kind < PTHREAD_MUTEX_NORMAL || kind > PTHREAD_MUTEX_ADAPTIVE_NP)
    return EINVAL;

  iattr = (struct pthread_mutexattr *) attr;

  iattr->mutexkind = (iattr->mutexkind & PTHREAD_MUTEXATTR_FLAG_BITS) | kind;

  return 0;
}
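A short usage sketch for the type attribute set above (ordinary POSIX API, illustrative only): a PTHREAD_MUTEX_RECURSIVE mutex may be re-locked by its owner, which exercises the __count bumping seen in the lock and trylock code.

#define _GNU_SOURCE
#include <assert.h>
#include <pthread.h>

int
main (void)
{
  pthread_mutexattr_t attr;
  pthread_mutex_t m;

  pthread_mutexattr_init (&attr);
  pthread_mutexattr_settype (&attr, PTHREAD_MUTEX_RECURSIVE);
  pthread_mutex_init (&m, &attr);
  pthread_mutexattr_destroy (&attr);

  /* Relocking by the owner just increments __count instead of deadlocking.  */
  assert (pthread_mutex_lock (&m) == 0);
  assert (pthread_mutex_lock (&m) == 0);
  pthread_mutex_unlock (&m);
  pthread_mutex_unlock (&m);

  pthread_mutex_destroy (&m);
  return 0;
}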
int __pthread_once ( pthread_once_t *  once_control,
void(*)(void)  init_routine 
)

Definition at line 287 of file mutex.c.

{
  /* flag for doing the condition broadcast outside of mutex */
  int state_changed;

  /* Test without locking first for speed */
  if (*once_control == DONE) {
    READ_MEMORY_BARRIER();
    return 0;
  }
  /* Lock and test again */

  state_changed = 0;

  pthread_mutex_lock(&once_masterlock);

  /* If this object was left in an IN_PROGRESS state in a parent
     process (indicated by stale generation field), reset it to NEVER. */
  if ((*once_control & 3) == IN_PROGRESS && (*once_control & ~3) != fork_generation)
    *once_control = NEVER;

  /* If init_routine is being called from another routine, wait until
     it completes. */
  while ((*once_control & 3) == IN_PROGRESS) {
    pthread_cond_wait(&once_finished, &once_masterlock);
  }
  /* Here *once_control is stable and either NEVER or DONE. */
  if (*once_control == NEVER) {
    *once_control = IN_PROGRESS | fork_generation;
    pthread_mutex_unlock(&once_masterlock);
    pthread_cleanup_push(pthread_once_cancelhandler, once_control);
    init_routine();
    pthread_cleanup_pop(0);
    pthread_mutex_lock(&once_masterlock);
    WRITE_MEMORY_BARRIER();
    *once_control = DONE;
    state_changed = 1;
  }
  pthread_mutex_unlock(&once_masterlock);

  if (state_changed)
    pthread_cond_broadcast(&once_finished);

  return 0;
}
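A minimal usage sketch for the once-control logic above (illustrative only; the init routine is hypothetical): several threads race on pthread_once, but the routine runs exactly once and every caller returns only after it has completed.

#include <pthread.h>
#include <stdio.h>

static pthread_once_t once = PTHREAD_ONCE_INIT;

static void
init (void)
{
  puts ("initialized exactly once");
}

static void *
worker (void *arg)
{
  pthread_once (&once, init);
  return NULL;
}

int
main (void)
{
  pthread_t t[4];
  int i;

  for (i = 0; i < 4; ++i)
    pthread_create (&t[i], NULL, worker, NULL);
  for (i = 0; i < 4; ++i)
    pthread_join (t[i], NULL);
  return 0;
}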
int __pthread_once_internal ( pthread_once_t *  once_control,
void(*)(void)  init_routine 
)

Definition at line 26 of file cleanup.c.

{
  struct pthread_unwind_buf *ibuf = (struct pthread_unwind_buf *) buf;
  struct pthread *self = THREAD_SELF;

  /* Store old info.  */
  ibuf->priv.data.prev = THREAD_GETMEM (self, cleanup_jmp_buf);
  ibuf->priv.data.cleanup = THREAD_GETMEM (self, cleanup);

  /* Store the new cleanup handler info.  */
  THREAD_SETMEM (self, cleanup_jmp_buf, (struct pthread_unwind_buf *) buf);
}
int __pthread_rwlock_destroy ( pthread_rwlock_t *  __rwlock)

Definition at line 24 of file pthread_rwlock_destroy.c.

{
  /* Nothing to be done.  For now.  */
  return 0;
}
int __pthread_rwlock_init ( pthread_rwlock_t *__restrict  __rwlock,
__const pthread_rwlockattr_t *__restrict  __attr 
)
int __pthread_rwlock_rdlock ( pthread_rwlock_t *  __rwlock)

Definition at line 29 of file pthread_rwlock_rdlock.c.

{
  int result = 0;

  /* Make sure we are alone.  */
  lll_lock (rwlock->__data.__lock, rwlock->__data.__shared);

  while (1)
    {
      /* Get the rwlock if there is no writer...  */
      if (rwlock->__data.__writer == 0
         /* ...and if either no writer is waiting or we prefer readers.  */
         && (!rwlock->__data.__nr_writers_queued
             || PTHREAD_RWLOCK_PREFER_READER_P (rwlock)))
       {
         /* Increment the reader counter.  Avoid overflow.  */
         if (__builtin_expect (++rwlock->__data.__nr_readers == 0, 0))
           {
             /* Overflow on number of readers.    */
             --rwlock->__data.__nr_readers;
             result = EAGAIN;
           }

         break;
       }

      /* Make sure we are not holding the rwlock as a writer.  This is
        a deadlock situation we recognize and report.  */
      if (__builtin_expect (rwlock->__data.__writer
                         == THREAD_GETMEM (THREAD_SELF, tid), 0))
       {
         result = EDEADLK;
         break;
       }

      /* Remember that we are a reader.  */
      if (__builtin_expect (++rwlock->__data.__nr_readers_queued == 0, 0))
       {
         /* Overflow on number of queued readers.  */
         --rwlock->__data.__nr_readers_queued;
         result = EAGAIN;
         break;
       }

      int waitval = rwlock->__data.__readers_wakeup;

      /* Free the lock.  */
      lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);

      /* Wait for the writer to finish.  */
      lll_futex_wait (&rwlock->__data.__readers_wakeup, waitval,
                    rwlock->__data.__shared);

      /* Get the lock.  */
      lll_lock (rwlock->__data.__lock, rwlock->__data.__shared);

      --rwlock->__data.__nr_readers_queued;
    }

  /* We are done, free the lock.  */
  lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);

  return result;
}
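A reader-side usage sketch for the code above (illustrative only; the shared counter is hypothetical): multiple readers may hold the lock concurrently, and a reader blocks only while a writer holds it or, depending on the preference flag, while writers are queued.

#define _GNU_SOURCE
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t rw = PTHREAD_RWLOCK_INITIALIZER;
static int shared_value = 7;

static void *
reader (void *arg)
{
  pthread_rwlock_rdlock (&rw);          /* shared: __nr_readers is bumped */
  printf ("reader sees %d\n", shared_value);
  pthread_rwlock_unlock (&rw);
  return NULL;
}

int
main (void)
{
  pthread_t t1, t2;
  pthread_create (&t1, NULL, reader, NULL);
  pthread_create (&t2, NULL, reader, NULL);
  pthread_join (t1, NULL);
  pthread_join (t2, NULL);
  return 0;
}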
int __pthread_rwlock_rdlock_internal ( pthread_rwlock_t *  __rwlock)
int __pthread_rwlock_tryrdlock ( pthread_rwlock_t *  __rwlock)

Definition at line 26 of file pthread_rwlock_tryrdlock.c.

{
  int result = EBUSY;

  lll_lock (rwlock->__data.__lock, rwlock->__data.__shared);

  if (rwlock->__data.__writer == 0
      && (rwlock->__data.__nr_writers_queued == 0
         || PTHREAD_RWLOCK_PREFER_READER_P (rwlock)))
    {
      if (__builtin_expect (++rwlock->__data.__nr_readers == 0, 0))
       {
         --rwlock->__data.__nr_readers;
         result = EAGAIN;
       }
      else
       result = 0;
    }

  lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);

  return result;
}
int __pthread_rwlock_trywrlock ( pthread_rwlock_t *  __rwlock)

Definition at line 26 of file pthread_rwlock_trywrlock.c.

{
  int result = EBUSY;

  lll_lock (rwlock->__data.__lock, rwlock->__data.__shared);

  if (rwlock->__data.__writer == 0 && rwlock->__data.__nr_readers == 0)
    {
      rwlock->__data.__writer = THREAD_GETMEM (THREAD_SELF, tid);
      result = 0;
    }

  lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);

  return result;
}
int __pthread_rwlock_unlock ( pthread_rwlock_t *  __rwlock)

Definition at line 28 of file pthread_rwlock_unlock.c.

{
  lll_lock (rwlock->__data.__lock, rwlock->__data.__shared);
  if (rwlock->__data.__writer)
    rwlock->__data.__writer = 0;
  else
    --rwlock->__data.__nr_readers;
  if (rwlock->__data.__nr_readers == 0)
    {
      if (rwlock->__data.__nr_writers_queued)
       {
         ++rwlock->__data.__writer_wakeup;
         lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);
         lll_futex_wake (&rwlock->__data.__writer_wakeup, 1,
                       rwlock->__data.__shared);
         return 0;
       }
      else if (rwlock->__data.__nr_readers_queued)
       {
         ++rwlock->__data.__readers_wakeup;
         lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);
         lll_futex_wake (&rwlock->__data.__readers_wakeup, INT_MAX,
                       rwlock->__data.__shared);
         return 0;
       }
    }
  lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);
  return 0;
}
int __pthread_rwlock_unlock_internal ( pthread_rwlock_t *  __rwlock)
int __pthread_rwlock_wrlock ( pthread_rwlock_t *  __rwlock)

Definition at line 29 of file pthread_rwlock_wrlock.c.

{
  int result = 0;

  /* Make sure we are alone.  */
  lll_lock (rwlock->__data.__lock, rwlock->__data.__shared);

  while (1)
    {
      /* Get the rwlock if there is no writer and no reader.  */
      if (rwlock->__data.__writer == 0 && rwlock->__data.__nr_readers == 0)
       {
         /* Mark self as writer.  */
         rwlock->__data.__writer = THREAD_GETMEM (THREAD_SELF, tid);
         break;
       }

      /* Make sure we are not holding the rwlock as a writer.  This is
        a deadlock situation we recognize and report.  */
      if (__builtin_expect (rwlock->__data.__writer
                         == THREAD_GETMEM (THREAD_SELF, tid), 0))
       {
         result = EDEADLK;
         break;
       }

      /* Remember that we are a writer.  */
      if (++rwlock->__data.__nr_writers_queued == 0)
       {
         /* Overflow on number of queued writers.  */
         --rwlock->__data.__nr_writers_queued;
         result = EAGAIN;
         break;
       }

      int waitval = rwlock->__data.__writer_wakeup;

      /* Free the lock.  */
      lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);

      /* Wait for the writer or reader(s) to finish.  */
      lll_futex_wait (&rwlock->__data.__writer_wakeup, waitval,
                    rwlock->__data.__shared);

      /* Get the lock.  */
      lll_lock (rwlock->__data.__lock, rwlock->__data.__shared);

      /* To start over again, remove the thread from the writer list.  */
      --rwlock->__data.__nr_writers_queued;
    }

  /* We are done, free the lock.  */
  lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);

  return result;
}
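The PTHREAD_RWLOCK_PREFER_READER_P test used in the rdlock and wrlock code above reads the __flags field, which is set from the rwlock attribute's kind. A sketch of selecting writer preference through the public API (illustrative only); preferring writers helps avoid writer starvation when readers arrive continuously:

#define _GNU_SOURCE
#include <pthread.h>

int
main (void)
{
  pthread_rwlockattr_t attr;
  pthread_rwlock_t rw;

  pthread_rwlockattr_init (&attr);
  pthread_rwlockattr_setkind_np (&attr,
                                 PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP);
  pthread_rwlock_init (&rw, &attr);
  pthread_rwlockattr_destroy (&attr);

  pthread_rwlock_wrlock (&rw);          /* new readers now queue behind us */
  /* ... update shared state ...  */
  pthread_rwlock_unlock (&rw);

  pthread_rwlock_destroy (&rw);
  return 0;
}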
int __pthread_rwlock_wrlock_internal ( pthread_rwlock_t *  __rwlock)

Definition at line 895 of file pthread.c.

{
  pthread_descr self = thread_self();
  return THREAD_GETMEM(self, p_tid);
}
int __pthread_setcancelstate ( int  state,
int *  oldstate 
)

Definition at line 33 of file cancel.c.

int __pthread_setcanceltype ( int  type,
int *  oldtype 
)

Definition at line 48 of file cancel.c.

{
  pthread_descr self = thread_self();
  if (type < PTHREAD_CANCEL_DEFERRED || type > PTHREAD_CANCEL_ASYNCHRONOUS)
    return EINVAL;
  if (oldtype != NULL) *oldtype = THREAD_GETMEM(self, p_canceltype);
  THREAD_SETMEM(self, p_canceltype, type);
  if (THREAD_GETMEM(self, p_canceled) &&
      THREAD_GETMEM(self, p_cancelstate) == PTHREAD_CANCEL_ENABLE &&
      THREAD_GETMEM(self, p_canceltype) == PTHREAD_CANCEL_ASYNCHRONOUS)
    __pthread_do_exit(PTHREAD_CANCELED, CURRENT_STACK_FRAME);
  return 0;
}
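A usage sketch for the cancellation-type switch above (illustrative only; the computation is hypothetical): asynchronous cancellation is enabled around a CPU-bound region that contains no cancellation points, then the previous type is restored.

#include <pthread.h>
#include <stdio.h>

static void *
worker (void *arg)
{
  int oldtype;
  volatile unsigned long i;

  /* Allow cancellation to act immediately during the loop below.  */
  pthread_setcanceltype (PTHREAD_CANCEL_ASYNCHRONOUS, &oldtype);

  for (i = 0; i < 100000000UL; ++i)
    ;                           /* CPU-bound work, no cancellation points */

  /* Restore the caller's previous cancellation type.  */
  pthread_setcanceltype (oldtype, NULL);
  return NULL;
}

int
main (void)
{
  pthread_t t;
  pthread_create (&t, NULL, worker, NULL);
  pthread_cancel (t);
  pthread_join (t, NULL);
  puts ("worker finished or was cancelled");
  return 0;
}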

int __pthread_setschedparam ( pthread_t  thread_id,
int  policy,
const struct sched_param *  param 
)

Definition at line 964 of file pthread.c.

{
  pthread_handle handle = thread_handle(thread);
  pthread_descr th;

  __pthread_lock(&handle->h_lock, NULL);
  if (__builtin_expect (invalid_handle(handle, thread), 0)) {
    __pthread_unlock(&handle->h_lock);
    return ESRCH;
  }
  th = handle->h_descr;
  if (__builtin_expect (__sched_setscheduler(th->p_pid, policy, param) == -1,
                     0)) {
    __pthread_unlock(&handle->h_lock);
    return errno;
  }
  th->p_priority = policy == SCHED_OTHER ? 0 : param->sched_priority;
  __pthread_unlock(&handle->h_lock);
  if (__pthread_manager_request >= 0)
    __pthread_manager_adjust_prio(th->p_priority);
  return 0;
}
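A usage sketch for the public call that ends up in the code above (illustrative only): raising a thread to a realtime policy usually requires appropriate privileges, so the error path is shown as well.

#include <pthread.h>
#include <sched.h>
#include <stdio.h>
#include <string.h>

int
main (void)
{
  struct sched_param sp;
  memset (&sp, 0, sizeof sp);
  sp.sched_priority = sched_get_priority_min (SCHED_FIFO);

  int err = pthread_setschedparam (pthread_self (), SCHED_FIFO, &sp);
  if (err != 0)
    fprintf (stderr, "pthread_setschedparam: %s\n", strerror (err));
  return 0;
}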

int __pthread_setspecific ( pthread_key_t  key,
const void *  value 
)

Definition at line 141 of file specific.c.

{
  pthread_descr self = thread_self();
  unsigned int idx1st, idx2nd;

  if (key >= PTHREAD_KEYS_MAX || !pthread_keys[key].in_use)
    return EINVAL;
  idx1st = key / PTHREAD_KEY_2NDLEVEL_SIZE;
  idx2nd = key % PTHREAD_KEY_2NDLEVEL_SIZE;
  if (THREAD_GETMEM_NC(self, p_specific[idx1st]) == NULL) {
    void *newp = calloc(PTHREAD_KEY_2NDLEVEL_SIZE, sizeof (void *));
    if (newp == NULL)
      return ENOMEM;
    THREAD_SETMEM_NC(self, p_specific[idx1st], newp);
  }
  THREAD_GETMEM_NC(self, p_specific[idx1st])[idx2nd] = (void *) pointer;
  return 0;
}
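A usage sketch for the public thread-specific-data API whose setter is shown above (illustrative only; the key and destructor are hypothetical): each thread stores its own value under the key, and the destructor runs on thread exit.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_key_t key;

static void
destructor (void *value)
{
  free (value);
}

static void *
worker (void *arg)
{
  int *slot = malloc (sizeof *slot);
  *slot = 42;
  pthread_setspecific (key, slot);      /* stored in the per-thread table */
  printf ("got back %d\n", *(int *) pthread_getspecific (key));
  return NULL;                          /* destructor frees the slot */
}

int
main (void)
{
  pthread_t t;
  pthread_key_create (&key, destructor);
  pthread_create (&t, NULL, worker, NULL);
  pthread_join (t, NULL);
  pthread_key_delete (&key);
  return 0;
}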

int __pthread_tpp_change_priority ( int  prev_prio,
int  new_prio 
)

Definition at line 41 of file tpp.c.

{
  struct pthread *self = THREAD_SELF;
  struct priority_protection_data *tpp = THREAD_GETMEM (self, tpp);

  if (tpp == NULL)
    {
      if (__sched_fifo_min_prio == -1)
       __init_sched_fifo_prio ();

      size_t size = sizeof *tpp;
      size += (__sched_fifo_max_prio - __sched_fifo_min_prio + 1)
             * sizeof (tpp->priomap[0]);
      tpp = calloc (size, 1);
      if (tpp == NULL)
       return ENOMEM;
      tpp->priomax = __sched_fifo_min_prio - 1;
      THREAD_SETMEM (self, tpp, tpp);
    }

  assert (new_prio == -1
         || (new_prio >= __sched_fifo_min_prio
             && new_prio <= __sched_fifo_max_prio));
  assert (previous_prio == -1
         || (previous_prio >= __sched_fifo_min_prio
             && previous_prio <= __sched_fifo_max_prio));

  int priomax = tpp->priomax;
  int newpriomax = priomax;
  if (new_prio != -1)
    {
      if (tpp->priomap[new_prio - __sched_fifo_min_prio] + 1 == 0)
       return EAGAIN;
      ++tpp->priomap[new_prio - __sched_fifo_min_prio];
      if (new_prio > priomax)
       newpriomax = new_prio;
    }

  if (previous_prio != -1)
    {
      if (--tpp->priomap[previous_prio - __sched_fifo_min_prio] == 0
         && priomax == previous_prio
         && previous_prio > new_prio)
       {
         int i;
         for (i = previous_prio - 1; i >= __sched_fifo_min_prio; --i)
           if (tpp->priomap[i - __sched_fifo_min_prio])
             break;
         newpriomax = i;
       }
    }

  if (priomax == newpriomax)
    return 0;

  lll_lock (self->lock, LLL_PRIVATE);

  tpp->priomax = newpriomax;

  int result = 0;

  if ((self->flags & ATTR_FLAG_SCHED_SET) == 0)
    {
      if (__sched_getparam (self->tid, &self->schedparam) != 0)
       result = errno;
      else
       self->flags |= ATTR_FLAG_SCHED_SET;
    }

  if ((self->flags & ATTR_FLAG_POLICY_SET) == 0)
    {
      self->schedpolicy = __sched_getscheduler (self->tid);
      if (self->schedpolicy == -1)
       result = errno;
      else
       self->flags |= ATTR_FLAG_POLICY_SET;
    }

  if (result == 0)
    {
      struct sched_param sp = self->schedparam;
      if (sp.sched_priority < newpriomax || sp.sched_priority < priomax)
       {
         if (sp.sched_priority < newpriomax)
           sp.sched_priority = newpriomax;

         if (__sched_setscheduler (self->tid, self->schedpolicy, &sp) < 0)
           result = errno;
       }
    }

  lll_unlock (self->lock, LLL_PRIVATE);

  return result;
}
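A sketch of a PTHREAD_PRIO_PROTECT mutex whose ceiling feeds this function when the mutex is locked (illustrative only; the ceiling value is hypothetical). Note that the boost is applied with sched_setscheduler, so on a non-realtime thread or without the needed privileges the lock can fail, and the example checks for that.

#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <stdio.h>
#include <string.h>

int
main (void)
{
  pthread_mutexattr_t attr;
  pthread_mutex_t m;
  int ceiling = sched_get_priority_min (SCHED_FIFO) + 1;

  pthread_mutexattr_init (&attr);
  pthread_mutexattr_setprotocol (&attr, PTHREAD_PRIO_PROTECT);
  pthread_mutexattr_setprioceiling (&attr, ceiling);
  pthread_mutex_init (&m, &attr);
  pthread_mutexattr_destroy (&attr);

  /* Locking tries to boost the owner to the ceiling via
     __pthread_tpp_change_priority; unlocking undoes the boost with
     __pthread_tpp_change_priority (oldprio, -1).  */
  int err = pthread_mutex_lock (&m);
  if (err == 0)
    pthread_mutex_unlock (&m);
  else
    fprintf (stderr, "lock: %s\n", strerror (err));

  pthread_mutex_destroy (&m);
  return 0;
}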

void __reclaim_stacks ( void  )

Definition at line 775 of file allocatestack.c.

{
  struct pthread *self = (struct pthread *) THREAD_SELF;

  /* No locking necessary.  The caller is the only stack in use.  */

  /* Mark all stacks except the still running one as free.  */
  list_t *runp;
  list_for_each (runp, &stack_used)
    {
      struct pthread *curp = list_entry (runp, struct pthread, list);
      if (curp != self)
       {
         /* This marks the stack as free.  */
         curp->tid = 0;

         /* The PID field must be initialized for the new process.  */
         curp->pid = self->pid;

         /* Account for the size of the stack.  */
         stack_cache_actsize += curp->stackblock_size;

         if (curp->specific_used)
           {
             /* Clear the thread-specific data.  */
             memset (curp->specific_1stblock, '\0',
                    sizeof (curp->specific_1stblock));

             curp->specific_used = false;

             for (size_t cnt = 1; cnt < PTHREAD_KEY_1STLEVEL_SIZE; ++cnt)
              if (curp->specific[cnt] != NULL)
                {
                  memset (curp->specific[cnt], '\0',
                         sizeof (curp->specific_1stblock));

                  /* We have allocated the block which we do not
                     free here so re-set the bit.  */
                  curp->specific_used = true;
                }
           }
       }
    }

  /* Reset the PIDs in any cached stacks.  */
  list_for_each (runp, &stack_cache)
    {
      struct pthread *curp = list_entry (runp, struct pthread, list);
      curp->pid = self->pid;
    }

  /* Add the stack of all running threads to the cache.  */
  list_splice (&stack_used, &stack_cache);

  /* Remove the entry for the current thread from the cache list
     and add it to the list of running threads.  Which of the two
     lists is decided by the user_stack flag.  */
  list_del (&self->list);

  /* Re-initialize the lists for all the threads.  */
  INIT_LIST_HEAD (&stack_used);
  INIT_LIST_HEAD (&__stack_user);

  if (__builtin_expect (THREAD_GETMEM (self, user_stack), 0))
    list_add (&self->list, &__stack_user);
  else
    list_add (&self->list, &stack_used);

  /* There is one thread running.  */
  __nptl_nthreads = 1;

  /* Initialize the lock.  */
  stack_cache_lock = LLL_LOCK_INITIALIZER;
}

void __wait_lookup_done ( void  )

Definition at line 1035 of file allocatestack.c.

{
  lll_lock (stack_cache_lock, LLL_PRIVATE);

  struct pthread *self = THREAD_SELF;

  /* Iterate over the list with system-allocated threads first.  */
  list_t *runp;
  list_for_each (runp, &stack_used)
    {
      struct pthread *t = list_entry (runp, struct pthread, list);
      if (t == self || t->header.gscope_flag == THREAD_GSCOPE_FLAG_UNUSED)
       continue;

      int *const gscope_flagp = &t->header.gscope_flag;

      /* We have to wait until this thread is done with the global
        scope.  First tell the thread that we are waiting and
        possibly have to be woken.  */
      if (atomic_compare_and_exchange_bool_acq (gscope_flagp,
                                          THREAD_GSCOPE_FLAG_WAIT,
                                          THREAD_GSCOPE_FLAG_USED))
       continue;

      do
       lll_futex_wait (gscope_flagp, THREAD_GSCOPE_FLAG_WAIT, LLL_PRIVATE);
      while (*gscope_flagp == THREAD_GSCOPE_FLAG_WAIT);
    }

  /* Now the list with threads using user-allocated stacks.  */
  list_for_each (runp, &__stack_user)
    {
      struct pthread *t = list_entry (runp, struct pthread, list);
      if (t == self || t->header.gscope_flag == THREAD_GSCOPE_FLAG_UNUSED)
       continue;

      int *const gscope_flagp = &t->header.gscope_flag;

      /* We have to wait until this thread is done with the global
        scope.  First tell the thread that we are waiting and
        possibly have to be woken.  */
      if (atomic_compare_and_exchange_bool_acq (gscope_flagp,
                                          THREAD_GSCOPE_FLAG_WAIT,
                                          THREAD_GSCOPE_FLAG_USED))
       continue;

      do
       lll_futex_wait (gscope_flagp, THREAD_GSCOPE_FLAG_WAIT, LLL_PRIVATE);
      while (*gscope_flagp == THREAD_GSCOPE_FLAG_WAIT);
    }

  lll_unlock (stack_cache_lock, LLL_PRIVATE);
}

void _pthread_cleanup_pop ( struct _pthread_cleanup_buffer *  buffer,
int  execute 
)

Definition at line 167 of file cancel.c.

{
  pthread_descr self = thread_self();
  if (execute) buffer->__routine(buffer->__arg);
  THREAD_SETMEM(self, p_cleanup, buffer->__prev);
}
void _pthread_cleanup_pop_restore ( struct _pthread_cleanup_buffer *  buffer,
int  execute 
)
void _pthread_cleanup_push ( struct _pthread_cleanup_buffer *  buffer,
void(*)(void *)  routine,
void *  arg 
)

Definition at line 155 of file cancel.c.

{
  pthread_descr self = thread_self();
  buffer->__routine = routine;
  buffer->__arg = arg;
  buffer->__prev = THREAD_GETMEM(self, p_cleanup);
  if (buffer->__prev != NULL && FRAME_LEFT (buffer, buffer->__prev))
    buffer->__prev = NULL;
  THREAD_SETMEM(self, p_cleanup, buffer);
}
void _pthread_cleanup_push_defer ( struct _pthread_cleanup_buffer *  buffer,
void(*)(void *)  routine,
void *  arg 
)

Definition at line 175 of file cancel.c.

{
  pthread_descr self = thread_self();
  buffer->__routine = routine;
  buffer->__arg = arg;
  buffer->__canceltype = THREAD_GETMEM(self, p_canceltype);
  buffer->__prev = THREAD_GETMEM(self, p_cleanup);
  if (buffer->__prev != NULL && FRAME_LEFT (buffer, buffer->__prev))
    buffer->__prev = NULL;
  THREAD_SETMEM(self, p_canceltype, PTHREAD_CANCEL_DEFERRED);
  THREAD_SETMEM(self, p_cleanup, buffer);
}
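A usage sketch of the public pthread_cleanup_push / pthread_cleanup_pop macros, which expand (directly or via the _defer variants) to the cleanup-buffer helpers shown above (illustrative only; the handler and its argument are hypothetical):

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static void
cleanup (void *arg)
{
  printf ("cleanup: %s\n", (const char *) arg);
}

static void *
worker (void *arg)
{
  pthread_cleanup_push (cleanup, "worker cancelled or finished");
  sleep (10);                   /* cancellation point */
  pthread_cleanup_pop (1);      /* run the handler on normal exit too */
  return NULL;
}

int
main (void)
{
  pthread_t t;
  pthread_create (&t, NULL, worker, NULL);
  pthread_cancel (t);
  pthread_join (t, NULL);
  return 0;
}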
hidden_proto ( __pthread_keys  )

Variable Documentation

Definition at line 155 of file pthreadP.h.

struct pthread_functions *  functions  internal_function

Definition at line 378 of file pthreadP.h.

void(* reclaim)(void)

Definition at line 377 of file pthreadP.h.