
glibc  2.9
internals.h File Reference
#include <limits.h>
#include <signal.h>
#include <stdbool.h>
#include <unistd.h>
#include <stackinfo.h>
#include <sigcontextinfo.h>
#include <tls.h>
#include "descr.h"
#include "semaphore.h"
#include <pthread-functions.h>


Classes

struct  pthread_key_struct
struct  pthread_handle_struct
struct  pthread_request
union  sighandler
union  pthread_request.req_args
struct  pthread_request.req_args.create
struct  pthread_request.req_args.free
struct  pthread_request.req_args.exit
struct  pthread_request.req_args.for_each

Defines

#define THREAD_GETMEM(descr, member)   descr->member
#define THREAD_GETMEM_NC(descr, member)   descr->member
#define THREAD_SETMEM(descr, member, value)   descr->member = (value)
#define THREAD_SETMEM_NC(descr, member, value)   descr->member = (value)
#define LIBC_THREAD_GETMEM(descr, member)   descr->member
#define LIBC_THREAD_SETMEM(descr, member, value)   descr->member = (value)
#define PTHREAD_START_ARGS_INITIALIZER(fct)   { (void *(*) (void *)) fct, NULL, {{0, }}, 0, { 0 } }
#define page_roundup(v, p)   ((((size_t) (v)) + (p) - 1) & ~((p) - 1))
#define PAGE_SIZE   (sysconf (_SC_PAGE_SIZE))
#define INITIAL_STACK_SIZE   (4 * PAGE_SIZE)
#define THREAD_MANAGER_STACK_SIZE   (2 * PAGE_SIZE - 32)
#define THREAD_STACK_START_ADDRESS   __pthread_initial_thread_bos
#define MEMORY_BARRIER()   asm ("" : : : "memory")
#define READ_MEMORY_BARRIER()   MEMORY_BARRIER()
#define WRITE_MEMORY_BARRIER()   MEMORY_BARRIER()
#define MAX_SPIN_COUNT   50
#define MAX_ADAPTIVE_SPIN_COUNT   100
#define SPIN_SLEEP_DURATION   2000001
#define ASSERT(x)
#define MSG(msg, arg...)
#define LIBC_CANCEL_ASYNC()   __libc_enable_asynccancel ()
#define LIBC_CANCEL_RESET(oldtype)   __libc_disable_asynccancel (oldtype)
#define LIBC_CANCEL_HANDLED()
#define thread_self()   __pthread_thread_self ()
#define __manager_thread   (&__pthread_manager_thread)

Typedefs

typedef void(* destr_function )(void *)
typedef struct pthread_handle_struct * pthread_handle
typedef void(* arch_sighandler_t )(int, SIGCONTEXT)

Functions

static pthread_handle thread_handle (pthread_t id)
static int invalid_handle (pthread_handle h, pthread_t id)
static int nonexisting_handle (pthread_handle h, pthread_t id)
void __pthread_do_exit (void *retval, char *currentframe) __attribute__((__noreturn__))
void __pthread_destroy_specifics (void)
void __pthread_perform_cleanup (char *currentframe)
void __pthread_init_max_stacksize (void)
int __pthread_initialize_manager (void)
void __pthread_message (const char *fmt,...)
int __pthread_manager (void *reqfd)
int __pthread_manager_event (void *reqfd)
void __pthread_manager_sighandler (int sig)
void __pthread_reset_main_thread (void)
void __pthread_once_fork_prepare (void)
void __pthread_once_fork_parent (void)
void __pthread_once_fork_child (void)
void __flockfilelist (void)
void __funlockfilelist (void)
void __fresetlockfiles (void)
void __pthread_manager_adjust_prio (int thread_prio)
void __pthread_initialize_minimal (void)
int __pthread_attr_setguardsize (pthread_attr_t *__attr, size_t __guardsize)
int __pthread_attr_getguardsize (const pthread_attr_t *__attr, size_t *__guardsize)
int __pthread_attr_setstackaddr (pthread_attr_t *__attr, void *__stackaddr)
int __pthread_attr_getstackaddr (const pthread_attr_t *__attr, void **__stackaddr)
int __pthread_attr_setstacksize (pthread_attr_t *__attr, size_t __stacksize)
int __pthread_attr_getstacksize (const pthread_attr_t *__attr, size_t *__stacksize)
int __pthread_attr_setstack (pthread_attr_t *__attr, void *__stackaddr, size_t __stacksize)
int __pthread_attr_getstack (const pthread_attr_t *__attr, void **__stackaddr, size_t *__stacksize)
int __pthread_attr_destroy (pthread_attr_t *attr)
int __pthread_attr_setdetachstate (pthread_attr_t *attr, int detachstate)
int __pthread_attr_getdetachstate (const pthread_attr_t *attr, int *detachstate)
int __pthread_attr_setschedparam (pthread_attr_t *attr, const struct sched_param *param)
int __pthread_attr_getschedparam (const pthread_attr_t *attr, struct sched_param *param)
int __pthread_attr_setschedpolicy (pthread_attr_t *attr, int policy)
int __pthread_attr_getschedpolicy (const pthread_attr_t *attr, int *policy)
int __pthread_attr_setinheritsched (pthread_attr_t *attr, int inherit)
int __pthread_attr_getinheritsched (const pthread_attr_t *attr, int *inherit)
int __pthread_attr_setscope (pthread_attr_t *attr, int scope)
int __pthread_attr_getscope (const pthread_attr_t *attr, int *scope)
int __pthread_getconcurrency (void)
int __pthread_setconcurrency (int __level)
int __pthread_mutex_timedlock (pthread_mutex_t *__mutex, const struct timespec *__abstime)
int __pthread_mutexattr_getpshared (const pthread_mutexattr_t *__attr, int *__pshared)
int __pthread_mutexattr_setpshared (pthread_mutexattr_t *__attr, int __pshared)
int __pthread_mutexattr_gettype (const pthread_mutexattr_t *__attr, int *__kind)
void __pthread_kill_other_threads_np (void)
int __pthread_mutex_init (pthread_mutex_t *__mutex, __const pthread_mutexattr_t *__mutex_attr)
int __pthread_mutex_destroy (pthread_mutex_t *__mutex)
int __pthread_mutex_lock (pthread_mutex_t *__mutex)
int __pthread_mutex_trylock (pthread_mutex_t *__mutex)
int __pthread_mutex_unlock (pthread_mutex_t *__mutex)
int __pthread_cond_init (pthread_cond_t *cond, const pthread_condattr_t *cond_attr)
int __pthread_cond_destroy (pthread_cond_t *cond)
int __pthread_cond_wait (pthread_cond_t *cond, pthread_mutex_t *mutex)
int __pthread_cond_timedwait (pthread_cond_t *cond, pthread_mutex_t *mutex, const struct timespec *abstime)
int __pthread_cond_signal (pthread_cond_t *cond)
int __pthread_cond_broadcast (pthread_cond_t *cond)
int __pthread_condattr_init (pthread_condattr_t *attr)
int __pthread_condattr_destroy (pthread_condattr_t *attr)
pthread_t __pthread_self (void)
pthread_descr __pthread_thread_self (void)
pthread_descr __pthread_self_stack (void) attribute_hidden
int __pthread_equal (pthread_t thread1, pthread_t thread2)
void __pthread_exit (void *retval)
int __pthread_getschedparam (pthread_t thread, int *policy, struct sched_param *param)
int __pthread_setschedparam (pthread_t thread, int policy, const struct sched_param *param)
int __pthread_setcancelstate (int state, int *oldstate)
int __pthread_setcanceltype (int type, int *oldtype)
void __pthread_restart_old (pthread_descr th)
void __pthread_suspend_old (pthread_descr self)
int __pthread_timedsuspend_old (pthread_descr self, const struct timespec *abs)
void __pthread_restart_new (pthread_descr th)
void __pthread_suspend_new (pthread_descr self)
int __pthread_timedsuspend_new (pthread_descr self, const struct timespec *abs)
void __pthread_wait_for_restart_signal (pthread_descr self)
void __pthread_sigsuspend (const sigset_t *mask) attribute_hidden
int __pthread_yield (void)
int __pthread_rwlock_timedrdlock (pthread_rwlock_t *__restrict __rwlock, __const struct timespec *__restrict __abstime)
int __pthread_rwlock_timedwrlock (pthread_rwlock_t *__restrict __rwlock, __const struct timespec *__restrict __abstime)
int __pthread_rwlockattr_destroy (pthread_rwlockattr_t *__attr)
int __pthread_barrierattr_getpshared (__const pthread_barrierattr_t *__restrict __attr, int *__restrict __pshared)
int __pthread_spin_lock (pthread_spinlock_t *__lock)
int __pthread_spin_trylock (pthread_spinlock_t *__lock)
int __pthread_spin_unlock (pthread_spinlock_t *__lock)
int __pthread_spin_init (pthread_spinlock_t *__lock, int __pshared)
int __pthread_spin_destroy (pthread_spinlock_t *__lock)
int __libc_close (int fd)
int __libc_nanosleep (const struct timespec *requested_time, struct timespec *remaining)
int __new_sem_post (sem_t *sem)
int __new_sem_init (sem_t *__sem, int __pshared, unsigned int __value)
int __new_sem_wait (sem_t *__sem)
int __new_sem_trywait (sem_t *__sem)
int __new_sem_getvalue (sem_t *__restrict __sem, int *__restrict __sval)
int __new_sem_destroy (sem_t *__sem)
int __pthread_attr_init_2_1 (pthread_attr_t *__attr)
int __pthread_attr_init_2_0 (pthread_attr_t *__attr)
int __pthread_create_2_1 (pthread_t *__restrict __threadp, const pthread_attr_t *__attr, void *(*__start_routine)(void *), void *__restrict __arg)
int __pthread_create_2_0 (pthread_t *__restrict thread, const pthread_attr_t *__attr, void *(*__start_routine)(void *), void *__restrict arg)
void __linuxthreads_create_event (void)
void __linuxthreads_death_event (void)
void __linuxthreads_reap_event (void)
void __pthread_initialize (void)
int __pthread_internal_tsd_set (int key, const void *pointer)
void * __pthread_internal_tsd_get (int key)
void ** __attribute__ ((__const__)) __pthread_internal_tsd_address(int key)
void __pthread_sighandler (int signo, SIGCONTEXT ctx)
void __pthread_sighandler_rt (int signo, struct siginfo *si, struct ucontext *uc)
void __pthread_null_sighandler (int sig)
int __pthread_sigaction (int sig, const struct sigaction *act, struct sigaction *oact)
int __pthread_sigwait (const sigset_t *set, int *sig)
int __pthread_raise (int sig)
int __pthread_enable_asynccancel (void) attribute_hidden
void __pthread_disable_asynccancel (int oldtype) internal_function attribute_hidden
int __libc_enable_asynccancel (void) attribute_hidden
void __libc_disable_asynccancel (int oldtype) internal_function attribute_hidden
int __librt_enable_asynccancel (void)
void __librt_disable_asynccancel (int oldtype) internal_function
void __pthread_cleanup_upto (__jmp_buf target, char *targetframe) attribute_hidden
pid_t __pthread_fork (struct fork_block *b) attribute_hidden
int * __libc_pthread_init (const struct pthread_functions *functions)
 weak_extern (__pthread_thread_self) extern inline __attribute__((always_inline)) pthread_descr check_thread_self(void)

Variables

int __pthread_sig_restart
int __pthread_sig_cancel
int __pthread_sig_debug
pthread_descr __pthread_main_thread
int __pthread_manager_request
int __pthread_manager_reader
int __pthread_exit_requested
int __pthread_exit_code
volatile int __pthread_threads_debug
volatile td_thr_events_t __pthread_threads_events
volatile pthread_descr __pthread_last_event
int __pthread_smp_kernel
int __libc_multiple_threads attribute_hidden
int __librt_multiple_threads
void(* __pthread_restart )(pthread_descr)
void(* __pthread_suspend )(pthread_descr)
int(* __pthread_timedsuspend )(pthread_descr, const struct timespec *)

Class Documentation

struct pthread_handle_struct

Definition at line 73 of file internals.h.

Class Members
char * h_bottom
pthread_descr h_descr
struct _pthread_fastlock h_lock

union pthread_request.req_args

Definition at line 87 of file internals.h.

Class Members
req_args create
req_args exit
req_args for_each
req_args free
void * post
struct pthread_request.req_args.free

Definition at line 94 of file internals.h.

Class Members
pthread_t thread_id
struct pthread_request.req_args.exit

Definition at line 97 of file internals.h.

Class Members
int code

Define Documentation

#define __manager_thread   (&__pthread_manager_thread)
#define ASSERT (   x)

Definition at line 270 of file internals.h.

#define INITIAL_STACK_SIZE   (4 * PAGE_SIZE)

Definition at line 203 of file internals.h.

#define LIBC_CANCEL_ASYNC ( )    __libc_enable_asynccancel ()

Definition at line 483 of file internals.h.

#define LIBC_CANCEL_HANDLED ( )
Value:
__asm (".globl " __SYMBOL_PREFIX "__libc_enable_asynccancel"); \
  __asm (".globl " __SYMBOL_PREFIX "__libc_disable_asynccancel")

Definition at line 487 of file internals.h.

#define LIBC_CANCEL_RESET (   oldtype)    __libc_disable_asynccancel (oldtype)

Definition at line 485 of file internals.h.
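
These macros are used inside libc to bracket a blocking call so that the call becomes an asynchronous cancellation point. A minimal sketch of the pattern, assuming internals.h is available (the wrapper function and the choice of read() are illustrative, not a copy of any particular glibc wrapper):

/* Hedged sketch: bracket a blocking operation with the cancellation
   macros.  Only meaningful inside libc, where internals.h and
   __libc_enable_asynccancel()/__libc_disable_asynccancel() exist.  */
ssize_t
cancellable_read (int fd, void *buf, size_t nbytes)
{
  int oldtype = LIBC_CANCEL_ASYNC ();        /* enable async cancellation */
  ssize_t result = read (fd, buf, nbytes);   /* thread may be cancelled here */
  LIBC_CANCEL_RESET (oldtype);               /* restore previous cancel type */
  return result;
}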

#define LIBC_THREAD_GETMEM (   descr,
  member 
)    descr->member

Definition at line 53 of file internals.h.

#define LIBC_THREAD_SETMEM (   descr,
  member,
  value 
)    descr->member = (value)

Definition at line 54 of file internals.h.

#define MAX_ADAPTIVE_SPIN_COUNT   100

Definition at line 246 of file internals.h.

#define MAX_SPIN_COUNT   50

Definition at line 239 of file internals.h.

#define MEMORY_BARRIER ( )    asm ("" : : : "memory")

Definition at line 226 of file internals.h.

#define MSG (   msg,
  arg... 
)

Definition at line 271 of file internals.h.

#define page_roundup (   v,
  p 
)    ((((size_t) (v)) + (p) - 1) & ~((p) - 1))

Definition at line 192 of file internals.h.
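
page_roundup() rounds v up to the next multiple of p, which must be a power of two for the mask arithmetic to work. An illustrative, self-contained use (the macro is copied here and the helper function is hypothetical):

#include <stddef.h>
#include <unistd.h>

#define page_roundup(v, p) ((((size_t) (v)) + (p) - 1) & ~((p) - 1))

/* Round a requested stack size up to a whole number of pages,
   e.g. 5000 -> 8192 with 4096-byte pages.  */
size_t
rounded_stack_size (size_t requested)
{
  size_t page = (size_t) sysconf (_SC_PAGE_SIZE);
  return page_roundup (requested, page);
}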

#define PAGE_SIZE   (sysconf (_SC_PAGE_SIZE))

Definition at line 198 of file internals.h.

#define PTHREAD_START_ARGS_INITIALIZER (   fct)    { (void *(*) (void *)) fct, NULL, {{0, }}, 0, { 0 } }

Definition at line 65 of file internals.h.

#define READ_MEMORY_BARRIER ( )    MEMORY_BARRIER()

Definition at line 229 of file internals.h.

#define SPIN_SLEEP_DURATION   2000001

Definition at line 256 of file internals.h.

#define THREAD_GETMEM (   descr,
  member 
)    descr->member

Definition at line 36 of file internals.h.

#define THREAD_GETMEM_NC (   descr,
  member 
)    descr->member

Definition at line 39 of file internals.h.

#define THREAD_MANAGER_STACK_SIZE   (2 * PAGE_SIZE - 32)

Definition at line 209 of file internals.h.

static pthread_descr thread_self ( void  ) [inline]

Definition at line 252 of file descr.h.

{
#ifdef THREAD_SELF
  return THREAD_SELF;
#else
  char *sp = CURRENT_STACK_FRAME;
  if (sp >= __pthread_initial_thread_bos)
    return &__pthread_initial_thread;
  else if (sp >= __pthread_manager_thread_bos
          && sp < __pthread_manager_thread_tos)
    return &__pthread_manager_thread;
  else if (__pthread_nonstandard_stacks)
    return __pthread_find_self();
  else
#ifdef _STACK_GROWS_DOWN
    return (pthread_descr)(((unsigned long)sp | (STACK_SIZE-1))+1) - 1;
#else
    return (pthread_descr)((unsigned long)sp &~ (STACK_SIZE-1));
#endif
#endif
}
#define THREAD_SETMEM (   descr,
  member,
  value 
)    descr->member = (value)

Definition at line 42 of file internals.h.

#define THREAD_SETMEM_NC (   descr,
  member,
  value 
)    descr->member = (value)

Definition at line 45 of file internals.h.
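
The THREAD_*MEM macros are the accessors through which the library reads and writes thread-descriptor members; in this non-TLS definition they are plain member accesses, while TLS-based configurations substitute versions that go through the thread register. A minimal sketch of their use (the helper functions are hypothetical; p_canceled is a real descriptor field used by the callers documented below):

/* Hedged sketch of descriptor access through the accessor macros.  */
static int
has_pending_cancel (pthread_descr self)
{
  return THREAD_GETMEM (self, p_canceled);   /* reads self->p_canceled */
}

static void
clear_pending_cancel (pthread_descr self)
{
  THREAD_SETMEM (self, p_canceled, 0);       /* writes self->p_canceled = 0 */
}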

#define THREAD_STACK_START_ADDRESS   __pthread_initial_thread_bos

Definition at line 216 of file internals.h.

#define WRITE_MEMORY_BARRIER ( )    MEMORY_BARRIER()

Definition at line 232 of file internals.h.


Typedef Documentation

typedef void(* arch_sighandler_t)(int, SIGCONTEXT)

Definition at line 110 of file internals.h.

typedef void(* destr_function)(void *)

Definition at line 57 of file internals.h.

typedef struct pthread_handle_struct * pthread_handle

Definition at line 71 of file internals.h.


Function Documentation

void ** __attribute__ ((__const__)) __pthread_internal_tsd_address ( int  key)

Definition at line 236 of file specific.c.

{
  pthread_descr self = thread_self();
  return &self->p_libc_specific[key];
}
void __flockfilelist ( void  )

void __fresetlockfiles ( void  )

Definition at line 67 of file lockfile.c.

{
  _IO_ITER i;

  pthread_mutexattr_t attr;

  __pthread_mutexattr_init (&attr);
  __pthread_mutexattr_settype (&attr, PTHREAD_MUTEX_RECURSIVE_NP);

  for (i = _IO_iter_begin(); i != _IO_iter_end(); i = _IO_iter_next(i))
    __pthread_mutex_init (_IO_iter_file(i)->_lock, &attr);

  __pthread_mutexattr_destroy (&attr);

  _IO_list_resetlock();
}

void __funlockfilelist ( void  )

Definition at line 61 of file lockfile.c.

{
  _IO_list_unlock();
}

int __libc_close ( int  fd)

void __libc_disable_asynccancel ( int  oldtype)

Definition at line 81 of file libc-cancellation.c.

{
  /* If asynchronous cancellation was enabled before we do not have
     anything to do.  */
  if (oldtype & CANCELTYPE_BITMASK)
    return;

  struct pthread *self = THREAD_SELF;
  int oldval = THREAD_GETMEM (self, cancelhandling);

  while (1)
    {
      int newval = oldval & ~CANCELTYPE_BITMASK;

      if (newval == oldval)
       break;

      int curval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling, newval,
                                         oldval);
      if (__builtin_expect (curval == oldval, 1))
       break;

      /* Prepare the next round.  */
      oldval = curval;
    }
}

int __libc_enable_asynccancel ( void  )

Definition at line 34 of file libc-cancellation.c.

{
  struct pthread *self = THREAD_SELF;
  int oldval = THREAD_GETMEM (self, cancelhandling);

  while (1)
    {
      int newval = oldval | CANCELTYPE_BITMASK;

      if (__builtin_expect ((oldval & CANCELED_BITMASK) != 0, 0))
       {
         /* If we are already exiting or if PTHREAD_CANCEL_DISABLED,
            stop right here.  */
         if ((oldval & (EXITING_BITMASK | CANCELSTATE_BITMASK)) != 0)
           break;

         int curval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling,
                                            newval, oldval);
         if (__builtin_expect (curval != oldval, 0))
           {
             /* Somebody else modified the word, try again.  */
             oldval = curval;
             continue;
           }

         THREAD_SETMEM (self, result, PTHREAD_CANCELED);

         __do_cancel ();

         /* NOTREACHED */
       }

      int curval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling, newval,
                                         oldval);
      if (__builtin_expect (curval == oldval, 1))
       break;

      /* Prepare the next round.  */
      oldval = curval;
    }

  return oldval;
}
int __libc_nanosleep ( const struct timespec *  requested_time,
struct timespec *  remaining 
)

Definition at line 33 of file nanosleep.c.

{
  assert (sizeof (struct timestruc_t) == sizeof (*req));
  return _nsleep ((struct timestruc_t *) req, (struct timestruc_t *) rem);
}

int * __libc_pthread_init ( const struct pthread_functions *  functions)

void __librt_disable_asynccancel ( int  oldtype)
void __linuxthreads_create_event ( void  )

Definition at line 25 of file events.c.

{
}

void __linuxthreads_death_event ( void  )

Definition at line 30 of file events.c.

{
}

void __linuxthreads_reap_event ( void  )

Definition at line 35 of file events.c.

{
}

int __new_sem_destroy ( sem_t *  __sem)

Definition at line 184 of file semaphore.c.

{
  if (sem->__sem_waiting != NULL) {
    __set_errno (EBUSY);
    return -1;
  }
  return 0;
}
int __new_sem_getvalue ( sem_t *__restrict  __sem,
int *__restrict  __sval 
)
int __new_sem_init ( sem_t *  __sem,
int  __pshared,
unsigned int  __value 
)

Definition at line 27 of file semaphore.c.

{
  if (value > SEM_VALUE_MAX) {
    errno = EINVAL;
    return -1;
  }
  if (pshared) {
    errno = ENOSYS;
    return -1;
  }
  __pthread_init_lock(&sem->__sem_lock);
  sem->__sem_value = value;
  sem->__sem_waiting = NULL;
  return 0;
}
int __new_sem_post ( sem_t *  sem)

Definition at line 137 of file semaphore.c.

{
  pthread_descr self = thread_self();
  pthread_descr th;
  struct pthread_request request;

  if (THREAD_GETMEM(self, p_in_sighandler) == NULL) {
    __pthread_lock(&sem->__sem_lock, self);
    if (sem->__sem_waiting == NULL) {
      if (sem->__sem_value >= SEM_VALUE_MAX) {
        /* Overflow */
        errno = ERANGE;
        __pthread_unlock(&sem->__sem_lock);
        return -1;
      }
      sem->__sem_value++;
      __pthread_unlock(&sem->__sem_lock);
    } else {
      th = dequeue(&sem->__sem_waiting);
      __pthread_unlock(&sem->__sem_lock);
      th->p_sem_avail = 1;
      WRITE_MEMORY_BARRIER();
      restart(th);
    }
  } else {
    /* If we're in signal handler, delegate post operation to
       the thread manager. */
    if (__pthread_manager_request < 0) {
      if (__pthread_initialize_manager() < 0) {
        errno = EAGAIN;
        return -1;
      }
    }
    request.req_kind = REQ_POST;
    request.req_args.post = sem;
    TEMP_FAILURE_RETRY(write_not_cancel(__pthread_manager_request,
                                   (char *) &request, sizeof(request)));
  }
  return 0;
}

int __new_sem_trywait ( sem_t *  __sem)

Definition at line 121 of file semaphore.c.

{
  int retval;

  __pthread_lock(&sem->__sem_lock, NULL);
  if (sem->__sem_value == 0) {
    errno = EAGAIN;
    retval = -1;
  } else {
    sem->__sem_value--;
    retval = 0;
  }
  __pthread_unlock(&sem->__sem_lock);
  return retval;
}
int __new_sem_wait ( sem_t *  __sem)

Definition at line 59 of file semaphore.c.

{
  volatile pthread_descr self = thread_self();
  pthread_extricate_if extr;
  int already_canceled = 0;
  int spurious_wakeup_count;

  /* Set up extrication interface */
  extr.pu_object = sem;
  extr.pu_extricate_func = new_sem_extricate_func;

  __pthread_lock(&sem->__sem_lock, self);
  if (sem->__sem_value > 0) {
    sem->__sem_value--;
    __pthread_unlock(&sem->__sem_lock);
    return 0;
  }
  /* Register extrication interface */
  THREAD_SETMEM(self, p_sem_avail, 0);
  __pthread_set_own_extricate_if(self, &extr);
  /* Enqueue only if not already cancelled. */
  if (!(THREAD_GETMEM(self, p_canceled)
      && THREAD_GETMEM(self, p_cancelstate) == PTHREAD_CANCEL_ENABLE))
    enqueue(&sem->__sem_waiting, self);
  else
    already_canceled = 1;
  __pthread_unlock(&sem->__sem_lock);

  if (already_canceled) {
    __pthread_set_own_extricate_if(self, 0);
    __pthread_do_exit(PTHREAD_CANCELED, CURRENT_STACK_FRAME);
  }

  /* Wait for sem_post or cancellation, or fall through if already canceled */
  spurious_wakeup_count = 0;
  while (1)
    {
      suspend(self);
      if (THREAD_GETMEM(self, p_sem_avail) == 0
         && (THREAD_GETMEM(self, p_woken_by_cancel) == 0
             || THREAD_GETMEM(self, p_cancelstate) != PTHREAD_CANCEL_ENABLE))
       {
         /* Count resumes that don't belong to us. */
         spurious_wakeup_count++;
         continue;
       }
      break;
    }
  __pthread_set_own_extricate_if(self, 0);

  /* Terminate only if the wakeup came from cancellation. */
  /* Otherwise ignore cancellation because we got the semaphore. */

  if (THREAD_GETMEM(self, p_woken_by_cancel)
      && THREAD_GETMEM(self, p_cancelstate) == PTHREAD_CANCEL_ENABLE) {
    THREAD_SETMEM(self, p_woken_by_cancel, 0);
    __pthread_do_exit(PTHREAD_CANCELED, CURRENT_STACK_FRAME);
  }
  /* We got the semaphore */
  return 0;
}
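
The __new_sem_* functions are the implementations behind the public sem_* entry points (reached through symbol versioning). A hedged user-level example of the operations implemented above; note that __new_sem_init rejects process-shared semaphores with ENOSYS, so pshared is 0 here:

#include <semaphore.h>
#include <stdio.h>

int
main (void)
{
  sem_t sem;

  if (sem_init (&sem, /* pshared */ 0, /* value */ 1) != 0)
    {
      perror ("sem_init");
      return 1;
    }

  sem_wait (&sem);   /* value 1 -> 0, no blocking */
  /* ... critical section ... */
  sem_post (&sem);   /* value 0 -> 1, or wakes one waiter */

  sem_destroy (&sem);
  return 0;
}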

int __pthread_attr_destroy ( pthread_attr_t *  attr)

Definition at line 69 of file attr.c.

{
  return 0;
}
int __pthread_attr_getdetachstate ( const pthread_attr_t *  attr,
int *  detachstate 
)

Definition at line 85 of file attr.c.

{
  *detachstate = attr->__detachstate;
  return 0;
}
int __pthread_attr_getguardsize ( const pthread_attr_t *  __attr,
size_t *  __guardsize 
)

int __pthread_attr_getinheritsched ( const pthread_attr_t *  attr,
int *  inherit 
)

Definition at line 138 of file attr.c.

{
  *inherit = attr->__inheritsched;
  return 0;
}

int __pthread_attr_getschedparam ( const pthread_attr_t *  attr,
struct sched_param *  param 
)

Definition at line 105 of file attr.c.

{
  memcpy (param, &attr->__schedparam, sizeof (struct sched_param));
  return 0;
}

int __pthread_attr_getschedpolicy ( const pthread_attr_t *  attr,
int *  policy 
)

Definition at line 122 of file attr.c.

{
  *policy = attr->__schedpolicy;
  return 0;
}
int __pthread_attr_getscope ( const pthread_attr_t *  attr,
int *  scope 
)

Definition at line 159 of file attr.c.

{
  *scope = attr->__scope;
  return 0;
}
int __pthread_attr_getstack ( const pthread_attr_t *  __attr,
void **  __stackaddr,
size_t *  __stacksize 
)

Definition at line 337 of file attr.c.

{
  /* XXX This function has a stupid definition.  The standard specifies
     no error value but what is if no stack address was set?  We simply
     return the value we have in the member.  */
#ifndef _STACK_GROWS_UP
  *stackaddr = (char *) attr->__stackaddr - attr->__stacksize;
#else
  *stackaddr = attr->__stackaddr;
#endif
  *stacksize = attr->__stacksize;
  return 0;
}
int __pthread_attr_getstackaddr ( const pthread_attr_t *  __attr,
void **  __stackaddr 
)
int __pthread_attr_getstacksize ( const pthread_attr_t *  __attr,
size_t *  __stacksize 
)

Definition at line 272 of file attr.c.

{
  *stacksize = attr->__stacksize;
  return 0;
}

int __pthread_attr_init_2_1 ( pthread_attr_t *  __attr)

Definition at line 32 of file attr.c.

{
  size_t ps = __getpagesize ();

  attr->__detachstate = PTHREAD_CREATE_JOINABLE;
  attr->__schedpolicy = SCHED_OTHER;
  attr->__schedparam.sched_priority = 0;
  attr->__inheritsched = PTHREAD_EXPLICIT_SCHED;
  attr->__scope = PTHREAD_SCOPE_SYSTEM;
#ifdef NEED_SEPARATE_REGISTER_STACK
  attr->__guardsize = ps + ps;
#else
  attr->__guardsize = ps;
#endif
  attr->__stackaddr = NULL;
  attr->__stackaddr_set = 0;
  attr->__stacksize = STACK_SIZE - ps;
  return 0;
}
int __pthread_attr_setdetachstate ( pthread_attr_t *  attr,
int  detachstate 
)

Definition at line 75 of file attr.c.

{
  if (detachstate < PTHREAD_CREATE_JOINABLE ||
      detachstate > PTHREAD_CREATE_DETACHED)
    return EINVAL;
  attr->__detachstate = detachstate;
  return 0;
}
int __pthread_attr_setguardsize ( pthread_attr_t *  __attr,
size_t  __guardsize 
)

Definition at line 166 of file attr.c.

{
  /* The guard size must not be larger than the stack itself */
  if (guardsize >= attr->__stacksize) return EINVAL;

  attr->__guardsize = guardsize;

  return 0;
}

int __pthread_attr_setinheritsched ( pthread_attr_t *  attr,
int  inherit 
)

Definition at line 129 of file attr.c.

{
  if (inherit != PTHREAD_INHERIT_SCHED && inherit != PTHREAD_EXPLICIT_SCHED)
    return EINVAL;
  attr->__inheritsched = inherit;
  return 0;
}

int __pthread_attr_setschedparam ( pthread_attr_t *  attr,
const struct sched_param *  param 
)

Definition at line 92 of file attr.c.

{
  int max_prio = __sched_get_priority_max(attr->__schedpolicy);
  int min_prio = __sched_get_priority_min(attr->__schedpolicy);

  if (param->sched_priority < min_prio || param->sched_priority > max_prio)
    return EINVAL;
  memcpy (&attr->__schedparam, param, sizeof (struct sched_param));
  return 0;
}

int __pthread_attr_setschedpolicy ( pthread_attr_t *  attr,
int  policy 
)

Definition at line 113 of file attr.c.

{
  if (policy != SCHED_OTHER && policy != SCHED_FIFO && policy != SCHED_RR)
    return EINVAL;
  attr->__schedpolicy = policy;
  return 0;
}
int __pthread_attr_setscope ( pthread_attr_t *  attr,
int  scope 
)

Definition at line 145 of file attr.c.

{
  switch (scope) {
  case PTHREAD_SCOPE_SYSTEM:
    attr->__scope = scope;
    return 0;
  case PTHREAD_SCOPE_PROCESS:
    return ENOTSUP;
  default:
    return EINVAL;
  }
}
int __pthread_attr_setstack ( pthread_attr_t *  __attr,
void *  __stackaddr,
size_t  __stacksize 
)

Definition at line 32 of file pthread_attr_setstack.c.

{
  struct pthread_attr *iattr;

  assert (sizeof (*attr) >= sizeof (struct pthread_attr));
  iattr = (struct pthread_attr *) attr;

  /* Catch invalid sizes.  */
  if (stacksize < PTHREAD_STACK_MIN)
    return EINVAL;

#ifdef EXTRA_PARAM_CHECKS
  EXTRA_PARAM_CHECKS;
#endif

  iattr->stacksize = stacksize;
  iattr->stackaddr = (char *) stackaddr + stacksize;
  iattr->flags |= ATTR_FLAG_STACKADDR;

  return 0;
}
int __pthread_attr_setstackaddr ( pthread_attr_t *  __attr,
void *  __stackaddr 
)

Definition at line 26 of file pthread_attr_setstackaddr.c.

{
  struct pthread_attr *iattr;

#ifdef EXTRA_PARAM_CHECKS
  EXTRA_PARAM_CHECKS;
#endif

  assert (sizeof (*attr) >= sizeof (struct pthread_attr));
  iattr = (struct pthread_attr *) attr;

  iattr->stackaddr = stackaddr;
  iattr->flags |= ATTR_FLAG_STACKADDR;

  return 0;
}
int __pthread_attr_setstacksize ( pthread_attr_t *  __attr,
size_t  __stacksize 
)

Definition at line 31 of file pthread_attr_setstacksize.c.

{
  struct pthread_attr *iattr;

  assert (sizeof (*attr) >= sizeof (struct pthread_attr));
  iattr = (struct pthread_attr *) attr;

  /* Catch invalid sizes.  */
  if (stacksize < PTHREAD_STACK_MIN)
    return EINVAL;

  iattr->stacksize = stacksize;

  return 0;
}
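
These attribute setters sit behind the public pthread_attr_* entry points. A hedged sketch of a typical caller using only the public API; as in the body above, sizes below PTHREAD_STACK_MIN are rejected with EINVAL (worker is an illustrative function):

#include <limits.h>
#include <pthread.h>

static void *
worker (void *arg)
{
  return arg;
}

int
main (void)
{
  pthread_attr_t attr;
  pthread_t tid;

  pthread_attr_init (&attr);
  /* Ask for a larger-than-minimum stack; EINVAL would mean the size
     is below PTHREAD_STACK_MIN.  */
  if (pthread_attr_setstacksize (&attr, 4 * PTHREAD_STACK_MIN) != 0)
    return 1;

  if (pthread_create (&tid, &attr, worker, NULL) != 0)
    return 1;

  pthread_join (tid, NULL);
  pthread_attr_destroy (&attr);
  return 0;
}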

int __pthread_barrierattr_getpshared ( __const pthread_barrierattr_t *__restrict  __attr,
int *__restrict  __pshared 
)
void __pthread_cleanup_upto ( __jmp_buf  target,
char *  targetframe 
)

Definition at line 33 of file ptcleanup.c.

{
  pthread_descr self = thread_self();
  struct _pthread_cleanup_buffer * c;

  for (c = THREAD_GETMEM(self, p_cleanup);
       c != NULL && _JMPBUF_UNWINDS(target, c, demangle_ptr);
       c = c->__prev)
    {
#if _STACK_GROWS_DOWN
      if ((char *) c <= targetframe)
       {
         c = NULL;
         break;
       }
#elif _STACK_GROWS_UP
      if ((char *) c >= targetframe)
       {
         c = NULL;
         break;
       }
#else
# error "Define either _STACK_GROWS_DOWN or _STACK_GROWS_UP"
#endif
      c->__routine(c->__arg);
    }
  THREAD_SETMEM(self, p_cleanup, c);
  if (THREAD_GETMEM(self, p_in_sighandler)
      && _JMPBUF_UNWINDS(target, THREAD_GETMEM(self, p_in_sighandler),
                      demangle_ptr))
    THREAD_SETMEM(self, p_in_sighandler, NULL);
}
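
__pthread_cleanup_upto() runs the cleanup handlers that a longjmp() past them would otherwise skip; the buffers it walks are the ones registered with pthread_cleanup_push()/pthread_cleanup_pop(). A hedged user-level example of such a registration (worker and release_buffer are illustrative names):

#include <pthread.h>
#include <stdlib.h>

static void
release_buffer (void *arg)
{
  free (arg);
}

static void *
worker (void *ignored)
{
  char *buf = malloc (256);

  pthread_cleanup_push (release_buffer, buf);
  /* ... work that may hit a cancellation point ... */
  pthread_testcancel ();
  pthread_cleanup_pop (1);   /* pop and run release_buffer(buf) */
  return NULL;
}

int
main (void)
{
  pthread_t tid;

  pthread_create (&tid, NULL, worker, NULL);
  pthread_join (tid, NULL);
  return 0;
}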

int __pthread_cond_broadcast ( pthread_cond_t *  cond)

Definition at line 288 of file condvar.c.

{
  pthread_descr tosignal, th;

  __pthread_lock(&cond->__c_lock, NULL);
  /* Copy the current state of the waiting queue and empty it */
  tosignal = cond->__c_waiting;
  cond->__c_waiting = NULL;
  __pthread_unlock(&cond->__c_lock);
  /* Now signal each process in the queue */
  while ((th = dequeue(&tosignal)) != NULL) {
    th->p_condvar_avail = 1;
    WRITE_MEMORY_BARRIER();
    restart(th);
  }
  return 0;
}

int __pthread_cond_destroy ( pthread_cond_t *  cond)

Definition at line 44 of file condvar.c.

{
  if (cond->__c_waiting != NULL) return EBUSY;
  return 0;
}

int __pthread_cond_init ( pthread_cond_t *  cond,
const pthread_condattr_t *  cond_attr 
)

Definition at line 29 of file condvar.c.

{
  __pthread_init_lock(&cond->__c_lock);
  cond->__c_waiting = NULL;
  return 0;
}

int __pthread_cond_signal ( pthread_cond_t *  cond)

Definition at line 266 of file condvar.c.

{
  pthread_descr th;

  __pthread_lock(&cond->__c_lock, NULL);
  th = dequeue(&cond->__c_waiting);
  __pthread_unlock(&cond->__c_lock);
  if (th != NULL) {
    th->p_condvar_avail = 1;
    WRITE_MEMORY_BARRIER();
    restart(th);
  }
  return 0;
}
int __pthread_cond_timedwait ( pthread_cond_t *  cond,
pthread_mutex_t *  mutex,
const struct timespec *  abstime 
)

Definition at line 252 of file condvar.c.

{
  /* Indirect call through pointer! */
  return pthread_cond_timedwait_relative(cond, mutex, abstime);
}

int __pthread_cond_wait ( pthread_cond_t *  cond,
pthread_mutex_t *  mutex 
)

Definition at line 73 of file condvar.c.

{
  volatile pthread_descr self = thread_self();
  pthread_extricate_if extr;
  int already_canceled = 0;
  int spurious_wakeup_count;

  /* Check whether the mutex is locked and owned by this thread.  */
  if (mutex->__m_kind != PTHREAD_MUTEX_TIMED_NP
      && mutex->__m_kind != PTHREAD_MUTEX_ADAPTIVE_NP
      && mutex->__m_owner != self)
    return EINVAL;

  /* Set up extrication interface */
  extr.pu_object = cond;
  extr.pu_extricate_func = cond_extricate_func;

  /* Register extrication interface */
  THREAD_SETMEM(self, p_condvar_avail, 0);
  __pthread_set_own_extricate_if(self, &extr);

  /* Atomically enqueue thread for waiting, but only if it is not
     canceled. If the thread is canceled, then it will fall through the
     suspend call below, and then call pthread_exit without
     having to worry about whether it is still on the condition variable queue.
     This depends on pthread_cancel setting p_canceled before calling the
     extricate function. */

  __pthread_lock(&cond->__c_lock, self);
  if (!(THREAD_GETMEM(self, p_canceled)
      && THREAD_GETMEM(self, p_cancelstate) == PTHREAD_CANCEL_ENABLE))
    enqueue(&cond->__c_waiting, self);
  else
    already_canceled = 1;
  __pthread_unlock(&cond->__c_lock);

  if (already_canceled) {
    __pthread_set_own_extricate_if(self, 0);
    __pthread_do_exit(PTHREAD_CANCELED, CURRENT_STACK_FRAME);
  }

  pthread_mutex_unlock(mutex);

  spurious_wakeup_count = 0;
  while (1)
    {
      suspend(self);
      if (THREAD_GETMEM(self, p_condvar_avail) == 0
         && (THREAD_GETMEM(self, p_woken_by_cancel) == 0
             || THREAD_GETMEM(self, p_cancelstate) != PTHREAD_CANCEL_ENABLE))
       {
         /* Count resumes that don't belong to us. */
         spurious_wakeup_count++;
         continue;
       }
      break;
    }

  __pthread_set_own_extricate_if(self, 0);

  /* Check for cancellation again, to provide correct cancellation
     point behavior */

  if (THREAD_GETMEM(self, p_woken_by_cancel)
      && THREAD_GETMEM(self, p_cancelstate) == PTHREAD_CANCEL_ENABLE) {
    THREAD_SETMEM(self, p_woken_by_cancel, 0);
    pthread_mutex_lock(mutex);
    __pthread_do_exit(PTHREAD_CANCELED, CURRENT_STACK_FRAME);
  }

  /* Put back any resumes we caught that don't belong to us. */
  while (spurious_wakeup_count--)
    restart(self);

  pthread_mutex_lock(mutex);
  return 0;
}
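
The canonical caller-side pattern for the wait implemented above: the mutex must be held on entry, is released while the thread blocks, and is re-acquired before the call returns, so the predicate has to be rechecked in a loop (which is also why spurious resumes are counted and re-posted in the body). A minimal sketch using the public API:

#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int data_ready;

void
wait_for_data (void)
{
  pthread_mutex_lock (&lock);
  while (!data_ready)
    pthread_cond_wait (&cond, &lock);   /* drops and re-acquires lock */
  /* ... consume the data while still holding the lock ... */
  pthread_mutex_unlock (&lock);
}

void
publish_data (void)
{
  pthread_mutex_lock (&lock);
  data_ready = 1;
  pthread_cond_signal (&cond);
  pthread_mutex_unlock (&lock);
}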

int __pthread_condattr_destroy ( pthread_condattr_t *  attr)

Definition at line 24 of file pthread_condattr_destroy.c.

{
  /* Nothing to be done.  */
  return 0;
}

int __pthread_condattr_init ( pthread_condattr_t *  attr)

Definition at line 313 of file condvar.c.

{
  return 0;
}
int __pthread_create_2_0 ( pthread_t *__restrict  thread,
const pthread_attr_t *  __attr,
void *(*)(void *)  __start_routine,
void *__restrict  arg 
)
int __pthread_create_2_1 ( pthread_t *__restrict  __threadp,
const pthread_attr_t *  __attr,
void *(*)(void *)  __start_routine,
void *__restrict  __arg 
)
void __pthread_destroy_specifics ( void  )

void __pthread_disable_asynccancel ( int  oldtype)

Definition at line 82 of file cancel.c.

{
  pthread_descr self = thread_self();
  THREAD_SETMEM(self, p_canceltype, oldtype);
}
void __pthread_do_exit ( void *  retval,
char *  currentframe 
)

Definition at line 33 of file join.c.

{
  pthread_descr self = thread_self();
  pthread_descr joining;
  struct pthread_request request;

  /* Reset the cancellation flag to avoid looping if the cleanup handlers
     contain cancellation points */
  THREAD_SETMEM(self, p_canceled, 0);
  /* Call cleanup functions and destroy the thread-specific data */
  __pthread_perform_cleanup(currentframe);
  __pthread_destroy_specifics();
  /* Store return value */
  __pthread_lock(THREAD_GETMEM(self, p_lock), self);
  THREAD_SETMEM(self, p_retval, retval);
  /* See whether we have to signal the death.  */
  if (THREAD_GETMEM(self, p_report_events))
    {
      /* See whether TD_DEATH is in any of the mask.  */
      int idx = __td_eventword (TD_DEATH);
      uint32_t mask = __td_eventmask (TD_DEATH);

      if ((mask & (__pthread_threads_events.event_bits[idx]
                 | THREAD_GETMEM_NC(self,
                                  p_eventbuf.eventmask.event_bits[idx])))
         != 0)
       {
         /* Yep, we have to signal the death.  */
         THREAD_SETMEM(self, p_eventbuf.eventnum, TD_DEATH);
         THREAD_SETMEM(self, p_eventbuf.eventdata, self);
         __pthread_last_event = self;

         /* Now call the function to signal the event.  */
         __linuxthreads_death_event();
       }
    }
  /* Say that we've terminated */
  THREAD_SETMEM(self, p_terminated, 1);
  /* See if someone is joining on us */
  joining = THREAD_GETMEM(self, p_joining);
  __pthread_unlock(THREAD_GETMEM(self, p_lock));
  /* Restart joining thread if any */
  if (joining != NULL) restart(joining);
  /* If this is the initial thread, block until all threads have terminated.
     If another thread calls exit, we'll be terminated from our signal
     handler. */
  if (self == __pthread_main_thread && __pthread_manager_request >= 0) {
    request.req_thread = self;
    request.req_kind = REQ_MAIN_THREAD_EXIT;
    TEMP_FAILURE_RETRY(write_not_cancel(__pthread_manager_request,
                                   (char *)&request, sizeof(request)));
    suspend(self);
    /* Main thread flushes stdio streams and runs atexit functions.
       It also calls a handler within LinuxThreads which sends a process exit
       request to the thread manager. */
    exit(0);
  }
  /* Threads other than the main one  terminate without flushing stdio streams
     or running atexit functions. */
  _exit(0);
}

int __pthread_equal ( pthread_t  thread1,
pthread_t  thread2 
)

Definition at line 902 of file pthread.c.

{
  return thread1 == thread2;
}
void __pthread_exit ( void *  retval)

Definition at line 27 of file join.c.

pid_t __pthread_fork ( struct fork_block *  b)

Definition at line 28 of file ptfork.c.

{
  pid_t pid;
  list_t *runp;

  __libc_lock_lock (b->lock);

  /* Run all the registered preparation handlers.  In reverse order.  */
  list_for_each_prev (runp, &b->prepare_list)
    {
      struct fork_handler *curp;
      curp = list_entry (runp, struct fork_handler, list);
      curp->handler ();
    }

  __pthread_once_fork_prepare();
  __flockfilelist();

  pid = ARCH_FORK ();

  if (pid == 0) {
    __pthread_reset_main_thread();

    __fresetlockfiles();
    __pthread_once_fork_child();

    /* Run the handlers registered for the child.  */
    list_for_each (runp, &b->child_list)
      {
       struct fork_handler *curp;
       curp = list_entry (runp, struct fork_handler, list);
       curp->handler ();
      }

    __libc_lock_init (b->lock);
  } else {
    __funlockfilelist();
    __pthread_once_fork_parent();

    /* Run the handlers registered for the parent.  */
    list_for_each (runp, &b->parent_list)
      {
       struct fork_handler *curp;
       curp = list_entry (runp, struct fork_handler, list);
       curp->handler ();
      }

    __libc_lock_unlock (b->lock);
  }

  return pid;
}
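
The prepare/parent/child handler lists walked here are populated through pthread_atfork(). A minimal sketch of such a registration (state_lock and the handler names are illustrative):

#include <pthread.h>

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;

static void prepare (void) { pthread_mutex_lock (&state_lock); }    /* before fork, in parent */
static void parent (void)  { pthread_mutex_unlock (&state_lock); }  /* after fork, in parent */
static void child (void)   { pthread_mutex_unlock (&state_lock); }  /* after fork, in child */

void
setup_fork_handlers (void)
{
  pthread_atfork (prepare, parent, child);
}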

int __pthread_getschedparam ( pthread_t  thread,
int *  policy,
struct sched_param *  param 
)

Definition at line 989 of file pthread.c.

{
  pthread_handle handle = thread_handle(thread);
  int pid, pol;

  __pthread_lock(&handle->h_lock, NULL);
  if (__builtin_expect (invalid_handle(handle, thread), 0)) {
    __pthread_unlock(&handle->h_lock);
    return ESRCH;
  }
  pid = handle->h_descr->p_pid;
  __pthread_unlock(&handle->h_lock);
  pol = __sched_getscheduler(pid);
  if (__builtin_expect (pol, 0) == -1) return errno;
  if (__sched_getparam(pid, param) == -1) return errno;
  *policy = pol;
  return 0;
}
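
The public pthread_getschedparam()/pthread_setschedparam() calls resolve to these implementations. A hedged example that queries a thread's policy and priority and writes them back unchanged (show_sched is an illustrative helper):

#include <pthread.h>
#include <sched.h>
#include <stdio.h>

int
show_sched (pthread_t thread)
{
  struct sched_param param;
  int policy;
  int err = pthread_getschedparam (thread, &policy, &param);

  if (err != 0)
    return err;                     /* e.g. ESRCH for a dead thread */

  printf ("policy=%d priority=%d\n", policy, param.sched_priority);
  return pthread_setschedparam (thread, policy, &param);
}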

void __pthread_init_max_stacksize ( void  )

Definition at line 423 of file pthread.c.

{
  struct rlimit limit;
  size_t max_stack;

  getrlimit(RLIMIT_STACK, &limit);
#ifdef FLOATING_STACKS
  if (limit.rlim_cur == RLIM_INFINITY)
    limit.rlim_cur = ARCH_STACK_MAX_SIZE;
# ifdef NEED_SEPARATE_REGISTER_STACK
  max_stack = limit.rlim_cur / 2;
# else
  max_stack = limit.rlim_cur;
# endif
#else
  /* Play with the stack size limit to make sure that no stack ever grows
     beyond STACK_SIZE minus one page (to act as a guard page). */
# ifdef NEED_SEPARATE_REGISTER_STACK
  /* STACK_SIZE bytes hold both the main stack and register backing
     store. The rlimit value applies to each individually.  */
  max_stack = STACK_SIZE/2 - __getpagesize ();
# else
  max_stack = STACK_SIZE - __getpagesize();
# endif
  if (limit.rlim_cur > max_stack) {
    limit.rlim_cur = max_stack;
    setrlimit(RLIMIT_STACK, &limit);
  }
#endif
  __pthread_max_stacksize = max_stack;
  if (max_stack / 4 < __MAX_ALLOCA_CUTOFF)
    {
#ifdef USE_TLS
      pthread_descr self = THREAD_SELF;
      self->p_alloca_cutoff = max_stack / 4;
#else
      __pthread_initial_thread.p_alloca_cutoff = max_stack / 4;
#endif
    }
}

void __pthread_initialize ( void  )

Definition at line 622 of file pthread.c.

int __pthread_initialize_manager ( void  )

Definition at line 627 of file pthread.c.

{
  int manager_pipe[2];
  int pid;
  struct pthread_request request;
  int report_events;
  pthread_descr mgr;
#ifdef USE_TLS
  tcbhead_t *tcbp;
#endif

  __pthread_multiple_threads = 1;
#if TLS_MULTIPLE_THREADS_IN_TCB || !defined USE_TLS || !TLS_DTV_AT_TP
  __pthread_main_thread->p_multiple_threads = 1;
#endif
  *__libc_multiple_threads_ptr = 1;

#ifndef HAVE_Z_NODELETE
  if (__builtin_expect (&__dso_handle != NULL, 1))
    __cxa_atexit ((void (*) (void *)) pthread_atexit_retcode, NULL,
                __dso_handle);
#endif

  if (__pthread_max_stacksize == 0)
    __pthread_init_max_stacksize ();
  /* If basic initialization not done yet (e.g. we're called from a
     constructor run before our constructor), do it now */
  if (__pthread_initial_thread_bos == NULL) pthread_initialize();
  /* Setup stack for thread manager */
  __pthread_manager_thread_bos = malloc(THREAD_MANAGER_STACK_SIZE);
  if (__pthread_manager_thread_bos == NULL) return -1;
  __pthread_manager_thread_tos =
    __pthread_manager_thread_bos + THREAD_MANAGER_STACK_SIZE;
  /* Setup pipe to communicate with thread manager */
  if (pipe(manager_pipe) == -1) {
    free(__pthread_manager_thread_bos);
    return -1;
  }

#ifdef USE_TLS
  /* Allocate memory for the thread descriptor and the dtv.  */
  tcbp = _dl_allocate_tls (NULL);
  if (tcbp == NULL) {
    free(__pthread_manager_thread_bos);
    close_not_cancel(manager_pipe[0]);
    close_not_cancel(manager_pipe[1]);
    return -1;
  }

# if TLS_TCB_AT_TP
  mgr = (pthread_descr) tcbp;
# elif TLS_DTV_AT_TP
  /* pthread_descr is located right below tcbhead_t which _dl_allocate_tls
     returns.  */
  mgr = (pthread_descr) ((char *) tcbp - TLS_PRE_TCB_SIZE);
# endif
  __pthread_handles[1].h_descr = manager_thread = mgr;

  /* Initialize the descriptor.  */
#if !defined USE_TLS || !TLS_DTV_AT_TP
  mgr->p_header.data.tcb = tcbp;
  mgr->p_header.data.self = mgr;
  mgr->p_header.data.multiple_threads = 1;
#elif TLS_MULTIPLE_THREADS_IN_TCB
  mgr->p_multiple_threads = 1;
#endif
  mgr->p_lock = &__pthread_handles[1].h_lock;
# ifndef HAVE___THREAD
  mgr->p_errnop = &mgr->p_errno;
# endif
  mgr->p_start_args = (struct pthread_start_args) PTHREAD_START_ARGS_INITIALIZER(__pthread_manager);
  mgr->p_nr = 1;
# if __LT_SPINLOCK_INIT != 0
  self->p_resume_count = (struct pthread_atomic) __ATOMIC_INITIALIZER;
# endif
  mgr->p_alloca_cutoff = PTHREAD_STACK_MIN / 4;
#else
  mgr = &__pthread_manager_thread;
#endif

  /* Copy the stack guard canary.  */
#ifdef THREAD_COPY_STACK_GUARD
  THREAD_COPY_STACK_GUARD (mgr);
#endif

  /* Copy the pointer guard value.  */
#ifdef THREAD_COPY_POINTER_GUARD
  THREAD_COPY_POINTER_GUARD (mgr);
#endif

  __pthread_manager_request = manager_pipe[1]; /* writing end */
  __pthread_manager_reader = manager_pipe[0]; /* reading end */

  /* Start the thread manager */
  pid = 0;
#ifdef USE_TLS
  if (__linuxthreads_initial_report_events != 0)
    THREAD_SETMEM (((pthread_descr) NULL), p_report_events,
                 __linuxthreads_initial_report_events);
  report_events = THREAD_GETMEM (((pthread_descr) NULL), p_report_events);
#else
  if (__linuxthreads_initial_report_events != 0)
    __pthread_initial_thread.p_report_events
      = __linuxthreads_initial_report_events;
  report_events = __pthread_initial_thread.p_report_events;
#endif
  if (__builtin_expect (report_events, 0))
    {
      /* It's a bit more complicated.  We have to report the creation of
        the manager thread.  */
      int idx = __td_eventword (TD_CREATE);
      uint32_t mask = __td_eventmask (TD_CREATE);
      uint32_t event_bits;

#ifdef USE_TLS
      event_bits = THREAD_GETMEM_NC (((pthread_descr) NULL),
                                 p_eventbuf.eventmask.event_bits[idx]);
#else
      event_bits = __pthread_initial_thread.p_eventbuf.eventmask.event_bits[idx];
#endif

      if ((mask & (__pthread_threads_events.event_bits[idx] | event_bits))
         != 0)
       {
         __pthread_lock(mgr->p_lock, NULL);

#ifdef NEED_SEPARATE_REGISTER_STACK
         pid = __clone2(__pthread_manager_event,
                      (void **) __pthread_manager_thread_bos,
                      THREAD_MANAGER_STACK_SIZE,
                      CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_SYSVSEM,
                      mgr);
#elif _STACK_GROWS_UP
         pid = __clone(__pthread_manager_event,
                     (void **) __pthread_manager_thread_bos,
                     CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_SYSVSEM,
                     mgr);
#else
         pid = __clone(__pthread_manager_event,
                     (void **) __pthread_manager_thread_tos,
                     CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_SYSVSEM,
                     mgr);
#endif

         if (pid != -1)
           {
             /* Now fill in the information about the new thread in
                the newly created thread's data structure.  We cannot let
                the new thread do this since we don't know whether it was
                already scheduled when we send the event.  */
             mgr->p_eventbuf.eventdata = mgr;
             mgr->p_eventbuf.eventnum = TD_CREATE;
             __pthread_last_event = mgr;
             mgr->p_tid = 2* PTHREAD_THREADS_MAX + 1;
             mgr->p_pid = pid;

             /* Now call the function which signals the event.  */
             __linuxthreads_create_event ();
           }

         /* Now restart the thread.  */
         __pthread_unlock(mgr->p_lock);
       }
    }

  if (__builtin_expect (pid, 0) == 0)
    {
#ifdef NEED_SEPARATE_REGISTER_STACK
      pid = __clone2(__pthread_manager, (void **) __pthread_manager_thread_bos,
                   THREAD_MANAGER_STACK_SIZE,
                   CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_SYSVSEM, mgr);
#elif _STACK_GROWS_UP
      pid = __clone(__pthread_manager, (void **) __pthread_manager_thread_bos,
                  CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_SYSVSEM, mgr);
#else
      pid = __clone(__pthread_manager, (void **) __pthread_manager_thread_tos,
                  CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_SYSVSEM, mgr);
#endif
    }
  if (__builtin_expect (pid, 0) == -1) {
#ifdef USE_TLS
    _dl_deallocate_tls (tcbp, true);
#endif
    free(__pthread_manager_thread_bos);
    close_not_cancel(manager_pipe[0]);
    close_not_cancel(manager_pipe[1]);
    return -1;
  }
  mgr->p_tid = 2* PTHREAD_THREADS_MAX + 1;
  mgr->p_pid = pid;
  /* Make gdb aware of new thread manager */
  if (__builtin_expect (__pthread_threads_debug, 0) && __pthread_sig_debug > 0)
    {
      raise(__pthread_sig_debug);
      /* We suspend ourself and gdb will wake us up when it is
        ready to handle us. */
      __pthread_wait_for_restart_signal(thread_self());
    }
  /* Synchronize debugging of the thread manager */
  request.req_kind = REQ_DEBUG;
  TEMP_FAILURE_RETRY(write_not_cancel(__pthread_manager_request,
                                  (char *) &request, sizeof(request)));
  return 0;
}

void __pthread_initialize_minimal ( void  )

Definition at line 306 of file pthread.c.

{
#ifdef USE_TLS
  pthread_descr self;

  /* First of all init __pthread_handles[0] and [1] if needed.  */
# if __LT_SPINLOCK_INIT != 0
  __pthread_handles[0].h_lock = __LOCK_INITIALIZER;
  __pthread_handles[1].h_lock = __LOCK_INITIALIZER;
# endif
# ifndef SHARED
  /* Unlike in the dynamically linked case the dynamic linker has not
     taken care of initializing the TLS data structures.  */
  __libc_setup_tls (TLS_TCB_SIZE, TLS_TCB_ALIGN);
# elif !USE___THREAD
  if (__builtin_expect (GL(dl_tls_dtv_slotinfo_list) == NULL, 0))
    {
      tcbhead_t *tcbp;

      /* There is no actual TLS being used, so the thread register
        was not initialized in the dynamic linker.  */

      /* We need to install special hooks so that the malloc and memalign
        calls in _dl_tls_setup and _dl_allocate_tls won't cause full
        malloc initialization that will try to set up its thread state.  */

      extern void __libc_malloc_pthread_startup (bool first_time);
      __libc_malloc_pthread_startup (true);

      if (__builtin_expect (_dl_tls_setup (), 0)
         || __builtin_expect ((tcbp = _dl_allocate_tls (NULL)) == NULL, 0))
       {
         static const char msg[] = "\
cannot allocate TLS data structures for initial thread\n";
         TEMP_FAILURE_RETRY (write_not_cancel (STDERR_FILENO,
                                          msg, sizeof msg - 1));
         abort ();
       }
      const char *lossage = TLS_INIT_TP (tcbp, 0);
      if (__builtin_expect (lossage != NULL, 0))
       {
         static const char msg[] = "cannot set up thread-local storage: ";
         const char nl = '\n';
         TEMP_FAILURE_RETRY (write_not_cancel (STDERR_FILENO,
                                          msg, sizeof msg - 1));
         TEMP_FAILURE_RETRY (write_not_cancel (STDERR_FILENO,
                                          lossage, strlen (lossage)));
         TEMP_FAILURE_RETRY (write_not_cancel (STDERR_FILENO, &nl, 1));
       }

      /* Though it was allocated with libc's malloc, that was done without
        the user's __malloc_hook installed.  A later realloc that uses
        the hooks might not work with that block from the plain malloc.
        So we record this block as unfreeable just as the dynamic linker
        does when it allocates the DTV before the libc malloc exists.  */
      GL(dl_initial_dtv) = GET_DTV (tcbp);

      __libc_malloc_pthread_startup (false);
    }
# endif

  self = THREAD_SELF;

  /* The memory for the thread descriptor was allocated elsewhere as
     part of the TLS allocation.  We have to initialize the data
     structure by hand.  This initialization must mirror the struct
     definition above.  */
  self->p_nextlive = self->p_prevlive = self;
  self->p_tid = PTHREAD_THREADS_MAX;
  self->p_lock = &__pthread_handles[0].h_lock;
# ifndef HAVE___THREAD
  self->p_errnop = &_errno;
  self->p_h_errnop = &_h_errno;
# endif
  /* self->p_start_args need not be initialized, it's all zero.  */
  self->p_userstack = 1;
# if __LT_SPINLOCK_INIT != 0
  self->p_resume_count = (struct pthread_atomic) __ATOMIC_INITIALIZER;
# endif
  self->p_alloca_cutoff = __MAX_ALLOCA_CUTOFF;

  /* Another variable which points to the thread descriptor.  */
  __pthread_main_thread = self;

  /* And fill in the pointer the the thread __pthread_handles array.  */
  __pthread_handles[0].h_descr = self;

#else  /* USE_TLS */

  /* First of all init __pthread_handles[0] and [1].  */
# if __LT_SPINLOCK_INIT != 0
  __pthread_handles[0].h_lock = __LOCK_INITIALIZER;
  __pthread_handles[1].h_lock = __LOCK_INITIALIZER;
# endif
  __pthread_handles[0].h_descr = &__pthread_initial_thread;
  __pthread_handles[1].h_descr = &__pthread_manager_thread;

  /* If we have special thread_self processing, initialize that for the
     main thread now.  */
# ifdef INIT_THREAD_SELF
  INIT_THREAD_SELF(&__pthread_initial_thread, 0);
# endif
#endif

#if HP_TIMING_AVAIL
# ifdef USE_TLS
  self->p_cpuclock_offset = GL(dl_cpuclock_offset);
# else
  __pthread_initial_thread.p_cpuclock_offset = GL(dl_cpuclock_offset);
# endif
#endif

  __libc_multiple_threads_ptr = __libc_pthread_init (ptr_pthread_functions);
}

void * __pthread_internal_tsd_get ( int  key)

Definition at line 229 of file specific.c.

int __pthread_internal_tsd_set ( int  key,
const void *  pointer 
)

Definition at line 220 of file specific.c.

{
  pthread_descr self = thread_self();

  THREAD_SETMEM_NC(self, p_libc_specific[key], (void *) pointer);
  return 0;
}

void __pthread_kill_other_threads_np ( void  )

Definition at line 1190 of file pthread.c.

{
  struct sigaction sa;
  /* Terminate all other threads and thread manager */
  pthread_onexit_process(0, NULL);
  /* Make current thread the main thread in case the calling thread
     changes its mind, does not exec(), and creates new threads instead. */
  __pthread_reset_main_thread();

  /* Reset the signal handlers behaviour for the signals the
     implementation uses since this would be passed to the new
     process.  */
  sigemptyset(&sa.sa_mask);
  sa.sa_flags = 0;
  sa.sa_handler = SIG_DFL;
  __libc_sigaction(__pthread_sig_restart, &sa, NULL);
  __libc_sigaction(__pthread_sig_cancel, &sa, NULL);
  if (__pthread_sig_debug > 0)
    __libc_sigaction(__pthread_sig_debug, &sa, NULL);
}

int __pthread_manager ( void *  reqfd)

void __pthread_manager_adjust_prio ( int  thread_prio)

Definition at line 1114 of file manager.c.

{
  struct sched_param param;

  if (thread_prio <= manager_thread->p_priority) return;
  param.sched_priority =
    thread_prio < __sched_get_priority_max(SCHED_FIFO)
    ? thread_prio + 1 : thread_prio;
  __sched_setscheduler(manager_thread->p_pid, SCHED_FIFO, &param);
  manager_thread->p_priority = thread_prio;
}

int __pthread_manager_event ( void *  reqfd)

Definition at line 236 of file manager.c.

{
  pthread_descr self = arg;
  /* If we have special thread_self processing, initialize it.  */
#ifdef INIT_THREAD_SELF
  INIT_THREAD_SELF(self, 1);
#endif

  /* Get the lock the manager will free once all is correctly set up.  */
  __pthread_lock (THREAD_GETMEM(self, p_lock), NULL);
  /* Free it immediately.  */
  __pthread_unlock (THREAD_GETMEM(self, p_lock));

  return __pthread_manager(arg);
}

void __pthread_manager_sighandler ( int  sig)

Definition at line 1090 of file manager.c.

{
  int kick_manager = terminated_children == 0 && main_thread_exiting;
  terminated_children = 1;

  /* If the main thread is terminating, kick the thread manager loop
     each time some threads terminate. This eliminates a two second
     shutdown delay caused by the thread manager sleeping in the
     call to __poll(). Instead, the thread manager is kicked into
     action, reaps the outstanding threads and resumes the main thread
     so that it can complete the shutdown. */

  if (kick_manager) {
    struct pthread_request request;
    request.req_thread = 0;
    request.req_kind = REQ_KICK;
    TEMP_FAILURE_RETRY(write_not_cancel(__pthread_manager_request,
                                   (char *) &request, sizeof(request)));
  }
}

void __pthread_message ( const char *  fmt,
  ... 
)

int __pthread_mutex_destroy ( pthread_mutex_t *  __mutex)

Definition at line 25 of file pthread_mutex_destroy.c.

{
  if ((mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP) == 0
      && mutex->__data.__nusers != 0)
    return EBUSY;

  /* Set to an invalid value.  */
  mutex->__data.__kind = -1;

  return 0;
}

int __pthread_mutex_lock ( pthread_mutex_t *  __mutex)

Definition at line 41 of file pthread_mutex_lock.c.

{
  assert (sizeof (mutex->__size) >= sizeof (mutex->__data));

  int oldval;
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);

  int retval = 0;
  switch (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex),
                         PTHREAD_MUTEX_TIMED_NP))
    {
      /* Recursive mutex.  */
    case PTHREAD_MUTEX_RECURSIVE_NP:
      /* Check whether we already hold the mutex.  */
      if (mutex->__data.__owner == id)
       {
         /* Just bump the counter.  */
         if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
           /* Overflow of the counter.  */
           return EAGAIN;

         ++mutex->__data.__count;

         return 0;
       }

      /* We have to get the mutex.  */
      LLL_MUTEX_LOCK (mutex);

      assert (mutex->__data.__owner == 0);
      mutex->__data.__count = 1;
      break;

      /* Error checking mutex.  */
    case PTHREAD_MUTEX_ERRORCHECK_NP:
      /* Check whether we already hold the mutex.  */
      if (__builtin_expect (mutex->__data.__owner == id, 0))
       return EDEADLK;

      /* FALLTHROUGH */

    case PTHREAD_MUTEX_TIMED_NP:
    simple:
      /* Normal mutex.  */
      LLL_MUTEX_LOCK (mutex);
      assert (mutex->__data.__owner == 0);
      break;

    case PTHREAD_MUTEX_ADAPTIVE_NP:
      if (! __is_smp)
       goto simple;

      if (LLL_MUTEX_TRYLOCK (mutex) != 0)
       {
         int cnt = 0;
         int max_cnt = MIN (MAX_ADAPTIVE_COUNT,
                          mutex->__data.__spins * 2 + 10);
         do
           {
             if (cnt++ >= max_cnt)
              {
                LLL_MUTEX_LOCK (mutex);
                break;
              }

#ifdef BUSY_WAIT_NOP
             BUSY_WAIT_NOP;
#endif
           }
         while (LLL_MUTEX_TRYLOCK (mutex) != 0);

         mutex->__data.__spins += (cnt - mutex->__data.__spins) / 8;
       }
      assert (mutex->__data.__owner == 0);
      break;

    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                   &mutex->__data.__list.__next);

      oldval = mutex->__data.__lock;
      do
       {
       again:
         if ((oldval & FUTEX_OWNER_DIED) != 0)
           {
             /* The previous owner died.  Try locking the mutex.  */
             int newval = id;
#ifdef NO_INCR
             newval |= FUTEX_WAITERS;
#else
             newval |= (oldval & FUTEX_WAITERS);
#endif

             newval
              = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                 newval, oldval);

             if (newval != oldval)
              {
                oldval = newval;
                goto again;
              }

             /* We got the mutex.  */
             mutex->__data.__count = 1;
             /* But it is inconsistent unless marked otherwise.  */
             mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

             ENQUEUE_MUTEX (mutex);
             THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

             /* Note that we deliberately exit here.  If we fall
               through to the end of the function __nusers would be
               incremented which is not correct because the old
               owner has to be discounted.  If we are not supposed
               to increment __nusers we actually have to decrement
               it here.  */
#ifdef NO_INCR
             --mutex->__data.__nusers;
#endif

             return EOWNERDEAD;
           }

         /* Check whether we already hold the mutex.  */
         if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
           {
             int kind = PTHREAD_MUTEX_TYPE (mutex);
             if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                             NULL);
                return EDEADLK;
              }

             if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                             NULL);

                /* Just bump the counter.  */
                if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
           }

         oldval = LLL_ROBUST_MUTEX_LOCK (mutex, id);

         if (__builtin_expect (mutex->__data.__owner
                            == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
           {
             /* This mutex is now not recoverable.  */
             mutex->__data.__count = 0;
             lll_unlock (mutex->__data.__lock,
                       PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
             THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
             return ENOTRECOVERABLE;
           }
       }
      while ((oldval & FUTEX_OWNER_DIED) != 0);

      mutex->__data.__count = 1;
      ENQUEUE_MUTEX (mutex);
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;

    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      {
       int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
       int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;

       if (robust)
         /* Note: robust PI futexes are signaled by setting bit 0.  */
         THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                      (void *) (((uintptr_t) &mutex->__data.__list.__next)
                               | 1));

       oldval = mutex->__data.__lock;

       /* Check whether we already hold the mutex.  */
       if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
         {
           if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
             {
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
              return EDEADLK;
             }

           if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
             {
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              /* Just bump the counter.  */
              if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                /* Overflow of the counter.  */
                return EAGAIN;

              ++mutex->__data.__count;

              return 0;
             }
         }

       int newval = id;
#ifdef NO_INCR
       newval |= FUTEX_WAITERS;
#endif
       oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                newval, 0);

       if (oldval != 0)
         {
           /* The mutex is locked.  The kernel will now take care of
              everything.  */
           int private = (robust
                        ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
                        : PTHREAD_MUTEX_PSHARED (mutex));
           INTERNAL_SYSCALL_DECL (__err);
           int e = INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                                  __lll_private_flag (FUTEX_LOCK_PI,
                                                   private), 1, 0);

           if (INTERNAL_SYSCALL_ERROR_P (e, __err)
              && (INTERNAL_SYSCALL_ERRNO (e, __err) == ESRCH
                  || INTERNAL_SYSCALL_ERRNO (e, __err) == EDEADLK))
             {
              assert (INTERNAL_SYSCALL_ERRNO (e, __err) != EDEADLK
                     || (kind != PTHREAD_MUTEX_ERRORCHECK_NP
                         && kind != PTHREAD_MUTEX_RECURSIVE_NP));
              /* ESRCH can happen only for non-robust PI mutexes where
                 the owner of the lock died.  */
              assert (INTERNAL_SYSCALL_ERRNO (e, __err) != ESRCH || !robust);

              /* Delay the thread indefinitely.  */
              while (1)
                pause_not_cancel ();
             }

           oldval = mutex->__data.__lock;

           assert (robust || (oldval & FUTEX_OWNER_DIED) == 0);
         }

       if (__builtin_expect (oldval & FUTEX_OWNER_DIED, 0))
         {
           atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);

           /* We got the mutex.  */
           mutex->__data.__count = 1;
           /* But it is inconsistent unless marked otherwise.  */
           mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

           ENQUEUE_MUTEX_PI (mutex);
           THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

           /* Note that we deliberately exit here.  If we fall
              through to the end of the function __nusers would be
              incremented which is not correct because the old owner
              has to be discounted.  If we are not supposed to
              increment __nusers we actually have to decrement it here.  */
#ifdef NO_INCR
           --mutex->__data.__nusers;
#endif

           return EOWNERDEAD;
         }

       if (robust
           && __builtin_expect (mutex->__data.__owner
                             == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
         {
           /* This mutex is now not recoverable.  */
           mutex->__data.__count = 0;

           INTERNAL_SYSCALL_DECL (__err);
           INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                           __lll_private_flag (FUTEX_UNLOCK_PI,
                                            PTHREAD_ROBUST_MUTEX_PSHARED (mutex)),
                           0, 0);

           THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
           return ENOTRECOVERABLE;
         }

       mutex->__data.__count = 1;
       if (robust)
         {
           ENQUEUE_MUTEX_PI (mutex);
           THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
         }
      }
      break;

    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      {
       int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;

       oldval = mutex->__data.__lock;

       /* Check whether we already hold the mutex.  */
       if (mutex->__data.__owner == id)
         {
           if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
             return EDEADLK;

           if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
             {
              /* Just bump the counter.  */
              if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                /* Overflow of the counter.  */
                return EAGAIN;

              ++mutex->__data.__count;

              return 0;
             }
         }

       int oldprio = -1, ceilval;
       do
         {
           int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
                       >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

           if (__pthread_current_priority () > ceiling)
             {
              if (oldprio != -1)
                __pthread_tpp_change_priority (oldprio, -1);
              return EINVAL;
             }

           retval = __pthread_tpp_change_priority (oldprio, ceiling);
           if (retval)
             return retval;

           ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
           oldprio = ceiling;

           oldval
             = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
#ifdef NO_INCR
                                               ceilval | 2,
#else
                                               ceilval | 1,
#endif
                                               ceilval);

           if (oldval == ceilval)
             break;

           do
             {
              oldval
                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                  ceilval | 2,
                                                  ceilval | 1);

              if ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval)
                break;

              if (oldval != ceilval)
                lll_futex_wait (&mutex->__data.__lock, ceilval | 2,
                              PTHREAD_MUTEX_PSHARED (mutex));
             }
           while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                 ceilval | 2, ceilval)
                 != ceilval);
         }
       while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);

       assert (mutex->__data.__owner == 0);
       mutex->__data.__count = 1;
      }
      break;

    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

  /* Record the ownership.  */
  mutex->__data.__owner = id;
#ifndef NO_INCR
  ++mutex->__data.__nusers;
#endif

  return retval;
}
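
For the robust cases handled above, the lock can be acquired successfully yet return EOWNERDEAD, after which the data the mutex protects must be repaired and the mutex marked consistent again. Here is a hedged caller-side sketch, assuming the glibc 2.9-era pthread_mutex_consistent_np extension; recover_shared_state is a hypothetical application hook:

  #define _GNU_SOURCE
  #include <errno.h>
  #include <pthread.h>

  extern void recover_shared_state (void);   /* hypothetical application hook */

  int lock_robust (pthread_mutex_t *m)
  {
    int err = pthread_mutex_lock (m);

    if (err == EOWNERDEAD)
      {
        /* The previous owner died while holding the lock: we own the
           mutex now, but the data it protects may be inconsistent.  */
        recover_shared_state ();
        pthread_mutex_consistent_np (m);   /* clear the inconsistent mark */
        err = 0;
      }
    /* ENOTRECOVERABLE means an earlier owner died and nobody repaired the
       state; the mutex can only be destroyed and re-created.  */
    return err;
  }
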
int __pthread_mutex_timedlock ( pthread_mutex_t *  __mutex,
const struct timespec *  __abstime 
)

int __pthread_mutex_trylock ( pthread_mutex_t *  mutex)

Definition at line 28 of file pthread_mutex_trylock.c.

{
  int oldval;
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);

  switch (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex),
                         PTHREAD_MUTEX_TIMED_NP))
    {
      /* Recursive mutex.  */
    case PTHREAD_MUTEX_RECURSIVE_NP:
      /* Check whether we already hold the mutex.  */
      if (mutex->__data.__owner == id)
       {
         /* Just bump the counter.  */
         if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
           /* Overflow of the counter.  */
           return EAGAIN;

         ++mutex->__data.__count;
         return 0;
       }

      if (lll_trylock (mutex->__data.__lock) == 0)
       {
         /* Record the ownership.  */
         mutex->__data.__owner = id;
         mutex->__data.__count = 1;
         ++mutex->__data.__nusers;
         return 0;
       }
      break;

    case PTHREAD_MUTEX_ERRORCHECK_NP:
    case PTHREAD_MUTEX_TIMED_NP:
    case PTHREAD_MUTEX_ADAPTIVE_NP:
      /* Normal mutex.  */
      if (lll_trylock (mutex->__data.__lock) != 0)
       break;

      /* Record the ownership.  */
      mutex->__data.__owner = id;
      ++mutex->__data.__nusers;

      return 0;

    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                   &mutex->__data.__list.__next);

      oldval = mutex->__data.__lock;
      do
       {
       again:
         if ((oldval & FUTEX_OWNER_DIED) != 0)
           {
             /* The previous owner died.  Try locking the mutex.  */
             int newval = id | (oldval & FUTEX_WAITERS);

             newval
              = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                 newval, oldval);

             if (newval != oldval)
              {
                oldval = newval;
                goto again;
              }

             /* We got the mutex.  */
             mutex->__data.__count = 1;
             /* But it is inconsistent unless marked otherwise.  */
             mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

             ENQUEUE_MUTEX (mutex);
             THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

             /* Note that we deliberately exit here.  If we fall
               through to the end of the function __nusers would be
               incremented which is not correct because the old
               owner has to be discounted.  */
             return EOWNERDEAD;
           }

         /* Check whether we already hold the mutex.  */
         if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
           {
             int kind = PTHREAD_MUTEX_TYPE (mutex);
             if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                             NULL);
                return EDEADLK;
              }

             if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                             NULL);

                /* Just bump the counter.  */
                if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
           }

         oldval = lll_robust_trylock (mutex->__data.__lock, id);
         if (oldval != 0 && (oldval & FUTEX_OWNER_DIED) == 0)
           {
             THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

             return EBUSY;
           }

         if (__builtin_expect (mutex->__data.__owner
                            == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
           {
             /* This mutex is now not recoverable.  */
             mutex->__data.__count = 0;
             if (oldval == id)
              lll_unlock (mutex->__data.__lock,
                         PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
             THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
             return ENOTRECOVERABLE;
           }
       }
      while ((oldval & FUTEX_OWNER_DIED) != 0);

      ENQUEUE_MUTEX (mutex);
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

      mutex->__data.__owner = id;
      ++mutex->__data.__nusers;
      mutex->__data.__count = 1;

      return 0;

    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      {
       int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
       int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;

       if (robust)
         /* Note: robust PI futexes are signaled by setting bit 0.  */
         THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                      (void *) (((uintptr_t) &mutex->__data.__list.__next)
                               | 1));

       oldval = mutex->__data.__lock;

       /* Check whether we already hold the mutex.  */
       if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
         {
           if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
             {
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
              return EDEADLK;
             }

           if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
             {
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              /* Just bump the counter.  */
              if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                /* Overflow of the counter.  */
                return EAGAIN;

              ++mutex->__data.__count;

              return 0;
             }
         }

       oldval
         = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                           id, 0);

       if (oldval != 0)
         {
           if ((oldval & FUTEX_OWNER_DIED) == 0)
             {
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              return EBUSY;
             }

           assert (robust);

           /* The mutex owner died.  The kernel will now take care of
              everything.  */
           int private = (robust
                        ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
                        : PTHREAD_MUTEX_PSHARED (mutex));
           INTERNAL_SYSCALL_DECL (__err);
           int e = INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                                  __lll_private_flag (FUTEX_TRYLOCK_PI,
                                                   private), 0, 0);

           if (INTERNAL_SYSCALL_ERROR_P (e, __err)
              && INTERNAL_SYSCALL_ERRNO (e, __err) == EWOULDBLOCK)
             {
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              return EBUSY;
             }

           oldval = mutex->__data.__lock;
         }

       if (__builtin_expect (oldval & FUTEX_OWNER_DIED, 0))
         {
           atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);

           /* We got the mutex.  */
           mutex->__data.__count = 1;
           /* But it is inconsistent unless marked otherwise.  */
           mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

           ENQUEUE_MUTEX (mutex);
           THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

           /* Note that we deliberately exit here.  If we fall
              through to the end of the function __nusers would be
              incremented which is not correct because the old owner
              has to be discounted.  */
           return EOWNERDEAD;
         }

       if (robust
           && __builtin_expect (mutex->__data.__owner
                             == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
         {
           /* This mutex is now not recoverable.  */
           mutex->__data.__count = 0;

           INTERNAL_SYSCALL_DECL (__err);
           INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                           __lll_private_flag (FUTEX_UNLOCK_PI,
                                            PTHREAD_ROBUST_MUTEX_PSHARED (mutex)),
                           0, 0);

           THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
           return ENOTRECOVERABLE;
         }

       if (robust)
         {
           ENQUEUE_MUTEX_PI (mutex);
           THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
         }

       mutex->__data.__owner = id;
       ++mutex->__data.__nusers;
       mutex->__data.__count = 1;

       return 0;
      }

    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      {
       int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;

       oldval = mutex->__data.__lock;

       /* Check whether we already hold the mutex.  */
       if (mutex->__data.__owner == id)
         {
           if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
             return EDEADLK;

           if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
             {
              /* Just bump the counter.  */
              if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                /* Overflow of the counter.  */
                return EAGAIN;

              ++mutex->__data.__count;

              return 0;
             }
         }

       int oldprio = -1, ceilval;
       do
         {
           int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
                       >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

           if (__pthread_current_priority () > ceiling)
             {
              if (oldprio != -1)
                __pthread_tpp_change_priority (oldprio, -1);
              return EINVAL;
             }

           int retval = __pthread_tpp_change_priority (oldprio, ceiling);
           if (retval)
             return retval;

           ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
           oldprio = ceiling;

           oldval
             = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                               ceilval | 1, ceilval);

           if (oldval == ceilval)
             break;
         }
       while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);

       if (oldval != ceilval)
         {
           __pthread_tpp_change_priority (oldprio, -1);
           break;
         }

       assert (mutex->__data.__owner == 0);
       /* Record the ownership.  */
       mutex->__data.__owner = id;
       ++mutex->__data.__nusers;
       mutex->__data.__count = 1;

       return 0;
      }
      break;

    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

  return EBUSY;
}
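
Unlike the lock path, the trylock variant above never blocks: a contended mutex yields EBUSY, and the robust/priority bookkeeping is rolled back before returning. A hedged sketch of the usual bounded-polling pattern built on top of it (sched_yield is only an illustrative back-off):

  #include <errno.h>
  #include <pthread.h>
  #include <sched.h>

  /* Try to take the lock a bounded number of times before giving up.  */
  int try_lock_bounded (pthread_mutex_t *m, int attempts)
  {
    int err;
    while ((err = pthread_mutex_trylock (m)) == EBUSY && --attempts > 0)
      sched_yield ();        /* illustrative back-off only */
    return err;              /* 0, EBUSY, or another mutex error */
  }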

Here is the caller graph for this function:

Definition at line 265 of file pthread_mutex_unlock.c.

Here is the caller graph for this function:

void __pthread_null_sighandler ( int  sig)

Definition at line 71 of file sighandler.c.

{ }

Here is the caller graph for this function:

void __pthread_once_fork_child ( void  )

Definition at line 354 of file mutex.c.

{
  pthread_mutex_init(&once_masterlock, NULL);
  pthread_cond_init(&once_finished, NULL);
  if (fork_generation <= INT_MAX - 4)
    fork_generation += 4;   /* leave least significant two bits zero */
  else
    fork_generation = 0;
}

Here is the call graph for this function:

Here is the caller graph for this function:

void __pthread_once_fork_parent ( void  )

Definition at line 349 of file mutex.c.

{
  pthread_mutex_unlock(&once_masterlock);
}

Here is the call graph for this function:

Here is the caller graph for this function:

void __pthread_once_fork_prepare ( void  )

Here is the caller graph for this function:
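
These three hooks follow the usual fork-handler discipline: the prepare handler (declared above; its body, presumably taking once_masterlock, is not shown here) runs before fork, the parent handler releases the lock afterwards, and the child handler re-creates the lock and bumps fork_generation so in-progress pthread_once calls restart in the child. The same prepare/parent/child ordering is what an application-level pthread_atfork registration looks like; the handlers below are hypothetical stand-ins, not the internal ones:

  #include <pthread.h>

  static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;

  static void prepare (void) { pthread_mutex_lock (&state_lock); }
  static void parent (void)  { pthread_mutex_unlock (&state_lock); }
  static void child (void)
  {
    /* In the child only the forking thread exists, so re-initialise
       rather than unlock a mutex whose owner did not survive fork.  */
    pthread_mutex_init (&state_lock, NULL);
  }

  static void register_fork_handlers (void)
  {
    pthread_atfork (prepare, parent, child);
  }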

void __pthread_perform_cleanup ( char *  currentframe)

Definition at line 202 of file cancel.c.

{
  pthread_descr self = thread_self();
  struct _pthread_cleanup_buffer *c = THREAD_GETMEM(self, p_cleanup);
  struct _pthread_cleanup_buffer *last;

  if (c != NULL)
    while (FRAME_LEFT (currentframe, c))
      {
       last = c;
       c = c->__prev;

       if (c == NULL || FRAME_LEFT (last, c))
         {
           c = NULL;
           break;
         }
      }

  while (c != NULL)
    {
      c->__routine(c->__arg);

      last = c;
      c = c->__prev;

      if (FRAME_LEFT (last, c))
       break;
    }

  /* And the TSD which needs special help.  */
  __libc_thread_freeres ();
}
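
The cleanup buffers walked above are the ones registered with pthread_cleanup_push; FRAME_LEFT compares stack addresses so that only handlers pushed below the cancellation frame are run. A minimal sketch of the user-level pairing that creates such a buffer:

  #include <pthread.h>
  #include <stdlib.h>

  static void free_buffer (void *arg)
  {
    free (arg);                    /* runs on cancellation or explicit pop */
  }

  static void *worker (void *unused)
  {
    char *buf = malloc (4096);
    (void) unused;

    pthread_cleanup_push (free_buffer, buf);
    /* ... cancellable work using buf ... */
    pthread_testcancel ();         /* a cancellation point */
    pthread_cleanup_pop (1);       /* 1: also run free_buffer now */
    return NULL;
  }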

Here is the caller graph for this function:

Definition at line 198 of file signals.c.

{
  int retcode = pthread_kill(pthread_self(), sig);
  if (retcode == 0)
    return 0;
  else {
    errno = retcode;
    return -1;
  }
}

Here is the call graph for this function:

void __pthread_reset_main_thread ( void  )

Definition at line 1146 of file pthread.c.

{
  pthread_descr self = thread_self();

  if (__pthread_manager_request != -1) {
    /* Free the thread manager stack */
    free(__pthread_manager_thread_bos);
    __pthread_manager_thread_bos = __pthread_manager_thread_tos = NULL;
    /* Close the two ends of the pipe */
    close_not_cancel(__pthread_manager_request);
    close_not_cancel(__pthread_manager_reader);
    __pthread_manager_request = __pthread_manager_reader = -1;
  }

  /* Update the pid of the main thread */
  THREAD_SETMEM(self, p_pid, __getpid());
  /* Make the forked thread the main thread */
  __pthread_main_thread = self;
  THREAD_SETMEM(self, p_nextlive, self);
  THREAD_SETMEM(self, p_prevlive, self);
#if !(USE_TLS && HAVE___THREAD)
  /* Now this thread modifies the global variables.  */
  THREAD_SETMEM(self, p_errnop, &_errno);
  THREAD_SETMEM(self, p_h_errnop, &_h_errno);
  THREAD_SETMEM(self, p_resp, &_res);
#endif

#ifndef FLOATING_STACKS
  /* This is to undo the setrlimit call in __pthread_init_max_stacksize.
     XXX This can be wrong if the user set the limit during the run.  */
 {
   struct rlimit limit;
   if (getrlimit (RLIMIT_STACK, &limit) == 0
       && limit.rlim_cur != limit.rlim_max)
     {
       limit.rlim_cur = limit.rlim_max;
       setrlimit(RLIMIT_STACK, &limit);
     }
 }
#endif
}

Here is the call graph for this function:

Here is the caller graph for this function:

Definition at line 1344 of file pthread.c.

{
  /* The barrier is probably not needed, in which case it still documents
     our assumptions.  The intent is to commit previous writes to shared
     memory so the woken thread will have a consistent view.  Complementary
     read barriers are present in the suspend functions. */
  WRITE_MEMORY_BARRIER();
  kill(th->p_pid, __pthread_sig_restart);
}
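
The write barrier here pairs with the READ_MEMORY_BARRIER() executed after the suspend routines shown later on this page return: the waker publishes its updates before sending the restart signal, and the woken thread fences before reading them. A schematic of that pairing; ready and payload are hypothetical shared variables, and the barrier macros below are the compiler-barrier fallbacks this header defines:

  #define WRITE_MEMORY_BARRIER()  asm ("" : : : "memory")
  #define READ_MEMORY_BARRIER()   asm ("" : : : "memory")

  static volatile int payload;
  static volatile int ready;

  static void waker (void)
  {
    payload = 42;                 /* writes the woken thread must see  */
    WRITE_MEMORY_BARRIER ();      /* commit them before signalling     */
    ready = 1;                    /* stands in for kill(..., restart)  */
  }

  static int wakee (void)
  {
    while (!ready)
      ;                           /* stands in for the suspend call    */
    READ_MEMORY_BARRIER ();       /* pairs with the waker's write fence */
    return payload;
  }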

Here is the caller graph for this function:

Definition at line 1254 of file pthread.c.

{
  if (pthread_atomic_increment(&th->p_resume_count) == -1)
    kill(th->p_pid, __pthread_sig_restart);
}

Here is the call graph for this function:

int __pthread_rwlock_timedrdlock ( pthread_rwlock_t *__restrict  __rwlock,
__const struct timespec *__restrict  __abstime 
)
int __pthread_rwlock_timedwrlock ( pthread_rwlock_t *__restrict  __rwlock,
__const struct timespec *__restrict  __abstime 
)
int __pthread_rwlockattr_destroy ( pthread_rwlockattr_t *  __attr)

Definition at line 607 of file rwlock.c.

{
  return 0;
}

Definition at line 895 of file pthread.c.

{
  pthread_descr self = thread_self();
  return THREAD_GETMEM(self, p_tid);
}

Here is the caller graph for this function:

int __pthread_setcancelstate ( int  state,
int *  oldstate 
)
int __pthread_setcanceltype ( int  type,
int *  oldtype 
)

Definition at line 48 of file cancel.c.

{
  pthread_descr self = thread_self();
  if (type < PTHREAD_CANCEL_DEFERRED || type > PTHREAD_CANCEL_ASYNCHRONOUS)
    return EINVAL;
  if (oldtype != NULL) *oldtype = THREAD_GETMEM(self, p_canceltype);
  THREAD_SETMEM(self, p_canceltype, type);
  if (THREAD_GETMEM(self, p_canceled) &&
      THREAD_GETMEM(self, p_cancelstate) == PTHREAD_CANCEL_ENABLE &&
      THREAD_GETMEM(self, p_canceltype) == PTHREAD_CANCEL_ASYNCHRONOUS)
    __pthread_do_exit(PTHREAD_CANCELED, CURRENT_STACK_FRAME);
  return 0;
}
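
Switching to asynchronous cancellation, as implemented above, takes effect immediately if a cancellation is already pending, so the usual pattern is to enable it only around a pure compute region and restore the previous type afterwards. A hedged sketch using the public API:

  #include <pthread.h>

  static void spin_compute (void)
  {
    int oldtype;

    /* Asynchronous cancellation is only safe around code that holds no
       locks and owns no resources needing cleanup.  */
    pthread_setcanceltype (PTHREAD_CANCEL_ASYNCHRONOUS, &oldtype);

    for (volatile long i = 0; i < 100000000L; ++i)
      ;   /* tight loop with no cancellation points */

    pthread_setcanceltype (oldtype, NULL);
  }
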
int __pthread_setschedparam ( pthread_t  thread,
int  policy,
const struct sched_param *  param 
)

Definition at line 964 of file pthread.c.

{
  pthread_handle handle = thread_handle(thread);
  pthread_descr th;

  __pthread_lock(&handle->h_lock, NULL);
  if (__builtin_expect (invalid_handle(handle, thread), 0)) {
    __pthread_unlock(&handle->h_lock);
    return ESRCH;
  }
  th = handle->h_descr;
  if (__builtin_expect (__sched_setscheduler(th->p_pid, policy, param) == -1,
                     0)) {
    __pthread_unlock(&handle->h_lock);
    return errno;
  }
  th->p_priority = policy == SCHED_OTHER ? 0 : param->sched_priority;
  __pthread_unlock(&handle->h_lock);
  if (__pthread_manager_request >= 0)
    __pthread_manager_adjust_prio(th->p_priority);
  return 0;
}
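
As the body above shows, the priority recorded in the descriptor is forced to 0 for SCHED_OTHER and otherwise taken from the caller's sched_param, and the manager thread's priority is adjusted to match. A caller-side sketch, assuming the process has the privilege needed for SCHED_FIFO:

  #include <pthread.h>
  #include <sched.h>
  #include <string.h>

  int make_realtime (pthread_t th, int prio)
  {
    struct sched_param sp;

    memset (&sp, 0, sizeof sp);
    sp.sched_priority = prio;        /* ignored for SCHED_OTHER */
    /* Returns ESRCH for a dead thread, or the errno from
       sched_setscheduler (typically EPERM) on insufficient privilege.  */
    return pthread_setschedparam (th, SCHED_FIFO, &sp);
  }
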
int __pthread_sigaction ( int  sig,
const struct sigaction *  act,
struct sigaction *  oact 
)

Definition at line 80 of file signals.c.

{
  struct sigaction newact;
  struct sigaction *newactp;
  __sighandler_t old = SIG_DFL;

  if (sig == __pthread_sig_restart ||
      sig == __pthread_sig_cancel ||
      (sig == __pthread_sig_debug && __pthread_sig_debug > 0))
    {
      __set_errno (EINVAL);
      return -1;
    }
  if (sig > 0 && sig < NSIG)
    old = (__sighandler_t) __sighandler[sig].old;
  if (act)
    {
      newact = *act;
      if (act->sa_handler != SIG_IGN && act->sa_handler != SIG_DFL
         && sig > 0 && sig < NSIG)
       {
         if (act->sa_flags & SA_SIGINFO)
           newact.sa_handler = (__sighandler_t) __pthread_sighandler_rt;
         else
           newact.sa_handler = (__sighandler_t) __pthread_sighandler;
         if (old == SIG_IGN || old == SIG_DFL || old == SIG_ERR)
           __sighandler[sig].old = (arch_sighandler_t) act->sa_handler;
       }
      newactp = &newact;
    }
  else
    newactp = NULL;
  if (__libc_sigaction(sig, newactp, oact) == -1)
    {
      if (act)
       __sighandler[sig].old = (arch_sighandler_t) old;
      return -1;
    }
  if (sig > 0 && sig < NSIG)
    {
      if (oact != NULL
         /* We may have inherited SIG_IGN from the parent, so return the
            kernel's idea of the signal handler the first time
            through.  */
         && old != SIG_ERR)
       oact->sa_handler = old;
      if (act)
       /* For the assignment it does not matter whether it's a normal
          or real-time signal.  */
       __sighandler[sig].old = (arch_sighandler_t) act->sa_handler;
    }
  return 0;
}

Here is the call graph for this function:

void __pthread_sighandler ( int  signo,
SIGCONTEXT  ctx 
)

Definition at line 21 of file sighandler.c.

{
  pthread_descr self;
  char * in_sighandler;
  self = check_thread_self();

  /* If we're in a sigwait operation, just record the signal received
     and return without calling the user's handler */
  if (THREAD_GETMEM(self, p_sigwaiting)) {
    THREAD_SETMEM(self, p_sigwaiting, 0);
    THREAD_SETMEM(self, p_signal, signo);
    return;
  }
  /* Record that we're in a signal handler and call the user's
     handler function */
  in_sighandler = THREAD_GETMEM(self, p_in_sighandler);
  if (in_sighandler == NULL)
    THREAD_SETMEM(self, p_in_sighandler, CURRENT_STACK_FRAME);
  CALL_SIGHANDLER(__sighandler[signo].old, signo, ctx);
  if (in_sighandler == NULL)
    THREAD_SETMEM(self, p_in_sighandler, NULL);
}

Here is the caller graph for this function:

void __pthread_sighandler_rt ( int  signo,
struct siginfo *  si,
struct ucontext *  uc 
)

Definition at line 45 of file sighandler.c.

{
  pthread_descr self;
  char * in_sighandler;
  self = check_thread_self();

  /* If we're in a sigwait operation, just record the signal received
     and return without calling the user's handler */
  if (THREAD_GETMEM(self, p_sigwaiting)) {
    THREAD_SETMEM(self, p_sigwaiting, 0);
    THREAD_SETMEM(self, p_signal, signo);
    return;
  }
  /* Record that we're in a signal handler and call the user's
     handler function */
  in_sighandler = THREAD_GETMEM(self, p_in_sighandler);
  if (in_sighandler == NULL)
    THREAD_SETMEM(self, p_in_sighandler, CURRENT_STACK_FRAME);
  __sighandler[signo].rt(signo, si, uc);
  if (in_sighandler == NULL)
    THREAD_SETMEM(self, p_in_sighandler, NULL);
}

Here is the caller graph for this function:

Definition at line 29 of file pt-sigsuspend.c.

{
  INTERNAL_SYSCALL_DECL (err);
  INTERNAL_SYSCALL (rt_sigsuspend, err, 2, set, _NSIG / 8);
}

Here is the call graph for this function:

Here is the caller graph for this function:

int __pthread_sigwait ( const sigset_t *  set,
int *  sig 
)

Definition at line 140 of file signals.c.

{
  volatile pthread_descr self = thread_self();
  sigset_t mask;
  int s;
  sigjmp_buf jmpbuf;
  struct sigaction sa;

  /* Get ready to block all signals except those in set
     and the cancellation signal.
     Also check that handlers are installed on all signals in set,
     and if not, install our dummy handler.  This is conformant to
     POSIX: "The effect of sigwait() on the signal actions for the
     signals in set is unspecified." */
  sigfillset(&mask);
  sigdelset(&mask, __pthread_sig_cancel);
  for (s = 1; s < NSIG; s++) {
    if (sigismember(set, s) &&
        s != __pthread_sig_restart &&
        s != __pthread_sig_cancel &&
        s != __pthread_sig_debug) {
      sigdelset(&mask, s);
      if (__sighandler[s].old == (arch_sighandler_t) SIG_ERR ||
          __sighandler[s].old == (arch_sighandler_t) SIG_DFL ||
          __sighandler[s].old == (arch_sighandler_t) SIG_IGN) {
        sa.sa_handler = __pthread_null_sighandler;
        sigfillset(&sa.sa_mask);
        sa.sa_flags = 0;
        sigaction(s, &sa, NULL);
      }
    }
  }
  /* Test for cancellation */
  if (sigsetjmp(jmpbuf, 1) == 0) {
    THREAD_SETMEM(self, p_cancel_jmp, &jmpbuf);
    if (! (THREAD_GETMEM(self, p_canceled)
          && THREAD_GETMEM(self, p_cancelstate) == PTHREAD_CANCEL_ENABLE)) {
      /* Reset the signal count */
      THREAD_SETMEM(self, p_signal, 0);
      /* Say we're in sigwait */
      THREAD_SETMEM(self, p_sigwaiting, 1);
      /* Unblock the signals and wait for them */
      sigsuspend(&mask);
    }
  }
  THREAD_SETMEM(self, p_cancel_jmp, NULL);
  /* The signals are now reblocked.  Check for cancellation */
  pthread_testcancel();
  /* We should have self->p_signal != 0 and equal to the signal received */
  *sig = THREAD_GETMEM(self, p_signal);
  return 0;
}
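
The implementation above temporarily installs a dummy handler for the awaited signals and suspends with everything except the cancellation signal unblocked. On the application side, the classic pattern is a dedicated signal thread with the signals blocked in every other thread; a hedged sketch:

  #include <pthread.h>
  #include <signal.h>
  #include <stdio.h>

  static void *signal_thread (void *arg)
  {
    sigset_t *set = arg;
    int sig;

    for (;;)
      {
        /* Blocks until one of the signals in *set is delivered.  */
        if (sigwait (set, &sig) == 0)
          printf ("got signal %d\n", sig);
      }
    return NULL;
  }

  int start_signal_thread (pthread_t *tid, sigset_t *set)
  {
    sigemptyset (set);
    sigaddset (set, SIGTERM);
    sigaddset (set, SIGHUP);
    /* Block the signals in the creating thread (inherited by children)
       so that only the sigwait thread consumes them.  */
    pthread_sigmask (SIG_BLOCK, set, NULL);
    return pthread_create (tid, NULL, signal_thread, set);
  }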

Here is the call graph for this function:

int __pthread_spin_destroy ( pthread_spinlock_t *  __lock)
int __pthread_spin_init ( pthread_spinlock_t *  __lock,
int  __pshared 
)
int __pthread_spin_lock ( pthread_spinlock_t *  __lock)

Definition at line 35 of file pspinlock.c.

{
  unsigned int tmp;
  asm volatile
    ("1:      ldl_l  %0,%1\n"
     "        blbs   %0,2f\n"
     "        or     %0,1,%0\n"
     "        stl_c  %0,%1\n"
     "        beq    %0,2f\n"
     "        mb\n"
     ".subsection 2\n"
     "2:      ldl    %0,%1\n"
     "        blbs   %0,2b\n"
     "        br     1b\n"
     ".previous"
     : "=r" (tmp), "=m" (lock)
     : "m" (lock));
  return 0;
}
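
The Alpha ldl_l/stl_c sequence above is a load-locked/store-conditional loop: spin with plain loads while the low bit is set, then try to set the bit atomically, retry on a failed store-conditional, and issue a memory barrier once the lock is held. A rough C-level equivalent of those semantics using the GCC __sync builtins; this is an approximation for illustration, not the code generated here:

  typedef volatile int spinlock_t;

  static int spin_lock (spinlock_t *lock)
  {
    for (;;)
      {
        /* Atomically set the low bit if it was clear.  The builtin acts
           as an acquire barrier, mirroring the trailing "mb" above.  */
        if (__sync_lock_test_and_set (lock, 1) == 0)
          return 0;

        /* Like the ".subsection 2" slow path: spin with plain loads
           until the lock looks free, then retry the atomic operation.  */
        while (*lock != 0)
          ;
      }
  }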

Here is the call graph for this function:

int __pthread_spin_trylock ( pthread_spinlock_t *  __lock)
int __pthread_spin_unlock ( pthread_spinlock_t *  __lock)

Definition at line 1260 of file pthread.c.

{
  if (pthread_atomic_decrement(&self->p_resume_count) <= 0)
    __pthread_wait_for_restart_signal(self);
}

Here is the call graph for this function:

Definition at line 890 of file pthread.c.

{
  return thread_self();
}

Definition at line 1358 of file pthread.c.

{
  sigset_t unblock, initial_mask;
  int was_signalled = 0;
  sigjmp_buf jmpbuf;

  if (sigsetjmp(jmpbuf, 1) == 0) {
    THREAD_SETMEM(self, p_signal_jmp, &jmpbuf);
    THREAD_SETMEM(self, p_signal, 0);
    /* Unblock the restart signal */
    sigemptyset(&unblock);
    sigaddset(&unblock, __pthread_sig_restart);
    sigprocmask(SIG_UNBLOCK, &unblock, &initial_mask);

    while (1) {
      struct timeval now;
      struct timespec reltime;

      /* Compute a time offset relative to now.  */
      __gettimeofday (&now, NULL);
      reltime.tv_nsec = abstime->tv_nsec - now.tv_usec * 1000;
      reltime.tv_sec = abstime->tv_sec - now.tv_sec;
      if (reltime.tv_nsec < 0) {
       reltime.tv_nsec += 1000000000;
       reltime.tv_sec -= 1;
      }

      /* Sleep for the required duration. If woken by a signal,
        resume waiting as required by Single Unix Specification.  */
      if (reltime.tv_sec < 0 || __libc_nanosleep(&reltime, NULL) == 0)
       break;
    }

    /* Block the restart signal again */
    sigprocmask(SIG_SETMASK, &initial_mask, NULL);
    was_signalled = 0;
  } else {
    was_signalled = 1;
  }
  THREAD_SETMEM(self, p_signal_jmp, NULL);

  /* Now was_signalled is true if we exited the above code
     due to the delivery of a restart signal.  In that case,
     everything is cool. We have been removed from whatever
     we were waiting on by the other thread, and consumed its signal.

     Otherwise this thread woke up spontaneously, or due to a signal other
     than restart.  This is an ambiguous case that must be resolved by
     the caller; the thread is still eligible for a restart wakeup
     so there is a race. */

  READ_MEMORY_BARRIER(); /* See comment in __pthread_restart_new */
  return was_signalled;
}
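
The loop above re-derives a relative interval from the caller's absolute CLOCK_REALTIME deadline on every iteration, so that a signal-interrupted nanosleep resumes with a freshly computed remainder. The same conversion in isolation, as a small helper whose names are illustrative:

  #include <sys/time.h>
  #include <time.h>

  /* Turn an absolute deadline into the time still left from "now".
     Returns 0 with a normalised reltime, or -1 if the deadline passed.  */
  static int abstime_to_reltime (const struct timespec *abstime,
                                 struct timespec *reltime)
  {
    struct timeval now;

    gettimeofday (&now, NULL);
    reltime->tv_sec = abstime->tv_sec - now.tv_sec;
    reltime->tv_nsec = abstime->tv_nsec - now.tv_usec * 1000;
    if (reltime->tv_nsec < 0)
      {
        reltime->tv_nsec += 1000000000;
        reltime->tv_sec -= 1;
      }
    return reltime->tv_sec < 0 ? -1 : 0;
  }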

Here is the call graph for this function:

Here is the caller graph for this function:

Definition at line 1267 of file pthread.c.

{
  sigset_t unblock, initial_mask;
  int was_signalled = 0;
  sigjmp_buf jmpbuf;

  if (pthread_atomic_decrement(&self->p_resume_count) == 0) {
    /* Set up a longjmp handler for the restart signal, unblock
       the signal and sleep. */

    if (sigsetjmp(jmpbuf, 1) == 0) {
      THREAD_SETMEM(self, p_signal_jmp, &jmpbuf);
      THREAD_SETMEM(self, p_signal, 0);
      /* Unblock the restart signal */
      sigemptyset(&unblock);
      sigaddset(&unblock, __pthread_sig_restart);
      sigprocmask(SIG_UNBLOCK, &unblock, &initial_mask);

      while (1) {
       struct timeval now;
       struct timespec reltime;

       /* Compute a time offset relative to now.  */
       __gettimeofday (&now, NULL);
       reltime.tv_nsec = abstime->tv_nsec - now.tv_usec * 1000;
       reltime.tv_sec = abstime->tv_sec - now.tv_sec;
       if (reltime.tv_nsec < 0) {
         reltime.tv_nsec += 1000000000;
         reltime.tv_sec -= 1;
       }

       /* Sleep for the required duration. If woken by a signal,
          resume waiting as required by Single Unix Specification.  */
       if (reltime.tv_sec < 0 || __libc_nanosleep(&reltime, NULL) == 0)
         break;
      }

      /* Block the restart signal again */
      sigprocmask(SIG_SETMASK, &initial_mask, NULL);
      was_signalled = 0;
    } else {
      was_signalled = 1;
    }
    THREAD_SETMEM(self, p_signal_jmp, NULL);
  }

  /* Now was_signalled is true if we exited the above code
     due to the delivery of a restart signal.  In that case,
     we know we have been dequeued and resumed and that the
     resume count is balanced.  Otherwise, there are some
     cases to consider. First, try to bump up the resume count
     back to zero. If it goes to 1, it means restart() was
     invoked on this thread. The signal must be consumed
     and the count bumped down and everything is cool. We
     can return a 1 to the caller.
     Otherwise, no restart was delivered yet, so a potential
     race exists; we return a 0 to the caller which must deal
     with this race in an appropriate way; for example by
     atomically removing the thread from consideration for a
     wakeup---if such a thing fails, it means a restart is
     being delivered. */

  if (!was_signalled) {
    if (pthread_atomic_increment(&self->p_resume_count) != -1) {
      __pthread_wait_for_restart_signal(self);
      pthread_atomic_decrement(&self->p_resume_count); /* should be zero now! */
      /* woke spontaneously and consumed restart signal */
      return 1;
    }
    /* woke spontaneously but did not consume restart---caller must resolve */
    return 0;
  }
  /* woken due to restart signal */
  return 1;
}

Here is the call graph for this function:

Here is the caller graph for this function:

int __pthread_yield ( void  )

Definition at line 1010 of file pthread.c.

{
  /* For now this is equivalent with the POSIX call.  */
  return sched_yield ();
}

Here is the call graph for this function:

static int invalid_handle ( pthread_handle  h,
pthread_t  id 
) [inline, static]

Definition at line 178 of file internals.h.

{
  return h->h_descr == NULL || h->h_descr->p_tid != id || h->h_descr->p_terminated;
}

Here is the caller graph for this function:

static int nonexisting_handle ( pthread_handle  h,
pthread_t  id 
) [inline, static]

Definition at line 183 of file internals.h.

{
  return h->h_descr == NULL || h->h_descr->p_tid != id;
}

Here is the caller graph for this function:

static pthread_handle thread_handle ( pthread_t  id) [inline, static]

Definition at line 171 of file internals.h.

{
  return &__pthread_handles[id % PTHREAD_THREADS_MAX];
}

Here is the caller graph for this function:

Definition at line 519 of file internals.h.

{
  pthread_descr self = thread_self ();
#if defined THREAD_SELF && defined INIT_THREAD_SELF
  if (self == __manager_thread)
    {
      /* A new thread might get a cancel signal before it is fully
        initialized, so that the thread register might still point to the
        manager thread.  Double check that this is really the manager
        thread.  */
      self = __pthread_self_stack();
      if (self != __manager_thread)
       /* Oops, thread_self() isn't working yet..  */
       INIT_THREAD_SELF(self, self->p_nr);
    }
#endif
  return self;
}

Here is the call graph for this function:


Variable Documentation

Definition at line 135 of file pthread.c.

Definition at line 134 of file pthread.c.

Definition at line 61 of file manager.c.

Definition at line 108 of file pthread.c.

Definition at line 125 of file pthread.c.

Definition at line 119 of file pthread.c.

Definition at line 148 of file pthread.c.

Definition at line 190 of file pthread.c.

Definition at line 191 of file pthread.c.

Definition at line 189 of file pthread.c.

Definition at line 141 of file pthread.c.

Definition at line 149 of file pthread.c.

Definition at line 55 of file manager.c.

Definition at line 58 of file manager.c.

Definition at line 150 of file pthread.c.

int __libc_multiple_threads attribute_hidden

Definition at line 25 of file init-first.c.