Back to index

glibc  2.9
pthread.c
Go to the documentation of this file.
00001 
00002 /* Linuxthreads - a simple clone()-based implementation of Posix        */
00003 /* threads for Linux.                                                   */
00004 /* Copyright (C) 1996 Xavier Leroy (Xavier.Leroy@inria.fr)              */
00005 /*                                                                      */
00006 /* This program is free software; you can redistribute it and/or        */
00007 /* modify it under the terms of the GNU Library General Public License  */
00008 /* as published by the Free Software Foundation; either version 2       */
00009 /* of the License, or (at your option) any later version.               */
00010 /*                                                                      */
00011 /* This program is distributed in the hope that it will be useful,      */
00012 /* but WITHOUT ANY WARRANTY; without even the implied warranty of       */
00013 /* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the        */
00014 /* GNU Library General Public License for more details.                 */
00015 
00016 /* Thread creation, initialization, and basic low-level routines */
00017 
00018 #include <errno.h>
00019 #include <stddef.h>
00020 #include <stdio.h>
00021 #include <stdlib.h>
00022 #include <string.h>
00023 #include <unistd.h>
00024 #include <fcntl.h>
00025 #include <sys/wait.h>
00026 #include <sys/resource.h>
00027 #include <sys/time.h>
00028 #include <shlib-compat.h>
00029 #include "pthread.h"
00030 #include "internals.h"
00031 #include "spinlock.h"
00032 #include "restart.h"
00033 #include "smp.h"
00034 #include <ldsodefs.h>
00035 #include <tls.h>
00036 #include <version.h>
00037 #include <not-cancel.h>
00038 
00039 /* Sanity check.  */
00040 #if !defined PTHREAD_SIGBASE && (!defined __SIGRTMIN || (__SIGRTMAX - __SIGRTMIN) < 3)
00041 # error "This must not happen"
00042 #endif
00043 
#if !(USE_TLS && HAVE___THREAD)
/* These variables are used by the setup code.  They provide the storage
   that the initial thread's p_errnop/p_h_errnop/p_resp pointers refer to
   when per-thread __thread variables are not available.  */
extern int _errno;
extern int _h_errno;

/* We need the global/static resolver state here.  */
# include <resolv.h>
# undef _res            /* resolv.h defines _res as a macro; undo that to
                           reach the real global object.  */

extern struct __res_state _res;
#endif
00055 
#ifdef USE_TLS

/* We need only a few variables.  */
#define manager_thread __pthread_manager_threadp
/* With TLS the manager's descriptor lives in TLS-allocated memory; only
   this pointer is kept here (set in __pthread_initialize_manager).  */
pthread_descr __pthread_manager_threadp attribute_hidden;

#else

/* Descriptor of the initial thread */

struct _pthread_descr_struct __pthread_initial_thread = {
  .p_header.data.self = &__pthread_initial_thread,
  .p_nextlive = &__pthread_initial_thread,   /* alone on the live-thread ring */
  .p_prevlive = &__pthread_initial_thread,
  .p_tid = PTHREAD_THREADS_MAX,
  .p_lock = &__pthread_handles[0].h_lock,    /* handle slot 0 is the initial thread */
  .p_start_args = PTHREAD_START_ARGS_INITIALIZER(NULL),
#if !(USE_TLS && HAVE___THREAD)
  .p_errnop = &_errno,          /* point at the process-global errno/h_errno/_res */
  .p_h_errnop = &_h_errno,
  .p_resp = &_res,
#endif
  .p_userstack = 1,             /* stack not allocated by this library */
  .p_resume_count = __ATOMIC_INITIALIZER,
  .p_alloca_cutoff = __MAX_ALLOCA_CUTOFF
};

/* Descriptor of the manager thread; none of this is used but the error
   variables, the p_pid and p_priority fields,
   and the address for identification.  */

#define manager_thread (&__pthread_manager_thread)
struct _pthread_descr_struct __pthread_manager_thread = {
  .p_header.data.self = &__pthread_manager_thread,
  .p_header.data.multiple_threads = 1,
  .p_lock = &__pthread_handles[1].h_lock,    /* handle slot 1 is the manager */
  .p_start_args = PTHREAD_START_ARGS_INITIALIZER(__pthread_manager),
#if !(USE_TLS && HAVE___THREAD)
  .p_errnop = &__pthread_manager_thread.p_errno,
#endif
  .p_nr = 1,
  .p_resume_count = __ATOMIC_INITIALIZER,
  .p_alloca_cutoff = PTHREAD_STACK_MIN / 4
};
#endif
00101 
/* Pointer to the main thread (the father of the thread manager thread) */
/* Originally, this is the initial thread, but this changes after fork() */

#ifdef USE_TLS
pthread_descr __pthread_main_thread;
#else
pthread_descr __pthread_main_thread = &__pthread_initial_thread;
#endif

/* Limit between the stack of the initial thread (above) and the
   stacks of other threads (below). Aligned on a STACK_SIZE boundary. */

char *__pthread_initial_thread_bos;

/* File descriptor for sending requests to the thread manager. */
/* Initially -1, meaning that the thread manager is not running. */

int __pthread_manager_request = -1;

/* Set to 1 once the manager is started and more than one thread may
   exist (see __pthread_initialize_manager).  */
int __pthread_multiple_threads attribute_hidden;

/* Other end of the pipe for sending requests to the thread manager. */

int __pthread_manager_reader;

/* Limits of the thread manager stack */

char *__pthread_manager_thread_bos;
char *__pthread_manager_thread_tos;

/* For process-wide exit() */

int __pthread_exit_requested;
int __pthread_exit_code;

/* Maximum stack size.  Computed lazily by __pthread_init_max_stacksize.  */
size_t __pthread_max_stacksize;

/* Nonzero if the machine has more than one processor.  */
int __pthread_smp_kernel;
00143 
#if !__ASSUME_REALTIME_SIGNALS
/* Pointers that select new or old suspend/resume functions
   based on availability of rt signals.  They start out pointing at the
   legacy implementations and are redirected to the rt-signal versions
   by init_rtsigs () once rt signals are known to be usable.  */

void (*__pthread_restart)(pthread_descr) = __pthread_restart_old;
void (*__pthread_suspend)(pthread_descr) = __pthread_suspend_old;
int (*__pthread_timedsuspend)(pthread_descr, const struct timespec *) = __pthread_timedsuspend_old;
#endif /* __ASSUME_REALTIME_SIGNALS */

/* Communicate relevant LinuxThreads constants to gdb */

const int __pthread_threads_max = PTHREAD_THREADS_MAX;
const int __pthread_sizeof_handle = sizeof(struct pthread_handle_struct);
const int __pthread_offsetof_descr = offsetof(struct pthread_handle_struct,
                                              h_descr);
const int __pthread_offsetof_pid = offsetof(struct _pthread_descr_struct,
                                            p_pid);
const int __linuxthreads_pthread_sizeof_descr
  = sizeof(struct _pthread_descr_struct);

/* Zero by default; only read here (in __pthread_initialize_manager).
   NOTE(review): presumably patched externally by a debugger to request
   event reporting from startup — confirm against thread_db users.  */
const int __linuxthreads_initial_report_events;

const char __linuxthreads_version[] = VERSION;
00167 
/* Forward declarations */

static void pthread_onexit_process(int retcode, void *arg);
#ifndef HAVE_Z_NODELETE
static void pthread_atexit_process(void *arg, int retcode);
static void pthread_atexit_retcode(void *arg, int retcode);
#endif
static void pthread_handle_sigcancel(int sig);
static void pthread_handle_sigrestart(int sig);
static void pthread_handle_sigdebug(int sig);

/* Signal numbers used for the communication.
   In these variables we keep track of the used variables.  If the
   platform does not support any real-time signals we will define the
   values to some unreasonable value which will signal failing of all
   the functions below.  */
#if defined (PTHREAD_SIGBASE)
int __pthread_sig_restart = PTHREAD_SIGBASE;
int __pthread_sig_cancel = PTHREAD_SIGBASE + 1;
int __pthread_sig_debug = PTHREAD_SIGBASE + 2;
#else
int __pthread_sig_restart = __SIGRTMIN;
int __pthread_sig_cancel = __SIGRTMIN + 1;
int __pthread_sig_debug = __SIGRTMIN + 2;
#endif

/* Defined in libc; a return of -1 is treated below (init_rtsigs) as
   "kernel has no usable real-time signals".  */
extern int __libc_current_sigrtmin_private (void);
00195 
00196 #if !__ASSUME_REALTIME_SIGNALS
00197 static int rtsigs_initialized;
00198 
00199 static void
00200 init_rtsigs (void)
00201 {
00202   if (rtsigs_initialized)
00203     return;
00204 
00205   if (__libc_current_sigrtmin_private () == -1)
00206     {
00207 #ifndef PTHREAD_SIGBASE
00208       __pthread_sig_restart = SIGUSR1;
00209       __pthread_sig_cancel = SIGUSR2;
00210       __pthread_sig_debug = 0;
00211 #endif
00212     }
00213   else
00214     {
00215       __pthread_restart = __pthread_restart_new;
00216       __pthread_suspend = __pthread_wait_for_restart_signal;
00217       __pthread_timedsuspend = __pthread_timedsuspend_new;
00218     }
00219 
00220   rtsigs_initialized = 1;
00221 }
00222 #endif
00223 
00224 
/* Initialize the pthread library.
   Initialization is split in two functions:
   - a constructor function that blocks the __pthread_sig_restart signal
     (must do this very early, since the program could capture the signal
      mask with e.g. sigsetjmp before creating the first thread);
   - a regular function called from pthread_create when needed. */

static void pthread_initialize(void) __attribute__((constructor));

#ifndef HAVE_Z_NODELETE
/* Weak reference: tested below via `&__dso_handle != NULL', which is
   NULL when the symbol is absent from the link.  */
extern void *__dso_handle __attribute__ ((weak));
#endif


#if defined USE_TLS && !defined SHARED
/* Statically linked: libc's TLS setup must be invoked by hand
   (the dynamic linker is not there to do it).  */
extern void __libc_setup_tls (size_t tcbsize, size_t tcbalign);
#endif
00242 
/* Function table handed to libc via __libc_pthread_init (see
   __pthread_initialize_minimal) so libc can forward pthread entry
   points into this library.  */
struct pthread_functions __pthread_functions =
  {
#if !(USE_TLS && HAVE___THREAD)
    .ptr_pthread_internal_tsd_set = __pthread_internal_tsd_set,
    .ptr_pthread_internal_tsd_get = __pthread_internal_tsd_get,
    .ptr_pthread_internal_tsd_address = __pthread_internal_tsd_address,
#endif
    .ptr_pthread_fork = __pthread_fork,
    .ptr_pthread_attr_destroy = __pthread_attr_destroy,
#if SHLIB_COMPAT(libpthread, GLIBC_2_0, GLIBC_2_1)
    .ptr___pthread_attr_init_2_0 = __pthread_attr_init_2_0,
#endif
    .ptr___pthread_attr_init_2_1 = __pthread_attr_init_2_1,
    .ptr_pthread_attr_getdetachstate = __pthread_attr_getdetachstate,
    .ptr_pthread_attr_setdetachstate = __pthread_attr_setdetachstate,
    .ptr_pthread_attr_getinheritsched = __pthread_attr_getinheritsched,
    .ptr_pthread_attr_setinheritsched = __pthread_attr_setinheritsched,
    .ptr_pthread_attr_getschedparam = __pthread_attr_getschedparam,
    .ptr_pthread_attr_setschedparam = __pthread_attr_setschedparam,
    .ptr_pthread_attr_getschedpolicy = __pthread_attr_getschedpolicy,
    .ptr_pthread_attr_setschedpolicy = __pthread_attr_setschedpolicy,
    .ptr_pthread_attr_getscope = __pthread_attr_getscope,
    .ptr_pthread_attr_setscope = __pthread_attr_setscope,
    .ptr_pthread_condattr_destroy = __pthread_condattr_destroy,
    .ptr_pthread_condattr_init = __pthread_condattr_init,
    .ptr___pthread_cond_broadcast = __pthread_cond_broadcast,
    .ptr___pthread_cond_destroy = __pthread_cond_destroy,
    .ptr___pthread_cond_init = __pthread_cond_init,
    .ptr___pthread_cond_signal = __pthread_cond_signal,
    .ptr___pthread_cond_wait = __pthread_cond_wait,
    .ptr___pthread_cond_timedwait = __pthread_cond_timedwait,
    .ptr_pthread_equal = __pthread_equal,
    .ptr___pthread_exit = __pthread_exit,
    .ptr_pthread_getschedparam = __pthread_getschedparam,
    .ptr_pthread_setschedparam = __pthread_setschedparam,
    .ptr_pthread_mutex_destroy = __pthread_mutex_destroy,
    .ptr_pthread_mutex_init = __pthread_mutex_init,
    .ptr_pthread_mutex_lock = __pthread_mutex_lock,
    .ptr_pthread_mutex_trylock = __pthread_mutex_trylock,
    .ptr_pthread_mutex_unlock = __pthread_mutex_unlock,
    .ptr_pthread_self = __pthread_self,
    .ptr_pthread_setcancelstate = __pthread_setcancelstate,
    .ptr_pthread_setcanceltype = __pthread_setcanceltype,
    .ptr_pthread_do_exit = __pthread_do_exit,
    .ptr_pthread_thread_self = __pthread_thread_self,
    .ptr_pthread_cleanup_upto = __pthread_cleanup_upto,
    .ptr_pthread_sigaction = __pthread_sigaction,
    .ptr_pthread_sigwait = __pthread_sigwait,
    .ptr_pthread_raise = __pthread_raise,
    .ptr__pthread_cleanup_push = _pthread_cleanup_push,
    .ptr__pthread_cleanup_pop = _pthread_cleanup_pop
  };
#ifdef SHARED
# define ptr_pthread_functions &__pthread_functions
#else
# define ptr_pthread_functions NULL
#endif

/* Address of libc's "multiple threads" flag, as returned by
   __libc_pthread_init.  Set in __pthread_initialize_minimal.  */
static int *__libc_multiple_threads_ptr;
00302 
/* Do some minimal initialization which has to be done during the
   startup of the C library.  Sets up the initial thread's descriptor
   (in TLS or the static __pthread_initial_thread), the first two
   handle slots, and registers the function table with libc.  */
void
__pthread_initialize_minimal(void)
{
#ifdef USE_TLS
  pthread_descr self;

  /* First of all init __pthread_handles[0] and [1] if needed.  */
# if __LT_SPINLOCK_INIT != 0
  __pthread_handles[0].h_lock = __LOCK_INITIALIZER;
  __pthread_handles[1].h_lock = __LOCK_INITIALIZER;
# endif
# ifndef SHARED
  /* Unlike in the dynamically linked case the dynamic linker has not
     taken care of initializing the TLS data structures.  */
  __libc_setup_tls (TLS_TCB_SIZE, TLS_TCB_ALIGN);
# elif !USE___THREAD
  if (__builtin_expect (GL(dl_tls_dtv_slotinfo_list) == NULL, 0))
    {
      tcbhead_t *tcbp;

      /* There is no actual TLS being used, so the thread register
        was not initialized in the dynamic linker.  */

      /* We need to install special hooks so that the malloc and memalign
        calls in _dl_tls_setup and _dl_allocate_tls won't cause full
        malloc initialization that will try to set up its thread state.  */

      extern void __libc_malloc_pthread_startup (bool first_time);
      __libc_malloc_pthread_startup (true);

      if (__builtin_expect (_dl_tls_setup (), 0)
         || __builtin_expect ((tcbp = _dl_allocate_tls (NULL)) == NULL, 0))
       {
         static const char msg[] = "\
cannot allocate TLS data structures for initial thread\n";
         TEMP_FAILURE_RETRY (write_not_cancel (STDERR_FILENO,
                                          msg, sizeof msg - 1));
         abort ();
       }
      /* TLS_INIT_TP yields NULL on success, else an error string.  */
      const char *lossage = TLS_INIT_TP (tcbp, 0);
      if (__builtin_expect (lossage != NULL, 0))
       {
         static const char msg[] = "cannot set up thread-local storage: ";
         const char nl = '\n';
         TEMP_FAILURE_RETRY (write_not_cancel (STDERR_FILENO,
                                          msg, sizeof msg - 1));
         TEMP_FAILURE_RETRY (write_not_cancel (STDERR_FILENO,
                                          lossage, strlen (lossage)));
         TEMP_FAILURE_RETRY (write_not_cancel (STDERR_FILENO, &nl, 1));
       }

      /* Though it was allocated with libc's malloc, that was done without
        the user's __malloc_hook installed.  A later realloc that uses
        the hooks might not work with that block from the plain malloc.
        So we record this block as unfreeable just as the dynamic linker
        does when it allocates the DTV before the libc malloc exists.  */
      GL(dl_initial_dtv) = GET_DTV (tcbp);

      __libc_malloc_pthread_startup (false);
    }
# endif

  self = THREAD_SELF;

  /* The memory for the thread descriptor was allocated elsewhere as
     part of the TLS allocation.  We have to initialize the data
     structure by hand.  This initialization must mirror the struct
     definition above.  */
  self->p_nextlive = self->p_prevlive = self;
  self->p_tid = PTHREAD_THREADS_MAX;
  self->p_lock = &__pthread_handles[0].h_lock;
# ifndef HAVE___THREAD
  self->p_errnop = &_errno;
  self->p_h_errnop = &_h_errno;
# endif
  /* self->p_start_args need not be initialized, it's all zero.  */
  self->p_userstack = 1;
# if __LT_SPINLOCK_INIT != 0
  self->p_resume_count = (struct pthread_atomic) __ATOMIC_INITIALIZER;
# endif
  self->p_alloca_cutoff = __MAX_ALLOCA_CUTOFF;

  /* Another variable which points to the thread descriptor.  */
  __pthread_main_thread = self;

  /* And fill in the pointer in the __pthread_handles array.  */
  __pthread_handles[0].h_descr = self;

#else  /* USE_TLS */

  /* First of all init __pthread_handles[0] and [1].  */
# if __LT_SPINLOCK_INIT != 0
  __pthread_handles[0].h_lock = __LOCK_INITIALIZER;
  __pthread_handles[1].h_lock = __LOCK_INITIALIZER;
# endif
  __pthread_handles[0].h_descr = &__pthread_initial_thread;
  __pthread_handles[1].h_descr = &__pthread_manager_thread;

  /* If we have special thread_self processing, initialize that for the
     main thread now.  */
# ifdef INIT_THREAD_SELF
  INIT_THREAD_SELF(&__pthread_initial_thread, 0);
# endif
#endif

#if HP_TIMING_AVAIL
# ifdef USE_TLS
  self->p_cpuclock_offset = GL(dl_cpuclock_offset);
# else
  __pthread_initial_thread.p_cpuclock_offset = GL(dl_cpuclock_offset);
# endif
#endif

  /* Hand the function table to libc; it returns the address of its
     "multiple threads" flag.  */
  __libc_multiple_threads_ptr = __libc_pthread_init (ptr_pthread_functions);
}
00420 
00421 
00422 void
00423 __pthread_init_max_stacksize(void)
00424 {
00425   struct rlimit limit;
00426   size_t max_stack;
00427 
00428   getrlimit(RLIMIT_STACK, &limit);
00429 #ifdef FLOATING_STACKS
00430   if (limit.rlim_cur == RLIM_INFINITY)
00431     limit.rlim_cur = ARCH_STACK_MAX_SIZE;
00432 # ifdef NEED_SEPARATE_REGISTER_STACK
00433   max_stack = limit.rlim_cur / 2;
00434 # else
00435   max_stack = limit.rlim_cur;
00436 # endif
00437 #else
00438   /* Play with the stack size limit to make sure that no stack ever grows
00439      beyond STACK_SIZE minus one page (to act as a guard page). */
00440 # ifdef NEED_SEPARATE_REGISTER_STACK
00441   /* STACK_SIZE bytes hold both the main stack and register backing
00442      store. The rlimit value applies to each individually.  */
00443   max_stack = STACK_SIZE/2 - __getpagesize ();
00444 # else
00445   max_stack = STACK_SIZE - __getpagesize();
00446 # endif
00447   if (limit.rlim_cur > max_stack) {
00448     limit.rlim_cur = max_stack;
00449     setrlimit(RLIMIT_STACK, &limit);
00450   }
00451 #endif
00452   __pthread_max_stacksize = max_stack;
00453   if (max_stack / 4 < __MAX_ALLOCA_CUTOFF)
00454     {
00455 #ifdef USE_TLS
00456       pthread_descr self = THREAD_SELF;
00457       self->p_alloca_cutoff = max_stack / 4;
00458 #else
00459       __pthread_initial_thread.p_alloca_cutoff = max_stack / 4;
00460 #endif
00461     }
00462 }
00463 
#ifdef SHARED
/* Per-thread slot for the dynamic linker's dl-error state;
   GL(dl_error_catch_tsd) is redirected to this in pthread_initialize.  */
# if USE___THREAD
/* When using __thread for this, we do it in libc so as not
   to give libpthread its own TLS segment just for this.  */
extern void **__libc_dl_error_tsd (void) __attribute__ ((const));
# else
static void ** __attribute__ ((const))
__libc_dl_error_tsd (void)
{
  return &thread_self ()->p_libc_specific[_LIBC_TSD_KEY_DL_ERROR];
}
# endif
#endif
00477 
#ifdef USE_TLS
/* Set up MAP's static-TLS block inside DESCR's TLS area: point the
   DTV slot at it and copy in the initialization image.  */
static inline void __attribute__((always_inline))
init_one_static_tls (pthread_descr descr, struct link_map *map)
{
# if TLS_TCB_AT_TP
  dtv_t *dtv = GET_DTV (descr);
  void *dest = (char *) descr - map->l_tls_offset;
# elif TLS_DTV_AT_TP
  dtv_t *dtv = GET_DTV ((pthread_descr) ((char *) descr + TLS_PRE_TCB_SIZE));
  void *dest = (char *) descr + map->l_tls_offset + TLS_PRE_TCB_SIZE;
# else
#  error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
# endif

  /* Fill in the DTV slot so that a later LD/GD access will find it.  */
  dtv[map->l_tls_modid].pointer.val = dest;
  dtv[map->l_tls_modid].pointer.is_static = true;

  /* Initialize the memory.  */
  memset (__mempcpy (dest, map->l_tls_initimage, map->l_tls_initimage_size),
         '\0', map->l_tls_blocksize - map->l_tls_initimage_size);
}

/* Installed as GL(dl_init_static_tls) in pthread_initialize: when a
   newly loaded object needs static TLS, initialize its block in every
   live thread except the manager (handle slot 1).  */
static void
__pthread_init_static_tls (struct link_map *map)
{
  size_t i;

  for (i = 0; i < PTHREAD_THREADS_MAX; ++i)
    if (__pthread_handles[i].h_descr != NULL && i != 1)
      {
        __pthread_lock (&__pthread_handles[i].h_lock, NULL);
        /* Re-check under the lock; the slot may have been emptied.  */
       if (__pthread_handles[i].h_descr != NULL)
         init_one_static_tls (__pthread_handles[i].h_descr, map);
        __pthread_unlock (&__pthread_handles[i].h_lock);
      }
}
#endif
00516 
/* Library constructor: record stack limits, stamp the initial thread's
   pid and resolver state, install the three communication signal
   handlers, adjust the signal mask, register the process-exit cleanup,
   and (when SHARED) hand rtld's error-TSD and locking over to pthread
   primitives.  Idempotent.  */
static void pthread_initialize(void)
{
  struct sigaction sa;
  sigset_t mask;

  /* If already done (e.g. by a constructor called earlier!), bail out */
  if (__pthread_initial_thread_bos != NULL) return;
#ifdef TEST_FOR_COMPARE_AND_SWAP
  /* Test if compare-and-swap is available */
  __pthread_has_cas = compare_and_swap_is_available();
#endif
#ifdef FLOATING_STACKS
  /* We don't need to know the bottom of the stack.  Give the pointer some
     value to signal that initialization happened.  */
  __pthread_initial_thread_bos = (void *) -1l;
#else
  /* Determine stack size limits.  */
  __pthread_init_max_stacksize ();
# ifdef _STACK_GROWS_UP
  /* The initial thread already has all the stack it needs */
  __pthread_initial_thread_bos = (char *)
    ((long)CURRENT_STACK_FRAME &~ (STACK_SIZE - 1));
# else
  /* For the initial stack, reserve at least STACK_SIZE bytes of stack
     below the current stack address, and align that on a
     STACK_SIZE boundary. */
  __pthread_initial_thread_bos =
    (char *)(((long)CURRENT_STACK_FRAME - 2 * STACK_SIZE) & ~(STACK_SIZE - 1));
# endif
#endif
#ifdef USE_TLS
  /* Update the descriptor for the initial thread. */
  THREAD_SETMEM (((pthread_descr) NULL), p_pid, __getpid());
# ifndef HAVE___THREAD
  /* Likewise for the resolver state _res.  */
  THREAD_SETMEM (((pthread_descr) NULL), p_resp, &_res);
# endif
#else
  /* Update the descriptor for the initial thread. */
  __pthread_initial_thread.p_pid = __getpid();
  /* Likewise for the resolver state _res.  */
  __pthread_initial_thread.p_resp = &_res;
#endif
#if !__ASSUME_REALTIME_SIGNALS
  /* Initialize real-time signals. */
  init_rtsigs ();
#endif
  /* Setup signal handlers for the initial thread.
     Since signal handlers are shared between threads, these settings
     will be inherited by all other threads. */
  sa.sa_handler = pthread_handle_sigrestart;
  sigemptyset(&sa.sa_mask);
  sa.sa_flags = 0;
  __libc_sigaction(__pthread_sig_restart, &sa, NULL);
  sa.sa_handler = pthread_handle_sigcancel;
  /* The cancel handler additionally blocks the restart signal.  */
  sigaddset(&sa.sa_mask, __pthread_sig_restart);
  /* sa.sa_flags is still 0 from above.  */
  __libc_sigaction(__pthread_sig_cancel, &sa, NULL);
  if (__pthread_sig_debug > 0) {
    sa.sa_handler = pthread_handle_sigdebug;
    sigemptyset(&sa.sa_mask);
    /* sa.sa_flags is still 0 from above.  */
    __libc_sigaction(__pthread_sig_debug, &sa, NULL);
  }
  /* Initially, block __pthread_sig_restart. Will be unblocked on demand. */
  sigemptyset(&mask);
  sigaddset(&mask, __pthread_sig_restart);
  sigprocmask(SIG_BLOCK, &mask, NULL);
  /* And unblock __pthread_sig_cancel if it has been blocked. */
  sigdelset(&mask, __pthread_sig_restart);
  sigaddset(&mask, __pthread_sig_cancel);
  sigprocmask(SIG_UNBLOCK, &mask, NULL);
  /* Register an exit function to kill all other threads. */
  /* Do it early so that user-registered atexit functions are called
     before pthread_*exit_process. */
#ifndef HAVE_Z_NODELETE
  if (__builtin_expect (&__dso_handle != NULL, 1))
    __cxa_atexit ((void (*) (void *)) pthread_atexit_process, NULL,
                __dso_handle);
  else
#endif
    __on_exit (pthread_onexit_process, NULL);
  /* How many processors.  */
  __pthread_smp_kernel = is_smp_system ();

#ifdef SHARED
  /* Transfer the old value from the dynamic linker's internal location.  */
  *__libc_dl_error_tsd () = *(*GL(dl_error_catch_tsd)) ();
  GL(dl_error_catch_tsd) = &__libc_dl_error_tsd;

  /* Make __rtld_lock_{,un}lock_recursive use pthread_mutex_{,un}lock,
     keep the lock count from the ld.so implementation.  */
  GL(dl_rtld_lock_recursive) = (void *) __pthread_mutex_lock;
  GL(dl_rtld_unlock_recursive) = (void *) __pthread_mutex_unlock;
  unsigned int rtld_lock_count = GL(dl_load_lock).mutex.__m_count;
  GL(dl_load_lock).mutex.__m_count = 0;
  while (rtld_lock_count-- > 0)
    __pthread_mutex_lock (&GL(dl_load_lock).mutex);
#endif

#ifdef USE_TLS
  GL(dl_init_static_tls) = &__pthread_init_static_tls;
#endif
}
00621 
/* Exported entry point to force library initialization; simply runs the
   constructor (which is idempotent).  */
void __pthread_initialize(void)
{
  pthread_initialize();
}
00626 
00627 int __pthread_initialize_manager(void)
00628 {
00629   int manager_pipe[2];
00630   int pid;
00631   struct pthread_request request;
00632   int report_events;
00633   pthread_descr mgr;
00634 #ifdef USE_TLS
00635   tcbhead_t *tcbp;
00636 #endif
00637 
00638   __pthread_multiple_threads = 1;
00639 #if TLS_MULTIPLE_THREADS_IN_TCB || !defined USE_TLS || !TLS_DTV_AT_TP
00640   __pthread_main_thread->p_multiple_threads = 1;
00641 #endif
00642   *__libc_multiple_threads_ptr = 1;
00643 
00644 #ifndef HAVE_Z_NODELETE
00645   if (__builtin_expect (&__dso_handle != NULL, 1))
00646     __cxa_atexit ((void (*) (void *)) pthread_atexit_retcode, NULL,
00647                 __dso_handle);
00648 #endif
00649 
00650   if (__pthread_max_stacksize == 0)
00651     __pthread_init_max_stacksize ();
00652   /* If basic initialization not done yet (e.g. we're called from a
00653      constructor run before our constructor), do it now */
00654   if (__pthread_initial_thread_bos == NULL) pthread_initialize();
00655   /* Setup stack for thread manager */
00656   __pthread_manager_thread_bos = malloc(THREAD_MANAGER_STACK_SIZE);
00657   if (__pthread_manager_thread_bos == NULL) return -1;
00658   __pthread_manager_thread_tos =
00659     __pthread_manager_thread_bos + THREAD_MANAGER_STACK_SIZE;
00660   /* Setup pipe to communicate with thread manager */
00661   if (pipe(manager_pipe) == -1) {
00662     free(__pthread_manager_thread_bos);
00663     return -1;
00664   }
00665 
00666 #ifdef USE_TLS
00667   /* Allocate memory for the thread descriptor and the dtv.  */
00668   tcbp = _dl_allocate_tls (NULL);
00669   if (tcbp == NULL) {
00670     free(__pthread_manager_thread_bos);
00671     close_not_cancel(manager_pipe[0]);
00672     close_not_cancel(manager_pipe[1]);
00673     return -1;
00674   }
00675 
00676 # if TLS_TCB_AT_TP
00677   mgr = (pthread_descr) tcbp;
00678 # elif TLS_DTV_AT_TP
00679   /* pthread_descr is located right below tcbhead_t which _dl_allocate_tls
00680      returns.  */
00681   mgr = (pthread_descr) ((char *) tcbp - TLS_PRE_TCB_SIZE);
00682 # endif
00683   __pthread_handles[1].h_descr = manager_thread = mgr;
00684 
00685   /* Initialize the descriptor.  */
00686 #if !defined USE_TLS || !TLS_DTV_AT_TP
00687   mgr->p_header.data.tcb = tcbp;
00688   mgr->p_header.data.self = mgr;
00689   mgr->p_header.data.multiple_threads = 1;
00690 #elif TLS_MULTIPLE_THREADS_IN_TCB
00691   mgr->p_multiple_threads = 1;
00692 #endif
00693   mgr->p_lock = &__pthread_handles[1].h_lock;
00694 # ifndef HAVE___THREAD
00695   mgr->p_errnop = &mgr->p_errno;
00696 # endif
00697   mgr->p_start_args = (struct pthread_start_args) PTHREAD_START_ARGS_INITIALIZER(__pthread_manager);
00698   mgr->p_nr = 1;
00699 # if __LT_SPINLOCK_INIT != 0
00700   self->p_resume_count = (struct pthread_atomic) __ATOMIC_INITIALIZER;
00701 # endif
00702   mgr->p_alloca_cutoff = PTHREAD_STACK_MIN / 4;
00703 #else
00704   mgr = &__pthread_manager_thread;
00705 #endif
00706 
00707   /* Copy the stack guard canary.  */
00708 #ifdef THREAD_COPY_STACK_GUARD
00709   THREAD_COPY_STACK_GUARD (mgr);
00710 #endif
00711 
00712   /* Copy the pointer guard value.  */
00713 #ifdef THREAD_COPY_POINTER_GUARD
00714   THREAD_COPY_POINTER_GUARD (mgr);
00715 #endif
00716 
00717   __pthread_manager_request = manager_pipe[1]; /* writing end */
00718   __pthread_manager_reader = manager_pipe[0]; /* reading end */
00719 
00720   /* Start the thread manager */
00721   pid = 0;
00722 #ifdef USE_TLS
00723   if (__linuxthreads_initial_report_events != 0)
00724     THREAD_SETMEM (((pthread_descr) NULL), p_report_events,
00725                  __linuxthreads_initial_report_events);
00726   report_events = THREAD_GETMEM (((pthread_descr) NULL), p_report_events);
00727 #else
00728   if (__linuxthreads_initial_report_events != 0)
00729     __pthread_initial_thread.p_report_events
00730       = __linuxthreads_initial_report_events;
00731   report_events = __pthread_initial_thread.p_report_events;
00732 #endif
00733   if (__builtin_expect (report_events, 0))
00734     {
00735       /* It's a bit more complicated.  We have to report the creation of
00736         the manager thread.  */
00737       int idx = __td_eventword (TD_CREATE);
00738       uint32_t mask = __td_eventmask (TD_CREATE);
00739       uint32_t event_bits;
00740 
00741 #ifdef USE_TLS
00742       event_bits = THREAD_GETMEM_NC (((pthread_descr) NULL),
00743                                  p_eventbuf.eventmask.event_bits[idx]);
00744 #else
00745       event_bits = __pthread_initial_thread.p_eventbuf.eventmask.event_bits[idx];
00746 #endif
00747 
00748       if ((mask & (__pthread_threads_events.event_bits[idx] | event_bits))
00749          != 0)
00750        {
00751          __pthread_lock(mgr->p_lock, NULL);
00752 
00753 #ifdef NEED_SEPARATE_REGISTER_STACK
00754          pid = __clone2(__pthread_manager_event,
00755                       (void **) __pthread_manager_thread_bos,
00756                       THREAD_MANAGER_STACK_SIZE,
00757                       CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_SYSVSEM,
00758                       mgr);
00759 #elif _STACK_GROWS_UP
00760          pid = __clone(__pthread_manager_event,
00761                      (void **) __pthread_manager_thread_bos,
00762                      CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_SYSVSEM,
00763                      mgr);
00764 #else
00765          pid = __clone(__pthread_manager_event,
00766                      (void **) __pthread_manager_thread_tos,
00767                      CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_SYSVSEM,
00768                      mgr);
00769 #endif
00770 
00771          if (pid != -1)
00772            {
00773              /* Now fill in the information about the new thread in
00774                 the newly created thread's data structure.  We cannot let
00775                 the new thread do this since we don't know whether it was
00776                 already scheduled when we send the event.  */
00777              mgr->p_eventbuf.eventdata = mgr;
00778              mgr->p_eventbuf.eventnum = TD_CREATE;
00779              __pthread_last_event = mgr;
00780              mgr->p_tid = 2* PTHREAD_THREADS_MAX + 1;
00781              mgr->p_pid = pid;
00782 
00783              /* Now call the function which signals the event.  */
00784              __linuxthreads_create_event ();
00785            }
00786 
00787          /* Now restart the thread.  */
00788          __pthread_unlock(mgr->p_lock);
00789        }
00790     }
00791 
00792   if (__builtin_expect (pid, 0) == 0)
00793     {
00794 #ifdef NEED_SEPARATE_REGISTER_STACK
00795       pid = __clone2(__pthread_manager, (void **) __pthread_manager_thread_bos,
00796                    THREAD_MANAGER_STACK_SIZE,
00797                    CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_SYSVSEM, mgr);
00798 #elif _STACK_GROWS_UP
00799       pid = __clone(__pthread_manager, (void **) __pthread_manager_thread_bos,
00800                   CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_SYSVSEM, mgr);
00801 #else
00802       pid = __clone(__pthread_manager, (void **) __pthread_manager_thread_tos,
00803                   CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_SYSVSEM, mgr);
00804 #endif
00805     }
00806   if (__builtin_expect (pid, 0) == -1) {
00807 #ifdef USE_TLS
00808     _dl_deallocate_tls (tcbp, true);
00809 #endif
00810     free(__pthread_manager_thread_bos);
00811     close_not_cancel(manager_pipe[0]);
00812     close_not_cancel(manager_pipe[1]);
00813     return -1;
00814   }
00815   mgr->p_tid = 2* PTHREAD_THREADS_MAX + 1;
00816   mgr->p_pid = pid;
00817   /* Make gdb aware of new thread manager */
00818   if (__builtin_expect (__pthread_threads_debug, 0) && __pthread_sig_debug > 0)
00819     {
00820       raise(__pthread_sig_debug);
00821       /* We suspend ourself and gdb will wake us up when it is
00822         ready to handle us. */
00823       __pthread_wait_for_restart_signal(thread_self());
00824     }
00825   /* Synchronize debugging of the thread manager */
00826   request.req_kind = REQ_DEBUG;
00827   TEMP_FAILURE_RETRY(write_not_cancel(__pthread_manager_request,
00828                                   (char *) &request, sizeof(request)));
00829   return 0;
00830 }
00831 
00832 /* Thread creation */
00833 
00834 int __pthread_create_2_1(pthread_t *thread, const pthread_attr_t *attr,
00835                       void * (*start_routine)(void *), void *arg)
00836 {
00837   pthread_descr self = thread_self();
00838   struct pthread_request request;
00839   int retval;
00840   if (__builtin_expect (__pthread_manager_request, 0) < 0) {
00841     if (__pthread_initialize_manager() < 0) return EAGAIN;
00842   }
00843   request.req_thread = self;
00844   request.req_kind = REQ_CREATE;
00845   request.req_args.create.attr = attr;
00846   request.req_args.create.fn = start_routine;
00847   request.req_args.create.arg = arg;
00848   sigprocmask(SIG_SETMASK, (const sigset_t *) NULL,
00849               &request.req_args.create.mask);
00850   TEMP_FAILURE_RETRY(write_not_cancel(__pthread_manager_request,
00851                                   (char *) &request, sizeof(request)));
00852   suspend(self);
00853   retval = THREAD_GETMEM(self, p_retcode);
00854   if (__builtin_expect (retval, 0) == 0)
00855     *thread = (pthread_t) THREAD_GETMEM(self, p_retval);
00856   return retval;
00857 }
00858 
00859 versioned_symbol (libpthread, __pthread_create_2_1, pthread_create, GLIBC_2_1);
00860 
00861 #if SHLIB_COMPAT (libpthread, GLIBC_2_0, GLIBC_2_1)
00862 
00863 int __pthread_create_2_0(pthread_t *thread, const pthread_attr_t *attr,
00864                       void * (*start_routine)(void *), void *arg)
00865 {
00866   /* The ATTR attribute is not really of type `pthread_attr_t *'.  It has
00867      the old size and access to the new members might crash the program.
00868      We convert the struct now.  */
00869   pthread_attr_t new_attr;
00870 
00871   if (attr != NULL)
00872     {
00873       size_t ps = __getpagesize ();
00874 
00875       memcpy (&new_attr, attr,
00876              (size_t) &(((pthread_attr_t*)NULL)->__guardsize));
00877       new_attr.__guardsize = ps;
00878       new_attr.__stackaddr_set = 0;
00879       new_attr.__stackaddr = NULL;
00880       new_attr.__stacksize = STACK_SIZE - ps;
00881       attr = &new_attr;
00882     }
00883   return __pthread_create_2_1 (thread, attr, start_routine, arg);
00884 }
00885 compat_symbol (libpthread, __pthread_create_2_0, pthread_create, GLIBC_2_0);
00886 #endif
00887 
00888 /* Simple operations on thread identifiers */
00889 
00890 pthread_descr __pthread_thread_self(void)
00891 {
00892   return thread_self();
00893 }
00894 
00895 pthread_t __pthread_self(void)
00896 {
00897   pthread_descr self = thread_self();
00898   return THREAD_GETMEM(self, p_tid);
00899 }
00900 strong_alias (__pthread_self, pthread_self);
00901 
int __pthread_equal(pthread_t thread1, pthread_t thread2)
{
  /* Thread ids are plain scalars here, so equality is a direct
     comparison; return exactly 0 or 1.  */
  return thread1 == thread2 ? 1 : 0;
}
00906 strong_alias (__pthread_equal, pthread_equal);
00907 
00908 /* Helper function for thread_self in the case of user-provided stacks */
00909 
00910 #ifndef THREAD_SELF
00911 
00912 pthread_descr __pthread_find_self(void)
00913 {
00914   char * sp = CURRENT_STACK_FRAME;
00915   pthread_handle h;
00916 
00917   /* __pthread_handles[0] is the initial thread, __pthread_handles[1] is
00918      the manager threads handled specially in thread_self(), so start at 2 */
00919   h = __pthread_handles + 2;
00920 # ifdef _STACK_GROWS_UP
00921   while (! (sp >= (char *) h->h_descr && sp < h->h_descr->p_guardaddr)) h++;
00922 # else
00923   while (! (sp <= (char *) h->h_descr && sp >= h->h_bottom)) h++;
00924 # endif
00925   return h->h_descr;
00926 }
00927 
00928 #else
00929 
/* Map the current stack pointer back to a thread descriptor by scanning
   the handle table (used when THREAD_SELF is available but a stack-based
   lookup is still needed, e.g. for user-provided stacks).  The scan
   loops are unbounded, so the caller must be running on the manager
   stack or on a registered thread stack.  */
pthread_descr __pthread_self_stack(void)
{
  char *sp = CURRENT_STACK_FRAME;
  pthread_handle h;

  /* The manager thread is recognized by its dedicated stack range.  */
  if (sp >= __pthread_manager_thread_bos && sp < __pthread_manager_thread_tos)
    return manager_thread;
  /* Slots 0 (initial thread) and 1 (manager) are covered above and in
     thread_self(), so start scanning at slot 2.  */
  h = __pthread_handles + 2;
# ifdef USE_TLS
  /* With TLS, compare against the recorded stack bounds; unused slots
     have a NULL descriptor and must be skipped.  */
#  ifdef _STACK_GROWS_UP
  while (h->h_descr == NULL
        || ! (sp >= h->h_descr->p_stackaddr && sp < h->h_descr->p_guardaddr))
    h++;
#  else
  while (h->h_descr == NULL
        || ! (sp <= (char *) h->h_descr->p_stackaddr && sp >= h->h_bottom))
    h++;
#  endif
# else
  /* Without TLS the descriptor address itself bounds the stack range
     (the descriptor apparently lives at the stack's far end).  */
#  ifdef _STACK_GROWS_UP
  while (! (sp >= (char *) h->h_descr && sp < h->h_descr->p_guardaddr))
    h++;
#  else
  while (! (sp <= (char *) h->h_descr && sp >= h->h_bottom))
    h++;
#  endif
# endif
  return h->h_descr;
}
00959 
00960 #endif
00961 
00962 /* Thread scheduling */
00963 
00964 int __pthread_setschedparam(pthread_t thread, int policy,
00965                             const struct sched_param *param)
00966 {
00967   pthread_handle handle = thread_handle(thread);
00968   pthread_descr th;
00969 
00970   __pthread_lock(&handle->h_lock, NULL);
00971   if (__builtin_expect (invalid_handle(handle, thread), 0)) {
00972     __pthread_unlock(&handle->h_lock);
00973     return ESRCH;
00974   }
00975   th = handle->h_descr;
00976   if (__builtin_expect (__sched_setscheduler(th->p_pid, policy, param) == -1,
00977                      0)) {
00978     __pthread_unlock(&handle->h_lock);
00979     return errno;
00980   }
00981   th->p_priority = policy == SCHED_OTHER ? 0 : param->sched_priority;
00982   __pthread_unlock(&handle->h_lock);
00983   if (__pthread_manager_request >= 0)
00984     __pthread_manager_adjust_prio(th->p_priority);
00985   return 0;
00986 }
00987 strong_alias (__pthread_setschedparam, pthread_setschedparam);
00988 
00989 int __pthread_getschedparam(pthread_t thread, int *policy,
00990                             struct sched_param *param)
00991 {
00992   pthread_handle handle = thread_handle(thread);
00993   int pid, pol;
00994 
00995   __pthread_lock(&handle->h_lock, NULL);
00996   if (__builtin_expect (invalid_handle(handle, thread), 0)) {
00997     __pthread_unlock(&handle->h_lock);
00998     return ESRCH;
00999   }
01000   pid = handle->h_descr->p_pid;
01001   __pthread_unlock(&handle->h_lock);
01002   pol = __sched_getscheduler(pid);
01003   if (__builtin_expect (pol, 0) == -1) return errno;
01004   if (__sched_getparam(pid, param) == -1) return errno;
01005   *policy = pol;
01006   return 0;
01007 }
01008 strong_alias (__pthread_getschedparam, pthread_getschedparam);
01009 
int __pthread_yield (void)
{
  /* POSIX sched_yield() already provides the required semantics.  */
  int result = sched_yield ();
  return result;
}
01015 weak_alias (__pthread_yield, pthread_yield)
01016 
01017 /* Process-wide exit() request */
01018 
/* Process-exit handler: ask the thread manager to terminate every other
   thread, then wait to be restarted (or killed) by it.  RETCODE is the
   process exit status forwarded to the manager; ARG is unused.  */
static void pthread_onexit_process(int retcode, void *arg)
{
  /* Nothing to do if the manager was never started.  */
  if (__builtin_expect (__pthread_manager_request, 0) >= 0) {
    struct pthread_request request;
    pthread_descr self = thread_self();

    /* Make sure we come back here after suspend(), in case we entered
       from a signal handler.  */
    THREAD_SETMEM(self, p_signal_jmp, NULL);

    request.req_thread = self;
    request.req_kind = REQ_PROCESS_EXIT;
    request.req_args.exit.code = retcode;
    TEMP_FAILURE_RETRY(write_not_cancel(__pthread_manager_request,
                                   (char *) &request, sizeof(request)));
    /* Wait until the manager has processed the request.  */
    suspend(self);
    /* Main thread should accumulate times for thread manager and its
       children, so that timings for main thread account for all threads. */
    if (self == __pthread_main_thread)
      {
#ifdef USE_TLS
       waitpid(manager_thread->p_pid, NULL, __WCLONE);
#else
       waitpid(__pthread_manager_thread.p_pid, NULL, __WCLONE);
#endif
       /* Since all threads have been asynchronously terminated
           (possibly holding locks), free cannot be used any more.
           For mtrace, we'd like to print something though.  */
       /* #ifdef USE_TLS
          tcbhead_t *tcbp = (tcbhead_t *) manager_thread;
          # if TLS_DTV_AT_TP
          tcbp = (tcbhead_t) ((char *) tcbp + TLS_PRE_TCB_SIZE);
          # endif
          _dl_deallocate_tls (tcbp, true);
          #endif
          free (__pthread_manager_thread_bos); */
       __pthread_manager_thread_bos = __pthread_manager_thread_tos = NULL;
      }
  }
}
01059 
01060 #ifndef HAVE_Z_NODELETE
01061 static int __pthread_atexit_retcode;
01062 
01063 static void pthread_atexit_process(void *arg, int retcode)
01064 {
01065   pthread_onexit_process (retcode ?: __pthread_atexit_retcode, arg);
01066 }
01067 
/* Atexit hook that only records the process exit code; the value is
   later used as a fallback by pthread_atexit_process.  ARG is unused
   (required by the handler signature).  */
static void pthread_atexit_retcode(void *arg, int retcode)
{
  __pthread_atexit_retcode = retcode;
}
01072 #endif
01073 
01074 /* The handler for the RESTART signal just records the signal received
01075    in the thread descriptor, and optionally performs a siglongjmp
01076    (for pthread_cond_timedwait). */
01077 
01078 static void pthread_handle_sigrestart(int sig)
01079 {
01080   pthread_descr self = check_thread_self();
01081   THREAD_SETMEM(self, p_signal, sig);
01082   if (THREAD_GETMEM(self, p_signal_jmp) != NULL)
01083     siglongjmp(*THREAD_GETMEM(self, p_signal_jmp), 1);
01084 }
01085 
01086 /* The handler for the CANCEL signal checks for cancellation
01087    (in asynchronous mode), for process-wide exit and exec requests.
01088    For the thread manager thread, redirect the signal to
01089    __pthread_manager_sighandler. */
01090 
static void pthread_handle_sigcancel(int sig)
{
  pthread_descr self = check_thread_self();
  sigjmp_buf * jmpbuf;

  /* The manager thread shares this signal; redirect to its handler.  */
  if (self == manager_thread)
    {
      __pthread_manager_sighandler(sig);
      return;
    }
  /* A process-wide exit (or exec) request is pending: terminate now.  */
  if (__builtin_expect (__pthread_exit_requested, 0)) {
    /* Main thread should accumulate times for thread manager and its
       children, so that timings for main thread account for all threads. */
    if (self == __pthread_main_thread) {
#ifdef USE_TLS
      waitpid(manager_thread->p_pid, NULL, __WCLONE);
#else
      waitpid(__pthread_manager_thread.p_pid, NULL, __WCLONE);
#endif
    }
    _exit(__pthread_exit_code);
  }
  /* Is a cancellation pending while cancellation is enabled?  */
  if (__builtin_expect (THREAD_GETMEM(self, p_canceled), 0)
      && THREAD_GETMEM(self, p_cancelstate) == PTHREAD_CANCEL_ENABLE) {
    /* Asynchronous cancellation: exit straight out of the handler.  */
    if (THREAD_GETMEM(self, p_canceltype) == PTHREAD_CANCEL_ASYNCHRONOUS)
      __pthread_do_exit(PTHREAD_CANCELED, CURRENT_STACK_FRAME);
    /* Deferred cancellation: if a cancellation jump buffer is armed
       (a cancellation point is in progress), jump back to it.  */
    jmpbuf = THREAD_GETMEM(self, p_cancel_jmp);
    if (jmpbuf != NULL) {
      THREAD_SETMEM(self, p_cancel_jmp, NULL);
      siglongjmp(*jmpbuf, 1);
    }
  }
}
01124 
/* Handler for the DEBUG signal.
   The debugging strategy is as follows:
   On reception of a REQ_DEBUG request (sent by new threads created
   while in debugging mode), the thread manager raises
   __pthread_sig_debug at itself.  The debugger (if active) intercepts
   this signal, takes the new threads into account, and resumes the
   thread manager by propagating the signal, since it does not know
   what the signal is specifically used for.  In the current
   implementation, the thread manager simply discards it. */
01134 
static void pthread_handle_sigdebug(int sig)
{
  /* Deliberately empty: the signal exists only so an attached debugger
     can intercept it (see the comment above); the thread manager simply
     discards it.  */
}
01139 
01140 /* Reset the state of the thread machinery after a fork().
01141    Close the pipe used for requests and set the main thread to the forked
01142    thread.
01143    Notice that we can't free the stack segments, as the forked thread
01144    may hold pointers into them. */
01145 
void __pthread_reset_main_thread(void)
{
  pthread_descr self = thread_self();

  /* If a manager existed in the parent, discard its resources: the
     child has no manager thread, only the forked thread survives.  */
  if (__pthread_manager_request != -1) {
    /* Free the thread manager stack */
    free(__pthread_manager_thread_bos);
    __pthread_manager_thread_bos = __pthread_manager_thread_tos = NULL;
    /* Close the two ends of the pipe */
    close_not_cancel(__pthread_manager_request);
    close_not_cancel(__pthread_manager_reader);
    __pthread_manager_request = __pthread_manager_reader = -1;
  }

  /* Update the pid of the main thread */
  THREAD_SETMEM(self, p_pid, __getpid());
  /* Make the forked thread the main thread */
  __pthread_main_thread = self;
  /* Collapse the live-thread ring to just this thread.  */
  THREAD_SETMEM(self, p_nextlive, self);
  THREAD_SETMEM(self, p_prevlive, self);
#if !(USE_TLS && HAVE___THREAD)
  /* Now this thread modifies the global variables.  */
  THREAD_SETMEM(self, p_errnop, &_errno);
  THREAD_SETMEM(self, p_h_errnop, &_h_errno);
  THREAD_SETMEM(self, p_resp, &_res);
#endif

#ifndef FLOATING_STACKS
  /* This is to undo the setrlimit call in __pthread_init_max_stacksize.
     XXX This can be wrong if the user set the limit during the run.  */
 {
   struct rlimit limit;
   if (getrlimit (RLIMIT_STACK, &limit) == 0
       && limit.rlim_cur != limit.rlim_max)
     {
       limit.rlim_cur = limit.rlim_max;
       setrlimit(RLIMIT_STACK, &limit);
     }
 }
#endif
}
01187 
01188 /* Process-wide exec() request */
01189 
01190 void __pthread_kill_other_threads_np(void)
01191 {
01192   struct sigaction sa;
01193   /* Terminate all other threads and thread manager */
01194   pthread_onexit_process(0, NULL);
01195   /* Make current thread the main thread in case the calling thread
01196      changes its mind, does not exec(), and creates new threads instead. */
01197   __pthread_reset_main_thread();
01198 
01199   /* Reset the signal handlers behaviour for the signals the
01200      implementation uses since this would be passed to the new
01201      process.  */
01202   sigemptyset(&sa.sa_mask);
01203   sa.sa_flags = 0;
01204   sa.sa_handler = SIG_DFL;
01205   __libc_sigaction(__pthread_sig_restart, &sa, NULL);
01206   __libc_sigaction(__pthread_sig_cancel, &sa, NULL);
01207   if (__pthread_sig_debug > 0)
01208     __libc_sigaction(__pthread_sig_debug, &sa, NULL);
01209 }
01210 weak_alias (__pthread_kill_other_threads_np, pthread_kill_other_threads_np)
01211 
01212 /* Concurrency symbol level.  */
01213 static int current_level;
01214 
/* Record the concurrency hint.  LinuxThreads has no useful
   interpretation for it; the value is only stored so that
   pthread_getconcurrency can report it back.  Always succeeds.  */
int __pthread_setconcurrency(int level)
{
  /* We don't do anything unless we have found a useful interpretation.  */
  current_level = level;
  return 0;
}
01221 weak_alias (__pthread_setconcurrency, pthread_setconcurrency)
01222 
/* Report the hint last stored by pthread_setconcurrency (0 if never
   set).  */
int __pthread_getconcurrency(void)
{
  return current_level;
}
01227 weak_alias (__pthread_getconcurrency, pthread_getconcurrency)
01228 
01229 /* Primitives for controlling thread execution */
01230 
/* Block SELF until the restart signal is delivered.  sigsuspend returns
   on ANY unblocked signal, so the loop re-suspends until p_signal shows
   that it was specifically the restart signal that arrived.  */
void __pthread_wait_for_restart_signal(pthread_descr self)
{
  sigset_t mask;

  sigprocmask(SIG_SETMASK, NULL, &mask); /* Get current signal mask */
  sigdelset(&mask, __pthread_sig_restart); /* Unblock the restart signal */
  THREAD_SETMEM(self, p_signal, 0);
  do {
    __pthread_sigsuspend(&mask);   /* Wait for signal.  Must not be a
                                      cancellation point. */
  } while (THREAD_GETMEM(self, p_signal) !=__pthread_sig_restart);

  READ_MEMORY_BARRIER(); /* See comment in __pthread_restart_new */
}
01245 
01246 #if !__ASSUME_REALTIME_SIGNALS
01247 /* The _old variants are for 2.0 and early 2.1 kernels which don't have RT
01248    signals.
01249    On these kernels, we use SIGUSR1 and SIGUSR2 for restart and cancellation.
01250    Since the restart signal does not queue, we use an atomic counter to create
01251    queuing semantics. This is needed to resolve a rare race condition in
01252    pthread_cond_timedwait_relative. */
01253 
01254 void __pthread_restart_old(pthread_descr th)
01255 {
01256   if (pthread_atomic_increment(&th->p_resume_count) == -1)
01257     kill(th->p_pid, __pthread_sig_restart);
01258 }
01259 
01260 void __pthread_suspend_old(pthread_descr self)
01261 {
01262   if (pthread_atomic_decrement(&self->p_resume_count) <= 0)
01263     __pthread_wait_for_restart_signal(self);
01264 }
01265 
/* Suspend SELF until a restart arrives or the absolute time ABSTIME
   passes.  Returns 1 when the wakeup is fully accounted for (a restart
   was delivered or consumed here), 0 when the wait ended without a
   restart being consumed — the caller must then resolve the resulting
   race itself (see the long comment below).  */
int
__pthread_timedsuspend_old(pthread_descr self, const struct timespec *abstime)
{
  sigset_t unblock, initial_mask;
  int was_signalled = 0;
  sigjmp_buf jmpbuf;

  /* Only sleep if no resume was already queued on the counter.  */
  if (pthread_atomic_decrement(&self->p_resume_count) == 0) {
    /* Set up a longjmp handler for the restart signal, unblock
       the signal and sleep. */

    if (sigsetjmp(jmpbuf, 1) == 0) {
      THREAD_SETMEM(self, p_signal_jmp, &jmpbuf);
      THREAD_SETMEM(self, p_signal, 0);
      /* Unblock the restart signal */
      sigemptyset(&unblock);
      sigaddset(&unblock, __pthread_sig_restart);
      sigprocmask(SIG_UNBLOCK, &unblock, &initial_mask);

      while (1) {
       struct timeval now;
       struct timespec reltime;

       /* Compute a time offset relative to now.  */
       __gettimeofday (&now, NULL);
       reltime.tv_nsec = abstime->tv_nsec - now.tv_usec * 1000;
       reltime.tv_sec = abstime->tv_sec - now.tv_sec;
       if (reltime.tv_nsec < 0) {
         reltime.tv_nsec += 1000000000;
         reltime.tv_sec -= 1;
       }

       /* Sleep for the required duration. If woken by a signal,
          resume waiting as required by Single Unix Specification.  */
       if (reltime.tv_sec < 0 || __libc_nanosleep(&reltime, NULL) == 0)
         break;
      }

      /* Block the restart signal again */
      sigprocmask(SIG_SETMASK, &initial_mask, NULL);
      was_signalled = 0;
    } else {
      /* Arrived here via siglongjmp from the restart handler.  */
      was_signalled = 1;
    }
    THREAD_SETMEM(self, p_signal_jmp, NULL);
  }

  /* Now was_signalled is true if we exited the above code
     due to the delivery of a restart signal.  In that case,
     we know we have been dequeued and resumed and that the
     resume count is balanced.  Otherwise, there are some
     cases to consider. First, try to bump up the resume count
     back to zero. If it goes to 1, it means restart() was
     invoked on this thread. The signal must be consumed
     and the count bumped down and everything is cool. We
     can return a 1 to the caller.
     Otherwise, no restart was delivered yet, so a potential
     race exists; we return a 0 to the caller which must deal
     with this race in an appropriate way; for example by
     atomically removing the thread from consideration for a
     wakeup---if such a thing fails, it means a restart is
     being delivered. */

  if (!was_signalled) {
    if (pthread_atomic_increment(&self->p_resume_count) != -1) {
      __pthread_wait_for_restart_signal(self);
      pthread_atomic_decrement(&self->p_resume_count); /* should be zero now! */
      /* woke spontaneously and consumed restart signal */
      return 1;
    }
    /* woke spontaneously but did not consume restart---caller must resolve */
    return 0;
  }
  /* woken due to restart signal */
  return 1;
}
01342 #endif /* __ASSUME_REALTIME_SIGNALS */
01343 
/* Wake TH, which is (or will be) blocked in a suspend function, by
   sending the restart signal to its kernel pid.  */
void __pthread_restart_new(pthread_descr th)
{
  /* The barrier is probably not needed, in which case it still documents
     our assumptions. The intent is to commit previous writes to shared
     memory so the woken thread will have a consistent view.  Complementary
     read barriers are present in the suspend functions. */
  WRITE_MEMORY_BARRIER();
  kill(th->p_pid, __pthread_sig_restart);
}
01353 
01354 /* There is no __pthread_suspend_new because it would just
01355    be a wasteful wrapper for __pthread_wait_for_restart_signal */
01356 
/* Suspend SELF until restarted or until the absolute time ABSTIME has
   passed.  Returns 1 if woken by the restart signal, 0 on timeout or a
   foreign signal — an ambiguous outcome the caller must resolve (the
   thread remains eligible for a restart wakeup, so a race exists).  */
int
__pthread_timedsuspend_new(pthread_descr self, const struct timespec *abstime)
{
  sigset_t unblock, initial_mask;
  int was_signalled = 0;
  sigjmp_buf jmpbuf;

  if (sigsetjmp(jmpbuf, 1) == 0) {
    THREAD_SETMEM(self, p_signal_jmp, &jmpbuf);
    THREAD_SETMEM(self, p_signal, 0);
    /* Unblock the restart signal */
    sigemptyset(&unblock);
    sigaddset(&unblock, __pthread_sig_restart);
    sigprocmask(SIG_UNBLOCK, &unblock, &initial_mask);

    while (1) {
      struct timeval now;
      struct timespec reltime;

      /* Compute a time offset relative to now.  */
      __gettimeofday (&now, NULL);
      reltime.tv_nsec = abstime->tv_nsec - now.tv_usec * 1000;
      reltime.tv_sec = abstime->tv_sec - now.tv_sec;
      if (reltime.tv_nsec < 0) {
       reltime.tv_nsec += 1000000000;
       reltime.tv_sec -= 1;
      }

      /* Sleep for the required duration. If woken by a signal,
        resume waiting as required by Single Unix Specification.  */
      if (reltime.tv_sec < 0 || __libc_nanosleep(&reltime, NULL) == 0)
       break;
    }

    /* Block the restart signal again */
    sigprocmask(SIG_SETMASK, &initial_mask, NULL);
    was_signalled = 0;
  } else {
    /* Arrived here via siglongjmp from the restart handler.  */
    was_signalled = 1;
  }
  THREAD_SETMEM(self, p_signal_jmp, NULL);

  /* Now was_signalled is true if we exited the above code
     due to the delivery of a restart signal.  In that case,
     everything is cool. We have been removed from whatever
     we were waiting on by the other thread, and consumed its signal.

     Otherwise this thread woke up spontaneously, or due to a signal other
     than restart. This is an ambiguous case that must be resolved by
     the caller; the thread is still eligible for a restart wakeup
     so there is a race. */

  READ_MEMORY_BARRIER(); /* See comment in __pthread_restart_new */
  return was_signalled;
}
01412 
01413 
01414 /* Debugging aid */
01415 
01416 #ifdef DEBUG
01417 #include <stdarg.h>
01418 
/* Debug-only helper: print a pid-prefixed, printf-formatted message to
   stderr, retrying the write on EINTR without being a cancellation
   point.  */
void __pthread_message(const char * fmt, ...)
{
  char buffer[1024];
  va_list args;
  int len;

  /* Compute the actual prefix length instead of assuming 8 bytes: with
     "%05d", pids wider than five digits would otherwise overrun the
     hard-coded offset and garble the message.  */
  len = snprintf(buffer, sizeof(buffer), "%05d : ", __getpid());
  if (len < 0 || (size_t) len >= sizeof(buffer))
    len = 0;
  va_start(args, fmt);
  vsnprintf(buffer + len, sizeof(buffer) - len, fmt, args);
  va_end(args);
  TEMP_FAILURE_RETRY(write_not_cancel(2, buffer, strlen(buffer)));
}
01429 
01430 #endif