glibc 2.9
pthread_create.c

/* Copyright (C) 2002-2007, 2008 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */

#include <errno.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include "pthreadP.h"
#include <hp-timing.h>
#include <ldsodefs.h>
#include <atomic.h>
#include <libc-internal.h>
#include <resolv.h>
#include <kernel-features.h>

#include <shlib-compat.h>


/* Local function to start thread and handle cleanup.  */
static int start_thread (void *arg);


/* Nonzero if debugging mode is enabled.  */
int __pthread_debug;

/* Globally enabled events.  */
static td_thr_events_t __nptl_threads_events;

/* Pointer to descriptor with the last event.  */
static struct pthread *__nptl_last_event;

/* Number of threads running.  */
unsigned int __nptl_nthreads = 1;


/* Code to allocate and deallocate a stack.  */
#include "allocatestack.c"

/* Code to create the thread.  */
#include <createthread.c>


struct pthread *
internal_function
__find_in_stack_list (pd)
     struct pthread *pd;
{
  list_t *entry;
  struct pthread *result = NULL;

  lll_lock (stack_cache_lock, LLL_PRIVATE);

  list_for_each (entry, &stack_used)
    {
      struct pthread *curp;

      curp = list_entry (entry, struct pthread, list);
      if (curp == pd)
        {
          result = curp;
          break;
        }
    }

  if (result == NULL)
    list_for_each (entry, &__stack_user)
      {
        struct pthread *curp;

        curp = list_entry (entry, struct pthread, list);
        if (curp == pd)
          {
            result = curp;
            break;
          }
      }

  lll_unlock (stack_cache_lock, LLL_PRIVATE);

  return result;
}
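
list_entry above is the usual container-of idiom: it maps a pointer to the embedded list member back to a pointer to the enclosing struct pthread. A minimal, self-contained sketch of the idiom, with illustrative names that are not glibc's:

#include <stddef.h>

struct node { struct node *next; };
struct widget { int value; struct node link; };

/* Recover the enclosing object from a pointer to its embedded member.  */
#define container_of(ptr, type, member) \
  ((type *) ((char *) (ptr) - offsetof (type, member)))

static struct widget *
widget_of (struct node *n)
{
  return container_of (n, struct widget, link);
}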


/* Deallocate POSIX thread-local-storage.  */
void
attribute_hidden
__nptl_deallocate_tsd (void)
{
  struct pthread *self = THREAD_SELF;

  /* Maybe no data was ever allocated.  This happens often so we have
     a flag for this.  */
  if (THREAD_GETMEM (self, specific_used))
    {
      size_t round;
      size_t cnt;

      round = 0;
      do
        {
          size_t idx;

          /* So far no new nonzero data entry.  */
          THREAD_SETMEM (self, specific_used, false);

          for (cnt = idx = 0; cnt < PTHREAD_KEY_1STLEVEL_SIZE; ++cnt)
            {
              struct pthread_key_data *level2;

              level2 = THREAD_GETMEM_NC (self, specific, cnt);

              if (level2 != NULL)
                {
                  size_t inner;

                  for (inner = 0; inner < PTHREAD_KEY_2NDLEVEL_SIZE;
                       ++inner, ++idx)
                    {
                      void *data = level2[inner].data;

                      if (data != NULL)
                        {
                          /* Always clear the data.  */
                          level2[inner].data = NULL;

                          /* Make sure the data corresponds to a valid
                             key.  This test fails if the key was
                             deallocated and also if it was
                             re-allocated.  It is the user's
                             responsibility to free the memory in this
                             case.  */
                          if (level2[inner].seq
                              == __pthread_keys[idx].seq
                              /* It is not necessary to register a destructor
                                 function.  */
                              && __pthread_keys[idx].destr != NULL)
                            /* Call the user-provided destructor.  */
                            __pthread_keys[idx].destr (data);
                        }
                    }
                }
              else
                /* Each second-level block covers PTHREAD_KEY_2NDLEVEL_SIZE
                   keys, so skip that many slots for a missing block.  */
                idx += PTHREAD_KEY_2NDLEVEL_SIZE;
            }

          if (THREAD_GETMEM (self, specific_used) == 0)
            /* No data has been modified.  */
            goto just_free;
        }
      /* We only repeat the process a fixed number of times.  */
      while (__builtin_expect (++round < PTHREAD_DESTRUCTOR_ITERATIONS, 0));

      /* Just clear the memory of the first block for reuse.  */
      memset (&THREAD_SELF->specific_1stblock, '\0',
              sizeof (self->specific_1stblock));

    just_free:
      /* Free the memory for the other blocks.  */
      for (cnt = 1; cnt < PTHREAD_KEY_1STLEVEL_SIZE; ++cnt)
        {
          struct pthread_key_data *level2;

          level2 = THREAD_GETMEM_NC (self, specific, cnt);
          if (level2 != NULL)
            {
              /* The first block is allocated as part of the thread
                 descriptor.  */
              free (level2);
              THREAD_SETMEM_NC (self, specific, cnt, NULL);
            }
        }

      THREAD_SETMEM (self, specific_used, false);
    }
}
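
The destructor loop above is what programs observe through the pthread key API: each value installed with pthread_setspecific is handed to its key's destructor at thread exit, and if a destructor installs a new non-NULL value the whole pass repeats, capped at PTHREAD_DESTRUCTOR_ITERATIONS rounds. A minimal sketch of that contract (error checking omitted):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_key_t key;

/* Called during thread teardown for every non-NULL key value.  */
static void
destroy (void *value)
{
  printf ("destructor ran for %p\n", value);
  free (value);
}

static void *
worker (void *arg)
{
  pthread_setspecific (key, malloc (16));
  return NULL;                  /* Destructor fires after this returns.  */
}

int
main (void)
{
  pthread_t t;
  pthread_key_create (&key, destroy);
  pthread_create (&t, NULL, worker, NULL);
  pthread_join (t, NULL);
  pthread_key_delete (key);
  return 0;
}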


/* Deallocate a thread's stack after optionally making sure the thread
   descriptor is still valid.  */
void
internal_function
__free_tcb (struct pthread *pd)
{
  /* The thread is exiting now.  */
  if (__builtin_expect (atomic_bit_test_set (&pd->cancelhandling,
                                             TERMINATED_BIT) == 0, 1))
    {
      /* Remove the descriptor from the list.  */
      if (DEBUGGING_P && __find_in_stack_list (pd) == NULL)
        /* Something is really wrong.  The descriptor for a still
           running thread is gone.  */
        abort ();

      /* Free TPP data.  */
      if (__builtin_expect (pd->tpp != NULL, 0))
        {
          struct priority_protection_data *tpp = pd->tpp;

          pd->tpp = NULL;
          free (tpp);
        }

      /* Queue the stack memory block for reuse and exit the process.  The
         kernel will signal via writing to the address returned by
         QUEUE-STACK when the stack is available.  */
      __deallocate_stack (pd);
    }
}
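
Since __free_tcb runs as soon as a detached thread terminates, its descriptor and stack may be recycled immediately; the user-visible consequence is that a detached thread's pthread_t must never be joined or detached again. A short sketch of that rule (error checking omitted):

#include <pthread.h>

static void *
task (void *arg)
{
  return NULL;       /* For a detached thread, the TCB is freed here.  */
}

int
main (void)
{
  pthread_t t;
  pthread_attr_t attr;

  pthread_attr_init (&attr);
  pthread_attr_setdetachstate (&attr, PTHREAD_CREATE_DETACHED);
  pthread_create (&t, &attr, task, NULL);
  pthread_attr_destroy (&attr);

  /* pthread_join (t, NULL) here would be undefined: the descriptor
     may already have been reused for another thread.  */

  pthread_exit (NULL);   /* Exit main without terminating the process.  */
}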


static int
start_thread (void *arg)
{
  struct pthread *pd = (struct pthread *) arg;

#if HP_TIMING_AVAIL
  /* Remember the time when the thread was started.  */
  hp_timing_t now;
  HP_TIMING_NOW (now);
  THREAD_SETMEM (pd, cpuclock_offset, now);
#endif

  /* Initialize resolver state pointer.  */
  __resp = &pd->res;

#ifdef __NR_set_robust_list
# ifndef __ASSUME_SET_ROBUST_LIST
  if (__set_robust_list_avail >= 0)
# endif
    {
      INTERNAL_SYSCALL_DECL (err);
      /* This call should never fail because the initial call in init.c
         succeeded.  */
      INTERNAL_SYSCALL (set_robust_list, err, 2, &pd->robust_head,
                        sizeof (struct robust_list_head));
    }
#endif

  /* If the parent was running cancellation handlers while creating
     the thread the new thread inherited the signal mask.  Reset the
     cancellation signal mask.  */
  if (__builtin_expect (pd->parent_cancelhandling & CANCELING_BITMASK, 0))
    {
      INTERNAL_SYSCALL_DECL (err);
      sigset_t mask;
      __sigemptyset (&mask);
      __sigaddset (&mask, SIGCANCEL);
      (void) INTERNAL_SYSCALL (rt_sigprocmask, err, 4, SIG_UNBLOCK, &mask,
                               NULL, _NSIG / 8);
    }

  /* This is where the try/finally block should be created.  For
     compilers without that support we do use setjmp.  */
  struct pthread_unwind_buf unwind_buf;

  /* No previous handlers.  */
  unwind_buf.priv.data.prev = NULL;
  unwind_buf.priv.data.cleanup = NULL;

  int not_first_call;
  not_first_call = setjmp ((struct __jmp_buf_tag *) unwind_buf.cancel_jmp_buf);
  if (__builtin_expect (! not_first_call, 1))
    {
      /* Store the new cleanup handler info.  */
      THREAD_SETMEM (pd, cleanup_jmp_buf, &unwind_buf);

      if (__builtin_expect (pd->stopped_start, 0))
        {
          int oldtype = CANCEL_ASYNC ();

          /* Get the lock the parent locked to force synchronization.  */
          lll_lock (pd->lock, LLL_PRIVATE);
          /* And give it up right away.  */
          lll_unlock (pd->lock, LLL_PRIVATE);

          CANCEL_RESET (oldtype);
        }

      /* Run the code the user provided.  */
#ifdef CALL_THREAD_FCT
      THREAD_SETMEM (pd, result, CALL_THREAD_FCT (pd));
#else
      THREAD_SETMEM (pd, result, pd->start_routine (pd->arg));
#endif
    }

  /* Run the destructor for the thread-local data.  */
  __nptl_deallocate_tsd ();

  /* Clean up any state libc stored in thread-local variables.  */
  __libc_thread_freeres ();

  /* If this is the last thread we terminate the process now.  We
     do not notify the debugger, it might just irritate it if there
     is no thread left.  */
  if (__builtin_expect (atomic_decrement_and_test (&__nptl_nthreads), 0))
    /* This was the last thread.  */
    exit (0);

  /* Report the death of the thread if this is wanted.  */
  if (__builtin_expect (pd->report_events, 0))
    {
      /* See whether TD_DEATH is set in either of the event masks.  */
      const int idx = __td_eventword (TD_DEATH);
      const uint32_t mask = __td_eventmask (TD_DEATH);

      if ((mask & (__nptl_threads_events.event_bits[idx]
                   | pd->eventbuf.eventmask.event_bits[idx])) != 0)
        {
          /* Yep, we have to signal the death.  Add the descriptor to
             the list but only if it is not already on it.  */
          if (pd->nextevent == NULL)
            {
              pd->eventbuf.eventnum = TD_DEATH;
              pd->eventbuf.eventdata = pd;

              do
                pd->nextevent = __nptl_last_event;
              while (atomic_compare_and_exchange_bool_acq (&__nptl_last_event,
                                                           pd, pd->nextevent));
            }

          /* Now call the function to signal the event.  */
          __nptl_death_event ();
        }
    }

  /* The thread is exiting now.  Don't set this bit until after we've hit
     the event-reporting breakpoint, so that td_thr_get_info on us while at
     the breakpoint reports TD_THR_RUN state rather than TD_THR_ZOMBIE.  */
  atomic_bit_set (&pd->cancelhandling, EXITING_BIT);

#ifndef __ASSUME_SET_ROBUST_LIST
  /* If this thread has any robust mutexes locked, handle them now.  */
# if __WORDSIZE == 64
  void *robust = pd->robust_head.list;
# else
  __pthread_slist_t *robust = pd->robust_list.__next;
# endif
  /* We let the kernel do the notification if it is able to do so.
     If we have to do it here, there are certainly no PI mutexes
     involved, since the kernel support for them is even more recent.  */
  if (__set_robust_list_avail < 0
      && __builtin_expect (robust != (void *) &pd->robust_head, 0))
    {
      do
        {
          struct __pthread_mutex_s *this = (struct __pthread_mutex_s *)
            ((char *) robust - offsetof (struct __pthread_mutex_s,
                                         __list.__next));
          robust = *((void **) robust);

# ifdef __PTHREAD_MUTEX_HAVE_PREV
          this->__list.__prev = NULL;
# endif
          this->__list.__next = NULL;

          lll_robust_dead (this->__lock, /* XYZ */ LLL_SHARED);
        }
      while (robust != (void *) &pd->robust_head);
    }
#endif

  /* If the thread is detached free the TCB.  */
  if (IS_DETACHED (pd))
    /* Free the TCB.  */
    __free_tcb (pd);
  else if (__builtin_expect (pd->cancelhandling & SETXID_BITMASK, 0))
    {
      /* Some other thread might call any of the setXid functions and expect
         us to reply.  In this case wait until we did that.  */
      do
        lll_futex_wait (&pd->setxid_futex, 0, LLL_PRIVATE);
      while (pd->cancelhandling & SETXID_BITMASK);

      /* Reset the value so that the stack can be reused.  */
      pd->setxid_futex = 0;
    }

  /* We cannot call '_exit' here.  '_exit' will terminate the process.

     The 'exit' implementation in the kernel will signal when the
     process is really dead since 'clone' got passed the CLONE_CLEARTID
     flag.  The 'tid' field in the TCB will be set to zero.

     The exit code is zero since in case all threads exit by calling
     'pthread_exit' the exit status must be 0 (zero).  */
  __exit_thread_inline (0);

  /* NOTREACHED */
  return 0;
}
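
The result field stored by start_thread is what pthread_join later hands back to the joiner, and the exit (0) path above is why a process whose every thread calls pthread_exit terminates with status 0. A minimal demonstration of the result plumbing (error checking omitted):

#include <pthread.h>
#include <stdio.h>

static void *
answer (void *arg)
{
  return (void *) 42;          /* Ends up in the TCB's result field.  */
}

int
main (void)
{
  pthread_t t;
  void *result;

  pthread_create (&t, NULL, answer, NULL);
  pthread_join (t, &result);   /* Reads the stored result back out.  */
  printf ("thread returned %ld\n", (long) result);
  return 0;
}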


/* Default thread attributes for the case when the user does not
   provide any.  */
static const struct pthread_attr default_attr =
  {
    /* Just some value > 0 which gets rounded to the nearest page size.  */
    .guardsize = 1,
  };
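
The guardsize of 1 is deliberately minimal: POSIX only requires the guard area to be rounded up to a whole number of pages when the thread is created. The same request can be expressed through the attribute API; a small sketch (whether the get call reports the stored or the rounded value is left to the implementation):

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

int
main (void)
{
  pthread_attr_t attr;
  size_t guard;

  pthread_attr_init (&attr);
  pthread_attr_setguardsize (&attr, 1);   /* Rounded up to a page.  */
  pthread_attr_getguardsize (&attr, &guard);

  printf ("requested 1 byte, stored %zu, page size %ld\n",
          guard, sysconf (_SC_PAGESIZE));

  pthread_attr_destroy (&attr);
  return 0;
}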


int
__pthread_create_2_1 (newthread, attr, start_routine, arg)
     pthread_t *newthread;
     const pthread_attr_t *attr;
     void *(*start_routine) (void *);
     void *arg;
{
  STACK_VARIABLES;

  const struct pthread_attr *iattr = (struct pthread_attr *) attr;
  if (iattr == NULL)
    /* Is this the best idea?  On NUMA machines this could mean
       accessing far-away memory.  */
    iattr = &default_attr;

  struct pthread *pd = NULL;
  int err = ALLOCATE_STACK (iattr, &pd);
  if (__builtin_expect (err != 0, 0))
    /* Something went wrong.  Maybe a parameter of the attributes is
       invalid or we could not allocate memory.  */
    return err;


  /* Initialize the TCB.  All initializations with zero should be
     performed in 'get_cached_stack'.  This way we avoid doing this if
     the stack is freshly allocated with 'mmap'.  */

#ifdef TLS_TCB_AT_TP
  /* Reference to the TCB itself.  */
  pd->header.self = pd;

  /* Self-reference for TLS.  */
  pd->header.tcb = pd;
#endif

  /* Store the address of the start routine and the parameter.  Since
     we do not start the function directly the stillborn thread will
     get the information from its thread descriptor.  */
  pd->start_routine = start_routine;
  pd->arg = arg;

  /* Copy the thread attribute flags.  */
  struct pthread *self = THREAD_SELF;
  pd->flags = ((iattr->flags & ~(ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET))
               | (self->flags & (ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET)));

  /* Initialize the field for the ID of the thread which is waiting
     for us.  This is a self-reference in case the thread is created
     detached.  */
  pd->joinid = iattr->flags & ATTR_FLAG_DETACHSTATE ? pd : NULL;

  /* The debug events are inherited from the parent.  */
  pd->eventbuf = self->eventbuf;


  /* Copy the parent's scheduling parameters.  The flags will say what
     is valid and what is not.  */
  pd->schedpolicy = self->schedpolicy;
  pd->schedparam = self->schedparam;

  /* Copy the stack guard canary.  */
#ifdef THREAD_COPY_STACK_GUARD
  THREAD_COPY_STACK_GUARD (pd);
#endif

  /* Copy the pointer guard value.  */
#ifdef THREAD_COPY_POINTER_GUARD
  THREAD_COPY_POINTER_GUARD (pd);
#endif

  /* Determine scheduling parameters for the thread.  */
  if (attr != NULL
      && __builtin_expect ((iattr->flags & ATTR_FLAG_NOTINHERITSCHED) != 0, 0)
      && (iattr->flags & (ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET)) != 0)
    {
      INTERNAL_SYSCALL_DECL (scerr);

      /* Use the scheduling parameters the user provided.  */
      if (iattr->flags & ATTR_FLAG_POLICY_SET)
        pd->schedpolicy = iattr->schedpolicy;
      else if ((pd->flags & ATTR_FLAG_POLICY_SET) == 0)
        {
          pd->schedpolicy = INTERNAL_SYSCALL (sched_getscheduler, scerr, 1, 0);
          pd->flags |= ATTR_FLAG_POLICY_SET;
        }

      if (iattr->flags & ATTR_FLAG_SCHED_SET)
        memcpy (&pd->schedparam, &iattr->schedparam,
                sizeof (struct sched_param));
      else if ((pd->flags & ATTR_FLAG_SCHED_SET) == 0)
        {
          INTERNAL_SYSCALL (sched_getparam, scerr, 2, 0, &pd->schedparam);
          pd->flags |= ATTR_FLAG_SCHED_SET;
        }

      /* Check for valid priorities.  */
      int minprio = INTERNAL_SYSCALL (sched_get_priority_min, scerr, 1,
                                      iattr->schedpolicy);
      int maxprio = INTERNAL_SYSCALL (sched_get_priority_max, scerr, 1,
                                      iattr->schedpolicy);
      if (pd->schedparam.sched_priority < minprio
          || pd->schedparam.sched_priority > maxprio)
        {
          err = EINVAL;
          goto errout;
        }
    }

  /* Pass the descriptor to the caller.  */
  *newthread = (pthread_t) pd;

  /* Remember whether the thread is detached or not.  In case of an
     error we have to free the stacks of non-detached stillborn
     threads.  */
  bool is_detached = IS_DETACHED (pd);

  /* Start the thread.  */
  err = create_thread (pd, iattr, STACK_VARIABLES_ARGS);
  if (err != 0)
    {
      /* Something went wrong.  Free the resources.  */
      if (!is_detached)
        {
        errout:
          __deallocate_stack (pd);
        }
      return err;
    }

  return 0;
}
versioned_symbol (libpthread, __pthread_create_2_1, pthread_create, GLIBC_2_1);
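
As the error paths above show, pthread_create reports failure through its return value (EAGAIN, EINVAL, and so on) and does not set errno. Typical caller-side handling, as a sketch:

#include <pthread.h>
#include <stdio.h>
#include <string.h>

static void *
run (void *arg)
{
  return NULL;
}

int
main (void)
{
  pthread_t t;
  int err = pthread_create (&t, NULL, run, NULL);

  if (err != 0)        /* The error code is returned, not put in errno.  */
    {
      fprintf (stderr, "pthread_create: %s\n", strerror (err));
      return 1;
    }
  return pthread_join (t, NULL);
}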


#if SHLIB_COMPAT(libpthread, GLIBC_2_0, GLIBC_2_1)
int
__pthread_create_2_0 (newthread, attr, start_routine, arg)
     pthread_t *newthread;
     const pthread_attr_t *attr;
     void *(*start_routine) (void *);
     void *arg;
{
  /* The ATTR attribute is not really of type `pthread_attr_t *'.  It has
     the old size and access to the new members might crash the program.
     We convert the struct now.  */
  struct pthread_attr new_attr;

  if (attr != NULL)
    {
      struct pthread_attr *iattr = (struct pthread_attr *) attr;
      size_t ps = __getpagesize ();

      /* Copy values from the user-provided attributes.  */
      new_attr.schedparam = iattr->schedparam;
      new_attr.schedpolicy = iattr->schedpolicy;
      new_attr.flags = iattr->flags;

      /* Fill in default values for the fields not present in the old
         implementation.  */
      new_attr.guardsize = ps;
      new_attr.stackaddr = NULL;
      new_attr.stacksize = 0;
      new_attr.cpuset = NULL;

      /* We will pass this value on to the real implementation.  */
      attr = (pthread_attr_t *) &new_attr;
    }

  return __pthread_create_2_1 (newthread, attr, start_routine, arg);
}
compat_symbol (libpthread, __pthread_create_2_0, pthread_create,
               GLIBC_2_0);
#endif

/* Information for libthread_db.  */

#include "../nptl_db/db_info.c"

/* If pthread_create is present, libgcc_eh.a and libsupc++.a expect some
   other POSIX thread functions to be present as well.  */
PTHREAD_STATIC_FN_REQUIRE (pthread_mutex_lock)
PTHREAD_STATIC_FN_REQUIRE (pthread_mutex_trylock)
PTHREAD_STATIC_FN_REQUIRE (pthread_mutex_unlock)

PTHREAD_STATIC_FN_REQUIRE (pthread_once)
PTHREAD_STATIC_FN_REQUIRE (pthread_cancel)

PTHREAD_STATIC_FN_REQUIRE (pthread_key_create)
PTHREAD_STATIC_FN_REQUIRE (pthread_key_delete)
PTHREAD_STATIC_FN_REQUIRE (pthread_setspecific)
PTHREAD_STATIC_FN_REQUIRE (pthread_getspecific)