
glibc 2.9
fork.c
/* Copyright (C) 2002, 2003, 2007, 2008 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */

#include <assert.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/types.h>
#include <sysdep.h>
#include <libio/libioP.h>
#include <tls.h>
#include "fork.h"
#include <hp-timing.h>
#include <ldsodefs.h>
#include <bits/stdio-lock.h>
#include <atomic.h>

/* Pointer to the fork generation counter maintained by the thread
   library; the child bumps it after fork so that pthread_once can
   detect initializations that were in progress in the parent.  */
unsigned long int *__fork_generation_pointer;


/* The singly-linked list of all currently registered fork handlers.  */
struct fork_handler *__fork_handlers;

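/* For reference, a sketch of the entry type this list chains together.
   The real definition lives in fork.h; the fields shown here are only
   those inferred from their uses in this file:

     struct fork_handler
     {
       struct fork_handler *next;
       void (*prepare_handler) (void);
       void (*parent_handler) (void);
       void (*child_handler) (void);
       unsigned int refcntr;
       int need_signal;
     };  */
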
/* Reinitialize the lock of every open stdio stream.  Called in the
   child, where locks may still appear to be held by threads that no
   longer exist.  */
static void
fresetlockfiles (void)
{
  _IO_ITER i;

  for (i = _IO_iter_begin (); i != _IO_iter_end (); i = _IO_iter_next (i))
    _IO_lock_init (*((_IO_lock_t *) _IO_iter_file (i)->_lock));
}


pid_t
__libc_fork (void)
{
  pid_t pid;
  struct used_handler
  {
    struct fork_handler *handler;
    struct used_handler *next;
  } *allp = NULL;

  /* Run all the registered preparation handlers, in reverse order of
     registration.  While doing this we build up a list of all the
     entries.  */
  struct fork_handler *runp;
  while ((runp = __fork_handlers) != NULL)
    {
      /* Make sure we read from the current RUNP pointer.  */
      atomic_full_barrier ();

      unsigned int oldval = runp->refcntr;

      if (oldval == 0)
        /* Some other thread removed the entry just after the pointer
           was loaded.  Try again: either the list is now empty or we
           can retry with the new head.  */
        continue;

      /* Bump the reference counter.  */
      if (atomic_compare_and_exchange_bool_acq (&__fork_handlers->refcntr,
                                                oldval + 1, oldval))
        /* The value changed, try again.  */
        continue;

      /* We bumped the reference counter for the first entry in the
         list.  That means that none of the following entries will
         just go away.  The unloading code works in the order of the
         list.

         While executing the registered handlers we are building a
         list of all the entries so that we can go backward later on.  */
      while (1)
        {
          /* Execute the handler if there is one.  */
          if (runp->prepare_handler != NULL)
            runp->prepare_handler ();

          /* Create a new element for the list.  */
          struct used_handler *newp
            = (struct used_handler *) alloca (sizeof (*newp));
          newp->handler = runp;
          newp->next = allp;
          allp = newp;

          /* Advance to the next handler.  */
          runp = runp->next;
          if (runp == NULL)
            break;

          /* Bump the reference counter for the next entry.  */
          atomic_increment (&runp->refcntr);
        }

      /* We are done.  */
      break;
    }
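
  /* For context, a sketch of the other side of this protocol, assuming
     the unregister path in unregister-atfork.c (details may differ):

       deleted->handler->need_signal = 1;
       atomic_decrement (&deleted->handler->refcntr);
       unsigned int val;
       while ((val = deleted->handler->refcntr) != 0)
         lll_futex_wait (&deleted->handler->refcntr, val, LLL_PRIVATE);

     i.e. an unregistering thread (e.g. from dlclose) waits until every
     in-flight fork has dropped its reference; the futex wake in the
     parent branch below is what releases it.  */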

  _IO_list_lock ();

#ifndef NDEBUG
  pid_t ppid = THREAD_GETMEM (THREAD_SELF, tid);
#endif

  /* We need to prevent the getpid() code from updating the PID field
     so that, if a signal arrives in the child very early and the
     signal handler uses getpid(), the value returned is correct.  */
  pid_t parentpid = THREAD_GETMEM (THREAD_SELF, pid);
  THREAD_SETMEM (THREAD_SELF, pid, -parentpid);

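  /* A sketch of the consumer side, assuming the glibc getpid()
     implementation in getpid.c: a non-positive cached PID is treated
     as "unset or fork in progress", and the kernel is queried directly
     instead (without caching the result), roughly:

       pid_t result = THREAD_GETMEM (THREAD_SELF, pid);
       if (__builtin_expect (result <= 0, 0))
         result = really_getpid (result);   /* falls back to the syscall */
  */
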
#ifdef ARCH_FORK
  pid = ARCH_FORK ();
#else
# error "ARCH_FORK must be defined so that the CLONE_SETTID flag is used"
  pid = INLINE_SYSCALL (fork, 0);
#endif
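
  /* For illustration, the x86_64 definition of ARCH_FORK from this era
     of glibc (sysdeps/unix/sysv/linux/x86_64/fork.c; other ports
     differ in the clone argument order and flags):

       #define ARCH_FORK() \
         INLINE_SYSCALL (clone, 4,                                  \
                         CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID  \
                         | SIGCHLD, 0, NULL, &THREAD_SELF->tid)

     CLONE_CHILD_SETTID makes the kernel store the child's TID into
     THREAD_SELF->tid in the child, which is what lets the child copy
     its tid into the pid field below.  */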


  if (pid == 0)
    {
      struct pthread *self = THREAD_SELF;

      assert (THREAD_GETMEM (self, tid) != ppid);

      if (__fork_generation_pointer != NULL)
        /* Bump the fork generation.  The increment is 4 because the
           low two bits of the counter value are used by pthread_once
           for state flags.  */
        *__fork_generation_pointer += 4;

      /* Adjust the PID field for the new process.  */
      THREAD_SETMEM (self, pid, THREAD_GETMEM (self, tid));

#if HP_TIMING_AVAIL
      /* The CPU clock of the thread and process have to be set to zero.  */
      hp_timing_t now;
      HP_TIMING_NOW (now);
      THREAD_SETMEM (self, cpuclock_offset, now);
      GL(dl_cpuclock_offset) = now;
#endif

      /* Reset the file list.  These are recursive mutexes.  */
      fresetlockfiles ();

      /* Reset locks in the I/O code.  */
      _IO_list_resetlock ();

      /* Reset the lock the dynamic loader uses to protect its data.  */
      __rtld_lock_initialize (GL(dl_load_lock));

      /* Run the handlers registered for the child.  */
      while (allp != NULL)
        {
          if (allp->handler->child_handler != NULL)
            allp->handler->child_handler ();

          /* Note that we do not have to wake any possible waiter.
             This is the only thread in the new process.  The count
             may have been bumped up by other threads doing a fork.
             We reset it to 1, to avoid waiting for non-existing
             thread(s) to release the count.  */
          allp->handler->refcntr = 1;

          /* XXX We could at this point look through the object pool
             and mark all objects not on the __fork_handlers list as
             unused.  This is necessary in case the fork() happened
             while another thread called dlclose() and that call had
             to create a new list.  */

          allp = allp->next;
        }

      /* Initialize the fork lock.  */
      __fork_lock = LLL_LOCK_INITIALIZER;
    }
  else
    {
      assert (THREAD_GETMEM (THREAD_SELF, tid) == ppid);

      /* Restore the PID value.  */
      THREAD_SETMEM (THREAD_SELF, pid, parentpid);

      /* We execute this even if the 'fork' call failed.  */
      _IO_list_unlock ();

      /* Run the handlers registered for the parent.  */
      while (allp != NULL)
        {
          if (allp->handler->parent_handler != NULL)
            allp->handler->parent_handler ();

          /* Drop our reference; if it was the last one and an
             unregister operation is waiting, wake it.  The futex is
             the reference counter itself, so its address must be
             passed.  */
          if (atomic_decrement_and_test (&allp->handler->refcntr)
              && allp->handler->need_signal)
            lll_futex_wake (&allp->handler->refcntr, 1, LLL_PRIVATE);

          allp = allp->next;
        }
    }

  return pid;
}
weak_alias (__libc_fork, __fork)
libc_hidden_def (__fork)
weak_alias (__libc_fork, fork)
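
For context, __libc_fork is what a plain fork() call reaches via the weak
aliases above, and the handler lists it walks are populated by
pthread_atfork.  A minimal, self-contained usage sketch (standard POSIX
API, not part of this file; link with -lpthread on glibc of this era):

#include <pthread.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

static void prepare (void) { puts ("prepare: before fork, in parent"); }
static void parent (void)  { puts ("parent: after fork, in parent"); }
static void child (void)   { puts ("child: after fork, in child"); }

int
main (void)
{
  /* Registers the three handlers that __libc_fork runs around the
     actual clone/fork system call.  */
  pthread_atfork (prepare, parent, child);

  pid_t pid = fork ();
  if (pid == 0)
    _exit (0);                /* Child exits immediately.  */
  waitpid (pid, NULL, 0);     /* Parent reaps the child.  */
  return 0;
}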