
glibc 2.9
timer_routines.c
00001 /* Helper code for POSIX timer implementation on LinuxThreads.
00002    Copyright (C) 2000, 2001, 2002, 2004 Free Software Foundation, Inc.
00003    This file is part of the GNU C Library.
00004    Contributed by Kaz Kylheku <kaz@ashi.footprints.net>.
00005 
00006    The GNU C Library is free software; you can redistribute it and/or
00007    modify it under the terms of the GNU Lesser General Public License as
00008    published by the Free Software Foundation; either version 2.1 of the
00009    License, or (at your option) any later version.
00010 
00011    The GNU C Library is distributed in the hope that it will be useful,
00012    but WITHOUT ANY WARRANTY; without even the implied warranty of
00013    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
00014    Lesser General Public License for more details.
00015 
00016    You should have received a copy of the GNU Lesser General Public
00017    License along with the GNU C Library; see the file COPYING.LIB.  If not,
00018    write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
00019    Boston, MA 02111-1307, USA.  */
00020 
00021 #include <assert.h>
00022 #include <errno.h>
00023 #include <pthread.h>
00024 #include <stddef.h>
00025 #include <stdlib.h>
00026 #include <string.h>
00027 #include <sysdep.h>
00028 #include <time.h>
00029 #include <unistd.h>
00030 #include <sys/syscall.h>
00031 
00032 #include "posix-timer.h"
00033 
00034 
00035 /* Maximum number of threads used.  */
00036 #define THREAD_MAXNODES     16
00037 
00038 /* Array containing the descriptors for the used threads.  */
00039 static struct thread_node thread_array[THREAD_MAXNODES];
00040 
00041 /* Static array with the structures for all the timers.  */
00042 struct timer_node __timer_array[TIMER_MAX];
00043 
00044 /* Global lock to protect operation on the lists.  */
00045 pthread_mutex_t __timer_mutex = PTHREAD_MUTEX_INITIALIZER;
00046 
00047 /* Variable to protect initialization.  */
00048 pthread_once_t __timer_init_once_control = PTHREAD_ONCE_INIT;
00049 
00050 /* Nonzero if initialization of timer implementation failed.  */
00051 int __timer_init_failed;
00052 
00053 /* Node for the thread used to deliver signals.  */
00054 struct thread_node __timer_signal_thread_rclk;
00055 
00056 /* Lists to keep free and used timers and threads.  */
00057 struct list_links timer_free_list;
00058 struct list_links thread_free_list;
00059 struct list_links thread_active_list;
00060 
00061 
00062 #ifdef __NR_rt_sigqueueinfo
00063 extern int __syscall_rt_sigqueueinfo (int, int, siginfo_t *);
00064 #endif
00065 
00066 
00067 /* List handling functions.  */
00068 static inline void
00069 list_init (struct list_links *list)
00070 {
00071   list->next = list->prev = list;
00072 }
00073 
00074 static inline void
00075 list_append (struct list_links *list, struct list_links *newp)
00076 {
00077   newp->prev = list->prev;
00078   newp->next = list;
00079   list->prev->next = newp;
00080   list->prev = newp;
00081 }
00082 
00083 static inline void
00084 list_insbefore (struct list_links *list, struct list_links *newp)
00085 {
00086   list_append (list, newp);
00087 }
00088 
00089 /*
00090  * Like list_unlink_ip, except that calling it on a node that
00091  * is already unlinked is disastrous rather than a noop.
00092  */
00093 
00094 static inline void
00095 list_unlink (struct list_links *list)
00096 {
00097   struct list_links *lnext = list->next, *lprev = list->prev;
00098 
00099   lnext->prev = lprev;
00100   lprev->next = lnext;
00101 }
00102 
00103 static inline struct list_links *
00104 list_first (struct list_links *list)
00105 {
00106   return list->next;
00107 }
00108 
00109 static inline struct list_links *
00110 list_null (struct list_links *list)
00111 {
00112   return list;
00113 }
00114 
00115 static inline struct list_links *
00116 list_next (struct list_links *list)
00117 {
00118   return list->next;
00119 }
00120 
00121 static inline int
00122 list_isempty (struct list_links *list)
00123 {
00124   return list->next == list;
00125 }
00126 
00127 
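As an aside, the sentinel-based circular list above is easy to exercise in isolation. Below is a minimal, self-contained sketch; the demo_node type, the main() driver and the inlined pointer manipulation are illustrative only, mirroring list_init, list_append and list_unlink as defined above and assuming nothing beyond the next/prev layout of struct list_links:

/* Hedged sketch: standalone demo of the sentinel-based circular list.  */
#include <assert.h>

struct list_links { struct list_links *next, *prev; };

struct demo_node
{
  struct list_links links;
  int value;
};

int
main (void)
{
  struct list_links head;          /* Sentinel: list is empty when it points to itself.  */
  struct demo_node a = { .value = 1 }, b = { .value = 2 };

  head.next = head.prev = &head;   /* list_init  */

  /* list_append: insert before the sentinel, i.e. at the tail.  */
  a.links.prev = head.prev; a.links.next = &head;
  head.prev->next = &a.links; head.prev = &a.links;
  b.links.prev = head.prev; b.links.next = &head;
  head.prev->next = &b.links; head.prev = &b.links;

  assert (head.next == &a.links && head.next->next == &b.links);

  /* list_unlink: splice a node out; its own pointers are left stale.  */
  a.links.next->prev = a.links.prev;
  a.links.prev->next = a.links.next;
  assert (head.next == &b.links);
  return 0;
}
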
00128 /* Functions build on top of the list functions.  */
00129 static inline struct thread_node *
00130 thread_links2ptr (struct list_links *list)
00131 {
00132   return (struct thread_node *) ((char *) list
00133                              - offsetof (struct thread_node, links));
00134 }
00135 
00136 static inline struct timer_node *
00137 timer_links2ptr (struct list_links *list)
00138 {
00139   return (struct timer_node *) ((char *) list
00140                             - offsetof (struct timer_node, links));
00141 }
00142 
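These links2ptr helpers are the usual offsetof-based container-of idiom: from a pointer to the embedded links member, subtract the member's offset to recover the enclosing structure. A minimal sketch with a hypothetical outer type:

#include <assert.h>
#include <stddef.h>

struct links { struct links *next, *prev; };

struct outer
{
  int payload;
  struct links links;   /* Embedded list node, not necessarily the first member.  */
};

static inline struct outer *
outer_links2ptr (struct links *l)
{
  /* Same arithmetic as thread_links2ptr/timer_links2ptr above.  */
  return (struct outer *) ((char *) l - offsetof (struct outer, links));
}

int
main (void)
{
  struct outer o = { .payload = 42 };
  assert (outer_links2ptr (&o.links) == &o);
  return 0;
}
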
00143 
00144 /* Initialize a newly allocated thread structure.  */
00145 static void
00146 thread_init (struct thread_node *thread, const pthread_attr_t *attr, clockid_t clock_id)
00147 {
00148   if (attr != NULL)
00149     thread->attr = *attr;
00150   else
00151     {
00152       pthread_attr_init (&thread->attr);
00153       pthread_attr_setdetachstate (&thread->attr, PTHREAD_CREATE_DETACHED);
00154     }
00155 
00156   thread->exists = 0;
00157   list_init (&thread->timer_queue);
00158   pthread_cond_init (&thread->cond, 0);
00159   thread->current_timer = 0;
00160   thread->captured = pthread_self ();
00161   thread->clock_id = clock_id;
00162 }
00163 
00164 
00165 /* Initialize the global lists, and acquire global resources.  Error
00166    reporting is done by storing a nonzero value in the global variable
00167    __timer_init_failed.  */
00168 static void
00169 init_module (void)
00170 {
00171   int i;
00172 
00173   list_init (&timer_free_list);
00174   list_init (&thread_free_list);
00175   list_init (&thread_active_list);
00176 
00177   for (i = 0; i < TIMER_MAX; ++i)
00178     {
00179       list_append (&timer_free_list, &__timer_array[i].links);
00180       __timer_array[i].inuse = TIMER_FREE;
00181     }
00182 
00183   for (i = 0; i < THREAD_MAXNODES; ++i)
00184     list_append (&thread_free_list, &thread_array[i].links);
00185 
00186   thread_init (&__timer_signal_thread_rclk, 0, CLOCK_REALTIME);
00187 }
00188 
00189 
00190 /* This is a handler executed in a child process after a fork()
00191    occurs.  It reinitializes the module, resetting all of the data
00192    structures to their initial state.  The mutex is initialized in
00193    case it was locked in the parent process.  */
00194 static void
00195 reinit_after_fork (void)
00196 {
00197   init_module ();
00198   pthread_mutex_init (&__timer_mutex, 0);
00199 }
00200 
00201 
00202 /* Called once from pthread_once in timer_create.  This initializes the
00203    module and ensures that reinit_after_fork will be executed in any
00204    child process.  */
00205 void
00206 __timer_init_once (void)
00207 {
00208   init_module ();
00209   pthread_atfork (0, 0, reinit_after_fork);
00210 }
00211 
00212 
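For context, this initialization is presumably reached through pthread_once from the timer entry points, so that init_module runs exactly once per process no matter how many threads race to create the first timer. A self-contained sketch of that call pattern, using stand-in names rather than the real entry points:

#include <pthread.h>
#include <stdio.h>

static pthread_once_t once_control = PTHREAD_ONCE_INIT;

static void
module_init (void)           /* Stand-in for __timer_init_once.  */
{
  puts ("initialized exactly once");
}

static void
api_entry_point (void)       /* Stand-in for a timer_create-style entry point.  */
{
  (void) pthread_once (&once_control, module_init);
  /* ... proceed; the module is now initialized ... */
}

int
main (void)
{
  api_entry_point ();
  api_entry_point ();        /* The second call does not re-run module_init.  */
  return 0;
}
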
00213 /* Deinitialize a thread that is about to be deallocated.  */
00214 static void
00215 thread_deinit (struct thread_node *thread)
00216 {
00217   assert (list_isempty (&thread->timer_queue));
00218   pthread_cond_destroy (&thread->cond);
00219 }
00220 
00221 
00222 /* Allocate a thread structure from the global free list.  Global
00223    mutex lock must be held by caller.  The thread is moved to
00224    the active list. */
00225 struct thread_node *
00226 __timer_thread_alloc (const pthread_attr_t *desired_attr, clockid_t clock_id)
00227 {
00228   struct list_links *node = list_first (&thread_free_list);
00229 
00230   if (node != list_null (&thread_free_list))
00231     {
00232       struct thread_node *thread = thread_links2ptr (node);
00233       list_unlink (node);
00234       thread_init (thread, desired_attr, clock_id);
00235       list_append (&thread_active_list, node);
00236       return thread;
00237     }
00238 
00239   return 0;
00240 }
00241 
00242 
00243 /* Return a thread structure to the global free list.  Global lock
00244    must be held by caller.  */
00245 void
00246 __timer_thread_dealloc (struct thread_node *thread)
00247 {
00248   thread_deinit (thread);
00249   list_unlink (&thread->links);
00250   list_append (&thread_free_list, &thread->links);
00251 }
00252 
00253 
00254 /* Each of our threads which terminates executes this cleanup
00255    handler. We never terminate threads ourselves; if a thread gets here
00256    it means that the evil application has killed it.  If the thread has
00257    timers, these require servicing and so we must hire a replacement
00258    thread right away.  We must also unblock another thread that may
00259    have been waiting for this thread to finish servicing a timer (see
00260    timer_delete()).  */
00261 
00262 static void
00263 thread_cleanup (void *val)
00264 {
00265   if (val != NULL)
00266     {
00267       struct thread_node *thread = val;
00268 
00269       /* How did the signal thread get killed?  */
00270       assert (thread != &__timer_signal_thread_rclk);
00271 
00272       pthread_mutex_lock (&__timer_mutex);
00273 
00274       thread->exists = 0;
00275 
00276       /* We are no longer processing a timer event.  */
00277       thread->current_timer = 0;
00278 
00279       if (list_isempty (&thread->timer_queue))
00280          __timer_thread_dealloc (thread);
00281       else
00282        (void) __timer_thread_start (thread);
00283 
00284       pthread_mutex_unlock (&__timer_mutex);
00285 
00286       /* Unblock potentially blocked timer_delete().  */
00287       pthread_cond_broadcast (&thread->cond);
00288     }
00289 }
00290 
00291 
00292 /* Handle a timer which is supposed to go off now.  */
00293 static void
00294 thread_expire_timer (struct thread_node *self, struct timer_node *timer)
00295 {
00296   self->current_timer = timer; /* Lets timer_delete know timer is running. */
00297 
00298   pthread_mutex_unlock (&__timer_mutex);
00299 
00300   switch (__builtin_expect (timer->event.sigev_notify, SIGEV_SIGNAL))
00301     {
00302     case SIGEV_NONE:
00303       break;
00304 
00305     case SIGEV_SIGNAL:
00306 #ifdef __NR_rt_sigqueueinfo
00307       {
00308        siginfo_t info;
00309 
00310        /* First, clear the siginfo_t structure, so that we don't pass our
00311           stack content to other tasks.  */
00312        memset (&info, 0, sizeof (siginfo_t));
00313        /* We must pass the information about the data in a siginfo_t
00314            value.  */
00315        info.si_signo = timer->event.sigev_signo;
00316        info.si_code = SI_TIMER;
00317        info.si_pid = timer->creator_pid;
00318        info.si_uid = getuid ();
00319        info.si_value = timer->event.sigev_value;
00320 
00321        INLINE_SYSCALL (rt_sigqueueinfo, 3, info.si_pid, info.si_signo, &info);
00322       }
00323 #else
00324       if (pthread_kill (self->captured, timer->event.sigev_signo) != 0)
00325        {
00326          if (pthread_kill (self->id, timer->event.sigev_signo) != 0)
00327            abort ();
00328         }
00329 #endif
00330       break;
00331 
00332     case SIGEV_THREAD:
00333       timer->event.sigev_notify_function (timer->event.sigev_value);
00334       break;
00335 
00336     default:
00337       assert (! "unknown event");
00338       break;
00339     }
00340 
00341   pthread_mutex_lock (&__timer_mutex);
00342 
00343   self->current_timer = 0;
00344 
00345   pthread_cond_broadcast (&self->cond);
00346 }
00347 
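The SIGEV_SIGNAL branch above queues a realtime signal carrying sigev_value through the rt_sigqueueinfo syscall (or falls back to pthread_kill). The closest portable, user-level equivalent is sigqueue(); the hedged sketch below queues a value-carrying signal to the current process and receives it synchronously, and the self-signalling setup is purely illustrative:

#include <signal.h>
#include <stdio.h>
#include <unistd.h>

int
main (void)
{
  sigset_t set;
  siginfo_t info;
  union sigval v = { .sival_int = 42 };

  /* Block SIGRTMIN so it stays queued until we fetch it synchronously.  */
  sigemptyset (&set);
  sigaddset (&set, SIGRTMIN);
  sigprocmask (SIG_BLOCK, &set, NULL);

  /* Queue the signal to ourselves together with a payload value.  */
  sigqueue (getpid (), SIGRTMIN, v);

  /* Receive it; si_code is SI_QUEUE here, SI_TIMER for POSIX timer expiry.  */
  sigwaitinfo (&set, &info);
  printf ("signo=%d value=%d\n", info.si_signo, info.si_value.sival_int);
  return 0;
}
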
00348 
00349 /* Thread function; executed by each timer thread. The job of this
00350    function is to wait on the thread's timer queue and expire the
00351    timers in chronological order as close to their scheduled time as
00352    possible.  */
00353 static void
00354 __attribute__ ((noreturn))
00355 thread_func (void *arg)
00356 {
00357   struct thread_node *self = arg;
00358 
00359   /* Register cleanup handler, in case rogue application terminates
00360      this thread.  (This cannot happen to __timer_signal_thread, which
00361      doesn't invoke application callbacks). */
00362 
00363   pthread_cleanup_push (thread_cleanup, self);
00364 
00365   pthread_mutex_lock (&__timer_mutex);
00366 
00367   while (1)
00368     {
00369       struct list_links *first;
00370       struct timer_node *timer = NULL;
00371 
00372       /* While the timer queue is not empty, inspect the first node.  */
00373       first = list_first (&self->timer_queue);
00374       if (first != list_null (&self->timer_queue))
00375        {
00376          struct timespec now;
00377 
00378          timer = timer_links2ptr (first);
00379 
00380          /* This assumes that the elements of the list of one thread
00381             are all for the same clock.  */
00382          clock_gettime (timer->clock, &now);
00383 
00384          while (1)
00385            {
00386              /* If the timer is due or overdue, remove it from the queue.
00387                If it's a periodic timer, re-compute its new time and
00388                requeue it.  Either way, perform the timer expiry. */
00389              if (timespec_compare (&now, &timer->expirytime) < 0)
00390               break;
00391 
00392              list_unlink_ip (first);
00393 
00394              if (__builtin_expect (timer->value.it_interval.tv_sec, 0) != 0
00395                 || timer->value.it_interval.tv_nsec != 0)
00396               {
00397                 timer->overrun_count = 0;
00398                 timespec_add (&timer->expirytime, &timer->expirytime,
00399                             &timer->value.it_interval);
00400                 while (timespec_compare (&timer->expirytime, &now) < 0)
00401                   {
00402                     timespec_add (&timer->expirytime, &timer->expirytime,
00403                                 &timer->value.it_interval);
00404                     if (timer->overrun_count < DELAYTIMER_MAX)
00405                      ++timer->overrun_count;
00406                   }
00407                 __timer_thread_queue_timer (self, timer);
00408               }
00409 
00410              thread_expire_timer (self, timer);
00411 
00412              first = list_first (&self->timer_queue);
00413              if (first == list_null (&self->timer_queue))
00414               break;
00415 
00416              timer = timer_links2ptr (first);
00417            }
00418        }
00419 
00420       /* If the queue is not empty, wait until the expiry time of the
00421         first node.  Otherwise wait indefinitely.  Insertions at the
00422         head of the queue must wake up the thread by broadcasting
00423         this condition variable.  */
00424       if (timer != NULL)
00425        pthread_cond_timedwait (&self->cond, &__timer_mutex,
00426                             &timer->expirytime);
00427       else
00428        pthread_cond_wait (&self->cond, &__timer_mutex);
00429     }
00430   /* This macro will never be executed since the while loop loops
00431      forever - but we have to add it for proper nesting.  */
00432   pthread_cleanup_pop (1);
00433 }
00434 
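The requeue path above advances a periodic timer's absolute expiry by whole intervals until it lies in the future, counting each skipped interval as an overrun (capped at DELAYTIMER_MAX). A self-contained sketch of that arithmetic follows; ts_add and ts_cmp are hypothetical stand-ins for the timespec_add and timespec_compare helpers from posix-timer.h, and INT_MAX stands in for DELAYTIMER_MAX:

#include <limits.h>
#include <stdio.h>
#include <time.h>

/* Hypothetical stand-ins for timespec_add/timespec_compare.  */
static void
ts_add (struct timespec *sum, const struct timespec *a, const struct timespec *b)
{
  sum->tv_sec = a->tv_sec + b->tv_sec;
  sum->tv_nsec = a->tv_nsec + b->tv_nsec;
  if (sum->tv_nsec >= 1000000000L)
    {
      sum->tv_nsec -= 1000000000L;
      ++sum->tv_sec;
    }
}

static int
ts_cmp (const struct timespec *a, const struct timespec *b)
{
  if (a->tv_sec != b->tv_sec)
    return a->tv_sec < b->tv_sec ? -1 : 1;
  if (a->tv_nsec != b->tv_nsec)
    return a->tv_nsec < b->tv_nsec ? -1 : 1;
  return 0;
}

int
main (void)
{
  /* A timer was due at t=10s with a 3s period, but the thread only got to
     it at now=20s: the expiry is stepped to 22s with 3 overruns counted.  */
  struct timespec expiry = { 10, 0 }, interval = { 3, 0 }, now = { 20, 0 };
  int overrun_count = 0;

  ts_add (&expiry, &expiry, &interval);          /* First requeue step.  */
  while (ts_cmp (&expiry, &now) < 0)
    {
      ts_add (&expiry, &expiry, &interval);
      if (overrun_count < INT_MAX)               /* DELAYTIMER_MAX cap.  */
        ++overrun_count;
    }

  printf ("next expiry %lds, overruns %d\n", (long) expiry.tv_sec, overrun_count);
  return 0;
}
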
00435 
00436 /* Enqueue a timer in wakeup order in the thread's timer queue.
00437    Returns 1 if the timer was inserted at the head of the queue,
00438    causing the queue's next wakeup time to change. */
00439 
00440 int
00441 __timer_thread_queue_timer (struct thread_node *thread,
00442                          struct timer_node *insert)
00443 {
00444   struct list_links *iter;
00445   int athead = 1;
00446 
00447   for (iter = list_first (&thread->timer_queue);
00448        iter != list_null (&thread->timer_queue);
00449         iter = list_next (iter))
00450     {
00451       struct timer_node *timer = timer_links2ptr (iter);
00452 
00453       if (timespec_compare (&insert->expirytime, &timer->expirytime) < 0)
00454          break;
00455       athead = 0;
00456     }
00457 
00458   list_insbefore (iter, &insert->links);
00459   return athead;
00460 }
00461 
00462 
00463 /* Start a thread and associate it with the given thread node.  Global
00464    lock must be held by caller.  */
00465 int
00466 __timer_thread_start (struct thread_node *thread)
00467 {
00468   int retval = 1;
00469 
00470   assert (!thread->exists);
00471   thread->exists = 1;
00472 
00473   if (pthread_create (&thread->id, &thread->attr,
00474                     (void *(*) (void *)) thread_func, thread) != 0)
00475     {
00476       thread->exists = 0;
00477       retval = -1;
00478     }
00479 
00480   return retval;
00481 }
00482 
00483 
00484 void
00485 __timer_thread_wakeup (struct thread_node *thread)
00486 {
00487   pthread_cond_broadcast (&thread->cond);
00488 }
00489 
00490 
00491 /* Compare two pthread_attr_t thread attributes for exact equality.
00492    Returns 1 if they are equal, and zero if they are not equal or
00493    contain illegal values.  This version is LinuxThreads-specific for
00494    performance reasons.  One could use the access functions to get the
00495    values of all the fields of the attribute structure.  */
00496 static int
00497 thread_attr_compare (const pthread_attr_t *left, const pthread_attr_t *right)
00498 {
00499   return (left->__detachstate == right->__detachstate
00500          && left->__schedpolicy == right->__schedpolicy
00501          && left->__guardsize == right->__guardsize
00502          && (left->__schedparam.sched_priority
00503              == right->__schedparam.sched_priority)
00504          && left->__inheritsched == right->__inheritsched
00505          && left->__scope == right->__scope
00506          && left->__stacksize == right->__stacksize
00507          && left->__stackaddr_set == right->__stackaddr_set
00508          && (left->__stackaddr_set
00509              || left->__stackaddr == right->__stackaddr));
00510 }
00511 
00512 
00513 /* Search the list of active threads and find one which has matching
00514    attributes.  Global mutex lock must be held by caller.  */
00515 struct thread_node *
00516 __timer_thread_find_matching (const pthread_attr_t *desired_attr,
00517                            clockid_t desired_clock_id)
00518 {
00519   struct list_links *iter = list_first (&thread_active_list);
00520 
00521   while (iter != list_null (&thread_active_list))
00522     {
00523       struct thread_node *candidate = thread_links2ptr (iter);
00524 
00525       if (thread_attr_compare (desired_attr, &candidate->attr)
00526          && desired_clock_id == candidate->clock_id)
00527        return candidate;
00528 
00529       iter = list_next (iter);
00530     }
00531 
00532   return NULL;
00533 }
00534 
00535 
00536 /* Grab a free timer structure from the global free list.  The global
00537    lock must be held by the caller.  */
00538 struct timer_node *
00539 __timer_alloc (void)
00540 {
00541   struct list_links *node = list_first (&timer_free_list);
00542 
00543   if (node != list_null (&timer_free_list))
00544     {
00545       struct timer_node *timer = timer_links2ptr (node);
00546       list_unlink_ip (node);
00547       timer->inuse = TIMER_INUSE;
00548       timer->refcount = 1;
00549       return timer;
00550     }
00551 
00552   return NULL;
00553 }
00554 
00555 
00556 /* Return a timer structure to the global free list.  The global lock
00557    must be held by the caller.  */
00558 void
00559 __timer_dealloc (struct timer_node *timer)
00560 {
00561   assert (timer->refcount == 0);
00562   timer->thread = NULL;     /* Break association between timer and thread.  */
00563   timer->inuse = TIMER_FREE;
00564   list_append (&timer_free_list, &timer->links);
00565 }
00566 
00567 
00568 /* Thread cancellation handler which unlocks a mutex.  */
00569 void
00570 __timer_mutex_cancel_handler (void *arg)
00571 {
00572   pthread_mutex_unlock (arg);
00573 }
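
For completeness, this helper module exists to back the standard POSIX timer API. Below is a hedged application-level example of the SIGEV_THREAD notification path serviced by thread_expire_timer; on older glibc, compile with -lrt -lpthread:

#include <signal.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static void
notify (union sigval sv)
{
  /* Runs on a notification thread, cf. the SIGEV_THREAD case above.  */
  printf ("timer fired, cookie=%d\n", sv.sival_int);
}

int
main (void)
{
  timer_t timerid;
  struct sigevent sev = { 0 };
  struct itimerspec its = { 0 };

  sev.sigev_notify = SIGEV_THREAD;
  sev.sigev_notify_function = notify;
  sev.sigev_value.sival_int = 42;

  if (timer_create (CLOCK_REALTIME, &sev, &timerid) != 0)
    {
      perror ("timer_create");
      return 1;
    }

  /* One-shot timer 100 ms from now; set it_interval for a periodic timer.  */
  its.it_value.tv_nsec = 100 * 1000 * 1000;
  timer_settime (timerid, 0, &its, NULL);

  sleep (1);                      /* Give the notification time to run.  */
  timer_delete (timerid);
  return 0;
}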