Back to index

lightning-sunbird  0.9+nobinonly
linux_threads.c
Go to the documentation of this file.
00001 /* 
00002  * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
00003  * Copyright (c) 1996 by Silicon Graphics.  All rights reserved.
00004  * Copyright (c) 1998 by Fergus Henderson.  All rights reserved.
00005  *
00006  * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
00007  * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
00008  *
00009  * Permission is hereby granted to use or copy this program
00010  * for any purpose,  provided the above notices are retained on all copies.
00011  * Permission to modify the code and to distribute modified code is granted,
00012  * provided the above notices are retained, and a notice that the code was
00013  * modified is included with the above copyright notice.
00014  */
00015 /*
00016  * Support code for LinuxThreads, the clone()-based kernel
00017  * thread package for Linux which is included in libc6.
00018  *
00019  * This code relies on implementation details of LinuxThreads,
00020  * (i.e. properties not guaranteed by the Pthread standard):
00021  *
00022  *     - the function GC_linux_thread_top_of_stack(void)
00023  *       relies on the way LinuxThreads lays out thread stacks
00024  *       in the address space.
00025  *
00026  * Note that there is a lot of code duplication between linux_threads.c
00027  * and irix_threads.c; any changes made here may need to be reflected
00028  * there too.
00029  */
00030 
00031 /* #define DEBUG_THREADS 1 */
00032 
00033 /* ANSI C requires that a compilation unit contains something */
00034 # include "gc_priv.h"
00035 
00036 # if defined(LINUX_THREADS)
00037 
00038 # include <pthread.h>
00039 # include <time.h>
00040 # include <errno.h>
00041 # include <unistd.h>
00042 # include <sys/mman.h>
00043 # include <sys/time.h>
00044 # include <semaphore.h>
00045 
00046 #undef pthread_create
00047 #undef pthread_sigmask
00048 #undef pthread_join
00049 
00050 void GC_thr_init();
00051 
#if 0
/* Debugging aid (currently compiled out): print the calling thread's  */
/* blocked-signal mask via GC_printf.  Useful when diagnosing lost     */
/* SIG_SUSPEND/SIG_RESTART deliveries.                                 */
void GC_print_sig_mask()
{
    sigset_t blocked;
    int i;

    if (pthread_sigmask(SIG_BLOCK, NULL, &blocked) != 0)
        ABORT("pthread_sigmask");
    GC_printf0("Blocked: ");
    for (i = 1; i <= MAXSIG; i++) {
        if (sigismember(&blocked, i)) { GC_printf1("%ld ",(long) i); }
    }
    GC_printf0("\n");
}
#endif
00067 
/* We use the allocation lock to protect thread-related data structures. */

/* The set of all known threads.  We intercept thread creation and     */
/* joins.  We never actually create detached threads.  We allocate all */
/* new thread stacks ourselves.  These allow us to maintain this       */
/* data structure.                                                     */
/* Protected by GC_thr_lock.                                           */
/* Some of this should be declared volatile, but that's inconsistent   */
/* with some library routine declarations.                             */
typedef struct GC_Thread_Rep {
    struct GC_Thread_Rep * next;  /* Hash-chain link.  More recently   */
                                  /* allocated threads with a given    */
                                  /* pthread id come first.  (All but  */
                                  /* the first are guaranteed to be    */
                                  /* dead, but we may not yet have     */
                                  /* registered the join.)             */
    pthread_t id;                 /* pthread identity of this entry.   */
    word flags;                   /* Bitwise OR of the flags below.    */
#      define FINISHED 1    /* Thread has exited.                      */
#      define DETACHED 2    /* Thread is intended to be detached.      */
#      define MAIN_THREAD 4 /* True for the original thread only.      */

    ptr_t stack_end;              /* Cold (high) end of the stack, as  */
                                  /* recorded by the suspend handler.  */
    ptr_t stack_ptr;              /* Valid only when stopped.          */
    int signal;                   /* Set to SIG_RESTART by the restart */
                                  /* handler; used for the resume      */
                                  /* handshake in GC_suspend_handler.  */
    void * status;          /* The value returned from the thread.     */
                            /* Used only to avoid premature            */
                            /* reclamation of any data it might        */
                            /* reference.                              */
} * GC_thread;

/* Forward declaration; definition below. */
GC_thread GC_lookup_thread(pthread_t id);
00099 
/*
 * The only way to suspend threads given the pthread interface is to send
 * signals.  We can't use SIGSTOP directly, because we need to get the
 * thread to save its stack pointer in the GC thread table before
 * suspending.  So we have to reserve a signal of our own for this.
 * This means we have to intercept client calls to change the signal mask.
 * The linuxthreads package already uses SIGUSR1 and SIGUSR2,
 * so we need to reuse something else.  I chose SIGPWR.
 * (Perhaps SIGUNUSED would be a better choice.)
 */
#define SIG_SUSPEND SIGPWR

/* Signal used to tell a suspended thread to resume. */
#define SIG_RESTART SIGXCPU

/* Posted by each thread's suspend handler once it has recorded its  */
/* stack pointer; the world-stopping thread waits on it once per     */
/* signalled thread.                                                 */
sem_t GC_suspend_ack_sem;

/*
GC_linux_thread_top_of_stack() relies on implementation details of
LinuxThreads, namely that thread stacks are allocated on 2M boundaries
and grow to no more than 2M.
To make sure that we're using LinuxThreads and not some other thread
package, we generate a dummy reference to `__pthread_initial_thread_bos',
which is a symbol defined in LinuxThreads, but (hopefully) not in other
thread packages.
*/
#if 0
extern char * __pthread_initial_thread_bos;
char **dummy_var_to_force_linux_threads = &__pthread_initial_thread_bos;
#endif

/* Assumed alignment and maximum size of a LinuxThreads stack (2 MB). */
#define LINUX_THREADS_STACK_SIZE  (2 * 1024 * 1024)
00131 
00132 static inline ptr_t GC_linux_thread_top_of_stack(void)
00133 {
00134   char *sp = GC_approx_sp();
00135   ptr_t tos = (ptr_t) (((unsigned long)sp | (LINUX_THREADS_STACK_SIZE - 1)) + 1);
00136 #if DEBUG_THREADS
00137   GC_printf1("SP = %lx\n", (unsigned long)sp);
00138   GC_printf1("TOS = %lx\n", (unsigned long)tos);
00139 #endif
00140   return tos;
00141 }
00142 
00143 void GC_suspend_handler(int sig)
00144 {
00145     int dummy;
00146     pthread_t my_thread = pthread_self();
00147     GC_thread me;
00148     sigset_t all_sigs;
00149     sigset_t old_sigs;
00150     int i;
00151     sigset_t mask;
00152 
00153     if (sig != SIG_SUSPEND) ABORT("Bad signal in suspend_handler");
00154 
00155 #if DEBUG_THREADS
00156     GC_printf1("Suspending 0x%x\n", my_thread);
00157 #endif
00158 
00159     me = GC_lookup_thread(my_thread);
00160     /* The lookup here is safe, since I'm doing this on behalf  */
00161     /* of a thread which holds the allocation lock in order    */
00162     /* to stop the world.  Thus concurrent modification of the */
00163     /* data structure is impossible.                           */
00164     me -> stack_ptr = (ptr_t)(&dummy);
00165     me -> stack_end = GC_linux_thread_top_of_stack();
00166 
00167     /* Tell the thread that wants to stop the world that this   */
00168     /* thread has been stopped.  Note that sem_post() is       */
00169     /* the only async-signal-safe primitive in LinuxThreads.    */
00170     sem_post(&GC_suspend_ack_sem);
00171 
00172     /* Wait until that thread tells us to restart by sending    */
00173     /* this thread a SIG_RESTART signal.                */
00174     /* SIG_RESTART should be masked at this point.  Thus there */
00175     /* is no race.                                      */
00176     if (sigfillset(&mask) != 0) ABORT("sigfillset() failed");
00177     if (sigdelset(&mask, SIG_RESTART) != 0) ABORT("sigdelset() failed");
00178     do {
00179            me->signal = 0;
00180            sigsuspend(&mask);             /* Wait for signal */
00181     } while (me->signal != SIG_RESTART);
00182 
00183 #if DEBUG_THREADS
00184     GC_printf1("Continuing 0x%x\n", my_thread);
00185 #endif
00186 }
00187 
00188 void GC_restart_handler(int sig)
00189 {
00190     GC_thread me;
00191 
00192     if (sig != SIG_RESTART) ABORT("Bad signal in suspend_handler");
00193 
00194     /* Let the GC_suspend_handler() know that we got a SIG_RESTART. */
00195     /* The lookup here is safe, since I'm doing this on behalf  */
00196     /* of a thread which holds the allocation lock in order    */
00197     /* to stop the world.  Thus concurrent modification of the */
00198     /* data structure is impossible.                           */
00199     me = GC_lookup_thread(pthread_self());
00200     me->signal = SIG_RESTART;
00201 
00202     /*
00203     ** Note: even if we didn't do anything useful here,
00204     ** it would still be necessary to have a signal handler,
00205     ** rather than ignoring the signals, otherwise
00206     ** the signals will not be delivered at all, and
00207     ** will thus not interrupt the sigsuspend() above.
00208     */
00209 
00210 #if DEBUG_THREADS
00211     GC_printf1("In GC_restart_handler for 0x%x\n", pthread_self());
00212 #endif
00213 }
00214 
/* TRUE once GC_thr_init() has run. */
GC_bool GC_thr_initialized = FALSE;

# define THREAD_TABLE_SZ 128       /* Must be power of 2       */
/* Hash table of all known threads, keyed by pthread id and chained   */
/* through GC_Thread_Rep.next.  Protected by the allocation lock.     */
volatile GC_thread GC_threads[THREAD_TABLE_SZ];
00219 
00220 /* Add a thread to GC_threads.  We assume it wasn't already there.    */
00221 /* Caller holds allocation lock.                               */
00222 GC_thread GC_new_thread(pthread_t id)
00223 {
00224     int hv = ((word)id) % THREAD_TABLE_SZ;
00225     GC_thread result;
00226     static struct GC_Thread_Rep first_thread;
00227     static GC_bool first_thread_used = FALSE;
00228     
00229     if (!first_thread_used) {
00230        result = &first_thread;
00231        first_thread_used = TRUE;
00232        /* Dont acquire allocation lock, since we may already hold it. */
00233     } else {
00234         result = (struct GC_Thread_Rep *)
00235                GC_generic_malloc_inner(sizeof(struct GC_Thread_Rep), NORMAL);
00236     }
00237     if (result == 0) return(0);
00238     result -> id = id;
00239     result -> next = GC_threads[hv];
00240     GC_threads[hv] = result;
00241     /* result -> flags = 0; */
00242     return(result);
00243 }
00244 
00245 /* Delete a thread from GC_threads.  We assume it is there.    */
00246 /* (The code intentionally traps if it wasn't.)                */
00247 /* Caller holds allocation lock.                        */
00248 void GC_delete_thread(pthread_t id)
00249 {
00250     int hv = ((word)id) % THREAD_TABLE_SZ;
00251     register GC_thread p = GC_threads[hv];
00252     register GC_thread prev = 0;
00253     
00254     while (!pthread_equal(p -> id, id)) {
00255         prev = p;
00256         p = p -> next;
00257     }
00258     if (prev == 0) {
00259         GC_threads[hv] = p -> next;
00260     } else {
00261         prev -> next = p -> next;
00262     }
00263 }
00264 
00265 /* If a thread has been joined, but we have not yet            */
00266 /* been notified, then there may be more than one thread       */
00267 /* in the table with the same pthread id.               */
00268 /* This is OK, but we need a way to delete a specific one.     */
00269 void GC_delete_gc_thread(pthread_t id, GC_thread gc_id)
00270 {
00271     int hv = ((word)id) % THREAD_TABLE_SZ;
00272     register GC_thread p = GC_threads[hv];
00273     register GC_thread prev = 0;
00274 
00275     while (p != gc_id) {
00276         prev = p;
00277         p = p -> next;
00278     }
00279     if (prev == 0) {
00280         GC_threads[hv] = p -> next;
00281     } else {
00282         prev -> next = p -> next;
00283     }
00284 }
00285 
00286 /* Return a GC_thread corresponding to a given thread_t.       */
00287 /* Returns 0 if it's not there.                                */
00288 /* Caller holds  allocation lock or otherwise inhibits         */
00289 /* updates.                                             */
00290 /* If there is more than one thread with the given id we       */
00291 /* return the most recent one.                                 */
00292 GC_thread GC_lookup_thread(pthread_t id)
00293 {
00294     int hv = ((word)id) % THREAD_TABLE_SZ;
00295     register GC_thread p = GC_threads[hv];
00296     
00297     while (p != 0 && !pthread_equal(p -> id, id)) p = p -> next;
00298     return(p);
00299 }
00300 
00301 /* Caller holds allocation lock.   */
00302 void GC_stop_world()
00303 {
00304     pthread_t my_thread = pthread_self();
00305     register int i;
00306     register GC_thread p;
00307     register int n_live_threads = 0;
00308     register int result;
00309 
00310     for (i = 0; i < THREAD_TABLE_SZ; i++) {
00311       for (p = GC_threads[i]; p != 0; p = p -> next) {
00312         if (p -> id != my_thread) {
00313             if (p -> flags & FINISHED) continue;
00314             n_live_threads++;
00315            #if DEBUG_THREADS
00316              GC_printf1("Sending suspend signal to 0x%x\n", p -> id);
00317            #endif
00318             result = pthread_kill(p -> id, SIG_SUSPEND);
00319            switch(result) {
00320                 case ESRCH:
00321                     /* Not really there anymore.  Possible? */
00322                     n_live_threads--;
00323                     break;
00324                 case 0:
00325                     break;
00326                 default:
00327                     ABORT("pthread_kill failed");
00328             }
00329         }
00330       }
00331     }
00332     for (i = 0; i < n_live_threads; i++) {
00333        sem_wait(&GC_suspend_ack_sem);
00334     }
00335     #if DEBUG_THREADS
00336     GC_printf1("World stopped 0x%x\n", pthread_self());
00337     #endif
00338 }
00339 
00340 /* Caller holds allocation lock.   */
00341 void GC_start_world()
00342 {
00343     pthread_t my_thread = pthread_self();
00344     register int i;
00345     register GC_thread p;
00346     register int n_live_threads = 0;
00347     register int result;
00348     
00349 #   if DEBUG_THREADS
00350       GC_printf0("World starting\n");
00351 #   endif
00352 
00353     for (i = 0; i < THREAD_TABLE_SZ; i++) {
00354       for (p = GC_threads[i]; p != 0; p = p -> next) {
00355         if (p -> id != my_thread) {
00356             if (p -> flags & FINISHED) continue;
00357             n_live_threads++;
00358            #if DEBUG_THREADS
00359              GC_printf1("Sending restart signal to 0x%x\n", p -> id);
00360            #endif
00361             result = pthread_kill(p -> id, SIG_RESTART);
00362            switch(result) {
00363                 case ESRCH:
00364                     /* Not really there anymore.  Possible? */
00365                     n_live_threads--;
00366                     break;
00367                 case 0:
00368                     break;
00369                 default:
00370                     ABORT("pthread_kill failed");
00371             }
00372         }
00373       }
00374     }
00375     #if DEBUG_THREADS
00376       GC_printf0("World started\n");
00377     #endif
00378 }
00379 
/* We hold allocation lock.  We assume the world is stopped.   */
/* Push every registered thread's stack ([lo, hi)) to the mark */
/* machinery via GC_push_all_stack().                          */
void GC_push_all_stacks()
{
    register int i;
    register GC_thread p;
    register ptr_t sp = GC_approx_sp();
        /* NOTE(review): sp is computed but never read below; lo for   */
        /* the current thread is taken from a second GC_approx_sp()    */
        /* call instead.  Candidate for removal — confirm no side      */
        /* effect is relied upon.                                      */
    register ptr_t lo, hi;
    pthread_t me = pthread_self();

    if (!GC_thr_initialized) GC_thr_init();
#   if DEBUG_THREADS
        GC_printf1("Pushing stacks from thread 0x%lx\n", (unsigned long) me);
#   endif
    for (i = 0; i < THREAD_TABLE_SZ; i++) {
      for (p = GC_threads[i]; p != 0; p = p -> next) {
        if (p -> flags & FINISHED) continue;
        /* Hot end: our own live SP, or the value saved at suspension. */
        if (pthread_equal(p -> id, me)) {
            lo = GC_approx_sp();
        } else {
            lo = p -> stack_ptr;
        }
        /* Cold end: the 2MB-aligned top for LinuxThreads stacks, or   */
        /* GC_stackbottom for the original (main) thread's stack.      */
        if ((p -> flags & MAIN_THREAD) == 0) {
            if (pthread_equal(p -> id, me)) {
                hi = GC_linux_thread_top_of_stack();
            } else {
                hi = p -> stack_end;
            }
        } else {
            /* The original stack. */
            hi = GC_stackbottom;
        }
#       if DEBUG_THREADS
            GC_printf3("Stack for thread 0x%lx = [%lx,%lx)\n",
                (unsigned long) p -> id,
                (unsigned long) lo, (unsigned long) hi);
#       endif
        GC_push_all_stack(lo, hi);
      }
    }
}
00420 
00421 
00422 /* We hold the allocation lock.    */
00423 void GC_thr_init()
00424 {
00425     GC_thread t;
00426     struct sigaction act;
00427 
00428     if (GC_thr_initialized) return;
00429     GC_thr_initialized = TRUE;
00430 
00431     if (sem_init(&GC_suspend_ack_sem, 0, 0) != 0)
00432        ABORT("sem_init failed");
00433 
00434     act.sa_flags = SA_RESTART;
00435     if (sigfillset(&act.sa_mask) != 0) {
00436        ABORT("sigfillset() failed");
00437     }
00438     /* SIG_RESTART is unmasked by the handler when necessary.  */
00439     act.sa_handler = GC_suspend_handler;
00440     if (sigaction(SIG_SUSPEND, &act, NULL) != 0) {
00441        ABORT("Cannot set SIG_SUSPEND handler");
00442     }
00443 
00444     act.sa_handler = GC_restart_handler;
00445     if (sigaction(SIG_RESTART, &act, NULL) != 0) {
00446        ABORT("Cannot set SIG_SUSPEND handler");
00447     }
00448 
00449     /* Add the initial thread, so we can stop it.       */
00450       t = GC_new_thread(pthread_self());
00451       t -> stack_ptr = 0;
00452       t -> flags = DETACHED | MAIN_THREAD;
00453 }
00454 
00455 int GC_pthread_sigmask(int how, const sigset_t *set, sigset_t *oset)
00456 {
00457     sigset_t fudged_set;
00458     
00459     if (set != NULL && (how == SIG_BLOCK || how == SIG_SETMASK)) {
00460         fudged_set = *set;
00461         sigdelset(&fudged_set, SIG_SUSPEND);
00462         set = &fudged_set;
00463     }
00464     return(pthread_sigmask(how, set, oset));
00465 }
00466 
/* Argument block handed from GC_pthread_create() to GC_start_routine() */
/* via pthread_create's arg pointer.                                    */
struct start_info {
    void *(*start_routine)(void *);  /* Client's real thread body.      */
    void *arg;                       /* Client's argument to it.        */
    word flags;                      /* DETACHED iff created detached.  */
    sem_t registered;       /* 1 ==> in our thread table, but  */
                            /* parent hasn't yet noticed.      */
};
00474 
00475 
00476 void GC_thread_exit_proc(void *arg)
00477 {
00478     GC_thread me;
00479     struct start_info * si = arg;
00480 
00481     LOCK();
00482     me = GC_lookup_thread(pthread_self());
00483     if (me -> flags & DETACHED) {
00484        GC_delete_thread(pthread_self());
00485     } else {
00486        me -> flags |= FINISHED;
00487     }
00488     UNLOCK();
00489 }
00490 
00491 int GC_pthread_join(pthread_t thread, void **retval)
00492 {
00493     int result;
00494     GC_thread thread_gc_id;
00495     
00496     LOCK();
00497     thread_gc_id = GC_lookup_thread(thread);
00498     /* This is guaranteed to be the intended one, since the thread id */
00499     /* cant have been recycled by pthreads.                           */
00500     UNLOCK();
00501     result = pthread_join(thread, retval);
00502     LOCK();
00503     /* Here the pthread thread id may have been recycled. */
00504     GC_delete_gc_thread(thread, thread_gc_id);
00505     UNLOCK();
00506     return result;
00507 }
00508 
/* Trampoline installed by GC_pthread_create(): registers the new     */
/* thread in GC_threads, signals the parent via si->registered, then  */
/* runs the client's start routine under a cleanup handler.           */
void * GC_start_routine(void * arg)
{
    struct start_info * si = arg;
    void * result;
    GC_thread me;
    pthread_t my_pthread;
    void *(*start)(void *);
    void *start_arg;

    my_pthread = pthread_self();
    LOCK();
    me = GC_new_thread(my_pthread);
    me -> flags = si -> flags;
    me -> stack_ptr = 0;
    me -> stack_end = 0;
    UNLOCK();
    /* Copy out of si before posting: the parent may destroy the      */
    /* semaphore (and drop its reference to si) once we post.         */
    start = si -> start_routine;
    start_arg = si -> arg;
    sem_post(&(si -> registered));
    /* Ensure GC_thread_exit_proc runs even on pthread_exit/cancel.   */
    /* Must pair lexically with pthread_cleanup_pop below.            */
    pthread_cleanup_push(GC_thread_exit_proc, si);
#   ifdef DEBUG_THREADS
        GC_printf1("Starting thread 0x%lx\n", pthread_self());
        GC_printf1("pid = %ld\n", (long) getpid());
        GC_printf1("sp = 0x%lx\n", (long) &arg);
        GC_printf1("start_routine = 0x%lx\n", start);
#   endif
    result = (*start)(start_arg);
#if DEBUG_THREADS
        GC_printf1("Finishing thread 0x%x\n", pthread_self());
#endif
    /* Keep result reachable until the joiner has seen it.            */
    me -> status = result;
    me -> flags |= FINISHED;
    pthread_cleanup_pop(1);
    /* Cleanup acquires lock, ensuring that we can't exit               */
    /* while a collection that thinks we're alive is trying to stop     */
    /* us.                                                              */
    return(result);
}
00547 
00548 int
00549 GC_pthread_create(pthread_t *new_thread,
00550                 const pthread_attr_t *attr,
00551                   void *(*start_routine)(void *), void *arg)
00552 {
00553     int result;
00554     GC_thread t;
00555     pthread_t my_new_thread;
00556     void * stack;
00557     size_t stacksize;
00558     pthread_attr_t new_attr;
00559     int detachstate;
00560     word my_flags = 0;
00561     struct start_info * si = GC_malloc(sizeof(struct start_info)); 
00562        /* This is otherwise saved only in an area mmapped by the thread */
00563        /* library, which isn't visible to the collector.               */
00564 
00565     if (0 == si) return(ENOMEM);
00566     sem_init(&(si -> registered), 0, 0);
00567     si -> start_routine = start_routine;
00568     si -> arg = arg;
00569     LOCK();
00570     if (!GC_thr_initialized) GC_thr_init();
00571     if (NULL == attr) {
00572         stack = 0;
00573        (void) pthread_attr_init(&new_attr);
00574     } else {
00575         new_attr = *attr;
00576     }
00577     pthread_attr_getdetachstate(&new_attr, &detachstate);
00578     if (PTHREAD_CREATE_DETACHED == detachstate) my_flags |= DETACHED;
00579     si -> flags = my_flags;
00580     UNLOCK();
00581     result = pthread_create(new_thread, &new_attr, GC_start_routine, si);
00582     /* Wait until child has been added to the thread table.           */
00583     /* This also ensures that we hold onto si until the child is done */
00584     /* with it.  Thus it doesn't matter whether it is otherwise              */
00585     /* visible to the collector.                               */
00586         if (0 != sem_wait(&(si -> registered))) ABORT("sem_wait failed");
00587         sem_destroy(&(si -> registered));
00588     /* pthread_attr_destroy(&new_attr); */
00589     /* pthread_attr_destroy(&new_attr); */
00590     return(result);
00591 }
00592 
GC_bool GC_collecting = 0;
                        /* A hint that we're in the collector and       */
                        /* holding the allocation lock for an           */
                        /* extended period.  Spinners yield instead of  */
                        /* spinning when this is set (see GC_lock).     */

/* Reasonably fast spin locks.  Basically the same implementation */
/* as STL alloc.h.  This isn't really the right way to do this.   */
/* but until the POSIX scheduling mess gets straightened out ...  */

/* 0 = free; acquired via GC_test_and_set in GC_lock(). */
volatile unsigned int GC_allocate_lock = 0;
00603 
00604 
/* Acquire GC_allocate_lock: adaptive spin, then sched_yield, then  */
/* exponentially growing nanosleep backoff.                         */
void GC_lock()
{
#   define low_spin_max 30  /* spin cycles if we suspect uniprocessor */
#   define high_spin_max 1000 /* spin cycles for multiprocessor */
    static unsigned spin_max = low_spin_max;
    unsigned my_spin_max;
    static unsigned last_spins = 0;
    unsigned my_last_spins;
    volatile unsigned junk;
#   define PAUSE junk *= junk; junk *= junk; junk *= junk; junk *= junk
    int i;
    /* NOTE(review): i (int) is compared against my_spin_max          */
    /* (unsigned); benign here since i stays non-negative, but worth  */
    /* confirming if spin limits ever change.                         */

    /* Fast path: uncontended acquire. */
    if (!GC_test_and_set(&GC_allocate_lock)) {
        return;
    }
    junk = 0;
    /* Read the shared tuning state once; races on spin_max /         */
    /* last_spins only affect heuristics, not correctness.            */
    my_spin_max = spin_max;
    my_last_spins = last_spins;
    for (i = 0; i < my_spin_max; i++) {
        if (GC_collecting) goto yield;   /* holder will be slow; don't spin */
        if (i < my_last_spins/2 || GC_allocate_lock) {
            PAUSE; 
            continue;
        }
        if (!GC_test_and_set(&GC_allocate_lock)) {
            /*
             * got it!
             * Spinning worked.  Thus we're probably not being scheduled
             * against the other process with which we were contending.
             * Thus it makes sense to spin longer the next time.
             */
            last_spins = i;
            spin_max = high_spin_max;
            return;
        }
    }
    /* We are probably being scheduled against the other process.  Sleep. */
    spin_max = low_spin_max;
yield:
    for (i = 0;; ++i) {
        if (!GC_test_and_set(&GC_allocate_lock)) {
            return;
        }
#       define SLEEP_THRESHOLD 12
                /* nanosleep(<= 2ms) just spins under Linux.  We */
                /* want to be careful to avoid that behavior.    */
        if (i < SLEEP_THRESHOLD) {
            sched_yield();
        } else {
            struct timespec ts;

            if (i > 26) i = 26;
                        /* Don't wait for more than about 60msecs, even  */
                        /* under extreme contention.                     */
            ts.tv_sec = 0;
            ts.tv_nsec = 1 << i;   /* exponential backoff: 4us .. ~67ms */
            nanosleep(&ts, 0);
        }
    }
}
00665 
00666 # endif /* LINUX_THREADS */
00667