
plt-scheme 4.2.1
specific.h
/*
 * This is a reimplementation of a subset of the pthread_getspecific/setspecific
 * interface. This appears to outperform the standard linuxthreads one
 * by a significant margin.
 * The major restriction is that each thread may only make a single
 * pthread_setspecific call on a single key.  (The current data structure
 * doesn't really require that.  The restriction should be easily removable.)
 * We don't currently support the destruction functions, though that
 * could be done.
 * We also currently assume that only one pthread_setspecific call
 * can be executed at a time, though that assumption would be easy to remove
 * by adding a lock.
 */

#include <errno.h>

/* Called during key creation or setspecific.           */
/* For the GC we already hold the lock.                 */
/* Currently allocated objects leak on thread exit.     */
/* That's hard to fix, but OK if we allocate garbage    */
/* collected memory.                                    */
#define MALLOC_CLEAR(n) GC_INTERNAL_MALLOC(n, NORMAL)
#define PREFIXED(name) GC_##name

#define TS_CACHE_SIZE 1024
#define CACHE_HASH(n) (((((long)n) >> 8) ^ (long)n) & (TS_CACHE_SIZE - 1))
#define TS_HASH_SIZE 1024
#define HASH(n) (((((long)n) >> 8) ^ (long)n) & (TS_HASH_SIZE - 1))
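
/* Worked example (with an arbitrary illustrative stack address): a    */
/* thread whose stack page sits near 0xBFFFE840 gets the quick thread  */
/* id 0xBFFFE840 >> 12 == 0xBFFFE, and                                 */
/*   CACHE_HASH(0xBFFFE) == ((0xBFFFE >> 8) ^ 0xBFFFE) & 0x3FF         */
/*                       == (0xBFF ^ 0xBFFFE) & 0x3FF                  */
/*                       == 0xBF401 & 0x3FF == 1.                      */
/* The XOR folds high-order qtid bits into the index, so stacks that   */
/* are far apart are less likely to collide in the cache.              */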

/* An entry describing a thread-specific value for a given thread.     */
/* All such accessible structures preserve the invariant that if either*/
/* thread is a valid pthread id or qtid is a valid "quick thread id"   */
/* for a thread, then value holds the corresponding thread-specific    */
/* value.  This invariant must be preserved at ALL times, since        */
/* asynchronous reads are allowed.                                     */
typedef struct thread_specific_entry {
    unsigned long qtid;   /* quick thread id, only for cache */
    void * value;
    struct thread_specific_entry *next;
    pthread_t thread;
} tse;


/* We represent each thread-specific datum as two tables.  The first is */
/* a cache, indexed by a "quick thread identifier".  The "quick" thread */
/* identifier is an easy-to-compute value, which is guaranteed to       */
/* determine the thread, though a thread may correspond to more than    */
/* one value.  We typically use the address of a page in the stack.     */
/* The second is a hash table, indexed by pthread_self().  It is used   */
/* only as a backup.                                                    */

/* Return the "quick thread id".  Default version.  Assumes page size,  */
/* or at least thread stack separation, is at least 4K.                 */
/* Must be defined so that it never returns 0.  (Page 0 can't really    */
/* be part of any stack, since that would make 0 a valid stack pointer.)*/
static __inline__ unsigned long quick_thread_id() {
    int dummy;
    return (unsigned long)(&dummy) >> 12;
}

#define INVALID_QTID ((unsigned long)0)
#define INVALID_THREADID ((pthread_t)0)

typedef struct thread_specific_data {
    tse * volatile cache[TS_CACHE_SIZE];
                        /* A faster index to the hash table */
    tse * hash[TS_HASH_SIZE];
    pthread_mutex_t lock;
} tsd;

typedef tsd * PREFIXED(key_t);

extern int PREFIXED(key_create) (tsd ** key_ptr, void (* destructor)(void *));
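
/* A minimal sketch of key creation under the assumptions stated above  */
/* (the real definition is elsewhere; this is illustrative, not the     */
/* actual implementation): allocate a cleared tsd so both tables start  */
/* empty, and initialize the lock that serializes writers.              */
static int example_key_create(tsd ** key_ptr, void (* destructor)(void *))
{
    tsd * result = (tsd *)MALLOC_CLEAR(sizeof(tsd));

    /* Destructors are not currently supported (see the comment at the  */
    /* top of this file), so the argument is accepted and ignored.      */
    (void) destructor;
    if (0 == result) return ENOMEM;
    pthread_mutex_init(&(result -> lock), 0);
    *key_ptr = result;
    return 0;
}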

extern int PREFIXED(setspecific) (tsd * key, void * value);
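
/* A rough sketch of what the setspecific side must do to keep the      */
/* fast-path invariant intact (illustrative assumptions, not the actual */
/* implementation): the new entry is fully initialized before it is     */
/* linked into its hash chain, and the link is a single pointer store,  */
/* so a concurrent reader either misses the entry or sees it complete.  */
static int example_setspecific(tsd * key, void * value)
{
    pthread_t self = pthread_self();
    unsigned hash_val = HASH(self);
    tse * entry = (tse *)MALLOC_CLEAR(sizeof(tse));

    if (0 == entry) return ENOMEM;
    pthread_mutex_lock(&(key -> lock));
    /* Fill in the entry before publishing it.                          */
    entry -> thread = self;
    entry -> value = value;
    entry -> qtid = INVALID_QTID;   /* no cache slot claimed yet        */
    entry -> next = key -> hash[hash_val];
    /* Publish with one pointer assignment so asynchronous readers see  */
    /* either the old chain or the new, fully initialized entry.        */
    key -> hash[hash_val] = entry;
    pthread_mutex_unlock(&(key -> lock));
    return 0;
}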

extern void PREFIXED(remove_specific) (tsd * key);

/* An internal version of getspecific that assumes a cache miss.        */
void * PREFIXED(slow_getspecific) (tsd * key, unsigned long qtid,
                                   tse * volatile * cache_entry);
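
/* A rough sketch of the slow path's obligations (the actual definition */
/* lives in the accompanying .c file; the code below is an illustrative */
/* assumption): find the entry in the hash table by pthread_self(),     */
/* then refresh the cache slot.  Both the qtid store and the cache      */
/* pointer store are single word assignments; either value is safe for  */
/* asynchronous readers, though it may produce spurious misses.         */
static void * example_slow_getspecific(tsd * key, unsigned long qtid,
                                       tse * volatile * cache_entry)
{
    pthread_t self = pthread_self();
    unsigned hash_val = HASH(self);
    tse * entry = key -> hash[hash_val];

    while (entry != 0 && !pthread_equal(entry -> thread, self)) {
        entry = entry -> next;
    }
    if (0 == entry) return 0;   /* no value registered by setspecific   */
    /* Claim the cache slot.  A stale qtid or a stale cache pointer     */
    /* only causes another miss, never a wrong value.                   */
    entry -> qtid = qtid;
    *cache_entry = entry;
    return entry -> value;
}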

static __inline__ void * PREFIXED(getspecific) (tsd * key) {
    long qtid = quick_thread_id();
    unsigned hash_val = CACHE_HASH(qtid);
    tse * volatile * entry_ptr = key -> cache + hash_val;
    tse * entry = *entry_ptr;   /* Must be loaded only once.    */
    if (EXPECT(entry -> qtid == qtid, 1)) {
      GC_ASSERT(entry -> thread == pthread_self());
      return entry -> value;
    }
    return PREFIXED(slow_getspecific) (key, qtid, entry_ptr);
}
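
/* An illustrative usage sketch; the example_* names and the key below  */
/* are hypothetical, not part of this interface.  Each thread registers */
/* its datum exactly once with setspecific, as required by the          */
/* restriction noted at the top of this file, and all later lookups go  */
/* through the inline getspecific fast path.                            */
static tsd * example_key;

static void example_init_key(void)
{
    /* Called once, before any threads are started.                     */
    (void) PREFIXED(key_create)(&example_key, 0 /* destructors unsupported */);
}

static void example_thread_start(void * my_data)
{
    /* Exactly one setspecific call per thread for this key.            */
    (void) PREFIXED(setspecific)(example_key, my_data);
}

static void * example_lookup(void)
{
    /* Normally hits the qtid cache; falls back to the hash table via   */
    /* slow_getspecific on a miss.                                      */
    return PREFIXED(getspecific)(example_key);
}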
00094 
00095