
glibc 2.9
arena.c
/* Malloc implementation for multiple threads without lock contention.
   Copyright (C) 2001,2002,2003,2004,2005,2006,2007
   Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Wolfram Gloger <wg@malloc.de>, 2001.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License as
   published by the Free Software Foundation; either version 2.1 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If not,
   write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA.  */

#include <stdbool.h>

/* Compile-time constants.  */

#define HEAP_MIN_SIZE (32*1024)
#ifndef HEAP_MAX_SIZE
# ifdef DEFAULT_MMAP_THRESHOLD_MAX
#  define HEAP_MAX_SIZE (2 * DEFAULT_MMAP_THRESHOLD_MAX)
# else
#  define HEAP_MAX_SIZE (1024*1024) /* must be a power of two */
# endif
#endif

/* HEAP_MIN_SIZE and HEAP_MAX_SIZE limit the size of mmap()ed heaps
   that are dynamically created for multi-threaded programs.  The
   maximum size must be a power of two, for fast determination of
   which heap belongs to a chunk.  It should be much larger than the
   mmap threshold, so that requests with a size just below that
   threshold can be fulfilled without creating too many heaps.  */


#ifndef THREAD_STATS
#define THREAD_STATS 0
#endif

/* If THREAD_STATS is non-zero, some statistics on mutex locking are
   computed.  */

/***************************************************************************/

#define top(ar_ptr) ((ar_ptr)->top)

/* A heap is a single contiguous memory region holding (coalesceable)
   malloc_chunks.  It is allocated with mmap() and always starts at an
   address aligned to HEAP_MAX_SIZE.  Not used unless compiling with
   USE_ARENAS. */

typedef struct _heap_info {
  mstate ar_ptr; /* Arena for this heap. */
  struct _heap_info *prev; /* Previous heap. */
  size_t size;   /* Current size in bytes. */
  size_t mprotect_size;     /* Size in bytes that has been mprotected
                               PROT_READ|PROT_WRITE.  */
  /* Make sure the following data is properly aligned, particularly
     that sizeof (heap_info) + 2 * SIZE_SZ is a multiple of
     MALLOC_ALIGNMENT. */
  char pad[-6 * SIZE_SZ & MALLOC_ALIGN_MASK];
} heap_info;

/* Get a compile-time error if the heap_info padding is not correct
   to make alignment work as expected in sYSMALLOc.  */
extern int sanity_check_heap_info_alignment[(sizeof (heap_info)
                                             + 2 * SIZE_SZ) % MALLOC_ALIGNMENT
                                            ? -1 : 1];
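
/* [Editorial note, not part of the original file.]  Worked example of the
   pad[] dimension above, assuming a 32-bit configuration where SIZE_SZ == 4
   and MALLOC_ALIGNMENT == 16 (so MALLOC_ALIGN_MASK == 15): the four struct
   members occupy 4 * SIZE_SZ == 16 bytes, sizeof (heap_info) + 2 * SIZE_SZ
   would be 24 without padding, and -6 * SIZE_SZ & MALLOC_ALIGN_MASK ==
   -24 & 15 == 8 bytes of pad bring the total to 32, a multiple of
   MALLOC_ALIGNMENT.  (With SIZE_SZ == 8 and 16-byte alignment the pad is 0.)
   The same negative-and-mask idiom in isolation, kept under "#if 0" so it
   stays out of the build:  */
#if 0
static size_t
example_heap_info_pad (size_t size_sz, size_t align_mask)
{
  /* Smallest pad that makes 6 * size_sz + pad a multiple of align_mask + 1;
     like the struct above, this assumes a pointer is size_sz bytes wide.  */
  return (0 - 6 * size_sz) & align_mask;
}
#endif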

/* Thread specific data */

static tsd_key_t arena_key;
static mutex_t list_lock;

#if THREAD_STATS
static int stat_n_heaps;
#define THREAD_STAT(x) x
#else
#define THREAD_STAT(x) do ; while(0)
#endif

/* Mapped memory in non-main arenas (reliable only for NO_THREADS). */
static unsigned long arena_mem;

/* Already initialized? */
int __malloc_initialized = -1;

/**************************************************************************/

#if USE_ARENAS

/* arena_get() acquires an arena and locks the corresponding mutex.
   First, try the one last locked successfully by this thread.  (This
   is the common case and handled with a macro for speed.)  Then, loop
   once over the circularly linked list of arenas.  If no arena is
   readily available, create a new one.  In this latter case, `size'
   is just a hint as to how much memory will be required immediately
   in the new arena. */

#define arena_get(ptr, size) do { \
  Void_t *vptr = NULL; \
  ptr = (mstate)tsd_getspecific(arena_key, vptr); \
  if(ptr && !mutex_trylock(&ptr->mutex)) { \
    THREAD_STAT(++(ptr->stat_lock_direct)); \
  } else \
    ptr = arena_get2(ptr, (size)); \
} while(0)
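
/* [Editorial note, not part of the original file.]  Typical caller pattern
   for arena_get(), modelled on public_mALLOc() in malloc.c: pick an arena
   (which locks its mutex), allocate from it, then drop the lock.  The
   wrapper below is illustrative only and kept under "#if 0".  */
#if 0
static Void_t *
example_alloc (size_t bytes)
{
  mstate ar_ptr;
  Void_t *victim;

  arena_get (ar_ptr, bytes);        /* on success, ar_ptr->mutex is held */
  if (!ar_ptr)
    return 0;
  victim = _int_malloc (ar_ptr, bytes);
  (void)mutex_unlock (&ar_ptr->mutex);
  return victim;
}
#endif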

/* find the heap and corresponding arena for a given ptr */

#define heap_for_ptr(ptr) \
 ((heap_info *)((unsigned long)(ptr) & ~(HEAP_MAX_SIZE-1)))
#define arena_for_chunk(ptr) \
 (chunk_non_main_arena(ptr) ? heap_for_ptr(ptr)->ar_ptr : &main_arena)
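
/* [Editorial note, not part of the original file.]  This mask is the "fast
   determination of which heap belongs to a chunk" promised where
   HEAP_MAX_SIZE is defined: every non-main heap starts on a HEAP_MAX_SIZE
   boundary, so clearing the low bits of any address inside it lands on its
   heap_info header.  With the 1 MB default (HEAP_MAX_SIZE == 0x100000), a
   chunk at 0xb7e83010 maps to the heap_info at 0xb7e00000, and its ar_ptr
   field gives the owning arena without any search or locking.  Kept under
   "#if 0"; it simply restates arena_for_chunk().  */
#if 0
static mstate
example_arena_of (mchunkptr p)
{
  heap_info *h = heap_for_ptr (p);            /* 0xb7e83010 -> 0xb7e00000 */
  return chunk_non_main_arena (p) ? h->ar_ptr : &main_arena;
}
#endif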

#else /* !USE_ARENAS */

/* There is only one arena, main_arena. */

#if THREAD_STATS
#define arena_get(ar_ptr, sz) do { \
  ar_ptr = &main_arena; \
  if(!mutex_trylock(&ar_ptr->mutex)) \
    ++(ar_ptr->stat_lock_direct); \
  else { \
    (void)mutex_lock(&ar_ptr->mutex); \
    ++(ar_ptr->stat_lock_wait); \
  } \
} while(0)
#else
#define arena_get(ar_ptr, sz) do { \
  ar_ptr = &main_arena; \
  (void)mutex_lock(&ar_ptr->mutex); \
} while(0)
#endif
#define arena_for_chunk(ptr) (&main_arena)

#endif /* USE_ARENAS */

/**************************************************************************/

#ifndef NO_THREADS

/* atfork support.  */

static __malloc_ptr_t (*save_malloc_hook) (size_t __size,
                                           __const __malloc_ptr_t);
# if !defined _LIBC || (defined SHARED && !USE___THREAD)
static __malloc_ptr_t (*save_memalign_hook) (size_t __align, size_t __size,
                                             __const __malloc_ptr_t);
# endif
static void           (*save_free_hook) (__malloc_ptr_t __ptr,
                                          __const __malloc_ptr_t);
static Void_t*        save_arena;

#ifdef ATFORK_MEM
ATFORK_MEM;
#endif

/* Magic value for the thread-specific arena pointer when
   malloc_atfork() is in use.  */

#define ATFORK_ARENA_PTR ((Void_t*)-1)

/* The following hooks are used while the `atfork' handling mechanism
   is active. */

static Void_t*
malloc_atfork(size_t sz, const Void_t *caller)
{
  Void_t *vptr = NULL;
  Void_t *victim;

  tsd_getspecific(arena_key, vptr);
  if(vptr == ATFORK_ARENA_PTR) {
    /* We are the only thread that may allocate at all.  */
    if(save_malloc_hook != malloc_check) {
      return _int_malloc(&main_arena, sz);
    } else {
      if(top_check()<0)
        return 0;
      victim = _int_malloc(&main_arena, sz+1);
      return mem2mem_check(victim, sz);
    }
  } else {
    /* Suspend the thread until the `atfork' handlers have completed.
       By that time, the hooks will have been reset as well, so that
       mALLOc() can be used again. */
    (void)mutex_lock(&list_lock);
    (void)mutex_unlock(&list_lock);
    return public_mALLOc(sz);
  }
}

static void
free_atfork(Void_t* mem, const Void_t *caller)
{
  Void_t *vptr = NULL;
  mstate ar_ptr;
  mchunkptr p;                          /* chunk corresponding to mem */

  if (mem == 0)                              /* free(0) has no effect */
    return;

  p = mem2chunk(mem);         /* do not bother to replicate free_check here */

#if HAVE_MMAP
  if (chunk_is_mmapped(p))                       /* release mmapped memory. */
  {
    munmap_chunk(p);
    return;
  }
#endif

  ar_ptr = arena_for_chunk(p);
  tsd_getspecific(arena_key, vptr);
  if(vptr != ATFORK_ARENA_PTR)
    (void)mutex_lock(&ar_ptr->mutex);
  _int_free(ar_ptr, mem);
  if(vptr != ATFORK_ARENA_PTR)
    (void)mutex_unlock(&ar_ptr->mutex);
}


/* Counter for number of times the list is locked by the same thread.  */
static unsigned int atfork_recursive_cntr;

/* The following two functions are registered via thread_atfork() to
   make sure that the mutexes remain in a consistent state in the
   fork()ed version of a thread.  Also adapt the malloc and free hooks
   temporarily, because the `atfork' handler mechanism may use
   malloc/free internally (e.g. in LinuxThreads). */

static void
ptmalloc_lock_all (void)
{
  mstate ar_ptr;

  if(__malloc_initialized < 1)
    return;
  if (mutex_trylock(&list_lock))
    {
      Void_t *my_arena;
      tsd_getspecific(arena_key, my_arena);
      if (my_arena == ATFORK_ARENA_PTR)
        /* This is the same thread which already locks the global list.
           Just bump the counter.  */
        goto out;

      /* This thread has to wait its turn.  */
      (void)mutex_lock(&list_lock);
    }
  for(ar_ptr = &main_arena;;) {
    (void)mutex_lock(&ar_ptr->mutex);
    ar_ptr = ar_ptr->next;
    if(ar_ptr == &main_arena) break;
  }
  save_malloc_hook = __malloc_hook;
  save_free_hook = __free_hook;
  __malloc_hook = malloc_atfork;
  __free_hook = free_atfork;
  /* Only the current thread may perform malloc/free calls now. */
  tsd_getspecific(arena_key, save_arena);
  tsd_setspecific(arena_key, ATFORK_ARENA_PTR);
 out:
  ++atfork_recursive_cntr;
}

static void
ptmalloc_unlock_all (void)
{
  mstate ar_ptr;

  if(__malloc_initialized < 1)
    return;
  if (--atfork_recursive_cntr != 0)
    return;
  tsd_setspecific(arena_key, save_arena);
  __malloc_hook = save_malloc_hook;
  __free_hook = save_free_hook;
  for(ar_ptr = &main_arena;;) {
    (void)mutex_unlock(&ar_ptr->mutex);
    ar_ptr = ar_ptr->next;
    if(ar_ptr == &main_arena) break;
  }
  (void)mutex_unlock(&list_lock);
}

#ifdef __linux__

/* In NPTL, unlocking a mutex in the child process after a
   fork() is currently unsafe, whereas re-initializing it is safe and
   does not leak resources.  Therefore, a special atfork handler is
   installed for the child. */

static void
ptmalloc_unlock_all2 (void)
{
  mstate ar_ptr;

  if(__malloc_initialized < 1)
    return;
#if defined _LIBC || defined MALLOC_HOOKS
  tsd_setspecific(arena_key, save_arena);
  __malloc_hook = save_malloc_hook;
  __free_hook = save_free_hook;
#endif
  for(ar_ptr = &main_arena;;) {
    mutex_init(&ar_ptr->mutex);
    ar_ptr = ar_ptr->next;
    if(ar_ptr == &main_arena) break;
  }
  mutex_init(&list_lock);
  atfork_recursive_cntr = 0;
}

#else

#define ptmalloc_unlock_all2 ptmalloc_unlock_all

#endif

#endif /* !defined NO_THREADS */
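
/* [Editorial note, not part of the original file.]  How the three handlers
   above are meant to line up with fork(), assuming the usual
   pthread_atfork() semantics (requires <pthread.h>); the thread_atfork()
   call in ptmalloc_init() below performs the equivalent registration
   through the thread library's own interface.  Kept under "#if 0".  */
#if 0
static void
example_register_fork_handlers (void)
{
  pthread_atfork (ptmalloc_lock_all,     /* prepare: in the parent before
                                            fork(); takes list_lock and every
                                            arena mutex and installs the
                                            malloc_atfork/free_atfork hooks. */
                  ptmalloc_unlock_all,   /* parent: releases them again. */
                  ptmalloc_unlock_all2); /* child: re-initializes the mutexes
                                            instead of unlocking them, the
                                            only safe option under NPTL. */
}
#endif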

/* Initialization routine. */
#ifdef _LIBC
#include <string.h>
extern char **_environ;

static char *
internal_function
next_env_entry (char ***position)
{
  char **current = *position;
  char *result = NULL;

  while (*current != NULL)
    {
      if (__builtin_expect ((*current)[0] == 'M', 0)
          && (*current)[1] == 'A'
          && (*current)[2] == 'L'
          && (*current)[3] == 'L'
          && (*current)[4] == 'O'
          && (*current)[5] == 'C'
          && (*current)[6] == '_')
        {
          result = &(*current)[7];

          /* Save current position for next visit.  */
          *position = ++current;

          break;
        }

      ++current;
    }

  return result;
}
#endif /* _LIBC */
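
/* [Editorial note, not part of the original file.]  What next_env_entry()
   delivers: it walks _environ once and returns a pointer just past the
   "MALLOC_" prefix of each matching entry, so "MALLOC_CHECK_=3" comes back
   as "CHECK_=3".  ptmalloc_init() below consumes it exactly this way.  A
   minimal consumer, kept under "#if 0":  */
#if 0
static int
example_count_malloc_env (void)
{
  char **runp = _environ;
  char *envline;
  int n = 0;

  /* For "MALLOC_CHECK_=3" in the environment, envline is "CHECK_=3". */
  while ((envline = next_env_entry (&runp)) != NULL)
    ++n;
  return n;
}
#endif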

/* Set up basic state so that _int_malloc et al can work.  */
static void
ptmalloc_init_minimal (void)
{
#if DEFAULT_TOP_PAD != 0
  mp_.top_pad        = DEFAULT_TOP_PAD;
#endif
  mp_.n_mmaps_max    = DEFAULT_MMAP_MAX;
  mp_.mmap_threshold = DEFAULT_MMAP_THRESHOLD;
  mp_.trim_threshold = DEFAULT_TRIM_THRESHOLD;
  mp_.pagesize       = malloc_getpagesize;
}


#ifdef _LIBC
# ifdef SHARED
static void *
__failing_morecore (ptrdiff_t d)
{
  return (void *) MORECORE_FAILURE;
}

extern struct dl_open_hook *_dl_open_hook;
libc_hidden_proto (_dl_open_hook);
# endif

# if defined SHARED && !USE___THREAD
/* This is called by __pthread_initialize_minimal when it needs to use
   malloc to set up the TLS state.  We cannot do the full work of
   ptmalloc_init (below) until __pthread_initialize_minimal has finished,
   so it has to switch to using the special startup-time hooks while doing
   those allocations.  */
void
__libc_malloc_pthread_startup (bool first_time)
{
  if (first_time)
    {
      ptmalloc_init_minimal ();
      save_malloc_hook = __malloc_hook;
      save_memalign_hook = __memalign_hook;
      save_free_hook = __free_hook;
      __malloc_hook = malloc_starter;
      __memalign_hook = memalign_starter;
      __free_hook = free_starter;
    }
  else
    {
      __malloc_hook = save_malloc_hook;
      __memalign_hook = save_memalign_hook;
      __free_hook = save_free_hook;
    }
}
# endif
#endif

static void
ptmalloc_init (void)
{
#if __STD_C
  const char* s;
#else
  char* s;
#endif
  int secure = 0;

  if(__malloc_initialized >= 0) return;
  __malloc_initialized = 0;

#ifdef _LIBC
# if defined SHARED && !USE___THREAD
  /* ptmalloc_init_minimal may already have been called via
     __libc_malloc_pthread_startup, above.  */
  if (mp_.pagesize == 0)
# endif
#endif
    ptmalloc_init_minimal();

#ifndef NO_THREADS
# if defined _LIBC
  /* We know __pthread_initialize_minimal has already been called,
     and that is enough.  */
#   define NO_STARTER
# endif
# ifndef NO_STARTER
  /* With some threads implementations, creating thread-specific data
     or initializing a mutex may call malloc() itself.  Provide a
     simple starter version (realloc() won't work). */
  save_malloc_hook = __malloc_hook;
  save_memalign_hook = __memalign_hook;
  save_free_hook = __free_hook;
  __malloc_hook = malloc_starter;
  __memalign_hook = memalign_starter;
  __free_hook = free_starter;
#  ifdef _LIBC
  /* Initialize the pthreads interface. */
  if (__pthread_initialize != NULL)
    __pthread_initialize();
#  endif /* !defined _LIBC */
# endif       /* !defined NO_STARTER */
#endif /* !defined NO_THREADS */
  mutex_init(&main_arena.mutex);
  main_arena.next = &main_arena;

#if defined _LIBC && defined SHARED
  /* In case this libc copy is in a non-default namespace, never use brk.
     Likewise if dlopened from statically linked program.  */
  Dl_info di;
  struct link_map *l;

  if (_dl_open_hook != NULL
      || (_dl_addr (ptmalloc_init, &di, &l, NULL) != 0
          && l->l_ns != LM_ID_BASE))
    __morecore = __failing_morecore;
#endif

  mutex_init(&list_lock);
  tsd_key_create(&arena_key, NULL);
  tsd_setspecific(arena_key, (Void_t *)&main_arena);
  thread_atfork(ptmalloc_lock_all, ptmalloc_unlock_all, ptmalloc_unlock_all2);
#ifndef NO_THREADS
# ifndef NO_STARTER
  __malloc_hook = save_malloc_hook;
  __memalign_hook = save_memalign_hook;
  __free_hook = save_free_hook;
# else
#  undef NO_STARTER
# endif
#endif
#ifdef _LIBC
  secure = __libc_enable_secure;
  s = NULL;
  if (__builtin_expect (_environ != NULL, 1))
    {
      char **runp = _environ;
      char *envline;

      while (__builtin_expect ((envline = next_env_entry (&runp)) != NULL,
                               0))
        {
          size_t len = strcspn (envline, "=");

          if (envline[len] != '=')
            /* This is a "MALLOC_" variable at the end of the string
               without a '=' character.  Ignore it since otherwise we
               will access invalid memory below.  */
            continue;

          switch (len)
            {
            case 6:
              if (memcmp (envline, "CHECK_", 6) == 0)
                s = &envline[7];
              break;
            case 8:
              if (! secure)
                {
                  if (memcmp (envline, "TOP_PAD_", 8) == 0)
                    mALLOPt(M_TOP_PAD, atoi(&envline[9]));
                  else if (memcmp (envline, "PERTURB_", 8) == 0)
                    mALLOPt(M_PERTURB, atoi(&envline[9]));
                }
              break;
            case 9:
              if (! secure && memcmp (envline, "MMAP_MAX_", 9) == 0)
                mALLOPt(M_MMAP_MAX, atoi(&envline[10]));
              break;
            case 15:
              if (! secure)
                {
                  if (memcmp (envline, "TRIM_THRESHOLD_", 15) == 0)
                    mALLOPt(M_TRIM_THRESHOLD, atoi(&envline[16]));
                  else if (memcmp (envline, "MMAP_THRESHOLD_", 15) == 0)
                    mALLOPt(M_MMAP_THRESHOLD, atoi(&envline[16]));
                }
              break;
            default:
              break;
            }
        }
    }
#else
  if (! secure)
    {
      if((s = getenv("MALLOC_TRIM_THRESHOLD_")))
        mALLOPt(M_TRIM_THRESHOLD, atoi(s));
      if((s = getenv("MALLOC_TOP_PAD_")))
        mALLOPt(M_TOP_PAD, atoi(s));
      if((s = getenv("MALLOC_PERTURB_")))
        mALLOPt(M_PERTURB, atoi(s));
      if((s = getenv("MALLOC_MMAP_THRESHOLD_")))
        mALLOPt(M_MMAP_THRESHOLD, atoi(s));
      if((s = getenv("MALLOC_MMAP_MAX_")))
        mALLOPt(M_MMAP_MAX, atoi(s));
    }
  s = getenv("MALLOC_CHECK_");
#endif
  if(s && s[0]) {
    mALLOPt(M_CHECK_ACTION, (int)(s[0] - '0'));
    if (check_action != 0)
      __malloc_check_init();
  }
  if(__malloc_initialize_hook != NULL)
    (*__malloc_initialize_hook)();
  __malloc_initialized = 1;
}

/* There are platforms (e.g. Hurd) with a link-time hook mechanism. */
#ifdef thread_atfork_static
thread_atfork_static(ptmalloc_lock_all, ptmalloc_unlock_all, \
                     ptmalloc_unlock_all2)
#endif



/* Managing heaps and arenas (for concurrent threads) */

#if USE_ARENAS

#if MALLOC_DEBUG > 1

/* Print the complete contents of a single heap to stderr. */

static void
#if __STD_C
dump_heap(heap_info *heap)
#else
dump_heap(heap) heap_info *heap;
#endif
{
  char *ptr;
  mchunkptr p;

  fprintf(stderr, "Heap %p, size %10lx:\n", heap, (long)heap->size);
  ptr = (heap->ar_ptr != (mstate)(heap+1)) ?
    (char*)(heap + 1) : (char*)(heap + 1) + sizeof(struct malloc_state);
  p = (mchunkptr)(((unsigned long)ptr + MALLOC_ALIGN_MASK) &
                  ~MALLOC_ALIGN_MASK);
  for(;;) {
    fprintf(stderr, "chunk %p size %10lx", p, (long)p->size);
    if(p == top(heap->ar_ptr)) {
      fprintf(stderr, " (top)\n");
      break;
    } else if(p->size == (0|PREV_INUSE)) {
      fprintf(stderr, " (fence)\n");
      break;
    }
    fprintf(stderr, "\n");
    p = next_chunk(p);
  }
}

#endif /* MALLOC_DEBUG > 1 */

/* If consecutive mmap (0, HEAP_MAX_SIZE << 1, ...) calls return decreasing
   addresses as opposed to increasing, new_heap would badly fragment the
   address space.  In that case remember the second HEAP_MAX_SIZE part
   aligned to HEAP_MAX_SIZE from last mmap (0, HEAP_MAX_SIZE << 1, ...)
   call (if it is already aligned) and try to reuse it next time.  We need
   no locking for it, as kernel ensures the atomicity for us - worst case
   we'll call mmap (addr, HEAP_MAX_SIZE, ...) for some value of addr in
   multiple threads, but only one will succeed.  */
static char *aligned_heap_area;

/* Create a new heap.  size is automatically rounded up to a multiple
   of the page size. */

static heap_info *
internal_function
#if __STD_C
new_heap(size_t size, size_t top_pad)
#else
new_heap(size, top_pad) size_t size, top_pad;
#endif
{
  size_t page_mask = malloc_getpagesize - 1;
  char *p1, *p2;
  unsigned long ul;
  heap_info *h;

  if(size+top_pad < HEAP_MIN_SIZE)
    size = HEAP_MIN_SIZE;
  else if(size+top_pad <= HEAP_MAX_SIZE)
    size += top_pad;
  else if(size > HEAP_MAX_SIZE)
    return 0;
  else
    size = HEAP_MAX_SIZE;
  size = (size + page_mask) & ~page_mask;

  /* A memory region aligned to a multiple of HEAP_MAX_SIZE is needed.
     No swap space needs to be reserved for the following large
     mapping (on Linux, this is the case for all non-writable mappings
     anyway). */
  p2 = MAP_FAILED;
  if(aligned_heap_area) {
    p2 = (char *)MMAP(aligned_heap_area, HEAP_MAX_SIZE, PROT_NONE,
                      MAP_PRIVATE|MAP_NORESERVE);
    aligned_heap_area = NULL;
    if (p2 != MAP_FAILED && ((unsigned long)p2 & (HEAP_MAX_SIZE-1))) {
      munmap(p2, HEAP_MAX_SIZE);
      p2 = MAP_FAILED;
    }
  }
  if(p2 == MAP_FAILED) {
    p1 = (char *)MMAP(0, HEAP_MAX_SIZE<<1, PROT_NONE,
                      MAP_PRIVATE|MAP_NORESERVE);
    if(p1 != MAP_FAILED) {
      p2 = (char *)(((unsigned long)p1 + (HEAP_MAX_SIZE-1))
                    & ~(HEAP_MAX_SIZE-1));
      ul = p2 - p1;
      if (ul)
        munmap(p1, ul);
      else
        aligned_heap_area = p2 + HEAP_MAX_SIZE;
      munmap(p2 + HEAP_MAX_SIZE, HEAP_MAX_SIZE - ul);
    } else {
      /* Try to take the chance that an allocation of only HEAP_MAX_SIZE
         is already aligned. */
      p2 = (char *)MMAP(0, HEAP_MAX_SIZE, PROT_NONE, MAP_PRIVATE|MAP_NORESERVE);
      if(p2 == MAP_FAILED)
        return 0;
      if((unsigned long)p2 & (HEAP_MAX_SIZE-1)) {
        munmap(p2, HEAP_MAX_SIZE);
        return 0;
      }
    }
  }
  if(mprotect(p2, size, PROT_READ|PROT_WRITE) != 0) {
    munmap(p2, HEAP_MAX_SIZE);
    return 0;
  }
  h = (heap_info *)p2;
  h->size = size;
  h->mprotect_size = size;
  THREAD_STAT(stat_n_heaps++);
  return h;
}
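
/* [Editorial note, not part of the original file.]  The alignment trick
   inside new_heap() in isolation: reserve twice the wanted size, round the
   start up to the next HEAP_MAX_SIZE boundary (a power of two), and return
   the slop on both sides to the kernel.  Error handling and the
   aligned_heap_area reuse path are omitted; kept under "#if 0".  */
#if 0
static char *
example_aligned_reserve (void)
{
  char *p1, *p2;
  unsigned long ul;

  p1 = (char *)MMAP(0, HEAP_MAX_SIZE<<1, PROT_NONE,
                    MAP_PRIVATE|MAP_NORESERVE);
  if (p1 == MAP_FAILED)
    return 0;
  p2 = (char *)(((unsigned long)p1 + (HEAP_MAX_SIZE-1)) & ~(HEAP_MAX_SIZE-1));
  ul = p2 - p1;
  if (ul)
    munmap(p1, ul);                                  /* leading slop */
  munmap(p2 + HEAP_MAX_SIZE, HEAP_MAX_SIZE - ul);    /* trailing slop */
  return p2;              /* HEAP_MAX_SIZE bytes, aligned to HEAP_MAX_SIZE */
}
#endif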

/* Grow a heap.  size is automatically rounded up to a
   multiple of the page size. */

static int
#if __STD_C
grow_heap(heap_info *h, long diff)
#else
grow_heap(h, diff) heap_info *h; long diff;
#endif
{
  size_t page_mask = malloc_getpagesize - 1;
  long new_size;

  diff = (diff + page_mask) & ~page_mask;
  new_size = (long)h->size + diff;
  if((unsigned long) new_size > (unsigned long) HEAP_MAX_SIZE)
    return -1;
  if((unsigned long) new_size > h->mprotect_size) {
    if (mprotect((char *)h + h->mprotect_size,
                 (unsigned long) new_size - h->mprotect_size,
                 PROT_READ|PROT_WRITE) != 0)
      return -2;
    h->mprotect_size = new_size;
  }

  h->size = new_size;
  return 0;
}
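
/* [Editorial note, not part of the original file.]  The page rounding used
   by grow_heap() above and new_heap(): with a 4096-byte page (page_mask ==
   0xfff), a request to grow by 1 byte becomes 4096, and a request of
   exactly 4096 stays unchanged.  Kept under "#if 0".  */
#if 0
static long
example_round_to_pages (long diff)
{
  size_t page_mask = malloc_getpagesize - 1;
  return (diff + page_mask) & ~page_mask;
}
#endif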

/* Shrink a heap.  */

static int
#if __STD_C
shrink_heap(heap_info *h, long diff)
#else
shrink_heap(h, diff) heap_info *h; long diff;
#endif
{
  long new_size;

  new_size = (long)h->size - diff;
  if(new_size < (long)sizeof(*h))
    return -1;
  /* Try to re-map the extra heap space freshly to save memory, and
     make it inaccessible. */
#ifdef _LIBC
  if (__builtin_expect (__libc_enable_secure, 0))
#else
  if (1)
#endif
    {
      if((char *)MMAP((char *)h + new_size, diff, PROT_NONE,
                      MAP_PRIVATE|MAP_FIXED) == (char *) MAP_FAILED)
        return -2;
      h->mprotect_size = new_size;
    }
#ifdef _LIBC
  else
    madvise ((char *)h + new_size, diff, MADV_DONTNEED);
#endif
  /*fprintf(stderr, "shrink %p %08lx\n", h, new_size);*/

  h->size = new_size;
  return 0;
}

/* Delete a heap. */

#define delete_heap(heap) \
  do {                                                       \
    if ((char *)(heap) + HEAP_MAX_SIZE == aligned_heap_area) \
      aligned_heap_area = NULL;                              \
    munmap((char*)(heap), HEAP_MAX_SIZE);                    \
  } while (0)

static int
internal_function
#if __STD_C
heap_trim(heap_info *heap, size_t pad)
#else
heap_trim(heap, pad) heap_info *heap; size_t pad;
#endif
{
  mstate ar_ptr = heap->ar_ptr;
  unsigned long pagesz = mp_.pagesize;
  mchunkptr top_chunk = top(ar_ptr), p, bck, fwd;
  heap_info *prev_heap;
  long new_size, top_size, extra;

  /* Can this heap go away completely? */
  while(top_chunk == chunk_at_offset(heap, sizeof(*heap))) {
    prev_heap = heap->prev;
    p = chunk_at_offset(prev_heap, prev_heap->size - (MINSIZE-2*SIZE_SZ));
    assert(p->size == (0|PREV_INUSE)); /* must be fencepost */
    p = prev_chunk(p);
    new_size = chunksize(p) + (MINSIZE-2*SIZE_SZ);
    assert(new_size>0 && new_size<(long)(2*MINSIZE));
    if(!prev_inuse(p))
      new_size += p->prev_size;
    assert(new_size>0 && new_size<HEAP_MAX_SIZE);
    if(new_size + (HEAP_MAX_SIZE - prev_heap->size) < pad + MINSIZE + pagesz)
      break;
    ar_ptr->system_mem -= heap->size;
    arena_mem -= heap->size;
    delete_heap(heap);
    heap = prev_heap;
    if(!prev_inuse(p)) { /* consolidate backward */
      p = prev_chunk(p);
      unlink(p, bck, fwd);
    }
    assert(((unsigned long)((char*)p + new_size) & (pagesz-1)) == 0);
    assert( ((char*)p + new_size) == ((char*)heap + heap->size) );
    top(ar_ptr) = top_chunk = p;
    set_head(top_chunk, new_size | PREV_INUSE);
    /*check_chunk(ar_ptr, top_chunk);*/
  }
  top_size = chunksize(top_chunk);
  extra = ((top_size - pad - MINSIZE + (pagesz-1))/pagesz - 1) * pagesz;
  if(extra < (long)pagesz)
    return 0;
  /* Try to shrink. */
  if(shrink_heap(heap, extra) != 0)
    return 0;
  ar_ptr->system_mem -= extra;
  arena_mem -= extra;

  /* Success. Adjust top accordingly. */
  set_head(top_chunk, (top_size - extra) | PREV_INUSE);
  /*check_chunk(ar_ptr, top_chunk);*/
  return 1;
}
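
/* [Editorial note, not part of the original file.]  The amount heap_trim()
   tries to hand back, computed in isolation.  Assuming pagesz == 4096,
   pad == 0 and MINSIZE == 32 (a common 64-bit value), a top chunk of
   0x42000 bytes yields extra == 0x41000: everything except one page-aligned
   remainder large enough for pad + MINSIZE goes to shrink_heap().  Kept
   under "#if 0".  */
#if 0
static long
example_trim_extra (long top_size, size_t pad, unsigned long pagesz)
{
  return ((top_size - pad - MINSIZE + (pagesz-1))/pagesz - 1) * pagesz;
}
#endif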

/* Create a new arena with initial size "size".  */

static mstate
_int_new_arena(size_t size)
{
  mstate a;
  heap_info *h;
  char *ptr;
  unsigned long misalign;

  h = new_heap(size + (sizeof(*h) + sizeof(*a) + MALLOC_ALIGNMENT),
               mp_.top_pad);
  if(!h) {
    /* Maybe size is too large to fit in a single heap.  So, just try
       to create a minimally-sized arena and let _int_malloc() attempt
       to deal with the large request via mmap_chunk().  */
    h = new_heap(sizeof(*h) + sizeof(*a) + MALLOC_ALIGNMENT, mp_.top_pad);
    if(!h)
      return 0;
  }
  a = h->ar_ptr = (mstate)(h+1);
  malloc_init_state(a);
  /*a->next = NULL;*/
  a->system_mem = a->max_system_mem = h->size;
  arena_mem += h->size;
#ifdef NO_THREADS
  if((unsigned long)(mp_.mmapped_mem + arena_mem + main_arena.system_mem) >
     mp_.max_total_mem)
    mp_.max_total_mem = mp_.mmapped_mem + arena_mem + main_arena.system_mem;
#endif

  /* Set up the top chunk, with proper alignment. */
  ptr = (char *)(a + 1);
  misalign = (unsigned long)chunk2mem(ptr) & MALLOC_ALIGN_MASK;
  if (misalign > 0)
    ptr += MALLOC_ALIGNMENT - misalign;
  top(a) = (mchunkptr)ptr;
  set_head(top(a), (((char*)h + h->size) - ptr) | PREV_INUSE);

  return a;
}
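
/* [Editorial note, not part of the original file.]  Layout produced by
   _int_new_arena(), from the start of the mmap()ed heap:

     h        heap_info            (bookkeeping for this heap)
     h + 1    struct malloc_state  (the arena itself, a == h->ar_ptr)
     a + 1    rounded up to MALLOC_ALIGNMENT: the top chunk, which extends
              to (char *)h + h->size

   A helper recomputing where the top chunk starts, kept under "#if 0":  */
#if 0
static char *
example_top_of_new_arena (heap_info *h)
{
  mstate a = (mstate)(h + 1);
  char *ptr = (char *)(a + 1);
  unsigned long misalign = (unsigned long)chunk2mem(ptr) & MALLOC_ALIGN_MASK;

  if (misalign > 0)
    ptr += MALLOC_ALIGNMENT - misalign;
  return ptr;
}
#endif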

static mstate
internal_function
#if __STD_C
arena_get2(mstate a_tsd, size_t size)
#else
arena_get2(a_tsd, size) mstate a_tsd; size_t size;
#endif
{
  mstate a;

  if(!a_tsd)
    a = a_tsd = &main_arena;
  else {
    a = a_tsd->next;
    if(!a) {
      /* This can only happen while initializing the new arena. */
      (void)mutex_lock(&main_arena.mutex);
      THREAD_STAT(++(main_arena.stat_lock_wait));
      return &main_arena;
    }
  }

  /* Check the global, circularly linked list for available arenas. */
  bool retried = false;
 repeat:
  do {
    if(!mutex_trylock(&a->mutex)) {
      if (retried)
        (void)mutex_unlock(&list_lock);
      THREAD_STAT(++(a->stat_lock_loop));
      tsd_setspecific(arena_key, (Void_t *)a);
      return a;
    }
    a = a->next;
  } while(a != a_tsd);

  /* If not even the list_lock can be obtained, try again.  This can
     happen during `atfork', or for example on systems where thread
     creation makes it temporarily impossible to obtain _any_
     locks. */
  if(!retried && mutex_trylock(&list_lock)) {
    /* We will block to not run in a busy loop.  */
    (void)mutex_lock(&list_lock);

    /* Since we blocked there might be an arena available now.  */
    retried = true;
    a = a_tsd;
    goto repeat;
  }

  /* Nothing immediately available, so generate a new arena.  */
  a = _int_new_arena(size);
  if(a)
    {
      tsd_setspecific(arena_key, (Void_t *)a);
      mutex_init(&a->mutex);
      mutex_lock(&a->mutex); /* remember result */

      /* Add the new arena to the global list.  */
      a->next = main_arena.next;
      atomic_write_barrier ();
      main_arena.next = a;

      THREAD_STAT(++(a->stat_lock_loop));
    }
  (void)mutex_unlock(&list_lock);

  return a;
}

#endif /* USE_ARENAS */

/*
 * Local variables:
 * c-basic-offset: 2
 * End:
 */