
lightning-sunbird  0.9+nobinonly
Classes | Defines | Typedefs | Functions | Variables
malloc.c File Reference
#include "xpcom-private.h"
#include <sys/types.h>
#include <unistd.h>
#include <stdio.h>
#include <errno.h>
#include <sys/param.h>
#include <pthread.h>
#include <fcntl.h>
#include <sys/mman.h>


Classes

struct  mallinfo
struct  malloc_chunk
struct  malloc_state

Defines

#define USE_MALLOC_LOCK
#define DEFAULT_TRIM_THRESHOLD   (256 * 1024)
#define __STD_C   0
#define Void_t   char
#define assert(x)   ((void)0)
#define INTERNAL_SIZE_T   size_t
#define SIZE_SZ   (sizeof(INTERNAL_SIZE_T))
#define MALLOC_ALIGNMENT   (2 * SIZE_SZ)
#define MALLOC_ALIGN_MASK   (MALLOC_ALIGNMENT - 1)
#define USE_PUBLIC_MALLOC_WRAPPERS
#define HAVE_MEMCPY
#define USE_MEMCPY   1
#define MALLOC_FAILURE_ACTION
#define HAVE_MMAP   1
#define MMAP_AS_MORECORE_SIZE   (1024 * 1024)
#define HAVE_MREMAP   0
#define M_MXFAST   1 /* Set maximum fastbin size */
#define M_NLBLKS   2 /* UNUSED in this malloc */
#define M_GRAIN   3 /* UNUSED in this malloc */
#define M_KEEP   4 /* UNUSED in this malloc */
#define M_TRIM_THRESHOLD   -1
#define M_TOP_PAD   -2
#define M_MMAP_THRESHOLD   -3
#define M_MMAP_MAX   -4
#define DEFAULT_MXFAST   64
#define DEFAULT_TOP_PAD   (0)
#define DEFAULT_MMAP_THRESHOLD   (128 * 1024)
#define DEFAULT_MMAP_MAX   (256)
#define TRIM_FASTBINS   0
#define MORECORE   sbrk
#define MORECORE_FAILURE   (-1)
#define MORECORE_CONTIGUOUS   1
#define malloc_getpagesize   (4096)
#define public_cALLOc   calloc
#define public_fREe   free
#define public_cFREe   cfree
#define public_mALLOc   malloc
#define public_mEMALIGn   memalign
#define public_rEALLOc   realloc
#define public_vALLOc   valloc
#define public_pVALLOc   pvalloc
#define public_mALLINFo   mallinfo
#define public_mALLOPt   mallopt
#define public_mTRIm   malloc_trim
#define public_mSTATs   malloc_stats
#define public_mUSABLe   malloc_usable_size
#define MALLOC_PREACTION   pthread_mutex_lock(&mALLOC_MUTEx)
#define MALLOC_POSTACTION   pthread_mutex_unlock(&mALLOC_MUTEx)
#define MALLOC_COPY(dest, src, nbytes, overlap)   ((overlap) ? memmove(dest, src, nbytes) : memcpy(dest, src, nbytes))
#define MALLOC_ZERO(dest, nbytes)   memset(dest, 0, nbytes)
#define MMAP(addr, size, prot, flags)
#define chunk2mem(p)   ((Void_t*)((char*)(p) + 2*SIZE_SZ))
#define mem2chunk(mem)   ((mchunkptr)((char*)(mem) - 2*SIZE_SZ))
#define MIN_CHUNK_SIZE   (sizeof(struct malloc_chunk))
#define MINSIZE   ((MIN_CHUNK_SIZE+MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK)
#define aligned_OK(m)   (((unsigned long)((m)) & (MALLOC_ALIGN_MASK)) == 0)
#define IS_NEGATIVE(x)
#define request2size(req)
#define checked_request2size(req, sz)
#define PREV_INUSE   0x1
#define IS_MMAPPED   0x2
#define SIZE_BITS   (PREV_INUSE|IS_MMAPPED)
#define next_chunk(p)   ((mchunkptr)( ((char*)(p)) + ((p)->size & ~PREV_INUSE) ))
#define prev_chunk(p)   ((mchunkptr)( ((char*)(p)) - ((p)->prev_size) ))
#define chunk_at_offset(p, s)   ((mchunkptr)(((char*)(p)) + (s)))
#define inuse(p)   ((((mchunkptr)(((char*)(p))+((p)->size & ~PREV_INUSE)))->size) & PREV_INUSE)
#define prev_inuse(p)   ((p)->size & PREV_INUSE)
#define chunk_is_mmapped(p)   ((p)->size & IS_MMAPPED)
#define set_inuse(p)   ((mchunkptr)(((char*)(p)) + ((p)->size & ~PREV_INUSE)))->size |= PREV_INUSE
#define clear_inuse(p)   ((mchunkptr)(((char*)(p)) + ((p)->size & ~PREV_INUSE)))->size &= ~(PREV_INUSE)
#define inuse_bit_at_offset(p, s)   (((mchunkptr)(((char*)(p)) + (s)))->size & PREV_INUSE)
#define set_inuse_bit_at_offset(p, s)   (((mchunkptr)(((char*)(p)) + (s)))->size |= PREV_INUSE)
#define clear_inuse_bit_at_offset(p, s)   (((mchunkptr)(((char*)(p)) + (s)))->size &= ~(PREV_INUSE))
#define chunksize(p)   ((p)->size & ~(SIZE_BITS))
#define set_head_size(p, s)   ((p)->size = (((p)->size & PREV_INUSE) | (s)))
#define set_head(p, s)   ((p)->size = (s))
#define set_foot(p, s)   (((mchunkptr)((char*)(p) + (s)))->prev_size = (s))
#define NBINS   128
#define bin_at(m, i)   ((mbinptr)((char*)&((m)->bins[(i)<<1]) - (SIZE_SZ<<1)))
#define next_bin(b)   ((mbinptr)((char*)(b) + (sizeof(mchunkptr)<<1)))
#define first(b)   ((b)->fd)
#define last(b)   ((b)->bk)
#define unlink(P, BK, FD)
#define NSMALLBINS   64
#define SMALLBIN_WIDTH   8
#define MIN_LARGE_SIZE   512
#define in_smallbin_range(sz)   ((sz) < MIN_LARGE_SIZE)
#define smallbin_index(sz)   (((unsigned)(sz)) >> 3)
#define largebin_index(sz)
#define bin_index(sz)   ((in_smallbin_range(sz)) ? smallbin_index(sz) : largebin_index(sz))
#define unsorted_chunks(M)   (bin_at(M, 1))
#define initial_top(M)   (unsorted_chunks(M))
#define BINMAPSHIFT   5
#define BITSPERMAP   (1U << BINMAPSHIFT)
#define BINMAPSIZE   (NBINS / BITSPERMAP)
#define idx2block(i)   ((i) >> BINMAPSHIFT)
#define idx2bit(i)   ((1U << ((i) & ((1U << BINMAPSHIFT)-1))))
#define mark_bin(m, i)   ((m)->binmap[idx2block(i)] |= idx2bit(i))
#define unmark_bin(m, i)   ((m)->binmap[idx2block(i)] &= ~(idx2bit(i)))
#define get_binmap(m, i)   ((m)->binmap[idx2block(i)] & idx2bit(i))
#define fastbin_index(sz)   ((((unsigned int)(sz)) >> 3) - 2)
#define MAX_FAST_SIZE   80
#define NFASTBINS   (fastbin_index(request2size(MAX_FAST_SIZE))+1)
#define have_fastchunks(M)   (((M)->max_fast & 1U) == 0)
#define clear_fastchunks(M)   ((M)->max_fast |= 1U)
#define set_fastchunks(M)   ((M)->max_fast &= ~1U)
#define req2max_fast(s)   (((((s) == 0)? SMALLBIN_WIDTH: request2size(s))) | 1U)
#define NONCONTIGUOUS_REGIONS   ((char*)(-3))
#define get_malloc_state()   (&(av_))
#define check_chunk(P)
#define check_free_chunk(P)
#define check_inuse_chunk(P)
#define check_remalloced_chunk(P, N)
#define check_malloced_chunk(P, N)
#define check_malloc_state()

Typedefs

typedef struct malloc_chunk* mchunkptr
typedef struct malloc_chunk* mbinptr
typedef struct malloc_chunk* mfastbinptr
typedef struct malloc_state* mstate

Functions

Void_t* memset ()
Void_t* memcpy ()
Void_t* memmove ()
Void_t* public_mALLOc ()
void public_fREe ()
Void_t* public_rEALLOc ()
Void_t* public_mEMALIGn ()
Void_t* public_vALLOc ()
Void_t* public_pVALLOc ()
Void_t* public_cALLOc ()
void public_cFREe ()
int public_mTRIm ()
size_t public_mUSABLe ()
void public_mSTATs ()
int public_mALLOPt ()
struct mallinfo public_mALLINFo ()
static Void_t* mALLOc ()
static void fREe ()
static Void_t* rEALLOc ()
static Void_t* mEMALIGn ()
static Void_t* vALLOc ()
static Void_t* pVALLOc ()
static Void_t* cALLOc ()
static void cFREe ()
static int mTRIm ()
static size_t mUSABLe ()
static void mSTATs ()
static int mALLOPt ()
static struct mallinfo mALLINFo ()
Void_t* public_mALLOc (size_t bytes)
void public_fREe (Void_t *m)
Void_t* public_rEALLOc (Void_t *m, size_t bytes)
Void_t* public_mEMALIGn (size_t alignment, size_t bytes)
Void_t* public_vALLOc (size_t bytes)
Void_t* public_pVALLOc (size_t bytes)
Void_t* public_cALLOc (size_t n, size_t elem_size)
void public_cFREe (Void_t *m)
int public_mTRIm (size_t s)
size_t public_mUSABLe (Void_t *m)
int public_mALLOPt (int p, int v)
static void malloc_init_state (mstate av)
static Void_t* sYSMALLOc ()
static int sYSTRIm ()
static void malloc_consolidate ()
static Void_t* sYSMALLOc (INTERNAL_SIZE_T nb, mstate av)
static int sYSTRIm (size_t pad, mstate av)
Void_t* mALLOc (size_t bytes)
void fREe (Void_t *mem)
static void malloc_consolidate (mstate av)
Void_t* rEALLOc (Void_t *oldmem, size_t bytes)
Void_t* mEMALIGn (size_t alignment, size_t bytes)
Void_t* cALLOc (size_t n_elements, size_t elem_size)
void cFREe (Void_t *mem)
Void_t* vALLOc (size_t bytes)
Void_t* pVALLOc (size_t bytes)
int mTRIm (size_t pad)
size_t mUSABLe (Void_t *mem)
int mALLOPt (int param_number, int value)

Variables

static pthread_mutex_t mALLOC_MUTEx = PTHREAD_MUTEX_INITIALIZER
static int dev_zero_fd = -1
static struct malloc_state av_

Class Documentation

struct mallinfo

Definition at line 652 of file malloc.c.

Class Members
int arena
int fordblks
int fsmblks
int hblkhd
int hblks
int keepcost
int ordblks
int smblks
int uordblks
int usmblks
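
The struct carries the allocator's statistics in plain ints. A minimal usage sketch (not part of the generated documentation; it assumes a header such as malloc.h declares struct mallinfo and the public mallinfo() wrapper documented below):

#include <stdio.h>
#include <malloc.h>   /* assumed to declare struct mallinfo and mallinfo() */

static void print_malloc_stats(void)
{
  struct mallinfo mi = mallinfo();        /* public_mALLINFo expands to mallinfo */
  printf("arena (sbrked bytes)   = %d\n", mi.arena);
  printf("free chunks (ordblks)  = %d\n", mi.ordblks);
  printf("free bytes (fordblks)  = %d\n", mi.fordblks);
  printf("mmapped bytes (hblkhd) = %d\n", mi.hblkhd);
}
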
struct malloc_chunk

Definition at line 1587 of file malloc.c.

Class Members
struct malloc_chunk * bk
struct malloc_chunk * fd
INTERNAL_SIZE_T prev_size
INTERNAL_SIZE_T size
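
These four members implement the boundary-tag chunk layout used throughout malloc.c: prev_size and size bracket the payload, and fd/bk are meaningful only while the chunk sits free in a bin (in-use chunks reuse that space for user data). An illustrative sketch of how chunk2mem/mem2chunk, documented below, relate the header to the user pointer:

/*  chunk-> +-----------------------------------------------+
 *          | prev_size (size of previous chunk, if free)   |
 *          +-----------------------------------------------+
 *          | size      (low bits: PREV_INUSE, IS_MMAPPED)  |
 *  mem  -> +-----------------------------------------------+  <- chunk2mem(chunk)
 *          | fd, bk    (only while the chunk is free)      |
 *          | ...       user data when the chunk is in use  |
 *          +-----------------------------------------------+
 */
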
struct malloc_state

Definition at line 2119 of file malloc.c.

Class Members
unsigned int binmap
mchunkptr bins
mfastbinptr fastbins
mchunkptr last_remainder
INTERNAL_SIZE_T max_fast
INTERNAL_SIZE_T max_mmapped_mem
int max_n_mmaps
INTERNAL_SIZE_T max_sbrked_mem
INTERNAL_SIZE_T max_total_mem
INTERNAL_SIZE_T mmap_threshold
INTERNAL_SIZE_T mmapped_mem
int n_mmaps
int n_mmaps_max
unsigned int pagesize
char * sbrk_base
INTERNAL_SIZE_T sbrked_mem
mchunkptr top
INTERNAL_SIZE_T top_pad
unsigned long trim_threshold

Define Documentation

#define __STD_C   0

Definition at line 326 of file malloc.c.

#define aligned_OK (   m)    (((unsigned long)((m)) & (MALLOC_ALIGN_MASK)) == 0)

Definition at line 1706 of file malloc.c.

#define assert (   x)    ((void)0)

Definition at line 398 of file malloc.c.

#define bin_at (   m,
  i 
)    ((mbinptr)((char*)&((m)->bins[(i)<<1]) - (SIZE_SZ<<1)))

Definition at line 1914 of file malloc.c.

#define bin_index (   sz)    ((in_smallbin_range(sz)) ? smallbin_index(sz) : largebin_index(sz))

Definition at line 1977 of file malloc.c.

#define BINMAPSHIFT   5

Definition at line 2035 of file malloc.c.

#define BINMAPSIZE   (NBINS / BITSPERMAP)

Definition at line 2037 of file malloc.c.

#define BITSPERMAP   (1U << BINMAPSHIFT)

Definition at line 2036 of file malloc.c.

#define check_chunk (   P)

Definition at line 2264 of file malloc.c.

#define check_free_chunk (   P)

Definition at line 2265 of file malloc.c.

#define check_inuse_chunk (   P)

Definition at line 2266 of file malloc.c.

#define check_malloc_state ( )

Definition at line 2269 of file malloc.c.

#define check_malloced_chunk (   P,
  N 
)

Definition at line 2268 of file malloc.c.

#define check_remalloced_chunk (   P,
  N 
)

Definition at line 2267 of file malloc.c.

#define checked_request2size (   req,
  sz 
)
Value:
if (IS_NEGATIVE(req)) {                                               \
     MALLOC_FAILURE_ACTION;                                              \
     return 0;                                                           \
   }                                                                     \
   (sz) = request2size(req);

Definition at line 1734 of file malloc.c.

#define chunk2mem (   p)    ((Void_t*)((char*)(p) + 2*SIZE_SZ))

Definition at line 1694 of file malloc.c.

#define chunk_at_offset (   p,
  s 
)    ((mchunkptr)(((char*)(p)) + (s)))

Definition at line 1776 of file malloc.c.

#define chunk_is_mmapped (   p)    ((p)->size & IS_MMAPPED)

Definition at line 1805 of file malloc.c.

#define chunksize (   p)    ((p)->size & ~(SIZE_BITS))

Definition at line 1837 of file malloc.c.

#define clear_fastchunks (   M)    ((M)->max_fast |= 1U)

Definition at line 2084 of file malloc.c.

#define clear_inuse (   p)    ((mchunkptr)(((char*)(p)) + ((p)->size & ~PREV_INUSE)))->size &= ~(PREV_INUSE)

Definition at line 1813 of file malloc.c.

#define clear_inuse_bit_at_offset (   p,
  s 
)    (((mchunkptr)(((char*)(p)) + (s)))->size &= ~(PREV_INUSE))

Definition at line 1825 of file malloc.c.

#define DEFAULT_MMAP_MAX   (256)

Definition at line 885 of file malloc.c.

#define DEFAULT_MMAP_THRESHOLD   (128 * 1024)

Definition at line 861 of file malloc.c.

#define DEFAULT_MXFAST   64

Definition at line 722 of file malloc.c.

#define DEFAULT_TOP_PAD   (0)

Definition at line 819 of file malloc.c.

#define DEFAULT_TRIM_THRESHOLD   (256 * 1024)

Definition at line 2 of file malloc.c.

#define fastbin_index (   sz)    ((((unsigned int)(sz)) >> 3) - 2)

Definition at line 2064 of file malloc.c.
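
For example, assuming 4-byte INTERNAL_SIZE_T (so the smallest chunk is 16 bytes), fastbin_index(16) = (16 >> 3) - 2 = 0 and fastbin_index(24) = 1; request2size(MAX_FAST_SIZE) is then 88, so NFASTBINS works out to fastbin_index(88) + 1 = 10.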

#define first (   b)    ((b)->fd)

Definition at line 1924 of file malloc.c.

#define get_binmap (   m,
  i 
)    ((m)->binmap[idx2block(i)] & idx2bit(i))

Definition at line 2044 of file malloc.c.

#define get_malloc_state ( )    (&(av_))

Definition at line 2187 of file malloc.c.

#define have_fastchunks (   M)    (((M)->max_fast & 1U) == 0)

Definition at line 2083 of file malloc.c.

#define HAVE_MEMCPY

Definition at line 517 of file malloc.c.

#define HAVE_MMAP   1

Definition at line 581 of file malloc.c.

#define HAVE_MREMAP   0

Definition at line 614 of file malloc.c.

#define idx2bit (   i)    ((1U << ((i) & ((1U << BINMAPSHIFT)-1))))

Definition at line 2040 of file malloc.c.

#define idx2block (   i)    ((i) >> BINMAPSHIFT)

Definition at line 2039 of file malloc.c.

#define in_smallbin_range (   sz)    ((sz) < MIN_LARGE_SIZE)

Definition at line 1965 of file malloc.c.

#define initial_top (   M)    (unsorted_chunks(M))

Definition at line 2020 of file malloc.c.

#define INTERNAL_SIZE_T   size_t

Definition at line 429 of file malloc.c.

#define inuse (   p)    ((((mchunkptr)(((char*)(p))+((p)->size & ~PREV_INUSE)))->size) & PREV_INUSE)

Definition at line 1794 of file malloc.c.

#define inuse_bit_at_offset (   p,
  s 
)    (((mchunkptr)(((char*)(p)) + (s)))->size & PREV_INUSE)

Definition at line 1819 of file malloc.c.

#define IS_MMAPPED   0x2

Definition at line 1756 of file malloc.c.

#define IS_NEGATIVE (   x)
Value:
((unsigned long)x >= \
   (unsigned long)((((INTERNAL_SIZE_T)(1)) << ((SIZE_SZ)*8 - 1))))

Definition at line 1714 of file malloc.c.

#define largebin_index (   sz)
Value:
(((((unsigned long)(sz)) >>  6) <= 32)?  56 + (((unsigned long)(sz)) >>  6): \
 ((((unsigned long)(sz)) >>  9) <= 20)?  91 + (((unsigned long)(sz)) >>  9): \
 ((((unsigned long)(sz)) >> 12) <= 10)? 110 + (((unsigned long)(sz)) >> 12): \
 ((((unsigned long)(sz)) >> 15) <=  4)? 119 + (((unsigned long)(sz)) >> 15): \
 ((((unsigned long)(sz)) >> 18) <=  2)? 124 + (((unsigned long)(sz)) >> 18): \
                                        126)

Definition at line 1969 of file malloc.c.
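
Worked examples: a chunk of size 640 has 640 >> 6 = 10, which is <= 32, so it lands in bin 56 + 10 = 66; a chunk of size 20480 fails the first two tests (20480 >> 6 = 320, 20480 >> 9 = 40) but 20480 >> 12 = 5 <= 10, so it lands in bin 110 + 5 = 115.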

#define last (   b)    ((b)->bk)

Definition at line 1925 of file malloc.c.

#define M_GRAIN   3 /* UNUSED in this malloc */

Definition at line 669 of file malloc.c.

#define M_KEEP   4 /* UNUSED in this malloc */

Definition at line 670 of file malloc.c.

#define M_MMAP_MAX   -4

Definition at line 691 of file malloc.c.

#define M_MMAP_THRESHOLD   -3

Definition at line 687 of file malloc.c.

#define M_MXFAST   1 /* Set maximum fastbin size */

Definition at line 667 of file malloc.c.

#define M_NLBLKS   2 /* UNUSED in this malloc */

Definition at line 668 of file malloc.c.

#define M_TOP_PAD   -2

Definition at line 683 of file malloc.c.

#define M_TRIM_THRESHOLD   -1

Definition at line 679 of file malloc.c.

#define MALLOC_ALIGN_MASK   (MALLOC_ALIGNMENT - 1)

Definition at line 452 of file malloc.c.

#define MALLOC_ALIGNMENT   (2 * SIZE_SZ)

Definition at line 448 of file malloc.c.

#define MALLOC_COPY (   dest,
  src,
  nbytes,
  overlap 
)    ((overlap) ? memmove(dest, src, nbytes) : memcpy(dest, src, nbytes))

Definition at line 1351 of file malloc.c.

#define MALLOC_FAILURE_ACTION

Definition at line 564 of file malloc.c.

#define malloc_getpagesize   (4096)

Definition at line 1022 of file malloc.c.

#define MALLOC_POSTACTION   pthread_mutex_unlock(&mALLOC_MUTEx)

Definition at line 1191 of file malloc.c.

#define MALLOC_PREACTION   pthread_mutex_lock(&mALLOC_MUTEx)

Definition at line 1190 of file malloc.c.

#define MALLOC_ZERO (   dest,
  nbytes 
)    memset(dest, 0, nbytes)

Definition at line 1353 of file malloc.c.

#define mark_bin (   m,
  i 
)    ((m)->binmap[idx2block(i)] |= idx2bit(i))

Definition at line 2042 of file malloc.c.

#define MAX_FAST_SIZE   80

Definition at line 2067 of file malloc.c.

#define mem2chunk (   mem)    ((mchunkptr)((char*)(mem) - 2*SIZE_SZ))

Definition at line 1695 of file malloc.c.

#define MIN_CHUNK_SIZE   (sizeof(struct malloc_chunk))

Definition at line 1698 of file malloc.c.

#define MIN_LARGE_SIZE   512

Definition at line 1963 of file malloc.c.

#define MINSIZE   ((MIN_CHUNK_SIZE+MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK)

Definition at line 1702 of file malloc.c.

#define MMAP (   addr,
  size,
  prot,
  flags 
)
Value:
((dev_zero_fd < 0) ? \
 (dev_zero_fd = open("/dev/zero", O_RDWR), \
  mmap((addr), (size), (prot), (flags), dev_zero_fd, 0)) : \
   mmap((addr), (size), (prot), (flags), dev_zero_fd, 0))

Definition at line 1423 of file malloc.c.
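
The wrapper emulates anonymous mappings: the first call opens /dev/zero and caches the descriptor in the static dev_zero_fd variable listed under Variables above, and every subsequent call maps pages backed by that descriptor.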

#define MMAP_AS_MORECORE_SIZE   (1024 * 1024)

Definition at line 599 of file malloc.c.

#define MORECORE   sbrk

Definition at line 938 of file malloc.c.

#define MORECORE_CONTIGUOUS   1

Definition at line 965 of file malloc.c.

#define MORECORE_FAILURE   (-1)

Definition at line 950 of file malloc.c.

#define NBINS   128

Definition at line 1909 of file malloc.c.

#define next_bin (   b)    ((mbinptr)((char*)(b) + (sizeof(mchunkptr)<<1)))

Definition at line 1919 of file malloc.c.

#define next_chunk (   p)    ((mchunkptr)( ((char*)(p)) + ((p)->size & ~PREV_INUSE) ))

Definition at line 1766 of file malloc.c.

#define NFASTBINS   (fastbin_index(request2size(MAX_FAST_SIZE))+1)

Definition at line 2069 of file malloc.c.

#define NONCONTIGUOUS_REGIONS   ((char*)(-3))

Definition at line 2111 of file malloc.c.

#define NSMALLBINS   64

Definition at line 1961 of file malloc.c.

#define prev_chunk (   p)    ((mchunkptr)( ((char*)(p)) - ((p)->prev_size) ))

Definition at line 1771 of file malloc.c.

#define PREV_INUSE   0x1

Definition at line 1751 of file malloc.c.

#define prev_inuse (   p)    ((p)->size & PREV_INUSE)

Definition at line 1800 of file malloc.c.

#define public_cALLOc   calloc

Definition at line 1066 of file malloc.c.

#define public_cFREe   cfree

Definition at line 1068 of file malloc.c.

#define public_fREe   free

Definition at line 1067 of file malloc.c.

#define public_mALLINFo   mallinfo

Definition at line 1074 of file malloc.c.

#define public_mALLOc   malloc

Definition at line 1069 of file malloc.c.

#define public_mALLOPt   mallopt

Definition at line 1075 of file malloc.c.

#define public_mEMALIGn   memalign

Definition at line 1070 of file malloc.c.

#define public_mSTATs   malloc_stats

Definition at line 1077 of file malloc.c.

#define public_mTRIm   malloc_trim

Definition at line 1076 of file malloc.c.

#define public_mUSABLe   malloc_usable_size

Definition at line 1078 of file malloc.c.

#define public_pVALLOc   pvalloc

Definition at line 1073 of file malloc.c.

#define public_rEALLOc   realloc

Definition at line 1071 of file malloc.c.

#define public_vALLOc   valloc

Definition at line 1072 of file malloc.c.

#define req2max_fast (   s)    (((((s) == 0)? SMALLBIN_WIDTH: request2size(s))) | 1U)

Definition at line 2092 of file malloc.c.
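
The | 1U keeps the low bit of max_fast set: that bit doubles as the "no fast chunks present" flag read by have_fastchunks/clear_fastchunks/set_fastchunks above, so the rounded size and the flag share one field. Passing 0 stores SMALLBIN_WIDTH | 1 = 9, which is smaller than any real chunk and therefore disables fastbin use.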

#define request2size (   req)
Value:
(((req) + SIZE_SZ + MALLOC_ALIGN_MASK < MINSIZE)  ?            \
   MINSIZE :                                                     \
   ((req) + SIZE_SZ + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK)

Definition at line 1721 of file malloc.c.
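
Worked examples, assuming 4-byte INTERNAL_SIZE_T (SIZE_SZ = 4, MALLOC_ALIGN_MASK = 7, MINSIZE = 16): request2size(1) = 1 + 4 + 7 = 12, which is below MINSIZE, so the result is 16; request2size(20) = (20 + 4 + 7) & ~7 = 24.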

#define set_fastchunks (   M)    ((M)->max_fast &= ~1U)

Definition at line 2085 of file malloc.c.

#define set_foot (   p,
  s 
)    (((mchunkptr)((char*)(p) + (s)))->prev_size = (s))

Definition at line 1849 of file malloc.c.

#define set_head (   p,
  s 
)    ((p)->size = (s))

Definition at line 1845 of file malloc.c.

#define set_head_size (   p,
  s 
)    ((p)->size = (((p)->size & PREV_INUSE) | (s)))

Definition at line 1841 of file malloc.c.

#define set_inuse (   p)    ((mchunkptr)(((char*)(p)) + ((p)->size & ~PREV_INUSE)))->size |= PREV_INUSE

Definition at line 1810 of file malloc.c.

#define set_inuse_bit_at_offset (   p,
  s 
)    (((mchunkptr)(((char*)(p)) + (s)))->size |= PREV_INUSE)

Definition at line 1822 of file malloc.c.

#define SIZE_BITS   (PREV_INUSE|IS_MMAPPED)

Definition at line 1761 of file malloc.c.

#define SIZE_SZ   (sizeof(INTERNAL_SIZE_T))

Definition at line 433 of file malloc.c.

#define smallbin_index (   sz)    (((unsigned)(sz)) >> 3)

Definition at line 1967 of file malloc.c.

#define SMALLBIN_WIDTH   8

Definition at line 1962 of file malloc.c.

#define TRIM_FASTBINS   0

Definition at line 911 of file malloc.c.

#define unlink (   P,
  BK,
  FD 
)
Value:
{                                            \
  FD = P->fd;                                                          \
  BK = P->bk;                                                          \
  FD->bk = BK;                                                         \
  BK->fd = FD;                                                         \
}

Definition at line 1931 of file malloc.c.
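
The macro splices P out of its circular doubly-linked bin list by pointing its neighbours at each other; BK and FD are caller-supplied scratch variables, and P's own fd/bk links are left as-is because the caller immediately reuses or re-links the chunk.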

#define unmark_bin (   m,
  i 
)    ((m)->binmap[idx2block(i)] &= ~(idx2bit(i)))

Definition at line 2043 of file malloc.c.

#define unsorted_chunks (   M)    (bin_at(M, 1))

Definition at line 1996 of file malloc.c.

#define USE_MALLOC_LOCK

Definition at line 1 of file malloc.c.

#define USE_MEMCPY   1

Definition at line 521 of file malloc.c.

#define USE_PUBLIC_MALLOC_WRAPPERS

Definition at line 495 of file malloc.c.

#define Void_t   char

Definition at line 340 of file malloc.c.


Typedef Documentation

typedef struct malloc_chunk* mbinptr

Definition at line 1907 of file malloc.c.

typedef struct malloc_chunk* mchunkptr

Definition at line 1597 of file malloc.c.

typedef struct malloc_chunk* mfastbinptr

Definition at line 2061 of file malloc.c.

typedef struct malloc_state* mstate

Definition at line 2163 of file malloc.c.


Function Documentation

static Void_t* cALLOc ( ) [static]


Void_t* cALLOc ( size_t  n_elements,
size_t  elem_size 
)

Definition at line 4149 of file malloc.c.

{
  mchunkptr p;
  INTERNAL_SIZE_T clearsize;
  int nclears;
  INTERNAL_SIZE_T* d;

  Void_t* mem = mALLOc(n_elements * elem_size);

  if (mem != 0) {
    p = mem2chunk(mem);
    if (!chunk_is_mmapped(p)) {  /* don't need to clear mmapped space */

      /*
        Unroll clear of <= 36 bytes (72 if 8byte sizes)
        We know that contents have an odd number of
        INTERNAL_SIZE_T-sized words; minimally 3.
      */

      d = (INTERNAL_SIZE_T*)mem;
      clearsize = chunksize(p) - SIZE_SZ;
      nclears = clearsize / sizeof(INTERNAL_SIZE_T);
      assert(nclears >= 3);

      if (nclears > 9)
        MALLOC_ZERO(d, clearsize);

      else {
        *(d+0) = 0;
        *(d+1) = 0;
        *(d+2) = 0;
        if (nclears > 4) {
          *(d+3) = 0;
          *(d+4) = 0;
          if (nclears > 6) {
            *(d+5) = 0;
            *(d+6) = 0;
            if (nclears > 8) {
              *(d+7) = 0;
              *(d+8) = 0;
            }
          }
        }
      }
    }
  }
  return mem;
}


static void cFREe ( ) [static]


void cFREe ( Void_t *  mem)

Definition at line 4209 of file malloc.c.

{
  fREe(mem);
}


static void fREe ( ) [static]


void fREe ( Void_t *  mem)

Definition at line 3451 of file malloc.c.

{
  mstate av = get_malloc_state();

  mchunkptr       p;           /* chunk corresponding to mem */
  INTERNAL_SIZE_T size;        /* its size */
  mfastbinptr*    fb;          /* associated fastbin */
  mchunkptr       nextchunk;   /* next contiguous chunk */
  INTERNAL_SIZE_T nextsize;    /* its size */
  int             nextinuse;   /* true if nextchunk is used */
  INTERNAL_SIZE_T prevsize;    /* size of previous contiguous chunk */
  mchunkptr       bck;         /* misc temp for linking */
  mchunkptr       fwd;         /* misc temp for linking */


  /* free(0) has no effect */
  if (mem != 0) {

    p = mem2chunk(mem);
    check_inuse_chunk(p);

    size = chunksize(p);

    /*
      If eligible, place chunk on a fastbin so it can be found
      and used quickly in malloc.
    */

    if ((unsigned long)size <= (unsigned long)av->max_fast

#if TRIM_FASTBINS
        /* 
           If TRIM_FASTBINS set, don't place chunks
           bordering top into fastbins
        */
        && (chunk_at_offset(p, size) != av->top)
#endif
        ) {

      set_fastchunks(av);
      fb = &(av->fastbins[fastbin_index(size)]);
      p->fd = *fb;
      *fb = p;
    }

    /*
       Consolidate non-mmapped chunks as they arrive.
    */

    else if (!chunk_is_mmapped(p)) {

      nextchunk = chunk_at_offset(p, size);

      /* consolidate backward */
      if (!prev_inuse(p)) {
        prevsize = p->prev_size;
        size += prevsize;
        p = chunk_at_offset(p, -((long) prevsize));
        unlink(p, bck, fwd);
      }

      nextsize = chunksize(nextchunk);

      if (nextchunk != av->top) {

        /* get and clear inuse bit */
        nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
        set_head(nextchunk, nextsize);

        /* consolidate forward */
        if (!nextinuse) {
          unlink(nextchunk, bck, fwd);
          size += nextsize;
        }

        /*
          Place chunk in unsorted chunk list. Chunks are
          not placed into regular bins until after they have
          been given one chance to be used in malloc.
        */

        bck = unsorted_chunks(av);
        fwd = bck->fd;
        p->bk = bck;
        p->fd = fwd;
        bck->fd = p;
        fwd->bk = p;

        set_head(p, size | PREV_INUSE);
        set_foot(p, size);
      }

      /*
         If the chunk borders the current high end of memory,
         consolidate into top
      */

      else {
        size += nextsize;
        set_head(p, size | PREV_INUSE);
        av->top = p;

        /*
          If the total unused topmost memory exceeds trim
          threshold, ask malloc_trim to reduce top. 

          Unless max_fast is 0, we don't know if there are fastbins
          bordering top, so we cannot tell for sure whether threshold has
          been reached unless fastbins are consolidated.  But we don't
          want to consolidate on each free.  As a compromise,
          consolidation is performed if half the threshold is
          reached.

        */

        if ((unsigned long)(size) > (unsigned long)(av->trim_threshold / 2)) {
          if (have_fastchunks(av)) {
            malloc_consolidate(av);
            size = chunksize(av->top);
          }

          if ((unsigned long)(size) > (unsigned long)(av->trim_threshold)) 
            sYSTRIm(av->top_pad, av);
        }
      }
    }

    /*
      If the chunk was allocated via mmap, release via munmap()
      Note that if HAVE_MMAP is false but chunk_is_mmapped is
      true, then user must have overwritten memory. There's nothing
      we can do to catch this error unless DEBUG is set, in which case
      check_inuse_chunk (above) will have triggered error.
    */

    else {
#if HAVE_MMAP
      int ret;
      INTERNAL_SIZE_T offset = p->prev_size;
      av->n_mmaps--;
      av->mmapped_mem -= (size + offset);
      ret = munmap((char*)p - offset, size + offset);
      /* munmap returns non-zero on failure */
      assert(ret == 0);
#endif
    }
  }
}


struct mallinfo mALLINFo ( ) [static, read]

Definition at line 4326 of file malloc.c.

{
  mstate av = get_malloc_state();
  struct mallinfo mi;
  int i;
  mbinptr b;
  mchunkptr p;
  INTERNAL_SIZE_T avail;
  int navail;
  int nfastblocks;
  int fastbytes;

  /* Ensure initialization */
  if (av->top == 0)  malloc_consolidate(av);

  check_malloc_state();

  /* Account for top */
  avail = chunksize(av->top);
  navail = 1;  /* top always exists */

  /* traverse fastbins */
  nfastblocks = 0;
  fastbytes = 0;

  for (i = 0; i < NFASTBINS; ++i) {
    for (p = av->fastbins[i]; p != 0; p = p->fd) {
      ++nfastblocks;
      fastbytes += chunksize(p);
    }
  }

  avail += fastbytes;

  /* traverse regular bins */
  for (i = 1; i < NBINS; ++i) {
    b = bin_at(av, i);
    for (p = last(b); p != b; p = p->bk) {
      avail += chunksize(p);
      navail++;
    }
  }

  mi.smblks = nfastblocks;
  mi.ordblks = navail;
  mi.fordblks = avail;
  mi.uordblks = av->sbrked_mem - avail;
  mi.arena = av->sbrked_mem;
  mi.hblks = av->n_mmaps;
  mi.hblkhd = av->mmapped_mem;
  mi.fsmblks = fastbytes;
  mi.keepcost = chunksize(av->top);
  mi.usmblks = av->max_total_mem;
  return mi;
}


static Void_t* mALLOc ( ) [static]


Void_t* mALLOc ( size_t  bytes)

Definition at line 3067 of file malloc.c.

{
  mstate av = get_malloc_state();

  INTERNAL_SIZE_T nb;               /* normalized request size */
  unsigned int    idx;              /* associated bin index */
  mbinptr         bin;              /* associated bin */
  mfastbinptr*    fb;               /* associated fastbin */

  mchunkptr       victim;           /* inspected/selected chunk */
  INTERNAL_SIZE_T size;             /* its size */
  int             victim_index;     /* its bin index */

  mchunkptr       remainder;        /* remainder from a split */
  long            remainder_size;   /* its size */

  unsigned int    block;            /* bit map traverser */
  unsigned int    bit;              /* bit map traverser */
  unsigned int    map;              /* current word of binmap */

  mchunkptr       fwd;              /* misc temp for linking */
  mchunkptr       bck;              /* misc temp for linking */


  /*
    Check request for legality and convert to internal form, nb.
    This rejects negative arguments when size_t is treated as
    signed. It also rejects arguments that are so large that the size
    appears negative when aligned and padded.  The converted form
    adds SIZE_SZ bytes overhead plus possibly more to obtain necessary
    alignment and/or to obtain a size of at least MINSIZE, the
    smallest allocatable size.
  */

  checked_request2size(bytes, nb);

  /*
    If the size qualifies as a fastbin, first check corresponding bin.
    This code is safe to execute even if av not yet initialized, so we
    can try it first, which saves some time on this fast path.
  */

  if (nb <= av->max_fast) { 
    fb = &(av->fastbins[(fastbin_index(nb))]);
    if ( (victim = *fb) != 0) {
      *fb = victim->fd;
      check_remalloced_chunk(victim, nb);
      return chunk2mem(victim);
    }
  }

  /*
    If a small request, check regular bin.  Since these "smallbins"
    hold one size each, no searching within bins is necessary.
    
    (If a large request, we need to wait until unsorted chunks are
    processed to find best fit. But for small ones, fits are exact
    anyway, so we can check now, which is faster.)
  */

  if (in_smallbin_range(nb)) {
    idx = smallbin_index(nb);
    bin = bin_at(av,idx);

    if ( (victim = last(bin)) != bin) {
      if (victim == 0) /* initialization check */
        malloc_consolidate(av);
      else {
        bck = victim->bk;
        set_inuse_bit_at_offset(victim, nb);
        bin->bk = bck;
        bck->fd = bin;
        
        check_malloced_chunk(victim, nb);
        return chunk2mem(victim);
      }
    }
  }

  /* 
     If a large request, consolidate fastbins before continuing.
     While it might look excessive to kill all fastbins before
     even seeing if there is space available, this avoids
     fragmentation problems normally associated with fastbins.
     Also, in practice, programs tend to have runs of either small or
     large requests, but less often mixtures, so consolidation is not 
     usually invoked all that often.
  */

  else {
    idx = largebin_index(nb);
    if (have_fastchunks(av)) /* consolidation/initialization check */
      malloc_consolidate(av);
  }


  /*
    Process recently freed or remaindered chunks, taking one only if
    it is exact fit, or, if a small request, it is the remainder from
    the most recent non-exact fit.  Place other traversed chunks in
    bins.  Note that this step is the only place in any routine where
    chunks are placed in bins.

    The outer loop here is needed because we might not realize until
    near the end of malloc that we should have consolidated, so must
    do so and retry. This happens at most once, and only when we would
    otherwise need to expand memory to service a "small" request.
  */

    
  for(;;) {    
    
    while ( (victim = unsorted_chunks(av)->bk) != unsorted_chunks(av)) {
      bck = victim->bk;
      size = chunksize(victim);

      /* 
         If a small request, try to use last remainder if it is the
         only chunk in unsorted bin.  This helps promote locality for
         runs of consecutive small requests. This is the only
         exception to best-fit.
      */

      if (in_smallbin_range(nb) && 
          victim == av->last_remainder &&
          bck == unsorted_chunks(av) &&
          (remainder_size = (long)size - (long)nb) >= (long)MINSIZE) {

        /* split and reattach remainder */
        remainder = chunk_at_offset(victim, nb);
        unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder;
        av->last_remainder = remainder; 
        remainder->bk = remainder->fd = unsorted_chunks(av);
        
        set_head(victim, nb | PREV_INUSE);
        set_head(remainder, remainder_size | PREV_INUSE);
        set_foot(remainder, remainder_size);
        
        check_malloced_chunk(victim, nb);
        return chunk2mem(victim);
      }
      
      /* remove from unsorted list */
      unsorted_chunks(av)->bk = bck;
      bck->fd = unsorted_chunks(av);
      
      /* Take now instead of binning if exact fit */
      
      if (size == nb) {
        set_inuse_bit_at_offset(victim, size);
        check_malloced_chunk(victim, nb);
        return chunk2mem(victim);
      }
      
      /* place chunk in bin */
      
      if (in_smallbin_range(size)) {
        victim_index = smallbin_index(size);
        bck = bin_at(av, victim_index);
        fwd = bck->fd;
      }
      else {
        victim_index = largebin_index(size);
        bck = bin_at(av, victim_index);
        fwd = bck->fd;

        /* maintain large bins in sorted order */
        if (fwd != bck) {
          /* if smaller than smallest, bypass loop below */
          if ((unsigned long)size <= 
              (unsigned long)(chunksize(bck->bk))) {
            fwd = bck;
            bck = bck->bk;
          }
          else {
            while (fwd != bck && 
                   (unsigned long)size < (unsigned long)(chunksize(fwd))) {
              fwd = fwd->fd;
            }
            bck = fwd->bk;
          }
        }
      }
      
      mark_bin(av, victim_index);
      victim->bk = bck;
      victim->fd = fwd;
      fwd->bk = victim;
      bck->fd = victim;
    }
   
    /*
      If a large request, scan through the chunks of current bin in
      sorted order to find smallest that fits.  This is the only step
      where an unbounded number of chunks might be scanned without doing
      anything useful with them. However the lists tend to be very
      short.
    */
      
    if (!in_smallbin_range(nb)) {
      bin = bin_at(av, idx);

      /* skip scan if largest chunk is too small */
      if ((victim = last(bin)) != bin &&
          (long)(chunksize(first(bin))) - (long)(nb) >= 0) {
        do {
          size = chunksize(victim);
          remainder_size = (long)size - (long)nb;
          
          if (remainder_size >= 0)  {
            unlink(victim, bck, fwd);
            
            /* Exhaust */
            if (remainder_size < (long)MINSIZE)  {
              set_inuse_bit_at_offset(victim, size);
              check_malloced_chunk(victim, nb);
              return chunk2mem(victim);
            }
            /* Split */
            else {
              remainder = chunk_at_offset(victim, nb);
              unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder;
              remainder->bk = remainder->fd = unsorted_chunks(av);
              set_head(victim, nb | PREV_INUSE);
              set_head(remainder, remainder_size | PREV_INUSE);
              set_foot(remainder, remainder_size);
              check_malloced_chunk(victim, nb);
              return chunk2mem(victim);
            }
          }
        } while ( (victim = victim->bk) != bin);
      }
    }    

    /*
      Search for a chunk by scanning bins, starting with next largest
      bin. This search is strictly by best-fit; i.e., the smallest
      (with ties going to approximately the least recently used) chunk
      that fits is selected.
      
      The bitmap avoids needing to check that most blocks are nonempty.
      The particular case of skipping all bins during warm-up phases
      when no chunks have been returned yet is faster than it might look.
    */
    
    ++idx;
    bin = bin_at(av,idx);
    block = idx2block(idx);
    map = av->binmap[block];
    bit = idx2bit(idx);
    
    for (;;) {
      /*
        Skip rest of block if there are no more set bits in this block.
      */
      
      if (bit > map || bit == 0) {
        for (;;) {
          if (++block >= BINMAPSIZE)  /* out of bins */
            break;

          else if ( (map = av->binmap[block]) != 0) {
            bin = bin_at(av, (block << BINMAPSHIFT));
            bit = 1;
            break;
          }
        }
        /* Optimizers seem to like this double-break better than goto */
        if (block >= BINMAPSIZE) 
          break;
      }
      
      /* Advance to bin with set bit. There must be one. */
      while ((bit & map) == 0) {
        bin = next_bin(bin);
        bit <<= 1;
      }
      
      victim = last(bin);
      
      /*  False alarm -- the bin is empty. Clear the bit. */
      if (victim == bin) {
        av->binmap[block] = map &= ~bit; /* Write through */
        bin = next_bin(bin);
        bit <<= 1;
      }
      
      /*  We know the first chunk in this bin is big enough to use. */
      else {
        size = chunksize(victim);
        remainder_size = (long)size - (long)nb;
        
        assert(remainder_size >= 0);
        
        /* unlink */
        bck = victim->bk;
        bin->bk = bck;
        bck->fd = bin;
        
        
        /* Exhaust */
        if (remainder_size < (long)MINSIZE) {
          set_inuse_bit_at_offset(victim, size);
          check_malloced_chunk(victim, nb);
          return chunk2mem(victim);
        }
        
        /* Split */
        else {
          remainder = chunk_at_offset(victim, nb);
          
          unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder;
          remainder->bk = remainder->fd = unsorted_chunks(av);
          /* advertise as last remainder */
          if (in_smallbin_range(nb)) 
            av->last_remainder = remainder; 
          
          set_head(victim, nb | PREV_INUSE);
          set_head(remainder, remainder_size | PREV_INUSE);
          set_foot(remainder, remainder_size);
          check_malloced_chunk(victim, nb);
          return chunk2mem(victim);
        }
      }
    }
    
    /*
      If large enough, split off the chunk bordering the end of memory
      ("top"). Note that this use of top is in accord with the best-fit search
      rule.  In effect, top is treated as larger (and thus less well
      fitting) than any other available chunk since it can be extended
      to be as large as necessary (up to system limitations).

      We require that "top" always exists (i.e., has size >= MINSIZE)
      after initialization, so if it would otherwise be exhausted by
      current request, it is replenished. (Among the reasons for
      ensuring it exists is that we may need MINSIZE space to put in
      fenceposts in sysmalloc.)
    */

    victim = av->top;
    size = chunksize(victim);
    remainder_size = (long)size - (long)nb;
   
    if (remainder_size >= (long)MINSIZE) {
      remainder = chunk_at_offset(victim, nb);
      av->top = remainder;
      set_head(victim, nb | PREV_INUSE);
      set_head(remainder, remainder_size | PREV_INUSE);
      
      check_malloced_chunk(victim, nb);
      return chunk2mem(victim);
    }
    
    /*
      If there is space available in fastbins, consolidate and retry,
      to possibly avoid expanding memory. This can occur only if nb is
      in smallbin range so we didn't consolidate upon entry.
    */

    else if (have_fastchunks(av)) {
      assert(in_smallbin_range(nb));
      idx = smallbin_index(nb); /* restore original bin index */
      malloc_consolidate(av);
    }

    /* 
       Otherwise, relay to handle system-dependent cases 
    */
    else 
      return sYSMALLOc(nb, av);    
  }
}


static void malloc_consolidate ( ) [static]


static void malloc_consolidate ( mstate  av) [static]

Definition at line 3618 of file malloc.c.

{
  mfastbinptr*    fb;
  mfastbinptr*    maxfb;
  mchunkptr       p;
  mchunkptr       nextp;
  mchunkptr       unsorted_bin;
  mchunkptr       first_unsorted;

  /* These have same use as in free() */
  mchunkptr       nextchunk;
  INTERNAL_SIZE_T size;
  INTERNAL_SIZE_T nextsize;
  INTERNAL_SIZE_T prevsize;
  int             nextinuse;
  mchunkptr       bck;
  mchunkptr       fwd;

  /*
    If max_fast is 0, we know that malloc hasn't
    yet been initialized, in which case do so.
  */

  if (av->max_fast == 0) {
    malloc_init_state(av);
    check_malloc_state();
  }
  else if (have_fastchunks(av)) {
    clear_fastchunks(av);
    
    unsorted_bin = unsorted_chunks(av);
    
    /*
      Remove each chunk from fast bin and consolidate it, placing it
      then in unsorted bin. Among other reasons for doing this,
      placing in unsorted bin avoids needing to calculate actual bins
      until malloc is sure that chunks aren't immediately going to be
      reused anyway.
    */
    
    maxfb = &(av->fastbins[fastbin_index(av->max_fast)]);
    fb = &(av->fastbins[0]);
    do {
      if ( (p = *fb) != 0) {
        *fb = 0;
        
        do {
          check_inuse_chunk(p);
          nextp = p->fd;
          
          /* Slightly streamlined version of consolidation code in free() */
          size = p->size & ~PREV_INUSE;
          nextchunk = chunk_at_offset(p, size);
          
          if (!prev_inuse(p)) {
            prevsize = p->prev_size;
            size += prevsize;
            p = chunk_at_offset(p, -((long) prevsize));
            unlink(p, bck, fwd);
          }
          
          nextsize = chunksize(nextchunk);
          
          if (nextchunk != av->top) {
            
            nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
            set_head(nextchunk, nextsize);
            
            if (!nextinuse) {
              size += nextsize;
              unlink(nextchunk, bck, fwd);
            }
            
            first_unsorted = unsorted_bin->fd;
            unsorted_bin->fd = p;
            first_unsorted->bk = p;
            
            set_head(p, size | PREV_INUSE);
            p->bk = unsorted_bin;
            p->fd = first_unsorted;
            set_foot(p, size);
          }
          
          else {
            size += nextsize;
            set_head(p, size | PREV_INUSE);
            av->top = p;
          }
          
        } while ( (p = nextp) != 0);
        
      }
    } while (fb++ != maxfb);
  }
}


static void malloc_init_state ( mstate  av) [static]

Definition at line 2202 of file malloc.c.

{
  int     i;
  mbinptr bin;

  
  /* Uncomment this if you are not using a static av */
  /* MALLOC_ZERO(av, sizeof(struct malloc_state)); */

  /* Establish circular links for normal bins */
  for (i = 1; i < NBINS; ++i) { 
    bin = bin_at(av,i);
    bin->fd = bin->bk = bin;
  }

  av->max_fast       = req2max_fast(DEFAULT_MXFAST);

  av->top_pad        = DEFAULT_TOP_PAD;
  av->n_mmaps_max    = DEFAULT_MMAP_MAX;
  av->mmap_threshold = DEFAULT_MMAP_THRESHOLD;

#if MORECORE_CONTIGUOUS
  av->trim_threshold = DEFAULT_TRIM_THRESHOLD;
  av->sbrk_base      = (char*)MORECORE_FAILURE;
#else
  av->trim_threshold = (unsigned long)(-1);
  av->sbrk_base      = NONCONTIGUOUS_REGIONS;
#endif

  av->top            = initial_top(av);
  av->pagesize       = malloc_getpagesize;
}


static int mALLOPt ( ) [static]


int mALLOPt ( int  param_number,
int  value 
)

Definition at line 4455 of file malloc.c.

{
  mstate av = get_malloc_state();
  /* Ensure initialization/consolidation */
  malloc_consolidate(av);

  switch(param_number) {
  case M_MXFAST:
    if (value >= 0 && value <= MAX_FAST_SIZE) {
      av->max_fast = req2max_fast(value);
      return 1;
    }
    else
      return 0;

  case M_TRIM_THRESHOLD:
    av->trim_threshold = value;
    return 1;

  case M_TOP_PAD:
    av->top_pad = value;
    return 1;

  case M_MMAP_THRESHOLD:
    av->mmap_threshold = value;
    return 1;

  case M_MMAP_MAX:
#if HAVE_MMAP
    av->n_mmaps_max = value;
    return 1;
#else
    if (value != 0)
      return 0;
    else {
      av->n_mmaps_max = value;
      return 1;
    }
#endif

  default:
    return 0;
  }
}
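
A usage sketch (illustrative only; it assumes the M_* parameter constants defined earlier in this file and the public mallopt() wrapper are visible to the caller, e.g. through malloc.h):

#include <malloc.h>   /* assumed to declare mallopt() and the M_* constants */

static void tune_allocator(void)
{
  mallopt(M_MXFAST, 32);                  /* fastbins serve requests up to 32 bytes */
  mallopt(M_TRIM_THRESHOLD, 512 * 1024);  /* trim top once 512K of slack accumulates */
  mallopt(M_MMAP_THRESHOLD, 256 * 1024);  /* requests of 256K or more go to mmap */
}

Each call returns 1 on success and 0 if the parameter number is unknown or the value is out of range, as in the switch above.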


static Void_t* mEMALIGn ( ) [static]


Void_t* mEMALIGn ( size_t  alignment,
size_t  bytes 
)

Definition at line 4044 of file malloc.c.

{
  INTERNAL_SIZE_T nb;             /* padded  request size */
  char*           m;              /* memory returned by malloc call */
  mchunkptr       p;              /* corresponding chunk */
  char*           brk;            /* alignment point within p */
  mchunkptr       newp;           /* chunk to return */
  INTERNAL_SIZE_T newsize;        /* its size */
  INTERNAL_SIZE_T leadsize;       /* leading space before alignment point */
  mchunkptr       remainder;      /* spare room at end to split off */
  long            remainder_size; /* its size */


  /* If need less alignment than we give anyway, just relay to malloc */

  if (alignment <= MALLOC_ALIGNMENT) return mALLOc(bytes);

  /* Otherwise, ensure that it is at least a minimum chunk size */

  if (alignment <  MINSIZE) alignment = MINSIZE;

  /* Make sure alignment is power of 2 (in case MINSIZE is not).  */
  if ((alignment & (alignment - 1)) != 0) {
    size_t a = MALLOC_ALIGNMENT * 2;
    while ((unsigned long)a < (unsigned long)alignment) a <<= 1;
    alignment = a;
  }

  checked_request2size(bytes, nb);

  /* Call malloc with worst case padding to hit alignment. */

  m  = (char*)(mALLOc(nb + alignment + MINSIZE));

  if (m == 0) return 0; /* propagate failure */

  p = mem2chunk(m);

  if ((((unsigned long)(m)) % alignment) != 0) { /* misaligned */

    /*
      Find an aligned spot inside chunk.  Since we need to give back
      leading space in a chunk of at least MINSIZE, if the first
      calculation places us at a spot with less than MINSIZE leader,
      we can move to the next aligned spot -- we've allocated enough
      total room so that this is always possible.
    */

    brk = (char*)mem2chunk(((unsigned long)(m + alignment - 1)) &
                           -((signed long) alignment));
    if ((long)(brk - (char*)(p)) < (long)MINSIZE)
      brk = brk + alignment;

    newp = (mchunkptr)brk;
    leadsize = brk - (char*)(p);
    newsize = chunksize(p) - leadsize;

    /* For mmapped chunks, just adjust offset */
    if (chunk_is_mmapped(p)) {
      newp->prev_size = p->prev_size + leadsize;
      set_head(newp, newsize|IS_MMAPPED);
      return chunk2mem(newp);
    }

    /* Otherwise, give back leader, use the rest */

    set_head(newp, newsize | PREV_INUSE);
    set_inuse_bit_at_offset(newp, newsize);
    set_head_size(p, leadsize);
    fREe(chunk2mem(p));
    p = newp;

    assert (newsize >= nb &&
            (((unsigned long)(chunk2mem(p))) % alignment) == 0);
  }

  /* Also give back spare room at the end */
  if (!chunk_is_mmapped(p)) {

    remainder_size = (long)(chunksize(p)) - (long)nb;

    if (remainder_size >= (long)MINSIZE) {
      remainder = chunk_at_offset(p, nb);
      set_head(remainder, remainder_size | PREV_INUSE);
      set_head_size(p, nb);
      fREe(chunk2mem(remainder));
    }
  }

  check_inuse_chunk(p);
  return chunk2mem(p);

}
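
A usage sketch (illustrative only; the prototype is written out here rather than taken from a header, and public_mEMALIGn expands to memalign in this build):

#include <stdlib.h>

extern void* memalign(size_t alignment, size_t bytes);   /* public_mEMALIGn */

static int aligned_example(void)
{
  void* p = memalign(64, 1000);   /* returned address is a multiple of 64 */
  if (p == 0) return -1;          /* allocation failure is propagated as 0 */
  free(p);
  return 0;
}
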


Void_t* memcpy ( )
Void_t* memset ( )
void mSTATs ( ) [static]

Definition at line 4403 of file malloc.c.

{
  struct mallinfo mi = mALLINFo();

#ifdef WIN32
  {
    unsigned long free, reserved, committed;
    vminfo (&free, &reserved, &committed);
    fprintf(stderr, "free bytes       = %10lu\n", 
            free);
    fprintf(stderr, "reserved bytes   = %10lu\n", 
            reserved);
    fprintf(stderr, "committed bytes  = %10lu\n", 
            committed);
  }
#endif


  fprintf(stderr, "max system bytes = %10lu\n",
          (unsigned long)(mi.usmblks));
  fprintf(stderr, "system bytes     = %10lu\n",
          (unsigned long)(mi.arena + mi.hblkhd));
  fprintf(stderr, "in use bytes     = %10lu\n",
          (unsigned long)(mi.uordblks + mi.hblkhd));

#ifdef WIN32 
  {
    unsigned long kernel, user;
    if (cpuinfo (TRUE, &kernel, &user)) {
      fprintf(stderr, "kernel ms        = %10lu\n", 
              kernel);
      fprintf(stderr, "user ms          = %10lu\n", 
              user);
    }
  }
#endif
}


static int mTRIm ( ) [static]


int mTRIm ( size_t  pad)

Definition at line 4284 of file malloc.c.

{
  mstate av = get_malloc_state();
  /* Ensure initialization/consolidation */
  malloc_consolidate(av);

  return sYSTRIm(pad, av);
}


static size_t mUSABLe ( ) [static]


size_t mUSABLe ( Void_t *  mem)

Definition at line 4305 of file malloc.c.

{
  mchunkptr p;
  if (mem != 0) {
    p = mem2chunk(mem);
    if (chunk_is_mmapped(p))
      return chunksize(p) - 2*SIZE_SZ;
    else if (inuse(p))
      return chunksize(p) - SIZE_SZ;
  }
  return 0;
}
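
A usage sketch (illustrative only; the prototype is written out here, and public_mUSABLe expands to malloc_usable_size in this build):

#include <stdlib.h>

extern size_t malloc_usable_size(void* mem);   /* public_mUSABLe */

static void usable_example(void)
{
  void*  p = malloc(100);
  size_t n = p ? malloc_usable_size(p) : 0;
  /* n is at least 100: request2size rounded the request up, and the whole
     payload of the resulting chunk (chunksize minus SIZE_SZ) is usable */
  free(p);
  (void)n;
}
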
Void_t* public_cALLOc ( size_t  n,
size_t  elem_size 
)

Definition at line 1267 of file malloc.c.

                                                  {
  Void_t* m;
  if (MALLOC_PREACTION != 0) {
    return 0;
  }
  m = cALLOc(n, elem_size);
  if (MALLOC_POSTACTION != 0) {
  }
  return m;
}


void public_cFREe ( Void_t *  m)

Definition at line 1278 of file malloc.c.

                             {
  if (MALLOC_PREACTION != 0) {
    return;
  }
  cFREe(m);
  if (MALLOC_POSTACTION != 0) {
  }
}


void public_fREe ( Void_t *  m)

Definition at line 1215 of file malloc.c.

                            {
  if (MALLOC_PREACTION != 0) {
    return;
  }
  fREe(m);
  if (MALLOC_POSTACTION != 0) {
  }
}


struct mallinfo public_mALLINFo ( ) [read]
Void_t* public_mALLOc ( size_t  bytes)

Definition at line 1204 of file malloc.c.

                                    {
  Void_t* m;
  if (MALLOC_PREACTION != 0) {
    return 0;
  }
  m = mALLOc(bytes);
  if (MALLOC_POSTACTION != 0) {
  }
  return m;
}


int public_mALLOPt ( int  p,
int  v 
)

Definition at line 1331 of file malloc.c.

                                 {
  int result;
  if (MALLOC_PREACTION != 0) {
    return 0;
  }
  result = mALLOPt(p, v);
  if (MALLOC_POSTACTION != 0) {
  }
  return result;
}


Void_t* public_mEMALIGn ( size_t  alignment,
size_t  bytes 
)

Definition at line 1234 of file malloc.c.

                                                        {
  Void_t* m;
  if (MALLOC_PREACTION != 0) {
    return 0;
  }
  m = mEMALIGn(alignment, bytes);
  if (MALLOC_POSTACTION != 0) {
  }
  return m;
}


int public_mTRIm ( size_t  s)

Definition at line 1287 of file malloc.c.

                           {
  int result;
  if (MALLOC_PREACTION != 0) {
    return 0;
  }
  result = mTRIm(s);
  if (MALLOC_POSTACTION != 0) {
  }
  return result;
}


size_t public_mUSABLe ( )
size_t public_mUSABLe ( Void_t *  m)

Definition at line 1299 of file malloc.c.

                                 {
  size_t result;
  if (MALLOC_PREACTION != 0) {
    return 0;
  }
  result = mUSABLe(m);
  if (MALLOC_POSTACTION != 0) {
  }
  return result;
}


Void_t* public_pVALLOc ( size_t  bytes)

Definition at line 1256 of file malloc.c.

                                     {
  Void_t* m;
  if (MALLOC_PREACTION != 0) {
    return 0;
  }
  m = pVALLOc(bytes);
  if (MALLOC_POSTACTION != 0) {
  }
  return m;
}


Void_t* public_rEALLOc ( Void_t *  m,
size_t  bytes 
)

Definition at line 1224 of file malloc.c.

                                                {
  if (MALLOC_PREACTION != 0) {
    return 0;
  }
  m = rEALLOc(m, bytes);
  if (MALLOC_POSTACTION != 0) {
  }
  return m;
}


Void_t* public_vALLOc ( size_t  bytes)

Definition at line 1245 of file malloc.c.

                                    {
  Void_t* m;
  if (MALLOC_PREACTION != 0) {
    return 0;
  }
  m = vALLOc(bytes);
  if (MALLOC_POSTACTION != 0) {
  }
  return m;
}


static Void_t* pVALLOc ( ) [static]


Void_t* pVALLOc ( size_t  bytes)

Definition at line 4246 of file malloc.c.

{
  mstate av = get_malloc_state();
  size_t pagesz;

  /* Ensure initialization/consolidation */
  malloc_consolidate(av);

  pagesz = av->pagesize;
  return mEMALIGn(pagesz, (bytes + pagesz - 1) & ~(pagesz - 1));
}


static Void_t* rEALLOc ( ) [static]


Void_t* rEALLOc ( Void_t *  oldmem,
size_t  bytes 
)

Definition at line 3755 of file malloc.c.

{
  mstate av = get_malloc_state();

  INTERNAL_SIZE_T  nb;              /* padded request size */

  mchunkptr        oldp;            /* chunk corresponding to oldmem */
  INTERNAL_SIZE_T  oldsize;         /* its size */

  mchunkptr        newp;            /* chunk to return */
  INTERNAL_SIZE_T  newsize;         /* its size */
  Void_t*          newmem;          /* corresponding user mem */

  mchunkptr        next;            /* next contiguous chunk after oldp */
  mchunkptr        prev;            /* previous contiguous chunk before oldp */

  mchunkptr        remainder;       /* extra space at end of newp */
  long             remainder_size;  /* its size */

  mchunkptr        bck;             /* misc temp for linking */
  mchunkptr        fwd;             /* misc temp for linking */

  INTERNAL_SIZE_T  copysize;        /* bytes to copy */
  int              ncopies;         /* INTERNAL_SIZE_T words to copy */
  INTERNAL_SIZE_T* s;               /* copy source */ 
  INTERNAL_SIZE_T* d;               /* copy destination */


#ifdef REALLOC_ZERO_BYTES_FREES
  if (bytes == 0) {
    fREe(oldmem);
    return 0;
  }
#endif

  /* realloc of null is supposed to be same as malloc */
  if (oldmem == 0) return mALLOc(bytes);

  checked_request2size(bytes, nb);

  oldp    = mem2chunk(oldmem);
  oldsize = chunksize(oldp);

  check_inuse_chunk(oldp);

  if (!chunk_is_mmapped(oldp)) {

    if ((unsigned long)(oldsize) >= (unsigned long)(nb)) {
      /* already big enough; split below */
      newp = oldp;
      newsize = oldsize;
    }

    else {
      newp = 0;
      newsize = 0;

      next = chunk_at_offset(oldp, oldsize);

      if (next == av->top) {            /* Expand forward into top */
        newsize = oldsize + chunksize(next);

        if ((unsigned long)(newsize) >= (unsigned long)(nb + MINSIZE)) {
          set_head_size(oldp, nb);
          av->top = chunk_at_offset(oldp, nb);
          set_head(av->top, (newsize - nb) | PREV_INUSE);
          return chunk2mem(oldp);
        }

        else if (!prev_inuse(oldp)) {   /* Shift backwards + top */
          prev = prev_chunk(oldp);
          newsize += chunksize(prev);

          if ((unsigned long)(newsize) >= (unsigned long)(nb + MINSIZE)) {
            newp = prev;
            unlink(prev, bck, fwd);
            av->top = chunk_at_offset(newp, nb);
            set_head(av->top, (newsize - nb) | PREV_INUSE);
            newsize = nb; 
          }
        }
      }

      else if (!inuse(next)) {          /* Forward into next chunk */
        newsize = oldsize + chunksize(next);
        
        if (((unsigned long)(newsize) >= (unsigned long)(nb))) {
          newp = oldp;
          unlink(next, bck, fwd);
        }
        
        else if (!prev_inuse(oldp)) {   /* Forward + backward */
          prev = prev_chunk(oldp);
          newsize += chunksize(prev);
          
          if (((unsigned long)(newsize) >= (unsigned long)(nb))) {
            newp = prev;
            unlink(prev, bck, fwd);
            unlink(next, bck, fwd);
          }
        }
      }
      
      else if (!prev_inuse(oldp)) {     /* Backward only */
        prev = prev_chunk(oldp);
        newsize = oldsize + chunksize(prev);
        
        if ((unsigned long)(newsize) >= (unsigned long)(nb)) {
          newp = prev;
          unlink(prev, bck, fwd);
        }
      }
      
      if (newp != 0) {
        if (newp != oldp) {
          /* Backward copies are not worth unrolling */
          MALLOC_COPY(chunk2mem(newp), oldmem, oldsize - SIZE_SZ, 1);
        }
      }

      /* Must allocate */
      else {                  
        newmem = mALLOc(nb - MALLOC_ALIGN_MASK);
        if (newmem == 0)
          return 0; /* propagate failure */

        newp = mem2chunk(newmem);
        newsize = chunksize(newp);

        /*
          Avoid copy if newp is next chunk after oldp.
        */
        if (newp == next) {
          newsize += oldsize;
          newp = oldp;
        }
        else {

          /*
            Unroll copy of <= 36 bytes (72 if 8byte sizes)
            We know that contents have an odd number of
            INTERNAL_SIZE_T-sized words; minimally 3.
          */
          
          copysize = oldsize - SIZE_SZ;
          s = (INTERNAL_SIZE_T*)oldmem;
          d = (INTERNAL_SIZE_T*)(chunk2mem(newp));
          ncopies = copysize / sizeof(INTERNAL_SIZE_T);
          assert(ncopies >= 3);
          
          if (ncopies > 9)
            MALLOC_COPY(d, s, copysize, 0);
          
          else {
            *(d+0) = *(s+0);
            *(d+1) = *(s+1);
            *(d+2) = *(s+2);
            if (ncopies > 4) {
              *(d+3) = *(s+3);
              *(d+4) = *(s+4);
              if (ncopies > 6) {
                *(d+5) = *(s+5);
                *(d+6) = *(s+6);
                if (ncopies > 8) {
                  *(d+7) = *(s+7);
                  *(d+8) = *(s+8);
                }
              }
            }
          }

          fREe(oldmem);
          check_inuse_chunk(newp);
          return chunk2mem(newp);
        }
      }
    }


    /* If possible, free extra space in old or extended chunk */

    remainder_size = (long)newsize - (long)nb;
    assert(remainder_size >= 0);

    if (remainder_size >= (long)MINSIZE) { /* split remainder */
      remainder = chunk_at_offset(newp, nb);
      set_head_size(newp, nb);
      set_head(remainder, remainder_size | PREV_INUSE);
      /* Mark remainder as inuse so free() won't complain */
      set_inuse_bit_at_offset(remainder, remainder_size);
      fREe(chunk2mem(remainder)); 
    }

    else { /* not enough extra to split off */
      set_head_size(newp, newsize);
      set_inuse_bit_at_offset(newp, newsize);
    }

    check_inuse_chunk(newp);
    return chunk2mem(newp);
  }

  /*
    Handle mmap cases
  */

  else {
#if HAVE_MMAP

#if HAVE_MREMAP
    INTERNAL_SIZE_T offset = oldp->prev_size;
    size_t pagemask = av->pagesize - 1;
    char *cp;
    unsigned long sum;
    
    /* Note the extra SIZE_SZ overhead */
    newsize = (nb + offset + SIZE_SZ + pagemask) & ~pagemask;

    /* don't need to remap if still within same page */
    if (oldsize == newsize - offset) 
      return oldmem;

    cp = (char*)mremap((char*)oldp - offset, oldsize + offset, newsize, 1);
    
    if (cp != (char*)MORECORE_FAILURE) {

      newp = (mchunkptr)(cp + offset);
      set_head(newp, (newsize - offset)|IS_MMAPPED);
      
      assert(aligned_OK(chunk2mem(newp)));
      assert((newp->prev_size == offset));
      
      /* update statistics */
      sum = av->mmapped_mem += newsize - oldsize;
      if (sum > (unsigned long)(av->max_mmapped_mem)) 
        av->max_mmapped_mem = sum;
      sum += av->sbrked_mem;
      if (sum > (unsigned long)(av->max_total_mem)) 
        av->max_total_mem = sum;
      
      return chunk2mem(newp);
    }

#endif

    /* Note the extra SIZE_SZ overhead. */
    if ((long)oldsize - (long)SIZE_SZ >= (long)nb)
      newmem = oldmem; /* do nothing */
    else {
      /* Must alloc, copy, free. */
      newmem = mALLOc(nb - MALLOC_ALIGN_MASK);
      if (newmem != 0) {
        MALLOC_COPY(newmem, oldmem, oldsize - 2*SIZE_SZ, 0);
        fREe(oldmem);
      }
    }
    return newmem;

#else 
    /* If !HAVE_MMAP, but chunk_is_mmapped, user must have overwritten mem */
    check_malloc_state();
    MALLOC_FAILURE_ACTION;
    return 0;
#endif
  }

}
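
The realloc semantics handled at the top of this function are easy to exercise from the caller's side. Below is a minimal, standalone sketch (standard C, not part of malloc.c) of the two edge cases dispatched before any chunk arithmetic: a null old pointer behaves like malloc, and a zero-byte request frees the block only when REALLOC_ZERO_BYTES_FREES is defined.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
  /* realloc(NULL, n) behaves like malloc(n). */
  char* p = realloc(0, 16);
  if (p == 0) return 1;
  strcpy(p, "hello");

  /* Growing preserves contents up to the old size; this is what the
     MALLOC_COPY paths above implement when the chunk cannot be
     extended in place. */
  char* q = realloc(p, 4096);
  if (q == 0) { free(p); return 1; }
  printf("%s\n", q);   /* still prints "hello" */

  /* A zero-byte realloc is not shown: in this allocator it frees the
     block only when REALLOC_ZERO_BYTES_FREES is defined, and otherwise
     returns a minimum-sized chunk. */
  free(q);
  return 0;
}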


static Void_t* sYSMALLOc ( INTERNAL_SIZE_T  nb,
mstate  av 
) [static]

Definition at line 2616 of file malloc.c.

{
  mchunkptr       old_top;        /* incoming value of av->top */
  INTERNAL_SIZE_T old_size;       /* its size */
  char*           old_end;        /* its end address */

  long            size;           /* arg to first MORECORE or mmap call */
  char*           brk;            /* return value from MORECORE */
  char*           mm;             /* return value from mmap call*/

  long            correction;     /* arg to 2nd MORECORE call */
  char*           snd_brk;        /* 2nd return val */

  INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of new space */
  INTERNAL_SIZE_T end_misalign;   /* partial page left at end of new space */
  char*           aligned_brk;    /* aligned offset into brk */

  mchunkptr       p;              /* the allocated/returned chunk */
  mchunkptr       remainder;      /* remainder from allocation */
  long            remainder_size; /* its size */

  unsigned long   sum;            /* for updating stats */

  size_t          pagemask  = av->pagesize - 1;

  /*
    If mmap is available, the request size meets the mmap threshold,
    there are few enough currently allocated mmapped regions, and a
    call to mmap succeeds, try to map this request directly rather
    than expanding top.
  */

#if HAVE_MMAP
  if ((unsigned long)nb >= (unsigned long)(av->mmap_threshold) &&
      (av->n_mmaps < av->n_mmaps_max)) {

    /*
      Round up size to nearest page.  For mmapped chunks, the overhead
      is one SIZE_SZ unit larger than for normal chunks, because there
      is no following chunk whose prev_size field could be used.
    */
    size = (nb + SIZE_SZ + MALLOC_ALIGN_MASK + pagemask) & ~pagemask;
    
    mm = (char*)(MMAP(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE));

    if (mm != (char*)(MORECORE_FAILURE)) {

      /*
        The offset to the start of the mmapped region is stored
        in the prev_size field of the chunk. This allows us to adjust
        returned start address to meet alignment requirements here 
        and in memalign(), and still be able to compute proper
        address argument for later munmap in free() and realloc().
      */

      front_misalign = (INTERNAL_SIZE_T)chunk2mem(mm) & MALLOC_ALIGN_MASK;
      if (front_misalign > 0) {
        correction = MALLOC_ALIGNMENT - front_misalign;
        p = (mchunkptr)(mm + correction);
        p->prev_size = correction;
        set_head(p, (size - correction) |IS_MMAPPED);
      }
      else {
        p = (mchunkptr)mm;
        set_head(p, size|IS_MMAPPED);
      }

      check_chunk(p);
      
      /* update statistics */

      if (++av->n_mmaps > av->max_n_mmaps) 
        av->max_n_mmaps = av->n_mmaps;

      sum = av->mmapped_mem += size;
      if (sum > (unsigned long)(av->max_mmapped_mem)) 
        av->max_mmapped_mem = sum;
      sum += av->sbrked_mem;
      if (sum > (unsigned long)(av->max_total_mem)) 
        av->max_total_mem = sum;
      
      return chunk2mem(p);
    }
  }
#endif

  /* record incoming configuration of top */

  old_top  = av->top;
  old_size = chunksize(old_top);
  old_end  = (char*)(chunk_at_offset(old_top, old_size));

  brk = snd_brk = (char*)(MORECORE_FAILURE); 

  /* 
     If not the first time through, we require old_size to be
     at least MINSIZE and to have prev_inuse set.
  */

  assert(old_top == initial_top(av) || 
         ((unsigned long) (old_size) >= (unsigned long)(MINSIZE) &&
          prev_inuse(old_top)));


  /* Request enough space for nb + pad + overhead */

  size = nb + av->top_pad + MINSIZE;

  /*
    If contiguous, we can subtract out existing space that we hope to
    combine with new space. We add it back later only if
    we don't actually get contiguous space.
  */

  if (av->sbrk_base != NONCONTIGUOUS_REGIONS)
    size -= old_size;

  /*
    Round to a multiple of page size.
    If MORECORE is not contiguous, this ensures that we only call it
    with whole-page arguments.  And if MORECORE is contiguous and
    this is not first time through, this preserves page-alignment of
    previous calls. Otherwise, we re-correct anyway to page-align below.
  */

  size = (size + pagemask) & ~pagemask;

  /*
    Don't try to call MORECORE if argument is so big as to appear
    negative. Note that since mmap takes size_t arg, it may succeed
    below even if we cannot call MORECORE.
  */

  if (size > 0) 
    brk = (char*)(MORECORE(size));

  /*
    If mmap is available, try using it as a backup when MORECORE fails. This
    is worth doing on systems that have "holes" in address space, so
    sbrk cannot extend to give contiguous space, but space is available
    elsewhere.  Note that we ignore mmap max count and threshold limits,
    since there is no reason to artificially limit use here.
  */

#if HAVE_MMAP
  if (brk == (char*)(MORECORE_FAILURE)) {

    /* Cannot merge with old top, so add its size back in */

    if (av->sbrk_base != NONCONTIGUOUS_REGIONS)
      size = (size + old_size + pagemask) & ~pagemask;

    /* If we are relying on mmap as backup, then use larger units */

    if ((unsigned long)size < (unsigned long)MMAP_AS_MORECORE_SIZE)
      size = MMAP_AS_MORECORE_SIZE;

    brk = (char*)(MMAP(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE));

    if (brk != (char*)(MORECORE_FAILURE)) {

      /* We do not need, and cannot use, another sbrk call to find end */
      snd_brk = brk + size;

      /* 
         Record that we no longer have a contiguous sbrk region. 
         After the first time mmap is used as backup, we cannot
         ever rely on contiguous space.
      */
      av->sbrk_base = NONCONTIGUOUS_REGIONS; 
    }
  }
#endif

  if (brk != (char*)(MORECORE_FAILURE)) {

    av->sbrked_mem += size;

    /*
      If MORECORE extends previous space, we can likewise extend top size.
    */
    
    if (brk == old_end && snd_brk == (char*)(MORECORE_FAILURE)) {
      set_head(old_top, (size + old_size) | PREV_INUSE);
    }
    
    /*
      Otherwise, make adjustments guided by the special values of 
      av->sbrk_base (MORECORE_FAILURE or NONCONTIGUOUS_REGIONS):
      
      * If the first time through or noncontiguous, we need to call sbrk
        just to find out where the end of memory lies.

      * We need to ensure that all returned chunks from malloc will meet
        MALLOC_ALIGNMENT

      * If there was an intervening foreign sbrk, we need to adjust sbrk
        request size to account for fact that we will not be able to
        combine new space with existing space in old_top.

      * Almost all systems internally allocate whole pages at a time, in
        which case we might as well use the whole last page of request.
        So we allocate enough more memory to hit a page boundary now,
        which in turn causes future contiguous calls to page-align.

    */
    
    else {
      front_misalign = 0;
      end_misalign = 0;
      correction = 0;
      aligned_brk = brk;
      
      /* handle contiguous cases */
      if (av->sbrk_base != NONCONTIGUOUS_REGIONS) { 
        
        /* Guarantee alignment of first new chunk made from this space */

        front_misalign = (INTERNAL_SIZE_T)chunk2mem(brk) & MALLOC_ALIGN_MASK;
        if (front_misalign > 0) {

          /*
            Skip over some bytes to arrive at an aligned position.
            We don't need to specially mark these wasted front bytes.
            They will never be accessed anyway because
            prev_inuse of av->top (and any chunk created from its start)
            is always true after initialization.
          */

          correction = MALLOC_ALIGNMENT - front_misalign;
          aligned_brk += correction;
        }
        
        /*
          If this isn't adjacent to a previous sbrk, then we will not
          be able to merge with old_top space, so must add to 2nd request.
        */
        
        correction += old_size;
        
        /* Pad out to hit a page boundary */

        end_misalign = (INTERNAL_SIZE_T)(brk + size + correction);
        correction += ((end_misalign + pagemask) & ~pagemask) - end_misalign;
        
        assert(correction >= 0);
        
        snd_brk = (char*)(MORECORE(correction));
        
        /*
          If can't allocate correction, try to at least find out current
          brk.  It might be enough to proceed without failing.
          
          Note that if second sbrk did NOT fail, we assume that space
          is contiguous with first sbrk. This is a safe assumption unless
          program is multithreaded but doesn't use locks and a foreign sbrk
          occurred between our first and second calls.
        */
        
        if (snd_brk == (char*)(MORECORE_FAILURE)) {
          correction = 0;
          snd_brk = (char*)(MORECORE(0));
        }
      }
      
      /* handle non-contiguous cases */
      else { 
        
        /* MORECORE/mmap must correctly align etc */
        assert(((unsigned long)chunk2mem(brk) & MALLOC_ALIGN_MASK) == 0);
        
        /* Find out current end of memory */
        if (snd_brk == (char*)(MORECORE_FAILURE)) {
          snd_brk = (char*)(MORECORE(0));
        }
        
        /* This must lie on a page boundary */
        if (snd_brk != (char*)(MORECORE_FAILURE)) {
          assert(((INTERNAL_SIZE_T)(snd_brk) & pagemask) == 0);
        }
      }
      
      /* Adjust top based on results of second sbrk */
      if (snd_brk != (char*)(MORECORE_FAILURE)) {
       
        av->top = (mchunkptr)aligned_brk;
        set_head(av->top, (snd_brk - aligned_brk + correction) | PREV_INUSE);
        
        av->sbrked_mem += correction;
        
        /* If first time through and contiguous, record base */
        if (old_top == initial_top(av)) {
          if (av->sbrk_base == (char*)(MORECORE_FAILURE)) 
            av->sbrk_base = brk;
        }
        
        /*
          Otherwise, we either have a gap due to foreign sbrk or a
          non-contiguous region.  Insert a double fencepost at old_top
          to prevent consolidation with space we don't own. These
          fenceposts are artificial chunks that are marked as inuse
          and are in any case too small to use.  We need two to make
          sizes and alignments work out.
        */

        else {
          
          /* 
             Shrink old_top to insert fenceposts, keeping size a
             multiple of MALLOC_ALIGNMENT. 
          */
          old_size = (old_size - 3*SIZE_SZ) & ~MALLOC_ALIGN_MASK;
          set_head(old_top, old_size | PREV_INUSE);
          
          /*
            Note that the following assignments overwrite old_top when
            old_size was previously MINSIZE.  This is intentional. We
            need the fencepost, even if old_top otherwise gets lost.
          */
          chunk_at_offset(old_top, old_size          )->size =
            SIZE_SZ|PREV_INUSE;

          chunk_at_offset(old_top, old_size + SIZE_SZ)->size =
            SIZE_SZ|PREV_INUSE;
          
          /* If possible, release the rest. */
          if (old_size >= MINSIZE) 
            fREe(chunk2mem(old_top));

        }
      }
    }
    
    /* Update statistics */
    
    sum = av->sbrked_mem;
    if (sum > (unsigned long)(av->max_sbrked_mem))
      av->max_sbrked_mem = sum;
    
    sum += av->mmapped_mem;
    if (sum > (unsigned long)(av->max_total_mem))
      av->max_total_mem = sum;

    check_malloc_state();
    
    /* finally, do the allocation */
    
    p = av->top;
    size = chunksize(p);
    remainder_size = (long)size - (long)nb;
    
    /* check that one of the above allocation paths succeeded */
    if (remainder_size >= (long)MINSIZE) {
      remainder = chunk_at_offset(p, nb);
      av->top = remainder;
      set_head(p, nb | PREV_INUSE);
      set_head(remainder, remainder_size | PREV_INUSE);
      
      check_malloced_chunk(p, nb);
      return chunk2mem(p);
    }
  }

  /* catch all failure paths */
  MALLOC_FAILURE_ACTION;
  return 0;
}
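
The page-rounding idiom (size + pagemask) & ~pagemask appears several times above, both on the mmap path and in the MORECORE correction logic. Here is a minimal standalone sketch of the arithmetic, assuming a 4096-byte page purely for illustration:

#include <assert.h>
#include <stddef.h>

static size_t round_up_to_page(size_t x, size_t pagesize)
{
  size_t pagemask = pagesize - 1;   /* pagesize must be a power of two */
  return (x + pagemask) & ~pagemask;
}

int main(void)
{
  assert(round_up_to_page(1,    4096) == 4096);
  assert(round_up_to_page(4096, 4096) == 4096);
  assert(round_up_to_page(4097, 4096) == 8192);
  return 0;
}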


static int sYSTRIm ( size_t  pad,
mstate  av 
) [static]

Definition at line 2998 of file malloc.c.

{
  long  top_size;        /* Amount of top-most memory */
  long  extra;           /* Amount to release */
  long  released;        /* Amount actually released */
  char* current_brk;     /* address returned by pre-check sbrk call */
  char* new_brk;         /* address returned by post-check sbrk call */
  size_t pagesz;

  /* Don't bother trying if sbrk doesn't provide contiguous regions */
  if (av->sbrk_base != NONCONTIGUOUS_REGIONS) {

    pagesz = av->pagesize;
    top_size = chunksize(av->top);
    
    /* Release in pagesize units, keeping at least one page */
    extra = ((top_size - pad - MINSIZE + (pagesz-1)) / pagesz - 1) * pagesz;
    
    if (extra > 0) {
      
      /*
        Only proceed if end of memory is where we last set it.
        This avoids problems if there were foreign sbrk calls.
      */
      current_brk = (char*)(MORECORE(0));
      if (current_brk == (char*)(av->top) + top_size) {
        
        /*
          Attempt to release memory. We ignore the return value,
          and instead call again to find out where the new end of memory is.
          This avoids problems if the first call releases less than we asked,
          or if failure somehow altered the brk value. (We could still
          encounter problems if it altered brk in some very bad way,
          but the only thing we can do is adjust anyway, which will cause
          some downstream failure.)
        */
        
        MORECORE(-extra);
        new_brk = (char*)(MORECORE(0));
        
        if (new_brk != (char*)MORECORE_FAILURE) {
          released = (long)(current_brk - new_brk);

          if (released != 0) {
            /* Success. Adjust top. */
            av->sbrked_mem -= released;
            set_head(av->top, (top_size - released) | PREV_INUSE);
            check_malloc_state();
            return 1;
          }
        }
      }
    }
  }

  return 0;
}
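
The "extra" computation above releases memory only in whole-page units and backs off one page from the ceiling division, so top is never trimmed below pad + MINSIZE bytes. A standalone sketch with sample numbers (the 16-byte MINSIZE stand-in and 4096-byte page are illustrative values, not the real configuration):

#include <stdio.h>

int main(void)
{
  long pagesz   = 4096;
  long minsize  = 16;            /* stand-in for MINSIZE */
  long pad      = 0;             /* trim padding passed in */
  long top_size = 4 * pagesz;    /* 16384 bytes currently in top */

  long extra = ((top_size - pad - minsize + (pagesz-1)) / pagesz - 1) * pagesz;

  /* Here extra == 3*4096 == 12288: three whole pages are released and
     one full page stays behind in top. */
  printf("release %ld bytes, keep %ld\n", extra, top_size - extra);
  return 0;
}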

Void_t* vALLOc ( size_t  bytes)

Definition at line 4228 of file malloc.c.

{
  /* Ensure initialization/consolidation */
  mstate av = get_malloc_state();
  malloc_consolidate(av);
  return mEMALIGn(av->pagesize, bytes);
}
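
vALLOc is simply memalign with the page size as the alignment, after forcing initialization via malloc_consolidate. A minimal sketch (standard POSIX, not this file's code) of what page alignment means for the returned pointer; posix_memalign is used here only as a portable stand-in for memalign(pagesize, bytes):

#define _POSIX_C_SOURCE 200112L
#include <assert.h>
#include <stdint.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
  size_t pagesize = (size_t)sysconf(_SC_PAGESIZE);
  void* p = 0;
  if (posix_memalign(&p, pagesize, 100) != 0) return 1;

  /* The low bits of a page-aligned address are all zero. */
  assert(((uintptr_t)p & (pagesize - 1)) == 0);
  free(p);
  return 0;
}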



Variable Documentation

int dev_zero_fd = -1 [static]

Definition at line 1421 of file malloc.c.
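
dev_zero_fd caches a descriptor for /dev/zero so the MMAP macro can obtain zero-filled anonymous memory on systems that lack MAP_ANONYMOUS. A minimal standalone sketch of that conventional fallback (illustrative only, not the file's MMAP macro verbatim):

#include <fcntl.h>
#include <stddef.h>
#include <stdio.h>
#include <sys/mman.h>

static int dev_zero_fd = -1;   /* opened once, then reused */

static void* map_zeroed(size_t size)
{
  if (dev_zero_fd < 0)
    dev_zero_fd = open("/dev/zero", O_RDWR);
  if (dev_zero_fd < 0)
    return MAP_FAILED;
  return mmap(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE, dev_zero_fd, 0);
}

int main(void)
{
  void* p = map_zeroed(4096);
  if (p == MAP_FAILED) return 1;
  printf("mapped zero-filled page at %p\n", p);
  munmap(p, 4096);
  return 0;
}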

Definition at line 1188 of file malloc.c.

struct malloc_state [static]

Definition at line 2175 of file malloc.c.