
glibc 2.9
malloc.c File Reference
#include <sys/types.h>
#include <malloc-machine.h>
#include <unistd.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <stdarg.h>
#include <sys/uio.h>
#include <sys/syslog.h>
#include <dlfcn.h>
#include <sys/param.h>
#include <malloc.h>
#include <fcntl.h>
#include <sys/mman.h>
#include "arena.c"
#include "hooks.c"


Classes

struct  malloc_chunk
struct  malloc_state
struct  malloc_par

Defines

#define __STD_C   0
#define Void_t   char
#define assert(x)   ((void)0)
#define INTERNAL_SIZE_T   size_t
#define SIZE_SZ   (sizeof(INTERNAL_SIZE_T))
#define MALLOC_ALIGNMENT   (2 * SIZE_SZ)
#define MALLOC_ALIGN_MASK   (MALLOC_ALIGNMENT - 1)
#define REALLOC_ZERO_BYTES_FREES   1
#define TRIM_FASTBINS   0
#define public_cALLOc   calloc
#define public_fREe   free
#define public_cFREe   cfree
#define public_mALLOc   malloc
#define public_mEMALIGn   memalign
#define public_rEALLOc   realloc
#define public_vALLOc   valloc
#define public_pVALLOc   pvalloc
#define public_mALLINFo   mallinfo
#define public_mALLOPt   mallopt
#define public_mTRIm   malloc_trim
#define public_mSTATs   malloc_stats
#define public_mUSABLe   malloc_usable_size
#define public_iCALLOc   independent_calloc
#define public_iCOMALLOc   independent_comalloc
#define public_gET_STATe   malloc_get_state
#define public_sET_STATe   malloc_set_state
#define __builtin_expect(expr, val)   (expr)
#define fwrite(buf, size, count, fp)   _IO_fwrite (buf, size, count, fp)
#define HAVE_MEMCPY
#define USE_MEMCPY   1
#define MALLOC_FAILURE_ACTION
#define MORECORE   sbrk
#define MORECORE_FAILURE   (-1)
#define MORECORE_CONTIGUOUS   1
#define MORECORE_CLEARS   1
#define HAVE_MMAP   1
#define MMAP_CLEARS   1
#define MMAP_AS_MORECORE_SIZE   (1024 * 1024)
#define HAVE_MREMAP   0
#define USE_ARENAS   HAVE_MMAP
#define malloc_getpagesize   (4096)
#define M_MXFAST   1
#define DEFAULT_MXFAST   64
#define M_TRIM_THRESHOLD   -1
#define DEFAULT_TRIM_THRESHOLD   (128 * 1024)
#define M_TOP_PAD   -2
#define DEFAULT_TOP_PAD   (0)
#define DEFAULT_MMAP_THRESHOLD_MIN   (128 * 1024)
#define DEFAULT_MMAP_THRESHOLD_MAX   (4 * 1024 * 1024 * sizeof(long))
#define M_MMAP_THRESHOLD   -3
#define DEFAULT_MMAP_THRESHOLD   DEFAULT_MMAP_THRESHOLD_MIN
#define M_MMAP_MAX   -4
#define DEFAULT_MMAP_MAX   (65536)
#define BOUNDED_N(ptr, sz)   (ptr)
#define RETURN_ADDRESS(X_)   (NULL)
#define internal_function
#define MALLOC_COPY(dest, src, nbytes)   memcpy(dest, src, nbytes)
#define MALLOC_ZERO(dest, nbytes)   memset(dest, 0, nbytes)
#define MAP_FAILED   ((char*)-1)
#define MAP_NORESERVE   0
#define MMAP(addr, size, prot, flags)
#define chunk2mem(p)   ((Void_t*)((char*)(p) + 2*SIZE_SZ))
#define mem2chunk(mem)   ((mchunkptr)((char*)(mem) - 2*SIZE_SZ))
#define MIN_CHUNK_SIZE   (offsetof(struct malloc_chunk, fd_nextsize))
#define MINSIZE   (unsigned long)(((MIN_CHUNK_SIZE+MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK))
#define aligned_OK(m)   (((unsigned long)(m) & MALLOC_ALIGN_MASK) == 0)
#define misaligned_chunk(p)
#define REQUEST_OUT_OF_RANGE(req)
#define request2size(req)
#define checked_request2size(req, sz)
#define PREV_INUSE   0x1
#define prev_inuse(p)   ((p)->size & PREV_INUSE)
#define IS_MMAPPED   0x2
#define chunk_is_mmapped(p)   ((p)->size & IS_MMAPPED)
#define NON_MAIN_ARENA   0x4
#define chunk_non_main_arena(p)   ((p)->size & NON_MAIN_ARENA)
#define SIZE_BITS   (PREV_INUSE|IS_MMAPPED|NON_MAIN_ARENA)
#define chunksize(p)   ((p)->size & ~(SIZE_BITS))
#define next_chunk(p)   ((mchunkptr)( ((char*)(p)) + ((p)->size & ~SIZE_BITS) ))
#define prev_chunk(p)   ((mchunkptr)( ((char*)(p)) - ((p)->prev_size) ))
#define chunk_at_offset(p, s)   ((mchunkptr)(((char*)(p)) + (s)))
#define inuse(p)   ((((mchunkptr)(((char*)(p))+((p)->size & ~SIZE_BITS)))->size) & PREV_INUSE)
#define set_inuse(p)   ((mchunkptr)(((char*)(p)) + ((p)->size & ~SIZE_BITS)))->size |= PREV_INUSE
#define clear_inuse(p)   ((mchunkptr)(((char*)(p)) + ((p)->size & ~SIZE_BITS)))->size &= ~(PREV_INUSE)
#define inuse_bit_at_offset(p, s)   (((mchunkptr)(((char*)(p)) + (s)))->size & PREV_INUSE)
#define set_inuse_bit_at_offset(p, s)   (((mchunkptr)(((char*)(p)) + (s)))->size |= PREV_INUSE)
#define clear_inuse_bit_at_offset(p, s)   (((mchunkptr)(((char*)(p)) + (s)))->size &= ~(PREV_INUSE))
#define set_head_size(p, s)   ((p)->size = (((p)->size & SIZE_BITS) | (s)))
#define set_head(p, s)   ((p)->size = (s))
#define set_foot(p, s)   (((mchunkptr)((char*)(p) + (s)))->prev_size = (s))
#define bin_at(m, i)
#define next_bin(b)   ((mbinptr)((char*)(b) + (sizeof(mchunkptr)<<1)))
#define first(b)   ((b)->fd)
#define last(b)   ((b)->bk)
#define unlink(P, BK, FD)
#define NBINS   128
#define NSMALLBINS   64
#define SMALLBIN_WIDTH   MALLOC_ALIGNMENT
#define MIN_LARGE_SIZE   (NSMALLBINS * SMALLBIN_WIDTH)
#define in_smallbin_range(sz)   ((unsigned long)(sz) < (unsigned long)MIN_LARGE_SIZE)
#define smallbin_index(sz)   (SMALLBIN_WIDTH == 16 ? (((unsigned)(sz)) >> 4) : (((unsigned)(sz)) >> 3))
#define largebin_index_32(sz)
#define largebin_index_64(sz)
#define largebin_index(sz)   (SIZE_SZ == 8 ? largebin_index_64 (sz) : largebin_index_32 (sz))
#define bin_index(sz)   ((in_smallbin_range(sz)) ? smallbin_index(sz) : largebin_index(sz))
#define unsorted_chunks(M)   (bin_at(M, 1))
#define initial_top(M)   (unsorted_chunks(M))
#define BINMAPSHIFT   5
#define BITSPERMAP   (1U << BINMAPSHIFT)
#define BINMAPSIZE   (NBINS / BITSPERMAP)
#define idx2block(i)   ((i) >> BINMAPSHIFT)
#define idx2bit(i)   ((1U << ((i) & ((1U << BINMAPSHIFT)-1))))
#define mark_bin(m, i)   ((m)->binmap[idx2block(i)] |= idx2bit(i))
#define unmark_bin(m, i)   ((m)->binmap[idx2block(i)] &= ~(idx2bit(i)))
#define get_binmap(m, i)   ((m)->binmap[idx2block(i)] & idx2bit(i))
#define fastbin_index(sz)   ((((unsigned int)(sz)) >> 3) - 2)
#define MAX_FAST_SIZE   80
#define NFASTBINS   (fastbin_index(request2size(MAX_FAST_SIZE))+1)
#define FASTBIN_CONSOLIDATION_THRESHOLD   (65536UL)
#define FASTCHUNKS_BIT   (1U)
#define have_fastchunks(M)   (((M)->flags & FASTCHUNKS_BIT) == 0)
#define clear_fastchunks(M)   ((M)->flags |= FASTCHUNKS_BIT)
#define set_fastchunks(M)   ((M)->flags &= ~FASTCHUNKS_BIT)
#define NONCONTIGUOUS_BIT   (2U)
#define contiguous(M)   (((M)->flags & NONCONTIGUOUS_BIT) == 0)
#define noncontiguous(M)   (((M)->flags & NONCONTIGUOUS_BIT) != 0)
#define set_noncontiguous(M)   ((M)->flags |= NONCONTIGUOUS_BIT)
#define set_contiguous(M)   ((M)->flags &= ~NONCONTIGUOUS_BIT)
#define set_max_fast(s)   global_max_fast = ((s) == 0)? SMALLBIN_WIDTH: request2size(s)
#define get_max_fast()   global_max_fast
#define weak_variable
#define DEFAULT_CHECK_ACTION   3
#define alloc_perturb(p, n)   memset (p, (perturb_byte ^ 0xff) & 0xff, n)
#define free_perturb(p, n)   memset (p, perturb_byte & 0xff, n)
#define check_chunk(A, P)
#define check_free_chunk(A, P)
#define check_inuse_chunk(A, P)
#define check_remalloced_chunk(A, P, N)
#define check_malloced_chunk(A, P, N)
#define check_malloc_state(A)
#define HALF_INTERNAL_SIZE_T   (((INTERNAL_SIZE_T) 1) << (8 * sizeof (INTERNAL_SIZE_T) / 2))
#define MAX_ITERS   10000

Typedefs

typedef struct malloc_chunk* mchunkptr
typedef struct malloc_chunk* mbinptr
typedef struct malloc_chunk* mfastbinptr

Functions

Void_t* memset ()
Void_t* memcpy ()
Void_t* public_mALLOc ()
void public_fREe ()
Void_t* public_cALLOc ()
Void_t* public_rEALLOc ()
Void_t* public_mEMALIGn ()
Void_t* public_vALLOc ()
int public_mALLOPt ()
struct mallinfo public_mALLINFo ()
Void_t ** public_iCALLOc ()
Void_t ** public_iCOMALLOc ()
Void_t* public_pVALLOc ()
void public_cFREe ()
int public_mTRIm ()
size_t public_mUSABLe ()
void public_mSTATs ()
Void_t* public_gET_STATe ()
int public_sET_STATe ()
Void_t* _int_malloc ()
void _int_free ()
Void_t* _int_realloc ()
Void_t* _int_memalign ()
Void_t* _int_valloc ()
Void_t* _int_pvalloc ()
static Void_t ** _int_icalloc ()
static Void_t ** _int_icomalloc ()
static int mTRIm ()
static size_t mUSABLe ()
static void mSTATs ()
static int mALLOPt ()
static struct mallinfo mALLINFo ()
static void malloc_init_state (mstate av)
static Void_t* sYSMALLOc ()
static int sYSTRIm ()
static void malloc_consolidate ()
static Void_t ** iALLOc ()
static Void_t *malloc_hook_ini __MALLOC_P ((size_t sz, const __malloc_ptr_t caller))
static Void_t *realloc_hook_ini __MALLOC_P ((Void_t *ptr, size_t sz, const __malloc_ptr_t caller))
static Void_t *memalign_hook_ini __MALLOC_P ((size_t alignment, size_t sz, const __malloc_ptr_t caller))
static Void_t* sYSMALLOc (INTERNAL_SIZE_T nb, mstate av)
static int sYSTRIm (size_t pad, mstate av)
static void internal_function munmap_chunk (mchunkptr p)
Void_t* public_mALLOc (size_t bytes)
void public_fREe (Void_t *mem)
Void_t* public_rEALLOc (Void_t *oldmem, size_t bytes)
Void_t* public_mEMALIGn (size_t alignment, size_t bytes)
Void_t* public_vALLOc (size_t bytes)
Void_t* public_pVALLOc (size_t bytes)
Void_t* public_cALLOc (size_t n, size_t elem_size)
Void_t ** public_iCALLOc (size_t n, size_t elem_size, Void_t **chunks)
Void_t ** public_iCOMALLOc (size_t n, size_t sizes[], Void_t **chunks)
void public_cFREe (Void_t *m)
int public_mTRIm (size_t s)
size_t public_mUSABLe (Void_t *m)
int public_mALLOPt (int p, int v)
Void_t* _int_malloc (mstate av, size_t bytes)
void _int_free (mstate av, Void_t *mem)
static void malloc_consolidate (mstate av)
Void_t* _int_realloc (mstate av, Void_t *oldmem, size_t bytes)
Void_t* _int_memalign (mstate av, size_t alignment, size_t bytes)
Void_t ** _int_icalloc (mstate av, size_t n_elements, size_t elem_size, Void_t **chunks)
Void_t ** _int_icomalloc (mstate av, size_t n_elements, size_t sizes[], Void_t **chunks)
static Void_t ** iALLOc (mstate av, size_t n_elements, size_t *sizes, int opts, Void_t **chunks)
Void_t* _int_valloc (mstate av, size_t bytes)
Void_t* _int_pvalloc (mstate av, size_t bytes)
static int mTRIm (mstate av, size_t pad)
size_t mUSABLe (Void_t *mem)
struct mallinfo mALLINFo (mstate av)
int mALLOPt (int param_number, int value)
static void malloc_printerr (int action, const char *str, void *ptr)

Variables

static int dev_zero_fd = -1
static struct malloc_state main_arena
static struct malloc_par mp_
static INTERNAL_SIZE_T global_max_fast
void weak_variable(* __malloc_initialize_hook )(void) = NULL
void weak_variable(* __free_hook )(__malloc_ptr_t __ptr, const __malloc_ptr_t) = NULL
__malloc_ptr_t weak_variable(* __malloc_hook )(size_t __size, const __malloc_ptr_t) = malloc_hook_ini
__malloc_ptr_t weak_variable(* __realloc_hook )(__malloc_ptr_t __ptr, size_t __size, const __malloc_ptr_t) = realloc_hook_ini
__malloc_ptr_t weak_variable(* __memalign_hook )(size_t __alignment, size_t __size, const __malloc_ptr_t) = memalign_hook_ini
void weak_variable(* __after_morecore_hook )(void) = NULL
static int check_action = DEFAULT_CHECK_ACTION
static int perturb_byte
char **__libc_argv attribute_hidden

Class Documentation

struct malloc_chunk

Definition at line 1773 of file malloc.c.

Class Members
struct malloc_chunk * bk
struct malloc_chunk * bk_nextsize
struct malloc_chunk * fd
struct malloc_chunk * fd_nextsize
INTERNAL_SIZE_T prev_size
INTERNAL_SIZE_T size
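
The members above form the classic boundary-tag chunk header. As a minimal, compilable sketch (assuming the default INTERNAL_SIZE_T of size_t; the struct below is a local stand-in, not the real malloc_chunk), the user pointer and MIN_CHUNK_SIZE fall straight out of the field offsets:

#include <stddef.h>
#include <stdio.h>

/* Local stand-in mirroring the member list above; not the real malloc_chunk. */
struct chunk_sketch {
  size_t prev_size;                  /* size of previous chunk, if it is free */
  size_t size;                       /* size of this chunk; low 3 bits are flags */
  struct chunk_sketch *fd;           /* double links, only used while free */
  struct chunk_sketch *bk;
  struct chunk_sketch *fd_nextsize;  /* only used for free large-bin chunks */
  struct chunk_sketch *bk_nextsize;
};

int main(void) {
  /* chunk2mem() skips prev_size and size, i.e. 2*SIZE_SZ bytes: user data
     starts where fd lives, so the link fields cost nothing while in use. */
  printf("user data starts at offset %zu\n",
         offsetof(struct chunk_sketch, fd));
  /* MIN_CHUNK_SIZE is defined as offsetof(struct malloc_chunk, fd_nextsize). */
  printf("MIN_CHUNK_SIZE would be    %zu\n",
         offsetof(struct chunk_sketch, fd_nextsize));
  return 0;
}
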
struct malloc_state

Definition at line 2317 of file malloc.c.

Class Members
unsigned int binmap
mchunkptr bins
mfastbinptr fastbins
int flags
mchunkptr last_remainder
INTERNAL_SIZE_T max_system_mem
mutex_t mutex
struct malloc_state * next
INTERNAL_SIZE_T system_mem
mchunkptr top
struct malloc_par

Definition at line 2352 of file malloc.c.

Class Members
INTERNAL_SIZE_T max_mmapped_mem
int max_n_mmaps
INTERNAL_SIZE_T max_total_mem
INTERNAL_SIZE_T mmap_threshold
INTERNAL_SIZE_T mmapped_mem
int n_mmaps
int n_mmaps_max
int no_dyn_threshold
unsigned int pagesize
char * sbrk_base
INTERNAL_SIZE_T top_pad
unsigned long trim_threshold

Define Documentation

#define __builtin_expect (   expr,
  val 
)    (expr)

Definition at line 519 of file malloc.c.

#define __STD_C   0

Definition at line 230 of file malloc.c.

#define aligned_OK (   m)    (((unsigned long)(m) & MALLOC_ALIGN_MASK) == 0)

Definition at line 1893 of file malloc.c.

#define alloc_perturb (   p,
  n 
)    memset (p, (perturb_byte ^ 0xff) & 0xff, n)

Definition at line 2501 of file malloc.c.

#define assert (   x)    ((void)0)

Definition at line 328 of file malloc.c.

#define bin_at (   m,
  i 
)
Value:
(mbinptr) (((char *) &((m)->bins[((i) - 1) * 2]))                           \
            - offsetof (struct malloc_chunk, fd))

Definition at line 2064 of file malloc.c.
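
The offsetof subtraction above is what lets a pair of pointers in the bins[] array masquerade as the fd/bk fields of a chunk-shaped bin header. A compilable sketch under stated assumptions (the structs here are pared-down stand-ins, not the real definitions):

#include <stddef.h>
#include <stdio.h>

/* Pared-down stand-ins; just enough to show the trick. */
struct malloc_chunk {
  size_t prev_size, size;
  struct malloc_chunk *fd, *bk;
};
typedef struct malloc_chunk *mchunkptr;
typedef struct malloc_chunk *mbinptr;

struct malloc_state_sketch {
  int flags;                     /* stand-ins for the fields that precede */
  mchunkptr top, last_remainder; /* bins in the real malloc_state         */
  mchunkptr bins[254];           /* NBINS == 128 -> (128 - 1) * 2 list heads */
};

/* Same arithmetic as the macro above: step back by offsetof(fd) so that the
   fd/bk members of the "fake" chunk overlay bins[2i-2] and bins[2i-1]. */
#define bin_at(m, i) \
  ((mbinptr)(((char *)&((m)->bins[((i) - 1) * 2])) \
             - offsetof(struct malloc_chunk, fd)))

int main(void) {
  struct malloc_state_sketch av = {0};
  mbinptr b = bin_at(&av, 1);    /* bin 1 is the unsorted-chunks list head */
  printf("&b->fd == &bins[0]? %d\n", (void *)&b->fd == (void *)&av.bins[0]);
  printf("&b->bk == &bins[1]? %d\n", (void *)&b->bk == (void *)&av.bins[1]);
  return 0;
}
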

#define bin_index (   sz)    ((in_smallbin_range(sz)) ? smallbin_index(sz) : largebin_index(sz))

Definition at line 2159 of file malloc.c.

#define BINMAPSHIFT   5

Definition at line 2213 of file malloc.c.

#define BINMAPSIZE   (NBINS / BITSPERMAP)

Definition at line 2215 of file malloc.c.

#define BITSPERMAP   (1U << BINMAPSHIFT)

Definition at line 2214 of file malloc.c.

#define BOUNDED_N (   ptr,
  sz 
)    (ptr)

Definition at line 1563 of file malloc.c.

#define check_chunk (   A,
  P 
)

Definition at line 2520 of file malloc.c.

#define check_free_chunk (   A,
  P 
)

Definition at line 2521 of file malloc.c.

#define check_inuse_chunk (   A,
  P 
)

Definition at line 2522 of file malloc.c.

#define check_malloc_state (   A)

Definition at line 2525 of file malloc.c.

#define check_malloced_chunk (   A,
  P,
  N 
)

Definition at line 2524 of file malloc.c.

#define check_remalloced_chunk (   A,
  P,
  N 
)

Definition at line 2523 of file malloc.c.

#define checked_request2size (   req,
  sz 
)
Value:
if (REQUEST_OUT_OF_RANGE(req)) {                                \
    MALLOC_FAILURE_ACTION;                                        \
    return 0;                                                     \
  }                                                               \
  (sz) = request2size(req);

Definition at line 1919 of file malloc.c.

#define chunk2mem (   p)    ((Void_t*)((char*)(p) + 2*SIZE_SZ))

Definition at line 1880 of file malloc.c.
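
A standalone round-trip check of chunk2mem/mem2chunk, restating the macros above (Void_t is just void/char in this file; the sketch uses void *):

#include <stddef.h>
#include <assert.h>
#include <stdio.h>

typedef size_t INTERNAL_SIZE_T;
#define SIZE_SZ        (sizeof(INTERNAL_SIZE_T))
#define chunk2mem(p)   ((void *)((char *)(p) + 2 * SIZE_SZ))
#define mem2chunk(mem) ((void *)((char *)(mem) - 2 * SIZE_SZ))

int main(void) {
  char chunk[64];                 /* stand-in for a real chunk header */
  void *mem = chunk2mem(chunk);   /* what malloc() hands to the caller */
  assert(mem2chunk(mem) == (void *)chunk);
  printf("user pointer sits %zu bytes past the chunk start\n", 2 * SIZE_SZ);
  return 0;
}
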

#define chunk_at_offset (   p,
  s 
)    ((mchunkptr)(((char*)(p)) + (s)))

Definition at line 1975 of file malloc.c.

#define chunk_is_mmapped (   p)    ((p)->size & IS_MMAPPED)

Definition at line 1942 of file malloc.c.

#define chunk_non_main_arena (   p)    ((p)->size & NON_MAIN_ARENA)

Definition at line 1951 of file malloc.c.

#define chunksize (   p)    ((p)->size & ~(SIZE_BITS))

Definition at line 1965 of file malloc.c.

#define clear_fastchunks (   M)    ((M)->flags |= FASTCHUNKS_BIT)

Definition at line 2282 of file malloc.c.

#define clear_inuse (   p)    ((mchunkptr)(((char*)(p)) + ((p)->size & ~SIZE_BITS)))->size &= ~(PREV_INUSE)

Definition at line 1985 of file malloc.c.

#define clear_inuse_bit_at_offset (   p,
  s 
)    (((mchunkptr)(((char*)(p)) + (s)))->size &= ~(PREV_INUSE))

Definition at line 1996 of file malloc.c.

#define contiguous (   M)    (((M)->flags & NONCONTIGUOUS_BIT) == 0)

Definition at line 2296 of file malloc.c.

#define DEFAULT_CHECK_ACTION   3

Definition at line 2491 of file malloc.c.

#define DEFAULT_MMAP_MAX   (65536)

Definition at line 1550 of file malloc.c.

#define DEFAULT_MMAP_THRESHOLD   DEFAULT_MMAP_THRESHOLD_MIN

Definition at line 1530 of file malloc.c.

#define DEFAULT_MMAP_THRESHOLD_MAX   (4 * 1024 * 1024 * sizeof(long))

Definition at line 1431 of file malloc.c.

#define DEFAULT_MMAP_THRESHOLD_MIN   (128 * 1024)

Definition at line 1420 of file malloc.c.

#define DEFAULT_MXFAST   64

Definition at line 1311 of file malloc.c.

#define DEFAULT_TOP_PAD   (0)

Definition at line 1411 of file malloc.c.

#define DEFAULT_TRIM_THRESHOLD   (128 * 1024)

Definition at line 1378 of file malloc.c.

#define FASTBIN_CONSOLIDATION_THRESHOLD   (65536UL)

Definition at line 2262 of file malloc.c.

#define fastbin_index (   sz)    ((((unsigned int)(sz)) >> 3) - 2)

Definition at line 2244 of file malloc.c.
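
The same shift applied to a few chunk sizes, as a standalone demo; fastbin slots are 8 bytes apart, so on builds where MINSIZE is 32 the first two indices simply go unused:

#include <stdio.h>

#define fastbin_index(sz) ((((unsigned int)(sz)) >> 3) - 2)

int main(void) {
  for (unsigned int sz = 16; sz <= 80; sz += 8)
    printf("chunk size %2u -> fastbin index %u\n", sz, fastbin_index(sz));
  return 0;
}
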

#define FASTCHUNKS_BIT   (1U)

Definition at line 2279 of file malloc.c.

#define first (   b)    ((b)->fd)

Definition at line 2072 of file malloc.c.

#define free_perturb (   p,
  n 
)    memset (p, perturb_byte & 0xff, n)

Definition at line 2502 of file malloc.c.

#define fwrite (   buf,
  size,
  count,
  fp 
)    _IO_fwrite (buf, size, count, fp)

Definition at line 521 of file malloc.c.

#define get_binmap (   m,
  i 
)    ((m)->binmap[idx2block(i)] & idx2bit(i))

Definition at line 2222 of file malloc.c.

#define get_max_fast ( )    global_max_fast

Definition at line 2310 of file malloc.c.

#define HALF_INTERNAL_SIZE_T   (((INTERNAL_SIZE_T) 1) << (8 * sizeof (INTERNAL_SIZE_T) / 2))

#define have_fastchunks (   M)    (((M)->flags & FASTCHUNKS_BIT) == 0)

Definition at line 2281 of file malloc.c.

#define HAVE_MEMCPY

Definition at line 538 of file malloc.c.

#define HAVE_MMAP   1

Definition at line 673 of file malloc.c.

#define HAVE_MREMAP   0

Definition at line 719 of file malloc.c.

#define idx2bit (   i)    ((1U << ((i) & ((1U << BINMAPSHIFT)-1))))

Definition at line 2218 of file malloc.c.

#define idx2block (   i)    ((i) >> BINMAPSHIFT)

Definition at line 2217 of file malloc.c.
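
A standalone restatement of the binmap macros: the binmap is BINMAPSIZE words of BITSPERMAP bits, one bit per bin, so the scan in _int_malloc can skip runs of empty bins in bulk:

#include <stdio.h>

#define BINMAPSHIFT  5
#define BITSPERMAP   (1U << BINMAPSHIFT)
#define NBINS        128
#define BINMAPSIZE   (NBINS / BITSPERMAP)
#define idx2block(i) ((i) >> BINMAPSHIFT)
#define idx2bit(i)   ((1U << ((i) & ((1U << BINMAPSHIFT) - 1))))

int main(void) {
  unsigned int binmap[BINMAPSIZE] = {0};
  unsigned int i = 70;                      /* an arbitrary bin index */
  binmap[idx2block(i)] |= idx2bit(i);       /* mark_bin(m, i)   */
  printf("bin %u -> word %u, bit 0x%x\n", i, idx2block(i), idx2bit(i));
  printf("get_binmap      -> 0x%x\n", binmap[idx2block(i)] & idx2bit(i));
  binmap[idx2block(i)] &= ~idx2bit(i);      /* unmark_bin(m, i) */
  return 0;
}
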

#define in_smallbin_range (   sz)    ((unsigned long)(sz) < (unsigned long)MIN_LARGE_SIZE)

Definition at line 2131 of file malloc.c.

#define initial_top (   M)    (unsorted_chunks(M))

Definition at line 2199 of file malloc.c.

#define internal_function

Definition at line 1573 of file malloc.c.

#define INTERNAL_SIZE_T   size_t

Definition at line 364 of file malloc.c.

#define inuse (   p)    ((((mchunkptr)(((char*)(p))+((p)->size & ~SIZE_BITS)))->size) & PREV_INUSE)

Definition at line 1978 of file malloc.c.

#define inuse_bit_at_offset (   p,
  s 
)    (((mchunkptr)(((char*)(p)) + (s)))->size & PREV_INUSE)

Definition at line 1990 of file malloc.c.

#define IS_MMAPPED   0x2

Definition at line 1939 of file malloc.c.

#define largebin_index (   sz)    (SIZE_SZ == 8 ? largebin_index_64 (sz) : largebin_index_32 (sz))

Definition at line 2156 of file malloc.c.

#define largebin_index_32 (   sz)
Value:
(((((unsigned long)(sz)) >>  6) <= 38)?  56 + (((unsigned long)(sz)) >>  6): \
 ((((unsigned long)(sz)) >>  9) <= 20)?  91 + (((unsigned long)(sz)) >>  9): \
 ((((unsigned long)(sz)) >> 12) <= 10)? 110 + (((unsigned long)(sz)) >> 12): \
 ((((unsigned long)(sz)) >> 15) <=  4)? 119 + (((unsigned long)(sz)) >> 15): \
 ((((unsigned long)(sz)) >> 18) <=  2)? 124 + (((unsigned long)(sz)) >> 18): \
                                        126)

Definition at line 2137 of file malloc.c.
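
A standalone copy of the macro makes the tiering visible: each branch uses a coarser spacing (64, 512, 4096, 32768, then 262144 bytes per bin) as sizes grow:

#include <stdio.h>

#define largebin_index_32(sz)                                                \
(((((unsigned long)(sz)) >>  6) <= 38)?  56 + (((unsigned long)(sz)) >>  6): \
 ((((unsigned long)(sz)) >>  9) <= 20)?  91 + (((unsigned long)(sz)) >>  9): \
 ((((unsigned long)(sz)) >> 12) <= 10)? 110 + (((unsigned long)(sz)) >> 12): \
 ((((unsigned long)(sz)) >> 15) <=  4)? 119 + (((unsigned long)(sz)) >> 15): \
 ((((unsigned long)(sz)) >> 18) <=  2)? 124 + (((unsigned long)(sz)) >> 18): \
                                        126)

int main(void) {
  unsigned long sizes[] = { 512, 1024, 4096, 65536, 1048576 };
  for (unsigned int i = 0; i < sizeof sizes / sizeof sizes[0]; i++)
    printf("size %7lu -> large bin index %lu\n",
           sizes[i], (unsigned long)largebin_index_32(sizes[i]));
  return 0;
}

This prints indices 64, 72, 99, 121 and 126: small large chunks get fine-grained bins, huge ones all share the last few.
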

#define largebin_index_64 (   sz)
Value:
(((((unsigned long)(sz)) >>  6) <= 48)?  48 + (((unsigned long)(sz)) >>  6): \
 ((((unsigned long)(sz)) >>  9) <= 20)?  91 + (((unsigned long)(sz)) >>  9): \
 ((((unsigned long)(sz)) >> 12) <= 10)? 110 + (((unsigned long)(sz)) >> 12): \
 ((((unsigned long)(sz)) >> 15) <=  4)? 119 + (((unsigned long)(sz)) >> 15): \
 ((((unsigned long)(sz)) >> 18) <=  2)? 124 + (((unsigned long)(sz)) >> 18): \
                                        126)

Definition at line 2148 of file malloc.c.

#define last (   b)    ((b)->bk)

Definition at line 2073 of file malloc.c.

#define M_MMAP_MAX   -4

Definition at line 1546 of file malloc.c.

#define M_MMAP_THRESHOLD   -3

Definition at line 1527 of file malloc.c.

#define M_MXFAST   1

Definition at line 1307 of file malloc.c.

#define M_TOP_PAD   -2

Definition at line 1408 of file malloc.c.

#define M_TRIM_THRESHOLD   -1

Definition at line 1375 of file malloc.c.

#define MALLOC_ALIGN_MASK   (MALLOC_ALIGNMENT - 1)

Definition at line 394 of file malloc.c.

#define MALLOC_ALIGNMENT   (2 * SIZE_SZ)

Definition at line 390 of file malloc.c.

#define MALLOC_COPY (   dest,
  src,
  nbytes 
)    memcpy(dest, src, nbytes)

Definition at line 1665 of file malloc.c.

#define MALLOC_FAILURE_ACTION

Definition at line 582 of file malloc.c.

#define malloc_getpagesize   (4096)

Definition at line 788 of file malloc.c.

#define MALLOC_ZERO (   dest,
  nbytes 
)    memset(dest, 0, nbytes)

Definition at line 1666 of file malloc.c.

#define MAP_FAILED   ((char*)-1)

Definition at line 1725 of file malloc.c.

#define MAP_NORESERVE   0

Definition at line 1732 of file malloc.c.

#define mark_bin (   m,
  i 
)    ((m)->binmap[idx2block(i)] |= idx2bit(i))

Definition at line 2220 of file malloc.c.

#define MAX_FAST_SIZE   80

Definition at line 2247 of file malloc.c.

#define MAX_ITERS   10000

#define mem2chunk (   mem)    ((mchunkptr)((char*)(mem) - 2*SIZE_SZ))

Definition at line 1881 of file malloc.c.

#define MIN_CHUNK_SIZE   (offsetof(struct malloc_chunk, fd_nextsize))

Definition at line 1884 of file malloc.c.

#define MIN_LARGE_SIZE   (NSMALLBINS * SMALLBIN_WIDTH)

Definition at line 2129 of file malloc.c.

#define MINSIZE   (unsigned long)(((MIN_CHUNK_SIZE+MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK))

Definition at line 1888 of file malloc.c.

#define misaligned_chunk (   p)
Value:

Definition at line 1895 of file malloc.c.

#define MMAP (   addr,
  size,
  prot,
  flags 
)
Value:
((dev_zero_fd < 0) ? \
 (dev_zero_fd = open("/dev/zero", O_RDWR), \
  mmap((addr), (size), (prot), (flags), dev_zero_fd, 0)) : \
   mmap((addr), (size), (prot), (flags), dev_zero_fd, 0))

Definition at line 1746 of file malloc.c.
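
The /dev/zero dance above exists for systems without anonymous mappings; where MAP_ANONYMOUS is available, the equivalent request needs no file descriptor at all. A minimal sketch of that form (not code from this file):

#include <stddef.h>
#include <stdio.h>
#include <sys/mman.h>

int main(void) {
  size_t size = 1024 * 1024;           /* cf. MMAP_AS_MORECORE_SIZE */
  void *p = mmap(NULL, size, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (p == MAP_FAILED) { perror("mmap"); return 1; }
  /* p is zero-filled private memory, just like the /dev/zero mapping. */
  munmap(p, size);
  return 0;
}
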

#define MMAP_AS_MORECORE_SIZE   (1024 * 1024)

Definition at line 706 of file malloc.c.

#define MMAP_CLEARS   1

Definition at line 681 of file malloc.c.

#define MORECORE   sbrk

Definition at line 609 of file malloc.c.

#define MORECORE_CLEARS   1

Definition at line 656 of file malloc.c.

#define MORECORE_CONTIGUOUS   1

Definition at line 634 of file malloc.c.

#define MORECORE_FAILURE   (-1)

Definition at line 620 of file malloc.c.

#define NBINS   128

Definition at line 2126 of file malloc.c.

#define next_bin (   b)    ((mbinptr)((char*)(b) + (sizeof(mchunkptr)<<1)))

Definition at line 2069 of file malloc.c.

#define next_chunk (   p)    ((mchunkptr)( ((char*)(p)) + ((p)->size & ~SIZE_BITS) ))

Definition at line 1969 of file malloc.c.

#define NFASTBINS   (fastbin_index(request2size(MAX_FAST_SIZE))+1)

Definition at line 2249 of file malloc.c.

#define NON_MAIN_ARENA   0x4

Definition at line 1948 of file malloc.c.

#define noncontiguous (   M)    (((M)->flags & NONCONTIGUOUS_BIT) != 0)

Definition at line 2297 of file malloc.c.

#define NONCONTIGUOUS_BIT   (2U)

Definition at line 2294 of file malloc.c.

#define NSMALLBINS   64

Definition at line 2127 of file malloc.c.

#define prev_chunk (   p)    ((mchunkptr)( ((char*)(p)) - ((p)->prev_size) ))

Definition at line 1972 of file malloc.c.

#define PREV_INUSE   0x1

Definition at line 1932 of file malloc.c.

#define prev_inuse (   p)    ((p)->size & PREV_INUSE)

Definition at line 1935 of file malloc.c.
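
Because chunk sizes are always multiples of MALLOC_ALIGNMENT, the low three bits of the size field are free to carry the PREV_INUSE, IS_MMAPPED and NON_MAIN_ARENA flags; chunksize() masks them off. A standalone restatement:

#include <stdio.h>

#define PREV_INUSE      0x1
#define IS_MMAPPED      0x2
#define NON_MAIN_ARENA  0x4
#define SIZE_BITS       (PREV_INUSE | IS_MMAPPED | NON_MAIN_ARENA)

int main(void) {
  unsigned long size = 96 | PREV_INUSE | NON_MAIN_ARENA;
  printf("raw size field: 0x%lx\n", size);                      /* 0x65 */
  printf("chunksize     : %lu\n", size & ~(unsigned long)SIZE_BITS);
  printf("prev_inuse    : %lu\n", size & PREV_INUSE);
  return 0;
}
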

#define public_cALLOc   calloc

Definition at line 498 of file malloc.c.

#define public_cFREe   cfree

Definition at line 500 of file malloc.c.

#define public_fREe   free

Definition at line 499 of file malloc.c.

#define public_gET_STATe   malloc_get_state

Definition at line 513 of file malloc.c.

#define public_iCALLOc   independent_calloc

Definition at line 511 of file malloc.c.

#define public_iCOMALLOc   independent_comalloc

Definition at line 512 of file malloc.c.

#define public_mALLINFo   mallinfo

Definition at line 506 of file malloc.c.

#define public_mALLOc   malloc

Definition at line 501 of file malloc.c.

#define public_mALLOPt   mallopt

Definition at line 507 of file malloc.c.

#define public_mEMALIGn   memalign

Definition at line 502 of file malloc.c.

#define public_mSTATs   malloc_stats

Definition at line 509 of file malloc.c.

#define public_mTRIm   malloc_trim

Definition at line 508 of file malloc.c.

#define public_mUSABLe   malloc_usable_size

Definition at line 510 of file malloc.c.

#define public_pVALLOc   pvalloc

Definition at line 505 of file malloc.c.

#define public_rEALLOc   realloc

Definition at line 503 of file malloc.c.

#define public_sET_STATe   malloc_set_state

Definition at line 514 of file malloc.c.

#define public_vALLOc   valloc

Definition at line 504 of file malloc.c.

#define REALLOC_ZERO_BYTES_FREES   1

Definition at line 406 of file malloc.c.

#define request2size (   req)
Value:
(((req) + SIZE_SZ + MALLOC_ALIGN_MASK < MINSIZE)  ?             \
   MINSIZE :                                                      \
   ((req) + SIZE_SZ + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK)

Definition at line 1912 of file malloc.c.
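
For concreteness, the same arithmetic in a standalone program (the default layout with INTERNAL_SIZE_T == size_t is assumed, and MIN_CHUNK_SIZE is approximated as 4 * SIZE_SZ instead of using offsetof on the real struct):

#include <stddef.h>
#include <stdio.h>

#define SIZE_SZ            (sizeof(size_t))
#define MALLOC_ALIGNMENT   (2 * SIZE_SZ)
#define MALLOC_ALIGN_MASK  (MALLOC_ALIGNMENT - 1)
#define MIN_CHUNK_SIZE     (4 * SIZE_SZ)          /* prev_size, size, fd, bk */
#define MINSIZE \
  (unsigned long)(((MIN_CHUNK_SIZE + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK))
#define request2size(req)                                       \
  (((req) + SIZE_SZ + MALLOC_ALIGN_MASK < MINSIZE)  ?           \
    MINSIZE :                                                   \
    ((req) + SIZE_SZ + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK)

int main(void) {
  size_t reqs[] = { 1, 24, 25, 100 };
  for (unsigned int i = 0; i < sizeof reqs / sizeof reqs[0]; i++)
    printf("request %3zu -> internal chunk size %3lu\n",
           reqs[i], (unsigned long)request2size(reqs[i]));
  return 0;
}

On a 64-bit build this prints 32, 32, 48 and 112: every request pays SIZE_SZ bytes of header overhead, is rounded up to a multiple of MALLOC_ALIGNMENT, and never drops below MINSIZE.
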

#define REQUEST_OUT_OF_RANGE (   req)
Value:
((unsigned long)(req) >=                                        \
   (unsigned long)(INTERNAL_SIZE_T)(-2 * MINSIZE))

Definition at line 1906 of file malloc.c.

#define RETURN_ADDRESS (   X_)    (NULL)

Definition at line 1566 of file malloc.c.

#define set_contiguous (   M)    ((M)->flags &= ~NONCONTIGUOUS_BIT)

Definition at line 2299 of file malloc.c.

#define set_fastchunks (   M)    ((M)->flags &= ~FASTCHUNKS_BIT)

Definition at line 2283 of file malloc.c.

#define set_foot (   p,
  s 
)    (((mchunkptr)((char*)(p) + (s)))->prev_size = (s))

Definition at line 2007 of file malloc.c.

#define set_head (   p,
  s 
)    ((p)->size = (s))

Definition at line 2004 of file malloc.c.

#define set_head_size (   p,
  s 
)    ((p)->size = (((p)->size & SIZE_BITS) | (s)))

Definition at line 2001 of file malloc.c.

#define set_inuse (   p)    ((mchunkptr)(((char*)(p)) + ((p)->size & ~SIZE_BITS)))->size |= PREV_INUSE

Definition at line 1982 of file malloc.c.

#define set_inuse_bit_at_offset (   p,
  s 
)    (((mchunkptr)(((char*)(p)) + (s)))->size |= PREV_INUSE)

Definition at line 1993 of file malloc.c.

#define set_max_fast (   s)    global_max_fast = ((s) == 0)? SMALLBIN_WIDTH: request2size(s)

Definition at line 2308 of file malloc.c.

#define set_noncontiguous (   M)    ((M)->flags |= NONCONTIGUOUS_BIT)

Definition at line 2298 of file malloc.c.

#define SIZE_BITS   (PREV_INUSE|IS_MMAPPED|NON_MAIN_ARENA)

Definition at line 1962 of file malloc.c.

#define SIZE_SZ   (sizeof(INTERNAL_SIZE_T))

Definition at line 368 of file malloc.c.

#define smallbin_index (   sz)    (SMALLBIN_WIDTH == 16 ? (((unsigned)(sz)) >> 4) : (((unsigned)(sz)) >> 3))

Definition at line 2134 of file malloc.c.
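
The same macro in a standalone demo, with SMALLBIN_WIDTH assumed to be 16 (the case where MALLOC_ALIGNMENT == 2 * SIZE_SZ == 16); each small bin holds exactly one size:

#include <stdio.h>

#define SMALLBIN_WIDTH 16
#define smallbin_index(sz) \
  (SMALLBIN_WIDTH == 16 ? (((unsigned)(sz)) >> 4) : (((unsigned)(sz)) >> 3))

int main(void) {
  for (unsigned int sz = 32; sz <= 112; sz += 16)
    printf("chunk size %3u -> small bin %u\n", sz, smallbin_index(sz));
  return 0;
}
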

#define SMALLBIN_WIDTH   MALLOC_ALIGNMENT

Definition at line 2128 of file malloc.c.

#define TRIM_FASTBINS   0

Definition at line 426 of file malloc.c.

#define unlink (   P,
  BK,
  FD 
)
Value:
{                                            \
  FD = P->fd;                                                          \
  BK = P->bk;                                                          \
  if (__builtin_expect (FD->bk != P || BK->fd != P, 0))                \
    malloc_printerr (check_action, "corrupted double-linked list", P); \
  else {                                                               \
    FD->bk = BK;                                                       \
    BK->fd = FD;                                                       \
    if (!in_smallbin_range (P->size)                                  \
       && __builtin_expect (P->fd_nextsize != NULL, 0)) {             \
      assert (P->fd_nextsize->bk_nextsize == P);               \
      assert (P->bk_nextsize->fd_nextsize == P);               \
      if (FD->fd_nextsize == NULL) {                                  \
       if (P->fd_nextsize == P)                                \
         FD->fd_nextsize = FD->bk_nextsize = FD;               \
       else {                                                  \
         FD->fd_nextsize = P->fd_nextsize;                            \
         FD->bk_nextsize = P->bk_nextsize;                            \
         P->fd_nextsize->bk_nextsize = FD;                            \
         P->bk_nextsize->fd_nextsize = FD;                            \
       }                                                       \
      }       else {                                                  \
       P->fd_nextsize->bk_nextsize = P->bk_nextsize;                  \
       P->bk_nextsize->fd_nextsize = P->fd_nextsize;                  \
      }                                                               \
    }                                                          \
  }                                                                    \
}

Definition at line 2076 of file malloc.c.
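
The first thing unlink() does is verify that the chunk's neighbours still point back at it, catching a corrupted bin list before the pointers are rewritten. A self-contained sketch of just that check (the struct is a pared-down malloc_chunk and the nextsize handling is left out):

#include <stdio.h>

struct chunk { struct chunk *fd, *bk; };

static int unlink_checked(struct chunk *p) {
  struct chunk *fd = p->fd, *bk = p->bk;
  if (fd->bk != p || bk->fd != p) {
    fprintf(stderr, "corrupted double-linked list\n");
    return -1;                       /* malloc_printerr() in malloc.c */
  }
  fd->bk = bk;                       /* the actual unlink */
  bk->fd = fd;
  return 0;
}

int main(void) {
  struct chunk a, b, c;              /* a <-> b <-> c, circular */
  a.fd = &b; b.fd = &c; c.fd = &a;
  a.bk = &c; b.bk = &a; c.bk = &b;
  printf("unlink b: %d\n", unlink_checked(&b));   /* 0: check passes */

  a.fd = &b; b.fd = &c; c.bk = &b;   /* relink b for a second try */
  b.bk = &b;                         /* simulate an overwritten bk */
  printf("unlink b: %d\n", unlink_checked(&b));   /* -1: check fires */
  return 0;
}
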

#define unmark_bin (   m,
  i 
)    ((m)->binmap[idx2block(i)] &= ~(idx2bit(i)))

Definition at line 2221 of file malloc.c.

#define unsorted_chunks (   M)    (bin_at(M, 1))

Definition at line 2178 of file malloc.c.

#define USE_ARENAS   HAVE_MMAP

Definition at line 730 of file malloc.c.

#define USE_MEMCPY   1

Definition at line 542 of file malloc.c.

#define Void_t   char

Definition at line 243 of file malloc.c.

#define weak_variable

Definition at line 2458 of file malloc.c.


Typedef Documentation

typedef struct malloc_chunk* mbinptr

Definition at line 2061 of file malloc.c.

typedef struct malloc_chunk* mchunkptr

Definition at line 1578 of file malloc.c.

typedef struct malloc_chunk* mfastbinptr

Definition at line 2241 of file malloc.c.


Function Documentation

static Void_t* malloc_hook_ini __MALLOC_P ( (size_t sz, const __malloc_ptr_t caller)  ) [static]
static Void_t* realloc_hook_ini __MALLOC_P ( (Void_t *ptr, size_t sz, const __malloc_ptr_t caller)  ) [static]
static Void_t* memalign_hook_ini __MALLOC_P ( (size_t alignment, size_t sz, const __malloc_ptr_t caller)  ) [static]
void _int_free ( )
void _int_free ( mstate  av,
Void_t * mem 
)

Definition at line 4593 of file malloc.c.

{
  mchunkptr       p;           /* chunk corresponding to mem */
  INTERNAL_SIZE_T size;        /* its size */
  mfastbinptr*    fb;          /* associated fastbin */
  mchunkptr       nextchunk;   /* next contiguous chunk */
  INTERNAL_SIZE_T nextsize;    /* its size */
  int             nextinuse;   /* true if nextchunk is used */
  INTERNAL_SIZE_T prevsize;    /* size of previous contiguous chunk */
  mchunkptr       bck;         /* misc temp for linking */
  mchunkptr       fwd;         /* misc temp for linking */

  const char *errstr = NULL;

  p = mem2chunk(mem);
  size = chunksize(p);

  /* Little security check which won't hurt performance: the
     allocator never wrapps around at the end of the address space.
     Therefore we can exclude some size values which might appear
     here by accident or by "design" from some intruder.  */
  if (__builtin_expect ((uintptr_t) p > (uintptr_t) -size, 0)
      || __builtin_expect (misaligned_chunk (p), 0))
    {
      errstr = "free(): invalid pointer";
    errout:
      malloc_printerr (check_action, errstr, mem);
      return;
    }
  /* We know that each chunk is at least MINSIZE bytes in size.  */
  if (__builtin_expect (size < MINSIZE, 0))
    {
      errstr = "free(): invalid size";
      goto errout;
    }

  check_inuse_chunk(av, p);

  /*
    If eligible, place chunk on a fastbin so it can be found
    and used quickly in malloc.
  */

  if ((unsigned long)(size) <= (unsigned long)(get_max_fast ())

#if TRIM_FASTBINS
      /*
       If TRIM_FASTBINS set, don't place chunks
       bordering top into fastbins
      */
      && (chunk_at_offset(p, size) != av->top)
#endif
      ) {

    if (__builtin_expect (chunk_at_offset (p, size)->size <= 2 * SIZE_SZ, 0)
       || __builtin_expect (chunksize (chunk_at_offset (p, size))
                          >= av->system_mem, 0))
      {
       errstr = "free(): invalid next size (fast)";
       goto errout;
      }

    set_fastchunks(av);
    fb = &(av->fastbins[fastbin_index(size)]);
    /* Another simple check: make sure the top of the bin is not the
       record we are going to add (i.e., double free).  */
    if (__builtin_expect (*fb == p, 0))
      {
       errstr = "double free or corruption (fasttop)";
       goto errout;
      }

    if (__builtin_expect (perturb_byte, 0))
      free_perturb (mem, size - SIZE_SZ);

    p->fd = *fb;
    *fb = p;
  }

  /*
    Consolidate other non-mmapped chunks as they arrive.
  */

  else if (!chunk_is_mmapped(p)) {
    nextchunk = chunk_at_offset(p, size);

    /* Lightweight tests: check whether the block is already the
       top block.  */
    if (__builtin_expect (p == av->top, 0))
      {
       errstr = "double free or corruption (top)";
       goto errout;
      }
    /* Or whether the next chunk is beyond the boundaries of the arena.  */
    if (__builtin_expect (contiguous (av)
                       && (char *) nextchunk
                       >= ((char *) av->top + chunksize(av->top)), 0))
      {
       errstr = "double free or corruption (out)";
       goto errout;
      }
    /* Or whether the block is actually not marked used.  */
    if (__builtin_expect (!prev_inuse(nextchunk), 0))
      {
       errstr = "double free or corruption (!prev)";
       goto errout;
      }

    nextsize = chunksize(nextchunk);
    if (__builtin_expect (nextchunk->size <= 2 * SIZE_SZ, 0)
       || __builtin_expect (nextsize >= av->system_mem, 0))
      {
       errstr = "free(): invalid next size (normal)";
       goto errout;
      }

    if (__builtin_expect (perturb_byte, 0))
      free_perturb (mem, size - SIZE_SZ);

    /* consolidate backward */
    if (!prev_inuse(p)) {
      prevsize = p->prev_size;
      size += prevsize;
      p = chunk_at_offset(p, -((long) prevsize));
      unlink(p, bck, fwd);
    }

    if (nextchunk != av->top) {
      /* get and clear inuse bit */
      nextinuse = inuse_bit_at_offset(nextchunk, nextsize);

      /* consolidate forward */
      if (!nextinuse) {
       unlink(nextchunk, bck, fwd);
       size += nextsize;
      } else
       clear_inuse_bit_at_offset(nextchunk, 0);

      /*
       Place the chunk in unsorted chunk list. Chunks are
       not placed into regular bins until after they have
       been given one chance to be used in malloc.
      */

      bck = unsorted_chunks(av);
      fwd = bck->fd;
      p->fd = fwd;
      p->bk = bck;
      if (!in_smallbin_range(size))
       {
         p->fd_nextsize = NULL;
         p->bk_nextsize = NULL;
       }
      bck->fd = p;
      fwd->bk = p;

      set_head(p, size | PREV_INUSE);
      set_foot(p, size);

      check_free_chunk(av, p);
    }

    /*
      If the chunk borders the current high end of memory,
      consolidate into top
    */

    else {
      size += nextsize;
      set_head(p, size | PREV_INUSE);
      av->top = p;
      check_chunk(av, p);
    }

    /*
      If freeing a large space, consolidate possibly-surrounding
      chunks. Then, if the total unused topmost memory exceeds trim
      threshold, ask malloc_trim to reduce top.

      Unless max_fast is 0, we don't know if there are fastbins
      bordering top, so we cannot tell for sure whether threshold
      has been reached unless fastbins are consolidated.  But we
      don't want to consolidate on each free.  As a compromise,
      consolidation is performed if FASTBIN_CONSOLIDATION_THRESHOLD
      is reached.
    */

    if ((unsigned long)(size) >= FASTBIN_CONSOLIDATION_THRESHOLD) {
      if (have_fastchunks(av))
       malloc_consolidate(av);

      if (av == &main_arena) {
#ifndef MORECORE_CANNOT_TRIM
       if ((unsigned long)(chunksize(av->top)) >=
           (unsigned long)(mp_.trim_threshold))
         sYSTRIm(mp_.top_pad, av);
#endif
      } else {
       /* Always try heap_trim(), even if the top chunk is not
          large, because the corresponding heap might go away.  */
       heap_info *heap = heap_for_ptr(top(av));

       assert(heap->ar_ptr == av);
       heap_trim(heap, mp_.top_pad);
      }
    }

  }
  /*
    If the chunk was allocated via mmap, release via munmap(). Note
    that if HAVE_MMAP is false but chunk_is_mmapped is true, then
    user must have overwritten memory. There's nothing we can do to
    catch this error unless MALLOC_DEBUG is set, in which case
    check_inuse_chunk (above) will have triggered error.
  */

  else {
#if HAVE_MMAP
    munmap_chunk (p);
#endif
  }
}


static Void_t** _int_icalloc ( ) [static]


Void_t** _int_icalloc ( mstate  av,
size_t  n_elements,
size_t  elem_size,
Void_t ** chunks 
)

Definition at line 5355 of file malloc.c.

{
  size_t sz = elem_size; /* serves as 1-element array */
  /* opts arg of 3 means all elements are same size, and should be cleared */
  return iALLOc(av, n_elements, &sz, 3, chunks);
}


static Void_t** _int_icomalloc ( ) [static]


Void_t** _int_icomalloc ( mstate  av,
size_t  n_elements,
size_t  sizes[],
Void_t ** chunks 
)

Definition at line 5372 of file malloc.c.

{
  return iALLOc(av, n_elements, sizes, 0, chunks);
}


Void_t* _int_malloc ( mstate  av,
size_t  bytes 
)

Definition at line 4129 of file malloc.c.

{
  INTERNAL_SIZE_T nb;               /* normalized request size */
  unsigned int    idx;              /* associated bin index */
  mbinptr         bin;              /* associated bin */
  mfastbinptr*    fb;               /* associated fastbin */

  mchunkptr       victim;           /* inspected/selected chunk */
  INTERNAL_SIZE_T size;             /* its size */
  int             victim_index;     /* its bin index */

  mchunkptr       remainder;        /* remainder from a split */
  unsigned long   remainder_size;   /* its size */

  unsigned int    block;            /* bit map traverser */
  unsigned int    bit;              /* bit map traverser */
  unsigned int    map;              /* current word of binmap */

  mchunkptr       fwd;              /* misc temp for linking */
  mchunkptr       bck;              /* misc temp for linking */

  /*
    Convert request size to internal form by adding SIZE_SZ bytes
    overhead plus possibly more to obtain necessary alignment and/or
    to obtain a size of at least MINSIZE, the smallest allocatable
    size. Also, checked_request2size traps (returning 0) request sizes
    that are so large that they wrap around zero when padded and
    aligned.
  */

  checked_request2size(bytes, nb);

  /*
    If the size qualifies as a fastbin, first check corresponding bin.
    This code is safe to execute even if av is not yet initialized, so we
    can try it without checking, which saves some time on this fast path.
  */

  if ((unsigned long)(nb) <= (unsigned long)(get_max_fast ())) {
    long int idx = fastbin_index(nb);
    fb = &(av->fastbins[idx]);
    if ( (victim = *fb) != 0) {
      if (__builtin_expect (fastbin_index (chunksize (victim)) != idx, 0))
       malloc_printerr (check_action, "malloc(): memory corruption (fast)",
                      chunk2mem (victim));
      *fb = victim->fd;
      check_remalloced_chunk(av, victim, nb);
      void *p = chunk2mem(victim);
      if (__builtin_expect (perturb_byte, 0))
       alloc_perturb (p, bytes);
      return p;
    }
  }

  /*
    If a small request, check regular bin.  Since these "smallbins"
    hold one size each, no searching within bins is necessary.
    (For a large request, we need to wait until unsorted chunks are
    processed to find best fit. But for small ones, fits are exact
    anyway, so we can check now, which is faster.)
  */

  if (in_smallbin_range(nb)) {
    idx = smallbin_index(nb);
    bin = bin_at(av,idx);

    if ( (victim = last(bin)) != bin) {
      if (victim == 0) /* initialization check */
        malloc_consolidate(av);
      else {
        bck = victim->bk;
        set_inuse_bit_at_offset(victim, nb);
        bin->bk = bck;
        bck->fd = bin;

        if (av != &main_arena)
         victim->size |= NON_MAIN_ARENA;
        check_malloced_chunk(av, victim, nb);
       void *p = chunk2mem(victim);
       if (__builtin_expect (perturb_byte, 0))
         alloc_perturb (p, bytes);
       return p;
      }
    }
  }

  /*
     If this is a large request, consolidate fastbins before continuing.
     While it might look excessive to kill all fastbins before
     even seeing if there is space available, this avoids
     fragmentation problems normally associated with fastbins.
     Also, in practice, programs tend to have runs of either small or
     large requests, but less often mixtures, so consolidation is not
     invoked all that often in most programs. And the programs that
     it is called frequently in otherwise tend to fragment.
  */

  else {
    idx = largebin_index(nb);
    if (have_fastchunks(av))
      malloc_consolidate(av);
  }

  /*
    Process recently freed or remaindered chunks, taking one only if
    it is exact fit, or, if this a small request, the chunk is remainder from
    the most recent non-exact fit.  Place other traversed chunks in
    bins.  Note that this step is the only place in any routine where
    chunks are placed in bins.

    The outer loop here is needed because we might not realize until
    near the end of malloc that we should have consolidated, so must
    do so and retry. This happens at most once, and only when we would
    otherwise need to expand memory to service a "small" request.
  */

  for(;;) {

    int iters = 0;
    while ( (victim = unsorted_chunks(av)->bk) != unsorted_chunks(av)) {
      bck = victim->bk;
      if (__builtin_expect (victim->size <= 2 * SIZE_SZ, 0)
         || __builtin_expect (victim->size > av->system_mem, 0))
       malloc_printerr (check_action, "malloc(): memory corruption",
                      chunk2mem (victim));
      size = chunksize(victim);

      /*
         If a small request, try to use last remainder if it is the
         only chunk in unsorted bin.  This helps promote locality for
         runs of consecutive small requests. This is the only
         exception to best-fit, and applies only when there is
         no exact fit for a small chunk.
      */

      if (in_smallbin_range(nb) &&
          bck == unsorted_chunks(av) &&
          victim == av->last_remainder &&
          (unsigned long)(size) > (unsigned long)(nb + MINSIZE)) {

        /* split and reattach remainder */
        remainder_size = size - nb;
        remainder = chunk_at_offset(victim, nb);
        unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder;
        av->last_remainder = remainder;
        remainder->bk = remainder->fd = unsorted_chunks(av);
       if (!in_smallbin_range(remainder_size))
         {
           remainder->fd_nextsize = NULL;
           remainder->bk_nextsize = NULL;
         }

        set_head(victim, nb | PREV_INUSE |
               (av != &main_arena ? NON_MAIN_ARENA : 0));
        set_head(remainder, remainder_size | PREV_INUSE);
        set_foot(remainder, remainder_size);

        check_malloced_chunk(av, victim, nb);
       void *p = chunk2mem(victim);
       if (__builtin_expect (perturb_byte, 0))
         alloc_perturb (p, bytes);
       return p;
      }

      /* remove from unsorted list */
      unsorted_chunks(av)->bk = bck;
      bck->fd = unsorted_chunks(av);

      /* Take now instead of binning if exact fit */

      if (size == nb) {
        set_inuse_bit_at_offset(victim, size);
       if (av != &main_arena)
         victim->size |= NON_MAIN_ARENA;
        check_malloced_chunk(av, victim, nb);
       void *p = chunk2mem(victim);
       if (__builtin_expect (perturb_byte, 0))
         alloc_perturb (p, bytes);
       return p;
      }

      /* place chunk in bin */

      if (in_smallbin_range(size)) {
        victim_index = smallbin_index(size);
        bck = bin_at(av, victim_index);
        fwd = bck->fd;
      }
      else {
        victim_index = largebin_index(size);
        bck = bin_at(av, victim_index);
        fwd = bck->fd;

        /* maintain large bins in sorted order */
        if (fwd != bck) {
         /* Or with inuse bit to speed comparisons */
          size |= PREV_INUSE;
          /* if smaller than smallest, bypass loop below */
         assert((bck->bk->size & NON_MAIN_ARENA) == 0);
         if ((unsigned long)(size) < (unsigned long)(bck->bk->size)) {
            fwd = bck;
            bck = bck->bk;

           victim->fd_nextsize = fwd->fd;
           victim->bk_nextsize = fwd->fd->bk_nextsize;
           fwd->fd->bk_nextsize = victim->bk_nextsize->fd_nextsize = victim;
          }
          else {
           assert((fwd->size & NON_MAIN_ARENA) == 0);
           while ((unsigned long) size < fwd->size)
             {
              fwd = fwd->fd_nextsize;
              assert((fwd->size & NON_MAIN_ARENA) == 0);
             }

           if ((unsigned long) size == (unsigned long) fwd->size)
             /* Always insert in the second position.  */
             fwd = fwd->fd;
           else
             {
              victim->fd_nextsize = fwd;
              victim->bk_nextsize = fwd->bk_nextsize;
              fwd->bk_nextsize = victim;
              victim->bk_nextsize->fd_nextsize = victim;
             }
           bck = fwd->bk;
          }
       } else
         victim->fd_nextsize = victim->bk_nextsize = victim;
      }

      mark_bin(av, victim_index);
      victim->bk = bck;
      victim->fd = fwd;
      fwd->bk = victim;
      bck->fd = victim;

#define MAX_ITERS    10000
      if (++iters >= MAX_ITERS)
       break;
    }

    /*
      If a large request, scan through the chunks of current bin in
      sorted order to find smallest that fits.  Use the skip list for this.
    */

    if (!in_smallbin_range(nb)) {
      bin = bin_at(av, idx);

      /* skip scan if empty or largest chunk is too small */
      if ((victim = first(bin)) != bin &&
          (unsigned long)(victim->size) >= (unsigned long)(nb)) {

       victim = victim->bk_nextsize;
        while (((unsigned long)(size = chunksize(victim)) <
                (unsigned long)(nb)))
          victim = victim->bk_nextsize;

       /* Avoid removing the first entry for a size so that the skip
          list does not have to be rerouted.  */
       if (victim != last(bin) && victim->size == victim->fd->size)
         victim = victim->fd;

        remainder_size = size - nb;
        unlink(victim, bck, fwd);

        /* Exhaust */
        if (remainder_size < MINSIZE)  {
          set_inuse_bit_at_offset(victim, size);
         if (av != &main_arena)
           victim->size |= NON_MAIN_ARENA;
        }
        /* Split */
        else {
          remainder = chunk_at_offset(victim, nb);
          /* We cannot assume the unsorted list is empty and therefore
             have to perform a complete insert here.  */
         bck = unsorted_chunks(av);
         fwd = bck->fd;
         remainder->bk = bck;
         remainder->fd = fwd;
         bck->fd = remainder;
         fwd->bk = remainder;
         if (!in_smallbin_range(remainder_size))
           {
             remainder->fd_nextsize = NULL;
             remainder->bk_nextsize = NULL;
           }
          set_head(victim, nb | PREV_INUSE |
                 (av != &main_arena ? NON_MAIN_ARENA : 0));
          set_head(remainder, remainder_size | PREV_INUSE);
          set_foot(remainder, remainder_size);
        }
       check_malloced_chunk(av, victim, nb);
       void *p = chunk2mem(victim);
       if (__builtin_expect (perturb_byte, 0))
         alloc_perturb (p, bytes);
       return p;
      }
    }

    /*
      Search for a chunk by scanning bins, starting with next largest
      bin. This search is strictly by best-fit; i.e., the smallest
      (with ties going to approximately the least recently used) chunk
      that fits is selected.

      The bitmap avoids needing to check that most blocks are nonempty.
      The particular case of skipping all bins during warm-up phases
      when no chunks have been returned yet is faster than it might look.
    */

    ++idx;
    bin = bin_at(av,idx);
    block = idx2block(idx);
    map = av->binmap[block];
    bit = idx2bit(idx);

    for (;;) {

      /* Skip rest of block if there are no more set bits in this block.  */
      if (bit > map || bit == 0) {
        do {
          if (++block >= BINMAPSIZE)  /* out of bins */
            goto use_top;
        } while ( (map = av->binmap[block]) == 0);

        bin = bin_at(av, (block << BINMAPSHIFT));
        bit = 1;
      }

      /* Advance to bin with set bit. There must be one. */
      while ((bit & map) == 0) {
        bin = next_bin(bin);
        bit <<= 1;
        assert(bit != 0);
      }

      /* Inspect the bin. It is likely to be non-empty */
      victim = last(bin);

      /*  If a false alarm (empty bin), clear the bit. */
      if (victim == bin) {
        av->binmap[block] = map &= ~bit; /* Write through */
        bin = next_bin(bin);
        bit <<= 1;
      }

      else {
        size = chunksize(victim);

        /*  We know the first chunk in this bin is big enough to use. */
        assert((unsigned long)(size) >= (unsigned long)(nb));

        remainder_size = size - nb;

        /* unlink */
        unlink(victim, bck, fwd);

        /* Exhaust */
        if (remainder_size < MINSIZE) {
          set_inuse_bit_at_offset(victim, size);
         if (av != &main_arena)
           victim->size |= NON_MAIN_ARENA;
        }

        /* Split */
        else {
          remainder = chunk_at_offset(victim, nb);

         /* We cannot assume the unsorted list is empty and therefore
            have to perform a complete insert here.  */
         bck = unsorted_chunks(av);
         fwd = bck->fd;
         remainder->bk = bck;
         remainder->fd = fwd;
         bck->fd = remainder;
         fwd->bk = remainder;

          /* advertise as last remainder */
          if (in_smallbin_range(nb))
            av->last_remainder = remainder;
         if (!in_smallbin_range(remainder_size))
           {
             remainder->fd_nextsize = NULL;
             remainder->bk_nextsize = NULL;
           }
          set_head(victim, nb | PREV_INUSE |
                 (av != &main_arena ? NON_MAIN_ARENA : 0));
          set_head(remainder, remainder_size | PREV_INUSE);
          set_foot(remainder, remainder_size);
        }
       check_malloced_chunk(av, victim, nb);
       void *p = chunk2mem(victim);
       if (__builtin_expect (perturb_byte, 0))
         alloc_perturb (p, bytes);
       return p;
      }
    }

  use_top:
    /*
      If large enough, split off the chunk bordering the end of memory
      (held in av->top). Note that this is in accord with the best-fit
      search rule.  In effect, av->top is treated as larger (and thus
      less well fitting) than any other available chunk since it can
      be extended to be as large as necessary (up to system
      limitations).

      We require that av->top always exists (i.e., has size >=
      MINSIZE) after initialization, so if it would otherwise be
      exhausted by current request, it is replenished. (The main
      reason for ensuring it exists is that we may need MINSIZE space
      to put in fenceposts in sysmalloc.)
    */

    victim = av->top;
    size = chunksize(victim);

    if ((unsigned long)(size) >= (unsigned long)(nb + MINSIZE)) {
      remainder_size = size - nb;
      remainder = chunk_at_offset(victim, nb);
      av->top = remainder;
      set_head(victim, nb | PREV_INUSE |
              (av != &main_arena ? NON_MAIN_ARENA : 0));
      set_head(remainder, remainder_size | PREV_INUSE);

      check_malloced_chunk(av, victim, nb);
      void *p = chunk2mem(victim);
      if (__builtin_expect (perturb_byte, 0))
       alloc_perturb (p, bytes);
      return p;
    }

    /*
      If there is space available in fastbins, consolidate and retry,
      to possibly avoid expanding memory. This can occur only if nb is
      in smallbin range so we didn't consolidate upon entry.
    */

    else if (have_fastchunks(av)) {
      assert(in_smallbin_range(nb));
      malloc_consolidate(av);
      idx = smallbin_index(nb); /* restore original bin index */
    }

    /*
       Otherwise, relay to handle system-dependent cases
    */
    else {
      void *p = sYSMALLOc(nb, av);
      if (p != NULL && __builtin_expect (perturb_byte, 0))
       alloc_perturb (p, bytes);
      return p;
    }
  }
}


Void_t* _int_memalign ( mstate  av,
size_t  alignment,
size_t  bytes 
)

Definition at line 5185 of file malloc.c.

{
  INTERNAL_SIZE_T nb;             /* padded  request size */
  char*           m;              /* memory returned by malloc call */
  mchunkptr       p;              /* corresponding chunk */
  char*           brk;            /* alignment point within p */
  mchunkptr       newp;           /* chunk to return */
  INTERNAL_SIZE_T newsize;        /* its size */
  INTERNAL_SIZE_T leadsize;       /* leading space before alignment point */
  mchunkptr       remainder;      /* spare room at end to split off */
  unsigned long   remainder_size; /* its size */
  INTERNAL_SIZE_T size;

  /* If need less alignment than we give anyway, just relay to malloc */

  if (alignment <= MALLOC_ALIGNMENT) return _int_malloc(av, bytes);

  /* Otherwise, ensure that it is at least a minimum chunk size */

  if (alignment <  MINSIZE) alignment = MINSIZE;

  /* Make sure alignment is power of 2 (in case MINSIZE is not).  */
  if ((alignment & (alignment - 1)) != 0) {
    size_t a = MALLOC_ALIGNMENT * 2;
    while ((unsigned long)a < (unsigned long)alignment) a <<= 1;
    alignment = a;
  }

  checked_request2size(bytes, nb);

  /*
    Strategy: find a spot within that chunk that meets the alignment
    request, and then possibly free the leading and trailing space.
  */


  /* Call malloc with worst case padding to hit alignment. */

  m  = (char*)(_int_malloc(av, nb + alignment + MINSIZE));

  if (m == 0) return 0; /* propagate failure */

  p = mem2chunk(m);

  if ((((unsigned long)(m)) % alignment) != 0) { /* misaligned */

    /*
      Find an aligned spot inside chunk.  Since we need to give back
      leading space in a chunk of at least MINSIZE, if the first
      calculation places us at a spot with less than MINSIZE leader,
      we can move to the next aligned spot -- we've allocated enough
      total room so that this is always possible.
    */

    brk = (char*)mem2chunk(((unsigned long)(m + alignment - 1)) &
                           -((signed long) alignment));
    if ((unsigned long)(brk - (char*)(p)) < MINSIZE)
      brk += alignment;

    newp = (mchunkptr)brk;
    leadsize = brk - (char*)(p);
    newsize = chunksize(p) - leadsize;

    /* For mmapped chunks, just adjust offset */
    if (chunk_is_mmapped(p)) {
      newp->prev_size = p->prev_size + leadsize;
      set_head(newp, newsize|IS_MMAPPED);
      return chunk2mem(newp);
    }

    /* Otherwise, give back leader, use the rest */
    set_head(newp, newsize | PREV_INUSE |
            (av != &main_arena ? NON_MAIN_ARENA : 0));
    set_inuse_bit_at_offset(newp, newsize);
    set_head_size(p, leadsize | (av != &main_arena ? NON_MAIN_ARENA : 0));
    _int_free(av, chunk2mem(p));
    p = newp;

    assert (newsize >= nb &&
            (((unsigned long)(chunk2mem(p))) % alignment) == 0);
  }

  /* Also give back spare room at the end */
  if (!chunk_is_mmapped(p)) {
    size = chunksize(p);
    if ((unsigned long)(size) > (unsigned long)(nb + MINSIZE)) {
      remainder_size = size - nb;
      remainder = chunk_at_offset(p, nb);
      set_head(remainder, remainder_size | PREV_INUSE |
              (av != &main_arena ? NON_MAIN_ARENA : 0));
      set_head_size(p, nb);
      _int_free(av, chunk2mem(remainder));
    }
  }

  check_inuse_chunk(av, p);
  return chunk2mem(p);
}


Void_t* _int_pvalloc ( mstate  av,
size_t  bytes 
)

Definition at line 5538 of file malloc.c.

{
  size_t pagesz;

  /* Ensure initialization/consolidation */
  if (have_fastchunks(av)) malloc_consolidate(av);
  pagesz = mp_.pagesize;
  return _int_memalign(av, pagesz, (bytes + pagesz - 1) & ~(pagesz - 1));
}


Void_t* _int_realloc ( mstate  av,
Void_t * oldmem,
size_t  bytes 
)

Definition at line 4946 of file malloc.c.

{
  INTERNAL_SIZE_T  nb;              /* padded request size */

  mchunkptr        oldp;            /* chunk corresponding to oldmem */
  INTERNAL_SIZE_T  oldsize;         /* its size */

  mchunkptr        newp;            /* chunk to return */
  INTERNAL_SIZE_T  newsize;         /* its size */
  Void_t*          newmem;          /* corresponding user mem */

  mchunkptr        next;            /* next contiguous chunk after oldp */

  mchunkptr        remainder;       /* extra space at end of newp */
  unsigned long    remainder_size;  /* its size */

  mchunkptr        bck;             /* misc temp for linking */
  mchunkptr        fwd;             /* misc temp for linking */

  unsigned long    copysize;        /* bytes to copy */
  unsigned int     ncopies;         /* INTERNAL_SIZE_T words to copy */
  INTERNAL_SIZE_T* s;               /* copy source */
  INTERNAL_SIZE_T* d;               /* copy destination */

  const char *errstr = NULL;


  checked_request2size(bytes, nb);

  oldp    = mem2chunk(oldmem);
  oldsize = chunksize(oldp);

  /* Simple tests for old block integrity.  */
  if (__builtin_expect (misaligned_chunk (oldp), 0))
    {
      errstr = "realloc(): invalid pointer";
    errout:
      malloc_printerr (check_action, errstr, oldmem);
      return NULL;
    }
  if (__builtin_expect (oldp->size <= 2 * SIZE_SZ, 0)
      || __builtin_expect (oldsize >= av->system_mem, 0))
    {
      errstr = "realloc(): invalid old size";
      goto errout;
    }

  check_inuse_chunk(av, oldp);

  if (!chunk_is_mmapped(oldp)) {

    next = chunk_at_offset(oldp, oldsize);
    INTERNAL_SIZE_T nextsize = chunksize(next);
    if (__builtin_expect (next->size <= 2 * SIZE_SZ, 0)
       || __builtin_expect (nextsize >= av->system_mem, 0))
      {
       errstr = "realloc(): invalid next size";
       goto errout;
      }

    if ((unsigned long)(oldsize) >= (unsigned long)(nb)) {
      /* already big enough; split below */
      newp = oldp;
      newsize = oldsize;
    }

    else {
      /* Try to expand forward into top */
      if (next == av->top &&
          (unsigned long)(newsize = oldsize + nextsize) >=
          (unsigned long)(nb + MINSIZE)) {
        set_head_size(oldp, nb | (av != &main_arena ? NON_MAIN_ARENA : 0));
        av->top = chunk_at_offset(oldp, nb);
        set_head(av->top, (newsize - nb) | PREV_INUSE);
       check_inuse_chunk(av, oldp);
        return chunk2mem(oldp);
      }

      /* Try to expand forward into next chunk;  split off remainder below */
      else if (next != av->top &&
               !inuse(next) &&
               (unsigned long)(newsize = oldsize + nextsize) >=
               (unsigned long)(nb)) {
        newp = oldp;
        unlink(next, bck, fwd);
      }

      /* allocate, copy, free */
      else {
        newmem = _int_malloc(av, nb - MALLOC_ALIGN_MASK);
        if (newmem == 0)
          return 0; /* propagate failure */

        newp = mem2chunk(newmem);
        newsize = chunksize(newp);

        /*
          Avoid copy if newp is next chunk after oldp.
        */
        if (newp == next) {
          newsize += oldsize;
          newp = oldp;
        }
        else {
          /*
            Unroll copy of <= 36 bytes (72 if 8byte sizes)
            We know that contents have an odd number of
            INTERNAL_SIZE_T-sized words; minimally 3.
          */

          copysize = oldsize - SIZE_SZ;
          s = (INTERNAL_SIZE_T*)(oldmem);
          d = (INTERNAL_SIZE_T*)(newmem);
          ncopies = copysize / sizeof(INTERNAL_SIZE_T);
          assert(ncopies >= 3);

          if (ncopies > 9)
            MALLOC_COPY(d, s, copysize);

          else {
            *(d+0) = *(s+0);
            *(d+1) = *(s+1);
            *(d+2) = *(s+2);
            if (ncopies > 4) {
              *(d+3) = *(s+3);
              *(d+4) = *(s+4);
              if (ncopies > 6) {
                *(d+5) = *(s+5);
                *(d+6) = *(s+6);
                if (ncopies > 8) {
                  *(d+7) = *(s+7);
                  *(d+8) = *(s+8);
                }
              }
            }
          }

          _int_free(av, oldmem);
          check_inuse_chunk(av, newp);
          return chunk2mem(newp);
        }
      }
    }

    /* If possible, free extra space in old or extended chunk */

    assert((unsigned long)(newsize) >= (unsigned long)(nb));

    remainder_size = newsize - nb;

    if (remainder_size < MINSIZE) { /* not enough extra to split off */
      set_head_size(newp, newsize | (av != &main_arena ? NON_MAIN_ARENA : 0));
      set_inuse_bit_at_offset(newp, newsize);
    }
    else { /* split remainder */
      remainder = chunk_at_offset(newp, nb);
      set_head_size(newp, nb | (av != &main_arena ? NON_MAIN_ARENA : 0));
      set_head(remainder, remainder_size | PREV_INUSE |
              (av != &main_arena ? NON_MAIN_ARENA : 0));
      /* Mark remainder as inuse so free() won't complain */
      set_inuse_bit_at_offset(remainder, remainder_size);
      _int_free(av, chunk2mem(remainder));
    }

    check_inuse_chunk(av, newp);
    return chunk2mem(newp);
  }

  /*
    Handle mmap cases
  */

  else {
#if HAVE_MMAP

#if HAVE_MREMAP
    INTERNAL_SIZE_T offset = oldp->prev_size;
    size_t pagemask = mp_.pagesize - 1;
    char *cp;
    unsigned long sum;

    /* Note the extra SIZE_SZ overhead */
    newsize = (nb + offset + SIZE_SZ + pagemask) & ~pagemask;

    /* don't need to remap if still within same page */
    if (oldsize == newsize - offset)
      return oldmem;

    cp = (char*)mremap((char*)oldp - offset, oldsize + offset, newsize, 1);

    if (cp != MAP_FAILED) {

      newp = (mchunkptr)(cp + offset);
      set_head(newp, (newsize - offset)|IS_MMAPPED);

      assert(aligned_OK(chunk2mem(newp)));
      assert((newp->prev_size == offset));

      /* update statistics */
      sum = mp_.mmapped_mem += newsize - oldsize;
      if (sum > (unsigned long)(mp_.max_mmapped_mem))
        mp_.max_mmapped_mem = sum;
#ifdef NO_THREADS
      sum += main_arena.system_mem;
      if (sum > (unsigned long)(mp_.max_total_mem))
        mp_.max_total_mem = sum;
#endif

      return chunk2mem(newp);
    }
#endif

    /* Note the extra SIZE_SZ overhead. */
    if ((unsigned long)(oldsize) >= (unsigned long)(nb + SIZE_SZ))
      newmem = oldmem; /* do nothing */
    else {
      /* Must alloc, copy, free. */
      newmem = _int_malloc(av, nb - MALLOC_ALIGN_MASK);
      if (newmem != 0) {
        MALLOC_COPY(newmem, oldmem, oldsize - 2*SIZE_SZ);
        _int_free(av, oldmem);
      }
    }
    return newmem;

#else
    /* If !HAVE_MMAP, but chunk_is_mmapped, user must have overwritten mem */
    check_malloc_state(av);
    MALLOC_FAILURE_ACTION;
    return 0;
#endif
  }
}

Here is the call graph for this function:
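
The unrolled word copy in the non-mmap path above (and the matching clear loop in public_cALLOc below) avoids a memcpy call when at most nine INTERNAL_SIZE_T words are involved. The standalone sketch below expresses the same idea with a fall-through switch instead of the nested ifs; it uses size_t as the word type, which is what INTERNAL_SIZE_T defaults to.

#include <stdio.h>
#include <string.h>

/* Copy nwords size_t-sized words from s to d; fall back to memcpy above
   nine words, as _int_realloc does. */
static void copy_words(size_t *d, const size_t *s, unsigned nwords)
{
  if (nwords > 9) {
    memcpy(d, s, nwords * sizeof(size_t));
    return;
  }
  switch (nwords) {                    /* intentional fall-through */
  case 9: d[8] = s[8]; /* fall through */
  case 8: d[7] = s[7]; /* fall through */
  case 7: d[6] = s[6]; /* fall through */
  case 6: d[5] = s[5]; /* fall through */
  case 5: d[4] = s[4]; /* fall through */
  case 4: d[3] = s[3]; /* fall through */
  case 3: d[2] = s[2]; /* fall through */
  case 2: d[1] = s[1]; /* fall through */
  case 1: d[0] = s[0]; /* fall through */
  case 0: break;
  }
}

int main(void)
{
  size_t src[9] = {1,2,3,4,5,6,7,8,9}, dst[9] = {0};
  copy_words(dst, src, 9);
  printf("%zu %zu\n", dst[0], dst[8]);  /* prints: 1 9 */
  return 0;
}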

Void_t* _int_valloc ( mstate  av,
size_t  bytes 
)

Definition at line 5521 of file malloc.c.

{
  /* Ensure initialization/consolidation */
  if (have_fastchunks(av)) malloc_consolidate(av);
  return _int_memalign(av, mp_.pagesize, bytes);
}

Here is the call graph for this function:

Here is the caller graph for this function:

static Void_t** iALLOc ( ) [static]

Here is the caller graph for this function:

static Void_t** iALLOc ( mstate  av,
size_t  n_elements,
size_t*  sizes,
int  opts,
Void_t**  chunks 
) [static]

Definition at line 5395 of file malloc.c.

{
  INTERNAL_SIZE_T element_size;   /* chunksize of each element, if all same */
  INTERNAL_SIZE_T contents_size;  /* total size of elements */
  INTERNAL_SIZE_T array_size;     /* request size of pointer array */
  Void_t*         mem;            /* malloced aggregate space */
  mchunkptr       p;              /* corresponding chunk */
  INTERNAL_SIZE_T remainder_size; /* remaining bytes while splitting */
  Void_t**        marray;         /* either "chunks" or malloced ptr array */
  mchunkptr       array_chunk;    /* chunk for malloced ptr array */
  int             mmx;            /* to disable mmap */
  INTERNAL_SIZE_T size;
  INTERNAL_SIZE_T size_flags;
  size_t          i;

  /* Ensure initialization/consolidation */
  if (have_fastchunks(av)) malloc_consolidate(av);

  /* compute array length, if needed */
  if (chunks != 0) {
    if (n_elements == 0)
      return chunks; /* nothing to do */
    marray = chunks;
    array_size = 0;
  }
  else {
    /* if empty req, must still return chunk representing empty array */
    if (n_elements == 0)
      return (Void_t**) _int_malloc(av, 0);
    marray = 0;
    array_size = request2size(n_elements * (sizeof(Void_t*)));
  }

  /* compute total element size */
  if (opts & 0x1) { /* all-same-size */
    element_size = request2size(*sizes);
    contents_size = n_elements * element_size;
  }
  else { /* add up all the sizes */
    element_size = 0;
    contents_size = 0;
    for (i = 0; i != n_elements; ++i)
      contents_size += request2size(sizes[i]);
  }

  /* subtract out alignment bytes from total to minimize overallocation */
  size = contents_size + array_size - MALLOC_ALIGN_MASK;

  /*
     Allocate the aggregate chunk.
     But first disable mmap so malloc won't use it, since
     we would not be able to later free/realloc space internal
     to a segregated mmap region.
  */
  mmx = mp_.n_mmaps_max;   /* disable mmap */
  mp_.n_mmaps_max = 0;
  mem = _int_malloc(av, size);
  mp_.n_mmaps_max = mmx;   /* reset mmap */
  if (mem == 0)
    return 0;

  p = mem2chunk(mem);
  assert(!chunk_is_mmapped(p));
  remainder_size = chunksize(p);

  if (opts & 0x2) {       /* optionally clear the elements */
    MALLOC_ZERO(mem, remainder_size - SIZE_SZ - array_size);
  }

  size_flags = PREV_INUSE | (av != &main_arena ? NON_MAIN_ARENA : 0);

  /* If not provided, allocate the pointer array as final part of chunk */
  if (marray == 0) {
    array_chunk = chunk_at_offset(p, contents_size);
    marray = (Void_t**) (chunk2mem(array_chunk));
    set_head(array_chunk, (remainder_size - contents_size) | size_flags);
    remainder_size = contents_size;
  }

  /* split out elements */
  for (i = 0; ; ++i) {
    marray[i] = chunk2mem(p);
    if (i != n_elements-1) {
      if (element_size != 0)
        size = element_size;
      else
        size = request2size(sizes[i]);
      remainder_size -= size;
      set_head(p, size | size_flags);
      p = chunk_at_offset(p, size);
    }
    else { /* the final element absorbs any overallocation slop */
      set_head(p, remainder_size | size_flags);
      break;
    }
  }

#if MALLOC_DEBUG
  if (marray != chunks) {
    /* final element must have exactly exhausted chunk */
    if (element_size != 0)
      assert(remainder_size == element_size);
    else
      assert(remainder_size == request2size(sizes[i]));
    check_inuse_chunk(av, mem2chunk(marray));
  }

  for (i = 0; i != n_elements; ++i)
    check_inuse_chunk(av, mem2chunk(marray[i]));
#endif

  return marray;
}

Here is the call graph for this function:
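
iALLOc backs the two public entry points documented further below: public_iCALLOc (independent_calloc), which asks for equal-sized cleared elements, and public_iCOMALLOc (independent_comalloc), which passes an explicit size array. The usage sketch below assumes the independent_calloc extension exported by this glibc version; the extension was removed in later releases, so the prototype is declared by hand and the example is illustrative only.

#include <stdio.h>
#include <stdlib.h>

/* Prototype as documented for public_iCALLOc below; not a standard API. */
extern void **independent_calloc(size_t n_elements, size_t element_size,
                                 void *chunks[]);

int main(void)
{
  /* Four zero-filled 32-byte elements; passing NULL lets the allocator carve
     the pointer array out of the same aggregate chunk. */
  void **elems = independent_calloc(4, 32, NULL);
  if (elems == NULL) return 1;

  for (int i = 0; i < 4; i++)
    printf("element %d at %p\n", i, elems[i]);

  /* Every element is a normal chunk and may be freed independently; the
     pointer array itself is also set up as a chunk (see iALLOc above). */
  for (int i = 0; i < 4; i++)
    free(elems[i]);
  free(elems);
  return 0;
}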

static struct mallinfo mALLINFo ( ) [static, read]

Here is the caller graph for this function:

struct mallinfo mALLINFo ( mstate  av) [read]

Definition at line 5638 of file malloc.c.

{
  struct mallinfo mi;
  size_t i;
  mbinptr b;
  mchunkptr p;
  INTERNAL_SIZE_T avail;
  INTERNAL_SIZE_T fastavail;
  int nblocks;
  int nfastblocks;

  /* Ensure initialization */
  if (av->top == 0)  malloc_consolidate(av);

  check_malloc_state(av);

  /* Account for top */
  avail = chunksize(av->top);
  nblocks = 1;  /* top always exists */

  /* traverse fastbins */
  nfastblocks = 0;
  fastavail = 0;

  for (i = 0; i < NFASTBINS; ++i) {
    for (p = av->fastbins[i]; p != 0; p = p->fd) {
      ++nfastblocks;
      fastavail += chunksize(p);
    }
  }

  avail += fastavail;

  /* traverse regular bins */
  for (i = 1; i < NBINS; ++i) {
    b = bin_at(av, i);
    for (p = last(b); p != b; p = p->bk) {
      ++nblocks;
      avail += chunksize(p);
    }
  }

  mi.smblks = nfastblocks;
  mi.ordblks = nblocks;
  mi.fordblks = avail;
  mi.uordblks = av->system_mem - avail;
  mi.arena = av->system_mem;
  mi.hblks = mp_.n_mmaps;
  mi.hblkhd = mp_.mmapped_mem;
  mi.fsmblks = fastavail;
  mi.keepcost = chunksize(av->top);
  mi.usmblks = mp_.max_total_mem;
  return mi;
}

Here is the call graph for this function:
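
The public mallinfo entry point exposes these numbers to applications. Note how the fields are used here: smblks and fsmblks count fastbin chunks and bytes, while fordblks is the total free bytes including top. A minimal caller:

#include <malloc.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
  void *p = malloc(1000);      /* touch the allocator so the arena exists */
  struct mallinfo mi = mallinfo();

  printf("arena    (bytes from sbrk)     = %d\n", mi.arena);
  printf("ordblks  (free chunks)         = %d\n", mi.ordblks);
  printf("smblks   (fastbin chunks)      = %d\n", mi.smblks);
  printf("hblkhd   (mmapped bytes)       = %d\n", mi.hblkhd);
  printf("uordblks (allocated bytes)     = %d\n", mi.uordblks);
  printf("fordblks (free bytes)          = %d\n", mi.fordblks);
  printf("keepcost (trimmable top bytes) = %d\n", mi.keepcost);

  free(p);
  return 0;
}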

static void malloc_consolidate ( ) [static]

Here is the caller graph for this function:

static void malloc_consolidate ( mstate  av) [static]

Definition at line 4833 of file malloc.c.

{
  mfastbinptr*    fb;                 /* current fastbin being consolidated */
  mfastbinptr*    maxfb;              /* last fastbin (for loop control) */
  mchunkptr       p;                  /* current chunk being consolidated */
  mchunkptr       nextp;              /* next chunk to consolidate */
  mchunkptr       unsorted_bin;       /* bin header */
  mchunkptr       first_unsorted;     /* chunk to link to */

  /* These have same use as in free() */
  mchunkptr       nextchunk;
  INTERNAL_SIZE_T size;
  INTERNAL_SIZE_T nextsize;
  INTERNAL_SIZE_T prevsize;
  int             nextinuse;
  mchunkptr       bck;
  mchunkptr       fwd;

  /*
    If max_fast is 0, we know that av hasn't
    yet been initialized, in which case do so below
  */

  if (get_max_fast () != 0) {
    clear_fastchunks(av);

    unsorted_bin = unsorted_chunks(av);

    /*
      Remove each chunk from fast bin and consolidate it, placing it
      then in unsorted bin. Among other reasons for doing this,
      placing in unsorted bin avoids needing to calculate actual bins
      until malloc is sure that chunks aren't immediately going to be
      reused anyway.
    */

#if 0
    /* It is wrong to limit the fast bins to search using get_max_fast
       because, except for the main arena, all the others might have
       blocks in the high fast bins.  It's not worth it anyway, just
       search all bins all the time.  */
    maxfb = &(av->fastbins[fastbin_index(get_max_fast ())]);
#else
    maxfb = &(av->fastbins[NFASTBINS - 1]);
#endif
    fb = &(av->fastbins[0]);
    do {
      if ( (p = *fb) != 0) {
        *fb = 0;

        do {
          check_inuse_chunk(av, p);
          nextp = p->fd;

          /* Slightly streamlined version of consolidation code in free() */
          size = p->size & ~(PREV_INUSE|NON_MAIN_ARENA);
          nextchunk = chunk_at_offset(p, size);
          nextsize = chunksize(nextchunk);

          if (!prev_inuse(p)) {
            prevsize = p->prev_size;
            size += prevsize;
            p = chunk_at_offset(p, -((long) prevsize));
            unlink(p, bck, fwd);
          }

          if (nextchunk != av->top) {
            nextinuse = inuse_bit_at_offset(nextchunk, nextsize);

            if (!nextinuse) {
              size += nextsize;
              unlink(nextchunk, bck, fwd);
            } else
             clear_inuse_bit_at_offset(nextchunk, 0);

            first_unsorted = unsorted_bin->fd;
            unsorted_bin->fd = p;
            first_unsorted->bk = p;

            if (!in_smallbin_range (size)) {
             p->fd_nextsize = NULL;
             p->bk_nextsize = NULL;
           }

            set_head(p, size | PREV_INUSE);
            p->bk = unsorted_bin;
            p->fd = first_unsorted;
            set_foot(p, size);
          }

          else {
            size += nextsize;
            set_head(p, size | PREV_INUSE);
            av->top = p;
          }

        } while ( (p = nextp) != 0);

      }
    } while (fb++ != maxfb);
  }
  else {
    malloc_init_state(av);
    check_malloc_state(av);
  }
}

Here is the call graph for this function:
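
The unsorted-bin insertion inside the loop (first_unsorted = unsorted_bin->fd; ... p->fd = first_unsorted) is an ordinary push at the head of a circular doubly linked list with a sentinel node. A toy version with a hypothetical node type (not the real malloc_chunk) makes the pointer shuffle easier to follow:

#include <stdio.h>

struct node { struct node *fd, *bk; int v; };

/* Insert n right after the sentinel head of a circular doubly linked list,
   in the same order of operations as malloc_consolidate uses for the
   unsorted bin. */
static void push_front(struct node *head, struct node *n)
{
  struct node *first = head->fd;       /* plays the role of first_unsorted */
  head->fd = n;
  first->bk = n;
  n->bk = head;
  n->fd = first;
}

int main(void)
{
  struct node head = { &head, &head, 0 };   /* empty list: sentinel only */
  struct node a = { 0, 0, 1 }, b = { 0, 0, 2 };

  push_front(&head, &a);
  push_front(&head, &b);
  for (struct node *p = head.fd; p != &head; p = p->fd)
    printf("%d ", p->v);                    /* prints: 2 1 */
  printf("\n");
  return 0;
}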

static void malloc_init_state ( mstate  av) [static]

Definition at line 2410 of file malloc.c.

{
  int     i;
  mbinptr bin;

  /* Establish circular links for normal bins */
  for (i = 1; i < NBINS; ++i) {
    bin = bin_at(av,i);
    bin->fd = bin->bk = bin;
  }

#if MORECORE_CONTIGUOUS
  if (av != &main_arena)
#endif
    set_noncontiguous(av);
  if (av == &main_arena)
    set_max_fast(DEFAULT_MXFAST);
  av->flags |= FASTCHUNKS_BIT;

  av->top            = initial_top(av);
}

Here is the caller graph for this function:

static void malloc_printerr ( int  action,
const char *  str,
void *  ptr 
) [static]

Definition at line 5981 of file malloc.c.

{
  if ((action & 5) == 5)
    __libc_message (action & 2, "%s\n", str);
  else if (action & 1)
    {
      char buf[2 * sizeof (uintptr_t) + 1];

      buf[sizeof (buf) - 1] = '\0';
      char *cp = _itoa_word ((uintptr_t) ptr, &buf[sizeof (buf) - 1], 16, 0);
      while (cp > buf)
       *--cp = '0';

      __libc_message (action & 2,
                    "*** glibc detected *** %s: %s: 0x%s ***\n",
                    __libc_argv[0] ?: "<unknown>", str, cp);
    }
  else if (action & 2)
    abort ();
}

Here is the call graph for this function:

Here is the caller graph for this function:
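
The buffer handling above renders the offending pointer as fixed-width, zero-padded hex without relying on printf-style formatting inside a possibly corrupted heap. For comparison only, the same output can be produced with snprintf (this is a sketch, not what glibc does internally):

#include <inttypes.h>
#include <stdio.h>
#include <stdint.h>

int main(void)
{
  void *ptr = (void *) (uintptr_t) 0xdeadbeef;

  /* Same buffer size as malloc_printerr: one hex digit per nibble plus NUL. */
  char buf[2 * sizeof (uintptr_t) + 1];
  snprintf(buf, sizeof buf, "%0*" PRIxPTR,
           (int) (2 * sizeof (uintptr_t)), (uintptr_t) ptr);

  printf("*** glibc detected *** demo: invalid pointer: 0x%s ***\n", buf);
  return 0;
}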

static int mALLOPt ( ) [static]

Here is the caller graph for this function:

int mALLOPt ( int  param_number,
int  value 
)

Definition at line 5772 of file malloc.c.

{
  mstate av = &main_arena;
  int res = 1;

  if(__malloc_initialized < 0)
    ptmalloc_init ();
  (void)mutex_lock(&av->mutex);
  /* Ensure initialization/consolidation */
  malloc_consolidate(av);

  switch(param_number) {
  case M_MXFAST:
    if (value >= 0 && value <= MAX_FAST_SIZE) {
      set_max_fast(value);
    }
    else
      res = 0;
    break;

  case M_TRIM_THRESHOLD:
    mp_.trim_threshold = value;
    mp_.no_dyn_threshold = 1;
    break;

  case M_TOP_PAD:
    mp_.top_pad = value;
    mp_.no_dyn_threshold = 1;
    break;

  case M_MMAP_THRESHOLD:
#if USE_ARENAS
    /* Forbid setting the threshold too high. */
    if((unsigned long)value > HEAP_MAX_SIZE/2)
      res = 0;
    else
#endif
      mp_.mmap_threshold = value;
      mp_.no_dyn_threshold = 1;
    break;

  case M_MMAP_MAX:
#if !HAVE_MMAP
    if (value != 0)
      res = 0;
    else
#endif
      mp_.n_mmaps_max = value;
      mp_.no_dyn_threshold = 1;
    break;

  case M_CHECK_ACTION:
    check_action = value;
    break;

  case M_PERTURB:
    perturb_byte = value;
    break;
  }
  (void)mutex_unlock(&av->mutex);
  return res;
}

Here is the call graph for this function:

Here is the caller graph for this function:
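
These knobs are reachable from applications through mallopt. Two things worth noting from the switch above: the return value is 1 on success and 0 on rejection, and in the M_MMAP_THRESHOLD case only the assignment directly after the else is conditional, so mp_.no_dyn_threshold is set even when an over-large value is rejected. A minimal caller:

#include <malloc.h>
#include <stdio.h>

int main(void)
{
  /* Raise the mmap threshold to 1 MiB and the trim threshold to 2 MiB.
     Setting either one also turns off the dynamic threshold adjustment. */
  if (mallopt(M_MMAP_THRESHOLD, 1024 * 1024) == 0)
    fprintf(stderr, "M_MMAP_THRESHOLD rejected\n");
  if (mallopt(M_TRIM_THRESHOLD, 2 * 1024 * 1024) == 0)
    fprintf(stderr, "M_TRIM_THRESHOLD rejected\n");
  return 0;
}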

Void_t* memcpy ( )
Void_t* memset ( )
void mSTATs ( ) [static]

Definition at line 5697 of file malloc.c.

{
  int i;
  mstate ar_ptr;
  struct mallinfo mi;
  unsigned int in_use_b = mp_.mmapped_mem, system_b = in_use_b;
#if THREAD_STATS
  long stat_lock_direct = 0, stat_lock_loop = 0, stat_lock_wait = 0;
#endif

  if(__malloc_initialized < 0)
    ptmalloc_init ();
#ifdef _LIBC
  _IO_flockfile (stderr);
  int old_flags2 = ((_IO_FILE *) stderr)->_flags2;
  ((_IO_FILE *) stderr)->_flags2 |= _IO_FLAGS2_NOTCANCEL;
#endif
  for (i=0, ar_ptr = &main_arena;; i++) {
    (void)mutex_lock(&ar_ptr->mutex);
    mi = mALLINFo(ar_ptr);
    fprintf(stderr, "Arena %d:\n", i);
    fprintf(stderr, "system bytes     = %10u\n", (unsigned int)mi.arena);
    fprintf(stderr, "in use bytes     = %10u\n", (unsigned int)mi.uordblks);
#if MALLOC_DEBUG > 1
    if (i > 0)
      dump_heap(heap_for_ptr(top(ar_ptr)));
#endif
    system_b += mi.arena;
    in_use_b += mi.uordblks;
#if THREAD_STATS
    stat_lock_direct += ar_ptr->stat_lock_direct;
    stat_lock_loop += ar_ptr->stat_lock_loop;
    stat_lock_wait += ar_ptr->stat_lock_wait;
#endif
    (void)mutex_unlock(&ar_ptr->mutex);
    ar_ptr = ar_ptr->next;
    if(ar_ptr == &main_arena) break;
  }
#if HAVE_MMAP
  fprintf(stderr, "Total (incl. mmap):\n");
#else
  fprintf(stderr, "Total:\n");
#endif
  fprintf(stderr, "system bytes     = %10u\n", system_b);
  fprintf(stderr, "in use bytes     = %10u\n", in_use_b);
#ifdef NO_THREADS
  fprintf(stderr, "max system bytes = %10u\n", (unsigned int)mp_.max_total_mem);
#endif
#if HAVE_MMAP
  fprintf(stderr, "max mmap regions = %10u\n", (unsigned int)mp_.max_n_mmaps);
  fprintf(stderr, "max mmap bytes   = %10lu\n",
         (unsigned long)mp_.max_mmapped_mem);
#endif
#if THREAD_STATS
  fprintf(stderr, "heaps created    = %10d\n",  stat_n_heaps);
  fprintf(stderr, "locked directly  = %10ld\n", stat_lock_direct);
  fprintf(stderr, "locked in loop   = %10ld\n", stat_lock_loop);
  fprintf(stderr, "locked waiting   = %10ld\n", stat_lock_wait);
  fprintf(stderr, "locked total     = %10ld\n",
          stat_lock_direct + stat_lock_loop + stat_lock_wait);
#endif
#ifdef _LIBC
  ((_IO_FILE *) stderr)->_flags2 |= old_flags2;
  _IO_funlockfile (stderr);
#endif
}

Here is the call graph for this function:
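
Applications reach this report through the public malloc_stats wrapper, which writes the per-arena and total figures to stderr:

#include <malloc.h>
#include <stdlib.h>

int main(void)
{
  for (int i = 0; i < 100; i++) {
    void *p = malloc(256);     /* intentionally kept allocated for the demo */
    (void) p;
  }
  malloc_stats();              /* prints the per-arena report to stderr */
  return 0;
}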

static int mTRIm ( ) [static]

Here is the caller graph for this function:

static int mTRIm ( mstate  av,
size_t  pad 
) [static]

Definition at line 5557 of file malloc.c.

{
  /* Ensure initialization/consolidation */
  malloc_consolidate (av);

  const size_t ps = mp_.pagesize;
  int psindex = bin_index (ps);
  const size_t psm1 = ps - 1;

  int result = 0;
  for (int i = 1; i < NBINS; ++i)
    if (i == 1 || i >= psindex)
      {
        mbinptr bin = bin_at (av, i);

        for (mchunkptr p = last (bin); p != bin; p = p->bk)
         {
           INTERNAL_SIZE_T size = chunksize (p);

           if (size > psm1 + sizeof (struct malloc_chunk))
             {
              /* See whether the chunk contains at least one unused page.  */
              char *paligned_mem = (char *) (((uintptr_t) p
                                          + sizeof (struct malloc_chunk)
                                          + psm1) & ~psm1);

              assert ((char *) chunk2mem (p) + 4 * SIZE_SZ <= paligned_mem);
              assert ((char *) p + size > paligned_mem);

              /* This is the size we could potentially free.  */
              size -= paligned_mem - (char *) p;

              if (size > psm1)
                {
#ifdef MALLOC_DEBUG
                  /* When debugging we simulate destroying the memory
                     content.  */
                  memset (paligned_mem, 0x89, size & ~psm1);
#endif
                  madvise (paligned_mem, size & ~psm1, MADV_DONTNEED);

                  result = 1;
                }
             }
         }
      }

#ifndef MORECORE_CANNOT_TRIM
  return result | (av == &main_arena ? sYSTRIm (pad, av) : 0);
#else
  return result;
#endif
}

Here is the call graph for this function:
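
The interesting arithmetic above is locating the first page boundary past the chunk header and working out how many whole pages fit before the chunk ends; only those pages can be handed back with madvise(MADV_DONTNEED). A standalone rerun of that arithmetic with made-up values (the header size and addresses are illustrative, not the real chunk layout):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
  const size_t ps = 4096, psm1 = ps - 1;
  const size_t header = 32;                  /* stand-in for the chunk header */
  uintptr_t start = 0x1000010;               /* hypothetical chunk address */
  size_t size = 3 * ps + 200;                /* chunk size in bytes */

  /* First page boundary at or after start + header, as in mTRIm. */
  uintptr_t paligned = (start + header + psm1) & ~(uintptr_t) psm1;
  size_t usable = size - (paligned - start); /* bytes from there to the end */

  printf("aligned start %#lx, releasable %zu bytes (%zu pages)\n",
         (unsigned long) paligned, usable & ~psm1, (usable & ~psm1) / ps);
  return 0;
}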

static void internal_function munmap_chunk ( mchunkptr  p) [static]

Definition at line 3445 of file malloc.c.

{
  INTERNAL_SIZE_T size = chunksize(p);

  assert (chunk_is_mmapped(p));
#if 0
  assert(! ((char*)p >= mp_.sbrk_base && (char*)p < mp_.sbrk_base + mp_.sbrked_mem));
  assert((mp_.n_mmaps > 0));
#endif

  uintptr_t block = (uintptr_t) p - p->prev_size;
  size_t total_size = p->prev_size + size;
  /* Unfortunately we have to do the compilers job by hand here.  Normally
     we would test BLOCK and TOTAL-SIZE separately for compliance with the
     page size.  But gcc does not recognize the optimization possibility
     (in the moment at least) so we combine the two values into one before
     the bit test.  */
  if (__builtin_expect (((block | total_size) & (mp_.pagesize - 1)) != 0, 0))
    {
      malloc_printerr (check_action, "munmap_chunk(): invalid pointer",
                     chunk2mem (p));
      return;
    }

  mp_.n_mmaps--;
  mp_.mmapped_mem -= total_size;

  int ret __attribute__ ((unused)) = munmap((char *)block, total_size);

  /* munmap returns non-zero on failure */
  assert(ret == 0);
}

Here is the call graph for this function:

Here is the caller graph for this function:

static size_t mUSABLe ( ) [static]

Here is the caller graph for this function:

size_t mUSABLe ( Void_t*  mem)

Definition at line 5620 of file malloc.c.

{
  mchunkptr p;
  if (mem != 0) {
    p = mem2chunk(mem);
    if (chunk_is_mmapped(p))
      return chunksize(p) - 2*SIZE_SZ;
    else if (inuse(p))
      return chunksize(p) - SIZE_SZ;
  }
  return 0;
}
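
The public name for this query is malloc_usable_size. The value is usually a little larger than what was requested, because the request is padded up to the chunk granularity and, for a non-mmapped chunk in use, the following chunk's prev_size field is usable as well (hence chunksize(p) - SIZE_SZ above):

#include <malloc.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
  void *p = malloc(100);
  if (p == NULL) return 1;
  printf("requested 100, usable %zu\n", malloc_usable_size(p));
  free(p);
  return 0;
}
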
Void_t* public_cALLOc ( size_t  n,
size_t  elem_size 
)

Definition at line 3886 of file malloc.c.

{
  mstate av;
  mchunkptr oldtop, p;
  INTERNAL_SIZE_T bytes, sz, csz, oldtopsize;
  Void_t* mem;
  unsigned long clearsize;
  unsigned long nclears;
  INTERNAL_SIZE_T* d;
  __malloc_ptr_t (*hook) __MALLOC_PMT ((size_t, __const __malloc_ptr_t)) =
    __malloc_hook;

  /* size_t is unsigned so the behavior on overflow is defined.  */
  bytes = n * elem_size;
#define HALF_INTERNAL_SIZE_T \
  (((INTERNAL_SIZE_T) 1) << (8 * sizeof (INTERNAL_SIZE_T) / 2))
  if (__builtin_expect ((n | elem_size) >= HALF_INTERNAL_SIZE_T, 0)) {
    if (elem_size != 0 && bytes / elem_size != n) {
      MALLOC_FAILURE_ACTION;
      return 0;
    }
  }

  if (hook != NULL) {
    sz = bytes;
    mem = (*hook)(sz, RETURN_ADDRESS (0));
    if(mem == 0)
      return 0;
#ifdef HAVE_MEMCPY
    return memset(mem, 0, sz);
#else
    while(sz > 0) ((char*)mem)[--sz] = 0; /* rather inefficient */
    return mem;
#endif
  }

  sz = bytes;

  arena_get(av, sz);
  if(!av)
    return 0;

  /* Check if we hand out the top chunk, in which case there may be no
     need to clear. */
#if MORECORE_CLEARS
  oldtop = top(av);
  oldtopsize = chunksize(top(av));
#if MORECORE_CLEARS < 2
  /* Only newly allocated memory is guaranteed to be cleared.  */
  if (av == &main_arena &&
      oldtopsize < mp_.sbrk_base + av->max_system_mem - (char *)oldtop)
    oldtopsize = (mp_.sbrk_base + av->max_system_mem - (char *)oldtop);
#endif
  if (av != &main_arena)
    {
      heap_info *heap = heap_for_ptr (oldtop);
      if (oldtopsize < (char *) heap + heap->mprotect_size - (char *) oldtop)
       oldtopsize = (char *) heap + heap->mprotect_size - (char *) oldtop;
    }
#endif
  mem = _int_malloc(av, sz);

  /* Only clearing follows, so we can unlock early. */
  (void)mutex_unlock(&av->mutex);

  assert(!mem || chunk_is_mmapped(mem2chunk(mem)) ||
        av == arena_for_chunk(mem2chunk(mem)));

  if (mem == 0) {
    /* Maybe the failure is due to running out of mmapped areas. */
    if(av != &main_arena) {
      (void)mutex_lock(&main_arena.mutex);
      mem = _int_malloc(&main_arena, sz);
      (void)mutex_unlock(&main_arena.mutex);
    } else {
#if USE_ARENAS
      /* ... or sbrk() has failed and there is still a chance to mmap() */
      (void)mutex_lock(&main_arena.mutex);
      av = arena_get2(av->next ? av : 0, sz);
      (void)mutex_unlock(&main_arena.mutex);
      if(av) {
        mem = _int_malloc(av, sz);
        (void)mutex_unlock(&av->mutex);
      }
#endif
    }
    if (mem == 0) return 0;
  }
  p = mem2chunk(mem);

  /* Two optional cases in which clearing not necessary */
#if HAVE_MMAP
  if (chunk_is_mmapped (p))
    {
      if (__builtin_expect (perturb_byte, 0))
       MALLOC_ZERO (mem, sz);
      return mem;
    }
#endif

  csz = chunksize(p);

#if MORECORE_CLEARS
  if (perturb_byte == 0 && (p == oldtop && csz > oldtopsize)) {
    /* clear only the bytes from non-freshly-sbrked memory */
    csz = oldtopsize;
  }
#endif

  /* Unroll clear of <= 36 bytes (72 if 8byte sizes).  We know that
     contents have an odd number of INTERNAL_SIZE_T-sized words;
     minimally 3.  */
  d = (INTERNAL_SIZE_T*)mem;
  clearsize = csz - SIZE_SZ;
  nclears = clearsize / sizeof(INTERNAL_SIZE_T);
  assert(nclears >= 3);

  if (nclears > 9)
    MALLOC_ZERO(d, clearsize);

  else {
    *(d+0) = 0;
    *(d+1) = 0;
    *(d+2) = 0;
    if (nclears > 4) {
      *(d+3) = 0;
      *(d+4) = 0;
      if (nclears > 6) {
       *(d+5) = 0;
       *(d+6) = 0;
       if (nclears > 8) {
         *(d+7) = 0;
         *(d+8) = 0;
       }
      }
    }
  }

  return mem;
}

Here is the call graph for this function:
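
The HALF_INTERNAL_SIZE_T screen above only pays for a division when one of the operands has a bit set in its upper half, i.e. when n * elem_size could actually overflow. The same idea in isolation, with size_t standing in for INTERNAL_SIZE_T:

#include <stdio.h>
#include <stdlib.h>

/* Return nonzero if n * elem_size overflows size_t, doing the division only
   when overflow is possible -- the same screening used in public_cALLOc. */
static int mul_overflows(size_t n, size_t elem_size)
{
  const size_t half = (size_t) 1 << (8 * sizeof (size_t) / 2);
  if ((n | elem_size) >= half)
    return elem_size != 0 && (n * elem_size) / elem_size != n;
  return 0;
}

int main(void)
{
  printf("%d\n", mul_overflows(1000, 1000));        /* 0: product fits */
  printf("%d\n", mul_overflows((size_t) -1, 2));    /* 1: product overflows */
  return 0;
}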

void public_cFREe ( )
void public_cFREe ( Void_t*  m)

Definition at line 4060 of file malloc.c.

{
  public_fREe(m);
}
void public_fREe ( )
void public_fREe ( Void_t*  mem)

Definition at line 3582 of file malloc.c.

{
  mstate ar_ptr;
  mchunkptr p;                          /* chunk corresponding to mem */

  void (*hook) (__malloc_ptr_t, __const __malloc_ptr_t) = __free_hook;
  if (hook != NULL) {
    (*hook)(mem, RETURN_ADDRESS (0));
    return;
  }

  if (mem == 0)                              /* free(0) has no effect */
    return;

  p = mem2chunk(mem);

#if HAVE_MMAP
  if (chunk_is_mmapped(p))                       /* release mmapped memory. */
  {
    /* see if the dynamic brk/mmap threshold needs adjusting */
    if (!mp_.no_dyn_threshold
       && p->size > mp_.mmap_threshold
        && p->size <= DEFAULT_MMAP_THRESHOLD_MAX)
      {
       mp_.mmap_threshold = chunksize (p);
       mp_.trim_threshold = 2 * mp_.mmap_threshold;
      }
    munmap_chunk(p);
    return;
  }
#endif

  ar_ptr = arena_for_chunk(p);
#if THREAD_STATS
  if(!mutex_trylock(&ar_ptr->mutex))
    ++(ar_ptr->stat_lock_direct);
  else {
    (void)mutex_lock(&ar_ptr->mutex);
    ++(ar_ptr->stat_lock_wait);
  }
#else
  (void)mutex_lock(&ar_ptr->mutex);
#endif
  _int_free(ar_ptr, mem);
  (void)mutex_unlock(&ar_ptr->mutex);
}

Here is the call graph for this function:
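
Like the other public wrappers, public_fREe first consults a hook variable, which is the mechanism mtrace and malloc debuggers build on. A minimal logging hook using the __free_hook interface this glibc exposes in <malloc.h> (the hook variables are deprecated and were removed from later glibc releases, so treat this as a historical sketch):

#include <malloc.h>
#include <stdio.h>
#include <stdlib.h>

static void (*old_free_hook)(void *, const void *);

static void logging_free_hook(void *ptr, const void *caller)
{
  __free_hook = old_free_hook;           /* avoid recursing into ourselves */
  fprintf(stderr, "free(%p) called from %p\n", ptr, caller);
  free(ptr);
  old_free_hook = __free_hook;
  __free_hook = logging_free_hook;       /* re-arm the hook */
}

int main(void)
{
  old_free_hook = __free_hook;
  __free_hook = logging_free_hook;
  free(malloc(16));                      /* dispatched through the hook */
  __free_hook = old_free_hook;
  return 0;
}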

Void_t* public_gET_STATe ( void )

Definition at line 520 of file hooks.c.

{
  struct malloc_save_state* ms;
  int i;
  mbinptr b;

  ms = (struct malloc_save_state*)public_mALLOc(sizeof(*ms));
  if (!ms)
    return 0;
  (void)mutex_lock(&main_arena.mutex);
  malloc_consolidate(&main_arena);
  ms->magic = MALLOC_STATE_MAGIC;
  ms->version = MALLOC_STATE_VERSION;
  ms->av[0] = 0;
  ms->av[1] = 0; /* used to be binblocks, now no longer used */
  ms->av[2] = top(&main_arena);
  ms->av[3] = 0; /* used to be undefined */
  for(i=1; i<NBINS; i++) {
    b = bin_at(&main_arena, i);
    if(first(b) == b)
      ms->av[2*i+2] = ms->av[2*i+3] = 0; /* empty bin */
    else {
      ms->av[2*i+2] = first(b);
      ms->av[2*i+3] = last(b);
    }
  }
  ms->sbrk_base = mp_.sbrk_base;
  ms->sbrked_mem_bytes = main_arena.system_mem;
  ms->trim_threshold = mp_.trim_threshold;
  ms->top_pad = mp_.top_pad;
  ms->n_mmaps_max = mp_.n_mmaps_max;
  ms->mmap_threshold = mp_.mmap_threshold;
  ms->check_action = check_action;
  ms->max_sbrked_mem = main_arena.max_system_mem;
#ifdef NO_THREADS
  ms->max_total_mem = mp_.max_total_mem;
#else
  ms->max_total_mem = 0;
#endif
  ms->n_mmaps = mp_.n_mmaps;
  ms->max_n_mmaps = mp_.max_n_mmaps;
  ms->mmapped_mem = mp_.mmapped_mem;
  ms->max_mmapped_mem = mp_.max_mmapped_mem;
  ms->using_malloc_checking = using_malloc_checking;
  (void)mutex_unlock(&main_arena.mutex);
  return (Void_t*)ms;
}

Here is the call graph for this function:

Void_t** public_iCALLOc ( size_t  n,
size_t  elem_size,
Void_t **  chunks 
)

Definition at line 4030 of file malloc.c.

{
  mstate ar_ptr;
  Void_t** m;

  arena_get(ar_ptr, n*elem_size);
  if(!ar_ptr)
    return 0;

  m = _int_icalloc(ar_ptr, n, elem_size, chunks);
  (void)mutex_unlock(&ar_ptr->mutex);
  return m;
}

Here is the call graph for this function:

Void_t** public_iCOMALLOc ( size_t  n,
size_t  sizes[],
Void_t **  chunks 
)

Definition at line 4045 of file malloc.c.

{
  mstate ar_ptr;
  Void_t** m;

  arena_get(ar_ptr, 0);
  if(!ar_ptr)
    return 0;

  m = _int_icomalloc(ar_ptr, n, sizes, chunks);
  (void)mutex_unlock(&ar_ptr->mutex);
  return m;
}

Here is the call graph for this function:

Void_t* public_mALLOc ( size_t  bytes )

Definition at line 3539 of file malloc.c.

{
  mstate ar_ptr;
  Void_t *victim;

  __malloc_ptr_t (*hook) (size_t, __const __malloc_ptr_t) = __malloc_hook;
  if (hook != NULL)
    return (*hook)(bytes, RETURN_ADDRESS (0));

  arena_get(ar_ptr, bytes);
  if(!ar_ptr)
    return 0;
  victim = _int_malloc(ar_ptr, bytes);
  if(!victim) {
    /* Maybe the failure is due to running out of mmapped areas. */
    if(ar_ptr != &main_arena) {
      (void)mutex_unlock(&ar_ptr->mutex);
      ar_ptr = &main_arena;
      (void)mutex_lock(&ar_ptr->mutex);
      victim = _int_malloc(ar_ptr, bytes);
      (void)mutex_unlock(&ar_ptr->mutex);
    } else {
#if USE_ARENAS
      /* ... or sbrk() has failed and there is still a chance to mmap() */
      ar_ptr = arena_get2(ar_ptr->next ? ar_ptr : 0, bytes);
      (void)mutex_unlock(&main_arena.mutex);
      if(ar_ptr) {
        victim = _int_malloc(ar_ptr, bytes);
        (void)mutex_unlock(&ar_ptr->mutex);
      }
#endif
    }
  } else
    (void)mutex_unlock(&ar_ptr->mutex);
  assert(!victim || chunk_is_mmapped(mem2chunk(victim)) ||
        ar_ptr == arena_for_chunk(mem2chunk(victim)));
  return victim;
}

Here is the call graph for this function:

int public_mALLOPt ( int  p,
int  v 
)

Definition at line 4117 of file malloc.c.

{
  int result;
  result = mALLOPt(p, v);
  return result;
}

Here is the call graph for this function:

Void_t* public_mEMALIGn ( size_t  alignment,
size_t  bytes 
)

Definition at line 3743 of file malloc.c.

{
  mstate ar_ptr;
  Void_t *p;

  __malloc_ptr_t (*hook) __MALLOC_PMT ((size_t, size_t,
                                   __const __malloc_ptr_t)) =
    __memalign_hook;
  if (hook != NULL)
    return (*hook)(alignment, bytes, RETURN_ADDRESS (0));

  /* If need less alignment than we give anyway, just relay to malloc */
  if (alignment <= MALLOC_ALIGNMENT) return public_mALLOc(bytes);

  /* Otherwise, ensure that it is at least a minimum chunk size */
  if (alignment <  MINSIZE) alignment = MINSIZE;

  arena_get(ar_ptr, bytes + alignment + MINSIZE);
  if(!ar_ptr)
    return 0;
  p = _int_memalign(ar_ptr, alignment, bytes);
  if(!p) {
    /* Maybe the failure is due to running out of mmapped areas. */
    if(ar_ptr != &main_arena) {
      (void)mutex_unlock(&ar_ptr->mutex);
      ar_ptr = &main_arena;
      (void)mutex_lock(&ar_ptr->mutex);
      p = _int_memalign(ar_ptr, alignment, bytes);
      (void)mutex_unlock(&ar_ptr->mutex);
    } else {
#if USE_ARENAS
      /* ... or sbrk() has failed and there is still a chance to mmap() */
      mstate prev = ar_ptr->next ? ar_ptr : 0;
      (void)mutex_unlock(&ar_ptr->mutex);
      ar_ptr = arena_get2(prev, bytes);
      if(ar_ptr) {
        p = _int_memalign(ar_ptr, alignment, bytes);
        (void)mutex_unlock(&ar_ptr->mutex);
      }
#endif
    }
  } else
    (void)mutex_unlock(&ar_ptr->mutex);
  assert(!p || chunk_is_mmapped(mem2chunk(p)) ||
        ar_ptr == arena_for_chunk(mem2chunk(p)));
  return p;
}

Here is the call graph for this function:
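
From application code this is reached as memalign. Requests with alignment at or below MALLOC_ALIGNMENT are simply forwarded to malloc, as the early return above shows:

#include <malloc.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
  void *p = memalign(64, 1000);          /* 64-byte aligned, 1000 bytes */
  if (p == NULL) return 1;
  printf("%p, 64-byte aligned: %s\n", p,
         ((uintptr_t) p % 64) == 0 ? "yes" : "no");
  free(p);                               /* memalign'ed blocks use normal free */
  return 0;
}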

int public_mTRIm ( size_t  s )

Definition at line 4068 of file malloc.c.

{
  int result = 0;

  if(__malloc_initialized < 0)
    ptmalloc_init ();

  mstate ar_ptr = &main_arena;
  do
    {
      (void) mutex_lock (&ar_ptr->mutex);
      result |= mTRIm (ar_ptr, s);
      (void) mutex_unlock (&ar_ptr->mutex);

      ar_ptr = ar_ptr->next;
    }
  while (ar_ptr != &main_arena);

  return result;
}

Here is the call graph for this function:

size_t public_mUSABLe ( Void_t*  m )

Definition at line 4090 of file malloc.c.

{
  size_t result;

  result = mUSABLe(m);
  return result;
}

Here is the call graph for this function:

Void_t* public_pVALLOc ( size_t  bytes )

Definition at line 3840 of file malloc.c.

{
  mstate ar_ptr;
  Void_t *p;

  if(__malloc_initialized < 0)
    ptmalloc_init ();

  size_t pagesz = mp_.pagesize;
  size_t page_mask = mp_.pagesize - 1;
  size_t rounded_bytes = (bytes + page_mask) & ~(page_mask);

  __malloc_ptr_t (*hook) __MALLOC_PMT ((size_t, size_t,
                                   __const __malloc_ptr_t)) =
    __memalign_hook;
  if (hook != NULL)
    return (*hook)(pagesz, rounded_bytes, RETURN_ADDRESS (0));

  arena_get(ar_ptr, bytes + 2*pagesz + MINSIZE);
  p = _int_pvalloc(ar_ptr, bytes);
  (void)mutex_unlock(&ar_ptr->mutex);
  if(!p) {
    /* Maybe the failure is due to running out of mmapped areas. */
    if(ar_ptr != &main_arena) {
      (void)mutex_lock(&main_arena.mutex);
      p = _int_memalign(&main_arena, pagesz, rounded_bytes);
      (void)mutex_unlock(&main_arena.mutex);
    } else {
#if USE_ARENAS
      /* ... or sbrk() has failed and there is still a chance to mmap() */
      ar_ptr = arena_get2(ar_ptr->next ? ar_ptr : 0,
                       bytes + 2*pagesz + MINSIZE);
      if(ar_ptr) {
        p = _int_memalign(ar_ptr, pagesz, rounded_bytes);
        (void)mutex_unlock(&ar_ptr->mutex);
      }
#endif
    }
  }
  assert(!p || chunk_is_mmapped(mem2chunk(p)) ||
        ar_ptr == arena_for_chunk(mem2chunk(p)));

  return p;
}

Here is the call graph for this function:

Void_t* public_rEALLOc ( Void_t*  oldmem,
size_t  bytes 
)

Definition at line 3633 of file malloc.c.

{
  mstate ar_ptr;
  INTERNAL_SIZE_T    nb;      /* padded request size */

  mchunkptr oldp;             /* chunk corresponding to oldmem */
  INTERNAL_SIZE_T    oldsize; /* its size */

  Void_t* newp;             /* chunk to return */

  __malloc_ptr_t (*hook) (__malloc_ptr_t, size_t, __const __malloc_ptr_t) =
    __realloc_hook;
  if (hook != NULL)
    return (*hook)(oldmem, bytes, RETURN_ADDRESS (0));

#if REALLOC_ZERO_BYTES_FREES
  if (bytes == 0 && oldmem != NULL) { public_fREe(oldmem); return 0; }
#endif

  /* realloc of null is supposed to be same as malloc */
  if (oldmem == 0) return public_mALLOc(bytes);

  oldp    = mem2chunk(oldmem);
  oldsize = chunksize(oldp);

  /* Little security check which won't hurt performance: the
     allocator never wraps around at the end of the address space.
     Therefore we can exclude some size values which might appear
     here by accident or by "design" from some intruder.  */
  if (__builtin_expect ((uintptr_t) oldp > (uintptr_t) -oldsize, 0)
      || __builtin_expect (misaligned_chunk (oldp), 0))
    {
      malloc_printerr (check_action, "realloc(): invalid pointer", oldmem);
      return NULL;
    }

  checked_request2size(bytes, nb);

#if HAVE_MMAP
  if (chunk_is_mmapped(oldp))
  {
    Void_t* newmem;

#if HAVE_MREMAP
    newp = mremap_chunk(oldp, nb);
    if(newp) return chunk2mem(newp);
#endif
    /* Note the extra SIZE_SZ overhead. */
    if(oldsize - SIZE_SZ >= nb) return oldmem; /* do nothing */
    /* Must alloc, copy, free. */
    newmem = public_mALLOc(bytes);
    if (newmem == 0) return 0; /* propagate failure */
    MALLOC_COPY(newmem, oldmem, oldsize - 2*SIZE_SZ);
    munmap_chunk(oldp);
    return newmem;
  }
#endif

  ar_ptr = arena_for_chunk(oldp);
#if THREAD_STATS
  if(!mutex_trylock(&ar_ptr->mutex))
    ++(ar_ptr->stat_lock_direct);
  else {
    (void)mutex_lock(&ar_ptr->mutex);
    ++(ar_ptr->stat_lock_wait);
  }
#else
  (void)mutex_lock(&ar_ptr->mutex);
#endif

#ifndef NO_THREADS
  /* As in malloc(), remember this arena for the next allocation. */
  tsd_setspecific(arena_key, (Void_t *)ar_ptr);
#endif

  newp = _int_realloc(ar_ptr, oldmem, bytes);

  (void)mutex_unlock(&ar_ptr->mutex);
  assert(!newp || chunk_is_mmapped(mem2chunk(newp)) ||
        ar_ptr == arena_for_chunk(mem2chunk(newp)));

  if (newp == NULL)
    {
      /* Try harder to allocate memory in other arenas.  */
      newp = public_mALLOc(bytes);
      if (newp != NULL)
       {
         MALLOC_COPY (newp, oldmem, oldsize - SIZE_SZ);
#if THREAD_STATS
         if(!mutex_trylock(&ar_ptr->mutex))
           ++(ar_ptr->stat_lock_direct);
         else {
           (void)mutex_lock(&ar_ptr->mutex);
           ++(ar_ptr->stat_lock_wait);
         }
#else
         (void)mutex_lock(&ar_ptr->mutex);
#endif
         _int_free(ar_ptr, oldmem);
         (void)mutex_unlock(&ar_ptr->mutex);
       }
    }

  return newp;
}

Here is the call graph for this function:
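
The semantics visible in the wrapper above are worth spelling out from the caller's side: realloc(NULL, n) behaves like malloc(n), a grown block may move, and because REALLOC_ZERO_BYTES_FREES is set in this build, realloc(p, 0) frees p and returns NULL:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
  char *p = realloc(NULL, 16);           /* same as malloc(16) */
  if (p == NULL) return 1;
  strcpy(p, "hello");

  char *q = realloc(p, 4096);            /* may move; use the returned pointer */
  if (q == NULL) { free(p); return 1; }
  printf("after growing: %s\n", q);

  q = realloc(q, 0);                     /* frees the block, returns NULL here */
  return 0;
}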

Void_t* public_vALLOc ( size_t  bytes )

Definition at line 3795 of file malloc.c.

{
  mstate ar_ptr;
  Void_t *p;

  if(__malloc_initialized < 0)
    ptmalloc_init ();

  size_t pagesz = mp_.pagesize;

  __malloc_ptr_t (*hook) __MALLOC_PMT ((size_t, size_t,
                                   __const __malloc_ptr_t)) =
    __memalign_hook;
  if (hook != NULL)
    return (*hook)(pagesz, bytes, RETURN_ADDRESS (0));

  arena_get(ar_ptr, bytes + pagesz + MINSIZE);
  if(!ar_ptr)
    return 0;
  p = _int_valloc(ar_ptr, bytes);
  (void)mutex_unlock(&ar_ptr->mutex);
  if(!p) {
    /* Maybe the failure is due to running out of mmapped areas. */
    if(ar_ptr != &main_arena) {
      (void)mutex_lock(&main_arena.mutex);
      p = _int_memalign(&main_arena, pagesz, bytes);
      (void)mutex_unlock(&main_arena.mutex);
    } else {
#if USE_ARENAS
      /* ... or sbrk() has failed and there is still a chance to mmap() */
      ar_ptr = arena_get2(ar_ptr->next ? ar_ptr : 0, bytes);
      if(ar_ptr) {
        p = _int_memalign(ar_ptr, pagesz, bytes);
        (void)mutex_unlock(&ar_ptr->mutex);
      }
#endif
    }
  }
  assert(!p || chunk_is_mmapped(mem2chunk(p)) ||
        ar_ptr == arena_for_chunk(mem2chunk(p)));

  return p;
}

Here is the call graph for this function:

static Void_t* sYSMALLOc ( ) [static]

Here is the caller graph for this function:

static Void_t* sYSMALLOc ( INTERNAL_SIZE_T  nb,
mstate  av 
) [static]

Definition at line 2906 of file malloc.c.

{
  mchunkptr       old_top;        /* incoming value of av->top */
  INTERNAL_SIZE_T old_size;       /* its size */
  char*           old_end;        /* its end address */

  long            size;           /* arg to first MORECORE or mmap call */
  char*           brk;            /* return value from MORECORE */

  long            correction;     /* arg to 2nd MORECORE call */
  char*           snd_brk;        /* 2nd return val */

  INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of new space */
  INTERNAL_SIZE_T end_misalign;   /* partial page left at end of new space */
  char*           aligned_brk;    /* aligned offset into brk */

  mchunkptr       p;              /* the allocated/returned chunk */
  mchunkptr       remainder;      /* remainder from allocation */
  unsigned long   remainder_size; /* its size */

  unsigned long   sum;            /* for updating stats */

  size_t          pagemask  = mp_.pagesize - 1;
  bool            tried_mmap = false;


#if HAVE_MMAP

  /*
    If have mmap, and the request size meets the mmap threshold, and
    the system supports mmap, and there are few enough currently
    allocated mmapped regions, try to directly map this request
    rather than expanding top.
  */

  if ((unsigned long)(nb) >= (unsigned long)(mp_.mmap_threshold) &&
      (mp_.n_mmaps < mp_.n_mmaps_max)) {

    char* mm;             /* return value from mmap call*/

  try_mmap:
    /*
      Round up size to nearest page.  For mmapped chunks, the overhead
      is one SIZE_SZ unit larger than for normal chunks, because there
      is no following chunk whose prev_size field could be used.
    */
#if 1
    /* See the front_misalign handling below, for glibc there is no
       need for further alignments.  */
    size = (nb + SIZE_SZ + pagemask) & ~pagemask;
#else
    size = (nb + SIZE_SZ + MALLOC_ALIGN_MASK + pagemask) & ~pagemask;
#endif
    tried_mmap = true;

    /* Don't try if size wraps around 0 */
    if ((unsigned long)(size) > (unsigned long)(nb)) {

      mm = (char*)(MMAP(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE));

      if (mm != MAP_FAILED) {

        /*
          The offset to the start of the mmapped region is stored
          in the prev_size field of the chunk. This allows us to adjust
          returned start address to meet alignment requirements here
          and in memalign(), and still be able to compute proper
          address argument for later munmap in free() and realloc().
        */

#if 1
       /* For glibc, chunk2mem increases the address by 2*SIZE_SZ and
          MALLOC_ALIGN_MASK is 2*SIZE_SZ-1.  Each mmap'ed area is page
          aligned and therefore definitely MALLOC_ALIGN_MASK-aligned.  */
        assert (((INTERNAL_SIZE_T)chunk2mem(mm) & MALLOC_ALIGN_MASK) == 0);
#else
        front_misalign = (INTERNAL_SIZE_T)chunk2mem(mm) & MALLOC_ALIGN_MASK;
        if (front_misalign > 0) {
          correction = MALLOC_ALIGNMENT - front_misalign;
          p = (mchunkptr)(mm + correction);
          p->prev_size = correction;
          set_head(p, (size - correction) |IS_MMAPPED);
        }
        else
#endif
         {
           p = (mchunkptr)mm;
           set_head(p, size|IS_MMAPPED);
         }

        /* update statistics */

        if (++mp_.n_mmaps > mp_.max_n_mmaps)
          mp_.max_n_mmaps = mp_.n_mmaps;

        sum = mp_.mmapped_mem += size;
        if (sum > (unsigned long)(mp_.max_mmapped_mem))
          mp_.max_mmapped_mem = sum;
#ifdef NO_THREADS
        sum += av->system_mem;
        if (sum > (unsigned long)(mp_.max_total_mem))
          mp_.max_total_mem = sum;
#endif

        check_chunk(av, p);

        return chunk2mem(p);
      }
    }
  }
#endif

  /* Record incoming configuration of top */

  old_top  = av->top;
  old_size = chunksize(old_top);
  old_end  = (char*)(chunk_at_offset(old_top, old_size));

  brk = snd_brk = (char*)(MORECORE_FAILURE);

  /*
     If not the first time through, we require old_size to be
     at least MINSIZE and to have prev_inuse set.
  */

  assert((old_top == initial_top(av) && old_size == 0) ||
         ((unsigned long) (old_size) >= MINSIZE &&
          prev_inuse(old_top) &&
         ((unsigned long)old_end & pagemask) == 0));

  /* Precondition: not enough current space to satisfy nb request */
  assert((unsigned long)(old_size) < (unsigned long)(nb + MINSIZE));

  /* Precondition: all fastbins are consolidated */
  assert(!have_fastchunks(av));


  if (av != &main_arena) {

    heap_info *old_heap, *heap;
    size_t old_heap_size;

    /* First try to extend the current heap. */
    old_heap = heap_for_ptr(old_top);
    old_heap_size = old_heap->size;
    if ((long) (MINSIZE + nb - old_size) > 0
       && grow_heap(old_heap, MINSIZE + nb - old_size) == 0) {
      av->system_mem += old_heap->size - old_heap_size;
      arena_mem += old_heap->size - old_heap_size;
#if 0
      if(mmapped_mem + arena_mem + sbrked_mem > max_total_mem)
        max_total_mem = mmapped_mem + arena_mem + sbrked_mem;
#endif
      set_head(old_top, (((char *)old_heap + old_heap->size) - (char *)old_top)
              | PREV_INUSE);
    }
    else if ((heap = new_heap(nb + (MINSIZE + sizeof(*heap)), mp_.top_pad))) {
      /* Use a newly allocated heap.  */
      heap->ar_ptr = av;
      heap->prev = old_heap;
      av->system_mem += heap->size;
      arena_mem += heap->size;
#if 0
      if((unsigned long)(mmapped_mem + arena_mem + sbrked_mem) > max_total_mem)
       max_total_mem = mmapped_mem + arena_mem + sbrked_mem;
#endif
      /* Set up the new top.  */
      top(av) = chunk_at_offset(heap, sizeof(*heap));
      set_head(top(av), (heap->size - sizeof(*heap)) | PREV_INUSE);

      /* Setup fencepost and free the old top chunk. */
      /* The fencepost takes at least MINSIZE bytes, because it might
        become the top chunk again later.  Note that a footer is set
        up, too, although the chunk is marked in use. */
      old_size -= MINSIZE;
      set_head(chunk_at_offset(old_top, old_size + 2*SIZE_SZ), 0|PREV_INUSE);
      if (old_size >= MINSIZE) {
       set_head(chunk_at_offset(old_top, old_size), (2*SIZE_SZ)|PREV_INUSE);
       set_foot(chunk_at_offset(old_top, old_size), (2*SIZE_SZ));
       set_head(old_top, old_size|PREV_INUSE|NON_MAIN_ARENA);
       _int_free(av, chunk2mem(old_top));
      } else {
       set_head(old_top, (old_size + 2*SIZE_SZ)|PREV_INUSE);
       set_foot(old_top, (old_size + 2*SIZE_SZ));
      }
    }
    else if (!tried_mmap)
      /* We can at least try to use mmap memory.  */
      goto try_mmap;

  } else { /* av == main_arena */


  /* Request enough space for nb + pad + overhead */

  size = nb + mp_.top_pad + MINSIZE;

  /*
    If contiguous, we can subtract out existing space that we hope to
    combine with new space. We add it back later only if
    we don't actually get contiguous space.
  */

  if (contiguous(av))
    size -= old_size;

  /*
    Round to a multiple of page size.
    If MORECORE is not contiguous, this ensures that we only call it
    with whole-page arguments.  And if MORECORE is contiguous and
    this is not first time through, this preserves page-alignment of
    previous calls. Otherwise, we correct to page-align below.
  */

  size = (size + pagemask) & ~pagemask;

  /*
    Don't try to call MORECORE if argument is so big as to appear
    negative. Note that since mmap takes size_t arg, it may succeed
    below even if we cannot call MORECORE.
  */

  if (size > 0)
    brk = (char*)(MORECORE(size));

  if (brk != (char*)(MORECORE_FAILURE)) {
    /* Call the `morecore' hook if necessary.  */
    if (__after_morecore_hook)
      (*__after_morecore_hook) ();
  } else {
  /*
    If have mmap, try using it as a backup when MORECORE fails or
    cannot be used. This is worth doing on systems that have "holes" in
    address space, so sbrk cannot extend to give contiguous space, but
    space is available elsewhere.  Note that we ignore mmap max count
    and threshold limits, since the space will not be used as a
    segregated mmap region.
  */

#if HAVE_MMAP
    /* Cannot merge with old top, so add its size back in */
    if (contiguous(av))
      size = (size + old_size + pagemask) & ~pagemask;

    /* If we are relying on mmap as backup, then use larger units */
    if ((unsigned long)(size) < (unsigned long)(MMAP_AS_MORECORE_SIZE))
      size = MMAP_AS_MORECORE_SIZE;

    /* Don't try if size wraps around 0 */
    if ((unsigned long)(size) > (unsigned long)(nb)) {

      char *mbrk = (char*)(MMAP(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE));

      if (mbrk != MAP_FAILED) {

        /* We do not need, and cannot use, another sbrk call to find end */
        brk = mbrk;
        snd_brk = brk + size;

        /*
           Record that we no longer have a contiguous sbrk region.
           After the first time mmap is used as backup, we do not
           ever rely on contiguous space since this could incorrectly
           bridge regions.
        */
        set_noncontiguous(av);
      }
    }
#endif
  }

  if (brk != (char*)(MORECORE_FAILURE)) {
    if (mp_.sbrk_base == 0)
      mp_.sbrk_base = brk;
    av->system_mem += size;

    /*
      If MORECORE extends previous space, we can likewise extend top size.
    */

    if (brk == old_end && snd_brk == (char*)(MORECORE_FAILURE))
      set_head(old_top, (size + old_size) | PREV_INUSE);

    else if (contiguous(av) && old_size && brk < old_end) {
      /* Oops!  Someone else killed our space..  Can't touch anything.  */
      assert(0);
    }

    /*
      Otherwise, make adjustments:

      * If the first time through or noncontiguous, we need to call sbrk
        just to find out where the end of memory lies.

      * We need to ensure that all returned chunks from malloc will meet
        MALLOC_ALIGNMENT

      * If there was an intervening foreign sbrk, we need to adjust sbrk
        request size to account for fact that we will not be able to
        combine new space with existing space in old_top.

      * Almost all systems internally allocate whole pages at a time, in
        which case we might as well use the whole last page of request.
        So we allocate enough more memory to hit a page boundary now,
        which in turn causes future contiguous calls to page-align.
    */

    else {
      front_misalign = 0;
      end_misalign = 0;
      correction = 0;
      aligned_brk = brk;

      /* handle contiguous cases */
      if (contiguous(av)) {

       /* Count foreign sbrk as system_mem.  */
       if (old_size)
         av->system_mem += brk - old_end;

        /* Guarantee alignment of first new chunk made from this space */

        front_misalign = (INTERNAL_SIZE_T)chunk2mem(brk) & MALLOC_ALIGN_MASK;
        if (front_misalign > 0) {

          /*
            Skip over some bytes to arrive at an aligned position.
            We don't need to specially mark these wasted front bytes.
            They will never be accessed anyway because
            prev_inuse of av->top (and any chunk created from its start)
            is always true after initialization.
          */

          correction = MALLOC_ALIGNMENT - front_misalign;
          aligned_brk += correction;
        }

        /*
          If this isn't adjacent to existing space, then we will not
          be able to merge with old_top space, so must add to 2nd request.
        */

        correction += old_size;

        /* Extend the end address to hit a page boundary */
        end_misalign = (INTERNAL_SIZE_T)(brk + size + correction);
        correction += ((end_misalign + pagemask) & ~pagemask) - end_misalign;

        assert(correction >= 0);
        snd_brk = (char*)(MORECORE(correction));

        /*
          If can't allocate correction, try to at least find out current
          brk.  It might be enough to proceed without failing.

          Note that if second sbrk did NOT fail, we assume that space
          is contiguous with first sbrk. This is a safe assumption unless
          program is multithreaded but doesn't use locks and a foreign sbrk
          occurred between our first and second calls.
        */

        if (snd_brk == (char*)(MORECORE_FAILURE)) {
          correction = 0;
          snd_brk = (char*)(MORECORE(0));
        } else
         /* Call the `morecore' hook if necessary.  */
         if (__after_morecore_hook)
           (*__after_morecore_hook) ();
      }

      /* handle non-contiguous cases */
      else {
        /* MORECORE/mmap must correctly align */
        assert(((unsigned long)chunk2mem(brk) & MALLOC_ALIGN_MASK) == 0);

        /* Find out current end of memory */
        if (snd_brk == (char*)(MORECORE_FAILURE)) {
          snd_brk = (char*)(MORECORE(0));
        }
      }

      /* Adjust top based on results of second sbrk */
      if (snd_brk != (char*)(MORECORE_FAILURE)) {
        av->top = (mchunkptr)aligned_brk;
        set_head(av->top, (snd_brk - aligned_brk + correction) | PREV_INUSE);
        av->system_mem += correction;

        /*
          If not the first time through, we either have a
          gap due to foreign sbrk or a non-contiguous region.  Insert a
          double fencepost at old_top to prevent consolidation with space
          we don't own. These fenceposts are artificial chunks that are
          marked as inuse and are in any case too small to use.  We need
          two to make sizes and alignments work out.
        */

        if (old_size != 0) {
          /*
             Shrink old_top to insert fenceposts, keeping size a
             multiple of MALLOC_ALIGNMENT. We know there is at least
             enough space in old_top to do this.
          */
          old_size = (old_size - 4*SIZE_SZ) & ~MALLOC_ALIGN_MASK;
          set_head(old_top, old_size | PREV_INUSE);

          /*
            Note that the following assignments completely overwrite
            old_top when old_size was previously MINSIZE.  This is
            intentional. We need the fencepost, even if old_top otherwise gets
            lost.
          */
          chunk_at_offset(old_top, old_size            )->size =
            (2*SIZE_SZ)|PREV_INUSE;

          chunk_at_offset(old_top, old_size + 2*SIZE_SZ)->size =
            (2*SIZE_SZ)|PREV_INUSE;

          /* If possible, release the rest. */
          if (old_size >= MINSIZE) {
            _int_free(av, chunk2mem(old_top));
          }

        }
      }
    }

    /* Update statistics */
#ifdef NO_THREADS
    sum = av->system_mem + mp_.mmapped_mem;
    if (sum > (unsigned long)(mp_.max_total_mem))
      mp_.max_total_mem = sum;
#endif

  }

  } /* if (av !=  &main_arena) */

  if ((unsigned long)av->system_mem > (unsigned long)(av->max_system_mem))
    av->max_system_mem = av->system_mem;
  check_malloc_state(av);

  /* finally, do the allocation */
  p = av->top;
  size = chunksize(p);

  /* check that one of the above allocation paths succeeded */
  if ((unsigned long)(size) >= (unsigned long)(nb + MINSIZE)) {
    remainder_size = size - nb;
    remainder = chunk_at_offset(p, nb);
    av->top = remainder;
    set_head(p, nb | PREV_INUSE | (av != &main_arena ? NON_MAIN_ARENA : 0));
    set_head(remainder, remainder_size | PREV_INUSE);
    check_malloced_chunk(av, p, nb);
    return chunk2mem(p);
  }

  /* catch all failure paths */
  MALLOC_FAILURE_ACTION;
  return 0;
}

Here is the call graph for this function:
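
On the main-arena path, MORECORE is plain sbrk (see the defines at the top of the file). The convention the code depends on is small enough to demonstrate directly: sbrk(0) reports the current break, a positive increment returns the previous break (the start of the new space), and failure returns (void *) -1, which is MORECORE_FAILURE. Depending on compiler flags, sbrk may need _DEFAULT_SOURCE or a similar feature-test macro to be declared.

#include <stdio.h>
#include <unistd.h>

int main(void)
{
  void *before = sbrk(0);                /* current program break */
  void *old    = sbrk(4096);             /* grow by one page; returns old break */
  void *after  = sbrk(0);

  if (old == (void *) -1) {              /* MORECORE_FAILURE */
    perror("sbrk");
    return 1;
  }
  printf("break moved from %p to %p; new space starts at %p\n",
         before, after, old);
  sbrk(-4096);                           /* hand the page back */
  return 0;
}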

static int sYSTRIm ( ) [static]

Here is the caller graph for this function:

static int sYSTRIm ( size_t  pad,
mstate  av 
) [static]

Definition at line 3381 of file malloc.c.

{
  long  top_size;        /* Amount of top-most memory */
  long  extra;           /* Amount to release */
  long  released;        /* Amount actually released */
  char* current_brk;     /* address returned by pre-check sbrk call */
  char* new_brk;         /* address returned by post-check sbrk call */
  size_t pagesz;

  pagesz = mp_.pagesize;
  top_size = chunksize(av->top);

  /* Release in pagesize units, keeping at least one page */
  extra = ((top_size - pad - MINSIZE + (pagesz-1)) / pagesz - 1) * pagesz;

  if (extra > 0) {

    /*
      Only proceed if end of memory is where we last set it.
      This avoids problems if there were foreign sbrk calls.
    */
    current_brk = (char*)(MORECORE(0));
    if (current_brk == (char*)(av->top) + top_size) {

      /*
        Attempt to release memory. We ignore MORECORE return value,
        and instead call again to find out where new end of memory is.
        This avoids problems if first call releases less than we asked,
        or if failure somehow altered brk value. (We could still
        encounter problems if it altered brk in some very bad way,
        but the only thing we can do is adjust anyway, which will cause
        some downstream failure.)
      */

      MORECORE(-extra);
      /* Call the `morecore' hook if necessary.  */
      if (__after_morecore_hook)
       (*__after_morecore_hook) ();
      new_brk = (char*)(MORECORE(0));

      if (new_brk != (char*)MORECORE_FAILURE) {
        released = (long)(current_brk - new_brk);

        if (released != 0) {
          /* Success. Adjust top. */
          av->system_mem -= released;
          set_head(av->top, (top_size - released) | PREV_INUSE);
          check_malloc_state(av);
          return 1;
        }
      }
    }
  }
  return 0;
}
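
The extra computation keeps pad plus MINSIZE bytes and at least one page at the top, and releases only whole pages beyond that. A few worked values, with MINSIZE taken as 32 (its usual 64-bit value) purely for illustration:

#include <stdio.h>

/* Mirror of sYSTRIm's computation: bytes to release from the top chunk while
   keeping pad + minsize and at least one page resident. */
static long trim_extra(long top_size, long pad, long pagesz, long minsize)
{
  return ((top_size - pad - minsize + (pagesz - 1)) / pagesz - 1) * pagesz;
}

int main(void)
{
  printf("%ld\n", trim_extra(3 * 4096, 0, 4096, 32));  /* 8192: two pages go  */
  printf("%ld\n", trim_extra(4096, 0, 4096, 32));      /* 0: keep the one page */
  printf("%ld\n", trim_extra(20, 0, 4096, 32));        /* -4096: nothing to do */
  return 0;
}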

Variable Documentation

Definition at line 2485 of file malloc.c.

Definition at line 2475 of file malloc.c.

Definition at line 2478 of file malloc.c.

Definition at line 2474 of file malloc.c.

Definition at line 2483 of file malloc.c.

Definition at line 2480 of file malloc.c.

Definition at line 25 of file init-first.c.

Definition at line 2494 of file malloc.c.

int dev_zero_fd = -1 [static]

Definition at line 1744 of file malloc.c.

Definition at line 2395 of file malloc.c.

struct malloc_par mp_ [static]

Definition at line 2391 of file malloc.c.

struct malloc_state main_arena [static]

Definition at line 2387 of file malloc.c.

int perturb_byte [static]

Definition at line 2499 of file malloc.c.