Back to index

plt-scheme  4.2.1
Classes | Defines | Typedefs | Enumerations | Functions | Variables
sgc.c File Reference
#include <stdlib.h>
#include <setjmp.h>
#include <stdio.h>
#include <string.h>
#include "../sconfig.h"
#include "mzconfig.h"
#include "sgc.h"
#include "../utils/splay.c"
#include "collect.inc"

Go to the source code of this file.

Classes

struct  BlockOfMemory
struct  MemoryChunk
struct  SectorPage
struct  SectorFreepage
struct  GC_Set
struct  GC_SetWithOthers
struct  Finalizer
struct  ManagedBlockHeader
struct  ManagedBlock
struct  ManagedBucket
struct  Managed
union  Finalizer.u

Defines

#define SGC_STD_DEBUGGING   0
#define SGC_STD_DEBUGGING_UNIX   SGC_STD_DEBUGGING
#define SGC_STD_DEBUGGING_WINDOWS   0
#define SGC_AUTO_ROOTS   0
#define NO_COLLECTIONS   0
#define NO_DISAPPEARING   0
#define NO_FINALIZING   0
#define NO_ATOMIC   0
#define KEEP_BLOCKS_FOREVER   0
#define NO_STACK_OFFBYONE   0
#define WATCH_FOR_FINALIZATION_CYCLES   0
#define USE_GC_FREE_SPACE_DIVISOR   1
#define PROVIDE_GC_FREE   0
#define PROVIDE_CHUNK_GC_FREE   1
#define PROVIDE_MALLOC_AND_FREE   0
#define GET_MEM_VIA_SBRK   0
#define GET_MEM_VIA_MMAP   SGC_STD_DEBUGGING_UNIX
#define GET_MEM_VIA_VIRTUAL_ALLOC   SGC_STD_DEBUGGING_WINDOWS
#define RELEASE_UNUSED_SECTORS   1
#define DISTINGUISH_FREE_FROM_UNMARKED   0
#define TERSE_MEMORY_TRACING   0
#define STD_MEMORY_TRACING   SGC_STD_DEBUGGING
#define DETAIL_MEMORY_TRACING   0
#define STAMP_AND_REMEMBER_SOURCE   0
#define ALLOW_TRACE_COUNT   0
#define ALLOW_TRACE_PATH   0
#define KEEP_DETAIL_PATH   SGC_STD_DEBUGGING
#define CHECK_SKIP_MARK_AT_FIRST   0
#define ALLOW_SET_LOCKING   0
#define ALLOW_SET_FINALIZER   0
#define CHECK_WATCH_FOR_PTR_ALLOC   SGC_STD_DEBUGGING
#define USE_WATCH_FOUND_FUNC   SGC_STD_DEBUGGING
#define PAD_BOUNDARY_BYTES   SGC_STD_DEBUGGING
#define CHECK_SIMPLE_INTERIOR_POINTERS   0
#define DUMP_BLOCK_COUNTS   1
#define DUMP_SECTOR_MAP   1
#define DUMP_BLOCK_MAPS   1 /* 0 */
#define CHECK_FREES   SGC_STD_DEBUGGING
#define FPRINTF_USE_PRIM_STRINGOUT   SGC_STD_DEBUGGING_WINDOWS
#define PRIM_STRINGOUT_AS_FWRITE   0
#define PRIM_STRINGOUT_AS_WINDOWS_CONSOLE   SGC_STD_DEBUGGING_WINDOWS
#define AUTO_STATIC_ROOTS_IF_POSSIBLE   SGC_AUTO_ROOTS
#define PRINT_INFO_PER_GC   SGC_STD_DEBUGGING
#define SHOW_SECTOR_MAPS_AT_GC   0
#define FIRST_GC_LIMIT   100000
#define MEM_USE_FACTOR   3
#define PTR_TO_INT(v)   ((unsigned long)(v))
#define INT_TO_PTR(v)   ((void *)(v))
#define MALLOC   malloc
#define FREE   free
#define RELEASE_UNUSED_SECTORS   0
#define PTR_ALIGNMENT   4
#define LOG_PTR_SIZE   2
#define LOW_32_BITS(x)   x
#define PTR_SIZE   (1 << LOG_PTR_SIZE)
#define DOUBLE_SIZE   sizeof(double)
#define LOG_SECTOR_SEGMENT_SIZE   12
#define SECTOR_SEGMENT_SIZE   (1 << LOG_SECTOR_SEGMENT_SIZE)
#define SECTOR_SEGMENT_MASK   (~(SECTOR_SEGMENT_SIZE-1))
#define MAX_COMMON_SIZE   (SECTOR_SEGMENT_SIZE >> 2)
#define NUM_COMMON_SIZE   ((2 * LOG_SECTOR_SEGMENT_SIZE) + 8)
#define SECTOR_SEGMENT_GROUP_SIZE   32
#define SECTOR_LOOKUP_PAGESETBITS   12
#define LOG_MAP_PTR_SIZE   2
#define MAP_PTR_SIZE   4
#define SECTOR_LOOKUP_SHIFT   ((MAP_PTR_SIZE*8) - SECTOR_LOOKUP_PAGESETBITS)
#define LOG_SECTOR_LOOKUP_PAGESIZE   ((MAP_PTR_SIZE*8) - SECTOR_LOOKUP_PAGESETBITS - LOG_SECTOR_SEGMENT_SIZE)
#define SECTOR_LOOKUP_PAGESIZE   (1 << LOG_SECTOR_LOOKUP_PAGESIZE)
#define SECTOR_LOOKUP_PAGEMASK   (SECTOR_LOOKUP_PAGESIZE - 1)
#define SECTOR_LOOKUP_PAGETABLE(x)   (LOW_32_BITS(x) >> SECTOR_LOOKUP_SHIFT)
#define SECTOR_LOOKUP_PAGEPOS(x)   ((LOW_32_BITS(x) >> LOG_SECTOR_SEGMENT_SIZE) & SECTOR_LOOKUP_PAGEMASK)
#define LOG_SECTOR_PAGEREC_SIZE   (LOG_PTR_SIZE + 1)
#define CHECK   0
#define PRINT   0
#define TIME   0
#define ALWAYS_TRACE   0
#define CHECK_COLLECTING   0
#define MARK_STATS   0
#define ALLOC_STATS   0
#define FINISH_STATS   0
#define DUMP_BLOCK_COUNTS   1
#define EXTRA_FREE_CHECKS   0
#define KEEP_SET_NO   0
#define KEEP_CHUNK_SET_NO   1
#define KEEP_PREV_PTR   1
#define NULL   0L
#define PAD_FORWARD(p)   (p)
#define PAD_BACKWARD(p)   (p)
#define FREE_BIT_PER_ELEM   8
#define LOG_FREE_BIT_PER_ELEM   3
#define FREE_BIT_SIZE   (8 >> LOG_FREE_BIT_PER_ELEM)
#define FREE_BIT_START   0x1
#define UNMARK_BIT_START   0x1
#define POS_TO_FREE_INDEX(p)   (p >> LOG_FREE_BIT_PER_ELEM)
#define POS_TO_UNMARK_INDEX(p)   (p >> LOG_FREE_BIT_PER_ELEM)
#define POS_TO_FREE_BIT(p)   (FREE_BIT_START << (p & (FREE_BIT_PER_ELEM - 1)))
#define POS_TO_UNMARK_BIT(p)   (UNMARK_BIT_START << (p & (FREE_BIT_PER_ELEM - 1)))
#define ALL_UNMARKED   0xFF
#define _NOT_FREE(x)   1
#define NOT_FREE(x)   (!(x))
#define IS_FREE(x)   (x)
#define NOT_MARKED(x)   (x)
#define IS_MARKED(x)   (!(x))
#define ELEM_PER_BLOCK(b)   b->elem_per_block
#define TREE_FP(t)   ((SectorFreepage *)(t->data))
#define TABLE_HI_SHIFT   LOG_SECTOR_SEGMENT_SIZE
#define TABLE_LO_MASK   (SECTOR_SEGMENT_SIZE-1)
#define EACH_TABLE_COUNT   (1 << (LOG_SECTOR_SEGMENT_SIZE - LOG_PTR_SIZE))
#define do_malloc_ATOMIC   0x1
#define do_malloc_UNCOLLECTABLE   0x2
#define do_malloc_ATOMIC_UNLESS_DISABLED   do_malloc_ATOMIC
#define FPRINTF   fprintf
#define STDERR   stderr
#define DECL_SECTOR_PAGETABLES
#define GET_SECTOR_PAGETABLES(p)
#define FIND_SECTOR_PAGETABLES(p)
#define ALLOC_STATISTIC(x)   /* empty */
#define SET_NO_BACKINFO   int set_no,
#define KEEP_SET_INFO_ARG(x)   x,
#define TRACE_COLLECT_SWITCH   1
#define FINISH_STATISTIC(x)
#define INITIAL_COLLECT_STACK_SIZE   8192
#define PUSH_SRC(src)   /*empty*/
#define LOCAL_PUSH_SRC(src)   /*empty*/
#define COLLECT_STACK_FRAME_SIZE   2
#define PUSH_COLLECT(s, e, src)
#define LOCAL_PUSH_COLLECT(s, e, src)
#define COLLECT   semi_collect_stack
#define STACK_TRACE
#define COLLECT   collect
#define FLUSH_REGISTER_WINDOWS   /* empty */
#define GETTIME()   scheme_get_process_milliseconds()
#define INITTIME()   /* empty */
#define PRINTTIME(x)   /* empty */

Typedefs

typedef struct BlockOfMemory BlockOfMemory
typedef struct MemoryChunk MemoryChunk
typedef struct SectorFreepage SectorFreepage
typedef struct GC_Set GC_Set
typedef struct GC_SetWithOthers GC_SetWithOthers
typedef struct DisappearingLink DisappearingLink
typedef struct Finalizer Finalizer
typedef struct ManagedBlock ManagedBlock

Enumerations

enum  {
  sector_kind_block, sector_kind_free, sector_kind_freed, sector_kind_chunk,
  sector_kind_managed, sector_kind_other
}
enum  { dl_normal, dl_restored, dl_late }

Functions

static Tree *next (Tree *node)
static void remove_freepage (SectorFreepage *fp)
static void add_freepage (SectorFreepage *naya)
GC_collect_start_callback_Proc GC_set_collect_start_callback (GC_collect_start_callback_Proc func)
GC_collect_end_callback_Proc GC_set_collect_end_callback (GC_collect_end_callback_Proc func)
static void *platform_plain_sector (int count)
static void *malloc_plain_sector (int count)
static void register_sector (void *naya, int need, long kind)
static void *malloc_sector (long size, long kind, int no_new)
static void free_sector (void *p)
static void prepare_collect_temp ()
static void *realloc_collect_temp (void *v, long oldsize, long newsize)
static void free_collect_temp (void *v, long oldsize)
static void *malloc_managed (long size)
static void free_managed (void *s)
static void init_size_map ()
void GC_add_roots (void *start, void *end)
static void init_static_variables (void)
static void GC_initialize (void)
void GC_set_stack_base (void *base)
void *GC_get_stack_base (void)
static void *find_ptr (void *d, int *_size, BlockOfMemory **_block, int *_pos, MemoryChunk **_chunk, int find_anyway)
void *GC_base (void *d)
int GC_size (void *d)
int GC_is_atomic (void *d)
int GC_orig_size (void *d)
void *GC_orig_base (void *d)
GC_Set *GC_set (void *d)
static void dump_sector_map (char *prefix)
void GC_dump (void)
long GC_get_memory_use ()
void GC_end_stubborn_change (void *p)
static void init_positions (int cpos, int size, int num_elems)
static void *do_malloc (SET_NO_BACKINFO unsigned long size, BlockOfMemory **common, MemoryChunk **othersptr, int flags)
GC_Set *GC_new_set (char *name, GC_trace_init trace_init, GC_trace_done trace_done, GC_count_tracer count_tracer, GC_path_tracer path_tracer, GC_set_elem_finalizer final, int flags)
void *GC_malloc (size_t size)
void *GC_malloc_atomic (size_t size)
void *GC_malloc_uncollectable (size_t size)
void *GC_malloc_atomic_uncollectable (size_t size)
void *GC_malloc_specific (size_t size, GC_Set *set)
void *GC_malloc_stubborn (size_t size)
static void register_disappearing_link (void **p, void *a, int late)
void GC_general_register_disappearing_link (void **p, void *a)
void GC_register_late_disappearing_link (void **p, void *a)
void GC_unregister_disappearing_link (void **p)
static void register_finalizer (void *p, void(*f)(void *p, void *data), void *data, void(**oldf)(void *p, void *data), void **olddata, int eager_level, int ignore_self)
void GC_register_finalizer (void *p, void(*f)(void *p, void *data), void *data, void(**oldf)(void *p, void *data), void **olddata)
void GC_register_eager_finalizer (void *p, int level, void(*f)(void *p, void *data), void *data, void(**oldf)(void *p, void *data), void **olddata)
void GC_register_finalizer_ignore_self (void *p, void(*f)(void *p, void *data), void *data, void(**oldf)(void *p, void *data), void **olddata)
void GC_for_each_element (GC_Set *set, void(*f)(void *p, int size, void *data), void *data)
static void free_chunk (MemoryChunk *k, MemoryChunk **prev, GC_Set *set)
void GC_free (void *p)
static void collect_init_chunk (MemoryChunk *c, int uncollectable, int ty)
static void collect_finish_chunk (MemoryChunk **c, GC_Set *set)
static void collect_init_common (BlockOfMemory **blocks, int uncollectable, int ty)
static void collect_finish_common (BlockOfMemory **blocks, BlockOfMemory **block_ends, GC_Set *set)
static void push_collect (unsigned long start, unsigned long end, unsigned long src)
static void prepare_stack_collect ()
static void push_stack (void *stack_now)
static void push_uncollectable_chunk (MemoryChunk *c, GC_Set *set)
static void push_uncollectable_common (BlockOfMemory **blocks, GC_Set *set)
static void push_collect_ignore (unsigned long s, unsigned long e, unsigned long a)
static void mark_chunks_for_finalizations (MemoryChunk *c)
static void mark_common_for_finalizations (BlockOfMemory **blocks, int atomic)
static void enqueue_fn (Finalizer *fn)
static void queue_chunk_finalizeable (MemoryChunk *c, int eager_level)
static void queue_common_finalizeable (BlockOfMemory **blocks, int eager_level)
static void do_disappearing (DisappearingLink **disappearing_ptr)
static void trim_disappearing (DisappearingLink **disappearing_ptr)
static void do_disappear_and_finals ()
static int compare_roots (const void *a, const void *b)
static void sort_and_merge_roots ()
static void run_finalizers (void)
long scheme_get_process_milliseconds (void)
void GC_push_all_stack (void *sp, void *ep)
void GC_flush_mark_stack ()
static void do_GC_gcollect (void *stack_now)
void GC_gcollect (void)
int GC_trace_count (int *stack, int *roots, int *uncollectable, int *final)
void GC_trace_path (void)
void GC_store_path (void *v, unsigned long src, void *path_data)
void ** GC_get_next_path (void **prev, int *len)
void GC_clear_paths (void)

Variables

void *GC_initial_trace_root
int(* GC_inital_root_skip )(void *, size_t)
void(* GC_out_of_memory )(void)
static Tree *sector_freepage_by_start
static Tree *sector_freepage_by_end
static Tree *sector_freepage_by_size
static GC_Set ** common_sets
static int num_common_sets
static BlockOfMemory *common [2 *NUM_COMMON_SIZE]
static BlockOfMemory *atomic_common [2 *NUM_COMMON_SIZE]
static BlockOfMemory *uncollectable_common [2 *NUM_COMMON_SIZE]
static BlockOfMemory *uncollectable_atomic_common [2 *NUM_COMMON_SIZE]
static MemoryChunk *others
static MemoryChunk *atomic_others
static MemoryChunk *uncollectable_others
static MemoryChunk *uncollectable_atomic_others
static int *common_positionses [NUM_COMMON_SIZE]
int GC_dl_entries
int GC_fo_entries
void(* GC_push_last_roots )(void)
void(* GC_push_last_roots_again )(void)
static DisappearingLink *disappearing
static DisappearingLink *late_disappearing
static Finalizer *queued_finalizers
static Finalizer *last_queued_finalizer
static int num_queued_finalizers
static unsigned long sector_low_plausible
static unsigned long sector_high_plausible
static unsigned long low_plausible
static unsigned long high_plausible
void *GC_stackbottom
static long mem_use
static long mem_limit = FIRST_GC_LIMIT
int GC_free_space_divisor = 4
static long mem_real_use
static long mem_uncollectable_use
static long sector_mem_use
static long sector_admin_mem_use
static long sector_free_mem_use
static long manage_mem_use
static long manage_real_mem_use
static long collect_mem_use
static long num_sector_allocs
static long num_sector_frees
static long num_chunks
static long num_blocks
GC_collect_start_callback_Proc GC_collect_start_callback
GC_collect_end_callback_Proc GC_collect_end_callback
void(* GC_custom_finalize )(void)
static long roots_count
static long roots_size
static unsigned long * roots
static long * size_index_map
static long * size_map
static SectorPage ** sector_pagetables
static Managed *managed
static int statics_setup = 0
static int initialized = 0
static unsigned long trace_stack_start
static unsigned long trace_stack_end
static unsigned long trace_reg_start
static unsigned long trace_reg_end
static void *zero_ptr
static int collect_stack_count
static int collect_stack_size
static unsigned long * collect_stack
static jmp_buf buf

Class Documentation

struct BlockOfMemory

Definition at line 482 of file sgc.c.

Collaboration diagram for BlockOfMemory:
Class Members
short atomic
short elem_per_block
unsigned long end
struct Finalizer * finalizers
unsigned char free
short free_search_bit
short free_search_offset
short free_search_start
struct BlockOfMemory * next
int * positions
short size
unsigned long start
unsigned long top
struct MemoryChunk

Definition at line 552 of file sgc.c.

Collaboration diagram for MemoryChunk:
Class Members
short atomic
char data
unsigned long end
struct Finalizer * finalizers
short marked
struct MemoryChunk * next
struct MemoryChunk ** prev_ptr
int set_no
unsigned long start
struct SectorPage

Definition at line 573 of file sgc.c.

Class Members
long kind
unsigned long start
struct SectorFreepage

Definition at line 582 of file sgc.c.

Collaboration diagram for SectorFreepage:
Class Members
Tree by_end
Tree by_size
Tree by_start
Tree by_start_per_size
unsigned long end
Tree * same_size
long size
unsigned long start
struct GC_Set

Definition at line 657 of file sgc.c.

Collaboration diagram for GC_Set:
Class Members
short atomic
BlockOfMemory ** block_ends
BlockOfMemory ** blocks
char * name
int no
MemoryChunk ** othersptr
unsigned long total
short uncollectable
struct GC_SetWithOthers

Definition at line 687 of file sgc.c.

Collaboration diagram for GC_SetWithOthers:
Class Members
GC_Set c
MemoryChunk * others
struct DisappearingLink

Definition at line 729 of file sgc.c.

Collaboration diagram for DisappearingLink:
Class Members
void ** disappear
short kind
struct DisappearingLink * next
struct DisappearingLink * prev
void * saved_value
void * watch
struct ManagedBlockHeader

Definition at line 1315 of file sgc.c.

Collaboration diagram for ManagedBlockHeader:
Class Members
long count
unsigned long end
struct ManagedBlock * next
struct ManagedBlock * prev
long size
struct ManagedBlock

Definition at line 1323 of file sgc.c.

Collaboration diagram for ManagedBlock:
Class Members
char free
ManagedBlockHeader head
struct ManagedBucket

Definition at line 1328 of file sgc.c.

Collaboration diagram for ManagedBucket:
Class Members
ManagedBlock * block
long offset
long perblock
long size
struct Managed

Definition at line 1335 of file sgc.c.

Collaboration diagram for Managed:
Class Members
ManagedBucket buckets
int num_buckets
union Finalizer.u

Definition at line 738 of file sgc.c.

Class Members
int pos
void * watch

Define Documentation

#define _NOT_FREE (   x)    1

Definition at line 541 of file sgc.c.

#define ALL_UNMARKED   0xFF

Definition at line 539 of file sgc.c.

#define ALLOC_STATISTIC (   x)    /* empty */

Definition at line 2190 of file sgc.c.

#define ALLOC_STATS   0

Definition at line 364 of file sgc.c.

#define ALLOW_SET_FINALIZER   0

Definition at line 163 of file sgc.c.

#define ALLOW_SET_LOCKING   0

Definition at line 160 of file sgc.c.

#define ALLOW_TRACE_COUNT   0

Definition at line 147 of file sgc.c.

#define ALLOW_TRACE_PATH   0

Definition at line 150 of file sgc.c.

#define ALWAYS_TRACE   0

Definition at line 361 of file sgc.c.

Definition at line 221 of file sgc.c.

#define CHECK   0

Definition at line 358 of file sgc.c.

#define CHECK_COLLECTING   0

Definition at line 362 of file sgc.c.

Definition at line 203 of file sgc.c.

Definition at line 180 of file sgc.c.

#define CHECK_SKIP_MARK_AT_FIRST   0

Definition at line 156 of file sgc.c.

Definition at line 166 of file sgc.c.

#define COLLECT   semi_collect_stack

Definition at line 3626 of file sgc.c.

#define COLLECT   collect

Definition at line 3626 of file sgc.c.

#define COLLECT_STACK_FRAME_SIZE   2

Definition at line 3452 of file sgc.c.

Definition at line 875 of file sgc.c.

#define DETAIL_MEMORY_TRACING   0

Definition at line 140 of file sgc.c.

Definition at line 129 of file sgc.c.

#define do_malloc_ATOMIC   0x1

Definition at line 709 of file sgc.c.

Definition at line 714 of file sgc.c.

#define do_malloc_UNCOLLECTABLE   0x2

Definition at line 710 of file sgc.c.

#define DOUBLE_SIZE   sizeof(double)

Definition at line 298 of file sgc.c.

#define DUMP_BLOCK_COUNTS   1

Definition at line 405 of file sgc.c.

#define DUMP_BLOCK_COUNTS   1

Definition at line 405 of file sgc.c.

#define DUMP_BLOCK_MAPS   1 /* 0 */

Definition at line 198 of file sgc.c.

#define DUMP_SECTOR_MAP   1

Definition at line 190 of file sgc.c.

Definition at line 655 of file sgc.c.

#define ELEM_PER_BLOCK (   b)    b->elem_per_block

Definition at line 550 of file sgc.c.

#define EXTRA_FREE_CHECKS   0

Definition at line 415 of file sgc.c.

#define FIND_SECTOR_PAGETABLES (   p)

Definition at line 877 of file sgc.c.

#define FINISH_STATISTIC (   x)

Definition at line 3181 of file sgc.c.

#define FINISH_STATS   0

Definition at line 365 of file sgc.c.

#define FIRST_GC_LIMIT   100000

Definition at line 245 of file sgc.c.

#define FLUSH_REGISTER_WINDOWS   /* empty */

Definition at line 3643 of file sgc.c.

#define FPRINTF   fprintf

Definition at line 817 of file sgc.c.

Definition at line 206 of file sgc.c.

#define FREE   free

Definition at line 263 of file sgc.c.

#define FREE_BIT_PER_ELEM   8

Definition at line 528 of file sgc.c.

#define FREE_BIT_SIZE   (8 >> LOG_FREE_BIT_PER_ELEM)

Definition at line 530 of file sgc.c.

#define FREE_BIT_START   0x1

Definition at line 531 of file sgc.c.

Definition at line 117 of file sgc.c.

#define GET_MEM_VIA_SBRK   0

Definition at line 113 of file sgc.c.

Definition at line 121 of file sgc.c.

#define GET_SECTOR_PAGETABLES (   p)

Definition at line 876 of file sgc.c.

Definition at line 4300 of file sgc.c.

#define INITIAL_COLLECT_STACK_SIZE   8192

Definition at line 3443 of file sgc.c.

#define INITTIME ( )    /* empty */

Definition at line 4309 of file sgc.c.

#define INT_TO_PTR (   v)    ((void *)(v))

Definition at line 260 of file sgc.c.

#define IS_FREE (   x)    (x)

Definition at line 546 of file sgc.c.

#define IS_MARKED (   x)    (!(x))

Definition at line 548 of file sgc.c.

#define KEEP_BLOCKS_FOREVER   0

Definition at line 80 of file sgc.c.

#define KEEP_CHUNK_SET_NO   1

Definition at line 431 of file sgc.c.

Definition at line 153 of file sgc.c.

#define KEEP_PREV_PTR   1

Definition at line 436 of file sgc.c.

#define KEEP_SET_INFO_ARG (   x)    x,

Definition at line 2195 of file sgc.c.

#define KEEP_SET_NO   0

Definition at line 429 of file sgc.c.

#define LOCAL_PUSH_COLLECT (   s,
  e,
  src 
)
Value:
if (local_collect_stack_count < local_collect_stack_size) { \
    local_collect_stack[local_collect_stack_count++] = s; \
    local_collect_stack[local_collect_stack_count++] = e + 1 - PTR_ALIGNMENT; \
    LOCAL_PUSH_SRC(src) \
  } else { \
    collect_stack_count = local_collect_stack_count; \
    push_collect(s, e + 1 - PTR_ALIGNMENT, src); \
    local_collect_stack = collect_stack; \
    local_collect_stack_count = collect_stack_count; \
    local_collect_stack_size = collect_stack_size; \
  }

Definition at line 3486 of file sgc.c.

#define LOCAL_PUSH_SRC (   src)    /*empty*/

Definition at line 3451 of file sgc.c.

#define LOG_FREE_BIT_PER_ELEM   3

Definition at line 529 of file sgc.c.

#define LOG_MAP_PTR_SIZE   2

Definition at line 323 of file sgc.c.

#define LOG_PTR_SIZE   2

Definition at line 293 of file sgc.c.

Definition at line 327 of file sgc.c.

Definition at line 334 of file sgc.c.

#define LOG_SECTOR_SEGMENT_SIZE   12

Definition at line 304 of file sgc.c.

#define LOW_32_BITS (   x)    x

Definition at line 294 of file sgc.c.

#define MALLOC   malloc

Definition at line 262 of file sgc.c.

#define MAP_PTR_SIZE   4

Definition at line 324 of file sgc.c.

#define MARK_STATS   0

Definition at line 363 of file sgc.c.

#define MAX_COMMON_SIZE   (SECTOR_SEGMENT_SIZE >> 2)

Definition at line 310 of file sgc.c.

#define MEM_USE_FACTOR   3

Definition at line 246 of file sgc.c.

#define NO_ATOMIC   0

Definition at line 77 of file sgc.c.

#define NO_COLLECTIONS   0

Definition at line 68 of file sgc.c.

#define NO_DISAPPEARING   0

Definition at line 71 of file sgc.c.

#define NO_FINALIZING   0

Definition at line 74 of file sgc.c.

#define NO_STACK_OFFBYONE   0

Definition at line 85 of file sgc.c.

#define NOT_FREE (   x)    (!(x))

Definition at line 545 of file sgc.c.

#define NOT_MARKED (   x)    (x)

Definition at line 547 of file sgc.c.

#define NULL   0L

Definition at line 442 of file sgc.c.

#define NUM_COMMON_SIZE   ((2 * LOG_SECTOR_SEGMENT_SIZE) + 8)

Definition at line 312 of file sgc.c.

#define PAD_BACKWARD (   p)    (p)

Definition at line 456 of file sgc.c.

Definition at line 174 of file sgc.c.

#define PAD_FORWARD (   p)    (p)

Definition at line 455 of file sgc.c.

#define POS_TO_FREE_BIT (   p)    (FREE_BIT_START << (p & (FREE_BIT_PER_ELEM - 1)))

Definition at line 536 of file sgc.c.

#define POS_TO_FREE_INDEX (   p)    (p >> LOG_FREE_BIT_PER_ELEM)

Definition at line 534 of file sgc.c.

#define POS_TO_UNMARK_BIT (   p)    (UNMARK_BIT_START << (p & (FREE_BIT_PER_ELEM - 1)))

Definition at line 537 of file sgc.c.

#define POS_TO_UNMARK_INDEX (   p)    (p >> LOG_FREE_BIT_PER_ELEM)

Definition at line 535 of file sgc.c.

#define PRIM_STRINGOUT_AS_FWRITE   0

Definition at line 213 of file sgc.c.

Definition at line 217 of file sgc.c.

#define PRINT   0

Definition at line 359 of file sgc.c.

Definition at line 225 of file sgc.c.

#define PRINTTIME (   x)    /* empty */

Definition at line 4310 of file sgc.c.

#define PROVIDE_CHUNK_GC_FREE   1

Definition at line 99 of file sgc.c.

#define PROVIDE_GC_FREE   0

Definition at line 96 of file sgc.c.

#define PROVIDE_MALLOC_AND_FREE   0

Definition at line 103 of file sgc.c.

#define PTR_ALIGNMENT   4

Definition at line 292 of file sgc.c.

#define PTR_SIZE   (1 << LOG_PTR_SIZE)

Definition at line 296 of file sgc.c.

#define PTR_TO_INT (   v)    ((unsigned long)(v))

Definition at line 259 of file sgc.c.

#define PUSH_COLLECT (   s,
  e,
  src 
)
Value:
if (collect_stack_count < collect_stack_size) { \
    collect_stack[collect_stack_count++] = s; \
    collect_stack[collect_stack_count++] = e + 1 - PTR_ALIGNMENT; \
    PUSH_SRC(src) \
  } else \
    push_collect(s, e + 1 - PTR_ALIGNMENT, src);

Definition at line 3478 of file sgc.c.

#define PUSH_SRC (   src)    /*empty*/

Definition at line 3450 of file sgc.c.

#define RELEASE_UNUSED_SECTORS   1

Definition at line 283 of file sgc.c.

#define RELEASE_UNUSED_SECTORS   0

Definition at line 283 of file sgc.c.

Definition at line 329 of file sgc.c.

Definition at line 332 of file sgc.c.

#define SECTOR_LOOKUP_PAGESETBITS   12

Definition at line 321 of file sgc.c.

Definition at line 328 of file sgc.c.

Definition at line 331 of file sgc.c.

Definition at line 326 of file sgc.c.

#define SECTOR_SEGMENT_GROUP_SIZE   32

Definition at line 316 of file sgc.c.

Definition at line 306 of file sgc.c.

Definition at line 305 of file sgc.c.

#define SET_NO_BACKINFO   int set_no,

Definition at line 2194 of file sgc.c.

#define SGC_AUTO_ROOTS   0

Definition at line 61 of file sgc.c.

#define SGC_STD_DEBUGGING   0

Definition at line 49 of file sgc.c.

Definition at line 56 of file sgc.c.

#define SGC_STD_DEBUGGING_WINDOWS   0

Definition at line 57 of file sgc.c.

#define SHOW_SECTOR_MAPS_AT_GC   0

Definition at line 229 of file sgc.c.

#define STACK_TRACE

Definition at line 3599 of file sgc.c.

#define STAMP_AND_REMEMBER_SOURCE   0

Definition at line 144 of file sgc.c.

Definition at line 136 of file sgc.c.

#define STDERR   stderr

Definition at line 818 of file sgc.c.

Definition at line 653 of file sgc.c.

Definition at line 654 of file sgc.c.

#define TERSE_MEMORY_TRACING   0

Definition at line 132 of file sgc.c.

#define TIME   0

Definition at line 360 of file sgc.c.

#define TRACE_COLLECT_SWITCH   1

Definition at line 3098 of file sgc.c.

#define TREE_FP (   t)    ((SectorFreepage *)(t->data))

Definition at line 597 of file sgc.c.

#define UNMARK_BIT_START   0x1

Definition at line 532 of file sgc.c.

#define USE_GC_FREE_SPACE_DIVISOR   1

Definition at line 91 of file sgc.c.

Definition at line 171 of file sgc.c.

Definition at line 88 of file sgc.c.


Typedef Documentation

typedef struct GC_Set GC_Set

Enumeration Type Documentation

anonymous enum
Enumerator:
sector_kind_block 
sector_kind_free 
sector_kind_freed 
sector_kind_chunk 
sector_kind_managed 
sector_kind_other 

Definition at line 469 of file sgc.c.

     {
  sector_kind_block,
  sector_kind_free,
#if !RELEASE_UNUSED_SECTORS
  sector_kind_freed,
#else
# define sector_kind_freed sector_kind_free
#endif
  sector_kind_chunk,
  sector_kind_managed,
  sector_kind_other
};
anonymous enum
Enumerator:
dl_normal 
dl_restored 
dl_late 

Definition at line 723 of file sgc.c.


Function Documentation

static void add_freepage ( SectorFreepage *naya) [static]

Definition at line 635 of file sgc.c.

{
  /* Index a newly-freed sector page in the three splay trees that track
     free pages by start address, by end address, and by size, so the
     allocator can later find/coalesce it from any of those keys. */
  naya->by_start.data = (void *)naya;
  sector_freepage_by_start = splay_insert(naya->start, &naya->by_start, sector_freepage_by_start);
  naya->by_end.data = (void *)naya;
  sector_freepage_by_end = splay_insert(naya->end, &naya->by_end, sector_freepage_by_end);
  naya->by_size.data = (void *)naya;
  sector_freepage_by_size = splay_insert(naya->size, &naya->by_size, sector_freepage_by_size);
  /* After splay_insert, the root not being naya means a page of this
     size was already indexed; chain this one under the existing entry,
     keyed by start address, instead of duplicating the size key. */
  if (TREE_FP(sector_freepage_by_size) != naya) {
    /* This size was already in the tree; add it to the next_size list, instead */
    SectorFreepage *already = TREE_FP(sector_freepage_by_size);
    naya->by_start_per_size.data = (void *)naya;
    already->same_size = splay_insert(naya->start, &naya->by_start_per_size, already->same_size);
  } else
    naya->same_size = NULL;
}

Here is the caller graph for this function:

static void collect_finish_chunk ( MemoryChunk **  c,
GC_Set *set 
) [static]

Definition at line 3184 of file sgc.c.

{
  /* Sweep phase for the chunk (large-object) list *c: marked chunks are
     kept and extend the plausible-pointer address range; unmarked chunks
     are released via free_chunk. */
  unsigned long local_low_plausible;
  unsigned long local_high_plausible;

  /* Work on local copies of the range bounds; globals are written back
     once at the end. */
  local_low_plausible = low_plausible;
  local_high_plausible = high_plausible;

  while (*c) {
    MemoryChunk *k = *c;

    FINISH_STATISTIC(num_finish_chunk_stat++);

    if (k->marked) {
      /* Survivor: advance the link pointer past it. */
      c = &k->next;

      FINISH_STATISTIC(num_finish_chunkkeep_stat++);

      /* Widen [low, high] plausible range; 0 means "not yet set". */
      if (!local_low_plausible || (k->start < local_low_plausible))
       local_low_plausible = k->start;
      if (!local_high_plausible || (k->end > local_high_plausible))
       local_high_plausible = k->end;     
    } else {
      FINISH_STATISTIC(num_finish_chunkfree_stat++);

      /* free_chunk unlinks k through the prev pointer c, so *c already
         names the next chunk; the loop intentionally does not advance. */
      free_chunk(k, c, set);
    }
  }

  low_plausible = local_low_plausible;
  high_plausible = local_high_plausible;
}

Here is the call graph for this function:

Here is the caller graph for this function:

static void collect_finish_common ( BlockOfMemory **  blocks,
BlockOfMemory **  block_ends,
GC_Set *set 
) [static]

Definition at line 3304 of file sgc.c.

{
  /* Sweep phase for the size-segregated "common" block lists: for each
     size class, free blocks whose elements are all unmarked, trim and
     reset search state on surviving blocks, and widen the plausible
     pointer range. block_ends[i] is reset to the list head afterwards. */
  int i;
#if KEEP_BLOCKS_FOREVER
  int kept;
#endif
  unsigned long local_low_plausible;
  unsigned long local_high_plausible;

  /* Local copies of the plausible-range bounds; written back at the end. */
  local_low_plausible = low_plausible;
  local_high_plausible = high_plausible;

  for (i = 0; i < NUM_COMMON_SIZE; i++) {
    BlockOfMemory **prev = &blocks[i];
    BlockOfMemory *block = *prev;
#if CHECK
    long size = size_map[i];
#endif

#if KEEP_BLOCKS_FOREVER
    kept = 0;
#endif

    while (block) {
      int unfree;

      FINISH_STATISTIC(num_finish_block_stat++);
      
#if CHECK
      /* Sanity-check block bounds: start <= top <= end must hold. */
      if (block->end < block->start
         || block->top < block->start
         || block->top > block->end)
       FPRINTF(STDERR,
              "bad block: %ld %ld %ld %ld\n",
              size, block->start, block->top, block->end);
#endif

#if ALLOW_SET_FINALIZER
      /* Run the per-set finalizer on each element about to be reclaimed
         (i.e., still unmarked after the mark phase). */
      if (set->finalizer) {
       unsigned long s;
       int j;
       for (j = 0, s = block->start; s < block->top; s += block->size, j++) {
         int pos = POS_TO_UNMARK_INDEX(j);
         int bit = POS_TO_UNMARK_BIT(j);

         if (NOT_MARKED(block->free[pos] & bit)) {
           void *p = INT_TO_PTR(s);
#if PAD_BOUNDARY_BYTES
           p = PAD_FORWARD(p);
#endif
           set->finalizer(p);
         }
       }
      }
#endif

      /* Scan the free/unmark bitmap from the top down for the highest
         byte containing any marked element; unfree becomes 1 past that
         byte's index, or 0 if every element in the block is unmarked. */
      unfree = 0;
      {
       int j;
       for (j = ELEM_PER_BLOCK(block); j-- ; ) {
         FINISH_STATISTIC(num_finish_blockfiltercycles_stat++);
         if ((block->free[j] & ALL_UNMARKED) != ALL_UNMARKED) {
           unfree = j + 1;
           break;
         }
       }
      }

#if KEEP_BLOCKS_FOREVER
      /* Optionally retain up to KEEP_BLOCKS_FOREVER empty blocks per
         size class instead of freeing them: reset to empty and keep. */
      if (!unfree && (kept < KEEP_BLOCKS_FOREVER)) {
       int j;
       block->top = block->start;
       for (j = ELEM_PER_BLOCK(block); j-- ; )
         block->free[j] = 0;
       kept++;
       unfree = 1;
      }
#endif

      if (!unfree) {
       /* Entire block is garbage: unlink it and return its sector. */
       FINISH_STATISTIC(num_finish_blockfree_stat++);

       --num_blocks;

       *prev = block->next;
       free_sector(block);
       mem_real_use -= SECTOR_SEGMENT_SIZE;
       block = *prev;
      } else {
#if DISTINGUISH_FREE_FROM_UNMARKED
       /* If it's unmarked, free it: */
       int j;

       for (j = ELEM_PER_BLOCK(block); j-- ; )
         block->free[j] |= SHIFT_UNMARK_TO_FREE(block->free[j]);
#endif

       /* Push down block->top if it's easy */
       {
         /* dt = bytes covered by the bitmap bytes still in use; any
            allocation space above that can be handed back by lowering
            top and clearing the bitmap bytes beyond unfree. */
         unsigned long dt = (unfree << LOG_FREE_BIT_PER_ELEM) * (unsigned long)block->size;
         if (block->top > block->start + dt) {
           int k;
           FINISH_STATISTIC(num_finish_blockadjust_stat++);
           block->top = block->start + dt;
           for (k = ELEM_PER_BLOCK(block); --k >= unfree; ) {
             block->free[k] = 0;
           }
         }
       }
       
       /* Reset the free-slot search hints so the next allocation scan
          starts from the highest in-use bitmap byte. */
       block->free_search_start = unfree - 1;
       block->free_search_bit = (FREE_BIT_START | UNMARK_BIT_START);
       block->free_search_offset = 0;

       FINISH_STATISTIC(num_finish_blockkeep_stat++);

       /* Widen the plausible pointer range (0 means "not yet set"). */
       if (!local_low_plausible || (block->start < local_low_plausible))
         local_low_plausible = block->start;
       if (!local_high_plausible || (block->end > local_high_plausible))
         local_high_plausible = block->end;

       prev = &block->next;
       block = block->next;
      }
    }

    /* Restart the "end of list" allocation cursor at the list head. */
    block_ends[i] = blocks[i];
  }

  low_plausible = local_low_plausible;
  high_plausible = local_high_plausible;
}

Here is the call graph for this function:

Here is the caller graph for this function:

static void collect_init_chunk ( MemoryChunk *c,
int  uncollectable,
int  ty 
) [static]

Definition at line 3121 of file sgc.c.

{
  /* Pre-mark pass over a chunk list: uncollectable chunks start a
     collection already marked (they always survive), collectable ones
     start unmarked. With debugging options enabled, also verifies the
     boundary padding and that the chunk lies in the plausible range. */
  for (; c; c = c->next) {
    if (uncollectable && TRACE_COLLECT_SWITCH)
      c->marked = 1;
    else
      c->marked = 0;

#if PAD_BOUNDARY_BYTES
    /* Test padding: */
    {
      void *s = INT_TO_PTR(c->start);
      long pd, sz, diff;
      sz = c->end - c->start;
      /* diff = extra bytes granted beyond the requested size (stored in
         the second long of the pad header). */
      diff = ((long *)s)[1];
      pd = *(long *)s;
      if (pd != PAD_PATTERN)
       bad_pad("start", s, ty, sz, diff, 0, pd, PAD_PATTERN);
      /* Check both longs of the end pad. */
      pd = *(long *)INT_TO_PTR(c->end - PAD_END_SIZE);
      if (pd != PAD_PATTERN)
       bad_pad("end1", s, ty, sz, diff, 0, pd, PAD_PATTERN);
      pd = *(long *)INT_TO_PTR(c->end - PAD_END_SIZE + sizeof(long));
      if (pd != PAD_PATTERN)
       bad_pad("end2", s, ty, sz, diff, 0, pd, PAD_PATTERN);
      if (diff) {
       /* Given was bigger than requested; check extra bytes: */
       unsigned char *ps = ((unsigned char *)s) + sz - PAD_END_SIZE - diff;
       long d = 0;
       while (d < diff) {
         if (*ps != PAD_FILL_PATTERN) {
           bad_pad("extra", s, ty, sz, diff, d, *ps, PAD_FILL_PATTERN);
         }
         ps++;
         d++;
       }
      }
    }
#endif

#if CHECK
    chk_count++;
    /* A chunk outside the recorded plausible range indicates corrupted
       bookkeeping. */
    if ((!low_plausible || (c->start < low_plausible))
       || (!high_plausible || (c->end > high_plausible)))
      FPRINTF(STDERR, "implausible chunk!\n");
#endif
  }
}

Here is the caller graph for this function:

static void collect_init_common ( BlockOfMemory **  blocks,
int  uncollectable,
int  ty 
) [static]

Definition at line 3217 of file sgc.c.

/* Pre-collection initialization for the common-size block lists:
   for every block of every size class, resets the per-element
   free/unmark bits so that allocated, collectable elements start in
   the "unmarked" state for the coming mark phase.  Uncollectable sets
   instead keep/copy their free bits so live elements stay marked.
   blocks: array of NUM_COMMON_SIZE block-list heads;
   uncollectable: non-zero for an uncollectable set;
   ty: set index, used only for bad_pad() diagnostics. */
{
  int i, j;
  int boundary, boundary_val = 0;

  for (i = 0; i < NUM_COMMON_SIZE; i++) {
    BlockOfMemory *block = blocks[i];

    while (block) {
#if CHECK
      cmn_count++;
      if ((!low_plausible || (block->start < low_plausible))
         || (!high_plausible || (block->end > high_plausible)))
       FPRINTF(STDERR, "implausible block!\n");
#endif

#if STAMP_AND_REMEMBER_SOURCE
      block->low_marker = block->high_marker = 0;
#endif

#if PAD_BOUNDARY_BYTES
      /* Test padding: check the guard words of every element allocated
         so far (start .. top) in this block; see SET_PAD. */
      {
       unsigned long p;
       long size = size_map[i];
       
       for (p = block->start; p < block->top; p += size) {
         void *s = INT_TO_PTR(p);
         long pd, diff;
         pd = *(long *)s;
         diff = ((long *)s)[1];
         if (pd != PAD_PATTERN)
           bad_pad("start", s, ty, size, diff, 0, pd, PAD_PATTERN);
         pd = *(long *)INT_TO_PTR(p + size - PAD_END_SIZE);
         if (pd != PAD_PATTERN)
           bad_pad("end1", s, ty, size, diff, 0, pd, PAD_PATTERN);
         pd = *(long *)INT_TO_PTR(p + size - PAD_END_SIZE + sizeof(long));
         if (pd != PAD_PATTERN)
           bad_pad("end2", s, ty, size, diff, 0, pd, PAD_PATTERN);
         if (diff) {
           /* Given was bigger than requested; check extra bytes: */
           unsigned char *ps = ((unsigned char *)s) + size - PAD_END_SIZE - diff;
           long d = 0;
           while (d < diff) {
             if (*ps != PAD_FILL_PATTERN) {
              bad_pad("extra", s, ty, size, diff, d, *ps, PAD_FILL_PATTERN);
             }
             ps++;
             d++;
           }
         }
       }
      }
#endif

      if (uncollectable && TRACE_COLLECT_SWITCH) {
       /* Uncollectable: elements are never reclaimed, so clear (or copy
          from the free bits) rather than setting everything unmarked. */
       for (j = ELEM_PER_BLOCK(block); j-- ; ) {
#if DISTINGUISH_FREE_FROM_UNMARKED
         block->free[j] = SHIFT_COPY_FREE_TO_UNMARKED(block->free[j]);
#else
         block->free[j] = 0;
#endif
       }
      } else {
       if (block->top < block->end) {
         /* Block is only partly bump-allocated: find the free[] element
            index and partial bit pattern corresponding to block->top, so
            never-allocated elements above it are left with clear bits. */
         int pos = block->positions[(block->top - block->start) >> LOG_PTR_SIZE];
         boundary = POS_TO_UNMARK_INDEX(pos);
         boundary_val = (POS_TO_UNMARK_BIT(pos) - 1) & ALL_UNMARKED;
       } else {
         boundary = ELEM_PER_BLOCK(block);
       }

       /* Below the boundary: mark all elements "unmarked" (candidates
          for collection).  At the boundary: partial pattern.  Above it:
          clear (never allocated). */
       for (j = ELEM_PER_BLOCK(block); j-- ; ) {
         if (j < boundary)
           block->free[j] |= ALL_UNMARKED;
         else if (j == boundary)
           block->free[j] = boundary_val;
         else
           block->free[j] = 0;
       }
      }

      block = block->next;
    }
  }
}

Here is the caller graph for this function:

/* qsort() comparator for the roots array: orders root addresses
   (stored as unsigned longs) in ascending order.
   Returns -1, 0, or 1.  The 0 case for equal keys is required by the
   qsort comparison-function contract (C11 7.22.5); the previous
   version returned 1 for equal keys, which is an inconsistent
   ordering and technically undefined behavior. */
static int compare_roots(const void *a, const void *b)
{
  unsigned long av = *(const unsigned long *)a;
  unsigned long bv = *(const unsigned long *)b;

  if (av < bv)
    return -1;
  else if (av > bv)
    return 1;
  else
    return 0;
}

Here is the caller graph for this function:

static void do_disappear_and_finals ( ) [static]

Definition at line 4133 of file sgc.c.

/* End-of-collection processing of disappearing links and finalizers.
   The ordering of the collect() phases below is significant: watched
   and data pointers of already-queued finalizers are marked first,
   then eager finalizers (levels 1 and 2) are queued before anything
   reachable from non-eager finalized blocks is marked, and disappearing
   links are cleared/restored around those phases. */
{
  DisappearingLink *dl, *next;
  Finalizer *fn;
  int j;

  /* Mark data in (not-yet-finalized) queued finalizable */
  for (fn = queued_finalizers; fn; fn = fn->next) {
    unsigned long p;

    p = PTR_TO_INT(&fn->u.watch);
    PUSH_COLLECT(p, p + PTR_SIZE, 0);

    p = PTR_TO_INT(&fn->data);
    PUSH_COLLECT(p, p + PTR_SIZE, 0);
  }
  collect();
  if (GC_push_last_roots_again) { GC_push_last_roots_again(); collect(); }

#if !NO_DISAPPEARING
  /* Do disappearing: */
  do_disappearing(&disappearing);
#endif

  /* Queue unreachable eager finalizable, level 1: */  
  /* DO NOT COLLECT FROM collect_stack UNTIL AFTER THIS LOOP */
  /* (Otherwise, some ready eager finalizations may not be queued.) */
  for (j = 0; j < num_common_sets; j++) {
    queue_chunk_finalizeable(*(common_sets[j]->othersptr), 1);
    queue_common_finalizeable(common_sets[j]->blocks, 1);
  }
  collect();
  if (GC_push_last_roots_again) { GC_push_last_roots_again(); collect(); }

  /* Queue unreachable eager finalizable, level 2: */  
  /* DO NOT COLLECT FROM collect_stack UNTIL AFTER THIS LOOP */
  for (j = 0; j < num_common_sets; j++) {
    queue_chunk_finalizeable(*(common_sets[j]->othersptr), 2);
    queue_common_finalizeable(common_sets[j]->blocks, 2);
  }
  collect();
  if (GC_push_last_roots_again) { GC_push_last_roots_again(); collect(); }

  /* Mark reachable from (non-eager) finalized blocks: */
  for (j = 0; j < num_common_sets; j++) {
    mark_chunks_for_finalizations(*(common_sets[j]->othersptr));
    mark_common_for_finalizations(common_sets[j]->blocks, common_sets[j]->atomic);
  }

  /* Queue unreachable (non-eager) finalizable: */  
  for (j = 0; j < num_common_sets; j++) {
    queue_chunk_finalizeable(*(common_sets[j]->othersptr), 0);
    queue_common_finalizeable(common_sets[j]->blocks, 0);
  }
  collect();

  /* Restore disappeared links where watch value is NULL: */
  /* (do_disappearing() stashed the old value in saved_value for
     dl_restored links; put it back and forget the saved copy.) */
  for (dl = disappearing; dl; dl = next) {
    next = dl->next;
    if ((dl->kind == dl_restored) && dl->saved_value) {
      /* Restore disappearing value and deregister */
      *dl->disappear = dl->saved_value;
      dl->saved_value = NULL;
    }
  }

  if (GC_push_last_roots_again) { GC_push_last_roots_again(); collect(); }

  /* Deregister dangling disappearings: */
  trim_disappearing(&disappearing);
  trim_disappearing(&late_disappearing);

#if !NO_DISAPPEARING
  /* Do late disappearing: */
  do_disappearing(&late_disappearing);
#endif

  if (GC_custom_finalize)
    GC_custom_finalize();
}

Here is the call graph for this function:

Here is the caller graph for this function:

static void do_disappearing ( DisappearingLink **  disappearing_ptr) [static]

Definition at line 4060 of file sgc.c.

/* Processes one disappearing-link list: for every registered link
   whose watched object was allocated by this GC but is no longer
   reachable (not found by find_ptr), clears the link's target.
   Plain links are also unchained and freed; dl_restored links keep
   the old value in saved_value so do_disappear_and_finals() can put
   it back later.  The (possibly updated) list head is written back
   through disappearing_ptr. */
{
  DisappearingLink *link, *follow, *head;
  void *target;
  int found_size;

  head = *disappearing_ptr;

  for (link = head; link; link = follow) {
    follow = link->next;

    /* Watch either the explicitly registered object or the current
       contents of the disappearing slot: */
    target = link->watch ? link->watch : *link->disappear;

    found_size = 0;
    if (!target || find_ptr(target, &found_size, NULL, NULL, NULL, 0))
      continue; /* nothing watched, or the object is still live */
    if (!found_size)
      continue; /* pointer was never a GC allocation at all */

    /* The object was allocated here and is now unreachable. */
    if (link->kind == dl_restored) {
      /* Remember the value so it can be restored after finalization: */
      link->saved_value = *link->disappear;
      *link->disappear = NULL;
    } else {
      *link->disappear = NULL;
      /* This link's job is done; unchain and release it: */
      if (link->prev)
        link->prev->next = link->next;
      else
        head = link->next;
      if (link->next)
        link->next->prev = link->prev;

      mem_real_use -= sizeof(DisappearingLink);
      free_managed(link);
      --GC_dl_entries;
    }
  }

  *disappearing_ptr = head;
}

Here is the call graph for this function:

Here is the caller graph for this function:

static void do_GC_gcollect ( void * stack_now) [static]

Definition at line 4335 of file sgc.c.

/* The main collection driver.  Phases, in order: per-set mark-bit
   initialization; mark from registered roots; mark from the C stack
   (stack_now .. GC_stackbottom); mark from uncollectable sets; mark
   from external "last" roots; disappearing links and finalization;
   per-set sweep/finish; recompute mem_limit; run queued finalizers.
   stack_now: current stack position, used as one end of the
   conservative stack scan. */
{
  long root_marked;
  int j;

#if PRINT_INFO_PER_GC
  long orig_mem_use = mem_use;
  long start_time;
  start_time = GETTIME();
  FPRINTF(STDERR, "gc at %ld (%ld): %ld after %ld msecs\n",
         mem_use, sector_mem_use, 
# if GET_MEM_VIA_SBRK
         (long)sbrk(0),
# elif defined(WIN32) && AUTO_STATIC_ROOTS_IF_POSSIBLE
         total_memory_use(),
# else
         (long)0,
# endif
         start_time - last_gc_end);
# if SHOW_SECTOR_MAPS_AT_GC
  dump_sector_map("");
# endif
#endif

  if (!GC_stackbottom) {
    /* Stack position not yet initialized; delay collection */
    if (mem_use)
      mem_limit = MEM_USE_FACTOR * mem_use;
    return;
  }

  if (!initialized)
    GC_initialize();

  if (!statics_setup)
    init_static_variables();

  if (GC_collect_start_callback)
    GC_collect_start_callback();

#if CHECK_COLLECTING
  collecting_now = 1;
#endif

#if !NO_COLLECTIONS

# if ALWAYS_TRACE && ALLOW_TRACE_COUNT
  collecting_with_trace_count = 1;
# endif

# if CHECK
  cmn_count = chk_count = 0;
# endif

  INITTIME();
  PRINTTIME((STDERR, "gc: init start: %ld\n", GETTIMEREL()));

  /* Reset mark state for every (unlocked) set: */
  for (j = 0; j < num_common_sets; j++) {
# if ALLOW_SET_LOCKING
    if (!common_sets[j]->locked) {
# endif
      collect_init_chunk(*(common_sets[j]->othersptr),
                      common_sets[j]->uncollectable,
                      j);
      collect_init_common(common_sets[j]->blocks,
                       common_sets[j]->uncollectable,
                       j);
# if ALLOW_SET_LOCKING
    }
# endif
  }

# if CHECK
  if (num_chunks != chk_count) {
    FPRINTF(STDERR, "bad chunk count: %ld != %ld\n", num_chunks, chk_count);
  }

  if (num_blocks != cmn_count) {
    FPRINTF(STDERR, "bad block count: %ld != %ld\n", num_blocks, cmn_count);
  }
# endif

# if PRINT
  FPRINTF(STDERR, "gc at %ld (%ld)\n", mem_use, mem_real_use);
  FPRINTF(STDERR,
         "low: %lx hi: %lx blocks: %ld chunks: %ld\n", 
         low_plausible, high_plausible, 
         num_blocks, num_chunks);
# endif

  /* mem_use is recomputed by the mark phase: */
  mem_use = 0;

  sort_and_merge_roots();

# if ALLOW_TRACE_COUNT
  init_trace_stack(&collect_trace_stack);
  init_trace_stack(&collect_wait_trace_stack);
  collect_start_tracing = 0;
  collect_end_tracing = -1;
# endif
# if ALLOW_TRACE_PATH
  init_trace_stack(&collect_trace_path_stack);
# endif

  prepare_collect_temp();

  /*** Mark from roots ***/
  collect_stack_size = roots_count ? COLLECT_STACK_FRAME_SIZE * roots_count : 10;
  if (collect_stack_size < INITIAL_COLLECT_STACK_SIZE)
    collect_stack_size = INITIAL_COLLECT_STACK_SIZE;
  collect_stack_count = 0;
  collect_stack = (unsigned long *)realloc_collect_temp(NULL,
                                                 0,
                                                 sizeof(unsigned long) 
                                                 * (collect_stack_size + 2));

  /* Seed the mark stack with every registered root range: */
  for (j = 0; j < roots_count; j += 2) {
    collect_stack[collect_stack_count++] = roots[j];
    collect_stack[collect_stack_count++] = roots[j + 1];
# if KEEP_DETAIL_PATH
    collect_stack[collect_stack_count++] = 0;
# endif
  }

  if (GC_initial_trace_root) {
# if CHECK_SKIP_MARK_AT_FIRST
    collect_start_disable_mark_skip = collect_stack_count;
    skip_mark_at_first = GC_inital_root_skip;
# endif
    collect_stack[collect_stack_count++] = (unsigned long)&GC_initial_trace_root;
    collect_stack[collect_stack_count++] = ((unsigned long)&GC_initial_trace_root) + 1;
# if KEEP_DETAIL_PATH
    collect_stack[collect_stack_count++] = 0;
# endif
  }

  PRINTTIME((STDERR, "gc: root collect start: %ld\n", GETTIMEREL()));

# if ALLOW_TRACE_COUNT
  collect_trace_count = 0;
  mem_traced = 0;
# endif

# if ALLOW_TRACE_PATH
  current_trace_source = "root";
# endif

  collect();

# if ALLOW_SET_LOCKING
  /* Locked sets were skipped above; mark their contents wholesale: */
  for (j = 0; j < num_common_sets; j++) {
    if (common_sets[j]->locked) {
      int a = common_sets[j]->atomic;
      push_locked_chunk(*(common_sets[j]->othersptr), a);
      push_locked_common(common_sets[j]->blocks, a);
    }
  }

  collect();
# endif

# if ALLOW_TRACE_COUNT
  traced_from_roots = collect_trace_count;
  collect_trace_count = 0;
# endif

  root_marked = mem_use;

  PRINTTIME((STDERR, "gc: stack push start: %ld\n", GETTIMEREL()));

  /*** Mark from stack ***/
  push_stack(stack_now);
  
# if PRINT && 0
  FPRINTF(STDERR, "stack until: %ld\n", collect_end_stackbased);
# endif

# if ALLOW_TRACE_PATH
  current_trace_source = "stack";
# endif

  PRINTTIME((STDERR, "gc: stack collect start: %ld\n", GETTIMEREL()));

  collect();

# if ALLOW_TRACE_COUNT
  traced_from_stack = collect_trace_count;
  collect_trace_count = 0;
# endif

  PRINTTIME((STDERR, "gc: uncollectable start: %ld\n", GETTIMEREL()));

  /*** Uncollectable and pointerful ***/
  for (j = 0; j < num_common_sets; j++)
    if (common_sets[j]->uncollectable)
      if (!common_sets[j]->atomic
# if ALLOW_TRACE_COUNT
         || collecting_with_trace_count
# endif
         ) {
       push_uncollectable_chunk(*(common_sets[j]->othersptr), common_sets[j]);
       push_uncollectable_common(common_sets[j]->blocks, common_sets[j]);
      }

# if ALLOW_TRACE_PATH
  current_trace_source = "uncollectable";
# endif

  collect();

# if ALLOW_TRACE_COUNT
  traced_from_uncollectable = collect_trace_count;
  collect_trace_count = 0;
# endif

# if ALLOW_TRACE_PATH
  /* External stacks may collect eagerly: */
  current_trace_source = "xstack";
# endif

  if (GC_push_last_roots) {
    PRINTTIME((STDERR, "gc: last roots push start: %ld\n", GETTIMEREL()));
    /*** ``Last'' roots external hook ***/
    GC_push_last_roots();
    PRINTTIME((STDERR, "gc: last roots start: %ld\n", GETTIMEREL()));
  }

# if ALLOW_TRACE_PATH
  current_trace_source = "xstack";
# endif

  collect();
  
# if ALLOW_TRACE_COUNT
  /* Count this as stack tracing */
  traced_from_stack += collect_trace_count;
  collect_trace_count = 0;
# endif
  
  PRINTTIME((STDERR, "gc: queue finalize start: %ld\n", GETTIMEREL()));

# if ALLOW_TRACE_PATH
  current_trace_source = "finalization";
# endif

  /*** Disappearing Links and Finalization ***/
  do_disappear_and_finals();

# if ALLOW_TRACE_COUNT
  traced_from_finals = collect_trace_count;
# endif

  PRINTTIME((STDERR, "gc: finish start: %ld\n", GETTIMEREL()));

  /* Sweep: the finish passes rebuild the plausible address range. */
  low_plausible = high_plausible = 0;

  for (j = 0; j < num_common_sets; j++) {
    FINISH_STATISTIC(num_finishes_stat++);
    collect_finish_chunk(common_sets[j]->othersptr, common_sets[j]);
    collect_finish_common(common_sets[j]->blocks, 
                       common_sets[j]->block_ends,
                       common_sets[j]);
  }

  PRINTTIME((STDERR, "gc: all done: %ld\n", GETTIMEREL()));

# if PRINT
  FPRINTF(STDERR,
         "done %ld (%ld), %ld from stack\n", mem_use, mem_real_use,
         mem_use - root_marked);
# endif

  if (mem_use) {
# if USE_GC_FREE_SPACE_DIVISOR
    long root_size;

    if (roots_count)
      root_size = roots[1] - roots[0];
    else
      root_size = 0;

    mem_limit = mem_use + ((sector_mem_use + root_size) / GC_free_space_divisor);
# else
    mem_limit = MEM_USE_FACTOR * mem_use;
# endif
  }

  /* NOTE(review): the stack was allocated above with
     (collect_stack_size + 2) elements but is released here with
     (collect_stack_size + 1) — harmless for FREE(), but the oldsize
     passed under GET_MEM_VIA_MMAP is inconsistent; confirm intent. */
  free_collect_temp(collect_stack, sizeof(unsigned long) * (collect_stack_size + 1));

# if ALLOW_TRACE_COUNT
  done_trace_stack(&collect_trace_stack);
  done_trace_stack(&collect_wait_trace_stack);
# endif
# if ALLOW_TRACE_PATH
  done_trace_stack(&collect_trace_path_stack);
# endif

#else
  if (mem_use)
    mem_limit = MEM_USE_FACTOR * mem_use;
#endif

#if PRINT_INFO_PER_GC
  FPRINTF(STDERR, "done  %ld (%ld); recovered %ld in %ld msecs\n",
         mem_use, sector_mem_use, orig_mem_use - mem_use,
         (long)GETTIME() - start_time);
# if SHOW_SECTOR_MAPS_AT_GC
  dump_sector_map("                            ");
# endif
  last_gc_end = GETTIME();
#endif

#if STAMP_AND_REMEMBER_SOURCE
  stamp_clock++;
#endif

#if CHECK_COLLECTING
  collecting_now = 0;
#endif

  if (GC_collect_end_callback)
    GC_collect_end_callback();

  /* Run queued finalizers. Garbage collections may happen: */
  PRINTTIME((STDERR, "gc: finalize start: %ld\n", GETTIMEREL()));
  run_finalizers();
  PRINTTIME((STDERR, "gc: finalize end: %ld\n", GETTIMEREL()));

#if MARK_STATS
  fprintf(STDERR, 
         "mark stats:\n"
         " %d pairs\n"
         " %d lookups\n"
         "   %d interior\n"
         "   %d plausible\n"
         "     %d paged\n"
         "       %d block page\n"
         "         %d block\n"
         "           %d block aligned\n"
         "             %d block mark\n"
         "               %d block pushes\n"
         "                 %d block tail pushes\n"
         "       %d chunk page\n"
         "         %d chunk mark\n",
         num_pairs_stat,
         num_checks_stat,
         num_interior_checks_stat,
         num_plausibles_stat,
         num_pages_stat,
         num_blocks_stat,
         num_blockallocs_stat,
         num_blockaligns_stat,
         num_blockmarks_stat,
         num_blockpushes_stat,
         num_blockpushes_tail_stat,
         num_chunks_stat,
         num_chunkmarks_stat);
#endif
#if ALLOC_STATS
  fprintf(STDERR, 
         "alloc stats:\n"
         " %d allocs\n"
         "   %d nonzero allocs\n"
         "   %d common allocs\n"
         "     %d common tries\n"
         "     %d common fails\n"
         "     %d common second tries\n"
         "     %d common newblocks\n"
         "   %d chunk allocs\n",
         num_allocs_stat,
         num_nonzero_allocs_stat,
         num_common_allocs_stat,
         num_block_alloc_checks_stat,
         num_block_alloc_nexts_stat,
         num_block_alloc_second_checks_stat,
         num_newblock_allocs_stat,
         num_chunk_allocs_stat);
#endif
#if FINISH_STATS
  fprintf(STDERR,
         "finish stats:\n"
         " %d finishes\n"
         "  %d chunk finishes\n"
         "   %d chunk keep finishes\n"
         "   %d chunk free finishes\n"
         "  %d block finishes\n"
         "   %d block filter steps\n"
         "   %d block keep finishes\n"
         "   %d block free finishes\n"
         "   %d block adjust finishes\n",
         num_finishes_stat,
         num_finish_chunk_stat,
         num_finish_chunkkeep_stat,
         num_finish_chunkfree_stat,
         num_finish_block_stat,
         num_finish_blockfiltercycles_stat,
         num_finish_blockkeep_stat,
         num_finish_blockfree_stat,
         num_finish_blockadjust_stat);
#endif
}

Here is the call graph for this function:

Here is the caller graph for this function:

static void* do_malloc ( SET_NO_BACKINFO unsigned long  size,
BlockOfMemory **  common,
MemoryChunk **  othersptr,
int  flags 
) [static]

Definition at line 2201 of file sgc.c.

/* Central allocator.  Three paths:
     1. size fits a common size class: allocate from an existing
        BlockOfMemory, either by scanning its free bits or by bumping
        block->top (at the block_top label);
     2. large size: allocate a dedicated MemoryChunk in its own
        sector(s);
     3. no usable common block: create a new BlockOfMemory, then fall
        through to the bump path at block_top.
   flags: do_malloc_ATOMIC suppresses pointer-clearing (contents are
   not zeroed) and do_malloc_UNCOLLECTABLE charges the allocation to
   mem_uncollectable_use instead of mem_use.
   Returns a pointer to the payload (shifted past the pad words when
   PAD_BOUNDARY_BYTES is on). */
{
  BlockOfMemory **find, *block;
  BlockOfMemory **common_ends;
  void *s;
  long c;
  unsigned long p;
  long sizeElemBit;
  int i, cpos, elem_per_block, extra_alignment;
#if PAD_BOUNDARY_BYTES
  long origsize;
#endif

#if CHECK_COLLECTING
  if (collecting_now) {
    exit(-1);
  }
#endif

  ALLOC_STATISTIC(num_allocs_stat++);

  /* Size-0 requests all share one dummy address: */
  if (!size)
    return (void *)&zero_ptr;

  ALLOC_STATISTIC(num_nonzero_allocs_stat++);

#if PAD_BOUNDARY_BYTES
  origsize = size;
  size += PAD_START_SIZE + PAD_END_SIZE;
#endif

  if (size < (MAX_COMMON_SIZE - PTR_SIZE + 1)) {
    /* Path 1: common-size allocation from a size-class block list. */
    ALLOC_STATISTIC(num_common_allocs_stat++);

    if (!size_map)
      init_size_map();

    /* Map the request to a size-class index, then round the size up
       to that class: */
    cpos = size_index_map[((size + PTR_SIZE - 1) >> LOG_PTR_SIZE) - 1];
#if 0
    if (size > size_map[cpos]) {
      FPRINTF(STDERR, "map error: %d < %d\n", size_map[cpos], size);
    }
#endif
    size = size_map[cpos];

    /* Search starts at the cached search-position block (second half
       of the common array); see the common_ends handling below. */
    block = common[cpos + NUM_COMMON_SIZE];
    find = NULL;

    while (block) {
      int search_bit, search_offset;

      /* Room left in the bump region? Use it directly: */
      if (block->top < block->end)
       goto block_top;

      ALLOC_STATISTIC(num_block_alloc_checks_stat++);

      search_bit = block->free_search_bit;
      search_offset = block->free_search_offset;

      /* Scan free-bit words for a freed element to reuse: */
      for (i = block->free_search_start; i >= 0; i--)
       if (block->free[i]) {
         char *zp;
         int v = block->free[i];
         
         while (IS_MARKED(v & search_bit)) {
           search_bit = search_bit << FREE_BIT_SIZE;
           search_offset++;
         }
         /* Claim the element by clearing its free bit, and remember
            where to resume the next search: */
         block->free[i] -= search_bit;
         block->free_search_start = i;
         block->free_search_bit = search_bit << FREE_BIT_SIZE;
         block->free_search_offset = search_offset + 1;

         c = (i << LOG_FREE_BIT_PER_ELEM) + search_offset;
         
         if (flags & do_malloc_UNCOLLECTABLE)
           mem_uncollectable_use += size;
         else
           mem_use += size;
         
         p = block->start + c * size;

         zp = INT_TO_PTR(p);

         /* Non-atomic memory must be zeroed so stale pointers are not
            traced: */
         if (!(flags & do_malloc_ATOMIC)) {
           void **p = (void **)zp;
           unsigned long sz = size >> LOG_PTR_SIZE;
           for (; sz--; p++)
             *p = 0;
         }

#if CHECK_WATCH_FOR_PTR_ALLOC
         if (zp == UNHIDE_WATCH(GC_watch_for_ptr)) {
#if USE_WATCH_FOUND_FUNC
           GC_found_watch();
#else
           findings++;
#endif
         }
#endif

#if PAD_BOUNDARY_BYTES
         SET_PAD(zp, size, origsize);
         zp = PAD_FORWARD(zp);
#endif

         return zp;
       } else {
         search_bit = (FREE_BIT_START | UNMARK_BIT_START);
         search_offset = 0;
       }

      find = &block->next;

      block = block->next;
      common[cpos + NUM_COMMON_SIZE] = block;

      ALLOC_STATISTIC(num_block_alloc_nexts_stat++);
      ALLOC_STATISTIC(if (block) num_block_alloc_second_checks_stat++);
    }

  } else {
    /* Path 2: large request gets its own MemoryChunk. */
    void *a;
    MemoryChunk *c;

    /* Round up to ptr-aligned size: */
    if (size & (PTR_SIZE-1))
      size += PTR_SIZE - (size & (PTR_SIZE-1));

    ALLOC_STATISTIC(num_chunk_allocs_stat++);

    cpos = 0;

    a = malloc_sector(size + sizeof(MemoryChunk), sector_kind_chunk, 1);
    if (!a) {
      /* First try failed; collect if we are over budget, then retry
         without the "can fail" flag: */
      if (mem_use >= mem_limit)
       GC_gcollect();
      
      a = malloc_sector(size + sizeof(MemoryChunk), sector_kind_chunk, 0);
    }

    c = (MemoryChunk *)a;
    
    c->finalizers = NULL;
    c->marked = 1;

#if STAMP_AND_REMEMBER_SOURCE
    c->make_time = stamp_clock;
#endif
#if KEEP_CHUNK_SET_NO
    c->set_no = set_no;
#endif

    /* Link into the set's chunk list: */
    c->next = *othersptr;
#if CHECK_FREES
    if (PTR_TO_INT(c->next) & (SECTOR_SEGMENT_SIZE - 1))
      free_error("bad next\n");
#endif
    *othersptr = c;
#if KEEP_PREV_PTR
    c->prev_ptr = othersptr;
    if (c->next)
      c->next->prev_ptr = &c->next;
#endif
    
    c->start = PTR_TO_INT(&c->data);
    c->end = c->start + size;
    c->atomic = flags & do_malloc_ATOMIC;

    if (flags & do_malloc_UNCOLLECTABLE)
      mem_uncollectable_use += size;
    else
      mem_use += size;
    mem_real_use += (size + sizeof(MemoryChunk));
    num_chunks++;

    if (!low_plausible || (c->start < low_plausible))
      low_plausible = c->start;
    if (!high_plausible || (c->end > high_plausible))
      high_plausible = c->end;     

    /* Zero non-atomic memory (see note above): */
    if (!(flags & do_malloc_ATOMIC)) {
      void **p = (void **)&c->data;
      unsigned long sz = size >> LOG_PTR_SIZE;
      for (; sz--; p++)
       *p = 0;
    }

#if CHECK_WATCH_FOR_PTR_ALLOC
    if ((&c->data) == UNHIDE_WATCH(GC_watch_for_ptr)) {
#if USE_WATCH_FOUND_FUNC
      GC_found_watch();
#else
      findings++;
#endif
    }
#endif

    s = (void *)&c->data;

#if PAD_BOUNDARY_BYTES
    SET_PAD(s, size, origsize);
    s = PAD_FORWARD(s);
#endif

    return s;
  }

  /* Path 3: no existing common block had room; make a new one. */
  ALLOC_STATISTIC(num_newblock_allocs_stat++);

  sizeElemBit = size << LOG_FREE_BIT_PER_ELEM;
  
#if PAD_BOUNDARY_BYTES
  /* Assume alignment */
  extra_alignment = (DOUBLE_SIZE - PTR_SIZE);
#else
  extra_alignment = (size & (DOUBLE_SIZE - 1)) ? 0 : (DOUBLE_SIZE - PTR_SIZE);
#endif

  /* upper bound: */
  elem_per_block = (SECTOR_SEGMENT_SIZE - sizeof(BlockOfMemory)) / sizeElemBit;
  /*                ^- mem area size      ^- block record */
  /* use this one: */
  elem_per_block = ((SECTOR_SEGMENT_SIZE - sizeof(BlockOfMemory) - elem_per_block
  /*                ^- mem area size      ^- block record       ^- elems     */
                   - (extra_alignment + PTR_SIZE - 2)) / sizeElemBit);
  /*                     ^- possible elem padding, -2 since BlockOfMemory has free[1] */
  if (elem_per_block) {
    /* Small enough to fit into one segment */
    c = SECTOR_SEGMENT_SIZE;
  } else {
    elem_per_block = 1;
    /* Add (PTR_SIZE - 1) to ensure enough room after alignment: */
    c = sizeof(BlockOfMemory) + (PTR_SIZE - 1) + sizeElemBit;
  }

  block = (BlockOfMemory *)malloc_sector(c, sector_kind_block, 1);
  if (!block) {
    if (mem_use >= mem_limit) {
      /* Collect and restart the whole allocation from scratch: */
      GC_gcollect();
      return do_malloc(KEEP_SET_INFO_ARG(set_no)
                     size, common, othersptr, flags);
    } else
      block = (BlockOfMemory *)malloc_sector(c, sector_kind_block, 0);
  }

  
  block->elem_per_block = elem_per_block;

  block->finalizers = NULL;

#if STAMP_AND_REMEMBER_SOURCE
  block->make_time = stamp_clock;
#endif
#if KEEP_SET_NO
  block->set_no = set_no;
#endif

  /* offset for data (ptr aligned): */
  c = sizeof(BlockOfMemory) + (elem_per_block - 1);
  if (c & (PTR_SIZE - 1))
    c += (PTR_SIZE - (c & (PTR_SIZE - 1)));
#if !PAD_BOUNDARY_BYTES
  if (!(size & (DOUBLE_SIZE - 1))) /* Even more alignment for doubles: */
#else
    /* Assume alignment */
#endif
    if (c & (DOUBLE_SIZE - 1))
      c += (DOUBLE_SIZE - (c & (DOUBLE_SIZE - 1)));
  p = PTR_TO_INT(block) + c;

  common_ends = common + NUM_COMMON_SIZE;

  if (common_ends[cpos] || (find && !common[cpos])) {
    /* hey! - GC happened and reset stuff. find may not be alive anymore,
       so find it again. */
    find = &common_ends[cpos];
    while (*find)
      find = &(*find)->next;
  }

  if (find)
    *find = block;
  else if (!common[cpos])
    common[cpos] = block;

  if (!common_ends[cpos])
    common_ends[cpos] = block;

  num_blocks++;

  for (i = ELEM_PER_BLOCK(block); i-- ; )
    block->free[i] = 0;
  block->free_search_start = -1; /* a free search will not yield results until a GC */

  block->start = block->top = p;
  block->end = block->start + (elem_per_block * sizeElemBit);
  block->size = (short)size;
  block->next = NULL;
  block->atomic = flags & do_malloc_ATOMIC;
  if (!common_positionses[cpos])
    init_positions(cpos, size, elem_per_block);
  block->positions = common_positionses[cpos];

  if (!low_plausible || (block->start < low_plausible))
    low_plausible = block->start;
  if (!high_plausible || (block->end > high_plausible))
    high_plausible = block->end;   

  mem_real_use += SECTOR_SEGMENT_SIZE;

 block_top:
  /* Bump allocation: take the element at block->top. */

#if STAMP_AND_REMEMBER_SOURCE
  block->use_time = stamp_clock;
#endif

#if CHECK
  if (block->end < block->start
      || block->top < block->start
      || block->top > block->end)
    FPRINTF(STDERR,
           "bad block: %ld %ld %ld %ld\n",
           size, block->start, block->top, block->end);
#endif      

  s = INT_TO_PTR(block->top);
  block->top = block->top + size;

  if (flags & do_malloc_UNCOLLECTABLE)
    mem_uncollectable_use += size;
  else
    mem_use += size;

  /* Zero non-atomic memory (see note above): */
  if (!(flags & do_malloc_ATOMIC)) {
    void **p = (void **)s;
    unsigned long sz = size >> LOG_PTR_SIZE;
    for (; sz--; p++)
      *p = 0;
  }

#if CHECK_WATCH_FOR_PTR_ALLOC
  if (s == UNHIDE_WATCH(GC_watch_for_ptr)) {
#if USE_WATCH_FOUND_FUNC
    GC_found_watch();
#else
    findings++;
#endif
  }
#endif

#if PAD_BOUNDARY_BYTES
    SET_PAD(s, size, origsize);
    s = PAD_FORWARD(s);
#endif

  return s;
}

Here is the call graph for this function:

Here is the caller graph for this function:

static void dump_sector_map ( char *  prefix) [static]

Definition at line 1829 of file sgc.c.

{
  FPRINTF(STDERR, "%sBegin Sectors\n"
         "%sO0:free; ,.:block; =-:chunk; mn:other; \"':other; %d each\n%s",
         prefix, prefix, SECTOR_SEGMENT_SIZE, prefix);
  {
    int i, j;
    int c = 0;
    unsigned long was_sec = 0;
    int was_kind = 0;

    for (i = 0; i < (1 << SECTOR_LOOKUP_PAGESETBITS); i++) {
      SectorPage *pagetable;
      pagetable = sector_pagetables[i];
      if (pagetable) {
       for (j = 0; j < SECTOR_LOOKUP_PAGESIZE; j++) {
         long kind;
         kind = pagetable[j].kind;
         if (kind != sector_kind_free) {
           char *same_sec, *diff_sec;

           if (c++ > 40) {
             FPRINTF(STDERR, "\n%s", prefix);
             c = 1;
           }

           switch(kind) {
#if !RELEASE_UNUSED_SECTORS
           case sector_kind_freed:
             same_sec = "0";
             diff_sec = "O";
             break;
#endif
           case sector_kind_block:
             same_sec = ".";
             diff_sec = ",";
             break;
           case sector_kind_chunk:
             same_sec = "-";
             diff_sec = "=";
             break;
           case sector_kind_managed:
             same_sec = "n";
             diff_sec = "m";
             break;
           case sector_kind_other:
             same_sec = "'";
             diff_sec = "\"";
             break;
           default:
             same_sec = "?";
             diff_sec = "?";
             break;
           }

           if ((was_kind != kind) || (was_sec != pagetable[j].start))
             same_sec = diff_sec;

           FPRINTF(STDERR, same_sec);
           
           was_kind = kind;
           was_sec = pagetable[j].start;
         }
       }
      }
    }
  }
  FPRINTF(STDERR, "\n%sEnd Sectors\n", prefix);
}

Here is the caller graph for this function:

static void enqueue_fn ( Finalizer * fn) [static]

Definition at line 3962 of file sgc.c.

/* Appends a newly-ready finalizer to the tail of the queued-finalizer
   list and immediately marks the watched pointer (fn->u.watch) so the
   object stays reachable until the finalizer has run.
   fn: the finalizer to enqueue; its prev/next links are overwritten. */
{
  /* DO NOT COLLECT FROM collect_stack DURING THIS PROCEDURE */

  unsigned long watch_addr;

  num_queued_finalizers++;

  if (!last_queued_finalizer) {
    /* Queue has no tail yet: link at the front. */
    fn->next = queued_finalizers;
    if (fn->next)
      fn->next->prev = fn;
    queued_finalizers = fn;
  } else {
    /* Append after the current tail. */
    fn->prev = last_queued_finalizer;
    fn->prev->next = fn;
    fn->next = NULL;
  }
  last_queued_finalizer = fn;

  /* Need to mark watched as in-use, now: */
  /* (if this finalizer is eager, block contents are now marked too) */
  watch_addr = PTR_TO_INT(&fn->u.watch);
  PUSH_COLLECT(watch_addr, watch_addr + PTR_SIZE, 0);
}

Here is the caller graph for this function:

static void* find_ptr ( void * d,
int *  _size,
BlockOfMemory **  _block,
int *  _pos,
MemoryChunk **  _chunk,
int  find_anyway 
) [static]

Definition at line 1675 of file sgc.c.

/* Looks up the GC allocation containing d.  Returns the start of the
   containing element (block element or chunk payload), or NULL if d
   is not inside any live allocation.  Out-parameters (each optional,
   pass NULL to skip): _size receives the element/chunk size even when
   the object is unmarked; _block/_pos identify a common-block hit;
   _chunk identifies a chunk hit.  When find_anyway is non-zero,
   unmarked/free objects are reported instead of returning NULL. */
{
  unsigned long target = PTR_TO_INT(d);
  DECL_SECTOR_PAGETABLES;

  FIND_SECTOR_PAGETABLES(target);

  if (!sector_pagetables)
    return NULL;

  /* Quick rejection: outside the known allocation address range. */
  if ((target < low_plausible) || (target >= high_plausible))
    return NULL;

  {
    SectorPage *pagetable = sector_pagetables[SECTOR_LOOKUP_PAGETABLE(target)];
    SectorPage *page;
    long kind;

    if (!pagetable)
      return NULL;

    page = pagetable + SECTOR_LOOKUP_PAGEPOS(target);
    kind = page->kind;

    if (kind == sector_kind_block) {
      /* Found common block: */
      BlockOfMemory *block = (BlockOfMemory *)INT_TO_PTR(page->start);

      if ((target < block->start) || (target >= block->top))
        return NULL;

      {
        int elem_size = block->size;
        int pos = (int)(target - block->start) / elem_size;
        int apos = POS_TO_UNMARK_INDEX(pos);
        int bit = POS_TO_UNMARK_BIT(pos);
        unsigned long found;

        if (_size)
          *_size = elem_size;

        /* Free/unmarked elements are invisible unless find_anyway: */
        if (NOT_MARKED(block->free[apos] & bit) && !find_anyway)
          return NULL;

        found = block->start + (pos * elem_size);

        if (_block)
          *_block = block;
        if (_pos)
          *_pos = pos;

        return INT_TO_PTR(found);
      }
    } else if (kind == sector_kind_chunk) {
      /* Found large-object chunk: */
      MemoryChunk *chunk = (MemoryChunk *)INT_TO_PTR(page->start);

      if ((target >= chunk->start) && (target < chunk->end)) {
        if (_size)
          *_size = (chunk->end - chunk->start);
        if (!chunk->marked && !find_anyway)
          return NULL;
        if (_chunk)
          *_chunk = chunk;
        return INT_TO_PTR(chunk->start);
      }
    }
  }

  return NULL;
}

Here is the caller graph for this function:

static void free_chunk ( MemoryChunk *  k,
MemoryChunk **  prev,
GC_Set *  set 
) [static]

Definition at line 2932 of file sgc.c.

{
  MemoryChunk *next;
  
#if ALLOW_SET_FINALIZER
  if (set->finalizer) {
    void *s = INT_TO_PTR(k->start);
#if PAD_BOUNDARY_BYTES
    s = PAD_FORWARD(s);
#endif
    set->finalizer(s);
  }
#endif
  
  mem_real_use -= (k->end - k->start + sizeof(MemoryChunk));
  
#if PRINT && 0
  FPRINTF(STDERR, "free chunk: %ld (%ld) %d %d\n", 
         (unsigned long)k, k->end - k->start,
         set->atomic, set->uncollectable);
#endif
  
  next = k->next;

#if KEEP_PREV_PTR
  if (next)
    next->prev_ptr = k->prev_ptr;
#endif

#if CHECK_FREES
  if (PTR_TO_INT(next) & (SECTOR_SEGMENT_SIZE - 1))
    free_error("bad next\n");
#endif

  *prev = next;

  free_sector(k);
  --num_chunks;
}

Here is the call graph for this function:

Here is the caller graph for this function:

static void free_collect_temp ( void v,
long  oldsize 
) [static]

Definition at line 1299 of file sgc.c.

{
  /* Release temporary memory (of oldsize bytes) that was obtained for
     the collector's own use, via whichever backend is configured. */
#if GET_MEM_VIA_SBRK
  /* Last reference: record usage and roll the break back. */
  if (!(--c_refcount)) {
    collect_mem_use = (unsigned long)(sbrk(0)) - (unsigned long)save_brk;
    brk(save_brk);
  }
#elif GET_MEM_VIA_MMAP
  /* BUG FIX: munmap takes a length in BYTES.  The original passed the
     segment count ((oldsize + SEG - 1) >> LOG_SEG), which unmaps almost
     nothing and leaks the region.  Round oldsize up to a whole number
     of sector segments, in bytes. */
  munmap(v, ((oldsize + SECTOR_SEGMENT_SIZE - 1) >> LOG_SECTOR_SEGMENT_SIZE) << LOG_SECTOR_SEGMENT_SIZE);
#elif GET_MEM_VIA_VIRTUAL_ALLOC
  /* VirtualFree with MEM_RELEASE requires size 0. */
  VirtualFree(v, 0, MEM_RELEASE);
#else
  FREE(v);
#endif
}

Here is the caller graph for this function:

static void free_managed ( void s) [static]

Definition at line 1430 of file sgc.c.

{
  int i;
  unsigned long p;
  ManagedBucket *bucket;
  ManagedBlock *mb;

  p = PTR_TO_INT(s);

  /* Assume that s really is an allocated managed pointer: */
  mb = (ManagedBlock *)INT_TO_PTR((p & SECTOR_SEGMENT_MASK));
  
  for (i = 0; i < managed->num_buckets; i++) {
    bucket = managed->buckets + i;
    if (bucket->size == mb->head.size) {
      /* Found bucket */
      int which;
      which = (p - PTR_TO_INT(mb) - bucket->offset) / bucket->size;
      if ((which >= 0) && (which < bucket->perblock)) {
       if (mb->free[which]) {
         FPRINTF(STDERR, "error freeing managed\n");
         return;
       }
       mb->free[which] = 1;
       --mb->head.count;
       manage_mem_use -= bucket->size;
       if (!mb->head.count) {
         if (mb->head.prev) {
           if (mb->head.next)
             mb->head.next->head.prev = mb->head.prev;
           mb->head.prev->head.next = mb->head.next;
         } else {
           if (mb->head.next) {
             bucket->block = mb->head.next;
             bucket->block->head.prev = NULL;
           } else {
             /* Empty bucket */
             int j;
             --managed->num_buckets;
             for (j = i; j < managed->num_buckets; j++)
              memcpy(&(managed->buckets[j]), &(managed->buckets[j + 1]), sizeof(ManagedBucket));
           }
         }

         manage_real_mem_use -= (bucket->offset + bucket->size * bucket->perblock);

         free_sector(mb);
       }
       return;
      }
    }
  }
  
  FPRINTF(STDERR, "error freeing managed\n");
}

Here is the call graph for this function:

Here is the caller graph for this function:

static void free_sector ( void *  p) [static]

Definition at line 1159 of file sgc.c.

{
  unsigned long s = PTR_TO_INT(p), t;
  int c = 0;
#if !RELEASE_UNUSED_SECTORS
  SectorFreepage *fp, *ifp;
#endif

  num_sector_frees++;
  
  /* Determine the size: */
  t = s;
  while(1) {
    DECL_SECTOR_PAGETABLES;
    long pagetableindex = SECTOR_LOOKUP_PAGETABLE(t);
    long pageindex = SECTOR_LOOKUP_PAGEPOS(t);
    GET_SECTOR_PAGETABLES(t);
    if (sector_pagetables[pagetableindex]
       && (sector_pagetables[pagetableindex][pageindex].start == s)) {
      sector_pagetables[pagetableindex][pageindex].kind = sector_kind_freed;
      sector_pagetables[pagetableindex][pageindex].start = 0;
      c++;
      t += SECTOR_SEGMENT_SIZE;
    } else
      break;
  }

#if CHECK_FREES
  if (!c) {
    free_error("bad sector free!\n");
    return;
  }
#endif

#if RELEASE_UNUSED_SECTORS
  free_plain_sector(p, c);
  sector_mem_use -= (c << LOG_SECTOR_SEGMENT_SIZE);
#else
  sector_free_mem_use += (c << LOG_SECTOR_SEGMENT_SIZE);
  if (sector_freepage_by_end) {
    /* Try merge with a predecessor: */
    sector_freepage_by_end = splay(s, sector_freepage_by_end);
    ifp = TREE_FP(sector_freepage_by_end);
    if (ifp->end == s) {
      remove_freepage(ifp);
      c += ifp->size;
      s = ifp->start;
    }
    
    if (sector_freepage_by_start) {
      /* Try merge with a successor: */
      sector_freepage_by_start = splay(t, sector_freepage_by_start);
      ifp = TREE_FP(sector_freepage_by_start);
      if (ifp->start == t) {
       remove_freepage(ifp);
       c += ifp->size;
       t = ifp->end;
      }
    }
  }
  
  fp = (SectorFreepage *)p;
  fp->start = s;
  fp->end = t;
  fp->size = c;
  add_freepage(fp);
#endif
}

Here is the call graph for this function:

Here is the caller graph for this function:

void GC_add_roots ( void start,
void end 
)

Definition at line 1540 of file sgc.c.

{
  if (roots_count >= roots_size) {
    unsigned long *naya;

    mem_real_use -= (sizeof(unsigned long) * roots_size);

    roots_size = roots_size ? 2 * roots_size : 500;
    naya = (unsigned long *)malloc_managed(sizeof(unsigned long) * (roots_size + 1));

    mem_real_use += (sizeof(unsigned long) * roots_size);

    memcpy((void *)naya, (void *)roots, 
          sizeof(unsigned long) * roots_count);

    if (roots)
      free_managed(roots);

    roots = naya;
  }

  roots[roots_count++] = PTR_TO_INT(start);
  roots[roots_count++] = PTR_TO_INT(end) - PTR_ALIGNMENT;
}

Here is the call graph for this function:

void* GC_base ( void *  d)

Definition at line 1741 of file sgc.c.

{
  /* Translate an interior pointer d to the user-visible start of its
     object, or NULL when d does not point into a live object. */
  void *base = find_ptr(d, NULL, NULL, NULL, NULL, 0);

#if PAD_BOUNDARY_BYTES
  /* Step over the debugging pad at the front of the object. */
  if (base)
    base = PAD_FORWARD(base);
#endif

  return base;
}

Here is the call graph for this function:

Definition at line 4888 of file sgc.c.

{
#if ALLOW_TRACE_PATH
  int i;

  for (i = 0; i < TRACE_PATH_BUFFER_SIZE; i++)
    trace_path_buffer[i] = NULL;
#endif
}

Here is the caller graph for this function:

void GC_dump ( void  )

Definition at line 1900 of file sgc.c.

{
  FPRINTF(STDERR, "Begin Map\n");

  FPRINTF(STDERR,
         "allocated: %ld  collectable: %ld  uncollectable: %ld\n"
         "including known overhead: %ld  scheduled gc: %ld  last collect depth: %ld\n"
         "managed: %ld  managed including overhead: %ld\n"
         "sector used: %ld  sector free: %ld  sector total: %ld\n"
         "sector range: %ld  sector administration: %ld\n"
         "num sector allocs: %ld  num sector frees: %ld\n"
         "num disappearing links: %d  num finalizations: %d  queued: %d\n"
#if STAMP_AND_REMEMBER_SOURCE
         "current clock: %ld\n"
#endif
         , mem_use + mem_uncollectable_use, mem_use, mem_uncollectable_use, 
         mem_real_use, mem_limit, collect_mem_use,
         manage_mem_use, manage_real_mem_use,
         sector_mem_use - sector_free_mem_use, sector_free_mem_use, sector_mem_use,
         sector_high_plausible - sector_low_plausible,
         sector_admin_mem_use,
         num_sector_allocs, num_sector_frees,
         GC_dl_entries, GC_fo_entries, num_queued_finalizers
#if STAMP_AND_REMEMBER_SOURCE
         , stamp_clock
#endif
         );

#if DUMP_SECTOR_MAP
  dump_sector_map("");
#endif

#if DUMP_BLOCK_COUNTS
  {
    int i, j;
    unsigned long total;
    
#if DUMP_BLOCK_MAPS
    FPRINTF(STDERR, "roots: ======================================\n");
    for (i = 0; i < roots_count; i += 2)
      FPRINTF(STDERR, ">%lx-%lx", roots[i], roots[i + 1]);
    FPRINTF(STDERR, "\n");

    FPRINTF(STDERR, "stack: ======================================\n");
    FPRINTF(STDERR, ">%lx-%lx>%lx-%lx\n",
           trace_stack_start, trace_stack_end, trace_reg_start, trace_reg_end);
#endif

    for (j = 0; j < num_common_sets; j++) {
      GC_Set *cs = common_sets[j];

      total = 0;

      FPRINTF(STDERR,
             "Set: %s [%s/%s]: ======================================\n", 
             cs->name,
             cs->atomic ? "atomic" : "pointerful",
             cs->uncollectable ? "eternal" : "collectable");

      for (i = 0; i < NUM_COMMON_SIZE; i++) {
       BlockOfMemory *block;
       int counter = 0;

       block = (cs)->blocks[i];

       if (block) {
         FPRINTF(STDERR, "%d:", block->size);

#if DUMP_BLOCK_MAPS
         FPRINTF(STDERR, "[%lx]", block->start - (unsigned long)block);
#endif

         while (block) {
           int k, size = block->size;

#if DUMP_BLOCK_MAPS
           counter = 0;
#endif

           for (k = (block->top - block->start) / block->size; k-- ; ) {
             int bit = POS_TO_UNMARK_BIT(k);
             int pos = POS_TO_UNMARK_INDEX(k);
             
             if (IS_MARKED(block->free[pos] & bit)) {
              total += size;
              counter++;
             }
           }

#if DUMP_BLOCK_MAPS
           FPRINTF(STDERR,
                  ">%lxx%d"
#if STAMP_AND_REMEMBER_SOURCE
                  "@%ld-%ld:%lx-%lx" 
#endif
                  , (unsigned long)block, counter
#if STAMP_AND_REMEMBER_SOURCE
                  , block->make_time, 
                  block->use_time,
                  block->low_marker,
                  block->high_marker
#endif
                  );
#endif
           block = block->next;
         }
#if DUMP_BLOCK_MAPS
         FPRINTF(STDERR, "\n");
#else
         FPRINTF(STDERR, "%d;", counter);
#endif
       }
      }

      /* Print chunks, "sorting" so that same size are printed together: */
      {
       MemoryChunk *c, *cnext, *first = NULL, *last = NULL, *t, *next, *prev;
       int counter = 0;
       
       for (c = *(cs->othersptr); c; c = cnext) {
         unsigned long size = c->end - c->start;
         FPRINTF(STDERR, "%ld:", size);

#if DUMP_BLOCK_MAPS
         FPRINTF(STDERR, "[%lx]", c->start - (unsigned long)c);
#endif
         
         cnext = c->next;
         
         prev = NULL;
         for (t = c; t; t = next) {
           next = t->next;
           
           if (size == (t->end - t->start)) {
#if DUMP_BLOCK_MAPS
             FPRINTF(STDERR,
                    ">%lx"
#if STAMP_AND_REMEMBER_SOURCE
                    "@%ld:%lx" 
#endif
                    , (unsigned long)t
#if STAMP_AND_REMEMBER_SOURCE
                    , t->make_time,
                    t->marker
#endif
                    );
#endif
             
             counter++;

             if (last)
              last->next = t;
             else
              first = t;
             last = t;
             if (prev)
              prev->next = t->next;
             if (t == cnext)
              cnext = t->next;

             total += size;
           } else
             prev = t;
         }
#if DUMP_BLOCK_MAPS
         FPRINTF(STDERR, "\n");
#else
         FPRINTF(STDERR, "%d;", counter);
         counter = 0;
#endif
       }
       
       if (last)
         last->next = NULL;
       *(cs->othersptr) = first;
      }
      cs->total = total;

#if KEEP_PREV_PTR
      /* reset prev pointers: */
      {
       MemoryChunk *c, **prev_ptr = (cs->othersptr);
       for (c = *(cs->othersptr); c; c = c->next) {
         c->prev_ptr = prev_ptr;
         prev_ptr = &c->next;
       }
      }
#endif
      
      FPRINTF(STDERR, "total size: %ld\n", total);
    }

    FPRINTF(STDERR, "summary: ======================================\n");
    total = 0;
    for (j = 0; j < num_common_sets; j++) {
      GC_Set *cs = common_sets[j];
      FPRINTF(STDERR,
             "%12s: %10ld  [%s/%s]\n",
             cs->name, cs->total,
             cs->atomic ? "atomic" : "pointerful",
             cs->uncollectable ? "eternal" : "collectable");
      total += cs->total;
    }
    FPRINTF(STDERR, "%12s: %10ld\n", "total", total);
  }
#endif
  FPRINTF(STDERR, "End Map\n");
}

Here is the call graph for this function:

Definition at line 2114 of file sgc.c.

{
  /* stubborness is not exploited */
}

Definition at line 4326 of file sgc.c.

{
  collect();  
}

Here is the call graph for this function:

Here is the caller graph for this function:

void GC_for_each_element ( GC_Set set,
void(*)(void *p, int size, void *data f,
void data 
)

Definition at line 2868 of file sgc.c.

{
  int i;
  BlockOfMemory **blocks = set->blocks;
  MemoryChunk *c = *(set->othersptr);

#if ALLOW_SET_LOCKING
  if (!set->uncollectable)
    set->locked++;
#endif

  for (i = 0; i < NUM_COMMON_SIZE; i++) {
    BlockOfMemory **prev = &blocks[i];
    BlockOfMemory *block = *prev;

    while (block) {
      int j;

      j = (block->top - block->start) / block->size;
      
      while (j--) {
       int bit = POS_TO_FREE_BIT(j);
       int pos = POS_TO_FREE_INDEX(j);
       
       if (IS_MARKED(block->free[pos] & bit)) {
         unsigned long p;
         void *s;
         
         p = block->start + (block->size * j);
         s = INT_TO_PTR(p);
         
#if PAD_BOUNDARY_BYTES
         s = PAD_FORWARD(s);
#endif
         
         f(s, block->size, data);
       }
      }
      block = block->next;
    }
  }

  for (; c; c = c->next) {
    void *s;

    s = INT_TO_PTR(c->start);

#if PAD_BOUNDARY_BYTES
    s = PAD_FORWARD(s);
#endif

    f(s, c->end - c->start, data);
  }

#if ALLOW_SET_LOCKING
  if (!set->uncollectable)
    --set->locked;
#endif
}

Here is the call graph for this function:

Here is the caller graph for this function:

void GC_free ( void *  p)

Definition at line 2972 of file sgc.c.

{
  /* Explicitly deallocate p (compiled in only with PROVIDE_GC_FREE or
     PROVIDE_CHUNK_GC_FREE).  p must point at the start of an object
     previously returned by this allocator; interior pointers and
     already-free elements are rejected (reported under CHECK_FREES). */
#if PROVIDE_GC_FREE || PROVIDE_CHUNK_GC_FREE
  BlockOfMemory *block = NULL;
  MemoryChunk *chunk = NULL;
  int fpos;
  void *found;
  GC_Set *set;

# if CHECK_COLLECTING && CHECK_FREES
  if (collecting_now)
    free_error("GC_free during collection\n");
# endif

  /* find_anyway = 1: locate the object even if it is unmarked. */
  found = find_ptr(p, NULL, &block, &fpos, &chunk, 1);
  if (!found) {
# if CHECK_FREES
    char b[256];
    sprintf(b, "GC_free failed! %lx\n", (long)p);
    free_error(b);
# endif
    return;
  }

  if (PAD_FORWARD(found) == p) {
    if (block) {
# if PROVIDE_GC_FREE
      int i;
      int pos = POS_TO_FREE_INDEX(fpos);
      int fbit = POS_TO_FREE_BIT(fpos);
      int ubit = POS_TO_UNMARK_BIT(fpos);

#  if CHECK_FREES
      if (block->free[pos] & fbit) {
       char b[256];
       sprintf(b, "Block element already free! %lx\n", (long)p);
       /* BUG FIX: the message was built but never reported; every
          sibling CHECK_FREES branch calls free_error(b). */
       free_error(b);
       return;
      }
#   if EXTRA_FREE_CHECKS
      if (block->set_no != 4) {
       char b[256];
       sprintf(b, "GC_free on ptr from wrong block! %lx\n", (long)p);
       free_error(b);
       return;
      }
#   endif
#  endif

      /* Mark the element free, and rewind the block's free-search
        cursor so the slot is found by the next allocation. */
      block->free[pos] |= (fbit | ubit);
      if (block->free_search_start <= pos) {
       block->free_search_start = pos;
       block->free_search_bit = (FREE_BIT_START | UNMARK_BIT_START);
       block->free_search_offset = 0;
      }

      if (!initialized)
       GC_initialize();

      set = common_sets[block->set_no];
      
#  if ALLOW_SET_FINALIZER
      if (set->finalizer)
       set->finalizer(p);
#  endif

      {
       int size;
#  if PAD_BOUNDARY_BYTES
       size = block->size - PAD_START_SIZE - PAD_END_SIZE;
       ((long *)found)[1] = 0; /* 0 extra */
#  else
       size = block->size;
#  endif

       /* Clear, since collection scans whole block. */
       memset(p, 0, size);
      }

      /* Make sure this block is reachable from block_ends: */
      i = size_index_map[(block->size >> LOG_PTR_SIZE) - 1];
      if (set->block_ends[i] != block)
       set->block_ends[i] = set->blocks[i];

      if (set->uncollectable)
       mem_uncollectable_use -= block->size;
# endif
    } else {
      if (!initialized)
       GC_initialize();

# if CHECK_FREES && EXTRA_FREE_CHECKS
      if (chunk->set_no != 4) {
       char b[256];
       sprintf(b, "GC_free on ptr from wrong block! %lx\n", (long)p);
       free_error(b);
       return;
      }
# endif
      /* Large object: release the whole chunk back to the sectors. */
      set = common_sets[chunk->set_no];
      if (set->uncollectable)
       mem_uncollectable_use -= (chunk->end - chunk->start);
      free_chunk(chunk, chunk->prev_ptr, set);
    }
  }
# if CHECK_FREES
  else {
    char b[256];
    sprintf(b, "GC_free on block interior! %lx != %lx\n", 
           (long)p, (long)PAD_FORWARD(found));
    free_error(b);
  }
# endif
#endif
}

Here is the call graph for this function:

Definition at line 4737 of file sgc.c.

{
  long dummy;

  if (!sector_mem_use)
    return;

  FLUSH_REGISTER_WINDOWS;
  if (!setjmp(buf))
    do_GC_gcollect((void *)&dummy);
}

Here is the call graph for this function:

Definition at line 2744 of file sgc.c.

Here is the call graph for this function:

long GC_get_memory_use ( void  )

Definition at line 2109 of file sgc.c.

{
  return mem_real_use;
}

Here is the call graph for this function:

Here is the caller graph for this function:

void** GC_get_next_path ( void **  prev,
int len 
)

Definition at line 4868 of file sgc.c.

{
#if ALLOW_TRACE_PATH
  void **p;

  if (!prev)
    p = trace_path_buffer;
  else
    p = prev + (2 * (((long *)prev)[-1]));
    
  *len = *(long *)p;
  if (!*len)
    return NULL;

  return p + 1;
#else
  return NULL;
#endif
}

Here is the caller graph for this function:

Definition at line 1670 of file sgc.c.

{
  return GC_stackbottom;
}

Here is the call graph for this function:

static void GC_initialize ( void  ) [static]

Definition at line 1587 of file sgc.c.

{
  int i;

#if PROVIDE_MALLOC_AND_FREE
  num_common_sets = 5;
#else
  num_common_sets = 4;
#endif
  common_sets = (GC_Set **)malloc_managed(sizeof(GC_Set*) * num_common_sets);

  common_sets[0] = (GC_Set *)malloc_managed(sizeof(GC_Set));
  common_sets[0]->atomic = 0;
  common_sets[0]->uncollectable = 0;
  common_sets[0]->blocks = common;
  common_sets[0]->block_ends = common + NUM_COMMON_SIZE;
  common_sets[0]->othersptr = &others;

  common_sets[1] = (GC_Set *)malloc_managed(sizeof(GC_Set));
  common_sets[1]->atomic = !NO_ATOMIC;
  common_sets[1]->uncollectable = 0;
  common_sets[1]->blocks = atomic_common;
  common_sets[1]->block_ends = atomic_common + NUM_COMMON_SIZE;
  common_sets[1]->othersptr = &atomic_others;

  common_sets[2] = (GC_Set *)malloc_managed(sizeof(GC_Set));
  common_sets[2]->atomic = 0;
  common_sets[2]->uncollectable = 1;
  common_sets[2]->blocks = uncollectable_common;
  common_sets[2]->block_ends = uncollectable_common + NUM_COMMON_SIZE;
  common_sets[2]->othersptr = &uncollectable_others;

  common_sets[3] = (GC_Set *)malloc_managed(sizeof(GC_Set));
  common_sets[3]->atomic = !NO_ATOMIC;
  common_sets[3]->uncollectable = 1;
  common_sets[3]->blocks = uncollectable_atomic_common;
  common_sets[3]->block_ends = uncollectable_atomic_common + NUM_COMMON_SIZE;
  common_sets[3]->othersptr = &uncollectable_atomic_others;

#if PROVIDE_MALLOC_AND_FREE
  common_sets[4] = (GC_Set *)malloc_managed(sizeof(GC_Set));
  common_sets[4]->atomic = 1;
  common_sets[4]->uncollectable = 1;
  common_sets[4]->blocks = sys_malloc;
  common_sets[4]->block_ends = sys_malloc + NUM_COMMON_SIZE;
  common_sets[4]->othersptr = &sys_malloc_others;
#endif

  for (i = 0; i < num_common_sets; i++) {
    common_sets[i]->name = "Basic";
#if ALLOW_SET_LOCKING
    common_sets[i]->locked = 0;
#endif
#if KEEP_SET_NO || KEEP_CHUNK_SET_NO
    common_sets[i]->no = i;
#endif
#if ALLOW_TRACE_COUNT
    common_sets[i]->count_tracer = NULL;
#endif
#if ALLOW_TRACE_PATH
    common_sets[i]->path_tracer = NULL;
#endif
#if ALLOW_TRACE_COUNT || ALLOW_TRACE_PATH
    common_sets[i]->trace_init = NULL;
    common_sets[i]->trace_done = NULL;
#endif
#if ALLOW_SET_FINALIZER
    common_sets[i]->finalizer = NULL;
#endif    
  }

#if PROVIDE_MALLOC_AND_FREE
  common_sets[4]->name = "Sysmalloc";
#endif

  initialized = 1;
}

Here is the call graph for this function:

Here is the caller graph for this function:

int GC_is_atomic ( void d)

Definition at line 1773 of file sgc.c.

{
  /* Report whether d points into an atomic (pointer-free) object;
     0 when d is not a live GC pointer at all. */
  BlockOfMemory *block = NULL;
  MemoryChunk *chunk = NULL;

  if (!find_ptr(d, NULL, &block, NULL, &chunk, 0))
    return 0;

  /* find_ptr filled in exactly one of block/chunk. */
  return block ? block->atomic : chunk->atomic;
}

Here is the call graph for this function:

void* GC_malloc ( size_t  size)

Definition at line 2622 of file sgc.c.

Here is the call graph for this function:

void* GC_malloc_atomic ( size_t  size)

Definition at line 2629 of file sgc.c.

Here is the call graph for this function:

Definition at line 2645 of file sgc.c.

Here is the call graph for this function:

void* GC_malloc_specific ( size_t  size,
GC_Set set 
)

Definition at line 2653 of file sgc.c.

Here is the call graph for this function:

void* GC_malloc_stubborn ( size_t  size)

Definition at line 2661 of file sgc.c.

{
  return GC_malloc(size);
}
void* GC_malloc_uncollectable ( size_t  size)

Definition at line 2637 of file sgc.c.

Here is the call graph for this function:

GC_Set* GC_new_set ( char *  name,
GC_trace_init  trace_init,
GC_trace_done  trace_done,
GC_count_tracer  count_tracer,
GC_path_tracer  path_tracer,
GC_set_elem_finalizer  final,
int  flags 
) [read]

Definition at line 2564 of file sgc.c.

{
  GC_Set *c, **naya;
  int i;

  if (!initialized)
    GC_initialize();

  c = (GC_Set *)malloc_managed(sizeof(GC_SetWithOthers));

  naya = (GC_Set **)malloc_managed(sizeof(GC_Set *) * (num_common_sets + 1));
  for (i = 0; i < num_common_sets; i++)
    naya[i] = common_sets[i];
  
#if KEEP_SET_NO || KEEP_CHUNK_SET_NO
  c->no = num_common_sets;
#endif
#if ALLOW_TRACE_COUNT
  c->count_tracer = count_tracer;
#endif
#if ALLOW_TRACE_PATH
  c->path_tracer = path_tracer;
#endif
#if ALLOW_TRACE_COUNT || ALLOW_TRACE_PATH
  c->trace_init = trace_init;
  c->trace_done = trace_done;
#endif
#if ALLOW_SET_FINALIZER
  c->finalizer = final;
#endif    

  naya[num_common_sets++] = c;
  c->atomic = !!(flags & SGC_ATOMIC_SET);
  c->uncollectable = !!(flags & SGC_UNCOLLECTABLE_SET);
#if ALLOW_SET_LOCKING
  c->locked = 0;
#endif
  c->name = name;
  c->blocks = (BlockOfMemory **)malloc_managed(sizeof(BlockOfMemory*) * 2 * NUM_COMMON_SIZE);
  memset(c->blocks, 0, sizeof(BlockOfMemory*) * NUM_COMMON_SIZE);
  c->block_ends = c->blocks + NUM_COMMON_SIZE;
  memset(c->block_ends, 0, sizeof(BlockOfMemory*) * NUM_COMMON_SIZE);

  ((GC_SetWithOthers *)c)->others = NULL;
  c->othersptr = &((GC_SetWithOthers *)c)->others;

  free_managed(common_sets);
  common_sets = naya;

  return c;
}

Here is the call graph for this function:

void * GC_orig_base ( void d)

Definition at line 1795 of file sgc.c.

{
  return find_ptr(d, NULL, NULL, NULL, NULL, 1);
}

Here is the call graph for this function:

int GC_orig_size ( void d)

Definition at line 1787 of file sgc.c.

{
  /* Raw size (including any pad bytes) of the object containing d,
     or 0 when d is not a live GC pointer. */
  int sz = 0;

  (void)find_ptr(d, &sz, NULL, NULL, NULL, 0);
  return sz;
}

Here is the call graph for this function:

void GC_push_all_stack ( void sp,
void ep 
)

Definition at line 4314 of file sgc.c.

{
  unsigned long s, e;

  s = PTR_TO_INT(sp);
  e = PTR_TO_INT(ep);

  PUSH_COLLECT(s, e, 0);

  prepare_stack_collect();
}

Here is the call graph for this function:

void GC_register_eager_finalizer ( void p,
int  level,
void(*)(void *p, void *data f,
void data,
void(**)(void *p, void *data oldf,
void **  olddata 
)

Definition at line 2852 of file sgc.c.

{
  register_finalizer(PAD_BACKWARD(p), f, data, oldf, olddata, level, 0);
}

Here is the call graph for this function:

void GC_register_finalizer ( void p,
void(*)(void *p, void *data f,
void data,
void(**)(void *p, void *data oldf,
void **  olddata 
)

Definition at line 2845 of file sgc.c.

{
  register_finalizer(PAD_BACKWARD(p), f, data, oldf, olddata, 0, 0);
}

Here is the call graph for this function:

void GC_register_finalizer_ignore_self ( void p,
void(*)(void *p, void *data f,
void data,
void(**)(void *p, void *data oldf,
void **  olddata 
)

Definition at line 2859 of file sgc.c.

{
  register_finalizer(PAD_BACKWARD(p), f, data, oldf, olddata, 0, 1);
}

Here is the call graph for this function:

Definition at line 2749 of file sgc.c.

Here is the call graph for this function:

GC_Set* GC_set ( void d) [read]

Definition at line 1800 of file sgc.c.

{
#if KEEP_SET_NO
  BlockOfMemory *block = NULL;
  MemoryChunk *chunk = NULL;
  
  if (!initialized)
    GC_initialize();

  if (find_ptr(d, NULL, &block, NULL, &chunk, 0)) {
    int set_no;
    if (block)
      set_no = block->set_no;
    else
      set_no = chunk->set_no;

    return common_sets[set_no];
  } else
    return NULL;
#else
  return NULL;
#endif
}

Here is the call graph for this function:

Here is the caller graph for this function:

Definition at line 789 of file sgc.c.

Definition at line 783 of file sgc.c.

Definition at line 1665 of file sgc.c.

{
  GC_stackbottom = base;
}
int GC_size ( void d)

Definition at line 1760 of file sgc.c.

{
  /* User-visible size of the object containing d, or 0 when d is not
     a live GC pointer. */
  int sz;

  if (!find_ptr(d, &sz, NULL, NULL, NULL, 0))
    return 0;

#if PAD_BOUNDARY_BYTES
  /* Exclude the debugging pad bytes from the reported size. */
  sz -= PAD_START_SIZE + PAD_END_SIZE;
#endif
  return sz;
}

Here is the call graph for this function:

void GC_store_path ( void v,
unsigned long  src,
void path_data 
)

Definition at line 4812 of file sgc.c.

{
  /* Note: a trace path of the form X->X->Y->Z->... (with two Xs)
     indicates an off-by-one stack source. */
#if ALLOW_TRACE_PATH
  TraceStack *s = (TraceStack *)path_data;
  int len, i;

  if (trace_path_buffer_pos < 0)
    return;

  len = s->count / 3;
  if (len * 2 + 3 > (TRACE_PATH_BUFFER_SIZE - trace_path_buffer_pos - 7)) {
    trace_path_buffer[trace_path_buffer_pos++] = (void *)2;
    trace_path_buffer[trace_path_buffer_pos++] = "truncated";
    trace_path_buffer[trace_path_buffer_pos++] = 0;
    trace_path_buffer[trace_path_buffer_pos++] = v; /* already padded */
    trace_path_buffer[trace_path_buffer_pos++] = 0;
    trace_path_buffer[trace_path_buffer_pos] = 0;
    trace_path_buffer_pos = -1;
    return;
  }

  if (len) {
    unsigned long prev = 0;

    trace_path_buffer[trace_path_buffer_pos++] = (void *)(len + 2);
    trace_path_buffer[trace_path_buffer_pos++] = current_trace_source;
    trace_path_buffer[trace_path_buffer_pos++] = 0;
    for (i = 1; len--; i += 3) {
      trace_path_buffer[trace_path_buffer_pos++] = (void *)PAD_FORWARD(s->stack[i]);
      trace_path_buffer[trace_path_buffer_pos++] = 0; /* reset on next iteration */

      if (i > 1) {
       /* See if we have offset information in the original trace info.
          (It might be missing because KEEP_DETAIL might be turned off, or
            PUSH_COLLECT had 0 for its third argument.) */
       unsigned long diff;
       if (s->stack[i + 1])
         diff = ((unsigned long)s->stack[i + 1]) - prev;
       else
         diff = 0;
       trace_path_buffer[trace_path_buffer_pos - 3] = (void *)diff;
      }
      prev = (unsigned long)s->stack[i];
    }

    trace_path_buffer[trace_path_buffer_pos - 1] = (void *)(src - prev);

    trace_path_buffer[trace_path_buffer_pos++] = v; /* already padded */
    trace_path_buffer[trace_path_buffer_pos++] = 0;
    trace_path_buffer[trace_path_buffer_pos] = 0;
  }
#endif
}
int GC_trace_count ( int stack,
int roots,
int uncollectable,
int final 
)

Definition at line 4749 of file sgc.c.

{
#if ALLOW_TRACE_COUNT
  int j;

  if (!sector_mem_use)
    return 0;

  for (j = 0; j < num_common_sets; j++) {
    if (common_sets[j]->trace_init)
      common_sets[j]->trace_init();
  }

  collecting_with_trace_count = 1;
  GC_gcollect();
  collecting_with_trace_count = 0;

  if (stack)
    *stack = traced_from_stack;
  if (roots)
    *roots = traced_from_roots;
  if (uncollectable)
    *uncollectable = traced_from_uncollectable;
  if (final)
    *final = traced_from_finals;

  for (j = 0; j < num_common_sets; j++) {
    if (common_sets[j]->trace_done)
      common_sets[j]->trace_done();
  }

  return mem_traced;
#else
  return 0;
#endif
}

Here is the call graph for this function:

Here is the caller graph for this function:

Definition at line 4786 of file sgc.c.

{
#if ALLOW_TRACE_PATH
  int j;

  if (!sector_mem_use)
    return;

  for (j = 0; j < num_common_sets; j++) {
    if (common_sets[j]->trace_init)
      common_sets[j]->trace_init();
  }

  trace_path_buffer_pos = 0;

  collecting_with_trace_path = 1;
  GC_gcollect();
  collecting_with_trace_path = 0;

  for (j = 0; j < num_common_sets; j++) {
    if (common_sets[j]->trace_done)
      common_sets[j]->trace_done();
  }
#endif
}

Here is the call graph for this function:

Here is the caller graph for this function:

Definition at line 2754 of file sgc.c.

{
  /* We'll do it later */
}
static void init_positions ( int  cpos,
int  size,
int  num_elems 
) [static]

Definition at line 2157 of file sgc.c.

{
  int num_positions = num_elems << LOG_FREE_BIT_PER_ELEM;
  int block_size = size * num_positions;
  int num_offsets = block_size >> LOG_PTR_SIZE;
  int size_in_ptrs = size >> LOG_PTR_SIZE;
  int i, j, pos;
  int *positions;

  positions = (int *)malloc_sector(num_offsets * sizeof(int), sector_kind_other, 0);

  for (i = 0, pos = 0, j = 0; i < num_offsets; ) {
    positions[i++] = pos;
    if (++j == size_in_ptrs) {
      pos++;
      j = 0;
    }
  }

  common_positionses[cpos] = positions;
}

Here is the call graph for this function:

Here is the caller graph for this function:

static void init_size_map ( ) [static]

Definition at line 1488 of file sgc.c.

{
  int i, j, find_half;
  long k, next;

  size_index_map = (long *)malloc_sector(MAX_COMMON_SIZE, sector_kind_other, 0);
  size_map = (long *)malloc_sector(NUM_COMMON_SIZE * sizeof(long), sector_kind_other, 0);

  /* This is two loops instead of one to avoid a gcc 2.92.2 -O2 x86 bug: */
  for (i = 0; i < 8; i++) {
    size_index_map[i] = i;
  }
  for (i = 0; i < 8; i++) {
    size_map[i] = (i + 1) * PTR_SIZE;
  }
  /* i's final value is used below... */

  k = 8;
  next = 12;
  j = i;
  find_half = 1;
  while (j < (MAX_COMMON_SIZE >> 2)) {
    size_index_map[j] = i;
    if ((j + 1) == next) {
      size_map[i] = next * PTR_SIZE;
      i++;
      if (find_half) {
       next = 2 * k;
      } else {
       next = 3 * k;
       k = 2 * k;
      }
      find_half = !find_half;
    }
    j++;
  }
  if (i < NUM_COMMON_SIZE)
    size_map[i] = next * PTR_SIZE;

#if 0
  FPRINTF(STDERR, "max: %d  num: %d\n", MAX_COMMON_SIZE, NUM_COMMON_SIZE);
  for (i = 0; i < (MAX_COMMON_SIZE >> 2); i++) {
    FPRINTF(STDERR, "%d->%d=%d;", i, 
           size_index_map[i], 
           size_map[size_index_map[i]]);
  }
  FPRINTF(STDERR, "\n");
#endif
}

Here is the call graph for this function:

Here is the caller graph for this function:

static void init_static_variables ( void  ) [static]

Definition at line 1571 of file sgc.c.

{
#if AUTO_STATIC_ROOTS_IF_POSSIBLE
# if USE_DATASTARTEND
  GC_add_roots(DATASTART, DATAEND);
# endif
# ifdef WIN32
  register_static_variables();
# endif
#endif

  statics_setup = 1;
}

Here is the call graph for this function:

Here is the caller graph for this function:

static void* malloc_managed ( long  size) [static]

Definition at line 1342 of file sgc.c.

/* Allocate `size` bytes of non-collected "managed" memory, used for
   the collector's own records (disappearing links, finalizers).
   Returns NULL only on internal inconsistency.  Freed via
   free_managed(). */
{
  /* A naive strategy is sufficient here.
     There will be many disappearing links, many
     finalizations, and very little of anything else. */
  int i, j;
  long perblock, offset;
  ManagedBlock *mb;

  /* Round the request up to pointer alignment.  (Fixed: the old test
     was `size & PTR_SIZE`, which checks one bit rather than the low
     alignment bits, so sizes such as PTR_SIZE + 2 were never
     rounded.) */
  if (size & (PTR_SIZE - 1))
    size += PTR_SIZE - (size & (PTR_SIZE - 1));

  if (!managed) {
    managed = (Managed *)malloc_sector(SECTOR_SEGMENT_SIZE, sector_kind_other, 0);
    managed->num_buckets = 0;
    manage_real_mem_use += SECTOR_SEGMENT_SIZE;
  }

  /* Find the bucket for this (aligned) size: */
  for (i = 0; i < managed->num_buckets; i++) {
    if (managed->buckets[i].size == size)
      break;
  }

  if (i >= managed->num_buckets) {
    /* No bucket for this size yet; create one with an initial block. */
    managed->num_buckets++;
    managed->buckets[i].size = size;
    if (size < MAX_COMMON_SIZE) {
      int c;

      mb = (ManagedBlock *)malloc_sector(SECTOR_SEGMENT_SIZE, sector_kind_managed, 0);
      manage_real_mem_use += SECTOR_SEGMENT_SIZE;
      managed->buckets[i].block = mb;

      /* `c` bytes (rounded up to pointer alignment) are reserved after
        the header for the per-slot free flags; objects follow. */
      c = (SECTOR_SEGMENT_SIZE - sizeof(ManagedBlockHeader)) / size;
      if (c & (PTR_SIZE - 1))
       c += (PTR_SIZE - (c & (PTR_SIZE - 1)));
      managed->buckets[i].perblock = (SECTOR_SEGMENT_SIZE - sizeof(ManagedBlockHeader) - c) / size;
      managed->buckets[i].offset = c + sizeof(ManagedBlockHeader);
    } else {
      /* Large record: one object per block. */
      long l = size + sizeof(ManagedBlockHeader) + PTR_SIZE;
      mb = (ManagedBlock *)malloc_sector(l, sector_kind_managed, 0);
      manage_real_mem_use += l;
      managed->buckets[i].block = mb;
      managed->buckets[i].perblock = 1;
      managed->buckets[i].offset = sizeof(ManagedBlockHeader) + PTR_SIZE;
    }
    mb->head.count = 0;
    mb->head.size = size;
    mb->head.next = NULL;
    mb->head.prev = NULL;
    perblock = managed->buckets[i].perblock;
    for (j = perblock; j--; )
      mb->free[j] = 1;
    mb->head.end = PTR_TO_INT(mb) + managed->buckets[i].offset + size * perblock;
  }

  /* Walk the block chain for a block with room, extending if full: */
  perblock = managed->buckets[i].perblock;
  offset = managed->buckets[i].offset;
  mb = managed->buckets[i].block;
  while ((mb->head.count == perblock) && mb->head.next)
    mb = mb->head.next;
  if (mb->head.count == perblock) {
    long l = offset + size * perblock;
    mb->head.next = (ManagedBlock *)malloc_sector(l, sector_kind_managed, 0);
    manage_real_mem_use += l;
    mb->head.next->head.prev = mb;
    mb = mb->head.next;
    mb->head.count = 0;
    mb->head.size = size;
    mb->head.next = NULL;
    for (j = perblock; j--; )
      mb->free[j] = 1;
    mb->head.end = PTR_TO_INT(mb) + offset + size * perblock;
  }

  manage_mem_use += size;

  /* Claim the first free slot in the chosen block: */
  mb->head.count++;
  for (j = perblock; j--; )
    if (mb->free[j]) {
      mb->free[j] = 0;
      return (((char *)mb) + offset) + size * j;
    }

  /* Unreachable unless the free-flag accounting is corrupted. */
  FPRINTF(STDERR, "error allocating managed\n");
  return NULL;
}

Here is the call graph for this function:

Here is the caller graph for this function:

static void* malloc_plain_sector ( int  count) [static]

Definition at line 1023 of file sgc.c.

/* Allocate `count` raw sectors from the platform layer; aborts the
   process (after invoking the GC_out_of_memory hook, if any) when the
   platform cannot supply memory. */
{
  void *mem = platform_plain_sector(count);

  if (mem)
    return mem;

  /* Allocation failed: give the client a chance to react, then die. */
  if (GC_out_of_memory)
    GC_out_of_memory();
  FPRINTF(STDERR, "out of memory\n");
  exit(-1);
  return NULL; /* not reached */
}

Here is the call graph for this function:

Here is the caller graph for this function:

static void* malloc_sector ( long  size,
long  kind,
int  no_new 
) [static]

Definition at line 1078 of file sgc.c.

/* Allocate `size` bytes (rounded up to whole sectors) tagged with
   `kind`.  Reuses a free page from the size-indexed splay tree when
   sectors are recycled; otherwise grabs fresh memory from the
   platform — unless `no_new` is set, in which case NULL is returned
   instead of growing. */
{
  long need;
  void *naya;
#if !RELEASE_UNUSED_SECTORS
  SectorFreepage *fp;
#endif

#if CHECK_COLLECTING
  if (collecting_now) {
    free_error("alloc while collecting\n");
    return NULL;
  }
#endif

  num_sector_allocs++;

#ifndef SIXTY_FOUR_BIT_INTEGERS
  /* Lazily create the top-level sector page table: */
  if (!sector_pagetables) {
    int i, c = (SECTOR_LOOKUP_PAGESETBITS + LOG_PTR_SIZE) - LOG_SECTOR_SEGMENT_SIZE;
    if (c < 0)
      c = 0;
    c = 1 << c;
    sector_pagetables = (SectorPage **)malloc_plain_sector(c);
    sector_admin_mem_use += (c << LOG_SECTOR_SEGMENT_SIZE);
    for (i = 0; i < (1 << SECTOR_LOOKUP_PAGESETBITS); i++)
      sector_pagetables[i] = NULL;
  }
#endif

  /* Number of whole sectors needed: */
  need = (size + SECTOR_SEGMENT_SIZE - 1) >> LOG_SECTOR_SEGMENT_SIZE;

#if !RELEASE_UNUSED_SECTORS
  /* Look for a recycled free page of at least `need` sectors: */
  if (sector_freepage_by_size) {
    sector_freepage_by_size = splay(need, sector_freepage_by_size);
    if (TREE_FP(sector_freepage_by_size)->size < need) {
      /* No exact match, so find the next size up */
      Tree *node;
      node = next(sector_freepage_by_size);
      if (node)
       fp = TREE_FP(node);
      else
       fp = NULL;
    } else 
      fp = TREE_FP(sector_freepage_by_size);
  } else
    fp = NULL;
  
  if (fp) {
    remove_freepage(fp);

    naya = INT_TO_PTR(fp->start);
    register_sector(naya, need, kind);
    if (fp->size > need) {
      /* Move freepage info and shrink */
      /* (The remainder is re-registered as a smaller free page.) */
      SectorFreepage *naya;
      unsigned long nfp;
      nfp = fp->start + (need << LOG_SECTOR_SEGMENT_SIZE);
      naya = (SectorFreepage *)INT_TO_PTR(nfp);
      naya->size = fp->size - need;
      naya->start = nfp;
      naya->end = fp->end;

      add_freepage(naya);
    }

    sector_free_mem_use -= (need << LOG_SECTOR_SEGMENT_SIZE);
    return naya;
  }
#endif

  if (no_new)
    return NULL;

  /* No recycled page available; get fresh sectors: */
  naya = malloc_plain_sector(need);
  sector_mem_use += (need << LOG_SECTOR_SEGMENT_SIZE);
  register_sector(naya, need, kind);

  return naya;
}

Here is the call graph for this function:

Here is the caller graph for this function:

Definition at line 3886 of file sgc.c.

/* NOTE(review): the signature line is missing from this excerpt; from
   the field accesses, `c` appears to be the head of a MemoryChunk list
   (cf. mark_common_for_finalizations for the block-allocated analog) —
   confirm against the full sgc.c.  Marks finalizer data, and for
   non-eager finalizers marks everything reachable from each unmarked,
   non-atomic chunk, then drains the mark stack via collect(). */
{
  for (; c; c = c->next) {
    Finalizer *fn = c->finalizers;

    if (fn) {
      /* Always mark data associated with finalization: */
      unsigned long p = PTR_TO_INT(&fn->data);
      PUSH_COLLECT(p, p + PTR_SIZE, 0);

      /* If not eager, mark data reachable from finalized block: */
      if (!fn->eager_level && !c->marked && !c->atomic) {
       if (fn->ignore_self)
         push_collect_ignore(c->start, c->end, c->start);
       else {
         PUSH_COLLECT(c->start, c->end, 0);
       }
      }
    }
  }

  collect();
}

Here is the call graph for this function:

Here is the caller graph for this function:

static void mark_common_for_finalizations ( BlockOfMemory **  blocks,
int  atomic 
) [static]

Definition at line 3910 of file sgc.c.

/* For every finalizer registered on common-size blocks: mark the
   finalizer's data field, and — for non-eager finalizers on objects
   that are unmarked but not free — mark everything reachable from the
   object itself (unless the set is atomic).  Finishes by draining the
   mark stack with collect(). */
{
  int i;

  for (i = 0; i < NUM_COMMON_SIZE; i++) {
    BlockOfMemory *block = blocks[i];
    for (; block; block = block->next) {
      Finalizer *fn = block->finalizers;
      for (; fn ; fn = fn->next) {
       unsigned long p;
         
       /* Always mark data associated with finalization: */
       p = PTR_TO_INT(&fn->data);
       PUSH_COLLECT(p, p + PTR_SIZE, 0);

       /* If not eager, mark data reachable from finalized block: */
       if (!fn->eager_level) {
         int pos, apos;
         int bit, fbit;

         /* Locate the object's mark/free bits within the block: */
         pos = fn->u.pos;
         apos = POS_TO_UNMARK_INDEX(pos);
         bit = POS_TO_UNMARK_BIT(pos);
         fbit = POS_TO_FREE_BIT(pos);
         
         if (NOT_MARKED(block->free[apos] & bit)
             && _NOT_FREE(block->free[apos] & fbit)) {
           int size = block->size;
           
           if (!atomic) {
             p = block->start + (pos * size);
             if (fn->ignore_self)
              push_collect_ignore(p, p + size, p);
             else {
              PUSH_COLLECT(p, p + size, 0);
             }

#if WATCH_FOR_FINALIZATION_CYCLES
             /* Debug mode: collect immediately so a self-reaching
               finalized object can be reported as a cycle. */
             collect();
             if (IS_MARKED(block->free[apos] & bit))
              FPRINTF(STDERR, "cycle: %lx\n", p);
#endif
           }
         }
       }
      }
    }
  }

  collect();
}

Here is the call graph for this function:

Here is the caller graph for this function:

static Tree* next ( Tree node) [static]

Definition at line 599 of file sgc.c.

/* In-order successor in a splay tree: the leftmost node of the right
   subtree, or NULL when there is no right subtree. */
{
  Tree *t = node->right;

  if (!t)
    return NULL;

  while (t->left)
    t = t->left;

  return t;
}
static void* platform_plain_sector ( int  count) [static]

Definition at line 979 of file sgc.c.

/* Default (MALLOC-based) platform sector source.  Allocates one extra
   sector per MALLOC call so the returned pointer can be rounded up to
   sector alignment.  Keeps a one-shot preallocated group of sectors
   and serves requests from it while it lasts; the alignment slack and
   any leftover prealloc are never returned to MALLOC. */
{
  static int prealloced;
  static void *preallocptr;

  if (!prealloced) {
    unsigned long d;

    /* First call: grab a whole group (or more, for a big request). */
    if (count <= (SECTOR_SEGMENT_GROUP_SIZE-1))
      prealloced = SECTOR_SEGMENT_GROUP_SIZE-1;
    else
      prealloced = count;

    preallocptr = MALLOC((prealloced + 1) << LOG_SECTOR_SEGMENT_SIZE);

    /* Round up to sector alignment (the +1 sector above pays for this): */
    d = ((unsigned long)preallocptr) & TABLE_LO_MASK;
    if (d)
      preallocptr = ((char *)preallocptr) + (SECTOR_SEGMENT_SIZE - d);
  }

  if (prealloced >= count) {
    /* Serve from the preallocated run: */
    void *r = preallocptr;

    prealloced -= count;
    preallocptr = ((char *)preallocptr) + (count << LOG_SECTOR_SEGMENT_SIZE);
    
    return r;
  }

  {
    /* Prealloc exhausted: direct MALLOC, again over-allocating by one
       sector for alignment. */
    unsigned long d;
    void *r;

    r = MALLOC((count + 1) << LOG_SECTOR_SEGMENT_SIZE);

    d = ((unsigned long)r) & TABLE_LO_MASK;
    if (d)
      r = ((char *)r) + (SECTOR_SEGMENT_SIZE - d);

    return r;
  }
}

Here is the caller graph for this function:

static void prepare_collect_temp ( ) [static]

Definition at line 1249 of file sgc.c.

/* Reset accounting for collection-time temporary memory: under sbrk
   the current break is saved (so it can be restored later); otherwise
   the running byte counter is simply zeroed. */
{
#if GET_MEM_VIA_SBRK
  save_brk = (char *)sbrk(0);
#else
  collect_mem_use = 0;
#endif
}

Here is the caller graph for this function:

static void prepare_stack_collect ( ) [static]

Definition at line 3602 of file sgc.c.

/* Conservatively scan the frame most recently pushed on the mark
   stack (pop it, re-push, and run the pointer-alignment scan).  When
   stack off-by-one protection is enabled, scan a second time shifted
   back by one alignment unit to catch misaligned stack words. */
{
  unsigned long s, e;
  unsigned long source;
#if KEEP_DETAIL_PATH
  source = collect_stack[--collect_stack_count];
#else
  source = 0;
#endif
  /* Pop the frame that push_stack just pushed: */
  e = collect_stack[--collect_stack_count];
  s = collect_stack[--collect_stack_count];
  /* Widen the end so a trailing partial word is still scanned: */
  e += PTR_ALIGNMENT - 1;

  PUSH_COLLECT(s, e, source);
  semi_collect_stack(0);

#if !NO_STACK_OFFBYONE
  PUSH_COLLECT(s, e, source);
  semi_collect_stack(-PTR_ALIGNMENT);
  /* Note: this nested-semi preparation can create trace paths of
     the form X->X->Y->Z->... */
#endif
}

Here is the caller graph for this function:

static void push_collect ( unsigned long  start,
unsigned long  end,
unsigned long  src 
) [static]

Definition at line 3455 of file sgc.c.

/* Push one frame [start, end) (plus its trace source, when source
   tracking is compiled in) onto the mark stack, growing the stack
   geometrically as needed. */
{
  /* Grow the mark stack if the next frame might not fit. */
  if (collect_stack_count >= collect_stack_size) {
    long prev_bytes = 0;

    if (collect_stack)
      prev_bytes = sizeof(unsigned long) * (collect_stack_size + (COLLECT_STACK_FRAME_SIZE - 1));

    if (collect_stack_size)
      collect_stack_size *= 2;
    else
      collect_stack_size = 500;

    collect_stack = (unsigned long *)
      realloc_collect_temp(collect_stack,
                           prev_bytes,
                           sizeof(unsigned long) * (collect_stack_size + (COLLECT_STACK_FRAME_SIZE - 1)));
  }

  collect_stack[collect_stack_count++] = start;
  collect_stack[collect_stack_count++] = end;
  PUSH_SRC(src)
}

Here is the call graph for this function:

static void push_collect_ignore ( unsigned long  s,
unsigned long  e,
unsigned long  a 
) [static]

Definition at line 3859 of file sgc.c.

/* Push the range [s, e) for marking, but skip any word whose value
   equals `a` (used so a finalized object does not keep itself alive
   through self-pointers).  The range is pushed as maximal runs of
   words between occurrences of `a`. */
{
  unsigned long push_from = s;

#if PAD_BOUNDARY_BYTES
  /* Compare against the client-visible (padded-forward) address: */
  a = PTR_TO_INT(PAD_FORWARD(INT_TO_PTR(a)));
#endif

  for (; s < e; s += PTR_ALIGNMENT) {
    void *d = *(void **)INT_TO_PTR(s);
    unsigned long p = PTR_TO_INT(d);

    if (p == a) {
      /* Flush the run preceding this self-pointer, then skip it: */
      if (push_from != s) {
       PUSH_COLLECT(push_from, s, a);
      }
      push_from = s + PTR_ALIGNMENT;
    }
  }

  /* Flush the final run, if any: */
  if (push_from != s) {
    PUSH_COLLECT(push_from, s, a);
  }
}

Here is the caller graph for this function:

static void push_stack ( void stack_now) [static]

Definition at line 3646 of file sgc.c.

/* Conservatively scan the C stack between GC_stackbottom and
   `stack_now` (in whichever direction the stack grows), then scan the
   register contents previously saved into the static jmp_buf `buf`.
   Each region is pushed and then swept via prepare_stack_collect(). */
{
  unsigned long start, end;

  start = PTR_TO_INT(GC_stackbottom);
  end = PTR_TO_INT(stack_now);

#if PRINT && STAMP_AND_REMEMBER_SOURCE
  FPRINTF(STDERR, "stack in [%lx, %lx]\n", start, end);
#endif

  /* Normalize so the pushed frame is [low, high): */
  if (start < end) {
    PUSH_COLLECT(start, end, 0);
  } else {
    PUSH_COLLECT(end, start, 0);
  }

#if DUMP_BLOCK_MAPS
  /* Remember the stack frame bounds for trace dumps: */
  trace_stack_start = collect_stack[collect_stack_count - COLLECT_STACK_FRAME_SIZE];
  trace_stack_end = collect_stack[collect_stack_count - (COLLECT_STACK_FRAME_SIZE - 1)];
#endif

  prepare_stack_collect();

  /* Now scan the saved registers (setjmp'ed into `buf` elsewhere): */
  start = PTR_TO_INT((void *)&buf);
  end = start + sizeof(buf);
  PUSH_COLLECT(start, end, 0);

#if DUMP_BLOCK_MAPS
  trace_reg_start = collect_stack[collect_stack_count - COLLECT_STACK_FRAME_SIZE];
  trace_reg_end = collect_stack[collect_stack_count - (COLLECT_STACK_FRAME_SIZE - 1)];
#endif

  prepare_stack_collect();

#if PRINT && STAMP_AND_REMEMBER_SOURCE
  FPRINTF(STDERR, "jmpbuf in [%lx, %lx]\n", start, end);
#endif
}

Here is the call graph for this function:

Here is the caller graph for this function:

static void push_uncollectable_chunk ( MemoryChunk c,
GC_Set set 
) [static]

Definition at line 3729 of file sgc.c.

/* Push every chunk in an uncollectable set's chunk list as a mark
   root.  In the trace-count configuration with a count_tracer, each
   chunk is instead traced individually (collect() per chunk) so the
   per-object reachable size can be reported to the tracer. */
{
#if ALLOW_TRACE_COUNT
  if (!collecting_with_trace_count
      || !c
      || !set->count_tracer) {
#endif
    /* Plain path: push every chunk's payload range. */
    for (; c; c = c->next) {
#if ALLOW_TRACE_COUNT
      if (!c->marked) {
       if (collecting_with_trace_count) {
         c->marked = 1;
         collect_trace_count += (c->end - c->start);
       }
       if (!set->atomic) {
#endif      
         PUSH_COLLECT(c->start, c->end, 0);
#if ALLOW_TRACE_COUNT
       }
      } else {
       /* It got marked the normal way; deduct the size. */
       mem_use -= (c->end - c->start);
      }
#endif
    }
#if ALLOW_TRACE_COUNT
  } else {
    /* Tracing path: measure reachable bytes per chunk and report via
       the set's count_tracer callback. */
    int save_count = collect_trace_count;
    for (; c; c = c->next) {
      if (!c->marked) {
       void *s;
       c->marked = 1;
       collect_trace_count = 0;
       if (!c->atomic) {
         PUSH_COLLECT(c->start, c->end, 0);
         collect();
       }
       collect_trace_count += (c->end - c->start);

       s = INT_TO_PTR(c->start);
#if PAD_BOUNDARY_BYTES
       s = PAD_FORWARD(s);
#endif
       set->count_tracer(s, collect_trace_count);
       mem_traced += collect_trace_count;
      } else {
       /* It got marked the normal way; deduct the size. */
       mem_use -= (c->end - c->start);
      }
    }
    collect_trace_count = save_count;
  }
#endif
}

Here is the caller graph for this function:

static void push_uncollectable_common ( BlockOfMemory **  blocks,
GC_Set set 
) [static]

Definition at line 3784 of file sgc.c.

/* Push every common-size block of an uncollectable set as mark roots.
   Without trace counting, whole blocks are pushed wholesale.  With
   trace counting, each live object is marked (its unmark bit cleared)
   and traced individually so reachable bytes can be attributed to it,
   optionally reported through set->count_tracer. */
{
  int i;

#if ALLOW_TRACE_COUNT
  if (!collecting_with_trace_count) {
#endif
    /* Plain path: push each block's allocated range. */
    for (i = 0; i < NUM_COMMON_SIZE; i++) {
      BlockOfMemory *block = blocks[i];
      
      while (block) {
       PUSH_COLLECT(block->start, block->top, 0);
       block = block->next;
      }
    }
#if ALLOW_TRACE_COUNT
  } else {
    int save_count = collect_trace_count;

    for (i = 0; i < NUM_COMMON_SIZE; i++) {
      BlockOfMemory *block = blocks[i];
      
      while (block) {
       unsigned long size = block->size;
       unsigned long start = block->start;
       unsigned long top = block->top;
       int j;
       
       /* Walk each object slot in the block: */
       for (j = 0; start < top; start += size, j++) {
         int bit;
         int pos;
         int fbit;

         pos = POS_TO_UNMARK_INDEX(j);
         bit = POS_TO_UNMARK_BIT(j);
         fbit = POS_TO_FREE_BIT(j);

         if (NOT_MARKED(block->free[pos] & bit)
             && _NOT_FREE(block->free[pos] & fbit)) {
           /* Unmarked and allocated: mark it here, then trace it. */
           block->free[pos] -= bit;
           if (set->count_tracer)
             collect_trace_count = 0;
           else
             collect_trace_count += size;
           if (!block->atomic) {
             PUSH_COLLECT(start, start + size, 0);
             collect();
           }
           if (set->count_tracer) {
             void *s;
             collect_trace_count += size;
             s = INT_TO_PTR(start);
#if PAD_BOUNDARY_BYTES
             s = PAD_FORWARD(s);
#endif
             set->count_tracer(s, collect_trace_count);
             mem_traced += collect_trace_count;
           }
         } else {
           /* It got marked the normal way; deduct the size. */
           mem_use -= size;
         }
       }

       block = block->next;
      }
    }

    if (set->count_tracer)
      collect_trace_count = save_count;
  }
#endif
}

Here is the caller graph for this function:

static void queue_chunk_finalizeable ( MemoryChunk c,
int  eager_level 
) [static]

Definition at line 3988 of file sgc.c.

/* Move finalizers of unmarked chunks at the given eagerness level
   onto the run queue.  The finalizer's watch field is repointed at the
   chunk so run_finalizers() can pass the object to the callback. */
{
  /* DO NOT COLLECT FROM collect_stack DURING THIS PROCEDURE */

  for (; c; c = c->next) {
    if (c->finalizers && !c->marked) {
      Finalizer *fn = c->finalizers;

      if (fn->eager_level == eager_level) {
       /* Detach from the chunk and enqueue for execution: */
       c->finalizers = NULL;

       fn->u.watch = INT_TO_PTR(c->start);
       enqueue_fn(fn);

       if (eager_level) {
         /* Always mark data associated with finalization: */
         unsigned long p = PTR_TO_INT(&fn->data);
         PUSH_COLLECT(p, p + PTR_SIZE, 0);
       }
      }
    }
  }
}

Here is the call graph for this function:

Here is the caller graph for this function:

static void queue_common_finalizeable ( BlockOfMemory **  blocks,
int  eager_level 
) [static]

Definition at line 4012 of file sgc.c.

/* Move finalizers of unmarked common-size objects at the given
   eagerness level from their block's finalizer list onto the run
   queue.  The local `next` (which shadows the file's next() splay
   helper) preserves the iteration link before fn is unlinked. */
{
  /* DO NOT COLLECT FROM collect_stack DURING THIS PROCEDURE */

  int i;
  
  for (i = 0; i < NUM_COMMON_SIZE; i++) {
    BlockOfMemory *block = blocks[i];
    for (; block; block = block->next) {
      Finalizer *fn = block->finalizers, *next;
      
      for (; fn; fn = next) {
       int pos, apos;
       int bit;
         
       next = fn->next;

       /* Locate this object's mark bit: */
       pos = fn->u.pos;
       apos = POS_TO_UNMARK_INDEX(pos);
       bit = POS_TO_UNMARK_BIT(pos);
       
       if (NOT_MARKED(block->free[apos] & bit)) {
         unsigned long p;
       
         if (fn->eager_level == eager_level) {
           /* Unlink fn from the block's doubly-linked list: */
           if (fn->prev)
             fn->prev->next = fn->next;
           else
             block->finalizers = fn->next;
           if (fn->next)
             fn->next->prev = fn->prev;
           
           /* Repoint the union at the object and enqueue: */
           p = block->start + (pos * block->size);
           fn->u.watch = INT_TO_PTR(p);
           enqueue_fn(fn);

           if (eager_level) {
             /* Always mark data associated with finalization: */
             p = PTR_TO_INT(&fn->data);
             PUSH_COLLECT(p, p + PTR_SIZE, 0);
           }
         }
       }
      }
    }
  }
}

Here is the call graph for this function:

Here is the caller graph for this function:

static void* realloc_collect_temp ( void v,
long  oldsize,
long  newsize 
) [static]

Definition at line 1258 of file sgc.c.

/* Reallocate a collection-time temporary buffer (e.g., the mark
   stack): obtain `newsize` bytes, copy the first `oldsize` bytes from
   `v`, release `v`, and return the new buffer.  Callers pass
   oldsize == 0 when v is NULL. */
{
#if GET_MEM_VIA_SBRK
  void *naya;

  naya = (void *)sbrk(newsize);
  if (v)  /* guard: memcpy from NULL is undefined even for length 0 */
    memcpy(naya, v, oldsize);
  /* Old sbrk space is abandoned; the break is restored elsewhere. */
  if (!v)
    c_refcount++;
  return naya;
#elif GET_MEM_VIA_MMAP
  void *naya;
  
  naya = platform_plain_sector((newsize + SECTOR_SEGMENT_SIZE - 1) >> LOG_SECTOR_SEGMENT_SIZE);
  if (v) {
    memcpy(naya, v, oldsize);
    /* Fixed: munmap() takes a length in BYTES; the old code passed the
       sector COUNT, unmapping far too little.  Round oldsize up to
       whole sectors instead. */
    munmap(v, ((oldsize + SECTOR_SEGMENT_SIZE - 1) >> LOG_SECTOR_SEGMENT_SIZE) << LOG_SECTOR_SEGMENT_SIZE);
  }

  return naya;
#elif GET_MEM_VIA_VIRTUAL_ALLOC
  void *naya;

  naya = VirtualAlloc(NULL, newsize, 
                    MEM_COMMIT | MEM_RESERVE,
                    PAGE_READWRITE);
  if (v) {
    memcpy(naya, v, oldsize);
    VirtualFree(v, 0, MEM_RELEASE);
  }

  return naya;
#else
  void *naya;

  naya = MALLOC(newsize);
  if (v)
    memcpy(naya, v, oldsize);
  FREE(v);
  collect_mem_use += newsize;
  return naya;
#endif
}

Here is the call graph for this function:

Here is the caller graph for this function:

static void register_disappearing_link ( void **  p,
void a,
int  late 
) [static]

Definition at line 2721 of file sgc.c.

/* Create a disappearing-link record for *p watching `a`, and push it
   onto the front of the appropriate doubly-linked list (the "late"
   list when `late` is set, otherwise the normal list).  Updates the
   GC_dl_entries and mem_real_use accounting. */
{
  DisappearingLink *link;

  link = (DisappearingLink *)malloc_managed(sizeof(DisappearingLink));

  if (late)
    link->kind = dl_late;
  else if (a)
    link->kind = dl_normal;
  else
    link->kind = dl_restored;

  link->watch = a;
  link->disappear = p;
  link->saved_value = NULL;

  /* Push onto the head of the selected list: */
  link->prev = NULL;
  if (late) {
    link->next = late_disappearing;
    late_disappearing = link;
  } else {
    link->next = disappearing;
    disappearing = link;
  }
  if (link->next)
    link->next->prev = link;

  GC_dl_entries++;
  mem_real_use += sizeof(DisappearingLink);
}

Here is the call graph for this function:

Here is the caller graph for this function:

static void register_finalizer ( void p,
void(*)(void *p, void *data f,
void data,
void(**)(void *p, void *data oldf,
void **  olddata,
int  eager_level,
int  ignore_self 
) [static]

Definition at line 2776 of file sgc.c.

/* Register, replace, or (when f is NULL) remove a finalizer for the
   object containing `p`.  Reports the previous callback/data through
   oldf/olddata.  Common-size objects keep a per-block finalizer list
   keyed by slot position; chunks hold at most one finalizer. */
{
  BlockOfMemory *block = NULL;
  MemoryChunk *chunk = NULL;
  int pos;

  if ((p = find_ptr(p, NULL, &block, &pos, &chunk, 0))) {
    Finalizer *fn;

    if (block) {
      /* Common-size object: find (and, if removing, unlink) the
        finalizer for this slot position. */
      fn = block->finalizers;
      while (fn && (fn->u.pos != pos))
       fn = fn->next;
      if (fn && !f) {
       if (fn->prev)
         fn->prev->next = fn->next;
       else
         block->finalizers = fn->next;
       if (fn->next)
         fn->next->prev = fn->prev;
      }
    } else {
      /* Chunk: a single finalizer slot. */
      fn = chunk->finalizers;
      if (fn && !f)
       chunk->finalizers = NULL;
    }

    /* Report the previous registration, if requested: */
    if (oldf)
      *oldf = (fn ? fn->f : NULL);
    if (olddata)
      *olddata = (fn ? fn->data : NULL);
    
    if (f) {
      /* Install or overwrite.  A pre-existing record is reused in
        place; a new one is linked at the head. */
      int isnaya = !fn;

      if (!fn) {
       fn = (Finalizer *)malloc_managed(sizeof(Finalizer));
       mem_real_use += sizeof(Finalizer);
       GC_fo_entries++;
      }

      fn->u.pos = pos;
      fn->f = f;
      fn->data = data;
      fn->eager_level = eager_level;
      fn->ignore_self = ignore_self;
      
      if (isnaya) {
       fn->prev = NULL;
       if (block) {
         fn->next = block->finalizers;
         if (fn->next)
           fn->next->prev = fn;
         block->finalizers = fn;
       } else {
         chunk->finalizers = fn;
         fn->next = NULL;
       }
      }
    } else if (fn) {
      /* Removal: the record was unlinked above; release it. */
      mem_real_use -= sizeof(Finalizer);
      free_managed(fn);
      --GC_fo_entries;
    }
  }
}

Here is the call graph for this function:

Here is the caller graph for this function:

static void register_sector ( void naya,
int  need,
long  kind 
) [static]

Definition at line 1039 of file sgc.c.

/* Record `need` sectors starting at `naya` in the sector page tables
   with the given kind, widening the plausible-address range used for
   fast pointer filtering.  Second-level page tables are created
   lazily. */
{
  unsigned long ns, orig_ns;
  int pagetableindex, pageindex, i;
  SectorPage *pagetable;
  DECL_SECTOR_PAGETABLES;

  orig_ns = ns = PTR_TO_INT(naya);
  /* Widen the [low, high) range of addresses worth examining: */
  if (!sector_low_plausible || (ns < sector_low_plausible))
    sector_low_plausible = ns;
  if (!sector_high_plausible 
      || (ns + (need << LOG_SECTOR_SEGMENT_SIZE) > sector_high_plausible))
    sector_high_plausible = ns + (need << LOG_SECTOR_SEGMENT_SIZE);

  /* Register pages as existing: */
  for (i = need; i--; ns += SECTOR_SEGMENT_SIZE) {
    GET_SECTOR_PAGETABLES(ns);
    pagetableindex = SECTOR_LOOKUP_PAGETABLE(ns);
    pagetable = sector_pagetables[pagetableindex];
    if (!pagetable) {
      /* Lazily allocate and clear a second-level table: */
      int j, c = (LOG_SECTOR_LOOKUP_PAGESIZE + LOG_SECTOR_PAGEREC_SIZE) - LOG_SECTOR_SEGMENT_SIZE;
      if (c < 0)
        c = 0;
      c = 1 << c;
      pagetable = (SectorPage *)malloc_plain_sector(c);
      sector_pagetables[pagetableindex] = pagetable;
      sector_admin_mem_use += (c << LOG_SECTOR_SEGMENT_SIZE);
      for (j = 0; j < SECTOR_LOOKUP_PAGESIZE; j++) {
       pagetable[j].start = 0; 
       pagetable[j].kind = sector_kind_free;
      }
    }

    /* Every page of the allocation points back at its first page: */
    pageindex = SECTOR_LOOKUP_PAGEPOS(ns);
    pagetable[pageindex].kind = kind;
    pagetable[pageindex].start = orig_ns;
  }
}

Here is the call graph for this function:

Here is the caller graph for this function:

static void remove_freepage ( SectorFreepage fp) [static]

Definition at line 611 of file sgc.c.

/* Remove a free page from all three freelist indexes: the by-start
   and by-end splay trees, and the by-size tree — where only one page
   per size is a top-level "representative" and pages of equal size
   hang off its same_size subtree. */
{
  /* Remove fp from freelists: */
  sector_freepage_by_start = splay_delete(fp->start, sector_freepage_by_start);
  sector_freepage_by_end = splay_delete(fp->end, sector_freepage_by_end);
  sector_freepage_by_size = splay(fp->size, sector_freepage_by_size);
  if (TREE_FP(sector_freepage_by_size) == fp) {
    /* This was the representative for its size; remove it. */
    sector_freepage_by_size = splay_delete(fp->size, sector_freepage_by_size);
    if (fp->same_size) {
      /* Promote one same-size page to be the new representative: */
      SectorFreepage *same;
      same = TREE_FP(fp->same_size);
      same->same_size = splay_delete(same->start, fp->same_size);
      sector_freepage_by_size = splay_insert(same->size, &same->by_size, sector_freepage_by_size);
    }
  } else {
    /* Not the top-level representative; remove it from the representative's
       same_size tree */
    SectorFreepage *same;
    same = TREE_FP(sector_freepage_by_size);
    same->same_size = splay_delete(fp->start, same->same_size);
  }
}

Here is the caller graph for this function:

static void run_finalizers ( void  ) [static]

Definition at line 4254 of file sgc.c.

/* Drain the queued-finalizer list, invoking each callback on its
   object and releasing the record.  A static guard prevents a nested
   pass when a finalizer itself triggers a collection. */
{
  static int in_progress = 0;
  Finalizer *fn;
  void *obj;

  /* don't allow nested finalizations */
  if (in_progress)
    return;
  in_progress++;

#if !NO_FINALIZING
  for (fn = queued_finalizers; fn; fn = queued_finalizers) {
    /* Unlink the head of the queue: */
    queued_finalizers = fn->next;
    if (!fn->next)
      last_queued_finalizer = NULL;
    --num_queued_finalizers;

    obj = fn->u.watch;

#if PAD_BOUNDARY_BYTES
    obj = PAD_FORWARD(obj);
#endif

    fn->f(obj, fn->data);

    /* Release the record and its accounting: */
    mem_real_use -= sizeof(Finalizer);
    free_managed(fn);
    --GC_fo_entries;
  }
#endif

  in_progress--;
}

Here is the call graph for this function:

Here is the caller graph for this function:

static void sort_and_merge_roots ( ) [static]

Definition at line 4222 of file sgc.c.

/* Occasionally sort the root ranges — stored as flat (start, end)
   pairs in `roots` — and merge pairs that touch or overlap, compacting
   the array in place.  Throttled by a static counter so the qsort cost
   is paid only every several invocations. */
{
  static int counter = 0;
  int i, offset, top;

  if (roots_count < 4)
    return;

  /* Only try this every 5 collections or so: */
  if (counter--)
    return;
  counter = 5;

  /* Each element of the sort is one (start, end) pair: */
  qsort(roots, roots_count >> 1, 2 * sizeof(unsigned long), compare_roots);
  /* `offset` counts slots freed by merging; surviving pairs shift left. */
  offset = 0;
  top = roots_count;
  for (i = 2; i < top; i += 2) {
    if ((roots[i - 2 - offset] <= roots[i])
       && ((roots[i - 1 - offset] + (PTR_ALIGNMENT - 1)) >= roots[i])) {
      /* merge: */
      if (roots[i + 1] > roots[i - 1 - offset])
       roots[i - 1 - offset] = roots[i + 1];
      offset += 2;
      roots_count -= 2;
    } else if (offset) {
      /* compact: */
      roots[i - offset] = roots[i];
      roots[i + 1 - offset] = roots[i + 1];
    }
  }
}

Here is the call graph for this function:

Here is the caller graph for this function:

static void trim_disappearing ( DisappearingLink **  disappearing_ptr) [static]

Definition at line 4103 of file sgc.c.

/* Drop disappearing links whose watched location belongs to an object
   that exists but is unmarked (about to be collected): unlink the
   record from the list and release it.  The (possibly new) list head
   is written back through disappearing_ptr. */
{
  DisappearingLink *dl, *next, *disappearing;

  disappearing = *disappearing_ptr;

  for (dl = disappearing; dl; dl = next) {
    int size;

    next = dl->next;
    
    /* find_ptr returns NULL for an unmarked object but still reports
       its size, which distinguishes "unmarked" from "not ours": */
    size = 0;
    if (!find_ptr(dl->disappear, &size, NULL, NULL, NULL, 0) && size) {
      /* Found it, but it was unmarked. Deregister disappearing. */
      if (dl->prev)
       dl->prev->next = dl->next;
      else
       disappearing = dl->next;
      if (dl->next)
       dl->next->prev = dl->prev;

      mem_real_use -= sizeof(DisappearingLink);
      free_managed(dl);
      --GC_dl_entries;
    }
  }

  *disappearing_ptr = disappearing;
}

Here is the call graph for this function:

Here is the caller graph for this function:


Variable Documentation

Definition at line 696 of file sgc.c.

Definition at line 699 of file sgc.c.

jmp_buf buf [static]

Definition at line 3632 of file sgc.c.

long collect_mem_use [static]

Definition at line 768 of file sgc.c.

unsigned long* collect_stack [static]

Definition at line 3441 of file sgc.c.

Definition at line 3439 of file sgc.c.

Definition at line 3440 of file sgc.c.

Definition at line 695 of file sgc.c.

Definition at line 702 of file sgc.c.

GC_Set** common_sets [static]

Definition at line 692 of file sgc.c.

Definition at line 749 of file sgc.c.

Definition at line 780 of file sgc.c.

Definition at line 779 of file sgc.c.

Definition at line 781 of file sgc.c.

Definition at line 717 of file sgc.c.

Definition at line 718 of file sgc.c.

Definition at line 760 of file sgc.c.

int(* GC_inital_root_skip)(void *, size_t)

Definition at line 464 of file sgc.c.

Definition at line 463 of file sgc.c.

Definition at line 466 of file sgc.c.

Definition at line 720 of file sgc.c.

Definition at line 721 of file sgc.c.

Definition at line 756 of file sgc.c.

unsigned long high_plausible [static]

Definition at line 754 of file sgc.c.

int initialized = 0 [static]

Definition at line 1585 of file sgc.c.

Definition at line 750 of file sgc.c.

Definition at line 749 of file sgc.c.

unsigned long low_plausible [static]

Definition at line 754 of file sgc.c.

long manage_mem_use [static]

Definition at line 766 of file sgc.c.

long manage_real_mem_use [static]

Definition at line 766 of file sgc.c.

Managed* managed [static]

Definition at line 1340 of file sgc.c.

long mem_limit = FIRST_GC_LIMIT [static]

Definition at line 758 of file sgc.c.

long mem_real_use [static]

Definition at line 763 of file sgc.c.

long mem_uncollectable_use [static]

Definition at line 763 of file sgc.c.

long mem_use [static]

Definition at line 758 of file sgc.c.

long num_blocks [static]

Definition at line 777 of file sgc.c.

long num_chunks [static]

Definition at line 776 of file sgc.c.

int num_common_sets [static]

Definition at line 693 of file sgc.c.

Definition at line 751 of file sgc.c.

long num_sector_allocs [static]

Definition at line 770 of file sgc.c.

long num_sector_frees [static]

Definition at line 770 of file sgc.c.

MemoryChunk* others [static]

Definition at line 699 of file sgc.c.

Definition at line 750 of file sgc.c.

unsigned long* roots [static]

Definition at line 799 of file sgc.c.

long roots_count [static]

Definition at line 797 of file sgc.c.

long roots_size [static]

Definition at line 798 of file sgc.c.

long sector_admin_mem_use [static]

Definition at line 765 of file sgc.c.

long sector_free_mem_use [static]

Definition at line 765 of file sgc.c.

Definition at line 594 of file sgc.c.

Definition at line 595 of file sgc.c.

Definition at line 593 of file sgc.c.

unsigned long sector_high_plausible [static]

Definition at line 753 of file sgc.c.

unsigned long sector_low_plausible [static]

Definition at line 753 of file sgc.c.

long sector_mem_use [static]

Definition at line 765 of file sgc.c.

Definition at line 874 of file sgc.c.

long* size_index_map [static]

Definition at line 805 of file sgc.c.

long* size_map [static]

Definition at line 806 of file sgc.c.

int statics_setup = 0 [static]

Definition at line 1569 of file sgc.c.

unsigned long trace_reg_end [static]

Definition at line 1825 of file sgc.c.

unsigned long trace_reg_start [static]

Definition at line 1825 of file sgc.c.

unsigned long trace_stack_end [static]

Definition at line 1825 of file sgc.c.

unsigned long trace_stack_start [static]

Definition at line 1825 of file sgc.c.

Definition at line 698 of file sgc.c.

Definition at line 700 of file sgc.c.

Definition at line 697 of file sgc.c.

Definition at line 700 of file sgc.c.

void* zero_ptr [static]

Definition at line 2119 of file sgc.c.