Back to index

lightning-sunbird  0.9+nobinonly
Classes | Defines | Typedefs | Functions | Variables
jsgc.c File Reference
#include "jsstddef.h"
#include <stdlib.h>
#include <string.h>
#include "jstypes.h"
#include "jsutil.h"
#include "jshash.h"
#include "jsapi.h"
#include "jsatom.h"
#include "jsbit.h"
#include "jsclist.h"
#include "jscntxt.h"
#include "jsconfig.h"
#include "jsdbgapi.h"
#include "jsexn.h"
#include "jsfun.h"
#include "jsgc.h"
#include "jsinterp.h"
#include "jsiter.h"
#include "jslock.h"
#include "jsnum.h"
#include "jsobj.h"
#include "jsscope.h"
#include "jsscript.h"
#include "jsstr.h"
#include "jsxml.h"

Go to the source code of this file.

Classes

struct  JSGCPageInfo
struct  JSGCArena
struct  JSPtrTableInfo
struct  JSGCRootHashEntry
struct  GCRootMapArgs
struct  JSTempCloseList
struct  JSGCLockHashEntry

Defines

#define GC_THINGS_SHIFT   13 /* 8KB for things on most platforms */
#define GC_THINGS_SIZE   JS_BIT(GC_THINGS_SHIFT)
#define GC_FLAGS_SIZE   (GC_THINGS_SIZE / sizeof(JSGCThing))
#define GC_PAGE_SHIFT   10
#define GC_PAGE_MASK   ((jsuword) JS_BITMASK(GC_PAGE_SHIFT))
#define GC_PAGE_SIZE   JS_BIT(GC_PAGE_SHIFT)
#define GC_PAGE_COUNT   (1 << (GC_THINGS_SHIFT - GC_PAGE_SHIFT))
#define GC_ARENA_SIZE   (offsetof(JSGCArena, base) + GC_THINGS_SIZE + GC_FLAGS_SIZE)
#define FIRST_THING_PAGE(a)   (((jsuword)(a)->base + GC_FLAGS_SIZE - 1) & ~GC_PAGE_MASK)
#define PAGE_TO_ARENA(pi)
#define PAGE_INDEX(pi)   ((size_t)((pi)->offsetInArena >> GC_PAGE_SHIFT))
#define THING_TO_PAGE(thing)   ((JSGCPageInfo *)((jsuword)(thing) & ~GC_PAGE_MASK))
#define PAGE_THING_GAP(n)   (((n) & ((n) - 1)) ? (GC_PAGE_SIZE % (n)) : (n))
#define GC_ITERATOR_TABLE_MIN   4
#define GC_ITERATOR_TABLE_LINEAR   1024
#define METER(x)   ((void) 0)
#define js_FinalizeDouble   NULL
#define GC_ROOTS_SIZE   256
#define GC_FINALIZE_LEN   1024
#define GC_RUNNING_CLOSE_HOOKS_PTR(cx)   (&(cx)->runtime->gcCloseState.runningCloseHook)
#define JS_PUSH_TEMP_CLOSE_LIST(cx, tempList)   JS_PUSH_TEMP_ROOT_MARKER(cx, mark_temp_close_list, &(tempList)->tvr)
#define JS_POP_TEMP_CLOSE_LIST(cx, tempList)
#define GC_TYPE_IS_STRING(t)
#define GC_TYPE_IS_XML(t)
#define GC_TYPE_IS_DEEP(t)   ((t) == GCX_OBJECT || GC_TYPE_IS_XML(t))
#define IS_DEEP_STRING(t, o)
#define GC_THING_IS_DEEP(t, o)   (GC_TYPE_IS_DEEP(t) || IS_DEEP_STRING(t, o))
#define RECURSION_TOO_DEEP()
#define GET_GAP_AND_CHUNK_SPAN(thingSize, thingsPerUnscannedChunk, pageGap)
#define GC_MARK_JSVALS(cx, len, vec, name)

Typedefs

typedef struct JSGCPageInfo JSGCPageInfo
typedef struct JSPtrTableInfo JSPtrTableInfo
typedef void(* GCFinalizeOp )(JSContext *cx, JSGCThing *thing)
typedef struct JSGCRootHashEntry JSGCRootHashEntry
typedef struct GCRootMapArgs GCRootMapArgs
typedef struct JSTempCloseList JSTempCloseList
typedef struct JSGCLockHashEntry JSGCLockHashEntry

Functions

 JS_STATIC_ASSERT (sizeof(JSGCThing)==sizeof(JSGCPageInfo))
 JS_STATIC_ASSERT (sizeof(JSGCThing) >=sizeof(JSObject))
 JS_STATIC_ASSERT (sizeof(JSGCThing) >=sizeof(JSString))
 JS_STATIC_ASSERT (sizeof(JSGCThing) >=sizeof(jsdouble))
 JS_STATIC_ASSERT (GC_FLAGS_SIZE >=GC_PAGE_SIZE)
 JS_STATIC_ASSERT (sizeof(JSStackHeader) >=2 *sizeof(jsval))
static size_t PtrTableCapacity (size_t count, const JSPtrTableInfo *info)
static void FreePtrTable (JSPtrTable *table, const JSPtrTableInfo *info)
static JSBool AddToPtrTable (JSContext *cx, JSPtrTable *table, const JSPtrTableInfo *info, void *ptr)
static void ShrinkPtrTable (JSPtrTable *table, const JSPtrTableInfo *info, size_t newCount)
static JSBool NewGCArena (JSRuntime *rt, JSGCArenaList *arenaList)
static void DestroyGCArena (JSRuntime *rt, JSGCArenaList *arenaList, JSGCArena **ap)
static void InitGCArenaLists (JSRuntime *rt)
static void FinishGCArenaLists (JSRuntime *rt)
uint8 * js_GetGCThingFlags (void *thing)
JSRuntime * js_GetGCStringRuntime (JSString *str)
JSBool js_IsAboutToBeFinalized (JSContext *cx, void *thing)
intN js_ChangeExternalStringFinalizer (JSStringFinalizeOp oldop, JSStringFinalizeOp newop)
JSBool js_InitGC (JSRuntime *rt, uint32 maxbytes)
void js_FinishGC (JSRuntime *rt)
JSBool js_AddRoot (JSContext *cx, void *rp, const char *name)
JSBool js_AddRootRT (JSRuntime *rt, void *rp, const char *name)
JSBool js_RemoveRoot (JSRuntime *rt, void *rp)
 js_gcroot_mapper (JSDHashTable *table, JSDHashEntryHdr *hdr, uint32 number, void *arg)
uint32 js_MapGCRoots (JSRuntime *rt, JSGCRootMapFun map, void *data)
JSBool js_RegisterCloseableIterator (JSContext *cx, JSObject *obj)
static void CloseIteratorStates (JSContext *cx)
void js_RegisterGenerator (JSContext *cx, JSGenerator *gen)
static JSBool CanScheduleCloseHook (JSGenerator *gen)
static JSBool ShouldDeferCloseHook (JSContext *cx, JSGenerator *gen, JSBool *defer)
static void FindAndMarkObjectsToClose (JSContext *cx, JSGCInvocationKind gckind, JSGenerator **todoQueueTail)
static JSGenerator ** MarkScheduledGenerators (JSContext *cx)
 mark_temp_close_list (JSContext *cx, JSTempValueRooter *tvr)
JSBool js_RunCloseHooks (JSContext *cx)
void * js_NewGCThing (JSContext *cx, uintN flags, size_t nbytes)
JSBool js_LockGCThing (JSContext *cx, void *thing)
JSBool js_LockGCThingRT (JSRuntime *rt, void *thing)
JSBool js_UnlockGCThingRT (JSRuntime *rt, void *thing)
static void gc_mark_atom_key_thing (void *thing, void *arg)
void js_MarkAtom (JSContext *cx, JSAtom *atom)
static void AddThingToUnscannedBag (JSRuntime *rt, void *thing, uint8 *flagp)
static void MarkGCThingChildren (JSContext *cx, void *thing, uint8 *flagp, JSBool shouldCheckRecursion)
static void ScanDelayedChildren (JSContext *cx)
void js_MarkGCThing (JSContext *cx, void *thing)
 gc_root_marker (JSDHashTable *table, JSDHashEntryHdr *hdr, uint32 num, void *arg)
 gc_lock_marker (JSDHashTable *table, JSDHashEntryHdr *hdr, uint32 num, void *arg)
void js_MarkStackFrame (JSContext *cx, JSStackFrame *fp)
static void MarkWeakRoots (JSContext *cx, JSWeakRoots *wr)
void js_GC (JSContext *cx, JSGCInvocationKind gckind)
void js_UpdateMallocCounter (JSContext *cx, size_t nbytes)

Variables

static const JSPtrTableInfo iteratorTableInfo
static GCFinalizeOp gc_finalizers [GCX_NTYPES]

Class Documentation

struct JSGCPageInfo

Definition at line 177 of file jsgc.c.

Class Members
jsuword offsetInArena
jsuword unscannedBitmap
struct JSGCArena

Definition at line 183 of file jsgc.c.

Collaboration diagram for JSGCArena:
Class Members
uint8 base
JSGCArenaList * list
JSGCArena * prev
JSGCArena * prevUnscanned
jsuword unscannedPages
struct JSPtrTableInfo

Definition at line 247 of file jsgc.c.

Class Members
uint16 linearGrowthThreshold
uint16 minCapacity
struct JSGCRootHashEntry

Definition at line 595 of file jsgc.c.

Class Members
JSDHashEntryHdr hdr
const char * name
void * root
struct GCRootMapArgs

Definition at line 893 of file jsgc.c.

Class Members
void * data
JSGCRootMapFun map
struct JSTempCloseList

Definition at line 1171 of file jsgc.c.

Collaboration diagram for JSTempCloseList:
Class Members
JSGenerator * head
JSTempValueRooter tvr
struct JSGCLockHashEntry

Definition at line 1620 of file jsgc.c.

Collaboration diagram for JSGCLockHashEntry:
Class Members
uint32 count
JSDHashEntryHdr hdr
const JSGCThing * thing

Define Documentation

Definition at line 198 of file jsgc.c.

Definition at line 195 of file jsgc.c.

#define GC_FINALIZE_LEN   1024

Definition at line 603 of file jsgc.c.

Definition at line 95 of file jsgc.c.

Definition at line 253 of file jsgc.c.

Definition at line 252 of file jsgc.c.

#define GC_MARK_JSVALS (   cx,
  len,
  vec,
  name 
)
Value:
JS_BEGIN_MACRO                                                            \
        jsval _v, *_vp, *_end;                                                \
                                                                              \
        for (_vp = vec, _end = _vp + len; _vp < _end; _vp++) {                \
            _v = *_vp;                                                        \
            if (JSVAL_IS_GCTHING(_v))                                         \
                GC_MARK(cx, JSVAL_TO_GCTHING(_v), name);                      \
        }                                                                     \
    JS_END_MACRO

Definition at line 2532 of file jsgc.c.

Definition at line 175 of file jsgc.c.

Definition at line 173 of file jsgc.c.

Definition at line 172 of file jsgc.c.

Definition at line 174 of file jsgc.c.

#define GC_ROOTS_SIZE   256

Definition at line 602 of file jsgc.c.

#define GC_RUNNING_CLOSE_HOOKS_PTR (   cx)    (&(cx)->runtime->gcCloseState.runningCloseHook)

Definition at line 1167 of file jsgc.c.

#define GC_THING_IS_DEEP (   t,
  o 
)    (GC_TYPE_IS_DEEP(t) || IS_DEEP_STRING(t, o))

Definition at line 1617 of file jsgc.c.

#define GC_THINGS_SHIFT   13 /* 8KB for things on most platforms */

Definition at line 92 of file jsgc.c.

Definition at line 94 of file jsgc.c.

#define GC_TYPE_IS_DEEP (   t)    ((t) == GCX_OBJECT || GC_TYPE_IS_XML(t))

Definition at line 1612 of file jsgc.c.

Value:
((t) == GCX_STRING ||                         \
                                 (t) >= GCX_EXTERNAL_STRING)

Definition at line 1608 of file jsgc.c.

Value:
((unsigned)((t) - GCX_NAMESPACE) <=           \
                                 (unsigned)(GCX_XML - GCX_NAMESPACE))

Definition at line 1610 of file jsgc.c.

#define GET_GAP_AND_CHUNK_SPAN (   thingSize,
  thingsPerUnscannedChunk,
  pageGap 
)
Value:
JS_BEGIN_MACRO                                                            \
        if (0 == ((thingSize) & ((thingSize) - 1))) {                         \
            pageGap = (thingSize);                                            \
            thingsPerUnscannedChunk = ((GC_PAGE_SIZE / (thingSize))           \
                                       + JS_BITS_PER_WORD - 1)                \
                                      >> JS_BITS_PER_WORD_LOG2;               \
        } else {                                                              \
            pageGap = GC_PAGE_SIZE % (thingSize);                             \
            thingsPerUnscannedChunk = JS_HOWMANY(GC_PAGE_SIZE / (thingSize),  \
                                                 JS_BITS_PER_WORD);           \
        }                                                                     \
    JS_END_MACRO

Definition at line 2195 of file jsgc.c.

#define IS_DEEP_STRING (   t,
  o 
)
Value:
(GC_TYPE_IS_STRING(t) &&                      \
                                 JSSTRING_IS_DEPENDENT((JSString *)(o)))

Definition at line 1614 of file jsgc.c.

Definition at line 528 of file jsgc.c.

#define JS_POP_TEMP_CLOSE_LIST (   cx,
  tempList 
)
Value:
JS_BEGIN_MACRO                                                            \
        JS_ASSERT((tempList)->tvr.u.marker == mark_temp_close_list);          \
        JS_POP_TEMP_ROOT(cx, &(tempList)->tvr);                               \
    JS_END_MACRO

Definition at line 1189 of file jsgc.c.

#define JS_PUSH_TEMP_CLOSE_LIST (   cx,
  tempList 
)    JS_PUSH_TEMP_ROOT_MARKER(cx, mark_temp_close_list, &(tempList)->tvr)

Definition at line 1186 of file jsgc.c.

#define METER (   x)    ((void) 0)

Definition at line 381 of file jsgc.c.

#define PAGE_INDEX (   pi)    ((size_t)((pi)->offsetInArena >> GC_PAGE_SHIFT))

Definition at line 205 of file jsgc.c.

#define PAGE_THING_GAP (   n)    (((n) & ((n) - 1)) ? (GC_PAGE_SIZE % (n)) : (n))

Definition at line 221 of file jsgc.c.

Value:
((JSGCArena *)((jsuword)(pi) - (pi)->offsetInArena                        \
                   - offsetof(JSGCArena, base)))

Definition at line 201 of file jsgc.c.

Value:
(shouldCheckRecursion &&                        \
                               !JS_CHECK_STACK_SIZE(cx, stackDummy))

Definition at line 208 of file jsgc.c.


Typedef Documentation

Definition at line 525 of file jsgc.c.

typedef struct GCRootMapArgs GCRootMapArgs
typedef struct JSGCPageInfo JSGCPageInfo

Function Documentation

/*
 * AddThingToUnscannedBag -- defined at line 2210 of jsgc.c.
 *
 * Defer scanning of |thing|'s children: instead of recursing during marking,
 * record the thing in the "unscanned bag" so ScanDelayedChildren can process
 * it later (this bounds mark-phase stack depth).
 *
 * The thing must already be marked (GCF_MARK set, GCF_FINAL clear); this
 * routine sets GCF_FINAL as the "in the bag" tag.  It then sets the bit for
 * the thing's chunk in its page's unscannedBitmap, the bit for the page in
 * the arena's unscannedPages, and pushes the arena on the runtime-wide stack
 * rt->gcUnscannedArenaStackTop if it is not already there.  The bottom stack
 * element points to itself so that prevUnscanned != NULL distinguishes
 * pushed arenas from unpushed ones.
 */
static void
AddThingToUnscannedBag(JSRuntime *rt, void *thing, uint8 *flagp)
{
    JSGCPageInfo *pi;
    JSGCArena *arena;
    size_t thingSize;
    size_t thingsPerUnscannedChunk;
    size_t pageGap;
    size_t chunkIndex;
    jsuword bit;

    /* Things from delayed scanning bag are marked as GCF_MARK | GCF_FINAL. */
    JS_ASSERT((*flagp & (GCF_MARK | GCF_FINAL)) == GCF_MARK);
    *flagp |= GCF_FINAL;

    METER(rt->gcStats.unscanned++);
#ifdef DEBUG
    ++rt->gcUnscannedBagSize;
    METER(if (rt->gcUnscannedBagSize > rt->gcStats.maxunscanned)
              rt->gcStats.maxunscanned = rt->gcUnscannedBagSize);
#endif

    /* Locate the thing's page and arena, then compute its chunk bit. */
    pi = THING_TO_PAGE(thing);
    arena = PAGE_TO_ARENA(pi);
    thingSize = arena->list->thingSize;
    GET_GAP_AND_CHUNK_SPAN(thingSize, thingsPerUnscannedChunk, pageGap);
    chunkIndex = (((jsuword)thing & GC_PAGE_MASK) - pageGap) /
                 (thingSize * thingsPerUnscannedChunk);
    JS_ASSERT(chunkIndex < JS_BITS_PER_WORD);
    bit = (jsuword)1 << chunkIndex;
    if (pi->unscannedBitmap != 0) {
        JS_ASSERT(rt->gcUnscannedArenaStackTop);
        if (thingsPerUnscannedChunk != 1) {
            if (pi->unscannedBitmap & bit) {
                /* Chunk already contains things to scan later. */
                return;
            }
        } else {
            /*
             * The chunk must not contain things to scan later if there is
             * only one thing per chunk.
             */
            JS_ASSERT(!(pi->unscannedBitmap & bit));
        }
        pi->unscannedBitmap |= bit;
        JS_ASSERT(arena->unscannedPages & ((size_t)1 << PAGE_INDEX(pi)));
    } else {
        /*
         * The thing is the first unscanned thing in the page, set the bit
         * corresponding to this page arena->unscannedPages.
         */
        pi->unscannedBitmap = bit;
        JS_ASSERT(PAGE_INDEX(pi) < JS_BITS_PER_WORD);
        bit = (jsuword)1 << PAGE_INDEX(pi);
        JS_ASSERT(!(arena->unscannedPages & bit));
        if (arena->unscannedPages != 0) {
            arena->unscannedPages |= bit;
            JS_ASSERT(arena->prevUnscanned);
            JS_ASSERT(rt->gcUnscannedArenaStackTop);
        } else {
            /*
             * The thing is the first unscanned thing in the whole arena, push
             * the arena on the stack of unscanned arenas unless the arena
             * has already been pushed. We detect that through prevUnscanned
             * field which is NULL only for not yet pushed arenas. To ensure
             * that prevUnscanned != NULL even when the stack contains one
             * element, we make prevUnscanned for the arena at the bottom
             * to point to itself.
             *
             * See comments in ScanDelayedChildren.
             */
            arena->unscannedPages = bit;
            if (!arena->prevUnscanned) {
                if (!rt->gcUnscannedArenaStackTop) {
                    /* Stack was empty, mark the arena as bottom element. */
                    arena->prevUnscanned = arena;
                } else {
                    JS_ASSERT(rt->gcUnscannedArenaStackTop->prevUnscanned);
                    arena->prevUnscanned = rt->gcUnscannedArenaStackTop;
                }
                rt->gcUnscannedArenaStackTop = arena;
            }
        }
    }
    JS_ASSERT(rt->gcUnscannedArenaStackTop);
}

Here is the caller graph for this function:

/*
 * AddToPtrTable -- defined at line 298 of jsgc.c.
 *
 * Append |ptr| to |table|, growing table->array when count has reached the
 * capacity implied by |info|: start at info->minCapacity, double while below
 * info->linearGrowthThreshold, then grow linearly by that threshold.
 *
 * Returns JS_TRUE on success.  On allocation failure or capacity overflow,
 * reports out-of-memory on |cx| and returns JS_FALSE; the table is left
 * unchanged in that case (realloc failure keeps the old array valid).
 */
static JSBool
AddToPtrTable(JSContext *cx, JSPtrTable *table, const JSPtrTableInfo *info,
              void *ptr)
{
    size_t count, capacity;
    void **array;

    count = table->count;
    capacity = PtrTableCapacity(count, info);

    if (count == capacity) {
        if (capacity < info->minCapacity) {
            JS_ASSERT(capacity == 0);
            JS_ASSERT(!table->array);
            capacity = info->minCapacity;
        } else {
            /*
             * Simplify the overflow detection assuming pointer is bigger
             * than byte.
             */
            JS_STATIC_ASSERT(2 <= sizeof table->array[0]);
            capacity = (capacity < info->linearGrowthThreshold)
                       ? 2 * capacity
                       : capacity + info->linearGrowthThreshold;
            if (capacity > (size_t)-1 / sizeof table->array[0])
                goto bad;
        }
        array = (void **) realloc(table->array,
                                  capacity * sizeof table->array[0]);
        if (!array)
            goto bad;
#ifdef DEBUG
        /* Poison the not-yet-used tail so stale reads are detectable. */
        memset(array + count, JS_FREE_PATTERN,
               (capacity - count) * sizeof table->array[0]);
#endif
        table->array = array;
    }

    table->array[count] = ptr;
    table->count = count + 1;

    return JS_TRUE;

  bad:
    JS_ReportOutOfMemory(cx);
    return JS_FALSE;
}

Here is the call graph for this function:

Here is the caller graph for this function:

/*
 * CanScheduleCloseHook -- defined at line 1005 of jsgc.c.
 *
 * Return JS_TRUE when the close hook for the unreachable generator |gen| may
 * still be scheduled: the generator object's parent must itself be marked
 * (reachable), otherwise there is no live scope to run the hook in.
 */
static JSBool
CanScheduleCloseHook(JSGenerator *gen)
{
    JSObject *parent;
    JSBool canSchedule;

    /* Avoid OBJ_GET_PARENT overhead as we are in GC. */
    parent = JSVAL_TO_OBJECT(gen->obj->slots[JSSLOT_PARENT]);
    canSchedule = *js_GetGCThingFlags(parent) & GCF_MARK;
#ifdef DEBUG_igor
    if (!canSchedule) {
        fprintf(stderr, "GEN: Kill without schedule, gen=%p parent=%p\n",
                (void *)gen, (void *)parent);
    }
#endif
    return canSchedule;
}

Here is the call graph for this function:

Here is the caller graph for this function:

/*
 * CloseIteratorStates -- defined at line 954 of jsgc.c.
 *
 * Sweep the runtime's registered-iterator table: close the native iterator
 * state of every iterator object that is about to be finalized, compact the
 * survivors to the front of the array, and shrink the table to the new count.
 */
static void
CloseIteratorStates(JSContext *cx)
{
    JSRuntime *rt;
    size_t count, newCount, i;
    void **array;
    JSObject *obj;

    rt = cx->runtime;
    count = rt->gcIteratorTable.count;
    array = rt->gcIteratorTable.array;

    newCount = 0;
    for (i = 0; i != count; ++i) {
        obj = (JSObject *)array[i];
        if (js_IsAboutToBeFinalized(cx, obj))
            js_CloseIteratorState(cx, obj);
        else
            array[newCount++] = obj;   /* keep live iterators, compacted */
    }
    ShrinkPtrTable(&rt->gcIteratorTable, &iteratorTableInfo, newCount);
}

Here is the call graph for this function:

Here is the caller graph for this function:

/*
 * DestroyGCArena -- defined at line 429 of jsgc.c.
 *
 * Unlink and free the arena *ap from |arenaList|, debiting GC_ARENA_SIZE
 * from rt->gcBytes (for the object arena list, gcArenaList[0]) or from
 * rt->gcPrivateBytes (all other lists).  If the arena being destroyed is the
 * list's last arena, lastLimit is reset so allocation bookkeeping stays
 * consistent.  *ap is updated to the previous arena in the chain.
 */
static void
DestroyGCArena(JSRuntime *rt, JSGCArenaList *arenaList, JSGCArena **ap)
{
    JSGCArena *a;
    uint32 *bytesptr;

    a = *ap;
    JS_ASSERT(a);
    bytesptr = (arenaList == &rt->gcArenaList[0])
               ? &rt->gcBytes
               : &rt->gcPrivateBytes;
    JS_ASSERT(*bytesptr >= GC_ARENA_SIZE);
    *bytesptr -= GC_ARENA_SIZE;
    METER(rt->gcStats.afree++);
    METER(--arenaList->stats.narenas);
    if (a == arenaList->last)
        arenaList->lastLimit = (uint16)(a->prev ? GC_THINGS_SIZE : 0);
    *ap = a->prev;

#ifdef DEBUG
    /* Poison the arena so use-after-free is detectable. */
    memset(a, JS_FREE_PATTERN, GC_ARENA_SIZE);
#endif
    free(a);
}

Here is the call graph for this function:

Here is the caller graph for this function:

/*
 * FindAndMarkObjectsToClose -- defined at line 1075 of jsgc.c.
 *
 * Walk gcCloseState.reachableList after the mark phase.  Generators whose
 * objects are marked stay on the list.  Unmarked (unreachable) generators are
 * removed; those that yielded inside a try-with-finally and whose close hook
 * can still be scheduled (see CanScheduleCloseHook) are appended through
 * |todoQueueTail| to the close-hook todo queue.  Except on the last-context
 * GC -- where scheduled hooks are dropped because no script may run -- the
 * newly scheduled generators are then marked, after the whole list has been
 * scanned so that one unreachable generator cannot keep others reachable.
 */
static void
FindAndMarkObjectsToClose(JSContext *cx, JSGCInvocationKind gckind,
                          JSGenerator **todoQueueTail)
{
    JSRuntime *rt;
    JSGenerator *todo, **genp, *gen;

    rt = cx->runtime;
    todo = NULL;
    genp = &rt->gcCloseState.reachableList;
    while ((gen = *genp) != NULL) {
        if (*js_GetGCThingFlags(gen->obj) & GCF_MARK) {
            genp = &gen->next;
        } else {
            /* Generator must not be executing when it becomes unreachable. */
            JS_ASSERT(gen->state == JSGEN_NEWBORN ||
                      gen->state == JSGEN_OPEN ||
                      gen->state == JSGEN_CLOSED);

            *genp = gen->next;
            if (gen->state == JSGEN_OPEN &&
                js_FindFinallyHandler(gen->frame.script, gen->frame.pc) &&
                CanScheduleCloseHook(gen)) {
                /*
                 * Generator yielded inside a try with a finally block.
                 * Schedule it for closing.
                 *
                 * We keep generators that yielded outside try-with-finally
                 * with gen->state == JSGEN_OPEN. The finalizer must deal with
                 * open generators as we may skip the close hooks, see below.
                 */
                gen->next = NULL;
                *todoQueueTail = gen;
                todoQueueTail = &gen->next;
                if (!todo)
                    todo = gen;
                METER(JS_ASSERT(rt->gcStats.nclose));
                METER(rt->gcStats.nclose--);
                METER(rt->gcStats.closelater++);
                METER(rt->gcStats.maxcloselater
                      = JS_MAX(rt->gcStats.maxcloselater,
                               rt->gcStats.closelater));
            }
        }
    }

    if (gckind == GC_LAST_CONTEXT) {
        /*
         * Remove scheduled hooks on shutdown as it is too late to run them:
         * we do not allow execution of arbitrary scripts at this point.
         */
        rt->gcCloseState.todoQueue = NULL;
    } else {
        /*
         * Mark just-found unreachable generators *after* we scan the global
         * list to prevent a generator that refers to other unreachable
         * generators from keeping them on gcCloseState.reachableList.
         */
        for (gen = todo; gen; gen = gen->next)
            GC_MARK(cx, gen->obj, "newly scheduled generator");
    }
}

Here is the call graph for this function:

Here is the caller graph for this function:

/*
 * FinishGCArenaLists -- defined at line 472 of jsgc.c.
 *
 * Tear down all GC arena free lists at runtime shutdown: destroy every arena
 * on each of the GC_NUM_FREELISTS lists and clear the per-list free list.
 */
static void
FinishGCArenaLists(JSRuntime *rt)
{
    uintN i;
    JSGCArenaList *arenaList;

    for (i = 0; i < GC_NUM_FREELISTS; i++) {
        arenaList = &rt->gcArenaList[i];
        /* DestroyGCArena advances arenaList->last to the previous arena. */
        while (arenaList->last)
            DestroyGCArena(rt, arenaList, &arenaList->last);
        arenaList->freeList = NULL;
    }
}

Here is the call graph for this function:

Here is the caller graph for this function:

/*
 * FreePtrTable -- defined at line 286 of jsgc.c.
 *
 * Release the storage of |table| and reset it to the empty state.  |info| is
 * accepted for interface symmetry with the other JSPtrTable helpers; this
 * function does not read it.
 */
static void
FreePtrTable(JSPtrTable *table, const JSPtrTableInfo *info)
{
    if (table->array) {
        JS_ASSERT(table->count > 0);
        free(table->array);
        table->array = NULL;
        table->count = 0;
    }
    JS_ASSERT(table->count == 0);
}

Here is the caller graph for this function:

gc_lock_marker ( JSDHashTable table,
JSDHashEntryHdr hdr,
uint32  num,
void arg 
)

Definition at line 2522 of file jsgc.c.

{
    JSGCLockHashEntry *lhe = (JSGCLockHashEntry *)hdr;
    void *thing = (void *)lhe->thing;
    JSContext *cx = (JSContext *)arg;

    GC_MARK(cx, thing, "locked object");
    return JS_DHASH_NEXT;
}

Here is the caller graph for this function:

static void gc_mark_atom_key_thing ( void thing,
void arg 
) [static]

Definition at line 1970 of file jsgc.c.

{
    JSContext *cx = (JSContext *) arg;

    GC_MARK(cx, thing, "atom");
}

Here is the caller graph for this function:

gc_root_marker ( JSDHashTable table,
JSDHashEntryHdr hdr,
uint32  num,
void arg 
)

Definition at line 2478 of file jsgc.c.

{
    JSGCRootHashEntry *rhe = (JSGCRootHashEntry *)hdr;
    jsval *rp = (jsval *)rhe->root;
    jsval v = *rp;

    /* Ignore null object and scalar values. */
    if (!JSVAL_IS_NULL(v) && JSVAL_IS_GCTHING(v)) {
        JSContext *cx = (JSContext *)arg;
#ifdef DEBUG
        JSBool root_points_to_gcArenaList = JS_FALSE;
        jsuword thing = (jsuword) JSVAL_TO_GCTHING(v);
        uintN i;
        JSGCArenaList *arenaList;
        JSGCArena *a;
        size_t limit;

        for (i = 0; i < GC_NUM_FREELISTS; i++) {
            arenaList = &cx->runtime->gcArenaList[i];
            limit = arenaList->lastLimit;
            for (a = arenaList->last; a; a = a->prev) {
                if (thing - FIRST_THING_PAGE(a) < limit) {
                    root_points_to_gcArenaList = JS_TRUE;
                    break;
                }
                limit = GC_THINGS_SIZE;
            }
        }
        if (!root_points_to_gcArenaList && rhe->name) {
            fprintf(stderr,
"JS API usage error: the address passed to JS_AddNamedRoot currently holds an\n"
"invalid jsval.  This is usually caused by a missing call to JS_RemoveRoot.\n"
"The root's name is \"%s\".\n",
                    rhe->name);
        }
        JS_ASSERT(root_points_to_gcArenaList);
#endif

        GC_MARK(cx, JSVAL_TO_GCTHING(v), rhe->name ? rhe->name : "root");
    }
    return JS_DHASH_NEXT;
}

Here is the call graph for this function:

Here is the caller graph for this function:

/*
 * InitGCArenaLists -- defined at line 454 of jsgc.c.
 *
 * Initialize every GC arena free list in |rt| to the empty state, recording
 * each list's fixed thing size (GC_FREELIST_NBYTES(i), which must fit in the
 * uint16 thingSize field) and zeroing metering stats when enabled.
 */
static void
InitGCArenaLists(JSRuntime *rt)
{
    uintN i, thingSize;
    JSGCArenaList *arenaList;

    for (i = 0; i < GC_NUM_FREELISTS; i++) {
        arenaList = &rt->gcArenaList[i];
        thingSize = GC_FREELIST_NBYTES(i);
        JS_ASSERT((size_t)(uint16)thingSize == thingSize);
        arenaList->last = NULL;
        arenaList->lastLimit = 0;
        arenaList->thingSize = (uint16)thingSize;
        arenaList->freeList = NULL;
        METER(memset(&arenaList->stats, 0, sizeof arenaList->stats));
    }
}

Here is the call graph for this function:

Here is the caller graph for this function:

/*
 * js_AddRoot -- defined at line 751 of jsgc.c.
 *
 * Context-taking wrapper around js_AddRootRT: registers *rp as a named GC
 * root in cx's runtime.  Reports out-of-memory on |cx| when the runtime-level
 * call fails, and returns its JSBool result.
 */
JSBool
js_AddRoot(JSContext *cx, void *rp, const char *name)
{
    JSBool ok = js_AddRootRT(cx->runtime, rp, name);
    if (!ok)
        JS_ReportOutOfMemory(cx);
    return ok;
}

Here is the call graph for this function:

Here is the caller graph for this function:

/*
 * js_AddRootRT -- defined at line 760 of jsgc.c.
 *
 * Register the address |rp| as a GC root in |rt|'s root hashtable, tagged
 * with the diagnostic |name| (may be NULL).  Takes the GC lock, and in
 * threadsafe builds waits for a GC running on another thread to finish
 * before touching the table.  Returns JS_FALSE on hashtable allocation
 * failure (no error is reported here; see js_AddRoot).
 */
JSBool
js_AddRootRT(JSRuntime *rt, void *rp, const char *name)
{
    JSBool ok;
    JSGCRootHashEntry *rhe;

    /*
     * Due to the long-standing, but now removed, use of rt->gcLock across the
     * bulk of js_GC, API users have come to depend on JS_AddRoot etc. locking
     * properly with a racing GC, without calling JS_AddRoot from a request.
     * We have to preserve API compatibility here, now that we avoid holding
     * rt->gcLock across the mark phase (including the root hashtable mark).
     *
     * If the GC is running and we're called on another thread, wait for this
     * GC activation to finish.  We can safely wait here (in the case where we
     * are called within a request on another thread's context) without fear
     * of deadlock because the GC doesn't set rt->gcRunning until after it has
     * waited for all active requests to end.
     */
    JS_LOCK_GC(rt);
#ifdef JS_THREADSAFE
    JS_ASSERT(!rt->gcRunning || rt->gcLevel > 0);
    if (rt->gcRunning && rt->gcThread->id != js_CurrentThreadId()) {
        do {
            JS_AWAIT_GC_DONE(rt);
        } while (rt->gcLevel > 0);
    }
#endif
    rhe = (JSGCRootHashEntry *) JS_DHashTableOperate(&rt->gcRootsHash, rp,
                                                     JS_DHASH_ADD);
    if (rhe) {
        rhe->root = rp;
        rhe->name = name;
        ok = JS_TRUE;
    } else {
        ok = JS_FALSE;
    }
    JS_UNLOCK_GC(rt);
    return ok;
}

Here is the caller graph for this function:

/*
 * js_ChangeExternalStringFinalizer -- defined at line 580 of jsgc.c.
 *
 * Replace the first external-string finalizer slot (GCX_EXTERNAL_STRING ..
 * GCX_NTYPES-1) currently holding |oldop| with |newop|.  Passing oldop ==
 * NULL therefore claims a free slot; newop == NULL releases one.  Returns
 * the GC-type index of the changed slot, or -1 if no slot holds |oldop|.
 */
intN
js_ChangeExternalStringFinalizer(JSStringFinalizeOp oldop,
                                 JSStringFinalizeOp newop)
{
    uintN i;

    for (i = GCX_EXTERNAL_STRING; i < GCX_NTYPES; i++) {
        if (gc_finalizers[i] == (GCFinalizeOp) oldop) {
            gc_finalizers[i] = (GCFinalizeOp) newop;
            return (intN) i;
        }
    }
    return -1;
}

Here is the caller graph for this function:

Definition at line 720 of file jsgc.c.

{
#ifdef JS_ARENAMETER
    JS_DumpArenaStats(stdout);
#endif
#ifdef JS_GCMETER
    js_DumpGCStats(rt, stdout);
#endif

    FreePtrTable(&rt->gcIteratorTable, &iteratorTableInfo);
#if JS_HAS_GENERATORS
    rt->gcCloseState.reachableList = NULL;
    METER(rt->gcStats.nclose = 0);
    rt->gcCloseState.todoQueue = NULL;
#endif
    FinishGCArenaLists(rt);

    if (rt->gcRootsHash.ops) {
#ifdef DEBUG
        CheckLeakedRoots(rt);
#endif
        JS_DHashTableFinish(&rt->gcRootsHash);
        rt->gcRootsHash.ops = NULL;
    }
    if (rt->gcLocksHash) {
        JS_DHashTableDestroy(rt->gcLocksHash);
        rt->gcLocksHash = NULL;
    }
}

Here is the call graph for this function:

Here is the caller graph for this function:

void js_GC ( JSContext cx,
JSGCInvocationKind  gckind 
)

Definition at line 2645 of file jsgc.c.

{
    JSRuntime *rt;
    JSBool keepAtoms;
    uintN i, type;
    JSContext *iter, *acx;
#if JS_HAS_GENERATORS
    JSGenerator **genTodoTail;
#endif
    JSStackFrame *fp, *chain;
    JSStackHeader *sh;
    JSTempValueRooter *tvr;
    size_t nbytes, limit, offset;
    JSGCArena *a, **ap;
    uint8 flags, *flagp, *firstPage;
    JSGCThing *thing, *freeList;
    JSGCArenaList *arenaList;
    GCFinalizeOp finalizer;
    JSBool allClear;
#ifdef JS_THREADSAFE
    uint32 requestDebit;
#endif

    rt = cx->runtime;
#ifdef JS_THREADSAFE
    /* Avoid deadlock. */
    JS_ASSERT(!JS_IS_RUNTIME_LOCKED(rt));
#endif

    if (gckind == GC_LAST_DITCH) {
        /* The last ditch GC preserves all atoms and weak roots. */
        keepAtoms = JS_TRUE;
    } else {
        JS_CLEAR_WEAK_ROOTS(&cx->weakRoots);
        rt->gcPoke = JS_TRUE;

        /* Keep atoms when a suspended compile is running on another context. */
        keepAtoms = (rt->gcKeepAtoms != 0);
    }

    /*
     * Don't collect garbage if the runtime isn't up, and cx is not the last
     * context in the runtime.  The last context must force a GC, and nothing
     * should suppress that final collection or there may be shutdown leaks,
     * or runtime bloat until the next context is created.
     */
    if (rt->state != JSRTS_UP && gckind != GC_LAST_CONTEXT)
        return;

  restart_after_callback:
    /*
     * Let the API user decide to defer a GC if it wants to (unless this
     * is the last context).  Invoke the callback regardless.
     */
    if (rt->gcCallback &&
        !rt->gcCallback(cx, JSGC_BEGIN) &&
        gckind != GC_LAST_CONTEXT) {
        return;
    }

    /* Lock out other GC allocator and collector invocations. */
    if (gckind != GC_LAST_DITCH)
        JS_LOCK_GC(rt);

    /* Do nothing if no mutator has executed since the last GC. */
    if (!rt->gcPoke) {
        METER(rt->gcStats.nopoke++);
        if (gckind != GC_LAST_DITCH)
            JS_UNLOCK_GC(rt);
        return;
    }
    METER(rt->gcStats.poke++);
    rt->gcPoke = JS_FALSE;

#ifdef JS_THREADSAFE
    JS_ASSERT(cx->thread->id == js_CurrentThreadId());

    /* Bump gcLevel and return rather than nest on this thread. */
    if (rt->gcThread == cx->thread) {
        JS_ASSERT(rt->gcLevel > 0);
        rt->gcLevel++;
        METER(if (rt->gcLevel > rt->gcStats.maxlevel)
                  rt->gcStats.maxlevel = rt->gcLevel);
        if (gckind != GC_LAST_DITCH)
            JS_UNLOCK_GC(rt);
        return;
    }

    /*
     * If we're in one or more requests (possibly on more than one context)
     * running on the current thread, indicate, temporarily, that all these
     * requests are inactive.  If cx->thread is NULL, then cx is not using
     * the request model, and does not contribute to rt->requestCount.
     */
    requestDebit = 0;
    if (cx->thread) {
        JSCList *head, *link;

        /*
         * Check all contexts on cx->thread->contextList for active requests,
         * counting each such context against requestDebit.
         */
        head = &cx->thread->contextList;
        for (link = head->next; link != head; link = link->next) {
            acx = CX_FROM_THREAD_LINKS(link);
            JS_ASSERT(acx->thread == cx->thread);
            if (acx->requestDepth)
                requestDebit++;
        }
    } else {
        /*
         * We assert, but check anyway, in case someone is misusing the API.
         * Avoiding the loop over all of rt's contexts is a win in the event
         * that the GC runs only on request-less contexts with null threads,
         * in a special thread such as might be used by the UI/DOM/Layout
         * "mozilla" or "main" thread in Mozilla-the-browser.
         */
        JS_ASSERT(cx->requestDepth == 0);
        if (cx->requestDepth)
            requestDebit = 1;
    }
    if (requestDebit) {
        JS_ASSERT(requestDebit <= rt->requestCount);
        rt->requestCount -= requestDebit;
        if (rt->requestCount == 0)
            JS_NOTIFY_REQUEST_DONE(rt);
    }

    /* If another thread is already in GC, don't attempt GC; wait instead. */
    if (rt->gcLevel > 0) {
        /* Bump gcLevel to restart the current GC, so it finds new garbage. */
        rt->gcLevel++;
        METER(if (rt->gcLevel > rt->gcStats.maxlevel)
                  rt->gcStats.maxlevel = rt->gcLevel);

        /* Wait for the other thread to finish, then resume our request. */
        while (rt->gcLevel > 0)
            JS_AWAIT_GC_DONE(rt);
        if (requestDebit)
            rt->requestCount += requestDebit;
        if (gckind != GC_LAST_DITCH)
            JS_UNLOCK_GC(rt);
        return;
    }

    /* No other thread is in GC, so indicate that we're now in GC. */
    rt->gcLevel = 1;
    rt->gcThread = cx->thread;

    /* Wait for all other requests to finish. */
    while (rt->requestCount > 0)
        JS_AWAIT_REQUEST_DONE(rt);

#else  /* !JS_THREADSAFE */

    /* Bump gcLevel and return rather than nest; the outer gc will restart. */
    rt->gcLevel++;
    METER(if (rt->gcLevel > rt->gcStats.maxlevel)
              rt->gcStats.maxlevel = rt->gcLevel);
    if (rt->gcLevel > 1)
        return;

#endif /* !JS_THREADSAFE */

    /*
     * Set rt->gcRunning here within the GC lock, and after waiting for any
     * active requests to end, so that new requests that try to JS_AddRoot,
     * JS_RemoveRoot, or JS_RemoveRootRT block in JS_BeginRequest waiting for
     * rt->gcLevel to drop to zero, while request-less calls to the *Root*
     * APIs block in js_AddRoot or js_RemoveRoot (see above in this file),
     * waiting for GC to finish.
     */
    rt->gcRunning = JS_TRUE;
    JS_UNLOCK_GC(rt);

    /* Reset malloc counter. */
    rt->gcMallocBytes = 0;

    /* Drop atoms held by the property cache, and clear property weak links. */
    js_DisablePropertyCache(cx);
    js_FlushPropertyCache(cx);
#ifdef DEBUG_scopemeters
  { extern void js_DumpScopeMeters(JSRuntime *rt);
    js_DumpScopeMeters(rt);
  }
#endif

#ifdef JS_THREADSAFE
    /*
     * Set all thread local freelists to NULL. We may visit a thread's
     * freelist more than once. To avoid redundant clearing we unroll the
     * current thread's step.
     *
     * Also, in case a JSScript wrapped within an object was finalized, we
     * null acx->thread->gsnCache.script and finish the cache's hashtable.
     * Note that js_DestroyScript, called from script_finalize, will have
     * already cleared cx->thread->gsnCache above during finalization, so we
     * don't have to here.
     */
    memset(cx->thread->gcFreeLists, 0, sizeof cx->thread->gcFreeLists);
    iter = NULL;
    while ((acx = js_ContextIterator(rt, JS_FALSE, &iter)) != NULL) {
        if (!acx->thread || acx->thread == cx->thread)
            continue;
        memset(acx->thread->gcFreeLists, 0, sizeof acx->thread->gcFreeLists);
        GSN_CACHE_CLEAR(&acx->thread->gsnCache);
    }
#else
    /* The thread-unsafe case just has to clear the runtime's GSN cache. */
    GSN_CACHE_CLEAR(&rt->gsnCache);
#endif

restart:
    rt->gcNumber++;
    JS_ASSERT(!rt->gcUnscannedArenaStackTop);
    JS_ASSERT(rt->gcUnscannedBagSize == 0);

    /*
     * Mark phase.
     */
    JS_DHashTableEnumerate(&rt->gcRootsHash, gc_root_marker, cx);
    if (rt->gcLocksHash)
        JS_DHashTableEnumerate(rt->gcLocksHash, gc_lock_marker, cx);
    js_MarkAtomState(&rt->atomState, keepAtoms, gc_mark_atom_key_thing, cx);
    js_MarkWatchPoints(cx);
    js_MarkScriptFilenames(rt, keepAtoms);
    js_MarkNativeIteratorStates(cx);

#if JS_HAS_GENERATORS
    genTodoTail = MarkScheduledGenerators(cx);
    JS_ASSERT(!*genTodoTail);
#endif

    iter = NULL;
    while ((acx = js_ContextIterator(rt, JS_TRUE, &iter)) != NULL) {
        /*
         * Iterate frame chain and dormant chains. Temporarily tack current
         * frame onto the head of the dormant list to ease iteration.
         *
         * (NB: see comment on this whole "dormant" thing in js_Execute.)
         */
        chain = acx->fp;
        if (chain) {
            JS_ASSERT(!chain->dormantNext);
            chain->dormantNext = acx->dormantFrameChain;
        } else {
            chain = acx->dormantFrameChain;
        }

        for (fp = chain; fp; fp = chain = chain->dormantNext) {
            do {
                js_MarkStackFrame(cx, fp);
            } while ((fp = fp->down) != NULL);
        }

        /* Cleanup temporary "dormant" linkage. */
        if (acx->fp)
            acx->fp->dormantNext = NULL;

        /* Mark other roots-by-definition in acx. */
        GC_MARK(cx, acx->globalObject, "global object");
        MarkWeakRoots(cx, &acx->weakRoots);
        if (acx->throwing) {
            if (JSVAL_IS_GCTHING(acx->exception))
                GC_MARK(cx, JSVAL_TO_GCTHING(acx->exception), "exception");
        } else {
            /* Avoid keeping GC-ed junk stored in JSContext.exception. */
            acx->exception = JSVAL_NULL;
        }
#if JS_HAS_LVALUE_RETURN
        if (acx->rval2set && JSVAL_IS_GCTHING(acx->rval2))
            GC_MARK(cx, JSVAL_TO_GCTHING(acx->rval2), "rval2");
#endif

        for (sh = acx->stackHeaders; sh; sh = sh->down) {
            METER(rt->gcStats.stackseg++);
            METER(rt->gcStats.segslots += sh->nslots);
            GC_MARK_JSVALS(cx, sh->nslots, JS_STACK_SEGMENT(sh), "stack");
        }

        if (acx->localRootStack)
            js_MarkLocalRoots(cx, acx->localRootStack);

        for (tvr = acx->tempValueRooters; tvr; tvr = tvr->down) {
            switch (tvr->count) {
              case JSTVU_SINGLE:
                if (JSVAL_IS_GCTHING(tvr->u.value)) {
                    GC_MARK(cx, JSVAL_TO_GCTHING(tvr->u.value),
                            "tvr->u.value");
                }
                break;
              case JSTVU_MARKER:
                tvr->u.marker(cx, tvr);
                break;
              case JSTVU_SPROP:
                MARK_SCOPE_PROPERTY(cx, tvr->u.sprop);
                break;
              case JSTVU_WEAK_ROOTS:
                MarkWeakRoots(cx, tvr->u.weakRoots);
                break;
              case JSTVU_SCRIPT:
                js_MarkScript(cx, tvr->u.script);
                break;
              default:
                JS_ASSERT(tvr->count >= 0);
                GC_MARK_JSVALS(cx, tvr->count, tvr->u.array, "tvr->u.array");
            }
        }

        if (acx->sharpObjectMap.depth > 0)
            js_GCMarkSharpMap(cx, &acx->sharpObjectMap);
    }

#ifdef DUMP_CALL_TABLE
    js_DumpCallTable(cx);
#endif

    /*
     * Mark children of things that caused too deep recursion during above
     * marking phase.
     */
    ScanDelayedChildren(cx);

#if JS_HAS_GENERATORS
    /*
     * Close phase: search and mark part. See comments in
     * FindAndMarkObjectsToClose for details.
     */
    FindAndMarkObjectsToClose(cx, gckind, genTodoTail);

    /*
     * Mark children of things that caused too deep recursion during the
     * just-completed marking part of the close phase.
     */
    ScanDelayedChildren(cx);
#endif

    JS_ASSERT(!cx->insideGCMarkCallback);
    if (rt->gcCallback) {
        cx->insideGCMarkCallback = JS_TRUE;
        (void) rt->gcCallback(cx, JSGC_MARK_END);
        JS_ASSERT(cx->insideGCMarkCallback);
        cx->insideGCMarkCallback = JS_FALSE;
    }
    JS_ASSERT(rt->gcUnscannedBagSize == 0);

    /* Finalize iterator states before the objects they iterate over. */
    CloseIteratorStates(cx);

    /*
     * Sweep phase.
     *
     * Finalize as we sweep, outside of rt->gcLock but with rt->gcRunning set
     * so that any attempt to allocate a GC-thing from a finalizer will fail,
     * rather than nest badly and leave the unmarked newborn to be swept.
     *
     * Finalize smaller objects before larger, to guarantee finalization of
     * GC-allocated obj->slots after obj.  See FreeSlots in jsobj.c.
     */
    for (i = 0; i < GC_NUM_FREELISTS; i++) {
        arenaList = &rt->gcArenaList[i];
        nbytes = GC_FREELIST_NBYTES(i);
        limit = arenaList->lastLimit;
        for (a = arenaList->last; a; a = a->prev) {
            JS_ASSERT(!a->prevUnscanned);
            JS_ASSERT(a->unscannedPages == 0);
            firstPage = (uint8 *) FIRST_THING_PAGE(a);
            for (offset = 0; offset != limit; offset += nbytes) {
                if ((offset & GC_PAGE_MASK) == 0) {
                    JS_ASSERT(((JSGCPageInfo *)(firstPage + offset))->
                              unscannedBitmap == 0);
                    offset += PAGE_THING_GAP(nbytes);
                }
                JS_ASSERT(offset < limit);
                flagp = a->base + offset / sizeof(JSGCThing);
                if (flagp >= firstPage)
                    flagp += GC_THINGS_SIZE;
                flags = *flagp;
                if (flags & GCF_MARK) {
                    *flagp &= ~GCF_MARK;
                } else if (!(flags & (GCF_LOCK | GCF_FINAL))) {
                    /* Call the finalizer with GCF_FINAL ORed into flags. */
                    type = flags & GCF_TYPEMASK;
                    finalizer = gc_finalizers[type];
                    if (finalizer) {
                        thing = (JSGCThing *)(firstPage + offset);
                        *flagp = (uint8)(flags | GCF_FINAL);
                        if (type >= GCX_EXTERNAL_STRING)
                            js_PurgeDeflatedStringCache(rt, (JSString *)thing);
                        finalizer(cx, thing);
                    }

                    /* Set flags to GCF_FINAL, signifying that thing is free. */
                    *flagp = GCF_FINAL;
                }
            }
            limit = GC_THINGS_SIZE;
        }
    }

    /*
     * Sweep the runtime's property tree after finalizing objects, in case any
     * had watchpoints referencing tree nodes.  Then sweep atoms, which may be
     * referenced from dead property ids.
     */
    js_SweepScopeProperties(rt);
    js_SweepAtomState(&rt->atomState);

    /*
     * Sweep script filenames after sweeping functions in the generic loop
     * above. In this way when a scripted function's finalizer destroys the
     * script and calls rt->destroyScriptHook, the hook can still access the
     * script's filename. See bug 323267.
     */
    js_SweepScriptFilenames(rt);

    /*
     * Free phase.
     * Free any unused arenas and rebuild the JSGCThing freelist.
     */
    for (i = 0; i < GC_NUM_FREELISTS; i++) {
        arenaList = &rt->gcArenaList[i];
        ap = &arenaList->last;
        a = *ap;
        if (!a)
            continue;

        allClear = JS_TRUE;
        arenaList->freeList = NULL;
        freeList = NULL;
        METER(arenaList->stats.nthings = 0);
        METER(arenaList->stats.freelen = 0);

        nbytes = GC_FREELIST_NBYTES(i);
        limit = arenaList->lastLimit;
        do {
            METER(size_t nfree = 0);
            firstPage = (uint8 *) FIRST_THING_PAGE(a);
            for (offset = 0; offset != limit; offset += nbytes) {
                if ((offset & GC_PAGE_MASK) == 0)
                    offset += PAGE_THING_GAP(nbytes);
                JS_ASSERT(offset < limit);
                flagp = a->base + offset / sizeof(JSGCThing);
                if (flagp >= firstPage)
                    flagp += GC_THINGS_SIZE;

                if (*flagp != GCF_FINAL) {
                    allClear = JS_FALSE;
                    METER(++arenaList->stats.nthings);
                } else {
                    thing = (JSGCThing *)(firstPage + offset);
                    thing->flagp = flagp;
                    thing->next = freeList;
                    freeList = thing;
                    METER(++nfree);
                }
            }
            if (allClear) {
                /*
                 * Forget just assembled free list head for the arena
                 * and destroy the arena itself.
                 */
                freeList = arenaList->freeList;
                DestroyGCArena(rt, arenaList, ap);
            } else {
                allClear = JS_TRUE;
                arenaList->freeList = freeList;
                ap = &a->prev;
                METER(arenaList->stats.freelen += nfree);
                METER(arenaList->stats.totalfreelen += nfree);
                METER(++arenaList->stats.totalarenas);
            }
            limit = GC_THINGS_SIZE;
        } while ((a = *ap) != NULL);
    }

    if (rt->gcCallback)
        (void) rt->gcCallback(cx, JSGC_FINALIZE_END);
#ifdef DEBUG_srcnotesize
  { extern void DumpSrcNoteSizeHist();
    DumpSrcNoteSizeHist();
    printf("GC HEAP SIZE %lu (%lu)\n",
           (unsigned long)rt->gcBytes, (unsigned long)rt->gcPrivateBytes);
  }
#endif

    JS_LOCK_GC(rt);

    /*
     * We want to restart GC if js_GC was called recursively or if any of the
     * finalizers called js_RemoveRoot or js_UnlockGCThingRT.
     */
    if (rt->gcLevel > 1 || rt->gcPoke) {
        rt->gcLevel = 1;
        rt->gcPoke = JS_FALSE;
        JS_UNLOCK_GC(rt);
        goto restart;
    }
    js_EnablePropertyCache(cx);
    rt->gcLevel = 0;
    rt->gcLastBytes = rt->gcBytes;
    rt->gcRunning = JS_FALSE;

#ifdef JS_THREADSAFE
    /* If we were invoked during a request, pay back the temporary debit. */
    if (requestDebit)
        rt->requestCount += requestDebit;
    rt->gcThread = NULL;
    JS_NOTIFY_GC_DONE(rt);

    /*
     * Unlock unless we have GC_LAST_DITCH which requires locked GC on return.
     */
    if (gckind != GC_LAST_DITCH)
        JS_UNLOCK_GC(rt);
#endif

    /* Execute JSGC_END callback outside the lock. */
    if (rt->gcCallback) {
        JSWeakRoots savedWeakRoots;
        JSTempValueRooter tvr;

        if (gckind == GC_LAST_DITCH) {
            /*
             * We allow JSGC_END implementation to force a full GC or allocate
             * new GC things. Thus we must protect the weak roots from GC or
             * overwrites.
             */
            savedWeakRoots = cx->weakRoots;
            JS_PUSH_TEMP_ROOT_WEAK_COPY(cx, &savedWeakRoots, &tvr);
            JS_KEEP_ATOMS(rt);
            JS_UNLOCK_GC(rt);
        }

        (void) rt->gcCallback(cx, JSGC_END);

        if (gckind == GC_LAST_DITCH) {
            JS_LOCK_GC(rt);
            JS_UNKEEP_ATOMS(rt);
            JS_POP_TEMP_ROOT(cx, &tvr);
        } else if (gckind == GC_LAST_CONTEXT && rt->gcPoke) {
            /*
             * On shutdown iterate until JSGC_END callback stops creating
             * garbage.
             */
            goto restart_after_callback;
        }
    }
}

Here is the call graph for this function:

Here is the caller graph for this function:

js_gcroot_mapper ( JSDHashTable table,
JSDHashEntryHdr hdr,
uint32  number,
void arg 
)

Definition at line 899 of file jsgc.c.

{
    GCRootMapArgs *args = (GCRootMapArgs *) arg;
    JSGCRootHashEntry *rhe = (JSGCRootHashEntry *)hdr;
    intN mapflags;
    JSDHashOperator op;

    mapflags = args->map(rhe->root, rhe->name, args->data);

#if JS_MAP_GCROOT_NEXT == JS_DHASH_NEXT &&                                     \
    JS_MAP_GCROOT_STOP == JS_DHASH_STOP &&                                     \
    JS_MAP_GCROOT_REMOVE == JS_DHASH_REMOVE
    op = (JSDHashOperator)mapflags;
#else
    op = JS_DHASH_NEXT;
    if (mapflags & JS_MAP_GCROOT_STOP)
        op |= JS_DHASH_STOP;
    if (mapflags & JS_MAP_GCROOT_REMOVE)
        op |= JS_DHASH_REMOVE;
#endif

    return op;
}

Here is the caller graph for this function:

Definition at line 503 of file jsgc.c.

{
    JSGCPageInfo *pi;
    JSGCArenaList *list;

    pi = THING_TO_PAGE(str);
    list = PAGE_TO_ARENA(pi)->list;

    JS_ASSERT(list->thingSize == sizeof(JSGCThing));
    JS_ASSERT(GC_FREELIST_INDEX(sizeof(JSGCThing)) == 0);

    return (JSRuntime *)((uint8 *)list - offsetof(JSRuntime, gcArenaList));
}

Here is the call graph for this function:

Here is the caller graph for this function:

uint8* js_GetGCThingFlags ( void thing)

Definition at line 486 of file jsgc.c.

{
    JSGCPageInfo *pi;
    jsuword offsetInArena, thingIndex;

    pi = THING_TO_PAGE(thing);
    offsetInArena = pi->offsetInArena;
    JS_ASSERT(offsetInArena < GC_THINGS_SIZE);
    thingIndex = ((offsetInArena & ~GC_PAGE_MASK) |
                  ((jsuword)thing & GC_PAGE_MASK)) / sizeof(JSGCThing);
    JS_ASSERT(thingIndex < GC_PAGE_SIZE);
    if (thingIndex >= (offsetInArena & GC_PAGE_MASK))
        thingIndex += GC_THINGS_SIZE;
    return (uint8 *)pi - offsetInArena + thingIndex;
}

Here is the caller graph for this function:

/*
 * Initialize the GC state of a runtime: arena lists, the roots hash table,
 * and the allocation limits.  Returns JS_FALSE (leaving gcRootsHash.ops
 * NULL so shutdown can tell it was never initialized) on OOM.
 *
 * Signature fixed: extraction dropped the '*' from the first parameter
 * ("JSRuntime rt" -> JSRuntime *rt).  (Definition at line 606 of jsgc.c.)
 */
JSBool
js_InitGC(JSRuntime *rt, uint32 maxbytes)
{
    InitGCArenaLists(rt);
    if (!JS_DHashTableInit(&rt->gcRootsHash, JS_DHashGetStubOps(), NULL,
                           sizeof(JSGCRootHashEntry), GC_ROOTS_SIZE)) {
        rt->gcRootsHash.ops = NULL;
        return JS_FALSE;
    }
    rt->gcLocksHash = NULL;     /* create lazily */

    /*
     * Separate gcMaxMallocBytes from gcMaxBytes but initialize to maxbytes
     * for default backward API compatibility.
     */
    rt->gcMaxBytes = rt->gcMaxMallocBytes = maxbytes;

    return JS_TRUE;
}

Here is the call graph for this function:

Here is the caller graph for this function:

/*
 * Return JS_TRUE if the GC thing is unmarked, unlocked, and not already
 * finalized -- i.e. the current sweep will finalize it.  cx is unused here
 * but kept for API symmetry with other js_*GCThing entry points.
 *
 * Signature fixed: extraction dropped the '*'s ("JSContext cx, void thing"
 * -> JSContext *cx, void *thing).  (Definition at line 518 of jsgc.c.)
 */
JSBool
js_IsAboutToBeFinalized(JSContext *cx, void *thing)
{
    uint8 flags = *js_GetGCThingFlags(thing);

    return !(flags & (GCF_MARK | GCF_LOCK | GCF_FINAL));
}

Here is the call graph for this function:

Here is the caller graph for this function:

/*
 * Context-taking wrapper around js_LockGCThingRT that reports OOM on the
 * context when the lock cannot be recorded.
 *
 * Signature fixed: extraction dropped the '*'s ("JSContext cx, void thing"
 * -> JSContext *cx, void *thing).  (Definition at line 1593 of jsgc.c.)
 */
JSBool
js_LockGCThing(JSContext *cx, void *thing)
{
    JSBool ok = js_LockGCThingRT(cx->runtime, thing);
    if (!ok)
        JS_ReportOutOfMemory(cx);
    return ok;
}

Here is the call graph for this function:

Here is the caller graph for this function:

/*
 * Lock a GC thing so the collector never finalizes it.  Shallow things
 * record the first lock in the GCF_LOCK flag bit alone; a nested lock (or
 * any lock on a "deep" thing, per GC_THING_IS_DEEP) is counted in
 * rt->gcLocksHash, created lazily here.  Returns JS_FALSE only on OOM
 * while creating or adding to the hash.  A NULL thing is a no-op success.
 *
 * Signature fixed: extraction dropped the '*'s ("JSRuntime rt, void thing"
 * -> JSRuntime *rt, void *thing).  Body kept byte-identical -- the
 * flag/hash bookkeeping depends on exact ordering under rt->gcLock.
 * (Definition at line 1627 of jsgc.c.)
 */
JSBool
js_LockGCThingRT(JSRuntime *rt, void *thing)
{
    JSBool ok, deep;
    uint8 *flagp;
    uintN flags, lock, type;
    JSGCLockHashEntry *lhe;

    ok = JS_TRUE;
    if (!thing)
        return ok;

    flagp = js_GetGCThingFlags(thing);

    JS_LOCK_GC(rt);
    flags = *flagp;
    lock = (flags & GCF_LOCK);
    type = (flags & GCF_TYPEMASK);
    deep = GC_THING_IS_DEEP(type, thing);

    /*
     * Avoid adding a rt->gcLocksHash entry for shallow things until someone
     * nests a lock -- then start such an entry with a count of 2, not 1.
     */
    if (lock || deep) {
        if (!rt->gcLocksHash) {
            rt->gcLocksHash =
                JS_NewDHashTable(JS_DHashGetStubOps(), NULL,
                                 sizeof(JSGCLockHashEntry),
                                 GC_ROOTS_SIZE);
            if (!rt->gcLocksHash) {
                ok = JS_FALSE;
                goto done;
            }
        } else if (lock == 0) {
#ifdef DEBUG
            /* A shallow thing's first lock must not already be hashed. */
            JSDHashEntryHdr *hdr =
                JS_DHashTableOperate(rt->gcLocksHash, thing,
                                     JS_DHASH_LOOKUP);
            JS_ASSERT(JS_DHASH_ENTRY_IS_FREE(hdr));
#endif
        }

        lhe = (JSGCLockHashEntry *)
            JS_DHashTableOperate(rt->gcLocksHash, thing, JS_DHASH_ADD);
        if (!lhe) {
            ok = JS_FALSE;
            goto done;
        }
        if (!lhe->thing) {
            lhe->thing = thing;
            lhe->count = deep ? 1 : 2;
        } else {
            JS_ASSERT(lhe->count >= 1);
            lhe->count++;
        }
    }

    *flagp = (uint8)(flags | GCF_LOCK);
    METER(rt->gcStats.lock++);
    ok = JS_TRUE;
done:
    JS_UNLOCK_GC(rt);
    return ok;
}

Here is the call graph for this function:

Here is the caller graph for this function:

uint32 js_MapGCRoots ( JSRuntime *rt,
JSGCRootMapFun  map,
void *data
)

Definition at line 925 of file jsgc.c.

Here is the call graph for this function:

Here is the caller graph for this function:

/*
 * Mark an atom (and, for hidden atoms, its shadowing entry value) during
 * GC.  Sets ATOM_MARK and early-returns on revisit, so atom chains cannot
 * loop.  'name' is only built under GC_MARK_DEBUG; in non-debug builds the
 * GC_MARK macro ignores its third argument, so the unconditional use below
 * is intentional.
 *
 * Signature fixed: extraction dropped the '*'s ("JSContext cx, JSAtom atom"
 * -> JSContext *cx, JSAtom *atom).  (Definition at line 1978 of jsgc.c.)
 */
void
js_MarkAtom(JSContext *cx, JSAtom *atom)
{
    jsval key;

    if (atom->flags & ATOM_MARK)
        return;
    atom->flags |= ATOM_MARK;
    key = ATOM_KEY(atom);
    if (JSVAL_IS_GCTHING(key)) {
#ifdef GC_MARK_DEBUG
        char name[32];

        if (JSVAL_IS_STRING(key)) {
            JS_snprintf(name, sizeof name, "'%s'",
                        JS_GetStringBytes(JSVAL_TO_STRING(key)));
        } else {
            JS_snprintf(name, sizeof name, "<%x>", key);
        }
#endif
        GC_MARK(cx, JSVAL_TO_GCTHING(key), name);
    }
    if (atom->flags & ATOM_HIDDEN)
        js_MarkAtom(cx, atom->entry.value);
}

Here is the call graph for this function:

Here is the caller graph for this function:

/*
 * Set a GC thing's GCF_MARK bit and mark its children.  NULL and
 * already-marked things are no-ops.  Inside a GC-mark callback the
 * children are scanned eagerly (including the delayed-scan bag) so that
 * the callback may finalize its own objects after its last mark call --
 * see the inline comment below.
 *
 * Signature fixed: extraction dropped the '*'s ("JSContext cx, void thing"
 * -> JSContext *cx, void *thing).  (Definition at line 2438 of jsgc.c.)
 */
void
js_MarkGCThing(JSContext *cx, void *thing)
{
    uint8 *flagp;

    if (!thing)
        return;

    flagp = js_GetGCThingFlags(thing);
    JS_ASSERT(*flagp != GCF_FINAL);
    if (*flagp & GCF_MARK)
        return;
    *flagp |= GCF_MARK;

    if (!cx->insideGCMarkCallback) {
        MarkGCThingChildren(cx, thing, flagp, JS_TRUE);
    } else {
        /*
         * For API compatibility we allow for the callback to assume that
         * after it calls js_MarkGCThing for the last time, the callback
         * can start to finalize its own objects that are only referenced
         * by unmarked GC things.
         *
         * Since we do not know which call from inside the callback is the
         * last, we ensure that the unscanned bag is always empty when we
         * return to the callback and all marked things are scanned.
         *
         * As an optimization we do not check for the stack size here and
         * pass JS_FALSE as the last argument to MarkGCThingChildren.
         * Otherwise with low C stack the thing would be pushed to the bag
         * just to be feed to MarkGCThingChildren from inside
         * ScanDelayedChildren.
         */
        cx->insideGCMarkCallback = JS_FALSE;
        MarkGCThingChildren(cx, thing, flagp, JS_FALSE);
        ScanDelayedChildren(cx);
        cx->insideGCMarkCallback = JS_TRUE;
    }
}

Here is the call graph for this function:

Definition at line 2544 of file jsgc.c.

{
    uintN depth, nslots;

    if (fp->callobj)
        GC_MARK(cx, fp->callobj, "call object");
    if (fp->argsobj)
        GC_MARK(cx, fp->argsobj, "arguments object");
    if (fp->varobj)
        GC_MARK(cx, fp->varobj, "variables object");
    if (fp->script) {
        js_MarkScript(cx, fp->script);
        if (fp->spbase) {
            /*
             * Don't mark what has not been pushed yet, or what has been
             * popped already.
             */
            depth = fp->script->depth;
            nslots = (JS_UPTRDIFF(fp->sp, fp->spbase)
                      < depth * sizeof(jsval))
                     ? (uintN)(fp->sp - fp->spbase)
                     : depth;
            GC_MARK_JSVALS(cx, nslots, fp->spbase, "operand");
        }
    }

    /* Allow for primitive this parameter due to JSFUN_THISP_* flags. */
    JS_ASSERT(JSVAL_IS_OBJECT((jsval)fp->thisp) ||
              (fp->fun && JSFUN_THISP_FLAGS(fp->fun->flags)));
    if (JSVAL_IS_GCTHING((jsval)fp->thisp))
        GC_MARK(cx, JSVAL_TO_GCTHING((jsval)fp->thisp), "this");

    if (fp->callee)
        GC_MARK(cx, fp->callee, "callee object");

    /*
     * Mark fp->argv, even though in the common case it will be marked via our
     * caller's frame, or via a JSStackHeader if fp was pushed by an external
     * invocation.
     *
     * The hard case is when there is not enough contiguous space in the stack
     * arena for actual, missing formal, and local root (JSFunctionSpec.extra)
     * slots.  In this case, fp->argv points to new space in a new arena, and
     * marking the caller's operand stack, or an external caller's allocated
     * stack tracked by a JSStackHeader, will not mark all the values stored
     * and addressable via fp->argv.
     *
     * So in summary, solely for the hard case of moving argv due to missing
     * formals and extra roots, we must mark actuals, missing formals, and any
     * local roots arrayed at fp->argv here.
     *
     * It would be good to avoid redundant marking of the same reference, in
     * the case where fp->argv does point into caller-allocated space tracked
     * by fp->down->spbase or cx->stackHeaders.  This would allow callbacks
     * such as the forthcoming rt->gcThingCallback (bug 333078) to compute JS
     * reference counts.  So this comment deserves a FIXME bug to cite.
     */
    if (fp->argv) {
        nslots = fp->argc;
        if (fp->fun) {
            if (fp->fun->nargs > nslots)
                nslots = fp->fun->nargs;
            if (!FUN_INTERPRETED(fp->fun))
                nslots += fp->fun->u.n.extra;
        }
        GC_MARK_JSVALS(cx, nslots + 2, fp->argv - 2, "arg");
    }
    if (JSVAL_IS_GCTHING(fp->rval))
        GC_MARK(cx, JSVAL_TO_GCTHING(fp->rval), "rval");
    if (fp->vars)
        GC_MARK_JSVALS(cx, fp->nvars, fp->vars, "var");
    GC_MARK(cx, fp->scopeChain, "scope chain");
    if (fp->sharpArray)
        GC_MARK(cx, fp->sharpArray, "sharp array");

    if (fp->xmlNamespace)
        GC_MARK(cx, fp->xmlNamespace, "xmlNamespace");
}

Here is the call graph for this function:

Here is the caller graph for this function:

/*
 * Allocate a GC thing of nbytes (rounded up to a JSGCThing multiple) with
 * the given flag byte.  Allocation order: the thread-local free list
 * (THREADSAFE fast path, no lock), then under rt->gcLock the arena free
 * list, then the tail of the last arena or a fresh arena; a "last ditch"
 * GC is attempted once before failing.  On success the thing is rooted
 * via the local root stack if active, else via the per-type newborn slot.
 * Returns NULL (with OOM reported) on failure.
 *
 * Signature fixed: extraction dropped the '*' ("JSContext cx" ->
 * JSContext *cx).  Body kept byte-identical -- the lock ordering,
 * free-list splicing, and flag initialization are order-sensitive.
 * (Definition at line 1341 of jsgc.c.)
 */
void *
js_NewGCThing(JSContext *cx, uintN flags, size_t nbytes)
{
    JSRuntime *rt;
    uintN flindex;
    JSBool doGC;
    JSGCThing *thing;
    uint8 *flagp, *firstPage;
    JSGCArenaList *arenaList;
    jsuword offset;
    JSGCArena *a;
    JSLocalRootStack *lrs;
#ifdef JS_THREADSAFE
    JSBool gcLocked;
    uintN localMallocBytes;
    JSGCThing **flbase, **lastptr;
    JSGCThing *tmpthing;
    uint8 *tmpflagp;
    uintN maxFreeThings;         /* max to take from the global free list */
    METER(size_t nfree);
#endif

    rt = cx->runtime;
    METER(rt->gcStats.alloc++);        /* this is not thread-safe */
    nbytes = JS_ROUNDUP(nbytes, sizeof(JSGCThing));
    flindex = GC_FREELIST_INDEX(nbytes);

#ifdef JS_THREADSAFE
    gcLocked = JS_FALSE;
    JS_ASSERT(cx->thread);
    flbase = cx->thread->gcFreeLists;
    JS_ASSERT(flbase);
    thing = flbase[flindex];
    localMallocBytes = cx->thread->gcMallocBytes;
    if (thing && rt->gcMaxMallocBytes - rt->gcMallocBytes > localMallocBytes) {
        flagp = thing->flagp;
        flbase[flindex] = thing->next;
        METER(rt->gcStats.localalloc++);  /* this is not thread-safe */
        goto success;
    }

    JS_LOCK_GC(rt);
    gcLocked = JS_TRUE;

    /* Transfer thread-local counter to global one. */
    if (localMallocBytes != 0) {
        cx->thread->gcMallocBytes = 0;
        if (rt->gcMaxMallocBytes - rt->gcMallocBytes < localMallocBytes)
            rt->gcMallocBytes = rt->gcMaxMallocBytes;
        else
            rt->gcMallocBytes += localMallocBytes;
    }
#endif
    JS_ASSERT(!rt->gcRunning);
    if (rt->gcRunning) {
        METER(rt->gcStats.finalfail++);
        JS_UNLOCK_GC(rt);
        return NULL;
    }

    doGC = (rt->gcMallocBytes >= rt->gcMaxMallocBytes);
#ifdef JS_GC_ZEAL
    if (rt->gcZeal >= 1) {
        doGC = JS_TRUE;
        if (rt->gcZeal >= 2)
            rt->gcPoke = JS_TRUE;
    }
#endif /* !JS_GC_ZEAL */

    arenaList = &rt->gcArenaList[flindex];
    for (;;) {
        if (doGC) {
            /*
             * Keep rt->gcLock across the call into js_GC so we don't starve
             * and lose to racing threads who deplete the heap just after
             * js_GC has replenished it (or has synchronized with a racing
             * GC that collected a bunch of garbage).  This unfair scheduling
             * can happen on certain operating systems. For the gory details,
             * see bug 162779 at https://bugzilla.mozilla.org/.
             */
            js_GC(cx, GC_LAST_DITCH);
            METER(rt->gcStats.retry++);
        }

        /* Try to get thing from the free list. */
        thing = arenaList->freeList;
        if (thing) {
            arenaList->freeList = thing->next;
            flagp = thing->flagp;
            JS_ASSERT(*flagp & GCF_FINAL);
            METER(arenaList->stats.freelen--);
            METER(arenaList->stats.recycle++);

#ifdef JS_THREADSAFE
            /*
             * Refill the local free list by taking several things from the
             * global free list unless we are still at rt->gcMaxMallocBytes
             * barrier or the free list is already populated. The former
             * happens when GC is canceled due to !gcCallback(cx, JSGC_BEGIN)
             * or no gcPoke. The latter is caused via allocating new things
             * in gcCallback(cx, JSGC_END).
             */
            if (rt->gcMallocBytes >= rt->gcMaxMallocBytes || flbase[flindex])
                break;
            tmpthing = arenaList->freeList;
            if (tmpthing) {
                maxFreeThings = MAX_THREAD_LOCAL_THINGS;
                do {
                    if (!tmpthing->next)
                        break;
                    tmpthing = tmpthing->next;
                } while (--maxFreeThings != 0);

                flbase[flindex] = arenaList->freeList;
                arenaList->freeList = tmpthing->next;
                tmpthing->next = NULL;
            }
#endif
            break;
        }

        /* Allocate from the tail of last arena or from new arena if we can. */
        if ((arenaList->last && arenaList->lastLimit != GC_THINGS_SIZE) ||
            NewGCArena(rt, arenaList)) {

            offset = arenaList->lastLimit;
            if ((offset & GC_PAGE_MASK) == 0) {
                /*
                 * Skip JSGCPageInfo record located at GC_PAGE_SIZE boundary.
                 */
                offset += PAGE_THING_GAP(nbytes);
            }
            JS_ASSERT(offset + nbytes <= GC_THINGS_SIZE);
            arenaList->lastLimit = (uint16)(offset + nbytes);
            a = arenaList->last;
            firstPage = (uint8 *)FIRST_THING_PAGE(a);
            thing = (JSGCThing *)(firstPage + offset);
            flagp = a->base + offset / sizeof(JSGCThing);
            if (flagp >= firstPage)
                flagp += GC_THINGS_SIZE;
            METER(++arenaList->stats.nthings);
            METER(arenaList->stats.maxthings =
                  JS_MAX(arenaList->stats.nthings,
                         arenaList->stats.maxthings));

#ifdef JS_THREADSAFE
            /*
             * Refill the local free list by taking free things from the last
             * arena. Prefer to order free things by ascending address in the
             * (unscientific) hope of better cache locality.
             */
            if (rt->gcMallocBytes >= rt->gcMaxMallocBytes || flbase[flindex])
                break;
            METER(nfree = 0);
            lastptr = &flbase[flindex];
            maxFreeThings = MAX_THREAD_LOCAL_THINGS;
            for (offset = arenaList->lastLimit;
                 offset != GC_THINGS_SIZE && maxFreeThings-- != 0;
                 offset += nbytes) {
                if ((offset & GC_PAGE_MASK) == 0)
                    offset += PAGE_THING_GAP(nbytes);
                JS_ASSERT(offset + nbytes <= GC_THINGS_SIZE);
                tmpflagp = a->base + offset / sizeof(JSGCThing);
                if (tmpflagp >= firstPage)
                    tmpflagp += GC_THINGS_SIZE;

                tmpthing = (JSGCThing *)(firstPage + offset);
                tmpthing->flagp = tmpflagp;
                *tmpflagp = GCF_FINAL;    /* signifying that thing is free */

                *lastptr = tmpthing;
                lastptr = &tmpthing->next;
                METER(++nfree);
            }
            arenaList->lastLimit = offset;
            *lastptr = NULL;
            METER(arenaList->stats.freelen += nfree);
#endif
            break;
        }

        /* Consider doing a "last ditch" GC unless already tried. */
        if (doGC)
            goto fail;
        rt->gcPoke = JS_TRUE;
        doGC = JS_TRUE;
    }

    /* We successfully allocated the thing. */
#ifdef JS_THREADSAFE
  success:
#endif
    lrs = cx->localRootStack;
    if (lrs) {
        /*
         * If we're in a local root scope, don't set newborn[type] at all, to
         * avoid entraining garbage from it for an unbounded amount of time
         * on this context.  A caller will leave the local root scope and pop
         * this reference, allowing thing to be GC'd if it has no other refs.
         * See JS_EnterLocalRootScope and related APIs.
         */
        if (js_PushLocalRoot(cx, lrs, (jsval) thing) < 0) {
            /*
             * When we fail for a thing allocated through the tail of the last
             * arena, thing's flag byte is not initialized. So to prevent GC
             * accessing the uninitialized flags during the finalization, we
             * always mark the thing as final. See bug 337407.
             */
            *flagp = GCF_FINAL;
            goto fail;
        }
    } else {
        /*
         * No local root scope, so we're stuck with the old, fragile model of
         * depending on a pigeon-hole newborn per type per context.
         */
        cx->weakRoots.newborn[flags & GCF_TYPEMASK] = thing;
    }

    /* We can't fail now, so update flags and rt->gc{,Private}Bytes. */
    *flagp = (uint8)flags;

    /*
     * Clear thing before unlocking in case a GC run is about to scan it,
     * finding it via newborn[].
     */
    thing->next = NULL;
    thing->flagp = NULL;
#ifdef DEBUG_gchist
    gchist[gchpos].lastDitch = doGC;
    gchist[gchpos].freeList = rt->gcArenaList[flindex].freeList;
    if (++gchpos == NGCHIST)
        gchpos = 0;
#endif
    METER(if (flags & GCF_LOCK) rt->gcStats.lockborn++);
    METER(++rt->gcArenaList[flindex].stats.totalnew);
#ifdef JS_THREADSAFE
    if (gcLocked)
        JS_UNLOCK_GC(rt);
#endif
    return thing;

fail:
#ifdef JS_THREADSAFE
    if (gcLocked)
        JS_UNLOCK_GC(rt);
#endif
    METER(rt->gcStats.fail++);
    JS_ReportOutOfMemory(cx);
    return NULL;
}

Here is the call graph for this function:

Here is the caller graph for this function:

Definition at line 939 of file jsgc.c.

{
    /*
     * Add obj to the runtime's closeable-iterator table under the GC lock.
     * Must not be called while a GC is in progress.
     */
    JSRuntime *rt = cx->runtime;
    JSBool added;

    JS_ASSERT(!rt->gcRunning);

    JS_LOCK_GC(rt);
    added = AddToPtrTable(cx, &rt->gcIteratorTable, &iteratorTableInfo, obj);
    JS_UNLOCK_GC(rt);
    return added;
}

Here is the call graph for this function:

Here is the caller graph for this function:

Definition at line 979 of file jsgc.c.

{
    JSRuntime *rt;

    rt = cx->runtime;

    /* Registration must not race with an active GC or a shutting-down VM. */
    JS_ASSERT(!rt->gcRunning);
    JS_ASSERT(rt->state != JSRTS_LANDING);
    /* Only a generator that has not yet started may be registered. */
    JS_ASSERT(gen->state == JSGEN_NEWBORN);

    /* Push gen onto the reachable-generators list under the GC lock. */
    JS_LOCK_GC(rt);
    gen->next = rt->gcCloseState.reachableList;
    rt->gcCloseState.reachableList = gen;
    METER(rt->gcStats.nclose++);
    METER(rt->gcStats.maxnclose = JS_MAX(rt->gcStats.maxnclose,
                                         rt->gcStats.nclose));
    JS_UNLOCK_GC(rt);
}

Here is the caller graph for this function:

JSBool js_RemoveRoot(JSRuntime *rt, void *rp)

Definition at line 801 of file jsgc.c.

{
    /*
     * Due to the JS_RemoveRootRT API, we may be called outside of a request.
     * Same synchronization drill as above in js_AddRoot.
     */
    JS_LOCK_GC(rt);
#ifdef JS_THREADSAFE
    JS_ASSERT(!rt->gcRunning || rt->gcLevel > 0);
    /*
     * If a GC is running on another thread, wait for it to finish so root
     * removal cannot race with the mark phase.
     */
    if (rt->gcRunning && rt->gcThread->id != js_CurrentThreadId()) {
        do {
            JS_AWAIT_GC_DONE(rt);
        } while (rt->gcLevel > 0);
    }
#endif
    (void) JS_DHashTableOperate(&rt->gcRootsHash, rp, JS_DHASH_REMOVE);
    /* What the root kept alive may now be collectable; poke the GC. */
    rt->gcPoke = JS_TRUE;
    JS_UNLOCK_GC(rt);
    return JS_TRUE;
}

Here is the caller graph for this function:

Definition at line 1196 of file jsgc.c.

{
    JSRuntime *rt;
    JSTempCloseList tempList;
    JSStackFrame *fp;
    JSGenerator **genp, *gen;
    JSBool ok, defer;
#if JS_GCMETER
    /*
     * Fix: deferCount was incremented via METER(deferCount++) and folded
     * into rt->gcStats.closelater without ever being initialized, producing
     * garbage statistics in JS_GCMETER builds. Start it at zero.
     */
    uint32 deferCount = 0;
#endif

    rt = cx->runtime;

    /*
     * It is OK to access todoQueue outside the lock here. When many threads
     * update the todo list, accessing some older value of todoQueue in the
     * worst case just delays the execution of close hooks.
     */
    if (!rt->gcCloseState.todoQueue)
        return JS_TRUE;

    /*
     * To prevent an infinite loop when a close hook creates more objects with
     * close hooks and then triggers GC we ignore recursive invocations of
     * js_RunCloseHooks and limit number of hooks to execute to the initial
     * size of the list.
     */
    if (*GC_RUNNING_CLOSE_HOOKS_PTR(cx))
        return JS_TRUE;

    *GC_RUNNING_CLOSE_HOOKS_PTR(cx) = JS_TRUE;

    /* Take the whole queue onto a temp-rooted list and clear the queue. */
    JS_LOCK_GC(rt);
    tempList.head = rt->gcCloseState.todoQueue;
    JS_PUSH_TEMP_CLOSE_LIST(cx, &tempList);
    rt->gcCloseState.todoQueue = NULL;
    METER(rt->gcStats.closelater = 0);
    rt->gcPoke = JS_TRUE;
    JS_UNLOCK_GC(rt);

    /*
     * Set aside cx->fp since we do not want a close hook using caller or
     * other means to backtrace into whatever stack might be active when
     * running the hook. We store the current frame on the dormant list to
     * protect against GC that the hook can trigger.
     */
    fp = cx->fp;
    if (fp) {
        JS_ASSERT(!fp->dormantNext);
        fp->dormantNext = cx->dormantFrameChain;
        cx->dormantFrameChain = fp;
    }
    cx->fp = NULL;

    genp = &tempList.head;
    ok = JS_TRUE;
    while ((gen = *genp) != NULL) {
        ok = ShouldDeferCloseHook(cx, gen, &defer);
        if (!ok) {
            /* Quit ASAP discarding the hook. */
            *genp = gen->next;
            break;
        }
        if (defer) {
            genp = &gen->next;
            METER(deferCount++);
            continue;
        }
        ok = js_CloseGeneratorObject(cx, gen);

        /*
         * Unlink the generator after closing it to make sure it always stays
         * rooted through tempList.
         */
        *genp = gen->next;

        if (cx->throwing) {
            /*
             * Report the exception thrown by the close hook and continue to
             * execute the rest of the hooks.
             */
            if (!js_ReportUncaughtException(cx))
                JS_ClearPendingException(cx);
            ok = JS_TRUE;
        } else if (!ok) {
            /*
             * Assume this is a stop signal from the branch callback or
             * other quit ASAP condition. Break execution until the next
             * invocation of js_RunCloseHooks.
             */
            break;
        }
    }

    /* Restore the frame we parked on the dormant chain above. */
    cx->fp = fp;
    if (fp) {
        JS_ASSERT(cx->dormantFrameChain == fp);
        cx->dormantFrameChain = fp->dormantNext;
        fp->dormantNext = NULL;
    }

    if (tempList.head) {
        /*
         * Some close hooks were not yet executed, put them back into the
         * scheduled list.
         */
        while ((gen = *genp) != NULL) {
            genp = &gen->next;
            METER(deferCount++);
        }

        /* Now genp is a pointer to the tail of tempList. */
        JS_LOCK_GC(rt);
        *genp = rt->gcCloseState.todoQueue;
        rt->gcCloseState.todoQueue = tempList.head;
        METER(rt->gcStats.closelater += deferCount);
        METER(rt->gcStats.maxcloselater
              = JS_MAX(rt->gcStats.maxcloselater, rt->gcStats.closelater));
        JS_UNLOCK_GC(rt);
    }

    JS_POP_TEMP_CLOSE_LIST(cx, &tempList);
    *GC_RUNNING_CLOSE_HOOKS_PTR(cx) = JS_FALSE;

    return ok;
}

Here is the call graph for this function:

Here is the caller graph for this function:

JS_STATIC_ASSERT(sizeof(JSGCThing) == sizeof(JSGCPageInfo))
JS_STATIC_ASSERT(sizeof(JSGCThing) >= sizeof(JSObject))
JS_STATIC_ASSERT(sizeof(JSGCThing) >= sizeof(JSString))
JS_STATIC_ASSERT(sizeof(JSGCThing) >= sizeof(jsdouble))
JS_STATIC_ASSERT(GC_FLAGS_SIZE >= GC_PAGE_SIZE)
JS_STATIC_ASSERT(sizeof(JSStackHeader) >= 2 * sizeof(jsval))
JSBool js_UnlockGCThingRT(JSRuntime *rt, void *thing)

Definition at line 1693 of file jsgc.c.

{
    uint8 *flagp, flags;
    JSGCLockHashEntry *lhe;

    /* Unlocking a null thing is a harmless no-op. */
    if (!thing)
        return JS_TRUE;

    flagp = js_GetGCThingFlags(thing);
    JS_LOCK_GC(rt);
    flags = *flagp;

    if (flags & GCF_LOCK) {
        /*
         * A locked thing with no entry in gcLocksHash carries an implicit
         * lock count of 1 encoded solely in its GCF_LOCK flag bit; the hash
         * table is only used for deep things or lock counts > 1. The comma
         * expression below performs the lookup and then tests the entry.
         */
        if (!rt->gcLocksHash ||
            (lhe = (JSGCLockHashEntry *)
                   JS_DHashTableOperate(rt->gcLocksHash, thing,
                                        JS_DHASH_LOOKUP),
             JS_DHASH_ENTRY_IS_FREE(&lhe->hdr))) {
            /* Shallow GC-thing with an implicit lock count of 1. */
            JS_ASSERT(!GC_THING_IS_DEEP(flags & GCF_TYPEMASK, thing));
        } else {
            /* Basis or nested unlock of a deep thing, or nested of shallow. */
            if (--lhe->count != 0)
                goto out;
            JS_DHashTableOperate(rt->gcLocksHash, thing, JS_DHASH_REMOVE);
        }
        /* Lock count reached zero: clear the flag bit. */
        *flagp = (uint8)(flags & ~GCF_LOCK);
    }

    rt->gcPoke = JS_TRUE;
out:
    METER(rt->gcStats.unlock++);
    JS_UNLOCK_GC(rt);
    return JS_TRUE;
}

Here is the call graph for this function:

Here is the caller graph for this function:

void js_UpdateMallocCounter(JSContext *cx, size_t nbytes)

Definition at line 3196 of file jsgc.c.

{
    /*
     * Credit nbytes against the per-thread (or per-runtime, in single-
     * threaded builds) malloc counter, saturating at UINT32_MAX instead of
     * wrapping around.
     */
    uint32 *counter, current;

#ifdef JS_THREADSAFE
    counter = &cx->thread->gcMallocBytes;
#else
    counter = &cx->runtime->gcMallocBytes;
#endif
    current = *counter;
    if (nbytes >= (uint32)-1 - current)
        *counter = (uint32)-1;
    else
        *counter = current + nbytes;
}

Here is the caller graph for this function:

Definition at line 1177 of file jsgc.c.

{
    /* Mark every generator object held on the temp close list alive. */
    JSTempCloseList *tempList = (JSTempCloseList *)tvr;
    JSGenerator *g = tempList->head;

    while (g) {
        GC_MARK(cx, g->obj, "temp list generator");
        g = g->next;
    }
}
static void MarkGCThingChildren(JSContext *cx, void *thing, uint8 *flagp,
                                JSBool shouldCheckRecursion) [static]

Definition at line 2007 of file jsgc.c.

{
    /*
     * Mark everything reachable from an already-marked GC thing. Recursion
     * is bounded: near C-stack exhaustion a thing is parked in the
     * "unscanned bag" for later processing by ScanDelayedChildren, and the
     * last unmarked child of an object (or a dependent string's base) is
     * handled via goto start instead of a recursive call.
     */
    JSRuntime *rt;
    JSObject *obj;
    jsval v, *vp, *end;
    void *next_thing;
    uint8 *next_flagp;
    JSString *str;
#ifdef JS_GCMETER
    uint32 tailCallNesting;
#endif
#ifdef GC_MARK_DEBUG
    JSScope *scope;
    char name[32];
#endif

    /*
     * With JS_GC_ASSUME_LOW_C_STACK defined the mark phase of GC always
     * uses the non-recursive code that otherwise would be called only on
     * a low C stack condition.
     */
#ifdef JS_GC_ASSUME_LOW_C_STACK
# define RECURSION_TOO_DEEP() shouldCheckRecursion
#else
    int stackDummy;
# define RECURSION_TOO_DEEP() (shouldCheckRecursion &&                        \
                               !JS_CHECK_STACK_SIZE(cx, stackDummy))
#endif

    rt = cx->runtime;
    METER(tailCallNesting = 0);
    METER(if (++rt->gcStats.cdepth > rt->gcStats.maxcdepth)
              rt->gcStats.maxcdepth = rt->gcStats.cdepth);

#ifndef GC_MARK_DEBUG
  start:
#endif
    JS_ASSERT(flagp);
    JS_ASSERT(*flagp & GCF_MARK); /* the caller must already mark the thing */
    METER(if (++rt->gcStats.depth > rt->gcStats.maxdepth)
              rt->gcStats.maxdepth = rt->gcStats.depth);
#ifdef GC_MARK_DEBUG
    if (js_DumpGCHeap)
        gc_dump_thing(cx, thing, js_DumpGCHeap);
#endif

    switch (*flagp & GCF_TYPEMASK) {
      case GCX_OBJECT:
        if (RECURSION_TOO_DEEP())
            goto add_to_unscanned_bag;
        /* If obj->slots is null, obj must be a newborn. */
        obj = (JSObject *) thing;
        vp = obj->slots;
        if (!vp)
            break;

        /* Mark slots if they are small enough to be GC-allocated. */
        if ((vp[-1] + 1) * sizeof(jsval) <= GC_NBYTES_MAX)
            GC_MARK(cx, vp - 1, "slots");

        /* Set up local variables to loop over unmarked things. */
        end = vp + ((obj->map->ops->mark)
                    ? obj->map->ops->mark(cx, obj, NULL)
                    : JS_MIN(obj->map->freeslot, obj->map->nslots));
        thing = NULL;
        flagp = NULL;
#ifdef GC_MARK_DEBUG
        scope = OBJ_IS_NATIVE(obj) ? OBJ_SCOPE(obj) : NULL;
#endif
        /*
         * Deferred-marking loop: thing/flagp lag one slot behind vp so
         * that the final unmarked child can be tail-called via goto start
         * instead of recursing.
         */
        for (; vp != end; ++vp) {
            v = *vp;
            if (!JSVAL_IS_GCTHING(v) || v == JSVAL_NULL)
                continue;
            next_thing = JSVAL_TO_GCTHING(v);
            if (next_thing == thing)
                continue;
            next_flagp = js_GetGCThingFlags(next_thing);
            if (*next_flagp & GCF_MARK)
                continue;
            JS_ASSERT(*next_flagp != GCF_FINAL);
            if (thing) {
#ifdef GC_MARK_DEBUG
                GC_MARK(cx, thing, name);
#else
                *flagp |= GCF_MARK;
                MarkGCThingChildren(cx, thing, flagp, JS_TRUE);
#endif
                if (*next_flagp & GCF_MARK) {
                    /*
                     * This happens when recursive MarkGCThingChildren marks
                     * the thing with flags referred by *next_flagp.
                     */
                    thing = NULL;
                    continue;
                }
            }
#ifdef GC_MARK_DEBUG
            GetObjSlotName(scope, obj, vp - obj->slots, name, sizeof name);
#endif
            thing = next_thing;
            flagp = next_flagp;
        }
        if (thing) {
            /*
             * thing came from the last unmarked GC-thing slot and we
             * can optimize tail recursion.
             *
             * Since we already know that there is enough C stack space,
             * we clear shouldCheckRecursion to avoid extra checking in
             * RECURSION_TOO_DEEP.
             */
            shouldCheckRecursion = JS_FALSE;
            goto on_tail_recursion;
        }
        break;

#ifdef DEBUG
      case GCX_STRING:
        str = (JSString *)thing;
        JS_ASSERT(!JSSTRING_IS_DEPENDENT(str));
        break;
#endif

      case GCX_MUTABLE_STRING:
        str = (JSString *)thing;
        if (!JSSTRING_IS_DEPENDENT(str))
            break;
        /* A dependent string keeps only its base alive. */
        thing = JSSTRDEP_BASE(str);
        flagp = js_GetGCThingFlags(thing);
        if (*flagp & GCF_MARK)
            break;
#ifdef GC_MARK_DEBUG
        strcpy(name, "base");
#endif
        /* Fallthrough to code to deal with the tail recursion. */

      on_tail_recursion:
#ifdef GC_MARK_DEBUG
        /*
         * Do not eliminate C recursion when debugging to allow
         * js_MarkNamedGCThing to build a full dump of live GC
         * things.
         */
        GC_MARK(cx, thing, name);
        break;
#else
        /* Eliminate tail recursion for the last unmarked child. */
        JS_ASSERT(*flagp != GCF_FINAL);
        METER(++tailCallNesting);
        *flagp |= GCF_MARK;
        goto start;
#endif

#if JS_HAS_XML_SUPPORT
      case GCX_NAMESPACE:
        if (RECURSION_TOO_DEEP())
            goto add_to_unscanned_bag;
        js_MarkXMLNamespace(cx, (JSXMLNamespace *)thing);
        break;

      case GCX_QNAME:
        if (RECURSION_TOO_DEEP())
            goto add_to_unscanned_bag;
        js_MarkXMLQName(cx, (JSXMLQName *)thing);
        break;

      case GCX_XML:
        if (RECURSION_TOO_DEEP())
            goto add_to_unscanned_bag;
        js_MarkXML(cx, (JSXML *)thing);
        break;
#endif
      add_to_unscanned_bag:
        /* Out of C stack: queue the thing for ScanDelayedChildren. */
        AddThingToUnscannedBag(cx->runtime, thing, flagp);
        break;
    }

#undef RECURSION_TOO_DEEP

    METER(rt->gcStats.depth -= 1 + tailCallNesting);
    METER(rt->gcStats.cdepth--);
}

Here is the call graph for this function:

Here is the caller graph for this function:

static JSGenerator **MarkScheduledGenerators(JSContext *cx) [static]

Definition at line 1142 of file jsgc.c.

{
    /*
     * Walk the close-hook todo queue, marking each generator whose close
     * hook can still be scheduled and unlinking the rest in place. Returns
     * a pointer to the list tail slot so the caller can append.
     */
    JSRuntime *rt;
    JSGenerator **genp, *gen;

    rt = cx->runtime;
    genp = &rt->gcCloseState.todoQueue;
    while ((gen = *genp) != NULL) {
        if (CanScheduleCloseHook(gen)) {
            GC_MARK(cx, gen->obj, "scheduled generator");
            genp = &gen->next;
        } else {
            /* Discard the generator from the list if its schedule is over. */
            *genp = gen->next;
            METER(JS_ASSERT(rt->gcStats.closelater > 0));
            METER(rt->gcStats.closelater--);
        }
    }
    return genp;
}

Here is the call graph for this function:

Here is the caller graph for this function:

static void MarkWeakRoots(JSContext *cx, JSWeakRoots *wr) [static]

Definition at line 2624 of file jsgc.c.

{
    /*
     * Mark the weak roots of a context: the per-type newborn pigeon-holes,
     * the last atomized string, and the last internal result if it is a
     * GC thing.
     */
    uintN idx;
    void *gcthing;

    for (idx = 0; idx != GCX_NTYPES; idx++)
        GC_MARK(cx, wr->newborn[idx], gc_typenames[idx]);
    if (wr->lastAtom)
        GC_MARK_ATOM(cx, wr->lastAtom);
    if (JSVAL_IS_GCTHING(wr->lastInternalResult)) {
        gcthing = JSVAL_TO_GCTHING(wr->lastInternalResult);
        if (gcthing)
            GC_MARK(cx, gcthing, "lastInternalResult");
    }
}

Here is the caller graph for this function:

static JSBool NewGCArena(JSRuntime *rt, JSGCArenaList *arenaList) [static]

Definition at line 385 of file jsgc.c.

{
    /*
     * Allocate a new arena for arenaList and link it as the list's last
     * arena. Returns JS_FALSE if the GC byte budget is exhausted or malloc
     * fails.
     */
    JSGCArena *a;
    jsuword offset;
    JSGCPageInfo *pi;
    uint32 *bytesptr;

    /* Check if we are allowed and can allocate a new arena. */
    if (rt->gcBytes >= rt->gcMaxBytes)
        return JS_FALSE;
    a = (JSGCArena *)malloc(GC_ARENA_SIZE);
    if (!a)
        return JS_FALSE;

    /* Initialize the JSGCPageInfo records at the start of every thing page. */
    offset = (GC_PAGE_SIZE - ((jsuword)a->base & GC_PAGE_MASK)) & GC_PAGE_MASK;
    JS_ASSERT((jsuword)a->base + offset == FIRST_THING_PAGE(a));
    do {
        pi = (JSGCPageInfo *) (a->base + offset);
        pi->offsetInArena = offset;
        pi->unscannedBitmap = 0;
        offset += GC_PAGE_SIZE;
    } while (offset < GC_THINGS_SIZE);

    METER(++arenaList->stats.narenas);
    METER(arenaList->stats.maxarenas
          = JS_MAX(arenaList->stats.maxarenas, arenaList->stats.narenas));

    /* Link the arena as the new last arena and reset the bump limit. */
    a->list = arenaList;
    a->prev = arenaList->last;
    a->prevUnscanned = NULL;
    a->unscannedPages = 0;
    arenaList->last = a;
    arenaList->lastLimit = 0;

    /* List 0 is the public GC heap; others count as private bytes. */
    bytesptr = (arenaList == &rt->gcArenaList[0])
               ? &rt->gcBytes
               : &rt->gcPrivateBytes;
    *bytesptr += GC_ARENA_SIZE;

    return JS_TRUE;
}

Here is the caller graph for this function:

static size_t PtrTableCapacity(size_t count, const JSPtrTableInfo *info) [static]

Definition at line 262 of file jsgc.c.

{
    /*
     * Compute the array capacity backing a pointer table of count entries:
     * zero for an empty table, the next power of two (clamped below by
     * minCapacity) while small, and linear growth in multiples of the
     * threshold afterwards.
     */
    size_t linearThreshold, lg2, result;

    linearThreshold = info->linearGrowthThreshold;
    JS_ASSERT(info->minCapacity <= linearThreshold);

    if (count == 0) {
        result = 0;
    } else if (count >= linearThreshold) {
        result = JS_ROUNDUP(count, linearThreshold);
    } else {
        lg2 = JS_CEILING_LOG2W(count);
        JS_ASSERT(lg2 != JS_BITS_PER_WORD);
        result = (size_t)1 << lg2;
        if (result < info->minCapacity)
            result = info->minCapacity;
    }

    JS_ASSERT(result >= count);
    return result;
}

Here is the call graph for this function:

Here is the caller graph for this function:

static void ScanDelayedChildren(JSContext *cx) [static]

Definition at line 2297 of file jsgc.c.

{
    /*
     * Drain the "unscanned bag": things that MarkGCThingChildren could not
     * scan recursively for lack of C stack. Arenas with unscanned pages
     * form an intrusive stack threaded through prevUnscanned; each page's
     * unscannedBitmap records which fixed-size chunks hold unscanned
     * things. Scanning may push more arenas, so the loop continues until
     * the stack is empty.
     */
    JSRuntime *rt;
    JSGCArena *arena;
    size_t thingSize;
    size_t thingsPerUnscannedChunk;
    size_t pageGap;
    size_t pageIndex;
    JSGCPageInfo *pi;
    size_t chunkIndex;
    size_t thingOffset, thingLimit;
    JSGCThing *thing;
    uint8 *flagp;
    JSGCArena *prevArena;

    rt = cx->runtime;
    arena = rt->gcUnscannedArenaStackTop;
    if (!arena) {
        JS_ASSERT(rt->gcUnscannedBagSize == 0);
        return;
    }

  init_size:
    /* Recompute per-thing geometry whenever we move to a new-sized list. */
    thingSize = arena->list->thingSize;
    GET_GAP_AND_CHUNK_SPAN(thingSize, thingsPerUnscannedChunk, pageGap);
    for (;;) {
        /*
         * The following assert verifies that the current arena belongs to
         * the unscan stack since AddThingToUnscannedBag ensures that even
         * for stack's bottom prevUnscanned != NULL but rather points to self.
         */
        JS_ASSERT(arena->prevUnscanned);
        JS_ASSERT(rt->gcUnscannedArenaStackTop->prevUnscanned);
        while (arena->unscannedPages != 0) {
            pageIndex = JS_FLOOR_LOG2W(arena->unscannedPages);
            JS_ASSERT(pageIndex < GC_PAGE_COUNT);
            pi = (JSGCPageInfo *)(FIRST_THING_PAGE(arena) +
                                  pageIndex * GC_PAGE_SIZE);
            JS_ASSERT(pi->unscannedBitmap);
            chunkIndex = JS_FLOOR_LOG2W(pi->unscannedBitmap);
            /* Clear the chunk bit before scanning so re-adds are visible. */
            pi->unscannedBitmap &= ~((jsuword)1 << chunkIndex);
            if (pi->unscannedBitmap == 0)
                arena->unscannedPages &= ~((jsuword)1 << pageIndex);
            thingOffset = (pageGap
                           + chunkIndex * thingsPerUnscannedChunk * thingSize);
            JS_ASSERT(thingOffset >= sizeof(JSGCPageInfo));
            thingLimit = thingOffset + thingsPerUnscannedChunk * thingSize;
            if (thingsPerUnscannedChunk != 1) {
                /*
                 * thingLimit can go beyond the last allocated thing for the
                 * last chunk as the real limit can be inside the chunk.
                 */
                if (arena->list->last == arena &&
                    arena->list->lastLimit < (pageIndex * GC_PAGE_SIZE +
                                              thingLimit)) {
                    thingLimit = (arena->list->lastLimit -
                                  pageIndex * GC_PAGE_SIZE);
                } else if (thingLimit > GC_PAGE_SIZE) {
                    thingLimit = GC_PAGE_SIZE;
                }
                JS_ASSERT(thingLimit > thingOffset);
            }
            JS_ASSERT(arena->list->last != arena ||
                      arena->list->lastLimit >= (pageIndex * GC_PAGE_SIZE +
                                                 thingLimit));
            JS_ASSERT(thingLimit <= GC_PAGE_SIZE);

            for (; thingOffset != thingLimit; thingOffset += thingSize) {
                /*
                 * XXX: inline js_GetGCThingFlags() to use already available
                 * pi.
                 */
                thing = (void *)((jsuword)pi + thingOffset);
                flagp = js_GetGCThingFlags(thing);
                if (thingsPerUnscannedChunk != 1) {
                    /*
                     * Skip free or already scanned things that share the chunk
                     * with unscanned ones.
                     */
                    if ((*flagp & (GCF_MARK|GCF_FINAL)) != (GCF_MARK|GCF_FINAL))
                        continue;
                }
                JS_ASSERT((*flagp & (GCF_MARK|GCF_FINAL))
                              == (GCF_MARK|GCF_FINAL));
                /* GCF_MARK|GCF_FINAL encodes "marked but not yet scanned". */
                *flagp &= ~GCF_FINAL;
#ifdef DEBUG
                JS_ASSERT(rt->gcUnscannedBagSize != 0);
                --rt->gcUnscannedBagSize;

                /*
                 * Check that GC thing type is consistent with the type of
                 * things that can be put to the unscanned bag.
                 */
                switch (*flagp & GCF_TYPEMASK) {
                  case GCX_OBJECT:
# if JS_HAS_XML_SUPPORT
                  case GCX_NAMESPACE:
                  case GCX_QNAME:
                  case GCX_XML:
# endif
                    break;
                  default:
                    JS_ASSERT(0);
                }
#endif
                MarkGCThingChildren(cx, thing, flagp, JS_FALSE);
            }
        }
        /*
         * We finished scanning of the arena but we can only pop it from
         * the stack if the arena is the stack's top.
         *
         * When MarkGCThingChildren from the above calls
         * AddThingToUnscannedBag and the latter pushes new arenas to the
         * stack, we have to skip popping of this arena until it becomes
         * the top of the stack again.
         */
        if (arena == rt->gcUnscannedArenaStackTop) {
            prevArena = arena->prevUnscanned;
            arena->prevUnscanned = NULL;
            if (arena == prevArena) {
                /*
                 * prevUnscanned points to itself and we reached the bottom
                 * of the stack.
                 */
                break;
            }
            rt->gcUnscannedArenaStackTop = arena = prevArena;
        } else {
            arena = rt->gcUnscannedArenaStackTop;
        }
        if (arena->list->thingSize != thingSize)
            goto init_size;
    }
    JS_ASSERT(rt->gcUnscannedArenaStackTop);
    JS_ASSERT(!rt->gcUnscannedArenaStackTop->prevUnscanned);
    rt->gcUnscannedArenaStackTop = NULL;
    JS_ASSERT(rt->gcUnscannedBagSize == 0);
}

Here is the call graph for this function:

Here is the caller graph for this function:

static JSBool ShouldDeferCloseHook(JSContext *cx, JSGenerator *gen,
                                   JSBool *defer) [static]

Definition at line 1034 of file jsgc.c.

{
    /*
     * Decide whether gen's close hook must be deferred: defer when the
     * generator's parent is an inner object whose outer counterpart does
     * not resolve back to it (e.g. a navigated-away window). Returns
     * JS_FALSE only on error from the outer/inner object hooks.
     */
    JSObject *parent, *obj;
    JSClass *clasp;
    JSExtendedClass *xclasp;

    /*
     * This is called outside any locks, so use thread-safe macros to access
     * parent and  classes.
     *
     * NOTE(review): this assumes gen->obj always has a non-null parent;
     * OBJ_GET_CLASS on a null parent would crash — confirm callers
     * guarantee a parented generator object.
     */
    *defer = JS_FALSE;
    parent = OBJ_GET_PARENT(cx, gen->obj);
    clasp = OBJ_GET_CLASS(cx, parent);
    if (clasp->flags & JSCLASS_IS_EXTENDED) {
        xclasp = (JSExtendedClass *)clasp;
        if (xclasp->outerObject) {
            obj = xclasp->outerObject(cx, parent);
            if (!obj)
                return JS_FALSE;
            OBJ_TO_INNER_OBJECT(cx, obj);
            if (!obj)
                return JS_FALSE;
            *defer = obj != parent;
        }
    }
#ifdef DEBUG_igor
    if (*defer) {
        fprintf(stderr, "GEN: deferring, gen=%p parent=%p\n",
                (void *)gen, (void *)parent);
    }
#endif
    return JS_TRUE;
}

Here is the call graph for this function:

Here is the caller graph for this function:

static void ShrinkPtrTable(JSPtrTable *table, const JSPtrTableInfo *info,
                           size_t newCount) [static]

Definition at line 346 of file jsgc.c.

{
    /*
     * Shrink table to newCount entries, releasing or reallocating the
     * backing array when the computed capacity drops. A failed realloc is
     * tolerated: the old (larger) array simply stays in place.
     */
    size_t oldCapacity, capacity;
    void **array;

    JS_ASSERT(newCount <= table->count);
    if (newCount == table->count)
        return;

    oldCapacity = PtrTableCapacity(table->count, info);
    table->count = newCount;
    capacity = PtrTableCapacity(newCount, info);

    if (oldCapacity != capacity) {
        array = table->array;
        JS_ASSERT(array);
        if (capacity == 0) {
            free(array);
            table->array = NULL;
            return;
        }
        /* Shrinking realloc may fail; keep the old array in that case. */
        array = (void **) realloc(array, capacity * sizeof array[0]);
        if (array)
            table->array = array;
    }
#ifdef DEBUG
    /* Poison the now-unused tail to catch stale reads in debug builds. */
    memset(table->array + newCount, JS_FREE_PATTERN,
           (capacity - newCount) * sizeof table->array[0]);
#endif
}

Here is the call graph for this function:

Here is the caller graph for this function:


Variable Documentation

Initial value:

Definition at line 255 of file jsgc.c.