lightning-sunbird  0.9+nobinonly
jsgc.h File Reference
#include "jsprvtd.h"
#include "jspubtd.h"
#include "jsdhash.h"
#include "jsutil.h"

Classes

struct  JSPtrTable
struct  JSGCCloseState
struct  JSGCThing
struct  JSGCArenaList
struct  JSWeakRoots

Defines

#define GCX_OBJECT   0 /* JSObject */
#define GCX_STRING   1 /* JSString */
#define GCX_DOUBLE   2 /* jsdouble */
#define GCX_MUTABLE_STRING   3 /* JSString that's mutable -- single-threaded only! */
#define GCX_PRIVATE   4 /* private (unscanned) data */
#define GCX_NAMESPACE   5 /* JSXMLNamespace */
#define GCX_QNAME   6 /* JSXMLQName */
#define GCX_XML   7 /* JSXML */
#define GCX_EXTERNAL_STRING   8 /* JSString w/ external chars */
#define GCX_NTYPES_LOG2   4 /* type index bits */
#define GCX_NTYPES   JS_BIT(GCX_NTYPES_LOG2)
#define GCF_TYPEMASK   JS_BITMASK(GCX_NTYPES_LOG2)
#define GCF_MARK   JS_BIT(GCX_NTYPES_LOG2)
#define GCF_FINAL   JS_BIT(GCX_NTYPES_LOG2 + 1)
#define GCF_SYSTEM   JS_BIT(GCX_NTYPES_LOG2 + 2)
#define GCF_LOCKSHIFT   (GCX_NTYPES_LOG2 + 3) /* lock bit shift */
#define GCF_LOCK   JS_BIT(GCF_LOCKSHIFT) /* lock request bit in API */
#define GCF_MUTABLE   2
#define GC_POKE(cx, oldval)   ((cx)->runtime->gcPoke = JS_TRUE)
#define GC_NBYTES_MAX   (10 * sizeof(JSGCThing))
#define GC_NUM_FREELISTS   (GC_NBYTES_MAX / sizeof(JSGCThing))
#define GC_FREELIST_NBYTES(i)   (((i) + 1) * sizeof(JSGCThing))
#define GC_FREELIST_INDEX(n)   (((n) / sizeof(JSGCThing)) - 1)
#define GC_MARK_ATOM(cx, atom)
#define GC_MARK(cx, thing, name)   js_MarkGCThing(cx, thing)
#define JS_CLEAR_WEAK_ROOTS(wr)   (memset((wr), 0, sizeof(JSWeakRoots)))
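
The GCX_* indices and GCF_* bits above pack into a single per-thing flag byte: the low GCX_NTYPES_LOG2 bits hold the type index, and the mark, final, system, and lock bits sit above them. The following standalone sketch illustrates that layout; it is not part of jsgc.h, and it assumes JS_BIT(n) expands to (1u << n) and JS_BITMASK(n) to (JS_BIT(n) - 1), mirroring jsutil.h.

#include <stdio.h>

#define JS_BIT(n)       (1u << (n))          /* assumed jsutil.h expansion */
#define JS_BITMASK(n)   (JS_BIT(n) - 1)      /* assumed jsutil.h expansion */

#define GCX_DOUBLE      2                    /* jsdouble */
#define GCX_NTYPES_LOG2 4                    /* type index bits */
#define GCF_TYPEMASK    JS_BITMASK(GCX_NTYPES_LOG2)
#define GCF_MARK        JS_BIT(GCX_NTYPES_LOG2)
#define GCF_FINAL       JS_BIT(GCX_NTYPES_LOG2 + 1)
#define GCF_LOCK        JS_BIT(GCX_NTYPES_LOG2 + 3)

int main(void)
{
    /* A marked, locked double: type index in the low bits, state above. */
    unsigned char flags = GCX_DOUBLE | GCF_MARK | GCF_LOCK;

    printf("type index: %u\n", flags & GCF_TYPEMASK);        /* 2 */
    printf("marked:     %d\n", (flags & GCF_MARK) != 0);      /* 1 */
    printf("finalized:  %d\n", (flags & GCF_FINAL) != 0);     /* 0 */
    printf("locked:     %d\n", (flags & GCF_LOCK) != 0);      /* 1 */
    return 0;
}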

Typedefs

typedef struct JSPtrTable JSPtrTable
typedef struct JSGCCloseState JSGCCloseState
typedef enum JSGCInvocationKind JSGCInvocationKind
typedef struct JSGCArena JSGCArena
typedef struct JSGCArenaList JSGCArenaList

Enumerations

enum  JSGCInvocationKind { GC_NORMAL, GC_LAST_CONTEXT, GC_LAST_DITCH }

Functions

uint8 * js_GetGCThingFlags (void *thing)
JSRuntime * js_GetGCStringRuntime (JSString *str)
intN js_ChangeExternalStringFinalizer (JSStringFinalizeOp oldop, JSStringFinalizeOp newop)
JSBool js_InitGC (JSRuntime *rt, uint32 maxbytes)
void js_FinishGC (JSRuntime *rt)
JSBool js_AddRoot (JSContext *cx, void *rp, const char *name)
JSBool js_AddRootRT (JSRuntime *rt, void *rp, const char *name)
JSBool js_RemoveRoot (JSRuntime *rt, void *rp)
uint32 js_MapGCRoots (JSRuntime *rt, JSGCRootMapFun map, void *data)
JSBool js_RegisterCloseableIterator (JSContext *cx, JSObject *obj)
void js_RegisterGenerator (JSContext *cx, JSGenerator *gen)
JSBool js_RunCloseHooks (JSContext *cx)
void * js_NewGCThing (JSContext *cx, uintN flags, size_t nbytes)
JSBool js_LockGCThing (JSContext *cx, void *thing)
JSBool js_LockGCThingRT (JSRuntime *rt, void *thing)
JSBool js_UnlockGCThingRT (JSRuntime *rt, void *thing)
JSBool js_IsAboutToBeFinalized (JSContext *cx, void *thing)
void js_MarkAtom (JSContext *cx, JSAtom *atom)
void js_MarkGCThing (JSContext *cx, void *thing)
void js_MarkStackFrame (JSContext *cx, JSStackFrame *fp)
void js_GC (JSContext *cx, JSGCInvocationKind gckind)
void js_UpdateMallocCounter (JSContext *cx, size_t nbytes)
 JS_STATIC_ASSERT (JSVAL_NULL==0)

Class Documentation

struct JSPtrTable

Definition at line 132 of file jsgc.h.

Class Members
void ** array
size_t count
struct JSGCCloseState

Definition at line 145 of file jsgc.h.

Class Members
JSGenerator * reachableList
JSBool runningCloseHook
JSGenerator * todoQueue
struct JSGCThing

Definition at line 178 of file jsgc.h.

Class Members
uint8 * flagp
JSGCThing * next
struct JSGCArenaList

Definition at line 332 of file jsgc.h.

Class Members
JSGCThing * freeList
JSGCArena * last
uint16 lastLimit
uint16 thingSize
struct JSWeakRoots

Definition at line 343 of file jsgc.h.

Class Members
JSAtom * lastAtom
jsval lastInternalResult
JSGCThing * newborn

Define Documentation

#define GC_FREELIST_INDEX(n)   (((n) / sizeof(JSGCThing)) - 1)

Definition at line 186 of file jsgc.h.

#define GC_FREELIST_NBYTES(i)   (((i) + 1) * sizeof(JSGCThing))

Definition at line 185 of file jsgc.h.
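
Taken together, GC_FREELIST_NBYTES and GC_FREELIST_INDEX define the allocator's size classes: free list i holds things of (i + 1) * sizeof(JSGCThing) bytes, and the index macro inverts that mapping. A standalone sketch of the round trip, with JSGCThing modeled by the two fields shown in the class documentation above:

#include <stdio.h>
#include <stddef.h>

/* Local model of JSGCThing: the flagp/next fields documented above. */
typedef struct JSGCThing JSGCThing;
struct JSGCThing {
    JSGCThing     *next;
    unsigned char *flagp;
};

#define GC_NBYTES_MAX         (10 * sizeof(JSGCThing))
#define GC_NUM_FREELISTS      (GC_NBYTES_MAX / sizeof(JSGCThing))
#define GC_FREELIST_NBYTES(i) (((i) + 1) * sizeof(JSGCThing))
#define GC_FREELIST_INDEX(n)  (((n) / sizeof(JSGCThing)) - 1)

int main(void)
{
    size_t i;

    for (i = 0; i < GC_NUM_FREELISTS; i++) {
        size_t nbytes = GC_FREELIST_NBYTES(i);

        /* Each size class maps back to its own index. */
        printf("freelist %zu holds things of %zu bytes (index %zu)\n",
               i, nbytes, GC_FREELIST_INDEX(nbytes));
    }
    return 0;
}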

#define GC_MARK(cx, thing, name)   js_MarkGCThing(cx, thing)

Definition at line 233 of file jsgc.h.

#define GC_MARK_ATOM(cx, atom)
Value:
JS_BEGIN_MACRO                                                            \
        if (!((atom)->flags & ATOM_MARK))                                     \
            js_MarkAtom(cx, atom);                                            \
    JS_END_MACRO

Definition at line 207 of file jsgc.h.

#define GC_NBYTES_MAX   (10 * sizeof(JSGCThing))

Definition at line 183 of file jsgc.h.

#define GC_NUM_FREELISTS   (GC_NBYTES_MAX / sizeof(JSGCThing))

Definition at line 184 of file jsgc.h.

#define GC_POKE(cx, oldval)   ((cx)->runtime->gcPoke = JS_TRUE)

Definition at line 97 of file jsgc.h.

#define GCF_FINAL   JS_BIT(GCX_NTYPES_LOG2 + 1)

Definition at line 69 of file jsgc.h.

#define GCF_LOCK   JS_BIT(GCF_LOCKSHIFT) /* lock request bit in API */

Definition at line 72 of file jsgc.h.

#define GCF_LOCKSHIFT   (GCX_NTYPES_LOG2 + 3) /* lock bit shift */

Definition at line 71 of file jsgc.h.

#define GCF_MARK   JS_BIT(GCX_NTYPES_LOG2)

Definition at line 68 of file jsgc.h.

#define GCF_MUTABLE   2

Definition at line 75 of file jsgc.h.

#define GCF_SYSTEM   JS_BIT(GCX_NTYPES_LOG2 + 2)

Definition at line 70 of file jsgc.h.

#define GCF_TYPEMASK   JS_BITMASK(GCX_NTYPES_LOG2)

Definition at line 67 of file jsgc.h.

#define GCX_DOUBLE   2 /* jsdouble */

Definition at line 55 of file jsgc.h.

#define GCX_EXTERNAL_STRING   8 /* JSString w/ external chars */

Definition at line 61 of file jsgc.h.

#define GCX_MUTABLE_STRING

Value:
3               /* JSString that's mutable -- single-threaded only! */

Definition at line 56 of file jsgc.h.

#define GCX_NAMESPACE   5 /* JSXMLNamespace */

Definition at line 58 of file jsgc.h.

#define GCX_NTYPES   JS_BIT(GCX_NTYPES_LOG2)

Definition at line 64 of file jsgc.h.

#define GCX_NTYPES_LOG2   4 /* type index bits */

Definition at line 63 of file jsgc.h.

#define GCX_OBJECT   0 /* JSObject */

Definition at line 53 of file jsgc.h.

#define GCX_PRIVATE   4 /* private (unscanned) data */

Definition at line 57 of file jsgc.h.

#define GCX_QNAME   6 /* JSXMLQName */

Definition at line 59 of file jsgc.h.

#define GCX_STRING   1 /* JSString */

Definition at line 54 of file jsgc.h.

#define GCX_XML   7 /* JSXML */

Definition at line 60 of file jsgc.h.

#define JS_CLEAR_WEAK_ROOTS(wr)   (memset((wr), 0, sizeof(JSWeakRoots)))

Definition at line 355 of file jsgc.h.


Typedef Documentation

typedef struct JSGCArena JSGCArena

Definition at line 311 of file jsgc.h.

typedef struct JSGCArenaList JSGCArenaList

Definition at line 312 of file jsgc.h.

typedef struct JSPtrTable JSPtrTable

Enumeration Type Documentation

enum JSGCInvocationKind

Enumerator:
GC_NORMAL 
GC_LAST_CONTEXT 
GC_LAST_DITCH 

Definition at line 243 of file jsgc.h.

enum JSGCInvocationKind {
    /* Normal invocation. */
    GC_NORMAL,

    /*
     * Called from js_DestroyContext for last JSContext in a JSRuntime, when
     * it is imperative that rt->gcPoke gets cleared early in js_GC.
     */
    GC_LAST_CONTEXT,

    /*
     * Called from js_NewGCThing as a last-ditch GC attempt. See comments
     * before js_GC definition for details.
     */
    GC_LAST_DITCH
};

Function Documentation

JSBool js_AddRoot (JSContext *cx, void *rp, const char *name)

Definition at line 751 of file jsgc.c.

{
    JSBool ok = js_AddRootRT(cx->runtime, rp, name);
    if (!ok)
        JS_ReportOutOfMemory(cx);
    return ok;
}

JSBool js_AddRootRT (JSRuntime *rt, void *rp, const char *name)

Definition at line 760 of file jsgc.c.

{
    JSBool ok;
    JSGCRootHashEntry *rhe;

    /*
     * Due to the long-standing, but now removed, use of rt->gcLock across the
     * bulk of js_GC, API users have come to depend on JS_AddRoot etc. locking
     * properly with a racing GC, without calling JS_AddRoot from a request.
     * We have to preserve API compatibility here, now that we avoid holding
     * rt->gcLock across the mark phase (including the root hashtable mark).
     *
     * If the GC is running and we're called on another thread, wait for this
     * GC activation to finish.  We can safely wait here (in the case where we
     * are called within a request on another thread's context) without fear
     * of deadlock because the GC doesn't set rt->gcRunning until after it has
     * waited for all active requests to end.
     */
    JS_LOCK_GC(rt);
#ifdef JS_THREADSAFE
    JS_ASSERT(!rt->gcRunning || rt->gcLevel > 0);
    if (rt->gcRunning && rt->gcThread->id != js_CurrentThreadId()) {
        do {
            JS_AWAIT_GC_DONE(rt);
        } while (rt->gcLevel > 0);
    }
#endif
    rhe = (JSGCRootHashEntry *) JS_DHashTableOperate(&rt->gcRootsHash, rp,
                                                     JS_DHASH_ADD);
    if (rhe) {
        rhe->root = rp;
        rhe->name = name;
        ok = JS_TRUE;
    } else {
        ok = JS_FALSE;
    }
    JS_UNLOCK_GC(rt);
    return ok;
}

intN js_ChangeExternalStringFinalizer (JSStringFinalizeOp oldop, JSStringFinalizeOp newop)

Definition at line 580 of file jsgc.c.

{
    uintN i;

    for (i = GCX_EXTERNAL_STRING; i < GCX_NTYPES; i++) {
        if (gc_finalizers[i] == (GCFinalizeOp) oldop) {
            gc_finalizers[i] = (GCFinalizeOp) newop;
            return (intN) i;
        }
    }
    return -1;
}

void js_FinishGC (JSRuntime *rt)

Definition at line 720 of file jsgc.c.

{
#ifdef JS_ARENAMETER
    JS_DumpArenaStats(stdout);
#endif
#ifdef JS_GCMETER
    js_DumpGCStats(rt, stdout);
#endif

    FreePtrTable(&rt->gcIteratorTable, &iteratorTableInfo);
#if JS_HAS_GENERATORS
    rt->gcCloseState.reachableList = NULL;
    METER(rt->gcStats.nclose = 0);
    rt->gcCloseState.todoQueue = NULL;
#endif
    FinishGCArenaLists(rt);

    if (rt->gcRootsHash.ops) {
#ifdef DEBUG
        CheckLeakedRoots(rt);
#endif
        JS_DHashTableFinish(&rt->gcRootsHash);
        rt->gcRootsHash.ops = NULL;
    }
    if (rt->gcLocksHash) {
        JS_DHashTableDestroy(rt->gcLocksHash);
        rt->gcLocksHash = NULL;
    }
}

void js_GC (JSContext *cx, JSGCInvocationKind gckind)

Definition at line 2645 of file jsgc.c.

{
    JSRuntime *rt;
    JSBool keepAtoms;
    uintN i, type;
    JSContext *iter, *acx;
#if JS_HAS_GENERATORS
    JSGenerator **genTodoTail;
#endif
    JSStackFrame *fp, *chain;
    JSStackHeader *sh;
    JSTempValueRooter *tvr;
    size_t nbytes, limit, offset;
    JSGCArena *a, **ap;
    uint8 flags, *flagp, *firstPage;
    JSGCThing *thing, *freeList;
    JSGCArenaList *arenaList;
    GCFinalizeOp finalizer;
    JSBool allClear;
#ifdef JS_THREADSAFE
    uint32 requestDebit;
#endif

    rt = cx->runtime;
#ifdef JS_THREADSAFE
    /* Avoid deadlock. */
    JS_ASSERT(!JS_IS_RUNTIME_LOCKED(rt));
#endif

    if (gckind == GC_LAST_DITCH) {
        /* The last ditch GC preserves all atoms and weak roots. */
        keepAtoms = JS_TRUE;
    } else {
        JS_CLEAR_WEAK_ROOTS(&cx->weakRoots);
        rt->gcPoke = JS_TRUE;

        /* Keep atoms when a suspended compile is running on another context. */
        keepAtoms = (rt->gcKeepAtoms != 0);
    }

    /*
     * Don't collect garbage if the runtime isn't up, and cx is not the last
     * context in the runtime.  The last context must force a GC, and nothing
     * should suppress that final collection or there may be shutdown leaks,
     * or runtime bloat until the next context is created.
     */
    if (rt->state != JSRTS_UP && gckind != GC_LAST_CONTEXT)
        return;

  restart_after_callback:
    /*
     * Let the API user decide to defer a GC if it wants to (unless this
     * is the last context).  Invoke the callback regardless.
     */
    if (rt->gcCallback &&
        !rt->gcCallback(cx, JSGC_BEGIN) &&
        gckind != GC_LAST_CONTEXT) {
        return;
    }

    /* Lock out other GC allocator and collector invocations. */
    if (gckind != GC_LAST_DITCH)
        JS_LOCK_GC(rt);

    /* Do nothing if no mutator has executed since the last GC. */
    if (!rt->gcPoke) {
        METER(rt->gcStats.nopoke++);
        if (gckind != GC_LAST_DITCH)
            JS_UNLOCK_GC(rt);
        return;
    }
    METER(rt->gcStats.poke++);
    rt->gcPoke = JS_FALSE;

#ifdef JS_THREADSAFE
    JS_ASSERT(cx->thread->id == js_CurrentThreadId());

    /* Bump gcLevel and return rather than nest on this thread. */
    if (rt->gcThread == cx->thread) {
        JS_ASSERT(rt->gcLevel > 0);
        rt->gcLevel++;
        METER(if (rt->gcLevel > rt->gcStats.maxlevel)
                  rt->gcStats.maxlevel = rt->gcLevel);
        if (gckind != GC_LAST_DITCH)
            JS_UNLOCK_GC(rt);
        return;
    }

    /*
     * If we're in one or more requests (possibly on more than one context)
     * running on the current thread, indicate, temporarily, that all these
     * requests are inactive.  If cx->thread is NULL, then cx is not using
     * the request model, and does not contribute to rt->requestCount.
     */
    requestDebit = 0;
    if (cx->thread) {
        JSCList *head, *link;

        /*
         * Check all contexts on cx->thread->contextList for active requests,
         * counting each such context against requestDebit.
         */
        head = &cx->thread->contextList;
        for (link = head->next; link != head; link = link->next) {
            acx = CX_FROM_THREAD_LINKS(link);
            JS_ASSERT(acx->thread == cx->thread);
            if (acx->requestDepth)
                requestDebit++;
        }
    } else {
        /*
         * We assert, but check anyway, in case someone is misusing the API.
         * Avoiding the loop over all of rt's contexts is a win in the event
         * that the GC runs only on request-less contexts with null threads,
         * in a special thread such as might be used by the UI/DOM/Layout
         * "mozilla" or "main" thread in Mozilla-the-browser.
         */
        JS_ASSERT(cx->requestDepth == 0);
        if (cx->requestDepth)
            requestDebit = 1;
    }
    if (requestDebit) {
        JS_ASSERT(requestDebit <= rt->requestCount);
        rt->requestCount -= requestDebit;
        if (rt->requestCount == 0)
            JS_NOTIFY_REQUEST_DONE(rt);
    }

    /* If another thread is already in GC, don't attempt GC; wait instead. */
    if (rt->gcLevel > 0) {
        /* Bump gcLevel to restart the current GC, so it finds new garbage. */
        rt->gcLevel++;
        METER(if (rt->gcLevel > rt->gcStats.maxlevel)
                  rt->gcStats.maxlevel = rt->gcLevel);

        /* Wait for the other thread to finish, then resume our request. */
        while (rt->gcLevel > 0)
            JS_AWAIT_GC_DONE(rt);
        if (requestDebit)
            rt->requestCount += requestDebit;
        if (gckind != GC_LAST_DITCH)
            JS_UNLOCK_GC(rt);
        return;
    }

    /* No other thread is in GC, so indicate that we're now in GC. */
    rt->gcLevel = 1;
    rt->gcThread = cx->thread;

    /* Wait for all other requests to finish. */
    while (rt->requestCount > 0)
        JS_AWAIT_REQUEST_DONE(rt);

#else  /* !JS_THREADSAFE */

    /* Bump gcLevel and return rather than nest; the outer gc will restart. */
    rt->gcLevel++;
    METER(if (rt->gcLevel > rt->gcStats.maxlevel)
              rt->gcStats.maxlevel = rt->gcLevel);
    if (rt->gcLevel > 1)
        return;

#endif /* !JS_THREADSAFE */

    /*
     * Set rt->gcRunning here within the GC lock, and after waiting for any
     * active requests to end, so that new requests that try to JS_AddRoot,
     * JS_RemoveRoot, or JS_RemoveRootRT block in JS_BeginRequest waiting for
     * rt->gcLevel to drop to zero, while request-less calls to the *Root*
     * APIs block in js_AddRoot or js_RemoveRoot (see above in this file),
     * waiting for GC to finish.
     */
    rt->gcRunning = JS_TRUE;
    JS_UNLOCK_GC(rt);

    /* Reset malloc counter. */
    rt->gcMallocBytes = 0;

    /* Drop atoms held by the property cache, and clear property weak links. */
    js_DisablePropertyCache(cx);
    js_FlushPropertyCache(cx);
#ifdef DEBUG_scopemeters
  { extern void js_DumpScopeMeters(JSRuntime *rt);
    js_DumpScopeMeters(rt);
  }
#endif

#ifdef JS_THREADSAFE
    /*
     * Set all thread local freelists to NULL. We may visit a thread's
     * freelist more than once. To avoid redundant clearing we unroll the
     * current thread's step.
     *
     * Also, in case a JSScript wrapped within an object was finalized, we
     * null acx->thread->gsnCache.script and finish the cache's hashtable.
     * Note that js_DestroyScript, called from script_finalize, will have
     * already cleared cx->thread->gsnCache above during finalization, so we
     * don't have to here.
     */
    memset(cx->thread->gcFreeLists, 0, sizeof cx->thread->gcFreeLists);
    iter = NULL;
    while ((acx = js_ContextIterator(rt, JS_FALSE, &iter)) != NULL) {
        if (!acx->thread || acx->thread == cx->thread)
            continue;
        memset(acx->thread->gcFreeLists, 0, sizeof acx->thread->gcFreeLists);
        GSN_CACHE_CLEAR(&acx->thread->gsnCache);
    }
#else
    /* The thread-unsafe case just has to clear the runtime's GSN cache. */
    GSN_CACHE_CLEAR(&rt->gsnCache);
#endif

restart:
    rt->gcNumber++;
    JS_ASSERT(!rt->gcUnscannedArenaStackTop);
    JS_ASSERT(rt->gcUnscannedBagSize == 0);

    /*
     * Mark phase.
     */
    JS_DHashTableEnumerate(&rt->gcRootsHash, gc_root_marker, cx);
    if (rt->gcLocksHash)
        JS_DHashTableEnumerate(rt->gcLocksHash, gc_lock_marker, cx);
    js_MarkAtomState(&rt->atomState, keepAtoms, gc_mark_atom_key_thing, cx);
    js_MarkWatchPoints(cx);
    js_MarkScriptFilenames(rt, keepAtoms);
    js_MarkNativeIteratorStates(cx);

#if JS_HAS_GENERATORS
    genTodoTail = MarkScheduledGenerators(cx);
    JS_ASSERT(!*genTodoTail);
#endif

    iter = NULL;
    while ((acx = js_ContextIterator(rt, JS_TRUE, &iter)) != NULL) {
        /*
         * Iterate frame chain and dormant chains. Temporarily tack current
         * frame onto the head of the dormant list to ease iteration.
         *
         * (NB: see comment on this whole "dormant" thing in js_Execute.)
         */
        chain = acx->fp;
        if (chain) {
            JS_ASSERT(!chain->dormantNext);
            chain->dormantNext = acx->dormantFrameChain;
        } else {
            chain = acx->dormantFrameChain;
        }

        for (fp = chain; fp; fp = chain = chain->dormantNext) {
            do {
                js_MarkStackFrame(cx, fp);
            } while ((fp = fp->down) != NULL);
        }

        /* Cleanup temporary "dormant" linkage. */
        if (acx->fp)
            acx->fp->dormantNext = NULL;

        /* Mark other roots-by-definition in acx. */
        GC_MARK(cx, acx->globalObject, "global object");
        MarkWeakRoots(cx, &acx->weakRoots);
        if (acx->throwing) {
            if (JSVAL_IS_GCTHING(acx->exception))
                GC_MARK(cx, JSVAL_TO_GCTHING(acx->exception), "exception");
        } else {
            /* Avoid keeping GC-ed junk stored in JSContext.exception. */
            acx->exception = JSVAL_NULL;
        }
#if JS_HAS_LVALUE_RETURN
        if (acx->rval2set && JSVAL_IS_GCTHING(acx->rval2))
            GC_MARK(cx, JSVAL_TO_GCTHING(acx->rval2), "rval2");
#endif

        for (sh = acx->stackHeaders; sh; sh = sh->down) {
            METER(rt->gcStats.stackseg++);
            METER(rt->gcStats.segslots += sh->nslots);
            GC_MARK_JSVALS(cx, sh->nslots, JS_STACK_SEGMENT(sh), "stack");
        }

        if (acx->localRootStack)
            js_MarkLocalRoots(cx, acx->localRootStack);

        for (tvr = acx->tempValueRooters; tvr; tvr = tvr->down) {
            switch (tvr->count) {
              case JSTVU_SINGLE:
                if (JSVAL_IS_GCTHING(tvr->u.value)) {
                    GC_MARK(cx, JSVAL_TO_GCTHING(tvr->u.value),
                            "tvr->u.value");
                }
                break;
              case JSTVU_MARKER:
                tvr->u.marker(cx, tvr);
                break;
              case JSTVU_SPROP:
                MARK_SCOPE_PROPERTY(cx, tvr->u.sprop);
                break;
              case JSTVU_WEAK_ROOTS:
                MarkWeakRoots(cx, tvr->u.weakRoots);
                break;
              case JSTVU_SCRIPT:
                js_MarkScript(cx, tvr->u.script);
                break;
              default:
                JS_ASSERT(tvr->count >= 0);
                GC_MARK_JSVALS(cx, tvr->count, tvr->u.array, "tvr->u.array");
            }
        }

        if (acx->sharpObjectMap.depth > 0)
            js_GCMarkSharpMap(cx, &acx->sharpObjectMap);
    }

#ifdef DUMP_CALL_TABLE
    js_DumpCallTable(cx);
#endif

    /*
     * Mark children of things that caused too deep recursion during above
     * marking phase.
     */
    ScanDelayedChildren(cx);

#if JS_HAS_GENERATORS
    /*
     * Close phase: search and mark part. See comments in
     * FindAndMarkObjectsToClose for details.
     */
    FindAndMarkObjectsToClose(cx, gckind, genTodoTail);

    /*
     * Mark children of things that caused too deep recursion during the
     * just-completed marking part of the close phase.
     */
    ScanDelayedChildren(cx);
#endif

    JS_ASSERT(!cx->insideGCMarkCallback);
    if (rt->gcCallback) {
        cx->insideGCMarkCallback = JS_TRUE;
        (void) rt->gcCallback(cx, JSGC_MARK_END);
        JS_ASSERT(cx->insideGCMarkCallback);
        cx->insideGCMarkCallback = JS_FALSE;
    }
    JS_ASSERT(rt->gcUnscannedBagSize == 0);

    /* Finalize iterator states before the objects they iterate over. */
    CloseIteratorStates(cx);

    /*
     * Sweep phase.
     *
     * Finalize as we sweep, outside of rt->gcLock but with rt->gcRunning set
     * so that any attempt to allocate a GC-thing from a finalizer will fail,
     * rather than nest badly and leave the unmarked newborn to be swept.
     *
     * Finalize smaller objects before larger, to guarantee finalization of
     * GC-allocated obj->slots after obj.  See FreeSlots in jsobj.c.
     */
    for (i = 0; i < GC_NUM_FREELISTS; i++) {
        arenaList = &rt->gcArenaList[i];
        nbytes = GC_FREELIST_NBYTES(i);
        limit = arenaList->lastLimit;
        for (a = arenaList->last; a; a = a->prev) {
            JS_ASSERT(!a->prevUnscanned);
            JS_ASSERT(a->unscannedPages == 0);
            firstPage = (uint8 *) FIRST_THING_PAGE(a);
            for (offset = 0; offset != limit; offset += nbytes) {
                if ((offset & GC_PAGE_MASK) == 0) {
                    JS_ASSERT(((JSGCPageInfo *)(firstPage + offset))->
                              unscannedBitmap == 0);
                    offset += PAGE_THING_GAP(nbytes);
                }
                JS_ASSERT(offset < limit);
                flagp = a->base + offset / sizeof(JSGCThing);
                if (flagp >= firstPage)
                    flagp += GC_THINGS_SIZE;
                flags = *flagp;
                if (flags & GCF_MARK) {
                    *flagp &= ~GCF_MARK;
                } else if (!(flags & (GCF_LOCK | GCF_FINAL))) {
                    /* Call the finalizer with GCF_FINAL ORed into flags. */
                    type = flags & GCF_TYPEMASK;
                    finalizer = gc_finalizers[type];
                    if (finalizer) {
                        thing = (JSGCThing *)(firstPage + offset);
                        *flagp = (uint8)(flags | GCF_FINAL);
                        if (type >= GCX_EXTERNAL_STRING)
                            js_PurgeDeflatedStringCache(rt, (JSString *)thing);
                        finalizer(cx, thing);
                    }

                    /* Set flags to GCF_FINAL, signifying that thing is free. */
                    *flagp = GCF_FINAL;
                }
            }
            limit = GC_THINGS_SIZE;
        }
    }

    /*
     * Sweep the runtime's property tree after finalizing objects, in case any
     * had watchpoints referencing tree nodes.  Then sweep atoms, which may be
     * referenced from dead property ids.
     */
    js_SweepScopeProperties(rt);
    js_SweepAtomState(&rt->atomState);

    /*
     * Sweep script filenames after sweeping functions in the generic loop
     * above. In this way when a scripted function's finalizer destroys the
     * script and calls rt->destroyScriptHook, the hook can still access the
     * script's filename. See bug 323267.
     */
    js_SweepScriptFilenames(rt);

    /*
     * Free phase.
     * Free any unused arenas and rebuild the JSGCThing freelist.
     */
    for (i = 0; i < GC_NUM_FREELISTS; i++) {
        arenaList = &rt->gcArenaList[i];
        ap = &arenaList->last;
        a = *ap;
        if (!a)
            continue;

        allClear = JS_TRUE;
        arenaList->freeList = NULL;
        freeList = NULL;
        METER(arenaList->stats.nthings = 0);
        METER(arenaList->stats.freelen = 0);

        nbytes = GC_FREELIST_NBYTES(i);
        limit = arenaList->lastLimit;
        do {
            METER(size_t nfree = 0);
            firstPage = (uint8 *) FIRST_THING_PAGE(a);
            for (offset = 0; offset != limit; offset += nbytes) {
                if ((offset & GC_PAGE_MASK) == 0)
                    offset += PAGE_THING_GAP(nbytes);
                JS_ASSERT(offset < limit);
                flagp = a->base + offset / sizeof(JSGCThing);
                if (flagp >= firstPage)
                    flagp += GC_THINGS_SIZE;

                if (*flagp != GCF_FINAL) {
                    allClear = JS_FALSE;
                    METER(++arenaList->stats.nthings);
                } else {
                    thing = (JSGCThing *)(firstPage + offset);
                    thing->flagp = flagp;
                    thing->next = freeList;
                    freeList = thing;
                    METER(++nfree);
                }
            }
            if (allClear) {
                /*
                 * Forget just assembled free list head for the arena
                 * and destroy the arena itself.
                 */
                freeList = arenaList->freeList;
                DestroyGCArena(rt, arenaList, ap);
            } else {
                allClear = JS_TRUE;
                arenaList->freeList = freeList;
                ap = &a->prev;
                METER(arenaList->stats.freelen += nfree);
                METER(arenaList->stats.totalfreelen += nfree);
                METER(++arenaList->stats.totalarenas);
            }
            limit = GC_THINGS_SIZE;
        } while ((a = *ap) != NULL);
    }

    if (rt->gcCallback)
        (void) rt->gcCallback(cx, JSGC_FINALIZE_END);
#ifdef DEBUG_srcnotesize
  { extern void DumpSrcNoteSizeHist();
    DumpSrcNoteSizeHist();
    printf("GC HEAP SIZE %lu (%lu)\n",
           (unsigned long)rt->gcBytes, (unsigned long)rt->gcPrivateBytes);
  }
#endif

    JS_LOCK_GC(rt);

    /*
     * We want to restart GC if js_GC was called recursively or if any of the
     * finalizers called js_RemoveRoot or js_UnlockGCThingRT.
     */
    if (rt->gcLevel > 1 || rt->gcPoke) {
        rt->gcLevel = 1;
        rt->gcPoke = JS_FALSE;
        JS_UNLOCK_GC(rt);
        goto restart;
    }
    js_EnablePropertyCache(cx);
    rt->gcLevel = 0;
    rt->gcLastBytes = rt->gcBytes;
    rt->gcRunning = JS_FALSE;

#ifdef JS_THREADSAFE
    /* If we were invoked during a request, pay back the temporary debit. */
    if (requestDebit)
        rt->requestCount += requestDebit;
    rt->gcThread = NULL;
    JS_NOTIFY_GC_DONE(rt);

    /*
     * Unlock unless we have GC_LAST_DITCH which requires locked GC on return.
     */
    if (gckind != GC_LAST_DITCH)
        JS_UNLOCK_GC(rt);
#endif

    /* Execute JSGC_END callback outside the lock. */
    if (rt->gcCallback) {
        JSWeakRoots savedWeakRoots;
        JSTempValueRooter tvr;

        if (gckind == GC_LAST_DITCH) {
            /*
             * We allow JSGC_END implementation to force a full GC or allocate
             * new GC things. Thus we must protect the weak roots from GC or
             * overwrites.
             */
            savedWeakRoots = cx->weakRoots;
            JS_PUSH_TEMP_ROOT_WEAK_COPY(cx, &savedWeakRoots, &tvr);
            JS_KEEP_ATOMS(rt);
            JS_UNLOCK_GC(rt);
        }

        (void) rt->gcCallback(cx, JSGC_END);

        if (gckind == GC_LAST_DITCH) {
            JS_LOCK_GC(rt);
            JS_UNKEEP_ATOMS(rt);
            JS_POP_TEMP_ROOT(cx, &tvr);
        } else if (gckind == GC_LAST_CONTEXT && rt->gcPoke) {
            /*
             * On shutdown iterate until JSGC_END callback stops creating
             * garbage.
             */
            goto restart_after_callback;
        }
    }
}

JSRuntime * js_GetGCStringRuntime (JSString *str)

Definition at line 503 of file jsgc.c.

{
    JSGCPageInfo *pi;
    JSGCArenaList *list;

    pi = THING_TO_PAGE(str);
    list = PAGE_TO_ARENA(pi)->list;

    JS_ASSERT(list->thingSize == sizeof(JSGCThing));
    JS_ASSERT(GC_FREELIST_INDEX(sizeof(JSGCThing)) == 0);

    return (JSRuntime *)((uint8 *)list - offsetof(JSRuntime, gcArenaList));
}

uint8 * js_GetGCThingFlags (void *thing)

Definition at line 486 of file jsgc.c.

{
    JSGCPageInfo *pi;
    jsuword offsetInArena, thingIndex;

    pi = THING_TO_PAGE(thing);
    offsetInArena = pi->offsetInArena;
    JS_ASSERT(offsetInArena < GC_THINGS_SIZE);
    thingIndex = ((offsetInArena & ~GC_PAGE_MASK) |
                  ((jsuword)thing & GC_PAGE_MASK)) / sizeof(JSGCThing);
    JS_ASSERT(thingIndex < GC_PAGE_SIZE);
    if (thingIndex >= (offsetInArena & GC_PAGE_MASK))
        thingIndex += GC_THINGS_SIZE;
    return (uint8 *)pi - offsetInArena + thingIndex;
}

JSBool js_InitGC (JSRuntime *rt, uint32 maxbytes)

Definition at line 606 of file jsgc.c.

{
    InitGCArenaLists(rt);
    if (!JS_DHashTableInit(&rt->gcRootsHash, JS_DHashGetStubOps(), NULL,
                           sizeof(JSGCRootHashEntry), GC_ROOTS_SIZE)) {
        rt->gcRootsHash.ops = NULL;
        return JS_FALSE;
    }
    rt->gcLocksHash = NULL;     /* create lazily */

    /*
     * Separate gcMaxMallocBytes from gcMaxBytes but initialize to maxbytes
     * for default backward API compatibility.
     */
    rt->gcMaxBytes = rt->gcMaxMallocBytes = maxbytes;

    return JS_TRUE;
}

JSBool js_IsAboutToBeFinalized (JSContext *cx, void *thing)

Definition at line 518 of file jsgc.c.

{
    uint8 flags = *js_GetGCThingFlags(thing);

    return !(flags & (GCF_MARK | GCF_LOCK | GCF_FINAL));
}

JSBool js_LockGCThing (JSContext *cx, void *thing)

Definition at line 1593 of file jsgc.c.

{
    JSBool ok = js_LockGCThingRT(cx->runtime, thing);
    if (!ok)
        JS_ReportOutOfMemory(cx);
    return ok;
}

JSBool js_LockGCThingRT (JSRuntime *rt, void *thing)

Definition at line 1627 of file jsgc.c.

{
    JSBool ok, deep;
    uint8 *flagp;
    uintN flags, lock, type;
    JSGCLockHashEntry *lhe;

    ok = JS_TRUE;
    if (!thing)
        return ok;

    flagp = js_GetGCThingFlags(thing);

    JS_LOCK_GC(rt);
    flags = *flagp;
    lock = (flags & GCF_LOCK);
    type = (flags & GCF_TYPEMASK);
    deep = GC_THING_IS_DEEP(type, thing);

    /*
     * Avoid adding a rt->gcLocksHash entry for shallow things until someone
     * nests a lock -- then start such an entry with a count of 2, not 1.
     */
    if (lock || deep) {
        if (!rt->gcLocksHash) {
            rt->gcLocksHash =
                JS_NewDHashTable(JS_DHashGetStubOps(), NULL,
                                 sizeof(JSGCLockHashEntry),
                                 GC_ROOTS_SIZE);
            if (!rt->gcLocksHash) {
                ok = JS_FALSE;
                goto done;
            }
        } else if (lock == 0) {
#ifdef DEBUG
            JSDHashEntryHdr *hdr =
                JS_DHashTableOperate(rt->gcLocksHash, thing,
                                     JS_DHASH_LOOKUP);
            JS_ASSERT(JS_DHASH_ENTRY_IS_FREE(hdr));
#endif
        }

        lhe = (JSGCLockHashEntry *)
            JS_DHashTableOperate(rt->gcLocksHash, thing, JS_DHASH_ADD);
        if (!lhe) {
            ok = JS_FALSE;
            goto done;
        }
        if (!lhe->thing) {
            lhe->thing = thing;
            lhe->count = deep ? 1 : 2;
        } else {
            JS_ASSERT(lhe->count >= 1);
            lhe->count++;
        }
    }

    *flagp = (uint8)(flags | GCF_LOCK);
    METER(rt->gcStats.lock++);
    ok = JS_TRUE;
done:
    JS_UNLOCK_GC(rt);
    return ok;
}
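
The comment in js_LockGCThingRT describes the locking discipline: a shallow thing's first lock is recorded only in its GCF_LOCK flag (an implicit count of 1), and a gcLocksHash entry is created only when a lock nests (count starting at 2) or when the thing is deep (count starting at 1); js_UnlockGCThingRT below undoes this. A minimal standalone model of that counting scheme, with the hash entry reduced to a presence flag plus a counter:

#include <assert.h>
#include <stdio.h>

/* Toy model of one GC thing's lock state; not engine code. */
typedef struct LockState {
    int locked;     /* models the GCF_LOCK flag bit */
    int has_entry;  /* models presence of a gcLocksHash entry */
    int count;      /* models JSGCLockHashEntry.count */
} LockState;

static void lock_thing(LockState *ls, int deep)
{
    if (ls->locked || deep) {
        if (!ls->has_entry) {
            ls->has_entry = 1;
            ls->count = deep ? 1 : 2;   /* nested shallow lock starts at 2 */
        } else {
            ls->count++;
        }
    }
    ls->locked = 1;
}

static void unlock_thing(LockState *ls)
{
    if (!ls->locked)
        return;
    if (!ls->has_entry) {
        ls->locked = 0;                 /* shallow, implicit count of 1 */
    } else if (--ls->count == 0) {
        ls->has_entry = 0;
        ls->locked = 0;
    }
}

int main(void)
{
    LockState shallow = {0, 0, 0};

    lock_thing(&shallow, 0);            /* flag only, no hash entry */
    lock_thing(&shallow, 0);            /* nests: entry created with count 2 */
    unlock_thing(&shallow);
    unlock_thing(&shallow);
    assert(!shallow.locked && !shallow.has_entry);
    printf("shallow thing fully unlocked after two unlocks\n");
    return 0;
}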

uint32 js_MapGCRoots (JSRuntime *rt, JSGCRootMapFun map, void *data)

Definition at line 925 of file jsgc.c.

void js_MarkAtom (JSContext *cx, JSAtom *atom)

Definition at line 1978 of file jsgc.c.

{
    jsval key;

    if (atom->flags & ATOM_MARK)
        return;
    atom->flags |= ATOM_MARK;
    key = ATOM_KEY(atom);
    if (JSVAL_IS_GCTHING(key)) {
#ifdef GC_MARK_DEBUG
        char name[32];

        if (JSVAL_IS_STRING(key)) {
            JS_snprintf(name, sizeof name, "'%s'",
                        JS_GetStringBytes(JSVAL_TO_STRING(key)));
        } else {
            JS_snprintf(name, sizeof name, "<%x>", key);
        }
#endif
        GC_MARK(cx, JSVAL_TO_GCTHING(key), name);
    }
    if (atom->flags & ATOM_HIDDEN)
        js_MarkAtom(cx, atom->entry.value);
}

void js_MarkGCThing (JSContext *cx, void *thing)

Definition at line 2438 of file jsgc.c.

{
    uint8 *flagp;

    if (!thing)
        return;

    flagp = js_GetGCThingFlags(thing);
    JS_ASSERT(*flagp != GCF_FINAL);
    if (*flagp & GCF_MARK)
        return;
    *flagp |= GCF_MARK;

    if (!cx->insideGCMarkCallback) {
        MarkGCThingChildren(cx, thing, flagp, JS_TRUE);
    } else {
        /*
         * For API compatibility we allow for the callback to assume that
         * after it calls js_MarkGCThing for the last time, the callback
         * can start to finalize its own objects that are only referenced
         * by unmarked GC things.
         *
         * Since we do not know which call from inside the callback is the
         * last, we ensure that the unscanned bag is always empty when we
         * return to the callback and all marked things are scanned.
         *
         * As an optimization we do not check for the stack size here and
         * pass JS_FALSE as the last argument to MarkGCThingChildren.
         * Otherwise with low C stack the thing would be pushed to the bag
         * just to be fed to MarkGCThingChildren from inside
         * ScanDelayedChildren.
         */
        cx->insideGCMarkCallback = JS_FALSE;
        MarkGCThingChildren(cx, thing, flagp, JS_FALSE);
        ScanDelayedChildren(cx);
        cx->insideGCMarkCallback = JS_TRUE;
    }
}

void js_MarkStackFrame (JSContext *cx, JSStackFrame *fp)

Definition at line 2544 of file jsgc.c.

{
    uintN depth, nslots;

    if (fp->callobj)
        GC_MARK(cx, fp->callobj, "call object");
    if (fp->argsobj)
        GC_MARK(cx, fp->argsobj, "arguments object");
    if (fp->varobj)
        GC_MARK(cx, fp->varobj, "variables object");
    if (fp->script) {
        js_MarkScript(cx, fp->script);
        if (fp->spbase) {
            /*
             * Don't mark what has not been pushed yet, or what has been
             * popped already.
             */
            depth = fp->script->depth;
            nslots = (JS_UPTRDIFF(fp->sp, fp->spbase)
                      < depth * sizeof(jsval))
                     ? (uintN)(fp->sp - fp->spbase)
                     : depth;
            GC_MARK_JSVALS(cx, nslots, fp->spbase, "operand");
        }
    }

    /* Allow for primitive this parameter due to JSFUN_THISP_* flags. */
    JS_ASSERT(JSVAL_IS_OBJECT((jsval)fp->thisp) ||
              (fp->fun && JSFUN_THISP_FLAGS(fp->fun->flags)));
    if (JSVAL_IS_GCTHING((jsval)fp->thisp))
        GC_MARK(cx, JSVAL_TO_GCTHING((jsval)fp->thisp), "this");

    if (fp->callee)
        GC_MARK(cx, fp->callee, "callee object");

    /*
     * Mark fp->argv, even though in the common case it will be marked via our
     * caller's frame, or via a JSStackHeader if fp was pushed by an external
     * invocation.
     *
     * The hard case is when there is not enough contiguous space in the stack
     * arena for actual, missing formal, and local root (JSFunctionSpec.extra)
     * slots.  In this case, fp->argv points to new space in a new arena, and
     * marking the caller's operand stack, or an external caller's allocated
     * stack tracked by a JSStackHeader, will not mark all the values stored
     * and addressable via fp->argv.
     *
     * So in summary, solely for the hard case of moving argv due to missing
     * formals and extra roots, we must mark actuals, missing formals, and any
     * local roots arrayed at fp->argv here.
     *
     * It would be good to avoid redundant marking of the same reference, in
     * the case where fp->argv does point into caller-allocated space tracked
     * by fp->down->spbase or cx->stackHeaders.  This would allow callbacks
     * such as the forthcoming rt->gcThingCallback (bug 333078) to compute JS
     * reference counts.  So this comment deserves a FIXME bug to cite.
     */
    if (fp->argv) {
        nslots = fp->argc;
        if (fp->fun) {
            if (fp->fun->nargs > nslots)
                nslots = fp->fun->nargs;
            if (!FUN_INTERPRETED(fp->fun))
                nslots += fp->fun->u.n.extra;
        }
        GC_MARK_JSVALS(cx, nslots + 2, fp->argv - 2, "arg");
    }
    if (JSVAL_IS_GCTHING(fp->rval))
        GC_MARK(cx, JSVAL_TO_GCTHING(fp->rval), "rval");
    if (fp->vars)
        GC_MARK_JSVALS(cx, fp->nvars, fp->vars, "var");
    GC_MARK(cx, fp->scopeChain, "scope chain");
    if (fp->sharpArray)
        GC_MARK(cx, fp->sharpArray, "sharp array");

    if (fp->xmlNamespace)
        GC_MARK(cx, fp->xmlNamespace, "xmlNamespace");
}

void * js_NewGCThing (JSContext *cx, uintN flags, size_t nbytes)

Definition at line 1341 of file jsgc.c.

{
    JSRuntime *rt;
    uintN flindex;
    JSBool doGC;
    JSGCThing *thing;
    uint8 *flagp, *firstPage;
    JSGCArenaList *arenaList;
    jsuword offset;
    JSGCArena *a;
    JSLocalRootStack *lrs;
#ifdef JS_THREADSAFE
    JSBool gcLocked;
    uintN localMallocBytes;
    JSGCThing **flbase, **lastptr;
    JSGCThing *tmpthing;
    uint8 *tmpflagp;
    uintN maxFreeThings;         /* max to take from the global free list */
    METER(size_t nfree);
#endif

    rt = cx->runtime;
    METER(rt->gcStats.alloc++);        /* this is not thread-safe */
    nbytes = JS_ROUNDUP(nbytes, sizeof(JSGCThing));
    flindex = GC_FREELIST_INDEX(nbytes);

#ifdef JS_THREADSAFE
    gcLocked = JS_FALSE;
    JS_ASSERT(cx->thread);
    flbase = cx->thread->gcFreeLists;
    JS_ASSERT(flbase);
    thing = flbase[flindex];
    localMallocBytes = cx->thread->gcMallocBytes;
    if (thing && rt->gcMaxMallocBytes - rt->gcMallocBytes > localMallocBytes) {
        flagp = thing->flagp;
        flbase[flindex] = thing->next;
        METER(rt->gcStats.localalloc++);  /* this is not thread-safe */
        goto success;
    }

    JS_LOCK_GC(rt);
    gcLocked = JS_TRUE;

    /* Transfer thread-local counter to global one. */
    if (localMallocBytes != 0) {
        cx->thread->gcMallocBytes = 0;
        if (rt->gcMaxMallocBytes - rt->gcMallocBytes < localMallocBytes)
            rt->gcMallocBytes = rt->gcMaxMallocBytes;
        else
            rt->gcMallocBytes += localMallocBytes;
    }
#endif
    JS_ASSERT(!rt->gcRunning);
    if (rt->gcRunning) {
        METER(rt->gcStats.finalfail++);
        JS_UNLOCK_GC(rt);
        return NULL;
    }

    doGC = (rt->gcMallocBytes >= rt->gcMaxMallocBytes);
#ifdef JS_GC_ZEAL
    if (rt->gcZeal >= 1) {
        doGC = JS_TRUE;
        if (rt->gcZeal >= 2)
            rt->gcPoke = JS_TRUE;
    }
#endif /* !JS_GC_ZEAL */

    arenaList = &rt->gcArenaList[flindex];
    for (;;) {
        if (doGC) {
            /*
             * Keep rt->gcLock across the call into js_GC so we don't starve
             * and lose to racing threads who deplete the heap just after
             * js_GC has replenished it (or has synchronized with a racing
             * GC that collected a bunch of garbage).  This unfair scheduling
             * can happen on certain operating systems. For the gory details,
             * see bug 162779 at https://bugzilla.mozilla.org/.
             */
            js_GC(cx, GC_LAST_DITCH);
            METER(rt->gcStats.retry++);
        }

        /* Try to get thing from the free list. */
        thing = arenaList->freeList;
        if (thing) {
            arenaList->freeList = thing->next;
            flagp = thing->flagp;
            JS_ASSERT(*flagp & GCF_FINAL);
            METER(arenaList->stats.freelen--);
            METER(arenaList->stats.recycle++);

#ifdef JS_THREADSAFE
            /*
             * Refill the local free list by taking several things from the
             * global free list unless we are still at rt->gcMaxMallocBytes
             * barrier or the free list is already populated. The former
             * happens when GC is canceled due to !gcCallback(cx, JSGC_BEGIN)
             * or no gcPoke. The latter is caused via allocating new things
             * in gcCallback(cx, JSGC_END).
             */
            if (rt->gcMallocBytes >= rt->gcMaxMallocBytes || flbase[flindex])
                break;
            tmpthing = arenaList->freeList;
            if (tmpthing) {
                maxFreeThings = MAX_THREAD_LOCAL_THINGS;
                do {
                    if (!tmpthing->next)
                        break;
                    tmpthing = tmpthing->next;
                } while (--maxFreeThings != 0);

                flbase[flindex] = arenaList->freeList;
                arenaList->freeList = tmpthing->next;
                tmpthing->next = NULL;
            }
#endif
            break;
        }

        /* Allocate from the tail of last arena or from new arena if we can. */
        if ((arenaList->last && arenaList->lastLimit != GC_THINGS_SIZE) ||
            NewGCArena(rt, arenaList)) {

            offset = arenaList->lastLimit;
            if ((offset & GC_PAGE_MASK) == 0) {
                /*
                 * Skip JSGCPageInfo record located at GC_PAGE_SIZE boundary.
                 */
                offset += PAGE_THING_GAP(nbytes);
            }
            JS_ASSERT(offset + nbytes <= GC_THINGS_SIZE);
            arenaList->lastLimit = (uint16)(offset + nbytes);
            a = arenaList->last;
            firstPage = (uint8 *)FIRST_THING_PAGE(a);
            thing = (JSGCThing *)(firstPage + offset);
            flagp = a->base + offset / sizeof(JSGCThing);
            if (flagp >= firstPage)
                flagp += GC_THINGS_SIZE;
            METER(++arenaList->stats.nthings);
            METER(arenaList->stats.maxthings =
                  JS_MAX(arenaList->stats.nthings,
                         arenaList->stats.maxthings));

#ifdef JS_THREADSAFE
            /*
             * Refill the local free list by taking free things from the last
             * arena. Prefer to order free things by ascending address in the
             * (unscientific) hope of better cache locality.
             */
            if (rt->gcMallocBytes >= rt->gcMaxMallocBytes || flbase[flindex])
                break;
            METER(nfree = 0);
            lastptr = &flbase[flindex];
            maxFreeThings = MAX_THREAD_LOCAL_THINGS;
            for (offset = arenaList->lastLimit;
                 offset != GC_THINGS_SIZE && maxFreeThings-- != 0;
                 offset += nbytes) {
                if ((offset & GC_PAGE_MASK) == 0)
                    offset += PAGE_THING_GAP(nbytes);
                JS_ASSERT(offset + nbytes <= GC_THINGS_SIZE);
                tmpflagp = a->base + offset / sizeof(JSGCThing);
                if (tmpflagp >= firstPage)
                    tmpflagp += GC_THINGS_SIZE;

                tmpthing = (JSGCThing *)(firstPage + offset);
                tmpthing->flagp = tmpflagp;
                *tmpflagp = GCF_FINAL;    /* signifying that thing is free */

                *lastptr = tmpthing;
                lastptr = &tmpthing->next;
                METER(++nfree);
            }
            arenaList->lastLimit = offset;
            *lastptr = NULL;
            METER(arenaList->stats.freelen += nfree);
#endif
            break;
        }

        /* Consider doing a "last ditch" GC unless already tried. */
        if (doGC)
            goto fail;
        rt->gcPoke = JS_TRUE;
        doGC = JS_TRUE;
    }

    /* We successfully allocated the thing. */
#ifdef JS_THREADSAFE
  success:
#endif
    lrs = cx->localRootStack;
    if (lrs) {
        /*
         * If we're in a local root scope, don't set newborn[type] at all, to
         * avoid entraining garbage from it for an unbounded amount of time
         * on this context.  A caller will leave the local root scope and pop
         * this reference, allowing thing to be GC'd if it has no other refs.
         * See JS_EnterLocalRootScope and related APIs.
         */
        if (js_PushLocalRoot(cx, lrs, (jsval) thing) < 0) {
            /*
             * When we fail for a thing allocated through the tail of the last
             * arena, thing's flag byte is not initialized. So to prevent GC
             * accessing the uninitialized flags during the finalization, we
             * always mark the thing as final. See bug 337407.
             */
            *flagp = GCF_FINAL;
            goto fail;
        }
    } else {
        /*
         * No local root scope, so we're stuck with the old, fragile model of
         * depending on a pigeon-hole newborn per type per context.
         */
        cx->weakRoots.newborn[flags & GCF_TYPEMASK] = thing;
    }

    /* We can't fail now, so update flags and rt->gc{,Private}Bytes. */
    *flagp = (uint8)flags;

    /*
     * Clear thing before unlocking in case a GC run is about to scan it,
     * finding it via newborn[].
     */
    thing->next = NULL;
    thing->flagp = NULL;
#ifdef DEBUG_gchist
    gchist[gchpos].lastDitch = doGC;
    gchist[gchpos].freeList = rt->gcArenaList[flindex].freeList;
    if (++gchpos == NGCHIST)
        gchpos = 0;
#endif
    METER(if (flags & GCF_LOCK) rt->gcStats.lockborn++);
    METER(++rt->gcArenaList[flindex].stats.totalnew);
#ifdef JS_THREADSAFE
    if (gcLocked)
        JS_UNLOCK_GC(rt);
#endif
    return thing;

fail:
#ifdef JS_THREADSAFE
    if (gcLocked)
        JS_UNLOCK_GC(rt);
#endif
    METER(rt->gcStats.fail++);
    JS_ReportOutOfMemory(cx);
    return NULL;
}
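
The loop above services allocations from arenaList->freeList by popping the head thing, while the sweep phase in js_GC pushes finalized things back and tags them GCF_FINAL. A standalone sketch of that freelist discipline only (not the arena/flag-page layout), using the flagp/next fields from the JSGCThing class documentation; the GCF_FINAL value of 0x20 assumes GCX_NTYPES_LOG2 == 4:

#include <stdio.h>

#define GCF_FINAL 0x20u   /* assumed: JS_BIT(GCX_NTYPES_LOG2 + 1), GCX_NTYPES_LOG2 == 4 */

typedef struct JSGCThing JSGCThing;
struct JSGCThing {
    JSGCThing     *next;
    unsigned char *flagp;
};

/* Sweep side: thread a freed thing onto the freelist and mark it free. */
static JSGCThing *push_free(JSGCThing *freeList, JSGCThing *thing,
                            unsigned char *flagp)
{
    *flagp = GCF_FINAL;
    thing->flagp = flagp;
    thing->next = freeList;
    return thing;
}

/* Allocation side: pop the head, as in the "Try to get thing" branch above. */
static JSGCThing *pop_free(JSGCThing **freeList)
{
    JSGCThing *thing = *freeList;
    if (thing)
        *freeList = thing->next;
    return thing;
}

int main(void)
{
    JSGCThing things[2];
    unsigned char flags[2] = {0, 0};
    JSGCThing *freeList = NULL;

    freeList = push_free(freeList, &things[0], &flags[0]);
    freeList = push_free(freeList, &things[1], &flags[1]);
    printf("popped %p then %p\n",
           (void *) pop_free(&freeList), (void *) pop_free(&freeList));
    return 0;
}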

JSBool js_RegisterCloseableIterator (JSContext *cx, JSObject *obj)

Definition at line 939 of file jsgc.c.

{
    JSRuntime *rt;
    JSBool ok;

    rt = cx->runtime;
    JS_ASSERT(!rt->gcRunning);

    JS_LOCK_GC(rt);
    ok = AddToPtrTable(cx, &rt->gcIteratorTable, &iteratorTableInfo, obj);
    JS_UNLOCK_GC(rt);
    return ok;
}

void js_RegisterGenerator (JSContext *cx, JSGenerator *gen)

Definition at line 979 of file jsgc.c.

{
    JSRuntime *rt;

    rt = cx->runtime;
    JS_ASSERT(!rt->gcRunning);
    JS_ASSERT(rt->state != JSRTS_LANDING);
    JS_ASSERT(gen->state == JSGEN_NEWBORN);

    JS_LOCK_GC(rt);
    gen->next = rt->gcCloseState.reachableList;
    rt->gcCloseState.reachableList = gen;
    METER(rt->gcStats.nclose++);
    METER(rt->gcStats.maxnclose = JS_MAX(rt->gcStats.maxnclose,
                                         rt->gcStats.nclose));
    JS_UNLOCK_GC(rt);
}

JSBool js_RemoveRoot (JSRuntime *rt, void *rp)

Definition at line 801 of file jsgc.c.

{
    /*
     * Due to the JS_RemoveRootRT API, we may be called outside of a request.
     * Same synchronization drill as above in js_AddRoot.
     */
    JS_LOCK_GC(rt);
#ifdef JS_THREADSAFE
    JS_ASSERT(!rt->gcRunning || rt->gcLevel > 0);
    if (rt->gcRunning && rt->gcThread->id != js_CurrentThreadId()) {
        do {
            JS_AWAIT_GC_DONE(rt);
        } while (rt->gcLevel > 0);
    }
#endif
    (void) JS_DHashTableOperate(&rt->gcRootsHash, rp, JS_DHASH_REMOVE);
    rt->gcPoke = JS_TRUE;
    JS_UNLOCK_GC(rt);
    return JS_TRUE;
}

JSBool js_RunCloseHooks (JSContext *cx)

Definition at line 1196 of file jsgc.c.

{
    JSRuntime *rt;
    JSTempCloseList tempList;
    JSStackFrame *fp;
    JSGenerator **genp, *gen;
    JSBool ok, defer;
#if JS_GCMETER
    uint32 deferCount;
#endif

    rt = cx->runtime;

    /*
     * It is OK to access todoQueue outside the lock here. When many threads
     * update the todo list, accessing some older value of todoQueue in the
     * worst case just delays the execution of close hooks.
     */
    if (!rt->gcCloseState.todoQueue)
        return JS_TRUE;

    /*
     * To prevent an infinite loop when a close hook creates more objects with
     * close hooks and then triggers GC, we ignore recursive invocations of
     * js_RunCloseHooks and limit the number of hooks to execute to the
     * initial size of the list.
     */
    if (*GC_RUNNING_CLOSE_HOOKS_PTR(cx))
        return JS_TRUE;

    *GC_RUNNING_CLOSE_HOOKS_PTR(cx) = JS_TRUE;

    JS_LOCK_GC(rt);
    tempList.head = rt->gcCloseState.todoQueue;
    JS_PUSH_TEMP_CLOSE_LIST(cx, &tempList);
    rt->gcCloseState.todoQueue = NULL;
    METER(rt->gcStats.closelater = 0);
    rt->gcPoke = JS_TRUE;
    JS_UNLOCK_GC(rt);

    /*
     * Set aside cx->fp since we do not want a close hook using caller or
     * other means to backtrace into whatever stack might be active when
     * running the hook. We store the current frame on the dormant list to
     * protect against GC that the hook can trigger.
     */
    fp = cx->fp;
    if (fp) {
        JS_ASSERT(!fp->dormantNext);
        fp->dormantNext = cx->dormantFrameChain;
        cx->dormantFrameChain = fp;
    }
    cx->fp = NULL;

    genp = &tempList.head;
    ok = JS_TRUE;
    while ((gen = *genp) != NULL) {
        ok = ShouldDeferCloseHook(cx, gen, &defer);
        if (!ok) {
            /* Quit ASAP discarding the hook. */
            *genp = gen->next;
            break;
        }
        if (defer) {
            genp = &gen->next;
            METER(deferCount++);
            continue;
        }
        ok = js_CloseGeneratorObject(cx, gen);

        /*
         * Unlink the generator after closing it to make sure it always stays
         * rooted through tempList.
         */
        *genp = gen->next;

        if (cx->throwing) {
            /*
             * Report the exception thrown by the close hook and continue to
             * execute the rest of the hooks.
             */
            if (!js_ReportUncaughtException(cx))
                JS_ClearPendingException(cx);
            ok = JS_TRUE;
        } else if (!ok) {
            /*
             * Assume this is a stop signal from the branch callback or
             * other quit ASAP condition. Break execution until the next
             * invocation of js_RunCloseHooks.
             */
            break;
        }
    }

    cx->fp = fp;
    if (fp) {
        JS_ASSERT(cx->dormantFrameChain == fp);
        cx->dormantFrameChain = fp->dormantNext;
        fp->dormantNext = NULL;
    }

    if (tempList.head) {
        /*
         * Some close hooks were not yet executed, put them back into the
         * scheduled list.
         */
        while ((gen = *genp) != NULL) {
            genp = &gen->next;
            METER(deferCount++);
        }

        /* Now genp is a pointer to the tail of tempList. */
        JS_LOCK_GC(rt);
        *genp = rt->gcCloseState.todoQueue;
        rt->gcCloseState.todoQueue = tempList.head;
        METER(rt->gcStats.closelater += deferCount);
        METER(rt->gcStats.maxcloselater
              = JS_MAX(rt->gcStats.maxcloselater, rt->gcStats.closelater));
        JS_UNLOCK_GC(rt);
    }

    JS_POP_TEMP_CLOSE_LIST(cx, &tempList);
    *GC_RUNNING_CLOSE_HOOKS_PTR(cx) = JS_FALSE;

    return ok;
}

JSBool js_UnlockGCThingRT (JSRuntime *rt, void *thing)

Definition at line 1693 of file jsgc.c.

{
    uint8 *flagp, flags;
    JSGCLockHashEntry *lhe;

    if (!thing)
        return JS_TRUE;

    flagp = js_GetGCThingFlags(thing);
    JS_LOCK_GC(rt);
    flags = *flagp;

    if (flags & GCF_LOCK) {
        if (!rt->gcLocksHash ||
            (lhe = (JSGCLockHashEntry *)
                   JS_DHashTableOperate(rt->gcLocksHash, thing,
                                        JS_DHASH_LOOKUP),
             JS_DHASH_ENTRY_IS_FREE(&lhe->hdr))) {
            /* Shallow GC-thing with an implicit lock count of 1. */
            JS_ASSERT(!GC_THING_IS_DEEP(flags & GCF_TYPEMASK, thing));
        } else {
            /* Basis or nested unlock of a deep thing, or nested of shallow. */
            if (--lhe->count != 0)
                goto out;
            JS_DHashTableOperate(rt->gcLocksHash, thing, JS_DHASH_REMOVE);
        }
        *flagp = (uint8)(flags & ~GCF_LOCK);
    }

    rt->gcPoke = JS_TRUE;
out:
    METER(rt->gcStats.unlock++);
    JS_UNLOCK_GC(rt);
    return JS_TRUE;
}

void js_UpdateMallocCounter (JSContext *cx, size_t nbytes)

Definition at line 3196 of file jsgc.c.

{
    uint32 *pbytes, bytes;

#ifdef JS_THREADSAFE
    pbytes = &cx->thread->gcMallocBytes;
#else
    pbytes = &cx->runtime->gcMallocBytes;
#endif
    bytes = *pbytes;
    *pbytes = ((uint32)-1 - bytes <= nbytes) ? (uint32)-1 : bytes + nbytes;
}
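
The update above is a saturating add: once the counter would exceed the uint32 range it pins at (uint32)-1 instead of wrapping, so a very large nbytes cannot silently reset the malloc pressure that js_NewGCThing compares against rt->gcMaxMallocBytes. A standalone restatement of that arithmetic:

#include <stdint.h>
#include <stdio.h>

/* Same clamp-at-UINT32_MAX idiom as js_UpdateMallocCounter above. */
static uint32_t saturating_add(uint32_t bytes, size_t nbytes)
{
    return ((uint32_t)-1 - bytes <= nbytes) ? (uint32_t)-1
                                            : bytes + (uint32_t)nbytes;
}

int main(void)
{
    printf("%u\n", (unsigned) saturating_add(100, 28));            /* 128 */
    printf("%u\n", (unsigned) saturating_add(0xfffffff0u, 1000));  /* 4294967295 */
    return 0;
}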
