Back to index

lightning-sunbird  0.9+nobinonly
jsgc.c
Go to the documentation of this file.
00001 /* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
00002  * vim: set ts=8 sw=4 et tw=78:
00003  *
00004  * ***** BEGIN LICENSE BLOCK *****
00005  * Version: MPL 1.1/GPL 2.0/LGPL 2.1
00006  *
00007  * The contents of this file are subject to the Mozilla Public License Version
00008  * 1.1 (the "License"); you may not use this file except in compliance with
00009  * the License. You may obtain a copy of the License at
00010  * http://www.mozilla.org/MPL/
00011  *
00012  * Software distributed under the License is distributed on an "AS IS" basis,
00013  * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
00014  * for the specific language governing rights and limitations under the
00015  * License.
00016  *
00017  * The Original Code is Mozilla Communicator client code, released
00018  * March 31, 1998.
00019  *
00020  * The Initial Developer of the Original Code is
00021  * Netscape Communications Corporation.
00022  * Portions created by the Initial Developer are Copyright (C) 1998
00023  * the Initial Developer. All Rights Reserved.
00024  *
00025  * Contributor(s):
00026  *
00027  * Alternatively, the contents of this file may be used under the terms of
00028  * either of the GNU General Public License Version 2 or later (the "GPL"),
00029  * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
00030  * in which case the provisions of the GPL or the LGPL are applicable instead
00031  * of those above. If you wish to allow use of your version of this file only
00032  * under the terms of either the GPL or the LGPL, and not to allow others to
00033  * use your version of this file under the terms of the MPL, indicate your
00034  * decision by deleting the provisions above and replace them with the notice
00035  * and other provisions required by the GPL or the LGPL. If you do not delete
00036  * the provisions above, a recipient may use your version of this file under
00037  * the terms of any one of the MPL, the GPL or the LGPL.
00038  *
00039  * ***** END LICENSE BLOCK ***** */
00040 
00041 /*
00042  * JS Mark-and-Sweep Garbage Collector.
00043  *
00044  * This GC allocates fixed-sized things with sizes up to GC_NBYTES_MAX (see
00045  * jsgc.h). It allocates from a special GC arena pool with each arena allocated
00046  * using malloc. It uses an ideally parallel array of flag bytes to hold the
00047  * mark bit, finalizer type index, etc.
00048  *
00049  * XXX swizzle page to freelist for better locality of reference
00050  */
00051 #include "jsstddef.h"
00052 #include <stdlib.h>     /* for free */
00053 #include <string.h>     /* for memset used when DEBUG */
00054 #include "jstypes.h"
00055 #include "jsutil.h" /* Added by JSIFY */
00056 #include "jshash.h" /* Added by JSIFY */
00057 #include "jsapi.h"
00058 #include "jsatom.h"
00059 #include "jsbit.h"
00060 #include "jsclist.h"
00061 #include "jscntxt.h"
00062 #include "jsconfig.h"
00063 #include "jsdbgapi.h"
00064 #include "jsexn.h"
00065 #include "jsfun.h"
00066 #include "jsgc.h"
00067 #include "jsinterp.h"
00068 #include "jsiter.h"
00069 #include "jslock.h"
00070 #include "jsnum.h"
00071 #include "jsobj.h"
00072 #include "jsscope.h"
00073 #include "jsscript.h"
00074 #include "jsstr.h"
00075 
00076 #if JS_HAS_XML_SUPPORT
00077 #include "jsxml.h"
00078 #endif
00079 
00080 /*
00081  * GC arena sizing depends on amortizing arena overhead using a large number
00082  * of things per arena, and on the thing/flags ratio of 8:1 on most platforms.
00083  *
00084  * On 64-bit platforms, we would have half as many things per arena because
00085  * pointers are twice as big, so we double the bytes for things per arena.
00086  * This preserves the 1024 byte flags sub-arena size, which relates to the
00087  * GC_PAGE_SIZE (see below for why).
00088  */
00089 #if JS_BYTES_PER_WORD == 8
00090 # define GC_THINGS_SHIFT 14     /* 16KB for things on Alpha, etc. */
00091 #else
00092 # define GC_THINGS_SHIFT 13     /* 8KB for things on most platforms */
00093 #endif
00094 #define GC_THINGS_SIZE  JS_BIT(GC_THINGS_SHIFT)
00095 #define GC_FLAGS_SIZE   (GC_THINGS_SIZE / sizeof(JSGCThing))
00096 
00097 /*
00098  * A GC arena contains one flag byte for each thing in its heap, and supports
00099  * O(1) lookup of a flag given its thing's address.
00100  *
00101  * To implement this, we take advantage of the thing/flags numerology: given
00102  * the 8K bytes worth of GC-things, there are 1K flag bytes. Within each 9K
00103  * allocation for things+flags there are always 8 consecutive 1K-pages each
00104  * aligned on 1K boundary. We use these pages to allocate things and the
00105  * remaining 1K of space before and after the aligned pages to store flags.
00106  * If we are really lucky and things+flags starts on a 1K boundary, then
00107  * flags would consist of a single 1K chunk that comes after 8K of things.
00108  * Otherwise there are 2 chunks of flags, one before and one after things.
00109  *
00110  * To be able to find the flag byte for a particular thing, we put a
00111  * JSGCPageInfo record at the beginning of each 1K-aligned page to hold that
00112  * page's offset from the beginning of things+flags allocation and we allocate
00113  * things after this record. Thus for each thing |thing_address & ~1023|
00114  * gives the address of a JSGCPageInfo record from which we read page_offset.
00115  * Due to page alignment
00116  *  (page_offset & ~1023) + (thing_address & 1023)
00117  * gives thing_offset from the beginning of 8K paged things. We then divide
00118  * thing_offset by sizeof(JSGCThing) to get thing_index.
00119  *
00120  * Now |page_address - page_offset| is things+flags arena_address and
00121  * (page_offset & 1023) is the offset of the first page from the start of
00122  * things+flags area. Thus if
00123  *  thing_index < (page_offset & 1023)
00124  * then
00125  *  allocation_start_address + thing_index < address_of_the_first_page
00126  * and we use
00127  *  allocation_start_address + thing_index
00128  * as the address to store thing's flags. If
00129  *  thing_index >= (page_offset & 1023),
00130  * then we use the chunk of flags that comes after the pages with things
00131  * and calculate the address for the flag byte as
00132  *  address_of_the_first_page + 8K + (thing_index - (page_offset & 1023))
00133  * which is just
00134  *  allocation_start_address + thing_index + 8K.
00135  *
00136  * When we allocate things with size equal to sizeof(JSGCThing), the overhead
00137  * of this scheme for 32 bit platforms is (8+8*(8+1))/(8+9K) or 0.87%
00138  * (assuming 4 bytes for each JSGCArena header, and 8 bytes for each
00139  * JSGCThing and JSGCPageInfo). When thing_size > 8, the scheme wastes the
00140  * flag byte for each extra 8 bytes beyond sizeof(JSGCThing) in thing_size
00141  * and the overhead is close to 1/8 or 12.5%.
00142  * FIXME: How can we avoid this overhead?
00143  *
00144  * Here's some ASCII art showing an arena:
00145  *
00146  *   split or the first 1-K aligned address.
00147  *     |
00148  *     V
00149  *  +--+-------+-------+-------+-------+-------+-------+-------+-------+-----+
00150  *  |fB|  tp0  |  tp1  |  tp2  |  tp3  |  tp4  |  tp5  |  tp6  |  tp7  | fA  |
00151  *  +--+-------+-------+-------+-------+-------+-------+-------+-------+-----+
00152  *              ^                                 ^
00153  *  tI ---------+                                 |
00154  *  tJ -------------------------------------------+
00155  *
00156  *  - fB are the "before split" flags, fA are the "after split" flags
00157  *  - tp0-tp7 are the 8 thing pages
00158  *  - thing tI points into tp1, whose flags are below the split, in fB
00159  *  - thing tJ points into tp5, clearly above the split
00160  *
00161  * In general, one of the thing pages will have some of its things' flags on
00162  * the low side of the split, and the rest of its things' flags on the high
00163  * side.  All the other pages have flags only below or only above.
00164  *
00165  * (If we need to implement card-marking for an incremental GC write barrier,
00166  * we can replace word-sized offsetInArena in JSGCPageInfo by pair of
 * uint8 card_mark and uint16 offsetInArena fields as the offset cannot exceed
 * GC_THINGS_SIZE. This would give an extremely efficient write barrier:
00169  * when mutating an object obj, just store a 1 byte at
00170  * (uint8 *) ((jsuword)obj & ~1023) on 32-bit platforms.)
00171  */
00172 #define GC_PAGE_SHIFT   10
00173 #define GC_PAGE_MASK    ((jsuword) JS_BITMASK(GC_PAGE_SHIFT))
00174 #define GC_PAGE_SIZE    JS_BIT(GC_PAGE_SHIFT)
00175 #define GC_PAGE_COUNT   (1 << (GC_THINGS_SHIFT - GC_PAGE_SHIFT))
00176 
/*
 * Header stored at the start of each 1K-aligned page within an arena's
 * things area; see the layout comment above GC_PAGE_SHIFT for how
 * offsetInArena is used to locate flag bytes.
 */
typedef struct JSGCPageInfo {
    jsuword     offsetInArena;          /* offset from the arena start */
    jsuword     unscannedBitmap;        /* bitset for fast search of marked
                                           but not yet scanned GC things */
} JSGCPageInfo;
00182 
/*
 * A GC arena: list/link bookkeeping followed by the things+flags allocation
 * area (GC_THINGS_SIZE + GC_FLAGS_SIZE bytes, see GC_ARENA_SIZE below).
 */
struct JSGCArena {
    JSGCArenaList       *list;          /* allocation list for the arena */
    JSGCArena           *prev;          /* link field for allocation list */
    JSGCArena           *prevUnscanned; /* link field for the list of arenas
                                           with marked but not yet scanned
                                           things */
    jsuword             unscannedPages; /* bitset for fast search of pages
                                           with marked but not yet scanned
                                           things */
    uint8               base[1];        /* things+flags allocation area */
};
00194 
00195 #define GC_ARENA_SIZE                                                         \
00196     (offsetof(JSGCArena, base) + GC_THINGS_SIZE + GC_FLAGS_SIZE)
00197 
00198 #define FIRST_THING_PAGE(a)                                                   \
00199     (((jsuword)(a)->base + GC_FLAGS_SIZE - 1) & ~GC_PAGE_MASK)
00200 
00201 #define PAGE_TO_ARENA(pi)                                                     \
00202     ((JSGCArena *)((jsuword)(pi) - (pi)->offsetInArena                        \
00203                    - offsetof(JSGCArena, base)))
00204 
00205 #define PAGE_INDEX(pi)                                                        \
00206     ((size_t)((pi)->offsetInArena >> GC_PAGE_SHIFT))
00207 
00208 #define THING_TO_PAGE(thing)                                                  \
00209     ((JSGCPageInfo *)((jsuword)(thing) & ~GC_PAGE_MASK))
00210 
00211 /*
00212  * Given a thing size n, return the size of the gap from the page start before
00213  * the first thing.  We know that any n not a power of two packs from
00214  * the end of the page leaving at least enough room for one JSGCPageInfo, but
00215  * not for another thing, at the front of the page (JS_ASSERTs below insist
00216  * on this).
00217  *
00218  * This works because all allocations are a multiple of sizeof(JSGCThing) ==
00219  * sizeof(JSGCPageInfo) in size.
00220  */
00221 #define PAGE_THING_GAP(n) (((n) & ((n) - 1)) ? (GC_PAGE_SIZE % (n)) : (n))
00222 
00223 #ifdef JS_THREADSAFE
00224 /*
00225  * The maximum number of things to put to the local free list by taking
00226  * several things from the global free list or from the tail of the last
00227  * allocated arena to amortize the cost of rt->gcLock.
00228  *
00229  * We use number 8 based on benchmarks from bug 312238.
00230  */
00231 #define MAX_THREAD_LOCAL_THINGS 8
00232 
00233 #endif
00234 
00235 JS_STATIC_ASSERT(sizeof(JSGCThing) == sizeof(JSGCPageInfo));
00236 JS_STATIC_ASSERT(sizeof(JSGCThing) >= sizeof(JSObject));
00237 JS_STATIC_ASSERT(sizeof(JSGCThing) >= sizeof(JSString));
00238 JS_STATIC_ASSERT(sizeof(JSGCThing) >= sizeof(jsdouble));
00239 JS_STATIC_ASSERT(GC_FLAGS_SIZE >= GC_PAGE_SIZE);
00240 JS_STATIC_ASSERT(sizeof(JSStackHeader) >= 2 * sizeof(jsval));
00241 
00242 /*
00243  * JSPtrTable capacity growth descriptor. The table grows by powers of two
00244  * starting from capacity JSPtrTableInfo.minCapacity, but switching to linear
00245  * growth when capacity reaches JSPtrTableInfo.linearGrowthThreshold.
00246  */
typedef struct JSPtrTableInfo {
    uint16      minCapacity;            /* smallest non-zero capacity */
    uint16      linearGrowthThreshold;  /* capacity at which doubling stops
                                           and growth becomes linear */
} JSPtrTableInfo;
00251 
00252 #define GC_ITERATOR_TABLE_MIN     4
00253 #define GC_ITERATOR_TABLE_LINEAR  1024
00254 
/* Growth descriptor for rt->gcIteratorTable (freed in js_FinishGC). */
static const JSPtrTableInfo iteratorTableInfo = {
    GC_ITERATOR_TABLE_MIN,
    GC_ITERATOR_TABLE_LINEAR
};
00259 
00260 /* Calculate table capacity based on the current value of JSPtrTable.count. */
00261 static size_t
00262 PtrTableCapacity(size_t count, const JSPtrTableInfo *info)
00263 {
00264     size_t linear, log, capacity;
00265 
00266     linear = info->linearGrowthThreshold;
00267     JS_ASSERT(info->minCapacity <= linear);
00268 
00269     if (count == 0) {
00270         capacity = 0;
00271     } else if (count < linear) {
00272         log = JS_CEILING_LOG2W(count);
00273         JS_ASSERT(log != JS_BITS_PER_WORD);
00274         capacity = (size_t)1 << log;
00275         if (capacity < info->minCapacity)
00276             capacity = info->minCapacity;
00277     } else {
00278         capacity = JS_ROUNDUP(count, linear);
00279     }
00280 
00281     JS_ASSERT(capacity >= count);
00282     return capacity;
00283 }
00284 
00285 static void
00286 FreePtrTable(JSPtrTable *table, const JSPtrTableInfo *info)
00287 {
00288     if (table->array) {
00289         JS_ASSERT(table->count > 0);
00290         free(table->array);
00291         table->array = NULL;
00292         table->count = 0;
00293     }
00294     JS_ASSERT(table->count == 0);
00295 }
00296 
00297 static JSBool
00298 AddToPtrTable(JSContext *cx, JSPtrTable *table, const JSPtrTableInfo *info,
00299               void *ptr)
00300 {
00301     size_t count, capacity;
00302     void **array;
00303 
00304     count = table->count;
00305     capacity = PtrTableCapacity(count, info);
00306 
00307     if (count == capacity) {
00308         if (capacity < info->minCapacity) {
00309             JS_ASSERT(capacity == 0);
00310             JS_ASSERT(!table->array);
00311             capacity = info->minCapacity;
00312         } else {
00313             /*
00314              * Simplify the overflow detection assuming pointer is bigger
00315              * than byte.
00316              */
00317             JS_STATIC_ASSERT(2 <= sizeof table->array[0]);
00318             capacity = (capacity < info->linearGrowthThreshold)
00319                        ? 2 * capacity
00320                        : capacity + info->linearGrowthThreshold;
00321             if (capacity > (size_t)-1 / sizeof table->array[0])
00322                 goto bad;
00323         }
00324         array = (void **) realloc(table->array,
00325                                   capacity * sizeof table->array[0]);
00326         if (!array)
00327             goto bad;
00328 #ifdef DEBUG
00329         memset(array + count, JS_FREE_PATTERN,
00330                (capacity - count) * sizeof table->array[0]);
00331 #endif
00332         table->array = array;
00333     }
00334 
00335     table->array[count] = ptr;
00336     table->count = count + 1;
00337 
00338     return JS_TRUE;
00339 
00340   bad:
00341     JS_ReportOutOfMemory(cx);
00342     return JS_FALSE;
00343 }
00344 
/*
 * Shrink the table to newCount elements, releasing or reallocating the
 * backing array when the capacity schedule says it should shrink.  A failed
 * shrinking realloc is tolerated: the old (larger) array is kept.
 */
static void
ShrinkPtrTable(JSPtrTable *table, const JSPtrTableInfo *info,
               size_t newCount)
{
    size_t oldCapacity, capacity;
    void **array;

    JS_ASSERT(newCount <= table->count);
    if (newCount == table->count)
        return;

    oldCapacity = PtrTableCapacity(table->count, info);
    table->count = newCount;
    capacity = PtrTableCapacity(newCount, info);

    if (oldCapacity != capacity) {
        array = table->array;
        JS_ASSERT(array);
        if (capacity == 0) {
            /* Table is now empty; drop the array entirely. */
            free(array);
            table->array = NULL;
            return;
        }
        /* On realloc failure keep the old, larger array (still valid). */
        array = (void **) realloc(array, capacity * sizeof array[0]);
        if (array)
            table->array = array;
    }
#ifdef DEBUG
    /* Poison the now-unused tail; capacity <= actual array capacity here. */
    memset(table->array + newCount, JS_FREE_PATTERN,
           (capacity - newCount) * sizeof table->array[0]);
#endif
}
00377 
00378 #ifdef JS_GCMETER
00379 # define METER(x) x
00380 #else
00381 # define METER(x) ((void) 0)
00382 #endif
00383 
00384 static JSBool
00385 NewGCArena(JSRuntime *rt, JSGCArenaList *arenaList)
00386 {
00387     JSGCArena *a;
00388     jsuword offset;
00389     JSGCPageInfo *pi;
00390     uint32 *bytesptr;
00391 
00392     /* Check if we are allowed and can allocate a new arena. */
00393     if (rt->gcBytes >= rt->gcMaxBytes)
00394         return JS_FALSE;
00395     a = (JSGCArena *)malloc(GC_ARENA_SIZE);
00396     if (!a)
00397         return JS_FALSE;
00398 
00399     /* Initialize the JSGCPageInfo records at the start of every thing page. */
00400     offset = (GC_PAGE_SIZE - ((jsuword)a->base & GC_PAGE_MASK)) & GC_PAGE_MASK;
00401     JS_ASSERT((jsuword)a->base + offset == FIRST_THING_PAGE(a));
00402     do {
00403         pi = (JSGCPageInfo *) (a->base + offset);
00404         pi->offsetInArena = offset;
00405         pi->unscannedBitmap = 0;
00406         offset += GC_PAGE_SIZE;
00407     } while (offset < GC_THINGS_SIZE);
00408 
00409     METER(++arenaList->stats.narenas);
00410     METER(arenaList->stats.maxarenas
00411           = JS_MAX(arenaList->stats.maxarenas, arenaList->stats.narenas));
00412 
00413     a->list = arenaList;
00414     a->prev = arenaList->last;
00415     a->prevUnscanned = NULL;
00416     a->unscannedPages = 0;
00417     arenaList->last = a;
00418     arenaList->lastLimit = 0;
00419 
00420     bytesptr = (arenaList == &rt->gcArenaList[0])
00421                ? &rt->gcBytes
00422                : &rt->gcPrivateBytes;
00423     *bytesptr += GC_ARENA_SIZE;
00424 
00425     return JS_TRUE;
00426 }
00427 
00428 static void
00429 DestroyGCArena(JSRuntime *rt, JSGCArenaList *arenaList, JSGCArena **ap)
00430 {
00431     JSGCArena *a;
00432     uint32 *bytesptr;
00433 
00434     a = *ap;
00435     JS_ASSERT(a);
00436     bytesptr = (arenaList == &rt->gcArenaList[0])
00437                ? &rt->gcBytes
00438                : &rt->gcPrivateBytes;
00439     JS_ASSERT(*bytesptr >= GC_ARENA_SIZE);
00440     *bytesptr -= GC_ARENA_SIZE;
00441     METER(rt->gcStats.afree++);
00442     METER(--arenaList->stats.narenas);
00443     if (a == arenaList->last)
00444         arenaList->lastLimit = (uint16)(a->prev ? GC_THINGS_SIZE : 0);
00445     *ap = a->prev;
00446 
00447 #ifdef DEBUG
00448     memset(a, JS_FREE_PATTERN, GC_ARENA_SIZE);
00449 #endif
00450     free(a);
00451 }
00452 
00453 static void
00454 InitGCArenaLists(JSRuntime *rt)
00455 {
00456     uintN i, thingSize;
00457     JSGCArenaList *arenaList;
00458 
00459     for (i = 0; i < GC_NUM_FREELISTS; i++) {
00460         arenaList = &rt->gcArenaList[i];
00461         thingSize = GC_FREELIST_NBYTES(i);
00462         JS_ASSERT((size_t)(uint16)thingSize == thingSize);
00463         arenaList->last = NULL;
00464         arenaList->lastLimit = 0;
00465         arenaList->thingSize = (uint16)thingSize;
00466         arenaList->freeList = NULL;
00467         METER(memset(&arenaList->stats, 0, sizeof arenaList->stats));
00468     }
00469 }
00470 
00471 static void
00472 FinishGCArenaLists(JSRuntime *rt)
00473 {
00474     uintN i;
00475     JSGCArenaList *arenaList;
00476 
00477     for (i = 0; i < GC_NUM_FREELISTS; i++) {
00478         arenaList = &rt->gcArenaList[i];
00479         while (arenaList->last)
00480             DestroyGCArena(rt, arenaList, &arenaList->last);
00481         arenaList->freeList = NULL;
00482     }
00483 }
00484 
/*
 * Return the address of the flag byte for the given GC thing.  See the
 * large comment above GC_PAGE_SHIFT for the things+flags arena layout this
 * arithmetic decodes.
 */
uint8 *
js_GetGCThingFlags(void *thing)
{
    JSGCPageInfo *pi;
    jsuword offsetInArena, thingIndex;

    /* The page's JSGCPageInfo lives at its 1K-aligned start. */
    pi = THING_TO_PAGE(thing);
    offsetInArena = pi->offsetInArena;
    JS_ASSERT(offsetInArena < GC_THINGS_SIZE);
    /*
     * Reconstruct the thing's offset from the start of the paged things,
     * then scale by sizeof(JSGCThing): one flag byte per thing slot.
     */
    thingIndex = ((offsetInArena & ~GC_PAGE_MASK) |
                  ((jsuword)thing & GC_PAGE_MASK)) / sizeof(JSGCThing);
    JS_ASSERT(thingIndex < GC_PAGE_SIZE);
    /*
     * Flags with index below the split live before the first page; the rest
     * live in the chunk after the things, hence the GC_THINGS_SIZE bias.
     */
    if (thingIndex >= (offsetInArena & GC_PAGE_MASK))
        thingIndex += GC_THINGS_SIZE;
    /* (uint8 *)pi - offsetInArena is the start of the things+flags area. */
    return (uint8 *)pi - offsetInArena + thingIndex;
}
00501 
/*
 * Map a GC-allocated string back to its owning JSRuntime.  Strings live in
 * arena list 0 (asserted below), and that list is embedded in JSRuntime at
 * a fixed offset, so subtracting offsetof(JSRuntime, gcArenaList) from the
 * list pointer recovers the runtime.
 */
JSRuntime*
js_GetGCStringRuntime(JSString *str)
{
    JSGCPageInfo *pi;
    JSGCArenaList *list;

    pi = THING_TO_PAGE(str);
    list = PAGE_TO_ARENA(pi)->list;

    JS_ASSERT(list->thingSize == sizeof(JSGCThing));
    JS_ASSERT(GC_FREELIST_INDEX(sizeof(JSGCThing)) == 0);

    return (JSRuntime *)((uint8 *)list - offsetof(JSRuntime, gcArenaList));
}
00516 
00517 JSBool
00518 js_IsAboutToBeFinalized(JSContext *cx, void *thing)
00519 {
00520     uint8 flags = *js_GetGCThingFlags(thing);
00521 
00522     return !(flags & (GCF_MARK | GCF_LOCK | GCF_FINAL));
00523 }
00524 
00525 typedef void (*GCFinalizeOp)(JSContext *cx, JSGCThing *thing);
00526 
00527 #ifndef DEBUG
00528 # define js_FinalizeDouble       NULL
00529 #endif
00530 
00531 #if !JS_HAS_XML_SUPPORT
00532 # define js_FinalizeXMLNamespace NULL
00533 # define js_FinalizeXMLQName     NULL
00534 # define js_FinalizeXML          NULL
00535 #endif
00536 
/*
 * Finalizer table indexed by the GCX_* thing type stored in the flag byte.
 * NULL entries have no finalizer installed; the slots from
 * GCX_EXTERNAL_STRING onward can be populated at runtime via
 * js_ChangeExternalStringFinalizer.
 */
static GCFinalizeOp gc_finalizers[GCX_NTYPES] = {
    (GCFinalizeOp) js_FinalizeObject,           /* GCX_OBJECT */
    (GCFinalizeOp) js_FinalizeString,           /* GCX_STRING */
    (GCFinalizeOp) js_FinalizeDouble,           /* GCX_DOUBLE */
    (GCFinalizeOp) js_FinalizeString,           /* GCX_MUTABLE_STRING */
    NULL,                                       /* GCX_PRIVATE */
    (GCFinalizeOp) js_FinalizeXMLNamespace,     /* GCX_NAMESPACE */
    (GCFinalizeOp) js_FinalizeXMLQName,         /* GCX_QNAME */
    (GCFinalizeOp) js_FinalizeXML,              /* GCX_XML */
    NULL,                                       /* GCX_EXTERNAL_STRING */
    NULL,
    NULL,
    NULL,
    NULL,
    NULL,
    NULL,
    NULL
};
00555 
00556 #ifdef GC_MARK_DEBUG
00557 static const char newborn_external_string[] = "newborn external string";
00558 
00559 static const char *gc_typenames[GCX_NTYPES] = {
00560     "newborn object",
00561     "newborn string",
00562     "newborn double",
00563     "newborn mutable string",
00564     "newborn private",
00565     "newborn Namespace",
00566     "newborn QName",
00567     "newborn XML",
00568     newborn_external_string,
00569     newborn_external_string,
00570     newborn_external_string,
00571     newborn_external_string,
00572     newborn_external_string,
00573     newborn_external_string,
00574     newborn_external_string,
00575     newborn_external_string
00576 };
00577 #endif
00578 
00579 intN
00580 js_ChangeExternalStringFinalizer(JSStringFinalizeOp oldop,
00581                                  JSStringFinalizeOp newop)
00582 {
00583     uintN i;
00584 
00585     for (i = GCX_EXTERNAL_STRING; i < GCX_NTYPES; i++) {
00586         if (gc_finalizers[i] == (GCFinalizeOp) oldop) {
00587             gc_finalizers[i] = (GCFinalizeOp) newop;
00588             return (intN) i;
00589         }
00590     }
00591     return -1;
00592 }
00593 
00594 /* This is compatible with JSDHashEntryStub. */
/* This is compatible with JSDHashEntryStub. */
typedef struct JSGCRootHashEntry {
    JSDHashEntryHdr hdr;
    void            *root;      /* hash key: the rooted address itself */
    const char      *name;      /* diagnostic label; may be NULL */
} JSGCRootHashEntry;
00600 
00601 /* Initial size of the gcRootsHash table (SWAG, small enough to amortize). */
00602 #define GC_ROOTS_SIZE   256
00603 #define GC_FINALIZE_LEN 1024
00604 
00605 JSBool
00606 js_InitGC(JSRuntime *rt, uint32 maxbytes)
00607 {
00608     InitGCArenaLists(rt);
00609     if (!JS_DHashTableInit(&rt->gcRootsHash, JS_DHashGetStubOps(), NULL,
00610                            sizeof(JSGCRootHashEntry), GC_ROOTS_SIZE)) {
00611         rt->gcRootsHash.ops = NULL;
00612         return JS_FALSE;
00613     }
00614     rt->gcLocksHash = NULL;     /* create lazily */
00615 
00616     /*
00617      * Separate gcMaxMallocBytes from gcMaxBytes but initialize to maxbytes
00618      * for default backward API compatibility.
00619      */
00620     rt->gcMaxBytes = rt->gcMaxMallocBytes = maxbytes;
00621 
00622     return JS_TRUE;
00623 }
00624 
00625 #ifdef JS_GCMETER
00626 JS_FRIEND_API(void)
00627 js_DumpGCStats(JSRuntime *rt, FILE *fp)
00628 {
00629     uintN i;
00630     size_t totalThings, totalMaxThings, totalBytes;
00631 
00632     fprintf(fp, "\nGC allocation statistics:\n");
00633 
00634 #define UL(x)       ((unsigned long)(x))
00635 #define ULSTAT(x)   UL(rt->gcStats.x)
00636     totalThings = 0;
00637     totalMaxThings = 0;
00638     totalBytes = 0;
00639     for (i = 0; i < GC_NUM_FREELISTS; i++) {
00640         JSGCArenaList *list = &rt->gcArenaList[i];
00641         JSGCArenaStats *stats = &list->stats;
00642         if (stats->maxarenas == 0) {
00643             fprintf(fp, "ARENA LIST %u (thing size %lu): NEVER USED\n",
00644                     i, UL(GC_FREELIST_NBYTES(i)));
00645             continue;
00646         }
00647         fprintf(fp, "ARENA LIST %u (thing size %lu):\n",
00648                 i, UL(GC_FREELIST_NBYTES(i)));
00649         fprintf(fp, "                     arenas: %lu\n", UL(stats->narenas));
00650         fprintf(fp, "                 max arenas: %lu\n", UL(stats->maxarenas));
00651         fprintf(fp, "                     things: %lu\n", UL(stats->nthings));
00652         fprintf(fp, "                 max things: %lu\n", UL(stats->maxthings));
00653         fprintf(fp, "                  free list: %lu\n", UL(stats->freelen));
00654         fprintf(fp, "          free list density: %.1f%%\n",
00655                 stats->narenas == 0
00656                 ? 0.0
00657                 : (100.0 * list->thingSize * (jsdouble)stats->freelen /
00658                    (GC_THINGS_SIZE * (jsdouble)stats->narenas)));
00659         fprintf(fp, "  average free list density: %.1f%%\n",
00660                 stats->totalarenas == 0
00661                 ? 0.0
00662                 : (100.0 * list->thingSize * (jsdouble)stats->totalfreelen /
00663                    (GC_THINGS_SIZE * (jsdouble)stats->totalarenas)));
00664         fprintf(fp, "                   recycles: %lu\n", UL(stats->recycle));
00665         fprintf(fp, "        recycle/alloc ratio: %.2f\n",
00666                 (jsdouble)stats->recycle /
00667                 (jsdouble)(stats->totalnew - stats->recycle));
00668         totalThings += stats->nthings;
00669         totalMaxThings += stats->maxthings;
00670         totalBytes += GC_FREELIST_NBYTES(i) * stats->nthings;
00671     }
00672     fprintf(fp, "TOTAL STATS:\n");
00673     fprintf(fp, "     public bytes allocated: %lu\n", UL(rt->gcBytes));
00674     fprintf(fp, "    private bytes allocated: %lu\n", UL(rt->gcPrivateBytes));
00675     fprintf(fp, "             alloc attempts: %lu\n", ULSTAT(alloc));
00676 #ifdef JS_THREADSAFE
00677     fprintf(fp, "        alloc without locks: %1u\n", ULSTAT(localalloc));
00678 #endif
00679     fprintf(fp, "            total GC things: %lu\n", UL(totalThings));
00680     fprintf(fp, "        max total GC things: %lu\n", UL(totalMaxThings));
00681     fprintf(fp, "             GC things size: %lu\n", UL(totalBytes));
00682     fprintf(fp, "allocation retries after GC: %lu\n", ULSTAT(retry));
00683     fprintf(fp, "        allocation failures: %lu\n", ULSTAT(fail));
00684     fprintf(fp, "         things born locked: %lu\n", ULSTAT(lockborn));
00685     fprintf(fp, "           valid lock calls: %lu\n", ULSTAT(lock));
00686     fprintf(fp, "         valid unlock calls: %lu\n", ULSTAT(unlock));
00687     fprintf(fp, "       mark recursion depth: %lu\n", ULSTAT(depth));
00688     fprintf(fp, "     maximum mark recursion: %lu\n", ULSTAT(maxdepth));
00689     fprintf(fp, "     mark C recursion depth: %lu\n", ULSTAT(cdepth));
00690     fprintf(fp, "   maximum mark C recursion: %lu\n", ULSTAT(maxcdepth));
00691     fprintf(fp, "      delayed scan bag adds: %lu\n", ULSTAT(unscanned));
00692 #ifdef DEBUG
00693     fprintf(fp, "  max delayed scan bag size: %lu\n", ULSTAT(maxunscanned));
00694 #endif
00695     fprintf(fp, "   maximum GC nesting level: %lu\n", ULSTAT(maxlevel));
00696     fprintf(fp, "potentially useful GC calls: %lu\n", ULSTAT(poke));
00697     fprintf(fp, "           useless GC calls: %lu\n", ULSTAT(nopoke));
00698     fprintf(fp, "  thing arenas freed so far: %lu\n", ULSTAT(afree));
00699     fprintf(fp, "     stack segments scanned: %lu\n", ULSTAT(stackseg));
00700     fprintf(fp, "stack segment slots scanned: %lu\n", ULSTAT(segslots));
00701     fprintf(fp, "reachable closeable objects: %lu\n", ULSTAT(nclose));
00702     fprintf(fp, "    max reachable closeable: %lu\n", ULSTAT(maxnclose));
00703     fprintf(fp, "      scheduled close hooks: %lu\n", ULSTAT(closelater));
00704     fprintf(fp, "  max scheduled close hooks: %lu\n", ULSTAT(maxcloselater));
00705 #undef UL
00706 #undef US
00707 
00708 #ifdef JS_ARENAMETER
00709     JS_DumpArenaStats(fp);
00710 #endif
00711 }
00712 #endif
00713 
00714 #ifdef DEBUG
00715 static void
00716 CheckLeakedRoots(JSRuntime *rt);
00717 #endif
00718 
/*
 * Tear down all GC state owned by rt: the iterator table, generator close
 * state, arena lists, and the root and lock hashtables.  Counterpart of
 * js_InitGC.
 */
void
js_FinishGC(JSRuntime *rt)
{
#ifdef JS_ARENAMETER
    JS_DumpArenaStats(stdout);
#endif
#ifdef JS_GCMETER
    js_DumpGCStats(rt, stdout);
#endif

    FreePtrTable(&rt->gcIteratorTable, &iteratorTableInfo);
#if JS_HAS_GENERATORS
    /* Reset generator close-hook bookkeeping before the arenas go away. */
    rt->gcCloseState.reachableList = NULL;
    METER(rt->gcStats.nclose = 0);
    rt->gcCloseState.todoQueue = NULL;
#endif
    FinishGCArenaLists(rt);

    if (rt->gcRootsHash.ops) {
#ifdef DEBUG
        /* Warn (stderr) about roots the embedding failed to remove. */
        CheckLeakedRoots(rt);
#endif
        JS_DHashTableFinish(&rt->gcRootsHash);
        rt->gcRootsHash.ops = NULL;
    }
    if (rt->gcLocksHash) {
        JS_DHashTableDestroy(rt->gcLocksHash);
        rt->gcLocksHash = NULL;
    }
}
00749 
00750 JSBool
00751 js_AddRoot(JSContext *cx, void *rp, const char *name)
00752 {
00753     JSBool ok = js_AddRootRT(cx->runtime, rp, name);
00754     if (!ok)
00755         JS_ReportOutOfMemory(cx);
00756     return ok;
00757 }
00758 
/*
 * Runtime-level root registration; may be called outside a request.
 * Returns JS_FALSE on hashtable allocation failure (callers such as
 * js_AddRoot report the error).  See the synchronization comment below.
 */
JSBool
js_AddRootRT(JSRuntime *rt, void *rp, const char *name)
{
    JSBool ok;
    JSGCRootHashEntry *rhe;

    /*
     * Due to the long-standing, but now removed, use of rt->gcLock across the
     * bulk of js_GC, API users have come to depend on JS_AddRoot etc. locking
     * properly with a racing GC, without calling JS_AddRoot from a request.
     * We have to preserve API compatibility here, now that we avoid holding
     * rt->gcLock across the mark phase (including the root hashtable mark).
     *
     * If the GC is running and we're called on another thread, wait for this
     * GC activation to finish.  We can safely wait here (in the case where we
     * are called within a request on another thread's context) without fear
     * of deadlock because the GC doesn't set rt->gcRunning until after it has
     * waited for all active requests to end.
     */
    JS_LOCK_GC(rt);
#ifdef JS_THREADSAFE
    JS_ASSERT(!rt->gcRunning || rt->gcLevel > 0);
    if (rt->gcRunning && rt->gcThread->id != js_CurrentThreadId()) {
        do {
            JS_AWAIT_GC_DONE(rt);
        } while (rt->gcLevel > 0);
    }
#endif
    /* Add (or find) the entry keyed by the root's address. */
    rhe = (JSGCRootHashEntry *) JS_DHashTableOperate(&rt->gcRootsHash, rp,
                                                     JS_DHASH_ADD);
    if (rhe) {
        rhe->root = rp;
        rhe->name = name;
        ok = JS_TRUE;
    } else {
        ok = JS_FALSE;
    }
    JS_UNLOCK_GC(rt);
    return ok;
}
00799 
/*
 * Remove a previously added GC root.  Always returns JS_TRUE; also pokes the
 * GC so the next cycle can collect anything only reachable via the root.
 */
JSBool
js_RemoveRoot(JSRuntime *rt, void *rp)
{
    /*
     * Due to the JS_RemoveRootRT API, we may be called outside of a request.
     * Same synchronization drill as above in js_AddRoot.
     */
    JS_LOCK_GC(rt);
#ifdef JS_THREADSAFE
    JS_ASSERT(!rt->gcRunning || rt->gcLevel > 0);
    if (rt->gcRunning && rt->gcThread->id != js_CurrentThreadId()) {
        do {
            JS_AWAIT_GC_DONE(rt);
        } while (rt->gcLevel > 0);
    }
#endif
    (void) JS_DHashTableOperate(&rt->gcRootsHash, rp, JS_DHASH_REMOVE);
    /* The removed root may have been the last reference to some garbage. */
    rt->gcPoke = JS_TRUE;
    JS_UNLOCK_GC(rt);
    return JS_TRUE;
}
00821 
00822 #ifdef DEBUG
00823 
00824 JS_STATIC_DLL_CALLBACK(JSDHashOperator)
00825 js_root_printer(JSDHashTable *table, JSDHashEntryHdr *hdr, uint32 i, void *arg)
00826 {
00827     uint32 *leakedroots = (uint32 *)arg;
00828     JSGCRootHashEntry *rhe = (JSGCRootHashEntry *)hdr;
00829 
00830     (*leakedroots)++;
00831     fprintf(stderr,
00832             "JS engine warning: leaking GC root \'%s\' at %p\n",
00833             rhe->name ? (char *)rhe->name : "", rhe->root);
00834 
00835     return JS_DHASH_NEXT;
00836 }
00837 
00838 static void
00839 CheckLeakedRoots(JSRuntime *rt)
00840 {
00841     uint32 leakedroots = 0;
00842 
00843     /* Warn (but don't assert) debug builds of any remaining roots. */
00844     JS_DHashTableEnumerate(&rt->gcRootsHash, js_root_printer,
00845                            &leakedroots);
00846     if (leakedroots > 0) {
00847         if (leakedroots == 1) {
00848             fprintf(stderr,
00849 "JS engine warning: 1 GC root remains after destroying the JSRuntime.\n"
00850 "                   This root may point to freed memory. Objects reachable\n"
00851 "                   through it have not been finalized.\n");
00852         } else {
00853             fprintf(stderr,
00854 "JS engine warning: %lu GC roots remain after destroying the JSRuntime.\n"
00855 "                   These roots may point to freed memory. Objects reachable\n"
00856 "                   through them have not been finalized.\n",
00857                         (unsigned long) leakedroots);
00858         }
00859     }
00860 }
00861 
/*
 * Closure threaded through JS_DHashTableEnumerate to js_named_root_dumper:
 * the user-supplied dump callback plus its opaque data pointer.
 */
typedef struct NamedRootDumpArgs {
    void (*dump)(const char *name, void *rp, void *data);
    void *data;
} NamedRootDumpArgs;
00866 
00867 JS_STATIC_DLL_CALLBACK(JSDHashOperator)
00868 js_named_root_dumper(JSDHashTable *table, JSDHashEntryHdr *hdr, uint32 number,
00869                      void *arg)
00870 {
00871     NamedRootDumpArgs *args = (NamedRootDumpArgs *) arg;
00872     JSGCRootHashEntry *rhe = (JSGCRootHashEntry *)hdr;
00873 
00874     if (rhe->name)
00875         args->dump(rhe->name, rhe->root, args->data);
00876     return JS_DHASH_NEXT;
00877 }
00878 
00879 void
00880 js_DumpNamedRoots(JSRuntime *rt,
00881                   void (*dump)(const char *name, void *rp, void *data),
00882                   void *data)
00883 {
00884     NamedRootDumpArgs args;
00885 
00886     args.dump = dump;
00887     args.data = data;
00888     JS_DHashTableEnumerate(&rt->gcRootsHash, js_named_root_dumper, &args);
00889 }
00890 
00891 #endif /* DEBUG */
00892 
/*
 * Closure threaded through JS_DHashTableEnumerate to js_gcroot_mapper:
 * the JSGCRootMapFun to apply plus its opaque data pointer.
 */
typedef struct GCRootMapArgs {
    JSGCRootMapFun map;
    void *data;
} GCRootMapArgs;
00897 
/*
 * Dhash enumerator adapting a JSGCRootMapFun: invoke the user map function
 * on each root and translate its JS_MAP_GCROOT_* flags into the equivalent
 * JSDHashOperator for the enumeration.
 */
JS_STATIC_DLL_CALLBACK(JSDHashOperator)
js_gcroot_mapper(JSDHashTable *table, JSDHashEntryHdr *hdr, uint32 number,
                 void *arg)
{
    GCRootMapArgs *args = (GCRootMapArgs *) arg;
    JSGCRootHashEntry *rhe = (JSGCRootHashEntry *)hdr;
    intN mapflags;
    JSDHashOperator op;

    mapflags = args->map(rhe->root, rhe->name, args->data);

    /*
     * When the JS_MAP_GCROOT_* values coincide with the JS_DHASH_* operator
     * values, a plain cast suffices; otherwise translate each flag bit.
     */
#if JS_MAP_GCROOT_NEXT == JS_DHASH_NEXT &&                                     \
    JS_MAP_GCROOT_STOP == JS_DHASH_STOP &&                                     \
    JS_MAP_GCROOT_REMOVE == JS_DHASH_REMOVE
    op = (JSDHashOperator)mapflags;
#else
    op = JS_DHASH_NEXT;
    if (mapflags & JS_MAP_GCROOT_STOP)
        op |= JS_DHASH_STOP;
    if (mapflags & JS_MAP_GCROOT_REMOVE)
        op |= JS_DHASH_REMOVE;
#endif

    return op;
}
00923 
00924 uint32
00925 js_MapGCRoots(JSRuntime *rt, JSGCRootMapFun map, void *data)
00926 {
00927     GCRootMapArgs args;
00928     uint32 rv;
00929 
00930     args.map = map;
00931     args.data = data;
00932     JS_LOCK_GC(rt);
00933     rv = JS_DHashTableEnumerate(&rt->gcRootsHash, js_gcroot_mapper, &args);
00934     JS_UNLOCK_GC(rt);
00935     return rv;
00936 }
00937 
00938 JSBool
00939 js_RegisterCloseableIterator(JSContext *cx, JSObject *obj)
00940 {
00941     JSRuntime *rt;
00942     JSBool ok;
00943 
00944     rt = cx->runtime;
00945     JS_ASSERT(!rt->gcRunning);
00946 
00947     JS_LOCK_GC(rt);
00948     ok = AddToPtrTable(cx, &rt->gcIteratorTable, &iteratorTableInfo, obj);
00949     JS_UNLOCK_GC(rt);
00950     return ok;
00951 }
00952 
00953 static void
00954 CloseIteratorStates(JSContext *cx)
00955 {
00956     JSRuntime *rt;
00957     size_t count, newCount, i;
00958     void **array;
00959     JSObject *obj;
00960 
00961     rt = cx->runtime;
00962     count = rt->gcIteratorTable.count;
00963     array = rt->gcIteratorTable.array;
00964 
00965     newCount = 0;
00966     for (i = 0; i != count; ++i) {
00967         obj = (JSObject *)array[i];
00968         if (js_IsAboutToBeFinalized(cx, obj))
00969             js_CloseIteratorState(cx, obj);
00970         else
00971             array[newCount++] = obj;
00972     }
00973     ShrinkPtrTable(&rt->gcIteratorTable, &iteratorTableInfo, newCount);
00974 }
00975 
00976 #if JS_HAS_GENERATORS
00977 
/*
 * Link a newborn generator onto rt->gcCloseState.reachableList so the GC can
 * later detect when it becomes unreachable and schedule its close hook.
 */
void
js_RegisterGenerator(JSContext *cx, JSGenerator *gen)
{
    JSRuntime *rt;

    rt = cx->runtime;
    JS_ASSERT(!rt->gcRunning);
    JS_ASSERT(rt->state != JSRTS_LANDING);
    JS_ASSERT(gen->state == JSGEN_NEWBORN);

    JS_LOCK_GC(rt);
    /* Push onto the singly-linked reachable list under the GC lock. */
    gen->next = rt->gcCloseState.reachableList;
    rt->gcCloseState.reachableList = gen;
    METER(rt->gcStats.nclose++);
    METER(rt->gcStats.maxnclose = JS_MAX(rt->gcStats.maxnclose,
                                         rt->gcStats.nclose));
    JS_UNLOCK_GC(rt);
}
00996 
/*
 * We do not run close hooks when the parent scope of the generator instance
 * becomes unreachable to prevent denial-of-service and resource leakage from
 * misbehaved generators.
 *
 * Called from the GC.  Returns true iff the generator's parent scope is
 * still marked (reachable) in this GC cycle.
 */
static JSBool
CanScheduleCloseHook(JSGenerator *gen)
{
    JSObject *parent;
    JSBool canSchedule;

    /* Avoid OBJ_GET_PARENT overhead as we are in GC. */
    parent = JSVAL_TO_OBJECT(gen->obj->slots[JSSLOT_PARENT]);
    /* Schedule only if the parent is marked, i.e. survives this GC. */
    canSchedule = *js_GetGCThingFlags(parent) & GCF_MARK;
#ifdef DEBUG_igor
    if (!canSchedule) {
        fprintf(stderr, "GEN: Kill without schedule, gen=%p parent=%p\n",
                (void *)gen, (void *)parent);
    }
#endif
    return canSchedule;
}
01021 
/*
 * Check if we should delay execution of the close hook.
 *
 * Called outside GC or any locks.
 *
 * XXX The current implementation is a hack that embeds the knowledge of the
 * browser embedding pending the resolution of bug 352788. In the browser we
 * must not close any generators that came from a page that is currently in
 * the browser history. We detect that case using the fact that, in the
 * browser, a scope belongs to the history if
 * scope->outerObject->innerObject != scope.
 *
 * Returns JS_FALSE on error (quit ASAP); on success *defer tells whether the
 * hook must be postponed.
 */
static JSBool
ShouldDeferCloseHook(JSContext *cx, JSGenerator *gen, JSBool *defer)
{
    JSObject *parent, *obj;
    JSClass *clasp;
    JSExtendedClass *xclasp;

    /*
     * This is called outside any locks, so use thread-safe macros to access
     * parent and classes.
     */
    *defer = JS_FALSE;
    /* NOTE(review): assumes gen->obj has a non-null parent -- confirm. */
    parent = OBJ_GET_PARENT(cx, gen->obj);
    clasp = OBJ_GET_CLASS(cx, parent);
    if (clasp->flags & JSCLASS_IS_EXTENDED) {
        xclasp = (JSExtendedClass *)clasp;
        if (xclasp->outerObject) {
            /* Defer when the outer->inner round trip leaves the scope. */
            obj = xclasp->outerObject(cx, parent);
            if (!obj)
                return JS_FALSE;
            OBJ_TO_INNER_OBJECT(cx, obj);
            if (!obj)
                return JS_FALSE;
            *defer = obj != parent;
        }
    }
#ifdef DEBUG_igor
    if (*defer) {
        fprintf(stderr, "GEN: deferring, gen=%p parent=%p\n",
                (void *)gen, (void *)parent);
    }
#endif
    return JS_TRUE;
}
01067 
/*
 * Find all unreachable generators and move them to the todo queue from
 * rt->gcCloseState.reachableList to execute their close hooks after the GC
 * cycle completes. To ensure liveness during the sweep phase we mark all
 * generators we are going to close later.
 *
 * Reads GC mark bits, so this must run after the mark phase and before
 * sweeping.  todoQueueTail points at the current tail link of the queue.
 */
static void
FindAndMarkObjectsToClose(JSContext *cx, JSGCInvocationKind gckind,
                          JSGenerator **todoQueueTail)
{
    JSRuntime *rt;
    JSGenerator *todo, **genp, *gen;

    rt = cx->runtime;
    todo = NULL;
    genp = &rt->gcCloseState.reachableList;
    while ((gen = *genp) != NULL) {
        if (*js_GetGCThingFlags(gen->obj) & GCF_MARK) {
            /* Still reachable: keep it on the list and advance. */
            genp = &gen->next;
        } else {
            /* Generator must not be executing when it becomes unreachable. */
            JS_ASSERT(gen->state == JSGEN_NEWBORN ||
                      gen->state == JSGEN_OPEN ||
                      gen->state == JSGEN_CLOSED);

            *genp = gen->next;
            if (gen->state == JSGEN_OPEN &&
                js_FindFinallyHandler(gen->frame.script, gen->frame.pc) &&
                CanScheduleCloseHook(gen)) {
                /*
                 * Generator yielded inside a try with a finally block.
                 * Schedule it for closing.
                 *
                 * We keep generators that yielded outside try-with-finally
                 * with gen->state == JSGEN_OPEN. The finalizer must deal with
                 * open generators as we may skip the close hooks, see below.
                 */
                gen->next = NULL;
                *todoQueueTail = gen;
                todoQueueTail = &gen->next;
                if (!todo)
                    todo = gen;
                METER(JS_ASSERT(rt->gcStats.nclose));
                METER(rt->gcStats.nclose--);
                METER(rt->gcStats.closelater++);
                METER(rt->gcStats.maxcloselater
                      = JS_MAX(rt->gcStats.maxcloselater,
                               rt->gcStats.closelater));
            }
        }
    }

    if (gckind == GC_LAST_CONTEXT) {
        /*
         * Remove scheduled hooks on shutdown as it is too late to run them:
         * we do not allow execution of arbitrary scripts at this point.
         */
        rt->gcCloseState.todoQueue = NULL;
    } else {
        /*
         * Mark just-found unreachable generators *after* we scan the global
         * list to prevent a generator that refers to other unreachable
         * generators from keeping them on gcCloseState.reachableList.
         */
        for (gen = todo; gen; gen = gen->next)
            GC_MARK(cx, gen->obj, "newly scheduled generator");
    }
}
01136 
/*
 * Mark unreachable generators already scheduled to close and return the tail
 * pointer to JSGCCloseState.todoQueue.  Generators whose parent scope is no
 * longer marked are dropped from the queue instead of being marked.
 */
static JSGenerator **
MarkScheduledGenerators(JSContext *cx)
{
    JSRuntime *rt;
    JSGenerator **genp, *gen;

    rt = cx->runtime;
    genp = &rt->gcCloseState.todoQueue;
    while ((gen = *genp) != NULL) {
        if (CanScheduleCloseHook(gen)) {
            GC_MARK(cx, gen->obj, "scheduled generator");
            genp = &gen->next;
        } else {
            /* Discard the generator from the list if its schedule is over. */
            *genp = gen->next;
            METER(JS_ASSERT(rt->gcStats.closelater > 0));
            METER(rt->gcStats.closelater--);
        }
    }
    return genp;
}
01162 
/*
 * Reentrancy flag consulted by js_RunCloseHooks: per-thread when threadsafe,
 * otherwise a single per-runtime flag.
 */
#ifdef JS_THREADSAFE
# define GC_RUNNING_CLOSE_HOOKS_PTR(cx)                                       \
    (&(cx)->thread->gcRunningCloseHooks)
#else
# define GC_RUNNING_CLOSE_HOOKS_PTR(cx)                                       \
    (&(cx)->runtime->gcCloseState.runningCloseHook)
#endif
01170 
/*
 * Temporary GC root: a marker-style JSTempValueRooter whose mark hook keeps
 * every generator on the head list alive while close hooks run.
 */
typedef struct JSTempCloseList {
    JSTempValueRooter   tvr;
    JSGenerator         *head;
} JSTempCloseList;
01175 
01176 JS_STATIC_DLL_CALLBACK(void)
01177 mark_temp_close_list(JSContext *cx, JSTempValueRooter *tvr)
01178 {
01179     JSTempCloseList *list = (JSTempCloseList *)tvr;
01180     JSGenerator *gen;
01181 
01182     for (gen = list->head; gen; gen = gen->next)
01183         GC_MARK(cx, gen->obj, "temp list generator");
01184 }
01185 
/* Root tempList->head for GC via a marker-style temp rooter. */
#define JS_PUSH_TEMP_CLOSE_LIST(cx, tempList)                                 \
    JS_PUSH_TEMP_ROOT_MARKER(cx, mark_temp_close_list, &(tempList)->tvr)

/* Pop the rooter pushed by JS_PUSH_TEMP_CLOSE_LIST (asserts LIFO order). */
#define JS_POP_TEMP_CLOSE_LIST(cx, tempList)                                  \
    JS_BEGIN_MACRO                                                            \
        JS_ASSERT((tempList)->tvr.u.marker == mark_temp_close_list);          \
        JS_POP_TEMP_ROOT(cx, &(tempList)->tvr);                               \
    JS_END_MACRO
01194 
01195 JSBool
01196 js_RunCloseHooks(JSContext *cx)
01197 {
01198     JSRuntime *rt;
01199     JSTempCloseList tempList;
01200     JSStackFrame *fp;
01201     JSGenerator **genp, *gen;
01202     JSBool ok, defer;
01203 #if JS_GCMETER
01204     uint32 deferCount;
01205 #endif
01206 
01207     rt = cx->runtime;
01208 
01209     /*
01210      * It is OK to access todoQueue outside the lock here. When many threads
01211      * update the todo list, accessing some older value of todoQueue in the
01212      * worst case just delays the excution of close hooks.
01213      */
01214     if (!rt->gcCloseState.todoQueue)
01215         return JS_TRUE;
01216 
01217     /*
01218      * To prevent an infinite loop when a close hook creats more objects with
01219      * close hooks and then triggers GC we ignore recursive invocations of
01220      * js_RunCloseHooks and limit number of hooks to execute to the initial
01221      * size of the list.
01222      */
01223     if (*GC_RUNNING_CLOSE_HOOKS_PTR(cx))
01224         return JS_TRUE;
01225 
01226     *GC_RUNNING_CLOSE_HOOKS_PTR(cx) = JS_TRUE;
01227 
01228     JS_LOCK_GC(rt);
01229     tempList.head = rt->gcCloseState.todoQueue;
01230     JS_PUSH_TEMP_CLOSE_LIST(cx, &tempList);
01231     rt->gcCloseState.todoQueue = NULL;
01232     METER(rt->gcStats.closelater = 0);
01233     rt->gcPoke = JS_TRUE;
01234     JS_UNLOCK_GC(rt);
01235 
01236     /*
01237      * Set aside cx->fp since we do not want a close hook using caller or
01238      * other means to backtrace into whatever stack might be active when
01239      * running the hook. We store the current frame on the dormant list to
01240      * protect against GC that the hook can trigger.
01241      */
01242     fp = cx->fp;
01243     if (fp) {
01244         JS_ASSERT(!fp->dormantNext);
01245         fp->dormantNext = cx->dormantFrameChain;
01246         cx->dormantFrameChain = fp;
01247     }
01248     cx->fp = NULL;
01249 
01250     genp = &tempList.head;
01251     ok = JS_TRUE;
01252     while ((gen = *genp) != NULL) {
01253         ok = ShouldDeferCloseHook(cx, gen, &defer);
01254         if (!ok) {
01255             /* Quit ASAP discarding the hook. */
01256             *genp = gen->next;
01257             break;
01258         }
01259         if (defer) {
01260             genp = &gen->next;
01261             METER(deferCount++);
01262             continue;
01263         }
01264         ok = js_CloseGeneratorObject(cx, gen);
01265 
01266         /*
01267          * Unlink the generator after closing it to make sure it always stays
01268          * rooted through tempList.
01269          */
01270         *genp = gen->next;
01271 
01272         if (cx->throwing) {
01273             /*
01274              * Report the exception thrown by the close hook and continue to
01275              * execute the rest of the hooks.
01276              */
01277             if (!js_ReportUncaughtException(cx))
01278                 JS_ClearPendingException(cx);
01279             ok = JS_TRUE;
01280         } else if (!ok) {
01281             /*
01282              * Assume this is a stop signal from the branch callback or
01283              * other quit ASAP condition. Break execution until the next
01284              * invocation of js_RunCloseHooks.
01285              */
01286             break;
01287         }
01288     }
01289 
01290     cx->fp = fp;
01291     if (fp) {
01292         JS_ASSERT(cx->dormantFrameChain == fp);
01293         cx->dormantFrameChain = fp->dormantNext;
01294         fp->dormantNext = NULL;
01295     }
01296 
01297     if (tempList.head) {
01298         /*
01299          * Some close hooks were not yet executed, put them back into the
01300          * scheduled list.
01301          */
01302         while ((gen = *genp) != NULL) {
01303             genp = &gen->next;
01304             METER(deferCount++);
01305         }
01306 
01307         /* Now genp is a pointer to the tail of tempList. */
01308         JS_LOCK_GC(rt);
01309         *genp = rt->gcCloseState.todoQueue;
01310         rt->gcCloseState.todoQueue = tempList.head;
01311         METER(rt->gcStats.closelater += deferCount);
01312         METER(rt->gcStats.maxcloselater
01313               = JS_MAX(rt->gcStats.maxcloselater, rt->gcStats.closelater));
01314         JS_UNLOCK_GC(rt);
01315     }
01316 
01317     JS_POP_TEMP_CLOSE_LIST(cx, &tempList);
01318     *GC_RUNNING_CLOSE_HOOKS_PTR(cx) = JS_FALSE;
01319 
01320     return ok;
01321 }
01322 
01323 #endif /* JS_HAS_GENERATORS */
01324 
01325 #if defined(DEBUG_brendan) || defined(DEBUG_timeless)
01326 #define DEBUG_gchist
01327 #endif
01328 
#ifdef DEBUG_gchist
#define NGCHIST 64

/* Ring buffer recording recent js_NewGCThing allocations for debugging. */
static struct GCHist {
    JSBool      lastDitch;  /* whether a last-ditch GC ran for this alloc */
    JSGCThing   *freeList;  /* arena free-list head after the allocation */
} gchist[NGCHIST];

unsigned gchpos;            /* next slot to write; wraps at NGCHIST */
#endif
01339 
/*
 * Allocate a GC thing of nbytes (rounded up to a multiple of
 * sizeof(JSGCThing)) whose flag byte is set from flags.  Allocation order:
 * the per-thread free list (JS_THREADSAFE fast path, lock-free), then the
 * arena free list, then the tail of the last arena, growing with a new
 * arena or retrying once after a last-ditch GC.  On success the thing is
 * rooted via the local root stack or cx->weakRoots.newborn[]; on failure
 * reports out-of-memory and returns NULL.
 */
void *
js_NewGCThing(JSContext *cx, uintN flags, size_t nbytes)
{
    JSRuntime *rt;
    uintN flindex;
    JSBool doGC;
    JSGCThing *thing;
    uint8 *flagp, *firstPage;
    JSGCArenaList *arenaList;
    jsuword offset;
    JSGCArena *a;
    JSLocalRootStack *lrs;
#ifdef JS_THREADSAFE
    JSBool gcLocked;
    uintN localMallocBytes;
    JSGCThing **flbase, **lastptr;
    JSGCThing *tmpthing;
    uint8 *tmpflagp;
    uintN maxFreeThings;         /* max to take from the global free list */
    METER(size_t nfree);
#endif

    rt = cx->runtime;
    METER(rt->gcStats.alloc++);        /* this is not thread-safe */
    nbytes = JS_ROUNDUP(nbytes, sizeof(JSGCThing));
    flindex = GC_FREELIST_INDEX(nbytes);

#ifdef JS_THREADSAFE
    /* Fast path: take a thing from this thread's free list without locking. */
    gcLocked = JS_FALSE;
    JS_ASSERT(cx->thread);
    flbase = cx->thread->gcFreeLists;
    JS_ASSERT(flbase);
    thing = flbase[flindex];
    localMallocBytes = cx->thread->gcMallocBytes;
    if (thing && rt->gcMaxMallocBytes - rt->gcMallocBytes > localMallocBytes) {
        flagp = thing->flagp;
        flbase[flindex] = thing->next;
        METER(rt->gcStats.localalloc++);  /* this is not thread-safe */
        goto success;
    }

    JS_LOCK_GC(rt);
    gcLocked = JS_TRUE;

    /* Transfer thread-local counter to global one. */
    if (localMallocBytes != 0) {
        cx->thread->gcMallocBytes = 0;
        if (rt->gcMaxMallocBytes - rt->gcMallocBytes < localMallocBytes)
            rt->gcMallocBytes = rt->gcMaxMallocBytes;
        else
            rt->gcMallocBytes += localMallocBytes;
    }
#endif
    /* Allocation during an active GC is a bug; fail instead of corrupting. */
    JS_ASSERT(!rt->gcRunning);
    if (rt->gcRunning) {
        METER(rt->gcStats.finalfail++);
        JS_UNLOCK_GC(rt);
        return NULL;
    }

    /* Force a GC first when the malloc counter has hit its limit. */
    doGC = (rt->gcMallocBytes >= rt->gcMaxMallocBytes);
#ifdef JS_GC_ZEAL
    if (rt->gcZeal >= 1) {
        doGC = JS_TRUE;
        if (rt->gcZeal >= 2)
            rt->gcPoke = JS_TRUE;
    }
#endif /* !JS_GC_ZEAL */

    arenaList = &rt->gcArenaList[flindex];
    for (;;) {
        if (doGC) {
            /*
             * Keep rt->gcLock across the call into js_GC so we don't starve
             * and lose to racing threads who deplete the heap just after
             * js_GC has replenished it (or has synchronized with a racing
             * GC that collected a bunch of garbage).  This unfair scheduling
             * can happen on certain operating systems. For the gory details,
             * see bug 162779 at https://bugzilla.mozilla.org/.
             */
            js_GC(cx, GC_LAST_DITCH);
            METER(rt->gcStats.retry++);
        }

        /* Try to get thing from the free list. */
        thing = arenaList->freeList;
        if (thing) {
            arenaList->freeList = thing->next;
            flagp = thing->flagp;
            JS_ASSERT(*flagp & GCF_FINAL);
            METER(arenaList->stats.freelen--);
            METER(arenaList->stats.recycle++);

#ifdef JS_THREADSAFE
            /*
             * Refill the local free list by taking several things from the
             * global free list unless we are still at rt->gcMaxMallocBytes
             * barrier or the free list is already populated. The former
             * happens when GC is canceled due to !gcCallback(cx, JSGC_BEGIN)
             * or no gcPoke. The latter is caused via allocating new things
             * in gcCallback(cx, JSGC_END).
             */
            if (rt->gcMallocBytes >= rt->gcMaxMallocBytes || flbase[flindex])
                break;
            tmpthing = arenaList->freeList;
            if (tmpthing) {
                maxFreeThings = MAX_THREAD_LOCAL_THINGS;
                do {
                    if (!tmpthing->next)
                        break;
                    tmpthing = tmpthing->next;
                } while (--maxFreeThings != 0);

                flbase[flindex] = arenaList->freeList;
                arenaList->freeList = tmpthing->next;
                tmpthing->next = NULL;
            }
#endif
            break;
        }

        /* Allocate from the tail of last arena or from new arena if we can. */
        if ((arenaList->last && arenaList->lastLimit != GC_THINGS_SIZE) ||
            NewGCArena(rt, arenaList)) {

            offset = arenaList->lastLimit;
            if ((offset & GC_PAGE_MASK) == 0) {
                /*
                 * Skip JSGCPageInfo record located at GC_PAGE_SIZE boundary.
                 */
                offset += PAGE_THING_GAP(nbytes);
            }
            JS_ASSERT(offset + nbytes <= GC_THINGS_SIZE);
            arenaList->lastLimit = (uint16)(offset + nbytes);
            a = arenaList->last;
            firstPage = (uint8 *)FIRST_THING_PAGE(a);
            thing = (JSGCThing *)(firstPage + offset);
            flagp = a->base + offset / sizeof(JSGCThing);
            if (flagp >= firstPage)
                flagp += GC_THINGS_SIZE;
            METER(++arenaList->stats.nthings);
            METER(arenaList->stats.maxthings =
                  JS_MAX(arenaList->stats.nthings,
                         arenaList->stats.maxthings));

#ifdef JS_THREADSAFE
            /*
             * Refill the local free list by taking free things from the last
             * arena. Prefer to order free things by ascending address in the
             * (unscientific) hope of better cache locality.
             */
            if (rt->gcMallocBytes >= rt->gcMaxMallocBytes || flbase[flindex])
                break;
            METER(nfree = 0);
            lastptr = &flbase[flindex];
            maxFreeThings = MAX_THREAD_LOCAL_THINGS;
            for (offset = arenaList->lastLimit;
                 offset != GC_THINGS_SIZE && maxFreeThings-- != 0;
                 offset += nbytes) {
                if ((offset & GC_PAGE_MASK) == 0)
                    offset += PAGE_THING_GAP(nbytes);
                JS_ASSERT(offset + nbytes <= GC_THINGS_SIZE);
                tmpflagp = a->base + offset / sizeof(JSGCThing);
                if (tmpflagp >= firstPage)
                    tmpflagp += GC_THINGS_SIZE;

                tmpthing = (JSGCThing *)(firstPage + offset);
                tmpthing->flagp = tmpflagp;
                *tmpflagp = GCF_FINAL;    /* signifying that thing is free */

                *lastptr = tmpthing;
                lastptr = &tmpthing->next;
                METER(++nfree);
            }
            arenaList->lastLimit = offset;
            *lastptr = NULL;
            METER(arenaList->stats.freelen += nfree);
#endif
            break;
        }

        /* Consider doing a "last ditch" GC unless already tried. */
        if (doGC)
            goto fail;
        rt->gcPoke = JS_TRUE;
        doGC = JS_TRUE;
    }

    /* We successfully allocated the thing. */
#ifdef JS_THREADSAFE
  success:
#endif
    lrs = cx->localRootStack;
    if (lrs) {
        /*
         * If we're in a local root scope, don't set newborn[type] at all, to
         * avoid entraining garbage from it for an unbounded amount of time
         * on this context.  A caller will leave the local root scope and pop
         * this reference, allowing thing to be GC'd if it has no other refs.
         * See JS_EnterLocalRootScope and related APIs.
         */
        if (js_PushLocalRoot(cx, lrs, (jsval) thing) < 0) {
            /*
             * When we fail for a thing allocated through the tail of the last
             * arena, thing's flag byte is not initialized. So to prevent GC
             * accessing the uninitialized flags during the finalization, we
             * always mark the thing as final. See bug 337407.
             */
            *flagp = GCF_FINAL;
            goto fail;
        }
    } else {
        /*
         * No local root scope, so we're stuck with the old, fragile model of
         * depending on a pigeon-hole newborn per type per context.
         */
        cx->weakRoots.newborn[flags & GCF_TYPEMASK] = thing;
    }

    /* We can't fail now, so update flags and rt->gc{,Private}Bytes. */
    *flagp = (uint8)flags;

    /*
     * Clear thing before unlocking in case a GC run is about to scan it,
     * finding it via newborn[].
     */
    thing->next = NULL;
    thing->flagp = NULL;
#ifdef DEBUG_gchist
    gchist[gchpos].lastDitch = doGC;
    gchist[gchpos].freeList = rt->gcArenaList[flindex].freeList;
    if (++gchpos == NGCHIST)
        gchpos = 0;
#endif
    METER(if (flags & GCF_LOCK) rt->gcStats.lockborn++);
    METER(++rt->gcArenaList[flindex].stats.totalnew);
#ifdef JS_THREADSAFE
    if (gcLocked)
        JS_UNLOCK_GC(rt);
#endif
    return thing;

fail:
#ifdef JS_THREADSAFE
    if (gcLocked)
        JS_UNLOCK_GC(rt);
#endif
    METER(rt->gcStats.fail++);
    JS_ReportOutOfMemory(cx);
    return NULL;
}
01591 
01592 JSBool
01593 js_LockGCThing(JSContext *cx, void *thing)
01594 {
01595     JSBool ok = js_LockGCThingRT(cx->runtime, thing);
01596     if (!ok)
01597         JS_ReportOutOfMemory(cx);
01598     return ok;
01599 }
01600 
01601 /*
01602  * Deep GC-things can't be locked just by setting the GCF_LOCK bit, because
01603  * their descendants must be marked by the GC.  To find them during the mark
01604  * phase, they are added to rt->gcLocksHash, which is created lazily.
01605  *
01606  * NB: we depend on the order of GC-thing type indexes here!
01607  */
/* Type-classification helpers over the GCX_* thing-type indexes. */
#define GC_TYPE_IS_STRING(t)    ((t) == GCX_STRING ||                         \
                                 (t) >= GCX_EXTERNAL_STRING)
#define GC_TYPE_IS_XML(t)       ((unsigned)((t) - GCX_NAMESPACE) <=           \
                                 (unsigned)(GCX_XML - GCX_NAMESPACE))
#define GC_TYPE_IS_DEEP(t)      ((t) == GCX_OBJECT || GC_TYPE_IS_XML(t))

/* A dependent string is also "deep": its base must be reached by the GC. */
#define IS_DEEP_STRING(t,o)     (GC_TYPE_IS_STRING(t) &&                      \
                                 JSSTRING_IS_DEPENDENT((JSString *)(o)))

#define GC_THING_IS_DEEP(t,o)   (GC_TYPE_IS_DEEP(t) || IS_DEEP_STRING(t, o))

/* This is compatible with JSDHashEntryStub. */
typedef struct JSGCLockHashEntry {
    JSDHashEntryHdr hdr;
    const JSGCThing *thing;     /* the locked thing (also the hash key) */
    uint32          count;      /* nested lock count */
} JSGCLockHashEntry;
01625 
/*
 * Lock a GC thing against collection.  Shallow things are locked by setting
 * GCF_LOCK in their flag byte; deep things, and shallow things locked more
 * than once, get a counted entry in the lazily created rt->gcLocksHash (see
 * the comment above the GC_TYPE_IS_* macros).  Returns JS_FALSE only on
 * out-of-memory while creating or growing the lock table.
 */
JSBool
js_LockGCThingRT(JSRuntime *rt, void *thing)
{
    JSBool ok, deep;
    uint8 *flagp;
    uintN flags, lock, type;
    JSGCLockHashEntry *lhe;

    ok = JS_TRUE;
    /* Locking a null thing is a successful no-op. */
    if (!thing)
        return ok;

    flagp = js_GetGCThingFlags(thing);

    JS_LOCK_GC(rt);
    flags = *flagp;
    lock = (flags & GCF_LOCK);
    type = (flags & GCF_TYPEMASK);
    deep = GC_THING_IS_DEEP(type, thing);

    /*
     * Avoid adding a rt->gcLocksHash entry for shallow things until someone
     * nests a lock -- then start such an entry with a count of 2, not 1.
     */
    if (lock || deep) {
        if (!rt->gcLocksHash) {
            /* Lazily create the lock table on first deep/nested lock. */
            rt->gcLocksHash =
                JS_NewDHashTable(JS_DHashGetStubOps(), NULL,
                                 sizeof(JSGCLockHashEntry),
                                 GC_ROOTS_SIZE);
            if (!rt->gcLocksHash) {
                ok = JS_FALSE;
                goto done;
            }
        } else if (lock == 0) {
#ifdef DEBUG
            /* A shallow thing locked once must not already be in the table. */
            JSDHashEntryHdr *hdr =
                JS_DHashTableOperate(rt->gcLocksHash, thing,
                                     JS_DHASH_LOOKUP);
            JS_ASSERT(JS_DHASH_ENTRY_IS_FREE(hdr));
#endif
        }

        lhe = (JSGCLockHashEntry *)
            JS_DHashTableOperate(rt->gcLocksHash, thing, JS_DHASH_ADD);
        if (!lhe) {
            ok = JS_FALSE;
            goto done;
        }
        if (!lhe->thing) {
            /* Fresh entry: deep things start at 1, nested shallow at 2. */
            lhe->thing = thing;
            lhe->count = deep ? 1 : 2;
        } else {
            JS_ASSERT(lhe->count >= 1);
            lhe->count++;
        }
    }

    *flagp = (uint8)(flags | GCF_LOCK);
    METER(rt->gcStats.lock++);
    ok = JS_TRUE;
done:
    JS_UNLOCK_GC(rt);
    return ok;
}
01691 
/*
 * Undo one js_LockGCThingRT lock.  A shallow thing locked exactly once has
 * no rt->gcLocksHash entry and an implicit count of 1, so clearing GCF_LOCK
 * suffices; otherwise the counted entry is decremented and removed when it
 * reaches zero.  Always returns JS_TRUE.
 */
JSBool
js_UnlockGCThingRT(JSRuntime *rt, void *thing)
{
    uint8 *flagp, flags;
    JSGCLockHashEntry *lhe;

    if (!thing)
        return JS_TRUE;

    flagp = js_GetGCThingFlags(thing);
    JS_LOCK_GC(rt);
    flags = *flagp;

    if (flags & GCF_LOCK) {
        /*
         * The comma expression performs the table lookup only when the table
         * exists, leaving the entry in lhe for the else branch.
         */
        if (!rt->gcLocksHash ||
            (lhe = (JSGCLockHashEntry *)
                   JS_DHashTableOperate(rt->gcLocksHash, thing,
                                        JS_DHASH_LOOKUP),
             JS_DHASH_ENTRY_IS_FREE(&lhe->hdr))) {
            /* Shallow GC-thing with an implicit lock count of 1. */
            JS_ASSERT(!GC_THING_IS_DEEP(flags & GCF_TYPEMASK, thing));
        } else {
            /* Basis or nested unlock of a deep thing, or nested of shallow. */
            if (--lhe->count != 0)
                goto out;
            JS_DHashTableOperate(rt->gcLocksHash, thing, JS_DHASH_REMOVE);
        }
        *flagp = (uint8)(flags & ~GCF_LOCK);
    }

    /* Poke the GC: a previously pinned thing may now be collectable. */
    rt->gcPoke = JS_TRUE;
out:
    METER(rt->gcStats.unlock++);
    JS_UNLOCK_GC(rt);
    return JS_TRUE;
}
01728 
01729 #ifdef GC_MARK_DEBUG
01730 
01731 #include <stdio.h>
01732 #include "jsprf.h"
01733 
typedef struct GCMarkNode GCMarkNode;

/*
 * One stack-allocated node per active js_MarkNamedGCThing call; chained
 * together they record the path of named edges from a root to the thing
 * currently being marked (used by gc_dump_thing).
 */
struct GCMarkNode {
    void        *thing;     /* the GC-thing this node marks */
    const char  *name;      /* edge name supplied by the caller */
    GCMarkNode  *next;      /* towards the most recent mark call */
    GCMarkNode  *prev;      /* towards the root of the mark path */
};
01742 
01743 JS_FRIEND_DATA(FILE *) js_DumpGCHeap;
01744 JS_EXPORT_DATA(void *) js_LiveThingToFind;
01745 
01746 #ifdef HAVE_XPCONNECT
01747 #include "dump_xpc.h"
01748 #endif
01749 
/*
 * Fill buf with a printable name for obj's slot number `slot`: the property
 * id when a matching JSScopeProperty is found in scope, a well-known name
 * for reserved slots (__proto__, __parent__, class objects of a global), or
 * a "**...**" marker when nothing better is known.
 */
static void
GetObjSlotName(JSScope *scope, JSObject *obj, uint32 slot, char *buf,
               size_t bufsize)
{
    jsval nval;
    JSScopeProperty *sprop;
    JSClass *clasp;
    uint32 key;
    const char *slotname;

    if (!scope) {
        JS_snprintf(buf, bufsize, "**UNKNOWN OBJECT MAP ENTRY**");
        return;
    }

    /* Walk the scope's property list looking for one that uses this slot. */
    sprop = SCOPE_LAST_PROP(scope);
    while (sprop && sprop->slot != slot)
        sprop = sprop->parent;

    if (!sprop) {
        switch (slot) {
          case JSSLOT_PROTO:
            JS_snprintf(buf, bufsize, "__proto__");
            break;
          case JSSLOT_PARENT:
            JS_snprintf(buf, bufsize, "__parent__");
            break;
          default:
            slotname = NULL;
            clasp = LOCKED_OBJ_GET_CLASS(obj);
            if (clasp->flags & JSCLASS_IS_GLOBAL) {
                key = slot - JSSLOT_START(clasp);
                /*
                 * Expand jsproto.tbl into a chain of comparisons of key
                 * against every standard class's reserved-slot code.
                 */
#define JS_PROTO(name,code,init) \
    if ((code) == key) { slotname = js_##name##_str; goto found; }
#include "jsproto.tbl"
#undef JS_PROTO
            }
          found:
            if (slotname)
                JS_snprintf(buf, bufsize, "CLASS_OBJECT(%s)", slotname);
            else
                JS_snprintf(buf, bufsize, "**UNKNOWN SLOT %ld**", (long)slot);
            break;
        }
    } else {
        /* Found a property for the slot: print its id as number or string. */
        nval = ID_TO_VALUE(sprop->id);
        if (JSVAL_IS_INT(nval)) {
            JS_snprintf(buf, bufsize, "%ld", (long)JSVAL_TO_INT(nval));
        } else if (JSVAL_IS_STRING(nval)) {
            JS_snprintf(buf, bufsize, "%s",
                        JS_GetStringBytes(JSVAL_TO_STRING(nval)));
        } else {
            JS_snprintf(buf, bufsize, "**FINALIZED ATOM KEY**");
        }
    }
}
01806 
01807 static const char *
01808 gc_object_class_name(void* thing)
01809 {
01810     uint8 *flagp = js_GetGCThingFlags(thing);
01811     const char *className = "";
01812     static char depbuf[32];
01813 
01814     switch (*flagp & GCF_TYPEMASK) {
01815       case GCX_OBJECT: {
01816         JSObject  *obj = (JSObject *)thing;
01817         JSClass   *clasp = JSVAL_TO_PRIVATE(obj->slots[JSSLOT_CLASS]);
01818         className = clasp->name;
01819 #ifdef HAVE_XPCONNECT
01820         if (clasp->flags & JSCLASS_PRIVATE_IS_NSISUPPORTS) {
01821             jsval privateValue = obj->slots[JSSLOT_PRIVATE];
01822 
01823             JS_ASSERT(clasp->flags & JSCLASS_HAS_PRIVATE);
01824             if (!JSVAL_IS_VOID(privateValue)) {
01825                 void  *privateThing = JSVAL_TO_PRIVATE(privateValue);
01826                 const char *xpcClassName = GetXPCObjectClassName(privateThing);
01827 
01828                 if (xpcClassName)
01829                     className = xpcClassName;
01830             }
01831         }
01832 #endif
01833         break;
01834       }
01835 
01836       case GCX_STRING:
01837       case GCX_MUTABLE_STRING: {
01838         JSString *str = (JSString *)thing;
01839         if (JSSTRING_IS_DEPENDENT(str)) {
01840             JS_snprintf(depbuf, sizeof depbuf, "start:%u, length:%u",
01841                         JSSTRDEP_START(str), JSSTRDEP_LENGTH(str));
01842             className = depbuf;
01843         } else {
01844             className = "string";
01845         }
01846         break;
01847       }
01848 
01849       case GCX_DOUBLE:
01850         className = "double";
01851         break;
01852     }
01853 
01854     return className;
01855 }
01856 
/*
 * Print a one-line description of thing to fp: its address, a type-specific
 * summary, and the chain of named edges ("path") that the marker followed
 * from a root to reach it.  Nothing is printed when the current mark-node
 * path is empty.
 */
static void
gc_dump_thing(JSContext *cx, JSGCThing *thing, FILE *fp)
{
    GCMarkNode *prev = (GCMarkNode *)cx->gcCurrentMarkNode;
    GCMarkNode *next = NULL;
    char *path = NULL;

    /* Rewind to the root of the mark-node chain... */
    while (prev) {
        next = prev;
        prev = prev->prev;
    }
    /* ...then walk forward, appending one "name(class)." hop per node. */
    while (next) {
        uint8 nextFlags = *js_GetGCThingFlags(next->thing);
        if ((nextFlags & GCF_TYPEMASK) == GCX_OBJECT) {
            path = JS_sprintf_append(path, "%s(%s @ 0x%08p).",
                                     next->name,
                                     gc_object_class_name(next->thing),
                                     (JSObject*)next->thing);
        } else {
            path = JS_sprintf_append(path, "%s(%s).",
                                     next->name,
                                     gc_object_class_name(next->thing));
        }
        next = next->next;
    }
    if (!path)
        return;

    fprintf(fp, "%08lx ", (long)thing);
    switch (*js_GetGCThingFlags(thing) & GCF_TYPEMASK) {
      case GCX_OBJECT:
      {
        JSObject  *obj = (JSObject *)thing;
        jsval     privateValue = obj->slots[JSSLOT_PRIVATE];
        void      *privateThing = JSVAL_IS_VOID(privateValue)
                                  ? NULL
                                  : JSVAL_TO_PRIVATE(privateValue);
        const char *className = gc_object_class_name(thing);
        fprintf(fp, "object %8p %s", privateThing, className);
        break;
      }
#if JS_HAS_XML_SUPPORT
      case GCX_NAMESPACE:
      {
        JSXMLNamespace *ns = (JSXMLNamespace *)thing;
        fprintf(fp, "namespace %s:%s",
                JS_GetStringBytes(ns->prefix), JS_GetStringBytes(ns->uri));
        break;
      }
      case GCX_QNAME:
      {
        JSXMLQName *qn = (JSXMLQName *)thing;
        fprintf(fp, "qname %s(%s):%s",
                JS_GetStringBytes(qn->prefix), JS_GetStringBytes(qn->uri),
                JS_GetStringBytes(qn->localName));
        break;
      }
      case GCX_XML:
      {
        extern const char *js_xml_class_str[];
        JSXML *xml = (JSXML *)thing;
        fprintf(fp, "xml %8p %s", xml, js_xml_class_str[xml->xml_class]);
        break;
      }
#endif
      case GCX_DOUBLE:
        fprintf(fp, "double %g", *(jsdouble *)thing);
        break;
      case GCX_PRIVATE:
        fprintf(fp, "private %8p", (void *)thing);
        break;
      default:
        /* Remaining type indexes are the string flavors. */
        fprintf(fp, "string %s", JS_GetStringBytes((JSString *)thing));
        break;
    }
    fprintf(fp, " via %s\n", path);
    free(path);
}
01935 
01936 void
01937 js_MarkNamedGCThing(JSContext *cx, void *thing, const char *name)
01938 {
01939     GCMarkNode markNode;
01940 
01941     if (!thing)
01942         return;
01943 
01944     markNode.thing = thing;
01945     markNode.name  = name;
01946     markNode.next  = NULL;
01947     markNode.prev  = (GCMarkNode *)cx->gcCurrentMarkNode;
01948     if (markNode.prev)
01949         markNode.prev->next = &markNode;
01950     cx->gcCurrentMarkNode = &markNode;
01951 
01952     if (thing == js_LiveThingToFind) {
01953         /*
01954          * Dump js_LiveThingToFind each time we reach it during the marking
01955          * phase of GC to print all live references to the thing.
01956          */
01957         gc_dump_thing(cx, thing, stderr);
01958     }
01959 
01960     js_MarkGCThing(cx, thing);
01961 
01962     if (markNode.prev)
01963         markNode.prev->next = NULL;
01964     cx->gcCurrentMarkNode = markNode.prev;
01965 }
01966 
01967 #endif /* !GC_MARK_DEBUG */
01968 
01969 static void
01970 gc_mark_atom_key_thing(void *thing, void *arg)
01971 {
01972     JSContext *cx = (JSContext *) arg;
01973 
01974     GC_MARK(cx, thing, "atom");
01975 }
01976 
/*
 * Mark an atom during GC.  The ATOM_MARK flag prevents reprocessing; the
 * atom's key is marked when it is a GC-thing, and a hidden atom's shadowed
 * value is marked recursively.
 */
void
js_MarkAtom(JSContext *cx, JSAtom *atom)
{
    jsval key;

    if (atom->flags & ATOM_MARK)
        return;
    atom->flags |= ATOM_MARK;
    key = ATOM_KEY(atom);
    if (JSVAL_IS_GCTHING(key)) {
#ifdef GC_MARK_DEBUG
        /* Build a debug label from the key for the mark-path dump. */
        char name[32];

        if (JSVAL_IS_STRING(key)) {
            JS_snprintf(name, sizeof name, "'%s'",
                        JS_GetStringBytes(JSVAL_TO_STRING(key)));
        } else {
            JS_snprintf(name, sizeof name, "<%x>", key);
        }
#endif
        /*
         * NOTE(review): name exists only under GC_MARK_DEBUG, so GC_MARK
         * presumably discards its third argument in other builds -- see the
         * macro's definition elsewhere in this file.
         */
        GC_MARK(cx, JSVAL_TO_GCTHING(key), name);
    }
    if (atom->flags & ATOM_HIDDEN)
        js_MarkAtom(cx, atom->entry.value);
}
02002 
02003 static void
02004 AddThingToUnscannedBag(JSRuntime *rt, void *thing, uint8 *flagp);
02005 
/*
 * Mark the children of a GC-thing the caller has already marked.  Object
 * slots and XML things are scanned recursively while C stack remains (the
 * shouldCheckRecursion flag enables the stack check); when the stack runs
 * low -- or always, under JS_GC_ASSUME_LOW_C_STACK -- the thing is pushed
 * into the "unscanned bag" for ScanDelayedChildren to finish.  The last
 * unmarked child of an object, and a dependent string's base, are handled
 * by goto-based tail recursion instead of a real C call.
 */
static void
MarkGCThingChildren(JSContext *cx, void *thing, uint8 *flagp,
                    JSBool shouldCheckRecursion)
{
    JSRuntime *rt;
    JSObject *obj;
    jsval v, *vp, *end;
    void *next_thing;
    uint8 *next_flagp;
    JSString *str;
#ifdef JS_GCMETER
    uint32 tailCallNesting;
#endif
#ifdef GC_MARK_DEBUG
    JSScope *scope;
    char name[32];
#endif

    /*
     * With JS_GC_ASSUME_LOW_C_STACK defined the mark phase of GC always
     * uses the non-recursive code that otherwise would be called only on
     * a low C stack condition.
     */
#ifdef JS_GC_ASSUME_LOW_C_STACK
# define RECURSION_TOO_DEEP() shouldCheckRecursion
#else
    int stackDummy;
# define RECURSION_TOO_DEEP() (shouldCheckRecursion &&                        \
                               !JS_CHECK_STACK_SIZE(cx, stackDummy))
#endif

    rt = cx->runtime;
    METER(tailCallNesting = 0);
    METER(if (++rt->gcStats.cdepth > rt->gcStats.maxcdepth)
              rt->gcStats.maxcdepth = rt->gcStats.cdepth);

    /* Tail-recursion target; not used when GC_MARK_DEBUG keeps real calls. */
#ifndef GC_MARK_DEBUG
  start:
#endif
    JS_ASSERT(flagp);
    JS_ASSERT(*flagp & GCF_MARK); /* the caller must already mark the thing */
    METER(if (++rt->gcStats.depth > rt->gcStats.maxdepth)
              rt->gcStats.maxdepth = rt->gcStats.depth);
#ifdef GC_MARK_DEBUG
    if (js_DumpGCHeap)
        gc_dump_thing(cx, thing, js_DumpGCHeap);
#endif

    switch (*flagp & GCF_TYPEMASK) {
      case GCX_OBJECT:
        if (RECURSION_TOO_DEEP())
            goto add_to_unscanned_bag;
        /* If obj->slots is null, obj must be a newborn. */
        obj = (JSObject *) thing;
        vp = obj->slots;
        if (!vp)
            break;

        /* Mark slots if they are small enough to be GC-allocated. */
        if ((vp[-1] + 1) * sizeof(jsval) <= GC_NBYTES_MAX)
            GC_MARK(cx, vp - 1, "slots");

        /* Set up local variables to loop over unmarked things. */
        end = vp + ((obj->map->ops->mark)
                    ? obj->map->ops->mark(cx, obj, NULL)
                    : JS_MIN(obj->map->freeslot, obj->map->nslots));
        thing = NULL;
        flagp = NULL;
#ifdef GC_MARK_DEBUG
        scope = OBJ_IS_NATIVE(obj) ? OBJ_SCOPE(obj) : NULL;
#endif
        /*
         * One-slot lookahead: thing/flagp trail the loop by one unmarked
         * child so the final child can be processed by tail recursion
         * after the loop instead of by a nested call.
         */
        for (; vp != end; ++vp) {
            v = *vp;
            if (!JSVAL_IS_GCTHING(v) || v == JSVAL_NULL)
                continue;
            next_thing = JSVAL_TO_GCTHING(v);
            if (next_thing == thing)
                continue;
            next_flagp = js_GetGCThingFlags(next_thing);
            if (*next_flagp & GCF_MARK)
                continue;
            JS_ASSERT(*next_flagp != GCF_FINAL);
            if (thing) {
#ifdef GC_MARK_DEBUG
                GC_MARK(cx, thing, name);
#else
                *flagp |= GCF_MARK;
                MarkGCThingChildren(cx, thing, flagp, JS_TRUE);
#endif
                if (*next_flagp & GCF_MARK) {
                    /*
                     * This happens when recursive MarkGCThingChildren marks
                     * the thing with flags referred by *next_flagp.
                     */
                    thing = NULL;
                    continue;
                }
            }
#ifdef GC_MARK_DEBUG
            GetObjSlotName(scope, obj, vp - obj->slots, name, sizeof name);
#endif
            thing = next_thing;
            flagp = next_flagp;
        }
        if (thing) {
            /*
             * thing came from the last unmarked GC-thing slot and we
             * can optimize tail recursion.
             *
             * Since we already know that there is enough C stack space,
             * we clear shouldCheckRecursion to avoid extra checking in
             * RECURSION_TOO_DEEP.
             */
            shouldCheckRecursion = JS_FALSE;
            goto on_tail_recursion;
        }
        break;

#ifdef DEBUG
      case GCX_STRING:
        /* An immutable string must not be dependent; it has no children. */
        str = (JSString *)thing;
        JS_ASSERT(!JSSTRING_IS_DEPENDENT(str));
        break;
#endif

      case GCX_MUTABLE_STRING:
        str = (JSString *)thing;
        if (!JSSTRING_IS_DEPENDENT(str))
            break;
        /* A dependent string's only child is its base string. */
        thing = JSSTRDEP_BASE(str);
        flagp = js_GetGCThingFlags(thing);
        if (*flagp & GCF_MARK)
            break;
#ifdef GC_MARK_DEBUG
        strcpy(name, "base");
#endif
        /* Fallthrough to code to deal with the tail recursion. */

      on_tail_recursion:
#ifdef GC_MARK_DEBUG
        /*
         * Do not eliminate C recursion when debugging to allow
         * js_MarkNamedGCThing to build a full dump of live GC
         * things.
         */
        GC_MARK(cx, thing, name);
        break;
#else
        /* Eliminate tail recursion for the last unmarked child. */
        JS_ASSERT(*flagp != GCF_FINAL);
        METER(++tailCallNesting);
        *flagp |= GCF_MARK;
        goto start;
#endif

#if JS_HAS_XML_SUPPORT
      case GCX_NAMESPACE:
        if (RECURSION_TOO_DEEP())
            goto add_to_unscanned_bag;
        js_MarkXMLNamespace(cx, (JSXMLNamespace *)thing);
        break;

      case GCX_QNAME:
        if (RECURSION_TOO_DEEP())
            goto add_to_unscanned_bag;
        js_MarkXMLQName(cx, (JSXMLQName *)thing);
        break;

      case GCX_XML:
        if (RECURSION_TOO_DEEP())
            goto add_to_unscanned_bag;
        js_MarkXML(cx, (JSXML *)thing);
        break;
#endif
      add_to_unscanned_bag:
        /* Defer scanning of this thing's children to ScanDelayedChildren. */
        AddThingToUnscannedBag(cx->runtime, thing, flagp);
        break;
    }

#undef RECURSION_TOO_DEEP

    METER(rt->gcStats.depth -= 1 + tailCallNesting);
    METER(rt->gcStats.cdepth--);
}
02190 
02191 /*
02192  * Avoid using PAGE_THING_GAP inside this macro to optimize the
02193  * thingsPerUnscannedChunk calculation when thingSize is a power of two.
02194  */
02195 #define GET_GAP_AND_CHUNK_SPAN(thingSize, thingsPerUnscannedChunk, pageGap)   \
02196     JS_BEGIN_MACRO                                                            \
02197         if (0 == ((thingSize) & ((thingSize) - 1))) {                         \
02198             pageGap = (thingSize);                                            \
02199             thingsPerUnscannedChunk = ((GC_PAGE_SIZE / (thingSize))           \
02200                                        + JS_BITS_PER_WORD - 1)                \
02201                                       >> JS_BITS_PER_WORD_LOG2;               \
02202         } else {                                                              \
02203             pageGap = GC_PAGE_SIZE % (thingSize);                             \
02204             thingsPerUnscannedChunk = JS_HOWMANY(GC_PAGE_SIZE / (thingSize),  \
02205                                                  JS_BITS_PER_WORD);           \
02206         }                                                                     \
02207     JS_END_MACRO
02208 
/*
 * Record a marked thing whose children could not be scanned (low C stack)
 * so ScanDelayedChildren can process them later.  The thing is tagged
 * GCF_MARK | GCF_FINAL; its chunk's bit is set in the page's
 * unscannedBitmap, the page's bit in the arena's unscannedPages, and the
 * arena is pushed on rt->gcUnscannedArenaStackTop unless already there.
 */
static void
AddThingToUnscannedBag(JSRuntime *rt, void *thing, uint8 *flagp)
{
    JSGCPageInfo *pi;
    JSGCArena *arena;
    size_t thingSize;
    size_t thingsPerUnscannedChunk;
    size_t pageGap;
    size_t chunkIndex;
    jsuword bit;

    /* Things from delayed scanning bag are marked as GCF_MARK | GCF_FINAL. */
    JS_ASSERT((*flagp & (GCF_MARK | GCF_FINAL)) == GCF_MARK);
    *flagp |= GCF_FINAL;

    METER(rt->gcStats.unscanned++);
#ifdef DEBUG
    ++rt->gcUnscannedBagSize;
    METER(if (rt->gcUnscannedBagSize > rt->gcStats.maxunscanned)
              rt->gcStats.maxunscanned = rt->gcUnscannedBagSize);
#endif

    /* Locate the thing's chunk bit within its page's unscannedBitmap. */
    pi = THING_TO_PAGE(thing);
    arena = PAGE_TO_ARENA(pi);
    thingSize = arena->list->thingSize;
    GET_GAP_AND_CHUNK_SPAN(thingSize, thingsPerUnscannedChunk, pageGap);
    chunkIndex = (((jsuword)thing & GC_PAGE_MASK) - pageGap) /
                 (thingSize * thingsPerUnscannedChunk);
    JS_ASSERT(chunkIndex < JS_BITS_PER_WORD);
    bit = (jsuword)1 << chunkIndex;
    if (pi->unscannedBitmap != 0) {
        JS_ASSERT(rt->gcUnscannedArenaStackTop);
        if (thingsPerUnscannedChunk != 1) {
            if (pi->unscannedBitmap & bit) {
                /* Chunk already contains things to scan later. */
                return;
            }
        } else {
            /*
             * The chunk must not contain things to scan later if there is
             * only one thing per chunk.
             */
            JS_ASSERT(!(pi->unscannedBitmap & bit));
        }
        pi->unscannedBitmap |= bit;
        JS_ASSERT(arena->unscannedPages & ((size_t)1 << PAGE_INDEX(pi)));
    } else {
        /*
         * The thing is the first unscanned thing in the page, set the bit
         * corresponding to this page arena->unscannedPages.
         */
        pi->unscannedBitmap = bit;
        JS_ASSERT(PAGE_INDEX(pi) < JS_BITS_PER_WORD);
        bit = (jsuword)1 << PAGE_INDEX(pi);
        JS_ASSERT(!(arena->unscannedPages & bit));
        if (arena->unscannedPages != 0) {
            arena->unscannedPages |= bit;
            JS_ASSERT(arena->prevUnscanned);
            JS_ASSERT(rt->gcUnscannedArenaStackTop);
        } else {
            /*
             * The thing is the first unscanned thing in the whole arena, push
             * the arena on the stack of unscanned arenas unless the arena
             * has already been pushed. We detect that through prevUnscanned
             * field which is NULL only for not yet pushed arenas. To ensure
             * that prevUnscanned != NULL even when the stack contains one
             * element, we make prevUnscanned for the arena at the bottom
             * to point to itself.
             *
             * See comments in ScanDelayedChildren.
             */
            arena->unscannedPages = bit;
            if (!arena->prevUnscanned) {
                if (!rt->gcUnscannedArenaStackTop) {
                    /* Stack was empty, mark the arena as bottom element. */
                    arena->prevUnscanned = arena;
                } else {
                    JS_ASSERT(rt->gcUnscannedArenaStackTop->prevUnscanned);
                    arena->prevUnscanned = rt->gcUnscannedArenaStackTop;
                }
                rt->gcUnscannedArenaStackTop = arena;
            }
         }
     }
    JS_ASSERT(rt->gcUnscannedArenaStackTop);
}
02295 
/*
 * Drain the unscanned bag built by AddThingToUnscannedBag: pop arenas from
 * rt->gcUnscannedArenaStackTop, decode each set page/chunk bit back to the
 * deferred things, clear their GCF_FINAL tag and mark their children with
 * MarkGCThingChildren.  Marking may push more work (even new arenas) onto
 * the stack, so the loop keeps going until the stack bottom -- an arena
 * whose prevUnscanned points to itself -- is reached.
 */
static void
ScanDelayedChildren(JSContext *cx)
{
    JSRuntime *rt;
    JSGCArena *arena;
    size_t thingSize;
    size_t thingsPerUnscannedChunk;
    size_t pageGap;
    size_t pageIndex;
    JSGCPageInfo *pi;
    size_t chunkIndex;
    size_t thingOffset, thingLimit;
    JSGCThing *thing;
    uint8 *flagp;
    JSGCArena *prevArena;

    rt = cx->runtime;
    arena = rt->gcUnscannedArenaStackTop;
    if (!arena) {
        JS_ASSERT(rt->gcUnscannedBagSize == 0);
        return;
    }

    /* Recompute per-arena geometry whenever the thing size changes. */
  init_size:
    thingSize = arena->list->thingSize;
    GET_GAP_AND_CHUNK_SPAN(thingSize, thingsPerUnscannedChunk, pageGap);
    for (;;) {
        /*
         * The following assert verifies that the current arena belongs to
         * the unscan stack since AddThingToUnscannedBag ensures that even
         * for stack's bottom prevUnscanned != NULL but rather points to self.
         */
        JS_ASSERT(arena->prevUnscanned);
        JS_ASSERT(rt->gcUnscannedArenaStackTop->prevUnscanned);
        while (arena->unscannedPages != 0) {
            /* Pick the highest set page bit, then the highest chunk bit. */
            pageIndex = JS_FLOOR_LOG2W(arena->unscannedPages);
            JS_ASSERT(pageIndex < GC_PAGE_COUNT);
            pi = (JSGCPageInfo *)(FIRST_THING_PAGE(arena) +
                                  pageIndex * GC_PAGE_SIZE);
            JS_ASSERT(pi->unscannedBitmap);
            chunkIndex = JS_FLOOR_LOG2W(pi->unscannedBitmap);
            pi->unscannedBitmap &= ~((jsuword)1 << chunkIndex);
            if (pi->unscannedBitmap == 0)
                arena->unscannedPages &= ~((jsuword)1 << pageIndex);
            thingOffset = (pageGap
                           + chunkIndex * thingsPerUnscannedChunk * thingSize);
            JS_ASSERT(thingOffset >= sizeof(JSGCPageInfo));
            thingLimit = thingOffset + thingsPerUnscannedChunk * thingSize;
            if (thingsPerUnscannedChunk != 1) {
                /*
                 * thingLimit can go beyond the last allocated thing for the
                 * last chunk as the real limit can be inside the chunk.
                 */
                if (arena->list->last == arena &&
                    arena->list->lastLimit < (pageIndex * GC_PAGE_SIZE +
                                              thingLimit)) {
                    thingLimit = (arena->list->lastLimit -
                                  pageIndex * GC_PAGE_SIZE);
                } else if (thingLimit > GC_PAGE_SIZE) {
                    thingLimit = GC_PAGE_SIZE;
                }
                JS_ASSERT(thingLimit > thingOffset);
            }
            JS_ASSERT(arena->list->last != arena ||
                      arena->list->lastLimit >= (pageIndex * GC_PAGE_SIZE +
                                                 thingLimit));
            JS_ASSERT(thingLimit <= GC_PAGE_SIZE);

            for (; thingOffset != thingLimit; thingOffset += thingSize) {
                /*
                 * XXX: inline js_GetGCThingFlags() to use already available
                 * pi.
                 */
                thing = (void *)((jsuword)pi + thingOffset);
                flagp = js_GetGCThingFlags(thing);
                if (thingsPerUnscannedChunk != 1) {
                    /*
                     * Skip free or already scanned things that share the chunk
                     * with unscanned ones.
                     */
                    if ((*flagp & (GCF_MARK|GCF_FINAL)) != (GCF_MARK|GCF_FINAL))
                        continue;
                }
                JS_ASSERT((*flagp & (GCF_MARK|GCF_FINAL))
                              == (GCF_MARK|GCF_FINAL));
                *flagp &= ~GCF_FINAL;
#ifdef DEBUG
                JS_ASSERT(rt->gcUnscannedBagSize != 0);
                --rt->gcUnscannedBagSize;

                /*
                 * Check that GC thing type is consistent with the type of
                 * things that can be put to the unscanned bag.
                 */
                switch (*flagp & GCF_TYPEMASK) {
                  case GCX_OBJECT:
# if JS_HAS_XML_SUPPORT
                  case GCX_NAMESPACE:
                  case GCX_QNAME:
                  case GCX_XML:
# endif
                    break;
                  default:
                    JS_ASSERT(0);
                }
#endif
                MarkGCThingChildren(cx, thing, flagp, JS_FALSE);
            }
        }
        /*
         * We finished scanning of the arena but we can only pop it from
         * the stack if the arena is the stack's top.
         *
         * When MarkGCThingChildren from the above calls
         * AddThingToUnscannedBag and the latter pushes new arenas to the
         * stack, we have to skip popping of this arena until it becomes
         * the top of the stack again.
         */
        if (arena == rt->gcUnscannedArenaStackTop) {
            prevArena = arena->prevUnscanned;
            arena->prevUnscanned = NULL;
            if (arena == prevArena) {
                /*
                 * prevUnscanned points to itself and we reached the bottom
                 * of the stack.
                 */
                break;
            }
            rt->gcUnscannedArenaStackTop = arena = prevArena;
        } else {
            arena = rt->gcUnscannedArenaStackTop;
        }
        if (arena->list->thingSize != thingSize)
            goto init_size;
    }
    /* The bottom arena still sits in the stack-top pointer; clear it. */
    JS_ASSERT(rt->gcUnscannedArenaStackTop);
    JS_ASSERT(!rt->gcUnscannedArenaStackTop->prevUnscanned);
    rt->gcUnscannedArenaStackTop = NULL;
    JS_ASSERT(rt->gcUnscannedBagSize == 0);
}
02436 
02437 void
02438 js_MarkGCThing(JSContext *cx, void *thing)
02439 {
02440     uint8 *flagp;
02441 
02442     if (!thing)
02443         return;
02444 
02445     flagp = js_GetGCThingFlags(thing);
02446     JS_ASSERT(*flagp != GCF_FINAL);
02447     if (*flagp & GCF_MARK)
02448         return;
02449     *flagp |= GCF_MARK;
02450 
02451     if (!cx->insideGCMarkCallback) {
02452         MarkGCThingChildren(cx, thing, flagp, JS_TRUE);
02453     } else {
02454         /*
02455          * For API compatibility we allow for the callback to assume that
02456          * after it calls js_MarkGCThing for the last time, the callback
02457          * can start to finalize its own objects that are only referenced
02458          * by unmarked GC things.
02459          *
02460          * Since we do not know which call from inside the callback is the
02461          * last, we ensure that the unscanned bag is always empty when we
02462          * return to the callback and all marked things are scanned.
02463          *
02464          * As an optimization we do not check for the stack size here and
02465          * pass JS_FALSE as the last argument to MarkGCThingChildren.
02466          * Otherwise with low C stack the thing would be pushed to the bag
02467          * just to be feed to MarkGCThingChildren from inside
02468          * ScanDelayedChildren.
02469          */
02470         cx->insideGCMarkCallback = JS_FALSE;
02471         MarkGCThingChildren(cx, thing, flagp, JS_FALSE);
02472         ScanDelayedChildren(cx);
02473         cx->insideGCMarkCallback = JS_TRUE;
02474     }
02475 }
02476 
/*
 * JSDHashTable enumerator that marks every registered GC root.  In DEBUG
 * builds it first verifies that a root holding a GC-thing actually points
 * into one of the runtime's GC arena lists, reporting a likely missing
 * JS_RemoveRoot call otherwise.
 */
JS_STATIC_DLL_CALLBACK(JSDHashOperator)
gc_root_marker(JSDHashTable *table, JSDHashEntryHdr *hdr, uint32 num, void *arg)
{
    JSGCRootHashEntry *rhe = (JSGCRootHashEntry *)hdr;
    jsval *rp = (jsval *)rhe->root;
    jsval v = *rp;

    /* Ignore null object and scalar values. */
    if (!JSVAL_IS_NULL(v) && JSVAL_IS_GCTHING(v)) {
        JSContext *cx = (JSContext *)arg;
#ifdef DEBUG
        JSBool root_points_to_gcArenaList = JS_FALSE;
        jsuword thing = (jsuword) JSVAL_TO_GCTHING(v);
        uintN i;
        JSGCArenaList *arenaList;
        JSGCArena *a;
        size_t limit;

        /* Search every free-list's arenas for the thing's address. */
        for (i = 0; i < GC_NUM_FREELISTS; i++) {
            arenaList = &cx->runtime->gcArenaList[i];
            limit = arenaList->lastLimit;
            for (a = arenaList->last; a; a = a->prev) {
                if (thing - FIRST_THING_PAGE(a) < limit) {
                    root_points_to_gcArenaList = JS_TRUE;
                    break;
                }
                /* Only the last arena is partially filled. */
                limit = GC_THINGS_SIZE;
            }
        }
        if (!root_points_to_gcArenaList && rhe->name) {
            fprintf(stderr,
"JS API usage error: the address passed to JS_AddNamedRoot currently holds an\n"
"invalid jsval.  This is usually caused by a missing call to JS_RemoveRoot.\n"
"The root's name is \"%s\".\n",
                    rhe->name);
        }
        JS_ASSERT(root_points_to_gcArenaList);
#endif

        GC_MARK(cx, JSVAL_TO_GCTHING(v), rhe->name ? rhe->name : "root");
    }
    return JS_DHASH_NEXT;
}
02520 
02521 JS_STATIC_DLL_CALLBACK(JSDHashOperator)
02522 gc_lock_marker(JSDHashTable *table, JSDHashEntryHdr *hdr, uint32 num, void *arg)
02523 {
02524     JSGCLockHashEntry *lhe = (JSGCLockHashEntry *)hdr;
02525     void *thing = (void *)lhe->thing;
02526     JSContext *cx = (JSContext *)arg;
02527 
02528     GC_MARK(cx, thing, "locked object");
02529     return JS_DHASH_NEXT;
02530 }
02531 
/*
 * Mark every jsval in the len-element vector vec that holds a GC thing,
 * labeling each with name for GC debugging/metering.  Locals are
 * underscore-prefixed to avoid capturing the caller's identifiers.
 */
#define GC_MARK_JSVALS(cx, len, vec, name)                                    \
    JS_BEGIN_MACRO                                                            \
        jsval _v, *_vp, *_end;                                                \
                                                                              \
        for (_vp = vec, _end = _vp + len; _vp < _end; _vp++) {                \
            _v = *_vp;                                                        \
            if (JSVAL_IS_GCTHING(_v))                                         \
                GC_MARK(cx, JSVAL_TO_GCTHING(_v), name);                      \
        }                                                                     \
    JS_END_MACRO
02542 
/*
 * Mark all GC things reachable from the stack frame fp: the call, arguments,
 * and variables objects, the script and its live operand-stack slots, the
 * |this| and callee objects, the argv slots (actuals, missing formals, and
 * local roots), the return value, local variables, the scope chain, any
 * sharp array, and the default XML namespace.
 */
void
js_MarkStackFrame(JSContext *cx, JSStackFrame *fp)
{
    uintN depth, nslots;

    if (fp->callobj)
        GC_MARK(cx, fp->callobj, "call object");
    if (fp->argsobj)
        GC_MARK(cx, fp->argsobj, "arguments object");
    if (fp->varobj)
        GC_MARK(cx, fp->varobj, "variables object");
    if (fp->script) {
        js_MarkScript(cx, fp->script);
        if (fp->spbase) {
            /*
             * Don't mark what has not been pushed yet, or what has been
             * popped already.
             */
            depth = fp->script->depth;
            nslots = (JS_UPTRDIFF(fp->sp, fp->spbase)
                      < depth * sizeof(jsval))
                     ? (uintN)(fp->sp - fp->spbase)
                     : depth;
            GC_MARK_JSVALS(cx, nslots, fp->spbase, "operand");
        }
    }

    /* Allow for primitive this parameter due to JSFUN_THISP_* flags. */
    JS_ASSERT(JSVAL_IS_OBJECT((jsval)fp->thisp) ||
              (fp->fun && JSFUN_THISP_FLAGS(fp->fun->flags)));
    if (JSVAL_IS_GCTHING((jsval)fp->thisp))
        GC_MARK(cx, JSVAL_TO_GCTHING((jsval)fp->thisp), "this");

    if (fp->callee)
        GC_MARK(cx, fp->callee, "callee object");

    /*
     * Mark fp->argv, even though in the common case it will be marked via our
     * caller's frame, or via a JSStackHeader if fp was pushed by an external
     * invocation.
     *
     * The hard case is when there is not enough contiguous space in the stack
     * arena for actual, missing formal, and local root (JSFunctionSpec.extra)
     * slots.  In this case, fp->argv points to new space in a new arena, and
     * marking the caller's operand stack, or an external caller's allocated
     * stack tracked by a JSStackHeader, will not mark all the values stored
     * and addressable via fp->argv.
     *
     * So in summary, solely for the hard case of moving argv due to missing
     * formals and extra roots, we must mark actuals, missing formals, and any
     * local roots arrayed at fp->argv here.
     *
     * It would be good to avoid redundant marking of the same reference, in
     * the case where fp->argv does point into caller-allocated space tracked
     * by fp->down->spbase or cx->stackHeaders.  This would allow callbacks
     * such as the forthcoming rt->gcThingCallback (bug 333078) to compute JS
     * reference counts.  So this comment deserves a FIXME bug to cite.
     */
    if (fp->argv) {
        /* Mark max(argc, nargs) slots, plus non-interpreted extra roots. */
        nslots = fp->argc;
        if (fp->fun) {
            if (fp->fun->nargs > nslots)
                nslots = fp->fun->nargs;
            if (!FUN_INTERPRETED(fp->fun))
                nslots += fp->fun->u.n.extra;
        }
        /* The two slots preceding argv are marked as well. */
        GC_MARK_JSVALS(cx, nslots + 2, fp->argv - 2, "arg");
    }
    if (JSVAL_IS_GCTHING(fp->rval))
        GC_MARK(cx, JSVAL_TO_GCTHING(fp->rval), "rval");
    if (fp->vars)
        GC_MARK_JSVALS(cx, fp->nvars, fp->vars, "var");
    GC_MARK(cx, fp->scopeChain, "scope chain");
    if (fp->sharpArray)
        GC_MARK(cx, fp->sharpArray, "sharp array");

    if (fp->xmlNamespace)
        GC_MARK(cx, fp->xmlNamespace, "xmlNamespace");
}
02622 
02623 static void
02624 MarkWeakRoots(JSContext *cx, JSWeakRoots *wr)
02625 {
02626     uintN i;
02627     void *thing;
02628 
02629     for (i = 0; i < GCX_NTYPES; i++)
02630         GC_MARK(cx, wr->newborn[i], gc_typenames[i]);
02631     if (wr->lastAtom)
02632         GC_MARK_ATOM(cx, wr->lastAtom);
02633     if (JSVAL_IS_GCTHING(wr->lastInternalResult)) {
02634         thing = JSVAL_TO_GCTHING(wr->lastInternalResult);
02635         if (thing)
02636             GC_MARK(cx, thing, "lastInternalResult");
02637     }
02638 }
02639 
02640 /*
02641  * When gckind is GC_LAST_DITCH, it indicates a call from js_NewGCThing with
02642  * rt->gcLock already held and when the lock should be kept on return.
02643  */
02644 void
02645 js_GC(JSContext *cx, JSGCInvocationKind gckind)
02646 {
02647     JSRuntime *rt;
02648     JSBool keepAtoms;
02649     uintN i, type;
02650     JSContext *iter, *acx;
02651 #if JS_HAS_GENERATORS
02652     JSGenerator **genTodoTail;
02653 #endif
02654     JSStackFrame *fp, *chain;
02655     JSStackHeader *sh;
02656     JSTempValueRooter *tvr;
02657     size_t nbytes, limit, offset;
02658     JSGCArena *a, **ap;
02659     uint8 flags, *flagp, *firstPage;
02660     JSGCThing *thing, *freeList;
02661     JSGCArenaList *arenaList;
02662     GCFinalizeOp finalizer;
02663     JSBool allClear;
02664 #ifdef JS_THREADSAFE
02665     uint32 requestDebit;
02666 #endif
02667 
02668     rt = cx->runtime;
02669 #ifdef JS_THREADSAFE
02670     /* Avoid deadlock. */
02671     JS_ASSERT(!JS_IS_RUNTIME_LOCKED(rt));
02672 #endif
02673 
02674     if (gckind == GC_LAST_DITCH) {
02675         /* The last ditch GC preserves all atoms and weak roots. */
02676         keepAtoms = JS_TRUE;
02677     } else {
02678         JS_CLEAR_WEAK_ROOTS(&cx->weakRoots);
02679         rt->gcPoke = JS_TRUE;
02680 
02681         /* Keep atoms when a suspended compile is running on another context. */
02682         keepAtoms = (rt->gcKeepAtoms != 0);
02683     }
02684 
02685     /*
02686      * Don't collect garbage if the runtime isn't up, and cx is not the last
02687      * context in the runtime.  The last context must force a GC, and nothing
02688      * should suppress that final collection or there may be shutdown leaks,
02689      * or runtime bloat until the next context is created.
02690      */
02691     if (rt->state != JSRTS_UP && gckind != GC_LAST_CONTEXT)
02692         return;
02693 
02694   restart_after_callback:
02695     /*
02696      * Let the API user decide to defer a GC if it wants to (unless this
02697      * is the last context).  Invoke the callback regardless.
02698      */
02699     if (rt->gcCallback &&
02700         !rt->gcCallback(cx, JSGC_BEGIN) &&
02701         gckind != GC_LAST_CONTEXT) {
02702         return;
02703     }
02704 
02705     /* Lock out other GC allocator and collector invocations. */
02706     if (gckind != GC_LAST_DITCH)
02707         JS_LOCK_GC(rt);
02708 
02709     /* Do nothing if no mutator has executed since the last GC. */
02710     if (!rt->gcPoke) {
02711         METER(rt->gcStats.nopoke++);
02712         if (gckind != GC_LAST_DITCH)
02713             JS_UNLOCK_GC(rt);
02714         return;
02715     }
02716     METER(rt->gcStats.poke++);
02717     rt->gcPoke = JS_FALSE;
02718 
02719 #ifdef JS_THREADSAFE
02720     JS_ASSERT(cx->thread->id == js_CurrentThreadId());
02721 
02722     /* Bump gcLevel and return rather than nest on this thread. */
02723     if (rt->gcThread == cx->thread) {
02724         JS_ASSERT(rt->gcLevel > 0);
02725         rt->gcLevel++;
02726         METER(if (rt->gcLevel > rt->gcStats.maxlevel)
02727                   rt->gcStats.maxlevel = rt->gcLevel);
02728         if (gckind != GC_LAST_DITCH)
02729             JS_UNLOCK_GC(rt);
02730         return;
02731     }
02732 
02733     /*
02734      * If we're in one or more requests (possibly on more than one context)
02735      * running on the current thread, indicate, temporarily, that all these
02736      * requests are inactive.  If cx->thread is NULL, then cx is not using
02737      * the request model, and does not contribute to rt->requestCount.
02738      */
02739     requestDebit = 0;
02740     if (cx->thread) {
02741         JSCList *head, *link;
02742 
02743         /*
02744          * Check all contexts on cx->thread->contextList for active requests,
02745          * counting each such context against requestDebit.
02746          */
02747         head = &cx->thread->contextList;
02748         for (link = head->next; link != head; link = link->next) {
02749             acx = CX_FROM_THREAD_LINKS(link);
02750             JS_ASSERT(acx->thread == cx->thread);
02751             if (acx->requestDepth)
02752                 requestDebit++;
02753         }
02754     } else {
02755         /*
02756          * We assert, but check anyway, in case someone is misusing the API.
02757          * Avoiding the loop over all of rt's contexts is a win in the event
02758          * that the GC runs only on request-less contexts with null threads,
02759          * in a special thread such as might be used by the UI/DOM/Layout
02760          * "mozilla" or "main" thread in Mozilla-the-browser.
02761          */
02762         JS_ASSERT(cx->requestDepth == 0);
02763         if (cx->requestDepth)
02764             requestDebit = 1;
02765     }
02766     if (requestDebit) {
02767         JS_ASSERT(requestDebit <= rt->requestCount);
02768         rt->requestCount -= requestDebit;
02769         if (rt->requestCount == 0)
02770             JS_NOTIFY_REQUEST_DONE(rt);
02771     }
02772 
02773     /* If another thread is already in GC, don't attempt GC; wait instead. */
02774     if (rt->gcLevel > 0) {
02775         /* Bump gcLevel to restart the current GC, so it finds new garbage. */
02776         rt->gcLevel++;
02777         METER(if (rt->gcLevel > rt->gcStats.maxlevel)
02778                   rt->gcStats.maxlevel = rt->gcLevel);
02779 
02780         /* Wait for the other thread to finish, then resume our request. */
02781         while (rt->gcLevel > 0)
02782             JS_AWAIT_GC_DONE(rt);
02783         if (requestDebit)
02784             rt->requestCount += requestDebit;
02785         if (gckind != GC_LAST_DITCH)
02786             JS_UNLOCK_GC(rt);
02787         return;
02788     }
02789 
02790     /* No other thread is in GC, so indicate that we're now in GC. */
02791     rt->gcLevel = 1;
02792     rt->gcThread = cx->thread;
02793 
02794     /* Wait for all other requests to finish. */
02795     while (rt->requestCount > 0)
02796         JS_AWAIT_REQUEST_DONE(rt);
02797 
02798 #else  /* !JS_THREADSAFE */
02799 
02800     /* Bump gcLevel and return rather than nest; the outer gc will restart. */
02801     rt->gcLevel++;
02802     METER(if (rt->gcLevel > rt->gcStats.maxlevel)
02803               rt->gcStats.maxlevel = rt->gcLevel);
02804     if (rt->gcLevel > 1)
02805         return;
02806 
02807 #endif /* !JS_THREADSAFE */
02808 
02809     /*
02810      * Set rt->gcRunning here within the GC lock, and after waiting for any
02811      * active requests to end, so that new requests that try to JS_AddRoot,
02812      * JS_RemoveRoot, or JS_RemoveRootRT block in JS_BeginRequest waiting for
02813      * rt->gcLevel to drop to zero, while request-less calls to the *Root*
02814      * APIs block in js_AddRoot or js_RemoveRoot (see above in this file),
02815      * waiting for GC to finish.
02816      */
02817     rt->gcRunning = JS_TRUE;
02818     JS_UNLOCK_GC(rt);
02819 
02820     /* Reset malloc counter. */
02821     rt->gcMallocBytes = 0;
02822 
02823     /* Drop atoms held by the property cache, and clear property weak links. */
02824     js_DisablePropertyCache(cx);
02825     js_FlushPropertyCache(cx);
02826 #ifdef DEBUG_scopemeters
02827   { extern void js_DumpScopeMeters(JSRuntime *rt);
02828     js_DumpScopeMeters(rt);
02829   }
02830 #endif
02831 
02832 #ifdef JS_THREADSAFE
02833     /*
02834      * Set all thread local freelists to NULL. We may visit a thread's
02835      * freelist more than once. To avoid redundant clearing we unroll the
02836      * current thread's step.
02837      *
02838      * Also, in case a JSScript wrapped within an object was finalized, we
02839      * null acx->thread->gsnCache.script and finish the cache's hashtable.
02840      * Note that js_DestroyScript, called from script_finalize, will have
02841      * already cleared cx->thread->gsnCache above during finalization, so we
02842      * don't have to here.
02843      */
02844     memset(cx->thread->gcFreeLists, 0, sizeof cx->thread->gcFreeLists);
02845     iter = NULL;
02846     while ((acx = js_ContextIterator(rt, JS_FALSE, &iter)) != NULL) {
02847         if (!acx->thread || acx->thread == cx->thread)
02848             continue;
02849         memset(acx->thread->gcFreeLists, 0, sizeof acx->thread->gcFreeLists);
02850         GSN_CACHE_CLEAR(&acx->thread->gsnCache);
02851     }
02852 #else
02853     /* The thread-unsafe case just has to clear the runtime's GSN cache. */
02854     GSN_CACHE_CLEAR(&rt->gsnCache);
02855 #endif
02856 
02857 restart:
02858     rt->gcNumber++;
02859     JS_ASSERT(!rt->gcUnscannedArenaStackTop);
02860     JS_ASSERT(rt->gcUnscannedBagSize == 0);
02861 
02862     /*
02863      * Mark phase.
02864      */
02865     JS_DHashTableEnumerate(&rt->gcRootsHash, gc_root_marker, cx);
02866     if (rt->gcLocksHash)
02867         JS_DHashTableEnumerate(rt->gcLocksHash, gc_lock_marker, cx);
02868     js_MarkAtomState(&rt->atomState, keepAtoms, gc_mark_atom_key_thing, cx);
02869     js_MarkWatchPoints(cx);
02870     js_MarkScriptFilenames(rt, keepAtoms);
02871     js_MarkNativeIteratorStates(cx);
02872 
02873 #if JS_HAS_GENERATORS
02874     genTodoTail = MarkScheduledGenerators(cx);
02875     JS_ASSERT(!*genTodoTail);
02876 #endif
02877 
02878     iter = NULL;
02879     while ((acx = js_ContextIterator(rt, JS_TRUE, &iter)) != NULL) {
02880         /*
02881          * Iterate frame chain and dormant chains. Temporarily tack current
02882          * frame onto the head of the dormant list to ease iteration.
02883          *
02884          * (NB: see comment on this whole "dormant" thing in js_Execute.)
02885          */
02886         chain = acx->fp;
02887         if (chain) {
02888             JS_ASSERT(!chain->dormantNext);
02889             chain->dormantNext = acx->dormantFrameChain;
02890         } else {
02891             chain = acx->dormantFrameChain;
02892         }
02893 
02894         for (fp = chain; fp; fp = chain = chain->dormantNext) {
02895             do {
02896                 js_MarkStackFrame(cx, fp);
02897             } while ((fp = fp->down) != NULL);
02898         }
02899 
02900         /* Cleanup temporary "dormant" linkage. */
02901         if (acx->fp)
02902             acx->fp->dormantNext = NULL;
02903 
02904         /* Mark other roots-by-definition in acx. */
02905         GC_MARK(cx, acx->globalObject, "global object");
02906         MarkWeakRoots(cx, &acx->weakRoots);
02907         if (acx->throwing) {
02908             if (JSVAL_IS_GCTHING(acx->exception))
02909                 GC_MARK(cx, JSVAL_TO_GCTHING(acx->exception), "exception");
02910         } else {
02911             /* Avoid keeping GC-ed junk stored in JSContext.exception. */
02912             acx->exception = JSVAL_NULL;
02913         }
02914 #if JS_HAS_LVALUE_RETURN
02915         if (acx->rval2set && JSVAL_IS_GCTHING(acx->rval2))
02916             GC_MARK(cx, JSVAL_TO_GCTHING(acx->rval2), "rval2");
02917 #endif
02918 
02919         for (sh = acx->stackHeaders; sh; sh = sh->down) {
02920             METER(rt->gcStats.stackseg++);
02921             METER(rt->gcStats.segslots += sh->nslots);
02922             GC_MARK_JSVALS(cx, sh->nslots, JS_STACK_SEGMENT(sh), "stack");
02923         }
02924 
02925         if (acx->localRootStack)
02926             js_MarkLocalRoots(cx, acx->localRootStack);
02927 
02928         for (tvr = acx->tempValueRooters; tvr; tvr = tvr->down) {
02929             switch (tvr->count) {
02930               case JSTVU_SINGLE:
02931                 if (JSVAL_IS_GCTHING(tvr->u.value)) {
02932                     GC_MARK(cx, JSVAL_TO_GCTHING(tvr->u.value),
02933                             "tvr->u.value");
02934                 }
02935                 break;
02936               case JSTVU_MARKER:
02937                 tvr->u.marker(cx, tvr);
02938                 break;
02939               case JSTVU_SPROP:
02940                 MARK_SCOPE_PROPERTY(cx, tvr->u.sprop);
02941                 break;
02942               case JSTVU_WEAK_ROOTS:
02943                 MarkWeakRoots(cx, tvr->u.weakRoots);
02944                 break;
02945               case JSTVU_SCRIPT:
02946                 js_MarkScript(cx, tvr->u.script);
02947                 break;
02948               default:
02949                 JS_ASSERT(tvr->count >= 0);
02950                 GC_MARK_JSVALS(cx, tvr->count, tvr->u.array, "tvr->u.array");
02951             }
02952         }
02953 
02954         if (acx->sharpObjectMap.depth > 0)
02955             js_GCMarkSharpMap(cx, &acx->sharpObjectMap);
02956     }
02957 
02958 #ifdef DUMP_CALL_TABLE
02959     js_DumpCallTable(cx);
02960 #endif
02961 
02962     /*
02963      * Mark children of things that caused too deep recursion during above
02964      * marking phase.
02965      */
02966     ScanDelayedChildren(cx);
02967 
02968 #if JS_HAS_GENERATORS
02969     /*
02970      * Close phase: search and mark part. See comments in
02971      * FindAndMarkObjectsToClose for details.
02972      */
02973     FindAndMarkObjectsToClose(cx, gckind, genTodoTail);
02974 
02975     /*
02976      * Mark children of things that caused too deep recursion during the
02977      * just-completed marking part of the close phase.
02978      */
02979     ScanDelayedChildren(cx);
02980 #endif
02981 
02982     JS_ASSERT(!cx->insideGCMarkCallback);
02983     if (rt->gcCallback) {
02984         cx->insideGCMarkCallback = JS_TRUE;
02985         (void) rt->gcCallback(cx, JSGC_MARK_END);
02986         JS_ASSERT(cx->insideGCMarkCallback);
02987         cx->insideGCMarkCallback = JS_FALSE;
02988     }
02989     JS_ASSERT(rt->gcUnscannedBagSize == 0);
02990 
02991     /* Finalize iterator states before the objects they iterate over. */
02992     CloseIteratorStates(cx);
02993 
02994     /*
02995      * Sweep phase.
02996      *
02997      * Finalize as we sweep, outside of rt->gcLock but with rt->gcRunning set
02998      * so that any attempt to allocate a GC-thing from a finalizer will fail,
02999      * rather than nest badly and leave the unmarked newborn to be swept.
03000      *
03001      * Finalize smaller objects before larger, to guarantee finalization of
03002      * GC-allocated obj->slots after obj.  See FreeSlots in jsobj.c.
03003      */
03004     for (i = 0; i < GC_NUM_FREELISTS; i++) {
03005         arenaList = &rt->gcArenaList[i];
03006         nbytes = GC_FREELIST_NBYTES(i);
03007         limit = arenaList->lastLimit;
03008         for (a = arenaList->last; a; a = a->prev) {
03009             JS_ASSERT(!a->prevUnscanned);
03010             JS_ASSERT(a->unscannedPages == 0);
03011             firstPage = (uint8 *) FIRST_THING_PAGE(a);
03012             for (offset = 0; offset != limit; offset += nbytes) {
03013                 if ((offset & GC_PAGE_MASK) == 0) {
03014                     JS_ASSERT(((JSGCPageInfo *)(firstPage + offset))->
03015                               unscannedBitmap == 0);
03016                     offset += PAGE_THING_GAP(nbytes);
03017                 }
03018                 JS_ASSERT(offset < limit);
03019                 flagp = a->base + offset / sizeof(JSGCThing);
03020                 if (flagp >= firstPage)
03021                     flagp += GC_THINGS_SIZE;
03022                 flags = *flagp;
03023                 if (flags & GCF_MARK) {
03024                     *flagp &= ~GCF_MARK;
03025                 } else if (!(flags & (GCF_LOCK | GCF_FINAL))) {
03026                     /* Call the finalizer with GCF_FINAL ORed into flags. */
03027                     type = flags & GCF_TYPEMASK;
03028                     finalizer = gc_finalizers[type];
03029                     if (finalizer) {
03030                         thing = (JSGCThing *)(firstPage + offset);
03031                         *flagp = (uint8)(flags | GCF_FINAL);
03032                         if (type >= GCX_EXTERNAL_STRING)
03033                             js_PurgeDeflatedStringCache(rt, (JSString *)thing);
03034                         finalizer(cx, thing);
03035                     }
03036 
03037                     /* Set flags to GCF_FINAL, signifying that thing is free. */
03038                     *flagp = GCF_FINAL;
03039                 }
03040             }
03041             limit = GC_THINGS_SIZE;
03042         }
03043     }
03044 
03045     /*
03046      * Sweep the runtime's property tree after finalizing objects, in case any
03047      * had watchpoints referencing tree nodes.  Then sweep atoms, which may be
03048      * referenced from dead property ids.
03049      */
03050     js_SweepScopeProperties(rt);
03051     js_SweepAtomState(&rt->atomState);
03052 
03053     /*
03054      * Sweep script filenames after sweeping functions in the generic loop
03055      * above. In this way when a scripted function's finalizer destroys the
03056      * script and calls rt->destroyScriptHook, the hook can still access the
03057      * script's filename. See bug 323267.
03058      */
03059     js_SweepScriptFilenames(rt);
03060 
03061     /*
03062      * Free phase.
03063      * Free any unused arenas and rebuild the JSGCThing freelist.
03064      */
03065     for (i = 0; i < GC_NUM_FREELISTS; i++) {
03066         arenaList = &rt->gcArenaList[i];
03067         ap = &arenaList->last;
03068         a = *ap;
03069         if (!a)
03070             continue;
03071 
03072         allClear = JS_TRUE;
03073         arenaList->freeList = NULL;
03074         freeList = NULL;
03075         METER(arenaList->stats.nthings = 0);
03076         METER(arenaList->stats.freelen = 0);
03077 
03078         nbytes = GC_FREELIST_NBYTES(i);
03079         limit = arenaList->lastLimit;
03080         do {
03081             METER(size_t nfree = 0);
03082             firstPage = (uint8 *) FIRST_THING_PAGE(a);
03083             for (offset = 0; offset != limit; offset += nbytes) {
03084                 if ((offset & GC_PAGE_MASK) == 0)
03085                     offset += PAGE_THING_GAP(nbytes);
03086                 JS_ASSERT(offset < limit);
03087                 flagp = a->base + offset / sizeof(JSGCThing);
03088                 if (flagp >= firstPage)
03089                     flagp += GC_THINGS_SIZE;
03090 
03091                 if (*flagp != GCF_FINAL) {
03092                     allClear = JS_FALSE;
03093                     METER(++arenaList->stats.nthings);
03094                 } else {
03095                     thing = (JSGCThing *)(firstPage + offset);
03096                     thing->flagp = flagp;
03097                     thing->next = freeList;
03098                     freeList = thing;
03099                     METER(++nfree);
03100                 }
03101             }
03102             if (allClear) {
03103                 /*
03104                  * Forget just assembled free list head for the arena
03105                  * and destroy the arena itself.
03106                  */
03107                 freeList = arenaList->freeList;
03108                 DestroyGCArena(rt, arenaList, ap);
03109             } else {
03110                 allClear = JS_TRUE;
03111                 arenaList->freeList = freeList;
03112                 ap = &a->prev;
03113                 METER(arenaList->stats.freelen += nfree);
03114                 METER(arenaList->stats.totalfreelen += nfree);
03115                 METER(++arenaList->stats.totalarenas);
03116             }
03117             limit = GC_THINGS_SIZE;
03118         } while ((a = *ap) != NULL);
03119     }
03120 
03121     if (rt->gcCallback)
03122         (void) rt->gcCallback(cx, JSGC_FINALIZE_END);
03123 #ifdef DEBUG_srcnotesize
03124   { extern void DumpSrcNoteSizeHist();
03125     DumpSrcNoteSizeHist();
03126     printf("GC HEAP SIZE %lu (%lu)\n",
03127            (unsigned long)rt->gcBytes, (unsigned long)rt->gcPrivateBytes);
03128   }
03129 #endif
03130 
03131     JS_LOCK_GC(rt);
03132 
03133     /*
03134      * We want to restart GC if js_GC was called recursively or if any of the
03135      * finalizers called js_RemoveRoot or js_UnlockGCThingRT.
03136      */
03137     if (rt->gcLevel > 1 || rt->gcPoke) {
03138         rt->gcLevel = 1;
03139         rt->gcPoke = JS_FALSE;
03140         JS_UNLOCK_GC(rt);
03141         goto restart;
03142     }
03143     js_EnablePropertyCache(cx);
03144     rt->gcLevel = 0;
03145     rt->gcLastBytes = rt->gcBytes;
03146     rt->gcRunning = JS_FALSE;
03147 
03148 #ifdef JS_THREADSAFE
03149     /* If we were invoked during a request, pay back the temporary debit. */
03150     if (requestDebit)
03151         rt->requestCount += requestDebit;
03152     rt->gcThread = NULL;
03153     JS_NOTIFY_GC_DONE(rt);
03154 
03155     /*
03156      * Unlock unless we have GC_LAST_DITCH which requires locked GC on return.
03157      */
03158     if (gckind != GC_LAST_DITCH)
03159         JS_UNLOCK_GC(rt);
03160 #endif
03161 
03162     /* Execute JSGC_END callback outside the lock. */
03163     if (rt->gcCallback) {
03164         JSWeakRoots savedWeakRoots;
03165         JSTempValueRooter tvr;
03166 
03167         if (gckind == GC_LAST_DITCH) {
03168             /*
03169              * We allow JSGC_END implementation to force a full GC or allocate
03170              * new GC things. Thus we must protect the weak roots from GC or
03171              * overwrites.
03172              */
03173             savedWeakRoots = cx->weakRoots;
03174             JS_PUSH_TEMP_ROOT_WEAK_COPY(cx, &savedWeakRoots, &tvr);
03175             JS_KEEP_ATOMS(rt);
03176             JS_UNLOCK_GC(rt);
03177         }
03178 
03179         (void) rt->gcCallback(cx, JSGC_END);
03180 
03181         if (gckind == GC_LAST_DITCH) {
03182             JS_LOCK_GC(rt);
03183             JS_UNKEEP_ATOMS(rt);
03184             JS_POP_TEMP_ROOT(cx, &tvr);
03185         } else if (gckind == GC_LAST_CONTEXT && rt->gcPoke) {
03186             /*
03187              * On shutdown iterate until JSGC_END callback stops creating
03188              * garbage.
03189              */
03190             goto restart_after_callback;
03191         }
03192     }
03193 }
03194 
03195 void
03196 js_UpdateMallocCounter(JSContext *cx, size_t nbytes)
03197 {
03198     uint32 *pbytes, bytes;
03199 
03200 #ifdef JS_THREADSAFE
03201     pbytes = &cx->thread->gcMallocBytes;
03202 #else
03203     pbytes = &cx->runtime->gcMallocBytes;
03204 #endif
03205     bytes = *pbytes;
03206     *pbytes = ((uint32)-1 - bytes <= nbytes) ? (uint32)-1 : bytes + nbytes;
03207 }