/*
 * jslock.c — JS locking stubs (lightning-sunbird 0.9+nobinonly source tree).
 */
00001 /* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
00002  *
00003  * ***** BEGIN LICENSE BLOCK *****
00004  * Version: MPL 1.1/GPL 2.0/LGPL 2.1
00005  *
00006  * The contents of this file are subject to the Mozilla Public License Version
00007  * 1.1 (the "License"); you may not use this file except in compliance with
00008  * the License. You may obtain a copy of the License at
00009  * http://www.mozilla.org/MPL/
00010  *
00011  * Software distributed under the License is distributed on an "AS IS" basis,
00012  * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
00013  * for the specific language governing rights and limitations under the
00014  * License.
00015  *
00016  * The Original Code is Mozilla Communicator client code, released
00017  * March 31, 1998.
00018  *
00019  * The Initial Developer of the Original Code is
00020  * Netscape Communications Corporation.
00021  * Portions created by the Initial Developer are Copyright (C) 1998
00022  * the Initial Developer. All Rights Reserved.
00023  *
00024  * Contributor(s):
00025  *
00026  * Alternatively, the contents of this file may be used under the terms of
00027  * either of the GNU General Public License Version 2 or later (the "GPL"),
00028  * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
00029  * in which case the provisions of the GPL or the LGPL are applicable instead
00030  * of those above. If you wish to allow use of your version of this file only
00031  * under the terms of either the GPL or the LGPL, and not to allow others to
00032  * use your version of this file under the terms of the MPL, indicate your
00033  * decision by deleting the provisions above and replace them with the notice
00034  * and other provisions required by the GPL or the LGPL. If you do not delete
00035  * the provisions above, a recipient may use your version of this file under
00036  * the terms of any one of the MPL, the GPL or the LGPL.
00037  *
00038  * ***** END LICENSE BLOCK ***** */
00039 
00040 #ifdef JS_THREADSAFE
00041 
00042 /*
00043  * JS locking stubs.
00044  */
00045 #include "jsstddef.h"
00046 #include <stdlib.h>
00047 #include "jspubtd.h"
00048 #include "jsutil.h" /* Added by JSIFY */
00049 #include "jstypes.h"
00050 #include "jsbit.h"
00051 #include "jscntxt.h"
00052 #include "jsdtoa.h"
00053 #include "jsgc.h"
00054 #include "jslock.h"
00055 #include "jsscope.h"
00056 #include "jsstr.h"
00057 
/*
 * ReadWord is a plain word-sized load; it marks the places that re-read a
 * live lock word (tl->owner) rather than a cached local copy.
 */
#define ReadWord(W) (W)

#ifndef NSPR_LOCK

#include <memory.h>

/*
 * Pool of global NSPR locks, indexed by hashing an address-like id.
 * Allocated and sized by js_SetupLocks, released by js_CleanupLocks.
 */
static PRLock **global_locks;
static uint32 global_lock_count = 1;    /* JS_BIT(global_locks_log2), a power of two */
static uint32 global_locks_log2 = 0;
static uint32 global_locks_mask = 0;    /* JS_BITMASK(global_locks_log2) */

/*
 * Hash an id to a global lock index.  The >> 2 drops the low bits of what
 * is presumably a word-aligned pointer — TODO confirm alignment of ids.
 */
#define GLOBAL_LOCK_INDEX(id)   (((uint32)(id) >> 2) & global_locks_mask)
00070 
00071 static void
00072 js_LockGlobal(void *id)
00073 {
00074     uint32 i = GLOBAL_LOCK_INDEX(id);
00075     PR_Lock(global_locks[i]);
00076 }
00077 
00078 static void
00079 js_UnlockGlobal(void *id)
00080 {
00081     uint32 i = GLOBAL_LOCK_INDEX(id);
00082     PR_Unlock(global_locks[i]);
00083 }
00084 
/* Exclude Alpha NT. */
#if defined(_WIN32) && defined(_M_IX86)
#pragma warning( disable : 4035 )  /* "no return value": result is left in EAX */

/*
 * js_CompareAndSwap: atomically, if (*w == ov) { *w = nv; return 1; } else
 * return 0.  Each platform branch below implements this with the native
 * compare-and-swap instruction; platforms lacking one must build with
 * NSPR_LOCK defined (see the #error at the end of the chain).
 */
static JS_INLINE int
js_CompareAndSwap(jsword *w, jsword ov, jsword nv)
{
    __asm {
        mov eax, ov
        mov ecx, nv
        mov ebx, w
        lock cmpxchg [ebx], ecx
        sete al
        and eax, 1h
    }
}

#elif defined(__GNUC__) && defined(__i386__)

/* Note: This fails on 386 cpus, cmpxchgl is a >= 486 instruction */
static JS_INLINE int
js_CompareAndSwap(jsword *w, jsword ov, jsword nv)
{
    unsigned int res;

    /* cmpxchgl implicitly compares against %eax (input "a" = ov); sete
       captures ZF, so res is 1 on a successful swap, else 0. */
    __asm__ __volatile__ (
                          "lock\n"
                          "cmpxchgl %2, (%1)\n"
                          "sete %%al\n"
                          "andl $1, %%eax\n"
                          : "=a" (res)
                          : "r" (w), "r" (nv), "a" (ov)
                          : "cc", "memory");
    return (int)res;
}

#elif (defined(__USLC__) || defined(_SCO_DS)) && defined(i386)

/* Note: This fails on 386 cpus, cmpxchgl is a >= 486 instruction */

/* USL/SCO asm function: one body per register/memory placement of the
   arguments (%ureg/%mem selector lines), all computing the same CAS. */
asm int
js_CompareAndSwap(jsword *w, jsword ov, jsword nv)
{
%ureg w, nv;
       movl   ov,%eax
       lock
       cmpxchgl nv,(w)
       sete   %al
       andl   $1,%eax
%ureg w;  mem ov, nv;
       movl   ov,%eax
       movl   nv,%ecx
       lock
       cmpxchgl %ecx,(w)
       sete   %al
       andl   $1,%eax
%ureg nv;
       movl   ov,%eax
       movl   w,%edx
       lock
       cmpxchgl nv,(%edx)
       sete   %al
       andl   $1,%eax
%mem w, ov, nv;
       movl   ov,%eax
       movl   nv,%ecx
       movl   w,%edx
       lock
       cmpxchgl %ecx,(%edx)
       sete   %al
       andl   $1,%eax
}
#pragma asm full_optimization js_CompareAndSwap

#elif defined(SOLARIS) && defined(sparc) && defined(ULTRA_SPARC)

static JS_INLINE int
js_CompareAndSwap(jsword *w, jsword ov, jsword nv)
{
#if defined(__GNUC__)
    unsigned int res;
    /* cas leaves the old *w value in the nv register, so ov == nv after the
       instruction signals success; hence the ov != nv precondition. */
    JS_ASSERT(ov != nv);
    asm volatile ("\
stbar\n\
cas [%1],%2,%3\n\
cmp %2,%3\n\
be,a 1f\n\
mov 1,%0\n\
mov 0,%0\n\
1:"
                  : "=r" (res)
                  : "r" (w), "r" (ov), "r" (nv));
    return (int)res;
#else /* !__GNUC__ */
    extern int compare_and_swap(jsword*, jsword, jsword);
    JS_ASSERT(ov != nv);
    return compare_and_swap(w, ov, nv);
#endif
}

#elif defined(AIX)

#include <sys/atomic_op.h>

static JS_INLINE int
js_CompareAndSwap(jsword *w, jsword ov, jsword nv)
{
    /* _check_lock returns FALSE (0) when the swap succeeded, hence the !. */
    return !_check_lock((atomic_p)w, ov, nv);
}

#else

#error "Define NSPR_LOCK if your platform lacks a compare-and-swap instruction."

#endif /* arch-tests */

#endif /* !NSPR_LOCK */
00202 
/*
 * Initialize a thin lock to the unowned state.  Under NSPR_LOCK every thin
 * lock is backed by a real NSPR lock from the start; otherwise the whole
 * struct is zeroed (no owner, no fat lock allocated yet).
 */
void
js_InitLock(JSThinLock *tl)
{
#ifdef NSPR_LOCK
    tl->owner = 0;
    tl->fat = (JSFatLock*)JS_NEW_LOCK();
#else
    memset(tl, 0, sizeof(JSThinLock));
#endif
}
00213 
/*
 * Tear down a thin lock.  Under NSPR_LOCK the backing lock is destroyed and
 * owner is poisoned with 0xdeadbeef to catch use-after-finish; otherwise the
 * lock must already be unowned with no fat lock attached.
 */
void
js_FinishLock(JSThinLock *tl)
{
#ifdef NSPR_LOCK
    tl->owner = 0xdeadbeef;
    if (tl->fat)
        JS_DESTROY_LOCK(((JSLock*)tl->fat));
#else
    JS_ASSERT(tl->owner == 0);
    JS_ASSERT(tl->fat == NULL);
#endif
}
00226 
00227 static void js_Dequeue(JSThinLock *);
00228 
#ifdef DEBUG_SCOPE_COUNT

#include <stdio.h>
#include "jsdhash.h"

/* Debug-only scope-lock trace: an unbuffered log file plus a stub hash
   table remembering the most recent op recorded per scope. */
static FILE *logfp;
static JSDHashTable logtbl;

typedef struct logentry {
    JSDHashEntryStub stub;      /* hash key is the scope pointer */
    char             op;        /* operation code passed to logit/LOGIT */
    const char       *file;     /* __FILE__ of the call site */
    int              line;      /* __LINE__ of the call site */
} logentry;

/*
 * Record a lock operation on scope.  Lazily opens /tmp/scope.log (made
 * unbuffered so entries survive a crash) and lazily initializes logtbl.
 * All failures are silently ignored: this is instrumentation only.
 */
static void
logit(JSScope *scope, char op, const char *file, int line)
{
    logentry *entry;

    if (!logfp) {
        logfp = fopen("/tmp/scope.log", "w");
        if (!logfp)
            return;
        setvbuf(logfp, NULL, _IONBF, 0);
    }
    fprintf(logfp, "%p %c %s %d\n", scope, op, file, line);

    if (!logtbl.entryStore &&
        !JS_DHashTableInit(&logtbl, JS_DHashGetStubOps(), NULL,
                           sizeof(logentry), 100)) {
        return;
    }
    /* JS_DHASH_ADD either creates or returns the existing entry; either
       way we overwrite it so the table holds the latest op per scope. */
    entry = (logentry *) JS_DHashTableOperate(&logtbl, scope, JS_DHASH_ADD);
    if (!entry)
        return;
    entry->stub.key = scope;
    entry->op = op;
    entry->file = file;
    entry->line = line;
}

/* Remove scope's entry from the trace table, if the table was initialized. */
void
js_unlog_scope(JSScope *scope)
{
    if (!logtbl.entryStore)
        return;
    (void) JS_DHashTableOperate(&logtbl, scope, JS_DHASH_REMOVE);
}

# define LOGIT(scope,op) logit(scope, op, __FILE__, __LINE__)

#else

# define LOGIT(scope,op) /* nothing */

#endif /* DEBUG_SCOPE_COUNT */
00286 
00287 /*
00288  * Return true if scope's ownercx, or the ownercx of a single-threaded scope
00289  * for which ownercx is waiting to become multi-threaded and shared, is cx.
00290  * That condition implies deadlock in ClaimScope if cx's thread were to wait
00291  * to share scope.
00292  *
00293  * (i) rt->gcLock held
00294  */
00295 static JSBool
00296 WillDeadlock(JSScope *scope, JSContext *cx)
00297 {
00298     JSContext *ownercx;
00299 
00300     do {
00301         ownercx = scope->ownercx;
00302         if (ownercx == cx) {
00303             JS_RUNTIME_METER(cx->runtime, deadlocksAvoided);
00304             return JS_TRUE;
00305         }
00306     } while (ownercx && (scope = ownercx->scopeToShare) != NULL);
00307     return JS_FALSE;
00308 }
00309 
00310 /*
00311  * Make scope multi-threaded, i.e. share its ownership among contexts in rt
00312  * using a "thin" or (if necessary due to contention) "fat" lock.  Called only
00313  * from ClaimScope, immediately below, when we detect deadlock were we to wait
00314  * for scope's lock, because its ownercx is waiting on a scope owned by the
00315  * calling cx.
00316  *
00317  * (i) rt->gcLock held
00318  */
static void
ShareScope(JSRuntime *rt, JSScope *scope)
{
    JSScope **todop;

    /* If scope was queued on rt->scopeSharingTodo, unlink it and wake all
       threads waiting in ClaimScope on rt->scopeSharingDone. */
    if (scope->u.link) {
        for (todop = &rt->scopeSharingTodo; *todop != scope;
             todop = &(*todop)->u.link) {
            JS_ASSERT(*todop != NO_SCOPE_SHARING_TODO);
        }
        *todop = scope->u.link;
        scope->u.link = NULL;       /* null u.link for sanity ASAP */
        JS_NOTIFY_ALL_CONDVAR(rt->scopeSharingDone);
    }
    /* u.link and u.count share a union: only init the lock and set count
       after the unlink above is complete. */
    js_InitLock(&scope->lock);
    if (scope == rt->setSlotScope) {
        /*
         * Nesting locks on another thread that's using scope->ownercx: give
         * the held lock a reentrancy count of 1 and set its lock.owner field
         * directly (no compare-and-swap needed while scope->ownercx is still
         * non-null).  See below in ClaimScope, before the ShareScope call,
         * for more on why this is necessary.
         *
         * If NSPR_LOCK is defined, we cannot deadlock holding rt->gcLock and
         * acquiring scope->lock.fat here, against another thread holding that
         * fat lock and trying to grab rt->gcLock.  This is because no other
         * thread can attempt to acquire scope->lock.fat until scope->ownercx
         * is null *and* our thread has released rt->gcLock, which interlocks
         * scope->ownercx's transition to null against tests of that member
         * in ClaimScope.
         */
        scope->lock.owner = CX_THINLOCK_ID(scope->ownercx);
#ifdef NSPR_LOCK
        JS_ACQUIRE_LOCK((JSLock*)scope->lock.fat);
#endif
        scope->u.count = 1;
    } else {
        scope->u.count = 0;
    }
    js_FinishSharingScope(rt, scope);
}
00360 
00361 /*
00362  * js_FinishSharingScope is the tail part of ShareScope, split out to become a
00363  * subroutine of JS_EndRequest too.  The bulk of the work here involves making
00364  * mutable strings in the scope's object's slots be immutable.  We have to do
00365  * this because such strings will soon be available to multiple threads, so
00366  * their buffers can't be realloc'd any longer in js_ConcatStrings, and their
00367  * members can't be modified by js_ConcatStrings, js_MinimizeDependentStrings,
00368  * or js_UndependString.
00369  *
00370  * The last bit of work done by js_FinishSharingScope nulls scope->ownercx and
00371  * updates rt->sharedScopes.
00372  */
/*
 * Make the string held in jsval v immutable in place.  A dependent string
 * must be undepended first; if that fails (OOM), the containing slot *vp is
 * overwritten with JSVAL_VOID rather than letting a still-mutable string
 * escape to other threads.
 */
#define MAKE_STRING_IMMUTABLE(rt, v, vp)                                      \
    JS_BEGIN_MACRO                                                            \
        JSString *str_ = JSVAL_TO_STRING(v);                                  \
        uint8 *flagp_ = js_GetGCThingFlags(str_);                             \
        if (*flagp_ & GCF_MUTABLE) {                                          \
            if (JSSTRING_IS_DEPENDENT(str_) &&                                \
                !js_UndependString(NULL, str_)) {                             \
                JS_RUNTIME_METER(rt, badUndependStrings);                     \
                *vp = JSVAL_VOID;                                             \
            } else {                                                          \
                *flagp_ &= ~GCF_MUTABLE;                                      \
            }                                                                 \
        }                                                                     \
    JS_END_MACRO
00387 
00388 void
00389 js_FinishSharingScope(JSRuntime *rt, JSScope *scope)
00390 {
00391     JSObject *obj;
00392     uint32 nslots;
00393     jsval v, *vp, *end;
00394 
00395     obj = scope->object;
00396     nslots = JS_MIN(obj->map->freeslot, obj->map->nslots);
00397     for (vp = obj->slots, end = vp + nslots; vp < end; vp++) {
00398         v = *vp;
00399         if (JSVAL_IS_STRING(v))
00400             MAKE_STRING_IMMUTABLE(rt, v, vp);
00401     }
00402 
00403     scope->ownercx = NULL;  /* NB: set last, after lock init */
00404     JS_RUNTIME_METER(rt, sharedScopes);
00405 }
00406 
00407 /*
00408  * Given a scope with apparently non-null ownercx different from cx, try to
00409  * set ownercx to cx, claiming exclusive (single-threaded) ownership of scope.
00410  * If we claim ownership, return true.  Otherwise, we wait for ownercx to be
00411  * set to null (indicating that scope is multi-threaded); or if waiting would
00412  * deadlock, we set ownercx to null ourselves via ShareScope.  In any case,
00413  * once ownercx is null we return false.
00414  */
static JSBool
ClaimScope(JSScope *scope, JSContext *cx)
{
    JSRuntime *rt;
    JSContext *ownercx;
    jsrefcount saveDepth;
    PRStatus stat;

    rt = cx->runtime;
    JS_RUNTIME_METER(rt, claimAttempts);
    JS_LOCK_GC(rt);

    /* Reload in case ownercx went away while we blocked on the lock. */
    while ((ownercx = scope->ownercx) != NULL) {
        /*
         * Avoid selflock if ownercx is dead, or is not running a request, or
         * has the same thread as cx.  Set scope->ownercx to cx so that the
         * matching JS_UNLOCK_SCOPE or JS_UNLOCK_OBJ macro call will take the
         * fast path around the corresponding js_UnlockScope or js_UnlockObj
         * function call.
         *
         * If scope->u.link is non-null, scope has already been inserted on
         * the rt->scopeSharingTodo list, because another thread's context
         * already wanted to lock scope while ownercx was running a request.
         * We can't claim any scope whose u.link is non-null at this point,
         * even if ownercx->requestDepth is 0 (see below where we suspend our
         * request before waiting on rt->scopeSharingDone).
         */
        if (!scope->u.link &&
            (!js_ValidContextPointer(rt, ownercx) ||
             !ownercx->requestDepth ||
             ownercx->thread == cx->thread)) {
            JS_ASSERT(scope->u.count == 0);
            scope->ownercx = cx;
            JS_UNLOCK_GC(rt);
            JS_RUNTIME_METER(rt, claimedScopes);
            return JS_TRUE;
        }

        /*
         * Avoid deadlock if scope's owner context is waiting on a scope that
         * we own, by revoking scope's ownership.  This approach to deadlock
         * avoidance works because the engine never nests scope locks, except
         * for the notable case of js_SetProtoOrParent (see jsobj.c).
         *
         * If cx could hold locks on ownercx->scopeToShare, or if ownercx
         * could hold locks on scope, we would need to keep reentrancy counts
         * for all such "flyweight" (ownercx != NULL) locks, so that control
         * would unwind properly once these locks became "thin" or "fat".
         * Apart from the js_SetProtoOrParent exception, the engine promotes
         * a scope from exclusive to shared access only when locking, never
         * when holding or unlocking.
         *
         * If ownercx's thread is calling js_SetProtoOrParent, trying to lock
         * the inner scope (the scope of the object being set as the prototype
         * of the outer object), ShareScope will find the outer object's scope
         * at rt->setSlotScope.  If it's the same as scope, we give it a lock
         * held by ownercx's thread with reentrancy count of 1, then we return
         * here and break.  After that we unwind to js_[GS]etSlotThreadSafe or
         * js_LockScope (our caller), where we wait on the newly-fattened lock
         * until ownercx's thread unwinds from js_SetProtoOrParent.
         *
         * Avoid deadlock before any of this scope/context cycle detection if
         * cx is on the active GC's thread, because in that case, no requests
         * will run until the GC completes.  Any scope wanted by the GC (from
         * a finalizer) that can't be claimed must be slated for sharing.
         */
        if (rt->gcThread == cx->thread ||
            (ownercx->scopeToShare &&
             WillDeadlock(ownercx->scopeToShare, cx))) {
            ShareScope(rt, scope);
            break;
        }

        /*
         * Thanks to the non-zero NO_SCOPE_SHARING_TODO link terminator, we
         * can decide whether scope is on rt->scopeSharingTodo with a single
         * non-null test, and avoid double-insertion bugs.
         */
        if (!scope->u.link) {
            scope->u.link = rt->scopeSharingTodo;
            rt->scopeSharingTodo = scope;
            js_HoldObjectMap(cx, &scope->map);
        }

        /*
         * Inline JS_SuspendRequest before we wait on rt->scopeSharingDone,
         * saving and clearing cx->requestDepth so we don't deadlock if the
         * GC needs to run on ownercx.
         *
         * Unlike JS_SuspendRequest and JS_EndRequest, we must take care not
         * to decrement rt->requestCount if cx is active on the GC's thread,
         * because the GC has already reduced rt->requestCount to exclude all
         * such contexts.
         */
        saveDepth = cx->requestDepth;
        if (saveDepth) {
            cx->requestDepth = 0;
            if (rt->gcThread != cx->thread) {
                JS_ASSERT(rt->requestCount > 0);
                rt->requestCount--;
                if (rt->requestCount == 0)
                    JS_NOTIFY_REQUEST_DONE(rt);
            }
        }

        /*
         * We know that some other thread's context owns scope, which is now
         * linked onto rt->scopeSharingTodo, awaiting the end of that other
         * thread's request.  So it is safe to wait on rt->scopeSharingDone.
         */
        cx->scopeToShare = scope;
        stat = PR_WaitCondVar(rt->scopeSharingDone, PR_INTERVAL_NO_TIMEOUT);
        JS_ASSERT(stat != PR_FAILURE);

        /*
         * Inline JS_ResumeRequest after waiting on rt->scopeSharingDone,
         * restoring cx->requestDepth.  Same note as above for the inlined,
         * specialized JS_SuspendRequest code: beware rt->gcThread.
         */
        if (saveDepth) {
            if (rt->gcThread != cx->thread) {
                while (rt->gcLevel > 0)
                    JS_AWAIT_GC_DONE(rt);
                rt->requestCount++;
            }
            cx->requestDepth = saveDepth;
        }

        /*
         * Don't clear cx->scopeToShare until after we're through waiting on
         * all condition variables protected by rt->gcLock -- that includes
         * rt->scopeSharingDone *and* rt->gcDone (hidden in JS_AWAIT_GC_DONE,
         * in the inlined JS_ResumeRequest code immediately above).
         *
         * Otherwise, the GC could easily deadlock with another thread that
         * owns a scope wanted by a finalizer.  By keeping cx->scopeToShare
         * set till here, we ensure that such deadlocks are detected, which
         * results in the finalized object's scope being shared (it must, of
         * course, have other, live objects sharing it).
         */
        cx->scopeToShare = NULL;
    }

    JS_UNLOCK_GC(rt);
    return JS_FALSE;
}
00562 
00563 /* Exported to js.c, which calls it via OBJ_GET_* and JSVAL_IS_* macros. */
JS_FRIEND_API(jsval)
js_GetSlotThreadSafe(JSContext *cx, JSObject *obj, uint32 slot)
{
    jsval v;
    JSScope *scope;
#ifndef NSPR_LOCK
    JSThinLock *tl;
    jsword me;
#endif

    /*
     * We handle non-native objects via JSObjectOps.getRequiredSlot, treating
     * all slots starting from 0 as required slots.  A property definition or
     * some prior arrangement must have allocated slot.
     *
     * Note once again (see jspubtd.h, before JSGetRequiredSlotOp's typedef)
     * the crucial distinction between a |required slot number| that's passed
     * to the get/setRequiredSlot JSObjectOps, and a |reserved slot index|
     * passed to the JS_Get/SetReservedSlot APIs.
     */
    if (!OBJ_IS_NATIVE(obj))
        return OBJ_GET_REQUIRED_SLOT(cx, obj, slot);

    /*
     * Native object locking is inlined here to optimize the single-threaded
     * and contention-free multi-threaded cases.
     */
    scope = OBJ_SCOPE(obj);
    JS_ASSERT(scope->ownercx != cx);
    JS_ASSERT(obj->slots && slot < obj->map->freeslot);

    /*
     * Avoid locking if called from the GC (see GC_AWARE_GET_SLOT in jsobj.h).
     * Also avoid locking an object owning a sealed scope.  If neither of those
     * special cases applies, try to claim scope's flyweight lock from whatever
     * context may have had it in an earlier request.
     */
    if (CX_THREAD_IS_RUNNING_GC(cx) ||
        (SCOPE_IS_SEALED(scope) && scope->object == obj) ||
        (scope->ownercx && ClaimScope(scope, cx))) {
        return obj->slots[slot];
    }

#ifndef NSPR_LOCK
    /* Try to take the thin lock with a single CAS from the unowned state. */
    tl = &scope->lock;
    me = CX_THINLOCK_ID(cx);
    JS_ASSERT(CURRENT_THREAD_IS_ME(me));
    if (js_CompareAndSwap(&tl->owner, 0, me)) {
        /*
         * Got the lock with one compare-and-swap.  Even so, someone else may
         * have mutated obj so it now has its own scope and lock, which would
         * require either a restart from the top of this routine, or a thin
         * lock release followed by fat lock acquisition.
         */
        if (scope == OBJ_SCOPE(obj)) {
            v = obj->slots[slot];
            /* Releasing CAS fails only if a waiter bit appeared: fatten via
               the normal unlock path so the waiter is woken. */
            if (!js_CompareAndSwap(&tl->owner, me, 0)) {
                /* Assert that scope locks never revert to flyweight. */
                JS_ASSERT(scope->ownercx != cx);
                LOGIT(scope, '1');
                scope->u.count = 1;
                js_UnlockObj(cx, obj);
            }
            return v;
        }
        /* Scope changed under us: drop the thin lock (dequeueing any waiter)
           and fall through to the slow full-lock path. */
        if (!js_CompareAndSwap(&tl->owner, me, 0))
            js_Dequeue(tl);
    }
    else if (Thin_RemoveWait(ReadWord(tl->owner)) == me) {
        /* cx's thread already holds the thin lock (reentrant fast path). */
        return obj->slots[slot];
    }
#endif

    js_LockObj(cx, obj);
    v = obj->slots[slot];

    /*
     * Test whether cx took ownership of obj's scope during js_LockObj.
     *
     * This does not mean that a given scope reverted to flyweight from "thin"
     * or "fat" -- it does mean that obj's map pointer changed due to another
     * thread setting a property, requiring obj to cease sharing a prototype
     * object's scope (whose lock was not flyweight, else we wouldn't be here
     * in the first place!).
     */
    scope = OBJ_SCOPE(obj);
    if (scope->ownercx != cx)
        js_UnlockScope(cx, scope);
    return v;
}
00654 
void
js_SetSlotThreadSafe(JSContext *cx, JSObject *obj, uint32 slot, jsval v)
{
    JSScope *scope;
#ifndef NSPR_LOCK
    JSThinLock *tl;
    jsword me;
#endif

    /* Any string stored in a thread-safe object must be immutable. */
    if (JSVAL_IS_STRING(v))
        MAKE_STRING_IMMUTABLE(cx->runtime, v, &v);

    /*
     * We handle non-native objects via JSObjectOps.setRequiredSlot, as above
     * for the Get case.
     */
    if (!OBJ_IS_NATIVE(obj)) {
        OBJ_SET_REQUIRED_SLOT(cx, obj, slot, v);
        return;
    }

    /*
     * Native object locking is inlined here to optimize the single-threaded
     * and contention-free multi-threaded cases.
     */
    scope = OBJ_SCOPE(obj);
    JS_ASSERT(scope->ownercx != cx);
    JS_ASSERT(obj->slots && slot < obj->map->freeslot);

    /*
     * Avoid locking if called from the GC (see GC_AWARE_GET_SLOT in jsobj.h).
     * Also avoid locking an object owning a sealed scope.  If neither of those
     * special cases applies, try to claim scope's flyweight lock from whatever
     * context may have had it in an earlier request.
     */
    if (CX_THREAD_IS_RUNNING_GC(cx) ||
        (SCOPE_IS_SEALED(scope) && scope->object == obj) ||
        (scope->ownercx && ClaimScope(scope, cx))) {
        obj->slots[slot] = v;
        return;
    }

#ifndef NSPR_LOCK
    /* Thin-lock fast path: same CAS protocol as js_GetSlotThreadSafe. */
    tl = &scope->lock;
    me = CX_THINLOCK_ID(cx);
    JS_ASSERT(CURRENT_THREAD_IS_ME(me));
    if (js_CompareAndSwap(&tl->owner, 0, me)) {
        if (scope == OBJ_SCOPE(obj)) {
            obj->slots[slot] = v;
            /* Releasing CAS fails only if a waiter arrived: fatten and take
               the full unlock path so the waiter is woken. */
            if (!js_CompareAndSwap(&tl->owner, me, 0)) {
                /* Assert that scope locks never revert to flyweight. */
                JS_ASSERT(scope->ownercx != cx);
                LOGIT(scope, '1');
                scope->u.count = 1;
                js_UnlockObj(cx, obj);
            }
            return;
        }
        /* Scope changed under us: release and fall into the slow path. */
        if (!js_CompareAndSwap(&tl->owner, me, 0))
            js_Dequeue(tl);
    }
    else if (Thin_RemoveWait(ReadWord(tl->owner)) == me) {
        /* cx's thread already holds the thin lock (reentrant fast path). */
        obj->slots[slot] = v;
        return;
    }
#endif

    js_LockObj(cx, obj);
    obj->slots[slot] = v;

    /*
     * Same drill as above, in js_GetSlotThreadSafe.  Note that we cannot
     * assume obj has its own mutable scope (where scope->object == obj) yet,
     * because OBJ_SET_SLOT is called for the "universal", common slots such
     * as JSSLOT_PROTO and JSSLOT_PARENT, without a prior js_GetMutableScope.
     * See also the JSPROP_SHARED attribute and its usage.
     */
    scope = OBJ_SCOPE(obj);
    if (scope->ownercx != cx)
        js_UnlockScope(cx, scope);
}
00737 
00738 #ifndef NSPR_LOCK
00739 
00740 static JSFatLock *
00741 NewFatlock()
00742 {
00743     JSFatLock *fl = (JSFatLock *)malloc(sizeof(JSFatLock)); /* for now */
00744     if (!fl) return NULL;
00745     fl->susp = 0;
00746     fl->next = NULL;
00747     fl->prevp = NULL;
00748     fl->slock = PR_NewLock();
00749     fl->svar = PR_NewCondVar(fl->slock);
00750     return fl;
00751 }
00752 
00753 static void
00754 DestroyFatlock(JSFatLock *fl)
00755 {
00756     PR_DestroyLock(fl->slock);
00757     PR_DestroyCondVar(fl->svar);
00758     free(fl);
00759 }
00760 
00761 static JSFatLock *
00762 ListOfFatlocks(int listc)
00763 {
00764     JSFatLock *m;
00765     JSFatLock *m0;
00766     int i;
00767 
00768     JS_ASSERT(listc>0);
00769     m0 = m = NewFatlock();
00770     for (i=1; i<listc; i++) {
00771         m->next = NewFatlock();
00772         m = m->next;
00773     }
00774     return m0;
00775 }
00776 
00777 static void
00778 DeleteListOfFatlocks(JSFatLock *m)
00779 {
00780     JSFatLock *m0;
00781     for (; m; m=m0) {
00782         m0 = m->next;
00783         DestroyFatlock(m);
00784     }
00785 }
00786 
/* Per-global-lock-bucket tables of free and taken fat locks (GetFatlock /
   PutFatlock), sized by js_SetupLocks, freed by js_CleanupLocks. */
static JSFatLockTable *fl_list_table = NULL;
static uint32          fl_list_table_len = 0;
static uint32          fl_list_chunk_len = 0;   /* fat locks allocated per refill */
00790 
00791 static JSFatLock *
00792 GetFatlock(void *id)
00793 {
00794     JSFatLock *m;
00795 
00796     uint32 i = GLOBAL_LOCK_INDEX(id);
00797     if (fl_list_table[i].free == NULL) {
00798 #ifdef DEBUG
00799         if (fl_list_table[i].taken)
00800             printf("Ran out of fat locks!\n");
00801 #endif
00802         fl_list_table[i].free = ListOfFatlocks(fl_list_chunk_len);
00803     }
00804     m = fl_list_table[i].free;
00805     fl_list_table[i].free = m->next;
00806     m->susp = 0;
00807     m->next = fl_list_table[i].taken;
00808     m->prevp = &fl_list_table[i].taken;
00809     if (fl_list_table[i].taken)
00810         fl_list_table[i].taken->prevp = &m->next;
00811     fl_list_table[i].taken = m;
00812     return m;
00813 }
00814 
00815 static void
00816 PutFatlock(JSFatLock *m, void *id)
00817 {
00818     uint32 i;
00819     if (m == NULL)
00820         return;
00821 
00822     /* Unlink m from fl_list_table[i].taken. */
00823     *m->prevp = m->next;
00824     if (m->next)
00825         m->next->prevp = m->prevp;
00826 
00827     /* Insert m in fl_list_table[i].free. */
00828     i = GLOBAL_LOCK_INDEX(id);
00829     m->next = fl_list_table[i].free;
00830     fl_list_table[i].free = m;
00831 }
00832 
00833 #endif /* !NSPR_LOCK */
00834 
00835 JSBool
00836 js_SetupLocks(int listc, int globc)
00837 {
00838 #ifndef NSPR_LOCK
00839     uint32 i;
00840 
00841     if (global_locks)
00842         return JS_TRUE;
00843 #ifdef DEBUG
00844     if (listc > 10000 || listc < 0) /* listc == fat lock list chunk length */
00845         printf("Bad number %d in js_SetupLocks()!\n", listc);
00846     if (globc > 100 || globc < 0)   /* globc == number of global locks */
00847         printf("Bad number %d in js_SetupLocks()!\n", listc);
00848 #endif
00849     global_locks_log2 = JS_CeilingLog2(globc);
00850     global_locks_mask = JS_BITMASK(global_locks_log2);
00851     global_lock_count = JS_BIT(global_locks_log2);
00852     global_locks = (PRLock **) malloc(global_lock_count * sizeof(PRLock*));
00853     if (!global_locks)
00854         return JS_FALSE;
00855     for (i = 0; i < global_lock_count; i++) {
00856         global_locks[i] = PR_NewLock();
00857         if (!global_locks[i]) {
00858             global_lock_count = i;
00859             js_CleanupLocks();
00860             return JS_FALSE;
00861         }
00862     }
00863     fl_list_table = (JSFatLockTable *) malloc(i * sizeof(JSFatLockTable));
00864     if (!fl_list_table) {
00865         js_CleanupLocks();
00866         return JS_FALSE;
00867     }
00868     fl_list_table_len = global_lock_count;
00869     for (i = 0; i < global_lock_count; i++)
00870         fl_list_table[i].free = fl_list_table[i].taken = NULL;
00871     fl_list_chunk_len = listc;
00872 #endif /* !NSPR_LOCK */
00873     return JS_TRUE;
00874 }
00875 
00876 void
00877 js_CleanupLocks()
00878 {
00879 #ifndef NSPR_LOCK
00880     uint32 i;
00881 
00882     if (global_locks) {
00883         for (i = 0; i < global_lock_count; i++)
00884             PR_DestroyLock(global_locks[i]);
00885         free(global_locks);
00886         global_locks = NULL;
00887         global_lock_count = 1;
00888         global_locks_log2 = 0;
00889         global_locks_mask = 0;
00890     }
00891     if (fl_list_table) {
00892         for (i = 0; i < fl_list_table_len; i++) {
00893             DeleteListOfFatlocks(fl_list_table[i].free);
00894             fl_list_table[i].free = NULL;
00895             DeleteListOfFatlocks(fl_list_table[i].taken);
00896             fl_list_table[i].taken = NULL;
00897         }
00898         free(fl_list_table);
00899         fl_list_table = NULL;
00900         fl_list_table_len = 0;
00901     }
00902 #endif /* !NSPR_LOCK */
00903 }
00904 
00905 #ifndef NSPR_LOCK
00906 
00907 /*
00908  * Fast locking and unlocking is implemented by delaying the allocation of a
00909  * system lock (fat lock) until contention.  As long as a locking thread A
00910  * runs uncontended, the lock is represented solely by storing A's identity in
00911  * the object being locked.
00912  *
00913  * If another thread B tries to lock the object currently locked by A, B is
00914  * enqueued into a fat lock structure (which might have to be allocated and
00915  * pointed to by the object), and suspended using NSPR conditional variables
00916  * (wait).  A wait bit (Bacon bit) is set in the lock word of the object,
00917  * signalling to A that when releasing the lock, B must be dequeued and
00918  * notified.
00919  *
00920  * The basic operation of the locking primitives (js_Lock, js_Unlock,
00921  * js_Enqueue, and js_Dequeue) is compare-and-swap.  Hence, when locking into
00922  * the word pointed at by p, compare-and-swap(p, 0, A) success implies that p
00923  * is unlocked.  Similarly, when unlocking p, if compare-and-swap(p, A, 0)
00924  * succeeds this implies that p is uncontended (no one is waiting because the
00925  * wait bit is not set).
00926  *
00927  * When dequeueing, the lock is released, and one of the threads suspended on
00928  * the lock is notified.  If other threads still are waiting, the wait bit is
00929  * kept (in js_Enqueue), and if not, the fat lock is deallocated.
00930  *
00931  * The functions js_Enqueue, js_Dequeue, js_SuspendThread, and js_ResumeThread
00932  * are serialized using a global lock.  For scalability, a hashtable of global
00933  * locks is used, which is indexed modulo the thin lock pointer.
00934  */
00935 
/*
 * Suspend the calling thread on tl's fat lock until it is notified by
 * js_ResumeThread, allocating the fat lock on first contention.
 *
 * Invariants:
 * (i)  global lock is held
 * (ii) fl->susp >= 0
 *
 * Returns nonzero if the fat lock was deallocated (this thread was the
 * last waiter), in which case the caller drops its wait bit.
 */
static int
js_SuspendThread(JSThinLock *tl)
{
    JSFatLock *fl;
    PRStatus stat;

    /* Lazily allocate the fat lock at first contention. */
    if (tl->fat == NULL)
        fl = tl->fat = GetFatlock(tl);
    else
        fl = tl->fat;
    JS_ASSERT(fl->susp >= 0);
    fl->susp++;
    /*
     * Take fl->slock before releasing the global lock, matching the lock
     * order in js_ResumeThread so the notify cannot slip in between.
     */
    PR_Lock(fl->slock);
    js_UnlockGlobal(tl);
    stat = PR_WaitCondVar(fl->svar, PR_INTERVAL_NO_TIMEOUT);
    JS_ASSERT(stat != PR_FAILURE);
    PR_Unlock(fl->slock);
    js_LockGlobal(tl);
    fl->susp--;
    /* Last waiter out returns the fat lock to the free pool. */
    if (fl->susp == 0) {
        PutFatlock(fl, tl);
        tl->fat = NULL;
    }
    return tl->fat == NULL;
}
00966 
/*
 * Wake one thread suspended on tl's fat lock (in js_SuspendThread).
 *
 * Invariants:
 * (i)  global lock is held
 * (ii) fl->susp > 0
 */
static void
js_ResumeThread(JSThinLock *tl)
{
    JSFatLock *fl = tl->fat;
    PRStatus stat;

    JS_ASSERT(fl != NULL);
    JS_ASSERT(fl->susp > 0);
    /* Acquire fl->slock before dropping the global lock; see js_SuspendThread. */
    PR_Lock(fl->slock);
    js_UnlockGlobal(tl);
    stat = PR_NotifyCondVar(fl->svar);
    JS_ASSERT(stat != PR_FAILURE);
    PR_Unlock(fl->slock);
}
00985 
/*
 * Contend for thin lock tl on behalf of thread me: repeatedly either set
 * the wait bit on a held lock and suspend, or win the lock by swapping me
 * into the zeroed owner word.  Serialized by the global lock.
 */
static void
js_Enqueue(JSThinLock *tl, jsword me)
{
    jsword o, n;

    js_LockGlobal(tl);
    for (;;) {
        o = ReadWord(tl->owner);
        n = Thin_SetWait(o);
        if (o != 0 && js_CompareAndSwap(&tl->owner, o, n)) {
            /*
             * Lock is held: wait bit is now set, so sleep.  If the fat
             * lock went away we were the last waiter and retry without
             * the wait bit; otherwise keep it set for the next owner.
             */
            if (js_SuspendThread(tl))
                me = Thin_RemoveWait(me);
            else
                me = Thin_SetWait(me);
        }
        else if (js_CompareAndSwap(&tl->owner, 0, me)) {
            /* Lock was free; we own it now. */
            js_UnlockGlobal(tl);
            return;
        }
    }
}
01007 
/*
 * Release thin lock tl, whose wait bit must be set, and wake one of the
 * threads suspended on its fat lock.  Serialized by the global lock.
 */
static void
js_Dequeue(JSThinLock *tl)
{
    jsword o;

    js_LockGlobal(tl);
    o = ReadWord(tl->owner);
    JS_ASSERT(Thin_GetWait(o) != 0);
    JS_ASSERT(tl->fat != NULL);
    if (!js_CompareAndSwap(&tl->owner, o, 0)) /* release it */
        JS_ASSERT(0);
    js_ResumeThread(tl);
}
01021 
/*
 * Acquire thin lock tl for thread me.  Fast path: one compare-and-swap of
 * the owner word from 0 to me.  On contention, fall back to js_Enqueue;
 * recursive locking by the same thread is asserted against in DEBUG builds.
 */
JS_INLINE void
js_Lock(JSThinLock *tl, jsword me)
{
    JS_ASSERT(CURRENT_THREAD_IS_ME(me));
    if (js_CompareAndSwap(&tl->owner, 0, me))
        return;
    if (Thin_RemoveWait(ReadWord(tl->owner)) != me)
        js_Enqueue(tl, me);
#ifdef DEBUG
    else
        JS_ASSERT(0);
#endif
}
01035 
/*
 * Release thin lock tl held by thread me.  Fast path: compare-and-swap the
 * owner word from me back to 0.  If that fails, the wait bit must have been
 * set by a contender, so hand off via js_Dequeue.
 */
JS_INLINE void
js_Unlock(JSThinLock *tl, jsword me)
{
    JS_ASSERT(CURRENT_THREAD_IS_ME(me));

    /*
     * Since we can race with the CompareAndSwap in js_Enqueue, we need
     * to use a C_A_S here as well -- Arjan van de Ven 30/1/08
     */
    if (js_CompareAndSwap(&tl->owner, me, 0))
        return;

    JS_ASSERT(Thin_GetWait(tl->owner));
    if (Thin_RemoveWait(ReadWord(tl->owner)) == me)
        js_Dequeue(tl);
#ifdef DEBUG
    else
        JS_ASSERT(0);   /* unbalanced unlock */
#endif
}
01056 
01057 #endif /* !NSPR_LOCK */
01058 
/*
 * Acquire the runtime-wide lock; in DEBUG builds record the owner thread
 * (only after acquisition) for js_IsRuntimeLocked.
 */
void
js_LockRuntime(JSRuntime *rt)
{
    PR_Lock(rt->rtLock);
#ifdef DEBUG
    rt->rtLockOwner = js_CurrentThreadId();
#endif
}
01067 
/*
 * Release the runtime-wide lock, clearing the DEBUG owner record while the
 * lock is still held.
 */
void
js_UnlockRuntime(JSRuntime *rt)
{
#ifdef DEBUG
    rt->rtLockOwner = 0;
#endif
    PR_Unlock(rt->rtLock);
}
01076 
/*
 * Lock scope on behalf of cx.  No-op while the GC thread runs (GC "locks"
 * are virtual; see js_TransferScopeLock) or when cx claims single-threaded
 * ownership via ClaimScope.  Otherwise either bump the nesting count (this
 * thread already holds the thin lock) or take the thin lock with count 1.
 */
void
js_LockScope(JSContext *cx, JSScope *scope)
{
    jsword me = CX_THINLOCK_ID(cx);

    JS_ASSERT(CURRENT_THREAD_IS_ME(me));
    JS_ASSERT(scope->ownercx != cx);
    if (CX_THREAD_IS_RUNNING_GC(cx))
        return;
    if (scope->ownercx && ClaimScope(scope, cx))
        return;

    if (Thin_RemoveWait(ReadWord(scope->lock.owner)) == me) {
        /* Re-entrant lock by the owning thread: just count the nesting. */
        JS_ASSERT(scope->u.count > 0);
        LOGIT(scope, '+');
        scope->u.count++;
    } else {
        JSThinLock *tl = &scope->lock;
        JS_LOCK0(tl, me);
        JS_ASSERT(scope->u.count == 0);
        LOGIT(scope, '1');
        scope->u.count = 1;
    }
}
01101 
/*
 * Unlock scope on behalf of cx: undo one level of nesting, releasing the
 * thin lock only when the count drops to zero.  GC-thread "locks" and
 * sealed-scope locks recorded by js_LockObj are unwound without touching
 * the thin lock.
 */
void
js_UnlockScope(JSContext *cx, JSScope *scope)
{
    jsword me = CX_THINLOCK_ID(cx);

    /* We hope compilers use me instead of reloading cx->thread in the macro. */
    if (CX_THREAD_IS_RUNNING_GC(cx))
        return;
    if (cx->lockedSealedScope == scope) {
        cx->lockedSealedScope = NULL;
        return;
    }

    /*
     * If scope->ownercx is not null, it's likely that two contexts not using
     * requests nested locks for scope.  The first context, cx here, claimed
     * scope; the second, scope->ownercx here, re-claimed it because the first
     * was not in a request, or was on the same thread.  We don't want to keep
     * track of such nesting, because it penalizes the common non-nested case.
     * Instead of asserting here and silently coping, we simply re-claim scope
     * for cx and return.
     *
     * See http://bugzilla.mozilla.org/show_bug.cgi?id=229200 for a real world
     * case where an asymmetric thread model (Mozilla's main thread is known
     * to be the only thread that runs the GC) combined with multiple contexts
     * per thread has led to such request-less nesting.
     */
    if (scope->ownercx) {
        JS_ASSERT(scope->u.count == 0);
        JS_ASSERT(scope->lock.owner == 0);
        scope->ownercx = cx;
        return;
    }

    JS_ASSERT(scope->u.count > 0);
    if (Thin_RemoveWait(ReadWord(scope->lock.owner)) != me) {
        JS_ASSERT(0);   /* unbalanced unlock */
        return;
    }
    LOGIT(scope, '-');
    /* Drop the thin lock only when the outermost nesting level unwinds. */
    if (--scope->u.count == 0) {
        JSThinLock *tl = &scope->lock;
        JS_UNLOCK0(tl, me);
    }
}
01147 
/*
 * Transfer oldscope's lock state to newscope (which cx must already have
 * locked), then completely unlock oldscope.
 *
 * NB: oldscope may be null if our caller is js_GetMutableScope and it just
 * dropped the last reference to oldscope.
 */
void
js_TransferScopeLock(JSContext *cx, JSScope *oldscope, JSScope *newscope)
{
    jsword me;
    JSThinLock *tl;

    JS_ASSERT(JS_IS_SCOPE_LOCKED(cx, newscope));

    /*
     * If the last reference to oldscope went away, newscope needs no lock
     * state update.
     */
    if (!oldscope)
        return;
    JS_ASSERT(JS_IS_SCOPE_LOCKED(cx, oldscope));

    /*
     * Special case in js_LockScope and js_UnlockScope for the GC calling
     * code that locks, unlocks, or mutates.  Nothing to do in these cases,
     * because scope and newscope were "locked" by the GC thread, so neither
     * was actually locked.
     */
    if (CX_THREAD_IS_RUNNING_GC(cx))
        return;

    /*
     * Special case in js_LockObj and js_UnlockScope for locking the sealed
     * scope of an object that owns that scope (the prototype or mutated obj
     * for which OBJ_SCOPE(obj)->object == obj), and unlocking it.
     */
    JS_ASSERT(cx->lockedSealedScope != newscope);
    if (cx->lockedSealedScope == oldscope) {
        JS_ASSERT(newscope->ownercx == cx ||
                  (!newscope->ownercx && newscope->u.count == 1));
        cx->lockedSealedScope = NULL;
        return;
    }

    /*
     * If oldscope is single-threaded, there's nothing to do.
     */
    if (oldscope->ownercx) {
        JS_ASSERT(oldscope->ownercx == cx);
        JS_ASSERT(newscope->ownercx == cx ||
                  (!newscope->ownercx && newscope->u.count == 1));
        return;
    }

    /*
     * We transfer oldscope->u.count only if newscope is not single-threaded.
     * Flow unwinds from here through some number of JS_UNLOCK_SCOPE and/or
     * JS_UNLOCK_OBJ macro calls, which will decrement newscope->u.count only
     * if they find newscope->ownercx != cx.
     */
    if (newscope->ownercx != cx) {
        JS_ASSERT(!newscope->ownercx);
        newscope->u.count = oldscope->u.count;
    }

    /*
     * Reset oldscope's lock state so that it is completely unlocked.
     */
    LOGIT(oldscope, '0');
    oldscope->u.count = 0;
    tl = &oldscope->lock;
    me = CX_THINLOCK_ID(cx);
    JS_UNLOCK0(tl, me);
}
01220 
/*
 * Lock the scope of native object obj, retrying if a mutator swaps obj's
 * scope between the read and the lock.  A sealed scope owned by obj is
 * "locked" merely by recording it in cx->lockedSealedScope.
 */
void
js_LockObj(JSContext *cx, JSObject *obj)
{
    JSScope *scope;

    JS_ASSERT(OBJ_IS_NATIVE(obj));

    /*
     * We must test whether the GC is calling and return without mutating any
     * state, especially cx->lockedSealedScope.  Note asymmetry with respect to
     * js_UnlockObj, which is a thin-layer on top of js_UnlockScope.
     */
    if (CX_THREAD_IS_RUNNING_GC(cx))
        return;

    for (;;) {
        scope = OBJ_SCOPE(obj);
        if (SCOPE_IS_SEALED(scope) && scope->object == obj &&
            !cx->lockedSealedScope) {
            cx->lockedSealedScope = scope;
            return;
        }

        js_LockScope(cx, scope);

        /* If obj still has this scope, we're done. */
        if (scope == OBJ_SCOPE(obj))
            return;

        /* Lost a race with a mutator; retry with obj's new scope. */
        js_UnlockScope(cx, scope);
    }
}
01254 
01255 void
01256 js_UnlockObj(JSContext *cx, JSObject *obj)
01257 {
01258     JS_ASSERT(OBJ_IS_NATIVE(obj));
01259     js_UnlockScope(cx, OBJ_SCOPE(obj));
01260 }
01261 
01262 #ifdef DEBUG
01263 
01264 JSBool
01265 js_IsRuntimeLocked(JSRuntime *rt)
01266 {
01267     return js_CurrentThreadId() == rt->rtLockOwner;
01268 }
01269 
01270 JSBool
01271 js_IsObjLocked(JSContext *cx, JSObject *obj)
01272 {
01273     JSScope *scope = OBJ_SCOPE(obj);
01274 
01275     return MAP_IS_NATIVE(&scope->map) && js_IsScopeLocked(cx, scope);
01276 }
01277 
/*
 * DEBUG-only: true iff cx (or its thread) holds scope's lock under any of
 * the locking regimes: GC running, sealed-scope "lock", single-threaded
 * ownership, or the shared thin/fat lock.
 */
JSBool
js_IsScopeLocked(JSContext *cx, JSScope *scope)
{
    /* Special case: the GC locking any object's scope, see js_LockScope. */
    if (CX_THREAD_IS_RUNNING_GC(cx))
        return JS_TRUE;

    /* Special case: locked object owning a sealed scope, see js_LockObj. */
    if (cx->lockedSealedScope == scope)
        return JS_TRUE;

    /*
     * General case: the scope is either exclusively owned (by cx), or it has
     * a thin or fat lock to cope with shared (concurrent) ownership.
     */
    if (scope->ownercx) {
        JS_ASSERT(scope->ownercx == cx || scope->ownercx->thread == cx->thread);
        return JS_TRUE;
    }
    return js_CurrentThreadId() ==
           ((JSThread *)Thin_RemoveWait(ReadWord(scope->lock.owner)))->id;
}
01300 
01301 #endif /* DEBUG */
01302 #endif /* JS_THREADSAFE */