1 /* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
2 *
3 * ***** BEGIN LICENSE BLOCK *****
4 * Version: MPL 1.1/GPL 2.0/LGPL 2.1
5 *
6 * The contents of this file are subject to the Mozilla Public License Version
7 * 1.1 (the "License"); you may not use this file except in compliance with
8 * the License. You may obtain a copy of the License at
9 * http://www.mozilla.org/MPL/
10 *
11 * Software distributed under the License is distributed on an "AS IS" basis,
12 * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
13 * for the specific language governing rights and limitations under the
14 * License.
15 *
16 * The Original Code is Mozilla Communicator client code, released
17 * March 31, 1998.
18 *
19 * The Initial Developer of the Original Code is
20 * Netscape Communications Corporation.
21 * Portions created by the Initial Developer are Copyright (C) 1998
22 * the Initial Developer. All Rights Reserved.
23 *
24 * Contributor(s):
25 *
26 * Alternatively, the contents of this file may be used under the terms of
27 * either of the GNU General Public License Version 2 or later (the "GPL"),
28 * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
29 * in which case the provisions of the GPL or the LGPL are applicable instead
30 * of those above. If you wish to allow use of your version of this file only
31 * under the terms of either the GPL or the LGPL, and not to allow others to
32 * use your version of this file under the terms of the MPL, indicate your
33 * decision by deleting the provisions above and replace them with the notice
34 * and other provisions required by the GPL or the LGPL. If you do not delete
35 * the provisions above, a recipient may use your version of this file under
36 * the terms of any one of the MPL, the GPL or the LGPL.
37 *
38 * ***** END LICENSE BLOCK ***** */
40 #ifdef JS_THREADSAFE
42 /*
43 * JS locking stubs.
44 */
45 #include "jsstddef.h"
46 #include <stdlib.h>
47 #include "jspubtd.h"
48 #include "prthread.h"
49 #include "jsutil.h" /* Added by JSIFY */
50 #include "jstypes.h"
51 #include "jsbit.h"
52 #include "jscntxt.h"
53 #include "jsdtoa.h"
54 #include "jsgc.h"
55 #include "jslock.h"
56 #include "jsscope.h"
57 #include "jsstr.h"
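/*
 * ReadWord marks loads of a lock's owner word; a plain load is assumed to be
 * atomic for aligned words on the supported platforms.
 */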
59 #define ReadWord(W) (W)
61 #ifndef NSPR_LOCK
63 #include <memory.h>
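/*
 * A small table of NSPR locks serializes fat-lock allocation and the
 * enqueue/dequeue slow paths below; a thin lock's address selects a bucket
 * via GLOBAL_LOCK_INDEX.
 */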
65 static PRLock **global_locks;
66 static uint32 global_lock_count = 1;
67 static uint32 global_locks_log2 = 0;
68 static uint32 global_locks_mask = 0;
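/*
 * Hash a lock address into the global lock table. The low two bits of an
 * aligned pointer carry no information, so shift them out before masking
 * with global_locks_mask (global_lock_count is a power of two).
 */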
70 #define GLOBAL_LOCK_INDEX(id) (((uint32)(id) >> 2) & global_locks_mask)
72 static void
73 js_LockGlobal(void *id)
74 {
75 uint32 i = GLOBAL_LOCK_INDEX(id);
76 PR_Lock(global_locks[i]);
77 }
79 static void
80 js_UnlockGlobal(void *id)
81 {
82 uint32 i = GLOBAL_LOCK_INDEX(id);
83 PR_Unlock(global_locks[i]);
84 }
86 /* Exclude Alpha NT. */
87 #if defined(_WIN32) && defined(_M_IX86)
88 #pragma warning( disable : 4035 )
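/*
 * Warning 4035 ("no return value") is disabled above because the result of
 * the comparison is left in EAX by the inline assembly below.
 */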
90 static JS_INLINE int
91 js_CompareAndSwap(jsword *w, jsword ov, jsword nv)
92 {
93 __asm {
94 mov eax, ov
95 mov ecx, nv
96 mov ebx, w
97 lock cmpxchg [ebx], ecx
98 sete al
99 and eax, 1h
100 }
101 }
103 #elif defined(__GNUC__) && defined(__i386__)
/* Note: this fails on 386 CPUs; cmpxchgl requires a 486 or newer processor. */
106 static JS_INLINE int
107 js_CompareAndSwap(jsword *w, jsword ov, jsword nv)
108 {
109 unsigned int res;
111 __asm__ __volatile__ (
112 "lock\n"
113 "cmpxchgl %2, (%1)\n"
114 "sete %%al\n"
115 "andl $1, %%eax\n"
116 : "=a" (res)
117 : "r" (w), "r" (nv), "a" (ov)
118 : "cc", "memory");
119 return (int)res;
120 }
122 #elif defined(SOLARIS) && defined(sparc) && defined(ULTRA_SPARC)
124 static JS_INLINE int
125 js_CompareAndSwap(jsword *w, jsword ov, jsword nv)
126 {
127 #if defined(__GNUC__)
128 unsigned int res;
129 JS_ASSERT(ov != nv);
130 asm volatile ("\
131 stbar\n\
132 cas [%1],%2,%3\n\
133 cmp %2,%3\n\
134 be,a 1f\n\
135 mov 1,%0\n\
136 mov 0,%0\n\
137 1:"
138 : "=r" (res)
139 : "r" (w), "r" (ov), "r" (nv));
140 return (int)res;
141 #else /* !__GNUC__ */
142 extern int compare_and_swap(jsword*, jsword, jsword);
143 JS_ASSERT(ov != nv);
144 return compare_and_swap(w, ov, nv);
145 #endif
146 }
148 #elif defined(AIX)
150 #include <sys/atomic_op.h>
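/*
 * AIX's _check_lock performs the compare-and-swap and returns FALSE on
 * success, hence the negation below.
 */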
152 static JS_INLINE int
153 js_CompareAndSwap(jsword *w, jsword ov, jsword nv)
154 {
155 return !_check_lock((atomic_p)w, ov, nv);
156 }
158 #else
160 #error "Define NSPR_LOCK if your platform lacks a compare-and-swap instruction."
162 #endif /* arch-tests */
164 #endif /* !NSPR_LOCK */
166 jsword
167 js_CurrentThreadId()
168 {
169 return CurrentThreadId();
170 }
172 void
173 js_InitLock(JSThinLock *tl)
174 {
175 #ifdef NSPR_LOCK
176 tl->owner = 0;
177 tl->fat = (JSFatLock*)JS_NEW_LOCK();
178 #else
179 memset(tl, 0, sizeof(JSThinLock));
180 #endif
181 }
183 void
184 js_FinishLock(JSThinLock *tl)
185 {
186 #ifdef NSPR_LOCK
187 tl->owner = 0xdeadbeef;
188 if (tl->fat)
189 JS_DESTROY_LOCK(((JSLock*)tl->fat));
190 #else
191 JS_ASSERT(tl->owner == 0);
192 JS_ASSERT(tl->fat == NULL);
193 #endif
194 }
196 static void js_Dequeue(JSThinLock *);
198 #ifdef DEBUG_SCOPE_COUNT
200 #include <stdio.h>
201 #include "jsdhash.h"
203 static FILE *logfp;
204 static JSDHashTable logtbl;
206 typedef struct logentry {
207 JSDHashEntryStub stub;
208 char op;
209 const char *file;
210 int line;
211 } logentry;
213 static void
214 logit(JSScope *scope, char op, const char *file, int line)
215 {
216 logentry *entry;
218 if (!logfp) {
219 logfp = fopen("/tmp/scope.log", "w");
220 if (!logfp)
221 return;
222 setvbuf(logfp, NULL, _IONBF, 0);
223 }
224 fprintf(logfp, "%p %c %s %d\n", scope, op, file, line);
226 if (!logtbl.entryStore &&
227 !JS_DHashTableInit(&logtbl, JS_DHashGetStubOps(), NULL,
228 sizeof(logentry), 100)) {
229 return;
230 }
231 entry = (logentry *) JS_DHashTableOperate(&logtbl, scope, JS_DHASH_ADD);
232 if (!entry)
233 return;
234 entry->stub.key = scope;
235 entry->op = op;
236 entry->file = file;
237 entry->line = line;
238 }
240 void
241 js_unlog_scope(JSScope *scope)
242 {
243 if (!logtbl.entryStore)
244 return;
245 (void) JS_DHashTableOperate(&logtbl, scope, JS_DHASH_REMOVE);
246 }
248 # define LOGIT(scope,op) logit(scope, op, __FILE__, __LINE__)
250 #else
252 # define LOGIT(scope,op) /* nothing */
254 #endif /* DEBUG_SCOPE_COUNT */
/*
 * Return true if cx is scope's ownercx, or the ownercx of some scope reached
 * from scope by following ownercx->scopeToShare links. That condition implies
 * deadlock in ClaimScope if cx's thread were to wait to share scope.
 *
 * (i) rt->gcLock held
 */
264 static JSBool
265 WillDeadlock(JSScope *scope, JSContext *cx)
266 {
267 JSContext *ownercx;
269 do {
270 ownercx = scope->ownercx;
271 if (ownercx == cx) {
272 JS_RUNTIME_METER(cx->runtime, deadlocksAvoided);
273 return JS_TRUE;
274 }
275 } while (ownercx && (scope = ownercx->scopeToShare) != NULL);
276 return JS_FALSE;
277 }
279 /*
280 * Make scope multi-threaded, i.e. share its ownership among contexts in rt
281 * using a "thin" or (if necessary due to contention) "fat" lock. Called only
 * from ClaimScope, immediately below, when we detect that waiting for scope's
 * lock would deadlock because its ownercx is waiting on a scope owned by the
284 * calling cx.
285 *
286 * (i) rt->gcLock held
287 */
288 static void
289 ShareScope(JSRuntime *rt, JSScope *scope)
290 {
291 JSScope **todop;
293 if (scope->u.link) {
294 for (todop = &rt->scopeSharingTodo; *todop != scope;
295 todop = &(*todop)->u.link) {
296 JS_ASSERT(*todop != NO_SCOPE_SHARING_TODO);
297 }
298 *todop = scope->u.link;
299 scope->u.link = NULL; /* null u.link for sanity ASAP */
300 JS_NOTIFY_ALL_CONDVAR(rt->scopeSharingDone);
301 }
302 js_InitLock(&scope->lock);
303 if (scope == rt->setSlotScope) {
304 /*
305 * Nesting locks on another thread that's using scope->ownercx: give
306 * the held lock a reentrancy count of 1 and set its lock.owner field
307 * directly (no compare-and-swap needed while scope->ownercx is still
308 * non-null). See below in ClaimScope, before the ShareScope call,
309 * for more on why this is necessary.
310 *
311 * If NSPR_LOCK is defined, we cannot deadlock holding rt->gcLock and
312 * acquiring scope->lock.fat here, against another thread holding that
313 * fat lock and trying to grab rt->gcLock. This is because no other
314 * thread can attempt to acquire scope->lock.fat until scope->ownercx
315 * is null *and* our thread has released rt->gcLock, which interlocks
316 * scope->ownercx's transition to null against tests of that member
317 * in ClaimScope.
318 */
319 scope->lock.owner = scope->ownercx->thread;
320 #ifdef NSPR_LOCK
321 JS_ACQUIRE_LOCK((JSLock*)scope->lock.fat);
322 #endif
323 scope->u.count = 1;
324 } else {
325 scope->u.count = 0;
326 }
327 js_FinishSharingScope(rt, scope);
328 }
330 /*
331 * js_FinishSharingScope is the tail part of ShareScope, split out to become a
332 * subroutine of JS_EndRequest too. The bulk of the work here involves making
333 * mutable strings in the scope's object's slots be immutable. We have to do
334 * this because such strings will soon be available to multiple threads, so
335 * their buffers can't be realloc'd any longer in js_ConcatStrings, and their
336 * members can't be modified by js_ConcatStrings, js_MinimizeDependentStrings,
337 * or js_UndependString.
338 *
339 * The last bit of work done by js_FinishSharingScope nulls scope->ownercx and
340 * updates rt->sharedScopes.
341 */
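/*
 * Make the string value v immutable: un-depend it if necessary, then clear
 * its GCF_MUTABLE flag. If un-depending fails, overwrite the slot with
 * JSVAL_VOID rather than leave a still-mutable string visible to other
 * threads.
 */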
342 #define MAKE_STRING_IMMUTABLE(rt, v, vp) \
343 JS_BEGIN_MACRO \
344 JSString *str_ = JSVAL_TO_STRING(v); \
345 uint8 *flagp_ = js_GetGCThingFlags(str_); \
346 if (*flagp_ & GCF_MUTABLE) { \
347 if (JSSTRING_IS_DEPENDENT(str_) && \
348 !js_UndependString(NULL, str_)) { \
349 JS_RUNTIME_METER(rt, badUndependStrings); \
350 *vp = JSVAL_VOID; \
351 } else { \
352 *flagp_ &= ~GCF_MUTABLE; \
353 } \
354 } \
355 JS_END_MACRO
357 void
358 js_FinishSharingScope(JSRuntime *rt, JSScope *scope)
359 {
360 JSObject *obj;
361 uint32 nslots;
362 jsval v, *vp, *end;
364 obj = scope->object;
365 nslots = JS_MIN(obj->map->freeslot, obj->map->nslots);
366 for (vp = obj->slots, end = vp + nslots; vp < end; vp++) {
367 v = *vp;
368 if (JSVAL_IS_STRING(v))
369 MAKE_STRING_IMMUTABLE(rt, v, vp);
370 }
372 scope->ownercx = NULL; /* NB: set last, after lock init */
373 JS_RUNTIME_METER(rt, sharedScopes);
374 }
376 /*
377 * Given a scope with apparently non-null ownercx different from cx, try to
378 * set ownercx to cx, claiming exclusive (single-threaded) ownership of scope.
379 * If we claim ownership, return true. Otherwise, we wait for ownercx to be
380 * set to null (indicating that scope is multi-threaded); or if waiting would
381 * deadlock, we set ownercx to null ourselves via ShareScope. In any case,
382 * once ownercx is null we return false.
383 */
384 static JSBool
385 ClaimScope(JSScope *scope, JSContext *cx)
386 {
387 JSRuntime *rt;
388 JSContext *ownercx;
389 jsrefcount saveDepth;
390 PRStatus stat;
392 rt = cx->runtime;
393 JS_RUNTIME_METER(rt, claimAttempts);
394 JS_LOCK_GC(rt);
396 /* Reload in case ownercx went away while we blocked on the lock. */
397 while ((ownercx = scope->ownercx) != NULL) {
398 /*
399 * Avoid selflock if ownercx is dead, or is not running a request, or
400 * has the same thread as cx. Set scope->ownercx to cx so that the
401 * matching JS_UNLOCK_SCOPE or JS_UNLOCK_OBJ macro call will take the
402 * fast path around the corresponding js_UnlockScope or js_UnlockObj
403 * function call.
404 *
405 * If scope->u.link is non-null, scope has already been inserted on
406 * the rt->scopeSharingTodo list, because another thread's context
407 * already wanted to lock scope while ownercx was running a request.
408 * We can't claim any scope whose u.link is non-null at this point,
409 * even if ownercx->requestDepth is 0 (see below where we suspend our
410 * request before waiting on rt->scopeSharingDone).
411 */
412 if (!scope->u.link &&
413 (!js_ValidContextPointer(rt, ownercx) ||
414 !ownercx->requestDepth ||
415 ownercx->thread == cx->thread)) {
416 JS_ASSERT(scope->u.count == 0);
417 scope->ownercx = cx;
418 JS_UNLOCK_GC(rt);
419 JS_RUNTIME_METER(rt, claimedScopes);
420 return JS_TRUE;
421 }
423 /*
424 * Avoid deadlock if scope's owner context is waiting on a scope that
425 * we own, by revoking scope's ownership. This approach to deadlock
426 * avoidance works because the engine never nests scope locks, except
427 * for the notable case of js_SetProtoOrParent (see jsobj.c).
428 *
429 * If cx could hold locks on ownercx->scopeToShare, or if ownercx
430 * could hold locks on scope, we would need to keep reentrancy counts
431 * for all such "flyweight" (ownercx != NULL) locks, so that control
432 * would unwind properly once these locks became "thin" or "fat".
433 * Apart from the js_SetProtoOrParent exception, the engine promotes
434 * a scope from exclusive to shared access only when locking, never
435 * when holding or unlocking.
436 *
437 * If ownercx's thread is calling js_SetProtoOrParent, trying to lock
438 * the inner scope (the scope of the object being set as the prototype
439 * of the outer object), ShareScope will find the outer object's scope
440 * at rt->setSlotScope. If it's the same as scope, we give it a lock
441 * held by ownercx's thread with reentrancy count of 1, then we return
442 * here and break. After that we unwind to js_[GS]etSlotThreadSafe or
443 * js_LockScope (our caller), where we wait on the newly-fattened lock
444 * until ownercx's thread unwinds from js_SetProtoOrParent.
445 *
446 * Avoid deadlock before any of this scope/context cycle detection if
447 * cx is on the active GC's thread, because in that case, no requests
448 * will run until the GC completes. Any scope wanted by the GC (from
449 * a finalizer) that can't be claimed must be slated for sharing.
450 */
451 if (rt->gcThread == cx->thread ||
452 (ownercx->scopeToShare &&
453 WillDeadlock(ownercx->scopeToShare, cx))) {
454 ShareScope(rt, scope);
455 break;
456 }
458 /*
459 * Thanks to the non-zero NO_SCOPE_SHARING_TODO link terminator, we
460 * can decide whether scope is on rt->scopeSharingTodo with a single
461 * non-null test, and avoid double-insertion bugs.
462 */
463 if (!scope->u.link) {
464 scope->u.link = rt->scopeSharingTodo;
465 rt->scopeSharingTodo = scope;
466 js_HoldObjectMap(cx, &scope->map);
467 }
469 /*
470 * Inline JS_SuspendRequest before we wait on rt->scopeSharingDone,
471 * saving and clearing cx->requestDepth so we don't deadlock if the
472 * GC needs to run on ownercx.
473 *
474 * Unlike JS_SuspendRequest and JS_EndRequest, we must take care not
475 * to decrement rt->requestCount if cx is active on the GC's thread,
476 * because the GC has already reduced rt->requestCount to exclude all
 * such contexts.
478 */
479 saveDepth = cx->requestDepth;
480 if (saveDepth) {
481 cx->requestDepth = 0;
482 if (rt->gcThread != cx->thread) {
483 JS_ASSERT(rt->requestCount > 0);
484 rt->requestCount--;
485 if (rt->requestCount == 0)
486 JS_NOTIFY_REQUEST_DONE(rt);
487 }
488 }
490 /*
491 * We know that some other thread's context owns scope, which is now
492 * linked onto rt->scopeSharingTodo, awaiting the end of that other
493 * thread's request. So it is safe to wait on rt->scopeSharingDone.
494 */
495 cx->scopeToShare = scope;
496 stat = PR_WaitCondVar(rt->scopeSharingDone, PR_INTERVAL_NO_TIMEOUT);
497 JS_ASSERT(stat != PR_FAILURE);
499 /*
500 * Inline JS_ResumeRequest after waiting on rt->scopeSharingDone,
501 * restoring cx->requestDepth. Same note as above for the inlined,
502 * specialized JS_SuspendRequest code: beware rt->gcThread.
503 */
504 if (saveDepth) {
505 if (rt->gcThread != cx->thread) {
506 while (rt->gcLevel > 0)
507 JS_AWAIT_GC_DONE(rt);
508 rt->requestCount++;
509 }
510 cx->requestDepth = saveDepth;
511 }
513 /*
514 * Don't clear cx->scopeToShare until after we're through waiting on
515 * all condition variables protected by rt->gcLock -- that includes
516 * rt->scopeSharingDone *and* rt->gcDone (hidden in JS_AWAIT_GC_DONE,
517 * in the inlined JS_ResumeRequest code immediately above).
518 *
519 * Otherwise, the GC could easily deadlock with another thread that
520 * owns a scope wanted by a finalizer. By keeping cx->scopeToShare
521 * set till here, we ensure that such deadlocks are detected, which
522 * results in the finalized object's scope being shared (it must, of
523 * course, have other, live objects sharing it).
524 */
525 cx->scopeToShare = NULL;
526 }
528 JS_UNLOCK_GC(rt);
529 return JS_FALSE;
530 }
532 /* Exported to js.c, which calls it via OBJ_GET_* and JSVAL_IS_* macros. */
533 JS_FRIEND_API(jsval)
534 js_GetSlotThreadSafe(JSContext *cx, JSObject *obj, uint32 slot)
535 {
536 jsval v;
537 JSScope *scope;
538 #ifndef NSPR_LOCK
539 JSThinLock *tl;
540 jsword me;
541 #endif
543 /*
544 * We handle non-native objects via JSObjectOps.getRequiredSlot, treating
545 * all slots starting from 0 as required slots. A property definition or
546 * some prior arrangement must have allocated slot.
547 *
548 * Note once again (see jspubtd.h, before JSGetRequiredSlotOp's typedef)
549 * the crucial distinction between a |required slot number| that's passed
550 * to the get/setRequiredSlot JSObjectOps, and a |reserved slot index|
551 * passed to the JS_Get/SetReservedSlot APIs.
552 */
553 if (!OBJ_IS_NATIVE(obj))
554 return OBJ_GET_REQUIRED_SLOT(cx, obj, slot);
556 /*
557 * Native object locking is inlined here to optimize the single-threaded
558 * and contention-free multi-threaded cases.
559 */
560 scope = OBJ_SCOPE(obj);
561 JS_ASSERT(scope->ownercx != cx);
562 JS_ASSERT(obj->slots && slot < obj->map->freeslot);
564 /*
565 * Avoid locking if called from the GC (see GC_AWARE_GET_SLOT in jsobj.h).
566 * Also avoid locking an object owning a sealed scope. If neither of those
567 * special cases applies, try to claim scope's flyweight lock from whatever
568 * context may have had it in an earlier request.
569 */
570 if (CX_THREAD_IS_RUNNING_GC(cx) ||
571 (SCOPE_IS_SEALED(scope) && scope->object == obj) ||
572 (scope->ownercx && ClaimScope(scope, cx))) {
573 return obj->slots[slot];
574 }
576 #ifndef NSPR_LOCK
577 tl = &scope->lock;
578 me = cx->thread;
579 JS_ASSERT(me == CurrentThreadId());
580 if (js_CompareAndSwap(&tl->owner, 0, me)) {
581 /*
582 * Got the lock with one compare-and-swap. Even so, someone else may
583 * have mutated obj so it now has its own scope and lock, which would
584 * require either a restart from the top of this routine, or a thin
585 * lock release followed by fat lock acquisition.
586 */
587 if (scope == OBJ_SCOPE(obj)) {
588 v = obj->slots[slot];
589 if (!js_CompareAndSwap(&tl->owner, me, 0)) {
590 /* Assert that scope locks never revert to flyweight. */
591 JS_ASSERT(scope->ownercx != cx);
592 LOGIT(scope, '1');
593 scope->u.count = 1;
594 js_UnlockObj(cx, obj);
595 }
596 return v;
597 }
598 if (!js_CompareAndSwap(&tl->owner, me, 0))
599 js_Dequeue(tl);
600 }
601 else if (Thin_RemoveWait(ReadWord(tl->owner)) == me) {
602 return obj->slots[slot];
603 }
604 #endif
606 js_LockObj(cx, obj);
607 v = obj->slots[slot];
609 /*
610 * Test whether cx took ownership of obj's scope during js_LockObj.
611 *
612 * This does not mean that a given scope reverted to flyweight from "thin"
613 * or "fat" -- it does mean that obj's map pointer changed due to another
614 * thread setting a property, requiring obj to cease sharing a prototype
615 * object's scope (whose lock was not flyweight, else we wouldn't be here
616 * in the first place!).
617 */
618 scope = OBJ_SCOPE(obj);
619 if (scope->ownercx != cx)
620 js_UnlockScope(cx, scope);
621 return v;
622 }
624 void
625 js_SetSlotThreadSafe(JSContext *cx, JSObject *obj, uint32 slot, jsval v)
626 {
627 JSScope *scope;
628 #ifndef NSPR_LOCK
629 JSThinLock *tl;
630 jsword me;
631 #endif
633 /* Any string stored in a thread-safe object must be immutable. */
634 if (JSVAL_IS_STRING(v))
635 MAKE_STRING_IMMUTABLE(cx->runtime, v, &v);
637 /*
638 * We handle non-native objects via JSObjectOps.setRequiredSlot, as above
639 * for the Get case.
640 */
641 if (!OBJ_IS_NATIVE(obj)) {
642 OBJ_SET_REQUIRED_SLOT(cx, obj, slot, v);
643 return;
644 }
646 /*
647 * Native object locking is inlined here to optimize the single-threaded
648 * and contention-free multi-threaded cases.
649 */
650 scope = OBJ_SCOPE(obj);
651 JS_ASSERT(scope->ownercx != cx);
652 JS_ASSERT(obj->slots && slot < obj->map->freeslot);
654 /*
655 * Avoid locking if called from the GC (see GC_AWARE_GET_SLOT in jsobj.h).
656 * Also avoid locking an object owning a sealed scope. If neither of those
657 * special cases applies, try to claim scope's flyweight lock from whatever
658 * context may have had it in an earlier request.
659 */
660 if (CX_THREAD_IS_RUNNING_GC(cx) ||
661 (SCOPE_IS_SEALED(scope) && scope->object == obj) ||
662 (scope->ownercx && ClaimScope(scope, cx))) {
663 obj->slots[slot] = v;
664 return;
665 }
667 #ifndef NSPR_LOCK
668 tl = &scope->lock;
669 me = cx->thread;
670 JS_ASSERT(me == CurrentThreadId());
671 if (js_CompareAndSwap(&tl->owner, 0, me)) {
672 if (scope == OBJ_SCOPE(obj)) {
673 obj->slots[slot] = v;
674 if (!js_CompareAndSwap(&tl->owner, me, 0)) {
675 /* Assert that scope locks never revert to flyweight. */
676 JS_ASSERT(scope->ownercx != cx);
677 LOGIT(scope, '1');
678 scope->u.count = 1;
679 js_UnlockObj(cx, obj);
680 }
681 return;
682 }
683 if (!js_CompareAndSwap(&tl->owner, me, 0))
684 js_Dequeue(tl);
685 }
686 else if (Thin_RemoveWait(ReadWord(tl->owner)) == me) {
687 obj->slots[slot] = v;
688 return;
689 }
690 #endif
692 js_LockObj(cx, obj);
693 obj->slots[slot] = v;
695 /*
696 * Same drill as above, in js_GetSlotThreadSafe. Note that we cannot
697 * assume obj has its own mutable scope (where scope->object == obj) yet,
698 * because OBJ_SET_SLOT is called for the "universal", common slots such
699 * as JSSLOT_PROTO and JSSLOT_PARENT, without a prior js_GetMutableScope.
700 * See also the JSPROP_SHARED attribute and its usage.
701 */
702 scope = OBJ_SCOPE(obj);
703 if (scope->ownercx != cx)
704 js_UnlockScope(cx, scope);
705 }
707 #ifndef NSPR_LOCK
709 static JSFatLock *
710 NewFatlock()
711 {
712 JSFatLock *fl = (JSFatLock *)malloc(sizeof(JSFatLock)); /* for now */
    if (!fl)
        return NULL;
714 fl->susp = 0;
715 fl->next = NULL;
716 fl->prevp = NULL;
717 fl->slock = PR_NewLock();
718 fl->svar = PR_NewCondVar(fl->slock);
719 return fl;
720 }
722 static void
723 DestroyFatlock(JSFatLock *fl)
724 {
725 PR_DestroyLock(fl->slock);
726 PR_DestroyCondVar(fl->svar);
727 free(fl);
728 }
730 static JSFatLock *
731 ListOfFatlocks(int listc)
732 {
733 JSFatLock *m;
734 JSFatLock *m0;
735 int i;
    JS_ASSERT(listc > 0);
738 m0 = m = NewFatlock();
    for (i = 1; i < listc; i++) {
740 m->next = NewFatlock();
741 m = m->next;
742 }
743 return m0;
744 }
746 static void
747 DeleteListOfFatlocks(JSFatLock *m)
748 {
749 JSFatLock *m0;
    for (; m; m = m0) {
751 m0 = m->next;
752 DestroyFatlock(m);
753 }
754 }
756 static JSFatLockTable *fl_list_table = NULL;
757 static uint32 fl_list_table_len = 0;
758 static uint32 fl_list_chunk_len = 0;
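/*
 * Take a fat lock from the bucket's free list, allocating a fresh chunk of
 * fl_list_chunk_len locks if the list is empty, and move it to the taken
 * list, which is doubly linked via prevp so PutFatlock can unlink in O(1).
 */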
760 static JSFatLock *
761 GetFatlock(void *id)
762 {
763 JSFatLock *m;
765 uint32 i = GLOBAL_LOCK_INDEX(id);
766 if (fl_list_table[i].free == NULL) {
767 #ifdef DEBUG
768 if (fl_list_table[i].taken)
769 printf("Ran out of fat locks!\n");
770 #endif
771 fl_list_table[i].free = ListOfFatlocks(fl_list_chunk_len);
772 }
773 m = fl_list_table[i].free;
774 fl_list_table[i].free = m->next;
775 m->susp = 0;
776 m->next = fl_list_table[i].taken;
777 m->prevp = &fl_list_table[i].taken;
778 if (fl_list_table[i].taken)
779 fl_list_table[i].taken->prevp = &m->next;
780 fl_list_table[i].taken = m;
781 return m;
782 }
784 static void
785 PutFatlock(JSFatLock *m, void *id)
786 {
787 uint32 i;
788 if (m == NULL)
789 return;
791 /* Unlink m from fl_list_table[i].taken. */
792 *m->prevp = m->next;
793 if (m->next)
794 m->next->prevp = m->prevp;
796 /* Insert m in fl_list_table[i].free. */
797 i = GLOBAL_LOCK_INDEX(id);
798 m->next = fl_list_table[i].free;
799 fl_list_table[i].free = m;
800 }
802 #endif /* !NSPR_LOCK */
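/*
 * listc is the number of fat locks to allocate per chunk when a bucket's
 * free list runs dry (see GetFatlock); globc is the requested number of
 * global locks, rounded up to a power of two.
 */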
804 JSBool
805 js_SetupLocks(int listc, int globc)
806 {
807 #ifndef NSPR_LOCK
808 uint32 i;
810 if (global_locks)
811 return JS_TRUE;
812 #ifdef DEBUG
813 if (listc > 10000 || listc < 0) /* listc == fat lock list chunk length */
814 printf("Bad number %d in js_SetupLocks()!\n", listc);
815 if (globc > 100 || globc < 0) /* globc == number of global locks */
        printf("Bad number %d in js_SetupLocks()!\n", globc);
817 #endif
818 global_locks_log2 = JS_CeilingLog2(globc);
819 global_locks_mask = JS_BITMASK(global_locks_log2);
820 global_lock_count = JS_BIT(global_locks_log2);
821 global_locks = (PRLock **) malloc(global_lock_count * sizeof(PRLock*));
822 if (!global_locks)
823 return JS_FALSE;
824 for (i = 0; i < global_lock_count; i++) {
825 global_locks[i] = PR_NewLock();
826 if (!global_locks[i]) {
827 global_lock_count = i;
828 js_CleanupLocks();
829 return JS_FALSE;
830 }
831 }
832 fl_list_table = (JSFatLockTable *) malloc(i * sizeof(JSFatLockTable));
833 if (!fl_list_table) {
834 js_CleanupLocks();
835 return JS_FALSE;
836 }
837 fl_list_table_len = global_lock_count;
838 for (i = 0; i < global_lock_count; i++)
839 fl_list_table[i].free = fl_list_table[i].taken = NULL;
840 fl_list_chunk_len = listc;
841 #endif /* !NSPR_LOCK */
842 return JS_TRUE;
843 }
845 void
846 js_CleanupLocks()
847 {
848 #ifndef NSPR_LOCK
849 uint32 i;
851 if (global_locks) {
852 for (i = 0; i < global_lock_count; i++)
853 PR_DestroyLock(global_locks[i]);
854 free(global_locks);
855 global_locks = NULL;
856 global_lock_count = 1;
857 global_locks_log2 = 0;
858 global_locks_mask = 0;
859 }
860 if (fl_list_table) {
861 for (i = 0; i < fl_list_table_len; i++) {
862 DeleteListOfFatlocks(fl_list_table[i].free);
863 fl_list_table[i].free = NULL;
864 DeleteListOfFatlocks(fl_list_table[i].taken);
865 fl_list_table[i].taken = NULL;
866 }
867 free(fl_list_table);
868 fl_list_table = NULL;
869 fl_list_table_len = 0;
870 }
871 #endif /* !NSPR_LOCK */
872 }
874 void
875 js_InitContextForLocking(JSContext *cx)
876 {
877 cx->thread = CurrentThreadId();
878 JS_ASSERT(Thin_GetWait(cx->thread) == 0);
879 }
881 #ifndef NSPR_LOCK
883 /*
884 * Fast locking and unlocking is implemented by delaying the allocation of a
885 * system lock (fat lock) until contention. As long as a locking thread A
886 * runs uncontended, the lock is represented solely by storing A's identity in
887 * the object being locked.
888 *
889 * If another thread B tries to lock the object currently locked by A, B is
890 * enqueued into a fat lock structure (which might have to be allocated and
 * pointed to by the object), and suspended using NSPR condition variables
892 * (wait). A wait bit (Bacon bit) is set in the lock word of the object,
893 * signalling to A that when releasing the lock, B must be dequeued and
894 * notified.
895 *
896 * The basic operation of the locking primitives (js_Lock, js_Unlock,
 * js_Enqueue, and js_Dequeue) is compare-and-swap. Hence, when locking the
 * word pointed at by p, a successful compare-and-swap(p, 0, A) implies that p
 * was unlocked. Similarly, when unlocking p, if compare-and-swap(p, A, 0)
 * succeeds, this implies that p is uncontended (no one is waiting because the
901 * wait bit is not set).
902 *
903 * When dequeueing, the lock is released, and one of the threads suspended on
 * the lock is notified. If other threads are still waiting, the wait bit is
905 * kept (in js_Enqueue), and if not, the fat lock is deallocated.
906 *
907 * The functions js_Enqueue, js_Dequeue, js_SuspendThread, and js_ResumeThread
 * are serialized using a global lock. For scalability, a hashtable of global
 * locks is used, indexed by the thin lock's address (see GLOBAL_LOCK_INDEX).
910 */
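/*
 * A sketch of the thin lock states (the wait bit is encoded in the owner
 * word by the Thin_* macros in jslock.h):
 *
 *   tl->owner == 0                  unlocked
 *   tl->owner == A                  locked by thread A, uncontended
 *   tl->owner == Thin_SetWait(A)    locked by A, with at least one waiter
 *                                   suspended on tl->fat
 */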
912 /*
913 * Invariants:
914 * (i) global lock is held
915 * (ii) fl->susp >= 0
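 *
 * Returns nonzero when the last waiter puts the fat lock back on the free
 * list (tl->fat becomes null), i.e. when contention on tl has ended.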
916 */
917 static int
918 js_SuspendThread(JSThinLock *tl)
919 {
920 JSFatLock *fl;
921 PRStatus stat;
923 if (tl->fat == NULL)
924 fl = tl->fat = GetFatlock(tl);
925 else
926 fl = tl->fat;
927 JS_ASSERT(fl->susp >= 0);
928 fl->susp++;
929 PR_Lock(fl->slock);
930 js_UnlockGlobal(tl);
931 stat = PR_WaitCondVar(fl->svar, PR_INTERVAL_NO_TIMEOUT);
932 JS_ASSERT(stat != PR_FAILURE);
933 PR_Unlock(fl->slock);
934 js_LockGlobal(tl);
935 fl->susp--;
936 if (fl->susp == 0) {
937 PutFatlock(fl, tl);
938 tl->fat = NULL;
939 }
940 return tl->fat == NULL;
941 }
943 /*
944 * (i) global lock is held
945 * (ii) fl->susp > 0
946 */
947 static void
948 js_ResumeThread(JSThinLock *tl)
949 {
950 JSFatLock *fl = tl->fat;
951 PRStatus stat;
953 JS_ASSERT(fl != NULL);
954 JS_ASSERT(fl->susp > 0);
955 PR_Lock(fl->slock);
956 js_UnlockGlobal(tl);
957 stat = PR_NotifyCondVar(fl->svar);
958 JS_ASSERT(stat != PR_FAILURE);
959 PR_Unlock(fl->slock);
960 }
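/*
 * Either mark tl as contended and suspend until the owner releases it, or,
 * if the owner word reads 0, claim tl with a compare-and-swap. The wait bit
 * carried in me after a suspension records whether other waiters remain, so
 * a later js_Unlock by this thread knows to dequeue them.
 */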
962 static void
963 js_Enqueue(JSThinLock *tl, jsword me)
964 {
965 jsword o, n;
967 js_LockGlobal(tl);
968 for (;;) {
969 o = ReadWord(tl->owner);
970 n = Thin_SetWait(o);
971 if (o != 0 && js_CompareAndSwap(&tl->owner, o, n)) {
972 if (js_SuspendThread(tl))
973 me = Thin_RemoveWait(me);
974 else
975 me = Thin_SetWait(me);
976 }
977 else if (js_CompareAndSwap(&tl->owner, 0, me)) {
978 js_UnlockGlobal(tl);
979 return;
980 }
981 }
982 }
984 static void
985 js_Dequeue(JSThinLock *tl)
986 {
987 jsword o;
989 js_LockGlobal(tl);
990 o = ReadWord(tl->owner);
991 JS_ASSERT(Thin_GetWait(o) != 0);
992 JS_ASSERT(tl->fat != NULL);
993 if (!js_CompareAndSwap(&tl->owner, o, 0)) /* release it */
994 JS_ASSERT(0);
995 js_ResumeThread(tl);
996 }
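/*
 * js_Lock and js_Unlock are not reentrant for a given thread: locking a thin
 * lock that this thread already owns asserts in DEBUG builds. Reentrancy is
 * handled a level up, by the scope->u.count bookkeeping in js_LockScope and
 * js_UnlockScope.
 */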
998 JS_INLINE void
999 js_Lock(JSThinLock *tl, jsword me)
1000 {
1001 JS_ASSERT(me == CurrentThreadId());
1002 if (js_CompareAndSwap(&tl->owner, 0, me))
1003 return;
1004 if (Thin_RemoveWait(ReadWord(tl->owner)) != me)
1005 js_Enqueue(tl, me);
1006 #ifdef DEBUG
1007 else
1008 JS_ASSERT(0);
1009 #endif
1010 }
1012 JS_INLINE void
1013 js_Unlock(JSThinLock *tl, jsword me)
1014 {
1015 JS_ASSERT(me == CurrentThreadId());
1016 if (js_CompareAndSwap(&tl->owner, me, 0))
1017 return;
1018 if (Thin_RemoveWait(ReadWord(tl->owner)) == me)
1019 js_Dequeue(tl);
1020 #ifdef DEBUG
1021 else
1022 JS_ASSERT(0);
1023 #endif
1024 }
1026 #endif /* !NSPR_LOCK */
1028 void
1029 js_LockRuntime(JSRuntime *rt)
1030 {
1031 PR_Lock(rt->rtLock);
1032 #ifdef DEBUG
1033 rt->rtLockOwner = CurrentThreadId();
1034 #endif
1035 }
1037 void
1038 js_UnlockRuntime(JSRuntime *rt)
1039 {
1040 #ifdef DEBUG
1041 rt->rtLockOwner = 0;
1042 #endif
1043 PR_Unlock(rt->rtLock);
1044 }
1046 void
1047 js_LockScope(JSContext *cx, JSScope *scope)
1048 {
1049 jsword me = cx->thread;
1051 JS_ASSERT(me == CurrentThreadId());
1052 JS_ASSERT(scope->ownercx != cx);
1053 if (CX_THREAD_IS_RUNNING_GC(cx))
1054 return;
1055 if (scope->ownercx && ClaimScope(scope, cx))
1056 return;
1058 if (Thin_RemoveWait(ReadWord(scope->lock.owner)) == me) {
1059 JS_ASSERT(scope->u.count > 0);
1060 LOGIT(scope, '+');
1061 scope->u.count++;
1062 } else {
1063 JSThinLock *tl = &scope->lock;
1064 JS_LOCK0(tl, me);
1065 JS_ASSERT(scope->u.count == 0);
1066 LOGIT(scope, '1');
1067 scope->u.count = 1;
1068 }
1069 }
1071 void
1072 js_UnlockScope(JSContext *cx, JSScope *scope)
1073 {
1074 jsword me = cx->thread;
1076 /* We hope compilers use me instead of reloading cx->thread in the macro. */
1077 if (CX_THREAD_IS_RUNNING_GC(cx))
1078 return;
1079 if (cx->lockedSealedScope == scope) {
1080 cx->lockedSealedScope = NULL;
1081 return;
1082 }
1084 /*
 * If scope->ownercx is not null, it's likely that two contexts not using
 * requests took nested locks on scope. The first context, cx here, claimed
 * scope; the second, scope->ownercx here, re-claimed it because the first
 * was not in a request, or was on the same thread. We don't want to keep
 * track of such nesting, because it penalizes the common non-nested case.
 * Rather than assert here, we silently cope by re-claiming scope for cx and
 * returning.
1092 *
1093 * See http://bugzilla.mozilla.org/show_bug.cgi?id=229200 for a real world
1094 * case where an asymmetric thread model (Mozilla's main thread is known
1095 * to be the only thread that runs the GC) combined with multiple contexts
1096 * per thread has led to such request-less nesting.
1097 */
1098 if (scope->ownercx) {
1099 JS_ASSERT(scope->u.count == 0);
1100 JS_ASSERT(scope->lock.owner == 0);
1101 scope->ownercx = cx;
1102 return;
1103 }
1105 JS_ASSERT(scope->u.count > 0);
1106 if (Thin_RemoveWait(ReadWord(scope->lock.owner)) != me) {
1107 JS_ASSERT(0); /* unbalanced unlock */
1108 return;
1109 }
1110 LOGIT(scope, '-');
1111 if (--scope->u.count == 0) {
1112 JSThinLock *tl = &scope->lock;
1113 JS_UNLOCK0(tl, me);
1114 }
1115 }
1117 /*
1118 * NB: oldscope may be null if our caller is js_GetMutableScope and it just
1119 * dropped the last reference to oldscope.
1120 */
1121 void
1122 js_TransferScopeLock(JSContext *cx, JSScope *oldscope, JSScope *newscope)
1123 {
1124 jsword me;
1125 JSThinLock *tl;
1127 JS_ASSERT(JS_IS_SCOPE_LOCKED(cx, newscope));
1129 /*
1130 * If the last reference to oldscope went away, newscope needs no lock
1131 * state update.
1132 */
1133 if (!oldscope)
1134 return;
1135 JS_ASSERT(JS_IS_SCOPE_LOCKED(cx, oldscope));
1137 /*
1138 * Special case in js_LockScope and js_UnlockScope for the GC calling
1139 * code that locks, unlocks, or mutates. Nothing to do in these cases,
1140 * because scope and newscope were "locked" by the GC thread, so neither
1141 * was actually locked.
1142 */
1143 if (CX_THREAD_IS_RUNNING_GC(cx))
1144 return;
1146 /*
1147 * Special case in js_LockObj and js_UnlockScope for locking the sealed
1148 * scope of an object that owns that scope (the prototype or mutated obj
1149 * for which OBJ_SCOPE(obj)->object == obj), and unlocking it.
1150 */
1151 JS_ASSERT(cx->lockedSealedScope != newscope);
1152 if (cx->lockedSealedScope == oldscope) {
1153 JS_ASSERT(newscope->ownercx == cx ||
1154 (!newscope->ownercx && newscope->u.count == 1));
1155 cx->lockedSealedScope = NULL;
1156 return;
1157 }
1159 /*
1160 * If oldscope is single-threaded, there's nothing to do.
1161 */
1162 if (oldscope->ownercx) {
1163 JS_ASSERT(oldscope->ownercx == cx);
1164 JS_ASSERT(newscope->ownercx == cx ||
1165 (!newscope->ownercx && newscope->u.count == 1));
1166 return;
1167 }
1169 /*
1170 * We transfer oldscope->u.count only if newscope is not single-threaded.
1171 * Flow unwinds from here through some number of JS_UNLOCK_SCOPE and/or
1172 * JS_UNLOCK_OBJ macro calls, which will decrement newscope->u.count only
1173 * if they find newscope->ownercx != cx.
1174 */
1175 if (newscope->ownercx != cx) {
1176 JS_ASSERT(!newscope->ownercx);
1177 newscope->u.count = oldscope->u.count;
1178 }
1180 /*
1181 * Reset oldscope's lock state so that it is completely unlocked.
1182 */
1183 LOGIT(oldscope, '0');
1184 oldscope->u.count = 0;
1185 tl = &oldscope->lock;
1186 me = cx->thread;
1187 JS_UNLOCK0(tl, me);
1188 }
1190 void
1191 js_LockObj(JSContext *cx, JSObject *obj)
1192 {
1193 JSScope *scope;
1195 JS_ASSERT(OBJ_IS_NATIVE(obj));
1196 for (;;) {
1197 scope = OBJ_SCOPE(obj);
1198 if (SCOPE_IS_SEALED(scope) && scope->object == obj &&
1199 !cx->lockedSealedScope) {
1200 cx->lockedSealedScope = scope;
1201 return;
1202 }
1204 js_LockScope(cx, scope);
1206 /* If obj still has this scope, we're done. */
1207 if (scope == OBJ_SCOPE(obj))
1208 return;
1210 /* Lost a race with a mutator; retry with obj's new scope. */
1211 js_UnlockScope(cx, scope);
1212 }
1213 }
1215 void
1216 js_UnlockObj(JSContext *cx, JSObject *obj)
1217 {
1218 JS_ASSERT(OBJ_IS_NATIVE(obj));
1219 js_UnlockScope(cx, OBJ_SCOPE(obj));
1220 }
1222 #ifdef DEBUG
1224 JSBool
1225 js_IsRuntimeLocked(JSRuntime *rt)
1226 {
1227 return CurrentThreadId() == rt->rtLockOwner;
1228 }
1230 JSBool
1231 js_IsObjLocked(JSContext *cx, JSObject *obj)
1232 {
1233 JSScope *scope = OBJ_SCOPE(obj);
1235 return MAP_IS_NATIVE(&scope->map) && js_IsScopeLocked(cx, scope);
1236 }
1238 JSBool
1239 js_IsScopeLocked(JSContext *cx, JSScope *scope)
1240 {
1241 /* Special case: the GC locking any object's scope, see js_LockScope. */
1242 if (CX_THREAD_IS_RUNNING_GC(cx))
1243 return JS_TRUE;
1245 /* Special case: locked object owning a sealed scope, see js_LockObj. */
1246 if (cx->lockedSealedScope == scope)
1247 return JS_TRUE;
1249 /*
1250 * General case: the scope is either exclusively owned (by cx), or it has
1251 * a thin or fat lock to cope with shared (concurrent) ownership.
1252 */
1253 if (scope->ownercx) {
1254 JS_ASSERT(scope->ownercx == cx);
1255 return JS_TRUE;
1256 }
1257 return CurrentThreadId() == Thin_RemoveWait(ReadWord(scope->lock.owner));
1258 }
1260 #endif /* DEBUG */
1261 #endif /* JS_THREADSAFE */