1 /* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
2 *
3 * ***** BEGIN LICENSE BLOCK *****
4 * Version: MPL 1.1/GPL 2.0/LGPL 2.1
5 *
6 * The contents of this file are subject to the Mozilla Public License Version
7 * 1.1 (the "License"); you may not use this file except in compliance with
8 * the License. You may obtain a copy of the License at
9 * http://www.mozilla.org/MPL/
10 *
11 * Software distributed under the License is distributed on an "AS IS" basis,
12 * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
13 * for the specific language governing rights and limitations under the
14 * License.
15 *
16 * The Original Code is Mozilla Communicator client code, released
17 * March 31, 1998.
18 *
19 * The Initial Developer of the Original Code is
20 * Netscape Communications Corporation.
21 * Portions created by the Initial Developer are Copyright (C) 1998
22 * the Initial Developer. All Rights Reserved.
23 *
24 * Contributor(s):
25 *
26 * Alternatively, the contents of this file may be used under the terms of
27 * either of the GNU General Public License Version 2 or later (the "GPL"),
28 * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
29 * in which case the provisions of the GPL or the LGPL are applicable instead
30 * of those above. If you wish to allow use of your version of this file only
31 * under the terms of either the GPL or the LGPL, and not to allow others to
32 * use your version of this file under the terms of the MPL, indicate your
33 * decision by deleting the provisions above and replace them with the notice
34 * and other provisions required by the GPL or the LGPL. If you do not delete
35 * the provisions above, a recipient may use your version of this file under
36 * the terms of any one of the MPL, the GPL or the LGPL.
37 *
38 * ***** END LICENSE BLOCK ***** */
40 /*
41 * JS Mark-and-Sweep Garbage Collector.
42 *
43 * This GC allocates only fixed-sized things big enough to contain two words
44 * (pointers) on any host architecture. It allocates from an arena pool (see
45 * jsarena.h). It uses an ideally parallel array of flag bytes to hold the
46 * mark bit, finalizer type index, etc.
47 *
48 * XXX swizzle page to freelist for better locality of reference
49 */
50 #include "jsstddef.h"
51 #include <stdlib.h> /* for free, called by JS_ARENA_DESTROY */
52 #include <string.h> /* for memset, called by jsarena.h macros if DEBUG */
53 #include "jstypes.h"
54 #include "jsarena.h" /* Added by JSIFY */
55 #include "jsutil.h" /* Added by JSIFY */
56 #include "jshash.h" /* Added by JSIFY */
57 #include "jsapi.h"
58 #include "jsatom.h"
59 #include "jscntxt.h"
60 #include "jsconfig.h"
61 #include "jsdbgapi.h"
62 #include "jsfun.h"
63 #include "jsgc.h"
64 #include "jsinterp.h"
65 #include "jslock.h"
66 #include "jsnum.h"
67 #include "jsobj.h"
68 #include "jsscope.h"
69 #include "jsscript.h"
70 #include "jsstr.h"
72 /*
73 * GC arena sizing depends on amortizing arena overhead using a large number
74 * of things per arena, and on the thing/flags ratio of 8:1 on most platforms.
75 *
76 * On 64-bit platforms, we would have half as many things per arena because
77 * pointers are twice as big, so we double the bytes for things per arena.
78 * This preserves the 1024 byte flags sub-arena size, which relates to the
79 * GC_PAGE_SIZE (see below for why).
80 */
81 #if JS_BYTES_PER_WORD == 8
82 # define GC_THINGS_SHIFT 14 /* 16KB for things on Alpha, etc. */
83 #else
84 # define GC_THINGS_SHIFT 13 /* 8KB for things on most platforms */
85 #endif
86 #define GC_THINGS_SIZE JS_BIT(GC_THINGS_SHIFT)
87 #define GC_FLAGS_SIZE (GC_THINGS_SIZE / sizeof(JSGCThing))
88 #define GC_ARENA_SIZE (GC_THINGS_SIZE + GC_FLAGS_SIZE)
/*
 * The private JSGCThing struct, which describes a gcFreeList element.
 * Every GC-allocated thing is at least this big (two words); a free thing
 * is overlaid with this struct to chain it onto the runtime's free list.
 */
struct JSGCThing {
    JSGCThing   *next;          /* next free thing on rt->gcFreeList */
    uint8       *flagp;         /* cached pointer to this thing's flag byte */
};
98 /*
99 * A GC arena contains one flag byte for each thing in its heap, and supports
100 * O(1) lookup of a flag given its thing's address.
101 *
102 * To implement this, we take advantage of the thing/flags numerology: given
103 * the 8K bytes worth of GC-things, there are 1K flag bytes. We mask a thing's
104 * address with ~1023 to find a JSGCPageInfo record at the front of a mythical
105 * "GC page" within the larger 8K thing arena. That JSGCPageInfo contains a
106 * pointer to the 128 flag bytes corresponding to the things in the page, so we
107 * index into this flags array using the thing's index within its page.
108 *
109 * To align thing pages on 1024-byte boundaries, we must allocate the 9KB of
110 * flags+things arena payload, then find the first 0 mod 1024 boundary after
111 * the first payload address. That's where things start, with a JSGCPageInfo
112 * taking up the first thing-slot, as usual for 0 mod 1024 byte boundaries.
113 * The effect of this alignment trick is to split the flags into at most 2
114 * discontiguous spans, one before the things and one after (if we're really
115 * lucky, and the arena payload starts on a 0 mod 1024 byte boundary, no need
116 * to split).
117 *
118 * The overhead of this scheme for most platforms is (16+8*(8+1))/(16+9K) or
119 * .95% (assuming 16 byte JSArena header size, and 8 byte JSGCThing size).
120 *
121 * Here's some ASCII art showing an arena:
122 *
123 * split
124 * |
125 * V
126 * +--+-------+-------+-------+-------+-------+-------+-------+-------+-----+
127 * |fB| tp0 | tp1 | tp2 | tp3 | tp4 | tp5 | tp6 | tp7 | fA |
128 * +--+-------+-------+-------+-------+-------+-------+-------+-------+-----+
129 * ^ ^
130 * tI ---------+ |
131 * tJ -------------------------------------------+
132 *
133 * - fB are the "before split" flags, fA are the "after split" flags
134 * - tp0-tp7 are the 8 thing pages
135 * - thing tI points into tp1, whose flags are below the split, in fB
136 * - thing tJ points into tp5, clearly above the split
137 *
138 * In general, one of the thing pages will have some of its things' flags on
139 * the low side of the split, and the rest of its things' flags on the high
140 * side. All the other pages have flags only below or only above. Therefore
141 * we'll have to test something to decide whether the split divides flags in
142 * a given thing's page. So we store the split pointer (the pointer to tp0)
143 * in each JSGCPageInfo, along with the flags pointer for the 128 flag bytes
144 * ideally starting, for tp0 things, at the beginning of the arena's payload
145 * (at the start of fB).
146 *
147 * That is, each JSGCPageInfo's flags pointer is 128 bytes from the previous,
148 * or at the start of the arena if there is no previous page in this arena.
149 * Thus these ideal 128-byte flag pages run contiguously from the start of the
150 * arena (right over the split!), and the JSGCPageInfo flags pointers contain
151 * no discontinuities over the split created by the thing pages. So if, for a
152 * given JSGCPageInfo *pi, we find that
153 *
154 * pi->flags + ((jsuword)thing % 1024) / sizeof(JSGCThing) >= pi->split
155 *
156 * then we must add GC_THINGS_SIZE to the nominal flags pointer to jump over
157 * all the thing pages that split the flags into two discontiguous spans.
158 *
159 * (If we need to implement card-marking for an incremental GC write barrier,
160 * we can use the low byte of the pi->split pointer as the card-mark, for an
161 * extremely efficient write barrier: when mutating an object obj, just store
162 * a 1 byte at (uint8 *) ((jsuword)obj & ~1023) for little-endian platforms.
163 * When finding flags, we'll of course have to mask split with ~255, but it is
164 * guaranteed to be 1024-byte aligned, so no information is lost by overlaying
165 * the card-mark byte on split's low byte.)
166 */
167 #define GC_PAGE_SHIFT 10
168 #define GC_PAGE_MASK ((jsuword) JS_BITMASK(GC_PAGE_SHIFT))
169 #define GC_PAGE_SIZE JS_BIT(GC_PAGE_SHIFT)
/*
 * Header occupying the first thing-slot of each 1024-byte "GC page".
 * See the layout comment above for how split/flags cooperate to give O(1)
 * thing-to-flag lookup.
 */
typedef struct JSGCPageInfo {
    uint8       *split;         /* address of tp0, the first thing page */
    uint8       *flags;         /* nominal flag bytes for this page's things */
} JSGCPageInfo;
176 #define FIRST_THING_PAGE(a) (((a)->base + GC_FLAGS_SIZE) & ~GC_PAGE_MASK)
/*
 * Grow the GC heap by one arena and lay out its page-info records.
 * Returns the first allocatable thing in the new arena (the slot right
 * after tp0's JSGCPageInfo), or NULL on out-of-memory.
 */
static JSGCThing *
gc_new_arena(JSArenaPool *pool)
{
    uint8 *flagp, *split, *pagep, *limit;
    JSArena *a;
    JSGCThing *thing;
    JSGCPageInfo *pi;

    /* Use JS_ArenaAllocate to grab another 9K-net-size hunk of space. */
    flagp = (uint8 *) JS_ArenaAllocate(pool, GC_ARENA_SIZE);
    if (!flagp)
        return NULL;
    a = pool->current;

    /* Reset a->avail to start at the flags split, aka the first thing page. */
    a->avail = FIRST_THING_PAGE(a);
    split = pagep = (uint8 *) a->avail;
    a->avail += sizeof(JSGCPageInfo);
    thing = (JSGCThing *) a->avail;
    a->avail += sizeof(JSGCThing);

    /* Initialize the JSGCPageInfo records at the start of every thing page. */
    limit = pagep + GC_THINGS_SIZE;
    do {
        pi = (JSGCPageInfo *) pagep;
        pi->split = split;
        pi->flags = flagp;
        /* Each page owns GC_PAGE_SIZE/sizeof(JSGCThing) flag bytes. */
        flagp += GC_PAGE_SIZE >> (GC_THINGS_SHIFT - GC_PAGE_SHIFT);
        pagep += GC_PAGE_SIZE;
    } while (pagep < limit);
    return thing;
}
211 uint8 *
212 js_GetGCThingFlags(void *thing)
213 {
214 JSGCPageInfo *pi;
215 uint8 *flagp;
217 pi = (JSGCPageInfo *) ((jsuword)thing & ~GC_PAGE_MASK);
218 flagp = pi->flags + ((jsuword)thing & GC_PAGE_MASK) / sizeof(JSGCThing);
219 if (flagp >= pi->split)
220 flagp += GC_THINGS_SIZE;
221 return flagp;
222 }
224 JSBool
225 js_IsAboutToBeFinalized(JSContext *cx, void *thing)
226 {
227 uint8 flags = *js_GetGCThingFlags(thing);
229 return !(flags & (GCF_MARK | GCF_LOCKMASK | GCF_FINAL));
230 }
/* Per-type finalizer hook, invoked during sweep on unmarked things. */
typedef void (*GCFinalizeOp)(JSContext *cx, JSGCThing *thing);

/* Finalizer table indexed by GCX_* type; filled lazily in js_InitGC. */
static GCFinalizeOp gc_finalizers[GCX_NTYPES];
236 intN
237 js_ChangeExternalStringFinalizer(JSStringFinalizeOp oldop,
238 JSStringFinalizeOp newop)
239 {
240 uintN i;
242 for (i = GCX_EXTERNAL_STRING; i < GCX_NTYPES; i++) {
243 if (gc_finalizers[i] == (GCFinalizeOp) oldop) {
244 gc_finalizers[i] = (GCFinalizeOp) newop;
245 return (intN) i;
246 }
247 }
248 return -1;
249 }
251 #ifdef JS_GCMETER
252 #define METER(x) x
253 #else
254 #define METER(x) /* nothing */
255 #endif
257 /* Initial size of the gcRootsHash table (SWAG, small enough to amortize). */
258 #define GC_ROOTS_SIZE 256
259 #define GC_FINALIZE_LEN 1024
/*
 * Per-runtime GC setup: verify the sizing invariants the arena layout
 * depends on, populate the (file-static, hence once-per-process) finalizer
 * table, create the GC arena pool, and initialize the roots hashtable.
 * Returns JS_FALSE on out-of-memory.
 */
JSBool
js_InitGC(JSRuntime *rt, uint32 maxbytes)
{
    /* The flags math assumes a thing-slot exactly fits a page header. */
    JS_ASSERT(sizeof(JSGCThing) == sizeof(JSGCPageInfo));
    JS_ASSERT(sizeof(JSGCThing) >= sizeof(JSObject));
    JS_ASSERT(sizeof(JSGCThing) >= sizeof(JSString));
    JS_ASSERT(sizeof(JSGCThing) >= sizeof(jsdouble));
    JS_ASSERT(GC_FLAGS_SIZE >= GC_PAGE_SIZE);
    JS_ASSERT(sizeof(JSStackHeader) >= 2 * sizeof(jsval));

    if (!gc_finalizers[GCX_OBJECT]) {
        gc_finalizers[GCX_OBJECT] = (GCFinalizeOp)js_FinalizeObject;
        gc_finalizers[GCX_STRING] = (GCFinalizeOp)js_FinalizeString;
#ifdef DEBUG
        gc_finalizers[GCX_DOUBLE] = (GCFinalizeOp)js_FinalizeDouble;
#endif
        gc_finalizers[GCX_MUTABLE_STRING] = (GCFinalizeOp)js_FinalizeString;
    }

    JS_InitArenaPool(&rt->gcArenaPool, "gc-arena", GC_ARENA_SIZE,
                     sizeof(JSGCThing));
    if (!JS_DHashTableInit(&rt->gcRootsHash, JS_DHashGetStubOps(), NULL,
                           sizeof(JSGCRootHashEntry), GC_ROOTS_SIZE)) {
        /* Null ops marks the table uninitialized for js_FinishGC. */
        rt->gcRootsHash.ops = NULL;
        return JS_FALSE;
    }
    rt->gcLocksHash = NULL; /* create lazily */
    rt->gcMaxBytes = maxbytes;
    return JS_TRUE;
}
292 #ifdef JS_GCMETER
/* Dump the JS_GCMETER counters accumulated in rt->gcStats to fp. */
void
js_DumpGCStats(JSRuntime *rt, FILE *fp)
{
    fprintf(fp, "\nGC allocation statistics:\n");
    fprintf(fp, " bytes currently allocated: %lu\n", rt->gcBytes);
    fprintf(fp, " alloc attempts: %lu\n", rt->gcStats.alloc);
    fprintf(fp, " GC freelist length: %lu\n", rt->gcStats.freelen);
    fprintf(fp, " recycles through GC freelist: %lu\n", rt->gcStats.recycle);
    fprintf(fp, "alloc retries after running GC: %lu\n", rt->gcStats.retry);
    fprintf(fp, " allocation failures: %lu\n", rt->gcStats.fail);
    fprintf(fp, " valid lock calls: %lu\n", rt->gcStats.lock);
    fprintf(fp, " valid unlock calls: %lu\n", rt->gcStats.unlock);
    fprintf(fp, " locks that hit stuck counts: %lu\n", rt->gcStats.stuck);
    fprintf(fp, " unlocks that saw stuck counts: %lu\n", rt->gcStats.unstuck);
    fprintf(fp, " mark recursion depth: %lu\n", rt->gcStats.depth);
    fprintf(fp, " maximum mark recursion depth: %lu\n", rt->gcStats.maxdepth);
    fprintf(fp, " maximum GC nesting level: %lu\n", rt->gcStats.maxlevel);
    fprintf(fp, " potentially useful GC calls: %lu\n", rt->gcStats.poke);
    fprintf(fp, " useless GC calls: %lu\n", rt->gcStats.nopoke);
    fprintf(fp, " thing arenas freed so far: %lu\n", rt->gcStats.afree);
    fprintf(fp, " extra stack segments scanned: %lu\n", rt->gcStats.stackseg);
    fprintf(fp, " stack segment slots scanned: %lu\n", rt->gcStats.segslots);
#ifdef JS_ARENAMETER
    JS_DumpArenaStats(fp);
#endif
}
319 #endif
321 #ifdef DEBUG
322 JS_STATIC_DLL_CALLBACK(JSDHashOperator)
323 js_root_printer(JSDHashTable *table, JSDHashEntryHdr *hdr, uint32 i, void *arg)
324 {
325 uint32 *leakedroots = (uint32 *)arg;
326 JSGCRootHashEntry *rhe = (JSGCRootHashEntry *)hdr;
328 (*leakedroots)++;
329 fprintf(stderr,
330 "JS engine warning: leaking GC root \'%s\' at %p\n",
331 rhe->name ? (char *)rhe->name : "", rhe->root);
333 return JS_DHASH_NEXT;
334 }
335 #endif
/*
 * Tear down per-runtime GC state: dump meter stats (if compiled in), free
 * all arenas, and destroy the roots and locks hashtables.  Debug builds
 * warn about roots the embedder never removed.
 */
void
js_FinishGC(JSRuntime *rt)
{
#ifdef JS_ARENAMETER
    JS_DumpArenaStats(stdout);
#endif
#ifdef JS_GCMETER
    js_DumpGCStats(rt, stdout);
#endif

    /* Free this pool's arenas, then release the global arena freelist. */
    JS_FinishArenaPool(&rt->gcArenaPool);
    JS_ArenaFinish();

    if (rt->gcRootsHash.ops) {
#ifdef DEBUG
        uint32 leakedroots = 0;

        /* Warn (but don't assert) debug builds of any remaining roots. */
        JS_DHashTableEnumerate(&rt->gcRootsHash, js_root_printer,
                               &leakedroots);
        if (leakedroots > 0) {
            if (leakedroots == 1) {
                fprintf(stderr,
"JS engine warning: 1 GC root remains after destroying the JSRuntime.\n"
" This root may point to freed memory. Objects reachable\n"
" through it have not been finalized.\n");
            } else {
                fprintf(stderr,
"JS engine warning: %lu GC roots remain after destroying the JSRuntime.\n"
" These roots may point to freed memory. Objects reachable\n"
" through them have not been finalized.\n",
                        (unsigned long) leakedroots);
            }
        }
#endif

        JS_DHashTableFinish(&rt->gcRootsHash);
        rt->gcRootsHash.ops = NULL;     /* matches the js_InitGC sentinel */
    }
    if (rt->gcLocksHash) {
        JS_DHashTableDestroy(rt->gcLocksHash);
        rt->gcLocksHash = NULL;
    }
    rt->gcFreeList = NULL;
}
382 JSBool
383 js_AddRoot(JSContext *cx, void *rp, const char *name)
384 {
385 JSBool ok = js_AddRootRT(cx->runtime, rp, name);
386 if (!ok)
387 JS_ReportOutOfMemory(cx);
388 return ok;
389 }
/*
 * Register rp (the address of a jsval-holding location; see gc_root_marker)
 * in rt->gcRootsHash under an optional debugging name.  Re-adding the same
 * address simply rebinds its name.  Returns JS_FALSE on out-of-memory; the
 * error is reported by the js_AddRoot wrapper, not here.
 */
JSBool
js_AddRootRT(JSRuntime *rt, void *rp, const char *name)
{
    JSBool ok;
    JSGCRootHashEntry *rhe;

    /*
     * Due to the long-standing, but now removed, use of rt->gcLock across the
     * bulk of js_GC, API users have come to depend on JS_AddRoot etc. locking
     * properly with a racing GC, without calling JS_AddRoot from a request.
     * We have to preserve API compatibility here, now that we avoid holding
     * rt->gcLock across the mark phase (including the root hashtable mark).
     *
     * If the GC is running and we're called on another thread, wait for this
     * GC activation to finish. We can safely wait here (in the case where we
     * are called within a request on another thread's context) without fear
     * of deadlock because the GC doesn't set rt->gcRunning until after it has
     * waited for all active requests to end.
     */
    JS_LOCK_GC(rt);
#ifdef JS_THREADSAFE
    JS_ASSERT(!rt->gcRunning || rt->gcLevel > 0);
    if (rt->gcRunning && rt->gcThread != js_CurrentThreadId()) {
        do {
            JS_AWAIT_GC_DONE(rt);
        } while (rt->gcLevel > 0);
    }
#endif
    /* JS_DHASH_ADD returns an existing or freshly created entry. */
    rhe = (JSGCRootHashEntry *) JS_DHashTableOperate(&rt->gcRootsHash, rp,
                                                     JS_DHASH_ADD);
    if (rhe) {
        rhe->root = rp;
        rhe->name = name;
        ok = JS_TRUE;
    } else {
        /* Entry allocation failed; caller reports out-of-memory. */
        ok = JS_FALSE;
    }
    JS_UNLOCK_GC(rt);
    return ok;
}
/*
 * Unregister rp from rt->gcRootsHash.  Always returns JS_TRUE; removing an
 * address that was never added is a harmless no-op in the hashtable.
 */
JSBool
js_RemoveRoot(JSRuntime *rt, void *rp)
{
    /*
     * Due to the JS_RemoveRootRT API, we may be called outside of a request.
     * Same synchronization drill as above in js_AddRoot.
     */
    JS_LOCK_GC(rt);
#ifdef JS_THREADSAFE
    JS_ASSERT(!rt->gcRunning || rt->gcLevel > 0);
    if (rt->gcRunning && rt->gcThread != js_CurrentThreadId()) {
        do {
            JS_AWAIT_GC_DONE(rt);
        } while (rt->gcLevel > 0);
    }
#endif
    (void) JS_DHashTableOperate(&rt->gcRootsHash, rp, JS_DHASH_REMOVE);
    /* Dropping a root may make things collectable: poke the next GC. */
    rt->gcPoke = JS_TRUE;
    JS_UNLOCK_GC(rt);
    return JS_TRUE;
}
/*
 * Allocate a GC-thing whose flag byte is initialized from flags (the low
 * GCF_TYPEMASK bits select the thing's type).  Allocation order: recycle
 * from rt->gcFreeList, else bump-allocate from the current GC arena (or a
 * new one), else run a "last ditch" GC and retry once.  The new thing is
 * rooted either on cx's local root stack or in cx->newborn[type].  Returns
 * NULL on failure (with an OOM report, except when the GC was running).
 */
void *
js_AllocGCThing(JSContext *cx, uintN flags)
{
    JSBool tried_gc;
    JSRuntime *rt;
    JSGCThing *thing;
    uint8 *flagp;
    JSLocalRootStack *lrs;

#ifdef TOO_MUCH_GC
    /* Stress mode: collect before every allocation. */
    js_GC(cx, GC_KEEP_ATOMS);
    tried_gc = JS_TRUE;
#else
    tried_gc = JS_FALSE;
#endif

    rt = cx->runtime;
    JS_LOCK_GC(rt);
    /* Allocating during finalization would corrupt the sweep. */
    JS_ASSERT(!rt->gcRunning);
    if (rt->gcRunning) {
        METER(rt->gcStats.finalfail++);
        JS_UNLOCK_GC(rt);
        return NULL;
    }
    METER(rt->gcStats.alloc++);
retry:
    thing = rt->gcFreeList;
    if (thing) {
        /* Recycle a previously finalized thing; its flagp is cached. */
        rt->gcFreeList = thing->next;
        flagp = thing->flagp;
        METER(rt->gcStats.freelen--);
        METER(rt->gcStats.recycle++);
    } else {
        if (rt->gcBytes < rt->gcMaxBytes &&
            (tried_gc || rt->gcMallocBytes < rt->gcMaxBytes))
        {
            /*
             * Inline form of JS_ARENA_ALLOCATE adapted to truncate the current
             * arena's limit to a GC_PAGE_SIZE boundary, and to skip over every
             * GC_PAGE_SIZE-byte-aligned thing (which is actually not a thing,
             * it's a JSGCPageInfo record).
             */
            JSArenaPool *pool = &rt->gcArenaPool;
            JSArena *a = pool->current;
            size_t nb = sizeof(JSGCThing);
            jsuword p = a->avail;
            jsuword q = p + nb;

            if (q > (a->limit & ~GC_PAGE_MASK)) {
                thing = gc_new_arena(pool);
            } else {
                if ((p & GC_PAGE_MASK) == 0) {
                    /* Beware, p points to a JSGCPageInfo record! */
                    p = q;
                    q += nb;
                    /* Account for the skipped page-info slot... */
                    JS_ArenaCountAllocation(pool, nb);
                }
                a->avail = q;
                thing = (JSGCThing *)p;
            }
            /* ...and for the thing itself (intentionally both). */
            JS_ArenaCountAllocation(pool, nb);
        }

        /*
         * Consider doing a "last ditch" GC if thing couldn't be allocated.
         *
         * Keep rt->gcLock across the call into js_GC so we don't starve and
         * lose to racing threads who deplete the heap just after js_GC has
         * replenished it (or has synchronized with a racing GC that collected
         * a bunch of garbage). This unfair scheduling can happen on certain
         * operating systems. For the gory details, see Mozilla bug 162779
         * (http://bugzilla.mozilla.org/show_bug.cgi?id=162779).
         */
        if (!thing) {
            if (!tried_gc) {
                rt->gcPoke = JS_TRUE;
                js_GC(cx, GC_KEEP_ATOMS | GC_ALREADY_LOCKED);
                tried_gc = JS_TRUE;
                METER(rt->gcStats.retry++);
                goto retry;
            }
            goto fail;
        }

        /* Find the flags pointer given thing's address. */
        flagp = js_GetGCThingFlags(thing);
    }

    lrs = cx->localRootStack;
    if (lrs) {
        /*
         * If we're in a local root scope, don't set cx->newborn[type] at all,
         * to avoid entraining garbage from it for an unbounded amount of time
         * on this context. A caller will leave the local root scope and pop
         * this reference, allowing thing to be GC'd if it has no other refs.
         * See JS_EnterLocalRootScope and related APIs.
         */
        if (js_PushLocalRoot(cx, lrs, (jsval) thing) < 0)
            goto fail;
    } else {
        /*
         * No local root scope, so we're stuck with the old, fragile model of
         * depending on a pigeon-hole newborn per type per context.
         */
        cx->newborn[flags & GCF_TYPEMASK] = thing;
    }

    /* We can't fail now, so update flags and rt->gcBytes. */
    *flagp = (uint8)flags;
    rt->gcBytes += sizeof(JSGCThing) + sizeof(uint8);

    /*
     * Clear thing before unlocking in case a GC run is about to scan it,
     * finding it via cx->newborn[].
     */
    thing->next = NULL;
    thing->flagp = NULL;
    JS_UNLOCK_GC(rt);
    return thing;

fail:
    METER(rt->gcStats.fail++);
    JS_UNLOCK_GC(rt);
    JS_ReportOutOfMemory(cx);
    return NULL;
}
581 JSBool
582 js_LockGCThing(JSContext *cx, void *thing)
583 {
584 JSBool ok = js_LockGCThingRT(cx->runtime, thing);
585 if (!ok)
586 JS_ReportOutOfMemory(cx);
587 return ok;
588 }
/*
 * Pin thing against collection until a matching js_UnlockGCThingRT.  The
 * lock count lives in the flag byte's GCF_LOCKMASK bits; objects instead
 * keep a full count in rt->gcLocksHash (thing -> count) so nested locks
 * don't saturate the small bitfield.  A saturated ("stuck") count is never
 * modified again.  Returns JS_FALSE only on out-of-memory in the hashtable.
 */
JSBool
js_LockGCThingRT(JSRuntime *rt, void *thing)
{
    uint8 *flagp, flags, lockbits;
    JSBool ok;
    JSGCLockHashEntry *lhe;

    if (!thing)
        return JS_TRUE;
    flagp = js_GetGCThingFlags(thing);
    flags = *flagp;

    ok = JS_FALSE;
    JS_LOCK_GC(rt);
    lockbits = (flags & GCF_LOCKMASK);

    if (lockbits != GCF_LOCKMASK) {
        if ((flags & GCF_TYPEMASK) == GCX_OBJECT) {
            /* Objects may require "deep locking", i.e., rooting by value. */
            if (lockbits == 0) {
                /* First lock on this object: create its hash entry. */
                if (!rt->gcLocksHash) {
                    /* rt->gcLocksHash is created lazily (see js_InitGC). */
                    rt->gcLocksHash =
                        JS_NewDHashTable(JS_DHashGetStubOps(), NULL,
                                         sizeof(JSGCLockHashEntry),
                                         GC_ROOTS_SIZE);
                    if (!rt->gcLocksHash)
                        goto error;
                } else {
#ifdef DEBUG
                    /* An unlocked object must not already have an entry. */
                    JSDHashEntryHdr *hdr =
                        JS_DHashTableOperate(rt->gcLocksHash, thing,
                                             JS_DHASH_LOOKUP);
                    JS_ASSERT(JS_DHASH_ENTRY_IS_FREE(hdr));
#endif
                }
                lhe = (JSGCLockHashEntry *)
                    JS_DHashTableOperate(rt->gcLocksHash, thing, JS_DHASH_ADD);
                if (!lhe)
                    goto error;
                lhe->thing = thing;
                lhe->count = 1;
                *flagp = (uint8)(flags + GCF_LOCK);
            } else {
                /* Re-lock: bump the deep count; flag bits stay at GCF_LOCK. */
                JS_ASSERT(lockbits == GCF_LOCK);
                lhe = (JSGCLockHashEntry *)
                    JS_DHashTableOperate(rt->gcLocksHash, thing,
                                         JS_DHASH_LOOKUP);
                JS_ASSERT(JS_DHASH_ENTRY_IS_BUSY(&lhe->hdr));
                if (JS_DHASH_ENTRY_IS_BUSY(&lhe->hdr)) {
                    JS_ASSERT(lhe->count >= 1);
                    lhe->count++;
                }
            }
        } else {
            /* Non-objects count locks directly in the flag bits. */
            *flagp = (uint8)(flags + GCF_LOCK);
        }
    } else {
        /* Count already saturated: leave it stuck (never collectable). */
        METER(rt->gcStats.stuck++);
    }

    METER(rt->gcStats.lock++);
    ok = JS_TRUE;
error:
    JS_UNLOCK_GC(rt);
    return ok;
}
/*
 * Undo one js_LockGCThingRT.  Objects decrement their deep count in
 * rt->gcLocksHash, clearing the flag lock bits only when the count hits
 * zero; other things just subtract GCF_LOCK from the flag byte.  Stuck
 * (saturated) counts are left untouched.  Always returns JS_TRUE.
 */
JSBool
js_UnlockGCThingRT(JSRuntime *rt, void *thing)
{
    uint8 *flagp, flags, lockbits;
    JSGCLockHashEntry *lhe;

    if (!thing)
        return JS_TRUE;
    flagp = js_GetGCThingFlags(thing);
    flags = *flagp;

    JS_LOCK_GC(rt);
    lockbits = (flags & GCF_LOCKMASK);

    if (lockbits != GCF_LOCKMASK) {
        if ((flags & GCF_TYPEMASK) == GCX_OBJECT) {
            /* Defend against a call on an unlocked object. */
            if (lockbits != 0) {
                JS_ASSERT(lockbits == GCF_LOCK);
                lhe = (JSGCLockHashEntry *)
                    JS_DHashTableOperate(rt->gcLocksHash, thing,
                                         JS_DHASH_LOOKUP);
                JS_ASSERT(JS_DHASH_ENTRY_IS_BUSY(&lhe->hdr));
                if (JS_DHASH_ENTRY_IS_BUSY(&lhe->hdr) &&
                    --lhe->count == 0) {
                    /* Last unlock: drop the entry and the flag lock bits. */
                    (void) JS_DHashTableOperate(rt->gcLocksHash, thing,
                                                JS_DHASH_REMOVE);
                    *flagp = (uint8)(flags & ~GCF_LOCKMASK);
                }
            }
        } else {
            *flagp = (uint8)(flags - GCF_LOCK);
        }
    } else {
        METER(rt->gcStats.unstuck++);
    }

    /* The unlocked thing may now be garbage: poke the next GC. */
    rt->gcPoke = JS_TRUE;
    METER(rt->gcStats.unlock++);
    JS_UNLOCK_GC(rt);
    return JS_TRUE;
}
700 #ifdef GC_MARK_DEBUG
702 #include <stdio.h>
703 #include <stdlib.h>
704 #include "jsprf.h"
706 JS_FRIEND_DATA(FILE *) js_DumpGCHeap;
707 JS_EXPORT_DATA(void *) js_LiveThingToFind;
709 #ifdef HAVE_XPCONNECT
710 #include "dump_xpc.h"
711 #endif
/*
 * Debug helper (GC_MARK_DEBUG only): best-effort human-readable class name
 * for a GC-thing.  NOTE(review): dependent strings return a pointer into a
 * function-static buffer, so the result is only valid until the next call.
 */
static const char *
gc_object_class_name(void* thing)
{
    uint8 *flagp = js_GetGCThingFlags(thing);
    const char *className = "";
    static char depbuf[32];

    switch (*flagp & GCF_TYPEMASK) {
      case GCX_OBJECT: {
        JSObject *obj = (JSObject *)thing;
        JSClass *clasp = JSVAL_TO_PRIVATE(obj->slots[JSSLOT_CLASS]);
        className = clasp->name;
#ifdef HAVE_XPCONNECT
        /* Prefer the XPConnect wrapper's underlying class name, if any. */
        if (clasp->flags & JSCLASS_PRIVATE_IS_NSISUPPORTS) {
            jsval privateValue = obj->slots[JSSLOT_PRIVATE];

            JS_ASSERT(clasp->flags & JSCLASS_HAS_PRIVATE);
            if (!JSVAL_IS_VOID(privateValue)) {
                void *privateThing = JSVAL_TO_PRIVATE(privateValue);
                const char *xpcClassName = GetXPCObjectClassName(privateThing);

                if (xpcClassName)
                    className = xpcClassName;
            }
        }
#endif
        break;
      }

      case GCX_STRING:
      case GCX_MUTABLE_STRING: {
        JSString *str = (JSString *)thing;
        if (JSSTRING_IS_DEPENDENT(str)) {
            /* Describe the dependent substring's extent in depbuf. */
            JS_snprintf(depbuf, sizeof depbuf, "start:%u, length:%u",
                        JSSTRDEP_START(str), JSSTRDEP_LENGTH(str));
            className = depbuf;
        } else {
            className = "string";
        }
        break;
      }

      case GCX_DOUBLE:
        className = "double";
        break;
    }

    return className;
}
/*
 * Debug helper (GC_MARK_DEBUG only): print one line describing thing and
 * the mark-path that reached it.  prev is the innermost GCMarkNode; the
 * first loop rewinds to the outermost node, the second walks forward
 * building a dotted "name(class)." path.  Silently returns if the path is
 * empty (i.e., thing was reached with no mark stack).
 */
static void
gc_dump_thing(JSGCThing *thing, uint8 flags, GCMarkNode *prev, FILE *fp)
{
    GCMarkNode *next = NULL;
    char *path = NULL;

    /* Rewind to the root of the mark-node chain. */
    while (prev) {
        next = prev;
        prev = prev->prev;
    }
    /* Walk root-to-leaf, appending one "name(class)." segment per node. */
    while (next) {
        path = JS_sprintf_append(path, "%s(%s).",
                                 next->name,
                                 gc_object_class_name(next->thing));
        next = next->next;
    }
    if (!path)
        return;

    fprintf(fp, "%08lx ", (long)thing);
    switch (flags & GCF_TYPEMASK) {
      case GCX_OBJECT:
      {
        JSObject *obj = (JSObject *)thing;
        jsval privateValue = obj->slots[JSSLOT_PRIVATE];
        void *privateThing = JSVAL_IS_VOID(privateValue)
                             ? NULL
                             : JSVAL_TO_PRIVATE(privateValue);
        const char *className = gc_object_class_name(thing);
        fprintf(fp, "object %8p %s", privateThing, className);
        break;
      }
      case GCX_DOUBLE:
        fprintf(fp, "double %g", *(jsdouble *)thing);
        break;
      default:
        /* GCX_STRING, GCX_MUTABLE_STRING, and external string types. */
        fprintf(fp, "string %s", JS_GetStringBytes((JSString *)thing));
        break;
    }
    fprintf(fp, " via %s\n", path);
    free(path);    /* JS_sprintf_append result is heap-allocated */
}
#endif /* GC_MARK_DEBUG */
808 static void
809 gc_mark_atom_key_thing(void *thing, void *arg)
810 {
811 JSContext *cx = (JSContext *) arg;
813 GC_MARK(cx, thing, "atom", NULL);
814 }
/*
 * Mark atom and, if its key is a GC-thing, mark that thing too.  The
 * ATOM_MARK flag makes repeat calls cheap no-ops within one GC.
 */
void
js_MarkAtom(JSContext *cx, JSAtom *atom, void *arg)
{
    jsval key;

    if (atom->flags & ATOM_MARK)
        return;
    atom->flags |= ATOM_MARK;
    key = ATOM_KEY(atom);
    if (JSVAL_IS_GCTHING(key)) {
#ifdef GC_MARK_DEBUG
        /*
         * name is only declared/used under GC_MARK_DEBUG; the GC_MARK macro
         * ignores its name argument in non-debug builds.
         */
        char name[32];

        if (JSVAL_IS_STRING(key)) {
            JS_snprintf(name, sizeof name, "'%s'",
                        JS_GetStringBytes(JSVAL_TO_STRING(key)));
        } else {
            JS_snprintf(name, sizeof name, "<%x>", key);
        }
#endif
        GC_MARK(cx, JSVAL_TO_GCTHING(key), name, arg);
    }
}
/*
 * Recursively mark thing and everything reachable from it.  Sets GCF_MARK
 * in the thing's flag byte (early-returning if already set, so cycles
 * terminate), then walks type-specific children: object slots (via the
 * map's mark hook when present) and dependent-string bases.  arg is the
 * GCMarkNode chain in GC_MARK_DEBUG builds, unused otherwise.
 */
void
js_MarkGCThing(JSContext *cx, void *thing, void *arg)
{
    uint8 flags, *flagp;
    JSRuntime *rt;
    JSObject *obj;
    uint32 nslots;
    jsval v, *vp, *end;
    JSString *str;
#ifdef GC_MARK_DEBUG
    JSScope *scope;
    JSScopeProperty *sprop;
#endif

    if (!thing)
        return;

    flagp = js_GetGCThingFlags(thing);
    flags = *flagp;
    /* Marking a thing that is being finalized would be a GC logic error. */
    JS_ASSERT(flags != GCF_FINAL);
#ifdef GC_MARK_DEBUG
    if (js_LiveThingToFind == thing)
        gc_dump_thing(thing, flags, arg, stderr);
#endif

    /* Already marked: cycle or shared child; nothing more to do. */
    if (flags & GCF_MARK)
        return;

    *flagp |= GCF_MARK;
    rt = cx->runtime;
    METER(if (++rt->gcStats.depth > rt->gcStats.maxdepth)
              rt->gcStats.maxdepth = rt->gcStats.depth);

#ifdef GC_MARK_DEBUG
    if (js_DumpGCHeap)
        gc_dump_thing(thing, flags, arg, js_DumpGCHeap);
#endif

    switch (flags & GCF_TYPEMASK) {
      case GCX_OBJECT:
        obj = (JSObject *) thing;
        vp = obj->slots;
        if (!vp) {
            /* If obj->slots is null, obj must be a newborn. */
            JS_ASSERT(!obj->map);
            goto out;
        }
        /* A mark hook reports how many slots to scan; else scan live ones. */
        nslots = (obj->map->ops->mark)
                 ? obj->map->ops->mark(cx, obj, arg)
                 : JS_MIN(obj->map->freeslot, obj->map->nslots);
#ifdef GC_MARK_DEBUG
        scope = OBJ_IS_NATIVE(obj) ? OBJ_SCOPE(obj) : NULL;
#endif
        for (end = vp + nslots; vp < end; vp++) {
            v = *vp;
            if (JSVAL_IS_GCTHING(v)) {
#ifdef GC_MARK_DEBUG
                /* Derive a property/slot name for the mark-path dump. */
                char name[32];

                if (scope) {
                    uint32 slot;
                    jsval nval;

                    slot = vp - obj->slots;
                    for (sprop = SCOPE_LAST_PROP(scope); ;
                         sprop = sprop->parent) {
                        if (!sprop) {
                            /* No property owns this slot: a reserved one. */
                            switch (slot) {
                              case JSSLOT_PROTO:
                                strcpy(name, "__proto__");
                                break;
                              case JSSLOT_PARENT:
                                strcpy(name, "__parent__");
                                break;
                              case JSSLOT_PRIVATE:
                                strcpy(name, "__private__");
                                break;
                              default:
                                JS_snprintf(name, sizeof name,
                                            "**UNKNOWN SLOT %ld**",
                                            (long)slot);
                                break;
                            }
                            break;
                        }
                        if (sprop->slot == slot) {
                            nval = ID_TO_VALUE(sprop->id);
                            if (JSVAL_IS_INT(nval)) {
                                JS_snprintf(name, sizeof name, "%ld",
                                            (long)JSVAL_TO_INT(nval));
                            } else if (JSVAL_IS_STRING(nval)) {
                                JS_snprintf(name, sizeof name, "%s",
                                    JS_GetStringBytes(JSVAL_TO_STRING(nval)));
                            } else {
                                strcpy(name, "**FINALIZED ATOM KEY**");
                            }
                            break;
                        }
                    }
                } else {
                    strcpy(name, "**UNKNOWN OBJECT MAP ENTRY**");
                }
#endif
                GC_MARK(cx, JSVAL_TO_GCTHING(v), name, arg);
            }
        }
        break;

#ifdef DEBUG
      case GCX_STRING:
        /* Non-mutable strings must never be dependent. */
        str = (JSString *)thing;
        JS_ASSERT(!JSSTRING_IS_DEPENDENT(str));
        break;
#endif

      case GCX_MUTABLE_STRING:
        str = (JSString *)thing;
        /* A dependent string keeps its base string alive. */
        if (JSSTRING_IS_DEPENDENT(str))
            GC_MARK(cx, JSSTRDEP_BASE(str), "base", arg);
        break;
    }

out:
    METER(rt->gcStats.depth--);
}
/*
 * Roots-hashtable enumerator: dereference each registered root address and
 * mark the GC-thing it currently holds.  DEBUG builds also verify that the
 * value points into the GC arena pool, catching stale JS_AddNamedRoot
 * registrations.
 */
JS_STATIC_DLL_CALLBACK(JSDHashOperator)
gc_root_marker(JSDHashTable *table, JSDHashEntryHdr *hdr, uint32 num, void *arg)
{
    JSGCRootHashEntry *rhe = (JSGCRootHashEntry *)hdr;
    jsval *rp = (jsval *)rhe->root;
    jsval v = *rp;

    /* Ignore null object and scalar values. */
    if (!JSVAL_IS_NULL(v) && JSVAL_IS_GCTHING(v)) {
        JSContext *cx = (JSContext *)arg;
#ifdef DEBUG
        JSArena *a;
        jsuword firstpage;
        JSBool root_points_to_gcArenaPool = JS_FALSE;
        void *thing = JSVAL_TO_GCTHING(v);

        /* Scan every arena's live range for the rooted thing. */
        for (a = cx->runtime->gcArenaPool.first.next; a; a = a->next) {
            firstpage = FIRST_THING_PAGE(a);
            if (JS_UPTRDIFF(thing, firstpage) < a->avail - firstpage) {
                root_points_to_gcArenaPool = JS_TRUE;
                break;
            }
        }
        if (!root_points_to_gcArenaPool && rhe->name) {
            fprintf(stderr,
"JS API usage error: the address passed to JS_AddNamedRoot currently holds an\n"
"invalid jsval. This is usually caused by a missing call to JS_RemoveRoot.\n"
"The root's name is \"%s\".\n",
                    rhe->name);
        }
        JS_ASSERT(root_points_to_gcArenaPool);
#endif

        GC_MARK(cx, JSVAL_TO_GCTHING(v), rhe->name ? rhe->name : "root", NULL);
    }
    return JS_DHASH_NEXT;
}
1004 JS_STATIC_DLL_CALLBACK(JSDHashOperator)
1005 gc_lock_marker(JSDHashTable *table, JSDHashEntryHdr *hdr, uint32 num, void *arg)
1006 {
1007 JSGCLockHashEntry *lhe = (JSGCLockHashEntry *)hdr;
1008 void *thing = (void *)lhe->thing;
1009 JSContext *cx = (JSContext *)arg;
1011 GC_MARK(cx, thing, "locked object", NULL);
1012 return JS_DHASH_NEXT;
1013 }
1015 void
1016 js_ForceGC(JSContext *cx, uintN gcflags)
1017 {
1018 uintN i;
1020 for (i = 0; i < GCX_NTYPES; i++)
1021 cx->newborn[i] = NULL;
1022 cx->lastAtom = NULL;
1023 cx->runtime->gcPoke = JS_TRUE;
1024 js_GC(cx, gcflags);
1025 JS_ArenaFinish();
1026 }
/* Mark every GC-thing jsval in vec[0..len-1], labeled name for mark dumps. */
#define GC_MARK_JSVALS(cx, len, vec, name)                                    \
    JS_BEGIN_MACRO                                                            \
        jsval _v, *_vp, *_end;                                                \
                                                                              \
        for (_vp = vec, _end = _vp + len; _vp < _end; _vp++) {                \
            _v = *_vp;                                                        \
            if (JSVAL_IS_GCTHING(_v))                                         \
                GC_MARK(cx, JSVAL_TO_GCTHING(_v), name, NULL);                \
        }                                                                     \
    JS_END_MACRO
/*
 * Run a mark-and-sweep collection over cx's runtime.
 *
 * gcflags bits honored here:
 *   GC_LAST_CONTEXT   - final GC at shutdown; must not be deferred or
 *                       suppressed by the runtime state or the callback.
 *   GC_ALREADY_LOCKED - caller already holds the GC lock; we neither take
 *                       nor (except around the JSGC_END callback) drop it.
 *   GC_KEEP_ATOMS     - ORed in below when rt->gcKeepAtoms is set, and
 *                       passed to js_MarkAtomState.
 *
 * Phases: nesting/locking bookkeeping; mark (root hash, lock hash, atoms,
 * watchpoints, then every context's frame chains, newborns, exception,
 * rval2, stack headers, and local roots); sweep (finalize unmarked things,
 * with rt->gcRunning still set so finalizers cannot allocate GC-things);
 * free (rebuild the freelist and destroy arenas that came up all-clear).
 * If rt->gcLevel was bumped by a nested/concurrent invocation while we
 * collected, we restart to pick up the new garbage.
 */
void
js_GC(JSContext *cx, uintN gcflags)
{
    JSRuntime *rt;
    JSContext *iter, *acx;
    JSStackFrame *fp, *chain;
    uintN i, depth, nslots, type;
    JSStackHeader *sh;
    JSArena *a, **ap;
    uint8 flags, *flagp, *split;
    JSGCThing *thing, *limit, **flp, **oflp;
    GCFinalizeOp finalizer;
    JSBool all_clear;
#ifdef JS_THREADSAFE
    jsword currentThread;
    uint32 requestDebit;
#endif

    rt = cx->runtime;
#ifdef JS_THREADSAFE
    /* Avoid deadlock. */
    JS_ASSERT(!JS_IS_RUNTIME_LOCKED(rt));
#endif

    /*
     * Don't collect garbage if the runtime isn't up, and cx is not the last
     * context in the runtime. The last context must force a GC, and nothing
     * should suppress that final collection or there may be shutdown leaks,
     * or runtime bloat until the next context is created.
     */
    if (rt->state != JSRTS_UP && !(gcflags & GC_LAST_CONTEXT))
        return;

    /*
     * Let the API user decide to defer a GC if it wants to (unless this
     * is the last context). Invoke the callback regardless.
     */
    if (rt->gcCallback) {
        if (!rt->gcCallback(cx, JSGC_BEGIN) && !(gcflags & GC_LAST_CONTEXT))
            return;
    }

    /* Lock out other GC allocator and collector invocations. */
    if (!(gcflags & GC_ALREADY_LOCKED))
        JS_LOCK_GC(rt);

    /* Do nothing if no assignment has executed since the last GC. */
    if (!rt->gcPoke) {
        METER(rt->gcStats.nopoke++);
        if (!(gcflags & GC_ALREADY_LOCKED))
            JS_UNLOCK_GC(rt);
        return;
    }
    METER(rt->gcStats.poke++);

#ifdef JS_THREADSAFE
    /* Bump gcLevel and return rather than nest on this thread. */
    currentThread = js_CurrentThreadId();
    if (rt->gcThread == currentThread) {
        JS_ASSERT(rt->gcLevel > 0);
        rt->gcLevel++;
        METER(if (rt->gcLevel > rt->gcStats.maxlevel)
                  rt->gcStats.maxlevel = rt->gcLevel);
        if (!(gcflags & GC_ALREADY_LOCKED))
            JS_UNLOCK_GC(rt);
        return;
    }

    /*
     * If we're in one or more requests (possibly on more than one context)
     * running on the current thread, indicate, temporarily, that all these
     * requests are inactive. NB: if cx->thread is 0, then cx is not using
     * the request model, and does not contribute to rt->requestCount.
     */
    requestDebit = 0;
    if (cx->thread) {
        /*
         * Check all contexts for any with the same thread-id. XXX should we
         * keep a sub-list of contexts having the same id?
         */
        iter = NULL;
        while ((acx = js_ContextIterator(rt, JS_FALSE, &iter)) != NULL) {
            if (acx->thread == cx->thread && acx->requestDepth)
                requestDebit++;
        }
    } else {
        /*
         * We assert, but check anyway, in case someone is misusing the API.
         * Avoiding the loop over all of rt's contexts is a win in the event
         * that the GC runs only on request-less contexts with 0 thread-ids,
         * in a special thread such as might be used by the UI/DOM/Layout
         * "mozilla" or "main" thread in Mozilla-the-browser.
         */
        JS_ASSERT(cx->requestDepth == 0);
        if (cx->requestDepth)
            requestDebit = 1;
    }
    if (requestDebit) {
        JS_ASSERT(requestDebit <= rt->requestCount);
        rt->requestCount -= requestDebit;
        if (rt->requestCount == 0)
            JS_NOTIFY_REQUEST_DONE(rt);
    }

    /* If another thread is already in GC, don't attempt GC; wait instead. */
    if (rt->gcLevel > 0) {
        /* Bump gcLevel to restart the current GC, so it finds new garbage. */
        rt->gcLevel++;
        METER(if (rt->gcLevel > rt->gcStats.maxlevel)
                  rt->gcStats.maxlevel = rt->gcLevel);

        /* Wait for the other thread to finish, then resume our request. */
        while (rt->gcLevel > 0)
            JS_AWAIT_GC_DONE(rt);
        if (requestDebit)
            rt->requestCount += requestDebit;
        if (!(gcflags & GC_ALREADY_LOCKED))
            JS_UNLOCK_GC(rt);
        return;
    }

    /* No other thread is in GC, so indicate that we're now in GC. */
    rt->gcLevel = 1;
    rt->gcThread = currentThread;

    /* Wait for all other requests to finish. */
    while (rt->requestCount > 0)
        JS_AWAIT_REQUEST_DONE(rt);

#else /* !JS_THREADSAFE */

    /* Bump gcLevel and return rather than nest; the outer gc will restart. */
    rt->gcLevel++;
    METER(if (rt->gcLevel > rt->gcStats.maxlevel)
              rt->gcStats.maxlevel = rt->gcLevel);
    if (rt->gcLevel > 1)
        return;

#endif /* !JS_THREADSAFE */

    /*
     * Set rt->gcRunning here within the GC lock, and after waiting for any
     * active requests to end, so that new requests that try to JS_AddRoot,
     * JS_RemoveRoot, or JS_RemoveRootRT block in JS_BeginRequest waiting for
     * rt->gcLevel to drop to zero, while request-less calls to the *Root*
     * APIs block in js_AddRoot or js_RemoveRoot (see above in this file),
     * waiting for GC to finish.
     */
    rt->gcRunning = JS_TRUE;
    JS_UNLOCK_GC(rt);

    /* If a suspended compile is running on another context, keep atoms. */
    if (rt->gcKeepAtoms)
        gcflags |= GC_KEEP_ATOMS;

    /* Reset malloc counter. */
    rt->gcMallocBytes = 0;

    /* Drop atoms held by the property cache, and clear property weak links. */
    js_DisablePropertyCache(cx);
    js_FlushPropertyCache(cx);
#ifdef DEBUG_brendan
  { extern void js_DumpScopeMeters(JSRuntime *rt);
    js_DumpScopeMeters(rt);
  }
#endif

restart:
    /* A nested/concurrent bump of gcLevel sends us back here (see "out:"). */
    rt->gcNumber++;

    /*
     * Mark phase.
     */
    JS_DHashTableEnumerate(&rt->gcRootsHash, gc_root_marker, cx);
    if (rt->gcLocksHash)
        JS_DHashTableEnumerate(rt->gcLocksHash, gc_lock_marker, cx);
    js_MarkAtomState(&rt->atomState, gcflags, gc_mark_atom_key_thing, cx);
    js_MarkWatchPoints(rt);
    iter = NULL;
    while ((acx = js_ContextIterator(rt, JS_TRUE, &iter)) != NULL) {
        /*
         * Iterate frame chain and dormant chains. Temporarily tack current
         * frame onto the head of the dormant list to ease iteration.
         *
         * (NB: see comment on this whole "dormant" thing in js_Execute.)
         */
        chain = acx->fp;
        if (chain) {
            JS_ASSERT(!chain->dormantNext);
            chain->dormantNext = acx->dormantFrameChain;
        } else {
            chain = acx->dormantFrameChain;
        }

        /* Outer loop walks dormant chains; inner do-while walks fp->down. */
        for (fp = chain; fp; fp = chain = chain->dormantNext) {
            do {
                if (fp->callobj)
                    GC_MARK(cx, fp->callobj, "call object", NULL);
                if (fp->argsobj)
                    GC_MARK(cx, fp->argsobj, "arguments object", NULL);
                if (fp->varobj)
                    GC_MARK(cx, fp->varobj, "variables object", NULL);
                if (fp->script) {
                    js_MarkScript(cx, fp->script, NULL);
                    if (fp->spbase) {
                        /*
                         * Don't mark what has not been pushed yet, or what
                         * has been popped already.
                         */
                        depth = fp->script->depth;
                        nslots = (JS_UPTRDIFF(fp->sp, fp->spbase)
                                  < depth * sizeof(jsval))
                                 ? (uintN)(fp->sp - fp->spbase)
                                 : depth;
                        GC_MARK_JSVALS(cx, nslots, fp->spbase, "operand");
                    }
                }
                GC_MARK(cx, fp->thisp, "this", NULL);
                if (fp->argv) {
                    /* Mark argc actuals, or nargs slots if fun has more formals. */
                    nslots = fp->argc;
                    if (fp->fun && fp->fun->nargs > nslots)
                        nslots = fp->fun->nargs;
                    GC_MARK_JSVALS(cx, nslots, fp->argv, "arg");
                }
                if (JSVAL_IS_GCTHING(fp->rval))
                    GC_MARK(cx, JSVAL_TO_GCTHING(fp->rval), "rval", NULL);
                if (fp->vars)
                    GC_MARK_JSVALS(cx, fp->nvars, fp->vars, "var");
                GC_MARK(cx, fp->scopeChain, "scope chain", NULL);
                if (fp->sharpArray)
                    GC_MARK(cx, fp->sharpArray, "sharp array", NULL);
            } while ((fp = fp->down) != NULL);
        }

        /* Cleanup temporary "dormant" linkage. */
        if (acx->fp)
            acx->fp->dormantNext = NULL;

        /* Mark other roots-by-definition in acx. */
        GC_MARK(cx, acx->globalObject, "global object", NULL);
        GC_MARK(cx, acx->newborn[GCX_OBJECT], "newborn object", NULL);
        GC_MARK(cx, acx->newborn[GCX_STRING], "newborn string", NULL);
        GC_MARK(cx, acx->newborn[GCX_DOUBLE], "newborn double", NULL);
        GC_MARK(cx, acx->newborn[GCX_MUTABLE_STRING], "newborn mutable string",
                NULL);
        for (i = GCX_EXTERNAL_STRING; i < GCX_NTYPES; i++)
            GC_MARK(cx, acx->newborn[i], "newborn external string", NULL);
        if (acx->lastAtom)
            GC_MARK_ATOM(cx, acx->lastAtom, NULL);
#if JS_HAS_EXCEPTIONS
        if (acx->throwing && JSVAL_IS_GCTHING(acx->exception))
            GC_MARK(cx, JSVAL_TO_GCTHING(acx->exception), "exception", NULL);
#endif
#if JS_HAS_LVALUE_RETURN
        if (acx->rval2set && JSVAL_IS_GCTHING(acx->rval2))
            GC_MARK(cx, JSVAL_TO_GCTHING(acx->rval2), "rval2", NULL);
#endif

        for (sh = acx->stackHeaders; sh; sh = sh->down) {
            METER(rt->gcStats.stackseg++);
            METER(rt->gcStats.segslots += sh->nslots);
            GC_MARK_JSVALS(cx, sh->nslots, JS_STACK_SEGMENT(sh), "stack");
        }

        if (acx->localRootStack)
            js_MarkLocalRoots(cx, acx->localRootStack);
    }
#ifdef DUMP_CALL_TABLE
    js_DumpCallTable(cx);
#endif

    if (rt->gcCallback)
        (void) rt->gcCallback(cx, JSGC_MARK_END);

    /*
     * Sweep phase.
     * Finalize as we sweep, outside of rt->gcLock, but with rt->gcRunning set
     * so that any attempt to allocate a GC-thing from a finalizer will fail,
     * rather than nest badly and leave the unmarked newborn to be swept.
     */
    js_SweepAtomState(&rt->atomState);
    js_SweepScopeProperties(rt);
    js_SweepScriptFilenames(rt);
    for (a = rt->gcArenaPool.first.next; a; a = a->next) {
        /* Flag bytes start at a->base; things start at the first thing page. */
        flagp = (uint8 *) a->base;
        split = (uint8 *) FIRST_THING_PAGE(a);
        limit = (JSGCThing *) a->avail;
        for (thing = (JSGCThing *) split; thing < limit; thing++) {
            if (((jsuword)thing & GC_PAGE_MASK) == 0) {
                /*
                 * First thing-slot of each GC page is not an allocated thing
                 * (NOTE(review): presumably a page header -- confirm against
                 * FIRST_THING_PAGE/GC_PAGE_MASK definitions earlier in this
                 * file); skip it and its flag byte.
                 */
                flagp++;
                thing++;
            }
            flags = *flagp;
            if (flags & GCF_MARK) {
                /* Live: clear the mark bit for the next collection. */
                *flagp &= ~GCF_MARK;
            } else if (!(flags & (GCF_LOCKMASK | GCF_FINAL))) {
                /* Call the finalizer with GCF_FINAL ORed into flags. */
                type = flags & GCF_TYPEMASK;
                finalizer = gc_finalizers[type];
                if (finalizer) {
                    *flagp = (uint8)(flags | GCF_FINAL);
                    if (type >= GCX_EXTERNAL_STRING)
                        js_PurgeDeflatedStringCache((JSString *)thing);
                    finalizer(cx, thing);
                }

                /* Set flags to GCF_FINAL, signifying that thing is free. */
                *flagp = GCF_FINAL;

                JS_ASSERT(rt->gcBytes >= sizeof(JSGCThing) + sizeof(uint8));
                rt->gcBytes -= sizeof(JSGCThing) + sizeof(uint8);
            }
            /*
             * When the flag pointer reaches the split, jump it over the
             * things region to the next run of flag bytes (NOTE(review):
             * assumed arena layout -- confirm against GC_THINGS_SIZE use in
             * the allocator).
             */
            if (++flagp == split)
                flagp += GC_THINGS_SIZE;
        }
    }

    /*
     * Free phase.
     * Free any unused arenas and rebuild the JSGCThing freelist.
     */
    ap = &rt->gcArenaPool.first.next;
    a = *ap;
    if (!a)
        goto out;
    all_clear = JS_TRUE;
    flp = oflp = &rt->gcFreeList;
    *flp = NULL;
    METER(rt->gcStats.freelen = 0);

    do {
        flagp = (uint8 *) a->base;
        split = (uint8 *) FIRST_THING_PAGE(a);
        limit = (JSGCThing *) a->avail;
        for (thing = (JSGCThing *) split; thing < limit; thing++) {
            if (((jsuword)thing & GC_PAGE_MASK) == 0) {
                /* Skip the reserved slot at each page start (see sweep). */
                flagp++;
                thing++;
            }
            if (*flagp != GCF_FINAL) {
                all_clear = JS_FALSE;
            } else {
                /* Free thing: thread it onto the freelist being rebuilt. */
                thing->flagp = flagp;
                *flp = thing;
                flp = &thing->next;
                METER(rt->gcStats.freelen++);
            }
            if (++flagp == split)
                flagp += GC_THINGS_SIZE;
        }

        if (all_clear) {
            /*
             * Every thing in this arena was free, so destroy the arena and
             * rewind flp to oflp: the links just threaded through the dying
             * arena's things are dropped from the freelist.
             */
            JS_ARENA_DESTROY(&rt->gcArenaPool, a, ap);
            flp = oflp;
            METER(rt->gcStats.afree++);
        } else {
            /* Keep this arena; remember the freelist tail reached so far. */
            ap = &a->next;
            all_clear = JS_TRUE;
            oflp = flp;
        }
    } while ((a = *ap) != NULL);

    /* Terminate the new freelist. */
    *flp = NULL;

    if (rt->gcCallback)
        (void) rt->gcCallback(cx, JSGC_FINALIZE_END);
#ifdef DEBUG_brendan
  { extern void DumpSrcNoteSizeHist();
    DumpSrcNoteSizeHist();
  }
#endif

out:
    JS_LOCK_GC(rt);
    if (rt->gcLevel > 1) {
        /* Someone bumped gcLevel while we collected: restart for new garbage. */
        rt->gcLevel = 1;
        JS_UNLOCK_GC(rt);
        goto restart;
    }
    js_EnablePropertyCache(cx);
    rt->gcLevel = 0;
    rt->gcLastBytes = rt->gcBytes;
    rt->gcPoke = rt->gcRunning = JS_FALSE;

#ifdef JS_THREADSAFE
    /* If we were invoked during a request, pay back the temporary debit. */
    if (requestDebit)
        rt->requestCount += requestDebit;
    rt->gcThread = 0;
    JS_NOTIFY_GC_DONE(rt);
    if (!(gcflags & GC_ALREADY_LOCKED))
        JS_UNLOCK_GC(rt);
#endif

    /*
     * Run the JSGC_END callback outside the GC lock; if the caller holds it
     * (GC_ALREADY_LOCKED), drop it around the callback and reacquire after.
     */
    if (rt->gcCallback) {
        if (gcflags & GC_ALREADY_LOCKED)
            JS_UNLOCK_GC(rt);
        (void) rt->gcCallback(cx, JSGC_END);
        if (gcflags & GC_ALREADY_LOCKED)
            JS_LOCK_GC(rt);
    }
}