1 /* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
2 *
3 * ***** BEGIN LICENSE BLOCK *****
4 * Version: MPL 1.1/GPL 2.0/LGPL 2.1
5 *
6 * The contents of this file are subject to the Mozilla Public License Version
7 * 1.1 (the "License"); you may not use this file except in compliance with
8 * the License. You may obtain a copy of the License at
9 * http://www.mozilla.org/MPL/
10 *
11 * Software distributed under the License is distributed on an "AS IS" basis,
12 * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
13 * for the specific language governing rights and limitations under the
14 * License.
15 *
16 * The Original Code is Mozilla Communicator client code, released
17 * March 31, 1998.
18 *
19 * The Initial Developer of the Original Code is
20 * Netscape Communications Corporation.
21 * Portions created by the Initial Developer are Copyright (C) 1998
22 * the Initial Developer. All Rights Reserved.
23 *
24 * Contributor(s):
25 *
26 * Alternatively, the contents of this file may be used under the terms of
27 * either of the GNU General Public License Version 2 or later (the "GPL"),
28 * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
29 * in which case the provisions of the GPL or the LGPL are applicable instead
30 * of those above. If you wish to allow use of your version of this file only
31 * under the terms of either the GPL or the LGPL, and not to allow others to
32 * use your version of this file under the terms of the MPL, indicate your
33 * decision by deleting the provisions above and replace them with the notice
34 * and other provisions required by the GPL or the LGPL. If you do not delete
35 * the provisions above, a recipient may use your version of this file under
36 * the terms of any one of the MPL, the GPL or the LGPL.
37 *
38 * ***** END LICENSE BLOCK ***** */
40 /*
41 * Lifetime-based fast allocation, inspired by much prior art, including
42 * "Fast Allocation and Deallocation of Memory Based on Object Lifetimes"
43 * David R. Hanson, Software -- Practice and Experience, Vol. 20(1).
44 */
45 #include "jsstddef.h"
46 #include <stdlib.h>
47 #include <string.h>
48 #include "jstypes.h"
49 #include "jsbit.h"
50 #include "jsarena.h" /* Added by JSIFY */
51 #include "jsutil.h" /* Added by JSIFY */
52 #include "jslock.h"
54 static JSArena *arena_freelist;
56 #ifdef JS_THREADSAFE
57 static JSLock *arena_freelist_lock;
58 #endif
60 #ifdef JS_ARENAMETER
61 static JSArenaStats *arena_stats_list;
63 #define COUNT(pool,what) (pool)->stats.what++
64 #else
65 #define COUNT(pool,what) /* nothing */
66 #endif
68 #define JS_ARENA_DEFAULT_ALIGN sizeof(double)
/*
 * Initialize pool to dole out aligned allocations from arenas of roughly
 * |size| bytes.  |align| must be a power of two (0 selects the default,
 * sizeof(double)); |name| labels the pool in JS_ARENAMETER statistics.
 */
JS_PUBLIC_API(void)
JS_InitArenaPool(JSArenaPool *pool, const char *name, size_t size, size_t align)
{
#ifdef JS_THREADSAFE
    /* Must come through here once in primordial thread to init safely! */
    /*
     * NOTE(review): this check-then-create of arena_freelist_lock is racy if
     * two threads arrive before the lock exists -- confirm all first calls
     * really happen on the primordial thread, as required above.
     */
    if (!arena_freelist_lock) {
        arena_freelist_lock = JS_NEW_LOCK();
        JS_ASSERT(arena_freelist_lock);
    }
#endif
    if (align == 0)
        align = JS_ARENA_DEFAULT_ALIGN;
    /* mask = align-1 (align rounded up to a power of two), used by
       JS_ARENA_ALIGN to round pointers with (p + mask) & ~mask. */
    pool->mask = JS_BITMASK(JS_CeilingLog2(align));
    /* The built-in first arena starts out empty (base == avail == limit),
       so the first allocation always goes through JS_ArenaAllocate. */
    pool->first.next = NULL;
    pool->first.base = pool->first.avail = pool->first.limit =
        JS_ARENA_ALIGN(pool, &pool->first + 1);
    pool->current = &pool->first;
    pool->arenasize = size;
#ifdef JS_ARENAMETER
    memset(&pool->stats, 0, sizeof pool->stats);
    /* NOTE(review): strdup result is not checked; stats.name may be NULL
       under OOM (JS_DumpArenaStats would then pass NULL to fprintf). */
    pool->stats.name = strdup(name);
    pool->stats.next = arena_stats_list;
    arena_stats_list = &pool->stats;
#endif
}
96 /*
97 * An allocation that consumes more than pool->arenasize also has a header
98 * pointing back to its previous arena's next member. This header is not
99 * included in [a->base, a->limit), so its space can't be wrongly claimed.
100 *
101 * As the header is a pointer, it must be well-aligned. If pool->mask is
102 * greater than or equal to POINTER_MASK, the header just preceding a->base
103 * for an oversized arena a is well-aligned, because a->base is well-aligned.
104 * However, we may need to add more space to pad the JSArena ** back-pointer
105 * so that it lies just behind a->base, because a might not be aligned such
106 * that (jsuword)(a + 1) is on a pointer boundary.
107 *
108 * By how much must we pad? Let M be the alignment modulus for pool and P
109 * the modulus for a pointer. Given M >= P, the greatest distance between a
110 * pointer aligned on an M boundary and one aligned on a P boundary is M-P.
111 * If M and P are powers of two, then M-P = (pool->mask - POINTER_MASK).
112 *
113 * How much extra padding might spill over unused into the remainder of the
114 * allocation, in the worst case (where M > P)?
115 *
116 * If we add M-P to the nominal back-pointer address and then round down to
117 * align on a P boundary, we will use at most M-P bytes of padding, and at
118 * least P (M > P => M >= 2P; M == 2P gives the least padding, P). So if we
119 * use P bytes of padding, then we will overallocate a by P+M-1 bytes, as we
120 * also add M-1 to the estimated size in case malloc returns an odd pointer.
121 * a->limit must include this overestimation to satisfy a->avail in [a->base,
122 * a->limit].
123 *
124 * Similarly, if pool->mask is less than POINTER_MASK, we must include enough
125 * space in the header size to align the back-pointer on a P boundary so that
126 * it can be found by subtracting P from a->base. This means a->base must be
127 * on a P boundary, even though subsequent allocations from a may be aligned
128 * on a lesser (M) boundary. Given powers of two M and P as above, the extra
129 * space needed when P > M is P-M or POINTER_MASK - pool->mask.
130 *
131 * The size of a header including padding is given by the HEADER_SIZE macro,
132 * below, for any pool (for any value of M).
133 *
134 * The mask to align a->base for any pool is (pool->mask | POINTER_MASK), or
135 * HEADER_BASE_MASK(pool).
136 *
137 * PTR_TO_HEADER computes the address of the back-pointer, given an oversized
138 * allocation at p. By definition, p must be a->base for the arena a that
139 * contains p. GET_HEADER and SET_HEADER operate on an oversized arena a, in
140 * the case of SET_HEADER with back-pointer ap.
141 */
/* Low bits that must be clear in any well-aligned pointer. */
#define POINTER_MASK            ((jsuword)(JS_ALIGN_OF_POINTER - 1))

/* Back-pointer slot plus |M - P| alignment padding; see the comment above. */
#define HEADER_SIZE(pool)       (sizeof(JSArena **)                           \
                                 + (((pool)->mask < POINTER_MASK)             \
                                    ? POINTER_MASK - (pool)->mask            \
                                    : (pool)->mask - POINTER_MASK))

/* a->base of an oversized arena must satisfy both pool and pointer alignment. */
#define HEADER_BASE_MASK(pool)  ((pool)->mask | POINTER_MASK)

/* Address of the JSArena ** back-pointer just before an oversized base p. */
#define PTR_TO_HEADER(pool,p)   (JS_ASSERT(((jsuword)(p)                      \
                                            & HEADER_BASE_MASK(pool))         \
                                           == 0),                             \
                                 (JSArena ***)(p) - 1)

/* Read/write the back-pointer of oversized arena a (ap points at the
   previous arena's next member). */
#define GET_HEADER(pool,a)      (*PTR_TO_HEADER(pool, (a)->base))
#define SET_HEADER(pool,a,ap)   (*PTR_TO_HEADER(pool, (a)->base) = (ap))
/*
 * Allocate nb bytes from pool.  nb must already be rounded up to the pool's
 * alignment (the JS_ARENA_ALLOCATE macro does this).  Grows the pool with a
 * reclaimed freelist arena or a fresh malloc when no arena has room.
 * Returns NULL on OOM.
 */
JS_PUBLIC_API(void *)
JS_ArenaAllocate(JSArenaPool *pool, size_t nb)
{
    JSArena **ap, **bp, *a, *b;
    jsuword extra, hdrsz, gross, sz;
    void *p;

    /* Search pool from current forward till we find or make enough space. */
    JS_ASSERT((nb & pool->mask) == 0);
    /* NOTE(review): a->avail + nb can wrap jsuword arithmetic for huge nb;
       callers are trusted to pass sane sizes -- confirm upstream checks. */
    for (a = pool->current; a->avail + nb > a->limit; pool->current = a) {
        ap = &a->next;
        if (!*ap) {
            /* Not enough space in pool -- try to reclaim a free arena. */
            /* extra != 0 marks an oversized request needing a back-pointer
               header just before base (see HEADER_SIZE comment above). */
            extra = (nb > pool->arenasize) ? HEADER_SIZE(pool) : 0;
            hdrsz = sizeof *a + extra + pool->mask;
            gross = hdrsz + JS_MAX(nb, pool->arenasize);
            /* Taking the address of the static is safe unlocked; it is only
               dereferenced inside the lock below. */
            bp = &arena_freelist;
            JS_ACQUIRE_LOCK(arena_freelist_lock);
            while ((b = *bp) != NULL) {
                /*
                 * Insist on exact arenasize match if nb is not greater than
                 * arenasize.  Otherwise take any arena big enough, but not by
                 * more than gross + arenasize.
                 */
                sz = JS_UPTRDIFF(b->limit, b);
                if (extra
                    ? sz >= gross && sz <= gross + pool->arenasize
                    : sz == gross) {
                    *bp = b->next;
                    JS_RELEASE_LOCK(arena_freelist_lock);
                    b->next = NULL;
                    COUNT(pool, nreclaims);
                    goto claim;
                }
                bp = &b->next;
            }

            /* Nothing big enough on the freelist, so we must malloc. */
            JS_RELEASE_LOCK(arena_freelist_lock);
            b = (JSArena *) malloc(gross);
            if (!b)
                return 0;
            b->next = NULL;
            b->limit = (jsuword)b + gross;
            JS_COUNT_ARENA(pool,++);
            COUNT(pool, nmallocs);

        claim:
            /* If oversized, store ap in the header, just before a->base. */
            *ap = a = b;
            JS_ASSERT(gross <= JS_UPTRDIFF(a->limit, a));
            if (extra) {
                /* Align base down so the back-pointer fits just before it. */
                a->base = a->avail =
                    ((jsuword)a + hdrsz) & ~HEADER_BASE_MASK(pool);
                SET_HEADER(pool, a, ap);
            } else {
                a->base = a->avail = JS_ARENA_ALIGN(pool, a + 1);
            }
            continue;
        }
        a = *ap;                /* move to next arena */
    }

    /* Bump-allocate nb bytes from the arena that had (or just got) room. */
    p = (void *)a->avail;
    a->avail += nb;
    JS_ASSERT(a->base <= a->avail && a->avail <= a->limit);
    return p;
}
/*
 * Grow the oversized allocation p (of current size |size|) by incr bytes by
 * realloc'ing its entire arena.  Returns the possibly-moved base pointer,
 * or NULL on OOM (in which case the old arena remains valid and linked).
 * size + incr must exceed pool->arenasize -- p must own a whole arena.
 */
JS_PUBLIC_API(void *)
JS_ArenaRealloc(JSArenaPool *pool, void *p, size_t size, size_t incr)
{
    JSArena **ap, *a, *b;
    jsuword boff, aoff, extra, hdrsz, gross;

    /*
     * Use the oversized-single-allocation header to avoid searching for ap.
     * See JS_ArenaAllocate, the SET_HEADER call.
     */
    if (size > pool->arenasize) {
        ap = *PTR_TO_HEADER(pool, p);
        a = *ap;
    } else {
        /* Growing a normal allocation into oversized: p must live in
           pool->current; walk the list to find its predecessor link. */
        ap = &pool->first.next;
        while ((a = *ap) != pool->current)
            ap = &a->next;
    }

    JS_ASSERT(a->base == (jsuword)p);
    /* Remember base's offset within the arena to detect realignment below. */
    boff = JS_UPTRDIFF(a->base, a);
    aoff = size + incr;
    JS_ASSERT(aoff > pool->arenasize);
    extra = HEADER_SIZE(pool);                  /* oversized header holds ap */
    hdrsz = sizeof *a + extra + pool->mask;     /* header and alignment slop */
    gross = hdrsz + aoff;
    /* On failure the old arena is still reachable via *ap, so no leak. */
    a = (JSArena *) realloc(a, gross);
    if (!a)
        return NULL;
#ifdef JS_ARENAMETER
    pool->stats.nreallocs++;
#endif

    if (a != *ap) {
        /* Oops, realloc moved the allocation: update other pointers to a. */
        if (pool->current == *ap)
            pool->current = a;
        /* If the next arena is oversized, its header still points at the
           old arena's next member; repoint it at the moved arena's. */
        b = a->next;
        if (b && b->avail - b->base > pool->arenasize) {
            JS_ASSERT(GET_HEADER(pool, b) == &(*ap)->next);
            SET_HEADER(pool, b, &a->next);
        }

        /* Now update *ap, the next link of the arena before a. */
        *ap = a;
    }

    /* Recompute base/limit/avail for the (possibly moved) arena. */
    a->base = ((jsuword)a + hdrsz) & ~HEADER_BASE_MASK(pool);
    a->limit = (jsuword)a + gross;
    a->avail = JS_ARENA_ALIGN(pool, a->base + aoff);
    JS_ASSERT(a->base <= a->avail && a->avail <= a->limit);

    /* Check whether realloc aligned differently, and copy if necessary. */
    if (boff != JS_UPTRDIFF(a->base, a))
        memmove((void *)a->base, (char *)a + boff, size);

    /* Store ap in the oversized-load arena header. */
    SET_HEADER(pool, a, ap);
    return (void *)a->base;
}
285 JS_PUBLIC_API(void *)
286 JS_ArenaGrow(JSArenaPool *pool, void *p, size_t size, size_t incr)
287 {
288 void *newp;
290 /*
291 * If p points to an oversized allocation, it owns an entire arena, so we
292 * can simply realloc the arena.
293 */
294 if (size > pool->arenasize)
295 return JS_ArenaRealloc(pool, p, size, incr);
297 JS_ARENA_ALLOCATE(newp, pool, size + incr);
298 if (newp)
299 memcpy(newp, p, size);
300 return newp;
301 }
/*
 * Free tail arenas linked after head, which may not be the true list head.
 * Reset pool->current to point to head in case it pointed at a tail arena.
 *
 * If reallyFree, the arenas go back to the malloc heap; otherwise the whole
 * detached chain is spliced onto the front of the shared freelist for reuse.
 */
static void
FreeArenaList(JSArenaPool *pool, JSArena *head, JSBool reallyFree)
{
    JSArena **ap, *a;

    ap = &head->next;
    a = *ap;
    if (!a)
        return;

#ifdef DEBUG
    /* Reset each doomed arena's avail cursor and poison its unused space;
       then reload a, since the loop above advanced it to NULL. */
    do {
        JS_ASSERT(a->base <= a->avail && a->avail <= a->limit);
        a->avail = a->base;
        JS_CLEAR_UNUSED(a);
    } while ((a = a->next) != NULL);
    a = *ap;
#endif

    if (reallyFree) {
        /* Unlink and free each arena one at a time. */
        do {
            *ap = a->next;
            JS_CLEAR_ARENA(a);
            JS_COUNT_ARENA(pool,--);
            free(a);
        } while ((a = *ap) != NULL);
    } else {
        /* Insert the whole arena chain at the front of the freelist. */
        /* Walk to the last next link so the chain's tail can be tied to
           the current freelist head. */
        do {
            ap = &(*ap)->next;
        } while (*ap);
        JS_ACQUIRE_LOCK(arena_freelist_lock);
        *ap = arena_freelist;
        arena_freelist = a;
        JS_RELEASE_LOCK(arena_freelist_lock);
        head->next = NULL;
    }

    pool->current = head;
}
/*
 * Roll the pool back to mark, a pointer previously captured with
 * JS_ARENA_MARK: retract the avail cursor of the arena containing mark and
 * really free every arena after it.  A mark outside the pool is a no-op.
 */
JS_PUBLIC_API(void)
JS_ArenaRelease(JSArenaPool *pool, char *mark)
{
    JSArena *a;

    for (a = &pool->first; a; a = a->next) {
        JS_ASSERT(a->base <= a->avail && a->avail <= a->limit);

        /* Unsigned-difference trick: mark is in [a->base, a->avail] iff
           (mark - base) <= (avail - base) in unsigned arithmetic. */
        if (JS_UPTRDIFF(mark, a->base) <= JS_UPTRDIFF(a->avail, a->base)) {
            a->avail = JS_ARENA_ALIGN(pool, mark);
            JS_ASSERT(a->avail <= a->limit);
            FreeArenaList(pool, a, JS_TRUE);
            return;
        }
    }
}
/*
 * Free the single allocation at p of the given (pre-alignment) size.  An
 * allocation that consumes its whole arena is returned to the malloc heap;
 * a trailing allocation in a shared arena is merely retracted.
 */
JS_PUBLIC_API(void)
JS_ArenaFreeAllocation(JSArenaPool *pool, void *p, size_t size)
{
    JSArena **ap, *a, *b;
    jsuword q;

    /*
     * If the allocation is oversized, it consumes an entire arena, and it has
     * a header just before the allocation pointing back to its predecessor's
     * next member.  Otherwise, we have to search pool for a.
     */
    if (size > pool->arenasize) {
        ap = *PTR_TO_HEADER(pool, p);
        a = *ap;
    } else {
        /* q is where avail would sit if p..p+size were the last allocation
           in its arena. */
        q = (jsuword)p + size;
        q = JS_ARENA_ALIGN(pool, q);
        ap = &pool->first.next;
        while ((a = *ap) != NULL) {
            JS_ASSERT(a->base <= a->avail && a->avail <= a->limit);

            if (a->avail == q) {
                /*
                 * If a is consumed by the allocation at p, we can free it to
                 * the malloc heap.
                 */
                if (a->base == (jsuword)p)
                    break;

                /*
                 * We can't free a, but we can "retract" its avail cursor --
                 * whether there are others after it in pool.
                 */
                a->avail = (jsuword)p;
                return;
            }
            ap = &a->next;
        }
        /* NOTE(review): if (p, size) is not the last allocation of any
           arena, the loop exits with a == NULL and the a->next dereference
           below crashes -- callers must pass a valid trailing allocation. */
    }

    /*
     * At this point, a is doomed, so ensure that pool->current doesn't point
     * at it.  We must preserve LIFO order of mark/release cursors, so we use
     * the oversized-allocation arena's back pointer (or if not oversized, we
     * use the result of searching the entire pool) to compute the address of
     * the arena that precedes a.
     */
    /* ap points at the predecessor's next member; back up by offsetof to
       recover the predecessor JSArena itself. */
    if (pool->current == a)
        pool->current = (JSArena *) ((char *)ap - offsetof(JSArena, next));

    /*
     * This is a non-LIFO deallocation, so take care to fix up a->next's back
     * pointer in its header, if a->next is oversized.
     */
    *ap = b = a->next;
    if (b && b->avail - b->base > pool->arenasize) {
        JS_ASSERT(GET_HEADER(pool, b) == &a->next);
        SET_HEADER(pool, b, ap);
    }
    JS_CLEAR_ARENA(a);
    JS_COUNT_ARENA(pool,--);
    free(a);
}
/*
 * Release all of pool's arenas onto the shared freelist for reuse; the pool
 * itself remains initialized and usable.  Use JS_FinishArenaPool to really
 * free the arenas to the malloc heap.
 */
JS_PUBLIC_API(void)
JS_FreeArenaPool(JSArenaPool *pool)
{
    FreeArenaList(pool, &pool->first, JS_FALSE);
    COUNT(pool, ndeallocs);
}
436 JS_PUBLIC_API(void)
437 JS_FinishArenaPool(JSArenaPool *pool)
438 {
439 FreeArenaList(pool, &pool->first, JS_TRUE);
440 #ifdef JS_ARENAMETER
441 {
442 JSArenaStats *stats, **statsp;
444 if (pool->stats.name)
445 free(pool->stats.name);
446 for (statsp = &arena_stats_list; (stats = *statsp) != 0;
447 statsp = &stats->next) {
448 if (stats == &pool->stats) {
449 *statsp = stats->next;
450 return;
451 }
452 }
453 }
454 #endif
455 }
457 JS_PUBLIC_API(void)
458 JS_ArenaFinish()
459 {
460 JSArena *a, *next;
462 JS_ACQUIRE_LOCK(arena_freelist_lock);
463 a = arena_freelist;
464 arena_freelist = NULL;
465 JS_RELEASE_LOCK(arena_freelist_lock);
466 for (; a; a = next) {
467 next = a->next;
468 free(a);
469 }
470 }
/*
 * Process-teardown hook: destroy the freelist lock created lazily by
 * JS_InitArenaPool.  Presumably called after JS_ArenaFinish, which still
 * needs the lock -- confirm ordering at the call sites.
 */
JS_PUBLIC_API(void)
JS_ArenaShutDown(void)
{
#ifdef JS_THREADSAFE
    /* Must come through here once in the process's last thread! */
    if (arena_freelist_lock) {
        JS_DESTROY_LOCK(arena_freelist_lock);
        arena_freelist_lock = NULL;
    }
#endif
}
484 #ifdef JS_ARENAMETER
485 JS_PUBLIC_API(void)
486 JS_ArenaCountAllocation(JSArenaPool *pool, size_t nb)
487 {
488 pool->stats.nallocs++;
489 pool->stats.nbytes += nb;
490 if (nb > pool->stats.maxalloc)
491 pool->stats.maxalloc = nb;
492 pool->stats.variance += nb * nb;
493 }
/*
 * Metering hook: count a growth that fit without realloc or copy.  size and
 * incr are unused; the uniform signature matches the other Count hooks.
 */
JS_PUBLIC_API(void)
JS_ArenaCountInplaceGrowth(JSArenaPool *pool, size_t size, size_t incr)
{
    pool->stats.ninplace++;
}
501 JS_PUBLIC_API(void)
502 JS_ArenaCountGrowth(JSArenaPool *pool, size_t size, size_t incr)
503 {
504 pool->stats.ngrows++;
505 pool->stats.nbytes += incr;
506 pool->stats.variance -= size * size;
507 size += incr;
508 if (size > pool->stats.maxalloc)
509 pool->stats.maxalloc = size;
510 pool->stats.variance += size * size;
511 }
/*
 * Metering hook: count a JS_ARENA_RELEASE that had to search the pool.
 * mark is unused; kept for a uniform hook signature.
 */
JS_PUBLIC_API(void)
JS_ArenaCountRelease(JSArenaPool *pool, char *mark)
{
    pool->stats.nreleases++;
}
/*
 * Metering hook: count a fast release that only retracted the current
 * arena's avail cursor.  mark is unused; uniform hook signature.
 */
JS_PUBLIC_API(void)
JS_ArenaCountRetract(JSArenaPool *pool, char *mark)
{
    pool->stats.nfastrels++;
}
525 #include <math.h>
526 #include <stdio.h>
528 JS_PUBLIC_API(void)
529 JS_DumpArenaStats(FILE *fp)
530 {
531 JSArenaStats *stats;
532 uint32 nallocs, nbytes;
533 double mean, variance, sigma;
535 for (stats = arena_stats_list; stats; stats = stats->next) {
536 nallocs = stats->nallocs;
537 if (nallocs != 0) {
538 nbytes = stats->nbytes;
539 mean = (double)nbytes / nallocs;
540 variance = stats->variance * nallocs - nbytes * nbytes;
541 if (variance < 0 || nallocs == 1)
542 variance = 0;
543 else
544 variance /= nallocs * (nallocs - 1);
545 sigma = sqrt(variance);
546 } else {
547 mean = variance = sigma = 0;
548 }
550 fprintf(fp, "\n%s allocation statistics:\n", stats->name);
551 fprintf(fp, " number of arenas: %u\n", stats->narenas);
552 fprintf(fp, " number of allocations: %u\n", stats->nallocs);
553 fprintf(fp, " number of free arena reclaims: %u\n", stats->nreclaims);
554 fprintf(fp, " number of malloc calls: %u\n", stats->nmallocs);
555 fprintf(fp, " number of deallocations: %u\n", stats->ndeallocs);
556 fprintf(fp, " number of allocation growths: %u\n", stats->ngrows);
557 fprintf(fp, " number of in-place growths: %u\n", stats->ninplace);
558 fprintf(fp, " number of realloc'ing growths: %u\n", stats->nreallocs);
559 fprintf(fp, "number of released allocations: %u\n", stats->nreleases);
560 fprintf(fp, " number of fast releases: %u\n", stats->nfastrels);
561 fprintf(fp, " total bytes allocated: %u\n", stats->nbytes);
562 fprintf(fp, " mean allocation size: %g\n", mean);
563 fprintf(fp, " standard deviation: %g\n", sigma);
564 fprintf(fp, " maximum allocation size: %u\n", stats->maxalloc);
565 }
566 }
567 #endif /* JS_ARENAMETER */