gc_priv.h
/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996-1999 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 1999-2001 by Hewlett-Packard Company. All rights reserved.
 *
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */


# ifndef GC_PRIVATE_H
# define GC_PRIVATE_H

#if defined(mips) && defined(SYSTYPE_BSD) && defined(sony_news)
    /* sony RISC NEWS, NEWSOS 4 */
#   define BSD_TIME
    /* typedef long ptrdiff_t; -- necessary on some really old systems */
#endif

#if defined(mips) && defined(SYSTYPE_BSD43)
    /* MIPS RISCOS 4 */
#   define BSD_TIME
#endif

#ifdef DGUX
#   include <sys/types.h>
#   include <sys/time.h>
#   include <sys/resource.h>
#endif /* DGUX */

#ifdef BSD_TIME
#   include <sys/types.h>
#   include <sys/time.h>
#   include <sys/resource.h>
#endif /* BSD_TIME */

# ifndef _GC_H
#   include "../gc.h"
# endif

# ifndef GC_MARK_H
#   include "../gc_mark.h"
# endif

typedef GC_word word;
typedef GC_signed_word signed_word;

typedef int GC_bool;
# define TRUE 1
# define FALSE 0

typedef char * ptr_t;   /* A generic pointer to which we can add  */
                        /* byte displacements.                    */
                        /* Preferably identical to caddr_t, if it */
                        /* exists.                                */

# ifndef GCCONFIG_H
#   include "gcconfig.h"
# endif

# ifndef HEADERS_H
#   include "gc_hdrs.h"
# endif

#if defined(__STDC__)
#   include <stdlib.h>
#   if !(defined( sony_news ) )
#       include <stddef.h>
#   endif
#   define VOLATILE volatile
#else
#   ifdef MSWIN32
#       include <stdlib.h>
#   endif
#   define VOLATILE
#endif

#if 0 /* defined(__GNUC__) doesn't work yet */
# define EXPECT(expr, outcome) __builtin_expect(expr,outcome)
  /* Equivalent to (expr), but predict that usually (expr)==outcome. */
#else
# define EXPECT(expr, outcome) (expr)
#endif /* __GNUC__ */

# ifndef GC_LOCKS_H
#   include "gc_locks.h"
# endif

# ifdef STACK_GROWS_DOWN
#   define COOLER_THAN >
#   define HOTTER_THAN <
#   define MAKE_COOLER(x,y) if ((word)(x)+(y) > (word)(x)) {(x) += (y);} \
                            else {(x) = (word)ONES;}
#   define MAKE_HOTTER(x,y) (x) -= (y)
# else
#   define COOLER_THAN <
#   define HOTTER_THAN >
#   define MAKE_COOLER(x,y) if ((word)(x)-(y) < (word)(x)) {(x) -= (y);} else {(x) = 0;}
#   define MAKE_HOTTER(x,y) (x) += (y)
# endif

#if defined(AMIGA) && defined(__SASC)
#   define GC_FAR __far
#else
#   define GC_FAR
#endif


/*********************************/
/*                               */
/* Definitions for conservative  */
/* collector                     */
/*                               */
/*********************************/

/*********************************/
/*                               */
/* Easily changeable parameters  */
/*                               */
/*********************************/

/* #define STUBBORN_ALLOC */
                    /* Enable stubborn allocation, and thus a limited   */
                    /* form of incremental collection w/o dirty bits.   */

/* #define ALL_INTERIOR_POINTERS */
                    /* Forces all pointers into the interior of an      */
                    /* object to be considered valid.  Also causes the  */
                    /* sizes of all objects to be inflated by at least  */
                    /* one byte.  This should suffice to guarantee      */
                    /* that in the presence of a compiler that does     */
                    /* not perform garbage-collector-unsafe             */
                    /* optimizations, all portable, strictly ANSI       */
                    /* conforming C programs should be safely usable    */
                    /* with malloc replaced by GC_malloc and free       */
                    /* calls removed.  There are several disadvantages: */
                    /* 1. There are probably no interesting, portable,  */
                    /*    strictly ANSI conforming C programs.          */
                    /* 2. This option makes it hard for the collector   */
                    /*    to allocate space that is not ``pointed to''  */
                    /*    by integers, etc.  Under SunOS 4.X with a     */
                    /*    statically linked libc, we empirically        */
                    /*    observed that it would be difficult to        */
                    /*    allocate individual objects larger than 100K. */
                    /*    Even if only smaller objects are allocated,   */
                    /*    more swap space is likely to be needed.       */
                    /*    Fortunately, much of this will never be       */
                    /*    touched.                                      */
                    /* If you can easily avoid using this option, do.   */
                    /* If not, try to keep individual objects small.    */
                    /* This is now really controlled at startup,        */
                    /* through GC_all_interior_pointers.                */

#define PRINTSTATS  /* Print garbage collection statistics              */
                    /* For less verbose output, undefine in reclaim.c   */

#define PRINTTIMES  /* Print the amount of time consumed by each garbage */
                    /* collection.                                        */

#define PRINTBLOCKS /* Print object sizes associated with heap blocks,  */
                    /* whether the objects are atomic or composite, and */
                    /* whether or not the block was found to be empty   */
                    /* during the reclaim phase.  Typically generates   */
                    /* about one screenful per garbage collection.      */
#undef PRINTBLOCKS

#ifdef SILENT
#  ifdef PRINTSTATS
#    undef PRINTSTATS
#  endif
#  ifdef PRINTTIMES
#    undef PRINTTIMES
#  endif
#  ifdef PRINTNBLOCKS
#    undef PRINTNBLOCKS
#  endif
#endif

#if defined(PRINTSTATS) && !defined(GATHERSTATS)
#   define GATHERSTATS
#endif

#if defined(PRINTSTATS) || !defined(SMALL_CONFIG)
#   define CONDPRINT  /* Print some things if GC_print_stats is set */
#endif

#define GC_INVOKE_FINALIZERS() GC_notify_or_invoke_finalizers()

#define MERGE_SIZES /* Round up some object sizes, so that fewer distinct */
                    /* free lists are actually maintained.  This applies  */
                    /* only to the top level routines in misc.c, not to   */
                    /* user generated code that calls GC_allocobj and     */
                    /* GC_allocaobj directly.                             */
                    /* Slows down average programs slightly.  May however */
                    /* substantially reduce fragmentation if allocation   */
                    /* request sizes are widely scattered.                */
                    /* May save significant amounts of space for obj_map  */
                    /* entries.                                           */

#if defined(USE_MARK_BYTES) && !defined(ALIGN_DOUBLE)
#  define ALIGN_DOUBLE
   /* We use one byte for every 2 words, which doesn't allow for */
   /* odd numbered words to have mark bits.                      */
#endif

#if defined(GC_GCJ_SUPPORT) && ALIGNMENT < 8 && !defined(ALIGN_DOUBLE)
   /* GCJ's Hashtable synchronization code requires 64-bit alignment. */
#  define ALIGN_DOUBLE
#endif

/* ALIGN_DOUBLE requires MERGE_SIZES at present. */
# if defined(ALIGN_DOUBLE) && !defined(MERGE_SIZES)
#   define MERGE_SIZES
# endif

#if !defined(DONT_ADD_BYTE_AT_END)
# define EXTRA_BYTES GC_all_interior_pointers
#else
# define EXTRA_BYTES 0
#endif


# ifndef LARGE_CONFIG
#   define MINHINCR 16   /* Minimum heap increment, in blocks of HBLKSIZE */
                         /* Must be multiple of largest page size.        */
#   define MAXHINCR 2048 /* Maximum heap increment, in blocks             */
# else
#   define MINHINCR 64
#   define MAXHINCR 4096
# endif

# define TIME_LIMIT 50  /* We try to keep pause times from exceeding */
                        /* this by much.  In milliseconds.           */

# define BL_LIMIT GC_black_list_spacing
                        /* If we need a block of N bytes, and we have */
                        /* a block of N + BL_LIMIT bytes available,   */
                        /* and N > BL_LIMIT,                          */
                        /* but all possible positions in it are       */
                        /* blacklisted, we just use it anyway (and    */
                        /* print a warning, if warnings are enabled). */
                        /* This risks subsequently leaking the block  */
                        /* due to a false reference.  But not using   */
                        /* the block risks unreasonable immediate     */
                        /* heap growth.                               */

/*********************************/
/*                               */
/* Stack saving for debugging    */
/*                               */
/*********************************/

#ifdef SAVE_CALL_CHAIN

/* Fill in the pc and argument information for up to NFRAMES of my */
/* callers.  Ignore my frame and my caller's frame.                 */
struct callinfo;
void GC_save_callers GC_PROTO((struct callinfo info[NFRAMES]));

void GC_print_callers GC_PROTO((struct callinfo info[NFRAMES]));

#endif

#ifdef NEED_CALLINFO
    struct callinfo {
        word ci_pc;     /* Caller, not callee, pc */
#       if NARGS > 0
            word ci_arg[NARGS];  /* bit-wise complement to avoid retention */
#       endif
#       if defined(ALIGN_DOUBLE) && (NFRAMES * (NARGS + 1)) % 2 == 1
            /* Likely alignment problem. */
            word ci_dummy;
#       endif
    };
#endif


/*********************************/
/*                               */
/* OS interface routines         */
/*                               */
/*********************************/

#ifdef BSD_TIME
#   undef CLOCK_TYPE
#   undef GET_TIME
#   undef MS_TIME_DIFF
#   define CLOCK_TYPE struct timeval
#   define GET_TIME(x) { struct rusage rusage; \
                         getrusage (RUSAGE_SELF, &rusage); \
                         x = rusage.ru_utime; }
#   define MS_TIME_DIFF(a,b) ((double) (a.tv_sec - b.tv_sec) * 1000.0 \
                              + (double) (a.tv_usec - b.tv_usec) / 1000.0)
#else /* !BSD_TIME */
# if defined(MSWIN32) || defined(MSWINCE)
#   include <windows.h>
#   include <winbase.h>
#   define CLOCK_TYPE DWORD
#   define GET_TIME(x) x = GetTickCount()
#   define MS_TIME_DIFF(a,b) ((long)((a)-(b)))
# else /* !MSWIN32, !MSWINCE, !BSD_TIME */
#   include <time.h>
#   if !defined(__STDC__) && defined(SPARC) && defined(SUNOS4)
      clock_t clock();  /* Not in time.h, where it belongs */
#   endif
#   if defined(FREEBSD) && !defined(CLOCKS_PER_SEC)
#     include <machine/limits.h>
#     define CLOCKS_PER_SEC CLK_TCK
#   endif
#   if !defined(CLOCKS_PER_SEC)
#     define CLOCKS_PER_SEC 1000000
/*
 * This is technically a bug in the implementation.  ANSI requires that
 * CLOCKS_PER_SEC be defined.  But at least under SunOS4.1.1, it isn't.
 * Also note that the combination of ANSI C and POSIX is incredibly gross
 * here.  The type clock_t is used by both clock() and times().  But on
 * some machines these use different notions of a clock tick, CLOCKS_PER_SEC
 * seems to apply only to clock.  Hence we use it here.  On many machines,
 * including SunOS, clock actually uses units of microseconds (which are
 * not really clock ticks).
 */
#   endif
#   define CLOCK_TYPE clock_t
#   define GET_TIME(x) x = clock()
#   define MS_TIME_DIFF(a,b) ((unsigned long) \
                (1000.0*(double)((a)-(b))/(double)CLOCKS_PER_SEC))
# endif /* !MSWIN32 */
#endif /* !BSD_TIME */

/* We use bzero and bcopy internally.  They may not be available. */
# if defined(SPARC) && defined(SUNOS4)
#   define BCOPY_EXISTS
# endif
# if defined(M68K) && defined(AMIGA)
#   define BCOPY_EXISTS
# endif
# if defined(M68K) && defined(NEXT)
#   define BCOPY_EXISTS
# endif
# if defined(VAX)
#   define BCOPY_EXISTS
# endif
# if defined(AMIGA)
#   include <string.h>
#   define BCOPY_EXISTS
# endif
# if defined(DARWIN)
#   include <string.h>
#   define BCOPY_EXISTS
# endif

# ifndef BCOPY_EXISTS
#   include <string.h>
#   define BCOPY(x,y,n) memcpy(y, x, (size_t)(n))
#   define BZERO(x,n) memset(x, 0, (size_t)(n))
# else
#   define BCOPY(x,y,n) bcopy((char *)(x),(char *)(y),(int)(n))
#   define BZERO(x,n) bzero((char *)(x),(int)(n))
# endif

/* Delay any interrupts or signals that may abort this thread.  Data    */
/* structures are in a consistent state outside this pair of calls.     */
/* ANSI C allows both to be empty (though the standard isn't very       */
/* clear on that point).  Standard malloc implementations are usually   */
/* neither interruptable nor thread-safe, and thus correspond to        */
/* empty definitions.                                                   */
/* It probably doesn't make any sense to declare these to be nonempty   */
/* if the code is being optimized, since signal safety relies on some   */
/* ordering constraints that are typically not obeyed by optimizing     */
/* compilers.                                                           */
# ifdef PCR
#   define DISABLE_SIGNALS() \
        PCR_Th_SetSigMask(PCR_allSigsBlocked,&GC_old_sig_mask)
#   define ENABLE_SIGNALS() \
        PCR_Th_SetSigMask(&GC_old_sig_mask, NIL)
# else
#   if defined(THREADS) || defined(AMIGA) \
       || defined(MSWIN32) || defined(MSWINCE) || defined(MACOS) \
       || defined(DJGPP) || defined(NO_SIGNALS)
        /* Also useful for debugging.                                 */
        /* Should probably use thr_sigsetmask for GC_SOLARIS_THREADS. */
#     define DISABLE_SIGNALS()
#     define ENABLE_SIGNALS()
#   else
#     define DISABLE_SIGNALS() GC_disable_signals()
        void GC_disable_signals();
#     define ENABLE_SIGNALS() GC_enable_signals()
        void GC_enable_signals();
#   endif
# endif

/*
 * Stop and restart mutator threads.
 */
# ifdef PCR
#   include "th/PCR_ThCtl.h"
#   define STOP_WORLD() \
        PCR_ThCtl_SetExclusiveMode(PCR_ThCtl_ExclusiveMode_stopNormal, \
                                   PCR_allSigsBlocked, \
                                   PCR_waitForever)
#   define START_WORLD() \
        PCR_ThCtl_SetExclusiveMode(PCR_ThCtl_ExclusiveMode_null, \
                                   PCR_allSigsBlocked, \
                                   PCR_waitForever);
# else
#   if defined(GC_SOLARIS_THREADS) || defined(GC_WIN32_THREADS) \
       || defined(GC_PTHREADS)
      void GC_stop_world();
      void GC_start_world();
#     define STOP_WORLD() GC_stop_world()
#     define START_WORLD() GC_start_world()
#   else
#     define STOP_WORLD()
#     define START_WORLD()
#   endif
# endif

/* Abandon ship */
# ifdef PCR
#   define ABORT(s) PCR_Base_Panic(s)
# else
#   ifdef SMALL_CONFIG
#     define ABORT(msg) abort();
#   else
      GC_API void GC_abort GC_PROTO((GC_CONST char * msg));
#     define ABORT(msg) GC_abort(msg);
#   endif
# endif

/* Exit abnormally, but without making a mess (e.g. out of memory) */
# ifdef PCR
#   define EXIT() PCR_Base_Exit(1,PCR_waitForever)
# else
#   define EXIT() (void)exit(1)
# endif

/* Print warning message, e.g. almost out of memory. */
# define WARN(msg,arg) (*GC_current_warn_proc)("GC Warning: " msg, (GC_word)(arg))
extern GC_warn_proc GC_current_warn_proc;

/* Get environment entry */
#if !defined(NO_GETENV)
#   if defined(EMPTY_GETENV_RESULTS)
        /* Workaround for a reputed Wine bug. */
        static inline char * fixed_getenv(const char *name)
        {
          char * tmp = getenv(name);
          if (tmp == 0 || strlen(tmp) == 0)
            return 0;
          return tmp;
        }
#       define GETENV(name) fixed_getenv(name)
#   else
#       define GETENV(name) getenv(name)
#   endif
#else
#   define GETENV(name) 0
#endif

/*********************************/
/*                               */
/* Word-size-dependent defines   */
/*                               */
/*********************************/

#if CPP_WORDSZ == 32
#  define WORDS_TO_BYTES(x)   ((x)<<2)
#  define BYTES_TO_WORDS(x)   ((x)>>2)
#  define LOGWL               ((word)5)    /* log[2] of CPP_WORDSZ */
#  define modWORDSZ(n) ((n) & 0x1f)        /* n mod size of word   */
#  if ALIGNMENT != 4
#       define UNALIGNED
#  endif
#endif

#if CPP_WORDSZ == 64
#  define WORDS_TO_BYTES(x)   ((x)<<3)
#  define BYTES_TO_WORDS(x)   ((x)>>3)
#  define LOGWL               ((word)6)    /* log[2] of CPP_WORDSZ */
#  define modWORDSZ(n) ((n) & 0x3f)        /* n mod size of word   */
#  if ALIGNMENT != 8
#       define UNALIGNED
#  endif
#endif

#define WORDSZ ((word)CPP_WORDSZ)
#define SIGNB  ((word)1 << (WORDSZ-1))
#define BYTES_PER_WORD      ((word)(sizeof (word)))
#define ONES                ((word)(signed_word)(-1))
#define divWORDSZ(n)   ((n) >> LOGWL)      /* divide n by size of word */

/*********************/
/*                   */
/*  Size Parameters  */
/*                   */
/*********************/

/*  heap block size, bytes.  Should be power of 2 */

#ifndef HBLKSIZE
# ifdef SMALL_CONFIG
#   define CPP_LOG_HBLKSIZE 10
# else
#   if (CPP_WORDSZ == 32) || (defined(HPUX) && defined(HP_PA))
      /* HPUX/PA seems to use 4K pages with the 64 bit ABI */
#     define CPP_LOG_HBLKSIZE 12
#   else
#     define CPP_LOG_HBLKSIZE 13
#   endif
# endif
#else
# if HBLKSIZE == 512
#   define CPP_LOG_HBLKSIZE 9
# endif
# if HBLKSIZE == 1024
#   define CPP_LOG_HBLKSIZE 10
# endif
# if HBLKSIZE == 2048
#   define CPP_LOG_HBLKSIZE 11
# endif
# if HBLKSIZE == 4096
#   define CPP_LOG_HBLKSIZE 12
# endif
# if HBLKSIZE == 8192
#   define CPP_LOG_HBLKSIZE 13
# endif
# if HBLKSIZE == 16384
#   define CPP_LOG_HBLKSIZE 14
# endif
# ifndef CPP_LOG_HBLKSIZE
    --> fix HBLKSIZE
# endif
# undef HBLKSIZE
#endif
# define CPP_HBLKSIZE (1 << CPP_LOG_HBLKSIZE)
# define LOG_HBLKSIZE ((word)CPP_LOG_HBLKSIZE)
# define HBLKSIZE ((word)CPP_HBLKSIZE)


/*  max size objects supported by freelist (larger objects may be */
/*  allocated, but less efficiently)                               */

#define CPP_MAXOBJBYTES (CPP_HBLKSIZE/2)
#define MAXOBJBYTES ((word)CPP_MAXOBJBYTES)
#define CPP_MAXOBJSZ BYTES_TO_WORDS(CPP_MAXOBJBYTES)
#define MAXOBJSZ ((word)CPP_MAXOBJSZ)

# define divHBLKSZ(n) ((n) >> LOG_HBLKSIZE)

# define HBLK_PTR_DIFF(p,q) divHBLKSZ((ptr_t)p - (ptr_t)q)
        /* Equivalent to subtracting 2 hblk pointers.     */
        /* We do it this way because a compiler should    */
        /* find it hard to use an integer division        */
        /* instead of a shift.  The bundled SunOS 4.1     */
        /* o.w. sometimes pessimizes the subtraction to   */
        /* involve a call to .div.                        */

# define modHBLKSZ(n) ((n) & (HBLKSIZE-1))

# define HBLKPTR(objptr) ((struct hblk *)(((word) (objptr)) & ~(HBLKSIZE-1)))

# define HBLKDISPL(objptr) (((word) (objptr)) & (HBLKSIZE-1))

/* Round up byte allocation requests to integral number of words, etc. */
# define ROUNDED_UP_WORDS(n) \
        BYTES_TO_WORDS((n) + (WORDS_TO_BYTES(1) - 1 + EXTRA_BYTES))
# ifdef ALIGN_DOUBLE
#       define ALIGNED_WORDS(n) \
            (BYTES_TO_WORDS((n) + WORDS_TO_BYTES(2) - 1 + EXTRA_BYTES) & ~1)
# else
#       define ALIGNED_WORDS(n) ROUNDED_UP_WORDS(n)
# endif
# define SMALL_OBJ(bytes) ((bytes) <= (MAXOBJBYTES - EXTRA_BYTES))
# define ADD_SLOP(bytes) ((bytes) + EXTRA_BYTES)
# ifndef MIN_WORDS
    /* MIN_WORDS is the size of the smallest allocated object. */
    /* 1 and 2 are the only valid values.                      */
    /* 2 must be used if:                                      */
    /* - GC_gcj_malloc can be used for objects of requested    */
    /*   size smaller than 2 words, or                         */
    /* - USE_MARK_BYTES is defined.                            */
#   if defined(USE_MARK_BYTES) || defined(GC_GCJ_SUPPORT)
#     define MIN_WORDS 2   /* Smallest allocated object. */
#   else
#     define MIN_WORDS 1
#   endif
# endif


/*
 * Hash table representation of sets of pages.  This assumes it is
 * OK to add spurious entries to sets.
 * Used by black-listing code, and perhaps by dirty bit maintenance code.
 */

# ifdef LARGE_CONFIG
#   define LOG_PHT_ENTRIES 20   /* Collisions likely at 1M blocks, */
                                /* which is >= 4GB.  Each table takes */
                                /* 128KB, some of which may never be  */
                                /* touched.                           */
# else
#   ifdef SMALL_CONFIG
#     define LOG_PHT_ENTRIES 14 /* Collisions are likely if heap grows */
                                /* to more than 16K hblks = 64MB.      */
                                /* Each hash table occupies 2K bytes.  */
#   else /* default "medium" configuration */
#     define LOG_PHT_ENTRIES 16 /* Collisions are likely if heap grows */
                                /* to more than 64K hblks >= 256MB.    */
                                /* Each hash table occupies 8K bytes.  */
                                /* Even for somewhat smaller heaps,    */
                                /* say half that, collisions may be an */
                                /* issue because we blacklist          */
                                /* addresses outside the heap.         */
#   endif
# endif
# define PHT_ENTRIES ((word)1 << LOG_PHT_ENTRIES)
# define PHT_SIZE (PHT_ENTRIES >> LOGWL)
typedef word page_hash_table[PHT_SIZE];

# define PHT_HASH(addr) ((((word)(addr)) >> LOG_HBLKSIZE) & (PHT_ENTRIES - 1))

# define get_pht_entry_from_index(bl, index) \
                (((bl)[divWORDSZ(index)] >> modWORDSZ(index)) & 1)
# define set_pht_entry_from_index(bl, index) \
                (bl)[divWORDSZ(index)] |= (word)1 << modWORDSZ(index)
# define clear_pht_entry_from_index(bl, index) \
                (bl)[divWORDSZ(index)] &= ~((word)1 << modWORDSZ(index))
/* And a dumb but thread-safe version of set_pht_entry_from_index. */
/* This sets (many) extra bits.                                    */
# define set_pht_entry_from_index_safe(bl, index) \
                (bl)[divWORDSZ(index)] = ONES



/********************************************/
/*                                          */
/*    H e a p   B l o c k s                 */
/*                                          */
/********************************************/

/*  heap block header */
#define HBLKMASK (HBLKSIZE-1)

#define BITS_PER_HBLK (CPP_HBLKSIZE * 8)

#define MARK_BITS_PER_HBLK (BITS_PER_HBLK/CPP_WORDSZ)
           /* upper bound                                    */
           /* We allocate 1 bit/word, unless USE_MARK_BYTES  */
           /* is defined.  Only the first word               */
           /* in each object is actually marked.             */

# ifdef USE_MARK_BYTES
#   define MARK_BITS_SZ (MARK_BITS_PER_HBLK/2)
        /* Unlike the other case, this is in units of bytes.         */
        /* We actually allocate only every second mark bit, since we */
        /* force all objects to be doubleword aligned.               */
        /* However, each mark bit is allocated as a byte.            */
# else
#   define MARK_BITS_SZ (MARK_BITS_PER_HBLK/CPP_WORDSZ)
# endif

/* We maintain layout maps for heap blocks containing objects of a given */
/* size.  Each entry in this map describes a byte offset and has the     */
/* following type.                                                       */
typedef unsigned char map_entry_type;

struct hblkhdr {
    word hb_sz;  /* If in use, size in words, of objects in the block. */
                 /* if free, the size in bytes of the whole block      */
    struct hblk * hb_next;      /* Link field for hblk free list          */
                                /* and for lists of chunks waiting to be  */
                                /* reclaimed.                             */
    struct hblk * hb_prev;      /* Backwards link for free list.          */
    word hb_descr;              /* object descriptor for marking.  See    */
                                /* mark.h.                                */
    map_entry_type * hb_map;
                        /* A pointer to a pointer validity map of the block. */
                        /* See GC_obj_map.                                   */
                        /* Valid for all blocks with headers.                */
                        /* Free blocks point to GC_invalid_map.             */
    unsigned char hb_obj_kind;
                         /* Kind of objects in the block.  Each kind */
                         /* identifies a mark procedure and a set of */
                         /* list headers.  Sometimes called regions. */
    unsigned char hb_flags;
#       define IGNORE_OFF_PAGE 1    /* Ignore pointers that do not */
                                    /* point to the first page of  */
                                    /* this object.                */
#       define WAS_UNMAPPED 2   /* This is a free block, which has */
                                /* been unmapped from the address  */
                                /* space.                          */
                                /* GC_remap must be invoked on it  */
                                /* before it can be reallocated.   */
                                /* Only set with USE_MUNMAP.       */
    unsigned short hb_last_reclaimed;
                                /* Value of GC_gc_no when block was     */
                                /* last allocated or swept. May wrap.   */
                                /* For a free block, this is maintained */
                                /* only for USE_MUNMAP, and indicates   */
                                /* when the header was allocated, or    */
                                /* when the size of the block last      */
                                /* changed.                             */
#   ifdef USE_MARK_BYTES
      union {
        char _hb_marks[MARK_BITS_SZ];
                            /* The i'th byte is 1 if the object      */
                            /* starting at word 2i is marked, 0 o.w. */
        word dummy;     /* Force word alignment of mark bytes. */
      } _mark_byte_union;
#     define hb_marks _mark_byte_union._hb_marks
#   else
      word hb_marks[MARK_BITS_SZ];
                            /* Bit i in the array refers to the        */
                            /* object starting at the ith word (header */
                            /* INCLUDED) in the heap block.            */
                            /* The lsb of word 0 is numbered 0.        */
                            /* Unused bits are invalid, and are        */
                            /* occasionally set, e.g for uncollectable */
                            /* objects.                                */
#   endif /* !USE_MARK_BYTES */
};

/*  heap block body */

# define BODY_SZ (HBLKSIZE/sizeof(word))

struct hblk {
    word hb_body[BODY_SZ];
};

# define HBLK_IS_FREE(hdr) ((hdr) -> hb_map == GC_invalid_map)

# define OBJ_SZ_TO_BLOCKS(sz) \
    divHBLKSZ(WORDS_TO_BYTES(sz) + HBLKSIZE-1)
    /* Size of block (in units of HBLKSIZE) needed to hold objects of */
    /* given sz (in words).                                           */

/* Object free list link */
# define obj_link(p) (*(ptr_t *)(p))

# define LOG_MAX_MARK_PROCS 6
# define MAX_MARK_PROCS (1 << LOG_MAX_MARK_PROCS)

/* Root sets.  Logically private to mark_rts.c.  But we don't want the */
/* tables scanned, so we put them here.                                */
/* MAX_ROOT_SETS is the maximum number of ranges that can be           */
/* registered as static roots.                                         */
# ifdef LARGE_CONFIG
#   define MAX_ROOT_SETS 4096
# else
#   ifdef PCR
#     define MAX_ROOT_SETS 1024
#   else
#     if defined(MSWIN32) || defined(MSWINCE)
#       define MAX_ROOT_SETS 1024
            /* Under NT, we add only written pages, which can result */
            /* in many small root sets.                              */
#     else
#       define MAX_ROOT_SETS 256
#     endif
#   endif
# endif

# define MAX_EXCLUSIONS (MAX_ROOT_SETS/4)
/* Maximum number of segments that can be excluded from root sets. */

/*
 * Data structure for excluded static roots.
 */
struct exclusion {
    ptr_t e_start;
    ptr_t e_end;
};

/* Data structure for list of root sets.                                */
/* We keep a hash table, so that we can filter out duplicate additions. */
/* Under Win32, we need to do a better job of filtering overlaps, so    */
/* we resort to sequential search, and pay the price.                   */
struct roots {
        ptr_t r_start;
        ptr_t r_end;
#       if !defined(MSWIN32) && !defined(MSWINCE)
          struct roots * r_next;
#       endif
        GC_bool r_tmp;
                /* Delete before registering new dynamic libraries */
};

#if !defined(MSWIN32) && !defined(MSWINCE)
    /* Size of hash table index to roots. */
#   define LOG_RT_SIZE 6
#   define RT_SIZE (1 << LOG_RT_SIZE) /* Power of 2, may be != MAX_ROOT_SETS */
#endif

/* Lists of all heap blocks and free lists    */
/* as well as other random data structures    */
/* that should not be scanned by the          */
/* collector.                                 */
/* These are grouped together in a struct     */
/* so that they can be easily skipped by the  */
/* GC_mark routine.                           */
/* The ordering is weird to make GC_malloc    */
/* faster by keeping the important fields     */
/* sufficiently close together that a         */
/* single load of a base register will do.    */
/* Scalars that could easily appear to        */
/* be pointers are also put here.             */
/* The main fields should precede any         */
/* conditionally included fields, so that     */
/* gc_inl.h will work even if a different set */
/* of macros is defined when the client is    */
/* compiled.                                  */

struct _GC_arrays {
  word _heapsize;
  word _max_heapsize;
  word _requested_heapsize;     /* Heap size due to explicit expansion */
  ptr_t _last_heap_addr;
  ptr_t _prev_heap_addr;
  word _large_free_bytes;
        /* Total bytes contained in blocks on large object free */
        /* list.                                                 */
  word _large_allocd_bytes;
        /* Total number of bytes in allocated large objects blocks.  */
        /* For the purposes of this counter and the next one only, a */
        /* large object is one that occupies a block of at least     */
        /* 2*HBLKSIZE.                                                */
  word _max_large_allocd_bytes;
        /* Maximum number of bytes that were ever allocated in       */
        /* large object blocks.  This is used to help decide when it */
        /* is safe to split up a large block.                        */
  word _words_allocd_before_gc;
                /* Number of words allocated before this */
                /* collection cycle.                     */
# ifndef SEPARATE_GLOBALS
    word _words_allocd;
        /* Number of words allocated during this collection cycle */
# endif
  word _words_wasted;
        /* Number of words wasted due to internal fragmentation */
        /* in large objects, or due to dropping blacklisted     */
        /* blocks, since last gc.  Approximate.                 */
  word _words_finalized;
        /* Approximate number of words in objects (and headers) */
        /* That became ready for finalization in the last       */
        /* collection.                                          */
  word _non_gc_bytes_at_gc;
        /* Number of explicitly managed bytes of storage */
        /* at last collection.                           */
  word _mem_freed;
        /* Number of explicitly deallocated words of memory */
        /* since last collection.                           */
  word _finalizer_mem_freed;
        /* Words of memory explicitly deallocated while       */
        /* finalizers were running.  Used to approximate mem. */
        /* explicitly deallocated by finalizers.              */
  ptr_t _scratch_end_ptr;
  ptr_t _scratch_last_end_ptr;
        /* Used by headers.c, and can easily appear to point to */
        /* heap.                                                */
  GC_mark_proc _mark_procs[MAX_MARK_PROCS];
        /* Table of user-defined mark procedures.  There is */
        /* a small number of these, which can be referenced */
        /* by DS_PROC mark descriptors.  See gc_mark.h.     */

# ifndef SEPARATE_GLOBALS
    ptr_t _objfreelist[MAXOBJSZ+1];
                          /* free list for objects */
    ptr_t _aobjfreelist[MAXOBJSZ+1];
                          /* free list for atomic objs */
# endif

  ptr_t _uobjfreelist[MAXOBJSZ+1];
                          /* uncollectable but traced objs     */
                          /* objects on this and auobjfreelist */
                          /* are always marked, except during  */
                          /* garbage collections.              */
# ifdef ATOMIC_UNCOLLECTABLE
    ptr_t _auobjfreelist[MAXOBJSZ+1];
# endif
                          /* uncollectable but traced objs */

# ifdef GATHERSTATS
    word _composite_in_use;
                /* Number of words in accessible composite */
                /* objects.                                */
    word _atomic_in_use;
                /* Number of words in accessible atomic */
                /* objects.                             */
# endif
# ifdef USE_MUNMAP
    word _unmapped_bytes;
# endif
# ifdef MERGE_SIZES
    unsigned _size_map[WORDS_TO_BYTES(MAXOBJSZ+1)];
        /* Number of words to allocate for a given allocation request in */
        /* bytes.                                                        */
# endif

# ifdef STUBBORN_ALLOC
    ptr_t _sobjfreelist[MAXOBJSZ+1];
# endif
                          /* free list for immutable objects */
  map_entry_type * _obj_map[MAXOBJSZ+1];
                       /* If not NIL, then a pointer to a map of valid  */
                       /* object addresses. _obj_map[sz][i] is j if the */
                       /* address block_start+i is a valid pointer      */
                       /* to an object at block_start +                 */
                       /*   WORDS_TO_BYTES(BYTES_TO_WORDS(i) - j)       */
                       /* I.e. j is a word displacement from the        */
                       /* object beginning.                             */
                       /* The entry is OBJ_INVALID if the corresponding */
                       /* address is not a valid pointer.  It is        */
                       /* OFFSET_TOO_BIG if the value j would be too    */
                       /* large to fit in the entry.  (Note that the    */
                       /* size of these entries matters, both for       */
                       /* space consumption and for cache utilization.) */
#   define OFFSET_TOO_BIG 0xfe
#   define OBJ_INVALID 0xff
#   define MAP_ENTRY(map, bytes) (map)[bytes]
#   define MAP_ENTRIES HBLKSIZE
#   define MAP_SIZE MAP_ENTRIES
#   define CPP_MAX_OFFSET (OFFSET_TOO_BIG - 1)
#   define MAX_OFFSET ((word)CPP_MAX_OFFSET)
    /* The following are used only if GC_all_interior_ptrs != 0 */
#       define VALID_OFFSET_SZ \
          (CPP_MAX_OFFSET > WORDS_TO_BYTES(CPP_MAXOBJSZ)? \
           CPP_MAX_OFFSET+1 \
           : WORDS_TO_BYTES(CPP_MAXOBJSZ)+1)
        char _valid_offsets[VALID_OFFSET_SZ];
                                /* GC_valid_offsets[i] == TRUE ==> i */
                                /* is registered as a displacement.  */
        char _modws_valid_offsets[sizeof(word)];
                                /* GC_valid_offsets[i] ==>                */
                                /* GC_modws_valid_offsets[i%sizeof(word)] */
#   define OFFSET_VALID(displ) \
          (GC_all_interior_pointers || GC_valid_offsets[displ])
#   ifdef STUBBORN_ALLOC
      page_hash_table _changed_pages;
        /* Stubborn object pages that were changed since last call to */
        /* GC_read_changed.                                            */
      page_hash_table _prev_changed_pages;
        /* Stubborn object pages that were changed before last call to */
        /* GC_read_changed.                                             */
#   endif
#   if defined(PROC_VDB) || defined(MPROTECT_VDB)
      page_hash_table _grungy_pages; /* Pages that were dirty at last */
                                     /* GC_read_dirty.                */
#   endif
#   ifdef MPROTECT_VDB
      VOLATILE page_hash_table _dirty_pages;
                        /* Pages dirtied since last GC_read_dirty. */
#   endif
#   ifdef PROC_VDB
      page_hash_table _written_pages;   /* Pages ever dirtied */
#   endif
#   ifdef LARGE_CONFIG
#     if CPP_WORDSZ > 32
#       define MAX_HEAP_SECTS 4096      /* overflows at roughly 64 GB      */
#     else
#       define MAX_HEAP_SECTS 768       /* Separately added heap sections. */
#     endif
#   else
#     ifdef SMALL_CONFIG
#       define MAX_HEAP_SECTS 128       /* Roughly 256MB (128*2048*1K) */
#     else
#       define MAX_HEAP_SECTS 384       /* Roughly 3GB */
#     endif
#   endif
    struct HeapSect {
        ptr_t hs_start; word hs_bytes;
    } _heap_sects[MAX_HEAP_SECTS];
#   if defined(MSWIN32) || defined(MSWINCE)
      ptr_t _heap_bases[MAX_HEAP_SECTS];
                /* Start address of memory regions obtained from kernel. */
#   endif
#   ifdef MSWINCE
      word _heap_lengths[MAX_HEAP_SECTS];
                /* Committed lengths of memory regions obtained from kernel. */
#   endif
    struct roots _static_roots[MAX_ROOT_SETS];
#   if !defined(MSWIN32) && !defined(MSWINCE)
      struct roots * _root_index[RT_SIZE];
#   endif
    struct exclusion _excl_table[MAX_EXCLUSIONS];
    /* Block header index; see gc_headers.h */
    bottom_index * _all_nils;
    bottom_index * _top_index [TOP_SZ];
#ifdef SAVE_CALL_CHAIN
    struct callinfo _last_stack[NFRAMES]; /* Stack at last garbage collection. */
                                          /* Useful for debugging mysterious   */
                                          /* object disappearances.            */
                                          /* In the multithreaded case, we     */
                                          /* currently only save the calling   */
                                          /* stack.                            */
#endif
};

GC_API GC_FAR struct _GC_arrays GC_arrays;

# ifndef SEPARATE_GLOBALS
#   define GC_objfreelist GC_arrays._objfreelist
#   define GC_aobjfreelist GC_arrays._aobjfreelist
#   define GC_words_allocd GC_arrays._words_allocd
# endif
# define GC_uobjfreelist GC_arrays._uobjfreelist
# ifdef ATOMIC_UNCOLLECTABLE
#   define GC_auobjfreelist GC_arrays._auobjfreelist
# endif
# define GC_sobjfreelist GC_arrays._sobjfreelist
# define GC_valid_offsets GC_arrays._valid_offsets
# define GC_modws_valid_offsets GC_arrays._modws_valid_offsets
# ifdef STUBBORN_ALLOC
#   define GC_changed_pages GC_arrays._changed_pages
#   define GC_prev_changed_pages GC_arrays._prev_changed_pages
# endif
# define GC_obj_map GC_arrays._obj_map
# define GC_last_heap_addr GC_arrays._last_heap_addr
# define GC_prev_heap_addr GC_arrays._prev_heap_addr
# define GC_words_wasted GC_arrays._words_wasted
# define GC_large_free_bytes GC_arrays._large_free_bytes
# define GC_large_allocd_bytes GC_arrays._large_allocd_bytes
# define GC_max_large_allocd_bytes GC_arrays._max_large_allocd_bytes
# define GC_words_finalized GC_arrays._words_finalized
# define GC_non_gc_bytes_at_gc GC_arrays._non_gc_bytes_at_gc
# define GC_mem_freed GC_arrays._mem_freed
# define GC_finalizer_mem_freed GC_arrays._finalizer_mem_freed
# define GC_scratch_end_ptr GC_arrays._scratch_end_ptr
# define GC_scratch_last_end_ptr GC_arrays._scratch_last_end_ptr
# define GC_mark_procs GC_arrays._mark_procs
# define GC_heapsize GC_arrays._heapsize
# define GC_max_heapsize GC_arrays._max_heapsize
# define GC_requested_heapsize GC_arrays._requested_heapsize
# define GC_words_allocd_before_gc GC_arrays._words_allocd_before_gc
# define GC_heap_sects GC_arrays._heap_sects
# define GC_last_stack GC_arrays._last_stack
# ifdef USE_MUNMAP
#   define GC_unmapped_bytes GC_arrays._unmapped_bytes
# endif
# if defined(MSWIN32) || defined(MSWINCE)
#   define GC_heap_bases GC_arrays._heap_bases
# endif
# ifdef MSWINCE
#   define GC_heap_lengths GC_arrays._heap_lengths
# endif
# define GC_static_roots GC_arrays._static_roots
# define GC_root_index GC_arrays._root_index
# define GC_excl_table GC_arrays._excl_table
# define GC_all_nils GC_arrays._all_nils
# define GC_top_index GC_arrays._top_index
# if defined(PROC_VDB) || defined(MPROTECT_VDB)
#   define GC_grungy_pages GC_arrays._grungy_pages
# endif
# ifdef MPROTECT_VDB
#   define GC_dirty_pages GC_arrays._dirty_pages
# endif
# ifdef PROC_VDB
#   define GC_written_pages GC_arrays._written_pages
# endif
# ifdef GATHERSTATS
#   define GC_composite_in_use GC_arrays._composite_in_use
#   define GC_atomic_in_use GC_arrays._atomic_in_use
# endif
# ifdef MERGE_SIZES
#   define GC_size_map GC_arrays._size_map
# endif

# define beginGC_arrays ((ptr_t)(&GC_arrays))
# define endGC_arrays (((ptr_t)(&GC_arrays)) + (sizeof GC_arrays))

#define USED_HEAP_SIZE (GC_heapsize - GC_large_free_bytes)

/* Object kinds: */
# define MAXOBJKINDS 16

extern struct obj_kind {
   ptr_t *ok_freelist;  /* Array of free list headers for this kind of object */
                        /* Point either to GC_arrays or to storage allocated   */
                        /* with GC_scratch_alloc.                              */
   struct hblk **ok_reclaim_list;
                        /* List headers for lists of blocks waiting to be */
                        /* swept.                                         */
   word ok_descriptor;  /* Descriptor template for objects in this */
                        /* block.                                  */
   GC_bool ok_relocate_descr;
                        /* Add object size in bytes to descriptor    */
                        /* template to obtain descriptor.  Otherwise */
                        /* template is used as is.                   */
   GC_bool ok_init;     /* Clear objects before putting them on the free list. */
} GC_obj_kinds[MAXOBJKINDS];

# define beginGC_obj_kinds ((ptr_t)(&GC_obj_kinds))
# define endGC_obj_kinds (beginGC_obj_kinds + (sizeof GC_obj_kinds))

/* Variables that used to be in GC_arrays, but need to be accessed by */
/* inline allocation code.  If they were in GC_arrays, the inlined    */
/* allocation code would include GC_arrays offsets (as it did), which */
/* introduced maintenance problems.                                   */

#ifdef SEPARATE_GLOBALS
  word GC_words_allocd;
        /* Number of words allocated during this collection cycle */
  ptr_t GC_objfreelist[MAXOBJSZ+1];
                          /* free list for NORMAL objects */
# define beginGC_objfreelist ((ptr_t)(&GC_objfreelist))
# define endGC_objfreelist (beginGC_objfreelist + sizeof(GC_objfreelist))

  ptr_t GC_aobjfreelist[MAXOBJSZ+1];
                          /* free list for atomic (PTRFREE) objs */
# define beginGC_aobjfreelist ((ptr_t)(&GC_aobjfreelist))
# define endGC_aobjfreelist (beginGC_aobjfreelist + sizeof(GC_aobjfreelist))
#endif

/* Predefined kinds: */
# define PTRFREE 0
# define NORMAL 1
# define UNCOLLECTABLE 2
# ifdef ATOMIC_UNCOLLECTABLE
#   define AUNCOLLECTABLE 3
#   define STUBBORN 4
#   define IS_UNCOLLECTABLE(k) (((k) & ~1) == UNCOLLECTABLE)
# else
#   define STUBBORN 3
#   define IS_UNCOLLECTABLE(k) ((k) == UNCOLLECTABLE)
# endif

extern int GC_n_kinds;

GC_API word GC_fo_entries;

extern word GC_n_heap_sects;    /* Number of separately added heap */
                                /* sections.                       */

extern word GC_page_size;

# if defined(MSWIN32) || defined(MSWINCE)
    struct _SYSTEM_INFO;
    extern struct _SYSTEM_INFO GC_sysinfo;
    extern word GC_n_heap_bases;        /* See GC_heap_bases. */
# endif

extern word GC_total_stack_black_listed;
                        /* Number of bytes on stack blacklist. */

extern word GC_black_list_spacing;
                        /* Average number of bytes between blacklisted */
                        /* blocks. Approximate.                        */
                        /* Counts only blocks that are                 */
                        /* "stack-blacklisted", i.e. that are          */
                        /* problematic in the interior of an object.   */

extern map_entry_type * GC_invalid_map;
                        /* Pointer to the nowhere valid hblk map */
                        /* Blocks pointing to this map are free. */

extern struct hblk * GC_hblkfreelist[];
                        /* List of completely empty heap blocks */
                        /* Linked through hb_next field of      */
                        /* header structure associated with     */
                        /* block.                               */

extern GC_bool GC_objects_are_marked;   /* There are marked objects in */
                                        /* the heap.                   */

#ifndef SMALL_CONFIG
  extern GC_bool GC_incremental;
                        /* Using incremental/generational collection. */
# define TRUE_INCREMENTAL \
        (GC_incremental && GC_time_limit != GC_TIME_UNLIMITED)
        /* True incremental, not just generational, mode */
#else
# define GC_incremental FALSE
                        /* Hopefully allow optimizer to remove some code. */
# define TRUE_INCREMENTAL FALSE
#endif

extern GC_bool GC_dirty_maintained;
                        /* Dirty bits are being maintained,   */
                        /* either for incremental collection, */
                        /* or to limit the root set.          */

extern word GC_root_size;       /* Total size of registered root sections */

extern GC_bool GC_debugging_started;    /* GC_debug_malloc has been called. */

extern long GC_large_alloc_warn_interval;
        /* Interval between unsuppressed warnings. */

extern long GC_large_alloc_warn_suppressed;
        /* Number of warnings suppressed so far. */

#ifdef THREADS
  extern GC_bool GC_world_stopped;
#endif

/* Operations */
# ifndef abs
#   define abs(x)  ((x) < 0? (-(x)) : (x))
# endif


/*  Marks are in a reserved area in                        */
/*  each heap block.  Each word has one mark bit associated */
/*  with it.  Only those corresponding to the beginning of an */
/*  object are used.                                        */

/* Set mark bit correctly, even if mark bits may be concurrently */
/* accessed.                                                     */
#ifdef PARALLEL_MARK
# define OR_WORD(addr, bits) \
        { word old; \
          do { \
            old = *((volatile word *)addr); \
          } while (!GC_compare_and_exchange((addr), old, old | (bits))); \
        }
# define OR_WORD_EXIT_IF_SET(addr, bits, exit_label) \
        { word old; \
          word my_bits = (bits); \
          do { \
            old = *((volatile word *)addr); \
            if (old & my_bits) goto exit_label; \
          } while (!GC_compare_and_exchange((addr), old, old | my_bits)); \
        }
#else
# define OR_WORD(addr, bits) *(addr) |= (bits)
# define OR_WORD_EXIT_IF_SET(addr, bits, exit_label) \
        { \
          word old = *(addr); \
          word my_bits = (bits); \
          if (old & my_bits) goto exit_label; \
          *(addr) = (old | my_bits); \
        }
#endif

/* Mark bit operations */

/*
 * Retrieve, set, clear the mark bit corresponding
 * to the nth word in a given heap block.
 *
 * (Recall that bit n corresponds to object beginning at word n
 * relative to the beginning of the block, including unused words)
 */

#ifdef USE_MARK_BYTES
# define mark_bit_from_hdr(hhdr,n) ((hhdr)->hb_marks[(n) >> 1])
# define set_mark_bit_from_hdr(hhdr,n) ((hhdr)->hb_marks[(n)>>1]) = 1
# define clear_mark_bit_from_hdr(hhdr,n) ((hhdr)->hb_marks[(n)>>1]) = 0
#else /* !USE_MARK_BYTES */
# define mark_bit_from_hdr(hhdr,n) (((hhdr)->hb_marks[divWORDSZ(n)] \
                                    >> (modWORDSZ(n))) & (word)1)
# define set_mark_bit_from_hdr(hhdr,n) \
                            OR_WORD((hhdr)->hb_marks+divWORDSZ(n), \
                                    (word)1 << modWORDSZ(n))
# define clear_mark_bit_from_hdr(hhdr,n) (hhdr)->hb_marks[divWORDSZ(n)] \
                                &= ~((word)1 << modWORDSZ(n))
#endif /* !USE_MARK_BYTES */

/* Important internal collector routines */

ptr_t GC_approx_sp GC_PROTO((void));

GC_bool GC_should_collect GC_PROTO((void));

void GC_apply_to_all_blocks GC_PROTO(( \
    void (*fn) GC_PROTO((struct hblk *h, word client_data)), \
    word client_data));
                        /* Invoke fn(hbp, client_data) for each */
                        /* allocated heap block.                */
struct hblk * GC_next_used_block GC_PROTO((struct hblk * h));
                        /* Return first in-use block >= h */
struct hblk * GC_prev_block GC_PROTO((struct hblk * h));
                        /* Return last block <= h.  Returned block    */
                        /* is managed by GC, but may or may not be in */
                        /* use.                                       */
void GC_mark_init GC_PROTO((void));
void GC_clear_marks GC_PROTO((void));   /* Clear mark bits for all heap objects. */
void GC_invalidate_mark_state GC_PROTO((void));
                                        /* Tell the marker that marked    */
                                        /* objects may point to unmarked  */
                                        /* ones, and roots may point to   */
                                        /* unmarked objects.              */
                                        /* Reset mark stack.              */
GC_bool GC_mark_stack_empty GC_PROTO((void));
GC_bool GC_mark_some GC_PROTO((ptr_t cold_gc_frame));
                        /* Perform about one page's worth of marking */
                        /* work of whatever kind is needed.  Returns */
                        /* quickly if no collection is in progress.  */
                        /* Return TRUE if mark phase finished.       */
void GC_initiate_gc GC_PROTO((void));
                        /* initiate collection.                */
                        /* If the mark state is invalid, this  */
                        /* becomes full collection.  Otherwise */
                        /* it's partial.                       */
void GC_push_all GC_PROTO((ptr_t bottom, ptr_t top));
                        /* Push everything in a range */
                        /* onto mark stack.           */
void GC_push_selected GC_PROTO(( \
    ptr_t bottom, \
    ptr_t top, \
    int (*dirty_fn) GC_PROTO((struct hblk *h)), \
    void (*push_fn) GC_PROTO((ptr_t bottom, ptr_t top)) ));
                        /* Push all pages h in [b,t) s.t.     */
                        /* select_fn(h) != 0 onto mark stack. */
#ifndef SMALL_CONFIG
  void GC_push_conditional GC_PROTO((ptr_t b, ptr_t t, GC_bool all));
#else
# define GC_push_conditional(b, t, all) GC_push_all(b, t)
#endif
                        /* Do either of the above, depending */
                        /* on the third arg.                 */
void GC_push_all_stack GC_PROTO((ptr_t b, ptr_t t));
                        /* As above, but consider     */
                        /* interior pointers as valid */
void GC_push_all_eager GC_PROTO((ptr_t b, ptr_t t));
                        /* Same as GC_push_all_stack, but  */
                        /* ensures that stack is scanned   */
                        /* immediately, not just scheduled */
                        /* for scanning.                   */
#ifndef THREADS
  void GC_push_all_stack_partially_eager GC_PROTO(( \
      ptr_t bottom, ptr_t top, ptr_t cold_gc_frame ));
                        /* Similar to GC_push_all_eager, but only the */
                        /* part hotter than cold_gc_frame is scanned  */
                        /* immediately.  Needed to ensure that callee- */
                        /* save registers are not missed.             */
#else
  /* In the threads case, we push part of the current thread stack     */
  /* with GC_push_all_eager when we push the registers.  This gets the */
  /* callee-save registers that may disappear.  The remainder of the   */
  /* stacks are scheduled for scanning in *GC_push_other_roots, which  */
  /* is thread-package-specific.                                       */
#endif
void GC_push_current_stack GC_PROTO((ptr_t cold_gc_frame));
                        /* Push enough of the current stack eagerly to */
                        /* ensure that callee-save registers saved in  */
                        /* GC frames are scanned.                      */
                        /* In the non-threads case, schedule entire    */
                        /* stack for scanning.                         */
void GC_push_roots GC_PROTO((GC_bool all, ptr_t cold_gc_frame));
                        /* Push all or dirty roots. */
extern void (*GC_push_other_roots) GC_PROTO((void));
                        /* Push system or application specific roots   */
                        /* onto the mark stack.  In some environments  */
                        /* (e.g. threads environments) this is         */
                        /* predefined to be non-zero.  A client supplied */
                        /* replacement should also call the original   */
                        /* function.                                   */
extern void GC_push_gc_structures GC_PROTO((void));
                        /* Push GC internal roots.  These are normally  */
                        /* included in the static data segment, and     */
                        /* thus implicitly pushed.  But we must do this */
                        /* explicitly if normal root processing is      */
                        /* disabled.  Calls the following:              */
        extern void GC_push_finalizer_structures GC_PROTO((void));
        extern void GC_push_stubborn_structures GC_PROTO((void));
#       ifdef THREADS
          extern void GC_push_thread_structures GC_PROTO((void));
#       endif
extern void (*GC_start_call_back) GC_PROTO((void));
                        /* Called at start of full collections.     */
                        /* Not called if 0.  Called with allocation */
                        /* lock held.                               */
                        /* 0 by default.                            */
# if defined(USE_GENERIC_PUSH_REGS)
    void GC_generic_push_regs GC_PROTO((ptr_t cold_gc_frame));
# else
    void GC_push_regs GC_PROTO((void));
# endif
# if defined(SPARC) || defined(IA64)
  /* Cause all stacked registers to be saved in memory.  Return a */
  /* pointer to the top of the corresponding memory stack.        */
  word GC_save_regs_in_stack GC_PROTO((void));
# endif
                        /* Push register contents onto mark stack.    */
                        /* If NURSERY is defined, the default push    */
                        /* action can be overridden with GC_push_proc */

# ifdef NURSERY
    extern void (*GC_push_proc)(ptr_t);
# endif
# if defined(MSWIN32) || defined(MSWINCE)
  void __cdecl GC_push_one GC_PROTO((word p));
# else
  void GC_push_one GC_PROTO((word p));
                              /* If p points to an object, mark it    */
                              /* and push contents on the mark stack  */
                              /* Pointer recognition test always      */
                              /* accepts interior pointers, i.e. this */
                              /* is appropriate for pointers found on */
                              /* stack.                               */
# endif
# if defined(PRINT_BLACK_LIST) || defined(KEEP_BACK_PTRS)
  void GC_mark_and_push_stack GC_PROTO((word p, ptr_t source));
                                /* Ditto, omits plausibility test */
# else
  void GC_mark_and_push_stack GC_PROTO((word p));
# endif
void GC_push_marked GC_PROTO((struct hblk * h, hdr * hhdr));
                /* Push contents of all marked objects in h onto */
                /* mark stack.                                   */
#ifdef SMALL_CONFIG
# define GC_push_next_marked_dirty(h) GC_push_next_marked(h)
#else
  struct hblk * GC_push_next_marked_dirty GC_PROTO((struct hblk * h));
                /* Invoke GC_push_marked on next dirty block above h. */
                /* Return a pointer just past the end of this block.  */
#endif /* !SMALL_CONFIG */
struct hblk * GC_push_next_marked GC_PROTO((struct hblk * h));
                /* Ditto, but also mark from clean pages. */
struct hblk * GC_push_next_marked_uncollectable GC_PROTO((struct hblk * h));
                /* Ditto, but mark only from uncollectable pages. */
GC_bool GC_stopped_mark GC_PROTO((GC_stop_func stop_func));
                        /* Stop world and mark from all roots */
                        /* and rescuers.                      */
void GC_clear_hdr_marks GC_PROTO((hdr * hhdr));
                                    /* Clear the mark bits in a header */
void GC_set_hdr_marks GC_PROTO((hdr * hhdr));
                                    /* Set the mark bits in a header */
void GC_set_fl_marks GC_PROTO((ptr_t p));
                                    /* Set all mark bits associated with */
                                    /* a free list.                      */
void GC_add_roots_inner GC_PROTO((char * b, char * e, GC_bool tmp));
void GC_remove_roots_inner GC_PROTO((char * b, char * e));
GC_bool GC_is_static_root GC_PROTO((ptr_t p));
                /* Is the address p in one of the registered static */
                /* root sections?                                   */
# if defined(MSWIN32) || defined(_WIN32_WCE_EMULATION)
GC_bool GC_is_tmp_root GC_PROTO((ptr_t p));
                /* Is the address p in one of the temporary static */
                /* root sections?                                  */
# endif
void GC_register_dynamic_libraries GC_PROTO((void));
                /* Add dynamic library data sections to the root set. */

GC_bool GC_register_main_static_data GC_PROTO((void));
                /* We need to register the main data segment.  Returns */
                /* TRUE unless this is done implicitly as part of      */
                /* dynamic library registration.                       */

/* Machine dependent startup routines */
ptr_t GC_get_stack_base GC_PROTO((void));       /* Cold end of stack */
#ifdef IA64
  ptr_t GC_get_register_stack_base GC_PROTO((void));
                                        /* Cold end of register stack. */
#endif
void GC_register_data_segments GC_PROTO((void));

/* Black listing: */
void GC_bl_init GC_PROTO((void));
# ifdef PRINT_BLACK_LIST
      void GC_add_to_black_list_normal GC_PROTO((word p, ptr_t source));
                        /* Register bits as a possible future false */
                        /* reference from the heap or static data   */
#     define GC_ADD_TO_BLACK_LIST_NORMAL(bits, source) \
                if (GC_all_interior_pointers) { \
                  GC_add_to_black_list_stack(bits, (ptr_t)(source)); \
                } else { \
                  GC_add_to_black_list_normal(bits, (ptr_t)(source)); \
                }
# else
      void GC_add_to_black_list_normal GC_PROTO((word p));
#     define GC_ADD_TO_BLACK_LIST_NORMAL(bits, source) \
                if (GC_all_interior_pointers) { \
                  GC_add_to_black_list_stack(bits); \
                } else { \
                  GC_add_to_black_list_normal(bits); \
                }
# endif

# ifdef PRINT_BLACK_LIST
    void GC_add_to_black_list_stack GC_PROTO((word p, ptr_t source));
# else
    void GC_add_to_black_list_stack GC_PROTO((word p));
# endif
struct hblk * GC_is_black_listed GC_PROTO((struct hblk * h, word len));
                        /* If there are likely to be false references */
                        /* to a block starting at h of the indicated  */
                        /* length, then return the next plausible     */
                        /* starting location for h that might avoid   */
                        /* these false references.                    */
void GC_promote_black_lists GC_PROTO((void));
                        /* Declare an end to a black listing phase. */
void GC_unpromote_black_lists GC_PROTO((void));
                        /* Approximately undo the effect of the above. */
                        /* This actually loses some information, but   */
                        /* only in a reasonably safe way.              */
word GC_number_stack_black_listed GC_PROTO(( \
        struct hblk *start, struct hblk *endp1));
                        /* Return the number of (stack) blacklisted */
                        /* blocks in the range for statistical      */
                        /* purposes.                                */

ptr_t GC_scratch_alloc GC_PROTO((word bytes));
                                /* GC internal memory allocation for   */
                                /* small objects.  Deallocation is not */
                                /* possible.                           */

/* Heap block layout maps: */
void GC_invalidate_map GC_PROTO((hdr * hhdr));
                                /* Remove the object map associated */
                                /* with the block.  This identifies */
                                /* the block as invalid to the mark */
                                /* routines.                        */
GC_bool GC_add_map_entry GC_PROTO((word sz));
                                /* Add a heap block map for objects of */
                                /* size sz to obj_map.                 */
                                /* Return FALSE on failure.            */
void GC_register_displacement_inner GC_PROTO((word offset));
                                /* Version of GC_register_displacement */
                                /* that assumes lock is already held   */
                                /* and signals are already disabled.   */

/*  hblk allocation: */
void GC_new_hblk GC_PROTO((word size_in_words, int kind));
                                /* Allocate a new heap block, and build */
                                /* a free list in it.                   */

ptr_t GC_build_fl GC_PROTO((struct hblk *h, word sz,
                            GC_bool clear, ptr_t list));
                                /* Build a free list for objects of     */
                                /* size sz in block h.  Append list to  */
                                /* end of the free lists.  Possibly     */
                                /* clear objects on the list.  Normally */
                                /* called by GC_new_hblk, but also      */
*/ 01535 01536 struct hblk * GC_allochblk GC_PROTO(( \ 01537 word size_in_words, int kind, unsigned flags)); 01538 /* Allocate a heap block, inform */ 01539 /* the marker that block is valid */ 01540 /* for objects of indicated size. */ 01541 01542 ptr_t GC_alloc_large GC_PROTO((word lw, int k, unsigned flags)); 01543 /* Allocate a large block of size lw words. */ 01544 /* The block is not cleared. */ 01545 /* Flags is 0 or IGNORE_OFF_PAGE. */ 01546 /* Calls GC_allochblk to do the actual */ 01547 /* allocation, but also triggers GC and/or */ 01548 /* heap expansion as appropriate. */ 01549 /* Does not update GC_words_allocd, but does */ 01550 /* other accounting. */ 01551 01552 ptr_t GC_alloc_large_and_clear GC_PROTO((word lw, int k, unsigned flags)); 01553 /* As above, but clear block if appropriate */ 01554 /* for kind k. */ 01555 01556 void GC_freehblk GC_PROTO((struct hblk * p)); 01557 /* Deallocate a heap block and mark it */ 01558 /* as invalid. */ 01559 01560 /* Misc GC: */ 01561 void GC_init_inner GC_PROTO((void)); 01562 GC_bool GC_expand_hp_inner GC_PROTO((word n)); 01563 void GC_start_reclaim GC_PROTO((int abort_if_found)); 01564 /* Restore unmarked objects to free */ 01565 /* lists, or (if abort_if_found is */ 01566 /* TRUE) report them. */ 01567 /* Sweeping of small object pages is */ 01568 /* largely deferred. */ 01569 void GC_continue_reclaim GC_PROTO((word sz, int kind)); 01570 /* Sweep pages of the given size and */ 01571 /* kind, as long as possible, and */ 01572 /* as long as the corresponding free list is */ 01573 /* empty. */ 01574 void GC_reclaim_or_delete_all GC_PROTO((void)); 01575 /* Arrange for all reclaim lists to be */ 01576 /* empty. Judiciously choose between */ 01577 /* sweeping and discarding each page. */ 01578 GC_bool GC_reclaim_all GC_PROTO((GC_stop_func stop_func, GC_bool ignore_old)); 01579 /* Reclaim all blocks. Abort (in a */ 01580 /* consistent state) if stop_func returns TRUE. */ 01581 GC_bool GC_block_empty GC_PROTO((hdr * hhdr)); 01582 /* Block completely unmarked? */ 01583 GC_bool GC_never_stop_func GC_PROTO((void)); 01584 /* Returns FALSE. */ 01585 GC_bool GC_try_to_collect_inner GC_PROTO((GC_stop_func f)); 01586 01587 /* Collect; caller must have acquired */ 01588 /* lock and disabled signals. */ 01589 /* Collection is aborted if f returns */ 01590 /* TRUE. Returns TRUE if it completes */ 01591 /* successfully. */ 01592 # define GC_gcollect_inner() \ 01593 (void) GC_try_to_collect_inner(GC_never_stop_func) 01594 void GC_finish_collection GC_PROTO((void)); 01595 /* Finish collection. Mark bits are */ 01596 /* consistent and lock is still held. */ 01597 GC_bool GC_collect_or_expand GC_PROTO(( \ 01598 word needed_blocks, GC_bool ignore_off_page)); 01599 /* Collect or expand heap in an attempt to */ 01600 /* make the indicated number of free */ 01601 /* blocks available. Should be called */ 01602 /* until the blocks are available or */ 01603 /* until it fails by returning FALSE. */ 01604 01605 extern GC_bool GC_is_initialized; /* GC_init() has been run. */ 01606 01607 #if defined(MSWIN32) || defined(MSWINCE) 01608 void GC_deinit GC_PROTO((void)); 01609 /* Free any resources allocated by */ 01610 /* GC_init */ 01611 #endif 01612 01613 void GC_collect_a_little_inner GC_PROTO((int n)); 01614 /* Do n units worth of garbage */ 01615 /* collection work, if appropriate. */ 01616 /* A unit is an amount appropriate for */ 01617 /* HBLKSIZE bytes of allocation.
*/ 01618 /* ptr_t GC_generic_malloc GC_PROTO((word lb, int k)); */ 01619 /* Allocate an object of the given */ 01620 /* kind. By default, there are only */ 01621 /* a few kinds: composite(pointerfree), */ 01622 /* atomic, uncollectable, etc. */ 01623 /* We claim it's possible for clever */ 01624 /* client code that understands GC */ 01625 /* internals to add more, e.g. to */ 01626 /* communicate object layout info */ 01627 /* to the collector. */ 01628 /* The actual decl is in gc_mark.h. */ 01629 ptr_t GC_generic_malloc_ignore_off_page GC_PROTO((size_t b, int k)); 01630 /* As above, but pointers past the */ 01631 /* first page of the resulting object */ 01632 /* are ignored. */ 01633 ptr_t GC_generic_malloc_inner GC_PROTO((word lb, int k)); 01634 /* Ditto, but I already hold lock, etc. */ 01635 ptr_t GC_generic_malloc_words_small_inner GC_PROTO((word lw, int k)); 01636 /* Analogous to the above, but assumes */ 01637 /* a small object size, and bypasses */ 01638 /* MERGE_SIZES mechanism. */ 01639 ptr_t GC_generic_malloc_words_small GC_PROTO((size_t lw, int k)); 01640 /* As above, but size in units of words */ 01641 /* Bypasses MERGE_SIZES. Assumes */ 01642 /* words <= MAXOBJSZ. */ 01643 ptr_t GC_generic_malloc_inner_ignore_off_page GC_PROTO((size_t lb, int k)); 01644 /* Allocate an object, where */ 01645 /* the client guarantees that there */ 01646 /* will always be a pointer to the */ 01647 /* beginning of the object while the */ 01648 /* object is live. */ 01649 ptr_t GC_allocobj GC_PROTO((word sz, int kind)); 01650 /* Make the indicated */ 01651 /* free list nonempty, and return its */ 01652 /* head. */ 01653 01654 void GC_free_inner(GC_PTR p); 01655 01656 void GC_init_headers GC_PROTO((void)); 01657 struct hblkhdr * GC_install_header GC_PROTO((struct hblk *h)); 01658 /* Install a header for block h. */ 01659 /* Return 0 on failure, or the header */ 01660 /* otherwise. */ 01661 GC_bool GC_install_counts GC_PROTO((struct hblk * h, word sz)); 01662 /* Set up forwarding counts for block */ 01663 /* h of size sz. */ 01664 /* Return FALSE on failure. */ 01665 void GC_remove_header GC_PROTO((struct hblk * h)); 01666 /* Remove the header for block h. */ 01667 void GC_remove_counts GC_PROTO((struct hblk * h, word sz)); 01668 /* Remove forwarding counts for h. */ 01669 hdr * GC_find_header GC_PROTO((ptr_t h)); /* Debugging only. */ 01670 01671 void GC_finalize GC_PROTO((void)); 01672 /* Perform all indicated finalization actions */ 01673 /* on unmarked objects. */ 01674 /* Unreachable finalizable objects are enqueued */ 01675 /* for processing by GC_invoke_finalizers. */ 01676 /* Invoked with lock. */ 01677 01678 void GC_notify_or_invoke_finalizers GC_PROTO((void)); 01679 /* If GC_finalize_on_demand is not set, invoke */ 01680 /* eligible finalizers. Otherwise: */ 01681 /* Call *GC_finalizer_notifier if there are */ 01682 /* finalizers to be run, and we haven't called */ 01683 /* this procedure yet this GC cycle. */ 01684 01685 GC_API GC_PTR GC_make_closure GC_PROTO((GC_finalization_proc fn, GC_PTR data)); 01686 GC_API void GC_debug_invoke_finalizer GC_PROTO((GC_PTR obj, GC_PTR data)); 01687 /* Auxiliary fns to make finalization work */ 01688 /* correctly with displaced pointers introduced */ 01689 /* by the debugging allocators. */ 01690 01691 void GC_add_to_heap GC_PROTO((struct hblk *p, word bytes)); 01692 /* Add a HBLKSIZE aligned chunk to the heap. */ 01693 01694 void GC_print_obj GC_PROTO((ptr_t p)); 01695 /* P points to somewhere inside an object with */ 01696 /* debugging info. 
Print a human readable */ 01697 /* description of the object to stderr. */ 01698 extern void (*GC_check_heap) GC_PROTO((void)); 01699 /* Check that all objects in the heap with */ 01700 /* debugging info are intact. */ 01701 /* Add any that are not to GC_smashed list. */ 01702 extern void (*GC_print_all_smashed) GC_PROTO((void)); 01703 /* Print GC_smashed if it's not empty. */ 01704 /* Clear GC_smashed list. */ 01705 extern void GC_print_all_errors GC_PROTO((void)); 01706 /* Print smashed and leaked objects, if any. */ 01707 /* Clear the lists of such objects. */ 01708 extern void (*GC_print_heap_obj) GC_PROTO((ptr_t p)); 01709 /* If possible, print a more detailed */ 01710 /* description of the object */ 01711 /* referred to by p. */ 01712 #if defined(LINUX) && defined(__ELF__) && !defined(SMALL_CONFIG) 01713 void GC_print_address_map GC_PROTO((void)); 01714 /* Print an address map of the process. */ 01715 #endif 01716 01717 extern GC_bool GC_have_errors; /* We saw a smashed or leaked object. */ 01718 /* Call error printing routine */ 01719 /* occasionally. */ 01720 extern GC_bool GC_print_stats; /* Produce at least some logging output */ 01721 /* Set from environment variable. */ 01722 01723 #ifndef NO_DEBUGGING 01724 extern GC_bool GC_dump_regularly; /* Generate regular debugging dumps. */ 01725 # define COND_DUMP if (GC_dump_regularly) GC_dump(); 01726 #else 01727 # define COND_DUMP 01728 #endif 01729 01730 #ifdef KEEP_BACK_PTRS 01731 extern long GC_backtraces; 01732 void GC_generate_random_backtrace_no_gc(void); 01733 #endif 01734 01735 extern GC_bool GC_print_back_height; 01736 01737 #ifdef MAKE_BACK_GRAPH 01738 void GC_print_back_graph_stats(void); 01739 #endif 01740 01741 /* Macros used for collector internal allocation. */ 01742 /* These assume the collector lock is held. */ 01743 #ifdef DBG_HDRS_ALL 01744 extern GC_PTR GC_debug_generic_malloc_inner(size_t lb, int k); 01745 extern GC_PTR GC_debug_generic_malloc_inner_ignore_off_page(size_t lb, 01746 int k); 01747 # define GC_INTERNAL_MALLOC GC_debug_generic_malloc_inner 01748 # define GC_INTERNAL_MALLOC_IGNORE_OFF_PAGE \ 01749 GC_debug_generic_malloc_inner_ignore_off_page 01750 # ifdef THREADS 01751 # define GC_INTERNAL_FREE GC_debug_free_inner 01752 # else 01753 # define GC_INTERNAL_FREE GC_debug_free 01754 # endif 01755 #else 01756 # define GC_INTERNAL_MALLOC GC_generic_malloc_inner 01757 # define GC_INTERNAL_MALLOC_IGNORE_OFF_PAGE \ 01758 GC_generic_malloc_inner_ignore_off_page 01759 # ifdef THREADS 01760 # define GC_INTERNAL_FREE GC_free_inner 01761 # else 01762 # define GC_INTERNAL_FREE GC_free 01763 # endif 01764 #endif 01765 01766 /* Memory unmapping: */ 01767 #ifdef USE_MUNMAP 01768 void GC_unmap_old(void); 01769 void GC_merge_unmapped(void); 01770 void GC_unmap(ptr_t start, word bytes); 01771 void GC_remap(ptr_t start, word bytes); 01772 void GC_unmap_gap(ptr_t start1, word bytes1, ptr_t start2, word bytes2); 01773 #endif 01774 01775 /* Virtual dirty bit implementation: */ 01776 /* Each implementation exports the following: */ 01777 void GC_read_dirty GC_PROTO((void)); 01778 /* Retrieve dirty bits. */ 01779 GC_bool GC_page_was_dirty GC_PROTO((struct hblk *h)); 01780 /* Read retrieved dirty bits. */ 01781 GC_bool GC_page_was_ever_dirty GC_PROTO((struct hblk *h)); 01782 /* Could the page contain valid heap pointers? */ 01783 void GC_is_fresh GC_PROTO((struct hblk *h, word n)); 01784 /* Assert the region currently contains no */ 01785 /* valid pointers.
*/ 01786 void GC_remove_protection GC_PROTO((struct hblk *h, word nblocks, 01787 GC_bool pointerfree)); 01788 /* h is about to be written or allocated. Ensure */ 01789 /* that it's not write protected by the virtual */ 01790 /* dirty bit implementation. */ 01791 01792 void GC_dirty_init GC_PROTO((void)); 01793 01794 /* Slow/general mark bit manipulation: */ 01795 GC_API GC_bool GC_is_marked GC_PROTO((ptr_t p)); 01796 void GC_clear_mark_bit GC_PROTO((ptr_t p)); 01797 void GC_set_mark_bit GC_PROTO((ptr_t p)); 01798 01799 /* Stubborn objects: */ 01800 void GC_read_changed GC_PROTO((void)); /* Analogous to GC_read_dirty */ 01801 GC_bool GC_page_was_changed GC_PROTO((struct hblk * h)); 01802 /* Analogous to GC_page_was_dirty */ 01803 void GC_clean_changing_list GC_PROTO((void)); 01804 /* Collect obsolete changing list entries */ 01805 void GC_stubborn_init GC_PROTO((void)); 01806 01807 /* Debugging print routines: */ 01808 void GC_print_block_list GC_PROTO((void)); 01809 void GC_print_hblkfreelist GC_PROTO((void)); 01810 void GC_print_heap_sects GC_PROTO((void)); 01811 void GC_print_static_roots GC_PROTO((void)); 01812 void GC_print_finalization_stats GC_PROTO((void)); 01813 void GC_dump GC_PROTO((void)); 01814 01815 #ifdef KEEP_BACK_PTRS 01816 void GC_store_back_pointer(ptr_t source, ptr_t dest); 01817 void GC_marked_for_finalization(ptr_t dest); 01818 # define GC_STORE_BACK_PTR(source, dest) GC_store_back_pointer(source, dest) 01819 # define GC_MARKED_FOR_FINALIZATION(dest) GC_marked_for_finalization(dest) 01820 #else 01821 # define GC_STORE_BACK_PTR(source, dest) 01822 # define GC_MARKED_FOR_FINALIZATION(dest) 01823 #endif 01824 01825 /* Make arguments appear live to compiler */ 01826 # ifdef __WATCOMC__ 01827 void GC_noop(void*, ...); 01828 # else 01829 # ifdef __DMC__ 01830 GC_API void GC_noop(...); 01831 # else 01832 GC_API void GC_noop(); 01833 # endif 01834 # endif 01835 01836 void GC_noop1 GC_PROTO((word)); 01837 01838 /* Logging and diagnostic output: */ 01839 GC_API void GC_printf GC_PROTO((GC_CONST char * format, long, long, long, long, long, long)); 01840 /* A version of printf that doesn't allocate, */ 01841 /* is restricted to long arguments, and */ 01842 /* (unfortunately) doesn't use varargs for */ 01843 /* portability. Restricted to 6 args and */ 01844 /* 1K total output length. */ 01845 /* (We use sprintf. Hopefully that doesn't */ 01846 /* allocate for long arguments.)
*/ 01847 # define GC_printf0(f) GC_printf(f, 0l, 0l, 0l, 0l, 0l, 0l) 01848 # define GC_printf1(f,a) GC_printf(f, (long)a, 0l, 0l, 0l, 0l, 0l) 01849 # define GC_printf2(f,a,b) GC_printf(f, (long)a, (long)b, 0l, 0l, 0l, 0l) 01850 # define GC_printf3(f,a,b,c) GC_printf(f, (long)a, (long)b, (long)c, 0l, 0l, 0l) 01851 # define GC_printf4(f,a,b,c,d) GC_printf(f, (long)a, (long)b, (long)c, \ 01852 (long)d, 0l, 0l) 01853 # define GC_printf5(f,a,b,c,d,e) GC_printf(f, (long)a, (long)b, (long)c, \ 01854 (long)d, (long)e, 0l) 01855 # define GC_printf6(f,a,b,c,d,e,g) GC_printf(f, (long)a, (long)b, (long)c, \ 01856 (long)d, (long)e, (long)g) 01857 01858 GC_API void GC_err_printf GC_PROTO((GC_CONST char * format, long, long, long, long, long, long)); 01859 # define GC_err_printf0(f) GC_err_puts(f) 01860 # define GC_err_printf1(f,a) GC_err_printf(f, (long)a, 0l, 0l, 0l, 0l, 0l) 01861 # define GC_err_printf2(f,a,b) GC_err_printf(f, (long)a, (long)b, 0l, 0l, 0l, 0l) 01862 # define GC_err_printf3(f,a,b,c) GC_err_printf(f, (long)a, (long)b, (long)c, \ 01863 0l, 0l, 0l) 01864 # define GC_err_printf4(f,a,b,c,d) GC_err_printf(f, (long)a, (long)b, \ 01865 (long)c, (long)d, 0l, 0l) 01866 # define GC_err_printf5(f,a,b,c,d,e) GC_err_printf(f, (long)a, (long)b, \ 01867 (long)c, (long)d, \ 01868 (long)e, 0l) 01869 # define GC_err_printf6(f,a,b,c,d,e,g) GC_err_printf(f, (long)a, (long)b, \ 01870 (long)c, (long)d, \ 01871 (long)e, (long)g) 01872 /* Ditto, writes to stderr. */ 01873 01874 void GC_err_puts GC_PROTO((GC_CONST char *s)); 01875 /* Write s to stderr, don't buffer, don't add */ 01876 /* newlines, don't ... */ 01877 01878 #if defined(LINUX) && !defined(SMALL_CONFIG) 01879 void GC_err_write GC_PROTO((GC_CONST char *buf, size_t len)); 01880 /* Write buf to stderr, don't buffer, don't add */ 01881 /* newlines, don't ... */ 01882 #endif 01883 01884 01885 # ifdef GC_ASSERTIONS 01886 # define GC_ASSERT(expr) if(!(expr)) {\ 01887 GC_err_printf2("Assertion failure: %s:%ld\n", \ 01888 __FILE__, (unsigned long)__LINE__); \ 01889 ABORT("assertion failure"); } 01890 # else 01891 # define GC_ASSERT(expr) 01892 # endif 01893 01894 /* Check a compile time assertion at compile time. The error */ 01895 /* message for failure is a bit baroque, but ... */ 01896 #if defined(mips) && !defined(__GNUC__) 01897 /* DOB: MIPSPro C gets an internal error taking the sizeof an array type. 01898 This code works correctly (ugliness is to avoid "unused var" warnings) */ 01899 # define GC_STATIC_ASSERT(expr) do { if (0) { char j[(expr)? 1 : -1]; j[0]='\0'; j[0]=j[0]; } } while(0) 01900 #else 01901 # define GC_STATIC_ASSERT(expr) sizeof(char[(expr)? 1 : -1]) 01902 #endif 01903 01904 # if defined(PARALLEL_MARK) || defined(THREAD_LOCAL_ALLOC) 01905 /* We need additional synchronization facilities from the thread */ 01906 /* support. We believe these are less performance critical */ 01907 /* than the main garbage collector lock; standard pthreads-based */ 01908 /* implementations should be sufficient. */ 01909 01910 /* The mark lock and condition variable. If the GC lock is also */ 01911 /* acquired, the GC lock must be acquired first. The mark lock is */ 01912 /* used to both protect some variables used by the parallel */ 01913 /* marker, and to protect GC_fl_builder_count, below. */ 01914 /* GC_notify_all_marker() is called when */ 01915 /* the state of the parallel marker changes */ 01916 /* in some significant way (see gc_mark.h for details). The */ 01917 /* latter set of events includes incrementing GC_mark_no. 
*/ 01918 /* GC_notify_all_builder() is called when GC_fl_builder_count */ 01919 /* reaches 0. */ 01920 01921 extern void GC_acquire_mark_lock(); 01922 extern void GC_release_mark_lock(); 01923 extern void GC_notify_all_builder(); 01924 /* extern void GC_wait_builder(); */ 01925 extern void GC_wait_for_reclaim(); 01926 01927 extern word GC_fl_builder_count; /* Protected by mark lock. */ 01928 # endif /* PARALLEL_MARK || THREAD_LOCAL_ALLOC */ 01929 # ifdef PARALLEL_MARK 01930 extern void GC_notify_all_marker(); 01931 extern void GC_wait_marker(); 01932 extern word GC_mark_no; /* Protected by mark lock. */ 01933 01934 extern void GC_help_marker(word my_mark_no); 01935 /* Try to help out parallel marker for mark cycle */ 01936 /* my_mark_no. Returns if the mark cycle finishes or */ 01937 /* was already done, or there was nothing to do for */ 01938 /* some other reason. */ 01939 # endif /* PARALLEL_MARK */ 01940 01941 # if defined(GC_PTHREADS) && !defined(GC_SOLARIS_THREADS) 01942 /* We define the thread suspension signal here, so that we can refer */ 01943 /* to it in the dirty bit implementation, if necessary. Ideally we */ 01944 /* would allocate a (real-time?) signal using the standard mechanism. */ 01945 /* Unfortunately, there is no standard mechanism. (There is one */ 01946 /* in Linux glibc, but it's not exported.) Thus we continue to use */ 01947 /* the same hard-coded signals we've always used. */ 01948 # if !defined(SIG_SUSPEND) 01949 # if defined(GC_LINUX_THREADS) || defined(GC_DGUX386_THREADS) 01950 # if defined(SPARC) && !defined(SIGPWR) 01951 /* SPARC/Linux doesn't properly define SIGPWR in <signal.h>. 01952 * It is aliased to SIGLOST in asm/signal.h, though. */ 01953 # define SIG_SUSPEND SIGLOST 01954 # else 01955 /* Linuxthreads itself uses SIGUSR1 and SIGUSR2. */ 01956 # define SIG_SUSPEND SIGPWR 01957 # endif 01958 # else /* !GC_LINUX_THREADS */ 01959 # if defined(_SIGRTMIN) 01960 # define SIG_SUSPEND _SIGRTMIN + 6 01961 # else 01962 # define SIG_SUSPEND SIGRTMIN + 6 01963 # endif 01964 # endif 01965 # endif /* !SIG_SUSPEND */ 01966 01967 # endif 01968 01969 # endif /* GC_PRIVATE_H */
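The root-set routines declared above (GC_add_roots_inner, GC_remove_roots_inner, GC_is_static_root, GC_register_dynamic_libraries) sit behind the public GC_add_roots entry point in gc.h. Below is a minimal sketch of registering an extra root region from client code; the array name and sizes are illustrative, as is everything in the sketches that follow which is not declared in this header or in gc.h.

#include "gc.h"

/* Hypothetical client data that the collector should scan for pointers. */
static void *my_static_roots[16];

int main(void)
{
    GC_INIT();

    /* Ask the collector to treat [my_static_roots, my_static_roots + 16)  */
    /* as part of the root set.                                            */
    GC_add_roots((char *)my_static_roots,
                 (char *)(my_static_roots + 16));

    /* A pointer stored here now keeps its object alive across collections. */
    my_static_roots[0] = GC_MALLOC(128);
    GC_gcollect();

    return 0;
}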
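GC_new_hblk and GC_build_fl (declared above) thread a free list of fixed-size objects through a fresh heap block by storing, in the first word of each object, a pointer to the next one, with the previously existing list appended at the tail. The standalone toy below models only that linking step; it is not the collector's code, and the block and object sizes are arbitrary.

#include <stdio.h>
#include <stdlib.h>

typedef unsigned long wrd;                /* stand-in for the collector's "word" */

/* Link every sz-word object in blk[0..nwords) onto a free list whose    */
/* previous contents ('list') end up at the tail.                        */
static void *toy_build_fl(wrd *blk, size_t nwords, size_t sz, void *list)
{
    size_t i;
    for (i = 0; i + sz <= nwords; i += sz) {
        *(void **)(blk + i) = list;       /* first word of object -> old head */
        list = blk + i;                   /* this object becomes the new head */
    }
    return list;
}

int main(void)
{
    static wrd block[64];
    void *fl = toy_build_fl(block, 64, 8, NULL);  /* 8 objects of 8 words each */
    int n = 0;
    while (fl != NULL) { n++; fl = *(void **)fl; }
    printf("free list holds %d objects\n", n);
    return 0;
}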
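GC_stopped_mark, GC_try_to_collect_inner and GC_reclaim_all all take a GC_stop_func and abandon their work, in a consistent state, as soon as it returns TRUE. The same convention is visible to clients through the public GC_try_to_collect in gc.h; the 50 ms budget below is an arbitrary illustrative value.

#include <stdio.h>
#include <time.h>
#include "gc.h"

static clock_t deadline;

/* GC_stop_func: return nonzero to ask the collector to abandon the cycle. */
static int budget_exceeded(void)
{
    return clock() > deadline;
}

int main(void)
{
    int completed;

    GC_INIT();
    deadline = clock() + CLOCKS_PER_SEC / 20;        /* roughly 50 ms of CPU time */
    completed = GC_try_to_collect(budget_exceeded);  /* 0 means it was abandoned  */
    printf("collection %s\n", completed ? "completed" : "was abandoned");
    return 0;
}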
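GC_finalize enqueues unreachable finalizable objects, and GC_notify_or_invoke_finalizers either runs them or, when GC_finalize_on_demand is set, only notifies the client. From outside the collector the same flow is driven with GC_register_finalizer and GC_invoke_finalizers; the object contents and messages below are illustrative.

#include <stdio.h>
#include "gc.h"

static void my_finalizer(GC_PTR obj, GC_PTR client_data)
{
    printf("finalizing object at %p (%s)\n", obj, (char *)client_data);
}

int main(void)
{
    void *obj;

    GC_INIT();
    GC_finalize_on_demand = 1;        /* queue finalizers instead of running them */

    obj = GC_MALLOC(64);
    GC_register_finalizer(obj, my_finalizer, (GC_PTR)"demo", 0, 0);

    obj = 0;                          /* drop the reference; a later collection   */
    GC_gcollect();                    /* can then enqueue the finalizer           */

    /* The client decides when the queued finalizers actually run. */
    printf("ran %d finalizer(s)\n", GC_invoke_finalizers());
    return 0;
}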
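The virtual dirty bit interface above (GC_read_dirty, GC_page_was_dirty, GC_remove_protection) is what incremental collection is built on, and GC_collect_a_little_inner is its unit of work. A client normally reaches this machinery through GC_enable_incremental and GC_collect_a_little in gc.h; the allocation loop below exists only to give the collector something to do.

#include <stdio.h>
#include "gc.h"

int main(void)
{
    int i;

    GC_INIT();
    GC_enable_incremental();          /* selects a virtual dirty bit implementation */

    for (i = 0; i < 10000; i++) {
        void *p = GC_MALLOC(256);     /* allocations interleave with small amounts  */
        (void)p;                      /* of marking and sweeping work               */
    }

    /* Drive any remaining work in small, bounded steps. */
    while (GC_collect_a_little())
        ;

    printf("heap size: %lu bytes\n", (unsigned long)GC_get_heap_size());
    return 0;
}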
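The fixed-arity GC_printfN / GC_err_printfN wrappers and the GC_ASSERT / GC_STATIC_ASSERT macros above are intended only for the collector's own translation units: they require gc_priv.h and, for GC_ASSERT, a build with GC_ASSERTIONS defined. A sketch of typical usage follows; the function and the statistics it prints are hypothetical.

#include "gc_priv.h"

void example_report(word bytes_allocd, word heap_size)
{
    /* Compile-time check: a word must be at least as wide as a pointer. */
    GC_STATIC_ASSERT(sizeof(word) >= sizeof(ptr_t));

    /* Runtime check; compiles away unless GC_ASSERTIONS is defined. */
    GC_ASSERT(heap_size >= bytes_allocd);

    /* GC_printf2 pads the remaining four of GC_printf's six long slots with 0l. */
    GC_printf2("Allocated %lu bytes in a heap of %lu bytes\n",
               (unsigned long)bytes_allocd, (unsigned long)heap_size);
}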
