gc.c
/*
** gc.c - garbage collector for mruby
**
** See Copyright Notice in mruby.h
*/

#include <string.h>
#include <stdlib.h>
#include "mruby.h"
#include "mruby/array.h"
#include "mruby/class.h"
#include "mruby/data.h"
#include "mruby/hash.h"
#include "mruby/proc.h"
#include "mruby/range.h"
#include "mruby/string.h"
#include "mruby/variable.h"
#include "mruby/gc.h"
#include "mruby/error.h"

/*
  = Tri-color Incremental Garbage Collection

  mruby's GC is a tri-color incremental GC with mark & sweep.
  Algorithm details are omitted; instead, the implementation is described below.

  == Object Colors

  Each object can be painted in one of three colors:

    * White - Unmarked.
    * Gray  - Marked, but its child objects are not yet marked.
    * Black - Marked, and its child objects are also marked.

  == Two White Types

  There are two white color types used in a flip-flop fashion: White-A and
  White-B, which respectively represent the Current White color (the newly
  allocated objects in the current GC cycle) and the Sweep Target White color
  (the dead objects to be swept).

  A and B are switched just at the beginning of the next GC cycle. At that
  time, all the dead objects have been swept, while the objects newly created
  in the current GC cycle that still remain White are now regarded as dead.
  Instead of traversing all the White-A objects and painting them White-B,
  we simply switch the meaning of White-A and White-B, which is much cheaper.

  As a result, the objects swept in the current GC cycle are always objects
  left over from the previous GC cycle. This allows us to sweep objects
  incrementally, without interference from newly created objects.

  == Execution Timing

  GC execution time and the interval between steps are decided by the live
  object count. List of adjustment APIs:

    * gc_interval_ratio_set
    * gc_step_ratio_set

  For details, see the comments for each function.

  == Write Barrier

  mruby implementers and C extension library writers must insert a write
  barrier when updating a reference held in a field of an object. When
  updating a reference from a field of object A to object B, two different
  types of write barrier are available:

    * mrb_field_write_barrier - targets object B for a mark.
    * mrb_write_barrier       - targets object A for a mark.

  == Generational Mode

  mruby's GC offers a Generational Mode while re-using the tri-color GC
  infrastructure. It treats Black objects as Old objects after each sweep
  phase, instead of painting them White. The key ideas are still the same as
  in traditional generational GC:

    * Minor GC - only traverse the Young objects (Gray objects) in the mark
                 phase, then sweep only the newly created objects, leaving
                 the Old objects alive.

    * Major GC - same as a full regular GC cycle.

  The difference from "traditional" generational GC is that the major GC in
  mruby is triggered incrementally, in a tri-color manner.


  For details, see the comments for each function.
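
  == Illustrative Example: Using the Write Barriers

  The following sketch is not part of the original source; it only
  illustrates the rule above. "MyBox", "mybox_set_slot" and the "slot"
  field are hypothetical names for a C extension type that stores a raw
  object pointer in one of its fields:

    struct MyBox { MRB_OBJECT_HEADER; struct RBasic *slot; };

    static void
    mybox_set_slot(mrb_state *mrb, struct MyBox *box, struct RBasic *value)
    {
      box->slot = value;
      mrb_field_write_barrier(mrb, (struct RBasic*)box, value);
    }

  mrb_write_barrier(mrb, (struct RBasic*)box) could be used instead when the
  same object's fields are written frequently, as the comment above
  mrb_write_barrier() later in this file explains.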

*/

struct free_obj {
  MRB_OBJECT_HEADER;
  struct RBasic *next;
};

typedef struct {
  union {
    struct free_obj free;
    struct RBasic basic;
    struct RObject object;
    struct RClass klass;
    struct RString string;
    struct RArray array;
    struct RHash hash;
    struct RRange range;
    struct RData data;
    struct RProc proc;
    struct RException exc;
#ifdef MRB_WORD_BOXING
    struct RFloat floatv;
    struct RCptr cptr;
#endif
  } as;
} RVALUE;

#ifdef GC_PROFILE
#include <stdio.h>
#include <sys/time.h>

static double program_invoke_time = 0;
static double gc_time = 0;
static double gc_total_time = 0;

static double
gettimeofday_time(void)
{
  struct timeval tv;
  gettimeofday(&tv, NULL);
  return tv.tv_sec + tv.tv_usec * 1e-6;
}

#define GC_INVOKE_TIME_REPORT(with) do {\
  fprintf(stderr, "%s\n", with);\
  fprintf(stderr, "gc_invoke: %19.3f\n", gettimeofday_time() - program_invoke_time);\
  fprintf(stderr, "is_generational: %d\n", is_generational(mrb));\
  fprintf(stderr, "is_major_gc: %d\n", is_major_gc(mrb));\
} while(0)

#define GC_TIME_START do {\
  gc_time = gettimeofday_time();\
} while(0)

#define GC_TIME_STOP_AND_REPORT do {\
  gc_time = gettimeofday_time() - gc_time;\
  gc_total_time += gc_time;\
  fprintf(stderr, "gc_state: %d\n", mrb->gc_state);\
  fprintf(stderr, "live: %zu\n", mrb->live);\
  fprintf(stderr, "majorgc_old_threshold: %zu\n", mrb->majorgc_old_threshold);\
  fprintf(stderr, "gc_threshold: %zu\n", mrb->gc_threshold);\
  fprintf(stderr, "gc_time: %30.20f\n", gc_time);\
  fprintf(stderr, "gc_total_time: %30.20f\n\n", gc_total_time);\
} while(0)
#else
#define GC_INVOKE_TIME_REPORT(s)
#define GC_TIME_START
#define GC_TIME_STOP_AND_REPORT
#endif

#ifdef GC_DEBUG
#define DEBUG(x) (x)
#else
#define DEBUG(x)
#endif

#define GC_STEP_SIZE 1024


MRB_API void*
mrb_realloc_simple(mrb_state *mrb, void *p, size_t len)
{
  void *p2;

  p2 = (mrb->allocf)(mrb, p, len, mrb->allocf_ud);
  if (!p2 && len > 0 && mrb->heaps) {
    mrb_full_gc(mrb);
    p2 = (mrb->allocf)(mrb, p, len, mrb->allocf_ud);
  }

  return p2;
}


MRB_API void*
mrb_realloc(mrb_state *mrb, void *p, size_t len)
{
  void *p2;

  p2 = mrb_realloc_simple(mrb, p, len);
  if (!p2 && len) {
    if (mrb->out_of_memory) {
      /* mrb_panic(mrb); */
    }
    else {
      mrb->out_of_memory = TRUE;
      mrb_exc_raise(mrb, mrb_obj_value(mrb->nomem_err));
    }
  }
  else {
    mrb->out_of_memory = FALSE;
  }

  return p2;
}

MRB_API void*
mrb_malloc(mrb_state *mrb, size_t len)
{
  return mrb_realloc(mrb, 0, len);
}

MRB_API void*
mrb_malloc_simple(mrb_state *mrb, size_t len)
{
  return mrb_realloc_simple(mrb, 0, len);
}

MRB_API void*
mrb_calloc(mrb_state *mrb, size_t nelem, size_t len)
{
  void *p;

  if (nelem > 0 && len > 0 &&
      nelem <= SIZE_MAX / len) {
    size_t size;
    size = nelem * len;
    p = mrb_malloc(mrb, size);

    memset(p, 0, size);
  }
  else {
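    /* zero-sized request, or nelem*len would overflow size_t: hand back NULL */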
    p = NULL;
  }

  return p;
}

MRB_API void
mrb_free(mrb_state *mrb, void *p)
{
  (mrb->allocf)(mrb, p, 0, mrb->allocf_ud);
}

#ifndef MRB_HEAP_PAGE_SIZE
#define MRB_HEAP_PAGE_SIZE 1024
#endif

struct heap_page {
  struct RBasic *freelist;
  struct heap_page *prev;
  struct heap_page *next;
  struct heap_page *free_next;
  struct heap_page *free_prev;
  mrb_bool old:1;
  RVALUE objects[MRB_HEAP_PAGE_SIZE];
};

static void
link_heap_page(mrb_state *mrb, struct heap_page *page)
{
  page->next = mrb->heaps;
  if (mrb->heaps)
    mrb->heaps->prev = page;
  mrb->heaps = page;
}

static void
unlink_heap_page(mrb_state *mrb, struct heap_page *page)
{
  if (page->prev)
    page->prev->next = page->next;
  if (page->next)
    page->next->prev = page->prev;
  if (mrb->heaps == page)
    mrb->heaps = page->next;
  page->prev = NULL;
  page->next = NULL;
}

static void
link_free_heap_page(mrb_state *mrb, struct heap_page *page)
{
  page->free_next = mrb->free_heaps;
  if (mrb->free_heaps) {
    mrb->free_heaps->free_prev = page;
  }
  mrb->free_heaps = page;
}

static void
unlink_free_heap_page(mrb_state *mrb, struct heap_page *page)
{
  if (page->free_prev)
    page->free_prev->free_next = page->free_next;
  if (page->free_next)
    page->free_next->free_prev = page->free_prev;
  if (mrb->free_heaps == page)
    mrb->free_heaps = page->free_next;
  page->free_prev = NULL;
  page->free_next = NULL;
}

static void
add_heap(mrb_state *mrb)
{
  struct heap_page *page = (struct heap_page *)mrb_calloc(mrb, 1, sizeof(struct heap_page));
  RVALUE *p, *e;
  struct RBasic *prev = NULL;

  for (p = page->objects, e=p+MRB_HEAP_PAGE_SIZE; p<e; p++) {
    p->as.free.tt = MRB_TT_FREE;
    p->as.free.next = prev;
    prev = &p->as.basic;
  }
  page->freelist = prev;

  link_heap_page(mrb, page);
  link_free_heap_page(mrb, page);
}

#define DEFAULT_GC_INTERVAL_RATIO 200
#define DEFAULT_GC_STEP_RATIO 200
#define DEFAULT_MAJOR_GC_INC_RATIO 200
#define is_generational(mrb) ((mrb)->is_generational_gc_mode)
#define is_major_gc(mrb) (is_generational(mrb) && (mrb)->gc_full)
#define is_minor_gc(mrb) (is_generational(mrb) && !(mrb)->gc_full)

void
mrb_init_heap(mrb_state *mrb)
{
  mrb->heaps = NULL;
  mrb->free_heaps = NULL;
  add_heap(mrb);
  mrb->gc_interval_ratio = DEFAULT_GC_INTERVAL_RATIO;
  mrb->gc_step_ratio = DEFAULT_GC_STEP_RATIO;
#ifndef MRB_GC_TURN_OFF_GENERATIONAL
  mrb->is_generational_gc_mode = TRUE;
  mrb->gc_full = TRUE;
#endif

#ifdef GC_PROFILE
  program_invoke_time = gettimeofday_time();
#endif
}

static void obj_free(mrb_state *mrb, struct RBasic *obj);

void
mrb_free_heap(mrb_state *mrb)
{
  struct heap_page *page = mrb->heaps;
  struct heap_page *tmp;
  RVALUE *p, *e;

  while (page) {
    tmp = page;
    page = page->next;
    for (p = tmp->objects, e=p+MRB_HEAP_PAGE_SIZE; p<e; p++) {
      if (p->as.free.tt != MRB_TT_FREE)
        obj_free(mrb, &p->as.basic);
    }
    mrb_free(mrb, tmp);
  }
}

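/*
 * Illustrative sketch, not part of the original gc.c: how extension code
 * typically calls the allocator API defined above. "my_buffer_new" and the
 * raw double buffer are hypothetical; production code would normally hang
 * such memory off an RData object so that obj_free() can release it through
 * the type's dfree callback.
 */
static double*
my_buffer_new(mrb_state *mrb, size_t n)
{
  /* mrb_calloc() zero-fills the block; it returns NULL when n is zero or
     n*sizeof(double) would overflow, while a real out-of-memory condition
     raises the pre-allocated nomem error inside mrb_realloc() */
  double *buf = (double*)mrb_calloc(mrb, n, sizeof(double));

  if (buf == NULL) {
    mrb_raise(mrb, E_RUNTIME_ERROR, "invalid buffer size");
  }
  return buf; /* release later with mrb_free(mrb, buf) */
}
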
static void
gc_protect(mrb_state *mrb, struct RBasic *p)
{
#ifdef MRB_GC_FIXED_ARENA
  if (mrb->arena_idx >= MRB_GC_ARENA_SIZE) {
    /* arena overflow error */
    mrb->arena_idx = MRB_GC_ARENA_SIZE - 4; /* force room in arena */
    mrb_raise(mrb, E_RUNTIME_ERROR, "arena overflow error");
  }
#else
  if (mrb->arena_idx >= mrb->arena_capa) {
    /* extend arena */
    mrb->arena_capa = (int)(mrb->arena_capa * 1.5);
    mrb->arena = (struct RBasic**)mrb_realloc(mrb, mrb->arena, sizeof(struct RBasic*)*mrb->arena_capa);
  }
#endif
  mrb->arena[mrb->arena_idx++] = p;
}

MRB_API void
mrb_gc_protect(mrb_state *mrb, mrb_value obj)
{
  if (mrb_immediate_p(obj)) return;
  gc_protect(mrb, mrb_basic_ptr(obj));
}

MRB_API struct RBasic*
mrb_obj_alloc(mrb_state *mrb, enum mrb_vtype ttype, struct RClass *cls)
{
  struct RBasic *p;
  static const RVALUE RVALUE_zero = { { { MRB_TT_FALSE } } };

#ifdef MRB_GC_STRESS
  mrb_full_gc(mrb);
#endif
  if (mrb->gc_threshold < mrb->live) {
    mrb_incremental_gc(mrb);
  }
  if (mrb->free_heaps == NULL) {
    add_heap(mrb);
  }

  p = mrb->free_heaps->freelist;
  mrb->free_heaps->freelist = ((struct free_obj*)p)->next;
  if (mrb->free_heaps->freelist == NULL) {
    unlink_free_heap_page(mrb, mrb->free_heaps);
  }

  mrb->live++;
  gc_protect(mrb, p);
  *(RVALUE *)p = RVALUE_zero;
  p->tt = ttype;
  p->c = cls;
  paint_partial_white(mrb, p);
  return p;
}

static inline void
add_gray_list(mrb_state *mrb, struct RBasic *obj)
{
#ifdef MRB_GC_STRESS
  if (obj->tt > MRB_TT_MAXDEFINE) {
    abort();
  }
#endif
  paint_gray(obj);
  obj->gcnext = mrb->gray_list;
  mrb->gray_list = obj;
}

static void
mark_context_stack(mrb_state *mrb, struct mrb_context *c)
{
  size_t i;
  size_t e;

  e = c->stack - c->stbase;
  if (c->ci) e += c->ci->nregs;
  if (c->stbase + e > c->stend) e = c->stend - c->stbase;
  for (i=0; i<e; i++) {
    mrb_value v = c->stbase[i];

    if (!mrb_immediate_p(v)) {
      if (mrb_basic_ptr(v)->tt == MRB_TT_FREE) {
        c->stbase[i] = mrb_nil_value();
      }
      else {
        mrb_gc_mark(mrb, mrb_basic_ptr(v));
      }
    }
  }
}

static void
mark_context(mrb_state *mrb, struct mrb_context *c)
{
  int i, e = 0;
  mrb_callinfo *ci;

  /* mark stack */
  mark_context_stack(mrb, c);

  /* mark VM stack */
  if (c->cibase) {
    for (ci = c->cibase; ci <= c->ci; ci++) {
      if (ci->eidx > e) {
        e = ci->eidx;
      }
      mrb_gc_mark(mrb, (struct RBasic*)ci->env);
      mrb_gc_mark(mrb, (struct RBasic*)ci->proc);
      mrb_gc_mark(mrb, (struct RBasic*)ci->target_class);
    }
  }
  /* mark ensure stack */
  for (i=0; i<e; i++) {
    mrb_gc_mark(mrb, (struct RBasic*)c->ensure[i]);
  }
  /* mark fibers */
  if (c->prev && c->prev->fib) {
    mrb_gc_mark(mrb, (struct RBasic*)c->prev->fib);
  }
}

static void
gc_mark_children(mrb_state *mrb, struct RBasic *obj)
{
  mrb_assert(is_gray(obj));
  paint_black(obj);
  mrb->gray_list = obj->gcnext;
  mrb_gc_mark(mrb, (struct RBasic*)obj->c);
  switch (obj->tt) {
  case MRB_TT_ICLASS:
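    /* the wrapper's class pointer was already marked above the switch, so
       only the super link into the remaining ancestor chain is left */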
    mrb_gc_mark(mrb, (struct RBasic*)((struct RClass*)obj)->super);
    break;

  case MRB_TT_CLASS:
  case MRB_TT_MODULE:
  case MRB_TT_SCLASS:
    {
      struct RClass *c = (struct RClass*)obj;

      mrb_gc_mark_mt(mrb, c);
      mrb_gc_mark(mrb, (struct RBasic*)c->super);
    }
    /* fall through */

  case MRB_TT_OBJECT:
  case MRB_TT_DATA:
  case MRB_TT_EXCEPTION:
    mrb_gc_mark_iv(mrb, (struct RObject*)obj);
    break;

  case MRB_TT_PROC:
    {
      struct RProc *p = (struct RProc*)obj;

      mrb_gc_mark(mrb, (struct RBasic*)p->env);
      mrb_gc_mark(mrb, (struct RBasic*)p->target_class);
    }
    break;

  case MRB_TT_ENV:
    {
      struct REnv *e = (struct REnv*)obj;

      if (!MRB_ENV_STACK_SHARED_P(e)) {
        mrb_int i, len;

        len = MRB_ENV_STACK_LEN(e);
        for (i=0; i<len; i++) {
          mrb_gc_mark_value(mrb, e->stack[i]);
        }
      }
    }
    break;

  case MRB_TT_FIBER:
    {
      struct mrb_context *c = ((struct RFiber*)obj)->cxt;

      if (c) mark_context(mrb, c);
    }
    break;

  case MRB_TT_ARRAY:
    {
      struct RArray *a = (struct RArray*)obj;
      size_t i, e;

      for (i=0,e=a->len; i<e; i++) {
        mrb_gc_mark_value(mrb, a->ptr[i]);
      }
    }
    break;

  case MRB_TT_HASH:
    mrb_gc_mark_iv(mrb, (struct RObject*)obj);
    mrb_gc_mark_hash(mrb, (struct RHash*)obj);
    break;

  case MRB_TT_STRING:
    break;

  case MRB_TT_RANGE:
    {
      struct RRange *r = (struct RRange*)obj;

      if (r->edges) {
        mrb_gc_mark_value(mrb, r->edges->beg);
        mrb_gc_mark_value(mrb, r->edges->end);
      }
    }
    break;

  default:
    break;
  }
}

MRB_API void
mrb_gc_mark(mrb_state *mrb, struct RBasic *obj)
{
  if (obj == 0) return;
  if (!is_white(obj)) return;
  mrb_assert((obj)->tt != MRB_TT_FREE);
  add_gray_list(mrb, obj);
}

static void
obj_free(mrb_state *mrb, struct RBasic *obj)
{
  DEBUG(printf("obj_free(%p,tt=%d)\n",obj,obj->tt));
  switch (obj->tt) {
    /* immediate - no mark */
  case MRB_TT_TRUE:
  case MRB_TT_FIXNUM:
  case MRB_TT_SYMBOL:
    /* cannot happen */
    return;

  case MRB_TT_FLOAT:
#ifdef MRB_WORD_BOXING
    break;
#else
    return;
#endif

  case MRB_TT_OBJECT:
  case MRB_TT_EXCEPTION:
    mrb_gc_free_iv(mrb, (struct RObject*)obj);
    break;

  case MRB_TT_CLASS:
  case MRB_TT_MODULE:
  case MRB_TT_SCLASS:
    mrb_gc_free_mt(mrb, (struct RClass*)obj);
    mrb_gc_free_iv(mrb, (struct RObject*)obj);
    break;

  case MRB_TT_ENV:
    {
      struct REnv *e = (struct REnv*)obj;

      if (!MRB_ENV_STACK_SHARED_P(e)) {
        mrb_free(mrb, e->stack);
        e->stack = NULL;
      }
    }
    break;

  case MRB_TT_FIBER:
    {
      struct mrb_context *c = ((struct RFiber*)obj)->cxt;

      if (c != mrb->root_c)
        mrb_free_context(mrb, c);
    }
    break;

  case MRB_TT_ARRAY:
    if (ARY_SHARED_P(obj))
      mrb_ary_decref(mrb, ((struct RArray*)obj)->aux.shared);
    else
      mrb_free(mrb, ((struct RArray*)obj)->ptr);
    break;

  case MRB_TT_HASH:
    mrb_gc_free_iv(mrb, (struct RObject*)obj);
    mrb_gc_free_hash(mrb, (struct RHash*)obj);
    break;

  case MRB_TT_STRING:
    mrb_gc_free_str(mrb, (struct RString*)obj);
    break;

  case MRB_TT_PROC:
    {
      struct RProc *p = (struct RProc*)obj;

      if (!MRB_PROC_CFUNC_P(p) && p->body.irep) {
        mrb_irep_decref(mrb, p->body.irep);
      }
    }
    break;

  case MRB_TT_RANGE:
    mrb_free(mrb, ((struct RRange*)obj)->edges);
    break;

  case MRB_TT_DATA:
    {
      struct RData *d = (struct RData*)obj;
      if (d->type && d->type->dfree) {
        d->type->dfree(mrb, d->data);
      }
      mrb_gc_free_iv(mrb, (struct RObject*)obj);
    }
    break;

  default:
    break;
  }
  obj->tt = MRB_TT_FREE;
}

static void
root_scan_phase(mrb_state *mrb)
{
  size_t i, e;

  if (!is_minor_gc(mrb)) {
    mrb->gray_list = NULL;
    mrb->atomic_gray_list = NULL;
  }

  mrb_gc_mark_gv(mrb);
  /* mark arena */
  for (i=0,e=mrb->arena_idx; i<e; i++) {
    mrb_gc_mark(mrb, mrb->arena[i]);
  }
  /* mark class hierarchy */
  mrb_gc_mark(mrb, (struct RBasic*)mrb->object_class);
  /* mark top_self */
  mrb_gc_mark(mrb, (struct RBasic*)mrb->top_self);
  /* mark exception */
  mrb_gc_mark(mrb, (struct RBasic*)mrb->exc);
  /* mark pre-allocated exception */
  mrb_gc_mark(mrb, (struct RBasic*)mrb->nomem_err);

  mark_context(mrb, mrb->root_c);
  if (mrb->root_c->fib) {
    mrb_gc_mark(mrb, (struct RBasic*)mrb->root_c->fib);
  }
  if (mrb->root_c != mrb->c) {
    mark_context(mrb, mrb->c);
  }
}

static size_t
gc_gray_mark(mrb_state *mrb, struct RBasic *obj)
{
  size_t children = 0;

  gc_mark_children(mrb, obj);

  switch (obj->tt) {
  case MRB_TT_ICLASS:
    children++;
    break;

  case MRB_TT_CLASS:
  case MRB_TT_SCLASS:
  case MRB_TT_MODULE:
    {
      struct RClass *c = (struct RClass*)obj;

      children += mrb_gc_mark_iv_size(mrb, (struct RObject*)obj);
      children += mrb_gc_mark_mt_size(mrb, c);
      children++;
    }
    break;

  case MRB_TT_OBJECT:
  case MRB_TT_DATA:
  case MRB_TT_EXCEPTION:
    children += mrb_gc_mark_iv_size(mrb, (struct RObject*)obj);
    break;

  case MRB_TT_ENV:
    children += (int)obj->flags;
    break;

  case MRB_TT_FIBER:
    {
      struct mrb_context *c = ((struct RFiber*)obj)->cxt;
      size_t i;
      mrb_callinfo *ci;

      if (!c) break;
      /* mark stack */
      i = c->stack - c->stbase;
      if (c->ci) i += c->ci->nregs;
      if (c->stbase + i > c->stend) i = c->stend - c->stbase;
      children += i;

      /* mark ensure stack */
      children += (c->ci) ? c->ci->eidx : 0;

      /* mark closure */
      if (c->cibase) {
        for (i=0, ci = c->cibase; ci <= c->ci; i++, ci++)
          ;
      }
      children += i;
    }
    break;

  case MRB_TT_ARRAY:
    {
      struct RArray *a = (struct RArray*)obj;
      children += a->len;
    }
    break;

  case MRB_TT_HASH:
    children += mrb_gc_mark_iv_size(mrb, (struct RObject*)obj);
    children += mrb_gc_mark_hash_size(mrb, (struct RHash*)obj);
    break;

  case MRB_TT_PROC:
  case MRB_TT_RANGE:
    children+=2;
    break;

  default:
    break;
  }
  return children;
}


static void
gc_mark_gray_list(mrb_state *mrb) {
  while (mrb->gray_list) {
    if (is_gray(mrb->gray_list))
      gc_mark_children(mrb, mrb->gray_list);
    else
      mrb->gray_list = mrb->gray_list->gcnext;
  }
}


static size_t
incremental_marking_phase(mrb_state *mrb, size_t limit)
{
  size_t tried_marks = 0;

  while (mrb->gray_list && tried_marks < limit) {
    tried_marks += gc_gray_mark(mrb, mrb->gray_list);
  }

  return tried_marks;
}

static void
final_marking_phase(mrb_state *mrb)
{
  mark_context_stack(mrb, mrb->root_c);
  gc_mark_gray_list(mrb);
  mrb_assert(mrb->gray_list == NULL);
  mrb->gray_list = mrb->atomic_gray_list;
  mrb->atomic_gray_list = NULL;
  gc_mark_gray_list(mrb);
  mrb_assert(mrb->gray_list == NULL);
}

static void
prepare_incremental_sweep(mrb_state *mrb)
{
  mrb->gc_state = GC_STATE_SWEEP;
  mrb->sweeps = mrb->heaps;
  mrb->gc_live_after_mark = mrb->live;
}

static size_t
incremental_sweep_phase(mrb_state *mrb, size_t limit)
{
  struct heap_page *page = mrb->sweeps;
  size_t tried_sweep = 0;

  while (page && (tried_sweep < limit)) {
    RVALUE *p = page->objects;
    RVALUE *e = p + MRB_HEAP_PAGE_SIZE;
    size_t freed = 0;
    mrb_bool dead_slot = TRUE;
    mrb_bool full = (page->freelist == NULL);

    if (is_minor_gc(mrb) && page->old) {
      /* skip a slot which doesn't contain any young object */
      p = e;
      dead_slot = FALSE;
    }
    while (p<e) {
      if (is_dead(mrb, &p->as.basic)) {
        if (p->as.basic.tt != MRB_TT_FREE) {
          obj_free(mrb, &p->as.basic);
          p->as.free.next = page->freelist;
          page->freelist = (struct RBasic*)p;
          freed++;
        }
      }
      else {
        if (!is_generational(mrb))
          paint_partial_white(mrb, &p->as.basic); /* next gc target */
        dead_slot = 0;
      }
      p++;
    }

    /* free dead slot */
    if (dead_slot && freed < MRB_HEAP_PAGE_SIZE) {
      struct heap_page *next = page->next;

      unlink_heap_page(mrb, page);
      unlink_free_heap_page(mrb, page);
      mrb_free(mrb, page);
      page = next;
    }
    else {
      if (full && freed > 0) {
        link_free_heap_page(mrb, page);
      }
      if (page->freelist == NULL && is_minor_gc(mrb))
        page->old = TRUE;
      else
        page->old = FALSE;
      page = page->next;
    }
    tried_sweep += MRB_HEAP_PAGE_SIZE;
    mrb->live -= freed;
    mrb->gc_live_after_mark -= freed;
  }
  mrb->sweeps = page;
  return tried_sweep;
}

static size_t
incremental_gc(mrb_state *mrb, size_t limit)
{
  switch (mrb->gc_state) {
  case GC_STATE_ROOT:
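    /* scan all roots onto the gray list, then flip the two whites so that
       survivors of the previous cycle become this cycle's sweep targets
       (see "Two White Types" in the header comment) */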
    root_scan_phase(mrb);
    mrb->gc_state = GC_STATE_MARK;
    flip_white_part(mrb);
    return 0;
  case GC_STATE_MARK:
    if (mrb->gray_list) {
      return incremental_marking_phase(mrb, limit);
    }
    else {
      final_marking_phase(mrb);
      prepare_incremental_sweep(mrb);
      return 0;
    }
  case GC_STATE_SWEEP: {
    size_t tried_sweep = 0;
    tried_sweep = incremental_sweep_phase(mrb, limit);
    if (tried_sweep == 0)
      mrb->gc_state = GC_STATE_ROOT;
    return tried_sweep;
  }
  default:
    /* unknown state */
    mrb_assert(0);
    return 0;
  }
}

static void
incremental_gc_until(mrb_state *mrb, enum gc_state to_state)
{
  do {
    incremental_gc(mrb, SIZE_MAX);
  } while (mrb->gc_state != to_state);
}

static void
incremental_gc_step(mrb_state *mrb)
{
  size_t limit = 0, result = 0;
  limit = (GC_STEP_SIZE/100) * mrb->gc_step_ratio;
  while (result < limit) {
    result += incremental_gc(mrb, limit);
    if (mrb->gc_state == GC_STATE_ROOT)
      break;
  }

  mrb->gc_threshold = mrb->live + GC_STEP_SIZE;
}

static void
clear_all_old(mrb_state *mrb)
{
  mrb_bool origin_mode = mrb->is_generational_gc_mode;

  mrb_assert(is_generational(mrb));
  if (is_major_gc(mrb)) {
    /* finish the half baked GC */
    incremental_gc_until(mrb, GC_STATE_ROOT);
  }

  /* Sweep the dead objects, then reset all the live objects
   * (including all the old objects, of course) to white. */
  mrb->is_generational_gc_mode = FALSE;
  prepare_incremental_sweep(mrb);
  incremental_gc_until(mrb, GC_STATE_ROOT);
  mrb->is_generational_gc_mode = origin_mode;

  /* The gray objects have already been painted as white */
  mrb->atomic_gray_list = mrb->gray_list = NULL;
}

MRB_API void
mrb_incremental_gc(mrb_state *mrb)
{
  if (mrb->gc_disabled) return;

  GC_INVOKE_TIME_REPORT("mrb_incremental_gc()");
  GC_TIME_START;

  if (is_minor_gc(mrb)) {
    incremental_gc_until(mrb, GC_STATE_ROOT);
  }
  else {
    incremental_gc_step(mrb);
  }

  if (mrb->gc_state == GC_STATE_ROOT) {
    mrb_assert(mrb->live >= mrb->gc_live_after_mark);
    mrb->gc_threshold = (mrb->gc_live_after_mark/100) * mrb->gc_interval_ratio;
    if (mrb->gc_threshold < GC_STEP_SIZE) {
      mrb->gc_threshold = GC_STEP_SIZE;
    }

    if (is_major_gc(mrb)) {
      mrb->majorgc_old_threshold = mrb->gc_live_after_mark/100 * DEFAULT_MAJOR_GC_INC_RATIO;
      mrb->gc_full = FALSE;
    }
    else if (is_minor_gc(mrb)) {
      if (mrb->live > mrb->majorgc_old_threshold) {
        clear_all_old(mrb);
        mrb->gc_full = TRUE;
      }
    }
  }

  GC_TIME_STOP_AND_REPORT;
}

/* Perform a full gc cycle */
MRB_API void
mrb_full_gc(mrb_state *mrb)
{
  if (mrb->gc_disabled) return;
  GC_INVOKE_TIME_REPORT("mrb_full_gc()");
  GC_TIME_START;

  if (is_generational(mrb)) {
    /* clear all the old objects back to young */
    clear_all_old(mrb);
    mrb->gc_full = TRUE;
  }
  else if (mrb->gc_state != GC_STATE_ROOT) {
    /* finish half baked GC cycle */
    incremental_gc_until(mrb, GC_STATE_ROOT);
  }

  incremental_gc_until(mrb, GC_STATE_ROOT);
  mrb->gc_threshold = (mrb->gc_live_after_mark/100) * mrb->gc_interval_ratio;

  if (is_generational(mrb)) {
    mrb->majorgc_old_threshold = mrb->gc_live_after_mark/100 * DEFAULT_MAJOR_GC_INC_RATIO;
    mrb->gc_full = FALSE;
  }

  GC_TIME_STOP_AND_REPORT;
}

MRB_API void
mrb_garbage_collect(mrb_state *mrb)
{
  mrb_full_gc(mrb);
}

MRB_API int
mrb_gc_arena_save(mrb_state *mrb)
{
  return mrb->arena_idx;
}

MRB_API void
mrb_gc_arena_restore(mrb_state *mrb, int idx)
{
#ifndef MRB_GC_FIXED_ARENA
  int capa = mrb->arena_capa;

  if (idx < capa / 2) {
    capa = (int)(capa * 0.66);
    if (capa < MRB_GC_ARENA_SIZE) {
      capa = MRB_GC_ARENA_SIZE;
    }
    if (capa != mrb->arena_capa) {
      mrb->arena = (struct RBasic**)mrb_realloc(mrb, mrb->arena, sizeof(struct RBasic*)*capa);
      mrb->arena_capa = capa;
    }
  }
#endif
  mrb->arena_idx = idx;
}

/*
 * Field write barrier
 * Paint obj(Black) -> value(White) to obj(Black) -> value(Gray).
 */

MRB_API void
mrb_field_write_barrier(mrb_state *mrb, struct RBasic *obj, struct RBasic *value)
{
  if (!is_black(obj)) return;
  if (!is_white(value)) return;

  mrb_assert(!is_dead(mrb, value) && !is_dead(mrb, obj));
  mrb_assert(is_generational(mrb) || mrb->gc_state != GC_STATE_ROOT);

  if (is_generational(mrb) || mrb->gc_state == GC_STATE_MARK) {
    add_gray_list(mrb, value);
  }
  else {
    mrb_assert(mrb->gc_state == GC_STATE_SWEEP);
    paint_partial_white(mrb, obj); /* for never write barriers */
  }
}

/*
 * Write barrier
 * Paint obj(Black) to obj(Gray).
 *
 * The object painted gray will be traversed atomically in the final mark
 * phase, so use this write barrier for frequently written spots,
 * e.g. setting an element of an Array.
 */

MRB_API void
mrb_write_barrier(mrb_state *mrb, struct RBasic *obj)
{
  if (!is_black(obj)) return;

  mrb_assert(!is_dead(mrb, obj));
  mrb_assert(is_generational(mrb) || mrb->gc_state != GC_STATE_ROOT);
  paint_gray(obj);
  obj->gcnext = mrb->atomic_gray_list;
  mrb->atomic_gray_list = obj;
}

/*
 * call-seq:
 *   GC.start -> nil
 *
 * Initiates full garbage collection.
 *
 */

static mrb_value
gc_start(mrb_state *mrb, mrb_value obj)
{
  mrb_full_gc(mrb);
  return mrb_nil_value();
}

/*
 * call-seq:
 *   GC.enable -> true or false
 *
 * Enables garbage collection, returning <code>true</code> if garbage
 * collection was previously disabled.
 *
 *   GC.disable   #=> false
 *   GC.enable    #=> true
 *   GC.enable    #=> false
 *
 */

static mrb_value
gc_enable(mrb_state *mrb, mrb_value obj)
{
  mrb_bool old = mrb->gc_disabled;

  mrb->gc_disabled = FALSE;

  return mrb_bool_value(old);
}

/*
 * call-seq:
 *   GC.disable -> true or false
 *
 * Disables garbage collection, returning <code>true</code> if garbage
 * collection was already disabled.
 *
 *   GC.disable   #=> false
 *   GC.disable   #=> true
 *
 */

static mrb_value
gc_disable(mrb_state *mrb, mrb_value obj)
{
  mrb_bool old = mrb->gc_disabled;

  mrb->gc_disabled = TRUE;

  return mrb_bool_value(old);
}

/*
 * call-seq:
 *   GC.interval_ratio -> fixnum
 *
 * Returns the ratio of the GC interval. Default value is 200(%).
 *
 */

static mrb_value
gc_interval_ratio_get(mrb_state *mrb, mrb_value obj)
{
  return mrb_fixnum_value(mrb->gc_interval_ratio);
}

/*
 * call-seq:
 *   GC.interval_ratio = fixnum -> nil
 *
 * Updates the ratio of the GC interval. Default value is 200(%).
 * If set to 100(%), GC starts again as soon as all steps of the previous
 * GC cycle have finished.
 *
 */

static mrb_value
gc_interval_ratio_set(mrb_state *mrb, mrb_value obj)
{
  mrb_int ratio;

  mrb_get_args(mrb, "i", &ratio);
  mrb->gc_interval_ratio = ratio;
  return mrb_nil_value();
}

/*
 * call-seq:
 *   GC.step_ratio -> fixnum
 *
 * Returns the step span ratio of the incremental GC. Default value is 200(%).
 *
 */

static mrb_value
gc_step_ratio_get(mrb_state *mrb, mrb_value obj)
{
  return mrb_fixnum_value(mrb->gc_step_ratio);
}

/*
 * call-seq:
 *   GC.step_ratio = fixnum -> nil
 *
 * Updates the step span ratio of the incremental GC. Default value is 200(%).
 * A larger ratio makes each incremental GC step longer.
 *
 */

static mrb_value
gc_step_ratio_set(mrb_state *mrb, mrb_value obj)
{
  mrb_int ratio;

  mrb_get_args(mrb, "i", &ratio);
  mrb->gc_step_ratio = ratio;
  return mrb_nil_value();
}

static void
change_gen_gc_mode(mrb_state *mrb, mrb_bool enable)
{
  if (is_generational(mrb) && !enable) {
    clear_all_old(mrb);
    mrb_assert(mrb->gc_state == GC_STATE_ROOT);
    mrb->gc_full = FALSE;
  }
  else if (!is_generational(mrb) && enable) {
    incremental_gc_until(mrb, GC_STATE_ROOT);
    mrb->majorgc_old_threshold = mrb->gc_live_after_mark/100 * DEFAULT_MAJOR_GC_INC_RATIO;
    mrb->gc_full = FALSE;
  }
  mrb->is_generational_gc_mode = enable;
}

/*
 * call-seq:
 *   GC.generational_mode -> true or false
 *
 * Returns whether the GC is in generational or normal mode.
 *
 */

static mrb_value
gc_generational_mode_get(mrb_state *mrb, mrb_value self)
{
  return mrb_bool_value(mrb->is_generational_gc_mode);
}

/*
 * call-seq:
 *   GC.generational_mode = true or false -> true or false
 *
 * Switches between generational and normal gc mode.
 *
 */

static mrb_value
gc_generational_mode_set(mrb_state *mrb, mrb_value self)
{
  mrb_bool enable;

  mrb_get_args(mrb, "b", &enable);
  if (mrb->is_generational_gc_mode != enable)
    change_gen_gc_mode(mrb, enable);

  return mrb_bool_value(enable);
}

void
mrb_objspace_each_objects(mrb_state *mrb, mrb_each_object_callback *callback, void *data)
{
  struct heap_page* page = mrb->heaps;

  while (page != NULL) {
    RVALUE *p, *pend;

    p = page->objects;
    pend = p + MRB_HEAP_PAGE_SIZE;
    for (;p < pend; p++) {
      (*callback)(mrb, &p->as.basic, data);
    }

    page = page->next;
  }
}

#ifdef GC_TEST
#ifdef GC_DEBUG
static mrb_value gc_test(mrb_state *, mrb_value);
#endif
#endif

void
mrb_init_gc(mrb_state *mrb)
{
  struct RClass *gc;

  gc = mrb_define_module(mrb, "GC");

  mrb_define_class_method(mrb, gc, "start", gc_start, MRB_ARGS_NONE());
  mrb_define_class_method(mrb, gc, "enable", gc_enable, MRB_ARGS_NONE());
  mrb_define_class_method(mrb, gc, "disable", gc_disable, MRB_ARGS_NONE());
  mrb_define_class_method(mrb, gc, "interval_ratio", gc_interval_ratio_get, MRB_ARGS_NONE());
  mrb_define_class_method(mrb, gc, "interval_ratio=", gc_interval_ratio_set, MRB_ARGS_REQ(1));
  mrb_define_class_method(mrb, gc, "step_ratio", gc_step_ratio_get, MRB_ARGS_NONE());
  mrb_define_class_method(mrb, gc, "step_ratio=", gc_step_ratio_set, MRB_ARGS_REQ(1));
  mrb_define_class_method(mrb, gc, "generational_mode=", gc_generational_mode_set, MRB_ARGS_REQ(1));
  mrb_define_class_method(mrb, gc, "generational_mode", gc_generational_mode_get, MRB_ARGS_NONE());
#ifdef GC_TEST
#ifdef GC_DEBUG
  mrb_define_class_method(mrb, gc, "test", gc_test, MRB_ARGS_NONE());
#endif
#endif
}

#ifdef GC_TEST
#ifdef GC_DEBUG
void
test_mrb_field_write_barrier(void)
{
  mrb_state *mrb = mrb_open();
  struct RBasic *obj, *value;

  puts("test_mrb_field_write_barrier");
  mrb->is_generational_gc_mode = FALSE;
  obj = mrb_basic_ptr(mrb_ary_new(mrb));
  value = mrb_basic_ptr(mrb_str_new_lit(mrb, "value"));
  paint_black(obj);
  paint_partial_white(mrb, value);


  puts(" in GC_STATE_MARK");
  mrb->gc_state = GC_STATE_MARK;
  mrb_field_write_barrier(mrb, obj, value);

  mrb_assert(is_gray(value));


  puts(" in GC_STATE_SWEEP");
  paint_partial_white(mrb, value);
  mrb->gc_state = GC_STATE_SWEEP;
  mrb_field_write_barrier(mrb, obj, value);

  mrb_assert(obj->color & mrb->current_white_part);
  mrb_assert(value->color & mrb->current_white_part);


  puts(" fail with black");
  mrb->gc_state = GC_STATE_MARK;
  paint_white(obj);
  paint_partial_white(mrb, value);
  mrb_field_write_barrier(mrb, obj, value);

  mrb_assert(obj->color & mrb->current_white_part);


  puts(" fail with gray");
  mrb->gc_state = GC_STATE_MARK;
  paint_black(obj);
  paint_gray(value);
  mrb_field_write_barrier(mrb, obj, value);

  mrb_assert(is_gray(value));


  {
    puts("test_mrb_field_write_barrier_value");
    obj = mrb_basic_ptr(mrb_ary_new(mrb));
    mrb_value value = mrb_str_new_lit(mrb, "value");
    paint_black(obj);
    paint_partial_white(mrb, mrb_basic_ptr(value));

    mrb->gc_state = GC_STATE_MARK;
    mrb_field_write_barrier_value(mrb, obj, value);

    mrb_assert(is_gray(mrb_basic_ptr(value)));
  }

  mrb_close(mrb);
}

void
test_mrb_write_barrier(void)
{
  mrb_state *mrb = mrb_open();
  struct RBasic *obj;

  puts("test_mrb_write_barrier");
  obj = mrb_basic_ptr(mrb_ary_new(mrb));
  paint_black(obj);

  puts(" in GC_STATE_MARK");
  mrb->gc_state = GC_STATE_MARK;
  mrb_write_barrier(mrb, obj);

  mrb_assert(is_gray(obj));
  mrb_assert(mrb->atomic_gray_list == obj);


  puts(" fail with gray");
  paint_gray(obj);
  mrb_write_barrier(mrb, obj);

  mrb_assert(is_gray(obj));

  mrb_close(mrb);
}

void
test_add_gray_list(void)
{
  mrb_state *mrb = mrb_open();
  struct RBasic *obj1, *obj2;

  puts("test_add_gray_list");
  change_gen_gc_mode(mrb, FALSE);
  mrb_assert(mrb->gray_list == NULL);
  obj1 = mrb_basic_ptr(mrb_str_new_lit(mrb, "test"));
  add_gray_list(mrb, obj1);
  mrb_assert(mrb->gray_list == obj1);
  mrb_assert(is_gray(obj1));

  obj2 = mrb_basic_ptr(mrb_str_new_lit(mrb, "test"));
  add_gray_list(mrb, obj2);
  mrb_assert(mrb->gray_list == obj2);
  mrb_assert(mrb->gray_list->gcnext == obj1);
  mrb_assert(is_gray(obj2));

  mrb_close(mrb);
}

void
test_gc_gray_mark(void)
{
  mrb_state *mrb = mrb_open();
  mrb_value obj_v, value_v;
  struct RBasic *obj;
  size_t gray_num = 0;

  puts("test_gc_gray_mark");

  puts(" in MRB_TT_CLASS");
  obj = (struct RBasic*)mrb->object_class;
  paint_gray(obj);
  gray_num = gc_gray_mark(mrb, obj);
  mrb_assert(is_black(obj));
  mrb_assert(gray_num > 1);

  puts(" in MRB_TT_ARRAY");
  obj_v = mrb_ary_new(mrb);
  value_v = mrb_str_new_lit(mrb, "test");
  paint_gray(mrb_basic_ptr(obj_v));
  paint_partial_white(mrb, mrb_basic_ptr(value_v));
  mrb_ary_push(mrb, obj_v, value_v);
  gray_num = gc_gray_mark(mrb, mrb_basic_ptr(obj_v));
  mrb_assert(is_black(mrb_basic_ptr(obj_v)));
  mrb_assert(is_gray(mrb_basic_ptr(value_v)));
  mrb_assert(gray_num == 1);

  mrb_close(mrb);
}

void
test_incremental_gc(void)
{
  mrb_state *mrb = mrb_open();
  size_t max = ~0, live = 0, total = 0, freed = 0;
  RVALUE *free;
  struct heap_page *page;

  puts("test_incremental_gc");
  change_gen_gc_mode(mrb, FALSE);

  puts(" in mrb_full_gc");
  mrb_full_gc(mrb);

  mrb_assert(mrb->gc_state == GC_STATE_ROOT);
  puts(" in GC_STATE_ROOT");
  incremental_gc(mrb, max);
  mrb_assert(mrb->gc_state == GC_STATE_MARK);
  puts(" in GC_STATE_MARK");
  incremental_gc_until(mrb, GC_STATE_SWEEP);
  mrb_assert(mrb->gc_state == GC_STATE_SWEEP);

  puts(" in GC_STATE_SWEEP");
  page = mrb->heaps;
  while (page) {
    RVALUE *p = page->objects;
    RVALUE *e = p + MRB_HEAP_PAGE_SIZE;
    while (p<e) {
      if (is_black(&p->as.basic)) {
        live++;
      }
      if (is_gray(&p->as.basic) && !is_dead(mrb, &p->as.basic)) {
        printf("%p\n", &p->as.basic);
      }
      p++;
    }
    page = page->next;
    total += MRB_HEAP_PAGE_SIZE;
  }

  mrb_assert(mrb->gray_list == NULL);

  incremental_gc(mrb, max);
  mrb_assert(mrb->gc_state == GC_STATE_SWEEP);

  incremental_gc(mrb, max);
  mrb_assert(mrb->gc_state == GC_STATE_ROOT);

  free = (RVALUE*)mrb->heaps->freelist;
  while (free) {
    freed++;
    free = (RVALUE*)free->as.free.next;
  }

  mrb_assert(mrb->live == live);
  mrb_assert(mrb->live == total-freed);

  puts("test_incremental_gc(gen)");
  incremental_gc_until(mrb, GC_STATE_SWEEP);
  change_gen_gc_mode(mrb, TRUE);

  mrb_assert(mrb->gc_full == FALSE);
  mrb_assert(mrb->gc_state == GC_STATE_ROOT);

  puts(" in minor");
  mrb_assert(is_minor_gc(mrb));
  mrb_assert(mrb->majorgc_old_threshold > 0);
  mrb->majorgc_old_threshold = 0;
  mrb_incremental_gc(mrb);
  mrb_assert(mrb->gc_full == TRUE);
  mrb_assert(mrb->gc_state == GC_STATE_ROOT);

  puts(" in major");
  mrb_assert(is_major_gc(mrb));
  do {
    mrb_incremental_gc(mrb);
  } while (mrb->gc_state != GC_STATE_ROOT);
  mrb_assert(mrb->gc_full == FALSE);

  mrb_close(mrb);
}

void
test_incremental_sweep_phase(void)
{
  mrb_state *mrb = mrb_open();

  puts("test_incremental_sweep_phase");

  add_heap(mrb);
  mrb->sweeps = mrb->heaps;

  mrb_assert(mrb->heaps->next->next == NULL);
  mrb_assert(mrb->free_heaps->next->next == NULL);
  incremental_sweep_phase(mrb, MRB_HEAP_PAGE_SIZE*3);

  mrb_assert(mrb->heaps->next == NULL);
  mrb_assert(mrb->heaps == mrb->free_heaps);

  mrb_close(mrb);
}

static mrb_value
gc_test(mrb_state *mrb, mrb_value self)
{
  test_mrb_field_write_barrier();
  test_mrb_write_barrier();
  test_add_gray_list();
  test_gc_gray_mark();
  test_incremental_gc();
  test_incremental_sweep_phase();
  return mrb_nil_value();
}
#endif /* GC_DEBUG */
#endif /* GC_TEST */
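
The public entry points above (mrb_full_gc, mrb_incremental_gc, mrb_gc_arena_save/mrb_gc_arena_restore, and the GC module methods registered in mrb_init_gc) are normally driven from embedding code. The program below is an illustrative sketch of such a driver, not part of gc.c; it assumes only APIs that appear in this file plus the standard mrb_open/mrb_close and array/string constructors, and shows the usual save/restore pattern that keeps the GC arena from overflowing in a tight allocation loop.

#include <mruby.h>
#include <mruby/array.h>
#include <mruby/string.h>

int
main(void)
{
  mrb_state *mrb = mrb_open();
  mrb_value ary;
  int ai, i;

  if (mrb == NULL) return 1;

  ary = mrb_ary_new(mrb);        /* created before the save, so it stays protected */
  ai = mrb_gc_arena_save(mrb);
  for (i = 0; i < 100000; i++) {
    /* each temporary string is reachable from ary, so dropping it from the
       arena is safe; mrb_ary_push() takes care of the write barrier */
    mrb_ary_push(mrb, ary, mrb_str_new_lit(mrb, "tmp"));
    mrb_gc_arena_restore(mrb, ai);
  }

  mrb_full_gc(mrb);              /* force a complete mark & sweep cycle */
  mrb_close(mrb);
  return 0;
}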