Feature #2471 » switch_gc.patch
gc_lazy.c (revision 0)

/**********************************************************************

  gc_lazy.c -

  $Author: $
  created at:

  Copyright (C) 1993-2007 Yukihiro Matsumoto
  Copyright (C) 2000  Network Applied Communication Laboratory, Inc.
  Copyright (C) 2000  Information-technology Promotion Agency, Japan

**********************************************************************/
#include "gc_core.h"    /* shared GC internals, as included by gc_default.c
                         * (the include is assumed; this hunk omits it) */

#define live objspace->heap.live
#define dead objspace->heap.dead
#define do_heap_free objspace->heap.do_heap_free
#define heaps_sweep_index objspace->heap.sweep_index
#define heaps_sweep_inc objspace->heap.sweep_increment
#define during_sweep objspace->flags.during_sweep

static int garbage_collect_force_with_gvl(rb_objspace_t *objspace);
static void *
vm_xmalloc_lazy(rb_objspace_t *objspace, size_t size)
{
    void *mem;

    if ((ssize_t)size < 0) {
        negative_size_allocation_error("negative allocation size (or too big)");
    }
    if (size == 0) size = 1;

#if CALC_EXACT_MALLOC_SIZE
    size += sizeof(size_t);
#endif

    if ((ruby_gc_stress && !ruby_disable_gc_stress) ||
        (malloc_increase+size) > malloc_limit) {
        garbage_collect_force_with_gvl(objspace);
    }
    mem = malloc(size);
    if (!mem) {
        if (garbage_collect_force_with_gvl(objspace)) {
            mem = malloc(size);
        }
        if (!mem) {
            ruby_memerror();
        }
    }
    malloc_increase += size;

#if CALC_EXACT_MALLOC_SIZE
    objspace->malloc_params.allocated_size += size;
    objspace->malloc_params.allocations++;
    ((size_t *)mem)[0] = size;
    mem = (size_t *)mem + 1;
#endif

    return mem;
}
static void *
vm_xrealloc_lazy(rb_objspace_t *objspace, void *ptr, size_t size)
{
    void *mem;

    if ((ssize_t)size < 0) {
        negative_size_allocation_error("negative re-allocation size");
    }
    if (!ptr) return vm_xmalloc_lazy(objspace, size);
    if (size == 0) {
        vm_xfree(objspace, ptr);
        return 0;
    }
    if (ruby_gc_stress && !ruby_disable_gc_stress)
        garbage_collect_with_gvl(objspace);

#if CALC_EXACT_MALLOC_SIZE
    size += sizeof(size_t);
    objspace->malloc_params.allocated_size -= size;
    ptr = (size_t *)ptr - 1;
#endif

    mem = realloc(ptr, size);
    if (!mem) {
        if (garbage_collect_force_with_gvl(objspace)) {
            mem = realloc(ptr, size);
        }
        if (!mem) {
            ruby_memerror();
        }
    }
    malloc_increase += size;

#if CALC_EXACT_MALLOC_SIZE
    objspace->malloc_params.allocated_size += size;
    ((size_t *)mem)[0] = size;
    mem = (size_t *)mem + 1;
#endif

    return mem;
}
static void
assign_heap_slot_lazy(rb_objspace_t *objspace)
{
    RVALUE *p, *pend, *membase;
    size_t hi, lo, mid;
    size_t objs;

    objs = HEAP_OBJ_LIMIT;
    p = (RVALUE*)malloc(HEAP_SIZE);

    if (p == 0) {
        during_gc = 0;
        rb_memerror();
    }

    membase = p;
    if ((VALUE)p % sizeof(RVALUE) != 0) {
        p = (RVALUE*)((VALUE)p + sizeof(RVALUE) - ((VALUE)p % sizeof(RVALUE)));
        if ((HEAP_SIZE - HEAP_OBJ_LIMIT * sizeof(RVALUE)) < (size_t)((char*)p - (char*)membase)) {
            objs--;
        }
    }

    lo = 0;
    hi = heaps_used;
    while (lo < hi) {
        register RVALUE *mid_membase;
        mid = (lo + hi) / 2;
        mid_membase = heaps[mid].membase;
        if (mid_membase < membase) {
            lo = mid + 1;
        }
        else if (mid_membase > membase) {
            hi = mid;
        }
        else {
            rb_bug("same heap slot is allocated: %p at %"PRIuVALUE, (void *)membase, (VALUE)mid);
        }
    }
    if (hi < heaps_used) {
        MEMMOVE(&heaps[hi+1], &heaps[hi], struct heaps_slot, heaps_used - hi);
    }
    heaps[hi].membase = membase;
    heaps[hi].slot = p;
    heaps[hi].limit = objs;
    heaps[hi].color = BLACK;    /* fresh page counts as already swept */
    pend = p + objs;
    if (lomem == 0 || lomem > p) lomem = p;
    if (himem < pend) himem = pend;
    heaps_used++;

    while (p < pend) {
        p->as.free.flags = 0;
        p->as.free.next = freelist;
        freelist = p;
        p++;
    }
    if (hi < heaps_sweep_index) {
        heaps_sweep_index++;    /* keep the sweep cursor on the same slot */
    }
}
static VALUE
rb_newobj_from_heap_lazy(rb_objspace_t *objspace)
{
    VALUE obj;

    if ((ruby_gc_stress && !ruby_disable_gc_stress) || !freelist) {
        if (!garbage_collect(objspace)) {
            during_gc = 0;
            rb_memerror();
        }
    }

    obj = (VALUE)freelist;
    freelist = freelist->as.free.next;

    MEMZERO((void*)obj, RVALUE, 1);
#ifdef GC_DEBUG
    RANY(obj)->file = rb_sourcefile();
    RANY(obj)->line = rb_sourceline();
#endif

    return obj;
}
static void
gc_mark_lazy(rb_objspace_t *objspace, VALUE ptr, int lev)
{
    register RVALUE *obj;

    obj = RANY(ptr);
    if (rb_special_const_p(ptr)) return; /* special const not marked */
    if (obj->as.basic.flags == 0) return;       /* free cell */
    if (obj->as.basic.flags & FL_MARK) return;  /* already marked */
    obj->as.basic.flags |= FL_MARK;
    live++;

    if (lev > GC_LEVEL_MAX || (lev == 0 && stack_check())) {
        if (!mark_stack_overflow) {
            if (mark_stack_ptr - mark_stack < MARK_STACK_MAX) {
                *mark_stack_ptr = ptr;
                mark_stack_ptr++;
            }
            else {
                mark_stack_overflow = 1;
            }
        }
        return;
    }
    gc_mark_children(objspace, ptr, lev+1);
}
static void
gc_mark_children_lazy(rb_objspace_t *objspace, VALUE ptr, int lev)
{
    register RVALUE *obj = RANY(ptr);

    goto marking;               /* skip */

  again:
    obj = RANY(ptr);
    if (rb_special_const_p(ptr)) return; /* special const not marked */
    if (obj->as.basic.flags == 0) return;       /* free cell */
    if (obj->as.basic.flags & FL_MARK) return;  /* already marked */
    obj->as.basic.flags |= FL_MARK;
    live++;

  marking:
    if (FL_TEST(obj, FL_EXIVAR)) {
        rb_mark_generic_ivar(ptr);
    }

    switch (BUILTIN_TYPE(obj)) {
      case T_NIL:
      case T_FIXNUM:
        rb_bug("rb_gc_mark() called for broken object");
        break;

      case T_NODE:
        switch (nd_type(obj)) {
          case NODE_IF:         /* 1,2,3 */
          case NODE_FOR:
          case NODE_ITER:
          case NODE_WHEN:
          case NODE_MASGN:
          case NODE_RESCUE:
          case NODE_RESBODY:
          case NODE_CLASS:
          case NODE_BLOCK_PASS:
            gc_mark(objspace, (VALUE)obj->as.node.u2.node, lev);
            /* fall through */
          case NODE_BLOCK:      /* 1,3 */
          case NODE_OPTBLOCK:
          case NODE_ARRAY:
          case NODE_DSTR:
          case NODE_DXSTR:
          case NODE_DREGX:
          case NODE_DREGX_ONCE:
          case NODE_ENSURE:
          case NODE_CALL:
          case NODE_DEFS:
          case NODE_OP_ASGN1:
          case NODE_ARGS:
            gc_mark(objspace, (VALUE)obj->as.node.u1.node, lev);
            /* fall through */
          case NODE_SUPER:      /* 3 */
          case NODE_FCALL:
          case NODE_DEFN:
          case NODE_ARGS_AUX:
            ptr = (VALUE)obj->as.node.u3.node;
            goto again;

          case NODE_WHILE:
          case NODE_UNTIL:
          case NODE_AND:
          case NODE_OR:
          case NODE_CASE:
          case NODE_SCLASS:
          case NODE_DOT2:
          case NODE_DOT3:
          case NODE_FLIP2:
          case NODE_FLIP3:
          case NODE_MATCH2:
          case NODE_MATCH3:
          case NODE_OP_ASGN_OR:
          case NODE_OP_ASGN_AND:
          case NODE_MODULE:
          case NODE_ALIAS:
          case NODE_VALIAS:
          case NODE_ARGSCAT:
            gc_mark(objspace, (VALUE)obj->as.node.u1.node, lev);
            /* fall through */
          case NODE_GASGN:
          case NODE_LASGN:
          case NODE_DASGN:
          case NODE_DASGN_CURR:
          case NODE_IASGN:
          case NODE_IASGN2:
          case NODE_CVASGN:
          case NODE_COLON3:
          case NODE_OPT_N:
          case NODE_EVSTR:
          case NODE_UNDEF:
          case NODE_POSTEXE:
            ptr = (VALUE)obj->as.node.u2.node;
            goto again;

          case NODE_HASH:       /* 1 */
          case NODE_LIT:
          case NODE_STR:
          case NODE_XSTR:
          case NODE_DEFINED:
          case NODE_MATCH:
          case NODE_RETURN:
          case NODE_BREAK:
          case NODE_NEXT:
          case NODE_YIELD:
          case NODE_COLON2:
          case NODE_SPLAT:
          case NODE_TO_ARY:
            ptr = (VALUE)obj->as.node.u1.node;
            goto again;

          case NODE_SCOPE:      /* 2,3 */
          case NODE_CDECL:
          case NODE_OPT_ARG:
            gc_mark(objspace, (VALUE)obj->as.node.u3.node, lev);
            ptr = (VALUE)obj->as.node.u2.node;
            goto again;

          case NODE_ZARRAY:     /* - */
          case NODE_ZSUPER:
          case NODE_VCALL:
          case NODE_GVAR:
          case NODE_LVAR:
          case NODE_DVAR:
          case NODE_IVAR:
          case NODE_CVAR:
          case NODE_NTH_REF:
          case NODE_BACK_REF:
          case NODE_REDO:
          case NODE_RETRY:
          case NODE_SELF:
          case NODE_NIL:
          case NODE_TRUE:
          case NODE_FALSE:
          case NODE_ERRINFO:
          case NODE_BLOCK_ARG:
            break;
          case NODE_ALLOCA:
            mark_locations_array(objspace,
                                 (VALUE*)obj->as.node.u1.value,
                                 obj->as.node.u3.cnt);
            ptr = (VALUE)obj->as.node.u2.node;
            goto again;

          default:              /* unlisted NODE */
            if (is_pointer_to_heap(objspace, obj->as.node.u1.node)) {
                gc_mark(objspace, (VALUE)obj->as.node.u1.node, lev);
            }
            if (is_pointer_to_heap(objspace, obj->as.node.u2.node)) {
                gc_mark(objspace, (VALUE)obj->as.node.u2.node, lev);
            }
            if (is_pointer_to_heap(objspace, obj->as.node.u3.node)) {
                gc_mark(objspace, (VALUE)obj->as.node.u3.node, lev);
            }
        }
        return;                 /* no need to mark class. */
    }

    gc_mark(objspace, obj->as.basic.klass, lev);
    switch (BUILTIN_TYPE(obj)) {
      case T_ICLASS:
      case T_CLASS:
      case T_MODULE:
        mark_m_tbl(objspace, RCLASS_M_TBL(obj), lev);
        mark_tbl(objspace, RCLASS_IV_TBL(obj), lev);
        ptr = RCLASS_SUPER(obj);
        goto again;

      case T_ARRAY:
        if (FL_TEST(obj, ELTS_SHARED)) {
            ptr = obj->as.array.as.heap.aux.shared;
            goto again;
        }
        else {
            long i, len = RARRAY_LEN(obj);
            VALUE *ptr = RARRAY_PTR(obj);
            for (i=0; i < len; i++) {
                gc_mark(objspace, *ptr++, lev);
            }
        }
        break;

      case T_HASH:
        mark_hash(objspace, obj->as.hash.ntbl, lev);
        ptr = obj->as.hash.ifnone;
        goto again;

      case T_STRING:
#define STR_ASSOC FL_USER3      /* copied from string.c */
        if (FL_TEST(obj, RSTRING_NOEMBED) && FL_ANY(obj, ELTS_SHARED|STR_ASSOC)) {
            ptr = obj->as.string.as.heap.aux.shared;
            goto again;
        }
        break;

      case T_DATA:
        if (RTYPEDDATA_P(obj)) {
            if (obj->as.typeddata.type->dmark) (*obj->as.typeddata.type->dmark)(DATA_PTR(obj));
        }
        else {
            if (obj->as.data.dmark) (*obj->as.data.dmark)(DATA_PTR(obj));
        }
        break;

      case T_OBJECT:
        {
            long i, len = ROBJECT_NUMIV(obj);
            VALUE *ptr = ROBJECT_IVPTR(obj);
            for (i = 0; i < len; i++) {
                gc_mark(objspace, *ptr++, lev);
            }
        }
        break;

      case T_FILE:
        if (obj->as.file.fptr) {
            gc_mark(objspace, obj->as.file.fptr->pathv, lev);
            gc_mark(objspace, obj->as.file.fptr->tied_io_for_writing, lev);
            gc_mark(objspace, obj->as.file.fptr->writeconv_asciicompat, lev);
            gc_mark(objspace, obj->as.file.fptr->writeconv_pre_ecopts, lev);
            gc_mark(objspace, obj->as.file.fptr->encs.ecopts, lev);
            gc_mark(objspace, obj->as.file.fptr->write_lock, lev);
        }
        break;

      case T_REGEXP:
        gc_mark(objspace, obj->as.regexp.src, lev);
        break;

      case T_FLOAT:
      case T_BIGNUM:
      case T_ZOMBIE:
        break;

      case T_MATCH:
        gc_mark(objspace, obj->as.match.regexp, lev);
        if (obj->as.match.str) {
            ptr = obj->as.match.str;
            goto again;
        }
        break;

      case T_RATIONAL:
        gc_mark(objspace, obj->as.rational.num, lev);
        gc_mark(objspace, obj->as.rational.den, lev);
        break;

      case T_COMPLEX:
        gc_mark(objspace, obj->as.complex.real, lev);
        gc_mark(objspace, obj->as.complex.imag, lev);
        break;

      case T_STRUCT:
        {
            long len = RSTRUCT_LEN(obj);
            VALUE *ptr = RSTRUCT_PTR(obj);
            while (len--) {
                gc_mark(objspace, *ptr++, lev);
            }
        }
        break;

      default:
        rb_bug("rb_gc_mark(): unknown data type 0x%x(%p) %s",
               BUILTIN_TYPE(obj), (void *)obj,
               is_pointer_to_heap(objspace, obj) ? "corrupted object" : "non object");
    }
}
void rb_gc_abort_threads(void);

/*
 * Sweep one heap page.  Returns Qtrue if the page was GRAY and has now
 * been swept; Qfalse if it was already swept (BLACK) or empty (WHITE).
 */
static int
slot_sweep(rb_objspace_t *objspace, struct heaps_slot *target)
{
    RVALUE *p, *pend, *free;
    RVALUE *final;
    size_t freed = 0;

    if (target->color == BLACK || target->color == WHITE) {
        return Qfalse;
    }

    final = deferred_final_list;
    free = freelist;
    p = target->slot; pend = p + target->limit;
    while (p < pend) {
        if (!(p->as.basic.flags & FL_MARK)) {
            if (p->as.basic.flags) {
                obj_free(objspace, (VALUE)p);
            }
            if (need_call_final && FL_TEST(p, FL_FINALIZE)) {
                p->as.free.flags = FL_MARK; /* remain marked */
                p->as.free.next = deferred_final_list;
                deferred_final_list = p;
            }
            else {
                VALGRIND_MAKE_MEM_UNDEFINED((void*)p, sizeof(RVALUE));
                p->as.free.flags = 0;
                p->as.free.next = freelist;
                freelist = p;
            }
            freed++;
        }
        else if (RBASIC(p)->flags == FL_MARK) {
            /* objects to be finalized: do nothing, remain marked */
        }
        else {
            p->as.basic.flags &= ~FL_MARK;
        }
        p++;
    }
    dead += freed;
    if (freed == target->limit && dead > do_heap_free) {
        RVALUE *pp;

        target->limit = 0;
        target->color = WHITE;  /* whole page is garbage */
        for (pp = deferred_final_list; pp != final; pp = pp->as.free.next) {
            pp->as.free.flags |= FL_SINGLETON; /* freeing page mark */
        }
        freelist = free;        /* cancel this page from freelist */
    }
    else {
        target->color = BLACK;  /* swept; still holds live objects */
    }
    return Qtrue;
}
/* Sweep up to heaps_sweep_inc not-yet-swept pages. */
static void
heap_sweep_increment(rb_objspace_t *objspace)
{
    size_t i = 0;

    while (i < heaps_sweep_inc && heaps_sweep_index < heaps_used) {
        if (slot_sweep(objspace, &heaps[heaps_sweep_index])) {
            i++;
        }
        heaps_sweep_index++;
    }
}

/* Sweep until the freelist is replenished or every page has been swept. */
static void
heap_sweep(rb_objspace_t *objspace)
{
    while (!freelist && heaps_sweep_index < heaps_used) {
        slot_sweep(objspace, &heaps[heaps_sweep_index]);
        heaps_sweep_index++;
    }
}

static int
gc_lazy_sweep(rb_objspace_t *objspace, rb_thread_t *th)
{
    if (heaps_increment(objspace)) {
        heap_sweep_increment(objspace);
    }
    else {
        heap_sweep(objspace);
    }

    if (!freelist) {
        return Qfalse;
    }

    return Qtrue;
}
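
Not part of the patch, but useful for review: the lazy scheme spreads sweep
work across allocations instead of sweeping the whole heap at the end of a
mark phase.  Reading slot_sweep(), gc_lazy_sweep() and gc_mark_all_clear()
(below) together, each heaps_slot moves through its colors roughly as this
illustrative comment block summarizes:

/*
 * Page life cycle under gc_lazy.c (summary derived from the code above):
 *
 *   gc_marks() via gc_mark_all_clear():
 *       BLACK -> GRAY    swept pages are re-queued for the next lazy sweep
 *       WHITE -> free()  empty pages are released (one is kept in heaps_freed)
 *       GRAY             pages the incremental sweep never reached are
 *                        finished inline
 *   slot_sweep() on a GRAY page:
 *       GRAY -> BLACK    live objects remain; freed cells go to the freelist
 *       GRAY -> WHITE    every object was garbage and enough garbage has
 *                        accumulated (dead > do_heap_free); the page's
 *                        freelist entries are cancelled so it can be released
 *
 * gc_lazy_sweep() advances this machine on each allocation that misses the
 * freelist: heap_sweep_increment() sweeps a bounded number of pages while
 * the heap can still grow, heap_sweep() otherwise sweeps until the freelist
 * refills.
 */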

static void
rb_gc_force_recycle_lazy(VALUE p)
{
    VALGRIND_MAKE_MEM_UNDEFINED((void*)p, sizeof(RVALUE));
    RANY(p)->as.free.flags = 0;
    /* unlike the default algorithm, the cell is not pushed onto the
     * freelist here; it is reclaimed by the next sweep */
}
/*
 * Prepare all pages for a new mark phase: release WHITE (empty) pages,
 * finish sweeping GRAY pages the incremental sweep never reached, and
 * turn BLACK pages GRAY so the coming lazy sweep will visit them.
 */
static void
gc_mark_all_clear(rb_objspace_t *objspace)
{
    RVALUE *last = 0;
    size_t i, j;

    for (i = j = 0; j < heaps_used; i++) {
        if (heaps[i].color == WHITE && !deferred_final_list) {
            if (!last) {
                last = heaps[i].membase;
            }
            else {
                free(heaps[i].membase);
            }
            heaps_used--;
        }
        else {
            if (heaps[i].color == GRAY) {
                RVALUE *p, *pend;
                p = heaps[i].slot; pend = p + heaps[i].limit;
                while (p < pend) {
                    if (!(RBASIC(p)->flags & FL_MARK)) {
                        if (p->as.basic.flags && !FL_TEST(p, FL_FINALIZE)) {
                            obj_free(objspace, (VALUE)p);
                            VALGRIND_MAKE_MEM_UNDEFINED((void*)p, sizeof(RVALUE));
                            p->as.free.flags = 0;
                        }
                    }
                    else if (RBASIC(p)->flags != FL_MARK) {
                        p->as.basic.flags &= ~FL_MARK;
                    }
                    p++;
                }
            }
            else {
                heaps[i].color = GRAY;
            }
            if (i != j) {
                heaps[j] = heaps[i];
            }
            j++;
        }
    }
    if (last) {
        if (last < heaps_freed) {
            free(heaps_freed);
            heaps_freed = last;
        }
        else {
            free(last);
        }
    }
}
static void
gc_marks(rb_objspace_t *objspace, rb_thread_t *th)
{
    struct gc_list *list;
    size_t free_min = 0;

    live = 0;
    freelist = 0;

    gc_mark_all_clear(objspace);

    SET_STACK_END;

    init_mark_stack(objspace);

    th->vm->self ? rb_gc_mark(th->vm->self) : rb_vm_mark(th->vm);

    if (finalizer_table) {
        mark_tbl(objspace, finalizer_table, 0);
    }

    mark_current_machine_context(objspace, th);

    rb_gc_mark_threads();
    rb_gc_mark_symbols();
    rb_gc_mark_encodings();

    /* mark protected global variables */
    for (list = global_List; list; list = list->next) {
        rb_gc_mark_maybe(*list->varptr);
    }
    rb_mark_end_proc();
    rb_gc_mark_global_tbl();

    mark_tbl(objspace, rb_class_tbl, 0);

    /* mark generic instance variables for special constants */
    rb_mark_generic_ivar_tbl();

    rb_gc_mark_parser();

    /* mark objects whose marking is not yet completed */
    while (!MARK_STACK_EMPTY) {
        if (mark_stack_overflow) {
            gc_mark_all(objspace);
        }
        else {
            gc_mark_rest(objspace);
        }
    }
    /* TODO: GC_PROF_MARK_TIMER_STOP */

    dead = 0;
    heaps_sweep_index = 0;
    heaps_sweep_inc = (heaps_used / 10) + 1;
    do_heap_free = (heaps_used * HEAP_OBJ_LIMIT) * 0.65;
    free_min = (heaps_used * HEAP_OBJ_LIMIT) * 0.2;
    if (free_min < FREE_MIN) free_min = FREE_MIN;
    if (free_min > (heaps_used * HEAP_OBJ_LIMIT - live)) {
        set_heaps_increment(objspace);
        heaps_sweep_inc = (heaps_used + heaps_sweep_inc) / heaps_sweep_inc + 1;
    }
    /* TODO: GC_PROF_TIMER_STOP */
    if (GC_NOTIFY) printf("end garbage_collect()\n");
}
static int
garbage_collect_force_with_gvl(rb_objspace_t *objspace)
{
    if (malloc_increase > malloc_limit) {
        malloc_limit += (malloc_increase - malloc_limit) * (double)live / (heaps_used * HEAP_OBJ_LIMIT);
        if (malloc_limit < GC_MALLOC_LIMIT) malloc_limit = GC_MALLOC_LIMIT;
    }
    malloc_increase = 0;

    gc_marks(objspace, GET_THREAD());

    return garbage_collect_with_gvl(objspace);
}
static int
garbage_collect_lazy(rb_objspace_t *objspace)
{
    rb_thread_t *th = GET_THREAD();

    INIT_GC_PROF_PARAMS;

    if (GC_NOTIFY) printf("start garbage_collect()\n");

    if (!heaps) {
        return FALSE;
    }

    if (dont_gc || during_gc) {
        if (!freelist) {
            if (!heaps_increment(objspace)) {
                set_heaps_increment(objspace);
                heaps_increment(objspace);
            }
        }
        return TRUE;
    }
    during_gc++;
    objspace->count++;

    GC_PROF_TIMER_START;
    GC_PROF_MARK_TIMER_START;
    SET_STACK_END;

    /* sweep lazily; run a full mark phase only when sweeping cannot
     * replenish the freelist */
    while (!gc_lazy_sweep(objspace, th)) {
        gc_marks(objspace, th);
    }

    GC_PROF_MARK_TIMER_STOP;
    GC_PROF_SET_HEAP_INFO; /* TODO: correct? */
    GC_PROF_TIMER_STOP;

    during_gc = 0;
    if (GC_NOTIFY) printf("end garbage_collect()\n");

    return TRUE;
}
static void
gc_finalize_deferred_lazy(rb_objspace_t *objspace)
{
    finalize_deferred(objspace);
}

static void
rb_gc_lazy(void)
{
    rb_objspace_t *objspace = &rb_objspace;

    gc_finalize_deferred(objspace);
    garbage_collect(objspace);
}

REGISTER_GC(lazy);
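
For readers tracing the plumbing: REGISTER_GC(lazy) is defined in the gc.c
hunk below and builds the per-algorithm vtable.  The macro's tail is cut off
in this excerpt, so the fields after assign_heap_slot in the expansion
sketched here are an assumption, inferred from the field order of
gc_algorithm_t:

/* Assumed expansion of REGISTER_GC(lazy); entries past assign_heap_slot_lazy
 * are inferred from gc_algorithm_t, since the macro is truncated below. */
gc_algorithm_t gc_algorithm_lazy = {
    "lazy",
    vm_xmalloc_lazy,
    vm_xrealloc_lazy,
    assign_heap_slot_lazy,
    rb_newobj_from_heap_lazy,
    gc_mark_lazy,
    gc_mark_children_lazy,
    garbage_collect_lazy,
    gc_finalize_deferred_lazy,
    rb_gc_force_recycle_lazy,
    rb_gc_lazy,
};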

gc_default.c (revision 0)

/**********************************************************************

  gc_default.c -

  $Author: $
  created at:

  Copyright (C) 1993-2007 Yukihiro Matsumoto
  Copyright (C) 2000  Network Applied Communication Laboratory, Inc.
  Copyright (C) 2000  Information-technology Promotion Agency, Japan

**********************************************************************/

#include "gc_core.h"
static void *
vm_xmalloc_default(rb_objspace_t *objspace, size_t size)
{
    void *mem;

    if ((ssize_t)size < 0) {
        negative_size_allocation_error("negative allocation size (or too big)");
    }
    if (size == 0) size = 1;

#if CALC_EXACT_MALLOC_SIZE
    size += sizeof(size_t);
#endif

    if ((ruby_gc_stress && !ruby_disable_gc_stress) ||
        (malloc_increase+size) > malloc_limit) {
        garbage_collect_with_gvl(objspace);
    }
    mem = malloc(size);
    if (!mem) {
        if (garbage_collect_with_gvl(objspace)) {
            mem = malloc(size);
        }
        if (!mem) {
            ruby_memerror();
        }
    }
    malloc_increase += size;

#if CALC_EXACT_MALLOC_SIZE
    objspace->malloc_params.allocated_size += size;
    objspace->malloc_params.allocations++;
    ((size_t *)mem)[0] = size;
    mem = (size_t *)mem + 1;
#endif

    return mem;
}
static void *
vm_xrealloc_default(rb_objspace_t *objspace, void *ptr, size_t size)
{
    void *mem;

    if ((ssize_t)size < 0) {
        negative_size_allocation_error("negative re-allocation size");
    }
    if (!ptr) return vm_xmalloc(objspace, size);
    if (size == 0) {
        vm_xfree(objspace, ptr);
        return 0;
    }
    if (ruby_gc_stress && !ruby_disable_gc_stress)
        garbage_collect_with_gvl(objspace);

#if CALC_EXACT_MALLOC_SIZE
    size += sizeof(size_t);
    objspace->malloc_params.allocated_size -= size;
    ptr = (size_t *)ptr - 1;
#endif

    mem = realloc(ptr, size);
    if (!mem) {
        if (garbage_collect_with_gvl(objspace)) {
            mem = realloc(ptr, size);
        }
        if (!mem) {
            ruby_memerror();
        }
    }
    malloc_increase += size;

#if CALC_EXACT_MALLOC_SIZE
    objspace->malloc_params.allocated_size += size;
    ((size_t *)mem)[0] = size;
    mem = (size_t *)mem + 1;
#endif

    return mem;
}
static void
assign_heap_slot_default(rb_objspace_t *objspace)
{
    RVALUE *p, *pend, *membase;
    size_t hi, lo, mid;
    size_t objs;

    objs = HEAP_OBJ_LIMIT;
    p = (RVALUE*)malloc(HEAP_SIZE);

    if (p == 0) {
        during_gc = 0;
        rb_memerror();
    }

    membase = p;
    if ((VALUE)p % sizeof(RVALUE) != 0) {
        p = (RVALUE*)((VALUE)p + sizeof(RVALUE) - ((VALUE)p % sizeof(RVALUE)));
        if ((HEAP_SIZE - HEAP_OBJ_LIMIT * sizeof(RVALUE)) < (size_t)((char*)p - (char*)membase)) {
            objs--;
        }
    }

    lo = 0;
    hi = heaps_used;
    while (lo < hi) {
        register RVALUE *mid_membase;
        mid = (lo + hi) / 2;
        mid_membase = heaps[mid].membase;
        if (mid_membase < membase) {
            lo = mid + 1;
        }
        else if (mid_membase > membase) {
            hi = mid;
        }
        else {
            rb_bug("same heap slot is allocated: %p at %"PRIuVALUE, (void *)membase, (VALUE)mid);
        }
    }
    if (hi < heaps_used) {
        MEMMOVE(&heaps[hi+1], &heaps[hi], struct heaps_slot, heaps_used - hi);
    }
    heaps[hi].membase = membase;
    heaps[hi].slot = p;
    heaps[hi].limit = objs;
    pend = p + objs;
    if (lomem == 0 || lomem > p) lomem = p;
    if (himem < pend) himem = pend;
    heaps_used++;

    while (p < pend) {
        p->as.free.flags = 0;
        p->as.free.next = freelist;
        freelist = p;
        p++;
    }
}
static VALUE
rb_newobj_from_heap_default(rb_objspace_t *objspace)
{
    VALUE obj;

    if ((ruby_gc_stress && !ruby_disable_gc_stress) || !freelist) {
        if (!heaps_increment(objspace) && !garbage_collect(objspace)) {
            during_gc = 0;
            rb_memerror();
        }
    }

    obj = (VALUE)freelist;
    freelist = freelist->as.free.next;

    MEMZERO((void*)obj, RVALUE, 1);
#ifdef GC_DEBUG
    RANY(obj)->file = rb_sourcefile();
    RANY(obj)->line = rb_sourceline();
#endif

    return obj;
}
static void
gc_mark_default(rb_objspace_t *objspace, VALUE ptr, int lev)
{
    register RVALUE *obj;

    obj = RANY(ptr);
    if (rb_special_const_p(ptr)) return; /* special const not marked */
    if (obj->as.basic.flags == 0) return;       /* free cell */
    if (obj->as.basic.flags & FL_MARK) return;  /* already marked */
    obj->as.basic.flags |= FL_MARK;

    if (lev > GC_LEVEL_MAX || (lev == 0 && stack_check())) {
        if (!mark_stack_overflow) {
            if (mark_stack_ptr - mark_stack < MARK_STACK_MAX) {
                *mark_stack_ptr = ptr;
                mark_stack_ptr++;
            }
            else {
                mark_stack_overflow = 1;
            }
        }
        return;
    }
    gc_mark_children(objspace, ptr, lev+1);
}
static void
gc_mark_children_default(rb_objspace_t *objspace, VALUE ptr, int lev)
{
    register RVALUE *obj = RANY(ptr);

    goto marking;               /* skip */

  again:
    obj = RANY(ptr);
    if (rb_special_const_p(ptr)) return; /* special const not marked */
    if (obj->as.basic.flags == 0) return;       /* free cell */
    if (obj->as.basic.flags & FL_MARK) return;  /* already marked */
    obj->as.basic.flags |= FL_MARK;

  marking:
    if (FL_TEST(obj, FL_EXIVAR)) {
        rb_mark_generic_ivar(ptr);
    }

    switch (BUILTIN_TYPE(obj)) {
      case T_NIL:
      case T_FIXNUM:
        rb_bug("rb_gc_mark() called for broken object");
        break;

      case T_NODE:
        switch (nd_type(obj)) {
          case NODE_IF:         /* 1,2,3 */
          case NODE_FOR:
          case NODE_ITER:
          case NODE_WHEN:
          case NODE_MASGN:
          case NODE_RESCUE:
          case NODE_RESBODY:
          case NODE_CLASS:
          case NODE_BLOCK_PASS:
            gc_mark(objspace, (VALUE)obj->as.node.u2.node, lev);
            /* fall through */
          case NODE_BLOCK:      /* 1,3 */
          case NODE_OPTBLOCK:
          case NODE_ARRAY:
          case NODE_DSTR:
          case NODE_DXSTR:
          case NODE_DREGX:
          case NODE_DREGX_ONCE:
          case NODE_ENSURE:
          case NODE_CALL:
          case NODE_DEFS:
          case NODE_OP_ASGN1:
          case NODE_ARGS:
            gc_mark(objspace, (VALUE)obj->as.node.u1.node, lev);
            /* fall through */
          case NODE_SUPER:      /* 3 */
          case NODE_FCALL:
          case NODE_DEFN:
          case NODE_ARGS_AUX:
            ptr = (VALUE)obj->as.node.u3.node;
            goto again;

          case NODE_WHILE:
          case NODE_UNTIL:
          case NODE_AND:
          case NODE_OR:
          case NODE_CASE:
          case NODE_SCLASS:
          case NODE_DOT2:
          case NODE_DOT3:
          case NODE_FLIP2:
          case NODE_FLIP3:
          case NODE_MATCH2:
          case NODE_MATCH3:
          case NODE_OP_ASGN_OR:
          case NODE_OP_ASGN_AND:
          case NODE_MODULE:
          case NODE_ALIAS:
          case NODE_VALIAS:
          case NODE_ARGSCAT:
            gc_mark(objspace, (VALUE)obj->as.node.u1.node, lev);
            /* fall through */
          case NODE_GASGN:
          case NODE_LASGN:
          case NODE_DASGN:
          case NODE_DASGN_CURR:
          case NODE_IASGN:
          case NODE_IASGN2:
          case NODE_CVASGN:
          case NODE_COLON3:
          case NODE_OPT_N:
          case NODE_EVSTR:
          case NODE_UNDEF:
          case NODE_POSTEXE:
            ptr = (VALUE)obj->as.node.u2.node;
            goto again;

          case NODE_HASH:       /* 1 */
          case NODE_LIT:
          case NODE_STR:
          case NODE_XSTR:
          case NODE_DEFINED:
          case NODE_MATCH:
          case NODE_RETURN:
          case NODE_BREAK:
          case NODE_NEXT:
          case NODE_YIELD:
          case NODE_COLON2:
          case NODE_SPLAT:
          case NODE_TO_ARY:
            ptr = (VALUE)obj->as.node.u1.node;
            goto again;

          case NODE_SCOPE:      /* 2,3 */
          case NODE_CDECL:
          case NODE_OPT_ARG:
            gc_mark(objspace, (VALUE)obj->as.node.u3.node, lev);
            ptr = (VALUE)obj->as.node.u2.node;
            goto again;

          case NODE_ZARRAY:     /* - */
          case NODE_ZSUPER:
          case NODE_VCALL:
          case NODE_GVAR:
          case NODE_LVAR:
          case NODE_DVAR:
          case NODE_IVAR:
          case NODE_CVAR:
          case NODE_NTH_REF:
          case NODE_BACK_REF:
          case NODE_REDO:
          case NODE_RETRY:
          case NODE_SELF:
          case NODE_NIL:
          case NODE_TRUE:
          case NODE_FALSE:
          case NODE_ERRINFO:
          case NODE_BLOCK_ARG:
            break;
          case NODE_ALLOCA:
            mark_locations_array(objspace,
                                 (VALUE*)obj->as.node.u1.value,
                                 obj->as.node.u3.cnt);
            ptr = (VALUE)obj->as.node.u2.node;
            goto again;

          default:              /* unlisted NODE */
            if (is_pointer_to_heap(objspace, obj->as.node.u1.node)) {
                gc_mark(objspace, (VALUE)obj->as.node.u1.node, lev);
            }
            if (is_pointer_to_heap(objspace, obj->as.node.u2.node)) {
                gc_mark(objspace, (VALUE)obj->as.node.u2.node, lev);
            }
            if (is_pointer_to_heap(objspace, obj->as.node.u3.node)) {
                gc_mark(objspace, (VALUE)obj->as.node.u3.node, lev);
            }
        }
        return;                 /* no need to mark class. */
    }

    gc_mark(objspace, obj->as.basic.klass, lev);
    switch (BUILTIN_TYPE(obj)) {
      case T_ICLASS:
      case T_CLASS:
      case T_MODULE:
        mark_m_tbl(objspace, RCLASS_M_TBL(obj), lev);
        mark_tbl(objspace, RCLASS_IV_TBL(obj), lev);
        ptr = RCLASS_SUPER(obj);
        goto again;

      case T_ARRAY:
        if (FL_TEST(obj, ELTS_SHARED)) {
            ptr = obj->as.array.as.heap.aux.shared;
            goto again;
        }
        else {
            long i, len = RARRAY_LEN(obj);
            VALUE *ptr = RARRAY_PTR(obj);
            for (i=0; i < len; i++) {
                gc_mark(objspace, *ptr++, lev);
            }
        }
        break;

      case T_HASH:
        mark_hash(objspace, obj->as.hash.ntbl, lev);
        ptr = obj->as.hash.ifnone;
        goto again;

      case T_STRING:
#define STR_ASSOC FL_USER3      /* copied from string.c */
        if (FL_TEST(obj, RSTRING_NOEMBED) && FL_ANY(obj, ELTS_SHARED|STR_ASSOC)) {
            ptr = obj->as.string.as.heap.aux.shared;
            goto again;
        }
        break;

      case T_DATA:
        if (RTYPEDDATA_P(obj)) {
            if (obj->as.typeddata.type->dmark) (*obj->as.typeddata.type->dmark)(DATA_PTR(obj));
        }
        else {
            if (obj->as.data.dmark) (*obj->as.data.dmark)(DATA_PTR(obj));
        }
        break;

      case T_OBJECT:
        {
            long i, len = ROBJECT_NUMIV(obj);
            VALUE *ptr = ROBJECT_IVPTR(obj);
            for (i = 0; i < len; i++) {
                gc_mark(objspace, *ptr++, lev);
            }
        }
        break;

      case T_FILE:
        if (obj->as.file.fptr) {
            gc_mark(objspace, obj->as.file.fptr->pathv, lev);
            gc_mark(objspace, obj->as.file.fptr->tied_io_for_writing, lev);
            gc_mark(objspace, obj->as.file.fptr->writeconv_asciicompat, lev);
            gc_mark(objspace, obj->as.file.fptr->writeconv_pre_ecopts, lev);
            gc_mark(objspace, obj->as.file.fptr->encs.ecopts, lev);
            gc_mark(objspace, obj->as.file.fptr->write_lock, lev);
        }
        break;

      case T_REGEXP:
        gc_mark(objspace, obj->as.regexp.src, lev);
        break;

      case T_FLOAT:
      case T_BIGNUM:
      case T_ZOMBIE:
        break;

      case T_MATCH:
        gc_mark(objspace, obj->as.match.regexp, lev);
        if (obj->as.match.str) {
            ptr = obj->as.match.str;
            goto again;
        }
        break;

      case T_RATIONAL:
        gc_mark(objspace, obj->as.rational.num, lev);
        gc_mark(objspace, obj->as.rational.den, lev);
        break;

      case T_COMPLEX:
        gc_mark(objspace, obj->as.complex.real, lev);
        gc_mark(objspace, obj->as.complex.imag, lev);
        break;

      case T_STRUCT:
        {
            long len = RSTRUCT_LEN(obj);
            VALUE *ptr = RSTRUCT_PTR(obj);
            while (len--) {
                gc_mark(objspace, *ptr++, lev);
            }
        }
        break;

      default:
        rb_bug("rb_gc_mark(): unknown data type 0x%x(%p) %s",
               BUILTIN_TYPE(obj), (void *)obj,
               is_pointer_to_heap(objspace, obj) ? "corrupted object" : "non object");
    }
}
#ifdef NOSELECT_GC
void
rb_gc_force_recycle(VALUE p)
#else
static void
rb_gc_force_recycle_default(VALUE p)
#endif
{
    rb_objspace_t *objspace = &rb_objspace;
    add_freelist(objspace, (RVALUE *)p);
}

static void
gc_finalize_deferred_default(rb_objspace_t *objspace)
{
    finalize_deferred(objspace);
    free_unused_heaps(objspace);
}
#ifdef NOSELECT_GC
void
rb_gc(void)
#else
static void
rb_gc_default(void)
#endif
{
    rb_objspace_t *objspace = &rb_objspace;
    garbage_collect(objspace);
    gc_finalize_deferred(objspace);
}
static int
garbage_collect_default(rb_objspace_t *objspace)
{
    struct gc_list *list;
    rb_thread_t *th = GET_THREAD();

    INIT_GC_PROF_PARAMS;

    if (GC_NOTIFY) printf("start garbage_collect()\n");

    if (!heaps) {
        return FALSE;
    }

    if (dont_gc || during_gc) {
        if (!freelist) {
            if (!heaps_increment(objspace)) {
                set_heaps_increment(objspace);
                heaps_increment(objspace);
            }
        }
        return TRUE;
    }
    during_gc++;
    objspace->count++;

    GC_PROF_TIMER_START;
    GC_PROF_MARK_TIMER_START;
    SET_STACK_END;

    init_mark_stack(objspace);

    th->vm->self ? rb_gc_mark(th->vm->self) : rb_vm_mark(th->vm);

    if (finalizer_table) {
        mark_tbl(objspace, finalizer_table, 0);
    }

    mark_current_machine_context(objspace, th);

    rb_gc_mark_threads();
    rb_gc_mark_symbols();
    rb_gc_mark_encodings();

    /* mark protected global variables */
    for (list = global_List; list; list = list->next) {
        rb_gc_mark_maybe(*list->varptr);
    }
    rb_mark_end_proc();
    rb_gc_mark_global_tbl();

    mark_tbl(objspace, rb_class_tbl, 0);

    /* mark generic instance variables for special constants */
    rb_mark_generic_ivar_tbl();

    rb_gc_mark_parser();

    /* mark objects whose marking is not yet completed */
    while (!MARK_STACK_EMPTY) {
        if (mark_stack_overflow) {
            gc_mark_all(objspace);
        }
        else {
            gc_mark_rest(objspace);
        }
    }

    GC_PROF_MARK_TIMER_STOP;

    GC_PROF_SWEEP_TIMER_START;
    gc_sweep(objspace);
    GC_PROF_SWEEP_TIMER_STOP;

    GC_PROF_TIMER_STOP;
    if (GC_NOTIFY) printf("end garbage_collect()\n");

    return TRUE;
}

REGISTER_GC(default);

gc.c (working copy)
#pragma pack(pop)
#endif

enum slot_color {
    WHITE = 0x00,  /* garbage: every object on the page has been freed */
    BLACK = 0x01,  /* in use: swept, still holds live objects */
    GRAY  = 0x02,  /* not yet swept in this cycle */
};

struct heaps_slot {
    void *membase;
    RVALUE *slot;
    size_t limit;
    enum slot_color color;
};

#define HEAP_MIN_SLOTS 10000

...
        RVALUE *freelist;
        RVALUE *range[2];
        RVALUE *freed;
        size_t live;
        size_t dead;
        size_t do_heap_free;
        size_t sweep_index;
        size_t sweep_increment;
    } heap;
    struct {
        int dont_gc;
        int during_gc;
        int during_sweep;
    } flags;
    struct {
        st_table *table;

...
extern VALUE rb_cMutex;
extern st_table *rb_class_tbl;

typedef struct gc_algorithm {
    const char *name;
    void *(*vm_xmalloc)(rb_objspace_t *objspace, size_t size);
    void *(*vm_xrealloc)(rb_objspace_t *objspace, void *ptr, size_t size);
    void (*assign_heap_slot)(rb_objspace_t *objspace);
    VALUE (*rb_newobj_from_heap)(rb_objspace_t *objspace);
    void (*gc_mark)(rb_objspace_t *objspace, VALUE ptr, int lev);
    void (*gc_mark_children)(rb_objspace_t *objspace, VALUE ptr, int lev);
    int (*garbage_collect)(rb_objspace_t *objspace);
    void (*gc_finalize_deferred)(rb_objspace_t *objspace);
    void (*rb_gc_force_recycle)(VALUE p);
    void (*rb_gc)(void);
} gc_algorithm_t;

gc_algorithm_t *current_gc_algorithm;
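
This excerpt does not show where current_gc_algorithm is assigned;
presumably start-up code points it at one of the registered vtables.  A
hypothetical sketch of such a selection hook, assuming the
gc_algorithm_default / gc_algorithm_lazy symbols emitted by
REGISTER_GC(default) and REGISTER_GC(lazy) (the function name and its
caller are inventions for illustration, not part of the patch):

/* Hypothetical start-up hook: pick the GC algorithm by name.
 * Uses strcmp from <string.h>. */
extern gc_algorithm_t gc_algorithm_default;
extern gc_algorithm_t gc_algorithm_lazy;

static void
rb_gc_select_algorithm(const char *name)
{
    if (name && strcmp(name, "lazy") == 0) {
        current_gc_algorithm = &gc_algorithm_lazy;
    }
    else {
        current_gc_algorithm = &gc_algorithm_default;
    }
}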

#ifdef NOSELECT_GC

static void *vm_xmalloc(rb_objspace_t *objspace, size_t size);
static void *vm_xrealloc(rb_objspace_t *objspace, void *ptr, size_t size);
static void assign_heap_slot(rb_objspace_t *objspace);
static VALUE rb_newobj_from_heap(rb_objspace_t *objspace);
static void gc_mark(rb_objspace_t *objspace, VALUE ptr, int lev);
static void gc_mark_children(rb_objspace_t *objspace, VALUE ptr, int lev);
static int garbage_collect(rb_objspace_t *objspace);
static void gc_finalize_deferred(rb_objspace_t *objspace);

#define vm_xmalloc_default(objspace, size) \
    vm_xmalloc(objspace, size)
#define vm_xrealloc_default(objspace, ptr, size) \
    vm_xrealloc(objspace, ptr, size)
#define assign_heap_slot_default(objspace) \
    assign_heap_slot(objspace)
#define rb_newobj_from_heap_default(objspace) \
    rb_newobj_from_heap(objspace)
#define gc_mark_default(objspace, ptr, lev) \
    gc_mark(objspace, ptr, lev)
#define gc_mark_children_default(objspace, ptr, lev) \
    gc_mark_children(objspace, ptr, lev)
#define garbage_collect_default(objspace) \
    garbage_collect(objspace)
#define gc_finalize_deferred_default(objspace) \
    gc_finalize_deferred(objspace)

#define REGISTER_GC(name)

#else /* NOSELECT_GC */

#define vm_xmalloc(objspace, size) \
    current_gc_algorithm->vm_xmalloc(objspace, size)
#define vm_xrealloc(objspace, ptr, size) \
    current_gc_algorithm->vm_xrealloc(objspace, ptr, size)
#define assign_heap_slot(objspace) \
    current_gc_algorithm->assign_heap_slot(objspace)
#define rb_newobj_from_heap(objspace) \
    current_gc_algorithm->rb_newobj_from_heap(objspace)
#define gc_mark(objspace, ptr, lev) \
    current_gc_algorithm->gc_mark(objspace, ptr, lev)
#define gc_mark_children(objspace, ptr, lev) \
    current_gc_algorithm->gc_mark_children(objspace, ptr, lev)
#define garbage_collect(objspace) \
    current_gc_algorithm->garbage_collect(objspace)
#define gc_finalize_deferred(objspace) \
    current_gc_algorithm->gc_finalize_deferred(objspace)

void
rb_gc_force_recycle(VALUE p)
{
    current_gc_algorithm->rb_gc_force_recycle(p);
}

void
rb_gc(void)
{
    current_gc_algorithm->rb_gc();
}

#define REGISTER_GC(name) \
gc_algorithm_t gc_algorithm_##name = { \
    #name, \
    vm_xmalloc_##name, \
    vm_xrealloc_##name, \
    assign_heap_slot_##name, \