Index: dln.c
===================================================================
--- dln.c	(revision 27126)
+++ dln.c	(working copy)
@@ -1230,6 +1230,12 @@ aix_loaderror(const char *pathname)
 #define translit_separator(str) (void)(str)
 #endif
 
+int
+dln_loadable(void)
+{
+    return 1;
+}
+
 void*
 dln_load(const char *file)
 {
Index: dmydln.c
===================================================================
--- dmydln.c	(revision 27126)
+++ dmydln.c	(working copy)
@@ -1,5 +1,11 @@
 #include "ruby/ruby.h"
 
+int
+dln_loadable(void)
+{
+    return 0;
+}
+
 void*
 dln_load(const char *file)
 {
Index: error.c
===================================================================
--- error.c	(revision 27126)
+++ error.c	(working copy)
@@ -1179,6 +1179,11 @@ rb_loaderror(const char *fmt, ...)
     VALUE mesg;
 
     va_start(args, fmt);
+    if (!gc_decided()) {
+        vfprintf(stderr, fmt, args);
+        va_end(args);
+        abort();
+    }
     mesg = rb_vsprintf(fmt, args);
     va_end(args);
     rb_exc_raise(rb_exc_new3(rb_eLoadError, mesg));
Index: ext/gc_bmp/extconf.rb
new file mode 100644
===================================================================
--- /dev/null	(revision 27126)
+++ ext/gc_bmp/extconf.rb	(working copy)
@@ -0,0 +1,2 @@
+require 'mkmf'
+create_makefile("gc_bmp")
Index: ext/gc_bmp/gc_bmp.c
new file mode 100644
===================================================================
--- /dev/null	(revision 27126)
+++ ext/gc_bmp/gc_bmp.c	(working copy)
@@ -0,0 +1,2770 @@
+/**********************************************************************
+
+  gc_bmp.c -
+
+  $Author$
+  created at: Tue Oct 5 09:44:46 JST 1993
+
+  Copyright (C) 1993-2007 Yukihiro Matsumoto
+  Copyright (C) 2000 Network Applied Communication Laboratory, Inc.
+  Copyright (C) 2000 Information-technology Promotion Agency, Japan
+
+**********************************************************************/
+
+#include "ruby.h"
+#include "ruby/re.h"
+#include "ruby/io.h"
+#include <stdio.h>
+#include <setjmp.h>
+#include <sys/types.h>
+
+#ifndef FALSE
+# define FALSE 0
+#elif FALSE
+# error FALSE must be false
+#endif
+#ifndef TRUE
+# define TRUE 1
+#elif !TRUE
+# error TRUE must be true
+#endif
+
+#ifdef HAVE_SYS_TIME_H
+#include <sys/time.h>
+#endif
+
+#ifdef HAVE_SYS_RESOURCE_H
+#include <sys/resource.h>
+#endif
+
+#if defined _WIN32 || defined __CYGWIN__
+#include <windows.h>
+#endif
+
+#ifdef HAVE_VALGRIND_MEMCHECK_H
+# include <valgrind/memcheck.h>
+# ifndef VALGRIND_MAKE_MEM_DEFINED
+# define VALGRIND_MAKE_MEM_DEFINED(p, n) VALGRIND_MAKE_READABLE(p, n)
+# endif
+# ifndef VALGRIND_MAKE_MEM_UNDEFINED
+# define VALGRIND_MAKE_MEM_UNDEFINED(p, n) VALGRIND_MAKE_WRITABLE(p, n)
+# endif
+#else
+# define VALGRIND_MAKE_MEM_DEFINED(p, n) /* empty */
+# define VALGRIND_MAKE_MEM_UNDEFINED(p, n) /* empty */
+#endif
+
+int rb_io_fptr_finalize(struct rb_io_t*);
+
+#define rb_setjmp(env) RUBY_SETJMP(env)
+#define rb_jmp_buf rb_jmpbuf_t
+
+/* Make alloca work the best possible way.
*/ +#ifdef __GNUC__ +# ifndef atarist +# ifndef alloca +# define alloca __builtin_alloca +# endif +# endif /* atarist */ +#else +# ifdef HAVE_ALLOCA_H +# include +# else +# ifdef _AIX + #pragma alloca +# else +# ifndef alloca /* predefined by HP cc +Olibcalls */ +void *alloca (); +# endif +# endif /* AIX */ +# endif /* HAVE_ALLOCA_H */ +#endif /* __GNUC__ */ + +#ifndef GC_MALLOC_LIMIT +#define GC_MALLOC_LIMIT 8000000 +#endif + +#define MARK_STACK_MAX 1024 + +/* for GC profile */ +#define GC_PROFILE_MORE_DETAIL 1 +typedef struct gc_profile_record { + double gc_time; + double gc_mark_time; + double gc_sweep_time; + double gc_invoke_time; + + size_t heap_use_slots; + size_t heap_live_objects; + size_t heap_free_objects; + size_t heap_total_objects; + size_t heap_use_size; + size_t heap_total_size; + + int have_finalize; + + size_t allocate_increase; + size_t allocate_limit; +} gc_profile_record; + +static double +getrusage_time(void) +{ +#ifdef RUSAGE_SELF + struct rusage usage; + struct timeval time; + getrusage(RUSAGE_SELF, &usage); + time = usage.ru_utime; + return time.tv_sec + time.tv_usec * 1e-6; +#elif defined _WIN32 + FILETIME creation_time, exit_time, kernel_time, user_time; + ULARGE_INTEGER ui; + LONG_LONG q; + double t; + + if (GetProcessTimes(GetCurrentProcess(), + &creation_time, &exit_time, &kernel_time, &user_time) == 0) + { + return 0.0; + } + memcpy(&ui, &user_time, sizeof(FILETIME)); + q = ui.QuadPart / 10L; + t = (DWORD)(q % 1000000L) * 1e-6; + q /= 1000000L; +#ifdef __GNUC__ + t += q; +#else + t += (double)(DWORD)(q >> 16) * (1 << 16); + t += (DWORD)q & ~(~0 << 16); +#endif + return t; +#else + return 0.0; +#endif +} + +#define GC_PROF_TIMER_START do {\ + if (objspace->profile.run) {\ + if (!objspace->profile.record) {\ + objspace->profile.size = 1000;\ + objspace->profile.record = malloc(sizeof(gc_profile_record) * objspace->profile.size);\ + }\ + if (count >= objspace->profile.size) {\ + objspace->profile.size += 1000;\ + objspace->profile.record = realloc(objspace->profile.record, sizeof(gc_profile_record) * objspace->profile.size);\ + }\ + if (!objspace->profile.record) {\ + rb_bug("gc_profile malloc or realloc miss");\ + }\ + MEMZERO(&objspace->profile.record[count], gc_profile_record, 1);\ + gc_time = getrusage_time();\ + objspace->profile.record[count].gc_invoke_time = gc_time - objspace->profile.invoke_time;\ + }\ + } while(0) + +#define GC_PROF_TIMER_STOP do {\ + if (objspace->profile.run) {\ + gc_time = getrusage_time() - gc_time;\ + if (gc_time < 0) gc_time = 0;\ + objspace->profile.record[count].gc_time = gc_time;\ + objspace->profile.count++;\ + }\ + } while(0) + +#if GC_PROFILE_MORE_DETAIL +#define INIT_GC_PROF_PARAMS double gc_time = 0, mark_time = 0, sweep_time = 0;\ + size_t count = objspace->profile.count + +#define GC_PROF_MARK_TIMER_START do {\ + if (objspace->profile.run) {\ + mark_time = getrusage_time();\ + }\ + } while(0) + +#define GC_PROF_MARK_TIMER_STOP do {\ + if (objspace->profile.run) {\ + mark_time = getrusage_time() - mark_time;\ + if (mark_time < 0) mark_time = 0;\ + objspace->profile.record[count].gc_mark_time = mark_time;\ + }\ + } while(0) + +#define GC_PROF_SWEEP_TIMER_START do {\ + if (objspace->profile.run) {\ + sweep_time = getrusage_time();\ + }\ + } while(0) + +#define GC_PROF_SWEEP_TIMER_STOP do {\ + if (objspace->profile.run) {\ + sweep_time = getrusage_time() - sweep_time;\ + if (sweep_time < 0) sweep_time = 0;\ + objspace->profile.record[count].gc_sweep_time = sweep_time;\ + }\ + } while(0) +#define GC_PROF_SET_MALLOC_INFO do 
{\ + if (objspace->profile.run) {\ + gc_profile_record *record = &objspace->profile.record[objspace->profile.count];\ + record->allocate_increase = malloc_increase;\ + record->allocate_limit = malloc_limit; \ + }\ + } while(0) +#define GC_PROF_SET_HEAP_INFO do {\ + if (objspace->profile.run) {\ + gc_profile_record *record = &objspace->profile.record[objspace->profile.count];\ + record->heap_use_slots = heaps_used;\ + record->heap_live_objects = live;\ + record->heap_free_objects = freed; \ + record->heap_total_objects = heaps_used * HEAP_OBJ_LIMIT;\ + record->have_finalize = final_list ? Qtrue : Qfalse;\ + record->heap_use_size = live * sizeof(RVALUE); \ + record->heap_total_size = heaps_used * (HEAP_OBJ_LIMIT * sizeof(RVALUE));\ + }\ + } while(0) +#else +#define INIT_GC_PROF_PARAMS double gc_time = 0;\ + size_t count = objspace->profile.count +#define GC_PROF_MARK_TIMER_START +#define GC_PROF_MARK_TIMER_STOP +#define GC_PROF_SWEEP_TIMER_START +#define GC_PROF_SWEEP_TIMER_STOP +#define GC_PROF_SET_MALLOC_INFO +#define GC_PROF_SET_HEAP_INFO do {\ + if (objspace->profile.run) {\ + gc_profile_record *record = &objspace->profile.record[objspace->profile.count];\ + record->heap_total_objects = heaps_used * HEAP_OBJ_LIMIT;\ + record->heap_use_size = live * sizeof(RVALUE); \ + record->heap_total_size = heaps_used * HEAP_SIZE;\ + }\ + } while(0) +#endif + + +#if defined(_MSC_VER) || defined(__BORLANDC__) || defined(__CYGWIN__) +#pragma pack(push, 1) /* magic for reducing sizeof(RVALUE): 24 -> 20 */ +#endif + +typedef struct RVALUE { + union { + struct { + VALUE flags; /* always 0 for freed obj */ + struct RVALUE *next; + } free; + struct { + VALUE flags; + struct RVALUE *next; + int *map; + VALUE slot; + int limit; + } bitmap; + struct RBasic basic; + struct RObject object; + struct RClass klass; + struct RFloat flonum; + struct RString string; + struct RArray array; + struct RRegexp regexp; + struct RHash hash; + struct RData data; + struct RTypedData typeddata; + struct RStruct rstruct; + struct RBignum bignum; + struct RFile file; + struct RMatch match; + struct RRational rational; + struct RComplex complex; + } as; +#ifdef GC_DEBUG + const char *file; + int line; +#endif +} RVALUE; + +#if defined(_MSC_VER) || defined(__BORLANDC__) || defined(__CYGWIN__) +#pragma pack(pop) +#endif + +struct heaps_slot { + void *membase; + RVALUE *slot; + size_t limit; + RVALUE *bitmap; +}; + +#define HEAP_MIN_SLOTS 10000 +#define FREE_MIN 4096 + +struct gc_list { + VALUE *varptr; + struct gc_list *next; +}; + +#define CALC_EXACT_MALLOC_SIZE 0 + +typedef struct rb_objspace { + struct { + size_t limit; + size_t increase; +#if CALC_EXACT_MALLOC_SIZE + size_t allocated_size; + size_t allocations; +#endif + } malloc_params; + struct { + size_t increment; + struct heaps_slot *ptr; + size_t length; + size_t used; + RVALUE *freelist; + RVALUE *range[2]; + RVALUE *freed; + } heap; + struct { + int dont_gc; + int during_gc; + } flags; + struct { + st_table *table; + RVALUE *deferred; + } final; + struct { + VALUE buffer[MARK_STACK_MAX]; + VALUE *ptr; + int overflow; + } markstack; + struct { + int run; + gc_profile_record *record; + size_t count; + size_t size; + double invoke_time; + } profile; + struct gc_list *global_list; + unsigned int count; + int gc_stress; + + struct { + RVALUE *freed_bitmap; + } ext_heap; +} rb_objspace_t; + +#define malloc_limit objspace->malloc_params.limit +#define malloc_increase objspace->malloc_params.increase +#define heap_slots objspace->heap.slots +#define heaps objspace->heap.ptr 
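+/*
+ * The #defines above and below are shorthand accessors for fields of the
+ * rb_objspace_t that the collector routines operate on; they keep this file
+ * textually close to the stock gc.c while still passing the object space
+ * around explicitly.  Note that each heaps_slot carries a pointer to its own
+ * mark bitmap (struct heaps_slot above): this collector records marks in
+ * that per-slot bitmap (see MARK_IN_BITMAP further down) instead of setting
+ * a flag bit in each object's header.
+ */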
+#define heaps_length objspace->heap.length +#define heaps_used objspace->heap.used +#define freelist objspace->heap.freelist +#define lomem objspace->heap.range[0] +#define himem objspace->heap.range[1] +#define heaps_inc objspace->heap.increment +#define heaps_freed objspace->heap.freed +#define dont_gc objspace->flags.dont_gc +#define during_gc objspace->flags.during_gc +#define finalizer_table objspace->final.table +#define deferred_final_list objspace->final.deferred +#define mark_stack objspace->markstack.buffer +#define mark_stack_ptr objspace->markstack.ptr +#define mark_stack_overflow objspace->markstack.overflow +#define global_List objspace->global_list +#define ruby_gc_stress objspace->gc_stress + +#define need_call_final (finalizer_table && finalizer_table->num_entries) + +static void rb_objspace_call_finalizer(rb_objspace_t *objspace); + +#include "ruby/gc_ext.h" +static rb_gc_inner_t *gc_inner; + +/* TODO: more suitable and safety expression */ +#define T_BITMAP (T_FIXNUM + 1) +#define FL_ALIGNOFF FL_MARK + +static rb_objspace_t * +rb_objspace_alloc_tmp(void) +{ + rb_objspace_t *objspace = malloc(sizeof(rb_objspace_t)); + memset(objspace, 0, sizeof(*objspace)); + malloc_limit = GC_MALLOC_LIMIT; + + return objspace; +} + +static void +rb_objspace_free_tmp(rb_objspace_t *objspace) +{ + rb_objspace_call_finalizer(objspace); + if (objspace->profile.record) { + free(objspace->profile.record); + objspace->profile.record = 0; + } + if (global_List) { + struct gc_list *list, *next; + for (list = global_List; list; list = next) { + next = list->next; + free(list); + } + } + if (heaps) { + size_t i; + for (i = 0; i < heaps_used; ++i) { + free(heaps[i].membase); + } + free(heaps); + heaps_used = 0; + heaps = 0; + } + free(objspace); +} + +/* tiny heap size */ +/* 32KB */ +/*#define HEAP_SIZE 0x8000 */ +/* 128KB */ +/*#define HEAP_SIZE 0x20000 */ +/* 64KB */ +/*#define HEAP_SIZE 0x10000 */ +/* 16KB */ +#define BITMAP_ALIGN 0x4000 +/* 8KB */ +/*#define HEAP_SIZE 0x2000 */ +/* 4KB */ +/*#define HEAP_SIZE 0x1000 */ +/* 2KB */ +/*#define HEAP_SIZE 0x800 */ + +#define HEAP_SIZE ((BITMAP_ALIGN / sizeof(struct RVALUE) + 2) * sizeof(RVALUE)) +#define BITMAP_MASK (0xFFFFFFFF - BITMAP_ALIGN + 1) +#define HEAP_OBJ_LIMIT (HEAP_SIZE / sizeof(struct RVALUE) - 1) + +extern VALUE rb_cMutex; +extern st_table *rb_class_tbl; + +int ruby_disable_gc_stress = 0; + +static void run_final(rb_objspace_t *objspace, VALUE obj); +static int garbage_collect(rb_objspace_t *objspace); + +/* + * call-seq: + * GC.stress => true or false + * + * returns current status of GC stress mode. + */ + +static VALUE +gc_stress_get(VALUE self) +{ + rb_objspace_t *objspace = gc_inner->get_objspace(); + return ruby_gc_stress ? Qtrue : Qfalse; +} + +/* + * call-seq: + * GC.stress = bool => bool + * + * updates GC stress mode. + * + * When GC.stress = true, GC is invoked for all GC opportunity: + * all memory and object allocation. + * + * Since it makes Ruby very slow, it is only for debugging. + */ + +static VALUE +gc_stress_set(VALUE self, VALUE flag) +{ + rb_objspace_t *objspace = gc_inner->get_objspace(); + rb_secure(2); + ruby_gc_stress = RTEST(flag); + return flag; +} + +/* + * call-seq: + * GC::Profiler.enable? => true or false + * + * returns current status of GC profile mode. + */ + +static VALUE +gc_profile_enable_get(VALUE self) +{ + rb_objspace_t *objspace = gc_inner->get_objspace(); + return objspace->profile.run; +} + +/* + * call-seq: + * GC::Profiler.enable => nil + * + * updates GC profile mode. 
+ * start profiler for GC. + * + */ + +static VALUE +gc_profile_enable(void) +{ + rb_objspace_t *objspace = gc_inner->get_objspace(); + + objspace->profile.run = TRUE; + return Qnil; +} + +/* + * call-seq: + * GC::Profiler.disable => nil + * + * updates GC profile mode. + * stop profiler for GC. + * + */ + +static VALUE +gc_profile_disable(void) +{ + rb_objspace_t *objspace = gc_inner->get_objspace(); + + objspace->profile.run = FALSE; + return Qnil; +} + +/* + * call-seq: + * GC::Profiler.clear => nil + * + * clear before profile data. + * + */ + +static VALUE +gc_profile_clear(void) +{ + rb_objspace_t *objspace = gc_inner->get_objspace(); + MEMZERO(objspace->profile.record, gc_profile_record, objspace->profile.size); + objspace->profile.count = 0; + return Qnil; +} + +static void vm_xfree(rb_objspace_t *objspace, void *ptr); + +static void * +vm_xmalloc(rb_objspace_t *objspace, size_t size) +{ + void *mem; + + if ((ssize_t)size < 0) { + gc_inner->negative_size_allocation_error("negative allocation size (or too big)"); + } + if (size == 0) size = 1; + +#if CALC_EXACT_MALLOC_SIZE + size += sizeof(size_t); +#endif + + if ((ruby_gc_stress && !ruby_disable_gc_stress) || + (malloc_increase+size) > malloc_limit) { + gc_inner->garbage_collect_with_gvl(objspace); + } + mem = malloc(size); + if (!mem) { + if (gc_inner->garbage_collect_with_gvl(objspace)) { + mem = malloc(size); + } + if (!mem) { + gc_inner->ruby_memerror(); + } + } + malloc_increase += size; + +#if CALC_EXACT_MALLOC_SIZE + objspace->malloc_params.allocated_size += size; + objspace->malloc_params.allocations++; + ((size_t *)mem)[0] = size; + mem = (size_t *)mem + 1; +#endif + + return mem; +} + +static void * +vm_xrealloc(rb_objspace_t *objspace, void *ptr, size_t size) +{ + void *mem; + + if ((ssize_t)size < 0) { + gc_inner->negative_size_allocation_error("negative re-allocation size"); + } + if (!ptr) return vm_xmalloc(objspace, size); + if (size == 0) { + vm_xfree(objspace, ptr); + return 0; + } + if (ruby_gc_stress && !ruby_disable_gc_stress) + gc_inner->garbage_collect_with_gvl(objspace); + +#if CALC_EXACT_MALLOC_SIZE + size += sizeof(size_t); + objspace->malloc_params.allocated_size -= size; + ptr = (size_t *)ptr - 1; +#endif + + mem = realloc(ptr, size); + if (!mem) { + if (gc_inner->garbage_collect_with_gvl(objspace)) { + mem = realloc(ptr, size); + } + if (!mem) { + gc_inner->ruby_memerror(); + } + } + malloc_increase += size; + +#if CALC_EXACT_MALLOC_SIZE + objspace->malloc_params.allocated_size += size; + ((size_t *)mem)[0] = size; + mem = (size_t *)mem + 1; +#endif + + return mem; +} + +static void +vm_xfree(rb_objspace_t *objspace, void *ptr) +{ +#if CALC_EXACT_MALLOC_SIZE + size_t size; + ptr = ((size_t *)ptr) - 1; + size = ((size_t*)ptr)[0]; + objspace->malloc_params.allocated_size -= size; + objspace->malloc_params.allocations--; +#endif + + free(ptr); +} + +static void * +ruby_xmalloc_tmp(size_t size) +{ + return vm_xmalloc(gc_inner->get_objspace(), size); +} + +static void * +ruby_xmalloc2_tmp(size_t n, size_t size) +{ + size_t len = size * n; + if (n != 0 && size != len / n) { + rb_raise(rb_eArgError, "malloc: possible integer overflow"); + } + return vm_xmalloc(gc_inner->get_objspace(), len); +} + +static void * +ruby_xcalloc_tmp(size_t n, size_t size) +{ + void *mem = ruby_xmalloc2(n, size); + memset(mem, 0, n * size); + + return mem; +} + +static void * +ruby_xrealloc_tmp(void *ptr, size_t size) +{ + return vm_xrealloc(gc_inner->get_objspace(), ptr, size); +} + +static void * +ruby_xrealloc2_tmp(void *ptr, 
size_t n, size_t size) +{ + size_t len = size * n; + if (n != 0 && size != len / n) { + rb_raise(rb_eArgError, "realloc: possible integer overflow"); + } + return ruby_xrealloc(ptr, len); +} + +static void +ruby_xfree_tmp(void *x) +{ + if (x) + vm_xfree(gc_inner->get_objspace(), x); +} + + +/* + * call-seq: + * GC.enable => true or false + * + * Enables garbage collection, returning true if garbage + * collection was previously disabled. + * + * GC.disable #=> false + * GC.enable #=> true + * GC.enable #=> false + * + */ + +static VALUE +rb_gc_enable_tmp(void) +{ + rb_objspace_t *objspace = gc_inner->get_objspace(); + int old = dont_gc; + + dont_gc = FALSE; + return old ? Qtrue : Qfalse; +} + +/* + * call-seq: + * GC.disable => true or false + * + * Disables garbage collection, returning true if garbage + * collection was already disabled. + * + * GC.disable #=> false + * GC.disable #=> true + * + */ + +static VALUE +rb_gc_disable_tmp(void) +{ + rb_objspace_t *objspace = gc_inner->get_objspace(); + int old = dont_gc; + + dont_gc = TRUE; + return old ? Qtrue : Qfalse; +} + +extern VALUE rb_mGC; + +static void +rb_gc_register_address_tmp(VALUE *addr) +{ + rb_objspace_t *objspace = gc_inner->get_objspace(); + struct gc_list *tmp; + + tmp = ALLOC(struct gc_list); + tmp->next = global_List; + tmp->varptr = addr; + global_List = tmp; +} + +static void +rb_gc_unregister_address_tmp(VALUE *addr) +{ + rb_objspace_t *objspace = gc_inner->get_objspace(); + struct gc_list *tmp = global_List; + + if (tmp->varptr == addr) { + global_List = tmp->next; + xfree(tmp); + return; + } + while (tmp->next) { + if (tmp->next->varptr == addr) { + struct gc_list *t = tmp->next; + + tmp->next = tmp->next->next; + xfree(t); + break; + } + tmp = tmp->next; + } +} + + +static void +allocate_heaps(rb_objspace_t *objspace, size_t next_heaps_length) +{ + struct heaps_slot *p; + size_t size; + + size = next_heaps_length*sizeof(struct heaps_slot); + + if (heaps_used > 0) { + p = (struct heaps_slot *)realloc(heaps, size); + if (p) heaps = p; + } + else { + p = heaps = (struct heaps_slot *)malloc(size); + } + + if (p == 0) { + during_gc = 0; + rb_memerror(); + } + heaps_length = next_heaps_length; +} + + +#define FIND_BITMAP(res, p) do {\ + if (((RVALUE *)p)->as.free.flags & FL_ALIGNOFF) {\ + res = (RVALUE *)((((VALUE)p & BITMAP_MASK) + BITMAP_ALIGN) / sizeof(RVALUE) * sizeof(RVALUE)); \ + }\ + else {\ + res = (RVALUE *)(((VALUE)p & BITMAP_MASK) / sizeof(RVALUE) * sizeof(RVALUE));\ + }\ +} while(0) + +#define NUM_IN_SLOT(p, slot) (((VALUE)p - (VALUE)slot)/sizeof(RVALUE)) +#define BITMAP_INDEX(bmap, p) (NUM_IN_SLOT(p, bmap->as.bitmap.slot) / (sizeof(int) * 8)) +/* #define BITMAP_INDEX(bmap, p) (NUM_IN_SLOT(p, bmap->as.bitmap.slot) >> 5) */ +#define BITMAP_OFFSET(bmap, p) (NUM_IN_SLOT(p, bmap->as.bitmap.slot) & ((sizeof(int) * 8)-1)) +#define MARKED_IN_BITMAP(bmap, p) (bmap->as.bitmap.map[BITMAP_INDEX(bmap, p)] & 1 << BITMAP_OFFSET(bmap, p)) +#define MARK_IN_BITMAP(bmap, p) (bmap->as.bitmap.map[BITMAP_INDEX(bmap, p)] |= 1 << BITMAP_OFFSET(bmap, p)) +#define CLEAR_IN_BITMAP(bmap, p) (bmap->as.bitmap.map[BITMAP_INDEX(bmap, p)] &= ~(1 << BITMAP_OFFSET(bmap, p))) +#define MARKED_IN_BITMAP_DIRECT(map, index, offset) (map[index] & 1 << offset) +#define MARK_IN_BITMAP_DIRECT(map, index, offset) (map[index] |= 1 << offset) + +/* for debug */ +void +bitmap_p(RVALUE *p) +{ + RVALUE *bmap; + int index, offset, marked; + + FIND_BITMAP(bmap, p); + index = BITMAP_INDEX(bmap, p); + offset = BITMAP_OFFSET(bmap, p); + marked = 
MARKED_IN_BITMAP(bmap, p); + printf("bitmap : ((RVALUE *)%p)\n", bmap); + printf("map_index : %d | offset : %d\n", index, offset); + printf("is mark ? %s\n", marked? "true" : "false"); +} + +VALUE +find_bitmap(RVALUE *p) { + RVALUE *res; + + FIND_BITMAP(res, p); + return (VALUE)res; +} + +void +dump_bitmap(RVALUE *bmap) { + int i; + + for (i = 0; i < 26; i++) { + printf("dump %p map %d : %d %s\n", bmap, i, bmap->as.bitmap.map[i], bmap->as.bitmap.map[i]? "remain" : "clean"); + } +} + +void +bitmap2obj(RVALUE *bmap, int index, int offset) +{ + printf("(RVALUE *)%p\n", (RVALUE *)(bmap->as.bitmap.slot + (index * sizeof(int) * 8 + offset) * sizeof(RVALUE))); +} + + +static void +make_bitmap(struct heaps_slot *slot) +{ + RVALUE *p, *pend, *bitmap, *last, *border; + int *map = 0; + int size; + + p = slot->slot; + pend = p + slot->limit; + last = pend - 1; + RBASIC(last)->flags = 0; + FIND_BITMAP(bitmap, last); + if (bitmap < p || pend <= bitmap) { + rb_bug("not include in heap slot: result bitmap(%p), find (%p), p (%p), pend(%p)", bitmap, last, p, pend); + } + border = bitmap; + if (!((VALUE)border % BITMAP_ALIGN)) { + border--; + } + while (p < pend) { + if (p <= border) { + RBASIC(p)->flags = FL_ALIGNOFF; + } + else { + RBASIC(p)->flags = 0; + } + p++; + } + + size = sizeof(int) * (HEAP_OBJ_LIMIT / (sizeof(int) * 8)+1); + map = (int *)malloc(size); + if (map == 0) { + rb_memerror(); + } + MEMZERO(map, int, (size/sizeof(int))); + bitmap->as.bitmap.flags |= T_BITMAP; + bitmap->as.bitmap.map = map; + bitmap->as.bitmap.slot = (VALUE)slot->slot; + bitmap->as.bitmap.limit = slot->limit; + slot->bitmap = bitmap; +} + +void +test_bitmap(RVALUE *p, RVALUE *pend) +{ + RVALUE *first, *bmap = 0, *bmap_tmp; + int i; + + first = p; + FIND_BITMAP(bmap_tmp, p); + while (p < pend) { + if (MARKED_IN_BITMAP(bmap, p)) printf("already marking! %p\n", p); + if (bmap_tmp != p) { + FIND_BITMAP(bmap, p); + if (bmap_tmp != bmap) printf("diffrence bmap %p : %p\n", bmap_tmp, bmap); + MARK_IN_BITMAP(bmap, p); + } + else { + MARK_IN_BITMAP(bmap, p); + } + if (!MARKED_IN_BITMAP(bmap, p)) printf("not marking! %p\n", p); + p++; + } + for (i =0; i < 26; i++) { + printf("bitmap[%d] : %x\n", i, bmap->as.bitmap.map[i]); + } + p = first; + while (p < pend) { + if (bmap_tmp != p) { + FIND_BITMAP(bmap, p); + CLEAR_IN_BITMAP(bmap, p); + } + else { + CLEAR_IN_BITMAP(bmap, p); + } + if (MARKED_IN_BITMAP(bmap, p)) printf("not clear! 
%p\n", p); + p++; + } + for (i =0; i < 26; i++) { + printf("bitmap[%d] : %x\n", i, bmap->as.bitmap.map[i]); + } +} + +static void +assign_heap_slot(rb_objspace_t *objspace) +{ + RVALUE *p, *pend, *membase; + size_t hi, lo, mid; + size_t objs; + + objs = HEAP_OBJ_LIMIT; + p = (RVALUE*)malloc(HEAP_SIZE); + + if (p == 0) { + during_gc = 0; + rb_memerror(); + } + + membase = p; + if ((VALUE)p % sizeof(RVALUE) != 0) { + p = (RVALUE*)((VALUE)p + sizeof(RVALUE) - ((VALUE)p % sizeof(RVALUE))); + } + + lo = 0; + hi = heaps_used; + while (lo < hi) { + register RVALUE *mid_membase; + mid = (lo + hi) / 2; + mid_membase = heaps[mid].membase; + if (mid_membase < membase) { + lo = mid + 1; + } + else if (mid_membase > membase) { + hi = mid; + } + else { + rb_bug("same heap slot is allocated: %p at %"PRIuVALUE, (void *)membase, (VALUE)mid); + } + } + if (hi < heaps_used) { + MEMMOVE(&heaps[hi+1], &heaps[hi], struct heaps_slot, heaps_used - hi); + } + heaps[hi].membase = membase; + heaps[hi].slot = p; + heaps[hi].limit = objs; + pend = p + objs; + if (lomem == 0 || lomem > p) lomem = p; + if (himem < pend) himem = pend; + heaps_used++; + + make_bitmap(&heaps[hi]); + while (p < pend) { + if (BUILTIN_TYPE(p) != T_BITMAP) { + p->as.free.next = freelist; + freelist = p; + } + p++; + } +} + +static void +init_heap(rb_objspace_t *objspace) +{ + size_t add, i; + + add = HEAP_MIN_SLOTS / HEAP_OBJ_LIMIT; + + if (!add) { + add = 1; + } + + if ((heaps_used + add) > heaps_length) { + allocate_heaps(objspace, heaps_used + add); + } + + for (i = 0; i < add; i++) { + assign_heap_slot(objspace); + } + heaps_inc = 0; + objspace->profile.invoke_time = getrusage_time(); +} + + +static void +set_heaps_increment(rb_objspace_t *objspace) +{ + size_t next_heaps_length = (size_t)(heaps_used * 1.8); + + if (next_heaps_length == heaps_used) { + next_heaps_length++; + } + + heaps_inc = next_heaps_length - heaps_used; + + if (next_heaps_length > heaps_length) { + allocate_heaps(objspace, next_heaps_length); + } +} + +static int +heaps_increment(rb_objspace_t *objspace) +{ + if (heaps_inc > 0) { + assign_heap_slot(objspace); + heaps_inc--; + return TRUE; + } + return FALSE; +} + +#define RANY(o) ((RVALUE*)(o)) + +static VALUE +rb_newobj_from_heap(rb_objspace_t *objspace) +{ + VALUE obj; + int bmap_left = 0; + + if ((ruby_gc_stress && !ruby_disable_gc_stress) || !freelist) { + if (!heaps_increment(objspace) && !garbage_collect(objspace)) { + during_gc = 0; + rb_memerror(); + } + } + + obj = (VALUE)freelist; + freelist = freelist->as.free.next; + + if (RANY(obj)->as.free.flags & FL_ALIGNOFF) { + bmap_left = Qtrue; + } + MEMZERO((void*)obj, RVALUE, 1); + if (bmap_left) { + RANY(obj)->as.free.flags = FL_ALIGNOFF; + } +#ifdef GC_DEBUG + RANY(obj)->file = rb_sourcefile(); + RANY(obj)->line = rb_sourceline(); +#endif + + return obj; +} + +/* TODO: remove this function. 
*/ +#if USE_VALUE_CACHE +static VALUE +rb_fill_value_cache(rb_thread_t *th) +{ + rb_objspace_t *objspace = gc_inner->get_objspace(); + int i; + VALUE rv; + RVALUE *bmap; + + /* LOCK */ + for (i=0; ivalue_cache[i] = v; + FIND_BITMAP(bmap, v); + MARK_IN_BITMAP(bmap, v); + } + th->value_cache_ptr = &th->value_cache[0]; + rv = rb_newobj_from_heap(objspace); + /* UNLOCK */ + return rv; +} +#endif + +static int +rb_during_gc_tmp(void) +{ + rb_objspace_t *objspace = gc_inner->get_objspace(); + return during_gc; +} + +static VALUE +rb_newobj_tmp(void) +{ +#if USE_VALUE_CACHE + rb_thread_t *th = GET_THREAD(); + VALUE v = *th->value_cache_ptr; +#endif + rb_objspace_t *objspace = gc_inner->get_objspace(); + + if (during_gc) { + dont_gc = 1; + during_gc = 0; + rb_bug("object allocation during garbage collection phase"); + } + +#if USE_VALUE_CACHE + if (v) { + rb_set_flag_force(v, 0); + th->value_cache_ptr++; + } + else { + v = rb_fill_value_cache(th); + } + +#if defined(GC_DEBUG) + printf("cache index: %d, v: %p, th: %p\n", + th->value_cache_ptr - th->value_cache, v, th); +#endif + return v; +#else + return rb_newobj_from_heap(objspace); +#endif +} + +static void +rb_set_flag_force_tmp(VALUE obj, VALUE t) +{ + t = t & ~FL_ALIGNOFF; + if (RBASIC(obj)->flags & FL_ALIGNOFF) { + RBASIC(obj)->flags = FL_ALIGNOFF | t; + } + else { + RBASIC(obj)->flags = t; + } +} + +static VALUE +rb_data_object_alloc_tmp(VALUE klass, void *datap, RUBY_DATA_FUNC dmark, + RUBY_DATA_FUNC dfree) +{ + NEWOBJ(data, struct RData); + if (klass) Check_Type(klass, T_CLASS); + OBJSETUP(data, klass, T_DATA); + data->data = datap; + data->dfree = dfree; + data->dmark = dmark; + + return (VALUE)data; +} + +static VALUE +rb_data_typed_object_alloc_tmp(VALUE klass, void *datap, + const rb_data_type_t *type) +{ + NEWOBJ(data, struct RTypedData); + + if (klass) Check_Type(klass, T_CLASS); + + OBJSETUP(data, klass, T_DATA); + + data->data = datap; + data->typed_flag = 1; + data->type = type; + + return (VALUE)data; +} + +static size_t +rb_objspace_data_type_memsize_tmp(VALUE obj) +{ + if (RTYPEDDATA_P(obj)) { + return RTYPEDDATA_TYPE(obj)->dsize(RTYPEDDATA_DATA(obj)); + } + else { + return 0; + } +} + +static const char * +rb_objspace_data_type_name_tmp(VALUE obj) +{ + if (RTYPEDDATA_P(obj)) { + return RTYPEDDATA_TYPE(obj)->wrap_struct_name; + } + else { + return 0; + } +} + +#ifdef __ia64 +#define SET_STACK_END (SET_MACHINE_STACK_END(&th->machine_stack_end), th->machine_register_stack_end = rb_ia64_bsp()) +#else +#define SET_STACK_END SET_MACHINE_STACK_END(&th->machine_stack_end) +#endif + +#define STACK_START (th->machine_stack_start) +#define STACK_END (th->machine_stack_end) +#define STACK_LEVEL_MAX (th->machine_stack_maxsize/sizeof(VALUE)) + +#if STACK_GROW_DIRECTION < 0 +# define STACK_LENGTH (size_t)(STACK_START - STACK_END) +#elif STACK_GROW_DIRECTION > 0 +# define STACK_LENGTH (size_t)(STACK_END - STACK_START + 1) +#else +# define STACK_LENGTH ((STACK_END < STACK_START) ? 
(size_t)(STACK_START - STACK_END) \ + : (size_t)(STACK_END - STACK_START + 1)) +#endif +#if !STACK_GROW_DIRECTION +int ruby_stack_grow_direction; +static int +ruby_get_stack_grow_direction_tmp(volatile VALUE *addr) +{ + VALUE *end; + SET_MACHINE_STACK_END(&end); + + if (end > addr) return ruby_stack_grow_direction = 1; + return ruby_stack_grow_direction = -1; +} +#endif + +#define GC_WATER_MARK 512 + +static int +ruby_stack_check_tmp(void) +{ +#if defined(POSIX_SIGNAL) && defined(SIGSEGV) && defined(HAVE_SIGALTSTACK) + return 0; +#else + return gc_inner->stack_check(); +#endif +} + +static void +init_mark_stack(rb_objspace_t *objspace) +{ + mark_stack_overflow = 0; + mark_stack_ptr = mark_stack; +} + +#define MARK_STACK_EMPTY (mark_stack_ptr == mark_stack) + +static void gc_mark(rb_objspace_t *objspace, VALUE ptr, int lev); + +#define IS_FREE_CELL(obj) ((obj->as.basic.flags & ~(FL_ALIGNOFF)) == 0) + +static void +gc_mark_all(rb_objspace_t *objspace) +{ + RVALUE *p, *pend, *bmap; + size_t i; + + init_mark_stack(objspace); + for (i = 0; i < heaps_used; i++) { + p = heaps[i].slot; pend = p + heaps[i].limit; + bmap = heaps[i].bitmap; + while (p < pend) { + if (MARKED_IN_BITMAP(bmap, p) && + !(IS_FREE_CELL(p))) { + gc_inner->gc_mark_children(objspace, (VALUE)p, 0); + } + p++; + } + } +} + +static void +gc_mark_rest(rb_objspace_t *objspace) +{ + VALUE tmp_arry[MARK_STACK_MAX]; + VALUE *p; + + p = (mark_stack_ptr - mark_stack) + tmp_arry; + MEMCPY(tmp_arry, mark_stack, VALUE, p - tmp_arry); + + init_mark_stack(objspace); + while (p != tmp_arry) { + p--; + gc_inner->gc_mark_children(objspace, *p, 0); + } +} + +static inline int +is_pointer_to_heap(rb_objspace_t *objspace, void *ptr) +{ + register RVALUE *p = RANY(ptr); + register struct heaps_slot *heap; + register size_t hi, lo, mid; + + if (p < lomem || p > himem) return FALSE; + if ((VALUE)p % sizeof(RVALUE) != 0) return FALSE; + + /* check if p looks like a pointer using bsearch*/ + lo = 0; + hi = heaps_used; + while (lo < hi) { + mid = (lo + hi) / 2; + heap = &heaps[mid]; + if (heap->slot <= p) { + if (p < heap->slot + heap->limit) + return TRUE; + lo = mid + 1; + } + else { + hi = mid; + } + } + return FALSE; +} + +static void +mark_locations_array(rb_objspace_t *objspace, register VALUE *x, register long n) +{ + VALUE v; + while (n--) { + v = *x; + VALGRIND_MAKE_MEM_DEFINED(&v, sizeof(v)); + if (is_pointer_to_heap(objspace, (void *)v)) { + gc_mark(objspace, v, 0); + } + x++; + } +} + +static void +gc_mark_locations(rb_objspace_t *objspace, VALUE *start, VALUE *end) +{ + long n; + + if (end <= start) return; + n = end - start; + mark_locations_array(objspace, start, n); +} + +static void +rb_gc_mark_locations_tmp(VALUE *start, VALUE *end) +{ + gc_mark_locations(gc_inner->get_objspace(), start, end); +} + +#define rb_gc_mark_locations(start, end) gc_mark_locations(objspace, start, end) + +struct mark_tbl_arg { + rb_objspace_t *objspace; + int lev; +}; + +static int +mark_entry(ID key, VALUE value, st_data_t data) +{ + struct mark_tbl_arg *arg = (void*)data; + gc_mark(arg->objspace, value, arg->lev); + return ST_CONTINUE; +} + +static void +mark_tbl(rb_objspace_t *objspace, st_table *tbl, int lev) +{ + struct mark_tbl_arg arg; + if (!tbl) return; + arg.objspace = objspace; + arg.lev = lev; + st_foreach(tbl, mark_entry, (st_data_t)&arg); +} + +static int +mark_key(VALUE key, VALUE value, st_data_t data) +{ + struct mark_tbl_arg *arg = (void*)data; + gc_mark(arg->objspace, key, arg->lev); + return ST_CONTINUE; +} + +static void 
+mark_set(rb_objspace_t *objspace, st_table *tbl, int lev) +{ + struct mark_tbl_arg arg; + if (!tbl) return; + arg.objspace = objspace; + arg.lev = lev; + st_foreach(tbl, mark_key, (st_data_t)&arg); +} + +static void +rb_mark_set_tmp(st_table *tbl) +{ + mark_set(gc_inner->get_objspace(), tbl, 0); +} + +static int +mark_keyvalue(VALUE key, VALUE value, st_data_t data) +{ + struct mark_tbl_arg *arg = (void*)data; + gc_mark(arg->objspace, key, arg->lev); + gc_mark(arg->objspace, value, arg->lev); + return ST_CONTINUE; +} + +static void +mark_hash(rb_objspace_t *objspace, st_table *tbl, int lev) +{ + struct mark_tbl_arg arg; + if (!tbl) return; + arg.objspace = objspace; + arg.lev = lev; + st_foreach(tbl, mark_keyvalue, (st_data_t)&arg); +} + +static int +mark_method_entry_i(ID key, const rb_method_entry_t *me, st_data_t data) +{ + struct mark_tbl_arg *arg = (void*)data; + gc_inner->mark_method_entry(arg->objspace, me, arg->lev); + return ST_CONTINUE; +} + +static void +mark_m_tbl(rb_objspace_t *objspace, st_table *tbl, int lev) +{ + struct mark_tbl_arg arg; + if (!tbl) return; + arg.objspace = objspace; + arg.lev = lev; + st_foreach(tbl, mark_method_entry_i, (st_data_t)&arg); +} + +static int +free_method_entry_i(ID key, rb_method_entry_t *me, st_data_t data) +{ + rb_free_method_entry(me); + return ST_CONTINUE; +} + +static void +rb_free_m_table_tmp(st_table *tbl) +{ + st_foreach(tbl, free_method_entry_i, 0); + st_free_table(tbl); +} + +static void +rb_mark_tbl_tmp(st_table *tbl) +{ + mark_tbl(gc_inner->get_objspace(), tbl, 0); +} + +static void +rb_gc_mark_maybe_tmp(VALUE obj) +{ + if (is_pointer_to_heap(gc_inner->get_objspace(), (void *)obj)) { + gc_mark(gc_inner->get_objspace(), obj, 0); + } +} + +#define GC_LEVEL_MAX 250 + +static void +gc_mark(rb_objspace_t *objspace, VALUE ptr, int lev) +{ + register RVALUE *obj, *bmap; + + obj = RANY(ptr); + if (rb_special_const_p(ptr)) return; /* special const not marked */ + if (IS_FREE_CELL(obj)) return; /* free cell */ + if (BUILTIN_TYPE(obj) == T_BITMAP) return; + FIND_BITMAP(bmap, obj); + if (MARKED_IN_BITMAP(bmap, obj)) return; /* already marked */ + MARK_IN_BITMAP(bmap, obj); + + if (lev > GC_LEVEL_MAX || (lev == 0 && gc_inner->stack_check())) { + if (!mark_stack_overflow) { + if (mark_stack_ptr - mark_stack < MARK_STACK_MAX) { + *mark_stack_ptr = ptr; + mark_stack_ptr++; + } + else { + mark_stack_overflow = 1; + } + } + return; + } + gc_inner->gc_mark_children(objspace, ptr, lev+1); +} + +static int +gc_set_mark_flag(register RVALUE *obj) +{ + register RVALUE *bmap; + if (IS_FREE_CELL(obj)) return 1; /* free cell */ + FIND_BITMAP(bmap, obj); + if (MARKED_IN_BITMAP(bmap, obj)) return 1; /* already marked */ + MARK_IN_BITMAP(bmap, obj); + return 0; +} + +static inline void +add_freelist(rb_objspace_t *objspace, RVALUE *p) +{ + RVALUE *bmap; + + VALGRIND_MAKE_MEM_UNDEFINED((void*)p, sizeof(RVALUE)); + rb_set_flag_force((VALUE)p, 0); + FIND_BITMAP(bmap, p); + CLEAR_IN_BITMAP(bmap, p); + p->as.free.next = freelist; + freelist = p; +} + +static void +finalize_list(rb_objspace_t *objspace, RVALUE *p) +{ + while (p) { + RVALUE *tmp = p->as.free.next; + run_final(objspace, (VALUE)p); + if (!FL_TEST(p, FL_SINGLETON)) { /* not freeing page */ + add_freelist(objspace, p); + } + else { + struct heaps_slot *slot = (struct heaps_slot *)RDATA(p)->dmark; + slot->limit--; + } + p = tmp; + } +} + +static void +free_unused_heaps(rb_objspace_t *objspace) +{ + size_t i, j; + RVALUE *last = 0, *bmap = 0; + + for (i = j = 1; j < heaps_used; i++) { + if 
(heaps[i].limit == 0) { + if (!last) { + last = heaps[i].membase; + bmap = heaps[i].bitmap; + } + else { + free(heaps[i].membase); + free(heaps[i].bitmap->as.bitmap.map); + } + heaps_used--; + } + else { + if (i != j) { + heaps[j] = heaps[i]; + } + j++; + } + } + if (last) { + if (last < heaps_freed) { + free(heaps_freed); + free(objspace->ext_heap.freed_bitmap->as.bitmap.map); + heaps_freed = last; + heaps_freed = bmap; + } + else { + free(last); + free(bmap->as.bitmap.map); + } + } +} + +static void +gc_sweep(rb_objspace_t *objspace) +{ + RVALUE *p, *pend, *final_list; + size_t freed = 0; + size_t i; + size_t live = 0, free_min = 0, do_heap_free = 0; + + do_heap_free = (size_t)((heaps_used * HEAP_OBJ_LIMIT) * 0.65); + free_min = (size_t)((heaps_used * HEAP_OBJ_LIMIT) * 0.2); + + if (free_min < FREE_MIN) { + do_heap_free = heaps_used * HEAP_OBJ_LIMIT; + free_min = FREE_MIN; + } + + freelist = 0; + final_list = deferred_final_list; + deferred_final_list = 0; + for (i = 0; i < heaps_used; i++) { + size_t free_num = 0, final_num = 0; + RVALUE *free = freelist; + RVALUE *final = final_list; + int *map = heaps[i].bitmap->as.bitmap.map; + int deferred, bmap_index = 0, bmap_offset = 0; + + p = heaps[i].slot; pend = p + heaps[i].limit; + while (p < pend) { + if (BUILTIN_TYPE(p) == T_BITMAP) { + free_num++; + } + else if(!(MARKED_IN_BITMAP_DIRECT(map, bmap_index, bmap_offset))) { + if (!(IS_FREE_CELL(p)) && + ((deferred = gc_inner->obj_free(objspace, (VALUE)p)) || + ((FL_TEST(p, FL_FINALIZE)) && need_call_final))) { + if (!deferred) { + rb_set_flag_force((VALUE)p, T_ZOMBIE); + RDATA(p)->dfree = 0; + } + p->as.free.next = final_list; + final_list = p; + final_num++; + } + else { + /* Do not touch the fields if they don't have to be modified. + * This is in order to preserve copy-on-write semantics. 
+ */ + if (!IS_FREE_CELL(p)) + rb_set_flag_force((VALUE)p, 0); + if (p->as.free.next != freelist) + p->as.free.next = freelist; + freelist = p; + free_num++; + } + } + else if (BUILTIN_TYPE(p) == T_ZOMBIE) { + /* objects to be finalized */ + /* do nothing remain marked */ + } + else { + live++; + } + p++; + bmap_offset++; + if (bmap_offset >= (int)(sizeof(int) * 8)) { + bmap_index++; + bmap_offset = 0; + } + } + MEMZERO(heaps[i].bitmap->as.bitmap.map, int, bmap_index+1); + if (final_num + free_num == heaps[i].limit && freed > do_heap_free) { + RVALUE *pp; + + for (pp = final_list; pp != final; pp = pp->as.free.next) { + RDATA(pp)->dmark = (void *)&heaps[i]; + pp->as.free.flags |= FL_SINGLETON; /* freeing page mark */ + } + heaps[i].limit = final_num; + + freelist = free; /* cancel this page from freelist */ + } + else { + freed += free_num; + } + } + GC_PROF_SET_MALLOC_INFO; + if (malloc_increase > malloc_limit) { + malloc_limit += (size_t)((malloc_increase - malloc_limit) * (double)live / (live + freed)); + if (malloc_limit < GC_MALLOC_LIMIT) malloc_limit = GC_MALLOC_LIMIT; + } + malloc_increase = 0; + if (freed < free_min) { + set_heaps_increment(objspace); + heaps_increment(objspace); + } + during_gc = 0; + + /* clear finalization list */ + if (final_list) { + RVALUE *bmap, *pp; + for (pp = final_list; pp != 0; pp = pp->as.free.next) { + FIND_BITMAP(bmap, pp); + MARK_IN_BITMAP(bmap, pp); + } + GC_PROF_SET_HEAP_INFO; + deferred_final_list = final_list; + gc_inner->ruby_vm_set_finalizer_interrupt(); + } + else { + free_unused_heaps(objspace); + GC_PROF_SET_HEAP_INFO; + } +} + +static void +rb_gc_force_recycle_tmp(VALUE p) +{ + rb_objspace_t *objspace = gc_inner->get_objspace(); + add_freelist(objspace, (RVALUE *)p); +} + +static inline void +make_deferred(RVALUE *p) +{ + rb_set_flag_force((VALUE)p, (p->as.basic.flags & ~T_MASK) | T_ZOMBIE); +} + +static inline void +make_io_deferred(RVALUE *p) +{ + rb_io_t *fptr = p->as.file.fptr; + make_deferred(p); + p->as.data.dfree = (void (*)(void*))rb_io_fptr_finalize; + p->as.data.data = fptr; +} + +#define GC_NOTIFY 0 + +void rb_vm_mark(void *ptr); + +#if STACK_GROW_DIRECTION < 0 +#define GET_STACK_BOUNDS(start, end, appendix) (start = STACK_END, end = STACK_START) +#elif STACK_GROW_DIRECTION > 0 +#define GET_STACK_BOUNDS(start, end, appendix) (start = STACK_START, end = STACK_END+appendix) +#else +#define GET_STACK_BOUNDS(stack_start, stack_end, appendix) \ + ((STACK_END < STACK_START) ? 
\ + (start = STACK_END, end = STACK_START) : (start = STACK_START, end = STACK_END+appendix)) +#endif + +void rb_gc_mark_encodings(void); + +static int +garbage_collect(rb_objspace_t *objspace) +{ + struct gc_list *list; + INIT_GC_PROF_PARAMS; + + if (GC_NOTIFY) printf("start garbage_collect()\n"); + + if (!heaps) { + return FALSE; + } + + if (dont_gc || during_gc) { + if (!freelist) { + if (!heaps_increment(objspace)) { + set_heaps_increment(objspace); + heaps_increment(objspace); + } + } + return TRUE; + } + during_gc++; + objspace->count++; + + GC_PROF_TIMER_START; + GC_PROF_MARK_TIMER_START; + + gc_inner->gc_mark_core(objspace); + + if (finalizer_table) { + mark_tbl(objspace, finalizer_table, 0); + } + + rb_gc_mark_threads(); + rb_gc_mark_symbols(); + rb_gc_mark_encodings(); + + /* mark protected global variables */ + for (list = global_List; list; list = list->next) { + rb_gc_mark_maybe(*list->varptr); + } + rb_mark_end_proc(); + rb_gc_mark_global_tbl(); + + mark_tbl(objspace, rb_class_tbl, 0); + + /* mark generic instance variables for special constants */ + rb_mark_generic_ivar_tbl(); + + rb_gc_mark_parser(); + + /* gc_mark objects whose marking are not completed*/ + while (!MARK_STACK_EMPTY) { + if (mark_stack_overflow) { + gc_mark_all(objspace); + } + else { + gc_mark_rest(objspace); + } + } + GC_PROF_MARK_TIMER_STOP; + + GC_PROF_SWEEP_TIMER_START; + gc_sweep(objspace); + GC_PROF_SWEEP_TIMER_STOP; + + GC_PROF_TIMER_STOP; + if (GC_NOTIFY) printf("end garbage_collect()\n"); + return TRUE; +} + +static int +rb_garbage_collect_tmp(void) +{ + return garbage_collect(gc_inner->get_objspace()); +} + +/* + * Document-class: ObjectSpace + * + * The ObjectSpace module contains a number of routines + * that interact with the garbage collection facility and allow you to + * traverse all living objects with an iterator. + * + * ObjectSpace also provides support for object + * finalizers, procs that will be called when a specific object is + * about to be destroyed by garbage collection. + * + * include ObjectSpace + * + * + * a = "A" + * b = "B" + * c = "C" + * + * + * define_finalizer(a, proc {|id| puts "Finalizer one on #{id}" }) + * define_finalizer(a, proc {|id| puts "Finalizer two on #{id}" }) + * define_finalizer(b, proc {|id| puts "Finalizer three on #{id}" }) + * + * produces: + * + * Finalizer three on 537763470 + * Finalizer one on 537763480 + * Finalizer two on 537763480 + * + */ + +static void +Init_heap_tmp(void) +{ + init_heap(gc_inner->get_objspace()); +} + +/* + * rb_objspace_each_objects() is special C API to walk through + * Ruby object space. This C API is too difficult to use it. + * To be frank, you should not use it. Or you need to read the + * source code of this function and understand what this function does. + * + * 'callback' will be called several times (the number of heap slot, + * at current implementation) with: + * vstart: a pointer to the first living object of the heap_slot. + * vend: a pointer to next to the valid heap_slot area. + * stride: a distance to next VALUE. + * + * If callback() returns non-zero, the iteration will be stopped. + * + * This is a sample callback code to iterate liveness objects: + * + * int + * sample_callback(void *vstart, void *vend, int stride, void *data) { + * VALUE v = (VALUE)vstart; + * for (; v != (VALUE)vend; v += stride) { + * if (RBASIC(v)->flags) { // liveness check + * // do something with live object 'v' + * } + * return 0; // continue to iteration + * } + * + * Note: 'vstart' is not a top of heap_slot. 
This point the first + * living object to grasp at least one object to avoid GC issue. + * This means that you can not walk through all Ruby object slot + * including freed object slot. + * + * Note: On this implementation, 'stride' is same as sizeof(RVALUE). + * However, there are possibilities to pass variable values with + * 'stride' with some reasons. You must use stride instead of + * use some constant value in the iteration. + */ +static void +rb_objspace_each_objects_tmp(int (*callback)(void *vstart, void *vend, + size_t stride, void *d), + void *data) +{ + size_t i; + RVALUE *membase = 0; + RVALUE *pstart, *pend; + rb_objspace_t *objspace = gc_inner->get_objspace(); + volatile VALUE v; + + i = 0; + while (i < heaps_used) { + while (0 < i && (uintptr_t)membase < (uintptr_t)heaps[i-1].membase) + i--; + while (i < heaps_used && (uintptr_t)heaps[i].membase <= (uintptr_t)membase ) + i++; + if (heaps_used <= i) + break; + membase = heaps[i].membase; + + pstart = heaps[i].slot; + pend = pstart + heaps[i].limit; + + for (; pstart != pend; pstart++) { + if (pstart->as.basic.flags & ~FL_ALIGNOFF) { + v = (VALUE)pstart; /* acquire to save this object */ + break; + } + } + if (pstart != pend) { + if ((*callback)(pstart, pend, sizeof(RVALUE), data)) { + return; + } + } + } + + return; +} + +struct os_each_struct { + size_t num; + VALUE of; +}; + +static int +os_obj_of_i(void *vstart, void *vend, size_t stride, void *data) +{ + struct os_each_struct *oes = (struct os_each_struct *)data; + RVALUE *p = (RVALUE *)vstart, *pend = (RVALUE *)vend; + volatile VALUE v; + + for (; p != pend; p++) { + if (!IS_FREE_CELL(p)) { + if (gc_inner->os_obj_of_check_type(p)) { + if (BUILTIN_TYPE(p) == T_BITMAP) continue; + if (!p->as.basic.klass) continue; + v = (VALUE)p; + if (!oes->of || rb_obj_is_kind_of(v, oes->of)) { + rb_yield(v); + oes->num++; + } + } + } + } + + return 0; +} + +static VALUE +os_obj_of(VALUE of) +{ + struct os_each_struct oes; + + oes.num = 0; + oes.of = of; + rb_objspace_each_objects(os_obj_of_i, &oes); + return SIZET2NUM(oes.num); +} + +/* + * call-seq: + * ObjectSpace.each_object([module]) {|obj| ... } => fixnum + * + * Calls the block once for each living, nonimmediate object in this + * Ruby process. If module is specified, calls the block + * for only those classes or modules that match (or are a subclass of) + * module. Returns the number of objects found. Immediate + * objects (Fixnums, Symbols + * true, false, and nil) are + * never returned. In the example below, each_object + * returns both the numbers we defined and several constants defined in + * the Math module. + * + * a = 102.7 + * b = 95 # Won't be returned + * c = 12345678987654321 + * count = ObjectSpace.each_object(Numeric) {|x| p x } + * puts "Total count: #{count}" + * + * produces: + * + * 12345678987654321 + * 102.7 + * 2.71828182845905 + * 3.14159265358979 + * 2.22044604925031e-16 + * 1.7976931348623157e+308 + * 2.2250738585072e-308 + * Total count: 7 + * + */ + +static VALUE +os_each_obj(int argc, VALUE *argv, VALUE os) +{ + VALUE of; + + rb_secure(4); + if (argc == 0) { + of = 0; + } + else { + rb_scan_args(argc, argv, "01", &of); + } + RETURN_ENUMERATOR(os, 1, &of); + return os_obj_of(of); +} + +/* + * call-seq: + * ObjectSpace.undefine_finalizer(obj) + * + * Removes all finalizers for obj. 
+ * + */ + +static VALUE +undefine_final(VALUE os, VALUE obj) +{ + rb_objspace_t *objspace = gc_inner->get_objspace(); + if (OBJ_FROZEN(obj)) rb_error_frozen("object"); + if (finalizer_table) { + st_delete(finalizer_table, (st_data_t*)&obj, 0); + } + FL_UNSET(obj, FL_FINALIZE); + return obj; +} + +/* + * call-seq: + * ObjectSpace.define_finalizer(obj, aProc=proc()) + * + * Adds aProc as a finalizer, to be called after obj + * was destroyed. + * + */ + +static VALUE +define_final(int argc, VALUE *argv, VALUE os) +{ + rb_objspace_t *objspace = gc_inner->get_objspace(); + VALUE obj, block, table; + + rb_scan_args(argc, argv, "11", &obj, &block); + if (OBJ_FROZEN(obj)) rb_error_frozen("object"); + if (argc == 1) { + block = rb_block_proc(); + } + else if (!rb_respond_to(block, rb_intern("call"))) { + rb_raise(rb_eArgError, "wrong type argument %s (should be callable)", + rb_obj_classname(block)); + } + if (!FL_ABLE(obj)) { + rb_raise(rb_eArgError, "cannot define finalizer for %s", + rb_obj_classname(obj)); + } + RBASIC(obj)->flags |= FL_FINALIZE; + + block = rb_ary_new3(2, INT2FIX(rb_safe_level()), block); + OBJ_FREEZE(block); + + if (!finalizer_table) { + finalizer_table = st_init_numtable(); + } + if (st_lookup(finalizer_table, obj, &table)) { + rb_ary_push(table, block); + } + else { + table = rb_ary_new3(1, block); + RBASIC(table)->klass = 0; + st_add_direct(finalizer_table, obj, table); + } + return block; +} + +static void +rb_gc_copy_finalizer_tmp(VALUE dest, VALUE obj) +{ + rb_objspace_t *objspace = gc_inner->get_objspace(); + VALUE table; + + if (!finalizer_table) return; + if (!FL_TEST(obj, FL_FINALIZE)) return; + if (st_lookup(finalizer_table, obj, &table)) { + st_insert(finalizer_table, dest, table); + } + FL_SET(dest, FL_FINALIZE); +} + +static VALUE +run_single_final(VALUE arg) +{ + VALUE *args = (VALUE *)arg; + rb_eval_cmd(args[0], args[1], (int)args[2]); + return Qnil; +} + +static void +run_finalizer(rb_objspace_t *objspace, VALUE obj, VALUE objid, VALUE table) +{ + long i; + int status; + VALUE args[3]; + + args[1] = 0; + args[2] = (VALUE)rb_safe_level(); + if (!args[1] && RARRAY_LEN(table) > 0) { + args[1] = rb_obj_freeze(rb_ary_new3(1, objid)); + } + for (i=0; iklass = 0; + + if (RTYPEDDATA_P(obj)) { + free_func = RTYPEDDATA_TYPE(obj)->dfree; + } + else { + free_func = RDATA(obj)->dfree; + } + if (free_func) { + (*free_func)(DATA_PTR(obj)); + } + + if (finalizer_table && + st_delete(finalizer_table, (st_data_t*)&obj, &table)) { + run_finalizer(objspace, obj, objid, table); + } +} + +static void +finalize_deferred(rb_objspace_t *objspace) +{ + RVALUE *p = deferred_final_list; + deferred_final_list = 0; + + if (p) { + finalize_list(objspace, p); + } +} + +static void +gc_finalize_deferred(rb_objspace_t *objspace) +{ + finalize_deferred(objspace); + free_unused_heaps(objspace); +} + +static int +chain_finalized_object(st_data_t key, st_data_t val, st_data_t arg) +{ + RVALUE *p = (RVALUE *)key, **final_list = (RVALUE **)arg, *bmap; + if (p->as.basic.flags & FL_FINALIZE) { + FIND_BITMAP(bmap, p); + if (!MARKED_IN_BITMAP(bmap, p)) { + if (BUILTIN_TYPE(p) != T_ZOMBIE) { + rb_set_flag_force((VALUE)p, T_ZOMBIE); + MARK_IN_BITMAP(bmap, p); + RDATA(p)->dfree = 0; + } + p->as.free.next = *final_list; + *final_list = p; + } + } + return ST_CONTINUE; +} + +struct force_finalize_list { + VALUE obj; + VALUE table; + struct force_finalize_list *next; +}; + +static int +force_chain_object(st_data_t key, st_data_t val, st_data_t arg) +{ + struct force_finalize_list **prev = (struct 
force_finalize_list **)arg; + struct force_finalize_list *curr = ALLOC(struct force_finalize_list); + curr->obj = key; + curr->table = val; + curr->next = *prev; + *prev = curr; + return ST_DELETE; +} + +static void +rb_gc_call_finalizer_at_exit_tmp(void) +{ + rb_objspace_call_finalizer(gc_inner->get_objspace()); +} + +void +rb_objspace_call_finalizer(rb_objspace_t *objspace) +{ + RVALUE *p, *pend; + RVALUE *final_list = 0; + size_t i; + + /* run finalizers */ + if (finalizer_table) { + do { + /* XXX: this loop will make no sense */ + /* because mark will not be removed */ + finalize_deferred(objspace); + mark_tbl(objspace, finalizer_table, 0); + st_foreach(finalizer_table, chain_finalized_object, + (st_data_t)&deferred_final_list); + } while (deferred_final_list); + /* force to run finalizer */ + while (finalizer_table->num_entries) { + struct force_finalize_list *list = 0; + st_foreach(finalizer_table, force_chain_object, (st_data_t)&list); + while (list) { + struct force_finalize_list *curr = list; + run_finalizer(objspace, curr->obj, rb_obj_id(curr->obj), curr->table); + list = curr->next; + xfree(curr); + } + } + st_free_table(finalizer_table); + finalizer_table = 0; + } + /* finalizers are part of garbage collection */ + during_gc++; + /* run data object's finalizers */ + for (i = 0; i < heaps_used; i++) { + p = heaps[i].slot; pend = p + heaps[i].limit; + while (p < pend) { + if (BUILTIN_TYPE(p) == T_DATA && + DATA_PTR(p) && RANY(p)->as.data.dfree && + RANY(p)->as.basic.klass != rb_cThread && RANY(p)->as.basic.klass != rb_cMutex) { + rb_set_flag_force((VALUE)p, 0); + if (RTYPEDDATA_P(p)) { + RDATA(p)->dfree = RANY(p)->as.typeddata.type->dfree; + } + if ((long)RANY(p)->as.data.dfree == -1) { + xfree(DATA_PTR(p)); + } + else if (RANY(p)->as.data.dfree) { + make_deferred(RANY(p)); + RANY(p)->as.free.next = final_list; + final_list = p; + } + } + else if (BUILTIN_TYPE(p) == T_FILE) { + if (RANY(p)->as.file.fptr) { + make_io_deferred(RANY(p)); + RANY(p)->as.free.next = final_list; + final_list = p; + } + } + p++; + } + } + during_gc = 0; + if (final_list) { + finalize_list(objspace, final_list); + } +} + +static void +rb_gc_tmp(void) +{ + rb_objspace_t *objspace = gc_inner->get_objspace(); + garbage_collect(objspace); + gc_finalize_deferred(objspace); +} + +/* + * call-seq: + * ObjectSpace._id2ref(object_id) -> an_object + * + * Converts an object id to a reference to the object. May not be + * called on an object id passed as a parameter to a finalizer. 
+ * + * s = "I am a string" #=> "I am a string" + * r = ObjectSpace._id2ref(s.object_id) #=> "I am a string" + * r == s #=> true + * + */ + +static VALUE +id2ref(VALUE obj, VALUE objid) +{ +#if SIZEOF_LONG == SIZEOF_VOIDP +#define NUM2PTR(x) NUM2ULONG(x) +#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP +#define NUM2PTR(x) NUM2ULL(x) +#endif + rb_objspace_t *objspace = gc_inner->get_objspace(); + VALUE ptr; + void *p0; + + rb_secure(4); + ptr = NUM2PTR(objid); + p0 = (void *)ptr; + + if (ptr == Qtrue) return Qtrue; + if (ptr == Qfalse) return Qfalse; + if (ptr == Qnil) return Qnil; + if (FIXNUM_P(ptr)) return (VALUE)ptr; + ptr = objid ^ FIXNUM_FLAG; /* unset FIXNUM_FLAG */ + + if ((ptr % sizeof(RVALUE)) == (4 << 2)) { + ID symid = ptr / sizeof(RVALUE); + if (rb_id2name(symid) == 0) + rb_raise(rb_eRangeError, "%p is not symbol id value", p0); + return ID2SYM(symid); + } + + if (!is_pointer_to_heap(objspace, (void *)ptr) || + BUILTIN_TYPE(ptr) > T_FIXNUM || BUILTIN_TYPE(ptr) == T_ICLASS) { + rb_raise(rb_eRangeError, "%p is not id value", p0); + } + if (BUILTIN_TYPE(ptr) == 0 || RBASIC(ptr)->klass == 0) { + rb_raise(rb_eRangeError, "%p is recycled object", p0); + } + return (VALUE)ptr; +} + +/* + * Document-method: __id__ + * Document-method: object_id + * + * call-seq: + * obj.__id__ => fixnum + * obj.object_id => fixnum + * + * Returns an integer identifier for obj. The same number will + * be returned on all calls to id for a given object, and + * no two active objects will share an id. + * Object#object_id is a different concept from the + * :name notation, which returns the symbol id of + * name. Replaces the deprecated Object#id. + */ + +/* + * call-seq: + * obj.hash => fixnum + * + * Generates a Fixnum hash value for this object. This + * function must have the property that a.eql?(b) implies + * a.hash == b.hash. The hash value is used by class + * Hash. Any hash value that exceeds the capacity of a + * Fixnum will be truncated before being used. + */ + +static VALUE +rb_obj_id_tmp(VALUE obj) +{ + /* + * 32-bit VALUE space + * MSB ------------------------ LSB + * false 00000000000000000000000000000000 + * true 00000000000000000000000000000010 + * nil 00000000000000000000000000000100 + * undef 00000000000000000000000000000110 + * symbol ssssssssssssssssssssssss00001110 + * object oooooooooooooooooooooooooooooo00 = 0 (mod sizeof(RVALUE)) + * fixnum fffffffffffffffffffffffffffffff1 + * + * object_id space + * LSB + * false 00000000000000000000000000000000 + * true 00000000000000000000000000000010 + * nil 00000000000000000000000000000100 + * undef 00000000000000000000000000000110 + * symbol 000SSSSSSSSSSSSSSSSSSSSSSSSSSS0 S...S % A = 4 (S...S = s...s * A + 4) + * object oooooooooooooooooooooooooooooo0 o...o % A = 0 + * fixnum fffffffffffffffffffffffffffffff1 bignum if required + * + * where A = sizeof(RVALUE)/4 + * + * sizeof(RVALUE) is + * 20 if 32-bit, double is 4-byte aligned + * 24 if 32-bit, double is 8-byte aligned + * 40 if 64-bit + */ + if (TYPE(obj) == T_SYMBOL) { + return (SYM2ID(obj) * sizeof(RVALUE) + (4 << 2)) | FIXNUM_FLAG; + } + if (SPECIAL_CONST_P(obj)) { + return LONG2NUM((SIGNED_VALUE)obj); + } + return (VALUE)((SIGNED_VALUE)obj|FIXNUM_FLAG); +} + +static int +set_zero(st_data_t key, st_data_t val, st_data_t arg) +{ + VALUE k = (VALUE)key; + VALUE hash = (VALUE)arg; + rb_hash_aset(hash, k, INT2FIX(0)); + return ST_CONTINUE; +} + +/* + * call-seq: + * ObjectSpace.count_objects([result_hash]) -> hash + * + * Counts objects for each type. 
+ * + * It returns a hash as: + * {:TOTAL=>10000, :FREE=>3011, :T_OBJECT=>6, :T_CLASS=>404, ...} + * + * If the optional argument, result_hash, is given, + * it is overwritten and returned. + * This is intended to avoid probe effect. + * + * The contents of the returned hash is implementation defined. + * It may be changed in future. + * + * This method is not expected to work except C Ruby. + * + */ + +static VALUE +count_objects(int argc, VALUE *argv, VALUE os) +{ + rb_objspace_t *objspace = gc_inner->get_objspace(); + size_t counts[T_MASK+1]; + size_t freed = 0; + size_t total = 0; + size_t i; + VALUE hash; + + if (rb_scan_args(argc, argv, "01", &hash) == 1) { + if (TYPE(hash) != T_HASH) + rb_raise(rb_eTypeError, "non-hash given"); + } + + for (i = 0; i <= T_MASK; i++) { + counts[i] = 0; + } + + for (i = 0; i < heaps_used; i++) { + RVALUE *p, *pend; + + p = heaps[i].slot; pend = p + heaps[i].limit; + for (;p < pend; p++) { + if (!IS_FREE_CELL(p)) { + counts[BUILTIN_TYPE(p)]++; + } + else { + freed++; + } + } + total += heaps[i].limit; + } + + if (hash == Qnil) { + hash = rb_hash_new(); + } + else if (!RHASH_EMPTY_P(hash)) { + st_foreach(RHASH_TBL(hash), set_zero, hash); + } + rb_hash_aset(hash, ID2SYM(rb_intern("TOTAL")), SIZET2NUM(total)); + rb_hash_aset(hash, ID2SYM(rb_intern("FREE")), SIZET2NUM(freed)); + + for (i = 0; i <= T_MASK; i++) { + VALUE type; + switch (i) { +#define COUNT_TYPE(t) case t: type = ID2SYM(rb_intern(#t)); break; + /* COUNT_TYPE(T_BITMAP); */ +#undef COUNT_TYPE + default: + type = gc_inner->builtin2sym(i); + break; + } + if (counts[i]) + rb_hash_aset(hash, type, SIZET2NUM(counts[i])); + } + + return hash; +} + +/* + * call-seq: + * GC.count -> Integer + * + * The number of times GC occurred. + * + * It returns the number of times GC occurred since the process started. + * + */ + +static VALUE +gc_count(VALUE self) +{ + return UINT2NUM((gc_inner->get_objspace())->count); +} + +#if CALC_EXACT_MALLOC_SIZE +/* + * call-seq: + * GC.malloc_allocated_size -> Integer + * + * The allocated size by malloc(). + * + * It returns the allocated size by malloc(). + */ + +static VALUE +gc_malloc_allocated_size(VALUE self) +{ + return UINT2NUM((gc_inner->get_objspace())->malloc_params.allocated_size); +} + +/* + * call-seq: + * GC.malloc_allocations -> Integer + * + * The number of allocated memory object by malloc(). + * + * It returns the number of allocated memory object by malloc(). 
+ */ + +static VALUE +gc_malloc_allocations(VALUE self) +{ + return UINT2NUM((gc_inner->get_objspace())->malloc_params.allocations); +} +#endif + +static VALUE +gc_profile_record_get(void) +{ + VALUE prof; + VALUE gc_profile = rb_ary_new(); + size_t i; + rb_objspace_t *objspace = (gc_inner->get_objspace()); + + if (!objspace->profile.run) { + return Qnil; + } + + for (i =0; i < objspace->profile.count; i++) { + prof = rb_hash_new(); + rb_hash_aset(prof, ID2SYM(rb_intern("GC_TIME")), DBL2NUM(objspace->profile.record[i].gc_time)); + rb_hash_aset(prof, ID2SYM(rb_intern("GC_INVOKE_TIME")), DBL2NUM(objspace->profile.record[i].gc_invoke_time)); + rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_USE_SIZE")), rb_uint2inum(objspace->profile.record[i].heap_use_size)); + rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_TOTAL_SIZE")), rb_uint2inum(objspace->profile.record[i].heap_total_size)); + rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_TOTAL_OBJECTS")), rb_uint2inum(objspace->profile.record[i].heap_total_objects)); +#if GC_PROFILE_MORE_DETAIL + rb_hash_aset(prof, ID2SYM(rb_intern("GC_MARK_TIME")), DBL2NUM(objspace->profile.record[i].gc_mark_time)); + rb_hash_aset(prof, ID2SYM(rb_intern("GC_SWEEP_TIME")), DBL2NUM(objspace->profile.record[i].gc_sweep_time)); + rb_hash_aset(prof, ID2SYM(rb_intern("ALLOCATE_INCREASE")), rb_uint2inum(objspace->profile.record[i].allocate_increase)); + rb_hash_aset(prof, ID2SYM(rb_intern("ALLOCATE_LIMIT")), rb_uint2inum(objspace->profile.record[i].allocate_limit)); + rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_USE_SLOTS")), rb_uint2inum(objspace->profile.record[i].heap_use_slots)); + rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_LIVE_OBJECTS")), rb_uint2inum(objspace->profile.record[i].heap_live_objects)); + rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_FREE_OBJECTS")), rb_uint2inum(objspace->profile.record[i].heap_free_objects)); + rb_hash_aset(prof, ID2SYM(rb_intern("HAVE_FINALIZE")), objspace->profile.record[i].have_finalize); +#endif + rb_ary_push(gc_profile, prof); + } + + return gc_profile; +} + +/* + * call-seq: + * GC::Profiler.result -> string + * + * Report profile data to string. + * + * It returns a string as: + * GC 1 invokes. 
+ * Index Invoke Time(sec) Use Size(byte) Total Size(byte) Total Object GC time(ms) + * 1 0.012 159240 212940 10647 0.00000000000001530000 + */ + +static VALUE +gc_profile_result(void) +{ + rb_objspace_t *objspace = gc_inner->get_objspace(); + VALUE record; + VALUE result; + int i; + + record = gc_profile_record_get(); + if (objspace->profile.run && objspace->profile.count) { + result = rb_sprintf("GC %d invokes.\n", NUM2INT(gc_count(0))); + rb_str_cat2(result, "Index Invoke Time(sec) Use Size(byte) Total Size(byte) Total Object GC Time(ms)\n"); + for (i = 0; i < (int)RARRAY_LEN(record); i++) { + VALUE r = RARRAY_PTR(record)[i]; + rb_str_catf(result, "%5d %19.3f %20d %20d %20d %30.20f\n", + i+1, NUM2DBL(rb_hash_aref(r, ID2SYM(rb_intern("GC_INVOKE_TIME")))), + NUM2INT(rb_hash_aref(r, ID2SYM(rb_intern("HEAP_USE_SIZE")))), + NUM2INT(rb_hash_aref(r, ID2SYM(rb_intern("HEAP_TOTAL_SIZE")))), + NUM2INT(rb_hash_aref(r, ID2SYM(rb_intern("HEAP_TOTAL_OBJECTS")))), + NUM2DBL(rb_hash_aref(r, ID2SYM(rb_intern("GC_TIME"))))*1000); + } +#if GC_PROFILE_MORE_DETAIL + rb_str_cat2(result, "\n\n"); + rb_str_cat2(result, "More detail.\n"); + rb_str_cat2(result, "Index Allocate Increase Allocate Limit Use Slot Have Finalize Mark Time(ms) Sweep Time(ms)\n"); + for (i = 0; i < (int)RARRAY_LEN(record); i++) { + VALUE r = RARRAY_PTR(record)[i]; + rb_str_catf(result, "%5d %17d %17d %9d %14s %25.20f %25.20f\n", + i+1, NUM2INT(rb_hash_aref(r, ID2SYM(rb_intern("ALLOCATE_INCREASE")))), + NUM2INT(rb_hash_aref(r, ID2SYM(rb_intern("ALLOCATE_LIMIT")))), + NUM2INT(rb_hash_aref(r, ID2SYM(rb_intern("HEAP_USE_SLOTS")))), + rb_hash_aref(r, ID2SYM(rb_intern("HAVE_FINALIZE")))? "true" : "false", + NUM2DBL(rb_hash_aref(r, ID2SYM(rb_intern("GC_MARK_TIME"))))*1000, + NUM2DBL(rb_hash_aref(r, ID2SYM(rb_intern("GC_SWEEP_TIME"))))*1000); + } +#endif + } + else { + result = rb_str_new2(""); + } + return result; +} + + +/* + * call-seq: + * GC::Profiler.report + * + * GC::Profiler.result display + * + */ + +static VALUE +gc_profile_report(int argc, VALUE *argv, VALUE self) +{ + VALUE out; + + if (argc == 0) { + out = rb_stdout; + } + else { + rb_scan_args(argc, argv, "01", &out); + } + rb_io_write(out, gc_profile_result()); + + return Qnil; +} + +/* + * The GC module provides an interface to Ruby's mark and + * sweep garbage collection mechanism. Some of the underlying methods + * are also available via the ObjectSpace module. 
+ */ + +static void +Init_GC_tmp(void) +{ + VALUE rb_mObSpace; + VALUE rb_mProfiler; + + rb_mGC = rb_define_module("GC"); + rb_define_singleton_method(rb_mGC, "start", rb_gc_start, 0); + rb_define_singleton_method(rb_mGC, "enable", rb_gc_enable, 0); + rb_define_singleton_method(rb_mGC, "disable", rb_gc_disable, 0); + rb_define_singleton_method(rb_mGC, "stress", gc_stress_get, 0); + rb_define_singleton_method(rb_mGC, "stress=", gc_stress_set, 1); + rb_define_singleton_method(rb_mGC, "count", gc_count, 0); + rb_define_method(rb_mGC, "garbage_collect", rb_gc_start, 0); + + rb_mProfiler = rb_define_module_under(rb_mGC, "Profiler"); + rb_define_singleton_method(rb_mProfiler, "enabled?", gc_profile_enable_get, 0); + rb_define_singleton_method(rb_mProfiler, "enable", gc_profile_enable, 0); + rb_define_singleton_method(rb_mProfiler, "disable", gc_profile_disable, 0); + rb_define_singleton_method(rb_mProfiler, "clear", gc_profile_clear, 0); + rb_define_singleton_method(rb_mProfiler, "result", gc_profile_result, 0); + rb_define_singleton_method(rb_mProfiler, "report", gc_profile_report, -1); + + rb_mObSpace = rb_define_module("ObjectSpace"); + rb_define_module_function(rb_mObSpace, "each_object", os_each_obj, -1); + rb_define_module_function(rb_mObSpace, "garbage_collect", rb_gc_start, 0); + + rb_define_module_function(rb_mObSpace, "define_finalizer", define_final, -1); + rb_define_module_function(rb_mObSpace, "undefine_finalizer", undefine_final, 1); + + rb_define_module_function(rb_mObSpace, "_id2ref", id2ref, 1); + + rb_define_method(rb_mKernel, "__id__", rb_obj_id, 0); + rb_define_method(rb_mKernel, "object_id", rb_obj_id, 0); + + rb_define_module_function(rb_mObSpace, "count_objects", count_objects, -1); +#if CALC_EXACT_MALLOC_SIZE + rb_define_singleton_method(rb_mGC, "malloc_allocated_size", gc_malloc_allocated_size, 0); + rb_define_singleton_method(rb_mGC, "malloc_allocations", gc_malloc_allocations, 0); +#endif +} + +static rb_gc_t gc_tmp = { + ruby_xfree_tmp, + ruby_xmalloc_tmp, + ruby_xmalloc2_tmp, + ruby_xcalloc_tmp, + ruby_xrealloc_tmp, + ruby_xrealloc2_tmp, + + sizeof(RVALUE), + sizeof(struct heaps_slot), + rb_objspace_alloc_tmp, + rb_objspace_free_tmp, + rb_gc_register_address_tmp, + rb_gc_enable_tmp, + rb_gc_disable_tmp, + rb_gc_unregister_address_tmp, + rb_during_gc_tmp, + rb_newobj_tmp, + rb_set_flag_force_tmp, + rb_data_object_alloc_tmp, + rb_data_typed_object_alloc_tmp, + rb_objspace_data_type_memsize_tmp, + rb_objspace_data_type_name_tmp, +#if !STACK_GROW_DIRECTION + ruby_get_stack_grow_direction_tmp, +#else + NULL, +#endif + ruby_stack_check_tmp, + rb_gc_mark_locations_tmp, + rb_mark_set_tmp, + rb_free_m_table_tmp, + rb_mark_tbl_tmp, + rb_gc_mark_maybe_tmp, + rb_gc_force_recycle_tmp, + rb_garbage_collect_tmp, + Init_heap_tmp, + rb_objspace_each_objects_tmp, + rb_gc_copy_finalizer_tmp, + rb_gc_call_finalizer_at_exit_tmp, + rb_gc_tmp, + rb_obj_id_tmp, + Init_GC_tmp, + + mark_locations_array, + mark_tbl, + mark_m_tbl, + gc_mark, + gc_set_mark_flag, + garbage_collect, + gc_finalize_deferred, + mark_hash, +}; + +void +Init_gc_bmp(void) +{ + gc_inner = rb_gc_set(&gc_tmp); +} Index: gc.c =================================================================== --- gc.c (revision 27126) +++ gc.c (working copy) @@ -368,14 +368,32 @@ int *ruby_initial_gc_stress_ptr = &rb_objspace.gc_stress; static void rb_objspace_call_finalizer(rb_objspace_t *objspace); +#include "ruby/gc_ext.h" + +rb_gc_t *gc; +int gc_decided_p = 0; + +static rb_objspace_t * +get_objspace(void) +{ + return &rb_objspace; +} + 
#if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE rb_objspace_t * rb_objspace_alloc(void) { + rb_objspace_t *objspace = gc->rb_objspace_alloc(); + ruby_gc_stress = ruby_initial_gc_stress; + return objspace; +} + +static rb_objspace_t * +rb_objspace_alloc_default(void) +{ rb_objspace_t *objspace = malloc(sizeof(rb_objspace_t)); memset(objspace, 0, sizeof(*objspace)); malloc_limit = GC_MALLOC_LIMIT; - ruby_gc_stress = ruby_initial_gc_stress; return objspace; } @@ -383,7 +401,13 @@ rb_objspace_alloc(void) void rb_objspace_free(rb_objspace_t *objspace) { - rb_objspace_call_finalizer(objspace); + gc->rb_objspace_free(objspace); +} + +static void +rb_objspace_free_default(rb_objspace_t *objspace) +{ + rb_objspace_call_finalizer(objspace); if (objspace->profile.record) { free(objspace->profile.record); objspace->profile.record = 0; @@ -437,7 +461,7 @@ static int garbage_collect(rb_objspace_t *objspace); void rb_global_variable(VALUE *var) { - rb_gc_register_address(var); + gc->rb_gc_register_address(var); } static void * @@ -468,7 +492,7 @@ ruby_memerror(void) void rb_memerror(void) { - rb_thread_t *th = GET_THREAD(); + rb_thread_t *th = GET_THREAD(); if (!nomem_error || (rb_thread_raised_p(th, RAISED_NOMEMORY) && rb_safe_level() < 4)) { fprintf(stderr, "[FATAL] failed to allocate memory\n"); @@ -612,7 +636,7 @@ negative_size_allocation_error(const char *msg) static void * gc_with_gvl(void *ptr) { - return (void *)(VALUE)garbage_collect((rb_objspace_t *)ptr); + return (void *)(VALUE)gc->garbage_collect((rb_objspace_t *)ptr); } static int @@ -620,7 +644,7 @@ garbage_collect_with_gvl(rb_objspace_t *objspace) { if (dont_gc) return TRUE; if (ruby_thread_has_gvl_p()) { - return garbage_collect(objspace); + return gc->garbage_collect(objspace); } else { if (ruby_native_thread_p()) { @@ -734,12 +758,24 @@ vm_xfree(rb_objspace_t *objspace, void *ptr) void * ruby_xmalloc(size_t size) { + return gc->ruby_xmalloc(size); +} + +static void * +ruby_xmalloc_default(size_t size) +{ return vm_xmalloc(&rb_objspace, size); } void * ruby_xmalloc2(size_t n, size_t size) { + return gc->ruby_xmalloc2(n, size); +} + +static void * +ruby_xmalloc2_default(size_t n, size_t size) +{ size_t len = size * n; if (n != 0 && size != len / n) { rb_raise(rb_eArgError, "malloc: possible integer overflow"); @@ -750,6 +786,12 @@ ruby_xmalloc2(size_t n, size_t size) void * ruby_xcalloc(size_t n, size_t size) { + return gc->ruby_xcalloc(n, size); +} + +static void * +ruby_xcalloc_default(size_t n, size_t size) +{ void *mem = ruby_xmalloc2(n, size); memset(mem, 0, n * size); @@ -759,12 +801,24 @@ ruby_xcalloc(size_t n, size_t size) void * ruby_xrealloc(void *ptr, size_t size) { + return gc->ruby_xrealloc(ptr, size); +} + +static void * +ruby_xrealloc_default(void *ptr, size_t size) +{ return vm_xrealloc(&rb_objspace, ptr, size); } void * ruby_xrealloc2(void *ptr, size_t n, size_t size) { + return gc->ruby_xrealloc2(ptr, n, size); +} + +static void * +ruby_xrealloc2_default(void *ptr, size_t n, size_t size) +{ size_t len = size * n; if (n != 0 && size != len / n) { rb_raise(rb_eArgError, "realloc: possible integer overflow"); @@ -775,6 +829,12 @@ ruby_xrealloc2(void *ptr, size_t n, size_t size) void ruby_xfree(void *x) { + gc->ruby_xfree(x); +} + +static void +ruby_xfree_default(void *x) +{ if (x) vm_xfree(&rb_objspace, x); } @@ -796,6 +856,12 @@ ruby_xfree(void *x) VALUE rb_gc_enable(void) { + return gc->rb_gc_enable(); +} + +static VALUE +rb_gc_enable_default(void) +{ rb_objspace_t *objspace = &rb_objspace; int old = dont_gc; @@ 
-818,6 +884,12 @@ rb_gc_enable(void) VALUE rb_gc_disable(void) { + return gc->rb_gc_disable(); +} + +static VALUE +rb_gc_disable_default(void) +{ rb_objspace_t *objspace = &rb_objspace; int old = dont_gc; @@ -837,6 +909,12 @@ rb_gc_register_mark_object(VALUE obj) void rb_gc_register_address(VALUE *addr) { + gc->rb_gc_register_address(addr); +} + +static void +rb_gc_register_address_default(VALUE *addr) +{ rb_objspace_t *objspace = &rb_objspace; struct gc_list *tmp; @@ -849,6 +927,12 @@ rb_gc_register_address(VALUE *addr) void rb_gc_unregister_address(VALUE *addr) { + gc->rb_gc_unregister_address(addr); +} + +static void +rb_gc_unregister_address_default(VALUE *addr) +{ rb_objspace_t *objspace = &rb_objspace; struct gc_list *tmp = global_List; @@ -1040,7 +1124,7 @@ rb_fill_value_cache(rb_thread_t *th) VALUE v = rb_newobj_from_heap(objspace); th->value_cache[i] = v; - RBASIC(v)->flags = FL_MARK; + rb_set_flag_force(v, FL_MARK); } th->value_cache_ptr = &th->value_cache[0]; rv = rb_newobj_from_heap(objspace); @@ -1052,6 +1136,12 @@ rb_fill_value_cache(rb_thread_t *th) int rb_during_gc(void) { + return gc->rb_during_gc(); +} + +static int +rb_during_gc_default(void) +{ rb_objspace_t *objspace = &rb_objspace; return during_gc; } @@ -1059,6 +1149,12 @@ rb_during_gc(void) VALUE rb_newobj(void) { + return gc->rb_newobj(); +} + +static VALUE +rb_newobj_default(void) +{ #if USE_VALUE_CACHE || (defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE) rb_thread_t *th = GET_THREAD(); #endif @@ -1079,7 +1175,7 @@ rb_newobj(void) #if USE_VALUE_CACHE if (v) { - RBASIC(v)->flags = 0; + rb_set_flag_force(v, 0); th->value_cache_ptr++; } else { @@ -1111,9 +1207,28 @@ rb_node_newnode(enum node_type type, VALUE a0, VALUE a1, VALUE a2) return n; } +void +rb_set_flag_force(VALUE obj, VALUE t) +{ + gc->rb_set_flag_force(obj, t); +} + +static void +rb_set_flag_force_default(VALUE obj, VALUE t) +{ + RBASIC(obj)->flags = t; +} + VALUE rb_data_object_alloc(VALUE klass, void *datap, RUBY_DATA_FUNC dmark, RUBY_DATA_FUNC dfree) { + return gc->rb_data_object_alloc(klass, datap, dmark, dfree); +} + +static VALUE +rb_data_object_alloc_default(VALUE klass, void *datap, RUBY_DATA_FUNC dmark, + RUBY_DATA_FUNC dfree) +{ NEWOBJ(data, struct RData); if (klass) Check_Type(klass, T_CLASS); OBJSETUP(data, klass, T_DATA); @@ -1127,6 +1242,13 @@ rb_data_object_alloc(VALUE klass, void *datap, RUBY_DATA_FUNC dmark, RUBY_DATA_F VALUE rb_data_typed_object_alloc(VALUE klass, void *datap, const rb_data_type_t *type) { + return gc->rb_data_typed_object_alloc(klass, datap, type); +} + +static VALUE +rb_data_typed_object_alloc_default(VALUE klass, void *datap, + const rb_data_type_t *type) +{ NEWOBJ(data, struct RTypedData); if (klass) Check_Type(klass, T_CLASS); @@ -1143,6 +1265,12 @@ rb_data_typed_object_alloc(VALUE klass, void *datap, const rb_data_type_t *type) size_t rb_objspace_data_type_memsize(VALUE obj) { + return gc->rb_objspace_data_type_memsize(obj); +} + +static size_t +rb_objspace_data_type_memsize_default(VALUE obj) +{ if (RTYPEDDATA_P(obj)) { return RTYPEDDATA_TYPE(obj)->dsize(RTYPEDDATA_DATA(obj)); } @@ -1154,6 +1282,12 @@ rb_objspace_data_type_memsize(VALUE obj) const char * rb_objspace_data_type_name(VALUE obj) { + return gc->rb_objspace_data_type_name(obj); +} + +static const char * +rb_objspace_data_type_name_default(VALUE obj) +{ if (RTYPEDDATA_P(obj)) { return RTYPEDDATA_TYPE(obj)->wrap_struct_name; } @@ -1185,6 +1319,12 @@ int ruby_stack_grow_direction; int ruby_get_stack_grow_direction(volatile VALUE *addr) { + return 
gc->ruby_get_stack_grow_direction(addr); +} + +static int +ruby_get_stack_grow_direction_default(volatile VALUE *addr) +{ VALUE *end; SET_MACHINE_STACK_END(&end); @@ -1223,6 +1363,12 @@ stack_check(void) int ruby_stack_check(void) { + return gc->ruby_stack_check(); +} + +static int +ruby_stack_check_default(void) +{ #if defined(POSIX_SIGNAL) && defined(SIGSEGV) && defined(HAVE_SIGALTSTACK) return 0; #else @@ -1285,16 +1431,16 @@ is_pointer_to_heap(rb_objspace_t *objspace, void *ptr) register size_t hi, lo, mid; if (p < lomem || p > himem) return FALSE; - if ((VALUE)p % sizeof(RVALUE) != 0) return FALSE; + if ((VALUE)p % gc->rval_size != 0) return FALSE; /* check if p looks like a pointer using bsearch*/ lo = 0; hi = heaps_used; while (lo < hi) { mid = (lo + hi) / 2; - heap = &heaps[mid]; + heap = (struct heaps_slot *)((char*)heaps + mid * gc->slot_size); if (heap->slot <= p) { - if (p < heap->slot + heap->limit) + if ((char*)p < (char*)heap->slot + heap->limit * gc->rval_size) return TRUE; lo = mid + 1; } @@ -1326,12 +1472,18 @@ gc_mark_locations(rb_objspace_t *objspace, VALUE *start, VALUE *end) if (end <= start) return; n = end - start; - mark_locations_array(objspace, start, n); + gc->mark_locations_array(objspace, start, n); } void rb_gc_mark_locations(VALUE *start, VALUE *end) { + return gc->rb_gc_mark_locations(start, end); +} + +static void +rb_gc_mark_locations_default(VALUE *start, VALUE *end) +{ gc_mark_locations(&rb_objspace, start, end); } @@ -1381,6 +1533,12 @@ mark_set(rb_objspace_t *objspace, st_table *tbl, int lev) void rb_mark_set(st_table *tbl) { + gc->rb_mark_set(tbl); +} + +static void +rb_mark_set_default(st_table *tbl) +{ mark_set(&rb_objspace, tbl, 0); } @@ -1406,7 +1564,7 @@ mark_hash(rb_objspace_t *objspace, st_table *tbl, int lev) void rb_mark_hash(st_table *tbl) { - mark_hash(&rb_objspace, tbl, 0); + gc->mark_hash(&rb_objspace, tbl, 0); } static void @@ -1414,14 +1572,14 @@ mark_method_entry(rb_objspace_t *objspace, const rb_method_entry_t *me, int lev) { const rb_method_definition_t *def = me->def; - gc_mark(objspace, me->klass, lev); + gc->gc_mark(objspace, me->klass, lev); if (!def) return; switch (def->type) { case VM_METHOD_TYPE_ISEQ: - gc_mark(objspace, def->body.iseq->self, lev); + gc->gc_mark(objspace, def->body.iseq->self, lev); break; case VM_METHOD_TYPE_BMETHOD: - gc_mark(objspace, def->body.proc, lev); + gc->gc_mark(objspace, def->body.proc, lev); break; case VM_METHOD_TYPE_ATTRSET: case VM_METHOD_TYPE_IVAR: @@ -1466,6 +1624,12 @@ free_method_entry_i(ID key, rb_method_entry_t *me, st_data_t data) void rb_free_m_table(st_table *tbl) { + gc->rb_free_m_table(tbl); +} + +static void +rb_free_m_table_default(st_table *tbl) +{ st_foreach(tbl, free_method_entry_i, 0); st_free_table(tbl); } @@ -1473,12 +1637,24 @@ rb_free_m_table(st_table *tbl) void rb_mark_tbl(st_table *tbl) { + gc->rb_mark_tbl(tbl); +} + +static void +rb_mark_tbl_default(st_table *tbl) +{ mark_tbl(&rb_objspace, tbl, 0); } void rb_gc_mark_maybe(VALUE obj) { + gc->rb_gc_mark_maybe(obj); +} + +static void +rb_gc_mark_maybe_default(VALUE obj) +{ if (is_pointer_to_heap(&rb_objspace, (void *)obj)) { gc_mark(&rb_objspace, obj, 0); } @@ -1515,7 +1691,16 @@ gc_mark(rb_objspace_t *objspace, VALUE ptr, int lev) void rb_gc_mark(VALUE ptr) { - gc_mark(&rb_objspace, ptr, 0); + gc->gc_mark(&rb_objspace, ptr, 0); +} + +static int +gc_set_mark_flag(register RVALUE *obj) +{ + if (obj->as.basic.flags == 0) return 1; /* free cell */ + if (obj->as.basic.flags & FL_MARK) return 1; /* already marked */ + 
obj->as.basic.flags |= FL_MARK; + return 0; } static void @@ -1528,9 +1713,7 @@ gc_mark_children(rb_objspace_t *objspace, VALUE ptr, int lev) again: obj = RANY(ptr); if (rb_special_const_p(ptr)) return; /* special const not marked */ - if (obj->as.basic.flags == 0) return; /* free cell */ - if (obj->as.basic.flags & FL_MARK) return; /* already marked */ - obj->as.basic.flags |= FL_MARK; + if (gc->gc_set_mark_flag(obj)) return; marking: if (FL_TEST(obj, FL_EXIVAR)) { @@ -1554,7 +1737,7 @@ gc_mark_children(rb_objspace_t *objspace, VALUE ptr, int lev) case NODE_RESBODY: case NODE_CLASS: case NODE_BLOCK_PASS: - gc_mark(objspace, (VALUE)obj->as.node.u2.node, lev); + gc->gc_mark(objspace, (VALUE)obj->as.node.u2.node, lev); /* fall through */ case NODE_BLOCK: /* 1,3 */ case NODE_OPTBLOCK: @@ -1568,7 +1751,7 @@ gc_mark_children(rb_objspace_t *objspace, VALUE ptr, int lev) case NODE_DEFS: case NODE_OP_ASGN1: case NODE_ARGS: - gc_mark(objspace, (VALUE)obj->as.node.u1.node, lev); + gc->gc_mark(objspace, (VALUE)obj->as.node.u1.node, lev); /* fall through */ case NODE_SUPER: /* 3 */ case NODE_FCALL: @@ -1595,7 +1778,7 @@ gc_mark_children(rb_objspace_t *objspace, VALUE ptr, int lev) case NODE_ALIAS: case NODE_VALIAS: case NODE_ARGSCAT: - gc_mark(objspace, (VALUE)obj->as.node.u1.node, lev); + gc->gc_mark(objspace, (VALUE)obj->as.node.u1.node, lev); /* fall through */ case NODE_GASGN: /* 2 */ case NODE_LASGN: @@ -1631,7 +1814,7 @@ gc_mark_children(rb_objspace_t *objspace, VALUE ptr, int lev) case NODE_SCOPE: /* 2,3 */ case NODE_CDECL: case NODE_OPT_ARG: - gc_mark(objspace, (VALUE)obj->as.node.u3.node, lev); + gc->gc_mark(objspace, (VALUE)obj->as.node.u3.node, lev); ptr = (VALUE)obj->as.node.u2.node; goto again; @@ -1655,7 +1838,7 @@ gc_mark_children(rb_objspace_t *objspace, VALUE ptr, int lev) case NODE_BLOCK_ARG: break; case NODE_ALLOCA: - mark_locations_array(objspace, + gc->mark_locations_array(objspace, (VALUE*)obj->as.node.u1.value, obj->as.node.u3.cnt); ptr = (VALUE)obj->as.node.u2.node; @@ -1663,25 +1846,25 @@ gc_mark_children(rb_objspace_t *objspace, VALUE ptr, int lev) default: /* unlisted NODE */ if (is_pointer_to_heap(objspace, obj->as.node.u1.node)) { - gc_mark(objspace, (VALUE)obj->as.node.u1.node, lev); + gc->gc_mark(objspace, (VALUE)obj->as.node.u1.node, lev); } if (is_pointer_to_heap(objspace, obj->as.node.u2.node)) { - gc_mark(objspace, (VALUE)obj->as.node.u2.node, lev); + gc->gc_mark(objspace, (VALUE)obj->as.node.u2.node, lev); } if (is_pointer_to_heap(objspace, obj->as.node.u3.node)) { - gc_mark(objspace, (VALUE)obj->as.node.u3.node, lev); + gc->gc_mark(objspace, (VALUE)obj->as.node.u3.node, lev); } } return; /* no need to mark class. 
*/ } - gc_mark(objspace, obj->as.basic.klass, lev); + gc->gc_mark(objspace, obj->as.basic.klass, lev); switch (BUILTIN_TYPE(obj)) { case T_ICLASS: case T_CLASS: case T_MODULE: - mark_m_tbl(objspace, RCLASS_M_TBL(obj), lev); - mark_tbl(objspace, RCLASS_IV_TBL(obj), lev); + gc->mark_m_tbl(objspace, RCLASS_M_TBL(obj), lev); + gc->mark_tbl(objspace, RCLASS_IV_TBL(obj), lev); ptr = RCLASS_SUPER(obj); goto again; @@ -1694,13 +1877,13 @@ gc_mark_children(rb_objspace_t *objspace, VALUE ptr, int lev) long i, len = RARRAY_LEN(obj); VALUE *ptr = RARRAY_PTR(obj); for (i=0; i < len; i++) { - gc_mark(objspace, *ptr++, lev); + gc->gc_mark(objspace, *ptr++, lev); } } break; case T_HASH: - mark_hash(objspace, obj->as.hash.ntbl, lev); + gc->mark_hash(objspace, obj->as.hash.ntbl, lev); ptr = obj->as.hash.ifnone; goto again; @@ -1726,24 +1909,24 @@ gc_mark_children(rb_objspace_t *objspace, VALUE ptr, int lev) long i, len = ROBJECT_NUMIV(obj); VALUE *ptr = ROBJECT_IVPTR(obj); for (i = 0; i < len; i++) { - gc_mark(objspace, *ptr++, lev); + gc->gc_mark(objspace, *ptr++, lev); } } break; case T_FILE: if (obj->as.file.fptr) { - gc_mark(objspace, obj->as.file.fptr->pathv, lev); - gc_mark(objspace, obj->as.file.fptr->tied_io_for_writing, lev); - gc_mark(objspace, obj->as.file.fptr->writeconv_asciicompat, lev); - gc_mark(objspace, obj->as.file.fptr->writeconv_pre_ecopts, lev); - gc_mark(objspace, obj->as.file.fptr->encs.ecopts, lev); - gc_mark(objspace, obj->as.file.fptr->write_lock, lev); + gc->gc_mark(objspace, obj->as.file.fptr->pathv, lev); + gc->gc_mark(objspace, obj->as.file.fptr->tied_io_for_writing, lev); + gc->gc_mark(objspace, obj->as.file.fptr->writeconv_asciicompat, lev); + gc->gc_mark(objspace, obj->as.file.fptr->writeconv_pre_ecopts, lev); + gc->gc_mark(objspace, obj->as.file.fptr->encs.ecopts, lev); + gc->gc_mark(objspace, obj->as.file.fptr->write_lock, lev); } break; case T_REGEXP: - gc_mark(objspace, obj->as.regexp.src, lev); + gc->gc_mark(objspace, obj->as.regexp.src, lev); break; case T_FLOAT: @@ -1752,7 +1935,7 @@ gc_mark_children(rb_objspace_t *objspace, VALUE ptr, int lev) break; case T_MATCH: - gc_mark(objspace, obj->as.match.regexp, lev); + gc->gc_mark(objspace, obj->as.match.regexp, lev); if (obj->as.match.str) { ptr = obj->as.match.str; goto again; @@ -1760,13 +1943,13 @@ gc_mark_children(rb_objspace_t *objspace, VALUE ptr, int lev) break; case T_RATIONAL: - gc_mark(objspace, obj->as.rational.num, lev); - gc_mark(objspace, obj->as.rational.den, lev); + gc->gc_mark(objspace, obj->as.rational.num, lev); + gc->gc_mark(objspace, obj->as.rational.den, lev); break; case T_COMPLEX: - gc_mark(objspace, obj->as.complex.real, lev); - gc_mark(objspace, obj->as.complex.imag, lev); + gc->gc_mark(objspace, obj->as.complex.real, lev); + gc->gc_mark(objspace, obj->as.complex.imag, lev); break; case T_STRUCT: @@ -1775,7 +1958,7 @@ gc_mark_children(rb_objspace_t *objspace, VALUE ptr, int lev) VALUE *ptr = RSTRUCT_PTR(obj); while (len--) { - gc_mark(objspace, *ptr++, lev); + gc->gc_mark(objspace, *ptr++, lev); } } break; @@ -1793,7 +1976,7 @@ static inline void add_freelist(rb_objspace_t *objspace, RVALUE *p) { VALGRIND_MAKE_MEM_UNDEFINED((void*)p, sizeof(RVALUE)); - p->as.free.flags = 0; + rb_set_flag_force((VALUE)p, 0); p->as.free.next = freelist; freelist = p; } @@ -1850,6 +2033,12 @@ free_unused_heaps(rb_objspace_t *objspace) } static void +ruby_vm_set_finalizer_interrupt(void) +{ + RUBY_VM_SET_FINALIZER_INTERRUPT(GET_THREAD()); +} + +static void gc_sweep(rb_objspace_t *objspace) { RVALUE *p, *pend, 
*final_list; @@ -1881,7 +2070,7 @@ gc_sweep(rb_objspace_t *objspace) ((deferred = obj_free(objspace, (VALUE)p)) || ((FL_TEST(p, FL_FINALIZE)) && need_call_final))) { if (!deferred) { - p->as.free.flags = T_ZOMBIE; + rb_set_flag_force((VALUE)p, T_ZOMBIE); RDATA(p)->dfree = 0; } p->as.free.flags |= FL_MARK; @@ -1946,6 +2135,12 @@ gc_sweep(rb_objspace_t *objspace) void rb_gc_force_recycle(VALUE p) { + gc->rb_gc_force_recycle(p); +} + +static void +rb_gc_force_recycle_default(VALUE p) +{ rb_objspace_t *objspace = &rb_objspace; add_freelist(objspace, (RVALUE *)p); } @@ -1953,7 +2148,7 @@ rb_gc_force_recycle(VALUE p) static inline void make_deferred(RVALUE *p) { - p->as.basic.flags = (p->as.basic.flags & ~T_MASK) | T_ZOMBIE; + rb_set_flag_force((VALUE)p, (p->as.basic.flags & ~T_MASK) | T_ZOMBIE); } static inline void @@ -2117,27 +2312,36 @@ mark_current_machine_context(rb_objspace_t *objspace, rb_thread_t *th) SET_STACK_END; GET_STACK_BOUNDS(stack_start, stack_end, 1); - mark_locations_array(objspace, - (VALUE*)save_regs_gc_mark, - sizeof(save_regs_gc_mark) / sizeof(VALUE)); + gc->mark_locations_array(objspace, + (VALUE*)save_regs_gc_mark, + sizeof(save_regs_gc_mark) / sizeof(VALUE)); rb_gc_mark_locations(stack_start, stack_end); #ifdef __ia64 rb_gc_mark_locations(th->machine_register_stack_start, th->machine_register_stack_end); #endif #if defined(__mc68000__) - mark_locations_array((VALUE*)((char*)STACK_END + 2), - (STACK_START - STACK_END)); + gc->mark_locations_array((VALUE*)((char*)STACK_END + 2), + (STACK_START - STACK_END)); #endif } void rb_gc_mark_encodings(void); +static void +gc_mark_core(rb_objspace_t *objspace) +{ + rb_thread_t *th = GET_THREAD(); + SET_STACK_END; + init_mark_stack(objspace); + th->vm->self ? rb_gc_mark(th->vm->self) : rb_vm_mark(th->vm); + mark_current_machine_context(objspace, th); +} + static int garbage_collect(rb_objspace_t *objspace) { struct gc_list *list; - rb_thread_t *th = GET_THREAD(); INIT_GC_PROF_PARAMS; if (GC_NOTIFY) printf("start garbage_collect()\n"); @@ -2160,18 +2364,13 @@ garbage_collect(rb_objspace_t *objspace) GC_PROF_TIMER_START; GC_PROF_MARK_TIMER_START; - SET_STACK_END; - - init_mark_stack(objspace); - th->vm->self ? 
rb_gc_mark(th->vm->self) : rb_vm_mark(th->vm); + gc_mark_core(objspace); if (finalizer_table) { mark_tbl(objspace, finalizer_table, 0); } - mark_current_machine_context(objspace, th); - rb_gc_mark_threads(); rb_gc_mark_symbols(); rb_gc_mark_encodings(); @@ -2213,6 +2412,12 @@ garbage_collect(rb_objspace_t *objspace) int rb_garbage_collect(void) { + return gc->rb_garbage_collect(); +} + +static int +rb_garbage_collect_default(void) +{ return garbage_collect(&rb_objspace); } @@ -2289,6 +2494,12 @@ Init_stack(volatile VALUE *addr) void Init_heap(void) { + gc->Init_heap(); +} + +static void +Init_heap_default(void) +{ init_heap(&rb_objspace); } @@ -2333,6 +2544,14 @@ rb_objspace_each_objects(int (*callback)(void *vstart, void *vend, size_t stride, void *d), void *data) { + gc->rb_objspace_each_objects(callback,data); +} + +static void +rb_objspace_each_objects_default(int (*callback)(void *vstart, void *vend, + size_t stride, void *d), + void *data) +{ size_t i; RVALUE *membase = 0; RVALUE *pstart, *pend; @@ -2374,6 +2593,22 @@ struct os_each_struct { }; static int +os_obj_of_check_type(RVALUE *p) +{ + switch (BUILTIN_TYPE(p)) { + case T_NONE: + case T_ICLASS: + case T_NODE: + case T_ZOMBIE: + return 0; + case T_CLASS: + if (FL_TEST(p, FL_SINGLETON)) + return 0; + } + return 1; +} + +static int os_obj_of_i(void *vstart, void *vend, size_t stride, void *data) { struct os_each_struct *oes = (struct os_each_struct *)data; @@ -2382,16 +2617,7 @@ os_obj_of_i(void *vstart, void *vend, size_t stride, void *data) for (; p != pend; p++) { if (p->as.basic.flags) { - switch (BUILTIN_TYPE(p)) { - case T_NONE: - case T_ICLASS: - case T_NODE: - case T_ZOMBIE: - continue; - case T_CLASS: - if (FL_TEST(p, FL_SINGLETON)) - continue; - default: + if (os_obj_of_check_type(p)) { if (!p->as.basic.klass) continue; v = (VALUE)p; if (!oes->of || rb_obj_is_kind_of(v, oes->of)) { @@ -2535,6 +2761,12 @@ define_final(int argc, VALUE *argv, VALUE os) void rb_gc_copy_finalizer(VALUE dest, VALUE obj) { + gc->rb_gc_copy_finalizer(dest, obj); +} + +static void +rb_gc_copy_finalizer_default(VALUE dest, VALUE obj) +{ rb_objspace_t *objspace = &rb_objspace; VALUE table; @@ -2620,7 +2852,7 @@ gc_finalize_deferred(rb_objspace_t *objspace) void rb_gc_finalize_deferred(void) { - gc_finalize_deferred(&rb_objspace); + gc->gc_finalize_deferred(&rb_objspace); } static int @@ -2629,7 +2861,7 @@ chain_finalized_object(st_data_t key, st_data_t val, st_data_t arg) RVALUE *p = (RVALUE *)key, **final_list = (RVALUE **)arg; if ((p->as.basic.flags & (FL_FINALIZE|FL_MARK)) == FL_FINALIZE) { if (BUILTIN_TYPE(p) != T_ZOMBIE) { - p->as.free.flags = FL_MARK | T_ZOMBIE; /* remain marked */ + rb_set_flag_force((VALUE)p, FL_MARK | T_ZOMBIE); /* remain marked */ RDATA(p)->dfree = 0; } p->as.free.next = *final_list; @@ -2659,6 +2891,12 @@ force_chain_object(st_data_t key, st_data_t val, st_data_t arg) void rb_gc_call_finalizer_at_exit(void) { + gc->rb_gc_call_finalizer_at_exit(); +} + +static void +rb_gc_call_finalizer_at_exit_default(void) +{ rb_objspace_call_finalizer(&rb_objspace); } @@ -2702,7 +2940,7 @@ rb_objspace_call_finalizer(rb_objspace_t *objspace) if (BUILTIN_TYPE(p) == T_DATA && DATA_PTR(p) && RANY(p)->as.data.dfree && RANY(p)->as.basic.klass != rb_cThread && RANY(p)->as.basic.klass != rb_cMutex) { - p->as.free.flags = 0; + rb_set_flag_force((VALUE)p, 0); if (RTYPEDDATA_P(p)) { RDATA(p)->dfree = RANY(p)->as.typeddata.type->dfree; } @@ -2734,6 +2972,12 @@ rb_objspace_call_finalizer(rb_objspace_t *objspace) void rb_gc(void) { + gc->rb_gc(); +} 
+ +static void +rb_gc_default(void) +{ rb_objspace_t *objspace = &rb_objspace; garbage_collect(objspace); gc_finalize_deferred(objspace); @@ -2821,6 +3065,12 @@ id2ref(VALUE obj, VALUE objid) VALUE rb_obj_id(VALUE obj) { + return gc->rb_obj_id(obj); +} + +static VALUE +rb_obj_id_default(VALUE obj) +{ /* * 32-bit VALUE space * MSB ------------------------ LSB @@ -2867,6 +3117,40 @@ set_zero(st_data_t key, st_data_t val, st_data_t arg) return ST_CONTINUE; } +static VALUE +builtin2sym(VALUE i) +{ + switch (i) { +#define COUNT_TYPE(t) case t: return ID2SYM(rb_intern(#t)); break; + COUNT_TYPE(T_NONE); + COUNT_TYPE(T_OBJECT); + COUNT_TYPE(T_CLASS); + COUNT_TYPE(T_MODULE); + COUNT_TYPE(T_FLOAT); + COUNT_TYPE(T_STRING); + COUNT_TYPE(T_REGEXP); + COUNT_TYPE(T_ARRAY); + COUNT_TYPE(T_HASH); + COUNT_TYPE(T_STRUCT); + COUNT_TYPE(T_BIGNUM); + COUNT_TYPE(T_FILE); + COUNT_TYPE(T_DATA); + COUNT_TYPE(T_MATCH); + COUNT_TYPE(T_COMPLEX); + COUNT_TYPE(T_RATIONAL); + COUNT_TYPE(T_NIL); + COUNT_TYPE(T_TRUE); + COUNT_TYPE(T_FALSE); + COUNT_TYPE(T_SYMBOL); + COUNT_TYPE(T_FIXNUM); + COUNT_TYPE(T_UNDEF); + COUNT_TYPE(T_NODE); + COUNT_TYPE(T_ICLASS); + COUNT_TYPE(T_ZOMBIE); +#undef COUNT_TYPE + } + return INT2NUM(i); +} /* * call-seq: * ObjectSpace.count_objects([result_hash]) -> hash @@ -2931,39 +3215,8 @@ count_objects(int argc, VALUE *argv, VALUE os) rb_hash_aset(hash, ID2SYM(rb_intern("FREE")), SIZET2NUM(freed)); for (i = 0; i <= T_MASK; i++) { - VALUE type; - switch (i) { -#define COUNT_TYPE(t) case t: type = ID2SYM(rb_intern(#t)); break; - COUNT_TYPE(T_NONE); - COUNT_TYPE(T_OBJECT); - COUNT_TYPE(T_CLASS); - COUNT_TYPE(T_MODULE); - COUNT_TYPE(T_FLOAT); - COUNT_TYPE(T_STRING); - COUNT_TYPE(T_REGEXP); - COUNT_TYPE(T_ARRAY); - COUNT_TYPE(T_HASH); - COUNT_TYPE(T_STRUCT); - COUNT_TYPE(T_BIGNUM); - COUNT_TYPE(T_FILE); - COUNT_TYPE(T_DATA); - COUNT_TYPE(T_MATCH); - COUNT_TYPE(T_COMPLEX); - COUNT_TYPE(T_RATIONAL); - COUNT_TYPE(T_NIL); - COUNT_TYPE(T_TRUE); - COUNT_TYPE(T_FALSE); - COUNT_TYPE(T_SYMBOL); - COUNT_TYPE(T_FIXNUM); - COUNT_TYPE(T_UNDEF); - COUNT_TYPE(T_NODE); - COUNT_TYPE(T_ICLASS); - COUNT_TYPE(T_ZOMBIE); -#undef COUNT_TYPE - default: type = INT2NUM(i); break; - } if (counts[i]) - rb_hash_aset(hash, type, SIZET2NUM(counts[i])); + rb_hash_aset(hash, builtin2sym(i), SIZET2NUM(counts[i])); } return hash; @@ -3163,6 +3416,16 @@ gc_profile_total_time(VALUE self) void Init_GC(void) { + gc->Init_GC(); + nomem_error = rb_exc_new3(rb_eNoMemError, + rb_obj_freeze(rb_str_new2("failed to allocate memory"))); + OBJ_TAINT(nomem_error); + OBJ_FREEZE(nomem_error); +} + +static void +Init_GC_default(void) +{ VALUE rb_mObSpace; VALUE rb_mProfiler; @@ -3193,18 +3456,218 @@ Init_GC(void) rb_define_module_function(rb_mObSpace, "_id2ref", id2ref, 1); - nomem_error = rb_exc_new3(rb_eNoMemError, - rb_obj_freeze(rb_str_new2("failed to allocate memory"))); - OBJ_TAINT(nomem_error); - OBJ_FREEZE(nomem_error); - rb_define_method(rb_mKernel, "__id__", rb_obj_id, 0); rb_define_method(rb_mKernel, "object_id", rb_obj_id, 0); rb_define_module_function(rb_mObSpace, "count_objects", count_objects, -1); - #if CALC_EXACT_MALLOC_SIZE rb_define_singleton_method(rb_mGC, "malloc_allocated_size", gc_malloc_allocated_size, 0); rb_define_singleton_method(rb_mGC, "malloc_allocations", gc_malloc_allocations, 0); #endif } + +static rb_gc_t gc_default = { + ruby_xfree_default, + ruby_xmalloc_default, + ruby_xmalloc2_default, + ruby_xcalloc_default, + ruby_xrealloc_default, + ruby_xrealloc2_default, + + sizeof(RVALUE), + sizeof(struct heaps_slot), +#if 
defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE + rb_objspace_alloc_default, + rb_objspace_free_default, +#else + NULL, NULL, +#endif + rb_gc_register_address_default, + rb_gc_enable_default, + rb_gc_disable_default, + rb_gc_unregister_address_default, + rb_during_gc_default, + rb_newobj_default, + rb_set_flag_force_default, + rb_data_object_alloc_default, + rb_data_typed_object_alloc_default, + rb_objspace_data_type_memsize_default, + rb_objspace_data_type_name_default, +#if !STACK_GROW_DIRECTION + ruby_get_stack_grow_direction_default, +#else + NULL, +#endif + ruby_stack_check_default, + rb_gc_mark_locations_default, + rb_mark_set_default, + rb_free_m_table_default, + rb_mark_tbl_default, + rb_gc_mark_maybe_default, + rb_gc_force_recycle_default, + rb_garbage_collect_default, + Init_heap_default, + rb_objspace_each_objects_default, + rb_gc_copy_finalizer_default, + rb_gc_call_finalizer_at_exit_default, + rb_gc_default, + rb_obj_id_default, + Init_GC_default, + + mark_locations_array, + mark_tbl, + mark_m_tbl, + gc_mark, + gc_set_mark_flag, + garbage_collect, + gc_finalize_deferred, + mark_hash, +}; + +static void +ruby_xfree_boot(void *ptr) +{ + if (ptr) + free(ptr); +} + +static void * +ruby_xmalloc_boot(size_t size) +{ + void *ptr = malloc(size); + return ptr; +} + +static void * +ruby_xcalloc_boot(size_t n, size_t size) +{ + void *mem = malloc(size * n); + memset(mem, 0, n * size); + + return mem; +} + +static rb_gc_t gc_boot = { + ruby_xfree_boot, + ruby_xmalloc_boot, + NULL, + ruby_xcalloc_boot, +}; + +rb_gc_t *gc = &gc_boot; + +static rb_gc_inner_t gc_inner = { + get_objspace, + gc_mark_core, + ruby_memerror, + negative_size_allocation_error, + garbage_collect_with_gvl, + stack_check, + gc_mark_children, + obj_free, + builtin2sym, + os_obj_of_check_type, + mark_method_entry, + ruby_vm_set_finalizer_interrupt, +}; + +rb_gc_inner_t * +rb_gc_set(rb_gc_t *ptr) +{ + if (!gc_decided_p) { + gc = ptr; + gc_decided_p = 1; + } + return &gc_inner; +} + +void +rb_gc_load(int *argc, char ***argv) +{ + rb_gc_t *decided_gc; + + if (dln_loadable()) { + char *name = 0; + char *paths = (char*) malloc(2); + size_t paths_len = 2; + int opt_index = 1; + + MEMZERO(paths, char, 0); + while (*argc > opt_index && argv[0][opt_index][0] == '-' && + argv[0][opt_index][1] == 'I') { + char *path; + size_t path_len; + if (argv[0][opt_index][2]) { + path = argv[0][opt_index] + 2; + opt_index += 1; + } + else if (*argc > opt_index + 1) { + path = argv[0][opt_index + 1]; + opt_index += 2; + } + else { + opt_index += 1; + break; + } + path_len = strlen(path) + 1; + paths_len += path_len; + paths = (char*) realloc(paths, paths_len + 1); + memcpy(paths + paths_len - path_len, path, path_len); + paths[paths_len] = 0; + } + + if (*argc > opt_index && argv[0][opt_index][0] == '-' && + argv[0][opt_index][1] == 'G') { + int pass_size = 0; + if (argv[0][opt_index][2]) { + name = argv[0][opt_index] + 2; + pass_size = 1; + } + else if (*argc > opt_index + 1) { + name = argv[0][opt_index + 1]; + pass_size = 2; + } + *argc -= pass_size; + memcpy(*argv + opt_index, *argv + opt_index + pass_size, + sizeof(char**) * (*argc - opt_index)); + } + else { + name = getenv("RUBYGC"); + } + if (name && name[0]) { + extern const char ruby_initial_load_paths[]; + const char *path; + const char *paths_list[] = {paths, &ruby_initial_load_paths[0]}; + size_t i; + + for (i = 0; i < sizeof(paths_list) / sizeof(char *); i++) { + for (path = paths_list[i]; *path; path += strlen(path) + 1) { +#define MAX_PATHLEN 1024 + char pathname[MAX_PATHLEN]; 
+ snprintf(pathname, MAX_PATHLEN, "%s/%s.so", path, name); + /* TODO : is using access(1) OK? */ + if (!access(pathname, R_OK)) { + dln_load(pathname); + i = sizeof(paths_list); + break; + } + } + } + if (!gc_decided_p) { + fprintf(stderr, "warning: can't find %s, use default GC.\n", name); + } + } + } + rb_gc_set(&gc_default); + + decided_gc = gc; + gc = &gc_boot; + ALLOCA_N(VALUE, 0); + gc = decided_gc; +} + +int +gc_decided() +{ + return gc_decided_p; +} Index: include/ruby/gc_ext.h new file mode 100644 =================================================================== --- /dev/null (revision 27126) +++ include/ruby/gc_ext.h (working copy) @@ -0,0 +1,80 @@ +#ifndef METHOD_H +typedef struct rb_method_entry_struct rb_method_entry_t; +#endif +void rb_free_method_entry(rb_method_entry_t *me); +void rb_objspace_each_objects(int (*callback)(void *vstart, void *vend, + size_t stride, void *d), + void *data); + +typedef struct rb_gc { + void (*ruby_xfree)(void *x); + void *(*ruby_xmalloc)(size_t size); + void *(*ruby_xmalloc2)(size_t n, size_t size); + void *(*ruby_xcalloc)(size_t n, size_t size); + void *(*ruby_xrealloc)(void *ptr, size_t size); + void *(*ruby_xrealloc2)(void *ptr, size_t n, size_t size); + + size_t rval_size; + size_t slot_size; + rb_objspace_t *(*rb_objspace_alloc)(void); + void (*rb_objspace_free)(rb_objspace_t *objspace); + void (*rb_gc_register_address)(VALUE *var); + VALUE (*rb_gc_enable)(void); + VALUE (*rb_gc_disable)(void); + void (*rb_gc_unregister_address)(VALUE *addr); + int (*rb_during_gc)(void); + VALUE (*rb_newobj)(void); + void (*rb_set_flag_force)(VALUE obj, VALUE t); + VALUE (*rb_data_object_alloc)(VALUE klass, void *datap, + RUBY_DATA_FUNC dmark, RUBY_DATA_FUNC dfree); + VALUE (*rb_data_typed_object_alloc)(VALUE klass, void *datap, + const rb_data_type_t *type); + size_t (*rb_objspace_data_type_memsize)(VALUE obj); + const char *(*rb_objspace_data_type_name)(VALUE obj); + int (*ruby_get_stack_grow_direction)(volatile VALUE *addr); + int (*ruby_stack_check)(void); + void (*rb_gc_mark_locations)(VALUE *start, VALUE *end); + void (*rb_mark_set)(st_table *tbl); + void (*rb_free_m_table)(st_table *tbl); + void (*rb_mark_tbl)(st_table *tbl); + void (*rb_gc_mark_maybe)(VALUE obj); + void (*rb_gc_force_recycle)(VALUE p); + int (*rb_garbage_collect)(void); + void (*Init_heap)(void); + void (*rb_objspace_each_objects)(int (*callback)(void *vstart, void *vend, + size_t stride, void *d), + void *data); + void (*rb_gc_copy_finalizer)(VALUE dest, VALUE obj); + void (*rb_gc_call_finalizer_at_exit)(void); + void (*rb_gc)(void); + VALUE (*rb_obj_id)(VALUE obj); + void (*Init_GC)(void); + + void (*mark_locations_array)(rb_objspace_t *objspace, register VALUE *x, + register long n); + void (*mark_tbl)(rb_objspace_t *objspace, st_table *tbl, int lev); + void (*mark_m_tbl)(rb_objspace_t *objspace, st_table *tbl, int lev); + void (*gc_mark)(rb_objspace_t *objspace, VALUE ptr, int lev); + int (*gc_set_mark_flag)(register RVALUE *obj); + int (*garbage_collect)(rb_objspace_t *objspace); + void (*gc_finalize_deferred)(rb_objspace_t *objspace); + void (*mark_hash)(rb_objspace_t *objspace, st_table *tbl, int lev); +} rb_gc_t; + +typedef struct rb_gc_inner { + rb_objspace_t *(*get_objspace)(void); + void (*gc_mark_core)(rb_objspace_t *objspace); + void (*ruby_memerror)(void); + void (*negative_size_allocation_error)(const char *msg); + int (*garbage_collect_with_gvl)(rb_objspace_t *objspace); + int (*stack_check)(void); + void (*gc_mark_children)(rb_objspace_t *objspace, VALUE ptr, int 
lev); + int (*obj_free)(rb_objspace_t *, VALUE); + VALUE (*builtin2sym)(VALUE i); + int (*os_obj_of_check_type)(RVALUE *p); + void (*mark_method_entry)(rb_objspace_t *objspace, + const rb_method_entry_t *me, int lev); + void (*ruby_vm_set_finalizer_interrupt)(void); +} rb_gc_inner_t; + +rb_gc_inner_t *rb_gc_set(rb_gc_t *ptr); Index: include/ruby/ruby.h =================================================================== --- include/ruby/ruby.h (revision 27126) +++ include/ruby/ruby.h (working copy) @@ -517,9 +517,10 @@ VALUE rb_uint2big(VALUE); VALUE rb_int2big(SIGNED_VALUE); VALUE rb_newobj(void); +void rb_set_flag_force(VALUE obj, VALUE t); #define NEWOBJ(obj,type) type *obj = (type*)rb_newobj() #define OBJSETUP(obj,c,t) do {\ - RBASIC(obj)->flags = (t);\ + rb_set_flag_force((VALUE)obj, t);\ RBASIC(obj)->klass = (c);\ if (rb_safe_level() >= 3) FL_SET(obj, FL_TAINT | FL_UNTRUSTED);\ } while (0) Index: object.c =================================================================== --- object.c (revision 27126) +++ object.c (working copy) @@ -191,7 +191,7 @@ init_copy(VALUE dest, VALUE obj) if (OBJ_FROZEN(dest)) { rb_raise(rb_eTypeError, "[bug] frozen object (%s) allocated", rb_obj_classname(dest)); } - RBASIC(dest)->flags &= ~(T_MASK|FL_EXIVAR); + rb_set_flag_force(dest, RBASIC(dest)->flags & ~(T_MASK|FL_EXIVAR)); RBASIC(dest)->flags |= RBASIC(obj)->flags & (T_MASK|FL_EXIVAR|FL_TAINT|FL_UNTRUSTED); rb_copy_generic_ivar(dest, obj); rb_gc_copy_finalizer(dest, obj); @@ -264,7 +264,9 @@ rb_obj_clone(VALUE obj) } clone = rb_obj_alloc(rb_obj_class(obj)); RBASIC(clone)->klass = rb_singleton_class_clone(obj); - RBASIC(clone)->flags = (RBASIC(obj)->flags | FL_TEST(clone, FL_TAINT) | FL_TEST(clone, FL_UNTRUSTED)) & ~(FL_FREEZE|FL_FINALIZE); + rb_set_flag_force(clone, (RBASIC(obj)->flags | FL_TEST(clone, FL_TAINT) | + FL_TEST(clone, FL_UNTRUSTED)) & + ~(FL_FREEZE|FL_FINALIZE)); init_copy(clone, obj); rb_funcall(clone, id_init_clone, 1, obj); RBASIC(clone)->flags |= RBASIC(obj)->flags & FL_FREEZE; Index: ruby.c =================================================================== --- ruby.c (revision 27126) +++ ruby.c (working copy) @@ -1857,4 +1857,5 @@ ruby_sysinit(int *argc, char ***argv) #if defined(USE_DLN_A_OUT) dln_argv0 = origarg.argv[0]; #endif + rb_gc_load(argc, argv); } Index: vm.c =================================================================== --- vm.c (revision 27126) +++ vm.c (working copy) @@ -1701,7 +1701,7 @@ thread_free(void *ptr) VALUE *ptr = th->value_cache_ptr; while (*ptr) { VALUE v = *ptr; - RBASIC(v)->flags = 0; + rb_set_flag_force(v, 0); RBASIC(v)->klass = 0; ptr++; } @@ -1980,7 +1980,7 @@ Init_VM(void) /* ::VM::FrozenCore */ fcore = rb_class_new(rb_cBasicObject); - RBASIC(fcore)->flags = T_ICLASS; + rb_set_flag_force(fcore, T_ICLASS); klass = rb_singleton_class(fcore); rb_define_method_id(klass, id_core_set_method_alias, m_core_set_method_alias, 3); rb_define_method_id(klass, id_core_set_variable_alias, m_core_set_variable_alias, 2);
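A note on how the hook is meant to be used (this paragraph and the sketch below are commentary, not part of the patch): gc.c now forwards its public entry points through the `gc' function table (rb_gc_t), which starts out as gc_boot and is replaced during ruby_sysinit() by rb_gc_load(), either with the table registered by a loaded extension or with gc_default. rb_gc_set() installs a table only once, before the collector is "decided", and always returns the rb_gc_inner_t callbacks. An alternative collector is packaged as an ordinary C extension (ext/gc_bmp is the reference), and rb_gc_load() searches the leading -I paths and the initial load paths for <name>.so when "-G <name>" or the RUBYGC environment variable is given. What follows is only a rough sketch of such an extension: the my_gc_* names are hypothetical, and a real table has to fill in every rb_gc_t member, exactly as ext/gc_bmp/gc_bmp.c does.

/* Hypothetical skeleton of an out-of-tree collector.
 * Assumes the rb_gc_t / rb_gc_inner_t layout introduced by
 * include/ruby/gc_ext.h in this patch; all members omitted below
 * must be provided by a real extension (see ext/gc_bmp/gc_bmp.c). */
#include "ruby.h"
#include "ruby/gc_ext.h"

static rb_gc_inner_t *gc_inner;   /* core callbacks handed back by rb_gc_set() */

static void
my_gc_xfree(void *ptr)
{
    if (ptr) free(ptr);           /* placeholder allocator */
}

static void *
my_gc_xmalloc(size_t size)
{
    return malloc(size);          /* placeholder allocator */
}

static rb_gc_t my_gc = {
    my_gc_xfree,                  /* ruby_xfree */
    my_gc_xmalloc,                /* ruby_xmalloc */
    /* ... remaining rb_gc_t members, as in gc_bmp.c ... */
};

void
Init_my_gc(void)
{
    /* Registration only takes effect if no GC has been decided yet;
     * afterwards rb_gc_set() keeps the already-chosen table. */
    gc_inner = rb_gc_set(&my_gc);
}

Once built with an extconf.rb along the lines of the gc_bmp one, it would be selected at startup with something like `./ruby -I ext/my_gc -G my_gc script.rb' or by setting RUBYGC=my_gc; if the requested library cannot be found, rb_gc_load() prints a warning and falls back to the built-in gc_default table.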