Feature #2471 » extgc.patch

wanabe (_ wanabe), 04/01/2010 12:01 AM


dln.c (working copy)
#define translit_separator(str) (void)(str)
#endif
int
dln_loadable(void)
{
return 1;
}
void*
dln_load(const char *file)
{
dmydln.c (working copy)
#include "ruby/ruby.h"
int
dln_loadable(void)
{
return 0;
}
void*
dln_load(const char *file)
{
error.c (working copy)
VALUE mesg;
va_start(args, fmt);
if (!gc_decided()) {
vfprintf(stderr, fmt, args);
va_end(args);
abort();
}
mesg = rb_vsprintf(fmt, args);
va_end(args);
rb_exc_raise(rb_exc_new3(rb_eLoadError, mesg));
ext/gc_bmp/extconf.rb (working copy)
require 'mkmf'
create_makefile("gc_bmp")
ext/gc_bmp/gc_bmp.c (working copy)
/**********************************************************************
gc_bmp.c -
$Author$
created at: Tue Oct 5 09:44:46 JST 1993
Copyright (C) 1993-2007 Yukihiro Matsumoto
Copyright (C) 2000 Network Applied Communication Laboratory, Inc.
Copyright (C) 2000 Information-technology Promotion Agency, Japan
**********************************************************************/
#include "ruby.h"
#include "ruby/re.h"
#include "ruby/io.h"
#include <stdio.h>
#include <setjmp.h>
#include <sys/types.h>
#ifndef FALSE
# define FALSE 0
#elif FALSE
# error FALSE must be false
#endif
#ifndef TRUE
# define TRUE 1
#elif !TRUE
# error TRUE must be true
#endif
#ifdef HAVE_SYS_TIME_H
#include <sys/time.h>
#endif
#ifdef HAVE_SYS_RESOURCE_H
#include <sys/resource.h>
#endif
#if defined _WIN32 || defined __CYGWIN__
#include <windows.h>
#endif
#ifdef HAVE_VALGRIND_MEMCHECK_H
# include <valgrind/memcheck.h>
# ifndef VALGRIND_MAKE_MEM_DEFINED
# define VALGRIND_MAKE_MEM_DEFINED(p, n) VALGRIND_MAKE_READABLE(p, n)
# endif
# ifndef VALGRIND_MAKE_MEM_UNDEFINED
# define VALGRIND_MAKE_MEM_UNDEFINED(p, n) VALGRIND_MAKE_WRITABLE(p, n)
# endif
#else
# define VALGRIND_MAKE_MEM_DEFINED(p, n) /* empty */
# define VALGRIND_MAKE_MEM_UNDEFINED(p, n) /* empty */
#endif
int rb_io_fptr_finalize(struct rb_io_t*);
#define rb_setjmp(env) RUBY_SETJMP(env)
#define rb_jmp_buf rb_jmpbuf_t
/* Make alloca work the best possible way. */
#ifdef __GNUC__
# ifndef atarist
# ifndef alloca
# define alloca __builtin_alloca
# endif
# endif /* atarist */
#else
# ifdef HAVE_ALLOCA_H
# include <alloca.h>
# else
# ifdef _AIX
#pragma alloca
# else
# ifndef alloca /* predefined by HP cc +Olibcalls */
void *alloca ();
# endif
# endif /* AIX */
# endif /* HAVE_ALLOCA_H */
#endif /* __GNUC__ */
#ifndef GC_MALLOC_LIMIT
#define GC_MALLOC_LIMIT 8000000
#endif
#define MARK_STACK_MAX 1024
/* for GC profile */
#define GC_PROFILE_MORE_DETAIL 1
typedef struct gc_profile_record {
double gc_time;
double gc_mark_time;
double gc_sweep_time;
double gc_invoke_time;
size_t heap_use_slots;
size_t heap_live_objects;
size_t heap_free_objects;
size_t heap_total_objects;
size_t heap_use_size;
size_t heap_total_size;
int have_finalize;
size_t allocate_increase;
size_t allocate_limit;
} gc_profile_record;
static double
getrusage_time(void)
{
#ifdef RUSAGE_SELF
struct rusage usage;
struct timeval time;
getrusage(RUSAGE_SELF, &usage);
time = usage.ru_utime;
return time.tv_sec + time.tv_usec * 1e-6;
#elif defined _WIN32
FILETIME creation_time, exit_time, kernel_time, user_time;
ULARGE_INTEGER ui;
LONG_LONG q;
double t;
if (GetProcessTimes(GetCurrentProcess(),
&creation_time, &exit_time, &kernel_time, &user_time) == 0)
{
return 0.0;
}
memcpy(&ui, &user_time, sizeof(FILETIME));
q = ui.QuadPart / 10L;
t = (DWORD)(q % 1000000L) * 1e-6;
q /= 1000000L;
#ifdef __GNUC__
t += q;
#else
t += (double)(DWORD)(q >> 16) * (1 << 16);
t += (DWORD)q & ~(~0 << 16);
#endif
return t;
#else
return 0.0;
#endif
}
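/*
 * Illustrative sketch (not part of the patch): the GC_PROF_* macros below all
 * apply the pattern shown here -- snapshot the process's user CPU time with a
 * getrusage_time()-style helper before and after a region, then clamp a
 * negative difference to zero.  Assumes a POSIX system with RUSAGE_SELF; the
 * counting loop stands in for the measured region.
 */
#include <stdio.h>
#include <sys/time.h>
#include <sys/resource.h>
static double
demo_user_time(void)
{
    struct rusage usage;
    getrusage(RUSAGE_SELF, &usage);
    return usage.ru_utime.tv_sec + usage.ru_utime.tv_usec * 1e-6;
}
int
main(void)
{
    volatile unsigned long i, x = 0;
    double t = demo_user_time();
    for (i = 0; i < 100000000UL; i++) x += i;   /* measured region */
    t = demo_user_time() - t;
    if (t < 0) t = 0;   /* clock granularity can produce a tiny negative delta */
    printf("user time: %f sec (x=%lu)\n", t, x);
    return 0;
}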
#define GC_PROF_TIMER_START do {\
if (objspace->profile.run) {\
if (!objspace->profile.record) {\
objspace->profile.size = 1000;\
objspace->profile.record = malloc(sizeof(gc_profile_record) * objspace->profile.size);\
}\
if (count >= objspace->profile.size) {\
objspace->profile.size += 1000;\
objspace->profile.record = realloc(objspace->profile.record, sizeof(gc_profile_record) * objspace->profile.size);\
}\
if (!objspace->profile.record) {\
rb_bug("gc_profile malloc or realloc miss");\
}\
MEMZERO(&objspace->profile.record[count], gc_profile_record, 1);\
gc_time = getrusage_time();\
objspace->profile.record[count].gc_invoke_time = gc_time - objspace->profile.invoke_time;\
}\
} while(0)
#define GC_PROF_TIMER_STOP do {\
if (objspace->profile.run) {\
gc_time = getrusage_time() - gc_time;\
if (gc_time < 0) gc_time = 0;\
objspace->profile.record[count].gc_time = gc_time;\
objspace->profile.count++;\
}\
} while(0)
#if GC_PROFILE_MORE_DETAIL
#define INIT_GC_PROF_PARAMS double gc_time = 0, mark_time = 0, sweep_time = 0;\
size_t count = objspace->profile.count
#define GC_PROF_MARK_TIMER_START do {\
if (objspace->profile.run) {\
mark_time = getrusage_time();\
}\
} while(0)
#define GC_PROF_MARK_TIMER_STOP do {\
if (objspace->profile.run) {\
mark_time = getrusage_time() - mark_time;\
if (mark_time < 0) mark_time = 0;\
objspace->profile.record[count].gc_mark_time = mark_time;\
}\
} while(0)
#define GC_PROF_SWEEP_TIMER_START do {\
if (objspace->profile.run) {\
sweep_time = getrusage_time();\
}\
} while(0)
#define GC_PROF_SWEEP_TIMER_STOP do {\
if (objspace->profile.run) {\
sweep_time = getrusage_time() - sweep_time;\
if (sweep_time < 0) sweep_time = 0;\
objspace->profile.record[count].gc_sweep_time = sweep_time;\
}\
} while(0)
#define GC_PROF_SET_MALLOC_INFO do {\
if (objspace->profile.run) {\
gc_profile_record *record = &objspace->profile.record[objspace->profile.count];\
record->allocate_increase = malloc_increase;\
record->allocate_limit = malloc_limit; \
}\
} while(0)
#define GC_PROF_SET_HEAP_INFO do {\
if (objspace->profile.run) {\
gc_profile_record *record = &objspace->profile.record[objspace->profile.count];\
record->heap_use_slots = heaps_used;\
record->heap_live_objects = live;\
record->heap_free_objects = freed; \
record->heap_total_objects = heaps_used * HEAP_OBJ_LIMIT;\
record->have_finalize = final_list ? Qtrue : Qfalse;\
record->heap_use_size = live * sizeof(RVALUE); \
record->heap_total_size = heaps_used * (HEAP_OBJ_LIMIT * sizeof(RVALUE));\
}\
} while(0)
#else
#define INIT_GC_PROF_PARAMS double gc_time = 0;\
size_t count = objspace->profile.count
#define GC_PROF_MARK_TIMER_START
#define GC_PROF_MARK_TIMER_STOP
#define GC_PROF_SWEEP_TIMER_START
#define GC_PROF_SWEEP_TIMER_STOP
#define GC_PROF_SET_MALLOC_INFO
#define GC_PROF_SET_HEAP_INFO do {\
if (objspace->profile.run) {\
gc_profile_record *record = &objspace->profile.record[objspace->profile.count];\
record->heap_total_objects = heaps_used * HEAP_OBJ_LIMIT;\
record->heap_use_size = live * sizeof(RVALUE); \
record->heap_total_size = heaps_used * HEAP_SIZE;\
}\
} while(0)
#endif
#if defined(_MSC_VER) || defined(__BORLANDC__) || defined(__CYGWIN__)
#pragma pack(push, 1) /* magic for reducing sizeof(RVALUE): 24 -> 20 */
#endif
typedef struct RVALUE {
union {
struct {
VALUE flags; /* always 0 for freed obj */
struct RVALUE *next;
} free;
struct {
VALUE flags;
struct RVALUE *next;
int *map;
VALUE slot;
int limit;
} bitmap;
struct RBasic basic;
struct RObject object;
struct RClass klass;
struct RFloat flonum;
struct RString string;
struct RArray array;
struct RRegexp regexp;
struct RHash hash;
struct RData data;
struct RTypedData typeddata;
struct RStruct rstruct;
struct RBignum bignum;
struct RFile file;
struct RMatch match;
struct RRational rational;
struct RComplex complex;
} as;
#ifdef GC_DEBUG
const char *file;
int line;
#endif
} RVALUE;
#if defined(_MSC_VER) || defined(__BORLANDC__) || defined(__CYGWIN__)
#pragma pack(pop)
#endif
struct heaps_slot {
void *membase;
RVALUE *slot;
size_t limit;
RVALUE *bitmap;
};
#define HEAP_MIN_SLOTS 10000
#define FREE_MIN 4096
struct gc_list {
VALUE *varptr;
struct gc_list *next;
};
#define CALC_EXACT_MALLOC_SIZE 0
typedef struct rb_objspace {
struct {
size_t limit;
size_t increase;
#if CALC_EXACT_MALLOC_SIZE
size_t allocated_size;
size_t allocations;
#endif
} malloc_params;
struct {
size_t increment;
struct heaps_slot *ptr;
size_t length;
size_t used;
RVALUE *freelist;
RVALUE *range[2];
RVALUE *freed;
} heap;
struct {
int dont_gc;
int during_gc;
} flags;
struct {
st_table *table;
RVALUE *deferred;
} final;
struct {
VALUE buffer[MARK_STACK_MAX];
VALUE *ptr;
int overflow;
} markstack;
struct {
int run;
gc_profile_record *record;
size_t count;
size_t size;
double invoke_time;
} profile;
struct gc_list *global_list;
unsigned int count;
int gc_stress;
struct {
RVALUE *freed_bitmap;
} ext_heap;
} rb_objspace_t;
#define malloc_limit objspace->malloc_params.limit
#define malloc_increase objspace->malloc_params.increase
#define heap_slots objspace->heap.slots
#define heaps objspace->heap.ptr
#define heaps_length objspace->heap.length
#define heaps_used objspace->heap.used
#define freelist objspace->heap.freelist
#define lomem objspace->heap.range[0]
#define himem objspace->heap.range[1]
#define heaps_inc objspace->heap.increment
#define heaps_freed objspace->heap.freed
#define dont_gc objspace->flags.dont_gc
#define during_gc objspace->flags.during_gc
#define finalizer_table objspace->final.table
#define deferred_final_list objspace->final.deferred
#define mark_stack objspace->markstack.buffer
#define mark_stack_ptr objspace->markstack.ptr
#define mark_stack_overflow objspace->markstack.overflow
#define global_List objspace->global_list
#define ruby_gc_stress objspace->gc_stress
#define need_call_final (finalizer_table && finalizer_table->num_entries)
static void rb_objspace_call_finalizer(rb_objspace_t *objspace);
#include "ruby/gc_ext.h"
static rb_gc_inner_t *gc_inner;
/* TODO: find a more suitable and safer representation */
#define T_BITMAP (T_FIXNUM + 1)
#define FL_ALIGNOFF FL_MARK
static rb_objspace_t *
rb_objspace_alloc_tmp(void)
{
rb_objspace_t *objspace = malloc(sizeof(rb_objspace_t));
memset(objspace, 0, sizeof(*objspace));
malloc_limit = GC_MALLOC_LIMIT;
return objspace;
}
static void
rb_objspace_free_tmp(rb_objspace_t *objspace)
{
rb_objspace_call_finalizer(objspace);
if (objspace->profile.record) {
free(objspace->profile.record);
objspace->profile.record = 0;
}
if (global_List) {
struct gc_list *list, *next;
for (list = global_List; list; list = next) {
next = list->next;
free(list);
}
}
if (heaps) {
size_t i;
for (i = 0; i < heaps_used; ++i) {
free(heaps[i].membase);
}
free(heaps);
heaps_used = 0;
heaps = 0;
}
free(objspace);
}
/* tiny heap size */
/* 32KB */
/*#define HEAP_SIZE 0x8000 */
/* 128KB */
/*#define HEAP_SIZE 0x20000 */
/* 64KB */
/*#define HEAP_SIZE 0x10000 */
/* 16KB */
#define BITMAP_ALIGN 0x4000
/* 8KB */
/*#define HEAP_SIZE 0x2000 */
/* 4KB */
/*#define HEAP_SIZE 0x1000 */
/* 2KB */
/*#define HEAP_SIZE 0x800 */
#define HEAP_SIZE ((BITMAP_ALIGN / sizeof(struct RVALUE) + 2) * sizeof(RVALUE))
#define BITMAP_MASK (0xFFFFFFFF - BITMAP_ALIGN + 1)
#define HEAP_OBJ_LIMIT (HEAP_SIZE / sizeof(struct RVALUE) - 1)
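/*
 * Illustrative sketch (not part of the patch): evaluating the heap-geometry
 * macros above for an assumed sizeof(RVALUE) of 40 bytes (a typical 64-bit
 * build; packed 32-bit builds use 20).  With 40-byte cells the 16KB alignment
 * unit gives HEAP_SIZE = (0x4000/40 + 2) * 40 = 16440 and HEAP_OBJ_LIMIT =
 * 16440/40 - 1 = 410, the "- 1" reserving one cell for the slot's bitmap.
 * Note that the 0xFFFFFFFF literal in BITMAP_MASK keeps only the low 32
 * address bits, i.e. it assumes 32-bit VALUEs.
 */
#include <stdio.h>
#define DEMO_RVALUE_SIZE 40UL      /* assumed sizeof(RVALUE) */
#define DEMO_ALIGN       0x4000UL  /* BITMAP_ALIGN, 16KB     */
int
main(void)
{
    unsigned long heap_size = (DEMO_ALIGN / DEMO_RVALUE_SIZE + 2) * DEMO_RVALUE_SIZE;
    unsigned long obj_limit = heap_size / DEMO_RVALUE_SIZE - 1;
    printf("HEAP_SIZE      = %lu bytes\n", heap_size);                  /* 16440 */
    printf("HEAP_OBJ_LIMIT = %lu objects\n", obj_limit);                /* 410   */
    printf("BITMAP_MASK    = 0x%lx\n", 0xFFFFFFFFUL - DEMO_ALIGN + 1);  /* 0xffffc000 */
    return 0;
}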
extern VALUE rb_cMutex;
extern st_table *rb_class_tbl;
int ruby_disable_gc_stress = 0;
static void run_final(rb_objspace_t *objspace, VALUE obj);
static int garbage_collect(rb_objspace_t *objspace);
/*
* call-seq:
* GC.stress => true or false
*
* returns current status of GC stress mode.
*/
static VALUE
gc_stress_get(VALUE self)
{
rb_objspace_t *objspace = gc_inner->get_objspace();
return ruby_gc_stress ? Qtrue : Qfalse;
}
/*
* call-seq:
* GC.stress = bool => bool
*
* updates GC stress mode.
*
* When GC.stress = true, GC is invoked at every GC opportunity:
* all memory and object allocation.
*
* Since it makes Ruby very slow, it is only for debugging.
*/
static VALUE
gc_stress_set(VALUE self, VALUE flag)
{
rb_objspace_t *objspace = gc_inner->get_objspace();
rb_secure(2);
ruby_gc_stress = RTEST(flag);
return flag;
}
/*
* call-seq:
* GC::Profiler.enable? => true or false
*
* returns current status of GC profile mode.
*/
static VALUE
gc_profile_enable_get(VALUE self)
{
rb_objspace_t *objspace = gc_inner->get_objspace();
return objspace->profile.run ? Qtrue : Qfalse;
}
/*
* call-seq:
* GC::Profiler.enable => nil
*
* Starts the GC profiler.
*
*/
static VALUE
gc_profile_enable(void)
{
rb_objspace_t *objspace = gc_inner->get_objspace();
objspace->profile.run = TRUE;
return Qnil;
}
/*
* call-seq:
* GC::Profiler.disable => nil
*
* Stops the GC profiler.
*
*/
static VALUE
gc_profile_disable(void)
{
rb_objspace_t *objspace = gc_inner->get_objspace();
objspace->profile.run = FALSE;
return Qnil;
}
/*
* call-seq:
* GC::Profiler.clear => nil
*
* Clears previously collected GC profiling data.
*
*/
static VALUE
gc_profile_clear(void)
{
rb_objspace_t *objspace = gc_inner->get_objspace();
MEMZERO(objspace->profile.record, gc_profile_record, objspace->profile.size);
objspace->profile.count = 0;
return Qnil;
}
static void vm_xfree(rb_objspace_t *objspace, void *ptr);
static void *
vm_xmalloc(rb_objspace_t *objspace, size_t size)
{
void *mem;
if ((ssize_t)size < 0) {
gc_inner->negative_size_allocation_error("negative allocation size (or too big)");
}
if (size == 0) size = 1;
#if CALC_EXACT_MALLOC_SIZE
size += sizeof(size_t);
#endif
if ((ruby_gc_stress && !ruby_disable_gc_stress) ||
(malloc_increase+size) > malloc_limit) {
gc_inner->garbage_collect_with_gvl(objspace);
}
mem = malloc(size);
if (!mem) {
if (gc_inner->garbage_collect_with_gvl(objspace)) {
mem = malloc(size);
}
if (!mem) {
gc_inner->ruby_memerror();
}
}
malloc_increase += size;
#if CALC_EXACT_MALLOC_SIZE
objspace->malloc_params.allocated_size += size;
objspace->malloc_params.allocations++;
((size_t *)mem)[0] = size;
mem = (size_t *)mem + 1;
#endif
return mem;
}
static void *
vm_xrealloc(rb_objspace_t *objspace, void *ptr, size_t size)
{
void *mem;
if ((ssize_t)size < 0) {
gc_inner->negative_size_allocation_error("negative re-allocation size");
}
if (!ptr) return vm_xmalloc(objspace, size);
if (size == 0) {
vm_xfree(objspace, ptr);
return 0;
}
if (ruby_gc_stress && !ruby_disable_gc_stress)
gc_inner->garbage_collect_with_gvl(objspace);
#if CALC_EXACT_MALLOC_SIZE
size += sizeof(size_t);
objspace->malloc_params.allocated_size -= size;
ptr = (size_t *)ptr - 1;
#endif
mem = realloc(ptr, size);
if (!mem) {
if (gc_inner->garbage_collect_with_gvl(objspace)) {
mem = realloc(ptr, size);
}
if (!mem) {
gc_inner->ruby_memerror();
}
}
malloc_increase += size;
#if CALC_EXACT_MALLOC_SIZE
objspace->malloc_params.allocated_size += size;
((size_t *)mem)[0] = size;
mem = (size_t *)mem + 1;
#endif
return mem;
}
static void
vm_xfree(rb_objspace_t *objspace, void *ptr)
{
#if CALC_EXACT_MALLOC_SIZE
size_t size;
ptr = ((size_t *)ptr) - 1;
size = ((size_t*)ptr)[0];
objspace->malloc_params.allocated_size -= size;
objspace->malloc_params.allocations--;
#endif
free(ptr);
}
static void *
ruby_xmalloc_tmp(size_t size)
{
return vm_xmalloc(gc_inner->get_objspace(), size);
}
static void *
ruby_xmalloc2_tmp(size_t n, size_t size)
{
size_t len = size * n;
if (n != 0 && size != len / n) {
rb_raise(rb_eArgError, "malloc: possible integer overflow");
}
return vm_xmalloc(gc_inner->get_objspace(), len);
}
static void *
ruby_xcalloc_tmp(size_t n, size_t size)
{
void *mem = ruby_xmalloc2(n, size);
memset(mem, 0, n * size);
return mem;
}
static void *
ruby_xrealloc_tmp(void *ptr, size_t size)
{
return vm_xrealloc(gc_inner->get_objspace(), ptr, size);
}
static void *
ruby_xrealloc2_tmp(void *ptr, size_t n, size_t size)
{
size_t len = size * n;
if (n != 0 && size != len / n) {
rb_raise(rb_eArgError, "realloc: possible integer overflow");
}
return ruby_xrealloc(ptr, len);
}
static void
ruby_xfree_tmp(void *x)
{
if (x)
vm_xfree(gc_inner->get_objspace(), x);
}
/*
* call-seq:
* GC.enable => true or false
*
* Enables garbage collection, returning <code>true</code> if garbage
* collection was previously disabled.
*
* GC.disable #=> false
* GC.enable #=> true
* GC.enable #=> false
*
*/
static VALUE
rb_gc_enable_tmp(void)
{
rb_objspace_t *objspace = gc_inner->get_objspace();
int old = dont_gc;
dont_gc = FALSE;
return old ? Qtrue : Qfalse;
}
/*
* call-seq:
* GC.disable => true or false
*
* Disables garbage collection, returning <code>true</code> if garbage
* collection was already disabled.
*
* GC.disable #=> false
* GC.disable #=> true
*
*/
static VALUE
rb_gc_disable_tmp(void)
{
rb_objspace_t *objspace = gc_inner->get_objspace();
int old = dont_gc;
dont_gc = TRUE;
return old ? Qtrue : Qfalse;
}
extern VALUE rb_mGC;
static void
rb_gc_register_address_tmp(VALUE *addr)
{
rb_objspace_t *objspace = gc_inner->get_objspace();
struct gc_list *tmp;
tmp = ALLOC(struct gc_list);
tmp->next = global_List;
tmp->varptr = addr;
global_List = tmp;
}
static void
rb_gc_unregister_address_tmp(VALUE *addr)
{
rb_objspace_t *objspace = gc_inner->get_objspace();
struct gc_list *tmp = global_List;
if (tmp->varptr == addr) {
global_List = tmp->next;
xfree(tmp);
return;
}
while (tmp->next) {
if (tmp->next->varptr == addr) {
struct gc_list *t = tmp->next;
tmp->next = tmp->next->next;
xfree(t);
break;
}
tmp = tmp->next;
}
}
static void
allocate_heaps(rb_objspace_t *objspace, size_t next_heaps_length)
{
struct heaps_slot *p;
size_t size;
size = next_heaps_length*sizeof(struct heaps_slot);
if (heaps_used > 0) {
p = (struct heaps_slot *)realloc(heaps, size);
if (p) heaps = p;
}
else {
p = heaps = (struct heaps_slot *)malloc(size);
}
if (p == 0) {
during_gc = 0;
rb_memerror();
}
heaps_length = next_heaps_length;
}
#define FIND_BITMAP(res, p) do {\
if (((RVALUE *)p)->as.free.flags & FL_ALIGNOFF) {\
res = (RVALUE *)((((VALUE)p & BITMAP_MASK) + BITMAP_ALIGN) / sizeof(RVALUE) * sizeof(RVALUE)); \
}\
else {\
res = (RVALUE *)(((VALUE)p & BITMAP_MASK) / sizeof(RVALUE) * sizeof(RVALUE));\
}\
} while(0)
#define NUM_IN_SLOT(p, slot) (((VALUE)p - (VALUE)slot)/sizeof(RVALUE))
#define BITMAP_INDEX(bmap, p) (NUM_IN_SLOT(p, bmap->as.bitmap.slot) / (sizeof(int) * 8))
/* #define BITMAP_INDEX(bmap, p) (NUM_IN_SLOT(p, bmap->as.bitmap.slot) >> 5) */
#define BITMAP_OFFSET(bmap, p) (NUM_IN_SLOT(p, bmap->as.bitmap.slot) & ((sizeof(int) * 8)-1))
#define MARKED_IN_BITMAP(bmap, p) (bmap->as.bitmap.map[BITMAP_INDEX(bmap, p)] & 1 << BITMAP_OFFSET(bmap, p))
#define MARK_IN_BITMAP(bmap, p) (bmap->as.bitmap.map[BITMAP_INDEX(bmap, p)] |= 1 << BITMAP_OFFSET(bmap, p))
#define CLEAR_IN_BITMAP(bmap, p) (bmap->as.bitmap.map[BITMAP_INDEX(bmap, p)] &= ~(1 << BITMAP_OFFSET(bmap, p)))
#define MARKED_IN_BITMAP_DIRECT(map, index, offset) (map[index] & 1 << offset)
#define MARK_IN_BITMAP_DIRECT(map, index, offset) (map[index] |= 1 << offset)
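/*
 * Illustrative sketch (not part of the patch): how an object's mark bit is
 * located from its position inside the slot.  With 32-bit ints, the 37th
 * RVALUE of a slot lands in map word 37/32 = 1 at bit 37%32 = 5.  FL_ALIGNOFF
 * (set up in make_bitmap() below) tells FIND_BITMAP whether the slot's bitmap
 * sits at the next or the previous 16KB-aligned address relative to the
 * object.  The addresses and the 40-byte cell size are assumptions for the
 * demo only.
 */
#include <stdio.h>
int
main(void)
{
    unsigned long cell      = 40;                         /* assumed sizeof(RVALUE)  */
    unsigned long slot_base = 0x100000;                   /* hypothetical slot->slot */
    unsigned long obj       = slot_base + 37 * cell;      /* the 37th object cell    */
    unsigned long num       = (obj - slot_base) / cell;              /* NUM_IN_SLOT   */
    unsigned long index     = num / (sizeof(int) * 8);               /* BITMAP_INDEX  */
    unsigned long offset    = num & ((sizeof(int) * 8) - 1);         /* BITMAP_OFFSET */
    printf("object #%lu -> map[%lu], bit %lu (mask 0x%x)\n",
           num, index, offset, 1u << offset);             /* map[1], bit 5, mask 0x20 */
    return 0;
}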
/* for debug */
void
bitmap_p(RVALUE *p)
{
RVALUE *bmap;
int index, offset, marked;
FIND_BITMAP(bmap, p);
index = BITMAP_INDEX(bmap, p);
offset = BITMAP_OFFSET(bmap, p);
marked = MARKED_IN_BITMAP(bmap, p);
printf("bitmap : ((RVALUE *)%p)\n", bmap);
printf("map_index : %d | offset : %d\n", index, offset);
printf("is mark ? %s\n", marked? "true" : "false");
}
VALUE
find_bitmap(RVALUE *p) {
RVALUE *res;
FIND_BITMAP(res, p);
return (VALUE)res;
}
void
dump_bitmap(RVALUE *bmap) {
int i;
for (i = 0; i < 26; i++) {
printf("dump %p map %d : %d %s\n", bmap, i, bmap->as.bitmap.map[i], bmap->as.bitmap.map[i]? "remain" : "clean");
}
}
void
bitmap2obj(RVALUE *bmap, int index, int offset)
{
printf("(RVALUE *)%p\n", (RVALUE *)(bmap->as.bitmap.slot + (index * sizeof(int) * 8 + offset) * sizeof(RVALUE)));
}
static void
make_bitmap(struct heaps_slot *slot)
{
RVALUE *p, *pend, *bitmap, *last, *border;
int *map = 0;
int size;
p = slot->slot;
pend = p + slot->limit;
last = pend - 1;
RBASIC(last)->flags = 0;
FIND_BITMAP(bitmap, last);
if (bitmap < p || pend <= bitmap) {
rb_bug("not include in heap slot: result bitmap(%p), find (%p), p (%p), pend(%p)", bitmap, last, p, pend);
}
border = bitmap;
if (!((VALUE)border % BITMAP_ALIGN)) {
border--;
}
while (p < pend) {
if (p <= border) {
RBASIC(p)->flags = FL_ALIGNOFF;
}
else {
RBASIC(p)->flags = 0;
}
p++;
}
size = sizeof(int) * (HEAP_OBJ_LIMIT / (sizeof(int) * 8)+1);
map = (int *)malloc(size);
if (map == 0) {
rb_memerror();
}
MEMZERO(map, int, (size/sizeof(int)));
bitmap->as.bitmap.flags |= T_BITMAP;
bitmap->as.bitmap.map = map;
bitmap->as.bitmap.slot = (VALUE)slot->slot;
bitmap->as.bitmap.limit = slot->limit;
slot->bitmap = bitmap;
}
void
test_bitmap(RVALUE *p, RVALUE *pend)
{
RVALUE *first, *bmap = 0, *bmap_tmp;
int i;
first = p;
FIND_BITMAP(bmap_tmp, p);
bmap = bmap_tmp; /* initialize so the marked-check below never dereferences a NULL bitmap */
while (p < pend) {
if (MARKED_IN_BITMAP(bmap, p)) printf("already marked! %p\n", p);
if (bmap_tmp != p) {
FIND_BITMAP(bmap, p);
if (bmap_tmp != bmap) printf("diffrence bmap %p : %p\n", bmap_tmp, bmap);
MARK_IN_BITMAP(bmap, p);
}
else {
MARK_IN_BITMAP(bmap, p);
}
if (!MARKED_IN_BITMAP(bmap, p)) printf("not marking! %p\n", p);
p++;
}
for (i = 0; i < 26; i++) {
printf("bitmap[%d] : %x\n", i, bmap->as.bitmap.map[i]);
}
p = first;
while (p < pend) {
if (bmap_tmp != p) {
FIND_BITMAP(bmap, p);
CLEAR_IN_BITMAP(bmap, p);
}
else {
CLEAR_IN_BITMAP(bmap, p);
}
if (MARKED_IN_BITMAP(bmap, p)) printf("not clear! %p\n", p);
p++;
}
for (i = 0; i < 26; i++) {
printf("bitmap[%d] : %x\n", i, bmap->as.bitmap.map[i]);
}
}
static void
assign_heap_slot(rb_objspace_t *objspace)
{
RVALUE *p, *pend, *membase;
size_t hi, lo, mid;
size_t objs;
objs = HEAP_OBJ_LIMIT;
p = (RVALUE*)malloc(HEAP_SIZE);
if (p == 0) {
during_gc = 0;
rb_memerror();
}
membase = p;
if ((VALUE)p % sizeof(RVALUE) != 0) {
p = (RVALUE*)((VALUE)p + sizeof(RVALUE) - ((VALUE)p % sizeof(RVALUE)));
}
lo = 0;
hi = heaps_used;
while (lo < hi) {
register RVALUE *mid_membase;
mid = (lo + hi) / 2;
mid_membase = heaps[mid].membase;
if (mid_membase < membase) {
lo = mid + 1;
}
else if (mid_membase > membase) {
hi = mid;
}
else {
rb_bug("same heap slot is allocated: %p at %"PRIuVALUE, (void *)membase, (VALUE)mid);
}
}
if (hi < heaps_used) {
MEMMOVE(&heaps[hi+1], &heaps[hi], struct heaps_slot, heaps_used - hi);
}
heaps[hi].membase = membase;
heaps[hi].slot = p;
heaps[hi].limit = objs;
pend = p + objs;
if (lomem == 0 || lomem > p) lomem = p;
if (himem < pend) himem = pend;
heaps_used++;
make_bitmap(&heaps[hi]);
while (p < pend) {
if (BUILTIN_TYPE(p) != T_BITMAP) {
p->as.free.next = freelist;
freelist = p;
}
p++;
}
}
static void
init_heap(rb_objspace_t *objspace)
{
size_t add, i;
add = HEAP_MIN_SLOTS / HEAP_OBJ_LIMIT;
if (!add) {
add = 1;
}
if ((heaps_used + add) > heaps_length) {
allocate_heaps(objspace, heaps_used + add);
}
for (i = 0; i < add; i++) {
assign_heap_slot(objspace);
}
heaps_inc = 0;
objspace->profile.invoke_time = getrusage_time();
}
static void
set_heaps_increment(rb_objspace_t *objspace)
{
size_t next_heaps_length = (size_t)(heaps_used * 1.8);
if (next_heaps_length == heaps_used) {
next_heaps_length++;
}
heaps_inc = next_heaps_length - heaps_used;
if (next_heaps_length > heaps_length) {
allocate_heaps(objspace, next_heaps_length);
}
}
static int
heaps_increment(rb_objspace_t *objspace)
{
if (heaps_inc > 0) {
assign_heap_slot(objspace);
heaps_inc--;
return TRUE;
}
return FALSE;
}
#define RANY(o) ((RVALUE*)(o))
static VALUE
rb_newobj_from_heap(rb_objspace_t *objspace)
{
VALUE obj;
int bmap_left = 0;
if ((ruby_gc_stress && !ruby_disable_gc_stress) || !freelist) {
if (!heaps_increment(objspace) && !garbage_collect(objspace)) {
during_gc = 0;
rb_memerror();
}
}
obj = (VALUE)freelist;
freelist = freelist->as.free.next;
if (RANY(obj)->as.free.flags & FL_ALIGNOFF) {
bmap_left = Qtrue;
}
MEMZERO((void*)obj, RVALUE, 1);
if (bmap_left) {
RANY(obj)->as.free.flags = FL_ALIGNOFF;
}
#ifdef GC_DEBUG
RANY(obj)->file = rb_sourcefile();
RANY(obj)->line = rb_sourceline();
#endif
return obj;
}
/* TODO: remove this function. */
#if USE_VALUE_CACHE
static VALUE
rb_fill_value_cache(rb_thread_t *th)
{
rb_objspace_t *objspace = gc_inner->get_objspace();
int i;
VALUE rv;
RVALUE *bmap;
/* LOCK */
for (i=0; i<RUBY_VM_VALUE_CACHE_SIZE; i++) {
VALUE v = rb_newobj_from_heap(objspace);
th->value_cache[i] = v;
FIND_BITMAP(bmap, v);
MARK_IN_BITMAP(bmap, v);
}
th->value_cache_ptr = &th->value_cache[0];
rv = rb_newobj_from_heap(objspace);
/* UNLOCK */
return rv;
}
#endif
static int
rb_during_gc_tmp(void)
{
rb_objspace_t *objspace = gc_inner->get_objspace();
return during_gc;
}
static VALUE
rb_newobj_tmp(void)
{
#if USE_VALUE_CACHE
rb_thread_t *th = GET_THREAD();
VALUE v = *th->value_cache_ptr;
#endif
rb_objspace_t *objspace = gc_inner->get_objspace();
if (during_gc) {
dont_gc = 1;
during_gc = 0;
rb_bug("object allocation during garbage collection phase");
}
#if USE_VALUE_CACHE
if (v) {
rb_set_flag_force(v, 0);
th->value_cache_ptr++;
}
else {
v = rb_fill_value_cache(th);
}
#if defined(GC_DEBUG)
printf("cache index: %d, v: %p, th: %p\n",
th->value_cache_ptr - th->value_cache, v, th);
#endif
return v;
#else
return rb_newobj_from_heap(objspace);
#endif
}
static void
rb_set_flag_force_tmp(VALUE obj, VALUE t)
{
t = t & ~FL_ALIGNOFF;
if (RBASIC(obj)->flags & FL_ALIGNOFF) {
RBASIC(obj)->flags = FL_ALIGNOFF | t;
}
else {
RBASIC(obj)->flags = t;
}
}
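/*
 * Illustrative sketch (not part of the patch): rb_set_flag_force_tmp()
 * overwrites an object's flags while preserving FL_ALIGNOFF, because
 * FIND_BITMAP needs that bit to locate the slot's bitmap (the same reason
 * rb_newobj_from_heap() restores it after MEMZERO).  The flag values below
 * are placeholders, not Ruby's real FL_* constants.
 */
#include <stdio.h>
#define DEMO_FL_ALIGNOFF 0x40u   /* stand-in for FL_ALIGNOFF */
static unsigned int
demo_set_flag_force(unsigned int flags, unsigned int t)
{
    t &= ~DEMO_FL_ALIGNOFF;                   /* caller cannot touch the alignment bit */
    return (flags & DEMO_FL_ALIGNOFF) ? (DEMO_FL_ALIGNOFF | t) : t;
}
int
main(void)
{
    unsigned int flags = DEMO_FL_ALIGNOFF | 0x05u;    /* aligned-off object, some type bits */
    flags = demo_set_flag_force(flags, 0x08u);        /* force a new type */
    printf("flags = 0x%x\n", flags);                  /* 0x48: FL_ALIGNOFF survived */
    return 0;
}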
static VALUE
rb_data_object_alloc_tmp(VALUE klass, void *datap, RUBY_DATA_FUNC dmark,
RUBY_DATA_FUNC dfree)
{
NEWOBJ(data, struct RData);
if (klass) Check_Type(klass, T_CLASS);
OBJSETUP(data, klass, T_DATA);
data->data = datap;
data->dfree = dfree;
data->dmark = dmark;
return (VALUE)data;
}
static VALUE
rb_data_typed_object_alloc_tmp(VALUE klass, void *datap,
const rb_data_type_t *type)
{
NEWOBJ(data, struct RTypedData);
if (klass) Check_Type(klass, T_CLASS);
OBJSETUP(data, klass, T_DATA);
data->data = datap;
data->typed_flag = 1;
data->type = type;
return (VALUE)data;
}
static size_t
rb_objspace_data_type_memsize_tmp(VALUE obj)
{
if (RTYPEDDATA_P(obj)) {
return RTYPEDDATA_TYPE(obj)->dsize(RTYPEDDATA_DATA(obj));
}
else {
return 0;
}
}
static const char *
rb_objspace_data_type_name_tmp(VALUE obj)
{
if (RTYPEDDATA_P(obj)) {
return RTYPEDDATA_TYPE(obj)->wrap_struct_name;
}
else {
return 0;
}
}
#ifdef __ia64
#define SET_STACK_END (SET_MACHINE_STACK_END(&th->machine_stack_end), th->machine_register_stack_end = rb_ia64_bsp())
#else
#define SET_STACK_END SET_MACHINE_STACK_END(&th->machine_stack_end)
#endif
#define STACK_START (th->machine_stack_start)
#define STACK_END (th->machine_stack_end)
#define STACK_LEVEL_MAX (th->machine_stack_maxsize/sizeof(VALUE))
#if STACK_GROW_DIRECTION < 0
# define STACK_LENGTH (size_t)(STACK_START - STACK_END)
#elif STACK_GROW_DIRECTION > 0
# define STACK_LENGTH (size_t)(STACK_END - STACK_START + 1)
#else
# define STACK_LENGTH ((STACK_END < STACK_START) ? (size_t)(STACK_START - STACK_END) \
: (size_t)(STACK_END - STACK_START + 1))
#endif
#if !STACK_GROW_DIRECTION
int ruby_stack_grow_direction;
static int
ruby_get_stack_grow_direction_tmp(volatile VALUE *addr)
{
VALUE *end;
SET_MACHINE_STACK_END(&end);
if (end > addr) return ruby_stack_grow_direction = 1;
return ruby_stack_grow_direction = -1;
}
#endif
#define GC_WATER_MARK 512
static int
ruby_stack_check_tmp(void)
{
#if defined(POSIX_SIGNAL) && defined(SIGSEGV) && defined(HAVE_SIGALTSTACK)
return 0;
#else
return gc_inner->stack_check();
#endif
}
static void
init_mark_stack(rb_objspace_t *objspace)
{
mark_stack_overflow = 0;
mark_stack_ptr = mark_stack;
}
#define MARK_STACK_EMPTY (mark_stack_ptr == mark_stack)
static void gc_mark(rb_objspace_t *objspace, VALUE ptr, int lev);
#define IS_FREE_CELL(obj) ((obj->as.basic.flags & ~(FL_ALIGNOFF)) == 0)
static void
gc_mark_all(rb_objspace_t *objspace)
{
RVALUE *p, *pend, *bmap;
size_t i;
init_mark_stack(objspace);
for (i = 0; i < heaps_used; i++) {
p = heaps[i].slot; pend = p + heaps[i].limit;
bmap = heaps[i].bitmap;
while (p < pend) {
if (MARKED_IN_BITMAP(bmap, p) &&
!(IS_FREE_CELL(p))) {
gc_inner->gc_mark_children(objspace, (VALUE)p, 0);
}
p++;
}
}
}
static void
gc_mark_rest(rb_objspace_t *objspace)
{
VALUE tmp_arry[MARK_STACK_MAX];
VALUE *p;
p = (mark_stack_ptr - mark_stack) + tmp_arry;
MEMCPY(tmp_arry, mark_stack, VALUE, p - tmp_arry);
init_mark_stack(objspace);
while (p != tmp_arry) {
p--;
gc_inner->gc_mark_children(objspace, *p, 0);
}
}
static inline int
is_pointer_to_heap(rb_objspace_t *objspace, void *ptr)
{
register RVALUE *p = RANY(ptr);
register struct heaps_slot *heap;
register size_t hi, lo, mid;
if (p < lomem || p > himem) return FALSE;
if ((VALUE)p % sizeof(RVALUE) != 0) return FALSE;
/* check whether p looks like a pointer into the heap, using a binary search */
lo = 0;
hi = heaps_used;
while (lo < hi) {
mid = (lo + hi) / 2;
heap = &heaps[mid];
if (heap->slot <= p) {
if (p < heap->slot + heap->limit)
return TRUE;
lo = mid + 1;
}
else {
hi = mid;
}
}
return FALSE;
}
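/*
 * Illustrative sketch (not part of the patch): the same lower-bound binary
 * search used by is_pointer_to_heap() above, run over a toy table of address
 * ranges sorted by base.  The base addresses, the 410-object limit and the
 * 40-byte cell size are made up for the demo.
 */
#include <stdio.h>
#include <stddef.h>
struct demo_range { unsigned long base; unsigned long limit; };
static int
demo_in_heap(const struct demo_range *tbl, size_t used, unsigned long p)
{
    size_t lo = 0, hi = used, mid;
    while (lo < hi) {
        mid = (lo + hi) / 2;
        if (tbl[mid].base <= p) {
            if (p < tbl[mid].base + tbl[mid].limit * 40) return 1;
            lo = mid + 1;
        }
        else {
            hi = mid;
        }
    }
    return 0;
}
int
main(void)
{
    struct demo_range heaps[] = { {0x10000, 410}, {0x40000, 410}, {0x80000, 410} };
    printf("%d\n", demo_in_heap(heaps, 3, 0x40000 + 40 * 5));  /* 1: inside the 2nd slot */
    printf("%d\n", demo_in_heap(heaps, 3, 0x70000));           /* 0: between slots       */
    return 0;
}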
static void
mark_locations_array(rb_objspace_t *objspace, register VALUE *x, register long n)
{
VALUE v;
while (n--) {
v = *x;
VALGRIND_MAKE_MEM_DEFINED(&v, sizeof(v));
if (is_pointer_to_heap(objspace, (void *)v)) {
gc_mark(objspace, v, 0);
}
x++;
}
}
static void
gc_mark_locations(rb_objspace_t *objspace, VALUE *start, VALUE *end)
{
long n;
if (end <= start) return;
n = end - start;
mark_locations_array(objspace, start, n);
}
static void
rb_gc_mark_locations_tmp(VALUE *start, VALUE *end)
{
gc_mark_locations(gc_inner->get_objspace(), start, end);
}
#define rb_gc_mark_locations(start, end) gc_mark_locations(objspace, start, end)
struct mark_tbl_arg {
rb_objspace_t *objspace;
int lev;
};
static int
mark_entry(ID key, VALUE value, st_data_t data)
{
struct mark_tbl_arg *arg = (void*)data;
gc_mark(arg->objspace, value, arg->lev);
return ST_CONTINUE;
}
static void
mark_tbl(rb_objspace_t *objspace, st_table *tbl, int lev)
{
struct mark_tbl_arg arg;
if (!tbl) return;
arg.objspace = objspace;
arg.lev = lev;
st_foreach(tbl, mark_entry, (st_data_t)&arg);
}
static int
mark_key(VALUE key, VALUE value, st_data_t data)
{
struct mark_tbl_arg *arg = (void*)data;
gc_mark(arg->objspace, key, arg->lev);
return ST_CONTINUE;
}
static void
mark_set(rb_objspace_t *objspace, st_table *tbl, int lev)
{
struct mark_tbl_arg arg;
if (!tbl) return;
arg.objspace = objspace;
arg.lev = lev;
st_foreach(tbl, mark_key, (st_data_t)&arg);
}
static void
rb_mark_set_tmp(st_table *tbl)
{
mark_set(gc_inner->get_objspace(), tbl, 0);
}
static int
mark_keyvalue(VALUE key, VALUE value, st_data_t data)
{
struct mark_tbl_arg *arg = (void*)data;
gc_mark(arg->objspace, key, arg->lev);
gc_mark(arg->objspace, value, arg->lev);
return ST_CONTINUE;
}
static void
mark_hash(rb_objspace_t *objspace, st_table *tbl, int lev)
{
struct mark_tbl_arg arg;
if (!tbl) return;
arg.objspace = objspace;
arg.lev = lev;
st_foreach(tbl, mark_keyvalue, (st_data_t)&arg);
}
static int
mark_method_entry_i(ID key, const rb_method_entry_t *me, st_data_t data)
{
struct mark_tbl_arg *arg = (void*)data;
gc_inner->mark_method_entry(arg->objspace, me, arg->lev);
... This diff was truncated because it exceeds the maximum size that can be displayed.