
Feature #2471 » extgc.patch

wanabe (_wanabe), 04/01/2010 12:01 AM


dln.c (working copy)
 #define translit_separator(str) (void)(str)
 #endif
 
+int
+dln_loadable(void)
+{
+    return 1;
+}
+
 void*
 dln_load(const char *file)
 {
dmydln.c (working copy)
 #include "ruby/ruby.h"
 
+int
+dln_loadable(void)
+{
+    return 0;
+}
+
 void*
 dln_load(const char *file)
 {
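Together, these two hunks give callers a runtime probe for dynamic-loading support: the real dln.c reports 1, while the dummy dmydln.c linked into static builds reports 0. A minimal sketch of how a caller might use the new predicate (load_gc_ext and its message are hypothetical, not part of the patch):

#include <stdio.h>

int dln_loadable(void);          /* added by this patch: 1 in dln.c, 0 in dmydln.c */
void *dln_load(const char *file);

/* Hypothetical caller: only attempt dynamic loading when the build
 * supports it, instead of failing inside dln_load. */
static void *
load_gc_ext(const char *path)
{
    if (!dln_loadable()) {
        fprintf(stderr, "this ruby is statically linked; cannot load %s\n", path);
        return NULL;
    }
    return dln_load(path);
}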
error.c (working copy)
     VALUE mesg;
 
     va_start(args, fmt);
+    if (!gc_decided()) {
+	vfprintf(stderr, fmt, args);
+	va_end(args);
+	abort();
+    }
     mesg = rb_vsprintf(fmt, args);
     va_end(args);
     rb_exc_raise(rb_exc_new3(rb_eLoadError, mesg));
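This guard makes the load-error path safe before a GC implementation has been chosen: raising rb_eLoadError would allocate objects, so the message is printed raw and the process aborts instead. gc_decided() itself is not shown in this excerpt; a plausible minimal form, assuming the selected implementation is recorded in a pointer (the names below are hypothetical):

/* Sketch only: the real predicate lives elsewhere in the patch.
 * Assume chosen_gc is set once a GC implementation is selected. */
static const void *chosen_gc;   /* hypothetical selection state */

static int
gc_decided(void)
{
    return chosen_gc != 0;      /* no allocation; safe during early boot */
}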
ext/gc_bmp/extconf.rb (working copy)
require 'mkmf'
create_makefile("gc_bmp")
ext/gc_bmp/gc_bmp.c (working copy)
/**********************************************************************

  gc_bmp.c -

  $Author$
  created at: Tue Oct  5 09:44:46 JST 1993

  Copyright (C) 1993-2007 Yukihiro Matsumoto
  Copyright (C) 2000  Network Applied Communication Laboratory, Inc.
  Copyright (C) 2000  Information-technology Promotion Agency, Japan

**********************************************************************/

#include "ruby.h"
#include "ruby/re.h"
#include "ruby/io.h"
#include <stdio.h>
#include <setjmp.h>
#include <sys/types.h>

#ifndef FALSE
# define FALSE 0
#elif FALSE
# error FALSE must be false
#endif
#ifndef TRUE
# define TRUE 1
#elif !TRUE
# error TRUE must be true
#endif

#ifdef HAVE_SYS_TIME_H
#include <sys/time.h>
#endif

#ifdef HAVE_SYS_RESOURCE_H
#include <sys/resource.h>
#endif

#if defined _WIN32 || defined __CYGWIN__
#include <windows.h>
#endif

#ifdef HAVE_VALGRIND_MEMCHECK_H
# include <valgrind/memcheck.h>
# ifndef VALGRIND_MAKE_MEM_DEFINED
#  define VALGRIND_MAKE_MEM_DEFINED(p, n) VALGRIND_MAKE_READABLE(p, n)
# endif
# ifndef VALGRIND_MAKE_MEM_UNDEFINED
#  define VALGRIND_MAKE_MEM_UNDEFINED(p, n) VALGRIND_MAKE_WRITABLE(p, n)
# endif
#else
# define VALGRIND_MAKE_MEM_DEFINED(p, n) /* empty */
# define VALGRIND_MAKE_MEM_UNDEFINED(p, n) /* empty */
#endif

int rb_io_fptr_finalize(struct rb_io_t*);

#define rb_setjmp(env) RUBY_SETJMP(env)
#define rb_jmp_buf rb_jmpbuf_t

/* Make alloca work the best possible way.  */
#ifdef __GNUC__
# ifndef atarist
#  ifndef alloca
#   define alloca __builtin_alloca
#  endif
# endif /* atarist */
#else
# ifdef HAVE_ALLOCA_H
#  include <alloca.h>
# else
#  ifdef _AIX
 #pragma alloca
#  else
#   ifndef alloca /* predefined by HP cc +Olibcalls */
void *alloca ();
#   endif
#  endif /* AIX */
# endif /* HAVE_ALLOCA_H */
#endif /* __GNUC__ */

#ifndef GC_MALLOC_LIMIT
#define GC_MALLOC_LIMIT 8000000
#endif

#define MARK_STACK_MAX 1024

/* for GC profile */
#define GC_PROFILE_MORE_DETAIL 1
typedef struct gc_profile_record {
    double gc_time;
    double gc_mark_time;
    double gc_sweep_time;
    double gc_invoke_time;

    size_t heap_use_slots;
    size_t heap_live_objects;
    size_t heap_free_objects;
    size_t heap_total_objects;
    size_t heap_use_size;
    size_t heap_total_size;

    int have_finalize;

    size_t allocate_increase;
    size_t allocate_limit;
} gc_profile_record;

static double
getrusage_time(void)
{
#ifdef RUSAGE_SELF
    struct rusage usage;
    struct timeval time;
    getrusage(RUSAGE_SELF, &usage);
    time = usage.ru_utime;
    return time.tv_sec + time.tv_usec * 1e-6;
#elif defined _WIN32
    FILETIME creation_time, exit_time, kernel_time, user_time;
    ULARGE_INTEGER ui;
    LONG_LONG q;
    double t;

    if (GetProcessTimes(GetCurrentProcess(),
			&creation_time, &exit_time, &kernel_time, &user_time) == 0)
    {
	return 0.0;
    }
    memcpy(&ui, &user_time, sizeof(FILETIME));
    q = ui.QuadPart / 10L;
    t = (DWORD)(q % 1000000L) * 1e-6;
    q /= 1000000L;
#ifdef __GNUC__
    t += q;
#else
    t += (double)(DWORD)(q >> 16) * (1 << 16);
    t += (DWORD)q & ~(~0 << 16);
#endif
    return t;
#else
    return 0.0;
#endif
}

#define GC_PROF_TIMER_START do {\
	if (objspace->profile.run) {\
	    if (!objspace->profile.record) {\
		objspace->profile.size = 1000;\
		objspace->profile.record = malloc(sizeof(gc_profile_record) * objspace->profile.size);\
	    }\
	    if (count >= objspace->profile.size) {\
		objspace->profile.size += 1000;\
		objspace->profile.record = realloc(objspace->profile.record, sizeof(gc_profile_record) * objspace->profile.size);\
	    }\
	    if (!objspace->profile.record) {\
		rb_bug("gc_profile malloc or realloc miss");\
	    }\
	    MEMZERO(&objspace->profile.record[count], gc_profile_record, 1);\
	    gc_time = getrusage_time();\
	    objspace->profile.record[count].gc_invoke_time = gc_time - objspace->profile.invoke_time;\
	}\
    } while(0)

#define GC_PROF_TIMER_STOP do {\
	if (objspace->profile.run) {\
	    gc_time = getrusage_time() - gc_time;\
	    if (gc_time < 0) gc_time = 0;\
	    objspace->profile.record[count].gc_time = gc_time;\
	    objspace->profile.count++;\
	}\
    } while(0)

#if GC_PROFILE_MORE_DETAIL
#define INIT_GC_PROF_PARAMS double gc_time = 0, mark_time = 0, sweep_time = 0;\
    size_t count = objspace->profile.count

#define GC_PROF_MARK_TIMER_START do {\
	if (objspace->profile.run) {\
	    mark_time = getrusage_time();\
	}\
    } while(0)

#define GC_PROF_MARK_TIMER_STOP do {\
	if (objspace->profile.run) {\
	    mark_time = getrusage_time() - mark_time;\
	    if (mark_time < 0) mark_time = 0;\
	    objspace->profile.record[count].gc_mark_time = mark_time;\
	}\
    } while(0)

#define GC_PROF_SWEEP_TIMER_START do {\
	if (objspace->profile.run) {\
	    sweep_time = getrusage_time();\
	}\
    } while(0)

#define GC_PROF_SWEEP_TIMER_STOP do {\
	if (objspace->profile.run) {\
	    sweep_time = getrusage_time() - sweep_time;\
	    if (sweep_time < 0) sweep_time = 0;\
	    objspace->profile.record[count].gc_sweep_time = sweep_time;\
	}\
    } while(0)
#define GC_PROF_SET_MALLOC_INFO do {\
	if (objspace->profile.run) {\
	    gc_profile_record *record = &objspace->profile.record[objspace->profile.count];\
	    record->allocate_increase = malloc_increase;\
	    record->allocate_limit = malloc_limit; \
	}\
    } while(0)
#define GC_PROF_SET_HEAP_INFO do {\
	if (objspace->profile.run) {\
	    gc_profile_record *record = &objspace->profile.record[objspace->profile.count];\
	    record->heap_use_slots = heaps_used;\
	    record->heap_live_objects = live;\
	    record->heap_free_objects = freed; \
	    record->heap_total_objects = heaps_used * HEAP_OBJ_LIMIT;\
	    record->have_finalize = final_list ? Qtrue : Qfalse;\
	    record->heap_use_size = live * sizeof(RVALUE); \
	    record->heap_total_size = heaps_used * (HEAP_OBJ_LIMIT * sizeof(RVALUE));\
	}\
    } while(0)
#else
#define INIT_GC_PROF_PARAMS double gc_time = 0;\
    size_t count = objspace->profile.count
#define GC_PROF_MARK_TIMER_START
#define GC_PROF_MARK_TIMER_STOP
#define GC_PROF_SWEEP_TIMER_START
#define GC_PROF_SWEEP_TIMER_STOP
#define GC_PROF_SET_MALLOC_INFO
#define GC_PROF_SET_HEAP_INFO do {\
	if (objspace->profile.run) {\
	    gc_profile_record *record = &objspace->profile.record[objspace->profile.count];\
	    record->heap_total_objects = heaps_used * HEAP_OBJ_LIMIT;\
	    record->heap_use_size = live * sizeof(RVALUE); \
	    record->heap_total_size = heaps_used * HEAP_SIZE;\
	}\
    } while(0)
#endif
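The timer macros above assume a local objspace and the gc_time/mark_time/sweep_time/count locals declared by INIT_GC_PROF_PARAMS. A sketch of the intended call pattern inside a collector (the mark and sweep calls are placeholders, and rb_objspace_t is defined a little further down; this assumes the rest of the file is in scope):

static int
garbage_collect_sketch(rb_objspace_t *objspace)
{
    INIT_GC_PROF_PARAMS;      /* declares gc_time, mark_time, sweep_time, count */

    GC_PROF_TIMER_START;      /* grows the record array, stamps invoke time */
    GC_PROF_MARK_TIMER_START;
    /* ... mark phase ... */
    GC_PROF_MARK_TIMER_STOP;
    GC_PROF_SWEEP_TIMER_START;
    /* ... sweep phase ... */
    GC_PROF_SWEEP_TIMER_STOP;
    GC_PROF_TIMER_STOP;       /* records total gc_time, bumps profile.count */
    return TRUE;
}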


#if defined(_MSC_VER) || defined(__BORLANDC__) || defined(__CYGWIN__)
#pragma pack(push, 1) /* magic for reducing sizeof(RVALUE): 24 -> 20 */
#endif

typedef struct RVALUE {
    union {
	struct {
	    VALUE flags;		/* always 0 for freed obj */
	    struct RVALUE *next;
	} free;
	struct {
	    VALUE flags;
	    struct RVALUE *next;
	    int *map;
	    VALUE slot;
	    int limit;
	} bitmap;
	struct RBasic  basic;
	struct RObject object;
	struct RClass  klass;
	struct RFloat  flonum;
	struct RString string;
	struct RArray  array;
	struct RRegexp regexp;
	struct RHash   hash;
	struct RData   data;
	struct RTypedData   typeddata;
	struct RStruct rstruct;
	struct RBignum bignum;
	struct RFile   file;
	struct RMatch  match;
	struct RRational rational;
	struct RComplex complex;
    } as;
#ifdef GC_DEBUG
    const char *file;
    int   line;
#endif
} RVALUE;

#if defined(_MSC_VER) || defined(__BORLANDC__) || defined(__CYGWIN__)
#pragma pack(pop)
#endif

struct heaps_slot {
    void *membase;
    RVALUE *slot;
    size_t limit;
    RVALUE *bitmap;
};

#define HEAP_MIN_SLOTS 10000
#define FREE_MIN  4096

struct gc_list {
    VALUE *varptr;
    struct gc_list *next;
};

#define CALC_EXACT_MALLOC_SIZE 0

typedef struct rb_objspace {
    struct {
	size_t limit;
	size_t increase;
#if CALC_EXACT_MALLOC_SIZE
	size_t allocated_size;
	size_t allocations;
#endif
    } malloc_params;
    struct {
	size_t increment;
	struct heaps_slot *ptr;
	size_t length;
	size_t used;
	RVALUE *freelist;
	RVALUE *range[2];
	RVALUE *freed;
    } heap;
    struct {
	int dont_gc;
	int during_gc;
    } flags;
    struct {
	st_table *table;
	RVALUE *deferred;
    } final;
    struct {
	VALUE buffer[MARK_STACK_MAX];
	VALUE *ptr;
	int overflow;
    } markstack;
    struct {
	int run;
	gc_profile_record *record;
	size_t count;
	size_t size;
	double invoke_time;
    } profile;
    struct gc_list *global_list;
    unsigned int count;
    int gc_stress;

    struct {
	RVALUE *freed_bitmap;
    } ext_heap;
} rb_objspace_t;
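rb_objspace_t gathers the collector's entire state into one struct so an external GC can be swapped in per objspace. The shorthand macros defined next expand to fields of a local objspace variable; a self-contained toy version of the pattern (the toy_* names are illustrative, not the patch's):

#include <stdio.h>

/* Each macro expands to a field of a local objspace variable, so
 * per-objspace state reads like a global inside every function. */
typedef struct { struct { size_t limit; } malloc_params; } toy_objspace_t;

#define toy_malloc_limit objspace->malloc_params.limit

static void
show_limit(toy_objspace_t *objspace)
{
    printf("limit = %zu\n", toy_malloc_limit);  /* objspace->malloc_params.limit */
}

int
main(void)
{
    toy_objspace_t space = { { 8000000 } };
    show_limit(&space);
    return 0;
}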

#define malloc_limit		objspace->malloc_params.limit
#define malloc_increase 	objspace->malloc_params.increase
#define heap_slots		objspace->heap.slots
#define heaps			objspace->heap.ptr
#define heaps_length		objspace->heap.length
#define heaps_used		objspace->heap.used
#define freelist		objspace->heap.freelist
#define lomem			objspace->heap.range[0]
#define himem			objspace->heap.range[1]
#define heaps_inc		objspace->heap.increment
#define heaps_freed		objspace->heap.freed
#define dont_gc 		objspace->flags.dont_gc
#define during_gc		objspace->flags.during_gc
#define finalizer_table 	objspace->final.table
#define deferred_final_list	objspace->final.deferred
#define mark_stack		objspace->markstack.buffer
#define mark_stack_ptr		objspace->markstack.ptr
#define mark_stack_overflow	objspace->markstack.overflow
#define global_List		objspace->global_list
#define ruby_gc_stress		objspace->gc_stress

#define need_call_final 	(finalizer_table && finalizer_table->num_entries)

static void rb_objspace_call_finalizer(rb_objspace_t *objspace);

#include "ruby/gc_ext.h"
static rb_gc_inner_t *gc_inner;

/* TODO: find a more suitable and safer expression */
#define T_BITMAP (T_FIXNUM + 1)
#define FL_ALIGNOFF FL_MARK

static rb_objspace_t *
rb_objspace_alloc_tmp(void)
{
    rb_objspace_t *objspace = malloc(sizeof(rb_objspace_t));
    memset(objspace, 0, sizeof(*objspace));
    malloc_limit = GC_MALLOC_LIMIT;

    return objspace;
}

static void
rb_objspace_free_tmp(rb_objspace_t *objspace)
{
    rb_objspace_call_finalizer(objspace);
    if (objspace->profile.record) {
	free(objspace->profile.record);
	objspace->profile.record = 0;
    }
    if (global_List) {
	struct gc_list *list, *next;
	for (list = global_List; list; list = next) {
	    next = list->next;
	    free(list);
	}
    }
    if (heaps) {
	size_t i;
	for (i = 0; i < heaps_used; ++i) {
	    free(heaps[i].membase);
	}
	free(heaps);
	heaps_used = 0;
	heaps = 0;
    }
    free(objspace);
}

/* tiny heap size */
/* 32KB */
/*#define HEAP_SIZE 0x8000 */
/* 128KB */
/*#define HEAP_SIZE 0x20000 */
/* 64KB */
/*#define HEAP_SIZE 0x10000 */
/* 16KB */
#define BITMAP_ALIGN 0x4000
/* 8KB */
/*#define HEAP_SIZE 0x2000 */
/* 4KB */
/*#define HEAP_SIZE 0x1000 */
/* 2KB */
/*#define HEAP_SIZE 0x800 */

#define HEAP_SIZE ((BITMAP_ALIGN / sizeof(struct RVALUE) + 2) * sizeof(RVALUE))
#define BITMAP_MASK  (0xFFFFFFFF - BITMAP_ALIGN + 1)
#define HEAP_OBJ_LIMIT (HEAP_SIZE / sizeof(struct RVALUE) - 1)
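The sizing arithmetic is easier to follow with concrete numbers. A self-contained sketch, assuming a 32-bit build where the #pragma pack above brings sizeof(RVALUE) down to 20 bytes (other builds change the constants, not the relationships):

#include <stdio.h>

#define RVALUE_SIZE    20          /* assumed packed 32-bit RVALUE */
#define BITMAP_ALIGN   0x4000      /* 16KB */
#define HEAP_SIZE      ((BITMAP_ALIGN / RVALUE_SIZE + 2) * RVALUE_SIZE)
#define BITMAP_MASK    (0xFFFFFFFF - BITMAP_ALIGN + 1)
#define HEAP_OBJ_LIMIT (HEAP_SIZE / RVALUE_SIZE - 1)

int
main(void)
{
    /* 16420 bytes: just over one 16KB alignment window, so every slot
     * contains at least one BITMAP_ALIGN boundary for its bitmap cell. */
    printf("HEAP_SIZE      = %d\n", HEAP_SIZE);        /* 16420 */
    printf("HEAP_OBJ_LIMIT = %d\n", HEAP_OBJ_LIMIT);   /* 820 objects */
    printf("BITMAP_MASK    = 0x%X\n", BITMAP_MASK);    /* 0xFFFFC000 */
    return 0;
}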

extern VALUE rb_cMutex;
extern st_table *rb_class_tbl;

int ruby_disable_gc_stress = 0;

static void run_final(rb_objspace_t *objspace, VALUE obj);
static int garbage_collect(rb_objspace_t *objspace);

/*
 *  call-seq:
 *    GC.stress                 => true or false
 *
 *  returns current status of GC stress mode.
 */

static VALUE
gc_stress_get(VALUE self)
{
    rb_objspace_t *objspace = gc_inner->get_objspace();
    return ruby_gc_stress ? Qtrue : Qfalse;
}

/*
 *  call-seq:
 *    GC.stress = bool          => bool
 *
 *  updates GC stress mode.
 *
 *  When GC.stress = true, GC is invoked at every GC opportunity:
 *  every memory and object allocation.
 *
 *  Since it makes Ruby very slow, it is only for debugging.
 */

static VALUE
gc_stress_set(VALUE self, VALUE flag)
{
    rb_objspace_t *objspace = gc_inner->get_objspace();
    rb_secure(2);
    ruby_gc_stress = RTEST(flag);
    return flag;
}

/*
 *  call-seq:
 *    GC::Profiler.enable?                 => true or false
 *
 *  returns current status of GC profile mode.
 */

static VALUE
gc_profile_enable_get(VALUE self)
{
    rb_objspace_t *objspace = gc_inner->get_objspace();
    return objspace->profile.run;
}

/*
 *  call-seq:
 *    GC::Profiler.enable          => nil
 *
 *  updates GC profile mode.
 *  starts the GC profiler.
 *
 */

static VALUE
gc_profile_enable(void)
{
    rb_objspace_t *objspace = gc_inner->get_objspace();

    objspace->profile.run = TRUE;
    return Qnil;
}

/*
 *  call-seq:
 *    GC::Profiler.disable          => nil
 *
 *  updates GC profile mode.
 *  stops the GC profiler.
 *
 */

static VALUE
gc_profile_disable(void)
{
    rb_objspace_t *objspace = gc_inner->get_objspace();

    objspace->profile.run = FALSE;
    return Qnil;
}

/*
 *  call-seq:
 *    GC::Profiler.clear          => nil
 *
 *  clears previous profile data.
 *
 */

static VALUE
gc_profile_clear(void)
{
    rb_objspace_t *objspace = gc_inner->get_objspace();
    MEMZERO(objspace->profile.record, gc_profile_record, objspace->profile.size);
    objspace->profile.count = 0;
    return Qnil;
}

static void vm_xfree(rb_objspace_t *objspace, void *ptr);

static void *
vm_xmalloc(rb_objspace_t *objspace, size_t size)
{
    void *mem;

    if ((ssize_t)size < 0) {
	gc_inner->negative_size_allocation_error("negative allocation size (or too big)");
    }
    if (size == 0) size = 1;

#if CALC_EXACT_MALLOC_SIZE
    size += sizeof(size_t);
#endif

    if ((ruby_gc_stress && !ruby_disable_gc_stress) ||
	(malloc_increase+size) > malloc_limit) {
	gc_inner->garbage_collect_with_gvl(objspace);
    }
    mem = malloc(size);
    if (!mem) {
	if (gc_inner->garbage_collect_with_gvl(objspace)) {
	    mem = malloc(size);
	}
	if (!mem) {
	    gc_inner->ruby_memerror();
	}
    }
    malloc_increase += size;

#if CALC_EXACT_MALLOC_SIZE
    objspace->malloc_params.allocated_size += size;
    objspace->malloc_params.allocations++;
    ((size_t *)mem)[0] = size;
    mem = (size_t *)mem + 1;
#endif

    return mem;
}

static void *
vm_xrealloc(rb_objspace_t *objspace, void *ptr, size_t size)
{
    void *mem;

    if ((ssize_t)size < 0) {
	gc_inner->negative_size_allocation_error("negative re-allocation size");
    }
    if (!ptr) return vm_xmalloc(objspace, size);
    if (size == 0) {
	vm_xfree(objspace, ptr);
	return 0;
    }
    if (ruby_gc_stress && !ruby_disable_gc_stress)
	gc_inner->garbage_collect_with_gvl(objspace);

#if CALC_EXACT_MALLOC_SIZE
    size += sizeof(size_t);
    objspace->malloc_params.allocated_size -= size;
    ptr = (size_t *)ptr - 1;
#endif

    mem = realloc(ptr, size);
    if (!mem) {
	if (gc_inner->garbage_collect_with_gvl(objspace)) {
	    mem = realloc(ptr, size);
	}
	if (!mem) {
	    gc_inner->ruby_memerror();
        }
    }
    malloc_increase += size;

#if CALC_EXACT_MALLOC_SIZE
    objspace->malloc_params.allocated_size += size;
    ((size_t *)mem)[0] = size;
    mem = (size_t *)mem + 1;
#endif

    return mem;
}

static void
vm_xfree(rb_objspace_t *objspace, void *ptr)
{
#if CALC_EXACT_MALLOC_SIZE
    size_t size;
    ptr = ((size_t *)ptr) - 1;
    size = ((size_t*)ptr)[0];
    objspace->malloc_params.allocated_size -= size;
    objspace->malloc_params.allocations--;
#endif

    free(ptr);
}

static void *
ruby_xmalloc_tmp(size_t size)
{
    return vm_xmalloc(gc_inner->get_objspace(), size);
}

static void *
ruby_xmalloc2_tmp(size_t n, size_t size)
{
    size_t len = size * n;
    if (n != 0 && size != len / n) {
	rb_raise(rb_eArgError, "malloc: possible integer overflow");
    }
    return vm_xmalloc(gc_inner->get_objspace(), len);
}

static void *
ruby_xcalloc_tmp(size_t n, size_t size)
{
    void *mem = ruby_xmalloc2(n, size);
    memset(mem, 0, n * size);

    return mem;
}

static void *
ruby_xrealloc_tmp(void *ptr, size_t size)
{
    return vm_xrealloc(gc_inner->get_objspace(), ptr, size);
}

static void *
ruby_xrealloc2_tmp(void *ptr, size_t n, size_t size)
{
    size_t len = size * n;
    if (n != 0 && size != len / n) {
	rb_raise(rb_eArgError, "realloc: possible integer overflow");
    }
    return ruby_xrealloc(ptr, len);
}

static void
ruby_xfree_tmp(void *x)
{
    if (x)
	vm_xfree(gc_inner->get_objspace(), x);
}

  
695

  
696
/*
697
 *  call-seq:
698
 *     GC.enable    => true or false
699
 *
700
 *  Enables garbage collection, returning <code>true</code> if garbage
701
 *  collection was previously disabled.
702
 *
703
 *     GC.disable   #=> false
704
 *     GC.enable    #=> true
705
 *     GC.enable    #=> false
706
 *
707
 */
708

  
709
static VALUE
710
rb_gc_enable_tmp(void)
711
{
712
    rb_objspace_t *objspace = gc_inner->get_objspace();
713
    int old = dont_gc;
714

  
715
    dont_gc = FALSE;
716
    return old ? Qtrue : Qfalse;
717
}
718

  
719
/*
720
 *  call-seq:
721
 *     GC.disable    => true or false
722
 *
723
 *  Disables garbage collection, returning <code>true</code> if garbage
724
 *  collection was already disabled.
725
 *
726
 *     GC.disable   #=> false
727
 *     GC.disable   #=> true
728
 *
729
 */
730

  
731
static VALUE
732
rb_gc_disable_tmp(void)
733
{
734
    rb_objspace_t *objspace = gc_inner->get_objspace();
735
    int old = dont_gc;
736

  
737
    dont_gc = TRUE;
738
    return old ? Qtrue : Qfalse;
739
}
740

  
741
extern VALUE rb_mGC;
742

  
743
static void
744
rb_gc_register_address_tmp(VALUE *addr)
745
{
746
    rb_objspace_t *objspace = gc_inner->get_objspace();
747
    struct gc_list *tmp;
748

  
749
    tmp = ALLOC(struct gc_list);
750
    tmp->next = global_List;
751
    tmp->varptr = addr;
752
    global_List = tmp;
753
}
754

  
755
static void
756
rb_gc_unregister_address_tmp(VALUE *addr)
757
{
758
    rb_objspace_t *objspace = gc_inner->get_objspace();
759
    struct gc_list *tmp = global_List;
760

  
761
    if (tmp->varptr == addr) {
762
	global_List = tmp->next;
763
	xfree(tmp);
764
	return;
765
    }
766
    while (tmp->next) {
767
	if (tmp->next->varptr == addr) {
768
	    struct gc_list *t = tmp->next;
769

  
770
	    tmp->next = tmp->next->next;
771
	    xfree(t);
772
	    break;
773
	}
774
	tmp = tmp->next;
775
    }
776
}
777

  
778

  
779
static void
780
allocate_heaps(rb_objspace_t *objspace, size_t next_heaps_length)
781
{
782
    struct heaps_slot *p;
783
    size_t size;
784

  
785
    size = next_heaps_length*sizeof(struct heaps_slot);
786

  
787
    if (heaps_used > 0) {
788
	p = (struct heaps_slot *)realloc(heaps, size);
789
	if (p) heaps = p;
790
    }
791
    else {
792
	p = heaps = (struct heaps_slot *)malloc(size);
793
    }
794

  
795
    if (p == 0) {
796
	during_gc = 0;
797
	rb_memerror();
798
    }
799
    heaps_length = next_heaps_length;
800
}
801

  
802

  
803
#define FIND_BITMAP(res, p) do {\
804
    if (((RVALUE *)p)->as.free.flags & FL_ALIGNOFF) {\
805
        res = (RVALUE *)((((VALUE)p & BITMAP_MASK) + BITMAP_ALIGN) / sizeof(RVALUE) * sizeof(RVALUE)); \
806
    }\
807
    else {\
808
        res = (RVALUE *)(((VALUE)p & BITMAP_MASK) / sizeof(RVALUE) * sizeof(RVALUE));\
809
    }\
810
} while(0)
811

  
812
#define NUM_IN_SLOT(p, slot) (((VALUE)p - (VALUE)slot)/sizeof(RVALUE))
813
#define BITMAP_INDEX(bmap, p) (NUM_IN_SLOT(p, bmap->as.bitmap.slot) / (sizeof(int) * 8))
814
/* #define BITMAP_INDEX(bmap, p) (NUM_IN_SLOT(p, bmap->as.bitmap.slot) >> 5) */
815
#define BITMAP_OFFSET(bmap, p) (NUM_IN_SLOT(p, bmap->as.bitmap.slot) & ((sizeof(int) * 8)-1))
816
#define MARKED_IN_BITMAP(bmap, p) (bmap->as.bitmap.map[BITMAP_INDEX(bmap, p)] & 1 << BITMAP_OFFSET(bmap, p))
817
#define MARK_IN_BITMAP(bmap, p) (bmap->as.bitmap.map[BITMAP_INDEX(bmap, p)] |= 1 << BITMAP_OFFSET(bmap, p))
818
#define CLEAR_IN_BITMAP(bmap, p) (bmap->as.bitmap.map[BITMAP_INDEX(bmap, p)] &= ~(1 << BITMAP_OFFSET(bmap, p)))
819
#define MARKED_IN_BITMAP_DIRECT(map, index, offset) (map[index] & 1 << offset)
820
#define MARK_IN_BITMAP_DIRECT(map, index, offset) (map[index] |= 1 << offset)
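These macros are the heart of the scheme: masking a pointer with BITMAP_MASK (stepping up one BITMAP_ALIGN for objects flagged FL_ALIGNOFF below the boundary) lands on the slot's reserved bitmap RVALUE, and the object's index within the slot selects a word and a bit. A self-contained sketch of the index/offset arithmetic (the addresses are made up, and a 20-byte RVALUE is assumed as in the earlier sketch):

#include <stdio.h>

#define RVALUE_SIZE 20
#define BITS_PER_WORD (sizeof(int) * 8)

/* Mirrors NUM_IN_SLOT / BITMAP_INDEX / BITMAP_OFFSET from the patch. */
static void
locate_bit(unsigned long obj, unsigned long slot_base)
{
    unsigned long num = (obj - slot_base) / RVALUE_SIZE;   /* NUM_IN_SLOT */
    printf("object #%lu -> map[%lu], bit %lu\n",
           num, num / BITS_PER_WORD, num % BITS_PER_WORD);
}

int
main(void)
{
    unsigned long base = 0x10000;                /* pretend slot start */
    locate_bit(base, base);                      /* #0   -> map[0],  bit 0  */
    locate_bit(base + 33 * RVALUE_SIZE, base);   /* #33  -> map[1],  bit 1  */
    locate_bit(base + 819 * RVALUE_SIZE, base);  /* #819 -> map[25], bit 19 */
    return 0;
}

With 820 objects per slot the map needs 26 int words (make_bitmap allocates HEAP_OBJ_LIMIT / 32 + 1 of them), which matches the loop bounds in dump_bitmap and test_bitmap below.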

/* for debug */
void
bitmap_p(RVALUE *p)
{
    RVALUE *bmap;
    int index, offset, marked;

    FIND_BITMAP(bmap, p);
    index = BITMAP_INDEX(bmap, p);
    offset = BITMAP_OFFSET(bmap, p);
    marked = MARKED_IN_BITMAP(bmap, p);
    printf("bitmap : ((RVALUE *)%p)\n", bmap);
    printf("map_index : %d | offset : %d\n", index, offset);
    printf("is mark ? %s\n", marked? "true" : "false");
}

VALUE
find_bitmap(RVALUE *p) {
    RVALUE *res;

    FIND_BITMAP(res, p);
    return (VALUE)res;
}

void
dump_bitmap(RVALUE *bmap) {
    int i;

    for (i = 0; i < 26; i++) {
	printf("dump %p map %d : %d %s\n", bmap, i, bmap->as.bitmap.map[i], bmap->as.bitmap.map[i]? "remain" : "clean");
    }
}

void
bitmap2obj(RVALUE *bmap, int index, int offset)
{
    printf("(RVALUE *)%p\n", (RVALUE *)(bmap->as.bitmap.slot + (index * sizeof(int) * 8 + offset) * sizeof(RVALUE)));
}


static void
make_bitmap(struct heaps_slot *slot)
{
    RVALUE *p, *pend, *bitmap, *last, *border;
    int *map = 0;
    int size;

    p = slot->slot;
    pend = p + slot->limit;
    last = pend - 1;
    RBASIC(last)->flags = 0;
    FIND_BITMAP(bitmap, last);
    if (bitmap < p || pend <= bitmap) {
	rb_bug("not include in heap slot: result bitmap(%p), find (%p), p (%p), pend(%p)", bitmap, last, p, pend);
    }
    border = bitmap;
    if (!((VALUE)border % BITMAP_ALIGN)) {
	border--;
    }
    while (p < pend) {
	if (p <= border) {
	    RBASIC(p)->flags = FL_ALIGNOFF;
	}
	else {
	    RBASIC(p)->flags = 0;
	}
	p++;
    }

    size = sizeof(int) * (HEAP_OBJ_LIMIT / (sizeof(int) * 8)+1);
    map = (int *)malloc(size);
    if (map == 0) {
	rb_memerror();
    }
    MEMZERO(map, int, (size/sizeof(int)));
    bitmap->as.bitmap.flags |= T_BITMAP;
    bitmap->as.bitmap.map = map;
    bitmap->as.bitmap.slot = (VALUE)slot->slot;
    bitmap->as.bitmap.limit = slot->limit;
    slot->bitmap = bitmap;
}

void
test_bitmap(RVALUE *p, RVALUE *pend)
{
    RVALUE *first, *bmap = 0, *bmap_tmp;
    int i;

    first = p;
    FIND_BITMAP(bmap_tmp, p);
    while (p < pend) {
	if (MARKED_IN_BITMAP(bmap, p)) printf("already marking! %p\n", p);
	if (bmap_tmp != p) {
	    FIND_BITMAP(bmap, p);
	    if (bmap_tmp != bmap) printf("difference bmap %p : %p\n", bmap_tmp, bmap);
	    MARK_IN_BITMAP(bmap, p);
	}
	else {
	    MARK_IN_BITMAP(bmap, p);
	}
	if (!MARKED_IN_BITMAP(bmap, p)) printf("not marking! %p\n", p);
	p++;
    }
    for (i = 0; i < 26; i++) {
	printf("bitmap[%d] : %x\n", i, bmap->as.bitmap.map[i]);
    }
    p = first;
    while (p < pend) {
	if (bmap_tmp != p) {
	    FIND_BITMAP(bmap, p);
	    CLEAR_IN_BITMAP(bmap, p);
	}
	else {
	    CLEAR_IN_BITMAP(bmap, p);
	}
	if (MARKED_IN_BITMAP(bmap, p)) printf("not clear! %p\n", p);
	p++;
    }
    for (i = 0; i < 26; i++) {
	printf("bitmap[%d] : %x\n", i, bmap->as.bitmap.map[i]);
    }
}

static void
assign_heap_slot(rb_objspace_t *objspace)
{
    RVALUE *p, *pend, *membase;
    size_t hi, lo, mid;
    size_t objs;

    objs = HEAP_OBJ_LIMIT;
    p = (RVALUE*)malloc(HEAP_SIZE);

    if (p == 0) {
	during_gc = 0;
	rb_memerror();
    }

    membase = p;
    if ((VALUE)p % sizeof(RVALUE) != 0) {
	p = (RVALUE*)((VALUE)p + sizeof(RVALUE) - ((VALUE)p % sizeof(RVALUE)));
    }

    lo = 0;
    hi = heaps_used;
    while (lo < hi) {
	register RVALUE *mid_membase;
	mid = (lo + hi) / 2;
	mid_membase = heaps[mid].membase;
	if (mid_membase < membase) {
	    lo = mid + 1;
	}
	else if (mid_membase > membase) {
	    hi = mid;
	}
	else {
	    rb_bug("same heap slot is allocated: %p at %"PRIuVALUE, (void *)membase, (VALUE)mid);
	}
    }
    if (hi < heaps_used) {
	MEMMOVE(&heaps[hi+1], &heaps[hi], struct heaps_slot, heaps_used - hi);
    }
    heaps[hi].membase = membase;
    heaps[hi].slot = p;
    heaps[hi].limit = objs;
    pend = p + objs;
    if (lomem == 0 || lomem > p) lomem = p;
    if (himem < pend) himem = pend;
    heaps_used++;

    make_bitmap(&heaps[hi]);
    while (p < pend) {
	if (BUILTIN_TYPE(p) != T_BITMAP) {
	    p->as.free.next = freelist;
	    freelist = p;
	}
	p++;
    }
}

static void
init_heap(rb_objspace_t *objspace)
{
    size_t add, i;

    add = HEAP_MIN_SLOTS / HEAP_OBJ_LIMIT;

    if (!add) {
        add = 1;
    }

    if ((heaps_used + add) > heaps_length) {
        allocate_heaps(objspace, heaps_used + add);
    }

    for (i = 0; i < add; i++) {
        assign_heap_slot(objspace);
    }
    heaps_inc = 0;
    objspace->profile.invoke_time = getrusage_time();
}


static void
set_heaps_increment(rb_objspace_t *objspace)
{
    size_t next_heaps_length = (size_t)(heaps_used * 1.8);

    if (next_heaps_length == heaps_used) {
        next_heaps_length++;
    }

    heaps_inc = next_heaps_length - heaps_used;

    if (next_heaps_length > heaps_length) {
	allocate_heaps(objspace, next_heaps_length);
    }
}

static int
heaps_increment(rb_objspace_t *objspace)
{
    if (heaps_inc > 0) {
        assign_heap_slot(objspace);
	heaps_inc--;
	return TRUE;
    }
    return FALSE;
}

  
1052
#define RANY(o) ((RVALUE*)(o))
1053

  
1054
static VALUE
1055
rb_newobj_from_heap(rb_objspace_t *objspace)
1056
{
1057
    VALUE obj;
1058
    int bmap_left = 0;
1059

  
1060
    if ((ruby_gc_stress && !ruby_disable_gc_stress) || !freelist) {
1061
	if (!heaps_increment(objspace) && !garbage_collect(objspace)) {
1062
	    during_gc = 0;
1063
	    rb_memerror();
1064
	}
1065
    }
1066

  
1067
    obj = (VALUE)freelist;
1068
    freelist = freelist->as.free.next;
1069

  
1070
    if (RANY(obj)->as.free.flags & FL_ALIGNOFF) {
1071
	bmap_left = Qtrue;
1072
    }
1073
    MEMZERO((void*)obj, RVALUE, 1);
1074
    if (bmap_left) {
1075
	RANY(obj)->as.free.flags = FL_ALIGNOFF;
1076
    }
1077
#ifdef GC_DEBUG
1078
    RANY(obj)->file = rb_sourcefile();
1079
    RANY(obj)->line = rb_sourceline();
1080
#endif
1081

  
1082
    return obj;
1083
}
1084

  
1085
/* TODO: remove this function. */
1086
#if USE_VALUE_CACHE
1087
static VALUE
1088
rb_fill_value_cache(rb_thread_t *th)
1089
{
1090
    rb_objspace_t *objspace = gc_inner->get_objspace();
1091
    int i;
1092
    VALUE rv;
1093
    RVALUE *bmap;
1094

  
1095
    /* LOCK */
1096
    for (i=0; i<RUBY_VM_VALUE_CACHE_SIZE; i++) {
1097
	VALUE v = rb_newobj_from_heap(objspace);
1098

  
1099
	th->value_cache[i] = v;
1100
	FIND_BITMAP(bmap, v);
1101
	MARK_IN_BITMAP(bmap, v);
1102
    }
1103
    th->value_cache_ptr = &th->value_cache[0];
1104
    rv = rb_newobj_from_heap(objspace);
1105
    /* UNLOCK */
1106
    return rv;
1107
}
1108
#endif
1109

  
static int
rb_during_gc_tmp(void)
{
    rb_objspace_t *objspace = gc_inner->get_objspace();
    return during_gc;
}

static VALUE
rb_newobj_tmp(void)
{
#if USE_VALUE_CACHE
    rb_thread_t *th = GET_THREAD();
    VALUE v = *th->value_cache_ptr;
#endif
    rb_objspace_t *objspace = gc_inner->get_objspace();

    if (during_gc) {
	dont_gc = 1;
	during_gc = 0;
	rb_bug("object allocation during garbage collection phase");
    }

#if USE_VALUE_CACHE
    if (v) {
	rb_set_flag_force(v, 0);
	th->value_cache_ptr++;
    }
    else {
	v = rb_fill_value_cache(th);
    }

#if defined(GC_DEBUG)
    printf("cache index: %d, v: %p, th: %p\n",
	   th->value_cache_ptr - th->value_cache, v, th);
#endif
    return v;
#else
    return rb_newobj_from_heap(objspace);
#endif
}

static void
rb_set_flag_force_tmp(VALUE obj, VALUE t)
{
    t = t & ~FL_ALIGNOFF;
    if (RBASIC(obj)->flags & FL_ALIGNOFF) {
	RBASIC(obj)->flags = FL_ALIGNOFF | t;
    }
    else {
	RBASIC(obj)->flags = t;
    }
}

static VALUE
rb_data_object_alloc_tmp(VALUE klass, void *datap, RUBY_DATA_FUNC dmark,
			     RUBY_DATA_FUNC dfree)
{
    NEWOBJ(data, struct RData);
    if (klass) Check_Type(klass, T_CLASS);
    OBJSETUP(data, klass, T_DATA);
    data->data = datap;
    data->dfree = dfree;
    data->dmark = dmark;

    return (VALUE)data;
}

static VALUE
rb_data_typed_object_alloc_tmp(VALUE klass, void *datap,
				   const rb_data_type_t *type)
{
    NEWOBJ(data, struct RTypedData);

    if (klass) Check_Type(klass, T_CLASS);

    OBJSETUP(data, klass, T_DATA);

    data->data = datap;
    data->typed_flag = 1;
    data->type = type;

    return (VALUE)data;
}

static size_t
rb_objspace_data_type_memsize_tmp(VALUE obj)
{
    if (RTYPEDDATA_P(obj)) {
	return RTYPEDDATA_TYPE(obj)->dsize(RTYPEDDATA_DATA(obj));
    }
    else {
	return 0;
    }
}

static const char *
rb_objspace_data_type_name_tmp(VALUE obj)
{
    if (RTYPEDDATA_P(obj)) {
	return RTYPEDDATA_TYPE(obj)->wrap_struct_name;
    }
    else {
	return 0;
    }
}

#ifdef __ia64
1217
#define SET_STACK_END (SET_MACHINE_STACK_END(&th->machine_stack_end), th->machine_register_stack_end = rb_ia64_bsp())
1218
#else
1219
#define SET_STACK_END SET_MACHINE_STACK_END(&th->machine_stack_end)
1220
#endif
1221

  
1222
#define STACK_START (th->machine_stack_start)
1223
#define STACK_END (th->machine_stack_end)
1224
#define STACK_LEVEL_MAX (th->machine_stack_maxsize/sizeof(VALUE))
1225

  
1226
#if STACK_GROW_DIRECTION < 0
1227
# define STACK_LENGTH  (size_t)(STACK_START - STACK_END)
1228
#elif STACK_GROW_DIRECTION > 0
1229
# define STACK_LENGTH  (size_t)(STACK_END - STACK_START + 1)
1230
#else
1231
# define STACK_LENGTH  ((STACK_END < STACK_START) ? (size_t)(STACK_START - STACK_END) \
1232
			: (size_t)(STACK_END - STACK_START + 1))
1233
#endif
1234
#if !STACK_GROW_DIRECTION
1235
int ruby_stack_grow_direction;
1236
static int
1237
ruby_get_stack_grow_direction_tmp(volatile VALUE *addr)
1238
{
1239
    VALUE *end;
1240
    SET_MACHINE_STACK_END(&end);
1241

  
1242
    if (end > addr) return ruby_stack_grow_direction = 1;
1243
    return ruby_stack_grow_direction = -1;
1244
}
1245
#endif
1246

  
1247
#define GC_WATER_MARK 512
1248

  
1249
static int
1250
ruby_stack_check_tmp(void)
1251
{
1252
#if defined(POSIX_SIGNAL) && defined(SIGSEGV) && defined(HAVE_SIGALTSTACK)
1253
    return 0;
1254
#else
1255
    return gc_inner->stack_check();
1256
#endif
1257
}
1258

  
1259
static void
1260
init_mark_stack(rb_objspace_t *objspace)
1261
{
1262
    mark_stack_overflow = 0;
1263
    mark_stack_ptr = mark_stack;
1264
}
1265

  
1266
#define MARK_STACK_EMPTY (mark_stack_ptr == mark_stack)
1267

  
1268
static void gc_mark(rb_objspace_t *objspace, VALUE ptr, int lev);
1269

  
1270
#define IS_FREE_CELL(obj) ((obj->as.basic.flags & ~(FL_ALIGNOFF)) == 0)
1271

  
1272
static void
1273
gc_mark_all(rb_objspace_t *objspace)
1274
{
1275
    RVALUE *p, *pend, *bmap;
1276
    size_t i;
1277

  
1278
    init_mark_stack(objspace);
1279
    for (i = 0; i < heaps_used; i++) {
1280
	p = heaps[i].slot; pend = p + heaps[i].limit;
1281
	bmap = heaps[i].bitmap;
1282
	while (p < pend) {
1283
	    if (MARKED_IN_BITMAP(bmap, p) &&
1284
		!(IS_FREE_CELL(p))) {
1285
		gc_inner->gc_mark_children(objspace, (VALUE)p, 0);
1286
	    }
1287
	    p++;
1288
	}
1289
    }
1290
}
1291

  
1292
static void
1293
gc_mark_rest(rb_objspace_t *objspace)
1294
{
1295
    VALUE tmp_arry[MARK_STACK_MAX];
1296
    VALUE *p;
1297

  
1298
    p = (mark_stack_ptr - mark_stack) + tmp_arry;
1299
    MEMCPY(tmp_arry, mark_stack, VALUE, p - tmp_arry);
1300

  
1301
    init_mark_stack(objspace);
1302
    while (p != tmp_arry) {
1303
	p--;
1304
	gc_inner->gc_mark_children(objspace, *p, 0);
1305
    }
1306
}
1307

  
1308
static inline int
1309
is_pointer_to_heap(rb_objspace_t *objspace, void *ptr)
1310
{
1311
    register RVALUE *p = RANY(ptr);
1312
    register struct heaps_slot *heap;
1313
    register size_t hi, lo, mid;
1314

  
1315
    if (p < lomem || p > himem) return FALSE;
1316
    if ((VALUE)p % sizeof(RVALUE) != 0) return FALSE;
1317

  
1318
    /* check if p looks like a pointer using bsearch*/
1319
    lo = 0;
1320
    hi = heaps_used;
1321
    while (lo < hi) {
1322
	mid = (lo + hi) / 2;
1323
	heap = &heaps[mid];
1324
	if (heap->slot <= p) {
1325
	    if (p < heap->slot + heap->limit)
1326
		return TRUE;
1327
	    lo = mid + 1;
1328
	}
1329
	else {
1330
	    hi = mid;
1331
	}
1332
    }
1333
    return FALSE;
1334
}
1335

  
1336
static void
1337
mark_locations_array(rb_objspace_t *objspace, register VALUE *x, register long n)
1338
{
1339
    VALUE v;
1340
    while (n--) {
1341
        v = *x;
1342
        VALGRIND_MAKE_MEM_DEFINED(&v, sizeof(v));
1343
	if (is_pointer_to_heap(objspace, (void *)v)) {
1344
	    gc_mark(objspace, v, 0);
1345
	}
1346
	x++;
1347
    }
1348
}
1349

  
1350
static void
1351
gc_mark_locations(rb_objspace_t *objspace, VALUE *start, VALUE *end)
1352
{
1353
    long n;
1354

  
1355
    if (end <= start) return;
1356
    n = end - start;
1357
    mark_locations_array(objspace, start, n);
1358
}
1359

  
1360
static void
1361
rb_gc_mark_locations_tmp(VALUE *start, VALUE *end)
1362
{
1363
    gc_mark_locations(gc_inner->get_objspace(), start, end);
1364
}
1365

  
1366
#define rb_gc_mark_locations(start, end) gc_mark_locations(objspace, start, end)
1367

  
1368
struct mark_tbl_arg {
1369
    rb_objspace_t *objspace;
1370
    int lev;
1371
};
1372

  
1373
static int
1374
mark_entry(ID key, VALUE value, st_data_t data)
1375
{
1376
    struct mark_tbl_arg *arg = (void*)data;
1377
    gc_mark(arg->objspace, value, arg->lev);
1378
    return ST_CONTINUE;
1379
}
1380

  
1381
static void
1382
mark_tbl(rb_objspace_t *objspace, st_table *tbl, int lev)
1383
{
1384
    struct mark_tbl_arg arg;
1385
    if (!tbl) return;
1386
    arg.objspace = objspace;
1387
    arg.lev = lev;
1388
    st_foreach(tbl, mark_entry, (st_data_t)&arg);
1389
}
1390

  
1391
static int
1392
mark_key(VALUE key, VALUE value, st_data_t data)
1393
{
1394
    struct mark_tbl_arg *arg = (void*)data;
1395
    gc_mark(arg->objspace, key, arg->lev);
1396
    return ST_CONTINUE;
1397
}
1398

  
1399
static void
1400
mark_set(rb_objspace_t *objspace, st_table *tbl, int lev)
1401
{
1402
    struct mark_tbl_arg arg;
1403
    if (!tbl) return;
1404
    arg.objspace = objspace;
1405
    arg.lev = lev;
1406
    st_foreach(tbl, mark_key, (st_data_t)&arg);
1407
}
1408

  
1409
static void
1410
rb_mark_set_tmp(st_table *tbl)
1411
{
1412
    mark_set(gc_inner->get_objspace(), tbl, 0);
1413
}
1414

  
1415
static int
1416
mark_keyvalue(VALUE key, VALUE value, st_data_t data)
1417
{
1418
    struct mark_tbl_arg *arg = (void*)data;
1419
    gc_mark(arg->objspace, key, arg->lev);
1420
    gc_mark(arg->objspace, value, arg->lev);
1421
    return ST_CONTINUE;
1422
}
1423

  
1424
static void
1425
mark_hash(rb_objspace_t *objspace, st_table *tbl, int lev)
1426
{
1427
    struct mark_tbl_arg arg;
1428
    if (!tbl) return;
1429
    arg.objspace = objspace;
1430
    arg.lev = lev;
1431
    st_foreach(tbl, mark_keyvalue, (st_data_t)&arg);
1432
}
1433

  
1434
static int
1435
mark_method_entry_i(ID key, const rb_method_entry_t *me, st_data_t data)
1436
{
1437
    struct mark_tbl_arg *arg = (void*)data;
1438
    gc_inner->mark_method_entry(arg->objspace, me, arg->lev);
... This diff was truncated because it exceeds the maximum size that can be displayed.