Feature #2471 » switch_gc.patch

wanabe (_wanabe), 12/10/2009 08:42 AM


gc_lazy.c (revision 0)
/**********************************************************************

  gc_lazy.c -

  $Author: $
  created at:

  Copyright (C) 1993-2007 Yukihiro Matsumoto
  Copyright (C) 2000  Network Applied Communication Laboratory, Inc.
  Copyright (C) 2000  Information-technology Promotion Agency, Japan

**********************************************************************/

#define live			objspace->heap.live
#define dead			objspace->heap.dead
#define do_heap_free		objspace->heap.do_heap_free
#define heaps_sweep_index	objspace->heap.sweep_index
#define heaps_sweep_inc 	objspace->heap.sweep_increment
#define during_sweep		objspace->flags.during_sweep

static int garbage_collect_force_with_gvl(rb_objspace_t *objspace);

static void *
vm_xmalloc_lazy(rb_objspace_t *objspace, size_t size)
{
    void *mem;

    if ((ssize_t)size < 0) {
        negative_size_allocation_error("negative allocation size (or too big)");
    }
    if (size == 0) size = 1;

#if CALC_EXACT_MALLOC_SIZE
    size += sizeof(size_t);
#endif

    if ((ruby_gc_stress && !ruby_disable_gc_stress) ||
        (malloc_increase+size) > malloc_limit) {
        garbage_collect_force_with_gvl(objspace);
    }
    mem = malloc(size);
    if (!mem) {
        if (garbage_collect_force_with_gvl(objspace)) {
            mem = malloc(size);
        }
        if (!mem) {
            ruby_memerror();
        }
    }
    malloc_increase += size;

#if CALC_EXACT_MALLOC_SIZE
    objspace->malloc_params.allocated_size += size;
    objspace->malloc_params.allocations++;
    ((size_t *)mem)[0] = size;
    mem = (size_t *)mem + 1;
#endif

    return mem;
}

static void *
vm_xrealloc_lazy(rb_objspace_t *objspace, void *ptr, size_t size)
{
    void *mem;

    if ((ssize_t)size < 0) {
        negative_size_allocation_error("negative re-allocation size");
    }
    if (!ptr) return vm_xmalloc_lazy(objspace, size);
    if (size == 0) {
        vm_xfree(objspace, ptr);
        return 0;
    }
    if (ruby_gc_stress && !ruby_disable_gc_stress)
        garbage_collect_with_gvl(objspace);

#if CALC_EXACT_MALLOC_SIZE
    size += sizeof(size_t);
    objspace->malloc_params.allocated_size -= size;
    ptr = (size_t *)ptr - 1;
#endif

    mem = realloc(ptr, size);
    if (!mem) {
        if (garbage_collect_force_with_gvl(objspace)) {
            mem = realloc(ptr, size);
        }
        if (!mem) {
            ruby_memerror();
        }
    }
    malloc_increase += size;

#if CALC_EXACT_MALLOC_SIZE
    objspace->malloc_params.allocated_size += size;
    ((size_t *)mem)[0] = size;
    mem = (size_t *)mem + 1;
#endif

    return mem;
}

static void
assign_heap_slot_lazy(rb_objspace_t *objspace)
{
    RVALUE *p, *pend, *membase;
    size_t hi, lo, mid;
    size_t objs;

    objs = HEAP_OBJ_LIMIT;
    p = (RVALUE*)malloc(HEAP_SIZE);

    if (p == 0) {
        during_gc = 0;
        rb_memerror();
    }

    membase = p;
    if ((VALUE)p % sizeof(RVALUE) != 0) {
        p = (RVALUE*)((VALUE)p + sizeof(RVALUE) - ((VALUE)p % sizeof(RVALUE)));
        if ((HEAP_SIZE - HEAP_OBJ_LIMIT * sizeof(RVALUE)) < (size_t)((char*)p - (char*)membase)) {
            objs--;
        }
    }

    lo = 0;
    hi = heaps_used;
    while (lo < hi) {
        register RVALUE *mid_membase;
        mid = (lo + hi) / 2;
        mid_membase = heaps[mid].membase;
        if (mid_membase < membase) {
            lo = mid + 1;
        }
        else if (mid_membase > membase) {
            hi = mid;
        }
        else {
            rb_bug("same heap slot is allocated: %p at %"PRIuVALUE, (void *)membase, (VALUE)mid);
        }
    }
    if (hi < heaps_used) {
        MEMMOVE(&heaps[hi+1], &heaps[hi], struct heaps_slot, heaps_used - hi);
    }
    heaps[hi].membase = membase;
    heaps[hi].slot = p;
    heaps[hi].limit = objs;
    heaps[hi].color = BLACK;
    pend = p + objs;
    if (lomem == 0 || lomem > p) lomem = p;
    if (himem < pend) himem = pend;
    heaps_used++;

    while (p < pend) {
        p->as.free.flags = 0;
        p->as.free.next = freelist;
        freelist = p;
        p++;
    }
    if (hi < heaps_sweep_index) {
        heaps_sweep_index++;
    }
}

static VALUE
rb_newobj_from_heap_lazy(rb_objspace_t *objspace)
{
    VALUE obj;

    if ((ruby_gc_stress && !ruby_disable_gc_stress) || !freelist) {
        if (!garbage_collect(objspace)) {
            during_gc = 0;
            rb_memerror();
        }
    }

    obj = (VALUE)freelist;
    freelist = freelist->as.free.next;

    MEMZERO((void*)obj, RVALUE, 1);
#ifdef GC_DEBUG
    RANY(obj)->file = rb_sourcefile();
    RANY(obj)->line = rb_sourceline();
#endif

    return obj;
}

static void
gc_mark_lazy(rb_objspace_t *objspace, VALUE ptr, int lev)
{
    register RVALUE *obj;

    obj = RANY(ptr);
    if (rb_special_const_p(ptr)) return; /* special const not marked */
    if (obj->as.basic.flags == 0) return;       /* free cell */
    if (obj->as.basic.flags & FL_MARK) return;  /* already marked */
    obj->as.basic.flags |= FL_MARK;
    live++;

    if (lev > GC_LEVEL_MAX || (lev == 0 && stack_check())) {
        if (!mark_stack_overflow) {
            if (mark_stack_ptr - mark_stack < MARK_STACK_MAX) {
                *mark_stack_ptr = ptr;
                mark_stack_ptr++;
            }
            else {
                mark_stack_overflow = 1;
            }
        }
        return;
    }
    gc_mark_children(objspace, ptr, lev+1);
}

static void
gc_mark_children_lazy(rb_objspace_t *objspace, VALUE ptr, int lev)
{
    register RVALUE *obj = RANY(ptr);

    goto marking;		/* skip */

  again:
    obj = RANY(ptr);
    if (rb_special_const_p(ptr)) return; /* special const not marked */
    if (obj->as.basic.flags == 0) return;       /* free cell */
    if (obj->as.basic.flags & FL_MARK) return;  /* already marked */
    obj->as.basic.flags |= FL_MARK;
    live++;

  marking:
    if (FL_TEST(obj, FL_EXIVAR)) {
        rb_mark_generic_ivar(ptr);
    }

    switch (BUILTIN_TYPE(obj)) {
      case T_NIL:
      case T_FIXNUM:
        rb_bug("rb_gc_mark() called for broken object");
        break;

      case T_NODE:
        switch (nd_type(obj)) {
          case NODE_IF:		/* 1,2,3 */
          case NODE_FOR:
          case NODE_ITER:
          case NODE_WHEN:
          case NODE_MASGN:
          case NODE_RESCUE:
          case NODE_RESBODY:
          case NODE_CLASS:
          case NODE_BLOCK_PASS:
            gc_mark(objspace, (VALUE)obj->as.node.u2.node, lev);
            /* fall through */
          case NODE_BLOCK:	/* 1,3 */
          case NODE_OPTBLOCK:
          case NODE_ARRAY:
          case NODE_DSTR:
          case NODE_DXSTR:
          case NODE_DREGX:
          case NODE_DREGX_ONCE:
          case NODE_ENSURE:
          case NODE_CALL:
          case NODE_DEFS:
          case NODE_OP_ASGN1:
          case NODE_ARGS:
            gc_mark(objspace, (VALUE)obj->as.node.u1.node, lev);
            /* fall through */
          case NODE_SUPER:	/* 3 */
          case NODE_FCALL:
          case NODE_DEFN:
          case NODE_ARGS_AUX:
            ptr = (VALUE)obj->as.node.u3.node;
            goto again;

          case NODE_WHILE:
          case NODE_UNTIL:
          case NODE_AND:
          case NODE_OR:
          case NODE_CASE:
          case NODE_SCLASS:
          case NODE_DOT2:
          case NODE_DOT3:
          case NODE_FLIP2:
          case NODE_FLIP3:
          case NODE_MATCH2:
          case NODE_MATCH3:
          case NODE_OP_ASGN_OR:
          case NODE_OP_ASGN_AND:
          case NODE_MODULE:
          case NODE_ALIAS:
          case NODE_VALIAS:
          case NODE_ARGSCAT:
            gc_mark(objspace, (VALUE)obj->as.node.u1.node, lev);
            /* fall through */
          case NODE_GASGN:
          case NODE_LASGN:
          case NODE_DASGN:
          case NODE_DASGN_CURR:
          case NODE_IASGN:
          case NODE_IASGN2:
          case NODE_CVASGN:
          case NODE_COLON3:
          case NODE_OPT_N:
          case NODE_EVSTR:
          case NODE_UNDEF:
          case NODE_POSTEXE:
            ptr = (VALUE)obj->as.node.u2.node;
            goto again;

          case NODE_HASH:	/* 1 */
          case NODE_LIT:
          case NODE_STR:
          case NODE_XSTR:
          case NODE_DEFINED:
          case NODE_MATCH:
          case NODE_RETURN:
          case NODE_BREAK:
          case NODE_NEXT:
          case NODE_YIELD:
          case NODE_COLON2:
          case NODE_SPLAT:
          case NODE_TO_ARY:
            ptr = (VALUE)obj->as.node.u1.node;
            goto again;

          case NODE_SCOPE:	/* 2,3 */
          case NODE_CDECL:
          case NODE_OPT_ARG:
            gc_mark(objspace, (VALUE)obj->as.node.u3.node, lev);
            ptr = (VALUE)obj->as.node.u2.node;
            goto again;

          case NODE_ZARRAY:	/* - */
          case NODE_ZSUPER:
          case NODE_VCALL:
          case NODE_GVAR:
          case NODE_LVAR:
          case NODE_DVAR:
          case NODE_IVAR:
          case NODE_CVAR:
          case NODE_NTH_REF:
          case NODE_BACK_REF:
          case NODE_REDO:
          case NODE_RETRY:
          case NODE_SELF:
          case NODE_NIL:
          case NODE_TRUE:
          case NODE_FALSE:
          case NODE_ERRINFO:
          case NODE_BLOCK_ARG:
            break;
          case NODE_ALLOCA:
            mark_locations_array(objspace,
                                 (VALUE*)obj->as.node.u1.value,
                                 obj->as.node.u3.cnt);
            ptr = (VALUE)obj->as.node.u2.node;
            goto again;

          default:		/* unlisted NODE */
            if (is_pointer_to_heap(objspace, obj->as.node.u1.node)) {
                gc_mark(objspace, (VALUE)obj->as.node.u1.node, lev);
            }
            if (is_pointer_to_heap(objspace, obj->as.node.u2.node)) {
                gc_mark(objspace, (VALUE)obj->as.node.u2.node, lev);
            }
            if (is_pointer_to_heap(objspace, obj->as.node.u3.node)) {
                gc_mark(objspace, (VALUE)obj->as.node.u3.node, lev);
            }
        }
        return;			/* no need to mark class. */
    }

    gc_mark(objspace, obj->as.basic.klass, lev);
    switch (BUILTIN_TYPE(obj)) {
      case T_ICLASS:
      case T_CLASS:
      case T_MODULE:
        mark_m_tbl(objspace, RCLASS_M_TBL(obj), lev);
        mark_tbl(objspace, RCLASS_IV_TBL(obj), lev);
        ptr = RCLASS_SUPER(obj);
        goto again;

      case T_ARRAY:
        if (FL_TEST(obj, ELTS_SHARED)) {
            ptr = obj->as.array.as.heap.aux.shared;
            goto again;
        }
        else {
            long i, len = RARRAY_LEN(obj);
            VALUE *ptr = RARRAY_PTR(obj);
            for (i=0; i < len; i++) {
                gc_mark(objspace, *ptr++, lev);
            }
        }
        break;

      case T_HASH:
        mark_hash(objspace, obj->as.hash.ntbl, lev);
        ptr = obj->as.hash.ifnone;
        goto again;

      case T_STRING:
#define STR_ASSOC FL_USER3   /* copied from string.c */
        if (FL_TEST(obj, RSTRING_NOEMBED) && FL_ANY(obj, ELTS_SHARED|STR_ASSOC)) {
            ptr = obj->as.string.as.heap.aux.shared;
            goto again;
        }
        break;

      case T_DATA:
        if (RTYPEDDATA_P(obj)) {
            if (obj->as.typeddata.type->dmark) (*obj->as.typeddata.type->dmark)(DATA_PTR(obj));
        }
        else {
            if (obj->as.data.dmark) (*obj->as.data.dmark)(DATA_PTR(obj));
        }
        break;

      case T_OBJECT:
        {
            long i, len = ROBJECT_NUMIV(obj);
            VALUE *ptr = ROBJECT_IVPTR(obj);
            for (i = 0; i < len; i++) {
                gc_mark(objspace, *ptr++, lev);
            }
        }
        break;

      case T_FILE:
        if (obj->as.file.fptr) {
            gc_mark(objspace, obj->as.file.fptr->pathv, lev);
            gc_mark(objspace, obj->as.file.fptr->tied_io_for_writing, lev);
            gc_mark(objspace, obj->as.file.fptr->writeconv_asciicompat, lev);
            gc_mark(objspace, obj->as.file.fptr->writeconv_pre_ecopts, lev);
            gc_mark(objspace, obj->as.file.fptr->encs.ecopts, lev);
            gc_mark(objspace, obj->as.file.fptr->write_lock, lev);
        }
        break;

      case T_REGEXP:
        gc_mark(objspace, obj->as.regexp.src, lev);
        break;

      case T_FLOAT:
      case T_BIGNUM:
      case T_ZOMBIE:
        break;

      case T_MATCH:
        gc_mark(objspace, obj->as.match.regexp, lev);
        if (obj->as.match.str) {
            ptr = obj->as.match.str;
            goto again;
        }
        break;

      case T_RATIONAL:
        gc_mark(objspace, obj->as.rational.num, lev);
        gc_mark(objspace, obj->as.rational.den, lev);
        break;

      case T_COMPLEX:
        gc_mark(objspace, obj->as.complex.real, lev);
        gc_mark(objspace, obj->as.complex.imag, lev);
        break;

      case T_STRUCT:
        {
            long len = RSTRUCT_LEN(obj);
            VALUE *ptr = RSTRUCT_PTR(obj);

            while (len--) {
                gc_mark(objspace, *ptr++, lev);
            }
        }
        break;

      default:
        rb_bug("rb_gc_mark(): unknown data type 0x%x(%p) %s",
               BUILTIN_TYPE(obj), (void *)obj,
               is_pointer_to_heap(objspace, obj) ? "corrupted object" : "non object");
    }
}

void rb_gc_abort_threads(void);

static int
slot_sweep(rb_objspace_t *objspace, struct heaps_slot *target)
{
    RVALUE *p, *pend, *free;
    RVALUE *final;
    size_t freed = 0;

    if (target->color == BLACK || target->color == WHITE) {
        return Qfalse;
    }

    final = deferred_final_list;
    free = freelist;
    p = target->slot; pend = p + target->limit;
    while (p < pend) {
        if (!(p->as.basic.flags & FL_MARK)) {
            if (p->as.basic.flags) {
                obj_free(objspace, (VALUE)p);
            }
            if (need_call_final && FL_TEST(p, FL_FINALIZE)) {
                p->as.free.flags = FL_MARK; /* remain marked */
                p->as.free.next = deferred_final_list;
                deferred_final_list = p;
            }
            else {
                VALGRIND_MAKE_MEM_UNDEFINED((void*)p, sizeof(RVALUE));
                p->as.free.flags = 0;
                p->as.free.next = freelist;
                freelist = p;
            }
            freed++;
        }
        else if (RBASIC(p)->flags == FL_MARK) {
            /* objects to be finalized */
            /* do nothing remain marked */
        }
        else {
            p->as.basic.flags &= ~FL_MARK;
        }
        p++;
    }
    dead += freed;
    if (freed == target->limit && dead > do_heap_free) {
        RVALUE *pp;

        target->limit = 0;
        target->color = WHITE;
        for (pp = deferred_final_list; pp != final; pp = pp->as.free.next) {
            pp->as.free.flags |= FL_SINGLETON; /* freeing page mark */
        }
        freelist = free;	/* cancel this page from freelist */
    }
    else {
        target->color = BLACK;
    }
    return Qtrue;
}

static void
heap_sweep_increment(rb_objspace_t *objspace)
{
    size_t i = 0;

    while (i < heaps_sweep_inc && heaps_sweep_index < heaps_used) {
        if (slot_sweep(objspace, &heaps[heaps_sweep_index])) {
            i++;
        }
        heaps_sweep_index++;
    }
}

static void
heap_sweep(rb_objspace_t *objspace)
{
    while (!freelist && heaps_sweep_index < heaps_used) {
        slot_sweep(objspace, &heaps[heaps_sweep_index]);
        heaps_sweep_index++;
    }
}

static int
gc_lazy_sweep(rb_objspace_t *objspace, rb_thread_t *th)
{
    if (heaps_increment(objspace)) {
        heap_sweep_increment(objspace);
    }
    else {
        heap_sweep(objspace);
    }

    if (!freelist) {
        return Qfalse;
    }

    return Qtrue;
}

static void
rb_gc_force_recycle_lazy(VALUE p)
{
    VALGRIND_MAKE_MEM_UNDEFINED((void*)p, sizeof(RVALUE));
    RANY(p)->as.free.flags = 0;
}

static void
gc_mark_all_clear(rb_objspace_t *objspace)
{
    RVALUE *last = 0;
    size_t i, j;

    for (i = j = 0; j < heaps_used; i++) {
        if (heaps[i].color == WHITE && !deferred_final_list) {
            if (!last) {
                last = heaps[i].membase;
            }
            else {
                free(heaps[i].membase);
            }
            heaps_used--;
        }
        else {
            if (heaps[i].color == GRAY) {
                RVALUE *p, *pend;
                p = heaps[i].slot; pend = p + heaps[i].limit;
                while (p < pend) {
                    if (!(RBASIC(p)->flags & FL_MARK)) {
                        if (p->as.basic.flags && !FL_TEST(p, FL_FINALIZE)) {
                            obj_free(objspace, (VALUE)p);
                            VALGRIND_MAKE_MEM_UNDEFINED((void*)p, sizeof(RVALUE));
                            p->as.free.flags = 0;
                        }
                    }
                    else if (RBASIC(p)->flags != FL_MARK) {
                        p->as.basic.flags &= ~FL_MARK;
                    }
                    p++;
                }
            }
            else {
                heaps[i].color = GRAY;
            }
            if (i != j) {
                heaps[j] = heaps[i];
            }
            j++;
        }
    }
    if (last) {
        if (last < heaps_freed) {
            free(heaps_freed);
            heaps_freed = last;
        }
        else {
            free(last);
        }
    }
}

static void
gc_marks(rb_objspace_t *objspace, rb_thread_t *th)
{
    struct gc_list *list;
    size_t free_min = 0;

    live = 0;
    freelist = 0;

    gc_mark_all_clear(objspace);

    SET_STACK_END;

    init_mark_stack(objspace);

    th->vm->self ? rb_gc_mark(th->vm->self) : rb_vm_mark(th->vm);

    if (finalizer_table) {
        mark_tbl(objspace, finalizer_table, 0);
    }

    mark_current_machine_context(objspace, th);

    rb_gc_mark_threads();
    rb_gc_mark_symbols();
    rb_gc_mark_encodings();

    /* mark protected global variables */
    for (list = global_List; list; list = list->next) {
        rb_gc_mark_maybe(*list->varptr);
    }
    rb_mark_end_proc();
    rb_gc_mark_global_tbl();

    mark_tbl(objspace, rb_class_tbl, 0);

    /* mark generic instance variables for special constants */
    rb_mark_generic_ivar_tbl();

    rb_gc_mark_parser();

    /* gc_mark objects whose marking are not completed */
    while (!MARK_STACK_EMPTY) {
        if (mark_stack_overflow) {
            gc_mark_all(objspace);
        }
        else {
            gc_mark_rest(objspace);
        }
    }
    // GC_PROF_MARK_TIMER_STOP; // TODO

    dead = 0;
    heaps_sweep_index = 0;
    heaps_sweep_inc = (heaps_used / 10) + 1;
    do_heap_free = (heaps_used * HEAP_OBJ_LIMIT) * 0.65;
    free_min = (heaps_used * HEAP_OBJ_LIMIT) * 0.2;
    if (free_min < FREE_MIN) free_min = FREE_MIN;
    if (free_min > (heaps_used * HEAP_OBJ_LIMIT - live)) {
        set_heaps_increment(objspace);
        heaps_sweep_inc = (heaps_used + heaps_sweep_inc) / heaps_sweep_inc + 1;
    }

    // GC_PROF_TIMER_STOP; // TODO
    if (GC_NOTIFY) printf("end garbage_collect()\n");
}

static int
garbage_collect_force_with_gvl(rb_objspace_t *objspace)
{
    if (malloc_increase > malloc_limit) {
        malloc_limit += (malloc_increase - malloc_limit) * (double)live / (heaps_used * HEAP_OBJ_LIMIT);
        if (malloc_limit < GC_MALLOC_LIMIT) malloc_limit = GC_MALLOC_LIMIT;
    }
    malloc_increase = 0;
    gc_marks(objspace, GET_THREAD());
    return garbage_collect_with_gvl(objspace);
}

static int
garbage_collect_lazy(rb_objspace_t *objspace)
{
    struct gc_list *list;
    rb_thread_t *th = GET_THREAD();
    INIT_GC_PROF_PARAMS;

    if (GC_NOTIFY) printf("start garbage_collect()\n");

    if (!heaps) {
        return FALSE;
    }

    if (dont_gc || during_gc) {
        if (!freelist) {
            if (!heaps_increment(objspace)) {
                set_heaps_increment(objspace);
                heaps_increment(objspace);
            }
        }
        return TRUE;
    }
    during_gc++;
    objspace->count++;

    GC_PROF_TIMER_START;
    GC_PROF_MARK_TIMER_START;
    SET_STACK_END;

    while (!gc_lazy_sweep(objspace, th)) {
        gc_marks(objspace, th);
    }

    GC_PROF_MARK_TIMER_STOP;
    GC_PROF_SET_HEAP_INFO; // TODO : correct?
    GC_PROF_TIMER_STOP;
    during_gc = 0;

    if (GC_NOTIFY) printf("end garbage_collect()\n");
    return TRUE;
}

static void
gc_finalize_deferred_lazy(rb_objspace_t *objspace)
{
    finalize_deferred(objspace);
}

static void
rb_gc_lazy(void)
{
    rb_objspace_t *objspace = &rb_objspace;
    gc_finalize_deferred(objspace);
    garbage_collect(objspace);
}

REGISTER_GC(lazy);
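
For reference, REGISTER_GC(lazy) should expand to a gc_algorithm_t vtable wired to the functions above. The REGISTER_GC macro is cut off in the truncated gc.c hunk below, so the tail of this expansion is a sketch inferred from the gc_algorithm_t field order, not a verbatim quote of the patch:

    /* Presumable expansion of REGISTER_GC(lazy); the last few initializers
     * are inferred from the struct layout in gc.c. */
    gc_algorithm_t gc_algorithm_lazy = {
        "lazy",
        vm_xmalloc_lazy,
        vm_xrealloc_lazy,
        assign_heap_slot_lazy,
        rb_newobj_from_heap_lazy,
        gc_mark_lazy,
        gc_mark_children_lazy,
        garbage_collect_lazy,
        gc_finalize_deferred_lazy,
        rb_gc_force_recycle_lazy,
        rb_gc_lazy,
    };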
gc_default.c (revision 0)
/**********************************************************************

  gc_default.c -

  $Author: $
  created at:

  Copyright (C) 1993-2007 Yukihiro Matsumoto
  Copyright (C) 2000  Network Applied Communication Laboratory, Inc.
  Copyright (C) 2000  Information-technology Promotion Agency, Japan

**********************************************************************/

#include "gc_core.h"

static void *
vm_xmalloc_default(rb_objspace_t *objspace, size_t size)
{
    void *mem;

    if ((ssize_t)size < 0) {
        negative_size_allocation_error("negative allocation size (or too big)");
    }
    if (size == 0) size = 1;

#if CALC_EXACT_MALLOC_SIZE
    size += sizeof(size_t);
#endif

    if ((ruby_gc_stress && !ruby_disable_gc_stress) ||
        (malloc_increase+size) > malloc_limit) {
        garbage_collect_with_gvl(objspace);
    }
    mem = malloc(size);
    if (!mem) {
        if (garbage_collect_with_gvl(objspace)) {
            mem = malloc(size);
        }
        if (!mem) {
            ruby_memerror();
        }
    }
    malloc_increase += size;

#if CALC_EXACT_MALLOC_SIZE
    objspace->malloc_params.allocated_size += size;
    objspace->malloc_params.allocations++;
    ((size_t *)mem)[0] = size;
    mem = (size_t *)mem + 1;
#endif

    return mem;
}

static void *
vm_xrealloc_default(rb_objspace_t *objspace, void *ptr, size_t size)
{
    void *mem;

    if ((ssize_t)size < 0) {
        negative_size_allocation_error("negative re-allocation size");
    }
    if (!ptr) return vm_xmalloc(objspace, size);
    if (size == 0) {
        vm_xfree(objspace, ptr);
        return 0;
    }
    if (ruby_gc_stress && !ruby_disable_gc_stress)
        garbage_collect_with_gvl(objspace);

#if CALC_EXACT_MALLOC_SIZE
    size += sizeof(size_t);
    objspace->malloc_params.allocated_size -= size;
    ptr = (size_t *)ptr - 1;
#endif

    mem = realloc(ptr, size);
    if (!mem) {
        if (garbage_collect_with_gvl(objspace)) {
            mem = realloc(ptr, size);
        }
        if (!mem) {
            ruby_memerror();
        }
    }
    malloc_increase += size;

#if CALC_EXACT_MALLOC_SIZE
    objspace->malloc_params.allocated_size += size;
    ((size_t *)mem)[0] = size;
    mem = (size_t *)mem + 1;
#endif

    return mem;
}

static void
assign_heap_slot_default(rb_objspace_t *objspace)
{
    RVALUE *p, *pend, *membase;
    size_t hi, lo, mid;
    size_t objs;

    objs = HEAP_OBJ_LIMIT;
    p = (RVALUE*)malloc(HEAP_SIZE);

    if (p == 0) {
        during_gc = 0;
        rb_memerror();
    }

    membase = p;
    if ((VALUE)p % sizeof(RVALUE) != 0) {
        p = (RVALUE*)((VALUE)p + sizeof(RVALUE) - ((VALUE)p % sizeof(RVALUE)));
        if ((HEAP_SIZE - HEAP_OBJ_LIMIT * sizeof(RVALUE)) < (size_t)((char*)p - (char*)membase)) {
            objs--;
        }
    }

    lo = 0;
    hi = heaps_used;
    while (lo < hi) {
        register RVALUE *mid_membase;
        mid = (lo + hi) / 2;
        mid_membase = heaps[mid].membase;
        if (mid_membase < membase) {
            lo = mid + 1;
        }
        else if (mid_membase > membase) {
            hi = mid;
        }
        else {
            rb_bug("same heap slot is allocated: %p at %"PRIuVALUE, (void *)membase, (VALUE)mid);
        }
    }
    if (hi < heaps_used) {
        MEMMOVE(&heaps[hi+1], &heaps[hi], struct heaps_slot, heaps_used - hi);
    }
    heaps[hi].membase = membase;
    heaps[hi].slot = p;
    heaps[hi].limit = objs;
    pend = p + objs;
    if (lomem == 0 || lomem > p) lomem = p;
    if (himem < pend) himem = pend;
    heaps_used++;

    while (p < pend) {
        p->as.free.flags = 0;
        p->as.free.next = freelist;
        freelist = p;
        p++;
    }
}

static VALUE
rb_newobj_from_heap_default(rb_objspace_t *objspace)
{
    VALUE obj;

    if ((ruby_gc_stress && !ruby_disable_gc_stress) || !freelist) {
        if (!heaps_increment(objspace) && !garbage_collect(objspace)) {
            during_gc = 0;
            rb_memerror();
        }
    }

    obj = (VALUE)freelist;
    freelist = freelist->as.free.next;

    MEMZERO((void*)obj, RVALUE, 1);
#ifdef GC_DEBUG
    RANY(obj)->file = rb_sourcefile();
    RANY(obj)->line = rb_sourceline();
#endif

    return obj;
}

static void
gc_mark_default(rb_objspace_t *objspace, VALUE ptr, int lev)
{
    register RVALUE *obj;

    obj = RANY(ptr);
    if (rb_special_const_p(ptr)) return; /* special const not marked */
    if (obj->as.basic.flags == 0) return;       /* free cell */
    if (obj->as.basic.flags & FL_MARK) return;  /* already marked */
    obj->as.basic.flags |= FL_MARK;

    if (lev > GC_LEVEL_MAX || (lev == 0 && stack_check())) {
        if (!mark_stack_overflow) {
            if (mark_stack_ptr - mark_stack < MARK_STACK_MAX) {
                *mark_stack_ptr = ptr;
                mark_stack_ptr++;
            }
            else {
                mark_stack_overflow = 1;
            }
        }
        return;
    }
    gc_mark_children(objspace, ptr, lev+1);
}

static void
gc_mark_children_default(rb_objspace_t *objspace, VALUE ptr, int lev)
{
    register RVALUE *obj = RANY(ptr);

    goto marking;		/* skip */

  again:
    obj = RANY(ptr);
    if (rb_special_const_p(ptr)) return; /* special const not marked */
    if (obj->as.basic.flags == 0) return;       /* free cell */
    if (obj->as.basic.flags & FL_MARK) return;  /* already marked */
    obj->as.basic.flags |= FL_MARK;

  marking:
    if (FL_TEST(obj, FL_EXIVAR)) {
        rb_mark_generic_ivar(ptr);
    }

    switch (BUILTIN_TYPE(obj)) {
      case T_NIL:
      case T_FIXNUM:
        rb_bug("rb_gc_mark() called for broken object");
        break;

      case T_NODE:
        switch (nd_type(obj)) {
          case NODE_IF:		/* 1,2,3 */
          case NODE_FOR:
          case NODE_ITER:
          case NODE_WHEN:
          case NODE_MASGN:
          case NODE_RESCUE:
          case NODE_RESBODY:
          case NODE_CLASS:
          case NODE_BLOCK_PASS:
            gc_mark(objspace, (VALUE)obj->as.node.u2.node, lev);
            /* fall through */
          case NODE_BLOCK:	/* 1,3 */
          case NODE_OPTBLOCK:
          case NODE_ARRAY:
          case NODE_DSTR:
          case NODE_DXSTR:
          case NODE_DREGX:
          case NODE_DREGX_ONCE:
          case NODE_ENSURE:
          case NODE_CALL:
          case NODE_DEFS:
          case NODE_OP_ASGN1:
          case NODE_ARGS:
            gc_mark(objspace, (VALUE)obj->as.node.u1.node, lev);
            /* fall through */
          case NODE_SUPER:	/* 3 */
          case NODE_FCALL:
          case NODE_DEFN:
          case NODE_ARGS_AUX:
            ptr = (VALUE)obj->as.node.u3.node;
            goto again;

          case NODE_WHILE:
          case NODE_UNTIL:
          case NODE_AND:
          case NODE_OR:
          case NODE_CASE:
          case NODE_SCLASS:
          case NODE_DOT2:
          case NODE_DOT3:
          case NODE_FLIP2:
          case NODE_FLIP3:
          case NODE_MATCH2:
          case NODE_MATCH3:
          case NODE_OP_ASGN_OR:
          case NODE_OP_ASGN_AND:
          case NODE_MODULE:
          case NODE_ALIAS:
          case NODE_VALIAS:
          case NODE_ARGSCAT:
            gc_mark(objspace, (VALUE)obj->as.node.u1.node, lev);
            /* fall through */
          case NODE_GASGN:
          case NODE_LASGN:
          case NODE_DASGN:
          case NODE_DASGN_CURR:
          case NODE_IASGN:
          case NODE_IASGN2:
          case NODE_CVASGN:
          case NODE_COLON3:
          case NODE_OPT_N:
          case NODE_EVSTR:
          case NODE_UNDEF:
          case NODE_POSTEXE:
            ptr = (VALUE)obj->as.node.u2.node;
            goto again;

          case NODE_HASH:	/* 1 */
          case NODE_LIT:
          case NODE_STR:
          case NODE_XSTR:
          case NODE_DEFINED:
          case NODE_MATCH:
          case NODE_RETURN:
          case NODE_BREAK:
          case NODE_NEXT:
          case NODE_YIELD:
          case NODE_COLON2:
          case NODE_SPLAT:
          case NODE_TO_ARY:
            ptr = (VALUE)obj->as.node.u1.node;
            goto again;

          case NODE_SCOPE:	/* 2,3 */
          case NODE_CDECL:
          case NODE_OPT_ARG:
            gc_mark(objspace, (VALUE)obj->as.node.u3.node, lev);
            ptr = (VALUE)obj->as.node.u2.node;
            goto again;

          case NODE_ZARRAY:	/* - */
          case NODE_ZSUPER:
          case NODE_VCALL:
          case NODE_GVAR:
          case NODE_LVAR:
          case NODE_DVAR:
          case NODE_IVAR:
          case NODE_CVAR:
          case NODE_NTH_REF:
          case NODE_BACK_REF:
          case NODE_REDO:
          case NODE_RETRY:
          case NODE_SELF:
          case NODE_NIL:
          case NODE_TRUE:
          case NODE_FALSE:
          case NODE_ERRINFO:
          case NODE_BLOCK_ARG:
            break;
          case NODE_ALLOCA:
            mark_locations_array(objspace,
                                 (VALUE*)obj->as.node.u1.value,
                                 obj->as.node.u3.cnt);
            ptr = (VALUE)obj->as.node.u2.node;
            goto again;

          default:		/* unlisted NODE */
            if (is_pointer_to_heap(objspace, obj->as.node.u1.node)) {
                gc_mark(objspace, (VALUE)obj->as.node.u1.node, lev);
            }
            if (is_pointer_to_heap(objspace, obj->as.node.u2.node)) {
                gc_mark(objspace, (VALUE)obj->as.node.u2.node, lev);
            }
            if (is_pointer_to_heap(objspace, obj->as.node.u3.node)) {
                gc_mark(objspace, (VALUE)obj->as.node.u3.node, lev);
            }
        }
        return;			/* no need to mark class. */
    }

    gc_mark(objspace, obj->as.basic.klass, lev);
    switch (BUILTIN_TYPE(obj)) {
      case T_ICLASS:
      case T_CLASS:
      case T_MODULE:
        mark_m_tbl(objspace, RCLASS_M_TBL(obj), lev);
        mark_tbl(objspace, RCLASS_IV_TBL(obj), lev);
        ptr = RCLASS_SUPER(obj);
        goto again;

      case T_ARRAY:
        if (FL_TEST(obj, ELTS_SHARED)) {
            ptr = obj->as.array.as.heap.aux.shared;
            goto again;
        }
        else {
            long i, len = RARRAY_LEN(obj);
            VALUE *ptr = RARRAY_PTR(obj);
            for (i=0; i < len; i++) {
                gc_mark(objspace, *ptr++, lev);
            }
        }
        break;

      case T_HASH:
        mark_hash(objspace, obj->as.hash.ntbl, lev);
        ptr = obj->as.hash.ifnone;
        goto again;

      case T_STRING:
#define STR_ASSOC FL_USER3   /* copied from string.c */
        if (FL_TEST(obj, RSTRING_NOEMBED) && FL_ANY(obj, ELTS_SHARED|STR_ASSOC)) {
            ptr = obj->as.string.as.heap.aux.shared;
            goto again;
        }
        break;

      case T_DATA:
        if (RTYPEDDATA_P(obj)) {
            if (obj->as.typeddata.type->dmark) (*obj->as.typeddata.type->dmark)(DATA_PTR(obj));
        }
        else {
            if (obj->as.data.dmark) (*obj->as.data.dmark)(DATA_PTR(obj));
        }
        break;

      case T_OBJECT:
        {
            long i, len = ROBJECT_NUMIV(obj);
            VALUE *ptr = ROBJECT_IVPTR(obj);
            for (i = 0; i < len; i++) {
                gc_mark(objspace, *ptr++, lev);
            }
        }
        break;

      case T_FILE:
        if (obj->as.file.fptr) {
            gc_mark(objspace, obj->as.file.fptr->pathv, lev);
            gc_mark(objspace, obj->as.file.fptr->tied_io_for_writing, lev);
            gc_mark(objspace, obj->as.file.fptr->writeconv_asciicompat, lev);
            gc_mark(objspace, obj->as.file.fptr->writeconv_pre_ecopts, lev);
            gc_mark(objspace, obj->as.file.fptr->encs.ecopts, lev);
            gc_mark(objspace, obj->as.file.fptr->write_lock, lev);
        }
        break;

      case T_REGEXP:
        gc_mark(objspace, obj->as.regexp.src, lev);
        break;

      case T_FLOAT:
      case T_BIGNUM:
      case T_ZOMBIE:
        break;

      case T_MATCH:
        gc_mark(objspace, obj->as.match.regexp, lev);
        if (obj->as.match.str) {
            ptr = obj->as.match.str;
            goto again;
        }
        break;

      case T_RATIONAL:
        gc_mark(objspace, obj->as.rational.num, lev);
        gc_mark(objspace, obj->as.rational.den, lev);
        break;

      case T_COMPLEX:
        gc_mark(objspace, obj->as.complex.real, lev);
        gc_mark(objspace, obj->as.complex.imag, lev);
        break;

      case T_STRUCT:
        {
            long len = RSTRUCT_LEN(obj);
            VALUE *ptr = RSTRUCT_PTR(obj);

            while (len--) {
                gc_mark(objspace, *ptr++, lev);
            }
        }
        break;

      default:
        rb_bug("rb_gc_mark(): unknown data type 0x%x(%p) %s",
               BUILTIN_TYPE(obj), (void *)obj,
               is_pointer_to_heap(objspace, obj) ? "corrupted object" : "non object");
    }
}

#ifdef NOSELECT_GC
void
rb_gc_force_recycle(VALUE p)
#else
static void
rb_gc_force_recycle_default(VALUE p)
#endif
{
    rb_objspace_t *objspace = &rb_objspace;
    add_freelist(objspace, (RVALUE *)p);
}

static void
gc_finalize_deferred_default(rb_objspace_t *objspace)
{
    finalize_deferred(objspace);
    free_unused_heaps(objspace);
}

#ifdef NOSELECT_GC
void
rb_gc(void)
#else
static void
rb_gc_default(void)
#endif
{
    rb_objspace_t *objspace = &rb_objspace;
    garbage_collect(objspace);
    gc_finalize_deferred(objspace);
}

static int
garbage_collect_default(rb_objspace_t *objspace)
{
    struct gc_list *list;
    rb_thread_t *th = GET_THREAD();
    INIT_GC_PROF_PARAMS;

    if (GC_NOTIFY) printf("start garbage_collect()\n");

    if (!heaps) {
        return FALSE;
    }

    if (dont_gc || during_gc) {
        if (!freelist) {
            if (!heaps_increment(objspace)) {
                set_heaps_increment(objspace);
                heaps_increment(objspace);
            }
        }
        return TRUE;
    }
    during_gc++;
    objspace->count++;

    GC_PROF_TIMER_START;
    GC_PROF_MARK_TIMER_START;
    SET_STACK_END;

    init_mark_stack(objspace);

    th->vm->self ? rb_gc_mark(th->vm->self) : rb_vm_mark(th->vm);

    if (finalizer_table) {
        mark_tbl(objspace, finalizer_table, 0);
    }

    mark_current_machine_context(objspace, th);

    rb_gc_mark_threads();
    rb_gc_mark_symbols();
    rb_gc_mark_encodings();

    /* mark protected global variables */
    for (list = global_List; list; list = list->next) {
        rb_gc_mark_maybe(*list->varptr);
    }
    rb_mark_end_proc();
    rb_gc_mark_global_tbl();

    mark_tbl(objspace, rb_class_tbl, 0);

    /* mark generic instance variables for special constants */
    rb_mark_generic_ivar_tbl();

    rb_gc_mark_parser();

    /* gc_mark objects whose marking are not completed */
    while (!MARK_STACK_EMPTY) {
        if (mark_stack_overflow) {
            gc_mark_all(objspace);
        }
        else {
            gc_mark_rest(objspace);
        }
    }
    GC_PROF_MARK_TIMER_STOP;

    GC_PROF_SWEEP_TIMER_START;
    gc_sweep(objspace);
    GC_PROF_SWEEP_TIMER_STOP;

    GC_PROF_TIMER_STOP;
    if (GC_NOTIFY) printf("end garbage_collect()\n");
    return TRUE;
}

REGISTER_GC(default);
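
With both algorithms registered, dispatch goes through the current_gc_algorithm pointer declared in gc.c below. The patch's actual selection code is not visible in this truncated view; a hypothetical sketch of what picking an algorithm by name could look like (select_gc_algorithm and the candidate table are illustrative names, not part of the patch):

    #include <string.h>  /* strcmp; already available in the tree */

    extern gc_algorithm_t gc_algorithm_default, gc_algorithm_lazy;

    /* Hypothetical: point current_gc_algorithm at the vtable whose name
     * matches, falling back to the default collector. */
    static void
    select_gc_algorithm(const char *name)
    {
        gc_algorithm_t *candidates[] = { &gc_algorithm_default, &gc_algorithm_lazy, 0 };
        int i;

        current_gc_algorithm = &gc_algorithm_default;
        if (!name) return;
        for (i = 0; candidates[i]; i++) {
            if (strcmp(candidates[i]->name, name) == 0) {
                current_gc_algorithm = candidates[i];
                return;
            }
        }
    }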
gc.c (working copy)
 #pragma pack(pop)
 #endif
 
+enum slot_color {
+    WHITE = 0x00,  /* garbage */
+    BLACK = 0x01,  /* used */
+    GRAY  = 0x02,  /* not sweep */
+};
+
 struct heaps_slot {
     void *membase;
     RVALUE *slot;
     size_t limit;
+    enum slot_color color;
 };
 
 #define HEAP_MIN_SLOTS 10000
......
         RVALUE *freelist;
         RVALUE *range[2];
         RVALUE *freed;
+        size_t live;
+        size_t dead;
+        size_t do_heap_free;
+        size_t sweep_index;
+        size_t sweep_increment;
     } heap;
     struct {
         int dont_gc;
         int during_gc;
+        int during_sweep;
     } flags;
     struct {
         st_table *table;
......
 extern VALUE rb_cMutex;
 extern st_table *rb_class_tbl;
 
+typedef struct gc_algorithm {
+    const char *name;
+    void *(*vm_xmalloc)(rb_objspace_t *objspace, size_t size);
+    void *(*vm_xrealloc)(rb_objspace_t *objspace, void *ptr, size_t size);
+    void (*assign_heap_slot)(rb_objspace_t *objspace);
+    VALUE (*rb_newobj_from_heap)(rb_objspace_t *objspace);
+    void (*gc_mark)(rb_objspace_t *objspace, VALUE ptr, int lev);
+    void (*gc_mark_children)(rb_objspace_t *objspace, VALUE ptr, int lev);
+    int (*garbage_collect)(rb_objspace_t *objspace);
+    void (*gc_finalize_deferred)(rb_objspace_t *objspace);
+
+    void (*rb_gc_force_recycle)(VALUE p);
+    void (*rb_gc)(void);
+} gc_algorithm_t;
+
+gc_algorithm_t *current_gc_algorithm;
+
+#ifdef NOSELECT_GC
+
+static void *vm_xmalloc(rb_objspace_t *objspace, size_t size);
+static void *vm_xrealloc(rb_objspace_t *objspace, void *ptr, size_t size);
+static void assign_heap_slot(rb_objspace_t *objspace);
+static VALUE rb_newobj_from_heap(rb_objspace_t *objspace);
+static void gc_mark(rb_objspace_t *objspace, VALUE ptr, int lev);
+static void gc_mark_children(rb_objspace_t *objspace, VALUE ptr, int lev);
+static int garbage_collect(rb_objspace_t *objspace);
+static void gc_finalize_deferred(rb_objspace_t *objspace);
+
+#define vm_xmalloc_default(objspace, size) \
+  vm_xmalloc(objspace, size)
+#define vm_xrealloc_default(objspace, ptr, size) \
+  vm_xrealloc(objspace, ptr, size)
+#define assign_heap_slot_default(objspace) \
+  assign_heap_slot(objspace)
+#define rb_newobj_from_heap_default(objspace) \
+  rb_newobj_from_heap(objspace)
+#define gc_mark_default(objspace, ptr, lev) \
+  gc_mark(objspace, ptr, lev)
+#define gc_mark_children_default(objspace, ptr, lev) \
+  gc_mark_children(objspace, ptr, lev)
+#define garbage_collect_default(objspace) \
+  garbage_collect(objspace)
+#define gc_finalize_deferred_default(objspace) \
+  gc_finalize_deferred(objspace)
+#define REGISTER_GC(name)
+
+#else /* NOSELECT_GC */
+
+#define vm_xmalloc(objspace, size) \
+  current_gc_algorithm->vm_xmalloc(objspace, size)
+#define vm_xrealloc(objspace, ptr, size) \
+  current_gc_algorithm->vm_xrealloc(objspace, ptr, size)
+#define assign_heap_slot(objspace) \
+  current_gc_algorithm->assign_heap_slot(objspace)
+#define rb_newobj_from_heap(objspace) \
+  current_gc_algorithm->rb_newobj_from_heap(objspace)
+#define gc_mark(objspace, ptr, lev) \
+  current_gc_algorithm->gc_mark(objspace, ptr, lev)
+#define gc_mark_children(objspace, ptr, lev) \
+  current_gc_algorithm->gc_mark_children(objspace, ptr, lev)
+#define garbage_collect(objspace) \
+  current_gc_algorithm->garbage_collect(objspace)
+#define gc_finalize_deferred(objspace) \
+  current_gc_algorithm->gc_finalize_deferred(objspace)
+
+void
+rb_gc_force_recycle(VALUE p)
+{
+    current_gc_algorithm->rb_gc_force_recycle(p);
+}
+
+void
+rb_gc(void)
+{
+    current_gc_algorithm->rb_gc();
+}
+
+#define REGISTER_GC(name) \
+  gc_algorithm_t gc_algorithm_##name = { \
+    #name, \
+    vm_xmalloc_##name, \
+    vm_xrealloc_##name, \
+    assign_heap_slot_##name, \
... This diff was truncated because it exceeds the maximum size that can be displayed.
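
A side note on assign_heap_slot (both variants): since malloc makes no alignment promise relative to sizeof(RVALUE), the function rounds the page base up by hand and drops one object slot if the shift eats into the last one. A minimal standalone illustration of that round-up, with hypothetical names and a 16-byte alignment standing in for sizeof(RVALUE) (illustration only, not part of the patch):

    #include <stdint.h>
    #include <stdio.h>

    /* Same arithmetic as assign_heap_slot: bump addr to the next
     * multiple of align, leaving aligned addresses untouched. */
    static uintptr_t
    round_up(uintptr_t addr, uintptr_t align)
    {
        if (addr % align != 0) {
            addr += align - (addr % align);
        }
        return addr;
    }

    int
    main(void)
    {
        printf("%#lx\n", (unsigned long)round_up(0x1007, 16)); /* 0x1010 */
        printf("%#lx\n", (unsigned long)round_up(0x1010, 16)); /* 0x1010, already aligned */
        return 0;
    }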