Project

General

Profile

Bug #14858 » transient_heap.patch

ko1 (Koichi Sasada), 06/21/2018 03:11 AM

View differences:

array.c (working copy)
18 18
#include "probes.h"
19 19
#include "id.h"
20 20
#include "debug_counter.h"
21
#include "gc.h"
22

  
23
// #define ARRAY_DEBUG
21 24

  
22 25
#ifndef ARRAY_DEBUG
23 26
# define NDEBUG
......
53 56
#define FL_SET_EMBED(a) do { \
54 57
    assert(!ARY_SHARED_P(a)); \
55 58
    FL_SET((a), RARRAY_EMBED_FLAG); \
59
    FL_UNSET_RAW((a), RARRAY_TRANSIENT_FLAG); \
60
    ary_verify(a); \
56 61
} while (0)
57 62
#define FL_UNSET_EMBED(ary) FL_UNSET((ary), RARRAY_EMBED_FLAG|RARRAY_EMBED_LEN_MASK)
58 63
#define FL_SET_SHARED(ary) do { \
......
130 135
} while (0)
131 136
#define FL_SET_SHARED_ROOT(ary) do { \
132 137
    assert(!ARY_EMBED_P(ary)); \
138
    assert(!ARY_TRANSIENT_P(ary)); \
133 139
    FL_SET((ary), RARRAY_SHARED_ROOT_FLAG); \
134 140
} while (0)
135 141

  
136 142
#define ARY_SET(a, i, v) RARRAY_ASET((assert(!ARY_SHARED_P(a)), (a)), (i), (v))
137 143

  
144

  
145
#ifdef ARRAY_DEBUG
146
#define ary_verify(ary) ary_verify_(ary, __FILE__, __LINE__)
147

  
148
static void
149
ary_verify_(VALUE ary, const char *file, int line)
150
{
151
    if (FL_TEST(ary, ELTS_SHARED)) {
152
        VALUE root = RARRAY(ary)->as.heap.aux.shared;
153
        const VALUE *ptr = RARRAY_CONST_PTR(ary);
154
        const VALUE *root_ptr = RARRAY_CONST_PTR(root);
155
        long len = RARRAY_LEN(ary), root_len = RARRAY_LEN(root);
156
        assert(FL_TEST(root, RARRAY_SHARED_ROOT_FLAG));
157
        assert(root_ptr <= ptr && ptr + len <= root_ptr + root_len);
158
        ary_verify(root);
159
    }
160
    else if (ARY_EMBED_P(ary)) {
161
        assert(!ARY_TRANSIENT_P(ary));
162
        assert(!ARY_SHARED_P(ary));
163
        assert(RARRAY_LEN(ary) <= RARRAY_EMBED_LEN_MAX);
164
    }
165
    else {
166
#if 0
167
        const VALUE *ptr = RARRAY_CONST_PTR(ary);
168
        long i, len = RARRAY_LEN(ary);
169
        volatile VALUE v;
170
        for (i=0; i<len; i++) {
171
            v = ptr[i]; // access check
172
        }
173
#endif
174
    }
175
}
176
#else
177
#define ary_verify(ary) ((void)0)
178
#endif
179

  
138 180
void
139 181
rb_mem_clear(register VALUE *mem, register long size)
140 182
{
......
201 243
    ary_memcpy0(ary, beg, argc, argv, ary);
202 244
}
203 245

  
246
void *rb_transient_heap_alloc(VALUE obj, size_t size);
247

  
248
static VALUE *
249
ary_heap_alloc(VALUE ary, size_t capa)
250
{
251
    VALUE *ptr = rb_transient_heap_alloc(ary, sizeof(VALUE) * capa);
252

  
253
    if (ptr != NULL) {
254
        FL_SET_RAW(ary, RARRAY_TRANSIENT_FLAG);
255
        //fprintf(stderr, "ary:%p (%p) is TRANSIENT.\n", (void *)ary, ptr);
256
    }
257
    else {
258
        FL_UNSET_RAW(ary, RARRAY_TRANSIENT_FLAG);
259
        ptr = ALLOC_N(VALUE, capa);
260
        //fprintf(stderr, "ary:%p (%p) is not TRANSIENT.\n", (void *)ary, ptr);
261
    }
262

  
263
    return ptr;
264
}
265

  
266
static void
267
ary_heap_free_ptr(VALUE ary, const VALUE *ptr, long size)
268
{
269
    if (ARY_TRANSIENT_P(ary)) {
270
        /* ignore it */
271
        // fprintf(stderr, "ary_heap_free: %p is transient.\n", (void *)ary);
272
    }
273
    else {
274
        // fprintf(stderr, "ary_heap_free: %p is freed.\n", (void *)ary);
275
        ruby_sized_xfree((void *)ptr, size);
276
    }
277
}
278

  
279
static void
280
ary_heap_free(VALUE ary)
281
{
282
    // fprintf(stderr, "ary_heap_free: %p\n", (void *)ary);
283
    if (ARY_TRANSIENT_P(ary)) {
284
        /* ignore */
285
    }
286
    else {
287
        ary_heap_free_ptr(ary, ARY_HEAP_PTR(ary), ARY_HEAP_SIZE(ary));
288
    }
289
}
290

  
291
static void
292
ary_heap_realloc(VALUE ary, size_t new_capa)
293
{
294
    size_t old_capa = RARRAY(ary)->as.heap.aux.capa;
295

  
296
    if (ARY_TRANSIENT_P(ary)) {
297
        if (new_capa <= old_capa) {
298
            /* do nothing */
299
        }
300
        else {
301
            VALUE *new_ptr = rb_transient_heap_alloc(ary, sizeof(VALUE) * new_capa);
302

  
303
            if (new_ptr == NULL) {
304
                new_ptr = ALLOC_N(VALUE, new_capa);
305
                FL_UNSET_RAW(ary, RARRAY_TRANSIENT_FLAG);
306
            }
307

  
308
            MEMCPY(new_ptr, ARY_HEAP_PTR(ary), VALUE, old_capa);
309
            ARY_SET_PTR(ary, new_ptr);
310
        }
311
    }
312
    else {
313
        SIZED_REALLOC_N(RARRAY(ary)->as.heap.ptr, VALUE, new_capa, old_capa);
314
    }
315
}
316

  
317
void
318
rb_ary_detransient(VALUE ary)
319
{
320
    // fprintf(stderr, "rb_ary_detransient:\n");
321
    // fprintf(stderr, "(1) %s\n", rb_obj_info(ary));
322

  
323
    if (ARY_TRANSIENT_P(ary)) {
324
        VALUE *new_ptr;
325
        long capa = RARRAY(ary)->as.heap.aux.capa;
326
        long len  = RARRAY(ary)->as.heap.len;
327

  
328
        assert(ARY_OWNS_HEAP_P(ary));
329
        assert(ARY_TRANSIENT_P(ary));
330

  
331
        if (ARY_SHARED_ROOT_P(ary)) {
332
            capa = len;
333
        }
334

  
335
        new_ptr = ALLOC_N(VALUE, capa);
336
        MEMCPY(new_ptr, ARY_HEAP_PTR(ary), VALUE, capa);
337
        RARRAY(ary)->as.heap.ptr = new_ptr;
338
        /* do not use ARY_SET_PTR() because they assert !frozen */
339
        FL_UNSET_RAW(ary, RARRAY_TRANSIENT_FLAG);
340

  
341
        // fprintf(stderr, "(2) %s\n", rb_obj_info(ary));
342
    }
343
}
344

  
204 345
static void
205 346
ary_resize_capa(VALUE ary, long capacity)
206 347
{
207 348
    assert(RARRAY_LEN(ary) <= capacity);
208 349
    assert(!OBJ_FROZEN(ary));
209 350
    assert(!ARY_SHARED_P(ary));
351

  
352
    // fprintf(stderr, "ary_resize_capa (%ld): %s\n", capacity, rb_obj_info(ary));
353

  
210 354
    if (capacity > RARRAY_EMBED_LEN_MAX) {
211 355
        if (ARY_EMBED_P(ary)) {
212 356
            long len = ARY_EMBED_LEN(ary);
213
            VALUE *ptr = ALLOC_N(VALUE, (capacity));
357
            VALUE *ptr = ary_heap_alloc(ary, capacity);
358

  
214 359
            MEMCPY(ptr, ARY_EMBED_PTR(ary), VALUE, len);
215 360
            FL_UNSET_EMBED(ary);
216 361
            ARY_SET_PTR(ary, ptr);
217 362
            ARY_SET_HEAP_LEN(ary, len);
218 363
        }
219 364
        else {
220
	    SIZED_REALLOC_N(RARRAY(ary)->as.heap.ptr, VALUE, capacity, RARRAY(ary)->as.heap.aux.capa);
365
            // fprintf(stderr, "ary_resize_capa %s\n", rb_obj_info(ary));
366
            ary_heap_realloc(ary, capacity);
221 367
        }
222
        ARY_SET_CAPA(ary, (capacity));
368
        ARY_SET_CAPA(ary, capacity);
369
        // fprintf(stderr, "-> ary_resize_capa: %s\n", rb_obj_info(ary));
370

  
371
        // fprintf(stderr, "ary_resize_capa %p len:%ld capa:%ld - %s\n", (void *)ary, RARRAY_LEN(ary), capacity, rb_obj_info(ary));
223 372
    }
224 373
    else {
225 374
        if (!ARY_EMBED_P(ary)) {
226 375
            long len = RARRAY_LEN(ary);
376
            long old_capa = RARRAY(ary)->as.heap.aux.capa;
227 377
	    const VALUE *ptr = RARRAY_CONST_PTR(ary);
228

  
229 378
	    if (len > capacity) len = capacity;
230 379
            MEMCPY((VALUE *)RARRAY(ary)->as.ary, ptr, VALUE, len);
380
            ary_heap_free_ptr(ary, ptr, old_capa);
381

  
231 382
            FL_SET_EMBED(ary);
232 383
            ARY_SET_LEN(ary, len);
233
            ruby_sized_xfree((VALUE *)ptr, RARRAY(ary)->as.heap.aux.capa);
384

  
385
            // fprintf(stderr, "ary_resize_capa: heap->embed %p len:%ld\n", (void *)ary, len);
234 386
        }
235 387
    }
388

  
389
    ary_verify(ary);
236 390
}
237 391

  
238 392
static inline void
......
242 396
    long old_capa = RARRAY(ary)->as.heap.aux.capa;
243 397
    assert(!ARY_SHARED_P(ary));
244 398
    assert(old_capa >= capacity);
245
    if (old_capa > capacity)
246
        SIZED_REALLOC_N(RARRAY(ary)->as.heap.ptr, VALUE, capacity, old_capa);
399
    if (old_capa > capacity) ary_heap_realloc(ary, capacity);
400

  
401
    ary_verify(ary);
247 402
}
248 403

  
249 404
static void
......
258 413
	new_capa = (ARY_MAX_SIZE - min) / 2;
259 414
    }
260 415
    new_capa += min;
416
    // fprintf(stderr, "ary_double_capa: %p %d\n", (void *)ary, FL_TEST(ary, RARRAY_TRANSIENT_FLAG) ? 1 : 0);
261 417
    ary_resize_capa(ary, new_capa);
418

  
419
    ary_verify(ary);
262 420
}
263 421

  
264 422
static void
......
282 440
    VALUE shared = RARRAY(ary)->as.heap.aux.shared;
283 441
    rb_ary_decrement_share(shared);
284 442
    FL_UNSET_SHARED(ary);
443

  
444
    ary_verify(ary);
285 445
}
286 446

  
287 447
static inline void
......
314 474
rb_ary_modify_check(VALUE ary)
315 475
{
316 476
    rb_check_frozen(ary);
477
    ary_verify(ary);
317 478
}
318 479

  
319 480
void
......
343 504
	    rb_ary_decrement_share(shared);
344 505
	}
345 506
        else {
346
            VALUE *ptr = ALLOC_N(VALUE, len);
507
            VALUE *ptr = ary_heap_alloc(ary, len);
347 508
            MEMCPY(ptr, RARRAY_CONST_PTR(ary), VALUE, len);
348 509
            rb_ary_unshare(ary);
349 510
            ARY_SET_CAPA(ary, len);
......
352 513

  
353 514
	rb_gc_writebarrier_remember(ary);
354 515
    }
516
    ary_verify(ary);
355 517
}
356 518

  
357 519
static VALUE
......
370 532
	    if (ARY_SHARED_OCCUPIED(shared)) {
371 533
		if (RARRAY_CONST_PTR(ary) - RARRAY_CONST_PTR(shared) + new_len <= RARRAY_LEN(shared)) {
372 534
		    rb_ary_modify_check(ary);
535

  
536
                    ary_verify(shared);
373 537
		    return shared;
374 538
		}
375 539
		else {
......
379 543
		    if (new_len > capa - (capa >> 6)) {
380 544
			ary_double_capa(ary, new_len);
381 545
		    }
546

  
547
                    ary_verify(ary);
382 548
		    return ary;
383 549
		}
384 550
	    }
......
393 559
	ary_double_capa(ary, new_len);
394 560
    }
395 561

  
562
    ary_verify(ary);
396 563
    return ary;
397 564
}
398 565

  
......
449 616
    return ary_alloc(klass);
450 617
}
451 618

  
619
void rb_transient_heap_dump(void);
620

  
452 621
static VALUE
453 622
ary_new(VALUE klass, long capa)
454 623
{
......
465 634

  
466 635
    ary = ary_alloc(klass);
467 636
    if (capa > RARRAY_EMBED_LEN_MAX) {
468
	ptr = ALLOC_N(VALUE, capa);
637
        ptr = ary_heap_alloc(ary, capa);
469 638
        FL_UNSET_EMBED(ary);
470 639
        ARY_SET_PTR(ary, ptr);
471 640
        ARY_SET_CAPA(ary, capa);
......
529 698
VALUE
530 699
rb_ary_tmp_new(long capa)
531 700
{
532
    return ary_new(0, capa);
701
    VALUE ary = ary_new(0, capa);
702
    rb_ary_detransient(ary);
703
    return ary;
533 704
}
534 705

  
535 706
VALUE
......
546 717
{
547 718
    if (ARY_OWNS_HEAP_P(ary)) {
548 719
	RB_DEBUG_COUNTER_INC(obj_ary_ptr);
549
	ruby_sized_xfree((void *)ARY_HEAP_PTR(ary), ARY_HEAP_SIZE(ary));
720
        ary_heap_free(ary);
550 721
    }
551 722
    else {
552 723
	RB_DEBUG_COUNTER_INC(obj_ary_embed);
......
569 740
{
570 741
    rb_ary_free(ary);
571 742
    RBASIC(ary)->flags |= RARRAY_EMBED_FLAG;
572
    RBASIC(ary)->flags &= ~RARRAY_EMBED_LEN_MASK;
743
    RBASIC(ary)->flags &= ~(RARRAY_EMBED_LEN_MASK | RARRAY_TRANSIENT_FLAG);
573 744
}
574 745

  
575 746
static VALUE
576 747
ary_make_shared(VALUE ary)
577 748
{
578 749
    assert(!ARY_EMBED_P(ary));
750
    ary_verify(ary);
751

  
579 752
    if (ARY_SHARED_P(ary)) {
580 753
	return ARY_SHARED(ary);
581 754
    }
......
583 756
	return ary;
584 757
    }
585 758
    else if (OBJ_FROZEN(ary)) {
759
        rb_ary_detransient(ary);
586 760
	ary_shrink_capa(ary);
587 761
	FL_SET_SHARED_ROOT(ary);
588 762
	ARY_SET_SHARED_NUM(ary, 1);
......
590 764
    }
591 765
    else {
592 766
	long capa = ARY_CAPA(ary), len = RARRAY_LEN(ary);
767
        const VALUE *ptr;
593 768
	NEWOBJ_OF(shared, struct RArray, 0, T_ARRAY | (RGENGC_WB_PROTECTED_ARRAY ? FL_WB_PROTECTED : 0));
594
        FL_UNSET_EMBED(shared);
595 769

  
770
        rb_ary_detransient(ary);
771
        ptr = ARY_HEAP_PTR(ary);
772

  
773
        FL_UNSET_EMBED(shared);
596 774
	ARY_SET_LEN((VALUE)shared, capa);
597
	ARY_SET_PTR((VALUE)shared, RARRAY_CONST_PTR(ary));
775
	ARY_SET_PTR((VALUE)shared, ptr);
598 776
	ary_mem_clear((VALUE)shared, len, capa - len);
599 777
	FL_SET_SHARED_ROOT(shared);
600 778
	ARY_SET_SHARED_NUM((VALUE)shared, 1);
601 779
	FL_SET_SHARED(ary);
602 780
	ARY_SET_SHARED(ary, (VALUE)shared);
603 781
	OBJ_FREEZE(shared);
782

  
783
        ary_verify(ary);
604 784
	return (VALUE)shared;
605 785
    }
606 786
}
......
736 916
    rb_ary_modify(ary);
737 917
    if (argc == 0) {
738 918
	if (ARY_OWNS_HEAP_P(ary) && RARRAY_CONST_PTR(ary) != 0) {
739
	    ruby_sized_xfree((void *)RARRAY_CONST_PTR(ary), ARY_HEAP_SIZE(ary));
919
            ary_heap_free(ary);
740 920
	}
741 921
        rb_ary_unshare_safe(ary);
742 922
        FL_SET_EMBED(ary);
......
784 964
    return ary;
785 965
}
786 966

  
967
static VALUE
968
ary_initialize_copy(VALUE self, VALUE orig)
969
{
970
    FL_UNSET(self,
971
             FL_USER1 | FL_USER2 | FL_USER3 | /* embed */
972
             FL_USER5 | /* shared */
973
             FL_USER13);
974

  
975
    return rb_ary_replace(self, orig);
976
}
977

  
787 978
/*
788 979
 * Returns a new array populated with the given objects.
789 980
 *
......
858 1049

  
859 1050
        ARY_INCREASE_PTR(result, offset);
860 1051
        ARY_SET_LEN(result, len);
1052

  
1053
        ary_verify(result);
861 1054
        return result;
862 1055
    }
863 1056
}
......
973 1166
    }
974 1167
    --n;
975 1168
    ARY_SET_LEN(ary, n);
1169
    ary_verify(ary);
976 1170
    return RARRAY_AREF(ary, n);
977 1171
}
978 1172

  
......
1006 1200
    rb_ary_modify_check(ary);
1007 1201
    result = ary_take_first_or_last(argc, argv, ary, ARY_TAKE_LAST);
1008 1202
    ARY_INCREASE_LEN(ary, -RARRAY_LEN(result));
1203
    ary_verify(ary);
1009 1204
    return result;
1010 1205
}
1011 1206

  
......
1024 1219
		MEMMOVE(ptr, ptr+1, VALUE, len-1);
1025 1220
	    }); /* WB: no new reference */
1026 1221
            ARY_INCREASE_LEN(ary, -1);
1222
            ary_verify(ary);
1027 1223
	    return top;
1028 1224
	}
1029 1225
        assert(!ARY_EMBED_P(ary)); /* ARY_EMBED_LEN_MAX < ARY_DEFAULT_SIZE */
......
1037 1233
    ARY_INCREASE_PTR(ary, 1);		/* shift ptr */
1038 1234
    ARY_INCREASE_LEN(ary, -1);
1039 1235

  
1236
    ary_verify(ary);
1237

  
1040 1238
    return top;
1041 1239
}
1042 1240

  
......
1096 1294
    }
1097 1295
    ARY_INCREASE_LEN(ary, -n);
1098 1296

  
1297
    ary_verify(ary);
1099 1298
    return result;
1100 1299
}
1101 1300

  
......
1129 1328

  
1130 1329
    /* use shared array for big "queues" */
1131 1330
    if (new_len > ARY_DEFAULT_SIZE * 4) {
1331
        ary_verify(ary);
1332

  
1132 1333
	/* make a room for unshifted items */
1133 1334
	capa = ARY_CAPA(ary);
1134 1335
	ary_make_shared(ary);
......
1146 1347
	}
1147 1348
	ARY_SET_PTR(ary, head - argc);
1148 1349
	assert(ARY_SHARED_OCCUPIED(ARY_SHARED(ary)));
1350

  
1351
        ary_verify(ary);
1149 1352
	return ARY_SHARED(ary);
1150 1353
    }
1151 1354
    else {
......
1154 1357
	    MEMMOVE(ptr + argc, ptr, VALUE, len);
1155 1358
	});
1156 1359

  
1360
        ary_verify(ary);
1157 1361
	return ary;
1158 1362
    }
1159 1363
}
......
1667 1871
    }
1668 1872
    else {
1669 1873
	if (olen > len + ARY_DEFAULT_SIZE) {
1670
	    SIZED_REALLOC_N(RARRAY(ary)->as.heap.ptr, VALUE, len, RARRAY(ary)->as.heap.aux.capa);
1874
            ary_heap_realloc(ary, len);
1671 1875
	    ARY_SET_CAPA(ary, len);
1672 1876
	}
1673 1877
	ARY_SET_HEAP_LEN(ary, len);
1674 1878
    }
1879
    ary_verify(ary);
1675 1880
    return ary;
1676 1881
}
1677 1882

  
......
2496 2701
                    rb_ary_unshare(ary);
2497 2702
                }
2498 2703
                else {
2499
		    ruby_sized_xfree((void *)ARY_HEAP_PTR(ary), ARY_HEAP_SIZE(ary));
2704
                    ary_heap_free(ary);
2500 2705
                }
2501 2706
                ARY_SET_PTR(ary, RARRAY_CONST_PTR(tmp));
2502 2707
                ARY_SET_HEAP_LEN(ary, len);
......
3100 3305
	MEMMOVE(ptr+pos, ptr+pos+1, VALUE, len-pos-1);
3101 3306
    });
3102 3307
    ARY_INCREASE_LEN(ary, -1);
3103

  
3308
    ary_verify(ary);
3104 3309
    return del;
3105 3310
}
3106 3311

  
......
3204 3409

  
3205 3410
    for (i = 0; i < RARRAY_LEN(orig); i++) {
3206 3411
	VALUE v = RARRAY_AREF(orig, i);
3412

  
3207 3413
	if (!RTEST(rb_yield(v))) {
3208 3414
	    rb_ary_push(result, v);
3209 3415
	}
......
3485 3691
        VALUE shared = 0;
3486 3692

  
3487 3693
        if (ARY_OWNS_HEAP_P(copy)) {
3488
	    RARRAY_PTR_USE(copy, ptr, ruby_sized_xfree(ptr, ARY_HEAP_SIZE(copy)));
3694
            ary_heap_free(copy);
3489 3695
	}
3490 3696
        else if (ARY_SHARED_P(copy)) {
3491 3697
            shared = ARY_SHARED(copy);
......
3501 3707
    else {
3502 3708
        VALUE shared = ary_make_shared(orig);
3503 3709
        if (ARY_OWNS_HEAP_P(copy)) {
3504
	    RARRAY_PTR_USE(copy, ptr, ruby_sized_xfree(ptr, ARY_HEAP_SIZE(copy)));
3710
            ary_heap_free(copy);
3505 3711
        }
3506 3712
        else {
3507 3713
            rb_ary_unshare_safe(copy);
......
3528 3734
rb_ary_clear(VALUE ary)
3529 3735
{
3530 3736
    rb_ary_modify_check(ary);
3531
    ARY_SET_LEN(ary, 0);
3532 3737
    if (ARY_SHARED_P(ary)) {
3533 3738
	if (!ARY_EMBED_P(ary)) {
3534 3739
	    rb_ary_unshare(ary);
3535 3740
	    FL_SET_EMBED(ary);
3741
            ARY_SET_EMBED_LEN(ary, 0);
3536 3742
	}
3537 3743
    }
3538
    else if (ARY_DEFAULT_SIZE * 2 < ARY_CAPA(ary)) {
3744
    else {
3745
        ARY_SET_LEN(ary, 0);
3746
        if (ARY_DEFAULT_SIZE * 2 < ARY_CAPA(ary)) {
3539 3747
	ary_resize_capa(ary, ARY_DEFAULT_SIZE * 2);
3540 3748
    }
3749
    }
3541 3750
    return ary;
3542 3751
}
3543 3752

  
......
6248 6457
    rb_define_singleton_method(rb_cArray, "[]", rb_ary_s_create, -1);
6249 6458
    rb_define_singleton_method(rb_cArray, "try_convert", rb_ary_s_try_convert, 1);
6250 6459
    rb_define_method(rb_cArray, "initialize", rb_ary_initialize, -1);
6251
    rb_define_method(rb_cArray, "initialize_copy", rb_ary_replace, 1);
6460
    rb_define_method(rb_cArray, "initialize_copy", ary_initialize_copy, 1);
6252 6461

  
6253 6462
    rb_define_method(rb_cArray, "inspect", rb_ary_inspect, 0);
6254 6463
    rb_define_alias(rb_cArray,  "to_s", "inspect");
common.mk (working copy)
131 131
		thread.$(OBJEXT) \
132 132
		time.$(OBJEXT) \
133 133
		transcode.$(OBJEXT) \
134
		transient_heap.$(OBJEXT) \
134 135
		util.$(OBJEXT) \
135 136
		variable.$(OBJEXT) \
136 137
		version.$(OBJEXT) \
......
2827 2828
transcode.$(OBJEXT): {$(VPATH)}subst.h
2828 2829
transcode.$(OBJEXT): {$(VPATH)}transcode.c
2829 2830
transcode.$(OBJEXT): {$(VPATH)}transcode_data.h
2831
transient_heap.$(OBJEXT): {$(VPATH)}transient_heap.c
2830 2832
util.$(OBJEXT): $(hdrdir)/ruby/ruby.h
2831 2833
util.$(OBJEXT): $(top_srcdir)/include/ruby.h
2832 2834
util.$(OBJEXT): {$(VPATH)}config.h
gc.c (working copy)
837 837
void rb_iseq_mark(const rb_iseq_t *iseq);
838 838
void rb_iseq_free(const rb_iseq_t *iseq);
839 839

  
840
void rb_transient_heap_start_marking(int full_marking);
841
void rb_transient_heap_finish_marking(void);
842
void rb_transient_heap_mark(VALUE obj, const void *ptr);
843
void rb_transient_heap_promote(VALUE obj);
844

  
840 845
void rb_gcdebug_print_obj_condition(VALUE obj);
841 846

  
842 847
static void rb_objspace_call_finalizer(rb_objspace_t *objspace);
......
1188 1193
{
1189 1194
    MARK_IN_BITMAP(&page->uncollectible_bits[0], obj);
1190 1195
    objspace->rgengc.old_objects++;
1196
    rb_transient_heap_promote(obj);
1191 1197

  
1192 1198
#if RGENGC_PROFILE >= 2
1193 1199
    objspace->profile.total_promoted_count++;
......
4606 4612

  
4607 4613
      case T_ARRAY:
4608 4614
	if (FL_TEST(obj, ELTS_SHARED)) {
4609
	    gc_mark(objspace, any->as.array.as.heap.aux.shared);
4615
            VALUE root = any->as.array.as.heap.aux.shared;
4616
	    gc_mark(objspace, root);
4610 4617
	}
4611 4618
	else {
4612 4619
	    long i, len = RARRAY_LEN(obj);
4613 4620
	    const VALUE *ptr = RARRAY_CONST_PTR(obj);
4614 4621
	    for (i=0; i < len; i++) {
4615
		gc_mark(objspace, *ptr++);
4622
		gc_mark(objspace, ptr[i]);
4623
	    }
4624

  
4625
            if (objspace->mark_func_data == NULL) {
4626
                if (!FL_TEST_RAW(obj, RARRAY_EMBED_FLAG) &&
4627
                    ARY_TRANSIENT_P(obj)) {
4628
                    rb_transient_heap_mark(obj, ptr);
4629
                }
4616 4630
	    }
4617 4631
	}
4618 4632
	break;
......
5605 5619
#endif
5606 5620
    }
5607 5621

  
5622
    rb_transient_heap_finish_marking();
5623

  
5608 5624
    gc_event_hook(objspace, RUBY_INTERNAL_EVENT_GC_END_MARK, 0);
5609 5625

  
5610 5626
    return TRUE;
......
6474 6490
    objspace->profile.heap_used_at_gc_start = heap_allocated_pages;
6475 6491
    gc_prof_setup_new_record(objspace, reason);
6476 6492
    gc_reset_malloc_info(objspace);
6493
    rb_transient_heap_start_marking(do_full_mark);
6477 6494

  
6478 6495
    gc_event_hook(objspace, RUBY_INTERNAL_EVENT_GC_START, 0 /* TODO: pass minor/immediate flag? */);
6479 6496
    GC_ASSERT(during_gc);
......
9450 9467
#if USE_RGENGC
9451 9468
	const int age = RVALUE_FLAGS_AGE(RBASIC(obj)->flags);
9452 9469

  
9470
        if (is_pointer_to_heap(&rb_objspace, (void *)obj)) {
9453 9471
	snprintf(buff, buff_size, "%p [%d%s%s%s%s] %s",
9454 9472
		 (void *)obj, age,
9455 9473
		 C(RVALUE_UNCOLLECTIBLE_BITMAP(obj),  "L"),
......
9457 9475
		 C(RVALUE_MARKING_BITMAP(obj),        "R"),
9458 9476
		 C(RVALUE_WB_UNPROTECTED_BITMAP(obj), "U"),
9459 9477
		 obj_type_name(obj));
9478
        }
9479
        else {
9480
            /* fake */
9481
            snprintf(buff, buff_size, "%p [%dXXXX] %s",
9482
                     (void *)obj, age,
9483
                     obj_type_name(obj));
9484
        }
9460 9485
#else
9461 9486
	snprintf(buff, buff_size, "%p [%s] %s",
9462 9487
		 (void *)obj,
......
9486 9511
	    UNEXPECTED_NODE(rb_raw_obj_info);
9487 9512
	    break;
9488 9513
	  case T_ARRAY:
9489
	    snprintf(buff, buff_size, "%s [%s%s] len: %d", buff,
9514
            if (FL_TEST(obj, ELTS_SHARED)) {
9515
                snprintf(buff, buff_size, "%s shared -> %s", buff,
9516
                         rb_obj_info(RARRAY(obj)->as.heap.aux.shared));
9517
            }
9518
            else if (FL_TEST(obj, RARRAY_EMBED_FLAG)) {
9519
                snprintf(buff, buff_size, "%s [%s%s] len: %d (embed)", buff,
9490 9520
		     C(ARY_EMBED_P(obj),  "E"),
9491 9521
		     C(ARY_SHARED_P(obj), "S"),
9492 9522
		     (int)RARRAY_LEN(obj));
9523
            }
9524
            else {
9525
                snprintf(buff, buff_size, "%s [%s%s%s] len: %d, capa:%d ptr:%p", buff,
9526
                         C(ARY_EMBED_P(obj),  "E"),
9527
                         C(ARY_SHARED_P(obj), "S"),
9528
                         C(ARY_TRANSIENT_P(obj), "T"),
9529
                         (int)RARRAY_LEN(obj),
9530
                         ARY_EMBED_P(obj) ? -1 : (int)RARRAY(obj)->as.heap.aux.capa,
9531
                         RARRAY_CONST_PTR(obj));
9532
            }
9493 9533
	    break;
9494 9534
	  case T_STRING: {
9495 9535
	    snprintf(buff, buff_size, "%s %s", buff, RSTRING_PTR(obj));
inits.c (working copy)
16 16
void
17 17
rb_call_inits(void)
18 18
{
19
    CALL(TransientHeap);
19 20
    CALL(Method);
20 21
    CALL(RandomSeedCore);
21 22
    CALL(sym);
internal.h (working copy)
1073 1073
struct vtm; /* defined by timev.h */
1074 1074

  
1075 1075
/* array.c */
1076
#define RARRAY_TRANSIENT_FLAG FL_USER13
1077
#define ARY_TRANSIENT_P(ary) FL_TEST_RAW((ary), RARRAY_TRANSIENT_FLAG)
1078

  
1076 1079
VALUE rb_ary_last(int, const VALUE *, VALUE);
1077 1080
void rb_ary_set_len(VALUE, long);
1078 1081
void rb_ary_delete_same(VALUE, VALUE);
transient_heap.c (working copy)
1
#include "ruby/ruby.h"
2
#include "ruby/debug.h"
3
#include "vm_debug.h"
4
#include "gc.h"
5
#include "internal.h"
6
#include <sys/mman.h>
7
#include <errno.h>
8
#include "ruby_assert.h"
9

  
10
/*
11
 * 1: enable assertions
12
 * 2: enable verify
13
 */
14
#ifndef TRANSIENT_HEAP_CHECK_MODE
15
#define TRANSIENT_HEAP_CHECK_MODE 0
16
#endif
17
#define TH_ASSERT(expr) RUBY_ASSERT_MESG_WHEN(TRANSIENT_HEAP_CHECK_MODE > 0, expr, #expr)
18

  
19
/*
20
 * 1: show events
21
 * 2: show dump at events
22
 * 3: show all operations
23
 */
24
#define TRANSIENT_HEAP_DEBUG 0
25

  
26
/* Provide blocks infinitely for debug.
27
 * This mode generates blocks unlimitedly
28
 * and prohibit access free'ed blocks to check invalid access.
29
 */
30
#define TRANSIENT_HEAP_INFINITE_BLOCK_MODE 0
31

  
32
enum transient_heap_status {
33
    transient_heap_none,
34
    transient_heap_marking,
35
    transient_heap_escaping
36
};
37

  
38
struct transient_heap_block {
39
    struct transient_heap_block_header {
40
        int16_t size; /* sizeof(block) = TRANSIENT_HEAP_BLOCK_SIZE - sizeof(struct transient_heap_block_header) */
41
        int16_t index;
42
        int16_t last_marked_index;
43
        int16_t objects;
44
        struct transient_heap_block *next_block;
45
    } info;
46
    char buff[];
47
};
48

  
49
struct transient_heap {
50
    struct transient_heap_block *using_blocks;
51
    struct transient_heap_block *marked_blocks;
52
    struct transient_heap_block *free_blocks;
53
    int total_objects;
54
    int total_marked_objects;
55
    int total_blocks;
56
    enum transient_heap_status status;
57

  
58
    VALUE *promoted_objects;
59
    int promoted_objects_size;
60
    int promoted_objects_index;
61
};
62

  
63
struct transient_alloc_header {
64
    uint16_t magic;
65
    uint16_t size;
66
    int16_t next_marked_index;
67
    int16_t dummy;
68
    VALUE obj;
69
};
70

  
71
static struct transient_heap global_transient_heap;
72

  
73

  
74
#define TRANSIENT_HEAP_PROMOTED_DEFAULT_SIZE 1024
75

  
76
                                          /*  K      M */
77
#define TRANSIENT_HEAP_TOTAL_SIZE  (1024 * 1024 *   16)
78
#define TRANSIENT_HEAP_BLOCK_SIZE  (1024 *   32       ) /* int16_t */
79
#define TRANSIENT_HEAP_ALLOC_MAX   (1024 *    2       )
80
#define TRANSIENT_HEAP_BLOCK_NUM   (TRANSIENT_HEAP_TOTAL_SIZE / TRANSIENT_HEAP_BLOCK_SIZE)
81

  
82
#define TRANSIENT_HEAP_ALLOC_MAGIC 0xfeab
83
#define TRANSIENT_HEAP_ALLOC_ALIGN RUBY_ALIGNOF(void *)
84

  
85
#define TRANSIENT_HEAP_ALLOC_MARKING_LAST -1
86
#define TRANSIENT_HEAP_ALLOC_MARKING_FREE -2
87

  
88
#define ROUND_UP(v, a)  (((size_t)(v) + (a) - 1) & ~((a) - 1))
89

  
90
static void
91
transient_heap_block_dump(struct transient_heap* theap, struct transient_heap_block *block)
92
{
93
    int i=0, n=0;
94
    struct transient_alloc_header *header = NULL;
95

  
96
    while (i<block->info.index) {
97
        header = (void *)&block->buff[i];
98
        fprintf(stderr, "%4d %8d %p size:%4d next:%4d %s\n", n, i, header, header->size, header->next_marked_index, rb_obj_info(header->obj));
99
        i += header->size;
100
        n++;
101
    }
102
}
103

  
104
static void
105
transient_heap_blocks_dump(struct transient_heap* theap, struct transient_heap_block *block, const char *type_str)
106
{
107
    while (block) {
108
        fprintf(stderr, "- transient_heap_dump: %s:%p index:%d objects:%d last_marked_index:%d next:%p\n",
109
                type_str, block, block->info.index, block->info.objects, block->info.last_marked_index, block->info.next_block);
110

  
111
        transient_heap_block_dump(theap, block);
112
        block = block->info.next_block;
113
    }
114
}
115

  
116
static void
117
transient_heap_dump(struct transient_heap* theap)
118
{
119
    fprintf(stderr, "transient_heap_dump objects:%d marked_objects:%d blocks:%d\n", theap->total_objects, theap->total_marked_objects, theap->total_blocks);
120
    transient_heap_blocks_dump(theap, theap->using_blocks, "using_blocks");
121
    transient_heap_blocks_dump(theap, theap->marked_blocks, "marked_blocks");
122
    transient_heap_blocks_dump(theap, theap->free_blocks, "free_blocks");
123
}
124

  
125
void
126
rb_transient_heap_dump(void)
127
{
128
    transient_heap_dump(&global_transient_heap);
129
}
130

  
131
#if TRANSIENT_HEAP_CHECK_MODE >= 2
132
static int
133
transient_heap_block_verify(struct transient_heap *theap, struct transient_heap_block *block)
134
{
135
    int i=0, n=0;
136
    struct transient_alloc_header *header;
137

  
138
    while (i<block->info.index) {
139
        header = (void *)&block->buff[i];
140
        TH_ASSERT(header->magic == TRANSIENT_HEAP_ALLOC_MAGIC);
141
        n ++;
142
        i += header->size;
143
    }
144
    TH_ASSERT(block->info.objects == n);
145

  
146
    return n;
147
}
148
#endif
149

  
150
static void
151
transient_heap_verify(struct transient_heap *theap)
152
{
153
#if TRANSIENT_HEAP_CHECK_MODE >= 2
154
    struct transient_heap_block *block;
155
    int n=0;
156

  
157
    // using_blocks
158
    block = theap->using_blocks;
159
    while (block) {
160
        n += transient_heap_block_verify(theap, block);
161
        block = block->info.next_block;
162
    }
163

  
164
    // marked_blocks
165
    block = theap->marked_blocks;
166
    while (block) {
167
        n += transient_heap_block_verify(theap, block);
168
        TH_ASSERT(block->info.index > 0);
169
        block = block->info.next_block;
170
    }
171

  
172
    TH_ASSERT(n == theap->total_objects);
173
    TH_ASSERT(n >= theap->total_marked_objects);
174
#endif
175
}
176

  
177
static struct transient_heap*
178
transient_heap_get(void)
179
{
180
    struct transient_heap* theap = &global_transient_heap;
181
    transient_heap_verify(theap);
182
    return theap;
183
}
184

  
185
static void
186
reset_block(struct transient_heap_block *block)
187
{
188
    block->info.size = TRANSIENT_HEAP_BLOCK_SIZE - sizeof(struct transient_heap_block_header);
189
    block->info.index = 0;
190
    block->info.objects = 0;
191
    block->info.last_marked_index = TRANSIENT_HEAP_ALLOC_MARKING_LAST;
192
    block->info.next_block = NULL;
193
}
194

  
195
static void
196
connect_to_free_blocks(struct transient_heap *theap, struct transient_heap_block *block)
197
{
198
    block->info.next_block = theap->free_blocks;
199
    theap->free_blocks = block;
200
}
201

  
202
static void
203
connect_to_using_blocks(struct transient_heap *theap, struct transient_heap_block *block)
204
{
205
    block->info.next_block = theap->using_blocks;
206
    theap->using_blocks = block;
207
}
208

  
209
#if 0
210
static void
211
connect_to_marked_blocks(struct transient_heap *theap, struct transient_heap_block *block)
212
{
213
    block->info.next_block = theap->marked_blocks;
214
    theap->marked_blocks = block;
215
}
216
#endif
217

  
218
static void
219
append_to_marked_blocks(struct transient_heap *theap, struct transient_heap_block *append_blocks)
220
{
221
    if (theap->marked_blocks) {
222
        struct transient_heap_block *block = theap->marked_blocks, *last_block = NULL;
223
        while (block) {
224
            last_block = block;
225
            block = block->info.next_block;
226
        }
227

  
228
        TH_ASSERT(last_block->info.next_block == NULL);
229
        last_block->info.next_block = append_blocks;
230
    }
231
    else {
232
        theap->marked_blocks = append_blocks;
233
    }
234
}
235

  
236
static struct transient_heap_block *
237
transient_heap_block_alloc(struct transient_heap* theap)
238
{
239
    struct transient_heap_block *block;
240
    block = mmap(NULL, TRANSIENT_HEAP_BLOCK_SIZE, PROT_READ | PROT_WRITE,
241
                 MAP_PRIVATE | MAP_ANONYMOUS,
242
                 -1, 0);
243
    if (block == MAP_FAILED) rb_bug("transient_heap_block_alloc: err:%d\n", errno);
244

  
245
    reset_block(block);
246

  
247
    TH_ASSERT(((intptr_t)block->buff & (TRANSIENT_HEAP_ALLOC_ALIGN-1)) == 0);
248

  
249
    theap->total_blocks++;
250
    // fprintf(stderr, "transient_heap_block_alloc: %d\n", theap->total_blocks);
251
    return block;
252
}
253

  
254
static struct transient_heap_block *
255
transient_heap_allocatable_block(struct transient_heap* theap)
256
{
257
    struct transient_heap_block *block;
258

  
259
#if TRANSIENT_HEAP_INFINITE_BLOCK_MODE
260
    block = transient_heap_block_alloc(theap);
261
#else
262
    // get one block from free_blocks
263
    block = theap->free_blocks;
264
    if (block) {
265
        theap->free_blocks = block->info.next_block;
266
        block->info.next_block = NULL;
267
    }
268
#endif
269

  
270
    return block;
271
}
272

  
273
/* Bump-allocate `size` bytes (header included, already aligned by the
 * caller) from the using-block chain.  If the current block lacks room,
 * a new block is pulled in via transient_heap_allocatable_block() and
 * pushed onto using_blocks, then tried instead.  Returns NULL when no
 * block with enough space can be obtained. */
static struct transient_alloc_header *
transient_heap_allocatable_header(struct transient_heap* theap, size_t size)
{
    struct transient_heap_block *block = theap->using_blocks;

    while (block) {
        TH_ASSERT(block->info.size >= block->info.index);

        if (block->info.size - block->info.index >= (int32_t)size) {
            /* Enough room: carve the allocation out at the bump index. */
            struct transient_alloc_header *header = (void *)&block->buff[block->info.index];
            block->info.index += size;
            block->info.objects++;
            return header;
        }
        else {
            /* Block is full; fetch another.  A NULL result ends the
             * loop and the allocation fails. */
            block = transient_heap_allocatable_block(theap);
            if (block) connect_to_using_blocks(theap, block);
        }
    }

    return NULL;
}
295

  
296
/* Allocate `req_size` bytes of transient storage on behalf of `obj`.
 * Returns a pointer to the usable payload (just past the bookkeeping
 * header), or NULL when the request cannot be served — callers must
 * then fall back to ordinary heap allocation.  NULL is returned when:
 * the rounded-up size exceeds TRANSIENT_HEAP_ALLOC_MAX; `obj` is
 * already promoted (old objects must not reference transient storage);
 * or no block has enough room. */
void *
rb_transient_heap_alloc(VALUE obj, size_t req_size)
{
    struct transient_heap* theap = transient_heap_get();
    /* Reserve space for the alloc header and round up to the heap's
     * allocation alignment. */
    size_t size = ROUND_UP(req_size + sizeof(struct transient_alloc_header), TRANSIENT_HEAP_ALLOC_ALIGN);

    if (size > TRANSIENT_HEAP_ALLOC_MAX) {
        // fprintf(stderr, "rb_transient_heap_alloc: NULL (too big: %ld)\n", (long)size);
        return NULL;
    }
    else if (RB_OBJ_PROMOTED_RAW(obj)) {
        // fprintf(stderr, "rb_transient_heap_alloc: NULL (not for promoted objects)\n");
        return NULL;
    }
    else {
        struct transient_alloc_header *header = transient_heap_allocatable_header(theap, size);
        if (header) {
            void *ptr;

            /* Fill in the header: magic for corruption detection,
             * FREE sentinel meaning "not yet marked", and the owning
             * object for cross-checking during marking. */
            header->size = size;
            header->magic = TRANSIENT_HEAP_ALLOC_MAGIC;
            header->next_marked_index = TRANSIENT_HEAP_ALLOC_MARKING_FREE;
            header->obj = obj; // TODO: for verify

            // stat info
            theap->total_objects++;
            ptr = header + 1; /* payload starts immediately after the header */
            // fprintf(stderr, "rb_transient_heap_alloc: header:%p ptr:%p size:%d obj:%s\n", header, ptr, (int)size, rb_obj_info(obj));
            return ptr;
        }
        else {
            // fprintf(stderr, "rb_transient_heap_alloc: NULL (no enough space: %ld)\n", (long)size);
            return NULL;
        }
    }
}
332

  
333
void
334
Init_TransientHeap(void)
335
{
336
    int i, block_num;
337
    struct transient_heap* theap = transient_heap_get();
338

  
339
#if TRANSIENT_HEAP_INFINITE_BLOCK_MODE
340
    block_num = 1;
341
#else
342
    TH_ASSERT(TRANSIENT_HEAP_BLOCK_SIZE * TRANSIENT_HEAP_BLOCK_NUM == TRANSIENT_HEAP_TOTAL_SIZE);
343
    block_num = TRANSIENT_HEAP_BLOCK_NUM;
344
#endif
345
    for (i=0; i<block_num-1; i++) {
346
        connect_to_free_blocks(theap, transient_heap_block_alloc(theap));
347
    }
348
    theap->using_blocks = transient_heap_block_alloc(theap);
349

  
350
    theap->promoted_objects_size = TRANSIENT_HEAP_PROMOTED_DEFAULT_SIZE;
351
    theap->promoted_objects_index = 0;
352
    /* should not use ALLOC_N to be free from GC */
353
    theap->promoted_objects = malloc(sizeof(VALUE) * theap->promoted_objects_size);
354
    if (theap->promoted_objects == NULL) rb_bug("Init_TransientHeap: malloc failed.");
355
}
356

  
357
static struct transient_heap_block *
358
blocks_alloc_header_to_block(struct transient_heap *theap, struct transient_heap_block *blocks, struct transient_alloc_header *header)
359
{
360
    struct transient_heap_block *block = blocks;
361

  
362
    TH_ASSERT(theap->status == transient_heap_marking);
363

  
364
    while (block) {
365
        if (block->buff <= (char *)header && (char *)header < block->buff + block->info.size) {
366
            return block;
367
        }
368
        block = block->info.next_block;
369
    }
370

  
371
    return NULL;
372
}
373

  
374
static struct transient_heap_block *
375
alloc_header_to_block(struct transient_heap *theap, struct transient_alloc_header *header)
376
{
377
    struct transient_heap_block *block;
378

  
379
    if ((block = blocks_alloc_header_to_block(theap, theap->marked_blocks, header)) != NULL) {
380
        if (TRANSIENT_HEAP_DEBUG >= 3) fprintf(stderr, "alloc_header_to_block: found in marked_blocks\n");
381
        return block;
382
    }
383
    else if ((block = blocks_alloc_header_to_block(theap, theap->using_blocks, header)) != NULL) {
384
        if (TRANSIENT_HEAP_DEBUG >= 3) fprintf(stderr, "alloc_header_to_block: found in using_blocks\n");
385
        return block;
386
    }
387
    else {
388
        transient_heap_dump(theap);
389
        rb_bug("alloc_header_to_block: not found in mark_blocks (%p)\n", header);
390
    }
391
}
392

  
393
/* GC mark hook: record that the transient storage at `ptr` (owned by
 * `obj`) is live.  The allocation header sits immediately before the
 * payload; marked headers are chained through next_marked_index off
 * their block's last_marked_index (as buffer offsets, not pointers). */
void
rb_transient_heap_mark(VALUE obj, const void *ptr)
{
    struct transient_alloc_header *header = (void *)ptr;
    header = header - 1; /* step back from the payload to its header */

    if (TRANSIENT_HEAP_DEBUG >= 3) fprintf(stderr, "rb_transient_heap_mark: %s (%p)\n", rb_obj_info(obj), ptr);

    if (header->next_marked_index != TRANSIENT_HEAP_ALLOC_MARKING_FREE) {
        // already marked
        return;
    }

    if (header->magic != TRANSIENT_HEAP_ALLOC_MAGIC) {
        /* Header overwritten, or `ptr` is not a transient allocation. */
        struct transient_heap* theap = transient_heap_get();
        transient_heap_dump(theap);
        rb_bug("rb_transient_heap_mark: magic is broken");
    }
    else if (header->obj != obj) {
        /* The allocation belongs to a different object than claimed. */
        struct transient_heap* theap = transient_heap_get();
        transient_heap_dump(theap);
        rb_bug("rb_transient_heap_mark: unmatch\n");
    }
    else {
        /* Push this header onto its containing block's marked list. */
        struct transient_heap* theap = transient_heap_get();
        struct transient_heap_block *block = alloc_header_to_block(theap, header);
        header->next_marked_index = block->info.last_marked_index;
        block->info.last_marked_index = (int)((char *)header - block->buff);
        theap->total_marked_objects++;
    }
}
424

  
425
static void *
426
transient_heap_ptr(VALUE obj, int error)
427
{
428
    void *ptr;
429

  
430
    switch (BUILTIN_TYPE(obj)) {
431
      case T_ARRAY:
432
        if (ARY_TRANSIENT_P(obj)) {
433
            ptr = (VALUE *)RARRAY_CONST_PTR(obj);
434
        }
435
        else {
436
            ptr = NULL;
437
        }
438
        break;
439
      default:
440
        if (error) {
441
            rb_bug("transient_heap_ptr: unknown obj %s\n", rb_obj_info(obj));
442
        }
443
        else {
444
            ptr = NULL;
445
        }
446
    }
447

  
448
    return ptr;
449
}
450

  
451
void
452
rb_transient_heap_promote(VALUE obj)
453
{
454

  
455
    if (transient_heap_ptr(obj, FALSE)) {
456
        struct transient_heap* theap = transient_heap_get();
457

  
458
        if (TRANSIENT_HEAP_DEBUG >= 3) fprintf(stderr, "rb_transient_heap_promote: %s\n", rb_obj_info(obj));
459

  
460
        if (theap->promoted_objects_size <= theap->promoted_objects_index) {
461
            theap->promoted_objects_size *= 2;
462
            if (TRANSIENT_HEAP_DEBUG >= 0) fprintf(stderr, "rb_transient_heap_promote: expand table to %d\n", theap->promoted_objects_size);
463
            theap->promoted_objects = realloc(theap->promoted_objects, theap->promoted_objects_size * sizeof(VALUE));
464
            if (theap->promoted_objects == NULL) rb_bug("rb_transient_heap_promote: realloc failed");
465
        }
466
        theap->promoted_objects[theap->promoted_objects_index++] = obj;
467
    }
468
    else {
469
        /* ignore */
470
    }
471
}
472

  
473
void
474
rb_transient_heap_promoted(VALUE obj, const void *ptr)
475
{
476
    struct transient_alloc_header *header = (void *)ptr;
477
    header = header - 1;
478

  
479
    
480
}
481

  
482
static struct transient_alloc_header *
483
alloc_header(struct transient_heap_block* block, int index)
484
{
485
    return (void *)&block->buff[index];
486
}
487

  
488
void rb_ary_detransient(VALUE ary);
489

  
490
/* Recycle all marked blocks after their contents have been escaped.
 * In infinite-block debug mode the pages are poisoned (MADV_DONTNEED +
 * PROT_NONE) so any stale pointer into them faults immediately;
 * otherwise blocks are reset and returned to the free list. */
static void
transient_heap_reset(void)
{
    struct transient_heap* theap = transient_heap_get();
    struct transient_heap_block* block;

    if (TRANSIENT_HEAP_DEBUG >= 1) fprintf(stderr, "!! transient_heap_reset\n");

    block = theap->marked_blocks;
    while (block) {
        struct transient_heap_block *next_block = block->info.next_block;
        theap->total_objects -= block->info.objects;
#if TRANSIENT_HEAP_INFINITE_BLOCK_MODE
        // debug mode
        /* NOTE(review): the mapping is never munmap'ed here — address
         * space appears to be retained deliberately so dangling
         * pointers trap; confirm this is intended. */
        if (madvise(block, TRANSIENT_HEAP_BLOCK_SIZE, MADV_DONTNEED) != 0) {
            rb_bug("madvise err:%d", errno);
        }
        if (mprotect(block, TRANSIENT_HEAP_BLOCK_SIZE, PROT_NONE) != 0) {
            rb_bug("mprotect err:%d", errno);
        }
        theap->total_blocks--;
#else
        reset_block(block);
        connect_to_free_blocks(theap, block);
#endif
        block = next_block;
    }

    theap->marked_blocks = NULL;
    theap->total_marked_objects = 0;
}
521

  
522
static void
523
transient_heap_block_escape(struct transient_heap* theap, struct transient_heap_block* block)
524
{
525
    int marked_index = block->info.last_marked_index;
526
    block->info.last_marked_index = TRANSIENT_HEAP_ALLOC_MARKING_LAST;
527

  
528
    while (marked_index >= 0) {
529
        struct transient_alloc_header *header = alloc_header(block, marked_index);
530
        VALUE obj = header->obj;
531

  
532
        if (TRANSIENT_HEAP_DEBUG >= 1) fprintf(stderr, " * transient_heap_block_escape %p %s\n", header, rb_obj_info(obj));
533

  
534
        if (obj != Qnil) {
535
            switch (BUILTIN_TYPE(obj)) {
536
              case T_ARRAY:
537
                rb_ary_detransient(obj);
538
                break;
539
              default:
540
                rb_bug("unsupporeted");
541
            }
542
            header->obj = Qundef; // to verify
543
        }
544
        marked_index = header->next_marked_index;
545
    }
546
}
547

  
548
static void
549
transient_heap_update_status(struct transient_heap* theap, enum transient_heap_status status)
550
{
551
    TH_ASSERT(theap->status != status);
552
    theap->status = status;
553
}
554

  
555
/* Postponed-job entry point: move every marked (live) transient object
 * off the transient heap, then recycle all blocks via
 * transient_heap_reset().  Runs with GC disabled so object layout
 * cannot change mid-escape.  Skipped entirely when invoked while
 * marking is still in progress (incremental marking). */
static void
transient_heap_escape(void *dmy)
{
    struct transient_heap* theap = transient_heap_get();

    if (theap->status == transient_heap_marking) {
        if (TRANSIENT_HEAP_DEBUG >= 1) fprintf(stderr, "!! transient_heap_escape: skip while transient_heap_marking\n");
    }
    else {
        VALUE gc_disabled = rb_gc_disable();
        struct transient_heap_block* block;

        if (TRANSIENT_HEAP_DEBUG >= 1) fprintf(stderr, "!! transient_heap_escape start total_blocks:%d\n", theap->total_blocks);
        if (TRANSIENT_HEAP_DEBUG >= 2) transient_heap_dump(theap);

        TH_ASSERT(theap->status == transient_heap_none);
        transient_heap_update_status(theap, transient_heap_escaping);

        // escape marked blocks
        block = theap->marked_blocks;
        while (block) {
            transient_heap_block_escape(theap, block);
            block = block->info.next_block;
        }

        // escape using blocks
        // only affect incremental marking
        block = theap->using_blocks;
        while (block) {
            transient_heap_block_escape(theap, block);
            block = block->info.next_block;
        }

        // all objects in marked_objects are escaped.
        transient_heap_reset();

        if (TRANSIENT_HEAP_DEBUG > 0) {
            fprintf(stderr, "!! transient_heap_escape end total_blocks:%d\n", theap->total_blocks);
            // transient_heap_dump(theap);
        }

        transient_heap_verify(theap);
        transient_heap_update_status(theap, transient_heap_none);
        /* Re-enable GC only if it was enabled before this function
         * disabled it (rb_gc_disable returns the previous state). */
        if (gc_disabled != Qtrue) rb_gc_enable();
    }
}
601

  
602
static void
603
clear_marked_index(struct transient_heap_block* block)
604
{
605
    int marked_index = block->info.last_marked_index;
606

  
607
    while (marked_index != TRANSIENT_HEAP_ALLOC_MARKING_LAST) {
608
        struct transient_alloc_header *header = alloc_header(block, marked_index);
609
        TH_ASSERT(marked_index != TRANSIENT_HEAP_ALLOC_MARKING_FREE);
610
        // fprintf(stderr, "clear_marked_index - block:%p mark_index:%d\n", block, marked_index);
611

  
612
        marked_index = header->next_marked_index;
613
        header->next_marked_index = TRANSIENT_HEAP_ALLOC_MARKING_FREE;
614
    }
615

  
616
    block->info.last_marked_index = TRANSIENT_HEAP_ALLOC_MARKING_LAST;
617
}
618

  
619
/* GC hook: called when marking begins.  Clears all per-block mark
 * lists, rotates the current using-blocks over to marked_blocks (so
 * this cycle's survivors accumulate there), and — on minor GC — marks
 * the transient storage of previously promoted (old) objects, since
 * minor marking will not visit them itself. */
void
rb_transient_heap_start_marking(int full_marking)
{
    struct transient_heap* theap = transient_heap_get();
    struct transient_heap_block* block;

    if (TRANSIENT_HEAP_DEBUG >= 1) fprintf(stderr, "!! rb_transient_heap_start_marking objects:%d blocks:%d full_marking:%d\n",
                                           theap->total_objects, theap->total_blocks, full_marking);
    if (TRANSIENT_HEAP_DEBUG >= 2) transient_heap_dump(theap);

    // clear marking info
    block = theap->marked_blocks;
    while (block) {
        clear_marked_index(block);
        block = block->info.next_block;
    }

    block = theap->using_blocks;
    while (block) {
        clear_marked_index(block);
        block = block->info.next_block;
    }

    if (theap->using_blocks) {
        if (theap->using_blocks->info.objects > 0) {
            /* Head block already holds objects: move the whole using
             * chain over to marked_blocks. */
            append_to_marked_blocks(theap, theap->using_blocks);
            theap->using_blocks = NULL;
        }
        else {
            /* Head block is still empty: keep it as the allocation
             * target and move only the rest of the chain. */
            append_to_marked_blocks(theap, theap->using_blocks->info.next_block);
            theap->using_blocks->info.next_block = NULL;
        }
    }

    if (theap->using_blocks == NULL) {
        /* May still be NULL if the pool is exhausted; allocation will
         * then fail gracefully in transient_heap_allocatable_header. */
        theap->using_blocks = transient_heap_allocatable_block(theap);
    }

    TH_ASSERT(theap->status == transient_heap_none);
    transient_heap_update_status(theap, transient_heap_marking);
    theap->total_marked_objects = 0;

    if (full_marking) {
        /* Major GC visits old objects itself, so the promoted table
         * can be rebuilt from scratch during this cycle. */
        theap->promoted_objects_index = 0;
    }
    else { /* mark promoted objects */
        int i;
        for (i=0; i<theap->promoted_objects_index; i++) {
            VALUE obj = theap->promoted_objects[i];
            void *ptr = transient_heap_ptr(obj, TRUE);
            if (ptr) {
                rb_transient_heap_mark(obj, ptr);
            }
        }
    }

    transient_heap_verify(theap);
}
677

  
678
/* GC hook: called when marking finishes.  If any transient objects
 * survived, schedule transient_heap_escape() as a postponed job —
 * escaping allocates and must not run inside GC — otherwise all blocks
 * can be recycled right away. */
void
rb_transient_heap_finish_marking(void)
{
    struct transient_heap* theap = transient_heap_get();

    if (TRANSIENT_HEAP_DEBUG >= 1) fprintf(stderr, "!! rb_transient_heap_finish_marking objects:%d, marked:%d\n",
                                           theap->total_objects,
                                           theap->total_marked_objects);
    if (TRANSIENT_HEAP_DEBUG >= 2) transient_heap_dump(theap);

    /* Marked objects can never outnumber allocated ones. */
    TH_ASSERT(theap->total_objects >= theap->total_marked_objects);

    TH_ASSERT(theap->status == transient_heap_marking);
    transient_heap_update_status(theap, transient_heap_none);

    if (theap->total_marked_objects > 0) {
        if (TRANSIENT_HEAP_DEBUG >= 1) fprintf(stderr, "-> rb_transient_heap_finish_marking register escape func.\n");
        rb_postponed_job_register_one(0, transient_heap_escape, NULL);
    }
    else {
        /* Nothing survived: recycle all blocks immediately. */
        transient_heap_reset();
    }

    transient_heap_verify(theap);
}
0 703