Feature #3176 » thread-priorities-try3.diff

coatl (caleb clausen), 05/18/2010 02:41 AM

eval.c
36 36

  
37 37
void rb_clear_trace_func(void);
38 38
void rb_thread_stop_timer_thread(void);
39
extern void rb_threadptr_interrupt(rb_thread_t *th);
39 40

  
40 41
void rb_call_inits(void);
41 42
void Init_heap(void);
......
118 119
    ruby_finalize_1();
119 120
}
120 121

  
121
void rb_thread_stop_timer_thread(void);
122

  
123 122
int
124 123
ruby_cleanup(volatile int ex)
125 124
{
signal.c
566 566
{
567 567
    int i, sig = 0;
568 568

  
569
    /*this function could be made much faster by use of a bitmask and ffs() */
569 570
    for (i=1; i<RUBY_NSIG; i++) {
570 571
	if (signal_buff.cnt[i] > 0) {
571 572
	    rb_disable_interrupt();
thread.c
47 47
#include "eval_intern.h"
48 48
#include "gc.h"
49 49

  
50
#ifndef USE_NATIVE_THREAD_PRIORITY
51
#define USE_NATIVE_THREAD_PRIORITY 0
52
#define RUBY_THREAD_PRIORITY_MAX 3
53
#define RUBY_THREAD_PRIORITY_MIN -3
54
#endif
55

  
56 50
#ifndef THREAD_DEBUG
57 51
#define THREAD_DEBUG 0
58 52
#endif
......
99 93

  
100 94
static inline void blocking_region_end(rb_thread_t *th, struct rb_blocking_region_buffer *region);
101 95

  
96
static void rb_pqueue_enqueue(pqueue_t *pqueue, rb_thread_t *th, unsigned priority);
97
static rb_thread_t *rb_pqueue_dequeue(pqueue_t *pqueue);
98
static rb_thread_t *rb_pqueue_dequeue_starting_at(pqueue_t *pqueue, unsigned start_from, unsigned *found_at);
99
void rb_threadptr_interrupt(rb_thread_t *th);
100

  
102 101
#define RB_GC_SAVE_MACHINE_CONTEXT(th) \
103 102
  do { \
104 103
    rb_gc_save_machine_context(th); \
105 104
    SET_MACHINE_STACK_END(&(th)->machine_stack_end); \
106 105
  } while (0)
107 106

  
107
#define GVL_TAKE(th) \
108
  while (0!=native_mutex_trylock(&(th)->vm->global_vm_lock)) { \
109
    thread_debug("waiting for gvl\n"); \
110
    /*might be good to check RUBY_VM_INTERRUPTED here*/ \
111
    rb_pqueue_enqueue(&(th)->vm->ready_to_run_list, \
112
		   (th), \
113
		   RUBY_THREAD_PRIORITY_MAX-(th)->priority \
114
    ); \
115
    rb_doze((th)); \
116
  }
117

  
118
#define GVL_GIVE(th) \
119
   do { \
120
     rb_thread_t *th2; \
121
     native_mutex_unlock(&(th)->vm->global_vm_lock); \
122
     th2=rb_pqueue_dequeue(&(th)->vm->ready_to_run_list); \
123
     thread_debug("giving up gvl to %p\n", th2); \
124
     if (th2) rb_undoze(th2); \
125
   } while(0)
126

  
108 127
#define GVL_UNLOCK_BEGIN() do { \
109 128
  rb_thread_t *_th_stored = GET_THREAD(); \
110 129
  RB_GC_SAVE_MACHINE_CONTEXT(_th_stored); \
111
  native_mutex_unlock(&_th_stored->vm->global_vm_lock)
130
  GVL_GIVE(_th_stored)
112 131

  
113 132
#define GVL_UNLOCK_END() \
114
  native_mutex_lock(&_th_stored->vm->global_vm_lock); \
133
  GVL_TAKE(_th_stored); \
115 134
  rb_thread_set_current(_th_stored); \
116 135
} while(0)
117 136

  
......
130 149
    (th)->status = THREAD_STOPPED; \
131 150
    thread_debug("enter blocking region (%p)\n", (void *)(th)); \
132 151
    RB_GC_SAVE_MACHINE_CONTEXT(th); \
133
    native_mutex_unlock(&(th)->vm->global_vm_lock); \
152
    GVL_GIVE(th); \
134 153
  } while (0)
135 154

  
136 155
#define BLOCKING_REGION(exec, ubf, ubfarg) do { \
......
264 283
}
265 284

  
266 285
static void
286
rb_pqueue_flush(pqueue_t *pqueue)
287
{
288
    memset(pqueue,0,sizeof(*pqueue));
289
    native_mutex_initialize(&pqueue->lock);
290
}
291

  
292
static void
293
rb_pqueue_initialize(pqueue_t *pqueue)
294
{
295
    rb_pqueue_flush(pqueue);
296
    if (sizeof(pqueue->mask)*CHAR_BIT<RUBY_NUM_PRIORITIES) 
297
	rb_fatal("pqueue_t.mask smaller than %d bits!", RUBY_NUM_PRIORITIES);
298
    if (!getenv("THREAD_PRIOS_WARN")) {
299
        rb_warn("need benchmarks");
300
        rb_warn("need to test thread priorities more");
301
        ruby_setenv("THREAD_PRIOS_WARN","1");
302
    }
303
}
304

  
305
void
306
rb_pqueue_destroy(pqueue_t *pqueue)
307
{
308
    native_mutex_destroy(&pqueue->lock);
309
    memset(pqueue,0,sizeof(*pqueue));
310
}
311

  
312
static void
313
rb_pqueue_enqueue(pqueue_t *pqueue, rb_thread_t *th, unsigned priority)
314
{
315
    rb_thread_t *queue;
316

  
317
    if (priority>=RUBY_NUM_PRIORITIES) priority=RUBY_NUM_PRIORITIES-1;
318
    /*th->next should be NULL here*/
319

  
320
    native_mutex_lock(&pqueue->lock);
321
    pqueue->mask |= 1<<priority;
322
    queue=pqueue->queues[priority];
323
    if (queue==NULL) {
324
	th->next=th;
325
    } else {
326
	th->next=queue->next;
327
	queue->next=th;
328
    }
329
    pqueue->queues[priority]=th;
330
    native_mutex_unlock(&pqueue->lock);
331
}
332

  
333
static rb_thread_t *
334
rb_pqueue_dequeue(pqueue_t *pqueue)
335
{
336
    int i;
337
    rb_thread_t *result;
338
    unsigned mask;
339

  
340
    native_mutex_lock(&pqueue->lock);
341
    mask = pqueue->mask;
342

  
343
    i=ffs(mask)-1;
344
    if (i==-1) {
345
	result=NULL;
346
    } else {
347
	rb_thread_t *queue=pqueue->queues[i];
348
	/*queue should be non-NULL here*/
349
	result=queue->next;
350
	if (result==queue) { /*last item in this queue?*/
351
	    pqueue->queues[i]=NULL; 
352
	    pqueue->mask &= ~(1<<i);
353
	} else {
354
	    queue->next=result->next;
355
	}
356
	result->next=NULL;
357
    }
358
    native_mutex_unlock(&pqueue->lock);
359
    return result;
360
}
361

  
362
static rb_thread_t *
363
rb_pqueue_dequeue_starting_at(pqueue_t *pqueue, unsigned start_from, unsigned *found_at)
364
{
365
    int i;
366
    rb_thread_t *result;
367
    unsigned mask;
368

  
369
    mask=(1<<start_from)-1;
370
    mask=~mask;
371

  
372
    native_mutex_lock(&pqueue->lock);
373
    mask &= pqueue->mask;
374

  
375
    i=ffs(mask)-1;
376
    if (i==-1) {
377
	result=NULL;
378
	*found_at=-1;
379
    } else {
380
	rb_thread_t *queue=pqueue->queues[i];
381
	/*queue should be non-NULL here*/
382
	*found_at=i;
383
	result=queue->next;
384
	if (result==queue) { /*last item in this queue?*/
385
	    pqueue->queues[i]=NULL; 
386
	    pqueue->mask &= ~(1<<i);
387
	} else {
388
	    queue->next=result->next;
389
	}
390
	result->next=NULL;
391
    }
392
    native_mutex_unlock(&pqueue->lock);
393
    return result;
394
}
395

  
396
/*returns the priority of the highest priority item in the queue.
397
  returns -1 if the queue is empty.
398
  note: this returns a queue-relative priority (0..31, with 0==highest prio),
399
        rather than a ruby-level priority (-16..15, with 15==highest prio).
400
*/
401
static int
402
rb_pqueue_highest_priority(pqueue_t *pqueue)
403
{
404
    return ffs(pqueue->mask)-1;
405
}
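For reference (not part of the patch): ffs() from <strings.h> returns the 1-based position of the least significant set bit, or 0 when no bit is set, so ffs(mask)-1 yields the index of the highest-priority nonempty queue, or -1 when nothing is waiting. The standalone sketch below also shows the RUBY_THREAD_PRIORITY_MAX - priority conversion that GVL_TAKE uses to turn a Ruby-level priority into a queue index; the helper names are invented for illustration.

#include <stdio.h>
#include <strings.h>   /* ffs() */

#define RUBY_THREAD_PRIORITY_MAX 15
#define RUBY_THREAD_PRIORITY_MIN -16

/* Ruby-level priority (-16..15, 15 == highest) -> queue index (0..31, 0 == highest) */
static int queue_index_for(int ruby_priority)
{
    return RUBY_THREAD_PRIORITY_MAX - ruby_priority;   /* 15 -> 0, -16 -> 31 */
}

/* inverse mapping, as used when comparing against a waiting thread's level */
static int ruby_priority_for(int queue_index)
{
    return RUBY_THREAD_PRIORITY_MAX - queue_index;
}

int main(void)
{
    /* pretend threads are waiting at queue indexes 3 and 20 */
    unsigned mask = (1u << 3) | (1u << 20);

    int highest = ffs(mask) - 1;   /* lowest set bit == best queue; -1 if mask == 0 */
    printf("highest nonempty queue: %d (ruby priority %d)\n",
           highest, ruby_priority_for(highest));
    printf("ruby priority 15 maps to queue %d\n", queue_index_for(15));
    printf("empty mask: %d\n", ffs(0) - 1);             /* prints -1 */
    return 0;
}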
406

  
407
static void
408
rb_pqueue_rotate(pqueue_t *pqueue)
409
{
410
    unsigned i=pqueue->next_promote_index;
411
    if (i){
412
	rb_thread_t *promoting;
413
	unsigned found_at;
414
	promoting=rb_pqueue_dequeue_starting_at(pqueue,i,&found_at);
415
	if (!promoting) promoting=rb_pqueue_dequeue_starting_at(pqueue,0,&found_at);
416
	if (promoting) rb_pqueue_enqueue(pqueue,promoting,found_at-1);
417
    }
418
    if (++pqueue->next_promote_index>=RUBY_NUM_PRIORITIES) pqueue->next_promote_index=0;
419
}
420

  
421
static void
267 422
set_unblock_function(rb_thread_t *th, rb_unblock_function_t *func, void *arg,
268 423
		     struct rb_unblock_callback *old)
269 424
{
......
290 445
    native_mutex_unlock(&th->interrupt_lock);
291 446
}
292 447

  
448
/*notify a thread that it should stop waiting and call the thread's 
449
  unblocking function. see rb_thread_blocking_region for a 
450
  description of blocking regions and unblocking functions. Typically, 
451
  th->unblock.func is set to one of these:
452
    ubf_handle (win32)
453
    ubf_pthread_cond_signal (pthreads)
454
    ubf_select
455
    lock_interrupt
456
    rb_big_stop
457
  and th->unblock.arg is set to th. However, they might be different if
458
  an extension used rb_thread_blocking_region or rb_thread_call_without_gvl
459
  to define a custom blocking region. 
460
*/
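For context (not part of the patch): a minimal sketch of how a C extension would supply its own unblocking function, assuming the 1.9-era rb_thread_blocking_region() signature. The names my_ext_read, my_blocking_read, my_interrupt, and my_read_args are invented for illustration.

#include <ruby.h>
#include <unistd.h>

struct my_read_args { int fd; char buf[256]; ssize_t n; };

/* runs without the GVL, so it may block without stopping other Ruby threads */
static VALUE
my_blocking_read(void *p)
{
    struct my_read_args *a = p;
    a->n = read(a->fd, a->buf, sizeof(a->buf));
    return Qnil;
}

/* installed as th->unblock.func for the duration of the blocking region;
   invoked via rb_threadptr_interrupt() to knock the thread out of read() */
static void
my_interrupt(void *p)
{
    close(((struct my_read_args *)p)->fd);
}

static VALUE
my_ext_read(VALUE self, VALUE fd)
{
    struct my_read_args a;
    a.fd = FIX2INT(fd);
    rb_thread_blocking_region(my_blocking_read, &a, my_interrupt, &a);
    return a.n >= 0 ? rb_str_new(a.buf, a.n) : Qnil;
}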
461

  
293 462
void
294 463
rb_threadptr_interrupt(rb_thread_t *th)
295 464
{
......
431 600
#endif
432 601
    thread_debug("thread start: %p\n", (void *)th);
433 602

  
434
    native_mutex_lock(&th->vm->global_vm_lock);
603
    GVL_TAKE(th);
435 604
    {
436 605
	thread_debug("thread start (get lock): %p\n", (void *)th);
437 606
	rb_thread_set_current(th);
......
520 689
    thread_unlock_all_locking_mutexes(th);
521 690
    if (th != main_th) rb_check_deadlock(th->vm);
522 691
    if (th->vm->main_thread == th) {
692
	/*ending main thread; interpreter will exit*/
523 693
	ruby_cleanup(state);
524 694
    }
525 695
    else {
526 696
	thread_cleanup_func(th);
527
	native_mutex_unlock(&th->vm->global_vm_lock);
697
	GVL_GIVE(th);
528 698
    }
529 699

  
530 700
    return 0;
......
995 1165
    rb_thread_wait_for(rb_time_timeval(INT2FIX(sec)));
996 1166
}
997 1167

  
1168
static int
1169
rb_there_are_equal_or_higher_priority_threads(rb_thread_t *th)
1170
{
1171
    int highest_waiting=rb_pqueue_highest_priority(&th->vm->ready_to_run_list);
1172
    if (highest_waiting==-1) return 0;
1173
    highest_waiting=RUBY_THREAD_PRIORITY_MAX-highest_waiting;
1174

  
1175
    return(highest_waiting>=th->priority);
1176
}
1177

  
998 1178
static void rb_threadptr_execute_interrupts_rec(rb_thread_t *, int);
999 1179

  
1180
#define TICKS_PER_ROTATION 4
1181

  
1000 1182
static void
1001 1183
rb_thread_schedule_rec(int sched_depth)
1002 1184
{
1185
    static int ticks_til_rotate=TICKS_PER_ROTATION;
1186

  
1003 1187
    thread_debug("rb_thread_schedule\n");
1004 1188
    if (!rb_thread_alone()) {
1005 1189
	rb_thread_t *th = GET_THREAD();
1190
	if (!sched_depth || rb_there_are_equal_or_higher_priority_threads(th)) {
1191
	    thread_debug("rb_thread_schedule/switch start\n");
1006 1192

  
1007
	thread_debug("rb_thread_schedule/switch start\n");
1193
	    RB_GC_SAVE_MACHINE_CONTEXT(th);
1194
	    GVL_GIVE(th);
1195
	    GVL_TAKE(th);
1008 1196

  
1009
	RB_GC_SAVE_MACHINE_CONTEXT(th);
1010
	native_mutex_unlock(&th->vm->global_vm_lock);
1011
	{
1012
	    native_thread_yield();
1197
	    rb_thread_set_current(th);
1198
	    thread_debug("rb_thread_schedule/switch done\n");
1013 1199
	}
1014
	native_mutex_lock(&th->vm->global_vm_lock);
1015 1200

  
1016
	rb_thread_set_current(th);
1017
	thread_debug("rb_thread_schedule/switch done\n");
1201
	if (sched_depth){
1202
	    if (ticks_til_rotate) {
1203
		--ticks_til_rotate;
1204
	    } else {
1205
		ticks_til_rotate=TICKS_PER_ROTATION;
1206
		rb_pqueue_rotate(&th->vm->ready_to_run_list);
1207
	    }
1208
	}
1018 1209

  
1019
        if (!sched_depth && UNLIKELY(GET_THREAD()->interrupt_flag)) {
1020
            rb_threadptr_execute_interrupts_rec(GET_THREAD(), sched_depth+1);
1021
        }
1210
	if (!sched_depth && UNLIKELY(GET_THREAD()->interrupt_flag)) {
1211
	    rb_threadptr_execute_interrupts_rec(GET_THREAD(), sched_depth+1);
1212
	}
1022 1213
    }
1023 1214
}
1024 1215

  
......
1033 1224
static inline void
1034 1225
blocking_region_end(rb_thread_t *th, struct rb_blocking_region_buffer *region)
1035 1226
{
1036
    native_mutex_lock(&th->vm->global_vm_lock);
1227
    GVL_TAKE(th);
1037 1228
    rb_thread_set_current(th);
1038 1229
    thread_debug("leave blocking region (%p)\n", (void *)th);
1039 1230
    remove_signal_thread_list(th);
......
1247 1438
    return Qnil;
1248 1439
}
1249 1440

  
1250
/*
1441
/* check the current thread for 'interrupts' (asynchronous events sent by other
1442
 * threads or the system) and handle them if present. Here are the types of 
1443
 * 'interrupt':
1444
 *   a signal
1445
 *   an exception sent asynchronously (via Thread#raise)
1446
 *   c-level finalizers which are run as a result of garbage collection
1447
 *   the thread's time slice has expired so it must give up time to other threads
1251 1448
 *
1449
 * this method and rb_thread_schedule_rec are mutually recursive; however,
1450
 * the sched_depth counter prevents re-entry into the time slice expiry logic.
1451
 * (so this method should never be recursed into more than twice, and never
1452
 * more than once in the time slice expiry logic.)
1252 1453
 */
1253 1454

  
1254 1455
static void
......
1299 1500
            sched_depth++;
1300 1501
	    EXEC_EVENT_HOOK(th, RUBY_EVENT_SWITCH, th->cfp->self, 0, 0);
1301 1502

  
1302
	    if (th->slice > 0) {
1303
		th->slice--;
1304
	    }
1305
	    else {
1306
	      reschedule:
1307
		rb_thread_schedule_rec(sched_depth+1);
1308
		if (th->slice < 0) {
1309
		    th->slice++;
1310
		    goto reschedule;
1311
		}
1312
		else {
1313
		    th->slice = th->priority;
1314
		}
1315
	    }
1503
	    rb_thread_schedule_rec(sched_depth+1);
1316 1504
	}
1317 1505
    }
1318 1506
}
......
1331 1519

  
1332 1520
/*****************************************************/
1333 1521

  
1522

  
1523
/*just an alias for rb_threadptr_interrupt, apparently... so why is it needed?*/
1334 1524
static void
1335 1525
rb_threadptr_ready(rb_thread_t *th)
1336 1526
{
......
2175 2365
 *  will run more frequently than lower-priority threads (but lower-priority
2176 2366
 *  threads can also run).
2177 2367
 *
2178
 *  This is just hint for Ruby thread scheduler.  It may be ignored on some
2179
 *  platform.
2180
 *
2181 2368
 *     count1 = count2 = 0
2182 2369
 *     a = Thread.new do
2183 2370
 *           loop { count1 += 1 }
......
2214 2401
	priority = RUBY_THREAD_PRIORITY_MIN;
2215 2402
    }
2216 2403
    th->priority = priority;
2217
    th->slice = priority;
2218 2404
#endif
2219 2405
    return INT2NUM(th->priority);
2220 2406
}
......
2748 2934
    vm->main_thread = th;
2749 2935

  
2750 2936
    native_mutex_reinitialize_atfork(&th->vm->global_vm_lock);
2937
    rb_pqueue_flush(&vm->ready_to_run_list);
2751 2938
    st_foreach(vm->living_threads, atfork, (st_data_t)th);
2752 2939
    st_clear(vm->living_threads);
2753 2940
    st_insert(vm->living_threads, thval, (st_data_t)th->thread_id);
......
4244 4431
	    rb_thread_lock_t *lp = &GET_THREAD()->vm->global_vm_lock;
4245 4432
	    native_mutex_initialize(lp);
4246 4433
	    native_mutex_lock(lp);
4434
	    rb_pqueue_initialize(&GET_THREAD()->vm->ready_to_run_list);
4247 4435
	    native_mutex_initialize(&GET_THREAD()->interrupt_lock);
4248 4436
	}
4249 4437
    }
thread_pthread.c
163 163
    return pthread_setspecific(ruby_native_thread_key, th) == 0;
164 164
}
165 165

  
166
/*called once to initialize the main thread*/
166 167
static void
167 168
Init_native_thread(void)
168 169
{
......
502 503
#endif
503 504
	CHECK_ERR(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED));
504 505

  
506
	pthread_cond_init(&th->native_thread_data.sleep_cond, 0);
507

  
505 508
	err = pthread_create(&th->thread_id, &attr, thread_start_func_1, th);
506
	thread_debug("create: %p (%d)", (void *)th, err);
509
	thread_debug("create: %p (%d)\n", (void *)th, err);
507 510
	CHECK_ERR(pthread_attr_destroy(&attr));
508 511

  
509
	if (!err) {
510
	    pthread_cond_init(&th->native_thread_data.sleep_cond, 0);
512
	if (err) {
513
	    pthread_cond_destroy(&th->native_thread_data.sleep_cond);
511 514
	}
512 515
    }
513 516
    return err;
......
584 587

  
585 588
#define PER_NANO 1000000000
586 589

  
590
/*go into a 'light sleep' while waiting for the GVL
591
  to become available. To be called by ready threads
592
  that are waiting to run. 
593
*/
594
static void
595
rb_doze(rb_thread_t *th)
596
{
597
    int r;
598

  
599
    pthread_mutex_lock(&th->interrupt_lock);
600

  
601
    thread_debug("doze: pthread_cond_wait start\n");
602
    r = pthread_cond_wait(&th->native_thread_data.sleep_cond,
603
			  &th->interrupt_lock);
604
    thread_debug("doze: pthread_cond_wait end\n");
605
    if (r) rb_bug_errno("pthread_cond_wait", r);
606

  
607
    pthread_mutex_unlock(&th->interrupt_lock);
608
}
609

  
610
static void 
611
rb_undoze(rb_thread_t *th)
612
{
613
    pthread_cond_signal(&th->native_thread_data.sleep_cond);
614
}
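For reference (not part of the patch): the GVL handoff pairs GVL_TAKE's enqueue-then-rb_doze() with GVL_GIVE's dequeue-then-rb_undoze(). The standalone pthreads sketch below shows that condition-variable handshake in isolation; the names (waiter_t, doze, undoze, ready_slot) are invented, and unlike rb_doze() it adds a woken predicate to guard against spurious wakeups.

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

typedef struct waiter {
    pthread_mutex_t lock;
    pthread_cond_t  sleep_cond;
    int             woken;
} waiter_t;

static waiter_t ready_slot;   /* stands in for one entry of the ready-to-run list */

/* cf. rb_doze(): sleep until another thread hands over the lock */
static void doze(waiter_t *w)
{
    pthread_mutex_lock(&w->lock);
    while (!w->woken)                         /* predicate guards against spurious wakeups */
        pthread_cond_wait(&w->sleep_cond, &w->lock);
    pthread_mutex_unlock(&w->lock);
}

/* cf. rb_undoze(): wake the chosen waiter */
static void undoze(waiter_t *w)
{
    pthread_mutex_lock(&w->lock);
    w->woken = 1;
    pthread_cond_signal(&w->sleep_cond);
    pthread_mutex_unlock(&w->lock);
}

static void *waiter_thread(void *arg)
{
    printf("waiter: on the ready list, dozing\n");
    doze(&ready_slot);
    printf("waiter: woken, would now retry the GVL\n");
    return NULL;
}

int main(void)
{
    pthread_t t;
    pthread_mutex_init(&ready_slot.lock, NULL);
    pthread_cond_init(&ready_slot.sleep_cond, NULL);

    pthread_create(&t, NULL, waiter_thread, NULL);
    sleep(1);                                 /* let the waiter fall asleep */
    printf("releaser: dequeued a waiter, undozing it\n");
    undoze(&ready_slot);
    pthread_join(t, NULL);
    return 0;
}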
615

  
587 616
static void
588 617
native_sleep(rb_thread_t *th, struct timeval *tv)
589 618
{
thread_win32.c
44 44
    return TlsSetValue(ruby_native_thread_key, th);
45 45
}
46 46

  
47
/*called once to initialize the main thread*/
47 48
static void
48 49
Init_native_thread(void)
49 50
{
......
103 104
    thread_debug("  w32_wait_events events:%p, count:%d, timeout:%ld, th:%p\n",
104 105
		 events, count, timeout, th);
105 106
    if (th && (intr = th->native_thread_data.interrupt_event)) {
106
	native_mutex_lock(&th->vm->global_vm_lock);
107
	GVL_TAKE(th);
107 108
	if (intr == th->native_thread_data.interrupt_event) {
108 109
	    w32_reset_event(intr);
109 110
	    if (RUBY_VM_INTERRUPTED(th)) {
......
116 117
	    targets[count++] = intr;
117 118
	    thread_debug("  * handle: %p (count: %d, intr)\n", intr, count);
118 119
	}
119
	native_mutex_unlock(&th->vm->global_vm_lock);
120
	GVL_GIVE(th);
120 121
    }
121 122

  
122 123
    thread_debug("  WaitForMultipleObjects start (count: %d)\n", count);
......
210 211
    return ret;
211 212
}
212 213

  
214
/*go into a 'light sleep' while waiting for the GVL
215
  to become available. To be called by ready threads
216
  that are waiting to run.
217
*/
218
static void
219
rb_doze(rb_thread_t *th)
220
{
221
    DWORD ret;
222

  
223
    thread_debug("doze start\n");
224
    ret=WaitForSingleObject(th->interrupt_event, INFINITE);
225
    thread_debug("doze done (%lu)\n", ret);
226
    if (WAIT_OBJECT_0 != ret) w32_error("WaitForSingleObject in doze");
227

  
228
}
229

  
230
static void
231
rb_undoze(rb_thread_t *th)
232
{
233
    w32_set_event(th->native_thread_data.interrupt_event);
234
}
235

  
213 236
static void
214 237
native_sleep(rb_thread_t *th, struct timeval *tv)
215 238
{
......
238 261
	    thread_debug("native_sleep start (%lu)\n", msec);
239 262
	    ret = w32_wait_events(0, 0, msec, th);
240 263
	    thread_debug("native_sleep done (%lu)\n", ret);
264
            /*should check for an error here and call rb_bug if there was one*/
241 265
	}
242 266

  
243 267
	native_mutex_lock(&th->interrupt_lock);
vm.c
51 51
void vm_analysis_register(int reg, int isset);
52 52
void vm_analysis_insn(int insn);
53 53

  
54
extern void rb_pqueue_destroy(pqueue_t *pqueue);
55

  
56

  
54 57
void
55 58
rb_vm_change_state(void)
56 59
{
......
1532 1535
	}
1533 1536
	rb_thread_lock_unlock(&vm->global_vm_lock);
1534 1537
	rb_thread_lock_destroy(&vm->global_vm_lock);
1538
	rb_pqueue_destroy(&vm->ready_to_run_list);
1535 1539
	ruby_xfree(vm);
1536 1540
	ruby_current_vm = 0;
1537 1541
#if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
vm_core.h
49 49
#include <setjmp.h>
50 50
#include <signal.h>
51 51

  
52
#ifndef USE_NATIVE_THREAD_PRIORITY
53
#define USE_NATIVE_THREAD_PRIORITY 0
54
#define RUBY_THREAD_PRIORITY_MAX 15
55
#define RUBY_THREAD_PRIORITY_MIN -16
56
#define RUBY_NUM_PRIORITIES (1+RUBY_THREAD_PRIORITY_MAX-RUBY_THREAD_PRIORITY_MIN)
57
#endif
58

  
52 59
#ifndef NSIG
53 60
# define NSIG (_SIGMAX + 1)      /* For QNX */
54 61
#endif
......
270 277
void rb_objspace_free(struct rb_objspace *);
271 278
#endif
272 279

  
280
struct rb_thread_struct;
281
typedef struct priority_queue { 
282
    /*elements in queues are circularly linked lists of rb_thread_t,
283
      and queues[i] points to the _tail_ of the queue. in this way,
284
      both the head and tail of the queue are easily accessible (O(1))
285
      but only one word is required to hold a pointer to the queue.
286
    */
287
    struct rb_thread_struct *queues[RUBY_NUM_PRIORITIES]; 
288
    /*queues[0]==highest prio, queues[RUBY_NUM_PRIORITIES-1]==lowest prio*/
289

  
290
    /*mask is a bitmask recording which elements in queues are nonempty.
291
      if queues[i]!=NULL, then mask&(1<<i) is set.
292
    */
293
    unsigned mask; /*must be at least RUBY_NUM_PRIORITIES bits*/
294
    unsigned next_promote_index;  /*makes this into a fair priority queue*/
295
    rb_thread_lock_t lock;
296
} pqueue_t;
297

  
273 298
typedef struct rb_vm_struct {
274 299
    VALUE self;
275 300

  
276 301
    rb_thread_lock_t global_vm_lock;
302
    pqueue_t ready_to_run_list;
277 303

  
278 304
    struct rb_thread_struct *main_thread;
279 305
    struct rb_thread_struct *running_thread;
......
410 436
    rb_thread_id_t thread_id;
411 437
    enum rb_thread_status status;
412 438
    int priority;
413
    int slice;
414 439

  
415 440
    native_thread_data_t native_thread_data;
416 441
    void *blocking_region_buffer;
......
477 502
    /* misc */
478 503
    int method_missing_reason;
479 504
    int abort_on_exception;
505

  
506
    struct rb_thread_struct *next;
507

  
480 508
#ifdef USE_SIGALTSTACK
481 509
    void *altstack;
482 510
#endif