Feature #3176 » thread-priorities-try2.diff

first try at a patch to implement thread priorities - coatl (caleb clausen), 05/12/2010 07:35 AM


bignum.c
@@ -3455,6 +3455,36 @@
     return Qtrue;
 }
 
+extern int ffs(int);
+
+/*
+ *  call-seq:
+ *     big.ffs  ->  integer
+ *
+ *  Returns the index (starting at 1) of the least significant bit which
+ *  is set in <i>big</i>. Returns 0 if <i>big</i> is 0.
+ *
+ *     0xFFFF_FFFF_FFFF_FFFF.ffs        #=> 1
+ *     0x8000_0000_0000_0000.ffs        #=> 64
+ *     -0x8000_0000_0000_0000.ffs       #=> 64
+ */
+
+static VALUE
+rb_big_ffs(VALUE x)
+{
+    int i;
+    int len=RBIGNUM_LEN(x);
+    int result;
+    BDIGIT *bdigits=BDIGITS(x);
+
+    for(i=0;i<len;i++){
+	result=ffs(bdigits[i]);
+	if (result)
+	    return UINT2NUM(result + i*sizeof(BDIGIT)*CHAR_BIT);
+    }
+    return INT2NUM(0);
+}
+
 /*
  *  Bignum objects hold integers outside the range of
  *  Fixnum. Bignum objects are created
@@ -3516,5 +3546,7 @@
     rb_define_method(rb_cBignum, "odd?", rb_big_odd_p, 0);
     rb_define_method(rb_cBignum, "even?", rb_big_even_p, 0);
 
+    rb_define_method(rb_cBignum, "ffs", rb_big_ffs, 0);
+
     power_cache_init();
 }
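The digit scan in rb_big_ffs() works because the first nonzero BDIGIT word alone decides the answer: the per-word ffs() result is simply offset by the number of bits in the words already skipped. A minimal standalone sketch of the same arithmetic (not part of the patch; a plain uint32_t array stands in for the BDIGITS() layout, least significant word first):

    #include <limits.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <strings.h>   /* ffs() on POSIX systems */

    /* Sketch of the digit-scanning logic in rb_big_ffs(), outside of Ruby. */
    static unsigned long
    big_ffs(const uint32_t *digits, int len)
    {
        int i;
        for (i = 0; i < len; i++) {
            int r = ffs((int)digits[i]);     /* 1-based bit index within word */
            if (r)
                return r + (unsigned long)i * sizeof(uint32_t) * CHAR_BIT;
        }
        return 0;                            /* all words zero */
    }

    int
    main(void)
    {
        /* 0x8000_0000_0000_0000: low word 0, high word has only bit 31 set,
           so the overall index is 32 + 32 = 64, matching the doc example. */
        uint32_t words[2] = { 0, 0x80000000u };
        printf("%lu\n", big_ffs(words, 2));  /* => 64 */
        return 0;
    }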
configure.in
@@ -1140,7 +1140,7 @@
 	      setsid telldir seekdir fchmod cosh sinh tanh log2 round\
 	      setuid setgid daemon select_large_fdset setenv unsetenv\
               mktime timegm gmtime_r clock_gettime gettimeofday\
-              pread sendfile shutdown sigaltstack)
+              pread sendfile shutdown sigaltstack ffs)
 
 AC_CACHE_CHECK(for unsetenv returns a value, rb_cv_unsetenv_return_value,
   [AC_TRY_COMPILE([
eval.c
@@ -36,6 +36,7 @@
 
 void rb_clear_trace_func(void);
 void rb_thread_stop_timer_thread(void);
+extern void rb_threadptr_interrupt(rb_thread_t *th);
 
 void rb_call_inits(void);
 void Init_heap(void);
@@ -118,8 +119,6 @@
     ruby_finalize_1();
 }
 
-void rb_thread_stop_timer_thread(void);
-
 int
 ruby_cleanup(volatile int ex)
 {
numeric.c
@@ -3198,6 +3198,46 @@
     return Qtrue;
 }
 
+#ifndef HAVE_FFS
+static const char ffs_table[] = {0, 1, 2, 1, 3, 1, 2, 1, 4, 1, 2, 1, 3, 1, 2, 1};
+#define FFS_BITS 4
+#define FFS_MASK ((1<<FFS_BITS)-1)
+int
+ffs(int i)
+{
+    int j,count=0;
+    if (i==0) return 0;
+    while(1){
+        j=i&FFS_MASK;
+        if (j) return ffs_table[j] + count;
+        i>>=FFS_BITS;
+        count+=FFS_BITS;
+    }
+}
+#endif
+
+/*
+ *  call-seq:
+ *     fix.ffs  ->  integer
+ *
+ *  Returns the index (starting at 1) of the least significant bit which
+ *  is set in <i>fix</i>. Returns 0 if <i>fix</i> is 0.
+ *
+ *     1.ffs        #=> 1
+ *     2.ffs        #=> 2
+ *     3.ffs        #=> 1
+ *     4.ffs        #=> 3
+ *     0.ffs        #=> 0
+ *     -1.ffs       #=> 1
+ *     -2.ffs       #=> 2
+ */
+
+static VALUE
+fix_ffs(VALUE num)
+{
+    return INT2FIX(ffs(FIX2INT(num)));
+}
+
 /*
  *  Document-class: ZeroDivisionError
  *
@@ -3351,6 +3391,8 @@
     rb_define_method(rb_cFixnum, "even?", fix_even_p, 0);
     rb_define_method(rb_cFixnum, "succ", fix_succ, 0);
 
+    rb_define_method(rb_cFixnum, "ffs", fix_ffs, 0);
+
     rb_cFloat  = rb_define_class("Float", rb_cNumeric);
 
     rb_undef_alloc_func(rb_cFloat);
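For platforms whose libc lacks ffs(), the fallback above walks the argument a nibble at a time and finishes with a 16-entry lookup table. The sketch below (a test harness, not from the patch) reproduces that logic standalone and checks it against a naive bit-by-bit reference; the one change is an unsigned shift, which sidesteps the implementation-defined right shift of a negative int in the original:

    #include <stdio.h>

    /* 16-entry table: ffs() of each 4-bit nibble value 0..15 */
    static const char ffs_table[] = {0,1,2,1,3,1,2,1,4,1,2,1,3,1,2,1};
    #define FFS_BITS 4
    #define FFS_MASK ((1<<FFS_BITS)-1)

    static int
    my_ffs(int i)
    {
        int j, count = 0;
        if (i == 0) return 0;
        while (1) {
            j = i & FFS_MASK;
            if (j) return ffs_table[j] + count;
            i = (int)((unsigned)i >> FFS_BITS); /* unsigned shift: no sign extension */
            count += FFS_BITS;
        }
    }

    /* reference implementation: test bits one at a time */
    static int
    slow_ffs(unsigned v)
    {
        int n;
        for (n = 1; v; v >>= 1, n++)
            if (v & 1) return n;
        return 0;
    }

    int
    main(void)
    {
        unsigned v;
        for (v = 0; v < 1u << 16; v++)
            if (my_ffs((int)v) != slow_ffs(v)) { printf("mismatch at %u\n", v); return 1; }
        printf("ok\n");
        return 0;
    }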
signal.c
@@ -571,6 +571,7 @@
 {
     int i, sig = 0;
 
+    /*this function could be made much faster by use of a bitmask and ffs() */
     for (i=1; i<RUBY_NSIG; i++) {
 	if (signal_buff.cnt[i] > 0) {
 	    rb_disable_interrupt();
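The new comment in signal.c hints at keeping a bitmask of pending signals and scanning it with ffs() instead of looping over every RUBY_NSIG counter. A sketch of what that could look like (hypothetical names, not part of the patch; bit sig-1 stands for signal sig, so ffs() returns the signal number directly):

    #include <stdio.h>
    #include <strings.h>  /* ffs() */

    #define NSIG_SKETCH 32

    static unsigned pending_mask;
    static int      pending_cnt[NSIG_SKETCH + 1];

    static void
    deliver(int sig)                 /* a signal arrives and is buffered */
    {
        pending_cnt[sig]++;
        pending_mask |= 1u << (sig - 1);
    }

    static int
    next_pending(void)               /* replaces the linear cnt[] scan */
    {
        int sig = ffs((int)pending_mask);    /* 0 when nothing is pending */
        if (sig && --pending_cnt[sig] == 0)
            pending_mask &= ~(1u << (sig - 1));
        return sig;
    }

    int
    main(void)
    {
        int sig;
        deliver(2); deliver(15); deliver(2);
        while ((sig = next_pending()) != 0)
            printf("handling signal %d\n", sig);   /* 2, 2, 15 */
        return 0;
    }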
thread.c
@@ -47,12 +47,6 @@
 #include "eval_intern.h"
 #include "gc.h"
 
-#ifndef USE_NATIVE_THREAD_PRIORITY
-#define USE_NATIVE_THREAD_PRIORITY 0
-#define RUBY_THREAD_PRIORITY_MAX 3
-#define RUBY_THREAD_PRIORITY_MIN -3
-#endif
-
 #ifndef THREAD_DEBUG
 #define THREAD_DEBUG 0
 #endif
@@ -99,19 +93,44 @@
 
 static inline void blocking_region_end(rb_thread_t *th, struct rb_blocking_region_buffer *region);
 
+static void pqueue_enqueue(pqueue_t *pqueue, rb_thread_t *th, unsigned priority);
+static rb_thread_t *pqueue_dequeue(pqueue_t *pqueue);
+static rb_thread_t *pqueue_dequeue_starting_at(pqueue_t *pqueue, unsigned start_from, unsigned *found_at);
+void rb_threadptr_interrupt(rb_thread_t *th);
+
 #define RB_GC_SAVE_MACHINE_CONTEXT(th) \
   do { \
     rb_gc_save_machine_context(th); \
     SET_MACHINE_STACK_END(&(th)->machine_stack_end); \
   } while (0)
 
+#define GVL_TAKE(th) \
+  while (0!=native_mutex_trylock(&(th)->vm->global_vm_lock)) { \
+    thread_debug("waiting for gvl\n"); \
+    /*might be good to check RUBY_VM_INTERRUPTED here*/ \
+    pqueue_enqueue(&(th)->vm->ready_to_run_list, \
+		   (th), \
+		   RUBY_THREAD_PRIORITY_MAX-(th)->priority \
+    ); \
+    doze((th)); \
+  }
+
+#define GVL_GIVE(th) \
+   do { \
+     rb_thread_t *th2; \
+     native_mutex_unlock(&(th)->vm->global_vm_lock); \
+     th2=pqueue_dequeue(&(th)->vm->ready_to_run_list); \
+     thread_debug("giving up gvl to %p\n", th2); \
+     if (th2) undoze(th2); \
+   } while(0)
+
 #define GVL_UNLOCK_BEGIN() do { \
   rb_thread_t *_th_stored = GET_THREAD(); \
   RB_GC_SAVE_MACHINE_CONTEXT(_th_stored); \
-  native_mutex_unlock(&_th_stored->vm->global_vm_lock)
+  GVL_GIVE(_th_stored)
 
 #define GVL_UNLOCK_END() \
-  native_mutex_lock(&_th_stored->vm->global_vm_lock); \
+  GVL_TAKE(_th_stored); \
   rb_thread_set_current(_th_stored); \
 } while(0)
 
@@ -130,7 +149,7 @@
     (th)->status = THREAD_STOPPED; \
     thread_debug("enter blocking region (%p)\n", (void *)(th)); \
     RB_GC_SAVE_MACHINE_CONTEXT(th); \
-    native_mutex_unlock(&(th)->vm->global_vm_lock); \
+    GVL_GIVE(th); \
   } while (0)
 
 #define BLOCKING_REGION(exec, ubf, ubfarg) do { \
@@ -264,6 +283,143 @@
 }
 
 static void
+pqueue_flush(pqueue_t *pqueue)
+{
+    memset(pqueue,0,sizeof(*pqueue));
+    native_mutex_initialize(&pqueue->lock);
+}
+
+static void
+pqueue_initialize(pqueue_t *pqueue)
+{
+    pqueue_flush(pqueue);
+    if (sizeof(pqueue->mask)*CHAR_BIT<RUBY_NUM_PRIORITIES)
+	rb_fatal("pqueue_t.mask smaller than %d bits!", RUBY_NUM_PRIORITIES);
+    if (!getenv("THREAD_PRIOS_WARN")) {
+        rb_warn("need benchmarks");
+        rb_warn("need to test ffs");
+        rb_warn("need to test thread priorities more");
+        ruby_setenv("THREAD_PRIOS_WARN","1");
+    }
+}
+
+void
+pqueue_destroy(pqueue_t *pqueue)
+{
+    native_mutex_destroy(&pqueue->lock);
+    memset(pqueue,0,sizeof(*pqueue));
+}
+
+static void
+pqueue_enqueue(pqueue_t *pqueue, rb_thread_t *th, unsigned priority)
+{
+    rb_thread_t *queue;
+
+    if (priority>=RUBY_NUM_PRIORITIES) priority=RUBY_NUM_PRIORITIES-1;
+    /*th->next should be NULL here*/
+
+    native_mutex_lock(&pqueue->lock);
+    pqueue->mask |= 1<<priority;
+    queue=pqueue->queues[priority];
+    if (queue==NULL) {
+	th->next=th;
+    } else {
+	th->next=queue->next;
+	queue->next=th;
+    }
+    pqueue->queues[priority]=th;
+    native_mutex_unlock(&pqueue->lock);
+}
+
+static rb_thread_t *
+pqueue_dequeue(pqueue_t *pqueue)
+{
+    int i;
+    rb_thread_t *result;
+    unsigned mask;
+
+    native_mutex_lock(&pqueue->lock);
+    mask = pqueue->mask;
+
+    i=ffs(mask)-1;
+    if (i==-1) {
+	result=NULL;
+    } else {
+	rb_thread_t *queue=pqueue->queues[i];
+	/*queue should be non-NULL here*/
+	result=queue->next;
+	if (result==queue) { /*last item in this queue?*/
+	    pqueue->queues[i]=NULL;
+	    pqueue->mask &= ~(1<<i);
+	} else {
+	    queue->next=result->next;
+	}
+	result->next=NULL;
+    }
+    native_mutex_unlock(&pqueue->lock);
+    return result;
+}
+
+static rb_thread_t *
+pqueue_dequeue_starting_at(pqueue_t *pqueue, unsigned start_from, unsigned *found_at)
+{
+    int i;
+    rb_thread_t *result;
+    unsigned mask;
+
+    mask=(1<<start_from)-1;
+    mask=~mask;
+
+    native_mutex_lock(&pqueue->lock);
+    mask &= pqueue->mask;
+
+    i=ffs(mask)-1;
+    if (i==-1) {
+	result=NULL;
+	*found_at=-1;
+    } else {
+	rb_thread_t *queue=pqueue->queues[i];
+	/*queue should be non-NULL here*/
+	*found_at=i;
+	result=queue->next;
+	if (result==queue) { /*last item in this queue?*/
+	    pqueue->queues[i]=NULL;
+	    pqueue->mask &= ~(1<<i);
+	} else {
+	    queue->next=result->next;
+	}
+	result->next=NULL;
+    }
+    native_mutex_unlock(&pqueue->lock);
+    return result;
+}
+
+/*returns the priority of the highest priority item in the queue.
+  returns -1 if the queue is empty.
+  note: this returns a queue-relative priority (0..31, with 0==highest prio),
+        rather than a ruby-level priority (-16..15, with 15==highest prio).
+*/
+static int
+pqueue_highest_priority(pqueue_t *pqueue)
+{
+    return ffs(pqueue->mask)-1;
+}
+
+static void
+pqueue_rotate(pqueue_t *pqueue)
+{
+    unsigned i=pqueue->next_promote_index;
+    if (i){
+	rb_thread_t *promoting;
+	unsigned found_at;
+	promoting=pqueue_dequeue_starting_at(pqueue,i,&found_at);
+	if (!promoting) promoting=pqueue_dequeue_starting_at(pqueue,0,&found_at);
+	if (promoting) pqueue_enqueue(pqueue,promoting,found_at-1);
+    }
+    if (++pqueue->next_promote_index>=RUBY_NUM_PRIORITIES) pqueue->next_promote_index=0;
+}
+
+static void
 set_unblock_function(rb_thread_t *th, rb_unblock_function_t *func, void *arg,
 		     struct rb_unblock_callback *old)
 {
@@ -290,6 +446,20 @@
     native_mutex_unlock(&th->interrupt_lock);
 }
 
+/*notify a thread that it should stop waiting and call the thread's
+  unblocking function. see rb_thread_blocking_region for a
+  description of blocking regions and unblocking functions. Typically,
+  th->unblock.func is set to one of these:
+    ubf_handle (win32)
+    ubf_pthread_cond_signal (pthreads)
+    ubf_select
+    lock_interrupt
+    rb_big_stop
+  and th->unblock.arg is set to th. However, they might be different if
+  an extension used rb_thread_blocking_region or rb_thread_call_without_gvl
+  to define a custom blocking region.
+*/
+
 void
 rb_threadptr_interrupt(rb_thread_t *th)
 {
@@ -426,7 +596,7 @@
 #endif
     thread_debug("thread start: %p\n", (void *)th);
 
-    native_mutex_lock(&th->vm->global_vm_lock);
+    GVL_TAKE(th);
     {
 	thread_debug("thread start (get lock): %p\n", (void *)th);
 	rb_thread_set_current(th);
@@ -515,11 +685,12 @@
     thread_unlock_all_locking_mutexes(th);
     if (th != main_th) rb_check_deadlock(th->vm);
     if (th->vm->main_thread == th) {
+	/*ending main thread; interpreter will exit*/
 	ruby_cleanup(state);
     }
     else {
 	thread_cleanup_func(th);
-	native_mutex_unlock(&th->vm->global_vm_lock);
+	GVL_GIVE(th);
     }
 
     return 0;
@@ -990,30 +1161,49 @@
     rb_thread_wait_for(rb_time_timeval(INT2FIX(sec)));
 }
 
+static int
+there_are_equal_or_higher_priority_threads(rb_thread_t *th)
+{
+    int highest_waiting=pqueue_highest_priority(&th->vm->ready_to_run_list);
+    if (highest_waiting==-1) return 0;
+    highest_waiting=RUBY_THREAD_PRIORITY_MAX-highest_waiting;
+
+    return(highest_waiting>=th->priority);
+}
+
 static void rb_threadptr_execute_interrupts_rec(rb_thread_t *, int);
 
 static void
 rb_thread_schedule_rec(int sched_depth)
 {
+    static int ticks_til_rotate=0;
+
     thread_debug("rb_thread_schedule\n");
     if (!rb_thread_alone()) {
 	rb_thread_t *th = GET_THREAD();
+	if (!sched_depth || there_are_equal_or_higher_priority_threads(th)) {
+	    thread_debug("rb_thread_schedule/switch start\n");
 
-	thread_debug("rb_thread_schedule/switch start\n");
+	    RB_GC_SAVE_MACHINE_CONTEXT(th);
+	    GVL_GIVE(th);
+	    GVL_TAKE(th);
 
-	RB_GC_SAVE_MACHINE_CONTEXT(th);
-	native_mutex_unlock(&th->vm->global_vm_lock);
-	{
-	    native_thread_yield();
+	    rb_thread_set_current(th);
+	    thread_debug("rb_thread_schedule/switch done\n");
 	}
-	native_mutex_lock(&th->vm->global_vm_lock);
 
-	rb_thread_set_current(th);
-	thread_debug("rb_thread_schedule/switch done\n");
+	if (sched_depth){
+	    if (ticks_til_rotate) {
+		--ticks_til_rotate;
+	    } else {
+		ticks_til_rotate=10;
+		pqueue_rotate(&th->vm->ready_to_run_list);
+	    }
+	}
 
-        if (!sched_depth && UNLIKELY(GET_THREAD()->interrupt_flag)) {
-            rb_threadptr_execute_interrupts_rec(GET_THREAD(), sched_depth+1);
-        }
+	if (!sched_depth && UNLIKELY(GET_THREAD()->interrupt_flag)) {
+	    rb_threadptr_execute_interrupts_rec(GET_THREAD(), sched_depth+1);
+	}
     }
 }
 
@@ -1028,7 +1218,7 @@
 static inline void
 blocking_region_end(rb_thread_t *th, struct rb_blocking_region_buffer *region)
 {
-    native_mutex_lock(&th->vm->global_vm_lock);
+    GVL_TAKE(th);
     rb_thread_set_current(th);
     thread_debug("leave blocking region (%p)\n", (void *)th);
     remove_signal_thread_list(th);
@@ -1242,8 +1432,18 @@
     return Qnil;
 }
 
-/*
+/* check the current thread for 'interrupts' (asynchronous events sent by other
+ * threads or the system) and handle them if present. Here are the types of
+ * 'interrupt':
+ *   a signal
+ *   an exception sent asynchronously (via Thread#raise)
+ *   c-level finalizers which are run as a result of garbage collection
+ *   the thread's time slice has expired so it must give up time to other threads
  *
+ * this method and rb_thread_schedule_rec are mutually recursive; however,
+ * the sched_depth counter prevents re-entry into the time slice expiry logic.
+ * (so this method should never be recursed into more than twice, and never
+ *  more than once in the time slice expiry logic.)
  */
 
 static void
@@ -1294,6 +1494,7 @@
             sched_depth++;
 	    EXEC_EVENT_HOOK(th, RUBY_EVENT_SWITCH, th->cfp->self, 0, 0);
 
+	    /*I think all this mucking with slice is unnecessary now*/
 	    if (th->slice > 0) {
 		th->slice--;
 	    }
@@ -1326,6 +1527,8 @@
 
 /*****************************************************/
 
+
+/*just an alias for rb_threadptr_interrupt, apparently... so why is it needed?*/
 static void
 rb_threadptr_ready(rb_thread_t *th)
 {
@@ -2170,9 +2373,6 @@
  *  will run more frequently than lower-priority threads (but lower-priority
 *  threads can also run).
 *
- *  This is just hint for Ruby thread scheduler.  It may be ignored on some
- *  platform.
- *
 *     count1 = count2 = 0
 *     a = Thread.new do
 *           loop { count1 += 1 }
@@ -2743,6 +2943,7 @@
     vm->main_thread = th;
 
     native_mutex_reinitialize_atfork(&th->vm->global_vm_lock);
+    pqueue_flush(&vm->ready_to_run_list);
     st_foreach(vm->living_threads, atfork, (st_data_t)th);
     st_clear(vm->living_threads);
     st_insert(vm->living_threads, thval, (st_data_t)th->thread_id);
@@ -4239,6 +4440,7 @@
 	    rb_thread_lock_t *lp = &GET_THREAD()->vm->global_vm_lock;
 	    native_mutex_initialize(lp);
 	    native_mutex_lock(lp);
+	    pqueue_initialize(&GET_THREAD()->vm->ready_to_run_list);
 	    native_mutex_initialize(&GET_THREAD()->interrupt_lock);
 	}
     }
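The heart of the scheduler change is the GVL handoff: a thread that fails trylock in GVL_TAKE() enqueues itself at queue index RUBY_THREAD_PRIORITY_MAX - priority (so index 0 is the highest Ruby priority) and dozes; GVL_GIVE() then uses ffs(mask)-1 to find the lowest-index nonempty queue, i.e. the best waiter, and undozes it. A toy model of just the mask arithmetic (illustrative only, not patch code):

    #include <stdio.h>
    #include <strings.h>  /* ffs() */

    /* Each bit of `mask` says "queue i is nonempty"; ffs(mask)-1 is then the
       index of the highest-priority nonempty queue, which is how
       pqueue_dequeue() picks the next thread in O(1). */
    #define PRIORITY_MAX 15

    int
    main(void)
    {
        unsigned mask = 0;
        int prios[] = {0, 15, -16};   /* three waiters' Ruby priorities */
        int n, k;

        for (n = 0; n < 3; n++)
            mask |= 1u << (PRIORITY_MAX - prios[n]);

        /* the GVL releaser picks waiters best-first until the list is empty */
        while ((k = ffs((int)mask) - 1) != -1) {
            printf("dequeue queue %d (ruby priority %d)\n", k, PRIORITY_MAX - k);
            mask &= ~(1u << k);       /* queue k emptied: clear its bit */
        }
        return 0;
    }

Running this prints the priority-15 waiter first and the priority -16 waiter last, which is exactly the order GVL_GIVE() hands off the lock.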
thread_pthread.c
@@ -163,6 +163,7 @@
     return pthread_setspecific(ruby_native_thread_key, th) == 0;
 }
 
+/*called once to initialize the main thread*/
 static void
 Init_native_thread(void)
 {
@@ -501,12 +502,14 @@
 #endif
 	CHECK_ERR(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED));
 
+	pthread_cond_init(&th->native_thread_data.sleep_cond, 0);
+
 	err = pthread_create(&th->thread_id, &attr, thread_start_func_1, th);
-	thread_debug("create: %p (%d)", (void *)th, err);
+	thread_debug("create: %p (%d)\n", (void *)th, err);
 	CHECK_ERR(pthread_attr_destroy(&attr));
 
-	if (!err) {
-	    pthread_cond_init(&th->native_thread_data.sleep_cond, 0);
+	if (err) {
+	    pthread_cond_destroy(&th->native_thread_data.sleep_cond);
 	}
     }
     return err;
@@ -583,6 +586,31 @@
 
 #define PER_NANO 1000000000
 
+/*go into a 'light sleep', while waiting for the GVL
+  to become available. To be called by ready threads
+  that are waiting to run.
+*/
+static void
+doze(rb_thread_t *th)
+{
+    int r;
+
+    pthread_mutex_lock(&th->interrupt_lock);
+
+    thread_debug("doze: pthread_cond_wait start\n");
+    r = pthread_cond_wait(&th->native_thread_data.sleep_cond,
+			  &th->interrupt_lock);
+    thread_debug("doze: pthread_cond_wait end\n");
+    if (r) rb_bug_errno("pthread_cond_wait", r);
+
+    pthread_mutex_unlock(&th->interrupt_lock);
+}
+
+static void undoze(rb_thread_t *th)
+{
+    pthread_cond_signal(&th->native_thread_data.sleep_cond);
+}
+
 static void
 native_sleep(rb_thread_t *th, struct timeval *tv)
 {
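doze()/undoze() above are a bare pthread_cond_wait/pthread_cond_signal pair; the patch can get away with that because GVL_TAKE() retries trylock after every wakeup, so spurious wakeups are harmless. For comparison, here is the conventional predicate-guarded form of the same pattern (the `woken` flag is an addition for illustration, not something the patch uses):

    #include <pthread.h>
    #include <stdio.h>

    typedef struct {
        pthread_mutex_t lock;
        pthread_cond_t  cond;
        int             woken;   /* hypothetical predicate */
    } dozer_t;

    static void
    doze(dozer_t *d)
    {
        pthread_mutex_lock(&d->lock);
        while (!d->woken)                     /* tolerate spurious wakeups */
            pthread_cond_wait(&d->cond, &d->lock);
        d->woken = 0;
        pthread_mutex_unlock(&d->lock);
    }

    static void
    undoze(dozer_t *d)
    {
        pthread_mutex_lock(&d->lock);
        d->woken = 1;                         /* remember the wakeup even if */
        pthread_cond_signal(&d->cond);        /* the sleeper hasn't blocked yet */
        pthread_mutex_unlock(&d->lock);
    }

    int
    main(void)
    {
        dozer_t d = { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0 };
        undoze(&d);   /* wakeup arrives first... */
        doze(&d);     /* ...and is not lost */
        puts("woke");
        return 0;
    }

With the flag, a wakeup that arrives before the sleeper blocks is remembered rather than lost; the patch instead relies on the retry loop in GVL_TAKE() to re-examine the lock after every doze().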
thread_win32.c
@@ -44,6 +44,7 @@
     return TlsSetValue(ruby_native_thread_key, th);
 }
 
+/*called once to initialize the main thread*/
 static void
 Init_native_thread(void)
 {
@@ -103,7 +104,7 @@
     thread_debug("  w32_wait_events events:%p, count:%d, timeout:%ld, th:%p\n",
 		 events, count, timeout, th);
     if (th && (intr = th->native_thread_data.interrupt_event)) {
-	native_mutex_lock(&th->vm->global_vm_lock);
+	GVL_TAKE(th);
 	if (intr == th->native_thread_data.interrupt_event) {
 	    w32_reset_event(intr);
 	    if (RUBY_VM_INTERRUPTED(th)) {
@@ -116,7 +117,7 @@
 	    targets[count++] = intr;
 	    thread_debug("  * handle: %p (count: %d, intr)\n", intr, count);
 	}
-	native_mutex_unlock(&th->vm->global_vm_lock);
+	GVL_GIVE(th);
     }
 
     thread_debug("  WaitForMultipleObjects start (count: %d)\n", count);
@@ -210,6 +211,28 @@
     return ret;
 }
 
+/*go into a 'light sleep', while waiting for the GVL
+  to become available. To be called by ready threads
+  that are waiting to run.
+*/
+static void
+doze(rb_thread_t *th)
+{
+    DWORD ret;
+
+    thread_debug("doze start\n");
+    ret=WaitForSingleObject(th->interrupt_event, INFINITE);
+    thread_debug("doze done (%lu)\n", ret);
+    if (WAIT_OBJECT_0 != ret) w32_error("WaitForSingleObject in doze");
+
+}
+
+static void
+undoze(rb_thread_t *th)
+{
+    w32_set_event(th->native_thread_data.interrupt_event);
+}
+
 static void
 native_sleep(rb_thread_t *th, struct timeval *tv)
 {
@@ -238,6 +261,7 @@
 	    thread_debug("native_sleep start (%lu)\n", msec);
 	    ret = w32_wait_events(0, 0, msec, th);
 	    thread_debug("native_sleep done (%lu)\n", ret);
+            /*should check for error and rb_bug if there was one here*/
 	}
 
 	native_mutex_lock(&th->interrupt_lock);
vm.c
@@ -51,6 +51,9 @@
 void vm_analysis_register(int reg, int isset);
 void vm_analysis_insn(int insn);
 
+extern void pqueue_destroy(pqueue_t *pqueue);
+
+
 void
 rb_vm_change_state(void)
 {
@@ -1532,6 +1535,7 @@
 	}
 	rb_thread_lock_unlock(&vm->global_vm_lock);
 	rb_thread_lock_destroy(&vm->global_vm_lock);
+	pqueue_destroy(&vm->ready_to_run_list);
 	ruby_xfree(vm);
 	ruby_current_vm = 0;
 #if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
vm_core.h
@@ -49,6 +49,13 @@
 #include <setjmp.h>
 #include <signal.h>
 
+#ifndef USE_NATIVE_THREAD_PRIORITY
+#define USE_NATIVE_THREAD_PRIORITY 0
+#define RUBY_THREAD_PRIORITY_MAX 15
+#define RUBY_THREAD_PRIORITY_MIN -16
+#define RUBY_NUM_PRIORITIES (1+RUBY_THREAD_PRIORITY_MAX-RUBY_THREAD_PRIORITY_MIN)
+#endif
+
 #ifndef NSIG
 # define NSIG (_SIGMAX + 1)      /* For QNX */
 #endif
@@ -266,10 +273,29 @@
 void rb_objspace_free(struct rb_objspace *);
 #endif
 
+struct rb_thread_struct;
+typedef struct priority_queue {
+    /*elements in queues are circularly linked lists of rb_thread_t,
+      and queues[i] points to the _tail_ of the queue. in this way,
+      both the head and tail of the queue are easily accessible (O(1))
+      but only one word is required to hold a pointer to the queue.
+    */
+    struct rb_thread_struct *queues[RUBY_NUM_PRIORITIES];
+    /*queues[0]==highest prio, queues[RUBY_NUM_PRIORITIES-1]==lowest prio*/
+
+    /*mask is a bitmask recording which elements of queues are nonempty:
+      if queues[i]!=NULL, then mask&(1<<i) is set.
+    */
+    unsigned mask; /*must be at least RUBY_NUM_PRIORITIES bits*/
+    unsigned next_promote_index;  /*makes this into a fair priority queue*/
+    rb_thread_lock_t lock;
+} pqueue_t;
+
 typedef struct rb_vm_struct {
     VALUE self;
 
     rb_thread_lock_t global_vm_lock;
+    pqueue_t ready_to_run_list;
 
     struct rb_thread_struct *main_thread;
     struct rb_thread_struct *running_thread;
@@ -473,6 +499,9 @@
     /* misc */
     int method_missing_reason;
     int abort_on_exception;
+
+    struct rb_thread_struct *next;
+
 } rb_thread_t;
 
 /* iseq.c */
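The pqueue_t comment describes each bucket as a circular singly-linked list whose stored pointer is the tail, so both the tail (the pointer itself) and the head (tail->next) are reachable in O(1) from a single word. A standalone sketch of that representation (hypothetical node type, not the patch's rb_thread_t):

    #include <stdio.h>

    typedef struct node { struct node *next; int id; } node_t;

    /* append at the tail; returns the new tail */
    static node_t *
    enqueue(node_t *tail, node_t *n)
    {
        if (tail == NULL) n->next = n;            /* first element: self-cycle */
        else { n->next = tail->next; tail->next = n; }
        return n;                                 /* new node becomes the tail */
    }

    /* remove from the head (tail->next); returns it, updates *tailp */
    static node_t *
    dequeue(node_t **tailp)
    {
        node_t *tail = *tailp, *head;
        if (tail == NULL) return NULL;
        head = tail->next;
        if (head == tail) *tailp = NULL;          /* last element removed */
        else tail->next = head->next;
        head->next = NULL;
        return head;
    }

    int
    main(void)
    {
        node_t a = {0, 1}, b = {0, 2}, c = {0, 3};
        node_t *tail = NULL;
        tail = enqueue(tail, &a);
        tail = enqueue(tail, &b);
        tail = enqueue(tail, &c);
        while (tail) printf("%d\n", dequeue(&tail)->id);   /* 1 2 3: FIFO */
        return 0;
    }

This is the same trick pqueue_enqueue() and pqueue_dequeue() use in thread.c, one such list per priority level, with the occupancy bitmask selecting among them.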