Feature #3176 » setpriority_wont_work.diff
eval.c | ||
---|---|---|
36 | 36 | |
37 | 37 |
void rb_clear_trace_func(void); |
38 | 38 |
void rb_thread_stop_timer_thread(void); |
39 |
extern void rb_threadptr_interrupt(rb_thread_t *th); |
|
39 | 40 | |
40 | 41 |
void rb_call_inits(void); |
41 | 42 |
void Init_heap(void); |
thread.c | ||
---|---|---|
93 | 93 | |
94 | 94 |
static inline void blocking_region_end(rb_thread_t *th, struct rb_blocking_region_buffer *region); |
95 | 95 | |
96 |
#if !USE_NATIVE_THREAD_PRIORITY |
|
96 | 97 |
static void rb_pqueue_enqueue(pqueue_t *pqueue, rb_thread_t *th, unsigned priority); |
97 | 98 |
static rb_thread_t *rb_pqueue_dequeue(pqueue_t *pqueue); |
98 | 99 |
static rb_thread_t *rb_pqueue_dequeue_starting_at(pqueue_t *pqueue, unsigned start_from, unsigned *found_at); |
100 |
#endif |
|
99 | 101 | |
100 | 102 |
#define RB_GC_SAVE_MACHINE_CONTEXT(th) \ |
101 | 103 |
do { \ |
... | ... | |
103 | 105 |
SET_MACHINE_STACK_END(&(th)->machine_stack_end); \ |
104 | 106 |
} while (0) |
105 | 107 | |
108 |
#if USE_NATIVE_THREAD_PRIORITY |
|
109 | ||
110 |
#define GVL_TAKE(th) (native_mutex_lock(&(th)->vm->global_vm_lock)) |
|
111 | ||
112 |
#define GVL_GIVE(th) (native_mutex_unlock(&(th)->vm->global_vm_lock)) |
|
113 | ||
114 |
#else |
|
115 | ||
106 | 116 |
#define GVL_TAKE(th) \ |
107 | 117 |
while (0!=native_mutex_trylock(&(th)->vm->global_vm_lock)) { \ |
108 | 118 |
thread_debug("waiting for gvl\n"); \ |
... | ... | |
123 | 133 |
if (th2) rb_undoze(th2); \ |
124 | 134 |
} while(0) |
125 | 135 | |
136 |
#endif |
|
137 | ||
126 | 138 |
#define GVL_UNLOCK_BEGIN() do { \ |
127 | 139 |
rb_thread_t *_th_stored = GET_THREAD(); \ |
128 | 140 |
RB_GC_SAVE_MACHINE_CONTEXT(_th_stored); \ |
... | ... | |
281 | 293 |
native_mutex_destroy(lock); |
282 | 294 |
} |
283 | 295 | |
296 |
#if !USE_NATIVE_THREAD_PRIORITY |
|
284 | 297 |
static void |
285 | 298 |
rb_pqueue_flush(pqueue_t *pqueue) |
286 | 299 |
{ |
... | ... | |
411 | 424 |
} |
412 | 425 |
if (++pqueue->next_promote_index>=RUBY_NUM_PRIORITIES) pqueue->next_promote_index=0; |
413 | 426 |
} |
427 |
#endif /*USE_NATIVE_THREAD_PRIORITY*/ |
|
414 | 428 | |
415 | 429 |
static void |
416 | 430 |
set_unblock_function(rb_thread_t *th, rb_unblock_function_t *func, void *arg, |
... | ... | |
1159 | 1173 |
rb_thread_wait_for(rb_time_timeval(INT2FIX(sec))); |
1160 | 1174 |
} |
1161 | 1175 | |
1176 |
#if USE_NATIVE_THREAD_PRIORITY |
|
1177 |
#define rb_there_are_equal_or_higher_priority_threads(th) 0 |
|
1178 |
#else |
|
1162 | 1179 |
static int |
1163 | 1180 |
rb_there_are_equal_or_higher_priority_threads(rb_thread_t *th) |
1164 | 1181 |
{ |
... | ... | |
1168 | 1185 | |
1169 | 1186 |
return(highest_waiting>=th->priority); |
1170 | 1187 |
} |
1188 |
#endif |
|
1171 | 1189 | |
1172 | 1190 |
static void rb_threadptr_execute_interrupts_rec(rb_thread_t *, int); |
1173 | 1191 | |
... | ... | |
1192 | 1210 |
thread_debug("rb_thread_schedule/switch done\n"); |
1193 | 1211 |
} |
1194 | 1212 | |
1213 |
#if !USE_NATIVE_THREAD_PRIORITY |
|
1195 | 1214 |
if (sched_depth){ |
1196 | 1215 |
if (ticks_til_rotate) { |
1197 | 1216 |
--ticks_til_rotate; |
... | ... | |
1200 | 1219 |
rb_pqueue_rotate(&th->vm->ready_to_run_list); |
1201 | 1220 |
} |
1202 | 1221 |
} |
1222 |
#endif |
|
1203 | 1223 | |
1204 | 1224 |
if (!sched_depth && UNLIKELY(GET_THREAD()->interrupt_flag)) { |
1205 | 1225 |
rb_threadptr_execute_interrupts_rec(GET_THREAD(), sched_depth+1); |
... | ... | |
2383 | 2403 | |
2384 | 2404 |
rb_secure(4); |
2385 | 2405 | |
2386 |
#if USE_NATIVE_THREAD_PRIORITY |
|
2387 |
th->priority = NUM2INT(prio); |
|
2388 |
native_thread_apply_priority(th); |
|
2389 |
#else |
|
2390 | 2406 |
priority = NUM2INT(prio); |
2391 | 2407 |
if (priority > RUBY_THREAD_PRIORITY_MAX) { |
2392 | 2408 |
priority = RUBY_THREAD_PRIORITY_MAX; |
... | ... | |
2395 | 2411 |
priority = RUBY_THREAD_PRIORITY_MIN; |
2396 | 2412 |
} |
2397 | 2413 |
th->priority = priority; |
2414 |
#if USE_NATIVE_THREAD_PRIORITY |
|
2415 |
native_thread_apply_priority(th); |
|
2398 | 2416 |
#endif |
2399 | 2417 |
return INT2NUM(th->priority); |
2400 | 2418 |
} |
... | ... | |
2928 | 2946 |
vm->main_thread = th; |
2929 | 2947 | |
2930 | 2948 |
native_mutex_reinitialize_atfork(&th->vm->global_vm_lock); |
2949 |
#if !USE_NATIVE_THREAD_PRIORITY |
|
2931 | 2950 |
rb_pqueue_flush(&vm->ready_to_run_list); |
2951 |
#endif |
|
2932 | 2952 |
st_foreach(vm->living_threads, atfork, (st_data_t)th); |
2933 | 2953 |
st_clear(vm->living_threads); |
2934 | 2954 |
st_insert(vm->living_threads, thval, (st_data_t)th->thread_id); |
... | ... | |
4425 | 4445 |
rb_thread_lock_t *lp = &GET_THREAD()->vm->global_vm_lock; |
4426 | 4446 |
native_mutex_initialize(lp); |
4427 | 4447 |
native_mutex_lock(lp); |
4448 |
#if !USE_NATIVE_THREAD_PRIORITY |
|
4428 | 4449 |
rb_pqueue_initialize(&GET_THREAD()->vm->ready_to_run_list); |
4450 |
#endif |
|
4429 | 4451 |
native_mutex_initialize(&GET_THREAD()->interrupt_lock); |
4430 | 4452 |
} |
4431 | 4453 |
} |
thread_pthread.c | ||
---|---|---|
531 | 531 |
static void |
532 | 532 |
native_thread_apply_priority(rb_thread_t *th) |
533 | 533 |
{ |
534 |
#if defined(_POSIX_PRIORITY_SCHEDULING) && (_POSIX_PRIORITY_SCHEDULING > 0) |
|
534 |
#ifdef linux |
|
535 |
setpriority(PRIO_PROCESS,th->thread_id,-th->priority); |
|
536 |
#else |
|
535 | 537 |
struct sched_param sp; |
536 | 538 |
int policy; |
537 | 539 |
int priority = 0 - th->priority; |
... | ... | |
549 | 551 | |
550 | 552 |
sp.sched_priority = priority; |
551 | 553 |
pthread_setschedparam(th->thread_id, policy, &sp); |
552 |
#else |
|
553 |
/* not touched */ |
|
554 | 554 |
#endif |
555 | 555 |
} |
556 | 556 |
vm.c | ||
---|---|---|
51 | 51 |
void vm_analysis_register(int reg, int isset); |
52 | 52 |
void vm_analysis_insn(int insn); |
53 | 53 | |
54 |
#if !USE_NATIVE_THREAD_PRIORITY |
|
54 | 55 |
extern void rb_pqueue_destroy(pqueue_t *pqueue); |
55 | ||
56 |
#endif |
|
56 | 57 | |
57 | 58 |
void |
58 | 59 |
rb_vm_change_state(void) |
... | ... | |
1535 | 1536 |
} |
1536 | 1537 |
rb_thread_lock_unlock(&vm->global_vm_lock); |
1537 | 1538 |
rb_thread_lock_destroy(&vm->global_vm_lock); |
1539 |
#if !USE_NATIVE_THREAD_PRIORITY |
|
1538 | 1540 |
rb_pqueue_destroy(&vm->ready_to_run_list); |
1541 |
#endif |
|
1539 | 1542 |
ruby_xfree(vm); |
1540 | 1543 |
ruby_current_vm = 0; |
1541 | 1544 |
#if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE |
vm_core.h | ||
---|---|---|
49 | 49 |
#include <setjmp.h> |
50 | 50 |
#include <signal.h> |
51 | 51 | |
52 |
#ifdef linux |
|
53 |
#define USE_NATIVE_THREAD_PRIORITY 1 |
|
54 |
#define RUBY_THREAD_PRIORITY_MAX 20 |
|
55 |
#define RUBY_THREAD_PRIORITY_MIN -19 |
|
56 |
#endif |
|
57 | ||
52 | 58 |
#ifndef USE_NATIVE_THREAD_PRIORITY |
53 | 59 |
#define USE_NATIVE_THREAD_PRIORITY 0 |
54 | 60 |
#define RUBY_THREAD_PRIORITY_MAX 15 |
55 | 61 |
#define RUBY_THREAD_PRIORITY_MIN -16 |
56 |
#define RUBY_NUM_PRIORITIES (1+RUBY_THREAD_PRIORITY_MAX-RUBY_THREAD_PRIORITY_MIN) |
|
57 | 62 |
#endif |
58 | 63 | |
64 |
#define RUBY_NUM_PRIORITIES (1+RUBY_THREAD_PRIORITY_MAX-RUBY_THREAD_PRIORITY_MIN) |
|
65 | ||
59 | 66 |
#ifndef NSIG |
60 | 67 |
# define NSIG (_SIGMAX + 1) /* For QNX */ |
61 | 68 |
#endif |
... | ... | |
277 | 284 |
void rb_objspace_free(struct rb_objspace *); |
278 | 285 |
#endif |
279 | 286 | |
287 |
#if !USE_NATIVE_THREAD_PRIORITY |
|
280 | 288 |
struct rb_thread_struct; |
281 | 289 |
typedef struct priority_queue { |
282 | 290 |
/*elements in queues are circularly linked lists of rb_thread_t, |
... | ... | |
294 | 302 |
unsigned next_promote_index; /*makes this into a fair priority queue*/ |
295 | 303 |
rb_thread_lock_t lock; |
296 | 304 |
} pqueue_t; |
305 |
#endif |
|
297 | 306 | |
298 | 307 |
typedef struct rb_vm_struct { |
299 | 308 |
VALUE self; |
300 | 309 | |
301 | 310 |
rb_thread_lock_t global_vm_lock; |
311 |
#if !USE_NATIVE_THREAD_PRIORITY |
|
302 | 312 |
pqueue_t ready_to_run_list; |
313 |
#endif |
|
303 | 314 | |
304 | 315 |
struct rb_thread_struct *main_thread; |
305 | 316 |
struct rb_thread_struct *running_thread; |
... | ... | |
503 | 514 |
int method_missing_reason; |
504 | 515 |
int abort_on_exception; |
505 | 516 | |
517 |
#if !USE_NATIVE_THREAD_PRIORITY |
|
506 | 518 |
struct rb_thread_struct *next; |
519 |
#endif |
|
507 | 520 | |
508 | 521 |
#ifdef USE_SIGALTSTACK |
509 | 522 |
void *altstack; |