Project

General

Profile

Feature #14859 » 0001-implement-Timeout-in-VM.patch

normalperson (Eric Wong), 06/21/2018 12:21 AM

View differences:

benchmark/bm_timeout_mt_nested.rb
1
require 'timeout'
2
total = 10000
3
nthr = 8
4
nr = total / nthr
5
def nest_timeout(n, nthr)
6
  n -= 1
7
  if n > 0
8
    Timeout.timeout(n) { nest_timeout(n, nthr) }
9
  else
10
    nthr.times { Thread.pass }
11
  end
12
end
13
nthr.times.map do
14
  Thread.new do
15
    nr.times { nest_timeout(10, nthr) }
16
  end
17
end.map(&:join)
benchmark/bm_timeout_mt_same.rb
1
require 'timeout'
2
total = 100000
3
nthr = 8
4
nr = total / nthr
5
nthr.times.map do
6
  Thread.new do
7
    nr.times { Timeout.timeout(5) { Thread.pass } }
8
  end
9
end.map(&:join)
benchmark/bm_timeout_mt_ugly.rb
1
# unrealistic: this is the worst-case of insertion-sort-based timeout
2
require 'timeout'
3
total = 100000
4
nthr = 8
5
nr = total / nthr
6
nthr.times.map do
7
  Thread.new do
8
    i = nr
9
    while (i -= 1) >= 0
10
      Timeout.timeout(i + 1) { nthr.times { Thread.pass } }
11
    end
12
  end
13
end.map(&:join)
benchmark/bm_timeout_nested.rb
1
require 'timeout'
2
def nest_timeout(n)
3
  n -= 1
4
  if n > 0
5
    Timeout.timeout(n) { nest_timeout(n) }
6
  end
7
end
8
100000.times do
9
  nest_timeout(10)
10
end
benchmark/bm_timeout_same.rb
1
require 'timeout'
2
100000.times { Timeout.timeout(5) {} }
benchmark/bm_timeout_zero.rb
1
require 'timeout'
2
100000.times { Timeout.timeout(0) {} }
common.mk
130 130
		symbol.$(OBJEXT) \
131 131
		thread.$(OBJEXT) \
132 132
		time.$(OBJEXT) \
133
		timeout.$(OBJEXT) \
133 134
		transcode.$(OBJEXT) \
134 135
		util.$(OBJEXT) \
135 136
		variable.$(OBJEXT) \
......
2812 2813
time.$(OBJEXT): {$(VPATH)}subst.h
2813 2814
time.$(OBJEXT): {$(VPATH)}time.c
2814 2815
time.$(OBJEXT): {$(VPATH)}timev.h
2816
timeout.$(OBJEXT): $(CCAN_DIR)/check_type/check_type.h
2817
timeout.$(OBJEXT): $(CCAN_DIR)/container_of/container_of.h
2818
timeout.$(OBJEXT): $(CCAN_DIR)/list/list.h
2819
timeout.$(OBJEXT): $(CCAN_DIR)/str/str.h
2820
timeout.$(OBJEXT): $(hdrdir)/ruby/ruby.h
2821
timeout.$(OBJEXT): $(top_srcdir)/include/ruby.h
2822
timeout.$(OBJEXT): {$(VPATH)}config.h
2823
timeout.$(OBJEXT): {$(VPATH)}defines.h
2824
timeout.$(OBJEXT): {$(VPATH)}id.h
2825
timeout.$(OBJEXT): {$(VPATH)}intern.h
2826
timeout.$(OBJEXT): {$(VPATH)}internal.h
2827
timeout.$(OBJEXT): {$(VPATH)}method.h
2828
timeout.$(OBJEXT): {$(VPATH)}missing.h
2829
timeout.$(OBJEXT): {$(VPATH)}node.h
2830
timeout.$(OBJEXT): {$(VPATH)}ruby_assert.h
2831
timeout.$(OBJEXT): {$(VPATH)}ruby_atomic.h
2832
timeout.$(OBJEXT): {$(VPATH)}st.h
2833
timeout.$(OBJEXT): {$(VPATH)}subst.h
2834
timeout.$(OBJEXT): {$(VPATH)}thread_$(THREAD_MODEL).h
2835
timeout.$(OBJEXT): {$(VPATH)}thread_native.h
2836
timeout.$(OBJEXT): {$(VPATH)}timeout.c
2837
timeout.$(OBJEXT): {$(VPATH)}vm_core.h
2838
timeout.$(OBJEXT): {$(VPATH)}vm_opts.h
2815 2839
transcode.$(OBJEXT): $(hdrdir)/ruby/ruby.h
2816 2840
transcode.$(OBJEXT): $(top_srcdir)/include/ruby.h
2817 2841
transcode.$(OBJEXT): {$(VPATH)}config.h
inits.c
62 62
    CALL(version);
63 63
    CALL(vm_trace);
64 64
    CALL(ast);
65
    CALL(timeout);
65 66
}
66 67
#undef CALL
internal.h
1838 1838
/* time.c */
1839 1839
struct timeval rb_time_timeval(VALUE);
1840 1840

  
1841
/* timeout.c */
1842
typedef struct rb_vm_struct rb_vm_t;
1843
typedef struct rb_execution_context_struct rb_execution_context_t;
1844
struct timespec *rb_timeout_sleep_interval(rb_vm_t *, struct timespec *);
1845
void rb_timeout_expire(const rb_execution_context_t *);
1846

  
1841 1847
/* thread.c */
1842 1848
#define COVERAGE_INDEX_LINES    0
1843 1849
#define COVERAGE_INDEX_BRANCHES 1
......
1859 1865
void rb_mutex_allow_trap(VALUE self, int val);
1860 1866
VALUE rb_uninterruptible(VALUE (*b_proc)(ANYARGS), VALUE data);
1861 1867
VALUE rb_mutex_owned_p(VALUE self);
1868
void rb_getclockofday(struct timespec *);
1862 1869

  
1863 1870
/* thread_pthread.c, thread_win32.c */
1864 1871
int rb_divert_reserved_fd(int fd);
test/test_timeout.rb
1 1
# frozen_string_literal: false
2 2
require 'test/unit'
3 3
require 'timeout'
4
begin
5
  require 'io/wait'
6
rescue LoadError
7
end
4 8

  
5 9
class TestTimeout < Test::Unit::TestCase
6 10
  def test_queue
......
107 111
    }
108 112
    assert(ok, bug11344)
109 113
  end
114

  
115
  def test_io
116
    t = 0.001
117
    IO.pipe do |r, w|
118
      assert_raise(Timeout::Error) { Timeout.timeout(t) { r.read } }
119
      if r.respond_to?(:wait)
120
        assert_raise(Timeout::Error) { Timeout.timeout(t) { r.wait } }
121
        assert_raise(Timeout::Error) { Timeout.timeout(t) { r.wait(9) } }
122
      end
123
      rset = [r, r.dup]
124
      assert_raise(Timeout::Error) do
125
        Timeout.timeout(t) { IO.select(rset, nil, nil, 9) }
126
      end
127
      assert_raise(Timeout::Error) { Timeout.timeout(t) { IO.select(rset) } }
128
      rset.each(&:close)
129
    end
130
  end
131

  
132
  def test_thread_join
133
    th = Thread.new { sleep }
134
    assert_raise(Timeout::Error) { Timeout.timeout(0.001) { th.join } }
135
  ensure
136
    th.kill
137
    th.join
138
  end
139

  
140
  def test_mutex_lock
141
    m = Mutex.new
142
    m.lock
143
    th = Thread.new { m.synchronize { :ok} }
144
    assert_raise(Timeout::Error) { Timeout.timeout(0.001) { th.join } }
145
    m.unlock
146
    assert_equal :ok, th.value
147
  end
148

  
149
  def test_yield_and_return_value
150
    r = Timeout.timeout(0) do |sec|
151
      assert_equal 0, sec
152
      sec
153
    end
154
    assert_equal 0, r
155
    t = 123
156
    r = Timeout.timeout(t) do |sec|
157
      assert_same t, sec
158
      sec
159
    end
160
    assert_same r, t
161
    r = Timeout.timeout(t, RuntimeError) do |sec|
162
      assert_same t, sec
163
      sec
164
    end
165
    assert_same r, t
166
  end
167

  
168
  def test_timeout_thread
169
    in_thread { sleep }
170
  end
171

  
172
  def test_timeout_loop
173
    in_thread { loop {} }
174
  end
175

  
176
  def test_timeout_io_read
177
    IO.pipe { |r, w| in_thread { r.read } }
178
  end
179

  
180
  def test_timeout_mutex
181
    m = Mutex.new
182
    m.synchronize { in_thread { m.synchronize {} } }
183
    in_thread { m.synchronize { m.sleep } }
184
  end
185

  
186
  def in_thread(&blk)
187
    th = Thread.new do
188
      begin
189
        Timeout.timeout(0.001) { blk.call }
190
      rescue => e
191
        e
192
      end
193
    end
194
    assert_same th, th.join(0.3)
195
    assert_kind_of Timeout::Error, th.value
196
  end
110 197
end
thread.c
474 474
}
475 475

  
476 476
static void
477
rb_threadptr_interrupt_common(rb_thread_t *th, int trap)
477
rb_threadptr_interrupt_set(rb_thread_t *th, rb_atomic_t flag)
478 478
{
479 479
    rb_native_mutex_lock(&th->interrupt_lock);
480
    if (trap) {
481
	RUBY_VM_SET_TRAP_INTERRUPT(th->ec);
482
    }
483
    else {
484
	RUBY_VM_SET_INTERRUPT(th->ec);
485
    }
480
    ATOMIC_OR(th->ec->interrupt_flag, flag);
486 481
    if (th->unblock.func != NULL) {
487
	(th->unblock.func)(th->unblock.arg);
488
    }
489
    else {
490
	/* none */
482
        (th->unblock.func)(th->unblock.arg);
491 483
    }
492 484
    rb_native_mutex_unlock(&th->interrupt_lock);
493 485
}
......
495 487
void
496 488
rb_threadptr_interrupt(rb_thread_t *th)
497 489
{
498
    rb_threadptr_interrupt_common(th, 0);
490
    rb_threadptr_interrupt_set(th, PENDING_INTERRUPT_MASK);
499 491
}
500 492

  
501 493
static void
502 494
threadptr_trap_interrupt(rb_thread_t *th)
503 495
{
504
    rb_threadptr_interrupt_common(th, 1);
496
    rb_threadptr_interrupt_set(th, TRAP_INTERRUPT_MASK);
505 497
}
506 498

  
507 499
static void
......
1189 1181
    rb_timespec_now(ts);
1190 1182
}
1191 1183

  
1184
void
1185
rb_getclockofday(struct timespec *ts)
1186
{
1187
    getclockofday(ts);
1188
}
1189

  
1192 1190
static void
1193 1191
timespec_add(struct timespec *dst, const struct timespec *ts)
1194 1192
{
......
2143 2141
	int timer_interrupt;
2144 2142
	int pending_interrupt;
2145 2143
	int trap_interrupt;
2144
	int timeout_interrupt;
2146 2145

  
2147 2146
	timer_interrupt = interrupt & TIMER_INTERRUPT_MASK;
2148 2147
	pending_interrupt = interrupt & PENDING_INTERRUPT_MASK;
2149 2148
	postponed_job_interrupt = interrupt & POSTPONED_JOB_INTERRUPT_MASK;
2150 2149
	trap_interrupt = interrupt & TRAP_INTERRUPT_MASK;
2150
	timeout_interrupt = interrupt & TIMEOUT_INTERRUPT_MASK;
2151 2151

  
2152 2152
	if (postponed_job_interrupt) {
2153 2153
	    rb_postponed_job_flush(th->vm);
......
2189 2189
	    }
2190 2190
	}
2191 2191

  
2192
	if (timeout_interrupt) {
2193
	    rb_timeout_expire(th->ec);
2194
	}
2195

  
2192 2196
	if (timer_interrupt) {
2193 2197
	    uint32_t limits_us = TIME_QUANTUM_USEC;
2194 2198

  
......
4162 4166
    }
4163 4167
    rb_native_mutex_unlock(&vm->thread_destruct_lock);
4164 4168

  
4169
    if (vm->timer_thread_timeout >= 0) {
4170
        rb_threadptr_interrupt_set(vm->main_thread, TIMEOUT_INTERRUPT_MASK);
4171
    }
4165 4172
    /* check signal */
4166 4173
    rb_threadptr_check_signal(vm->main_thread);
4167 4174

  
4175
    vm->timer_thread_timeout = ATOMIC_EXCHANGE(vm->next_timeout, -1);
4168 4176
#if 0
4169 4177
    /* prove profiler */
4170 4178
    if (vm->prove_profile.enable) {
......
5063 5071
    if (vm_living_thread_num(vm) > vm->sleeper) return;
5064 5072
    if (vm_living_thread_num(vm) < vm->sleeper) rb_bug("sleeper must not be more than vm_living_thread_num(vm)");
5065 5073
    if (patrol_thread && patrol_thread != GET_THREAD()) return;
5074
    if (rb_timeout_sleep_interval(vm, 0)) return;
5066 5075

  
5067 5076
    list_for_each(&vm->living_threads, th, vmlt_node) {
5068 5077
	if (th->status != THREAD_STOPPED_FOREVER || RUBY_VM_INTERRUPTED(th->ec)) {
thread_pthread.c
45 45
void rb_native_cond_wait(rb_nativethread_cond_t *cond, rb_nativethread_lock_t *mutex);
46 46
void rb_native_cond_initialize(rb_nativethread_cond_t *cond);
47 47
void rb_native_cond_destroy(rb_nativethread_cond_t *cond);
48
static void rb_thread_wakeup_timer_thread_low(void);
48
void rb_thread_wakeup_timer_thread_low(void);
49 49
static struct {
50 50
    pthread_t id;
51 51
    int created;
......
1246 1246
    }
1247 1247
}
1248 1248

  
1249
static void
1249
void
1250 1250
rb_thread_wakeup_timer_thread_low(void)
1251 1251
{
1252 1252
    if (timer_thread_pipe.owner_process == getpid()) {
......
1363 1363
 * @pre the calling context is in the timer thread.
1364 1364
 */
1365 1365
static inline void
1366
timer_thread_sleep(rb_global_vm_lock_t* gvl)
1366
timer_thread_sleep(rb_vm_t *vm)
1367 1367
{
1368 1368
    int result;
1369 1369
    int need_polling;
......
1376 1376

  
1377 1377
    need_polling = !ubf_threads_empty();
1378 1378

  
1379
    if (gvl->waiting > 0 || need_polling) {
1379
    if (vm->gvl.waiting > 0 || need_polling) {
1380 1380
	/* polling (TIME_QUANTUM_USEC usec) */
1381 1381
	result = poll(pollfds, 1, TIME_QUANTUM_USEC/1000);
1382 1382
    }
1383 1383
    else {
1384
	/* wait (infinite) */
1385
	result = poll(pollfds, numberof(pollfds), -1);
1384
	/* wait (infinite, or whatever timeout.c sets) */
1385
	result = poll(pollfds, numberof(pollfds), vm->timer_thread_timeout);
1386 1386
    }
1387 1387

  
1388 1388
    if (result == 0) {
......
1409 1409
#else /* USE_SLEEPY_TIMER_THREAD */
1410 1410
# define PER_NANO 1000000000
1411 1411
void rb_thread_wakeup_timer_thread(void) {}
1412
static void rb_thread_wakeup_timer_thread_low(void) {}
1412
void rb_thread_wakeup_timer_thread_low(void) {}
1413 1413

  
1414 1414
static rb_nativethread_lock_t timer_thread_lock;
1415 1415
static rb_nativethread_cond_t timer_thread_cond;
1416 1416

  
1417 1417
static inline void
1418
timer_thread_sleep(rb_global_vm_lock_t* unused)
1418
timer_thread_sleep(rb_vm_t *unused)
1419 1419
{
1420 1420
    struct timespec ts;
1421 1421
    ts.tv_sec = 0;
......
1479 1479
static void *
1480 1480
thread_timer(void *p)
1481 1481
{
1482
    rb_global_vm_lock_t *gvl = (rb_global_vm_lock_t *)p;
1482
    rb_vm_t *vm = p;
1483 1483

  
1484 1484
    if (TT_DEBUG) WRITE_CONST(2, "start timer thread\n");
1485 1485

  
......
1501 1501
	if (TT_DEBUG) WRITE_CONST(2, "tick\n");
1502 1502

  
1503 1503
        /* wait */
1504
	timer_thread_sleep(gvl);
1504
	timer_thread_sleep(vm);
1505 1505
    }
1506 1506
#if USE_SLEEPY_TIMER_THREAD
1507 1507
    CLOSE_INVALIDATE(normal[0]);
......
1573 1573
	if (timer_thread.created) {
1574 1574
	    rb_bug("rb_thread_create_timer_thread: Timer thread was already created\n");
1575 1575
	}
1576
	err = pthread_create(&timer_thread.id, &attr, thread_timer, &vm->gvl);
1576
	err = pthread_create(&timer_thread.id, &attr, thread_timer, vm);
1577 1577
	pthread_attr_destroy(&attr);
1578 1578

  
1579 1579
	if (err == EINVAL) {
......
1584 1584
	     * default stack size is enough for them:
1585 1585
	     */
1586 1586
	    stack_size = 0;
1587
	    err = pthread_create(&timer_thread.id, NULL, thread_timer, &vm->gvl);
1587
	    err = pthread_create(&timer_thread.id, NULL, thread_timer, vm);
1588 1588
	}
1589 1589
	if (err != 0) {
1590 1590
	    rb_warn("pthread_create failed for timer: %s, scheduling broken",
thread_win32.c
696 696
    /* do nothing */
697 697
}
698 698

  
699
void
700
rb_thread_wakeup_timer_thread_low(void)
701
{
702
    /* do nothing */
703
}
704

  
699 705
static void
700 706
rb_thread_create_timer_thread(void)
701 707
{
timeout.c
1
#include "internal.h"
2
#include "vm_core.h"
3

  
4
/* match ccan/timer/timer.h, which we may support in the future: */
5
struct timer {
6
    struct list_node list;
7
    uint64_t time; /* usec */
8
};
9

  
10
struct timeout {
11
    rb_execution_context_t *ec;
12
    VALUE sec;
13
    VALUE klass;
14
    VALUE message;
15
    struct timer t;
16
};
17
static VALUE eTimeoutError, mTimeout, eUncaughtThrow;
18
static ID id_thread;
19

  
20
static uint64_t
21
timespec2usec(const struct timespec *ts)
22
{
23
    return (uint64_t)ts->tv_sec * 1000000 + (uint64_t)ts->tv_nsec / 1000;
24
}
25

  
26
static void
27
timers_ll_add(struct list_head *timers, struct timer *t,
28
              uint64_t rel_usec, uint64_t now_usec)
29
{
30
    struct timer *i = 0;
31

  
32
    t->time = rel_usec + now_usec;
33

  
34
    /*
35
     * search backwards: assume typical projects have multiple objects
36
     * sharing the same timeout values, so new timers will expire later
37
     * than existing timers
38
     */
39
    list_for_each_rev(timers, i, list) {
40
        if (t->time >= i->time) {
41
            list_add_after(timers, &i->list, &t->list);
42
            return;
43
        }
44
    }
45
    list_add(timers, &t->list);
46
}
47

  
48
static struct timer *
49
timers_ll_expire(struct list_head *timers, uint64_t now_usec)
50
{
51
    struct timer *t = list_top(timers, struct timer, list);
52

  
53
    if (t && now_usec >= t->time) {
54
        list_del_init(&t->list);
55
        return t;
56
    }
57
    return 0;
58
}
59

  
60
static struct timer *
61
timers_ll_earliest(const struct list_head *timers)
62
{
63
    return list_top(timers, struct timer, list);
64
}
65

  
66
static VALUE
67
timeout_yield(VALUE tag, VALUE sec)
68
{
69
    return rb_yield(sec);
70
}
71

  
72
static VALUE
73
timeout_run(VALUE x)
74
{
75
    struct timeout *a = (struct timeout *)x;
76

  
77
    if (RTEST(a->klass)) {
78
      return rb_yield(a->sec);
79
    }
80

  
81
    /* for Timeout::Error#exception to throw */
82
    a->message = rb_exc_new_str(eTimeoutError, a->message);
83

  
84
    /* hide for rb_gc_force_recycle */
85
    RBASIC_CLEAR_CLASS(a->message);
86
    x = rb_catch_obj(a->message, timeout_yield, a->sec);
87
    if (x == a->message) {
88
        rb_attr_delete(x, id_thread);
89
        rb_exc_raise(x);
90
    }
91
    /* common case, no timeout, so exc is still hidden and safe to recycle */
92
    VM_ASSERT(!RBASIC_CLASS(a->message) && RB_TYPE_P(a->message, T_OBJECT));
93
    if (FL_TEST(a->message, FL_EXIVAR)) {
94
        rb_free_generic_ivar(a->message);
95
	FL_UNSET(a->message, FL_EXIVAR);
96
    }
97
    rb_gc_force_recycle(a->message);
98
    return x;
99
}
100

  
101
static VALUE
102
timeout_ensure(VALUE x)
103
{
104
    struct timeout *a = (struct timeout *)x;
105
    list_del_init(&a->t.list); /* inlined timer_del */
106

  
107
    return Qfalse;
108
}
109

  
110
static struct timeout *
111
rb_timers_expire_one(rb_vm_t *vm, uint64_t now_usec)
112
{
113
    struct timer *t = timers_ll_expire(&vm->timers, now_usec);
114

  
115
    return t ? container_of(t, struct timeout, t) : 0;
116
}
117

  
118
static void
119
arm_timer(rb_vm_t *vm, uint64_t rel_usec)
120
{
121
    int msec = rel_usec / 1000;
122

  
123
    ATOMIC_EXCHANGE(vm->next_timeout, (rb_atomic_t)msec);
124

  
125
    /* _low makes a difference in benchmark/bm_timeout_mt_nested.rb */
126
    rb_thread_wakeup_timer_thread_low();
127
}
128

  
129
struct expire_args {
130
    uint64_t now_usec;
131
    rb_thread_t *current_th;
132
    enum rb_thread_status prev_status;
133
};
134

  
135
static VALUE
136
do_expire(VALUE x)
137
{
138
    struct expire_args *ea = (struct expire_args *)x;
139
    rb_vm_t *vm = ea->current_th->vm;
140
    struct timeout *a;
141

  
142
    while ((a = rb_timers_expire_one(vm, ea->now_usec))) {
143
        rb_thread_t *target_th = rb_ec_thread_ptr(a->ec);
144
        VALUE exc;
145

  
146
        if (RTEST(a->klass)) {
147
            exc = rb_exc_new_str(a->klass, a->message);
148
        }
149
        else { /* default, pre-made Timeout::Error */
150
            exc = a->message;
151
            RBASIC_SET_CLASS_RAW(exc, eTimeoutError); /* reveal */
152
            /* for Timeout::Error#exception to call `throw' */
153
            rb_ivar_set(exc, id_thread, target_th->self);
154
        }
155
        if (ea->current_th == target_th) {
156
            rb_threadptr_pending_interrupt_enque(target_th, exc);
157
            rb_threadptr_interrupt(target_th);
158
        }
159
        else {
160
            rb_funcall(target_th->self, rb_intern("raise"), 1, exc);
161
        }
162
    }
163
    return Qfalse;
164
}
165

  
166
static VALUE
167
expire_ensure(VALUE p)
168
{
169
    struct expire_args *ea = (struct expire_args *)p;
170
    rb_vm_t *vm = ea->current_th->vm;
171
    struct timer *t = timers_ll_earliest(&vm->timers);
172
    if (t) {
173
        arm_timer(vm, t->time > ea->now_usec ? t->time - ea->now_usec : 0);
174
    }
175
    ea->current_th->status = ea->prev_status;
176
    return Qfalse;
177
}
178

  
179
void
180
rb_timeout_expire(const rb_execution_context_t *ec)
181
{
182
    struct expire_args ea;
183
    struct timespec ts;
184

  
185
    rb_getclockofday(&ts);
186
    ea.now_usec = timespec2usec(&ts);
187
    ea.current_th = rb_ec_thread_ptr(ec);
188
    ea.prev_status = ea.current_th->status;
189
    ea.current_th->status = THREAD_RUNNABLE;
190
    rb_ensure(do_expire, (VALUE)&ea, expire_ensure, (VALUE)&ea);
191
}
192

  
193
struct timespec *
194
rb_timeout_sleep_interval(rb_vm_t *vm, struct timespec *ts)
195
{
196
    struct timer *t = timers_ll_earliest(&vm->timers);
197

  
198
    if (t && !ts) {
199
        return (struct timespec *)-1;
200
    }
201
    if (t) {
202
        uint64_t now_usec;
203
        rb_getclockofday(ts);
204
        now_usec = timespec2usec(ts);
205
        if (t->time >= now_usec) {
206
            uint64_t rel_usec = t->time - now_usec;
207
            ts->tv_sec = rel_usec / 1000000;
208
            ts->tv_nsec = rel_usec % 1000000 * 1000;
209
        }
210
        else {
211
            ts->tv_sec = 0;
212
            ts->tv_nsec = 0;
213
        }
214
        return ts;
215
    }
216

  
217
    return 0;
218
}
219

  
220
static void
221
timeout_add(struct timeout *a)
222
{
223
    rb_vm_t *vm = rb_ec_vm_ptr(a->ec);
224
    struct timer *cur = timers_ll_earliest(&vm->timers);
225
    uint64_t now_usec, rel_usec;
226
    struct timeval tv = rb_time_interval(a->sec);
227
    struct timespec ts;
228

  
229
    ts.tv_sec = tv.tv_sec;
230
    ts.tv_nsec = tv.tv_usec * 1000;
231
    rel_usec = timespec2usec(&ts);
232
    rb_getclockofday(&ts);
233
    now_usec = timespec2usec(&ts);
234
    timers_ll_add(&vm->timers, &a->t, rel_usec, now_usec);
235
    if (!cur || timers_ll_earliest(&vm->timers) == &a->t) {
236
        arm_timer(vm, rel_usec);
237
    }
238
}
239

  
240
static VALUE
241
s_timeout(int argc, VALUE *argv, VALUE mod)
242
{
243
    struct timeout a;
244

  
245
    rb_scan_args(argc, argv, "12", &a.sec, &a.klass, &a.message);
246
    if (NIL_P(a.sec) || rb_equal(a.sec, INT2FIX(0))) {
247
        return rb_yield(a.sec);
248
    }
249
    if (!RTEST(a.message)) {
250
        a.message = rb_fstring_cstr("execution expired");
251
    }
252
    a.ec = GET_EC();
253
    timeout_add(&a);
254
    return rb_ensure(timeout_run, (VALUE)&a, timeout_ensure, (VALUE)&a);
255
}
256

  
257
static VALUE
258
begin_throw(VALUE self)
259
{
260
    rb_throw_obj(self, self);
261
    return self;
262
}
263

  
264
static VALUE
265
rescue_throw(VALUE ignore, VALUE err)
266
{
267
    return Qnil;
268
}
269

  
270
/*
271
 * We don't want to generate a backtrace like the version
272
 * in timeout.rb does.  We also want to raise the same
273
 * exception object so s_timeout (in core) can match
274
 * against it without relying on an extra proc for:
275
 *
276
 *      proc { |exception| return yield(sec) }
277
 */
278
static VALUE
279
timeout_error_exception(int argc, VALUE *argv, VALUE self)
280
{
281
    if (rb_attr_get(self, id_thread) == rb_thread_current()) {
282
        rb_rescue2(begin_throw, self, rescue_throw, Qfalse, eUncaughtThrow, 0);
283
    }
284
    return self;
285
}
286

  
287
static VALUE
288
timeout_compat(int argc, VALUE *argv, VALUE mod)
289
{
290
    VALUE w[2];
291
    w[0] = rb_funcall(mod, rb_intern("__method__"), 0);
292
    w[0] = rb_sprintf("Object#%"PRIsVALUE
293
                      " is deprecated, use Timeout.timeout instead.", w[0]);
294
    w[1] = rb_hash_new();
295
    rb_hash_aset(w[1], ID2SYM(rb_intern("uplevel")), INT2FIX(1));
296
    rb_funcallv(mod, rb_intern("warn"), 2, w);
297
    return s_timeout(argc, argv, mTimeout);
298
}
299

  
300
void
301
Init_timeout(void)
302
{
303
#undef rb_intern
304
    mTimeout = rb_define_module("Timeout");
305
    eTimeoutError = rb_define_class_under(mTimeout, "Error", rb_eRuntimeError);
306
    eUncaughtThrow = rb_const_get(rb_cObject, rb_intern("UncaughtThrowError"));
307
    rb_define_method(mTimeout, "timeout", s_timeout, -1);
308
    rb_define_singleton_method(mTimeout, "timeout", s_timeout, -1);
309
    rb_define_method(eTimeoutError, "exception", timeout_error_exception, -1);
310
    id_thread = rb_intern("@thread");
311

  
312
    /* backwards compatibility */
313
    rb_define_method(rb_mKernel, "timeout", timeout_compat, -1);
314
    rb_const_set(rb_cObject, rb_intern("TimeoutError"), eTimeoutError);
315
    rb_deprecate_constant(rb_cObject, "TimeoutError");
316

  
317
    rb_provide("timeout.rb");
318
}
vm.c
2323 2323
{
2324 2324
    MEMZERO(vm, rb_vm_t, 1);
2325 2325
    rb_vm_living_threads_init(vm);
2326
    list_head_init(&vm->timers);
2326 2327
    vm->thread_report_on_exception = 1;
2327 2328
    vm->src_encoding_index = -1;
2329
    vm->next_timeout = (rb_atomic_t)-1;
2330
    vm->timer_thread_timeout = -1;
2328 2331

  
2329 2332
    vm_default_params_setup(vm);
2330 2333
}
vm_core.h
545 545

  
546 546
    rb_global_vm_lock_t gvl;
547 547
    rb_nativethread_lock_t    thread_destruct_lock;
548
    struct list_head timers; /* TODO: consider moving to rb_thread_t */
549
    rb_atomic_t next_timeout;
550
    int timer_thread_timeout;
548 551

  
549 552
    struct rb_thread_struct *main_thread;
550 553
    struct rb_thread_struct *running_thread;
......
1556 1559
void rb_thread_stop_timer_thread(void);
1557 1560
void rb_thread_reset_timer_thread(void);
1558 1561
void rb_thread_wakeup_timer_thread(void);
1562
void rb_thread_wakeup_timer_thread_low(void);
1559 1563

  
1560 1564
static inline void
1561 1565
rb_vm_living_threads_init(rb_vm_t *vm)
......
1691 1695
    TIMER_INTERRUPT_MASK         = 0x01,
1692 1696
    PENDING_INTERRUPT_MASK       = 0x02,
1693 1697
    POSTPONED_JOB_INTERRUPT_MASK = 0x04,
1694
    TRAP_INTERRUPT_MASK	         = 0x08
1698
    TRAP_INTERRUPT_MASK	         = 0x08,
1699
    TIMEOUT_INTERRUPT_MASK       = 0x10
1695 1700
};
1696 1701

  
1697 1702
#define RUBY_VM_SET_TIMER_INTERRUPT(ec)		ATOMIC_OR((ec)->interrupt_flag, TIMER_INTERRUPT_MASK)
1698 1703
#define RUBY_VM_SET_INTERRUPT(ec)		ATOMIC_OR((ec)->interrupt_flag, PENDING_INTERRUPT_MASK)
1699 1704
#define RUBY_VM_SET_POSTPONED_JOB_INTERRUPT(ec)	ATOMIC_OR((ec)->interrupt_flag, POSTPONED_JOB_INTERRUPT_MASK)
1700 1705
#define RUBY_VM_SET_TRAP_INTERRUPT(ec)		ATOMIC_OR((ec)->interrupt_flag, TRAP_INTERRUPT_MASK)
1701
#define RUBY_VM_INTERRUPTED(ec)			((ec)->interrupt_flag & ~(ec)->interrupt_mask & \
1702
						 (PENDING_INTERRUPT_MASK|TRAP_INTERRUPT_MASK))
1706
#define RUBY_VM_INTERRUPTED(ec)                 ((ec)->interrupt_flag & \
1707
                                                  ~(ec)->interrupt_mask & \
1708
                                                  (PENDING_INTERRUPT_MASK|\
1709
                                                   TRAP_INTERRUPT_MASK|\
1710
                                                   TIMEOUT_INTERRUPT_MASK))
1703 1711
#define RUBY_VM_INTERRUPTED_ANY(ec)		((ec)->interrupt_flag & ~(ec)->interrupt_mask)
1704 1712

  
1705 1713
VALUE rb_exc_set_backtrace(VALUE exc, VALUE bt);
1706
-