Misc #15014 » 0001-thread.c-use-rb_hrtime_t-scalar-for-high-resolution-.patch
**common.mk**

 thread.$(OBJEXT): {$(VPATH)}encoding.h
 thread.$(OBJEXT): {$(VPATH)}eval_intern.h
 thread.$(OBJEXT): {$(VPATH)}gc.h
+thread.$(OBJEXT): {$(VPATH)}hrtime.h
 thread.$(OBJEXT): {$(VPATH)}id.h
 thread.$(OBJEXT): {$(VPATH)}intern.h
 thread.$(OBJEXT): {$(VPATH)}internal.h
**hrtime.h** (new file)

+#ifndef RB_HRTIME_H
+#define RB_HRTIME_H
+#include "ruby/ruby.h"
+#include <time.h>
+#if defined(HAVE_SYS_TIME_H)
+# include <sys/time.h>
+#endif
+
+/*
+ * Hi-res monotonic clock. It is currently nsec resolution, which has over
+ * 500 years of range.
+ *
+ * TBD: Is nsec even necessary? usec resolution seems enough for userspace
+ * and it'll be suitable for use with devices lasting over 500,000 years
+ * (maybe some devices designed for long-term space travel)
+ */
+#define RB_HRTIME_PER_USEC ((rb_hrtime_t)1000)
+#define RB_HRTIME_PER_MSEC (RB_HRTIME_PER_USEC * (rb_hrtime_t)1000)
+#define RB_HRTIME_PER_SEC  (RB_HRTIME_PER_MSEC * (rb_hrtime_t)1000)
+#define RB_HRTIME_MAX      UINT64_MAX
+
+/*
+ * Let's try to support time travelers. Let's assume anybody with a time machine
+ * also has access to a modern gcc or clang with 128-bit int support
+ */
+#ifdef MY_RUBY_BUILD_MAY_TIME_TRAVEL
+typedef int128_t rb_hrtime_t;
+#else
+typedef uint64_t rb_hrtime_t;
+#endif
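As a sanity check on the "over 500 years" claim above: an unsigned 64-bit nanosecond counter spans 2^64 ns ≈ 1.845 × 10^19 ns ≈ 1.845 × 10^10 s, and at roughly 3.156 × 10^7 s per year that is about 584 years of range.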
+static inline rb_hrtime_t
+rb_hrtime_mul(rb_hrtime_t a, rb_hrtime_t b)
+{
+    rb_hrtime_t c;
+
+#ifdef HAVE_BUILTIN___BUILTIN_MUL_OVERFLOW
+    if (__builtin_mul_overflow(a, b, &c))
+        return RB_HRTIME_MAX;
+#else
+    if (b != 0 && a > RB_HRTIME_MAX / b) /* overflow */
+        return RB_HRTIME_MAX;
+
+    c = a * b;
+#endif
+    return c;
+}
+static inline rb_hrtime_t
+rb_hrtime_add(rb_hrtime_t a, rb_hrtime_t b)
+{
+    rb_hrtime_t c;
+
+#ifdef HAVE_BUILTIN___BUILTIN_ADD_OVERFLOW
+    if (__builtin_add_overflow(a, b, &c))
+        return RB_HRTIME_MAX;
+#else
+    c = a + b;
+    if (c < a) /* overflow */
+        return RB_HRTIME_MAX;
+#endif
+    return c;
+}
+
+static inline rb_hrtime_t
+rb_timeval2hrtime(const struct timeval *tv)
+{
+    rb_hrtime_t s = rb_hrtime_mul((rb_hrtime_t)tv->tv_sec, RB_HRTIME_PER_SEC);
+    rb_hrtime_t u = rb_hrtime_mul((rb_hrtime_t)tv->tv_usec, RB_HRTIME_PER_USEC);
+
+    return rb_hrtime_add(s, u);
+}
+
+static inline rb_hrtime_t
+rb_timespec2hrtime(const struct timespec *ts)
+{
+    rb_hrtime_t s = rb_hrtime_mul((rb_hrtime_t)ts->tv_sec, RB_HRTIME_PER_SEC);
+
+    return rb_hrtime_add(s, (rb_hrtime_t)ts->tv_nsec);
+}
+
+static inline rb_hrtime_t
+rb_msec2hrtime(unsigned long msec)
+{
+    return rb_hrtime_mul((rb_hrtime_t)msec, RB_HRTIME_PER_MSEC);
+}
+
+static inline rb_hrtime_t
+rb_sec2hrtime(time_t sec)
+{
+    if (sec <= 0) return 0;
+
+    return rb_hrtime_mul((rb_hrtime_t)sec, RB_HRTIME_PER_SEC);
+}
+
+static inline struct timespec *
+rb_hrtime2timespec(struct timespec *ts, const rb_hrtime_t *hrt)
+{
+    if (hrt) {
+        ts->tv_sec = *hrt / RB_HRTIME_PER_SEC;
+        ts->tv_nsec = (int32_t)(*hrt % RB_HRTIME_PER_SEC);
+        return ts;
+    }
+    return 0;
+}
+
+static struct timeval *
+rb_hrtime2timeval(struct timeval *tv, const rb_hrtime_t *hrt)
+{
+    if (hrt) {
+        tv->tv_sec = *hrt / RB_HRTIME_PER_SEC;
+        tv->tv_usec = (int32_t)((*hrt % RB_HRTIME_PER_SEC)/RB_HRTIME_PER_USEC);
+        return tv;
+    }
+    return 0;
+}
+
+static inline void
+rb_getclockofday(struct timespec *ts)
+{
+#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
+    if (clock_gettime(CLOCK_MONOTONIC, ts) == 0)
+        return;
+#endif
+    rb_timespec_now(ts);
+}
+
+static rb_hrtime_t
+rb_hrtime_now(void)
+{
+    struct timespec ts;
+
+    rb_getclockofday(&ts);
+    return rb_timespec2hrtime(&ts);
+}
+
+#endif /* RB_HRTIME_H */
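Taken together, hrtime.h is a small saturating-arithmetic API: the conversions are built from rb_hrtime_mul()/rb_hrtime_add(), so out-of-range inputs clamp to RB_HRTIME_MAX instead of wrapping. A minimal sketch of how the helpers compose; this is a hypothetical standalone test, assuming it is compiled and linked inside the Ruby source tree (hrtime.h pulls in ruby/ruby.h and the HAVE_* config macros):

```c
#include <stdio.h>
#include <inttypes.h>
#include "hrtime.h"   /* the header added by this patch */

int
main(void)
{
    struct timeval tv = { 1, 500000 };          /* 1.5 seconds */
    rb_hrtime_t rel = rb_timeval2hrtime(&tv);   /* 1500000000 ns */
    struct timespec ts;

    /* round-trip back to a timespec: {1, 500000000} */
    rb_hrtime2timespec(&ts, &rel);
    printf("%" PRIu64 " ns -> {%ld, %ld}\n",
           (uint64_t)rel, (long)ts.tv_sec, (long)ts.tv_nsec);

    /* overflow saturates instead of wrapping */
    printf("saturates: %d\n", rb_hrtime_add(RB_HRTIME_MAX, 1) == RB_HRTIME_MAX);
    return 0;
}
```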
**thread.c**

 #include "internal.h"
 #include "iseq.h"
 #include "vm_core.h"
+#include "hrtime.h"

 #ifndef USE_NATIVE_THREAD_PRIORITY
 #define USE_NATIVE_THREAD_PRIORITY 0
...
     SLEEP_SPURIOUS_CHECK = 0x2
 };

-static void sleep_timespec(rb_thread_t *, struct timespec, unsigned int fl);
+static void sleep_hrtime(rb_thread_t *, rb_hrtime_t, unsigned int fl);
 static void sleep_forever(rb_thread_t *th, unsigned int fl);
 static void rb_thread_sleep_deadly_allow_spurious_wakeup(void);
 static int rb_threadptr_dead(rb_thread_t *th);
 static void rb_check_deadlock(rb_vm_t *vm);
 static int rb_threadptr_pending_interrupt_empty_p(const rb_thread_t *th);
 static const char *thread_status_name(rb_thread_t *th, int detail);
-static void timespec_add(struct timespec *, const struct timespec *);
-static void timespec_sub(struct timespec *, const struct timespec *);
-static int timespec_cmp(const struct timespec *a, const struct timespec *b);
-static int timespec_update_expire(struct timespec *, const struct timespec *);
-static void getclockofday(struct timespec *);
+static int hrtime_update_expire(rb_hrtime_t *, const rb_hrtime_t);
 NORETURN(static void async_bug_fd(const char *mesg, int errno_arg, int fd));
 static int consume_communication_pipe(int fd);
 static int check_signals_nogvl(rb_thread_t *, int sigwait_fd);
...
 # endif
 #endif

-static struct timespec *
-timespec_for(struct timespec *ts, const struct timeval *tv)
-{
-    if (tv) {
-        ts->tv_sec = tv->tv_sec;
-        ts->tv_nsec = tv->tv_usec * 1000;
-        return ts;
-    }
-    return 0;
-}
-
-static struct timeval *
-timeval_for(struct timeval *tv, const struct timespec *ts)
-{
-    if (tv && ts) {
-        tv->tv_sec = ts->tv_sec;
-        tv->tv_usec = (int32_t)(ts->tv_nsec / 1000); /* 10**6 < 2**(32-1) */
-        return tv;
-    }
-    return 0;
-}
-
 static void
-timeout_prepare(struct timespec **tsp,
-                struct timespec *ts, struct timespec *end,
-                const struct timeval *timeout)
+timeout_prepare(rb_hrtime_t **to, rb_hrtime_t *rel, rb_hrtime_t *end,
+                const struct timeval *timeout)
 {
     if (timeout) {
-        getclockofday(end);
-        timespec_add(end, timespec_for(ts, timeout));
-        *tsp = ts;
+        *rel = rb_timeval2hrtime(timeout);
+        *end = rb_hrtime_add(rb_hrtime_now(), *rel);
+        *to = rel;
     }
     else {
-        *tsp = 0;
+        *to = 0;
     }
 }
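The to/rel/end triple produced here is the idiom the rest of the patch builds on: *to either points at the caller's relative timeout rel (finite wait) or is NULL (wait forever), while end pins the absolute deadline once, up front. A sketch of the calling convention (fragment only; do_something_interruptible(), result and lerrno are hypothetical stand-ins for the real select/ppoll machinery below):

```c
rb_hrtime_t *to, rel, end;

timeout_prepare(&to, &rel, &end, timeout);  /* timeout may be NULL */
do {
    do_something_interruptible(to);         /* NULL `to` means no deadline */
} while (wait_retryable(&result, lerrno, to, end));
```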
...
     terminate_all(vm, th);

     while (vm_living_thread_num(vm) > 1) {
-        struct timespec ts = { 1, 0 };
+        rb_hrtime_t rel = RB_HRTIME_PER_SEC;
         /*
          * Thread exiting routine in thread_start_func_2 notifies
          * me when the last sub-thread exits.
          */
         sleeping = 1;
-        native_sleep(th, &ts);
+        native_sleep(th, &rel);
         RUBY_VM_CHECK_INTS_BLOCKING(ec);
         sleeping = 0;
     }
...
 struct join_arg {
     rb_thread_t *target, *waiting;
-    struct timespec *limit;
+    rb_hrtime_t *limit;
 };

 static VALUE
...
 {
     struct join_arg *p = (struct join_arg *)arg;
     rb_thread_t *target_th = p->target, *th = p->waiting;
-    struct timespec end;
+    rb_hrtime_t end;

     if (p->limit) {
-        getclockofday(&end);
-        timespec_add(&end, p->limit);
+        end = rb_hrtime_add(*p->limit, rb_hrtime_now());
     }

     while (target_th->status != THREAD_KILLED) {
...
             th->vm->sleeper--;
         }
         else {
-            if (timespec_update_expire(p->limit, &end)) {
+            if (hrtime_update_expire(p->limit, end)) {
                 thread_debug("thread_join: timeout (thid: %"PRI_THREAD_ID")\n",
                              thread_id_str(target_th));
                 return Qfalse;
...
 }

 static VALUE
-thread_join(rb_thread_t *target_th, struct timespec *ts)
+thread_join(rb_thread_t *target_th, rb_hrtime_t *rel)
 {
     rb_thread_t *th = GET_THREAD();
     struct join_arg arg;
...
     arg.target = target_th;
     arg.waiting = th;
-    arg.limit = ts;
+    arg.limit = rel;

     thread_debug("thread_join (thid: %"PRI_THREAD_ID", status: %s)\n",
                  thread_id_str(target_th), thread_status_name(target_th, TRUE));
...
     return target_th->self;
 }

-static struct timespec *double2timespec(struct timespec *, double);
+static rb_hrtime_t *double2hrtime(rb_hrtime_t *, double);

 /*
  * call-seq:
...
 thread_join_m(int argc, VALUE *argv, VALUE self)
 {
     VALUE limit;
-    struct timespec timespec;
-    struct timespec *ts = 0;
+    rb_hrtime_t rel, *to = 0;

     rb_scan_args(argc, argv, "01", &limit);
...
     switch (TYPE(limit)) {
       case T_NIL: break;
       case T_FIXNUM:
-        timespec.tv_sec = NUM2TIMET(limit);
-        if (timespec.tv_sec < 0)
-            timespec.tv_sec = 0;
-        timespec.tv_nsec = 0;
-        ts = &timespec;
+        rel = rb_sec2hrtime(NUM2TIMET(limit));
+        to = &rel;
         break;
       default:
-        ts = double2timespec(&timespec, rb_num2dbl(limit));
+        to = double2hrtime(&rel, rb_num2dbl(limit));
     }

-    return thread_join(rb_thread_ptr(self), ts);
+    return thread_join(rb_thread_ptr(self), to);
 }

 /*
...
 #define TIMESPEC_SEC_MAX TIMET_MAX
 #define TIMESPEC_SEC_MIN TIMET_MIN

-static struct timespec *
-double2timespec(struct timespec *ts, double d)
+static rb_hrtime_t *
+double2hrtime(rb_hrtime_t *hrt, double d)
 {
     /* assume timespec.tv_sec has same signedness as time_t */
     const double TIMESPEC_SEC_MAX_PLUS_ONE = TIMET_MAX_PLUS_ONE;
...
         return NULL;
     }
     else if (d <= 0) {
-        ts->tv_sec = 0;
-        ts->tv_nsec = 0;
+        *hrt = 0;
     }
     else {
-        ts->tv_sec = (time_t)d;
-        ts->tv_nsec = (long)((d - (time_t)d) * 1e9);
-        if (ts->tv_nsec < 0) {
-            ts->tv_nsec += (long)1e9;
-            ts->tv_sec -= 1;
-        }
+        *hrt = (rb_hrtime_t)(d * (double)RB_HRTIME_PER_SEC);
     }
-    return ts;
+    return hrt;
 }

 static void
...
     th->status = prev_status;
 }

-static void
-getclockofday(struct timespec *ts)
-{
-#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
-    if (clock_gettime(CLOCK_MONOTONIC, ts) == 0)
-        return;
-#endif
-    rb_timespec_now(ts);
-}
-
-static void
-timespec_add(struct timespec *dst, const struct timespec *ts)
-{
-    if (TIMESPEC_SEC_MAX - ts->tv_sec < dst->tv_sec)
-        dst->tv_sec = TIMESPEC_SEC_MAX;
-    else
-        dst->tv_sec += ts->tv_sec;
-
-    if ((dst->tv_nsec += ts->tv_nsec) >= 1000000000) {
-        if (dst->tv_sec == TIMESPEC_SEC_MAX) {
-            dst->tv_nsec = 999999999;
-        }
-        else {
-            dst->tv_sec++;
-            dst->tv_nsec -= 1000000000;
-        }
-    }
-}
-
-static void
-timespec_sub(struct timespec *dst, const struct timespec *tv)
-{
-    dst->tv_sec -= tv->tv_sec;
-    if ((dst->tv_nsec -= tv->tv_nsec) < 0) {
-        --dst->tv_sec;
-        dst->tv_nsec += 1000000000;
-    }
-}
-
-static int
-timespec_cmp(const struct timespec *a, const struct timespec *b)
-{
-    if (a->tv_sec > b->tv_sec) {
-        return 1;
-    }
-    else if (a->tv_sec < b->tv_sec) {
-        return -1;
-    }
-    else {
-        if (a->tv_nsec > b->tv_nsec) {
-            return 1;
-        }
-        else if (a->tv_nsec < b->tv_nsec) {
-            return -1;
-        }
-        return 0;
-    }
-}
-
 /*
  * @end is the absolute time when @ts is set to expire
  * Returns true if @end has passed
  * Updates @ts and returns false otherwise
  */
 static int
-timespec_update_expire(struct timespec *ts, const struct timespec *end)
-{
-    struct timespec now;
-
-    getclockofday(&now);
-    if (timespec_cmp(&now, end) >= 0) return 1;
-    thread_debug("timespec_update_expire: "
-                 "%"PRI_TIMET_PREFIX"d.%.6ld > %"PRI_TIMET_PREFIX"d.%.6ld\n",
-                 (time_t)end->tv_sec, (long)end->tv_nsec,
-                 (time_t)now.tv_sec, (long)now.tv_nsec);
-    *ts = *end;
-    timespec_sub(ts, &now);
+hrtime_update_expire(rb_hrtime_t *timeout, const rb_hrtime_t end)
+{
+    rb_hrtime_t now = rb_hrtime_now();
+
+    if (now > end) return 1;
+    thread_debug("hrtime_update_expire: "
+                 "%"PRI_64_PREFIX"u > %"PRI_64_PREFIX"u\n",
+                 end, now);
+    *timeout = end - now;
     return 0;
 }
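hrtime_update_expire() is the workhorse behind every timed wait in this patch: it compares a fixed absolute deadline against the monotonic clock and shrinks the caller's relative timeout in place, so spurious wakeups never extend the total sleep. For example, with a (made-up) deadline end = 5,000,000,000 and a spurious wakeup at now = 3,200,000,000, the function stores *timeout = 1,800,000,000 (1.8 s remaining) and returns 0, so the caller re-sleeps only for the remainder; once now > end it returns 1 and the wait loop exits.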

 static void
-sleep_timespec(rb_thread_t *th, struct timespec ts, unsigned int fl)
+sleep_hrtime(rb_thread_t *th, rb_hrtime_t rel, unsigned int fl)
 {
-    struct timespec end;
     enum rb_thread_status prev_status = th->status;
     int woke;
+    rb_hrtime_t end = rb_hrtime_add(rb_hrtime_now(), rel);

-    getclockofday(&end);
-    timespec_add(&end, &ts);
     th->status = THREAD_STOPPED;
     RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
     while (th->status == THREAD_STOPPED) {
-        native_sleep(th, &ts);
+        native_sleep(th, &rel);
         woke = vm_check_ints_blocking(th->ec);
         if (woke && !(fl & SLEEP_SPURIOUS_CHECK))
             break;
-        if (timespec_update_expire(&ts, &end))
+        if (hrtime_update_expire(&rel, end))
             break;
     }
     th->status = prev_status;
...
 rb_thread_wait_for(struct timeval time)
 {
     rb_thread_t *th = GET_THREAD();
-    struct timespec ts;

-    timespec_for(&ts, &time);
-    sleep_timespec(th, ts, SLEEP_SPURIOUS_CHECK);
+    sleep_hrtime(th, rb_timeval2hrtime(&time), SLEEP_SPURIOUS_CHECK);
 }

 /*
...
 #endif

 static int
-wait_retryable(int *result, int errnum, struct timespec *timeout,
-               const struct timespec *end)
+wait_retryable(int *result, int errnum, rb_hrtime_t *rel, rb_hrtime_t end)
 {
     if (*result < 0) {
         switch (errnum) {
...
           case ERESTART:
 #endif
             *result = 0;
-            if (timeout && timespec_update_expire(timeout, end)) {
-                timeout->tv_sec = 0;
-                timeout->tv_nsec = 0;
+            if (rel && hrtime_update_expire(rel, end)) {
+                *rel = 0;
             }
             return TRUE;
         }
...
     }
     else if (*result == 0) {
         /* check for spurious wakeup */
-        if (timeout) {
-            return !timespec_update_expire(timeout, end);
+        if (rel) {
+            return !hrtime_update_expire(rel, end);
         }
         return TRUE;
     }
...
     return Qfalse;
 }

-static const struct timespec *
-sigwait_timeout(rb_thread_t *th, int sigwait_fd, const struct timespec *orig,
+static const rb_hrtime_t *
+sigwait_timeout(rb_thread_t *th, int sigwait_fd, const rb_hrtime_t *orig,
                 int *drained_p)
 {
-    static const struct timespec quantum = { 0, TIME_QUANTUM_USEC * 1000 };
+    static const rb_hrtime_t quantum = TIME_QUANTUM_USEC * 1000;

     if (sigwait_fd >= 0 && (!ubf_threads_empty() || BUSY_WAIT_SIGNALS)) {
         *drained_p = check_signals_nogvl(th, sigwait_fd);
-        if (!orig || timespec_cmp(orig, &quantum) > 0)
+        if (!orig || *orig > quantum)
             return &quantum;
     }
...
     struct select_set *set = (struct select_set *)p;
     int MAYBE_UNUSED(result);
     int lerrno;
-    struct timespec ts, end, *tsp;
-    const struct timespec *to;
-    struct timeval tv;
+    rb_hrtime_t *to, rel, end;

-    timeout_prepare(&tsp, &ts, &end, set->timeout);
+    timeout_prepare(&to, &rel, &end, set->timeout);
 #define restore_fdset(dst, src) \
     ((dst) ? rb_fd_dup(dst, src) : (void)0)
 #define do_select_update() \
...
         lerrno = 0;

         BLOCKING_REGION(set->th, {
-            to = sigwait_timeout(set->th, set->sigwait_fd, tsp, &drained);
+            const rb_hrtime_t *sto;
+            struct timeval tv;
+
+            sto = sigwait_timeout(set->th, set->sigwait_fd, to, &drained);
             result = native_fd_select(set->max, set->rset, set->wset, set->eset,
-                                      timeval_for(&tv, to), set->th);
+                                      rb_hrtime2timeval(&tv, sto), set->th);
             if (result < 0) lerrno = errno;
         }, set->sigwait_fd >= 0 ? ubf_sigwait : ubf_select, set->th, FALSE);
...
         }

         RUBY_VM_CHECK_INTS_BLOCKING(set->th->ec); /* may raise */
-    } while (wait_retryable(&result, lerrno, tsp, &end) && do_select_update());
+    } while (wait_retryable(&result, lerrno, to, end) && do_select_update());

     if (result < 0) {
         errno = lerrno;
...
 {
     struct pollfd fds[2];
     int result = 0, lerrno;
-    struct timespec ts, end, *tsp;
-    const struct timespec *to;
+    rb_hrtime_t *to, rel, end;
     int drained;
     rb_thread_t *th = GET_THREAD();
     nfds_t nfds;
     rb_unblock_function_t *ubf;

-    timeout_prepare(&tsp, &ts, &end, timeout);
+    timeout_prepare(&to, &rel, &end, timeout);
     fds[0].fd = fd;
     fds[0].events = (short)events;

     do {
...
         lerrno = 0;

         BLOCKING_REGION(th, {
-            to = sigwait_timeout(th, fds[1].fd, tsp, &drained);
-            result = ppoll(fds, nfds, to, NULL);
+            const rb_hrtime_t *sto;
+            struct timespec ts;
+
+            sto = sigwait_timeout(th, fds[1].fd, to, &drained);
+            result = ppoll(fds, nfds, rb_hrtime2timespec(&ts, sto), NULL);
             if (result < 0) lerrno = errno;
         }, ubf, th, FALSE);
...
         rb_sigwait_fd_migrate(th->vm);
     }

     RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
-    } while (wait_retryable(&result, lerrno, tsp, &end));
+    } while (wait_retryable(&result, lerrno, to, end));

     if (result < 0) {
         errno = lerrno;
**thread_pthread.c**

 static void ubf_wakeup_all_threads(void);
 static int ubf_threads_empty(void);
 static int native_cond_timedwait(rb_nativethread_cond_t *, pthread_mutex_t *,
-                                 const struct timespec *);
-static const struct timespec *sigwait_timeout(rb_thread_t *, int sigwait_fd,
-                                              const struct timespec *,
+                                 const rb_hrtime_t *abs);
+static const rb_hrtime_t *sigwait_timeout(rb_thread_t *, int sigwait_fd,
+                                          const rb_hrtime_t *,
                                           int *drained_p);
 static void ubf_timer_disarm(void);
 static void threadptr_trap_interrupt(rb_thread_t *);
...
 #define TIME_QUANTUM_USEC (TIME_QUANTUM_MSEC * 1000)
 #define TIME_QUANTUM_NSEC (TIME_QUANTUM_USEC * 1000)

-static struct timespec native_cond_timeout(rb_nativethread_cond_t *,
-                                           struct timespec rel);
+static rb_hrtime_t native_cond_timeout(rb_nativethread_cond_t *, rb_hrtime_t);

 /*
  * Designate the next gvl.timer thread, favor the last thread in
...
 static void
 do_gvl_timer(rb_vm_t *vm, rb_thread_t *th)
 {
-    static struct timespec ts;
+    static rb_hrtime_t abs;
     native_thread_data_t *nd = &th->native_thread_data;

     /* take over wakeups from UBF_TIMER */
     ubf_timer_disarm();

     if (vm->gvl.timer_err == ETIMEDOUT) {
-        ts.tv_sec = 0;
-        ts.tv_nsec = TIME_QUANTUM_NSEC;
-        ts = native_cond_timeout(&nd->cond.gvlq, ts);
+        abs = native_cond_timeout(&nd->cond.gvlq, TIME_QUANTUM_NSEC);
     }

     vm->gvl.timer = th;
-    vm->gvl.timer_err = native_cond_timedwait(&nd->cond.gvlq, &vm->gvl.lock, &ts);
+    vm->gvl.timer_err = native_cond_timedwait(&nd->cond.gvlq, &vm->gvl.lock, &abs);
     vm->gvl.timer = 0;
     ubf_wakeup_all_threads();
...
 }

 static int
-native_cond_timedwait(rb_nativethread_cond_t *cond, pthread_mutex_t *mutex, const struct timespec *ts)
+native_cond_timedwait(rb_nativethread_cond_t *cond, pthread_mutex_t *mutex,
+                      const rb_hrtime_t *abs)
 {
     int r;
+    struct timespec ts;

     /*
      * An old Linux may return EINTR. Even though POSIX says
...
      * Let's hide it from arch generic code.
      */
     do {
-        r = pthread_cond_timedwait(cond, mutex, ts);
+        r = pthread_cond_timedwait(cond, mutex, rb_hrtime2timespec(&ts, abs));
     } while (r == EINTR);

     if (r != 0 && r != ETIMEDOUT) {
...
     return r;
 }

-static struct timespec
-native_cond_timeout(rb_nativethread_cond_t *cond, struct timespec timeout_rel)
+static rb_hrtime_t
+native_cond_timeout(rb_nativethread_cond_t *cond, const rb_hrtime_t rel)
 {
-    struct timespec abs;
-
     if (condattr_monotonic) {
-        getclockofday(&abs);
+        return rb_hrtime_add(rb_hrtime_now(), rel);
     }
     else {
-        rb_timespec_now(&abs);
-    }
-    timespec_add(&abs, &timeout_rel);
-
-    return abs;
+        struct timespec ts;
+
+        rb_timespec_now(&ts);
+        return rb_hrtime_add(rb_timespec2hrtime(&ts), rel);
+    }
 }

 #define native_cleanup_push pthread_cleanup_push
...
  * worst case network latency across the globe) without wasting memory
  */
 #ifndef THREAD_CACHE_TIME
-# define THREAD_CACHE_TIME 3
+# define THREAD_CACHE_TIME ((rb_hrtime_t)3 * RB_HRTIME_PER_SEC)
 #endif

 static rb_thread_t *
 register_cached_thread_and_wait(void *altstack)
 {
-    struct timespec end = { THREAD_CACHE_TIME, 0 };
+    rb_hrtime_t end = THREAD_CACHE_TIME;
     struct cached_thread_entry entry;

     rb_native_cond_initialize(&entry.cond);
...
 }

 static void
-native_cond_sleep(rb_thread_t *th, struct timespec *timeout_rel)
+native_cond_sleep(rb_thread_t *th, rb_hrtime_t *rel)
 {
-    struct timespec timeout;
     rb_nativethread_lock_t *lock = &th->interrupt_lock;
     rb_nativethread_cond_t *cond = &th->native_thread_data.cond.intr;

-    if (timeout_rel) {
-        /* Solaris cond_timedwait() return EINVAL if an argument is greater than
-         * current_time + 100,000,000.  So cut up to 100,000,000.  This is
-         * considered as a kind of spurious wakeup.  The caller to native_sleep
-         * should care about spurious wakeup.
-         *
-         * See also [Bug #1341] [ruby-core:29702]
-         * http://download.oracle.com/docs/cd/E19683-01/816-0216/6m6ngupgv/index.html
-         */
-        if (timeout_rel->tv_sec > 100000000) {
-            timeout_rel->tv_sec = 100000000;
-            timeout_rel->tv_nsec = 0;
-        }
-        timeout = native_cond_timeout(cond, *timeout_rel);
-    }
+    /* Solaris cond_timedwait() returns EINVAL if an argument is greater than
+     * current_time + 100,000,000.  So cut up to 100,000,000.  This is
+     * considered as a kind of spurious wakeup.  The caller to native_sleep
+     * should care about spurious wakeup.
+     *
+     * See also [Bug #1341] [ruby-core:29702]
+     * http://download.oracle.com/docs/cd/E19683-01/816-0216/6m6ngupgv/index.html
+     */
+    const rb_hrtime_t max = (rb_hrtime_t)100000000 * RB_HRTIME_PER_SEC;

     GVL_UNLOCK_BEGIN(th);
     {
...
             thread_debug("native_sleep: interrupted before sleep\n");
         }
         else {
-            if (!timeout_rel)
+            if (!rel) {
                 rb_native_cond_wait(cond, lock);
-            else
-                native_cond_timedwait(cond, lock, &timeout);
+            }
+            else {
+                rb_hrtime_t end;
+
+                if (*rel > max) {
+                    *rel = max;
+                }
+                end = native_cond_timeout(cond, *rel);
+                native_cond_timedwait(cond, lock, &end);
+            }
         }
         th->unblock.func = 0;
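Note that the Solaris clamp is far from the overflow edge of the new representation: 100,000,000 seconds is about 3.17 years, i.e. 10^8 × 10^9 = 10^17 ns as an rb_hrtime_t, comfortably below RB_HRTIME_MAX ≈ 1.8 × 10^19 ns, so computing max and adding it to the current monotonic time cannot itself overflow.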
...
         check_signals_nogvl(th, sigwait_fd);
     }
     else {
-        struct timespec end, diff;
-        const struct timespec *to;
+        rb_hrtime_t rel, end;
         int n = 0;

         if (ts) {
-            getclockofday(&end);
-            timespec_add(&end, ts);
-            diff = *ts;
-            ts = &diff;
+            rel = rb_timespec2hrtime(ts);
+            end = rb_hrtime_add(rb_hrtime_now(), rel);
         }
         /*
          * tricky: this needs to return on spurious wakeup (no auto-retry).
...
          * wakeups, so we care about the result of consume_communication_pipe
          */
         for (;;) {
-            to = sigwait_timeout(th, sigwait_fd, ts, &n);
+            const rb_hrtime_t *sto = sigwait_timeout(th, sigwait_fd, &rel, &n);
+            struct timespec tmp;
+
             if (n) return;
-            n = ppoll(&pfd, 1, to, 0);
+            n = ppoll(&pfd, 1, rb_hrtime2timespec(&tmp, sto), 0);
             if (check_signals_nogvl(th, sigwait_fd))
                 return;
             if (n || (th && RUBY_VM_INTERRUPTED(th->ec)))
                 return;
-            if (ts && timespec_update_expire(&diff, &end))
+            if (ts && hrtime_update_expire(&rel, end))
                 return;
         }
     }
 }

 static void
-native_sleep(rb_thread_t *th, struct timespec *timeout_rel)
+native_sleep(rb_thread_t *th, rb_hrtime_t *rel)
 {
     int sigwait_fd = rb_sigwait_fd_get(th);
...
         GVL_UNLOCK_BEGIN(th);
         if (!RUBY_VM_INTERRUPTED(th->ec)) {
-            rb_sigwait_sleep(th, sigwait_fd, timeout_rel);
+            struct timespec ts;
+
+            rb_sigwait_sleep(th, sigwait_fd, rb_hrtime2timespec(&ts, rel));
         }
         else {
             check_signals_nogvl(th, sigwait_fd);
...
         rb_sigwait_fd_migrate(th->vm);
     }
     else {
-        native_cond_sleep(th, timeout_rel);
+        native_cond_sleep(th, rel);
     }
 }
**thread_sync.c**

     while (mutex->th != th) {
         enum rb_thread_status prev_status = th->status;
-        struct timespec *timeout = 0;
-        struct timespec ts = { 0, 100000000 }; /* 100ms */
+        rb_hrtime_t *timeout = 0;
+        rb_hrtime_t rel = rb_msec2hrtime(100);

         th->status = THREAD_STOPPED_FOREVER;
         th->locking_mutex = self;
...
          */
         if ((vm_living_thread_num(th->vm) == th->vm->sleeper) &&
             !patrol_thread) {
-            timeout = &ts;
+            timeout = &rel;
             patrol_thread = th;
         }
...
 static VALUE
 rb_mutex_wait_for(VALUE time)
 {
-    struct timespec *t = (struct timespec*)time;
-
-    sleep_timespec(GET_THREAD(), *t, 0); /* permit spurious check */
+    rb_hrtime_t *rel = (rb_hrtime_t *)time;
+
+    /* permit spurious check */
+    sleep_hrtime(GET_THREAD(), *rel, 0);
     return Qnil;
 }
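rb_mutex_wait_for() relies on a common CRuby-internals idiom: rb_ensure() callbacks take a single VALUE argument, so the address of a stack variable is smuggled through a VALUE cast and cast back inside the callback. A minimal sketch of the same pattern; my_wait_cb and cleanup_cb are hypothetical names, not part of this patch:

```c
/* sketch: passing C data through rb_ensure()'s VALUE argument */
static VALUE
my_wait_cb(VALUE arg)
{
    rb_hrtime_t *rel = (rb_hrtime_t *)arg;  /* cast back from VALUE */
    /* ... sleep for *rel nanoseconds ... */
    return Qnil;
}

/* the stack variable outlives the call because rb_ensure() runs
 * the callback before the caller's frame unwinds */
rb_hrtime_t rel = rb_msec2hrtime(100);
rb_ensure(my_wait_cb, (VALUE)&rel, cleanup_cb, (VALUE)0);
```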
...
     if (!NIL_P(timeout)) {
         t = rb_time_interval(timeout);
     }

     rb_mutex_unlock(self);
     beg = time(0);
     if (NIL_P(timeout)) {
         rb_ensure(rb_mutex_sleep_forever, Qnil, mutex_lock_uninterruptible, self);
     }
     else {
-        struct timespec ts;
-        VALUE tsp = (VALUE)timespec_for(&ts, &t);
-
-        rb_ensure(rb_mutex_wait_for, tsp, mutex_lock_uninterruptible, self);
+        rb_hrtime_t rel = rb_timeval2hrtime(&t);
+
+        rb_ensure(rb_mutex_wait_for, (VALUE)&rel,
+                  mutex_lock_uninterruptible, self);
     }
     RUBY_VM_CHECK_INTS_BLOCKING(GET_EC());
     end = time(0) - beg;
**thread_win32.c**

     return ret;
 }

+static DWORD
+hrtime2msec(rb_hrtime_t hrt)
+{
+    /* divide in 64 bits before narrowing to DWORD; a bare (DWORD)hrt cast
+     * would wrap for any interval longer than 2**32 ns (~4.3 seconds) */
+    return (DWORD)(hrt / RB_HRTIME_PER_MSEC);
+}
+
 static void
-native_sleep(rb_thread_t *th, struct timespec *ts)
+native_sleep(rb_thread_t *th, rb_hrtime_t *rel)
 {
-    const volatile DWORD msec = (ts) ?
-        (DWORD)(ts->tv_sec * 1000 + ts->tv_nsec / 1000000) : INFINITE;
+    const volatile DWORD msec = rel ? hrtime2msec(*rel) : INFINITE;

     GVL_UNLOCK_BEGIN(th);
     {
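To see why hrtime2msec() must divide before narrowing, take a 10-second sleep: hrt = 10^10 ns. Dividing first gives 10^10 / 10^6 = 10^4 ms, which fits a DWORD exactly. Narrowing first would compute 10^10 mod 2^32 = 1,410,065,408 and then divide by 10^6, yielding about 1410 ms and waking the sleeper far too early (callers do re-check the deadline, but only after the wakeup).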