/* Turn off glibc source fortification for this translation unit.
 * NOTE(review): the reason is not visible in this excerpt -- presumably
 * the fortified wrappers interact badly with the stack/context
 * manipulation done by the thread code below; confirm before changing. */
54 #undef _FORTIFY_SOURCE
55 #undef __USE_FORTIFY_LEVEL
56 #define __USE_FORTIFY_LEVEL 0
/* Default (VM-managed) thread priority range used when the port does not
 * map Ruby priorities onto native scheduler priorities.
 * NOTE(review): the matching #endif is outside this excerpt. */
65 #ifndef USE_NATIVE_THREAD_PRIORITY
66 #define USE_NATIVE_THREAD_PRIORITY 0
67 #define RUBY_THREAD_PRIORITY_MAX 3
68 #define RUBY_THREAD_PRIORITY_MIN -3
/* 0 disables the thread_debug() tracing machinery in this file; a
 * negative value selects the runtime-toggleable variant (see the
 * "#if THREAD_DEBUG < 0" branch below). */
72 #define THREAD_DEBUG 0
/* Internal sentinel VALUEs (small Fixnums, deliberately not real
 * exception objects) -- by their names, apparently used to tell an
 * interrupted thread it is being killed vs. terminated. */
86 #define eKillSignal INT2FIX(0)
87 #define eTerminateSignal INT2FIX(1)
/* Pre-built "stream closed" exception cached in the VM's
 * special_exceptions table. */
90 #define closed_stream_error GET_VM()->special_exceptions[ruby_error_closed_stream]
/* Marks that this file provides the platform-dependent thread layer. */
100 #define THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION
113 #define RB_GC_SAVE_MACHINE_CONTEXT(th) \
115 rb_gc_save_machine_context(th); \
116 SET_MACHINE_STACK_END(&(th)->machine_stack_end); \
119 #define GVL_UNLOCK_BEGIN() do { \
120 rb_thread_t *_th_stored = GET_THREAD(); \
121 RB_GC_SAVE_MACHINE_CONTEXT(_th_stored); \
122 gvl_release(_th_stored->vm);
124 #define GVL_UNLOCK_END() \
125 gvl_acquire(_th_stored->vm, _th_stored); \
126 rb_thread_set_current(_th_stored); \
129 #define blocking_region_begin(th, region, func, arg) \
131 (region)->prev_status = (th)->status; \
132 set_unblock_function((th), (func), (arg), &(region)->oldubf); \
133 (th)->blocking_region_buffer = (region); \
134 (th)->status = THREAD_STOPPED; \
135 thread_debug("enter blocking region (%p)\n", (void *)(th)); \
136 RB_GC_SAVE_MACHINE_CONTEXT(th); \
137 gvl_release((th)->vm); \
140 #define BLOCKING_REGION(exec, ubf, ubfarg) do { \
141 rb_thread_t *__th = GET_THREAD(); \
142 struct rb_blocking_region_buffer __region; \
143 blocking_region_begin(__th, &__region, (ubf), (ubfarg)); \
145 blocking_region_end(__th, &__region); \
146 RUBY_VM_CHECK_INTS(); \
150 #ifdef HAVE_VA_ARGS_MACRO
151 void rb_thread_debug(
const char *file,
int line,
const char *fmt, ...);
/* Variadic-macro build: thread_debug() forwards the call site's
 * __FILE__/__LINE__ so debug output can be prefixed "file:line:" via
 * POSITION_FORMAT / POSITION_ARGS (note POSITION_ARGS carries its own
 * leading comma). */
152 #define thread_debug(fmt, ...) rb_thread_debug(__FILE__, __LINE__, fmt, ##__VA_ARGS__)
153 #define POSITION_FORMAT "%s:%d:"
154 #define POSITION_ARGS ,file, line
156 void rb_thread_debug(
const char *fmt, ...);
/* Fallback when the compiler lacks variadic macros: call
 * rb_thread_debug() directly; no file/line prefix is available, so the
 * position format and arguments expand to nothing. */
157 #define thread_debug rb_thread_debug
158 #define POSITION_FORMAT
159 #define POSITION_ARGS
162 # if THREAD_DEBUG < 0
163 static int rb_thread_debug_enabled;
174 rb_thread_s_debug(
void)
176 return INT2NUM(rb_thread_debug_enabled);
190 rb_thread_debug_enabled =
RTEST(val) ?
NUM2INT(val) : 0;
/* When THREAD_DEBUG >= 0 the enable flag is a compile-time constant
 * rather than the runtime-settable static above. */
194 # define rb_thread_debug_enabled THREAD_DEBUG
/* Debugging fully disabled: "if(0)printf" keeps thread_debug(...) call
 * sites syntax-checked while compiling the output away. */
197 #define thread_debug if(0)printf
/* NOTE(review): the third argument appears to be the register-stack
 * start (cf. register_stack_start nearby); this port ignores it, so the
 * macro drops it before calling the two-argument implementation. */
201 #define thread_start_func_2(th, st, rst) thread_start_func_2(th, st)
204 VALUE *register_stack_start));
210 #define DEBUG_OUT() \
211 WaitForSingleObject(&debug_mutex, INFINITE); \
212 printf(POSITION_FORMAT"%p - %s" POSITION_ARGS, GetCurrentThreadId(), buf); \
214 ReleaseMutex(&debug_mutex);
216 #elif defined(HAVE_PTHREAD_H)
219 #define DEBUG_OUT() \
220 pthread_mutex_lock(&debug_mutex); \
221 printf(POSITION_FORMAT"%#"PRIxVALUE" - %s" POSITION_ARGS, (VALUE)pthread_self(), buf); \
223 pthread_mutex_unlock(&debug_mutex);
226 #error "unsupported thread type"
230 static int debug_mutex_initialized = 1;
235 #ifdef HAVE_VA_ARGS_MACRO
236 const char *file,
int line,
238 const char *fmt, ...)
243 if (!rb_thread_debug_enabled)
return;
245 if (debug_mutex_initialized == 1) {
246 debug_mutex_initialized = 0;
247 native_mutex_initialize(&debug_mutex);
268 native_mutex_unlock(lock);
274 native_mutex_destroy(lock);
326 if (th != main_thread) {
333 thread_debug(
"terminate_i: main thread (%p)\n", (
void *)th);
365 if (err)
rb_bug(
"invalid keeping_mutexes: %s", err);
376 rb_bug(
"rb_thread_terminate_all: called by child thread (%p, %p)",
383 thread_debug(
"rb_thread_terminate_all (main thread: %p)\n", (
void *)th);
406 th->machine_register_stack_start = th->machine_register_stack_end = 0;
427 native_thread_destroy(th);
435 native_thread_init_stack(th);
447 # ifdef USE_SIGALTSTACK
450 rb_register_sigaltstack(th);
453 ruby_thread_set_native(th);
457 th->machine_register_stack_start = register_stack_start;
461 gvl_acquire(th->
vm, th);
463 thread_debug(
"thread start (get lock): %p\n", (
void *)th);
520 rb_bug(
"thread_start_func_2: locking_mutex must not be set (%p:%"PRIxVALUE")",
532 if (join_th == main_th) errinfo =
Qnil;
534 switch (join_th->
status) {
569 "can't start a new thread (frozen ThreadGroup)");
587 err = native_thread_create(th);
603 if (
GET_VM()->inhibit_thread_creation)
/* "Infinite" delay for join/sleep: 1e30 seconds, effectively forever. */
665 #define DELAY_INFTY 1E30
760 return target_th->
self;
850 time.
tv_usec = (int)((d - (
int)d) * 1e6);
875 }
while (th->
status == status);
882 #if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
885 if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0) {
910 native_sleep(th, &tv);
944 #if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
947 if (clock_gettime(CLOCK_MONOTONIC, &tp) == 0) {
1040 gvl_yield(th->
vm, th);
1062 gvl_acquire(th->
vm, th);
1064 thread_debug(
"leave blocking region (%p)\n", (
void *)th);
1065 remove_signal_thread_list(th);
1085 int saved_errno =
errno;
1090 errno = saved_errno;
1136 int saved_errno = 0;
1146 saved_errno =
errno;
1148 errno = saved_errno;
1158 int saved_errno = 0;
1163 saved_errno =
errno;
1166 errno = saved_errno;
1227 fprintf(stderr,
"[BUG] rb_thread_call_with_gvl() is called by non-ruby thread\n");
1235 rb_bug(
"rb_thread_call_with_gvl: called by a thread which has GVL.");
1296 int timer_interrupt = interrupt & 0x01;
1297 int finalizer_interrupt = interrupt & 0x04;
1325 if (finalizer_interrupt) {
1329 if (timer_interrupt) {
1330 unsigned long limits_us = 250 * 1000;
1364 rb_bug(
"deprecated function rb_gc_mark_threads is called");
/* Use an alternate signal stack for SIGSEGV where POSIX signals and
 * sigaltstack() are available -- presumably so the handler can still run
 * when the fault is a machine-stack overflow (TODO confirm).
 * NOTE(review): the matching #endif is outside this excerpt. */
1416 #if defined(POSIX_SIGNAL) && defined(SIGSEGV) && defined(HAVE_SIGALTSTACK)
1417 #define USE_SIGALTSTACK
1424 #ifdef USE_SIGALTSTACK
1452 #define THREAD_IO_WAITING_P(th) ( \
1453 ((th)->status == THREAD_STOPPED || \
1454 (th)->status == THREAD_STOPPED_FOREVER) && \
1455 (th)->blocking_region_buffer && \
1456 (th)->unblock.func == ubf_select && \
1686 "stopping only thread\n\tnote: use sleep to stop forever");
2047 str =
rb_sprintf(
"#<%s:%p %s>", cname, (
void *)thread, status);
2279 #if USE_NATIVE_THREAD_PRIORITY
2281 native_thread_apply_priority(th);
2297 #if defined(NFDBITS) && defined(HAVE_RB_FD_INIT)
2330 fds->fdset =
ALLOC(fd_set);
2331 FD_ZERO(fds->fdset);
2337 size_t size = howmany(
rb_fd_max(src), NFDBITS) *
sizeof(fd_mask);
2339 if (size <
sizeof(fd_set))
2340 size =
sizeof(fd_set);
2341 dst->maxfd = src->maxfd;
2343 memcpy(dst->fdset, src->fdset, size);
2349 if (fds->fdset)
xfree(fds->fdset);
2358 MEMZERO(fds->fdset, fd_mask, howmany(fds->maxfd, NFDBITS));
2364 size_t m = howmany(n + 1, NFDBITS) *
sizeof(fd_mask);
2365 size_t o = howmany(fds->maxfd, NFDBITS) *
sizeof(fd_mask);
2367 if (m <
sizeof(fd_set)) m =
sizeof(fd_set);
2368 if (o <
sizeof(fd_set)) o =
sizeof(fd_set);
2371 fds->fdset =
xrealloc(fds->fdset, m);
2372 memset((
char *)fds->fdset + o, 0, m - o);
2374 if (n >= fds->maxfd) fds->maxfd = n + 1;
2387 if (n >= fds->maxfd)
return;
2394 if (n >= fds->maxfd)
return 0;
2395 return FD_ISSET(n, fds->fdset) != 0;
2401 size_t size = howmany(max, NFDBITS) *
sizeof(fd_mask);
2403 if (size <
sizeof(fd_set)) size =
sizeof(fd_set);
2405 dst->fdset =
xrealloc(dst->fdset, size);
2406 memcpy(dst->fdset, src, size);
2412 size_t size = howmany(
rb_fd_max(src), NFDBITS) *
sizeof(fd_mask);
2414 if (size >
sizeof(fd_set)) {
2417 memcpy(dst,
rb_fd_ptr(src),
sizeof(fd_set));
2423 size_t size = howmany(
rb_fd_max(src), NFDBITS) *
sizeof(fd_mask);
2425 if (size <
sizeof(fd_set))
2426 size =
sizeof(fd_set);
2427 dst->maxfd = src->maxfd;
2428 dst->fdset =
xrealloc(dst->fdset, size);
2429 memcpy(dst->fdset, src->fdset, size);
2448 return select(n, r, w, e, timeout);
/* Route the classic FD_* macros through the rb_fd_* wrappers so the rest
 * of this file transparently uses the dynamically grown (xrealloc-based)
 * fd-set implementation defined above instead of a fixed fd_set. */
2456 #define FD_ZERO(f) rb_fd_zero(f)
2457 #define FD_SET(i, f) rb_fd_set((i), (f))
2458 #define FD_CLR(i, f) rb_fd_clr((i), (f))
2459 #define FD_ISSET(i, f) rb_fd_isset((i), (f))
2461 #elif defined(_WIN32)
2467 set->fdset =
ALLOC(fd_set);
2468 FD_ZERO(set->fdset);
2485 if (max >
FD_SETSIZE || max > dst->fd_count) {
2489 memcpy(dst->fd_array, src->fdset->fd_array, max);
2490 dst->fd_count =
max;
2507 for (i = 0; i <
set->fdset->fd_count; i++) {
2508 if (set->fdset->fd_array[i] == s) {
2512 if (set->fdset->fd_count >= (
unsigned)set->capa) {
2514 set->fdset =
xrealloc(set->fdset,
sizeof(
unsigned int) +
sizeof(SOCKET) * set->capa);
2516 set->fdset->fd_array[
set->fdset->fd_count++] = s;
/* Win32 branch: the same FD_* -> rb_fd_* redirection, here backed by the
 * SOCKET-array wrapper defined above. */
2524 #define FD_ZERO(f) rb_fd_zero(f)
2525 #define FD_SET(i, f) rb_fd_set((i), (f))
2526 #define FD_CLR(i, f) rb_fd_clr((i), (f))
2527 #define FD_ISSET(i, f) rb_fd_isset((i), (f))
/* Plain-fd_set fallback: result copy-back is a simple struct assignment. */
2530 #define rb_fd_rcopy(d, s) (*(d) = *(s))
2533 #if defined(__CYGWIN__)
2570 # if defined(__CYGWIN__)
2575 # if defined(__CYGWIN__)
2577 limit = (double)start_time.
tv_sec + (
double)start_time.
tv_usec*1e-6;
2581 limit += (double)timeout->
tv_sec+(
double)timeout->
tv_usec*1e-6;
2582 wait_rest = *timeout;
2583 timeout = &wait_rest;
2596 #if defined(__CYGWIN__)
2602 wait_100ms.
tv_usec = 100 * 1000;
2605 wait = (timeout == 0 || cmp_tv(&wait_100ms, timeout) < 0) ? &wait_100ms : timeout;
2609 if (result < 0) lerrno =
errno;
2610 if (result != 0)
break;
2621 subtract_tv(&elapsed, &start_time);
2623 if (!subtract_tv(timeout, &elapsed)) {
2627 if (cmp_tv(&wait_100ms, timeout) > 0) wait = timeout;
2629 }
while (__th->interrupt_flag == 0);
2631 }
while (result == 0 && !finish);
2633 #elif defined(_WIN32)
2637 result = native_fd_select(n, read, write, except, timeout, th);
2638 if (result < 0) lerrno =
errno;
2643 result =
rb_fd_select(n, read, write, except, timeout);
2644 if (result < 0) lerrno =
errno;
2666 wait_rest.
tv_sec = (
unsigned int)d;
2667 wait_rest.
tv_usec = (int)((d-(
double)wait_rest.
tv_sec)*1e6);
2694 thread_debug(
"rb_thread_wait_fd_rw(%d, %s)\n", fd, read ?
"read" :
"write");
2700 while (result <= 0) {
2708 thread_debug(
"rb_thread_wait_fd_rw(%d, %s): done\n", fd, read ?
"read" :
"write");
2772 if (!read && !write && !except) {
2790 return do_select(max, read, write, except, timeout);
2798 #if defined(HAVE_POLL) && defined(linux)
/* revents masks translating select()'s read/write/except readiness
 * classes into poll() event bits (HUP/ERR count as both readable and
 * writable so callers wake up and observe the error). */
2805 #define POLLIN_SET (POLLRDNORM | POLLRDBAND | POLLIN | POLLHUP | POLLERR)
2806 #define POLLOUT_SET (POLLWRBAND | POLLWRNORM | POLLOUT | POLLERR)
2807 #define POLLEX_SET (POLLPRI)
/* Extreme values of time_t, computed correctly whether time_t is a
 * signed or an unsigned integer type. */
2809 #define TIMET_MAX (~(time_t)0 <= 0 ? (time_t)((~(unsigned_time_t)0) >> 1) : (time_t)(~(unsigned_time_t)0))
2810 #define TIMET_MIN (~(time_t)0 <= 0 ? (time_t)(((unsigned_time_t)1) << (sizeof(time_t) * CHAR_BIT - 1)) : (time_t)0)
2814 int ppoll(
struct pollfd *fds, nfds_t nfds,
2815 const struct timespec *ts,
const sigset_t *sigmask)
2826 tmp2 = ts->
tv_nsec / (1000 * 1000);
2830 timeout_ms = tmp + tmp2;
2835 return poll(fds, nfds, timeout_ms);
2860 fds.events = (short)events;
2865 result = ppoll(&fds, 1, timeout, NULL);
2866 if (result < 0) lerrno =
errno;
2891 if (fds.revents & POLLNVAL) {
2901 if (fds.revents & POLLIN_SET)
2903 if (fds.revents & POLLOUT_SET)
2905 if (fds.revents & POLLEX_SET)
2990 #ifdef USE_CONSERVATIVE_STACK_END
2995 *stack_end_p = &stack_end;
3004 th->machine_register_stack_end = rb_ia64_bsp();
3036 if (vm->prove_profile.enable) {
3039 if (vm->during_gc) {
3049 if (timer_thread_id && native_stop_timer_thread(close_anyway)) {
3050 native_reset_timer_thread();
3057 native_reset_timer_thread();
3064 rb_thread_create_timer_thread();
3085 if (
RTEST(coverages)) {
3158 return ptr ?
sizeof(
struct thgroup) : 0;
3337 "can't move from the enclosed thread group");
/* Extract the rb_mutex_t pointer from a Mutex VALUE, type-checked
 * against mutex_data_type. */
3370 #define GetMutexPtr(obj, tobj) \
3371 TypedData_Get_Struct((obj), rb_mutex_t, &mutex_data_type, (tobj))
/* No GC mark callback is registered for mutex data. */
3373 #define mutex_mark NULL
3383 if (err)
rb_bug(
"%s", err);
3385 native_mutex_destroy(&mutex->
lock);
3386 native_cond_destroy(&mutex->
cond);
3420 native_mutex_initialize(&mutex->
lock);
3421 native_cond_initialize(&mutex->
cond, RB_CONDATTR_CLOCK_MONOTONIC);
3483 native_mutex_lock(&mutex->
lock);
3484 if (mutex->
th == 0) {
3490 native_mutex_unlock(&mutex->
lock);
3498 int interrupted = 0;
3521 timeout_rel.
tv_nsec = timeout_ms * 1000 * 1000;
3522 timeout = native_cond_timeout(&mutex->
cond, timeout_rel);
3523 err = native_cond_timedwait(&mutex->
cond, &mutex->
lock, &timeout);
3526 native_cond_wait(&mutex->
cond, &mutex->
lock);
3539 native_mutex_lock(&mutex->
lock);
3541 native_cond_broadcast(&mutex->
cond);
3542 native_mutex_unlock(&mutex->
lock);
3572 while (mutex->
th != th) {
3582 native_mutex_lock(&mutex->
lock);
3596 interrupted =
lock_func(th, mutex, timeout_ms);
3597 native_mutex_unlock(&mutex->
lock);
3600 if (patrol_thread == th)
3601 patrol_thread =
NULL;
3606 if (mutex->
th && interrupted == 2) {
3610 th->
status = prev_status;
3630 native_mutex_lock(&mutex->
lock);
3632 if (mutex->
th == 0) {
3633 err =
"Attempt to unlock a mutex which is not locked";
3635 else if (mutex->
th != th) {
3636 err =
"Attempt to unlock a mutex which is locked by another thread";
3641 native_cond_signal(&mutex->
cond);
3644 native_mutex_unlock(&mutex->
lock);
3648 if (th_mutex == mutex) {
3655 if (tmp_mutex == mutex) {
3659 th_mutex = tmp_mutex;
3705 if (mutex->
th == th)
3744 if (!
NIL_P(timeout)) {
3749 if (
NIL_P(timeout)) {
3755 end = time(0) - beg;
/* Unwrap a barrier VALUE with a typed-data check (raises on mismatch). */
3811 #define GetBarrierPtr(obj) ((VALUE)rb_check_typeddata((obj), &barrier_data_type))
3827 if (!mutex)
return Qfalse;
/* Compare an object-id VALUE against another id.  When a pointer fits in
 * long, ids are always Fixnums and plain == suffices; when only long long
 * is pointer-sized, an id may be represented as a Bignum and must be
 * compared with rb_big_eql().
 * NOTE(review): the closing #endif is outside this excerpt. */
3891 #if SIZEOF_LONG == SIZEOF_VOIDP
3892 #define OBJ_ID_EQL(obj_id, other) ((obj_id) == (other))
3893 #elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
3894 #define OBJ_ID_EQL(obj_id, other) (RB_TYPE_P((obj_id), T_BIGNUM) ? \
3895 rb_big_eql((obj_id), (other)) : ((obj_id) == (other)))
3901 if (paired_obj_id) {
3936 VALUE other_paired_obj = pair_list;
3959 if (pair_list ==
Qundef) {
4027 if (outer && !outermost) {
4039 if (result == p.list) {
/* Private event-hook flag bit, outside the public RUBY_EVENT_* range.
 * NOTE(review): by its name it marks a hook entry as logically removed
 * (e.g. while the hook list is being traversed); the usage is not fully
 * visible in this excerpt -- confirm before relying on this. */
4086 #define RUBY_EVENT_REMOVED 0x1000000
4112 hook->
flag = events;
4182 for (; hook; hook = hook->
next) {
4187 if (flag & hook->
flag) {
4188 (*hook->
func)(flag, hook->
data,
self,
id, klass);
4225 th->
tracing &= ~EVENT_RUNNING_VM;
4265 if (func == 0 || hook->
func == func) {
4560 if (p->
klass != 0) {
4571 klass =
RBASIC(klass)->klass;
4574 klass =
rb_iv_get(klass,
"__attached__");
4578 argv[0] = eventname;
4583 argv[5] = klass ? klass :
Qnil;
4613 volatile int raised;
4614 volatile int outer_state;
4617 if (running == ev && !always) {
4625 outer_state = th->
state;
4630 result = (*func)(
arg, running);
4645 th->
state = outer_state;
/* From here on, route rb_intern() of string literals through the
 * const-string variant (rb_intern_const). */
4690 #define rb_intern(str) rb_intern_const(str)
4707 #if THREAD_DEBUG < 0
4763 recursive_key =
rb_intern(
"__recursive_key__");
4777 gvl_acquire(th->
vm, th);
4782 rb_thread_create_timer_thread();
4785 (void)native_mutex_trylock;
4810 native_mutex_lock(&mutex->
lock);
4814 native_mutex_unlock(&mutex->
lock);
4820 #ifdef DEBUG_DEADLOCK_CHECK
4833 native_mutex_lock(&mutex->
lock);
4835 native_mutex_unlock(&mutex->
lock);
4851 if (patrol_thread && patrol_thread !=
GET_THREAD())
return;
4859 #ifdef DEBUG_DEADLOCK_CHECK
4872 if (coverage &&
RBASIC(coverage)->klass == 0) {
4888 return GET_VM()->coverages;
4894 GET_VM()->coverages = coverages;