/* Disable glibc fortification for this translation unit — presumably the
 * fortified (__*_chk) wrappers interfere with the low-level machine-stack
 * and context manipulation done below.  TODO(review): confirm original
 * rationale against upstream commit history. */
54 #undef _FORTIFY_SOURCE
55 #undef __USE_FORTIFY_LEVEL
56 #define __USE_FORTIFY_LEVEL 0
/* When the platform does not provide native thread priorities, priority is
 * presumably emulated by the VM scheduler; the emulated range is clamped
 * to [RUBY_THREAD_PRIORITY_MIN, RUBY_THREAD_PRIORITY_MAX] = [-3, 3]. */
65 #ifndef USE_NATIVE_THREAD_PRIORITY
66 #define USE_NATIVE_THREAD_PRIORITY 0
67 #define RUBY_THREAD_PRIORITY_MAX 3
68 #define RUBY_THREAD_PRIORITY_MIN -3
/* Compile-time switch for thread_debug() tracing: 0 disables it entirely
 * (see the `#define thread_debug if(0)printf` stub further down). */
72 #define THREAD_DEBUG 0
/* Sentinel "exception" values used to request thread kill / terminate.
 * They are Fixnums (INT2FIX), not real exception objects, so presumably
 * the interrupt machinery recognizes them specially — TODO confirm. */
86 #define eKillSignal INT2FIX(0)
87 #define eTerminateSignal INT2FIX(1)
/* Pre-allocated "closed stream" exception, fetched from the VM's table of
 * special exceptions (raised without allocating at raise time). */
90 #define closed_stream_error GET_VM()->special_exceptions[ruby_error_closed_stream]
/* Marker consumed by the platform-specific thread implementation that is
 * textually included into this file — presumably gates its definitions;
 * TODO confirm against thread_{pthread,win32}.c. */
100 #define THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION
113 #define RB_GC_SAVE_MACHINE_CONTEXT(th) \
115 rb_gc_save_machine_context(th); \
116 SET_MACHINE_STACK_END(&(th)->machine_stack_end); \
119 #define GVL_UNLOCK_BEGIN() do { \
120 rb_thread_t *_th_stored = GET_THREAD(); \
121 RB_GC_SAVE_MACHINE_CONTEXT(_th_stored); \
122 gvl_release(_th_stored->vm);
124 #define GVL_UNLOCK_END() \
125 gvl_acquire(_th_stored->vm, _th_stored); \
126 rb_thread_set_current(_th_stored); \
129 #define blocking_region_begin(th, region, func, arg) \
131 (region)->prev_status = (th)->status; \
132 set_unblock_function((th), (func), (arg), &(region)->oldubf); \
133 (th)->blocking_region_buffer = (region); \
134 (th)->status = THREAD_STOPPED; \
135 thread_debug("enter blocking region (%p)\n", (void *)(th)); \
136 RB_GC_SAVE_MACHINE_CONTEXT(th); \
137 gvl_release((th)->vm); \
140 #define BLOCKING_REGION(exec, ubf, ubfarg) do { \
141 rb_thread_t *__th = GET_THREAD(); \
142 struct rb_blocking_region_buffer __region; \
143 blocking_region_begin(__th, &__region, (ubf), (ubfarg)); \
145 blocking_region_end(__th, &__region); \
146 RUBY_VM_CHECK_INTS(); \
/* Debug-trace plumbing.  With variadic-macro support, thread_debug()
 * forwards the call site's __FILE__/__LINE__ to rb_thread_debug().
 * `##__VA_ARGS__` is the GNU extension that drops the trailing comma when
 * the macro is invoked with no variadic arguments. */
150 #ifdef HAVE_VA_ARGS_MACRO
151 void rb_thread_debug(
const char *file,
int line,
const char *fmt, ...);
152 #define thread_debug(fmt, ...) rb_thread_debug(__FILE__, __LINE__, fmt, ##__VA_ARGS__)
/* POSITION_* splice the received file/line into rb_thread_debug's own
 * output format; note POSITION_ARGS carries its own leading comma. */
153 #define POSITION_FORMAT "%s:%d:"
154 #define POSITION_ARGS ,file, line
/* Fallback for compilers without variadic macros: no call-site position is
 * captured, so the POSITION_* pieces expand to nothing. */
156 void rb_thread_debug(
const char *fmt, ...);
157 #define thread_debug rb_thread_debug
158 #define POSITION_FORMAT
159 #define POSITION_ARGS
162 # if THREAD_DEBUG < 0
163 static int rb_thread_debug_enabled;
174 rb_thread_s_debug(
void)
176 return INT2NUM(rb_thread_debug_enabled);
190 rb_thread_debug_enabled =
RTEST(val) ?
NUM2INT(val) : 0;
/* When tracing is not runtime-switchable, the enable flag collapses to the
 * compile-time THREAD_DEBUG constant. */
194 # define rb_thread_debug_enabled THREAD_DEBUG
/* THREAD_DEBUG == 0: compile the call away.  NOTE(review): a bare
 * `if(0)printf` is not do{}while(0)-guarded, so
 * `if (x) thread_debug(...); else ...` would bind the else to this hidden
 * `if` — tolerable only because thread_debug is used as a statement. */
197 #define thread_debug if(0)printf
/* On platforms without a second (register) stack the third argument is
 * dropped — presumably the non-IA64 case; confirm against the surrounding
 * #ifdef (see machine_register_stack_* uses below). */
201 #define thread_start_func_2(th, st, rst) thread_start_func_2(th, st)
204 VALUE *register_stack_start));
210 #define DEBUG_OUT() \
211 WaitForSingleObject(&debug_mutex, INFINITE); \
212 printf(POSITION_FORMAT"%p - %s" POSITION_ARGS, GetCurrentThreadId(), buf); \
214 ReleaseMutex(&debug_mutex);
216 #elif defined(HAVE_PTHREAD_H)
219 #define DEBUG_OUT() \
220 pthread_mutex_lock(&debug_mutex); \
221 printf(POSITION_FORMAT"%#"PRIxVALUE" - %s" POSITION_ARGS, (VALUE)pthread_self(), buf); \
223 pthread_mutex_unlock(&debug_mutex);
226 #error "unsupported thread type"
230 static int debug_mutex_initialized = 1;
235 #ifdef HAVE_VA_ARGS_MACRO
236 const char *file,
int line,
238 const char *fmt, ...)
243 if (!rb_thread_debug_enabled)
return;
245 if (debug_mutex_initialized == 1) {
246 debug_mutex_initialized = 0;
247 native_mutex_initialize(&debug_mutex);
268 native_mutex_unlock(lock);
274 native_mutex_destroy(lock);
326 if (th != main_thread) {
333 thread_debug(
"terminate_i: main thread (%p)\n", (
void *)th);
363 if (err)
rb_bug(
"invalid keeping_mutexes: %s", err);
374 rb_bug(
"rb_thread_terminate_all: called by child thread (%p, %p)",
381 thread_debug(
"rb_thread_terminate_all (main thread: %p)\n", (
void *)th);
404 th->machine_register_stack_start = th->machine_register_stack_end = 0;
425 native_thread_destroy(th);
433 native_thread_init_stack(th);
445 # ifdef USE_SIGALTSTACK
448 rb_register_sigaltstack(th);
451 ruby_thread_set_native(th);
455 th->machine_register_stack_start = register_stack_start;
459 gvl_acquire(th->
vm, th);
461 thread_debug(
"thread start (get lock): %p\n", (
void *)th);
518 rb_bug(
"thread_start_func_2: locking_mutex must not be set (%p:%"PRIxVALUE")",
530 if (join_th == main_th) errinfo =
Qnil;
532 switch (join_th->
status) {
567 "can't start a new thread (frozen ThreadGroup)");
585 err = native_thread_create(th);
601 if (
GET_VM()->inhibit_thread_creation)
/* Effectively-infinite delay in seconds, used as the "wait forever"
 * sentinel — presumably for Thread#join-style waits; TODO confirm. */
663 #define DELAY_INFTY 1E30
758 return target_th->
self;
848 time.
tv_usec = (int)((d - (
int)d) * 1e6);
873 }
while (th->
status == status);
880 #if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
883 if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0) {
908 native_sleep(th, &tv);
942 #if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
945 if (clock_gettime(CLOCK_MONOTONIC, &tp) == 0) {
1038 gvl_yield(th->
vm, th);
1060 gvl_acquire(th->
vm, th);
1062 thread_debug(
"leave blocking region (%p)\n", (
void *)th);
1063 remove_signal_thread_list(th);
1083 int saved_errno =
errno;
1088 errno = saved_errno;
1134 int saved_errno = 0;
1144 saved_errno =
errno;
1146 errno = saved_errno;
1156 int saved_errno = 0;
1161 saved_errno =
errno;
1164 errno = saved_errno;
1225 fprintf(stderr,
"[BUG] rb_thread_call_with_gvl() is called by non-ruby thread\n");
1233 rb_bug(
"rb_thread_call_with_gvl: called by a thread which has GVL.");
1294 int timer_interrupt = interrupt & 0x01;
1295 int finalizer_interrupt = interrupt & 0x04;
1323 if (finalizer_interrupt) {
1327 if (timer_interrupt) {
1328 unsigned long limits_us = 250 * 1000;
1362 rb_bug(
"deprecated function rb_gc_mark_threads is called");
/* Enable an alternate signal stack where available (cf.
 * rb_register_sigaltstack() call at thread start) so the SIGSEGV handler
 * can still run when the thread's own machine stack is exhausted — the
 * usual sigaltstack rationale; TODO confirm handler details. */
1414 #if defined(POSIX_SIGNAL) && defined(SIGSEGV) && defined(HAVE_SIGALTSTACK)
1415 #define USE_SIGALTSTACK
1422 #ifdef USE_SIGALTSTACK
1450 #define THREAD_IO_WAITING_P(th) ( \
1451 ((th)->status == THREAD_STOPPED || \
1452 (th)->status == THREAD_STOPPED_FOREVER) && \
1453 (th)->blocking_region_buffer && \
1454 (th)->unblock.func == ubf_select && \
1684 "stopping only thread\n\tnote: use sleep to stop forever");
2045 str =
rb_sprintf(
"#<%s:%p %s>", cname, (
void *)thread, status);
2277 #if USE_NATIVE_THREAD_PRIORITY
2279 native_thread_apply_priority(th);
2295 #if defined(NFDBITS) && defined(HAVE_RB_FD_INIT)
2328 fds->fdset =
ALLOC(fd_set);
2329 FD_ZERO(fds->fdset);
2335 size_t size = howmany(
rb_fd_max(src), NFDBITS) *
sizeof(fd_mask);
2337 if (size <
sizeof(fd_set))
2338 size =
sizeof(fd_set);
2339 dst->maxfd = src->maxfd;
2341 memcpy(dst->fdset, src->fdset, size);
2347 if (fds->fdset)
xfree(fds->fdset);
2356 MEMZERO(fds->fdset, fd_mask, howmany(fds->maxfd, NFDBITS));
2362 size_t m = howmany(n + 1, NFDBITS) *
sizeof(fd_mask);
2363 size_t o = howmany(fds->maxfd, NFDBITS) *
sizeof(fd_mask);
2365 if (m <
sizeof(fd_set)) m =
sizeof(fd_set);
2366 if (o <
sizeof(fd_set)) o =
sizeof(fd_set);
2369 fds->fdset =
xrealloc(fds->fdset, m);
2370 memset((
char *)fds->fdset + o, 0, m - o);
2372 if (n >= fds->maxfd) fds->maxfd = n + 1;
2385 if (n >= fds->maxfd)
return;
2392 if (n >= fds->maxfd)
return 0;
2393 return FD_ISSET(n, fds->fdset) != 0;
2399 size_t size = howmany(max, NFDBITS) *
sizeof(fd_mask);
2401 if (size <
sizeof(fd_set)) size =
sizeof(fd_set);
2403 dst->fdset =
xrealloc(dst->fdset, size);
2404 memcpy(dst->fdset, src, size);
2410 size_t size = howmany(
rb_fd_max(src), NFDBITS) *
sizeof(fd_mask);
2412 if (size >
sizeof(fd_set)) {
2415 memcpy(dst,
rb_fd_ptr(src),
sizeof(fd_set));
2421 size_t size = howmany(
rb_fd_max(src), NFDBITS) *
sizeof(fd_mask);
2423 if (size <
sizeof(fd_set))
2424 size =
sizeof(fd_set);
2425 dst->maxfd = src->maxfd;
2426 dst->fdset =
xrealloc(dst->fdset, size);
2427 memcpy(dst->fdset, src->fdset, size);
2446 return select(n, r, w, e, timeout);
/* Route the classic FD_* macros through the rb_fd_* wrappers above, so the
 * rest of this file manipulates the dynamically sized rb_fdset_t (grown
 * past FD_SETSIZE via xrealloc in rb_fd_set) instead of a raw fd_set. */
2454 #define FD_ZERO(f) rb_fd_zero(f)
2455 #define FD_SET(i, f) rb_fd_set((i), (f))
2456 #define FD_CLR(i, f) rb_fd_clr((i), (f))
2457 #define FD_ISSET(i, f) rb_fd_isset((i), (f))
2459 #elif defined(_WIN32)
2465 set->fdset =
ALLOC(fd_set);
2466 FD_ZERO(set->fdset);
2483 if (max >
FD_SETSIZE || max > dst->fd_count) {
2487 memcpy(dst->fd_array, src->fdset->fd_array, max);
2488 dst->fd_count =
max;
2505 for (i = 0; i <
set->fdset->fd_count; i++) {
2506 if (set->fdset->fd_array[i] == s) {
2510 if (set->fdset->fd_count >= (
unsigned)set->capa) {
2512 set->fdset =
xrealloc(set->fdset,
sizeof(
unsigned int) +
sizeof(SOCKET) * set->capa);
2514 set->fdset->fd_array[
set->fdset->fd_count++] = s;
/* Same FD_* redirection for the Win32 rb_fd_* implementation above. */
2522 #define FD_ZERO(f) rb_fd_zero(f)
2523 #define FD_SET(i, f) rb_fd_set((i), (f))
2524 #define FD_CLR(i, f) rb_fd_clr((i), (f))
2525 #define FD_ISSET(i, f) rb_fd_isset((i), (f))
/* Reverse copy as plain struct assignment — presumably the branch where
 * rb_fdset_t is a bare fd_set (no wrapper); TODO confirm which #if arm
 * this lands in. */
2528 #define rb_fd_rcopy(d, s) (*(d) = *(s))
2531 #if defined(__CYGWIN__)
2568 # if defined(__CYGWIN__)
2573 # if defined(__CYGWIN__)
2575 limit = (double)start_time.
tv_sec + (
double)start_time.
tv_usec*1e-6;
2579 limit += (double)timeout->
tv_sec+(
double)timeout->
tv_usec*1e-6;
2580 wait_rest = *timeout;
2581 timeout = &wait_rest;
2594 #if defined(__CYGWIN__)
2600 wait_100ms.
tv_usec = 100 * 1000;
2603 wait = (timeout == 0 || cmp_tv(&wait_100ms, timeout) < 0) ? &wait_100ms : timeout;
2607 if (result < 0) lerrno =
errno;
2608 if (result != 0)
break;
2619 subtract_tv(&elapsed, &start_time);
2621 if (!subtract_tv(timeout, &elapsed)) {
2625 if (cmp_tv(&wait_100ms, timeout) > 0) wait = timeout;
2627 }
while (__th->interrupt_flag == 0);
2629 }
while (result == 0 && !finish);
2631 #elif defined(_WIN32)
2635 result = native_fd_select(n, read, write, except, timeout, th);
2636 if (result < 0) lerrno =
errno;
2641 result =
rb_fd_select(n, read, write, except, timeout);
2642 if (result < 0) lerrno =
errno;
2664 wait_rest.
tv_sec = (
unsigned int)d;
2665 wait_rest.
tv_usec = (int)((d-(
double)wait_rest.
tv_sec)*1e6);
2692 thread_debug(
"rb_thread_wait_fd_rw(%d, %s)\n", fd, read ?
"read" :
"write");
2698 while (result <= 0) {
2706 thread_debug(
"rb_thread_wait_fd_rw(%d, %s): done\n", fd, read ?
"read" :
"write");
2770 if (!read && !write && !except) {
2788 return do_select(max, read, write, except, timeout);
2796 #if defined(HAVE_POLL) && defined(linux)
/* Event masks for translating poll(2) revents back into select()-style
 * readable / writable / exceptional conditions (HUP/ERR count as both
 * readable and writable so callers notice the error). */
2803 #define POLLIN_SET (POLLRDNORM | POLLRDBAND | POLLIN | POLLHUP | POLLERR)
2804 #define POLLOUT_SET (POLLWRBAND | POLLWRNORM | POLLOUT | POLLERR)
2805 #define POLLEX_SET (POLLPRI)
/* Max/min of time_t computed portably for signed or unsigned time_t
 * (`~(time_t)0 <= 0` tests signedness).  NOTE(review): TIMET_MIN's
 * shift-into-sign-bit relies on the usual two's-complement conversion —
 * confirm on exotic targets. */
2807 #define TIMET_MAX (~(time_t)0 <= 0 ? (time_t)((~(unsigned_time_t)0) >> 1) : (time_t)(~(unsigned_time_t)0))
2808 #define TIMET_MIN (~(time_t)0 <= 0 ? (time_t)(((unsigned_time_t)1) << (sizeof(time_t) * CHAR_BIT - 1)) : (time_t)0)
2812 int ppoll(
struct pollfd *fds, nfds_t nfds,
2813 const struct timespec *ts,
const sigset_t *sigmask)
2824 tmp2 = ts->
tv_nsec / (1000 * 1000);
2828 timeout_ms = tmp + tmp2;
2833 return poll(fds, nfds, timeout_ms);
2858 fds.events = (short)events;
2863 result = ppoll(&fds, 1, timeout, NULL);
2864 if (result < 0) lerrno =
errno;
2889 if (fds.revents & POLLNVAL) {
2899 if (fds.revents & POLLIN_SET)
2901 if (fds.revents & POLLOUT_SET)
2903 if (fds.revents & POLLEX_SET)
2988 #ifdef USE_CONSERVATIVE_STACK_END
2993 *stack_end_p = &stack_end;
3002 th->machine_register_stack_end = rb_ia64_bsp();
3034 if (vm->prove_profile.enable) {
3037 if (vm->during_gc) {
3047 if (timer_thread_id && native_stop_timer_thread(close_anyway)) {
3048 native_reset_timer_thread();
3055 native_reset_timer_thread();
3062 rb_thread_create_timer_thread();
3083 if (
RTEST(coverages)) {
3158 return ptr ?
sizeof(
struct thgroup) : 0;
3337 "can't move from the enclosed thread group");
/* Extract the rb_mutex_t* from a Mutex VALUE, type-checked via the
 * TypedData mechanism (raises on wrong type). */
3370 #define GetMutexPtr(obj, tobj) \
3371 TypedData_Get_Struct((obj), rb_mutex_t, &mutex_data_type, (tobj))
/* No GC mark function for Mutex — presumably rb_mutex_t holds no VALUE
 * references that need marking; TODO confirm against the struct. */
3375 #define mutex_mark NULL
3385 if (err)
rb_bug(
"%s", err);
3387 native_mutex_destroy(&mutex->
lock);
3388 native_cond_destroy(&mutex->
cond);
3422 native_mutex_initialize(&mutex->
lock);
3423 native_cond_initialize(&mutex->
cond, RB_CONDATTR_CLOCK_MONOTONIC);
3485 native_mutex_lock(&mutex->
lock);
3486 if (mutex->
th == 0) {
3492 native_mutex_unlock(&mutex->
lock);
3500 int interrupted = 0;
3523 timeout_rel.
tv_nsec = timeout_ms * 1000 * 1000;
3524 timeout = native_cond_timeout(&mutex->
cond, timeout_rel);
3525 err = native_cond_timedwait(&mutex->
cond, &mutex->
lock, &timeout);
3528 native_cond_wait(&mutex->
cond, &mutex->
lock);
3541 native_mutex_lock(&mutex->
lock);
3543 native_cond_broadcast(&mutex->
cond);
3544 native_mutex_unlock(&mutex->
lock);
3574 while (mutex->
th != th) {
3584 native_mutex_lock(&mutex->
lock);
3598 interrupted =
lock_func(th, mutex, timeout_ms);
3599 native_mutex_unlock(&mutex->
lock);
3602 if (patrol_thread == th)
3603 patrol_thread =
NULL;
3608 if (mutex->
th && interrupted == 2) {
3612 th->
status = prev_status;
3632 native_mutex_lock(&mutex->
lock);
3634 if (mutex->
th == 0) {
3635 err =
"Attempt to unlock a mutex which is not locked";
3637 else if (mutex->
th != th) {
3638 err =
"Attempt to unlock a mutex which is locked by another thread";
3643 native_cond_signal(&mutex->
cond);
3646 native_mutex_unlock(&mutex->
lock);
3650 if (th_mutex == mutex) {
3657 if (tmp_mutex == mutex) {
3661 th_mutex = tmp_mutex;
3724 if (!
NIL_P(timeout)) {
3729 if (
NIL_P(timeout)) {
3735 end = time(0) - beg;
/* Fetch the Barrier's underlying data (stored directly as a VALUE), with a
 * typed-data check that raises on wrong type. */
3791 #define GetBarrierPtr(obj) ((VALUE)rb_check_typeddata((obj), &barrier_data_type))
3807 if (!mutex)
return Qfalse;
/* Compare two object ids.  When pointers fit in a long, ids are Fixnum-safe
 * and plain == suffices; when pointers are wider than long (LLP64-style
 * platforms) an id may be a Bignum, so fall back to rb_big_eql for that
 * case. */
3871 #if SIZEOF_LONG == SIZEOF_VOIDP
3872 #define OBJ_ID_EQL(obj_id, other) ((obj_id) == (other))
3873 #elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
3874 #define OBJ_ID_EQL(obj_id, other) (RB_TYPE_P((obj_id), T_BIGNUM) ? \
3875 rb_big_eql((obj_id), (other)) : ((obj_id) == (other)))
3881 if (paired_obj_id) {
3916 VALUE other_paired_obj = pair_list;
3939 if (pair_list ==
Qundef) {
4007 if (outer && !outermost) {
4019 if (result == p.list) {
/* Private flag bit OR'ed into an event hook's flag word to mark it as
 * logically removed (see the hook->flag tests below).  NOTE(review): must
 * stay disjoint from every public RUBY_EVENT_* bit — confirm. */
4066 #define RUBY_EVENT_REMOVED 0x1000000
4092 hook->
flag = events;
4162 for (; hook; hook = hook->
next) {
4167 if (flag & hook->
flag) {
4168 (*hook->
func)(flag, hook->
data,
self,
id, klass);
4205 th->
tracing &= ~EVENT_RUNNING_VM;
4245 if (func == 0 || hook->
func == func) {
4528 if (p->
klass != 0) {
4539 klass =
RBASIC(klass)->klass;
4542 klass =
rb_iv_get(klass,
"__attached__");
4546 argv[0] = eventname;
4551 argv[5] = klass ? klass :
Qnil;
4581 volatile int raised;
4582 volatile int outer_state;
4585 if (running == ev && !always) {
4593 outer_state = th->
state;
4598 result = (*func)(
arg, running);
4613 th->
state = outer_state;
/* All rb_intern() calls below use string literals, so redirect them to the
 * const variant — presumably to intern without copying; TODO confirm
 * rb_intern_const semantics. */
4658 #define rb_intern(str) rb_intern_const(str)
4675 #if THREAD_DEBUG < 0
4731 recursive_key =
rb_intern(
"__recursive_key__");
4745 gvl_acquire(th->
vm, th);
4750 rb_thread_create_timer_thread();
4753 (void)native_mutex_trylock;
4778 native_mutex_lock(&mutex->
lock);
4782 native_mutex_unlock(&mutex->
lock);
4788 #ifdef DEBUG_DEADLOCK_CHECK
4801 native_mutex_lock(&mutex->
lock);
4803 native_mutex_unlock(&mutex->
lock);
4819 if (patrol_thread && patrol_thread !=
GET_THREAD())
return;
4827 #ifdef DEBUG_DEADLOCK_CHECK
4840 if (coverage &&
RBASIC(coverage)->klass == 0) {
4856 return GET_VM()->coverages;
4862 GET_VM()->coverages = coverages;