54 #undef _FORTIFY_SOURCE
55 #undef __USE_FORTIFY_LEVEL
56 #define __USE_FORTIFY_LEVEL 0
66 #ifndef USE_NATIVE_THREAD_PRIORITY
67 #define USE_NATIVE_THREAD_PRIORITY 0
68 #define RUBY_THREAD_PRIORITY_MAX 3
69 #define RUBY_THREAD_PRIORITY_MIN -3
73 #define THREAD_DEBUG 0
76 #define TIMET_MAX (~(time_t)0 <= 0 ? (time_t)((~(unsigned_time_t)0) >> 1) : (time_t)(~(unsigned_time_t)0))
77 #define TIMET_MIN (~(time_t)0 <= 0 ? (time_t)(((unsigned_time_t)1) << (sizeof(time_t) * CHAR_BIT - 1)) : (time_t)0)
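The two macros above derive the extreme time_t values at compile time: `~(time_t)0 <= 0` is true only when time_t is signed, in which case the all-ones bit pattern shifted right by one is the maximum. A minimal standalone sketch of the same trick, assuming `unsigned_time_t` is an unsigned type of the same width as time_t (the real typedef appears earlier in the file and is not captured in this excerpt):

/* sketch only -- not part of thread.c; unsigned_time_t is an assumption here */
#include <stdio.h>
#include <time.h>

typedef unsigned long long unsigned_time_t;   /* assumed: same width as time_t */

#define SKETCH_TIMET_MAX \
    (~(time_t)0 <= 0 ? (time_t)((~(unsigned_time_t)0) >> 1) : (time_t)(~(unsigned_time_t)0))

int main(void)
{
    printf("time_t is %s, max = %lld\n",
           (~(time_t)0 <= 0) ? "signed" : "unsigned",
           (long long)SKETCH_TIMET_MAX);
    return 0;
}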
94 #define eKillSignal INT2FIX(0)
95 #define eTerminateSignal INT2FIX(1)
98 #define closed_stream_error GET_VM()->special_exceptions[ruby_error_closed_stream]
108 #define THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION
124 #define RB_GC_SAVE_MACHINE_REGISTER_STACK(th) \
125 do{(th)->machine_register_stack_end = rb_ia64_bsp();}while(0)
127 #define RB_GC_SAVE_MACHINE_REGISTER_STACK(th)
129 #define RB_GC_SAVE_MACHINE_CONTEXT(th) \
131 FLUSH_REGISTER_WINDOWS; \
132 RB_GC_SAVE_MACHINE_REGISTER_STACK(th); \
133 setjmp((th)->machine_regs); \
134 SET_MACHINE_STACK_END(&(th)->machine_stack_end); \
137 #define GVL_UNLOCK_BEGIN() do { \
138 rb_thread_t *_th_stored = GET_THREAD(); \
139 RB_GC_SAVE_MACHINE_CONTEXT(_th_stored); \
140 gvl_release(_th_stored->vm);
142 #define GVL_UNLOCK_END() \
143 gvl_acquire(_th_stored->vm, _th_stored); \
144 rb_thread_set_current(_th_stored); \
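GVL_UNLOCK_BEGIN opens a block (it contains an unmatched `do {`) and GVL_UNLOCK_END re-acquires the GVL and closes that block, so the two must always appear as a bracket pair in the same function. A hedged usage sketch; `heavy_native_work()` is a hypothetical function, not something defined in this file:

/* sketch only -- not part of thread.c; heavy_native_work() is hypothetical */
extern void heavy_native_work(void);

static void
sketch_without_gvl(void)
{
    GVL_UNLOCK_BEGIN();
    heavy_native_work();   /* runs while this thread does not hold the GVL;
                              no Ruby API may be called in here */
    GVL_UNLOCK_END();      /* re-acquires the GVL and closes the block */
}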
148 #define only_if_constant(expr, notconst) (__builtin_constant_p(expr) ? (expr) : (notconst))
150 #define only_if_constant(expr, notconst) notconst
152 #define BLOCKING_REGION(exec, ubf, ubfarg, fail_if_interrupted) do { \
153 rb_thread_t *__th = GET_THREAD(); \
154 struct rb_blocking_region_buffer __region; \
155 if (blocking_region_begin(__th, &__region, (ubf), (ubfarg), fail_if_interrupted) || \
157 !only_if_constant(fail_if_interrupted, TRUE)) { \
159 blocking_region_end(__th, &__region); \
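BLOCKING_REGION is the file-internal way to run a blocking system call with the GVL released and an unblocking function registered, so the thread stays interruptible while it waits. A hedged sketch of a typical call site, assuming the usual read(2) from <unistd.h>; `sketch_read()` and `my_ubf()` are hypothetical names, and real call sites in this file pass their own unblocking functions:

/* sketch only -- not part of thread.c; my_ubf() and sketch_read() are hypothetical */
static ssize_t
sketch_read(int fd, void *buf, size_t len)
{
    ssize_t n;
    BLOCKING_REGION({
        n = read(fd, buf, len);          /* executed with the GVL released */
    }, my_ubf, (void *)(long)fd, FALSE);  /* my_ubf is called if the thread is interrupted */
    return n;
}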
164 #ifdef HAVE_VA_ARGS_MACRO
165 void rb_thread_debug(const char *file, int line, const char *fmt, ...);
166 #define thread_debug(fmt, ...) rb_thread_debug(__FILE__, __LINE__, fmt, ##__VA_ARGS__)
167 #define POSITION_FORMAT "%s:%d:"
168 #define POSITION_ARGS ,file, line
170 void rb_thread_debug(const char *fmt, ...);
171 #define thread_debug rb_thread_debug
172 #define POSITION_FORMAT
173 #define POSITION_ARGS
176 # if THREAD_DEBUG < 0
177 static int rb_thread_debug_enabled;
188 rb_thread_s_debug(void)
190 return INT2NUM(rb_thread_debug_enabled);
204 rb_thread_debug_enabled = RTEST(val) ? NUM2INT(val) : 0;
208 # define rb_thread_debug_enabled THREAD_DEBUG
211 #define thread_debug if(0)printf
215 #define thread_start_func_2(th, st, rst) thread_start_func_2(th, st)
218 VALUE *register_stack_start));
224 #define DEBUG_OUT() \
225 WaitForSingleObject(&debug_mutex, INFINITE); \
226 printf(POSITION_FORMAT"%p - %s" POSITION_ARGS, GetCurrentThreadId(), buf); \
228 ReleaseMutex(&debug_mutex);
230 #elif defined(HAVE_PTHREAD_H)
233 #define DEBUG_OUT() \
234 pthread_mutex_lock(&debug_mutex); \
235 printf(POSITION_FORMAT"%#"PRIxVALUE" - %s" POSITION_ARGS, (VALUE)pthread_self(), buf); \
237 pthread_mutex_unlock(&debug_mutex);
240 #error "unsupported thread type"
244 static int debug_mutex_initialized = 1;
249 #ifdef HAVE_VA_ARGS_MACRO
250 const char *file, int line,
252 const char *fmt, ...)
257 if (!rb_thread_debug_enabled) return;
259 if (debug_mutex_initialized == 1) {
260 debug_mutex_initialized = 0;
261 native_mutex_initialize(&debug_mutex);
283 native_mutex_unlock(lock);
289 native_mutex_destroy(lock);
297 if (fail_if_interrupted) {
365 if (th != main_thread) {
371 thread_debug("terminate_i: main thread (%p)\n", (void *)th);
404 if (err) rb_bug("invalid keeping_mutexes: %s", err);
415 rb_bug("rb_thread_terminate_all: called by child thread (%p, %p)",
423 thread_debug("rb_thread_terminate_all (main thread: %p)\n", (void *)th);
449 th->machine_register_stack_start = th->machine_register_stack_end = 0;
470 native_thread_destroy(th);
478 native_thread_init_stack(th);
490 # ifdef USE_SIGALTSTACK
493 rb_register_sigaltstack(th);
497 rb_bug("thread_start_func_2 must not used for main thread");
499 ruby_thread_set_native(th);
503 th->machine_register_stack_start = register_stack_start;
507 gvl_acquire(th->vm, th);
509 thread_debug("thread start (get lock): %p\n", (void *)th);
566 rb_bug("thread_start_func_2: locking_mutex must not be set (%p:%"PRIxVALUE")",
586 join_list = join_list->next;
597 native_mutex_lock(&th->vm->thread_destruct_lock);
599 th->vm->running_thread = NULL;
600 native_mutex_unlock(&th->vm->thread_destruct_lock);
615 "can't start a new thread (frozen ThreadGroup)");
637 err = native_thread_create(th);
734 #define DELAY_INFTY 1E30
752 if ((*p)->th == th) {
795 if (th == target_th) {
798 if (GET_VM()->main_thread == target_th) {
838 return target_th->self;
950 while (th->status == status) {
969 #if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
972 if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0) {
1005 native_sleep(th, &tv);
1018 if (!spurious_check)
1021 th->status = prev_status;
1041 #if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
1044 if (clock_gettime(CLOCK_MONOTONIC, &tp) == 0) {
1135 gvl_yield(th->vm, th);
1163 thread_debug("enter blocking region (%p)\n", (void *)th);
1165 gvl_release(th->vm);
1176 gvl_acquire(th->vm, th);
1178 thread_debug("leave blocking region (%p)\n", (void *)th);
1179 remove_signal_thread_list(th);
1199 int saved_errno = errno;
1204 errno = saved_errno;
1214 int saved_errno = 0;
1224 saved_errno = errno;
1225 }, ubf, data2, fail_if_interrupted);
1227 if (!fail_if_interrupted) {
1231 errno = saved_errno;
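The `saved_errno` dance around the blocking region exists because re-acquiring the GVL can run interrupt checks and other VM code that clobber `errno`. From an extension's point of view the same pattern is packaged by the public `rb_thread_call_without_gvl()` API; a hedged sketch, where `nap()` and `sketch_sleep_without_gvl()` are hypothetical and stand in for a real blocking native call:

/* sketch only -- not part of thread.c; nap() is a stand-in for a blocking call */
static void *
nap(void *arg)
{
    /* the GVL is NOT held here, so no Ruby API calls are allowed */
    sleep((unsigned int)(long)arg);
    return NULL;
}

static VALUE
sketch_sleep_without_gvl(VALUE self, VALUE secs)
{
    rb_thread_call_without_gvl(nap, (void *)NUM2LONG(secs), RUBY_UBF_IO, NULL);
    return self;
}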
1340 int saved_errno = 0;
1349 saved_errno = errno;
1363 errno = saved_errno;
1373 void *(*f)(void*) = (void *(*)(void*))func;
1419 fprintf(stderr, "[BUG] rb_thread_call_with_gvl() is called by non-ruby thread\n");
1427 rb_bug("rb_thread_call_with_gvl: called by a thread which has GVL.");
1524 for (i=0; i<mask_stack_len; i++) {
1525 mask = mask_stack[mask_stack_len-(i+1)];
1527 for (j=0; j<ancestors_len; j++) {
1582 switch (mask_timing) {
1904 int timer_interrupt;
1905 int pending_interrupt;
1906 int finalizer_interrupt;
1912 } while (old != interrupt);
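The `} while (old != interrupt);` above closes the usual compare-and-swap drain loop: snapshot the interrupt word, try to swap in a new value, and retry if another thread raced in between. A simplified sketch of that shape, assuming ATOMIC_CAS returns the previous value (as ruby_atomic.h defines it); clearing to 0 here is a simplification of what the real code stores:

/* sketch only -- simplified; ATOMIC_CAS is assumed to return the previous value */
static rb_atomic_t
sketch_drain_interrupt(rb_thread_t *th)
{
    rb_atomic_t interrupt, old;
    do {
        interrupt = th->interrupt_flag;                      /* snapshot */
        old = ATOMIC_CAS(th->interrupt_flag, interrupt, 0);  /* try to claim every bit */
    } while (old != interrupt);                              /* lost a race: retry */
    return interrupt;                                        /* bits this caller now owns */
}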
1955 if (finalizer_interrupt) {
1959 if (timer_interrupt) {
1960 unsigned long limits_us = TIME_QUANTUM_USEC;
2031 #if defined(POSIX_SIGNAL) && defined(SIGSEGV) && defined(HAVE_SIGALTSTACK)
2032 #define USE_SIGALTSTACK
2039 #ifdef USE_SIGALTSTACK
2118 if (th == target_th) {
2300 "stopping only thread\n\tnote: use sleep to stop forever");
2661 str = rb_sprintf("#<%s:%p %s>", cname, (void *)thread, status);
3006 if (!RHASH(locals)->ntbl)
3075 #if USE_NATIVE_THREAD_PRIORITY
3077 native_thread_apply_priority(th);
3093 #if defined(NFDBITS) && defined(HAVE_RB_FD_INIT)
3130 fds->fdset = ALLOC(fd_set);
3131 FD_ZERO(fds->fdset);
3137 size_t size = howmany(rb_fd_max(src), NFDBITS) * sizeof(fd_mask);
3139 if (size < sizeof(fd_set))
3140 size = sizeof(fd_set);
3141 dst->maxfd = src->maxfd;
3143 memcpy(dst->fdset, src->fdset, size);
3149 if (fds->fdset) xfree(fds->fdset);
3158 MEMZERO(fds->fdset, fd_mask, howmany(fds->maxfd, NFDBITS));
3164 size_t m = howmany(n + 1, NFDBITS) * sizeof(fd_mask);
3165 size_t o = howmany(fds->maxfd, NFDBITS) * sizeof(fd_mask);
3167 if (m < sizeof(fd_set)) m = sizeof(fd_set);
3168 if (o < sizeof(fd_set)) o = sizeof(fd_set);
3171 fds->fdset = xrealloc(fds->fdset, m);
3172 memset((char *)fds->fdset + o, 0, m - o);
3174 if (n >= fds->maxfd) fds->maxfd = n + 1;
3187 if (n >= fds->maxfd) return;
3194 if (n >= fds->maxfd) return 0;
3195 return FD_ISSET(n, fds->fdset) != 0;
3201 size_t size = howmany(max, NFDBITS) * sizeof(fd_mask);
3203 if (size < sizeof(fd_set)) size = sizeof(fd_set);
3205 dst->fdset = xrealloc(dst->fdset, size);
3206 memcpy(dst->fdset, src, size);
3212 size_t size = howmany(rb_fd_max(src), NFDBITS) * sizeof(fd_mask);
3214 if (size > sizeof(fd_set)) {
3223 size_t size = howmany(rb_fd_max(src), NFDBITS) * sizeof(fd_mask);
3225 if (size < sizeof(fd_set))
3226 size = sizeof(fd_set);
3227 dst->maxfd = src->maxfd;
3228 dst->fdset = xrealloc(dst->fdset, size);
3229 memcpy(dst->fdset, src->fdset, size);
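The rb_fd_* wrappers above replace a fixed fd_set with a heap-allocated bitmap sized by `howmany(fd + 1, NFDBITS) * sizeof(fd_mask)`, so descriptors above FD_SETSIZE can still be watched. A hedged usage sketch of that wrapper API; `watch_fd` is a hypothetical descriptor:

/* sketch only -- not part of thread.c; watch_fd is a hypothetical descriptor */
static int
sketch_watch_one_fd(int watch_fd)
{
    rb_fdset_t fds;
    int hit;

    rb_fd_init(&fds);                 /* starts with one fd_set worth of bits */
    rb_fd_set(watch_fd, &fds);        /* grows the bitmap if watch_fd >= maxfd */
    hit = rb_fd_isset(watch_fd, &fds);
    rb_fd_term(&fds);                 /* frees the bitmap */
    return hit;
}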
3232 #ifdef __native_client__
3233 int select(int nfds, fd_set *readfds, fd_set *writefds,
3234 fd_set *exceptfds, struct timeval *timeout);
3253 return select(n, r, w, e, timeout);
3261 #define FD_ZERO(f) rb_fd_zero(f)
3262 #define FD_SET(i, f) rb_fd_set((i), (f))
3263 #define FD_CLR(i, f) rb_fd_clr((i), (f))
3264 #define FD_ISSET(i, f) rb_fd_isset((i), (f))
3266 #elif defined(_WIN32)
3271 set->capa = FD_SETSIZE;
3272 set->fdset = ALLOC(fd_set);
3273 FD_ZERO(set->fdset);
3290 if (max > FD_SETSIZE || (UINT)max > dst->fd_count) {
3294 memcpy(dst->fd_array, src->fdset->fd_array, max);
3295 dst->fd_count = max;
3312 for (i = 0; i < set->fdset->fd_count; i++) {
3313 if (set->fdset->fd_array[i] == s) {
3317 if (set->fdset->fd_count >= (unsigned)set->capa) {
3318 set->capa = (set->fdset->fd_count / FD_SETSIZE + 1) * FD_SETSIZE;
3319 set->fdset = xrealloc(set->fdset, sizeof(unsigned int) + sizeof(SOCKET) * set->capa);
3321 set->fdset->fd_array[set->fdset->fd_count++] = s;
3329 #define FD_ZERO(f) rb_fd_zero(f)
3330 #define FD_SET(i, f) rb_fd_set((i), (f))
3331 #define FD_CLR(i, f) rb_fd_clr((i), (f))
3332 #define FD_ISSET(i, f) rb_fd_isset((i), (f))
3335 #define rb_fd_rcopy(d, s) (*(d) = *(s))
3353 limit += (double)timeout->tv_sec+(double)timeout->tv_usec*1e-6;
3354 wait_rest = *timeout;
3355 timeout = &wait_rest;
3369 result = native_fd_select(n, read, write, except, timeout, th);
3393 wait_rest.tv_sec = (time_t)d;
3421 thread_debug("rb_thread_wait_fd_rw(%d, %s)\n", fd, read ? "read" : "write");
3432 thread_debug("rb_thread_wait_fd_rw(%d, %s): done\n", fd, read ? "read" : "write");
3496 if (!read && !write && !except) {
3514 return do_select(max, read, write, except, timeout);
3522 #if defined(HAVE_POLL) && defined(__linux__)
3529 #define POLLIN_SET (POLLRDNORM | POLLRDBAND | POLLIN | POLLHUP | POLLERR)
3530 #define POLLOUT_SET (POLLWRBAND | POLLWRNORM | POLLOUT | POLLERR)
3531 #define POLLEX_SET (POLLPRI)
3536 ppoll(struct pollfd *fds, nfds_t nfds,
3537 const struct timespec *ts, const sigset_t *sigmask)
3548 tmp2 = ts->tv_nsec / (1000 * 1000);
3552 timeout_ms = tmp + tmp2;
3558 return poll(fds, nfds, timeout_ms);
3584 fds.events = (short)events;
3589 result = ppoll(&fds, 1, timeout, NULL);
3590 if (result < 0) lerrno = errno;
3617 if (fds.revents & POLLNVAL) {
3627 if (fds.revents & POLLIN_SET)
3629 if (fds.revents & POLLOUT_SET)
3631 if (fds.revents & POLLEX_SET)
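POLLIN_SET, POLLOUT_SET, and POLLEX_SET translate poll(2) revents back into the RB_WAITFD_* bits that rb_wait_for_single_fd() returns. A hedged caller-side sketch of that public API; the descriptor and the one-second timeout are hypothetical:

/* sketch only -- not part of thread.c; fd is a hypothetical descriptor */
static int
sketch_wait_readable(int fd)
{
    struct timeval tv = { 1, 0 };                       /* wait at most 1 second */
    int result = rb_wait_for_single_fd(fd, RB_WAITFD_IN, &tv);

    if (result < 0) rb_sys_fail("rb_wait_for_single_fd");  /* errno is set on error */
    return (result & RB_WAITFD_IN) != 0;                /* readable before the timeout? */
}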
3717 #ifdef USE_CONSERVATIVE_STACK_END
3722 *stack_end_p = &stack_end;
3762 if (vm->prove_profile.enable) {
3765 if (vm->during_gc) {
3775 if (timer_thread_id && native_stop_timer_thread(close_anyway)) {
3776 native_reset_timer_thread();
3783 native_reset_timer_thread();
3790 rb_thread_create_timer_thread();
3811 if (RTEST(coverages)) {
3884 return ptr ? sizeof(struct thgroup) : 0;
4069 "can't move from the enclosed thread group");
4102 #define GetMutexPtr(obj, tobj) \
4103 TypedData_Get_Struct((obj), rb_mutex_t, &mutex_data_type, (tobj))
4105 #define mutex_mark NULL
4115 if (err) rb_bug("%s", err);
4117 native_mutex_destroy(&mutex->lock);
4118 native_cond_destroy(&mutex->cond);
4152 native_mutex_initialize(&mutex->lock);
4153 native_cond_initialize(&mutex->cond, RB_CONDATTR_CLOCK_MONOTONIC);
4215 native_mutex_lock(&mutex->lock);
4216 if (mutex->th == 0) {
4222 native_mutex_unlock(&mutex->lock);
4230 int interrupted = 0;
4253 timeout_rel.tv_nsec = timeout_ms * 1000 * 1000;
4254 timeout = native_cond_timeout(&mutex->cond, timeout_rel);
4255 err = native_cond_timedwait(&mutex->cond, &mutex->lock, &timeout);
4258 native_cond_wait(&mutex->cond, &mutex->lock);
4271 native_mutex_lock(&mutex->lock);
4273 native_cond_broadcast(&mutex->cond);
4274 native_mutex_unlock(&mutex->lock);
4308 while (mutex->th != th) {
4311 volatile int timeout_ms = 0;
4318 native_mutex_lock(&mutex->lock);
4332 interrupted = lock_func(th, mutex, (int)timeout_ms);
4333 native_mutex_unlock(&mutex->lock);
4336 if (patrol_thread == th)
4337 patrol_thread = NULL;
4342 if (mutex->th && interrupted == 2) {
4346 th->status = prev_status;
4376 if (mutex->th == th)
4387 native_mutex_lock(&mutex->lock);
4389 if (mutex->th == 0) {
4390 err = "Attempt to unlock a mutex which is not locked";
4392 else if (mutex->th != th) {
4393 err = "Attempt to unlock a mutex which is locked by another thread";
4398 native_cond_signal(&mutex->cond);
4401 native_mutex_unlock(&mutex->lock);
4405 while (*th_mutex != mutex) {
4452 if (mutex->th == th)
4491 if (!NIL_P(timeout)) {
4496 if (NIL_P(timeout)) {
4502 end = time(0) - beg;
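The native lock/condition calls above implement Ruby's Mutex; from C the usual entry points are rb_mutex_new(), rb_mutex_lock()/rb_mutex_unlock(), and rb_mutex_synchronize(), which releases the lock even if the body raises. A hedged sketch of a C-level critical section, where the mutex is assumed to come from rb_mutex_new() and `locked_body()` is hypothetical:

/* sketch only -- not part of thread.c; locked_body() is hypothetical */
static VALUE
locked_body(VALUE arg)
{
    /* runs while the mutex is held; raising here still releases the lock */
    return arg;
}

static VALUE
sketch_critical_section(VALUE mutex)
{
    return rb_mutex_synchronize(mutex, locked_body, Qnil);
}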
4586 #define GetThreadShieldPtr(obj) ((VALUE)rb_check_typeddata((obj), &thread_shield_data_type))
4587 #define THREAD_SHIELD_WAITING_MASK (FL_USER0|FL_USER1|FL_USER2|FL_USER3|FL_USER4|FL_USER5|FL_USER6|FL_USER7|FL_USER8|FL_USER9|FL_USER10|FL_USER11|FL_USER12|FL_USER13|FL_USER14|FL_USER15|FL_USER16|FL_USER17|FL_USER18|FL_USER19)
4588 #define THREAD_SHIELD_WAITING_SHIFT (FL_USHIFT)
4589 #define rb_thread_shield_waiting(b) (int)((RBASIC(b)->flags&THREAD_SHIELD_WAITING_MASK)>>THREAD_SHIELD_WAITING_SHIFT)
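The three macros above pack a waiter count into the FL_USER* bits of the ThreadShield object's flags word, shifted down by THREAD_SHIELD_WAITING_SHIFT when read back. A simplified sketch of what incrementing that packed counter amounts to; the helpers in the full source also range-check against the mask before storing:

/* sketch only -- simplified; the real inc/dec helpers also guard the range */
static void
sketch_shield_waiting_inc(VALUE b)
{
    VALUE w = rb_thread_shield_waiting(b) + 1;                /* unpack, bump */
    RBASIC(b)->flags &= ~THREAD_SHIELD_WAITING_MASK;          /* clear the old count */
    RBASIC(b)->flags |= (w << THREAD_SHIELD_WAITING_SHIFT);   /* repack the new one */
}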
4617 return thread_shield;
4634 if (!mutex) return Qfalse;
4709 #if SIZEOF_LONG == SIZEOF_VOIDP
4710 #define OBJ_ID_EQL(obj_id, other) ((obj_id) == (other))
4711 #elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
4712 #define OBJ_ID_EQL(obj_id, other) (RB_TYPE_P((obj_id), T_BIGNUM) ? \
4713 rb_big_eql((obj_id), (other)) : ((obj_id) == (other)))
4719 if (paired_obj_id) {
4754 VALUE other_paired_obj = pair_list;
4777 if (pair_list == Qundef) {
4845 if (outer && !outermost) {
4857 if (result == p.list) {
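The list/pair bookkeeping above is what rb_exec_recursive() and its variants use to notice that an object is already being visited on the current thread's stack, which is how cyclic structures avoid infinite recursion in inspect-style walks. A hedged sketch of the extension-facing API; the sketch_* names are hypothetical:

/* sketch only -- not part of thread.c; sketch_* names are hypothetical */
static VALUE
sketch_inspect_body(VALUE obj, VALUE arg, int recursive)
{
    if (recursive) return rb_str_new2("[...]");   /* obj is already on this thread's stack */
    return rb_inspect(obj);
}

static VALUE
sketch_safe_inspect(VALUE obj)
{
    return rb_exec_recursive(sketch_inspect_body, obj, 0);
}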
4973 #define rb_intern(str) rb_intern_const(str)
4994 #if THREAD_DEBUG < 0
5060 recursive_key = rb_intern("__recursive_key__");
5069 gvl_acquire(th->vm, th);
5081 rb_thread_create_timer_thread();
5084 (void)native_mutex_trylock;
5109 native_mutex_lock(&mutex->lock);
5113 native_mutex_unlock(&mutex->lock);
5119 #ifdef DEBUG_DEADLOCK_CHECK
5132 native_mutex_lock(&mutex->lock);
5134 native_mutex_unlock(&mutex->lock);
5150 if (patrol_thread && patrol_thread != GET_THREAD()) return;
5157 argv[1] = rb_str_new2("No live threads left. Deadlock?");
5158 #ifdef DEBUG_DEADLOCK_CHECK
5171 if (coverage && RBASIC(coverage)->klass == 0) {
5187 return GET_VM()->coverages;
5193 GET_VM()->coverages = coverages;