Ruby 2.0.0p594 (2014-10-27 revision 48167)
thread.c
Go to the documentation of this file.
1 /**********************************************************************
2 
3  thread.c -
4 
5  $Author: usa $
6 
7  Copyright (C) 2004-2007 Koichi Sasada
8 
9 **********************************************************************/
10 
11 /*
12  YARV Thread Design
13 
14  model 1: Userlevel Thread
15  Same as traditional ruby thread.
16 
17  model 2: Native Thread with Global VM lock
18  Using pthread (or Windows thread) and Ruby threads run concurrent.
19 
20  model 3: Native Thread with fine grain lock
21  Using pthread and Ruby threads run concurrent or parallel.
22 
23 ------------------------------------------------------------------------
24 
25  model 2:
26  Only a thread that holds the mutex (GVL: Global VM Lock or Giant VM Lock) can run.
27  When thread scheduling, running thread release GVL. If running thread
28  try blocking operation, this thread must release GVL and another
29  thread can continue this flow. After blocking operation, thread
30  must check interrupt (RUBY_VM_CHECK_INTS).
31 
32  Every VM can run parallel.
33 
34  Ruby threads are scheduled by OS thread scheduler.
35 
36 ------------------------------------------------------------------------
37 
38  model 3:
39  Every threads run concurrent or parallel and to access shared object
40  exclusive access control is needed. For example, to access String
41  object or Array object, fine grain lock must be locked every time.
42  */
43 
44 
45 /*
46  * FD_SET, FD_CLR and FD_ISSET have a small sanity check when using glibc
47  * 2.15 or later and set _FORTIFY_SOURCE > 0.
48  * However, the implementation is wrong. Even though Linux's select(2)
49  * support large fd size (>FD_SETSIZE), it wrongly assume fd is always
50  * less than FD_SETSIZE (i.e. 1024). And then when enabling HAVE_RB_FD_INIT,
51  * it doesn't work correctly and makes program abort. Therefore we need to
52  * disable _FORTIFY_SOURCE until glibc fixes it.
53  */
54 #undef _FORTIFY_SOURCE
55 #undef __USE_FORTIFY_LEVEL
56 #define __USE_FORTIFY_LEVEL 0
57 
58 /* for model 2 */
59 
60 #include "eval_intern.h"
61 #include "gc.h"
62 #include "internal.h"
63 #include "ruby/io.h"
64 #include "ruby/thread.h"
65 
66 #ifndef USE_NATIVE_THREAD_PRIORITY
67 #define USE_NATIVE_THREAD_PRIORITY 0
68 #define RUBY_THREAD_PRIORITY_MAX 3
69 #define RUBY_THREAD_PRIORITY_MIN -3
70 #endif
71 
72 #ifndef THREAD_DEBUG
73 #define THREAD_DEBUG 0
74 #endif
75 
76 #define TIMET_MAX (~(time_t)0 <= 0 ? (time_t)((~(unsigned_time_t)0) >> 1) : (time_t)(~(unsigned_time_t)0))
77 #define TIMET_MIN (~(time_t)0 <= 0 ? (time_t)(((unsigned_time_t)1) << (sizeof(time_t) * CHAR_BIT - 1)) : (time_t)0)
78 
81 
85 
86 static void sleep_timeval(rb_thread_t *th, struct timeval time, int spurious_check);
87 static void sleep_wait_for_interrupt(rb_thread_t *th, double sleepsec, int spurious_check);
88 static void sleep_forever(rb_thread_t *th, int nodeadlock, int spurious_check);
89 static double timeofday(void);
90 static int rb_threadptr_dead(rb_thread_t *th);
91 static void rb_check_deadlock(rb_vm_t *vm);
93 
94 #define eKillSignal INT2FIX(0)
95 #define eTerminateSignal INT2FIX(1)
96 static volatile int system_working = 1;
97 
98 #define closed_stream_error GET_VM()->special_exceptions[ruby_error_closed_stream]
99 
/* Remove `key` from `table`, discarding any stored value.
 * NOTE(review): the name/parameter line (doxygen line 101) is missing from
 * this extraction -- presumably st_delete_wrap(st_table *table, st_data_t key);
 * confirm against upstream thread.c. */
100 inline static void
102 {
103  st_delete(table, &key, 0);
104 }
105 
106 /********************************************************************************/
107 
108 #define THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION
109 
113 };
114 
116  struct rb_unblock_callback *old, int fail_if_interrupted);
117 static void reset_unblock_function(rb_thread_t *th, const struct rb_unblock_callback *old);
118 
119 static inline int blocking_region_begin(rb_thread_t *th, struct rb_blocking_region_buffer *region,
120  rb_unblock_function_t *ubf, void *arg, int fail_if_interrupted);
121 static inline void blocking_region_end(rb_thread_t *th, struct rb_blocking_region_buffer *region);
122 
123 #ifdef __ia64
124 #define RB_GC_SAVE_MACHINE_REGISTER_STACK(th) \
125  do{(th)->machine_register_stack_end = rb_ia64_bsp();}while(0)
126 #else
127 #define RB_GC_SAVE_MACHINE_REGISTER_STACK(th)
128 #endif
129 #define RB_GC_SAVE_MACHINE_CONTEXT(th) \
130  do { \
131  FLUSH_REGISTER_WINDOWS; \
132  RB_GC_SAVE_MACHINE_REGISTER_STACK(th); \
133  setjmp((th)->machine_regs); \
134  SET_MACHINE_STACK_END(&(th)->machine_stack_end); \
135  } while (0)
136 
137 #define GVL_UNLOCK_BEGIN() do { \
138  rb_thread_t *_th_stored = GET_THREAD(); \
139  RB_GC_SAVE_MACHINE_CONTEXT(_th_stored); \
140  gvl_release(_th_stored->vm);
141 
142 #define GVL_UNLOCK_END() \
143  gvl_acquire(_th_stored->vm, _th_stored); \
144  rb_thread_set_current(_th_stored); \
145 } while(0)
146 
147 #ifdef __GNUC__
148 #define only_if_constant(expr, notconst) (__builtin_constant_p(expr) ? (expr) : (notconst))
149 #else
150 #define only_if_constant(expr, notconst) notconst
151 #endif
152 #define BLOCKING_REGION(exec, ubf, ubfarg, fail_if_interrupted) do { \
153  rb_thread_t *__th = GET_THREAD(); \
154  struct rb_blocking_region_buffer __region; \
155  if (blocking_region_begin(__th, &__region, (ubf), (ubfarg), fail_if_interrupted) || \
156  /* always return true unless fail_if_interrupted */ \
157  !only_if_constant(fail_if_interrupted, TRUE)) { \
158  exec; \
159  blocking_region_end(__th, &__region); \
160  }; \
161 } while(0)
162 
163 #if THREAD_DEBUG
164 #ifdef HAVE_VA_ARGS_MACRO
165 void rb_thread_debug(const char *file, int line, const char *fmt, ...);
166 #define thread_debug(fmt, ...) rb_thread_debug(__FILE__, __LINE__, fmt, ##__VA_ARGS__)
167 #define POSITION_FORMAT "%s:%d:"
168 #define POSITION_ARGS ,file, line
169 #else
170 void rb_thread_debug(const char *fmt, ...);
171 #define thread_debug rb_thread_debug
172 #define POSITION_FORMAT
173 #define POSITION_ARGS
174 #endif
175 
176 # if THREAD_DEBUG < 0
177 static int rb_thread_debug_enabled;
178 
179 /*
180  * call-seq:
181  * Thread.DEBUG -> num
182  *
183  * Returns the thread debug level. Available only if compiled with
184  * THREAD_DEBUG=-1.
185  */
186 
187 static VALUE
188 rb_thread_s_debug(void)
189 {
190  return INT2NUM(rb_thread_debug_enabled);
191 }
192 
193 /*
194  * call-seq:
195  * Thread.DEBUG = num
196  *
197  * Sets the thread debug level. Available only if compiled with
198  * THREAD_DEBUG=-1.
199  */
200 
201 static VALUE
202 rb_thread_s_debug_set(VALUE self, VALUE val)
203 {
204  rb_thread_debug_enabled = RTEST(val) ? NUM2INT(val) : 0;
205  return val;
206 }
207 # else
208 # define rb_thread_debug_enabled THREAD_DEBUG
209 # endif
210 #else
211 #define thread_debug if(0)printf
212 #endif
213 
214 #ifndef __ia64
215 #define thread_start_func_2(th, st, rst) thread_start_func_2(th, st)
216 #endif
217 NOINLINE(static int thread_start_func_2(rb_thread_t *th, VALUE *stack_start,
218  VALUE *register_stack_start));
219 static void timer_thread_function(void *);
220 
221 #if defined(_WIN32)
222 #include "thread_win32.c"
223 
224 #define DEBUG_OUT() \
225  WaitForSingleObject(&debug_mutex, INFINITE); \
226  printf(POSITION_FORMAT"%p - %s" POSITION_ARGS, GetCurrentThreadId(), buf); \
227  fflush(stdout); \
228  ReleaseMutex(&debug_mutex);
229 
230 #elif defined(HAVE_PTHREAD_H)
231 #include "thread_pthread.c"
232 
233 #define DEBUG_OUT() \
234  pthread_mutex_lock(&debug_mutex); \
235  printf(POSITION_FORMAT"%#"PRIxVALUE" - %s" POSITION_ARGS, (VALUE)pthread_self(), buf); \
236  fflush(stdout); \
237  pthread_mutex_unlock(&debug_mutex);
238 
239 #else
240 #error "unsupported thread type"
241 #endif
242 
243 #if THREAD_DEBUG
244 static int debug_mutex_initialized = 1;
245 static rb_thread_lock_t debug_mutex;
246 
/* printf-style debug logger for the thread subsystem.
 * Formats into a stack buffer and emits via the platform DEBUG_OUT() macro
 * (which serializes output with debug_mutex). No-op unless
 * rb_thread_debug_enabled is set.
 * NOTE(review): the lazy debug_mutex initialization below is not atomic;
 * presumably first use happens before concurrency -- confirm upstream. */
247 void
248 rb_thread_debug(
249 #ifdef HAVE_VA_ARGS_MACRO
250  const char *file, int line,
251 #endif
252  const char *fmt, ...)
253 {
254  va_list args;
255  char buf[BUFSIZ];
256 
257  if (!rb_thread_debug_enabled) return;
258 
259  if (debug_mutex_initialized == 1) {
260  debug_mutex_initialized = 0;
261  native_mutex_initialize(&debug_mutex);
262  }
263 
264  va_start(args, fmt);
265  vsnprintf(buf, BUFSIZ, fmt, args);
266  va_end(args);
267 
268  DEBUG_OUT();
269 }
270 #endif
271 
/* Three teardown helpers. NOTE(review): each function-name line is missing
 * from this extraction (doxygen lines 273, 281, 287) -- presumably
 * rb_vm_gvl_destroy(rb_vm_t *vm), rb_thread_lock_unlock(rb_thread_lock_t *),
 * and rb_thread_lock_destroy(rb_thread_lock_t *); confirm upstream. */
/* Release then destroy the VM's GVL and the thread-destruct lock. */
272 void
274 {
275  gvl_release(vm);
276  gvl_destroy(vm);
277  native_mutex_destroy(&vm->thread_destruct_lock);
278 }
279 
/* Unlock a native thread lock. */
280 void
282 {
283  native_mutex_unlock(lock);
284 }
285 
/* Destroy a native thread lock. */
286 void
288 {
289  native_mutex_destroy(lock);
290 }
291 
/* Install `func`/`arg` as this thread's unblocking function, saving the old
 * callback into *old (if non-NULL). Returns FALSE without installing when
 * fail_if_interrupted is set and an interrupt is already pending; otherwise
 * loops (via check_ints) until it can install with no interrupt pending
 * while holding th->interrupt_lock, then returns TRUE.
 * NOTE(review): the first signature line (doxygen line 293, function name and
 * leading parameters) is missing from this extraction -- confirm upstream. */
292 static int
294  struct rb_unblock_callback *old, int fail_if_interrupted)
295 {
296  check_ints:
297  if (fail_if_interrupted) {
298  if (RUBY_VM_INTERRUPTED_ANY(th)) {
299  return FALSE;
300  }
301  }
302  else {
303  RUBY_VM_CHECK_INTS(th);
304  }
305 
306  native_mutex_lock(&th->interrupt_lock);
307  if (RUBY_VM_INTERRUPTED_ANY(th)) {
       /* An interrupt arrived before we could install: drop the lock and
        * re-run the interrupt check from the top. */
308  native_mutex_unlock(&th->interrupt_lock);
309  goto check_ints;
310  }
311  else {
312  if (old) *old = th->unblock;
313  th->unblock.func = func;
314  th->unblock.arg = arg;
315  }
316  native_mutex_unlock(&th->interrupt_lock);
317 
318  return TRUE;
319 }
320 
/* Restore a previously saved unblock callback under th->interrupt_lock.
 * NOTE(review): the name line (doxygen line 322) is missing -- presumably
 * reset_unblock_function(rb_thread_t *th, const struct rb_unblock_callback *old). */
321 static void
323 {
324  native_mutex_lock(&th->interrupt_lock);
325  th->unblock = *old;
326  native_mutex_unlock(&th->interrupt_lock);
327 }
328 
/* Post an interrupt to `th` and invoke its unblock function (if any) so a
 * blocked thread wakes up. NOTE(review): the name line and the two
 * interrupt-flag statements for the trap/non-trap branches (doxygen lines
 * 330, 334, 336) are missing from this extraction -- confirm upstream
 * (presumably rb_threadptr_interrupt_common setting interrupt_flag). */
329 static void
331 {
332  native_mutex_lock(&th->interrupt_lock);
333  if (trap)
335  else
337  if (th->unblock.func) {
338  (th->unblock.func)(th->unblock.arg);
339  }
340  else {
341  /* none */
342  }
343  native_mutex_unlock(&th->interrupt_lock);
344 }
345 
/* Two public interrupt wrappers. NOTE(review): their name lines and bodies
 * (doxygen lines 347, 349, 353, 355) are missing -- presumably
 * rb_threadptr_interrupt(th) and rb_threadptr_trap_interrupt(th) delegating
 * to the common helper above; confirm upstream. */
346 void
348 {
350 }
351 
352 void
354 {
356 }
357 
/* st_foreach callback over living_threads: interrupt every thread except
 * `main_thread`. NOTE(review): the name/parameter line and the kill
 * statements (doxygen lines 359, 367-368) are missing from this extraction. */
358 static int
360 {
361  VALUE thval = key;
362  rb_thread_t *th;
363  GetThreadPtr(thval, th);
364 
365  if (th != main_thread) {
366  thread_debug("terminate_i: %p\n", (void *)th);
369  }
370  else {
371  thread_debug("terminate_i: main thread (%p)\n", (void *)th);
372  }
373  return ST_CONTINUE;
374 }
375 
/* Ruby Mutex internal representation. NOTE(review): several field lines
 * (doxygen lines 378-379, 381-383) are missing from this extraction
 * (presumably the native lock/cond, counters, and next_mutex link used by
 * rb_threadptr_unlock_all_locking_mutexes below); confirm upstream. */
376 typedef struct rb_mutex_struct
377 {
380  struct rb_thread_struct volatile *th;  /* owning thread, if locked */
384 } rb_mutex_t;
385 
386 static void rb_mutex_abandon_all(rb_mutex_t *mutexes);
389 static const char* rb_mutex_unlock_th(rb_mutex_t *mutex, rb_thread_t volatile *th);
390 
/* Walk th->keeping_mutexes and unlock each mutex the (terminating) thread
 * still holds; rb_bug on inconsistent state. NOTE(review): the name line
 * (doxygen line 392) is missing -- presumably
 * rb_threadptr_unlock_all_locking_mutexes(rb_thread_t *th). */
391 void
393 {
394  const char *err;
395  rb_mutex_t *mutex;
396  rb_mutex_t *mutexes = th->keeping_mutexes;
397 
398  while (mutexes) {
399  mutex = mutexes;
400  /* rb_warn("mutex #<%p> remains to be locked by terminated thread",
401  mutexes); */
     /* advance before unlocking: unlock may hand the mutex to another thread */
402  mutexes = mutex->next_mutex;
403  err = rb_mutex_unlock_th(mutex, th);
404  if (err) rb_bug("invalid keeping_mutexes: %s", err);
405  }
406 }
407 
/* Terminate every thread except the main thread, then sleep until the last
 * child wakes us. Must be called on the main thread (rb_bug otherwise).
 * NOTE(review): the name line and a few statements (doxygen lines 409, 420,
 * 424, 432 -- presumably the unlock-all-mutexes call, the st_foreach over
 * living_threads with terminate_i, and sleeper bookkeeping) are missing from
 * this extraction; confirm upstream. */
408 void
410 {
411  rb_thread_t *th = GET_THREAD(); /* main thread */
412  rb_vm_t *vm = th->vm;
413 
414  if (vm->main_thread != th) {
415  rb_bug("rb_thread_terminate_all: called by child thread (%p, %p)",
416  (void *)vm->main_thread, (void *)th);
417  }
418 
419  /* unlock all locking mutexes */
421 
422  retry:
423  thread_debug("rb_thread_terminate_all (main thread: %p)\n", (void *)th);
425 
426  while (!rb_thread_alone()) {
427  int state;
428 
     /* sleep until interrupted; an exception (e.g. Thread#raise aimed at
      * main) pops us out with non-zero state and we retry the kill loop */
429  TH_PUSH_TAG(th);
430  if ((state = TH_EXEC_TAG()) == 0) {
431  native_sleep(th, 0);
433  }
434  TH_POP_TAG();
435 
436  if (state) {
437  goto retry;
438  }
439  }
440 }
441 
/* Pre-exec cleanup: mark the thread killed and clear ia64 register-stack
 * bookkeeping. NOTE(review): the name line (doxygen line 443) and one
 * statement (line 447) are missing -- presumably
 * thread_cleanup_func_before_exec(void *th_ptr); confirm upstream. */
442 static void
444 {
445  rb_thread_t *th = th_ptr;
446  th->status = THREAD_KILLED;
448 #ifdef __ia64
449  th->machine_register_stack_start = th->machine_register_stack_end = 0;
450 #endif
451 }
452 
/* Full per-thread cleanup; `atfork` skips native resource teardown.
 * NOTE(review): doxygen line 459 is missing between 458 and 460. */
453 static void
454 thread_cleanup_func(void *th_ptr, int atfork)
455 {
456  rb_thread_t *th = th_ptr;
457 
458  th->locking_mutex = Qfalse;
460 
461  /*
462  * Unfortunately, we can't release native threading resource at fork
463  * because libc may have unstable locking state therefore touching
464  * a threading resource may cause a deadlock.
465  */
466  if (atfork)
467  return;
468 
469  native_mutex_destroy(&th->interrupt_lock);
470  native_thread_destroy(th);
471 }
472 
473 static VALUE rb_threadptr_raise(rb_thread_t *, int, VALUE *);
474 
/* Initialize the native stack bookkeeping for `th`. NOTE(review): the name
 * line (doxygen line 476) is missing -- presumably
 * ruby_thread_init_stack(rb_thread_t *th); confirm upstream. */
475 void
477 {
478  native_thread_init_stack(th);
479 }
480 
/* Entry point executed on a newly created native thread (never the main
 * thread). Acquires the GVL, runs the thread's proc or C function, records
 * the result in th->value, propagates fatal/abort errors to the main thread,
 * wakes joiners, and finally releases the GVL. Always returns 0.
 * NOTE(review): many interior lines are missing from this extraction
 * (doxygen 510, 520, 522, 542, 571, 582, 589, 593, 601 -- e.g. the
 * living_threads deletion, the abort_on_exception condition tail, the
 * switch cases for joiner status, and fiber/stack release); confirm all
 * behavior against upstream thread.c before relying on this listing. */
481 static int
482 thread_start_func_2(rb_thread_t *th, VALUE *stack_start, VALUE *register_stack_start)
483 {
484  int state;
485  VALUE args = th->first_args;
486  rb_proc_t *proc;
487  rb_thread_list_t *join_list;
488  rb_thread_t *main_th;
489  VALUE errinfo = Qnil;
490 # ifdef USE_SIGALTSTACK
491  void rb_register_sigaltstack(rb_thread_t *th);
492 
493  rb_register_sigaltstack(th);
494 # endif
495 
496  if (th == th->vm->main_thread)
497  rb_bug("thread_start_func_2 must not used for main thread");
498 
499  ruby_thread_set_native(th);
500 
501  th->machine_stack_start = stack_start;
502 #ifdef __ia64
503  th->machine_register_stack_start = register_stack_start;
504 #endif
505  thread_debug("thread start: %p\n", (void *)th);
506 
507  gvl_acquire(th->vm, th);
508  {
509  thread_debug("thread start (get lock): %p\n", (void *)th);
511 
512  TH_PUSH_TAG(th);
513  if ((state = EXEC_TAG()) == 0) {
514  SAVE_ROOT_JMPBUF(th, {
     /* first_func set => raw C function thread; otherwise invoke the proc */
515  if (!th->first_func) {
516  GetProcPtr(th->first_proc, proc);
517  th->errinfo = Qnil;
518  th->root_lep = rb_vm_ep_local_ep(proc->block.ep);
519  th->root_svar = Qnil;
521  th->value = rb_vm_invoke_proc(th, proc, (int)RARRAY_LEN(args), RARRAY_PTR(args), 0);
523  }
524  else {
525  th->value = (*th->first_func)((void *)args);
526  }
527  });
528  }
529  else {
     /* non-local exit: classify the error and decide whether to forward it */
530  errinfo = th->errinfo;
531  if (state == TAG_FATAL) {
532  /* fatal error within this thread, need to stop whole script */
533  }
534  else if (th->safe_level >= 4) {
535  /* Ignore it. Main thread shouldn't be harmed from untrusted thread. */
536  errinfo = Qnil;
537  }
538  else if (rb_obj_is_kind_of(errinfo, rb_eSystemExit)) {
539  /* exit on main_thread. */
540  }
541  else if (th->vm->thread_abort_on_exception ||
543  /* exit on main_thread */
544  }
545  else {
546  errinfo = Qnil;
547  }
548  th->value = Qnil;
549  }
550 
551  th->status = THREAD_KILLED;
552  thread_debug("thread end: %p\n", (void *)th);
553 
554  main_th = th->vm->main_thread;
555  if (main_th == th) {
556  ruby_stop(0);
557  }
558  if (RB_TYPE_P(errinfo, T_OBJECT)) {
559  /* treat with normal error object */
560  rb_threadptr_raise(main_th, 1, &errinfo);
561  }
562  TH_POP_TAG();
563 
564  /* locking_mutex must be Qfalse */
565  if (th->locking_mutex != Qfalse) {
566  rb_bug("thread_start_func_2: locking_mutex must not be set (%p:%"PRIxVALUE")",
567  (void *)th, th->locking_mutex);
568  }
569 
570  /* delete self other than main thread from living_threads */
572  if (rb_thread_alone()) {
573  /* I'm last thread. wake up main thread from rb_thread_terminate_all */
574  rb_threadptr_interrupt(main_th);
575  }
576 
577  /* wake up joining threads */
578  join_list = th->join_list;
579  while (join_list) {
580  rb_threadptr_interrupt(join_list->th);
581  switch (join_list->th->status) {
583  join_list->th->status = THREAD_RUNNABLE;
584  default: break;
585  }
586  join_list = join_list->next;
587  }
588 
590  rb_check_deadlock(th->vm);
591 
592  if (!th->root_fiber) {
594  th->stack = 0;
595  }
596  }
597  native_mutex_lock(&th->vm->thread_destruct_lock);
598  /* make sure vm->running_thread never point me after this point.*/
599  th->vm->running_thread = NULL;
600  native_mutex_unlock(&th->vm->thread_destruct_lock);
602  gvl_release(th->vm);
603 
604  return 0;
605 }
606 
/* Common setup for Thread.new / Thread.start: populate the rb_thread_t,
 * inherit priority and thread-group from the caller, then spawn the native
 * thread and register it in living_threads. Raises ThreadError on frozen
 * ThreadGroup or native-thread creation failure.
 * NOTE(review): the name line and a few statements are missing from this
 * extraction (doxygen 608, 614, 627-629 -- presumably
 * thread_create_core(VALUE thval, VALUE args, VALUE (*fn)(ANYARGS)), the
 * rb_raise receiver, and pending-interrupt queue setup); confirm upstream. */
607 static VALUE
609 {
610  rb_thread_t *th, *current_th = GET_THREAD();
611  int err;
612 
613  if (OBJ_FROZEN(GET_THREAD()->thgroup)) {
615  "can't start a new thread (frozen ThreadGroup)");
616  }
617  GetThreadPtr(thval, th);
618 
619  /* setup thread environment */
620  th->first_func = fn;
621  th->first_proc = fn ? Qfalse : rb_block_proc();
622  th->first_args = args; /* GC: shouldn't put before above line */
623 
624  th->priority = current_th->priority;
625  th->thgroup = current_th->thgroup;
626 
630  RBASIC(th->pending_interrupt_mask_stack)->klass = 0;
631 
632  th->interrupt_mask = 0;
633 
634  native_mutex_initialize(&th->interrupt_lock);
635 
636  /* kick thread */
637  err = native_thread_create(th);
638  if (err) {
639  th->status = THREAD_KILLED;
640  rb_raise(rb_eThreadError, "can't create Thread (%d)", err);
641  }
642  st_insert(th->vm->living_threads, thval, (st_data_t) th->thread_id);
643  return thval;
644 }
645 
646 /*
647  * call-seq:
648  * Thread.new { ... } -> thread
649  * Thread.new(*args, &proc) -> thread
650  * Thread.new(*args) { |args| ... } -> thread
651  *
652  * Creates a new thread executing the given block.
653  *
654  * Any +args+ given to ::new will be passed to the block:
655  *
656  * arr = []
657  * a, b, c = 1, 2, 3
658  * Thread.new(a,b,c) { |d,e,f| arr << d << e << f }.join
659  * arr #=> [1, 2, 3]
660  *
661  * A ThreadError exception is raised if ::new is called without a block.
662  *
663  * If you're going to subclass Thread, be sure to call super in your
664  * +initialize+ method, otherwise a ThreadError will be raised.
665  */
/* Thread.new implementation: allocate, run #initialize (which must call
 * super / set first_args), and return the thread. Raises ThreadError when
 * the VM is shutting down or initialize was not properly called.
 * NOTE(review): the name/parameter line (doxygen line 667) is missing --
 * presumably thread_s_new(int argc, VALUE *argv, VALUE klass). */
666 static VALUE
668 {
669  rb_thread_t *th;
670  VALUE thread = rb_thread_alloc(klass);
671 
672  if (GET_VM()->main_thread->status == THREAD_KILLED)
673  rb_raise(rb_eThreadError, "can't alloc thread");
674 
675  rb_obj_call_init(thread, argc, argv);
676  GetThreadPtr(thread, th);
677  if (!th->first_args) {
678  rb_raise(rb_eThreadError, "uninitialized thread - check `%s#initialize'",
679  rb_class2name(klass));
680  }
681  return thread;
682 }
683 
684 /*
685  * call-seq:
686  * Thread.start([args]*) {|args| block } -> thread
687  * Thread.fork([args]*) {|args| block } -> thread
688  *
689  * Basically the same as ::new. However, if class Thread is subclassed, then
690  * calling +start+ in that subclass will not invoke the subclass's
691  * +initialize+ method.
692  */
693 
/* Thread.start / Thread.fork: like Thread.new but bypasses a subclass's
 * #initialize. NOTE(review): the name line (doxygen line 695) is missing --
 * presumably thread_start(VALUE klass, VALUE args). */
694 static VALUE
696 {
697  return thread_create_core(rb_thread_alloc(klass), args, 0);
698 }
699 
700 /* :nodoc: */
/* Thread#initialize: requires a block; raises ThreadError if the thread was
 * already started (reporting the original block's source location when
 * available), otherwise hands off to thread_create_core.
 * NOTE(review): the name line (doxygen line 702) is missing -- presumably
 * thread_initialize(VALUE thread, VALUE args). */
701 static VALUE
703 {
704  rb_thread_t *th;
705  if (!rb_block_given_p()) {
706  rb_raise(rb_eThreadError, "must be called with a block");
707  }
708  GetThreadPtr(thread, th);
709  if (th->first_args) {
710  VALUE proc = th->first_proc, line, loc;
711  const char *file;
     /* no proc or no location info: report without source position */
712  if (!proc || !RTEST(loc = rb_proc_location(proc))) {
713  rb_raise(rb_eThreadError, "already initialized thread");
714  }
715  file = RSTRING_PTR(RARRAY_PTR(loc)[0]);
716  if (NIL_P(line = RARRAY_PTR(loc)[1])) {
717  rb_raise(rb_eThreadError, "already initialized thread - %s",
718  file);
719  }
720  rb_raise(rb_eThreadError, "already initialized thread - %s:%d",
721  file, NUM2INT(line));
722  }
723  return thread_create_core(thread, args, 0);
724 }
725 
/* Public C API to create a thread running a C function.
 * NOTE(review): the name line and body (doxygen lines 727, 729) are missing
 * from this extraction -- presumably rb_thread_create(VALUE (*fn)(ANYARGS),
 * void *arg) returning thread_create_core(rb_thread_alloc(rb_cThread), ...);
 * confirm upstream. */
726 VALUE
728 {
730 }
731 
732 
733 /* +infty, for this purpose */
734 #define DELAY_INFTY 1E30
735 
/* Arguments threaded through rb_ensure in thread_join.
 * NOTE(review): doxygen line 737 is missing -- presumably the
 * `rb_thread_t *target, *waiting;` field pair referenced by
 * remove_from_join_list and thread_join_sleep below. */
736 struct join_arg {
738  double limit;    /* absolute deadline (timeofday() units) */
739  int forever;     /* non-zero => no timeout (DELAY_INFTY) */
740 };
741 
/* rb_ensure cleanup for thread_join: unlink the waiting thread from the
 * target's join_list (only if the target has not already died, in which case
 * the list node is gone). NOTE(review): the name line (doxygen line 743) is
 * missing -- presumably remove_from_join_list(VALUE arg). */
742 static VALUE
744 {
745  struct join_arg *p = (struct join_arg *)arg;
746  rb_thread_t *target_th = p->target, *th = p->waiting;
747 
748  if (target_th->status != THREAD_KILLED) {
     /* inner `p` shadows the outer arg pointer: it walks the list links */
749  rb_thread_list_t **p = &target_th->join_list;
750 
751  while (*p) {
752  if ((*p)->th == th) {
753  *p = (*p)->next;
754  break;
755  }
756  p = &(*p)->next;
757  }
758  }
759 
760  return Qnil;
761 }
762 
/* Body of thread_join under rb_ensure: sleep until the target dies or the
 * deadline passes. Returns Qtrue when the target died, Qfalse on timeout.
 * NOTE(review): the name line (doxygen line 764) is missing -- presumably
 * thread_join_sleep(VALUE arg). */
763 static VALUE
765 {
766  struct join_arg *p = (struct join_arg *)arg;
767  rb_thread_t *target_th = p->target, *th = p->waiting;
768  double now, limit = p->limit;
769 
770  while (target_th->status != THREAD_KILLED) {
771  if (p->forever) {
     /* deadlockable sleep: joins participate in deadlock detection */
772  sleep_forever(th, 1, 0);
773  }
774  else {
775  now = timeofday();
776  if (now > limit) {
777  thread_debug("thread_join: timeout (thid: %p)\n",
778  (void *)target_th->thread_id);
779  return Qfalse;
780  }
781  sleep_wait_for_interrupt(th, limit - now, 0);
782  }
783  thread_debug("thread_join: interrupted (thid: %p)\n",
784  (void *)target_th->thread_id);
785  }
786  return Qtrue;
787 }
788 
/* Join `target_th` with a timeout of `delay` seconds (DELAY_INFTY = wait
 * forever). Returns the target's self on success, Qnil on timeout, and
 * re-raises the target's pending exception if it died with one. Raises
 * ThreadError when joining self or the main thread.
 * NOTE(review): local declarations on doxygen lines 792 and 810 are missing
 * from this extraction -- presumably `rb_thread_t *th = GET_THREAD();` and
 * `rb_thread_list_t list;` -- as are lines 830-831 in the T_NODE branch
 * (presumably rb_exc_raise of a vm-exception); confirm upstream. */
789 static VALUE
790 thread_join(rb_thread_t *target_th, double delay)
791 {
793  struct join_arg arg;
794 
795  if (th == target_th) {
796  rb_raise(rb_eThreadError, "Target thread must not be current thread");
797  }
798  if (GET_VM()->main_thread == target_th) {
799  rb_raise(rb_eThreadError, "Target thread must not be main thread");
800  }
801 
802  arg.target = target_th;
803  arg.waiting = th;
804  arg.limit = timeofday() + delay;
805  arg.forever = delay == DELAY_INFTY;
806 
807  thread_debug("thread_join (thid: %p)\n", (void *)target_th->thread_id);
808 
809  if (target_th->status != THREAD_KILLED) {
     /* stack-allocated join_list node; removed by the ensure handler */
811  list.next = target_th->join_list;
812  list.th = th;
813  target_th->join_list = &list;
814  if (!rb_ensure(thread_join_sleep, (VALUE)&arg,
815  remove_from_join_list, (VALUE)&arg)) {
816  return Qnil;
817  }
818  }
819 
820  thread_debug("thread_join: success (thid: %p)\n",
821  (void *)target_th->thread_id);
822 
823  if (target_th->errinfo != Qnil) {
824  VALUE err = target_th->errinfo;
825 
826  if (FIXNUM_P(err)) {
827  /* */
828  }
829  else if (RB_TYPE_P(target_th->errinfo, T_NODE)) {
832  }
833  else {
834  /* normal exception */
835  rb_exc_raise(err);
836  }
837  }
838  return target_th->self;
839 }
840 
841 /*
842  * call-seq:
843  * thr.join -> thr
844  * thr.join(limit) -> thr
845  *
846  * The calling thread will suspend execution and run <i>thr</i>. Does not
847  * return until <i>thr</i> exits or until <i>limit</i> seconds have passed. If
848  * the time limit expires, <code>nil</code> will be returned, otherwise
849  * <i>thr</i> is returned.
850  *
851  * Any threads not joined will be killed when the main program exits. If
852  * <i>thr</i> had previously raised an exception and the
853  * <code>abort_on_exception</code> and <code>$DEBUG</code> flags are not set
854  * (so the exception has not yet been processed) it will be processed at this
855  * time.
856  *
857  * a = Thread.new { print "a"; sleep(10); print "b"; print "c" }
858  * x = Thread.new { print "x"; Thread.pass; print "y"; print "z" }
859  * x.join # Let x thread finish, a will be killed on exit.
860  *
861  * <em>produces:</em>
862  *
863  * axyz
864  *
865  * The following example illustrates the <i>limit</i> parameter.
866  *
867  * y = Thread.new { 4.times { sleep 0.1; puts 'tick... ' }}
868  * puts "Waiting" until y.join(0.15)
869  *
870  * <em>produces:</em>
871  *
872  * tick...
873  * Waiting
874  * tick...
875  * Waitingtick...
876  *
877  *
878  * tick...
879  */
880 
/* Thread#join([limit]): parse the optional numeric limit (default: wait
 * forever) and delegate to thread_join. NOTE(review): the name line
 * (doxygen line 882) is missing -- presumably
 * thread_join_m(int argc, VALUE *argv, VALUE self). */
881 static VALUE
883 {
884  rb_thread_t *target_th;
885  double delay = DELAY_INFTY;
886  VALUE limit;
887 
888  GetThreadPtr(self, target_th);
889 
890  rb_scan_args(argc, argv, "01", &limit);
891  if (!NIL_P(limit)) {
892  delay = rb_num2dbl(limit);
893  }
894 
895  return thread_join(target_th, delay);
896 }
897 
898 /*
899  * call-seq:
900  * thr.value -> obj
901  *
902  * Waits for <i>thr</i> to complete (via <code>Thread#join</code>) and returns
903  * its value.
904  *
905  * a = Thread.new { 2 + 2 }
906  * a.value #=> 4
907  */
908 
/* Thread#value: join the thread, then return its result value.
 * NOTE(review): the name line (doxygen line 910) and the join call
 * (line 914, presumably thread_join(th, DELAY_INFTY)) are missing from this
 * extraction -- confirm upstream. */
909 static VALUE
911 {
912  rb_thread_t *th;
913  GetThreadPtr(self, th);
915  return th->value;
916 }
917 
918 /*
919  * Thread Scheduling
920  */
921 
/* Convert a double in seconds to struct timeval, clamping +infinity to
 * TIMET_MAX seconds and normalizing a negative microsecond remainder.
 * NOTE(review): the name line (doxygen line 923) is missing -- presumably
 * double2timeval(double d). */
922 static struct timeval
924 {
925  struct timeval time;
926 
927  if (isinf(d)) {
928  time.tv_sec = TIMET_MAX;
929  time.tv_usec = 0;
930  return time;
931  }
932 
933  time.tv_sec = (int)d;
934  time.tv_usec = (int)((d - (int)d) * 1e6);
     /* for negative d the fractional part is negative: borrow one second */
935  if (time.tv_usec < 0) {
936  time.tv_usec += (int)1e6;
937  time.tv_sec -= 1;
938  }
939  return time;
940 }
941 
/* Sleep until interrupted. With `deadlockable` the thread registers as a
 * sleeper so rb_check_deadlock can detect all-threads-asleep. With
 * `spurious_check` the loop re-sleeps after spurious wakeups; otherwise any
 * wakeup returns. Restores the previous thread status on exit.
 * NOTE(review): doxygen lines 946, 949, 959 are missing -- presumably the
 * `enum rb_thread_status status = ...` declaration (STOPPED vs
 * STOPPED_FOREVER) and an interrupt check; confirm upstream. */
942 static void
943 sleep_forever(rb_thread_t *th, int deadlockable, int spurious_check)
944 {
945  enum rb_thread_status prev_status = th->status;
947 
948  th->status = status;
950  while (th->status == status) {
951  if (deadlockable) {
952  th->vm->sleeper++;
953  rb_check_deadlock(th->vm);
954  }
955  native_sleep(th, 0);
956  if (deadlockable) {
957  th->vm->sleeper--;
958  }
960  if (!spurious_check)
961  break;
962  }
963  th->status = prev_status;
964 }
965 
/* Fill *tp with the current time, preferring the monotonic clock when
 * available (immune to wall-clock adjustments) and falling back to
 * gettimeofday. NOTE(review): the name line (doxygen line 967) is missing --
 * presumably getclockofday(struct timeval *tp). */
966 static void
968 {
969 #if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
970  struct timespec ts;
971 
972  if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0) {
973  tp->tv_sec = ts.tv_sec;
974  tp->tv_usec = ts.tv_nsec / 1000;
975  } else
976 #endif
977  {
978  gettimeofday(tp, NULL);
979  }
980 }
981 
/* Sleep for duration `tv`, tolerating early/spurious wakeups by computing an
 * absolute deadline `to` and re-sleeping for the remainder until it passes
 * (unless `spurious_check` is 0, in which case any wakeup returns).
 * Saturates at TIMET_MAX on overflow. Restores prior thread status on exit.
 * NOTE(review): doxygen lines 1003 and 1006 are missing -- presumably
 * sleeper/interrupt bookkeeping inside the loop; confirm upstream. */
982 static void
983 sleep_timeval(rb_thread_t *th, struct timeval tv, int spurious_check)
984 {
985  struct timeval to, tvn;
986  enum rb_thread_status prev_status = th->status;
987 
     /* compute absolute deadline with overflow clamping */
988  getclockofday(&to);
989  if (TIMET_MAX - tv.tv_sec < to.tv_sec)
990  to.tv_sec = TIMET_MAX;
991  else
992  to.tv_sec += tv.tv_sec;
993  if ((to.tv_usec += tv.tv_usec) >= 1000000) {
994  if (to.tv_sec == TIMET_MAX)
995  to.tv_usec = 999999;
996  else {
997  to.tv_sec++;
998  to.tv_usec -= 1000000;
999  }
1000  }
1001 
1002  th->status = THREAD_STOPPED;
1004  while (th->status == THREAD_STOPPED) {
1005  native_sleep(th, &tv);
1007  getclockofday(&tvn);
1008  if (to.tv_sec < tvn.tv_sec) break;
1009  if (to.tv_sec == tvn.tv_sec && to.tv_usec <= tvn.tv_usec) break;
1010  thread_debug("sleep_timeval: %ld.%.6ld > %ld.%.6ld\n",
1011  (long)to.tv_sec, (long)to.tv_usec,
1012  (long)tvn.tv_sec, (long)tvn.tv_usec);
     /* woke early: shrink tv to the remaining interval and loop */
1013  tv.tv_sec = to.tv_sec - tvn.tv_sec;
1014  if ((tv.tv_usec = to.tv_usec - tvn.tv_usec) < 0) {
1015  --tv.tv_sec;
1016  tv.tv_usec += 1000000;
1017  }
1018  if (!spurious_check)
1019  break;
1020  }
1021  th->status = prev_status;
1022 }
1023 
/* Sleep the current thread until interrupted (not deadlock-detectable).
 * NOTE(review): the name lines (doxygen 1025 and 1032) are missing --
 * presumably rb_thread_sleep_forever(void) and rb_thread_sleep_deadly(void). */
1024 void
1026 {
1027  thread_debug("rb_thread_sleep_forever\n");
1028  sleep_forever(GET_THREAD(), 0, 1);
1029 }
1030 
/* Like the above, but participates in deadlock detection (deadlockable=1). */
1031 static void
1033 {
1034  thread_debug("rb_thread_sleep_deadly\n");
1035  sleep_forever(GET_THREAD(), 1, 1);
1036 }
1037 
/* Current time as a double in seconds, preferring CLOCK_MONOTONIC.
 * NOTE(review): the name line (doxygen line 1039) is missing -- this is the
 * timeofday() declared near the top of the file. */
1038 static double
1040 {
1041 #if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
1042  struct timespec tp;
1043 
1044  if (clock_gettime(CLOCK_MONOTONIC, &tp) == 0) {
1045  return (double)tp.tv_sec + (double)tp.tv_nsec * 1e-9;
1046  } else
1047 #endif
1048  {
1049  struct timeval tv;
1050  gettimeofday(&tv, NULL);
1051  return (double)tv.tv_sec + (double)tv.tv_usec * 1e-6;
1052  }
1053 }
1054 
1055 static void
1056 sleep_wait_for_interrupt(rb_thread_t *th, double sleepsec, int spurious_check)
1057 {
1058  sleep_timeval(th, double2timeval(sleepsec), spurious_check);
1059 }
1060 
/* Sleep `th` for a fixed 0.1 s polling interval.
 * NOTE(review): the name lines (doxygen 1062 and 1071) are missing --
 * presumably sleep_for_polling(rb_thread_t *th) and
 * rb_thread_wait_for(struct timeval time). */
1061 static void
1063 {
1064  struct timeval time;
1065  time.tv_sec = 0;
1066  time.tv_usec = 100 * 1000; /* 0.1 sec */
1067  sleep_timeval(th, time, 1);
1068 }
1069 
/* Public API: sleep the current thread for the given timeval. */
1070 void
1072 {
1073  rb_thread_t *th = GET_THREAD();
1074  sleep_timeval(th, time, 1);
1075 }
1076 
/* If other threads exist, check interrupts and nap 0.1 s to yield the CPU.
 * NOTE(review): the name line (doxygen 1078) and line 1082 (presumably
 * RUBY_VM_CHECK_INTS_BLOCKING(th)) are missing -- presumably
 * rb_thread_polling(void); confirm upstream. */
1077 void
1079 {
1080  if (!rb_thread_alone()) {
1081  rb_thread_t *th = GET_THREAD();
1083  sleep_for_polling(th);
1084  }
1085 }
1086 
1087 /*
1088  * CAUTION: This function causes thread switching.
1089  * rb_thread_check_ints() check ruby's interrupts.
1090  * some interrupt needs thread switching/invoke handlers,
1091  * and so on.
1092  */
1093 
/* Public interrupt-check entry point; may switch threads / run handlers.
 * NOTE(review): the name line and body (doxygen 1095, 1097) are missing --
 * presumably rb_thread_check_ints(void) calling
 * RUBY_VM_CHECK_INTS_BLOCKING(GET_THREAD()); confirm upstream. */
1094 void
1096 {
1098 }
1099 
1100 /*
1101  * Hidden API for tcl/tk wrapper.
1102  * There is no guarantee to perpetuate it.
1103  */
/* Non-zero when buffered signals are pending. NOTE(review): the name line
 * (doxygen 1105) is missing -- presumably rb_thread_check_trap_pending(void). */
1104 int
1106 {
1107  return rb_signal_buff_size() != 0;
1108 }
1109 
1110 /* This function can be called in blocking region. */
/* Callable from a blocking region: report whether `thval` has a pending
 * interrupt. NOTE(review): the name line (doxygen 1112) is missing --
 * presumably rb_thread_interrupted(VALUE thval). */
1111 int
1113 {
1114  rb_thread_t *th;
1115  GetThreadPtr(thval, th);
1116  return (int)RUBY_VM_INTERRUPTED(th);
1117 }
1118 
/* Public sleep-for-seconds API. NOTE(review): name line and body (doxygen
 * 1120, 1122) are missing -- presumably rb_thread_sleep(int sec) delegating
 * to rb_thread_wait_for; confirm upstream. */
1119 void
1121 {
1123 }
1124 
/* Yield the GVL to another ready thread, but only when this thread has been
 * running for at least `limits_us` microseconds and other threads exist.
 * NOTE(review): doxygen lines 1134 and 1136 are missing around gvl_yield --
 * presumably RB_GC_SAVE_MACHINE_CONTEXT and running-time reset; confirm
 * upstream. */
1125 static void
1126 rb_thread_schedule_limits(unsigned long limits_us)
1127 {
1128  thread_debug("rb_thread_schedule\n");
1129  if (!rb_thread_alone()) {
1130  rb_thread_t *th = GET_THREAD();
1131 
1132  if (th->running_time_us >= limits_us) {
1133  thread_debug("rb_thread_schedule/switch start\n");
1135  gvl_yield(th->vm, th);
1137  thread_debug("rb_thread_schedule/switch done\n");
1138  }
1139  }
1140 }
1141 
/* Public Thread.pass-style scheduler hook: always offer to yield, then run
 * pending interrupts. NOTE(review): the name line (doxygen 1143) and lines
 * 1146, 1149 are missing -- presumably rb_thread_schedule(void) calling
 * rb_thread_schedule_limits(0) and rb_threadptr_execute_interrupts; confirm
 * upstream. */
1142 void
1144 {
1145  rb_thread_t *cur_th = GET_THREAD();
1147 
1148  if (UNLIKELY(RUBY_VM_INTERRUPTED_ANY(cur_th))) {
1150  }
1151 }
1152 
1153 /* blocking region */
1154 
/* Enter a blocking region: save the current status, install the unblocking
 * function, mark the thread STOPPED, and release the GVL. Returns FALSE
 * (region not entered, GVL kept) when fail_if_interrupted is set and an
 * interrupt was already pending. NOTE(review): the first signature line
 * (doxygen 1156) is missing -- presumably
 * blocking_region_begin(rb_thread_t *th, struct rb_blocking_region_buffer
 * *region, ...); line 1164 (machine-context save) is also missing. */
1155 static inline int
1157  rb_unblock_function_t *ubf, void *arg, int fail_if_interrupted)
1158 {
1159  region->prev_status = th->status;
1160  if (set_unblock_function(th, ubf, arg, &region->oldubf, fail_if_interrupted)) {
1161  th->blocking_region_buffer = region;
1162  th->status = THREAD_STOPPED;
1163  thread_debug("enter blocking region (%p)\n", (void *)th);
1165  gvl_release(th->vm);
1166  return TRUE;
1167  }
1168  else {
1169  return FALSE;
1170  }
1171 }
1172 
/* Leave a blocking region: re-acquire the GVL, restore the saved unblock
 * callback, and restore the pre-region thread status. NOTE(review): the
 * name line (doxygen 1174) and line 1177 are missing -- presumably
 * blocking_region_end(rb_thread_t *th, struct rb_blocking_region_buffer
 * *region) and rb_thread_set_current(th); confirm upstream. */
1173 static inline void
1175 {
1176  gvl_acquire(th->vm, th);
1178  thread_debug("leave blocking region (%p)\n", (void *)th);
1179  remove_signal_thread_list(th);
1180  th->blocking_region_buffer = 0;
1181  reset_unblock_function(th, &region->oldubf);
1182  if (th->status == THREAD_STOPPED) {
1183  th->status = region->prev_status;
1184  }
1185 }
1186 
/* Public begin/end wrappers around blocking_region_begin/_end.
 * NOTE(review): the return type/name lines (doxygen 1187-1188, 1197) and the
 * region allocation (line 1191, presumably ALLOC of
 * struct rb_blocking_region_buffer) are missing from this extraction --
 * presumably rb_thread_blocking_region_begin(void) and
 * rb_thread_blocking_region_end(struct rb_blocking_region_buffer *region);
 * confirm upstream. Uses ubf_select as the default unblocking function. */
1189 {
1190  rb_thread_t *th = GET_THREAD();
1192  blocking_region_begin(th, region, ubf_select, th, FALSE);
1193  return region;
1194 }
1195 
/* End the region, free the buffer, and preserve errno across the GVL
 * re-acquisition (which may clobber it). Line 1203 (presumably
 * RUBY_VM_CHECK_INTS_BLOCKING) is missing. */
1196 void
1198 {
1199  int saved_errno = errno;
1200  rb_thread_t *th = ruby_thread_from_native();
1201  blocking_region_end(th, region);
1202  xfree(region);
1204  errno = saved_errno;
1205 }
1206 
/* Shared implementation of rb_thread_call_without_gvl{,2}: run func(data1)
 * inside a BLOCKING_REGION (GVL released), mapping the RUBY_UBF_IO /
 * RUBY_UBF_PROCESS sentinels to ubf_select. errno from func is preserved
 * across GVL re-acquisition. When fail_if_interrupted is set, pending
 * interrupts make BLOCKING_REGION skip func entirely (val stays 0).
 * NOTE(review): doxygen line 1228 is missing in the !fail_if_interrupted
 * branch -- presumably RUBY_VM_CHECK_INTS_BLOCKING(th); confirm upstream. */
1207 static void *
1208 call_without_gvl(void *(*func)(void *), void *data1,
1209  rb_unblock_function_t *ubf, void *data2, int fail_if_interrupted)
1210 {
1211  void *val = 0;
1212 
1213  rb_thread_t *th = GET_THREAD();
1214  int saved_errno = 0;
1215 
1216  th->waiting_fd = -1;
1217  if (ubf == RUBY_UBF_IO || ubf == RUBY_UBF_PROCESS) {
1218  ubf = ubf_select;
1219  data2 = th;
1220  }
1221 
1222  BLOCKING_REGION({
1223  val = func(data1);
1224  saved_errno = errno;
1225  }, ubf, data2, fail_if_interrupted);
1226 
1227  if (!fail_if_interrupted) {
1229  }
1230 
1231  errno = saved_errno;
1232 
1233  return val;
1234 }
1235 
1236 /*
1237  * rb_thread_call_without_gvl - permit concurrent/parallel execution.
1238  * rb_thread_call_without_gvl2 - permit concurrent/parallel execution
1239  * without interrupt process.
1240  *
1241  * rb_thread_call_without_gvl() does:
1242  * (1) Check interrupts.
1243  * (2) release GVL.
1244  * Other Ruby threads may run in parallel.
1245  * (3) call func with data1
1246  * (4) acquire GVL.
1247  * Other Ruby threads can not run in parallel any more.
1248  * (5) Check interrupts.
1249  *
1250  * rb_thread_call_without_gvl2() does:
1251  * (1) Check interrupt and return if interrupted.
1252  * (2) release GVL.
1253  * (3) call func with data1 and a pointer to the flags.
1254  * (4) acquire GVL.
1255  *
1256  * If another thread interrupts this thread (Thread#kill, signal delivery,
1257  * VM-shutdown request, and so on), `ubf()' is called (`ubf()' means
1258  * "un-blocking function"). `ubf()' should interrupt `func()' execution by
1259  * toggling a cancellation flag, canceling the invocation of a call inside
1260  * `func()' or similar. Note that `ubf()' may not be called with the GVL.
1261  *
1262  * There are built-in ubfs and you can specify these ubfs:
1263  *
1264  * * RUBY_UBF_IO: ubf for IO operation
1265  * * RUBY_UBF_PROCESS: ubf for process operation
1266  *
1267  * However, we can not guarantee our built-in ubfs interrupt your `func()'
1268  * correctly. Be careful to use rb_thread_call_without_gvl(). If you don't
1269  * provide proper ubf(), your program will not stop for Control+C or other
1270  * shutdown events.
1271  *
1272  * "Check interrupts" on above list means that check asynchronous
1273  * interrupt events (such as Thread#kill, signal delivery, VM-shutdown
1274  * request, and so on) and call corresponding procedures
1275  * (such as `trap' for signals, raise an exception for Thread#raise).
1276  * If `func()' finished and receive interrupts, you may skip interrupt
1277  * checking. For example, assume the following func() it read data from file.
1278  *
1279  * read_func(...) {
1280  * // (a) before read
1281  * read(buffer); // (b) reading
1282  * // (c) after read
1283  * }
1284  *
1285  * If an interrupt occurs at (a) or (b), then `ubf()' cancels this
1286  * `read_func()' and interrupts are checked. However, if an interrupt occurs
1287  * at (c), after *read* operation is completed, checking interrupts is harmful
1288  * because it causes irrevocable side-effect, the read data will vanish. To
1289  * avoid such problem, the `read_func()' should be used with
1290  * `rb_thread_call_without_gvl2()'.
1291  *
1292  * If `rb_thread_call_without_gvl2()' detects interrupt, return its execution
1293  * immediately. This function does not show when the execution was interrupted.
1294  * For example, there are 4 possible timing (a), (b), (c) and before calling
1295  * read_func(). You need to record progress of a read_func() and check
1296  * the progress after `rb_thread_call_without_gvl2()'. You may need to call
1297  * `rb_thread_check_ints()' correctly or your program can not process proper
1298  * process such as `trap' and so on.
1299  *
1300  * NOTE: You can not execute most of Ruby C API and touch Ruby
1301  * objects in `func()' and `ubf()', including raising an
1302  * exception, because current thread doesn't acquire GVL
1303  * (it causes synchronization problems). If you need to
1304  * call ruby functions either use rb_thread_call_with_gvl()
1305  * or read source code of C APIs and confirm safety by
1306  * yourself.
1307  *
1308  * NOTE: In short, this API is difficult to use safely. I recommend you
1309  * use other ways if you have. We lack experiences to use this API.
1310  * Please report your problem related on it.
1311  *
1312  * NOTE: Releasing GVL and re-acquiring GVL may be expensive operations
1313  * for a short running `func()'. Be sure to benchmark and use this
1314  * mechanism when `func()' consumes enough time.
1315  *
1316  * Safe C API:
1317  * * rb_thread_interrupted() - check interrupt flag
1318  * * ruby_xmalloc(), ruby_xrealloc(), ruby_xfree() -
1319  * they will work without GVL, and may acquire GVL when GC is needed.
1320  */
1321 void *
1322 rb_thread_call_without_gvl2(void *(*func)(void *), void *data1,
1323  rb_unblock_function_t *ubf, void *data2)
1324 {
1325  return call_without_gvl(func, data1, ubf, data2, TRUE);
1326 }
1327 
1328 void *
1329 rb_thread_call_without_gvl(void *(*func)(void *data), void *data1,
1330  rb_unblock_function_t *ubf, void *data2)
1331 {
1332  return call_without_gvl(func, data1, ubf, data2, FALSE);
1333 }
1334 
1335 VALUE
/* NOTE(review): the signature line was lost in extraction; upstream names this
 * rb_thread_io_blocking_region(rb_blocking_function_t *func, void *data1, int fd)
 * -- confirm against the original source.  Runs func(data1) in a blocking
 * region with th->waiting_fd recorded so other threads can see which fd this
 * thread is blocked on. */
1337 {
1338  VALUE val = Qundef; /* shouldn't be used */
1339  rb_thread_t *th = GET_THREAD();
1340  int saved_errno = 0;
1341  int state;
1342 
1343  th->waiting_fd = fd;
1344 
1345  TH_PUSH_TAG(th);
1346  if ((state = EXEC_TAG()) == 0) {
1347  BLOCKING_REGION({
1348  val = func(data1);
1349  saved_errno = errno;
1350  }, ubf_select, th, FALSE);
1351  }
1352  TH_POP_TAG();
1353 
1354  /* clear waiting_fd anytime, even when func() raised */
1355  th->waiting_fd = -1;
1356 
1357  if (state) {
1358  JUMP_TAG(state);
1359  }
1360  /* TODO: check func() */
1362 
/* restore errno captured inside the blocking region; the macros above may
 * have clobbered it */
1363  errno = saved_errno;
1364 
1365  return val;
1366 }
1367 
1368 VALUE
/* NOTE(review): first signature line lost in extraction; presumably the
 * deprecated compatibility entry rb_thread_blocking_region( -- confirm.
 * Adapts the old rb_blocking_function_t signature onto
 * rb_thread_call_without_gvl() via a function-pointer cast. */
1370  rb_blocking_function_t *func, void *data1,
1371  rb_unblock_function_t *ubf, void *data2)
1372 {
1373  void *(*f)(void*) = (void *(*)(void*))func;
1374  return (VALUE)rb_thread_call_without_gvl(f, data1, ubf, data2);
1375 }
1376 
1377 /*
1378  * rb_thread_call_with_gvl - re-enter the Ruby world after GVL release.
1379  *
1380  * After releasing GVL using rb_thread_blocking_region() or
1381  * rb_thread_call_without_gvl() you can not access Ruby values or invoke
1382  * methods. If you need to access Ruby you must use this function
1383  * rb_thread_call_with_gvl().
1384  *
1385  * This function rb_thread_call_with_gvl() does:
1386  * (1) acquire GVL.
1387  * (2) call passed function `func'.
1388  * (3) release GVL.
1389  * (4) return a value which is returned at (2).
1390  *
1391  * NOTE: You should not return Ruby object at (2) because such Object
1392  * will not marked.
1393  *
1394  * NOTE: If an exception is raised in `func', this function DOES NOT
1395  * protect (catch) the exception. If you have any resources
1396  * which should free before throwing exception, you need use
1397  * rb_protect() in `func' and return a value which represents
1398  * exception is raised.
1399  *
1400  * NOTE: This function should not be called by a thread which was not
1401  * created as Ruby thread (created by Thread.new or so). In other
1402  * words, this function *DOES NOT* associate or convert a NON-Ruby
1403  * thread to a Ruby thread.
1404  */
1405 void *
1406 rb_thread_call_with_gvl(void *(*func)(void *), void *data1)
1407 {
1408  rb_thread_t *th = ruby_thread_from_native();
1409  struct rb_blocking_region_buffer *brb;
1410  struct rb_unblock_callback prev_unblock;
1411  void *r;
1412 
1413  if (th == 0) {
1414  /* An error occurred, but we can't use rb_bug()
1415  * because this thread is not Ruby's thread.
1416  * What should we do?
1417  */
1418 
1419  fprintf(stderr, "[BUG] rb_thread_call_with_gvl() is called by non-ruby thread\n");
1420  exit(EXIT_FAILURE);
1421  }
1422 
/* NOTE(review): a line is missing here due to extraction -- upstream assigns
 * brb from th->blocking_region_buffer before the NULL check below; as shown,
 * brb would be read uninitialized.  Confirm against the original source. */
1424  prev_unblock = th->unblock;
1425 
1426  if (brb == 0) {
1427  rb_bug("rb_thread_call_with_gvl: called by a thread which has GVL.");
1428  }
1429 
/* re-acquire the GVL for the duration of func(), then release it again */
1430  blocking_region_end(th, brb);
1431  /* enter to Ruby world: You can access Ruby values, methods and so on. */
1432  r = (*func)(data1);
1433  /* leave from Ruby world: You can not access Ruby values, etc. */
1434  blocking_region_begin(th, brb, prev_unblock.func, prev_unblock.arg, FALSE);
1435  return r;
1436 }
1437 
1438 /*
1439  * ruby_thread_has_gvl_p - check if current native thread has GVL.
1440  *
1441  ***
1442  *** This API is EXPERIMENTAL!
1443  *** We do not guarantee that this API remains in ruby 1.9.2 or later.
1444  ***
1445  */
1446 
1447 int
/* NOTE(review): name line lost in extraction; per the comment above this is
 * ruby_thread_has_gvl_p(void).  Returns 1 iff the calling native thread is a
 * Ruby thread that is not currently inside a blocking region. */
1449 {
1450  rb_thread_t *th = ruby_thread_from_native();
1451 
1452  if (th && th->blocking_region_buffer == 0) {
1453  return 1;
1454  }
1455  else {
1456  return 0;
1457  }
1458 }
1459 
1460 /*
1461  * call-seq:
1462  * Thread.pass -> nil
1463  *
1464  * Give the thread scheduler a hint to pass execution to another thread.
1465  * A running thread may or may not switch, it depends on OS and processor.
1466  */
1467 
1468 static VALUE
/* Thread.pass implementation.  NOTE(review): the name line and the body line
 * (presumably a call to the scheduler) were lost in extraction -- confirm. */
1470 {
1472  return Qnil;
1473 }
1474 
1475 /*****************************************************/
1476 
1477 /*
1478  * rb_threadptr_pending_interrupt_* - manage asynchronous error queue
1479  *
1480  * Async events such as an exception thrown by Thread#raise,
1481  * Thread#kill and thread termination (after main thread termination)
1482  * will be queued to th->pending_interrupt_queue.
1483  * - clear: clear the queue.
1484  * - enque: enque err object into queue.
1485  * - deque: deque err object from queue.
1486  * - active_p: return 1 if the queue should be checked.
1487  *
1488  * All rb_threadptr_pending_interrupt_* functions are called by
1489  * a GVL acquired thread, of course.
1490  * Note that all "rb_" prefix APIs need GVL to call.
1491  */
1492 
1493 void
/* NOTE(review): name and body lines lost in extraction; per the comment block
 * above, this should be the pending-interrupt-queue "clear" operation. */
1495 {
1497 }
1498 
1499 void
/* NOTE(review): name and body lines lost in extraction; per the comment block
 * above, this should be the pending-interrupt-queue "enque" operation. */
1501 {
1504 }
1505 
1511 };
1512 
1513 static enum handle_interrupt_timing
/* Walk the thread's handle_interrupt mask stack from the innermost mask
 * outward, and for each mask look up err's ancestors; the first matching
 * entry decides the timing (:immediate / :on_blocking / :never).  Returns
 * INTERRUPT_NONE when no mask covers err.  NOTE(review): name line lost in
 * extraction -- presumably takes (rb_thread_t *th, VALUE err); confirm. */
1515 {
1516  VALUE mask;
1517  long mask_stack_len = RARRAY_LEN(th->pending_interrupt_mask_stack);
1518  VALUE *mask_stack = RARRAY_PTR(th->pending_interrupt_mask_stack);
1519  VALUE ancestors = rb_mod_ancestors(err); /* TODO: GC guard */
1520  long ancestors_len = RARRAY_LEN(ancestors);
1521  VALUE *ancestors_ptr = RARRAY_PTR(ancestors);
1522  int i, j;
1523 
1524  for (i=0; i<mask_stack_len; i++) {
1525  mask = mask_stack[mask_stack_len-(i+1)];
1526 
1527  for (j=0; j<ancestors_len; j++) {
1528  VALUE klass = ancestors_ptr[j];
1529  VALUE sym;
1530 
1531  /* TODO: remove rb_intern() */
1532  if ((sym = rb_hash_aref(mask, klass)) != Qnil) {
1533  if (sym == sym_immediate) {
1534  return INTERRUPT_IMMEDIATE;
1535  }
1536  else if (sym == sym_on_blocking) {
1537  return INTERRUPT_ON_BLOCKING;
1538  }
1539  else if (sym == sym_never) {
1540  return INTERRUPT_NEVER;
1541  }
1542  else {
1543  rb_raise(rb_eThreadError, "unknown mask signature");
1544  }
1545  }
1546  }
1547  /* try next mask */
1548  }
1549  return INTERRUPT_NONE;
1550 }
1551 
1552 static int
/* True iff the thread's pending-interrupt queue is empty.  NOTE(review):
 * name line lost in extraction. */
1554 {
1555  return RARRAY_LEN(th->pending_interrupt_queue) == 0;
1556 }
1557 
1558 static int
/* Scan the pending-interrupt queue for an entry whose class matches err.
 * NOTE(review): the name line and the line fetching the i-th queue element
 * into `e' were lost in extraction -- confirm against the original. */
1560 {
1561  int i;
1562  for (i=0; i<RARRAY_LEN(th->pending_interrupt_queue); i++) {
1564  if (rb_class_inherited_p(e, err)) {
1565  return TRUE;
1566  }
1567  }
1568  return FALSE;
1569 }
1570 
1571 static VALUE
/* Dequeue the first pending interrupt whose handle_interrupt mask allows
 * delivery at `timing'; returns Qundef when nothing is deliverable.
 * NOTE(review): several lines (name line, queue-element fetch, mask lookup,
 * and the actual dequeue in the IMMEDIATE case) were lost in extraction --
 * confirm against the original source before relying on details. */
1573 {
1574 #if 1 /* 1 to enable Thread#handle_interrupt, 0 to ignore it */
1575  int i;
1576 
1577  for (i=0; i<RARRAY_LEN(th->pending_interrupt_queue); i++) {
1579 
1581 
1582  switch (mask_timing) {
1583  case INTERRUPT_ON_BLOCKING:
1584  if (timing != INTERRUPT_ON_BLOCKING) {
1585  break;
1586  }
1587  /* fall through */
1588  case INTERRUPT_NONE: /* default: IMMEDIATE */
1589  case INTERRUPT_IMMEDIATE:
1591  return err;
1592  case INTERRUPT_NEVER:
1593  break;
1594  }
1595  }
1596 
1598  return Qundef;
1599 #else
1603  }
1604  return err;
1605 #endif
1606 }
1607 
1608 int
/* Return 1 if the pending-interrupt queue should be checked now.
 * NOTE(review): name line and the two early-return conditions were lost in
 * extraction (presumably "queue unchanged" and "queue masked" checks) --
 * confirm against the original source. */
1610 {
1611  /*
1612  * For optimization, we don't check the async errinfo queue
1613  * if neither it nor the thread interrupt mask has changed
1614  * since the last check.
1615  */
1617  return 0;
1618  }
1619 
1621  return 0;
1622  }
1623 
1624  return 1;
1625 }
1626 
1627 static int
/* Hash-iteration callback validating Thread.handle_interrupt mask values:
 * each value must be :immediate, :on_blocking or :never.  NOTE(review):
 * name line lost in extraction. */
1629 {
1630  if (val != sym_immediate && val != sym_on_blocking && val != sym_never) {
1631  rb_raise(rb_eArgError, "unknown mask signature");
1632  }
1633 
1634  return ST_CONTINUE;
1635 }
1636 
1637 /*
1638  * call-seq:
1639  * Thread.handle_interrupt(hash) { ... } -> result of the block
1640  *
1641  * Changes asynchronous interrupt timing.
1642  *
1643  * _interrupt_ means asynchronous event and corresponding procedure
1644  * by Thread#raise, Thread#kill, signal trap (not supported yet)
1645  * and main thread termination (if main thread terminates, then all
1646  * other thread will be killed).
1647  *
1648  * The given +hash+ has pairs like <code>ExceptionClass =>
1649  * :TimingSymbol</code>. Where the ExceptionClass is the interrupt handled by
1650  * the given block. The TimingSymbol can be one of the following symbols:
1651  *
1652  * [+:immediate+] Invoke interrupts immediately.
1653  * [+:on_blocking+] Invoke interrupts while _BlockingOperation_.
1654  * [+:never+] Never invoke all interrupts.
1655  *
1656  * _BlockingOperation_ means that the operation will block the calling thread,
1657  * such as read and write. On CRuby implementation, _BlockingOperation_ is any
1658  * operation executed without GVL.
1659  *
1660  * Masked asynchronous interrupts are delayed until they are enabled.
1661  * This method is similar to sigprocmask(3).
1662  *
1663  * === NOTE
1664  *
1665  * Asynchronous interrupts are difficult to use.
1666  *
1667  * If you need to communicate between threads, please consider to use another way such as Queue.
1668  *
1669  * Or use them with deep understanding about this method.
1670  *
1671  * === Usage
1672  *
1673  * In this example, we can guard from Thread#raise exceptions.
1674  *
1675  * Using the +:never+ TimingSymbol the RuntimeError exception will always be
1676  * ignored in the first block of the main thread. In the second
1677  * ::handle_interrupt block we can purposefully handle RuntimeError exceptions.
1678  *
1679  * th = Thread.new do
1680  * Thread.handle_interrupt(RuntimeError => :never) {
1681  * begin
1682  * # You can write resource allocation code safely.
1683  * Thread.handle_interrupt(RuntimeError => :immediate) {
1684  * # ...
1685  * }
1686  * ensure
1687  * # You can write resource deallocation code safely.
1688  * end
1689  * }
1690  * end
1691  * Thread.pass
1692  * # ...
1693  * th.raise "stop"
1694  *
1695  * While we are ignoring the RuntimeError exception, it's safe to write our
1696  * resource allocation code. Then, the ensure block is where we can safely
1697  * deallocate your resources.
1698  *
1699  * ==== Guarding from TimeoutError
1700  *
1701  * In the next example, we will guard from the TimeoutError exception. This
1702  * will help prevent from leaking resources when TimeoutError exceptions occur
1703  * during normal ensure clause. For this example we use the help of the
1704  * standard library Timeout, from lib/timeout.rb
1705  *
1706  * require 'timeout'
1707  * Thread.handle_interrupt(TimeoutError => :never) {
1708  * timeout(10){
1709  * # TimeoutError doesn't occur here
1710  * Thread.handle_interrupt(TimeoutError => :on_blocking) {
1711  * # possible to be killed by TimeoutError
1712  * # while blocking operation
1713  * }
1714  * # TimeoutError doesn't occur here
1715  * }
1716  * }
1717  *
1718  * In the first part of the +timeout+ block, we can rely on TimeoutError being
1719  * ignored. Then in the <code>TimeoutError => :on_blocking</code> block, any
1720  * operation that will block the calling thread is susceptible to a
1721  * TimeoutError exception being raised.
1722  *
1723  * ==== Stack control settings
1724  *
1725  * It's possible to stack multiple levels of ::handle_interrupt blocks in order
1726  * to control more than one ExceptionClass and TimingSymbol at a time.
1727  *
1728  * Thread.handle_interrupt(FooError => :never) {
1729  * Thread.handle_interrupt(BarError => :never) {
1730  * # FooError and BarError are prohibited.
1731  * }
1732  * }
1733  *
1734  * ==== Inheritance with ExceptionClass
1735  *
1736  * All exceptions inherited from the ExceptionClass parameter will be considered.
1737  *
1738  * Thread.handle_interrupt(Exception => :never) {
1739  * # all exceptions inherited from Exception are prohibited.
1740  * }
1741  *
1742  */
1743 static VALUE
/* Thread.handle_interrupt implementation (see rdoc above).  Pushes the mask,
 * yields the block under TH_PUSH_TAG protection, pops the mask, checks
 * interrupts, then re-raises any non-local exit.  NOTE(review): the name
 * line, the mask validation/push lines, and the mask-pop lines were lost in
 * extraction -- confirm against the original source. */
1745 {
1746  VALUE mask;
1747  rb_thread_t *th = GET_THREAD();
1748  VALUE r = Qnil;
1749  int state;
1750 
1751  if (!rb_block_given_p()) {
1752  rb_raise(rb_eArgError, "block is needed.");
1753  }
1754 
1755  mask = rb_convert_type(mask_arg, T_HASH, "Hash", "to_hash");
1761  }
1762 
1763  TH_PUSH_TAG(th);
1764  if ((state = EXEC_TAG()) == 0) {
1765  r = rb_yield(Qnil);
1766  }
1767  TH_POP_TAG();
1768 
1773  }
1774 
1775  RUBY_VM_CHECK_INTS(th);
1776 
1777  if (state) {
1778  JUMP_TAG(state);
1779  }
1780 
1781  return r;
1782 }
1783 
1784 /*
1785  * call-seq:
1786  * target_thread.pending_interrupt?(error = nil) -> true/false
1787  *
1788  * Returns whether or not the asynchronous queue is empty for the target thread.
1789  *
1790  * If +error+ is given, then check only for +error+ type deferred events.
1791  *
1792  * See ::pending_interrupt? for more information.
1793  */
1794 static VALUE
/* Thread#pending_interrupt? implementation (see rdoc above).  With no
 * argument: true iff the target thread's queue is non-empty.  With a class
 * or module argument: true iff a matching deferred event is queued.
 * NOTE(review): name line lost in extraction -- presumably
 * rb_thread_pending_interrupt_p(int argc, VALUE *argv, VALUE target_thread). */
1796 {
1797  rb_thread_t *target_th;
1798 
1799  GetThreadPtr(target_thread, target_th);
1800 
1801  if (rb_threadptr_pending_interrupt_empty_p(target_th)) {
1802  return Qfalse;
1803  }
1804  else {
1805  if (argc == 1) {
1806  VALUE err;
1807  rb_scan_args(argc, argv, "01", &err);
1808  if (!rb_obj_is_kind_of(err, rb_cModule)) {
1809  rb_raise(rb_eTypeError, "class or module required for rescue clause");
1810  }
1811  if (rb_threadptr_pending_interrupt_include_p(target_th, err)) {
1812  return Qtrue;
1813  }
1814  else {
1815  return Qfalse;
1816  }
1817  }
1818  return Qtrue;
1819  }
1820 }
1821 
1822 /*
1823  * call-seq:
1824  * Thread.pending_interrupt?(error = nil) -> true/false
1825  *
1826  * Returns whether or not the asynchronous queue is empty.
1827  *
1828  * Since Thread::handle_interrupt can be used to defer asynchronous events.
1829  * This method can be used to determine if there are any deferred events.
1830  *
1831  * If you find this method returns true, then you may finish +:never+ blocks.
1832  *
1833  * For example, the following method processes deferred asynchronous events
1834  * immediately.
1835  *
1836  * def Thread.kick_interrupt_immediately
1837  * Thread.handle_interrupt(Object => :immediate) {
1838  * Thread.pass
1839  * }
1840  * end
1841  *
1842  * If +error+ is given, then check only for +error+ type deferred events.
1843  *
1844  * === Usage
1845  *
1846  * th = Thread.new{
1847  * Thread.handle_interrupt(RuntimeError => :on_blocking){
1848  * while true
1849  * ...
1850  * # reach safe point to invoke interrupt
1851  * if Thread.pending_interrupt?
1852  * Thread.handle_interrupt(Object => :immediate){}
1853  * end
1854  * ...
1855  * end
1856  * }
1857  * }
1858  * ...
1859  * th.raise # stop thread
1860  *
1861  * This example can also be written as the following, which you should use to
1862  * avoid asynchronous interrupts.
1863  *
1864  * flag = true
1865  * th = Thread.new{
1866  * Thread.handle_interrupt(RuntimeError => :on_blocking){
1867  * while true
1868  * ...
1869  * # reach safe point to invoke interrupt
1870  * break if flag == false
1871  * ...
1872  * end
1873  * }
1874  * }
1875  * ...
1876  * flag = false # stop thread
1877  */
1878 
1879 static VALUE
/* Thread.pending_interrupt? -- delegates to the instance-side check against
 * the current thread.  NOTE(review): name line lost in extraction. */
1881 {
1882  return rb_thread_pending_interrupt_p(argc, argv, GET_THREAD()->self);
1883 }
1884 
1885 static void
/* Mark the thread for termination and jump out with TAG_FATAL.
 * NOTE(review): the name line and one body line (presumably clearing the
 * pending-interrupt queue) were lost in extraction -- confirm. */
1887 {
1889  th->status = THREAD_RUNNABLE;
1890  th->to_kill = 1;
1891  th->errinfo = INT2FIX(TAG_FATAL);
1892  TH_JUMP_TAG(th, TAG_FATAL);
1893 }
1894 
1895 void
/* Main interrupt-dispatch loop: atomically consume the unmasked bits of
 * th->interrupt_flag, then handle, in order, trap (signal), pending
 * (cross-thread exception), finalizer and timer interrupts; loops until no
 * unmasked interrupt remains.  NOTE(review): the name line and a few body
 * lines (pending-interrupt dequeue, kill dispatch, finalizer run) were lost
 * in extraction -- confirm against the original source. */
1897 {
1898  if (th->raised_flag) return;
1899 
1900  while (1) {
1901  rb_atomic_t interrupt;
1902  rb_atomic_t old;
1903  int sig;
1904  int timer_interrupt;
1905  int pending_interrupt;
1906  int finalizer_interrupt;
1907  int trap_interrupt;
1908 
/* CAS loop: clear the unmasked bits of interrupt_flag while keeping the
 * masked ones, without losing flags set concurrently by other threads. */
1909  do {
1910  interrupt = th->interrupt_flag;
1911  old = ATOMIC_CAS(th->interrupt_flag, interrupt, interrupt & th->interrupt_mask);
1912  } while (old != interrupt);
1913 
1914  interrupt &= (rb_atomic_t)~th->interrupt_mask;
1915  if (!interrupt)
1916  return;
1917 
1918  timer_interrupt = interrupt & TIMER_INTERRUPT_MASK;
1919  pending_interrupt = interrupt & PENDING_INTERRUPT_MASK;
1920  finalizer_interrupt = interrupt & FINALIZER_INTERRUPT_MASK;
1921  trap_interrupt = interrupt & TRAP_INTERRUPT_MASK;
1922 
1923  /* signal handling */
1924  if (trap_interrupt && (th == th->vm->main_thread)) {
1925  enum rb_thread_status prev_status = th->status;
1926  th->status = THREAD_RUNNABLE;
1927  while ((sig = rb_get_next_signal()) != 0) {
1928  rb_signal_exec(th, sig);
1929  }
1930  th->status = prev_status;
1931  }
1932 
1933  /* exception from another thread */
1934  if (pending_interrupt && rb_threadptr_pending_interrupt_active_p(th)) {
1936  thread_debug("rb_thread_execute_interrupts: %"PRIdVALUE"\n", err);
1937 
1938  if (err == Qundef) {
1939  /* no error */
1940  }
1941  else if (err == eKillSignal /* Thread#kill received */ ||
1942  err == eTerminateSignal /* Terminate thread */ ||
1943  err == INT2FIX(TAG_FATAL) /* Thread.exit etc. */ ) {
1945  }
1946  else {
1947  /* set runnable if th was slept. */
1948  if (th->status == THREAD_STOPPED ||
1950  th->status = THREAD_RUNNABLE;
1951  rb_exc_raise(err);
1952  }
1953  }
1954 
1955  if (finalizer_interrupt) {
1957  }
1958 
1959  if (timer_interrupt) {
/* scale the time quantum by thread priority before yielding the CPU */
1960  unsigned long limits_us = TIME_QUANTUM_USEC;
1961 
1962  if (th->priority > 0)
1963  limits_us <<= th->priority;
1964  else
1965  limits_us >>= -th->priority;
1966 
1967  if (th->status == THREAD_RUNNABLE)
1968  th->running_time_us += TIME_QUANTUM_USEC;
1969 
1970  EXEC_EVENT_HOOK(th, RUBY_EVENT_SWITCH, th->cfp->self, 0, 0, Qundef);
1971 
1972  rb_thread_schedule_limits(limits_us);
1973  }
1974  }
1975 }
1976 
1977 void
/* VALUE-taking wrapper: resolve the thread pointer and run the interrupt
 * dispatcher.  NOTE(review): the name line and the dispatch call were lost
 * in extraction -- confirm. */
1979 {
1980  rb_thread_t *th;
1981  GetThreadPtr(thval, th);
1983 }
1984 
1985 static void
/* NOTE(review): name and body lines lost in extraction; from the caller at
 * rb_thread_wakeup_alive below, this is presumably rb_threadptr_ready --
 * confirm against the original source. */
1987 {
1989 }
1990 
1991 static VALUE
/* Build an exception from argc/argv (RuntimeError when argc == 0) and
 * deliver it to th; a no-op on dead threads.  NOTE(review): the name line
 * and the enqueue/interrupt lines after exception construction were lost in
 * extraction -- confirm. */
1993 {
1994  VALUE exc;
1995 
1996  if (rb_threadptr_dead(th)) {
1997  return Qnil;
1998  }
1999 
2000  if (argc == 0) {
2001  exc = rb_exc_new(rb_eRuntimeError, 0, 0);
2002  }
2003  else {
2004  exc = rb_make_exception(argc, argv);
2005  }
2008  return Qnil;
2009 }
2010 
2011 void
/* Deliver SignalException(sig) to the main thread.  NOTE(review): name line
 * lost in extraction -- presumably rb_threadptr_signal_raise(rb_thread_t *th, int sig). */
2013 {
2014  VALUE argv[2];
2015 
2016  argv[0] = rb_eSignal;
2017  argv[1] = INT2FIX(sig);
2018  rb_threadptr_raise(th->vm->main_thread, 2, argv);
2019 }
2020 
2021 void
/* Deliver SystemExit("exit") to the main thread.  NOTE(review): name line
 * lost in extraction. */
2023 {
2024  VALUE argv[2];
2025 
2026  argv[0] = rb_eSystemExit;
2027  argv[1] = rb_str_new2("exit");
2028  rb_threadptr_raise(th->vm->main_thread, 2, argv);
2029 }
2030 
2031 #if defined(POSIX_SIGNAL) && defined(SIGSEGV) && defined(HAVE_SIGALTSTACK)
2032 #define USE_SIGALTSTACK
2033 #endif
2034 
2035 void
/* Report machine stack overflow for th.  Without sigaltstack support,
 * raise via errinfo/TAG_RAISE.  NOTE(review): the name line and the
 * USE_SIGALTSTACK branch body were lost in extraction -- confirm. */
2037 {
2038  th->raised_flag = 0;
2039 #ifdef USE_SIGALTSTACK
2041 #else
2042  th->errinfo = sysstack_error;
2043  TH_JUMP_TAG(th, TAG_RAISE);
2044 #endif
2045 }
2046 
2047 int
/* Test-and-set of RAISED_EXCEPTION in th->raised_flag: returns 1 if it was
 * already set.  NOTE(review): the name line and the line setting the flag
 * before the final return were lost in extraction -- confirm. */
2049 {
2050  if (th->raised_flag & RAISED_EXCEPTION) {
2051  return 1;
2052  }
2054  return 0;
2055 }
2056 
2057 int
/* Clear RAISED_EXCEPTION from th->raised_flag; returns 1 iff it was set.
 * NOTE(review): name line lost in extraction. */
2059 {
2060  if (!(th->raised_flag & RAISED_EXCEPTION)) {
2061  return 0;
2062  }
2063  th->raised_flag &= ~RAISED_EXCEPTION;
2064  return 1;
2065 }
2066 
2067 static int
/* st_foreach callback for rb_thread_fd_close: for each living thread
 * blocked on the closing fd, take some action.  NOTE(review): the name
 * line and the body of the if (presumably raising IOError on the waiting
 * thread) were lost in extraction -- confirm. */
2069 {
2070  int fd = (int)data;
2071  rb_thread_t *th;
2072  GetThreadPtr((VALUE)key, th);
2073 
2074  if (th->waiting_fd == fd) {
2078  }
2079  return ST_CONTINUE;
2080 }
2081 
2082 void
/* Notify all living threads that fd is being closed (see thread_fd_close_i).
 * NOTE(review): name line lost in extraction -- presumably
 * rb_thread_fd_close(int fd). */
2084 {
2085  st_foreach(GET_THREAD()->vm->living_threads, thread_fd_close_i, (st_index_t)fd);
2086 }
2087 
2088 /*
2089  * call-seq:
2090  * thr.raise
2091  * thr.raise(string)
2092  * thr.raise(exception [, string [, array]])
2093  *
2094  * Raises an exception (see <code>Kernel::raise</code>) from <i>thr</i>. The
2095  * caller does not have to be <i>thr</i>.
2096  *
2097  * Thread.abort_on_exception = true
2098  * a = Thread.new { sleep(200) }
2099  * a.raise("Gotcha")
2100  *
2101  * <em>produces:</em>
2102  *
2103  * prog.rb:3: Gotcha (RuntimeError)
2104  * from prog.rb:2:in `initialize'
2105  * from prog.rb:2:in `new'
2106  * from prog.rb:2
2107  */
2108 
2109 static VALUE
/* Thread#raise implementation (see rdoc above).  NOTE(review): name line
 * lost in extraction -- presumably thread_raise_m(int argc, VALUE *argv, VALUE self). */
2111 {
2112  rb_thread_t *target_th;
2113  rb_thread_t *th = GET_THREAD();
2114  GetThreadPtr(self, target_th);
2115  rb_threadptr_raise(target_th, argc, argv);
2116 
2117  /* To perform Thread.current.raise as Kernel.raise */
2118  if (th == target_th) {
2119  RUBY_VM_CHECK_INTS(th);
2120  }
2121  return Qnil;
2122 }
2123 
2124 
2125 /*
2126  * call-seq:
2127  * thr.exit -> thr or nil
2128  * thr.kill -> thr or nil
2129  * thr.terminate -> thr or nil
2130  *
2131  * Terminates <i>thr</i> and schedules another thread to be run. If this thread
2132  * is already marked to be killed, <code>exit</code> returns the
2133  * <code>Thread</code>. If this is the main thread, or the last thread, exits
2134  * the process.
2135  */
2136 
2137 VALUE
/* Thread#kill / #exit / #terminate implementation (see rdoc above).
 * NOTE(review): the name line and several body lines (main-thread exit,
 * self-kill and cross-thread kill dispatch) were lost in extraction --
 * confirm against the original source. */
2139 {
2140  rb_thread_t *th;
2141 
2142  GetThreadPtr(thread, th);
2143 
2144  if (th != GET_THREAD() && th->safe_level < 4) {
2145  rb_secure(4);
2146  }
2147  if (th->to_kill || th->status == THREAD_KILLED) {
2148  return thread;
2149  }
2150  if (th == th->vm->main_thread) {
2152  }
2153 
2154  thread_debug("rb_thread_kill: %p (%p)\n", (void *)th, (void *)th->thread_id);
2155 
2156  if (th == GET_THREAD()) {
2157  /* kill myself immediately */
2159  }
2160  else {
2163  }
2164  return thread;
2165 }
2166 
2167 
2168 /*
2169  * call-seq:
2170  * Thread.kill(thread) -> thread
2171  *
2172  * Causes the given <em>thread</em> to exit (see <code>Thread::exit</code>).
2173  *
2174  * count = 0
2175  * a = Thread.new { loop { count += 1 } }
2176  * sleep(0.1) #=> 0
2177  * Thread.kill(a) #=> #<Thread:0x401b3d30 dead>
2178  * count #=> 93947
2179  * a.alive? #=> false
2180  */
2181 
2182 static VALUE
/* Thread.kill(thread) -- delegates to rb_thread_kill.  NOTE(review): name
 * line lost in extraction. */
2184 {
2185  return rb_thread_kill(th);
2186 }
2187 
2188 
2189 /*
2190  * call-seq:
2191  * Thread.exit -> thread
2192  *
2193  * Terminates the currently running thread and schedules another thread to be
2194  * run. If this thread is already marked to be killed, <code>exit</code>
2195  * returns the <code>Thread</code>. If this is the main thread, or the last
2196  * thread, exit the process.
2197  */
2198 
2199 static VALUE
/* Thread.exit -- kill the current thread.  NOTE(review): name line lost in
 * extraction. */
2201 {
2202  rb_thread_t *th = GET_THREAD();
2203  return rb_thread_kill(th->self);
2204 }
2205 
2206 
2207 /*
2208  * call-seq:
2209  * thr.wakeup -> thr
2210  *
2211  * Marks <i>thr</i> as eligible for scheduling (it may still remain blocked on
2212  * I/O, however). Does not invoke the scheduler (see <code>Thread#run</code>).
2213  *
2214  * c = Thread.new { Thread.stop; puts "hey!" }
2215  * sleep 0.1 while c.status!='sleep'
2216  * c.wakeup
2217  * c.join
2218  *
2219  * <em>produces:</em>
2220  *
2221  * hey!
2222  */
2223 
2224 VALUE
/* Thread#wakeup: wake the thread, raising ThreadError if it is dead.
 * NOTE(review): name line lost in extraction -- presumably
 * rb_thread_wakeup(VALUE thread). */
2226 {
2227  if (!RTEST(rb_thread_wakeup_alive(thread))) {
2228  rb_raise(rb_eThreadError, "killed thread");
2229  }
2230  return thread;
2231 }
2232 
2233 VALUE
/* Wake the thread if it is alive: mark it ready and, if it was stopped,
 * make it runnable; returns Qnil for a killed thread.  NOTE(review): name
 * line lost in extraction. */
2235 {
2236  rb_thread_t *th;
2237  GetThreadPtr(thread, th);
2238 
2239  if (th->status == THREAD_KILLED) {
2240  return Qnil;
2241  }
2242  rb_threadptr_ready(th);
2243  if (th->status == THREAD_STOPPED || th->status == THREAD_STOPPED_FOREVER)
2244  th->status = THREAD_RUNNABLE;
2245  return thread;
2246 }
2247 
2248 
2249 /*
2250  * call-seq:
2251  * thr.run -> thr
2252  *
2253  * Wakes up <i>thr</i>, making it eligible for scheduling.
2254  *
2255  * a = Thread.new { puts "a"; Thread.stop; puts "c" }
2256  * sleep 0.1 while a.status!='sleep'
2257  * puts "Got here"
2258  * a.run
2259  * a.join
2260  *
2261  * <em>produces:</em>
2262  *
2263  * a
2264  * Got here
2265  * c
2266  */
2267 
2268 VALUE
/* Thread#run: wake the thread and yield the scheduler.  NOTE(review): the
 * name line and the scheduling call after wakeup were lost in extraction --
 * confirm. */
2270 {
2271  rb_thread_wakeup(thread);
2273  return thread;
2274 }
2275 
2276 
2277 /*
2278  * call-seq:
2279  * Thread.stop -> nil
2280  *
2281  * Stops execution of the current thread, putting it into a ``sleep'' state,
2282  * and schedules execution of another thread.
2283  *
2284  * a = Thread.new { print "a"; Thread.stop; print "c" }
2285  * sleep 0.1 while a.status!='sleep'
2286  * print "b"
2287  * a.run
2288  * a.join
2289  *
2290  * <em>produces:</em>
2291  *
2292  * abc
2293  */
2294 
2295 VALUE
/* Thread.stop: sleep the current thread forever (until woken).
 * NOTE(review): the name line, the raise call for the lone-thread error,
 * and the sleep call were lost in extraction -- confirm. */
2297 {
2298  if (rb_thread_alone()) {
2300  "stopping only thread\n\tnote: use sleep to stop forever");
2301  }
2303  return Qnil;
2304 }
2305 
2306 static int
/* st_foreach callback for Thread.list: push runnable/stopped threads onto
 * the result array.  NOTE(review): the name line and one case label
 * (presumably THREAD_STOPPED_FOREVER) were lost in extraction -- confirm. */
2308 {
2309  VALUE ary = (VALUE)data;
2310  rb_thread_t *th;
2311  GetThreadPtr((VALUE)key, th);
2312 
2313  switch (th->status) {
2314  case THREAD_RUNNABLE:
2315  case THREAD_STOPPED:
2317  rb_ary_push(ary, th->self);
2318  default:
2319  break;
2320  }
2321  return ST_CONTINUE;
2322 }
2323 
2324 /********************************************************************/
2325 
2326 /*
2327  * call-seq:
2328  * Thread.list -> array
2329  *
2330  * Returns an array of <code>Thread</code> objects for all threads that are
2331  * either runnable or stopped.
2332  *
2333  * Thread.new { sleep(200) }
2334  * Thread.new { 1000000.times {|i| i*i } }
2335  * Thread.new { Thread.stop }
2336  * Thread.list.each {|t| p t}
2337  *
2338  * <em>produces:</em>
2339  *
2340  * #<Thread:0x401b3e84 sleep>
2341  * #<Thread:0x401b3f38 run>
2342  * #<Thread:0x401b3fb0 sleep>
2343  * #<Thread:0x401bdf4c run>
2344  */
2345 
2346 VALUE
/* Thread.list: collect living runnable/stopped threads into a new array.
 * NOTE(review): name line lost in extraction -- presumably rb_thread_list(void). */
2348 {
2349  VALUE ary = rb_ary_new();
2350  st_foreach(GET_THREAD()->vm->living_threads, thread_list_i, ary);
2351  return ary;
2352 }
2353 
2354 VALUE
/* Return the current thread's Thread object.  NOTE(review): name line lost
 * in extraction -- presumably rb_thread_current(void). */
2356 {
2357  return GET_THREAD()->self;
2358 }
2359 
2360 /*
2361  * call-seq:
2362  * Thread.current -> thread
2363  *
2364  * Returns the currently executing thread.
2365  *
2366  * Thread.current #=> #<Thread:0x401bdf4c run>
2367  */
2368 
2369 static VALUE
/* Thread.current -- delegates to rb_thread_current.  NOTE(review): name
 * line lost in extraction. */
2371 {
2372  return rb_thread_current();
2373 }
2374 
2375 VALUE
/* Return the VM's main thread object.  NOTE(review): name line lost in
 * extraction -- presumably rb_thread_main(void). */
2377 {
2378  return GET_THREAD()->vm->main_thread->self;
2379 }
2380 
2381 /*
2382  * call-seq:
2383  * Thread.main -> thread
2384  *
2385  * Returns the main thread.
2386  */
2387 
2388 static VALUE
/* Thread.main -- delegates to rb_thread_main.  NOTE(review): name line lost
 * in extraction. */
2390 {
2391  return rb_thread_main();
2392 }
2393 
2394 
2395 /*
2396  * call-seq:
2397  * Thread.abort_on_exception -> true or false
2398  *
2399  * Returns the status of the global ``abort on exception'' condition. The
2400  * default is <code>false</code>. When set to <code>true</code>, or if the
2401  * global <code>$DEBUG</code> flag is <code>true</code> (perhaps because the
2402  * command line option <code>-d</code> was specified) all threads will abort
2403  * (the process will <code>exit(0)</code>) if an exception is raised in any
2404  * thread. See also <code>Thread::abort_on_exception=</code>.
2405  */
2406 
2407 static VALUE
/* Thread.abort_on_exception getter (see rdoc above).  NOTE(review): the
 * name line and the returning body line were lost in extraction --
 * presumably returns the VM-global abort_on_exception flag; confirm. */
2409 {
2411 }
2412 
2413 
2414 /*
2415  * call-seq:
2416  * Thread.abort_on_exception= boolean -> true or false
2417  *
2418  * When set to <code>true</code>, all threads will abort if an exception is
2419  * raised. Returns the new state.
2420  *
2421  * Thread.abort_on_exception = true
2422  * t1 = Thread.new do
2423  * puts "In new thread"
2424  * raise "Exception from thread"
2425  * end
2426  * sleep(1)
2427  * puts "not reached"
2428  *
2429  * <em>produces:</em>
2430  *
2431  * In new thread
2432  * prog.rb:4: Exception from thread (RuntimeError)
2433  * from prog.rb:2:in `initialize'
2434  * from prog.rb:2:in `new'
2435  * from prog.rb:2
2436  */
2437 
2438 static VALUE
/* Thread.abort_on_exception= setter (see rdoc above).  NOTE(review): the
 * name line and the assignment to the global flag were lost in extraction
 * -- confirm. */
2440 {
2441  rb_secure(4);
2443  return val;
2444 }
2445 
2446 
2447 /*
2448  * call-seq:
2449  * thr.abort_on_exception -> true or false
2450  *
2451  * Returns the status of the thread-local ``abort on exception'' condition for
2452  * <i>thr</i>. The default is <code>false</code>. See also
2453  * <code>Thread::abort_on_exception=</code>.
2454  */
2455 
2456 static VALUE
/* Thread#abort_on_exception getter: returns the per-thread flag.
 * NOTE(review): name line lost in extraction. */
2458 {
2459  rb_thread_t *th;
2460  GetThreadPtr(thread, th);
2461  return th->abort_on_exception ? Qtrue : Qfalse;
2462 }
2463 
2464 
2465 /*
2466  * call-seq:
2467  * thr.abort_on_exception= boolean -> true or false
2468  *
2469  * When set to <code>true</code>, causes all threads (including the main
2470  * program) to abort if an exception is raised in <i>thr</i>. The process will
2471  * effectively <code>exit(0)</code>.
2472  */
2473 
2474 static VALUE
/* Thread#abort_on_exception= setter: stores truthiness of val in the
 * per-thread flag (requires $SAFE < 4).  NOTE(review): name line lost in
 * extraction. */
2476 {
2477  rb_thread_t *th;
2478  rb_secure(4);
2479 
2480  GetThreadPtr(thread, th);
2481  th->abort_on_exception = RTEST(val);
2482  return val;
2483 }
2484 
2485 
2486 /*
2487  * call-seq:
2488  * thr.group -> thgrp or nil
2489  *
2490  * Returns the <code>ThreadGroup</code> which contains <i>thr</i>, or nil if
2491  * the thread is not a member of any group.
2492  *
2493  * Thread.main.group #=> #<ThreadGroup:0x4029d914>
2494  */
2495 
2496 VALUE
/* Thread#group: return the thread's ThreadGroup, or nil when unset.
 * NOTE(review): name line lost in extraction -- presumably
 * rb_thread_group(VALUE thread). */
2498 {
2499  rb_thread_t *th;
2500  VALUE group;
2501  GetThreadPtr(thread, th);
2502  group = th->thgroup;
2503 
2504  if (!group) {
2505  group = Qnil;
2506  }
2507  return group;
2508 }
2509 
2510 static const char *
/* Map a thread's status to the string Thread#status/#inspect expose.
 * NOTE(review): the name line and one case label (presumably
 * THREAD_STOPPED_FOREVER, which also reports "sleep") were lost in
 * extraction -- confirm. */
2512 {
2513  switch (th->status) {
2514  case THREAD_RUNNABLE:
2515  if (th->to_kill)
2516  return "aborting";
2517  else
2518  return "run";
2519  case THREAD_STOPPED:
2521  return "sleep";
2522  case THREAD_KILLED:
2523  return "dead";
2524  default:
2525  return "unknown";
2526  }
2527 }
2528 
2529 static int
/* True iff the thread has been killed.  NOTE(review): name line lost in
 * extraction -- presumably rb_threadptr_dead(rb_thread_t *th). */
2531 {
2532  return th->status == THREAD_KILLED;
2533 }
2534 
2535 
2536 /*
2537  * call-seq:
2538  * thr.status -> string, false or nil
2539  *
2540  * Returns the status of <i>thr</i>: ``<code>sleep</code>'' if <i>thr</i> is
2541  * sleeping or waiting on I/O, ``<code>run</code>'' if <i>thr</i> is executing,
2542  * ``<code>aborting</code>'' if <i>thr</i> is aborting, <code>false</code> if
2543  * <i>thr</i> terminated normally, and <code>nil</code> if <i>thr</i>
2544  * terminated with an exception.
2545  *
2546  * a = Thread.new { raise("die now") }
2547  * b = Thread.new { Thread.stop }
2548  * c = Thread.new { Thread.exit }
2549  * d = Thread.new { sleep }
2550  * d.kill #=> #<Thread:0x401b3678 aborting>
2551  * a.status #=> nil
2552  * b.status #=> "sleep"
2553  * c.status #=> false
2554  * d.status #=> "aborting"
2555  * Thread.current.status #=> "run"
2556  */
2557 
2558 static VALUE
/* Thread#status implementation (see rdoc above): nil when the thread died
 * with an exception, false when it terminated normally, otherwise the
 * status-name string.  NOTE(review): name line lost in extraction. */
2560 {
2561  rb_thread_t *th;
2562  GetThreadPtr(thread, th);
2563 
2564  if (rb_threadptr_dead(th)) {
2565  if (!NIL_P(th->errinfo) && !FIXNUM_P(th->errinfo)
2566  /* TODO */ ) {
2567  return Qnil;
2568  }
2569  return Qfalse;
2570  }
2571  return rb_str_new2(thread_status_name(th));
2572 }
2573 
2574 
2575 /*
2576  * call-seq:
2577  * thr.alive? -> true or false
2578  *
2579  * Returns <code>true</code> if <i>thr</i> is running or sleeping.
2580  *
2581  * thr = Thread.new { }
2582  * thr.join #=> #<Thread:0x401b3fb0 dead>
2583  * Thread.current.alive? #=> true
2584  * thr.alive? #=> false
2585  */
2586 
2587 static VALUE
/* Thread#alive?: true unless the thread is dead.  NOTE(review): name line
 * lost in extraction. */
2589 {
2590  rb_thread_t *th;
2591  GetThreadPtr(thread, th);
2592 
2593  if (rb_threadptr_dead(th))
2594  return Qfalse;
2595  return Qtrue;
2596 }
2597 
2598 /*
2599  * call-seq:
2600  * thr.stop? -> true or false
2601  *
2602  * Returns <code>true</code> if <i>thr</i> is dead or sleeping.
2603  *
2604  * a = Thread.new { Thread.stop }
2605  * b = Thread.current
2606  * a.stop? #=> true
2607  * b.stop? #=> false
2608  */
2609 
2610 static VALUE
2612 {
2613  rb_thread_t *th;
2614  GetThreadPtr(thread, th);
2615 
2616  if (rb_threadptr_dead(th))
2617  return Qtrue;
2618  if (th->status == THREAD_STOPPED || th->status == THREAD_STOPPED_FOREVER)
2619  return Qtrue;
2620  return Qfalse;
2621 }
2622 
2623 /*
2624  * call-seq:
2625  * thr.safe_level -> integer
2626  *
2627  * Returns the safe level in effect for <i>thr</i>. Setting thread-local safe
2628  * levels can help when implementing sandboxes which run insecure code.
2629  *
2630  * thr = Thread.new { $SAFE = 3; sleep }
2631  * Thread.current.safe_level #=> 0
2632  * thr.safe_level #=> 3
2633  */
2634 
2635 static VALUE
2637 {
2638  rb_thread_t *th;
2639  GetThreadPtr(thread, th);
2640 
2641  return INT2NUM(th->safe_level);
2642 }
2643 
2644 /*
2645  * call-seq:
2646  * thr.inspect -> string
2647  *
2648  * Dump the name, id, and status of _thr_ to a string.
2649  */
2650 
2651 static VALUE
2653 {
2654  const char *cname = rb_obj_classname(thread);
2655  rb_thread_t *th;
2656  const char *status;
2657  VALUE str;
2658 
2659  GetThreadPtr(thread, th);
2660  status = thread_status_name(th);
2661  str = rb_sprintf("#<%s:%p %s>", cname, (void *)thread, status);
2662  OBJ_INFECT(str, thread);
2663 
2664  return str;
2665 }
2666 
2667 VALUE
2669 {
2670  rb_thread_t *th;
2671  st_data_t val;
2672 
2673  GetThreadPtr(thread, th);
2674  if (rb_safe_level() >= 4 && th != GET_THREAD()) {
2675  rb_raise(rb_eSecurityError, "Insecure: thread locals");
2676  }
2677  if (!th->local_storage) {
2678  return Qnil;
2679  }
2680  if (st_lookup(th->local_storage, id, &val)) {
2681  return (VALUE)val;
2682  }
2683  return Qnil;
2684 }
2685 
2686 /*
2687  * call-seq:
2688  * thr[sym] -> obj or nil
2689  *
2690  * Attribute Reference---Returns the value of a fiber-local variable (current thread's root fiber
2691  * if not explicitly inside a Fiber), using either a symbol or a string name.
2692  * If the specified variable does not exist, returns <code>nil</code>.
2693  *
2694  * [
2695  * Thread.new { Thread.current["name"] = "A" },
2696  * Thread.new { Thread.current[:name] = "B" },
2697  * Thread.new { Thread.current["name"] = "C" }
2698  * ].each do |th|
2699  * th.join
2700  * puts "#{th.inspect}: #{th[:name]}"
2701  * end
2702  *
2703  * <em>produces:</em>
2704  *
2705  * #<Thread:0x00000002a54220 dead>: A
2706  * #<Thread:0x00000002a541a8 dead>: B
2707  * #<Thread:0x00000002a54130 dead>: C
2708  *
2709  * Thread#[] and Thread#[]= are not thread-local but fiber-local.
2710  * This confusion did not exist in Ruby 1.8 because
2711  * fibers were only available since Ruby 1.9.
2712  * Ruby 1.9 chooses that the methods behave fiber-local to save
2713  * following idiom for dynamic scope.
2714  *
2715  * def meth(newvalue)
2716  * begin
2717  * oldvalue = Thread.current[:name]
2718  * Thread.current[:name] = newvalue
2719  * yield
2720  * ensure
2721  * Thread.current[:name] = oldvalue
2722  * end
2723  * end
2724  *
2725  * The idiom may not work as dynamic scope if the methods are thread-local
2726  * and a given block switches fiber.
2727  *
2728  * f = Fiber.new {
2729  * meth(1) {
2730  * Fiber.yield
2731  * }
2732  * }
2733  * meth(2) {
2734  * f.resume
2735  * }
2736  * f.resume
2737  * p Thread.current[:name]
2738  * #=> nil if fiber-local
2739  * #=> 2 if thread-local (The value 2 is leaked to outside of meth method.)
2740  *
2741  * For thread-local variables, please see <code>Thread#thread_local_get</code>
2742  * and <code>Thread#thread_local_set</code>.
2743  *
2744  */
2745 
2746 static VALUE
2748 {
2749  return rb_thread_local_aref(thread, rb_to_id(id));
2750 }
2751 
2752 VALUE
2754 {
2755  rb_thread_t *th;
2756  GetThreadPtr(thread, th);
2757 
2758  if (rb_safe_level() >= 4 && th != GET_THREAD()) {
2759  rb_raise(rb_eSecurityError, "Insecure: can't modify thread locals");
2760  }
2761  if (OBJ_FROZEN(thread)) {
2762  rb_error_frozen("thread locals");
2763  }
2764  if (!th->local_storage) {
2766  }
2767  if (NIL_P(val)) {
2768  st_delete_wrap(th->local_storage, id);
2769  return Qnil;
2770  }
2771  st_insert(th->local_storage, id, val);
2772  return val;
2773 }
2774 
2775 /*
2776  * call-seq:
2777  * thr[sym] = obj -> obj
2778  *
2779  * Attribute Assignment---Sets or creates the value of a fiber-local variable,
2780  * using either a symbol or a string. See also <code>Thread#[]</code>. For
2781  * thread-local variables, please see <code>Thread#thread_variable_set</code>
2782  * and <code>Thread#thread_variable_get</code>.
2783  */
2784 
2785 static VALUE
2787 {
2788  return rb_thread_local_aset(self, rb_to_id(id), val);
2789 }
2790 
2791 /*
2792  * call-seq:
2793  * thr.thread_variable_get(key) -> obj or nil
2794  *
2795  * Returns the value of a thread local variable that has been set. Note that
2796  * these are different than fiber local values. For fiber local values,
2797  * please see Thread#[] and Thread#[]=.
2798  *
2799  * Thread local values are carried along with threads, and do not respect
2800  * fibers. For example:
2801  *
2802  * Thread.new {
2803  * Thread.current.thread_variable_set("foo", "bar") # set a thread local
2804  * Thread.current["foo"] = "bar" # set a fiber local
2805  *
2806  * Fiber.new {
2807  * Fiber.yield [
2808  * Thread.current.thread_variable_get("foo"), # get the thread local
2809  * Thread.current["foo"], # get the fiber local
2810  * ]
2811  * }.resume
2812  * }.join.value # => ['bar', nil]
2813  *
2814  * The value "bar" is returned for the thread local, where nil is returned
2815  * for the fiber local. The fiber is executed in the same thread, so the
2816  * thread local values are available.
2817  *
2818  * See also Thread#[]
2819  */
2820 
2821 static VALUE
2823 {
2824  VALUE locals;
2825  rb_thread_t *th;
2826 
2827  GetThreadPtr(thread, th);
2828 
2829  if (rb_safe_level() >= 4 && th != GET_THREAD()) {
2830  rb_raise(rb_eSecurityError, "Insecure: can't modify thread locals");
2831  }
2832 
2833  locals = rb_iv_get(thread, "locals");
2834  return rb_hash_aref(locals, ID2SYM(rb_to_id(id)));
2835 }
2836 
2837 /*
2838  * call-seq:
2839  * thr.thread_variable_set(key, value)
2840  *
2841  * Sets a thread local with +key+ to +value+. Note that these are local to
2842  * threads, and not to fibers. Please see Thread#thread_variable_get and
2843  * Thread#[] for more information.
2844  */
2845 
2846 static VALUE
2848 {
2849  VALUE locals;
2850  rb_thread_t *th;
2851 
2852  GetThreadPtr(thread, th);
2853 
2854  if (rb_safe_level() >= 4 && th != GET_THREAD()) {
2855  rb_raise(rb_eSecurityError, "Insecure: can't modify thread locals");
2856  }
2857  if (OBJ_FROZEN(thread)) {
2858  rb_error_frozen("thread locals");
2859  }
2860 
2861  locals = rb_iv_get(thread, "locals");
2862  return rb_hash_aset(locals, ID2SYM(rb_to_id(id)), val);
2863 }
2864 
2865 /*
2866  * call-seq:
2867  * thr.key?(sym) -> true or false
2868  *
2869  * Returns <code>true</code> if the given string (or symbol) exists as a
2870  * fiber-local variable.
2871  *
2872  * me = Thread.current
2873  * me[:oliver] = "a"
2874  * me.key?(:oliver) #=> true
2875  * me.key?(:stanley) #=> false
2876  */
2877 
2878 static VALUE
2880 {
2881  rb_thread_t *th;
2882  ID id = rb_to_id(key);
2883 
2884  GetThreadPtr(self, th);
2885 
2886  if (!th->local_storage) {
2887  return Qfalse;
2888  }
2889  if (st_lookup(th->local_storage, id, 0)) {
2890  return Qtrue;
2891  }
2892  return Qfalse;
2893 }
2894 
2895 static int
2897 {
2898  rb_ary_push(ary, ID2SYM(key));
2899  return ST_CONTINUE;
2900 }
2901 
2902 static int
2904 {
2905  return (int)vm->living_threads->num_entries;
2906 }
2907 
2908 int
2910 {
2911  int num = 1;
2912  if (GET_THREAD()->vm->living_threads) {
2913  num = vm_living_thread_num(GET_THREAD()->vm);
2914  thread_debug("rb_thread_alone: %d\n", num);
2915  }
2916  return num == 1;
2917 }
2918 
2919 /*
2920  * call-seq:
2921  * thr.keys -> array
2922  *
2923  * Returns an array of the names of the fiber-local variables (as Symbols).
2924  *
2925  * thr = Thread.new do
2926  * Thread.current[:cat] = 'meow'
2927  * Thread.current["dog"] = 'woof'
2928  * end
2929  * thr.join #=> #<Thread:0x401b3f10 dead>
2930  * thr.keys #=> [:dog, :cat]
2931  */
2932 
2933 static VALUE
2935 {
2936  rb_thread_t *th;
2937  VALUE ary = rb_ary_new();
2938  GetThreadPtr(self, th);
2939 
2940  if (th->local_storage) {
2942  }
2943  return ary;
2944 }
2945 
2946 static int
2948 {
2949  rb_ary_push(ary, key);
2950  return ST_CONTINUE;
2951 }
2952 
2953 /*
2954  * call-seq:
2955  * thr.thread_variables -> array
2956  *
2957  * Returns an array of the names of the thread-local variables (as Symbols).
2958  *
2959  * thr = Thread.new do
2960  * Thread.current.thread_variable_set(:cat, 'meow')
2961  * Thread.current.thread_variable_set("dog", 'woof')
2962  * end
2963  * thr.join #=> #<Thread:0x401b3f10 dead>
2964  * thr.thread_variables #=> [:dog, :cat]
2965  *
2966  * Note that these are not fiber local variables. Please see Thread#[] and
2967  * Thread#thread_variable_get for more details.
2968  */
2969 
2970 static VALUE
2972 {
2973  VALUE locals;
2974  VALUE ary;
2975 
2976  locals = rb_iv_get(thread, "locals");
2977  ary = rb_ary_new();
2978  rb_hash_foreach(locals, keys_i, ary);
2979 
2980  return ary;
2981 }
2982 
2983 /*
2984  * call-seq:
2985  * thr.thread_variable?(key) -> true or false
2986  *
2987  * Returns <code>true</code> if the given string (or symbol) exists as a
2988  * thread-local variable.
2989  *
2990  * me = Thread.current
2991  * me.thread_variable_set(:oliver, "a")
2992  * me.thread_variable?(:oliver) #=> true
2993  * me.thread_variable?(:stanley) #=> false
2994  *
2995  * Note that these are not fiber local variables. Please see Thread#[] and
2996  * Thread#thread_variable_get for more details.
2997  */
2998 
2999 static VALUE
3001 {
3002  VALUE locals;
3003 
3004  locals = rb_iv_get(thread, "locals");
3005 
3006  if (!RHASH(locals)->ntbl)
3007  return Qfalse;
3008 
3009  if (st_lookup(RHASH(locals)->ntbl, ID2SYM(rb_to_id(key)), 0)) {
3010  return Qtrue;
3011  }
3012 
3013  return Qfalse;
3014 }
3015 
3016 /*
3017  * call-seq:
3018  * thr.priority -> integer
3019  *
3020  * Returns the priority of <i>thr</i>. Default is inherited from the
3021  * current thread which created the new thread, or zero for the
3022  * initial main thread; higher-priority threads will run more frequently
3023  * than lower-priority threads (but lower-priority threads can also run).
3024  *
3025  * This is just a hint for the Ruby thread scheduler. It may be ignored on some
3026  * platforms.
3027  *
3028  * Thread.current.priority #=> 0
3029  */
3030 
3031 static VALUE
3033 {
3034  rb_thread_t *th;
3035  GetThreadPtr(thread, th);
3036  return INT2NUM(th->priority);
3037 }
3038 
3039 
3040 /*
3041  * call-seq:
3042  * thr.priority= integer -> thr
3043  *
3044  * Sets the priority of <i>thr</i> to <i>integer</i>. Higher-priority threads
3045  * will run more frequently than lower-priority threads (but lower-priority
3046  * threads can also run).
3047  *
3048  * This is just a hint for the Ruby thread scheduler. It may be ignored on some
3049  * platforms.
3050  *
3051  * count1 = count2 = 0
3052  * a = Thread.new do
3053  * loop { count1 += 1 }
3054  * end
3055  * a.priority = -1
3056  *
3057  * b = Thread.new do
3058  * loop { count2 += 1 }
3059  * end
3060  * b.priority = -2
3061  * sleep 1 #=> 1
3062  * count1 #=> 622504
3063  * count2 #=> 5832
3064  */
3065 
3066 static VALUE
3068 {
3069  rb_thread_t *th;
3070  int priority;
3071  GetThreadPtr(thread, th);
3072 
3073  rb_secure(4);
3074 
3075 #if USE_NATIVE_THREAD_PRIORITY
3076  th->priority = NUM2INT(prio);
3077  native_thread_apply_priority(th);
3078 #else
3079  priority = NUM2INT(prio);
3080  if (priority > RUBY_THREAD_PRIORITY_MAX) {
3081  priority = RUBY_THREAD_PRIORITY_MAX;
3082  }
3083  else if (priority < RUBY_THREAD_PRIORITY_MIN) {
3084  priority = RUBY_THREAD_PRIORITY_MIN;
3085  }
3086  th->priority = priority;
3087 #endif
3088  return INT2NUM(th->priority);
3089 }
3090 
3091 /* for IO */
3092 
3093 #if defined(NFDBITS) && defined(HAVE_RB_FD_INIT)
3094 
3095 /*
3096  * several Unix platforms support file descriptors bigger than FD_SETSIZE
3097  * in select(2) system call.
3098  *
3099  * - Linux 2.2.12 (?)
3100  * - NetBSD 1.2 (src/sys/kern/sys_generic.c:1.25)
3101  * select(2) documents how to allocate fd_set dynamically.
3102  * http://netbsd.gw.com/cgi-bin/man-cgi?select++NetBSD-4.0
3103  * - FreeBSD 2.2 (src/sys/kern/sys_generic.c:1.19)
3104  * - OpenBSD 2.0 (src/sys/kern/sys_generic.c:1.4)
3105  * select(2) documents how to allocate fd_set dynamically.
3106  * http://www.openbsd.org/cgi-bin/man.cgi?query=select&manpath=OpenBSD+4.4
3107  * - HP-UX documents how to allocate fd_set dynamically.
3108  * http://docs.hp.com/en/B2355-60105/select.2.html
3109  * - Solaris 8 has select_large_fdset
3110  * - Mac OS X 10.7 (Lion)
3111  * select(2) returns EINVAL if nfds is greater than FD_SET_SIZE and
3112  * _DARWIN_UNLIMITED_SELECT (or _DARWIN_C_SOURCE) isn't defined.
3113  * http://developer.apple.com/library/mac/#releasenotes/Darwin/SymbolVariantsRelNotes/_index.html
3114  *
3115  * When fd_set is not big enough to hold big file descriptors,
3116  * it should be allocated dynamically.
3117  * Note that this assumes fd_set is structured as bitmap.
3118  *
3119  * rb_fd_init allocates the memory.
3120  * rb_fd_term free the memory.
3121  * rb_fd_set may re-allocates bitmap.
3122  *
3123  * So rb_fd_set doesn't reject file descriptors bigger than FD_SETSIZE.
3124  */
3125 
3126 void
3127 rb_fd_init(rb_fdset_t *fds)
3128 {
3129  fds->maxfd = 0;
3130  fds->fdset = ALLOC(fd_set);
3131  FD_ZERO(fds->fdset);
3132 }
3133 
3134 void
3136 {
3137  size_t size = howmany(rb_fd_max(src), NFDBITS) * sizeof(fd_mask);
3138 
3139  if (size < sizeof(fd_set))
3140  size = sizeof(fd_set);
3141  dst->maxfd = src->maxfd;
3142  dst->fdset = xmalloc(size);
3143  memcpy(dst->fdset, src->fdset, size);
3144 }
3145 
3146 void
3147 rb_fd_term(rb_fdset_t *fds)
3148 {
3149  if (fds->fdset) xfree(fds->fdset);
3150  fds->maxfd = 0;
3151  fds->fdset = 0;
3152 }
3153 
3154 void
3155 rb_fd_zero(rb_fdset_t *fds)
3156 {
3157  if (fds->fdset)
3158  MEMZERO(fds->fdset, fd_mask, howmany(fds->maxfd, NFDBITS));
3159 }
3160 
3161 static void
3162 rb_fd_resize(int n, rb_fdset_t *fds)
3163 {
3164  size_t m = howmany(n + 1, NFDBITS) * sizeof(fd_mask);
3165  size_t o = howmany(fds->maxfd, NFDBITS) * sizeof(fd_mask);
3166 
3167  if (m < sizeof(fd_set)) m = sizeof(fd_set);
3168  if (o < sizeof(fd_set)) o = sizeof(fd_set);
3169 
3170  if (m > o) {
3171  fds->fdset = xrealloc(fds->fdset, m);
3172  memset((char *)fds->fdset + o, 0, m - o);
3173  }
3174  if (n >= fds->maxfd) fds->maxfd = n + 1;
3175 }
3176 
/* Add descriptor n to *fds, growing the bitmap first if necessary. */
void
rb_fd_set(int n, rb_fdset_t *fds)
{
    rb_fd_resize(n, fds);
    FD_SET(n, fds->fdset);
}
3183 
3184 void
3185 rb_fd_clr(int n, rb_fdset_t *fds)
3186 {
3187  if (n >= fds->maxfd) return;
3188  FD_CLR(n, fds->fdset);
3189 }
3190 
3191 int
3192 rb_fd_isset(int n, const rb_fdset_t *fds)
3193 {
3194  if (n >= fds->maxfd) return 0;
3195  return FD_ISSET(n, fds->fdset) != 0; /* "!= 0" avoids FreeBSD PR 91421 */
3196 }
3197 
3198 void
3199 rb_fd_copy(rb_fdset_t *dst, const fd_set *src, int max)
3200 {
3201  size_t size = howmany(max, NFDBITS) * sizeof(fd_mask);
3202 
3203  if (size < sizeof(fd_set)) size = sizeof(fd_set);
3204  dst->maxfd = max;
3205  dst->fdset = xrealloc(dst->fdset, size);
3206  memcpy(dst->fdset, src, size);
3207 }
3208 
3209 static void
3210 rb_fd_rcopy(fd_set *dst, rb_fdset_t *src)
3211 {
3212  size_t size = howmany(rb_fd_max(src), NFDBITS) * sizeof(fd_mask);
3213 
3214  if (size > sizeof(fd_set)) {
3215  rb_raise(rb_eArgError, "too large fdsets");
3216  }
3217  memcpy(dst, rb_fd_ptr(src), sizeof(fd_set));
3218 }
3219 
3220 void
3221 rb_fd_dup(rb_fdset_t *dst, const rb_fdset_t *src)
3222 {
3223  size_t size = howmany(rb_fd_max(src), NFDBITS) * sizeof(fd_mask);
3224 
3225  if (size < sizeof(fd_set))
3226  size = sizeof(fd_set);
3227  dst->maxfd = src->maxfd;
3228  dst->fdset = xrealloc(dst->fdset, size);
3229  memcpy(dst->fdset, src->fdset, size);
3230 }
3231 
3232 #ifdef __native_client__
3233 int select(int nfds, fd_set *readfds, fd_set *writefds,
3234  fd_set *exceptfds, struct timeval *timeout);
3235 #endif
3236 
3237 int
3238 rb_fd_select(int n, rb_fdset_t *readfds, rb_fdset_t *writefds, rb_fdset_t *exceptfds, struct timeval *timeout)
3239 {
3240  fd_set *r = NULL, *w = NULL, *e = NULL;
3241  if (readfds) {
3242  rb_fd_resize(n - 1, readfds);
3243  r = rb_fd_ptr(readfds);
3244  }
3245  if (writefds) {
3246  rb_fd_resize(n - 1, writefds);
3247  w = rb_fd_ptr(writefds);
3248  }
3249  if (exceptfds) {
3250  rb_fd_resize(n - 1, exceptfds);
3251  e = rb_fd_ptr(exceptfds);
3252  }
3253  return select(n, r, w, e, timeout);
3254 }
3255 
3256 #undef FD_ZERO
3257 #undef FD_SET
3258 #undef FD_CLR
3259 #undef FD_ISSET
3260 
3261 #define FD_ZERO(f) rb_fd_zero(f)
3262 #define FD_SET(i, f) rb_fd_set((i), (f))
3263 #define FD_CLR(i, f) rb_fd_clr((i), (f))
3264 #define FD_ISSET(i, f) rb_fd_isset((i), (f))
3265 
3266 #elif defined(_WIN32)
3267 
3268 void
3269 rb_fd_init(rb_fdset_t *set)
3270 {
3271  set->capa = FD_SETSIZE;
3272  set->fdset = ALLOC(fd_set);
3273  FD_ZERO(set->fdset);
3274 }
3275 
3276 void
3278 {
3279  rb_fd_init(dst);
3280  rb_fd_dup(dst, src);
3281 }
3282 
3283 static void
3284 rb_fd_rcopy(fd_set *dst, rb_fdset_t *src)
3285 {
3286  int max = rb_fd_max(src);
3287 
3288  /* we assume src is the result of select() with dst, so dst should be
3289  * larger or equal than src. */
3290  if (max > FD_SETSIZE || (UINT)max > dst->fd_count) {
3291  rb_raise(rb_eArgError, "too large fdsets");
3292  }
3293 
3294  memcpy(dst->fd_array, src->fdset->fd_array, max);
3295  dst->fd_count = max;
3296 }
3297 
3298 void
3299 rb_fd_term(rb_fdset_t *set)
3300 {
3301  xfree(set->fdset);
3302  set->fdset = NULL;
3303  set->capa = 0;
3304 }
3305 
3306 void
3307 rb_fd_set(int fd, rb_fdset_t *set)
3308 {
3309  unsigned int i;
3310  SOCKET s = rb_w32_get_osfhandle(fd);
3311 
3312  for (i = 0; i < set->fdset->fd_count; i++) {
3313  if (set->fdset->fd_array[i] == s) {
3314  return;
3315  }
3316  }
3317  if (set->fdset->fd_count >= (unsigned)set->capa) {
3318  set->capa = (set->fdset->fd_count / FD_SETSIZE + 1) * FD_SETSIZE;
3319  set->fdset = xrealloc(set->fdset, sizeof(unsigned int) + sizeof(SOCKET) * set->capa);
3320  }
3321  set->fdset->fd_array[set->fdset->fd_count++] = s;
3322 }
3323 
3324 #undef FD_ZERO
3325 #undef FD_SET
3326 #undef FD_CLR
3327 #undef FD_ISSET
3328 
3329 #define FD_ZERO(f) rb_fd_zero(f)
3330 #define FD_SET(i, f) rb_fd_set((i), (f))
3331 #define FD_CLR(i, f) rb_fd_clr((i), (f))
3332 #define FD_ISSET(i, f) rb_fd_isset((i), (f))
3333 
3334 #else
3335 #define rb_fd_rcopy(d, s) (*(d) = *(s))
3336 #endif
3337 
3338 static int
3339 do_select(int n, rb_fdset_t *read, rb_fdset_t *write, rb_fdset_t *except,
3340  struct timeval *timeout)
3341 {
3343  int lerrno;
3344  rb_fdset_t UNINITIALIZED_VAR(orig_read);
3345  rb_fdset_t UNINITIALIZED_VAR(orig_write);
3346  rb_fdset_t UNINITIALIZED_VAR(orig_except);
3347  double limit = 0;
3348  struct timeval wait_rest;
3349  rb_thread_t *th = GET_THREAD();
3350 
3351  if (timeout) {
3352  limit = timeofday();
3353  limit += (double)timeout->tv_sec+(double)timeout->tv_usec*1e-6;
3354  wait_rest = *timeout;
3355  timeout = &wait_rest;
3356  }
3357 
3358  if (read)
3359  rb_fd_init_copy(&orig_read, read);
3360  if (write)
3361  rb_fd_init_copy(&orig_write, write);
3362  if (except)
3363  rb_fd_init_copy(&orig_except, except);
3364 
3365  retry:
3366  lerrno = 0;
3367 
3368  BLOCKING_REGION({
3369  result = native_fd_select(n, read, write, except, timeout, th);
3370  if (result < 0) lerrno = errno;
3371  }, ubf_select, th, FALSE);
3372 
3374 
3375  errno = lerrno;
3376 
3377  if (result < 0) {
3378  switch (errno) {
3379  case EINTR:
3380 #ifdef ERESTART
3381  case ERESTART:
3382 #endif
3383  if (read)
3384  rb_fd_dup(read, &orig_read);
3385  if (write)
3386  rb_fd_dup(write, &orig_write);
3387  if (except)
3388  rb_fd_dup(except, &orig_except);
3389 
3390  if (timeout) {
3391  double d = limit - timeofday();
3392 
3393  wait_rest.tv_sec = (time_t)d;
3394  wait_rest.tv_usec = (int)((d-(double)wait_rest.tv_sec)*1e6);
3395  if (wait_rest.tv_sec < 0) wait_rest.tv_sec = 0;
3396  if (wait_rest.tv_usec < 0) wait_rest.tv_usec = 0;
3397  }
3398 
3399  goto retry;
3400  default:
3401  break;
3402  }
3403  }
3404 
3405  if (read)
3406  rb_fd_term(&orig_read);
3407  if (write)
3408  rb_fd_term(&orig_write);
3409  if (except)
3410  rb_fd_term(&orig_except);
3411 
3412  return result;
3413 }
3414 
3415 static void
3416 rb_thread_wait_fd_rw(int fd, int read)
3417 {
3418  int result = 0;
3419  int events = read ? RB_WAITFD_IN : RB_WAITFD_OUT;
3420 
3421  thread_debug("rb_thread_wait_fd_rw(%d, %s)\n", fd, read ? "read" : "write");
3422 
3423  if (fd < 0) {
3424  rb_raise(rb_eIOError, "closed stream");
3425  }
3426 
3427  result = rb_wait_for_single_fd(fd, events, NULL);
3428  if (result < 0) {
3429  rb_sys_fail(0);
3430  }
3431 
3432  thread_debug("rb_thread_wait_fd_rw(%d, %s): done\n", fd, read ? "read" : "write");
3433 }
3434 
3435 void
3437 {
3438  rb_thread_wait_fd_rw(fd, 1);
3439 }
3440 
3441 int
3443 {
3444  rb_thread_wait_fd_rw(fd, 0);
3445  return TRUE;
3446 }
3447 
3448 int
3449 rb_thread_select(int max, fd_set * read, fd_set * write, fd_set * except,
3450  struct timeval *timeout)
3451 {
3452  rb_fdset_t fdsets[3];
3453  rb_fdset_t *rfds = NULL;
3454  rb_fdset_t *wfds = NULL;
3455  rb_fdset_t *efds = NULL;
3456  int retval;
3457 
3458  if (read) {
3459  rfds = &fdsets[0];
3460  rb_fd_init(rfds);
3461  rb_fd_copy(rfds, read, max);
3462  }
3463  if (write) {
3464  wfds = &fdsets[1];
3465  rb_fd_init(wfds);
3466  rb_fd_copy(wfds, write, max);
3467  }
3468  if (except) {
3469  efds = &fdsets[2];
3470  rb_fd_init(efds);
3471  rb_fd_copy(efds, except, max);
3472  }
3473 
3474  retval = rb_thread_fd_select(max, rfds, wfds, efds, timeout);
3475 
3476  if (rfds) {
3477  rb_fd_rcopy(read, rfds);
3478  rb_fd_term(rfds);
3479  }
3480  if (wfds) {
3481  rb_fd_rcopy(write, wfds);
3482  rb_fd_term(wfds);
3483  }
3484  if (efds) {
3485  rb_fd_rcopy(except, efds);
3486  rb_fd_term(efds);
3487  }
3488 
3489  return retval;
3490 }
3491 
3492 int
3493 rb_thread_fd_select(int max, rb_fdset_t * read, rb_fdset_t * write, rb_fdset_t * except,
3494  struct timeval *timeout)
3495 {
3496  if (!read && !write && !except) {
3497  if (!timeout) {
3499  return 0;
3500  }
3501  rb_thread_wait_for(*timeout);
3502  return 0;
3503  }
3504 
3505  if (read) {
3506  rb_fd_resize(max - 1, read);
3507  }
3508  if (write) {
3509  rb_fd_resize(max - 1, write);
3510  }
3511  if (except) {
3512  rb_fd_resize(max - 1, except);
3513  }
3514  return do_select(max, read, write, except, timeout);
3515 }
3516 
3517 /*
3518  * poll() is supported by many OSes, but so far Linux is the only
3519  * one we know of that supports using poll() in all places select()
3520  * would work.
3521  */
3522 #if defined(HAVE_POLL) && defined(__linux__)
3523 # define USE_POLL
3524 #endif
3525 
3526 #ifdef USE_POLL
3527 
3528 /* The same with linux kernel. TODO: make platform independent definition. */
3529 #define POLLIN_SET (POLLRDNORM | POLLRDBAND | POLLIN | POLLHUP | POLLERR)
3530 #define POLLOUT_SET (POLLWRBAND | POLLWRNORM | POLLOUT | POLLERR)
3531 #define POLLEX_SET (POLLPRI)
3532 
3533 #ifndef HAVE_PPOLL
3534 /* TODO: don't ignore sigmask */
3535 int
3536 ppoll(struct pollfd *fds, nfds_t nfds,
3537  const struct timespec *ts, const sigset_t *sigmask)
3538 {
3539  int timeout_ms;
3540 
3541  if (ts) {
3542  int tmp, tmp2;
3543 
3544  if (ts->tv_sec > TIMET_MAX/1000)
3545  timeout_ms = -1;
3546  else {
3547  tmp = ts->tv_sec * 1000;
3548  tmp2 = ts->tv_nsec / (1000 * 1000);
3549  if (TIMET_MAX - tmp < tmp2)
3550  timeout_ms = -1;
3551  else
3552  timeout_ms = tmp + tmp2;
3553  }
3554  }
3555  else
3556  timeout_ms = -1;
3557 
3558  return poll(fds, nfds, timeout_ms);
3559 }
3560 #endif
3561 
3562 /*
3563  * returns a mask of events
3564  */
3565 int
3566 rb_wait_for_single_fd(int fd, int events, struct timeval *tv)
3567 {
3568  struct pollfd fds;
3569  int result = 0, lerrno;
3570  double limit = 0;
3571  struct timespec ts;
3572  struct timespec *timeout = NULL;
3573  rb_thread_t *th = GET_THREAD();
3574 
3575  if (tv) {
3576  ts.tv_sec = tv->tv_sec;
3577  ts.tv_nsec = tv->tv_usec * 1000;
3578  limit = timeofday();
3579  limit += (double)tv->tv_sec + (double)tv->tv_usec * 1e-6;
3580  timeout = &ts;
3581  }
3582 
3583  fds.fd = fd;
3584  fds.events = (short)events;
3585 
3586 retry:
3587  lerrno = 0;
3588  BLOCKING_REGION({
3589  result = ppoll(&fds, 1, timeout, NULL);
3590  if (result < 0) lerrno = errno;
3591  }, ubf_select, th, FALSE);
3592 
3594 
3595  if (result < 0) {
3596  errno = lerrno;
3597  switch (errno) {
3598  case EINTR:
3599 #ifdef ERESTART
3600  case ERESTART:
3601 #endif
3602  if (timeout) {
3603  double d = limit - timeofday();
3604 
3605  ts.tv_sec = (long)d;
3606  ts.tv_nsec = (long)((d - (double)ts.tv_sec) * 1e9);
3607  if (ts.tv_sec < 0)
3608  ts.tv_sec = 0;
3609  if (ts.tv_nsec < 0)
3610  ts.tv_nsec = 0;
3611  }
3612  goto retry;
3613  }
3614  return -1;
3615  }
3616 
3617  if (fds.revents & POLLNVAL) {
3618  errno = EBADF;
3619  return -1;
3620  }
3621 
3622  /*
3623  * POLLIN, POLLOUT have a different meanings from select(2)'s read/write bit.
3624  * Therefore we need fix it up.
3625  */
3626  result = 0;
3627  if (fds.revents & POLLIN_SET)
3628  result |= RB_WAITFD_IN;
3629  if (fds.revents & POLLOUT_SET)
3630  result |= RB_WAITFD_OUT;
3631  if (fds.revents & POLLEX_SET)
3632  result |= RB_WAITFD_PRI;
3633 
3634  return result;
3635 }
3636 #else /* ! USE_POLL - implement rb_io_poll_fd() using select() */
3637 static rb_fdset_t *
3639 {
3640  rb_fd_init(fds);
3641  rb_fd_set(fd, fds);
3642 
3643  return fds;
3644 }
3645 
3646 struct select_args {
3647  union {
3648  int fd;
3649  int error;
3650  } as;
3654  struct timeval *tv;
3655 };
3656 
3657 static VALUE
3659 {
3660  struct select_args *args = (struct select_args *)ptr;
3661  int r;
3662 
3663  r = rb_thread_fd_select(args->as.fd + 1,
3664  args->read, args->write, args->except, args->tv);
3665  if (r == -1)
3666  args->as.error = errno;
3667  if (r > 0) {
3668  r = 0;
3669  if (args->read && rb_fd_isset(args->as.fd, args->read))
3670  r |= RB_WAITFD_IN;
3671  if (args->write && rb_fd_isset(args->as.fd, args->write))
3672  r |= RB_WAITFD_OUT;
3673  if (args->except && rb_fd_isset(args->as.fd, args->except))
3674  r |= RB_WAITFD_PRI;
3675  }
3676  return (VALUE)r;
3677 }
3678 
3679 static VALUE
3681 {
3682  struct select_args *args = (struct select_args *)ptr;
3683 
3684  if (args->read) rb_fd_term(args->read);
3685  if (args->write) rb_fd_term(args->write);
3686  if (args->except) rb_fd_term(args->except);
3687 
3688  return (VALUE)-1;
3689 }
3690 
3691 int
3692 rb_wait_for_single_fd(int fd, int events, struct timeval *tv)
3693 {
3694  rb_fdset_t rfds, wfds, efds;
3695  struct select_args args;
3696  int r;
3697  VALUE ptr = (VALUE)&args;
3698 
3699  args.as.fd = fd;
3700  args.read = (events & RB_WAITFD_IN) ? init_set_fd(fd, &rfds) : NULL;
3701  args.write = (events & RB_WAITFD_OUT) ? init_set_fd(fd, &wfds) : NULL;
3702  args.except = (events & RB_WAITFD_PRI) ? init_set_fd(fd, &efds) : NULL;
3703  args.tv = tv;
3704 
3706  if (r == -1)
3707  errno = args.as.error;
3708 
3709  return r;
3710 }
3711 #endif /* ! USE_POLL */
3712 
3713 /*
3714  * for GC
3715  */
3716 
3717 #ifdef USE_CONSERVATIVE_STACK_END
3718 void
3720 {
3721  VALUE stack_end;
3722  *stack_end_p = &stack_end;
3723 }
3724 #endif
3725 
3726 
3727 /*
3728  *
3729  */
3730 
3731 void
3733 {
3734  /* mth must be main_thread */
3735  if (rb_signal_buff_size() > 0) {
3736  /* wakeup main thread */
3738  }
3739 }
3740 
3741 static void
3743 {
3744  rb_vm_t *vm = GET_VM(); /* TODO: fix me for Multi-VM */
3745 
3746  /*
3747  * Tricky: thread_destruct_lock doesn't close a race against
3748  * vm->running_thread switch. however it guarantee th->running_thread
3749  * point to valid pointer or NULL.
3750  */
3751  native_mutex_lock(&vm->thread_destruct_lock);
3752  /* for time slice */
3753  if (vm->running_thread)
3755  native_mutex_unlock(&vm->thread_destruct_lock);
3756 
3757  /* check signal */
3759 
3760 #if 0
3761  /* prove profiler */
3762  if (vm->prove_profile.enable) {
3763  rb_thread_t *th = vm->running_thread;
3764 
3765  if (vm->during_gc) {
3766  /* GC prove profiling */
3767  }
3768  }
3769 #endif
3770 }
3771 
3772 void
3774 {
3775  if (timer_thread_id && native_stop_timer_thread(close_anyway)) {
3776  native_reset_timer_thread();
3777  }
3778 }
3779 
3780 void
3782 {
3783  native_reset_timer_thread();
3784 }
3785 
3786 void
3788 {
3789  system_working = 1;
3790  rb_thread_create_timer_thread();
3791 }
3792 
3793 static int
3795 {
3796  int i;
3797  VALUE lines = (VALUE)val;
3798 
3799  for (i = 0; i < RARRAY_LEN(lines); i++) {
3800  if (RARRAY_PTR(lines)[i] != Qnil) {
3801  RARRAY_PTR(lines)[i] = INT2FIX(0);
3802  }
3803  }
3804  return ST_CONTINUE;
3805 }
3806 
3807 static void
3809 {
3810  VALUE coverages = rb_get_coverages();
3811  if (RTEST(coverages)) {
3812  st_foreach(RHASH_TBL(coverages), clear_coverage_i, 0);
3813  }
3814 }
3815 
3816 static void
3818 {
3819  rb_thread_t *th = GET_THREAD();
3820  rb_vm_t *vm = th->vm;
3821  VALUE thval = th->self;
3822  vm->main_thread = th;
3823 
3824  gvl_atfork(th->vm);
3825  st_foreach(vm->living_threads, atfork, (st_data_t)th);
3826  st_clear(vm->living_threads);
3827  st_insert(vm->living_threads, thval, (st_data_t)th->thread_id);
3828  vm->sleeper = 0;
3829  clear_coverage();
3830 }
3831 
3832 static int
3834 {
3835  VALUE thval = key;
3836  rb_thread_t *th;
3837  GetThreadPtr(thval, th);
3838 
3839  if (th != (rb_thread_t *)current_th) {
3843  }
3844  return ST_CONTINUE;
3845 }
3846 
3847 void
3849 {
3851  GET_THREAD()->join_list = NULL;
3852 
3853  /* We don't want reproduce CVE-2003-0900. */
3855 }
3856 
3857 static int
3859 {
3860  VALUE thval = key;
3861  rb_thread_t *th;
3862  GetThreadPtr(thval, th);
3863 
3864  if (th != (rb_thread_t *)current_th) {
3866  }
3867  return ST_CONTINUE;
3868 }
3869 
3870 void
3872 {
3874 }
3875 
3876 struct thgroup {
3879 };
3880 
3881 static size_t
3882 thgroup_memsize(const void *ptr)
3883 {
3884  return ptr ? sizeof(struct thgroup) : 0;
3885 }
3886 
3888  "thgroup",
3890 };
3891 
3892 /*
3893  * Document-class: ThreadGroup
3894  *
3895  * <code>ThreadGroup</code> provides a means of keeping track of a number of
3896  * threads as a group. A <code>Thread</code> can belong to only one
3897  * <code>ThreadGroup</code> at a time; adding a thread to a new group will
3898  * remove it from any previous group.
3899  *
3900  * Newly created threads belong to the same group as the thread from which they
3901  * were created.
3902  */
3903 
3904 /*
3905  * Document-const: Default
3906  *
3907  * The default ThreadGroup created when Ruby starts; all Threads belong to it
3908  * by default.
3909  */
3910 static VALUE
3912 {
3913  VALUE group;
3914  struct thgroup *data;
3915 
3916  group = TypedData_Make_Struct(klass, struct thgroup, &thgroup_data_type, data);
3917  data->enclosed = 0;
3918  data->group = group;
3919 
3920  return group;
3921 }
3922 
3926 };
3927 
3928 static int
3930 {
3931  VALUE thread = (VALUE)key;
3932  VALUE ary = ((struct thgroup_list_params *)data)->ary;
3933  VALUE group = ((struct thgroup_list_params *)data)->group;
3934  rb_thread_t *th;
3935  GetThreadPtr(thread, th);
3936 
3937  if (th->thgroup == group) {
3938  rb_ary_push(ary, thread);
3939  }
3940  return ST_CONTINUE;
3941 }
3942 
3943 /*
3944  * call-seq:
3945  * thgrp.list -> array
3946  *
3947  * Returns an array of all existing <code>Thread</code> objects that belong to
3948  * this group.
3949  *
3950  * ThreadGroup::Default.list #=> [#<Thread:0x401bdf4c run>]
3951  */
3952 
3953 static VALUE
3955 {
3956  VALUE ary = rb_ary_new();
3957  struct thgroup_list_params param;
3958 
3959  param.ary = ary;
3960  param.group = group;
3961  st_foreach(GET_THREAD()->vm->living_threads, thgroup_list_i, (st_data_t) & param);
3962  return ary;
3963 }
3964 
3965 
3966 /*
3967  * call-seq:
3968  * thgrp.enclose -> thgrp
3969  *
3970  * Prevents threads from being added to or removed from the receiving
3971  * <code>ThreadGroup</code>. New threads can still be started in an enclosed
3972  * <code>ThreadGroup</code>.
3973  *
3974  * ThreadGroup::Default.enclose #=> #<ThreadGroup:0x4029d914>
3975  * thr = Thread::new { Thread.stop } #=> #<Thread:0x402a7210 sleep>
3976  * tg = ThreadGroup::new #=> #<ThreadGroup:0x402752d4>
3977  * tg.add thr
3978  *
3979  * <em>produces:</em>
3980  *
3981  * ThreadError: can't move from the enclosed thread group
3982  */
3983 
3984 static VALUE
3986 {
3987  struct thgroup *data;
3988 
3989  TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
3990  data->enclosed = 1;
3991 
3992  return group;
3993 }
3994 
3995 
3996 /*
3997  * call-seq:
3998  * thgrp.enclosed? -> true or false
3999  *
4000  * Returns <code>true</code> if <em>thgrp</em> is enclosed. See also
4001  * ThreadGroup#enclose.
4002  */
4003 
4004 static VALUE
4006 {
4007  struct thgroup *data;
4008 
4009  TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
4010  if (data->enclosed)
4011  return Qtrue;
4012  return Qfalse;
4013 }
4014 
4015 
4016 /*
4017  * call-seq:
4018  * thgrp.add(thread) -> thgrp
4019  *
4020  * Adds the given <em>thread</em> to this group, removing it from any other
4021  * group to which it may have previously belonged.
4022  *
4023  * puts "Initial group is #{ThreadGroup::Default.list}"
4024  * tg = ThreadGroup.new
4025  * t1 = Thread.new { sleep }
4026  * t2 = Thread.new { sleep }
4027  * puts "t1 is #{t1}"
4028  * puts "t2 is #{t2}"
4029  * tg.add(t1)
4030  * puts "Initial group now #{ThreadGroup::Default.list}"
4031  * puts "tg group now #{tg.list}"
4032  *
4033  * <em>produces:</em>
4034  *
4035  * Initial group is #<Thread:0x401bdf4c>
4036  * t1 is #<Thread:0x401b3c90>
4037  * t2 is #<Thread:0x401b3c18>
4038  * Initial group now #<Thread:0x401b3c18>#<Thread:0x401bdf4c>
4039  * tg group now #<Thread:0x401b3c90>
4040  */
4041 
/* NOTE(review): name line 4043 is missing from this extract; the residue
 * index shows this is thgroup_add(VALUE group, VALUE thread). */
4042 static VALUE
4044 {
4045  rb_thread_t *th;
4046  struct thgroup *data;
4047 
4048  rb_secure(4);
4049  GetThreadPtr(thread, th);
4050 
 /* destination group must be mutable and not enclosed */
4051  if (OBJ_FROZEN(group)) {
4052  rb_raise(rb_eThreadError, "can't move to the frozen thread group");
4053  }
4054  TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
4055  if (data->enclosed) {
4056  rb_raise(rb_eThreadError, "can't move to the enclosed thread group");
4057  }
4058 
 /* thread not yet assigned to any group: nothing to move */
4059  if (!th->thgroup) {
4060  return Qnil;
4061  }
4062 
 /* the source group must also allow removal */
4063  if (OBJ_FROZEN(th->thgroup)) {
4064  rb_raise(rb_eThreadError, "can't move from the frozen thread group");
4065  }
 /* NOTE(review): lines 4066 and 4068 are missing from this extract;
  * upstream re-fetches `data` from th->thgroup before this check, so the
  * second enclosed test applies to the SOURCE group, not the destination
  * -- confirm against upstream before editing. */
4067  if (data->enclosed) {
4069  "can't move from the enclosed thread group");
4070  }
4071 
4072  th->thgroup = group;
4073  return group;
4074 }
4075 
4076 
4077 /*
4078  * Document-class: Mutex
4079  *
4080  * Mutex implements a simple semaphore that can be used to coordinate access to
4081  * shared data from multiple concurrent threads.
4082  *
4083  * Example:
4084  *
4085  * require 'thread'
4086  * semaphore = Mutex.new
4087  *
4088  * a = Thread.new {
4089  * semaphore.synchronize {
4090  * # access shared resource
4091  * }
4092  * }
4093  *
4094  * b = Thread.new {
4095  * semaphore.synchronize {
4096  * # access shared resource
4097  * }
4098  * }
4099  *
4100  */
4101 
4102 #define GetMutexPtr(obj, tobj) \
4103  TypedData_Get_Struct((obj), rb_mutex_t, &mutex_data_type, (tobj))
4104 
4105 #define mutex_mark NULL
4106 
4107 static void
4108 mutex_free(void *ptr)
4109 {
4110  if (ptr) {
4111  rb_mutex_t *mutex = ptr;
4112  if (mutex->th) {
4113  /* rb_warn("free locked mutex"); */
4114  const char *err = rb_mutex_unlock_th(mutex, mutex->th);
4115  if (err) rb_bug("%s", err);
4116  }
4117  native_mutex_destroy(&mutex->lock);
4118  native_cond_destroy(&mutex->cond);
4119  }
4120  ruby_xfree(ptr);
4121 }
4122 
4123 static size_t
4124 mutex_memsize(const void *ptr)
4125 {
4126  return ptr ? sizeof(rb_mutex_t) : 0;
4127 }
4128 
4130  "mutex",
4132 };
4133 
4134 VALUE
4136 {
4137  if (rb_typeddata_is_kind_of(obj, &mutex_data_type)) {
4138  return Qtrue;
4139  }
4140  else {
4141  return Qfalse;
4142  }
4143 }
4144 
4145 static VALUE
4147 {
4148  VALUE volatile obj;
4149  rb_mutex_t *mutex;
4150 
4151  obj = TypedData_Make_Struct(klass, rb_mutex_t, &mutex_data_type, mutex);
4152  native_mutex_initialize(&mutex->lock);
4153  native_cond_initialize(&mutex->cond, RB_CONDATTR_CLOCK_MONOTONIC);
4154  return obj;
4155 }
4156 
4157 /*
4158  * call-seq:
4159  * Mutex.new -> mutex
4160  *
4161  * Creates a new Mutex
4162  */
4163 static VALUE
4165 {
4166  return self;
4167 }
4168 
4169 VALUE
4171 {
4172  return mutex_alloc(rb_cMutex);
4173 }
4174 
4175 /*
4176  * call-seq:
4177  * mutex.locked? -> true or false
4178  *
4179  * Returns +true+ if this lock is currently held by some thread.
4180  */
4181 VALUE
4183 {
4184  rb_mutex_t *mutex;
4185  GetMutexPtr(self, mutex);
4186  return mutex->th ? Qtrue : Qfalse;
4187 }
4188 
4189 static void
4191 {
4192  rb_mutex_t *mutex;
4193  GetMutexPtr(self, mutex);
4194 
4195  if (th->keeping_mutexes) {
4196  mutex->next_mutex = th->keeping_mutexes;
4197  }
4198  th->keeping_mutexes = mutex;
4199 }
4200 
4201 /*
4202  * call-seq:
4203  * mutex.try_lock -> true or false
4204  *
4205  * Attempts to obtain the lock and returns immediately. Returns +true+ if the
4206  * lock was granted.
4207  */
/* NOTE(review): name line 4209 is missing from this extract; this is
 * rb_mutex_trylock(VALUE self) per the call-seq comment above. */
4208 VALUE
4210 {
4211  rb_mutex_t *mutex;
4212  VALUE locked = Qfalse;
4213  GetMutexPtr(self, mutex);
4214 
4215  native_mutex_lock(&mutex->lock);
4216  if (mutex->th == 0) {
 /* mutex is free: take ownership and record it on the thread's
  * keeping_mutexes list via mutex_locked() */
4217  mutex->th = GET_THREAD();
4218  locked = Qtrue;
4219 
4220  mutex_locked(GET_THREAD(), self);
4221  }
4222  native_mutex_unlock(&mutex->lock);
4223 
4224  return locked;
4225 }
4226 
4227 static int
4228 lock_func(rb_thread_t *th, rb_mutex_t *mutex, int timeout_ms)
4229 {
4230  int interrupted = 0;
4231  int err = 0;
4232 
4233  mutex->cond_waiting++;
4234  for (;;) {
4235  if (!mutex->th) {
4236  mutex->th = th;
4237  break;
4238  }
4239  if (RUBY_VM_INTERRUPTED(th)) {
4240  interrupted = 1;
4241  break;
4242  }
4243  if (err == ETIMEDOUT) {
4244  interrupted = 2;
4245  break;
4246  }
4247 
4248  if (timeout_ms) {
4249  struct timespec timeout_rel;
4250  struct timespec timeout;
4251 
4252  timeout_rel.tv_sec = 0;
4253  timeout_rel.tv_nsec = timeout_ms * 1000 * 1000;
4254  timeout = native_cond_timeout(&mutex->cond, timeout_rel);
4255  err = native_cond_timedwait(&mutex->cond, &mutex->lock, &timeout);
4256  }
4257  else {
4258  native_cond_wait(&mutex->cond, &mutex->lock);
4259  err = 0;
4260  }
4261  }
4262  mutex->cond_waiting--;
4263 
4264  return interrupted;
4265 }
4266 
4267 static void
4268 lock_interrupt(void *ptr)
4269 {
4270  rb_mutex_t *mutex = (rb_mutex_t *)ptr;
4271  native_mutex_lock(&mutex->lock);
4272  if (mutex->cond_waiting > 0)
4273  native_cond_broadcast(&mutex->cond);
4274  native_mutex_unlock(&mutex->lock);
4275 }
4276 
4277 /*
4278  * At maximum, only one thread can use cond_timedwait and watch deadlock
4279  * periodically. Multiple polling thread (i.e. concurrent deadlock check)
4280  * introduces new race conditions. [Bug #6278] [ruby-core:44275]
4281  */
4283 
4284 /*
4285  * call-seq:
4286  * mutex.lock -> self
4287  *
4288  * Attempts to grab the lock and waits if it isn't available.
4289  * Raises +ThreadError+ if +mutex+ was locked by the current thread.
4290  */
/* NOTE(review): name line 4292 is missing from this extract; per the
 * call-seq comment above this is rb_mutex_lock(VALUE self). */
4291 VALUE
4293 {
4294  rb_thread_t *th = GET_THREAD();
4295  rb_mutex_t *mutex;
4296  GetMutexPtr(self, mutex);
4297 
4298  /* When running trap handler */
4299  if (!mutex->allow_trap && th->interrupt_mask & TRAP_INTERRUPT_MASK) {
4300  rb_raise(rb_eThreadError, "can't be called from trap context");
4301  }
4302 
4303  if (rb_mutex_trylock(self) == Qfalse) {
4304  if (mutex->th == GET_THREAD()) {
4305  rb_raise(rb_eThreadError, "deadlock; recursive locking");
4306  }
4307 
4308  while (mutex->th != th) {
4309  int interrupted;
4310  enum rb_thread_status prev_status = th->status;
4311  volatile int timeout_ms = 0;
4312  struct rb_unblock_callback oldubf;
4313 
4314  set_unblock_function(th, lock_interrupt, mutex, &oldubf, FALSE);
 /* NOTE(review): line 4315 is missing from this extract (upstream sets
  * th->status = THREAD_STOPPED_FOREVER here) -- confirm upstream. */
4316  th->locking_mutex = self;
4317 
4318  native_mutex_lock(&mutex->lock);
4319  th->vm->sleeper++;
4320  /*
4321  * Careful: while some contended threads are in lock_func(),
4322  * vm->sleeper is an unstable value. We have to avoid both deadlock
4323  * and busy looping.
4324  */
4325  if ((vm_living_thread_num(th->vm) == th->vm->sleeper) &&
4326  !patrol_thread) {
 /* become the single patrol thread: wait with a 100ms timeout so
  * deadlock detection can run periodically */
4327  timeout_ms = 100;
4328  patrol_thread = th;
4329  }
4330 
4331  GVL_UNLOCK_BEGIN();
4332  interrupted = lock_func(th, mutex, (int)timeout_ms);
4333  native_mutex_unlock(&mutex->lock);
4334  GVL_UNLOCK_END();
4335 
4336  if (patrol_thread == th)
4337  patrol_thread = NULL;
4338 
4339  reset_unblock_function(th, &oldubf);
4340 
4341  th->locking_mutex = Qfalse;
 /* interrupted == 2: the patrol timeout expired; check for deadlock */
4342  if (mutex->th && interrupted == 2) {
4343  rb_check_deadlock(th->vm);
4344  }
4345  if (th->status == THREAD_STOPPED_FOREVER) {
4346  th->status = prev_status;
4347  }
4348  th->vm->sleeper--;
4349 
4350  if (mutex->th == th) mutex_locked(th, self);
4351 
4352  if (interrupted) {
 /* NOTE(review): line 4353 is missing from this extract (upstream runs
  * RUBY_VM_CHECK_INTS_BLOCKING(th) here) -- confirm upstream. */
4354  }
4355  }
4356  }
4357  return self;
4358 }
4359 
4360 /*
4361  * call-seq:
4362  * mutex.owned? -> true or false
4363  *
4364  * Returns +true+ if this lock is currently held by current thread.
4365  * <em>This API is experimental, and subject to change.</em>
4366  */
4367 VALUE
4369 {
4370  VALUE owned = Qfalse;
4371  rb_thread_t *th = GET_THREAD();
4372  rb_mutex_t *mutex;
4373 
4374  GetMutexPtr(self, mutex);
4375 
4376  if (mutex->th == th)
4377  owned = Qtrue;
4378 
4379  return owned;
4380 }
4381 
/*
 * Release MUTEX on behalf of TH.  Returns NULL on success, or a static
 * error message string when the mutex is not locked or is owned by a
 * different thread (callers raise or rb_bug with it).
 * NOTE(review): the parameter-list line 4383 is missing from this
 * extract; the residue index shows the signature as
 * rb_mutex_unlock_th(rb_mutex_t *mutex, rb_thread_t volatile *th).
 */
4382 static const char *
4384 {
4385  const char *err = NULL;
4386 
4387  native_mutex_lock(&mutex->lock);
4388 
4389  if (mutex->th == 0) {
4390  err = "Attempt to unlock a mutex which is not locked";
4391  }
4392  else if (mutex->th != th) {
4393  err = "Attempt to unlock a mutex which is locked by another thread";
4394  }
4395  else {
4396  mutex->th = 0;
 /* wake one waiter blocked in lock_func() */
4397  if (mutex->cond_waiting > 0)
4398  native_cond_signal(&mutex->cond);
4399  }
4400 
4401  native_mutex_unlock(&mutex->lock);
4402 
4403  if (!err) {
 /* unlink this mutex from th's keeping_mutexes singly-linked list */
4404  rb_mutex_t *volatile *th_mutex = &th->keeping_mutexes;
4405  while (*th_mutex != mutex) {
4406  th_mutex = &(*th_mutex)->next_mutex;
4407  }
4408  *th_mutex = mutex->next_mutex;
4409  mutex->next_mutex = NULL;
4410  }
4411 
4412  return err;
4413 }
4414 
4415 /*
4416  * call-seq:
4417  * mutex.unlock -> self
4418  *
4419  * Releases the lock.
4420  * Raises +ThreadError+ if +mutex+ wasn't locked by the current thread.
4421  */
4422 VALUE
4424 {
4425  const char *err;
4426  rb_mutex_t *mutex;
4427  GetMutexPtr(self, mutex);
4428 
4429  err = rb_mutex_unlock_th(mutex, GET_THREAD());
4430  if (err) rb_raise(rb_eThreadError, "%s", err);
4431 
4432  return self;
4433 }
4434 
4435 static void
4437 {
4438  if (th->keeping_mutexes) {
4440  }
4441  th->keeping_mutexes = NULL;
4442 }
4443 
4444 static void
4446 {
4447  rb_mutex_t *mutex;
4448 
4449  if (!th->locking_mutex) return;
4450 
4451  GetMutexPtr(th->locking_mutex, mutex);
4452  if (mutex->th == th)
4453  rb_mutex_abandon_all(mutex);
4454  th->locking_mutex = Qfalse;
4455 }
4456 
4457 static void
4459 {
4460  rb_mutex_t *mutex;
4461 
4462  while (mutexes) {
4463  mutex = mutexes;
4464  mutexes = mutex->next_mutex;
4465  mutex->th = 0;
4466  mutex->next_mutex = 0;
4467  }
4468 }
4469 
4470 static VALUE
4472 {
4473  sleep_forever(GET_THREAD(), 1, 0); /* permit spurious check */
4474  return Qnil;
4475 }
4476 
4477 static VALUE
4479 {
4480  struct timeval *t = (struct timeval *)time;
4481  sleep_timeval(GET_THREAD(), *t, 0); /* permit spurious check */
4482  return Qnil;
4483 }
4484 
4485 VALUE
4487 {
4488  time_t beg, end;
4489  struct timeval t;
4490 
4491  if (!NIL_P(timeout)) {
4492  t = rb_time_interval(timeout);
4493  }
4494  rb_mutex_unlock(self);
4495  beg = time(0);
4496  if (NIL_P(timeout)) {
4498  }
4499  else {
4501  }
4502  end = time(0) - beg;
4503  return INT2FIX(end);
4504 }
4505 
4506 /*
4507  * call-seq:
4508  * mutex.sleep(timeout = nil) -> number
4509  *
4510  * Releases the lock and sleeps +timeout+ seconds if it is given and
4511  * non-nil or forever. Raises +ThreadError+ if +mutex+ wasn't locked by
4512  * the current thread.
4513  *
4514  * Note that this method can wakeup without explicit Thread#wakeup call.
4515  * For example, receiving signal and so on.
4516  */
4517 static VALUE
4519 {
4520  VALUE timeout;
4521 
4522  rb_scan_args(argc, argv, "01", &timeout);
4523  return rb_mutex_sleep(self, timeout);
4524 }
4525 
4526 /*
4527  * call-seq:
4528  * mutex.synchronize { ... } -> result of the block
4529  *
4530  * Obtains a lock, runs the block, and releases the lock when the block
4531  * completes. See the example under +Mutex+.
4532  */
4533 
4534 VALUE
4536 {
4537  rb_mutex_lock(mutex);
4538  return rb_ensure(func, arg, rb_mutex_unlock, mutex);
4539 }
4540 
4541 /*
4542  * call-seq:
4543  * mutex.synchronize { ... } -> result of the block
4544  *
4545  * Obtains a lock, runs the block, and releases the lock when the block
4546  * completes. See the example under +Mutex+.
4547  */
4548 static VALUE
4550 {
4551  if (!rb_block_given_p()) {
4552  rb_raise(rb_eThreadError, "must be called with a block");
4553  }
4554 
4555  return rb_mutex_synchronize(self, rb_yield, Qundef);
4556 }
4557 
4558 void rb_mutex_allow_trap(VALUE self, int val)
4559 {
4560  rb_mutex_t *m;
4561  GetMutexPtr(self, m);
4562 
4563  m->allow_trap = val;
4564 }
4565 
4566 /*
4567  * Document-class: ThreadShield
4568  */
4569 static void
4571 {
4572  rb_gc_mark((VALUE)ptr);
4573 }
4574 
4576  "thread_shield",
4577  {thread_shield_mark, 0, 0,},
4578 };
4579 
4580 static VALUE
4582 {
4583  return TypedData_Wrap_Struct(klass, &thread_shield_data_type, (void *)mutex_alloc(0));
4584 }
4585 
4586 #define GetThreadShieldPtr(obj) ((VALUE)rb_check_typeddata((obj), &thread_shield_data_type))
4587 #define THREAD_SHIELD_WAITING_MASK (FL_USER0|FL_USER1|FL_USER2|FL_USER3|FL_USER4|FL_USER5|FL_USER6|FL_USER7|FL_USER8|FL_USER9|FL_USER10|FL_USER11|FL_USER12|FL_USER13|FL_USER14|FL_USER15|FL_USER16|FL_USER17|FL_USER18|FL_USER19)
4588 #define THREAD_SHIELD_WAITING_SHIFT (FL_USHIFT)
4589 #define rb_thread_shield_waiting(b) (int)((RBASIC(b)->flags&THREAD_SHIELD_WAITING_MASK)>>THREAD_SHIELD_WAITING_SHIFT)
4590 
4591 static inline void
4593 {
4594  unsigned int w = rb_thread_shield_waiting(b);
4595  w++;
4597  rb_raise(rb_eRuntimeError, "waiting count overflow");
4598  RBASIC(b)->flags &= ~THREAD_SHIELD_WAITING_MASK;
4599  RBASIC(b)->flags |= ((VALUE)w << THREAD_SHIELD_WAITING_SHIFT);
4600 }
4601 
4602 static inline void
4604 {
4605  unsigned int w = rb_thread_shield_waiting(b);
4606  if (!w) rb_raise(rb_eRuntimeError, "waiting count underflow");
4607  w--;
4608  RBASIC(b)->flags &= ~THREAD_SHIELD_WAITING_MASK;
4609  RBASIC(b)->flags |= ((VALUE)w << THREAD_SHIELD_WAITING_SHIFT);
4610 }
4611 
4612 VALUE
4614 {
4615  VALUE thread_shield = thread_shield_alloc(rb_cThreadShield);
4616  rb_mutex_lock((VALUE)DATA_PTR(thread_shield));
4617  return thread_shield;
4618 }
4619 
4620 /*
4621  * Wait a thread shield.
4622  *
4623  * Returns
4624  * true: acquired the thread shield
4625  * false: the thread shield was destroyed and no other threads waiting
4626  * nil: the thread shield was destroyed but still in use
4627  */
4628 VALUE
4630 {
4631  VALUE mutex = GetThreadShieldPtr(self);
4632  rb_mutex_t *m;
4633 
4634  if (!mutex) return Qfalse;
4635  GetMutexPtr(mutex, m);
4636  if (m->th == GET_THREAD()) return Qnil;
4638  rb_mutex_lock(mutex);
4640  if (DATA_PTR(self)) return Qtrue;
4641  rb_mutex_unlock(mutex);
4642  return rb_thread_shield_waiting(self) > 0 ? Qnil : Qfalse;
4643 }
4644 
4645 /*
4646  * Release a thread shield, and return true if it has waiting threads.
4647  */
4648 VALUE
4650 {
4651  VALUE mutex = GetThreadShieldPtr(self);
4652  rb_mutex_unlock(mutex);
4653  return rb_thread_shield_waiting(self) > 0 ? Qtrue : Qfalse;
4654 }
4655 
4656 /*
4657  * Release and destroy a thread shield, and return true if it has waiting threads.
4658  */
4659 VALUE
4661 {
4662  VALUE mutex = GetThreadShieldPtr(self);
4663  DATA_PTR(self) = 0;
4664  rb_mutex_unlock(mutex);
4665  return rb_thread_shield_waiting(self) > 0 ? Qtrue : Qfalse;
4666 }
4667 
4668 /* variables for recursive traversals */
4670 
4671 /*
4672  * Returns the current "recursive list" used to detect recursion.
4673  * This list is a hash table, unique for the current thread and for
4674  * the current __callee__.
4675  */
4676 
4677 static VALUE
4679 {
4680  volatile VALUE hash = rb_thread_local_aref(rb_thread_current(), recursive_key);
4682  VALUE list;
4683  if (NIL_P(hash) || !RB_TYPE_P(hash, T_HASH)) {
4684  hash = rb_hash_new();
4685  OBJ_UNTRUST(hash);
4686  rb_thread_local_aset(rb_thread_current(), recursive_key, hash);
4687  list = Qnil;
4688  }
4689  else {
4690  list = rb_hash_aref(hash, sym);
4691  }
4692  if (NIL_P(list) || !RB_TYPE_P(list, T_HASH)) {
4693  list = rb_hash_new();
4694  OBJ_UNTRUST(list);
4695  rb_hash_aset(hash, sym, list);
4696  }
4697  return list;
4698 }
4699 
4700 /*
4701  * Returns Qtrue iff obj_id (or the pair <obj, paired_obj>) is already
4702  * in the recursion list.
4703  * Assumes the recursion list is valid.
4704  */
4705 
4706 static VALUE
4707 recursive_check(VALUE list, VALUE obj_id, VALUE paired_obj_id)
4708 {
4709 #if SIZEOF_LONG == SIZEOF_VOIDP
4710  #define OBJ_ID_EQL(obj_id, other) ((obj_id) == (other))
4711 #elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
4712  #define OBJ_ID_EQL(obj_id, other) (RB_TYPE_P((obj_id), T_BIGNUM) ? \
4713  rb_big_eql((obj_id), (other)) : ((obj_id) == (other)))
4714 #endif
4715 
4716  VALUE pair_list = rb_hash_lookup2(list, obj_id, Qundef);
4717  if (pair_list == Qundef)
4718  return Qfalse;
4719  if (paired_obj_id) {
4720  if (!RB_TYPE_P(pair_list, T_HASH)) {
4721  if (!OBJ_ID_EQL(paired_obj_id, pair_list))
4722  return Qfalse;
4723  }
4724  else {
4725  if (NIL_P(rb_hash_lookup(pair_list, paired_obj_id)))
4726  return Qfalse;
4727  }
4728  }
4729  return Qtrue;
4730 }
4731 
4732 /*
4733  * Pushes obj_id (or the pair <obj_id, paired_obj_id>) in the recursion list.
4734  * For a single obj_id, it sets list[obj_id] to Qtrue.
4735  * For a pair, it sets list[obj_id] to paired_obj_id if possible,
4736  * otherwise list[obj_id] becomes a hash like:
4737  * {paired_obj_id_1 => true, paired_obj_id_2 => true, ... }
4738  * Assumes the recursion list is valid.
4739  */
4740 
4741 static void
4743 {
4744  VALUE pair_list;
4745 
4746  if (!paired_obj) {
4747  rb_hash_aset(list, obj, Qtrue);
4748  }
4749  else if ((pair_list = rb_hash_lookup2(list, obj, Qundef)) == Qundef) {
4750  rb_hash_aset(list, obj, paired_obj);
4751  }
4752  else {
4753  if (!RB_TYPE_P(pair_list, T_HASH)){
4754  VALUE other_paired_obj = pair_list;
4755  pair_list = rb_hash_new();
4756  OBJ_UNTRUST(pair_list);
4757  rb_hash_aset(pair_list, other_paired_obj, Qtrue);
4758  rb_hash_aset(list, obj, pair_list);
4759  }
4760  rb_hash_aset(pair_list, paired_obj, Qtrue);
4761  }
4762 }
4763 
4764 /*
4765  * Pops obj_id (or the pair <obj_id, paired_obj_id>) from the recursion list.
4766  * For a pair, if list[obj_id] is a hash, then paired_obj_id is
4767  * removed from the hash and no attempt is made to simplify
4768  * list[obj_id] from {only_one_paired_id => true} to only_one_paired_id
4769  * Assumes the recursion list is valid.
4770  */
4771 
4772 static void
4774 {
4775  if (paired_obj) {
4776  VALUE pair_list = rb_hash_lookup2(list, obj, Qundef);
4777  if (pair_list == Qundef) {
4778  VALUE symname = rb_inspect(ID2SYM(rb_frame_this_func()));
4779  VALUE thrname = rb_inspect(rb_thread_current());
4780  rb_raise(rb_eTypeError, "invalid inspect_tbl pair_list for %s in %s",
4781  StringValuePtr(symname), StringValuePtr(thrname));
4782  }
4783  if (RB_TYPE_P(pair_list, T_HASH)) {
4784  rb_hash_delete(pair_list, paired_obj);
4785  if (!RHASH_EMPTY_P(pair_list)) {
4786  return; /* keep hash until is empty */
4787  }
4788  }
4789  }
4790  rb_hash_delete(list, obj);
4791 }
4792 
4800 };
4801 
4802 static VALUE
4804 {
4805  VALUE result = Qundef;
4806  int state;
4807 
4808  recursive_push(p->list, p->objid, p->pairid);
4809  PUSH_TAG();
4810  if ((state = EXEC_TAG()) == 0) {
4811  result = (*p->func)(p->obj, p->arg, FALSE);
4812  }
4813  POP_TAG();
4814  recursive_pop(p->list, p->objid, p->pairid);
4815  if (state)
4816  JUMP_TAG(state);
4817  return result;
4818 }
4819 
4820 /*
4821  * Calls func(obj, arg, recursive), where recursive is non-zero if the
4822  * current method is called recursively on obj, or on the pair <obj, pairid>
4823  * If outer is 0, then the innermost func will be called with recursive set
4824  * to Qtrue, otherwise the outermost func will be called. In the latter case,
4825  * all inner func are short-circuited by throw.
4826  * Implementation details: the value thrown is the recursive list which is
4827  * proper to the current method and unlikely to be catched anywhere else.
4828  * list[recursive_key] is used as a flag for the outermost call.
4829  */
4830 
4831 static VALUE
4832 exec_recursive(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE pairid, VALUE arg, int outer)
4833 {
4834  VALUE result = Qundef;
4835  struct exec_recursive_params p;
4836  int outermost;
 /* NOTE(review): line 4837 is missing from this extract; it must
  * initialize p.list (upstream: p.list = recursive_list_access();) --
  * confirm before relying on p.list below. */
4838  p.objid = rb_obj_id(obj);
4839  p.obj = obj;
4840  p.pairid = pairid;
4841  p.arg = arg;
 /* outermost: this is the first outer-mode frame for this list */
4842  outermost = outer && !recursive_check(p.list, ID2SYM(recursive_key), 0);
4843 
4844  if (recursive_check(p.list, p.objid, pairid)) {
4845  if (outer && !outermost) {
 /* inner outer-mode frame: unwind to the outermost frame via throw */
4846  rb_throw_obj(p.list, p.list);
4847  }
4848  return (*func)(obj, arg, TRUE);
4849  }
4850  else {
4851  p.func = func;
4852 
4853  if (outermost) {
4854  recursive_push(p.list, ID2SYM(recursive_key), 0);
4855  result = rb_catch_obj(p.list, exec_recursive_i, (VALUE)&p);
4856  recursive_pop(p.list, ID2SYM(recursive_key), 0);
 /* result == p.list means an inner frame threw: rerun func with
  * recursive=TRUE at the outermost level */
4857  if (result == p.list) {
4858  result = (*func)(obj, arg, TRUE);
4859  }
4860  }
4861  else {
4862  result = exec_recursive_i(0, &p);
4863  }
4864  }
 /* volatile read keeps p from being clobbered across the setjmp-based
  * catch above */
4865  *(volatile struct exec_recursive_params *)&p;
4866  return result;
4867 }
4868 
4869 /*
4870  * Calls func(obj, arg, recursive), where recursive is non-zero if the
4871  * current method is called recursively on obj
4872  */
4873 
4874 VALUE
4876 {
4877  return exec_recursive(func, obj, 0, arg, 0);
4878 }
4879 
4880 /*
4881  * Calls func(obj, arg, recursive), where recursive is non-zero if the
4882  * current method is called recursively on the ordered pair <obj, paired_obj>
4883  */
4884 
4885 VALUE
4887 {
4888  return exec_recursive(func, obj, rb_obj_id(paired_obj), arg, 0);
4889 }
4890 
4891 /*
4892  * If recursion is detected on the current method and obj, the outermost
4893  * func will be called with (obj, arg, Qtrue). All inner func will be
4894  * short-circuited using throw.
4895  */
4896 
4897 VALUE
4899 {
4900  return exec_recursive(func, obj, 0, arg, 1);
4901 }
4902 
4903 /*
4904  * If recursion is detected on the current method, obj and paired_obj,
4905  * the outermost func will be called with (obj, arg, Qtrue). All inner
4906  * func will be short-circuited using throw.
4907  */
4908 
4909 VALUE
4911 {
4912  return exec_recursive(func, obj, rb_obj_id(paired_obj), arg, 1);
4913 }
4914 
4915 /*
4916  * call-seq:
4917  * thr.backtrace -> array
4918  *
4919  * Returns the current backtrace of the target thread.
4920  *
4921  */
4922 
4923 static VALUE
4925 {
4926  return vm_thread_backtrace(argc, argv, thval);
4927 }
4928 
4929 /* call-seq:
4930  * thr.backtrace_locations(*args) -> array or nil
4931  *
4932  * Returns the execution stack for the target thread---an array containing
4933  * backtrace location objects.
4934  *
4935  * See Thread::Backtrace::Location for more information.
4936  *
4937  * This method behaves similarly to Kernel#caller_locations except it applies
4938  * to a specific thread.
4939  */
4940 static VALUE
4942 {
4943  return vm_thread_backtrace_locations(argc, argv, thval);
4944 }
4945 
4946 /*
4947  * Document-class: ThreadError
4948  *
4949  * Raised when an invalid operation is attempted on a thread.
4950  *
4951  * For example, when no other thread has been started:
4952  *
4953  * Thread.stop
4954  *
4955  * <em>raises the exception:</em>
4956  *
4957  * ThreadError: stopping only thread
4958  */
4959 
4960 /*
4961  * +Thread+ encapsulates the behavior of a thread of
4962  * execution, including the main thread of the Ruby script.
4963  *
4964  * In the descriptions of the methods in this class, the parameter _sym_
4965  * refers to a symbol, which is either a quoted string or a
4966  * +Symbol+ (such as <code>:name</code>).
4967  */
4968 
4969 void
4971 {
4972 #undef rb_intern
4973 #define rb_intern(str) rb_intern_const(str)
4974 
4975  VALUE cThGroup;
4976  rb_thread_t *th = GET_THREAD();
4977 
4978  sym_never = ID2SYM(rb_intern("never"));
4979  sym_immediate = ID2SYM(rb_intern("immediate"));
4980  sym_on_blocking = ID2SYM(rb_intern("on_blocking"));
4981 
4992  rb_define_singleton_method(rb_cThread, "abort_on_exception", rb_thread_s_abort_exc, 0);
4994 #if THREAD_DEBUG < 0
4995  rb_define_singleton_method(rb_cThread, "DEBUG", rb_thread_s_debug, 0);
4996  rb_define_singleton_method(rb_cThread, "DEBUG=", rb_thread_s_debug_set, 1);
4997 #endif
5000  rb_define_method(rb_cThread, "pending_interrupt?", rb_thread_pending_interrupt_p, -1);
5001 
5002  rb_define_method(rb_cThread, "initialize", thread_initialize, -2);
5007  rb_define_method(rb_cThread, "terminate", rb_thread_kill, 0);
5018  rb_define_method(rb_cThread, "thread_variable_get", rb_thread_variable_get, 1);
5019  rb_define_method(rb_cThread, "thread_variable_set", rb_thread_variable_set, 2);
5020  rb_define_method(rb_cThread, "thread_variables", rb_thread_variables, 0);
5021  rb_define_method(rb_cThread, "thread_variable?", rb_thread_variable_p, 1);
5024  rb_define_method(rb_cThread, "abort_on_exception", rb_thread_abort_exc, 0);
5025  rb_define_method(rb_cThread, "abort_on_exception=", rb_thread_abort_exc_set, 1);
5029  rb_define_method(rb_cThread, "backtrace_locations", rb_thread_backtrace_locations_m, -1);
5030 
5032 
5033  closed_stream_error = rb_exc_new2(rb_eIOError, "stream closed");
5036 
5037  cThGroup = rb_define_class("ThreadGroup", rb_cObject);
5039  rb_define_method(cThGroup, "list", thgroup_list, 0);
5040  rb_define_method(cThGroup, "enclose", thgroup_enclose, 0);
5041  rb_define_method(cThGroup, "enclosed?", thgroup_enclosed_p, 0);
5042  rb_define_method(cThGroup, "add", thgroup_add, 1);
5043 
5044  {
5045  th->thgroup = th->vm->thgroup_default = rb_obj_alloc(cThGroup);
5046  rb_define_const(cThGroup, "Default", th->thgroup);
5047  }
5048 
5049  rb_cMutex = rb_define_class("Mutex", rb_cObject);
5051  rb_define_method(rb_cMutex, "initialize", mutex_initialize, 0);
5053  rb_define_method(rb_cMutex, "try_lock", rb_mutex_trylock, 0);
5056  rb_define_method(rb_cMutex, "sleep", mutex_sleep, -1);
5059 
5060  recursive_key = rb_intern("__recursive_key__");
5062 
5063  /* init thread core */
5064  {
5065  /* main thread setting */
5066  {
5067  /* acquire global vm lock */
5068  gvl_init(th->vm);
5069  gvl_acquire(th->vm, th);
5070  native_mutex_initialize(&th->vm->thread_destruct_lock);
5071  native_mutex_initialize(&th->interrupt_lock);
5072 
5076 
5077  th->interrupt_mask = 0;
5078  }
5079  }
5080 
5081  rb_thread_create_timer_thread();
5082 
5083  /* suppress warnings on cygwin, mingw and mswin.*/
5084  (void)native_mutex_trylock;
5085 }
5086 
5087 int
5089 {
5090  rb_thread_t *th = ruby_thread_from_native();
5091 
5092  return th != 0;
5093 }
5094 
5095 static int
5097 {
5098  VALUE thval = key;
5099  rb_thread_t *th;
5100  GetThreadPtr(thval, th);
5101 
5103  *found = 1;
5104  }
5105  else if (th->locking_mutex) {
5106  rb_mutex_t *mutex;
5107  GetMutexPtr(th->locking_mutex, mutex);
5108 
5109  native_mutex_lock(&mutex->lock);
5110  if (mutex->th == th || (!mutex->th && mutex->cond_waiting)) {
5111  *found = 1;
5112  }
5113  native_mutex_unlock(&mutex->lock);
5114  }
5115 
5116  return (*found) ? ST_STOP : ST_CONTINUE;
5117 }
5118 
5119 #ifdef DEBUG_DEADLOCK_CHECK
5120 static int
5121 debug_i(st_data_t key, st_data_t val, int *found)
5122 {
5123  VALUE thval = key;
5124  rb_thread_t *th;
5125  GetThreadPtr(thval, th);
5126 
5127  printf("th:%p %d %d", th, th->status, th->interrupt_flag);
5128  if (th->locking_mutex) {
5129  rb_mutex_t *mutex;
5130  GetMutexPtr(th->locking_mutex, mutex);
5131 
5132  native_mutex_lock(&mutex->lock);
5133  printf(" %p %d\n", mutex->th, mutex->cond_waiting);
5134  native_mutex_unlock(&mutex->lock);
5135  }
5136  else
5137  puts("");
5138 
5139  return ST_CONTINUE;
5140 }
5141 #endif
5142 
/* NOTE(review): name line 5144 is missing from this extract; this is
 * rb_check_deadlock(rb_vm_t *vm), called from rb_mutex_lock() by the
 * patrol thread when its timed wait expires. */
5143 static void
5145 {
5146  int found = 0;
5147 
 /* deadlock is only possible when every living thread is asleep */
5148  if (vm_living_thread_num(vm) > vm->sleeper) return;
5149  if (vm_living_thread_num(vm) < vm->sleeper) rb_bug("sleeper must not be more than vm_living_thread_num(vm)");
5150  if (patrol_thread && patrol_thread != GET_THREAD()) return;
5151 
 /* NOTE(review): line 5152 is missing from this extract; upstream scans
  * living_threads with st_foreach(..., check_deadlock_i,
  * (st_data_t)&found) to look for a runnable thread. */
5153 
5154  if (!found) {
 /* no runnable thread: raise fatal "Deadlock?" on the main thread */
5155  VALUE argv[2];
5156  argv[0] = rb_eFatal;
5157  argv[1] = rb_str_new2("No live threads left. Deadlock?");
5158 #ifdef DEBUG_DEADLOCK_CHECK
5159  printf("%d %d %p %p\n", vm->living_threads->num_entries, vm->sleeper, GET_THREAD(), vm->main_thread);
5160  st_foreach(vm->living_threads, debug_i, (st_data_t)0);
5161 #endif
5162  vm->sleeper--;
5163  rb_threadptr_raise(vm->main_thread, 2, argv);
5164  }
5165 }
5166 
/* NOTE(review): line 5168 (name/parameters) and line 5170 (fetching the
 * per-file `coverage` array) are missing from this extract; this is the
 * coverage event hook (update_coverage) -- confirm upstream. */
5167 static void
5169 {
5171  if (coverage && RBASIC(coverage)->klass == 0) {
5172  long line = rb_sourceline() - 1;
5173  long count;
 /* Qnil marks a non-executable line: nothing to count */
5174  if (RARRAY_PTR(coverage)[line] == Qnil) {
5175  return;
5176  }
5177  count = FIX2LONG(RARRAY_PTR(coverage)[line]) + 1;
 /* only store while the count still fits in a Fixnum (saturate) */
5178  if (POSFIXABLE(count)) {
5179  RARRAY_PTR(coverage)[line] = LONG2FIX(count);
5180  }
5181  }
5182 }
5183 
5184 VALUE
5186 {
5187  return GET_VM()->coverages;
5188 }
5189 
5190 void
5192 {
5193  GET_VM()->coverages = coverages;
5195 }
5196 
5197 void
5199 {
5200  GET_VM()->coverages = Qfalse;
5202 }
5203 
5204 VALUE
5206 {
5207  VALUE interrupt_mask = rb_hash_new();
5208  rb_thread_t *cur_th = GET_THREAD();
5209 
5210  rb_hash_aset(interrupt_mask, rb_cObject, sym_never);
5211  rb_ary_push(cur_th->pending_interrupt_mask_stack, interrupt_mask);
5212 
5213  return rb_ensure(b_proc, data, rb_ary_pop, cur_th->pending_interrupt_mask_stack);
5214 }
VALUE data
Definition: tcltklib.c:3367
static int vm_living_thread_num(rb_vm_t *vm)
Definition: thread.c:2903
#define RB_TYPE_P(obj, type)
VALUE rb_mutex_locked_p(VALUE mutex)
Definition: thread.c:4182
rb_control_frame_t * cfp
Definition: vm_core.h:500
RARRAY_PTR(q->result)[0]
void rb_threadptr_unlock_all_locking_mutexes(rb_thread_t *th)
Definition: thread.c:392
rb_thread_list_t * join_list
Definition: vm_core.h:581
static VALUE sym_never
Definition: thread.c:84
#define ALLOC(type)
volatile VALUE tmp
Definition: tcltklib.c:10208
static VALUE thgroup_enclose(VALUE group)
Definition: thread.c:3985
VALUE rb_eStandardError
Definition: error.c:514
static VALUE rb_thread_variable_p(VALUE thread, VALUE key)
Definition: thread.c:3000
#define eKillSignal
Definition: thread.c:94
#define RUBY_VM_CHECK_INTS(th)
Definition: vm_core.h:953
unsigned long running_time_us
Definition: vm_core.h:617
#define RUBY_EVENT_THREAD_END
rb_vm_t * vm
Definition: vm_core.h:495
#define rb_fd_init_copy(d, s)
VALUE rb_exc_new(VALUE etype, const char *ptr, long len)
Definition: error.c:541
ssize_t n
Definition: bigdecimal.c:5676
VALUE sym
Definition: tkutil.c:1298
static VALUE thgroup_add(VALUE group, VALUE thread)
Definition: thread.c:4043
volatile VALUE ary
Definition: tcltklib.c:9712
static int check_deadlock_i(st_data_t key, st_data_t val, int *found)
Definition: thread.c:5096
#define RUBY_EVENT_THREAD_BEGIN
VALUE rb_thread_main(void)
Definition: thread.c:2376
int rb_thread_check_trap_pending()
Definition: thread.c:1105
int ruby_thread_has_gvl_p(void)
Definition: thread.c:1448
VP_EXPORT int
Definition: bigdecimal.c:5071
VALUE rb_ary_pop(VALUE ary)
Definition: array.c:866
VALUE rb_get_coverages(void)
Definition: thread.c:5185
static VALUE rb_thread_s_handle_interrupt(VALUE self, VALUE mask_arg)
Definition: thread.c:1744
static const rb_thread_t * patrol_thread
Definition: thread.c:4282
struct rb_mutex_struct * next_mutex
Definition: thread.c:382
void ruby_thread_stack_overflow(rb_thread_t *th)
Definition: thread.c:2036
void rb_bug(const char *fmt,...)
Definition: error.c:295
VALUE * root_lep
Definition: vm_core.h:526
static VALUE rb_thread_priority(VALUE thread)
Definition: thread.c:3032
int gettimeofday(struct timeval *, struct timezone *)
Definition: win32.c:4017
#define FALSE
Definition: nkf.h:174
#define rb_hash_lookup
Definition: tcltklib.c:268
#define mutex_mark
Definition: thread.c:4105
RUBY_EXTERN VALUE rb_cModule
Definition: ripper.y:1445
static VALUE VALUE th
Definition: tcltklib.c:2947
static int lock_func(rb_thread_t *th, rb_mutex_t *mutex, int timeout_ms)
Definition: thread.c:4228
static const char * rb_mutex_unlock_th(rb_mutex_t *mutex, rb_thread_t volatile *th)
Definition: thread.c:4383
static void thread_cleanup_func_before_exec(void *th_ptr)
Definition: thread.c:443
static VALUE trap(int sig, sighandler_t func, VALUE command)
Definition: signal.c:916
#define OBJ_INFECT(x, s)
struct rb_thread_struct * running_thread
Definition: vm_core.h:344
VALUE rb_make_exception(int argc, VALUE *argv)
Definition: eval.c:642
void rb_thread_wait_fd(int)
Definition: thread.c:3436
struct timeval * tv
Definition: thread.c:3654
void rb_thread_atfork_before_exec(void)
Definition: thread.c:3871
const char * rb_obj_classname(VALUE)
Definition: variable.c:396
#define RUBY_VM_SET_INTERRUPT(th)
Definition: vm_core.h:921
static VALUE rb_thread_abort_exc_set(VALUE thread, VALUE val)
Definition: thread.c:2475
st_table * local_storage
Definition: vm_core.h:579
double limit
Definition: thread.c:738
VALUE rb_proc_location(VALUE self)
Definition: proc.c:737
void rb_thread_lock_unlock(rb_thread_lock_t *lock)
Definition: thread.c:281
Win32OLEIDispatch * p
Definition: win32ole.c:786
int pending_interrupt_queue_checked
Definition: vm_core.h:551
VALUE rb_eSignal
Definition: error.c:512
static void rb_mutex_abandon_all(rb_mutex_t *mutexes)
Definition: thread.c:4458
struct rb_blocking_region_buffer * rb_thread_blocking_region_begin(void)
Definition: thread.c:1188
rb_fdset_t * read
Definition: thread.c:3651
int count
Definition: encoding.c:51
static int max(int a, int b)
Definition: strftime.c:141
int st_lookup(st_table *, st_data_t, st_data_t *)
VALUE(* func)(VALUE, VALUE, int)
Definition: thread.c:4794
void rb_define_singleton_method(VALUE obj, const char *name, VALUE(*func)(ANYARGS), int argc)
Defines a singleton method for obj.
Definition: class.c:1501
static VALUE thgroup_enclosed_p(VALUE group)
Definition: thread.c:4005
void rb_threadptr_execute_interrupts(rb_thread_t *th, int blocking_timing)
Definition: thread.c:1896
VALUE rb_thread_list(void)
Definition: thread.c:2347
#define GetProcPtr(obj, ptr)
Definition: vm_core.h:665
static VALUE thread_join_sleep(VALUE arg)
Definition: thread.c:764
VALUE rb_blocking_function_t(void *)
Definition: ripper.y:836
rb_thread_lock_t interrupt_lock
Definition: vm_core.h:556
pthread_mutex_t rb_thread_lock_t
rb_thread_lock_t thread_destruct_lock
Definition: vm_core.h:341
st_table * st_init_numtable(void)
Definition: st.c:272
static int terminate_atfork_before_exec_i(st_data_t key, st_data_t val, st_data_t current_th)
Definition: thread.c:3858
void rb_thread_blocking_region_end(struct rb_blocking_region_buffer *region)
Definition: thread.c:1197
VALUE proc
Definition: tcltklib.c:2958
static VALUE rb_thread_variables(VALUE thread)
Definition: thread.c:2971
struct rb_thread_struct * th
Definition: vm_core.h:489
VALUE rb_ary_delete_at(VALUE ary, long pos)
Definition: array.c:2801
static VALUE recursive_list_access(void)
Definition: thread.c:4678
static int VALUE table
Definition: tcltklib.c:10137
SSL_METHOD *(* func)(void)
Definition: ossl_ssl.c:108
rb_unblock_function_t * func
Definition: vm_core.h:480
void rb_secure(int)
Definition: safe.c:79
static void update_coverage(rb_event_flag_t event, VALUE proc, VALUE self, ID id, VALUE klass)
Definition: thread.c:5168
static VALUE thread_s_new(int argc, VALUE *argv, VALUE klass)
Definition: thread.c:667
ssize_t i
Definition: bigdecimal.c:5676
void rb_error_frozen(const char *what)
Definition: error.c:1980
#define T_NODE
VALUE pending_interrupt_mask_stack
Definition: vm_core.h:552
VALUE rb_ary_shift(VALUE ary)
Definition: array.c:916
VALUE rb_mod_ancestors(VALUE mod)
Definition: class.c:924
static VALUE mutex_initialize(VALUE self)
Definition: thread.c:4164
void * rb_thread_call_without_gvl2(void *(*func)(void *), void *data1, rb_unblock_function_t *ubf, void *data2)
Definition: thread.c:1322
VALUE coverage
Definition: vm_core.h:219
VALUE rb_hash_lookup2(VALUE, VALUE, VALUE)
Definition: hash.c:581
static void rb_mutex_abandon_keeping_mutexes(rb_thread_t *th)
Definition: thread.c:4436
void rb_threadptr_signal_raise(rb_thread_t *th, int sig)
Definition: thread.c:2012
VALUE rb_iv_get(VALUE, const char *)
Definition: variable.c:2586
struct rb_thread_struct volatile * th
Definition: thread.c:380
static struct timeval double2timeval(double d)
Definition: thread.c:923
ID rb_frame_this_func(void)
Definition: eval.c:902
#define RHASH(obj)
int status
Definition: tcltklib.c:2196
#define sysstack_error
Definition: vm_core.h:866
SOCKET rb_w32_get_osfhandle(int)
Definition: win32.c:958
VALUE rb_eTypeError
Definition: error.c:516
#define OBJ_FREEZE(x)
#define TH_JUMP_TAG(th, st)
Definition: eval_intern.h:144
static VALUE mutex_alloc(VALUE klass)
Definition: thread.c:4146
void rb_define_alloc_func(VALUE, rb_alloc_func_t)
param thread
Definition: tcltklib.c:4132
static const rb_data_type_t mutex_data_type
Definition: thread.c:4129
VALUE exc
Definition: tcltklib.c:3095
#define RUBY_UBF_IO
VALUE rb_ary_push(VALUE ary, VALUE item)
Definition: array.c:822
VALUE rb_mutex_lock(VALUE mutex)
Definition: thread.c:4292
st_table * living_threads
Definition: vm_core.h:346
void rb_signal_exec(rb_thread_t *th, int sig)
Definition: signal.c:728
static int handle_interrupt_arg_check_i(VALUE key, VALUE val)
Definition: thread.c:1628
VALUE rb_ary_tmp_new(long capa)
Definition: array.c:465
#define RHASH_TBL(h)
#define RSTRING_PTR(str)
#define CLASS_OF(v)
NIL_P(eventloop_thread)
Definition: tcltklib.c:4067
static VALUE rb_thread_safe_level(VALUE thread)
Definition: thread.c:2636
static VALUE rb_thread_aset(VALUE self, VALUE id, VALUE val)
Definition: thread.c:2786
#define xfree
VALUE rb_exec_recursive_paired_outer(VALUE(*)(VALUE, VALUE, int), VALUE, VALUE, VALUE)
Definition: thread.c:4910
void rb_threadptr_pending_interrupt_enque(rb_thread_t *th, VALUE v)
Definition: thread.c:1500
#define OBJ_ID_EQL(obj_id, other)
register C_block * tp
Definition: crypt.c:311
void rb_raise(VALUE exc, const char *fmt,...)
Definition: error.c:1788
VALUE rb_thread_alloc(VALUE klass)
Definition: vm.c:2071
static VALUE rb_mutex_sleep_forever(VALUE time)
Definition: thread.c:4471
static VALUE rb_thread_abort_exc(VALUE thread)
Definition: thread.c:2457
static void blocking_region_end(rb_thread_t *th, struct rb_blocking_region_buffer *region)
Definition: thread.c:1174
#define T_HASH
return Qtrue
Definition: tcltklib.c:9609
VALUE rb_obj_id(VALUE)
Definition: gc.c:1690
VALUE rb_ary_clear(VALUE ary)
Definition: array.c:3208
static void clear_coverage(void)
Definition: thread.c:3808
#define POSFIXABLE(f)
#define TH_EXEC_TAG()
Definition: eval_intern.h:139
static void sleep_forever(rb_thread_t *th, int nodeadlock, int spurious_check)
Definition: thread.c:943
RUBY_EXTERN VALUE rb_eIOError
Definition: ripper.y:1476
VALUE rb_eSecurityError
Definition: error.c:525
#define RUBY_VM_SET_TRAP_INTERRUPT(th)
Definition: vm_core.h:923
static size_t thgroup_memsize(const void *ptr)
Definition: thread.c:3882
rb_thread_cond_t cond
Definition: thread.c:379
static void rb_mutex_abandon_locking_mutex(rb_thread_t *th)
Definition: thread.c:4445
static VALUE sym_immediate
Definition: thread.c:82
static int blocking_region_begin(rb_thread_t *th, struct rb_blocking_region_buffer *region, rb_unblock_function_t *ubf, void *arg, int fail_if_interrupted)
Definition: thread.c:1156
static void thread_shield_mark(void *ptr)
Definition: thread.c:4570
r
Definition: bigdecimal.c:1210
#define vsnprintf
#define TAG_RAISE
Definition: eval_intern.h:168
#define PUSH_TAG()
Definition: eval_intern.h:136
#define rb_str_new2
VALUE rb_catch_obj(VALUE, VALUE(*)(ANYARGS), VALUE)
long tv_sec
Definition: ossl_asn1.c:17
VALUE rb_vm_invoke_proc(rb_thread_t *th, rb_proc_t *proc, int argc, const VALUE *argv, const rb_block_t *blockptr)
Definition: vm.c:780
static size_t mutex_memsize(const void *ptr)
Definition: thread.c:4124
static volatile int system_working
Definition: thread.c:96
int state
Definition: tcltklib.c:1461
static VALUE thread_join(rb_thread_t *target_th, double delay)
Definition: thread.c:790
static VALUE remove_from_join_list(VALUE arg)
Definition: thread.c:743
static int rb_threadptr_dead(rb_thread_t *th)
Definition: thread.c:2530
static VALUE rb_thread_alive_p(VALUE thread)
Definition: thread.c:2588
rb_fdset_t * write
Definition: thread.c:3652
#define ID2SYM(x)
VALUE VALUE args
Definition: tcltklib.c:2560
#define T_OBJECT
#define rb_fd_rcopy(d, s)
Definition: thread.c:3335
static VALUE exec_recursive_i(VALUE tag, struct exec_recursive_params *p)
Definition: thread.c:4803
void rb_thread_start_timer_thread(void)
Definition: thread.c:3787
static rb_fdset_t * init_set_fd(int fd, rb_fdset_t *fds)
Definition: thread.c:3638
void rb_thread_terminate_all(void)
Definition: thread.c:409
VALUE rb_mutex_sleep(VALUE self, VALUE timeout)
Definition: thread.c:4486
#define rb_fd_term(f)
#define rb_fd_max(f)
VALUE rb_cMutex
Definition: thread.c:79
d
Definition: strlcat.c:58
void rb_hash_foreach(VALUE, int(*)(ANYARGS), VALUE)
Definition: hash.c:200
unsigned long rb_event_flag_t
Definition: ripper.y:1603
int allow_trap
Definition: thread.c:383
const char * fmt
Definition: tcltklib.c:841
VALUE rb_hash_delete(VALUE, VALUE)
Definition: hash.c:869
#define NOINLINE(x)
Definition: ruby.h:37
#define RB_WAITFD_OUT
Definition: io.h:49
VALUE thgroup_default
Definition: vm_core.h:347
time_t tv_sec
Definition: ripper.y:47
static VALUE rb_thread_stop_p(VALUE thread)
Definition: thread.c:2611
static void thread_cleanup_func(void *th_ptr, int atfork)
Definition: thread.c:454
static double timeofday(void)
Definition: thread.c:1039
#define TAG_FATAL
Definition: eval_intern.h:170
VALUE vm_thread_backtrace(int argc, VALUE *argv, VALUE thval)
Definition: vm_backtrace.c:861
static VALUE rb_thread_s_abort_exc_set(VALUE self, VALUE val)
Definition: thread.c:2439
VALUE(* first_func)(ANYARGS)
Definition: vm_core.h:585
#define MEMZERO(p, type, n)
static VALUE rb_thread_s_main(VALUE klass)
Definition: thread.c:2389
void rb_exc_raise(VALUE mesg)
Definition: eval.c:527
static void rb_thread_wait_fd_rw(int fd, int read)
Definition: thread.c:3416
static VALUE sym_on_blocking
Definition: thread.c:83
unsigned long st_data_t
Definition: ripper.y:35
VALUE * stack
Definition: vm_core.h:498
static void rb_thread_schedule_limits(unsigned long limits_us)
Definition: thread.c:1126
int st_delete(st_table *, st_data_t *, st_data_t *)
static void rb_threadptr_interrupt_common(rb_thread_t *th, int trap)
Definition: thread.c:330
static VALUE thgroup_s_alloc(VALUE klass)
Definition: thread.c:3911
VALUE hash
Definition: tkutil.c:267
#define RUBY_VM_INTERRUPTED_ANY(th)
Definition: vm_core.h:925
#define TH_POP_TAG()
Definition: eval_intern.h:129
static int thread_list_i(st_data_t key, st_data_t val, void *data)
Definition: thread.c:2307
static VALUE coverage(VALUE fname, int n)
Definition: ripper.c:11805
#define closed_stream_error
Definition: thread.c:98
static const char * thread_status_name(rb_thread_t *th)
Definition: thread.c:2511
rb_thread_t * target
Definition: thread.c:737
memset(y->frac+ix+1, 0,(y->Prec-(ix+1))*sizeof(BDIGIT))
VALUE rb_block_proc(void)
Definition: proc.c:458
#define rb_fd_select(n, rfds, wfds, efds, timeout)
#define RUBY_THREAD_PRIORITY_MAX
Definition: thread.c:68
BDIGIT m
Definition: bigdecimal.c:5106
static VALUE rb_thread_priority_set(VALUE thread, VALUE prio)
Definition: thread.c:3067
static int do_select(int n, rb_fdset_t *read, rb_fdset_t *write, rb_fdset_t *except, struct timeval *timeout)
Definition: thread.c:3339
#define FIXNUM_P(f)
return Qfalse
Definition: tcltklib.c:6778
static void sleep_for_polling(rb_thread_t *th)
Definition: thread.c:1062
#define RUBY_EVENT_COVERAGE
#define TypedData_Get_Struct(obj, type, data_type, sval)
int rb_block_given_p(void)
Definition: eval.c:672
#define EXEC_TAG()
Definition: eval_intern.h:141
int rb_threadptr_set_raised(rb_thread_t *th)
Definition: thread.c:2048
#define RARRAY_LEN(a)
VALUE locking_mutex
Definition: vm_core.h:558
#define Qnil
Definition: tcltklib.c:1895
#define StringValuePtr(v)
static const rb_data_type_t thread_shield_data_type
Definition: thread.c:4575
#define val
Definition: tcltklib.c:1948
VALUE * rb_vm_ep_local_ep(VALUE *ep)
Definition: vm.c:36
long tv_usec
Definition: ossl_asn1.c:18
VALUE rb_eRuntimeError
Definition: error.c:515
void rb_gc_finalize_deferred(void)
Definition: gc.c:1457
rb_thread_lock_t lock
Definition: thread.c:378
static VALUE rb_thread_inspect(VALUE thread)
Definition: thread.c:2652
#define RB_WAITFD_PRI
Definition: io.h:48
static VALUE rb_threadptr_raise(rb_thread_t *, int, VALUE *)
Definition: thread.c:1992
static VALUE char * str
Definition: tcltklib.c:3546
fd_set rb_fdset_t
Definition: ripper.y:326
int rb_typeddata_is_kind_of(VALUE obj, const rb_data_type_t *data_type)
Definition: error.c:478
VALUE rb_ary_new(void)
Definition: array.c:424
VALUE rb_thread_io_blocking_region(rb_blocking_function_t *func, void *data1, int fd)
Definition: thread.c:1336
void * blocking_region_buffer
Definition: vm_core.h:536
unsigned long ID
Definition: ripper.y:105
va_end(args)
static VALUE exec_recursive(VALUE(*func)(VALUE, VALUE, int), VALUE obj, VALUE pairid, VALUE arg, int outer)
Definition: thread.c:4832
static VALUE thread_create_core(VALUE thval, VALUE args, VALUE(*fn)(ANYARGS))
Definition: thread.c:608
void rb_gc_mark(VALUE)
Definition: gc.c:2600
void Init_Thread(void)
Definition: thread.c:4970
#define JUMP_TAG(st)
Definition: eval_intern.h:148
rb_iseq_t * iseq
Definition: vm_core.h:428
void rb_define_const(VALUE, const char *, VALUE)
Definition: variable.c:2204
static int rb_threadptr_pending_interrupt_include_p(rb_thread_t *th, VALUE err)
Definition: thread.c:1559
long tv_nsec
Definition: ripper.y:48
void rb_thread_stop_timer_thread(int close_anyway)
Definition: thread.c:3773
#define UNLIKELY(x)
Definition: vm_core.h:115
VALUE rb_define_class(const char *name, VALUE super)
Defines a top-level class.
Definition: class.c:503
#define PRIxVALUE
static VALUE VALUE obj
Definition: tcltklib.c:3157
static void rb_threadptr_ready(rb_thread_t *th)
Definition: thread.c:1986
int enclosed
Definition: thread.c:3877
#define INT2FIX(i)
#define FIX2LONG(x)
#define ANYARGS
#define thread_debug
Definition: thread.c:211
static int rb_threadptr_pending_interrupt_empty_p(rb_thread_t *th)
Definition: thread.c:1553
int rb_thread_alone(void)
Definition: thread.c:2909
double rb_num2dbl(VALUE)
Definition: object.c:2742
#define rb_fd_zero(f)
void rb_thread_recycle_stack_release(VALUE *)
Definition: vm.c:1835
VALUE rb_thread_shield_wait(VALUE self)
Definition: thread.c:4629
void rb_threadptr_check_signal(rb_thread_t *mth)
Definition: thread.c:3732
#define xmalloc
#define xrealloc
int thread_abort_on_exception
Definition: vm_core.h:350
rb_thread_status
Definition: vm_core.h:455
void ruby_thread_init_stack(rb_thread_t *th)
Definition: thread.c:476
VALUE rb_mutex_owned_p(VALUE self)
Definition: thread.c:4368
static VALUE rb_thread_exit(void)
Definition: thread.c:2200
#define TypedData_Wrap_Struct(klass, data_type, sval)
void rb_exit(int status)
Definition: process.c:3567
#define rb_fd_ptr(f)
VALUE rb_thread_shield_release(VALUE self)
Definition: thread.c:4649
unsigned char buf[MIME_BUF_SIZE]
Definition: nkf.c:4308
void rb_unblock_function_t(void *)
Definition: ripper.y:835
volatile int sleeper
Definition: vm_core.h:352
void ruby_stop(int ex)
Calls ruby_cleanup() and exits the process.
Definition: eval.c:261
int err
Definition: win32.c:87
void rb_thread_check_ints(void)
Definition: thread.c:1095
#define EXIT_FAILURE
Definition: eval_intern.h:24
#define POP_TAG()
Definition: eval_intern.h:137
VALUE * machine_stack_start
Definition: vm_core.h:588
#define GVL_UNLOCK_BEGIN()
Definition: thread.c:137
static const rb_data_type_t thgroup_data_type
Definition: thread.c:3887
void rb_throw_obj(VALUE tag, VALUE value)
Definition: vm_eval.c:1720
static VALUE thread_s_current(VALUE klass)
Definition: thread.c:2370
#define FD_SET(fd, set)
Definition: win32.h:594
VALUE rb_cThreadShield
Definition: thread.c:80
static int VALUE key
Definition: tkutil.c:265
static void sleep_wait_for_interrupt(rb_thread_t *th, double sleepsec, int spurious_check)
Definition: thread.c:1056
void * rb_thread_call_without_gvl(void *(*func)(void *), void *data1, rb_unblock_function_t *ubf, void *data2)
#define TIMET_MAX
Definition: thread.c:76
#define ATOMIC_CAS(var, oldval, newval)
Definition: ruby_atomic.h:132
void rb_thread_polling(void)
Definition: thread.c:1078
VALUE read
Definition: io.c:8257
#define rb_fd_set(n, f)
VALUE * argv
Definition: tcltklib.c:1970
#define GetMutexPtr(obj, tobj)
Definition: thread.c:4102
VALUE rb_hash_aset(VALUE, VALUE, VALUE)
static VALUE rb_thread_backtrace_locations_m(int argc, VALUE *argv, VALUE thval)
Definition: thread.c:4941
VALUE rb_yield(VALUE)
Definition: vm_eval.c:933
int rb_thread_select(int max, fd_set *read, fd_set *write, fd_set *except, struct timeval *timeout)
Definition: thread.c:3449
memcpy(buf+1, str, len)
VALUE rb_thread_shield_destroy(VALUE self)
Definition: thread.c:4660
#define RTEST(v)
int st_foreach(st_table *, int(*)(ANYARGS), st_data_t)
Definition: st.c:1006
struct rb_unblock_callback oldubf
Definition: thread.c:112
#define rb_thread_set_current(th)
Definition: vm_core.h:901
int errno
#define TRUE
Definition: nkf.h:175
q result
Definition: tcltklib.c:7069
void rb_thread_atfork(void)
Definition: thread.c:3848
VALUE rb_thread_current(void)
Definition: thread.c:2355
static int thgroup_list_i(st_data_t key, st_data_t val, st_data_t data)
Definition: thread.c:3929
#define EXIT_SUCCESS
Definition: error.c:31
VALUE special_exceptions[ruby_special_error_count]
Definition: vm_core.h:357
struct rb_mutex_struct * keeping_mutexes
Definition: vm_core.h:559
VALUE rb_sprintf(const char *format,...)
Definition: sprintf.c:1275
volatile VALUE value
Definition: tcltklib.c:9441
VALUE rb_mutex_trylock(VALUE mutex)
Definition: thread.c:4209
static int set_unblock_function(rb_thread_t *th, rb_unblock_function_t *func, void *arg, struct rb_unblock_callback *old, int fail_if_interrupted)
Definition: thread.c:293
#define const
Definition: strftime.c:102
register char * s
Definition: os2.c:56
static int thread_fd_close_i(st_data_t key, st_data_t val, st_data_t data)
Definition: thread.c:2068
#define rb_fd_copy(d, s, n)
void ruby_xfree(void *x)
Definition: gc.c:3653
VP_EXPORT void
Definition: bigdecimal.c:5104
VALUE rb_class_inherited_p(VALUE, VALUE)
Definition: object.c:1503
#define DELAY_INFTY
Definition: thread.c:734
int rb_scan_args(int argc, const VALUE *argv, const char *fmt,...)
Definition: class.c:1574
VALUE rb_obj_is_mutex(VALUE obj)
Definition: thread.c:4135
VALUE retval
Definition: tcltklib.c:7829
#define RUBY_VM_SET_TIMER_INTERRUPT(th)
Definition: vm_core.h:920
union select_args::@140 as
static VALUE thread_initialize(VALUE thread, VALUE args)
Definition: thread.c:702
handle_interrupt_timing
Definition: thread.c:1506
static void rb_check_deadlock(rb_vm_t *vm)
Definition: thread.c:5144
static VALUE rb_mutex_synchronize_m(VALUE self, VALUE args)
Definition: thread.c:4549
#define GVL_UNLOCK_END()
Definition: thread.c:142
VALUE rb_exc_new2(VALUE etype, const char *s)
Definition: error.c:547
VALUE rb_thread_run(VALUE)
Definition: thread.c:2269
#define OBJ_FROZEN(x)
static VALUE thread_shield_alloc(VALUE klass)
Definition: thread.c:4581
void rb_mutex_allow_trap(VALUE self, int val)
Definition: thread.c:4558
VALUE group
Definition: thread.c:3878
VALUE vm_thread_backtrace_locations(int argc, VALUE *argv, VALUE thval)
Definition: vm_backtrace.c:867
VALUE rb_thread_kill(VALUE)
Definition: thread.c:2138
VALUE rb_mutex_synchronize(VALUE mutex, VALUE(*func)(VALUE arg), VALUE arg)
Definition: thread.c:4535
#define THREAD_SHIELD_WAITING_MASK
Definition: thread.c:4587
int argc
Definition: tcltklib.c:1969
#define SAVE_ROOT_JMPBUF(th, stmt)
Definition: eval_intern.h:112
#define RUBY_TYPED_DEFAULT_FREE
int rb_remove_event_hook(rb_event_hook_func_t func)
Definition: vm_trace.c:194
static int keys_i(VALUE key, VALUE value, VALUE ary)
Definition: thread.c:2947
#define UNINITIALIZED_VAR(x)
Definition: vm_core.h:121
struct rb_thread_struct * main_thread
Definition: vm_core.h:343
VALUE rb_thread_local_aref(VALUE, ID)
Definition: thread.c:2668
static int clear_coverage_i(st_data_t key, st_data_t val, st_data_t dummy)
Definition: thread.c:3794
int error
Definition: thread.c:3649
static VALUE rb_thread_s_pending_interrupt_p(int argc, VALUE *argv, VALUE self)
Definition: thread.c:1880
VALUE first_proc
Definition: vm_core.h:583
int rb_threadptr_reset_raised(rb_thread_t *th)
Definition: thread.c:2058
VALUE rb_thread_wakeup_alive(VALUE)
Definition: thread.c:2234
void rb_threadptr_pending_interrupt_clear(rb_thread_t *th)
Definition: thread.c:1494
int rb_wait_for_single_fd(int fd, int events, struct timeval *tv)
Definition: thread.c:3692
void rb_gc_set_stack_end(VALUE **stack_end_p)
Definition: thread.c:3719
static void rb_thread_shield_waiting_dec(VALUE b)
Definition: thread.c:4603
#define TH_PUSH_TAG(th)
Definition: eval_intern.h:122
VALUE rb_exec_recursive_outer(VALUE(*)(VALUE, VALUE, int), VALUE, VALUE)
Definition: thread.c:4898
static VALUE rb_thread_variable_get(VALUE thread, VALUE id)
Definition: thread.c:2822
VALUE rb_ensure(VALUE(*b_proc)(ANYARGS), VALUE data1, VALUE(*e_proc)(ANYARGS), VALUE data2)
Definition: eval.c:804
static VALUE thread_value(VALUE self)
Definition: thread.c:910
static void reset_unblock_function(rb_thread_t *th, const struct rb_unblock_callback *old)
Definition: thread.c:322
rb_atomic_t interrupt_flag
Definition: vm_core.h:554
RUBY_EXTERN int isinf(double)
Definition: isinf.c:56
static void timer_thread_function(void *)
Definition: thread.c:3742
#define rb_fd_isset(n, f)
int rb_sourceline(void)
Definition: vm.c:884
void rb_thread_schedule(void)
Definition: thread.c:1143
void rb_sys_fail(const char *mesg)
Definition: error.c:1907
Real * b
Definition: bigdecimal.c:1196
return ptr
Definition: tcltklib.c:784
#define rb_fd_resize(n, f)
static VALUE rb_thread_backtrace_m(int argc, VALUE *argv, VALUE thval)
Definition: thread.c:4924
int abort_on_exception
Definition: vm_core.h:613
gz end
Definition: zlib.c:2270
static VALUE rb_thread_pending_interrupt_p(int argc, VALUE *argv, VALUE target_thread)
Definition: thread.c:1795
ID rb_to_id(VALUE)
Definition: string.c:8169
enum rb_thread_status status
Definition: vm_core.h:531
static void st_delete_wrap(st_table *table, st_data_t key)
Definition: thread.c:101
void rb_thread_sleep_forever(void)
Definition: thread.c:1025
static VALUE thread_s_pass(VALUE klass)
Definition: thread.c:1469
static VALUE thread_join_m(int argc, VALUE *argv, VALUE self)
Definition: thread.c:882
const char * rb_class2name(VALUE)
Definition: variable.c:389
VALUE rb_thread_wakeup(VALUE)
Definition: thread.c:2225
#define thread_start_func_2(th, st, rst)
Definition: thread.c:215
VALUE rb_mutex_unlock(VALUE mutex)
Definition: thread.c:4423
static void rb_thread_sleep_deadly(void)
Definition: thread.c:1032
arg
Definition: ripper.y:1317
enum rb_thread_status prev_status
Definition: thread.c:111
static VALUE mutex_sleep(int argc, VALUE *argv, VALUE self)
Definition: thread.c:4518
VALUE * machine_stack_end
Definition: vm_core.h:589
VALUE src
Definition: tcltklib.c:7952
VALUE first_args
Definition: vm_core.h:584
struct timeval rb_time_interval(VALUE num)
Definition: time.c:2496
#define THREAD_SHIELD_WAITING_SHIFT
Definition: thread.c:4588
static void rb_threadptr_to_kill(rb_thread_t *th)
Definition: thread.c:1886
int size
Definition: encoding.c:52
void rb_reset_coverages(void)
Definition: thread.c:5198
#define f
VALUE rb_mutex_new(void)
Definition: thread.c:4170
void rb_thread_wait_for(struct timeval)
Definition: thread.c:1071
void rb_thread_lock_destroy(rb_thread_lock_t *lock)
Definition: thread.c:287
static VALUE thgroup_list(VALUE group)
Definition: thread.c:3954
VALUE root_svar
Definition: vm_core.h:527
rb_block_t block
Definition: vm_core.h:669
#define Qundef
VALUE rb_obj_is_kind_of(VALUE, VALUE)
Definition: object.c:593
if(RB_TYPE_P(r, T_FLOAT))
Definition: bigdecimal.c:1200
#define RUBY_EVENT_SWITCH
unsigned long interrupt_mask
Definition: vm_core.h:555
#define rb_fd_clr(n, f)
#define RUBY_THREAD_PRIORITY_MIN
Definition: thread.c:69
int t
Definition: ripper.c:14654
void rb_thread_sleep(int)
Definition: thread.c:1120
VALUE rb_thread_group(VALUE thread)
Definition: thread.c:2497
struct rb_unblock_callback unblock
Definition: vm_core.h:557
static VALUE rb_thread_aref(VALUE thread, VALUE id)
Definition: thread.c:2747
VALUE rb_thread_shield_new(void)
Definition: thread.c:4613
#define BLOCKING_REGION(exec, ubf, ubfarg, fail_if_interrupted)
Definition: thread.c:152
void rb_thread_execute_interrupts(VALUE th)
Definition: thread.c:1978
DATA_PTR(self)
VALUE rb_exec_recursive_paired(VALUE(*)(VALUE, VALUE, int), VALUE, VALUE, VALUE)
Definition: thread.c:4886
ruby_debug
Definition: tcltklib.c:5816
#define RB_GC_SAVE_MACHINE_CONTEXT(th)
Definition: thread.c:129
#define TypedData_Make_Struct(klass, type, data_type, sval)
void * rb_thread_call_with_gvl(void *(*func)(void *), void *data1)
Definition: thread.c:1406
void rb_thread_reset_timer_thread(void)
Definition: thread.c:3781
RUBY_EXTERN VALUE rb_cObject
Definition: ripper.y:1426
st_data_t st_index_t
Definition: ripper.y:63
int rb_signal_buff_size(void)
Definition: signal.c:563
static void rb_thread_shield_waiting_inc(VALUE b)
Definition: thread.c:4592
#define LONG2FIX(i)
int rb_thread_fd_select(int, rb_fdset_t *, rb_fdset_t *, rb_fdset_t *, struct timeval *)
Definition: thread.c:3493
#define RBASIC(obj)
#define FD_CLR(f, s)
Definition: win32.h:612
VALUE root_fiber
Definition: vm_core.h:608
rb_thread_t * waiting
Definition: thread.c:737
klass
Definition: tcltklib.c:3503
#define INT2NUM(x)
struct rb_encoding_entry * list
Definition: encoding.c:50
#define ETIMEDOUT
Definition: win32.h:549
static VALUE rb_threadptr_pending_interrupt_deque(rb_thread_t *th, enum handle_interrupt_timing timing)
Definition: thread.c:1572
static void recursive_push(VALUE list, VALUE obj, VALUE paired_obj)
Definition: thread.c:4742
static VALUE thread_start(VALUE klass, VALUE args)
Definition: thread.c:695
static VALUE rb_mutex_wait_for(VALUE time)
Definition: thread.c:4478
VALUE rb_ary_dup(VALUE ary)
Definition: array.c:1766
int st_insert(st_table *, st_data_t, st_data_t)
void rb_thread_fd_close(int)
Definition: thread.c:2083
#define GetThreadPtr(obj, ptr)
Definition: vm_core.h:452
VALUE rb_exec_recursive(VALUE(*)(VALUE, VALUE, int), VALUE, VALUE)
Definition: thread.c:4875
int rb_atomic_t
Definition: ruby_atomic.h:120
static VALUE thread_raise_m(int argc, VALUE *argv, VALUE self)
Definition: thread.c:2110
#define OBJ_UNTRUST(x)
#define rb_fd_dup(d, s)
#define rb_safe_level()
Definition: tcltklib.c:94
#define rb_thread_shield_waiting(b)
Definition: thread.c:4589
#define EXEC_EVENT_HOOK(th_, flag_, self_, id_, klass_, data_)
Definition: vm_core.h:998
static void sleep_timeval(rb_thread_t *th, struct timeval time, int spurious_check)
Definition: thread.c:983
void st_clear(st_table *)
Definition: st.c:308
#define PRIdVALUE
VALUE rb_thread_stop(void)
Definition: thread.c:2296
#define NUM2INT(x)
VALUE rb_hash_new(void)
Definition: hash.c:234
VALUE rb_obj_alloc(VALUE)
Definition: object.c:1740
void rb_threadptr_trap_interrupt(rb_thread_t *th)
Definition: thread.c:353
VALUE rb_eFatal
Definition: error.c:513
int forever
Definition: thread.c:739
#define rb_fd_init(f)
#define ruby_native_thread_p()
Definition: tcltklib.c:82
static int terminate_i(st_data_t key, st_data_t val, rb_thread_t *main_thread)
Definition: thread.c:359
struct rb_thread_list_struct * next
Definition: vm_core.h:488
#define RUBY_VM_INTERRUPTED(th)
Definition: vm_core.h:924
BDIGIT e
Definition: bigdecimal.c:5106
static VALUE rb_thread_s_abort_exc(void)
Definition: thread.c:2408
static void recursive_pop(VALUE list, VALUE obj, VALUE paired_obj)
Definition: thread.c:4773
VALUE rb_hash_aref(VALUE, VALUE)
Definition: hash.c:570
unsigned long VALUE
Definition: ripper.y:104
void rb_vm_gvl_destroy(rb_vm_t *vm)
Definition: thread.c:273
int rb_threadptr_pending_interrupt_active_p(rb_thread_t *th)
Definition: thread.c:1609
VALUE rb_uninterruptible(VALUE(*b_proc)(ANYARGS), VALUE data)
Definition: thread.c:5205
rb_fdset_t * except
Definition: thread.c:3653
static void mutex_locked(rb_thread_t *th, VALUE self)
Definition: thread.c:4190
VALUE rb_thread_create(VALUE(*)(ANYARGS), void *)
Definition: thread.c:727
#define FD_ISSET(f, s)
Definition: win32.h:615
VALUE rb_thread_local_aset(VALUE, ID, VALUE)
Definition: thread.c:2753
static VALUE rb_thread_keys(VALUE self)
Definition: thread.c:2934
#define GetThreadShieldPtr(obj)
Definition: thread.c:4586
#define RB_WAITFD_IN
Definition: io.h:47
#define RHASH_EMPTY_P(h)
VALUE pending_interrupt_queue
Definition: vm_core.h:550
#define OBJ_TAINT(x)
VALUE write
Definition: io.c:8257
static void getclockofday(struct timeval *tp)
Definition: thread.c:967
static VALUE select_single_cleanup(VALUE ptr)
Definition: thread.c:3680
VALUE rb_vm_make_jump_tag_but_local_jump(int state, VALUE val)
Definition: vm.c:975
#define rb_intern(str)
static VALUE select_single(VALUE ptr)
Definition: thread.c:3658
BDIGIT v
Definition: bigdecimal.c:5677
struct rb_mutex_struct rb_mutex_t
#define eTerminateSignal
Definition: thread.c:95
int cond_waiting
Definition: thread.c:381
VALUE except
Definition: io.c:8257
void rb_add_event_hook(rb_event_hook_func_t func, rb_event_flag_t events, VALUE data)
Definition: vm_trace.c:135
VALUE rb_eSystemExit
Definition: error.c:510
#define NULL
Definition: _sdbm.c:103
int rb_get_next_signal(void)
Definition: signal.c:593
VALUE time
Definition: tcltklib.c:1865
#define RUBY_UBF_PROCESS
static int thread_keys_i(ID key, VALUE value, VALUE ary)
Definition: thread.c:2896
static void * call_without_gvl(void *(*func)(void *), void *data1, rb_unblock_function_t *ubf, void *data2, int fail_if_interrupted)
Definition: thread.c:1208
#define GET_THROWOBJ_STATE(obj)
Definition: eval_intern.h:182
static VALUE rb_thread_variable_set(VALUE thread, VALUE id, VALUE val)
Definition: thread.c:2847
void rb_threadptr_interrupt(rb_thread_t *th)
Definition: thread.c:347
st_index_t num_entries
Definition: ripper.y:93
VALUE rb_thread_blocking_region(rb_blocking_function_t *func, void *data1, rb_unblock_function_t *ubf, void *data2)
Definition: thread.c:1369
void rb_obj_call_init(VALUE obj, int argc, VALUE *argv)
Definition: eval.c:1220
static void mutex_free(void *ptr)
Definition: thread.c:4108
static rb_thread_t * GET_THREAD(void)
Definition: vm_core.h:888
void rb_define_method(VALUE klass, const char *name, VALUE(*func)(ANYARGS), int argc)
Definition: class.c:1348
int retry
Definition: tcltklib.c:10150
int rb_thread_fd_writable(int)
Definition: thread.c:3442
char * dst
Definition: tcltklib.c:9867
#define GET_THROWOBJ_VAL(obj)
Definition: eval_intern.h:180
void rb_set_coverages(VALUE coverages)
Definition: thread.c:5191
int select(int num_fds, fd_set *in_fds, fd_set *out_fds, fd_set *ex_fds, struct timeval *timeout)
VALUE rb_eThreadError
Definition: eval.c:690
static VALUE rb_thread_key_p(VALUE self, VALUE key)
Definition: thread.c:2879
VALUE rb_eArgError
Definition: error.c:517
#define RUBY_VM_CHECK_INTS_BLOCKING(th)
Definition: vm_core.h:942
VALUE rb_convert_type(VALUE, int, const char *, const char *)
Definition: object.c:2400
static VALUE rb_thread_s_kill(VALUE obj, VALUE th)
Definition: thread.c:2183
static VALUE recursive_check(VALUE list, VALUE obj_id, VALUE paired_obj_id)
Definition: thread.c:4707
RUBY_EXTERN VALUE rb_cThread
Definition: ripper.y:1459
void rb_reset_random_seed(void)
Definition: random.c:1443
rb_thread_id_t thread_id
Definition: vm_core.h:530
void rb_threadptr_signal_exit(rb_thread_t *th)
Definition: thread.c:2022
static void lock_interrupt(void *ptr)
Definition: thread.c:4268
int dummy
Definition: tcltklib.c:4482
static void rb_thread_atfork_internal(int(*atfork)(st_data_t, st_data_t, st_data_t))
Definition: thread.c:3817
VALUE * ep
Definition: vm_core.h:445
struct timeval rb_time_timeval(VALUE)
Definition: time.c:2502
VALUE rb_inspect(VALUE)
Definition: object.c:411
int rb_thread_interrupted(VALUE thval)
Definition: thread.c:1112
static enum handle_interrupt_timing rb_threadptr_pending_interrupt_check_mask(rb_thread_t *th, VALUE err)
Definition: thread.c:1514
static int terminate_atfork_i(st_data_t key, st_data_t val, st_data_t current_th)
Definition: thread.c:3833
#define GET_VM()
Definition: vm_core.h:881
static ID recursive_key
Definition: thread.c:4669