Ruby  2.0.0p648(2015-12-16revision53162)
thread.c
Go to the documentation of this file.
1 /**********************************************************************
2 
3  thread.c -
4 
5  $Author: usa $
6 
7  Copyright (C) 2004-2007 Koichi Sasada
8 
9 **********************************************************************/
10 
11 /*
12  YARV Thread Design
13 
14  model 1: Userlevel Thread
15  Same as traditional ruby thread.
16 
17  model 2: Native Thread with Global VM lock
18  Using pthread (or Windows thread) and Ruby threads run concurrent.
19 
20  model 3: Native Thread with fine grain lock
21  Using pthread and Ruby threads run concurrent or parallel.
22 
23 ------------------------------------------------------------------------
24 
25  model 2:
26  Only a thread holding the mutex (GVL: Global VM Lock or Giant VM Lock) can run.
27  When scheduling, the running thread releases the GVL. If the running thread
28  attempts a blocking operation, it must release the GVL so that another
29  thread can continue running. After the blocking operation completes, the
30  thread must check for interrupts (RUBY_VM_CHECK_INTS).
31 
32  Every VM can run parallel.
33 
34  Ruby threads are scheduled by OS thread scheduler.
35 
36 ------------------------------------------------------------------------
37 
38  model 3:
39  Every threads run concurrent or parallel and to access shared object
40  exclusive access control is needed. For example, to access String
41  object or Array object, fine grain lock must be locked every time.
42  */
43 
44 
45 /*
46  * FD_SET, FD_CLR and FD_ISSET have a small sanity check when using glibc
47  * 2.15 or later and set _FORTIFY_SOURCE > 0.
48  * However, the implementation is wrong. Even though Linux's select(2)
49  * support large fd size (>FD_SETSIZE), it wrongly assume fd is always
50  * less than FD_SETSIZE (i.e. 1024). And then when enabling HAVE_RB_FD_INIT,
51  * it doesn't work correctly and makes program abort. Therefore we need to
52  * disable _FORTIFY_SOURCE until glibc fixes it.
53  */
54 #undef _FORTIFY_SOURCE
55 #undef __USE_FORTIFY_LEVEL
56 #define __USE_FORTIFY_LEVEL 0
57 
58 /* for model 2 */
59 
60 #include "eval_intern.h"
61 #include "gc.h"
62 #include "internal.h"
63 #include "ruby/io.h"
64 #include "ruby/thread.h"
65 
66 #ifndef USE_NATIVE_THREAD_PRIORITY
67 #define USE_NATIVE_THREAD_PRIORITY 0
68 #define RUBY_THREAD_PRIORITY_MAX 3
69 #define RUBY_THREAD_PRIORITY_MIN -3
70 #endif
71 
72 #ifndef THREAD_DEBUG
73 #define THREAD_DEBUG 0
74 #endif
75 
76 #define TIMET_MAX (~(time_t)0 <= 0 ? (time_t)((~(unsigned_time_t)0) >> 1) : (time_t)(~(unsigned_time_t)0))
77 #define TIMET_MIN (~(time_t)0 <= 0 ? (time_t)(((unsigned_time_t)1) << (sizeof(time_t) * CHAR_BIT - 1)) : (time_t)0)
78 
81 
85 
86 static void sleep_timeval(rb_thread_t *th, struct timeval time, int spurious_check);
87 static void sleep_wait_for_interrupt(rb_thread_t *th, double sleepsec, int spurious_check);
88 static void sleep_forever(rb_thread_t *th, int nodeadlock, int spurious_check);
89 static double timeofday(void);
90 static int rb_threadptr_dead(rb_thread_t *th);
91 static void rb_check_deadlock(rb_vm_t *vm);
93 
94 #define eKillSignal INT2FIX(0)
95 #define eTerminateSignal INT2FIX(1)
96 static volatile int system_working = 1;
97 
98 #define closed_stream_error GET_VM()->special_exceptions[ruby_error_closed_stream]
99 
100 inline static void
102 {
103  st_delete(table, &key, 0);
104 }
105 
106 /********************************************************************************/
107 
108 #define THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION
109 
113 };
114 
116  struct rb_unblock_callback *old, int fail_if_interrupted);
117 static void reset_unblock_function(rb_thread_t *th, const struct rb_unblock_callback *old);
118 
119 static inline int blocking_region_begin(rb_thread_t *th, struct rb_blocking_region_buffer *region,
120  rb_unblock_function_t *ubf, void *arg, int fail_if_interrupted);
121 static inline void blocking_region_end(rb_thread_t *th, struct rb_blocking_region_buffer *region);
122 
123 #ifdef __ia64
124 #define RB_GC_SAVE_MACHINE_REGISTER_STACK(th) \
125  do{(th)->machine_register_stack_end = rb_ia64_bsp();}while(0)
126 #else
127 #define RB_GC_SAVE_MACHINE_REGISTER_STACK(th)
128 #endif
129 #define RB_GC_SAVE_MACHINE_CONTEXT(th) \
130  do { \
131  FLUSH_REGISTER_WINDOWS; \
132  RB_GC_SAVE_MACHINE_REGISTER_STACK(th); \
133  setjmp((th)->machine_regs); \
134  SET_MACHINE_STACK_END(&(th)->machine_stack_end); \
135  } while (0)
136 
137 #define GVL_UNLOCK_BEGIN() do { \
138  rb_thread_t *_th_stored = GET_THREAD(); \
139  RB_GC_SAVE_MACHINE_CONTEXT(_th_stored); \
140  gvl_release(_th_stored->vm);
141 
142 #define GVL_UNLOCK_END() \
143  gvl_acquire(_th_stored->vm, _th_stored); \
144  rb_thread_set_current(_th_stored); \
145 } while(0)
146 
147 #ifdef __GNUC__
148 #define only_if_constant(expr, notconst) (__builtin_constant_p(expr) ? (expr) : (notconst))
149 #else
150 #define only_if_constant(expr, notconst) notconst
151 #endif
152 #define BLOCKING_REGION(exec, ubf, ubfarg, fail_if_interrupted) do { \
153  rb_thread_t *__th = GET_THREAD(); \
154  struct rb_blocking_region_buffer __region; \
155  if (blocking_region_begin(__th, &__region, (ubf), (ubfarg), fail_if_interrupted) || \
156  /* always return true unless fail_if_interrupted */ \
157  !only_if_constant(fail_if_interrupted, TRUE)) { \
158  exec; \
159  blocking_region_end(__th, &__region); \
160  }; \
161 } while(0)
162 
163 #if THREAD_DEBUG
164 #ifdef HAVE_VA_ARGS_MACRO
165 void rb_thread_debug(const char *file, int line, const char *fmt, ...);
166 #define thread_debug(fmt, ...) rb_thread_debug(__FILE__, __LINE__, fmt, ##__VA_ARGS__)
167 #define POSITION_FORMAT "%s:%d:"
168 #define POSITION_ARGS ,file, line
169 #else
170 void rb_thread_debug(const char *fmt, ...);
171 #define thread_debug rb_thread_debug
172 #define POSITION_FORMAT
173 #define POSITION_ARGS
174 #endif
175 
176 # if THREAD_DEBUG < 0
177 static int rb_thread_debug_enabled;
178 
179 /*
180  * call-seq:
181  * Thread.DEBUG -> num
182  *
183  * Returns the thread debug level. Available only if compiled with
184  * THREAD_DEBUG=-1.
185  */
186 
187 static VALUE
188 rb_thread_s_debug(void)
189 {
190  return INT2NUM(rb_thread_debug_enabled);
191 }
192 
193 /*
194  * call-seq:
195  * Thread.DEBUG = num
196  *
197  * Sets the thread debug level. Available only if compiled with
198  * THREAD_DEBUG=-1.
199  */
200 
201 static VALUE
202 rb_thread_s_debug_set(VALUE self, VALUE val)
203 {
204  rb_thread_debug_enabled = RTEST(val) ? NUM2INT(val) : 0;
205  return val;
206 }
207 # else
208 # define rb_thread_debug_enabled THREAD_DEBUG
209 # endif
210 #else
211 #define thread_debug if(0)printf
212 #endif
213 
214 #ifndef __ia64
215 #define thread_start_func_2(th, st, rst) thread_start_func_2(th, st)
216 #endif
217 NOINLINE(static int thread_start_func_2(rb_thread_t *th, VALUE *stack_start,
218  VALUE *register_stack_start));
219 static void timer_thread_function(void *);
220 
221 #if defined(_WIN32)
222 #include "thread_win32.c"
223 
224 #define DEBUG_OUT() \
225  WaitForSingleObject(&debug_mutex, INFINITE); \
226  printf(POSITION_FORMAT"%p - %s" POSITION_ARGS, GetCurrentThreadId(), buf); \
227  fflush(stdout); \
228  ReleaseMutex(&debug_mutex);
229 
230 #elif defined(HAVE_PTHREAD_H)
231 #include "thread_pthread.c"
232 
233 #define DEBUG_OUT() \
234  pthread_mutex_lock(&debug_mutex); \
235  printf(POSITION_FORMAT"%#"PRIxVALUE" - %s" POSITION_ARGS, (VALUE)pthread_self(), buf); \
236  fflush(stdout); \
237  pthread_mutex_unlock(&debug_mutex);
238 
239 #else
240 #error "unsupported thread type"
241 #endif
242 
243 #if THREAD_DEBUG
244 static int debug_mutex_initialized = 1;
245 static rb_thread_lock_t debug_mutex;
246 
/*
 * Debug-print helper used by the thread_debug() macro when THREAD_DEBUG is
 * enabled. Formats like printf into a fixed BUFSIZ buffer (silently
 * truncating longer messages via vsnprintf) and emits it through the
 * platform-specific DEBUG_OUT() macro under debug_mutex.
 *
 * NOTE(review): the lazy initialization of debug_mutex below is a
 * check-then-act sequence with no synchronization; it is only safe if the
 * first rb_thread_debug() call happens before any second native thread can
 * reach it — confirm against upstream before relying on it.
 */
247 void
248 rb_thread_debug(
249 #ifdef HAVE_VA_ARGS_MACRO
250  const char *file, int line,
251 #endif
252  const char *fmt, ...)
253 {
254  va_list args;
255  char buf[BUFSIZ];
256 
257  if (!rb_thread_debug_enabled) return;
258 
259  if (debug_mutex_initialized == 1) {
260  debug_mutex_initialized = 0;
261  native_mutex_initialize(&debug_mutex);
262  }
263 
264  va_start(args, fmt);
265  vsnprintf(buf, BUFSIZ, fmt, args);
266  va_end(args);
267 
268  DEBUG_OUT();
269 }
270 #endif
271 
272 void
274 {
275  gvl_release(vm);
276  gvl_destroy(vm);
277  native_mutex_destroy(&vm->thread_destruct_lock);
278 }
279 
280 void
282 {
283  native_mutex_unlock(lock);
284 }
285 
286 void
288 {
289  native_mutex_destroy(lock);
290 }
291 
292 static int
294  struct rb_unblock_callback *old, int fail_if_interrupted)
295 {
296  check_ints:
297  if (fail_if_interrupted) {
298  if (RUBY_VM_INTERRUPTED_ANY(th)) {
299  return FALSE;
300  }
301  }
302  else {
303  RUBY_VM_CHECK_INTS(th);
304  }
305 
306  native_mutex_lock(&th->interrupt_lock);
307  if (RUBY_VM_INTERRUPTED_ANY(th)) {
308  native_mutex_unlock(&th->interrupt_lock);
309  goto check_ints;
310  }
311  else {
312  if (old) *old = th->unblock;
313  th->unblock.func = func;
314  th->unblock.arg = arg;
315  }
316  native_mutex_unlock(&th->interrupt_lock);
317 
318  return TRUE;
319 }
320 
321 static void
323 {
324  native_mutex_lock(&th->interrupt_lock);
325  th->unblock = *old;
326  native_mutex_unlock(&th->interrupt_lock);
327 }
328 
329 static void
331 {
332  native_mutex_lock(&th->interrupt_lock);
333  if (trap)
335  else
337  if (th->unblock.func) {
338  (th->unblock.func)(th->unblock.arg);
339  }
340  else {
341  /* none */
342  }
343  native_mutex_unlock(&th->interrupt_lock);
344 }
345 
346 void
348 {
350 }
351 
352 void
354 {
356 }
357 
358 static int
360 {
361  VALUE thval = key;
362  rb_thread_t *th;
363  GetThreadPtr(thval, th);
364 
365  if (th != main_thread) {
366  thread_debug("terminate_i: %p\n", (void *)th);
369  }
370  else {
371  thread_debug("terminate_i: main thread (%p)\n", (void *)th);
372  }
373  return ST_CONTINUE;
374 }
375 
376 typedef struct rb_mutex_struct
377 {
380  struct rb_thread_struct volatile *th;
384 } rb_mutex_t;
385 
386 static void rb_mutex_abandon_all(rb_mutex_t *mutexes);
389 static const char* rb_mutex_unlock_th(rb_mutex_t *mutex, rb_thread_t volatile *th);
390 
/*
 * Release every mutex still held by a terminating thread, walking the
 * singly linked keeping_mutexes list. Any unlock failure indicates
 * corrupted bookkeeping and is fatal (rb_bug).
 *
 * NOTE(review): this scrape dropped original line 392 — the signature line
 * naming this function (declared above as rb_threadptr_unlock_all_locking_mutexes
 * taking rb_thread_t *th); verify against upstream thread.c.
 */
391 void
393 {
394  const char *err;
395  rb_mutex_t *mutex;
396  rb_mutex_t *mutexes = th->keeping_mutexes;
397 
398  while (mutexes) {
399  mutex = mutexes;
400  /* rb_warn("mutex #<%p> remains to be locked by terminated thread",
401  mutexes); */
    /* advance before unlocking: rb_mutex_unlock_th may unlink this node */
402  mutexes = mutex->next_mutex;
403  err = rb_mutex_unlock_th(mutex, th);
404  if (err) rb_bug("invalid keeping_mutexes: %s", err);
405  }
406 }
407 
/*
 * Terminate all threads except the main thread, then wait (sleeping on the
 * main thread) until it is the only thread left. Must be called on the main
 * thread; calling from a child thread is a fatal bug. If the sleep is
 * interrupted (non-zero tag state), the whole kill-and-wait cycle retries.
 *
 * NOTE(review): the scrape dropped original lines 410 (signature/name
 * line), 420 (presumably the unlock-all-mutexes call the comment above it
 * refers to), 424, and 432 (likely the st_foreach(terminate_i) kill loop
 * and an interrupt check) — verify against upstream thread.c.
 */
408 void
410 {
411  rb_thread_t *th = GET_THREAD(); /* main thread */
412  rb_vm_t *vm = th->vm;
413 
414  if (vm->main_thread != th) {
415  rb_bug("rb_thread_terminate_all: called by child thread (%p, %p)",
416  (void *)vm->main_thread, (void *)th);
417  }
418 
419  /* unlock all locking mutexes */
421 
422  retry:
423  thread_debug("rb_thread_terminate_all (main thread: %p)\n", (void *)th);
425 
426  while (!rb_thread_alone()) {
427  int state;
428 
    /* sleep until the last child wakes us; an exception restarts the cycle */
429  TH_PUSH_TAG(th);
430  if ((state = TH_EXEC_TAG()) == 0) {
431  native_sleep(th, 0);
433  }
434  TH_POP_TAG();
435 
436  if (state) {
437  goto retry;
438  }
439  }
440 }
441 
442 static void
444 {
445  rb_thread_t *th = th_ptr;
446  th->status = THREAD_KILLED;
448 #ifdef __ia64
449  th->machine_register_stack_start = th->machine_register_stack_end = 0;
450 #endif
451 }
452 
/*
 * Final per-thread cleanup, run when a thread dies or the process forks.
 * Clears locking_mutex, then — except at fork time — destroys the thread's
 * interrupt lock and its native-thread resources.
 *
 * NOTE(review): the scrape dropped original line 459 here (between the
 * locking_mutex reset and the atfork comment) — verify against upstream.
 */
453 static void
454 thread_cleanup_func(void *th_ptr, int atfork)
455 {
456  rb_thread_t *th = th_ptr;
457 
458  th->locking_mutex = Qfalse;
460 
461  /*
462  * Unfortunately, we can't release native threading resource at fork
463  * because libc may have unstable locking state therefore touching
464  * a threading resource may cause a deadlock.
465  */
466  if (atfork)
467  return;
468 
469  native_mutex_destroy(&th->interrupt_lock);
470  native_thread_destroy(th);
471 }
472 
473 static VALUE rb_threadptr_raise(rb_thread_t *, int, VALUE *);
474 
475 void
477 {
478  native_thread_init_stack(th);
479 }
480 
/*
 * Entry point executed on a newly created native thread (never the main
 * thread). Under the GVL it runs either the thread's C function
 * (first_func) or its Ruby proc (first_proc) with first_args, records the
 * result in th->value, propagates fatal errors / SystemExit / (optionally)
 * other exceptions to the main thread, wakes joiners, and releases all
 * per-thread VM state before dropping the GVL.
 *
 * NOTE(review): this is a lossy scrape — original lines 510, 542, 571,
 * 582, 589, 593 and 601 are missing (among them: the abort_on_exception
 * condition tail, the living_threads deletion, a THREAD_STOPPED* case
 * label in the join-list switch, and the fiber-stack release). Verify any
 * behavioral claim against upstream thread.c before editing.
 */
481 static int
482 thread_start_func_2(rb_thread_t *th, VALUE *stack_start, VALUE *register_stack_start)
483 {
484  int state;
485  VALUE args = th->first_args;
486  rb_proc_t *proc;
487  rb_thread_list_t *join_list;
488  rb_thread_t *main_th;
489  VALUE errinfo = Qnil;
490 # ifdef USE_SIGALTSTACK
491  void rb_register_sigaltstack(rb_thread_t *th);
492 
493  rb_register_sigaltstack(th);
494 # endif
495 
496  if (th == th->vm->main_thread)
497  rb_bug("thread_start_func_2 must not used for main thread");
498 
499  ruby_thread_set_native(th);
500 
501  th->machine_stack_start = stack_start;
502 #ifdef __ia64
503  th->machine_register_stack_start = register_stack_start;
504 #endif
505  thread_debug("thread start: %p\n", (void *)th);
506 
507  gvl_acquire(th->vm, th);
508  {
509  thread_debug("thread start (get lock): %p\n", (void *)th);
511 
    /* run the thread body under a tag so exceptions land in `state` */
512  TH_PUSH_TAG(th);
513  if ((state = EXEC_TAG()) == 0) {
514  SAVE_ROOT_JMPBUF(th, {
515  if (!th->first_func) {
516  GetProcPtr(th->first_proc, proc);
517  th->errinfo = Qnil;
518  th->root_lep = rb_vm_ep_local_ep(proc->block.ep);
519  th->root_svar = Qnil;
520  EXEC_EVENT_HOOK(th, RUBY_EVENT_THREAD_BEGIN, th->self, 0, 0, Qundef);
521  th->value = rb_vm_invoke_proc(th, proc, (int)RARRAY_LEN(args), RARRAY_PTR(args), 0);
522  EXEC_EVENT_HOOK(th, RUBY_EVENT_THREAD_END, th->self, 0, 0, Qundef);
523  }
524  else {
525  th->value = (*th->first_func)((void *)args);
526  }
527  });
528  }
529  else {
    /* abnormal exit: decide whether the error propagates to main thread */
530  errinfo = th->errinfo;
531  if (state == TAG_FATAL) {
532  /* fatal error within this thread, need to stop whole script */
533  }
534  else if (th->safe_level >= 4) {
535  /* Ignore it. Main thread shouldn't be harmed from untrusted thread. */
536  errinfo = Qnil;
537  }
538  else if (rb_obj_is_kind_of(errinfo, rb_eSystemExit)) {
539  /* exit on main_thread. */
540  }
541  else if (th->vm->thread_abort_on_exception ||
543  /* exit on main_thread */
544  }
545  else {
546  errinfo = Qnil;
547  }
548  th->value = Qnil;
549  }
550 
551  th->status = THREAD_KILLED;
552  thread_debug("thread end: %p\n", (void *)th);
553 
554  main_th = th->vm->main_thread;
555  if (main_th == th) {
556  ruby_stop(0);
557  }
558  if (RB_TYPE_P(errinfo, T_OBJECT)) {
559  /* treat with normal error object */
560  rb_threadptr_raise(main_th, 1, &errinfo);
561  }
562  TH_POP_TAG();
563 
564  /* locking_mutex must be Qfalse */
565  if (th->locking_mutex != Qfalse) {
566  rb_bug("thread_start_func_2: locking_mutex must not be set (%p:%"PRIxVALUE")",
567  (void *)th, th->locking_mutex);
568  }
569 
570  /* delete self other than main thread from living_threads */
572  if (rb_thread_alone()) {
573  /* I'm last thread. wake up main thread from rb_thread_terminate_all */
574  rb_threadptr_interrupt(main_th);
575  }
576 
577  /* wake up joining threads */
578  join_list = th->join_list;
579  while (join_list) {
580  rb_threadptr_interrupt(join_list->th);
581  switch (join_list->th->status) {
583  join_list->th->status = THREAD_RUNNABLE;
584  default: break;
585  }
586  join_list = join_list->next;
587  }
588 
590  rb_check_deadlock(th->vm);
591 
592  if (!th->root_fiber) {
594  th->stack = 0;
595  }
596  }
    /* detach from the VM so the scheduler never points at a dead thread */
597  native_mutex_lock(&th->vm->thread_destruct_lock);
598  /* make sure vm->running_thread never point me after this point.*/
599  th->vm->running_thread = NULL;
600  native_mutex_unlock(&th->vm->thread_destruct_lock);
602  gvl_release(th->vm);
603 
604  return 0;
605 }
606 
/*
 * Common backend for Thread.new / Thread.start / rb_thread_create: fill in
 * the thread struct behind `thval` (entry function or block, args, priority
 * and ThreadGroup inherited from the caller), spawn the native thread, and
 * register it in vm->living_threads. Raises ThreadError if the ThreadGroup
 * is frozen or native thread creation fails.
 *
 * NOTE(review): the scrape dropped original lines 608 (the signature line
 * with the parameter list — callers pass (thval, args, fn)), 614 (the
 * rb_raise head for the frozen-ThreadGroup error), and 627-629 (setup of
 * pending_interrupt state preceding the RBASIC klass reset). Verify
 * against upstream thread.c.
 */
607 static VALUE
609 {
610  rb_thread_t *th, *current_th = GET_THREAD();
611  int err;
612 
613  if (OBJ_FROZEN(GET_THREAD()->thgroup)) {
615  "can't start a new thread (frozen ThreadGroup)");
616  }
617  GetThreadPtr(thval, th);
618 
619  /* setup thread environment */
620  th->first_func = fn;
621  th->first_proc = fn ? Qfalse : rb_block_proc();
622  th->first_args = args; /* GC: shouldn't put before above line */
623 
624  th->priority = current_th->priority;
625  th->thgroup = current_th->thgroup;
626 
630  RBASIC(th->pending_interrupt_mask_stack)->klass = 0;
631 
632  th->interrupt_mask = 0;
633 
634  native_mutex_initialize(&th->interrupt_lock);
635 
636  /* kick thread */
637  err = native_thread_create(th);
638  if (err) {
639  th->status = THREAD_KILLED;
640  rb_raise(rb_eThreadError, "can't create Thread (%d)", err);
641  }
642  st_insert(th->vm->living_threads, thval, (st_data_t) th->thread_id);
643  return thval;
644 }
645 
646 /*
647  * call-seq:
648  * Thread.new { ... } -> thread
649  * Thread.new(*args, &proc) -> thread
650  * Thread.new(*args) { |args| ... } -> thread
651  *
652  * Creates a new thread executing the given block.
653  *
654  * Any +args+ given to ::new will be passed to the block:
655  *
656  * arr = []
657  * a, b, c = 1, 2, 3
658  * Thread.new(a,b,c) { |d,e,f| arr << d << e << f }.join
659  * arr #=> [1, 2, 3]
660  *
661  * A ThreadError exception is raised if ::new is called without a block.
662  *
663  * If you're going to subclass Thread, be sure to call super in your
664  * +initialize+ method, otherwise a ThreadError will be raised.
665  */
666 static VALUE
668 {
669  rb_thread_t *th;
670  VALUE thread = rb_thread_alloc(klass);
671 
672  if (GET_VM()->main_thread->status == THREAD_KILLED)
673  rb_raise(rb_eThreadError, "can't alloc thread");
674 
675  rb_obj_call_init(thread, argc, argv);
676  GetThreadPtr(thread, th);
677  if (!th->first_args) {
678  rb_raise(rb_eThreadError, "uninitialized thread - check `%s#initialize'",
679  rb_class2name(klass));
680  }
681  return thread;
682 }
683 
684 /*
685  * call-seq:
686  * Thread.start([args]*) {|args| block } -> thread
687  * Thread.fork([args]*) {|args| block } -> thread
688  *
689  * Basically the same as ::new. However, if class Thread is subclassed, then
690  * calling +start+ in that subclass will not invoke the subclass's
691  * +initialize+ method.
692  */
693 
694 static VALUE
696 {
697  return thread_create_core(rb_thread_alloc(klass), args, 0);
698 }
699 
700 /* :nodoc: */
701 static VALUE
703 {
704  rb_thread_t *th;
705  if (!rb_block_given_p()) {
706  rb_raise(rb_eThreadError, "must be called with a block");
707  }
708  GetThreadPtr(thread, th);
709  if (th->first_args) {
710  VALUE proc = th->first_proc, line, loc;
711  const char *file;
712  if (!proc || !RTEST(loc = rb_proc_location(proc))) {
713  rb_raise(rb_eThreadError, "already initialized thread");
714  }
715  file = RSTRING_PTR(RARRAY_PTR(loc)[0]);
716  if (NIL_P(line = RARRAY_PTR(loc)[1])) {
717  rb_raise(rb_eThreadError, "already initialized thread - %s",
718  file);
719  }
720  rb_raise(rb_eThreadError, "already initialized thread - %s:%d",
721  file, NUM2INT(line));
722  }
723  return thread_create_core(thread, args, 0);
724 }
725 
726 VALUE
727 rb_thread_create(VALUE (*fn)(ANYARGS), void *arg)
728 {
730 }
731 
732 
733 /* +infty, for this purpose */
734 #define DELAY_INFTY 1E30
735 
736 struct join_arg {
738  double limit;
739  int forever;
740 };
741 
742 static VALUE
744 {
745  struct join_arg *p = (struct join_arg *)arg;
746  rb_thread_t *target_th = p->target, *th = p->waiting;
747 
748  if (target_th->status != THREAD_KILLED) {
749  rb_thread_list_t **p = &target_th->join_list;
750 
751  while (*p) {
752  if ((*p)->th == th) {
753  *p = (*p)->next;
754  break;
755  }
756  p = &(*p)->next;
757  }
758  }
759 
760  return Qnil;
761 }
762 
/*
 * Sleep loop run under rb_ensure by thread_join: sleep (forever, or until
 * the deadline in p->limit) while the target thread is still alive.
 * Returns Qtrue once the target is THREAD_KILLED, or Qfalse on timeout.
 * Wakeups before the target dies are treated as interrupts and re-enter
 * the loop.
 *
 * NOTE(review): the scrape dropped original line 764, the signature line
 * naming this function — it is thread_join_sleep(VALUE arg) per the
 * rb_ensure call in thread_join below; verify against upstream.
 */
763 static VALUE
765 {
766  struct join_arg *p = (struct join_arg *)arg;
767  rb_thread_t *target_th = p->target, *th = p->waiting;
768  double now, limit = p->limit;
769 
770  while (target_th->status != THREAD_KILLED) {
771  if (p->forever) {
772  sleep_forever(th, 1, 0);
773  }
774  else {
775  now = timeofday();
776  if (now > limit) {
777  thread_debug("thread_join: timeout (thid: %p)\n",
778  (void *)target_th->thread_id);
779  return Qfalse;
780  }
781  sleep_wait_for_interrupt(th, limit - now, 0);
782  }
783  thread_debug("thread_join: interrupted (thid: %p)\n",
784  (void *)target_th->thread_id);
785  }
786  return Qtrue;
787 }
788 
/*
 * Core of Thread#join/#value: wait up to `delay` seconds (DELAY_INFTY for
 * forever) for target_th to die. Registers the caller on the target's
 * join_list (unregistered again via remove_from_join_list under rb_ensure),
 * returns Qnil on timeout, and on success re-raises the target's pending
 * exception (if any non-FIXNUM, non-T_NODE errinfo) before returning the
 * target's self. Joining yourself or the main thread raises ThreadError.
 *
 * NOTE(review): the scrape dropped original lines 810 (the declaration of
 * the stack-allocated join-list node `list` used below) and 830-831 (the
 * body of the T_NODE errinfo branch, likely an rb_bug/rb_exc_raise) —
 * verify against upstream thread.c.
 */
789 static VALUE
790 thread_join(rb_thread_t *target_th, double delay)
791 {
792  rb_thread_t *th = GET_THREAD();
793  struct join_arg arg;
794 
795  if (th == target_th) {
796  rb_raise(rb_eThreadError, "Target thread must not be current thread");
797  }
798  if (GET_VM()->main_thread == target_th) {
799  rb_raise(rb_eThreadError, "Target thread must not be main thread");
800  }
801 
802  arg.target = target_th;
803  arg.waiting = th;
804  arg.limit = timeofday() + delay;
805  arg.forever = delay == DELAY_INFTY;
806 
807  thread_debug("thread_join (thid: %p)\n", (void *)target_th->thread_id);
808 
809  if (target_th->status != THREAD_KILLED) {
811  list.next = target_th->join_list;
812  list.th = th;
813  target_th->join_list = &list;
    /* rb_ensure guarantees we unlink `list` even if the sleep raises */
814  if (!rb_ensure(thread_join_sleep, (VALUE)&arg,
815  remove_from_join_list, (VALUE)&arg)) {
816  return Qnil;
817  }
818  }
819 
820  thread_debug("thread_join: success (thid: %p)\n",
821  (void *)target_th->thread_id);
822 
823  if (target_th->errinfo != Qnil) {
824  VALUE err = target_th->errinfo;
825 
826  if (FIXNUM_P(err)) {
827  /* */
828  }
829  else if (RB_TYPE_P(target_th->errinfo, T_NODE)) {
832  }
833  else {
834  /* normal exception */
835  rb_exc_raise(err);
836  }
837  }
838  return target_th->self;
839 }
840 
841 /*
842  * call-seq:
843  * thr.join -> thr
844  * thr.join(limit) -> thr
845  *
846  * The calling thread will suspend execution and run <i>thr</i>. Does not
847  * return until <i>thr</i> exits or until <i>limit</i> seconds have passed. If
848  * the time limit expires, <code>nil</code> will be returned, otherwise
849  * <i>thr</i> is returned.
850  *
851  * Any threads not joined will be killed when the main program exits. If
852  * <i>thr</i> had previously raised an exception and the
853  * <code>abort_on_exception</code> and <code>$DEBUG</code> flags are not set
854  * (so the exception has not yet been processed) it will be processed at this
855  * time.
856  *
857  * a = Thread.new { print "a"; sleep(10); print "b"; print "c" }
858  * x = Thread.new { print "x"; Thread.pass; print "y"; print "z" }
859  * x.join # Let x thread finish, a will be killed on exit.
860  *
861  * <em>produces:</em>
862  *
863  * axyz
864  *
865  * The following example illustrates the <i>limit</i> parameter.
866  *
867  * y = Thread.new { 4.times { sleep 0.1; puts 'tick... ' }}
868  * puts "Waiting" until y.join(0.15)
869  *
870  * <em>produces:</em>
871  *
872  * tick...
873  * Waiting
874  * tick...
875  * Waitingtick...
876  *
877  *
878  * tick...
879  */
880 
881 static VALUE
883 {
884  rb_thread_t *target_th;
885  double delay = DELAY_INFTY;
886  VALUE limit;
887 
888  GetThreadPtr(self, target_th);
889 
890  rb_scan_args(argc, argv, "01", &limit);
891  if (!NIL_P(limit)) {
892  delay = rb_num2dbl(limit);
893  }
894 
895  return thread_join(target_th, delay);
896 }
897 
898 /*
899  * call-seq:
900  * thr.value -> obj
901  *
902  * Waits for +thr+ to complete, using #join, and returns its value or raises
903  * the exception which terminated the thread.
904  *
905  * a = Thread.new { 2 + 2 }
906  * a.value #=> 4
907  *
908  * b = Thread.new { raise 'something went wrong' }
909  * b.value #=> RuntimeError: something went wrong
910  */
911 
912 static VALUE
914 {
915  rb_thread_t *th;
916  GetThreadPtr(self, th);
918  return th->value;
919 }
920 
921 /*
922  * Thread Scheduling
923  */
924 
925 static struct timeval
926 double2timeval(double d)
927 {
928  struct timeval time;
929 
930  if (isinf(d)) {
931  time.tv_sec = TIMET_MAX;
932  time.tv_usec = 0;
933  return time;
934  }
935 
936  time.tv_sec = (int)d;
937  time.tv_usec = (int)((d - (int)d) * 1e6);
938  if (time.tv_usec < 0) {
939  time.tv_usec += (int)1e6;
940  time.tv_sec -= 1;
941  }
942  return time;
943 }
944 
/*
 * Put the current thread to sleep with no deadline. With `deadlockable`
 * set, the thread sleeps as THREAD_STOPPED_FOREVER and participates in VM
 * deadlock detection (vm->sleeper counting + rb_check_deadlock); otherwise
 * it sleeps as plain THREAD_STOPPED. Without `spurious_check` a single
 * wakeup — spurious or not — ends the sleep; with it, the loop re-sleeps
 * until something changes th->status. The previous status is restored on
 * exit.
 *
 * NOTE(review): the scrape dropped original lines 952 and 962 (positioned
 * like the interrupt checks around the sleep loop in upstream thread.c) —
 * verify against upstream before relying on exact interrupt semantics.
 */
945 static void
946 sleep_forever(rb_thread_t *th, int deadlockable, int spurious_check)
947 {
948  enum rb_thread_status prev_status = th->status;
949  enum rb_thread_status status = deadlockable ? THREAD_STOPPED_FOREVER : THREAD_STOPPED;
950 
951  th->status = status;
953  while (th->status == status) {
954  if (deadlockable) {
955  th->vm->sleeper++;
956  rb_check_deadlock(th->vm);
957  }
958  native_sleep(th, 0);
959  if (deadlockable) {
960  th->vm->sleeper--;
961  }
963  if (!spurious_check)
964  break;
965  }
966  th->status = prev_status;
967 }
968 
969 static void
971 {
972 #if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
973  struct timespec ts;
974 
975  if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0) {
976  tp->tv_sec = ts.tv_sec;
977  tp->tv_usec = ts.tv_nsec / 1000;
978  } else
979 #endif
980  {
981  gettimeofday(tp, NULL);
982  }
983 }
984 
/*
 * Put the current thread to sleep for (up to) the duration in `tv`.
 * Computes an absolute deadline `to` from the monotonic-ish clock
 * (getclockofday), saturating at TIMET_MAX on overflow, then sleeps and —
 * if `spurious_check` is set — keeps re-sleeping with the recomputed
 * remaining time until the deadline passes. The thread's previous status
 * is restored on exit. `tv` is reused as the shrinking remaining-time
 * value across loop iterations.
 *
 * NOTE(review): the scrape dropped original lines 1006 and 1009
 * (positioned like the interrupt checks around native_sleep in upstream
 * thread.c) — verify against upstream before relying on exact interrupt
 * semantics.
 */
985 static void
986 sleep_timeval(rb_thread_t *th, struct timeval tv, int spurious_check)
987 {
988  struct timeval to, tvn;
989  enum rb_thread_status prev_status = th->status;
990 
991  getclockofday(&to);
992  if (TIMET_MAX - tv.tv_sec < to.tv_sec)
993  to.tv_sec = TIMET_MAX;
994  else
995  to.tv_sec += tv.tv_sec;
996  if ((to.tv_usec += tv.tv_usec) >= 1000000) {
997  if (to.tv_sec == TIMET_MAX)
998  to.tv_usec = 999999;
999  else {
1000  to.tv_sec++;
1001  to.tv_usec -= 1000000;
1002  }
1003  }
1004 
1005  th->status = THREAD_STOPPED;
1007  while (th->status == THREAD_STOPPED) {
1008  native_sleep(th, &tv);
1010  getclockofday(&tvn);
1011  if (to.tv_sec < tvn.tv_sec) break;
1012  if (to.tv_sec == tvn.tv_sec && to.tv_usec <= tvn.tv_usec) break;
1013  thread_debug("sleep_timeval: %ld.%.6ld > %ld.%.6ld\n",
1014  (long)to.tv_sec, (long)to.tv_usec,
1015  (long)tvn.tv_sec, (long)tvn.tv_usec);
1016  tv.tv_sec = to.tv_sec - tvn.tv_sec;
1017  if ((tv.tv_usec = to.tv_usec - tvn.tv_usec) < 0) {
1018  --tv.tv_sec;
1019  tv.tv_usec += 1000000;
1020  }
1021  if (!spurious_check)
1022  break;
1023  }
1024  th->status = prev_status;
1025 }
1026 
1027 void
1029 {
1030  thread_debug("rb_thread_sleep_forever\n");
1031  sleep_forever(GET_THREAD(), 0, 1);
1032 }
1033 
1034 static void
1036 {
1037  thread_debug("rb_thread_sleep_deadly\n");
1038  sleep_forever(GET_THREAD(), 1, 1);
1039 }
1040 
1041 static double
1043 {
1044 #if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
1045  struct timespec tp;
1046 
1047  if (clock_gettime(CLOCK_MONOTONIC, &tp) == 0) {
1048  return (double)tp.tv_sec + (double)tp.tv_nsec * 1e-9;
1049  } else
1050 #endif
1051  {
1052  struct timeval tv;
1053  gettimeofday(&tv, NULL);
1054  return (double)tv.tv_sec + (double)tv.tv_usec * 1e-6;
1055  }
1056 }
1057 
1058 static void
1059 sleep_wait_for_interrupt(rb_thread_t *th, double sleepsec, int spurious_check)
1060 {
1061  sleep_timeval(th, double2timeval(sleepsec), spurious_check);
1062 }
1063 
1064 static void
1066 {
1067  struct timeval time;
1068  time.tv_sec = 0;
1069  time.tv_usec = 100 * 1000; /* 0.1 sec */
1070  sleep_timeval(th, time, 1);
1071 }
1072 
1073 void
1075 {
1076  rb_thread_t *th = GET_THREAD();
1077  sleep_timeval(th, time, 1);
1078 }
1079 
1080 void
1082 {
1083  if (!rb_thread_alone()) {
1084  rb_thread_t *th = GET_THREAD();
1086  sleep_for_polling(th);
1087  }
1088 }
1089 
1090 /*
1091  * CAUTION: This function causes thread switching.
1092  * rb_thread_check_ints() check ruby's interrupts.
1093  * some interrupt needs thread switching/invoke handlers,
1094  * and so on.
1095  */
1096 
1097 void
1099 {
1101 }
1102 
1103 /*
1104  * Hidden API for tcl/tk wrapper.
1105  * There is no guarantee to perpetuate it.
1106  */
1107 int
1109 {
1110  return rb_signal_buff_size() != 0;
1111 }
1112 
1113 /* This function can be called in blocking region. */
1114 int
1116 {
1117  rb_thread_t *th;
1118  GetThreadPtr(thval, th);
1119  return (int)RUBY_VM_INTERRUPTED(th);
1120 }
1121 
1122 void
1124 {
1126 }
1127 
1128 static void
1129 rb_thread_schedule_limits(unsigned long limits_us)
1130 {
1131  thread_debug("rb_thread_schedule\n");
1132  if (!rb_thread_alone()) {
1133  rb_thread_t *th = GET_THREAD();
1134 
1135  if (th->running_time_us >= limits_us) {
1136  thread_debug("rb_thread_schedule/switch start\n");
1138  gvl_yield(th->vm, th);
1140  thread_debug("rb_thread_schedule/switch done\n");
1141  }
1142  }
1143 }
1144 
1145 void
1147 {
1148  rb_thread_t *cur_th = GET_THREAD();
1150 
1151  if (UNLIKELY(RUBY_VM_INTERRUPTED_ANY(cur_th))) {
1153  }
1154 }
1155 
1156 /* blocking region */
1157 
1158 static inline int
1160  rb_unblock_function_t *ubf, void *arg, int fail_if_interrupted)
1161 {
1162  region->prev_status = th->status;
1163  if (set_unblock_function(th, ubf, arg, &region->oldubf, fail_if_interrupted)) {
1164  th->blocking_region_buffer = region;
1165  th->status = THREAD_STOPPED;
1166  thread_debug("enter blocking region (%p)\n", (void *)th);
1168  gvl_release(th->vm);
1169  return TRUE;
1170  }
1171  else {
1172  return FALSE;
1173  }
1174 }
1175 
1176 static inline void
1178 {
1179  gvl_acquire(th->vm, th);
1181  thread_debug("leave blocking region (%p)\n", (void *)th);
1182  remove_signal_thread_list(th);
1183  th->blocking_region_buffer = 0;
1184  reset_unblock_function(th, &region->oldubf);
1185  if (th->status == THREAD_STOPPED) {
1186  th->status = region->prev_status;
1187  }
1188 }
1189 
1192 {
1193  rb_thread_t *th = GET_THREAD();
1195  blocking_region_begin(th, region, ubf_select, th, FALSE);
1196  return region;
1197 }
1198 
1199 void
1201 {
1202  int saved_errno = errno;
1203  rb_thread_t *th = ruby_thread_from_native();
1204  blocking_region_end(th, region);
1205  xfree(region);
1207  errno = saved_errno;
1208 }
1209 
/*
 * Shared backend for rb_thread_call_without_gvl{,2}: run func(data1) with
 * the GVL released, invoking ubf(data2) if the thread is interrupted while
 * blocked. The built-in sentinels RUBY_UBF_IO / RUBY_UBF_PROCESS are
 * replaced with ubf_select on this thread. errno from inside the blocking
 * region is preserved across GVL reacquisition. With fail_if_interrupted,
 * a pending interrupt aborts before calling func (val stays NULL) and no
 * interrupt check runs afterwards.
 *
 * NOTE(review): the scrape dropped original line 1231, the body of the
 * !fail_if_interrupted branch (positioned like the post-call interrupt
 * check in upstream thread.c) — verify against upstream.
 */
1210 static void *
1211 call_without_gvl(void *(*func)(void *), void *data1,
1212  rb_unblock_function_t *ubf, void *data2, int fail_if_interrupted)
1213 {
1214  void *val = 0;
1215 
1216  rb_thread_t *th = GET_THREAD();
1217  int saved_errno = 0;
1218 
1219  th->waiting_fd = -1;
1220  if (ubf == RUBY_UBF_IO || ubf == RUBY_UBF_PROCESS) {
1221  ubf = ubf_select;
1222  data2 = th;
1223  }
1224 
1225  BLOCKING_REGION({
1226  val = func(data1);
1227  saved_errno = errno;
1228  }, ubf, data2, fail_if_interrupted);
1229 
1230  if (!fail_if_interrupted) {
1232  }
1233 
1234  errno = saved_errno;
1235 
1236  return val;
1237 }
1238 
1239 /*
1240  * rb_thread_call_without_gvl - permit concurrent/parallel execution.
1241  * rb_thread_call_without_gvl2 - permit concurrent/parallel execution
1242  * without interrupt processing.
1243  *
1244  * rb_thread_call_without_gvl() does:
1245  * (1) Check interrupts.
1246  * (2) release GVL.
1247  * Other Ruby threads may run in parallel.
1248  * (3) call func with data1
1249  * (4) acquire GVL.
1250  * Other Ruby threads can not run in parallel any more.
1251  * (5) Check interrupts.
1252  *
1253  * rb_thread_call_without_gvl2() does:
1254  * (1) Check interrupt and return if interrupted.
1255  * (2) release GVL.
1256  * (3) call func with data1 and a pointer to the flags.
1257  * (4) acquire GVL.
1258  *
1259  * If another thread interrupts this thread (Thread#kill, signal delivery,
1260  * VM-shutdown request, and so on), `ubf()' is called (`ubf()' means
1261  * "un-blocking function"). `ubf()' should interrupt `func()' execution by
1262  * toggling a cancellation flag, canceling the invocation of a call inside
1263  * `func()' or similar. Note that `ubf()' may not be called with the GVL.
1264  *
1265  * There are built-in ubfs and you can specify these ubfs:
1266  *
1267  * * RUBY_UBF_IO: ubf for IO operation
1268  * * RUBY_UBF_PROCESS: ubf for process operation
1269  *
1270  * However, we can not guarantee our built-in ubfs interrupt your `func()'
1271  * correctly. Be careful to use rb_thread_call_without_gvl(). If you don't
1272  * provide proper ubf(), your program will not stop for Control+C or other
1273  * shutdown events.
1274  *
1275  * "Check interrupts" on above list means that check asynchronous
1276  * interrupt events (such as Thread#kill, signal delivery, VM-shutdown
1277  * request, and so on) and call corresponding procedures
1278  * (such as `trap' for signals, raise an exception for Thread#raise).
1279  * If `func()' finished and receive interrupts, you may skip interrupt
1280  * checking. For example, assume the following func() it read data from file.
1281  *
1282  * read_func(...) {
1283  * // (a) before read
1284  * read(buffer); // (b) reading
1285  * // (c) after read
1286  * }
1287  *
1288  * If an interrupt occurs at (a) or (b), then `ubf()' cancels this
1289  * `read_func()' and interrupts are checked. However, if an interrupt occurs
1290  * at (c), after the *read* operation is completed, checking interrupts is harmful
1291  * because it causes irrevocable side-effect, the read data will vanish. To
1292  * avoid such problem, the `read_func()' should be used with
1293  * `rb_thread_call_without_gvl2()'.
1294  *
1295  * If `rb_thread_call_without_gvl2()' detects interrupt, return its execution
1296  * immediately. This function does not show when the execution was interrupted.
1297  * For example, there are 4 possible timing (a), (b), (c) and before calling
1298  * read_func(). You need to record progress of a read_func() and check
1299  * the progress after `rb_thread_call_without_gvl2()'. You may need to call
1300  * `rb_thread_check_ints()' correctly or your program can not process proper
1301  * process such as `trap' and so on.
1302  *
1303  * NOTE: You can not execute most of Ruby C API and touch Ruby
1304  * objects in `func()' and `ubf()', including raising an
1305  * exception, because current thread doesn't acquire GVL
1306  * (it causes synchronization problems). If you need to
1307  * call ruby functions either use rb_thread_call_with_gvl()
1308  * or read source code of C APIs and confirm safety by
1309  * yourself.
1310  *
1311  * NOTE: In short, this API is difficult to use safely. I recommend you
1312  * use other ways if you have. We lack experiences to use this API.
1313  * Please report your problem related on it.
1314  *
1315  * NOTE: Releasing GVL and re-acquiring GVL may be expensive operations
1316  * for a short running `func()'. Be sure to benchmark and use this
1317  * mechanism when `func()' consumes enough time.
1318  *
1319  * Safe C API:
1320  * * rb_thread_interrupted() - check interrupt flag
1321  * * ruby_xmalloc(), ruby_xrealloc(), ruby_xfree() -
1322  * they will work without GVL, and may acquire GVL when GC is needed.
1323  */
1324 void *
1325 rb_thread_call_without_gvl2(void *(*func)(void *), void *data1,
1326  rb_unblock_function_t *ubf, void *data2)
1327 {
1328  return call_without_gvl(func, data1, ubf, data2, TRUE);
1329 }
1330 
1331 void *
1332 rb_thread_call_without_gvl(void *(*func)(void *data), void *data1,
1333  rb_unblock_function_t *ubf, void *data2)
1334 {
1335  return call_without_gvl(func, data1, ubf, data2, FALSE);
1336 }
1337 
/* NOTE(review): the extraction dropped the signature line (orig. line 1339);
 * from the body (th->waiting_fd, BLOCKING_REGION with ubf_select) this is
 * presumably rb_thread_io_blocking_region(func, data1, fd) -- confirm against
 * the original thread.c. It runs `func' without the GVL while remembering the
 * fd being waited on, so rb_thread_fd_close() can notice threads blocked on
 * that fd. Line 1364 (waitfd error handling) is also missing here. */
1338 VALUE
1340 {
1341  VALUE val = Qundef; /* shouldn't be used */
1342  rb_thread_t *th = GET_THREAD();
1343  int saved_errno = 0;
1344  int state;
1345 
1346  th->waiting_fd = fd;
1347 
1348  TH_PUSH_TAG(th);
1349  if ((state = EXEC_TAG()) == 0) {
1350  BLOCKING_REGION({
1351  val = func(data1);
1352  saved_errno = errno;
1353  }, ubf_select, th, FALSE);
1354  }
1355  TH_POP_TAG();
1356 
1357  /* clear waiting_fd anytime */
1358  th->waiting_fd = -1;
1359 
1360  if (state) {
1361  JUMP_TAG(state);
1362  }
1363  /* TODO: check func() */
1365 
1366  errno = saved_errno;
1367 
1368  return val;
1369 }
1370 
/* NOTE(review): signature line lost in extraction (orig. line 1372); judging
 * by the parameters and the forwarding body, this is presumably the legacy
 * rb_thread_blocking_region() compatibility wrapper that simply delegates to
 * rb_thread_call_without_gvl() -- confirm against the original thread.c. */
1371 VALUE
1373  rb_blocking_function_t *func, void *data1,
1374  rb_unblock_function_t *ubf, void *data2)
1375 {
1376  void *(*f)(void*) = (void *(*)(void*))func;
1377  return (VALUE)rb_thread_call_without_gvl(f, data1, ubf, data2);
1378 }
1379 
1380 /*
1381  * rb_thread_call_with_gvl - re-enter the Ruby world after GVL release.
1382  *
1383  * After releasing GVL using rb_thread_blocking_region() or
1384  * rb_thread_call_without_gvl() you can not access Ruby values or invoke
1385  * methods. If you need to access Ruby you must use this function
1386  * rb_thread_call_with_gvl().
1387  *
1388  * This function rb_thread_call_with_gvl() does:
1389  * (1) acquire GVL.
1390  * (2) call passed function `func'.
1391  * (3) release GVL.
1392  * (4) return a value which is returned at (2).
1393  *
1394  * NOTE: You should not return Ruby object at (2) because such Object
1395  * will not be marked.
1396  *
1397  * NOTE: If an exception is raised in `func', this function DOES NOT
1398  * protect (catch) the exception. If you have any resources
1399  * which should be freed before throwing the exception, you need to use
1400  * rb_protect() in `func' and return a value which represents
1401  * exception is raised.
1402  *
1403  * NOTE: This function should not be called by a thread which was not
1404  * created as Ruby thread (created by Thread.new or so). In other
1405  * words, this function *DOES NOT* associate or convert a NON-Ruby
1406  * thread to a Ruby thread.
1407  */
/* Re-acquire the GVL, run `func(data1)' inside the Ruby world, then release
 * the GVL again (see the long comment above for the contract).
 * NOTE(review): orig. line 1426 is missing from this listing -- it presumably
 * initializes `brb' from th->blocking_region_buffer before the NULL check
 * below; as shown, `brb' would be read uninitialized. Confirm against the
 * original thread.c. */
1408 void *
1409 rb_thread_call_with_gvl(void *(*func)(void *), void *data1)
1410 {
1411  rb_thread_t *th = ruby_thread_from_native();
1412  struct rb_blocking_region_buffer *brb;
1413  struct rb_unblock_callback prev_unblock;
1414  void *r;
1415 
1416  if (th == 0) {
1417  /* An error occurred, but we can't use rb_bug()
1418  * because this thread is not Ruby's thread.
1419  * What should we do?
1420  */
1421 
1422  fprintf(stderr, "[BUG] rb_thread_call_with_gvl() is called by non-ruby thread\n");
1423  exit(EXIT_FAILURE);
1424  }
1425 
1427  prev_unblock = th->unblock;
1428 
1429  if (brb == 0) {
1430  rb_bug("rb_thread_call_with_gvl: called by a thread which has GVL.");
1431  }
1432 
1433  blocking_region_end(th, brb);
1434  /* enter the Ruby world: You can access Ruby values, methods and so on. */
1435  r = (*func)(data1);
1436  /* leave the Ruby world: You can not access Ruby values, etc. */
1437  blocking_region_begin(th, brb, prev_unblock.func, prev_unblock.arg, FALSE);
1438  return r;
1439 }
1440 
1441 /*
1442  * ruby_thread_has_gvl_p - check if current native thread has GVL.
1443  *
1444  ***
1445  *** This API is EXPERIMENTAL!
1446  *** We do not guarantee that this API remains in ruby 1.9.2 or later.
1447  ***
1448  */
1449 
/* NOTE(review): name line dropped by extraction (orig. line 1451); per the
 * preceding comment this is ruby_thread_has_gvl_p(void). Returns 1 when the
 * current native thread is a Ruby thread not inside a blocking region
 * (i.e. it holds the GVL), 0 otherwise. */
1449 int
1452 {
1453  rb_thread_t *th = ruby_thread_from_native();
1454 
1455  if (th && th->blocking_region_buffer == 0) {
1456  return 1;
1457  }
1458  else {
1459  return 0;
1460  }
1461 }
1462 
1463 /*
1464  * call-seq:
1465  * Thread.pass -> nil
1466  *
1467  * Give the thread scheduler a hint to pass execution to another thread.
1468  * A running thread may or may not switch, it depends on OS and processor.
1469  */
1470 
/* NOTE(review): implementation of Thread.pass (see comment above); the name
 * line (orig. 1472) and the body line (orig. 1474, presumably the call to
 * rb_thread_schedule()) were dropped by extraction -- confirm against the
 * original thread.c. */
1471 static VALUE
1473 {
1475  return Qnil;
1476 }
1477 
1478 /*****************************************************/
1479 
1480 /*
1481  * rb_threadptr_pending_interrupt_* - manage asynchronous error queue
1482  *
1483  * Async events such as an exception thrown by Thread#raise,
1484  * Thread#kill and thread termination (after main thread termination)
1485  * will be queued to th->pending_interrupt_queue.
1486  * - clear: clear the queue.
1487  * - enque: enque err object into queue.
1488  * - deque: deque err object from queue.
1489  * - active_p: return 1 if the queue should be checked.
1490  *
1491  * All rb_threadptr_pending_interrupt_* functions are called by
1492  * a GVL acquired thread, of course.
1493  * Note that all "rb_" prefix APIs need GVL to call.
1494  */
1495 
/* NOTE(review): name and body lines lost in extraction; per the comment
 * above, presumably rb_threadptr_pending_interrupt_clear(), which clears
 * th->pending_interrupt_queue -- confirm against the original thread.c. */
1496 void
1498 {
1500 }
1501 
/* NOTE(review): name and body lines lost in extraction; per the comment
 * above, presumably rb_threadptr_pending_interrupt_enque(), which pushes an
 * error object onto th->pending_interrupt_queue -- confirm against the
 * original thread.c. */
1502 void
1504 {
1507 }
1508 
1514 };
1515 
/* Walk the handle_interrupt mask stack from innermost to outermost and, for
 * each mask hash, look up every ancestor class of `err'; the first matching
 * entry decides the timing (:immediate / :on_blocking / :never).
 * Returns INTERRUPT_NONE when no mask mentions `err'.
 * NOTE(review): name line dropped by extraction (orig. line 1517);
 * presumably rb_threadptr_pending_interrupt_check_mask(rb_thread_t *th,
 * VALUE err) -- confirm against the original thread.c. */
1516 static enum handle_interrupt_timing
1518 {
1519  VALUE mask;
1520  long mask_stack_len = RARRAY_LEN(th->pending_interrupt_mask_stack);
1521  VALUE *mask_stack = RARRAY_PTR(th->pending_interrupt_mask_stack);
1522  VALUE ancestors = rb_mod_ancestors(err); /* TODO: GC guard */
1523  long ancestors_len = RARRAY_LEN(ancestors);
1524  VALUE *ancestors_ptr = RARRAY_PTR(ancestors);
1525  int i, j;
1526 
1527  for (i=0; i<mask_stack_len; i++) {
1528  mask = mask_stack[mask_stack_len-(i+1)];
1529 
1530  for (j=0; j<ancestors_len; j++) {
1531  VALUE klass = ancestors_ptr[j];
1532  VALUE sym;
1533 
1534  /* TODO: remove rb_intern() */
1535  if ((sym = rb_hash_aref(mask, klass)) != Qnil) {
1536  if (sym == sym_immediate) {
1537  return INTERRUPT_IMMEDIATE;
1538  }
1539  else if (sym == sym_on_blocking) {
1540  return INTERRUPT_ON_BLOCKING;
1541  }
1542  else if (sym == sym_never) {
1543  return INTERRUPT_NEVER;
1544  }
1545  else {
1546  rb_raise(rb_eThreadError, "unknown mask signature");
1547  }
1548  }
1549  }
1550  /* try the next mask */
1551  }
1552  return INTERRUPT_NONE;
1553 }
1554 
/* Returns nonzero when the pending-interrupt queue is empty.
 * NOTE(review): name line dropped by extraction (orig. line 1556);
 * presumably rb_threadptr_pending_interrupt_empty_p(rb_thread_t *th). */
1555 static int
1557 {
1558  return RARRAY_LEN(th->pending_interrupt_queue) == 0;
1559 }
1560 
/* Returns TRUE when the queue holds an entry that is a subclass of `err'.
 * NOTE(review): the name line (orig. 1562) and the line fetching the queue
 * element `e' (orig. 1566) were dropped by extraction; presumably
 * rb_threadptr_pending_interrupt_include_p(rb_thread_t *th, VALUE err). */
1561 static int
1563 {
1564  int i;
1565  for (i=0; i<RARRAY_LEN(th->pending_interrupt_queue); i++) {
1567  if (rb_class_inherited_p(e, err)) {
1568  return TRUE;
1569  }
1570  }
1571  return FALSE;
1572 }
1573 
/* Dequeue the first queued error whose handle_interrupt mask allows delivery
 * at `timing'; returns Qundef when nothing is deliverable.
 * NOTE(review): heavily garbled by extraction -- the name line (orig. 1575),
 * the lines fetching `err' and `mask_timing' (orig. 1581, 1583), the
 * queue-delete lines (orig. 1593, 1600) and part of the #else branch
 * (orig. 1603-1605) are all missing. Presumably
 * rb_threadptr_pending_interrupt_deque() -- confirm against thread.c. */
1574 static VALUE
1576 {
1577 #if 1 /* 1 to enable Thread#handle_interrupt, 0 to ignore it */
1578  int i;
1579 
1580  for (i=0; i<RARRAY_LEN(th->pending_interrupt_queue); i++) {
1582 
1584 
1585  switch (mask_timing) {
1586  case INTERRUPT_ON_BLOCKING:
1587  if (timing != INTERRUPT_ON_BLOCKING) {
1588  break;
1589  }
1590  /* fall through */
1591  case INTERRUPT_NONE: /* default: IMMEDIATE */
1592  case INTERRUPT_IMMEDIATE:
1594  return err;
1595  case INTERRUPT_NEVER:
1596  break;
1597  }
1598  }
1599 
1601  return Qundef;
1602 #else
1606  }
1607  return err;
1608 #endif
1609 }
1610 
/* Returns 1 when the pending-interrupt queue should be checked, 0 otherwise.
 * NOTE(review): the name line (orig. 1612) and both guard conditions
 * (orig. 1619, 1623 -- presumably the "queue checked" flag and the
 * empty-queue test) were dropped by extraction; presumably
 * rb_threadptr_pending_interrupt_active_p(rb_thread_t *th). */
1611 int
1613 {
1614  /*
1615  * For optimization, we don't check the async errinfo queue
1616  * if neither it nor the thread interrupt mask has changed
1617  * since the last check.
1618  */
1620  return 0;
1621  }
1622 
1624  return 0;
1625  }
1626 
1627  return 1;
1628 }
1629 
/* Hash-iterator callback validating a Thread.handle_interrupt mask: each
 * value must be :immediate, :on_blocking or :never.
 * NOTE(review): name line dropped by extraction (orig. line 1631);
 * presumably handle_interrupt_arg_check_i(VALUE key, VALUE val). */
1630 static int
1632 {
1633  if (val != sym_immediate && val != sym_on_blocking && val != sym_never) {
1634  rb_raise(rb_eArgError, "unknown mask signature");
1635  }
1636 
1637  return ST_CONTINUE;
1638 }
1639 
1640 /*
1641  * call-seq:
1642  * Thread.handle_interrupt(hash) { ... } -> result of the block
1643  *
1644  * Changes asynchronous interrupt timing.
1645  *
1646  * _interrupt_ means asynchronous event and corresponding procedure
1647  * by Thread#raise, Thread#kill, signal trap (not supported yet)
1648  * and main thread termination (if main thread terminates, then all
1649  * other thread will be killed).
1650  *
1651  * The given +hash+ has pairs like <code>ExceptionClass =>
1652  * :TimingSymbol</code>. Where the ExceptionClass is the interrupt handled by
1653  * the given block. The TimingSymbol can be one of the following symbols:
1654  *
1655  * [+:immediate+] Invoke interrupts immediately.
1656  * [+:on_blocking+] Invoke interrupts while _BlockingOperation_.
1657  * [+:never+] Never invoke all interrupts.
1658  *
1659  * _BlockingOperation_ means that the operation will block the calling thread,
1660  * such as read and write. On CRuby implementation, _BlockingOperation_ is any
1661  * operation executed without GVL.
1662  *
1663  * Masked asynchronous interrupts are delayed until they are enabled.
1664  * This method is similar to sigprocmask(3).
1665  *
1666  * === NOTE
1667  *
1668  * Asynchronous interrupts are difficult to use.
1669  *
1670  * If you need to communicate between threads, please consider to use another way such as Queue.
1671  *
1672  * Or use them with deep understanding about this method.
1673  *
1674  * === Usage
1675  *
1676  * In this example, we can guard from Thread#raise exceptions.
1677  *
1678  * Using the +:never+ TimingSymbol the RuntimeError exception will always be
1679  * ignored in the first block of the main thread. In the second
1680  * ::handle_interrupt block we can purposefully handle RuntimeError exceptions.
1681  *
1682  * th = Thread.new do
1683  * Thread.handle_interrupt(RuntimeError => :never) {
1684  * begin
1685  * # You can write resource allocation code safely.
1686  * Thread.handle_interrupt(RuntimeError => :immediate) {
1687  * # ...
1688  * }
1689  * ensure
1690  * # You can write resource deallocation code safely.
1691  * end
1692  * }
1693  * end
1694  * Thread.pass
1695  * # ...
1696  * th.raise "stop"
1697  *
1698  * While we are ignoring the RuntimeError exception, it's safe to write our
1699  * resource allocation code. Then, the ensure block is where we can safely
1700  * deallocate your resources.
1701  *
1702  * ==== Guarding from TimeoutError
1703  *
1704  * In the next example, we will guard from the TimeoutError exception. This
1705  * will help prevent from leaking resources when TimeoutError exceptions occur
1706  * during normal ensure clause. For this example we use the help of the
1707  * standard library Timeout, from lib/timeout.rb
1708  *
1709  * require 'timeout'
1710  * Thread.handle_interrupt(TimeoutError => :never) {
1711  * timeout(10){
1712  * # TimeoutError doesn't occur here
1713  * Thread.handle_interrupt(TimeoutError => :on_blocking) {
1714  * # possible to be killed by TimeoutError
1715  * # while blocking operation
1716  * }
1717  * # TimeoutError doesn't occur here
1718  * }
1719  * }
1720  *
1721  * In the first part of the +timeout+ block, we can rely on TimeoutError being
1722  * ignored. Then in the <code>TimeoutError => :on_blocking</code> block, any
1723  * operation that will block the calling thread is susceptible to a
1724  * TimeoutError exception being raised.
1725  *
1726  * ==== Stack control settings
1727  *
1728  * It's possible to stack multiple levels of ::handle_interrupt blocks in order
1729  * to control more than one ExceptionClass and TimingSymbol at a time.
1730  *
1731  * Thread.handle_interrupt(FooError => :never) {
1732  * Thread.handle_interrupt(BarError => :never) {
1733  * # FooError and BarError are prohibited.
1734  * }
1735  * }
1736  *
1737  * ==== Inheritance with ExceptionClass
1738  *
1739  * All exceptions inherited from the ExceptionClass parameter will be considered.
1740  *
1741  * Thread.handle_interrupt(Exception => :never) {
1742  * # all exceptions inherited from Exception are prohibited.
1743  * }
1744  *
1745  */
/* Implementation of Thread.handle_interrupt (see the long comment above):
 * validates the mask hash, pushes it on the mask stack, yields the block,
 * then pops the mask and delivers any newly-allowed deferred interrupts.
 * NOTE(review): garbled by extraction -- the name line (orig. 1747), the
 * mask validation / mask-stack push lines (orig. 1759-1763) and the
 * mask-stack pop lines (orig. 1772-1775) are missing. Presumably
 * rb_thread_s_handle_interrupt(VALUE self, VALUE mask_arg) -- confirm
 * against the original thread.c. */
1746 static VALUE
1748 {
1749  VALUE mask;
1750  rb_thread_t *th = GET_THREAD();
1751  VALUE r = Qnil;
1752  int state;
1753 
1754  if (!rb_block_given_p()) {
1755  rb_raise(rb_eArgError, "block is needed.");
1756  }
1757 
1758  mask = rb_convert_type(mask_arg, T_HASH, "Hash", "to_hash");
1764  }
1765 
1766  TH_PUSH_TAG(th);
1767  if ((state = EXEC_TAG()) == 0) {
1768  r = rb_yield(Qnil);
1769  }
1770  TH_POP_TAG();
1771 
1776  }
1777 
1778  RUBY_VM_CHECK_INTS(th);
1779 
1780  if (state) {
1781  JUMP_TAG(state);
1782  }
1783 
1784  return r;
1785 }
1786 
1787 /*
1788  * call-seq:
1789  * target_thread.pending_interrupt?(error = nil) -> true/false
1790  *
1791  * Returns whether or not the asynchronous queue is empty for the target thread.
1792  *
1793  * If +error+ is given, then check only for +error+ type deferred events.
1794  *
1795  * See ::pending_interrupt? for more information.
1796  */
/* Implementation of Thread#pending_interrupt? (see comment above): Qfalse if
 * the queue is empty; with an optional class/module argument, Qtrue only if
 * a matching error is queued.
 * NOTE(review): name line dropped by extraction (orig. line 1798);
 * presumably rb_thread_pending_interrupt_p(int argc, VALUE *argv,
 * VALUE target_thread). */
1797 static VALUE
1799 {
1800  rb_thread_t *target_th;
1801 
1802  GetThreadPtr(target_thread, target_th);
1803 
1804  if (rb_threadptr_pending_interrupt_empty_p(target_th)) {
1805  return Qfalse;
1806  }
1807  else {
1808  if (argc == 1) {
1809  VALUE err;
1810  rb_scan_args(argc, argv, "01", &err);
1811  if (!rb_obj_is_kind_of(err, rb_cModule)) {
1812  rb_raise(rb_eTypeError, "class or module required for rescue clause");
1813  }
1814  if (rb_threadptr_pending_interrupt_include_p(target_th, err)) {
1815  return Qtrue;
1816  }
1817  else {
1818  return Qfalse;
1819  }
1820  }
1821  return Qtrue;
1822  }
1823 }
1824 
1825 /*
1826  * call-seq:
1827  * Thread.pending_interrupt?(error = nil) -> true/false
1828  *
1829  * Returns whether or not the asynchronous queue is empty.
1830  *
1831  * Since Thread::handle_interrupt can be used to defer asynchronous events,
1832  * this method can be used to determine if there are any deferred events.
1833  *
1834  * If you find this method returns true, then you may finish +:never+ blocks.
1835  *
1836  * For example, the following method processes deferred asynchronous events
1837  * immediately.
1838  *
1839  * def Thread.kick_interrupt_immediately
1840  * Thread.handle_interrupt(Object => :immediate) {
1841  * Thread.pass
1842  * }
1843  * end
1844  *
1845  * If +error+ is given, then check only for +error+ type deferred events.
1846  *
1847  * === Usage
1848  *
1849  * th = Thread.new{
1850  * Thread.handle_interrupt(RuntimeError => :on_blocking){
1851  * while true
1852  * ...
1853  * # reach safe point to invoke interrupt
1854  * if Thread.pending_interrupt?
1855  * Thread.handle_interrupt(Object => :immediate){}
1856  * end
1857  * ...
1858  * end
1859  * }
1860  * }
1861  * ...
1862  * th.raise # stop thread
1863  *
1864  * This example can also be written as the following, which you should use to
1865  * avoid asynchronous interrupts.
1866  *
1867  * flag = true
1868  * th = Thread.new{
1869  * Thread.handle_interrupt(RuntimeError => :on_blocking){
1870  * while true
1871  * ...
1872  * # reach safe point to invoke interrupt
1873  * break if flag == false
1874  * ...
1875  * end
1876  * }
1877  * }
1878  * ...
1879  * flag = false # stop thread
1880  */
1881 
/* Implementation of Thread.pending_interrupt? -- delegates to the instance
 * method on the current thread.
 * NOTE(review): name line dropped by extraction (orig. line 1883);
 * presumably rb_thread_s_pending_interrupt_p(int argc, VALUE *argv,
 * VALUE self). */
1882 static VALUE
1884 {
1885  return rb_thread_pending_interrupt_p(argc, argv, GET_THREAD()->self);
1886 }
1887 
/* Mark the thread as dying and jump out with TAG_FATAL (Thread#kill path).
 * NOTE(review): the name line (orig. 1889) and one body line (orig. 1891,
 * presumably clearing the pending-interrupt queue) were dropped by
 * extraction; presumably rb_threadptr_to_kill(rb_thread_t *th). */
1888 static void
1890 {
1892  th->status = THREAD_RUNNABLE;
1893  th->to_kill = 1;
1894  th->errinfo = INT2FIX(TAG_FATAL);
1895  TH_JUMP_TAG(th, TAG_FATAL);
1896 }
1897 
/* Main interrupt dispatch loop: atomically consume th->interrupt_flag (minus
 * masked bits), then handle, in order: trapped signals (main thread only),
 * pending cross-thread exceptions, finalizers, and timer-driven
 * rescheduling. Loops until no unmasked interrupt bit remains.
 * NOTE(review): garbled by extraction -- the name line (orig. 1899,
 * presumably rb_threadptr_execute_interrupts(rb_thread_t *th, ...)), the
 * deque of `err' (orig. 1938), the to_kill call (orig. 1947), part of the
 * status test (orig. 1952) and the finalizer call (orig. 1959) are missing.
 * Confirm against the original thread.c. */
1898 void
1900 {
1901  if (th->raised_flag) return;
1902 
1903  while (1) {
1904  rb_atomic_t interrupt;
1905  rb_atomic_t old;
1906  int sig;
1907  int timer_interrupt;
1908  int pending_interrupt;
1909  int finalizer_interrupt;
1910  int trap_interrupt;
1911 
1912  do {
1913  interrupt = th->interrupt_flag;
1914  old = ATOMIC_CAS(th->interrupt_flag, interrupt, interrupt & th->interrupt_mask);
1915  } while (old != interrupt);
1916 
1917  interrupt &= (rb_atomic_t)~th->interrupt_mask;
1918  if (!interrupt)
1919  return;
1920 
1921  timer_interrupt = interrupt & TIMER_INTERRUPT_MASK;
1922  pending_interrupt = interrupt & PENDING_INTERRUPT_MASK;
1923  finalizer_interrupt = interrupt & FINALIZER_INTERRUPT_MASK;
1924  trap_interrupt = interrupt & TRAP_INTERRUPT_MASK;
1925 
1926  /* signal handling */
1927  if (trap_interrupt && (th == th->vm->main_thread)) {
1928  enum rb_thread_status prev_status = th->status;
1929  th->status = THREAD_RUNNABLE;
1930  while ((sig = rb_get_next_signal()) != 0) {
1931  rb_signal_exec(th, sig);
1932  }
1933  th->status = prev_status;
1934  }
1935 
1936  /* exception from another thread */
1937  if (pending_interrupt && rb_threadptr_pending_interrupt_active_p(th)) {
1939  thread_debug("rb_thread_execute_interrupts: %"PRIdVALUE"\n", err);
1940 
1941  if (err == Qundef) {
1942  /* no error */
1943  }
1944  else if (err == eKillSignal /* Thread#kill received */ ||
1945  err == eTerminateSignal /* Terminate thread */ ||
1946  err == INT2FIX(TAG_FATAL) /* Thread.exit etc. */ ) {
1948  }
1949  else {
1950  /* make th runnable again if it was sleeping. */
1951  if (th->status == THREAD_STOPPED ||
1953  th->status = THREAD_RUNNABLE;
1954  rb_exc_raise(err);
1955  }
1956  }
1957 
1958  if (finalizer_interrupt) {
1960  }
1961 
1962  if (timer_interrupt) {
1963  unsigned long limits_us = TIME_QUANTUM_USEC;
1964 
1965  if (th->priority > 0)
1966  limits_us <<= th->priority;
1967  else
1968  limits_us >>= -th->priority;
1969 
1970  if (th->status == THREAD_RUNNABLE)
1971  th->running_time_us += TIME_QUANTUM_USEC;
1972 
1973  EXEC_EVENT_HOOK(th, RUBY_EVENT_SWITCH, th->cfp->self, 0, 0, Qundef);
1974 
1975  rb_thread_schedule_limits(limits_us);
1976  }
1977  }
1978 }
1979 
/* NOTE(review): the name line (orig. 1981) and the delegating call
 * (orig. 1985) were dropped by extraction; presumably
 * rb_thread_execute_interrupts(VALUE thval), a VALUE-taking wrapper around
 * the threadptr variant above -- confirm against the original thread.c. */
1980 void
1982 {
1983  rb_thread_t *th;
1984  GetThreadPtr(thval, th);
1986 }
1987 
/* NOTE(review): name and body lines dropped by extraction (orig. 1989,
 * 1991); presumably rb_threadptr_ready(rb_thread_t *th), which interrupts
 * the target thread so it wakes up -- confirm against the original
 * thread.c (it is called from rb_thread_wakeup_alive() below). */
1988 static void
1990 {
1992 }
1993 
/* Build an exception from argc/argv (RuntimeError when argc == 0) and queue
 * it on the target thread; no-op for dead threads.
 * NOTE(review): the name line (orig. 1995) and the enqueue/interrupt lines
 * (orig. 2009-2010) were dropped by extraction; presumably
 * rb_threadptr_raise(rb_thread_t *th, int argc, VALUE *argv). */
1994 static VALUE
1996 {
1997  VALUE exc;
1998 
1999  if (rb_threadptr_dead(th)) {
2000  return Qnil;
2001  }
2002 
2003  if (argc == 0) {
2004  exc = rb_exc_new(rb_eRuntimeError, 0, 0);
2005  }
2006  else {
2007  exc = rb_make_exception(argc, argv);
2008  }
2011  return Qnil;
2012 }
2013 
/* Raise SignalException(sig) on the main thread.
 * NOTE(review): name line dropped by extraction (orig. line 2015);
 * presumably rb_threadptr_signal_raise(rb_thread_t *th, int sig). */
2014 void
2016 {
2017  VALUE argv[2];
2018 
2019  argv[0] = rb_eSignal;
2020  argv[1] = INT2FIX(sig);
2021  rb_threadptr_raise(th->vm->main_thread, 2, argv);
2022 }
2023 
/* Raise SystemExit("exit") on the main thread.
 * NOTE(review): name line dropped by extraction (orig. line 2025);
 * presumably rb_threadptr_signal_exit(rb_thread_t *th). */
2024 void
2026 {
2027  VALUE argv[2];
2028 
2029  argv[0] = rb_eSystemExit;
2030  argv[1] = rb_str_new2("exit");
2031  rb_threadptr_raise(th->vm->main_thread, 2, argv);
2032 }
2033 
2034 #if defined(POSIX_SIGNAL) && defined(SIGSEGV) && defined(HAVE_SIGALTSTACK)
2035 #define USE_SIGALTSTACK
2036 #endif
2037 
/* Report a machine stack overflow for `th' by jumping out with TAG_RAISE
 * (the sigaltstack branch body, orig. line 2043, was dropped by extraction).
 * NOTE(review): name line also missing (orig. 2039); presumably
 * ruby_thread_stack_overflow(rb_thread_t *th) -- confirm against thread.c. */
2038 void
2040 {
2041  th->raised_flag = 0;
2042 #ifdef USE_SIGALTSTACK
2044 #else
2045  th->errinfo = sysstack_error;
2046  TH_JUMP_TAG(th, TAG_RAISE);
2047 #endif
2048 }
2049 
/* Set RAISED_EXCEPTION on th->raised_flag; returns 1 if it was already set,
 * 0 otherwise.
 * NOTE(review): the name line (orig. 2051) and the flag-setting line
 * (orig. 2056) were dropped by extraction; presumably
 * rb_threadptr_set_raised(rb_thread_t *th) -- cf. the reset function below. */
2050 int
2052 {
2053  if (th->raised_flag & RAISED_EXCEPTION) {
2054  return 1;
2055  }
2057  return 0;
2058 }
2059 
/* Clear RAISED_EXCEPTION from th->raised_flag; returns 1 if it was set,
 * 0 if it was already clear.
 * NOTE(review): name line dropped by extraction (orig. line 2061);
 * presumably rb_threadptr_reset_raised(rb_thread_t *th). */
2060 int
2062 {
2063  if (!(th->raised_flag & RAISED_EXCEPTION)) {
2064  return 0;
2065  }
2066  th->raised_flag &= ~RAISED_EXCEPTION;
2067  return 1;
2068 }
2069 
/* st_foreach callback for rb_thread_fd_close(): for each living thread
 * blocked on `fd', raise IOError on it (the raising lines, orig. 2078-2080,
 * were dropped by extraction).
 * NOTE(review): name line also missing (orig. 2071); presumably
 * thread_fd_close_i(st_data_t key, st_data_t val, st_data_t data). */
2070 static int
2072 {
2073  int fd = (int)data;
2074  rb_thread_t *th;
2075  GetThreadPtr((VALUE)key, th);
2076 
2077  if (th->waiting_fd == fd) {
2081  }
2082  return ST_CONTINUE;
2083 }
2084 
/* Notify every living thread waiting on `fd' that the fd is being closed.
 * NOTE(review): name line dropped by extraction (orig. line 2086);
 * presumably rb_thread_fd_close(int fd). */
2085 void
2087 {
2088  st_foreach(GET_THREAD()->vm->living_threads, thread_fd_close_i, (st_index_t)fd);
2089 }
2090 
2091 /*
2092  * call-seq:
2093  * thr.raise
2094  * thr.raise(string)
2095  * thr.raise(exception [, string [, array]])
2096  *
2097  * Raises an exception (see <code>Kernel::raise</code>) from <i>thr</i>. The
2098  * caller does not have to be <i>thr</i>.
2099  *
2100  * Thread.abort_on_exception = true
2101  * a = Thread.new { sleep(200) }
2102  * a.raise("Gotcha")
2103  *
2104  * <em>produces:</em>
2105  *
2106  * prog.rb:3: Gotcha (RuntimeError)
2107  * from prog.rb:2:in `initialize'
2108  * from prog.rb:2:in `new'
2109  * from prog.rb:2
2110  */
2111 
/* Implementation of Thread#raise (see comment above): queue the exception on
 * the target; when the target is the current thread, check interrupts
 * immediately so it behaves like Kernel#raise.
 * NOTE(review): name line dropped by extraction (orig. line 2113);
 * presumably thread_raise_m(int argc, VALUE *argv, VALUE self). */
2112 static VALUE
2114 {
2115  rb_thread_t *target_th;
2116  rb_thread_t *th = GET_THREAD();
2117  GetThreadPtr(self, target_th);
2118  rb_threadptr_raise(target_th, argc, argv);
2119 
2120  /* To perform Thread.current.raise as Kernel.raise */
2121  if (th == target_th) {
2122  RUBY_VM_CHECK_INTS(th);
2123  }
2124  return Qnil;
2125 }
2126 
2127 
2128 /*
2129  * call-seq:
2130  * thr.exit -> thr or nil
2131  * thr.kill -> thr or nil
2132  * thr.terminate -> thr or nil
2133  *
2134  * Terminates <i>thr</i> and schedules another thread to be run. If this thread
2135  * is already marked to be killed, <code>exit</code> returns the
2136  * <code>Thread</code>. If this is the main thread, or the last thread, exits
2137  * the process.
2138  */
2139 
/* Implementation of Thread#kill/#exit/#terminate (see comment above).
 * NOTE(review): garbled by extraction -- the name line (orig. 2141,
 * presumably rb_thread_kill(VALUE thread)), the main-thread exit call
 * (orig. 2154), the self-kill call (orig. 2161) and the cross-thread kill
 * enqueue (orig. 2164-2165) are missing. Confirm against the original
 * thread.c. */
2140 VALUE
2142 {
2143  rb_thread_t *th;
2144 
2145  GetThreadPtr(thread, th);
2146 
2147  if (th != GET_THREAD() && th->safe_level < 4) {
2148  rb_secure(4);
2149  }
2150  if (th->to_kill || th->status == THREAD_KILLED) {
2151  return thread;
2152  }
2153  if (th == th->vm->main_thread) {
2155  }
2156 
2157  thread_debug("rb_thread_kill: %p (%p)\n", (void *)th, (void *)th->thread_id);
2158 
2159  if (th == GET_THREAD()) {
2160  /* kill myself immediately */
2162  }
2163  else {
2166  }
2167  return thread;
2168 }
2169 
2170 
2171 /*
2172  * call-seq:
2173  * Thread.kill(thread) -> thread
2174  *
2175  * Causes the given <em>thread</em> to exit (see <code>Thread::exit</code>).
2176  *
2177  * count = 0
2178  * a = Thread.new { loop { count += 1 } }
2179  * sleep(0.1) #=> 0
2180  * Thread.kill(a) #=> #<Thread:0x401b3d30 dead>
2181  * count #=> 93947
2182  * a.alive? #=> false
2183  */
2184 
/* Implementation of Thread.kill(thread) -- delegates to rb_thread_kill().
 * NOTE(review): name line dropped by extraction (orig. line 2186);
 * presumably thread_s_kill(VALUE obj, VALUE th). */
2185 static VALUE
2187 {
2188  return rb_thread_kill(th);
2189 }
2190 
2191 
2192 /*
2193  * call-seq:
2194  * Thread.exit -> thread
2195  *
2196  * Terminates the currently running thread and schedules another thread to be
2197  * run. If this thread is already marked to be killed, <code>exit</code>
2198  * returns the <code>Thread</code>. If this is the main thread, or the last
2199  * thread, exit the process.
2200  */
2201 
/* Implementation of Thread.exit -- kills the current thread.
 * NOTE(review): name line dropped by extraction (orig. line 2203);
 * presumably rb_thread_s_exit(VALUE klass). */
2202 static VALUE
2204 {
2205  rb_thread_t *th = GET_THREAD();
2206  return rb_thread_kill(th->self);
2207 }
2208 
2209 
2210 /*
2211  * call-seq:
2212  * thr.wakeup -> thr
2213  *
2214  * Marks <i>thr</i> as eligible for scheduling (it may still remain blocked on
2215  * I/O, however). Does not invoke the scheduler (see <code>Thread#run</code>).
2216  *
2217  * c = Thread.new { Thread.stop; puts "hey!" }
2218  * sleep 0.1 while c.status!='sleep'
2219  * c.wakeup
2220  * c.join
2221  *
2222  * <em>produces:</em>
2223  *
2224  * hey!
2225  */
2226 
/* Implementation of Thread#wakeup (see comment above): wake the thread or
 * raise ThreadError if it is already dead.
 * NOTE(review): name line dropped by extraction (orig. line 2228);
 * presumably rb_thread_wakeup(VALUE thread). */
2227 VALUE
2229 {
2230  if (!RTEST(rb_thread_wakeup_alive(thread))) {
2231  rb_raise(rb_eThreadError, "killed thread");
2232  }
2233  return thread;
2234 }
2235 
/* Wake the thread and mark it runnable; returns Qnil (instead of raising)
 * when the thread is already dead.
 * NOTE(review): name line dropped by extraction (orig. line 2237);
 * presumably rb_thread_wakeup_alive(VALUE thread). */
2236 VALUE
2238 {
2239  rb_thread_t *th;
2240  GetThreadPtr(thread, th);
2241 
2242  if (th->status == THREAD_KILLED) {
2243  return Qnil;
2244  }
2245  rb_threadptr_ready(th);
2246  if (th->status == THREAD_STOPPED || th->status == THREAD_STOPPED_FOREVER)
2247  th->status = THREAD_RUNNABLE;
2248  return thread;
2249 }
2250 
2251 
2252 /*
2253  * call-seq:
2254  * thr.run -> thr
2255  *
2256  * Wakes up <i>thr</i>, making it eligible for scheduling.
2257  *
2258  * a = Thread.new { puts "a"; Thread.stop; puts "c" }
2259  * sleep 0.1 while a.status!='sleep'
2260  * puts "Got here"
2261  * a.run
2262  * a.join
2263  *
2264  * <em>produces:</em>
2265  *
2266  * a
2267  * Got here
2268  * c
2269  */
2270 
/* Implementation of Thread#run (see comment above): wake the thread, then
 * yield the scheduler (the schedule call, orig. line 2275, was dropped by
 * extraction).
 * NOTE(review): name line also missing (orig. 2272); presumably
 * rb_thread_run(VALUE thread). */
2271 VALUE
2273 {
2274  rb_thread_wakeup(thread);
2276  return thread;
2277 }
2278 
2279 
2280 /*
2281  * call-seq:
2282  * Thread.stop -> nil
2283  *
2284  * Stops execution of the current thread, putting it into a ``sleep'' state,
2285  * and schedules execution of another thread.
2286  *
2287  * a = Thread.new { print "a"; Thread.stop; print "c" }
2288  * sleep 0.1 while a.status!='sleep'
2289  * print "b"
2290  * a.run
2291  * a.join
2292  *
2293  * <em>produces:</em>
2294  *
2295  * abc
2296  */
2297 
/* Implementation of Thread.stop (see comment above): refuse to stop the only
 * thread, otherwise sleep forever until woken.
 * NOTE(review): garbled by extraction -- the name line (orig. 2299), the
 * rb_raise opening line (orig. 2302) and the sleep call (orig. 2305) are
 * missing. Presumably rb_thread_stop(void) -- confirm against thread.c. */
2298 VALUE
2300 {
2301  if (rb_thread_alone()) {
2303  "stopping only thread\n\tnote: use sleep to stop forever");
2304  }
2306  return Qnil;
2307 }
2308 
/* st_foreach callback for Thread.list: push runnable and stopped threads
 * onto the result array (the THREAD_STOPPED_FOREVER case label, orig. line
 * 2319, was dropped by extraction).
 * NOTE(review): name line also missing (orig. 2310); presumably
 * thread_list_i(st_data_t key, st_data_t val, void *data). */
2309 static int
2311 {
2312  VALUE ary = (VALUE)data;
2313  rb_thread_t *th;
2314  GetThreadPtr((VALUE)key, th);
2315 
2316  switch (th->status) {
2317  case THREAD_RUNNABLE:
2318  case THREAD_STOPPED:
2320  rb_ary_push(ary, th->self);
2321  default:
2322  break;
2323  }
2324  return ST_CONTINUE;
2325 }
2326 
2327 /********************************************************************/
2328 
2329 /*
2330  * call-seq:
2331  * Thread.list -> array
2332  *
2333  * Returns an array of <code>Thread</code> objects for all threads that are
2334  * either runnable or stopped.
2335  *
2336  * Thread.new { sleep(200) }
2337  * Thread.new { 1000000.times {|i| i*i } }
2338  * Thread.new { Thread.stop }
2339  * Thread.list.each {|t| p t}
2340  *
2341  * <em>produces:</em>
2342  *
2343  * #<Thread:0x401b3e84 sleep>
2344  * #<Thread:0x401b3f38 run>
2345  * #<Thread:0x401b3fb0 sleep>
2346  * #<Thread:0x401bdf4c run>
2347  */
2348 
/* Implementation of Thread.list (see comment above): collect all living
 * runnable/stopped threads into a new array.
 * NOTE(review): name line dropped by extraction (orig. line 2350);
 * presumably rb_thread_list(void). */
2349 VALUE
2351 {
2352  VALUE ary = rb_ary_new();
2353  st_foreach(GET_THREAD()->vm->living_threads, thread_list_i, ary);
2354  return ary;
2355 }
2356 
/* Return the Thread object of the currently executing thread.
 * NOTE(review): name line dropped by extraction (orig. line 2358);
 * presumably rb_thread_current(void). */
2357 VALUE
2359 {
2360  return GET_THREAD()->self;
2361 }
2362 
2363 /*
2364  * call-seq:
2365  * Thread.current -> thread
2366  *
2367  * Returns the currently executing thread.
2368  *
2369  * Thread.current #=> #<Thread:0x401bdf4c run>
2370  */
2371 
/* Implementation of Thread.current -- delegates to rb_thread_current().
 * NOTE(review): name line dropped by extraction (orig. line 2373);
 * presumably thread_s_current(VALUE klass). */
2372 static VALUE
2374 {
2375  return rb_thread_current();
2376 }
2377 
/* Return the Thread object of the VM's main thread.
 * NOTE(review): name line dropped by extraction (orig. line 2379);
 * presumably rb_thread_main(void). */
2378 VALUE
2380 {
2381  return GET_THREAD()->vm->main_thread->self;
2382 }
2383 
2384 /*
2385  * call-seq:
2386  * Thread.main -> thread
2387  *
2388  * Returns the main thread.
2389  */
2390 
/* Implementation of Thread.main -- delegates to rb_thread_main().
 * NOTE(review): name line dropped by extraction (orig. line 2392);
 * presumably rb_thread_s_main(VALUE klass). */
2391 static VALUE
2393 {
2394  return rb_thread_main();
2395 }
2396 
2397 
2398 /*
2399  * call-seq:
2400  * Thread.abort_on_exception -> true or false
2401  *
2402  * Returns the status of the global ``abort on exception'' condition. The
2403  * default is <code>false</code>. When set to <code>true</code>, or if the
2404  * global <code>$DEBUG</code> flag is <code>true</code> (perhaps because the
2405  * command line option <code>-d</code> was specified) all threads will abort
2406  * (the process will <code>exit(0)</code>) if an exception is raised in any
2407  * thread. See also <code>Thread::abort_on_exception=</code>.
2408  */
2409 
/* NOTE(review): implementation of Thread.abort_on_exception (see comment
 * above); the name line (orig. 2411) and the body line (orig. 2413,
 * presumably returning the VM-global thread_abort_on_exception flag) were
 * dropped by extraction -- confirm against the original thread.c. */
2410 static VALUE
2412 {
2414 }
2415 
2416 
2417 /*
2418  * call-seq:
2419  * Thread.abort_on_exception= boolean -> true or false
2420  *
2421  * When set to <code>true</code>, all threads will abort if an exception is
2422  * raised. Returns the new state.
2423  *
2424  * Thread.abort_on_exception = true
2425  * t1 = Thread.new do
2426  * puts "In new thread"
2427  * raise "Exception from thread"
2428  * end
2429  * sleep(1)
2430  * puts "not reached"
2431  *
2432  * <em>produces:</em>
2433  *
2434  * In new thread
2435  * prog.rb:4: Exception from thread (RuntimeError)
2436  * from prog.rb:2:in `initialize'
2437  * from prog.rb:2:in `new'
2438  * from prog.rb:2
2439  */
2440 
/* NOTE(review): implementation of Thread.abort_on_exception= (see comment
 * above); the name line (orig. 2442) and the assignment line (orig. 2445,
 * presumably setting the VM-global flag from `val') were dropped by
 * extraction -- confirm against the original thread.c. */
2441 static VALUE
2443 {
2444  rb_secure(4);
2446  return val;
2447 }
2448 
2449 
2450 /*
2451  * call-seq:
2452  * thr.abort_on_exception -> true or false
2453  *
2454  * Returns the status of the thread-local ``abort on exception'' condition for
2455  * <i>thr</i>. The default is <code>false</code>. See also
2456  * <code>Thread::abort_on_exception=</code>.
2457  */
2458 
/* Implementation of Thread#abort_on_exception (see comment above).
 * NOTE(review): name line dropped by extraction (orig. line 2460);
 * presumably rb_thread_abort_exc(VALUE thread). */
2459 static VALUE
2461 {
2462  rb_thread_t *th;
2463  GetThreadPtr(thread, th);
2464  return th->abort_on_exception ? Qtrue : Qfalse;
2465 }
2466 
2467 
2468 /*
2469  * call-seq:
2470  * thr.abort_on_exception= boolean -> true or false
2471  *
2472  * When set to <code>true</code>, causes all threads (including the main
2473  * program) to abort if an exception is raised in <i>thr</i>. The process will
2474  * effectively <code>exit(0)</code>.
2475  */
2476 
/* Implementation of Thread#abort_on_exception= (see comment above).
 * NOTE(review): name line dropped by extraction (orig. line 2478);
 * presumably rb_thread_abort_exc_set(VALUE thread, VALUE val). */
2477 static VALUE
2479 {
2480  rb_thread_t *th;
2481  rb_secure(4);
2482 
2483  GetThreadPtr(thread, th);
2484  th->abort_on_exception = RTEST(val);
2485  return val;
2486 }
2487 
2488 
2489 /*
2490  * call-seq:
2491  * thr.group -> thgrp or nil
2492  *
2493  * Returns the <code>ThreadGroup</code> which contains <i>thr</i>, or nil if
2494  * the thread is not a member of any group.
2495  *
2496  * Thread.main.group #=> #<ThreadGroup:0x4029d914>
2497  */
2498 
/* Implementation of Thread#group (see comment above): return the thread's
 * ThreadGroup, or Qnil when it has none.
 * NOTE(review): name line dropped by extraction (orig. line 2500);
 * presumably rb_thread_group(VALUE thread). */
2499 VALUE
2501 {
2502  rb_thread_t *th;
2503  VALUE group;
2504  GetThreadPtr(thread, th);
2505  group = th->thgroup;
2506 
2507  if (!group) {
2508  group = Qnil;
2509  }
2510  return group;
2511 }
2512 
/* Map a thread's status to the string reported by Thread#status / #inspect
 * ("run", "aborting", "sleep", "dead", "unknown"). The
 * THREAD_STOPPED_FOREVER case label (orig. line 2523) was dropped by
 * extraction.
 * NOTE(review): name line also missing (orig. 2514); presumably
 * thread_status_name(rb_thread_t *th). */
2513 static const char *
2515 {
2516  switch (th->status) {
2517  case THREAD_RUNNABLE:
2518  if (th->to_kill)
2519  return "aborting";
2520  else
2521  return "run";
2522  case THREAD_STOPPED:
2524  return "sleep";
2525  case THREAD_KILLED:
2526  return "dead";
2527  default:
2528  return "unknown";
2529  }
2530 }
2531 
/* Returns nonzero when the thread has terminated (status == THREAD_KILLED).
 * NOTE(review): name line dropped by extraction (orig. line 2533);
 * presumably rb_threadptr_dead(rb_thread_t *th). */
2532 static int
2534 {
2535  return th->status == THREAD_KILLED;
2536 }
2537 
2538 
2539 /*
2540  * call-seq:
2541  * thr.status -> string, false or nil
2542  *
2543  * Returns the status of <i>thr</i>: ``<code>sleep</code>'' if <i>thr</i> is
2544  * sleeping or waiting on I/O, ``<code>run</code>'' if <i>thr</i> is executing,
2545  * ``<code>aborting</code>'' if <i>thr</i> is aborting, <code>false</code> if
2546  * <i>thr</i> terminated normally, and <code>nil</code> if <i>thr</i>
2547  * terminated with an exception.
2548  *
2549  * a = Thread.new { raise("die now") }
2550  * b = Thread.new { Thread.stop }
2551  * c = Thread.new { Thread.exit }
2552  * d = Thread.new { sleep }
2553  * d.kill #=> #<Thread:0x401b3678 aborting>
2554  * a.status #=> nil
2555  * b.status #=> "sleep"
2556  * c.status #=> false
2557  * d.status #=> "aborting"
2558  * Thread.current.status #=> "run"
2559  */
2560 
/* Implementation of Thread#status (see comment above): nil when the thread
 * died with an exception, false when it terminated normally, otherwise the
 * status-name string.
 * NOTE(review): name line dropped by extraction (orig. line 2562);
 * presumably thread_status(VALUE thread). */
2561 static VALUE
2563 {
2564  rb_thread_t *th;
2565  GetThreadPtr(thread, th);
2566 
2567  if (rb_threadptr_dead(th)) {
2568  if (!NIL_P(th->errinfo) && !FIXNUM_P(th->errinfo)
2569  /* TODO */ ) {
2570  return Qnil;
2571  }
2572  return Qfalse;
2573  }
2574  return rb_str_new2(thread_status_name(th));
2575 }
2576 
2577 
2578 /*
2579  * call-seq:
2580  * thr.alive? -> true or false
2581  *
2582  * Returns <code>true</code> if <i>thr</i> is running or sleeping.
2583  *
2584  * thr = Thread.new { }
2585  * thr.join #=> #<Thread:0x401b3fb0 dead>
2586  * Thread.current.alive? #=> true
2587  * thr.alive? #=> false
2588  */
2589 
2590 static VALUE
2592 {
2593  rb_thread_t *th;
2594  GetThreadPtr(thread, th);
2595 
2596  if (rb_threadptr_dead(th))
2597  return Qfalse;
2598  return Qtrue;
2599 }
2600 
2601 /*
2602  * call-seq:
2603  * thr.stop? -> true or false
2604  *
2605  * Returns <code>true</code> if <i>thr</i> is dead or sleeping.
2606  *
2607  * a = Thread.new { Thread.stop }
2608  * b = Thread.current
2609  * a.stop? #=> true
2610  * b.stop? #=> false
2611  */
2612 
2613 static VALUE
2615 {
2616  rb_thread_t *th;
2617  GetThreadPtr(thread, th);
2618 
2619  if (rb_threadptr_dead(th))
2620  return Qtrue;
2621  if (th->status == THREAD_STOPPED || th->status == THREAD_STOPPED_FOREVER)
2622  return Qtrue;
2623  return Qfalse;
2624 }
2625 
2626 /*
2627  * call-seq:
2628  * thr.safe_level -> integer
2629  *
2630  * Returns the safe level in effect for <i>thr</i>. Setting thread-local safe
2631  * levels can help when implementing sandboxes which run insecure code.
2632  *
2633  * thr = Thread.new { $SAFE = 3; sleep }
2634  * Thread.current.safe_level #=> 0
2635  * thr.safe_level #=> 3
2636  */
2637 
2638 static VALUE
2640 {
2641  rb_thread_t *th;
2642  GetThreadPtr(thread, th);
2643 
2644  return INT2NUM(th->safe_level);
2645 }
2646 
2647 /*
2648  * call-seq:
2649  * thr.inspect -> string
2650  *
2651  * Dump the name, id, and status of _thr_ to a string.
2652  */
2653 
2654 static VALUE
2656 {
2657  const char *cname = rb_obj_classname(thread);
2658  rb_thread_t *th;
2659  const char *status;
2660  VALUE str;
2661 
2662  GetThreadPtr(thread, th);
2663  status = thread_status_name(th);
2664  str = rb_sprintf("#<%s:%p %s>", cname, (void *)thread, status);
2665  OBJ_INFECT(str, thread);
2666 
2667  return str;
2668 }
2669 
/*
 * Fetch a fiber-local value by ID from th->local_storage.
 * Returns Qnil when the table does not exist yet or the key is absent.
 * Reading another thread's locals at $SAFE >= 4 raises SecurityError.
 * NOTE(review): the name line was lost in extraction; this is
 * rb_thread_local_aref(VALUE thread, ID id) upstream -- confirm.
 */
VALUE
{
    rb_thread_t *th;
    st_data_t val;

    GetThreadPtr(thread, th);
    if (rb_safe_level() >= 4 && th != GET_THREAD()) {
	rb_raise(rb_eSecurityError, "Insecure: thread locals");
    }
    if (!th->local_storage) {
	return Qnil;	/* table is created lazily; none means no locals */
    }
    if (st_lookup(th->local_storage, id, &val)) {
	return (VALUE)val;
    }
    return Qnil;
}
2688 
2689 /*
2690  * call-seq:
2691  * thr[sym] -> obj or nil
2692  *
2693  * Attribute Reference---Returns the value of a fiber-local variable (current thread's root fiber
 * if not explicitly inside a Fiber), using either a symbol or a string name.
2695  * If the specified variable does not exist, returns <code>nil</code>.
2696  *
2697  * [
2698  * Thread.new { Thread.current["name"] = "A" },
2699  * Thread.new { Thread.current[:name] = "B" },
2700  * Thread.new { Thread.current["name"] = "C" }
2701  * ].each do |th|
2702  * th.join
2703  * puts "#{th.inspect}: #{th[:name]}"
2704  * end
2705  *
2706  * <em>produces:</em>
2707  *
2708  * #<Thread:0x00000002a54220 dead>: A
2709  * #<Thread:0x00000002a541a8 dead>: B
2710  * #<Thread:0x00000002a54130 dead>: C
2711  *
2712  * Thread#[] and Thread#[]= are not thread-local but fiber-local.
2713  * This confusion did not exist in Ruby 1.8 because
2714  * fibers were only available since Ruby 1.9.
 * Ruby 1.9 chose to make these methods fiber-local to preserve the
2716  * following idiom for dynamic scope.
2717  *
2718  * def meth(newvalue)
2719  * begin
2720  * oldvalue = Thread.current[:name]
2721  * Thread.current[:name] = newvalue
2722  * yield
2723  * ensure
2724  * Thread.current[:name] = oldvalue
2725  * end
2726  * end
2727  *
2728  * The idiom may not work as dynamic scope if the methods are thread-local
2729  * and a given block switches fiber.
2730  *
2731  * f = Fiber.new {
2732  * meth(1) {
2733  * Fiber.yield
2734  * }
2735  * }
2736  * meth(2) {
2737  * f.resume
2738  * }
2739  * f.resume
2740  * p Thread.current[:name]
2741  * #=> nil if fiber-local
2742  * #=> 2 if thread-local (The value 2 is leaked to outside of meth method.)
2743  *
2744  * For thread-local variables, please see <code>Thread#thread_local_get</code>
2745  * and <code>Thread#thread_local_set</code>.
2746  *
2747  */
2748 
2749 static VALUE
2751 {
2752  return rb_thread_local_aref(thread, rb_to_id(id));
2753 }
2754 
/*
 * Store (or delete, when val is nil) a fiber-local keyed by ID in
 * th->local_storage, after $SAFE and frozen checks.
 * NOTE(review): the name line was lost in extraction
 * (rb_thread_local_aset(VALUE thread, ID id, VALUE val) upstream),
 * and the body of the "!th->local_storage" branch is missing here --
 * upstream lazily creates the table at that point
 * (th->local_storage = st_init_numtable()).  Confirm before relying
 * on this listing.
 */
VALUE
{
    rb_thread_t *th;
    GetThreadPtr(thread, th);

    if (rb_safe_level() >= 4 && th != GET_THREAD()) {
	rb_raise(rb_eSecurityError, "Insecure: can't modify thread locals");
    }
    if (OBJ_FROZEN(thread)) {
	rb_error_frozen("thread locals");
    }
    if (!th->local_storage) {
    }
    if (NIL_P(val)) {
	/* assigning nil removes the entry entirely */
	st_delete_wrap(th->local_storage, id);
	return Qnil;
    }
    st_insert(th->local_storage, id, val);
    return val;
}
2777 
2778 /*
2779  * call-seq:
2780  * thr[sym] = obj -> obj
2781  *
2782  * Attribute Assignment---Sets or creates the value of a fiber-local variable,
2783  * using either a symbol or a string. See also <code>Thread#[]</code>. For
2784  * thread-local variables, please see <code>Thread#thread_variable_set</code>
2785  * and <code>Thread#thread_variable_get</code>.
2786  */
2787 
2788 static VALUE
2790 {
2791  return rb_thread_local_aset(self, rb_to_id(id), val);
2792 }
2793 
2794 /*
2795  * call-seq:
2796  * thr.thread_variable_get(key) -> obj or nil
2797  *
2798  * Returns the value of a thread local variable that has been set. Note that
2799  * these are different than fiber local values. For fiber local values,
2800  * please see Thread#[] and Thread#[]=.
2801  *
2802  * Thread local values are carried along with threads, and do not respect
2803  * fibers. For example:
2804  *
2805  * Thread.new {
2806  * Thread.current.thread_variable_set("foo", "bar") # set a thread local
2807  * Thread.current["foo"] = "bar" # set a fiber local
2808  *
2809  * Fiber.new {
2810  * Fiber.yield [
2811  * Thread.current.thread_variable_get("foo"), # get the thread local
2812  * Thread.current["foo"], # get the fiber local
2813  * ]
2814  * }.resume
2815  * }.join.value # => ['bar', nil]
2816  *
2817  * The value "bar" is returned for the thread local, where nil is returned
2818  * for the fiber local. The fiber is executed in the same thread, so the
2819  * thread local values are available.
2820  *
2821  * See also Thread#[]
2822  */
2823 
/*
 * Thread#thread_variable_get: thread-locals (unlike the fiber-locals
 * behind Thread#[]) live in a plain Hash stored in the thread
 * object's "locals" instance variable; look the key up there as a
 * Symbol.
 * NOTE(review): the name line was lost in extraction -- confirm the
 * signature against upstream thread.c.
 */
static VALUE
{
    VALUE locals;
    rb_thread_t *th;

    GetThreadPtr(thread, th);

    if (rb_safe_level() >= 4 && th != GET_THREAD()) {
	rb_raise(rb_eSecurityError, "Insecure: can't modify thread locals");
    }

    locals = rb_iv_get(thread, "locals");
    return rb_hash_aref(locals, ID2SYM(rb_to_id(id)));
}
2839 
2840 /*
2841  * call-seq:
2842  * thr.thread_variable_set(key, value)
2843  *
2844  * Sets a thread local with +key+ to +value+. Note that these are local to
2845  * threads, and not to fibers. Please see Thread#thread_variable_get and
2846  * Thread#[] for more information.
2847  */
2848 
/*
 * Thread#thread_variable_set: store a thread-local (not fiber-local)
 * value in the "locals" Hash kept on the thread object, after $SAFE
 * and frozen checks.
 * NOTE(review): the name line was lost in extraction -- confirm the
 * signature against upstream thread.c.
 */
static VALUE
{
    VALUE locals;
    rb_thread_t *th;

    GetThreadPtr(thread, th);

    if (rb_safe_level() >= 4 && th != GET_THREAD()) {
	rb_raise(rb_eSecurityError, "Insecure: can't modify thread locals");
    }
    if (OBJ_FROZEN(thread)) {
	rb_error_frozen("thread locals");
    }

    locals = rb_iv_get(thread, "locals");
    return rb_hash_aset(locals, ID2SYM(rb_to_id(id)), val);
}
2867 
2868 /*
2869  * call-seq:
2870  * thr.key?(sym) -> true or false
2871  *
2872  * Returns <code>true</code> if the given string (or symbol) exists as a
2873  * fiber-local variable.
2874  *
2875  * me = Thread.current
2876  * me[:oliver] = "a"
2877  * me.key?(:oliver) #=> true
2878  * me.key?(:stanley) #=> false
2879  */
2880 
/*
 * Thread#key?: true iff the fiber-local table contains the given key.
 * NOTE(review): the name line was lost in extraction; presumably
 * rb_thread_key_p(VALUE self, VALUE key) -- confirm.
 */
static VALUE
{
    rb_thread_t *th;
    ID id = rb_to_id(key);

    GetThreadPtr(self, th);

    if (!th->local_storage) {
	return Qfalse;	/* table created lazily; absent means empty */
    }
    if (st_lookup(th->local_storage, id, 0)) {
	return Qtrue;
    }
    return Qfalse;
}
2897 
2898 static int
2900 {
2901  rb_ary_push(ary, ID2SYM(key));
2902  return ST_CONTINUE;
2903 }
2904 
2905 static int
2907 {
2908  return (int)vm->living_threads->num_entries;
2909 }
2910 
/*
 * Return non-zero when the current VM has exactly one living thread.
 * A missing living_threads table is treated as "alone" (num stays 1).
 * NOTE(review): the name line was lost in extraction; this is
 * rb_thread_alone(void) upstream -- confirm.
 */
int
{
    int num = 1;
    if (GET_THREAD()->vm->living_threads) {
	num = vm_living_thread_num(GET_THREAD()->vm);
	thread_debug("rb_thread_alone: %d\n", num);
    }
    return num == 1;
}
2921 
2922 /*
2923  * call-seq:
2924  * thr.keys -> array
2925  *
 * Returns an array of the names of the fiber-local variables (as Symbols).
2927  *
2928  * thr = Thread.new do
2929  * Thread.current[:cat] = 'meow'
2930  * Thread.current["dog"] = 'woof'
2931  * end
2932  * thr.join #=> #<Thread:0x401b3f10 dead>
2933  * thr.keys #=> [:dog, :cat]
2934  */
2935 
2936 static VALUE
2938 {
2939  rb_thread_t *th;
2940  VALUE ary = rb_ary_new();
2941  GetThreadPtr(self, th);
2942 
2943  if (th->local_storage) {
2945  }
2946  return ary;
2947 }
2948 
2949 static int
2951 {
2952  rb_ary_push(ary, key);
2953  return ST_CONTINUE;
2954 }
2955 
2956 /*
2957  * call-seq:
2958  * thr.thread_variables -> array
2959  *
 * Returns an array of the names of the thread-local variables (as Symbols).
2961  *
2962  * thr = Thread.new do
2963  * Thread.current.thread_variable_set(:cat, 'meow')
2964  * Thread.current.thread_variable_set("dog", 'woof')
2965  * end
2966  * thr.join #=> #<Thread:0x401b3f10 dead>
2967  * thr.thread_variables #=> [:dog, :cat]
2968  *
2969  * Note that these are not fiber local variables. Please see Thread#[] and
2970  * Thread#thread_variable_get for more details.
2971  */
2972 
2973 static VALUE
2975 {
2976  VALUE locals;
2977  VALUE ary;
2978 
2979  locals = rb_iv_get(thread, "locals");
2980  ary = rb_ary_new();
2981  rb_hash_foreach(locals, keys_i, ary);
2982 
2983  return ary;
2984 }
2985 
2986 /*
2987  * call-seq:
2988  * thr.thread_variable?(key) -> true or false
2989  *
2990  * Returns <code>true</code> if the given string (or symbol) exists as a
2991  * thread-local variable.
2992  *
2993  * me = Thread.current
2994  * me.thread_variable_set(:oliver, "a")
2995  * me.thread_variable?(:oliver) #=> true
2996  * me.thread_variable?(:stanley) #=> false
2997  *
2998  * Note that these are not fiber local variables. Please see Thread#[] and
2999  * Thread#thread_variable_get for more details.
3000  */
3001 
/*
 * Thread#thread_variable?: probe the "locals" Hash through its
 * st_table directly so no default-value machinery runs; a NULL ntbl
 * means the hash has no entries at all.
 * NOTE(review): the name line was lost in extraction -- confirm the
 * signature against upstream thread.c.
 */
static VALUE
{
    VALUE locals;

    locals = rb_iv_get(thread, "locals");

    if (!RHASH(locals)->ntbl)
	return Qfalse;

    if (st_lookup(RHASH(locals)->ntbl, ID2SYM(rb_to_id(key)), 0)) {
	return Qtrue;
    }

    return Qfalse;
}
3018 
3019 /*
3020  * call-seq:
3021  * thr.priority -> integer
3022  *
3023  * Returns the priority of <i>thr</i>. Default is inherited from the
3024  * current thread which creating the new thread, or zero for the
3025  * initial main thread; higher-priority thread will run more frequently
3026  * than lower-priority threads (but lower-priority threads can also run).
3027  *
3028  * This is just hint for Ruby thread scheduler. It may be ignored on some
3029  * platform.
3030  *
3031  * Thread.current.priority #=> 0
3032  */
3033 
3034 static VALUE
3036 {
3037  rb_thread_t *th;
3038  GetThreadPtr(thread, th);
3039  return INT2NUM(th->priority);
3040 }
3041 
3042 
3043 /*
3044  * call-seq:
3045  * thr.priority= integer -> thr
3046  *
3047  * Sets the priority of <i>thr</i> to <i>integer</i>. Higher-priority threads
3048  * will run more frequently than lower-priority threads (but lower-priority
3049  * threads can also run).
3050  *
3051  * This is just hint for Ruby thread scheduler. It may be ignored on some
3052  * platform.
3053  *
3054  * count1 = count2 = 0
3055  * a = Thread.new do
3056  * loop { count1 += 1 }
3057  * end
3058  * a.priority = -1
3059  *
3060  * b = Thread.new do
3061  * loop { count2 += 1 }
3062  * end
3063  * b.priority = -2
3064  * sleep 1 #=> 1
3065  * count1 #=> 622504
3066  * count2 #=> 5832
3067  */
3068 
/*
 * Thread#priority= -- store a new scheduling priority for th.
 * With USE_NATIVE_THREAD_PRIORITY the value is handed to the native
 * scheduler as-is; otherwise it is clamped into
 * [RUBY_THREAD_PRIORITY_MIN, RUBY_THREAD_PRIORITY_MAX] and used only
 * as a hint by Ruby's own scheduling.  rb_secure(4) forbids this at
 * high $SAFE levels.  Returns the priority actually stored.
 * NOTE(review): the name line was lost in extraction; presumably
 * rb_thread_priority_set(VALUE thread, VALUE prio) -- confirm.
 */
static VALUE
{
    rb_thread_t *th;
    int priority;
    GetThreadPtr(thread, th);

    rb_secure(4);

#if USE_NATIVE_THREAD_PRIORITY
    th->priority = NUM2INT(prio);
    native_thread_apply_priority(th);
#else
    priority = NUM2INT(prio);
    if (priority > RUBY_THREAD_PRIORITY_MAX) {
	priority = RUBY_THREAD_PRIORITY_MAX;
    }
    else if (priority < RUBY_THREAD_PRIORITY_MIN) {
	priority = RUBY_THREAD_PRIORITY_MIN;
    }
    th->priority = priority;
#endif
    return INT2NUM(th->priority);
}
3093 
3094 /* for IO */
3095 
3096 #if defined(NFDBITS) && defined(HAVE_RB_FD_INIT)
3097 
3098 /*
3099  * several Unix platforms support file descriptors bigger than FD_SETSIZE
3100  * in select(2) system call.
3101  *
3102  * - Linux 2.2.12 (?)
3103  * - NetBSD 1.2 (src/sys/kern/sys_generic.c:1.25)
3104  * select(2) documents how to allocate fd_set dynamically.
3105  * http://netbsd.gw.com/cgi-bin/man-cgi?select++NetBSD-4.0
3106  * - FreeBSD 2.2 (src/sys/kern/sys_generic.c:1.19)
3107  * - OpenBSD 2.0 (src/sys/kern/sys_generic.c:1.4)
3108  * select(2) documents how to allocate fd_set dynamically.
3109  * http://www.openbsd.org/cgi-bin/man.cgi?query=select&manpath=OpenBSD+4.4
3110  * - HP-UX documents how to allocate fd_set dynamically.
3111  * http://docs.hp.com/en/B2355-60105/select.2.html
3112  * - Solaris 8 has select_large_fdset
3113  * - Mac OS X 10.7 (Lion)
3114  * select(2) returns EINVAL if nfds is greater than FD_SET_SIZE and
3115  * _DARWIN_UNLIMITED_SELECT (or _DARWIN_C_SOURCE) isn't defined.
3116  * http://developer.apple.com/library/mac/#releasenotes/Darwin/SymbolVariantsRelNotes/_index.html
3117  *
3118  * When fd_set is not big enough to hold big file descriptors,
3119  * it should be allocated dynamically.
3120  * Note that this assumes fd_set is structured as bitmap.
3121  *
3122  * rb_fd_init allocates the memory.
3123  * rb_fd_term free the memory.
3124  * rb_fd_set may re-allocates bitmap.
3125  *
3126  * So rb_fd_set doesn't reject file descriptors bigger than FD_SETSIZE.
3127  */
3128 
3129 void
3130 rb_fd_init(rb_fdset_t *fds)
3131 {
3132  fds->maxfd = 0;
3133  fds->fdset = ALLOC(fd_set);
3134  FD_ZERO(fds->fdset);
3135 }
3136 
/*
 * Initialize *dst as a deep copy of *src.  The destination is assumed
 * uninitialized: a fresh buffer (never smaller than a plain fd_set)
 * is allocated and the source bitmap is copied wholesale.
 * NOTE(review): the function-name line was lost in extraction; by the
 * parameter usage this is rb_fd_init_copy(rb_fdset_t *dst, rb_fdset_t *src)
 * -- confirm against upstream thread.c.
 */
void
{
    size_t size = howmany(rb_fd_max(src), NFDBITS) * sizeof(fd_mask);

    if (size < sizeof(fd_set))
	size = sizeof(fd_set);
    dst->maxfd = src->maxfd;
    dst->fdset = xmalloc(size);
    memcpy(dst->fdset, src->fdset, size);
}
3148 
3149 void
3150 rb_fd_term(rb_fdset_t *fds)
3151 {
3152  if (fds->fdset) xfree(fds->fdset);
3153  fds->maxfd = 0;
3154  fds->fdset = 0;
3155 }
3156 
3157 void
3158 rb_fd_zero(rb_fdset_t *fds)
3159 {
3160  if (fds->fdset)
3161  MEMZERO(fds->fdset, fd_mask, howmany(fds->maxfd, NFDBITS));
3162 }
3163 
3164 static void
3165 rb_fd_resize(int n, rb_fdset_t *fds)
3166 {
3167  size_t m = howmany(n + 1, NFDBITS) * sizeof(fd_mask);
3168  size_t o = howmany(fds->maxfd, NFDBITS) * sizeof(fd_mask);
3169 
3170  if (m < sizeof(fd_set)) m = sizeof(fd_set);
3171  if (o < sizeof(fd_set)) o = sizeof(fd_set);
3172 
3173  if (m > o) {
3174  fds->fdset = xrealloc(fds->fdset, m);
3175  memset((char *)fds->fdset + o, 0, m - o);
3176  }
3177  if (n >= fds->maxfd) fds->maxfd = n + 1;
3178 }
3179 
3180 void
3181 rb_fd_set(int n, rb_fdset_t *fds)
3182 {
3183  rb_fd_resize(n, fds);
3184  FD_SET(n, fds->fdset);
3185 }
3186 
3187 void
3188 rb_fd_clr(int n, rb_fdset_t *fds)
3189 {
3190  if (n >= fds->maxfd) return;
3191  FD_CLR(n, fds->fdset);
3192 }
3193 
3194 int
3195 rb_fd_isset(int n, const rb_fdset_t *fds)
3196 {
3197  if (n >= fds->maxfd) return 0;
3198  return FD_ISSET(n, fds->fdset) != 0; /* "!= 0" avoids FreeBSD PR 91421 */
3199 }
3200 
3201 void
3202 rb_fd_copy(rb_fdset_t *dst, const fd_set *src, int max)
3203 {
3204  size_t size = howmany(max, NFDBITS) * sizeof(fd_mask);
3205 
3206  if (size < sizeof(fd_set)) size = sizeof(fd_set);
3207  dst->maxfd = max;
3208  dst->fdset = xrealloc(dst->fdset, size);
3209  memcpy(dst->fdset, src, size);
3210 }
3211 
/*
 * Copy an rb_fdset_t back into a caller-supplied plain fd_set,
 * refusing sets that grew beyond what fd_set can represent.  Exactly
 * sizeof(fd_set) bytes are copied; the backing buffer is always at
 * least that large (see rb_fd_init/rb_fd_resize).
 */
static void
rb_fd_rcopy(fd_set *dst, rb_fdset_t *src)
{
    size_t size = howmany(rb_fd_max(src), NFDBITS) * sizeof(fd_mask);

    if (size > sizeof(fd_set)) {
	rb_raise(rb_eArgError, "too large fdsets");
    }
    memcpy(dst, rb_fd_ptr(src), sizeof(fd_set));
}
3222 
3223 void
3224 rb_fd_dup(rb_fdset_t *dst, const rb_fdset_t *src)
3225 {
3226  size_t size = howmany(rb_fd_max(src), NFDBITS) * sizeof(fd_mask);
3227 
3228  if (size < sizeof(fd_set))
3229  size = sizeof(fd_set);
3230  dst->maxfd = src->maxfd;
3231  dst->fdset = xrealloc(dst->fdset, size);
3232  memcpy(dst->fdset, src->fdset, size);
3233 }
3234 
3235 #ifdef __native_client__
3236 int select(int nfds, fd_set *readfds, fd_set *writefds,
3237  fd_set *exceptfds, struct timeval *timeout);
3238 #endif
3239 
3240 int
3241 rb_fd_select(int n, rb_fdset_t *readfds, rb_fdset_t *writefds, rb_fdset_t *exceptfds, struct timeval *timeout)
3242 {
3243  fd_set *r = NULL, *w = NULL, *e = NULL;
3244  if (readfds) {
3245  rb_fd_resize(n - 1, readfds);
3246  r = rb_fd_ptr(readfds);
3247  }
3248  if (writefds) {
3249  rb_fd_resize(n - 1, writefds);
3250  w = rb_fd_ptr(writefds);
3251  }
3252  if (exceptfds) {
3253  rb_fd_resize(n - 1, exceptfds);
3254  e = rb_fd_ptr(exceptfds);
3255  }
3256  return select(n, r, w, e, timeout);
3257 }
3258 
3259 #undef FD_ZERO
3260 #undef FD_SET
3261 #undef FD_CLR
3262 #undef FD_ISSET
3263 
3264 #define FD_ZERO(f) rb_fd_zero(f)
3265 #define FD_SET(i, f) rb_fd_set((i), (f))
3266 #define FD_CLR(i, f) rb_fd_clr((i), (f))
3267 #define FD_ISSET(i, f) rb_fd_isset((i), (f))
3268 
3269 #elif defined(_WIN32)
3270 
3271 void
3272 rb_fd_init(rb_fdset_t *set)
3273 {
3274  set->capa = FD_SETSIZE;
3275  set->fdset = ALLOC(fd_set);
3276  FD_ZERO(set->fdset);
3277 }
3278 
/*
 * Win32 variant: initialize *dst, then copy *src into it.
 * NOTE(review): the name line was lost in extraction; this is
 * rb_fd_init_copy(rb_fdset_t *dst, rb_fdset_t *src) -- confirm.
 */
void
{
    rb_fd_init(dst);
    rb_fd_dup(dst, src);
}
3285 
3286 static void
3287 rb_fd_rcopy(fd_set *dst, rb_fdset_t *src)
3288 {
3289  int max = rb_fd_max(src);
3290 
3291  /* we assume src is the result of select() with dst, so dst should be
3292  * larger or equal than src. */
3293  if (max > FD_SETSIZE || (UINT)max > dst->fd_count) {
3294  rb_raise(rb_eArgError, "too large fdsets");
3295  }
3296 
3297  memcpy(dst->fd_array, src->fdset->fd_array, max);
3298  dst->fd_count = max;
3299 }
3300 
3301 void
3302 rb_fd_term(rb_fdset_t *set)
3303 {
3304  xfree(set->fdset);
3305  set->fdset = NULL;
3306  set->capa = 0;
3307 }
3308 
/*
 * Win32 variant: add the socket behind CRT descriptor fd to the set.
 * Winsock fd_sets are {fd_count, fd_array[]} structures, so membership
 * is a linear scan and growth reallocates the whole structure.
 */
void
rb_fd_set(int fd, rb_fdset_t *set)
{
    unsigned int i;
    SOCKET s = rb_w32_get_osfhandle(fd);	/* CRT fd -> SOCKET handle */

    /* already a member? */
    for (i = 0; i < set->fdset->fd_count; i++) {
	if (set->fdset->fd_array[i] == s) {
	    return;
	}
    }
    /* grow in FD_SETSIZE-sized steps; the realloc size mirrors the
     * winsock fd_set layout: an fd_count header followed by the
     * SOCKET array */
    if (set->fdset->fd_count >= (unsigned)set->capa) {
	set->capa = (set->fdset->fd_count / FD_SETSIZE + 1) * FD_SETSIZE;
	set->fdset = xrealloc(set->fdset, sizeof(unsigned int) + sizeof(SOCKET) * set->capa);
    }
    set->fdset->fd_array[set->fdset->fd_count++] = s;
}
3326 
3327 #undef FD_ZERO
3328 #undef FD_SET
3329 #undef FD_CLR
3330 #undef FD_ISSET
3331 
3332 #define FD_ZERO(f) rb_fd_zero(f)
3333 #define FD_SET(i, f) rb_fd_set((i), (f))
3334 #define FD_CLR(i, f) rb_fd_clr((i), (f))
3335 #define FD_ISSET(i, f) rb_fd_isset((i), (f))
3336 
3337 #else
3338 #define rb_fd_rcopy(d, s) (*(d) = *(s))
3339 #endif
3340 
/*
 * Core select loop: run native_fd_select() inside a blocking region
 * (GVL released, ubf_select as the unblocking function) and restart
 * after EINTR/ERESTART with the remaining portion of the timeout.
 * The caller's descriptor sets are snapshotted up front so each retry
 * starts from them rather than from whatever the kernel left behind.
 * NOTE(review): this extraction lost at least two lines here -- the
 * declaration of `result` (used below) and the post-region pending
 * interrupt check -- compare against upstream thread.c before editing.
 */
static int
do_select(int n, rb_fdset_t *read, rb_fdset_t *write, rb_fdset_t *except,
	  struct timeval *timeout)
{
    int lerrno;
    rb_fdset_t UNINITIALIZED_VAR(orig_read);
    rb_fdset_t UNINITIALIZED_VAR(orig_write);
    rb_fdset_t UNINITIALIZED_VAR(orig_except);
    double limit = 0;
    struct timeval wait_rest;
    rb_thread_t *th = GET_THREAD();

    if (timeout) {
	/* absolute deadline, used to recompute the remaining wait on retry */
	limit = timeofday();
	limit += (double)timeout->tv_sec+(double)timeout->tv_usec*1e-6;
	wait_rest = *timeout;
	timeout = &wait_rest;
    }

    if (read)
	rb_fd_init_copy(&orig_read, read);
    if (write)
	rb_fd_init_copy(&orig_write, write);
    if (except)
	rb_fd_init_copy(&orig_except, except);

  retry:
    lerrno = 0;

    BLOCKING_REGION({
	    result = native_fd_select(n, read, write, except, timeout, th);
	    if (result < 0) lerrno = errno;	/* capture before GVL reacquire */
	}, ubf_select, th, FALSE);

    errno = lerrno;

    if (result < 0) {
	switch (errno) {
	  case EINTR:
#ifdef ERESTART
	  case ERESTART:
#endif
	    /* restore the caller's sets and retry with the time left */
	    if (read)
		rb_fd_dup(read, &orig_read);
	    if (write)
		rb_fd_dup(write, &orig_write);
	    if (except)
		rb_fd_dup(except, &orig_except);

	    if (timeout) {
		double d = limit - timeofday();

		wait_rest.tv_sec = (time_t)d;
		wait_rest.tv_usec = (int)((d-(double)wait_rest.tv_sec)*1e6);
		if (wait_rest.tv_sec < 0) wait_rest.tv_sec = 0;
		if (wait_rest.tv_usec < 0) wait_rest.tv_usec = 0;
	    }

	    goto retry;
	  default:
	    break;
	}
    }

    if (read)
	rb_fd_term(&orig_read);
    if (write)
	rb_fd_term(&orig_write);
    if (except)
	rb_fd_term(&orig_except);

    return result;
}
3417 
3418 static void
3419 rb_thread_wait_fd_rw(int fd, int read)
3420 {
3421  int result = 0;
3422  int events = read ? RB_WAITFD_IN : RB_WAITFD_OUT;
3423 
3424  thread_debug("rb_thread_wait_fd_rw(%d, %s)\n", fd, read ? "read" : "write");
3425 
3426  if (fd < 0) {
3427  rb_raise(rb_eIOError, "closed stream");
3428  }
3429 
3430  result = rb_wait_for_single_fd(fd, events, NULL);
3431  if (result < 0) {
3432  rb_sys_fail(0);
3433  }
3434 
3435  thread_debug("rb_thread_wait_fd_rw(%d, %s): done\n", fd, read ? "read" : "write");
3436 }
3437 
/*
 * Public C API: block until fd becomes readable.
 * NOTE(review): the name line was lost in extraction; this is
 * rb_thread_wait_fd(int fd) -- confirm.
 */
void
{
    rb_thread_wait_fd_rw(fd, 1);
}
3443 
/*
 * Public C API: block until fd becomes writable; always returns TRUE
 * once the wait completes.
 * NOTE(review): the name line was lost in extraction; this is
 * rb_thread_fd_writable(int fd) -- confirm.
 */
int
{
    rb_thread_wait_fd_rw(fd, 0);
    return TRUE;
}
3450 
3451 int
3452 rb_thread_select(int max, fd_set * read, fd_set * write, fd_set * except,
3453  struct timeval *timeout)
3454 {
3455  rb_fdset_t fdsets[3];
3456  rb_fdset_t *rfds = NULL;
3457  rb_fdset_t *wfds = NULL;
3458  rb_fdset_t *efds = NULL;
3459  int retval;
3460 
3461  if (read) {
3462  rfds = &fdsets[0];
3463  rb_fd_init(rfds);
3464  rb_fd_copy(rfds, read, max);
3465  }
3466  if (write) {
3467  wfds = &fdsets[1];
3468  rb_fd_init(wfds);
3469  rb_fd_copy(wfds, write, max);
3470  }
3471  if (except) {
3472  efds = &fdsets[2];
3473  rb_fd_init(efds);
3474  rb_fd_copy(efds, except, max);
3475  }
3476 
3477  retval = rb_thread_fd_select(max, rfds, wfds, efds, timeout);
3478 
3479  if (rfds) {
3480  rb_fd_rcopy(read, rfds);
3481  rb_fd_term(rfds);
3482  }
3483  if (wfds) {
3484  rb_fd_rcopy(write, wfds);
3485  rb_fd_term(wfds);
3486  }
3487  if (efds) {
3488  rb_fd_rcopy(except, efds);
3489  rb_fd_term(efds);
3490  }
3491 
3492  return retval;
3493 }
3494 
/*
 * Public entry point for descriptor waiting: with no sets at all it
 * degenerates into a sleep; otherwise each set is resized to cover
 * descriptors below max and handed to do_select().
 * NOTE(review): a line is missing in this extraction inside the
 * "!timeout" branch -- upstream sleeps forever there
 * (rb_thread_sleep_forever()) before returning 0.  Confirm against
 * upstream thread.c.
 */
int
rb_thread_fd_select(int max, rb_fdset_t * read, rb_fdset_t * write, rb_fdset_t * except,
		    struct timeval *timeout)
{
    if (!read && !write && !except) {
	if (!timeout) {
	    return 0;
	}
	rb_thread_wait_for(*timeout);
	return 0;
    }

    if (read) {
	rb_fd_resize(max - 1, read);
    }
    if (write) {
	rb_fd_resize(max - 1, write);
    }
    if (except) {
	rb_fd_resize(max - 1, except);
    }
    return do_select(max, read, write, except, timeout);
}
3519 
3520 /*
3521  * poll() is supported by many OSes, but so far Linux is the only
3522  * one we know of that supports using poll() in all places select()
3523  * would work.
3524  */
3525 #if defined(HAVE_POLL) && defined(__linux__)
3526 # define USE_POLL
3527 #endif
3528 
3529 #ifdef USE_POLL
3530 
3531 /* The same with linux kernel. TODO: make platform independent definition. */
3532 #define POLLIN_SET (POLLRDNORM | POLLRDBAND | POLLIN | POLLHUP | POLLERR)
3533 #define POLLOUT_SET (POLLWRBAND | POLLWRNORM | POLLOUT | POLLERR)
3534 #define POLLEX_SET (POLLPRI)
3535 
3536 #ifndef HAVE_PPOLL
3537 /* TODO: don't ignore sigmask */
3538 int
3539 ppoll(struct pollfd *fds, nfds_t nfds,
3540  const struct timespec *ts, const sigset_t *sigmask)
3541 {
3542  int timeout_ms;
3543 
3544  if (ts) {
3545  int tmp, tmp2;
3546 
3547  if (ts->tv_sec > TIMET_MAX/1000)
3548  timeout_ms = -1;
3549  else {
3550  tmp = ts->tv_sec * 1000;
3551  tmp2 = ts->tv_nsec / (1000 * 1000);
3552  if (TIMET_MAX - tmp < tmp2)
3553  timeout_ms = -1;
3554  else
3555  timeout_ms = tmp + tmp2;
3556  }
3557  }
3558  else
3559  timeout_ms = -1;
3560 
3561  return poll(fds, nfds, timeout_ms);
3562 }
3563 #endif
3564 
3565 /*
3566  * returns a mask of events
3567  */
/*
 * Wait on a single descriptor with ppoll(2) in a blocking region,
 * restarting on EINTR/ERESTART with the remaining timeout, then map
 * the poll revents back to a mask of RB_WAITFD_* bits (select(2)
 * read/write/pri semantics).  Returns the event mask, or -1 with
 * errno set on failure (EBADF for a closed descriptor via POLLNVAL).
 * NOTE(review): a line is missing right after the blocking region in
 * this extraction -- upstream runs the pending-interrupt check there
 * (RUBY_VM_CHECK_INTS_BLOCKING).  Confirm against upstream thread.c.
 */
int
rb_wait_for_single_fd(int fd, int events, struct timeval *tv)
{
    struct pollfd fds;
    int result = 0, lerrno;
    double limit = 0;
    struct timespec ts;
    struct timespec *timeout = NULL;
    rb_thread_t *th = GET_THREAD();

    if (tv) {
	ts.tv_sec = tv->tv_sec;
	ts.tv_nsec = tv->tv_usec * 1000;
	/* absolute deadline for recomputing the wait after a retry */
	limit = timeofday();
	limit += (double)tv->tv_sec + (double)tv->tv_usec * 1e-6;
	timeout = &ts;
    }

    fds.fd = fd;
    fds.events = (short)events;

  retry:
    lerrno = 0;
    BLOCKING_REGION({
	result = ppoll(&fds, 1, timeout, NULL);
	if (result < 0) lerrno = errno;	/* capture before GVL reacquire */
    }, ubf_select, th, FALSE);

    if (result < 0) {
	errno = lerrno;
	switch (errno) {
	  case EINTR:
#ifdef ERESTART
	  case ERESTART:
#endif
	    if (timeout) {
		double d = limit - timeofday();

		ts.tv_sec = (long)d;
		ts.tv_nsec = (long)((d - (double)ts.tv_sec) * 1e9);
		if (ts.tv_sec < 0)
		    ts.tv_sec = 0;
		if (ts.tv_nsec < 0)
		    ts.tv_nsec = 0;
	    }
	    goto retry;
	}
	return -1;
    }

    if (fds.revents & POLLNVAL) {
	errno = EBADF;	/* descriptor was not open */
	return -1;
    }

    /*
     * POLLIN, POLLOUT have a different meanings from select(2)'s read/write bit.
     * Therefore we need fix it up.
     */
    result = 0;
    if (fds.revents & POLLIN_SET)
	result |= RB_WAITFD_IN;
    if (fds.revents & POLLOUT_SET)
	result |= RB_WAITFD_OUT;
    if (fds.revents & POLLEX_SET)
	result |= RB_WAITFD_PRI;

    return result;
}
3639 #else /* ! USE_POLL - implement rb_io_poll_fd() using select() */
/*
 * Initialize *fds and put the single descriptor fd in it; returns fds
 * so the caller can use it inline in a conditional expression.
 * NOTE(review): the name line was lost in extraction; this is
 * init_set_fd(int fd, rb_fdset_t *fds) -- confirm.
 */
static rb_fdset_t *
{
    rb_fd_init(fds);
    rb_fd_set(fd, fds);

    return fds;
}
3648 
3649 struct select_args {
3650  union {
3651  int fd;
3652  int error;
3653  } as;
3657  struct timeval *tv;
3658 };
3659 
/*
 * rb_ensure body for the select()-based rb_wait_for_single_fd:
 * run rb_thread_fd_select() on the (at most three) single-fd sets and
 * translate the outcome back into RB_WAITFD_* event bits.  On failure
 * errno is stashed into args->as.error -- the fd and the error share
 * a union, and the fd is no longer needed once select has failed.
 * NOTE(review): the name line was lost in extraction; this is
 * select_single(VALUE ptr) -- confirm.
 */
static VALUE
{
    struct select_args *args = (struct select_args *)ptr;
    int r;

    r = rb_thread_fd_select(args->as.fd + 1,
			    args->read, args->write, args->except, args->tv);
    if (r == -1)
	args->as.error = errno;
    if (r > 0) {
	r = 0;
	if (args->read && rb_fd_isset(args->as.fd, args->read))
	    r |= RB_WAITFD_IN;
	if (args->write && rb_fd_isset(args->as.fd, args->write))
	    r |= RB_WAITFD_OUT;
	if (args->except && rb_fd_isset(args->as.fd, args->except))
	    r |= RB_WAITFD_PRI;
    }
    return (VALUE)r;
}
3681 
/*
 * rb_ensure cleanup half: release whichever of the three fd sets were
 * initialized.  The return value is unused by rb_ensure.
 * NOTE(review): the name line was lost in extraction; this is
 * select_single_cleanup(VALUE ptr) -- confirm.
 */
static VALUE
{
    struct select_args *args = (struct select_args *)ptr;

    if (args->read) rb_fd_term(args->read);
    if (args->write) rb_fd_term(args->write);
    if (args->except) rb_fd_term(args->except);

    return (VALUE)-1;
}
3693 
3694 int
3695 rb_wait_for_single_fd(int fd, int events, struct timeval *tv)
3696 {
3697  rb_fdset_t rfds, wfds, efds;
3698  struct select_args args;
3699  int r;
3700  VALUE ptr = (VALUE)&args;
3701 
3702  args.as.fd = fd;
3703  args.read = (events & RB_WAITFD_IN) ? init_set_fd(fd, &rfds) : NULL;
3704  args.write = (events & RB_WAITFD_OUT) ? init_set_fd(fd, &wfds) : NULL;
3705  args.except = (events & RB_WAITFD_PRI) ? init_set_fd(fd, &efds) : NULL;
3706  args.tv = tv;
3707 
3708  r = (int)rb_ensure(select_single, ptr, select_single_cleanup, ptr);
3709  if (r == -1)
3710  errno = args.as.error;
3711 
3712  return r;
3713 }
3714 #endif /* ! USE_POLL */
3715 
3716 /*
3717  * for GC
3718  */
3719 
3720 #ifdef USE_CONSERVATIVE_STACK_END
3721 void
3723 {
3724  VALUE stack_end;
3725  *stack_end_p = &stack_end;
3726 }
3727 #endif
3728 
3729 
3730 /*
3731  *
3732  */
3733 
3734 void
3736 {
3737  /* mth must be main_thread */
3738  if (rb_signal_buff_size() > 0) {
3739  /* wakeup main thread */
3741  }
3742 }
3743 
3744 static void
3746 {
3747  rb_vm_t *vm = GET_VM(); /* TODO: fix me for Multi-VM */
3748 
3749  /*
3750  * Tricky: thread_destruct_lock doesn't close a race against
3751  * vm->running_thread switch. however it guarantee th->running_thread
3752  * point to valid pointer or NULL.
3753  */
3754  native_mutex_lock(&vm->thread_destruct_lock);
3755  /* for time slice */
3756  if (vm->running_thread)
3758  native_mutex_unlock(&vm->thread_destruct_lock);
3759 
3760  /* check signal */
3762 
3763 #if 0
3764  /* prove profiler */
3765  if (vm->prove_profile.enable) {
3766  rb_thread_t *th = vm->running_thread;
3767 
3768  if (vm->during_gc) {
3769  /* GC prove profiling */
3770  }
3771  }
3772 #endif
3773 }
3774 
3775 void
3777 {
3778  if (timer_thread_id && native_stop_timer_thread(close_anyway)) {
3779  native_reset_timer_thread();
3780  }
3781 }
3782 
3783 void
3785 {
3786  native_reset_timer_thread();
3787 }
3788 
3789 void
3791 {
3792  system_working = 1;
3793  rb_thread_create_timer_thread();
3794 }
3795 
3796 static int
3798 {
3799  int i;
3800  VALUE lines = (VALUE)val;
3801 
3802  for (i = 0; i < RARRAY_LEN(lines); i++) {
3803  if (RARRAY_PTR(lines)[i] != Qnil) {
3804  RARRAY_PTR(lines)[i] = INT2FIX(0);
3805  }
3806  }
3807  return ST_CONTINUE;
3808 }
3809 
3810 static void
3812 {
3813  VALUE coverages = rb_get_coverages();
3814  if (RTEST(coverages)) {
3815  st_foreach(RHASH_TBL(coverages), clear_coverage_i, 0);
3816  }
3817 }
3818 
3819 static void
3821 {
3822  rb_thread_t *th = GET_THREAD();
3823  rb_vm_t *vm = th->vm;
3824  VALUE thval = th->self;
3825  vm->main_thread = th;
3826 
3827  gvl_atfork(th->vm);
3828  st_foreach(vm->living_threads, atfork, (st_data_t)th);
3829  st_clear(vm->living_threads);
3830  st_insert(vm->living_threads, thval, (st_data_t)th->thread_id);
3831  vm->sleeper = 0;
3832  clear_coverage();
3833 }
3834 
3835 static int
3837 {
3838  VALUE thval = key;
3839  rb_thread_t *th;
3840  GetThreadPtr(thval, th);
3841 
3842  if (th != (rb_thread_t *)current_th) {
3846  }
3847  return ST_CONTINUE;
3848 }
3849 
3850 void
3852 {
3854  GET_THREAD()->join_list = NULL;
3855 
3856  /* We don't want reproduce CVE-2003-0900. */
3858 }
3859 
3860 static int
3862 {
3863  VALUE thval = key;
3864  rb_thread_t *th;
3865  GetThreadPtr(thval, th);
3866 
3867  if (th != (rb_thread_t *)current_th) {
3869  }
3870  return ST_CONTINUE;
3871 }
3872 
3873 void
3875 {
3877 }
3878 
3879 struct thgroup {
3882 };
3883 
3884 static size_t
3885 thgroup_memsize(const void *ptr)
3886 {
3887  return ptr ? sizeof(struct thgroup) : 0;
3888 }
3889 
3891  "thgroup",
3893 };
3894 
3895 /*
3896  * Document-class: ThreadGroup
3897  *
3898  * <code>ThreadGroup</code> provides a means of keeping track of a number of
3899  * threads as a group. A <code>Thread</code> can belong to only one
3900  * <code>ThreadGroup</code> at a time; adding a thread to a new group will
3901  * remove it from any previous group.
3902  *
3903  * Newly created threads belong to the same group as the thread from which they
3904  * were created.
3905  */
3906 
3907 /*
3908  * Document-const: Default
3909  *
3910  * The default ThreadGroup created when Ruby starts; all Threads belong to it
3911  * by default.
3912  */
/*
 * Allocator for ThreadGroup: wrap a struct thgroup in a typed-data
 * object.  A new group starts out not enclosed and keeps its own
 * VALUE handle so threads can point back at it.
 * NOTE(review): the name line was lost in extraction; this is
 * thgroup_s_alloc(VALUE klass) -- confirm.
 */
static VALUE
{
    VALUE group;
    struct thgroup *data;

    group = TypedData_Make_Struct(klass, struct thgroup, &thgroup_data_type, data);
    data->enclosed = 0;
    data->group = group;

    return group;
}
3925 
3929 };
3930 
3931 static int
3933 {
3934  VALUE thread = (VALUE)key;
3935  VALUE ary = ((struct thgroup_list_params *)data)->ary;
3936  VALUE group = ((struct thgroup_list_params *)data)->group;
3937  rb_thread_t *th;
3938  GetThreadPtr(thread, th);
3939 
3940  if (th->thgroup == group) {
3941  rb_ary_push(ary, thread);
3942  }
3943  return ST_CONTINUE;
3944 }
3945 
3946 /*
3947  * call-seq:
3948  * thgrp.list -> array
3949  *
3950  * Returns an array of all existing <code>Thread</code> objects that belong to
3951  * this group.
3952  *
3953  * ThreadGroup::Default.list #=> [#<Thread:0x401bdf4c run>]
3954  */
3955 
3956 static VALUE
3958 {
3959  VALUE ary = rb_ary_new();
3960  struct thgroup_list_params param;
3961 
3962  param.ary = ary;
3963  param.group = group;
3964  st_foreach(GET_THREAD()->vm->living_threads, thgroup_list_i, (st_data_t) & param);
3965  return ary;
3966 }
3967 
3968 
3969 /*
3970  * call-seq:
3971  * thgrp.enclose -> thgrp
3972  *
3973  * Prevents threads from being added to or removed from the receiving
3974  * <code>ThreadGroup</code>. New threads can still be started in an enclosed
3975  * <code>ThreadGroup</code>.
3976  *
3977  * ThreadGroup::Default.enclose #=> #<ThreadGroup:0x4029d914>
3978  * thr = Thread::new { Thread.stop } #=> #<Thread:0x402a7210 sleep>
3979  * tg = ThreadGroup::new #=> #<ThreadGroup:0x402752d4>
3980  * tg.add thr
3981  *
3982  * <em>produces:</em>
3983  *
3984  * ThreadError: can't move from the enclosed thread group
3985  */
3986 
3987 static VALUE
3989 {
3990  struct thgroup *data;
3991 
3992  TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
3993  data->enclosed = 1;
3994 
3995  return group;
3996 }
3997 
3998 
3999 /*
4000  * call-seq:
4001  * thgrp.enclosed? -> true or false
4002  *
4003  * Returns <code>true</code> if <em>thgrp</em> is enclosed. See also
4004  * ThreadGroup#enclose.
4005  */
4006 
4007 static VALUE
4009 {
4010  struct thgroup *data;
4011 
4012  TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
4013  if (data->enclosed)
4014  return Qtrue;
4015  return Qfalse;
4016 }
4017 
4018 
4019 /*
4020  * call-seq:
4021  * thgrp.add(thread) -> thgrp
4022  *
4023  * Adds the given <em>thread</em> to this group, removing it from any other
4024  * group to which it may have previously belonged.
4025  *
4026  * puts "Initial group is #{ThreadGroup::Default.list}"
4027  * tg = ThreadGroup.new
4028  * t1 = Thread.new { sleep }
4029  * t2 = Thread.new { sleep }
4030  * puts "t1 is #{t1}"
4031  * puts "t2 is #{t2}"
4032  * tg.add(t1)
4033  * puts "Initial group now #{ThreadGroup::Default.list}"
4034  * puts "tg group now #{tg.list}"
4035  *
4036  * <em>produces:</em>
4037  *
4038  * Initial group is #<Thread:0x401bdf4c>
4039  * t1 is #<Thread:0x401b3c90>
4040  * t2 is #<Thread:0x401b3c18>
4041  * Initial group now #<Thread:0x401b3c18>#<Thread:0x401bdf4c>
4042  * tg group now #<Thread:0x401b3c90>
4043  */
4044 
4045 static VALUE
4047 {
4048  rb_thread_t *th;
4049  struct thgroup *data;
4050 
4051  rb_secure(4);
4052  GetThreadPtr(thread, th);
4053 
4054  if (OBJ_FROZEN(group)) {
4055  rb_raise(rb_eThreadError, "can't move to the frozen thread group");
4056  }
4057  TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
4058  if (data->enclosed) {
4059  rb_raise(rb_eThreadError, "can't move to the enclosed thread group");
4060  }
4061 
4062  if (!th->thgroup) {
4063  return Qnil;
4064  }
4065 
4066  if (OBJ_FROZEN(th->thgroup)) {
4067  rb_raise(rb_eThreadError, "can't move from the frozen thread group");
4068  }
4070  if (data->enclosed) {
4072  "can't move from the enclosed thread group");
4073  }
4074 
4075  th->thgroup = group;
4076  return group;
4077 }
4078 
4079 
4080 /*
4081  * Document-class: Mutex
4082  *
4083  * Mutex implements a simple semaphore that can be used to coordinate access to
4084  * shared data from multiple concurrent threads.
4085  *
4086  * Example:
4087  *
4088  * require 'thread'
4089  * semaphore = Mutex.new
4090  *
4091  * a = Thread.new {
4092  * semaphore.synchronize {
4093  * # access shared resource
4094  * }
4095  * }
4096  *
4097  * b = Thread.new {
4098  * semaphore.synchronize {
4099  * # access shared resource
4100  * }
4101  * }
4102  *
4103  */
4104 
4105 #define GetMutexPtr(obj, tobj) \
4106  TypedData_Get_Struct((obj), rb_mutex_t, &mutex_data_type, (tobj))
4107 
4108 #define mutex_mark NULL
4109 
4110 static void
4111 mutex_free(void *ptr)
4112 {
4113  if (ptr) {
4114  rb_mutex_t *mutex = ptr;
4115  if (mutex->th) {
4116  /* rb_warn("free locked mutex"); */
4117  const char *err = rb_mutex_unlock_th(mutex, mutex->th);
4118  if (err) rb_bug("%s", err);
4119  }
4120  native_mutex_destroy(&mutex->lock);
4121  native_cond_destroy(&mutex->cond);
4122  }
4123  ruby_xfree(ptr);
4124 }
4125 
4126 static size_t
4127 mutex_memsize(const void *ptr)
4128 {
4129  return ptr ? sizeof(rb_mutex_t) : 0;
4130 }
4131 
4133  "mutex",
4135 };
4136 
4137 VALUE
4139 {
4140  if (rb_typeddata_is_kind_of(obj, &mutex_data_type)) {
4141  return Qtrue;
4142  }
4143  else {
4144  return Qfalse;
4145  }
4146 }
4147 
4148 static VALUE
4150 {
4151  VALUE volatile obj;
4152  rb_mutex_t *mutex;
4153 
4154  obj = TypedData_Make_Struct(klass, rb_mutex_t, &mutex_data_type, mutex);
4155  native_mutex_initialize(&mutex->lock);
4156  native_cond_initialize(&mutex->cond, RB_CONDATTR_CLOCK_MONOTONIC);
4157  return obj;
4158 }
4159 
4160 /*
4161  * call-seq:
4162  * Mutex.new -> mutex
4163  *
4164  * Creates a new Mutex
4165  */
4166 static VALUE
4168 {
4169  return self;
4170 }
4171 
4172 VALUE
4174 {
4175  return mutex_alloc(rb_cMutex);
4176 }
4177 
4178 /*
4179  * call-seq:
4180  * mutex.locked? -> true or false
4181  *
4182  * Returns +true+ if this lock is currently held by some thread.
4183  */
4184 VALUE
4186 {
4187  rb_mutex_t *mutex;
4188  GetMutexPtr(self, mutex);
4189  return mutex->th ? Qtrue : Qfalse;
4190 }
4191 
4192 static void
4194 {
4195  rb_mutex_t *mutex;
4196  GetMutexPtr(self, mutex);
4197 
4198  if (th->keeping_mutexes) {
4199  mutex->next_mutex = th->keeping_mutexes;
4200  }
4201  th->keeping_mutexes = mutex;
4202 }
4203 
4204 /*
4205  * call-seq:
4206  * mutex.try_lock -> true or false
4207  *
4208  * Attempts to obtain the lock and returns immediately. Returns +true+ if the
4209  * lock was granted.
4210  */
4211 VALUE
4213 {
4214  rb_mutex_t *mutex;
4215  VALUE locked = Qfalse;
4216  GetMutexPtr(self, mutex);
4217 
4218  native_mutex_lock(&mutex->lock);
4219  if (mutex->th == 0) {
4220  mutex->th = GET_THREAD();
4221  locked = Qtrue;
4222 
4223  mutex_locked(GET_THREAD(), self);
4224  }
4225  native_mutex_unlock(&mutex->lock);
4226 
4227  return locked;
4228 }
4229 
/*
 * Wait until +mutex+ becomes free and claim it for thread +th+.
 *
 * Must be entered with mutex->lock held; native_cond_wait /
 * native_cond_timedwait release and re-acquire it around each sleep.
 * When timeout_ms is non-zero each wait is bounded, so the caller can
 * periodically re-run its deadlock check (see rb_mutex_lock, which
 * passes 100ms for the single "patrol" thread).
 *
 * Returns 0 if the mutex was taken, 1 if the wait was interrupted
 * (RUBY_VM_INTERRUPTED), 2 if the bounded wait timed out.  The caller
 * uses 2 to trigger rb_check_deadlock.
 */
static int
lock_func(rb_thread_t *th, rb_mutex_t *mutex, int timeout_ms)
{
    int interrupted = 0;
    int err = 0;

    mutex->cond_waiting++;
    for (;;) {
	if (!mutex->th) {
	    /* mutex is free: claim it and stop waiting */
	    mutex->th = th;
	    break;
	}
	if (RUBY_VM_INTERRUPTED(th)) {
	    interrupted = 1;
	    break;
	}
	if (err == ETIMEDOUT) {
	    /* the previous timed wait expired without the mutex freeing */
	    interrupted = 2;
	    break;
	}

	if (timeout_ms) {
	    struct timespec timeout_rel;
	    struct timespec timeout;

	    /* NOTE(review): assumes timeout_ms < 1000 so tv_nsec stays in
	     * range; the only caller in view passes at most 100. */
	    timeout_rel.tv_sec = 0;
	    timeout_rel.tv_nsec = timeout_ms * 1000 * 1000;
	    timeout = native_cond_timeout(&mutex->cond, timeout_rel);
	    err = native_cond_timedwait(&mutex->cond, &mutex->lock, &timeout);
	}
	else {
	    native_cond_wait(&mutex->cond, &mutex->lock);
	    err = 0;
	}
    }
    mutex->cond_waiting--;

    return interrupted;
}
4269 
4270 static void
4271 lock_interrupt(void *ptr)
4272 {
4273  rb_mutex_t *mutex = (rb_mutex_t *)ptr;
4274  native_mutex_lock(&mutex->lock);
4275  if (mutex->cond_waiting > 0)
4276  native_cond_broadcast(&mutex->cond);
4277  native_mutex_unlock(&mutex->lock);
4278 }
4279 
4280 /*
4281  * At maximum, only one thread can use cond_timedwait and watch deadlock
4282  * periodically. Multiple polling thread (i.e. concurrent deadlock check)
4283  * introduces new race conditions. [Bug #6278] [ruby-core:44275]
4284  */
4286 
4287 /*
4288  * call-seq:
4289  * mutex.lock -> self
4290  *
4291  * Attempts to grab the lock and waits if it isn't available.
4292  * Raises +ThreadError+ if +mutex+ was locked by the current thread.
4293  */
4294 VALUE
4296 {
4297  rb_thread_t *th = GET_THREAD();
4298  rb_mutex_t *mutex;
4299  GetMutexPtr(self, mutex);
4300 
4301  /* When running trap handler */
4302  if (!mutex->allow_trap && th->interrupt_mask & TRAP_INTERRUPT_MASK) {
4303  rb_raise(rb_eThreadError, "can't be called from trap context");
4304  }
4305 
4306  if (rb_mutex_trylock(self) == Qfalse) {
4307  if (mutex->th == GET_THREAD()) {
4308  rb_raise(rb_eThreadError, "deadlock; recursive locking");
4309  }
4310 
4311  while (mutex->th != th) {
4312  int interrupted;
4313  enum rb_thread_status prev_status = th->status;
4314  volatile int timeout_ms = 0;
4315  struct rb_unblock_callback oldubf;
4316 
4317  set_unblock_function(th, lock_interrupt, mutex, &oldubf, FALSE);
4319  th->locking_mutex = self;
4320 
4321  native_mutex_lock(&mutex->lock);
4322  th->vm->sleeper++;
4323  /*
4324  * Carefully! while some contended threads are in lock_func(),
4325  * vm->sleepr is unstable value. we have to avoid both deadlock
4326  * and busy loop.
4327  */
4328  if ((vm_living_thread_num(th->vm) == th->vm->sleeper) &&
4329  !patrol_thread) {
4330  timeout_ms = 100;
4331  patrol_thread = th;
4332  }
4333 
4334  GVL_UNLOCK_BEGIN();
4335  interrupted = lock_func(th, mutex, (int)timeout_ms);
4336  native_mutex_unlock(&mutex->lock);
4337  GVL_UNLOCK_END();
4338 
4339  if (patrol_thread == th)
4340  patrol_thread = NULL;
4341 
4342  reset_unblock_function(th, &oldubf);
4343 
4344  th->locking_mutex = Qfalse;
4345  if (mutex->th && interrupted == 2) {
4346  rb_check_deadlock(th->vm);
4347  }
4348  if (th->status == THREAD_STOPPED_FOREVER) {
4349  th->status = prev_status;
4350  }
4351  th->vm->sleeper--;
4352 
4353  if (mutex->th == th) mutex_locked(th, self);
4354 
4355  if (interrupted) {
4357  }
4358  }
4359  }
4360  return self;
4361 }
4362 
4363 /*
4364  * call-seq:
4365  * mutex.owned? -> true or false
4366  *
4367  * Returns +true+ if this lock is currently held by current thread.
4368  * <em>This API is experimental, and subject to change.</em>
4369  */
4370 VALUE
4372 {
4373  VALUE owned = Qfalse;
4374  rb_thread_t *th = GET_THREAD();
4375  rb_mutex_t *mutex;
4376 
4377  GetMutexPtr(self, mutex);
4378 
4379  if (mutex->th == th)
4380  owned = Qtrue;
4381 
4382  return owned;
4383 }
4384 
4385 static const char *
4387 {
4388  const char *err = NULL;
4389 
4390  native_mutex_lock(&mutex->lock);
4391 
4392  if (mutex->th == 0) {
4393  err = "Attempt to unlock a mutex which is not locked";
4394  }
4395  else if (mutex->th != th) {
4396  err = "Attempt to unlock a mutex which is locked by another thread";
4397  }
4398  else {
4399  mutex->th = 0;
4400  if (mutex->cond_waiting > 0)
4401  native_cond_signal(&mutex->cond);
4402  }
4403 
4404  native_mutex_unlock(&mutex->lock);
4405 
4406  if (!err) {
4407  rb_mutex_t *volatile *th_mutex = &th->keeping_mutexes;
4408  while (*th_mutex != mutex) {
4409  th_mutex = &(*th_mutex)->next_mutex;
4410  }
4411  *th_mutex = mutex->next_mutex;
4412  mutex->next_mutex = NULL;
4413  }
4414 
4415  return err;
4416 }
4417 
4418 /*
4419  * call-seq:
4420  * mutex.unlock -> self
4421  *
4422  * Releases the lock.
4423  * Raises +ThreadError+ if +mutex+ wasn't locked by the current thread.
4424  */
4425 VALUE
4427 {
4428  const char *err;
4429  rb_mutex_t *mutex;
4430  GetMutexPtr(self, mutex);
4431 
4432  err = rb_mutex_unlock_th(mutex, GET_THREAD());
4433  if (err) rb_raise(rb_eThreadError, "%s", err);
4434 
4435  return self;
4436 }
4437 
4438 static void
4440 {
4441  if (th->keeping_mutexes) {
4443  }
4444  th->keeping_mutexes = NULL;
4445 }
4446 
4447 static void
4449 {
4450  rb_mutex_t *mutex;
4451 
4452  if (!th->locking_mutex) return;
4453 
4454  GetMutexPtr(th->locking_mutex, mutex);
4455  if (mutex->th == th)
4456  rb_mutex_abandon_all(mutex);
4457  th->locking_mutex = Qfalse;
4458 }
4459 
4460 static void
4462 {
4463  rb_mutex_t *mutex;
4464 
4465  while (mutexes) {
4466  mutex = mutexes;
4467  mutexes = mutex->next_mutex;
4468  mutex->th = 0;
4469  mutex->next_mutex = 0;
4470  }
4471 }
4472 
4473 static VALUE
4475 {
4476  sleep_forever(GET_THREAD(), 1, 0); /* permit spurious check */
4477  return Qnil;
4478 }
4479 
4480 static VALUE
4482 {
4483  struct timeval *t = (struct timeval *)time;
4484  sleep_timeval(GET_THREAD(), *t, 0); /* permit spurious check */
4485  return Qnil;
4486 }
4487 
4488 VALUE
4490 {
4491  time_t beg, end;
4492  struct timeval t;
4493 
4494  if (!NIL_P(timeout)) {
4495  t = rb_time_interval(timeout);
4496  }
4497  rb_mutex_unlock(self);
4498  beg = time(0);
4499  if (NIL_P(timeout)) {
4501  }
4502  else {
4504  }
4505  end = time(0) - beg;
4506  return INT2FIX(end);
4507 }
4508 
4509 /*
4510  * call-seq:
4511  * mutex.sleep(timeout = nil) -> number
4512  *
4513  * Releases the lock and sleeps +timeout+ seconds if it is given and
4514  * non-nil or forever. Raises +ThreadError+ if +mutex+ wasn't locked by
4515  * the current thread.
4516  *
4517  * Note that this method can wakeup without explicit Thread#wakeup call.
4518  * For example, receiving signal and so on.
4519  */
4520 static VALUE
4522 {
4523  VALUE timeout;
4524 
4525  rb_scan_args(argc, argv, "01", &timeout);
4526  return rb_mutex_sleep(self, timeout);
4527 }
4528 
4529 /*
4530  * call-seq:
4531  * mutex.synchronize { ... } -> result of the block
4532  *
4533  * Obtains a lock, runs the block, and releases the lock when the block
4534  * completes. See the example under +Mutex+.
4535  */
4536 
4537 VALUE
4539 {
4540  rb_mutex_lock(mutex);
4541  return rb_ensure(func, arg, rb_mutex_unlock, mutex);
4542 }
4543 
4544 /*
4545  * call-seq:
4546  * mutex.synchronize { ... } -> result of the block
4547  *
4548  * Obtains a lock, runs the block, and releases the lock when the block
4549  * completes. See the example under +Mutex+.
4550  */
4551 static VALUE
4553 {
4554  if (!rb_block_given_p()) {
4555  rb_raise(rb_eThreadError, "must be called with a block");
4556  }
4557 
4558  return rb_mutex_synchronize(self, rb_yield, Qundef);
4559 }
4560 
4561 void rb_mutex_allow_trap(VALUE self, int val)
4562 {
4563  rb_mutex_t *m;
4564  GetMutexPtr(self, m);
4565 
4566  m->allow_trap = val;
4567 }
4568 
4569 /*
4570  * Document-class: ThreadShield
4571  */
4572 static void
4574 {
4575  rb_gc_mark((VALUE)ptr);
4576 }
4577 
4579  "thread_shield",
4580  {thread_shield_mark, 0, 0,},
4581 };
4582 
4583 static VALUE
4585 {
4586  return TypedData_Wrap_Struct(klass, &thread_shield_data_type, (void *)mutex_alloc(0));
4587 }
4588 
4589 #define GetThreadShieldPtr(obj) ((VALUE)rb_check_typeddata((obj), &thread_shield_data_type))
4590 #define THREAD_SHIELD_WAITING_MASK (FL_USER0|FL_USER1|FL_USER2|FL_USER3|FL_USER4|FL_USER5|FL_USER6|FL_USER7|FL_USER8|FL_USER9|FL_USER10|FL_USER11|FL_USER12|FL_USER13|FL_USER14|FL_USER15|FL_USER16|FL_USER17|FL_USER18|FL_USER19)
4591 #define THREAD_SHIELD_WAITING_SHIFT (FL_USHIFT)
4592 #define rb_thread_shield_waiting(b) (int)((RBASIC(b)->flags&THREAD_SHIELD_WAITING_MASK)>>THREAD_SHIELD_WAITING_SHIFT)
4593 
4594 static inline void
4596 {
4597  unsigned int w = rb_thread_shield_waiting(b);
4598  w++;
4600  rb_raise(rb_eRuntimeError, "waiting count overflow");
4601  RBASIC(b)->flags &= ~THREAD_SHIELD_WAITING_MASK;
4602  RBASIC(b)->flags |= ((VALUE)w << THREAD_SHIELD_WAITING_SHIFT);
4603 }
4604 
4605 static inline void
4607 {
4608  unsigned int w = rb_thread_shield_waiting(b);
4609  if (!w) rb_raise(rb_eRuntimeError, "waiting count underflow");
4610  w--;
4611  RBASIC(b)->flags &= ~THREAD_SHIELD_WAITING_MASK;
4612  RBASIC(b)->flags |= ((VALUE)w << THREAD_SHIELD_WAITING_SHIFT);
4613 }
4614 
4615 VALUE
4617 {
4618  VALUE thread_shield = thread_shield_alloc(rb_cThreadShield);
4619  rb_mutex_lock((VALUE)DATA_PTR(thread_shield));
4620  return thread_shield;
4621 }
4622 
4623 /*
4624  * Wait a thread shield.
4625  *
4626  * Returns
4627  * true: acquired the thread shield
4628  * false: the thread shield was destroyed and no other threads waiting
4629  * nil: the thread shield was destroyed but still in use
4630  */
4631 VALUE
4633 {
4634  VALUE mutex = GetThreadShieldPtr(self);
4635  rb_mutex_t *m;
4636 
4637  if (!mutex) return Qfalse;
4638  GetMutexPtr(mutex, m);
4639  if (m->th == GET_THREAD()) return Qnil;
4641  rb_mutex_lock(mutex);
4643  if (DATA_PTR(self)) return Qtrue;
4644  rb_mutex_unlock(mutex);
4645  return rb_thread_shield_waiting(self) > 0 ? Qnil : Qfalse;
4646 }
4647 
4648 /*
4649  * Release a thread shield, and return true if it has waiting threads.
4650  */
4651 VALUE
4653 {
4654  VALUE mutex = GetThreadShieldPtr(self);
4655  rb_mutex_unlock(mutex);
4656  return rb_thread_shield_waiting(self) > 0 ? Qtrue : Qfalse;
4657 }
4658 
4659 /*
4660  * Release and destroy a thread shield, and return true if it has waiting threads.
4661  */
4662 VALUE
4664 {
4665  VALUE mutex = GetThreadShieldPtr(self);
4666  DATA_PTR(self) = 0;
4667  rb_mutex_unlock(mutex);
4668  return rb_thread_shield_waiting(self) > 0 ? Qtrue : Qfalse;
4669 }
4670 
4671 /* variables for recursive traversals */
4673 
4674 /*
4675  * Returns the current "recursive list" used to detect recursion.
4676  * This list is a hash table, unique for the current thread and for
4677  * the current __callee__.
4678  */
4679 
4680 static VALUE
4682 {
4683  volatile VALUE hash = rb_thread_local_aref(rb_thread_current(), recursive_key);
4684  VALUE list;
4685  if (NIL_P(hash) || !RB_TYPE_P(hash, T_HASH)) {
4686  hash = rb_hash_new();
4687  OBJ_UNTRUST(hash);
4688  rb_thread_local_aset(rb_thread_current(), recursive_key, hash);
4689  list = Qnil;
4690  }
4691  else {
4692  list = rb_hash_aref(hash, sym);
4693  }
4694  if (NIL_P(list) || !RB_TYPE_P(list, T_HASH)) {
4695  list = rb_hash_new();
4696  OBJ_UNTRUST(list);
4697  rb_hash_aset(hash, sym, list);
4698  }
4699  return list;
4700 }
4701 
4702 /*
4703  * Returns Qtrue iff obj_id (or the pair <obj, paired_obj>) is already
4704  * in the recursion list.
4705  * Assumes the recursion list is valid.
4706  */
4707 
/*
 * Return Qtrue iff +obj_id+ (or the pair <obj_id, paired_obj_id> when
 * paired_obj_id != 0) is already recorded in the recursion list +list+.
 *
 * The list maps obj_id -> Qtrue (plain entry), -> a single paired id,
 * or -> a hash {paired_id => Qtrue, ...} (several pairs); this mirrors
 * what recursive_push stores.  Assumes +list+ is a valid recursion list.
 */
static VALUE
recursive_check(VALUE list, VALUE obj_id, VALUE paired_obj_id)
{
#if SIZEOF_LONG == SIZEOF_VOIDP
  /* object ids always fit in a Fixnum: plain VALUE comparison suffices */
  #define OBJ_ID_EQL(obj_id, other) ((obj_id) == (other))
#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
  /* object ids can be Bignums here: compare by numeric value then */
  #define OBJ_ID_EQL(obj_id, other) (RB_TYPE_P((obj_id), T_BIGNUM) ? \
    rb_big_eql((obj_id), (other)) : ((obj_id) == (other)))
#endif

    VALUE pair_list = rb_hash_lookup2(list, obj_id, Qundef);
    if (pair_list == Qundef)
	/* obj not seen at all */
	return Qfalse;
    if (paired_obj_id) {
	if (!RB_TYPE_P(pair_list, T_HASH)) {
	    /* single stored pair: must match exactly */
	    if (!OBJ_ID_EQL(paired_obj_id, pair_list))
		return Qfalse;
	}
	else {
	    /* several stored pairs: membership test in the pair hash */
	    if (NIL_P(rb_hash_lookup(pair_list, paired_obj_id)))
		return Qfalse;
	}
    }
    return Qtrue;
}
4733 
4734 /*
4735  * Pushes obj_id (or the pair <obj_id, paired_obj_id>) in the recursion list.
4736  * For a single obj_id, it sets list[obj_id] to Qtrue.
4737  * For a pair, it sets list[obj_id] to paired_obj_id if possible,
4738  * otherwise list[obj_id] becomes a hash like:
4739  * {paired_obj_id_1 => true, paired_obj_id_2 => true, ... }
4740  * Assumes the recursion list is valid.
4741  */
4742 
4743 static void
4745 {
4746  VALUE pair_list;
4747 
4748  if (!paired_obj) {
4749  rb_hash_aset(list, obj, Qtrue);
4750  }
4751  else if ((pair_list = rb_hash_lookup2(list, obj, Qundef)) == Qundef) {
4752  rb_hash_aset(list, obj, paired_obj);
4753  }
4754  else {
4755  if (!RB_TYPE_P(pair_list, T_HASH)){
4756  VALUE other_paired_obj = pair_list;
4757  pair_list = rb_hash_new();
4758  OBJ_UNTRUST(pair_list);
4759  rb_hash_aset(pair_list, other_paired_obj, Qtrue);
4760  rb_hash_aset(list, obj, pair_list);
4761  }
4762  rb_hash_aset(pair_list, paired_obj, Qtrue);
4763  }
4764 }
4765 
4766 /*
4767  * Pops obj_id (or the pair <obj_id, paired_obj_id>) from the recursion list.
4768  * For a pair, if list[obj_id] is a hash, then paired_obj_id is
4769  * removed from the hash and no attempt is made to simplify
4770  * list[obj_id] from {only_one_paired_id => true} to only_one_paired_id
4771  * Assumes the recursion list is valid.
4772  */
4773 
4774 static int
4776 {
4777  if (paired_obj) {
4778  VALUE pair_list = rb_hash_lookup2(list, obj, Qundef);
4779  if (pair_list == Qundef) {
4780  return 0;
4781  }
4782  if (RB_TYPE_P(pair_list, T_HASH)) {
4783  rb_hash_delete(pair_list, paired_obj);
4784  if (!RHASH_EMPTY_P(pair_list)) {
4785  return 1; /* keep hash until is empty */
4786  }
4787  }
4788  }
4789  rb_hash_delete(list, obj);
4790  return 1;
4791 }
4792 
4794  VALUE (*func) (VALUE, VALUE, int);
4795  VALUE list;
4796  VALUE obj;
4797  VALUE objid;
4798  VALUE pairid;
4799  VALUE arg;
4800 };
4801 
4802 static VALUE
4804 {
4805  VALUE result = Qundef;
4806  int state;
4807 
4808  recursive_push(p->list, p->objid, p->pairid);
4809  PUSH_TAG();
4810  if ((state = EXEC_TAG()) == 0) {
4811  result = (*p->func)(p->obj, p->arg, FALSE);
4812  }
4813  POP_TAG();
4814  recursive_pop(p->list, p->objid, p->pairid);
4815  if (state)
4816  JUMP_TAG(state);
4817  return result;
4818 }
4819 
4820 /*
4821  * Calls func(obj, arg, recursive), where recursive is non-zero if the
4822  * current method is called recursively on obj, or on the pair <obj, pairid>
4823  * If outer is 0, then the innermost func will be called with recursive set
4824  * to Qtrue, otherwise the outermost func will be called. In the latter case,
4825  * all inner func are short-circuited by throw.
4826  * Implementation details: the value thrown is the recursive list which is
4827  * proper to the current method and unlikely to be catched anywhere else.
4828  * list[recursive_key] is used as a flag for the outermost call.
4829  */
4830 
/*
 * Core of the rb_exec_recursive* family: call func(obj, arg, recursive),
 * where recursive is TRUE iff the current method (__callee__) is already
 * executing on obj (or on the pair <obj, pairid> when pairid != 0).
 *
 * If +outer+ is 0 the INNERMOST recursive call receives recursive=TRUE;
 * otherwise the OUTERMOST one does, and every inner call is unwound by
 * throwing the recursion list itself (caught by rb_catch_obj below).
 * list[recursive_key] acts as the "an outermost call is active" flag.
 */
static VALUE
exec_recursive(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE pairid, VALUE arg, int outer)
{
    VALUE result = Qundef;
    const ID mid = rb_frame_last_func();
    const VALUE sym = mid ? ID2SYM(mid) : ID2SYM(idNULL);
    struct exec_recursive_params p;
    int outermost;
    p.list = recursive_list_access(sym);
    p.objid = rb_obj_id(obj);
    p.obj = obj;
    p.pairid = pairid;
    p.arg = arg;
    /* outermost only if no enclosing outer-mode call set the flag yet */
    outermost = outer && !recursive_check(p.list, ID2SYM(recursive_key), 0);

    if (recursive_check(p.list, p.objid, pairid)) {
	if (outer && !outermost) {
	    /* inner call of an outer-mode recursion: unwind to the
	     * outermost rb_catch_obj, using the list as the throw tag */
	    rb_throw_obj(p.list, p.list);
	}
	return (*func)(obj, arg, TRUE);
    }
    else {
	p.func = func;

	if (outermost) {
	    recursive_push(p.list, ID2SYM(recursive_key), 0);
	    result = rb_catch_obj(p.list, exec_recursive_i, (VALUE)&p);
	    if (!recursive_pop(p.list, ID2SYM(recursive_key), 0)) {
		rb_raise(rb_eTypeError, "invalid inspect_tbl pair_list "
			 "for %+"PRIsVALUE" in %+"PRIsVALUE,
			 sym, rb_thread_current());
	    }
	    if (result == p.list) {
		/* recursion detected somewhere inside: rerun the
		 * outermost func once with recursive=TRUE */
		result = (*func)(obj, arg, TRUE);
	    }
	}
	else {
	    result = exec_recursive_i(0, &p);
	}
    }
    /* volatile read keeps p from being optimized away across the
     * PUSH_TAG/JUMP_TAG non-local jumps in exec_recursive_i */
    *(volatile struct exec_recursive_params *)&p;
    return result;
}
4874 
4875 /*
4876  * Calls func(obj, arg, recursive), where recursive is non-zero if the
4877  * current method is called recursively on obj
4878  */
4879 
4880 VALUE
4882 {
4883  return exec_recursive(func, obj, 0, arg, 0);
4884 }
4885 
4886 /*
4887  * Calls func(obj, arg, recursive), where recursive is non-zero if the
4888  * current method is called recursively on the ordered pair <obj, paired_obj>
4889  */
4890 
4891 VALUE
4893 {
4894  return exec_recursive(func, obj, rb_obj_id(paired_obj), arg, 0);
4895 }
4896 
4897 /*
4898  * If recursion is detected on the current method and obj, the outermost
4899  * func will be called with (obj, arg, Qtrue). All inner func will be
4900  * short-circuited using throw.
4901  */
4902 
4903 VALUE
4905 {
4906  return exec_recursive(func, obj, 0, arg, 1);
4907 }
4908 
4909 /*
4910  * If recursion is detected on the current method, obj and paired_obj,
4911  * the outermost func will be called with (obj, arg, Qtrue). All inner
4912  * func will be short-circuited using throw.
4913  */
4914 
4915 VALUE
4917 {
4918  return exec_recursive(func, obj, rb_obj_id(paired_obj), arg, 1);
4919 }
4920 
4921 /*
4922  * call-seq:
4923  * thr.backtrace -> array
4924  *
4925  * Returns the current backtrace of the target thread.
4926  *
4927  */
4928 
4929 static VALUE
4931 {
4932  return vm_thread_backtrace(argc, argv, thval);
4933 }
4934 
4935 /* call-seq:
4936  * thr.backtrace_locations(*args) -> array or nil
4937  *
4938  * Returns the execution stack for the target thread---an array containing
4939  * backtrace location objects.
4940  *
4941  * See Thread::Backtrace::Location for more information.
4942  *
4943  * This method behaves similarly to Kernel#caller_locations except it applies
4944  * to a specific thread.
4945  */
4946 static VALUE
4948 {
4949  return vm_thread_backtrace_locations(argc, argv, thval);
4950 }
4951 
4952 /*
4953  * Document-class: ThreadError
4954  *
4955  * Raised when an invalid operation is attempted on a thread.
4956  *
4957  * For example, when no other thread has been started:
4958  *
4959  * Thread.stop
4960  *
4961  * <em>raises the exception:</em>
4962  *
4963  * ThreadError: stopping only thread
4964  */
4965 
4966 /*
4967  * +Thread+ encapsulates the behavior of a thread of
4968  * execution, including the main thread of the Ruby script.
4969  *
4970  * In the descriptions of the methods in this class, the parameter _sym_
4971  * refers to a symbol, which is either a quoted string or a
4972  * +Symbol+ (such as <code>:name</code>).
4973  */
4974 
4975 void
4977 {
4978 #undef rb_intern
4979 #define rb_intern(str) rb_intern_const(str)
4980 
4981  VALUE cThGroup;
4982  rb_thread_t *th = GET_THREAD();
4983 
4984  sym_never = ID2SYM(rb_intern("never"));
4985  sym_immediate = ID2SYM(rb_intern("immediate"));
4986  sym_on_blocking = ID2SYM(rb_intern("on_blocking"));
4987 
4998  rb_define_singleton_method(rb_cThread, "abort_on_exception", rb_thread_s_abort_exc, 0);
5000 #if THREAD_DEBUG < 0
5001  rb_define_singleton_method(rb_cThread, "DEBUG", rb_thread_s_debug, 0);
5002  rb_define_singleton_method(rb_cThread, "DEBUG=", rb_thread_s_debug_set, 1);
5003 #endif
5006  rb_define_method(rb_cThread, "pending_interrupt?", rb_thread_pending_interrupt_p, -1);
5007 
5008  rb_define_method(rb_cThread, "initialize", thread_initialize, -2);
5013  rb_define_method(rb_cThread, "terminate", rb_thread_kill, 0);
5024  rb_define_method(rb_cThread, "thread_variable_get", rb_thread_variable_get, 1);
5025  rb_define_method(rb_cThread, "thread_variable_set", rb_thread_variable_set, 2);
5026  rb_define_method(rb_cThread, "thread_variables", rb_thread_variables, 0);
5027  rb_define_method(rb_cThread, "thread_variable?", rb_thread_variable_p, 1);
5030  rb_define_method(rb_cThread, "abort_on_exception", rb_thread_abort_exc, 0);
5031  rb_define_method(rb_cThread, "abort_on_exception=", rb_thread_abort_exc_set, 1);
5035  rb_define_method(rb_cThread, "backtrace_locations", rb_thread_backtrace_locations_m, -1);
5036 
5038 
5039  closed_stream_error = rb_exc_new2(rb_eIOError, "stream closed");
5042 
5043  cThGroup = rb_define_class("ThreadGroup", rb_cObject);
5045  rb_define_method(cThGroup, "list", thgroup_list, 0);
5046  rb_define_method(cThGroup, "enclose", thgroup_enclose, 0);
5047  rb_define_method(cThGroup, "enclosed?", thgroup_enclosed_p, 0);
5048  rb_define_method(cThGroup, "add", thgroup_add, 1);
5049 
5050  {
5051  th->thgroup = th->vm->thgroup_default = rb_obj_alloc(cThGroup);
5052  rb_define_const(cThGroup, "Default", th->thgroup);
5053  }
5054 
5055  rb_cMutex = rb_define_class("Mutex", rb_cObject);
5057  rb_define_method(rb_cMutex, "initialize", mutex_initialize, 0);
5059  rb_define_method(rb_cMutex, "try_lock", rb_mutex_trylock, 0);
5062  rb_define_method(rb_cMutex, "sleep", mutex_sleep, -1);
5065 
5066  recursive_key = rb_intern("__recursive_key__");
5068 
5069  /* init thread core */
5070  {
5071  /* main thread setting */
5072  {
5073  /* acquire global vm lock */
5074  gvl_init(th->vm);
5075  gvl_acquire(th->vm, th);
5076  native_mutex_initialize(&th->vm->thread_destruct_lock);
5077  native_mutex_initialize(&th->interrupt_lock);
5078 
5082 
5083  th->interrupt_mask = 0;
5084  }
5085  }
5086 
5087  rb_thread_create_timer_thread();
5088 
5089  /* suppress warnings on cygwin, mingw and mswin.*/
5090  (void)native_mutex_trylock;
5091 }
5092 
5093 int
5095 {
5096  rb_thread_t *th = ruby_thread_from_native();
5097 
5098  return th != 0;
5099 }
5100 
5101 static int
5103 {
5104  VALUE thval = key;
5105  rb_thread_t *th;
5106  GetThreadPtr(thval, th);
5107 
5109  *found = 1;
5110  }
5111  else if (th->locking_mutex) {
5112  rb_mutex_t *mutex;
5113  GetMutexPtr(th->locking_mutex, mutex);
5114 
5115  native_mutex_lock(&mutex->lock);
5116  if (mutex->th == th || (!mutex->th && mutex->cond_waiting)) {
5117  *found = 1;
5118  }
5119  native_mutex_unlock(&mutex->lock);
5120  }
5121 
5122  return (*found) ? ST_STOP : ST_CONTINUE;
5123 }
5124 
#ifdef DEBUG_DEADLOCK_CHECK
/*
 * st_foreach callback used by rb_check_deadlock's debug dump: print one
 * line of per-thread state (thread pointer, status, interrupt flag and,
 * when the thread is blocked on a mutex, that mutex's owner and waiter
 * count).  Always returns ST_CONTINUE so the whole table is printed.
 */
static int
debug_i(st_data_t key, st_data_t val, int *found)
{
    VALUE thval = key;
    rb_thread_t *th;
    GetThreadPtr(thval, th);

    /* %p requires a void * argument per C99/C11 fprintf; passing an
     * object pointer directly is undefined behavior, so cast. */
    printf("th:%p %d %d", (void *)th, th->status, th->interrupt_flag);
    if (th->locking_mutex) {
	rb_mutex_t *mutex;
	GetMutexPtr(th->locking_mutex, mutex);

	native_mutex_lock(&mutex->lock);
	printf(" %p %d\n", (void *)mutex->th, mutex->cond_waiting);
	native_mutex_unlock(&mutex->lock);
    }
    else
	puts("");

    return ST_CONTINUE;
}
#endif
5148 
5149 static void
5151 {
5152  int found = 0;
5153 
5154  if (vm_living_thread_num(vm) > vm->sleeper) return;
5155  if (vm_living_thread_num(vm) < vm->sleeper) rb_bug("sleeper must not be more than vm_living_thread_num(vm)");
5156  if (patrol_thread && patrol_thread != GET_THREAD()) return;
5157 
5159 
5160  if (!found) {
5161  VALUE argv[2];
5162  argv[0] = rb_eFatal;
5163  argv[1] = rb_str_new2("No live threads left. Deadlock?");
5164 #ifdef DEBUG_DEADLOCK_CHECK
5165  printf("%d %d %p %p\n", vm->living_threads->num_entries, vm->sleeper, GET_THREAD(), vm->main_thread);
5166  st_foreach(vm->living_threads, debug_i, (st_data_t)0);
5167 #endif
5168  vm->sleeper--;
5169  rb_threadptr_raise(vm->main_thread, 2, argv);
5170  }
5171 }
5172 
5173 static void
5174 update_coverage(rb_event_flag_t event, VALUE proc, VALUE self, ID id, VALUE klass)
5175 {
5177  if (coverage && RBASIC(coverage)->klass == 0) {
5178  long line = rb_sourceline() - 1;
5179  long count;
5180  if (RARRAY_PTR(coverage)[line] == Qnil) {
5181  return;
5182  }
5183  count = FIX2LONG(RARRAY_PTR(coverage)[line]) + 1;
5184  if (POSFIXABLE(count)) {
5185  RARRAY_PTR(coverage)[line] = LONG2FIX(count);
5186  }
5187  }
5188 }
5189 
5190 VALUE
5192 {
5193  return GET_VM()->coverages;
5194 }
5195 
5196 void
5198 {
5199  GET_VM()->coverages = coverages;
5201 }
5202 
5203 void
5205 {
5206  GET_VM()->coverages = Qfalse;
5208 }
5209 
5210 VALUE
5212 {
5213  VALUE interrupt_mask = rb_hash_new();
5214  rb_thread_t *cur_th = GET_THREAD();
5215 
5216  rb_hash_aset(interrupt_mask, rb_cObject, sym_never);
5217  rb_ary_push(cur_th->pending_interrupt_mask_stack, interrupt_mask);
5218 
5219  return rb_ensure(b_proc, data, rb_ary_pop, cur_th->pending_interrupt_mask_stack);
5220 }
static int vm_living_thread_num(rb_vm_t *vm)
Definition: thread.c:2906
struct timeval rb_time_interval(VALUE num)
Definition: time.c:2496
rb_control_frame_t * cfp
Definition: vm_core.h:500
void rb_gc_finalize_deferred(void)
Definition: gc.c:1457
void rb_threadptr_unlock_all_locking_mutexes(rb_thread_t *th)
Definition: thread.c:392
#define RUBY_EVENT_SWITCH
Definition: ruby.h:1600
rb_thread_list_t * join_list
Definition: vm_core.h:581
#define T_OBJECT
Definition: ruby.h:485
static VALUE sym_never
Definition: thread.c:84
static VALUE thgroup_enclose(VALUE group)
Definition: thread.c:3988
VALUE rb_eStandardError
Definition: error.c:514
static VALUE rb_thread_variable_p(VALUE thread, VALUE key)
Definition: thread.c:3003
#define eKillSignal
Definition: thread.c:94
#define RUBY_VM_CHECK_INTS(th)
Definition: vm_core.h:955
unsigned long running_time_us
Definition: vm_core.h:617
rb_vm_t * vm
Definition: vm_core.h:495
VALUE rb_exc_new(VALUE etype, const char *ptr, long len)
Definition: error.c:541
static VALUE thgroup_add(VALUE group, VALUE thread)
Definition: thread.c:4046
static int check_deadlock_i(st_data_t key, st_data_t val, int *found)
Definition: thread.c:5102
VALUE rb_mutex_sleep(VALUE self, VALUE timeout)
Definition: thread.c:4489
int ruby_thread_has_gvl_p(void)
Definition: thread.c:1451
VALUE rb_ary_pop(VALUE ary)
Definition: array.c:866
static VALUE rb_thread_s_handle_interrupt(VALUE self, VALUE mask_arg)
Definition: thread.c:1747
static const rb_thread_t * patrol_thread
Definition: thread.c:4285
struct rb_mutex_struct * next_mutex
Definition: thread.c:382
void ruby_thread_stack_overflow(rb_thread_t *th)
Definition: thread.c:2039
#define RARRAY_LEN(a)
Definition: ruby.h:899
void rb_bug(const char *fmt,...)
Definition: error.c:295
static VALUE rb_thread_priority(VALUE thread)
Definition: thread.c:3035
int gettimeofday(struct timeval *, struct timezone *)
Definition: win32.c:4023
#define FALSE
Definition: nkf.h:174
#define rb_hash_lookup
Definition: tcltklib.c:268
#define mutex_mark
Definition: thread.c:4108
static int lock_func(rb_thread_t *th, rb_mutex_t *mutex, int timeout_ms)
Definition: thread.c:4231
static const char * rb_mutex_unlock_th(rb_mutex_t *mutex, rb_thread_t volatile *th)
Definition: thread.c:4386
VALUE rb_obj_id(VALUE obj)
Definition: gc.c:1690
static void thread_cleanup_func_before_exec(void *th_ptr)
Definition: thread.c:443
#define INT2NUM(x)
Definition: ruby.h:1178
static VALUE trap(int sig, sighandler_t func, VALUE command)
Definition: signal.c:928
struct rb_thread_struct * running_thread
Definition: vm_core.h:344
int i
Definition: win32ole.c:784
VALUE rb_make_exception(int argc, VALUE *argv)
Definition: eval.c:642
void rb_mutex_allow_trap(VALUE self, int val)
Definition: thread.c:4561
struct timeval * tv
Definition: thread.c:3657
Definition: st.h:77
#define RUBY_VM_SET_INTERRUPT(th)
Definition: vm_core.h:923
static VALUE rb_thread_abort_exc_set(VALUE thread, VALUE val)
Definition: thread.c:2478
VALUE rb_mutex_owned_p(VALUE self)
Definition: thread.c:4371
st_table * local_storage
Definition: vm_core.h:579
double limit
Definition: thread.c:738
void rb_thread_lock_unlock(rb_thread_lock_t *lock)
Definition: thread.c:281
Definition: st.h:108
int pending_interrupt_queue_checked
Definition: vm_core.h:551
VALUE rb_eSignal
Definition: error.c:512
static void rb_mutex_abandon_all(rb_mutex_t *mutexes)
Definition: thread.c:4461
struct rb_blocking_region_buffer * rb_thread_blocking_region_begin(void)
Definition: thread.c:1191
rb_fdset_t * read
Definition: thread.c:3654
VALUE rb_exec_recursive_outer(VALUE(*func)(VALUE, VALUE, int), VALUE obj, VALUE arg)
Definition: thread.c:4904
#define NUM2INT(x)
Definition: ruby.h:622
int count
Definition: encoding.c:51
static int max(int a, int b)
Definition: strftime.c:141
VALUE(* func)(VALUE, VALUE, int)
Definition: thread.c:4794
void rb_define_singleton_method(VALUE obj, const char *name, VALUE(*func)(ANYARGS), int argc)
Defines a singleton method for obj.
Definition: class.c:1497
static VALUE thgroup_enclosed_p(VALUE group)
Definition: thread.c:4008
int rb_thread_check_trap_pending(void)
Definition: thread.c:1108
if(dispIdMember==DISPID_VALUE)
Definition: win32ole.c:791
void rb_threadptr_execute_interrupts(rb_thread_t *th, int blocking_timing)
Definition: thread.c:1899
VALUE rb_thread_list(void)
Definition: thread.c:2350
static VALUE thread_join_sleep(VALUE arg)
Definition: thread.c:764
rb_thread_lock_t interrupt_lock
Definition: vm_core.h:556
VALUE rb_exec_recursive(VALUE(*func)(VALUE, VALUE, int), VALUE obj, VALUE arg)
Definition: thread.c:4881
pthread_mutex_t rb_thread_lock_t
rb_thread_lock_t thread_destruct_lock
Definition: vm_core.h:341
#define CLASS_OF(v)
Definition: ruby.h:448
static int terminate_atfork_before_exec_i(st_data_t key, st_data_t val, st_data_t current_th)
Definition: thread.c:3861
void rb_thread_blocking_region_end(struct rb_blocking_region_buffer *region)
Definition: thread.c:1200
static VALUE rb_thread_variables(VALUE thread)
Definition: thread.c:2974
struct rb_thread_struct * th
Definition: vm_core.h:489
void rb_unblock_function_t(void *)
Definition: intern.h:835
VALUE rb_ary_delete_at(VALUE ary, long pos)
Definition: array.c:2801
Definition: id.h:81
rb_unblock_function_t * func
Definition: vm_core.h:480
#define Qtrue
Definition: ruby.h:434
int rb_remove_event_hook(rb_event_hook_func_t func)
Definition: vm_trace.c:194
int st_insert(st_table *, st_data_t, st_data_t)
static void update_coverage(rb_event_flag_t event, VALUE proc, VALUE self, ID id, VALUE klass)
Definition: thread.c:5174
static VALUE thread_s_new(int argc, VALUE *argv, VALUE klass)
Definition: thread.c:667
void rb_error_frozen(const char *what)
Definition: error.c:1980
#define TypedData_Wrap_Struct(klass, data_type, sval)
Definition: ruby.h:1016
VALUE pending_interrupt_mask_stack
Definition: vm_core.h:552
VALUE rb_ary_shift(VALUE ary)
Definition: array.c:916
#define TypedData_Get_Struct(obj, type, data_type, sval)
Definition: ruby.h:1030
VALUE rb_mod_ancestors(VALUE mod)
Definition: class.c:909
static VALUE mutex_initialize(VALUE self)
Definition: thread.c:4167
VALUE coverage
Definition: vm_core.h:219
static void rb_mutex_abandon_keeping_mutexes(rb_thread_t *th)
Definition: thread.c:4439
void rb_threadptr_signal_raise(rb_thread_t *th, int sig)
Definition: thread.c:2015
long tv_sec
Definition: ossl_asn1.c:17
struct rb_thread_struct volatile * th
Definition: thread.c:380
static struct timeval double2timeval(double d)
Definition: thread.c:926
#define sysstack_error
Definition: vm_core.h:868
SOCKET rb_w32_get_osfhandle(int)
Definition: win32.c:972
VALUE rb_eTypeError
Definition: error.c:516
VALUE rb_thread_stop(void)
Definition: thread.c:2299
#define TH_JUMP_TAG(th, st)
Definition: eval_intern.h:144
static VALUE mutex_alloc(VALUE klass)
Definition: thread.c:4149
VALUE rb_mutex_synchronize(VALUE mutex, VALUE(*func)(VALUE arg), VALUE arg)
Definition: thread.c:4538
VALUE rb_catch_obj(VALUE, VALUE(*)(ANYARGS), VALUE)
static const rb_data_type_t mutex_data_type
Definition: thread.c:4132
VALUE rb_ary_push(VALUE ary, VALUE item)
Definition: array.c:822
void rb_thread_wait_for(struct timeval time)
Definition: thread.c:1074
st_table * living_threads
Definition: vm_core.h:346
void rb_signal_exec(rb_thread_t *th, int sig)
Definition: signal.c:740
static int handle_interrupt_arg_check_i(VALUE key, VALUE val)
Definition: thread.c:1631
VALUE rb_ary_tmp_new(long capa)
Definition: array.c:465
#define rb_fd_zero(f)
Definition: intern.h:327
static VALUE rb_thread_safe_level(VALUE thread)
Definition: thread.c:2639
static VALUE rb_thread_aset(VALUE self, VALUE id, VALUE val)
Definition: thread.c:2789
VALUE rb_thread_current(void)
Definition: thread.c:2358
#define PRIxVALUE
Definition: ruby.h:145
void rb_threadptr_pending_interrupt_enque(rb_thread_t *th, VALUE v)
Definition: thread.c:1503
#define OBJ_ID_EQL(obj_id, other)
void rb_raise(VALUE exc, const char *fmt,...)
Definition: error.c:1788
VALUE rb_thread_alloc(VALUE klass)
Definition: vm.c:2071
static VALUE rb_mutex_sleep_forever(VALUE time)
Definition: thread.c:4474
static VALUE rb_thread_abort_exc(VALUE thread)
Definition: thread.c:2460
static void blocking_region_end(rb_thread_t *th, struct rb_blocking_region_buffer *region)
Definition: thread.c:1177
void * rb_thread_call_without_gvl(void *(*func)(void *data), void *data1, rb_unblock_function_t *ubf, void *data2)
Definition: thread.c:1332
VALUE rb_ary_clear(VALUE ary)
Definition: array.c:3208
static void clear_coverage(void)
Definition: thread.c:3811
int rb_thread_alone(void)
Definition: thread.c:2912
VALUE rb_convert_type(VALUE, int, const char *, const char *)
Definition: object.c:2425
#define TH_EXEC_TAG()
Definition: eval_intern.h:139
void rb_define_alloc_func(VALUE, rb_alloc_func_t)
VALUE rb_obj_is_kind_of(VALUE, VALUE)
Definition: object.c:593
#define T_HASH
Definition: ruby.h:493
static void sleep_forever(rb_thread_t *th, int nodeadlock, int spurious_check)
Definition: thread.c:946
VALUE rb_thread_local_aref(VALUE thread, ID id)
Definition: thread.c:2671
VALUE rb_eSecurityError
Definition: error.c:525
#define DATA_PTR(dta)
Definition: ruby.h:985
#define RUBY_VM_SET_TRAP_INTERRUPT(th)
Definition: vm_core.h:925
static size_t thgroup_memsize(const void *ptr)
Definition: thread.c:3885
rb_thread_cond_t cond
Definition: thread.c:379
static void rb_mutex_abandon_locking_mutex(rb_thread_t *th)
Definition: thread.c:4448
static VALUE sym_immediate
Definition: thread.c:82
static int blocking_region_begin(rb_thread_t *th, struct rb_blocking_region_buffer *region, rb_unblock_function_t *ubf, void *arg, int fail_if_interrupted)
Definition: thread.c:1159
void rb_gc_mark(VALUE ptr)
Definition: gc.c:2600
static void thread_shield_mark(void *ptr)
Definition: thread.c:4573
st_data_t st_index_t
Definition: st.h:63
#define TAG_RAISE
Definition: eval_intern.h:168
#define PUSH_TAG()
Definition: eval_intern.h:136
static size_t mutex_memsize(const void *ptr)
Definition: thread.c:4127
static volatile int system_working
Definition: thread.c:96
static VALUE thread_join(rb_thread_t *target_th, double delay)
Definition: thread.c:790
static VALUE remove_from_join_list(VALUE arg)
Definition: thread.c:743
VALUE rb_thread_kill(VALUE thread)
Definition: thread.c:2141
VALUE rb_mutex_locked_p(VALUE self)
Definition: thread.c:4185
static int rb_threadptr_dead(rb_thread_t *th)
Definition: thread.c:2533
#define FIXNUM_P(f)
Definition: ruby.h:355
static VALUE rb_thread_alive_p(VALUE thread)
Definition: thread.c:2591
rb_fdset_t * write
Definition: thread.c:3655
VALUE rb_exec_recursive_paired(VALUE(*func)(VALUE, VALUE, int), VALUE obj, VALUE paired_obj, VALUE arg)
Definition: thread.c:4892
#define rb_fd_rcopy(d, s)
Definition: thread.c:3338
static VALUE exec_recursive_i(VALUE tag, struct exec_recursive_params *p)
Definition: thread.c:4803
void rb_thread_start_timer_thread(void)
Definition: thread.c:3790
static rb_fdset_t * init_set_fd(int fd, rb_fdset_t *fds)
Definition: thread.c:3641
ID rb_frame_last_func(void)
Definition: eval.c:935
const char * rb_obj_classname(VALUE)
Definition: variable.c:396
VALUE rb_cMutex
Definition: thread.c:79
#define RHASH_TBL(h)
Definition: ruby.h:928
int allow_trap
Definition: thread.c:383
#define RB_WAITFD_OUT
Definition: io.h:49
VALUE thgroup_default
Definition: vm_core.h:347
#define rb_fd_set(n, f)
Definition: intern.h:328
time_t tv_sec
Definition: missing.h:46
#define sym(x)
Definition: date_core.c:3715
static VALUE rb_thread_stop_p(VALUE thread)
Definition: thread.c:2614
static void thread_cleanup_func(void *th_ptr, int atfork)
Definition: thread.c:454
static double timeofday(void)
Definition: thread.c:1042
#define TAG_FATAL
Definition: eval_intern.h:170
int ruby_native_thread_p(void)
Definition: thread.c:5094
static VALUE rb_thread_s_abort_exc_set(VALUE self, VALUE val)
Definition: thread.c:2442
#define rb_fd_isset(n, f)
Definition: intern.h:330
Win32OLEIDispatch * p
Definition: win32ole.c:786
VALUE(* first_func)(ANYARGS)
Definition: vm_core.h:585
void rb_hash_foreach(VALUE hash, int(*func)(ANYARGS), VALUE farg)
Definition: hash.c:200
VALUE rb_thread_wakeup(VALUE thread)
Definition: thread.c:2228
static VALUE rb_thread_s_main(VALUE klass)
Definition: thread.c:2392
void rb_exc_raise(VALUE mesg)
Definition: eval.c:527
static void rb_thread_wait_fd_rw(int fd, int read)
Definition: thread.c:3419
static VALUE sym_on_blocking
Definition: thread.c:83
int args
Definition: win32ole.c:785
VALUE * stack
Definition: vm_core.h:498
static void rb_thread_schedule_limits(unsigned long limits_us)
Definition: thread.c:1129
#define RB_TYPE_P(obj, type)
Definition: ruby.h:1537
void rb_reset_random_seed(void)
Definition: random.c:1443
int rb_thread_fd_writable(int fd)
Definition: thread.c:3445
#define RHASH(obj)
Definition: ruby.h:1102
static void rb_threadptr_interrupt_common(rb_thread_t *th, int trap)
Definition: thread.c:330
static VALUE thgroup_s_alloc(VALUE klass)
Definition: thread.c:3914
#define POSFIXABLE(f)
Definition: ruby.h:356
#define RUBY_VM_INTERRUPTED_ANY(th)
Definition: vm_core.h:927
#define TH_POP_TAG()
Definition: eval_intern.h:129
int st_lookup(st_table *, st_data_t, st_data_t *)
static int thread_list_i(st_data_t key, st_data_t val, void *data)
Definition: thread.c:2310
#define MEMZERO(p, type, n)
Definition: ruby.h:1241
static VALUE coverage(VALUE fname, int n)
Definition: ripper.c:11811
#define closed_stream_error
Definition: thread.c:98
static const char * thread_status_name(rb_thread_t *th)
Definition: thread.c:2514
rb_thread_t * target
Definition: thread.c:737
#define RUBY_THREAD_PRIORITY_MAX
Definition: thread.c:68
fd_set rb_fdset_t
Definition: intern.h:326
#define rb_fd_term(f)
Definition: intern.h:337
static VALUE rb_thread_priority_set(VALUE thread, VALUE prio)
Definition: thread.c:3070
double rb_num2dbl(VALUE)
Definition: object.c:2769
static int do_select(int n, rb_fdset_t *read, rb_fdset_t *write, rb_fdset_t *except, struct timeval *timeout)
Definition: thread.c:3342
static void sleep_for_polling(rb_thread_t *th)
Definition: thread.c:1065
int rb_block_given_p(void)
Definition: eval.c:672
#define EXEC_TAG()
Definition: eval_intern.h:141
VALUE locking_mutex
Definition: vm_core.h:558
static const rb_data_type_t thread_shield_data_type
Definition: thread.c:4578
#define val
long tv_usec
Definition: ossl_asn1.c:18
RUBY_EXTERN VALUE rb_cObject
Definition: ruby.h:1426
VALUE rb_eRuntimeError
Definition: error.c:515
rb_thread_lock_t lock
Definition: thread.c:378
static VALUE rb_thread_inspect(VALUE thread)
Definition: thread.c:2655
#define RB_WAITFD_PRI
Definition: io.h:48
#define PRIdVALUE
Definition: ruby.h:142
static VALUE rb_threadptr_raise(rb_thread_t *, int, VALUE *)
Definition: thread.c:1995
#define rb_fd_ptr(f)
Definition: intern.h:334
VALUE rb_mutex_trylock(VALUE self)
Definition: thread.c:4212
int rb_typeddata_is_kind_of(VALUE obj, const rb_data_type_t *data_type)
Definition: error.c:478
VALUE rb_ary_new(void)
Definition: array.c:424
void * blocking_region_buffer
Definition: vm_core.h:536
static VALUE exec_recursive(VALUE(*func)(VALUE, VALUE, int), VALUE obj, VALUE pairid, VALUE arg, int outer)
Definition: thread.c:4832
static VALUE thread_create_core(VALUE thval, VALUE args, VALUE(*fn)(ANYARGS))
Definition: thread.c:608
void Init_Thread(void)
Definition: thread.c:4976
VALUE rb_iv_get(VALUE, const char *)
Definition: variable.c:2586
#define JUMP_TAG(st)
Definition: eval_intern.h:148
rb_iseq_t * iseq
Definition: vm_core.h:428
VALUE vm_thread_backtrace(int argc, VALUE *argv, VALUE thval)
Definition: vm_backtrace.c:861
#define NIL_P(v)
Definition: ruby.h:446
static int rb_threadptr_pending_interrupt_include_p(rb_thread_t *th, VALUE err)
Definition: thread.c:1562
long tv_nsec
Definition: missing.h:47
void rb_thread_stop_timer_thread(int close_anyway)
Definition: thread.c:3776
#define UNLIKELY(x)
Definition: vm_core.h:115
VALUE rb_define_class(const char *name, VALUE super)
Defines a top-level class.
Definition: class.c:488
static void rb_threadptr_ready(rb_thread_t *th)
Definition: thread.c:1989
int st_delete(st_table *, st_data_t *, st_data_t *)
int enclosed
Definition: thread.c:3880
#define rb_intern(str)
void rb_define_const(VALUE, const char *, VALUE)
Definition: variable.c:2204
void rb_thread_atfork_before_exec(void)
Definition: thread.c:3874
#define thread_debug
Definition: thread.c:211
static int rb_threadptr_pending_interrupt_empty_p(rb_thread_t *th)
Definition: thread.c:1556
#define OBJ_FROZEN(x)
Definition: ruby.h:1163
void rb_threadptr_check_signal(rb_thread_t *mth)
Definition: thread.c:3735
#define OBJ_UNTRUST(x)
Definition: ruby.h:1156
VALUE rb_class_inherited_p(VALUE, VALUE)
Definition: object.c:1503
int thread_abort_on_exception
Definition: vm_core.h:350
int argc
Definition: ruby.c:130
rb_thread_status
Definition: vm_core.h:455
#define Qfalse
Definition: ruby.h:433
void ruby_thread_init_stack(rb_thread_t *th)
Definition: thread.c:476
VALUE rb_proc_location(VALUE self)
Definition: proc.c:737
static VALUE rb_thread_exit(void)
Definition: thread.c:2203
RUBY_EXTERN VALUE rb_cModule
Definition: ruby.h:1445
void rb_thread_check_ints(void)
Definition: thread.c:1098
#define RUBY_UBF_PROCESS
Definition: intern.h:844
void rb_exit(int status)
Definition: process.c:3567
void rb_thread_fd_close(int fd)
Definition: thread.c:2086
RUBY_EXTERN int isinf(double)
Definition: isinf.c:56
#define T_NODE
Definition: ruby.h:506
VALUE rb_thread_shield_new(void)
Definition: thread.c:4616
volatile int sleeper
Definition: vm_core.h:352
void ruby_stop(int ex)
Calls ruby_cleanup() and exits the process.
Definition: eval.c:261
VALUE rb_obj_alloc(VALUE)
Definition: object.c:1740
int err
Definition: win32.c:87
#define OBJ_FREEZE(x)
Definition: ruby.h:1164
#define EXIT_FAILURE
Definition: eval_intern.h:24
VALUE rb_thread_shield_release(VALUE self)
Definition: thread.c:4652
void rb_thread_atfork(void)
Definition: thread.c:3851
#define POP_TAG()
Definition: eval_intern.h:137
VALUE * machine_stack_start
Definition: vm_core.h:588
#define GVL_UNLOCK_BEGIN()
Definition: thread.c:137
static const rb_data_type_t thgroup_data_type
Definition: thread.c:3890
VALUE rb_thread_create(VALUE(*fn)(ANYARGS), void *arg)
Definition: thread.c:727
void rb_throw_obj(VALUE tag, VALUE value)
Definition: vm_eval.c:1721
static VALUE thread_s_current(VALUE klass)
Definition: thread.c:2373
#define FD_SET(fd, set)
Definition: win32.h:594
VALUE rb_cThreadShield
Definition: thread.c:80
static void sleep_wait_for_interrupt(rb_thread_t *th, double sleepsec, int spurious_check)
Definition: thread.c:1059
#define TIMET_MAX
Definition: thread.c:76
#define ATOMIC_CAS(var, oldval, newval)
Definition: ruby_atomic.h:132
#define ALLOC(type)
Definition: ruby.h:1224
void rb_thread_polling(void)
Definition: thread.c:1081
VALUE read
Definition: io.c:8257
#define GetMutexPtr(obj, tobj)
Definition: thread.c:4105
static VALUE rb_thread_backtrace_locations_m(int argc, VALUE *argv, VALUE thval)
Definition: thread.c:4947
int rb_thread_select(int max, fd_set *read, fd_set *write, fd_set *except, struct timeval *timeout)
Definition: thread.c:3452
VALUE rb_yield(VALUE)
Definition: vm_eval.c:933
static int recursive_pop(VALUE list, VALUE obj, VALUE paired_obj)
Definition: thread.c:4775
void * rb_thread_call_without_gvl2(void *(*func)(void *), void *data1, rb_unblock_function_t *ubf, void *data2)
Definition: thread.c:1325
struct rb_unblock_callback oldubf
Definition: thread.c:112
SSL_METHOD *(* func)(void)
Definition: ossl_ssl.c:108
#define rb_thread_set_current(th)
Definition: vm_core.h:903
int errno
#define TRUE
Definition: nkf.h:175
VALUE rb_uninterruptible(VALUE(*b_proc)(ANYARGS), VALUE data)
Definition: thread.c:5211
static int thgroup_list_i(st_data_t key, st_data_t val, st_data_t data)
Definition: thread.c:3932
#define EXIT_SUCCESS
Definition: error.c:31
VALUE special_exceptions[ruby_special_error_count]
Definition: vm_core.h:357
struct rb_mutex_struct * keeping_mutexes
Definition: vm_core.h:559
VALUE rb_thread_shield_wait(VALUE self)
Definition: thread.c:4632
VALUE rb_sprintf(const char *format,...)
Definition: sprintf.c:1272
int rb_get_next_signal(void)
Definition: signal.c:604
VALUE rb_hash_delete(VALUE hash, VALUE key)
Definition: hash.c:869
int rb_thread_fd_select(int max, rb_fdset_t *read, rb_fdset_t *write, rb_fdset_t *except, struct timeval *timeout)
Definition: thread.c:3496
#define rb_fd_copy(d, s, n)
Definition: intern.h:331
static int set_unblock_function(rb_thread_t *th, rb_unblock_function_t *func, void *arg, struct rb_unblock_callback *old, int fail_if_interrupted)
Definition: thread.c:293
#define const
Definition: strftime.c:102
static int thread_fd_close_i(st_data_t key, st_data_t val, st_data_t data)
Definition: thread.c:2071
VALUE rb_hash_new(void)
Definition: hash.c:234
void ruby_xfree(void *x)
Definition: gc.c:3653
#define DELAY_INFTY
Definition: thread.c:734
int rb_threadptr_reset_raised(rb_thread_t *th)
Definition: thread.c:2061
int rb_scan_args(int argc, const VALUE *argv, const char *fmt,...)
Definition: class.c:1570
unsigned char buf[MIME_BUF_SIZE]
Definition: nkf.c:4308
#define RUBY_VM_SET_TIMER_INTERRUPT(th)
Definition: vm_core.h:922
#define PRIsVALUE
Definition: ruby.h:147
void rb_add_event_hook(rb_event_hook_func_t func, rb_event_flag_t events, VALUE data)
Definition: vm_trace.c:135
unsigned long ID
Definition: ruby.h:105
static VALUE thread_initialize(VALUE thread, VALUE args)
Definition: thread.c:702
handle_interrupt_timing
Definition: thread.c:1509
static void rb_check_deadlock(rb_vm_t *vm)
Definition: thread.c:5150
static VALUE rb_mutex_synchronize_m(VALUE self, VALUE args)
Definition: thread.c:4552
#define GVL_UNLOCK_END()
Definition: thread.c:142
#define Qnil
Definition: ruby.h:435
void rb_thread_sleep_forever(void)
Definition: thread.c:1028
VALUE rb_exc_new2(VALUE etype, const char *s)
Definition: error.c:547
static VALUE thread_shield_alloc(VALUE klass)
Definition: thread.c:4584
VALUE group
Definition: thread.c:3881
#define OBJ_TAINT(x)
Definition: ruby.h:1154
unsigned long VALUE
Definition: ruby.h:104
#define THREAD_SHIELD_WAITING_MASK
Definition: thread.c:4590
#define SAVE_ROOT_JMPBUF(th, stmt)
Definition: eval_intern.h:112
static VALUE result
Definition: nkf.c:40
RUBY_EXTERN VALUE rb_cThread
Definition: ruby.h:1459
int rb_wait_for_single_fd(int fd, int events, struct timeval *tv)
Definition: thread.c:3695
static int keys_i(VALUE key, VALUE value, VALUE ary)
Definition: thread.c:2950
#define UNINITIALIZED_VAR(x)
Definition: vm_core.h:121
#define RBASIC(obj)
Definition: ruby.h:1094
union select_args::@118 as
const char * rb_class2name(VALUE)
Definition: variable.c:389
struct rb_thread_struct * main_thread
Definition: vm_core.h:343
static int clear_coverage_i(st_data_t key, st_data_t val, st_data_t dummy)
Definition: thread.c:3797
int error
Definition: thread.c:3652
static VALUE rb_thread_s_pending_interrupt_p(int argc, VALUE *argv, VALUE self)
Definition: thread.c:1883
VALUE first_proc
Definition: vm_core.h:583
void rb_threadptr_pending_interrupt_clear(rb_thread_t *th)
Definition: thread.c:1497
void rb_gc_set_stack_end(VALUE **stack_end_p)
Definition: thread.c:3722
static void rb_thread_shield_waiting_dec(VALUE b)
Definition: thread.c:4606
#define TH_PUSH_TAG(th)
Definition: eval_intern.h:122
void rb_thread_schedule(void)
Definition: thread.c:1146
VALUE rb_mutex_new(void)
Definition: thread.c:4173
static VALUE rb_thread_variable_get(VALUE thread, VALUE id)
Definition: thread.c:2825
VALUE rb_ensure(VALUE(*b_proc)(ANYARGS), VALUE data1, VALUE(*e_proc)(ANYARGS), VALUE data2)
Definition: eval.c:804
VALUE rb_exec_recursive_paired_outer(VALUE(*func)(VALUE, VALUE, int), VALUE obj, VALUE paired_obj, VALUE arg)
Definition: thread.c:4916
static VALUE thread_value(VALUE self)
Definition: thread.c:913
static void reset_unblock_function(rb_thread_t *th, const struct rb_unblock_callback *old)
Definition: thread.c:322
rb_atomic_t interrupt_flag
Definition: vm_core.h:554
static void timer_thread_function(void *)
Definition: thread.c:3745
void rb_thread_wait_fd(int fd)
Definition: thread.c:3439
st_table * st_init_numtable(void)
Definition: st.c:272
VALUE rb_blocking_function_t(void *)
Definition: intern.h:836
void rb_sys_fail(const char *mesg)
Definition: error.c:1907
VALUE rb_thread_main(void)
Definition: thread.c:2379
void xfree(void *)
static VALUE rb_thread_backtrace_m(int argc, VALUE *argv, VALUE thval)
Definition: thread.c:4930
int abort_on_exception
Definition: vm_core.h:613
static VALUE rb_thread_pending_interrupt_p(int argc, VALUE *argv, VALUE target_thread)
Definition: thread.c:1798
enum rb_thread_status status
Definition: vm_core.h:531
static void st_delete_wrap(st_table *table, st_data_t key)
Definition: thread.c:101
void rb_thread_sleep(int sec)
Definition: thread.c:1123
#define rb_fd_max(f)
Definition: intern.h:338
static VALUE thread_s_pass(VALUE klass)
Definition: thread.c:1472
static VALUE thread_join_m(int argc, VALUE *argv, VALUE self)
Definition: thread.c:882
#define RSTRING_PTR(str)
Definition: ruby.h:866
#define thread_start_func_2(th, st, rst)
Definition: thread.c:215
static void rb_thread_sleep_deadly(void)
Definition: thread.c:1035
enum rb_thread_status prev_status
Definition: thread.c:111
static VALUE mutex_sleep(int argc, VALUE *argv, VALUE self)
Definition: thread.c:4521
VALUE * machine_stack_end
Definition: vm_core.h:589
VALUE first_args
Definition: vm_core.h:584
void rb_thread_recycle_stack_release(VALUE *)
Definition: vm.c:1835
void rb_thread_terminate_all(void)
Definition: thread.c:409
#define THREAD_SHIELD_WAITING_SHIFT
Definition: thread.c:4591
static void rb_threadptr_to_kill(rb_thread_t *th)
Definition: thread.c:1889
int size
Definition: encoding.c:52
void rb_reset_coverages(void)
Definition: thread.c:5204
#define f
VALUE rb_hash_lookup2(VALUE hash, VALUE key, VALUE def)
Definition: hash.c:581
#define INT2FIX(i)
Definition: ruby.h:241
void rb_thread_execute_interrupts(VALUE thval)
Definition: thread.c:1981
int rb_sourceline(void)
Definition: vm.c:884
void rb_thread_lock_destroy(rb_thread_lock_t *lock)
Definition: thread.c:287
static VALUE thgroup_list(VALUE group)
Definition: thread.c:3957
unsigned long interrupt_mask
Definition: vm_core.h:555
VALUE rb_block_proc(void)
Definition: proc.c:458
#define xmalloc
Definition: defines.h:64
#define RUBY_THREAD_PRIORITY_MIN
Definition: thread.c:69
#define ANYARGS
Definition: defines.h:57
VALUE rb_thread_group(VALUE thread)
Definition: thread.c:2500
struct rb_unblock_callback unblock
Definition: vm_core.h:557
static VALUE rb_thread_aref(VALUE thread, VALUE id)
Definition: thread.c:2750
unsigned long rb_event_flag_t
Definition: ruby.h:1603
#define BLOCKING_REGION(exec, ubf, ubfarg, fail_if_interrupted)
Definition: thread.c:152
VALUE rb_hash_aref(VALUE hash, VALUE key)
Definition: hash.c:570
static VALUE recursive_list_access(VALUE sym)
Definition: thread.c:4681
#define RARRAY_PTR(a)
Definition: ruby.h:904
#define rb_fd_select(n, rfds, wfds, efds, timeout)
Definition: intern.h:339
#define RB_GC_SAVE_MACHINE_CONTEXT(th)
Definition: thread.c:129
void rb_thread_reset_timer_thread(void)
Definition: thread.c:3784
static VALUE rb_thread_status(VALUE thread)
Definition: thread.c:2562
int rb_signal_buff_size(void)
Definition: signal.c:574
static void rb_thread_shield_waiting_inc(VALUE b)
Definition: thread.c:4595
uint8_t key[16]
Definition: random.c:1370
#define rb_fd_clr(n, f)
Definition: intern.h:329
#define LONG2FIX(i)
Definition: ruby.h:242
#define RTEST(v)
Definition: ruby.h:445
#define FD_CLR(f, s)
Definition: win32.h:612
VALUE root_fiber
Definition: vm_core.h:608
rb_thread_t * waiting
Definition: thread.c:737
#define OBJ_INFECT(x, s)
Definition: ruby.h:1157
struct rb_encoding_entry * list
Definition: encoding.c:50
#define ETIMEDOUT
Definition: win32.h:549
VALUE rb_thread_shield_destroy(VALUE self)
Definition: thread.c:4663
static VALUE rb_threadptr_pending_interrupt_deque(rb_thread_t *th, enum handle_interrupt_timing timing)
Definition: thread.c:1575
static void recursive_push(VALUE list, VALUE obj, VALUE paired_obj)
Definition: thread.c:4744
v
Definition: win32ole.c:798
static VALUE thread_start(VALUE klass, VALUE args)
Definition: thread.c:695
#define TypedData_Make_Struct(klass, type, data_type, sval)
Definition: ruby.h:1019
static VALUE rb_mutex_wait_for(VALUE time)
Definition: thread.c:4481
VALUE rb_ary_dup(VALUE ary)
Definition: array.c:1766
#define GetThreadPtr(obj, ptr)
Definition: vm_core.h:452
static unsigned int hash(const char *str, unsigned int len)
Definition: lex.c:56
int rb_atomic_t
Definition: ruby_atomic.h:120
static VALUE thread_raise_m(int argc, VALUE *argv, VALUE self)
Definition: thread.c:2113
#define rb_safe_level()
Definition: tcltklib.c:94
#define rb_fd_resize(n, f)
Definition: intern.h:333
#define rb_thread_shield_waiting(b)
Definition: thread.c:4592
#define EXEC_EVENT_HOOK(th_, flag_, self_, id_, klass_, data_)
Definition: vm_core.h:1000
static void sleep_timeval(rb_thread_t *th, struct timeval time, int spurious_check)
Definition: thread.c:986
#define ruby_debug
Definition: ruby.h:1364
#define RUBY_EVENT_COVERAGE
Definition: ruby.h:1601
#define xrealloc
Definition: defines.h:67
RUBY_EXTERN VALUE rb_eIOError
Definition: ruby.h:1476
#define ID2SYM(x)
Definition: ruby.h:363
VALUE rb_thread_io_blocking_region(rb_blocking_function_t *func, void *data1, int fd)
Definition: thread.c:1339
void rb_threadptr_trap_interrupt(rb_thread_t *th)
Definition: thread.c:353
unsigned long st_data_t
Definition: st.h:35
VALUE rb_eFatal
Definition: error.c:513
int forever
Definition: thread.c:739
#define rb_fd_init_copy(d, s)
Definition: intern.h:336
static int terminate_i(st_data_t key, st_data_t val, rb_thread_t *main_thread)
Definition: thread.c:359
struct rb_thread_list_struct * next
Definition: vm_core.h:488
#define RUBY_VM_INTERRUPTED(th)
Definition: vm_core.h:926
#define rb_fd_init(f)
Definition: intern.h:335
static VALUE rb_thread_s_abort_exc(void)
Definition: thread.c:2411
VALUE rb_thread_local_aset(VALUE thread, ID id, VALUE val)
Definition: thread.c:2756
#define rb_fd_dup(d, s)
Definition: intern.h:332
void rb_vm_gvl_destroy(rb_vm_t *vm)
Definition: thread.c:273
int rb_threadptr_pending_interrupt_active_p(rb_thread_t *th)
Definition: thread.c:1612
void st_clear(st_table *)
Definition: st.c:308
void rb_secure(int)
Definition: safe.c:79
rb_fdset_t * except
Definition: thread.c:3656
static void mutex_locked(rb_thread_t *th, VALUE self)
Definition: thread.c:4193
#define FD_ISSET(f, s)
Definition: win32.h:615
#define RUBY_TYPED_DEFAULT_FREE
Definition: ruby.h:1004
NOINLINE(static int thread_start_func_2(rb_thread_t *th, VALUE *stack_start, VALUE *register_stack_start))
VALUE vm_thread_backtrace_locations(int argc, VALUE *argv, VALUE thval)
Definition: vm_backtrace.c:867
static VALUE rb_thread_keys(VALUE self)
Definition: thread.c:2937
#define GetThreadShieldPtr(obj)
Definition: thread.c:4589
#define vsnprintf
Definition: subst.h:7
#define RB_WAITFD_IN
Definition: io.h:47
VALUE pending_interrupt_queue
Definition: vm_core.h:550
#define RHASH_EMPTY_P(h)
Definition: ruby.h:932
VALUE write
Definition: io.c:8257
void * rb_thread_call_with_gvl(void *(*func)(void *), void *data1)
Definition: thread.c:1409
static void getclockofday(struct timeval *tp)
Definition: thread.c:970
static VALUE select_single_cleanup(VALUE ptr)
Definition: thread.c:3683
VALUE rb_vm_make_jump_tag_but_local_jump(int state, VALUE val)
Definition: vm.c:975
static VALUE select_single(VALUE ptr)
Definition: thread.c:3661
struct rb_mutex_struct rb_mutex_t
#define eTerminateSignal
Definition: thread.c:95
int cond_waiting
Definition: thread.c:381
VALUE rb_get_coverages(void)
Definition: thread.c:5191
VALUE except
Definition: io.c:8257
VALUE rb_eSystemExit
Definition: error.c:510
#define NULL
Definition: _sdbm.c:102
#define FIX2LONG(x)
Definition: ruby.h:353
#define Qundef
Definition: ruby.h:436
VALUE rb_hash_aset(VALUE, VALUE, VALUE)
static int thread_keys_i(ID key, VALUE value, VALUE ary)
Definition: thread.c:2899
static void * call_without_gvl(void *(*func)(void *), void *data1, rb_unblock_function_t *ubf, void *data2, int fail_if_interrupted)
Definition: thread.c:1211
#define GET_THROWOBJ_STATE(obj)
Definition: eval_intern.h:182
static VALUE rb_thread_variable_set(VALUE thread, VALUE id, VALUE val)
Definition: thread.c:2850
void rb_threadptr_interrupt(rb_thread_t *th)
Definition: thread.c:347
st_index_t num_entries
Definition: st.h:93
VALUE rb_thread_wakeup_alive(VALUE thread)
Definition: thread.c:2237
VALUE rb_thread_blocking_region(rb_blocking_function_t *func, void *data1, rb_unblock_function_t *ubf, void *data2)
Definition: thread.c:1372
void rb_obj_call_init(VALUE obj, int argc, VALUE *argv)
Definition: eval.c:1233
static void mutex_free(void *ptr)
Definition: thread.c:4111
VALUE rb_mutex_unlock(VALUE self)
Definition: thread.c:4426
static rb_thread_t * GET_THREAD(void)
Definition: vm_core.h:890
void rb_define_method(VALUE klass, const char *name, VALUE(*func)(ANYARGS), int argc)
Definition: class.c:1344
int st_foreach(st_table *, int(*)(ANYARGS), st_data_t)
Definition: st.c:1006
VALUE rb_str_new2(const char *)
#define GET_THROWOBJ_VAL(obj)
Definition: eval_intern.h:180
void rb_set_coverages(VALUE coverages)
Definition: thread.c:5197
ID rb_to_id(VALUE)
Definition: string.c:8172
int select(int num_fds, fd_set *in_fds, fd_set *out_fds, fd_set *ex_fds, struct timeval *timeout)
VALUE rb_eThreadError
Definition: eval.c:690
static VALUE rb_thread_key_p(VALUE self, VALUE key)
Definition: thread.c:2882
VALUE rb_eArgError
Definition: error.c:517
#define RUBY_VM_CHECK_INTS_BLOCKING(th)
Definition: vm_core.h:944
VALUE rb_obj_is_mutex(VALUE obj)
Definition: thread.c:4138
static VALUE rb_thread_s_kill(VALUE obj, VALUE th)
Definition: thread.c:2186
static VALUE recursive_check(VALUE list, VALUE obj_id, VALUE paired_obj_id)
Definition: thread.c:4709
rb_thread_id_t thread_id
Definition: vm_core.h:530
VALUE rb_thread_run(VALUE thread)
Definition: thread.c:2272
void rb_threadptr_signal_exit(rb_thread_t *th)
Definition: thread.c:2025
static void lock_interrupt(void *ptr)
Definition: thread.c:4271
static void rb_thread_atfork_internal(int(*atfork)(st_data_t, st_data_t, st_data_t))
Definition: thread.c:3820
char ** argv
Definition: ruby.c:131
int rb_thread_interrupted(VALUE thval)
Definition: thread.c:1115
struct timeval rb_time_timeval(VALUE)
Definition: time.c:2502
VALUE rb_mutex_lock(VALUE self)
Definition: thread.c:4295
int rb_threadptr_set_raised(rb_thread_t *th)
Definition: thread.c:2051
#define RUBY_UBF_IO
Definition: intern.h:843
static enum handle_interrupt_timing rb_threadptr_pending_interrupt_check_mask(rb_thread_t *th, VALUE err)
Definition: thread.c:1517
static int terminate_atfork_i(st_data_t key, st_data_t val, st_data_t current_th)
Definition: thread.c:3836
#define GET_VM()
Definition: vm_core.h:883
static ID recursive_key
Definition: thread.c:4672