Ruby 1.9.3p484 (2013-11-22 revision 43786)
thread.c
/**********************************************************************

  thread.c -

  $Author: usa $

  Copyright (C) 2004-2007 Koichi Sasada

**********************************************************************/

/*
  YARV Thread Design

  model 1: Userlevel Thread
    Same as traditional ruby thread.

  model 2: Native Thread with Global VM lock
    Using pthread (or Windows threads); Ruby threads run concurrently.

  model 3: Native Thread with fine grain lock
    Using pthread; Ruby threads run concurrently or in parallel.

------------------------------------------------------------------------

  model 2:
    Only a thread that holds the mutex (GVL: Global VM Lock or Giant
    VM Lock) can run.  When scheduling, the running thread releases the
    GVL.  If the running thread attempts a blocking operation, it must
    release the GVL so that another thread can continue in its place.
    After the blocking operation, the thread must check for interrupts
    (RUBY_VM_CHECK_INTS).

    Every VM can run in parallel.

    Ruby threads are scheduled by the OS thread scheduler.

------------------------------------------------------------------------

  model 3:
    Every thread runs concurrently or in parallel, so exclusive access
    control is needed to touch shared objects.  For example, a fine
    grain lock must be taken every time a String or Array object is
    accessed.
 */
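
/*
 * Illustrative sketch (not part of the original source): under model 2,
 * the life cycle of a thread body in this file reduces to the pattern
 * below.  gvl_acquire(), gvl_release() and RUBY_VM_CHECK_INTS() are the
 * real primitives used throughout thread.c; the rest is schematic.
 *
 *     gvl_acquire(th->vm, th);   // only the GVL holder runs Ruby code
 *     ... run Ruby code ...
 *     gvl_release(th->vm);       // give up the GVL around a blocking op
 *     ... blocking operation ...
 *     gvl_acquire(th->vm, th);   // take the GVL back
 *     RUBY_VM_CHECK_INTS();      // then check pending interrupts
 */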

/*
 * FD_SET, FD_CLR and FD_ISSET have a small sanity check when using glibc
 * 2.15 or later and setting _FORTIFY_SOURCE > 0.
 * However, the implementation is wrong.  Even though Linux's select(2)
 * supports large fd sizes (> FD_SETSIZE), it wrongly assumes fd is always
 * less than FD_SETSIZE (i.e. 1024).  And then, when HAVE_RB_FD_INIT is
 * enabled, it doesn't work correctly and makes the program abort.
 * Therefore we need to disable _FORTIFY_SOURCE until glibc fixes it.
 */
#undef _FORTIFY_SOURCE
#undef __USE_FORTIFY_LEVEL
#define __USE_FORTIFY_LEVEL 0

/* for model 2 */

#include "eval_intern.h"
#include "gc.h"
#include "internal.h"
#include "ruby/io.h"

#ifndef USE_NATIVE_THREAD_PRIORITY
#define USE_NATIVE_THREAD_PRIORITY 0
#define RUBY_THREAD_PRIORITY_MAX 3
#define RUBY_THREAD_PRIORITY_MIN -3
#endif

#ifndef THREAD_DEBUG
#define THREAD_DEBUG 0
#endif

VALUE rb_cMutex;
VALUE rb_cBarrier;

static void sleep_timeval(rb_thread_t *th, struct timeval time);
static void sleep_wait_for_interrupt(rb_thread_t *th, double sleepsec);
static void sleep_forever(rb_thread_t *th, int nodeadlock);
static double timeofday(void);
static int rb_threadptr_dead(rb_thread_t *th);

static void rb_check_deadlock(rb_vm_t *vm);

#define eKillSignal INT2FIX(0)
#define eTerminateSignal INT2FIX(1)
static volatile int system_working = 1;

#define closed_stream_error GET_VM()->special_exceptions[ruby_error_closed_stream]

inline static void
st_delete_wrap(st_table *table, st_data_t key)
{
    st_delete(table, &key, 0);
}

/********************************************************************************/

#define THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION

struct rb_blocking_region_buffer {
    enum rb_thread_status prev_status;
    struct rb_unblock_callback oldubf;
};

static void set_unblock_function(rb_thread_t *th, rb_unblock_function_t *func, void *arg,
                                 struct rb_unblock_callback *old);
static void reset_unblock_function(rb_thread_t *th, const struct rb_unblock_callback *old);

static inline void blocking_region_end(rb_thread_t *th, struct rb_blocking_region_buffer *region);

#define RB_GC_SAVE_MACHINE_CONTEXT(th) \
    do { \
        rb_gc_save_machine_context(th); \
        SET_MACHINE_STACK_END(&(th)->machine_stack_end); \
    } while (0)

#define GVL_UNLOCK_BEGIN() do { \
    rb_thread_t *_th_stored = GET_THREAD(); \
    RB_GC_SAVE_MACHINE_CONTEXT(_th_stored); \
    gvl_release(_th_stored->vm);

#define GVL_UNLOCK_END() \
    gvl_acquire(_th_stored->vm, _th_stored); \
    rb_thread_set_current(_th_stored); \
} while(0)

#define blocking_region_begin(th, region, func, arg) \
    do { \
        (region)->prev_status = (th)->status; \
        set_unblock_function((th), (func), (arg), &(region)->oldubf); \
        (th)->blocking_region_buffer = (region); \
        (th)->status = THREAD_STOPPED; \
        thread_debug("enter blocking region (%p)\n", (void *)(th)); \
        RB_GC_SAVE_MACHINE_CONTEXT(th); \
        gvl_release((th)->vm); \
    } while (0)

#define BLOCKING_REGION(exec, ubf, ubfarg) do { \
    rb_thread_t *__th = GET_THREAD(); \
    struct rb_blocking_region_buffer __region; \
    blocking_region_begin(__th, &__region, (ubf), (ubfarg)); \
    exec; \
    blocking_region_end(__th, &__region); \
    RUBY_VM_CHECK_INTS(); \
} while(0)

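/*
 * Illustrative sketch (not part of the original source): callers in this
 * file use BLOCKING_REGION roughly as below.  `do_read' and `arg' are
 * hypothetical placeholders; ubf_select is the built-in unblocking
 * function used elsewhere in this file.
 *
 *     VALUE val;
 *     BLOCKING_REGION({
 *         val = do_read(arg);             // runs without the GVL
 *     }, ubf_select, GET_THREAD());
 *     // the GVL is re-acquired and interrupts are checked here
 */
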
#if THREAD_DEBUG
#ifdef HAVE_VA_ARGS_MACRO
void rb_thread_debug(const char *file, int line, const char *fmt, ...);
#define thread_debug(fmt, ...) rb_thread_debug(__FILE__, __LINE__, fmt, ##__VA_ARGS__)
#define POSITION_FORMAT "%s:%d:"
#define POSITION_ARGS ,file, line
#else
void rb_thread_debug(const char *fmt, ...);
#define thread_debug rb_thread_debug
#define POSITION_FORMAT
#define POSITION_ARGS
#endif

# if THREAD_DEBUG < 0
static int rb_thread_debug_enabled;

/*
 *  call-seq:
 *     Thread.DEBUG     -> num
 *
 *  Returns the thread debug level.  Available only if compiled with
 *  THREAD_DEBUG=-1.
 */

static VALUE
rb_thread_s_debug(void)
{
    return INT2NUM(rb_thread_debug_enabled);
}

/*
 *  call-seq:
 *     Thread.DEBUG = num
 *
 *  Sets the thread debug level.  Available only if compiled with
 *  THREAD_DEBUG=-1.
 */

static VALUE
rb_thread_s_debug_set(VALUE self, VALUE val)
{
    rb_thread_debug_enabled = RTEST(val) ? NUM2INT(val) : 0;
    return val;
}
# else
# define rb_thread_debug_enabled THREAD_DEBUG
# endif
#else
#define thread_debug if(0)printf
#endif

#ifndef __ia64
#define thread_start_func_2(th, st, rst) thread_start_func_2(th, st)
#endif
NOINLINE(static int thread_start_func_2(rb_thread_t *th, VALUE *stack_start,
                                        VALUE *register_stack_start));
static void timer_thread_function(void *);

#if defined(_WIN32)
#include "thread_win32.c"

#define DEBUG_OUT() \
    WaitForSingleObject(&debug_mutex, INFINITE); \
    printf(POSITION_FORMAT"%p - %s" POSITION_ARGS, GetCurrentThreadId(), buf); \
    fflush(stdout); \
    ReleaseMutex(&debug_mutex);

#elif defined(HAVE_PTHREAD_H)
#include "thread_pthread.c"

#define DEBUG_OUT() \
    pthread_mutex_lock(&debug_mutex); \
    printf(POSITION_FORMAT"%#"PRIxVALUE" - %s" POSITION_ARGS, (VALUE)pthread_self(), buf); \
    fflush(stdout); \
    pthread_mutex_unlock(&debug_mutex);

#else
#error "unsupported thread type"
#endif

#if THREAD_DEBUG
static int debug_mutex_initialized = 1;
static rb_thread_lock_t debug_mutex;

void
rb_thread_debug(
#ifdef HAVE_VA_ARGS_MACRO
    const char *file, int line,
#endif
    const char *fmt, ...)
{
    va_list args;
    char buf[BUFSIZ];

    if (!rb_thread_debug_enabled) return;

    if (debug_mutex_initialized == 1) {
        debug_mutex_initialized = 0;
        native_mutex_initialize(&debug_mutex);
    }

    va_start(args, fmt);
    vsnprintf(buf, BUFSIZ, fmt, args);
    va_end(args);

    DEBUG_OUT();
}
#endif

void
rb_vm_gvl_destroy(rb_vm_t *vm)
{
    gvl_release(vm);
    gvl_destroy(vm);
}

void
rb_thread_lock_unlock(rb_thread_lock_t *lock)
{
    native_mutex_unlock(lock);
}

void
rb_thread_lock_destroy(rb_thread_lock_t *lock)
{
    native_mutex_destroy(lock);
}

static void
set_unblock_function(rb_thread_t *th, rb_unblock_function_t *func, void *arg,
                     struct rb_unblock_callback *old)
{
  check_ints:
    RUBY_VM_CHECK_INTS(); /* check signal or so */
    native_mutex_lock(&th->interrupt_lock);
    if (th->interrupt_flag) {
        native_mutex_unlock(&th->interrupt_lock);
        goto check_ints;
    }
    else {
        if (old) *old = th->unblock;
        th->unblock.func = func;
        th->unblock.arg = arg;
    }
    native_mutex_unlock(&th->interrupt_lock);
}

static void
reset_unblock_function(rb_thread_t *th, const struct rb_unblock_callback *old)
{
    native_mutex_lock(&th->interrupt_lock);
    th->unblock = *old;
    native_mutex_unlock(&th->interrupt_lock);
}

void
rb_threadptr_interrupt(rb_thread_t *th)
{
    native_mutex_lock(&th->interrupt_lock);
    RUBY_VM_SET_INTERRUPT(th);
    if (th->unblock.func) {
        (th->unblock.func)(th->unblock.arg);
    }
    else {
        /* none */
    }
    native_mutex_unlock(&th->interrupt_lock);
}


static int
terminate_i(st_data_t key, st_data_t val, rb_thread_t *main_thread)
{
    VALUE thval = key;
    rb_thread_t *th;
    GetThreadPtr(thval, th);

    if (th != main_thread) {
        thread_debug("terminate_i: %p\n", (void *)th);
        rb_threadptr_interrupt(th);
        th->thrown_errinfo = eTerminateSignal;
        th->status = THREAD_TO_KILL;
    }
    else {
        thread_debug("terminate_i: main thread (%p)\n", (void *)th);
    }
    return ST_CONTINUE;
}

typedef struct rb_mutex_struct
{
    rb_thread_lock_t lock;
    rb_thread_cond_t cond;
    struct rb_thread_struct volatile *th;
    int cond_waiting;
    struct rb_mutex_struct *next_mutex;
} rb_mutex_t;

static void rb_mutex_abandon_all(rb_mutex_t *mutexes);
static const char* rb_mutex_unlock_th(rb_mutex_t *mutex, rb_thread_t volatile *th);

void
rb_threadptr_unlock_all_locking_mutexes(rb_thread_t *th)
{
    const char *err;
    rb_mutex_t *mutex;
    rb_mutex_t *mutexes = th->keeping_mutexes;

    while (mutexes) {
        mutex = mutexes;
        /* rb_warn("mutex #<%p> remains to be locked by terminated thread",
                   mutexes); */
        mutexes = mutex->next_mutex;
        err = rb_mutex_unlock_th(mutex, th);
        if (err) rb_bug("invalid keeping_mutexes: %s", err);
    }
}

void
rb_thread_terminate_all(void)
{
    rb_thread_t *th = GET_THREAD(); /* main thread */
    rb_vm_t *vm = th->vm;

    if (vm->main_thread != th) {
        rb_bug("rb_thread_terminate_all: called by child thread (%p, %p)",
               (void *)vm->main_thread, (void *)th);
    }

    /* unlock all locking mutexes */
    rb_threadptr_unlock_all_locking_mutexes(th);

    thread_debug("rb_thread_terminate_all (main thread: %p)\n", (void *)th);
    st_foreach(vm->living_threads, terminate_i, (st_data_t)th);
    vm->inhibit_thread_creation = 1;

    while (!rb_thread_alone()) {
        PUSH_TAG();
        if (EXEC_TAG() == 0) {
            rb_thread_schedule();
        }
        else {
            /* ignore exception */
        }
        POP_TAG();
    }
}

static void
thread_cleanup_func_before_exec(void *th_ptr)
{
    rb_thread_t *th = th_ptr;
    th->status = THREAD_KILLED;
    th->machine_stack_start = th->machine_stack_end = 0;
#ifdef __ia64
    th->machine_register_stack_start = th->machine_register_stack_end = 0;
#endif
}

static void
thread_cleanup_func(void *th_ptr, int atfork)
{
    rb_thread_t *th = th_ptr;

    th->locking_mutex = Qfalse;
    thread_cleanup_func_before_exec(th_ptr);

    /*
     * Unfortunately, we can't release native threading resources at fork,
     * because libc may be in an unstable locking state; touching
     * a threading resource may therefore cause a deadlock.
     */
    if (atfork)
        return;

    native_mutex_destroy(&th->interrupt_lock);
    native_thread_destroy(th);
}

static VALUE rb_threadptr_raise(rb_thread_t *, int, VALUE *);

void
ruby_thread_init_stack(rb_thread_t *th)
{
    native_thread_init_stack(th);
}

static int
thread_start_func_2(rb_thread_t *th, VALUE *stack_start, VALUE *register_stack_start)
{
    int state;
    VALUE args = th->first_args;
    rb_proc_t *proc;
    rb_thread_t *join_th;
    rb_thread_t *main_th;
    VALUE errinfo = Qnil;
# ifdef USE_SIGALTSTACK
    void rb_register_sigaltstack(rb_thread_t *th);

    rb_register_sigaltstack(th);
# endif

    ruby_thread_set_native(th);

    th->machine_stack_start = stack_start;
#ifdef __ia64
    th->machine_register_stack_start = register_stack_start;
#endif
    thread_debug("thread start: %p\n", (void *)th);

    gvl_acquire(th->vm, th);
    {
        thread_debug("thread start (get lock): %p\n", (void *)th);
        rb_thread_set_current(th);

        TH_PUSH_TAG(th);
        if ((state = EXEC_TAG()) == 0) {
            SAVE_ROOT_JMPBUF(th, {
                if (!th->first_func) {
                    GetProcPtr(th->first_proc, proc);
                    th->errinfo = Qnil;
                    th->local_lfp = proc->block.lfp;
                    th->local_svar = Qnil;
                    th->value = rb_vm_invoke_proc(th, proc, proc->block.self,
                                                  (int)RARRAY_LEN(args), RARRAY_PTR(args), 0);
                }
                else {
                    th->value = (*th->first_func)((void *)args);
                }
            });
        }
        else {
            errinfo = th->errinfo;
            if (NIL_P(errinfo)) errinfo = rb_errinfo();
            if (state == TAG_FATAL) {
                /* fatal error within this thread, need to stop whole script */
            }
            else if (rb_obj_is_kind_of(errinfo, rb_eSystemExit)) {
                if (th->safe_level >= 4) {
                    th->errinfo = rb_exc_new3(rb_eSecurityError,
                                              rb_sprintf("Insecure exit at level %d", th->safe_level));
                    errinfo = Qnil;
                }
            }
            else if (th->safe_level < 4 &&
                     (th->vm->thread_abort_on_exception ||
                      th->abort_on_exception || RTEST(ruby_debug))) {
                /* exit on main_thread */
            }
            else {
                errinfo = Qnil;
            }
            th->value = Qnil;
        }

        th->status = THREAD_KILLED;
        thread_debug("thread end: %p\n", (void *)th);

        main_th = th->vm->main_thread;
        if (th != main_th) {
            if (TYPE(errinfo) == T_OBJECT) {
                /* treat with normal error object */
                rb_threadptr_raise(main_th, 1, &errinfo);
            }
        }
        TH_POP_TAG();

        /* locking_mutex must be Qfalse */
        if (th->locking_mutex != Qfalse) {
            rb_bug("thread_start_func_2: locking_mutex must not be set (%p:%"PRIxVALUE")",
                   (void *)th, th->locking_mutex);
        }

        /* delete self other than main thread from living_threads */
        if (th != main_th) {
            st_delete_wrap(th->vm->living_threads, th->self);
        }

        /* wake up joining threads */
        join_th = th->join_list_head;
        while (join_th) {
            if (join_th == main_th) errinfo = Qnil;
            rb_threadptr_interrupt(join_th);
            switch (join_th->status) {
              case THREAD_STOPPED: case THREAD_STOPPED_FOREVER:
                join_th->status = THREAD_RUNNABLE;
              default: break;
            }
            join_th = join_th->join_list_next;
        }

        rb_threadptr_unlock_all_locking_mutexes(th);
        if (th != main_th) rb_check_deadlock(th->vm);

        if (!th->root_fiber) {
            rb_thread_recycle_stack_release(th->stack);
            th->stack = 0;
        }
    }
    if (th->vm->main_thread == th) {
        ruby_cleanup(state);
    }
    else {
        thread_cleanup_func(th, FALSE);
        gvl_release(th->vm);
    }

    return 0;
}

static VALUE
thread_create_core(VALUE thval, VALUE args, VALUE (*fn)(ANYARGS))
{
    rb_thread_t *th;
    int err;

    if (OBJ_FROZEN(GET_THREAD()->thgroup)) {
        rb_raise(rb_eThreadError,
                 "can't start a new thread (frozen ThreadGroup)");
    }
    GetThreadPtr(thval, th);

    /* setup thread environment */
    th->first_func = fn;
    th->first_proc = fn ? Qfalse : rb_block_proc();
    th->first_args = args; /* GC: shouldn't put before above line */

    th->priority = GET_THREAD()->priority;
    th->thgroup = GET_THREAD()->thgroup;

    native_mutex_initialize(&th->interrupt_lock);
    if (GET_VM()->event_hooks != NULL)
        th->event_flags |= RUBY_EVENT_VM;

    /* kick thread */
    st_insert(th->vm->living_threads, thval, (st_data_t) th->thread_id);
    err = native_thread_create(th);
    if (err) {
        st_delete_wrap(th->vm->living_threads, th->self);
        th->status = THREAD_KILLED;
        rb_raise(rb_eThreadError, "can't create Thread (%d)", err);
    }
    return thval;
}

/* :nodoc: */
static VALUE
thread_s_new(int argc, VALUE *argv, VALUE klass)
{
    rb_thread_t *th;
    VALUE thread = rb_thread_alloc(klass);

    if (GET_VM()->inhibit_thread_creation)
        rb_raise(rb_eThreadError, "can't alloc thread");

    rb_obj_call_init(thread, argc, argv);
    GetThreadPtr(thread, th);
    if (!th->first_args) {
        rb_raise(rb_eThreadError, "uninitialized thread - check `%s#initialize'",
                 rb_class2name(klass));
    }
    return thread;
}

/*
 *  call-seq:
 *     Thread.start([args]*) {|args| block }   -> thread
 *     Thread.fork([args]*) {|args| block }    -> thread
 *
 *  Basically the same as <code>Thread::new</code>. However, if class
 *  <code>Thread</code> is subclassed, then calling <code>start</code> in that
 *  subclass will not invoke the subclass's <code>initialize</code> method.
 */

static VALUE
thread_start(VALUE klass, VALUE args)
{
    return thread_create_core(rb_thread_alloc(klass), args, 0);
}

/* :nodoc: */
static VALUE
thread_initialize(VALUE thread, VALUE args)
{
    rb_thread_t *th;
    if (!rb_block_given_p()) {
        rb_raise(rb_eThreadError, "must be called with a block");
    }
    GetThreadPtr(thread, th);
    if (th->first_args) {
        VALUE proc = th->first_proc, line, loc;
        const char *file;
        if (!proc || !RTEST(loc = rb_proc_location(proc))) {
            rb_raise(rb_eThreadError, "already initialized thread");
        }
        file = RSTRING_PTR(RARRAY_PTR(loc)[0]);
        if (NIL_P(line = RARRAY_PTR(loc)[1])) {
            rb_raise(rb_eThreadError, "already initialized thread - %s",
                     file);
        }
        rb_raise(rb_eThreadError, "already initialized thread - %s:%d",
                 file, NUM2INT(line));
    }
    return thread_create_core(thread, args, 0);
}

VALUE
rb_thread_create(VALUE (*fn)(ANYARGS), void *arg)
{
    return thread_create_core(rb_thread_alloc(rb_cThread), (VALUE)arg, fn);
}


/* +infty, for this purpose */
#define DELAY_INFTY 1E30

struct join_arg {
    rb_thread_t *target, *waiting;
    double limit;
    int forever;
};

static VALUE
remove_from_join_list(VALUE arg)
{
    struct join_arg *p = (struct join_arg *)arg;
    rb_thread_t *target_th = p->target, *th = p->waiting;

    if (target_th->status != THREAD_KILLED) {
        rb_thread_t **pth = &target_th->join_list_head;

        while (*pth) {
            if (*pth == th) {
                *pth = th->join_list_next;
                break;
            }
            pth = &(*pth)->join_list_next;
        }
    }

    return Qnil;
}

static VALUE
thread_join_sleep(VALUE arg)
{
    struct join_arg *p = (struct join_arg *)arg;
    rb_thread_t *target_th = p->target, *th = p->waiting;
    double now, limit = p->limit;

    while (target_th->status != THREAD_KILLED) {
        if (p->forever) {
            sleep_forever(th, 1);
        }
        else {
            now = timeofday();
            if (now > limit) {
                thread_debug("thread_join: timeout (thid: %p)\n",
                             (void *)target_th->thread_id);
                return Qfalse;
            }
            sleep_wait_for_interrupt(th, limit - now);
        }
        thread_debug("thread_join: interrupted (thid: %p)\n",
                     (void *)target_th->thread_id);
    }
    return Qtrue;
}

static VALUE
thread_join(rb_thread_t *target_th, double delay)
{
    rb_thread_t *th = GET_THREAD();
    struct join_arg arg;

    arg.target = target_th;
    arg.waiting = th;
    arg.limit = timeofday() + delay;
    arg.forever = delay == DELAY_INFTY;

    thread_debug("thread_join (thid: %p)\n", (void *)target_th->thread_id);

    if (target_th->status != THREAD_KILLED) {
        th->join_list_next = target_th->join_list_head;
        target_th->join_list_head = th;
        if (!rb_ensure(thread_join_sleep, (VALUE)&arg,
                       remove_from_join_list, (VALUE)&arg)) {
            return Qnil;
        }
    }

    thread_debug("thread_join: success (thid: %p)\n",
                 (void *)target_th->thread_id);

    if (target_th->errinfo != Qnil) {
        VALUE err = target_th->errinfo;

        if (FIXNUM_P(err)) {
            /* */
        }
        else if (TYPE(target_th->errinfo) == T_NODE) {
            rb_exc_raise(rb_vm_make_jump_tag_but_local_jump(
                GET_THROWOBJ_STATE(err), GET_THROWOBJ_VAL(err)));
        }
        else {
            /* normal exception */
            rb_exc_raise(err);
        }
    }
    return target_th->self;
}

/*
 *  call-seq:
 *     thr.join          -> thr
 *     thr.join(limit)   -> thr
 *
 *  The calling thread will suspend execution and run <i>thr</i>. Does not
 *  return until <i>thr</i> exits or until <i>limit</i> seconds have passed. If
 *  the time limit expires, <code>nil</code> will be returned, otherwise
 *  <i>thr</i> is returned.
 *
 *  Any threads not joined will be killed when the main program exits.  If
 *  <i>thr</i> had previously raised an exception and the
 *  <code>abort_on_exception</code> and <code>$DEBUG</code> flags are not set
 *  (so the exception has not yet been processed) it will be processed at this
 *  time.
 *
 *     a = Thread.new { print "a"; sleep(10); print "b"; print "c" }
 *     x = Thread.new { print "x"; Thread.pass; print "y"; print "z" }
 *     x.join # Let x thread finish, a will be killed on exit.
 *
 *  <em>produces:</em>
 *
 *     axyz
 *
 *  The following example illustrates the <i>limit</i> parameter.
 *
 *     y = Thread.new { 4.times { sleep 0.1; puts 'tick... ' }}
 *     puts "Waiting" until y.join(0.15)
 *
 *  <em>produces:</em>
 *
 *     tick...
 *     Waiting
 *     tick...
 *     Waitingtick...
 *
 *
 *     tick...
 */

static VALUE
thread_join_m(int argc, VALUE *argv, VALUE self)
{
    rb_thread_t *target_th;
    double delay = DELAY_INFTY;
    VALUE limit;

    GetThreadPtr(self, target_th);

    rb_scan_args(argc, argv, "01", &limit);
    if (!NIL_P(limit)) {
        delay = rb_num2dbl(limit);
    }

    return thread_join(target_th, delay);
}

/*
 *  call-seq:
 *     thr.value   -> obj
 *
 *  Waits for <i>thr</i> to complete (via <code>Thread#join</code>) and returns
 *  its value.
 *
 *     a = Thread.new { 2 + 2 }
 *     a.value   #=> 4
 */

static VALUE
thread_value(VALUE self)
{
    rb_thread_t *th;
    GetThreadPtr(self, th);
    thread_join(th, DELAY_INFTY);
    return th->value;
}

/*
 * Thread Scheduling
 */

static struct timeval
double2timeval(double d)
{
    struct timeval time;

    time.tv_sec = (int)d;
    time.tv_usec = (int)((d - (int)d) * 1e6);
    if (time.tv_usec < 0) {
        time.tv_usec += (int)1e6;
        time.tv_sec -= 1;
    }
    return time;
}

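/*
 * Worked example (not part of the original source): double2timeval(-0.25)
 * first truncates toward zero (tv_sec = 0, tv_usec = -250000); the
 * negative-usec fixup then yields {tv_sec = -1, tv_usec = 750000}, i.e.
 * -1 + 0.75 = -0.25 seconds, keeping tv_usec within [0, 1000000).
 */
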
static void
sleep_forever(rb_thread_t *th, int deadlockable)
{
    enum rb_thread_status prev_status = th->status;
    enum rb_thread_status status = deadlockable ? THREAD_STOPPED_FOREVER : THREAD_STOPPED;

    th->status = status;
    do {
        if (deadlockable) {
            th->vm->sleeper++;
            rb_check_deadlock(th->vm);
        }
        native_sleep(th, 0);
        if (deadlockable) {
            th->vm->sleeper--;
        }
        RUBY_VM_CHECK_INTS();
    } while (th->status == status);
    th->status = prev_status;
}

static void
getclockofday(struct timeval *tp)
{
#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
    struct timespec ts;

    if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0) {
        tp->tv_sec = ts.tv_sec;
        tp->tv_usec = ts.tv_nsec / 1000;
    } else
#endif
    {
        gettimeofday(tp, NULL);
    }
}

static void
sleep_timeval(rb_thread_t *th, struct timeval tv)
{
    struct timeval to, tvn;
    enum rb_thread_status prev_status = th->status;

    getclockofday(&to);
    to.tv_sec += tv.tv_sec;
    if ((to.tv_usec += tv.tv_usec) >= 1000000) {
        to.tv_sec++;
        to.tv_usec -= 1000000;
    }

    th->status = THREAD_STOPPED;
    do {
        native_sleep(th, &tv);
        RUBY_VM_CHECK_INTS();
        getclockofday(&tvn);
        if (to.tv_sec < tvn.tv_sec) break;
        if (to.tv_sec == tvn.tv_sec && to.tv_usec <= tvn.tv_usec) break;
        thread_debug("sleep_timeval: %ld.%.6ld > %ld.%.6ld\n",
                     (long)to.tv_sec, (long)to.tv_usec,
                     (long)tvn.tv_sec, (long)tvn.tv_usec);
        tv.tv_sec = to.tv_sec - tvn.tv_sec;
        if ((tv.tv_usec = to.tv_usec - tvn.tv_usec) < 0) {
            --tv.tv_sec;
            tv.tv_usec += 1000000;
        }
    } while (th->status == THREAD_STOPPED);
    th->status = prev_status;
}

void
rb_thread_sleep_forever(void)
{
    thread_debug("rb_thread_sleep_forever\n");
    sleep_forever(GET_THREAD(), 0);
}

static void
rb_thread_sleep_deadly(void)
{
    thread_debug("rb_thread_sleep_deadly\n");
    sleep_forever(GET_THREAD(), 1);
}

static double
timeofday(void)
{
#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
    struct timespec tp;

    if (clock_gettime(CLOCK_MONOTONIC, &tp) == 0) {
        return (double)tp.tv_sec + (double)tp.tv_nsec * 1e-9;
    } else
#endif
    {
        struct timeval tv;
        gettimeofday(&tv, NULL);
        return (double)tv.tv_sec + (double)tv.tv_usec * 1e-6;
    }
}

static void
sleep_wait_for_interrupt(rb_thread_t *th, double sleepsec)
{
    sleep_timeval(th, double2timeval(sleepsec));
}

static void
sleep_for_polling(rb_thread_t *th)
{
    struct timeval time;
    time.tv_sec = 0;
    time.tv_usec = 100 * 1000; /* 0.1 sec */
    sleep_timeval(th, time);
}

void
rb_thread_wait_for(struct timeval time)
{
    rb_thread_t *th = GET_THREAD();
    sleep_timeval(th, time);
}

void
rb_thread_polling(void)
{
    RUBY_VM_CHECK_INTS();
    if (!rb_thread_alone()) {
        rb_thread_t *th = GET_THREAD();
        sleep_for_polling(th);
    }
}

/*
 * CAUTION: This function causes thread switching.
 * rb_thread_check_ints() checks Ruby's pending interrupts.
 * Some interrupts need thread switching, invoking handlers,
 * and so on.
 */

void
rb_thread_check_ints(void)
{
    RUBY_VM_CHECK_INTS();
}

/*
 * Hidden API for the tcl/tk wrapper.
 * There is no guarantee that it will be kept.
 */
int
rb_thread_check_trap_pending(void)
{
    return rb_signal_buff_size() != 0;
}

/* This function can be called in a blocking region. */
int
rb_thread_interrupted(VALUE thval)
{
    rb_thread_t *th;
    GetThreadPtr(thval, th);
    return RUBY_VM_INTERRUPTED(th);
}

void
rb_thread_sleep(int sec)
{
    rb_thread_wait_for(rb_time_timeval(INT2FIX(sec)));
}

static void rb_threadptr_execute_interrupts_rec(rb_thread_t *th, int sched_depth);

static void
rb_thread_schedule_limits(unsigned long limits_us)
{
    thread_debug("rb_thread_schedule\n");
    if (!rb_thread_alone()) {
        rb_thread_t *th = GET_THREAD();

        if (th->running_time_us >= limits_us) {
            thread_debug("rb_thread_schedule/switch start\n");
            RB_GC_SAVE_MACHINE_CONTEXT(th);
            gvl_yield(th->vm, th);
            rb_thread_set_current(th);
            thread_debug("rb_thread_schedule/switch done\n");
        }
    }
}

void
rb_thread_schedule(void)
{
    rb_thread_schedule_limits(0);

    if (UNLIKELY(GET_THREAD()->interrupt_flag)) {
        rb_threadptr_execute_interrupts(GET_THREAD());
    }
}

/* blocking region */

static inline void
blocking_region_end(rb_thread_t *th, struct rb_blocking_region_buffer *region)
{
    gvl_acquire(th->vm, th);
    rb_thread_set_current(th);
    thread_debug("leave blocking region (%p)\n", (void *)th);
    remove_signal_thread_list(th);
    th->blocking_region_buffer = 0;
    reset_unblock_function(th, &region->oldubf);
    if (th->status == THREAD_STOPPED) {
        th->status = region->prev_status;
    }
}

struct rb_blocking_region_buffer *
rb_thread_blocking_region_begin(void)
{
    rb_thread_t *th = GET_THREAD();
    struct rb_blocking_region_buffer *region = ALLOC(struct rb_blocking_region_buffer);
    blocking_region_begin(th, region, ubf_select, th);
    return region;
}

void
rb_thread_blocking_region_end(struct rb_blocking_region_buffer *region)
{
    int saved_errno = errno;
    rb_thread_t *th = GET_THREAD();
    blocking_region_end(th, region);
    xfree(region);
    RUBY_VM_CHECK_INTS();
    errno = saved_errno;
}

/*
 * rb_thread_blocking_region - permit concurrent/parallel execution.
 *
 * This function does:
 *   (1) release GVL.
 *       Other Ruby threads may run in parallel.
 *   (2) call func with data1.
 *   (3) acquire GVL.
 *       Other Ruby threads can not run in parallel any more.
 *
 * If another thread interrupts this thread (Thread#kill, signal delivery,
 * VM-shutdown request, and so on), `ubf()' is called (`ubf()' means
 * "un-blocking function").  `ubf()' should interrupt the execution of
 * `func()'.
 *
 * There are built-in ubfs and you can specify them.
 * However, we can not guarantee that our built-in ubfs interrupt
 * your `func()' correctly.  Be careful when using rb_thread_blocking_region().
 *
 *   * RUBY_UBF_IO: ubf for IO operations
 *   * RUBY_UBF_PROCESS: ubf for process operations
 *
 * NOTE: You can not execute most of the Ruby C API nor touch Ruby
 *       objects in `func()' and `ubf()', including raising an
 *       exception, because the current thread doesn't hold the GVL
 *       (this causes synchronization problems).  If you need to do so,
 *       read the source code of the C APIs and confirm it by yourself.
 *
 * NOTE: In short, this API is difficult to use safely.  We recommend
 *       using other ways if you have them.  We lack experience with this
 *       API; please report any problems related to it.
 *
 * Safe C API:
 *   * rb_thread_interrupted() - check interrupt flag
 *   * ruby_xmalloc(), ruby_xrealloc(), ruby_xfree() -
 *       if they are called without the GVL, they acquire it automatically.
 */
VALUE
rb_thread_blocking_region(
    rb_blocking_function_t *func, void *data1,
    rb_unblock_function_t *ubf, void *data2)
{
    VALUE val;
    rb_thread_t *th = GET_THREAD();
    int saved_errno = 0;

    th->waiting_fd = -1;
    if (ubf == RUBY_UBF_IO || ubf == RUBY_UBF_PROCESS) {
        ubf = ubf_select;
        data2 = th;
    }

    BLOCKING_REGION({
        val = func(data1);
        saved_errno = errno;
    }, ubf, data2);
    errno = saved_errno;

    return val;
}

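/*
 * Illustrative sketch (not part of the original source): a C extension
 * would typically call rb_thread_blocking_region() as below.  The names
 * `blocking_read' and `read_abort' are hypothetical; RUBY_UBF_IO could
 * be passed instead of a custom ubf.
 *
 *     static VALUE
 *     blocking_read(void *data)           // runs without the GVL
 *     {
 *         int *fdp = data;
 *         char buf[256];
 *         return (VALUE)read(*fdp, buf, sizeof(buf));
 *     }
 *
 *     static void
 *     read_abort(void *data)              // ubf: interrupt blocking_read
 *     {
 *         close(*(int *)data);
 *     }
 *
 *     VALUE ret = rb_thread_blocking_region(blocking_read, &fd,
 *                                           read_abort, &fd);
 */
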
VALUE
rb_thread_io_blocking_region(rb_blocking_function_t *func, void *data1, int fd)
{
    VALUE val;
    rb_thread_t *th = GET_THREAD();
    int saved_errno = 0;

    th->waiting_fd = fd;
    BLOCKING_REGION({
        val = func(data1);
        saved_errno = errno;
    }, ubf_select, th);
    th->waiting_fd = -1;
    errno = saved_errno;

    return val;
}

/* alias of rb_thread_blocking_region() */

VALUE
rb_thread_call_without_gvl(
    rb_blocking_function_t *func, void *data1,
    rb_unblock_function_t *ubf, void *data2)
{
    return rb_thread_blocking_region(func, data1, ubf, data2);
}

/*
 * rb_thread_call_with_gvl - re-enter the Ruby world while releasing the GVL.
 *
 ***
 *** This API is EXPERIMENTAL!
 *** We do not guarantee that this API remains in ruby 1.9.2 or later.
 ***
 *
 * While the GVL is released by rb_thread_blocking_region() or
 * rb_thread_call_without_gvl(), you can not access Ruby values or invoke
 * methods.  If you need to access them, you must use this function,
 * rb_thread_call_with_gvl().
 *
 * rb_thread_call_with_gvl() does the following:
 * (1) acquire GVL.
 * (2) call the passed function `func'.
 * (3) release GVL.
 * (4) return the value returned at (2).
 *
 * NOTE: You should not return a Ruby object at (2) because such an
 *       object will not be marked.
 *
 * NOTE: If an exception is raised in `func', this function DOES NOT
 *       protect (catch) the exception.  If you have any resources
 *       which should be freed before throwing an exception, you need to
 *       use rb_protect() in `func' and return a value which represents
 *       that an exception was raised.
 *
 * NOTE: This function should not be called by a thread which was not
 *       created as a Ruby thread (i.e. by Thread.new or so).  In other
 *       words, this function *DOES NOT* associate a NON-Ruby thread
 *       with a Ruby thread.
 */
void *
rb_thread_call_with_gvl(void *(*func)(void *), void *data1)
{
    rb_thread_t *th = ruby_thread_from_native();
    struct rb_blocking_region_buffer *brb;
    struct rb_unblock_callback prev_unblock;
    void *r;

    if (th == 0) {
        /* An error occurred, but we can't use rb_bug()
         * because this thread is not Ruby's thread.
         * What should we do?
         */

        fprintf(stderr, "[BUG] rb_thread_call_with_gvl() is called by non-ruby thread\n");
        exit(EXIT_FAILURE);
    }

    brb = (struct rb_blocking_region_buffer *)th->blocking_region_buffer;
    prev_unblock = th->unblock;

    if (brb == 0) {
        rb_bug("rb_thread_call_with_gvl: called by a thread which has GVL.");
    }

    blocking_region_end(th, brb);
    /* enter the Ruby world: you can access Ruby values, methods and so on. */
    r = (*func)(data1);
    /* leave the Ruby world: you can not access Ruby values, etc. */
    blocking_region_begin(th, brb, prev_unblock.func, prev_unblock.arg);
    return r;
}

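/*
 * Illustrative sketch (not part of the original source): code that is
 * running without the GVL (e.g. under rb_thread_blocking_region()) can
 * re-enter the Ruby world like this; `log_it' is hypothetical.
 *
 *     static void *
 *     log_it(void *msg)                   // runs with the GVL held
 *     {
 *         rb_funcall(rb_stderr, rb_intern("puts"), 1,
 *                    rb_str_new_cstr((const char *)msg));
 *         return NULL;
 *     }
 *
 *     // ... inside code running without the GVL:
 *     rb_thread_call_with_gvl(log_it, (void *)"entering parser");
 */
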
/*
 * ruby_thread_has_gvl_p - check if the current native thread has the GVL.
 *
 ***
 *** This API is EXPERIMENTAL!
 *** We do not guarantee that this API remains in ruby 1.9.2 or later.
 ***
 */

int
ruby_thread_has_gvl_p(void)
{
    rb_thread_t *th = ruby_thread_from_native();

    if (th && th->blocking_region_buffer == 0) {
        return 1;
    }
    else {
        return 0;
    }
}

/*
 *  call-seq:
 *     Thread.pass   -> nil
 *
 *  Give the thread scheduler a hint to pass execution to another thread.
 *  A running thread may or may not switch; it depends on the OS and processor.
 */

static VALUE
thread_s_pass(VALUE klass)
{
    rb_thread_schedule();
    return Qnil;
}

/*
 *
 */

static void
rb_threadptr_execute_interrupts_rec(rb_thread_t *th, int sched_depth)
{
    rb_atomic_t interrupt;

    if (th->raised_flag) return;

    while ((interrupt = ATOMIC_EXCHANGE(th->interrupt_flag, 0)) != 0) {
        enum rb_thread_status status = th->status;
        int timer_interrupt = interrupt & 0x01;
        int finalizer_interrupt = interrupt & 0x04;
        int sig;

        th->status = THREAD_RUNNABLE;

        /* signal handling */
        if (th == th->vm->main_thread) {
            while ((sig = rb_get_next_signal()) != 0) {
                rb_signal_exec(th, sig);
            }
        }

        /* exception from another thread */
        if (th->thrown_errinfo) {
            VALUE err = th->thrown_errinfo;
            th->thrown_errinfo = 0;
            thread_debug("rb_thread_execute_interrupts: %"PRIdVALUE"\n", err);

            if (err == eKillSignal || err == eTerminateSignal) {
                th->errinfo = INT2FIX(TAG_FATAL);
                TH_JUMP_TAG(th, TAG_FATAL);
            }
            else {
                rb_exc_raise(err);
            }
        }
        th->status = status;

        if (finalizer_interrupt) {
            rb_gc_finalize_deferred();
        }

        if (timer_interrupt) {
            unsigned long limits_us = 250 * 1000;

            if (th->priority > 0)
                limits_us <<= th->priority;
            else
                limits_us >>= -th->priority;

            if (status == THREAD_RUNNABLE)
                th->running_time_us += TIME_QUANTUM_USEC;

            EXEC_EVENT_HOOK(th, RUBY_EVENT_SWITCH, th->cfp->self, 0, 0);

            rb_thread_schedule_limits(limits_us);
        }
    }
}

void
rb_threadptr_execute_interrupts(rb_thread_t *th)
{
    rb_threadptr_execute_interrupts_rec(th, 0);
}

void
rb_thread_interrupt(VALUE thval)
{
    rb_thread_t *th;
    GetThreadPtr(thval, th);
    rb_threadptr_interrupt(th);
}

void
rb_gc_mark_threads(void)
{
    rb_bug("deprecated function rb_gc_mark_threads is called");
}

/*****************************************************/

static void
rb_threadptr_ready(rb_thread_t *th)
{
    rb_threadptr_interrupt(th);
}

static VALUE
rb_threadptr_raise(rb_thread_t *th, int argc, VALUE *argv)
{
    VALUE exc;

  again:
    if (rb_threadptr_dead(th)) {
        return Qnil;
    }

    if (th->thrown_errinfo != 0 || th->raised_flag) {
        rb_thread_schedule();
        goto again;
    }

    exc = rb_make_exception(argc, argv);
    th->thrown_errinfo = exc;
    rb_threadptr_ready(th);
    return Qnil;
}

void
rb_threadptr_signal_raise(rb_thread_t *th, int sig)
{
    VALUE argv[2];

    argv[0] = rb_eSignal;
    argv[1] = INT2FIX(sig);
    rb_threadptr_raise(th->vm->main_thread, 2, argv);
}

void
rb_threadptr_signal_exit(rb_thread_t *th)
{
    VALUE argv[2];

    argv[0] = rb_eSystemExit;
    argv[1] = rb_str_new2("exit");
    rb_threadptr_raise(th->vm->main_thread, 2, argv);
}

#if defined(POSIX_SIGNAL) && defined(SIGSEGV) && defined(HAVE_SIGALTSTACK)
#define USE_SIGALTSTACK
#endif

void
ruby_thread_stack_overflow(rb_thread_t *th)
{
    th->raised_flag = 0;
#ifdef USE_SIGALTSTACK
    rb_exc_raise(sysstack_error);
#else
    th->errinfo = sysstack_error;
    TH_JUMP_TAG(th, TAG_RAISE);
#endif
}

int
rb_threadptr_set_raised(rb_thread_t *th)
{
    if (th->raised_flag & RAISED_EXCEPTION) {
        return 1;
    }
    th->raised_flag |= RAISED_EXCEPTION;
    return 0;
}

int
rb_threadptr_reset_raised(rb_thread_t *th)
{
    if (!(th->raised_flag & RAISED_EXCEPTION)) {
        return 0;
    }
    th->raised_flag &= ~RAISED_EXCEPTION;
    return 1;
}

#define THREAD_IO_WAITING_P(th) ( \
    ((th)->status == THREAD_STOPPED || \
     (th)->status == THREAD_STOPPED_FOREVER) && \
    (th)->blocking_region_buffer && \
    (th)->unblock.func == ubf_select && \
    1)

static int
thread_fd_close_i(st_data_t key, st_data_t val, st_data_t data)
{
    int fd = (int)data;
    rb_thread_t *th;
    GetThreadPtr((VALUE)key, th);

    if (THREAD_IO_WAITING_P(th)) {
        native_mutex_lock(&th->interrupt_lock);
        if (THREAD_IO_WAITING_P(th) && th->waiting_fd == fd) {
            th->thrown_errinfo = th->vm->special_exceptions[ruby_error_closed_stream];
            RUBY_VM_SET_INTERRUPT(th);
            (th->unblock.func)(th->unblock.arg);
        }
        native_mutex_unlock(&th->interrupt_lock);
    }
    return ST_CONTINUE;
}

void
rb_thread_fd_close(int fd)
{
    st_foreach(GET_THREAD()->vm->living_threads, thread_fd_close_i, (st_index_t)fd);
}

/*
 *  call-seq:
 *     thr.raise
 *     thr.raise(string)
 *     thr.raise(exception [, string [, array]])
 *
 *  Raises an exception (see <code>Kernel::raise</code>) from <i>thr</i>. The
 *  caller does not have to be <i>thr</i>.
 *
 *     Thread.abort_on_exception = true
 *     a = Thread.new { sleep(200) }
 *     a.raise("Gotcha")
 *
 *  <em>produces:</em>
 *
 *     prog.rb:3: Gotcha (RuntimeError)
 *     	from prog.rb:2:in `initialize'
 *     	from prog.rb:2:in `new'
 *     	from prog.rb:2
 */

static VALUE
thread_raise_m(int argc, VALUE *argv, VALUE self)
{
    rb_thread_t *th;
    GetThreadPtr(self, th);
    rb_threadptr_raise(th, argc, argv);
    return Qnil;
}


/*
 *  call-seq:
 *     thr.exit        -> thr or nil
 *     thr.kill        -> thr or nil
 *     thr.terminate   -> thr or nil
 *
 *  Terminates <i>thr</i> and schedules another thread to be run. If this
 *  thread is already marked to be killed, <code>exit</code> returns the
 *  <code>Thread</code>. If this is the main thread, or the last thread, exits
 *  the process.
 */

VALUE
rb_thread_kill(VALUE thread)
{
    rb_thread_t *th;

    GetThreadPtr(thread, th);

    if (th != GET_THREAD() && th->safe_level < 4) {
        rb_secure(4);
    }
    if (th->status == THREAD_TO_KILL || th->status == THREAD_KILLED) {
        return thread;
    }
    if (th == th->vm->main_thread) {
        rb_exit(EXIT_SUCCESS);
    }

    thread_debug("rb_thread_kill: %p (%p)\n", (void *)th, (void *)th->thread_id);

    rb_threadptr_interrupt(th);
    th->thrown_errinfo = eKillSignal;
    th->status = THREAD_TO_KILL;

    return thread;
}


/*
 *  call-seq:
 *     Thread.kill(thread)   -> thread
 *
 *  Causes the given <em>thread</em> to exit (see <code>Thread::exit</code>).
 *
 *     count = 0
 *     a = Thread.new { loop { count += 1 } }
 *     sleep(0.1)       #=> 0
 *     Thread.kill(a)   #=> #<Thread:0x401b3d30 dead>
 *     count            #=> 93947
 *     a.alive?         #=> false
 */

static VALUE
rb_thread_s_kill(VALUE obj, VALUE th)
{
    return rb_thread_kill(th);
}


/*
 *  call-seq:
 *     Thread.exit   -> thread
 *
 *  Terminates the currently running thread and schedules another thread to be
 *  run. If this thread is already marked to be killed, <code>exit</code>
 *  returns the <code>Thread</code>. If this is the main thread, or the last
 *  thread, exits the process.
 */

static VALUE
rb_thread_exit(void)
{
    return rb_thread_kill(GET_THREAD()->self);
}


/*
 *  call-seq:
 *     thr.wakeup   -> thr
 *
 *  Marks <i>thr</i> as eligible for scheduling (it may still remain blocked on
 *  I/O, however). Does not invoke the scheduler (see <code>Thread#run</code>).
 *
 *     c = Thread.new { Thread.stop; puts "hey!" }
 *     sleep 0.1 while c.status!='sleep'
 *     c.wakeup
 *     c.join
 *
 *  <em>produces:</em>
 *
 *     hey!
 */

VALUE
rb_thread_wakeup(VALUE thread)
{
    if (!RTEST(rb_thread_wakeup_alive(thread))) {
        rb_raise(rb_eThreadError, "killed thread");
    }
    return thread;
}

VALUE
rb_thread_wakeup_alive(VALUE thread)
{
    rb_thread_t *th;
    GetThreadPtr(thread, th);

    if (th->status == THREAD_KILLED) {
        return Qnil;
    }
    rb_threadptr_ready(th);
    if (th->status != THREAD_TO_KILL) {
        th->status = THREAD_RUNNABLE;
    }
    return thread;
}


/*
 *  call-seq:
 *     thr.run   -> thr
 *
 *  Wakes up <i>thr</i>, making it eligible for scheduling.
 *
 *     a = Thread.new { puts "a"; Thread.stop; puts "c" }
 *     sleep 0.1 while a.status!='sleep'
 *     puts "Got here"
 *     a.run
 *     a.join
 *
 *  <em>produces:</em>
 *
 *     a
 *     Got here
 *     c
 */

VALUE
rb_thread_run(VALUE thread)
{
    rb_thread_wakeup(thread);
    rb_thread_schedule();
    return thread;
}


/*
 *  call-seq:
 *     Thread.stop   -> nil
 *
 *  Stops execution of the current thread, putting it into a ``sleep'' state,
 *  and schedules execution of another thread.
 *
 *     a = Thread.new { print "a"; Thread.stop; print "c" }
 *     sleep 0.1 while a.status!='sleep'
 *     print "b"
 *     a.run
 *     a.join
 *
 *  <em>produces:</em>
 *
 *     abc
 */

VALUE
rb_thread_stop(void)
{
    if (rb_thread_alone()) {
        rb_raise(rb_eThreadError,
                 "stopping only thread\n\tnote: use sleep to stop forever");
    }
    rb_thread_sleep_deadly();
    return Qnil;
}

static int
thread_list_i(st_data_t key, st_data_t val, void *data)
{
    VALUE ary = (VALUE)data;
    rb_thread_t *th;
    GetThreadPtr((VALUE)key, th);

    switch (th->status) {
      case THREAD_RUNNABLE:
      case THREAD_STOPPED:
      case THREAD_STOPPED_FOREVER:
      case THREAD_TO_KILL:
        rb_ary_push(ary, th->self);
      default:
        break;
    }
    return ST_CONTINUE;
}

/********************************************************************/

/*
 *  call-seq:
 *     Thread.list   -> array
 *
 *  Returns an array of <code>Thread</code> objects for all threads that are
 *  either runnable or stopped.
 *
 *     Thread.new { sleep(200) }
 *     Thread.new { 1000000.times {|i| i*i } }
 *     Thread.new { Thread.stop }
 *     Thread.list.each {|t| p t}
 *
 *  <em>produces:</em>
 *
 *     #<Thread:0x401b3e84 sleep>
 *     #<Thread:0x401b3f38 run>
 *     #<Thread:0x401b3fb0 sleep>
 *     #<Thread:0x401bdf4c run>
 */

VALUE
rb_thread_list(void)
{
    VALUE ary = rb_ary_new();
    st_foreach(GET_THREAD()->vm->living_threads, thread_list_i, ary);
    return ary;
}

VALUE
rb_thread_current(void)
{
    return GET_THREAD()->self;
}

/*
 *  call-seq:
 *     Thread.current   -> thread
 *
 *  Returns the currently executing thread.
 *
 *     Thread.current   #=> #<Thread:0x401bdf4c run>
 */

static VALUE
thread_s_current(VALUE klass)
{
    return rb_thread_current();
}

VALUE
rb_thread_main(void)
{
    return GET_THREAD()->vm->main_thread->self;
}

/*
 *  call-seq:
 *     Thread.main   -> thread
 *
 *  Returns the main thread.
 */

static VALUE
rb_thread_s_main(VALUE klass)
{
    return rb_thread_main();
}


/*
 *  call-seq:
 *     Thread.abort_on_exception   -> true or false
 *
 *  Returns the status of the global ``abort on exception'' condition.  The
 *  default is <code>false</code>. When set to <code>true</code>, or if the
 *  global <code>$DEBUG</code> flag is <code>true</code> (perhaps because the
 *  command line option <code>-d</code> was specified) all threads will abort
 *  (the process will <code>exit(0)</code>) if an exception is raised in any
 *  thread. See also <code>Thread::abort_on_exception=</code>.
 */

static VALUE
rb_thread_s_abort_exc(void)
{
    return GET_THREAD()->vm->thread_abort_on_exception ? Qtrue : Qfalse;
}


/*
 *  call-seq:
 *     Thread.abort_on_exception= boolean   -> true or false
 *
 *  When set to <code>true</code>, all threads will abort if an exception is
 *  raised. Returns the new state.
 *
 *     Thread.abort_on_exception = true
 *     t1 = Thread.new do
 *       puts  "In new thread"
 *       raise "Exception from thread"
 *     end
 *     sleep(1)
 *     puts "not reached"
 *
 *  <em>produces:</em>
 *
 *     In new thread
 *     prog.rb:4: Exception from thread (RuntimeError)
 *     	from prog.rb:2:in `initialize'
 *     	from prog.rb:2:in `new'
 *     	from prog.rb:2
 */

static VALUE
rb_thread_s_abort_exc_set(VALUE self, VALUE val)
{
    rb_secure(4);
    GET_THREAD()->vm->thread_abort_on_exception = RTEST(val);
    return val;
}


/*
 *  call-seq:
 *     thr.abort_on_exception   -> true or false
 *
 *  Returns the status of the thread-local ``abort on exception'' condition for
 *  <i>thr</i>. The default is <code>false</code>. See also
 *  <code>Thread::abort_on_exception=</code>.
 */

static VALUE
rb_thread_abort_exc(VALUE thread)
{
    rb_thread_t *th;
    GetThreadPtr(thread, th);
    return th->abort_on_exception ? Qtrue : Qfalse;
}


/*
 *  call-seq:
 *     thr.abort_on_exception= boolean   -> true or false
 *
 *  When set to <code>true</code>, causes all threads (including the main
 *  program) to abort if an exception is raised in <i>thr</i>. The process will
 *  effectively <code>exit(0)</code>.
 */

static VALUE
rb_thread_abort_exc_set(VALUE thread, VALUE val)
{
    rb_thread_t *th;
    rb_secure(4);

    GetThreadPtr(thread, th);
    th->abort_on_exception = RTEST(val);
    return val;
}


/*
 *  call-seq:
 *     thr.group   -> thgrp or nil
 *
 *  Returns the <code>ThreadGroup</code> which contains <i>thr</i>, or nil if
 *  the thread is not a member of any group.
 *
 *     Thread.main.group   #=> #<ThreadGroup:0x4029d914>
 */

VALUE
rb_thread_group(VALUE thread)
{
    rb_thread_t *th;
    VALUE group;
    GetThreadPtr(thread, th);
    group = th->thgroup;

    if (!group) {
        group = Qnil;
    }
    return group;
}

static const char *
thread_status_name(enum rb_thread_status status)
{
    switch (status) {
      case THREAD_RUNNABLE:
        return "run";
      case THREAD_STOPPED:
      case THREAD_STOPPED_FOREVER:
        return "sleep";
      case THREAD_TO_KILL:
        return "aborting";
      case THREAD_KILLED:
        return "dead";
      default:
        return "unknown";
    }
}

static int
rb_threadptr_dead(rb_thread_t *th)
{
    return th->status == THREAD_KILLED;
}


/*
 *  call-seq:
 *     thr.status   -> string, false or nil
 *
 *  Returns the status of <i>thr</i>: ``<code>sleep</code>'' if <i>thr</i> is
 *  sleeping or waiting on I/O, ``<code>run</code>'' if <i>thr</i> is executing,
 *  ``<code>aborting</code>'' if <i>thr</i> is aborting, <code>false</code> if
 *  <i>thr</i> terminated normally, and <code>nil</code> if <i>thr</i>
 *  terminated with an exception.
 *
 *     a = Thread.new { raise("die now") }
 *     b = Thread.new { Thread.stop }
 *     c = Thread.new { Thread.exit }
 *     d = Thread.new { sleep }
 *     d.kill                  #=> #<Thread:0x401b3678 aborting>
 *     a.status                #=> nil
 *     b.status                #=> "sleep"
 *     c.status                #=> false
 *     d.status                #=> "aborting"
 *     Thread.current.status   #=> "run"
 */

static VALUE
thread_status(VALUE thread)
{
    rb_thread_t *th;
    GetThreadPtr(thread, th);

    if (rb_threadptr_dead(th)) {
        if (!NIL_P(th->errinfo) && !FIXNUM_P(th->errinfo)
            /* TODO */ ) {
            return Qnil;
        }
        return Qfalse;
    }
    return rb_str_new2(thread_status_name(th->status));
}


/*
 *  call-seq:
 *     thr.alive?   -> true or false
 *
 *  Returns <code>true</code> if <i>thr</i> is running or sleeping.
 *
 *     thr = Thread.new { }
 *     thr.join                #=> #<Thread:0x401b3fb0 dead>
 *     Thread.current.alive?   #=> true
 *     thr.alive?              #=> false
 */

static VALUE
thread_alive_p(VALUE thread)
{
    rb_thread_t *th;
    GetThreadPtr(thread, th);

    if (rb_threadptr_dead(th))
        return Qfalse;
    return Qtrue;
}

/*
 *  call-seq:
 *     thr.stop?   -> true or false
 *
 *  Returns <code>true</code> if <i>thr</i> is dead or sleeping.
 *
 *     a = Thread.new { Thread.stop }
 *     b = Thread.current
 *     a.stop?   #=> true
 *     b.stop?   #=> false
 */

static VALUE
thread_stop_p(VALUE thread)
{
    rb_thread_t *th;
    GetThreadPtr(thread, th);

    if (rb_threadptr_dead(th))
        return Qtrue;
    if (th->status == THREAD_STOPPED || th->status == THREAD_STOPPED_FOREVER)
        return Qtrue;
    return Qfalse;
}

/*
 *  call-seq:
 *     thr.safe_level   -> integer
 *
 *  Returns the safe level in effect for <i>thr</i>. Setting thread-local safe
 *  levels can help when implementing sandboxes which run insecure code.
 *
 *     thr = Thread.new { $SAFE = 3; sleep }
 *     Thread.current.safe_level   #=> 0
 *     thr.safe_level              #=> 3
 */

static VALUE
thread_safe_level(VALUE thread)
{
    rb_thread_t *th;
    GetThreadPtr(thread, th);

    return INT2NUM(th->safe_level);
}

/*
 * call-seq:
 *   thr.inspect   -> string
 *
 * Dump the name, id, and status of _thr_ to a string.
 */

static VALUE
thread_inspect(VALUE thread)
{
    const char *cname = rb_obj_classname(thread);
    rb_thread_t *th;
    const char *status;
    VALUE str;

    GetThreadPtr(thread, th);
    status = thread_status_name(th->status);
    str = rb_sprintf("#<%s:%p %s>", cname, (void *)thread, status);
    OBJ_INFECT(str, thread);

    return str;
}

VALUE
rb_thread_local_aref(VALUE thread, ID id)
{
    rb_thread_t *th;
    st_data_t val;

    GetThreadPtr(thread, th);
    if (rb_safe_level() >= 4 && th != GET_THREAD()) {
        rb_raise(rb_eSecurityError, "Insecure: thread locals");
    }
    if (!th->local_storage) {
        return Qnil;
    }
    if (st_lookup(th->local_storage, id, &val)) {
        return (VALUE)val;
    }
    return Qnil;
}

/*
 *  call-seq:
 *      thr[sym]   -> obj or nil
 *
 *  Attribute Reference---Returns the value of a thread-local variable, using
 *  either a symbol or a string name. If the specified variable does not exist,
 *  returns <code>nil</code>.
 *
 *     [
 *       Thread.new { Thread.current["name"] = "A" },
 *       Thread.new { Thread.current[:name]  = "B" },
 *       Thread.new { Thread.current["name"] = "C" }
 *     ].each do |th|
 *       th.join
 *       puts "#{th.inspect}: #{th[:name]}"
 *     end
 *
 *  <em>produces:</em>
 *
 *     #<Thread:0x00000002a54220 dead>: A
 *     #<Thread:0x00000002a541a8 dead>: B
 *     #<Thread:0x00000002a54130 dead>: C
 */

static VALUE
thread_aref(VALUE thread, VALUE id)
{
    return rb_thread_local_aref(thread, rb_to_id(id));
}

VALUE
rb_thread_local_aset(VALUE thread, ID id, VALUE val)
{
    rb_thread_t *th;
    GetThreadPtr(thread, th);

    if (rb_safe_level() >= 4 && th != GET_THREAD()) {
        rb_raise(rb_eSecurityError, "Insecure: can't modify thread locals");
    }
    if (OBJ_FROZEN(thread)) {
        rb_error_frozen("thread locals");
    }
    if (!th->local_storage) {
        th->local_storage = st_init_numtable();
    }
    if (NIL_P(val)) {
        st_delete_wrap(th->local_storage, id);
        return Qnil;
    }
    st_insert(th->local_storage, id, val);
    return val;
}

/*
 *  call-seq:
 *      thr[sym] = obj   -> obj
 *
 *  Attribute Assignment---Sets or creates the value of a thread-local variable,
 *  using either a symbol or a string. See also <code>Thread#[]</code>.
 */

static VALUE
thread_aset(VALUE self, VALUE id, VALUE val)
{
    return rb_thread_local_aset(self, rb_to_id(id), val);
}

/*
 *  call-seq:
 *     thr.key?(sym)   -> true or false
 *
 *  Returns <code>true</code> if the given string (or symbol) exists as a
 *  thread-local variable.
 *
 *     me = Thread.current
 *     me[:oliver] = "a"
 *     me.key?(:oliver)    #=> true
 *     me.key?(:stanley)   #=> false
 */

static VALUE
thread_key_p(VALUE self, VALUE key)
{
    rb_thread_t *th;
    ID id = rb_to_id(key);

    GetThreadPtr(self, th);

    if (!th->local_storage) {
        return Qfalse;
    }
    if (st_lookup(th->local_storage, id, 0)) {
        return Qtrue;
    }
    return Qfalse;
}

static int
thread_keys_i(ID key, VALUE value, VALUE ary)
{
    rb_ary_push(ary, ID2SYM(key));
    return ST_CONTINUE;
}

static int
vm_living_thread_num(rb_vm_t *vm)
{
    return (int)vm->living_threads->num_entries;
}

int
rb_thread_alone(void)
{
    int num = 1;
    if (GET_THREAD()->vm->living_threads) {
        num = vm_living_thread_num(GET_THREAD()->vm);
        thread_debug("rb_thread_alone: %d\n", num);
    }
    return num == 1;
}

/*
 *  call-seq:
 *     thr.keys   -> array
 *
 *  Returns an array of the names of the thread-local variables (as Symbols).
 *
 *     thr = Thread.new do
 *       Thread.current[:cat] = 'meow'
 *       Thread.current["dog"] = 'woof'
 *     end
 *     thr.join   #=> #<Thread:0x401b3f10 dead>
 *     thr.keys   #=> [:dog, :cat]
 */

static VALUE
rb_thread_keys(VALUE self)
{
    rb_thread_t *th;
    VALUE ary = rb_ary_new();
    GetThreadPtr(self, th);

    if (th->local_storage) {
        st_foreach(th->local_storage, thread_keys_i, ary);
    }
    return ary;
}

/*
 *  call-seq:
 *     thr.priority   -> integer
 *
 *  Returns the priority of <i>thr</i>. The default is inherited from the
 *  thread that created the new thread, and is zero for the initial main
 *  thread; higher-priority threads will run more frequently than
 *  lower-priority threads (but lower-priority threads can also run).
 *
 *  This is just a hint for the Ruby thread scheduler.  It may be ignored on
 *  some platforms.
 *
 *     Thread.current.priority   #=> 0
 */

static VALUE
rb_thread_priority(VALUE thread)
{
    rb_thread_t *th;
    GetThreadPtr(thread, th);
    return INT2NUM(th->priority);
}


/*
 *  call-seq:
 *     thr.priority= integer   -> thr
 *
 *  Sets the priority of <i>thr</i> to <i>integer</i>. Higher-priority threads
 *  will run more frequently than lower-priority threads (but lower-priority
 *  threads can also run).
 *
 *  This is just a hint for the Ruby thread scheduler.  It may be ignored on
 *  some platforms.
 *
 *     count1 = count2 = 0
 *     a = Thread.new do
 *           loop { count1 += 1 }
 *         end
 *     a.priority = -1
 *
 *     b = Thread.new do
 *           loop { count2 += 1 }
 *         end
 *     b.priority = -2
 *     sleep 1   #=> 1
 *     count1    #=> 622504
 *     count2    #=> 5832
 */

static VALUE
rb_thread_priority_set(VALUE thread, VALUE prio)
{
    rb_thread_t *th;
    int priority;
    GetThreadPtr(thread, th);

    rb_secure(4);

#if USE_NATIVE_THREAD_PRIORITY
    th->priority = NUM2INT(prio);
    native_thread_apply_priority(th);
#else
    priority = NUM2INT(prio);
    if (priority > RUBY_THREAD_PRIORITY_MAX) {
        priority = RUBY_THREAD_PRIORITY_MAX;
    }
    else if (priority < RUBY_THREAD_PRIORITY_MIN) {
        priority = RUBY_THREAD_PRIORITY_MIN;
    }
    th->priority = priority;
#endif
    return INT2NUM(th->priority);
}

/* for IO */

#if defined(NFDBITS) && defined(HAVE_RB_FD_INIT)

/*
 * several Unix platforms support file descriptors bigger than FD_SETSIZE
 * in select(2) system call.
 *
 * - Linux 2.2.12 (?)
 * - NetBSD 1.2 (src/sys/kern/sys_generic.c:1.25)
 *   select(2) documents how to allocate fd_set dynamically.
 *   http://netbsd.gw.com/cgi-bin/man-cgi?select++NetBSD-4.0
 * - FreeBSD 2.2 (src/sys/kern/sys_generic.c:1.19)
 * - OpenBSD 2.0 (src/sys/kern/sys_generic.c:1.4)
 *   select(2) documents how to allocate fd_set dynamically.
 *   http://www.openbsd.org/cgi-bin/man.cgi?query=select&manpath=OpenBSD+4.4
 * - HP-UX documents how to allocate fd_set dynamically.
 *   http://docs.hp.com/en/B2355-60105/select.2.html
 * - Solaris 8 has select_large_fdset
 *
 * When fd_set is not big enough to hold big file descriptors,
 * it should be allocated dynamically.
 * Note that this assumes fd_set is structured as a bitmap.
 *
 * rb_fd_init allocates the memory.
 * rb_fd_term frees the memory.
 * rb_fd_set may re-allocate the bitmap.
 *
 * So rb_fd_set doesn't reject file descriptors bigger than FD_SETSIZE.
 */

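/*
 * Illustrative sketch (not part of the original source): the rb_fdset_t
 * API below replaces the raw fd_set macros, e.g.:
 *
 *     rb_fdset_t fds;
 *     rb_fd_init(&fds);
 *     rb_fd_set(fd, &fds);                  // grows the bitmap as needed
 *     if (rb_fd_select(fd + 1, &fds, NULL, NULL, NULL) > 0 &&
 *         rb_fd_isset(fd, &fds)) {
 *         // fd is readable
 *     }
 *     rb_fd_term(&fds);                     // releases the bitmap
 */
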
void
rb_fd_init(rb_fdset_t *fds)
{
    fds->maxfd = 0;
    fds->fdset = ALLOC(fd_set);
    FD_ZERO(fds->fdset);
}

void
rb_fd_init_copy(rb_fdset_t *dst, rb_fdset_t *src)
{
    size_t size = howmany(rb_fd_max(src), NFDBITS) * sizeof(fd_mask);

    if (size < sizeof(fd_set))
        size = sizeof(fd_set);
    dst->maxfd = src->maxfd;
    dst->fdset = xmalloc(size);
    memcpy(dst->fdset, src->fdset, size);
}

2344 void
2345 rb_fd_term(rb_fdset_t *fds)
2346 {
2347  if (fds->fdset) xfree(fds->fdset);
2348  fds->maxfd = 0;
2349  fds->fdset = 0;
2350 }
2351 
2352 void
2353 rb_fd_zero(rb_fdset_t *fds)
2354 {
2355  if (fds->fdset)
2356  MEMZERO(fds->fdset, fd_mask, howmany(fds->maxfd, NFDBITS));
2357 }
2358 
2359 static void
2360 rb_fd_resize(int n, rb_fdset_t *fds)
2361 {
2362  size_t m = howmany(n + 1, NFDBITS) * sizeof(fd_mask);
2363  size_t o = howmany(fds->maxfd, NFDBITS) * sizeof(fd_mask);
2364 
2365  if (m < sizeof(fd_set)) m = sizeof(fd_set);
2366  if (o < sizeof(fd_set)) o = sizeof(fd_set);
2367 
2368  if (m > o) {
2369  fds->fdset = xrealloc(fds->fdset, m);
2370  memset((char *)fds->fdset + o, 0, m - o);
2371  }
2372  if (n >= fds->maxfd) fds->maxfd = n + 1;
2373 }
2374 
2375 void
2376 rb_fd_set(int n, rb_fdset_t *fds)
2377 {
2378  rb_fd_resize(n, fds);
2379  FD_SET(n, fds->fdset);
2380 }
2381 
2382 void
2383 rb_fd_clr(int n, rb_fdset_t *fds)
2384 {
2385  if (n >= fds->maxfd) return;
2386  FD_CLR(n, fds->fdset);
2387 }
2388 
2389 int
2390 rb_fd_isset(int n, const rb_fdset_t *fds)
2391 {
2392  if (n >= fds->maxfd) return 0;
2393  return FD_ISSET(n, fds->fdset) != 0; /* "!= 0" avoids FreeBSD PR 91421 */
2394 }
2395 
2396 void
2397 rb_fd_copy(rb_fdset_t *dst, const fd_set *src, int max)
2398 {
2399  size_t size = howmany(max, NFDBITS) * sizeof(fd_mask);
2400 
2401  if (size < sizeof(fd_set)) size = sizeof(fd_set);
2402  dst->maxfd = max;
2403  dst->fdset = xrealloc(dst->fdset, size);
2404  memcpy(dst->fdset, src, size);
2405 }
2406 
2407 static void
2408 rb_fd_rcopy(fd_set *dst, rb_fdset_t *src)
2409 {
2410  size_t size = howmany(rb_fd_max(src), NFDBITS) * sizeof(fd_mask);
2411 
2412  if (size > sizeof(fd_set)) {
2413  rb_raise(rb_eArgError, "too large fdsets");
2414  }
2415  memcpy(dst, rb_fd_ptr(src), sizeof(fd_set));
2416 }
2417 
2418 void
2419 rb_fd_dup(rb_fdset_t *dst, const rb_fdset_t *src)
2420 {
2421  size_t size = howmany(rb_fd_max(src), NFDBITS) * sizeof(fd_mask);
2422 
2423  if (size < sizeof(fd_set))
2424  size = sizeof(fd_set);
2425  dst->maxfd = src->maxfd;
2426  dst->fdset = xrealloc(dst->fdset, size);
2427  memcpy(dst->fdset, src->fdset, size);
2428 }
2429 
2430 int
2431 rb_fd_select(int n, rb_fdset_t *readfds, rb_fdset_t *writefds, rb_fdset_t *exceptfds, struct timeval *timeout)
2432 {
2433  fd_set *r = NULL, *w = NULL, *e = NULL;
2434  if (readfds) {
2435  rb_fd_resize(n - 1, readfds);
2436  r = rb_fd_ptr(readfds);
2437  }
2438  if (writefds) {
2439  rb_fd_resize(n - 1, writefds);
2440  w = rb_fd_ptr(writefds);
2441  }
2442  if (exceptfds) {
2443  rb_fd_resize(n - 1, exceptfds);
2444  e = rb_fd_ptr(exceptfds);
2445  }
2446  return select(n, r, w, e, timeout);
2447 }
2448 
2449 #undef FD_ZERO
2450 #undef FD_SET
2451 #undef FD_CLR
2452 #undef FD_ISSET
2453 
2454 #define FD_ZERO(f) rb_fd_zero(f)
2455 #define FD_SET(i, f) rb_fd_set((i), (f))
2456 #define FD_CLR(i, f) rb_fd_clr((i), (f))
2457 #define FD_ISSET(i, f) rb_fd_isset((i), (f))
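/*
 * From here on, the FD_* macros operate on rb_fdset_t rather than fd_set,
 * so the select() code below transparently handles descriptors above
 * FD_SETSIZE.
 */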
2458 
2459 #elif defined(_WIN32)
2460 
2461 void
2462 rb_fd_init(rb_fdset_t *set)
2463 {
2464  set->capa = FD_SETSIZE;
2465  set->fdset = ALLOC(fd_set);
2466  FD_ZERO(set->fdset);
2467 }
2468 
2469 void
2470 rb_fd_init_copy(rb_fdset_t *dst, rb_fdset_t *src)
2471 {
2472  rb_fd_init(dst);
2473  rb_fd_dup(dst, src);
2474 }
2475 
2476 static void
2477 rb_fd_rcopy(fd_set *dst, rb_fdset_t *src)
2478 {
2479  int max = rb_fd_max(src);
2480 
2481  /* we assume src is the result of select() with dst, so dst should be
2482  * larger than or equal to src. */
2483  if (max > FD_SETSIZE || max > dst->fd_count) {
2484  rb_raise(rb_eArgError, "too large fdsets");
2485  }
2486 
2487  memcpy(dst->fd_array, src->fdset->fd_array, max);
2488  dst->fd_count = max;
2489 }
2490 
2491 void
2492 rb_fd_term(rb_fdset_t *set)
2493 {
2494  xfree(set->fdset);
2495  set->fdset = NULL;
2496  set->capa = 0;
2497 }
2498 
2499 void
2500 rb_fd_set(int fd, rb_fdset_t *set)
2501 {
2502  unsigned int i;
2503  SOCKET s = rb_w32_get_osfhandle(fd);
2504 
2505  for (i = 0; i < set->fdset->fd_count; i++) {
2506  if (set->fdset->fd_array[i] == s) {
2507  return;
2508  }
2509  }
2510  if (set->fdset->fd_count >= (unsigned)set->capa) {
2511  set->capa = (set->fdset->fd_count / FD_SETSIZE + 1) * FD_SETSIZE;
2512  set->fdset = xrealloc(set->fdset, sizeof(unsigned int) + sizeof(SOCKET) * set->capa);
2513  }
2514  set->fdset->fd_array[set->fdset->fd_count++] = s;
2515 }
2516 
2517 #undef FD_ZERO
2518 #undef FD_SET
2519 #undef FD_CLR
2520 #undef FD_ISSET
2521 
2522 #define FD_ZERO(f) rb_fd_zero(f)
2523 #define FD_SET(i, f) rb_fd_set((i), (f))
2524 #define FD_CLR(i, f) rb_fd_clr((i), (f))
2525 #define FD_ISSET(i, f) rb_fd_isset((i), (f))
2526 
2527 #else
2528 #define rb_fd_rcopy(d, s) (*(d) = *(s))
2529 #endif
2530 
2531 #if defined(__CYGWIN__)
2532 static long
2533 cmp_tv(const struct timeval *a, const struct timeval *b)
2534 {
2535  long d = (a->tv_sec - b->tv_sec);
2536  return (d != 0) ? d : (a->tv_usec - b->tv_usec);
2537 }
2538 
2539 static int
2540 subtract_tv(struct timeval *rest, const struct timeval *wait)
2541 {
2542  if (rest->tv_sec < wait->tv_sec) {
2543  return 0;
2544  }
2545  while (rest->tv_usec < wait->tv_usec) {
2546  if (rest->tv_sec <= wait->tv_sec) {
2547  return 0;
2548  }
2549  rest->tv_sec -= 1;
2550  rest->tv_usec += 1000 * 1000;
2551  }
2552  rest->tv_sec -= wait->tv_sec;
2553  rest->tv_usec -= wait->tv_usec;
2554  return rest->tv_sec != 0 || rest->tv_usec != 0;
2555 }
2556 #endif
2557 
2558 static int
2559 do_select(int n, rb_fdset_t *read, rb_fdset_t *write, rb_fdset_t *except,
2560  struct timeval *timeout)
2561 {
2562  int result, lerrno;
2563  rb_fdset_t UNINITIALIZED_VAR(orig_read);
2564  rb_fdset_t UNINITIALIZED_VAR(orig_write);
2565  rb_fdset_t UNINITIALIZED_VAR(orig_except);
2566  double limit = 0;
2567  struct timeval wait_rest;
2568 # if defined(__CYGWIN__)
2569  struct timeval start_time;
2570 # endif
2571 
2572  if (timeout) {
2573 # if defined(__CYGWIN__)
2574  gettimeofday(&start_time, NULL);
2575  limit = (double)start_time.tv_sec + (double)start_time.tv_usec*1e-6;
2576 # else
2577  limit = timeofday();
2578 # endif
2579  limit += (double)timeout->tv_sec+(double)timeout->tv_usec*1e-6;
2580  wait_rest = *timeout;
2581  timeout = &wait_rest;
2582  }
2583 
2584  if (read)
2585  rb_fd_init_copy(&orig_read, read);
2586  if (write)
2587  rb_fd_init_copy(&orig_write, write);
2588  if (except)
2589  rb_fd_init_copy(&orig_except, except);
2590 
2591  retry:
2592  lerrno = 0;
2593 
2594 #if defined(__CYGWIN__)
2595  {
2596  int finish = 0;
2597  /* polling duration: 100ms */
2598  struct timeval wait_100ms, *wait;
2599  wait_100ms.tv_sec = 0;
2600  wait_100ms.tv_usec = 100 * 1000; /* 100 ms */
2601 
2602  do {
2603  wait = (timeout == 0 || cmp_tv(&wait_100ms, timeout) < 0) ? &wait_100ms : timeout;
2604  BLOCKING_REGION({
2605  do {
2606  result = rb_fd_select(n, read, write, except, wait);
2607  if (result < 0) lerrno = errno;
2608  if (result != 0) break;
2609 
2610  if (read)
2611  rb_fd_dup(read, &orig_read);
2612  if (write)
2613  rb_fd_dup(write, &orig_write);
2614  if (except)
2615  rb_fd_dup(except, &orig_except);
2616  if (timeout) {
2617  struct timeval elapsed;
2618  gettimeofday(&elapsed, NULL);
2619  subtract_tv(&elapsed, &start_time);
2620  gettimeofday(&start_time, NULL);
2621  if (!subtract_tv(timeout, &elapsed)) {
2622  finish = 1;
2623  break;
2624  }
2625  if (cmp_tv(&wait_100ms, timeout) > 0) wait = timeout;
2626  }
2627  } while (__th->interrupt_flag == 0);
2628  }, 0, 0);
2629  } while (result == 0 && !finish);
2630  }
2631 #elif defined(_WIN32)
2632  {
2633  rb_thread_t *th = GET_THREAD();
2634  BLOCKING_REGION({
2635  result = native_fd_select(n, read, write, except, timeout, th);
2636  if (result < 0) lerrno = errno;
2637  }, ubf_select, th);
2638  }
2639 #else
2640  BLOCKING_REGION({
2641  result = rb_fd_select(n, read, write, except, timeout);
2642  if (result < 0) lerrno = errno;
2643  }, ubf_select, GET_THREAD());
2644 #endif
2645 
2646  errno = lerrno;
2647 
2648  if (result < 0) {
2649  switch (errno) {
2650  case EINTR:
2651 #ifdef ERESTART
2652  case ERESTART:
2653 #endif
2654  if (read)
2655  rb_fd_dup(read, &orig_read);
2656  if (write)
2657  rb_fd_dup(write, &orig_write);
2658  if (except)
2659  rb_fd_dup(except, &orig_except);
2660 
2661  if (timeout) {
2662  double d = limit - timeofday();
2663 
2664  wait_rest.tv_sec = (unsigned int)d;
2665  wait_rest.tv_usec = (int)((d-(double)wait_rest.tv_sec)*1e6);
2666  if (wait_rest.tv_sec < 0) wait_rest.tv_sec = 0;
2667  if (wait_rest.tv_usec < 0) wait_rest.tv_usec = 0;
2668  }
2669 
2670  goto retry;
2671  default:
2672  break;
2673  }
2674  }
2675 
2676  if (read)
2677  rb_fd_term(&orig_read);
2678  if (write)
2679  rb_fd_term(&orig_write);
2680  if (except)
2681  rb_fd_term(&orig_except);
2682 
2683  return result;
2684 }
2685 
2686 static void
2687 rb_thread_wait_fd_rw(int fd, int read)
2688 {
2689  int result = 0;
2690  int events = read ? RB_WAITFD_IN : RB_WAITFD_OUT;
2691 
2692  thread_debug("rb_thread_wait_fd_rw(%d, %s)\n", fd, read ? "read" : "write");
2693 
2694  if (fd < 0) {
2695  rb_raise(rb_eIOError, "closed stream");
2696  }
2697  if (rb_thread_alone()) return;
2698  while (result <= 0) {
2699  result = rb_wait_for_single_fd(fd, events, NULL);
2700 
2701  if (result < 0) {
2702  rb_sys_fail(0);
2703  }
2704  }
2705 
2706  thread_debug("rb_thread_wait_fd_rw(%d, %s): done\n", fd, read ? "read" : "write");
2707 }
2708 
2709 void
2710 rb_thread_wait_fd(int fd)
2711 {
2712  rb_thread_wait_fd_rw(fd, 1);
2713 }
2714 
2715 int
2716 rb_thread_fd_writable(int fd)
2717 {
2718  rb_thread_wait_fd_rw(fd, 0);
2719  return TRUE;
2720 }
2721 
2722 int
2723 rb_thread_select(int max, fd_set * read, fd_set * write, fd_set * except,
2724  struct timeval *timeout)
2725 {
2726  rb_fdset_t fdsets[3];
2727  rb_fdset_t *rfds = NULL;
2728  rb_fdset_t *wfds = NULL;
2729  rb_fdset_t *efds = NULL;
2730  int retval;
2731 
2732  if (read) {
2733  rfds = &fdsets[0];
2734  rb_fd_init(rfds);
2735  rb_fd_copy(rfds, read, max);
2736  }
2737  if (write) {
2738  wfds = &fdsets[1];
2739  rb_fd_init(wfds);
2740  rb_fd_copy(wfds, write, max);
2741  }
2742  if (except) {
2743  efds = &fdsets[2];
2744  rb_fd_init(efds);
2745  rb_fd_copy(efds, except, max);
2746  }
2747 
2748  retval = rb_thread_fd_select(max, rfds, wfds, efds, timeout);
2749 
2750  if (rfds) {
2751  rb_fd_rcopy(read, rfds);
2752  rb_fd_term(rfds);
2753  }
2754  if (wfds) {
2755  rb_fd_rcopy(write, wfds);
2756  rb_fd_term(wfds);
2757  }
2758  if (efds) {
2759  rb_fd_rcopy(except, efds);
2760  rb_fd_term(efds);
2761  }
2762 
2763  return retval;
2764 }
2765 
2766 int
2767 rb_thread_fd_select(int max, rb_fdset_t * read, rb_fdset_t * write, rb_fdset_t * except,
2768  struct timeval *timeout)
2769 {
2770  if (!read && !write && !except) {
2771  if (!timeout) {
2772  rb_thread_sleep_forever();
2773  return 0;
2774  }
2775  rb_thread_wait_for(*timeout);
2776  return 0;
2777  }
2778 
2779  if (read) {
2780  rb_fd_resize(max - 1, read);
2781  }
2782  if (write) {
2783  rb_fd_resize(max - 1, write);
2784  }
2785  if (except) {
2786  rb_fd_resize(max - 1, except);
2787  }
2788  return do_select(max, read, write, except, timeout);
2789 }
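/*
 * Usage sketch (assumes a valid descriptor fd): waiting until fd becomes
 * readable via the rb_fdset_t interface, which, unlike rb_thread_select()
 * above, is not limited by FD_SETSIZE.
 *
 *   rb_fdset_t rfds;
 *   rb_fd_init(&rfds);
 *   rb_fd_set(fd, &rfds);
 *   rb_thread_fd_select(fd + 1, &rfds, NULL, NULL, NULL); // NULL: no timeout
 *   rb_fd_term(&rfds);
 */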
2790 
2791 /*
2792  * poll() is supported by many OSes, but so far Linux is the only
2793  * one we know of that supports using poll() in all places select()
2794  * would work.
2795  */
2796 #if defined(HAVE_POLL) && defined(linux)
2797 # define USE_POLL
2798 #endif
2799 
2800 #ifdef USE_POLL
2801 
2802 /* Same as the Linux kernel. TODO: make a platform-independent definition. */
2803 #define POLLIN_SET (POLLRDNORM | POLLRDBAND | POLLIN | POLLHUP | POLLERR)
2804 #define POLLOUT_SET (POLLWRBAND | POLLWRNORM | POLLOUT | POLLERR)
2805 #define POLLEX_SET (POLLPRI)
2806 
2807 #define TIMET_MAX (~(time_t)0 <= 0 ? (time_t)((~(unsigned_time_t)0) >> 1) : (time_t)(~(unsigned_time_t)0))
2808 #define TIMET_MIN (~(time_t)0 <= 0 ? (time_t)(((unsigned_time_t)1) << (sizeof(time_t) * CHAR_BIT - 1)) : (time_t)0)
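/*
 * The expressions above probe the signedness of time_t at compile time:
 * if ~(time_t)0 <= 0, time_t is signed, so the maximum is all ones shifted
 * right by one and the minimum is just the sign bit; otherwise time_t is
 * unsigned and the range is [0, ~0].
 */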
2809 
2810 #ifndef HAVE_PPOLL
2811 /* TODO: don't ignore sigmask */
2812 int ppoll(struct pollfd *fds, nfds_t nfds,
2813  const struct timespec *ts, const sigset_t *sigmask)
2814 {
2815  int timeout_ms;
2816 
2817  if (ts) {
2818  int tmp, tmp2;
2819 
2820  if (ts->tv_sec > TIMET_MAX/1000)
2821  timeout_ms = -1;
2822  else {
2823  tmp = ts->tv_sec * 1000;
2824  tmp2 = ts->tv_nsec / (1000 * 1000);
2825  if (TIMET_MAX - tmp < tmp2)
2826  timeout_ms = -1;
2827  else
2828  timeout_ms = tmp + tmp2;
2829  }
2830  } else
2831  timeout_ms = -1;
2832 
2833  return poll(fds, nfds, timeout_ms);
2834 }
2835 #endif
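/*
 * Worked example for the fallback above: ts = { 1, 500000000 } (1.5s)
 * gives tmp = 1000 and tmp2 = 500, so poll() runs with timeout_ms = 1500.
 * A tv_sec too large to convert collapses to -1, i.e. an infinite wait.
 */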
2836 
2837 /*
2838  * returns a mask of events
2839  */
2840 int
2841 rb_wait_for_single_fd(int fd, int events, struct timeval *tv)
2842 {
2843  struct pollfd fds;
2844  int result, lerrno;
2845  double limit = 0;
2846  struct timespec ts;
2847  struct timespec *timeout = NULL;
2848 
2849  if (tv) {
2850  ts.tv_sec = tv->tv_sec;
2851  ts.tv_nsec = tv->tv_usec * 1000;
2852  limit = timeofday();
2853  limit += (double)tv->tv_sec + (double)tv->tv_usec * 1e-6;
2854  timeout = &ts;
2855  }
2856 
2857  fds.fd = fd;
2858  fds.events = (short)events;
2859 
2860 retry:
2861  lerrno = 0;
2862  BLOCKING_REGION({
2863  result = ppoll(&fds, 1, timeout, NULL);
2864  if (result < 0) lerrno = errno;
2865  }, ubf_select, GET_THREAD());
2866 
2867  if (result < 0) {
2868  errno = lerrno;
2869  switch (errno) {
2870  case EINTR:
2871 #ifdef ERESTART
2872  case ERESTART:
2873 #endif
2874  if (timeout) {
2875  double d = limit - timeofday();
2876 
2877  ts.tv_sec = (long)d;
2878  ts.tv_nsec = (long)((d - (double)ts.tv_sec) * 1e9);
2879  if (ts.tv_sec < 0)
2880  ts.tv_sec = 0;
2881  if (ts.tv_nsec < 0)
2882  ts.tv_nsec = 0;
2883  }
2884  goto retry;
2885  }
2886  return -1;
2887  }
2888 
2889  if (fds.revents & POLLNVAL) {
2890  errno = EBADF;
2891  return -1;
2892  }
2893 
2894  /*
2895  * POLLIN and POLLOUT have different meanings from select(2)'s read/write bits,
2896  * so we need to fix up the result.
2897  */
2898  result = 0;
2899  if (fds.revents & POLLIN_SET)
2900  result |= RB_WAITFD_IN;
2901  if (fds.revents & POLLOUT_SET)
2902  result |= RB_WAITFD_OUT;
2903  if (fds.revents & POLLEX_SET)
2904  result |= RB_WAITFD_PRI;
2905 
2906  return result;
2907 }
2908 #else /* ! USE_POLL - implement rb_io_poll_fd() using select() */
2909 static rb_fdset_t *init_set_fd(int fd, rb_fdset_t *fds)
2910 {
2911  rb_fd_init(fds);
2912  rb_fd_set(fd, fds);
2913 
2914  return fds;
2915 }
2916 
2917 struct select_args {
2918  union {
2919  int fd;
2920  int error;
2921  } as;
2922  rb_fdset_t *read;
2923  rb_fdset_t *write;
2924  rb_fdset_t *except;
2925  struct timeval *tv;
2926 };
2927 
2928 static VALUE
2929 select_single(VALUE ptr)
2930 {
2931  struct select_args *args = (struct select_args *)ptr;
2932  int r;
2933 
2934  r = rb_thread_fd_select(args->as.fd + 1,
2935  args->read, args->write, args->except, args->tv);
2936  if (r == -1)
2937  args->as.error = errno;
2938  if (r > 0) {
2939  r = 0;
2940  if (args->read && rb_fd_isset(args->as.fd, args->read))
2941  r |= RB_WAITFD_IN;
2942  if (args->write && rb_fd_isset(args->as.fd, args->write))
2943  r |= RB_WAITFD_OUT;
2944  if (args->except && rb_fd_isset(args->as.fd, args->except))
2945  r |= RB_WAITFD_PRI;
2946  }
2947  return (VALUE)r;
2948 }
2949 
2950 static VALUE
2951 select_single_cleanup(VALUE ptr)
2952 {
2953  struct select_args *args = (struct select_args *)ptr;
2954 
2955  if (args->read) rb_fd_term(args->read);
2956  if (args->write) rb_fd_term(args->write);
2957  if (args->except) rb_fd_term(args->except);
2958 
2959  return (VALUE)-1;
2960 }
2961 
2962 int
2963 rb_wait_for_single_fd(int fd, int events, struct timeval *tv)
2964 {
2965  rb_fdset_t rfds, wfds, efds;
2966  struct select_args args;
2967  int r;
2968  VALUE ptr = (VALUE)&args;
2969 
2970  args.as.fd = fd;
2971  args.read = (events & RB_WAITFD_IN) ? init_set_fd(fd, &rfds) : NULL;
2972  args.write = (events & RB_WAITFD_OUT) ? init_set_fd(fd, &wfds) : NULL;
2973  args.except = (events & RB_WAITFD_PRI) ? init_set_fd(fd, &efds) : NULL;
2974  args.tv = tv;
2975 
2976  r = (int)rb_ensure(select_single, ptr, select_single_cleanup, ptr);
2977  if (r == -1)
2978  errno = args.as.error;
2979 
2980  return r;
2981 }
2982 #endif /* ! USE_POLL */
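/*
 * Usage sketch valid for either implementation above (fd and the 1.5
 * second timeout are hypothetical):
 *
 *   struct timeval tv = { 1, 500000 };
 *   int revents = rb_wait_for_single_fd(fd, RB_WAITFD_IN, &tv);
 *   if (revents < 0)                  { ... inspect errno ... }
 *   else if (revents & RB_WAITFD_IN)  { ... fd is readable ... }
 *   else                              { ... timed out (revents == 0) ... }
 */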
2983 
2984 /*
2985  * for GC
2986  */
2987 
2988 #ifdef USE_CONSERVATIVE_STACK_END
2989 void
2990 rb_gc_set_stack_end(VALUE **stack_end_p)
2991 {
2992  VALUE stack_end;
2993  *stack_end_p = &stack_end;
2994 }
2995 #endif
2996 
2997 void
2998 rb_gc_save_machine_context(rb_thread_t *th)
2999 {
3000  FLUSH_REGISTER_WINDOWS;
3001 #ifdef __ia64
3002  th->machine_register_stack_end = rb_ia64_bsp();
3003 #endif
3004  setjmp(th->machine_regs);
3005 }
3006 
3007 /*
3008  *
3009  */
3010 
3011 void
3012 rb_threadptr_check_signal(rb_thread_t *mth)
3013 {
3014  /* mth must be main_thread */
3015  if (rb_signal_buff_size() > 0) {
3016  /* wakeup main thread */
3017  rb_threadptr_interrupt(mth);
3018  }
3019 }
3020 
3021 static void
3022 timer_thread_function(void *arg)
3023 {
3024  rb_vm_t *vm = GET_VM(); /* TODO: fix me for Multi-VM */
3025 
3026  /* for time slice */
3027  RUBY_VM_SET_TIMER_INTERRUPT(vm->running_thread);
3028 
3029  /* check signal */
3030  rb_threadptr_check_signal(vm->main_thread);
3031 
3032 #if 0
3033  /* prove profiler */
3034  if (vm->prove_profile.enable) {
3035  rb_thread_t *th = vm->running_thread;
3036 
3037  if (vm->during_gc) {
3038  /* GC prove profiling */
3039  }
3040  }
3041 #endif
3042 }
3043 
3044 void
3045 rb_thread_stop_timer_thread(int close_anyway)
3046 {
3047  if (timer_thread_id && native_stop_timer_thread(close_anyway)) {
3048  native_reset_timer_thread();
3049  }
3050 }
3051 
3052 void
3053 rb_thread_reset_timer_thread(void)
3054 {
3055  native_reset_timer_thread();
3056 }
3057 
3058 void
3059 rb_thread_start_timer_thread(void)
3060 {
3061  system_working = 1;
3062  rb_thread_create_timer_thread();
3063 }
3064 
3065 static int
3066 clear_coverage_i(st_data_t key, st_data_t val, st_data_t dummy)
3067 {
3068  int i;
3069  VALUE lines = (VALUE)val;
3070 
3071  for (i = 0; i < RARRAY_LEN(lines); i++) {
3072  if (RARRAY_PTR(lines)[i] != Qnil) {
3073  RARRAY_PTR(lines)[i] = INT2FIX(0);
3074  }
3075  }
3076  return ST_CONTINUE;
3077 }
3078 
3079 static void
3080 clear_coverage(void)
3081 {
3082  VALUE coverages = rb_get_coverages();
3083  if (RTEST(coverages)) {
3084  st_foreach(RHASH_TBL(coverages), clear_coverage_i, 0);
3085  }
3086 }
3087 
3088 static void
3089 rb_thread_atfork_internal(int (*atfork)(st_data_t, st_data_t, st_data_t))
3090 {
3091  rb_thread_t *th = GET_THREAD();
3092  rb_vm_t *vm = th->vm;
3093  VALUE thval = th->self;
3094  vm->main_thread = th;
3095 
3096  gvl_atfork(th->vm);
3097  st_foreach(vm->living_threads, atfork, (st_data_t)th);
3098  st_clear(vm->living_threads);
3099  st_insert(vm->living_threads, thval, (st_data_t)th->thread_id);
3100  vm->sleeper = 0;
3101  clear_coverage();
3102 }
3103 
3104 static int
3105 terminate_atfork_i(st_data_t key, st_data_t val, st_data_t current_th)
3106 {
3107  VALUE thval = key;
3108  rb_thread_t *th;
3109  GetThreadPtr(thval, th);
3110 
3111  if (th != (rb_thread_t *)current_th) {
3112  if (th->keeping_mutexes) {
3113  rb_mutex_abandon_all(th->keeping_mutexes);
3114  }
3115  th->keeping_mutexes = NULL;
3116  thread_cleanup_func(th, TRUE);
3117  }
3118  return ST_CONTINUE;
3119 }
3120 
3121 void
3122 rb_thread_atfork(void)
3123 {
3124  rb_thread_atfork_internal(terminate_atfork_i);
3125  GET_THREAD()->join_list_head = 0;
3126 
3127  /* We don't want to reproduce CVE-2003-0900. */
3128  rb_reset_random_seed();
3129 }
3130 
3131 static int
3132 terminate_atfork_before_exec_i(st_data_t key, st_data_t val, st_data_t current_th)
3133 {
3134  VALUE thval = key;
3135  rb_thread_t *th;
3136  GetThreadPtr(thval, th);
3137 
3138  if (th != (rb_thread_t *)current_th) {
3139  thread_cleanup_func_before_exec(th);
3140  }
3141  return ST_CONTINUE;
3142 }
3143 
3144 void
3145 rb_thread_atfork_before_exec(void)
3146 {
3147  rb_thread_atfork_internal(terminate_atfork_before_exec_i);
3148 }
3149 
3150 struct thgroup {
3151  int enclosed;
3152  VALUE group;
3153 };
3154 
3155 static size_t
3156 thgroup_memsize(const void *ptr)
3157 {
3158  return ptr ? sizeof(struct thgroup) : 0;
3159 }
3160 
3161 static const rb_data_type_t thgroup_data_type = {
3162  "thgroup",
3163  {NULL, NULL, thgroup_memsize,},
3164 };
3165 
3166 /*
3167  * Document-class: ThreadGroup
3168  *
3169  * <code>ThreadGroup</code> provides a means of keeping track of a number of
3170  * threads as a group. A <code>Thread</code> can belong to only one
3171  * <code>ThreadGroup</code> at a time; adding a thread to a new group will
3172  * remove it from any previous group.
3173  *
3174  * Newly created threads belong to the same group as the thread from which they
3175  * were created.
3176  */
3177 
3178 static VALUE
3179 thgroup_s_alloc(VALUE klass)
3180 {
3181  VALUE group;
3182  struct thgroup *data;
3183 
3184  group = TypedData_Make_Struct(klass, struct thgroup, &thgroup_data_type, data);
3185  data->enclosed = 0;
3186  data->group = group;
3187 
3188  return group;
3189 }
3190 
3191 struct thgroup_list_params {
3192  VALUE ary;
3193  VALUE group;
3194 };
3195 
3196 static int
3197 thgroup_list_i(st_data_t key, st_data_t val, st_data_t data)
3198 {
3199  VALUE thread = (VALUE)key;
3200  VALUE ary = ((struct thgroup_list_params *)data)->ary;
3201  VALUE group = ((struct thgroup_list_params *)data)->group;
3202  rb_thread_t *th;
3203  GetThreadPtr(thread, th);
3204 
3205  if (th->thgroup == group) {
3206  rb_ary_push(ary, thread);
3207  }
3208  return ST_CONTINUE;
3209 }
3210 
3211 /*
3212  * call-seq:
3213  * thgrp.list -> array
3214  *
3215  * Returns an array of all existing <code>Thread</code> objects that belong to
3216  * this group.
3217  *
3218  * ThreadGroup::Default.list #=> [#<Thread:0x401bdf4c run>]
3219  */
3220 
3221 static VALUE
3222 thgroup_list(VALUE group)
3223 {
3224  VALUE ary = rb_ary_new();
3225  struct thgroup_list_params param;
3226 
3227  param.ary = ary;
3228  param.group = group;
3229  st_foreach(GET_THREAD()->vm->living_threads, thgroup_list_i, (st_data_t) & param);
3230  return ary;
3231 }
3232 
3233 
3234 /*
3235  * call-seq:
3236  * thgrp.enclose -> thgrp
3237  *
3238  * Prevents threads from being added to or removed from the receiving
3239  * <code>ThreadGroup</code>. New threads can still be started in an enclosed
3240  * <code>ThreadGroup</code>.
3241  *
3242  * ThreadGroup::Default.enclose #=> #<ThreadGroup:0x4029d914>
3243  * thr = Thread::new { Thread.stop } #=> #<Thread:0x402a7210 sleep>
3244  * tg = ThreadGroup::new #=> #<ThreadGroup:0x402752d4>
3245  * tg.add thr
3246  *
3247  * <em>produces:</em>
3248  *
3249  * ThreadError: can't move from the enclosed thread group
3250  */
3251 
3252 static VALUE
3253 thgroup_enclose(VALUE group)
3254 {
3255  struct thgroup *data;
3256 
3257  TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
3258  data->enclosed = 1;
3259 
3260  return group;
3261 }
3262 
3263 
3264 /*
3265  * call-seq:
3266  * thgrp.enclosed? -> true or false
3267  *
3268  * Returns <code>true</code> if <em>thgrp</em> is enclosed. See also
3269  * ThreadGroup#enclose.
3270  */
3271 
3272 static VALUE
3273 thgroup_enclosed_p(VALUE group)
3274 {
3275  struct thgroup *data;
3276 
3277  TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
3278  if (data->enclosed)
3279  return Qtrue;
3280  return Qfalse;
3281 }
3282 
3283 
3284 /*
3285  * call-seq:
3286  * thgrp.add(thread) -> thgrp
3287  *
3288  * Adds the given <em>thread</em> to this group, removing it from any other
3289  * group to which it may have previously belonged.
3290  *
3291  * puts "Initial group is #{ThreadGroup::Default.list}"
3292  * tg = ThreadGroup.new
3293  * t1 = Thread.new { sleep }
3294  * t2 = Thread.new { sleep }
3295  * puts "t1 is #{t1}"
3296  * puts "t2 is #{t2}"
3297  * tg.add(t1)
3298  * puts "Initial group now #{ThreadGroup::Default.list}"
3299  * puts "tg group now #{tg.list}"
3300  *
3301  * <em>produces:</em>
3302  *
3303  * Initial group is #<Thread:0x401bdf4c>
3304  * t1 is #<Thread:0x401b3c90>
3305  * t2 is #<Thread:0x401b3c18>
3306  * Initial group now #<Thread:0x401b3c18>#<Thread:0x401bdf4c>
3307  * tg group now #<Thread:0x401b3c90>
3308  */
3309 
3310 static VALUE
3311 thgroup_add(VALUE group, VALUE thread)
3312 {
3313  rb_thread_t *th;
3314  struct thgroup *data;
3315 
3316  rb_secure(4);
3317  GetThreadPtr(thread, th);
3318 
3319  if (OBJ_FROZEN(group)) {
3320  rb_raise(rb_eThreadError, "can't move to the frozen thread group");
3321  }
3322  TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
3323  if (data->enclosed) {
3324  rb_raise(rb_eThreadError, "can't move to the enclosed thread group");
3325  }
3326 
3327  if (!th->thgroup) {
3328  return Qnil;
3329  }
3330 
3331  if (OBJ_FROZEN(th->thgroup)) {
3332  rb_raise(rb_eThreadError, "can't move from the frozen thread group");
3333  }
3334  TypedData_Get_Struct(th->thgroup, struct thgroup, &thgroup_data_type, data);
3333  }
3335  if (data->enclosed) {
3336  rb_raise(rb_eThreadError,
3337  "can't move from the enclosed thread group");
3338  }
3339 
3340  th->thgroup = group;
3341  return group;
3342 }
3343 
3344 
3345 /*
3346  * Document-class: Mutex
3347  *
3348  * Mutex implements a simple semaphore that can be used to coordinate access to
3349  * shared data from multiple concurrent threads.
3350  *
3351  * Example:
3352  *
3353  * require 'thread'
3354  * semaphore = Mutex.new
3355  *
3356  * a = Thread.new {
3357  * semaphore.synchronize {
3358  * # access shared resource
3359  * }
3360  * }
3361  *
3362  * b = Thread.new {
3363  * semaphore.synchronize {
3364  * # access shared resource
3365  * }
3366  * }
3367  *
3368  */
3369 
3370 #define GetMutexPtr(obj, tobj) \
3371  TypedData_Get_Struct((obj), rb_mutex_t, &mutex_data_type, (tobj))
3372 
3373 static const char *rb_mutex_unlock_th(rb_mutex_t *mutex, rb_thread_t volatile *th);
3374 
3375 #define mutex_mark NULL
3376 
3377 static void
3378 mutex_free(void *ptr)
3379 {
3380  if (ptr) {
3381  rb_mutex_t *mutex = ptr;
3382  if (mutex->th) {
3383  /* rb_warn("free locked mutex"); */
3384  const char *err = rb_mutex_unlock_th(mutex, mutex->th);
3385  if (err) rb_bug("%s", err);
3386  }
3387  native_mutex_destroy(&mutex->lock);
3388  native_cond_destroy(&mutex->cond);
3389  }
3390  ruby_xfree(ptr);
3391 }
3392 
3393 static size_t
3394 mutex_memsize(const void *ptr)
3395 {
3396  return ptr ? sizeof(rb_mutex_t) : 0;
3397 }
3398 
3399 static const rb_data_type_t mutex_data_type = {
3400  "mutex",
3401  {mutex_mark, mutex_free, mutex_memsize,},
3402 };
3403 
3404 VALUE
3405 rb_obj_is_mutex(VALUE obj)
3406 {
3407  if (rb_typeddata_is_kind_of(obj, &mutex_data_type)) {
3408  return Qtrue;
3409  }
3410  else {
3411  return Qfalse;
3412  }
3413 }
3414 
3415 static VALUE
3416 mutex_alloc(VALUE klass)
3417 {
3418  VALUE volatile obj;
3419  rb_mutex_t *mutex;
3420 
3421  obj = TypedData_Make_Struct(klass, rb_mutex_t, &mutex_data_type, mutex);
3422  native_mutex_initialize(&mutex->lock);
3423  native_cond_initialize(&mutex->cond, RB_CONDATTR_CLOCK_MONOTONIC);
3424  return obj;
3425 }
3426 
3427 /*
3428  * call-seq:
3429  * Mutex.new -> mutex
3430  *
3431  * Creates a new Mutex
3432  */
3433 static VALUE
3434 mutex_initialize(VALUE self)
3435 {
3436  return self;
3437 }
3438 
3439 VALUE
3440 rb_mutex_new(void)
3441 {
3442  return mutex_alloc(rb_cMutex);
3443 }
3444 
3445 /*
3446  * call-seq:
3447  * mutex.locked? -> true or false
3448  *
3449  * Returns +true+ if this lock is currently held by some thread.
3450  */
3451 VALUE
3452 rb_mutex_locked_p(VALUE self)
3453 {
3454  rb_mutex_t *mutex;
3455  GetMutexPtr(self, mutex);
3456  return mutex->th ? Qtrue : Qfalse;
3457 }
3458 
3459 static void
3460 mutex_locked(rb_thread_t *th, VALUE self)
3461 {
3462  rb_mutex_t *mutex;
3463  GetMutexPtr(self, mutex);
3464 
3465  if (th->keeping_mutexes) {
3466  mutex->next_mutex = th->keeping_mutexes;
3467  }
3468  th->keeping_mutexes = mutex;
3469 }
3470 
3471 /*
3472  * call-seq:
3473  * mutex.try_lock -> true or false
3474  *
3475  * Attempts to obtain the lock and returns immediately. Returns +true+ if the
3476  * lock was granted.
3477  */
3478 VALUE
3479 rb_mutex_trylock(VALUE self)
3480 {
3481  rb_mutex_t *mutex;
3482  VALUE locked = Qfalse;
3483  GetMutexPtr(self, mutex);
3484 
3485  native_mutex_lock(&mutex->lock);
3486  if (mutex->th == 0) {
3487  mutex->th = GET_THREAD();
3488  locked = Qtrue;
3489 
3490  mutex_locked(GET_THREAD(), self);
3491  }
3492  native_mutex_unlock(&mutex->lock);
3493 
3494  return locked;
3495 }
3496 
3497 static int
3498 lock_func(rb_thread_t *th, rb_mutex_t *mutex, int timeout_ms)
3499 {
3500  int interrupted = 0;
3501  int err = 0;
3502 
3503  mutex->cond_waiting++;
3504  for (;;) {
3505  if (!mutex->th) {
3506  mutex->th = th;
3507  break;
3508  }
3509  if (RUBY_VM_INTERRUPTED(th)) {
3510  interrupted = 1;
3511  break;
3512  }
3513  if (err == ETIMEDOUT) {
3514  interrupted = 2;
3515  break;
3516  }
3517 
3518  if (timeout_ms) {
3519  struct timespec timeout_rel;
3520  struct timespec timeout;
3521 
3522  timeout_rel.tv_sec = 0;
3523  timeout_rel.tv_nsec = timeout_ms * 1000 * 1000;
3524  timeout = native_cond_timeout(&mutex->cond, timeout_rel);
3525  err = native_cond_timedwait(&mutex->cond, &mutex->lock, &timeout);
3526  }
3527  else {
3528  native_cond_wait(&mutex->cond, &mutex->lock);
3529  err = 0;
3530  }
3531  }
3532  mutex->cond_waiting--;
3533 
3534  return interrupted;
3535 }
3536 
3537 static void
3538 lock_interrupt(void *ptr)
3539 {
3540  rb_mutex_t *mutex = (rb_mutex_t *)ptr;
3541  native_mutex_lock(&mutex->lock);
3542  if (mutex->cond_waiting > 0)
3543  native_cond_broadcast(&mutex->cond);
3544  native_mutex_unlock(&mutex->lock);
3545 }
3546 
3547 /*
3548  * At most one thread may use cond_timedwait to watch for deadlock
3549  * periodically. Multiple polling threads (i.e. concurrent deadlock checks)
3550  * would introduce new race conditions. [Bug #6278] [ruby-core:44275]
3551  */
3552 static rb_thread_t *patrol_thread = NULL;
3553 
3554 /*
3555  * call-seq:
3556  * mutex.lock -> self
3557  *
3558  * Attempts to grab the lock and waits if it isn't available.
3559  * Raises +ThreadError+ if +mutex+ was locked by the current thread.
3560  */
3561 VALUE
3562 rb_mutex_lock(VALUE self)
3563 {
3564 
3565  if (rb_mutex_trylock(self) == Qfalse) {
3566  rb_mutex_t *mutex;
3567  rb_thread_t *th = GET_THREAD();
3568  GetMutexPtr(self, mutex);
3569 
3570  if (mutex->th == GET_THREAD()) {
3571  rb_raise(rb_eThreadError, "deadlock; recursive locking");
3572  }
3573 
3574  while (mutex->th != th) {
3575  int interrupted;
3576  enum rb_thread_status prev_status = th->status;
3577  int timeout_ms = 0;
3578  struct rb_unblock_callback oldubf;
3579 
3580  set_unblock_function(th, lock_interrupt, mutex, &oldubf);
3581  th->status = THREAD_STOPPED_FOREVER;
3582  th->locking_mutex = self;
3583 
3584  native_mutex_lock(&mutex->lock);
3585  th->vm->sleeper++;
3586  /*
3587  * Careful: while some contended threads are in lock_func(),
3588  * vm->sleeper is an unstable value. We have to avoid both deadlock
3589  * and busy looping.
3590  */
3591  if ((vm_living_thread_num(th->vm) == th->vm->sleeper) &&
3592  !patrol_thread) {
3593  timeout_ms = 100;
3594  patrol_thread = th;
3595  }
3596 
3597  GVL_UNLOCK_BEGIN();
3598  interrupted = lock_func(th, mutex, timeout_ms);
3599  native_mutex_unlock(&mutex->lock);
3600  GVL_UNLOCK_END();
3601 
3602  if (patrol_thread == th)
3603  patrol_thread = NULL;
3604 
3605  reset_unblock_function(th, &oldubf);
3606 
3607  th->locking_mutex = Qfalse;
3608  if (mutex->th && interrupted == 2) {
3609  rb_check_deadlock(th->vm);
3610  }
3611  if (th->status == THREAD_STOPPED_FOREVER) {
3612  th->status = prev_status;
3613  }
3614  th->vm->sleeper--;
3615 
3616  if (mutex->th == th) mutex_locked(th, self);
3617 
3618  if (interrupted) {
3619  RUBY_VM_CHECK_INTS();
3620  }
3621  }
3622  }
3623  return self;
3624 }
3625 
3626 static const char *
3627 rb_mutex_unlock_th(rb_mutex_t *mutex, rb_thread_t volatile *th)
3628 {
3629  const char *err = NULL;
3630  rb_mutex_t *th_mutex;
3631 
3632  native_mutex_lock(&mutex->lock);
3633 
3634  if (mutex->th == 0) {
3635  err = "Attempt to unlock a mutex which is not locked";
3636  }
3637  else if (mutex->th != th) {
3638  err = "Attempt to unlock a mutex which is locked by another thread";
3639  }
3640  else {
3641  mutex->th = 0;
3642  if (mutex->cond_waiting > 0)
3643  native_cond_signal(&mutex->cond);
3644  }
3645 
3646  native_mutex_unlock(&mutex->lock);
3647 
3648  if (!err) {
3649  th_mutex = th->keeping_mutexes;
3650  if (th_mutex == mutex) {
3651  th->keeping_mutexes = mutex->next_mutex;
3652  }
3653  else {
3654  while (1) {
3655  rb_mutex_t *tmp_mutex;
3656  tmp_mutex = th_mutex->next_mutex;
3657  if (tmp_mutex == mutex) {
3658  th_mutex->next_mutex = tmp_mutex->next_mutex;
3659  break;
3660  }
3661  th_mutex = tmp_mutex;
3662  }
3663  }
3664  mutex->next_mutex = NULL;
3665  }
3666 
3667  return err;
3668 }
3669 
3670 /*
3671  * call-seq:
3672  * mutex.unlock -> self
3673  *
3674  * Releases the lock.
3675  * Raises +ThreadError+ if +mutex+ wasn't locked by the current thread.
3676  */
3677 VALUE
3678 rb_mutex_unlock(VALUE self)
3679 {
3680  const char *err;
3681  rb_mutex_t *mutex;
3682  GetMutexPtr(self, mutex);
3683 
3684  err = rb_mutex_unlock_th(mutex, GET_THREAD());
3685  if (err) rb_raise(rb_eThreadError, "%s", err);
3686 
3687  return self;
3688 }
3689 
3690 static void
3691 rb_mutex_abandon_all(rb_mutex_t *mutexes)
3692 {
3693  rb_mutex_t *mutex;
3694 
3695  while (mutexes) {
3696  mutex = mutexes;
3697  mutexes = mutex->next_mutex;
3698  mutex->th = 0;
3699  mutex->next_mutex = 0;
3700  }
3701 }
3702 
3703 static VALUE
3704 rb_mutex_sleep_forever(VALUE time)
3705 {
3706  rb_thread_sleep_deadly();
3707  return Qnil;
3708 }
3709 
3710 static VALUE
3711 rb_mutex_wait_for(VALUE time)
3712 {
3713  const struct timeval *t = (struct timeval *)time;
3714  rb_thread_wait_for(*t);
3715  return Qnil;
3716 }
3717 
3718 VALUE
3719 rb_mutex_sleep(VALUE self, VALUE timeout)
3720 {
3721  time_t beg, end;
3722  struct timeval t;
3723 
3724  if (!NIL_P(timeout)) {
3725  t = rb_time_interval(timeout);
3726  }
3727  rb_mutex_unlock(self);
3728  beg = time(0);
3729  if (NIL_P(timeout)) {
3730  rb_ensure(rb_mutex_sleep_forever, Qnil, rb_mutex_lock, self);
3731  }
3732  else {
3733  rb_ensure(rb_mutex_wait_for, (VALUE)&t, rb_mutex_lock, self);
3734  }
3735  end = time(0) - beg;
3736  return INT2FIX(end);
3737 }
3738 
3739 /*
3740  * call-seq:
3741  * mutex.sleep(timeout = nil) -> number
3742  *
3743  * Releases the lock and sleeps +timeout+ seconds if it is given and
3744  * non-nil or forever. Raises +ThreadError+ if +mutex+ wasn't locked by
3745  * the current thread.
3746  */
3747 static VALUE
3748 mutex_sleep(int argc, VALUE *argv, VALUE self)
3749 {
3750  VALUE timeout;
3751 
3752  rb_scan_args(argc, argv, "01", &timeout);
3753  return rb_mutex_sleep(self, timeout);
3754 }
3755 
3756 /*
3757  * call-seq:
3758  * mutex.synchronize { ... } -> result of the block
3759  *
3760  * Obtains a lock, runs the block, and releases the lock when the block
3761  * completes. See the example under +Mutex+.
3762  */
3763 
3764 VALUE
3765 rb_mutex_synchronize(VALUE mutex, VALUE (*func)(VALUE arg), VALUE arg)
3766 {
3767  rb_mutex_lock(mutex);
3768  return rb_ensure(func, arg, rb_mutex_unlock, mutex);
3769 }
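/*
 * Usage sketch (the callback and data pointer are hypothetical): running a
 * C-level critical section under a Ruby Mutex.  rb_ensure() guarantees the
 * unlock even if the callback raises.
 *
 *   static VALUE
 *   update_shared(VALUE arg)
 *   {
 *       // ... mutate state shared with other Ruby threads ...
 *       return Qnil;
 *   }
 *
 *   rb_mutex_synchronize(mutex, update_shared, (VALUE)data);
 */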
3770 
3771 /*
3772  * Document-class: Barrier
3773  */
3774 static void
3775 barrier_mark(void *ptr)
3776 {
3777  rb_gc_mark((VALUE)ptr);
3778 }
3779 
3780 static const rb_data_type_t barrier_data_type = {
3781  "barrier",
3782  {barrier_mark, 0, 0,},
3783 };
3784 
3785 static VALUE
3786 barrier_alloc(VALUE klass)
3787 {
3788  return TypedData_Wrap_Struct(klass, &barrier_data_type, (void *)mutex_alloc(0));
3789 }
3790 
3791 #define GetBarrierPtr(obj) ((VALUE)rb_check_typeddata((obj), &barrier_data_type))
3792 
3793 VALUE
3794 rb_barrier_new(void)
3795 {
3796  VALUE barrier = barrier_alloc(rb_cBarrier);
3797  rb_mutex_lock((VALUE)DATA_PTR(barrier));
3798  return barrier;
3799 }
3800 
3801 VALUE
3802 rb_barrier_wait(VALUE self)
3803 {
3804  VALUE mutex = GetBarrierPtr(self);
3805  rb_mutex_t *m;
3806 
3807  if (!mutex) return Qfalse;
3808  GetMutexPtr(mutex, m);
3809  if (m->th == GET_THREAD()) return Qfalse;
3810  rb_mutex_lock(mutex);
3811  if (DATA_PTR(self)) return Qtrue;
3812  rb_mutex_unlock(mutex);
3813  return Qfalse;
3814 }
3815 
3816 VALUE
3817 rb_barrier_release(VALUE self)
3818 {
3819  return rb_mutex_unlock(GetBarrierPtr(self));
3820 }
3821 
3822 VALUE
3823 rb_barrier_destroy(VALUE self)
3824 {
3825  VALUE mutex = GetBarrierPtr(self);
3826  DATA_PTR(self) = 0;
3827  return rb_mutex_unlock(mutex);
3828 }
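/*
 * Usage sketch for the internal Barrier above (control flow only):
 *
 *   VALUE b = rb_barrier_new();      // creating thread holds the barrier
 *   // other threads: rb_barrier_wait(b) blocks while the barrier is held
 *   rb_barrier_release(b);           // let a waiter through
 *   rb_barrier_destroy(b);           // or: release and mark it unusable
 */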
3829 
3830 /* variables for recursive traversals */
3831 static ID recursive_key;
3832 
3833 /*
3834  * Returns the current "recursive list" used to detect recursion.
3835  * This list is a hash table, unique for the current thread and for
3836  * the current __callee__.
3837  */
3838 
3839 static VALUE
3840 recursive_list_access(void)
3841 {
3842  volatile VALUE hash = rb_thread_local_aref(rb_thread_current(), recursive_key);
3843  VALUE sym = ID2SYM(rb_frame_this_func());
3844  VALUE list;
3845  if (NIL_P(hash) || TYPE(hash) != T_HASH) {
3846  hash = rb_hash_new();
3847  OBJ_UNTRUST(hash);
3848  rb_thread_local_aset(rb_thread_current(), recursive_key, hash);
3849  list = Qnil;
3850  }
3851  else {
3852  list = rb_hash_aref(hash, sym);
3853  }
3854  if (NIL_P(list) || TYPE(list) != T_HASH) {
3855  list = rb_hash_new();
3856  OBJ_UNTRUST(list);
3857  rb_hash_aset(hash, sym, list);
3858  }
3859  return list;
3860 }
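/*
 * Layout sketch of the structure built above, expressed in Ruby terms:
 *
 *   Thread.current[:__recursive_key__]
 *   # => { :inspect => { obj_id => ..., ... }, ... }
 *
 * i.e. a thread-local hash keyed by the current callee, each value being
 * the per-method recursion list.
 */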
3861 
3862 /*
3863  * Returns Qtrue iff obj_id (or the pair <obj_id, paired_obj_id>) is already
3864  * in the recursion list.
3865  * Assumes the recursion list is valid.
3866  */
3867 
3868 static VALUE
3869 recursive_check(VALUE list, VALUE obj_id, VALUE paired_obj_id)
3870 {
3871 #if SIZEOF_LONG == SIZEOF_VOIDP
3872  #define OBJ_ID_EQL(obj_id, other) ((obj_id) == (other))
3873 #elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
3874  #define OBJ_ID_EQL(obj_id, other) (RB_TYPE_P((obj_id), T_BIGNUM) ? \
3875  rb_big_eql((obj_id), (other)) : ((obj_id) == (other)))
3876 #endif
3877 
3878  VALUE pair_list = rb_hash_lookup2(list, obj_id, Qundef);
3879  if (pair_list == Qundef)
3880  return Qfalse;
3881  if (paired_obj_id) {
3882  if (TYPE(pair_list) != T_HASH) {
3883  if (!OBJ_ID_EQL(paired_obj_id, pair_list))
3884  return Qfalse;
3885  }
3886  else {
3887  if (NIL_P(rb_hash_lookup(pair_list, paired_obj_id)))
3888  return Qfalse;
3889  }
3890  }
3891  return Qtrue;
3892 }
3893 
3894 /*
3895  * Pushes obj_id (or the pair <obj_id, paired_obj_id>) in the recursion list.
3896  * For a single obj_id, it sets list[obj_id] to Qtrue.
3897  * For a pair, it sets list[obj_id] to paired_obj_id if possible,
3898  * otherwise list[obj_id] becomes a hash like:
3899  * {paired_obj_id_1 => true, paired_obj_id_2 => true, ... }
3900  * Assumes the recursion list is valid.
3901  */
3902 
3903 static void
3904 recursive_push(VALUE list, VALUE obj, VALUE paired_obj)
3905 {
3906  VALUE pair_list;
3907 
3908  if (!paired_obj) {
3909  rb_hash_aset(list, obj, Qtrue);
3910  }
3911  else if ((pair_list = rb_hash_lookup2(list, obj, Qundef)) == Qundef) {
3912  rb_hash_aset(list, obj, paired_obj);
3913  }
3914  else {
3915  if (TYPE(pair_list) != T_HASH){
3916  VALUE other_paired_obj = pair_list;
3917  pair_list = rb_hash_new();
3918  OBJ_UNTRUST(pair_list);
3919  rb_hash_aset(pair_list, other_paired_obj, Qtrue);
3920  rb_hash_aset(list, obj, pair_list);
3921  }
3922  rb_hash_aset(pair_list, paired_obj, Qtrue);
3923  }
3924 }
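/*
 * Shape example for the three cases above (ids are hypothetical): pushing
 * (idA, 0), then (idB, idC), then (idB, idD) leaves
 *
 *   list == { idA => true, idB => { idC => true, idD => true } }
 *
 * the second push for idB having promoted its plain value to a hash.
 */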
3925 
3926 /*
3927  * Pops obj_id (or the pair <obj_id, paired_obj_id>) from the recursion list.
3928  * For a pair, if list[obj_id] is a hash, then paired_obj_id is
3929  * removed from the hash and no attempt is made to simplify
3930  * list[obj_id] from {only_one_paired_id => true} to only_one_paired_id
3931  * Assumes the recursion list is valid.
3932  */
3933 
3934 static void
3935 recursive_pop(VALUE list, VALUE obj, VALUE paired_obj)
3936 {
3937  if (paired_obj) {
3938  VALUE pair_list = rb_hash_lookup2(list, obj, Qundef);
3939  if (pair_list == Qundef) {
3940  VALUE symname = rb_inspect(ID2SYM(rb_frame_this_func()));
3941  VALUE thrname = rb_inspect(rb_thread_current());
3942  rb_raise(rb_eTypeError, "invalid inspect_tbl pair_list for %s in %s",
3943  StringValuePtr(symname), StringValuePtr(thrname));
3944  }
3945  if (TYPE(pair_list) == T_HASH) {
3946  rb_hash_delete(pair_list, paired_obj);
3947  if (!RHASH_EMPTY_P(pair_list)) {
3948  return; /* keep the hash until it is empty */
3949  }
3950  }
3951  }
3952  rb_hash_delete(list, obj);
3953 }
3954 
3955 struct exec_recursive_params {
3956  VALUE (*func) (VALUE, VALUE, int);
3957  VALUE list;
3958  VALUE obj;
3959  VALUE objid;
3960  VALUE pairid;
3961  VALUE arg;
3962 };
3963 
3964 static VALUE
3965 exec_recursive_i(VALUE tag, struct exec_recursive_params *p)
3966 {
3967  VALUE result = Qundef;
3968  int state;
3969 
3970  recursive_push(p->list, p->objid, p->pairid);
3971  PUSH_TAG();
3972  if ((state = EXEC_TAG()) == 0) {
3973  result = (*p->func)(p->obj, p->arg, FALSE);
3974  }
3975  POP_TAG();
3976  recursive_pop(p->list, p->objid, p->pairid);
3977  if (state)
3978  JUMP_TAG(state);
3979  return result;
3980 }
3981 
3982 /*
3983  * Calls func(obj, arg, recursive), where recursive is non-zero if the
3984  * current method is called recursively on obj, or on the pair <obj, pairid>
3985  * If outer is 0, then the innermost func will be called with recursive set
3986  * to Qtrue, otherwise the outermost func will be called. In the latter case,
3987  * all inner func are short-circuited by throw.
3988  * Implementation details: the value thrown is the recursive list which is
3989  * proper to the current method and unlikely to be caught anywhere else.
3990  * list[recursive_key] is used as a flag for the outermost call.
3991  */
3992 
3993 static VALUE
3994 exec_recursive(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE pairid, VALUE arg, int outer)
3995 {
3996  VALUE result = Qundef;
3997  struct exec_recursive_params p;
3998  int outermost;
3999  p.list = recursive_list_access();
4000  p.objid = rb_obj_id(obj);
4001  p.obj = obj;
4002  p.pairid = pairid;
4003  p.arg = arg;
4004  outermost = outer && !recursive_check(p.list, ID2SYM(recursive_key), 0);
4005 
4006  if (recursive_check(p.list, p.objid, pairid)) {
4007  if (outer && !outermost) {
4008  rb_throw_obj(p.list, p.list);
4009  }
4010  return (*func)(obj, arg, TRUE);
4011  }
4012  else {
4013  p.func = func;
4014 
4015  if (outermost) {
4016  recursive_push(p.list, ID2SYM(recursive_key), 0);
4017  result = rb_catch_obj(p.list, exec_recursive_i, (VALUE)&p);
4018  recursive_pop(p.list, ID2SYM(recursive_key), 0);
4019  if (result == p.list) {
4020  result = (*func)(obj, arg, TRUE);
4021  }
4022  }
4023  else {
4024  result = exec_recursive_i(0, &p);
4025  }
4026  }
4027  *(volatile struct exec_recursive_params *)&p;
4028  return result;
4029 }
4030 
4031 /*
4032  * Calls func(obj, arg, recursive), where recursive is non-zero if the
4033  * current method is called recursively on obj
4034  */
4035 
4036 VALUE
4037 rb_exec_recursive(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE arg)
4038 {
4039  return exec_recursive(func, obj, 0, arg, 0);
4040 }
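/*
 * Usage sketch (the callback is hypothetical): an inspect-like routine
 * that short-circuits when called on a self-referencing container.
 *
 *   static VALUE
 *   my_inspect(VALUE obj, VALUE arg, int recursive)
 *   {
 *       if (recursive) return rb_str_new2("[...]");
 *       // ... inspect each element, recursing via rb_exec_recursive ...
 *       return rb_funcall(obj, rb_intern("to_s"), 0);
 *   }
 *
 *   rb_exec_recursive(my_inspect, obj, Qnil);
 */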
4041 
4042 /*
4043  * Calls func(obj, arg, recursive), where recursive is non-zero if the
4044  * current method is called recursively on the ordered pair <obj, paired_obj>
4045  */
4046 
4047 VALUE
4048 rb_exec_recursive_paired(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE paired_obj, VALUE arg)
4049 {
4050  return exec_recursive(func, obj, rb_obj_id(paired_obj), arg, 0);
4051 }
4052 
4053 /*
4054  * If recursion is detected on the current method and obj, the outermost
4055  * func will be called with (obj, arg, Qtrue). All inner func will be
4056  * short-circuited using throw.
4057  */
4058 
4059 VALUE
4060 rb_exec_recursive_outer(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE arg)
4061 {
4062  return exec_recursive(func, obj, 0, arg, 1);
4063 }
4064 
4065 /* tracer */
4066 #define RUBY_EVENT_REMOVED 0x1000000
4067 
4068 enum {
4069  EVENT_RUNNING_NOTHING,
4070  EVENT_RUNNING_TRACE = 1,
4071  EVENT_RUNNING_THREAD = 2,
4072  EVENT_RUNNING_VM = 4,
4073  EVENT_RUNNING_EVENT_MASK = EVENT_RUNNING_VM | EVENT_RUNNING_THREAD
4074 };
4075 
4076 static VALUE thread_suppress_tracing(rb_thread_t *th, int ev, VALUE (*func)(VALUE, int), VALUE arg, int always, int pop_p);
4077 
4078 struct event_call_args {
4079  rb_thread_t *th;
4080  VALUE klass;
4081  VALUE self;
4082  VALUE proc;
4083  ID id;
4084  rb_event_flag_t event;
4085 };
4086 
4087 static rb_event_hook_t *
4088 alloc_event_hook(rb_event_hook_func_t func, rb_event_flag_t events, VALUE data)
4089 {
4090  rb_event_hook_t *hook = ALLOC(rb_event_hook_t);
4091  hook->func = func;
4092  hook->flag = events;
4093  hook->data = data;
4094  return hook;
4095 }
4096 
4097 static void
4098 thread_reset_event_flags(rb_thread_t *th)
4099 {
4100  rb_event_hook_t *hook = th->event_hooks;
4101  rb_event_flag_t flag = th->event_flags & RUBY_EVENT_VM;
4102 
4103  while (hook) {
4104  if (!(flag & RUBY_EVENT_REMOVED))
4105  flag |= hook->flag;
4106  hook = hook->next;
4107  }
4108  th->event_flags = flag;
4109 }
4110 
4111 static void
4112 rb_threadptr_add_event_hook(rb_thread_t *th,
4113  rb_event_hook_func_t func, rb_event_flag_t events, VALUE data)
4114 {
4115  rb_event_hook_t *hook = alloc_event_hook(func, events, data);
4116  hook->next = th->event_hooks;
4117  th->event_hooks = hook;
4118  thread_reset_event_flags(th);
4119 }
4120 
4121 static rb_thread_t *
4122 thval2thread_t(VALUE thval)
4123 {
4124  rb_thread_t *th;
4125  GetThreadPtr(thval, th);
4126  return th;
4127 }
4128 
4129 void
4130 rb_thread_add_event_hook(VALUE thval,
4131  rb_event_hook_func_t func, rb_event_flag_t events, VALUE data)
4132 {
4133  rb_threadptr_add_event_hook(thval2thread_t(thval), func, events, data);
4134 }
4135 
4136 static int
4137 set_threads_event_flags_i(st_data_t key, st_data_t val, st_data_t flag)
4138 {
4139  VALUE thval = key;
4140  rb_thread_t *th;
4141  GetThreadPtr(thval, th);
4142 
4143  if (flag) {
4144  th->event_flags |= RUBY_EVENT_VM;
4145  }
4146  else {
4147  th->event_flags &= (~RUBY_EVENT_VM);
4148  }
4149  return ST_CONTINUE;
4150 }
4151 
4152 static void
4153 set_threads_event_flags(int flag)
4154 {
4155  st_foreach(GET_VM()->living_threads, set_threads_event_flags_i, (st_data_t) flag);
4156 }
4157 
4158 static inline int
4159 exec_event_hooks(const rb_event_hook_t *hook, rb_event_flag_t flag, VALUE self, ID id, VALUE klass)
4160 {
4161  int removed = 0;
4162  for (; hook; hook = hook->next) {
4163  if (hook->flag & RUBY_EVENT_REMOVED) {
4164  removed++;
4165  continue;
4166  }
4167  if (flag & hook->flag) {
4168  (*hook->func)(flag, hook->data, self, id, klass);
4169  }
4170  }
4171  return removed;
4172 }
4173 
4174 static int remove_defered_event_hook(rb_event_hook_t **root);
4175 
4176 static VALUE
4177 thread_exec_event_hooks(VALUE args, int running)
4178 {
4179  struct event_call_args *argp = (struct event_call_args *)args;
4180  rb_thread_t *th = argp->th;
4181  rb_event_flag_t flag = argp->event;
4182  VALUE self = argp->self;
4183  ID id = argp->id;
4184  VALUE klass = argp->klass;
4185  const rb_event_flag_t wait_event = th->event_flags;
4186  int removed;
4187 
4188  if (self == rb_mRubyVMFrozenCore) return 0;
4189 
4190  if ((wait_event & flag) && !(running & EVENT_RUNNING_THREAD)) {
4191  th->tracing |= EVENT_RUNNING_THREAD;
4192  removed = exec_event_hooks(th->event_hooks, flag, self, id, klass);
4193  th->tracing &= ~EVENT_RUNNING_THREAD;
4194  if (removed) {
4195  remove_defered_event_hook(&th->event_hooks);
4196  }
4197  }
4198  if (wait_event & RUBY_EVENT_VM) {
4199  if (th->vm->event_hooks == NULL) {
4200  th->event_flags &= (~RUBY_EVENT_VM);
4201  }
4202  else if (!(running & EVENT_RUNNING_VM)) {
4203  th->tracing |= EVENT_RUNNING_VM;
4204  removed = exec_event_hooks(th->vm->event_hooks, flag, self, id, klass);
4205  th->tracing &= ~EVENT_RUNNING_VM;
4206  if (removed) {
4207  remove_defered_event_hook(&th->vm->event_hooks);
4208  }
4209  }
4210  }
4211  return 0;
4212 }
4213 
4214 void
4215 rb_threadptr_exec_event_hooks(rb_thread_t *th, rb_event_flag_t flag, VALUE self, ID id, VALUE klass)
4216 {
4217  const VALUE errinfo = th->errinfo;
4218  struct event_call_args args;
4219  args.th = th;
4220  args.event = flag;
4221  args.self = self;
4222  args.id = id;
4223  args.klass = klass;
4224  args.proc = 0;
4225  thread_exec_event_hooks((VALUE)&args, th->tracing);
4226  th->errinfo = errinfo;
4227 }
4228 
4229 void
4230 rb_add_event_hook(rb_event_hook_func_t func, rb_event_flag_t events, VALUE data)
4231 {
4232  rb_event_hook_t *hook = alloc_event_hook(func, events, data);
4233  rb_vm_t *vm = GET_VM();
4234 
4235  hook->next = vm->event_hooks;
4236  vm->event_hooks = hook;
4237 
4238  set_threads_event_flags(1);
4239 }
4240 
4241 static int
4242 defer_remove_event_hook(rb_event_hook_t *hook, rb_event_hook_func_t func)
4243 {
4244  while (hook) {
4245  if (func == 0 || hook->func == func) {
4246  hook->flag |= RUBY_EVENT_REMOVED;
4247  }
4248  hook = hook->next;
4249  }
4250  return -1;
4251 }
4252 
4253 static int
4254 remove_event_hook(rb_event_hook_t **root, rb_event_hook_func_t func)
4255 {
4256  rb_event_hook_t *hook = *root, *next;
4257 
4258  while (hook) {
4259  next = hook->next;
4260  if (func == 0 || hook->func == func || (hook->flag & RUBY_EVENT_REMOVED)) {
4261  *root = next;
4262  xfree(hook);
4263  }
4264  else {
4265  root = &hook->next;
4266  }
4267  hook = next;
4268  }
4269  return -1;
4270 }
4271 
4272 static int
4273 remove_defered_event_hook(rb_event_hook_t **root)
4274 {
4275  rb_event_hook_t *hook = *root, *next;
4276 
4277  while (hook) {
4278  next = hook->next;
4279  if (hook->flag & RUBY_EVENT_REMOVED) {
4280  *root = next;
4281  xfree(hook);
4282  }
4283  else {
4284  root = &hook->next;
4285  }
4286  hook = next;
4287  }
4288  return -1;
4289 }
4290 
4291 static int
4292 rb_threadptr_remove_event_hook(rb_thread_t *th, rb_event_hook_func_t func)
4293 {
4294  int ret;
4295  if (th->tracing & EVENT_RUNNING_THREAD) {
4296  ret = defer_remove_event_hook(th->event_hooks, func);
4297  }
4298  else {
4299  ret = remove_event_hook(&th->event_hooks, func);
4300  }
4301  thread_reset_event_flags(th);
4302  return ret;
4303 }
4304 
4305 int
4306 rb_thread_remove_event_hook(VALUE thval, rb_event_hook_func_t func)
4307 {
4308  return rb_threadptr_remove_event_hook(thval2thread_t(thval), func);
4309 }
4310 
4311 static rb_event_hook_t *
4312 search_live_hook(rb_event_hook_t *hook)
4313 {
4314  while (hook) {
4315  if (!(hook->flag & RUBY_EVENT_REMOVED))
4316  return hook;
4317  hook = hook->next;
4318  }
4319  return NULL;
4320 }
4321 
4322 static int
4323 running_vm_event_hooks(st_data_t key, st_data_t val, st_data_t data)
4324 {
4325  rb_thread_t *th = thval2thread_t((VALUE)key);
4326  if (!(th->tracing & EVENT_RUNNING_VM)) return ST_CONTINUE;
4327  *(rb_thread_t **)data = th;
4328  return ST_STOP;
4329 }
4330 
4331 static rb_thread_t *
4332 vm_event_hooks_running_thread(rb_vm_t *vm)
4333 {
4334  rb_thread_t *found = NULL;
4335  st_foreach(vm->living_threads, running_vm_event_hooks, (st_data_t)&found);
4336  return found;
4337 }
4338 
4339 int
4340 rb_remove_event_hook(rb_event_hook_func_t func)
4341 {
4342  rb_vm_t *vm = GET_VM();
4343  rb_event_hook_t *hook = search_live_hook(vm->event_hooks);
4344  int ret;
4345 
4346  if (vm_event_hooks_running_thread(vm)) {
4347  ret = defer_remove_event_hook(vm->event_hooks, func);
4348  }
4349  else {
4350  ret = remove_event_hook(&vm->event_hooks, func);
4351  }
4352 
4353  if (hook && !search_live_hook(vm->event_hooks)) {
4354  set_threads_event_flags(0);
4355  }
4356 
4357  return ret;
4358 }
4359 
4360 static int
4361 clear_trace_func_i(st_data_t key, st_data_t val, st_data_t flag)
4362 {
4363  rb_thread_t *th;
4364  GetThreadPtr((VALUE)key, th);
4365  rb_threadptr_remove_event_hook(th, 0);
4366  return ST_CONTINUE;
4367 }
4368 
4369 void
4370 rb_clear_trace_func(void)
4371 {
4372  st_foreach(GET_VM()->living_threads, clear_trace_func_i, (st_data_t) 0);
4373  rb_remove_event_hook(0);
4374 }
4375 
4376 static void call_trace_func(rb_event_flag_t, VALUE data, VALUE self, ID id, VALUE klass);
4377 
4378 /*
4379  * call-seq:
4380  * set_trace_func(proc) -> proc
4381  * set_trace_func(nil) -> nil
4382  *
4383  * Establishes _proc_ as the handler for tracing, or disables
4384  * tracing if the parameter is +nil+. _proc_ takes up
4385  * to six parameters: an event name, a filename, a line number, an
4386  * object id, a binding, and the name of a class. _proc_ is
4387  * invoked whenever an event occurs. Events are: <code>c-call</code>
4388  * (call a C-language routine), <code>c-return</code> (return from a
4389  * C-language routine), <code>call</code> (call a Ruby method),
4390  * <code>class</code> (start a class or module definition),
4391  * <code>end</code> (finish a class or module definition),
4392  * <code>line</code> (execute code on a new line), <code>raise</code>
4393  * (raise an exception), and <code>return</code> (return from a Ruby
4394  * method). Tracing is disabled within the context of _proc_.
4395  *
4396  * class Test
4397  * def test
4398  * a = 1
4399  * b = 2
4400  * end
4401  * end
4402  *
4403  * set_trace_func proc { |event, file, line, id, binding, classname|
4404  * printf "%8s %s:%-2d %10s %8s\n", event, file, line, id, classname
4405  * }
4406  * t = Test.new
4407  * t.test
4408  *
4409  * line prog.rb:11 false
4410  * c-call prog.rb:11 new Class
4411  * c-call prog.rb:11 initialize Object
4412  * c-return prog.rb:11 initialize Object
4413  * c-return prog.rb:11 new Class
4414  * line prog.rb:12 false
4415  * call prog.rb:2 test Test
4416  * line prog.rb:3 test Test
4417  * line prog.rb:4 test Test
4418  * return prog.rb:4 test Test
4419  */
4420 
4421 static VALUE
4422 set_trace_func(VALUE obj, VALUE trace)
4423 {
4424  rb_remove_event_hook(call_trace_func);
4425 
4426  if (NIL_P(trace)) {
4427  GET_THREAD()->tracing = EVENT_RUNNING_NOTHING;
4428  return Qnil;
4429  }
4430 
4431  if (!rb_obj_is_proc(trace)) {
4432  rb_raise(rb_eTypeError, "trace_func needs to be Proc");
4433  }
4434 
4435  rb_add_event_hook(call_trace_func, RUBY_EVENT_ALL, trace);
4436  return trace;
4437 }
4438 
4439 static void
4440 thread_add_trace_func(rb_thread_t *th, VALUE trace)
4441 {
4442  if (!rb_obj_is_proc(trace)) {
4443  rb_raise(rb_eTypeError, "trace_func needs to be Proc");
4444  }
4445 
4446  rb_threadptr_add_event_hook(th, call_trace_func, RUBY_EVENT_ALL, trace);
4447 }
4448 
4449 /*
4450  * call-seq:
4451  * thr.add_trace_func(proc) -> proc
4452  *
4453  * Adds _proc_ as a handler for tracing.
4454  * See <code>Thread#set_trace_func</code> and +set_trace_func+.
4455  */
4456 
4457 static VALUE
4458 thread_add_trace_func_m(VALUE obj, VALUE trace)
4459 {
4460  rb_thread_t *th;
4461  GetThreadPtr(obj, th);
4462  thread_add_trace_func(th, trace);
4463  return trace;
4464 }
4465 
4466 /*
4467  * call-seq:
4468  * thr.set_trace_func(proc) -> proc
4469  * thr.set_trace_func(nil) -> nil
4470  *
4471  * Establishes _proc_ on _thr_ as the handler for tracing, or
4472  * disables tracing if the parameter is +nil+.
4473  * See +set_trace_func+.
4474  */
4475 
4476 static VALUE
4477 thread_set_trace_func_m(VALUE obj, VALUE trace)
4478 {
4479  rb_thread_t *th;
4480  GetThreadPtr(obj, th);
4481  rb_threadptr_remove_event_hook(th, call_trace_func);
4482 
4483  if (NIL_P(trace)) {
4484  th->tracing = EVENT_RUNNING_NOTHING;
4485  return Qnil;
4486  }
4487  thread_add_trace_func(th, trace);
4488  return trace;
4489 }
4490 
4491 static const char *
4492 get_event_name(rb_event_flag_t event)
4493 {
4494  switch (event) {
4495  case RUBY_EVENT_LINE:
4496  return "line";
4497  case RUBY_EVENT_CLASS:
4498  return "class";
4499  case RUBY_EVENT_END:
4500  return "end";
4501  case RUBY_EVENT_CALL:
4502  return "call";
4503  case RUBY_EVENT_RETURN:
4504  return "return";
4505  case RUBY_EVENT_C_CALL:
4506  return "c-call";
4507  case RUBY_EVENT_C_RETURN:
4508  return "c-return";
4509  case RUBY_EVENT_RAISE:
4510  return "raise";
4511  default:
4512  return "unknown";
4513  }
4514 }
4515 
4516 static VALUE
4517 call_trace_proc(VALUE args, int tracing)
4518 {
4519  struct event_call_args *p = (struct event_call_args *)args;
4520  const char *srcfile = rb_sourcefile();
4521  VALUE eventname = rb_str_new2(get_event_name(p->event));
4522  VALUE filename = srcfile ? rb_str_new2(srcfile) : Qnil;
4523  VALUE argv[6];
4524  int line = rb_sourceline();
4525  ID id = 0;
4526  VALUE klass = 0;
4527 
4528  if (p->klass != 0) {
4529  id = p->id;
4530  klass = p->klass;
4531  }
4532  else {
4533  rb_thread_method_id_and_class(p->th, &id, &klass);
4534  }
4535  if (id == ID_ALLOCATOR)
4536  return Qnil;
4537  if (klass) {
4538  if (TYPE(klass) == T_ICLASS) {
4539  klass = RBASIC(klass)->klass;
4540  }
4541  else if (FL_TEST(klass, FL_SINGLETON)) {
4542  klass = rb_iv_get(klass, "__attached__");
4543  }
4544  }
4545 
4546  argv[0] = eventname;
4547  argv[1] = filename;
4548  argv[2] = INT2FIX(line);
4549  argv[3] = id ? ID2SYM(id) : Qnil;
4550  argv[4] = (p->self && srcfile) ? rb_binding_new() : Qnil;
4551  argv[5] = klass ? klass : Qnil;
4552 
4553  return rb_proc_call_with_block(p->proc, 6, argv, Qnil);
4554 }
4555 
4556 static void
4557 call_trace_func(rb_event_flag_t event, VALUE proc, VALUE self, ID id, VALUE klass)
4558 {
4559  struct event_call_args args;
4560 
4561  args.th = GET_THREAD();
4562  args.event = event;
4563  args.proc = proc;
4564  args.self = self;
4565  args.id = id;
4566  args.klass = klass;
4567  ruby_suppress_tracing(call_trace_proc, (VALUE)&args, FALSE);
4568 }
4569 
4570 VALUE
4571 ruby_suppress_tracing(VALUE (*func)(VALUE, int), VALUE arg, int always)
4572 {
4573  rb_thread_t *th = GET_THREAD();
4574  return thread_suppress_tracing(th, EVENT_RUNNING_TRACE, func, arg, always, 0);
4575 }
4576 
4577 static VALUE
4578 thread_suppress_tracing(rb_thread_t *th, int ev, VALUE (*func)(VALUE, int), VALUE arg, int always, int pop_p)
4579 {
4580  int state, tracing = th->tracing, running = tracing & ev;
4581  volatile int raised;
4582  volatile int outer_state;
4583  VALUE result = Qnil;
4584 
4585  if (running == ev && !always) {
4586  return Qnil;
4587  }
4588  else {
4589  th->tracing |= ev;
4590  }
4591 
4592  raised = rb_threadptr_reset_raised(th);
4593  outer_state = th->state;
4594  th->state = 0;
4595 
4596  PUSH_TAG();
4597  if ((state = EXEC_TAG()) == 0) {
4598  result = (*func)(arg, running);
4599  }
4600 
4601  if (raised) {
4602  rb_threadptr_set_raised(th);
4603  }
4604  POP_TAG();
4605 
4606  th->tracing = tracing;
4607  if (state) {
4608  if (pop_p) {
4610  }
4611  JUMP_TAG(state);
4612  }
4613  th->state = outer_state;
4614 
4615  return result;
4616 }
4617 
4618 /*
4619  * call-seq:
4620  * thr.backtrace -> array
4621  *
4622  * Returns the current backtrace of _thr_.
4623  */
4624 
4625 static VALUE
4626 rb_thread_backtrace_m(VALUE thval)
4627 {
4628  return rb_thread_backtrace(thval);
4629 }
4630 
4631 /*
4632  * Document-class: ThreadError
4633  *
4634  * Raised when an invalid operation is attempted on a thread.
4635  *
4636  * For example, when no other thread has been started:
4637  *
4638  * Thread.stop
4639  *
4640  * <em>raises the exception:</em>
4641  *
4642  * ThreadError: stopping only thread
4643  */
4644 
4645 /*
4646  * +Thread+ encapsulates the behavior of a thread of
4647  * execution, including the main thread of the Ruby script.
4648  *
4649  * In the descriptions of the methods in this class, the parameter _sym_
4650  * refers to a symbol, which is either a quoted string or a
4651  * +Symbol+ (such as <code>:name</code>).
4652  */
4653 
4654 void
4655 Init_Thread(void)
4656 {
4657 #undef rb_intern
4658 #define rb_intern(str) rb_intern_const(str)
4659 
4660  VALUE cThGroup;
4661  rb_thread_t *th = GET_THREAD();
4662 
4673  rb_define_singleton_method(rb_cThread, "abort_on_exception", rb_thread_s_abort_exc, 0);
4675 #if THREAD_DEBUG < 0
4676  rb_define_singleton_method(rb_cThread, "DEBUG", rb_thread_s_debug, 0);
4677  rb_define_singleton_method(rb_cThread, "DEBUG=", rb_thread_s_debug_set, 1);
4678 #endif
4679 
4680  rb_define_method(rb_cThread, "initialize", thread_initialize, -2);
4685  rb_define_method(rb_cThread, "terminate", rb_thread_kill, 0);
4698  rb_define_method(rb_cThread, "abort_on_exception", rb_thread_abort_exc, 0);
4699  rb_define_method(rb_cThread, "abort_on_exception=", rb_thread_abort_exc_set, 1);
4703 
4705 
4706  closed_stream_error = rb_exc_new2(rb_eIOError, "stream closed");
4709 
4710  cThGroup = rb_define_class("ThreadGroup", rb_cObject);
4712  rb_define_method(cThGroup, "list", thgroup_list, 0);
4713  rb_define_method(cThGroup, "enclose", thgroup_enclose, 0);
4714  rb_define_method(cThGroup, "enclosed?", thgroup_enclosed_p, 0);
4715  rb_define_method(cThGroup, "add", thgroup_add, 1);
4716 
4717  {
4718  th->thgroup = th->vm->thgroup_default = rb_obj_alloc(cThGroup);
4719  rb_define_const(cThGroup, "Default", th->thgroup);
4720  }
4721 
4722  rb_cMutex = rb_define_class("Mutex", rb_cObject);
4724  rb_define_method(rb_cMutex, "initialize", mutex_initialize, 0);
4726  rb_define_method(rb_cMutex, "try_lock", rb_mutex_trylock, 0);
4729  rb_define_method(rb_cMutex, "sleep", mutex_sleep, -1);
4730 
4731  recursive_key = rb_intern("__recursive_key__");
4733 
4734  /* trace */
4735  rb_define_global_function("set_trace_func", set_trace_func, 1);
4736  rb_define_method(rb_cThread, "set_trace_func", thread_set_trace_func_m, 1);
4737  rb_define_method(rb_cThread, "add_trace_func", thread_add_trace_func_m, 1);
4738 
4739  /* init thread core */
4740  {
4741  /* main thread setting */
4742  {
4743  /* acquire global vm lock */
4744  gvl_init(th->vm);
4745  gvl_acquire(th->vm, th);
4746  native_mutex_initialize(&th->interrupt_lock);
4747  }
4748  }
4749 
4750  rb_thread_create_timer_thread();
4751 
4752  /* suppress warnings on cygwin, mingw and mswin. */
4753  (void)native_mutex_trylock;
4754 }
4755 
4756 int
4757 ruby_native_thread_p(void)
4758 {
4759  rb_thread_t *th = ruby_thread_from_native();
4760 
4761  return th != 0;
4762 }
4763 
4764 static int
4765 check_deadlock_i(st_data_t key, st_data_t val, int *found)
4766 {
4767  VALUE thval = key;
4768  rb_thread_t *th;
4769  GetThreadPtr(thval, th);
4770 
4771  if (th->status != THREAD_STOPPED_FOREVER || RUBY_VM_INTERRUPTED(th)) {
4772  *found = 1;
4773  }
4774  else if (th->locking_mutex) {
4775  rb_mutex_t *mutex;
4776  GetMutexPtr(th->locking_mutex, mutex);
4777 
4778  native_mutex_lock(&mutex->lock);
4779  if (mutex->th == th || (!mutex->th && mutex->cond_waiting)) {
4780  *found = 1;
4781  }
4782  native_mutex_unlock(&mutex->lock);
4783  }
4784 
4785  return (*found) ? ST_STOP : ST_CONTINUE;
4786 }
4787 
4788 #ifdef DEBUG_DEADLOCK_CHECK
4789 static int
4790 debug_i(st_data_t key, st_data_t val, int *found)
4791 {
4792  VALUE thval = key;
4793  rb_thread_t *th;
4794  GetThreadPtr(thval, th);
4795 
4796  printf("th:%p %d %d", th, th->status, th->interrupt_flag);
4797  if (th->locking_mutex) {
4798  rb_mutex_t *mutex;
4799  GetMutexPtr(th->locking_mutex, mutex);
4800 
4801  native_mutex_lock(&mutex->lock);
4802  printf(" %p %d\n", mutex->th, mutex->cond_waiting);
4803  native_mutex_unlock(&mutex->lock);
4804  }
4805  else
4806  puts("");
4807 
4808  return ST_CONTINUE;
4809 }
4810 #endif
4811 
4812 static void
4813 rb_check_deadlock(rb_vm_t *vm)
4814 {
4815  int found = 0;
4816 
4817  if (vm_living_thread_num(vm) > vm->sleeper) return;
4818  if (vm_living_thread_num(vm) < vm->sleeper) rb_bug("sleeper must not be more than vm_living_thread_num(vm)");
4819  if (patrol_thread && patrol_thread != GET_THREAD()) return;
4820 
4821  st_foreach(vm->living_threads, check_deadlock_i, (st_data_t)&found);
4822 
4823  if (!found) {
4824  VALUE argv[2];
4825  argv[0] = rb_eFatal;
4826  argv[1] = rb_str_new2("deadlock detected");
4827 #ifdef DEBUG_DEADLOCK_CHECK
4828  printf("%d %d %p %p\n", vm->living_threads->num_entries, vm->sleeper, GET_THREAD(), vm->main_thread);
4829  st_foreach(vm->living_threads, debug_i, (st_data_t)0);
4830 #endif
4831  vm->sleeper--;
4832  rb_threadptr_raise(vm->main_thread, 2, argv);
4833  }
4834 }
4835 
4836 static void
4837 update_coverage(rb_event_flag_t event, VALUE proc, VALUE self, ID id, VALUE klass)
4838 {
4839  VALUE coverage = GET_THREAD()->cfp->iseq->coverage;
4840  if (coverage && RBASIC(coverage)->klass == 0) {
4841  long line = rb_sourceline() - 1;
4842  long count;
4843  if (RARRAY_PTR(coverage)[line] == Qnil) {
4844  return;
4845  }
4846  count = FIX2LONG(RARRAY_PTR(coverage)[line]) + 1;
4847  if (POSFIXABLE(count)) {
4848  RARRAY_PTR(coverage)[line] = LONG2FIX(count);
4849  }
4850  }
4851 }
4852 
4853 VALUE
4854 rb_get_coverages(void)
4855 {
4856  return GET_VM()->coverages;
4857 }
4858 
4859 void
4860 rb_set_coverages(VALUE coverages)
4861 {
4862  GET_VM()->coverages = coverages;
4863  rb_add_event_hook(update_coverage, RUBY_EVENT_COVERAGE, Qnil);
4864 }
4865 
4866 void
4867 rb_reset_coverages(void)
4868 {
4869  GET_VM()->coverages = Qfalse;
4870  rb_remove_event_hook(update_coverage);
4871 }
4872 
4873