Ruby 1.9.3p551 (2014-11-13 revision 48407)
thread.c
1 /**********************************************************************
2 
3  thread.c -
4 
5  $Author: usa $
6 
7  Copyright (C) 2004-2007 Koichi Sasada
8 
9 **********************************************************************/
10 
11 /*
12  YARV Thread Design
13 
14  model 1: Userlevel Thread
15  Same as traditional ruby thread.
16 
17  model 2: Native Thread with Global VM lock
18  Using pthread (or Windows threads); Ruby threads run concurrently.
19 
20  model 3: Native Thread with fine grain lock
21  Using pthread; Ruby threads run concurrently or in parallel.
22 
23 ------------------------------------------------------------------------
24 
25  model 2:
26  Only the thread that holds the mutex (GVL: Global VM Lock or Giant VM Lock) can run.
27  On thread scheduling, the running thread releases the GVL. If the running
28  thread attempts a blocking operation, it must release the GVL so that another
29  thread can continue. After the blocking operation, the thread
30  must check for interrupts (RUBY_VM_CHECK_INTS).
31 
32  Separate VMs can run in parallel.
33 
34  Ruby threads are scheduled by OS thread scheduler.
35 
36 ------------------------------------------------------------------------
37 
38  model 3:
39  All threads run concurrently or in parallel, and exclusive access control
40  is needed to touch shared objects. For example, to access a String
41  or an Array object, a fine-grained lock must be taken every time.
42  */
43 
44 
45 /*
46  * FD_SET, FD_CLR and FD_ISSET have a small sanity check when using glibc
47  * 2.15 or later with _FORTIFY_SOURCE > 0 set.
48  * However, the implementation is wrong. Even though Linux's select(2)
49  * supports large fd sizes (>FD_SETSIZE), it wrongly assumes fd is always
50  * less than FD_SETSIZE (i.e. 1024). Then, when HAVE_RB_FD_INIT is enabled,
51  * it doesn't work correctly and makes the program abort. Therefore we need to
52  * disable _FORTIFY_SOURCE until glibc fixes it.
53  */
54 #undef _FORTIFY_SOURCE
55 #undef __USE_FORTIFY_LEVEL
56 #define __USE_FORTIFY_LEVEL 0
57 
58 /* for model 2 */
59 
60 #include "eval_intern.h"
61 #include "gc.h"
62 #include "internal.h"
63 #include "ruby/io.h"
64 
65 #ifndef USE_NATIVE_THREAD_PRIORITY
66 #define USE_NATIVE_THREAD_PRIORITY 0
67 #define RUBY_THREAD_PRIORITY_MAX 3
68 #define RUBY_THREAD_PRIORITY_MIN -3
69 #endif
70 
71 #ifndef THREAD_DEBUG
72 #define THREAD_DEBUG 0
73 #endif
74 
77 
78 static void sleep_timeval(rb_thread_t *th, struct timeval time);
79 static void sleep_wait_for_interrupt(rb_thread_t *th, double sleepsec);
80 static void sleep_forever(rb_thread_t *th, int nodeadlock);
81 static double timeofday(void);
82 static int rb_threadptr_dead(rb_thread_t *th);
83 
84 static void rb_check_deadlock(rb_vm_t *vm);
85 
86 #define eKillSignal INT2FIX(0)
87 #define eTerminateSignal INT2FIX(1)
88 static volatile int system_working = 1;
89 
90 #define closed_stream_error GET_VM()->special_exceptions[ruby_error_closed_stream]
91 
92 inline static void
93 st_delete_wrap(st_table *table, st_data_t key)
94 {
95  st_delete(table, &key, 0);
96 }
97 
98 /********************************************************************************/
99 
100 #define THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION
101 
102 struct rb_blocking_region_buffer {
103  enum rb_thread_status prev_status;
104  struct rb_unblock_callback oldubf;
105 };
106 
107 static void set_unblock_function(rb_thread_t *th, rb_unblock_function_t *func, void *arg,
108  struct rb_unblock_callback *old);
109 static void reset_unblock_function(rb_thread_t *th, const struct rb_unblock_callback *old);
110 
111 static inline void blocking_region_end(rb_thread_t *th, struct rb_blocking_region_buffer *region);
112 
113 #define RB_GC_SAVE_MACHINE_CONTEXT(th) \
114  do { \
115  rb_gc_save_machine_context(th); \
116  SET_MACHINE_STACK_END(&(th)->machine_stack_end); \
117  } while (0)
118 
119 #define GVL_UNLOCK_BEGIN() do { \
120  rb_thread_t *_th_stored = GET_THREAD(); \
121  RB_GC_SAVE_MACHINE_CONTEXT(_th_stored); \
122  gvl_release(_th_stored->vm);
123 
124 #define GVL_UNLOCK_END() \
125  gvl_acquire(_th_stored->vm, _th_stored); \
126  rb_thread_set_current(_th_stored); \
127 } while(0)
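/*
 * Illustrative sketch (not part of the original source): how the
 * GVL_UNLOCK_BEGIN/GVL_UNLOCK_END pair brackets code that runs without the
 * GVL, following the "model 2" rule described at the top of this file.
 * do_slow_work_without_ruby_objects() is a hypothetical helper; no Ruby
 * objects may be touched between the two macros.
 *
 *   GVL_UNLOCK_BEGIN();
 *   do_slow_work_without_ruby_objects();
 *   GVL_UNLOCK_END();
 */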
128 
129 #define blocking_region_begin(th, region, func, arg) \
130  do { \
131  (region)->prev_status = (th)->status; \
132  set_unblock_function((th), (func), (arg), &(region)->oldubf); \
133  (th)->blocking_region_buffer = (region); \
134  (th)->status = THREAD_STOPPED; \
135  thread_debug("enter blocking region (%p)\n", (void *)(th)); \
136  RB_GC_SAVE_MACHINE_CONTEXT(th); \
137  gvl_release((th)->vm); \
138  } while (0)
139 
140 #define BLOCKING_REGION(exec, ubf, ubfarg) do { \
141  rb_thread_t *__th = GET_THREAD(); \
142  struct rb_blocking_region_buffer __region; \
143  blocking_region_begin(__th, &__region, (ubf), (ubfarg)); \
144  exec; \
145  blocking_region_end(__th, &__region); \
146  RUBY_VM_CHECK_INTS(); \
147 } while(0)
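/*
 * Illustrative sketch (not part of the original source): typical use of
 * BLOCKING_REGION inside this file.  The GVL is released around `exec',
 * `ubf' is registered so other threads can interrupt the blocking call,
 * and pending interrupts are checked on re-entry via RUBY_VM_CHECK_INTS().
 * fd, buf and count are placeholders for the caller's variables.
 *
 *   ssize_t n;
 *   BLOCKING_REGION({
 *       n = read(fd, buf, count);
 *   }, ubf_select, GET_THREAD());
 */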
148 
149 #if THREAD_DEBUG
150 #ifdef HAVE_VA_ARGS_MACRO
151 void rb_thread_debug(const char *file, int line, const char *fmt, ...);
152 #define thread_debug(fmt, ...) rb_thread_debug(__FILE__, __LINE__, fmt, ##__VA_ARGS__)
153 #define POSITION_FORMAT "%s:%d:"
154 #define POSITION_ARGS ,file, line
155 #else
156 void rb_thread_debug(const char *fmt, ...);
157 #define thread_debug rb_thread_debug
158 #define POSITION_FORMAT
159 #define POSITION_ARGS
160 #endif
161 
162 # if THREAD_DEBUG < 0
163 static int rb_thread_debug_enabled;
164 
165 /*
166  * call-seq:
167  * Thread.DEBUG -> num
168  *
169  * Returns the thread debug level. Available only if compiled with
170  * THREAD_DEBUG=-1.
171  */
172 
173 static VALUE
174 rb_thread_s_debug(void)
175 {
176  return INT2NUM(rb_thread_debug_enabled);
177 }
178 
179 /*
180  * call-seq:
181  * Thread.DEBUG = num
182  *
183  * Sets the thread debug level. Available only if compiled with
184  * THREAD_DEBUG=-1.
185  */
186 
187 static VALUE
188 rb_thread_s_debug_set(VALUE self, VALUE val)
189 {
190  rb_thread_debug_enabled = RTEST(val) ? NUM2INT(val) : 0;
191  return val;
192 }
193 # else
194 # define rb_thread_debug_enabled THREAD_DEBUG
195 # endif
196 #else
197 #define thread_debug if(0)printf
198 #endif
199 
200 #ifndef __ia64
201 #define thread_start_func_2(th, st, rst) thread_start_func_2(th, st)
202 #endif
203 NOINLINE(static int thread_start_func_2(rb_thread_t *th, VALUE *stack_start,
204  VALUE *register_stack_start));
205 static void timer_thread_function(void *);
206 
207 #if defined(_WIN32)
208 #include "thread_win32.c"
209 
210 #define DEBUG_OUT() \
211  WaitForSingleObject(&debug_mutex, INFINITE); \
212  printf(POSITION_FORMAT"%p - %s" POSITION_ARGS, GetCurrentThreadId(), buf); \
213  fflush(stdout); \
214  ReleaseMutex(&debug_mutex);
215 
216 #elif defined(HAVE_PTHREAD_H)
217 #include "thread_pthread.c"
218 
219 #define DEBUG_OUT() \
220  pthread_mutex_lock(&debug_mutex); \
221  printf(POSITION_FORMAT"%#"PRIxVALUE" - %s" POSITION_ARGS, (VALUE)pthread_self(), buf); \
222  fflush(stdout); \
223  pthread_mutex_unlock(&debug_mutex);
224 
225 #else
226 #error "unsupported thread type"
227 #endif
228 
229 #if THREAD_DEBUG
230 static int debug_mutex_initialized = 1;
231 static rb_thread_lock_t debug_mutex;
232 
233 void
234 rb_thread_debug(
235 #ifdef HAVE_VA_ARGS_MACRO
236  const char *file, int line,
237 #endif
238  const char *fmt, ...)
239 {
240  va_list args;
241  char buf[BUFSIZ];
242 
243  if (!rb_thread_debug_enabled) return;
244 
245  if (debug_mutex_initialized == 1) {
246  debug_mutex_initialized = 0;
247  native_mutex_initialize(&debug_mutex);
248  }
249 
250  va_start(args, fmt);
251  vsnprintf(buf, BUFSIZ, fmt, args);
252  va_end(args);
253 
254  DEBUG_OUT();
255 }
256 #endif
257 
258 void
260 {
261  gvl_release(vm);
262  gvl_destroy(vm);
263 }
264 
265 void
267 {
268  native_mutex_unlock(lock);
269 }
270 
271 void
273 {
274  native_mutex_destroy(lock);
275 }
276 
277 static void
278 set_unblock_function(rb_thread_t *th, rb_unblock_function_t *func, void *arg,
279  struct rb_unblock_callback *old)
280 {
281  check_ints:
282  RUBY_VM_CHECK_INTS(); /* check signal or so */
283  native_mutex_lock(&th->interrupt_lock);
284  if (th->interrupt_flag) {
285  native_mutex_unlock(&th->interrupt_lock);
286  goto check_ints;
287  }
288  else {
289  if (old) *old = th->unblock;
290  th->unblock.func = func;
291  th->unblock.arg = arg;
292  }
293  native_mutex_unlock(&th->interrupt_lock);
294 }
295 
296 static void
297 reset_unblock_function(rb_thread_t *th, const struct rb_unblock_callback *old)
298 {
299  native_mutex_lock(&th->interrupt_lock);
300  th->unblock = *old;
301  native_mutex_unlock(&th->interrupt_lock);
302 }
303 
304 void
305 rb_threadptr_interrupt(rb_thread_t *th)
306 {
307  native_mutex_lock(&th->interrupt_lock);
309  if (th->unblock.func) {
310  (th->unblock.func)(th->unblock.arg);
311  }
312  else {
313  /* none */
314  }
315  native_mutex_unlock(&th->interrupt_lock);
316 }
317 
318 
319 static int
321 {
322  VALUE thval = key;
323  rb_thread_t *th;
324  GetThreadPtr(thval, th);
325 
326  if (th != main_thread) {
327  thread_debug("terminate_i: %p\n", (void *)th);
330  th->status = THREAD_TO_KILL;
331  }
332  else {
333  thread_debug("terminate_i: main thread (%p)\n", (void *)th);
334  }
335  return ST_CONTINUE;
336 }
337 
338 typedef struct rb_mutex_struct
339 {
342  struct rb_thread_struct volatile *th;
345 } rb_mutex_t;
346 
347 static void rb_mutex_abandon_all(rb_mutex_t *mutexes);
350 static const char* rb_mutex_unlock_th(rb_mutex_t *mutex, rb_thread_t volatile *th);
351 
352 void
354 {
355  const char *err;
356  rb_mutex_t *mutex;
357  rb_mutex_t *mutexes = th->keeping_mutexes;
358 
359  while (mutexes) {
360  mutex = mutexes;
361  /* rb_warn("mutex #<%p> remains to be locked by terminated thread",
362  mutexes); */
363  mutexes = mutex->next_mutex;
364  err = rb_mutex_unlock_th(mutex, th);
365  if (err) rb_bug("invalid keeping_mutexes: %s", err);
366  }
367 }
368 
369 void
371 {
372  rb_thread_t *th = GET_THREAD(); /* main thread */
373  rb_vm_t *vm = th->vm;
374 
375  if (vm->main_thread != th) {
376  rb_bug("rb_thread_terminate_all: called by child thread (%p, %p)",
377  (void *)vm->main_thread, (void *)th);
378  }
379 
380  /* unlock all locking mutexes */
382 
383  thread_debug("rb_thread_terminate_all (main thread: %p)\n", (void *)th);
385  vm->inhibit_thread_creation = 1;
386 
387  while (!rb_thread_alone()) {
388  PUSH_TAG();
389  if (EXEC_TAG() == 0) {
391  }
392  else {
393  /* ignore exception */
394  }
395  POP_TAG();
396  }
397 }
398 
399 static void
401 {
402  rb_thread_t *th = th_ptr;
403  th->status = THREAD_KILLED;
405 #ifdef __ia64
406  th->machine_register_stack_start = th->machine_register_stack_end = 0;
407 #endif
408 }
409 
410 static void
411 thread_cleanup_func(void *th_ptr, int atfork)
412 {
413  rb_thread_t *th = th_ptr;
414 
415  th->locking_mutex = Qfalse;
417 
418  /*
419  * Unfortunately, we can't release native threading resources at fork
420  * because libc may be in an unstable locking state; touching
421  * a threading resource may therefore cause a deadlock.
422  */
423  if (atfork)
424  return;
425 
426  native_mutex_destroy(&th->interrupt_lock);
427  native_thread_destroy(th);
428 }
429 
430 static VALUE rb_threadptr_raise(rb_thread_t *, int, VALUE *);
431 
432 void
434 {
435  native_thread_init_stack(th);
436 }
437 
438 static int
439 thread_start_func_2(rb_thread_t *th, VALUE *stack_start, VALUE *register_stack_start)
440 {
441  int state;
442  VALUE args = th->first_args;
443  rb_proc_t *proc;
444  rb_thread_t *join_th;
445  rb_thread_t *main_th;
446  VALUE errinfo = Qnil;
447 # ifdef USE_SIGALTSTACK
448  void rb_register_sigaltstack(rb_thread_t *th);
449 
450  rb_register_sigaltstack(th);
451 # endif
452 
453  ruby_thread_set_native(th);
454 
455  th->machine_stack_start = stack_start;
456 #ifdef __ia64
457  th->machine_register_stack_start = register_stack_start;
458 #endif
459  thread_debug("thread start: %p\n", (void *)th);
460 
461  gvl_acquire(th->vm, th);
462  {
463  thread_debug("thread start (get lock): %p\n", (void *)th);
465 
466  TH_PUSH_TAG(th);
467  if ((state = EXEC_TAG()) == 0) {
468  SAVE_ROOT_JMPBUF(th, {
469  if (!th->first_func) {
470  GetProcPtr(th->first_proc, proc);
471  th->errinfo = Qnil;
472  th->local_lfp = proc->block.lfp;
473  th->local_svar = Qnil;
474  th->value = rb_vm_invoke_proc(th, proc, proc->block.self,
475  (int)RARRAY_LEN(args), RARRAY_PTR(args), 0);
476  }
477  else {
478  th->value = (*th->first_func)((void *)args);
479  }
480  });
481  }
482  else {
483  errinfo = th->errinfo;
484  if (NIL_P(errinfo)) errinfo = rb_errinfo();
485  if (state == TAG_FATAL) {
486  /* fatal error within this thread, need to stop whole script */
487  }
488  else if (rb_obj_is_kind_of(errinfo, rb_eSystemExit)) {
489  if (th->safe_level >= 4) {
491  rb_sprintf("Insecure exit at level %d", th->safe_level));
492  errinfo = Qnil;
493  }
494  }
495  else if (th->safe_level < 4 &&
498  /* exit on main_thread */
499  }
500  else {
501  errinfo = Qnil;
502  }
503  th->value = Qnil;
504  }
505 
506  th->status = THREAD_KILLED;
507  thread_debug("thread end: %p\n", (void *)th);
508 
509  main_th = th->vm->main_thread;
510  if (th != main_th) {
511  if (TYPE(errinfo) == T_OBJECT) {
512  /* treat with normal error object */
513  rb_threadptr_raise(main_th, 1, &errinfo);
514  }
515  }
516  TH_POP_TAG();
517 
518  /* locking_mutex must be Qfalse */
519  if (th->locking_mutex != Qfalse) {
520  rb_bug("thread_start_func_2: locking_mutex must not be set (%p:%"PRIxVALUE")",
521  (void *)th, th->locking_mutex);
522  }
523 
524  /* delete self other than main thread from living_threads */
525  if (th != main_th) {
527  }
528 
529  /* wake up joining threads */
530  join_th = th->join_list_head;
531  while (join_th) {
532  if (join_th == main_th) errinfo = Qnil;
533  rb_threadptr_interrupt(join_th);
534  switch (join_th->status) {
536  join_th->status = THREAD_RUNNABLE;
537  default: break;
538  }
539  join_th = join_th->join_list_next;
540  }
541 
543  if (th != main_th) rb_check_deadlock(th->vm);
544 
545  if (!th->root_fiber) {
547  th->stack = 0;
548  }
549  }
550  if (th->vm->main_thread == th) {
552  }
553  else {
555  gvl_release(th->vm);
556  }
557 
558  return 0;
559 }
560 
561 static VALUE
562 thread_create_core(VALUE thval, VALUE args, VALUE (*fn)(ANYARGS))
563 {
564  rb_thread_t *th;
565  int err;
566 
567  if (OBJ_FROZEN(GET_THREAD()->thgroup)) {
569  "can't start a new thread (frozen ThreadGroup)");
570  }
571  GetThreadPtr(thval, th);
572 
573  /* setup thread environment */
574  th->first_func = fn;
575  th->first_proc = fn ? Qfalse : rb_block_proc();
576  th->first_args = args; /* GC: shouldn't put before above line */
577 
578  th->priority = GET_THREAD()->priority;
579  th->thgroup = GET_THREAD()->thgroup;
580 
581  native_mutex_initialize(&th->interrupt_lock);
582  if (GET_VM()->event_hooks != NULL)
583  th->event_flags |= RUBY_EVENT_VM;
584 
585  /* kick thread */
586  st_insert(th->vm->living_threads, thval, (st_data_t) th->thread_id);
587  err = native_thread_create(th);
588  if (err) {
590  th->status = THREAD_KILLED;
591  rb_raise(rb_eThreadError, "can't create Thread (%d)", err);
592  }
593  return thval;
594 }
595 
596 /* :nodoc: */
597 static VALUE
599 {
600  rb_thread_t *th;
601  VALUE thread = rb_thread_alloc(klass);
602 
603  if (GET_VM()->inhibit_thread_creation)
604  rb_raise(rb_eThreadError, "can't alloc thread");
605 
606  rb_obj_call_init(thread, argc, argv);
607  GetThreadPtr(thread, th);
608  if (!th->first_args) {
609  rb_raise(rb_eThreadError, "uninitialized thread - check `%s#initialize'",
610  rb_class2name(klass));
611  }
612  return thread;
613 }
614 
615 /*
616  * call-seq:
617  * Thread.start([args]*) {|args| block } -> thread
618  * Thread.fork([args]*) {|args| block } -> thread
619  *
620  * Basically the same as <code>Thread::new</code>. However, if class
621  * <code>Thread</code> is subclassed, then calling <code>start</code> in that
622  * subclass will not invoke the subclass's <code>initialize</code> method.
623  */
624 
625 static VALUE
627 {
628  return thread_create_core(rb_thread_alloc(klass), args, 0);
629 }
630 
631 /* :nodoc: */
632 static VALUE
634 {
635  rb_thread_t *th;
636  if (!rb_block_given_p()) {
637  rb_raise(rb_eThreadError, "must be called with a block");
638  }
639  GetThreadPtr(thread, th);
640  if (th->first_args) {
641  VALUE proc = th->first_proc, line, loc;
642  const char *file;
643  if (!proc || !RTEST(loc = rb_proc_location(proc))) {
644  rb_raise(rb_eThreadError, "already initialized thread");
645  }
646  file = RSTRING_PTR(RARRAY_PTR(loc)[0]);
647  if (NIL_P(line = RARRAY_PTR(loc)[1])) {
648  rb_raise(rb_eThreadError, "already initialized thread - %s",
649  file);
650  }
651  rb_raise(rb_eThreadError, "already initialized thread - %s:%d",
652  file, NUM2INT(line));
653  }
654  return thread_create_core(thread, args, 0);
655 }
656 
657 VALUE
659 {
661 }
662 
663 
664 /* +infty, for this purpose */
665 #define DELAY_INFTY 1E30
666 
667 struct join_arg {
668  rb_thread_t *target, *waiting;
669  double limit;
670  int forever;
671 };
672 
673 static VALUE
674 remove_from_join_list(VALUE arg)
675 {
676  struct join_arg *p = (struct join_arg *)arg;
677  rb_thread_t *target_th = p->target, *th = p->waiting;
678 
679  if (target_th->status != THREAD_KILLED) {
680  rb_thread_t **pth = &target_th->join_list_head;
681 
682  while (*pth) {
683  if (*pth == th) {
684  *pth = th->join_list_next;
685  break;
686  }
687  pth = &(*pth)->join_list_next;
688  }
689  }
690 
691  return Qnil;
692 }
693 
694 static VALUE
695 thread_join_sleep(VALUE arg)
696 {
697  struct join_arg *p = (struct join_arg *)arg;
698  rb_thread_t *target_th = p->target, *th = p->waiting;
699  double now, limit = p->limit;
700 
701  while (target_th->status != THREAD_KILLED) {
702  if (p->forever) {
703  sleep_forever(th, 1);
704  }
705  else {
706  now = timeofday();
707  if (now > limit) {
708  thread_debug("thread_join: timeout (thid: %p)\n",
709  (void *)target_th->thread_id);
710  return Qfalse;
711  }
712  sleep_wait_for_interrupt(th, limit - now);
713  }
714  thread_debug("thread_join: interrupted (thid: %p)\n",
715  (void *)target_th->thread_id);
716  }
717  return Qtrue;
718 }
719 
720 static VALUE
721 thread_join(rb_thread_t *target_th, double delay)
722 {
723  rb_thread_t *th = GET_THREAD();
724  struct join_arg arg;
725 
726  arg.target = target_th;
727  arg.waiting = th;
728  arg.limit = timeofday() + delay;
729  arg.forever = delay == DELAY_INFTY;
730 
731  thread_debug("thread_join (thid: %p)\n", (void *)target_th->thread_id);
732 
733  if (target_th->status != THREAD_KILLED) {
734  th->join_list_next = target_th->join_list_head;
735  target_th->join_list_head = th;
736  if (!rb_ensure(thread_join_sleep, (VALUE)&arg,
737  remove_from_join_list, (VALUE)&arg)) {
738  return Qnil;
739  }
740  }
741 
742  thread_debug("thread_join: success (thid: %p)\n",
743  (void *)target_th->thread_id);
744 
745  if (target_th->errinfo != Qnil) {
746  VALUE err = target_th->errinfo;
747 
748  if (FIXNUM_P(err)) {
749  /* */
750  }
751  else if (TYPE(target_th->errinfo) == T_NODE) {
754  }
755  else {
756  /* normal exception */
757  rb_exc_raise(err);
758  }
759  }
760  return target_th->self;
761 }
762 
763 /*
764  * call-seq:
765  * thr.join -> thr
766  * thr.join(limit) -> thr
767  *
768  * The calling thread will suspend execution and run <i>thr</i>. Does not
769  * return until <i>thr</i> exits or until <i>limit</i> seconds have passed. If
770  * the time limit expires, <code>nil</code> will be returned, otherwise
771  * <i>thr</i> is returned.
772  *
773  * Any threads not joined will be killed when the main program exits. If
774  * <i>thr</i> had previously raised an exception and the
775  * <code>abort_on_exception</code> and <code>$DEBUG</code> flags are not set
776  * (so the exception has not yet been processed) it will be processed at this
777  * time.
778  *
779  * a = Thread.new { print "a"; sleep(10); print "b"; print "c" }
780  * x = Thread.new { print "x"; Thread.pass; print "y"; print "z" }
781  * x.join # Let x thread finish, a will be killed on exit.
782  *
783  * <em>produces:</em>
784  *
785  * axyz
786  *
787  * The following example illustrates the <i>limit</i> parameter.
788  *
789  * y = Thread.new { 4.times { sleep 0.1; puts 'tick... ' }}
790  * puts "Waiting" until y.join(0.15)
791  *
792  * <em>produces:</em>
793  *
794  * tick...
795  * Waiting
796  * tick...
797  * Waitingtick...
798  *
799  *
800  * tick...
801  */
802 
803 static VALUE
804 thread_join_m(int argc, VALUE *argv, VALUE self)
805 {
806  rb_thread_t *target_th;
807  double delay = DELAY_INFTY;
808  VALUE limit;
809 
810  GetThreadPtr(self, target_th);
811 
812  rb_scan_args(argc, argv, "01", &limit);
813  if (!NIL_P(limit)) {
814  delay = rb_num2dbl(limit);
815  }
816 
817  return thread_join(target_th, delay);
818 }
819 
820 /*
821  * call-seq:
822  * thr.value -> obj
823  *
824  * Waits for <i>thr</i> to complete (via <code>Thread#join</code>) and returns
825  * its value.
826  *
827  * a = Thread.new { 2 + 2 }
828  * a.value #=> 4
829  */
830 
831 static VALUE
832 thread_value(VALUE self)
833 {
834  rb_thread_t *th;
835  GetThreadPtr(self, th);
836  thread_join(th, DELAY_INFTY);
837  return th->value;
838 }
839 
840 /*
841  * Thread Scheduling
842  */
843 
844 static struct timeval
845 double2timeval(double d)
846 {
847  struct timeval time;
848 
849  time.tv_sec = (int)d;
850  time.tv_usec = (int)((d - (int)d) * 1e6);
851  if (time.tv_usec < 0) {
852  time.tv_usec += (int)1e6;
853  time.tv_sec -= 1;
854  }
855  return time;
856 }
857 
858 static void
859 sleep_forever(rb_thread_t *th, int deadlockable)
860 {
861  enum rb_thread_status prev_status = th->status;
862  enum rb_thread_status status = deadlockable ? THREAD_STOPPED_FOREVER : THREAD_STOPPED;
863 
864  th->status = status;
865  do {
866  if (deadlockable) {
867  th->vm->sleeper++;
868  rb_check_deadlock(th->vm);
869  }
870  native_sleep(th, 0);
871  if (deadlockable) {
872  th->vm->sleeper--;
873  }
875  } while (th->status == status);
876  th->status = prev_status;
877 }
878 
879 static void
880 getclockofday(struct timeval *tp)
881 {
882 #if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
883  struct timespec ts;
884 
885  if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0) {
886  tp->tv_sec = ts.tv_sec;
887  tp->tv_usec = ts.tv_nsec / 1000;
888  } else
889 #endif
890  {
891  gettimeofday(tp, NULL);
892  }
893 }
894 
895 static void
896 sleep_timeval(rb_thread_t *th, struct timeval tv)
897 {
898  struct timeval to, tvn;
899  enum rb_thread_status prev_status = th->status;
900 
901  getclockofday(&to);
902  to.tv_sec += tv.tv_sec;
903  if ((to.tv_usec += tv.tv_usec) >= 1000000) {
904  to.tv_sec++;
905  to.tv_usec -= 1000000;
906  }
907 
908  th->status = THREAD_STOPPED;
909  do {
910  native_sleep(th, &tv);
912  getclockofday(&tvn);
913  if (to.tv_sec < tvn.tv_sec) break;
914  if (to.tv_sec == tvn.tv_sec && to.tv_usec <= tvn.tv_usec) break;
915  thread_debug("sleep_timeval: %ld.%.6ld > %ld.%.6ld\n",
916  (long)to.tv_sec, (long)to.tv_usec,
917  (long)tvn.tv_sec, (long)tvn.tv_usec);
918  tv.tv_sec = to.tv_sec - tvn.tv_sec;
919  if ((tv.tv_usec = to.tv_usec - tvn.tv_usec) < 0) {
920  --tv.tv_sec;
921  tv.tv_usec += 1000000;
922  }
923  } while (th->status == THREAD_STOPPED);
924  th->status = prev_status;
925 }
926 
927 void
929 {
930  thread_debug("rb_thread_sleep_forever\n");
932 }
933 
934 static void
936 {
937  thread_debug("rb_thread_sleep_deadly\n");
939 }
940 
941 static double
942 timeofday(void)
943 {
944 #if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
945  struct timespec tp;
946 
947  if (clock_gettime(CLOCK_MONOTONIC, &tp) == 0) {
948  return (double)tp.tv_sec + (double)tp.tv_nsec * 1e-9;
949  } else
950 #endif
951  {
952  struct timeval tv;
953  gettimeofday(&tv, NULL);
954  return (double)tv.tv_sec + (double)tv.tv_usec * 1e-6;
955  }
956 }
957 
958 static void
959 sleep_wait_for_interrupt(rb_thread_t *th, double sleepsec)
960 {
961  sleep_timeval(th, double2timeval(sleepsec));
962 }
963 
964 static void
965 sleep_for_polling(rb_thread_t *th)
966 {
967  struct timeval time;
968  time.tv_sec = 0;
969  time.tv_usec = 100 * 1000; /* 0.1 sec */
970  sleep_timeval(th, time);
971 }
972 
973 void
975 {
976  rb_thread_t *th = GET_THREAD();
977  sleep_timeval(th, time);
978 }
979 
980 void
982 {
984  if (!rb_thread_alone()) {
985  rb_thread_t *th = GET_THREAD();
986  sleep_for_polling(th);
987  }
988 }
989 
990 /*
991  * CAUTION: This function causes thread switching.
992  * rb_thread_check_ints() checks Ruby's interrupts.
993  * Some interrupts require thread switching, invoking handlers,
994  * and so on.
995  */
996 
997 void
998 rb_thread_check_ints(void)
999 {
1000  RUBY_VM_CHECK_INTS();
1001 }
1002 
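/*
 * Illustrative sketch (not part of the original source): a long-running C
 * loop that keeps holding the GVL should call rb_thread_check_ints()
 * periodically so that signals, Thread#kill and Thread#raise are serviced.
 * do_one_step() and n are hypothetical, non-blocking units of work.
 *
 *   long i;
 *   for (i = 0; i < n; i++) {
 *       do_one_step(i);
 *       rb_thread_check_ints();
 *   }
 */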
1003 /*
1004  * Hidden API for tcl/tk wrapper.
1005  * There is no guarantee that it will be kept.
1006  */
1007 int
1009 {
1010  return rb_signal_buff_size() != 0;
1011 }
1012 
1013 /* This function can be called in blocking region. */
1014 int
1015 rb_thread_interrupted(VALUE thval)
1016 {
1017  rb_thread_t *th;
1018  GetThreadPtr(thval, th);
1019  return RUBY_VM_INTERRUPTED(th);
1020 }
1021 
1022 void
1024 {
1026 }
1027 
1029 
1030 static void
1031 rb_thread_schedule_limits(unsigned long limits_us)
1032 {
1033  thread_debug("rb_thread_schedule\n");
1034  if (!rb_thread_alone()) {
1035  rb_thread_t *th = GET_THREAD();
1036 
1037  if (th->running_time_us >= limits_us) {
1038  thread_debug("rb_thread_schedule/switch start\n");
1040  gvl_yield(th->vm, th);
1042  thread_debug("rb_thread_schedule/switch done\n");
1043  }
1044  }
1045 }
1046 
1047 void
1049 {
1051 
1052  if (UNLIKELY(GET_THREAD()->interrupt_flag)) {
1054  }
1055 }
1056 
1057 /* blocking region */
1058 
1059 static inline void
1061 {
1062  gvl_acquire(th->vm, th);
1064  thread_debug("leave blocking region (%p)\n", (void *)th);
1065  remove_signal_thread_list(th);
1066  th->blocking_region_buffer = 0;
1067  reset_unblock_function(th, &region->oldubf);
1068  if (th->status == THREAD_STOPPED) {
1069  th->status = region->prev_status;
1070  }
1071 }
1072 
1075 {
1076  rb_thread_t *th = GET_THREAD();
1078  blocking_region_begin(th, region, ubf_select, th);
1079  return region;
1080 }
1081 
1082 void
1084 {
1085  int saved_errno = errno;
1086  rb_thread_t *th = GET_THREAD();
1087  blocking_region_end(th, region);
1088  xfree(region);
1090  errno = saved_errno;
1091 }
1092 
1093 /*
1094  * rb_thread_blocking_region - permit concurrent/parallel execution.
1095  *
1096  * This function does:
1097  * (1) release GVL.
1098  * Other Ruby threads may run in parallel.
1099  * (2) call func with data1.
1100  * (3) acquire GVL.
1101  * Other Ruby threads can not run in parallel any more.
1102  *
1103  * If another thread interrupts this thread (Thread#kill, signal delivery,
1104  * VM-shutdown request, and so on), `ubf()' is called (`ubf()' means
1105  * "un-blocking function"). `ubf()' should interrupt `func()' execution.
1106  *
1107  * There are built-in ubfs, and you can specify them.
1108  * However, we cannot guarantee that the built-in ubfs interrupt
1109  * your `func()' correctly. Be careful when using rb_thread_blocking_region().
1110  *
1111  * * RUBY_UBF_IO: ubf for IO operation
1112  * * RUBY_UBF_PROCESS: ubf for process operation
1113  *
1114  * NOTE: You cannot call most of the Ruby C API or touch Ruby
1115  * objects in `func()' and `ubf()', including raising an
1116  * exception, because the current thread does not hold the GVL
1117  * (which would cause synchronization problems). If you need to do it,
1118  * read the source code of the C APIs and confirm their safety yourself.
1119  *
1120  * NOTE: In short, this API is difficult to use safely. I recommend you
1121  * use other approaches if you have them. We lack experience with this API.
1122  * Please report any problems related to it.
1123  *
1124  * Safe C API:
1125  * * rb_thread_interrupted() - check interrupt flag
1126  * * ruby_xalloc(), ruby_xrealloc(), ruby_xfree() -
1127  * if they are called without the GVL, they acquire it automatically.
1128  */
1129 VALUE
1131  rb_blocking_function_t *func, void *data1,
1132  rb_unblock_function_t *ubf, void *data2)
1133 {
1134  VALUE val;
1135  rb_thread_t *th = GET_THREAD();
1136  int saved_errno = 0;
1137 
1138  th->waiting_fd = -1;
1139  if (ubf == RUBY_UBF_IO || ubf == RUBY_UBF_PROCESS) {
1140  ubf = ubf_select;
1141  data2 = th;
1142  }
1143 
1144  BLOCKING_REGION({
1145  val = func(data1);
1146  saved_errno = errno;
1147  }, ubf, data2);
1148  errno = saved_errno;
1149 
1150  return val;
1151 }
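/*
 * Illustrative sketch (not part of the original source): how a C extension
 * might call rb_thread_blocking_region().  struct my_read_args and
 * my_blocking_read() are hypothetical; RUBY_UBF_IO is one of the built-in
 * unblocking functions documented above.
 *
 *   struct my_read_args { int fd; void *buf; size_t len; ssize_t ret; };
 *
 *   static VALUE
 *   my_blocking_read(void *data)
 *   {
 *       struct my_read_args *a = data;
 *       a->ret = read(a->fd, a->buf, a->len);
 *       return Qnil;
 *   }
 *
 * and, from code that holds the GVL:
 *
 *   struct my_read_args args = { fd, buf, len, 0 };
 *   rb_thread_blocking_region(my_blocking_read, &args, RUBY_UBF_IO, 0);
 */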
1152 
1153 VALUE
1155 {
1156  VALUE val;
1157  rb_thread_t *th = GET_THREAD();
1158  int saved_errno = 0;
1159 
1160  th->waiting_fd = fd;
1161  BLOCKING_REGION({
1162  val = func(data1);
1163  saved_errno = errno;
1164  }, ubf_select, th);
1165  th->waiting_fd = -1;
1166  errno = saved_errno;
1167 
1168  return val;
1169 }
1170 
1171 /* alias of rb_thread_blocking_region() */
1172 
1173 VALUE
1175  rb_blocking_function_t *func, void *data1,
1176  rb_unblock_function_t *ubf, void *data2)
1177 {
1178  return rb_thread_blocking_region(func, data1, ubf, data2);
1179 }
1180 
1181 /*
1182  * rb_thread_call_with_gvl - re-enter into Ruby world while releasing GVL.
1183  *
1184  ***
1185  *** This API is EXPERIMENTAL!
1186  *** We do not guarantee that this API remains in ruby 1.9.2 or later.
1187  ***
1188  *
1189  * While the GVL is released using rb_thread_blocking_region() or
1190  * rb_thread_call_without_gvl(), you cannot access Ruby values or invoke methods.
1191  * If you need to do so, you must use this function, rb_thread_call_with_gvl().
1192  *
1193  * This function rb_thread_call_with_gvl() does:
1194  * (1) acquire GVL.
1195  * (2) call passed function `func'.
1196  * (3) release GVL.
1197  * (4) return a value which is returned at (2).
1198  *
1199  * NOTE: You should not return a Ruby object at (2) because such an object
1200  * will not be marked (by the GC).
1201  *
1202  * NOTE: If an exception is raised in `func', this function "DOES NOT"
1203  * protect (catch) the exception. If you have any resources
1204  * which should be freed before the exception propagates, you need to use
1205  * rb_protect() in `func' and return a value indicating that an
1206  * exception was raised.
1207  *
1208  * NOTE: This function should not be called by a thread which
1209  * was not created as a Ruby thread (i.e., by Thread.new or the like).
1210  * In other words, this function *DOES NOT* associate
1211  * a non-Ruby thread with a Ruby thread.
1212  */
1213 void *
1214 rb_thread_call_with_gvl(void *(*func)(void *), void *data1)
1215 {
1216  rb_thread_t *th = ruby_thread_from_native();
1217  struct rb_blocking_region_buffer *brb;
1218  struct rb_unblock_callback prev_unblock;
1219  void *r;
1220 
1221  if (th == 0) {
1222  /* An error has occurred, but we can't use rb_bug()
1223  * because this thread is not a Ruby thread.
1224  * What should we do?
1225  */
1226 
1227  fprintf(stderr, "[BUG] rb_thread_call_with_gvl() is called by non-ruby thread\n");
1228  exit(EXIT_FAILURE);
1229  }
1230 
1232  prev_unblock = th->unblock;
1233 
1234  if (brb == 0) {
1235  rb_bug("rb_thread_call_with_gvl: called by a thread which has GVL.");
1236  }
1237 
1238  blocking_region_end(th, brb);
1239  /* enter to Ruby world: You can access Ruby values, methods and so on. */
1240  r = (*func)(data1);
1241  /* leave from Ruby world: You can not access Ruby values, etc. */
1242  blocking_region_begin(th, brb, prev_unblock.func, prev_unblock.arg);
1243  return r;
1244 }
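/*
 * Illustrative sketch (not part of the original source): calling back into
 * Ruby from code that is currently running without the GVL (for example,
 * inside a rb_thread_blocking_region() callback).  notify_progress() is a
 * hypothetical helper; note that it returns NULL rather than a Ruby object,
 * as recommended above.
 *
 *   static void *
 *   notify_progress(void *msg)
 *   {
 *       rb_funcall(rb_stdout, rb_intern("puts"), 1,
 *                  rb_str_new_cstr((const char *)msg));
 *       return NULL;
 *   }
 *
 * and, from code running without the GVL:
 *
 *   rb_thread_call_with_gvl(notify_progress, (void *)"halfway done");
 */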
1245 
1246 /*
1247  * ruby_thread_has_gvl_p - check if current native thread has GVL.
1248  *
1249  ***
1250  *** This API is EXPERIMENTAL!
1251  *** We do not guarantee that this API remains in ruby 1.9.2 or later.
1252  ***
1253  */
1254 
1255 int
1257 {
1258  rb_thread_t *th = ruby_thread_from_native();
1259 
1260  if (th && th->blocking_region_buffer == 0) {
1261  return 1;
1262  }
1263  else {
1264  return 0;
1265  }
1266 }
1267 
1268 /*
1269  * call-seq:
1270  * Thread.pass -> nil
1271  *
1272  * Give the thread scheduler a hint to pass execution to another thread.
1273  * A running thread may or may not switch; it depends on the OS and processor.
1274  */
1275 
1276 static VALUE
1278 {
1280  return Qnil;
1281 }
1282 
1283 /*
1284  *
1285  */
1286 
1287 static void
1289 {
1290  rb_atomic_t interrupt;
1291 
1292  if (th->raised_flag) return;
1293 
1294  while ((interrupt = ATOMIC_EXCHANGE(th->interrupt_flag, 0)) != 0) {
1295  enum rb_thread_status status = th->status;
1296  int timer_interrupt = interrupt & 0x01;
1297  int finalizer_interrupt = interrupt & 0x04;
1298  int sig;
1299 
1300  th->status = THREAD_RUNNABLE;
1301 
1302  /* signal handling */
1303  if (th == th->vm->main_thread) {
1304  while ((sig = rb_get_next_signal()) != 0) {
1305  rb_signal_exec(th, sig);
1306  }
1307  }
1308 
1309  /* exception from another thread */
1310  if (th->thrown_errinfo) {
1311  VALUE err = th->thrown_errinfo;
1312  th->thrown_errinfo = 0;
1313  thread_debug("rb_thread_execute_interrupts: %"PRIdVALUE"\n", err);
1314 
1315  if (err == eKillSignal || err == eTerminateSignal) {
1316  th->errinfo = INT2FIX(TAG_FATAL);
1317  TH_JUMP_TAG(th, TAG_FATAL);
1318  }
1319  else {
1320  rb_exc_raise(err);
1321  }
1322  }
1323  th->status = status;
1324 
1325  if (finalizer_interrupt) {
1327  }
1328 
1329  if (timer_interrupt) {
1330  unsigned long limits_us = 250 * 1000;
1331 
1332  if (th->priority > 0)
1333  limits_us <<= th->priority;
1334  else
1335  limits_us >>= -th->priority;
1336 
1337  if (status == THREAD_RUNNABLE)
1338  th->running_time_us += TIME_QUANTUM_USEC;
1339 
1340  EXEC_EVENT_HOOK(th, RUBY_EVENT_SWITCH, th->cfp->self, 0, 0);
1341 
1342  rb_thread_schedule_limits(limits_us);
1343  }
1344  }
1345 }
1346 
1347 void
1349 {
1351 }
1352 
1353 void
1355 {
1356  rb_thread_t *th;
1357  GetThreadPtr(thval, th);
1359 }
1360 
1361 void
1363 {
1364  rb_bug("deprecated function rb_gc_mark_threads is called");
1365 }
1366 
1367 /*****************************************************/
1368 
1369 static void
1371 {
1373 }
1374 
1375 static VALUE
1377 {
1378  VALUE exc;
1379 
1380  again:
1381  if (rb_threadptr_dead(th)) {
1382  return Qnil;
1383  }
1384 
1385  if (th->thrown_errinfo != 0 || th->raised_flag) {
1387  goto again;
1388  }
1389 
1390  exc = rb_make_exception(argc, argv);
1391  th->thrown_errinfo = exc;
1392  rb_threadptr_ready(th);
1393  return Qnil;
1394 }
1395 
1396 void
1398 {
1399  VALUE argv[2];
1400 
1401  argv[0] = rb_eSignal;
1402  argv[1] = INT2FIX(sig);
1403  rb_threadptr_raise(th->vm->main_thread, 2, argv);
1404 }
1405 
1406 void
1408 {
1409  VALUE argv[2];
1410 
1411  argv[0] = rb_eSystemExit;
1412  argv[1] = rb_str_new2("exit");
1413  rb_threadptr_raise(th->vm->main_thread, 2, argv);
1414 }
1415 
1416 #if defined(POSIX_SIGNAL) && defined(SIGSEGV) && defined(HAVE_SIGALTSTACK)
1417 #define USE_SIGALTSTACK
1418 #endif
1419 
1420 void
1422 {
1423  th->raised_flag = 0;
1424 #ifdef USE_SIGALTSTACK
1426 #else
1427  th->errinfo = sysstack_error;
1428  TH_JUMP_TAG(th, TAG_RAISE);
1429 #endif
1430 }
1431 
1432 int
1434 {
1435  if (th->raised_flag & RAISED_EXCEPTION) {
1436  return 1;
1437  }
1439  return 0;
1440 }
1441 
1442 int
1444 {
1445  if (!(th->raised_flag & RAISED_EXCEPTION)) {
1446  return 0;
1447  }
1448  th->raised_flag &= ~RAISED_EXCEPTION;
1449  return 1;
1450 }
1451 
1452 #define THREAD_IO_WAITING_P(th) ( \
1453  ((th)->status == THREAD_STOPPED || \
1454  (th)->status == THREAD_STOPPED_FOREVER) && \
1455  (th)->blocking_region_buffer && \
1456  (th)->unblock.func == ubf_select && \
1457  1)
1458 
1459 static int
1461 {
1462  int fd = (int)data;
1463  rb_thread_t *th;
1464  GetThreadPtr((VALUE)key, th);
1465 
1466  if (THREAD_IO_WAITING_P(th)) {
1467  native_mutex_lock(&th->interrupt_lock);
1468  if (THREAD_IO_WAITING_P(th) && th->waiting_fd == fd) {
1471  (th->unblock.func)(th->unblock.arg);
1472  }
1473  native_mutex_unlock(&th->interrupt_lock);
1474  }
1475  return ST_CONTINUE;
1476 }
1477 
1478 void
1480 {
1481  st_foreach(GET_THREAD()->vm->living_threads, thread_fd_close_i, (st_index_t)fd);
1482 }
1483 
1484 /*
1485  * call-seq:
1486  * thr.raise
1487  * thr.raise(string)
1488  * thr.raise(exception [, string [, array]])
1489  *
1490  * Raises an exception (see <code>Kernel::raise</code>) from <i>thr</i>. The
1491  * caller does not have to be <i>thr</i>.
1492  *
1493  * Thread.abort_on_exception = true
1494  * a = Thread.new { sleep(200) }
1495  * a.raise("Gotcha")
1496  *
1497  * <em>produces:</em>
1498  *
1499  * prog.rb:3: Gotcha (RuntimeError)
1500  * from prog.rb:2:in `initialize'
1501  * from prog.rb:2:in `new'
1502  * from prog.rb:2
1503  */
1504 
1505 static VALUE
1507 {
1508  rb_thread_t *th;
1509  GetThreadPtr(self, th);
1510  rb_threadptr_raise(th, argc, argv);
1511  return Qnil;
1512 }
1513 
1514 
1515 /*
1516  * call-seq:
1517  * thr.exit -> thr or nil
1518  * thr.kill -> thr or nil
1519  * thr.terminate -> thr or nil
1520  *
1521  * Terminates <i>thr</i> and schedules another thread to be run. If this thread
1522  * is already marked to be killed, <code>exit</code> returns the
1523  * <code>Thread</code>. If this is the main thread, or the last thread, exits
1524  * the process.
1525  */
1526 
1527 VALUE
1529 {
1530  rb_thread_t *th;
1531 
1532  GetThreadPtr(thread, th);
1533 
1534  if (th != GET_THREAD() && th->safe_level < 4) {
1535  rb_secure(4);
1536  }
1537  if (th->status == THREAD_TO_KILL || th->status == THREAD_KILLED) {
1538  return thread;
1539  }
1540  if (th == th->vm->main_thread) {
1542  }
1543 
1544  thread_debug("rb_thread_kill: %p (%p)\n", (void *)th, (void *)th->thread_id);
1545 
1548  th->status = THREAD_TO_KILL;
1549 
1550  return thread;
1551 }
1552 
1553 
1554 /*
1555  * call-seq:
1556  * Thread.kill(thread) -> thread
1557  *
1558  * Causes the given <em>thread</em> to exit (see <code>Thread::exit</code>).
1559  *
1560  * count = 0
1561  * a = Thread.new { loop { count += 1 } }
1562  * sleep(0.1) #=> 0
1563  * Thread.kill(a) #=> #<Thread:0x401b3d30 dead>
1564  * count #=> 93947
1565  * a.alive? #=> false
1566  */
1567 
1568 static VALUE
1570 {
1571  return rb_thread_kill(th);
1572 }
1573 
1574 
1575 /*
1576  * call-seq:
1577  * Thread.exit -> thread
1578  *
1579  * Terminates the currently running thread and schedules another thread to be
1580  * run. If this thread is already marked to be killed, <code>exit</code>
1581  * returns the <code>Thread</code>. If this is the main thread, or the last
1582  * thread, exits the process.
1583  */
1584 
1585 static VALUE
1587 {
1588  return rb_thread_kill(GET_THREAD()->self);
1589 }
1590 
1591 
1592 /*
1593  * call-seq:
1594  * thr.wakeup -> thr
1595  *
1596  * Marks <i>thr</i> as eligible for scheduling (it may still remain blocked on
1597  * I/O, however). Does not invoke the scheduler (see <code>Thread#run</code>).
1598  *
1599  * c = Thread.new { Thread.stop; puts "hey!" }
1600  * sleep 0.1 while c.status!='sleep'
1601  * c.wakeup
1602  * c.join
1603  *
1604  * <em>produces:</em>
1605  *
1606  * hey!
1607  */
1608 
1609 VALUE
1611 {
1612  if (!RTEST(rb_thread_wakeup_alive(thread))) {
1613  rb_raise(rb_eThreadError, "killed thread");
1614  }
1615  return thread;
1616 }
1617 
1618 VALUE
1620 {
1621  rb_thread_t *th;
1622  GetThreadPtr(thread, th);
1623 
1624  if (th->status == THREAD_KILLED) {
1625  return Qnil;
1626  }
1627  rb_threadptr_ready(th);
1628  if (th->status != THREAD_TO_KILL) {
1629  th->status = THREAD_RUNNABLE;
1630  }
1631  return thread;
1632 }
1633 
1634 
1635 /*
1636  * call-seq:
1637  * thr.run -> thr
1638  *
1639  * Wakes up <i>thr</i>, making it eligible for scheduling.
1640  *
1641  * a = Thread.new { puts "a"; Thread.stop; puts "c" }
1642  * sleep 0.1 while a.status!='sleep'
1643  * puts "Got here"
1644  * a.run
1645  * a.join
1646  *
1647  * <em>produces:</em>
1648  *
1649  * a
1650  * Got here
1651  * c
1652  */
1653 
1654 VALUE
1656 {
1657  rb_thread_wakeup(thread);
1659  return thread;
1660 }
1661 
1662 
1663 /*
1664  * call-seq:
1665  * Thread.stop -> nil
1666  *
1667  * Stops execution of the current thread, putting it into a ``sleep'' state,
1668  * and schedules execution of another thread.
1669  *
1670  * a = Thread.new { print "a"; Thread.stop; print "c" }
1671  * sleep 0.1 while a.status!='sleep'
1672  * print "b"
1673  * a.run
1674  * a.join
1675  *
1676  * <em>produces:</em>
1677  *
1678  * abc
1679  */
1680 
1681 VALUE
1683 {
1684  if (rb_thread_alone()) {
1686  "stopping only thread\n\tnote: use sleep to stop forever");
1687  }
1689  return Qnil;
1690 }
1691 
1692 static int
1694 {
1695  VALUE ary = (VALUE)data;
1696  rb_thread_t *th;
1697  GetThreadPtr((VALUE)key, th);
1698 
1699  switch (th->status) {
1700  case THREAD_RUNNABLE:
1701  case THREAD_STOPPED:
1703  case THREAD_TO_KILL:
1704  rb_ary_push(ary, th->self);
1705  default:
1706  break;
1707  }
1708  return ST_CONTINUE;
1709 }
1710 
1711 /********************************************************************/
1712 
1713 /*
1714  * call-seq:
1715  * Thread.list -> array
1716  *
1717  * Returns an array of <code>Thread</code> objects for all threads that are
1718  * either runnable or stopped.
1719  *
1720  * Thread.new { sleep(200) }
1721  * Thread.new { 1000000.times {|i| i*i } }
1722  * Thread.new { Thread.stop }
1723  * Thread.list.each {|t| p t}
1724  *
1725  * <em>produces:</em>
1726  *
1727  * #<Thread:0x401b3e84 sleep>
1728  * #<Thread:0x401b3f38 run>
1729  * #<Thread:0x401b3fb0 sleep>
1730  * #<Thread:0x401bdf4c run>
1731  */
1732 
1733 VALUE
1735 {
1736  VALUE ary = rb_ary_new();
1737  st_foreach(GET_THREAD()->vm->living_threads, thread_list_i, ary);
1738  return ary;
1739 }
1740 
1741 VALUE
1743 {
1744  return GET_THREAD()->self;
1745 }
1746 
1747 /*
1748  * call-seq:
1749  * Thread.current -> thread
1750  *
1751  * Returns the currently executing thread.
1752  *
1753  * Thread.current #=> #<Thread:0x401bdf4c run>
1754  */
1755 
1756 static VALUE
1758 {
1759  return rb_thread_current();
1760 }
1761 
1762 VALUE
1764 {
1765  return GET_THREAD()->vm->main_thread->self;
1766 }
1767 
1768 /*
1769  * call-seq:
1770  * Thread.main -> thread
1771  *
1772  * Returns the main thread.
1773  */
1774 
1775 static VALUE
1777 {
1778  return rb_thread_main();
1779 }
1780 
1781 
1782 /*
1783  * call-seq:
1784  * Thread.abort_on_exception -> true or false
1785  *
1786  * Returns the status of the global ``abort on exception'' condition. The
1787  * default is <code>false</code>. When set to <code>true</code>, or if the
1788  * global <code>$DEBUG</code> flag is <code>true</code> (perhaps because the
1789  * command line option <code>-d</code> was specified) all threads will abort
1790  * (the process will <code>exit(0)</code>) if an exception is raised in any
1791  * thread. See also <code>Thread::abort_on_exception=</code>.
1792  */
1793 
1794 static VALUE
1796 {
1797  return GET_THREAD()->vm->thread_abort_on_exception ? Qtrue : Qfalse;
1798 }
1799 
1800 
1801 /*
1802  * call-seq:
1803  * Thread.abort_on_exception= boolean -> true or false
1804  *
1805  * When set to <code>true</code>, all threads will abort if an exception is
1806  * raised. Returns the new state.
1807  *
1808  * Thread.abort_on_exception = true
1809  * t1 = Thread.new do
1810  * puts "In new thread"
1811  * raise "Exception from thread"
1812  * end
1813  * sleep(1)
1814  * puts "not reached"
1815  *
1816  * <em>produces:</em>
1817  *
1818  * In new thread
1819  * prog.rb:4: Exception from thread (RuntimeError)
1820  * from prog.rb:2:in `initialize'
1821  * from prog.rb:2:in `new'
1822  * from prog.rb:2
1823  */
1824 
1825 static VALUE
1827 {
1828  rb_secure(4);
1829  GET_THREAD()->vm->thread_abort_on_exception = RTEST(val);
1830  return val;
1831 }
1832 
1833 
1834 /*
1835  * call-seq:
1836  * thr.abort_on_exception -> true or false
1837  *
1838  * Returns the status of the thread-local ``abort on exception'' condition for
1839  * <i>thr</i>. The default is <code>false</code>. See also
1840  * <code>Thread::abort_on_exception=</code>.
1841  */
1842 
1843 static VALUE
1845 {
1846  rb_thread_t *th;
1847  GetThreadPtr(thread, th);
1848  return th->abort_on_exception ? Qtrue : Qfalse;
1849 }
1850 
1851 
1852 /*
1853  * call-seq:
1854  * thr.abort_on_exception= boolean -> true or false
1855  *
1856  * When set to <code>true</code>, causes all threads (including the main
1857  * program) to abort if an exception is raised in <i>thr</i>. The process will
1858  * effectively <code>exit(0)</code>.
1859  */
1860 
1861 static VALUE
1863 {
1864  rb_thread_t *th;
1865  rb_secure(4);
1866 
1867  GetThreadPtr(thread, th);
1868  th->abort_on_exception = RTEST(val);
1869  return val;
1870 }
1871 
1872 
1873 /*
1874  * call-seq:
1875  * thr.group -> thgrp or nil
1876  *
1877  * Returns the <code>ThreadGroup</code> which contains <i>thr</i>, or nil if
1878  * the thread is not a member of any group.
1879  *
1880  * Thread.main.group #=> #<ThreadGroup:0x4029d914>
1881  */
1882 
1883 VALUE
1885 {
1886  rb_thread_t *th;
1887  VALUE group;
1888  GetThreadPtr(thread, th);
1889  group = th->thgroup;
1890 
1891  if (!group) {
1892  group = Qnil;
1893  }
1894  return group;
1895 }
1896 
1897 static const char *
1899 {
1900  switch (status) {
1901  case THREAD_RUNNABLE:
1902  return "run";
1903  case THREAD_STOPPED:
1905  return "sleep";
1906  case THREAD_TO_KILL:
1907  return "aborting";
1908  case THREAD_KILLED:
1909  return "dead";
1910  default:
1911  return "unknown";
1912  }
1913 }
1914 
1915 static int
1917 {
1918  return th->status == THREAD_KILLED;
1919 }
1920 
1921 
1922 /*
1923  * call-seq:
1924  * thr.status -> string, false or nil
1925  *
1926  * Returns the status of <i>thr</i>: ``<code>sleep</code>'' if <i>thr</i> is
1927  * sleeping or waiting on I/O, ``<code>run</code>'' if <i>thr</i> is executing,
1928  * ``<code>aborting</code>'' if <i>thr</i> is aborting, <code>false</code> if
1929  * <i>thr</i> terminated normally, and <code>nil</code> if <i>thr</i>
1930  * terminated with an exception.
1931  *
1932  * a = Thread.new { raise("die now") }
1933  * b = Thread.new { Thread.stop }
1934  * c = Thread.new { Thread.exit }
1935  * d = Thread.new { sleep }
1936  * d.kill #=> #<Thread:0x401b3678 aborting>
1937  * a.status #=> nil
1938  * b.status #=> "sleep"
1939  * c.status #=> false
1940  * d.status #=> "aborting"
1941  * Thread.current.status #=> "run"
1942  */
1943 
1944 static VALUE
1946 {
1947  rb_thread_t *th;
1948  GetThreadPtr(thread, th);
1949 
1950  if (rb_threadptr_dead(th)) {
1951  if (!NIL_P(th->errinfo) && !FIXNUM_P(th->errinfo)
1952  /* TODO */ ) {
1953  return Qnil;
1954  }
1955  return Qfalse;
1956  }
1957  return rb_str_new2(thread_status_name(th->status));
1958 }
1959 
1960 
1961 /*
1962  * call-seq:
1963  * thr.alive? -> true or false
1964  *
1965  * Returns <code>true</code> if <i>thr</i> is running or sleeping.
1966  *
1967  * thr = Thread.new { }
1968  * thr.join #=> #<Thread:0x401b3fb0 dead>
1969  * Thread.current.alive? #=> true
1970  * thr.alive? #=> false
1971  */
1972 
1973 static VALUE
1975 {
1976  rb_thread_t *th;
1977  GetThreadPtr(thread, th);
1978 
1979  if (rb_threadptr_dead(th))
1980  return Qfalse;
1981  return Qtrue;
1982 }
1983 
1984 /*
1985  * call-seq:
1986  * thr.stop? -> true or false
1987  *
1988  * Returns <code>true</code> if <i>thr</i> is dead or sleeping.
1989  *
1990  * a = Thread.new { Thread.stop }
1991  * b = Thread.current
1992  * a.stop? #=> true
1993  * b.stop? #=> false
1994  */
1995 
1996 static VALUE
1998 {
1999  rb_thread_t *th;
2000  GetThreadPtr(thread, th);
2001 
2002  if (rb_threadptr_dead(th))
2003  return Qtrue;
2004  if (th->status == THREAD_STOPPED || th->status == THREAD_STOPPED_FOREVER)
2005  return Qtrue;
2006  return Qfalse;
2007 }
2008 
2009 /*
2010  * call-seq:
2011  * thr.safe_level -> integer
2012  *
2013  * Returns the safe level in effect for <i>thr</i>. Setting thread-local safe
2014  * levels can help when implementing sandboxes which run insecure code.
2015  *
2016  * thr = Thread.new { $SAFE = 3; sleep }
2017  * Thread.current.safe_level #=> 0
2018  * thr.safe_level #=> 3
2019  */
2020 
2021 static VALUE
2023 {
2024  rb_thread_t *th;
2025  GetThreadPtr(thread, th);
2026 
2027  return INT2NUM(th->safe_level);
2028 }
2029 
2030 /*
2031  * call-seq:
2032  * thr.inspect -> string
2033  *
2034  * Dump the name, id, and status of _thr_ to a string.
2035  */
2036 
2037 static VALUE
2039 {
2040  const char *cname = rb_obj_classname(thread);
2041  rb_thread_t *th;
2042  const char *status;
2043  VALUE str;
2044 
2045  GetThreadPtr(thread, th);
2046  status = thread_status_name(th->status);
2047  str = rb_sprintf("#<%s:%p %s>", cname, (void *)thread, status);
2048  OBJ_INFECT(str, thread);
2049 
2050  return str;
2051 }
2052 
2053 VALUE
2055 {
2056  rb_thread_t *th;
2057  st_data_t val;
2058 
2059  GetThreadPtr(thread, th);
2060  if (rb_safe_level() >= 4 && th != GET_THREAD()) {
2061  rb_raise(rb_eSecurityError, "Insecure: thread locals");
2062  }
2063  if (!th->local_storage) {
2064  return Qnil;
2065  }
2066  if (st_lookup(th->local_storage, id, &val)) {
2067  return (VALUE)val;
2068  }
2069  return Qnil;
2070 }
2071 
2072 /*
2073  * call-seq:
2074  * thr[sym] -> obj or nil
2075  *
2076  * Attribute Reference---Returns the value of a thread-local variable, using
2077  * either a symbol or a string name. If the specified variable does not exist,
2078  * returns <code>nil</code>.
2079  *
2080  * [
2081  * Thread.new { Thread.current["name"] = "A" },
2082  * Thread.new { Thread.current[:name] = "B" },
2083  * Thread.new { Thread.current["name"] = "C" }
2084  * ].each do |th|
2085  * th.join
2086  * puts "#{th.inspect}: #{th[:name]}"
2087  * end
2088  *
2089  * <em>produces:</em>
2090  *
2091  * #<Thread:0x00000002a54220 dead>: A
2092  * #<Thread:0x00000002a541a8 dead>: B
2093  * #<Thread:0x00000002a54130 dead>: C
2094  */
2095 
2096 static VALUE
2098 {
2099  return rb_thread_local_aref(thread, rb_to_id(id));
2100 }
2101 
2102 VALUE
2104 {
2105  rb_thread_t *th;
2106  GetThreadPtr(thread, th);
2107 
2108  if (rb_safe_level() >= 4 && th != GET_THREAD()) {
2109  rb_raise(rb_eSecurityError, "Insecure: can't modify thread locals");
2110  }
2111  if (OBJ_FROZEN(thread)) {
2112  rb_error_frozen("thread locals");
2113  }
2114  if (!th->local_storage) {
2116  }
2117  if (NIL_P(val)) {
2118  st_delete_wrap(th->local_storage, id);
2119  return Qnil;
2120  }
2121  st_insert(th->local_storage, id, val);
2122  return val;
2123 }
2124 
2125 /*
2126  * call-seq:
2127  * thr[sym] = obj -> obj
2128  *
2129  * Attribute Assignment---Sets or creates the value of a thread-local variable,
2130  * using either a symbol or a string. See also <code>Thread#[]</code>.
2131  */
2132 
2133 static VALUE
2135 {
2136  return rb_thread_local_aset(self, rb_to_id(id), val);
2137 }
2138 
2139 /*
2140  * call-seq:
2141  * thr.key?(sym) -> true or false
2142  *
2143  * Returns <code>true</code> if the given string (or symbol) exists as a
2144  * thread-local variable.
2145  *
2146  * me = Thread.current
2147  * me[:oliver] = "a"
2148  * me.key?(:oliver) #=> true
2149  * me.key?(:stanley) #=> false
2150  */
2151 
2152 static VALUE
2154 {
2155  rb_thread_t *th;
2156  ID id = rb_to_id(key);
2157 
2158  GetThreadPtr(self, th);
2159 
2160  if (!th->local_storage) {
2161  return Qfalse;
2162  }
2163  if (st_lookup(th->local_storage, id, 0)) {
2164  return Qtrue;
2165  }
2166  return Qfalse;
2167 }
2168 
2169 static int
2171 {
2172  rb_ary_push(ary, ID2SYM(key));
2173  return ST_CONTINUE;
2174 }
2175 
2176 static int
2178 {
2179  return (int)vm->living_threads->num_entries;
2180 }
2181 
2182 int
2184 {
2185  int num = 1;
2186  if (GET_THREAD()->vm->living_threads) {
2187  num = vm_living_thread_num(GET_THREAD()->vm);
2188  thread_debug("rb_thread_alone: %d\n", num);
2189  }
2190  return num == 1;
2191 }
2192 
2193 /*
2194  * call-seq:
2195  * thr.keys -> array
2196  *
2197  * Returns an array of the names of the thread-local variables (as Symbols).
2198  *
2199  * thr = Thread.new do
2200  * Thread.current[:cat] = 'meow'
2201  * Thread.current["dog"] = 'woof'
2202  * end
2203  * thr.join #=> #<Thread:0x401b3f10 dead>
2204  * thr.keys #=> [:dog, :cat]
2205  */
2206 
2207 static VALUE
2209 {
2210  rb_thread_t *th;
2211  VALUE ary = rb_ary_new();
2212  GetThreadPtr(self, th);
2213 
2214  if (th->local_storage) {
2216  }
2217  return ary;
2218 }
2219 
2220 /*
2221  * call-seq:
2222  * thr.priority -> integer
2223  *
2224  * Returns the priority of <i>thr</i>. The default is inherited from the
2225  * thread which created the new thread, or zero for the
2226  * initial main thread; higher-priority threads will run more frequently
2227  * than lower-priority threads (but lower-priority threads can also run).
2228  *
2229  * This is just a hint for the Ruby thread scheduler. It may be ignored on some
2230  * platforms.
2231  *
2232  * Thread.current.priority #=> 0
2233  */
2234 
2235 static VALUE
2237 {
2238  rb_thread_t *th;
2239  GetThreadPtr(thread, th);
2240  return INT2NUM(th->priority);
2241 }
2242 
2243 
2244 /*
2245  * call-seq:
2246  * thr.priority= integer -> thr
2247  *
2248  * Sets the priority of <i>thr</i> to <i>integer</i>. Higher-priority threads
2249  * will run more frequently than lower-priority threads (but lower-priority
2250  * threads can also run).
2251  *
2252  * This is just a hint for the Ruby thread scheduler. It may be ignored on some
2253  * platforms.
2254  *
2255  * count1 = count2 = 0
2256  * a = Thread.new do
2257  * loop { count1 += 1 }
2258  * end
2259  * a.priority = -1
2260  *
2261  * b = Thread.new do
2262  * loop { count2 += 1 }
2263  * end
2264  * b.priority = -2
2265  * sleep 1 #=> 1
2266  * count1 #=> 622504
2267  * count2 #=> 5832
2268  */
2269 
2270 static VALUE
2272 {
2273  rb_thread_t *th;
2274  int priority;
2275  GetThreadPtr(thread, th);
2276 
2277  rb_secure(4);
2278 
2279 #if USE_NATIVE_THREAD_PRIORITY
2280  th->priority = NUM2INT(prio);
2281  native_thread_apply_priority(th);
2282 #else
2283  priority = NUM2INT(prio);
2284  if (priority > RUBY_THREAD_PRIORITY_MAX) {
2285  priority = RUBY_THREAD_PRIORITY_MAX;
2286  }
2287  else if (priority < RUBY_THREAD_PRIORITY_MIN) {
2288  priority = RUBY_THREAD_PRIORITY_MIN;
2289  }
2290  th->priority = priority;
2291 #endif
2292  return INT2NUM(th->priority);
2293 }
2294 
2295 /* for IO */
2296 
2297 #if defined(NFDBITS) && defined(HAVE_RB_FD_INIT)
2298 
2299 /*
2300  * Several Unix platforms support file descriptors bigger than FD_SETSIZE
2301  * in the select(2) system call.
2302  *
2303  * - Linux 2.2.12 (?)
2304  * - NetBSD 1.2 (src/sys/kern/sys_generic.c:1.25)
2305  * select(2) documents how to allocate fd_set dynamically.
2306  * http://netbsd.gw.com/cgi-bin/man-cgi?select++NetBSD-4.0
2307  * - FreeBSD 2.2 (src/sys/kern/sys_generic.c:1.19)
2308  * - OpenBSD 2.0 (src/sys/kern/sys_generic.c:1.4)
2309  * select(2) documents how to allocate fd_set dynamically.
2310  * http://www.openbsd.org/cgi-bin/man.cgi?query=select&manpath=OpenBSD+4.4
2311  * - HP-UX documents how to allocate fd_set dynamically.
2312  * http://docs.hp.com/en/B2355-60105/select.2.html
2313  * - Solaris 8 has select_large_fdset
2314  *
2315  * When fd_set is not big enough to hold big file descriptors,
2316  * it should be allocated dynamically.
2317  * Note that this assumes fd_set is structured as a bitmap.
2318  *
2319  * rb_fd_init allocates the memory.
2320  * rb_fd_term frees the memory.
2321  * rb_fd_set may re-allocate the bitmap.
2322  *
2323  * So rb_fd_set doesn't reject file descriptors bigger than FD_SETSIZE.
2324  */
2325 
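/*
 * Illustrative sketch (not part of the original source): typical use of the
 * rb_fdset_t API described above.  rb_fd_set() grows the bitmap on demand,
 * so descriptors larger than FD_SETSIZE are accepted.  sock_fd and
 * handle_readable() are hypothetical.
 *
 *   rb_fdset_t fds;
 *   struct timeval tv = { 1, 0 };
 *
 *   rb_fd_init(&fds);
 *   rb_fd_set(sock_fd, &fds);
 *   if (rb_fd_select(sock_fd + 1, &fds, 0, 0, &tv) > 0 &&
 *       rb_fd_isset(sock_fd, &fds)) {
 *       handle_readable(sock_fd);
 *   }
 *   rb_fd_term(&fds);
 */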
2326 void
2327 rb_fd_init(rb_fdset_t *fds)
2328 {
2329  fds->maxfd = 0;
2330  fds->fdset = ALLOC(fd_set);
2331  FD_ZERO(fds->fdset);
2332 }
2333 
2334 void
2336 {
2337  size_t size = howmany(rb_fd_max(src), NFDBITS) * sizeof(fd_mask);
2338 
2339  if (size < sizeof(fd_set))
2340  size = sizeof(fd_set);
2341  dst->maxfd = src->maxfd;
2342  dst->fdset = xmalloc(size);
2343  memcpy(dst->fdset, src->fdset, size);
2344 }
2345 
2346 void
2347 rb_fd_term(rb_fdset_t *fds)
2348 {
2349  if (fds->fdset) xfree(fds->fdset);
2350  fds->maxfd = 0;
2351  fds->fdset = 0;
2352 }
2353 
2354 void
2355 rb_fd_zero(rb_fdset_t *fds)
2356 {
2357  if (fds->fdset)
2358  MEMZERO(fds->fdset, fd_mask, howmany(fds->maxfd, NFDBITS));
2359 }
2360 
2361 static void
2362 rb_fd_resize(int n, rb_fdset_t *fds)
2363 {
2364  size_t m = howmany(n + 1, NFDBITS) * sizeof(fd_mask);
2365  size_t o = howmany(fds->maxfd, NFDBITS) * sizeof(fd_mask);
2366 
2367  if (m < sizeof(fd_set)) m = sizeof(fd_set);
2368  if (o < sizeof(fd_set)) o = sizeof(fd_set);
2369 
2370  if (m > o) {
2371  fds->fdset = xrealloc(fds->fdset, m);
2372  memset((char *)fds->fdset + o, 0, m - o);
2373  }
2374  if (n >= fds->maxfd) fds->maxfd = n + 1;
2375 }
2376 
2377 void
2378 rb_fd_set(int n, rb_fdset_t *fds)
2379 {
2380  rb_fd_resize(n, fds);
2381  FD_SET(n, fds->fdset);
2382 }
2383 
2384 void
2385 rb_fd_clr(int n, rb_fdset_t *fds)
2386 {
2387  if (n >= fds->maxfd) return;
2388  FD_CLR(n, fds->fdset);
2389 }
2390 
2391 int
2392 rb_fd_isset(int n, const rb_fdset_t *fds)
2393 {
2394  if (n >= fds->maxfd) return 0;
2395  return FD_ISSET(n, fds->fdset) != 0; /* "!= 0" avoids FreeBSD PR 91421 */
2396 }
2397 
2398 void
2399 rb_fd_copy(rb_fdset_t *dst, const fd_set *src, int max)
2400 {
2401  size_t size = howmany(max, NFDBITS) * sizeof(fd_mask);
2402 
2403  if (size < sizeof(fd_set)) size = sizeof(fd_set);
2404  dst->maxfd = max;
2405  dst->fdset = xrealloc(dst->fdset, size);
2406  memcpy(dst->fdset, src, size);
2407 }
2408 
2409 static void
2410 rb_fd_rcopy(fd_set *dst, rb_fdset_t *src)
2411 {
2412  size_t size = howmany(rb_fd_max(src), NFDBITS) * sizeof(fd_mask);
2413 
2414  if (size > sizeof(fd_set)) {
2415  rb_raise(rb_eArgError, "too large fdsets");
2416  }
2417  memcpy(dst, rb_fd_ptr(src), sizeof(fd_set));
2418 }
2419 
2420 void
2421 rb_fd_dup(rb_fdset_t *dst, const rb_fdset_t *src)
2422 {
2423  size_t size = howmany(rb_fd_max(src), NFDBITS) * sizeof(fd_mask);
2424 
2425  if (size < sizeof(fd_set))
2426  size = sizeof(fd_set);
2427  dst->maxfd = src->maxfd;
2428  dst->fdset = xrealloc(dst->fdset, size);
2429  memcpy(dst->fdset, src->fdset, size);
2430 }
2431 
2432 int
2433 rb_fd_select(int n, rb_fdset_t *readfds, rb_fdset_t *writefds, rb_fdset_t *exceptfds, struct timeval *timeout)
2434 {
2435  fd_set *r = NULL, *w = NULL, *e = NULL;
2436  if (readfds) {
2437  rb_fd_resize(n - 1, readfds);
2438  r = rb_fd_ptr(readfds);
2439  }
2440  if (writefds) {
2441  rb_fd_resize(n - 1, writefds);
2442  w = rb_fd_ptr(writefds);
2443  }
2444  if (exceptfds) {
2445  rb_fd_resize(n - 1, exceptfds);
2446  e = rb_fd_ptr(exceptfds);
2447  }
2448  return select(n, r, w, e, timeout);
2449 }
2450 
2451 #undef FD_ZERO
2452 #undef FD_SET
2453 #undef FD_CLR
2454 #undef FD_ISSET
2455 
2456 #define FD_ZERO(f) rb_fd_zero(f)
2457 #define FD_SET(i, f) rb_fd_set((i), (f))
2458 #define FD_CLR(i, f) rb_fd_clr((i), (f))
2459 #define FD_ISSET(i, f) rb_fd_isset((i), (f))
2460 
2461 #elif defined(_WIN32)
2462 
2463 void
2464 rb_fd_init(rb_fdset_t *set)
2465 {
2466  set->capa = FD_SETSIZE;
2467  set->fdset = ALLOC(fd_set);
2468  FD_ZERO(set->fdset);
2469 }
2470 
2471 void
2472 rb_fd_init_copy(rb_fdset_t *dst, rb_fdset_t *src)
2473 {
2474  rb_fd_init(dst);
2475  rb_fd_dup(dst, src);
2476 }
2477 
2478 static void
2479 rb_fd_rcopy(fd_set *dst, rb_fdset_t *src)
2480 {
2481  int max = rb_fd_max(src);
2482 
2483  /* we assume src is the result of select() with dst, so dst should be
2484  * larger than or equal to src. */
2485  if (max > FD_SETSIZE || max > dst->fd_count) {
2486  rb_raise(rb_eArgError, "too large fdsets");
2487  }
2488 
2489  memcpy(dst->fd_array, src->fdset->fd_array, max);
2490  dst->fd_count = max;
2491 }
2492 
2493 void
2494 rb_fd_term(rb_fdset_t *set)
2495 {
2496  xfree(set->fdset);
2497  set->fdset = NULL;
2498  set->capa = 0;
2499 }
2500 
2501 void
2502 rb_fd_set(int fd, rb_fdset_t *set)
2503 {
2504  unsigned int i;
2505  SOCKET s = rb_w32_get_osfhandle(fd);
2506 
2507  for (i = 0; i < set->fdset->fd_count; i++) {
2508  if (set->fdset->fd_array[i] == s) {
2509  return;
2510  }
2511  }
2512  if (set->fdset->fd_count >= (unsigned)set->capa) {
2513  set->capa = (set->fdset->fd_count / FD_SETSIZE + 1) * FD_SETSIZE;
2514  set->fdset = xrealloc(set->fdset, sizeof(unsigned int) + sizeof(SOCKET) * set->capa);
2515  }
2516  set->fdset->fd_array[set->fdset->fd_count++] = s;
2517 }
2518 
2519 #undef FD_ZERO
2520 #undef FD_SET
2521 #undef FD_CLR
2522 #undef FD_ISSET
2523 
2524 #define FD_ZERO(f) rb_fd_zero(f)
2525 #define FD_SET(i, f) rb_fd_set((i), (f))
2526 #define FD_CLR(i, f) rb_fd_clr((i), (f))
2527 #define FD_ISSET(i, f) rb_fd_isset((i), (f))
2528 
2529 #else
2530 #define rb_fd_rcopy(d, s) (*(d) = *(s))
2531 #endif
2532 
2533 #if defined(__CYGWIN__)
2534 static long
2535 cmp_tv(const struct timeval *a, const struct timeval *b)
2536 {
2537  long d = (a->tv_sec - b->tv_sec);
2538  return (d != 0) ? d : (a->tv_usec - b->tv_usec);
2539 }
2540 
2541 static int
2542 subtract_tv(struct timeval *rest, const struct timeval *wait)
2543 {
2544  if (rest->tv_sec < wait->tv_sec) {
2545  return 0;
2546  }
2547  while (rest->tv_usec < wait->tv_usec) {
2548  if (rest->tv_sec <= wait->tv_sec) {
2549  return 0;
2550  }
2551  rest->tv_sec -= 1;
2552  rest->tv_usec += 1000 * 1000;
2553  }
2554  rest->tv_sec -= wait->tv_sec;
2555  rest->tv_usec -= wait->tv_usec;
2556  return rest->tv_sec != 0 || rest->tv_usec != 0;
2557 }
2558 #endif
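/*
 * Editor's note: a worked example of the Cygwin-only helpers above.  With
 * rest = {tv_sec = 2, tv_usec = 100000} and wait = {tv_sec = 0, tv_usec = 300000},
 * subtract_tv() borrows one second, leaving rest = {1, 800000}, and returns
 * nonzero because time remains; cmp_tv() on the same pair returns a positive
 * value because rest is the larger interval.
 */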
2559 
2560 static int
2561 do_select(int n, rb_fdset_t *read, rb_fdset_t *write, rb_fdset_t *except,
2562  struct timeval *timeout)
2563 {
2564  int result, lerrno;
2565  rb_fdset_t UNINITIALIZED_VAR(orig_read);
2566  rb_fdset_t UNINITIALIZED_VAR(orig_write);
2567  rb_fdset_t UNINITIALIZED_VAR(orig_except);
2568  double limit = 0;
2569  struct timeval wait_rest;
2570 # if defined(__CYGWIN__)
2571  struct timeval start_time;
2572 # endif
2573 
2574  if (timeout) {
2575 # if defined(__CYGWIN__)
2576  gettimeofday(&start_time, NULL);
2577  limit = (double)start_time.tv_sec + (double)start_time.tv_usec*1e-6;
2578 # else
2579  limit = timeofday();
2580 # endif
2581  limit += (double)timeout->tv_sec+(double)timeout->tv_usec*1e-6;
2582  wait_rest = *timeout;
2583  timeout = &wait_rest;
2584  }
2585 
2586  if (read)
2587  rb_fd_init_copy(&orig_read, read);
2588  if (write)
2589  rb_fd_init_copy(&orig_write, write);
2590  if (except)
2591  rb_fd_init_copy(&orig_except, except);
2592 
2593  retry:
2594  lerrno = 0;
2595 
2596 #if defined(__CYGWIN__)
2597  {
2598  int finish = 0;
2599  /* polling duration: 100ms */
2600  struct timeval wait_100ms, *wait;
2601  wait_100ms.tv_sec = 0;
2602  wait_100ms.tv_usec = 100 * 1000; /* 100 ms */
2603 
2604  do {
2605  wait = (timeout == 0 || cmp_tv(&wait_100ms, timeout) < 0) ? &wait_100ms : timeout;
2606  BLOCKING_REGION({
2607  do {
2608  result = rb_fd_select(n, read, write, except, wait);
2609  if (result < 0) lerrno = errno;
2610  if (result != 0) break;
2611 
2612  if (read)
2613  rb_fd_dup(read, &orig_read);
2614  if (write)
2615  rb_fd_dup(write, &orig_write);
2616  if (except)
2617  rb_fd_dup(except, &orig_except);
2618  if (timeout) {
2619  struct timeval elapsed;
2620  gettimeofday(&elapsed, NULL);
2621  subtract_tv(&elapsed, &start_time);
2622  gettimeofday(&start_time, NULL);
2623  if (!subtract_tv(timeout, &elapsed)) {
2624  finish = 1;
2625  break;
2626  }
2627  if (cmp_tv(&wait_100ms, timeout) > 0) wait = timeout;
2628  }
2629  } while (__th->interrupt_flag == 0);
2630  }, 0, 0);
2631  } while (result == 0 && !finish);
2632  }
2633 #elif defined(_WIN32)
2634  {
2635  rb_thread_t *th = GET_THREAD();
2636  BLOCKING_REGION({
2637  result = native_fd_select(n, read, write, except, timeout, th);
2638  if (result < 0) lerrno = errno;
2639  }, ubf_select, th);
2640  }
2641 #else
2642  BLOCKING_REGION({
2643  result = rb_fd_select(n, read, write, except, timeout);
2644  if (result < 0) lerrno = errno;
2645  }, ubf_select, GET_THREAD());
2646 #endif
2647 
2648  errno = lerrno;
2649 
2650  if (result < 0) {
2651  switch (errno) {
2652  case EINTR:
2653 #ifdef ERESTART
2654  case ERESTART:
2655 #endif
2656  if (read)
2657  rb_fd_dup(read, &orig_read);
2658  if (write)
2659  rb_fd_dup(write, &orig_write);
2660  if (except)
2661  rb_fd_dup(except, &orig_except);
2662 
2663  if (timeout) {
2664  double d = limit - timeofday();
2665 
2666  wait_rest.tv_sec = (unsigned int)d;
2667  wait_rest.tv_usec = (int)((d-(double)wait_rest.tv_sec)*1e6);
2668  if (wait_rest.tv_sec < 0) wait_rest.tv_sec = 0;
2669  if (wait_rest.tv_usec < 0) wait_rest.tv_usec = 0;
2670  }
2671 
2672  goto retry;
2673  default:
2674  break;
2675  }
2676  }
2677 
2678  if (read)
2679  rb_fd_term(&orig_read);
2680  if (write)
2681  rb_fd_term(&orig_write);
2682  if (except)
2683  rb_fd_term(&orig_except);
2684 
2685  return result;
2686 }
2687 
2688 static void
2689 rb_thread_wait_fd_rw(int fd, int read)
2690 {
2691  int result = 0;
2692  int events = read ? RB_WAITFD_IN : RB_WAITFD_OUT;
2693 
2694  thread_debug("rb_thread_wait_fd_rw(%d, %s)\n", fd, read ? "read" : "write");
2695 
2696  if (fd < 0) {
2697  rb_raise(rb_eIOError, "closed stream");
2698  }
2699  if (rb_thread_alone()) return;
2700  while (result <= 0) {
2701  result = rb_wait_for_single_fd(fd, events, NULL);
2702 
2703  if (result < 0) {
2704  rb_sys_fail(0);
2705  }
2706  }
2707 
2708  thread_debug("rb_thread_wait_fd_rw(%d, %s): done\n", fd, read ? "read" : "write");
2709 }
2710 
2711 void
2712 rb_thread_wait_fd(int fd)
2713 {
2714  rb_thread_wait_fd_rw(fd, 1);
2715 }
2716 
2717 int
2718 rb_thread_fd_writable(int fd)
2719 {
2720  rb_thread_wait_fd_rw(fd, 0);
2721  return TRUE;
2722 }
2723 
2724 int
2725 rb_thread_select(int max, fd_set * read, fd_set * write, fd_set * except,
2726  struct timeval *timeout)
2727 {
2728  rb_fdset_t fdsets[3];
2729  rb_fdset_t *rfds = NULL;
2730  rb_fdset_t *wfds = NULL;
2731  rb_fdset_t *efds = NULL;
2732  int retval;
2733 
2734  if (read) {
2735  rfds = &fdsets[0];
2736  rb_fd_init(rfds);
2737  rb_fd_copy(rfds, read, max);
2738  }
2739  if (write) {
2740  wfds = &fdsets[1];
2741  rb_fd_init(wfds);
2742  rb_fd_copy(wfds, write, max);
2743  }
2744  if (except) {
2745  efds = &fdsets[2];
2746  rb_fd_init(efds);
2747  rb_fd_copy(efds, except, max);
2748  }
2749 
2750  retval = rb_thread_fd_select(max, rfds, wfds, efds, timeout);
2751 
2752  if (rfds) {
2753  rb_fd_rcopy(read, rfds);
2754  rb_fd_term(rfds);
2755  }
2756  if (wfds) {
2757  rb_fd_rcopy(write, wfds);
2758  rb_fd_term(wfds);
2759  }
2760  if (efds) {
2761  rb_fd_rcopy(except, efds);
2762  rb_fd_term(efds);
2763  }
2764 
2765  return retval;
2766 }
2767 
2768 int
2769 rb_thread_fd_select(int max, rb_fdset_t * read, rb_fdset_t * write, rb_fdset_t * except,
2770  struct timeval *timeout)
2771 {
2772  if (!read && !write && !except) {
2773  if (!timeout) {
2774  rb_thread_sleep_forever();
2775  return 0;
2776  }
2777  rb_thread_wait_for(*timeout);
2778  return 0;
2779  }
2780 
2781  if (read) {
2782  rb_fd_resize(max - 1, read);
2783  }
2784  if (write) {
2785  rb_fd_resize(max - 1, write);
2786  }
2787  if (except) {
2788  rb_fd_resize(max - 1, except);
2789  }
2790  return do_select(max, read, write, except, timeout);
2791 }
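/*
 * Editor's note: an illustrative sketch of calling rb_thread_fd_select()
 * directly with an rb_fdset_t, as an extension might do.  Unlike a plain
 * select(2), it releases the GVL while waiting and retries on EINTR (see
 * do_select above).  `fd` and the wrapper function are hypothetical.
 */
#if 0 /* example only, not compiled */
static int
fd_select_example(int fd)
{
    rb_fdset_t rfds;
    struct timeval tv = { 0, 500000 };   /* 500 ms */
    int n, ready;

    rb_fd_init(&rfds);
    rb_fd_set(fd, &rfds);
    n = rb_thread_fd_select(fd + 1, &rfds, NULL, NULL, &tv);
    ready = (n > 0 && rb_fd_isset(fd, &rfds));
    rb_fd_term(&rfds);
    return ready;
}
#endif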
2792 
2793 /*
2794  * poll() is supported by many OSes, but so far Linux is the only
2795  * one we know of that supports using poll() in all places select()
2796  * would work.
2797  */
2798 #if defined(HAVE_POLL) && defined(linux)
2799 # define USE_POLL
2800 #endif
2801 
2802 #ifdef USE_POLL
2803 
2804 /* The same as the Linux kernel. TODO: make a platform-independent definition. */
2805 #define POLLIN_SET (POLLRDNORM | POLLRDBAND | POLLIN | POLLHUP | POLLERR)
2806 #define POLLOUT_SET (POLLWRBAND | POLLWRNORM | POLLOUT | POLLERR)
2807 #define POLLEX_SET (POLLPRI)
2808 
2809 #define TIMET_MAX (~(time_t)0 <= 0 ? (time_t)((~(unsigned_time_t)0) >> 1) : (time_t)(~(unsigned_time_t)0))
2810 #define TIMET_MIN (~(time_t)0 <= 0 ? (time_t)(((unsigned_time_t)1) << (sizeof(time_t) * CHAR_BIT - 1)) : (time_t)0)
2811 
2812 #ifndef HAVE_PPOLL
2813 /* TODO: don't ignore sigmask */
2814 int ppoll(struct pollfd *fds, nfds_t nfds,
2815  const struct timespec *ts, const sigset_t *sigmask)
2816 {
2817  int timeout_ms;
2818 
2819  if (ts) {
2820  int tmp, tmp2;
2821 
2822  if (ts->tv_sec > TIMET_MAX/1000)
2823  timeout_ms = -1;
2824  else {
2825  tmp = ts->tv_sec * 1000;
2826  tmp2 = ts->tv_nsec / (1000 * 1000);
2827  if (TIMET_MAX - tmp < tmp2)
2828  timeout_ms = -1;
2829  else
2830  timeout_ms = tmp + tmp2;
2831  }
2832  } else
2833  timeout_ms = -1;
2834 
2835  return poll(fds, nfds, timeout_ms);
2836 }
2837 #endif
2838 
2839 /*
2840  * returns a mask of events
2841  */
2842 int
2843 rb_wait_for_single_fd(int fd, int events, struct timeval *tv)
2844 {
2845  struct pollfd fds;
2846  int result, lerrno;
2847  double limit = 0;
2848  struct timespec ts;
2849  struct timespec *timeout = NULL;
2850 
2851  if (tv) {
2852  ts.tv_sec = tv->tv_sec;
2853  ts.tv_nsec = tv->tv_usec * 1000;
2854  limit = timeofday();
2855  limit += (double)tv->tv_sec + (double)tv->tv_usec * 1e-6;
2856  timeout = &ts;
2857  }
2858 
2859  fds.fd = fd;
2860  fds.events = (short)events;
2861 
2862 retry:
2863  lerrno = 0;
2864  BLOCKING_REGION({
2865  result = ppoll(&fds, 1, timeout, NULL);
2866  if (result < 0) lerrno = errno;
2867  }, ubf_select, GET_THREAD());
2868 
2869  if (result < 0) {
2870  errno = lerrno;
2871  switch (errno) {
2872  case EINTR:
2873 #ifdef ERESTART
2874  case ERESTART:
2875 #endif
2876  if (timeout) {
2877  double d = limit - timeofday();
2878 
2879  ts.tv_sec = (long)d;
2880  ts.tv_nsec = (long)((d - (double)ts.tv_sec) * 1e9);
2881  if (ts.tv_sec < 0)
2882  ts.tv_sec = 0;
2883  if (ts.tv_nsec < 0)
2884  ts.tv_nsec = 0;
2885  }
2886  goto retry;
2887  }
2888  return -1;
2889  }
2890 
2891  if (fds.revents & POLLNVAL) {
2892  errno = EBADF;
2893  return -1;
2894  }
2895 
2896  /*
2897  * POLLIN and POLLOUT have different meanings from select(2)'s read/write bits,
2898  * so we need to fix them up.
2899  */
2900  result = 0;
2901  if (fds.revents & POLLIN_SET)
2902  result |= RB_WAITFD_IN;
2903  if (fds.revents & POLLOUT_SET)
2904  result |= RB_WAITFD_OUT;
2905  if (fds.revents & POLLEX_SET)
2906  result |= RB_WAITFD_PRI;
2907 
2908  return result;
2909 }
2910 #else /* ! USE_POLL - implement rb_io_poll_fd() using select() */
2911 static rb_fdset_t *init_set_fd(int fd, rb_fdset_t *fds)
2912 {
2913  rb_fd_init(fds);
2914  rb_fd_set(fd, fds);
2915 
2916  return fds;
2917 }
2918 
2919 struct select_args {
2920  union {
2921  int fd;
2922  int error;
2923  } as;
2924  rb_fdset_t *read;
2925  rb_fdset_t *write;
2926  rb_fdset_t *except;
2927  struct timeval *tv;
2928 };
2929 
2930 static VALUE
2931 select_single(VALUE ptr)
2932 {
2933  struct select_args *args = (struct select_args *)ptr;
2934  int r;
2935 
2936  r = rb_thread_fd_select(args->as.fd + 1,
2937  args->read, args->write, args->except, args->tv);
2938  if (r == -1)
2939  args->as.error = errno;
2940  if (r > 0) {
2941  r = 0;
2942  if (args->read && rb_fd_isset(args->as.fd, args->read))
2943  r |= RB_WAITFD_IN;
2944  if (args->write && rb_fd_isset(args->as.fd, args->write))
2945  r |= RB_WAITFD_OUT;
2946  if (args->except && rb_fd_isset(args->as.fd, args->except))
2947  r |= RB_WAITFD_PRI;
2948  }
2949  return (VALUE)r;
2950 }
2951 
2952 static VALUE
2953 select_single_cleanup(VALUE ptr)
2954 {
2955  struct select_args *args = (struct select_args *)ptr;
2956 
2957  if (args->read) rb_fd_term(args->read);
2958  if (args->write) rb_fd_term(args->write);
2959  if (args->except) rb_fd_term(args->except);
2960 
2961  return (VALUE)-1;
2962 }
2963 
2964 int
2965 rb_wait_for_single_fd(int fd, int events, struct timeval *tv)
2966 {
2967  rb_fdset_t rfds, wfds, efds;
2968  struct select_args args;
2969  int r;
2970  VALUE ptr = (VALUE)&args;
2971 
2972  args.as.fd = fd;
2973  args.read = (events & RB_WAITFD_IN) ? init_set_fd(fd, &rfds) : NULL;
2974  args.write = (events & RB_WAITFD_OUT) ? init_set_fd(fd, &wfds) : NULL;
2975  args.except = (events & RB_WAITFD_PRI) ? init_set_fd(fd, &efds) : NULL;
2976  args.tv = tv;
2977 
2978  r = (int)rb_ensure(select_single, ptr, select_single_cleanup, ptr);
2979  if (r == -1)
2980  errno = args.as.error;
2981 
2982  return r;
2983 }
2984 #endif /* ! USE_POLL */
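/*
 * Editor's note: an illustrative sketch of rb_wait_for_single_fd(), which both
 * implementations above (poll-based and select-based) provide.  The return
 * value is a mask of RB_WAITFD_* events, 0 on timeout, or -1 with errno set.
 * `fd` and the wrapper function are hypothetical.
 */
#if 0 /* example only, not compiled */
static void
single_fd_example(int fd)
{
    struct timeval tv = { 5, 0 };        /* give up after five seconds */
    int revents = rb_wait_for_single_fd(fd, RB_WAITFD_IN | RB_WAITFD_PRI, &tv);

    if (revents < 0) {
        rb_sys_fail(0);                  /* errno describes the failure */
    }
    else if (revents & RB_WAITFD_IN) {
        /* fd is readable */
    }
    else if (revents == 0) {
        /* timed out */
    }
}
#endif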
2985 
2986 /*
2987  * for GC
2988  */
2989 
2990 #ifdef USE_CONSERVATIVE_STACK_END
2991 void
2992 rb_gc_set_stack_end(VALUE **stack_end_p)
2993 {
2994  VALUE stack_end;
2995  *stack_end_p = &stack_end;
2996 }
2997 #endif
2998 
2999 void
3000 rb_gc_save_machine_context(rb_thread_t *th)
3001 {
3002  SET_MACHINE_STACK_END(&th->machine_stack_end);
3003 #ifdef __ia64
3004  th->machine_register_stack_end = rb_ia64_bsp();
3005 #endif
3006  setjmp(th->machine_regs);
3007 }
3008 
3009 /*
3010  *
3011  */
3012 
3013 void
3014 rb_threadptr_check_signal(rb_thread_t *mth)
3015 {
3016  /* mth must be main_thread */
3017  if (rb_signal_buff_size() > 0) {
3018  /* wakeup main thread */
3019  rb_threadptr_interrupt(mth);
3020  }
3021 }
3022 
3023 static void
3024 timer_thread_function(void *arg)
3025 {
3026  rb_vm_t *vm = GET_VM(); /* TODO: fix me for Multi-VM */
3027 
3028  /* for time slice */
3029  RUBY_VM_SET_TIMER_INTERRUPT(vm->running_thread);
3030 
3031  /* check signal */
3032  rb_threadptr_check_signal(vm->main_thread);
3033 
3034 #if 0
3035  /* probe profiler */
3036  if (vm->prove_profile.enable) {
3037  rb_thread_t *th = vm->running_thread;
3038 
3039  if (vm->during_gc) {
3040  /* GC probe profiling */
3041  }
3042  }
3043 #endif
3044 }
3045 
3046 void
3047 rb_thread_stop_timer_thread(int close_anyway)
3048 {
3049  if (timer_thread_id && native_stop_timer_thread(close_anyway)) {
3050  native_reset_timer_thread();
3051  }
3052 }
3053 
3054 void
3055 rb_thread_reset_timer_thread(void)
3056 {
3057  native_reset_timer_thread();
3058 }
3059 
3060 void
3061 rb_thread_start_timer_thread(void)
3062 {
3063  system_working = 1;
3064  rb_thread_create_timer_thread();
3065 }
3066 
3067 static int
3068 clear_coverage_i(st_data_t key, st_data_t val, st_data_t dummy)
3069 {
3070  int i;
3071  VALUE lines = (VALUE)val;
3072 
3073  for (i = 0; i < RARRAY_LEN(lines); i++) {
3074  if (RARRAY_PTR(lines)[i] != Qnil) {
3075  RARRAY_PTR(lines)[i] = INT2FIX(0);
3076  }
3077  }
3078  return ST_CONTINUE;
3079 }
3080 
3081 static void
3082 clear_coverage(void)
3083 {
3084  VALUE coverages = rb_get_coverages();
3085  if (RTEST(coverages)) {
3086  st_foreach(RHASH_TBL(coverages), clear_coverage_i, 0);
3087  }
3088 }
3089 
3090 static void
3092 {
3093  rb_thread_t *th = GET_THREAD();
3094  rb_vm_t *vm = th->vm;
3095  VALUE thval = th->self;
3096  vm->main_thread = th;
3097 
3098  gvl_atfork(th->vm);
3099  st_foreach(vm->living_threads, atfork, (st_data_t)th);
3100  st_clear(vm->living_threads);
3101  st_insert(vm->living_threads, thval, (st_data_t)th->thread_id);
3102  vm->sleeper = 0;
3103  clear_coverage();
3104 }
3105 
3106 static int
3107 terminate_atfork_i(st_data_t key, st_data_t val, st_data_t current_th)
3108 {
3109  VALUE thval = key;
3110  rb_thread_t *th;
3111  GetThreadPtr(thval, th);
3112 
3113  if (th != (rb_thread_t *)current_th) {
3117  }
3118  return ST_CONTINUE;
3119 }
3120 
3121 void
3122 rb_thread_atfork(void)
3123 {
3124  rb_thread_atfork_internal(terminate_atfork_i);
3125  GET_THREAD()->join_list_head = 0;
3126 
3127  /* We don't want to reproduce CVE-2003-0900. */
3128  rb_reset_random_seed();
3129 }
3130 
3131 static int
3133 {
3134  VALUE thval = key;
3135  rb_thread_t *th;
3136  GetThreadPtr(thval, th);
3137 
3138  if (th != (rb_thread_t *)current_th) {
3140  }
3141  return ST_CONTINUE;
3142 }
3143 
3144 void
3145 rb_thread_atfork_before_exec(void)
3146 {
3147  rb_thread_atfork_internal(terminate_atfork_before_exec_i);
3148 }
3149 
3150 struct thgroup {
3151  int enclosed;
3152  VALUE group;
3153 };
3154 
3155 static size_t
3156 thgroup_memsize(const void *ptr)
3157 {
3158  return ptr ? sizeof(struct thgroup) : 0;
3159 }
3160 
3162  "thgroup",
3164 };
3165 
3166 /*
3167  * Document-class: ThreadGroup
3168  *
3169  * <code>ThreadGroup</code> provides a means of keeping track of a number of
3170  * threads as a group. A <code>Thread</code> can belong to only one
3171  * <code>ThreadGroup</code> at a time; adding a thread to a new group will
3172  * remove it from any previous group.
3173  *
3174  * Newly created threads belong to the same group as the thread from which they
3175  * were created.
3176  */
3177 
3178 static VALUE
3180 {
3181  VALUE group;
3182  struct thgroup *data;
3183 
3184  group = TypedData_Make_Struct(klass, struct thgroup, &thgroup_data_type, data);
3185  data->enclosed = 0;
3186  data->group = group;
3187 
3188  return group;
3189 }
3190 
3191 struct thgroup_list_params {
3192  VALUE ary;
3193  VALUE group;
3194 };
3195 
3196 static int
3198 {
3199  VALUE thread = (VALUE)key;
3200  VALUE ary = ((struct thgroup_list_params *)data)->ary;
3201  VALUE group = ((struct thgroup_list_params *)data)->group;
3202  rb_thread_t *th;
3203  GetThreadPtr(thread, th);
3204 
3205  if (th->thgroup == group) {
3206  rb_ary_push(ary, thread);
3207  }
3208  return ST_CONTINUE;
3209 }
3210 
3211 /*
3212  * call-seq:
3213  * thgrp.list -> array
3214  *
3215  * Returns an array of all existing <code>Thread</code> objects that belong to
3216  * this group.
3217  *
3218  * ThreadGroup::Default.list #=> [#<Thread:0x401bdf4c run>]
3219  */
3220 
3221 static VALUE
3223 {
3224  VALUE ary = rb_ary_new();
3225  struct thgroup_list_params param;
3226 
3227  param.ary = ary;
3228  param.group = group;
3229  st_foreach(GET_THREAD()->vm->living_threads, thgroup_list_i, (st_data_t) & param);
3230  return ary;
3231 }
3232 
3233 
3234 /*
3235  * call-seq:
3236  * thgrp.enclose -> thgrp
3237  *
3238  * Prevents threads from being added to or removed from the receiving
3239  * <code>ThreadGroup</code>. New threads can still be started in an enclosed
3240  * <code>ThreadGroup</code>.
3241  *
3242  * ThreadGroup::Default.enclose #=> #<ThreadGroup:0x4029d914>
3243  * thr = Thread::new { Thread.stop } #=> #<Thread:0x402a7210 sleep>
3244  * tg = ThreadGroup::new #=> #<ThreadGroup:0x402752d4>
3245  * tg.add thr
3246  *
3247  * <em>produces:</em>
3248  *
3249  * ThreadError: can't move from the enclosed thread group
3250  */
3251 
3252 static VALUE
3254 {
3255  struct thgroup *data;
3256 
3257  TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
3258  data->enclosed = 1;
3259 
3260  return group;
3261 }
3262 
3263 
3264 /*
3265  * call-seq:
3266  * thgrp.enclosed? -> true or false
3267  *
3268  * Returns <code>true</code> if <em>thgrp</em> is enclosed. See also
3269  * ThreadGroup#enclose.
3270  */
3271 
3272 static VALUE
3274 {
3275  struct thgroup *data;
3276 
3277  TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
3278  if (data->enclosed)
3279  return Qtrue;
3280  return Qfalse;
3281 }
3282 
3283 
3284 /*
3285  * call-seq:
3286  * thgrp.add(thread) -> thgrp
3287  *
3288  * Adds the given <em>thread</em> to this group, removing it from any other
3289  * group to which it may have previously belonged.
3290  *
3291  * puts "Initial group is #{ThreadGroup::Default.list}"
3292  * tg = ThreadGroup.new
3293  * t1 = Thread.new { sleep }
3294  * t2 = Thread.new { sleep }
3295  * puts "t1 is #{t1}"
3296  * puts "t2 is #{t2}"
3297  * tg.add(t1)
3298  * puts "Initial group now #{ThreadGroup::Default.list}"
3299  * puts "tg group now #{tg.list}"
3300  *
3301  * <em>produces:</em>
3302  *
3303  * Initial group is #<Thread:0x401bdf4c>
3304  * t1 is #<Thread:0x401b3c90>
3305  * t2 is #<Thread:0x401b3c18>
3306  * Initial group now #<Thread:0x401b3c18>#<Thread:0x401bdf4c>
3307  * tg group now #<Thread:0x401b3c90>
3308  */
3309 
3310 static VALUE
3311 thgroup_add(VALUE group, VALUE thread)
3312 {
3313  rb_thread_t *th;
3314  struct thgroup *data;
3315 
3316  rb_secure(4);
3317  GetThreadPtr(thread, th);
3318 
3319  if (OBJ_FROZEN(group)) {
3320  rb_raise(rb_eThreadError, "can't move to the frozen thread group");
3321  }
3322  TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
3323  if (data->enclosed) {
3324  rb_raise(rb_eThreadError, "can't move to the enclosed thread group");
3325  }
3326 
3327  if (!th->thgroup) {
3328  return Qnil;
3329  }
3330 
3331  if (OBJ_FROZEN(th->thgroup)) {
3332  rb_raise(rb_eThreadError, "can't move from the frozen thread group");
3333  }
3334  TypedData_Get_Struct(th->thgroup, struct thgroup, &thgroup_data_type, data);
3335  if (data->enclosed) {
3336  rb_raise(rb_eThreadError,
3337  "can't move from the enclosed thread group");
3338  }
3339 
3340  th->thgroup = group;
3341  return group;
3342 }
3343 
3344 
3345 /*
3346  * Document-class: Mutex
3347  *
3348  * Mutex implements a simple semaphore that can be used to coordinate access to
3349  * shared data from multiple concurrent threads.
3350  *
3351  * Example:
3352  *
3353  * require 'thread'
3354  * semaphore = Mutex.new
3355  *
3356  * a = Thread.new {
3357  * semaphore.synchronize {
3358  * # access shared resource
3359  * }
3360  * }
3361  *
3362  * b = Thread.new {
3363  * semaphore.synchronize {
3364  * # access shared resource
3365  * }
3366  * }
3367  *
3368  */
3369 
3370 #define GetMutexPtr(obj, tobj) \
3371  TypedData_Get_Struct((obj), rb_mutex_t, &mutex_data_type, (tobj))
3372 
3373 #define mutex_mark NULL
3374 
3375 static void
3376 mutex_free(void *ptr)
3377 {
3378  if (ptr) {
3379  rb_mutex_t *mutex = ptr;
3380  if (mutex->th) {
3381  /* rb_warn("free locked mutex"); */
3382  const char *err = rb_mutex_unlock_th(mutex, mutex->th);
3383  if (err) rb_bug("%s", err);
3384  }
3385  native_mutex_destroy(&mutex->lock);
3386  native_cond_destroy(&mutex->cond);
3387  }
3388  ruby_xfree(ptr);
3389 }
3390 
3391 static size_t
3392 mutex_memsize(const void *ptr)
3393 {
3394  return ptr ? sizeof(rb_mutex_t) : 0;
3395 }
3396 
3398  "mutex",
3400 };
3401 
3402 VALUE
3404 {
3405  if (rb_typeddata_is_kind_of(obj, &mutex_data_type)) {
3406  return Qtrue;
3407  }
3408  else {
3409  return Qfalse;
3410  }
3411 }
3412 
3413 static VALUE
3415 {
3416  VALUE volatile obj;
3417  rb_mutex_t *mutex;
3418 
3419  obj = TypedData_Make_Struct(klass, rb_mutex_t, &mutex_data_type, mutex);
3420  native_mutex_initialize(&mutex->lock);
3421  native_cond_initialize(&mutex->cond, RB_CONDATTR_CLOCK_MONOTONIC);
3422  return obj;
3423 }
3424 
3425 /*
3426  * call-seq:
3427  * Mutex.new -> mutex
3428  *
3429  * Creates a new Mutex
3430  */
3431 static VALUE
3433 {
3434  return self;
3435 }
3436 
3437 VALUE
3438 rb_mutex_new(void)
3439 {
3440  return mutex_alloc(rb_cMutex);
3441 }
3442 
3443 /*
3444  * call-seq:
3445  * mutex.locked? -> true or false
3446  *
3447  * Returns +true+ if this lock is currently held by some thread.
3448  */
3449 VALUE
3450 rb_mutex_locked_p(VALUE self)
3451 {
3452  rb_mutex_t *mutex;
3453  GetMutexPtr(self, mutex);
3454  return mutex->th ? Qtrue : Qfalse;
3455 }
3456 
3457 static void
3459 {
3460  rb_mutex_t *mutex;
3461  GetMutexPtr(self, mutex);
3462 
3463  if (th->keeping_mutexes) {
3464  mutex->next_mutex = th->keeping_mutexes;
3465  }
3466  th->keeping_mutexes = mutex;
3467 }
3468 
3469 /*
3470  * call-seq:
3471  * mutex.try_lock -> true or false
3472  *
3473  * Attempts to obtain the lock and returns immediately. Returns +true+ if the
3474  * lock was granted.
3475  */
3476 VALUE
3477 rb_mutex_trylock(VALUE self)
3478 {
3479  rb_mutex_t *mutex;
3480  VALUE locked = Qfalse;
3481  GetMutexPtr(self, mutex);
3482 
3483  native_mutex_lock(&mutex->lock);
3484  if (mutex->th == 0) {
3485  mutex->th = GET_THREAD();
3486  locked = Qtrue;
3487 
3488  mutex_locked(GET_THREAD(), self);
3489  }
3490  native_mutex_unlock(&mutex->lock);
3491 
3492  return locked;
3493 }
3494 
3495 static int
3496 lock_func(rb_thread_t *th, rb_mutex_t *mutex, int timeout_ms)
3497 {
3498  int interrupted = 0;
3499  int err = 0;
3500 
3501  mutex->cond_waiting++;
3502  for (;;) {
3503  if (!mutex->th) {
3504  mutex->th = th;
3505  break;
3506  }
3507  if (RUBY_VM_INTERRUPTED(th)) {
3508  interrupted = 1;
3509  break;
3510  }
3511  if (err == ETIMEDOUT) {
3512  interrupted = 2;
3513  break;
3514  }
3515 
3516  if (timeout_ms) {
3517  struct timespec timeout_rel;
3518  struct timespec timeout;
3519 
3520  timeout_rel.tv_sec = 0;
3521  timeout_rel.tv_nsec = timeout_ms * 1000 * 1000;
3522  timeout = native_cond_timeout(&mutex->cond, timeout_rel);
3523  err = native_cond_timedwait(&mutex->cond, &mutex->lock, &timeout);
3524  }
3525  else {
3526  native_cond_wait(&mutex->cond, &mutex->lock);
3527  err = 0;
3528  }
3529  }
3530  mutex->cond_waiting--;
3531 
3532  return interrupted;
3533 }
3534 
3535 static void
3536 lock_interrupt(void *ptr)
3537 {
3538  rb_mutex_t *mutex = (rb_mutex_t *)ptr;
3539  native_mutex_lock(&mutex->lock);
3540  if (mutex->cond_waiting > 0)
3541  native_cond_broadcast(&mutex->cond);
3542  native_mutex_unlock(&mutex->lock);
3543 }
3544 
3545 /*
3546  * At most one thread may use cond_timedwait and watch for deadlock
3547  * periodically. Multiple polling threads (i.e. a concurrent deadlock check)
3548  * introduce new race conditions. [Bug #6278] [ruby-core:44275]
3549  */
3550 static rb_thread_t *patrol_thread = NULL;
3551 
3552 /*
3553  * call-seq:
3554  * mutex.lock -> self
3555  *
3556  * Attempts to grab the lock and waits if it isn't available.
3557  * Raises +ThreadError+ if +mutex+ was locked by the current thread.
3558  */
3559 VALUE
3560 rb_mutex_lock(VALUE self)
3561 {
3562 
3563  if (rb_mutex_trylock(self) == Qfalse) {
3564  rb_mutex_t *mutex;
3565  rb_thread_t *th = GET_THREAD();
3566  GetMutexPtr(self, mutex);
3567 
3568  if (mutex->th == GET_THREAD()) {
3569  rb_raise(rb_eThreadError, "deadlock; recursive locking");
3570  }
3571 
3572  while (mutex->th != th) {
3573  int interrupted;
3574  enum rb_thread_status prev_status = th->status;
3575  int timeout_ms = 0;
3576  struct rb_unblock_callback oldubf;
3577 
3578  set_unblock_function(th, lock_interrupt, mutex, &oldubf);
3579  th->status = THREAD_STOPPED_FOREVER;
3580  th->locking_mutex = self;
3581 
3582  native_mutex_lock(&mutex->lock);
3583  th->vm->sleeper++;
3584  /*
3585  * Careful! While some contended threads are in lock_func(),
3586  * vm->sleeper is an unstable value. We have to avoid both deadlock
3587  * and busy looping.
3588  */
3589  if ((vm_living_thread_num(th->vm) == th->vm->sleeper) &&
3590  !patrol_thread) {
3591  timeout_ms = 100;
3592  patrol_thread = th;
3593  }
3594 
3595  GVL_UNLOCK_BEGIN();
3596  interrupted = lock_func(th, mutex, timeout_ms);
3597  native_mutex_unlock(&mutex->lock);
3598  GVL_UNLOCK_END();
3599 
3600  if (patrol_thread == th)
3601  patrol_thread = NULL;
3602 
3603  reset_unblock_function(th, &oldubf);
3604 
3605  th->locking_mutex = Qfalse;
3606  if (mutex->th && interrupted == 2) {
3607  rb_check_deadlock(th->vm);
3608  }
3609  if (th->status == THREAD_STOPPED_FOREVER) {
3610  th->status = prev_status;
3611  }
3612  th->vm->sleeper--;
3613 
3614  if (mutex->th == th) mutex_locked(th, self);
3615 
3616  if (interrupted) {
3617  RUBY_VM_CHECK_INTS();
3618  }
3619  }
3620  }
3621  return self;
3622 }
3623 
3624 static const char *
3625 rb_mutex_unlock_th(rb_mutex_t *mutex, rb_thread_t *th)
3626 {
3627  const char *err = NULL;
3628  rb_mutex_t *th_mutex;
3629 
3630  native_mutex_lock(&mutex->lock);
3631 
3632  if (mutex->th == 0) {
3633  err = "Attempt to unlock a mutex which is not locked";
3634  }
3635  else if (mutex->th != th) {
3636  err = "Attempt to unlock a mutex which is locked by another thread";
3637  }
3638  else {
3639  mutex->th = 0;
3640  if (mutex->cond_waiting > 0)
3641  native_cond_signal(&mutex->cond);
3642  }
3643 
3644  native_mutex_unlock(&mutex->lock);
3645 
3646  if (!err) {
3647  th_mutex = th->keeping_mutexes;
3648  if (th_mutex == mutex) {
3649  th->keeping_mutexes = mutex->next_mutex;
3650  }
3651  else {
3652  while (1) {
3653  rb_mutex_t *tmp_mutex;
3654  tmp_mutex = th_mutex->next_mutex;
3655  if (tmp_mutex == mutex) {
3656  th_mutex->next_mutex = tmp_mutex->next_mutex;
3657  break;
3658  }
3659  th_mutex = tmp_mutex;
3660  }
3661  }
3662  mutex->next_mutex = NULL;
3663  }
3664 
3665  return err;
3666 }
3667 
3668 /*
3669  * call-seq:
3670  * mutex.unlock -> self
3671  *
3672  * Releases the lock.
3673  * Raises +ThreadError+ if +mutex+ wasn't locked by the current thread.
3674  */
3675 VALUE
3676 rb_mutex_unlock(VALUE self)
3677 {
3678  const char *err;
3679  rb_mutex_t *mutex;
3680  GetMutexPtr(self, mutex);
3681 
3682  err = rb_mutex_unlock_th(mutex, GET_THREAD());
3683  if (err) rb_raise(rb_eThreadError, "%s", err);
3684 
3685  return self;
3686 }
3687 
3688 static void
3690 {
3691  if (th->keeping_mutexes) {
3693  }
3694  th->keeping_mutexes = NULL;
3695 }
3696 
3697 static void
3699 {
3700  rb_mutex_t *mutex;
3701 
3702  if (!th->locking_mutex) return;
3703 
3704  GetMutexPtr(th->locking_mutex, mutex);
3705  if (mutex->th == th)
3706  rb_mutex_abandon_all(mutex);
3707  th->locking_mutex = Qfalse;
3708 }
3709 
3710 static void
3712 {
3713  rb_mutex_t *mutex;
3714 
3715  while (mutexes) {
3716  mutex = mutexes;
3717  mutexes = mutex->next_mutex;
3718  mutex->th = 0;
3719  mutex->next_mutex = 0;
3720  }
3721 }
3722 
3723 static VALUE
3725 {
3727  return Qnil;
3728 }
3729 
3730 static VALUE
3732 {
3733  const struct timeval *t = (struct timeval *)time;
3734  rb_thread_wait_for(*t);
3735  return Qnil;
3736 }
3737 
3738 VALUE
3739 rb_mutex_sleep(VALUE self, VALUE timeout)
3740 {
3741  time_t beg, end;
3742  struct timeval t;
3743 
3744  if (!NIL_P(timeout)) {
3745  t = rb_time_interval(timeout);
3746  }
3747  rb_mutex_unlock(self);
3748  beg = time(0);
3749  if (NIL_P(timeout)) {
3750  rb_ensure(rb_mutex_sleep_forever, Qnil, rb_mutex_lock, self);
3751  }
3752  else {
3753  rb_ensure(rb_mutex_wait_for, (VALUE)&t, rb_mutex_lock, self);
3754  }
3755  end = time(0) - beg;
3756  return INT2FIX(end);
3757 }
3758 
3759 /*
3760  * call-seq:
3761  * mutex.sleep(timeout = nil) -> number
3762  *
3763  * Releases the lock and sleeps +timeout+ seconds if it is given and
3764  * non-nil or forever. Raises +ThreadError+ if +mutex+ wasn't locked by
3765  * the current thread.
3766  */
3767 static VALUE
3768 mutex_sleep(int argc, VALUE *argv, VALUE self)
3769 {
3770  VALUE timeout;
3771 
3772  rb_scan_args(argc, argv, "01", &timeout);
3773  return rb_mutex_sleep(self, timeout);
3774 }
3775 
3776 /*
3777  * call-seq:
3778  * mutex.synchronize { ... } -> result of the block
3779  *
3780  * Obtains a lock, runs the block, and releases the lock when the block
3781  * completes. See the example under +Mutex+.
3782  */
3783 
3784 VALUE
3785 rb_mutex_synchronize(VALUE mutex, VALUE (*func)(VALUE arg), VALUE arg)
3786 {
3787  rb_mutex_lock(mutex);
3788  return rb_ensure(func, arg, rb_mutex_unlock, mutex);
3789 }
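/*
 * Editor's note: an illustrative sketch of rb_mutex_synchronize() used from C.
 * The callback and the wrapper function are hypothetical; rb_mutex_new() and
 * rb_mutex_synchronize() are the functions defined in this file.
 */
#if 0 /* example only, not compiled */
static VALUE
locked_body(VALUE arg)
{
    /* runs with the mutex held; the lock is released even if this raises */
    return arg;
}

static VALUE
synchronize_example(void)
{
    VALUE mutex = rb_mutex_new();
    return rb_mutex_synchronize(mutex, locked_body, Qnil);
}
#endif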
3790 
3791 /*
3792  * Document-class: Barrier
3793  */
3794 static void
3795 barrier_mark(void *ptr)
3796 {
3797  rb_gc_mark((VALUE)ptr);
3798 }
3799 
3801  "barrier",
3802  {barrier_mark, 0, 0,},
3803 };
3804 
3805 static VALUE
3807 {
3808  return TypedData_Wrap_Struct(klass, &barrier_data_type, (void *)mutex_alloc(0));
3809 }
3810 
3811 #define GetBarrierPtr(obj) ((VALUE)rb_check_typeddata((obj), &barrier_data_type))
3812 
3813 VALUE
3814 rb_barrier_new(void)
3815 {
3816  VALUE barrier = barrier_alloc(rb_cBarrier);
3817  rb_mutex_lock((VALUE)DATA_PTR(barrier));
3818  return barrier;
3819 }
3820 
3821 VALUE
3822 rb_barrier_wait(VALUE self)
3823 {
3824  VALUE mutex = GetBarrierPtr(self);
3825  rb_mutex_t *m;
3826 
3827  if (!mutex) return Qfalse;
3828  GetMutexPtr(mutex, m);
3829  if (m->th == GET_THREAD()) return Qfalse;
3830  rb_mutex_lock(mutex);
3831  if (DATA_PTR(self)) return Qtrue;
3832  rb_mutex_unlock(mutex);
3833  return Qfalse;
3834 }
3835 
3836 VALUE
3837 rb_barrier_release(VALUE self)
3838 {
3839  return rb_mutex_unlock(GetBarrierPtr(self));
3840 }
3841 
3842 VALUE
3843 rb_barrier_destroy(VALUE self)
3844 {
3845  VALUE mutex = GetBarrierPtr(self);
3846  DATA_PTR(self) = 0;
3847  return rb_mutex_unlock(mutex);
3848 }
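/*
 * Editor's note: an illustrative sketch of the internal Barrier API defined
 * above; the usage shown is hypothetical.  rb_barrier_new() returns a barrier
 * whose underlying mutex is held by the creating thread; other threads block
 * in rb_barrier_wait() until the holder calls rb_barrier_release() (the
 * barrier stays usable) or rb_barrier_destroy() (it is marked destroyed).
 */
#if 0 /* example only, not compiled */
static void
barrier_example(void)
{
    VALUE barrier = rb_barrier_new();     /* locked by the creating thread */

    /* ... other threads call rb_barrier_wait(barrier) and block here ... */

    rb_barrier_release(barrier);          /* let one waiter proceed */
    /* or: rb_barrier_destroy(barrier);      release and invalidate it */
}
#endif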
3849 
3850 /* variables for recursive traversals */
3852 
3853 /*
3854  * Returns the current "recursive list" used to detect recursion.
3855  * This list is a hash table, unique for the current thread and for
3856  * the current __callee__.
3857  */
3858 
3859 static VALUE
3861 {
3862  volatile VALUE hash = rb_thread_local_aref(rb_thread_current(), recursive_key);
3863  VALUE sym = ID2SYM(rb_frame_this_func());
3864  VALUE list;
3865  if (NIL_P(hash) || TYPE(hash) != T_HASH) {
3866  hash = rb_hash_new();
3867  OBJ_UNTRUST(hash);
3868  rb_thread_local_aset(rb_thread_current(), recursive_key, hash);
3869  list = Qnil;
3870  }
3871  else {
3872  list = rb_hash_aref(hash, sym);
3873  }
3874  if (NIL_P(list) || TYPE(list) != T_HASH) {
3875  list = rb_hash_new();
3876  OBJ_UNTRUST(list);
3877  rb_hash_aset(hash, sym, list);
3878  }
3879  return list;
3880 }
3881 
3882 /*
3883  * Returns Qtrue iff obj_id (or the pair <obj, paired_obj>) is already
3884  * in the recursion list.
3885  * Assumes the recursion list is valid.
3886  */
3887 
3888 static VALUE
3889 recursive_check(VALUE list, VALUE obj_id, VALUE paired_obj_id)
3890 {
3891 #if SIZEOF_LONG == SIZEOF_VOIDP
3892  #define OBJ_ID_EQL(obj_id, other) ((obj_id) == (other))
3893 #elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
3894  #define OBJ_ID_EQL(obj_id, other) (RB_TYPE_P((obj_id), T_BIGNUM) ? \
3895  rb_big_eql((obj_id), (other)) : ((obj_id) == (other)))
3896 #endif
3897 
3898  VALUE pair_list = rb_hash_lookup2(list, obj_id, Qundef);
3899  if (pair_list == Qundef)
3900  return Qfalse;
3901  if (paired_obj_id) {
3902  if (TYPE(pair_list) != T_HASH) {
3903  if (!OBJ_ID_EQL(paired_obj_id, pair_list))
3904  return Qfalse;
3905  }
3906  else {
3907  if (NIL_P(rb_hash_lookup(pair_list, paired_obj_id)))
3908  return Qfalse;
3909  }
3910  }
3911  return Qtrue;
3912 }
3913 
3914 /*
3915  * Pushes obj_id (or the pair <obj_id, paired_obj_id>) in the recursion list.
3916  * For a single obj_id, it sets list[obj_id] to Qtrue.
3917  * For a pair, it sets list[obj_id] to paired_obj_id if possible,
3918  * otherwise list[obj_id] becomes a hash like:
3919  * {paired_obj_id_1 => true, paired_obj_id_2 => true, ... }
3920  * Assumes the recursion list is valid.
3921  */
3922 
3923 static void
3925 {
3926  VALUE pair_list;
3927 
3928  if (!paired_obj) {
3929  rb_hash_aset(list, obj, Qtrue);
3930  }
3931  else if ((pair_list = rb_hash_lookup2(list, obj, Qundef)) == Qundef) {
3932  rb_hash_aset(list, obj, paired_obj);
3933  }
3934  else {
3935  if (TYPE(pair_list) != T_HASH){
3936  VALUE other_paired_obj = pair_list;
3937  pair_list = rb_hash_new();
3938  OBJ_UNTRUST(pair_list);
3939  rb_hash_aset(pair_list, other_paired_obj, Qtrue);
3940  rb_hash_aset(list, obj, pair_list);
3941  }
3942  rb_hash_aset(pair_list, paired_obj, Qtrue);
3943  }
3944 }
3945 
3946 /*
3947  * Pops obj_id (or the pair <obj_id, paired_obj_id>) from the recursion list.
3948  * For a pair, if list[obj_id] is a hash, then paired_obj_id is
3949  * removed from the hash and no attempt is made to simplify
3950  * list[obj_id] from {only_one_paired_id => true} to only_one_paired_id
3951  * Assumes the recursion list is valid.
3952  */
3953 
3954 static void
3956 {
3957  if (paired_obj) {
3958  VALUE pair_list = rb_hash_lookup2(list, obj, Qundef);
3959  if (pair_list == Qundef) {
3960  VALUE symname = rb_inspect(ID2SYM(rb_frame_this_func()));
3961  VALUE thrname = rb_inspect(rb_thread_current());
3962  rb_raise(rb_eTypeError, "invalid inspect_tbl pair_list for %s in %s",
3963  StringValuePtr(symname), StringValuePtr(thrname));
3964  }
3965  if (TYPE(pair_list) == T_HASH) {
3966  rb_hash_delete(pair_list, paired_obj);
3967  if (!RHASH_EMPTY_P(pair_list)) {
3968  return; /* keep the hash until it is empty */
3969  }
3970  }
3971  }
3972  rb_hash_delete(list, obj);
3973 }
3974 
3975 struct exec_recursive_params {
3976  VALUE (*func) (VALUE, VALUE, int);
3977  VALUE list;
3978  VALUE obj;
3979  VALUE objid;
3980  VALUE pairid;
3981  VALUE arg;
3982 };
3983 
3984 static VALUE
3986 {
3987  VALUE result = Qundef;
3988  int state;
3989 
3990  recursive_push(p->list, p->objid, p->pairid);
3991  PUSH_TAG();
3992  if ((state = EXEC_TAG()) == 0) {
3993  result = (*p->func)(p->obj, p->arg, FALSE);
3994  }
3995  POP_TAG();
3996  recursive_pop(p->list, p->objid, p->pairid);
3997  if (state)
3998  JUMP_TAG(state);
3999  return result;
4000 }
4001 
4002 /*
4003  * Calls func(obj, arg, recursive), where recursive is non-zero if the
4004  * current method is called recursively on obj, or on the pair <obj, pairid>
4005  * If outer is 0, then the innermost func will be called with recursive set
4006  * to Qtrue, otherwise the outermost func will be called. In the latter case,
4007  * all inner func are short-circuited by throw.
4008  * Implementation details: the value thrown is the recursive list, which is
4009  * specific to the current method and unlikely to be caught anywhere else.
4010  * list[recursive_key] is used as a flag for the outermost call.
4011  */
4012 
4013 static VALUE
4014 exec_recursive(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE pairid, VALUE arg, int outer)
4015 {
4016  VALUE result = Qundef;
4017  struct exec_recursive_params p;
4018  int outermost;
4019  p.list = recursive_list_access();
4020  p.objid = rb_obj_id(obj);
4021  p.obj = obj;
4022  p.pairid = pairid;
4023  p.arg = arg;
4024  outermost = outer && !recursive_check(p.list, ID2SYM(recursive_key), 0);
4025 
4026  if (recursive_check(p.list, p.objid, pairid)) {
4027  if (outer && !outermost) {
4028  rb_throw_obj(p.list, p.list);
4029  }
4030  return (*func)(obj, arg, TRUE);
4031  }
4032  else {
4033  p.func = func;
4034 
4035  if (outermost) {
4036  recursive_push(p.list, ID2SYM(recursive_key), 0);
4037  result = rb_catch_obj(p.list, exec_recursive_i, (VALUE)&p);
4038  recursive_pop(p.list, ID2SYM(recursive_key), 0);
4039  if (result == p.list) {
4040  result = (*func)(obj, arg, TRUE);
4041  }
4042  }
4043  else {
4044  result = exec_recursive_i(0, &p);
4045  }
4046  }
4047  *(volatile struct exec_recursive_params *)&p;
4048  return result;
4049 }
4050 
4051 /*
4052  * Calls func(obj, arg, recursive), where recursive is non-zero if the
4053  * current method is called recursively on obj
4054  */
4055 
4056 VALUE
4057 rb_exec_recursive(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE arg)
4058 {
4059  return exec_recursive(func, obj, 0, arg, 0);
4060 }
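/*
 * Editor's note: an illustrative sketch of rb_exec_recursive().  This is the
 * pattern an inspect-like function can use to cut off self-referential
 * structures; the callback and names here are hypothetical.
 */
#if 0 /* example only, not compiled */
static VALUE
inspect_body(VALUE obj, VALUE arg, int recursive)
{
    if (recursive) {
        /* called again on the same obj within the same __callee__ */
        return rb_str_new2("[...]");
    }
    /* nested calls on the same obj will get recursive == 1 instead of looping */
    return rb_str_new2("#<example>");
}

static VALUE
inspect_example(VALUE obj)
{
    return rb_exec_recursive(inspect_body, obj, 0);
}
#endif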
4061 
4062 /*
4063  * Calls func(obj, arg, recursive), where recursive is non-zero if the
4064  * current method is called recursively on the ordered pair <obj, paired_obj>
4065  */
4066 
4067 VALUE
4068 rb_exec_recursive_paired(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE paired_obj, VALUE arg)
4069 {
4070  return exec_recursive(func, obj, rb_obj_id(paired_obj), arg, 0);
4071 }
4072 
4073 /*
4074  * If recursion is detected on the current method and obj, the outermost
4075  * func will be called with (obj, arg, Qtrue). All inner func will be
4076  * short-circuited using throw.
4077  */
4078 
4079 VALUE
4080 rb_exec_recursive_outer(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE arg)
4081 {
4082  return exec_recursive(func, obj, 0, arg, 1);
4083 }
4084 
4085 /* tracer */
4086 #define RUBY_EVENT_REMOVED 0x1000000
4087 
4088 enum {
4089  EVENT_RUNNING_NOTHING,
4090  EVENT_RUNNING_TRACE = 1,
4091  EVENT_RUNNING_THREAD = 2,
4092  EVENT_RUNNING_VM = 4,
4093  EVENT_RUNNING_EVENT_MASK = EVENT_RUNNING_VM|EVENT_RUNNING_THREAD
4094 };
4095 
4096 static VALUE thread_suppress_tracing(rb_thread_t *th, int ev, VALUE (*func)(VALUE, int), VALUE arg, int always, int pop_p);
4097 
4101  VALUE self;
4105 };
4106 
4107 static rb_event_hook_t *
4108 alloc_event_hook(rb_event_hook_func_t func, rb_event_flag_t events, VALUE data)
4109 {
4110  rb_event_hook_t *hook = ALLOC(rb_event_hook_t);
4111  hook->func = func;
4112  hook->flag = events;
4113  hook->data = data;
4114  return hook;
4115 }
4116 
4117 static void
4119 {
4120  rb_event_hook_t *hook = th->event_hooks;
4122 
4123  while (hook) {
4124  if (!(flag & RUBY_EVENT_REMOVED))
4125  flag |= hook->flag;
4126  hook = hook->next;
4127  }
4128  th->event_flags = flag;
4129 }
4130 
4131 static void
4134 {
4135  rb_event_hook_t *hook = alloc_event_hook(func, events, data);
4136  hook->next = th->event_hooks;
4137  th->event_hooks = hook;
4139 }
4140 
4141 static rb_thread_t *
4143 {
4144  rb_thread_t *th;
4145  GetThreadPtr(thval, th);
4146  return th;
4147 }
4148 
4149 void
4152 {
4153  rb_threadptr_add_event_hook(thval2thread_t(thval), func, events, data);
4154 }
4155 
4156 static int
4158 {
4159  VALUE thval = key;
4160  rb_thread_t *th;
4161  GetThreadPtr(thval, th);
4162 
4163  if (flag) {
4164  th->event_flags |= RUBY_EVENT_VM;
4165  }
4166  else {
4167  th->event_flags &= (~RUBY_EVENT_VM);
4168  }
4169  return ST_CONTINUE;
4170 }
4171 
4172 static void
4174 {
4175  st_foreach(GET_VM()->living_threads, set_threads_event_flags_i, (st_data_t) flag);
4176 }
4177 
4178 static inline int
4179 exec_event_hooks(const rb_event_hook_t *hook, rb_event_flag_t flag, VALUE self, ID id, VALUE klass)
4180 {
4181  int removed = 0;
4182  for (; hook; hook = hook->next) {
4183  if (hook->flag & RUBY_EVENT_REMOVED) {
4184  removed++;
4185  continue;
4186  }
4187  if (flag & hook->flag) {
4188  (*hook->func)(flag, hook->data, self, id, klass);
4189  }
4190  }
4191  return removed;
4192 }
4193 
4194 static int remove_defered_event_hook(rb_event_hook_t **root);
4195 
4196 static VALUE
4197 thread_exec_event_hooks(VALUE args, int running)
4198 {
4199  struct event_call_args *argp = (struct event_call_args *)args;
4200  rb_thread_t *th = argp->th;
4201  rb_event_flag_t flag = argp->event;
4202  VALUE self = argp->self;
4203  ID id = argp->id;
4204  VALUE klass = argp->klass;
4205  const rb_event_flag_t wait_event = th->event_flags;
4206  int removed;
4207 
4208  if (self == rb_mRubyVMFrozenCore) return 0;
4209 
4210  if ((wait_event & flag) && !(running & EVENT_RUNNING_THREAD)) {
4212  removed = exec_event_hooks(th->event_hooks, flag, self, id, klass);
4213  th->tracing &= ~EVENT_RUNNING_THREAD;
4214  if (removed) {
4216  }
4217  }
4218  if (wait_event & RUBY_EVENT_VM) {
4219  if (th->vm->event_hooks == NULL) {
4220  th->event_flags &= (~RUBY_EVENT_VM);
4221  }
4222  else if (!(running & EVENT_RUNNING_VM)) {
4223  th->tracing |= EVENT_RUNNING_VM;
4224  removed = exec_event_hooks(th->vm->event_hooks, flag, self, id, klass);
4225  th->tracing &= ~EVENT_RUNNING_VM;
4226  if (removed) {
4228  }
4229  }
4230  }
4231  return 0;
4232 }
4233 
4234 void
4236 {
4237  const VALUE errinfo = th->errinfo;
4238  struct event_call_args args;
4239  args.th = th;
4240  args.event = flag;
4241  args.self = self;
4242  args.id = id;
4243  args.klass = klass;
4244  args.proc = 0;
4246  th->errinfo = errinfo;
4247 }
4248 
4249 void
4250 rb_add_event_hook(rb_event_hook_func_t func, rb_event_flag_t events, VALUE data)
4251 {
4252  rb_event_hook_t *hook = alloc_event_hook(func, events, data);
4253  rb_vm_t *vm = GET_VM();
4254 
4255  hook->next = vm->event_hooks;
4256  vm->event_hooks = hook;
4257 
4258  set_threads_event_flags(1);
4259 }
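/*
 * Editor's note: an illustrative sketch of registering a VM-wide event hook
 * with rb_add_event_hook(), as defined above.  The hook function signature
 * matches rb_event_hook_func_t; the hook body and names are hypothetical.
 */
#if 0 /* example only, not compiled */
static void
example_hook(rb_event_flag_t flag, VALUE data, VALUE self, ID id, VALUE klass)
{
    /* called for every event selected below, e.g. method calls and returns */
}

static void
install_example_hook(void)
{
    rb_add_event_hook(example_hook, RUBY_EVENT_CALL | RUBY_EVENT_RETURN, Qnil);
    /* ... later: rb_remove_event_hook(example_hook); */
}
#endif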
4260 
4261 static int
4263 {
4264  while (hook) {
4265  if (func == 0 || hook->func == func) {
4266  hook->flag |= RUBY_EVENT_REMOVED;
4267  }
4268  hook = hook->next;
4269  }
4270  return -1;
4271 }
4272 
4273 static int
4275 {
4276  rb_event_hook_t *hook = *root, *next;
4277 
4278  while (hook) {
4279  next = hook->next;
4280  if (func == 0 || hook->func == func || (hook->flag & RUBY_EVENT_REMOVED)) {
4281  *root = next;
4282  xfree(hook);
4283  }
4284  else {
4285  root = &hook->next;
4286  }
4287  hook = next;
4288  }
4289  return -1;
4290 }
4291 
4292 static int
4294 {
4295  rb_event_hook_t *hook = *root, *next;
4296 
4297  while (hook) {
4298  next = hook->next;
4299  if (hook->flag & RUBY_EVENT_REMOVED) {
4300  *root = next;
4301  xfree(hook);
4302  }
4303  else {
4304  root = &hook->next;
4305  }
4306  hook = next;
4307  }
4308  return -1;
4309 }
4310 
4311 static int
4313 {
4314  int ret;
4315  if (th->tracing & EVENT_RUNNING_THREAD) {
4316  ret = defer_remove_event_hook(th->event_hooks, func);
4317  }
4318  else {
4319  ret = remove_event_hook(&th->event_hooks, func);
4320  }
4322  return ret;
4323 }
4324 
4325 int
4327 {
4328  return rb_threadptr_remove_event_hook(thval2thread_t(thval), func);
4329 }
4330 
4331 static rb_event_hook_t *
4333 {
4334  while (hook) {
4335  if (!(hook->flag & RUBY_EVENT_REMOVED))
4336  return hook;
4337  hook = hook->next;
4338  }
4339  return NULL;
4340 }
4341 
4342 static int
4344 {
4345  rb_thread_t *th = thval2thread_t((VALUE)key);
4346  if (!(th->tracing & EVENT_RUNNING_VM)) return ST_CONTINUE;
4347  *(rb_thread_t **)data = th;
4348  return ST_STOP;
4349 }
4350 
4351 static rb_thread_t *
4353 {
4354  rb_thread_t *found = NULL;
4356  return found;
4357 }
4358 
4359 int
4361 {
4362  rb_vm_t *vm = GET_VM();
4364  int ret;
4365 
4367  ret = defer_remove_event_hook(vm->event_hooks, func);
4368  }
4369  else {
4370  ret = remove_event_hook(&vm->event_hooks, func);
4371  }
4372 
4373  if (hook && !search_live_hook(vm->event_hooks)) {
4375  }
4376 
4377  return ret;
4378 }
4379 
4380 static int
4382 {
4383  rb_thread_t *th;
4384  GetThreadPtr((VALUE)key, th);
4386  return ST_CONTINUE;
4387 }
4388 
4389 void
4391 {
4392  st_foreach(GET_VM()->living_threads, clear_trace_func_i, (st_data_t) 0);
4394 }
4395 
4396 static void call_trace_func(rb_event_flag_t, VALUE data, VALUE self, ID id, VALUE klass);
4397 
4398 /*
4399  * If recursion is detected on the current method, obj and paired_obj,
4400  * the outermost func will be called with (obj, arg, Qtrue). All inner
4401  * func will be short-circuited using throw.
4402  */
4403 
4404 VALUE
4405 rb_exec_recursive_paired_outer(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE paired_obj, VALUE arg)
4406 {
4407  return exec_recursive(func, obj, rb_obj_id(paired_obj), arg, 1);
4408 }
4409 
4410 /*
4411  * call-seq:
4412  * set_trace_func(proc) -> proc
4413  * set_trace_func(nil) -> nil
4414  *
4415  * Establishes _proc_ as the handler for tracing, or disables
4416  * tracing if the parameter is +nil+. _proc_ takes up
4417  * to six parameters: an event name, a filename, a line number, an
4418  * object id, a binding, and the name of a class. _proc_ is
4419  * invoked whenever an event occurs. Events are: <code>c-call</code>
4420  * (call a C-language routine), <code>c-return</code> (return from a
4421  * C-language routine), <code>call</code> (call a Ruby method),
4422  * <code>class</code> (start a class or module definition),
4423  * <code>end</code> (finish a class or module definition),
4424  * <code>line</code> (execute code on a new line), <code>raise</code>
4425  * (raise an exception), and <code>return</code> (return from a Ruby
4426  * method). Tracing is disabled within the context of _proc_.
4427  *
4428  * class Test
4429  * def test
4430  * a = 1
4431  * b = 2
4432  * end
4433  * end
4434  *
4435  * set_trace_func proc { |event, file, line, id, binding, classname|
4436  * printf "%8s %s:%-2d %10s %8s\n", event, file, line, id, classname
4437  * }
4438  * t = Test.new
4439  * t.test
4440  *
4441  * line prog.rb:11 false
4442  * c-call prog.rb:11 new Class
4443  * c-call prog.rb:11 initialize Object
4444  * c-return prog.rb:11 initialize Object
4445  * c-return prog.rb:11 new Class
4446  * line prog.rb:12 false
4447  * call prog.rb:2 test Test
4448  * line prog.rb:3 test Test
4449  * line prog.rb:4 test Test
4450  * return prog.rb:4 test Test
4451  */
4452 
4453 static VALUE
4454 set_trace_func(VALUE obj, VALUE trace)
4455 {
4457 
4458  if (NIL_P(trace)) {
4459  GET_THREAD()->tracing = EVENT_RUNNING_NOTHING;
4460  return Qnil;
4461  }
4462 
4463  if (!rb_obj_is_proc(trace)) {
4464  rb_raise(rb_eTypeError, "trace_func needs to be Proc");
4465  }
4466 
4468  return trace;
4469 }
4470 
4471 static void
4473 {
4474  if (!rb_obj_is_proc(trace)) {
4475  rb_raise(rb_eTypeError, "trace_func needs to be Proc");
4476  }
4477 
4479 }
4480 
4481 /*
4482  * call-seq:
4483  * thr.add_trace_func(proc) -> proc
4484  *
4485  * Adds _proc_ as a handler for tracing.
4486  * See <code>Thread#set_trace_func</code> and +set_trace_func+.
4487  */
4488 
4489 static VALUE
4491 {
4492  rb_thread_t *th;
4493  GetThreadPtr(obj, th);
4494  thread_add_trace_func(th, trace);
4495  return trace;
4496 }
4497 
4498 /*
4499  * call-seq:
4500  * thr.set_trace_func(proc) -> proc
4501  * thr.set_trace_func(nil) -> nil
4502  *
4503  * Establishes _proc_ on _thr_ as the handler for tracing, or
4504  * disables tracing if the parameter is +nil+.
4505  * See +set_trace_func+.
4506  */
4507 
4508 static VALUE
4509 thread_set_trace_func_m(VALUE obj, VALUE trace)
4510 {
4511  rb_thread_t *th;
4512  GetThreadPtr(obj, th);
4514 
4515  if (NIL_P(trace)) {
4517  return Qnil;
4518  }
4519  thread_add_trace_func(th, trace);
4520  return trace;
4521 }
4522 
4523 static const char *
4525 {
4526  switch (event) {
4527  case RUBY_EVENT_LINE:
4528  return "line";
4529  case RUBY_EVENT_CLASS:
4530  return "class";
4531  case RUBY_EVENT_END:
4532  return "end";
4533  case RUBY_EVENT_CALL:
4534  return "call";
4535  case RUBY_EVENT_RETURN:
4536  return "return";
4537  case RUBY_EVENT_C_CALL:
4538  return "c-call";
4539  case RUBY_EVENT_C_RETURN:
4540  return "c-return";
4541  case RUBY_EVENT_RAISE:
4542  return "raise";
4543  default:
4544  return "unknown";
4545  }
4546 }
4547 
4548 static VALUE
4549 call_trace_proc(VALUE args, int tracing)
4550 {
4551  struct event_call_args *p = (struct event_call_args *)args;
4552  const char *srcfile = rb_sourcefile();
4553  VALUE eventname = rb_str_new2(get_event_name(p->event));
4554  VALUE filename = srcfile ? rb_str_new2(srcfile) : Qnil;
4555  VALUE argv[6];
4556  int line = rb_sourceline();
4557  ID id = 0;
4558  VALUE klass = 0;
4559 
4560  if (p->klass != 0) {
4561  id = p->id;
4562  klass = p->klass;
4563  }
4564  else {
4565  rb_thread_method_id_and_class(p->th, &id, &klass);
4566  }
4567  if (id == ID_ALLOCATOR)
4568  return Qnil;
4569  if (klass) {
4570  if (TYPE(klass) == T_ICLASS) {
4571  klass = RBASIC(klass)->klass;
4572  }
4573  else if (FL_TEST(klass, FL_SINGLETON)) {
4574  klass = rb_iv_get(klass, "__attached__");
4575  }
4576  }
4577 
4578  argv[0] = eventname;
4579  argv[1] = filename;
4580  argv[2] = INT2FIX(line);
4581  argv[3] = id ? ID2SYM(id) : Qnil;
4582  argv[4] = (p->self && srcfile) ? rb_binding_new() : Qnil;
4583  argv[5] = klass ? klass : Qnil;
4584 
4585  return rb_proc_call_with_block(p->proc, 6, argv, Qnil);
4586 }
4587 
4588 static void
4590 {
4591  struct event_call_args args;
4592 
4593  args.th = GET_THREAD();
4594  args.event = event;
4595  args.proc = proc;
4596  args.self = self;
4597  args.id = id;
4598  args.klass = klass;
4600 }
4601 
4602 VALUE
4604 {
4605  rb_thread_t *th = GET_THREAD();
4606  return thread_suppress_tracing(th, EVENT_RUNNING_TRACE, func, arg, always, 0);
4607 }
4608 
4609 static VALUE
4610 thread_suppress_tracing(rb_thread_t *th, int ev, VALUE (*func)(VALUE, int), VALUE arg, int always, int pop_p)
4611 {
4612  int state, tracing = th->tracing, running = tracing & ev;
4613  volatile int raised;
4614  volatile int outer_state;
4615  VALUE result = Qnil;
4616 
4617  if (running == ev && !always) {
4618  return Qnil;
4619  }
4620  else {
4621  th->tracing |= ev;
4622  }
4623 
4624  raised = rb_threadptr_reset_raised(th);
4625  outer_state = th->state;
4626  th->state = 0;
4627 
4628  PUSH_TAG();
4629  if ((state = EXEC_TAG()) == 0) {
4630  result = (*func)(arg, running);
4631  }
4632 
4633  if (raised) {
4635  }
4636  POP_TAG();
4637 
4638  th->tracing = tracing;
4639  if (state) {
4640  if (pop_p) {
4642  }
4643  JUMP_TAG(state);
4644  }
4645  th->state = outer_state;
4646 
4647  return result;
4648 }
4649 
4650 /*
4651  * call-seq:
4652  * thr.backtrace -> array
4653  *
4654  * Returns the current back trace of the _thr_.
4655  */
4656 
4657 static VALUE
4659 {
4660  return rb_thread_backtrace(thval);
4661 }
4662 
4663 /*
4664  * Document-class: ThreadError
4665  *
4666  * Raised when an invalid operation is attempted on a thread.
4667  *
4668  * For example, when no other thread has been started:
4669  *
4670  * Thread.stop
4671  *
4672  * <em>raises the exception:</em>
4673  *
4674  * ThreadError: stopping only thread
4675  */
4676 
4677 /*
4678  * +Thread+ encapsulates the behavior of a thread of
4679  * execution, including the main thread of the Ruby script.
4680  *
4681  * In the descriptions of the methods in this class, the parameter _sym_
4682  * refers to a symbol, which is either a quoted string or a
4683  * +Symbol+ (such as <code>:name</code>).
4684  */
4685 
4686 void
4687 Init_Thread(void)
4688 {
4689 #undef rb_intern
4690 #define rb_intern(str) rb_intern_const(str)
4691 
4692  VALUE cThGroup;
4693  rb_thread_t *th = GET_THREAD();
4694 
4705  rb_define_singleton_method(rb_cThread, "abort_on_exception", rb_thread_s_abort_exc, 0);
4707 #if THREAD_DEBUG < 0
4708  rb_define_singleton_method(rb_cThread, "DEBUG", rb_thread_s_debug, 0);
4709  rb_define_singleton_method(rb_cThread, "DEBUG=", rb_thread_s_debug_set, 1);
4710 #endif
4711 
4712  rb_define_method(rb_cThread, "initialize", thread_initialize, -2);
4717  rb_define_method(rb_cThread, "terminate", rb_thread_kill, 0);
4730  rb_define_method(rb_cThread, "abort_on_exception", rb_thread_abort_exc, 0);
4731  rb_define_method(rb_cThread, "abort_on_exception=", rb_thread_abort_exc_set, 1);
4735 
4737 
4738  closed_stream_error = rb_exc_new2(rb_eIOError, "stream closed");
4741 
4742  cThGroup = rb_define_class("ThreadGroup", rb_cObject);
4744  rb_define_method(cThGroup, "list", thgroup_list, 0);
4745  rb_define_method(cThGroup, "enclose", thgroup_enclose, 0);
4746  rb_define_method(cThGroup, "enclosed?", thgroup_enclosed_p, 0);
4747  rb_define_method(cThGroup, "add", thgroup_add, 1);
4748 
4749  {
4750  th->thgroup = th->vm->thgroup_default = rb_obj_alloc(cThGroup);
4751  rb_define_const(cThGroup, "Default", th->thgroup);
4752  }
4753 
4754  rb_cMutex = rb_define_class("Mutex", rb_cObject);
4756  rb_define_method(rb_cMutex, "initialize", mutex_initialize, 0);
4758  rb_define_method(rb_cMutex, "try_lock", rb_mutex_trylock, 0);
4761  rb_define_method(rb_cMutex, "sleep", mutex_sleep, -1);
4762 
4763  recursive_key = rb_intern("__recursive_key__");
4765 
4766  /* trace */
4767  rb_define_global_function("set_trace_func", set_trace_func, 1);
4768  rb_define_method(rb_cThread, "set_trace_func", thread_set_trace_func_m, 1);
4769  rb_define_method(rb_cThread, "add_trace_func", thread_add_trace_func_m, 1);
4770 
4771  /* init thread core */
4772  {
4773  /* main thread setting */
4774  {
4775  /* acquire global vm lock */
4776  gvl_init(th->vm);
4777  gvl_acquire(th->vm, th);
4778  native_mutex_initialize(&th->interrupt_lock);
4779  }
4780  }
4781 
4782  rb_thread_create_timer_thread();
4783 
4784  /* suppress warnings on cygwin, mingw and mswin.*/
4785  (void)native_mutex_trylock;
4786 }
4787 
4788 int
4789 ruby_native_thread_p(void)
4790 {
4791  rb_thread_t *th = ruby_thread_from_native();
4792 
4793  return th != 0;
4794 }
4795 
4796 static int
4797 check_deadlock_i(st_data_t key, st_data_t val, int *found)
4798 {
4799  VALUE thval = key;
4800  rb_thread_t *th;
4801  GetThreadPtr(thval, th);
4802 
4803  if (th->status != THREAD_STOPPED_FOREVER || RUBY_VM_INTERRUPTED(th)) {
4804  *found = 1;
4805  }
4806  else if (th->locking_mutex) {
4807  rb_mutex_t *mutex;
4808  GetMutexPtr(th->locking_mutex, mutex);
4809 
4810  native_mutex_lock(&mutex->lock);
4811  if (mutex->th == th || (!mutex->th && mutex->cond_waiting)) {
4812  *found = 1;
4813  }
4814  native_mutex_unlock(&mutex->lock);
4815  }
4816 
4817  return (*found) ? ST_STOP : ST_CONTINUE;
4818 }
4819 
4820 #ifdef DEBUG_DEADLOCK_CHECK
4821 static int
4822 debug_i(st_data_t key, st_data_t val, int *found)
4823 {
4824  VALUE thval = key;
4825  rb_thread_t *th;
4826  GetThreadPtr(thval, th);
4827 
4828  printf("th:%p %d %d", th, th->status, th->interrupt_flag);
4829  if (th->locking_mutex) {
4830  rb_mutex_t *mutex;
4831  GetMutexPtr(th->locking_mutex, mutex);
4832 
4833  native_mutex_lock(&mutex->lock);
4834  printf(" %p %d\n", mutex->th, mutex->cond_waiting);
4835  native_mutex_unlock(&mutex->lock);
4836  }
4837  else
4838  puts("");
4839 
4840  return ST_CONTINUE;
4841 }
4842 #endif
4843 
4844 static void
4845 rb_check_deadlock(rb_vm_t *vm)
4846 {
4847  int found = 0;
4848 
4849  if (vm_living_thread_num(vm) > vm->sleeper) return;
4850  if (vm_living_thread_num(vm) < vm->sleeper) rb_bug("sleeper must not be more than vm_living_thread_num(vm)");
4851  if (patrol_thread && patrol_thread != GET_THREAD()) return;
4852 
4853  st_foreach(vm->living_threads, check_deadlock_i, (st_data_t)&found);
4854 
4855  if (!found) {
4856  VALUE argv[2];
4857  argv[0] = rb_eFatal;
4858  argv[1] = rb_str_new2("deadlock detected");
4859 #ifdef DEBUG_DEADLOCK_CHECK
4860  printf("%d %d %p %p\n", vm->living_threads->num_entries, vm->sleeper, GET_THREAD(), vm->main_thread);
4861  st_foreach(vm->living_threads, debug_i, (st_data_t)0);
4862 #endif
4863  vm->sleeper--;
4864  rb_threadptr_raise(vm->main_thread, 2, argv);
4865  }
4866 }
4867 
4868 static void
4869 update_coverage(rb_event_flag_t event, VALUE proc, VALUE self, ID id, VALUE klass)
4870 {
4871  VALUE coverage = GET_THREAD()->cfp->iseq->coverage;
4872  if (coverage && RBASIC(coverage)->klass == 0) {
4873  long line = rb_sourceline() - 1;
4874  long count;
4875  if (RARRAY_PTR(coverage)[line] == Qnil) {
4876  return;
4877  }
4878  count = FIX2LONG(RARRAY_PTR(coverage)[line]) + 1;
4879  if (POSFIXABLE(count)) {
4880  RARRAY_PTR(coverage)[line] = LONG2FIX(count);
4881  }
4882  }
4883 }
4884 
4885 VALUE
4886 rb_get_coverages(void)
4887 {
4888  return GET_VM()->coverages;
4889 }
4890 
4891 void
4892 rb_set_coverages(VALUE coverages)
4893 {
4894  GET_VM()->coverages = coverages;
4896 }
4897 
4898 void
4899 rb_reset_coverages(void)
4900 {
4901  GET_VM()->coverages = Qfalse;
4903 }
4904 
4905