• Main Page
  • Modules
  • Data Structures
  • Files
  • File List
  • Globals

thread.c

Go to the documentation of this file.
00001 /**********************************************************************
00002 
00003   thread.c -
00004 
00005   $Author: yugui $
00006 
00007   Copyright (C) 2004-2007 Koichi Sasada
00008 
00009 **********************************************************************/
00010 
00011 /*
00012   YARV Thread Design
00013 
00014   model 1: Userlevel Thread
00015     Same as traditional ruby thread.
00016 
00017   model 2: Native Thread with Global VM lock
00018     Using pthread (or Windows thread) and Ruby threads run concurrent.
00019 
00020   model 3: Native Thread with fine grain lock
00021     Using pthread and Ruby threads run concurrent or parallel.
00022 
00023 ------------------------------------------------------------------------
00024 
00025   model 2:
00026     A thread has mutex (GVL: Global VM Lock or Giant VM Lock) can run.
00027     When thread scheduling, running thread release GVL.  If running thread
00028     try blocking operation, this thread must release GVL and another
00029     thread can continue this flow.  After blocking operation, thread
00030     must check interrupt (RUBY_VM_CHECK_INTS).
00031 
00032     Every VM can run parallel.
00033 
00034     Ruby threads are scheduled by OS thread scheduler.
00035 
00036 ------------------------------------------------------------------------
00037 
00038   model 3:
00039     Every threads run concurrent or parallel and to access shared object
00040     exclusive access control is needed.  For example, to access String
00041     object or Array object, fine grain lock must be locked every time.
00042  */
00043 
00044 
/* for model 2 */

#include "eval_intern.h"
#include "gc.h"

/* Fallback priority range used when the native thread API does not
 * expose thread priorities (USE_NATIVE_THREAD_PRIORITY == 0). */
#ifndef USE_NATIVE_THREAD_PRIORITY
#define USE_NATIVE_THREAD_PRIORITY 0
#define RUBY_THREAD_PRIORITY_MAX 3
#define RUBY_THREAD_PRIORITY_MIN -3
#endif

/* Debug tracing is compiled out by default; see thread_debug below. */
#ifndef THREAD_DEBUG
#define THREAD_DEBUG 0
#endif

VALUE rb_cMutex;
VALUE rb_cBarrier;

/* sleeping primitives (defined later in this file) */
static void sleep_timeval(rb_thread_t *th, struct timeval time);
static void sleep_wait_for_interrupt(rb_thread_t *th, double sleepsec);
static void sleep_forever(rb_thread_t *th, int nodeadlock);
static double timeofday(void);
struct timeval rb_time_interval(VALUE);
static int rb_threadptr_dead(rb_thread_t *th);

static void rb_check_deadlock(rb_vm_t *vm);

/* signal / timer-thread hooks implemented elsewhere */
int rb_signal_buff_size(void);
void rb_signal_exec(rb_thread_t *th, int sig);
void rb_disable_interrupt(void);
void rb_thread_stop_timer_thread(void);

/* Fixnum markers stored in th->thrown_errinfo to request kill/terminate;
 * FIXNUM_P distinguishes them from real exception objects. */
static const VALUE eKillSignal = INT2FIX(0);
static const VALUE eTerminateSignal = INT2FIX(1);
static volatile int system_working = 1;
00080 
00081 inline static void
00082 st_delete_wrap(st_table *table, st_data_t key)
00083 {
00084     st_delete(table, &key, 0);
00085 }
00086 
/********************************************************************************/

#define THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION

/* State saved while a thread runs outside the GVL in a blocking region:
 * the thread status to restore and the previously installed unblock
 * callback (see blocking_region_begin / blocking_region_end). */
struct rb_blocking_region_buffer {
    enum rb_thread_status prev_status;
    struct rb_unblock_callback oldubf;
};

static void set_unblock_function(rb_thread_t *th, rb_unblock_function_t *func, void *arg,
                                 struct rb_unblock_callback *old);
static void reset_unblock_function(rb_thread_t *th, const struct rb_unblock_callback *old);

static inline void blocking_region_end(rb_thread_t *th, struct rb_blocking_region_buffer *region);

/* Record the machine stack extent so the GC can still scan this
 * thread's stack while it runs without the GVL. */
#define RB_GC_SAVE_MACHINE_CONTEXT(th) \
  do { \
    rb_gc_save_machine_context(th); \
    SET_MACHINE_STACK_END(&(th)->machine_stack_end); \
  } while (0)

/* Bracket code executed with the GVL released.  BEGIN saves the machine
 * context and drops the lock; END re-acquires it and restores the
 * current-thread pointer.  Must always be used as a matched pair. */
#define GVL_UNLOCK_BEGIN() do { \
  rb_thread_t *_th_stored = GET_THREAD(); \
  RB_GC_SAVE_MACHINE_CONTEXT(_th_stored); \
  native_mutex_unlock(&_th_stored->vm->global_vm_lock)

#define GVL_UNLOCK_END() \
  native_mutex_lock(&_th_stored->vm->global_vm_lock); \
  rb_thread_set_current(_th_stored); \
} while(0)

/* Run `exec` with the GVL released. */
#define BLOCKING_REGION_CORE(exec) do { \
    GVL_UNLOCK_BEGIN(); {\
            exec; \
    } \
    GVL_UNLOCK_END(); \
} while(0);

/* Enter a blocking region: remember the previous status, install the
 * unblock callback, publish the region buffer and release the GVL. */
#define blocking_region_begin(th, region, func, arg) \
  do { \
    (region)->prev_status = (th)->status; \
    set_unblock_function((th), (func), (arg), &(region)->oldubf); \
    (th)->blocking_region_buffer = (region); \
    (th)->status = THREAD_STOPPED; \
    thread_debug("enter blocking region (%p)\n", (void *)(th)); \
    RB_GC_SAVE_MACHINE_CONTEXT(th); \
    native_mutex_unlock(&(th)->vm->global_vm_lock); \
  } while (0)

/* Run `exec` in a blocking region with `ubf`/`ubfarg` as the unblock
 * callback, then check pending interrupts once the GVL is held again. */
#define BLOCKING_REGION(exec, ubf, ubfarg) do { \
    rb_thread_t *__th = GET_THREAD(); \
    struct rb_blocking_region_buffer __region; \
    blocking_region_begin(__th, &__region, ubf, ubfarg); \
    exec; \
    blocking_region_end(__th, &__region); \
    RUBY_VM_CHECK_INTS(); \
} while(0)
00144 
#if THREAD_DEBUG
/* Debug tracing enabled: thread_debug() prints to stdout, with the
 * call site's file/line when variadic macros are available. */
#ifdef HAVE_VA_ARGS_MACRO
void rb_thread_debug(const char *file, int line, const char *fmt, ...);
#define thread_debug(fmt, ...) rb_thread_debug(__FILE__, __LINE__, fmt, ##__VA_ARGS__)
#define POSITION_FORMAT "%s:%d:"
#define POSITION_ARGS ,file, line
#else
void rb_thread_debug(const char *fmt, ...);
#define thread_debug rb_thread_debug
#define POSITION_FORMAT
#define POSITION_ARGS
#endif

# if THREAD_DEBUG < 0
/* Runtime-adjustable debug level, exposed as Thread.DEBUG below. */
static int rb_thread_debug_enabled;

/*
 *  call-seq:
 *     Thread.DEBUG     -> num
 *
 *  Returns the thread debug level.  Available only if compiled with
 *  THREAD_DEBUG=-1.
 */

static VALUE
rb_thread_s_debug(void)
{
    return INT2NUM(rb_thread_debug_enabled);
}

/*
 *  call-seq:
 *     Thread.DEBUG = num
 *
 *  Sets the thread debug level.  Available only if compiled with
 *  THREAD_DEBUG=-1.
 */

static VALUE
rb_thread_s_debug_set(VALUE self, VALUE val)
{
    rb_thread_debug_enabled = RTEST(val) ? NUM2INT(val) : 0;
    return val;
}
# else
# define rb_thread_debug_enabled THREAD_DEBUG
# endif
#else
/* Tracing disabled: expands to a dead call so arguments still parse. */
#define thread_debug if(0)printf
#endif

#ifndef __ia64
/* Only IA64 has a separate register backing stack; elsewhere drop the
 * third argument from the definition below via this macro. */
#define thread_start_func_2(th, st, rst) thread_start_func_2(th, st)
#endif
NOINLINE(static int thread_start_func_2(rb_thread_t *th, VALUE *stack_start,
                                        VALUE *register_stack_start));
static void timer_thread_function(void *);
00202 
/* Pull in the platform-specific thread layer and define how a debug
 * line is emitted under that platform's locking primitives. */
#if   defined(_WIN32)
#include "thread_win32.c"

#define DEBUG_OUT() \
  WaitForSingleObject(&debug_mutex, INFINITE); \
  printf(POSITION_FORMAT"%p - %s" POSITION_ARGS, GetCurrentThreadId(), buf); \
  fflush(stdout); \
  ReleaseMutex(&debug_mutex);

#elif defined(HAVE_PTHREAD_H)
#include "thread_pthread.c"

#define DEBUG_OUT() \
  pthread_mutex_lock(&debug_mutex); \
  printf(POSITION_FORMAT"%#"PRIxVALUE" - %s" POSITION_ARGS, (VALUE)pthread_self(), buf); \
  fflush(stdout); \
  pthread_mutex_unlock(&debug_mutex);

#else
#error "unsupported thread type"
#endif
00224 
#if THREAD_DEBUG
/* 1 until the first call initializes debug_mutex below. */
static int debug_mutex_initialized = 1;
static rb_thread_lock_t debug_mutex;

/*
 * Format and print one debug trace line under debug_mutex.
 * No-op unless rb_thread_debug_enabled is nonzero.
 */
void
rb_thread_debug(
#ifdef HAVE_VA_ARGS_MACRO
    const char *file, int line,
#endif
    const char *fmt, ...)
{
    va_list args;
    char buf[BUFSIZ];

    if (!rb_thread_debug_enabled) return;

    /* lazily create the mutex on first use.
     * NOTE(review): this check-then-init is itself unsynchronized;
     * presumably first use happens before multiple threads trace. */
    if (debug_mutex_initialized == 1) {
        debug_mutex_initialized = 0;
        native_mutex_initialize(&debug_mutex);
    }

    va_start(args, fmt);
    vsnprintf(buf, BUFSIZ, fmt, args);
    va_end(args);

    DEBUG_OUT();
}
#endif
00253 
/* Unlock a native lock; thin portability wrapper over native_mutex_unlock(). */
void
rb_thread_lock_unlock(rb_thread_lock_t *lock)
{
    native_mutex_unlock(lock);
}

/* Destroy a native lock; thin portability wrapper over native_mutex_destroy(). */
void
rb_thread_lock_destroy(rb_thread_lock_t *lock)
{
    native_mutex_destroy(lock);
}
00265 
00266 static void
00267 set_unblock_function(rb_thread_t *th, rb_unblock_function_t *func, void *arg,
00268                      struct rb_unblock_callback *old)
00269 {
00270   check_ints:
00271     RUBY_VM_CHECK_INTS(); /* check signal or so */
00272     native_mutex_lock(&th->interrupt_lock);
00273     if (th->interrupt_flag) {
00274         native_mutex_unlock(&th->interrupt_lock);
00275         goto check_ints;
00276     }
00277     else {
00278         if (old) *old = th->unblock;
00279         th->unblock.func = func;
00280         th->unblock.arg = arg;
00281     }
00282     native_mutex_unlock(&th->interrupt_lock);
00283 }
00284 
/* Restore the unblock callback previously saved by set_unblock_function(). */
static void
reset_unblock_function(rb_thread_t *th, const struct rb_unblock_callback *old)
{
    native_mutex_lock(&th->interrupt_lock);
    th->unblock = *old;
    native_mutex_unlock(&th->interrupt_lock);
}
00292 
00293 void
00294 rb_threadptr_interrupt(rb_thread_t *th)
00295 {
00296     native_mutex_lock(&th->interrupt_lock);
00297     RUBY_VM_SET_INTERRUPT(th);
00298     if (th->unblock.func) {
00299         (th->unblock.func)(th->unblock.arg);
00300     }
00301     else {
00302         /* none */
00303     }
00304     native_mutex_unlock(&th->interrupt_lock);
00305 }
00306 
00307 
00308 static int
00309 terminate_i(st_data_t key, st_data_t val, rb_thread_t *main_thread)
00310 {
00311     VALUE thval = key;
00312     rb_thread_t *th;
00313     GetThreadPtr(thval, th);
00314 
00315     if (th != main_thread) {
00316         thread_debug("terminate_i: %p\n", (void *)th);
00317         rb_threadptr_interrupt(th);
00318         th->thrown_errinfo = eTerminateSignal;
00319         th->status = THREAD_TO_KILL;
00320     }
00321     else {
00322         thread_debug("terminate_i: main thread (%p)\n", (void *)th);
00323     }
00324     return ST_CONTINUE;
00325 }
00326 
/* Internal representation of a Ruby Mutex object. */
typedef struct rb_mutex_struct
{
    rb_thread_lock_t lock;                 /* native lock backing the Mutex */
    rb_thread_cond_t cond;                 /* waiters block on this condition */
    struct rb_thread_struct volatile *th;  /* presumably the owning thread; 0 when unlocked — confirm in mutex code */
    volatile int cond_waiting, cond_notified;  /* waiter / wakeup bookkeeping */
    struct rb_mutex_struct *next_mutex;    /* chains the mutexes held by one thread */
} mutex_t;

static void rb_mutex_unlock_all(mutex_t *mutex, rb_thread_t *th);
static void rb_mutex_abandon_all(mutex_t *mutexes);
00338 
/*
 * VM shutdown: ask every living thread other than the main thread to
 * terminate, keep scheduling until the main thread is alone, then stop
 * the timer thread.  Must be called on the main thread (rb_bug otherwise).
 */
void
rb_thread_terminate_all(void)
{
    rb_thread_t *th = GET_THREAD(); /* main thread */
    rb_vm_t *vm = th->vm;

    if (vm->main_thread != th) {
        rb_bug("rb_thread_terminate_all: called by child thread (%p, %p)",
               (void *)vm->main_thread, (void *)th);
    }

    /* unlock all locking mutexes */
    /* NOTE(review): unlike thread_unlock_all_locking_mutexes() this does
     * not clear th->keeping_mutexes afterwards — confirm intentional. */
    if (th->keeping_mutexes) {
        rb_mutex_unlock_all(th->keeping_mutexes, GET_THREAD());
    }

    thread_debug("rb_thread_terminate_all (main thread: %p)\n", (void *)th);
    st_foreach(vm->living_threads, terminate_i, (st_data_t)th);

    /* spin the scheduler until all other threads have died; exceptions
     * raised into us during the wait are swallowed */
    while (!rb_thread_alone()) {
        PUSH_TAG();
        if (EXEC_TAG() == 0) {
            rb_thread_schedule();
        }
        else {
            /* ignore exception */
        }
        POP_TAG();
    }
    rb_thread_stop_timer_thread();
}
00370 
00371 static void
00372 thread_unlock_all_locking_mutexes(rb_thread_t *th)
00373 {
00374     if (th->keeping_mutexes) {
00375         rb_mutex_unlock_all(th->keeping_mutexes, th);
00376         th->keeping_mutexes = NULL;
00377     }
00378 }
00379 
00380 static void
00381 thread_cleanup_func_before_exec(void *th_ptr)
00382 {
00383     rb_thread_t *th = th_ptr;
00384     th->status = THREAD_KILLED;
00385     th->machine_stack_start = th->machine_stack_end = 0;
00386 #ifdef __ia64
00387     th->machine_register_stack_start = th->machine_register_stack_end = 0;
00388 #endif
00389 }
00390 
00391 static void
00392 thread_cleanup_func(void *th_ptr, int atfork)
00393 {
00394     rb_thread_t *th = th_ptr;
00395 
00396     th->locking_mutex = Qfalse;
00397     thread_cleanup_func_before_exec(th_ptr);
00398 
00399     /*
00400      * Unfortunately, we can't release native threading resource at fork
00401      * because libc may have unstable locking state therefore touching
00402      * a threading resource may cause a deadlock.
00403      */
00404     if (atfork)
00405         return;
00406 
00407     native_thread_destroy(th);
00408 }
00409 
extern void ruby_error_print(void);
static VALUE rb_threadptr_raise(rb_thread_t *, int, VALUE *);
void rb_thread_recycle_stack_release(VALUE *);

/* Initialize stack bookkeeping for TH on the current native thread. */
void
ruby_thread_init_stack(rb_thread_t *th)
{
    native_thread_init_stack(th);
}
00419 
/*
 * Body of every Ruby thread, called from the native thread entry point.
 * Acquires the GVL, runs the thread's proc (Thread.new) or C function
 * (rb_thread_create), then performs end-of-life bookkeeping: error
 * propagation to the main thread, removal from living_threads, waking
 * joiners, releasing held mutexes and recycling the VM stack.
 * Returns 0 (the native-level result is unused).
 */
static int
thread_start_func_2(rb_thread_t *th, VALUE *stack_start, VALUE *register_stack_start)
{
    int state;
    VALUE args = th->first_args;
    rb_proc_t *proc;
    rb_thread_t *join_th;
    rb_thread_t *main_th;
    VALUE errinfo = Qnil;
# ifdef USE_SIGALTSTACK
    void rb_register_sigaltstack(rb_thread_t *th);

    /* install an alternate signal stack for this thread */
    rb_register_sigaltstack(th);
# endif

    ruby_thread_set_native(th);

    /* record the stack bound so GC can scan this machine stack */
    th->machine_stack_start = stack_start;
#ifdef __ia64
    th->machine_register_stack_start = register_stack_start;
#endif
    thread_debug("thread start: %p\n", (void *)th);

    /* a Ruby thread may only execute while holding the GVL */
    native_mutex_lock(&th->vm->global_vm_lock);
    {
        thread_debug("thread start (get lock): %p\n", (void *)th);
        rb_thread_set_current(th);

        TH_PUSH_TAG(th);
        if ((state = EXEC_TAG()) == 0) {
            SAVE_ROOT_JMPBUF(th, {
                if (!th->first_func) {
                    /* Thread.new { ... }: invoke the stored proc */
                    GetProcPtr(th->first_proc, proc);
                    th->errinfo = Qnil;
                    th->local_lfp = proc->block.lfp;
                    th->local_svar = Qnil;
                    th->value = rb_vm_invoke_proc(th, proc, proc->block.self,
                                                  (int)RARRAY_LEN(args), RARRAY_PTR(args), 0);
                }
                else {
                    /* rb_thread_create(): invoke the C function */
                    th->value = (*th->first_func)((void *)args);
                }
            });
        }
        else {
            /* the thread body left via an exception or non-local jump */
            errinfo = th->errinfo;
            if (NIL_P(errinfo)) errinfo = rb_errinfo();
            if (state == TAG_FATAL) {
                /* fatal error within this thread, need to stop whole script */
            }
            else if (rb_obj_is_kind_of(errinfo, rb_eSystemExit)) {
                if (th->safe_level >= 4) {
                    th->errinfo = rb_exc_new3(rb_eSecurityError,
                                              rb_sprintf("Insecure exit at level %d", th->safe_level));
                    errinfo = Qnil;
                }
            }
            else if (th->safe_level < 4 &&
                     (th->vm->thread_abort_on_exception ||
                      th->abort_on_exception || RTEST(ruby_debug))) {
                /* exit on main_thread */
            }
            else {
                /* by default an uncaught exception dies with its thread */
                errinfo = Qnil;
            }
            th->value = Qnil;
        }

        th->status = THREAD_KILLED;
        thread_debug("thread end: %p\n", (void *)th);

        /* propagate a real exception object to the main thread when
         * abort-on-exception (handled above) decided to keep errinfo */
        main_th = th->vm->main_thread;
        if (th != main_th) {
            if (TYPE(errinfo) == T_OBJECT) {
                /* treat with normal error object */
                rb_threadptr_raise(main_th, 1, &errinfo);
            }
        }
        TH_POP_TAG();

        /* locking_mutex must be Qfalse */
        if (th->locking_mutex != Qfalse) {
            rb_bug("thread_start_func_2: locking_mutex must not be set (%p:%"PRIxVALUE")",
                   (void *)th, th->locking_mutex);
        }

        /* delete self other than main thread from living_threads */
        if (th != main_th) {
            st_delete_wrap(th->vm->living_threads, th->self);
        }

        /* wake up joining threads */
        join_th = th->join_list_head;
        while (join_th) {
            /* a joining main thread must not receive our error twice */
            if (join_th == main_th) errinfo = Qnil;
            rb_threadptr_interrupt(join_th);
            switch (join_th->status) {
              case THREAD_STOPPED: case THREAD_STOPPED_FOREVER:
                join_th->status = THREAD_RUNNABLE;
              default: break;
            }
            join_th = join_th->join_list_next;
        }

        thread_unlock_all_locking_mutexes(th);
        if (th != main_th) rb_check_deadlock(th->vm);

        /* return the VM stack to the recycle pool unless a fiber owns it */
        if (!th->root_fiber) {
            rb_thread_recycle_stack_release(th->stack);
            th->stack = 0;
        }
    }
    if (th->vm->main_thread == th) {
        /* the main thread exiting shuts the whole VM down */
        ruby_cleanup(state);
    }
    else {
        thread_cleanup_func(th, FALSE);
        native_mutex_unlock(&th->vm->global_vm_lock);
    }

    return 0;
}
00542 
/*
 * Common backend of Thread.new / Thread.start / rb_thread_create():
 * populate the thread struct for `thval`, register it in
 * living_threads and spawn the native thread.  `fn` is the C entry
 * point, or 0 to run the current block as a proc.
 * Returns `thval`; raises ThreadError on frozen ThreadGroup or when
 * the native thread cannot be created.
 */
static VALUE
thread_create_core(VALUE thval, VALUE args, VALUE (*fn)(ANYARGS))
{
    rb_thread_t *th;
    int err;

    if (OBJ_FROZEN(GET_THREAD()->thgroup)) {
        rb_raise(rb_eThreadError,
                 "can't start a new thread (frozen ThreadGroup)");
    }
    GetThreadPtr(thval, th);

    /* setup thread environment */
    th->first_func = fn;
    th->first_proc = fn ? Qfalse : rb_block_proc();
    th->first_args = args; /* GC: shouldn't put before above line */

    /* new thread inherits priority and group from its creator */
    th->priority = GET_THREAD()->priority;
    th->thgroup = GET_THREAD()->thgroup;

    native_mutex_initialize(&th->interrupt_lock);
    if (GET_VM()->event_hooks != NULL)
        th->event_flags |= RUBY_EVENT_VM;

    /* kick thread */
    st_insert(th->vm->living_threads, thval, (st_data_t) th->thread_id);
    err = native_thread_create(th);
    if (err) {
        /* roll back the registration before reporting failure */
        st_delete_wrap(th->vm->living_threads, th->self);
        th->status = THREAD_KILLED;
        rb_raise(rb_eThreadError, "can't create Thread (%d)", err);
    }
    return thval;
}
00577 
00578 /* :nodoc: */
00579 static VALUE
00580 thread_s_new(int argc, VALUE *argv, VALUE klass)
00581 {
00582     rb_thread_t *th;
00583     VALUE thread = rb_thread_alloc(klass);
00584     rb_obj_call_init(thread, argc, argv);
00585     GetThreadPtr(thread, th);
00586     if (!th->first_args) {
00587         rb_raise(rb_eThreadError, "uninitialized thread - check `%s#initialize'",
00588                  rb_class2name(klass));
00589     }
00590     return thread;
00591 }
00592 
/*
 *  call-seq:
 *     Thread.start([args]*) {|args| block }   -> thread
 *     Thread.fork([args]*) {|args| block }    -> thread
 *
 *  Basically the same as <code>Thread::new</code>. However, if class
 *  <code>Thread</code> is subclassed, then calling <code>start</code> in that
 *  subclass will not invoke the subclass's <code>initialize</code> method.
 */

static VALUE
thread_start(VALUE klass, VALUE args)
{
    /* bypasses #initialize: allocate and start the thread directly */
    return thread_create_core(rb_thread_alloc(klass), args, 0);
}
00608 
/* :nodoc: */
/* Thread#initialize: stash the block and args, then start the thread.
 * Raises ThreadError without a block, or when the thread was already
 * initialized (reporting the original block's source location when
 * it can be determined). */
static VALUE
thread_initialize(VALUE thread, VALUE args)
{
    rb_thread_t *th;
    if (!rb_block_given_p()) {
        rb_raise(rb_eThreadError, "must be called with a block");
    }
    GetThreadPtr(thread, th);
    if (th->first_args) {
        VALUE rb_proc_location(VALUE self);
        VALUE proc = th->first_proc, line, loc;
        const char *file;
        /* no proc or no known location: generic message */
        if (!proc || !RTEST(loc = rb_proc_location(proc))) {
            rb_raise(rb_eThreadError, "already initialized thread");
        }
        file = RSTRING_PTR(RARRAY_PTR(loc)[0]);
        if (NIL_P(line = RARRAY_PTR(loc)[1])) {
            rb_raise(rb_eThreadError, "already initialized thread - %s",
                     file);
        }
        rb_raise(rb_eThreadError, "already initialized thread - %s:%d",
                 file, NUM2INT(line));
    }
    return thread_create_core(thread, args, 0);
}
00635 
/* C API: spawn a Ruby thread that runs FN(ARG). */
VALUE
rb_thread_create(VALUE (*fn)(ANYARGS), void *arg)
{
    return thread_create_core(rb_thread_alloc(rb_cThread), (VALUE)arg, fn);
}
00641 
00642 
/* +infty, for this purpose */
#define DELAY_INFTY 1E30

/* Arguments threaded through rb_ensure() during Thread#join. */
struct join_arg {
    rb_thread_t *target, *waiting;  /* thread being joined / joining thread */
    double limit;                   /* absolute deadline in timeofday() seconds */
    int forever;                    /* nonzero: ignore limit and wait forever */
};
00651 
00652 static VALUE
00653 remove_from_join_list(VALUE arg)
00654 {
00655     struct join_arg *p = (struct join_arg *)arg;
00656     rb_thread_t *target_th = p->target, *th = p->waiting;
00657 
00658     if (target_th->status != THREAD_KILLED) {
00659         rb_thread_t **pth = &target_th->join_list_head;
00660 
00661         while (*pth) {
00662             if (*pth == th) {
00663                 *pth = th->join_list_next;
00664                 break;
00665             }
00666             pth = &(*pth)->join_list_next;
00667         }
00668     }
00669 
00670     return Qnil;
00671 }
00672 
00673 static VALUE
00674 thread_join_sleep(VALUE arg)
00675 {
00676     struct join_arg *p = (struct join_arg *)arg;
00677     rb_thread_t *target_th = p->target, *th = p->waiting;
00678     double now, limit = p->limit;
00679 
00680     while (target_th->status != THREAD_KILLED) {
00681         if (p->forever) {
00682             sleep_forever(th, 1);
00683         }
00684         else {
00685             now = timeofday();
00686             if (now > limit) {
00687                 thread_debug("thread_join: timeout (thid: %p)\n",
00688                              (void *)target_th->thread_id);
00689                 return Qfalse;
00690             }
00691             sleep_wait_for_interrupt(th, limit - now);
00692         }
00693         thread_debug("thread_join: interrupted (thid: %p)\n",
00694                      (void *)target_th->thread_id);
00695     }
00696     return Qtrue;
00697 }
00698 
/*
 * Wait up to `delay` seconds (DELAY_INFTY: forever) for `target_th` to
 * finish.  Returns the target's self on success, Qnil on timeout.
 * A pending exception or uncaught throw from the target is re-raised
 * in the calling thread.
 */
static VALUE
thread_join(rb_thread_t *target_th, double delay)
{
    rb_thread_t *th = GET_THREAD();
    struct join_arg arg;

    arg.target = target_th;
    arg.waiting = th;
    arg.limit = timeofday() + delay;
    arg.forever = delay == DELAY_INFTY;

    thread_debug("thread_join (thid: %p)\n", (void *)target_th->thread_id);

    if (target_th->status != THREAD_KILLED) {
        /* register on the target's join list; the ensure handler
         * unlinks us again if the wait ends early */
        th->join_list_next = target_th->join_list_head;
        target_th->join_list_head = th;
        if (!rb_ensure(thread_join_sleep, (VALUE)&arg,
                       remove_from_join_list, (VALUE)&arg)) {
            return Qnil; /* timed out */
        }
    }

    thread_debug("thread_join: success (thid: %p)\n",
                 (void *)target_th->thread_id);

    if (target_th->errinfo != Qnil) {
        VALUE err = target_th->errinfo;

        if (FIXNUM_P(err)) {
            /* internal kill/terminate marker: nothing to propagate */
        }
        else if (TYPE(target_th->errinfo) == T_NODE) {
            /* uncaught throw: rebuild the jump tag and re-raise it */
            rb_exc_raise(rb_vm_make_jump_tag_but_local_jump(
                GET_THROWOBJ_STATE(err), GET_THROWOBJ_VAL(err)));
        }
        else {
            /* normal exception */
            rb_exc_raise(err);
        }
    }
    return target_th->self;
}
00741 
00742 /*
00743  *  call-seq:
00744  *     thr.join          -> thr
00745  *     thr.join(limit)   -> thr
00746  *
00747  *  The calling thread will suspend execution and run <i>thr</i>. Does not
00748  *  return until <i>thr</i> exits or until <i>limit</i> seconds have passed. If
00749  *  the time limit expires, <code>nil</code> will be returned, otherwise
00750  *  <i>thr</i> is returned.
00751  *
00752  *  Any threads not joined will be killed when the main program exits.  If
00753  *  <i>thr</i> had previously raised an exception and the
00754  *  <code>abort_on_exception</code> and <code>$DEBUG</code> flags are not set
00755  *  (so the exception has not yet been processed) it will be processed at this
00756  *  time.
00757  *
00758  *     a = Thread.new { print "a"; sleep(10); print "b"; print "c" }
00759  *     x = Thread.new { print "x"; Thread.pass; print "y"; print "z" }
00760  *     x.join # Let x thread finish, a will be killed on exit.
00761  *
00762  *  <em>produces:</em>
00763  *
00764  *     axyz
00765  *
00766  *  The following example illustrates the <i>limit</i> parameter.
00767  *
00768  *     y = Thread.new { 4.times { sleep 0.1; puts 'tick... ' }}
00769  *     puts "Waiting" until y.join(0.15)
00770  *
00771  *  <em>produces:</em>
00772  *
00773  *     tick...
00774  *     Waiting
00775  *     tick...
00776  *     Waitingtick...
00777  *
00778  *
00779  *     tick...
00780  */
00781 
00782 static VALUE
00783 thread_join_m(int argc, VALUE *argv, VALUE self)
00784 {
00785     rb_thread_t *target_th;
00786     double delay = DELAY_INFTY;
00787     VALUE limit;
00788 
00789     GetThreadPtr(self, target_th);
00790 
00791     rb_scan_args(argc, argv, "01", &limit);
00792     if (!NIL_P(limit)) {
00793         delay = rb_num2dbl(limit);
00794     }
00795 
00796     return thread_join(target_th, delay);
00797 }
00798 
00799 /*
00800  *  call-seq:
00801  *     thr.value   -> obj
00802  *
00803  *  Waits for <i>thr</i> to complete (via <code>Thread#join</code>) and returns
00804  *  its value.
00805  *
00806  *     a = Thread.new { 2 + 2 }
00807  *     a.value   #=> 4
00808  */
00809 
00810 static VALUE
00811 thread_value(VALUE self)
00812 {
00813     rb_thread_t *th;
00814     GetThreadPtr(self, th);
00815     thread_join(th, DELAY_INFTY);
00816     return th->value;
00817 }
00818 
00819 /*
00820  * Thread Scheduling
00821  */
00822 
/*
 * Convert a duration in fractional seconds into a struct timeval with
 * tv_usec normalized into [0, 1000000).  Uses time_t/long casts so the
 * seconds part is not truncated through `int` on platforms where
 * time_t is wider (the old `(int)d` casts lost range on 64-bit time_t).
 */
static struct timeval
double2timeval(double d)
{
    struct timeval time;

    time.tv_sec = (time_t)d;
    /* fractional part; may be negative for negative d */
    time.tv_usec = (long)((d - (double)time.tv_sec) * 1e6);
    if (time.tv_usec < 0) {
        time.tv_usec += (long)1e6;
        time.tv_sec -= 1;
    }
    return time;
}
00836 
/*
 * Block the current thread indefinitely, until an interrupt changes its
 * status.  When `deadlockable` is set the thread counts toward the VM's
 * sleeper total and deadlock detection runs before each nap.
 */
static void
sleep_forever(rb_thread_t *th, int deadlockable)
{
    enum rb_thread_status prev_status = th->status;

    th->status = deadlockable ? THREAD_STOPPED_FOREVER : THREAD_STOPPED;
    do {
        if (deadlockable) {
            th->vm->sleeper++;
            rb_check_deadlock(th->vm);
        }
        native_sleep(th, 0);
        if (deadlockable) {
            th->vm->sleeper--;
        }
        RUBY_VM_CHECK_INTS();
    } while (th->status == THREAD_STOPPED_FOREVER);
    /* woken for real: restore whatever status we interrupted */
    th->status = prev_status;
}
00856 
00857 static void
00858 getclockofday(struct timeval *tp)
00859 {
00860 #if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
00861     struct timespec ts;
00862 
00863     if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0) {
00864         tp->tv_sec = ts.tv_sec;
00865         tp->tv_usec = ts.tv_nsec / 1000;
00866     } else
00867 #endif
00868     {
00869         gettimeofday(tp, NULL);
00870     }
00871 }
00872 
/*
 * Block the current thread for (up to) duration `tv`.  Interrupt
 * wakeups are serviced and the sleep resumes with the remaining time,
 * until the absolute deadline passes or something changes the thread's
 * status away from THREAD_STOPPED (e.g. Thread#run).
 */
static void
sleep_timeval(rb_thread_t *th, struct timeval tv)
{
    struct timeval to, tvn;
    enum rb_thread_status prev_status = th->status;

    /* compute absolute deadline, normalizing tv_usec carry */
    getclockofday(&to);
    to.tv_sec += tv.tv_sec;
    if ((to.tv_usec += tv.tv_usec) >= 1000000) {
        to.tv_sec++;
        to.tv_usec -= 1000000;
    }

    th->status = THREAD_STOPPED;
    do {
        native_sleep(th, &tv);
        RUBY_VM_CHECK_INTS();
        getclockofday(&tvn);
        if (to.tv_sec < tvn.tv_sec) break;
        if (to.tv_sec == tvn.tv_sec && to.tv_usec <= tvn.tv_usec) break;
        thread_debug("sleep_timeval: %ld.%.6ld > %ld.%.6ld\n",
                     (long)to.tv_sec, (long)to.tv_usec,
                     (long)tvn.tv_sec, (long)tvn.tv_usec);
        /* woke early: sleep again for the remaining time */
        tv.tv_sec = to.tv_sec - tvn.tv_sec;
        if ((tv.tv_usec = to.tv_usec - tvn.tv_usec) < 0) {
            --tv.tv_sec;
            tv.tv_usec += 1000000;
        }
    } while (th->status == THREAD_STOPPED);
    th->status = prev_status;
}
00904 
/* Sleep the current thread until interrupted (not deadlock-detectable). */
void
rb_thread_sleep_forever(void)
{
    thread_debug("rb_thread_sleep_forever\n");
    sleep_forever(GET_THREAD(), 0);
}
00911 
/* Sleep the current thread until interrupted; counts as a sleeper for
 * the VM's deadlock detection. */
static void
rb_thread_sleep_deadly(void)
{
    thread_debug("rb_thread_sleep_deadly\n");
    sleep_forever(GET_THREAD(), 1);
}
00918 
00919 static double
00920 timeofday(void)
00921 {
00922 #if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
00923     struct timespec tp;
00924 
00925     if (clock_gettime(CLOCK_MONOTONIC, &tp) == 0) {
00926         return (double)tp.tv_sec + (double)tp.tv_nsec * 1e-9;
00927     } else
00928 #endif
00929     {
00930         struct timeval tv;
00931         gettimeofday(&tv, NULL);
00932         return (double)tv.tv_sec + (double)tv.tv_usec * 1e-6;
00933     }
00934 }
00935 
/* Sleep TH for `sleepsec` seconds; wakes early on interrupt. */
static void
sleep_wait_for_interrupt(rb_thread_t *th, double sleepsec)
{
    sleep_timeval(th, double2timeval(sleepsec));
}
00941 
00942 static void
00943 sleep_for_polling(rb_thread_t *th)
00944 {
00945     struct timeval time;
00946     time.tv_sec = 0;
00947     time.tv_usec = 100 * 1000;  /* 0.1 sec */
00948     sleep_timeval(th, time);
00949 }
00950 
/* Sleep the current thread for the given duration. */
void
rb_thread_wait_for(struct timeval time)
{
    sleep_timeval(GET_THREAD(), time);
}
00957 
/* Service pending interrupts, then nap for the polling interval when
 * other threads exist so they get a chance to run. */
void
rb_thread_polling(void)
{
    RUBY_VM_CHECK_INTS();
    if (rb_thread_alone()) return;
    sleep_for_polling(GET_THREAD());
}
00967 
00968 /*
00969  * CAUTION: This function causes thread switching.
00970  *          rb_thread_check_ints() check ruby's interrupts.
00971  *          some interrupt needs thread switching/invoke handlers,
00972  *          and so on.
00973  */
00974 
/* Public wrapper over RUBY_VM_CHECK_INTS(); see the caution above —
 * this may switch threads and run interrupt handlers. */
void
rb_thread_check_ints(void)
{
    RUBY_VM_CHECK_INTS();
}
00980 
00981 /*
00982  * Hidden API for tcl/tk wrapper.
00983  * There is no guarantee to perpetuate it.
00984  */
00985 int
00986 rb_thread_check_trap_pending(void)
00987 {
00988     return GET_THREAD()->exec_signal != 0;
00989 }
00990 
00991 /* This function can be called in blocking region. */
00992 int
00993 rb_thread_interrupted(VALUE thval)
00994 {
00995     rb_thread_t *th;
00996     GetThreadPtr(thval, th);
00997     return RUBY_VM_INTERRUPTED(th);
00998 }
00999 
01000 struct timeval rb_time_timeval(VALUE);
01001 
01002 void
01003 rb_thread_sleep(int sec)
01004 {
01005     rb_thread_wait_for(rb_time_timeval(INT2FIX(sec)));
01006 }
01007 
01008 static void rb_threadptr_execute_interrupts_rec(rb_thread_t *, int);
01009 
static void
rb_thread_schedule_rec(int sched_depth)
{
    /* Yield the CPU to another Ruby thread by releasing and re-acquiring
     * the GVL around a native-thread yield.  At depth 0, pending
     * interrupts are processed afterwards (which may recurse back here
     * with a larger depth). */
    thread_debug("rb_thread_schedule\n");
    if (!rb_thread_alone()) {
        rb_thread_t *th = GET_THREAD();

        thread_debug("rb_thread_schedule/switch start\n");

        /* save machine context for GC before letting others run */
        RB_GC_SAVE_MACHINE_CONTEXT(th);
        native_mutex_unlock(&th->vm->global_vm_lock);
        {
            native_thread_yield();
        }
        native_mutex_lock(&th->vm->global_vm_lock);

        rb_thread_set_current(th);
        thread_debug("rb_thread_schedule/switch done\n");

        /* sched_depth != 0 suppresses this to bound mutual recursion
         * with rb_threadptr_execute_interrupts_rec() */
        if (!sched_depth && UNLIKELY(GET_THREAD()->interrupt_flag)) {
            rb_threadptr_execute_interrupts_rec(GET_THREAD(), sched_depth+1);
        }
    }
}
01034 
void
rb_thread_schedule(void)
{
    /* Public entry point: yield at depth 0 (interrupts may be run). */
    rb_thread_schedule_rec(0);
}
01040 
01041 /* blocking region */
01042 
static inline void
blocking_region_end(rb_thread_t *th, struct rb_blocking_region_buffer *region)
{
    /* Leave a blocking region: re-acquire the GVL, restore the unblock
     * function saved in `region`, and (if the thread is still STOPPED)
     * restore its pre-region status. */
    native_mutex_lock(&th->vm->global_vm_lock);
    rb_thread_set_current(th);
    thread_debug("leave blocking region (%p)\n", (void *)th);
    remove_signal_thread_list(th);
    th->blocking_region_buffer = 0;
    reset_unblock_function(th, &region->oldubf);
    if (th->status == THREAD_STOPPED) {
        th->status = region->prev_status;
    }
}
01056 
struct rb_blocking_region_buffer *
rb_thread_blocking_region_begin(void)
{
    /* Enter a blocking region using the default select()-based ubf.
     * Returns a heap-allocated buffer which must be handed back to
     * rb_thread_blocking_region_end(). */
    rb_thread_t *th = GET_THREAD();
    struct rb_blocking_region_buffer *region = ALLOC(struct rb_blocking_region_buffer);
    blocking_region_begin(th, region, ubf_select, th);
    return region;
}
01065 
void
rb_thread_blocking_region_end(struct rb_blocking_region_buffer *region)
{
    /* Counterpart of rb_thread_blocking_region_begin(): re-acquire the
     * GVL, free the region buffer, check pending interrupts.  errno is
     * saved/restored so the caller sees the blocking call's errno. */
    int saved_errno = errno;
    rb_thread_t *th = GET_THREAD();
    blocking_region_end(th, region);
    xfree(region);
    RUBY_VM_CHECK_INTS();
    errno = saved_errno;
}
01076 
01077 /*
01078  * rb_thread_blocking_region - permit concurrent/parallel execution.
01079  *
01080  * This function does:
01081  *   (1) release GVL.
01082  *       Other Ruby threads may run in parallel.
01083  *   (2) call func with data1.
01084  *   (3) acquire GVL.
01085  *       Other Ruby threads can not run in parallel any more.
01086  *
01087  *   If another thread interrupts this thread (Thread#kill, signal delivery,
01088  *   VM-shutdown request, and so on), `ubf()' is called (`ubf()' means
01089  *   "un-blocking function").  `ubf()' should interrupt `func()' execution.
01090  *
01091  *   There are built-in ubfs and you can specify these ubfs.
01092  *   However, we can not guarantee our built-in ubfs interrupt
01093  *   your `func()' correctly.  Be careful to use rb_thread_blocking_region().
01094  *
01095  *     * RUBY_UBF_IO: ubf for IO operation
01096  *     * RUBY_UBF_PROCESS: ubf for process operation
01097  *
01098  *   NOTE: You can not execute most of Ruby C API and touch Ruby
01099  *         objects in `func()' and `ubf()', including raising an
01100  *         exception, because current thread doesn't acquire GVL
01101  *         (cause synchronization problem).  If you need to do it,
01102  *         read source code of C APIs and confirm by yourself.
01103  *
01104  *   NOTE: In short, this API is difficult to use safely.  I recommend you
01105  *         use other ways if you have.  We lack experiences to use this API.
01106  *         Please report your problem related on it.
01107  *
01108  *   Safe C API:
01109  *     * rb_thread_interrupted() - check interrupt flag
01110  *     * ruby_xalloc(), ruby_xrealloc(), ruby_xfree() -
01111  *         if they called without GVL, acquire GVL automatically.
01112  */
VALUE
rb_thread_blocking_region(
    rb_blocking_function_t *func, void *data1,
    rb_unblock_function_t *ubf, void *data2)
{
    VALUE val;
    rb_thread_t *th = GET_THREAD();
    int saved_errno = 0;

    /* The RUBY_UBF_IO / RUBY_UBF_PROCESS sentinels select the built-in
     * select()-based ubf, which takes the thread as its argument. */
    if (ubf == RUBY_UBF_IO || ubf == RUBY_UBF_PROCESS) {
        ubf = ubf_select;
        data2 = th;
    }

    /* run func(data1) without the GVL; capture its errno before the
     * GVL re-acquisition inside BLOCKING_REGION can clobber it */
    BLOCKING_REGION({
        val = func(data1);
        saved_errno = errno;
    }, ubf, data2);
    errno = saved_errno;

    return val;
}
01135 
01136 /* alias of rb_thread_blocking_region() */
01137 
VALUE
rb_thread_call_without_gvl(
    rb_blocking_function_t *func, void *data1,
    rb_unblock_function_t *ubf, void *data2)
{
    /* Plain alias of rb_thread_blocking_region(). */
    return rb_thread_blocking_region(func, data1, ubf, data2);
}
01145 
01146 /*
01147  * rb_thread_call_with_gvl - re-enter into Ruby world while releasing GVL.
01148  *
01149  ***
01150  *** This API is EXPERIMENTAL!
01151  *** We do not guarantee that this API remains in ruby 1.9.2 or later.
01152  ***
01153  *
01154  * While releasing GVL using rb_thread_blocking_region() or
01155  * rb_thread_call_without_gvl(), you can not access Ruby values or invoke methods.
01156  * If you need to access it, you must use this function rb_thread_call_with_gvl().
01157  *
01158  * This function rb_thread_call_with_gvl() does:
01159  * (1) acquire GVL.
01160  * (2) call passed function `func'.
01161  * (3) release GVL.
01162  * (4) return a value which is returned at (2).
01163  *
 * NOTE: You should not return a Ruby object from (2) because such an
 *       object will not be marked by the GC.
01166  *
 * NOTE: If an exception is raised in `func', this function "DOES NOT"
 *       protect (catch) the exception.  If you have any resources
 *       which should be freed before the exception propagates, use
 *       rb_protect() inside `func' and return a value indicating that
 *       an exception was raised.
01172  *
 * NOTE: This function should not be called by a thread which was not
 *       created as a Ruby thread (i.e. by Thread.new or similar).
 *       In other words, this function *DOES NOT* associate a
 *       non-Ruby thread with a Ruby thread.
01177  */
void *
rb_thread_call_with_gvl(void *(*func)(void *), void *data1)
{
    /* Temporarily re-acquire the GVL from inside a blocking region,
     * run func(data1) in the Ruby world, then re-enter the blocking
     * region with the previously installed unblock function. */
    rb_thread_t *th = ruby_thread_from_native();
    struct rb_blocking_region_buffer *brb;
    struct rb_unblock_callback prev_unblock;
    void *r;

    if (th == 0) {
        /* An error occurred, but we can't use rb_bug()
         * because this thread is not a Ruby thread.
         * What should we do?
         */

        fprintf(stderr, "[BUG] rb_thread_call_with_gvl() is called by non-ruby thread\n");
        exit(1);
    }

    brb = (struct rb_blocking_region_buffer *)th->blocking_region_buffer;
    prev_unblock = th->unblock;

    if (brb == 0) {
        /* caller already holds the GVL: there is no region to leave */
        rb_bug("rb_thread_call_with_gvl: called by a thread which has GVL.");
    }

    blocking_region_end(th, brb);
    /* enter to Ruby world: You can access Ruby values, methods and so on. */
    r = (*func)(data1);
    /* leave from Ruby world: You can not access Ruby values, etc. */
    blocking_region_begin(th, brb, prev_unblock.func, prev_unblock.arg);
    return r;
}
01210 
01211 /*
01212  * ruby_thread_has_gvl_p - check if current native thread has GVL.
01213  *
01214  ***
01215  *** This API is EXPERIMENTAL!
01216  *** We do not guarantee that this API remains in ruby 1.9.2 or later.
01217  ***
01218  */
01219 
01220 int
01221 ruby_thread_has_gvl_p(void)
01222 {
01223     rb_thread_t *th = ruby_thread_from_native();
01224 
01225     if (th && th->blocking_region_buffer == 0) {
01226         return 1;
01227     }
01228     else {
01229         return 0;
01230     }
01231 }
01232 
01233 /*
01234  *  call-seq:
01235  *     Thread.pass   -> nil
01236  *
01237  *  Invokes the thread scheduler to pass execution to another thread.
01238  *
01239  *     a = Thread.new { print "a"; Thread.pass;
01240  *                      print "b"; Thread.pass;
01241  *                      print "c" }
01242  *     b = Thread.new { print "x"; Thread.pass;
01243  *                      print "y"; Thread.pass;
01244  *                      print "z" }
01245  *     a.join
01246  *     b.join
01247  *
01248  *  <em>produces:</em>
01249  *
01250  *     axbycz
01251  */
01252 
static VALUE
thread_s_pass(VALUE klass)
{
    /* Thread.pass: invoke the scheduler; always returns nil. */
    rb_thread_schedule();
    return Qnil;
}
01259 
01260 /*
01261  *
01262  */
01263 
static void
rb_threadptr_execute_interrupts_rec(rb_thread_t *th, int sched_depth)
{
    /* Drain all pending interrupts on `th`: buffered signals,
     * exceptions thrown by other threads (Thread#raise / #kill),
     * deferred finalizers, and timer-driven rescheduling.
     * sched_depth != 0 disables the timer part to bound mutual
     * recursion with rb_thread_schedule_rec(). */
    if (GET_VM()->main_thread == th) {
        /* let buffered signals be dispatched to the main thread first */
        while (rb_signal_buff_size() && !th->exec_signal) native_thread_yield();
    }

    /* already processing an exception: do not re-enter */
    if (th->raised_flag) return;

    while (th->interrupt_flag) {
        enum rb_thread_status status = th->status;
        int timer_interrupt = th->interrupt_flag & 0x01;
        int finalizer_interrupt = th->interrupt_flag & 0x04;

        th->status = THREAD_RUNNABLE;
        th->interrupt_flag = 0;

        /* signal handling */
        if (th->exec_signal) {
            int sig = th->exec_signal;
            th->exec_signal = 0;
            rb_signal_exec(th, sig);
        }

        /* exception from another thread */
        if (th->thrown_errinfo) {
            VALUE err = th->thrown_errinfo;
            th->thrown_errinfo = 0;
            thread_debug("rb_thread_execute_interrupts: %ld\n", err);

            /* kill/terminate are delivered as a fatal tag jump, not as
             * a catchable Ruby exception */
            if (err == eKillSignal || err == eTerminateSignal) {
                th->errinfo = INT2FIX(TAG_FATAL);
                TH_JUMP_TAG(th, TAG_FATAL);
            }
            else {
                rb_exc_raise(err);
            }
        }
        th->status = status;

        if (finalizer_interrupt) {
            rb_gc_finalize_deferred();
        }

        if (!sched_depth && timer_interrupt) {
            sched_depth++;
            EXEC_EVENT_HOOK(th, RUBY_EVENT_SWITCH, th->cfp->self, 0, 0);

            /* consume the time slice; once exhausted, keep yielding
             * until the (negative) slice climbs back to zero, then
             * refill it from the thread's priority */
            if (th->slice > 0) {
                th->slice--;
            }
            else {
              reschedule:
                rb_thread_schedule_rec(sched_depth+1);
                if (th->slice < 0) {
                    th->slice++;
                    goto reschedule;
                }
                else {
                    th->slice = th->priority;
                }
            }
        }
    }
}
01329 
void
rb_threadptr_execute_interrupts(rb_thread_t *th)
{
    /* Public entry point: process pending interrupts at depth 0. */
    rb_threadptr_execute_interrupts_rec(th, 0);
}
01335 
void
rb_gc_mark_threads(void)
{
    /* Intentionally a no-op, kept only so existing callers still link. */
    /* TODO: remove */
}
01341 
01342 /*****************************************************/
01343 
static void
rb_threadptr_ready(rb_thread_t *th)
{
    /* Wake `th` by posting an interrupt to it. */
    rb_threadptr_interrupt(th);
}
01349 
static VALUE
rb_threadptr_raise(rb_thread_t *th, int argc, VALUE *argv)
{
    /* Deliver an exception (built from argc/argv as for Kernel#raise)
     * to `th`, waking it up.  If `th` is already processing an
     * exception, yield and retry until it can accept a new one.
     * A dead target is silently ignored.  Always returns Qnil. */
    VALUE exc;

  again:
    if (rb_threadptr_dead(th)) {
        return Qnil;
    }

    if (th->thrown_errinfo != 0 || th->raised_flag) {
        /* target is busy with another exception: give it time */
        rb_thread_schedule();
        goto again;
    }

    exc = rb_make_exception(argc, argv);
    th->thrown_errinfo = exc;
    rb_threadptr_ready(th);
    return Qnil;
}
01370 
void
rb_threadptr_signal_raise(rb_thread_t *th, int sig)
{
    /* Raise SignalException(sig) in the main thread of th's VM
     * (signals are always delivered to the main thread). */
    VALUE argv[2];

    argv[0] = rb_eSignal;
    argv[1] = INT2FIX(sig);
    rb_threadptr_raise(th->vm->main_thread, 2, argv);
}
01380 
void
rb_threadptr_signal_exit(rb_thread_t *th)
{
    /* Raise SystemExit("exit") in the main thread of th's VM. */
    VALUE argv[2];

    argv[0] = rb_eSystemExit;
    argv[1] = rb_str_new2("exit");
    rb_threadptr_raise(th->vm->main_thread, 2, argv);
}
01390 
01391 #if defined(POSIX_SIGNAL) && defined(SIGSEGV) && defined(HAVE_SIGALTSTACK)
01392 #define USE_SIGALTSTACK
01393 #endif
01394 
01395 void
01396 ruby_thread_stack_overflow(rb_thread_t *th)
01397 {
01398     th->raised_flag = 0;
01399 #ifdef USE_SIGALTSTACK
01400     th->raised_flag = 0;
01401     rb_exc_raise(sysstack_error);
01402 #else
01403     th->errinfo = sysstack_error;
01404     TH_JUMP_TAG(th, TAG_RAISE);
01405 #endif
01406 }
01407 
01408 int
01409 rb_threadptr_set_raised(rb_thread_t *th)
01410 {
01411     if (th->raised_flag & RAISED_EXCEPTION) {
01412         return 1;
01413     }
01414     th->raised_flag |= RAISED_EXCEPTION;
01415     return 0;
01416 }
01417 
01418 int
01419 rb_threadptr_reset_raised(rb_thread_t *th)
01420 {
01421     if (!(th->raised_flag & RAISED_EXCEPTION)) {
01422         return 0;
01423     }
01424     th->raised_flag &= ~RAISED_EXCEPTION;
01425     return 1;
01426 }
01427 
void
rb_thread_fd_close(int fd)
{
    /* NOTE(review): currently a no-op — presumably this should wake
     * threads blocked on `fd`; confirm before relying on it. */
    /* TODO: fix me */
}
01433 
01434 /*
01435  *  call-seq:
01436  *     thr.raise
01437  *     thr.raise(string)
01438  *     thr.raise(exception [, string [, array]])
01439  *
01440  *  Raises an exception (see <code>Kernel::raise</code>) from <i>thr</i>. The
01441  *  caller does not have to be <i>thr</i>.
01442  *
01443  *     Thread.abort_on_exception = true
01444  *     a = Thread.new { sleep(200) }
01445  *     a.raise("Gotcha")
01446  *
01447  *  <em>produces:</em>
01448  *
01449  *     prog.rb:3: Gotcha (RuntimeError)
01450  *      from prog.rb:2:in `initialize'
01451  *      from prog.rb:2:in `new'
01452  *      from prog.rb:2
01453  */
01454 
static VALUE
thread_raise_m(int argc, VALUE *argv, VALUE self)
{
    /* Thread#raise: forward args to rb_threadptr_raise(); returns nil. */
    rb_thread_t *th;
    GetThreadPtr(self, th);
    rb_threadptr_raise(th, argc, argv);
    return Qnil;
}
01463 
01464 
01465 /*
01466  *  call-seq:
01467  *     thr.exit        -> thr or nil
01468  *     thr.kill        -> thr or nil
01469  *     thr.terminate   -> thr or nil
01470  *
01471  *  Terminates <i>thr</i> and schedules another thread to be run. If this thread
01472  *  is already marked to be killed, <code>exit</code> returns the
01473  *  <code>Thread</code>. If this is the main thread, or the last thread, exits
01474  *  the process.
01475  */
01476 
VALUE
rb_thread_kill(VALUE thread)
{
    rb_thread_t *th;

    GetThreadPtr(thread, th);

    /* killing another thread is restricted under $SAFE >= 4 */
    if (th != GET_THREAD() && th->safe_level < 4) {
        rb_secure(4);
    }
    /* already dying or dead: nothing to do */
    if (th->status == THREAD_TO_KILL || th->status == THREAD_KILLED) {
        return thread;
    }
    /* killing the main thread exits the whole process */
    if (th == th->vm->main_thread) {
        rb_exit(EXIT_SUCCESS);
    }

    thread_debug("rb_thread_kill: %p (%p)\n", (void *)th, (void *)th->thread_id);

    /* mark the thread for termination and wake it up */
    rb_threadptr_interrupt(th);
    th->thrown_errinfo = eKillSignal;
    th->status = THREAD_TO_KILL;

    return thread;
}
01502 
01503 
01504 /*
01505  *  call-seq:
01506  *     Thread.kill(thread)   -> thread
01507  *
01508  *  Causes the given <em>thread</em> to exit (see <code>Thread::exit</code>).
01509  *
01510  *     count = 0
01511  *     a = Thread.new { loop { count += 1 } }
01512  *     sleep(0.1)       #=> 0
01513  *     Thread.kill(a)   #=> #<Thread:0x401b3d30 dead>
01514  *     count            #=> 93947
01515  *     a.alive?         #=> false
01516  */
01517 
01518 static VALUE
01519 rb_thread_s_kill(VALUE obj, VALUE th)
01520 {
01521     if (CLASS_OF(th) != rb_cThread) {
01522         rb_raise(rb_eTypeError, 
01523                 "wrong argument type %s (expected Thread)",
01524                 rb_obj_classname(th));
01525     }
01526     return rb_thread_kill(th);
01527 }
01528 
01529 
01530 /*
01531  *  call-seq:
01532  *     Thread.exit   -> thread
01533  *
01534  *  Terminates the currently running thread and schedules another thread to be
01535  *  run. If this thread is already marked to be killed, <code>exit</code>
01536  *  returns the <code>Thread</code>. If this is the main thread, or the last
01537  *  thread, exit the process.
01538  */
01539 
static VALUE
rb_thread_exit(void)
{
    /* Thread.exit: kill the calling thread. */
    return rb_thread_kill(GET_THREAD()->self);
}
01545 
01546 
01547 /*
01548  *  call-seq:
01549  *     thr.wakeup   -> thr
01550  *
01551  *  Marks <i>thr</i> as eligible for scheduling (it may still remain blocked on
01552  *  I/O, however). Does not invoke the scheduler (see <code>Thread#run</code>).
01553  *
01554  *     c = Thread.new { Thread.stop; puts "hey!" }
01555  *     c.wakeup
01556  *
01557  *  <em>produces:</em>
01558  *
01559  *     hey!
01560  */
01561 
VALUE
rb_thread_wakeup(VALUE thread)
{
    rb_thread_t *th;
    GetThreadPtr(thread, th);

    /* a dead thread cannot be woken */
    if (th->status == THREAD_KILLED) {
        rb_raise(rb_eThreadError, "killed thread");
    }
    rb_threadptr_ready(th);
    /* keep TO_KILL intact so a dying thread continues dying */
    if (th->status != THREAD_TO_KILL) {
        th->status = THREAD_RUNNABLE;
    }
    return thread;
}
01577 
01578 
01579 /*
01580  *  call-seq:
01581  *     thr.run   -> thr
01582  *
01583  *  Wakes up <i>thr</i>, making it eligible for scheduling.
01584  *
01585  *     a = Thread.new { puts "a"; Thread.stop; puts "c" }
01586  *     Thread.pass
01587  *     puts "Got here"
01588  *     a.run
01589  *     a.join
01590  *
01591  *  <em>produces:</em>
01592  *
01593  *     a
01594  *     Got here
01595  *     c
01596  */
01597 
VALUE
rb_thread_run(VALUE thread)
{
    /* Thread#run: wake the target, then invoke the scheduler. */
    rb_thread_wakeup(thread);
    rb_thread_schedule();
    return thread;
}
01605 
01606 
01607 /*
01608  *  call-seq:
01609  *     Thread.stop   -> nil
01610  *
01611  *  Stops execution of the current thread, putting it into a ``sleep'' state,
01612  *  and schedules execution of another thread.
01613  *
01614  *     a = Thread.new { print "a"; Thread.stop; print "c" }
01615  *     Thread.pass
01616  *     print "b"
01617  *     a.run
01618  *     a.join
01619  *
01620  *  <em>produces:</em>
01621  *
01622  *     abc
01623  */
01624 
VALUE
rb_thread_stop(void)
{
    /* Thread.stop: sleep until woken.  Refuses when this is the only
     * thread, since nothing could ever wake it (deadlock). */
    if (rb_thread_alone()) {
        rb_raise(rb_eThreadError,
                 "stopping only thread\n\tnote: use sleep to stop forever");
    }
    rb_thread_sleep_deadly();
    return Qnil;
}
01635 
01636 static int
01637 thread_list_i(st_data_t key, st_data_t val, void *data)
01638 {
01639     VALUE ary = (VALUE)data;
01640     rb_thread_t *th;
01641     GetThreadPtr((VALUE)key, th);
01642 
01643     switch (th->status) {
01644       case THREAD_RUNNABLE:
01645       case THREAD_STOPPED:
01646       case THREAD_STOPPED_FOREVER:
01647       case THREAD_TO_KILL:
01648         rb_ary_push(ary, th->self);
01649       default:
01650         break;
01651     }
01652     return ST_CONTINUE;
01653 }
01654 
01655 /********************************************************************/
01656 
01657 /*
01658  *  call-seq:
01659  *     Thread.list   -> array
01660  *
01661  *  Returns an array of <code>Thread</code> objects for all threads that are
01662  *  either runnable or stopped.
01663  *
01664  *     Thread.new { sleep(200) }
01665  *     Thread.new { 1000000.times {|i| i*i } }
01666  *     Thread.new { Thread.stop }
01667  *     Thread.list.each {|t| p t}
01668  *
01669  *  <em>produces:</em>
01670  *
01671  *     #<Thread:0x401b3e84 sleep>
01672  *     #<Thread:0x401b3f38 run>
01673  *     #<Thread:0x401b3fb0 sleep>
01674  *     #<Thread:0x401bdf4c run>
01675  */
01676 
VALUE
rb_thread_list(void)
{
    /* Thread.list: array of live threads in the current VM. */
    VALUE ary = rb_ary_new();
    st_foreach(GET_THREAD()->vm->living_threads, thread_list_i, ary);
    return ary;
}
01684 
VALUE
rb_thread_current(void)
{
    /* Thread object of the currently executing thread. */
    return GET_THREAD()->self;
}
01690 
01691 /*
01692  *  call-seq:
01693  *     Thread.current   -> thread
01694  *
01695  *  Returns the currently executing thread.
01696  *
01697  *     Thread.current   #=> #<Thread:0x401bdf4c run>
01698  */
01699 
static VALUE
thread_s_current(VALUE klass)
{
    /* Thread.current */
    return rb_thread_current();
}
01705 
VALUE
rb_thread_main(void)
{
    /* Thread object of the current VM's main thread. */
    return GET_THREAD()->vm->main_thread->self;
}
01711 
01712 /*
01713  *  call-seq:
01714  *     Thread.main   -> thread
01715  *
01716  *  Returns the main thread.
01717  */
01718 
static VALUE
rb_thread_s_main(VALUE klass)
{
    /* Thread.main */
    return rb_thread_main();
}
01724 
01725 
01726 /*
01727  *  call-seq:
01728  *     Thread.abort_on_exception   -> true or false
01729  *
01730  *  Returns the status of the global ``abort on exception'' condition.  The
01731  *  default is <code>false</code>. When set to <code>true</code>, or if the
01732  *  global <code>$DEBUG</code> flag is <code>true</code> (perhaps because the
01733  *  command line option <code>-d</code> was specified) all threads will abort
01734  *  (the process will <code>exit(0)</code>) if an exception is raised in any
01735  *  thread. See also <code>Thread::abort_on_exception=</code>.
01736  */
01737 
static VALUE
rb_thread_s_abort_exc(void)
{
    /* Thread.abort_on_exception: VM-global flag, as true/false. */
    return GET_THREAD()->vm->thread_abort_on_exception ? Qtrue : Qfalse;
}
01743 
01744 
01745 /*
01746  *  call-seq:
01747  *     Thread.abort_on_exception= boolean   -> true or false
01748  *
01749  *  When set to <code>true</code>, all threads will abort if an exception is
01750  *  raised. Returns the new state.
01751  *
01752  *     Thread.abort_on_exception = true
01753  *     t1 = Thread.new do
01754  *       puts  "In new thread"
01755  *       raise "Exception from thread"
01756  *     end
01757  *     sleep(1)
01758  *     puts "not reached"
01759  *
01760  *  <em>produces:</em>
01761  *
01762  *     In new thread
01763  *     prog.rb:4: Exception from thread (RuntimeError)
01764  *      from prog.rb:2:in `initialize'
01765  *      from prog.rb:2:in `new'
01766  *      from prog.rb:2
01767  */
01768 
static VALUE
rb_thread_s_abort_exc_set(VALUE self, VALUE val)
{
    /* Thread.abort_on_exception=: set the VM-global flag ($SAFE < 4). */
    rb_secure(4);
    GET_THREAD()->vm->thread_abort_on_exception = RTEST(val);
    return val;
}
01776 
01777 
01778 /*
01779  *  call-seq:
01780  *     thr.abort_on_exception   -> true or false
01781  *
01782  *  Returns the status of the thread-local ``abort on exception'' condition for
01783  *  <i>thr</i>. The default is <code>false</code>. See also
01784  *  <code>Thread::abort_on_exception=</code>.
01785  */
01786 
static VALUE
rb_thread_abort_exc(VALUE thread)
{
    /* Thread#abort_on_exception: per-thread flag, as true/false. */
    rb_thread_t *th;
    GetThreadPtr(thread, th);
    return th->abort_on_exception ? Qtrue : Qfalse;
}
01794 
01795 
01796 /*
01797  *  call-seq:
01798  *     thr.abort_on_exception= boolean   -> true or false
01799  *
01800  *  When set to <code>true</code>, causes all threads (including the main
01801  *  program) to abort if an exception is raised in <i>thr</i>. The process will
01802  *  effectively <code>exit(0)</code>.
01803  */
01804 
static VALUE
rb_thread_abort_exc_set(VALUE thread, VALUE val)
{
    /* Thread#abort_on_exception=: set the per-thread flag ($SAFE < 4). */
    rb_thread_t *th;
    rb_secure(4);

    GetThreadPtr(thread, th);
    th->abort_on_exception = RTEST(val);
    return val;
}
01815 
01816 
01817 /*
01818  *  call-seq:
01819  *     thr.group   -> thgrp or nil
01820  *
01821  *  Returns the <code>ThreadGroup</code> which contains <i>thr</i>, or nil if
01822  *  the thread is not a member of any group.
01823  *
01824  *     Thread.main.group   #=> #<ThreadGroup:0x4029d914>
01825  */
01826 
01827 VALUE
01828 rb_thread_group(VALUE thread)
01829 {
01830     rb_thread_t *th;
01831     VALUE group;
01832     GetThreadPtr(thread, th);
01833     group = th->thgroup;
01834 
01835     if (!group) {
01836         group = Qnil;
01837     }
01838     return group;
01839 }
01840 
01841 static const char *
01842 thread_status_name(enum rb_thread_status status)
01843 {
01844     switch (status) {
01845       case THREAD_RUNNABLE:
01846         return "run";
01847       case THREAD_STOPPED:
01848       case THREAD_STOPPED_FOREVER:
01849         return "sleep";
01850       case THREAD_TO_KILL:
01851         return "aborting";
01852       case THREAD_KILLED:
01853         return "dead";
01854       default:
01855         return "unknown";
01856     }
01857 }
01858 
01859 static int
01860 rb_threadptr_dead(rb_thread_t *th)
01861 {
01862     return th->status == THREAD_KILLED;
01863 }
01864 
01865 
01866 /*
01867  *  call-seq:
01868  *     thr.status   -> string, false or nil
01869  *
01870  *  Returns the status of <i>thr</i>: ``<code>sleep</code>'' if <i>thr</i> is
01871  *  sleeping or waiting on I/O, ``<code>run</code>'' if <i>thr</i> is executing,
01872  *  ``<code>aborting</code>'' if <i>thr</i> is aborting, <code>false</code> if
01873  *  <i>thr</i> terminated normally, and <code>nil</code> if <i>thr</i>
01874  *  terminated with an exception.
01875  *
01876  *     a = Thread.new { raise("die now") }
01877  *     b = Thread.new { Thread.stop }
01878  *     c = Thread.new { Thread.exit }
01879  *     d = Thread.new { sleep }
01880  *     d.kill                  #=> #<Thread:0x401b3678 aborting>
01881  *     a.status                #=> nil
01882  *     b.status                #=> "sleep"
01883  *     c.status                #=> false
01884  *     d.status                #=> "aborting"
01885  *     Thread.current.status   #=> "run"
01886  */
01887 
static VALUE
rb_thread_status(VALUE thread)
{
    /* Thread#status: nil if the thread died with an exception, false
     * if it terminated normally, otherwise a status-name string. */
    rb_thread_t *th;
    GetThreadPtr(thread, th);

    if (rb_threadptr_dead(th)) {
        /* a non-nil, non-fixnum errinfo means it died with an exception */
        if (!NIL_P(th->errinfo) && !FIXNUM_P(th->errinfo)
            /* TODO */ ) {
            return Qnil;
        }
        return Qfalse;
    }
    return rb_str_new2(thread_status_name(th->status));
}
01903 
01904 
01905 /*
01906  *  call-seq:
01907  *     thr.alive?   -> true or false
01908  *
01909  *  Returns <code>true</code> if <i>thr</i> is running or sleeping.
01910  *
01911  *     thr = Thread.new { }
01912  *     thr.join                #=> #<Thread:0x401b3fb0 dead>
01913  *     Thread.current.alive?   #=> true
01914  *     thr.alive?              #=> false
01915  */
01916 
01917 static VALUE
01918 rb_thread_alive_p(VALUE thread)
01919 {
01920     rb_thread_t *th;
01921     GetThreadPtr(thread, th);
01922 
01923     if (rb_threadptr_dead(th))
01924         return Qfalse;
01925     return Qtrue;
01926 }
01927 
01928 /*
01929  *  call-seq:
01930  *     thr.stop?   -> true or false
01931  *
01932  *  Returns <code>true</code> if <i>thr</i> is dead or sleeping.
01933  *
01934  *     a = Thread.new { Thread.stop }
01935  *     b = Thread.current
01936  *     a.stop?   #=> true
01937  *     b.stop?   #=> false
01938  */
01939 
01940 static VALUE
01941 rb_thread_stop_p(VALUE thread)
01942 {
01943     rb_thread_t *th;
01944     GetThreadPtr(thread, th);
01945 
01946     if (rb_threadptr_dead(th))
01947         return Qtrue;
01948     if (th->status == THREAD_STOPPED || th->status == THREAD_STOPPED_FOREVER)
01949         return Qtrue;
01950     return Qfalse;
01951 }
01952 
01953 /*
01954  *  call-seq:
01955  *     thr.safe_level   -> integer
01956  *
01957  *  Returns the safe level in effect for <i>thr</i>. Setting thread-local safe
01958  *  levels can help when implementing sandboxes which run insecure code.
01959  *
01960  *     thr = Thread.new { $SAFE = 3; sleep }
01961  *     Thread.current.safe_level   #=> 0
01962  *     thr.safe_level              #=> 3
01963  */
01964 
static VALUE
rb_thread_safe_level(VALUE thread)
{
    /* Thread#safe_level: the thread's $SAFE level as an Integer. */
    rb_thread_t *th;
    GetThreadPtr(thread, th);

    return INT2NUM(th->safe_level);
}
01973 
01974 /*
01975  * call-seq:
01976  *   thr.inspect   -> string
01977  *
01978  * Dump the name, id, and status of _thr_ to a string.
01979  */
01980 
static VALUE
rb_thread_inspect(VALUE thread)
{
    /* Thread#inspect: "#<ClassName:0xADDR status>"; the result inherits
     * taint/trust from the thread object via OBJ_INFECT. */
    const char *cname = rb_obj_classname(thread);
    rb_thread_t *th;
    const char *status;
    VALUE str;

    GetThreadPtr(thread, th);
    status = thread_status_name(th->status);
    str = rb_sprintf("#<%s:%p %s>", cname, (void *)thread, status);
    OBJ_INFECT(str, thread);

    return str;
}
01996 
VALUE
rb_thread_local_aref(VALUE thread, ID id)
{
    /* Fetch thread-local `id` from `thread`'s storage table, or Qnil if
     * absent.  Reading another thread's locals is forbidden at
     * $SAFE >= 4. */
    rb_thread_t *th;
    VALUE val;

    GetThreadPtr(thread, th);
    if (rb_safe_level() >= 4 && th != GET_THREAD()) {
        rb_raise(rb_eSecurityError, "Insecure: thread locals");
    }
    /* storage is created lazily; no table means no locals at all */
    if (!th->local_storage) {
        return Qnil;
    }
    if (st_lookup(th->local_storage, id, &val)) {
        return val;
    }
    return Qnil;
}
02015 
02016 /*
02017  *  call-seq:
02018  *      thr[sym]   -> obj or nil
02019  *
02020  *  Attribute Reference---Returns the value of a thread-local variable, using
02021  *  either a symbol or a string name. If the specified variable does not exist,
02022  *  returns <code>nil</code>.
02023  *
02024  *     a = Thread.new { Thread.current["name"] = "A"; Thread.stop }
02025  *     b = Thread.new { Thread.current[:name]  = "B"; Thread.stop }
02026  *     c = Thread.new { Thread.current["name"] = "C"; Thread.stop }
02027  *     Thread.list.each {|x| puts "#{x.inspect}: #{x[:name]}" }
02028  *
02029  *  <em>produces:</em>
02030  *
02031  *     #<Thread:0x401b3b3c sleep>: C
02032  *     #<Thread:0x401b3bc8 sleep>: B
02033  *     #<Thread:0x401b3c68 sleep>: A
02034  *     #<Thread:0x401bdf4c run>:
02035  */
02036 
static VALUE
rb_thread_aref(VALUE thread, VALUE id)
{
    /* Thread#[]: accepts a Symbol or String key. */
    return rb_thread_local_aref(thread, rb_to_id(id));
}
02042 
VALUE
rb_thread_local_aset(VALUE thread, ID id, VALUE val)
{
    /* Set thread-local `id` to `val`; assigning nil deletes the entry.
     * The storage table is created lazily on first assignment.
     * Restricted at $SAFE >= 4 and on frozen threads. */
    rb_thread_t *th;
    GetThreadPtr(thread, th);

    if (rb_safe_level() >= 4 && th != GET_THREAD()) {
        rb_raise(rb_eSecurityError, "Insecure: can't modify thread locals");
    }
    if (OBJ_FROZEN(thread)) {
        rb_error_frozen("thread locals");
    }
    if (!th->local_storage) {
        th->local_storage = st_init_numtable();
    }
    if (NIL_P(val)) {
        st_delete_wrap(th->local_storage, id);
        return Qnil;
    }
    st_insert(th->local_storage, id, val);
    return val;
}
02065 
02066 /*
02067  *  call-seq:
02068  *      thr[sym] = obj   -> obj
02069  *
02070  *  Attribute Assignment---Sets or creates the value of a thread-local variable,
02071  *  using either a symbol or a string. See also <code>Thread#[]</code>.
02072  */
02073 
static VALUE
rb_thread_aset(VALUE self, VALUE id, VALUE val)
{
    /* Thread#[]=: accepts a Symbol or String key. */
    return rb_thread_local_aset(self, rb_to_id(id), val);
}
02079 
02080 /*
02081  *  call-seq:
02082  *     thr.key?(sym)   -> true or false
02083  *
02084  *  Returns <code>true</code> if the given string (or symbol) exists as a
02085  *  thread-local variable.
02086  *
02087  *     me = Thread.current
02088  *     me[:oliver] = "a"
02089  *     me.key?(:oliver)    #=> true
02090  *     me.key?(:stanley)   #=> false
02091  */
02092 
02093 static VALUE
02094 rb_thread_key_p(VALUE self, VALUE key)
02095 {
02096     rb_thread_t *th;
02097     ID id = rb_to_id(key);
02098 
02099     GetThreadPtr(self, th);
02100 
02101     if (!th->local_storage) {
02102         return Qfalse;
02103     }
02104     if (st_lookup(th->local_storage, id, 0)) {
02105         return Qtrue;
02106     }
02107     return Qfalse;
02108 }
02109 
02110 static int
02111 thread_keys_i(ID key, VALUE value, VALUE ary)
02112 {
02113     rb_ary_push(ary, ID2SYM(key));
02114     return ST_CONTINUE;
02115 }
02116 
02117 static int
02118 vm_living_thread_num(rb_vm_t *vm)
02119 {
02120     return vm->living_threads->num_entries;
02121 }
02122 
02123 int
02124 rb_thread_alone(void)
02125 {
02126     int num = 1;
02127     if (GET_THREAD()->vm->living_threads) {
02128         num = vm_living_thread_num(GET_THREAD()->vm);
02129         thread_debug("rb_thread_alone: %d\n", num);
02130     }
02131     return num == 1;
02132 }
02133 
02134 /*
02135  *  call-seq:
02136  *     thr.keys   -> array
02137  *
02138  *  Returns an an array of the names of the thread-local variables (as Symbols).
02139  *
02140  *     thr = Thread.new do
02141  *       Thread.current[:cat] = 'meow'
02142  *       Thread.current["dog"] = 'woof'
02143  *     end
02144  *     thr.join   #=> #<Thread:0x401b3f10 dead>
02145  *     thr.keys   #=> [:dog, :cat]
02146  */
02147 
02148 static VALUE
02149 rb_thread_keys(VALUE self)
02150 {
02151     rb_thread_t *th;
02152     VALUE ary = rb_ary_new();
02153     GetThreadPtr(self, th);
02154 
02155     if (th->local_storage) {
02156         st_foreach(th->local_storage, thread_keys_i, ary);
02157     }
02158     return ary;
02159 }
02160 
02161 /*
02162  *  call-seq:
02163  *     thr.priority   -> integer
02164  *
02165  *  Returns the priority of <i>thr</i>. Default is inherited from the
02166  *  current thread which creating the new thread, or zero for the
02167  *  initial main thread; higher-priority thread will run more frequently
02168  *  than lower-priority threads (but lower-priority threads can also run).
02169  *
02170  *  This is just hint for Ruby thread scheduler.  It may be ignored on some
02171  *  platform.
02172  *
02173  *     Thread.current.priority   #=> 0
02174  */
02175 
02176 static VALUE
02177 rb_thread_priority(VALUE thread)
02178 {
02179     rb_thread_t *th;
02180     GetThreadPtr(thread, th);
02181     return INT2NUM(th->priority);
02182 }
02183 
02184 
02185 /*
02186  *  call-seq:
02187  *     thr.priority= integer   -> thr
02188  *
02189  *  Sets the priority of <i>thr</i> to <i>integer</i>. Higher-priority threads
02190  *  will run more frequently than lower-priority threads (but lower-priority
02191  *  threads can also run).
02192  *
02193  *  This is just hint for Ruby thread scheduler.  It may be ignored on some
02194  *  platform.
02195  *
02196  *     count1 = count2 = 0
02197  *     a = Thread.new do
02198  *           loop { count1 += 1 }
02199  *         end
02200  *     a.priority = -1
02201  *
02202  *     b = Thread.new do
02203  *           loop { count2 += 1 }
02204  *         end
02205  *     b.priority = -2
02206  *     sleep 1   #=> 1
02207  *     count1    #=> 622504
02208  *     count2    #=> 5832
02209  */
02210 
02211 static VALUE
02212 rb_thread_priority_set(VALUE thread, VALUE prio)
02213 {
02214     rb_thread_t *th;
02215     int priority;
02216     GetThreadPtr(thread, th);
02217 
02218     rb_secure(4);
02219 
02220 #if USE_NATIVE_THREAD_PRIORITY
02221     th->priority = NUM2INT(prio);
02222     native_thread_apply_priority(th);
02223 #else
02224     priority = NUM2INT(prio);
02225     if (priority > RUBY_THREAD_PRIORITY_MAX) {
02226         priority = RUBY_THREAD_PRIORITY_MAX;
02227     }
02228     else if (priority < RUBY_THREAD_PRIORITY_MIN) {
02229         priority = RUBY_THREAD_PRIORITY_MIN;
02230     }
02231     th->priority = priority;
02232     th->slice = priority;
02233 #endif
02234     return INT2NUM(th->priority);
02235 }
02236 
02237 /* for IO */
02238 
02239 #if defined(NFDBITS) && defined(HAVE_RB_FD_INIT)
02240 
02241 /*
02242  * several Unix platforms support file descriptors bigger than FD_SETSIZE
02243  * in select(2) system call.
02244  *
02245  * - Linux 2.2.12 (?)
02246  * - NetBSD 1.2 (src/sys/kern/sys_generic.c:1.25)
02247  *   select(2) documents how to allocate fd_set dynamically.
02248  *   http://netbsd.gw.com/cgi-bin/man-cgi?select++NetBSD-4.0
02249  * - FreeBSD 2.2 (src/sys/kern/sys_generic.c:1.19)
02250  * - OpenBSD 2.0 (src/sys/kern/sys_generic.c:1.4)
02251  *   select(2) documents how to allocate fd_set dynamically.
02252  *   http://www.openbsd.org/cgi-bin/man.cgi?query=select&manpath=OpenBSD+4.4
02253  * - HP-UX documents how to allocate fd_set dynamically.
02254  *   http://docs.hp.com/en/B2355-60105/select.2.html
02255  * - Solaris 8 has select_large_fdset
02256  *
02257  * When fd_set is not big enough to hold big file descriptors,
02258  * it should be allocated dynamically.
02259  * Note that this assumes fd_set is structured as bitmap.
02260  *
02261  * rb_fd_init allocates the memory.
02262  * rb_fd_term free the memory.
02263  * rb_fd_set may re-allocates bitmap.
02264  *
02265  * So rb_fd_set doesn't reject file descriptors bigger than FD_SETSIZE.
02266  */
02267 
02268 void
02269 rb_fd_init(volatile rb_fdset_t *fds)
02270 {
02271     fds->maxfd = 0;
02272     fds->fdset = ALLOC(fd_set);
02273     FD_ZERO(fds->fdset);
02274 }
02275 
02276 void
02277 rb_fd_term(rb_fdset_t *fds)
02278 {
02279     if (fds->fdset) xfree(fds->fdset);
02280     fds->maxfd = 0;
02281     fds->fdset = 0;
02282 }
02283 
/* Clear every bit in the set without freeing the (possibly grown) bitmap. */
void
rb_fd_zero(rb_fdset_t *fds)
{
    if (fds->fdset) {
        /* MEMZERO clears the portion sized by maxfd; FD_ZERO additionally
         * clears the leading sizeof(fd_set) bytes.  Together they cover the
         * whole allocation whichever of the two is larger. */
        MEMZERO(fds->fdset, fd_mask, howmany(fds->maxfd, NFDBITS));
        FD_ZERO(fds->fdset);
    }
}
02292 
02293 static void
02294 rb_fd_resize(int n, rb_fdset_t *fds)
02295 {
02296     size_t m = howmany(n + 1, NFDBITS) * sizeof(fd_mask);
02297     size_t o = howmany(fds->maxfd, NFDBITS) * sizeof(fd_mask);
02298 
02299     if (m < sizeof(fd_set)) m = sizeof(fd_set);
02300     if (o < sizeof(fd_set)) o = sizeof(fd_set);
02301 
02302     if (m > o) {
02303         fds->fdset = xrealloc(fds->fdset, m);
02304         memset((char *)fds->fdset + o, 0, m - o);
02305     }
02306     if (n >= fds->maxfd) fds->maxfd = n + 1;
02307 }
02308 
02309 void
02310 rb_fd_set(int n, rb_fdset_t *fds)
02311 {
02312     rb_fd_resize(n, fds);
02313     FD_SET(n, fds->fdset);
02314 }
02315 
02316 void
02317 rb_fd_clr(int n, rb_fdset_t *fds)
02318 {
02319     if (n >= fds->maxfd) return;
02320     FD_CLR(n, fds->fdset);
02321 }
02322 
02323 int
02324 rb_fd_isset(int n, const rb_fdset_t *fds)
02325 {
02326     if (n >= fds->maxfd) return 0;
02327     return FD_ISSET(n, fds->fdset) != 0; /* "!= 0" avoids FreeBSD PR 91421 */
02328 }
02329 
02330 void
02331 rb_fd_copy(rb_fdset_t *dst, const fd_set *src, int max)
02332 {
02333     size_t size = howmany(max, NFDBITS) * sizeof(fd_mask);
02334 
02335     if (size < sizeof(fd_set)) size = sizeof(fd_set);
02336     dst->maxfd = max;
02337     dst->fdset = xrealloc(dst->fdset, size);
02338     memcpy(dst->fdset, src, size);
02339 }
02340 
02341 int
02342 rb_fd_select(int n, rb_fdset_t *readfds, rb_fdset_t *writefds, rb_fdset_t *exceptfds, struct timeval *timeout)
02343 {
02344     fd_set *r = NULL, *w = NULL, *e = NULL;
02345     if (readfds) {
02346         rb_fd_resize(n - 1, readfds);
02347         r = rb_fd_ptr(readfds);
02348     }
02349     if (writefds) {
02350         rb_fd_resize(n - 1, writefds);
02351         w = rb_fd_ptr(writefds);
02352     }
02353     if (exceptfds) {
02354         rb_fd_resize(n - 1, exceptfds);
02355         e = rb_fd_ptr(exceptfds);
02356     }
02357     return select(n, r, w, e, timeout);
02358 }
02359 
02360 #undef FD_ZERO
02361 #undef FD_SET
02362 #undef FD_CLR
02363 #undef FD_ISSET
02364 
02365 #define FD_ZERO(f)      rb_fd_zero(f)
02366 #define FD_SET(i, f)    rb_fd_set(i, f)
02367 #define FD_CLR(i, f)    rb_fd_clr(i, f)
02368 #define FD_ISSET(i, f)  rb_fd_isset(i, f)
02369 
02370 #elif defined(_WIN32)
02371 
02372 void
02373 rb_fd_init(volatile rb_fdset_t *set)
02374 {
02375     set->capa = FD_SETSIZE;
02376     set->fdset = ALLOC(fd_set);
02377     FD_ZERO(set->fdset);
02378 }
02379 
02380 void
02381 rb_fd_term(rb_fdset_t *set)
02382 {
02383     xfree(set->fdset);
02384     set->fdset = NULL;
02385     set->capa = 0;
02386 }
02387 
/*
 * Win32 variant.  A win32 fd_set is a count followed by an array of
 * SOCKET handles, not a bitmap; append fd's handle, growing the array
 * when capacity runs out.
 */
void
rb_fd_set(int fd, rb_fdset_t *set)
{
    unsigned int i;
    SOCKET s = rb_w32_get_osfhandle(fd);

    /* FD_SET is idempotent: do nothing if the socket is already present. */
    for (i = 0; i < set->fdset->fd_count; i++) {
        if (set->fdset->fd_array[i] == s) {
            return;
        }
    }
    if (set->fdset->fd_count >= (unsigned)set->capa) {
        /* Grow to the next multiple of FD_SETSIZE.  The realloc size
         * mirrors the fd_set layout: the fd_count field plus the array. */
        set->capa = (set->fdset->fd_count / FD_SETSIZE + 1) * FD_SETSIZE;
        set->fdset = xrealloc(set->fdset, sizeof(unsigned int) + sizeof(SOCKET) * set->capa);
    }
    set->fdset->fd_array[set->fdset->fd_count++] = s;
}
02405 
02406 #undef FD_ZERO
02407 #undef FD_SET
02408 #undef FD_CLR
02409 #undef FD_ISSET
02410 
02411 #define FD_ZERO(f)      rb_fd_zero(f)
02412 #define FD_SET(i, f)    rb_fd_set(i, f)
02413 #define FD_CLR(i, f)    rb_fd_clr(i, f)
02414 #define FD_ISSET(i, f)  rb_fd_isset(i, f)
02415 
02416 #endif
02417 
02418 #if defined(__CYGWIN__) || defined(_WIN32)
/*
 * Three-way compare of two timevals: negative when a < b, zero when
 * equal, positive when a > b.  Seconds dominate; microseconds break ties.
 */
static long
cmp_tv(const struct timeval *a, const struct timeval *b)
{
    long sec_diff = (a->tv_sec - b->tv_sec);
    if (sec_diff != 0) {
        return sec_diff;
    }
    return (a->tv_usec - b->tv_usec);
}
02425 
/*
 * In-place rest -= wait.  Returns non-zero while time remains, zero when
 * the result would be <= 0 (rest is left clamped/partially updated in the
 * underflow cases, matching the original behavior).
 */
static int
subtract_tv(struct timeval *rest, const struct timeval *wait)
{
    /* Whole-second underflow: nothing remains. */
    if (rest->tv_sec < wait->tv_sec) return 0;

    /* Borrow seconds until the usec field is large enough to subtract. */
    while (rest->tv_usec < wait->tv_usec) {
        if (rest->tv_sec <= wait->tv_sec) return 0;
        rest->tv_sec--;
        rest->tv_usec += 1000000;
    }

    rest->tv_sec -= wait->tv_sec;
    rest->tv_usec -= wait->tv_usec;
    return !(rest->tv_sec == 0 && rest->tv_usec == 0);
}
02443 #endif
02444 
/*
 * Internal select(2) wrapper.  Runs select inside a blocking region so
 * other Ruby threads can run, and transparently restarts the call after
 * EINTR/ERESTART with the caller's original fd sets and the remaining
 * timeout.  Returns select's result; errno reflects the failing call.
 */
static int
do_select(int n, fd_set *read, fd_set *write, fd_set *except,
          struct timeval *timeout)
{
    int result, lerrno;
    /* Snapshots of the caller's sets; select clobbers its arguments, so
     * these are restored before every retry. */
    fd_set UNINITIALIZED_VAR(orig_read);
    fd_set UNINITIALIZED_VAR(orig_write);
    fd_set UNINITIALIZED_VAR(orig_except);

#ifndef linux
    /* On non-Linux systems select() does not write back the remaining
     * time, so track an absolute deadline ourselves. */
    double limit = 0;
    struct timeval wait_rest;
# if defined(__CYGWIN__) || defined(_WIN32)
    struct timeval start_time;
# endif

    if (timeout) {
# if defined(__CYGWIN__) || defined(_WIN32)
        gettimeofday(&start_time, NULL);
        limit = (double)start_time.tv_sec + (double)start_time.tv_usec*1e-6;
# else
        limit = timeofday();
# endif
        limit += (double)timeout->tv_sec+(double)timeout->tv_usec*1e-6;
        /* Work on a private copy so the caller's timeout is untouched. */
        wait_rest = *timeout;
        timeout = &wait_rest;
    }
#endif

    if (read) orig_read = *read;
    if (write) orig_write = *write;
    if (except) orig_except = *except;

  retry:
    lerrno = 0;

#if defined(__CYGWIN__) || defined(_WIN32)
    {
        int finish = 0;
        /* polling duration: 100ms */
        struct timeval wait_100ms, *wait;
        wait_100ms.tv_sec = 0;
        wait_100ms.tv_usec = 100 * 1000; /* 100 ms */

        /* These platforms cannot interrupt a blocking select from another
         * thread, so poll in 100ms slices and check the interrupt flag
         * between slices. */
        do {
            wait = (timeout == 0 || cmp_tv(&wait_100ms, timeout) < 0) ? &wait_100ms : timeout;
            BLOCKING_REGION({
                do {
                    result = select(n, read, write, except, wait);
                    if (result < 0) lerrno = errno;
                    if (result != 0) break;

                    /* Slice expired with no fds ready: restore the sets
                     * and charge the elapsed time against the timeout. */
                    if (read) *read = orig_read;
                    if (write) *write = orig_write;
                    if (except) *except = orig_except;
                    if (timeout) {
                        struct timeval elapsed;
                        gettimeofday(&elapsed, NULL);
                        subtract_tv(&elapsed, &start_time);
                        gettimeofday(&start_time, NULL);
                        if (!subtract_tv(timeout, &elapsed)) {
                            /* Deadline reached. */
                            finish = 1;
                            break;
                        }
                        if (cmp_tv(&wait_100ms, timeout) > 0) wait = timeout;
                    }
                } while (__th->interrupt_flag == 0);
            }, 0, 0);
        } while (result == 0 && !finish);
    }
#else
    BLOCKING_REGION({
        result = select(n, read, write, except, timeout);
        if (result < 0) lerrno = errno;
    }, ubf_select, GET_THREAD());
#endif

    /* Propagate the errno captured inside the blocking region. */
    errno = lerrno;

    if (result < 0) {
        switch (errno) {
          case EINTR:
#ifdef ERESTART
          case ERESTART:
#endif
            /* Interrupted by a signal: restore the fd sets and retry with
             * whatever time remains until the deadline. */
            if (read) *read = orig_read;
            if (write) *write = orig_write;
            if (except) *except = orig_except;
#ifndef linux
            if (timeout) {
                double d = limit - timeofday();

                wait_rest.tv_sec = (unsigned int)d;
                wait_rest.tv_usec = (int)((d-(double)wait_rest.tv_sec)*1e6);
                if (wait_rest.tv_sec < 0)  wait_rest.tv_sec = 0;
                if (wait_rest.tv_usec < 0) wait_rest.tv_usec = 0;
            }
#endif
            goto retry;
          default:
            break;
        }
    }
    return result;
}
02550 
02551 static void
02552 rb_thread_wait_fd_rw(int fd, int read)
02553 {
02554     int result = 0;
02555     thread_debug("rb_thread_wait_fd_rw(%d, %s)\n", fd, read ? "read" : "write");
02556 
02557     if (fd < 0) {
02558         rb_raise(rb_eIOError, "closed stream");
02559     }
02560     if (rb_thread_alone()) return;
02561     while (result <= 0) {
02562         rb_fdset_t set;
02563         rb_fd_init(&set);
02564         FD_SET(fd, &set);
02565 
02566         if (read) {
02567             result = do_select(fd + 1, rb_fd_ptr(&set), 0, 0, 0);
02568         }
02569         else {
02570             result = do_select(fd + 1, 0, rb_fd_ptr(&set), 0, 0);
02571         }
02572 
02573         rb_fd_term(&set);
02574 
02575         if (result < 0) {
02576             rb_sys_fail(0);
02577         }
02578     }
02579 
02580     thread_debug("rb_thread_wait_fd_rw(%d, %s): done\n", fd, read ? "read" : "write");
02581 }
02582 
/* Public API: block until fd is readable. */
void
rb_thread_wait_fd(int fd)
{
    rb_thread_wait_fd_rw(fd, 1);
}
02588 
02589 int
02590 rb_thread_fd_writable(int fd)
02591 {
02592     rb_thread_wait_fd_rw(fd, 0);
02593     return TRUE;
02594 }
02595 
02596 int
02597 rb_thread_select(int max, fd_set * read, fd_set * write, fd_set * except,
02598                  struct timeval *timeout)
02599 {
02600     if (!read && !write && !except) {
02601         if (!timeout) {
02602             rb_thread_sleep_forever();
02603             return 0;
02604         }
02605         rb_thread_wait_for(*timeout);
02606         return 0;
02607     }
02608     else {
02609         return do_select(max, read, write, except, timeout);
02610     }
02611 }
02612 
02613 
02614 int
02615 rb_thread_fd_select(int max, rb_fdset_t * read, rb_fdset_t * write, rb_fdset_t * except,
02616                     struct timeval *timeout)
02617 {
02618     fd_set *r = NULL, *w = NULL, *e = NULL;
02619 
02620     if (!read && !write && !except) {
02621         if (!timeout) {
02622             rb_thread_sleep_forever();
02623             return 0;
02624         }
02625         rb_thread_wait_for(*timeout);
02626         return 0;
02627     }
02628 
02629     if (read) {
02630         rb_fd_resize(max - 1, read);
02631         r = rb_fd_ptr(read);
02632     }
02633     if (write) {
02634         rb_fd_resize(max - 1, write);
02635         w = rb_fd_ptr(write);
02636     }
02637     if (except) {
02638         rb_fd_resize(max - 1, except);
02639         e = rb_fd_ptr(except);
02640     }
02641     return do_select(max, r, w, e, timeout);
02642 }
02643 
02644 
02645 /*
02646  * for GC
02647  */
02648 
02649 #ifdef USE_CONSERVATIVE_STACK_END
/*
 * Report the current machine-stack position to the conservative GC by
 * taking the address of a local variable.
 */
void
rb_gc_set_stack_end(VALUE **stack_end_p)
{
    VALUE stack_end;
    /* Intentionally returns the address of a local: the caller must use
     * it only as a stack boundary marker, never dereference it after this
     * frame is gone. */
    *stack_end_p = &stack_end;
}
02656 #endif
02657 
/*
 * Snapshot the machine context of th so the GC can scan values that live
 * only in CPU registers.
 */
void
rb_gc_save_machine_context(rb_thread_t *th)
{
    FLUSH_REGISTER_WINDOWS;
#ifdef __ia64
    /* ia64 keeps a second (register backing store) stack; record its end. */
    th->machine_register_stack_end = rb_ia64_bsp();
#endif
    /* setjmp is used purely to spill registers into th->machine_regs;
     * the jmp_buf is never longjmp'd to. */
    setjmp(th->machine_regs);
}
02667 
02668 /*
02669  *
02670  */
02671 
02672 int rb_get_next_signal(void);
02673 
/*
 * Deliver a pending OS signal to the main thread: record the signal
 * number in mth->exec_signal and interrupt the thread so it processes it.
 */
void
rb_threadptr_check_signal(rb_thread_t *mth)
{
    int sig;

    /* mth must be main_thread */

    /* Skip when a previous signal is still unprocessed. */
    if (!mth->exec_signal && (sig = rb_get_next_signal()) > 0) {
        enum rb_thread_status prev_status = mth->status;
        thread_debug("main_thread: %s, sig: %d\n",
                     thread_status_name(prev_status), sig);
        mth->exec_signal = sig;
        /* Temporarily force the thread runnable (unless killed) so the
         * interrupt is noticed, then restore its previous status. */
        if (mth->status != THREAD_KILLED) mth->status = THREAD_RUNNABLE;
        rb_threadptr_interrupt(mth);
        mth->status = prev_status;
    }
}
02691 
/*
 * Body of the periodic timer thread: request a timer interrupt on the
 * running thread (drives time-slicing) and forward pending signals to the
 * main thread.
 */
static void
timer_thread_function(void *arg)
{
    rb_vm_t *vm = GET_VM(); /* TODO: fix me for Multi-VM */

    /* for time slice */
    RUBY_VM_SET_TIMER_INTERRUPT(vm->running_thread);

    /* check signal */
    rb_threadptr_check_signal(vm->main_thread);

#if 0
    /* prove profiler */
    if (vm->prove_profile.enable) {
        rb_thread_t *th = vm->running_thread;

        if (vm->during_gc) {
            /* GC prove profiling */
        }
    }
#endif
}
02714 
02715 void
02716 rb_thread_stop_timer_thread(void)
02717 {
02718     if (timer_thread_id && native_stop_timer_thread()) {
02719         native_reset_timer_thread();
02720     }
02721 }
02722 
/* Reset timer-thread bookkeeping without stopping it first. */
void
rb_thread_reset_timer_thread(void)
{
    native_reset_timer_thread();
}
02728 
02729 void
02730 rb_thread_start_timer_thread(void)
02731 {
02732     system_working = 1;
02733     rb_thread_create_timer_thread();
02734 }
02735 
02736 static int
02737 clear_coverage_i(st_data_t key, st_data_t val, st_data_t dummy)
02738 {
02739     int i;
02740     VALUE lines = (VALUE)val;
02741 
02742     for (i = 0; i < RARRAY_LEN(lines); i++) {
02743         if (RARRAY_PTR(lines)[i] != Qnil) {
02744             RARRAY_PTR(lines)[i] = INT2FIX(0);
02745         }
02746     }
02747     return ST_CONTINUE;
02748 }
02749 
02750 static void
02751 clear_coverage(void)
02752 {
02753     extern VALUE rb_get_coverages(void);
02754     VALUE coverages = rb_get_coverages();
02755     if (RTEST(coverages)) {
02756         st_foreach(RHASH_TBL(coverages), clear_coverage_i, 0);
02757     }
02758 }
02759 
/*
 * Shared post-fork fixup: promote the calling thread to main thread,
 * reinitialize the GVL, run `atfork` over every previously living thread,
 * and rebuild living_threads to contain only the current thread.
 */
static void
rb_thread_atfork_internal(int (*atfork)(st_data_t, st_data_t, st_data_t))
{
    rb_thread_t *th = GET_THREAD();
    rb_vm_t *vm = th->vm;
    VALUE thval = th->self;
    vm->main_thread = th;

    /* The GVL may have been held by a thread that does not exist in the
     * child, so it is reinitialized rather than unlocked. */
    native_mutex_reinitialize_atfork(&th->vm->global_vm_lock);
    st_foreach(vm->living_threads, atfork, (st_data_t)th);
    st_clear(vm->living_threads);
    st_insert(vm->living_threads, thval, (st_data_t)th->thread_id);
    vm->sleeper = 0;
    clear_coverage();
}
02775 
02776 static int
02777 terminate_atfork_i(st_data_t key, st_data_t val, st_data_t current_th)
02778 {
02779     VALUE thval = key;
02780     rb_thread_t *th;
02781     GetThreadPtr(thval, th);
02782 
02783     if (th != (rb_thread_t *)current_th) {
02784         if (th->keeping_mutexes) {
02785             rb_mutex_abandon_all(th->keeping_mutexes);
02786         }
02787         th->keeping_mutexes = NULL;
02788         thread_cleanup_func(th, TRUE);
02789     }
02790     return ST_CONTINUE;
02791 }
02792 
02793 void
02794 rb_thread_atfork(void)
02795 {
02796     rb_thread_atfork_internal(terminate_atfork_i);
02797     GET_THREAD()->join_list_head = 0;
02798     rb_reset_random_seed();
02799 }
02800 
02801 static int
02802 terminate_atfork_before_exec_i(st_data_t key, st_data_t val, st_data_t current_th)
02803 {
02804     VALUE thval = key;
02805     rb_thread_t *th;
02806     GetThreadPtr(thval, th);
02807 
02808     if (th != (rb_thread_t *)current_th) {
02809         thread_cleanup_func_before_exec(th);
02810     }
02811     return ST_CONTINUE;
02812 }
02813 
02814 void
02815 rb_thread_atfork_before_exec(void)
02816 {
02817     rb_thread_atfork_internal(terminate_atfork_before_exec_i);
02818 }
02819 
/* Per-ThreadGroup state, wrapped in a typed T_DATA object. */
struct thgroup {
    int enclosed;  /* non-zero once ThreadGroup#enclose has been called */
    VALUE group;   /* back-reference to the wrapping Ruby object */
};
02824 
02825 static size_t
02826 thgroup_memsize(const void *ptr)
02827 {
02828     return ptr ? sizeof(struct thgroup) : 0;
02829 }
02830 
/* Type descriptor for ThreadGroup's internal data (no mark function;
 * freed with the default typed-data free). */
static const rb_data_type_t thgroup_data_type = {
    "thgroup",
    NULL, RUBY_TYPED_DEFAULT_FREE, thgroup_memsize,
};
02835 
02836 /*
02837  * Document-class: ThreadGroup
02838  *
02839  *  <code>ThreadGroup</code> provides a means of keeping track of a number of
02840  *  threads as a group. A <code>Thread</code> can belong to only one
02841  *  <code>ThreadGroup</code> at a time; adding a thread to a new group will
02842  *  remove it from any previous group.
02843  *
02844  *  Newly created threads belong to the same group as the thread from which they
02845  *  were created.
02846  */
02847 
02848 static VALUE
02849 thgroup_s_alloc(VALUE klass)
02850 {
02851     VALUE group;
02852     struct thgroup *data;
02853 
02854     group = TypedData_Make_Struct(klass, struct thgroup, &thgroup_data_type, data);
02855     data->enclosed = 0;
02856     data->group = group;
02857 
02858     return group;
02859 }
02860 
/* Carries the result array and target group through st_foreach. */
struct thgroup_list_params {
    VALUE ary;    /* accumulates matching Thread objects */
    VALUE group;  /* the ThreadGroup being listed */
};
02865 
02866 static int
02867 thgroup_list_i(st_data_t key, st_data_t val, st_data_t data)
02868 {
02869     VALUE thread = (VALUE)key;
02870     VALUE ary = ((struct thgroup_list_params *)data)->ary;
02871     VALUE group = ((struct thgroup_list_params *)data)->group;
02872     rb_thread_t *th;
02873     GetThreadPtr(thread, th);
02874 
02875     if (th->thgroup == group) {
02876         rb_ary_push(ary, thread);
02877     }
02878     return ST_CONTINUE;
02879 }
02880 
02881 /*
02882  *  call-seq:
02883  *     thgrp.list   -> array
02884  *
02885  *  Returns an array of all existing <code>Thread</code> objects that belong to
02886  *  this group.
02887  *
02888  *     ThreadGroup::Default.list   #=> [#<Thread:0x401bdf4c run>]
02889  */
02890 
02891 static VALUE
02892 thgroup_list(VALUE group)
02893 {
02894     VALUE ary = rb_ary_new();
02895     struct thgroup_list_params param;
02896 
02897     param.ary = ary;
02898     param.group = group;
02899     st_foreach(GET_THREAD()->vm->living_threads, thgroup_list_i, (st_data_t) & param);
02900     return ary;
02901 }
02902 
02903 
02904 /*
02905  *  call-seq:
02906  *     thgrp.enclose   -> thgrp
02907  *
02908  *  Prevents threads from being added to or removed from the receiving
02909  *  <code>ThreadGroup</code>. New threads can still be started in an enclosed
02910  *  <code>ThreadGroup</code>.
02911  *
02912  *     ThreadGroup::Default.enclose        #=> #<ThreadGroup:0x4029d914>
02913  *     thr = Thread::new { Thread.stop }   #=> #<Thread:0x402a7210 sleep>
02914  *     tg = ThreadGroup::new               #=> #<ThreadGroup:0x402752d4>
02915  *     tg.add thr
02916  *
02917  *  <em>produces:</em>
02918  *
02919  *     ThreadError: can't move from the enclosed thread group
02920  */
02921 
02922 static VALUE
02923 thgroup_enclose(VALUE group)
02924 {
02925     struct thgroup *data;
02926 
02927     TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
02928     data->enclosed = 1;
02929 
02930     return group;
02931 }
02932 
02933 
02934 /*
02935  *  call-seq:
02936  *     thgrp.enclosed?   -> true or false
02937  *
02938  *  Returns <code>true</code> if <em>thgrp</em> is enclosed. See also
02939  *  ThreadGroup#enclose.
02940  */
02941 
02942 static VALUE
02943 thgroup_enclosed_p(VALUE group)
02944 {
02945     struct thgroup *data;
02946 
02947     TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
02948     if (data->enclosed)
02949         return Qtrue;
02950     return Qfalse;
02951 }
02952 
02953 
02954 /*
02955  *  call-seq:
02956  *     thgrp.add(thread)   -> thgrp
02957  *
02958  *  Adds the given <em>thread</em> to this group, removing it from any other
02959  *  group to which it may have previously belonged.
02960  *
02961  *     puts "Initial group is #{ThreadGroup::Default.list}"
02962  *     tg = ThreadGroup.new
02963  *     t1 = Thread.new { sleep }
02964  *     t2 = Thread.new { sleep }
02965  *     puts "t1 is #{t1}"
02966  *     puts "t2 is #{t2}"
02967  *     tg.add(t1)
02968  *     puts "Initial group now #{ThreadGroup::Default.list}"
02969  *     puts "tg group now #{tg.list}"
02970  *
02971  *  <em>produces:</em>
02972  *
02973  *     Initial group is #<Thread:0x401bdf4c>
02974  *     t1 is #<Thread:0x401b3c90>
02975  *     t2 is #<Thread:0x401b3c18>
02976  *     Initial group now #<Thread:0x401b3c18>#<Thread:0x401bdf4c>
02977  *     tg group now #<Thread:0x401b3c90>
02978  */
02979 
static VALUE
thgroup_add(VALUE group, VALUE thread)
{
    rb_thread_t *th;
    struct thgroup *data;

    rb_secure(4);
    GetThreadPtr(thread, th);

    /* Destination group must accept new members. */
    if (OBJ_FROZEN(group)) {
        rb_raise(rb_eThreadError, "can't move to the frozen thread group");
    }
    TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
    if (data->enclosed) {
        rb_raise(rb_eThreadError, "can't move to the enclosed thread group");
    }

    /* A thread with no group cannot be moved; returns nil rather than
     * raising. */
    if (!th->thgroup) {
        return Qnil;
    }

    /* The source group must also allow the thread to leave. */
    if (OBJ_FROZEN(th->thgroup)) {
        rb_raise(rb_eThreadError, "can't move from the frozen thread group");
    }
    TypedData_Get_Struct(th->thgroup, struct thgroup, &thgroup_data_type, data);
    if (data->enclosed) {
        rb_raise(rb_eThreadError,
                 "can't move from the enclosed thread group");
    }

    th->thgroup = group;
    return group;
}
03013 
03014 
03015 /*
03016  *  Document-class: Mutex
03017  *
03018  *  Mutex implements a simple semaphore that can be used to coordinate access to
03019  *  shared data from multiple concurrent threads.
03020  *
03021  *  Example:
03022  *
03023  *    require 'thread'
03024  *    semaphore = Mutex.new
03025  *
03026  *    a = Thread.new {
03027  *      semaphore.synchronize {
03028  *        # access shared resource
03029  *      }
03030  *    }
03031  *
03032  *    b = Thread.new {
03033  *      semaphore.synchronize {
03034  *        # access shared resource
03035  *      }
03036  *    }
03037  *
03038  */
03039 
03040 #define GetMutexPtr(obj, tobj) \
03041     TypedData_Get_Struct(obj, mutex_t, &mutex_data_type, tobj)
03042 
03043 static const char *mutex_unlock(mutex_t *mutex, rb_thread_t volatile *th);
03044 
03045 #define mutex_mark NULL
03046 
/* GC free callback for Mutex: release a still-held lock before
 * destroying the native primitives. */
static void
mutex_free(void *ptr)
{
    if (ptr) {
        mutex_t *mutex = ptr;
        if (mutex->th) {
            /* rb_warn("free locked mutex"); */
            /* A locked mutex being collected is unlocked first; failure
             * here indicates corrupted VM state, hence rb_bug. */
            const char *err = mutex_unlock(mutex, mutex->th);
            if (err) rb_bug("%s", err);
        }
        native_mutex_destroy(&mutex->lock);
        native_cond_destroy(&mutex->cond);
    }
    ruby_xfree(ptr);
}
03062 
03063 static size_t
03064 mutex_memsize(const void *ptr)
03065 {
03066     return ptr ? sizeof(mutex_t) : 0;
03067 }
03068 
/* Type descriptor for Mutex's internal data (mutex_mark is NULL: the
 * struct holds no Ruby VALUEs needing marking). */
static const rb_data_type_t mutex_data_type = {
    "mutex",
    mutex_mark, mutex_free, mutex_memsize,
};
03073 
03074 static VALUE
03075 mutex_alloc(VALUE klass)
03076 {
03077     VALUE volatile obj;
03078     mutex_t *mutex;
03079 
03080     obj = TypedData_Make_Struct(klass, mutex_t, &mutex_data_type, mutex);
03081     native_mutex_initialize(&mutex->lock);
03082     native_cond_initialize(&mutex->cond);
03083     return obj;
03084 }
03085 
03086 /*
03087  *  call-seq:
03088  *     Mutex.new   -> mutex
03089  *
03090  *  Creates a new Mutex
03091  */
03092 static VALUE
03093 mutex_initialize(VALUE self)
03094 {
03095     return self;
03096 }
03097 
03098 VALUE
03099 rb_mutex_new(void)
03100 {
03101     return mutex_alloc(rb_cMutex);
03102 }
03103 
03104 /*
03105  * call-seq:
03106  *    mutex.locked?  -> true or false
03107  *
03108  * Returns +true+ if this lock is currently held by some thread.
03109  */
03110 VALUE
03111 rb_mutex_locked_p(VALUE self)
03112 {
03113     mutex_t *mutex;
03114     GetMutexPtr(self, mutex);
03115     return mutex->th ? Qtrue : Qfalse;
03116 }
03117 
03118 static void
03119 mutex_locked(rb_thread_t *th, VALUE self)
03120 {
03121     mutex_t *mutex;
03122     GetMutexPtr(self, mutex);
03123 
03124     if (th->keeping_mutexes) {
03125         mutex->next_mutex = th->keeping_mutexes;
03126     }
03127     th->keeping_mutexes = mutex;
03128 }
03129 
03130 /*
03131  * call-seq:
03132  *    mutex.try_lock  -> true or false
03133  *
03134  * Attempts to obtain the lock and returns immediately. Returns +true+ if the
03135  * lock was granted.
03136  */
VALUE
rb_mutex_trylock(VALUE self)
{
    mutex_t *mutex;
    VALUE locked = Qfalse;
    GetMutexPtr(self, mutex);

    /* mutex->th (the owner) is read and written only while holding the
     * low-level native lock. */
    native_mutex_lock(&mutex->lock);
    if (mutex->th == 0) {
        /* Unowned: claim it for the current thread. */
        mutex->th = GET_THREAD();
        locked = Qtrue;

        /* Track it on the thread's list of held mutexes. */
        mutex_locked(GET_THREAD(), self);
    }
    native_mutex_unlock(&mutex->lock);

    return locked;
}
03155 
/*
 * Wait on the mutex's condition variable until `th` acquires the mutex.
 * Runs inside a blocking region.  Returns 0 on clean acquisition, 1 when
 * the wait was interrupted, and 2 when `last_thread` was set (the caller
 * detected that every other thread is asleep, so waiting could deadlock).
 */
static int
lock_func(rb_thread_t *th, mutex_t *mutex, int last_thread)
{
    int interrupted = 0;
#if 0 /* for debug */
    native_thread_yield();
#endif

    native_mutex_lock(&mutex->lock);
    th->transition_for_lock = 0;
    /* The comma expression claims the mutex (mutex->th = th) and exits
     * the loop the moment it is observed unowned. */
    while (mutex->th || (mutex->th = th, 0)) {
        if (last_thread) {
            interrupted = 2;
            break;
        }

        /* cond_waiting/cond_notified bookkeeping pairs with
         * lock_interrupt's broadcast. */
        mutex->cond_waiting++;
        native_cond_wait(&mutex->cond, &mutex->lock);
        mutex->cond_notified--;

        if (RUBY_VM_INTERRUPTED(th)) {
            interrupted = 1;
            break;
        }
    }
    th->transition_for_lock = 1;
    native_mutex_unlock(&mutex->lock);

    /* Give other threads a chance before the caller re-checks for
     * deadlock. */
    if (interrupted == 2) native_thread_yield();
#if 0 /* for debug */
    native_thread_yield();
#endif

    return interrupted;
}
03191 
03192 static void
03193 lock_interrupt(void *ptr)
03194 {
03195     mutex_t *mutex = (mutex_t *)ptr;
03196     native_mutex_lock(&mutex->lock);
03197     if (mutex->cond_waiting > 0) {
03198         native_cond_broadcast(&mutex->cond);
03199         mutex->cond_notified = mutex->cond_waiting;
03200         mutex->cond_waiting = 0;
03201     }
03202     native_mutex_unlock(&mutex->lock);
03203 }
03204 
03205 /*
03206  * call-seq:
03207  *    mutex.lock  -> self
03208  *
03209  * Attempts to grab the lock and waits if it isn't available.
03210  * Raises +ThreadError+ if +mutex+ was locked by the current thread.
03211  */
/*
 * Mutex#lock: take the mutex, blocking until available.
 * Fast path is rb_mutex_trylock; the slow path parks the thread in
 * lock_func inside a blocking region, with lock_interrupt installed
 * as the unblock function so Thread#kill / signals can wake it.
 * Raises ThreadError on recursive locking by the same thread.
 */
VALUE
rb_mutex_lock(VALUE self)
{

    if (rb_mutex_trylock(self) == Qfalse) {
        mutex_t *mutex;
        rb_thread_t *th = GET_THREAD();
        GetMutexPtr(self, mutex);

        if (mutex->th == GET_THREAD()) {
            rb_raise(rb_eThreadError, "deadlock; recursive locking");
        }

        while (mutex->th != th) {
            int interrupted;
            enum rb_thread_status prev_status = th->status;
            int last_thread = 0;
            struct rb_unblock_callback oldubf;

            set_unblock_function(th, lock_interrupt, mutex, &oldubf);
            th->status = THREAD_STOPPED_FOREVER;
            th->vm->sleeper++;
            th->locking_mutex = self;
            /* if every living thread is now asleep, lock_func must not
             * block: it returns 2 so we can run the deadlock check */
            if (vm_living_thread_num(th->vm) == th->vm->sleeper) {
                last_thread = 1;
            }

            th->transition_for_lock = 1;
            BLOCKING_REGION_CORE({
                interrupted = lock_func(th, mutex, last_thread);
            });
            th->transition_for_lock = 0;
            remove_signal_thread_list(th);
            reset_unblock_function(th, &oldubf);

            th->locking_mutex = Qfalse;
            if (mutex->th && interrupted == 2) {
                rb_check_deadlock(th->vm);
            }
            /* restore status only if nothing else changed it meanwhile */
            if (th->status == THREAD_STOPPED_FOREVER) {
                th->status = prev_status;
            }
            th->vm->sleeper--;

            /* lock_func may have claimed the mutex for us */
            if (mutex->th == th) mutex_locked(th, self);

            if (interrupted) {
                RUBY_VM_CHECK_INTS();
            }
        }
    }
    return self;
}
03265 
/*
 * Release mutex on behalf of th.  Returns NULL on success, or a static
 * error-message string when th does not own the mutex (callers raise it).
 * On success also unlinks the mutex from th->keeping_mutexes and signals
 * one waiter, if any.
 */
static const char *
mutex_unlock(mutex_t *mutex, rb_thread_t volatile *th)
{
    const char *err = NULL;
    mutex_t *th_mutex;

    native_mutex_lock(&mutex->lock);

    if (mutex->th == 0) {
        err = "Attempt to unlock a mutex which is not locked";
    }
    else if (mutex->th != th) {
        err = "Attempt to unlock a mutex which is locked by another thread";
    }
    else {
        mutex->th = 0;
        if (mutex->cond_waiting > 0) {
            /* waiting thread */
            native_cond_signal(&mutex->cond);
            mutex->cond_waiting--;
            mutex->cond_notified++;
        }
    }

    native_mutex_unlock(&mutex->lock);

    if (!err) {
        /* unlink mutex from th's keeping_mutexes list; it must be present,
         * so the unguarded walk below is safe */
        th_mutex = th->keeping_mutexes;
        if (th_mutex == mutex) {
            th->keeping_mutexes = mutex->next_mutex;
        }
        else {
            while (1) {
                mutex_t *tmp_mutex;
                tmp_mutex = th_mutex->next_mutex;
                if (tmp_mutex == mutex) {
                    th_mutex->next_mutex = tmp_mutex->next_mutex;
                    break;
                }
                th_mutex = tmp_mutex;
            }
        }
        mutex->next_mutex = NULL;
    }

    return err;
}
03313 
03314 /*
03315  * call-seq:
03316  *    mutex.unlock    -> self
03317  *
03318  * Releases the lock.
03319  * Raises +ThreadError+ if +mutex+ wasn't locked by the current thread.
03320  */
03321 VALUE
03322 rb_mutex_unlock(VALUE self)
03323 {
03324     const char *err;
03325     mutex_t *mutex;
03326     GetMutexPtr(self, mutex);
03327 
03328     err = mutex_unlock(mutex, GET_THREAD());
03329     if (err) rb_raise(rb_eThreadError, "%s", err);
03330 
03331     return self;
03332 }
03333 
03334 static void
03335 rb_mutex_unlock_all(mutex_t *mutexes, rb_thread_t *th)
03336 {
03337     const char *err;
03338     mutex_t *mutex;
03339 
03340     while (mutexes) {
03341         mutex = mutexes;
03342         /* rb_warn("mutex #<%p> remains to be locked by terminated thread",
03343                 mutexes); */
03344         mutexes = mutex->next_mutex;
03345         err = mutex_unlock(mutex, th);
03346         if (err) rb_bug("invalid keeping_mutexes: %s", err);
03347     }
03348 }
03349 
03350 static void
03351 rb_mutex_abandon_all(mutex_t *mutexes)
03352 {
03353     mutex_t *mutex;
03354 
03355     while (mutexes) {
03356         mutex = mutexes;
03357         mutexes = mutex->next_mutex;
03358         mutex->th = 0;
03359         mutex->next_mutex = 0;
03360     }
03361 }
03362 
/* rb_ensure body for Mutex#sleep with no timeout: sleep until woken.
 * The `time` argument is unused (rb_ensure requires this signature). */
static VALUE
rb_mutex_sleep_forever(VALUE time)
{
    rb_thread_sleep_deadly();
    return Qnil;
}
03369 
/* rb_ensure body for Mutex#sleep with a timeout: `time` smuggles a
 * pointer to a struct timeval through the VALUE parameter. */
static VALUE
rb_mutex_wait_for(VALUE time)
{
    const struct timeval *t = (struct timeval *)time;
    rb_thread_wait_for(*t);
    return Qnil;
}
03377 
03378 VALUE
03379 rb_mutex_sleep(VALUE self, VALUE timeout)
03380 {
03381     time_t beg, end;
03382     struct timeval t;
03383 
03384     if (!NIL_P(timeout)) {
03385         t = rb_time_interval(timeout);
03386     }
03387     rb_mutex_unlock(self);
03388     beg = time(0);
03389     if (NIL_P(timeout)) {
03390         rb_ensure(rb_mutex_sleep_forever, Qnil, rb_mutex_lock, self);
03391     }
03392     else {
03393         rb_ensure(rb_mutex_wait_for, (VALUE)&t, rb_mutex_lock, self);
03394     }
03395     end = time(0) - beg;
03396     return INT2FIX(end);
03397 }
03398 
03399 /*
03400  * call-seq:
03401  *    mutex.sleep(timeout = nil)    -> number
03402  *
03403  * Releases the lock and sleeps +timeout+ seconds if it is given and
03404  * non-nil or forever.  Raises +ThreadError+ if +mutex+ wasn't locked by
03405  * the current thread.
03406  */
/* Ruby-level Mutex#sleep(timeout = nil): parse the optional argument
 * and delegate to rb_mutex_sleep. */
static VALUE
mutex_sleep(int argc, VALUE *argv, VALUE self)
{
    VALUE timeout;

    rb_scan_args(argc, argv, "01", &timeout);
    return rb_mutex_sleep(self, timeout);
}
03415 
03416 /*
03417  * call-seq:
03418  *    mutex.synchronize { ... }    -> result of the block
03419  *
03420  * Obtains a lock, runs the block, and releases the lock when the block
03421  * completes.  See the example under +Mutex+.
03422  */
03423 
/* C API for Mutex#synchronize: lock, call func(arg), and guarantee the
 * unlock via rb_ensure even if func raises or throws. */
VALUE
rb_mutex_synchronize(VALUE mutex, VALUE (*func)(VALUE arg), VALUE arg)
{
    rb_mutex_lock(mutex);
    return rb_ensure(func, arg, rb_mutex_unlock, mutex);
}
03430 
03431 /*
03432  * Document-class: Barrier
03433  */
/* GC mark function for Barrier: the wrapped pointer is itself a VALUE
 * (the internal Mutex), so mark it directly. */
static void
barrier_mark(void *ptr)
{
    rb_gc_mark((VALUE)ptr);
}
03439 
/* Typed-data descriptor for Barrier; no free/size callbacks because the
 * payload is a GC-managed VALUE, not malloc'ed memory. */
static const rb_data_type_t barrier_data_type = {
    "barrier",
    barrier_mark, 0, 0,
};
03444 
/* Allocate a Barrier: a typed-data object whose payload is a fresh
 * internal Mutex created by mutex_alloc. */
static VALUE
barrier_alloc(VALUE klass)
{
    return TypedData_Wrap_Struct(klass, &barrier_data_type, (void *)mutex_alloc(0));
}
03450 
/* Extract the internal Mutex VALUE from a Barrier, type-checking it. */
#define GetBarrierPtr(obj) (VALUE)rb_check_typeddata(obj, &barrier_data_type)
03452 
/* Create a new Barrier with its internal mutex already locked by the
 * creating thread; waiters in rb_barrier_wait block until release. */
VALUE
rb_barrier_new(void)
{
    VALUE barrier = barrier_alloc(rb_cBarrier);
    rb_mutex_lock((VALUE)DATA_PTR(barrier));
    return barrier;
}
03460 
/*
 * Wait on the barrier.  Returns Qfalse without blocking when the barrier
 * is destroyed or the current thread already owns it; otherwise blocks on
 * the internal mutex.  After waking: Qtrue if the barrier is still live
 * (we now own it), Qfalse if it was destroyed while we waited (the lock
 * is released again in that case).
 */
VALUE
rb_barrier_wait(VALUE self)
{
    VALUE mutex = GetBarrierPtr(self);
    mutex_t *m;

    if (!mutex) return Qfalse;
    GetMutexPtr(mutex, m);
    if (m->th == GET_THREAD()) return Qfalse;
    rb_mutex_lock(mutex);
    /* re-check: rb_barrier_destroy may have run while we were blocked */
    if (DATA_PTR(self)) return Qtrue;
    rb_mutex_unlock(mutex);
    return Qfalse;
}
03475 
/* Release the barrier by unlocking its internal mutex, waking one waiter.
 * Raises ThreadError if the caller does not hold it. */
VALUE
rb_barrier_release(VALUE self)
{
    return rb_mutex_unlock(GetBarrierPtr(self));
}
03481 
/* Destroy the barrier: clear the data pointer (so waiters see it dead)
 * and unlock the internal mutex to wake them. */
VALUE
rb_barrier_destroy(VALUE self)
{
    VALUE mutex = GetBarrierPtr(self);
    DATA_PTR(self) = 0;
    return rb_mutex_unlock(mutex);
}
03489 
/* variables for recursive traversals */
/* thread-local-storage key under which each thread's recursion table lives */
static ID recursive_key;
03492 
03493 /*
03494  * Returns the current "recursive list" used to detect recursion.
03495  * This list is a hash table, unique for the current thread and for
03496  * the current __callee__.
03497  */
03498 
static VALUE
recursive_list_access(void)
{
    /* per-thread table: thread-local hash keyed by __callee__ symbol */
    volatile VALUE hash = rb_thread_local_aref(rb_thread_current(), recursive_key);
    VALUE sym = ID2SYM(rb_frame_this_func());
    VALUE list;
    /* (re)create the outer hash if missing or clobbered with a non-hash */
    if (NIL_P(hash) || TYPE(hash) != T_HASH) {
        hash = rb_hash_new();
        OBJ_UNTRUST(hash);
        rb_thread_local_aset(rb_thread_current(), recursive_key, hash);
        list = Qnil;
    }
    else {
        list = rb_hash_aref(hash, sym);
    }
    /* same for the per-method inner list */
    if (NIL_P(list) || TYPE(list) != T_HASH) {
        list = rb_hash_new();
        OBJ_UNTRUST(list);
        rb_hash_aset(hash, sym, list);
    }
    return list;
}
03521 
03522 /*
03523  * Returns Qtrue iff obj_id (or the pair <obj, paired_obj>) is already
03524  * in the recursion list.
03525  * Assumes the recursion list is valid.
03526  */
03527 
03528 static VALUE
03529 recursive_check(VALUE list, VALUE obj_id, VALUE paired_obj_id)
03530 {
03531     VALUE pair_list = rb_hash_lookup2(list, obj_id, Qundef);
03532     if (pair_list == Qundef)
03533         return Qfalse;
03534     if (paired_obj_id) {
03535         if (TYPE(pair_list) != T_HASH) {
03536         if (pair_list != paired_obj_id)
03537             return Qfalse;
03538         }
03539         else {
03540         if (NIL_P(rb_hash_lookup(pair_list, paired_obj_id)))
03541             return Qfalse;
03542         }
03543     }
03544     return Qtrue;
03545 }
03546 
03547 /*
03548  * Pushes obj_id (or the pair <obj_id, paired_obj_id>) in the recursion list.
03549  * For a single obj_id, it sets list[obj_id] to Qtrue.
03550  * For a pair, it sets list[obj_id] to paired_obj_id if possible,
03551  * otherwise list[obj_id] becomes a hash like:
03552  *   {paired_obj_id_1 => true, paired_obj_id_2 => true, ... }
03553  * Assumes the recursion list is valid.
03554  */
03555 
03556 static void
03557 recursive_push(VALUE list, VALUE obj, VALUE paired_obj)
03558 {
03559     VALUE pair_list;
03560 
03561     if (!paired_obj) {
03562         rb_hash_aset(list, obj, Qtrue);
03563     }
03564     else if ((pair_list = rb_hash_lookup2(list, obj, Qundef)) == Qundef) {
03565         rb_hash_aset(list, obj, paired_obj);
03566     }
03567     else {
03568         if (TYPE(pair_list) != T_HASH){
03569             VALUE other_paired_obj = pair_list;
03570             pair_list = rb_hash_new();
03571             OBJ_UNTRUST(pair_list);
03572             rb_hash_aset(pair_list, other_paired_obj, Qtrue);
03573             rb_hash_aset(list, obj, pair_list);
03574         }
03575         rb_hash_aset(pair_list, paired_obj, Qtrue);
03576     }
03577 }
03578 
03579 /*
03580  * Pops obj_id (or the pair <obj_id, paired_obj_id>) from the recursion list.
03581  * For a pair, if list[obj_id] is a hash, then paired_obj_id is
03582  * removed from the hash and no attempt is made to simplify
03583  * list[obj_id] from {only_one_paired_id => true} to only_one_paired_id
03584  * Assumes the recursion list is valid.
03585  */
03586 
static void
recursive_pop(VALUE list, VALUE obj, VALUE paired_obj)
{
    if (paired_obj) {
        VALUE pair_list = rb_hash_lookup2(list, obj, Qundef);
        /* popping a pair that was never pushed indicates corrupted state */
        if (pair_list == Qundef) {
            VALUE symname = rb_inspect(ID2SYM(rb_frame_this_func()));
            VALUE thrname = rb_inspect(rb_thread_current());
            rb_raise(rb_eTypeError, "invalid inspect_tbl pair_list for %s in %s",
                     StringValuePtr(symname), StringValuePtr(thrname));
        }
        if (TYPE(pair_list) == T_HASH) {
            rb_hash_delete(pair_list, paired_obj);
            if (!RHASH_EMPTY_P(pair_list)) {
                return; /* keep the hash until it is empty */
            }
        }
    }
    rb_hash_delete(list, obj);
}
03607 
/* Bundled state for exec_recursive and its rb_catch_obj callback. */
struct exec_recursive_params {
    VALUE (*func) (VALUE, VALUE, int);  /* user callback: func(obj, arg, recursive) */
    VALUE list;                         /* recursion list from recursive_list_access */
    VALUE obj;                          /* object passed through to func */
    VALUE objid;                        /* rb_obj_id(obj), the recursion key */
    VALUE pairid;                       /* object id of the pair partner, or 0 */
    VALUE arg;                          /* opaque user argument */
};
03616 
/*
 * Run p->func with <objid, pairid> pushed on the recursion list, popping
 * it again even if func raises/throws (PUSH_TAG/EXEC_TAG capture the jump,
 * the pop runs, then JUMP_TAG re-raises).
 */
static VALUE
exec_recursive_i(VALUE tag, struct exec_recursive_params *p)
{
    VALUE result = Qundef;
    int state;

    recursive_push(p->list, p->objid, p->pairid);
    PUSH_TAG();
    if ((state = EXEC_TAG()) == 0) {
        result = (*p->func)(p->obj, p->arg, FALSE);
    }
    POP_TAG();
    recursive_pop(p->list, p->objid, p->pairid);
    if (state)
        JUMP_TAG(state);
    return result;
}
03634 
03635 /*
03636  * Calls func(obj, arg, recursive), where recursive is non-zero if the
03637  * current method is called recursively on obj, or on the pair <obj, pairid>
03638  * If outer is 0, then the innermost func will be called with recursive set
03639  * to Qtrue, otherwise the outermost func will be called. In the latter case,
03640  * all inner func are short-circuited by throw.
03641  * Implementation details: the value thrown is the recursive list which is
03642  * proper to the current method and unlikely to be catched anywhere else.
03643  * list[recursive_key] is used as a flag for the outermost call.
03644  */
03645 
static VALUE
exec_recursive(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE pairid, VALUE arg, int outer)
{
    VALUE result = Qundef;
    struct exec_recursive_params p;
    int outermost;
    p.list = recursive_list_access();
    p.objid = rb_obj_id(obj);
    p.obj = obj;
    p.pairid = pairid;
    p.arg = arg;
    /* ID2SYM(recursive_key) on the list doubles as the "outermost call
     * is active" marker */
    outermost = outer && !recursive_check(p.list, ID2SYM(recursive_key), 0);

    if (recursive_check(p.list, p.objid, pairid)) {
        /* recursion detected */
        if (outer && !outermost) {
            /* inner call in outer mode: unwind up to the outermost frame,
             * using the (unique) list object as the throw tag */
            rb_throw_obj(p.list, p.list);
        }
        return (*func)(obj, arg, TRUE);
    }
    else {
        p.func = func;

        if (outermost) {
            recursive_push(p.list, ID2SYM(recursive_key), 0);
            result = rb_catch_obj(p.list, exec_recursive_i, (VALUE)&p);
            recursive_pop(p.list, ID2SYM(recursive_key), 0);
            /* a caught throw delivers the list itself: recursion happened,
             * so call func with recursive=TRUE at the outermost level */
            if (result == p.list) {
                result = (*func)(obj, arg, TRUE);
            }
        }
        else {
            result = exec_recursive_i(0, &p);
        }
    }
    /* keep p alive across the calls above (defeats over-aggressive GC) */
    *(volatile struct exec_recursive_params *)&p;
    return result;
}
03683 
03684 /*
03685  * Calls func(obj, arg, recursive), where recursive is non-zero if the
03686  * current method is called recursively on obj
03687  */
03688 
/* Public API: detect recursion on obj alone, innermost call wins. */
VALUE
rb_exec_recursive(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE arg)
{
    return exec_recursive(func, obj, 0, arg, 0);
}
03694 
03695 /*
03696  * Calls func(obj, arg, recursive), where recursive is non-zero if the
03697  * current method is called recursively on the ordered pair <obj, paired_obj>
03698  */
03699 
/* Public API: detect recursion on the ordered pair <obj, paired_obj>
 * (used e.g. by recursive comparisons). */
VALUE
rb_exec_recursive_paired(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE paired_obj, VALUE arg)
{
    return exec_recursive(func, obj, rb_obj_id(paired_obj), arg, 0);
}
03705 
03706 /*
03707  * If recursion is detected on the current method and obj, the outermost
03708  * func will be called with (obj, arg, Qtrue). All inner func will be
03709  * short-circuited using throw.
03710  */
03711 
/* Public API: like rb_exec_recursive, but on recursion the OUTERMOST
 * call gets recursive=Qtrue and inner calls are short-circuited. */
VALUE
rb_exec_recursive_outer(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE arg)
{
    return exec_recursive(func, obj, 0, arg, 1);
}
03717 
03718 /* tracer */
03719 
/* Allocate and fill an event hook node.  NOTE: hook->next is left
 * unset here; every caller links the node into a list immediately. */
static rb_event_hook_t *
alloc_event_hook(rb_event_hook_func_t func, rb_event_flag_t events, VALUE data)
{
    rb_event_hook_t *hook = ALLOC(rb_event_hook_t);
    hook->func = func;
    hook->flag = events;
    hook->data = data;
    return hook;
}
03729 
03730 static void
03731 thread_reset_event_flags(rb_thread_t *th)
03732 {
03733     rb_event_hook_t *hook = th->event_hooks;
03734     rb_event_flag_t flag = th->event_flags & RUBY_EVENT_VM;
03735 
03736     while (hook) {
03737         flag |= hook->flag;
03738         hook = hook->next;
03739     }
03740     th->event_flags = flag;
03741 }
03742 
/* Prepend a new hook to th's hook list and refresh th->event_flags. */
static void
rb_threadptr_add_event_hook(rb_thread_t *th,
                         rb_event_hook_func_t func, rb_event_flag_t events, VALUE data)
{
    rb_event_hook_t *hook = alloc_event_hook(func, events, data);
    hook->next = th->event_hooks;
    th->event_hooks = hook;
    thread_reset_event_flags(th);
}
03752 
/* Convert a Thread VALUE to its rb_thread_t* (type-checked by
 * GetThreadPtr). */
static rb_thread_t *
thval2thread_t(VALUE thval)
{
    rb_thread_t *th;
    GetThreadPtr(thval, th);
    return th;
}
03760 
/* Public API: install a per-thread event hook on the Thread thval. */
void
rb_thread_add_event_hook(VALUE thval,
                         rb_event_hook_func_t func, rb_event_flag_t events, VALUE data)
{
    rb_threadptr_add_event_hook(thval2thread_t(thval), func, events, data);
}
03767 
03768 static int
03769 set_threads_event_flags_i(st_data_t key, st_data_t val, st_data_t flag)
03770 {
03771     VALUE thval = key;
03772     rb_thread_t *th;
03773     GetThreadPtr(thval, th);
03774 
03775     if (flag) {
03776         th->event_flags |= RUBY_EVENT_VM;
03777     }
03778     else {
03779         th->event_flags &= (~RUBY_EVENT_VM);
03780     }
03781     return ST_CONTINUE;
03782 }
03783 
/* Turn the RUBY_EVENT_VM bit on/off for every living thread. */
static void
set_threads_event_flags(int flag)
{
    st_foreach(GET_VM()->living_threads, set_threads_event_flags_i, (st_data_t) flag);
}
03789 
03790 static inline void
03791 exec_event_hooks(const rb_event_hook_t *hook, rb_event_flag_t flag, VALUE self, ID id, VALUE klass)
03792 {
03793     for (; hook; hook = hook->next) {
03794         if (flag & hook->flag) {
03795             (*hook->func)(flag, hook->data, self, id, klass);
03796         }
03797     }
03798 }
03799 
/*
 * Fire event hooks for `flag` on thread th: first its per-thread hooks,
 * then the VM-global hooks if th still has RUBY_EVENT_VM set.
 * th->errinfo is saved and restored so hook code cannot leak a pending
 * exception into the interrupted computation.
 */
void
rb_threadptr_exec_event_hooks(rb_thread_t *th, rb_event_flag_t flag, VALUE self, ID id, VALUE klass)
{
    const VALUE errinfo = th->errinfo;
    const rb_event_flag_t wait_event = th->event_flags;

    /* never trace the frozen core object */
    if (self == rb_mRubyVMFrozenCore) return;
    if (wait_event & flag) {
        exec_event_hooks(th->event_hooks, flag, self, id, klass);
    }
    if (wait_event & RUBY_EVENT_VM) {
        if (th->vm->event_hooks == NULL) {
            /* global hooks all removed: lazily clear the stale VM bit */
            th->event_flags &= (~RUBY_EVENT_VM);
        }
        else {
            exec_event_hooks(th->vm->event_hooks, flag, self, id, klass);
        }
    }
    th->errinfo = errinfo;
}
03820 
/* Public API: install a VM-global event hook and flag every living
 * thread to consult the global hook list (RUBY_EVENT_VM). */
void
rb_add_event_hook(rb_event_hook_func_t func, rb_event_flag_t events, VALUE data)
{
    rb_event_hook_t *hook = alloc_event_hook(func, events, data);
    rb_vm_t *vm = GET_VM();

    hook->next = vm->event_hooks;
    vm->event_hooks = hook;

    set_threads_event_flags(1);
}
03832 
/*
 * Remove from *root every hook whose function matches func (func == 0
 * removes all hooks).  Freed nodes are unlinked in a single pass.
 * NOTE(review): always returns -1 regardless of whether anything was
 * removed; callers appear to propagate it without inspecting success.
 */
static int
remove_event_hook(rb_event_hook_t **root, rb_event_hook_func_t func)
{
    rb_event_hook_t *prev = NULL, *hook = *root, *next;

    while (hook) {
        next = hook->next;
        if (func == 0 || hook->func == func) {
            if (prev) {
                prev->next = hook->next;
            }
            else {
                *root = hook->next;
            }
            xfree(hook);
        }
        else {
            prev = hook;
        }
        hook = next;
    }
    return -1;
}
03856 
/* Remove matching per-thread hooks and refresh th->event_flags.
 * NOTE(review): "revmove" is a typo for "remove"; kept because the
 * name is referenced elsewhere in this file. */
static int
rb_threadptr_revmove_event_hook(rb_thread_t *th, rb_event_hook_func_t func)
{
    int ret = remove_event_hook(&th->event_hooks, func);
    thread_reset_event_flags(th);
    return ret;
}
03864 
/* Public API: remove per-thread hooks matching func from Thread thval. */
int
rb_thread_remove_event_hook(VALUE thval, rb_event_hook_func_t func)
{
    return rb_threadptr_revmove_event_hook(thval2thread_t(thval), func);
}
03870 
/* Public API: remove matching VM-global hooks; when the global list
 * becomes empty (and was not already), clear RUBY_EVENT_VM on all
 * threads so they stop consulting it. */
int
rb_remove_event_hook(rb_event_hook_func_t func)
{
    rb_vm_t *vm = GET_VM();
    rb_event_hook_t *hook = vm->event_hooks;
    int ret = remove_event_hook(&vm->event_hooks, func);

    if (hook != NULL && vm->event_hooks == NULL) {
        set_threads_event_flags(0);
    }

    return ret;
}
03884 
/* st_foreach callback: strip ALL event hooks (func == 0) from one
 * living thread.  `val` and `flag` are unused. */
static int
clear_trace_func_i(st_data_t key, st_data_t val, st_data_t flag)
{
    rb_thread_t *th;
    GetThreadPtr((VALUE)key, th);
    rb_threadptr_revmove_event_hook(th, 0);
    return ST_CONTINUE;
}
03893 
/* Public API: remove every trace hook, per-thread and VM-global. */
void
rb_clear_trace_func(void)
{
    st_foreach(GET_VM()->living_threads, clear_trace_func_i, (st_data_t) 0);
    rb_remove_event_hook(0);
}
03900 
03901 static void call_trace_func(rb_event_flag_t, VALUE data, VALUE self, ID id, VALUE klass);
03902 
03903 /*
03904  *  call-seq:
03905  *     set_trace_func(proc)    -> proc
03906  *     set_trace_func(nil)     -> nil
03907  *
03908  *  Establishes _proc_ as the handler for tracing, or disables
03909  *  tracing if the parameter is +nil+. _proc_ takes up
03910  *  to six parameters: an event name, a filename, a line number, an
03911  *  object id, a binding, and the name of a class. _proc_ is
03912  *  invoked whenever an event occurs. Events are: <code>c-call</code>
03913  *  (call a C-language routine), <code>c-return</code> (return from a
03914  *  C-language routine), <code>call</code> (call a Ruby method),
03915  *  <code>class</code> (start a class or module definition),
03916  *  <code>end</code> (finish a class or module definition),
03917  *  <code>line</code> (execute code on a new line), <code>raise</code>
03918  *  (raise an exception), and <code>return</code> (return from a Ruby
03919  *  method). Tracing is disabled within the context of _proc_.
03920  *
03921  *      class Test
03922  *      def test
03923  *        a = 1
03924  *        b = 2
03925  *      end
03926  *      end
03927  *
03928  *      set_trace_func proc { |event, file, line, id, binding, classname|
03929  *         printf "%8s %s:%-2d %10s %8s\n", event, file, line, id, classname
03930  *      }
03931  *      t = Test.new
03932  *      t.test
03933  *
03934  *        line prog.rb:11               false
03935  *      c-call prog.rb:11        new    Class
03936  *      c-call prog.rb:11 initialize   Object
03937  *    c-return prog.rb:11 initialize   Object
03938  *    c-return prog.rb:11        new    Class
03939  *        line prog.rb:12               false
03940  *        call prog.rb:2        test     Test
03941  *        line prog.rb:3        test     Test
03942  *        line prog.rb:4        test     Test
03943  *      return prog.rb:4        test     Test
03944  */
03945 
03946 static VALUE
03947 set_trace_func(VALUE obj, VALUE trace)
03948 {
03949     rb_remove_event_hook(call_trace_func);
03950 
03951     if (NIL_P(trace)) {
03952         return Qnil;
03953     }
03954 
03955     if (!rb_obj_is_proc(trace)) {
03956         rb_raise(rb_eTypeError, "trace_func needs to be Proc");
03957     }
03958 
03959     rb_add_event_hook(call_trace_func, RUBY_EVENT_ALL, trace);
03960     return trace;
03961 }
03962 
/* Validate `trace` is a Proc and install it as a per-thread hook for
 * all events; raises TypeError otherwise. */
static void
thread_add_trace_func(rb_thread_t *th, VALUE trace)
{
    if (!rb_obj_is_proc(trace)) {
        rb_raise(rb_eTypeError, "trace_func needs to be Proc");
    }

    rb_threadptr_add_event_hook(th, call_trace_func, RUBY_EVENT_ALL, trace);
}
03972 
03973 /*
03974  *  call-seq:
03975  *     thr.add_trace_func(proc)    -> proc
03976  *
03977  *  Adds _proc_ as a handler for tracing.
03978  *  See <code>Thread#set_trace_func</code> and +set_trace_func+.
03979  */
03980 
/* Thread#add_trace_func(proc): add (not replace) a tracing handler on
 * this thread; returns the proc. */
static VALUE
thread_add_trace_func_m(VALUE obj, VALUE trace)
{
    rb_thread_t *th;
    GetThreadPtr(obj, th);
    thread_add_trace_func(th, trace);
    return trace;
}
03989 
03990 /*
03991  *  call-seq:
03992  *     thr.set_trace_func(proc)    -> proc
03993  *     thr.set_trace_func(nil)     -> nil
03994  *
03995  *  Establishes _proc_ on _thr_ as the handler for tracing, or
03996  *  disables tracing if the parameter is +nil+.
03997  *  See +set_trace_func+.
03998  */
03999 
/* Thread#set_trace_func(proc_or_nil): replace this thread's tracing
 * handler; nil disables tracing for the thread. */
static VALUE
thread_set_trace_func_m(VALUE obj, VALUE trace)
{
    rb_thread_t *th;
    GetThreadPtr(obj, th);
    /* remove any existing call_trace_func hook before installing */
    rb_threadptr_revmove_event_hook(th, call_trace_func);

    if (NIL_P(trace)) {
        return Qnil;
    }
    thread_add_trace_func(th, trace);
    return trace;
}
04013 
04014 static const char *
04015 get_event_name(rb_event_flag_t event)
04016 {
04017     switch (event) {
04018       case RUBY_EVENT_LINE:
04019         return "line";
04020       case RUBY_EVENT_CLASS:
04021         return "class";
04022       case RUBY_EVENT_END:
04023         return "end";
04024       case RUBY_EVENT_CALL:
04025         return "call";
04026       case RUBY_EVENT_RETURN:
04027         return "return";
04028       case RUBY_EVENT_C_CALL:
04029         return "c-call";
04030       case RUBY_EVENT_C_RETURN:
04031         return "c-return";
04032       case RUBY_EVENT_RAISE:
04033         return "raise";
04034       default:
04035         return "unknown";
04036     }
04037 }
04038 
04039 VALUE ruby_suppress_tracing(VALUE (*func)(VALUE, int), VALUE arg, int always);
04040 
/* Event data marshalled through ruby_suppress_tracing into
 * call_trace_proc (passed as a single VALUE-cast pointer). */
struct call_trace_func_args {
    rb_event_flag_t event;  /* which event fired */
    VALUE proc;             /* user trace Proc to invoke */
    VALUE self;             /* receiver at the event site */
    ID id;                  /* method id (C events only; 0 otherwise) */
    VALUE klass;            /* method owner (C events only; 0 otherwise) */
};
04048 
/*
 * Build the six trace-proc arguments (event name, file, line, method id,
 * binding, class) and call the user's Proc.  Runs with tracing suppressed
 * (see ruby_suppress_tracing) so the Proc itself is not traced.
 */
static VALUE
call_trace_proc(VALUE args, int tracing)
{
    struct call_trace_func_args *p = (struct call_trace_func_args *)args;
    const char *srcfile = rb_sourcefile();
    VALUE eventname = rb_str_new2(get_event_name(p->event));
    VALUE filename = srcfile ? rb_str_new2(srcfile) : Qnil;
    VALUE argv[6];
    int line = rb_sourceline();
    ID id = 0;
    VALUE klass = 0;

    if (p->event == RUBY_EVENT_C_CALL ||
        p->event == RUBY_EVENT_C_RETURN) {
        /* C events carry id/klass in the args; Ruby events read them
         * from the current frame */
        id = p->id;
        klass = p->klass;
    }
    else {
        rb_thread_method_id_and_class(GET_THREAD(), &id, &klass);
    }
    /* never trace object allocation */
    if (id == ID_ALLOCATOR)
      return Qnil;
    if (klass) {
        /* normalize: iclass -> its class, singleton -> attached object */
        if (TYPE(klass) == T_ICLASS) {
            klass = RBASIC(klass)->klass;
        }
        else if (FL_TEST(klass, FL_SINGLETON)) {
            klass = rb_iv_get(klass, "__attached__");
        }
    }

    argv[0] = eventname;
    argv[1] = filename;
    argv[2] = INT2FIX(line);
    argv[3] = id ? ID2SYM(id) : Qnil;
    argv[4] = (p->self && srcfile) ? rb_binding_new() : Qnil;
    argv[5] = klass ? klass : Qnil;

    return rb_proc_call_with_block(p->proc, 6, argv, Qnil);
}
04089 
/* Event-hook entry point for set_trace_func handlers: package the event
 * details and invoke the Proc with re-entrant tracing suppressed. */
static void
call_trace_func(rb_event_flag_t event, VALUE proc, VALUE self, ID id, VALUE klass)
{
    struct call_trace_func_args args;

    args.event = event;
    args.proc = proc;
    args.self = self;
    args.id = id;
    args.klass = klass;
    ruby_suppress_tracing(call_trace_proc, (VALUE)&args, FALSE);
}
04102 
/*
 * Call func(arg, tracing) with th->tracing set, so trace hooks fired
 * inside func are ignored.  If tracing is already active and `always`
 * is false, func is skipped entirely (returns Qnil).  The thread's
 * raised flag and jump state are saved and restored around the call;
 * a non-local exit from func is re-raised via JUMP_TAG.
 */
VALUE
ruby_suppress_tracing(VALUE (*func)(VALUE, int), VALUE arg, int always)
{
    rb_thread_t *th = GET_THREAD();
    int state, tracing;
    volatile int raised;
    volatile int outer_state;
    VALUE result = Qnil;

    if ((tracing = th->tracing) != 0 && !always) {
        return Qnil;
    }
    else {
        th->tracing = 1;
    }

    raised = rb_threadptr_reset_raised(th);
    outer_state = th->state;
    th->state = 0;

    PUSH_TAG();
    if ((state = EXEC_TAG()) == 0) {
        result = (*func)(arg, tracing);
    }

    if (raised) {
        rb_threadptr_set_raised(th);
    }
    POP_TAG();

    /* restore the caller's tracing depth before possibly jumping out */
    th->tracing = tracing;
    if (state) {
        JUMP_TAG(state);
    }
    th->state = outer_state;

    return result;
}
04141 
04142 VALUE rb_thread_backtrace(VALUE thval);
04143 
04144 /*
04145  *  call-seq:
04146  *     thr.backtrace    -> array
04147  *
 *  Returns the current backtrace of _thr_.
04149  */
04150 
/* Thread#backtrace: thin wrapper around rb_thread_backtrace (defined
 * elsewhere; declared above). */
static VALUE
rb_thread_backtrace_m(VALUE thval)
{
    return rb_thread_backtrace(thval);
}
04156 
04157 /*
04158  *  Document-class: ThreadError
04159  *
04160  *  Raised when an invalid operation is attempted on a thread.
04161  *
04162  *  For example, when no other thread has been started:
04163  *
04164  *     Thread.stop
04165  *
04166  *  <em>raises the exception:</em>
04167  *
04168  *     ThreadError: stopping only thread
04169  */
04170 
04171 /*
04172  *  +Thread+ encapsulates the behavior of a thread of
04173  *  execution, including the main thread of the Ruby script.
04174  *
04175  *  In the descriptions of the methods in this class, the parameter _sym_
04176  *  refers to a symbol, which is either a quoted string or a
04177  *  +Symbol+ (such as <code>:name</code>).
04178  */
04179 
/*
 * One-time VM-boot initializer for the threading subsystem.  Registers the
 * Thread and ThreadGroup Ruby classes and their methods, the Mutex class,
 * ThreadError, and the trace-func API; then acquires the global VM lock on
 * behalf of the main thread and starts the timer thread.
 */
void
Init_Thread(void)
{
/* Intern method-name strings as constant IDs for the rb_define_* calls below. */
#undef rb_intern
#define rb_intern(str) rb_intern_const(str)

    VALUE cThGroup;

    /* Thread singleton (class-level) methods. */
    rb_define_singleton_method(rb_cThread, "new", thread_s_new, -1);
    rb_define_singleton_method(rb_cThread, "start", thread_start, -2);
    rb_define_singleton_method(rb_cThread, "fork", thread_start, -2);
    rb_define_singleton_method(rb_cThread, "main", rb_thread_s_main, 0);
    rb_define_singleton_method(rb_cThread, "current", thread_s_current, 0);
    rb_define_singleton_method(rb_cThread, "stop", rb_thread_stop, 0);
    rb_define_singleton_method(rb_cThread, "kill", rb_thread_s_kill, 1);
    rb_define_singleton_method(rb_cThread, "exit", rb_thread_exit, 0);
    rb_define_singleton_method(rb_cThread, "pass", thread_s_pass, 0);
    rb_define_singleton_method(rb_cThread, "list", rb_thread_list, 0);
    rb_define_singleton_method(rb_cThread, "abort_on_exception", rb_thread_s_abort_exc, 0);
    rb_define_singleton_method(rb_cThread, "abort_on_exception=", rb_thread_s_abort_exc_set, 1);
/* Thread.DEBUG accessors exist only in debug builds (THREAD_DEBUG < 0). */
#if THREAD_DEBUG < 0
    rb_define_singleton_method(rb_cThread, "DEBUG", rb_thread_s_debug, 0);
    rb_define_singleton_method(rb_cThread, "DEBUG=", rb_thread_s_debug_set, 1);
#endif

    /* Thread instance methods. */
    rb_define_method(rb_cThread, "initialize", thread_initialize, -2);
    rb_define_method(rb_cThread, "raise", thread_raise_m, -1);
    rb_define_method(rb_cThread, "join", thread_join_m, -1);
    rb_define_method(rb_cThread, "value", thread_value, 0);
    rb_define_method(rb_cThread, "kill", rb_thread_kill, 0);
    rb_define_method(rb_cThread, "terminate", rb_thread_kill, 0);
    rb_define_method(rb_cThread, "exit", rb_thread_kill, 0);
    rb_define_method(rb_cThread, "run", rb_thread_run, 0);
    rb_define_method(rb_cThread, "wakeup", rb_thread_wakeup, 0);
    rb_define_method(rb_cThread, "[]", rb_thread_aref, 1);
    rb_define_method(rb_cThread, "[]=", rb_thread_aset, 2);
    rb_define_method(rb_cThread, "key?", rb_thread_key_p, 1);
    rb_define_method(rb_cThread, "keys", rb_thread_keys, 0);
    rb_define_method(rb_cThread, "priority", rb_thread_priority, 0);
    rb_define_method(rb_cThread, "priority=", rb_thread_priority_set, 1);
    rb_define_method(rb_cThread, "status", rb_thread_status, 0);
    rb_define_method(rb_cThread, "alive?", rb_thread_alive_p, 0);
    rb_define_method(rb_cThread, "stop?", rb_thread_stop_p, 0);
    rb_define_method(rb_cThread, "abort_on_exception", rb_thread_abort_exc, 0);
    rb_define_method(rb_cThread, "abort_on_exception=", rb_thread_abort_exc_set, 1);
    rb_define_method(rb_cThread, "safe_level", rb_thread_safe_level, 0);
    rb_define_method(rb_cThread, "group", rb_thread_group, 0);
    rb_define_method(rb_cThread, "backtrace", rb_thread_backtrace_m, 0);

    rb_define_method(rb_cThread, "inspect", rb_thread_inspect, 0);

    /* ThreadGroup class, plus the default group holding the main thread. */
    cThGroup = rb_define_class("ThreadGroup", rb_cObject);
    rb_define_alloc_func(cThGroup, thgroup_s_alloc);
    rb_define_method(cThGroup, "list", thgroup_list, 0);
    rb_define_method(cThGroup, "enclose", thgroup_enclose, 0);
    rb_define_method(cThGroup, "enclosed?", thgroup_enclosed_p, 0);
    rb_define_method(cThGroup, "add", thgroup_add, 1);

    {
        rb_thread_t *th = GET_THREAD();
        th->thgroup = th->vm->thgroup_default = rb_obj_alloc(cThGroup);
        rb_define_const(cThGroup, "Default", th->thgroup);
    }

    /* Mutex class. */
    rb_cMutex = rb_define_class("Mutex", rb_cObject);
    rb_define_alloc_func(rb_cMutex, mutex_alloc);
    rb_define_method(rb_cMutex, "initialize", mutex_initialize, 0);
    rb_define_method(rb_cMutex, "locked?", rb_mutex_locked_p, 0);
    rb_define_method(rb_cMutex, "try_lock", rb_mutex_trylock, 0);
    rb_define_method(rb_cMutex, "lock", rb_mutex_lock, 0);
    rb_define_method(rb_cMutex, "unlock", rb_mutex_unlock, 0);
    rb_define_method(rb_cMutex, "sleep", mutex_sleep, -1);

    recursive_key = rb_intern("__recursive_key__");
    rb_eThreadError = rb_define_class("ThreadError", rb_eStandardError);

    /* trace */
    rb_define_global_function("set_trace_func", set_trace_func, 1);
    rb_define_method(rb_cThread, "set_trace_func", thread_set_trace_func_m, 1);
    rb_define_method(rb_cThread, "add_trace_func", thread_add_trace_func_m, 1);

    /* init thread core */
    {
        /* main thread setting */
        {
            /* acquire global vm lock */
            rb_thread_lock_t *lp = &GET_THREAD()->vm->global_vm_lock;
            native_mutex_initialize(lp);
            native_mutex_lock(lp);
            native_mutex_initialize(&GET_THREAD()->interrupt_lock);
        }
    }

    rb_thread_create_timer_thread();

    /* Reference otherwise-unused statics to silence unused-symbol warnings
     * on configurations that do not call them. */
    (void)native_mutex_trylock;
    (void)ruby_thread_set_native;
}
04278 
/*
 * Returns nonzero when the calling native thread is registered with the
 * Ruby VM (i.e. has an associated rb_thread_t), zero otherwise.
 */
int
ruby_native_thread_p(void)
{
    return ruby_thread_from_native() != 0;
}
04286 
04287 static int
04288 check_deadlock_i(st_data_t key, st_data_t val, int *found)
04289 {
04290     VALUE thval = key;
04291     rb_thread_t *th;
04292     GetThreadPtr(thval, th);
04293 
04294     if (th->status != THREAD_STOPPED_FOREVER || RUBY_VM_INTERRUPTED(th) || th->transition_for_lock) {
04295         *found = 1;
04296     }
04297     else if (th->locking_mutex) {
04298         mutex_t *mutex;
04299         GetMutexPtr(th->locking_mutex, mutex);
04300 
04301         native_mutex_lock(&mutex->lock);
04302         if (mutex->th == th || (!mutex->th && mutex->cond_notified)) {
04303             *found = 1;
04304         }
04305         native_mutex_unlock(&mutex->lock);
04306     }
04307 
04308     return (*found) ? ST_STOP : ST_CONTINUE;
04309 }
04310 
#if 0 /* for debug */
/*
 * Compiled-out debug helper for rb_check_deadlock(): dumps each living
 * thread's status, interrupt flag, and lock-transition flag, plus the
 * owner/notified state of the mutex it is blocked on, if any.
 */
static int
debug_i(st_data_t key, st_data_t val, int *found)
{
    VALUE thval = key;
    rb_thread_t *th;
    GetThreadPtr(thval, th);

    printf("th:%p %d %d %d", th, th->status, th->interrupt_flag, th->transition_for_lock);
    if (th->locking_mutex) {
        mutex_t *mutex;
        GetMutexPtr(th->locking_mutex, mutex);

        /* lock the mutex so owner/cond_notified are read consistently */
        native_mutex_lock(&mutex->lock);
        printf(" %p %d\n", mutex->th, mutex->cond_notified);
        native_mutex_unlock(&mutex->lock);
    }
    else puts("");

    return ST_CONTINUE;
}
#endif
04333 
/*
 * Detects the "every thread is asleep" deadlock.  Called when a thread goes
 * to sleep: if the number of sleeping threads (vm->sleeper) has reached the
 * number of living threads and no thread can be woken (per check_deadlock_i),
 * a fatal "deadlock detected" exception is raised on the main thread.
 */
static void
rb_check_deadlock(rb_vm_t *vm)
{
    int found = 0;

    /* fast path: at least one thread is still awake */
    if (vm_living_thread_num(vm) > vm->sleeper) return;
    if (vm_living_thread_num(vm) < vm->sleeper) rb_bug("sleeper must not be more than vm_living_thread_num(vm)");

    st_foreach(vm->living_threads, check_deadlock_i, (st_data_t)&found);

    if (!found) {
        VALUE argv[2];
        argv[0] = rb_eFatal;
        argv[1] = rb_str_new2("deadlock detected");
#if 0 /* for debug */
        printf("%d %d %p %p\n", vm->living_threads->num_entries, vm->sleeper, GET_THREAD(), vm->main_thread);
        st_foreach(vm->living_threads, debug_i, (st_data_t)0);
#endif
        /* the main thread is about to be woken by the raise, so it no
         * longer counts as a sleeper */
        vm->sleeper--;
        rb_threadptr_raise(vm->main_thread, 2, argv);
    }
}
04356 
04357 static void
04358 update_coverage(rb_event_flag_t event, VALUE proc, VALUE self, ID id, VALUE klass)
04359 {
04360     VALUE coverage = GET_THREAD()->cfp->iseq->coverage;
04361     if (coverage && RBASIC(coverage)->klass == 0) {
04362         long line = rb_sourceline() - 1;
04363         long count;
04364         if (RARRAY_PTR(coverage)[line] == Qnil) {
04365             rb_bug("bug");
04366         }
04367         count = FIX2LONG(RARRAY_PTR(coverage)[line]) + 1;
04368         if (POSFIXABLE(count)) {
04369             RARRAY_PTR(coverage)[line] = LONG2FIX(count);
04370         }
04371     }
04372 }
04373 
04374 VALUE
04375 rb_get_coverages(void)
04376 {
04377     return GET_VM()->coverages;
04378 }
04379 
04380 void
04381 rb_set_coverages(VALUE coverages)
04382 {
04383     GET_VM()->coverages = coverages;
04384     rb_add_event_hook(update_coverage, RUBY_EVENT_COVERAGE, Qnil);
04385 }
04386 
04387 void
04388 rb_reset_coverages(void)
04389 {
04390     GET_VM()->coverages = Qfalse;
04391     rb_remove_event_hook(update_coverage);
04392 }
04393 

Generated on Thu Sep 8 2011 03:50:47 for Ruby by  doxygen 1.7.1