• Main Page
  • Modules
  • Data Structures
  • Files
  • File List
  • Globals

thread.c

Go to the documentation of this file.
00001 /**********************************************************************
00002 
00003   thread.c -
00004 
00005   $Author: yugui $
00006 
00007   Copyright (C) 2004-2007 Koichi Sasada
00008 
00009 **********************************************************************/
00010 
00011 /*
00012   YARV Thread Design
00013 
00014   model 1: Userlevel Thread
00015     Same as traditional ruby thread.
00016 
00017   model 2: Native Thread with Global VM lock
00018     Using pthread (or Windows thread) and Ruby threads run concurrent.
00019 
00020   model 3: Native Thread with fine grain lock
00021     Using pthread and Ruby threads run concurrent or parallel.
00022 
00023 ------------------------------------------------------------------------
00024 
00025   model 2:
00026     Only a thread that holds the mutex (GVL: Global VM Lock, or Giant
00027     VM Lock) can run.  At a scheduling point the running thread releases
00028     the GVL.  If the running thread performs a blocking operation, it must
00029     release the GVL so that another thread can continue.  After the
00030     blocking operation, the thread must check for interrupts (RUBY_VM_CHECK_INTS).
00031 
00032     Every VM can run parallel.
00033 
00034     Ruby threads are scheduled by OS thread scheduler.
00035 
00036 ------------------------------------------------------------------------
00037 
00038   model 3:
00039     Every thread runs concurrently or in parallel, and exclusive access
00040     control is needed to touch a shared object.  For example, to access a
00041     String object or an Array object, a fine-grained lock must be taken every time.
00042  */
00043 
00044 
00045 /* for model 2 */
00046 
00047 #include "eval_intern.h"
00048 #include "gc.h"
00049 
00050 #ifndef USE_NATIVE_THREAD_PRIORITY
00051 #define USE_NATIVE_THREAD_PRIORITY 0
00052 #define RUBY_THREAD_PRIORITY_MAX 3
00053 #define RUBY_THREAD_PRIORITY_MIN -3
00054 #endif
00055 
00056 #ifndef THREAD_DEBUG
00057 #define THREAD_DEBUG 0
00058 #endif
00059 
00060 VALUE rb_cMutex;
00061 VALUE rb_cBarrier;
00062 
00063 static void sleep_timeval(rb_thread_t *th, struct timeval time);
00064 static void sleep_wait_for_interrupt(rb_thread_t *th, double sleepsec);
00065 static void sleep_forever(rb_thread_t *th, int nodeadlock);
00066 static double timeofday(void);
00067 struct timeval rb_time_interval(VALUE);
00068 static int rb_threadptr_dead(rb_thread_t *th);
00069 
00070 static void rb_check_deadlock(rb_vm_t *vm);
00071 
00072 int rb_signal_buff_size(void);
00073 void rb_signal_exec(rb_thread_t *th, int sig);
00074 void rb_disable_interrupt(void);
00075 void rb_thread_stop_timer_thread(void);
00076 
00077 static const VALUE eKillSignal = INT2FIX(0);
00078 static const VALUE eTerminateSignal = INT2FIX(1);
00079 static volatile int system_working = 1;
00080 
00081 inline static void
00082 st_delete_wrap(st_table *table, st_data_t key)
00083 {
00084     st_delete(table, &key, 0);
00085 }
00086 
00087 /********************************************************************************/
00088 
00089 #define THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION
00090 
00091 struct rb_blocking_region_buffer {
00092     enum rb_thread_status prev_status;
00093     struct rb_unblock_callback oldubf;
00094 };
00095 
00096 static void set_unblock_function(rb_thread_t *th, rb_unblock_function_t *func, void *arg,
00097                                  struct rb_unblock_callback *old);
00098 static void reset_unblock_function(rb_thread_t *th, const struct rb_unblock_callback *old);
00099 
00100 static inline void blocking_region_end(rb_thread_t *th, struct rb_blocking_region_buffer *region);
00101 
00102 #define RB_GC_SAVE_MACHINE_CONTEXT(th) \
00103   do { \
00104     rb_gc_save_machine_context(th); \
00105     SET_MACHINE_STACK_END(&(th)->machine_stack_end); \
00106   } while (0)
00107 
00108 #define GVL_UNLOCK_BEGIN() do { \
00109   rb_thread_t *_th_stored = GET_THREAD(); \
00110   RB_GC_SAVE_MACHINE_CONTEXT(_th_stored); \
00111   native_mutex_unlock(&_th_stored->vm->global_vm_lock)
00112 
00113 #define GVL_UNLOCK_END() \
00114   native_mutex_lock(&_th_stored->vm->global_vm_lock); \
00115   rb_thread_set_current(_th_stored); \
00116 } while(0)
00117 
00118 #define BLOCKING_REGION_CORE(exec) do { \
00119     GVL_UNLOCK_BEGIN(); {\
00120             exec; \
00121     } \
00122     GVL_UNLOCK_END(); \
00123 } while(0);
00124 
00125 #define blocking_region_begin(th, region, func, arg) \
00126   do { \
00127     (region)->prev_status = (th)->status; \
00128     set_unblock_function((th), (func), (arg), &(region)->oldubf); \
00129     (th)->blocking_region_buffer = (region); \
00130     (th)->status = THREAD_STOPPED; \
00131     thread_debug("enter blocking region (%p)\n", (void *)(th)); \
00132     RB_GC_SAVE_MACHINE_CONTEXT(th); \
00133     native_mutex_unlock(&(th)->vm->global_vm_lock); \
00134   } while (0)
00135 
00136 #define BLOCKING_REGION(exec, ubf, ubfarg) do { \
00137     rb_thread_t *__th = GET_THREAD(); \
00138     struct rb_blocking_region_buffer __region; \
00139     blocking_region_begin(__th, &__region, ubf, ubfarg); \
00140     exec; \
00141     blocking_region_end(__th, &__region); \
00142     RUBY_VM_CHECK_INTS(); \
00143 } while(0)
00144 
00145 #if THREAD_DEBUG
00146 #ifdef HAVE_VA_ARGS_MACRO
00147 void rb_thread_debug(const char *file, int line, const char *fmt, ...);
00148 #define thread_debug(fmt, ...) rb_thread_debug(__FILE__, __LINE__, fmt, ##__VA_ARGS__)
00149 #define POSITION_FORMAT "%s:%d:"
00150 #define POSITION_ARGS ,file, line
00151 #else
00152 void rb_thread_debug(const char *fmt, ...);
00153 #define thread_debug rb_thread_debug
00154 #define POSITION_FORMAT
00155 #define POSITION_ARGS
00156 #endif
00157 
00158 # if THREAD_DEBUG < 0
00159 static int rb_thread_debug_enabled;
00160 
00161 /*
00162  *  call-seq:
00163  *     Thread.DEBUG     -> num
00164  *
00165  *  Returns the thread debug level.  Available only if compiled with
00166  *  THREAD_DEBUG=-1.
00167  */
00168 
00169 static VALUE
00170 rb_thread_s_debug(void)
00171 {
00172     return INT2NUM(rb_thread_debug_enabled);
00173 }
00174 
00175 /*
00176  *  call-seq:
00177  *     Thread.DEBUG = num
00178  *
00179  *  Sets the thread debug level.  Available only if compiled with
00180  *  THREAD_DEBUG=-1.
00181  */
00182 
00183 static VALUE
00184 rb_thread_s_debug_set(VALUE self, VALUE val)
00185 {
00186     rb_thread_debug_enabled = RTEST(val) ? NUM2INT(val) : 0;
00187     return val;
00188 }
00189 # else
00190 # define rb_thread_debug_enabled THREAD_DEBUG
00191 # endif
00192 #else
00193 #define thread_debug if(0)printf
00194 #endif
00195 
00196 #ifndef __ia64
00197 #define thread_start_func_2(th, st, rst) thread_start_func_2(th, st)
00198 #endif
00199 NOINLINE(static int thread_start_func_2(rb_thread_t *th, VALUE *stack_start,
00200                                         VALUE *register_stack_start));
00201 static void timer_thread_function(void *);
00202 
00203 #if   defined(_WIN32)
00204 #include "thread_win32.c"
00205 
00206 #define DEBUG_OUT() \
00207   WaitForSingleObject(&debug_mutex, INFINITE); \
00208   printf(POSITION_FORMAT"%p - %s" POSITION_ARGS, GetCurrentThreadId(), buf); \
00209   fflush(stdout); \
00210   ReleaseMutex(&debug_mutex);
00211 
00212 #elif defined(HAVE_PTHREAD_H)
00213 #include "thread_pthread.c"
00214 
00215 #define DEBUG_OUT() \
00216   pthread_mutex_lock(&debug_mutex); \
00217   printf(POSITION_FORMAT"%#"PRIxVALUE" - %s" POSITION_ARGS, (VALUE)pthread_self(), buf); \
00218   fflush(stdout); \
00219   pthread_mutex_unlock(&debug_mutex);
00220 
00221 #else
00222 #error "unsupported thread type"
00223 #endif
00224 
00225 #if THREAD_DEBUG
00226 static int debug_mutex_initialized = 1;
00227 static rb_thread_lock_t debug_mutex;
00228 
/*
 * Debug printf for the thread subsystem.  Formats into a local buffer and
 * emits it via the platform-specific DEBUG_OUT() macro, which serializes
 * output through debug_mutex.  No-op unless rb_thread_debug_enabled.
 *
 * NOTE(review): the one-shot lazy initialization of debug_mutex is not
 * itself synchronized; racy if two threads log simultaneously before the
 * first init completes — presumably acceptable for a debug-only path.
 */
void
rb_thread_debug(
#ifdef HAVE_VA_ARGS_MACRO
    const char *file, int line,
#endif
    const char *fmt, ...)
{
    va_list args;
    char buf[BUFSIZ];

    if (!rb_thread_debug_enabled) return;

    if (debug_mutex_initialized == 1) {
        debug_mutex_initialized = 0;
        native_mutex_initialize(&debug_mutex);
    }

    va_start(args, fmt);
    vsnprintf(buf, BUFSIZ, fmt, args);
    va_end(args);

    DEBUG_OUT();
}
00252 #endif
00253 
00254 void
00255 rb_thread_lock_unlock(rb_thread_lock_t *lock)
00256 {
00257     native_mutex_unlock(lock);
00258 }
00259 
00260 void
00261 rb_thread_lock_destroy(rb_thread_lock_t *lock)
00262 {
00263     native_mutex_destroy(lock);
00264 }
00265 
/*
 * Install FUNC/ARG as TH's unblock callback, saving the previous callback
 * into *OLD (when OLD is non-NULL).
 *
 * A blocking region must never begin with an interrupt already queued, so
 * if interrupt_flag is set we drop the lock, service pending interrupts
 * via RUBY_VM_CHECK_INTS(), and retry until the flag is observed clear
 * while interrupt_lock is held.
 */
static void
set_unblock_function(rb_thread_t *th, rb_unblock_function_t *func, void *arg,
                     struct rb_unblock_callback *old)
{
  check_ints:
    RUBY_VM_CHECK_INTS(); /* check signal or so */
    native_mutex_lock(&th->interrupt_lock);
    if (th->interrupt_flag) {
        /* interrupt arrived before we could arm the callback: handle it first */
        native_mutex_unlock(&th->interrupt_lock);
        goto check_ints;
    }
    else {
        if (old) *old = th->unblock;
        th->unblock.func = func;
        th->unblock.arg = arg;
    }
    native_mutex_unlock(&th->interrupt_lock);
}
00284 
00285 static void
00286 reset_unblock_function(rb_thread_t *th, const struct rb_unblock_callback *old)
00287 {
00288     native_mutex_lock(&th->interrupt_lock);
00289     th->unblock = *old;
00290     native_mutex_unlock(&th->interrupt_lock);
00291 }
00292 
00293 void
00294 rb_threadptr_interrupt(rb_thread_t *th)
00295 {
00296     native_mutex_lock(&th->interrupt_lock);
00297     RUBY_VM_SET_INTERRUPT(th);
00298     if (th->unblock.func) {
00299         (th->unblock.func)(th->unblock.arg);
00300     }
00301     else {
00302         /* none */
00303     }
00304     native_mutex_unlock(&th->interrupt_lock);
00305 }
00306 
00307 
00308 static int
00309 terminate_i(st_data_t key, st_data_t val, rb_thread_t *main_thread)
00310 {
00311     VALUE thval = key;
00312     rb_thread_t *th;
00313     GetThreadPtr(thval, th);
00314 
00315     if (th != main_thread) {
00316         thread_debug("terminate_i: %p\n", (void *)th);
00317         rb_threadptr_interrupt(th);
00318         th->thrown_errinfo = eTerminateSignal;
00319         th->status = THREAD_TO_KILL;
00320     }
00321     else {
00322         thread_debug("terminate_i: main thread (%p)\n", (void *)th);
00323     }
00324     return ST_CONTINUE;
00325 }
00326 
/*
 * Internal representation of a Ruby Mutex.  Each mutex records its owning
 * thread and condition-variable bookkeeping for waiters; next_mutex chains
 * all mutexes held by the same thread so they can be released or abandoned
 * en masse when that thread exits (see rb_mutex_unlock_all /
 * rb_mutex_abandon_all below).
 */
typedef struct rb_mutex_struct
{
    rb_thread_lock_t lock;                 /* low-level native lock */
    rb_thread_cond_t cond;                 /* waiters block on this */
    struct rb_thread_struct volatile *th;  /* owning thread; cleared when unlocked */
    volatile int cond_waiting, cond_notified;
    struct rb_mutex_struct *next_mutex;    /* next mutex held by the same thread */
} mutex_t;

/* release / abandon every mutex on a thread's keeping_mutexes chain */
static void rb_mutex_unlock_all(mutex_t *mutex, rb_thread_t *th);
static void rb_mutex_abandon_all(mutex_t *mutexes);
00338 
/*
 * Called from the main thread at interpreter shutdown: release any mutexes
 * the main thread still holds, ask every other thread to die, then keep
 * scheduling (swallowing exceptions) until this thread is the only one
 * left, and finally stop the timer thread.  rb_bug()s if invoked from a
 * non-main thread.
 */
void
rb_thread_terminate_all(void)
{
    rb_thread_t *th = GET_THREAD(); /* main thread */
    rb_vm_t *vm = th->vm;

    if (vm->main_thread != th) {
        rb_bug("rb_thread_terminate_all: called by child thread (%p, %p)",
               (void *)vm->main_thread, (void *)th);
    }

    /* unlock all locking mutexes */
    if (th->keeping_mutexes) {
        rb_mutex_unlock_all(th->keeping_mutexes, GET_THREAD());
    }

    thread_debug("rb_thread_terminate_all (main thread: %p)\n", (void *)th);
    st_foreach(vm->living_threads, terminate_i, (st_data_t)th);

    /* each iteration is wrapped in a tag so a dying thread's exception
     * cannot abort the shutdown loop */
    while (!rb_thread_alone()) {
        PUSH_TAG();
        if (EXEC_TAG() == 0) {
            rb_thread_schedule();
        }
        else {
            /* ignore exception */
        }
        POP_TAG();
    }
    rb_thread_stop_timer_thread();
}
00370 
00371 static void
00372 thread_unlock_all_locking_mutexes(rb_thread_t *th)
00373 {
00374     if (th->keeping_mutexes) {
00375         rb_mutex_unlock_all(th->keeping_mutexes, th);
00376         th->keeping_mutexes = NULL;
00377     }
00378 }
00379 
/*
 * Minimal per-thread teardown that is safe to run right before exec(2):
 * mark the thread dead and zero its machine stack bounds so the GC will
 * not scan stack memory that is about to disappear.
 */
static void
thread_cleanup_func_before_exec(void *th_ptr)
{
    rb_thread_t *th = th_ptr;
    th->status = THREAD_KILLED;
    th->machine_stack_start = th->machine_stack_end = 0;
#ifdef __ia64
    /* ia64 keeps a second, register backing-store stack */
    th->machine_register_stack_start = th->machine_register_stack_end = 0;
#endif
}
00390 
00391 static void
00392 thread_cleanup_func(void *th_ptr, int atfork)
00393 {
00394     rb_thread_t *th = th_ptr;
00395 
00396     th->locking_mutex = Qfalse;
00397     thread_cleanup_func_before_exec(th_ptr);
00398 
00399     /*
00400      * Unfortunately, we can't release native threading resource at fork
00401      * because libc may have unstable locking state therefore touching
00402      * a threading resource may cause a deadlock.
00403      */
00404     if (atfork)
00405         return;
00406 
00407     native_thread_destroy(th);
00408 }
00409 
00410 extern void ruby_error_print(void);
00411 static VALUE rb_threadptr_raise(rb_thread_t *, int, VALUE *);
00412 void rb_thread_recycle_stack_release(VALUE *);
00413 
00414 void
00415 ruby_thread_init_stack(rb_thread_t *th)
00416 {
00417     native_thread_init_stack(th);
00418 }
00419 
/*
 * Body of every newly created Ruby thread (called from the platform
 * thread-start trampoline).  Acquires the GVL, runs the thread's proc or
 * C function, classifies any raised exception, then performs teardown:
 * notify the main thread of fatal errors, remove self from
 * living_threads, wake joiners, release held mutexes, run deadlock
 * detection, and recycle the VM stack.
 *
 * register_stack_start is only meaningful on ia64 (see the
 * thread_start_func_2 macro above, which drops it elsewhere).
 */
static int
thread_start_func_2(rb_thread_t *th, VALUE *stack_start, VALUE *register_stack_start)
{
    int state;
    VALUE args = th->first_args;
    rb_proc_t *proc;
    rb_thread_t *join_th;
    rb_thread_t *main_th;
    VALUE errinfo = Qnil;
# ifdef USE_SIGALTSTACK
    void rb_register_sigaltstack(rb_thread_t *th);

    rb_register_sigaltstack(th);
# endif

    ruby_thread_set_native(th);

    th->machine_stack_start = stack_start;
#ifdef __ia64
    th->machine_register_stack_start = register_stack_start;
#endif
    thread_debug("thread start: %p\n", (void *)th);

    native_mutex_lock(&th->vm->global_vm_lock);
    {
        thread_debug("thread start (get lock): %p\n", (void *)th);
        rb_thread_set_current(th);

        TH_PUSH_TAG(th);
        if ((state = EXEC_TAG()) == 0) {
            SAVE_ROOT_JMPBUF(th, {
                /* first_func set => C-level thread (rb_thread_create);
                 * otherwise run the Ruby proc captured at Thread.new */
                if (!th->first_func) {
                    GetProcPtr(th->first_proc, proc);
                    th->errinfo = Qnil;
                    th->local_lfp = proc->block.lfp;
                    th->local_svar = Qnil;
                    th->value = rb_vm_invoke_proc(th, proc, proc->block.self,
                                                  (int)RARRAY_LEN(args), RARRAY_PTR(args), 0);
                }
                else {
                    th->value = (*th->first_func)((void *)args);
                }
            });
        }
        else {
            /* non-local exit: decide whether the error propagates to the
             * main thread (errinfo kept) or is discarded (errinfo = Qnil) */
            errinfo = th->errinfo;
            if (NIL_P(errinfo)) errinfo = rb_errinfo();
            if (state == TAG_FATAL) {
                /* fatal error within this thread, need to stop whole script */
            }
            else if (rb_obj_is_kind_of(errinfo, rb_eSystemExit)) {
                if (th->safe_level >= 4) {
                    th->errinfo = rb_exc_new3(rb_eSecurityError,
                                              rb_sprintf("Insecure exit at level %d", th->safe_level));
                    errinfo = Qnil;
                }
            }
            else if (th->safe_level < 4 &&
                     (th->vm->thread_abort_on_exception ||
                      th->abort_on_exception || RTEST(ruby_debug))) {
                /* exit on main_thread */
            }
            else {
                errinfo = Qnil;
            }
            th->value = Qnil;
        }

        th->status = THREAD_KILLED;
        thread_debug("thread end: %p\n", (void *)th);

        main_th = th->vm->main_thread;
        if (th != main_th) {
            if (TYPE(errinfo) == T_OBJECT) {
                /* treat with normal error object */
                rb_threadptr_raise(main_th, 1, &errinfo);
            }
        }
        TH_POP_TAG();

        /* locking_mutex must be Qfalse */
        if (th->locking_mutex != Qfalse) {
            rb_bug("thread_start_func_2: locking_mutex must not be set (%p:%"PRIxVALUE")",
                   (void *)th, th->locking_mutex);
        }

        /* delete self other than main thread from living_threads */
        if (th != main_th) {
            st_delete_wrap(th->vm->living_threads, th->self);
        }

        /* wake up joining threads */
        join_th = th->join_list_head;
        while (join_th) {
            /* the main thread always gets the error via abort handling,
             * not via join, so clear errinfo for it */
            if (join_th == main_th) errinfo = Qnil;
            rb_threadptr_interrupt(join_th);
            switch (join_th->status) {
              case THREAD_STOPPED: case THREAD_STOPPED_FOREVER:
                join_th->status = THREAD_RUNNABLE;
              default: break;
            }
            join_th = join_th->join_list_next;
        }

        thread_unlock_all_locking_mutexes(th);
        if (th != main_th) rb_check_deadlock(th->vm);

        /* a thread with a root fiber shares its stack with the fiber,
         * so only recycle when no fiber owns it */
        if (!th->root_fiber) {
            rb_thread_recycle_stack_release(th->stack);
            th->stack = 0;
        }
    }
    if (th->vm->main_thread == th) {
        ruby_cleanup(state);
    }
    else {
        thread_cleanup_func(th, FALSE);
        native_mutex_unlock(&th->vm->global_vm_lock);
    }

    return 0;
}
00542 
/*
 * Common backend for Thread.new / Thread.start / rb_thread_create():
 * arm the rb_thread_t behind THVAL with its entry point (FN, or the
 * caller's block when FN is NULL), register it in the VM's living_threads
 * table, and start the native thread.  Raises ThreadError if the current
 * ThreadGroup is frozen or if native thread creation fails.
 */
static VALUE
thread_create_core(VALUE thval, VALUE args, VALUE (*fn)(ANYARGS))
{
    rb_thread_t *th;
    int err;

    if (OBJ_FROZEN(GET_THREAD()->thgroup)) {
        rb_raise(rb_eThreadError,
                 "can't start a new thread (frozen ThreadGroup)");
    }
    GetThreadPtr(thval, th);

    /* setup thread environment */
    th->first_func = fn;
    th->first_proc = fn ? Qfalse : rb_block_proc();
    th->first_args = args; /* GC: shouldn't put before above line */

    th->priority = GET_THREAD()->priority;
    th->thgroup = GET_THREAD()->thgroup;

    native_mutex_initialize(&th->interrupt_lock);
    /* inherit tracing: if the VM has event hooks installed, the new
     * thread must emit VM events too */
    if (GET_VM()->event_hooks != NULL)
        th->event_flags |= RUBY_EVENT_VM;

    /* kick thread */
    st_insert(th->vm->living_threads, thval, (st_data_t) th->thread_id);
    err = native_thread_create(th);
    if (err) {
        /* the thread never ran: unregister it and report the native error code */
        st_delete_wrap(th->vm->living_threads, th->self);
        th->status = THREAD_KILLED;
        rb_raise(rb_eThreadError, "can't create Thread (%d)", err);
    }
    return thval;
}
00577 
00578 /* :nodoc: */
00579 static VALUE
00580 thread_s_new(int argc, VALUE *argv, VALUE klass)
00581 {
00582     rb_thread_t *th;
00583     VALUE thread = rb_thread_alloc(klass);
00584     rb_obj_call_init(thread, argc, argv);
00585     GetThreadPtr(thread, th);
00586     if (!th->first_args) {
00587         rb_raise(rb_eThreadError, "uninitialized thread - check `%s#initialize'",
00588                  rb_class2name(klass));
00589     }
00590     return thread;
00591 }
00592 
00593 /*
00594  *  call-seq:
00595  *     Thread.start([args]*) {|args| block }   -> thread
00596  *     Thread.fork([args]*) {|args| block }    -> thread
00597  *
00598  *  Basically the same as <code>Thread::new</code>. However, if class
00599  *  <code>Thread</code> is subclassed, then calling <code>start</code> in that
00600  *  subclass will not invoke the subclass's <code>initialize</code> method.
00601  */
00602 
00603 static VALUE
00604 thread_start(VALUE klass, VALUE args)
00605 {
00606     return thread_create_core(rb_thread_alloc(klass), args, 0);
00607 }
00608 
00609 /* :nodoc: */
/*
 * Thread#initialize: start the thread running the given block.  If the
 * thread was already initialized, raise ThreadError — including the
 * file:line where the original block was defined when that is known.
 * The cascade of rb_raise() calls relies on rb_raise never returning.
 */
static VALUE
thread_initialize(VALUE thread, VALUE args)
{
    rb_thread_t *th;
    if (!rb_block_given_p()) {
        rb_raise(rb_eThreadError, "must be called with a block");
    }
    GetThreadPtr(thread, th);
    if (th->first_args) {
        VALUE rb_proc_location(VALUE self);
        VALUE proc = th->first_proc, line, loc;
        const char *file;
        /* no proc (C-function thread) or unknown source location:
         * fall back to the generic message */
        if (!proc || !RTEST(loc = rb_proc_location(proc))) {
            rb_raise(rb_eThreadError, "already initialized thread");
        }
        file = RSTRING_PTR(RARRAY_PTR(loc)[0]);
        if (NIL_P(line = RARRAY_PTR(loc)[1])) {
            rb_raise(rb_eThreadError, "already initialized thread - %s",
                     file);
        }
        rb_raise(rb_eThreadError, "already initialized thread - %s:%d",
                 file, NUM2INT(line));
    }
    return thread_create_core(thread, args, 0);
}
00635 
00636 VALUE
00637 rb_thread_create(VALUE (*fn)(ANYARGS), void *arg)
00638 {
00639     return thread_create_core(rb_thread_alloc(rb_cThread), (VALUE)arg, fn);
00640 }
00641 
00642 
00643 /* +infty, for this purpose */
00644 #define DELAY_INFTY 1E30
00645 
00646 struct join_arg {
00647     rb_thread_t *target, *waiting;
00648     double limit;
00649     int forever;
00650 };
00651 
00652 static VALUE
00653 remove_from_join_list(VALUE arg)
00654 {
00655     struct join_arg *p = (struct join_arg *)arg;
00656     rb_thread_t *target_th = p->target, *th = p->waiting;
00657 
00658     if (target_th->status != THREAD_KILLED) {
00659         rb_thread_t **pth = &target_th->join_list_head;
00660 
00661         while (*pth) {
00662             if (*pth == th) {
00663                 *pth = th->join_list_next;
00664                 break;
00665             }
00666             pth = &(*pth)->join_list_next;
00667         }
00668     }
00669 
00670     return Qnil;
00671 }
00672 
/*
 * rb_ensure() body for thread_join(): sleep until the target thread dies
 * or the absolute deadline (p->limit) passes.  Returns Qtrue when the
 * target was observed dead, Qfalse on timeout.  Each wakeup re-checks the
 * target's status because sleeps can be interrupted at any time.
 */
static VALUE
thread_join_sleep(VALUE arg)
{
    struct join_arg *p = (struct join_arg *)arg;
    rb_thread_t *target_th = p->target, *th = p->waiting;
    double now, limit = p->limit;

    while (target_th->status != THREAD_KILLED) {
        if (p->forever) {
            /* deadlockable sleep: counts toward deadlock detection */
            sleep_forever(th, 1);
        }
        else {
            now = timeofday();
            if (now > limit) {
                thread_debug("thread_join: timeout (thid: %p)\n",
                             (void *)target_th->thread_id);
                return Qfalse;
            }
            sleep_wait_for_interrupt(th, limit - now);
        }
        thread_debug("thread_join: interrupted (thid: %p)\n",
                     (void *)target_th->thread_id);
    }
    return Qtrue;
}
00698 
/*
 * Wait up to DELAY seconds (DELAY_INFTY means wait forever) for TARGET_TH
 * to terminate.  Returns Qnil on timeout, otherwise the target's Thread
 * object.  If the dead target left an exception behind, it is re-raised
 * in the joining thread.
 */
static VALUE
thread_join(rb_thread_t *target_th, double delay)
{
    rb_thread_t *th = GET_THREAD();
    struct join_arg arg;

    arg.target = target_th;
    arg.waiting = th;
    arg.limit = timeofday() + delay;
    arg.forever = delay == DELAY_INFTY;

    thread_debug("thread_join (thid: %p)\n", (void *)target_th->thread_id);

    if (target_th->status != THREAD_KILLED) {
        /* push ourselves onto the target's join list; the ensure handler
         * removes us again if we give up before the target dies */
        th->join_list_next = target_th->join_list_head;
        target_th->join_list_head = th;
        if (!rb_ensure(thread_join_sleep, (VALUE)&arg,
                       remove_from_join_list, (VALUE)&arg)) {
            return Qnil;
        }
    }

    thread_debug("thread_join: success (thid: %p)\n",
                 (void *)target_th->thread_id);

    if (target_th->errinfo != Qnil) {
        VALUE err = target_th->errinfo;

        /* Fixnum errinfo is an internal kill/terminate marker, not a
         * user-visible exception — ignore it */
        if (FIXNUM_P(err)) {
            /* */
        }
        else if (TYPE(target_th->errinfo) == T_NODE) {
            /* throw-data from break/return etc.: convert back to a jump tag */
            rb_exc_raise(rb_vm_make_jump_tag_but_local_jump(
                GET_THROWOBJ_STATE(err), GET_THROWOBJ_VAL(err)));
        }
        else {
            /* normal exception */
            rb_exc_raise(err);
        }
    }
    return target_th->self;
}
00741 
00742 /*
00743  *  call-seq:
00744  *     thr.join          -> thr
00745  *     thr.join(limit)   -> thr
00746  *
00747  *  The calling thread will suspend execution and run <i>thr</i>. Does not
00748  *  return until <i>thr</i> exits or until <i>limit</i> seconds have passed. If
00749  *  the time limit expires, <code>nil</code> will be returned, otherwise
00750  *  <i>thr</i> is returned.
00751  *
00752  *  Any threads not joined will be killed when the main program exits.  If
00753  *  <i>thr</i> had previously raised an exception and the
00754  *  <code>abort_on_exception</code> and <code>$DEBUG</code> flags are not set
00755  *  (so the exception has not yet been processed) it will be processed at this
00756  *  time.
00757  *
00758  *     a = Thread.new { print "a"; sleep(10); print "b"; print "c" }
00759  *     x = Thread.new { print "x"; Thread.pass; print "y"; print "z" }
00760  *     x.join # Let x thread finish, a will be killed on exit.
00761  *
00762  *  <em>produces:</em>
00763  *
00764  *     axyz
00765  *
00766  *  The following example illustrates the <i>limit</i> parameter.
00767  *
00768  *     y = Thread.new { 4.times { sleep 0.1; puts 'tick... ' }}
00769  *     puts "Waiting" until y.join(0.15)
00770  *
00771  *  <em>produces:</em>
00772  *
00773  *     tick...
00774  *     Waiting
00775  *     tick...
00776  *     Waitingtick...
00777  *
00778  *
00779  *     tick...
00780  */
00781 
00782 static VALUE
00783 thread_join_m(int argc, VALUE *argv, VALUE self)
00784 {
00785     rb_thread_t *target_th;
00786     double delay = DELAY_INFTY;
00787     VALUE limit;
00788 
00789     GetThreadPtr(self, target_th);
00790 
00791     rb_scan_args(argc, argv, "01", &limit);
00792     if (!NIL_P(limit)) {
00793         delay = rb_num2dbl(limit);
00794     }
00795 
00796     return thread_join(target_th, delay);
00797 }
00798 
00799 /*
00800  *  call-seq:
00801  *     thr.value   -> obj
00802  *
00803  *  Waits for <i>thr</i> to complete (via <code>Thread#join</code>) and returns
00804  *  its value.
00805  *
00806  *     a = Thread.new { 2 + 2 }
00807  *     a.value   #=> 4
00808  */
00809 
00810 static VALUE
00811 thread_value(VALUE self)
00812 {
00813     rb_thread_t *th;
00814     GetThreadPtr(self, th);
00815     thread_join(th, DELAY_INFTY);
00816     return th->value;
00817 }
00818 
00819 /*
00820  * Thread Scheduling
00821  */
00822 
/*
 * Convert a duration in seconds (possibly fractional, possibly negative)
 * into a struct timeval with tv_usec normalized into [0, 1e6).
 *
 * Fix: the old code cast the double to (int), which is undefined behavior
 * once the value exceeds int range — and callers do pass huge values
 * (sleep_wait_for_interrupt() can receive DELAY_INFTY = 1e30).  Use the
 * timeval's own field types and clamp before casting, since casting an
 * out-of-range double is itself UB.
 */
static struct timeval
double2timeval(double d)
{
    struct timeval time;

    /* clamp to a value safely representable in a 64-bit time_t;
     * NOTE(review): assumes time_t is at least 64 bits here — confirm
     * for platforms with 32-bit time_t */
    if (d > 1e18) d = 1e18;
    else if (d < -1e18) d = -1e18;

    time.tv_sec = (time_t)d;
    time.tv_usec = (long)((d - (double)time.tv_sec) * 1e6);
    if (time.tv_usec < 0) {
        time.tv_usec += (long)1e6;
        time.tv_sec -= 1;
    }
    return time;
}
00836 
/*
 * Sleep until interrupted.  With DEADLOCKABLE set the thread is counted
 * in vm->sleeper and deadlock detection runs before each sleep (used by
 * Thread#join and similar waits); otherwise it is a plain Kernel#sleep
 * style stop.  The loop re-sleeps after interrupts until something
 * changes the status away from THREAD_STOPPED_FOREVER.
 */
static void
sleep_forever(rb_thread_t *th, int deadlockable)
{
    enum rb_thread_status prev_status = th->status;

    th->status = deadlockable ? THREAD_STOPPED_FOREVER : THREAD_STOPPED;
    do {
        if (deadlockable) {
            th->vm->sleeper++;
            rb_check_deadlock(th->vm);
        }
        native_sleep(th, 0);
        if (deadlockable) {
            th->vm->sleeper--;
        }
        RUBY_VM_CHECK_INTS();
    } while (th->status == THREAD_STOPPED_FOREVER);
    th->status = prev_status;
}
00856 
/*
 * Fetch the current time for deadline arithmetic, preferring the
 * monotonic clock (immune to wall-clock adjustments) when available and
 * falling back to gettimeofday() otherwise.
 */
static void
getclockofday(struct timeval *tp)
{
#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
    struct timespec now;

    if (clock_gettime(CLOCK_MONOTONIC, &now) == 0) {
        tp->tv_sec = now.tv_sec;
        tp->tv_usec = now.tv_nsec / 1000;
        return;
    }
#endif
    gettimeofday(tp, NULL);
}
00872 
/*
 * Sleep TH for the duration TV, waking early only for interrupts.  An
 * absolute deadline TO is computed once up front, and after every wakeup
 * the remaining time is recomputed from the clock, so interrupted or
 * spurious wakeups never extend the total sleep beyond the deadline.
 */
static void
sleep_timeval(rb_thread_t *th, struct timeval tv)
{
    struct timeval to, tvn;
    enum rb_thread_status prev_status = th->status;

    /* to = now + tv, with microsecond carry */
    getclockofday(&to);
    to.tv_sec += tv.tv_sec;
    if ((to.tv_usec += tv.tv_usec) >= 1000000) {
        to.tv_sec++;
        to.tv_usec -= 1000000;
    }

    th->status = THREAD_STOPPED;
    do {
        native_sleep(th, &tv);
        RUBY_VM_CHECK_INTS();
        getclockofday(&tvn);
        if (to.tv_sec < tvn.tv_sec) break;
        if (to.tv_sec == tvn.tv_sec && to.tv_usec <= tvn.tv_usec) break;
        thread_debug("sleep_timeval: %ld.%.6ld > %ld.%.6ld\n",
                     (long)to.tv_sec, (long)to.tv_usec,
                     (long)tvn.tv_sec, (long)tvn.tv_usec);
        /* woke early: shrink tv to the time still remaining */
        tv.tv_sec = to.tv_sec - tvn.tv_sec;
        if ((tv.tv_usec = to.tv_usec - tvn.tv_usec) < 0) {
            --tv.tv_sec;
            tv.tv_usec += 1000000;
        }
    } while (th->status == THREAD_STOPPED);
    th->status = prev_status;
}
00904 
/* Sleep the current thread indefinitely (not deadlock-detectable). */
void
rb_thread_sleep_forever(void)
{
    thread_debug("rb_thread_sleep_forever\n");
    sleep_forever(GET_THREAD(), 0);
}
00911 
/* Sleep indefinitely, counting toward deadlock detection ("deadly" sleep). */
static void
rb_thread_sleep_deadly(void)
{
    thread_debug("rb_thread_sleep_deadly\n");
    sleep_forever(GET_THREAD(), 1);
}
00918 
/*
 * Current time as a double in seconds, using the monotonic clock when
 * available, otherwise wall-clock time from gettimeofday().
 */
static double
timeofday(void)
{
#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
    struct timespec ts;

    if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0) {
        return (double)ts.tv_sec + (double)ts.tv_nsec * 1e-9;
    }
#endif
    {
        struct timeval tv;
        gettimeofday(&tv, NULL);
        return (double)tv.tv_sec + (double)tv.tv_usec * 1e-6;
    }
}
00935 
00936 static void
00937 sleep_wait_for_interrupt(rb_thread_t *th, double sleepsec)
00938 {
00939     sleep_timeval(th, double2timeval(sleepsec));
00940 }
00941 
00942 static void
00943 sleep_for_polling(rb_thread_t *th)
00944 {
00945     struct timeval time;
00946     time.tv_sec = 0;
00947     time.tv_usec = 100 * 1000;  /* 0.1 sec */
00948     sleep_timeval(th, time);
00949 }
00950 
00951 void
00952 rb_thread_wait_for(struct timeval time)
00953 {
00954     rb_thread_t *th = GET_THREAD();
00955     sleep_timeval(th, time);
00956 }
00957 
/*
 * Service pending interrupts, then — if other threads exist — pause for
 * one polling tick so they get a chance to run.
 */
void
rb_thread_polling(void)
{
    RUBY_VM_CHECK_INTS();
    if (rb_thread_alone()) return;
    sleep_for_polling(GET_THREAD());
}
00967 
00968 /*
00969  * CAUTION: This function causes thread switching.
00970  *          rb_thread_check_ints() check ruby's interrupts.
00971  *          some interrupt needs thread switching/invoke handlers,
00972  *          and so on.
00973  */
00974 
/* Public wrapper for RUBY_VM_CHECK_INTS(); may switch threads (see CAUTION above). */
void
rb_thread_check_ints(void)
{
    RUBY_VM_CHECK_INTS();
}
00980 
00981 /*
00982  * Hidden API for tcl/tk wrapper.
00983  * There is no guarantee to perpetuate it.
00984  */
/* Nonzero iff buffered signals are waiting to be handled (tcl/tk hook). */
int
rb_thread_check_trap_pending(void)
{
    return rb_signal_buff_size() != 0;
}
00990 
00991 /* This function can be called in blocking region. */
int
rb_thread_interrupted(VALUE thval)
{
    rb_thread_t *th;
    GetThreadPtr(thval, th);
    /* Return the raw interrupt flag; per the note above this is safe
     * to call from inside a blocking region (without the GVL). */
    return RUBY_VM_INTERRUPTED(th);
}
00999 
01000 struct timeval rb_time_timeval(VALUE);
01001 
/* Sleep the calling thread for sec seconds (interruptibly), going
 * through the Time conversion helper for the timeval. */
void
rb_thread_sleep(int sec)
{
    rb_thread_wait_for(rb_time_timeval(INT2FIX(sec)));
}
01007 
01008 static void rb_threadptr_execute_interrupts_rec(rb_thread_t *, int);
01009 
/*
 * Yield the processor to another Ruby thread: release the GVL, let the
 * OS scheduler run someone else, then re-acquire the GVL (model 2).
 *
 * sched_depth guards against mutual recursion: pending interrupts are
 * executed after the switch only when called at depth 0, and the
 * interrupt executor passes a non-zero depth back in.
 */
static void
rb_thread_schedule_rec(int sched_depth)
{
    thread_debug("rb_thread_schedule\n");
    if (!rb_thread_alone()) {
        rb_thread_t *th = GET_THREAD();

        thread_debug("rb_thread_schedule/switch start\n");

        /* Save machine context for GC before dropping the lock. */
        RB_GC_SAVE_MACHINE_CONTEXT(th);
        native_mutex_unlock(&th->vm->global_vm_lock);
        {
            native_thread_yield();
        }
        native_mutex_lock(&th->vm->global_vm_lock);

        rb_thread_set_current(th);
        thread_debug("rb_thread_schedule/switch done\n");

        /* Interrupts may have accumulated while we were switched out. */
        if (!sched_depth && UNLIKELY(GET_THREAD()->interrupt_flag)) {
            rb_threadptr_execute_interrupts_rec(GET_THREAD(), sched_depth+1);
        }
    }
}
01034 
/* Public entry point: schedule at depth 0 so pending interrupts run. */
void
rb_thread_schedule(void)
{
    rb_thread_schedule_rec(0);
}
01040 
01041 /* blocking region */
01042 
/*
 * Leave a blocking region: re-acquire the GVL, then restore the thread
 * bookkeeping (current-thread slot, unblock function, status) that
 * blocking_region_begin() saved into *region.
 */
static inline void
blocking_region_end(rb_thread_t *th, struct rb_blocking_region_buffer *region)
{
    native_mutex_lock(&th->vm->global_vm_lock);
    rb_thread_set_current(th);
    thread_debug("leave blocking region (%p)\n", (void *)th);
    remove_signal_thread_list(th);
    th->blocking_region_buffer = 0;
    reset_unblock_function(th, &region->oldubf);
    /* Restore the saved status only if nothing changed it while we
     * were blocked (e.g. rb_thread_kill sets THREAD_TO_KILL). */
    if (th->status == THREAD_STOPPED) {
        th->status = region->prev_status;
    }
}
01056 
/*
 * Allocate a blocking-region buffer and release the GVL, using the
 * built-in select-based unblocking function.  Must be paired with
 * rb_thread_blocking_region_end(), which frees the buffer.
 */
struct rb_blocking_region_buffer *
rb_thread_blocking_region_begin(void)
{
    rb_thread_t *th = GET_THREAD();
    struct rb_blocking_region_buffer *region = ALLOC(struct rb_blocking_region_buffer);
    blocking_region_begin(th, region, ubf_select, th);
    return region;
}
01065 
/*
 * Re-acquire the GVL and free the buffer allocated by
 * rb_thread_blocking_region_begin().  errno is saved up front and
 * restored at the end so the caller can still inspect the error code
 * of the blocking call after the interrupt check runs.
 */
void
rb_thread_blocking_region_end(struct rb_blocking_region_buffer *region)
{
    int saved_errno = errno;
    rb_thread_t *th = GET_THREAD();
    blocking_region_end(th, region);
    xfree(region);
    RUBY_VM_CHECK_INTS();
    errno = saved_errno;
}
01076 
01077 /*
01078  * rb_thread_blocking_region - permit concurrent/parallel execution.
01079  *
01080  * This function does:
01081  *   (1) release GVL.
01082  *       Other Ruby threads may run in parallel.
01083  *   (2) call func with data1.
01084  *   (3) acquire GVL.
01085  *       Other Ruby threads can not run in parallel any more.
01086  *
01087  *   If another thread interrupts this thread (Thread#kill, signal delivery,
01088  *   VM-shutdown request, and so on), `ubf()' is called (`ubf()' means
01089  *   "un-blocking function").  `ubf()' should interrupt `func()' execution.
01090  *
01091  *   There are built-in ubfs and you can specify these ubfs.
01092  *   However, we can not guarantee our built-in ubfs interrupt
01093  *   your `func()' correctly.  Be careful to use rb_thread_blocking_region().
01094  *
01095  *     * RUBY_UBF_IO: ubf for IO operation
01096  *     * RUBY_UBF_PROCESS: ubf for process operation
01097  *
01098  *   NOTE: You can not execute most of Ruby C API and touch Ruby
01099  *         objects in `func()' and `ubf()', including raising an
01100  *         exception, because current thread doesn't acquire GVL
01101  *         (cause synchronization problem).  If you need to do it,
01102  *         read source code of C APIs and confirm by yourself.
01103  *
01104  *   NOTE: In short, this API is difficult to use safely.  I recommend you
01105  *         use other ways if you have.  We lack experiences to use this API.
01106  *         Please report your problem related on it.
01107  *
01108  *   Safe C API:
01109  *     * rb_thread_interrupted() - check interrupt flag
01110  *     * ruby_xalloc(), ruby_xrealloc(), ruby_xfree() -
01111  *         if they called without GVL, acquire GVL automatically.
01112  */
VALUE
rb_thread_blocking_region(
    rb_blocking_function_t *func, void *data1,
    rb_unblock_function_t *ubf, void *data2)
{
    VALUE val;
    rb_thread_t *th = GET_THREAD();
    int saved_errno = 0;

    /* The RUBY_UBF_IO / RUBY_UBF_PROCESS sentinels select the built-in
     * select-based ubf, which takes the thread itself as its argument. */
    if (ubf == RUBY_UBF_IO || ubf == RUBY_UBF_PROCESS) {
        ubf = ubf_select;
        data2 = th;
    }

    /* Run func(data1) with the GVL released; capture errno inside the
     * region because re-acquiring the GVL may clobber it. */
    BLOCKING_REGION({
        val = func(data1);
        saved_errno = errno;
    }, ubf, data2);
    errno = saved_errno;

    return val;
}
01135 
01136 /* alias of rb_thread_blocking_region() */
01137 
VALUE
rb_thread_call_without_gvl(
    rb_blocking_function_t *func, void *data1,
    rb_unblock_function_t *ubf, void *data2)
{
    /* Thin alias; see rb_thread_blocking_region() for the contract. */
    return rb_thread_blocking_region(func, data1, ubf, data2);
}
01145 
01146 /*
01147  * rb_thread_call_with_gvl - re-enter into Ruby world while releasing GVL.
01148  *
01149  ***
01150  *** This API is EXPERIMENTAL!
01151  *** We do not guarantee that this API remains in ruby 1.9.2 or later.
01152  ***
01153  *
01154  * While releasing GVL using rb_thread_blocking_region() or
01155  * rb_thread_call_without_gvl(), you can not access Ruby values or invoke methods.
01156  * If you need to access it, you must use this function rb_thread_call_with_gvl().
01157  *
01158  * This function rb_thread_call_with_gvl() does:
01159  * (1) acquire GVL.
01160  * (2) call passed function `func'.
01161  * (3) release GVL.
01162  * (4) return a value which is returned at (2).
01163  *
 * NOTE: You should not return a Ruby object at (2) because such an
 *       object will not be marked.
01166  *
01167  * NOTE: If an exception is raised in `func', this function "DOES NOT"
01168  *       protect (catch) the exception.  If you have any resources
01169  *       which should free before throwing exception, you need use
01170  *       rb_protect() in `func' and return a value which represents
01171  *       exception is raised.
01172  *
 * NOTE: This function should not be called by a thread which
 *       was not created as a Ruby thread (i.e. created by Thread.new or
 *       the like).  In other words, this function *DOES NOT* associate
 *       a NON-Ruby thread with a Ruby thread.
01177  */
void *
rb_thread_call_with_gvl(void *(*func)(void *), void *data1)
{
    rb_thread_t *th = ruby_thread_from_native();
    struct rb_blocking_region_buffer *brb;
    struct rb_unblock_callback prev_unblock;
    void *r;

    if (th == 0) {
        /* An error occurred, but we can't use rb_bug()
         * because this thread is not a Ruby thread.
         * What should we do?
         */

        fprintf(stderr, "[BUG] rb_thread_call_with_gvl() is called by non-ruby thread\n");
        exit(1);
    }

    brb = (struct rb_blocking_region_buffer *)th->blocking_region_buffer;
    prev_unblock = th->unblock;

    if (brb == 0) {
        /* A NULL buffer means we are not inside a blocking region,
         * i.e. the caller already holds the GVL. */
        rb_bug("rb_thread_call_with_gvl: called by a thread which has GVL.");
    }

    /* Close the current blocking region (re-acquiring the GVL)... */
    blocking_region_end(th, brb);
    /* enter the Ruby world: you can access Ruby values, methods and so on. */
    r = (*func)(data1);
    /* leave the Ruby world: you can not access Ruby values, etc. */
    blocking_region_begin(th, brb, prev_unblock.func, prev_unblock.arg);
    return r;
}
01210 
01211 /*
01212  * ruby_thread_has_gvl_p - check if current native thread has GVL.
01213  *
01214  ***
01215  *** This API is EXPERIMENTAL!
01216  *** We do not guarantee that this API remains in ruby 1.9.2 or later.
01217  ***
01218  */
01219 
01220 int
01221 ruby_thread_has_gvl_p(void)
01222 {
01223     rb_thread_t *th = ruby_thread_from_native();
01224 
01225     if (th && th->blocking_region_buffer == 0) {
01226         return 1;
01227     }
01228     else {
01229         return 0;
01230     }
01231 }
01232 
01233 /*
01234  *  call-seq:
01235  *     Thread.pass   -> nil
01236  *
 * Give the thread scheduler a hint to pass execution to another thread.
 * A running thread may or may not switch; it depends on the OS and processor.
01239  */
01240 
/* Thread.pass: voluntarily yield to the scheduler; always returns nil. */
static VALUE
thread_s_pass(VALUE klass)
{
    rb_thread_schedule();
    return Qnil;
}
01247 
01248 /*
01249  *
01250  */
01251 
/*
 * Execute every interrupt pending on th: buffered signals (delivered
 * only on the main thread), exceptions thrown from other threads
 * (Thread#raise / Thread#kill), deferred finalizers, and timer-driven
 * rescheduling.
 *
 * sched_depth is non-zero when we were (transitively) invoked from
 * rb_thread_schedule_rec(); it suppresses the timer-interrupt
 * rescheduling to avoid mutual recursion.
 */
static void
rb_threadptr_execute_interrupts_rec(rb_thread_t *th, int sched_depth)
{
    if (th->raised_flag) return;

    /* Loop: handlers may set new interrupt bits while we run. */
    while (th->interrupt_flag) {
        enum rb_thread_status status = th->status;
        int timer_interrupt = th->interrupt_flag & 0x01;
        int finalizer_interrupt = th->interrupt_flag & 0x04;
        int sig;

        th->status = THREAD_RUNNABLE;
        th->interrupt_flag = 0;

        /* signal handling */
        if (th == th->vm->main_thread) {
            while ((sig = rb_get_next_signal()) != 0) {
                rb_signal_exec(th, sig);
            }
        }

        /* exception from another thread */
        if (th->thrown_errinfo) {
            VALUE err = th->thrown_errinfo;
            th->thrown_errinfo = 0;
            thread_debug("rb_thread_execute_interrupts: %ld\n", err);

            /* Kill/terminate requests unwind via TAG_FATAL rather than
             * raising a catchable exception. */
            if (err == eKillSignal || err == eTerminateSignal) {
                th->errinfo = INT2FIX(TAG_FATAL);
                TH_JUMP_TAG(th, TAG_FATAL);
            }
            else {
                rb_exc_raise(err);
            }
        }
        th->status = status;

        if (finalizer_interrupt) {
            rb_gc_finalize_deferred();
        }

        if (!sched_depth && timer_interrupt) {
            sched_depth++;
            EXEC_EVENT_HOOK(th, RUBY_EVENT_SWITCH, th->cfp->self, 0, 0);

            /* Consume one slice of the thread's time quantum; once it
             * is exhausted, keep yielding until a negative slice is
             * paid back, then refill the slice from the priority. */
            if (th->slice > 0) {
                th->slice--;
            }
            else {
              reschedule:
                rb_thread_schedule_rec(sched_depth+1);
                if (th->slice < 0) {
                    th->slice++;
                    goto reschedule;
                }
                else {
                    th->slice = th->priority;
                }
            }
        }
    }
}
01314 
/* Public entry: execute pending interrupts at recursion depth 0. */
void
rb_threadptr_execute_interrupts(rb_thread_t *th)
{
    rb_threadptr_execute_interrupts_rec(th, 0);
}
01320 
/* Empty stub kept for linkage; slated for removal (see TODO below). */
void
rb_gc_mark_threads(void)
{
    /* TODO: remove */
}
01326 
01327 /*****************************************************/
01328 
/* Mark th as ready to run by raising its interrupt flag via
 * rb_threadptr_interrupt(). */
static void
rb_threadptr_ready(rb_thread_t *th)
{
    rb_threadptr_interrupt(th);
}
01334 
/*
 * Queue an exception (built from argc/argv, as for Kernel#raise) on th
 * and wake it.  Raising on a dead thread is a silent no-op.  If th
 * already has an undelivered exception, yield and retry until the
 * slot is free.  Always returns Qnil.
 */
static VALUE
rb_threadptr_raise(rb_thread_t *th, int argc, VALUE *argv)
{
    VALUE exc;

  again:
    if (rb_threadptr_dead(th)) {
        return Qnil;
    }

    /* Busy-wait (rescheduling each time) while a previous exception is
     * still pending delivery on th. */
    if (th->thrown_errinfo != 0 || th->raised_flag) {
        rb_thread_schedule();
        goto again;
    }

    exc = rb_make_exception(argc, argv);
    th->thrown_errinfo = exc;
    rb_threadptr_ready(th);
    return Qnil;
}
01355 
/*
 * Deliver signal sig as a SignalException.  NOTE(review): despite the
 * th parameter, the exception is always queued on the VM's main
 * thread (th is only used to reach the vm) — confirm this is intended.
 */
void
rb_threadptr_signal_raise(rb_thread_t *th, int sig)
{
    VALUE argv[2];

    argv[0] = rb_eSignal;
    argv[1] = INT2FIX(sig);
    rb_threadptr_raise(th->vm->main_thread, 2, argv);
}
01365 
/* Queue a SystemExit ("exit") exception on the VM's main thread. */
void
rb_threadptr_signal_exit(rb_thread_t *th)
{
    VALUE argv[2];

    argv[0] = rb_eSystemExit;
    argv[1] = rb_str_new2("exit");
    rb_threadptr_raise(th->vm->main_thread, 2, argv);
}
01375 
01376 #if defined(POSIX_SIGNAL) && defined(SIGSEGV) && defined(HAVE_SIGALTSTACK)
01377 #define USE_SIGALTSTACK
01378 #endif
01379 
01380 void
01381 ruby_thread_stack_overflow(rb_thread_t *th)
01382 {
01383     th->raised_flag = 0;
01384 #ifdef USE_SIGALTSTACK
01385     th->raised_flag = 0;
01386     rb_exc_raise(sysstack_error);
01387 #else
01388     th->errinfo = sysstack_error;
01389     TH_JUMP_TAG(th, TAG_RAISE);
01390 #endif
01391 }
01392 
01393 int
01394 rb_threadptr_set_raised(rb_thread_t *th)
01395 {
01396     if (th->raised_flag & RAISED_EXCEPTION) {
01397         return 1;
01398     }
01399     th->raised_flag |= RAISED_EXCEPTION;
01400     return 0;
01401 }
01402 
01403 int
01404 rb_threadptr_reset_raised(rb_thread_t *th)
01405 {
01406     if (!(th->raised_flag & RAISED_EXCEPTION)) {
01407         return 0;
01408     }
01409     th->raised_flag &= ~RAISED_EXCEPTION;
01410     return 1;
01411 }
01412 
/* NOTE(review): presumably meant to wake threads blocked on fd when it
 * is closed; currently an empty stub — see TODO. */
void
rb_thread_fd_close(int fd)
{
    /* TODO: fix me */
}
01418 
01419 /*
01420  *  call-seq:
01421  *     thr.raise
01422  *     thr.raise(string)
01423  *     thr.raise(exception [, string [, array]])
01424  *
01425  *  Raises an exception (see <code>Kernel::raise</code>) from <i>thr</i>. The
01426  *  caller does not have to be <i>thr</i>.
01427  *
01428  *     Thread.abort_on_exception = true
01429  *     a = Thread.new { sleep(200) }
01430  *     a.raise("Gotcha")
01431  *
01432  *  <em>produces:</em>
01433  *
01434  *     prog.rb:3: Gotcha (RuntimeError)
01435  *      from prog.rb:2:in `initialize'
01436  *      from prog.rb:2:in `new'
01437  *      from prog.rb:2
01438  */
01439 
/* Thread#raise: forward the raise argument list to the target thread. */
static VALUE
thread_raise_m(int argc, VALUE *argv, VALUE self)
{
    rb_thread_t *th;
    GetThreadPtr(self, th);
    rb_threadptr_raise(th, argc, argv);
    return Qnil;
}
01448 
01449 
01450 /*
01451  *  call-seq:
01452  *     thr.exit        -> thr or nil
01453  *     thr.kill        -> thr or nil
01454  *     thr.terminate   -> thr or nil
01455  *
01456  *  Terminates <i>thr</i> and schedules another thread to be run. If this thread
01457  *  is already marked to be killed, <code>exit</code> returns the
01458  *  <code>Thread</code>. If this is the main thread, or the last thread, exits
01459  *  the process.
01460  */
01461 
VALUE
rb_thread_kill(VALUE thread)
{
    rb_thread_t *th;

    GetThreadPtr(thread, th);

    /* $SAFE check: killing another thread with a lower safe level
     * is restricted. */
    if (th != GET_THREAD() && th->safe_level < 4) {
        rb_secure(4);
    }
    /* Already dying or dead: nothing to do. */
    if (th->status == THREAD_TO_KILL || th->status == THREAD_KILLED) {
        return thread;
    }
    /* Killing the main thread exits the whole process. */
    if (th == th->vm->main_thread) {
        rb_exit(EXIT_SUCCESS);
    }

    thread_debug("rb_thread_kill: %p (%p)\n", (void *)th, (void *)th->thread_id);

    /* Queue the kill pseudo-exception and wake the target. */
    rb_threadptr_interrupt(th);
    th->thrown_errinfo = eKillSignal;
    th->status = THREAD_TO_KILL;

    return thread;
}
01487 
01488 
01489 /*
01490  *  call-seq:
01491  *     Thread.kill(thread)   -> thread
01492  *
01493  *  Causes the given <em>thread</em> to exit (see <code>Thread::exit</code>).
01494  *
01495  *     count = 0
01496  *     a = Thread.new { loop { count += 1 } }
01497  *     sleep(0.1)       #=> 0
01498  *     Thread.kill(a)   #=> #<Thread:0x401b3d30 dead>
01499  *     count            #=> 93947
01500  *     a.alive?         #=> false
01501  */
01502 
01503 static VALUE
01504 rb_thread_s_kill(VALUE obj, VALUE th)
01505 {
01506     if (CLASS_OF(th) != rb_cThread) {
01507         rb_raise(rb_eTypeError, 
01508                 "wrong argument type %s (expected Thread)",
01509                 rb_obj_classname(th));
01510     }
01511     return rb_thread_kill(th);
01512 }
01513 
01514 
01515 /*
01516  *  call-seq:
01517  *     Thread.exit   -> thread
01518  *
01519  *  Terminates the currently running thread and schedules another thread to be
01520  *  run. If this thread is already marked to be killed, <code>exit</code>
01521  *  returns the <code>Thread</code>. If this is the main thread, or the last
01522  *  thread, exit the process.
01523  */
01524 
/* Thread.exit: kill the calling thread itself. */
static VALUE
rb_thread_exit(void)
{
    return rb_thread_kill(GET_THREAD()->self);
}
01530 
01531 
01532 /*
01533  *  call-seq:
01534  *     thr.wakeup   -> thr
01535  *
01536  *  Marks <i>thr</i> as eligible for scheduling (it may still remain blocked on
01537  *  I/O, however). Does not invoke the scheduler (see <code>Thread#run</code>).
01538  *
01539  *     c = Thread.new { Thread.stop; puts "hey!" }
01540  *     sleep 0.1 while c.status!='sleep'
01541  *     c.wakeup
01542  *     c.join
01543  *
01544  *  <em>produces:</em>
01545  *
01546  *     hey!
01547  */
01548 
VALUE
rb_thread_wakeup(VALUE thread)
{
    rb_thread_t *th;
    GetThreadPtr(thread, th);

    if (th->status == THREAD_KILLED) {
        rb_raise(rb_eThreadError, "killed thread");
    }
    rb_threadptr_ready(th);
    /* Do not resurrect a thread that is in the middle of being killed. */
    if (th->status != THREAD_TO_KILL) {
        th->status = THREAD_RUNNABLE;
    }
    return thread;
}
01564 
01565 
01566 /*
01567  *  call-seq:
01568  *     thr.run   -> thr
01569  *
01570  *  Wakes up <i>thr</i>, making it eligible for scheduling.
01571  *
01572  *     a = Thread.new { puts "a"; Thread.stop; puts "c" }
01573  *     sleep 0.1 while a.status!='sleep'
01574  *     puts "Got here"
01575  *     a.run
01576  *     a.join
01577  *
01578  *  <em>produces:</em>
01579  *
01580  *     a
01581  *     Got here
01582  *     c
01583  */
01584 
/* Thread#run: wake the thread, then yield so it can actually run. */
VALUE
rb_thread_run(VALUE thread)
{
    rb_thread_wakeup(thread);
    rb_thread_schedule();
    return thread;
}
01592 
01593 
01594 /*
01595  *  call-seq:
01596  *     Thread.stop   -> nil
01597  *
01598  *  Stops execution of the current thread, putting it into a ``sleep'' state,
01599  *  and schedules execution of another thread.
01600  *
01601  *     a = Thread.new { print "a"; Thread.stop; print "c" }
01602  *     sleep 0.1 while a.status!='sleep'
01603  *     print "b"
01604  *     a.run
01605  *     a.join
01606  *
01607  *  <em>produces:</em>
01608  *
01609  *     abc
01610  */
01611 
VALUE
rb_thread_stop(void)
{
    /* Sleeping forever with no other thread left to wake us would
     * deadlock the process. */
    if (rb_thread_alone()) {
        rb_raise(rb_eThreadError,
                 "stopping only thread\n\tnote: use sleep to stop forever");
    }
    rb_thread_sleep_deadly();
    return Qnil;
}
01622 
/* st_foreach callback: append every not-yet-killed thread to the array
 * passed via data. */
static int
thread_list_i(st_data_t key, st_data_t val, void *data)
{
    VALUE ary = (VALUE)data;
    rb_thread_t *th;
    GetThreadPtr((VALUE)key, th);

    switch (th->status) {
      case THREAD_RUNNABLE:
      case THREAD_STOPPED:
      case THREAD_STOPPED_FOREVER:
      case THREAD_TO_KILL:
        rb_ary_push(ary, th->self);
        /* fallthrough to the shared break */
      default:
        break;
    }
    return ST_CONTINUE;
}
01641 
01642 /********************************************************************/
01643 
01644 /*
01645  *  call-seq:
01646  *     Thread.list   -> array
01647  *
01648  *  Returns an array of <code>Thread</code> objects for all threads that are
01649  *  either runnable or stopped.
01650  *
01651  *     Thread.new { sleep(200) }
01652  *     Thread.new { 1000000.times {|i| i*i } }
01653  *     Thread.new { Thread.stop }
01654  *     Thread.list.each {|t| p t}
01655  *
01656  *  <em>produces:</em>
01657  *
01658  *     #<Thread:0x401b3e84 sleep>
01659  *     #<Thread:0x401b3f38 run>
01660  *     #<Thread:0x401b3fb0 sleep>
01661  *     #<Thread:0x401bdf4c run>
01662  */
01663 
/* Snapshot the VM's living-threads table into a new Ruby array. */
VALUE
rb_thread_list(void)
{
    VALUE ary = rb_ary_new();
    st_foreach(GET_THREAD()->vm->living_threads, thread_list_i, ary);
    return ary;
}
01671 
/* Return the Thread object of the calling thread. */
VALUE
rb_thread_current(void)
{
    return GET_THREAD()->self;
}
01677 
01678 /*
01679  *  call-seq:
01680  *     Thread.current   -> thread
01681  *
01682  *  Returns the currently executing thread.
01683  *
01684  *     Thread.current   #=> #<Thread:0x401bdf4c run>
01685  */
01686 
/* Thread.current: Ruby-level wrapper. */
static VALUE
thread_s_current(VALUE klass)
{
    return rb_thread_current();
}
01692 
/* Return the Thread object of the VM's main thread. */
VALUE
rb_thread_main(void)
{
    return GET_THREAD()->vm->main_thread->self;
}
01698 
01699 /*
01700  *  call-seq:
01701  *     Thread.main   -> thread
01702  *
01703  *  Returns the main thread.
01704  */
01705 
/* Thread.main: Ruby-level wrapper. */
static VALUE
rb_thread_s_main(VALUE klass)
{
    return rb_thread_main();
}
01711 
01712 
01713 /*
01714  *  call-seq:
01715  *     Thread.abort_on_exception   -> true or false
01716  *
01717  *  Returns the status of the global ``abort on exception'' condition.  The
01718  *  default is <code>false</code>. When set to <code>true</code>, or if the
01719  *  global <code>$DEBUG</code> flag is <code>true</code> (perhaps because the
01720  *  command line option <code>-d</code> was specified) all threads will abort
01721  *  (the process will <code>exit(0)</code>) if an exception is raised in any
01722  *  thread. See also <code>Thread::abort_on_exception=</code>.
01723  */
01724 
static VALUE
rb_thread_s_abort_exc(void)
{
    /* This is the VM-global flag, not the per-thread one. */
    return GET_THREAD()->vm->thread_abort_on_exception ? Qtrue : Qfalse;
}
01730 
01731 
01732 /*
01733  *  call-seq:
01734  *     Thread.abort_on_exception= boolean   -> true or false
01735  *
01736  *  When set to <code>true</code>, all threads will abort if an exception is
01737  *  raised. Returns the new state.
01738  *
01739  *     Thread.abort_on_exception = true
01740  *     t1 = Thread.new do
01741  *       puts  "In new thread"
01742  *       raise "Exception from thread"
01743  *     end
01744  *     sleep(1)
01745  *     puts "not reached"
01746  *
01747  *  <em>produces:</em>
01748  *
01749  *     In new thread
01750  *     prog.rb:4: Exception from thread (RuntimeError)
01751  *      from prog.rb:2:in `initialize'
01752  *      from prog.rb:2:in `new'
01753  *      from prog.rb:2
01754  */
01755 
static VALUE
rb_thread_s_abort_exc_set(VALUE self, VALUE val)
{
    rb_secure(4);   /* changing global behavior is restricted under $SAFE */
    GET_THREAD()->vm->thread_abort_on_exception = RTEST(val);
    return val;
}
01763 
01764 
01765 /*
01766  *  call-seq:
01767  *     thr.abort_on_exception   -> true or false
01768  *
01769  *  Returns the status of the thread-local ``abort on exception'' condition for
01770  *  <i>thr</i>. The default is <code>false</code>. See also
01771  *  <code>Thread::abort_on_exception=</code>.
01772  */
01773 
static VALUE
rb_thread_abort_exc(VALUE thread)
{
    /* Per-thread flag (cf. the VM-global Thread.abort_on_exception). */
    rb_thread_t *th;
    GetThreadPtr(thread, th);
    return th->abort_on_exception ? Qtrue : Qfalse;
}
01781 
01782 
01783 /*
01784  *  call-seq:
01785  *     thr.abort_on_exception= boolean   -> true or false
01786  *
01787  *  When set to <code>true</code>, causes all threads (including the main
01788  *  program) to abort if an exception is raised in <i>thr</i>. The process will
01789  *  effectively <code>exit(0)</code>.
01790  */
01791 
static VALUE
rb_thread_abort_exc_set(VALUE thread, VALUE val)
{
    rb_thread_t *th;
    rb_secure(4);   /* restricted under $SAFE, like the global setter */

    GetThreadPtr(thread, th);
    th->abort_on_exception = RTEST(val);
    return val;
}
01802 
01803 
01804 /*
01805  *  call-seq:
01806  *     thr.group   -> thgrp or nil
01807  *
01808  *  Returns the <code>ThreadGroup</code> which contains <i>thr</i>, or nil if
01809  *  the thread is not a member of any group.
01810  *
01811  *     Thread.main.group   #=> #<ThreadGroup:0x4029d914>
01812  */
01813 
01814 VALUE
01815 rb_thread_group(VALUE thread)
01816 {
01817     rb_thread_t *th;
01818     VALUE group;
01819     GetThreadPtr(thread, th);
01820     group = th->thgroup;
01821 
01822     if (!group) {
01823         group = Qnil;
01824     }
01825     return group;
01826 }
01827 
01828 static const char *
01829 thread_status_name(enum rb_thread_status status)
01830 {
01831     switch (status) {
01832       case THREAD_RUNNABLE:
01833         return "run";
01834       case THREAD_STOPPED:
01835       case THREAD_STOPPED_FOREVER:
01836         return "sleep";
01837       case THREAD_TO_KILL:
01838         return "aborting";
01839       case THREAD_KILLED:
01840         return "dead";
01841       default:
01842         return "unknown";
01843     }
01844 }
01845 
/* True iff th has fully terminated. */
static int
rb_threadptr_dead(rb_thread_t *th)
{
    return th->status == THREAD_KILLED;
}
01851 
01852 
01853 /*
01854  *  call-seq:
01855  *     thr.status   -> string, false or nil
01856  *
01857  *  Returns the status of <i>thr</i>: ``<code>sleep</code>'' if <i>thr</i> is
01858  *  sleeping or waiting on I/O, ``<code>run</code>'' if <i>thr</i> is executing,
01859  *  ``<code>aborting</code>'' if <i>thr</i> is aborting, <code>false</code> if
01860  *  <i>thr</i> terminated normally, and <code>nil</code> if <i>thr</i>
01861  *  terminated with an exception.
01862  *
01863  *     a = Thread.new { raise("die now") }
01864  *     b = Thread.new { Thread.stop }
01865  *     c = Thread.new { Thread.exit }
01866  *     d = Thread.new { sleep }
01867  *     d.kill                  #=> #<Thread:0x401b3678 aborting>
01868  *     a.status                #=> nil
01869  *     b.status                #=> "sleep"
01870  *     c.status                #=> false
01871  *     d.status                #=> "aborting"
01872  *     Thread.current.status   #=> "run"
01873  */
01874 
static VALUE
rb_thread_status(VALUE thread)
{
    rb_thread_t *th;
    GetThreadPtr(thread, th);

    if (rb_threadptr_dead(th)) {
        /* Died with a real exception in errinfo => nil; terminated
         * normally or via a FIXNUM tag (e.g. kill) => false. */
        if (!NIL_P(th->errinfo) && !FIXNUM_P(th->errinfo)
            /* TODO */ ) {
            return Qnil;
        }
        return Qfalse;
    }
    return rb_str_new2(thread_status_name(th->status));
}
01890 
01891 
01892 /*
01893  *  call-seq:
01894  *     thr.alive?   -> true or false
01895  *
01896  *  Returns <code>true</code> if <i>thr</i> is running or sleeping.
01897  *
01898  *     thr = Thread.new { }
01899  *     thr.join                #=> #<Thread:0x401b3fb0 dead>
01900  *     Thread.current.alive?   #=> true
01901  *     thr.alive?              #=> false
01902  */
01903 
01904 static VALUE
01905 rb_thread_alive_p(VALUE thread)
01906 {
01907     rb_thread_t *th;
01908     GetThreadPtr(thread, th);
01909 
01910     if (rb_threadptr_dead(th))
01911         return Qfalse;
01912     return Qtrue;
01913 }
01914 
01915 /*
01916  *  call-seq:
01917  *     thr.stop?   -> true or false
01918  *
01919  *  Returns <code>true</code> if <i>thr</i> is dead or sleeping.
01920  *
01921  *     a = Thread.new { Thread.stop }
01922  *     b = Thread.current
01923  *     a.stop?   #=> true
01924  *     b.stop?   #=> false
01925  */
01926 
01927 static VALUE
01928 rb_thread_stop_p(VALUE thread)
01929 {
01930     rb_thread_t *th;
01931     GetThreadPtr(thread, th);
01932 
01933     if (rb_threadptr_dead(th))
01934         return Qtrue;
01935     if (th->status == THREAD_STOPPED || th->status == THREAD_STOPPED_FOREVER)
01936         return Qtrue;
01937     return Qfalse;
01938 }
01939 
01940 /*
01941  *  call-seq:
01942  *     thr.safe_level   -> integer
01943  *
01944  *  Returns the safe level in effect for <i>thr</i>. Setting thread-local safe
01945  *  levels can help when implementing sandboxes which run insecure code.
01946  *
01947  *     thr = Thread.new { $SAFE = 3; sleep }
01948  *     Thread.current.safe_level   #=> 0
01949  *     thr.safe_level              #=> 3
01950  */
01951 
static VALUE
rb_thread_safe_level(VALUE thread)
{
    rb_thread_t *th;
    GetThreadPtr(thread, th);

    /* Each thread carries its own $SAFE level. */
    return INT2NUM(th->safe_level);
}
01960 
01961 /*
01962  * call-seq:
01963  *   thr.inspect   -> string
01964  *
01965  * Dump the name, id, and status of _thr_ to a string.
01966  */
01967 
static VALUE
rb_thread_inspect(VALUE thread)
{
    const char *cname = rb_obj_classname(thread);
    rb_thread_t *th;
    const char *status;
    VALUE str;

    GetThreadPtr(thread, th);
    status = thread_status_name(th->status);
    str = rb_sprintf("#<%s:%p %s>", cname, (void *)thread, status);
    /* Propagate taint/trust from the thread object to the string. */
    OBJ_INFECT(str, thread);

    return str;
}
01983 
/*
 * Fetch the thread-local variable id from thread; returns Qnil when the
 * thread has no local storage table or no such key.  Reading another
 * thread's locals is forbidden at $SAFE >= 4.
 */
VALUE
rb_thread_local_aref(VALUE thread, ID id)
{
    rb_thread_t *th;
    VALUE val;

    GetThreadPtr(thread, th);
    if (rb_safe_level() >= 4 && th != GET_THREAD()) {
        rb_raise(rb_eSecurityError, "Insecure: thread locals");
    }
    if (!th->local_storage) {
        return Qnil;
    }
    if (st_lookup(th->local_storage, id, &val)) {
        return val;
    }
    return Qnil;
}
02002 
02003 /*
02004  *  call-seq:
02005  *      thr[sym]   -> obj or nil
02006  *
02007  *  Attribute Reference---Returns the value of a thread-local variable, using
02008  *  either a symbol or a string name. If the specified variable does not exist,
02009  *  returns <code>nil</code>.
02010  *
02011  *     a = Thread.new { Thread.current["name"] = "A"; Thread.stop }
02012  *     b = Thread.new { Thread.current[:name]  = "B"; Thread.stop }
02013  *     c = Thread.new { Thread.current["name"] = "C"; Thread.stop }
02014  *     Thread.list.each {|x| puts "#{x.inspect}: #{x[:name]}" }
02015  *
02016  *  <em>produces:</em>
02017  *
02018  *     #<Thread:0x401b3b3c sleep>: C
02019  *     #<Thread:0x401b3bc8 sleep>: B
02020  *     #<Thread:0x401b3c68 sleep>: A
02021  *     #<Thread:0x401bdf4c run>:
02022  */
02023 
/* Thread#[]: convert the key (string or symbol) to an ID and look it up. */
static VALUE
rb_thread_aref(VALUE thread, VALUE id)
{
    return rb_thread_local_aref(thread, rb_to_id(id));
}
02029 
/*
 * Set thread-local variable id on thread.  Assigning nil deletes the
 * key.  The storage table is created lazily on first write.  Raises
 * SecurityError when modifying another thread's locals at $SAFE >= 4,
 * and a frozen error for frozen threads.
 */
VALUE
rb_thread_local_aset(VALUE thread, ID id, VALUE val)
{
    rb_thread_t *th;
    GetThreadPtr(thread, th);

    if (rb_safe_level() >= 4 && th != GET_THREAD()) {
        rb_raise(rb_eSecurityError, "Insecure: can't modify thread locals");
    }
    if (OBJ_FROZEN(thread)) {
        rb_error_frozen("thread locals");
    }
    if (!th->local_storage) {
        th->local_storage = st_init_numtable();
    }
    if (NIL_P(val)) {
        st_delete_wrap(th->local_storage, id);
        return Qnil;
    }
    st_insert(th->local_storage, id, val);
    return val;
}
02052 
02053 /*
02054  *  call-seq:
02055  *      thr[sym] = obj   -> obj
02056  *
02057  *  Attribute Assignment---Sets or creates the value of a thread-local variable,
02058  *  using either a symbol or a string. See also <code>Thread#[]</code>.
02059  */
02060 
/* Thread#[]=: convert the key to an ID and delegate to the setter. */
static VALUE
rb_thread_aset(VALUE self, VALUE id, VALUE val)
{
    return rb_thread_local_aset(self, rb_to_id(id), val);
}
02066 
02067 /*
02068  *  call-seq:
02069  *     thr.key?(sym)   -> true or false
02070  *
02071  *  Returns <code>true</code> if the given string (or symbol) exists as a
02072  *  thread-local variable.
02073  *
02074  *     me = Thread.current
02075  *     me[:oliver] = "a"
02076  *     me.key?(:oliver)    #=> true
02077  *     me.key?(:stanley)   #=> false
02078  */
02079 
02080 static VALUE
02081 rb_thread_key_p(VALUE self, VALUE key)
02082 {
02083     rb_thread_t *th;
02084     ID id = rb_to_id(key);
02085 
02086     GetThreadPtr(self, th);
02087 
02088     if (!th->local_storage) {
02089         return Qfalse;
02090     }
02091     if (st_lookup(th->local_storage, id, 0)) {
02092         return Qtrue;
02093     }
02094     return Qfalse;
02095 }
02096 
02097 static int
02098 thread_keys_i(ID key, VALUE value, VALUE ary)
02099 {
02100     rb_ary_push(ary, ID2SYM(key));
02101     return ST_CONTINUE;
02102 }
02103 
02104 static int
02105 vm_living_thread_num(rb_vm_t *vm)
02106 {
02107     return vm->living_threads->num_entries;
02108 }
02109 
02110 int
02111 rb_thread_alone(void)
02112 {
02113     int num = 1;
02114     if (GET_THREAD()->vm->living_threads) {
02115         num = vm_living_thread_num(GET_THREAD()->vm);
02116         thread_debug("rb_thread_alone: %d\n", num);
02117     }
02118     return num == 1;
02119 }
02120 
02121 /*
02122  *  call-seq:
02123  *     thr.keys   -> array
02124  *
 *  Returns an array of the names of the thread-local variables (as Symbols).
02126  *
02127  *     thr = Thread.new do
02128  *       Thread.current[:cat] = 'meow'
02129  *       Thread.current["dog"] = 'woof'
02130  *     end
02131  *     thr.join   #=> #<Thread:0x401b3f10 dead>
02132  *     thr.keys   #=> [:dog, :cat]
02133  */
02134 
02135 static VALUE
02136 rb_thread_keys(VALUE self)
02137 {
02138     rb_thread_t *th;
02139     VALUE ary = rb_ary_new();
02140     GetThreadPtr(self, th);
02141 
02142     if (th->local_storage) {
02143         st_foreach(th->local_storage, thread_keys_i, ary);
02144     }
02145     return ary;
02146 }
02147 
02148 /*
02149  *  call-seq:
02150  *     thr.priority   -> integer
02151  *
 *  Returns the priority of <i>thr</i>. The default priority is inherited
 *  from the thread that created it, or is zero for the initial main
 *  thread; higher-priority threads will run more frequently
 *  than lower-priority threads (but lower-priority threads can also run).
 *
 *  This is just a hint for the Ruby thread scheduler.  It may be ignored on
 *  some platforms.
02159  *
02160  *     Thread.current.priority   #=> 0
02161  */
02162 
02163 static VALUE
02164 rb_thread_priority(VALUE thread)
02165 {
02166     rb_thread_t *th;
02167     GetThreadPtr(thread, th);
02168     return INT2NUM(th->priority);
02169 }
02170 
02171 
02172 /*
02173  *  call-seq:
02174  *     thr.priority= integer   -> thr
02175  *
02176  *  Sets the priority of <i>thr</i> to <i>integer</i>. Higher-priority threads
02177  *  will run more frequently than lower-priority threads (but lower-priority
02178  *  threads can also run).
02179  *
 *  This is just a hint for the Ruby thread scheduler.  It may be ignored on
 *  some platforms.
02182  *
02183  *     count1 = count2 = 0
02184  *     a = Thread.new do
02185  *           loop { count1 += 1 }
02186  *         end
02187  *     a.priority = -1
02188  *
02189  *     b = Thread.new do
02190  *           loop { count2 += 1 }
02191  *         end
02192  *     b.priority = -2
02193  *     sleep 1   #=> 1
02194  *     count1    #=> 622504
02195  *     count2    #=> 5832
02196  */
02197 
02198 static VALUE
02199 rb_thread_priority_set(VALUE thread, VALUE prio)
02200 {
02201     rb_thread_t *th;
02202     int priority;
02203     GetThreadPtr(thread, th);
02204 
02205     rb_secure(4);
02206 
02207 #if USE_NATIVE_THREAD_PRIORITY
02208     th->priority = NUM2INT(prio);
02209     native_thread_apply_priority(th);
02210 #else
02211     priority = NUM2INT(prio);
02212     if (priority > RUBY_THREAD_PRIORITY_MAX) {
02213         priority = RUBY_THREAD_PRIORITY_MAX;
02214     }
02215     else if (priority < RUBY_THREAD_PRIORITY_MIN) {
02216         priority = RUBY_THREAD_PRIORITY_MIN;
02217     }
02218     th->priority = priority;
02219     th->slice = priority;
02220 #endif
02221     return INT2NUM(th->priority);
02222 }
02223 
02224 /* for IO */
02225 
02226 #if defined(NFDBITS) && defined(HAVE_RB_FD_INIT)
02227 
02228 /*
02229  * several Unix platforms support file descriptors bigger than FD_SETSIZE
02230  * in select(2) system call.
02231  *
02232  * - Linux 2.2.12 (?)
02233  * - NetBSD 1.2 (src/sys/kern/sys_generic.c:1.25)
02234  *   select(2) documents how to allocate fd_set dynamically.
02235  *   http://netbsd.gw.com/cgi-bin/man-cgi?select++NetBSD-4.0
02236  * - FreeBSD 2.2 (src/sys/kern/sys_generic.c:1.19)
02237  * - OpenBSD 2.0 (src/sys/kern/sys_generic.c:1.4)
02238  *   select(2) documents how to allocate fd_set dynamically.
02239  *   http://www.openbsd.org/cgi-bin/man.cgi?query=select&manpath=OpenBSD+4.4
02240  * - HP-UX documents how to allocate fd_set dynamically.
02241  *   http://docs.hp.com/en/B2355-60105/select.2.html
02242  * - Solaris 8 has select_large_fdset
02243  *
02244  * When fd_set is not big enough to hold big file descriptors,
02245  * it should be allocated dynamically.
02246  * Note that this assumes fd_set is structured as bitmap.
02247  *
02248  * rb_fd_init allocates the memory.
02249  * rb_fd_term free the memory.
02250  * rb_fd_set may re-allocates bitmap.
02251  *
02252  * So rb_fd_set doesn't reject file descriptors bigger than FD_SETSIZE.
02253  */
02254 
02255 void
02256 rb_fd_init(volatile rb_fdset_t *fds)
02257 {
02258     fds->maxfd = 0;
02259     fds->fdset = ALLOC(fd_set);
02260     FD_ZERO(fds->fdset);
02261 }
02262 
02263 void
02264 rb_fd_term(rb_fdset_t *fds)
02265 {
02266     if (fds->fdset) xfree(fds->fdset);
02267     fds->maxfd = 0;
02268     fds->fdset = 0;
02269 }
02270 
/*
 * Clear every bit in the set without shrinking the allocation.
 * Both zeroing passes are needed: MEMZERO covers howmany(maxfd, NFDBITS)
 * fd_mask words (which may exceed sizeof(fd_set) when maxfd > FD_SETSIZE),
 * while the native FD_ZERO covers the first sizeof(fd_set) bytes even when
 * maxfd is small. Together they clear the larger of the two regions.
 */
void
rb_fd_zero(rb_fdset_t *fds)
{
    if (fds->fdset) {
        MEMZERO(fds->fdset, fd_mask, howmany(fds->maxfd, NFDBITS));
        FD_ZERO(fds->fdset);
    }
}
02279 
02280 static void
02281 rb_fd_resize(int n, rb_fdset_t *fds)
02282 {
02283     size_t m = howmany(n + 1, NFDBITS) * sizeof(fd_mask);
02284     size_t o = howmany(fds->maxfd, NFDBITS) * sizeof(fd_mask);
02285 
02286     if (m < sizeof(fd_set)) m = sizeof(fd_set);
02287     if (o < sizeof(fd_set)) o = sizeof(fd_set);
02288 
02289     if (m > o) {
02290         fds->fdset = xrealloc(fds->fdset, m);
02291         memset((char *)fds->fdset + o, 0, m - o);
02292     }
02293     if (n >= fds->maxfd) fds->maxfd = n + 1;
02294 }
02295 
02296 void
02297 rb_fd_set(int n, rb_fdset_t *fds)
02298 {
02299     rb_fd_resize(n, fds);
02300     FD_SET(n, fds->fdset);
02301 }
02302 
02303 void
02304 rb_fd_clr(int n, rb_fdset_t *fds)
02305 {
02306     if (n >= fds->maxfd) return;
02307     FD_CLR(n, fds->fdset);
02308 }
02309 
02310 int
02311 rb_fd_isset(int n, const rb_fdset_t *fds)
02312 {
02313     if (n >= fds->maxfd) return 0;
02314     return FD_ISSET(n, fds->fdset) != 0; /* "!= 0" avoids FreeBSD PR 91421 */
02315 }
02316 
/*
 * Copy a native fd_set into an rb_fdset_t, reallocating the destination
 * to hold descriptors below +max+.
 *
 * NOTE(review): when max > FD_SETSIZE, `size` exceeds sizeof(fd_set) and
 * memcpy reads past the end of the plain fd_set pointed to by +src+ —
 * looks like callers must guarantee src is at least `size` bytes; verify
 * against callers.
 */
void
rb_fd_copy(rb_fdset_t *dst, const fd_set *src, int max)
{
    size_t size = howmany(max, NFDBITS) * sizeof(fd_mask);

    if (size < sizeof(fd_set)) size = sizeof(fd_set);
    dst->maxfd = max;
    dst->fdset = xrealloc(dst->fdset, size);
    memcpy(dst->fdset, src, size);
}
02327 
02328 int
02329 rb_fd_select(int n, rb_fdset_t *readfds, rb_fdset_t *writefds, rb_fdset_t *exceptfds, struct timeval *timeout)
02330 {
02331     fd_set *r = NULL, *w = NULL, *e = NULL;
02332     if (readfds) {
02333         rb_fd_resize(n - 1, readfds);
02334         r = rb_fd_ptr(readfds);
02335     }
02336     if (writefds) {
02337         rb_fd_resize(n - 1, writefds);
02338         w = rb_fd_ptr(writefds);
02339     }
02340     if (exceptfds) {
02341         rb_fd_resize(n - 1, exceptfds);
02342         e = rb_fd_ptr(exceptfds);
02343     }
02344     return select(n, r, w, e, timeout);
02345 }
02346 
/* From here on, the FD_* macros in this file operate on rb_fdset_t
 * through the rb_fd_* wrappers instead of on a raw fd_set. */
#undef FD_ZERO
#undef FD_SET
#undef FD_CLR
#undef FD_ISSET

#define FD_ZERO(f)      rb_fd_zero(f)
#define FD_SET(i, f)    rb_fd_set(i, f)
#define FD_CLR(i, f)    rb_fd_clr(i, f)
#define FD_ISSET(i, f)  rb_fd_isset(i, f)
02356 
02357 #elif defined(_WIN32)
02358 
02359 void
02360 rb_fd_init(volatile rb_fdset_t *set)
02361 {
02362     set->capa = FD_SETSIZE;
02363     set->fdset = ALLOC(fd_set);
02364     FD_ZERO(set->fdset);
02365 }
02366 
02367 void
02368 rb_fd_term(rb_fdset_t *set)
02369 {
02370     xfree(set->fdset);
02371     set->fdset = NULL;
02372     set->capa = 0;
02373 }
02374 
/*
 * Win32: add +fd+ to the set. Windows fd_set is an array of SOCKETs, not
 * a bitmap, so we translate the CRT fd to its SOCKET handle, skip
 * duplicates, and grow the array in FD_SETSIZE-sized steps when full.
 */
void
rb_fd_set(int fd, rb_fdset_t *set)
{
    unsigned int i;
    SOCKET s = rb_w32_get_osfhandle(fd);

    /* already present? Windows fd_set must not contain duplicates */
    for (i = 0; i < set->fdset->fd_count; i++) {
        if (set->fdset->fd_array[i] == s) {
            return;
        }
    }
    if (set->fdset->fd_count >= (unsigned)set->capa) {
        /* round capacity up to the next multiple of FD_SETSIZE;
         * the allocation is the fd_count header plus the SOCKET array */
        set->capa = (set->fdset->fd_count / FD_SETSIZE + 1) * FD_SETSIZE;
        set->fdset = xrealloc(set->fdset, sizeof(unsigned int) + sizeof(SOCKET) * set->capa);
    }
    set->fdset->fd_array[set->fdset->fd_count++] = s;
}
02392 
/* As in the NFDBITS branch: route the FD_* macros through the rb_fd_*
 * wrappers for the rest of this file. */
#undef FD_ZERO
#undef FD_SET
#undef FD_CLR
#undef FD_ISSET

#define FD_ZERO(f)      rb_fd_zero(f)
#define FD_SET(i, f)    rb_fd_set(i, f)
#define FD_CLR(i, f)    rb_fd_clr(i, f)
#define FD_ISSET(i, f)  rb_fd_isset(i, f)
02402 
02403 #endif
02404 
02405 #if defined(__CYGWIN__) || defined(_WIN32)
/* Three-way comparison of two timevals: negative when a < b, zero when
 * equal, positive when a > b. Seconds dominate; microseconds break ties. */
static long
cmp_tv(const struct timeval *a, const struct timeval *b)
{
    long sec_diff = a->tv_sec - b->tv_sec;

    if (sec_diff != 0)
        return sec_diff;
    return a->tv_usec - b->tv_usec;
}
02412 
/* Destructively compute rest -= wait with microsecond borrow.
 * Returns 0 when the remainder would be <= 0 (rest is then left
 * unmodified in the underflow case), non-zero otherwise. */
static int
subtract_tv(struct timeval *rest, const struct timeval *wait)
{
    if (rest->tv_sec < wait->tv_sec)
        return 0;

    /* borrow whole seconds until the microsecond field is subtractable */
    while (rest->tv_usec < wait->tv_usec) {
        if (rest->tv_sec <= wait->tv_sec)
            return 0;
        rest->tv_sec--;
        rest->tv_usec += 1000 * 1000;
    }

    rest->tv_sec -= wait->tv_sec;
    rest->tv_usec -= wait->tv_usec;
    return rest->tv_sec != 0 || rest->tv_usec != 0;
}
02430 #endif
02431 
/*
 * GVL-aware select(2): runs select inside BLOCKING_REGION so other Ruby
 * threads can proceed, restores the caller's fd_sets and retries on
 * EINTR/ERESTART, and (off Linux) recomputes the remaining timeout from
 * an absolute deadline so retries do not extend the total wait.
 * On Cygwin/Win32 it polls in 100ms slices so interrupts are noticed.
 * Returns select's result; errno is taken from the failing select call.
 */
static int
do_select(int n, fd_set *read, fd_set *write, fd_set *except,
          struct timeval *timeout)
{
    int result, lerrno;
    /* copies of the input sets, so retries see the original membership
       (select overwrites its arguments) */
    fd_set UNINITIALIZED_VAR(orig_read);
    fd_set UNINITIALIZED_VAR(orig_write);
    fd_set UNINITIALIZED_VAR(orig_except);

#ifndef linux
    /* Non-Linux select() does not update *timeout with the remaining
       time, so track an absolute deadline ourselves. */
    double limit = 0;
    struct timeval wait_rest;
# if defined(__CYGWIN__) || defined(_WIN32)
    struct timeval start_time;
# endif

    if (timeout) {
# if defined(__CYGWIN__) || defined(_WIN32)
        gettimeofday(&start_time, NULL);
        limit = (double)start_time.tv_sec + (double)start_time.tv_usec*1e-6;
# else
        limit = timeofday();
# endif
        limit += (double)timeout->tv_sec+(double)timeout->tv_usec*1e-6;
        /* work on a private copy so the caller's timeval is untouched */
        wait_rest = *timeout;
        timeout = &wait_rest;
    }
#endif

    if (read) orig_read = *read;
    if (write) orig_write = *write;
    if (except) orig_except = *except;

  retry:
    lerrno = 0;

#if defined(__CYGWIN__) || defined(_WIN32)
    {
        int finish = 0;
        /* polling duration: 100ms */
        struct timeval wait_100ms, *wait;
        wait_100ms.tv_sec = 0;
        wait_100ms.tv_usec = 100 * 1000; /* 100 ms */

        do {
            /* wait the smaller of 100ms and the remaining timeout */
            wait = (timeout == 0 || cmp_tv(&wait_100ms, timeout) < 0) ? &wait_100ms : timeout;
            BLOCKING_REGION({
                do {
                    result = select(n, read, write, except, wait);
                    if (result < 0) lerrno = errno;
                    if (result != 0) break;

                    /* timed out this slice: restore the sets and account
                       the elapsed time against the caller's timeout */
                    if (read) *read = orig_read;
                    if (write) *write = orig_write;
                    if (except) *except = orig_except;
                    if (timeout) {
                        struct timeval elapsed;
                        gettimeofday(&elapsed, NULL);
                        subtract_tv(&elapsed, &start_time);
                        gettimeofday(&start_time, NULL);
                        if (!subtract_tv(timeout, &elapsed)) {
                            /* timeout fully consumed */
                            finish = 1;
                            break;
                        }
                        if (cmp_tv(&wait_100ms, timeout) > 0) wait = timeout;
                    }
                } while (__th->interrupt_flag == 0); /* __th set by BLOCKING_REGION */
            }, 0, 0);
        } while (result == 0 && !finish);
    }
#else
    BLOCKING_REGION({
        result = select(n, read, write, except, timeout);
        if (result < 0) lerrno = errno;
    }, ubf_select, GET_THREAD());
#endif

    /* errno may have been clobbered after select returned; re-publish
       the value captured inside the blocking region */
    errno = lerrno;

    if (result < 0) {
        switch (errno) {
          case EINTR:
#ifdef ERESTART
          case ERESTART:
#endif
            /* interrupted: restore the sets and retry with whatever time
               remains until the absolute deadline */
            if (read) *read = orig_read;
            if (write) *write = orig_write;
            if (except) *except = orig_except;
#ifndef linux
            if (timeout) {
                double d = limit - timeofday();

                wait_rest.tv_sec = (unsigned int)d;
                wait_rest.tv_usec = (int)((d-(double)wait_rest.tv_sec)*1e6);
                if (wait_rest.tv_sec < 0)  wait_rest.tv_sec = 0;
                if (wait_rest.tv_usec < 0) wait_rest.tv_usec = 0;
            }
#endif
            goto retry;
          default:
            break;
        }
    }
    return result;
}
02537 
02538 static void
02539 rb_thread_wait_fd_rw(int fd, int read)
02540 {
02541     int result = 0;
02542     thread_debug("rb_thread_wait_fd_rw(%d, %s)\n", fd, read ? "read" : "write");
02543 
02544     if (fd < 0) {
02545         rb_raise(rb_eIOError, "closed stream");
02546     }
02547     if (rb_thread_alone()) return;
02548     while (result <= 0) {
02549         rb_fdset_t set;
02550         rb_fd_init(&set);
02551         FD_SET(fd, &set);
02552 
02553         if (read) {
02554             result = do_select(fd + 1, rb_fd_ptr(&set), 0, 0, 0);
02555         }
02556         else {
02557             result = do_select(fd + 1, 0, rb_fd_ptr(&set), 0, 0);
02558         }
02559 
02560         rb_fd_term(&set);
02561 
02562         if (result < 0) {
02563             rb_sys_fail(0);
02564         }
02565     }
02566 
02567     thread_debug("rb_thread_wait_fd_rw(%d, %s): done\n", fd, read ? "read" : "write");
02568 }
02569 
/* Public API: wait until fd becomes readable. */
void
rb_thread_wait_fd(int fd)
{
    rb_thread_wait_fd_rw(fd, 1);
}
02575 
02576 int
02577 rb_thread_fd_writable(int fd)
02578 {
02579     rb_thread_wait_fd_rw(fd, 0);
02580     return TRUE;
02581 }
02582 
02583 int
02584 rb_thread_select(int max, fd_set * read, fd_set * write, fd_set * except,
02585                  struct timeval *timeout)
02586 {
02587     if (!read && !write && !except) {
02588         if (!timeout) {
02589             rb_thread_sleep_forever();
02590             return 0;
02591         }
02592         rb_thread_wait_for(*timeout);
02593         return 0;
02594     }
02595     else {
02596         return do_select(max, read, write, except, timeout);
02597     }
02598 }
02599 
02600 
02601 int
02602 rb_thread_fd_select(int max, rb_fdset_t * read, rb_fdset_t * write, rb_fdset_t * except,
02603                     struct timeval *timeout)
02604 {
02605     fd_set *r = NULL, *w = NULL, *e = NULL;
02606 
02607     if (!read && !write && !except) {
02608         if (!timeout) {
02609             rb_thread_sleep_forever();
02610             return 0;
02611         }
02612         rb_thread_wait_for(*timeout);
02613         return 0;
02614     }
02615 
02616     if (read) {
02617         rb_fd_resize(max - 1, read);
02618         r = rb_fd_ptr(read);
02619     }
02620     if (write) {
02621         rb_fd_resize(max - 1, write);
02622         w = rb_fd_ptr(write);
02623     }
02624     if (except) {
02625         rb_fd_resize(max - 1, except);
02626         e = rb_fd_ptr(except);
02627     }
02628     return do_select(max, r, w, e, timeout);
02629 }
02630 
02631 
02632 /*
02633  * for GC
02634  */
02635 
#ifdef USE_CONSERVATIVE_STACK_END
/*
 * Publish an address inside the current stack frame so the conservative
 * GC can use it as the stack-scan boundary. The address of the local
 * deliberately escapes; the caller must use it only as a boundary
 * marker while this frame (or a deeper one) is still live.
 */
void
rb_gc_set_stack_end(VALUE **stack_end_p)
{
    VALUE stack_end;
    *stack_end_p = &stack_end;
}
#endif
02644 
/*
 * Spill the machine context of the current thread so the GC can scan it
 * for object references: flush SPARC register windows to the stack,
 * record the ia64 register-stack backing-store pointer, and dump the
 * callee-saved registers into th->machine_regs via setjmp.
 */
void
rb_gc_save_machine_context(rb_thread_t *th)
{
    FLUSH_REGISTER_WINDOWS;
#ifdef __ia64
    th->machine_register_stack_end = rb_ia64_bsp();
#endif
    setjmp(th->machine_regs);
}
02654 
02655 /*
02656  *
02657  */
02658 
02659 int rb_get_next_signal(void);
02660 
02661 void
02662 rb_threadptr_check_signal(rb_thread_t *mth)
02663 {
02664     /* mth must be main_thread */
02665     if (rb_signal_buff_size() > 0) {
02666         /* wakeup main thread */
02667         rb_threadptr_interrupt(mth);
02668     }
02669 }
02670 
/*
 * Periodic tick executed by the native timer thread: request a
 * time-slice interrupt on the currently running thread and wake the
 * main thread if signals are pending. +arg+ is unused.
 */
static void
timer_thread_function(void *arg)
{
    rb_vm_t *vm = GET_VM(); /* TODO: fix me for Multi-VM */

    /* for time slice */
    RUBY_VM_SET_TIMER_INTERRUPT(vm->running_thread);

    /* check signal */
    rb_threadptr_check_signal(vm->main_thread);

#if 0
    /* prove profiler */
    if (vm->prove_profile.enable) {
        rb_thread_t *th = vm->running_thread;

        if (vm->during_gc) {
            /* GC prove profiling */
        }
    }
#endif
}
02693 
02694 void
02695 rb_thread_stop_timer_thread(void)
02696 {
02697     if (timer_thread_id && native_stop_timer_thread()) {
02698         native_reset_timer_thread();
02699     }
02700 }
02701 
/* Reset timer-thread bookkeeping (e.g. after fork). */
void
rb_thread_reset_timer_thread(void)
{
    native_reset_timer_thread();
}
02707 
02708 void
02709 rb_thread_start_timer_thread(void)
02710 {
02711     system_working = 1;
02712     rb_thread_create_timer_thread();
02713 }
02714 
02715 static int
02716 clear_coverage_i(st_data_t key, st_data_t val, st_data_t dummy)
02717 {
02718     int i;
02719     VALUE lines = (VALUE)val;
02720 
02721     for (i = 0; i < RARRAY_LEN(lines); i++) {
02722         if (RARRAY_PTR(lines)[i] != Qnil) {
02723             RARRAY_PTR(lines)[i] = INT2FIX(0);
02724         }
02725     }
02726     return ST_CONTINUE;
02727 }
02728 
02729 static void
02730 clear_coverage(void)
02731 {
02732     extern VALUE rb_get_coverages(void);
02733     VALUE coverages = rb_get_coverages();
02734     if (RTEST(coverages)) {
02735         st_foreach(RHASH_TBL(coverages), clear_coverage_i, 0);
02736     }
02737 }
02738 
/*
 * Common post-fork fixup, run in the child: the calling thread becomes
 * the main thread, the GVL is reinitialized (it may have been held by a
 * thread that no longer exists), +atfork+ is applied to every thread
 * recorded before the fork, and the living-threads table is rebuilt to
 * contain only the surviving thread. Statement order matters: +atfork+
 * must see the pre-fork table before it is cleared.
 */
static void
rb_thread_atfork_internal(int (*atfork)(st_data_t, st_data_t, st_data_t))
{
    rb_thread_t *th = GET_THREAD();
    rb_vm_t *vm = th->vm;
    VALUE thval = th->self;
    vm->main_thread = th;

    native_mutex_reinitialize_atfork(&th->vm->global_vm_lock);
    st_foreach(vm->living_threads, atfork, (st_data_t)th);
    st_clear(vm->living_threads);
    st_insert(vm->living_threads, thval, (st_data_t)th->thread_id);
    /* no other thread survived the fork, so nobody is sleeping */
    vm->sleeper = 0;
    clear_coverage();
}
02754 
02755 static int
02756 terminate_atfork_i(st_data_t key, st_data_t val, st_data_t current_th)
02757 {
02758     VALUE thval = key;
02759     rb_thread_t *th;
02760     GetThreadPtr(thval, th);
02761 
02762     if (th != (rb_thread_t *)current_th) {
02763         if (th->keeping_mutexes) {
02764             rb_mutex_abandon_all(th->keeping_mutexes);
02765         }
02766         th->keeping_mutexes = NULL;
02767         thread_cleanup_func(th, TRUE);
02768     }
02769     return ST_CONTINUE;
02770 }
02771 
02772 void
02773 rb_thread_atfork(void)
02774 {
02775     rb_thread_atfork_internal(terminate_atfork_i);
02776     GET_THREAD()->join_list_head = 0;
02777     rb_reset_random_seed();
02778 }
02779 
02780 static int
02781 terminate_atfork_before_exec_i(st_data_t key, st_data_t val, st_data_t current_th)
02782 {
02783     VALUE thval = key;
02784     rb_thread_t *th;
02785     GetThreadPtr(thval, th);
02786 
02787     if (th != (rb_thread_t *)current_th) {
02788         thread_cleanup_func_before_exec(th);
02789     }
02790     return ST_CONTINUE;
02791 }
02792 
02793 void
02794 rb_thread_atfork_before_exec(void)
02795 {
02796     rb_thread_atfork_internal(terminate_atfork_before_exec_i);
02797 }
02798 
/* Internal state of a ThreadGroup object. */
struct thgroup {
    int enclosed;   /* non-zero once ThreadGroup#enclose was called */
    VALUE group;    /* back-reference to the wrapping Ruby object */
};
02803 
02804 static size_t
02805 thgroup_memsize(const void *ptr)
02806 {
02807     return ptr ? sizeof(struct thgroup) : 0;
02808 }
02809 
/* TypedData descriptor for ThreadGroup: no mark function (the struct
 * holds no references the GC cannot see), default free, sized above. */
static const rb_data_type_t thgroup_data_type = {
    "thgroup",
    NULL, RUBY_TYPED_DEFAULT_FREE, thgroup_memsize,
};
02814 
02815 /*
02816  * Document-class: ThreadGroup
02817  *
02818  *  <code>ThreadGroup</code> provides a means of keeping track of a number of
02819  *  threads as a group. A <code>Thread</code> can belong to only one
02820  *  <code>ThreadGroup</code> at a time; adding a thread to a new group will
02821  *  remove it from any previous group.
02822  *
02823  *  Newly created threads belong to the same group as the thread from which they
02824  *  were created.
02825  */
02826 
02827 static VALUE
02828 thgroup_s_alloc(VALUE klass)
02829 {
02830     VALUE group;
02831     struct thgroup *data;
02832 
02833     group = TypedData_Make_Struct(klass, struct thgroup, &thgroup_data_type, data);
02834     data->enclosed = 0;
02835     data->group = group;
02836 
02837     return group;
02838 }
02839 
/* Closure passed through st_foreach by thgroup_list(). */
struct thgroup_list_params {
    VALUE ary;     /* result array being filled */
    VALUE group;   /* group whose members we are collecting */
};
02844 
02845 static int
02846 thgroup_list_i(st_data_t key, st_data_t val, st_data_t data)
02847 {
02848     VALUE thread = (VALUE)key;
02849     VALUE ary = ((struct thgroup_list_params *)data)->ary;
02850     VALUE group = ((struct thgroup_list_params *)data)->group;
02851     rb_thread_t *th;
02852     GetThreadPtr(thread, th);
02853 
02854     if (th->thgroup == group) {
02855         rb_ary_push(ary, thread);
02856     }
02857     return ST_CONTINUE;
02858 }
02859 
02860 /*
02861  *  call-seq:
02862  *     thgrp.list   -> array
02863  *
02864  *  Returns an array of all existing <code>Thread</code> objects that belong to
02865  *  this group.
02866  *
02867  *     ThreadGroup::Default.list   #=> [#<Thread:0x401bdf4c run>]
02868  */
02869 
02870 static VALUE
02871 thgroup_list(VALUE group)
02872 {
02873     VALUE ary = rb_ary_new();
02874     struct thgroup_list_params param;
02875 
02876     param.ary = ary;
02877     param.group = group;
02878     st_foreach(GET_THREAD()->vm->living_threads, thgroup_list_i, (st_data_t) & param);
02879     return ary;
02880 }
02881 
02882 
02883 /*
02884  *  call-seq:
02885  *     thgrp.enclose   -> thgrp
02886  *
02887  *  Prevents threads from being added to or removed from the receiving
02888  *  <code>ThreadGroup</code>. New threads can still be started in an enclosed
02889  *  <code>ThreadGroup</code>.
02890  *
02891  *     ThreadGroup::Default.enclose        #=> #<ThreadGroup:0x4029d914>
02892  *     thr = Thread::new { Thread.stop }   #=> #<Thread:0x402a7210 sleep>
02893  *     tg = ThreadGroup::new               #=> #<ThreadGroup:0x402752d4>
02894  *     tg.add thr
02895  *
02896  *  <em>produces:</em>
02897  *
02898  *     ThreadError: can't move from the enclosed thread group
02899  */
02900 
02901 static VALUE
02902 thgroup_enclose(VALUE group)
02903 {
02904     struct thgroup *data;
02905 
02906     TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
02907     data->enclosed = 1;
02908 
02909     return group;
02910 }
02911 
02912 
02913 /*
02914  *  call-seq:
02915  *     thgrp.enclosed?   -> true or false
02916  *
02917  *  Returns <code>true</code> if <em>thgrp</em> is enclosed. See also
02918  *  ThreadGroup#enclose.
02919  */
02920 
02921 static VALUE
02922 thgroup_enclosed_p(VALUE group)
02923 {
02924     struct thgroup *data;
02925 
02926     TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
02927     if (data->enclosed)
02928         return Qtrue;
02929     return Qfalse;
02930 }
02931 
02932 
02933 /*
02934  *  call-seq:
02935  *     thgrp.add(thread)   -> thgrp
02936  *
02937  *  Adds the given <em>thread</em> to this group, removing it from any other
02938  *  group to which it may have previously belonged.
02939  *
02940  *     puts "Initial group is #{ThreadGroup::Default.list}"
02941  *     tg = ThreadGroup.new
02942  *     t1 = Thread.new { sleep }
02943  *     t2 = Thread.new { sleep }
02944  *     puts "t1 is #{t1}"
02945  *     puts "t2 is #{t2}"
02946  *     tg.add(t1)
02947  *     puts "Initial group now #{ThreadGroup::Default.list}"
02948  *     puts "tg group now #{tg.list}"
02949  *
02950  *  <em>produces:</em>
02951  *
02952  *     Initial group is #<Thread:0x401bdf4c>
02953  *     t1 is #<Thread:0x401b3c90>
02954  *     t2 is #<Thread:0x401b3c18>
02955  *     Initial group now #<Thread:0x401b3c18>#<Thread:0x401bdf4c>
02956  *     tg group now #<Thread:0x401b3c90>
02957  */
02958 
02959 static VALUE
02960 thgroup_add(VALUE group, VALUE thread)
02961 {
02962     rb_thread_t *th;
02963     struct thgroup *data;
02964 
02965     rb_secure(4);
02966     GetThreadPtr(thread, th);
02967 
02968     if (OBJ_FROZEN(group)) {
02969         rb_raise(rb_eThreadError, "can't move to the frozen thread group");
02970     }
02971     TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
02972     if (data->enclosed) {
02973         rb_raise(rb_eThreadError, "can't move to the enclosed thread group");
02974     }
02975 
02976     if (!th->thgroup) {
02977         return Qnil;
02978     }
02979 
02980     if (OBJ_FROZEN(th->thgroup)) {
02981         rb_raise(rb_eThreadError, "can't move from the frozen thread group");
02982     }
02983     TypedData_Get_Struct(th->thgroup, struct thgroup, &thgroup_data_type, data);
02984     if (data->enclosed) {
02985         rb_raise(rb_eThreadError,
02986                  "can't move from the enclosed thread group");
02987     }
02988 
02989     th->thgroup = group;
02990     return group;
02991 }
02992 
02993 
02994 /*
02995  *  Document-class: Mutex
02996  *
02997  *  Mutex implements a simple semaphore that can be used to coordinate access to
02998  *  shared data from multiple concurrent threads.
02999  *
03000  *  Example:
03001  *
03002  *    require 'thread'
03003  *    semaphore = Mutex.new
03004  *
03005  *    a = Thread.new {
03006  *      semaphore.synchronize {
03007  *        # access shared resource
03008  *      }
03009  *    }
03010  *
03011  *    b = Thread.new {
03012  *      semaphore.synchronize {
03013  *        # access shared resource
03014  *      }
03015  *    }
03016  *
03017  */
03018 
/* Unwrap a Mutex VALUE into its mutex_t. */
#define GetMutexPtr(obj, tobj) \
    TypedData_Get_Struct(obj, mutex_t, &mutex_data_type, tobj)

static const char *mutex_unlock(mutex_t *mutex, rb_thread_t volatile *th);

/* mutex_t holds no VALUEs, so no GC mark function is needed */
#define mutex_mark NULL
03025 
03026 static void
03027 mutex_free(void *ptr)
03028 {
03029     if (ptr) {
03030         mutex_t *mutex = ptr;
03031         if (mutex->th) {
03032             /* rb_warn("free locked mutex"); */
03033             const char *err = mutex_unlock(mutex, mutex->th);
03034             if (err) rb_bug("%s", err);
03035         }
03036         native_mutex_destroy(&mutex->lock);
03037         native_cond_destroy(&mutex->cond);
03038     }
03039     ruby_xfree(ptr);
03040 }
03041 
03042 static size_t
03043 mutex_memsize(const void *ptr)
03044 {
03045     return ptr ? sizeof(mutex_t) : 0;
03046 }
03047 
/* TypedData descriptor for Mutex (mutex_mark is NULL — nothing to mark). */
static const rb_data_type_t mutex_data_type = {
    "mutex",
    mutex_mark, mutex_free, mutex_memsize,
};
03052 
03053 static VALUE
03054 mutex_alloc(VALUE klass)
03055 {
03056     VALUE volatile obj;
03057     mutex_t *mutex;
03058 
03059     obj = TypedData_Make_Struct(klass, mutex_t, &mutex_data_type, mutex);
03060     native_mutex_initialize(&mutex->lock);
03061     native_cond_initialize(&mutex->cond);
03062     return obj;
03063 }
03064 
03065 /*
03066  *  call-seq:
03067  *     Mutex.new   -> mutex
03068  *
03069  *  Creates a new Mutex
03070  */
03071 static VALUE
03072 mutex_initialize(VALUE self)
03073 {
03074     return self;
03075 }
03076 
03077 VALUE
03078 rb_mutex_new(void)
03079 {
03080     return mutex_alloc(rb_cMutex);
03081 }
03082 
03083 /*
03084  * call-seq:
03085  *    mutex.locked?  -> true or false
03086  *
03087  * Returns +true+ if this lock is currently held by some thread.
03088  */
03089 VALUE
03090 rb_mutex_locked_p(VALUE self)
03091 {
03092     mutex_t *mutex;
03093     GetMutexPtr(self, mutex);
03094     return mutex->th ? Qtrue : Qfalse;
03095 }
03096 
03097 static void
03098 mutex_locked(rb_thread_t *th, VALUE self)
03099 {
03100     mutex_t *mutex;
03101     GetMutexPtr(self, mutex);
03102 
03103     if (th->keeping_mutexes) {
03104         mutex->next_mutex = th->keeping_mutexes;
03105     }
03106     th->keeping_mutexes = mutex;
03107 }
03108 
03109 /*
03110  * call-seq:
03111  *    mutex.try_lock  -> true or false
03112  *
03113  * Attempts to obtain the lock and returns immediately. Returns +true+ if the
03114  * lock was granted.
03115  */
03116 VALUE
03117 rb_mutex_trylock(VALUE self)
03118 {
03119     mutex_t *mutex;
03120     VALUE locked = Qfalse;
03121     GetMutexPtr(self, mutex);
03122 
03123     native_mutex_lock(&mutex->lock);
03124     if (mutex->th == 0) {
03125         mutex->th = GET_THREAD();
03126         locked = Qtrue;
03127 
03128         mutex_locked(GET_THREAD(), self);
03129     }
03130     native_mutex_unlock(&mutex->lock);
03131 
03132     return locked;
03133 }
03134 
/*
 * Blocking part of Mutex#lock, executed inside BLOCKING_REGION_CORE
 * (i.e. without the GVL). Waits on the mutex's condition variable until
 * the mutex can be claimed. Returns 0 on plain acquisition, 1 when the
 * wait was interrupted, 2 when this was the last runnable thread (the
 * caller then runs the deadlock check). The cond_waiting/cond_notified
 * counters pair with lock_interrupt(): waiters register before sleeping
 * and consume one notification on wakeup.
 */
static int
lock_func(rb_thread_t *th, mutex_t *mutex, int last_thread)
{
    int interrupted = 0;
#if 0 /* for debug */
    native_thread_yield();
#endif

    native_mutex_lock(&mutex->lock);
    th->transition_for_lock = 0;
    /* the comma expression claims the mutex (mutex->th = th) the moment
       it is observed free, without leaving the while condition */
    while (mutex->th || (mutex->th = th, 0)) {
        if (last_thread) {
            /* nobody else can release the mutex: bail out and let the
               caller detect deadlock */
            interrupted = 2;
            break;
        }

        mutex->cond_waiting++;
        native_cond_wait(&mutex->cond, &mutex->lock);
        mutex->cond_notified--;

        if (RUBY_VM_INTERRUPTED(th)) {
            interrupted = 1;
            break;
        }
    }
    th->transition_for_lock = 1;
    native_mutex_unlock(&mutex->lock);

    if (interrupted == 2) native_thread_yield();
#if 0 /* for debug */
    native_thread_yield();
#endif

    return interrupted;
}
03170 
03171 static void
03172 lock_interrupt(void *ptr)
03173 {
03174     mutex_t *mutex = (mutex_t *)ptr;
03175     native_mutex_lock(&mutex->lock);
03176     if (mutex->cond_waiting > 0) {
03177         native_cond_broadcast(&mutex->cond);
03178         mutex->cond_notified = mutex->cond_waiting;
03179         mutex->cond_waiting = 0;
03180     }
03181     native_mutex_unlock(&mutex->lock);
03182 }
03183 
/*
 * call-seq:
 *    mutex.lock  -> self
 *
 * Attempts to grab the lock and waits if it isn't available.
 * Raises +ThreadError+ if +mutex+ was locked by the current thread.
 */
VALUE
rb_mutex_lock(VALUE self)
{

    if (rb_mutex_trylock(self) == Qfalse) {
        mutex_t *mutex;
        rb_thread_t *th = GET_THREAD();
        GetMutexPtr(self, mutex);

        /* Re-locking by the owner would wait on itself forever. */
        if (mutex->th == GET_THREAD()) {
            rb_raise(rb_eThreadError, "deadlock; recursive locking");
        }

        /* Retry until lock_func() actually installed us as the owner; an
         * interrupted wait can leave mutex->th pointing elsewhere. */
        while (mutex->th != th) {
            int interrupted;
            enum rb_thread_status prev_status = th->status;
            int last_thread = 0;
            struct rb_unblock_callback oldubf;

            /* Let Thread#kill / Thread#raise wake us via lock_interrupt(). */
            set_unblock_function(th, lock_interrupt, mutex, &oldubf);
            th->status = THREAD_STOPPED_FOREVER;
            th->vm->sleeper++;
            th->locking_mutex = self;
            /* If every living thread is now asleep, this wait would deadlock. */
            if (vm_living_thread_num(th->vm) == th->vm->sleeper) {
                last_thread = 1;
            }

            th->transition_for_lock = 1;
            BLOCKING_REGION_CORE({
                interrupted = lock_func(th, mutex, last_thread);
            });
            th->transition_for_lock = 0;
            remove_signal_thread_list(th);
            reset_unblock_function(th, &oldubf);

            th->locking_mutex = Qfalse;
            /* interrupted == 2: lock_func bailed out as the last runnable
             * thread; confirm and report the deadlock if it still holds. */
            if (mutex->th && interrupted == 2) {
                rb_check_deadlock(th->vm);
            }
            if (th->status == THREAD_STOPPED_FOREVER) {
                th->status = prev_status;
            }
            th->vm->sleeper--;

            if (mutex->th == th) mutex_locked(th, self);

            if (interrupted) {
                RUBY_VM_CHECK_INTS();
            }
        }
    }
    return self;
}
03244 
/* Release `mutex` held by `th`.  Returns NULL on success, or a static
 * error message when the mutex is not locked or is owned by another
 * thread.  On success the mutex is also unlinked from th->keeping_mutexes
 * and one waiter (if any) is signalled. */
static const char *
mutex_unlock(mutex_t *mutex, rb_thread_t volatile *th)
{
    const char *err = NULL;
    mutex_t *th_mutex;

    native_mutex_lock(&mutex->lock);

    if (mutex->th == 0) {
        err = "Attempt to unlock a mutex which is not locked";
    }
    else if (mutex->th != th) {
        err = "Attempt to unlock a mutex which is locked by another thread";
    }
    else {
        mutex->th = 0;
        if (mutex->cond_waiting > 0) {
            /* waiting thread */
            native_cond_signal(&mutex->cond);
            mutex->cond_waiting--;
            mutex->cond_notified++;
        }
    }

    native_mutex_unlock(&mutex->lock);

    if (!err) {
        /* Unlink `mutex` from the thread's singly-linked list of held
         * mutexes.  NOTE(review): the loop assumes the mutex is present
         * in the list — it never checks for the end. */
        th_mutex = th->keeping_mutexes;
        if (th_mutex == mutex) {
            th->keeping_mutexes = mutex->next_mutex;
        }
        else {
            while (1) {
                mutex_t *tmp_mutex;
                tmp_mutex = th_mutex->next_mutex;
                if (tmp_mutex == mutex) {
                    th_mutex->next_mutex = tmp_mutex->next_mutex;
                    break;
                }
                th_mutex = tmp_mutex;
            }
        }
        mutex->next_mutex = NULL;
    }

    return err;
}
03292 
03293 /*
03294  * call-seq:
03295  *    mutex.unlock    -> self
03296  *
03297  * Releases the lock.
03298  * Raises +ThreadError+ if +mutex+ wasn't locked by the current thread.
03299  */
03300 VALUE
03301 rb_mutex_unlock(VALUE self)
03302 {
03303     const char *err;
03304     mutex_t *mutex;
03305     GetMutexPtr(self, mutex);
03306 
03307     err = mutex_unlock(mutex, GET_THREAD());
03308     if (err) rb_raise(rb_eThreadError, "%s", err);
03309 
03310     return self;
03311 }
03312 
03313 static void
03314 rb_mutex_unlock_all(mutex_t *mutexes, rb_thread_t *th)
03315 {
03316     const char *err;
03317     mutex_t *mutex;
03318 
03319     while (mutexes) {
03320         mutex = mutexes;
03321         /* rb_warn("mutex #<%p> remains to be locked by terminated thread",
03322                 mutexes); */
03323         mutexes = mutex->next_mutex;
03324         err = mutex_unlock(mutex, th);
03325         if (err) rb_bug("invalid keeping_mutexes: %s", err);
03326     }
03327 }
03328 
03329 static void
03330 rb_mutex_abandon_all(mutex_t *mutexes)
03331 {
03332     mutex_t *mutex;
03333 
03334     while (mutexes) {
03335         mutex = mutexes;
03336         mutexes = mutex->next_mutex;
03337         mutex->th = 0;
03338         mutex->next_mutex = 0;
03339     }
03340 }
03341 
/* rb_ensure() body for Mutex#sleep without a timeout: sleep until
 * explicitly woken.  `time` is unused; it exists only to match the
 * rb_ensure callback signature. */
static VALUE
rb_mutex_sleep_forever(VALUE time)
{
    rb_thread_sleep_deadly();
    return Qnil;
}
03348 
03349 static VALUE
03350 rb_mutex_wait_for(VALUE time)
03351 {
03352     const struct timeval *t = (struct timeval *)time;
03353     rb_thread_wait_for(*t);
03354     return Qnil;
03355 }
03356 
/* Core of Mutex#sleep: unlock `self`, sleep for `timeout` seconds (or
 * forever when nil), then re-acquire the lock via rb_ensure even if the
 * sleep is interrupted.  Returns the elapsed wall-clock seconds as a
 * Fixnum.  Raises ThreadError (from rb_mutex_unlock) if the calling
 * thread does not hold the lock. */
VALUE
rb_mutex_sleep(VALUE self, VALUE timeout)
{
    time_t beg, end;
    struct timeval t;

    /* Convert before unlocking so a bad timeout raises while still locked. */
    if (!NIL_P(timeout)) {
        t = rb_time_interval(timeout);
    }
    rb_mutex_unlock(self);
    beg = time(0);
    if (NIL_P(timeout)) {
        rb_ensure(rb_mutex_sleep_forever, Qnil, rb_mutex_lock, self);
    }
    else {
        rb_ensure(rb_mutex_wait_for, (VALUE)&t, rb_mutex_lock, self);
    }
    end = time(0) - beg;
    return INT2FIX(end);
}
03377 
/*
 * call-seq:
 *    mutex.sleep(timeout = nil)    -> number
 *
 * Releases the lock and sleeps +timeout+ seconds if it is given and
 * non-nil or forever.  Raises +ThreadError+ if +mutex+ wasn't locked by
 * the current thread.
 */
static VALUE
mutex_sleep(int argc, VALUE *argv, VALUE self)
{
    VALUE timeout;

    /* "01": zero required arguments, one optional (the timeout). */
    rb_scan_args(argc, argv, "01", &timeout);
    return rb_mutex_sleep(self, timeout);
}
03394 
/*
 * call-seq:
 *    mutex.synchronize { ... }    -> result of the block
 *
 * Obtains a lock, runs the block, and releases the lock when the block
 * completes.  See the example under +Mutex+.
 */

/* C-level Mutex#synchronize: rb_ensure guarantees the unlock even when
 * `func` raises or throws. */
VALUE
rb_mutex_synchronize(VALUE mutex, VALUE (*func)(VALUE arg), VALUE arg)
{
    rb_mutex_lock(mutex);
    return rb_ensure(func, arg, rb_mutex_unlock, mutex);
}
03409 
/*
 * Document-class: Barrier
 */

/* GC mark callback for Barrier: the typed-data pointer is itself a VALUE
 * (the underlying Mutex object), so just mark it. */
static void
barrier_mark(void *ptr)
{
    rb_gc_mark((VALUE)ptr);
}
03418 
/* TypedData descriptor for Barrier: mark the wrapped mutex VALUE; no
 * free or memsize callbacks (the mutex is a Ruby object, GC-managed). */
static const rb_data_type_t barrier_data_type = {
    "barrier",
    barrier_mark, 0, 0,
};
03423 
/* Allocate a Barrier: the typed-data pointer is the VALUE of a freshly
 * allocated (unlocked) Mutex, stored directly rather than via a struct. */
static VALUE
barrier_alloc(VALUE klass)
{
    return TypedData_Wrap_Struct(klass, &barrier_data_type, (void *)mutex_alloc(0));
}
03429 
/* Extract the underlying mutex VALUE from a Barrier, with a type check. */
#define GetBarrierPtr(obj) (VALUE)rb_check_typeddata(obj, &barrier_data_type)
03431 
/* Create a Barrier whose mutex is immediately locked by the creating
 * thread; other threads calling rb_barrier_wait() will then block until
 * the barrier is released or destroyed. */
VALUE
rb_barrier_new(void)
{
    VALUE barrier = barrier_alloc(rb_cBarrier);
    rb_mutex_lock((VALUE)DATA_PTR(barrier));
    return barrier;
}
03439 
/* Block on the barrier until it is released or destroyed.  Returns Qtrue
 * when the barrier is still armed after the wait (caller now holds its
 * mutex), Qfalse when the barrier was destroyed or the caller already
 * owns the mutex. */
VALUE
rb_barrier_wait(VALUE self)
{
    VALUE mutex = GetBarrierPtr(self);
    mutex_t *m;

    if (!mutex) return Qfalse;                  /* barrier already destroyed */
    GetMutexPtr(mutex, m);
    if (m->th == GET_THREAD()) return Qfalse;   /* owner must not wait on itself */
    rb_mutex_lock(mutex);
    if (DATA_PTR(self)) return Qtrue;           /* still armed: keep the lock */
    rb_mutex_unlock(mutex);                     /* destroyed while we waited */
    return Qfalse;
}
03454 
/* Release the barrier by unlocking its mutex, waking one waiter.
 * Raises ThreadError (via rb_mutex_unlock) if not held by the caller. */
VALUE
rb_barrier_release(VALUE self)
{
    return rb_mutex_unlock(GetBarrierPtr(self));
}
03460 
/* Destroy the barrier: clear the data pointer so subsequent waits return
 * Qfalse, then unlock the mutex so current waiters wake up. */
VALUE
rb_barrier_destroy(VALUE self)
{
    VALUE mutex = GetBarrierPtr(self);
    DATA_PTR(self) = 0;
    return rb_mutex_unlock(mutex);
}
03468 
/* variables for recursive traversals */

/* Thread-local key under which recursive_list_access() stores its
 * per-thread hash; its symbol is also pushed as a sentinel entry by
 * exec_recursive() to flag the outermost call. */
static ID recursive_key;
03471 
/*
 * Returns the current "recursive list" used to detect recursion.
 * This list is a hash table, unique for the current thread and for
 * the current __callee__.
 */

static VALUE
recursive_list_access(void)
{
    volatile VALUE hash = rb_thread_local_aref(rb_thread_current(), recursive_key);
    VALUE sym = ID2SYM(rb_frame_this_func());
    VALUE list;
    /* Lazily create (and untrust) the per-thread outer hash keyed by
     * callee symbol; also heal it if something replaced it with a non-hash. */
    if (NIL_P(hash) || TYPE(hash) != T_HASH) {
        hash = rb_hash_new();
        OBJ_UNTRUST(hash);
        rb_thread_local_aset(rb_thread_current(), recursive_key, hash);
        list = Qnil;
    }
    else {
        list = rb_hash_aref(hash, sym);
    }
    /* Same lazy-create for the inner per-callee list. */
    if (NIL_P(list) || TYPE(list) != T_HASH) {
        list = rb_hash_new();
        OBJ_UNTRUST(list);
        rb_hash_aset(hash, sym, list);
    }
    return list;
}
03500 
03501 /*
03502  * Returns Qtrue iff obj_id (or the pair <obj, paired_obj>) is already
03503  * in the recursion list.
03504  * Assumes the recursion list is valid.
03505  */
03506 
03507 static VALUE
03508 recursive_check(VALUE list, VALUE obj_id, VALUE paired_obj_id)
03509 {
03510     VALUE pair_list = rb_hash_lookup2(list, obj_id, Qundef);
03511     if (pair_list == Qundef)
03512         return Qfalse;
03513     if (paired_obj_id) {
03514         if (TYPE(pair_list) != T_HASH) {
03515         if (pair_list != paired_obj_id)
03516             return Qfalse;
03517         }
03518         else {
03519         if (NIL_P(rb_hash_lookup(pair_list, paired_obj_id)))
03520             return Qfalse;
03521         }
03522     }
03523     return Qtrue;
03524 }
03525 
03526 /*
03527  * Pushes obj_id (or the pair <obj_id, paired_obj_id>) in the recursion list.
03528  * For a single obj_id, it sets list[obj_id] to Qtrue.
03529  * For a pair, it sets list[obj_id] to paired_obj_id if possible,
03530  * otherwise list[obj_id] becomes a hash like:
03531  *   {paired_obj_id_1 => true, paired_obj_id_2 => true, ... }
03532  * Assumes the recursion list is valid.
03533  */
03534 
03535 static void
03536 recursive_push(VALUE list, VALUE obj, VALUE paired_obj)
03537 {
03538     VALUE pair_list;
03539 
03540     if (!paired_obj) {
03541         rb_hash_aset(list, obj, Qtrue);
03542     }
03543     else if ((pair_list = rb_hash_lookup2(list, obj, Qundef)) == Qundef) {
03544         rb_hash_aset(list, obj, paired_obj);
03545     }
03546     else {
03547         if (TYPE(pair_list) != T_HASH){
03548             VALUE other_paired_obj = pair_list;
03549             pair_list = rb_hash_new();
03550             OBJ_UNTRUST(pair_list);
03551             rb_hash_aset(pair_list, other_paired_obj, Qtrue);
03552             rb_hash_aset(list, obj, pair_list);
03553         }
03554         rb_hash_aset(pair_list, paired_obj, Qtrue);
03555     }
03556 }
03557 
/*
 * Pops obj_id (or the pair <obj_id, paired_obj_id>) from the recursion list.
 * For a pair, if list[obj_id] is a hash, then paired_obj_id is
 * removed from the hash and no attempt is made to simplify
 * list[obj_id] from {only_one_paired_id => true} to only_one_paired_id.
 * Raises TypeError if the expected pair entry is missing (push/pop
 * mismatch).  Assumes the recursion list is valid.
 */

static void
recursive_pop(VALUE list, VALUE obj, VALUE paired_obj)
{
    if (paired_obj) {
        VALUE pair_list = rb_hash_lookup2(list, obj, Qundef);
        if (pair_list == Qundef) {
            VALUE symname = rb_inspect(ID2SYM(rb_frame_this_func()));
            VALUE thrname = rb_inspect(rb_thread_current());
            rb_raise(rb_eTypeError, "invalid inspect_tbl pair_list for %s in %s",
                     StringValuePtr(symname), StringValuePtr(thrname));
        }
        if (TYPE(pair_list) == T_HASH) {
            rb_hash_delete(pair_list, paired_obj);
            if (!RHASH_EMPTY_P(pair_list)) {
                return; /* keep the hash until it is empty */
            }
        }
    }
    rb_hash_delete(list, obj);
}
03586 
/* Bundle of state threaded through exec_recursive() and its helper. */
struct exec_recursive_params {
    VALUE (*func) (VALUE, VALUE, int);  /* user callback (obj, arg, recursive) */
    VALUE list;     /* per-thread/per-callee recursion list */
    VALUE obj;      /* object passed to func */
    VALUE objid;    /* rb_obj_id(obj), key into the list */
    VALUE pairid;   /* id of the paired object, or 0 */
    VALUE arg;      /* opaque user argument */
};
03595 
/* Helper for exec_recursive(): push the (objid, pairid) marker, run
 * p->func under a tag so the marker is popped even on a non-local exit,
 * then re-throw any captured jump.  `tag` is unused except as the
 * rb_catch_obj callback signature. */
static VALUE
exec_recursive_i(VALUE tag, struct exec_recursive_params *p)
{
    VALUE result = Qundef;
    int state;

    recursive_push(p->list, p->objid, p->pairid);
    PUSH_TAG();
    if ((state = EXEC_TAG()) == 0) {
        result = (*p->func)(p->obj, p->arg, FALSE);
    }
    POP_TAG();
    recursive_pop(p->list, p->objid, p->pairid);
    if (state)
        JUMP_TAG(state);
    return result;
}
03613 
/*
 * Calls func(obj, arg, recursive), where recursive is non-zero if the
 * current method is called recursively on obj, or on the pair <obj, pairid>.
 * If outer is 0, then the innermost func will be called with recursive set
 * to Qtrue, otherwise the outermost func will be called. In the latter case,
 * all inner func are short-circuited by throw.
 * Implementation details: the value thrown is the recursive list which is
 * proper to the current method and unlikely to be caught anywhere else.
 * list[recursive_key] is used as a flag for the outermost call.
 */

static VALUE
exec_recursive(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE pairid, VALUE arg, int outer)
{
    VALUE result = Qundef;
    struct exec_recursive_params p;
    int outermost;
    p.list = recursive_list_access();
    p.objid = rb_obj_id(obj);
    p.obj = obj;
    p.pairid = pairid;
    p.arg = arg;
    /* outermost: an outer-mode call with no sentinel yet in the list. */
    outermost = outer && !recursive_check(p.list, ID2SYM(recursive_key), 0);

    if (recursive_check(p.list, p.objid, pairid)) {
        /* recursion detected: in outer mode, unwind to the outermost
         * frame by throwing the list itself; otherwise report it here. */
        if (outer && !outermost) {
            rb_throw_obj(p.list, p.list);
        }
        return (*func)(obj, arg, TRUE);
    }
    else {
        p.func = func;

        if (outermost) {
            /* install the sentinel, catch the unwind from inner frames,
             * and re-run func with recursive=TRUE if one arrived. */
            recursive_push(p.list, ID2SYM(recursive_key), 0);
            result = rb_catch_obj(p.list, exec_recursive_i, (VALUE)&p);
            recursive_pop(p.list, ID2SYM(recursive_key), 0);
            if (result == p.list) {
                result = (*func)(obj, arg, TRUE);
            }
        }
        else {
            result = exec_recursive_i(0, &p);
        }
    }
    /* NOTE(review): volatile read apparently keeps `p` (and the VALUEs it
     * holds) live across the catch/throw for the GC — confirm intent. */
    *(volatile struct exec_recursive_params *)&p;
    return result;
}
03662 
/*
 * Calls func(obj, arg, recursive), where recursive is non-zero if the
 * current method is called recursively on obj.
 * (Innermost mode: pairid = 0, outer = 0.)
 */

VALUE
rb_exec_recursive(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE arg)
{
    return exec_recursive(func, obj, 0, arg, 0);
}
03673 
/*
 * Calls func(obj, arg, recursive), where recursive is non-zero if the
 * current method is called recursively on the ordered pair <obj, paired_obj>.
 * (The pair is keyed by the object ids of both members.)
 */

VALUE
rb_exec_recursive_paired(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE paired_obj, VALUE arg)
{
    return exec_recursive(func, obj, rb_obj_id(paired_obj), arg, 0);
}
03684 
/*
 * If recursion is detected on the current method and obj, the outermost
 * func will be called with (obj, arg, Qtrue). All inner func will be
 * short-circuited using throw.
 */

VALUE
rb_exec_recursive_outer(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE arg)
{
    return exec_recursive(func, obj, 0, arg, 1);
}
03696 
03697 /* tracer */
03698 
03699 static rb_event_hook_t *
03700 alloc_event_hook(rb_event_hook_func_t func, rb_event_flag_t events, VALUE data)
03701 {
03702     rb_event_hook_t *hook = ALLOC(rb_event_hook_t);
03703     hook->func = func;
03704     hook->flag = events;
03705     hook->data = data;
03706     return hook;
03707 }
03708 
03709 static void
03710 thread_reset_event_flags(rb_thread_t *th)
03711 {
03712     rb_event_hook_t *hook = th->event_hooks;
03713     rb_event_flag_t flag = th->event_flags & RUBY_EVENT_VM;
03714 
03715     while (hook) {
03716         flag |= hook->flag;
03717         hook = hook->next;
03718     }
03719     th->event_flags = flag;
03720 }
03721 
/* Register a per-thread event hook: prepend it to th->event_hooks and
 * refresh the thread's cached event-flag mask. */
static void
rb_threadptr_add_event_hook(rb_thread_t *th,
                         rb_event_hook_func_t func, rb_event_flag_t events, VALUE data)
{
    rb_event_hook_t *hook = alloc_event_hook(func, events, data);
    hook->next = th->event_hooks;
    th->event_hooks = hook;
    thread_reset_event_flags(th);
}
03731 
/* Convert a Thread VALUE to its underlying rb_thread_t pointer
 * (type-checked by GetThreadPtr). */
static rb_thread_t *
thval2thread_t(VALUE thval)
{
    rb_thread_t *th;
    GetThreadPtr(thval, th);
    return th;
}
03739 
/* Public wrapper: register a per-thread event hook on the Thread VALUE
 * `thval`. */
void
rb_thread_add_event_hook(VALUE thval,
                         rb_event_hook_func_t func, rb_event_flag_t events, VALUE data)
{
    rb_threadptr_add_event_hook(thval2thread_t(thval), func, events, data);
}
03746 
03747 static int
03748 set_threads_event_flags_i(st_data_t key, st_data_t val, st_data_t flag)
03749 {
03750     VALUE thval = key;
03751     rb_thread_t *th;
03752     GetThreadPtr(thval, th);
03753 
03754     if (flag) {
03755         th->event_flags |= RUBY_EVENT_VM;
03756     }
03757     else {
03758         th->event_flags &= (~RUBY_EVENT_VM);
03759     }
03760     return ST_CONTINUE;
03761 }
03762 
/* Toggle the RUBY_EVENT_VM bit on every living thread (flag: 1 = set,
 * 0 = clear).  Used when global hooks are added or fully removed. */
static void
set_threads_event_flags(int flag)
{
    st_foreach(GET_VM()->living_threads, set_threads_event_flags_i, (st_data_t) flag);
}
03768 
/* Walk a hook list and invoke every hook whose mask includes `flag`,
 * passing the event and its context through to the hook function. */
static inline void
exec_event_hooks(const rb_event_hook_t *hook, rb_event_flag_t flag, VALUE self, ID id, VALUE klass)
{
    for (; hook; hook = hook->next) {
        if (flag & hook->flag) {
            (*hook->func)(flag, hook->data, self, id, klass);
        }
    }
}
03778 
/* Fire event hooks for `flag` on thread `th`: first the thread-local
 * hooks, then (when the thread's RUBY_EVENT_VM bit is set) the VM-global
 * hooks.  Saves and restores th->errinfo so hooks cannot clobber a
 * pending exception; lazily clears the thread's RUBY_EVENT_VM bit once
 * the global hook list is empty.  Events on the frozen core object are
 * ignored. */
void
rb_threadptr_exec_event_hooks(rb_thread_t *th, rb_event_flag_t flag, VALUE self, ID id, VALUE klass)
{
    const VALUE errinfo = th->errinfo;
    const rb_event_flag_t wait_event = th->event_flags;

    if (self == rb_mRubyVMFrozenCore) return;
    if (wait_event & flag) {
        exec_event_hooks(th->event_hooks, flag, self, id, klass);
    }
    if (wait_event & RUBY_EVENT_VM) {
        if (th->vm->event_hooks == NULL) {
            th->event_flags &= (~RUBY_EVENT_VM);
        }
        else {
            exec_event_hooks(th->vm->event_hooks, flag, self, id, klass);
        }
    }
    th->errinfo = errinfo;
}
03799 
/* Register a VM-global event hook (prepended to vm->event_hooks) and
 * turn on the RUBY_EVENT_VM bit for every living thread so they consult
 * the global list. */
void
rb_add_event_hook(rb_event_hook_func_t func, rb_event_flag_t events, VALUE data)
{
    rb_event_hook_t *hook = alloc_event_hook(func, events, data);
    rb_vm_t *vm = GET_VM();

    hook->next = vm->event_hooks;
    vm->event_hooks = hook;

    set_threads_event_flags(1);
}
03811 
/* Remove and free every hook in *root whose function matches `func`
 * (func == 0 removes all hooks).  Note: always returns -1 regardless of
 * whether anything was removed. */
static int
remove_event_hook(rb_event_hook_t **root, rb_event_hook_func_t func)
{
    rb_event_hook_t *prev = NULL, *hook = *root, *next;

    while (hook) {
        next = hook->next;
        if (func == 0 || hook->func == func) {
            /* unlink from the list head or from the previous node */
            if (prev) {
                prev->next = hook->next;
            }
            else {
                *root = hook->next;
            }
            xfree(hook);
        }
        else {
            prev = hook;
        }
        hook = next;
    }
    return -1;
}
03835 
/* Remove per-thread hooks matching `func` and refresh the thread's
 * event-flag mask.  (Name keeps the historical "revmove" typo; callers
 * throughout this file reference it as-is.) */
static int
rb_threadptr_revmove_event_hook(rb_thread_t *th, rb_event_hook_func_t func)
{
    int ret = remove_event_hook(&th->event_hooks, func);
    thread_reset_event_flags(th);
    return ret;
}
03843 
/* Public wrapper: remove per-thread hooks matching `func` from the
 * Thread VALUE `thval`.  Returns remove_event_hook()'s result (-1). */
int
rb_thread_remove_event_hook(VALUE thval, rb_event_hook_func_t func)
{
    return rb_threadptr_revmove_event_hook(thval2thread_t(thval), func);
}
03849 
/* Remove VM-global hooks matching `func`.  When the removal empties a
 * previously non-empty global list, clear the RUBY_EVENT_VM bit on every
 * living thread.  Returns remove_event_hook()'s result (-1). */
int
rb_remove_event_hook(rb_event_hook_func_t func)
{
    rb_vm_t *vm = GET_VM();
    rb_event_hook_t *hook = vm->event_hooks;
    int ret = remove_event_hook(&vm->event_hooks, func);

    if (hook != NULL && vm->event_hooks == NULL) {
        set_threads_event_flags(0);
    }

    return ret;
}
03863 
/* st_foreach() callback: strip all per-thread hooks (func == 0 means
 * match everything) from one living thread. */
static int
clear_trace_func_i(st_data_t key, st_data_t val, st_data_t flag)
{
    rb_thread_t *th;
    GetThreadPtr((VALUE)key, th);
    rb_threadptr_revmove_event_hook(th, 0);
    return ST_CONTINUE;
}
03872 
/* Remove every trace hook in the VM: all per-thread hooks on all living
 * threads, then all VM-global hooks. */
void
rb_clear_trace_func(void)
{
    st_foreach(GET_VM()->living_threads, clear_trace_func_i, (st_data_t) 0);
    rb_remove_event_hook(0);
}
03879 
03880 static void call_trace_func(rb_event_flag_t, VALUE data, VALUE self, ID id, VALUE klass);
03881 
03882 /*
03883  *  call-seq:
03884  *     set_trace_func(proc)    -> proc
03885  *     set_trace_func(nil)     -> nil
03886  *
03887  *  Establishes _proc_ as the handler for tracing, or disables
03888  *  tracing if the parameter is +nil+. _proc_ takes up
03889  *  to six parameters: an event name, a filename, a line number, an
03890  *  object id, a binding, and the name of a class. _proc_ is
03891  *  invoked whenever an event occurs. Events are: <code>c-call</code>
03892  *  (call a C-language routine), <code>c-return</code> (return from a
03893  *  C-language routine), <code>call</code> (call a Ruby method),
03894  *  <code>class</code> (start a class or module definition),
03895  *  <code>end</code> (finish a class or module definition),
03896  *  <code>line</code> (execute code on a new line), <code>raise</code>
03897  *  (raise an exception), and <code>return</code> (return from a Ruby
03898  *  method). Tracing is disabled within the context of _proc_.
03899  *
03900  *      class Test
03901  *      def test
03902  *        a = 1
03903  *        b = 2
03904  *      end
03905  *      end
03906  *
03907  *      set_trace_func proc { |event, file, line, id, binding, classname|
03908  *         printf "%8s %s:%-2d %10s %8s\n", event, file, line, id, classname
03909  *      }
03910  *      t = Test.new
03911  *      t.test
03912  *
03913  *        line prog.rb:11               false
03914  *      c-call prog.rb:11        new    Class
03915  *      c-call prog.rb:11 initialize   Object
03916  *    c-return prog.rb:11 initialize   Object
03917  *    c-return prog.rb:11        new    Class
03918  *        line prog.rb:12               false
03919  *        call prog.rb:2        test     Test
03920  *        line prog.rb:3        test     Test
03921  *        line prog.rb:4        test     Test
03922  *      return prog.rb:4        test     Test
03923  */
03924 
03925 static VALUE
03926 set_trace_func(VALUE obj, VALUE trace)
03927 {
03928     rb_remove_event_hook(call_trace_func);
03929 
03930     if (NIL_P(trace)) {
03931         return Qnil;
03932     }
03933 
03934     if (!rb_obj_is_proc(trace)) {
03935         rb_raise(rb_eTypeError, "trace_func needs to be Proc");
03936     }
03937 
03938     rb_add_event_hook(call_trace_func, RUBY_EVENT_ALL, trace);
03939     return trace;
03940 }
03941 
/* Validate that `trace` is a Proc, then register call_trace_func as a
 * per-thread hook on `th` for all events. */
static void
thread_add_trace_func(rb_thread_t *th, VALUE trace)
{
    if (!rb_obj_is_proc(trace)) {
        rb_raise(rb_eTypeError, "trace_func needs to be Proc");
    }

    rb_threadptr_add_event_hook(th, call_trace_func, RUBY_EVENT_ALL, trace);
}
03951 
/*
 *  call-seq:
 *     thr.add_trace_func(proc)    -> proc
 *
 *  Adds _proc_ as a handler for tracing.
 *  See <code>Thread#set_trace_func</code> and +set_trace_func+.
 */

static VALUE
thread_add_trace_func_m(VALUE obj, VALUE trace)
{
    rb_thread_t *th;
    GetThreadPtr(obj, th);
    thread_add_trace_func(th, trace);
    return trace;
}
03968 
/*
 *  call-seq:
 *     thr.set_trace_func(proc)    -> proc
 *     thr.set_trace_func(nil)     -> nil
 *
 *  Establishes _proc_ on _thr_ as the handler for tracing, or
 *  disables tracing if the parameter is +nil+.
 *  See +set_trace_func+.
 */

static VALUE
thread_set_trace_func_m(VALUE obj, VALUE trace)
{
    rb_thread_t *th;
    GetThreadPtr(obj, th);
    /* replace semantics: remove the existing per-thread handler first */
    rb_threadptr_revmove_event_hook(th, call_trace_func);

    if (NIL_P(trace)) {
        return Qnil;
    }
    thread_add_trace_func(th, trace);
    return trace;
}
03992 
03993 static const char *
03994 get_event_name(rb_event_flag_t event)
03995 {
03996     switch (event) {
03997       case RUBY_EVENT_LINE:
03998         return "line";
03999       case RUBY_EVENT_CLASS:
04000         return "class";
04001       case RUBY_EVENT_END:
04002         return "end";
04003       case RUBY_EVENT_CALL:
04004         return "call";
04005       case RUBY_EVENT_RETURN:
04006         return "return";
04007       case RUBY_EVENT_C_CALL:
04008         return "c-call";
04009       case RUBY_EVENT_C_RETURN:
04010         return "c-return";
04011       case RUBY_EVENT_RAISE:
04012         return "raise";
04013       default:
04014         return "unknown";
04015     }
04016 }
04017 
04018 VALUE ruby_suppress_tracing(VALUE (*func)(VALUE, int), VALUE arg, int always);
04019 
/* Event context marshalled through ruby_suppress_tracing()'s single
 * VALUE argument into call_trace_proc(). */
struct call_trace_func_args {
    rb_event_flag_t event;  /* which RUBY_EVENT_* fired */
    VALUE proc;             /* the user's trace Proc */
    VALUE self;             /* receiver at the event site */
    ID id;                  /* method id, or 0 to resolve from the frame */
    VALUE klass;            /* method owner, or 0 to resolve from the frame */
};
04027 
/* Build the six trace-proc arguments (event name, file, line, method
 * symbol, binding, class) and call the user's Proc.  `args` is really a
 * struct call_trace_func_args*.  Allocator calls are silently skipped. */
static VALUE
call_trace_proc(VALUE args, int tracing)
{
    struct call_trace_func_args *p = (struct call_trace_func_args *)args;
    const char *srcfile = rb_sourcefile();
    VALUE eventname = rb_str_new2(get_event_name(p->event));
    VALUE filename = srcfile ? rb_str_new2(srcfile) : Qnil;
    VALUE argv[6];
    int line = rb_sourceline();
    ID id = 0;
    VALUE klass = 0;

    /* Use the id/klass recorded with the event, or resolve them from the
     * current frame when the hook didn't supply them. */
    if (p->klass != 0) {
        id = p->id;
        klass = p->klass;
    }
    else {
        rb_thread_method_id_and_class(GET_THREAD(), &id, &klass);
    }
    if (id == ID_ALLOCATOR)
      return Qnil;
    /* Report the real class: unwrap include-wrappers and resolve
     * singleton classes to their attached object. */
    if (klass) {
        if (TYPE(klass) == T_ICLASS) {
            klass = RBASIC(klass)->klass;
        }
        else if (FL_TEST(klass, FL_SINGLETON)) {
            klass = rb_iv_get(klass, "__attached__");
        }
    }

    argv[0] = eventname;
    argv[1] = filename;
    argv[2] = INT2FIX(line);
    argv[3] = id ? ID2SYM(id) : Qnil;
    argv[4] = (p->self && srcfile) ? rb_binding_new() : Qnil;
    argv[5] = klass ? klass : Qnil;

    return rb_proc_call_with_block(p->proc, 6, argv, Qnil);
}
04067 
04068 static void
04069 call_trace_func(rb_event_flag_t event, VALUE proc, VALUE self, ID id, VALUE klass)
04070 {
04071     struct call_trace_func_args args;
04072 
04073     args.event = event;
04074     args.proc = proc;
04075     args.self = self;
04076     args.id = id;
04077     args.klass = klass;
04078     ruby_suppress_tracing(call_trace_proc, (VALUE)&args, FALSE);
04079 }
04080 
/* Run func(arg, tracing) with event tracing disabled on the current
 * thread, restoring the previous tracing flag, raised flag, and
 * th->state afterwards.  When the thread is already tracing and `always`
 * is false, returns Qnil without calling func.  Non-local exits captured
 * by the tag are re-thrown after the tracing flag is restored (note:
 * th->state is deliberately not restored on that path — JUMP_TAG does
 * not return). */
VALUE
ruby_suppress_tracing(VALUE (*func)(VALUE, int), VALUE arg, int always)
{
    rb_thread_t *th = GET_THREAD();
    int state, tracing;
    volatile int raised;
    volatile int outer_state;
    VALUE result = Qnil;

    if ((tracing = th->tracing) != 0 && !always) {
        return Qnil;
    }
    else {
        th->tracing = 1;
    }

    raised = rb_threadptr_reset_raised(th);
    outer_state = th->state;
    th->state = 0;

    PUSH_TAG();
    if ((state = EXEC_TAG()) == 0) {
        result = (*func)(arg, tracing);
    }

    if (raised) {
        rb_threadptr_set_raised(th);
    }
    POP_TAG();

    th->tracing = tracing;
    if (state) {
        JUMP_TAG(state);
    }
    th->state = outer_state;

    return result;
}
04119 
04120 VALUE rb_thread_backtrace(VALUE thval);
04121 
/*
 *  call-seq:
 *     thr.backtrace    -> array
 *
 *  Returns the current back trace of the _thr_.
 */

/* Thin method wrapper over rb_thread_backtrace (declared above). */
static VALUE
rb_thread_backtrace_m(VALUE thval)
{
    return rb_thread_backtrace(thval);
}
04134 
04135 /*
04136  *  Document-class: ThreadError
04137  *
04138  *  Raised when an invalid operation is attempted on a thread.
04139  *
04140  *  For example, when no other thread has been started:
04141  *
04142  *     Thread.stop
04143  *
04144  *  <em>raises the exception:</em>
04145  *
04146  *     ThreadError: stopping only thread
04147  */
04148 
04149 /*
04150  *  +Thread+ encapsulates the behavior of a thread of
04151  *  execution, including the main thread of the Ruby script.
04152  *
04153  *  In the descriptions of the methods in this class, the parameter _sym_
04154  *  refers to a symbol, which is either a quoted string or a
04155  *  +Symbol+ (such as <code>:name</code>).
04156  */
04157 
/*
 * Init_Thread: one-time VM bootstrap for the threading subsystem.
 *
 * Registers the Ruby-visible API of Thread, ThreadGroup, Mutex and
 * ThreadError, then performs the low-level startup steps for model-2
 * threading: the calling (main) thread initializes and acquires the
 * Global VM Lock, and the timer thread is spawned.  Must be called
 * exactly once, from the main thread, during VM initialization.
 */
void
Init_Thread(void)
{
/* Intern class-registration ID strings at compile time where possible. */
#undef rb_intern
#define rb_intern(str) rb_intern_const(str)

    VALUE cThGroup;

    /* Thread singleton (class-level) methods. */
    rb_define_singleton_method(rb_cThread, "new", thread_s_new, -1);
    /* arity -2: the block's argument array is passed as a single Ruby Array. */
    rb_define_singleton_method(rb_cThread, "start", thread_start, -2);
    rb_define_singleton_method(rb_cThread, "fork", thread_start, -2);
    rb_define_singleton_method(rb_cThread, "main", rb_thread_s_main, 0);
    rb_define_singleton_method(rb_cThread, "current", thread_s_current, 0);
    rb_define_singleton_method(rb_cThread, "stop", rb_thread_stop, 0);
    rb_define_singleton_method(rb_cThread, "kill", rb_thread_s_kill, 1);
    rb_define_singleton_method(rb_cThread, "exit", rb_thread_exit, 0);
    rb_define_singleton_method(rb_cThread, "pass", thread_s_pass, 0);
    rb_define_singleton_method(rb_cThread, "list", rb_thread_list, 0);
    rb_define_singleton_method(rb_cThread, "abort_on_exception", rb_thread_s_abort_exc, 0);
    rb_define_singleton_method(rb_cThread, "abort_on_exception=", rb_thread_s_abort_exc_set, 1);
#if THREAD_DEBUG < 0
    /* Thread.DEBUG accessors only exist in debug builds. */
    rb_define_singleton_method(rb_cThread, "DEBUG", rb_thread_s_debug, 0);
    rb_define_singleton_method(rb_cThread, "DEBUG=", rb_thread_s_debug_set, 1);
#endif

    /* Thread instance methods. */
    rb_define_method(rb_cThread, "initialize", thread_initialize, -2);
    rb_define_method(rb_cThread, "raise", thread_raise_m, -1);
    rb_define_method(rb_cThread, "join", thread_join_m, -1);
    rb_define_method(rb_cThread, "value", thread_value, 0);
    rb_define_method(rb_cThread, "kill", rb_thread_kill, 0);
    /* #terminate and #exit are aliases of #kill. */
    rb_define_method(rb_cThread, "terminate", rb_thread_kill, 0);
    rb_define_method(rb_cThread, "exit", rb_thread_kill, 0);
    rb_define_method(rb_cThread, "run", rb_thread_run, 0);
    rb_define_method(rb_cThread, "wakeup", rb_thread_wakeup, 0);
    /* Thread-local variable access. */
    rb_define_method(rb_cThread, "[]", rb_thread_aref, 1);
    rb_define_method(rb_cThread, "[]=", rb_thread_aset, 2);
    rb_define_method(rb_cThread, "key?", rb_thread_key_p, 1);
    rb_define_method(rb_cThread, "keys", rb_thread_keys, 0);
    rb_define_method(rb_cThread, "priority", rb_thread_priority, 0);
    rb_define_method(rb_cThread, "priority=", rb_thread_priority_set, 1);
    rb_define_method(rb_cThread, "status", rb_thread_status, 0);
    rb_define_method(rb_cThread, "alive?", rb_thread_alive_p, 0);
    rb_define_method(rb_cThread, "stop?", rb_thread_stop_p, 0);
    rb_define_method(rb_cThread, "abort_on_exception", rb_thread_abort_exc, 0);
    rb_define_method(rb_cThread, "abort_on_exception=", rb_thread_abort_exc_set, 1);
    rb_define_method(rb_cThread, "safe_level", rb_thread_safe_level, 0);
    rb_define_method(rb_cThread, "group", rb_thread_group, 0);
    rb_define_method(rb_cThread, "backtrace", rb_thread_backtrace_m, 0);

    rb_define_method(rb_cThread, "inspect", rb_thread_inspect, 0);

    /* ThreadGroup class. */
    cThGroup = rb_define_class("ThreadGroup", rb_cObject);
    rb_define_alloc_func(cThGroup, thgroup_s_alloc);
    rb_define_method(cThGroup, "list", thgroup_list, 0);
    rb_define_method(cThGroup, "enclose", thgroup_enclose, 0);
    rb_define_method(cThGroup, "enclosed?", thgroup_enclosed_p, 0);
    rb_define_method(cThGroup, "add", thgroup_add, 1);

    {
        /* Create ThreadGroup::Default and place the main thread in it. */
        rb_thread_t *th = GET_THREAD();
        th->thgroup = th->vm->thgroup_default = rb_obj_alloc(cThGroup);
        rb_define_const(cThGroup, "Default", th->thgroup);
    }

    /* Mutex class. */
    rb_cMutex = rb_define_class("Mutex", rb_cObject);
    rb_define_alloc_func(rb_cMutex, mutex_alloc);
    rb_define_method(rb_cMutex, "initialize", mutex_initialize, 0);
    rb_define_method(rb_cMutex, "locked?", rb_mutex_locked_p, 0);
    rb_define_method(rb_cMutex, "try_lock", rb_mutex_trylock, 0);
    rb_define_method(rb_cMutex, "lock", rb_mutex_lock, 0);
    rb_define_method(rb_cMutex, "unlock", rb_mutex_unlock, 0);
    rb_define_method(rb_cMutex, "sleep", mutex_sleep, -1);

    /* Key used by the thread-local recursion-guard table (rb_exec_recursive). */
    recursive_key = rb_intern("__recursive_key__");
    rb_eThreadError = rb_define_class("ThreadError", rb_eStandardError);

    /* trace */
    rb_define_global_function("set_trace_func", set_trace_func, 1);
    rb_define_method(rb_cThread, "set_trace_func", thread_set_trace_func_m, 1);
    rb_define_method(rb_cThread, "add_trace_func", thread_add_trace_func_m, 1);

    /* init thread core */
    {
        /* main thread setting */
        {
            /* acquire global vm lock */
            rb_thread_lock_t *lp = &GET_THREAD()->vm->global_vm_lock;
            native_mutex_initialize(lp);
            native_mutex_lock(lp);
            native_mutex_initialize(&GET_THREAD()->interrupt_lock);
        }
    }

    rb_thread_create_timer_thread();

    /* Reference otherwise-unused symbols to silence "defined but not
     * used" warnings on configurations that do not call them. */
    (void)native_mutex_trylock;
    (void)ruby_thread_set_native;
}
04256 
/*
 * Returns nonzero iff the calling native thread is registered with the
 * Ruby VM (i.e. has an associated rb_thread_t), zero otherwise.
 */
int
ruby_native_thread_p(void)
{
    return ruby_thread_from_native() != 0;
}
04264 
04265 static int
04266 check_deadlock_i(st_data_t key, st_data_t val, int *found)
04267 {
04268     VALUE thval = key;
04269     rb_thread_t *th;
04270     GetThreadPtr(thval, th);
04271 
04272     if (th->status != THREAD_STOPPED_FOREVER || RUBY_VM_INTERRUPTED(th) || th->transition_for_lock) {
04273         *found = 1;
04274     }
04275     else if (th->locking_mutex) {
04276         mutex_t *mutex;
04277         GetMutexPtr(th->locking_mutex, mutex);
04278 
04279         native_mutex_lock(&mutex->lock);
04280         if (mutex->th == th || (!mutex->th && mutex->cond_notified)) {
04281             *found = 1;
04282         }
04283         native_mutex_unlock(&mutex->lock);
04284     }
04285 
04286     return (*found) ? ST_STOP : ST_CONTINUE;
04287 }
04288 
#if 0 /* for debug */
/*
 * Disabled debugging aid (compiled out): prints one line of state per
 * living thread — status, interrupt flag, lock-transition flag, and, if
 * the thread is blocked on a mutex, that mutex's owner and cond_notified
 * count.  Enabled by hand when diagnosing deadlock detection.
 */
static int
debug_i(st_data_t key, st_data_t val, int *found)
{
    VALUE thval = key;
    rb_thread_t *th;
    GetThreadPtr(thval, th);

    printf("th:%p %d %d %d", th, th->status, th->interrupt_flag, th->transition_for_lock);
    if (th->locking_mutex) {
        mutex_t *mutex;
        GetMutexPtr(th->locking_mutex, mutex);

        /* Read mutex fields under its low-level lock, as in check_deadlock_i. */
        native_mutex_lock(&mutex->lock);
        printf(" %p %d\n", mutex->th, mutex->cond_notified);
        native_mutex_unlock(&mutex->lock);
    }
    else puts("");

    return ST_CONTINUE;
}
#endif
04311 
/*
 * Deadlock detector: called when a thread is about to sleep forever.
 * If every living thread is asleep (vm->sleeper equals the living-thread
 * count) and no thread can be woken (see check_deadlock_i), raises a
 * fatal "deadlock detected" error in the main thread.
 */
static void
rb_check_deadlock(rb_vm_t *vm)
{
    int found = 0;

    /* Fast path: some thread is still awake, so no deadlock is possible. */
    if (vm_living_thread_num(vm) > vm->sleeper) return;
    /* sleeper exceeding the living-thread count indicates corrupted accounting. */
    if (vm_living_thread_num(vm) < vm->sleeper) rb_bug("sleeper must not be more than vm_living_thread_num(vm)");

    /* Scan all living threads; found is set if any of them can progress. */
    st_foreach(vm->living_threads, check_deadlock_i, (st_data_t)&found);

    if (!found) {
        VALUE argv[2];
        argv[0] = rb_eFatal;
        argv[1] = rb_str_new2("deadlock detected");
#if 0 /* for debug */
        printf("%d %d %p %p\n", vm->living_threads->num_entries, vm->sleeper, GET_THREAD(), vm->main_thread);
        st_foreach(vm->living_threads, debug_i, (st_data_t)0);
#endif
        /* The main thread is about to be woken by the raise below, so it
         * will no longer be sleeping: decrement before delivering. */
        vm->sleeper--;
        rb_threadptr_raise(vm->main_thread, 2, argv);
    }
}
04334 
04335 static void
04336 update_coverage(rb_event_flag_t event, VALUE proc, VALUE self, ID id, VALUE klass)
04337 {
04338     VALUE coverage = GET_THREAD()->cfp->iseq->coverage;
04339     if (coverage && RBASIC(coverage)->klass == 0) {
04340         long line = rb_sourceline() - 1;
04341         long count;
04342         if (RARRAY_PTR(coverage)[line] == Qnil) {
04343             rb_bug("bug");
04344         }
04345         count = FIX2LONG(RARRAY_PTR(coverage)[line]) + 1;
04346         if (POSFIXABLE(count)) {
04347             RARRAY_PTR(coverage)[line] = LONG2FIX(count);
04348         }
04349     }
04350 }
04351 
04352 VALUE
04353 rb_get_coverages(void)
04354 {
04355     return GET_VM()->coverages;
04356 }
04357 
04358 void
04359 rb_set_coverages(VALUE coverages)
04360 {
04361     GET_VM()->coverages = coverages;
04362     rb_add_event_hook(update_coverage, RUBY_EVENT_COVERAGE, Qnil);
04363 }
04364 
04365 void
04366 rb_reset_coverages(void)
04367 {
04368     GET_VM()->coverages = Qfalse;
04369     rb_remove_event_hook(update_coverage);
04370 }
04371 

Generated on Sat Jul 7 2012 15:29:24 for Ruby by  doxygen 1.7.1