Ruby 2.0.0p645 (2015-04-13 revision 50299)
thread_pthread.c
/* -*-c-*- */
/**********************************************************************

  thread_pthread.c -

  $Author: usa $

  Copyright (C) 2004-2007 Koichi Sasada

**********************************************************************/
#ifdef THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION

#include "gc.h"

#ifdef HAVE_SYS_RESOURCE_H
#include <sys/resource.h>
#endif
#ifdef HAVE_THR_STKSEGMENT
#include <thread.h>
#endif
#if HAVE_FCNTL_H
#include <fcntl.h>
#elif HAVE_SYS_FCNTL_H
#include <sys/fcntl.h>
#endif
#ifdef HAVE_SYS_PRCTL_H
#include <sys/prctl.h>
#endif
#if defined(__native_client__) && defined(NACL_NEWLIB)
# include "nacl/select.h"
#endif
#if HAVE_POLL
#include <poll.h>
#endif
#if defined(HAVE_SYS_TIME_H)
#include <sys/time.h>
#endif

static void native_mutex_lock(pthread_mutex_t *lock);
static void native_mutex_unlock(pthread_mutex_t *lock);
static int native_mutex_trylock(pthread_mutex_t *lock);
static void native_mutex_initialize(pthread_mutex_t *lock);
static void native_mutex_destroy(pthread_mutex_t *lock);
static void native_cond_signal(rb_thread_cond_t *cond);
static void native_cond_broadcast(rb_thread_cond_t *cond);
static void native_cond_wait(rb_thread_cond_t *cond, pthread_mutex_t *mutex);
static void native_cond_initialize(rb_thread_cond_t *cond, int flags);
static void native_cond_destroy(rb_thread_cond_t *cond);
static void rb_thread_wakeup_timer_thread_low(void);
static pthread_t timer_thread_id;

#define RB_CONDATTR_CLOCK_MONOTONIC 1

#if defined(HAVE_PTHREAD_CONDATTR_SETCLOCK) && defined(HAVE_CLOCKID_T) && \
    defined(CLOCK_REALTIME) && defined(CLOCK_MONOTONIC) && \
    defined(HAVE_CLOCK_GETTIME) && defined(HAVE_PTHREAD_CONDATTR_INIT)
#define USE_MONOTONIC_COND 1
#else
#define USE_MONOTONIC_COND 0
#endif

#if defined(HAVE_POLL) && defined(HAVE_FCNTL) && defined(F_GETFL) && defined(F_SETFL) && defined(O_NONBLOCK) && !defined(__native_client__)
/* The timer thread sleeps while only one Ruby thread is running. */
# define USE_SLEEPY_TIMER_THREAD 1
#else
# define USE_SLEEPY_TIMER_THREAD 0
#endif

#ifndef ARRAY_SIZE
#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
#endif

static void
gvl_acquire_common(rb_vm_t *vm)
{
    if (vm->gvl.acquired) {

        vm->gvl.waiting++;
        if (vm->gvl.waiting == 1) {
            /*
             * Wake up the timer thread only if it is sleeping.
             * If the timer thread is in polling mode, we don't want to
             * disturb its polling interval.
             */
            rb_thread_wakeup_timer_thread_low();
        }

        while (vm->gvl.acquired) {
            native_cond_wait(&vm->gvl.cond, &vm->gvl.lock);
        }

        vm->gvl.waiting--;

        if (vm->gvl.need_yield) {
            vm->gvl.need_yield = 0;
            native_cond_signal(&vm->gvl.switch_cond);
        }
    }

    vm->gvl.acquired = 1;
}

static void
gvl_acquire(rb_vm_t *vm, rb_thread_t *th)
{
    native_mutex_lock(&vm->gvl.lock);
    gvl_acquire_common(vm);
    native_mutex_unlock(&vm->gvl.lock);
}

static void
gvl_release_common(rb_vm_t *vm)
{
    vm->gvl.acquired = 0;
    if (vm->gvl.waiting > 0)
        native_cond_signal(&vm->gvl.cond);
}

static void
gvl_release(rb_vm_t *vm)
{
    native_mutex_lock(&vm->gvl.lock);
    gvl_release_common(vm);
    native_mutex_unlock(&vm->gvl.lock);
}

static void
gvl_yield(rb_vm_t *vm, rb_thread_t *th)
{
    native_mutex_lock(&vm->gvl.lock);

    gvl_release_common(vm);

    /* Another thread is already processing a GVL yield. */
    if (UNLIKELY(vm->gvl.wait_yield)) {
        while (vm->gvl.wait_yield)
            native_cond_wait(&vm->gvl.switch_wait_cond, &vm->gvl.lock);
        goto acquire;
    }

    if (vm->gvl.waiting > 0) {
        /* Wait until another thread takes the GVL. */
        vm->gvl.need_yield = 1;
        vm->gvl.wait_yield = 1;
        while (vm->gvl.need_yield)
            native_cond_wait(&vm->gvl.switch_cond, &vm->gvl.lock);
        vm->gvl.wait_yield = 0;
    }
    else {
        native_mutex_unlock(&vm->gvl.lock);
        sched_yield();
        native_mutex_lock(&vm->gvl.lock);
    }

    native_cond_broadcast(&vm->gvl.switch_wait_cond);
  acquire:
    gvl_acquire_common(vm);
    native_mutex_unlock(&vm->gvl.lock);
}

static void
gvl_init(rb_vm_t *vm)
{
    native_mutex_initialize(&vm->gvl.lock);
    native_cond_initialize(&vm->gvl.cond, RB_CONDATTR_CLOCK_MONOTONIC);
    native_cond_initialize(&vm->gvl.switch_cond, RB_CONDATTR_CLOCK_MONOTONIC);
    native_cond_initialize(&vm->gvl.switch_wait_cond, RB_CONDATTR_CLOCK_MONOTONIC);
    vm->gvl.acquired = 0;
    vm->gvl.waiting = 0;
    vm->gvl.need_yield = 0;
    vm->gvl.wait_yield = 0;
}

static void
gvl_destroy(rb_vm_t *vm)
{
    native_cond_destroy(&vm->gvl.switch_wait_cond);
    native_cond_destroy(&vm->gvl.switch_cond);
    native_cond_destroy(&vm->gvl.cond);
    native_mutex_destroy(&vm->gvl.lock);
}

static void
gvl_atfork(rb_vm_t *vm)
{
    gvl_init(vm);
    gvl_acquire(vm, GET_THREAD());
}
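
/*
 * The GVL primitives above are how the VM brackets blocking operations:
 * a thread drops the GVL before a call that may block and contends for
 * it again afterwards, so other Ruby threads can run in the meantime.
 * A minimal sketch of that pattern follows (illustrative only; the
 * blocking_read() helper is hypothetical, and the real VM wraps this in
 * the GVL_UNLOCK_BEGIN/GVL_UNLOCK_END macros from thread.c).
 */
#if 0 /* sketch, not compiled */
static ssize_t
blocking_read(rb_vm_t *vm, rb_thread_t *th, int fd, void *buf, size_t len)
{
    ssize_t n;

    gvl_release(vm);         /* let other Ruby threads run */
    n = read(fd, buf, len);  /* may block; the GVL is not held */
    gvl_acquire(vm, th);     /* contend for the GVL again */
    return n;
}
#endif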

#define NATIVE_MUTEX_LOCK_DEBUG 0

static void
mutex_debug(const char *msg, pthread_mutex_t *lock)
{
    if (NATIVE_MUTEX_LOCK_DEBUG) {
        int r;
        static pthread_mutex_t dbglock = PTHREAD_MUTEX_INITIALIZER;

        if ((r = pthread_mutex_lock(&dbglock)) != 0) {exit(EXIT_FAILURE);}
        fprintf(stdout, "%s: %p\n", msg, (void *)lock);
        if ((r = pthread_mutex_unlock(&dbglock)) != 0) {exit(EXIT_FAILURE);}
    }
}

static void
native_mutex_lock(pthread_mutex_t *lock)
{
    int r;
    mutex_debug("lock", lock);
    if ((r = pthread_mutex_lock(lock)) != 0) {
        rb_bug_errno("pthread_mutex_lock", r);
    }
}

static void
native_mutex_unlock(pthread_mutex_t *lock)
{
    int r;
    mutex_debug("unlock", lock);
    if ((r = pthread_mutex_unlock(lock)) != 0) {
        rb_bug_errno("pthread_mutex_unlock", r);
    }
}

static inline int
native_mutex_trylock(pthread_mutex_t *lock)
{
    int r;
    mutex_debug("trylock", lock);
    if ((r = pthread_mutex_trylock(lock)) != 0) {
        if (r == EBUSY) {
            return EBUSY;
        }
        else {
            rb_bug_errno("pthread_mutex_trylock", r);
        }
    }
    return 0;
}

static void
native_mutex_initialize(pthread_mutex_t *lock)
{
    int r = pthread_mutex_init(lock, 0);
    mutex_debug("init", lock);
    if (r != 0) {
        rb_bug_errno("pthread_mutex_init", r);
    }
}

static void
native_mutex_destroy(pthread_mutex_t *lock)
{
    int r = pthread_mutex_destroy(lock);
    mutex_debug("destroy", lock);
    if (r != 0) {
        rb_bug_errno("pthread_mutex_destroy", r);
    }
}

static void
native_cond_initialize(rb_thread_cond_t *cond, int flags)
{
#ifdef HAVE_PTHREAD_COND_INIT
    int r;
# if USE_MONOTONIC_COND
    pthread_condattr_t attr;

    pthread_condattr_init(&attr);

    cond->clockid = CLOCK_REALTIME;
    if (flags & RB_CONDATTR_CLOCK_MONOTONIC) {
        r = pthread_condattr_setclock(&attr, CLOCK_MONOTONIC);
        if (r == 0) {
            cond->clockid = CLOCK_MONOTONIC;
        }
    }

    r = pthread_cond_init(&cond->cond, &attr);
    pthread_condattr_destroy(&attr);
# else
    r = pthread_cond_init(&cond->cond, NULL);
# endif
    if (r != 0) {
        rb_bug_errno("pthread_cond_init", r);
    }

    return;
#endif
}

static void
native_cond_destroy(rb_thread_cond_t *cond)
{
#ifdef HAVE_PTHREAD_COND_INIT
    int r = pthread_cond_destroy(&cond->cond);
    if (r != 0) {
        rb_bug_errno("pthread_cond_destroy", r);
    }
#endif
}

/*
 * In OS X 10.7 (Lion), pthread_cond_signal and pthread_cond_broadcast can
 * return EAGAIN after retrying 8192 times, as can be seen in:
 *
 * http://www.opensource.apple.com/source/Libc/Libc-763.11/pthreads/pthread_cond.c
 *
 * The following native_cond_signal and native_cond_broadcast functions
 * therefore need to retry until the pthread functions stop returning
 * EAGAIN.
 */

static void
native_cond_signal(rb_thread_cond_t *cond)
{
    int r;
    do {
        r = pthread_cond_signal(&cond->cond);
    } while (r == EAGAIN);
    if (r != 0) {
        rb_bug_errno("pthread_cond_signal", r);
    }
}

static void
native_cond_broadcast(rb_thread_cond_t *cond)
{
    int r;
    do {
        r = pthread_cond_broadcast(&cond->cond);
    } while (r == EAGAIN);
    if (r != 0) {
        rb_bug_errno("pthread_cond_broadcast", r);
    }
}

static void
native_cond_wait(rb_thread_cond_t *cond, pthread_mutex_t *mutex)
{
    int r = pthread_cond_wait(&cond->cond, mutex);
    if (r != 0) {
        rb_bug_errno("pthread_cond_wait", r);
    }
}

static int
native_cond_timedwait(rb_thread_cond_t *cond, pthread_mutex_t *mutex, struct timespec *ts)
{
    int r;

    /*
     * An old Linux may return EINTR, even though POSIX says
     * "These functions shall not return an error code of [EINTR]".
     * http://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_cond_timedwait.html
     * Let's hide it from arch-generic code.
     */
    do {
        r = pthread_cond_timedwait(&cond->cond, mutex, ts);
    } while (r == EINTR);

    if (r != 0 && r != ETIMEDOUT) {
        rb_bug_errno("pthread_cond_timedwait", r);
    }

    return r;
}

#if SIZEOF_TIME_T == SIZEOF_LONG
typedef unsigned long unsigned_time_t;
#elif SIZEOF_TIME_T == SIZEOF_INT
typedef unsigned int unsigned_time_t;
#elif SIZEOF_TIME_T == SIZEOF_LONG_LONG
typedef unsigned LONG_LONG unsigned_time_t;
#else
# error cannot find integer type which size is same as time_t.
#endif

#define TIMET_MAX (~(time_t)0 <= 0 ? (time_t)((~(unsigned_time_t)0) >> 1) : (time_t)(~(unsigned_time_t)0))
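
/*
 * How TIMET_MAX works: ~(time_t)0 is the all-ones bit pattern, which
 * compares <= 0 exactly when time_t is signed; the maximum is then the
 * unsigned all-ones value shifted right by one. A small self-contained
 * check of that reasoning (illustrative sketch; it additionally assumes
 * time_t is long, which need not hold everywhere):
 */
#if 0 /* sketch, not compiled */
#include <assert.h>
#include <limits.h>
int
main(void)
{
    if (~(time_t)0 <= 0)    /* signed time_t */
        assert((time_t)((~(unsigned long)0) >> 1) == (time_t)LONG_MAX);
    else                    /* unsigned time_t */
        assert((time_t)(~(unsigned long)0) == (time_t)ULONG_MAX);
    return 0;
}
#endif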

static struct timespec
native_cond_timeout(rb_thread_cond_t *cond, struct timespec timeout_rel)
{
    int ret;
    struct timeval tv;
    struct timespec timeout;
    struct timespec now;

#if USE_MONOTONIC_COND
    if (cond->clockid == CLOCK_MONOTONIC) {
        ret = clock_gettime(cond->clockid, &now);
        if (ret != 0)
            rb_sys_fail("clock_gettime()");
        goto out;
    }

    if (cond->clockid != CLOCK_REALTIME)
        rb_bug("unsupported clockid %"PRIdVALUE, (SIGNED_VALUE)cond->clockid);
#endif

    ret = gettimeofday(&tv, 0);
    if (ret != 0)
        rb_sys_fail(0);
    now.tv_sec = tv.tv_sec;
    now.tv_nsec = tv.tv_usec * 1000;

#if USE_MONOTONIC_COND
  out:
#endif
    timeout.tv_sec = now.tv_sec;
    timeout.tv_nsec = now.tv_nsec;
    timeout.tv_sec += timeout_rel.tv_sec;
    timeout.tv_nsec += timeout_rel.tv_nsec;

    if (timeout.tv_nsec >= 1000*1000*1000) {
        timeout.tv_sec++;
        timeout.tv_nsec -= 1000*1000*1000;
    }

    if (timeout.tv_sec < now.tv_sec)
        timeout.tv_sec = TIMET_MAX;

    return timeout;
}
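
/*
 * Callers pair native_cond_timeout() with native_cond_timedwait() to get
 * a relative wait: first turn the relative interval into an absolute
 * deadline on the condition variable's clock, then loop on that same
 * deadline until it expires, which absorbs spurious wakeups. A condensed
 * sketch of the idiom (the `done` flag and the helper are hypothetical;
 * native_sleep() below is the real caller in this file):
 */
#if 0 /* sketch, not compiled */
static int
wait_for_flag(rb_thread_cond_t *cond, pthread_mutex_t *lock,
              volatile int *done, struct timespec rel)
{
    struct timespec deadline = native_cond_timeout(cond, rel);
    int r = 0;

    native_mutex_lock(lock);
    while (!*done && r != ETIMEDOUT) {
        r = native_cond_timedwait(cond, lock, &deadline); /* deadline stays fixed */
    }
    native_mutex_unlock(lock);
    return *done;
}
#endif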

#define native_cleanup_push pthread_cleanup_push
#define native_cleanup_pop pthread_cleanup_pop
#ifdef HAVE_SCHED_YIELD
#define native_thread_yield() (void)sched_yield()
#else
#define native_thread_yield() ((void)0)
#endif

#if defined(SIGVTALRM) && !defined(__CYGWIN__) && !defined(__SYMBIAN32__)
#define USE_SIGNAL_THREAD_LIST 1
#endif
#ifdef USE_SIGNAL_THREAD_LIST
static void add_signal_thread_list(rb_thread_t *th);
static void remove_signal_thread_list(rb_thread_t *th);
static rb_thread_lock_t signal_thread_list_lock;
#endif

static pthread_key_t ruby_native_thread_key;

static void
null_func(int i)
{
    /* null */
}

static rb_thread_t *
ruby_thread_from_native(void)
{
    return pthread_getspecific(ruby_native_thread_key);
}

static int
ruby_thread_set_native(rb_thread_t *th)
{
    return pthread_setspecific(ruby_native_thread_key, th) == 0;
}

static void native_thread_init(rb_thread_t *th);

void
Init_native_thread(void)
{
    rb_thread_t *th = GET_THREAD();

    pthread_key_create(&ruby_native_thread_key, NULL);
    th->thread_id = pthread_self();
    native_thread_init(th);
#ifdef USE_SIGNAL_THREAD_LIST
    native_mutex_initialize(&signal_thread_list_lock);
#endif
#ifndef __native_client__
    posix_signal(SIGVTALRM, null_func);
#endif
}

static void
native_thread_init(rb_thread_t *th)
{
    native_cond_initialize(&th->native_thread_data.sleep_cond, RB_CONDATTR_CLOCK_MONOTONIC);
    ruby_thread_set_native(th);
}

static void
native_thread_destroy(rb_thread_t *th)
{
    native_cond_destroy(&th->native_thread_data.sleep_cond);
}

#ifndef USE_THREAD_CACHE
#define USE_THREAD_CACHE 0
#endif

#if USE_THREAD_CACHE
static rb_thread_t *register_cached_thread_and_wait(void);
#endif

#if defined HAVE_PTHREAD_GETATTR_NP || defined HAVE_PTHREAD_ATTR_GET_NP
#define STACKADDR_AVAILABLE 1
#elif defined HAVE_PTHREAD_GET_STACKADDR_NP && defined HAVE_PTHREAD_GET_STACKSIZE_NP
#define STACKADDR_AVAILABLE 1
#undef MAINSTACKADDR_AVAILABLE
#define MAINSTACKADDR_AVAILABLE 0
void *pthread_get_stackaddr_np(pthread_t);
size_t pthread_get_stacksize_np(pthread_t);
#elif defined HAVE_THR_STKSEGMENT || defined HAVE_PTHREAD_STACKSEG_NP
#define STACKADDR_AVAILABLE 1
#elif defined HAVE_PTHREAD_GETTHRDS_NP
#define STACKADDR_AVAILABLE 1
#endif

#ifndef MAINSTACKADDR_AVAILABLE
# ifdef STACKADDR_AVAILABLE
#   define MAINSTACKADDR_AVAILABLE 1
# else
#   define MAINSTACKADDR_AVAILABLE 0
# endif
#endif

#ifdef STACKADDR_AVAILABLE
/*
 * Get the initial address and size of the current thread's stack.
 */
static int
get_stack(void **addr, size_t *size)
{
#define CHECK_ERR(expr) \
    {int err = (expr); if (err) return err;}
#ifdef HAVE_PTHREAD_GETATTR_NP /* Linux */
    pthread_attr_t attr;
    size_t guard = 0;
    STACK_GROW_DIR_DETECTION;
    CHECK_ERR(pthread_getattr_np(pthread_self(), &attr));
# ifdef HAVE_PTHREAD_ATTR_GETSTACK
    CHECK_ERR(pthread_attr_getstack(&attr, addr, size));
    STACK_DIR_UPPER((void)0, (void)(*addr = (char *)*addr + *size));
# else
    CHECK_ERR(pthread_attr_getstackaddr(&attr, addr));
    CHECK_ERR(pthread_attr_getstacksize(&attr, size));
# endif
    CHECK_ERR(pthread_attr_getguardsize(&attr, &guard));
    *size -= guard;
    pthread_attr_destroy(&attr);
#elif defined HAVE_PTHREAD_ATTR_GET_NP /* FreeBSD, DragonFly BSD, NetBSD */
    pthread_attr_t attr;
    CHECK_ERR(pthread_attr_init(&attr));
    CHECK_ERR(pthread_attr_get_np(pthread_self(), &attr));
# ifdef HAVE_PTHREAD_ATTR_GETSTACK
    CHECK_ERR(pthread_attr_getstack(&attr, addr, size));
    STACK_DIR_UPPER((void)0, (void)(*addr = (char *)*addr + *size));
# else
    CHECK_ERR(pthread_attr_getstackaddr(&attr, addr));
    CHECK_ERR(pthread_attr_getstacksize(&attr, size));
    STACK_DIR_UPPER((void)0, (void)(*addr = (char *)*addr + *size));
# endif
    pthread_attr_destroy(&attr);
#elif (defined HAVE_PTHREAD_GET_STACKADDR_NP && defined HAVE_PTHREAD_GET_STACKSIZE_NP) /* MacOS X */
    pthread_t th = pthread_self();
    *addr = pthread_get_stackaddr_np(th);
    *size = pthread_get_stacksize_np(th);
#elif defined HAVE_THR_STKSEGMENT || defined HAVE_PTHREAD_STACKSEG_NP
    stack_t stk;
# if defined HAVE_THR_STKSEGMENT /* Solaris */
    CHECK_ERR(thr_stksegment(&stk));
# else /* OpenBSD */
    CHECK_ERR(pthread_stackseg_np(pthread_self(), &stk));
# endif
    *addr = stk.ss_sp;
    *size = stk.ss_size;
#elif defined HAVE_PTHREAD_GETTHRDS_NP /* AIX */
    pthread_t th = pthread_self();
    struct __pthrdsinfo thinfo;
    char reg[256];
    int regsiz = sizeof(reg);
    CHECK_ERR(pthread_getthrds_np(&th, PTHRDSINFO_QUERY_ALL,
                                  &thinfo, sizeof(thinfo),
                                  &reg, &regsiz));
    *addr = thinfo.__pi_stackaddr;
    *size = thinfo.__pi_stacksize;
    STACK_DIR_UPPER((void)0, (void)(*addr = (char *)*addr + *size));
#else
#error STACKADDR_AVAILABLE is defined but not implemented.
#endif
    return 0;
#undef CHECK_ERR
}
#endif
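
/*
 * For reference, the Linux branch above reduces to the following
 * self-contained pattern around glibc's pthread_getattr_np() (an
 * illustrative sketch with error handling and the guard-page
 * adjustment elided):
 */
#if 0 /* sketch, not compiled */
static void
print_stack_bounds(void)
{
    pthread_attr_t attr;
    void *addr;
    size_t size;

    if (pthread_getattr_np(pthread_self(), &attr) == 0) {
        if (pthread_attr_getstack(&attr, &addr, &size) == 0)
            printf("stack: %p, %zu bytes\n", addr, size);
        pthread_attr_destroy(&attr);
    }
}
#endif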

static struct {
    rb_thread_id_t id;
    size_t stack_maxsize;
    VALUE *stack_start;
#ifdef __ia64
    VALUE *register_stack_start;
#endif
} native_main_thread;

#ifdef STACK_END_ADDRESS
extern void *STACK_END_ADDRESS;
#endif

enum {
    RUBY_STACK_SPACE_LIMIT = 1024 * 1024, /* 1024KB */
    RUBY_STACK_SPACE_RATIO = 5
};

static size_t
space_size(size_t stack_size)
{
    size_t space_size = stack_size / RUBY_STACK_SPACE_RATIO;
    if (space_size > RUBY_STACK_SPACE_LIMIT) {
        return RUBY_STACK_SPACE_LIMIT;
    }
    else {
        return space_size;
    }
}

#undef ruby_init_stack
/* Set the stack bottom of the Ruby implementation.
 *
 * You must call this function before any heap allocation by the Ruby
 * implementation, or the GC will break living objects. */
void
ruby_init_stack(volatile VALUE *addr
#ifdef __ia64
    , void *bsp
#endif
    )
{
    native_main_thread.id = pthread_self();
#ifdef STACK_END_ADDRESS
    native_main_thread.stack_start = STACK_END_ADDRESS;
#else
    if (!native_main_thread.stack_start ||
        STACK_UPPER((VALUE *)(void *)&addr,
                    native_main_thread.stack_start > addr,
                    native_main_thread.stack_start < addr)) {
        native_main_thread.stack_start = (VALUE *)addr;
    }
#endif
#ifdef __ia64
    if (!native_main_thread.register_stack_start ||
        (VALUE*)bsp < native_main_thread.register_stack_start) {
        native_main_thread.register_stack_start = (VALUE*)bsp;
    }
#endif
    {
#if defined(PTHREAD_STACK_DEFAULT)
# if PTHREAD_STACK_DEFAULT < RUBY_STACK_SPACE*5
#  error "PTHREAD_STACK_DEFAULT is too small"
# endif
        size_t size = PTHREAD_STACK_DEFAULT;
#else
        size_t size = RUBY_VM_THREAD_VM_STACK_SIZE;
#endif
        size_t space = space_size(size);
#if MAINSTACKADDR_AVAILABLE
        void* stackaddr;
        STACK_GROW_DIR_DETECTION;
        if (get_stack(&stackaddr, &size) == 0) {
            space = STACK_DIR_UPPER((char *)addr - (char *)stackaddr, (char *)stackaddr - (char *)addr);
        }
        native_main_thread.stack_maxsize = size - space;
#elif defined(HAVE_GETRLIMIT)
        int pagesize = getpagesize();
        struct rlimit rlim;
        STACK_GROW_DIR_DETECTION;
        if (getrlimit(RLIMIT_STACK, &rlim) == 0) {
            size = (size_t)rlim.rlim_cur;
        }
        addr = native_main_thread.stack_start;
        if (IS_STACK_DIR_UPPER()) {
            space = ((size_t)((char *)addr + size) / pagesize) * pagesize - (size_t)addr;
        }
        else {
            space = (size_t)addr - ((size_t)((char *)addr - size) / pagesize + 1) * pagesize;
        }
        native_main_thread.stack_maxsize = space;
#endif
    }

    /* If addr is out of the estimated range of the main-thread stack, it
     * should be on a co-routine (alternative stack). [Feature #2294] */
    {
        void *start, *end;
        STACK_GROW_DIR_DETECTION;

        if (IS_STACK_DIR_UPPER()) {
            start = native_main_thread.stack_start;
            end = (char *)native_main_thread.stack_start + native_main_thread.stack_maxsize;
        }
        else {
            start = (char *)native_main_thread.stack_start - native_main_thread.stack_maxsize;
            end = native_main_thread.stack_start;
        }

        if ((void *)addr < start || (void *)addr > end) {
            /* out of range */
            native_main_thread.stack_start = (VALUE *)addr;
            native_main_thread.stack_maxsize = 0; /* unknown */
        }
    }
}

#define CHECK_ERR(expr) \
    {int err = (expr); if (err) {rb_bug_errno(#expr, err);}}

static int
native_thread_init_stack(rb_thread_t *th)
{
    rb_thread_id_t curr = pthread_self();

    if (pthread_equal(curr, native_main_thread.id)) {
        th->machine_stack_start = native_main_thread.stack_start;
        th->machine_stack_maxsize = native_main_thread.stack_maxsize;
    }
    else {
#ifdef STACKADDR_AVAILABLE
        void *start;
        size_t size;

        if (get_stack(&start, &size) == 0) {
            th->machine_stack_start = start;
            th->machine_stack_maxsize = size;
        }
#else
        rb_raise(rb_eNotImpError, "ruby engine can initialize only in the main thread");
#endif
    }
#ifdef __ia64
    th->machine_register_stack_start = native_main_thread.register_stack_start;
    th->machine_stack_maxsize /= 2;
    th->machine_register_stack_maxsize = th->machine_stack_maxsize;
#endif
    return 0;
}

#ifndef __CYGWIN__
#define USE_NATIVE_THREAD_INIT 1
#endif

static void *
thread_start_func_1(void *th_ptr)
{
#if USE_THREAD_CACHE
  thread_start:
#endif
    {
        rb_thread_t *th = th_ptr;
#if !defined USE_NATIVE_THREAD_INIT
        VALUE stack_start;
#endif

#if defined USE_NATIVE_THREAD_INIT
        native_thread_init_stack(th);
#endif
        native_thread_init(th);
        /* run */
#if defined USE_NATIVE_THREAD_INIT
        thread_start_func_2(th, th->machine_stack_start, rb_ia64_bsp());
#else
        thread_start_func_2(th, &stack_start, rb_ia64_bsp());
#endif
    }
#if USE_THREAD_CACHE
    if (1) {
        /* cache thread */
        rb_thread_t *th;
        if ((th = register_cached_thread_and_wait()) != 0) {
            th_ptr = (void *)th;
            th->thread_id = pthread_self();
            goto thread_start;
        }
    }
#endif
    return 0;
}

struct cached_thread_entry {
    volatile rb_thread_t **th_area;
    rb_thread_cond_t *cond;
    struct cached_thread_entry *next;
};


#if USE_THREAD_CACHE
static pthread_mutex_t thread_cache_lock = PTHREAD_MUTEX_INITIALIZER;
struct cached_thread_entry *cached_thread_root;

static rb_thread_t *
register_cached_thread_and_wait(void)
{
    rb_thread_cond_t cond = { PTHREAD_COND_INITIALIZER, };
    volatile rb_thread_t *th_area = 0;
    struct timeval tv;
    struct timespec ts;
    struct cached_thread_entry *entry =
        (struct cached_thread_entry *)malloc(sizeof(struct cached_thread_entry));

    if (entry == 0) {
        return 0; /* failed -> terminate thread immediately */
    }

    gettimeofday(&tv, 0);
    ts.tv_sec = tv.tv_sec + 60;
    ts.tv_nsec = tv.tv_usec * 1000;

    pthread_mutex_lock(&thread_cache_lock);
    {
        entry->th_area = &th_area;
        entry->cond = &cond;
        entry->next = cached_thread_root;
        cached_thread_root = entry;

        native_cond_timedwait(&cond, &thread_cache_lock, &ts);

        {
            struct cached_thread_entry *e = cached_thread_root;
            struct cached_thread_entry *prev = cached_thread_root;

            while (e) {
                if (e == entry) {
                    if (prev == cached_thread_root) {
                        cached_thread_root = e->next;
                    }
                    else {
                        prev->next = e->next;
                    }
                    break;
                }
                prev = e;
                e = e->next;
            }
        }

        free(entry); /* ok */
        native_cond_destroy(&cond);
    }
    pthread_mutex_unlock(&thread_cache_lock);

    return (rb_thread_t *)th_area;
}
#endif

static int
use_cached_thread(rb_thread_t *th)
{
    int result = 0;
#if USE_THREAD_CACHE
    struct cached_thread_entry *entry;

    if (cached_thread_root) {
        pthread_mutex_lock(&thread_cache_lock);
        entry = cached_thread_root;
        {
            if (cached_thread_root) {
                cached_thread_root = entry->next;
                *entry->th_area = th;
                result = 1;
            }
        }
        if (result) {
            native_cond_signal(entry->cond);
        }
        pthread_mutex_unlock(&thread_cache_lock);
    }
#endif
    return result;
}

static int
native_thread_create(rb_thread_t *th)
{
    int err = 0;

    if (use_cached_thread(th)) {
        thread_debug("create (use cached thread): %p\n", (void *)th);
    }
    else {
        pthread_attr_t attr;
        const size_t stack_size = th->vm->default_params.thread_machine_stack_size;
        const size_t space = space_size(stack_size);

        th->machine_stack_maxsize = stack_size - space;
#ifdef __ia64
        th->machine_stack_maxsize /= 2;
        th->machine_register_stack_maxsize = th->machine_stack_maxsize;
#endif

#ifdef HAVE_PTHREAD_ATTR_INIT
        CHECK_ERR(pthread_attr_init(&attr));

# ifdef PTHREAD_STACK_MIN
        thread_debug("create - stack size: %lu\n", (unsigned long)stack_size);
        CHECK_ERR(pthread_attr_setstacksize(&attr, stack_size));
# endif

# ifdef HAVE_PTHREAD_ATTR_SETINHERITSCHED
        CHECK_ERR(pthread_attr_setinheritsched(&attr, PTHREAD_INHERIT_SCHED));
# endif
        CHECK_ERR(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED));

        err = pthread_create(&th->thread_id, &attr, thread_start_func_1, th);
#else
        err = pthread_create(&th->thread_id, NULL, thread_start_func_1, th);
#endif
        thread_debug("create: %p (%d)\n", (void *)th, err);
#ifdef HAVE_PTHREAD_ATTR_INIT
        CHECK_ERR(pthread_attr_destroy(&attr));
#endif
    }
    return err;
}

static void
native_thread_join(pthread_t th)
{
    int err = pthread_join(th, 0);
    if (err) {
        rb_raise(rb_eThreadError, "native_thread_join() failed (%d)", err);
    }
}


#if USE_NATIVE_THREAD_PRIORITY

static void
native_thread_apply_priority(rb_thread_t *th)
{
#if defined(_POSIX_PRIORITY_SCHEDULING) && (_POSIX_PRIORITY_SCHEDULING > 0)
    struct sched_param sp;
    int policy;
    int priority = 0 - th->priority;
    int max, min;
    pthread_getschedparam(th->thread_id, &policy, &sp);
    max = sched_get_priority_max(policy);
    min = sched_get_priority_min(policy);

    if (min > priority) {
        priority = min;
    }
    else if (max < priority) {
        priority = max;
    }

    sp.sched_priority = priority;
    pthread_setschedparam(th->thread_id, policy, &sp);
#else
    /* not touched */
#endif
}

#endif /* USE_NATIVE_THREAD_PRIORITY */

static int
native_fd_select(int n, rb_fdset_t *readfds, rb_fdset_t *writefds, rb_fdset_t *exceptfds, struct timeval *timeout, rb_thread_t *th)
{
    return rb_fd_select(n, readfds, writefds, exceptfds, timeout);
}

static void
ubf_pthread_cond_signal(void *ptr)
{
    rb_thread_t *th = (rb_thread_t *)ptr;
    thread_debug("ubf_pthread_cond_signal (%p)\n", (void *)th);
    native_cond_signal(&th->native_thread_data.sleep_cond);
}

static void
native_sleep(rb_thread_t *th, struct timeval *timeout_tv)
{
    struct timespec timeout;
    pthread_mutex_t *lock = &th->interrupt_lock;
    rb_thread_cond_t *cond = &th->native_thread_data.sleep_cond;

    if (timeout_tv) {
        struct timespec timeout_rel;

        timeout_rel.tv_sec = timeout_tv->tv_sec;
        timeout_rel.tv_nsec = timeout_tv->tv_usec * 1000;

        /* Solaris cond_timedwait() returns EINVAL if an argument is greater
         * than current_time + 100,000,000 seconds, so we cap the timeout at
         * 100,000,000 seconds. Waking early is treated as a kind of spurious
         * wakeup; callers of native_sleep must already cope with spurious
         * wakeups.
         *
         * See also [Bug #1341] [ruby-core:29702]
         * http://download.oracle.com/docs/cd/E19683-01/816-0216/6m6ngupgv/index.html
         */
        if (timeout_rel.tv_sec > 100000000) {
            timeout_rel.tv_sec = 100000000;
            timeout_rel.tv_nsec = 0;
        }

        timeout = native_cond_timeout(cond, timeout_rel);
    }

    GVL_UNLOCK_BEGIN();
    {
        pthread_mutex_lock(lock);
        th->unblock.func = ubf_pthread_cond_signal;
        th->unblock.arg = th;

        if (RUBY_VM_INTERRUPTED(th)) {
            /* interrupted; return immediately */
            thread_debug("native_sleep: interrupted before sleep\n");
        }
        else {
            if (!timeout_tv)
                native_cond_wait(cond, lock);
            else
                native_cond_timedwait(cond, lock, &timeout);
        }
        th->unblock.func = 0;
        th->unblock.arg = 0;

        pthread_mutex_unlock(lock);
    }
    GVL_UNLOCK_END();

    thread_debug("native_sleep done\n");
}

#ifdef USE_SIGNAL_THREAD_LIST
struct signal_thread_list {
    rb_thread_t *th;
    struct signal_thread_list *prev;
    struct signal_thread_list *next;
};

static struct signal_thread_list signal_thread_list_anchor = {
    0, 0, 0,
};

#define FGLOCK(lock, body) do { \
    native_mutex_lock(lock); \
    { \
        body; \
    } \
    native_mutex_unlock(lock); \
} while (0)

#if 0 /* for debug */
static void
print_signal_list(char *str)
{
    struct signal_thread_list *list =
        signal_thread_list_anchor.next;
    thread_debug("list (%s)> ", str);
    while (list) {
        thread_debug("%p (%p), ", list->th, list->th->thread_id);
        list = list->next;
    }
    thread_debug("\n");
}
#endif

static void
add_signal_thread_list(rb_thread_t *th)
{
    if (!th->native_thread_data.signal_thread_list) {
        FGLOCK(&signal_thread_list_lock, {
            struct signal_thread_list *list =
                malloc(sizeof(struct signal_thread_list));

            if (list == 0) {
                fprintf(stderr, "[FATAL] failed to allocate memory\n");
                exit(EXIT_FAILURE);
            }

            list->th = th;

            list->prev = &signal_thread_list_anchor;
            list->next = signal_thread_list_anchor.next;
            if (list->next) {
                list->next->prev = list;
            }
            signal_thread_list_anchor.next = list;
            th->native_thread_data.signal_thread_list = list;
        });
    }
}

static void
remove_signal_thread_list(rb_thread_t *th)
{
    if (th->native_thread_data.signal_thread_list) {
        FGLOCK(&signal_thread_list_lock, {
            struct signal_thread_list *list =
                (struct signal_thread_list *)
                th->native_thread_data.signal_thread_list;

            list->prev->next = list->next;
            if (list->next) {
                list->next->prev = list->prev;
            }
            th->native_thread_data.signal_thread_list = 0;
            list->th = 0;
            free(list); /* ok */
        });
    }
}

static void
ubf_select_each(rb_thread_t *th)
{
    thread_debug("ubf_select_each (%p)\n", (void *)th->thread_id);
    if (th) {
        pthread_kill(th->thread_id, SIGVTALRM);
    }
}

static void
ubf_select(void *ptr)
{
    rb_thread_t *th = (rb_thread_t *)ptr;
    add_signal_thread_list(th);
    if (pthread_self() != timer_thread_id)
        rb_thread_wakeup_timer_thread(); /* activate timer thread */
    ubf_select_each(th);
}

static void
ping_signal_thread_list(void)
{
    if (signal_thread_list_anchor.next) {
        FGLOCK(&signal_thread_list_lock, {
            struct signal_thread_list *list;

            list = signal_thread_list_anchor.next;
            while (list) {
                ubf_select_each(list->th);
                list = list->next;
            }
        });
    }
}

static int
check_signal_thread_list(void)
{
    if (signal_thread_list_anchor.next)
        return 1;
    else
        return 0;
}
#else /* USE_SIGNAL_THREAD_LIST */
#define add_signal_thread_list(th) (void)(th)
#define remove_signal_thread_list(th) (void)(th)
#define ubf_select 0
static void ping_signal_thread_list(void) { return; }
static int check_signal_thread_list(void) { return 0; }
#endif /* USE_SIGNAL_THREAD_LIST */
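
/*
 * The list above supports ubf_select(): interrupting a thread that is
 * blocked in a system call by sending it SIGVTALRM with pthread_kill().
 * The handler (null_func above) does nothing; its only purpose is to
 * make the blocking call fail with EINTR. A standalone sketch of the
 * mechanism (illustrative only):
 */
#if 0 /* sketch, not compiled */
static void
on_sig(int sig)
{
    (void)sig; /* empty handler: just cause EINTR in the target thread */
}

static void
interrupt_blocking_thread(pthread_t target)
{
    signal(SIGVTALRM, on_sig);        /* the VM uses posix_signal() */
    pthread_kill(target, SIGVTALRM);  /* a read(2)/select(2) in `target`
                                       * returns -1 with errno == EINTR */
}
#endif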

#define TT_DEBUG 0
#define WRITE_CONST(fd, str) (void)(write((fd),(str),sizeof(str)-1)<0)

/* 100ms. 10ms is too small for user-level thread scheduling
 * on recent Linux (tested on 2.6.35).
 */
#define TIME_QUANTUM_USEC (100 * 1000)

#if USE_SLEEPY_TIMER_THREAD
static int timer_thread_pipe[2] = {-1, -1};
static int timer_thread_pipe_low[2] = {-1, -1}; /* low priority */
static int timer_thread_pipe_owner_process;

/* only use signal-safe system calls here */
static void
rb_thread_wakeup_timer_thread_fd(int fd)
{
    ssize_t result;

    /* already opened */
    if (timer_thread_pipe_owner_process == getpid()) {
        const char *buff = "!";
      retry:
        if ((result = write(fd, buff, 1)) <= 0) {
            switch (errno) {
              case EINTR: goto retry;
              case EAGAIN:
#if defined(EWOULDBLOCK) && EWOULDBLOCK != EAGAIN
              case EWOULDBLOCK:
#endif
                break;
              default:
                rb_async_bug_errno("rb_thread_wakeup_timer_thread - write", errno);
            }
        }
        if (TT_DEBUG) WRITE_CONST(2, "rb_thread_wakeup_timer_thread: write\n");
    }
    else {
        /* ignore wakeup */
    }
}
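
/*
 * This is the classic self-pipe trick: write(2) is async-signal-safe, so
 * writing one byte to a non-blocking pipe is a safe way to wake a thread
 * blocked in poll() on the read end. Stripped of the Ruby plumbing, the
 * pattern looks like this (illustrative sketch; pipefd is assumed to be
 * a pipe with O_NONBLOCK set on both ends):
 */
#if 0 /* sketch, not compiled */
static int pipefd[2];

static void
wake(void) /* safe to call from a signal handler */
{
    (void)write(pipefd[1], "!", 1);
}

static void
sleep_until_woken(void)
{
    struct pollfd pfd = { pipefd[0], POLLIN, 0 };
    char buf[64];

    (void)poll(&pfd, 1, -1);                       /* block until wake() */
    while (read(pipefd[0], buf, sizeof(buf)) > 0)  /* drain the pipe */
        ;
}
#endif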

void
rb_thread_wakeup_timer_thread(void)
{
    rb_thread_wakeup_timer_thread_fd(timer_thread_pipe[1]);
}

static void
rb_thread_wakeup_timer_thread_low(void)
{
    rb_thread_wakeup_timer_thread_fd(timer_thread_pipe_low[1]);
}

/* VM-dependent API is not available for this function */
static void
consume_communication_pipe(int fd)
{
#define CCP_READ_BUFF_SIZE 1024
    /* the buffer can be shared because no one else refers to it */
    static char buff[CCP_READ_BUFF_SIZE];
    ssize_t result;

    while (1) {
        result = read(fd, buff, sizeof(buff));
        if (result == 0) {
            return;
        }
        else if (result < 0) {
            switch (errno) {
              case EINTR:
                continue; /* retry */
              case EAGAIN:
                return;
              default:
                rb_async_bug_errno("consume_communication_pipe: read\n", errno);
            }
        }
    }
}

static void
close_communication_pipe(int pipes[2])
{
    if (close(pipes[0]) < 0) {
        rb_bug_errno("native_stop_timer_thread - close(ttp[0])", errno);
    }
    if (close(pipes[1]) < 0) {
        rb_bug_errno("native_stop_timer_thread - close(ttp[1])", errno);
    }
    pipes[0] = pipes[1] = -1;
}

static void
set_nonblock(int fd)
{
    int oflags;
    int err;

    oflags = fcntl(fd, F_GETFL);
    if (oflags == -1)
        rb_sys_fail(0);
    oflags |= O_NONBLOCK;
    err = fcntl(fd, F_SETFL, oflags);
    if (err == -1)
        rb_sys_fail(0);
}

static void
setup_communication_pipe_internal(int pipes[2])
{
    int err;

    if (pipes[0] != -1) {
        /* close the pipe inherited from the parent process */
        close_communication_pipe(pipes);
    }

    err = rb_cloexec_pipe(pipes);
    if (err != 0) {
        rb_bug_errno("setup_communication_pipe: Failed to create communication pipe for timer thread", errno);
    }
    rb_update_max_fd(pipes[0]);
    rb_update_max_fd(pipes[1]);
    set_nonblock(pipes[0]);
    set_nonblock(pipes[1]);
}

/* communication pipe with timer thread and signal handler */
static void
setup_communication_pipe(void)
{
    if (timer_thread_pipe_owner_process == getpid()) {
        /* already set up */
        return;
    }
    setup_communication_pipe_internal(timer_thread_pipe);
    setup_communication_pipe_internal(timer_thread_pipe_low);

    /* mark the pipes as valid for this process */
    timer_thread_pipe_owner_process = getpid();
}

/**
 * Let the timer thread sleep for a while.
 *
 * If only one Ruby thread is running, the timer thread sleeps until it is
 * woken up by rb_thread_wakeup_timer_thread().
 *
 * @pre the calling context is the timer thread.
 */
static inline void
timer_thread_sleep(rb_global_vm_lock_t* gvl)
{
    int result;
    int need_polling;
    struct pollfd pollfds[2];

    pollfds[0].fd = timer_thread_pipe[0];
    pollfds[0].events = POLLIN;
    pollfds[1].fd = timer_thread_pipe_low[0];
    pollfds[1].events = POLLIN;

    need_polling = check_signal_thread_list();

    if (gvl->waiting > 0 || need_polling) {
        /* polling (TIME_QUANTUM_USEC usec); only the high-priority pipe is watched */
        result = poll(pollfds, 1, TIME_QUANTUM_USEC/1000);
    }
    else {
        /* wait (infinite) */
        result = poll(pollfds, ARRAY_SIZE(pollfds), -1);
    }

    if (result == 0) {
        /* maybe timeout */
    }
    else if (result > 0) {
        consume_communication_pipe(timer_thread_pipe[0]);
        consume_communication_pipe(timer_thread_pipe_low[0]);
    }
    else { /* result < 0 */
        switch (errno) {
          case EBADF:
          case EINVAL:
          case ENOMEM: /* from Linux man */
          case EFAULT: /* from FreeBSD man */
            rb_async_bug_errno("thread_timer: poll", errno);
          default:
            /* ignore */;
        }
    }
}

#else /* USE_SLEEPY_TIMER_THREAD */
# define PER_NANO 1000000000
void rb_thread_wakeup_timer_thread(void) {}
static void rb_thread_wakeup_timer_thread_low(void) {}

static pthread_mutex_t timer_thread_lock;
static rb_thread_cond_t timer_thread_cond;

static inline void
timer_thread_sleep(rb_global_vm_lock_t* unused)
{
    struct timespec ts;
    ts.tv_sec = 0;
    ts.tv_nsec = TIME_QUANTUM_USEC * 1000;
    ts = native_cond_timeout(&timer_thread_cond, ts);

    native_cond_timedwait(&timer_thread_cond, &timer_thread_lock, &ts);
}
#endif /* USE_SLEEPY_TIMER_THREAD */

static void *
thread_timer(void *p)
{
    rb_global_vm_lock_t *gvl = p;

    if (TT_DEBUG) WRITE_CONST(2, "start timer thread\n");

#if defined(__linux__) && defined(PR_SET_NAME)
    prctl(PR_SET_NAME, "ruby-timer-thr");
#endif

#if !USE_SLEEPY_TIMER_THREAD
    native_mutex_initialize(&timer_thread_lock);
    native_cond_initialize(&timer_thread_cond, RB_CONDATTR_CLOCK_MONOTONIC);
    native_mutex_lock(&timer_thread_lock);
#endif
    while (system_working > 0) {

        /* timer function */
        ping_signal_thread_list();
        timer_thread_function(0);

        if (TT_DEBUG) WRITE_CONST(2, "tick\n");

        /* wait */
        timer_thread_sleep(gvl);
    }
#if !USE_SLEEPY_TIMER_THREAD
    native_mutex_unlock(&timer_thread_lock);
    native_cond_destroy(&timer_thread_cond);
    native_mutex_destroy(&timer_thread_lock);
#endif

    if (TT_DEBUG) WRITE_CONST(2, "finish timer thread\n");
    return NULL;
}

static void
rb_thread_create_timer_thread(void)
{
    if (!timer_thread_id) {
        int err;
#ifdef HAVE_PTHREAD_ATTR_INIT
        pthread_attr_t attr;

        err = pthread_attr_init(&attr);
        if (err != 0) {
            fprintf(stderr, "[FATAL] Failed to initialize pthread attr (errno: %d)\n", err);
            exit(EXIT_FAILURE);
        }
# ifdef PTHREAD_STACK_MIN
        {
            const size_t min_size = (4096 * 4);
            /* Allocate a machine stack for the timer thread of at least
             * 16KB (4 pages). FreeBSD 8.2 AMD64 overflows the machine
             * stack when it is only PTHREAD_STACK_MIN bytes.
             */
            size_t stack_size = PTHREAD_STACK_MIN; /* may be dynamic; read it only once */
            if (stack_size < min_size) stack_size = min_size;
            if (THREAD_DEBUG) stack_size += BUFSIZ;
            pthread_attr_setstacksize(&attr, stack_size);
        }
# endif
#endif

#if USE_SLEEPY_TIMER_THREAD
        setup_communication_pipe();
#endif /* USE_SLEEPY_TIMER_THREAD */

        /* create timer thread */
        if (timer_thread_id) {
            rb_bug("rb_thread_create_timer_thread: Timer thread was already created\n");
        }
#ifdef HAVE_PTHREAD_ATTR_INIT
        err = pthread_create(&timer_thread_id, &attr, thread_timer, &GET_VM()->gvl);
#else
        err = pthread_create(&timer_thread_id, NULL, thread_timer, &GET_VM()->gvl);
#endif
        if (err != 0) {
            fprintf(stderr, "[FATAL] Failed to create timer thread (errno: %d)\n", err);
            exit(EXIT_FAILURE);
        }
#ifdef HAVE_PTHREAD_ATTR_INIT
        pthread_attr_destroy(&attr);
#endif
    }
}

static int
native_stop_timer_thread(int close_anyway)
{
    int stopped;
    stopped = --system_working <= 0;

    if (TT_DEBUG) fprintf(stderr, "stop timer thread\n");
    if (stopped) {
        /* join */
        rb_thread_wakeup_timer_thread();
        native_thread_join(timer_thread_id);
        if (TT_DEBUG) fprintf(stderr, "joined timer thread\n");
        timer_thread_id = 0;

        /* close communication pipe */
        if (close_anyway) {
            /* TODO: Uninstall all signal handlers or mask all signals.
             * This path is the cleanup phase (terminating the ruby
             * process). To avoid racing with signal handlers, we skip
             * closing the communication pipe here; the OS will close it
             * at process termination. That may not be good practice,
             * but it is pragmatic. It remains a TODO.
             */
            /* close_communication_pipe(); */
        }
    }
    return stopped;
}

static void
native_reset_timer_thread(void)
{
    if (TT_DEBUG) fprintf(stderr, "reset timer thread\n");
}

#ifdef HAVE_SIGALTSTACK
int
ruby_stack_overflowed_p(const rb_thread_t *th, const void *addr)
{
    void *base;
    size_t size;
    const size_t water_mark = 1024 * 1024;
    STACK_GROW_DIR_DETECTION;

    if (th) {
        size = th->machine_stack_maxsize;
        base = (char *)th->machine_stack_start - STACK_DIR_UPPER(0, size);
    }
#ifdef STACKADDR_AVAILABLE
    else if (get_stack(&base, &size) == 0) {
        STACK_DIR_UPPER((void)(base = (char *)base + size), (void)0);
    }
#endif
    else {
        return 0;
    }
    size /= RUBY_STACK_SPACE_RATIO;
    if (size > water_mark) size = water_mark;
    if (IS_STACK_DIR_UPPER()) {
        if (size > ~(size_t)base+1) size = ~(size_t)base+1;
        if (addr > base && addr <= (void *)((char *)base + size)) return 1;
    }
    else {
        if (size > (size_t)base) size = (size_t)base;
        if (addr > (void *)((char *)base - size) && addr <= base) return 1;
    }
    return 0;
}
#endif

int
rb_reserved_fd_p(int fd)
{
#if USE_SLEEPY_TIMER_THREAD
    if (fd == timer_thread_pipe[0] ||
        fd == timer_thread_pipe[1] ||
        fd == timer_thread_pipe_low[0] ||
        fd == timer_thread_pipe_low[1]) {
        return 1;
    }
    else {
        return 0;
    }
#else
    return 0;
#endif
}

#endif /* THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION */