Ruby 1.9.3p448 (2013-06-27 revision 41675)
cont.c
/**********************************************************************

  cont.c -

  $Author: naruse $
  created at: Thu May 23 09:03:43 2007

  Copyright (C) 2007 Koichi Sasada

**********************************************************************/

#include "ruby/ruby.h"
#include "internal.h"
#include "vm_core.h"
#include "gc.h"
#include "eval_intern.h"

#if ((defined(_WIN32) && _WIN32_WINNT >= 0x0400) || (defined(HAVE_GETCONTEXT) && defined(HAVE_SETCONTEXT))) && !defined(__NetBSD__) && !defined(sun) && !defined(FIBER_USE_NATIVE)
#define FIBER_USE_NATIVE 1

/* FIBER_USE_NATIVE enables a Fiber performance improvement that uses
 * system-dependent methods such as make/setcontext on POSIX systems or
 * the CreateFiber() API on Windows (see the ucontext sketch below).
 * This hack makes Fiber context switches faster (2x or more).
 * However, it decreases the maximum number of Fibers: for example, on a
 * 32bit POSIX OS, only ten or twenty thousand Fibers can be created.
 *
 * Details are reported in the paper "A Fast Fiber Implementation for Ruby 1.9"
 * in Proc. of the 51st Programming Symposium, pp.21--28 (2010) (in Japanese).
 */

/* In our experience, NetBSD doesn't support using setcontext() and pthreads
 * simultaneously.  This is because pthread_self(), TLS and other information
 * are derived from the stack pointer (its higher bits).
 * TODO: check this constraint in configure.
 */
#elif !defined(FIBER_USE_NATIVE)
#define FIBER_USE_NATIVE 0
#endif
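
/* The ucontext sketch referenced above -- a minimal, self-contained
 * illustration of the make/swapcontext primitive that FIBER_USE_NATIVE
 * builds on.  Editor's addition, excluded from the build with #if 0; the
 * names main_ctx/fiber_ctx/fiber_body are illustrative only and sketch
 * the OS primitive, not cont.c itself. */
#if 0
#include <stdio.h>
#include <stdlib.h>
#include <ucontext.h>

static ucontext_t main_ctx, fiber_ctx;

static void
fiber_body(void)
{
    puts("in fiber");
    swapcontext(&fiber_ctx, &main_ctx);  /* "yield" back to main */
    puts("fiber resumed");
}                                        /* returning resumes uc_link */

int
main(void)
{
    enum { STACK_SIZE = 64 * 1024 };
    char *stack = malloc(STACK_SIZE);

    getcontext(&fiber_ctx);              /* initialize, then retarget */
    fiber_ctx.uc_stack.ss_sp = stack;
    fiber_ctx.uc_stack.ss_size = STACK_SIZE;
    fiber_ctx.uc_link = &main_ctx;       /* resumed when fiber_body returns */
    makecontext(&fiber_ctx, fiber_body, 0);

    swapcontext(&main_ctx, &fiber_ctx);  /* first "resume" */
    swapcontext(&main_ctx, &fiber_ctx);  /* second "resume" */
    free(stack);
    return 0;
}
#endif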

#if FIBER_USE_NATIVE
#ifndef _WIN32
#include <unistd.h>
#include <sys/mman.h>
#include <ucontext.h>
#endif
#define RB_PAGE_SIZE (pagesize)
#define RB_PAGE_MASK (~(RB_PAGE_SIZE - 1))
static long pagesize;
#define FIBER_MACHINE_STACK_ALLOCATION_SIZE (0x10000)
#endif

#define CAPTURE_JUST_VALID_VM_STACK 1

enum context_type {
    CONTINUATION_CONTEXT = 0,
    FIBER_CONTEXT = 1,
    ROOT_FIBER_CONTEXT = 2
};

typedef struct rb_context_struct {
    enum context_type type;
    VALUE self;
    int argc;
    VALUE value;
    VALUE *vm_stack;
#ifdef CAPTURE_JUST_VALID_VM_STACK
    size_t vm_stack_slen;  /* length of stack (head of th->stack) */
    size_t vm_stack_clen;  /* length of control frames (tail of th->stack) */
#endif
    VALUE *machine_stack;
    VALUE *machine_stack_src;
#ifdef __ia64
    VALUE *machine_register_stack;
    VALUE *machine_register_stack_src;
    int machine_register_stack_size;
#endif
    rb_thread_t saved_thread;
    rb_jmpbuf_t jmpbuf;
    size_t machine_stack_size;
} rb_context_t;

enum fiber_status {
    CREATED,
    RUNNING,
    TERMINATED
};

#if FIBER_USE_NATIVE && !defined(_WIN32)
#define MAX_MACHINE_STACK_CACHE 10
static int machine_stack_cache_index = 0;
typedef struct machine_stack_cache_struct {
    void *ptr;
    size_t size;
} machine_stack_cache_t;
static machine_stack_cache_t machine_stack_cache[MAX_MACHINE_STACK_CACHE];
static machine_stack_cache_t terminated_machine_stack;
#endif

typedef struct rb_fiber_struct {
    rb_context_t cont;
    VALUE prev;
    enum fiber_status status;
    struct rb_fiber_struct *prev_fiber;
    struct rb_fiber_struct *next_fiber;
#if FIBER_USE_NATIVE
#ifdef _WIN32
    void *fib_handle;
#else
    ucontext_t context;
#endif
#endif
} rb_fiber_t;

static const rb_data_type_t cont_data_type, fiber_data_type;
static VALUE rb_cContinuation;
static VALUE rb_cFiber;
static VALUE rb_eFiberError;

#define GetContPtr(obj, ptr)  \
    TypedData_Get_Struct((obj), rb_context_t, &cont_data_type, (ptr))

#define GetFiberPtr(obj, ptr) do {\
    TypedData_Get_Struct((obj), rb_fiber_t, &fiber_data_type, (ptr)); \
    if (!(ptr)) rb_raise(rb_eFiberError, "uninitialized fiber"); \
} while(0)

NOINLINE(static VALUE cont_capture(volatile int *stat));

#define THREAD_MUST_BE_RUNNING(th) do { \
    if (!(th)->tag) rb_raise(rb_eThreadError, "not running thread"); \
  } while (0)

static void
cont_mark(void *ptr)
{
    RUBY_MARK_ENTER("cont");
    if (ptr) {
        rb_context_t *cont = ptr;
        rb_gc_mark(cont->value);
        rb_thread_mark(&cont->saved_thread);
        rb_gc_mark(cont->saved_thread.self);

        if (cont->vm_stack) {
#ifdef CAPTURE_JUST_VALID_VM_STACK
            rb_gc_mark_locations(cont->vm_stack,
                                 cont->vm_stack + cont->vm_stack_slen + cont->vm_stack_clen);
#else
            rb_gc_mark_locations(cont->vm_stack,
                                 cont->vm_stack + cont->saved_thread.stack_size);
#endif
        }

        if (cont->machine_stack) {
            if (cont->type == CONTINUATION_CONTEXT) {
                /* cont */
                rb_gc_mark_locations(cont->machine_stack,
                                     cont->machine_stack + cont->machine_stack_size);
            }
            else {
                /* fiber */
                rb_thread_t *th;
                rb_fiber_t *fib = (rb_fiber_t*)cont;
                GetThreadPtr(cont->saved_thread.self, th);
                if ((th->fiber != cont->self) && fib->status == RUNNING) {
                    rb_gc_mark_locations(cont->machine_stack,
                                         cont->machine_stack + cont->machine_stack_size);
                }
            }
        }
#ifdef __ia64
        if (cont->machine_register_stack) {
            rb_gc_mark_locations(cont->machine_register_stack,
                                 cont->machine_register_stack + cont->machine_register_stack_size);
        }
#endif
    }
    RUBY_MARK_LEAVE("cont");
}

static void
cont_free(void *ptr)
{
    RUBY_FREE_ENTER("cont");
    if (ptr) {
        rb_context_t *cont = ptr;
        RUBY_FREE_UNLESS_NULL(cont->saved_thread.stack);
#if FIBER_USE_NATIVE
        if (cont->type == CONTINUATION_CONTEXT) {
            /* cont */
            RUBY_FREE_UNLESS_NULL(cont->machine_stack);
        }
        else {
            /* fiber */
#ifdef _WIN32
            if (GET_THREAD()->fiber != cont->self && cont->type != ROOT_FIBER_CONTEXT) {
                /* don't delete root fiber handle */
                rb_fiber_t *fib = (rb_fiber_t*)cont;
                if (fib->fib_handle) {
                    DeleteFiber(fib->fib_handle);
                }
            }
#else /* not WIN32 */
            if (GET_THREAD()->fiber != cont->self) {
                rb_fiber_t *fib = (rb_fiber_t*)cont;
                if (fib->context.uc_stack.ss_sp) {
                    if (cont->type == ROOT_FIBER_CONTEXT) {
                        rb_bug("Illegal root fiber parameter");
                    }
                    munmap((void*)fib->context.uc_stack.ss_sp, fib->context.uc_stack.ss_size);
                }
            }
            else {
                /* may be reached during finalization */
                /* TODO: examine whether this is a bug */
                /* rb_bug("cont_free: release self"); */
            }
#endif
        }
#else /* not FIBER_USE_NATIVE */
        RUBY_FREE_UNLESS_NULL(cont->machine_stack);
#endif
#ifdef __ia64
        RUBY_FREE_UNLESS_NULL(cont->machine_register_stack);
#endif
        RUBY_FREE_UNLESS_NULL(cont->vm_stack);

        /* free rb_cont_t or rb_fiber_t */
        ruby_xfree(ptr);
    }
    RUBY_FREE_LEAVE("cont");
}

static size_t
cont_memsize(const void *ptr)
{
    const rb_context_t *cont = ptr;
    size_t size = 0;
    if (cont) {
        size = sizeof(*cont);
        if (cont->vm_stack) {
#ifdef CAPTURE_JUST_VALID_VM_STACK
            size_t n = (cont->vm_stack_slen + cont->vm_stack_clen);
#else
            size_t n = cont->saved_thread.stack_size;
#endif
            size += n * sizeof(*cont->vm_stack);
        }

        if (cont->machine_stack) {
            size += cont->machine_stack_size * sizeof(*cont->machine_stack);
        }
#ifdef __ia64
        if (cont->machine_register_stack) {
            size += cont->machine_register_stack_size * sizeof(*cont->machine_register_stack);
        }
#endif
    }
    return size;
}

static void
fiber_mark(void *ptr)
{
    RUBY_MARK_ENTER("cont");
    if (ptr) {
        rb_fiber_t *fib = ptr;
        rb_gc_mark(fib->prev);
        cont_mark(&fib->cont);
    }
    RUBY_MARK_LEAVE("cont");
}

static void
fiber_link_join(rb_fiber_t *fib)
{
    VALUE current_fibval = rb_fiber_current();
    rb_fiber_t *current_fib;
    GetFiberPtr(current_fibval, current_fib);

    /* join fiber link */
    fib->next_fiber = current_fib->next_fiber;
    fib->prev_fiber = current_fib;
    current_fib->next_fiber->prev_fiber = fib;
    current_fib->next_fiber = fib;
}

static void
fiber_link_remove(rb_fiber_t *fib)
{
    fib->prev_fiber->next_fiber = fib->next_fiber;
    fib->next_fiber->prev_fiber = fib->prev_fiber;
}

static void
fiber_free(void *ptr)
{
    RUBY_FREE_ENTER("fiber");
    if (ptr) {
        rb_fiber_t *fib = ptr;
        if (fib->cont.type != ROOT_FIBER_CONTEXT &&
            fib->cont.saved_thread.local_storage) {
            st_free_table(fib->cont.saved_thread.local_storage);
        }
        fiber_link_remove(fib);

        cont_free(&fib->cont);
    }
    RUBY_FREE_LEAVE("fiber");
}

static size_t
fiber_memsize(const void *ptr)
{
    const rb_fiber_t *fib = ptr;
    size_t size = 0;
    if (ptr) {
        size = sizeof(*fib);
        if (fib->cont.type != ROOT_FIBER_CONTEXT) {
            size += st_memsize(fib->cont.saved_thread.local_storage);
        }
        size += cont_memsize(&fib->cont);
    }
    return size;
}

VALUE
rb_obj_is_fiber(VALUE obj)
{
    if (rb_typeddata_is_kind_of(obj, &fiber_data_type)) {
        return Qtrue;
    }
    else {
        return Qfalse;
    }
}

static void
cont_save_machine_stack(rb_thread_t *th, rb_context_t *cont)
{
    size_t size;

    SET_MACHINE_STACK_END(&th->machine_stack_end);
#ifdef __ia64
    th->machine_register_stack_end = rb_ia64_bsp();
#endif

    if (th->machine_stack_start > th->machine_stack_end) {
        size = cont->machine_stack_size = th->machine_stack_start - th->machine_stack_end;
        cont->machine_stack_src = th->machine_stack_end;
    }
    else {
        size = cont->machine_stack_size = th->machine_stack_end - th->machine_stack_start;
        cont->machine_stack_src = th->machine_stack_start;
    }

    if (cont->machine_stack) {
        REALLOC_N(cont->machine_stack, VALUE, size);
    }
    else {
        cont->machine_stack = ALLOC_N(VALUE, size);
    }

    FLUSH_REGISTER_WINDOWS;
    MEMCPY(cont->machine_stack, cont->machine_stack_src, VALUE, size);

#ifdef __ia64
    rb_ia64_flushrs();
    size = cont->machine_register_stack_size = th->machine_register_stack_end - th->machine_register_stack_start;
    cont->machine_register_stack_src = th->machine_register_stack_start;
    if (cont->machine_register_stack) {
        REALLOC_N(cont->machine_register_stack, VALUE, size);
    }
    else {
        cont->machine_register_stack = ALLOC_N(VALUE, size);
    }

    MEMCPY(cont->machine_register_stack, cont->machine_register_stack_src, VALUE, size);
#endif
}

static const rb_data_type_t cont_data_type = {
    "continuation",
    {cont_mark, cont_free, cont_memsize,},
};

static void
cont_save_thread(rb_context_t *cont, rb_thread_t *th)
{
    /* save thread context */
    cont->saved_thread = *th;
    /* saved_thread->machine_stack_(start|end) should be NULL */
    /* because GC may happen afterwards */
    cont->saved_thread.machine_stack_start = 0;
    cont->saved_thread.machine_stack_end = 0;
#ifdef __ia64
    cont->saved_thread.machine_register_stack_start = 0;
    cont->saved_thread.machine_register_stack_end = 0;
#endif
}

static void
cont_init(rb_context_t *cont, rb_thread_t *th)
{
    /* save thread context */
    cont_save_thread(cont, th);
    cont->saved_thread.local_storage = 0;
}

static rb_context_t *
cont_new(VALUE klass)
{
    rb_context_t *cont;
    volatile VALUE contval;
    rb_thread_t *th = GET_THREAD();

    THREAD_MUST_BE_RUNNING(th);
    contval = TypedData_Make_Struct(klass, rb_context_t, &cont_data_type, cont);
    cont->self = contval;
    cont_init(cont, th);
    return cont;
}

static VALUE
cont_capture(volatile int *stat)
{
    rb_context_t *cont;
    rb_thread_t *th = GET_THREAD(), *sth;
    volatile VALUE contval;

    THREAD_MUST_BE_RUNNING(th);
    rb_vm_stack_to_heap(th);
    cont = cont_new(rb_cContinuation);
    contval = cont->self;
    sth = &cont->saved_thread;

#ifdef CAPTURE_JUST_VALID_VM_STACK
    cont->vm_stack_slen = th->cfp->sp + th->mark_stack_len - th->stack;
    cont->vm_stack_clen = th->stack + th->stack_size - (VALUE*)th->cfp;
    cont->vm_stack = ALLOC_N(VALUE, cont->vm_stack_slen + cont->vm_stack_clen);
    MEMCPY(cont->vm_stack, th->stack, VALUE, cont->vm_stack_slen);
    MEMCPY(cont->vm_stack + cont->vm_stack_slen, (VALUE*)th->cfp, VALUE, cont->vm_stack_clen);
#else
    cont->vm_stack = ALLOC_N(VALUE, th->stack_size);
    MEMCPY(cont->vm_stack, th->stack, VALUE, th->stack_size);
#endif
    sth->stack = 0;

    cont_save_machine_stack(th, cont);

    if (ruby_setjmp(cont->jmpbuf)) {
        volatile VALUE value;

        value = cont->value;
        if (cont->argc == -1) rb_exc_raise(value);
        cont->value = Qnil;
        *stat = 1;
        return value;
    }
    else {
        *stat = 0;
        return cont->self;
    }
}

static void
cont_restore_thread(rb_context_t *cont)
{
    rb_thread_t *th = GET_THREAD(), *sth = &cont->saved_thread;

    /* restore thread context */
    if (cont->type == CONTINUATION_CONTEXT) {
        /* continuation */
        VALUE fib;

        th->fiber = sth->fiber;
        fib = th->fiber ? th->fiber : th->root_fiber;

        if (fib) {
            rb_fiber_t *fcont;
            GetFiberPtr(fib, fcont);
            th->stack_size = fcont->cont.saved_thread.stack_size;
            th->stack = fcont->cont.saved_thread.stack;
        }
#ifdef CAPTURE_JUST_VALID_VM_STACK
        MEMCPY(th->stack, cont->vm_stack, VALUE, cont->vm_stack_slen);
        MEMCPY(th->stack + sth->stack_size - cont->vm_stack_clen,
               cont->vm_stack + cont->vm_stack_slen, VALUE, cont->vm_stack_clen);
#else
        MEMCPY(th->stack, cont->vm_stack, VALUE, sth->stack_size);
#endif
    }
    else {
        /* fiber */
        th->stack = sth->stack;
        th->stack_size = sth->stack_size;
        th->local_storage = sth->local_storage;
        th->fiber = cont->self;
    }

    th->cfp = sth->cfp;
    th->safe_level = sth->safe_level;
    th->raised_flag = sth->raised_flag;
    th->state = sth->state;
    th->status = sth->status;
    th->tag = sth->tag;
    th->protect_tag = sth->protect_tag;
    th->errinfo = sth->errinfo;
    th->first_proc = sth->first_proc;
}

#if FIBER_USE_NATIVE
#ifdef _WIN32
static void
fiber_set_stack_location(void)
{
    rb_thread_t *th = GET_THREAD();
    VALUE *ptr;

    SET_MACHINE_STACK_END(&ptr);
    th->machine_stack_start = (void*)(((VALUE)ptr & RB_PAGE_MASK) + STACK_UPPER((void *)&ptr, 0, RB_PAGE_SIZE));
}

static VOID CALLBACK
fiber_entry(void *arg)
{
    fiber_set_stack_location();
    rb_fiber_start();
}
#else /* _WIN32 */

/*
 * FreeBSD requires that the first (addr) argument of mmap(2) be non-NULL
 * when MAP_STACK is passed.
 * http://www.FreeBSD.org/cgi/query-pr.cgi?pr=158755
 */
#if defined(MAP_STACK) && !defined(__FreeBSD__) && !defined(__FreeBSD_kernel__)
#define FIBER_STACK_FLAGS (MAP_PRIVATE | MAP_ANON | MAP_STACK)
#else
#define FIBER_STACK_FLAGS (MAP_PRIVATE | MAP_ANON)
#endif

static char*
fiber_machine_stack_alloc(size_t size)
{
    char *ptr;

    if (machine_stack_cache_index > 0) {
        if (machine_stack_cache[machine_stack_cache_index - 1].size == (size / sizeof(VALUE))) {
            ptr = machine_stack_cache[machine_stack_cache_index - 1].ptr;
            machine_stack_cache_index--;
            machine_stack_cache[machine_stack_cache_index].ptr = NULL;
            machine_stack_cache[machine_stack_cache_index].size = 0;
        }
        else {
            /* TODO: handle multiple machine stack sizes */
            rb_bug("machine_stack_cache size is not canonicalized");
        }
    }
    else {
        void *page;
        STACK_GROW_DIR_DETECTION;

        ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, FIBER_STACK_FLAGS, -1, 0);
        if (ptr == MAP_FAILED) {
            rb_raise(rb_eFiberError, "can't alloc machine stack to fiber");
        }

        /* guard page setup */
        page = ptr + STACK_DIR_UPPER(size - RB_PAGE_SIZE, 0);
        if (mprotect(page, RB_PAGE_SIZE, PROT_NONE) < 0) {
            rb_raise(rb_eFiberError, "mprotect failed");
        }
    }

    return ptr;
}
#endif

static void
fiber_initialize_machine_stack_context(rb_fiber_t *fib, size_t size)
{
    rb_thread_t *sth = &fib->cont.saved_thread;

#ifdef _WIN32
    fib->fib_handle = CreateFiberEx(size - 1, size, 0, fiber_entry, NULL);
    if (!fib->fib_handle) {
        /* try to release unnecessary fibers & retry to create */
        rb_gc();
        fib->fib_handle = CreateFiberEx(size - 1, size, 0, fiber_entry, NULL);
        if (!fib->fib_handle) {
            rb_raise(rb_eFiberError, "can't create fiber");
        }
    }
    sth->machine_stack_maxsize = size;
#else /* not WIN32 */
    ucontext_t *context = &fib->context;
    char *ptr;
    STACK_GROW_DIR_DETECTION;

    getcontext(context);
    ptr = fiber_machine_stack_alloc(size);
    context->uc_link = NULL;
    context->uc_stack.ss_sp = ptr;
    context->uc_stack.ss_size = size;
    makecontext(context, rb_fiber_start, 0);
    sth->machine_stack_start = (VALUE*)(ptr + STACK_DIR_UPPER(0, size));
    sth->machine_stack_maxsize = size - RB_PAGE_SIZE;
#endif
#ifdef __ia64
    sth->machine_register_stack_maxsize = sth->machine_stack_maxsize;
#endif
}

NOINLINE(static void fiber_setcontext(rb_fiber_t *newfib, rb_fiber_t *oldfib));

static void
fiber_setcontext(rb_fiber_t *newfib, rb_fiber_t *oldfib)
{
    rb_thread_t *th = GET_THREAD(), *sth = &newfib->cont.saved_thread;

    if (newfib->status != RUNNING) {
        fiber_initialize_machine_stack_context(newfib, FIBER_MACHINE_STACK_ALLOCATION_SIZE);
    }

    /* restore thread context */
    cont_restore_thread(&newfib->cont);
    th->machine_stack_maxsize = sth->machine_stack_maxsize;
    if (sth->machine_stack_end && (newfib != oldfib)) {
        rb_bug("fiber_setcontext: sth->machine_stack_end has non zero value");
    }

    /* save oldfib's machine stack */
    if (oldfib->status != TERMINATED) {
        STACK_GROW_DIR_DETECTION;
        SET_MACHINE_STACK_END(&th->machine_stack_end);
        if (STACK_DIR_UPPER(0, 1)) {
            oldfib->cont.machine_stack_size = th->machine_stack_start - th->machine_stack_end;
            oldfib->cont.machine_stack = th->machine_stack_end;
        }
        else {
            oldfib->cont.machine_stack_size = th->machine_stack_end - th->machine_stack_start;
            oldfib->cont.machine_stack = th->machine_stack_start;
        }
    }
    /* exchange machine_stack_start between oldfib and newfib */
    oldfib->cont.saved_thread.machine_stack_start = th->machine_stack_start;
    th->machine_stack_start = sth->machine_stack_start;
    /* oldfib->machine_stack_end should be NULL */
    oldfib->cont.saved_thread.machine_stack_end = 0;
#ifndef _WIN32
    if (!newfib->context.uc_stack.ss_sp && th->root_fiber != newfib->cont.self) {
        rb_bug("non_root_fiber->context.uc_stack.ss_sp should not be NULL");
    }
#endif

    /* swap machine context */
#ifdef _WIN32
    SwitchToFiber(newfib->fib_handle);
#elif defined(__FreeBSD__) /* FreeBSD 9 doesn't work with swapcontext */
    if (!ruby_setjmp(oldfib->cont.jmpbuf)) {
        if (newfib->status != RUNNING) {
            if (setcontext(&newfib->context) < 0) {
                rb_bug("context switch between fibers failed");
            }
        }
        else {
            ruby_longjmp(newfib->cont.jmpbuf, 1);
        }
    }
#else
    swapcontext(&oldfib->context, &newfib->context);
#endif
}
#endif

NORETURN(NOINLINE(static void cont_restore_1(rb_context_t *)));

static void
cont_restore_1(rb_context_t *cont)
{
    cont_restore_thread(cont);

    /* restore machine stack */
#ifdef _M_AMD64
    {
        /* workaround for x64 SEH */
        jmp_buf buf;
        setjmp(buf);
        ((_JUMP_BUFFER*)(&cont->jmpbuf))->Frame =
            ((_JUMP_BUFFER*)(&buf))->Frame;
    }
#endif
    if (cont->machine_stack_src) {
        FLUSH_REGISTER_WINDOWS;
        MEMCPY(cont->machine_stack_src, cont->machine_stack,
               VALUE, cont->machine_stack_size);
    }

#ifdef __ia64
    if (cont->machine_register_stack_src) {
        MEMCPY(cont->machine_register_stack_src, cont->machine_register_stack,
               VALUE, cont->machine_register_stack_size);
    }
#endif

    ruby_longjmp(cont->jmpbuf, 1);
}

NORETURN(NOINLINE(static void cont_restore_0(rb_context_t *, VALUE *)));

#ifdef __ia64
#define C(a) rse_##a##0, rse_##a##1, rse_##a##2, rse_##a##3, rse_##a##4
#define E(a) rse_##a##0= rse_##a##1= rse_##a##2= rse_##a##3= rse_##a##4
static volatile int C(a), C(b), C(c), C(d), C(e);
static volatile int C(f), C(g), C(h), C(i), C(j);
static volatile int C(k), C(l), C(m), C(n), C(o);
static volatile int C(p), C(q), C(r), C(s), C(t);
#if 0
{/* the above lines make cc-mode.el confused so much */}
#endif
int rb_dummy_false = 0;
NORETURN(NOINLINE(static void register_stack_extend(rb_context_t *, VALUE *, VALUE *)));
static void
register_stack_extend(rb_context_t *cont, VALUE *vp, VALUE *curr_bsp)
{
    if (rb_dummy_false) {
        /* use registers as much as possible */
        E(a) = E(b) = E(c) = E(d) = E(e) =
        E(f) = E(g) = E(h) = E(i) = E(j) =
        E(k) = E(l) = E(m) = E(n) = E(o) =
        E(p) = E(q) = E(r) = E(s) = E(t) = 0;
        E(a) = E(b) = E(c) = E(d) = E(e) =
        E(f) = E(g) = E(h) = E(i) = E(j) =
        E(k) = E(l) = E(m) = E(n) = E(o) =
        E(p) = E(q) = E(r) = E(s) = E(t) = 0;
    }
    if (curr_bsp < cont->machine_register_stack_src+cont->machine_register_stack_size) {
        register_stack_extend(cont, vp, (VALUE*)rb_ia64_bsp());
    }
    cont_restore_0(cont, vp);
}
#undef C
#undef E
#endif

static void
cont_restore_0(rb_context_t *cont, VALUE *addr_in_prev_frame)
{
    if (cont->machine_stack_src) {
#ifdef HAVE_ALLOCA
#define STACK_PAD_SIZE 1
#else
#define STACK_PAD_SIZE 1024
#endif
        VALUE space[STACK_PAD_SIZE];

#if !STACK_GROW_DIRECTION
        if (addr_in_prev_frame > &space[0]) {
            /* Stack grows downward */
#endif
#if STACK_GROW_DIRECTION <= 0
            volatile VALUE *const end = cont->machine_stack_src;
            if (&space[0] > end) {
# ifdef HAVE_ALLOCA
                volatile VALUE *sp = ALLOCA_N(VALUE, &space[0] - end);
                space[0] = *sp;
# else
                cont_restore_0(cont, &space[0]);
# endif
            }
#endif
#if !STACK_GROW_DIRECTION
        }
        else {
            /* Stack grows upward */
#endif
#if STACK_GROW_DIRECTION >= 0
            volatile VALUE *const end = cont->machine_stack_src + cont->machine_stack_size;
            if (&space[STACK_PAD_SIZE] < end) {
# ifdef HAVE_ALLOCA
                volatile VALUE *sp = ALLOCA_N(VALUE, end - &space[STACK_PAD_SIZE]);
                space[0] = *sp;
# else
                cont_restore_0(cont, &space[STACK_PAD_SIZE-1]);
# endif
            }
#endif
#if !STACK_GROW_DIRECTION
        }
#endif
    }
    cont_restore_1(cont);
}
#ifdef __ia64
#define cont_restore_0(cont, vp) register_stack_extend((cont), (vp), (VALUE*)rb_ia64_bsp());
#endif
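
/* Why the recursion above: before cont_restore_1() memcpy()s the saved
 * machine stack back over the live C stack, the current frame must sit
 * outside the region about to be overwritten.  cont_restore_0() guarantees
 * that by alloca()ing or recursing until the frame has moved past the saved
 * region.  A stripped-down sketch of the downward-growing case (editor's
 * addition, excluded from the build with #if 0; grow_past()/resume() are
 * hypothetical stand-ins for cont_restore_0()/cont_restore_1()): */
#if 0
static void resume(rb_context_t *cont);     /* copies the stack, longjmp()s */

static void
grow_past(rb_context_t *cont)
{
    VALUE pad[1024];                        /* pushes this frame lower */
    if (&pad[0] > cont->machine_stack_src)  /* still inside the saved region? */
        grow_past(cont);                    /* recurse to extend the stack */
    resume(cont);                           /* safe: frame is below the region */
}
#endif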

/*
 *  Document-class: Continuation
 *
 *  Continuation objects are generated by <code>Kernel#callcc</code>,
 *  after having <code>require</code>d <i>continuation</i>. They hold
 *  a return address and execution context, allowing a nonlocal return
 *  to the end of the <code>callcc</code> block from anywhere within a
 *  program. Continuations are somewhat analogous to a structured
 *  version of C's <code>setjmp/longjmp</code> (although they contain
 *  more state, so you might consider them closer to threads); see the
 *  C sketch after this comment for the analogy.
 *
 *  For instance:
 *
 *     require "continuation"
 *     arr = [ "Freddie", "Herbie", "Ron", "Max", "Ringo" ]
 *     callcc{|cc| $cc = cc}
 *     puts(message = arr.shift)
 *     $cc.call unless message =~ /Max/
 *
 *  <em>produces:</em>
 *
 *     Freddie
 *     Herbie
 *     Ron
 *     Max
 *
 *  This (somewhat contrived) example allows the inner loop to abandon
 *  processing early:
 *
 *     require "continuation"
 *     callcc {|cont|
 *       for i in 0..4
 *         print "\n#{i}: "
 *         for j in i*5...(i+1)*5
 *           cont.call() if j == 17
 *           printf "%3d", j
 *         end
 *       end
 *     }
 *     puts
 *
 *  <em>produces:</em>
 *
 *     0:   0   1   2   3   4
 *     1:   5   6   7   8   9
 *     2:  10  11  12  13  14
 *     3:  15  16
 */
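
/* The setjmp/longjmp analogy mentioned above, as a standalone C sketch
 * (editor's addition, excluded from the build with #if 0).  It mirrors the
 * second Ruby example: longjmp() abandons the inner loops and resumes
 * after setjmp() -- with the key limitation that, unlike a Continuation,
 * it can only unwind to a frame that is still on the stack. */
#if 0
#include <setjmp.h>
#include <stdio.h>

static jmp_buf env;

static void
inner(int j)
{
    if (j == 17) longjmp(env, 1);  /* like cont.call */
    printf("%3d", j);
}

int
main(void)
{
    int i, j;
    if (setjmp(env) == 0) {        /* like the callcc block */
        for (i = 0; i <= 4; i++) {
            printf("\n%d: ", i);
            for (j = i * 5; j < (i + 1) * 5; j++)
                inner(j);
        }
    }
    putchar('\n');                 /* execution resumes here */
    return 0;
}
#endif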

/*
 *  call-seq:
 *     callcc {|cont| block }   ->  obj
 *
 *  Generates a <code>Continuation</code> object, which it passes to
 *  the associated block. You need to <code>require
 *  'continuation'</code> before using this method. Performing a
 *  <em>cont</em><code>.call</code> will cause the <code>callcc</code>
 *  to return (as will falling through the end of the block). The
 *  value returned by the <code>callcc</code> is the value of the
 *  block, or the value passed to <em>cont</em><code>.call</code>. See
 *  class <code>Continuation</code> for more details. Also see
 *  <code>Kernel::throw</code> for an alternative mechanism for
 *  unwinding a call stack.
 */

static VALUE
rb_callcc(VALUE self)
{
    volatile int called;
    volatile VALUE val = cont_capture(&called);

    if (called) {
        return val;
    }
    else {
        return rb_yield(val);
    }
}

static VALUE
make_passing_arg(int argc, VALUE *argv)
{
    switch (argc) {
      case 0:
        return Qnil;
      case 1:
        return argv[0];
      default:
        return rb_ary_new4(argc, argv);
    }
}

/*
 *  call-seq:
 *     cont.call(args, ...)
 *     cont[args, ...]
 *
 *  Invokes the continuation. The program continues from the end of the
 *  <code>callcc</code> block. If no arguments are given, the original
 *  <code>callcc</code> returns <code>nil</code>. If one argument is
 *  given, <code>callcc</code> returns it. Otherwise, an array
 *  containing <i>args</i> is returned.
 *
 *     callcc {|cont|  cont.call }           #=> nil
 *     callcc {|cont|  cont.call 1 }         #=> 1
 *     callcc {|cont|  cont.call 1, 2, 3 }   #=> [1, 2, 3]
 */

static VALUE
rb_cont_call(int argc, VALUE *argv, VALUE contval)
{
    rb_context_t *cont;
    rb_thread_t *th = GET_THREAD();
    GetContPtr(contval, cont);

    if (cont->saved_thread.self != th->self) {
        rb_raise(rb_eRuntimeError, "continuation called across threads");
    }
    if (cont->saved_thread.protect_tag != th->protect_tag) {
        rb_raise(rb_eRuntimeError, "continuation called across stack rewinding barrier");
    }
    if (cont->saved_thread.fiber) {
        rb_fiber_t *fcont;
        GetFiberPtr(cont->saved_thread.fiber, fcont);

        if (th->fiber != cont->saved_thread.fiber) {
            rb_raise(rb_eRuntimeError, "continuation called across fiber");
        }
    }

    cont->argc = argc;
    cont->value = make_passing_arg(argc, argv);

    cont_restore_0(cont, &contval);
    return Qnil; /* unreachable */
}

/*********/
/* fiber */
/*********/

/*
 *  Document-class: Fiber
 *
 *  Fibers are primitives for implementing light weight cooperative
 *  concurrency in Ruby. Basically they are a means of creating code blocks
 *  that can be paused and resumed, much like threads. The main difference
 *  is that they are never preempted and that the scheduling must be done by
 *  the programmer and not the VM.
 *
 *  As opposed to other stackless light weight concurrency models, each fiber
 *  comes with a small 4KB stack. This enables the fiber to be paused from deeply
 *  nested function calls within the fiber block.
 *
 *  When a fiber is created it will not run automatically. Rather it must
 *  be explicitly asked to run using the <code>Fiber#resume</code> method.
 *  The code running inside the fiber can give up control by calling
 *  <code>Fiber.yield</code>, in which case it yields control back to the
 *  caller (the caller of the <code>Fiber#resume</code>).
 *
 *  Upon yielding or termination the Fiber returns the value of the last
 *  executed expression.
 *
 *  For instance:
 *
 *     fiber = Fiber.new do
 *       Fiber.yield 1
 *       2
 *     end
 *
 *     puts fiber.resume
 *     puts fiber.resume
 *     puts fiber.resume
 *
 *  <em>produces</em>
 *
 *     1
 *     2
 *     FiberError: dead fiber called
 *
 *  The <code>Fiber#resume</code> method accepts an arbitrary number of
 *  parameters; if it is the first call to <code>resume</code> then they
 *  will be passed as block arguments. Otherwise they will be the return
 *  value of the call to <code>Fiber.yield</code>.
 *
 *  Example:
 *
 *     fiber = Fiber.new do |first|
 *       second = Fiber.yield first + 2
 *     end
 *
 *     puts fiber.resume 10
 *     puts fiber.resume 14
 *     puts fiber.resume 18
 *
 *  <em>produces</em>
 *
 *     12
 *     14
 *     FiberError: dead fiber called
 *
 */
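
/* The same resume/yield round trip as the first Ruby example above, but
 * driven from C through this file's exported API (editor's addition,
 * excluded from the build with #if 0; assumes a running VM, and
 * fiber_body/demo are hypothetical names). */
#if 0
static VALUE
fiber_body(VALUE arg)                /* plays the role of the Ruby block */
{
    VALUE one = INT2FIX(1);
    rb_fiber_yield(1, &one);         /* Fiber.yield 1 */
    return INT2FIX(2);               /* 2 */
}

static void
demo(void)
{
    VALUE fib = rb_fiber_new(fiber_body, Qnil);
    VALUE a = rb_fiber_resume(fib, 0, NULL);  /* => 1 */
    VALUE b = rb_fiber_resume(fib, 0, NULL);  /* => 2 */
    (void)a; (void)b;                /* a third resume raises FiberError */
}
#endif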

#define FIBER_VM_STACK_SIZE (4 * 1024)

static const rb_data_type_t fiber_data_type = {
    "fiber",
    {fiber_mark, fiber_free, fiber_memsize,},
};

static VALUE
fiber_alloc(VALUE klass)
{
    return TypedData_Wrap_Struct(klass, &fiber_data_type, 0);
}

static rb_fiber_t*
fiber_t_alloc(VALUE fibval)
{
    rb_fiber_t *fib;
    rb_thread_t *th = GET_THREAD();

    if (DATA_PTR(fibval) != 0) {
        rb_raise(rb_eRuntimeError, "cannot initialize twice");
    }

    THREAD_MUST_BE_RUNNING(th);
    fib = ALLOC(rb_fiber_t);
    memset(fib, 0, sizeof(rb_fiber_t));
    fib->cont.self = fibval;
    fib->cont.type = FIBER_CONTEXT;
    cont_init(&fib->cont, th);
    fib->prev = Qnil;
    fib->status = CREATED;

    DATA_PTR(fibval) = fib;

    return fib;
}

static VALUE
fiber_init(VALUE fibval, VALUE proc)
{
    rb_fiber_t *fib = fiber_t_alloc(fibval);
    rb_context_t *cont = &fib->cont;
    rb_thread_t *th = &cont->saved_thread;

    /* initialize cont */
    cont->vm_stack = 0;

    th->stack = 0;
    th->stack_size = 0;

    fiber_link_join(fib);

    th->stack_size = FIBER_VM_STACK_SIZE;
    th->stack = ALLOC_N(VALUE, th->stack_size);

    th->cfp = (void *)(th->stack + th->stack_size);
    th->cfp--;
    th->cfp->pc = 0;
    th->cfp->sp = th->stack + 1;
    th->cfp->bp = 0;
    th->cfp->lfp = th->stack;
    *th->cfp->lfp = 0;
    th->cfp->dfp = th->stack;
    th->cfp->self = Qnil;
    th->cfp->flag = 0;
    th->cfp->iseq = 0;
    th->cfp->proc = 0;
    th->cfp->block_iseq = 0;
    th->cfp->me = 0;
    th->tag = 0;
    th->local_storage = st_init_numtable();

    th->first_proc = proc;

#if !FIBER_USE_NATIVE
    MEMCPY(&cont->jmpbuf, &th->root_jmpbuf, rb_jmpbuf_t, 1);
#endif

    return fibval;
}

/* :nodoc: */
static VALUE
rb_fiber_init(VALUE fibval)
{
    return fiber_init(fibval, rb_block_proc());
}

VALUE
rb_fiber_new(VALUE (*func)(ANYARGS), VALUE obj)
{
    return fiber_init(fiber_alloc(rb_cFiber), rb_proc_new(func, obj));
}

static VALUE
return_fiber(void)
{
    rb_fiber_t *fib;
    VALUE curr = rb_fiber_current();
    GetFiberPtr(curr, fib);

    if (fib->prev == Qnil) {
        rb_thread_t *th = GET_THREAD();

        if (th->root_fiber != curr) {
            return th->root_fiber;
        }
        else {
            rb_raise(rb_eFiberError, "can't yield from root fiber");
        }
    }
    else {
        VALUE prev = fib->prev;
        fib->prev = Qnil;
        return prev;
    }
}

VALUE rb_fiber_current(void);

static void
rb_fiber_terminate(rb_fiber_t *fib)
{
    VALUE value = fib->cont.value;
    fib->status = TERMINATED;
#if FIBER_USE_NATIVE && !defined(_WIN32)
    /* Ruby must not switch to another thread until storing terminated_machine_stack */
    terminated_machine_stack.ptr = fib->context.uc_stack.ss_sp;
    terminated_machine_stack.size = fib->context.uc_stack.ss_size / sizeof(VALUE);
    fib->context.uc_stack.ss_sp = NULL;
    fib->cont.machine_stack = NULL;
    fib->cont.machine_stack_size = 0;
#endif
    rb_fiber_transfer(return_fiber(), 1, &value);
}

void
rb_fiber_start(void)
{
    rb_thread_t *th = GET_THREAD();
    rb_fiber_t *fib;
    rb_context_t *cont;
    rb_proc_t *proc;
    int state;

    GetFiberPtr(th->fiber, fib);
    cont = &fib->cont;

    TH_PUSH_TAG(th);
    if ((state = EXEC_TAG()) == 0) {
        int argc;
        VALUE *argv, args;
        GetProcPtr(cont->saved_thread.first_proc, proc);
        args = cont->value;
        argv = (argc = cont->argc) > 1 ? RARRAY_PTR(args) : &args;
        cont->value = Qnil;
        th->errinfo = Qnil;
        th->local_lfp = proc->block.lfp;
        th->local_svar = Qnil;

        fib->status = RUNNING;
        cont->value = rb_vm_invoke_proc(th, proc, proc->block.self, argc, argv, 0);
    }
    TH_POP_TAG();

    if (state) {
        if (state == TAG_RAISE) {
            th->thrown_errinfo = th->errinfo;
        }
        else {
            th->thrown_errinfo =
                rb_vm_make_jump_tag_but_local_jump(state, th->errinfo);
        }
        RUBY_VM_SET_INTERRUPT(th);
    }

    rb_fiber_terminate(fib);
    rb_bug("rb_fiber_start: unreachable");
}

static rb_fiber_t *
root_fiber_alloc(rb_thread_t *th)
{
    rb_fiber_t *fib;
    /* no need to allocate vm stack */
    fib = fiber_t_alloc(fiber_alloc(rb_cFiber));
    fib->cont.type = ROOT_FIBER_CONTEXT;
#if FIBER_USE_NATIVE
#ifdef _WIN32
    fib->fib_handle = ConvertThreadToFiber(0);
#endif
#endif
    fib->status = RUNNING;
    fib->prev_fiber = fib->next_fiber = fib;

    return fib;
}

VALUE
rb_fiber_current(void)
{
    rb_thread_t *th = GET_THREAD();
    if (th->fiber == 0) {
        /* save root */
        rb_fiber_t *fib = root_fiber_alloc(th);
        th->root_fiber = th->fiber = fib->cont.self;
    }
    return th->fiber;
}

static VALUE
fiber_store(rb_fiber_t *next_fib)
{
    rb_thread_t *th = GET_THREAD();
    rb_fiber_t *fib;

    if (th->fiber) {
        GetFiberPtr(th->fiber, fib);
        cont_save_thread(&fib->cont, th);
    }
    else {
        /* create current fiber */
        fib = root_fiber_alloc(th);
        th->root_fiber = th->fiber = fib->cont.self;
    }

#if !FIBER_USE_NATIVE
    cont_save_machine_stack(th, &fib->cont);

    if (ruby_setjmp(fib->cont.jmpbuf)) {
#else /* FIBER_USE_NATIVE */
    {
        fiber_setcontext(next_fib, fib);
#ifndef _WIN32
        if (terminated_machine_stack.ptr) {
            if (machine_stack_cache_index < MAX_MACHINE_STACK_CACHE) {
                machine_stack_cache[machine_stack_cache_index].ptr = terminated_machine_stack.ptr;
                machine_stack_cache[machine_stack_cache_index].size = terminated_machine_stack.size;
                machine_stack_cache_index++;
            }
            else {
                if (terminated_machine_stack.ptr != fib->cont.machine_stack) {
                    munmap((void*)terminated_machine_stack.ptr, terminated_machine_stack.size * sizeof(VALUE));
                }
                else {
                    rb_bug("terminated fiber resumed");
                }
            }
            terminated_machine_stack.ptr = NULL;
            terminated_machine_stack.size = 0;
        }
#endif
#endif
        /* restored */
        GetFiberPtr(th->fiber, fib);
        if (fib->cont.argc == -1) rb_exc_raise(fib->cont.value);
        return fib->cont.value;
    }
#if !FIBER_USE_NATIVE
    else {
        return Qundef;
    }
#endif
}

static inline VALUE
fiber_switch(VALUE fibval, int argc, VALUE *argv, int is_resume)
{
    VALUE value;
    rb_fiber_t *fib;
    rb_context_t *cont;
    rb_thread_t *th = GET_THREAD();

    GetFiberPtr(fibval, fib);
    cont = &fib->cont;

    if (cont->saved_thread.self != th->self) {
        rb_raise(rb_eFiberError, "fiber called across threads");
    }
    else if (cont->saved_thread.protect_tag != th->protect_tag) {
        rb_raise(rb_eFiberError, "fiber called across stack rewinding barrier");
    }
    else if (fib->status == TERMINATED) {
        value = rb_exc_new2(rb_eFiberError, "dead fiber called");
        if (th->fiber != fibval) {
            GetFiberPtr(th->fiber, fib);
            if (fib->status != TERMINATED) rb_exc_raise(value);
            fibval = th->root_fiber;
        }
        else {
            fibval = fib->prev;
            if (NIL_P(fibval)) fibval = th->root_fiber;
        }
        GetFiberPtr(fibval, fib);
        cont = &fib->cont;
        cont->argc = -1;
        cont->value = value;
#if FIBER_USE_NATIVE
        {
            VALUE oldfibval;
            rb_fiber_t *oldfib;
            oldfibval = rb_fiber_current();
            GetFiberPtr(oldfibval, oldfib);
            fiber_setcontext(fib, oldfib);
        }
#else
        cont_restore_0(cont, &value);
#endif
    }

    if (is_resume) {
        fib->prev = rb_fiber_current();
    }

    cont->argc = argc;
    cont->value = make_passing_arg(argc, argv);

    value = fiber_store(fib);
#if !FIBER_USE_NATIVE
    if (value == Qundef) {
        cont_restore_0(cont, &value);
        rb_bug("rb_fiber_resume: unreachable");
    }
#endif
    RUBY_VM_CHECK_INTS();

    return value;
}

VALUE
rb_fiber_transfer(VALUE fib, int argc, VALUE *argv)
{
    return fiber_switch(fib, argc, argv, 0);
}

VALUE
rb_fiber_resume(VALUE fibval, int argc, VALUE *argv)
{
    rb_fiber_t *fib;
    GetFiberPtr(fibval, fib);

    if (fib->prev != Qnil || fib->cont.type == ROOT_FIBER_CONTEXT) {
        rb_raise(rb_eFiberError, "double resume");
    }

    return fiber_switch(fibval, argc, argv, 1);
}

VALUE
rb_fiber_yield(int argc, VALUE *argv)
{
    return rb_fiber_transfer(return_fiber(), argc, argv);
}

void
rb_fiber_reset_root_local_storage(VALUE thval)
{
    rb_thread_t *th;
    rb_fiber_t *fib;

    GetThreadPtr(thval, th);
    if (th->root_fiber && th->root_fiber != th->fiber) {
        GetFiberPtr(th->root_fiber, fib);
        th->local_storage = fib->cont.saved_thread.local_storage;
    }
}

/*
 *  call-seq:
 *     fiber.alive? -> true or false
 *
 *  Returns true if the fiber can still be resumed (or transferred
 *  to). After finishing execution of the fiber block this method will
 *  always return false. You need to <code>require 'fiber'</code>
 *  before using this method.
 */
VALUE
rb_fiber_alive_p(VALUE fibval)
{
    rb_fiber_t *fib;
    GetFiberPtr(fibval, fib);
    return fib->status != TERMINATED ? Qtrue : Qfalse;
}

/*
 *  call-seq:
 *     fiber.resume(args, ...) -> obj
 *
 *  Resumes the fiber from the point at which the last <code>Fiber.yield</code>
 *  was called, or starts running it if it is the first call to
 *  <code>resume</code>. Arguments passed to resume will be the value of
 *  the <code>Fiber.yield</code> expression or will be passed as block
 *  parameters to the fiber's block if this is the first <code>resume</code>.
 *
 *  Alternatively, when resume is called it evaluates to the arguments passed
 *  to the next <code>Fiber.yield</code> statement inside the fiber's block
 *  or to the block value if it runs to completion without any
 *  <code>Fiber.yield</code>.
 */
static VALUE
rb_fiber_m_resume(int argc, VALUE *argv, VALUE fib)
{
    return rb_fiber_resume(fib, argc, argv);
}

/*
 *  call-seq:
 *     fiber.transfer(args, ...) -> obj
 *
 *  Transfer control to another fiber, resuming it from where it last
 *  stopped or starting it if it was not resumed before. The calling
 *  fiber will be suspended much like in a call to
 *  <code>Fiber.yield</code>. You need to <code>require 'fiber'</code>
 *  before using this method.
 *
 *  The fiber which receives the transfer call treats it much like
 *  a resume call. Arguments passed to transfer are treated like those
 *  passed to resume.
 *
 *  You cannot resume a fiber that has transferred control to another one.
 *  This will cause a double resume error. You need to transfer control
 *  back to this fiber before it can yield and resume.
 */
static VALUE
rb_fiber_m_transfer(int argc, VALUE *argv, VALUE fib)
{
    return rb_fiber_transfer(fib, argc, argv);
}

/*
 *  call-seq:
 *     Fiber.yield(args, ...) -> obj
 *
 *  Yields control back to the context that resumed the fiber, passing
 *  along any arguments that were passed to it. The fiber will resume
 *  processing at this point when <code>resume</code> is called next.
 *  Any arguments passed to the next <code>resume</code> will be the
 *  value that this <code>Fiber.yield</code> expression evaluates to.
 */
static VALUE
rb_fiber_s_yield(int argc, VALUE *argv, VALUE klass)
{
    return rb_fiber_yield(argc, argv);
}

/*
 *  call-seq:
 *     Fiber.current() -> fiber
 *
 *  Returns the current fiber. You need to <code>require 'fiber'</code>
 *  before using this method. If you are not running in the context of
 *  a fiber this method will return the root fiber.
 */
static VALUE
rb_fiber_s_current(VALUE klass)
{
    return rb_fiber_current();
}

/*
 *  Document-class: FiberError
 *
 *  Raised when an invalid operation is attempted on a Fiber, in
 *  particular when attempting to call/resume a dead fiber,
 *  attempting to yield from the root fiber, or calling a fiber across
 *  threads.
 *
 *     fiber = Fiber.new{}
 *     fiber.resume #=> nil
 *     fiber.resume #=> FiberError: dead fiber called
 */

void
Init_Cont(void)
{
#if FIBER_USE_NATIVE
    rb_thread_t *th = GET_THREAD();

#ifdef _WIN32
    SYSTEM_INFO info;
    GetSystemInfo(&info);
    pagesize = info.dwPageSize;
#else /* not WIN32 */
    pagesize = sysconf(_SC_PAGESIZE);
#endif
    SET_MACHINE_STACK_END(&th->machine_stack_end);
#endif

    rb_cFiber = rb_define_class("Fiber", rb_cObject);
    rb_define_alloc_func(rb_cFiber, fiber_alloc);
    rb_eFiberError = rb_define_class("FiberError", rb_eStandardError);
    rb_define_singleton_method(rb_cFiber, "yield", rb_fiber_s_yield, -1);
    rb_define_method(rb_cFiber, "initialize", rb_fiber_init, 0);
    rb_define_method(rb_cFiber, "resume", rb_fiber_m_resume, -1);
}

#if defined __GNUC__ && __GNUC__ >= 4
#pragma GCC visibility push(default)
#endif

void
ruby_Init_Continuation_body(void)
{
    rb_cContinuation = rb_define_class("Continuation", rb_cObject);
    rb_undef_alloc_func(rb_cContinuation);
    rb_undef_method(CLASS_OF(rb_cContinuation), "new");
    rb_define_method(rb_cContinuation, "call", rb_cont_call, -1);
    rb_define_method(rb_cContinuation, "[]", rb_cont_call, -1);
    rb_define_global_function("callcc", rb_callcc, 0);
}

void
ruby_Init_Fiber_as_Coroutine(void)
{
    rb_define_method(rb_cFiber, "transfer", rb_fiber_m_transfer, -1);
    rb_define_method(rb_cFiber, "alive?", rb_fiber_alive_p, 0);
    rb_define_singleton_method(rb_cFiber, "current", rb_fiber_s_current, 0);
}

#if defined __GNUC__ && __GNUC__ >= 4
#pragma GCC visibility pop
#endif