/* Ruby 2.0.0p247 (2013-06-27, revision 41674) — cont.c
 * NOTE: this file is a documentation scrape; some original source lines are missing. */
1 /**********************************************************************
2 
3  cont.c -
4 
5  $Author: nobu $
6  created at: Thu May 23 09:03:43 2007
7 
8  Copyright (C) 2007 Koichi Sasada
9 
10 **********************************************************************/
11 
12 #include "ruby/ruby.h"
13 #include "internal.h"
14 #include "vm_core.h"
15 #include "gc.h"
16 #include "eval_intern.h"
17 
18 #if ((defined(_WIN32) && _WIN32_WINNT >= 0x0400) || (defined(HAVE_GETCONTEXT) && defined(HAVE_SETCONTEXT))) && !defined(__NetBSD__) && !defined(__sun) && !defined(__ia64) && !defined(FIBER_USE_NATIVE)
19 #define FIBER_USE_NATIVE 1
20 
/* FIBER_USE_NATIVE enables a Fiber performance improvement using
 * system-dependent methods such as make/setcontext on POSIX systems or
 * the CreateFiber() API on Windows.
 * This hack makes Fiber context switches faster (2x or more).
 * However, it decreases the maximum number of Fibers. For example, on a
 * 32bit POSIX OS, only ten or twenty thousand Fibers can be created.
 *
 * Details are reported in the paper "A Fast Fiber Implementation for Ruby 1.9"
 * in Proc. of 51th Programming Symposium, pp.21--28 (2010) (in Japanese).
 */
31 
/* In our experience, NetBSD doesn't support using setcontext() and pthreads
 * simultaneously. This is because pthread_self(), TLS and other information
 * are derived from the stack pointer (the higher bits of the stack pointer).
 * TODO: check for this constraint in configure.
 */
37 #elif !defined(FIBER_USE_NATIVE)
38 #define FIBER_USE_NATIVE 0
39 #endif
40 
41 #if FIBER_USE_NATIVE
42 #ifndef _WIN32
43 #include <unistd.h>
44 #include <sys/mman.h>
45 #include <ucontext.h>
46 #endif
47 #define RB_PAGE_SIZE (pagesize)
48 #define RB_PAGE_MASK (~(RB_PAGE_SIZE - 1))
49 static long pagesize;
50 #endif /*FIBER_USE_NATIVE*/
51 
/* When defined, cont_capture() copies only the live head/tail of the VM
 * stack instead of the whole allocation. */
#define CAPTURE_JUST_VALID_VM_STACK 1

/* NOTE(review): the enum that this brace closes (embedded lines 54-57,
 * upstream `enum context_type { CONTINUATION_CONTEXT, FIBER_CONTEXT,
 * ROOT_FIBER_CONTEXT }`) was lost in the documentation scrape. */
};

/* Saved execution state shared by Continuation and Fiber objects.
 * NOTE(review): several members were lost in the scrape (embedded lines
 * 61, 64-65, 70-71, 77-79) — the code below references cont->type,
 * cont->value, cont->vm_stack, cont->machine_stack(_src/_size),
 * cont->saved_thread and cont->jmpbuf; confirm against upstream. */
typedef struct rb_context_struct {
    VALUE self;                 /* the wrapping Continuation/Fiber object */
    int argc;                   /* arg count passed on invocation; -1 marks a raised exception */
#ifdef CAPTURE_JUST_VALID_VM_STACK
    size_t vm_stack_slen;       /* length of stack (head of th->stack) */
    size_t vm_stack_clen;       /* length of control frames (tail of th->stack) */
#endif
#ifdef __ia64
    VALUE *machine_register_stack;      /* copy of the ia64 register backing store */
    VALUE *machine_register_stack_src;
    int machine_register_stack_size;
#endif
} rb_context_t;

/* NOTE(review): the enum this brace closes (embedded lines 82-85,
 * upstream `enum fiber_status { CREATED, RUNNING, TERMINATED }` per the
 * uses of CREATED/RUNNING/TERMINATED below) was lost in the scrape. */
};
87 
#if FIBER_USE_NATIVE && !defined(_WIN32)
/* Small per-process cache of machine stacks left behind by terminated
 * fibers, reused by fiber_machine_stack_alloc() to avoid mmap churn.
 * NOTE(review): "MAHINE" is a long-standing typo for "MACHINE"; it is
 * used consistently (see fiber_store), so it is not renamed here. */
#define MAX_MAHINE_STACK_CACHE 10
static int machine_stack_cache_index = 0;
typedef struct machine_stack_cache_struct {
    void *ptr;                  /* mmap'ed base address */
    size_t size;                /* size in VALUE words, not bytes */
} machine_stack_cache_t;
static machine_stack_cache_t machine_stack_cache[MAX_MAHINE_STACK_CACHE];
/* stack parked by rb_fiber_terminate() until the next switch reclaims it */
static machine_stack_cache_t terminated_machine_stack;
#endif
98 
/* A Fiber: an rb_context_t plus fiber-specific bookkeeping.
 * NOTE(review): several members were lost in the scrape (embedded lines
 * 100-104, 109-110) — the code below references fib->cont, fib->prev,
 * fib->status, fib->prev_fiber and fib->next_fiber; confirm upstream. */
typedef struct rb_fiber_struct {
    /* If a fiber invokes "transfer",
     * then this fiber can't "resume" any more after that.
     * You shouldn't mix "transfer" and "resume".
     */

#if FIBER_USE_NATIVE
#ifdef _WIN32
    void *fib_handle;           /* Win32 fiber handle from CreateFiberEx() */
#else
    ucontext_t context;         /* POSIX make/swapcontext state */
#endif
#endif
} rb_fiber_t;
119 
124 
/* Unwraps a Continuation object into an rb_context_t*. */
#define GetContPtr(obj, ptr) \
  TypedData_Get_Struct((obj), rb_context_t, &cont_data_type, (ptr))

/* Unwraps a Fiber object into an rb_fiber_t*, raising FiberError when
 * the object was allocated but never initialized. */
#define GetFiberPtr(obj, ptr) do {\
  TypedData_Get_Struct((obj), rb_fiber_t, &fiber_data_type, (ptr)); \
  if (!(ptr)) rb_raise(rb_eFiberError, "uninitialized fiber"); \
} while (0)

/* NOINLINE: cont_capture must get its own frame so the setjmp state it
 * captures is stable. */
NOINLINE(static VALUE cont_capture(volatile int *stat));

/* A thread that has no active tag is not executing Ruby code. */
#define THREAD_MUST_BE_RUNNING(th) do { \
    if (!(th)->tag) rb_raise(rb_eThreadError, "not running thread"); \
  } while (0)
138 
/*
 * GC mark callback for rb_context_t (continuations and fibers): marks
 * the captured return value plus every VALUE slot in the saved VM
 * stack and, where it may hold live references, the saved machine
 * stack.
 * NOTE(review): recovered from a scrape with gaps — the name line
 * (embedded 140), marks of the saved thread (146-147) and the opening
 * lines of several rb_gc_mark_locations() calls (151, 162, 171) are
 * missing; the orphaned argument lines below belong to those calls.
 */
static void
{
    RUBY_MARK_ENTER("cont");
    if (ptr) {
	rb_context_t *cont = ptr;
	rb_gc_mark(cont->value);

	if (cont->vm_stack) {
#ifdef CAPTURE_JUST_VALID_VM_STACK
	    /* NOTE(review): first line of rb_gc_mark_locations(...) lost */
	    cont->vm_stack + cont->vm_stack_slen + cont->vm_stack_clen);
#else
	    /* NOTE(review): "localtion" is a typo — upstream calls
	     * rb_gc_mark_locations(start, end) with two arguments */
	    rb_gc_mark_localtion(cont->vm_stack,
	    cont->vm_stack, cont->saved_thread.stack_size);
#endif
	}

	if (cont->machine_stack) {
	    if (cont->type == CONTINUATION_CONTEXT) {
		/* cont */
		cont->machine_stack + cont->machine_stack_size);
	    }
	    else {
		/* fiber */
		rb_thread_t *th;
		rb_fiber_t *fib = (rb_fiber_t*)cont;
		GetThreadPtr(cont->saved_thread.self, th);
		/* only scan the saved copy when the fiber is not the one
		 * currently running on its thread */
		if ((th->fiber != cont->self) && fib->status == RUNNING) {
		    cont->machine_stack + cont->machine_stack_size);
		}
	    }
	}
#ifdef __ia64
	if (cont->machine_register_stack) {
	    rb_gc_mark_locations(cont->machine_register_stack,
				 cont->machine_register_stack + cont->machine_register_stack_size);
	}
#endif
    }
    RUBY_MARK_LEAVE("cont");
}
185 
/*
 * dfree callback for rb_context_t: releases the copied VM stack and,
 * for native fibers, the Win32 fiber handle or mmap'ed machine stack.
 * NOTE(review): scrape gaps — the name line (embedded 187) and several
 * RUBY_FREE_UNLESS_NULL lines (196, 226, 231) are missing.
 */
static void
{
    RUBY_FREE_ENTER("cont");
    if (ptr) {
	rb_context_t *cont = ptr;
	/* NOTE(review): the trailing fflush(stdout) looks like leftover
	 * debug output — confirm against upstream before relying on it */
	RUBY_FREE_UNLESS_NULL(cont->saved_thread.stack); fflush(stdout);
#if FIBER_USE_NATIVE
	if (cont->type == CONTINUATION_CONTEXT) {
	    /* cont */
	}
	else {
	    /* fiber */
#ifdef _WIN32
	    if (GET_THREAD()->fiber != cont->self && cont->type != ROOT_FIBER_CONTEXT) {
		/* don't delete root fiber handle */
		rb_fiber_t *fib = (rb_fiber_t*)cont;
		if (fib->fib_handle) {
		    DeleteFiber(fib->fib_handle);
		}
	    }
#else /* not WIN32 */
	    if (GET_THREAD()->fiber != cont->self) {
		rb_fiber_t *fib = (rb_fiber_t*)cont;
		if (fib->context.uc_stack.ss_sp) {
		    if (cont->type == ROOT_FIBER_CONTEXT) {
			rb_bug("Illegal root fiber parameter");
		    }
		    /* stack was mmap'ed by fiber_machine_stack_alloc() */
		    munmap((void*)fib->context.uc_stack.ss_sp, fib->context.uc_stack.ss_size);
		}
	    }
	    else {
		/* Control may reach here during finalization. */
		/* TODO examine whether it is a bug */
		/* rb_bug("cont_free: release self"); */
	    }
#endif
	}
#else /* not FIBER_USE_NATIVE */
#endif
#ifdef __ia64
	RUBY_FREE_UNLESS_NULL(cont->machine_register_stack);
#endif

	/* free rb_cont_t or rb_fiber_t */
	ruby_xfree(ptr);
    }
    RUBY_FREE_LEAVE("cont");
}
238 
239 static size_t
240 cont_memsize(const void *ptr)
241 {
242  const rb_context_t *cont = ptr;
243  size_t size = 0;
244  if (cont) {
245  size = sizeof(*cont);
246  if (cont->vm_stack) {
247 #ifdef CAPTURE_JUST_VALID_VM_STACK
248  size_t n = (cont->vm_stack_slen + cont->vm_stack_clen);
249 #else
250  size_t n = cont->saved_thread.stack_size;
251 #endif
252  size += n * sizeof(*cont->vm_stack);
253  }
254 
255  if (cont->machine_stack) {
256  size += cont->machine_stack_size * sizeof(*cont->machine_stack);
257  }
258 #ifdef __ia64
259  if (cont->machine_register_stack) {
260  size += cont->machine_register_stack_size * sizeof(*cont->machine_register_stack);
261  }
262 #endif
263  }
264  return size;
265 }
266 
/*
 * GC mark callback for rb_fiber_t: marks the previous ("resumer")
 * fiber and delegates to cont_mark() for the embedded context.
 * NOTE(review): the name line was lost in the scrape (embedded 268);
 * per the dfree/dmark convention it takes (void *ptr).
 */
static void
{
    RUBY_MARK_ENTER("cont");
    if (ptr) {
	rb_fiber_t *fib = ptr;
	rb_gc_mark(fib->prev);
	cont_mark(&fib->cont);
    }
    RUBY_MARK_LEAVE("cont");
}
278 
279 static void
281 {
282  VALUE current_fibval = rb_fiber_current();
283  rb_fiber_t *current_fib;
284  GetFiberPtr(current_fibval, current_fib);
285 
286  /* join fiber link */
287  fib->next_fiber = current_fib->next_fiber;
288  fib->prev_fiber = current_fib;
289  current_fib->next_fiber->prev_fiber = fib;
290  current_fib->next_fiber = fib;
291 }
292 
293 static void
295 {
296  fib->prev_fiber->next_fiber = fib->next_fiber;
297  fib->next_fiber->prev_fiber = fib->prev_fiber;
298 }
299 
/*
 * dfree callback for rb_fiber_t: unlinks the fiber from the fiber
 * ring and frees the embedded context.
 * NOTE(review): scrape gaps — the name line (embedded 301) and the
 * remainder of the first condition's body (307-308, which frees the
 * fiber's local storage upstream) are missing; the `if (... &&` below
 * is truncated mid-expression.
 */
static void
{
    RUBY_FREE_ENTER("fiber");
    if (ptr) {
	rb_fiber_t *fib = ptr;
	if (fib->cont.type != ROOT_FIBER_CONTEXT &&
	}
	fiber_link_remove(fib);

	cont_free(&fib->cont);
    }
    RUBY_FREE_LEAVE("fiber");
}
316 
/*
 * Memsize callback for rb_fiber_t: the struct plus the embedded
 * context's footprint.
 * NOTE(review): the body of the non-root branch was lost in the
 * scrape (embedded line 325) — upstream it adds the size of the
 * fiber's local-storage table; confirm against the original source.
 */
static size_t
fiber_memsize(const void *ptr)
{
    const rb_fiber_t *fib = ptr;
    size_t size = 0;
    if (ptr) {
	size = sizeof(*fib);
	if (fib->cont.type != ROOT_FIBER_CONTEXT) {
	    /* NOTE(review): statement(s) lost in scrape here */
	}
	size += cont_memsize(&fib->cont);
    }
    return size;
}
331 
/*
 * Returns Qtrue when +obj+ wraps fiber_data_type (i.e. is a Fiber),
 * Qfalse otherwise.
 * NOTE(review): the name line was lost in the scrape (embedded 333) —
 * presumably rb_obj_is_fiber(VALUE obj); confirm against upstream.
 */
VALUE
{
    if (rb_typeddata_is_kind_of(obj, &fiber_data_type)) {
	return Qtrue;
    }
    else {
	return Qfalse;
    }
}
342 
/*
 * Copies the thread's live machine stack (and, on ia64, its register
 * backing store) into the context so it can be replayed when the
 * continuation/fiber is restored.
 * NOTE(review): scrape gaps — the name line (embedded 344), the
 * SET_MACHINE_STACK_END call (348), and the bodies of the two
 * stack-direction branches that compute `size` and
 * cont->machine_stack_src (354-355, 358-359, 369) are missing, so the
 * empty if/else below is incomplete, not a no-op by design.
 */
static void
{
    size_t size;

#ifdef __ia64
    th->machine_register_stack_end = rb_ia64_bsp();
#endif

    if (th->machine_stack_start > th->machine_stack_end) {
    }
    else {
    }

    if (cont->machine_stack) {
	REALLOC_N(cont->machine_stack, VALUE, size);
    }
    else {
	cont->machine_stack = ALLOC_N(VALUE, size);
    }

    MEMCPY(cont->machine_stack, cont->machine_stack_src, VALUE, size);

#ifdef __ia64
    rb_ia64_flushrs();
    size = cont->machine_register_stack_size = th->machine_register_stack_end - th->machine_register_stack_start;
    cont->machine_register_stack_src = th->machine_register_stack_start;
    if (cont->machine_register_stack) {
	REALLOC_N(cont->machine_register_stack, VALUE, size);
    }
    else {
	cont->machine_register_stack = ALLOC_N(VALUE, size);
    }

    MEMCPY(cont->machine_register_stack, cont->machine_register_stack_src, VALUE, size);
#endif
}
386 
/* TypedData descriptor for Continuation objects.
 * NOTE(review): the callback initializers (mark/free/memsize rows,
 * embedded line 389) were lost in the scrape. */
static const rb_data_type_t cont_data_type = {
    "continuation",
};
391 
/*
 * Snapshots the whole rb_thread_t into cont->saved_thread so it can
 * later be replayed by cont_restore_thread().
 * NOTE(review): scrape gaps — the name line (embedded 393) and the
 * statements zeroing machine_stack_start/end (399-400), which the
 * comment below refers to, are missing.
 */
static void
{
    /* save thread context */
    cont->saved_thread = *th;
    /* saved_thread->machine_stack_(start|end) should be NULL */
    /* because it may happen GC afterward */
#ifdef __ia64
    cont->saved_thread.machine_register_stack_start = 0;
    cont->saved_thread.machine_register_stack_end = 0;
#endif
}
406 
/*
 * Shared second-stage initialization for continuations and fibers:
 * snapshots the thread and detaches the saved copy's local storage
 * (the saved copy must not share the live thread's table).
 * NOTE(review): the name/parameter line was lost in the scrape
 * (embedded 408).
 */
static void
{
    /* save thread context */
    cont_save_thread(cont, th);
    cont->saved_thread.local_storage = 0;
}
414 
/*
 * Allocates a TypedData-wrapped rb_context_t of class +klass+ and
 * initializes it from the running thread.
 * NOTE(review): scrape gaps at embedded lines 416, 418, 420 and 422 —
 * the name line, the `cont` local declaration and (presumably) the
 * THREAD_MUST_BE_RUNNING check are missing.
 */
static rb_context_t *
{
    volatile VALUE contval;

    contval = TypedData_Make_Struct(klass, rb_context_t, &cont_data_type, cont);
    cont->self = contval;
    cont_init(cont, th);
    return cont;
}
428 
/*
 * Captures the current execution state (VM stack, control frames,
 * machine stack) into a new Continuation.  Returns twice,
 * setjmp-style: with *stat == 0 on the initial capture (returning the
 * Continuation object), and with *stat == 1 when the continuation is
 * later invoked (returning the value passed to #call).
 * NOTE(review): scrape gaps at embedded lines 432 and 436-437 — the
 * declaration of the `cont` local and (presumably) the
 * THREAD_MUST_BE_RUNNING check are missing.
 */
static VALUE
cont_capture(volatile int *stat)
{
    rb_thread_t *th = GET_THREAD(), *sth;
    volatile VALUE contval;

    cont = cont_new(rb_cContinuation);
    contval = cont->self;
    sth = &cont->saved_thread;

#ifdef CAPTURE_JUST_VALID_VM_STACK
    /* copy only the live region: the value-stack head [stack .. sp)
     * and the control-frame tail [cfp .. stack end) */
    cont->vm_stack_slen = th->cfp->sp + th->mark_stack_len - th->stack;
    cont->vm_stack_clen = th->stack + th->stack_size - (VALUE*)th->cfp;
    cont->vm_stack = ALLOC_N(VALUE, cont->vm_stack_slen + cont->vm_stack_clen);
    MEMCPY(cont->vm_stack, th->stack, VALUE, cont->vm_stack_slen);
    MEMCPY(cont->vm_stack + cont->vm_stack_slen, (VALUE*)th->cfp, VALUE, cont->vm_stack_clen);
#else
    cont->vm_stack = ALLOC_N(VALUE, th->stack_size);
    MEMCPY(cont->vm_stack, th->stack, VALUE, th->stack_size);
#endif
    sth->stack = 0;

    cont_save_machine_stack(th, cont);

    if (ruby_setjmp(cont->jmpbuf)) {
	/* second return: the continuation was invoked */
	volatile VALUE value;

	value = cont->value;
	/* argc == -1 marks an exception to propagate instead of a value */
	if (cont->argc == -1) rb_exc_raise(value);
	cont->value = Qnil;
	*stat = 1;
	return value;
    }
    else {
	/* first return: hand the fresh continuation to the caller */
	*stat = 0;
	return contval;
    }
}
470 
/*
 * Replays a previously captured thread state into the running thread.
 * For continuations the saved VM stack contents are copied back over
 * the live stack of whichever fiber the capture happened on; for
 * fibers the thread is simply pointed at the fiber's own stack.
 * Scalar thread fields (cfp, tags, errinfo, ...) are restored last.
 * NOTE(review): the name line was lost in the scrape (embedded 472) —
 * cont_restore_thread(rb_context_t *cont) per the call sites.
 */
static void
{
    rb_thread_t *th = GET_THREAD(), *sth = &cont->saved_thread;

    /* restore thread context */
    if (cont->type == CONTINUATION_CONTEXT) {
	/* continuation */
	VALUE fib;

	th->fiber = sth->fiber;
	fib = th->fiber ? th->fiber : th->root_fiber;

	if (fib) {
	    /* write into the VM stack owned by the fiber the
	     * continuation was captured on */
	    rb_fiber_t *fcont;
	    GetFiberPtr(fib, fcont);
	    th->stack_size = fcont->cont.saved_thread.stack_size;
	    th->stack = fcont->cont.saved_thread.stack;
	}
#ifdef CAPTURE_JUST_VALID_VM_STACK
	MEMCPY(th->stack, cont->vm_stack, VALUE, cont->vm_stack_slen);
	MEMCPY(th->stack + sth->stack_size - cont->vm_stack_clen,
	       cont->vm_stack + cont->vm_stack_slen, VALUE, cont->vm_stack_clen);
#else
	MEMCPY(th->stack, cont->vm_stack, VALUE, sth->stack_size);
#endif
    }
    else {
	/* fiber */
	th->stack = sth->stack;
	th->stack_size = sth->stack_size;
	th->local_storage = sth->local_storage;
	th->fiber = cont->self;
    }

    th->cfp = sth->cfp;
    th->safe_level = sth->safe_level;
    th->raised_flag = sth->raised_flag;
    th->state = sth->state;
    th->status = sth->status;
    th->tag = sth->tag;
    th->protect_tag = sth->protect_tag;
    th->errinfo = sth->errinfo;
    th->first_proc = sth->first_proc;
    th->root_lep = sth->root_lep;
    th->root_svar = sth->root_svar;
}
518 
519 #if FIBER_USE_NATIVE
520 #ifdef _WIN32
/*
 * (Win32) Records the page-aligned top of the current fiber's machine
 * stack into th->machine_stack_start so the GC can scan it.
 * NOTE(review): the line declaring the `th` local (embedded 524,
 * presumably `rb_thread_t *th = GET_THREAD();`) was lost in the scrape.
 */
static void
fiber_set_stack_location(void)
{
    VALUE *ptr;

    SET_MACHINE_STACK_END(&ptr);
    th->machine_stack_start = (void*)(((VALUE)ptr & RB_PAGE_MASK) + STACK_UPPER((void *)&ptr, 0, RB_PAGE_SIZE));
}
530 
531 static VOID CALLBACK
532 fiber_entry(void *arg)
533 {
534  fiber_set_stack_location();
535  rb_fiber_start();
536 }
537 #else /* _WIN32 */
538 
539 /*
540  * FreeBSD require a first (i.e. addr) argument of mmap(2) is not NULL
541  * if MAP_STACK is passed.
542  * http://www.FreeBSD.org/cgi/query-pr.cgi?pr=158755
543  */
544 #if defined(MAP_STACK) && !defined(__FreeBSD__) && !defined(__FreeBSD_kernel__)
545 #define FIBER_STACK_FLAGS (MAP_PRIVATE | MAP_ANON | MAP_STACK)
546 #else
547 #define FIBER_STACK_FLAGS (MAP_PRIVATE | MAP_ANON)
548 #endif
549 
/*
 * Returns a machine stack for a new native fiber: either recycled
 * from the cache of stacks left behind by terminated fibers, or
 * freshly mmap'ed with a PROT_NONE guard page at the growth end.
 * Raises FiberError when mmap or mprotect fails.
 * NOTE(review): one declaration line was lost in the scrape (embedded
 * 569, likely STACK_GROW_DIR_DETECTION).
 */
static char*
fiber_machine_stack_alloc(size_t size)
{
    char *ptr;

    if (machine_stack_cache_index > 0) {
	/* all cached stacks share a single canonical size */
	if (machine_stack_cache[machine_stack_cache_index - 1].size == (size / sizeof(VALUE))) {
	    ptr = machine_stack_cache[machine_stack_cache_index - 1].ptr;
	    machine_stack_cache_index--;
	    machine_stack_cache[machine_stack_cache_index].ptr = NULL;
	    machine_stack_cache[machine_stack_cache_index].size = 0;
	}
	else{
	    /* TODO handle multiple machine stack size */
	    rb_bug("machine_stack_cache size is not canonicalized");
	}
    }
    else {
	void *page;

	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, FIBER_STACK_FLAGS, -1, 0);
	if (ptr == MAP_FAILED) {
	    rb_raise(rb_eFiberError, "can't alloc machine stack to fiber");
	}

	/* guard page setup: protect the page at the stack-growth end */
	page = ptr + STACK_DIR_UPPER(size - RB_PAGE_SIZE, 0);
	if (mprotect(page, RB_PAGE_SIZE, PROT_NONE) < 0) {
	    rb_raise(rb_eFiberError, "mprotect failed");
	}
    }

    return ptr;
}
585 #endif
586 
/*
 * Prepares the OS-level context for a native fiber: on Windows a
 * fiber handle via CreateFiberEx() (with one GC-and-retry when handle
 * creation fails), elsewhere a ucontext_t whose stack comes from
 * fiber_machine_stack_alloc() and whose entry point is
 * rb_fiber_start().
 * NOTE(review): lines lost in the scrape at embedded 602 (end of the
 * Win32 branch, likely the machine_stack_maxsize assignment) and 606.
 */
static void
fiber_initialize_machine_stack_context(rb_fiber_t *fib, size_t size)
{
    rb_thread_t *sth = &fib->cont.saved_thread;

#ifdef _WIN32
    fib->fib_handle = CreateFiberEx(size - 1, size, 0, fiber_entry, NULL);
    if (!fib->fib_handle) {
	/* try to release unnecessary fibers & retry to create */
	rb_gc();
	fib->fib_handle = CreateFiberEx(size - 1, size, 0, fiber_entry, NULL);
	if (!fib->fib_handle) {
	    rb_raise(rb_eFiberError, "can't create fiber");
	}
    }
#else /* not WIN32 */
    ucontext_t *context = &fib->context;
    char *ptr;

    getcontext(context);
    ptr = fiber_machine_stack_alloc(size);
    context->uc_link = NULL;
    context->uc_stack.ss_sp = ptr;
    context->uc_stack.ss_size = size;
    makecontext(context, rb_fiber_start, 0);
    /* stack start depends on growth direction; the guard page is
     * excluded from the usable size */
    sth->machine_stack_start = (VALUE*)(ptr + STACK_DIR_UPPER(0, size));
    sth->machine_stack_maxsize = size - RB_PAGE_SIZE;
#endif
#ifdef __ia64
    sth->machine_register_stack_maxsize = sth->machine_stack_maxsize;
#endif
}
621 
622 NOINLINE(static void fiber_setcontext(rb_fiber_t *newfib, rb_fiber_t *oldfib));
623 
/*
 * Switches machine context from oldfib to newfib: lazily creates
 * newfib's native context on first resume, restores newfib's saved
 * thread state, records where oldfib's stack currently ends (so the
 * GC can scan the suspended fiber), then switches with
 * SwitchToFiber()/swapcontext().
 * NOTE(review): several lines were lost in the scrape (embedded 635,
 * 642-645, 649-650, 654-655) — including the machine_stack_size
 * bookkeeping and the machine_stack_start exchange announced by the
 * comment below.
 */
static void
fiber_setcontext(rb_fiber_t *newfib, rb_fiber_t *oldfib)
{
    rb_thread_t *th = GET_THREAD(), *sth = &newfib->cont.saved_thread;

    if (newfib->status != RUNNING) {
	fiber_initialize_machine_stack_context(newfib, th->vm->default_params.fiber_machine_stack_size);
    }

    /* restore thread context */
    cont_restore_thread(&newfib->cont);
    if (sth->machine_stack_end && (newfib != oldfib)) {
	rb_bug("fiber_setcontext: sth->machine_stack_end has non zero value");
    }

    /* save oldfib's machine stack */
    if (oldfib->status != TERMINATED) {
	if (STACK_DIR_UPPER(0, 1)) {
	    oldfib->cont.machine_stack = th->machine_stack_end;
	}
	else {
	    oldfib->cont.machine_stack = th->machine_stack_start;
	}
    }
    /* exchange machine_stack_start between oldfib and newfib */
    /* oldfib->machine_stack_end should be NULL */
    oldfib->cont.saved_thread.machine_stack_end = 0;
#ifndef _WIN32
    if (!newfib->context.uc_stack.ss_sp && th->root_fiber != newfib->cont.self) {
	rb_bug("non_root_fiber->context.uc_stac.ss_sp should not be NULL");
    }
#endif

    /* swap machine context */
#ifdef _WIN32
    SwitchToFiber(newfib->fib_handle);
#else
    swapcontext(&oldfib->context, &newfib->context);
#endif
}
671 #endif
672 
674 
/*
 * Second half of continuation restore: copies the saved machine stack
 * (and ia64 register stack) back over the live one and longjmps into
 * the saved jmpbuf.  Must only run after cont_restore_0() has pushed
 * the current frame clear of the region being overwritten.
 * NOTE(review): scrape gaps — the name line (embedded 676) and the
 * first line of the MEMCPY restoring the machine stack (691-692) are
 * missing; the orphaned `VALUE, ...` argument line belongs to it.
 */
static void
{
    cont_restore_thread(cont);

    /* restore machine stack */
#ifdef _M_AMD64
    {
	/* workaround for x64 SEH */
	jmp_buf buf;
	setjmp(buf);
	((_JUMP_BUFFER*)(&cont->jmpbuf))->Frame =
	    ((_JUMP_BUFFER*)(&buf))->Frame;
    }
#endif
    if (cont->machine_stack_src) {
	/* NOTE(review): opening of MEMCPY(...) lost in scrape */
	VALUE, cont->machine_stack_size);
    }

#ifdef __ia64
    if (cont->machine_register_stack_src) {
	MEMCPY(cont->machine_register_stack_src, cont->machine_register_stack,
	       VALUE, cont->machine_register_stack_size);
    }
#endif

    ruby_longjmp(cont->jmpbuf, 1);
}
705 
707 
708 #ifdef __ia64
709 #define C(a) rse_##a##0, rse_##a##1, rse_##a##2, rse_##a##3, rse_##a##4
710 #define E(a) rse_##a##0= rse_##a##1= rse_##a##2= rse_##a##3= rse_##a##4
711 static volatile int C(a), C(b), C(c), C(d), C(e);
712 static volatile int C(f), C(g), C(h), C(i), C(j);
713 static volatile int C(k), C(l), C(m), C(n), C(o);
714 static volatile int C(p), C(q), C(r), C(s), C(t);
715 #if 0
716 {/* the above lines make cc-mode.el confused so much */}
717 #endif
718 int rb_dummy_false = 0;
719 NORETURN(NOINLINE(static void register_stack_extend(rb_context_t *, VALUE *, VALUE *)));
/*
 * (ia64 only) Recursively consumes register-stack-engine frames until
 * the current backing store pointer (bsp) has advanced past the
 * region cont_restore_1() is about to overwrite, then performs the
 * restore.  The dead `rb_dummy_false` branch forces the compiler to
 * allocate many stacked registers per recursion level, so each call
 * advances bsp by a sizable amount.
 */
static void
register_stack_extend(rb_context_t *cont, VALUE *vp, VALUE *curr_bsp)
{
    if (rb_dummy_false) {
	/* use registers as much as possible */
	E(a) = E(b) = E(c) = E(d) = E(e) =
	E(f) = E(g) = E(h) = E(i) = E(j) =
	E(k) = E(l) = E(m) = E(n) = E(o) =
	E(p) = E(q) = E(r) = E(s) = E(t) = 0;
	E(a) = E(b) = E(c) = E(d) = E(e) =
	E(f) = E(g) = E(h) = E(i) = E(j) =
	E(k) = E(l) = E(m) = E(n) = E(o) =
	E(p) = E(q) = E(r) = E(s) = E(t) = 0;
    }
    /* recurse until our frame's backing store is past the saved region */
    if (curr_bsp < cont->machine_register_stack_src+cont->machine_register_stack_size) {
	register_stack_extend(cont, vp, (VALUE*)rb_ia64_bsp());
    }
    cont_restore_0(cont, vp);
}
739 #undef C
740 #undef E
741 #endif
742 
/*
 * First half of continuation restore: before cont_restore_1() can
 * memcpy the saved machine stack over the live one, the current
 * frame must be moved clear of the region being overwritten.  This is
 * done either by alloca'ing the exact shortfall or, without alloca,
 * by recursing with a fixed 1024-word pad until the frame is past the
 * saved region.  Both stack growth directions are handled; when the
 * direction is unknown at compile time it is detected at runtime by
 * comparing the caller's frame address with ours.
 */
static void
cont_restore_0(rb_context_t *cont, VALUE *addr_in_prev_frame)
{
    if (cont->machine_stack_src) {
#ifdef HAVE_ALLOCA
#define STACK_PAD_SIZE 1
#else
#define STACK_PAD_SIZE 1024
#endif
	VALUE space[STACK_PAD_SIZE];

#if !STACK_GROW_DIRECTION
	if (addr_in_prev_frame > &space[0]) {
	    /* Stack grows downward */
#endif
#if STACK_GROW_DIRECTION <= 0
	    volatile VALUE *const end = cont->machine_stack_src;
	    if (&space[0] > end) {
# ifdef HAVE_ALLOCA
		/* grow this frame by exactly the remaining distance;
		 * the dummy read keeps the alloca from being elided */
		volatile VALUE *sp = ALLOCA_N(VALUE, &space[0] - end);
		space[0] = *sp;
# else
		cont_restore_0(cont, &space[0]);
# endif
	    }
#endif
#if !STACK_GROW_DIRECTION
	}
	else {
	    /* Stack grows upward */
#endif
#if STACK_GROW_DIRECTION >= 0
	    volatile VALUE *const end = cont->machine_stack_src + cont->machine_stack_size;
	    if (&space[STACK_PAD_SIZE] < end) {
# ifdef HAVE_ALLOCA
		volatile VALUE *sp = ALLOCA_N(VALUE, end - &space[STACK_PAD_SIZE]);
		space[0] = *sp;
# else
		cont_restore_0(cont, &space[STACK_PAD_SIZE-1]);
# endif
	    }
#endif
#if !STACK_GROW_DIRECTION
	}
#endif
    }
    cont_restore_1(cont);
}
791 #ifdef __ia64
792 #define cont_restore_0(cont, vp) register_stack_extend((cont), (vp), (VALUE*)rb_ia64_bsp())
793 #endif
794 
795 /*
796  * Document-class: Continuation
797  *
798  * Continuation objects are generated by Kernel#callcc,
799  * after having +require+d <i>continuation</i>. They hold
800  * a return address and execution context, allowing a nonlocal return
801  * to the end of the <code>callcc</code> block from anywhere within a
802  * program. Continuations are somewhat analogous to a structured
803  * version of C's <code>setjmp/longjmp</code> (although they contain
804  * more state, so you might consider them closer to threads).
805  *
806  * For instance:
807  *
808  * require "continuation"
809  * arr = [ "Freddie", "Herbie", "Ron", "Max", "Ringo" ]
810  * callcc{|cc| $cc = cc}
811  * puts(message = arr.shift)
812  * $cc.call unless message =~ /Max/
813  *
814  * <em>produces:</em>
815  *
816  * Freddie
817  * Herbie
818  * Ron
819  * Max
820  *
821  * This (somewhat contrived) example allows the inner loop to abandon
822  * processing early:
823  *
824  * require "continuation"
825  * callcc {|cont|
826  * for i in 0..4
827  * print "\n#{i}: "
828  * for j in i*5...(i+1)*5
829  * cont.call() if j == 17
830  * printf "%3d", j
831  * end
832  * end
833  * }
834  * puts
835  *
836  * <em>produces:</em>
837  *
838  * 0: 0 1 2 3 4
839  * 1: 5 6 7 8 9
840  * 2: 10 11 12 13 14
841  * 3: 15 16
842  */
843 
844 /*
845  * call-seq:
846  * callcc {|cont| block } -> obj
847  *
848  * Generates a Continuation object, which it passes to
849  * the associated block. You need to <code>require
850  * 'continuation'</code> before using this method. Performing a
851  * <em>cont</em><code>.call</code> will cause the #callcc
852  * to return (as will falling through the end of the block). The
853  * value returned by the #callcc is the value of the
854  * block, or the value passed to <em>cont</em><code>.call</code>. See
855  * class Continuation for more details. Also see
856  * Kernel#throw for an alternative mechanism for
857  * unwinding a call stack.
858  */
859 
/*
 * Kernel#callcc implementation: captures the current continuation;
 * on the initial capture yields it to the block, on re-entry returns
 * the value passed to Continuation#call.
 * NOTE(review): the name line was lost in the scrape (embedded 861) —
 * presumably rb_callcc(VALUE self); confirm against upstream.
 */
static VALUE
{
    volatile int called;
    volatile VALUE val = cont_capture(&called);

    if (called) {
	return val;
    }
    else {
	return rb_yield(val);
    }
}
873 
874 static VALUE
876 {
877  switch (argc) {
878  case 0:
879  return Qnil;
880  case 1:
881  return argv[0];
882  default:
883  return rb_ary_new4(argc, argv);
884  }
885 }
886 
887 /*
888  * call-seq:
889  * cont.call(args, ...)
890  * cont[args, ...]
891  *
892  * Invokes the continuation. The program continues from the end of the
893  * <code>callcc</code> block. If no arguments are given, the original
894  * <code>callcc</code> returns <code>nil</code>. If one argument is
895  * given, <code>callcc</code> returns it. Otherwise, an array
896  * containing <i>args</i> is returned.
897  *
898  * callcc {|cont| cont.call } #=> nil
899  * callcc {|cont| cont.call 1 } #=> 1
900  * callcc {|cont| cont.call 1, 2, 3 } #=> [1, 2, 3]
901  */
902 
/*
 * Continuation#call: validates that the continuation is invoked on
 * its own thread, outside any stack-rewinding barrier, and (if it was
 * captured on a fiber) on that same fiber; stores the passed
 * arguments; then unwinds into the captured state via
 * cont_restore_0().  Control never actually returns here.
 * NOTE(review): scrape gaps at embedded lines 904 and 906 — the name
 * line and the `rb_context_t *cont;` declaration are missing.
 */
static VALUE
{
    rb_thread_t *th = GET_THREAD();
    GetContPtr(contval, cont);

    if (cont->saved_thread.self != th->self) {
	rb_raise(rb_eRuntimeError, "continuation called across threads");
    }
    if (cont->saved_thread.protect_tag != th->protect_tag) {
	rb_raise(rb_eRuntimeError, "continuation called across stack rewinding barrier");
    }
    if (cont->saved_thread.fiber) {
	rb_fiber_t *fcont;
	GetFiberPtr(cont->saved_thread.fiber, fcont);

	if (th->fiber != cont->saved_thread.fiber) {
	    rb_raise(rb_eRuntimeError, "continuation called across fiber");
	}
    }

    cont->argc = argc;
    cont->value = make_passing_arg(argc, argv);

    /* restore `tracing' context. see [Feature #4347] */
    th->trace_arg = cont->saved_thread.trace_arg;

    cont_restore_0(cont, &contval);
    return Qnil; /* unreachable */
}
934 
935 /*********/
936 /* fiber */
937 /*********/
938 
939 /*
940  * Document-class: Fiber
941  *
942  * Fibers are primitives for implementing light weight cooperative
943  * concurrency in Ruby. Basically they are a means of creating code blocks
944  * that can be paused and resumed, much like threads. The main difference
945  * is that they are never preempted and that the scheduling must be done by
946  * the programmer and not the VM.
947  *
948  * As opposed to other stackless light weight concurrency models, each fiber
949  * comes with a small 4KB stack. This enables the fiber to be paused from deeply
950  * nested function calls within the fiber block.
951  *
952  * When a fiber is created it will not run automatically. Rather it must be
953  * be explicitly asked to run using the <code>Fiber#resume</code> method.
954  * The code running inside the fiber can give up control by calling
955  * <code>Fiber.yield</code> in which case it yields control back to caller
956  * (the caller of the <code>Fiber#resume</code>).
957  *
958  * Upon yielding or termination the Fiber returns the value of the last
959  * executed expression
960  *
961  * For instance:
962  *
963  * fiber = Fiber.new do
964  * Fiber.yield 1
965  * 2
966  * end
967  *
968  * puts fiber.resume
969  * puts fiber.resume
970  * puts fiber.resume
971  *
972  * <em>produces</em>
973  *
974  * 1
975  * 2
976  * FiberError: dead fiber called
977  *
978  * The <code>Fiber#resume</code> method accepts an arbitrary number of
979  * parameters, if it is the first call to <code>resume</code> then they
980  * will be passed as block arguments. Otherwise they will be the return
981  * value of the call to <code>Fiber.yield</code>
982  *
983  * Example:
984  *
985  * fiber = Fiber.new do |first|
986  * second = Fiber.yield first + 2
987  * end
988  *
989  * puts fiber.resume 10
990  * puts fiber.resume 14
991  * puts fiber.resume 18
992  *
993  * <em>produces</em>
994  *
995  * 12
996  * 14
997  * FiberError: dead fiber called
998  *
999  */
1000 
/* TypedData descriptor for Fiber objects.
 * NOTE(review): the callback initializers (mark/free/memsize rows,
 * embedded line 1003) were lost in the scrape. */
static const rb_data_type_t fiber_data_type = {
    "fiber",
};
1005 
/*
 * Allocator for class Fiber: wraps a NULL pointer; the rb_fiber_t is
 * attached later by fiber_t_alloc() from Fiber#initialize.
 * NOTE(review): the name line was lost in the scrape (embedded 1007).
 */
static VALUE
{
    return TypedData_Wrap_Struct(klass, &fiber_data_type, 0);
}
1011 
/*
 * Attaches a zeroed rb_fiber_t to +fibval+ (raising if #initialize is
 * invoked twice) and initializes its embedded context from the
 * current thread; the new fiber starts in status CREATED.
 * NOTE(review): scrape gaps at embedded lines 1013 (name line) and
 * 1022 (likely THREAD_MUST_BE_RUNNING(th)).
 */
static rb_fiber_t*
{
    rb_fiber_t *fib;
    rb_thread_t *th = GET_THREAD();

    if (DATA_PTR(fibval) != 0) {
	rb_raise(rb_eRuntimeError, "cannot initialize twice");
    }

    fib = ALLOC(rb_fiber_t);
    memset(fib, 0, sizeof(rb_fiber_t));
    fib->cont.self = fibval;
    fib->cont.type = FIBER_CONTEXT;
    cont_init(&fib->cont, th);
    fib->prev = Qnil;
    fib->status = CREATED;

    DATA_PTR(fibval) = fib;

    return fib;
}
1035 
/*
 * Builds the initial VM state of a new fiber: allocates its private
 * VM stack, fabricates the bottom control frame the interpreter will
 * start from, links the fiber into the fiber ring, and records the
 * block +proc+ the fiber body will run.
 * NOTE(review): scrape gaps at embedded lines 1037 (name line —
 * fiber_init(VALUE fibval, VALUE proc) per the callers) and 1071 (a
 * field reset between th->tag and th->first_proc).
 */
static VALUE
{
    rb_fiber_t *fib = fiber_t_alloc(fibval);
    rb_context_t *cont = &fib->cont;
    rb_thread_t *th = &cont->saved_thread;

    /* initialize cont */
    cont->vm_stack = 0;

    th->stack = 0;
    th->stack_size = 0;

    fiber_link_join(fib);

    th->stack_size = th->vm->default_params.fiber_vm_stack_size / sizeof(VALUE);
    th->stack = ALLOC_N(VALUE, th->stack_size);

    /* fabricate the bottom control frame at the top of the new stack */
    th->cfp = (void *)(th->stack + th->stack_size);
    th->cfp--;
    th->cfp->pc = 0;
    th->cfp->sp = th->stack + 1;
#if VM_DEBUG_BP_CHECK
    th->cfp->bp_check = 0;
#endif
    th->cfp->ep = th->stack;
    *th->cfp->ep = VM_ENVVAL_BLOCK_PTR(0);
    th->cfp->self = Qnil;
    th->cfp->klass = Qnil;
    th->cfp->flag = 0;
    th->cfp->iseq = 0;
    th->cfp->proc = 0;
    th->cfp->block_iseq = 0;
    th->cfp->me = 0;
    th->tag = 0;

    th->first_proc = proc;

#if !FIBER_USE_NATIVE
    MEMCPY(&cont->jmpbuf, &th->root_jmpbuf, rb_jmpbuf_t, 1);
#endif

    return fibval;
}
1081 
/* :nodoc: */
/*
 * Fiber#initialize — captures the given block as the fiber's body.
 * NOTE(review): the name line was lost in the scrape (embedded 1084).
 */
static VALUE
{
    return fiber_init(fibval, rb_block_proc());
}
1088 
/*
 * Public C-API constructor for Fiber objects.
 * NOTE(review): the entire signature and body were lost in the
 * documentation scrape (embedded lines 1090 and 1092); restore from
 * the original source — this fragment is not compilable as-is.
 */
VALUE
{
}
1094 
1095 static VALUE
1097 {
1098  rb_fiber_t *fib;
1099  VALUE curr = rb_fiber_current();
1100  VALUE prev;
1101  GetFiberPtr(curr, fib);
1102 
1103  prev = fib->prev;
1104  if (NIL_P(prev)) {
1105  const VALUE root_fiber = GET_THREAD()->root_fiber;
1106 
1107  if (root_fiber == curr) {
1108  rb_raise(rb_eFiberError, "can't yield from root fiber");
1109  }
1110  return root_fiber;
1111  }
1112  else {
1113  fib->prev = Qnil;
1114  return prev;
1115  }
1116 }
1117 
1119 
/*
 * Finishes a fiber whose body has returned: marks it TERMINATED,
 * parks its machine stack in terminated_machine_stack (reclaimed by
 * the next switch in fiber_store()), and transfers the final value to
 * the fiber we should return to.  Never returns.
 * NOTE(review): the name/parameter line was lost in the scrape
 * (embedded 1121) — rb_fiber_terminate(rb_fiber_t *fib) per the
 * caller in rb_fiber_start().
 */
static void
{
    VALUE value = fib->cont.value;
    fib->status = TERMINATED;
#if FIBER_USE_NATIVE && !defined(_WIN32)
    /* Ruby must not switch to other thread until storing terminated_machine_stack */
    terminated_machine_stack.ptr = fib->context.uc_stack.ss_sp;
    terminated_machine_stack.size = fib->context.uc_stack.ss_size / sizeof(VALUE);
    fib->context.uc_stack.ss_sp = NULL;
    fib->cont.machine_stack = NULL;
    fib->cont.machine_stack_size = 0;
#endif
    rb_fiber_transfer(return_fiber(), 1, &value);
}
1135 
/*
 * Entry point executed on a brand-new fiber stack: invokes the
 * fiber's block with the arguments given to the first #resume, then
 * terminates the fiber with the block's value.  Exceptions escaping
 * the block are forwarded to the resuming fiber.
 * NOTE(review): scrape gaps — the name line (embedded 1137,
 * rb_fiber_start(void) per the callers in fiber_entry/makecontext)
 * and most of the error-propagation branch (1167, 1170-1174,
 * including the `err` declaration) are missing.
 */
void
{
    rb_thread_t *th = GET_THREAD();
    rb_fiber_t *fib;
    rb_context_t *cont;
    rb_proc_t *proc;
    int state;

    GetFiberPtr(th->fiber, fib);
    cont = &fib->cont;

    TH_PUSH_TAG(th);
    if ((state = EXEC_TAG()) == 0) {
	int argc;
	VALUE *argv, args;
	GetProcPtr(cont->saved_thread.first_proc, proc);
	args = cont->value;
	/* a single argument is passed directly; several arrive as an Array */
	argv = (argc = cont->argc) > 1 ? RARRAY_PTR(args) : &args;
	cont->value = Qnil;
	th->errinfo = Qnil;
	th->root_lep = rb_vm_ep_local_ep(proc->block.ep);
	th->root_svar = Qnil;

	fib->status = RUNNING;
	cont->value = rb_vm_invoke_proc(th, proc, argc, argv, 0);
    }
    TH_POP_TAG();

    if (state) {
	if (state == TAG_RAISE || state == TAG_FATAL) {
	    /* NOTE(review): exception-forwarding statements lost in scrape */
	}
	else {
	    /* NOTE(review): truncated — `err` and its handling lost in scrape */
	    if (!NIL_P(err))
	}
    }

    rb_fiber_terminate(fib);
    rb_bug("rb_fiber_start: unreachable");
}
1180 
/*
 * Wraps the already-running thread in an rb_fiber_t so it can take
 * part in fiber switching: the root fiber needs no separate VM stack,
 * is born in status RUNNING, and links to itself in the fiber ring.
 * NOTE(review): scrape gaps at embedded lines 1182 (name/parameter
 * line — root_fiber_alloc(rb_thread_t *th) per the callers) and 1186
 * (allocation/wrapping of `fib`, which is otherwise used
 * uninitialized below).
 */
static rb_fiber_t *
{
    rb_fiber_t *fib;
    /* no need to allocate vm stack */
    fib->cont.type = ROOT_FIBER_CONTEXT;
#if FIBER_USE_NATIVE
#ifdef _WIN32
    fib->fib_handle = ConvertThreadToFiber(0);
#endif
#endif
    fib->status = RUNNING;
    fib->prev_fiber = fib->next_fiber = fib;

    return fib;
}
1198 
1199 VALUE
1201 {
1202  rb_thread_t *th = GET_THREAD();
1203  if (th->fiber == 0) {
1204  /* save root */
1205  rb_fiber_t *fib = root_fiber_alloc(th);
1206  th->root_fiber = th->fiber = fib->cont.self;
1207  }
1208  return th->fiber;
1209 }
1210 
/*
 * Core switch routine: saves the current fiber's context, switches to
 * next_fib (natively via fiber_setcontext(), otherwise via
 * setjmp/longjmp), and on the way back recycles any machine stack
 * parked by a terminated fiber into the stack cache.  Returns the
 * value passed by whichever switch resumed us, or Qundef on the
 * non-native setjmp "first return" path.
 * NOTE(review): the name/parameter line was lost in the scrape
 * (embedded 1212) — fiber_store(rb_fiber_t *next_fib) per its use of
 * next_fib below.
 */
static VALUE
{
    rb_thread_t *th = GET_THREAD();
    rb_fiber_t *fib;

    if (th->fiber) {
	GetFiberPtr(th->fiber, fib);
	cont_save_thread(&fib->cont, th);
    }
    else {
	/* create current fiber */
	fib = root_fiber_alloc(th);
	th->root_fiber = th->fiber = fib->cont.self;
    }

#if !FIBER_USE_NATIVE
    cont_save_machine_stack(th, &fib->cont);
#endif

    if (FIBER_USE_NATIVE || ruby_setjmp(fib->cont.jmpbuf)) {
#if FIBER_USE_NATIVE
	fiber_setcontext(next_fib, fib);
#ifndef _WIN32
	/* back on this fiber: reclaim the stack of any fiber that
	 * terminated while we were switched away */
	if (terminated_machine_stack.ptr) {
	    if (machine_stack_cache_index < MAX_MAHINE_STACK_CACHE) {
		machine_stack_cache[machine_stack_cache_index].ptr = terminated_machine_stack.ptr;
		machine_stack_cache[machine_stack_cache_index].size = terminated_machine_stack.size;
		machine_stack_cache_index++;
	    }
	    else {
		if (terminated_machine_stack.ptr != fib->cont.machine_stack) {
		    munmap((void*)terminated_machine_stack.ptr, terminated_machine_stack.size * sizeof(VALUE));
		}
		else {
		    rb_bug("terminated fiber resumed");
		}
	    }
	    terminated_machine_stack.ptr = NULL;
	    terminated_machine_stack.size = 0;
	}
#endif
#endif
	/* restored */
	GetFiberPtr(th->fiber, fib);
	/* argc == -1 marks an exception to propagate instead of a value */
	if (fib->cont.argc == -1) rb_exc_raise(fib->cont.value);
	return fib->cont.value;
    }
#if !FIBER_USE_NATIVE
    else {
	return Qundef;
    }
#endif
}
1265 
/* fiber_switch: common implementation behind Fiber#resume and
 * Fiber#transfer. Validates the target fiber, stashes the arguments in
 * its context, and switches to it; returns the value produced when
 * control eventually comes back to the calling fiber.
 *
 *   fibval    - target Fiber VALUE
 *   argc/argv - arguments forwarded to the target
 *   is_resume - non-zero for resume semantics (records the caller as
 *               fib->prev so Fiber.yield knows where to return)
 *
 * Raises FiberError for cross-thread calls, calls across a stack
 * rewinding barrier, and (indirectly) dead-fiber calls.
 */
1266 static inline VALUE
1267 fiber_switch(VALUE fibval, int argc, VALUE *argv, int is_resume)
1268 {
1269  VALUE value;
1270  rb_fiber_t *fib;
1271  rb_context_t *cont;
1272  rb_thread_t *th = GET_THREAD();
1273 
1274  GetFiberPtr(fibval, fib);
1275  cont = &fib->cont;
1276 
1277  if (th->fiber == fibval) {
1278  /* ignore fiber context switch
1279  * because destination fiber is same as current fiber
1280  */
1281  return make_passing_arg(argc, argv);
1282  }
1283 
 /* Fibers are bound to the thread that created them. */
1284  if (cont->saved_thread.self != th->self) {
1285  rb_raise(rb_eFiberError, "fiber called across threads");
1286  }
1287  else if (cont->saved_thread.protect_tag != th->protect_tag) {
1288  rb_raise(rb_eFiberError, "fiber called across stack rewinding barrier");
1289  }
1290  else if (fib->status == TERMINATED) {
 /* Target is dead: instead of raising here directly, redirect the
  * switch to a live fiber (the caller's return fiber or the root fiber)
  * and arrange for the FiberError to be raised THERE via the
  * argc == -1 / value convention consumed in fiber_store. */
1291  value = rb_exc_new2(rb_eFiberError, "dead fiber called");
1292  if (th->fiber != fibval) {
 /* If the current fiber is itself alive, just raise immediately. */
1293  GetFiberPtr(th->fiber, fib);
1294  if (fib->status != TERMINATED) rb_exc_raise(value);
1295  fibval = th->root_fiber;
1296  }
1297  else {
1298  fibval = fib->prev;
1299  if (NIL_P(fibval)) fibval = th->root_fiber;
1300  }
1301  GetFiberPtr(fibval, fib);
1302  cont = &fib->cont;
1303  cont->argc = -1;
1304  cont->value = value;
1305 #if FIBER_USE_NATIVE
1306  {
1307  VALUE oldfibval;
1308  rb_fiber_t *oldfib;
1309  oldfibval = rb_fiber_current();
1310  GetFiberPtr(oldfibval, oldfib);
 /* Switch away; control does not return to this frame. */
1311  fiber_setcontext(fib, oldfib);
1312  }
1313 #else
1314  cont_restore_0(cont, &value);
1315 #endif
1316  }
1317 
1318  if (is_resume) {
 /* Remember who resumed us so Fiber.yield can return there. */
1319  fib->prev = rb_fiber_current();
1320  }
1321  else {
1322  /* restore `tracing' context. see [Feature #4347] */
1323  th->trace_arg = cont->saved_thread.trace_arg;
1324  }
1325 
 /* Hand the arguments to the target, then perform the actual switch. */
1326  cont->argc = argc;
1327  cont->value = make_passing_arg(argc, argv);
1328 
1329  value = fiber_store(fib);
1330 #if !FIBER_USE_NATIVE
 /* Qundef from fiber_store means "context saved, now jump": complete the
  * non-native switch; cont_restore_0 never returns. */
1331  if (value == Qundef) {
1332  cont_restore_0(cont, &value);
1333  rb_bug("rb_fiber_resume: unreachable");
1334  }
1335 #endif
 /* Back in this fiber: service any pending interrupts before returning. */
1336  RUBY_VM_CHECK_INTS(th);
1337 
1338  return value;
1339 }
1340 
/* rb_fiber_transfer: transfer control to `fib` without recording the
 * caller as the fiber's return target (is_resume = 0).
 * NOTE(review): the signature line (original line 1342) is missing; per
 * the cross-reference index it is
 * rb_fiber_transfer(VALUE fib, int argc, VALUE *argv).
 */
1341 VALUE
1343 {
1344  return fiber_switch(fib, argc, argv, 0);
1345 }
1346 
/* rb_fiber_resume: resume `fibval`, enforcing resume-specific rules
 * before delegating to fiber_switch with is_resume = 1.
 * NOTE(review): the signature line (original line 1348) is missing; per
 * the cross-reference index it is
 * rb_fiber_resume(VALUE fibval, int argc, VALUE *argv).
 */
1347 VALUE
1349 {
1350  rb_fiber_t *fib;
1351  GetFiberPtr(fibval, fib);
1352 
 /* A fiber with a recorded prev is already resumed (and the root fiber
  * can never be resumed): Ruby calls this "double resume". */
1353  if (fib->prev != Qnil || fib->cont.type == ROOT_FIBER_CONTEXT) {
1354  rb_raise(rb_eFiberError, "double resume");
1355  }
 /* Once a fiber has been used via transfer it may no longer be resumed. */
1356  if (fib->transfered != 0) {
1357  rb_raise(rb_eFiberError, "cannot resume transferred Fiber");
1358  }
1359 
1360  return fiber_switch(fibval, argc, argv, 1);
1361 }
1362 
/* rb_fiber_yield: yield from the current fiber back to the fiber that
 * resumed it (return_fiber()), passing argc/argv along.
 * NOTE(review): the signature line (original line 1364) is missing; per
 * the cross-reference index it is rb_fiber_yield(int argc, VALUE *argv).
 */
1363 VALUE
1365 {
1366  return rb_fiber_transfer(return_fiber(), argc, argv);
1367 }
1368 
/* rb_fiber_reset_root_local_storage(thval): when a thread finishes while
 * a non-root fiber is current, restore the thread's state from the root
 * fiber's saved context.
 * NOTE(review): the body line doing the actual restore (original line
 * 1378) is missing from this rendering — the if-branch below appears
 * empty only because of that; confirm against the original cont.c
 * before relying on this listing.
 */
1369 void
1371 {
1372  rb_thread_t *th;
1373  rb_fiber_t *fib;
1374 
1375  GetThreadPtr(thval, th);
1376  if (th->root_fiber && th->root_fiber != th->fiber) {
1377  GetFiberPtr(th->root_fiber, fib);
1379  }
1380 }
1381 
1382 /*
1383  * call-seq:
1384  * fiber.alive? -> true or false
1385  *
1386  * Returns true if the fiber can still be resumed (or transferred
1387  * to). After finishing execution of the fiber block this method will
1388  * always return false. You need to <code>require 'fiber'</code>
1389  * before using this method.
1390  */
/* NOTE(review): the signature line (original line 1392) is missing; per
 * the cross-reference index it is rb_fiber_alive_p(VALUE fibval). */
1391 VALUE
1393 {
1394  rb_fiber_t *fib;
1395  GetFiberPtr(fibval, fib);
 /* Alive means any status other than TERMINATED (CREATED, RUNNING, ...). */
1396  return fib->status != TERMINATED ? Qtrue : Qfalse;
1397 }
1398 
1399 /*
1400  * call-seq:
1401  * fiber.resume(args, ...) -> obj
1402  *
1403  * Resumes the fiber from the point at which the last <code>Fiber.yield</code>
1404  * was called, or starts running it if it is the first call to
1405  * <code>resume</code>. Arguments passed to resume will be the value of
1406  * the <code>Fiber.yield</code> expression or will be passed as block
1407  * parameters to the fiber's block if this is the first <code>resume</code>.
1408  *
1409  * Alternatively, when resume is called it evaluates to the arguments passed
1410  * to the next <code>Fiber.yield</code> statement inside the fiber's block
1411  * or to the block value if it runs to completion without any
1412  * <code>Fiber.yield</code>
1413  */
/* Ruby method shim: Fiber#resume -> rb_fiber_resume.
 * NOTE(review): the signature line (original line 1415) is missing; per
 * the cross-reference index it is
 * rb_fiber_m_resume(int argc, VALUE *argv, VALUE fib). */
1414 static VALUE
1416 {
1417  return rb_fiber_resume(fib, argc, argv);
1418 }
1419 
1420 /*
1421  * call-seq:
1422  * fiber.transfer(args, ...) -> obj
1423  *
1424  * Transfer control to another fiber, resuming it from where it last
1425  * stopped or starting it if it was not resumed before. The calling
1426  * fiber will be suspended much like in a call to
1427  * <code>Fiber.yield</code>. You need to <code>require 'fiber'</code>
1428  * before using this method.
1429  *
1430  * The fiber which receives the transfer call treats it much like
1431  * a resume call. Arguments passed to transfer are treated like those
1432  * passed to resume.
1433  *
1434  * You cannot resume a fiber that transferred control to another one.
1435  * This will cause a double resume error. You need to transfer control
1436  * back to this fiber before it can yield and resume.
1437  *
1438  * Example:
1439  *
1440  * fiber1 = Fiber.new do
1441  * puts "In Fiber 1"
1442  * Fiber.yield
1443  * end
1444  *
1445  * fiber2 = Fiber.new do
1446  * puts "In Fiber 2"
1447  * fiber1.transfer
1448  * puts "Never see this message"
1449  * end
1450  *
1451  * fiber3 = Fiber.new do
1452  * puts "In Fiber 3"
1453  * end
1454  *
1455  * fiber2.resume
1456  * fiber3.resume
1457  *
1458  * <em>produces</em>
1459  *
1460  * In Fiber 2
1461  * In Fiber 1
1462  * In Fiber 3
1463  *
1464  */
/* Ruby method shim: Fiber#transfer. Marks the fiber as transferred
 * (which permanently forbids Fiber#resume on it) before switching.
 * NOTE(review): the signature line (original line 1466) is missing; per
 * the cross-reference index it is
 * rb_fiber_m_transfer(int argc, VALUE *argv, VALUE fibval). */
1465 static VALUE
1467 {
1468  rb_fiber_t *fib;
1469  GetFiberPtr(fibval, fib);
1470  fib->transfered = 1;
1471  return rb_fiber_transfer(fibval, argc, argv);
1472 }
1473 
1474 /*
1475  * call-seq:
1476  * Fiber.yield(args, ...) -> obj
1477  *
1478  * Yields control back to the context that resumed the fiber, passing
1479  * along any arguments that were passed to it. The fiber will resume
1480  * processing at this point when <code>resume</code> is called next.
1481  * Any arguments passed to the next <code>resume</code> will be the
1482  * value that this <code>Fiber.yield</code> expression evaluates to.
1483  */
/* Ruby method shim: Fiber.yield (singleton) -> rb_fiber_yield.
 * NOTE(review): the signature line (original line 1485) is missing; per
 * the cross-reference index it is
 * rb_fiber_s_yield(int argc, VALUE *argv, VALUE klass). */
1484 static VALUE
1486 {
1487  return rb_fiber_yield(argc, argv);
1488 }
1489 
1490 /*
1491  * call-seq:
1492  * Fiber.current() -> fiber
1493  *
1494  * Returns the current fiber. You need to <code>require 'fiber'</code>
1495  * before using this method. If you are not running in the context of
1496  * a fiber this method will return the root fiber.
1497  */
/* Ruby method shim: Fiber.current (singleton) -> rb_fiber_current.
 * NOTE(review): the signature line (original line 1499) is missing; per
 * the cross-reference index it is rb_fiber_s_current(VALUE klass). */
1498 static VALUE
1500 {
1501  return rb_fiber_current();
1502 }
1503 
1504 
1505 
1506 /*
1507  * Document-class: FiberError
1508  *
1509  * Raised when an invalid operation is attempted on a Fiber, in
1510  * particular when attempting to call/resume a dead fiber,
1511  * attempting to yield from the root fiber, or calling a fiber across
1512  * threads.
1513  *
1514  * fiber = Fiber.new{}
1515  * fiber.resume #=> nil
1516  * fiber.resume #=> FiberError: dead fiber called
1517  */
1518 
/* Init_Cont(void): one-time initialization for this file — caches the
 * system page size (used by the native fiber stack allocator) and sets
 * up the Fiber class.
 * NOTE(review): several lines are missing from this rendering (original
 * lines 1520, 1532, 1536-1538, 1540), which in the original register
 * additional class setup (e.g. alloc func and FiberError); the listing
 * below is therefore incomplete — confirm against the original cont.c.
 */
1519 void
1521 {
1522 #if FIBER_USE_NATIVE
1523  rb_thread_t *th = GET_THREAD();
1524 
1525 #ifdef _WIN32
1526  SYSTEM_INFO info;
1527  GetSystemInfo(&info);
1528  pagesize = info.dwPageSize;
1529 #else /* not WIN32 */
 /* POSIX: query the VM page size once; RB_PAGE_SIZE/RB_PAGE_MASK use it. */
1530  pagesize = sysconf(_SC_PAGESIZE);
1531 #endif
1533 #endif
1534 
1535  rb_cFiber = rb_define_class("Fiber", rb_cObject);
1539  rb_define_method(rb_cFiber, "initialize", rb_fiber_init, 0);
1541 }
1542 
1543 #if defined __GNUC__ && __GNUC__ >= 4
1544 #pragma GCC visibility push(default)
1545 #endif
1546 
/* ruby_Init_Continuation_body(void): define the Continuation class and
 * the global callcc function (loaded on demand by `require 'continuation'`).
 * NOTE(review): original lines 1551-1554 are missing from this rendering
 * (in the original they undef alloc/new and define Continuation#call and
 * #[]); confirm against the original cont.c.
 */
1547 void
1549 {
1550  rb_cContinuation = rb_define_class("Continuation", rb_cObject);
1555  rb_define_global_function("callcc", rb_callcc, 0);
1556 }
1557 
/* ruby_Init_Fiber_as_Coroutine(void): extra Fiber methods loaded by
 * `require 'fiber'`.
 * NOTE(review): the entire body (original lines 1561-1563) is missing
 * from this rendering; per the cross-reference index this is where
 * Fiber#transfer, Fiber#alive? and Fiber.current are registered —
 * confirm against the original cont.c.
 */
1558 void
1560 {
1564 }
1565 
1566 #if defined __GNUC__ && __GNUC__ >= 4
1567 #pragma GCC visibility pop
1568 #endif
RARRAY_PTR(q->result)[0]
rb_control_frame_t * cfp
Definition: vm_core.h:500
#define ALLOC(type)
size_t machine_stack_size
Definition: cont.c:79
VALUE * vm_stack
Definition: cont.c:65
VALUE rb_eStandardError
Definition: error.c:509
#define RUBY_VM_CHECK_INTS(th)
Definition: vm_core.h:948
rb_vm_t * vm
Definition: vm_core.h:495
ssize_t n
Definition: bigdecimal.c:5655
VALUE rb_ary_new4(long n, const VALUE *elts)
Definition: array.c:451
#define THREAD_MUST_BE_RUNNING(th)
Definition: cont.c:135
#define GetContPtr(obj, ptr)
Definition: cont.c:125
void rb_bug(const char *fmt,...)
Definition: error.c:290
VALUE * root_lep
Definition: vm_core.h:526
#define ruby_longjmp(env, val)
Definition: eval_intern.h:51
struct rb_vm_protect_tag * protect_tag
Definition: vm_core.h:562
static VALUE VALUE th
Definition: tcltklib.c:2948
#define rb_gc_mark_locations(start, end)
Definition: gc.c:2346
static VALUE rb_cContinuation
Definition: cont.c:121
#define RUBY_VM_SET_INTERRUPT(th)
Definition: vm_core.h:916
st_table * local_storage
Definition: vm_core.h:579
Win32OLEIDispatch * p
Definition: win32ole.c:786
void rb_define_singleton_method(VALUE obj, const char *name, VALUE(*func)(ANYARGS), int argc)
Defines a singleton method for obj.
Definition: class.c:1493
#define GetProcPtr(obj, ptr)
Definition: vm_core.h:665
VALUE self
Definition: cont.c:62
st_table * st_init_numtable(void)
Definition: st.c:272
VALUE proc
Definition: tcltklib.c:2959
static VALUE cont_capture(volatile int *stat)
Definition: cont.c:430
SSL_METHOD *(* func)(void)
Definition: ossl_ssl.c:108
ssize_t i
Definition: bigdecimal.c:5655
void rb_fiber_reset_root_local_storage(VALUE thval)
Definition: cont.c:1370
Tcl_CmdInfo * info
Definition: tcltklib.c:1463
VALUE prev
Definition: cont.c:101
static VALUE rb_fiber_m_resume(int argc, VALUE *argv, VALUE fib)
Definition: cont.c:1415
static const rb_data_type_t cont_data_type
Definition: cont.c:120
#define GetFiberPtr(obj, ptr)
Definition: cont.c:128
Real * a
Definition: bigdecimal.c:1182
static rb_fiber_t * root_fiber_alloc(rb_thread_t *th)
Definition: cont.c:1182
void rb_define_alloc_func(VALUE, rb_alloc_func_t)
enum context_type type
Definition: cont.c:61
void rb_thread_mark(void *th)
Definition: vm.c:1788
static void rb_fiber_terminate(rb_fiber_t *fib)
Definition: cont.c:1121
VALUE rb_fiber_yield(int argc, VALUE *argv)
Definition: cont.c:1364
size_t fiber_machine_stack_size
Definition: vm_core.h:403
static VALUE make_passing_arg(int argc, VALUE *argv)
Definition: cont.c:875
#define CLASS_OF(v)
NIL_P(eventloop_thread)
Definition: tcltklib.c:4068
int transfered
Definition: cont.c:109
#define VM_ENVVAL_BLOCK_PTR(v)
Definition: vm_core.h:775
#define STACK_UPPER(x, a, b)
Definition: gc.h:74
VALUE rb_fiber_alive_p(VALUE fibval)
Definition: cont.c:1392
static VALUE fiber_switch(VALUE fibval, int argc, VALUE *argv, int is_resume)
Definition: cont.c:1267
void rb_threadptr_pending_interrupt_enque(rb_thread_t *th, VALUE v)
Definition: thread.c:1495
void rb_raise(VALUE exc, const char *fmt,...)
Definition: error.c:1780
VALUE rb_proc_new(VALUE(*)(ANYARGS), VALUE)
Definition: proc.c:2018
static void cont_save_thread(rb_context_t *cont, rb_thread_t *th)
Definition: cont.c:393
#define RUBY_MARK_LEAVE(msg)
Definition: gc.h:54
VALUE rb_fiber_current(void)
Definition: cont.c:1200
return Qtrue
Definition: tcltklib.c:9610
#define C
Definition: util.c:194
size_t st_memsize(const st_table *)
Definition: st.c:342
static VALUE fiber_init(VALUE fibval, VALUE proc)
Definition: cont.c:1037
static void fiber_link_join(rb_fiber_t *fib)
Definition: cont.c:280
r
Definition: bigdecimal.c:1196
#define TAG_RAISE
Definition: eval_intern.h:140
rb_jmpbuf_t jmpbuf
Definition: cont.c:78
void rb_define_global_function(const char *name, VALUE(*func)(ANYARGS), int argc)
Defines a global function.
Definition: class.c:1522
VALUE rb_vm_invoke_proc(rb_thread_t *th, rb_proc_t *proc, int argc, const VALUE *argv, const rb_block_t *blockptr)
Definition: vm.c:712
int state
Definition: tcltklib.c:1462
static const rb_data_type_t fiber_data_type
Definition: cont.c:120
VALUE value
Definition: cont.c:64
VALUE VALUE args
Definition: tcltklib.c:2561
void rb_undef_method(VALUE klass, const char *name)
Definition: class.c:1358
static void cont_free(void *ptr)
Definition: cont.c:187
d
Definition: strlcat.c:58
rb_thread_t saved_thread
Definition: cont.c:77
struct rb_context_struct rb_context_t
#define NOINLINE(x)
Definition: ruby.h:37
static VALUE rb_eFiberError
Definition: cont.c:123
#define TAG_FATAL
Definition: eval_intern.h:142
#define NORETURN(x)
Definition: ruby.h:31
void rb_exc_raise(VALUE mesg)
Definition: eval.c:527
size_t vm_stack_clen
Definition: cont.c:68
VALUE * stack
Definition: vm_core.h:498
#define FLUSH_REGISTER_WINDOWS
Definition: ripper.y:299
static size_t fiber_memsize(const void *ptr)
Definition: cont.c:318
#define TH_POP_TAG()
Definition: eval_intern.h:101
void rb_gc(void)
Definition: gc.c:3108
enum fiber_status status
Definition: cont.c:102
memset(y->frac+ix+1, 0,(y->Prec-(ix+1))*sizeof(BDIGIT))
VALUE rb_block_proc(void)
Definition: proc.c:479
VALUE rb_fiber_resume(VALUE fibval, int argc, VALUE *argv)
Definition: cont.c:1348
BDIGIT m
Definition: bigdecimal.c:5085
static VALUE return_fiber(void)
Definition: cont.c:1096
return Qfalse
Definition: tcltklib.c:6779
#define EXEC_TAG()
Definition: eval_intern.h:113
#define Qnil
Definition: tcltklib.c:1896
#define val
Definition: tcltklib.c:1949
VALUE * rb_vm_ep_local_ep(VALUE *ep)
Definition: vm.c:36
rb_iseq_t * block_iseq
Definition: vm_core.h:433
VALUE rb_eRuntimeError
Definition: error.c:510
size_t fiber_vm_stack_size
Definition: vm_core.h:402
int rb_typeddata_is_kind_of(VALUE obj, const rb_data_type_t *data_type)
Definition: error.c:473
Definition: cont.c:83
static VALUE rb_fiber_init(VALUE fibval)
Definition: cont.c:1084
static void cont_restore_1(rb_context_t *cont)
Definition: cont.c:676
void rb_gc_mark(VALUE)
Definition: gc.c:2598
rb_iseq_t * iseq
Definition: vm_core.h:428
VALUE rb_define_class(const char *name, VALUE super)
Defines a top-level class.
Definition: class.c:499
static VALUE VALUE obj
Definition: tcltklib.c:3158
static VALUE fiber_alloc(VALUE klass)
Definition: cont.c:1007
VALUE tag
Definition: vm_core.h:469
#define ANYARGS
#define RUBY_MARK_ENTER(msg)
Definition: gc.h:53
VALUE rb_fiber_new(VALUE(*func)(ANYARGS), VALUE obj)
Definition: cont.c:1090
void ruby_Init_Fiber_as_Coroutine(void)
Definition: cont.c:1559
void rb_fiber_start(void)
Definition: cont.c:1137
static void cont_mark(void *ptr)
Definition: cont.c:140
#define TypedData_Wrap_Struct(klass, data_type, sval)
static VALUE rb_callcc(VALUE self)
Definition: cont.c:861
unsigned char buf[MIME_BUF_SIZE]
Definition: nkf.c:4308
static VALUE rb_fiber_m_transfer(int argc, VALUE *argv, VALUE fibval)
Definition: cont.c:1466
int err
Definition: win32.c:87
VALUE * machine_stack_start
Definition: vm_core.h:588
#define ALLOCA_N(type, n)
struct rb_vm_struct::@153 default_params
static VALUE rb_fiber_s_current(VALUE klass)
Definition: cont.c:1499
Definition: cont.c:84
VALUE * argv
Definition: tcltklib.c:1971
size_t vm_stack_slen
Definition: cont.c:67
VALUE rb_yield(VALUE)
Definition: vm_eval.c:934
#define STACK_DIR_UPPER(a, b)
Definition: gc.h:82
void rb_vm_stack_to_heap(rb_thread_t *th)
Definition: vm.c:553
volatile VALUE value
Definition: tcltklib.c:9442
register char * s
Definition: os2.c:56
static rb_context_t * cont_new(VALUE klass)
Definition: cont.c:416
void ruby_xfree(void *x)
Definition: gc.c:3649
VALUE * machine_stack
Definition: cont.c:70
static VALUE rb_cont_call(int argc, VALUE *argv, VALUE contval)
Definition: cont.c:904
void Init_Cont(void)
Definition: cont.c:1520
static VALUE rb_cFiber
Definition: cont.c:122
VALUE rb_exc_new2(VALUE etype, const char *s)
Definition: error.c:542
int argc
Definition: tcltklib.c:1970
#define STACK_GROW_DIR_DETECTION
Definition: gc.h:81
rb_context_t cont
Definition: cont.c:100
RUBY_JMP_BUF rb_jmpbuf_t
Definition: vm_core.h:462
static VALUE fiber_store(rb_fiber_t *next_fib)
Definition: cont.c:1212
struct rb_fiber_struct rb_fiber_t
VALUE first_proc
Definition: vm_core.h:583
static void fiber_mark(void *ptr)
Definition: cont.c:268
void rb_undef_alloc_func(VALUE)
Definition: vm_method.c:482
#define TH_PUSH_TAG(th)
Definition: eval_intern.h:94
#define SET_MACHINE_STACK_END(p)
Definition: gc.h:11
static void cont_init(rb_context_t *cont, rb_thread_t *th)
Definition: cont.c:408
Real * b
Definition: bigdecimal.c:1182
return ptr
Definition: tcltklib.c:784
VpDivd * c
Definition: bigdecimal.c:1205
static VALUE rb_fiber_s_yield(int argc, VALUE *argv, VALUE klass)
Definition: cont.c:1485
#define FIBER_USE_NATIVE
Definition: cont.c:38
#define ruby_setjmp(env)
Definition: eval_intern.h:50
#define MEMCPY(p1, p2, type, n)
gz end
Definition: zlib.c:2270
enum rb_thread_status status
Definition: vm_core.h:531
#define RUBY_FREE_UNLESS_NULL(ptr)
Definition: gc.h:61
arg
Definition: ripper.y:1312
struct rb_fiber_struct * next_fiber
Definition: cont.c:104
VALUE * machine_stack_end
Definition: vm_core.h:589
static void cont_restore_thread(rb_context_t *cont)
Definition: cont.c:472
int size
Definition: encoding.c:52
#define f
int mark_stack_len
Definition: vm_core.h:597
VALUE root_svar
Definition: vm_core.h:527
rb_block_t block
Definition: vm_core.h:669
#define Qundef
int t
Definition: ripper.c:13760
const rb_method_entry_t * me
Definition: vm_core.h:435
#define RUBY_FREE_LEAVE(msg)
Definition: gc.h:56
DATA_PTR(self)
#define RUBY_FREE_ENTER(msg)
Definition: gc.h:55
#define TypedData_Make_Struct(klass, type, data_type, sval)
RUBY_EXTERN VALUE rb_cObject
Definition: ripper.y:1426
#define ALLOC_N(type, n)
VALUE rb_obj_is_fiber(VALUE obj)
Definition: cont.c:333
#define STACK_PAD_SIZE
VALUE rb_fiber_transfer(VALUE fib, int argc, VALUE *argv)
Definition: cont.c:1342
VALUE root_fiber
Definition: vm_core.h:608
klass
Definition: tcltklib.c:3504
static rb_fiber_t * fiber_t_alloc(VALUE fibval)
Definition: cont.c:1013
struct rb_fiber_struct * prev_fiber
Definition: cont.c:103
#define GetThreadPtr(obj, ptr)
Definition: vm_core.h:452
rb_jmpbuf_t root_jmpbuf
Definition: vm_core.h:609
static void cont_restore_0(rb_context_t *cont, VALUE *addr_in_prev_frame)
Definition: cont.c:744
void ruby_Init_Continuation_body(void)
Definition: cont.c:1548
size_t stack_size
Definition: vm_core.h:499
static void fiber_link_remove(rb_fiber_t *fib)
Definition: cont.c:294
struct rb_vm_tag * tag
Definition: vm_core.h:561
BDIGIT e
Definition: bigdecimal.c:5085
unsigned long VALUE
Definition: ripper.y:104
context_type
Definition: cont.c:54
static void fiber_free(void *ptr)
Definition: cont.c:301
VALUE rb_vm_make_jump_tag_but_local_jump(int state, VALUE val)
Definition: vm.c:907
static size_t cont_memsize(const void *ptr)
Definition: cont.c:240
#define stat(path, st)
Definition: win32.h:193
static void cont_save_machine_stack(rb_thread_t *th, rb_context_t *cont)
Definition: cont.c:344
size_t machine_stack_maxsize
Definition: vm_core.h:590
fiber_status
Definition: cont.c:82
#define NULL
Definition: _sdbm.c:103
q
Definition: tcltklib.c:2968
#define REALLOC_N(var, type, n)
static rb_thread_t * GET_THREAD(void)
Definition: vm_core.h:883
void rb_define_method(VALUE klass, const char *name, VALUE(*func)(ANYARGS), int argc)
Definition: class.c:1340
VALUE * machine_stack_src
Definition: cont.c:71
struct rb_trace_arg_struct * trace_arg
Definition: vm_core.h:604
void st_free_table(st_table *)
Definition: st.c:334
VALUE * ep
Definition: vm_core.h:445