00001
00002
00003
00004
00005
00006
00007
00008
00009
00010
00011
00012 #include "ruby/ruby.h"
00013 #include "vm_core.h"
00014 #include "gc.h"
00015 #include "eval_intern.h"
00016
00017 #define CAPTURE_JUST_VALID_VM_STACK 1
00018
/* What kind of saved execution context an rb_context_t represents. */
enum context_type {
    CONTINUATION_CONTEXT = 0,   /* continuation captured by callcc */
    FIBER_CONTEXT = 1,          /* ordinary Fiber created from Ruby */
    ROOT_FIBER_CONTEXT = 2      /* implicit fiber for the thread's main context */
};
00024
/*
 * A saved execution context: a snapshot of the VM stack, the machine
 * (C) stack, and the owning thread state, plus the jmpbuf used to
 * resume it.  Shared by Continuation and Fiber.
 */
typedef struct rb_context_struct {
    enum context_type type;
    VALUE self;                 /* the wrapping Continuation/Fiber object */
    int argc;                   /* argc passed at call/resume; -1 means raise value */
    VALUE value;                /* value transferred to/from the context */
    VALUE *vm_stack;            /* heap copy of the captured VM stack */
#ifdef CAPTURE_JUST_VALID_VM_STACK
    size_t vm_stack_slen;       /* length of the copied bottom (value) slice */
    size_t vm_stack_clen;       /* length of the copied top (control-frame) slice */
#endif
    VALUE *machine_stack;       /* heap copy of the machine stack */
    VALUE *machine_stack_src;   /* original machine-stack address to restore into */
#ifdef __ia64
    VALUE *machine_register_stack;      /* ia64 register backing store copy */
    VALUE *machine_register_stack_src;
    int machine_register_stack_size;
#endif
    rb_thread_t saved_thread;   /* full thread-state snapshot */
    rb_jmpbuf_t jmpbuf;         /* longjmp target to re-enter the context */
    size_t machine_stack_size;  /* in VALUEs */
} rb_context_t;
00046
/* Lifecycle of a fiber: CREATED -> RUNNING -> TERMINATED. */
enum fiber_status {
    CREATED,
    RUNNING,
    TERMINATED
};
00052
/*
 * A Fiber: a context plus resume bookkeeping.  All fibers of a thread
 * are kept on a circular doubly-linked list (prev_fiber/next_fiber).
 */
typedef struct rb_fiber_struct {
    rb_context_t cont;          /* embedded context (must be first) */
    VALUE prev;                 /* fiber to return to on yield; Qnil if none */
    enum fiber_status status;
    struct rb_fiber_struct *prev_fiber; /* circular list links */
    struct rb_fiber_struct *next_fiber;
} rb_fiber_t;
00060
/* GC type descriptors; definitions appear later in this file. */
static const rb_data_type_t cont_data_type, fiber_data_type;
static VALUE rb_cContinuation;
static VALUE rb_cFiber;
static VALUE rb_eFiberError;

/* Unwrap a Continuation object into its rb_context_t pointer. */
#define GetContPtr(obj, ptr) \
    TypedData_Get_Struct(obj, rb_context_t, &cont_data_type, ptr)

/* Unwrap a Fiber object; raises FiberError if the Fiber was allocated
 * but never initialized (its DATA_PTR is still 0). */
#define GetFiberPtr(obj, ptr) do {\
    TypedData_Get_Struct(obj, rb_fiber_t, &fiber_data_type, ptr); \
    if (!ptr) rb_raise(rb_eFiberError, "uninitialized fiber"); \
} while(0)

NOINLINE(static VALUE cont_capture(volatile int *stat));

void rb_thread_mark(rb_thread_t *th);
/* A thread with no active tag is not executing Ruby code, so contexts
 * must not be captured or created on it. */
#define THREAD_MUST_BE_RUNNING(th) do { \
    if (!th->tag) rb_raise(rb_eThreadError, "not running thread"); \
} while (0)
00080
00081 static void
00082 cont_mark(void *ptr)
00083 {
00084 RUBY_MARK_ENTER("cont");
00085 if (ptr) {
00086 rb_context_t *cont = ptr;
00087 rb_gc_mark(cont->value);
00088 rb_thread_mark(&cont->saved_thread);
00089
00090 if (cont->vm_stack) {
00091 #ifdef CAPTURE_JUST_VALID_VM_STACK
00092 rb_gc_mark_locations(cont->vm_stack,
00093 cont->vm_stack + cont->vm_stack_slen + cont->vm_stack_clen);
00094 #else
00095 rb_gc_mark_localtion(cont->vm_stack,
00096 cont->vm_stack, cont->saved_thread.stack_size);
00097 #endif
00098 }
00099
00100 if (cont->machine_stack) {
00101 rb_gc_mark_locations(cont->machine_stack,
00102 cont->machine_stack + cont->machine_stack_size);
00103 }
00104 #ifdef __ia64
00105 if (cont->machine_register_stack) {
00106 rb_gc_mark_locations(cont->machine_register_stack,
00107 cont->machine_register_stack + cont->machine_register_stack_size);
00108 }
00109 #endif
00110 }
00111 RUBY_MARK_LEAVE("cont");
00112 }
00113
00114 static void
00115 cont_free(void *ptr)
00116 {
00117 RUBY_FREE_ENTER("cont");
00118 if (ptr) {
00119 rb_context_t *cont = ptr;
00120 RUBY_FREE_UNLESS_NULL(cont->saved_thread.stack); fflush(stdout);
00121 RUBY_FREE_UNLESS_NULL(cont->machine_stack);
00122 #ifdef __ia64
00123 RUBY_FREE_UNLESS_NULL(cont->machine_register_stack);
00124 #endif
00125 RUBY_FREE_UNLESS_NULL(cont->vm_stack);
00126
00127
00128 ruby_xfree(ptr);
00129 }
00130 RUBY_FREE_LEAVE("cont");
00131 }
00132
00133 static size_t
00134 cont_memsize(const void *ptr)
00135 {
00136 const rb_context_t *cont = ptr;
00137 size_t size = 0;
00138 if (cont) {
00139 size = sizeof(*cont);
00140 if (cont->vm_stack) {
00141 #ifdef CAPTURE_JUST_VALID_VM_STACK
00142 size_t n = (cont->vm_stack_slen + cont->vm_stack_clen);
00143 #else
00144 size_t n = cont->saved_thread.stack_size;
00145 #endif
00146 size += n * sizeof(*cont->vm_stack);
00147 }
00148
00149 if (cont->machine_stack) {
00150 size += cont->machine_stack_size * sizeof(*cont->machine_stack);
00151 }
00152 #ifdef __ia64
00153 if (cont->machine_register_stack) {
00154 size += cont->machine_register_stack_size * sizeof(*cont->machine_register_stack);
00155 }
00156 #endif
00157 }
00158 return size;
00159 }
00160
00161 static void
00162 fiber_mark(void *ptr)
00163 {
00164 RUBY_MARK_ENTER("cont");
00165 if (ptr) {
00166 rb_fiber_t *fib = ptr;
00167 rb_gc_mark(fib->prev);
00168 cont_mark(&fib->cont);
00169 }
00170 RUBY_MARK_LEAVE("cont");
00171 }
00172
/*
 * Insert fib into the circular doubly-linked fiber list, immediately
 * after the currently running fiber.  rb_fiber_current() lazily creates
 * the root fiber, so the list always has at least one node.
 */
static void
fiber_link_join(rb_fiber_t *fib)
{
    VALUE current_fibval = rb_fiber_current();
    rb_fiber_t *current_fib;
    GetFiberPtr(current_fibval, current_fib);

    /* standard circular-list insertion after current_fib */
    fib->next_fiber = current_fib->next_fiber;
    fib->prev_fiber = current_fib;
    current_fib->next_fiber->prev_fiber = fib;
    current_fib->next_fiber = fib;
}
00186
00187 static void
00188 fiber_link_remove(rb_fiber_t *fib)
00189 {
00190 fib->prev_fiber->next_fiber = fib->next_fiber;
00191 fib->next_fiber->prev_fiber = fib->prev_fiber;
00192 }
00193
/*
 * GC free callback for a Fiber: frees the fiber-local storage table
 * (the root fiber has none of its own), unlinks the fiber from the
 * circular list, and frees the embedded context's buffers.
 */
static void
fiber_free(void *ptr)
{
    RUBY_FREE_ENTER("fiber");
    if (ptr) {
        rb_fiber_t *fib = ptr;
        /* only non-root fibers own a local_storage table (see fiber_init) */
        if (fib->cont.type != ROOT_FIBER_CONTEXT &&
            fib->cont.saved_thread.local_storage) {
            st_free_table(fib->cont.saved_thread.local_storage);
        }
        fiber_link_remove(fib);

        cont_free(&fib->cont);
    }
    RUBY_FREE_LEAVE("fiber");
}
00211
00212 static size_t
00213 fiber_memsize(const void *ptr)
00214 {
00215 const rb_fiber_t *fib = ptr;
00216 size_t size = 0;
00217 if (ptr) {
00218 size = sizeof(*fib);
00219 if (fib->cont.type != ROOT_FIBER_CONTEXT) {
00220 size += st_memsize(fib->cont.saved_thread.local_storage);
00221 }
00222 size += cont_memsize(&fib->cont);
00223 }
00224 return size;
00225 }
00226
/*
 * Copy the live portion of the current machine (C) stack into the
 * context's heap buffer so the context can later be restored with a
 * longjmp.  Handles both stack growth directions, reusing the existing
 * buffer via realloc on repeated captures.  On ia64 the register
 * backing store is saved as well.
 */
static void
cont_save_machine_stack(rb_thread_t *th, rb_context_t *cont)
{
    size_t size;
    rb_thread_t *sth = &cont->saved_thread;

    /* pin the current stack extent into th before measuring it */
    SET_MACHINE_STACK_END(&th->machine_stack_end);
#ifdef __ia64
    th->machine_register_stack_end = rb_ia64_bsp();
#endif

    /* pick the lower address as the copy source regardless of growth direction */
    if (th->machine_stack_start > th->machine_stack_end) {
        size = cont->machine_stack_size = th->machine_stack_start - th->machine_stack_end;
        cont->machine_stack_src = th->machine_stack_end;
    }
    else {
        size = cont->machine_stack_size = th->machine_stack_end - th->machine_stack_start;
        cont->machine_stack_src = th->machine_stack_start;
    }

    /* reuse the buffer when re-capturing the same context */
    if (cont->machine_stack) {
        REALLOC_N(cont->machine_stack, VALUE, size);
    }
    else {
        cont->machine_stack = ALLOC_N(VALUE, size);
    }

    /* spill register windows (SPARC) so the stack copy is complete */
    FLUSH_REGISTER_WINDOWS;
    MEMCPY(cont->machine_stack, cont->machine_stack_src, VALUE, size);

#ifdef __ia64
    rb_ia64_flushrs();
    size = cont->machine_register_stack_size = th->machine_register_stack_end - th->machine_register_stack_start;
    cont->machine_register_stack_src = th->machine_register_stack_start;
    if (cont->machine_register_stack) {
        REALLOC_N(cont->machine_register_stack, VALUE, size);
    }
    else {
        cont->machine_register_stack = ALLOC_N(VALUE, size);
    }

    MEMCPY(cont->machine_register_stack, cont->machine_register_stack_src, VALUE, size);
#endif

    /* the snapshot must not claim a live machine stack of its own */
    sth->machine_stack_start = sth->machine_stack_end = 0;
#ifdef __ia64
    sth->machine_register_stack_start = sth->machine_register_stack_end = 0;
#endif
}
00276
/* GC hooks for Continuation objects. */
static const rb_data_type_t cont_data_type = {
    "continuation",
    cont_mark, cont_free, cont_memsize,
};
00281
/*
 * Snapshot the thread state into the context.  Local storage and the
 * machine-stack bounds are cleared: the snapshot must not own the live
 * thread's table or stack (see cont_save_machine_stack / fiber_init).
 */
static void
cont_init(rb_context_t *cont, rb_thread_t *th)
{
    /* save thread context */
    cont->saved_thread = *th;
    cont->saved_thread.local_storage = 0;
    cont->saved_thread.machine_stack_start = cont->saved_thread.machine_stack_end = 0;
}
00290
/*
 * Allocate a new Continuation object of the given class and initialize
 * its context from the current thread.  Returns the inner context
 * pointer; the wrapping object is reachable via cont->self.
 */
static rb_context_t *
cont_new(VALUE klass)
{
    rb_context_t *cont;
    volatile VALUE contval;
    rb_thread_t *th = GET_THREAD();

    THREAD_MUST_BE_RUNNING(th);
    contval = TypedData_Make_Struct(klass, rb_context_t, &cont_data_type, cont);
    cont->self = contval;
    cont_init(cont, th);
    return cont;
}
00304
00305 void rb_vm_stack_to_heap(rb_thread_t *th);
00306
/*
 * Capture the current continuation.  On the initial (capturing) call,
 * sets *stat = 0 and returns the Continuation object; when the
 * continuation is later invoked, the saved setjmp returns non-zero,
 * *stat is set to 1, and the value passed to Continuation#call is
 * returned instead.  An argc of -1 signals "raise this value".
 */
static VALUE
cont_capture(volatile int *stat)
{
    rb_context_t *cont;
    rb_thread_t *th = GET_THREAD(), *sth;
    volatile VALUE contval;

    THREAD_MUST_BE_RUNNING(th);
    rb_vm_stack_to_heap(th);
    cont = cont_new(rb_cContinuation);
    contval = cont->self;
    sth = &cont->saved_thread;

#ifdef CAPTURE_JUST_VALID_VM_STACK
    /* copy only the live slices: [stack .. sp (+ mark_stack_len — presumably
     * in-flight mark-stack slots; TODO confirm)] and [cfp .. stack top] */
    cont->vm_stack_slen = th->cfp->sp + th->mark_stack_len - th->stack;
    cont->vm_stack_clen = th->stack + th->stack_size - (VALUE*)th->cfp;
    cont->vm_stack = ALLOC_N(VALUE, cont->vm_stack_slen + cont->vm_stack_clen);
    MEMCPY(cont->vm_stack, th->stack, VALUE, cont->vm_stack_slen);
    MEMCPY(cont->vm_stack + cont->vm_stack_slen, (VALUE*)th->cfp, VALUE, cont->vm_stack_clen);
#else
    cont->vm_stack = ALLOC_N(VALUE, th->stack_size);
    MEMCPY(cont->vm_stack, th->stack, VALUE, th->stack_size);
#endif
    /* the snapshot must not alias the live thread's VM stack */
    sth->stack = 0;

    cont_save_machine_stack(th, cont);

    if (ruby_setjmp(cont->jmpbuf)) {
        /* continuation was invoked: longjmp landed back here */
        VALUE value;

        value = cont->value;
        if (cont->argc == -1) rb_exc_raise(value);
        cont->value = Qnil;
        *stat = 1;
        return value;
    }
    else {
        /* initial capture */
        *stat = 0;
        return cont->self;
    }
}
00348
00349 NOINLINE(NORETURN(static void cont_restore_1(rb_context_t *)));
00350
/*
 * Second half of context restoration: copy the saved VM stack and
 * thread state back into the live thread, restore the saved machine
 * stack over the current one, and longjmp into the context.  Must be
 * entered from a frame below the restored region (cont_restore_0
 * guarantees that by padding the stack).  Never returns.
 */
static void
cont_restore_1(rb_context_t *cont)
{
    rb_thread_t *th = GET_THREAD(), *sth = &cont->saved_thread;

    /* restore thread context */
    if (cont->type == CONTINUATION_CONTEXT) {
        /* continuation: restore the VM stack of the fiber it was captured on */
        VALUE fib;

        th->fiber = sth->fiber;
        fib = th->fiber ? th->fiber : th->root_fiber;

        if (fib) {
            rb_fiber_t *fcont;
            GetFiberPtr(fib, fcont);
            th->stack_size = fcont->cont.saved_thread.stack_size;
            th->stack = fcont->cont.saved_thread.stack;
        }
#ifdef CAPTURE_JUST_VALID_VM_STACK
        /* bottom slice to the stack base, control-frame slice to the top */
        MEMCPY(th->stack, cont->vm_stack, VALUE, cont->vm_stack_slen);
        MEMCPY(th->stack + sth->stack_size - cont->vm_stack_clen,
               cont->vm_stack + cont->vm_stack_slen, VALUE, cont->vm_stack_clen);
#else
        MEMCPY(th->stack, cont->vm_stack, VALUE, sth->stack_size);
#endif
    }
    else {
        /* fiber: switch the thread onto the fiber's own VM stack and storage */
        th->stack = sth->stack;
        th->stack_size = sth->stack_size;
        th->local_storage = sth->local_storage;
        th->fiber = cont->self;
    }

    th->cfp = sth->cfp;
    th->safe_level = sth->safe_level;
    th->raised_flag = sth->raised_flag;
    th->state = sth->state;
    th->status = sth->status;
    th->tag = sth->tag;
    th->protect_tag = sth->protect_tag;
    th->errinfo = sth->errinfo;
    th->first_proc = sth->first_proc;

#ifdef _M_AMD64
    {
        /* Win64: workaround — refresh the jmpbuf's Frame field with the
         * current frame so the MSVC longjmp unwinder accepts it. */
        jmp_buf buf;
        setjmp(buf);
        ((_JUMP_BUFFER*)(&cont->jmpbuf))->Frame =
            ((_JUMP_BUFFER*)(&buf))->Frame;
    }
#endif
    if (cont->machine_stack_src) {
        /* overwrite the live machine stack with the saved copy */
        FLUSH_REGISTER_WINDOWS;
        MEMCPY(cont->machine_stack_src, cont->machine_stack,
               VALUE, cont->machine_stack_size);
    }

#ifdef __ia64
    if (cont->machine_register_stack_src) {
        MEMCPY(cont->machine_register_stack_src, cont->machine_register_stack,
               VALUE, cont->machine_register_stack_size);
    }
#endif

    ruby_longjmp(cont->jmpbuf, 1);
}
00421
00422 NORETURN(NOINLINE(static void cont_restore_0(rb_context_t *, VALUE *)));
00423
#ifdef __ia64
/*
 * ia64 only: grow the register backing store (RSE) past the region that
 * cont_restore_1 will overwrite, by recursing until the current bsp is
 * beyond the saved range, then fall through to cont_restore_0.  The
 * C()/E() volatile dummies force the compiler to keep many registers
 * live so each recursion actually consumes backing-store space.
 */
#define C(a) rse_##a##0, rse_##a##1, rse_##a##2, rse_##a##3, rse_##a##4
#define E(a) rse_##a##0= rse_##a##1= rse_##a##2= rse_##a##3= rse_##a##4
static volatile int C(a), C(b), C(c), C(d), C(e);
static volatile int C(f), C(g), C(h), C(i), C(j);
static volatile int C(k), C(l), C(m), C(n), C(o);
static volatile int C(p), C(q), C(r), C(s), C(t);
#if 0
{}  /* balance the C()-generated braces for editors; never compiled */
#endif
int rb_dummy_false = 0;
NORETURN(NOINLINE(static void register_stack_extend(rb_context_t *, VALUE *, VALUE *)));
static void
register_stack_extend(rb_context_t *cont, VALUE *vp, VALUE *curr_bsp)
{
    if (rb_dummy_false) {
        /* use registers as much as possible (never executed at runtime) */
        E(a) = E(b) = E(c) = E(d) = E(e) =
        E(f) = E(g) = E(h) = E(i) = E(j) =
        E(k) = E(l) = E(m) = E(n) = E(o) =
        E(p) = E(q) = E(r) = E(s) = E(t) = 0;
        E(a) = E(b) = E(c) = E(d) = E(e) =
        E(f) = E(g) = E(h) = E(i) = E(j) =
        E(k) = E(l) = E(m) = E(n) = E(o) =
        E(p) = E(q) = E(r) = E(s) = E(t) = 0;
    }
    if (curr_bsp < cont->machine_register_stack_src+cont->machine_register_stack_size) {
        register_stack_extend(cont, vp, (VALUE*)rb_ia64_bsp());
    }
    cont_restore_0(cont, vp);
}
#undef C
#undef E
#endif
00458
/*
 * First half of context restoration: make sure the current stack frame
 * lies outside the machine-stack region that cont_restore_1 will
 * overwrite.  If it does not, grow the stack past that region — via
 * alloca when available, otherwise by recursing with a padded local
 * array — then call cont_restore_1.  Handles unknown, downward, and
 * upward stack growth directions.  Never returns.
 */
static void
cont_restore_0(rb_context_t *cont, VALUE *addr_in_prev_frame)
{
    if (cont->machine_stack_src) {
#ifdef HAVE_ALLOCA
#define STACK_PAD_SIZE 1
#else
#define STACK_PAD_SIZE 1024
#endif
        VALUE space[STACK_PAD_SIZE];

#if !STACK_GROW_DIRECTION
        if (addr_in_prev_frame > &space[0]) {
            /* stack grows downward on this machine */
#endif
#if STACK_GROW_DIRECTION <= 0
            volatile VALUE *const end = cont->machine_stack_src;
            if (&space[0] > end) {
# ifdef HAVE_ALLOCA
                /* one alloca jumps below the restore region in a single step */
                volatile VALUE *sp = ALLOCA_N(VALUE, &space[0] - end);
                (void)sp;
# else
                /* no alloca: recurse, consuming STACK_PAD_SIZE per call */
                cont_restore_0(cont, &space[0]);
# endif
            }
#endif
#if !STACK_GROW_DIRECTION
        }
        else {
            /* stack grows upward on this machine */
#endif
#if STACK_GROW_DIRECTION >= 0
            volatile VALUE *const end = cont->machine_stack_src + cont->machine_stack_size;
            if (&space[STACK_PAD_SIZE] < end) {
# ifdef HAVE_ALLOCA
                volatile VALUE *sp = ALLOCA_N(VALUE, end - &space[STACK_PAD_SIZE]);
                (void)sp;
# else
                cont_restore_0(cont, &space[STACK_PAD_SIZE-1]);
# endif
            }
#endif
#if !STACK_GROW_DIRECTION
        }
#endif
    }
    cont_restore_1(cont);
}
#ifdef __ia64
/* on ia64 the register backing store must be extended first */
#define cont_restore_0(cont, vp) register_stack_extend(cont, vp, (VALUE*)rb_ia64_bsp());
#endif
00510
00511
00512
00513
00514
00515
00516
00517
00518
00519
00520
00521
00522
00523
00524
00525
00526
00527
00528
00529
00530
00531
00532
00533
00534
00535
00536
00537
00538
00539
00540
00541
00542
00543
00544
00545
00546
00547
00548
00549
00550
00551
00552
00553
00554
00555
00556
00557
00558
00559
00560
00561
00562
00563
00564
00565
00566
00567
00568
00569
00570
00571
00572 static VALUE
00573 rb_callcc(VALUE self)
00574 {
00575 volatile int called;
00576 volatile VALUE val = cont_capture(&called);
00577
00578 if (called) {
00579 return val;
00580 }
00581 else {
00582 return rb_yield(val);
00583 }
00584 }
00585
00586 static VALUE
00587 make_passing_arg(int argc, VALUE *argv)
00588 {
00589 switch(argc) {
00590 case 0:
00591 return Qnil;
00592 case 1:
00593 return argv[0];
00594 default:
00595 return rb_ary_new4(argc, argv);
00596 }
00597 }
00598
00599
00600
00601
00602
00603
00604
00605
00606
00607
00608
00609
00610
00611
00612
00613
00614
/*
 * Continuation#call — jump back into the captured context, making the
 * arguments the return value of the original callcc.  Rejects calls
 * from a different thread, across an rb_protect barrier, or from a
 * different fiber than the one the continuation was captured on.
 * Never returns normally.
 */
static VALUE
rb_cont_call(int argc, VALUE *argv, VALUE contval)
{
    rb_context_t *cont;
    rb_thread_t *th = GET_THREAD();
    GetContPtr(contval, cont);

    if (cont->saved_thread.self != th->self) {
        rb_raise(rb_eRuntimeError, "continuation called across threads");
    }
    if (cont->saved_thread.protect_tag != th->protect_tag) {
        rb_raise(rb_eRuntimeError, "continuation called across stack rewinding barrier");
    }
    if (cont->saved_thread.fiber) {
        rb_fiber_t *fcont;
        GetFiberPtr(cont->saved_thread.fiber, fcont);

        if (th->fiber != cont->saved_thread.fiber) {
            rb_raise(rb_eRuntimeError, "continuation called across fiber");
        }
    }

    cont->argc = argc;
    cont->value = make_passing_arg(argc, argv);

    /* jump; cont_capture's setjmp will return non-zero */
    cont_restore_0(cont, &contval);
    return Qnil; /* not reached */
}
00643
00644
00645
00646
00647
00648
00649
00650
00651
00652
00653
00654
00655
00656
00657
00658
00659
00660
00661
00662
00663
00664
00665
00666
00667
00668
00669
00670
00671
00672
00673
00674
00675
00676
00677
00678
00679
00680
00681
00682
00683
00684
00685
00686
00687
00688
00689
00690
00691
00692
00693
00694
00695
00696
00697
00698
00699
00700
00701
00702
00703
00704
00705
00706
00707
00708
00709
/* VM stack size (in VALUEs) allocated to each new fiber. */
#define FIBER_VM_STACK_SIZE (4 * 1024)

/* GC hooks for Fiber objects. */
static const rb_data_type_t fiber_data_type = {
    "fiber",
    fiber_mark, fiber_free, fiber_memsize,
};
00716
/*
 * Fiber allocation function.  DATA_PTR stays 0 until fiber_t_alloc runs;
 * GetFiberPtr treats such an object as "uninitialized fiber".
 */
static VALUE
fiber_alloc(VALUE klass)
{
    return TypedData_Wrap_Struct(klass, &fiber_data_type, 0);
}
00722
/*
 * Allocate and zero an rb_fiber_t for an existing Fiber object, snapshot
 * the current thread into its context, and attach it via DATA_PTR.
 */
static rb_fiber_t*
fiber_t_alloc(VALUE fibval)
{
    rb_fiber_t *fib;
    rb_thread_t *th = GET_THREAD();

    THREAD_MUST_BE_RUNNING(th);
    fib = ALLOC(rb_fiber_t);
    memset(fib, 0, sizeof(rb_fiber_t));
    fib->cont.self = fibval;
    fib->cont.type = FIBER_CONTEXT;
    cont_init(&fib->cont, th);
    fib->prev = Qnil;
    fib->status = CREATED;

    /* from now on the object owns the struct and GC will mark/free it */
    DATA_PTR(fibval) = fib;

    return fib;
}
00742
/*
 * Initialize a Fiber with its block: allocate a fresh VM stack, build a
 * dummy bottom control frame by hand, give the fiber its own local
 * storage, and store the proc to run on first resume.  The jmpbuf is
 * seeded from the thread's root jmpbuf; fiber_store installs the real
 * resume point on the first switch.
 */
static VALUE
fiber_init(VALUE fibval, VALUE proc)
{
    rb_fiber_t *fib = fiber_t_alloc(fibval);
    rb_context_t *cont = &fib->cont;
    rb_thread_t *th = &cont->saved_thread;

    /* clear the stack fields inherited from the creating thread before
     * fiber_link_join can trigger GC marking of this half-built fiber */
    cont->vm_stack = 0;

    th->stack = 0;
    th->stack_size = 0;

    fiber_link_join(fib);

    th->stack_size = FIBER_VM_STACK_SIZE;
    th->stack = ALLOC_N(VALUE, th->stack_size);

    /* hand-build the bottom control frame at the top of the new stack */
    th->cfp = (void *)(th->stack + th->stack_size);
    th->cfp--;
    th->cfp->pc = 0;
    th->cfp->sp = th->stack + 1;
    th->cfp->bp = 0;
    th->cfp->lfp = th->stack;
    *th->cfp->lfp = 0;
    th->cfp->dfp = th->stack;
    th->cfp->self = Qnil;
    th->cfp->flag = 0;
    th->cfp->iseq = 0;
    th->cfp->proc = 0;
    th->cfp->block_iseq = 0;
    th->cfp->me = 0;
    th->tag = 0;
    th->local_storage = st_init_numtable();

    th->first_proc = proc;

    /* placeholder jump target until the first real switch */
    MEMCPY(&cont->jmpbuf, &th->root_jmpbuf, rb_jmpbuf_t, 1);

    return fibval;
}
00785
00786
/* Fiber#initialize — requires a block, which becomes the fiber's body. */
static VALUE
rb_fiber_init(VALUE fibval)
{
    return fiber_init(fibval, rb_block_proc());
}
00792
/* C API: create a Fiber whose body is the C function func called with obj. */
VALUE
rb_fiber_new(VALUE (*func)(ANYARGS), VALUE obj)
{
    return fiber_init(fiber_alloc(rb_cFiber), rb_proc_new(func, obj));
}
00798
/*
 * Pick the fiber that Fiber.yield (or fiber termination) should switch
 * back to: the recorded resumer if any (consumed here), otherwise the
 * root fiber.  Raises FiberError when called on the root fiber itself;
 * rb_raise does not return, so that path never falls through.
 */
static VALUE
return_fiber(void)
{
    rb_fiber_t *fib;
    VALUE curr = rb_fiber_current();
    GetFiberPtr(curr, fib);

    if (fib->prev == Qnil) {
        rb_thread_t *th = GET_THREAD();

        if (th->root_fiber != curr) {
            return th->root_fiber;
        }
        else {
            rb_raise(rb_eFiberError, "can't yield from root fiber");
        }
    }
    else {
        /* consume the resumer link so a later resume is not "double resume" */
        VALUE prev = fib->prev;
        fib->prev = Qnil;
        return prev;
    }
}
00822
00823 VALUE rb_fiber_transfer(VALUE fib, int argc, VALUE *argv);
00824
/*
 * Mark the fiber dead and hand its final value to the fiber we should
 * return to.  Does not come back to this fiber.
 */
static void
rb_fiber_terminate(rb_fiber_t *fib)
{
    VALUE value = fib->cont.value;
    fib->status = TERMINATED;
    rb_fiber_transfer(return_fiber(), 1, &value);
}
00832
/*
 * Entry point executed inside a fiber on its first resume: invoke the
 * fiber's proc with the arguments passed to resume, then terminate the
 * fiber with the proc's result.  Exceptions are stashed on the thread
 * (thrown_errinfo) and re-raised in the resuming context via the
 * interrupt flag.  Control never falls off the end.
 */
void
rb_fiber_start(void)
{
    rb_thread_t *th = GET_THREAD();
    rb_fiber_t *fib;
    rb_context_t *cont;
    rb_proc_t *proc;
    int state;

    GetFiberPtr(th->fiber, fib);
    cont = &fib->cont;

    TH_PUSH_TAG(th);
    if ((state = EXEC_TAG()) == 0) {
        int argc;
        VALUE *argv, args;
        GetProcPtr(cont->saved_thread.first_proc, proc);
        args = cont->value;
        /* >1 args arrive as an Array (make_passing_arg); else pass as-is */
        argv = (argc = cont->argc) > 1 ? RARRAY_PTR(args) : &args;
        cont->value = Qnil;
        th->errinfo = Qnil;
        th->local_lfp = proc->block.lfp;
        th->local_svar = Qnil;

        fib->status = RUNNING;
        cont->value = rb_vm_invoke_proc(th, proc, proc->block.self, argc, argv, 0);
    }
    TH_POP_TAG();

    if (state) {
        /* propagate the exception/jump into the fiber that resumes next */
        if (state == TAG_RAISE) {
            th->thrown_errinfo = th->errinfo;
        }
        else {
            th->thrown_errinfo =
                rb_vm_make_jump_tag_but_local_jump(state, th->errinfo);
        }
        RUBY_VM_SET_INTERRUPT(th);
    }

    rb_fiber_terminate(fib);
    rb_bug("rb_fiber_start: unreachable");
}
00876
/*
 * Create the implicit fiber representing the thread's original context.
 * It is its own circular-list neighbor and skips fiber_init (no separate
 * VM stack or local storage of its own).
 */
static rb_fiber_t *
root_fiber_alloc(rb_thread_t *th)
{
    rb_fiber_t *fib;

    /* no need to allocate vm stack */
    fib = fiber_t_alloc(fiber_alloc(rb_cFiber));
    fib->cont.type = ROOT_FIBER_CONTEXT;
    fib->prev_fiber = fib->next_fiber = fib;

    return fib;
}
00889
/*
 * Return the currently running fiber, lazily creating the root fiber
 * the first time any fiber machinery is used on this thread.
 */
VALUE
rb_fiber_current(void)
{
    rb_thread_t *th = GET_THREAD();
    if (th->fiber == 0) {
        /* save root */
        rb_fiber_t *fib = root_fiber_alloc(th);
        th->root_fiber = th->fiber = fib->cont.self;
    }
    return th->fiber;
}
00901
/*
 * Save the outgoing fiber's state (thread snapshot + machine stack) and
 * set its setjmp resume point.  Returns Qundef on the saving pass — the
 * caller then jumps into next_fib — and the transferred value when this
 * fiber is later resumed via longjmp.
 */
static VALUE
fiber_store(rb_fiber_t *next_fib)
{
    rb_thread_t *th = GET_THREAD();
    rb_fiber_t *fib;

    if (th->fiber) {
        GetFiberPtr(th->fiber, fib);
        fib->cont.saved_thread = *th;
    }
    else {
        /* create root fiber on first switch from the thread's main context */
        fib = root_fiber_alloc(th);
        th->root_fiber = th->fiber = fib->cont.self;
    }

    cont_save_machine_stack(th, &fib->cont);

    if (ruby_setjmp(fib->cont.jmpbuf)) {
        /* resumed: re-read the (possibly different) current fiber */
        GetFiberPtr(th->fiber, fib);
        if (fib->cont.argc == -1) rb_exc_raise(fib->cont.value);
        return fib->cont.value;
    }
    else {
        return Qundef;
    }
}
00930
/*
 * Core fiber switch used by both resume (is_resume=1, records the
 * resumer in fib->prev) and transfer (is_resume=0).  Validates the
 * target, stores the arguments on its context, saves the current fiber
 * via fiber_store, and jumps.  Returns the value handed back when this
 * fiber next runs.  Switching to a dead fiber redirects control to the
 * root (or previous) fiber carrying a "dead fiber called" FiberError.
 */
static inline VALUE
fiber_switch(VALUE fibval, int argc, VALUE *argv, int is_resume)
{
    VALUE value;
    rb_fiber_t *fib;
    rb_context_t *cont;
    rb_thread_t *th = GET_THREAD();

    GetFiberPtr(fibval, fib);
    cont = &fib->cont;

    if (cont->saved_thread.self != th->self) {
        rb_raise(rb_eFiberError, "fiber called across threads");
    }
    else if (cont->saved_thread.protect_tag != th->protect_tag) {
        rb_raise(rb_eFiberError, "fiber called across stack rewinding barrier");
    }
    else if (fib->status == TERMINATED) {
        value = rb_exc_new2(rb_eFiberError, "dead fiber called");
        if (th->fiber != fibval) {
            /* current fiber is alive: raise in place, unless it is also
             * dead, in which case fall back to the root fiber */
            GetFiberPtr(th->fiber, fib);
            if (fib->status != TERMINATED) rb_exc_raise(value);
            fibval = th->root_fiber;
        }
        else {
            fibval = fib->prev;
            if (NIL_P(fibval)) fibval = th->root_fiber;
        }
        GetFiberPtr(fibval, fib);
        cont = &fib->cont;
        cont->argc = -1;            /* -1 => raise cont->value on arrival */
        cont->value = value;
        cont_restore_0(cont, &value);
        /* unreachable */
    }

    if (is_resume) {
        fib->prev = rb_fiber_current();
    }

    cont->argc = argc;
    cont->value = make_passing_arg(argc, argv);

    if ((value = fiber_store(fib)) == Qundef) {
        /* saving pass: actually jump into the target fiber */
        cont_restore_0(cont, &value);
        rb_bug("rb_fiber_resume: unreachable");
    }

    RUBY_VM_CHECK_INTS();

    return value;
}
00982
/* Transfer control to fib without recording a resumer link. */
VALUE
rb_fiber_transfer(VALUE fib, int argc, VALUE *argv)
{
    return fiber_switch(fib, argc, argv, 0);
}
00988
/*
 * Resume fibval, recording the current fiber as the one to return to on
 * yield.  A fiber that already has a pending resumer cannot be resumed
 * again until it yields ("double resume").
 */
VALUE
rb_fiber_resume(VALUE fibval, int argc, VALUE *argv)
{
    rb_fiber_t *fib;
    GetFiberPtr(fibval, fib);

    if (fib->prev != Qnil) {
        rb_raise(rb_eFiberError, "double resume");
    }

    return fiber_switch(fibval, argc, argv, 1);
}
01001
/* Yield from the current fiber back to its resumer (or the root fiber). */
VALUE
rb_fiber_yield(int argc, VALUE *argv)
{
    return rb_fiber_transfer(return_fiber(), argc, argv);
}
01007
01008
01009
01010
01011
01012
01013
01014
01015
01016 VALUE
01017 rb_fiber_alive_p(VALUE fibval)
01018 {
01019 rb_fiber_t *fib;
01020 GetFiberPtr(fibval, fib);
01021 return fib->status != TERMINATED ? Qtrue : Qfalse;
01022 }
01023
01024
01025
01026
01027
01028
01029
01030
01031
01032
01033
01034
01035
01036
01037
01038
/* Fiber#resume — thin wrapper over rb_fiber_resume. */
static VALUE
rb_fiber_m_resume(int argc, VALUE *argv, VALUE fib)
{
    return rb_fiber_resume(fib, argc, argv);
}
01044
01045
01046
01047
01048
01049
01050
01051
01052
01053
01054
01055
01056
01057
01058
01059
01060
/* Fiber#transfer — thin wrapper over rb_fiber_transfer. */
static VALUE
rb_fiber_m_transfer(int argc, VALUE *argv, VALUE fib)
{
    return rb_fiber_transfer(fib, argc, argv);
}
01066
01067
01068
01069
01070
01071
01072
01073
01074
01075
01076
/* Fiber.yield — class-level wrapper over rb_fiber_yield. */
static VALUE
rb_fiber_s_yield(int argc, VALUE *argv, VALUE klass)
{
    return rb_fiber_yield(argc, argv);
}
01082
01083
01084
01085
01086
01087
01088
01089
01090
/* Fiber.current — class-level wrapper over rb_fiber_current. */
static VALUE
rb_fiber_s_current(VALUE klass)
{
    return rb_fiber_current();
}
01096
01097
01098
01099
01100
01101
01102
01103
01104
01105
01106
01107
01108
01109
01110
01111
/*
 * Register the core Fiber class and FiberError.  The coroutine-style
 * methods (transfer/alive?/current) are added separately by
 * ruby_Init_Fiber_as_Coroutine below.
 */
void
Init_Cont(void)
{
    rb_cFiber = rb_define_class("Fiber", rb_cObject);
    rb_define_alloc_func(rb_cFiber, fiber_alloc);
    rb_eFiberError = rb_define_class("FiberError", rb_eStandardError);
    rb_define_singleton_method(rb_cFiber, "yield", rb_fiber_s_yield, -1);
    rb_define_method(rb_cFiber, "initialize", rb_fiber_init, 0);
    rb_define_method(rb_cFiber, "resume", rb_fiber_m_resume, -1);
}
01122
/*
 * Register Continuation and Kernel#callcc.  Continuations cannot be
 * allocated or constructed directly; only callcc creates them.
 */
void
ruby_Init_Continuation_body(void)
{
    rb_cContinuation = rb_define_class("Continuation", rb_cObject);
    rb_undef_alloc_func(rb_cContinuation);
    rb_undef_method(CLASS_OF(rb_cContinuation), "new");
    rb_define_method(rb_cContinuation, "call", rb_cont_call, -1);
    rb_define_method(rb_cContinuation, "[]", rb_cont_call, -1);
    rb_define_global_function("callcc", rb_callcc, 0);
}
01133
/* Add the coroutine-style Fiber API: transfer, alive?, Fiber.current. */
void
ruby_Init_Fiber_as_Coroutine(void)
{
    rb_define_method(rb_cFiber, "transfer", rb_fiber_m_transfer, -1);
    rb_define_method(rb_cFiber, "alive?", rb_fiber_alive_p, 0);
    rb_define_singleton_method(rb_cFiber, "current", rb_fiber_s_current, 0);
}
01141