Ruby 1.9.3p551 (2014-11-13 revision 48407)
vm.c
/**********************************************************************

  vm.c -

  $Author: usa $

  Copyright (C) 2004-2007 Koichi Sasada

**********************************************************************/

#include "ruby/ruby.h"
#include "ruby/vm.h"
#include "ruby/st.h"
#include "ruby/encoding.h"
#include "internal.h"

#include "gc.h"
#include "vm_core.h"
#include "iseq.h"
#include "eval_intern.h"

#include "vm_insnhelper.h"
#include "vm_insnhelper.c"
#include "vm_exec.h"
#include "vm_exec.c"

#include "vm_method.c"
#include "vm_eval.c"

#include <assert.h>

#define BUFSIZE 0x100
#define PROCDEBUG 0

VALUE rb_cRubyVM;
VALUE rb_cThread;
VALUE rb_cEnv;
VALUE rb_mRubyVMFrozenCore;

VALUE ruby_vm_global_state_version = 1;
VALUE ruby_vm_const_missing_count = 0;

char ruby_vm_redefined_flag[BOP_LAST_];

rb_thread_t *ruby_current_thread = 0;
rb_vm_t *ruby_current_vm = 0;

static void thread_free(void *ptr);

void vm_analysis_operand(int insn, int n, VALUE op);
void vm_analysis_register(int reg, int isset);
void vm_analysis_insn(int insn);

/*
 * TODO: replace with better interface at the next release.
 *
 * these functions are exported just as a workaround for ruby-debug
 * for the time being.
 */

void
rb_vm_change_state(void)
{
    INC_VM_STATE_VERSION();
}

static void vm_clear_global_method_cache(void);

static void
vm_clear_all_inline_method_caches(void)
{
    /* TODO: Clear all inline cache entries in all iseqs.
             How to iterate all iseqs in sweep phase?
             rb_objspace_each_objects() doesn't work at sweep phase.
     */
}

static void
vm_clear_all_cache(void)
{
    vm_clear_global_method_cache();
    vm_clear_all_inline_method_caches();
    ruby_vm_global_state_version = 1;
}

void
rb_vm_inc_const_missing_count(void)
{
    ruby_vm_const_missing_count += 1;
}

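/* Note: bumping the state version is what invalidates cached method
   lookups; an inline cache entry records the version it was filled at
   and is treated as stale once it no longer matches
   ruby_vm_global_state_version (see vm_insnhelper.c), which is why
   method definition, undef and alias all funnel through this counter. */
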
/* control stack frame */

static inline VALUE
rb_vm_set_finish_env(rb_thread_t * th)
{
    vm_push_frame(th, 0, VM_FRAME_MAGIC_FINISH,
                  Qnil, th->cfp->lfp[0], 0,
                  th->cfp->sp, 0, 1);
    th->cfp->pc = (VALUE *)&finish_insn_seq[0];
    return Qtrue;
}

static void
vm_set_top_stack(rb_thread_t * th, VALUE iseqval)
{
    rb_iseq_t *iseq;
    GetISeqPtr(iseqval, iseq);

    if (iseq->type != ISEQ_TYPE_TOP) {
        rb_raise(rb_eTypeError, "Not a toplevel InstructionSequence");
    }

    /* for return */
    rb_vm_set_finish_env(th);

    vm_push_frame(th, iseq, VM_FRAME_MAGIC_TOP,
                  th->top_self, 0, iseq->iseq_encoded,
                  th->cfp->sp, 0, iseq->local_size);

    CHECK_STACK_OVERFLOW(th->cfp, iseq->stack_max);
}

static void
vm_set_eval_stack(rb_thread_t * th, VALUE iseqval, const NODE *cref)
{
    rb_iseq_t *iseq;
    rb_block_t * const block = th->base_block;
    GetISeqPtr(iseqval, iseq);

    /* for return */
    rb_vm_set_finish_env(th);
    vm_push_frame(th, iseq, VM_FRAME_MAGIC_EVAL, block->self,
                  GC_GUARDED_PTR(block->dfp), iseq->iseq_encoded,
                  th->cfp->sp, block->lfp, iseq->local_size);

    if (cref) {
        th->cfp->dfp[-1] = (VALUE)cref;
    }

    CHECK_STACK_OVERFLOW(th->cfp, iseq->stack_max);
}

static void
vm_set_main_stack(rb_thread_t *th, VALUE iseqval)
{
    VALUE toplevel_binding = rb_const_get(rb_cObject, rb_intern("TOPLEVEL_BINDING"));
    rb_binding_t *bind;
    rb_iseq_t *iseq;
    rb_env_t *env;

    GetBindingPtr(toplevel_binding, bind);
    GetEnvPtr(bind->env, env);
    th->base_block = &env->block;
    vm_set_eval_stack(th, iseqval, 0);
    th->base_block = 0;

    /* save binding */
    GetISeqPtr(iseqval, iseq);
    if (bind && iseq->local_size > 0) {
        bind->env = rb_vm_make_env_object(th, th->cfp);
    }

    CHECK_STACK_OVERFLOW(th->cfp, iseq->stack_max);
}

rb_control_frame_t *
rb_vm_get_ruby_level_next_cfp(rb_thread_t *th, rb_control_frame_t *cfp)
{
    while (!RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(th, cfp)) {
        if (RUBY_VM_NORMAL_ISEQ_P(cfp->iseq)) {
            return cfp;
        }
        cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
    }
    return 0;
}

static rb_control_frame_t *
vm_get_ruby_level_caller_cfp(rb_thread_t *th, rb_control_frame_t *cfp)
{
    if (RUBY_VM_NORMAL_ISEQ_P(cfp->iseq)) {
        return cfp;
    }

    cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);

    while (!RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(th, cfp)) {
        if (RUBY_VM_NORMAL_ISEQ_P(cfp->iseq)) {
            return cfp;
        }

        if ((cfp->flag & VM_FRAME_FLAG_PASSED) == 0) {
            break;
        }
        cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
    }
    return 0;
}

/* at exit */

void
ruby_vm_at_exit(void (*func)(rb_vm_t *))
{
    rb_ary_push((VALUE)&GET_VM()->at_exit, (VALUE)func);
}

static void
ruby_vm_run_at_exit_hooks(rb_vm_t *vm)
{
    VALUE hook = (VALUE)&vm->at_exit;

    while (RARRAY_LEN(hook) > 0) {
        typedef void rb_vm_at_exit_func(rb_vm_t*);
        rb_vm_at_exit_func *func = (rb_vm_at_exit_func*)rb_ary_pop(hook);
        (*func)(vm);
    }
    rb_ary_free(hook);
}

/* Env */

/*
  env{
    env[0] // special (block or prev env)
    env[1] // env object
    env[2] // prev env val
  };
 */
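
/*
   Concretely (see vm_make_env_each() below): for an iseq with
   local_size == n, the heap env copies the n locals plus the dfp slot
   and appends two more entries, so

     env->env[0 .. n-1]  locals
     env->env[n]         env[0] above -- "special" slot copied from *dfp
     env->env[n+1]       env[1] above -- the VM::Env object itself
     env->env[n+2]       env[2] above -- previous env VALUE, or 0

   which is why env_size is allocated as local_size + 1 + 2.
 */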

#define ENV_IN_HEAP_P(th, env) \
  (!((th)->stack < (env) && (env) < ((th)->stack + (th)->stack_size)))
#define ENV_VAL(env) ((env)[1])
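/* A dfp that no longer points into the thread's VM stack can only
   point into the env array of a heap-allocated rb_env_t, hence the
   bounds test above. */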

static void
env_mark(void * const ptr)
{
    RUBY_MARK_ENTER("env");
    if (ptr) {
        const rb_env_t * const env = ptr;

        if (env->env) {
            /* TODO: should mark more restricted range */
            RUBY_GC_INFO("env->env\n");
            rb_gc_mark_locations(env->env, env->env + env->env_size);
        }

        RUBY_GC_INFO("env->prev_envval\n");
        RUBY_MARK_UNLESS_NULL(env->prev_envval);
        RUBY_MARK_UNLESS_NULL(env->block.self);
        RUBY_MARK_UNLESS_NULL(env->block.proc);

        if (env->block.iseq) {
            if (BUILTIN_TYPE(env->block.iseq) == T_NODE) {
                RUBY_MARK_UNLESS_NULL((VALUE)env->block.iseq);
            }
            else {
                RUBY_MARK_UNLESS_NULL(env->block.iseq->self);
            }
        }
    }
    RUBY_MARK_LEAVE("env");
}

static void
env_free(void * const ptr)
{
    RUBY_FREE_ENTER("env");
    if (ptr) {
        rb_env_t *const env = ptr;
        RUBY_FREE_UNLESS_NULL(env->env);
        ruby_xfree(ptr);
    }
    RUBY_FREE_LEAVE("env");
}

static size_t
env_memsize(const void *ptr)
{
    if (ptr) {
        const rb_env_t * const env = ptr;
        size_t size = sizeof(rb_env_t);
        if (env->env) {
            size += env->env_size * sizeof(VALUE);
        }
        return size;
    }
    return 0;
}

static const rb_data_type_t env_data_type = {
    "VM/env",
    {env_mark, env_free, env_memsize,},
};

static VALUE
env_alloc(void)
{
    VALUE obj;
    rb_env_t *env;
    obj = TypedData_Make_Struct(rb_cEnv, rb_env_t, &env_data_type, env);
    env->env = 0;
    env->prev_envval = 0;
    env->block.iseq = 0;
    return obj;
}

static VALUE check_env_value(VALUE envval);

static int
check_env(rb_env_t * const env)
{
    printf("---\n");
    printf("envptr: %p\n", (void *)&env->block.dfp[0]);
    printf("orphan: %p\n", (void *)env->block.dfp[1]);
    printf("inheap: %p\n", (void *)env->block.dfp[2]);
    printf("envval: %10p ", (void *)env->block.dfp[3]);
    dp(env->block.dfp[3]);
    printf("penvv : %10p ", (void *)env->block.dfp[4]);
    dp(env->block.dfp[4]);
    printf("lfp:    %10p\n", (void *)env->block.lfp);
    printf("dfp:    %10p\n", (void *)env->block.dfp);
    if (env->block.dfp[4]) {
        printf(">>\n");
        check_env_value(env->block.dfp[4]);
        printf("<<\n");
    }
    return 1;
}

static VALUE
check_env_value(VALUE envval)
{
    rb_env_t *env;
    GetEnvPtr(envval, env);

    if (check_env(env)) {
        return envval;
    }
    rb_bug("invalid env");
    return Qnil;		/* unreachable */
}

static VALUE
vm_make_env_each(rb_thread_t * const th, rb_control_frame_t * const cfp,
                 VALUE *envptr, VALUE * const endptr)
{
    VALUE envval, penvval = 0;
    rb_env_t *env;
    VALUE *nenvptr;
    int i, local_size;

    if (ENV_IN_HEAP_P(th, envptr)) {
        return ENV_VAL(envptr);
    }

    if (envptr != endptr) {
        VALUE *penvptr = GC_GUARDED_PTR_REF(*envptr);
        rb_control_frame_t *pcfp = cfp;

        if (ENV_IN_HEAP_P(th, penvptr)) {
            penvval = ENV_VAL(penvptr);
        }
        else {
            while (pcfp->dfp != penvptr) {
                pcfp++;
                if (pcfp->dfp == 0) {
                    SDR();
                    rb_bug("invalid dfp");
                }
            }
            penvval = vm_make_env_each(th, pcfp, penvptr, endptr);
            cfp->lfp = pcfp->lfp;
            *envptr = GC_GUARDED_PTR(pcfp->dfp);
        }
    }

    /* allocate env */
    envval = env_alloc();
    GetEnvPtr(envval, env);

    if (!RUBY_VM_NORMAL_ISEQ_P(cfp->iseq)) {
        local_size = 2;
    }
    else {
        local_size = cfp->iseq->local_size;
    }

    env->env_size = local_size + 1 + 2;
    env->local_size = local_size;
    env->env = ALLOC_N(VALUE, env->env_size);
    env->prev_envval = penvval;

    for (i = 0; i <= local_size; i++) {
        env->env[i] = envptr[-local_size + i];
#if 0
        fprintf(stderr, "%2d ", &envptr[-local_size + i] - th->stack); dp(env->env[i]);
        if (RUBY_VM_NORMAL_ISEQ_P(cfp->iseq)) {
            /* clear value stack for GC */
            envptr[-local_size + i] = 0;
        }
#endif
    }

    *envptr = envval;		/* GC mark */
    nenvptr = &env->env[i - 1];
    nenvptr[1] = envval;	/* frame self */
    nenvptr[2] = penvval;	/* frame prev env object */

    /* reset lfp/dfp in cfp */
    cfp->dfp = nenvptr;
    if (envptr == endptr) {
        cfp->lfp = nenvptr;
    }

    /* as Binding */
    env->block.self = cfp->self;
    env->block.lfp = cfp->lfp;
    env->block.dfp = cfp->dfp;
    env->block.iseq = cfp->iseq;

    if (!RUBY_VM_NORMAL_ISEQ_P(cfp->iseq)) {
        /* TODO */
        env->block.iseq = 0;
    }
    return envval;
}

static int
collect_local_variables_in_iseq(rb_iseq_t *iseq, const VALUE ary)
{
    int i;
    if (!iseq) return 0;
    for (i = 0; i < iseq->local_table_size; i++) {
        ID lid = iseq->local_table[i];
        if (rb_is_local_id(lid)) {
            rb_ary_push(ary, ID2SYM(lid));
        }
    }
    return 1;
}

static int
collect_local_variables_in_env(rb_env_t *env, const VALUE ary)
{

    while (collect_local_variables_in_iseq(env->block.iseq, ary),
           env->prev_envval) {
        GetEnvPtr(env->prev_envval, env);
    }
    return 0;
}

static int
vm_collect_local_variables_in_heap(rb_thread_t *th, VALUE *dfp, VALUE ary)
{
    if (ENV_IN_HEAP_P(th, dfp)) {
        rb_env_t *env;
        GetEnvPtr(ENV_VAL(dfp), env);
        collect_local_variables_in_env(env, ary);
        return 1;
    }
    else {
        return 0;
    }
}

static VALUE vm_make_proc_from_block(rb_thread_t *th, rb_block_t *block);
static VALUE vm_make_env_object(rb_thread_t * th, rb_control_frame_t *cfp, VALUE *blockprocptr);

VALUE
rb_vm_make_env_object(rb_thread_t * th, rb_control_frame_t *cfp)
{
    VALUE blockprocval;
    return vm_make_env_object(th, cfp, &blockprocval);
}

static VALUE
vm_make_env_object(rb_thread_t *th, rb_control_frame_t *cfp, VALUE *blockprocptr)
{
    VALUE envval;
    VALUE *lfp;
    rb_block_t *blockptr;

    if (VM_FRAME_TYPE(cfp) == VM_FRAME_MAGIC_FINISH) {
        /* for method_missing */
        cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
    }

    lfp = cfp->lfp;
    blockptr = GC_GUARDED_PTR_REF(lfp[0]);

    if (blockptr && !(lfp[0] & 0x02)) {
        VALUE blockprocval = vm_make_proc_from_block(th, blockptr);
        rb_proc_t *p;
        GetProcPtr(blockprocval, p);
        lfp[0] = GC_GUARDED_PTR(&p->block);
        *blockprocptr = blockprocval;
    }

    envval = vm_make_env_each(th, cfp, cfp->dfp, cfp->lfp);
    rb_vm_rewrite_dfp_in_errinfo(th);

    if (PROCDEBUG) {
        check_env_value(envval);
    }

    return envval;
}

void
rb_vm_rewrite_dfp_in_errinfo(rb_thread_t *th)
{
    rb_control_frame_t *cfp = th->cfp;
    while (!RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(th, cfp)) {
        /* rewrite dfp in errinfo to point to heap */
        if (RUBY_VM_NORMAL_ISEQ_P(cfp->iseq) &&
            (cfp->iseq->type == ISEQ_TYPE_RESCUE ||
             cfp->iseq->type == ISEQ_TYPE_ENSURE)) {
            VALUE errinfo = cfp->dfp[-2]; /* #$! */
            if (RB_TYPE_P(errinfo, T_NODE)) {
                VALUE *escape_dfp = GET_THROWOBJ_CATCH_POINT(errinfo);
                if (! ENV_IN_HEAP_P(th, escape_dfp)) {
                    VALUE dfpval = *escape_dfp;
                    if (CLASS_OF(dfpval) == rb_cEnv) {
                        rb_env_t *dfpenv;
                        GetEnvPtr(dfpval, dfpenv);
                        SET_THROWOBJ_CATCH_POINT(errinfo, (VALUE)(dfpenv->env + dfpenv->local_size));
                    }
                }
            }
        }
        cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
    }
}

void
rb_vm_stack_to_heap(rb_thread_t *th)
{
    rb_control_frame_t *cfp = th->cfp;
    while ((cfp = rb_vm_get_ruby_level_next_cfp(th, cfp)) != 0) {
        rb_vm_make_env_object(th, cfp);
        cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
    }
}

/* Proc */

static VALUE
vm_make_proc_from_block(rb_thread_t *th, rb_block_t *block)
{
    if (!block->proc) {
        block->proc = rb_vm_make_proc(th, block, rb_cProc);
    }
    return block->proc;
}

VALUE
rb_vm_make_proc(rb_thread_t *th, const rb_block_t *block, VALUE klass)
{
    VALUE procval, envval, blockprocval = 0;
    rb_proc_t *proc;
    rb_control_frame_t *cfp = RUBY_VM_GET_CFP_FROM_BLOCK_PTR(block);

    if (block->proc) {
        rb_bug("rb_vm_make_proc: Proc value is already created.");
    }

    envval = vm_make_env_object(th, cfp, &blockprocval);

    if (PROCDEBUG) {
        check_env_value(envval);
    }
    procval = rb_proc_alloc(klass);
    GetProcPtr(procval, proc);
    proc->blockprocval = blockprocval;
    proc->block.self = block->self;
    proc->block.lfp = block->lfp;
    proc->block.dfp = block->dfp;
    proc->block.iseq = block->iseq;
    proc->block.proc = procval;
    proc->envval = envval;
    proc->safe_level = th->safe_level;

    if (VMDEBUG) {
        if (th->stack < block->dfp && block->dfp < th->stack + th->stack_size) {
            rb_bug("invalid ptr: block->dfp");
        }
        if (th->stack < block->lfp && block->lfp < th->stack + th->stack_size) {
            rb_bug("invalid ptr: block->lfp");
        }
    }

    return procval;
}
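
/*
   The env object created above is what gives a Proc its indefinite
   extent: the captured locals are moved off the VM stack, so a block
   can legally outlive its defining frame, e.g.

     def counter
       n = 0
       lambda { n += 1 }   # `n' survives in the heap env
     end
 */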

/* C -> Ruby: block */

static inline VALUE
invoke_block_from_c(rb_thread_t *th, const rb_block_t *block,
                    VALUE self, int argc, const VALUE *argv,
                    const rb_block_t *blockptr, const NODE *cref)
{
    if (SPECIAL_CONST_P(block->iseq))
        return Qnil;
    else if (BUILTIN_TYPE(block->iseq) != T_NODE) {
        const rb_iseq_t *iseq = block->iseq;
        const rb_control_frame_t *cfp;
        rb_control_frame_t *ncfp;
        int i, opt_pc, arg_size = iseq->arg_size;
        int type = block_proc_is_lambda(block->proc) ?
          VM_FRAME_MAGIC_LAMBDA : VM_FRAME_MAGIC_BLOCK;

        rb_vm_set_finish_env(th);

        cfp = th->cfp;
        CHECK_STACK_OVERFLOW(cfp, argc + iseq->stack_max);

        for (i=0; i<argc; i++) {
            cfp->sp[i] = argv[i];
        }

        opt_pc = vm_yield_setup_args(th, iseq, argc, cfp->sp, blockptr,
                                     type == VM_FRAME_MAGIC_LAMBDA);

        ncfp = vm_push_frame(th, iseq, type,
                             self, GC_GUARDED_PTR(block->dfp),
                             iseq->iseq_encoded + opt_pc, cfp->sp + arg_size, block->lfp,
                             iseq->local_size - arg_size);
        ncfp->me = th->passed_me;
        th->passed_me = 0;
        th->passed_block = blockptr;

        if (cref) {
            th->cfp->dfp[-1] = (VALUE)cref;
        }

        return vm_exec(th);
    }
    else {
        return vm_yield_with_cfunc(th, block, self, argc, argv, blockptr);
    }
}

static inline const rb_block_t *
check_block(rb_thread_t *th)
{
    const rb_block_t *blockptr = GC_GUARDED_PTR_REF(th->cfp->lfp[0]);

    if (blockptr == 0) {
        rb_vm_localjump_error("no block given", Qnil, 0);
    }

    return blockptr;
}

static inline VALUE
vm_yield_with_cref(rb_thread_t *th, int argc, const VALUE *argv, const NODE *cref)
{
    const rb_block_t *blockptr = check_block(th);
    return invoke_block_from_c(th, blockptr, blockptr->self, argc, argv, 0, cref);
}

static inline VALUE
vm_yield(rb_thread_t *th, int argc, const VALUE *argv)
{
    const rb_block_t *blockptr = check_block(th);
    return invoke_block_from_c(th, blockptr, blockptr->self, argc, argv, 0, 0);
}

VALUE
rb_vm_invoke_proc(rb_thread_t *th, rb_proc_t *proc, VALUE self,
                  int argc, const VALUE *argv, const rb_block_t *blockptr)
{
    VALUE val = Qundef;
    int state;
    volatile int stored_safe = th->safe_level;

    TH_PUSH_TAG(th);
    if ((state = EXEC_TAG()) == 0) {
        if (!proc->is_from_method) {
            th->safe_level = proc->safe_level;
        }
        val = invoke_block_from_c(th, &proc->block, self, argc, argv, blockptr, 0);
    }
    TH_POP_TAG();

    if (!proc->is_from_method) {
        th->safe_level = stored_safe;
    }

    if (state) {
        JUMP_TAG(state);
    }
    return val;
}
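
/* Note the $SAFE handling above: a Proc that did not originate from a
   method carries the safe level of its creation site and runs under
   it, with the caller's level restored afterwards even when a tag is
   thrown out of the block. */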

/* special variable */

static rb_control_frame_t *
vm_normal_frame(rb_thread_t *th, rb_control_frame_t *cfp)
{
    while (cfp->pc == 0) {
        cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
        if (RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(th, cfp)) {
            return 0;
        }
    }
    return cfp;
}

static VALUE
vm_cfp_svar_get(rb_thread_t *th, rb_control_frame_t *cfp, VALUE key)
{
    cfp = vm_normal_frame(th, cfp);
    return lfp_svar_get(th, cfp ? cfp->lfp : 0, key);
}

static void
vm_cfp_svar_set(rb_thread_t *th, rb_control_frame_t *cfp, VALUE key, const VALUE val)
{
    cfp = vm_normal_frame(th, cfp);
    lfp_svar_set(th, cfp ? cfp->lfp : 0, key, val);
}

static VALUE
vm_svar_get(VALUE key)
{
    rb_thread_t *th = GET_THREAD();
    return vm_cfp_svar_get(th, th->cfp, key);
}

static void
vm_svar_set(VALUE key, VALUE val)
{
    rb_thread_t *th = GET_THREAD();
    vm_cfp_svar_set(th, th->cfp, key, val);
}

VALUE
rb_backref_get(void)
{
    return vm_svar_get(1);
}

void
rb_backref_set(VALUE val)
{
    vm_svar_set(1, val);
}

VALUE
rb_lastline_get(void)
{
    return vm_svar_get(0);
}

void
rb_lastline_set(VALUE val)
{
    vm_svar_set(0, val);
}
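
/* The svar keys used above: slot 0 holds $_ (rb_lastline_*) and slot 1
   holds $~ (rb_backref_*); lfp_svar_get/set live in vm_insnhelper.c. */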

/* backtrace */

int
rb_vm_get_sourceline(const rb_control_frame_t *cfp)
{
    int line_no = 0;
    const rb_iseq_t *iseq = cfp->iseq;

    if (RUBY_VM_NORMAL_ISEQ_P(iseq) && iseq->insn_info_size > 0) {
        rb_num_t i;
        size_t pos = cfp->pc - cfp->iseq->iseq_encoded;

        if (iseq->insn_info_table[0].position == pos) goto found;
        for (i = 1; i < iseq->insn_info_size; i++) {
            if (iseq->insn_info_table[i].position == pos) {
                line_no = iseq->insn_info_table[i - 1].line_no;
                goto found;
            }
        }
        line_no = iseq->insn_info_table[i - 1].line_no;
    }
  found:
    return line_no;
}

static int
vm_backtrace_each(rb_thread_t *th, int lev, void (*init)(void *), rb_backtrace_iter_func *iter, void *arg)
{
    const rb_control_frame_t *limit_cfp = th->cfp;
    const rb_control_frame_t *cfp = (void *)(th->stack + th->stack_size);
    VALUE file = Qnil;
    int line_no = 0;

    cfp -= 2;
    while (lev-- >= 0) {
        if (++limit_cfp > cfp) {
            return FALSE;
        }
    }
    if (init) (*init)(arg);
    limit_cfp = RUBY_VM_NEXT_CONTROL_FRAME(limit_cfp);
    if (th->vm->progname) file = th->vm->progname;
    while (cfp > limit_cfp) {
        if (cfp->iseq != 0) {
            if (cfp->pc != 0) {
                rb_iseq_t *iseq = cfp->iseq;

                line_no = rb_vm_get_sourceline(cfp);
                file = iseq->filename;
                if ((*iter)(arg, file, line_no, iseq->name)) break;
            }
        }
        else if (RUBYVM_CFUNC_FRAME_P(cfp)) {
            ID id;
            extern VALUE ruby_engine_name;

            if (NIL_P(file)) file = ruby_engine_name;
            if (cfp->me->def)
                id = cfp->me->def->original_id;
            else
                id = cfp->me->called_id;
            if (id != ID_ALLOCATOR && (*iter)(arg, file, line_no, rb_id2str(id)))
                break;
        }
        cfp = RUBY_VM_NEXT_CONTROL_FRAME(cfp);
    }
    return TRUE;
}

static void
vm_backtrace_alloc(void *arg)
{
    VALUE *aryp = arg;
    *aryp = rb_ary_new();
}

static int
vm_backtrace_push(void *arg, VALUE file, int line_no, VALUE name)
{
    VALUE *aryp = arg;
    VALUE bt;

    if (line_no) {
        bt = rb_enc_sprintf(rb_enc_compatible(file, name), "%s:%d:in `%s'",
                            RSTRING_PTR(file), line_no, RSTRING_PTR(name));
    }
    else {
        bt = rb_enc_sprintf(rb_enc_compatible(file, name), "%s:in `%s'",
                            RSTRING_PTR(file), RSTRING_PTR(name));
    }
    rb_ary_push(*aryp, bt);
    return 0;
}
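
/* Entries produced here look like "foo.rb:42:in `bar'", or
   "foo.rb:in `bar'" when no line number is available. */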

static inline VALUE
vm_backtrace(rb_thread_t *th, int lev)
{
    VALUE ary = 0;

    if (lev < 0) {
        ary = rb_ary_new();
    }
    vm_backtrace_each(th, lev, vm_backtrace_alloc, vm_backtrace_push, &ary);
    if (!ary) return Qnil;
    return rb_ary_reverse(ary);
}

const char *
rb_sourcefile(void)
{
    rb_thread_t *th = GET_THREAD();
    rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->cfp);

    if (cfp) {
        return RSTRING_PTR(cfp->iseq->filename);
    }
    else {
        return 0;
    }
}

int
rb_sourceline(void)
{
    rb_thread_t *th = GET_THREAD();
    rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->cfp);

    if (cfp) {
        return rb_vm_get_sourceline(cfp);
    }
    else {
        return 0;
    }
}

NODE *
rb_vm_cref(void)
{
    rb_thread_t *th = GET_THREAD();
    rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->cfp);

    if (cfp == 0) {
        rb_raise(rb_eRuntimeError, "Can't call on top of Fiber or Thread");
    }
    return vm_get_cref(cfp->iseq, cfp->lfp, cfp->dfp);
}

#if 0
void
debug_cref(NODE *cref)
{
    while (cref) {
        dp(cref->nd_clss);
        printf("%ld\n", cref->nd_visi);
        cref = cref->nd_next;
    }
}
#endif

VALUE
rb_vm_cbase(void)
{
    rb_thread_t *th = GET_THREAD();
    rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->cfp);

    if (cfp == 0) {
        rb_raise(rb_eRuntimeError, "Can't call on top of Fiber or Thread");
    }
    return vm_get_cbase(cfp->iseq, cfp->lfp, cfp->dfp);
}

/* jump */

static VALUE
make_localjump_error(const char *mesg, VALUE value, int reason)
{
    extern VALUE rb_eLocalJumpError;
    VALUE exc = rb_exc_new2(rb_eLocalJumpError, mesg);
    ID id;

    switch (reason) {
      case TAG_BREAK:
        CONST_ID(id, "break");
        break;
      case TAG_REDO:
        CONST_ID(id, "redo");
        break;
      case TAG_RETRY:
        CONST_ID(id, "retry");
        break;
      case TAG_NEXT:
        CONST_ID(id, "next");
        break;
      case TAG_RETURN:
        CONST_ID(id, "return");
        break;
      default:
        CONST_ID(id, "noreason");
        break;
    }
    rb_iv_set(exc, "@exit_value", value);
    rb_iv_set(exc, "@reason", ID2SYM(id));
    return exc;
}

void
rb_vm_localjump_error(const char *mesg, VALUE value, int reason)
{
    VALUE exc = make_localjump_error(mesg, value, reason);
    rb_exc_raise(exc);
}

VALUE
rb_vm_make_jump_tag_but_local_jump(int state, VALUE val)
{
    VALUE result = Qnil;

    if (val == Qundef) {
        val = GET_THREAD()->tag->retval;
    }
    switch (state) {
      case 0:
        break;
      case TAG_RETURN:
        result = make_localjump_error("unexpected return", val, state);
        break;
      case TAG_BREAK:
        result = make_localjump_error("unexpected break", val, state);
        break;
      case TAG_NEXT:
        result = make_localjump_error("unexpected next", val, state);
        break;
      case TAG_REDO:
        result = make_localjump_error("unexpected redo", Qnil, state);
        break;
      case TAG_RETRY:
        result = make_localjump_error("retry outside of rescue clause", Qnil, state);
        break;
      default:
        break;
    }
    return result;
}

void
rb_vm_jump_tag_but_local_jump(int state, VALUE val)
{
    if (val != Qnil) {
        VALUE exc = rb_vm_make_jump_tag_but_local_jump(state, val);
        if (!NIL_P(exc)) rb_exc_raise(exc);
    }
    JUMP_TAG(state);
}

NORETURN(static void vm_iter_break(rb_thread_t *th));

static void
vm_iter_break(rb_thread_t *th)
{
    rb_control_frame_t *cfp = th->cfp;
    VALUE *dfp = GC_GUARDED_PTR_REF(*cfp->dfp);

    th->state = TAG_BREAK;
    th->errinfo = (VALUE)NEW_THROW_OBJECT(Qnil, (VALUE)dfp, TAG_BREAK);
    TH_JUMP_TAG(th, TAG_BREAK);
}

void
rb_iter_break(void)
{
    vm_iter_break(GET_THREAD());
}

/* optimization: redefine management */

static st_table *vm_opt_method_table = 0;

static void
rb_vm_check_redefinition_opt_method(const rb_method_entry_t *me)
{
    st_data_t bop;
    if (!me->def || me->def->type == VM_METHOD_TYPE_CFUNC) {
        if (st_lookup(vm_opt_method_table, (st_data_t)me, &bop)) {
            ruby_vm_redefined_flag[bop] = 1;
        }
    }
}

static void
add_opt_method(VALUE klass, ID mid, VALUE bop)
{
    rb_method_entry_t *me;
    if (st_lookup(RCLASS_M_TBL(klass), mid, (void *)&me) && me->def &&
        me->def->type == VM_METHOD_TYPE_CFUNC) {
        st_insert(vm_opt_method_table, (st_data_t)me, (st_data_t)bop);
    }
    else {
        rb_bug("undefined optimized method: %s", rb_id2name(mid));
    }
}

static void
vm_init_redefined_flag(void)
{
    ID mid;
    VALUE bop;

    vm_opt_method_table = st_init_numtable();

#define OP(mid_, bop_) (mid = id##mid_, bop = BOP_##bop_, ruby_vm_redefined_flag[bop] = 0)
#define C(k) add_opt_method(rb_c##k, mid, bop)
    OP(PLUS, PLUS), (C(Fixnum), C(Float), C(String), C(Array));
    OP(MINUS, MINUS), (C(Fixnum));
    OP(MULT, MULT), (C(Fixnum), C(Float));
    OP(DIV, DIV), (C(Fixnum), C(Float));
    OP(MOD, MOD), (C(Fixnum), C(Float));
    OP(Eq, EQ), (C(Fixnum), C(Float), C(String));
    OP(Eqq, EQQ), (C(Fixnum), C(Bignum), C(Float), C(Symbol), C(String));
    OP(LT, LT), (C(Fixnum));
    OP(LE, LE), (C(Fixnum));
    OP(LTLT, LTLT), (C(String), C(Array));
    OP(AREF, AREF), (C(Array), C(Hash));
    OP(ASET, ASET), (C(Array), C(Hash));
    OP(Length, LENGTH), (C(Array), C(String), C(Hash));
    OP(Size, SIZE), (C(Array), C(String), C(Hash));
    OP(Succ, SUCC), (C(Fixnum), C(String), C(Time));
    OP(GT, GT), (C(Fixnum));
    OP(GE, GE), (C(Fixnum));
#undef C
#undef OP
}
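
/* Sketch of how the flags are consumed: optimized instructions such as
   opt_plus only take their fast path while the corresponding
   ruby_vm_redefined_flag[] entry is still 0 (see vm_insnhelper.c);
   redefining e.g. Fixnum#+ flips BOP_PLUS via
   rb_vm_check_redefinition_opt_method() above, demoting the
   instruction to a normal method dispatch. */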

/* for vm development */

#if VMDEBUG
static const char *
vm_frametype_name(const rb_control_frame_t *cfp)
{
    switch (VM_FRAME_TYPE(cfp)) {
      case VM_FRAME_MAGIC_METHOD: return "method";
      case VM_FRAME_MAGIC_BLOCK:  return "block";
      case VM_FRAME_MAGIC_CLASS:  return "class";
      case VM_FRAME_MAGIC_TOP:    return "top";
      case VM_FRAME_MAGIC_FINISH: return "finish";
      case VM_FRAME_MAGIC_CFUNC:  return "cfunc";
      case VM_FRAME_MAGIC_PROC:   return "proc";
      case VM_FRAME_MAGIC_IFUNC:  return "ifunc";
      case VM_FRAME_MAGIC_EVAL:   return "eval";
      case VM_FRAME_MAGIC_LAMBDA: return "lambda";
      default:
        rb_bug("unknown frame");
    }
}
#endif

/* evaluator body */

/* finish
  VMe (h1)          finish
    VM              finish F1 F2
      cfunc         finish F1 F2 C1
        rb_funcall  finish F1 F2 C1
          VMe       finish F1 F2 C1
            VM      finish F1 F2 C1 F3

  F1 - F3 : pushed by VM
  C1 : pushed by send insn (CFUNC)

  struct CONTROL_FRAME {
    VALUE *pc;                  // cfp[0], program counter
    VALUE *sp;                  // cfp[1], stack pointer
    VALUE *bp;                  // cfp[2], base pointer
    rb_iseq_t *iseq;            // cfp[3], iseq
    VALUE flag;                 // cfp[4], magic
    VALUE self;                 // cfp[5], self
    VALUE *lfp;                 // cfp[6], local frame pointer
    VALUE *dfp;                 // cfp[7], dynamic frame pointer
    rb_iseq_t * block_iseq;     // cfp[8], block iseq
    VALUE proc;                 // cfp[9], always 0
  };

  struct BLOCK {
    VALUE self;
    VALUE *lfp;
    VALUE *dfp;
    rb_iseq_t *block_iseq;
    VALUE proc;
  };

  struct METHOD_CONTROL_FRAME {
    rb_control_frame_t frame;
  };

  struct METHOD_FRAME {
    VALUE arg0;
    ...
    VALUE argM;
    VALUE param0;
    ...
    VALUE paramN;
    VALUE cref;
    VALUE special;                         // lfp [1]
    struct block_object *block_ptr | 0x01; // lfp [0]
  };

  struct BLOCK_CONTROL_FRAME {
    rb_control_frame_t frame;
  };

  struct BLOCK_FRAME {
    VALUE arg0;
    ...
    VALUE argM;
    VALUE param0;
    ...
    VALUE paramN;
    VALUE cref;
    VALUE *(prev_ptr | 0x01);   // DFP[0]
  };

  struct CLASS_CONTROL_FRAME {
    rb_control_frame_t frame;
  };

  struct CLASS_FRAME {
    VALUE param0;
    ...
    VALUE paramN;
    VALUE cref;
    VALUE prev_dfp;             // for frame jump
  };

  struct C_METHOD_CONTROL_FRAME {
    VALUE *pc;                  // 0
    VALUE *sp;                  // stack pointer
    VALUE *bp;                  // base pointer (used in exception)
    rb_iseq_t *iseq;            // cmi
    VALUE magic;                // C_METHOD_FRAME
    VALUE self;                 // ?
    VALUE *lfp;                 // lfp
    VALUE *dfp;                 // == lfp
    rb_iseq_t * block_iseq;     //
    VALUE proc;                 // always 0
  };

  struct C_BLOCK_CONTROL_FRAME {
    VALUE *pc;                  // point only "finish" insn
    VALUE *sp;                  // sp
    rb_iseq_t *iseq;            // ?
    VALUE magic;                // C_METHOD_FRAME
    VALUE self;                 // needed?
    VALUE *lfp;                 // lfp
    VALUE *dfp;                 // lfp
    rb_iseq_t * block_iseq;     // 0
  };
 */


static VALUE
vm_exec(rb_thread_t *th)
{
    int state;
    VALUE result, err;
    VALUE initial = 0;
    VALUE *escape_dfp = NULL;

    TH_PUSH_TAG(th);
    _tag.retval = Qnil;
    if ((state = EXEC_TAG()) == 0) {
      vm_loop_start:
        result = vm_exec_core(th, initial);
        if ((state = th->state) != 0) {
            err = result;
            th->state = 0;
            goto exception_handler;
        }
    }
    else {
        int i;
        struct iseq_catch_table_entry *entry;
        unsigned long epc, cont_pc, cont_sp;
        VALUE catch_iseqval;
        rb_control_frame_t *cfp;
        VALUE type;

        err = th->errinfo;

      exception_handler:
        cont_pc = cont_sp = catch_iseqval = 0;

        while (th->cfp->pc == 0 || th->cfp->iseq == 0) {
            if (UNLIKELY(VM_FRAME_TYPE(th->cfp) == VM_FRAME_MAGIC_CFUNC)) {
                const rb_method_entry_t *me = th->cfp->me;
                EXEC_EVENT_HOOK(th, RUBY_EVENT_C_RETURN, th->cfp->self, me->called_id, me->klass);
            }
            th->cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(th->cfp);
        }

        cfp = th->cfp;
        epc = cfp->pc - cfp->iseq->iseq_encoded;

        if (state == TAG_BREAK || state == TAG_RETURN) {
            escape_dfp = GET_THROWOBJ_CATCH_POINT(err);

            if (cfp->dfp == escape_dfp) {
                if (state == TAG_RETURN) {
                    if ((cfp + 1)->pc != &finish_insn_seq[0]) {
                        SET_THROWOBJ_CATCH_POINT(err, (VALUE)(cfp + 1)->dfp);
                        SET_THROWOBJ_STATE(err, state = TAG_BREAK);
                    }
                    else {
                        for (i = 0; i < cfp->iseq->catch_table_size; i++) {
                            entry = &cfp->iseq->catch_table[i];
                            if (entry->start < epc && entry->end >= epc) {
                                if (entry->type == CATCH_TYPE_ENSURE) {
                                    catch_iseqval = entry->iseq;
                                    cont_pc = entry->cont;
                                    cont_sp = entry->sp;
                                    break;
                                }
                            }
                        }
                        if (!catch_iseqval) {
                            result = GET_THROWOBJ_VAL(err);
                            th->errinfo = Qnil;
                            th->cfp += 2;
                            goto finish_vme;
                        }
                    }
                    /* through */
                }
                else {
                    /* TAG_BREAK */
#if OPT_STACK_CACHING
                    initial = (GET_THROWOBJ_VAL(err));
#else
                    *th->cfp->sp++ = (GET_THROWOBJ_VAL(err));
#endif
                    th->errinfo = Qnil;
                    goto vm_loop_start;
                }
            }
        }

        if (state == TAG_RAISE) {
            for (i = 0; i < cfp->iseq->catch_table_size; i++) {
                entry = &cfp->iseq->catch_table[i];
                if (entry->start < epc && entry->end >= epc) {

                    if (entry->type == CATCH_TYPE_RESCUE ||
                        entry->type == CATCH_TYPE_ENSURE) {
                        catch_iseqval = entry->iseq;
                        cont_pc = entry->cont;
                        cont_sp = entry->sp;
                        break;
                    }
                }
            }
        }
        else if (state == TAG_RETRY) {
            for (i = 0; i < cfp->iseq->catch_table_size; i++) {
                entry = &cfp->iseq->catch_table[i];
                if (entry->start < epc && entry->end >= epc) {

                    if (entry->type == CATCH_TYPE_ENSURE) {
                        catch_iseqval = entry->iseq;
                        cont_pc = entry->cont;
                        cont_sp = entry->sp;
                        break;
                    }
                    else if (entry->type == CATCH_TYPE_RETRY) {
                        VALUE *escape_dfp;
                        escape_dfp = GET_THROWOBJ_CATCH_POINT(err);
                        if (cfp->dfp == escape_dfp) {
                            cfp->pc = cfp->iseq->iseq_encoded + entry->cont;
                            th->errinfo = Qnil;
                            goto vm_loop_start;
                        }
                    }
                }
            }
        }
        else if (state == TAG_BREAK && ((VALUE)escape_dfp & ~0x03) == 0) {
            type = CATCH_TYPE_BREAK;

          search_restart_point:
            for (i = 0; i < cfp->iseq->catch_table_size; i++) {
                entry = &cfp->iseq->catch_table[i];

                if (entry->start < epc && entry->end >= epc) {
                    if (entry->type == CATCH_TYPE_ENSURE) {
                        catch_iseqval = entry->iseq;
                        cont_pc = entry->cont;
                        cont_sp = entry->sp;
                        break;
                    }
                    else if (entry->type == type) {
                        cfp->pc = cfp->iseq->iseq_encoded + entry->cont;
                        cfp->sp = cfp->bp + entry->sp;

                        if (state != TAG_REDO) {
#if OPT_STACK_CACHING
                            initial = (GET_THROWOBJ_VAL(err));
#else
                            *th->cfp->sp++ = (GET_THROWOBJ_VAL(err));
#endif
                        }
                        th->state = 0;
                        th->errinfo = Qnil;
                        goto vm_loop_start;
                    }
                }
            }
        }
        else if (state == TAG_REDO) {
            type = CATCH_TYPE_REDO;
            goto search_restart_point;
        }
        else if (state == TAG_NEXT) {
            type = CATCH_TYPE_NEXT;
            goto search_restart_point;
        }
        else {
            for (i = 0; i < cfp->iseq->catch_table_size; i++) {
                entry = &cfp->iseq->catch_table[i];
                if (entry->start < epc && entry->end >= epc) {

                    if (entry->type == CATCH_TYPE_ENSURE) {
                        catch_iseqval = entry->iseq;
                        cont_pc = entry->cont;
                        cont_sp = entry->sp;
                        break;
                    }
                }
            }
        }

        if (catch_iseqval != 0) {
            /* found catch table */
            rb_iseq_t *catch_iseq;

            /* enter catch scope */
            GetISeqPtr(catch_iseqval, catch_iseq);
            cfp->sp = cfp->bp + cont_sp;
            cfp->pc = cfp->iseq->iseq_encoded + cont_pc;

            /* push block frame */
            cfp->sp[0] = err;
            vm_push_frame(th, catch_iseq, VM_FRAME_MAGIC_BLOCK,
                          cfp->self, (VALUE)cfp->dfp, catch_iseq->iseq_encoded,
                          cfp->sp + 1 /* push value */, cfp->lfp, catch_iseq->local_size - 1);

            state = 0;
            th->state = 0;
            th->errinfo = Qnil;
            goto vm_loop_start;
        }
        else {
            /* skip frame */

            switch (VM_FRAME_TYPE(th->cfp)) {
              case VM_FRAME_MAGIC_METHOD:
                EXEC_EVENT_HOOK(th, RUBY_EVENT_RETURN, th->cfp->self, 0, 0);
                break;
              case VM_FRAME_MAGIC_CLASS:
                EXEC_EVENT_HOOK(th, RUBY_EVENT_END, th->cfp->self, 0, 0);
                break;
            }

            th->cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(th->cfp);

            if (VM_FRAME_TYPE(th->cfp) != VM_FRAME_MAGIC_FINISH) {
                goto exception_handler;
            }
            else {
                vm_pop_frame(th);
                th->errinfo = err;
                TH_POP_TAG2();
                JUMP_TAG(state);
            }
        }
    }
  finish_vme:
    TH_POP_TAG();
    return result;
}
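
/* Rough shape of vm_exec(): run vm_exec_core() until it returns or a
   tag is thrown; on a throw, scan the current iseq's catch_table for
   an entry covering the faulting pc (RESCUE/ENSURE/RETRY/BREAK/REDO/
   NEXT), push a block frame for the matching catch iseq and loop, or
   pop the frame and rethrow until a FINISH frame is reached. */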

/* misc */

VALUE
rb_iseq_eval(VALUE iseqval)
{
    rb_thread_t *th = GET_THREAD();
    VALUE val;
    volatile VALUE tmp;

    vm_set_top_stack(th, iseqval);

    val = vm_exec(th);
    tmp = iseqval;		/* prohibit tail call optimization */
    return val;
}

VALUE
rb_iseq_eval_main(VALUE iseqval)
{
    rb_thread_t *th = GET_THREAD();
    VALUE val;
    volatile VALUE tmp;

    vm_set_main_stack(th, iseqval);

    val = vm_exec(th);
    tmp = iseqval;		/* prohibit tail call optimization */
    return val;
}
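
/*
   Hedged usage sketch from C (assumes rb_iseq_compile() from iseq.c;
   error handling omitted):

     VALUE src     = rb_str_new2("p 1 + 1");
     VALUE file    = rb_str_new2("<embedded>");
     VALUE iseqval = rb_iseq_compile(src, file, INT2FIX(1));
     rb_iseq_eval(iseqval);
 */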

int
rb_thread_method_id_and_class(rb_thread_t *th,
                              ID *idp, VALUE *klassp)
{
    rb_control_frame_t *cfp = th->cfp;
    rb_iseq_t *iseq = cfp->iseq;
    if (!iseq && cfp->me) {
        if (idp) *idp = cfp->me->def->original_id;
        if (klassp) *klassp = cfp->me->klass;
        return 1;
    }
    while (iseq) {
        if (RUBY_VM_IFUNC_P(iseq)) {
            if (idp) CONST_ID(*idp, "<ifunc>");
            if (klassp) *klassp = 0;
            return 1;
        }
        if (iseq->defined_method_id) {
            if (idp) *idp = iseq->defined_method_id;
            if (klassp) *klassp = iseq->klass;
            return 1;
        }
        if (iseq->local_iseq == iseq) {
            break;
        }
        iseq = iseq->parent_iseq;
    }
    return 0;
}

int
rb_frame_method_id_and_class(ID *idp, VALUE *klassp)
{
    return rb_thread_method_id_and_class(GET_THREAD(), idp, klassp);
}

VALUE
rb_thread_current_status(const rb_thread_t *th)
{
    const rb_control_frame_t *cfp = th->cfp;
    VALUE str = Qnil;

    if (cfp->iseq != 0) {
        if (cfp->pc != 0) {
            rb_iseq_t *iseq = cfp->iseq;
            int line_no = rb_vm_get_sourceline(cfp);
            char *file = RSTRING_PTR(iseq->filename);
            str = rb_sprintf("%s:%d:in `%s'",
                             file, line_no, RSTRING_PTR(iseq->name));
        }
    }
    else if (cfp->me->def->original_id) {
        str = rb_sprintf("`%s#%s' (cfunc)",
                         rb_class2name(cfp->me->klass),
                         rb_id2name(cfp->me->def->original_id));
    }

    return str;
}

VALUE
rb_vm_call_cfunc(VALUE recv, VALUE (*func)(VALUE), VALUE arg,
                 const rb_block_t *blockptr, VALUE filename)
{
    rb_thread_t *th = GET_THREAD();
    const rb_control_frame_t *reg_cfp = th->cfp;
    volatile VALUE iseqval = rb_iseq_new(0, filename, filename, Qnil, 0, ISEQ_TYPE_TOP);
    VALUE val;

    vm_push_frame(th, DATA_PTR(iseqval), VM_FRAME_MAGIC_TOP,
                  recv, (VALUE)blockptr, 0, reg_cfp->sp, 0, 1);

    val = (*func)(arg);

    vm_pop_frame(th);
    return val;
}

/* vm */

static int
vm_mark_each_thread_func(st_data_t key, st_data_t value, st_data_t dummy)
{
    VALUE thval = (VALUE)key;
    rb_gc_mark(thval);
    return ST_CONTINUE;
}

static void
mark_event_hooks(rb_event_hook_t *hook)
{
    while (hook) {
        rb_gc_mark(hook->data);
        hook = hook->next;
    }
}

void
rb_vm_mark(void *ptr)
{
    int i;

    RUBY_MARK_ENTER("vm");
    RUBY_GC_INFO("-------------------------------------------------\n");
    if (ptr) {
        rb_vm_t *vm = ptr;
        if (vm->living_threads) {
            st_foreach(vm->living_threads, vm_mark_each_thread_func, 0);
        }
        RUBY_MARK_UNLESS_NULL(vm->thgroup_default);
        RUBY_MARK_UNLESS_NULL(vm->mark_object_ary);
        RUBY_MARK_UNLESS_NULL(vm->load_path);
        RUBY_MARK_UNLESS_NULL(vm->loaded_features);
        RUBY_MARK_UNLESS_NULL(vm->top_self);
        RUBY_MARK_UNLESS_NULL(vm->coverages);
        rb_gc_mark_locations(vm->special_exceptions, vm->special_exceptions + ruby_special_error_count);

        if (vm->loading_table) {
            rb_mark_tbl(vm->loading_table);
        }

        mark_event_hooks(vm->event_hooks);

        for (i = 0; i < RUBY_NSIG; i++) {
            if (vm->trap_list[i].cmd)
                rb_gc_mark(vm->trap_list[i].cmd);
        }
    }

    RUBY_MARK_LEAVE("vm");
}

#define vm_free 0

int
ruby_vm_destruct(rb_vm_t *vm)
{
    RUBY_FREE_ENTER("vm");
    if (vm) {
        rb_thread_t *th = vm->main_thread;
#if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
        struct rb_objspace *objspace = vm->objspace;
#endif
        rb_gc_force_recycle(vm->self);
        vm->main_thread = 0;
        if (th) {
            rb_fiber_reset_root_local_storage(th->self);
            thread_free(th);
        }
        if (vm->living_threads) {
            st_free_table(vm->living_threads);
            vm->living_threads = 0;
        }
#if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
        if (objspace) {
            rb_objspace_free(objspace);
        }
#endif
        ruby_vm_run_at_exit_hooks(vm);
        rb_vm_gvl_destroy(vm);
        ruby_xfree(vm);
        ruby_current_vm = 0;
    }
    RUBY_FREE_LEAVE("vm");
    return 0;
}

static size_t
vm_memsize(const void *ptr)
{
    if (ptr) {
        const rb_vm_t *vmobj = ptr;
        return sizeof(rb_vm_t) + st_memsize(vmobj->living_threads);
    }
    else {
        return 0;
    }
}

static const rb_data_type_t vm_data_type = {
    "VM",
    {rb_vm_mark, vm_free, vm_memsize,},
};

static void
vm_init2(rb_vm_t *vm)
{
    MEMZERO(vm, rb_vm_t, 1);
    vm->src_encoding_index = -1;
    vm->at_exit.basic.flags = (T_ARRAY | RARRAY_EMBED_FLAG) & ~RARRAY_EMBED_LEN_MASK; /* len set 0 */
    vm->at_exit.basic.klass = 0;
}

/* Thread */

#define USE_THREAD_DATA_RECYCLE 1

#if USE_THREAD_DATA_RECYCLE
#define RECYCLE_MAX 64
static VALUE *thread_recycle_stack_slot[RECYCLE_MAX];
static int thread_recycle_stack_count = 0;

static VALUE *
thread_recycle_stack(size_t size)
{
    if (thread_recycle_stack_count) {
        return thread_recycle_stack_slot[--thread_recycle_stack_count];
    }
    else {
        return ALLOC_N(VALUE, size);
    }
}

#else
#define thread_recycle_stack(size) ALLOC_N(VALUE, (size))
#endif

void
rb_thread_recycle_stack_release(VALUE *stack)
{
#if USE_THREAD_DATA_RECYCLE
    if (thread_recycle_stack_count < RECYCLE_MAX) {
        thread_recycle_stack_slot[thread_recycle_stack_count++] = stack;
        return;
    }
#endif
    ruby_xfree(stack);
}

#ifdef USE_THREAD_RECYCLE
static rb_thread_t *
thread_recycle_struct(void)
{
    void *p = ALLOC_N(rb_thread_t, 1);
    memset(p, 0, sizeof(rb_thread_t));
    return p;
}
#endif

void
rb_thread_mark(void *ptr)
{
    rb_thread_t *th = NULL;
    RUBY_MARK_ENTER("thread");
    if (ptr) {
        th = ptr;
        if (th->stack) {
            VALUE *p = th->stack;
            VALUE *sp = th->cfp->sp;
            rb_control_frame_t *cfp = th->cfp;
            rb_control_frame_t *limit_cfp = (void *)(th->stack + th->stack_size);

            while (p < sp) {
                rb_gc_mark(*p++);
            }
            rb_gc_mark_locations(p, p + th->mark_stack_len);

            while (cfp != limit_cfp) {
                rb_iseq_t *iseq = cfp->iseq;
                rb_gc_mark(cfp->proc);
                rb_gc_mark(cfp->self);
                if (iseq) {
                    rb_gc_mark(RUBY_VM_NORMAL_ISEQ_P(iseq) ? iseq->self : (VALUE)iseq);
                }
                if (cfp->me) {
                    /* TODO: marking `me' can be more sophisticated way */
                    ((rb_method_entry_t *)cfp->me)->mark = 1;
                    rb_mark_method_entry(cfp->me);
                }
                cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
            }
        }

        /* mark ruby objects */
        RUBY_MARK_UNLESS_NULL(th->first_proc);
        if (th->first_proc) RUBY_MARK_UNLESS_NULL(th->first_args);

        RUBY_MARK_UNLESS_NULL(th->thgroup);
        RUBY_MARK_UNLESS_NULL(th->value);
        RUBY_MARK_UNLESS_NULL(th->errinfo);
        RUBY_MARK_UNLESS_NULL(th->thrown_errinfo);
        RUBY_MARK_UNLESS_NULL(th->local_svar);
        RUBY_MARK_UNLESS_NULL(th->top_self);
        RUBY_MARK_UNLESS_NULL(th->top_wrapper);
        RUBY_MARK_UNLESS_NULL(th->fiber);
        RUBY_MARK_UNLESS_NULL(th->root_fiber);
        RUBY_MARK_UNLESS_NULL(th->stat_insn_usage);
        RUBY_MARK_UNLESS_NULL(th->last_status);

        RUBY_MARK_UNLESS_NULL(th->locking_mutex);

        rb_mark_tbl(th->local_storage);

        if (GET_THREAD() != th && th->machine_stack_start && th->machine_stack_end) {
            rb_gc_mark_machine_stack(th);
            rb_gc_mark_locations((VALUE *)&th->machine_regs,
                                 (VALUE *)(&th->machine_regs) +
                                 sizeof(th->machine_regs) / sizeof(VALUE));
        }

        mark_event_hooks(th->event_hooks);
    }

    RUBY_MARK_LEAVE("thread");
}

static void
thread_free(void *ptr)
{
    rb_thread_t *th;
    RUBY_FREE_ENTER("thread");

    if (ptr) {
        th = ptr;

        if (!th->root_fiber) {
            RUBY_FREE_UNLESS_NULL(th->stack);
        }

        if (th->locking_mutex != Qfalse) {
            rb_bug("thread_free: locking_mutex must be NULL (%p:%p)", (void *)th, (void *)th->locking_mutex);
        }
        if (th->keeping_mutexes != NULL) {
            rb_bug("thread_free: keeping_mutexes must be NULL (%p:%p)", (void *)th, (void *)th->keeping_mutexes);
        }

        if (th->local_storage) {
            st_free_table(th->local_storage);
        }

        if (th->vm && th->vm->main_thread == th) {
            RUBY_GC_INFO("main thread\n");
        }
        else {
#ifdef USE_SIGALTSTACK
            if (th->altstack) {
                free(th->altstack);
            }
#endif
            ruby_xfree(ptr);
        }
        if (ruby_current_thread == th)
            ruby_current_thread = NULL;
    }
    RUBY_FREE_LEAVE("thread");
}

static size_t
thread_memsize(const void *ptr)
{
    if (ptr) {
        const rb_thread_t *th = ptr;
        size_t size = sizeof(rb_thread_t);

        if (!th->root_fiber) {
            size += th->stack_size * sizeof(VALUE);
        }
        if (th->local_storage) {
            size += st_memsize(th->local_storage);
        }
        return size;
    }
    else {
        return 0;
    }
}

#define thread_data_type ruby_threadptr_data_type
const rb_data_type_t ruby_threadptr_data_type = {
    "VM/thread",
    {
        rb_thread_mark,
        thread_free,
        thread_memsize,
    },
};

VALUE
rb_obj_is_thread(VALUE obj)
{
    if (rb_typeddata_is_kind_of(obj, &thread_data_type)) {
        return Qtrue;
    }
    else {
        return Qfalse;
    }
}

static VALUE
thread_alloc(VALUE klass)
{
    VALUE volatile obj;
#ifdef USE_THREAD_RECYCLE
    rb_thread_t *th = thread_recycle_struct();
    obj = TypedData_Wrap_Struct(klass, &thread_data_type, th);
#else
    rb_thread_t *th;
    obj = TypedData_Make_Struct(klass, rb_thread_t, &thread_data_type, th);
#endif
    return obj;
}

static void
th_init(rb_thread_t *th, VALUE self)
{
    th->self = self;

    /* allocate thread stack */
#ifdef USE_SIGALTSTACK
    /* altstack of main thread is reallocated in another place */
    th->altstack = malloc(ALT_STACK_SIZE);
#endif
    th->stack_size = RUBY_VM_THREAD_STACK_SIZE;
    th->stack = thread_recycle_stack(th->stack_size);

    th->cfp = (void *)(th->stack + th->stack_size);

    vm_push_frame(th, 0, VM_FRAME_MAGIC_TOP, Qnil, 0, 0,
                  th->stack, 0, 1);

    th->status = THREAD_RUNNABLE;
    th->errinfo = Qnil;
    th->last_status = Qnil;
    th->waiting_fd = -1;
}

static VALUE
ruby_thread_init(VALUE self)
{
    rb_thread_t *th;
    rb_vm_t *vm = GET_THREAD()->vm;
    GetThreadPtr(self, th);

    th_init(th, self);
    th->vm = vm;

    th->top_wrapper = 0;
    th->top_self = rb_vm_top_self();
    return self;
}

VALUE
rb_thread_alloc(VALUE klass)
{
    VALUE self = thread_alloc(klass);
    ruby_thread_init(self);
    return self;
}

static void
vm_define_method(rb_thread_t *th, VALUE obj, ID id, VALUE iseqval,
                 rb_num_t is_singleton, NODE *cref)
{
    VALUE klass = cref->nd_clss;
    int noex = (int)cref->nd_visi;
    rb_iseq_t *miseq;
    GetISeqPtr(iseqval, miseq);

    if (miseq->klass) {
        iseqval = rb_iseq_clone(iseqval, 0);
        RB_GC_GUARD(iseqval);
        GetISeqPtr(iseqval, miseq);
    }

    if (NIL_P(klass)) {
        rb_raise(rb_eTypeError, "no class/module to add method");
    }

    if (is_singleton) {
        if (FIXNUM_P(obj) || SYMBOL_P(obj)) {
            rb_raise(rb_eTypeError,
                     "can't define singleton method \"%s\" for %s",
                     rb_id2name(id), rb_obj_classname(obj));
        }

        rb_check_frozen(obj);
        klass = rb_singleton_class(obj);
        noex = NOEX_PUBLIC;
    }

    /* dup */
    COPY_CREF(miseq->cref_stack, cref);
    miseq->cref_stack->nd_visi = NOEX_PUBLIC;
    miseq->klass = klass;
    miseq->defined_method_id = id;
    rb_add_method(klass, id, VM_METHOD_TYPE_ISEQ, miseq, noex);

    if (!is_singleton && noex == NOEX_MODFUNC) {
        rb_add_method(rb_singleton_class(klass), id, VM_METHOD_TYPE_ISEQ, miseq, NOEX_PUBLIC);
    }
    INC_VM_STATE_VERSION();
}

#define REWIND_CFP(expr) do { \
    rb_thread_t *th__ = GET_THREAD(); \
    th__->cfp++; expr; th__->cfp--; \
} while (0)
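
/* REWIND_CFP: the m_core_* helpers below execute as methods of
   RubyVM::FrozenCore, so one extra control frame sits on top of the
   caller's; popping it while `expr' runs lets helpers such as
   rb_vm_cref() observe the calling frame instead of the core method's
   own. */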

static VALUE
m_core_define_method(VALUE self, VALUE cbase, VALUE sym, VALUE iseqval)
{
    REWIND_CFP({
        vm_define_method(GET_THREAD(), cbase, SYM2ID(sym), iseqval, 0, rb_vm_cref());
    });
    return Qnil;
}

static VALUE
m_core_define_singleton_method(VALUE self, VALUE cbase, VALUE sym, VALUE iseqval)
{
    REWIND_CFP({
        vm_define_method(GET_THREAD(), cbase, SYM2ID(sym), iseqval, 1, rb_vm_cref());
    });
    return Qnil;
}

static VALUE
m_core_set_method_alias(VALUE self, VALUE cbase, VALUE sym1, VALUE sym2)
{
    REWIND_CFP({
        rb_alias(cbase, SYM2ID(sym1), SYM2ID(sym2));
    });
    return Qnil;
}

static VALUE
m_core_set_variable_alias(VALUE self, VALUE sym1, VALUE sym2)
{
    REWIND_CFP({
        rb_alias_variable(SYM2ID(sym1), SYM2ID(sym2));
    });
    return Qnil;
}

static VALUE
m_core_undef_method(VALUE self, VALUE cbase, VALUE sym)
{
    REWIND_CFP({
        rb_undef(cbase, SYM2ID(sym));
        INC_VM_STATE_VERSION();
    });
    return Qnil;
}

static VALUE
m_core_set_postexe(VALUE self, VALUE iseqval)
{
    REWIND_CFP({
        rb_iseq_t *blockiseq;
        rb_block_t *blockptr;
        rb_thread_t *th = GET_THREAD();
        rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->cfp);
        VALUE proc;

        if (cfp == 0) {
            rb_bug("m_core_set_postexe: unreachable");
        }

        GetISeqPtr(iseqval, blockiseq);

        blockptr = RUBY_VM_GET_BLOCK_PTR_IN_CFP(cfp);
        blockptr->iseq = blockiseq;
        blockptr->proc = 0;

        proc = rb_vm_make_proc(th, blockptr, rb_cProc);
        rb_set_end_proc(rb_call_end_proc, proc);
    });
    return Qnil;
}

extern VALUE *rb_gc_stack_start;
extern size_t rb_gc_stack_maxsize;
#ifdef __ia64
extern VALUE *rb_gc_register_stack_start;
#endif

/* debug functions */

/* :nodoc: */
static VALUE
sdr(void)
{
    rb_vm_bugreport();
    return Qnil;
}

/* :nodoc: */
static VALUE
nsdr(void)
{
    VALUE ary = rb_ary_new();
#if HAVE_BACKTRACE
#include <execinfo.h>
#define MAX_NATIVE_TRACE 1024
    static void *trace[MAX_NATIVE_TRACE];
    int n = backtrace(trace, MAX_NATIVE_TRACE);
    char **syms = backtrace_symbols(trace, n);
    int i;

    if (syms == 0) {
        rb_memerror();
    }

    for (i=0; i<n; i++) {
        rb_ary_push(ary, rb_str_new2(syms[i]));
    }
    free(syms); /* OK */
#endif
    return ary;
}

void
Init_VM(void)
{
    VALUE opts;
    VALUE klass;
    VALUE fcore;

    /* ::VM */
    rb_cRubyVM = rb_define_class("RubyVM", rb_cObject);
    rb_undef_alloc_func(rb_cRubyVM);
    rb_undef_method(CLASS_OF(rb_cRubyVM), "new");

    /* ::VM::FrozenCore */
    fcore = rb_class_new(rb_cBasicObject);
    RBASIC(fcore)->flags = T_ICLASS;
    klass = rb_singleton_class(fcore);
    rb_define_method_id(klass, id_core_set_method_alias, m_core_set_method_alias, 3);
    rb_define_method_id(klass, id_core_set_variable_alias, m_core_set_variable_alias, 2);
    rb_define_method_id(klass, id_core_undef_method, m_core_undef_method, 2);
    rb_define_method_id(klass, id_core_define_method, m_core_define_method, 3);
    rb_define_method_id(klass, id_core_define_singleton_method, m_core_define_singleton_method, 3);
    rb_define_method_id(klass, id_core_set_postexe, m_core_set_postexe, 1);
    rb_define_method_id(klass, idProc, rb_block_proc, 0);
    rb_define_method_id(klass, idLambda, rb_block_lambda, 0);
    rb_obj_freeze(fcore);
    rb_gc_register_mark_object(fcore);
    rb_mRubyVMFrozenCore = fcore;

    /* ::VM::Env */
    rb_cEnv = rb_define_class_under(rb_cRubyVM, "Env", rb_cObject);
    rb_undef_alloc_func(rb_cEnv);
    rb_undef_method(CLASS_OF(rb_cEnv), "new");

    /* ::Thread */
    rb_cThread = rb_define_class("Thread", rb_cObject);
    rb_undef_alloc_func(rb_cThread);

    /* ::VM::USAGE_ANALYSIS_* */
    rb_define_const(rb_cRubyVM, "USAGE_ANALYSIS_INSN", rb_hash_new());
    rb_define_const(rb_cRubyVM, "USAGE_ANALYSIS_REGS", rb_hash_new());
    rb_define_const(rb_cRubyVM, "USAGE_ANALYSIS_INSN_BIGRAM", rb_hash_new());
    rb_define_const(rb_cRubyVM, "OPTS", opts = rb_ary_new());

#if OPT_DIRECT_THREADED_CODE
    rb_ary_push(opts, rb_str_new2("direct threaded code"));
#elif OPT_TOKEN_THREADED_CODE
    rb_ary_push(opts, rb_str_new2("token threaded code"));
#elif OPT_CALL_THREADED_CODE
    rb_ary_push(opts, rb_str_new2("call threaded code"));
#endif

#if OPT_STACK_CACHING
    rb_ary_push(opts, rb_str_new2("stack caching"));
#endif
#if OPT_OPERANDS_UNIFICATION
    rb_ary_push(opts, rb_str_new2("operands unification"));
#endif
#if OPT_INSTRUCTIONS_UNIFICATION
    rb_ary_push(opts, rb_str_new2("instructions unification"));
#endif
#if OPT_INLINE_METHOD_CACHE
    rb_ary_push(opts, rb_str_new2("inline method cache"));
#endif
#if OPT_BLOCKINLINING
    rb_ary_push(opts, rb_str_new2("block inlining"));
#endif

    /* ::VM::InsnNameArray */
    rb_define_const(rb_cRubyVM, "INSTRUCTION_NAMES", rb_insns_name_array());

    /* debug functions ::VM::SDR(), ::VM::NSDR() */
#if VMDEBUG
    rb_define_singleton_method(rb_cRubyVM, "SDR", sdr, 0);
    rb_define_singleton_method(rb_cRubyVM, "NSDR", nsdr, 0);
#else
    (void)sdr;
    (void)nsdr;
#endif

    /* VM bootstrap: phase 2 */
    {
        rb_vm_t *vm = ruby_current_vm;
        rb_thread_t *th = GET_THREAD();
        VALUE filename = rb_str_new2("<main>");
        volatile VALUE iseqval = rb_iseq_new(0, filename, filename, Qnil, 0, ISEQ_TYPE_TOP);
        volatile VALUE th_self;
        rb_iseq_t *iseq;

        /* create vm object */
        vm->self = TypedData_Wrap_Struct(rb_cRubyVM, &vm_data_type, vm);

        /* create main thread */
        th_self = th->self = TypedData_Wrap_Struct(rb_cThread, &thread_data_type, th);
        vm->main_thread = th;
        vm->running_thread = th;
        th->vm = vm;
        th->top_wrapper = 0;
        th->top_self = rb_vm_top_self();
        rb_thread_set_current(th);

        vm->living_threads = st_init_numtable();
        st_insert(vm->living_threads, th_self, (st_data_t) th->thread_id);

        rb_gc_register_mark_object(iseqval);
        GetISeqPtr(iseqval, iseq);
        th->cfp->iseq = iseq;
        th->cfp->pc = iseq->iseq_encoded;
        th->cfp->self = th->top_self;

        /*
         * The Binding of the top level scope
         */
        rb_define_global_const("TOPLEVEL_BINDING", rb_binding_new());
    }
    vm_init_redefined_flag();
}

void
rb_vm_set_progname(VALUE filename)
{
    rb_thread_t *th = GET_VM()->main_thread;
    rb_control_frame_t *cfp = (void *)(th->stack + th->stack_size);
    --cfp;
    cfp->iseq->filename = filename;
}

#if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
struct rb_objspace *rb_objspace_alloc(void);
#endif

void
Init_BareVM(void)
{
    /* VM bootstrap: phase 1 */
    rb_vm_t * vm = malloc(sizeof(*vm));
    rb_thread_t * th = malloc(sizeof(*th));
    if (!vm || !th) {
        fprintf(stderr, "[FATAL] failed to allocate memory\n");
        exit(EXIT_FAILURE);
    }
    MEMZERO(th, rb_thread_t, 1);

    rb_thread_set_current_raw(th);

    vm_init2(vm);
#if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
    vm->objspace = rb_objspace_alloc();
#endif
    ruby_current_vm = vm;

    Init_native_thread();
    th_init(th, 0);
    th->vm = vm;
    ruby_thread_init_stack(th);
}

/* top self */

static VALUE
main_to_s(VALUE obj)
{
    return rb_str_new2("main");
}

VALUE
rb_vm_top_self(void)
{
    return GET_VM()->top_self;
}

void
Init_top_self(void)
{
    rb_vm_t *vm = GET_VM();

    vm->top_self = rb_obj_alloc(rb_cObject);
    rb_define_singleton_method(rb_vm_top_self(), "to_s", main_to_s, 0);

    /* initialize mark object array */
    vm->mark_object_ary = rb_ary_new();
}

VALUE *
ruby_vm_verbose_ptr(rb_vm_t *vm)
{
    return &vm->verbose;
}

VALUE *
ruby_vm_debug_ptr(rb_vm_t *vm)
{
    return &vm->debug;
}

VALUE *
rb_ruby_verbose_ptr(void)
{
    return ruby_vm_verbose_ptr(GET_VM());
}

VALUE *
rb_ruby_debug_ptr(void)
{
    return ruby_vm_debug_ptr(GET_VM());
}