Ruby 3.1.4p223 (2023-03-30 revision HEAD)
vm.c
1 /**********************************************************************
2 
3  vm.c -
4 
5  $Author$
6 
7  Copyright (C) 2004-2007 Koichi Sasada
8 
9 **********************************************************************/
10 
11 #define vm_exec rb_vm_exec
12 
13 #include "eval_intern.h"
14 #include "gc.h"
15 #include "internal.h"
16 #include "internal/compile.h"
17 #include "internal/cont.h"
18 #include "internal/error.h"
19 #include "internal/eval.h"
20 #include "internal/inits.h"
21 #include "internal/object.h"
22 #include "internal/parse.h"
23 #include "internal/proc.h"
24 #include "internal/re.h"
25 #include "internal/symbol.h"
26 #include "internal/thread.h"
27 #include "internal/vm.h"
28 #include "internal/sanitizers.h"
29 #include "iseq.h"
30 #include "mjit.h"
31 #include "yjit.h"
32 #include "ruby/st.h"
33 #include "ruby/vm.h"
34 #include "vm_core.h"
35 #include "vm_callinfo.h"
36 #include "vm_debug.h"
37 #include "vm_exec.h"
38 #include "vm_insnhelper.h"
39 #include "ractor_core.h"
40 #include "vm_sync.h"
41 
42 #include "builtin.h"
43 
44 #ifndef MJIT_HEADER
45 #include "probes.h"
46 #else
47 #include "probes.dmyh"
48 #endif
49 #include "probes_helper.h"
50 
51 VALUE rb_str_concat_literals(size_t, const VALUE*);
52 
53 /* :FIXME: This #ifdef is because we build pch in case of mswin and
54  * not in case of other situations. That distinction might change in
55  * the future. We should make it detectable by something better
56  * than just _MSC_VER. */
57 #ifdef _MSC_VER
58 RUBY_FUNC_EXPORTED
59 #else
60 MJIT_FUNC_EXPORTED
61 #endif
62 VALUE vm_exec(rb_execution_context_t *, bool);
63 
64 PUREFUNC(static inline const VALUE *VM_EP_LEP(const VALUE *));
65 static inline const VALUE *
66 VM_EP_LEP(const VALUE *ep)
67 {
68  while (!VM_ENV_LOCAL_P(ep)) {
69  ep = VM_ENV_PREV_EP(ep);
70  }
71  return ep;
72 }
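/* Each env on the stack stores flag bits and a link to the previous EP in
 * its trailing slots; a block's EP links to the EP of the frame that
 * lexically encloses it.  VM_EP_LEP() walks that chain until
 * VM_ENV_LOCAL_P() reports a "local" env, i.e. the method-level frame where
 * `return' and method-local variables resolve.  A minimal sketch of the same
 * walk, over a hypothetical simplified env structure (not the real VM types):
 */
#if 0
struct toy_env { const struct toy_env *prev; int is_local; };

static const struct toy_env *
toy_lep(const struct toy_env *ep)
{
    while (!ep->is_local) ep = ep->prev; /* same loop shape as VM_EP_LEP() */
    return ep;
}
#endif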
73 
74 static inline const rb_control_frame_t *
75 rb_vm_search_cf_from_ep(const rb_execution_context_t *ec, const rb_control_frame_t *cfp, const VALUE * const ep)
76 {
77  if (!ep) {
78  return NULL;
79  }
80  else {
81  const rb_control_frame_t * const eocfp = RUBY_VM_END_CONTROL_FRAME(ec); /* end of control frame pointer */
82 
83  while (cfp < eocfp) {
84  if (cfp->ep == ep) {
85  return cfp;
86  }
87  cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
88  }
89 
90  return NULL;
91  }
92 }
93 
94 const VALUE *
95 rb_vm_ep_local_ep(const VALUE *ep)
96 {
97  return VM_EP_LEP(ep);
98 }
99 
100 PUREFUNC(static inline const VALUE *VM_CF_LEP(const rb_control_frame_t * const cfp));
101 static inline const VALUE *
102 VM_CF_LEP(const rb_control_frame_t * const cfp)
103 {
104  return VM_EP_LEP(cfp->ep);
105 }
106 
107 static inline const VALUE *
108 VM_CF_PREV_EP(const rb_control_frame_t * const cfp)
109 {
110  return VM_ENV_PREV_EP(cfp->ep);
111 }
112 
113 PUREFUNC(static inline VALUE VM_CF_BLOCK_HANDLER(const rb_control_frame_t * const cfp));
114 static inline VALUE
115 VM_CF_BLOCK_HANDLER(const rb_control_frame_t * const cfp)
116 {
117  const VALUE *ep = VM_CF_LEP(cfp);
118  return VM_ENV_BLOCK_HANDLER(ep);
119 }
120 
121 int
122 rb_vm_cframe_keyword_p(const rb_control_frame_t *cfp)
123 {
124  return VM_FRAME_CFRAME_KW_P(cfp);
125 }
126 
127 VALUE
128 rb_vm_frame_block_handler(const rb_control_frame_t *cfp)
129 {
130  return VM_CF_BLOCK_HANDLER(cfp);
131 }
132 
133 #if VM_CHECK_MODE > 0
134 static int
135 VM_CFP_IN_HEAP_P(const rb_execution_context_t *ec, const rb_control_frame_t *cfp)
136 {
137  const VALUE *start = ec->vm_stack;
138  const VALUE *end = (VALUE *)ec->vm_stack + ec->vm_stack_size;
139  VM_ASSERT(start != NULL);
140 
141  if (start <= (VALUE *)cfp && (VALUE *)cfp < end) {
142  return FALSE;
143  }
144  else {
145  return TRUE;
146  }
147 }
148 
149 static int
150 VM_EP_IN_HEAP_P(const rb_execution_context_t *ec, const VALUE *ep)
151 {
152  const VALUE *start = ec->vm_stack;
153  const VALUE *end = (VALUE *)ec->cfp;
154  VM_ASSERT(start != NULL);
155 
156  if (start <= ep && ep < end) {
157  return FALSE;
158  }
159  else {
160  return TRUE;
161  }
162 }
163 
164 static int
165 vm_ep_in_heap_p_(const rb_execution_context_t *ec, const VALUE *ep)
166 {
167  if (VM_EP_IN_HEAP_P(ec, ep)) {
168  VALUE envval = ep[VM_ENV_DATA_INDEX_ENV]; /* VM_ENV_ENVVAL(ep); */
169 
170  if (envval != Qundef) {
171  const rb_env_t *env = (const rb_env_t *)envval;
172 
173  VM_ASSERT(vm_assert_env(envval));
174  VM_ASSERT(VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED));
175  VM_ASSERT(env->ep == ep);
176  }
177  return TRUE;
178  }
179  else {
180  return FALSE;
181  }
182 }
183 
184 int
185 rb_vm_ep_in_heap_p(const VALUE *ep)
186 {
187  const rb_execution_context_t *ec = GET_EC();
188  if (ec->vm_stack == NULL) return TRUE;
189  return vm_ep_in_heap_p_(ec, ep);
190 }
191 #endif
192 
193 static struct rb_captured_block *
194 VM_CFP_TO_CAPTURED_BLOCK(const rb_control_frame_t *cfp)
195 {
196  VM_ASSERT(!VM_CFP_IN_HEAP_P(GET_EC(), cfp));
197  return (struct rb_captured_block *)&cfp->self;
198 }
199 
200 static rb_control_frame_t *
201 VM_CAPTURED_BLOCK_TO_CFP(const struct rb_captured_block *captured)
202 {
203  rb_control_frame_t *cfp = ((rb_control_frame_t *)((VALUE *)(captured) - 3));
204  VM_ASSERT(!VM_CFP_IN_HEAP_P(GET_EC(), cfp));
205  VM_ASSERT(sizeof(rb_control_frame_t)/sizeof(VALUE) == 8 + VM_DEBUG_BP_CHECK ? 1 : 0);
206  return cfp;
207 }
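/* VM_CFP_TO_CAPTURED_BLOCK() and VM_CAPTURED_BLOCK_TO_CFP() exploit the
 * rb_control_frame_t layout: self, ep, and block_code come right after pc,
 * sp, and iseq, so a captured block is just those three slots viewed in
 * place.  &cfp->self is three VALUEs into the frame, hence the `- 3' above;
 * the VM_ASSERT on the frame size guards this aliasing against layout
 * changes. */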
208 
209 static int
210 VM_BH_FROM_CFP_P(VALUE block_handler, const rb_control_frame_t *cfp)
211 {
212  const struct rb_captured_block *captured = VM_CFP_TO_CAPTURED_BLOCK(cfp);
213  return VM_TAGGED_PTR_REF(block_handler, 0x03) == captured;
214 }
215 
216 static VALUE
217 vm_passed_block_handler(rb_execution_context_t *ec)
218 {
219  VALUE block_handler = ec->passed_block_handler;
220  ec->passed_block_handler = VM_BLOCK_HANDLER_NONE;
221  vm_block_handler_verify(block_handler);
222  return block_handler;
223 }
224 
225 static rb_cref_t *
226 vm_cref_new0(VALUE klass, rb_method_visibility_t visi, int module_func, rb_cref_t *prev_cref, int pushed_by_eval, int use_prev_prev, int singleton)
227 {
228  VALUE refinements = Qnil;
229  int omod_shared = FALSE;
230  rb_cref_t *cref;
231 
232  /* scope */
233  union {
234  rb_scope_visibility_t visi;
235  VALUE value;
236  } scope_visi;
237 
238  scope_visi.visi.method_visi = visi;
239  scope_visi.visi.module_func = module_func;
240 
241  /* refinements */
242  if (prev_cref != NULL && prev_cref != (void *)1 /* TODO: why is CREF_NEXT(cref) 1? */) {
243  refinements = CREF_REFINEMENTS(prev_cref);
244 
245  if (!NIL_P(refinements)) {
246  omod_shared = TRUE;
247  CREF_OMOD_SHARED_SET(prev_cref);
248  }
249  }
250 
251  VM_ASSERT(singleton || klass);
252 
253  cref = (rb_cref_t *)rb_imemo_new(imemo_cref, klass, (VALUE)(use_prev_prev ? CREF_NEXT(prev_cref) : prev_cref), scope_visi.value, refinements);
254 
255  if (pushed_by_eval) CREF_PUSHED_BY_EVAL_SET(cref);
256  if (omod_shared) CREF_OMOD_SHARED_SET(cref);
257  if (singleton) CREF_SINGLETON_SET(cref);
258 
259  return cref;
260 }
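/* A cref chain records lexical nesting for constant lookup, default method
 * visibility, and refinement activation.  Inside `class A; class B; ... end;
 * end' the chain is roughly cref(B) -> cref(A) -> cref(Object); this is the
 * same nesting that Module.nesting reports, minus the implicit Object
 * entry. */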
261 
262 static rb_cref_t *
263 vm_cref_new(VALUE klass, rb_method_visibility_t visi, int module_func, rb_cref_t *prev_cref, int pushed_by_eval, int singleton)
264 {
265  return vm_cref_new0(klass, visi, module_func, prev_cref, pushed_by_eval, FALSE, singleton);
266 }
267 
268 static rb_cref_t *
269 vm_cref_new_use_prev(VALUE klass, rb_method_visibility_t visi, int module_func, rb_cref_t *prev_cref, int pushed_by_eval)
270 {
271  return vm_cref_new0(klass, visi, module_func, prev_cref, pushed_by_eval, TRUE, FALSE);
272 }
273 
274 static int
275 ref_delete_symkey(VALUE key, VALUE value, VALUE unused)
276 {
277  return SYMBOL_P(key) ? ST_DELETE : ST_CONTINUE;
278 }
279 
280 static rb_cref_t *
281 vm_cref_dup(const rb_cref_t *cref)
282 {
283  const rb_scope_visibility_t *visi = CREF_SCOPE_VISI(cref);
284  rb_cref_t *next_cref = CREF_NEXT(cref), *new_cref;
285  int pushed_by_eval = CREF_PUSHED_BY_EVAL(cref);
286  int singleton = CREF_SINGLETON(cref);
287 
288  new_cref = vm_cref_new(cref->klass_or_self, visi->method_visi, visi->module_func, next_cref, pushed_by_eval, singleton);
289 
290  if (!NIL_P(CREF_REFINEMENTS(cref))) {
291  VALUE ref = rb_hash_dup(CREF_REFINEMENTS(cref));
292  rb_hash_foreach(ref, ref_delete_symkey, Qnil);
293  CREF_REFINEMENTS_SET(new_cref, ref);
294  CREF_OMOD_SHARED_UNSET(new_cref);
295  }
296 
297  return new_cref;
298 }
299 
300 
301 rb_cref_t *
302 rb_vm_cref_dup_without_refinements(const rb_cref_t *cref)
303 {
304  const rb_scope_visibility_t *visi = CREF_SCOPE_VISI(cref);
305  rb_cref_t *next_cref = CREF_NEXT(cref), *new_cref;
306  int pushed_by_eval = CREF_PUSHED_BY_EVAL(cref);
307  int singleton = CREF_SINGLETON(cref);
308 
309  new_cref = vm_cref_new(cref->klass_or_self, visi->method_visi, visi->module_func, next_cref, pushed_by_eval, singleton);
310 
311  if (!NIL_P(CREF_REFINEMENTS(cref))) {
312  CREF_REFINEMENTS_SET(new_cref, Qnil);
313  CREF_OMOD_SHARED_UNSET(new_cref);
314  }
315 
316  return new_cref;
317 }
318 
319 static rb_cref_t *
320 vm_cref_new_toplevel(rb_execution_context_t *ec)
321 {
322  rb_cref_t *cref = vm_cref_new(rb_cObject, METHOD_VISI_PRIVATE /* toplevel visibility is private */, FALSE, NULL, FALSE, FALSE);
323  VALUE top_wrapper = rb_ec_thread_ptr(ec)->top_wrapper;
324 
325  if (top_wrapper) {
326  cref = vm_cref_new(top_wrapper, METHOD_VISI_PRIVATE, FALSE, cref, FALSE, FALSE);
327  }
328 
329  return cref;
330 }
331 
332 rb_cref_t *
333 rb_vm_cref_new_toplevel(void)
334 {
335  return vm_cref_new_toplevel(GET_EC());
336 }
337 
338 static void
339 vm_cref_dump(const char *mesg, const rb_cref_t *cref)
340 {
341  ruby_debug_printf("vm_cref_dump: %s (%p)\n", mesg, (void *)cref);
342 
343  while (cref) {
344  ruby_debug_printf("= cref| klass: %s\n", RSTRING_PTR(rb_class_path(CREF_CLASS(cref))));
345  cref = CREF_NEXT(cref);
346  }
347 }
348 
349 void
350 rb_vm_block_ep_update(VALUE obj, const struct rb_block *dst, const VALUE *ep)
351 {
352  *((const VALUE **)&dst->as.captured.ep) = ep;
353  RB_OBJ_WRITTEN(obj, Qundef, VM_ENV_ENVVAL(ep));
354 }
355 
356 static void
357 vm_bind_update_env(VALUE bindval, rb_binding_t *bind, VALUE envval)
358 {
359  const rb_env_t *env = (rb_env_t *)envval;
360  RB_OBJ_WRITE(bindval, &bind->block.as.captured.code.iseq, env->iseq);
361  rb_vm_block_ep_update(bindval, &bind->block, env->ep);
362 }
363 
364 #if VM_COLLECT_USAGE_DETAILS
365 static void vm_collect_usage_operand(int insn, int n, VALUE op);
366 static void vm_collect_usage_insn(int insn);
367 static void vm_collect_usage_register(int reg, int isset);
368 #endif
369 
370 static VALUE vm_make_env_object(const rb_execution_context_t *ec, rb_control_frame_t *cfp);
371 extern VALUE rb_vm_invoke_bmethod(rb_execution_context_t *ec, rb_proc_t *proc, VALUE self,
372  int argc, const VALUE *argv, int kw_splat, VALUE block_handler,
373  const rb_callable_method_entry_t *me);
374 static VALUE vm_invoke_proc(rb_execution_context_t *ec, rb_proc_t *proc, VALUE self, int argc, const VALUE *argv, int kw_splat, VALUE block_handler);
375 
376 #include "vm_insnhelper.c"
377 
378 #ifndef MJIT_HEADER
379 
380 #include "vm_exec.c"
381 
382 #include "vm_method.c"
383 #endif /* #ifndef MJIT_HEADER */
384 #include "vm_eval.c"
385 #ifndef MJIT_HEADER
386 
387 #define PROCDEBUG 0
388 
389 rb_serial_t
390 rb_next_class_serial(void)
391 {
392  rb_serial_t class_serial = NEXT_CLASS_SERIAL();
393  return class_serial;
394 }
395 
397 VALUE rb_cRubyVM;
398 VALUE rb_mRubyVMFrozenCore;
399 VALUE rb_block_param_proxy;
400 
401 #define ruby_vm_redefined_flag GET_VM()->redefined_flag
402 VALUE ruby_vm_const_missing_count = 0;
403 rb_vm_t *ruby_current_vm_ptr = NULL;
404 rb_ractor_t *ruby_single_main_ractor;
405 bool ruby_vm_keep_script_lines;
406 
407 #ifdef RB_THREAD_LOCAL_SPECIFIER
408 RB_THREAD_LOCAL_SPECIFIER rb_execution_context_t *ruby_current_ec;
409 
410 #ifdef __APPLE__
411  rb_execution_context_t *
412  rb_current_ec(void)
413  {
414  return ruby_current_ec;
415  }
416  void
417  rb_current_ec_set(rb_execution_context_t *ec)
418  {
419  ruby_current_ec = ec;
420  }
421 #endif
422 
423 #else
424 native_tls_key_t ruby_current_ec_key;
425 #endif
426 
427 rb_event_flag_t ruby_vm_event_flags;
428 rb_event_flag_t ruby_vm_event_enabled_global_flags;
429 unsigned int ruby_vm_event_local_num;
430 
431 rb_serial_t ruby_vm_global_constant_state = 1;
432 rb_serial_t ruby_vm_class_serial = 1;
433 rb_serial_t ruby_vm_global_cvar_state = 1;
434 
435 static const struct rb_callcache vm_empty_cc = {
436  .flags = T_IMEMO | (imemo_callcache << FL_USHIFT) | VM_CALLCACHE_UNMARKABLE,
437  .klass = Qfalse,
438  .cme_ = NULL,
439  .call_ = vm_call_general,
440  .aux_ = {
441  .v = Qfalse,
442  }
443 };
444 
445 static const struct rb_callcache vm_empty_cc_for_super = {
446  .flags = T_IMEMO | (imemo_callcache << FL_USHIFT) | VM_CALLCACHE_UNMARKABLE,
447  .klass = Qfalse,
448  .cme_ = NULL,
449  .call_ = vm_call_super_method,
450  .aux_ = {
451  .v = Qfalse,
452  }
453 };
454 
455 static void thread_free(void *ptr);
456 
457 void
458 rb_vm_inc_const_missing_count(void)
459 {
460  ruby_vm_const_missing_count += 1;
461 }
462 
463 MJIT_FUNC_EXPORTED int
464 rb_dtrace_setup(rb_execution_context_t *ec, VALUE klass, ID id,
465  struct ruby_dtrace_method_hook_args *args)
466 {
467  enum ruby_value_type type;
468  if (!klass) {
469  if (!ec) ec = GET_EC();
470  if (!rb_ec_frame_method_id_and_class(ec, &id, 0, &klass) || !klass)
471  return FALSE;
472  }
473  if (RB_TYPE_P(klass, T_ICLASS)) {
474  klass = RBASIC(klass)->klass;
475  }
476  else if (FL_TEST(klass, FL_SINGLETON)) {
477  klass = rb_attr_get(klass, id__attached__);
478  if (NIL_P(klass)) return FALSE;
479  }
480  type = BUILTIN_TYPE(klass);
481  if (type == T_CLASS || type == T_ICLASS || type == T_MODULE) {
482  VALUE name = rb_class_path(klass);
483  const char *classname, *filename;
484  const char *methodname = rb_id2name(id);
485  if (methodname && (filename = rb_source_location_cstr(&args->line_no)) != 0) {
486  if (NIL_P(name) || !(classname = StringValuePtr(name)))
487  classname = "<unknown>";
488  args->classname = classname;
489  args->methodname = methodname;
490  args->filename = filename;
491  args->klass = klass;
492  args->name = name;
493  return TRUE;
494  }
495  }
496  return FALSE;
497 }
498 
499 /*
500  * call-seq:
501  * RubyVM.stat -> Hash
502  * RubyVM.stat(hsh) -> hsh
503  * RubyVM.stat(Symbol) -> Numeric
504  *
505  * Returns a Hash containing implementation-dependent counters inside the VM.
506  *
507  * This hash includes information about method/constant cache serials:
508  *
509  * {
510  * :global_constant_state=>481,
511  * :class_serial=>9029
512  * }
513  *
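 * For example, a single counter can be read by passing its name:
 *
 *    RubyVM.stat(:global_constant_state) #=> 481
 *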
514  * The contents of the hash are implementation specific and may be changed in
515  * the future.
516  *
517  * This method is only expected to work on C Ruby.
518  */
519 
520 static VALUE
521 vm_stat(int argc, VALUE *argv, VALUE self)
522 {
523  static VALUE sym_global_constant_state, sym_class_serial, sym_global_cvar_state;
524  VALUE arg = Qnil;
525  VALUE hash = Qnil, key = Qnil;
526 
527  if (rb_check_arity(argc, 0, 1) == 1) {
528  arg = argv[0];
529  if (SYMBOL_P(arg))
530  key = arg;
531  else if (RB_TYPE_P(arg, T_HASH))
532  hash = arg;
533  else
534  rb_raise(rb_eTypeError, "non-hash or symbol given");
535  }
536  else {
537  hash = rb_hash_new();
538  }
539 
540  if (sym_global_constant_state == 0) {
541 #define S(s) sym_##s = ID2SYM(rb_intern_const(#s))
542  S(global_constant_state);
543  S(class_serial);
544  S(global_cvar_state);
545 #undef S
546  }
547 
548 #define SET(name, attr) \
549  if (key == sym_##name) \
550  return SERIALT2NUM(attr); \
551  else if (hash != Qnil) \
552  rb_hash_aset(hash, sym_##name, SERIALT2NUM(attr));
553 
554  SET(global_constant_state, ruby_vm_global_constant_state);
555  SET(class_serial, ruby_vm_class_serial);
556  SET(global_cvar_state, ruby_vm_global_cvar_state);
557 #undef SET
558 
559  if (!NIL_P(key)) { /* matched key should return above */
560  rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(key));
561  }
562 
563  return hash;
564 }
565 
566 /* control stack frame */
567 
568 static void
569 vm_set_top_stack(rb_execution_context_t *ec, const rb_iseq_t *iseq)
570 {
571  if (iseq->body->type != ISEQ_TYPE_TOP) {
572  rb_raise(rb_eTypeError, "Not a toplevel InstructionSequence");
573  }
574 
575  /* for return */
576  vm_push_frame(ec, iseq, VM_FRAME_MAGIC_TOP | VM_ENV_FLAG_LOCAL | VM_FRAME_FLAG_FINISH, rb_ec_thread_ptr(ec)->top_self,
577  VM_BLOCK_HANDLER_NONE,
578  (VALUE)vm_cref_new_toplevel(ec), /* cref or me */
579  iseq->body->iseq_encoded, ec->cfp->sp,
580  iseq->body->local_table_size, iseq->body->stack_max);
581 }
582 
583 static void
584 vm_set_eval_stack(rb_execution_context_t *ec, const rb_iseq_t *iseq, const rb_cref_t *cref, const struct rb_block *base_block)
585 {
586  vm_push_frame(ec, iseq, VM_FRAME_MAGIC_EVAL | VM_FRAME_FLAG_FINISH,
587  vm_block_self(base_block), VM_GUARDED_PREV_EP(vm_block_ep(base_block)),
588  (VALUE)cref, /* cref or me */
589  iseq->body->iseq_encoded,
590  ec->cfp->sp, iseq->body->local_table_size,
591  iseq->body->stack_max);
592 }
593 
594 static void
595 vm_set_main_stack(rb_execution_context_t *ec, const rb_iseq_t *iseq)
596 {
597  VALUE toplevel_binding = rb_const_get(rb_cObject, rb_intern("TOPLEVEL_BINDING"));
598  rb_binding_t *bind;
599 
600  GetBindingPtr(toplevel_binding, bind);
601  RUBY_ASSERT_MESG(bind, "TOPLEVEL_BINDING is not built");
602 
603  vm_set_eval_stack(ec, iseq, 0, &bind->block);
604 
605  /* save binding */
606  if (iseq->body->local_table_size > 0) {
607  vm_bind_update_env(toplevel_binding, bind, vm_make_env_object(ec, ec->cfp));
608  }
609 }
610 
611 rb_control_frame_t *
612 rb_vm_get_binding_creatable_next_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp)
613 {
614  while (!RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(ec, cfp)) {
615  if (cfp->iseq) {
616  return (rb_control_frame_t *)cfp;
617  }
618  cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
619  }
620  return 0;
621 }
622 
623 MJIT_FUNC_EXPORTED rb_control_frame_t *
624 rb_vm_get_ruby_level_next_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp)
625 {
626  while (!RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(ec, cfp)) {
627  if (VM_FRAME_RUBYFRAME_P(cfp)) {
628  return (rb_control_frame_t *)cfp;
629  }
630  cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
631  }
632  return 0;
633 }
634 
635 #endif /* #ifndef MJIT_HEADER */
636 
637 static rb_control_frame_t *
638 vm_get_ruby_level_caller_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp)
639 {
640  if (VM_FRAME_RUBYFRAME_P(cfp)) {
641  return (rb_control_frame_t *)cfp;
642  }
643 
644  cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
645 
646  while (!RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(ec, cfp)) {
647  if (VM_FRAME_RUBYFRAME_P(cfp)) {
648  return (rb_control_frame_t *)cfp;
649  }
650 
651  if (VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_PASSED) == FALSE) {
652  break;
653  }
654  cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
655  }
656  return 0;
657 }
658 
659 MJIT_STATIC void
660 rb_vm_pop_cfunc_frame(void)
661 {
662  rb_execution_context_t *ec = GET_EC();
663  rb_control_frame_t *cfp = ec->cfp;
664  const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(cfp);
665 
666  EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_RETURN, cfp->self, me->def->original_id, me->called_id, me->owner, Qnil);
667  RUBY_DTRACE_CMETHOD_RETURN_HOOK(ec, me->owner, me->def->original_id);
668  vm_pop_frame(ec, cfp, cfp->ep);
669 }
670 
671 #ifndef MJIT_HEADER
672 
673 void
674 rb_vm_rewind_cfp(rb_execution_context_t *ec, rb_control_frame_t *cfp)
675 {
676  /* check skipped frame */
677  while (ec->cfp != cfp) {
678 #if VMDEBUG
679  printf("skipped frame: %s\n", vm_frametype_name(ec->cfp));
680 #endif
681  if (VM_FRAME_TYPE(ec->cfp) != VM_FRAME_MAGIC_CFUNC) {
682  rb_vm_pop_frame(ec);
683  }
684  else { /* unlikely path */
685  rb_vm_pop_cfunc_frame();
686  }
687  }
688 }
689 
690 /* at exit */
691 
692 void
693 ruby_vm_at_exit(void (*func)(rb_vm_t *))
694 {
695  rb_vm_t *vm = GET_VM();
696  rb_at_exit_list *nl = ALLOC(rb_at_exit_list);
697  nl->func = func;
698  nl->next = vm->at_exit;
699  vm->at_exit = nl;
700 }
701 
702 static void
703 ruby_vm_run_at_exit_hooks(rb_vm_t *vm)
704 {
705  rb_at_exit_list *l = vm->at_exit;
706 
707  while (l) {
708  rb_at_exit_list* t = l->next;
709  rb_vm_at_exit_func *func = l->func;
710  ruby_xfree(l);
711  l = t;
712  (*func)(vm);
713  }
714 }
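/* ruby_vm_at_exit() pushes new entries onto the head of vm->at_exit and the
 * loop above consumes the list from the head, so hooks run in LIFO order:
 * the hook registered last runs first. */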
715 
716 /* Env */
717 
718 static VALUE check_env_value(const rb_env_t *env);
719 
720 static int
721 check_env(const rb_env_t *env)
722 {
723  fputs("---\n", stderr);
724  ruby_debug_printf("envptr: %p\n", (void *)&env->ep[0]);
725  ruby_debug_printf("envval: %10p ", (void *)env->ep[1]);
726  dp(env->ep[1]);
727  ruby_debug_printf("ep: %10p\n", (void *)env->ep);
728  if (rb_vm_env_prev_env(env)) {
729  fputs(">>\n", stderr);
730  check_env_value(rb_vm_env_prev_env(env));
731  fputs("<<\n", stderr);
732  }
733  return 1;
734 }
735 
736 static VALUE
737 check_env_value(const rb_env_t *env)
738 {
739  if (check_env(env)) {
740  return (VALUE)env;
741  }
742  rb_bug("invalid env");
743  return Qnil; /* unreachable */
744 }
745 
746 static VALUE
747 vm_block_handler_escape(const rb_execution_context_t *ec, VALUE block_handler)
748 {
749  switch (vm_block_handler_type(block_handler)) {
750  case block_handler_type_ifunc:
751  case block_handler_type_iseq:
752  return rb_vm_make_proc(ec, VM_BH_TO_CAPT_BLOCK(block_handler), rb_cProc);
753 
754  case block_handler_type_symbol:
755  case block_handler_type_proc:
756  return block_handler;
757  }
758  VM_UNREACHABLE(vm_block_handler_escape);
759  return Qnil;
760 }
761 
762 static VALUE
763 vm_make_env_each(const rb_execution_context_t * const ec, rb_control_frame_t *const cfp)
764 {
765  const VALUE * const ep = cfp->ep;
766  const rb_env_t *env;
767  const rb_iseq_t *env_iseq;
768  VALUE *env_body, *env_ep;
769  int local_size, env_size;
770 
771  if (VM_ENV_ESCAPED_P(ep)) {
772  return VM_ENV_ENVVAL(ep);
773  }
774 
775  if (!VM_ENV_LOCAL_P(ep)) {
776  const VALUE *prev_ep = VM_ENV_PREV_EP(ep);
777  if (!VM_ENV_ESCAPED_P(prev_ep)) {
778  rb_control_frame_t *prev_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
779 
780  while (prev_cfp->ep != prev_ep) {
781  prev_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(prev_cfp);
782  VM_ASSERT(prev_cfp->ep != NULL);
783  }
784 
785  vm_make_env_each(ec, prev_cfp);
786  VM_FORCE_WRITE_SPECIAL_CONST(&ep[VM_ENV_DATA_INDEX_SPECVAL], VM_GUARDED_PREV_EP(prev_cfp->ep));
787  }
788  }
789  else {
790  VALUE block_handler = VM_ENV_BLOCK_HANDLER(ep);
791 
792  if (block_handler != VM_BLOCK_HANDLER_NONE) {
793  VALUE blockprocval = vm_block_handler_escape(ec, block_handler);
794  VM_STACK_ENV_WRITE(ep, VM_ENV_DATA_INDEX_SPECVAL, blockprocval);
795  }
796  }
797 
798  if (!VM_FRAME_RUBYFRAME_P(cfp)) {
799  local_size = VM_ENV_DATA_SIZE;
800  }
801  else {
802  local_size = cfp->iseq->body->local_table_size + VM_ENV_DATA_SIZE;
803  }
804 
805  /*
806  * # local variables on a stack frame (N == local_size)
807  * [lvar1, lvar2, ..., lvarN, SPECVAL]
808  * ^
809  * ep[0]
810  *
811  * # moved local variables
812  * [lvar1, lvar2, ..., lvarN, SPECVAL, Envval, BlockProcval (if needed)]
813  * ^ ^
814  * env->env[0] ep[0]
815  */
816 
817  env_size = local_size +
818  1 /* envval */;
819  env_body = ALLOC_N(VALUE, env_size);
820  MEMCPY(env_body, ep - (local_size - 1 /* specval */), VALUE, local_size);
821 
822 #if 0
823  for (i = 0; i < local_size; i++) {
824  if (VM_FRAME_RUBYFRAME_P(cfp)) {
825  /* clear value stack for GC */
826  ep[-local_size + i] = 0;
827  }
828  }
829 #endif
830 
831  env_iseq = VM_FRAME_RUBYFRAME_P(cfp) ? cfp->iseq : NULL;
832  env_ep = &env_body[local_size - 1 /* specval */];
833 
834  env = vm_env_new(env_ep, env_body, env_size, env_iseq);
835 
836  cfp->ep = env_ep;
837  VM_ENV_FLAGS_SET(env_ep, VM_ENV_FLAG_ESCAPED | VM_ENV_FLAG_WB_REQUIRED);
838  VM_STACK_ENV_WRITE(ep, 0, (VALUE)env); /* GC mark */
839  return (VALUE)env;
840 }
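/* The recursion above is what lets a Proc or Binding outlive its stack
 * frame: envs are moved to the heap outside-in along the prev-EP chain, so
 * the VM_GUARDED_PREV_EP link written into each escaped env always points at
 * heap memory by the time this frame's locals escape. */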
841 
842 static VALUE
843 vm_make_env_object(const rb_execution_context_t *ec, rb_control_frame_t *cfp)
844 {
845  VALUE envval = vm_make_env_each(ec, cfp);
846 
847  if (PROCDEBUG) {
848  check_env_value((const rb_env_t *)envval);
849  }
850 
851  return envval;
852 }
853 
854 void
855 rb_vm_stack_to_heap(rb_execution_context_t *ec)
856 {
857  rb_control_frame_t *cfp = ec->cfp;
858  while ((cfp = rb_vm_get_binding_creatable_next_cfp(ec, cfp)) != 0) {
859  vm_make_env_object(ec, cfp);
860  cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
861  }
862 }
863 
864 const rb_env_t *
865 rb_vm_env_prev_env(const rb_env_t *env)
866 {
867  const VALUE *ep = env->ep;
868 
869  if (VM_ENV_LOCAL_P(ep)) {
870  return NULL;
871  }
872  else {
873  const VALUE *prev_ep = VM_ENV_PREV_EP(ep);
874  return VM_ENV_ENVVAL_PTR(prev_ep);
875  }
876 }
877 
878 static int
879 collect_local_variables_in_iseq(const rb_iseq_t *iseq, const struct local_var_list *vars)
880 {
881  unsigned int i;
882  if (!iseq) return 0;
883  for (i = 0; i < iseq->body->local_table_size; i++) {
884  local_var_list_add(vars, iseq->body->local_table[i]);
885  }
886  return 1;
887 }
888 
889 static void
890 collect_local_variables_in_env(const rb_env_t *env, const struct local_var_list *vars)
891 {
892  do {
893  if (VM_ENV_FLAGS(env->ep, VM_ENV_FLAG_ISOLATED)) break;
894  collect_local_variables_in_iseq(env->iseq, vars);
895  } while ((env = rb_vm_env_prev_env(env)) != NULL);
896 }
897 
898 static int
899 vm_collect_local_variables_in_heap(const VALUE *ep, const struct local_var_list *vars)
900 {
901  if (VM_ENV_ESCAPED_P(ep)) {
902  collect_local_variables_in_env(VM_ENV_ENVVAL_PTR(ep), vars);
903  return 1;
904  }
905  else {
906  return 0;
907  }
908 }
909 
910 VALUE
911 rb_vm_env_local_variables(const rb_env_t *env)
912 {
913  struct local_var_list vars;
914  local_var_list_init(&vars);
915  collect_local_variables_in_env(env, &vars);
916  return local_var_list_finish(&vars);
917 }
918 
919 VALUE
920 rb_iseq_local_variables(const rb_iseq_t *iseq)
921 {
922  struct local_var_list vars;
923  local_var_list_init(&vars);
924  while (collect_local_variables_in_iseq(iseq, &vars)) {
925  iseq = iseq->body->parent_iseq;
926  }
927  return local_var_list_finish(&vars);
928 }
929 
930 /* Proc */
931 
932 static VALUE
933 vm_proc_create_from_captured(VALUE klass,
934  const struct rb_captured_block *captured,
935  enum rb_block_type block_type,
936  int8_t is_from_method, int8_t is_lambda)
937 {
938  VALUE procval = rb_proc_alloc(klass);
939  rb_proc_t *proc = RTYPEDDATA_DATA(procval);
940 
941  VM_ASSERT(VM_EP_IN_HEAP_P(GET_EC(), captured->ep));
942 
943  /* copy block */
944  RB_OBJ_WRITE(procval, &proc->block.as.captured.code.val, captured->code.val);
945  RB_OBJ_WRITE(procval, &proc->block.as.captured.self, captured->self);
946  rb_vm_block_ep_update(procval, &proc->block, captured->ep);
947 
948  vm_block_type_set(&proc->block, block_type);
949  proc->is_from_method = is_from_method;
950  proc->is_lambda = is_lambda;
951 
952  return procval;
953 }
954 
955 void
956 rb_vm_block_copy(VALUE obj, const struct rb_block *dst, const struct rb_block *src)
957 {
958  /* copy block */
959  switch (vm_block_type(src)) {
960  case block_type_iseq:
961  case block_type_ifunc:
962  RB_OBJ_WRITE(obj, &dst->as.captured.self, src->as.captured.self);
963  RB_OBJ_WRITE(obj, &dst->as.captured.code.val, src->as.captured.code.val);
964  rb_vm_block_ep_update(obj, dst, src->as.captured.ep);
965  break;
966  case block_type_symbol:
967  RB_OBJ_WRITE(obj, &dst->as.symbol, src->as.symbol);
968  break;
969  case block_type_proc:
970  RB_OBJ_WRITE(obj, &dst->as.proc, src->as.proc);
971  break;
972  }
973 }
974 
975 static VALUE
976 proc_create(VALUE klass, const struct rb_block *block, int8_t is_from_method, int8_t is_lambda)
977 {
978  VALUE procval = rb_proc_alloc(klass);
979  rb_proc_t *proc = RTYPEDDATA_DATA(procval);
980 
981  VM_ASSERT(VM_EP_IN_HEAP_P(GET_EC(), vm_block_ep(block)));
982  rb_vm_block_copy(procval, &proc->block, block);
983  vm_block_type_set(&proc->block, block->type);
984  proc->is_from_method = is_from_method;
985  proc->is_lambda = is_lambda;
986 
987  return procval;
988 }
989 
990 VALUE
991 rb_proc_dup(VALUE self)
992 {
993  VALUE procval;
994  rb_proc_t *src;
995 
996  GetProcPtr(self, src);
997  procval = proc_create(rb_cProc, &src->block, src->is_from_method, src->is_lambda);
998  if (RB_OBJ_SHAREABLE_P(self)) FL_SET_RAW(procval, RUBY_FL_SHAREABLE);
999  RB_GC_GUARD(self); /* for: body = rb_proc_dup(body) */
1000  return procval;
1001 }
1002 
1003 struct collect_outer_variable_name_data {
1004  VALUE ary;
1005  VALUE read_only;
1006  bool yield;
1007  bool isolate;
1008 };
1009 
1010 static VALUE
1011 ID2NUM(ID id)
1012 {
1013  if (SIZEOF_VOIDP > SIZEOF_LONG)
1014  return ULL2NUM(id);
1015  else
1016  return ULONG2NUM(id);
1017 }
1018 
1019 static ID
1020 NUM2ID(VALUE num)
1021 {
1022  if (SIZEOF_VOIDP > SIZEOF_LONG)
1023  return (ID)NUM2ULL(num);
1024  else
1025  return (ID)NUM2ULONG(num);
1026 }
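/* ID is pointer-sized, so on LLP64 platforms (e.g. 64-bit Windows), where
 * void* is wider than long, an ID only round-trips safely through
 * ULL2NUM()/NUM2ULL(); everywhere else ULONG2NUM() suffices.  These helpers
 * keep that distinction in one place. */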
1027 
1028 static enum rb_id_table_iterator_result
1029 collect_outer_variable_names(ID id, VALUE val, void *ptr)
1030 {
1031  struct collect_outer_variable_name_data *data = (struct collect_outer_variable_name_data *)ptr;
1032 
1033  if (id == rb_intern("yield")) {
1034  data->yield = true;
1035  }
1036  else {
1037  VALUE *store;
1038  if (data->isolate ||
1039  val == Qtrue /* write */) {
1040  store = &data->ary;
1041  }
1042  else {
1043  store = &data->read_only;
1044  }
1045  if (*store == Qfalse) *store = rb_ary_new();
1046  rb_ary_push(*store, ID2NUM(id));
1047  }
1048  return ID_TABLE_CONTINUE;
1049 }
1050 
1051 static const rb_env_t *
1052 env_copy(const VALUE *src_ep, VALUE read_only_variables)
1053 {
1054  const rb_env_t *src_env = (rb_env_t *)VM_ENV_ENVVAL(src_ep);
1055  VM_ASSERT(src_env->ep == src_ep);
1056 
1057  VALUE *env_body = ZALLOC_N(VALUE, src_env->env_size); // fill with Qfalse
1058  VALUE *ep = &env_body[src_env->env_size - 2];
1059  volatile VALUE prev_env = Qnil;
1060 
1061  if (read_only_variables) {
1062  for (int i=RARRAY_LENINT(read_only_variables)-1; i>=0; i--) {
1063  ID id = NUM2ID(RARRAY_AREF(read_only_variables, i));
1064 
1065  for (unsigned int j=0; j<src_env->iseq->body->local_table_size; j++) {
1066  if (id == src_env->iseq->body->local_table[j]) {
1067  VALUE v = src_env->env[j];
1068  if (!rb_ractor_shareable_p(v)) {
1069  VALUE name = rb_id2str(id);
1070  VALUE msg = rb_sprintf("can not make shareable Proc because it can refer"
1071  " unshareable object %+" PRIsVALUE " from ", v);
1072  if (name)
1073  rb_str_catf(msg, "variable `%" PRIsVALUE "'", name);
1074  else
1075  rb_str_cat_cstr(msg, "a hidden variable");
1076  rb_exc_raise(rb_exc_new_str(rb_eRactorIsolationError, msg));
1077  }
1078  env_body[j] = v;
1079  rb_ary_delete_at(read_only_variables, i);
1080  break;
1081  }
1082  }
1083  }
1084  }
1085 
1086  ep[VM_ENV_DATA_INDEX_ME_CREF] = src_ep[VM_ENV_DATA_INDEX_ME_CREF];
1087  ep[VM_ENV_DATA_INDEX_FLAGS] = src_ep[VM_ENV_DATA_INDEX_FLAGS] | VM_ENV_FLAG_ISOLATED;
1088 
1089  if (!VM_ENV_LOCAL_P(src_ep)) {
1090  const VALUE *prev_ep = VM_ENV_PREV_EP(src_env->ep);
1091  const rb_env_t *new_prev_env = env_copy(prev_ep, read_only_variables);
1092  prev_env = (VALUE)new_prev_env;
1093  ep[VM_ENV_DATA_INDEX_SPECVAL] = VM_GUARDED_PREV_EP(new_prev_env->ep);
1094  }
1095  else {
1096  ep[VM_ENV_DATA_INDEX_SPECVAL] = VM_BLOCK_HANDLER_NONE;
1097  }
1098 
1099  const rb_env_t *copied_env = vm_env_new(ep, env_body, src_env->env_size, src_env->iseq);
1100  RB_GC_GUARD(prev_env);
1101  return copied_env;
1102 }
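/* env_copy() duplicates a whole prev-EP chain into fresh heap envs.  Only
 * variables listed in read_only_variables are carried over, and each must
 * itself be Ractor-shareable; every other slot stays Qfalse (ZALLOC), which
 * is how an isolated Proc is cut off from unshareable outer state. */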
1103 
1104 static void
1105 proc_isolate_env(VALUE self, rb_proc_t *proc, VALUE read_only_variables)
1106 {
1107  const struct rb_captured_block *captured = &proc->block.as.captured;
1108  const rb_env_t *env = env_copy(captured->ep, read_only_variables);
1109  *((const VALUE **)&proc->block.as.captured.ep) = env->ep;
1110  RB_OBJ_WRITTEN(self, Qundef, env);
1111 }
1112 
1113 static VALUE
1114 proc_shared_outer_variables(struct rb_id_table *outer_variables, bool isolate, const char *message)
1115 {
1116  struct collect_outer_variable_name_data data = {
1117  .isolate = isolate,
1118  .ary = Qfalse,
1119  .read_only = Qfalse,
1120  .yield = false,
1121  };
1122  rb_id_table_foreach(outer_variables, collect_outer_variable_names, (void *)&data);
1123 
1124  if (data.ary != Qfalse) {
1125  VALUE str = rb_sprintf("can not %s because it accesses outer variables", message);
1126  VALUE ary = data.ary;
1127  const char *sep = " (";
1128  for (long i = 0; i < RARRAY_LEN(ary); i++) {
1129  VALUE name = rb_id2str(NUM2ID(RARRAY_AREF(ary, i)));
1130  if (!name) continue;
1131  rb_str_cat_cstr(str, sep);
1132  sep = ", ";
1133  rb_str_append(str, name);
1134  }
1135  if (*sep == ',') rb_str_cat_cstr(str, ")");
1136  rb_str_cat_cstr(str, data.yield ? " and uses `yield'." : ".");
1137  rb_exc_raise(rb_exc_new_str(rb_eArgError, str));
1138  }
1139  else if (data.yield) {
1140  rb_raise(rb_eArgError, "can not %s because it uses `yield'.", message);
1141  }
1142 
1143  return data.read_only;
1144 }
1145 
1146 VALUE
1147 rb_proc_isolate_bang(VALUE self)
1148 {
1149  const rb_iseq_t *iseq = vm_proc_iseq(self);
1150 
1151  if (iseq) {
1152  rb_proc_t *proc = (rb_proc_t *)RTYPEDDATA_DATA(self);
1153  if (proc->block.type != block_type_iseq) rb_raise(rb_eRuntimeError, "not supported yet");
1154 
1155  if (iseq->body->outer_variables) {
1156  proc_shared_outer_variables(iseq->body->outer_variables, true, "isolate a Proc");
1157  }
1158 
1159  proc_isolate_env(self, proc, Qfalse);
1160  proc->is_isolated = TRUE;
1161  }
1162 
1163  FL_SET_RAW(self, RUBY_FL_SHAREABLE);
1164  return self;
1165 }
1166 
1167 VALUE
1168 rb_proc_isolate(VALUE self)
1169 {
1170  VALUE dst = rb_proc_dup(self);
1171  rb_proc_isolate_bang(dst);
1172  return dst;
1173 }
1174 
1175 VALUE
1176 rb_proc_ractor_make_shareable(VALUE self)
1177 {
1178  const rb_iseq_t *iseq = vm_proc_iseq(self);
1179 
1180  if (iseq) {
1181  rb_proc_t *proc = (rb_proc_t *)RTYPEDDATA_DATA(self);
1182  if (proc->block.type != block_type_iseq) rb_raise(rb_eRuntimeError, "not supported yet");
1183 
1184  if (!rb_ractor_shareable_p(vm_block_self(&proc->block))) {
1185  rb_raise(rb_eRactorIsolationError,
1186  "Proc's self is not shareable: %" PRIsVALUE,
1187  self);
1188  }
1189 
1190  VALUE read_only_variables = Qfalse;
1191 
1192  if (iseq->body->outer_variables) {
1193  read_only_variables =
1194  proc_shared_outer_variables(iseq->body->outer_variables, false, "make a Proc shareable");
1195  }
1196 
1197  proc_isolate_env(self, proc, read_only_variables);
1198  proc->is_isolated = TRUE;
1199  }
1200 
1201  FL_SET_RAW(self, RUBY_FL_SHAREABLE);
1202  return self;
1203 }
1204 
1205 MJIT_FUNC_EXPORTED VALUE
1206 rb_vm_make_proc_lambda(const rb_execution_context_t *ec, const struct rb_captured_block *captured, VALUE klass, int8_t is_lambda)
1207 {
1208  VALUE procval;
1209 
1210  if (!VM_ENV_ESCAPED_P(captured->ep)) {
1211  rb_control_frame_t *cfp = VM_CAPTURED_BLOCK_TO_CFP(captured);
1212  vm_make_env_object(ec, cfp);
1213  }
1214  VM_ASSERT(VM_EP_IN_HEAP_P(ec, captured->ep));
1215  VM_ASSERT(imemo_type_p(captured->code.val, imemo_iseq) ||
1216  imemo_type_p(captured->code.val, imemo_ifunc));
1217 
1218  procval = vm_proc_create_from_captured(klass, captured,
1219  imemo_type(captured->code.val) == imemo_iseq ? block_type_iseq : block_type_ifunc, FALSE, is_lambda);
1220  return procval;
1221 }
1222 
1223 /* Binding */
1224 
1225 VALUE
1226 rb_vm_make_binding(const rb_execution_context_t *ec, const rb_control_frame_t *src_cfp)
1227 {
1228  rb_control_frame_t *cfp = rb_vm_get_binding_creatable_next_cfp(ec, src_cfp);
1229  rb_control_frame_t *ruby_level_cfp = rb_vm_get_ruby_level_next_cfp(ec, src_cfp);
1230  VALUE bindval, envval;
1231  rb_binding_t *bind;
1232 
1233  if (cfp == 0 || ruby_level_cfp == 0) {
1234  rb_raise(rb_eRuntimeError, "Can't create Binding Object on top of Fiber.");
1235  }
1236 
1237  while (1) {
1238  envval = vm_make_env_object(ec, cfp);
1239  if (cfp == ruby_level_cfp) {
1240  break;
1241  }
1242  cfp = rb_vm_get_binding_creatable_next_cfp(ec, RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp));
1243  }
1244 
1245  bindval = rb_binding_alloc(rb_cBinding);
1246  GetBindingPtr(bindval, bind);
1247  vm_bind_update_env(bindval, bind, envval);
1248  RB_OBJ_WRITE(bindval, &bind->block.as.captured.self, cfp->self);
1249  RB_OBJ_WRITE(bindval, &bind->block.as.captured.code.iseq, cfp->iseq);
1250  RB_OBJ_WRITE(bindval, &bind->pathobj, ruby_level_cfp->iseq->body->location.pathobj);
1251  bind->first_lineno = rb_vm_get_sourceline(ruby_level_cfp);
1252 
1253  return bindval;
1254 }
1255 
1256 const VALUE *
1257 rb_binding_add_dynavars(VALUE bindval, rb_binding_t *bind, int dyncount, const ID *dynvars)
1258 {
1259  VALUE envval, pathobj = bind->pathobj;
1260  VALUE path = pathobj_path(pathobj);
1261  VALUE realpath = pathobj_realpath(pathobj);
1262  const struct rb_block *base_block;
1263  const rb_env_t *env;
1264  rb_execution_context_t *ec = GET_EC();
1265  const rb_iseq_t *base_iseq, *iseq;
1266  rb_ast_body_t ast;
1267  NODE tmp_node;
1268 
1269  if (dyncount < 0) return 0;
1270 
1271  base_block = &bind->block;
1272  base_iseq = vm_block_iseq(base_block);
1273 
1274  VALUE idtmp = 0;
1275  rb_ast_id_table_t *dyns = ALLOCV(idtmp, sizeof(rb_ast_id_table_t) + dyncount * sizeof(ID));
1276  dyns->size = dyncount;
1277  MEMCPY(dyns->ids, dynvars, ID, dyncount);
1278 
1279  rb_node_init(&tmp_node, NODE_SCOPE, (VALUE)dyns, 0, 0);
1280  ast.root = &tmp_node;
1281  ast.compile_option = 0;
1282  ast.script_lines = INT2FIX(-1);
1283 
1284  if (base_iseq) {
1285  iseq = rb_iseq_new(&ast, base_iseq->body->location.label, path, realpath, base_iseq, ISEQ_TYPE_EVAL);
1286  }
1287  else {
1288  VALUE tempstr = rb_fstring_lit("<temp>");
1289  iseq = rb_iseq_new_top(&ast, tempstr, tempstr, tempstr, NULL);
1290  }
1291  tmp_node.nd_tbl = 0; /* reset table */
1292  ALLOCV_END(idtmp);
1293 
1294  vm_set_eval_stack(ec, iseq, 0, base_block);
1295  vm_bind_update_env(bindval, bind, envval = vm_make_env_object(ec, ec->cfp));
1296  rb_vm_pop_frame(ec);
1297 
1298  env = (const rb_env_t *)envval;
1299  return env->env;
1300 }
1301 
1302 /* C -> Ruby: block */
1303 
1304 static inline VALUE
1305 invoke_block(rb_execution_context_t *ec, const rb_iseq_t *iseq, VALUE self, const struct rb_captured_block *captured, const rb_cref_t *cref, VALUE type, int opt_pc)
1306 {
1307  int arg_size = iseq->body->param.size;
1308 
1309  vm_push_frame(ec, iseq, type | VM_FRAME_FLAG_FINISH, self,
1310  VM_GUARDED_PREV_EP(captured->ep),
1311  (VALUE)cref, /* cref or method */
1312  iseq->body->iseq_encoded + opt_pc,
1313  ec->cfp->sp + arg_size,
1314  iseq->body->local_table_size - arg_size,
1315  iseq->body->stack_max);
1316  return vm_exec(ec, true);
1317 }
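/* VM_FRAME_FLAG_FINISH marks the frame at which vm_exec() stops and returns
 * control to its C caller; without it, leaving the block's iseq would try to
 * continue interpreting caller frames that this C function does not own. */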
1318 
1319 static VALUE
1320 invoke_bmethod(rb_execution_context_t *ec, const rb_iseq_t *iseq, VALUE self, const struct rb_captured_block *captured, const rb_callable_method_entry_t *me, VALUE type, int opt_pc)
1321 {
1322  /* bmethod */
1323  int arg_size = iseq->body->param.size;
1324  VALUE ret;
1325 
1326  VM_ASSERT(me->def->type == VM_METHOD_TYPE_BMETHOD);
1327 
1328  vm_push_frame(ec, iseq, type | VM_FRAME_FLAG_BMETHOD, self,
1329  VM_GUARDED_PREV_EP(captured->ep),
1330  (VALUE)me,
1331  iseq->body->iseq_encoded + opt_pc,
1332  ec->cfp->sp + arg_size,
1333  iseq->body->local_table_size - arg_size,
1334  iseq->body->stack_max);
1335 
1336  VM_ENV_FLAGS_SET(ec->cfp->ep, VM_FRAME_FLAG_FINISH);
1337  ret = vm_exec(ec, true);
1338 
1339  return ret;
1340 }
1341 
1342 ALWAYS_INLINE(static VALUE
1343  invoke_iseq_block_from_c(rb_execution_context_t *ec, const struct rb_captured_block *captured,
1344  VALUE self, int argc, const VALUE *argv, int kw_splat, VALUE passed_block_handler,
1345  const rb_cref_t *cref, int is_lambda, const rb_callable_method_entry_t *me));
1346 
1347 static inline VALUE
1348 invoke_iseq_block_from_c(rb_execution_context_t *ec, const struct rb_captured_block *captured,
1349  VALUE self, int argc, const VALUE *argv, int kw_splat, VALUE passed_block_handler,
1350  const rb_cref_t *cref, int is_lambda, const rb_callable_method_entry_t *me)
1351 {
1352  const rb_iseq_t *iseq = rb_iseq_check(captured->code.iseq);
1353  int i, opt_pc;
1354  VALUE type = VM_FRAME_MAGIC_BLOCK | (is_lambda ? VM_FRAME_FLAG_LAMBDA : 0);
1355  rb_control_frame_t *cfp = ec->cfp;
1356  VALUE *sp = cfp->sp;
1357 
1358  stack_check(ec);
1359 
1360  CHECK_VM_STACK_OVERFLOW(cfp, argc);
1361  vm_check_canary(ec, sp);
1362  cfp->sp = sp + argc;
1363  for (i=0; i<argc; i++) {
1364  sp[i] = argv[i];
1365  }
1366 
1367  opt_pc = vm_yield_setup_args(ec, iseq, argc, sp, kw_splat, passed_block_handler,
1368  (is_lambda ? arg_setup_method : arg_setup_block));
1369  cfp->sp = sp;
1370 
1371  if (me == NULL) {
1372  return invoke_block(ec, iseq, self, captured, cref, type, opt_pc);
1373  }
1374  else {
1375  return invoke_bmethod(ec, iseq, self, captured, me, type, opt_pc);
1376  }
1377 }
1378 
1379 static inline VALUE
1380 invoke_block_from_c_bh(rb_execution_context_t *ec, VALUE block_handler,
1381  int argc, const VALUE *argv,
1382  int kw_splat, VALUE passed_block_handler, const rb_cref_t *cref,
1383  int is_lambda, int force_blockarg)
1384 {
1385  again:
1386  switch (vm_block_handler_type(block_handler)) {
1387  case block_handler_type_iseq:
1388  {
1389  const struct rb_captured_block *captured = VM_BH_TO_ISEQ_BLOCK(block_handler);
1390  return invoke_iseq_block_from_c(ec, captured, captured->self,
1391  argc, argv, kw_splat, passed_block_handler,
1392  cref, is_lambda, NULL);
1393  }
1394  case block_handler_type_ifunc:
1395  return vm_yield_with_cfunc(ec, VM_BH_TO_IFUNC_BLOCK(block_handler),
1396  VM_BH_TO_IFUNC_BLOCK(block_handler)->self,
1397  argc, argv, kw_splat, passed_block_handler, NULL);
1398  case block_handler_type_symbol:
1399  return vm_yield_with_symbol(ec, VM_BH_TO_SYMBOL(block_handler),
1400  argc, argv, kw_splat, passed_block_handler);
1401  case block_handler_type_proc:
1402  if (force_blockarg == FALSE) {
1403  is_lambda = block_proc_is_lambda(VM_BH_TO_PROC(block_handler));
1404  }
1405  block_handler = vm_proc_to_block_handler(VM_BH_TO_PROC(block_handler));
1406  goto again;
1407  }
1408  VM_UNREACHABLE(invoke_block_from_c_splattable);
1409  return Qundef;
1410 }
1411 
1412 static inline VALUE
1413 check_block_handler(rb_execution_context_t *ec)
1414 {
1415  VALUE block_handler = VM_CF_BLOCK_HANDLER(ec->cfp);
1416  vm_block_handler_verify(block_handler);
1417  if (UNLIKELY(block_handler == VM_BLOCK_HANDLER_NONE)) {
1418  rb_vm_localjump_error("no block given", Qnil, 0);
1419  }
1420 
1421  return block_handler;
1422 }
1423 
1424 static VALUE
1425 vm_yield_with_cref(rb_execution_context_t *ec, int argc, const VALUE *argv, int kw_splat, const rb_cref_t *cref, int is_lambda)
1426 {
1427  return invoke_block_from_c_bh(ec, check_block_handler(ec),
1428  argc, argv, kw_splat, VM_BLOCK_HANDLER_NONE,
1429  cref, is_lambda, FALSE);
1430 }
1431 
1432 static VALUE
1433 vm_yield(rb_execution_context_t *ec, int argc, const VALUE *argv, int kw_splat)
1434 {
1435  return vm_yield_with_cref(ec, argc, argv, kw_splat, NULL, FALSE);
1436 }
1437 
1438 static VALUE
1439 vm_yield_with_block(rb_execution_context_t *ec, int argc, const VALUE *argv, VALUE block_handler, int kw_splat)
1440 {
1441  return invoke_block_from_c_bh(ec, check_block_handler(ec),
1442  argc, argv, kw_splat, block_handler,
1443  NULL, FALSE, FALSE);
1444 }
1445 
1446 static VALUE
1447 vm_yield_force_blockarg(rb_execution_context_t *ec, VALUE args)
1448 {
1449  return invoke_block_from_c_bh(ec, check_block_handler(ec), 1, &args,
1450  RB_NO_KEYWORDS, VM_BLOCK_HANDLER_NONE, NULL, FALSE, TRUE);
1451 }
1452 
1453 ALWAYS_INLINE(static VALUE
1454  invoke_block_from_c_proc(rb_execution_context_t *ec, const rb_proc_t *proc,
1455  VALUE self, int argc, const VALUE *argv,
1456  int kw_splat, VALUE passed_block_handler, int is_lambda,
1457  const rb_callable_method_entry_t *me));
1458 
1459 static inline VALUE
1460 invoke_block_from_c_proc(rb_execution_context_t *ec, const rb_proc_t *proc,
1461  VALUE self, int argc, const VALUE *argv,
1462  int kw_splat, VALUE passed_block_handler, int is_lambda,
1463  const rb_callable_method_entry_t *me)
1464 {
1465  const struct rb_block *block = &proc->block;
1466 
1467  again:
1468  switch (vm_block_type(block)) {
1469  case block_type_iseq:
1470  return invoke_iseq_block_from_c(ec, &block->as.captured, self, argc, argv, kw_splat, passed_block_handler, NULL, is_lambda, me);
1471  case block_type_ifunc:
1472  if (kw_splat == 1) {
1473  VALUE keyword_hash = argv[argc-1];
1474  if (!RB_TYPE_P(keyword_hash, T_HASH)) {
1475  keyword_hash = rb_to_hash_type(keyword_hash);
1476  }
1477  if (RHASH_EMPTY_P(keyword_hash)) {
1478  argc--;
1479  }
1480  else {
1481  ((VALUE *)argv)[argc-1] = rb_hash_dup(keyword_hash);
1482  }
1483  }
1484  return vm_yield_with_cfunc(ec, &block->as.captured, self, argc, argv, kw_splat, passed_block_handler, me);
1485  case block_type_symbol:
1486  return vm_yield_with_symbol(ec, block->as.symbol, argc, argv, kw_splat, passed_block_handler);
1487  case block_type_proc:
1488  is_lambda = block_proc_is_lambda(block->as.proc);
1489  block = vm_proc_block(block->as.proc);
1490  goto again;
1491  }
1492  VM_UNREACHABLE(invoke_block_from_c_proc);
1493  return Qundef;
1494 }
1495 
1496 static VALUE
1497 vm_invoke_proc(rb_execution_context_t *ec, rb_proc_t *proc, VALUE self,
1498  int argc, const VALUE *argv, int kw_splat, VALUE passed_block_handler)
1499 {
1500  return invoke_block_from_c_proc(ec, proc, self, argc, argv, kw_splat, passed_block_handler, proc->is_lambda, NULL);
1501 }
1502 
1503 MJIT_FUNC_EXPORTED VALUE
1504 rb_vm_invoke_bmethod(rb_execution_context_t *ec, rb_proc_t *proc, VALUE self,
1505  int argc, const VALUE *argv, int kw_splat, VALUE block_handler, const rb_callable_method_entry_t *me)
1506 {
1507  return invoke_block_from_c_proc(ec, proc, self, argc, argv, kw_splat, block_handler, TRUE, me);
1508 }
1509 
1510 MJIT_FUNC_EXPORTED VALUE
1511 rb_vm_invoke_proc(rb_execution_context_t *ec, rb_proc_t *proc,
1512  int argc, const VALUE *argv, int kw_splat, VALUE passed_block_handler)
1513 {
1514  VALUE self = vm_block_self(&proc->block);
1515  vm_block_handler_verify(passed_block_handler);
1516 
1517  if (proc->is_from_method) {
1518  return rb_vm_invoke_bmethod(ec, proc, self, argc, argv, kw_splat, passed_block_handler, NULL);
1519  }
1520  else {
1521  return vm_invoke_proc(ec, proc, self, argc, argv, kw_splat, passed_block_handler);
1522  }
1523 }
1524 
1525 VALUE
1526 rb_vm_invoke_proc_with_self(rb_execution_context_t *ec, rb_proc_t *proc, VALUE self,
1527  int argc, const VALUE *argv, int kw_splat, VALUE passed_block_handler)
1528 {
1529  vm_block_handler_verify(passed_block_handler);
1530 
1531  if (proc->is_from_method) {
1532  return rb_vm_invoke_bmethod(ec, proc, self, argc, argv, kw_splat, passed_block_handler, NULL);
1533  }
1534  else {
1535  return vm_invoke_proc(ec, proc, self, argc, argv, kw_splat, passed_block_handler);
1536  }
1537 }
1538 
1539 /* special variable */
1540 
1541 static rb_control_frame_t *
1542 vm_normal_frame(const rb_execution_context_t *ec, rb_control_frame_t *cfp)
1543 {
1544  while (cfp->pc == 0) {
1545  cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
1546  if (RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(ec, cfp)) {
1547  return 0;
1548  }
1549  }
1550  return cfp;
1551 }
1552 
1553 static VALUE
1554 vm_cfp_svar_get(const rb_execution_context_t *ec, rb_control_frame_t *cfp, VALUE key)
1555 {
1556  cfp = vm_normal_frame(ec, cfp);
1557  return lep_svar_get(ec, cfp ? VM_CF_LEP(cfp) : 0, key);
1558 }
1559 
1560 static void
1561 vm_cfp_svar_set(const rb_execution_context_t *ec, rb_control_frame_t *cfp, VALUE key, const VALUE val)
1562 {
1563  cfp = vm_normal_frame(ec, cfp);
1564  lep_svar_set(ec, cfp ? VM_CF_LEP(cfp) : 0, key, val);
1565 }
1566 
1567 static VALUE
1568 vm_svar_get(const rb_execution_context_t *ec, VALUE key)
1569 {
1570  return vm_cfp_svar_get(ec, ec->cfp, key);
1571 }
1572 
1573 static void
1574 vm_svar_set(const rb_execution_context_t *ec, VALUE key, VALUE val)
1575 {
1576  vm_cfp_svar_set(ec, ec->cfp, key, val);
1577 }
1578 
1579 VALUE
1580 rb_backref_get(void)
1581 {
1582  return vm_svar_get(GET_EC(), VM_SVAR_BACKREF);
1583 }
1584 
1585 void
1586 rb_backref_set(VALUE val)
1587 {
1588  vm_svar_set(GET_EC(), VM_SVAR_BACKREF, val);
1589 }
1590 
1591 VALUE
1592 rb_lastline_get(void)
1593 {
1594  return vm_svar_get(GET_EC(), VM_SVAR_LASTLINE);
1595 }
1596 
1597 void
1598 rb_lastline_set(VALUE val)
1599 {
1600  vm_svar_set(GET_EC(), VM_SVAR_LASTLINE, val);
1601 }
1602 
1603 /* misc */
1604 
1605 const char *
1606 rb_sourcefile(void)
1607 {
1608  const rb_execution_context_t *ec = GET_EC();
1609  const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
1610 
1611  if (cfp) {
1612  return RSTRING_PTR(rb_iseq_path(cfp->iseq));
1613  }
1614  else {
1615  return 0;
1616  }
1617 }
1618 
1619 int
1620 rb_sourceline(void)
1621 {
1622  const rb_execution_context_t *ec = GET_EC();
1623  const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
1624 
1625  if (cfp) {
1626  return rb_vm_get_sourceline(cfp);
1627  }
1628  else {
1629  return 0;
1630  }
1631 }
1632 
1633 VALUE
1634 rb_source_location(int *pline)
1635 {
1636  const rb_execution_context_t *ec = GET_EC();
1637  const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
1638 
1639  if (cfp && VM_FRAME_RUBYFRAME_P(cfp)) {
1640  if (pline) *pline = rb_vm_get_sourceline(cfp);
1641  return rb_iseq_path(cfp->iseq);
1642  }
1643  else {
1644  if (pline) *pline = 0;
1645  return Qnil;
1646  }
1647 }
1648 
1649 MJIT_FUNC_EXPORTED const char *
1650 rb_source_location_cstr(int *pline)
1651 {
1652  VALUE path = rb_source_location(pline);
1653  if (NIL_P(path)) return NULL;
1654  return RSTRING_PTR(path);
1655 }
1656 
1657 rb_cref_t *
1658 rb_vm_cref(void)
1659 {
1660  const rb_execution_context_t *ec = GET_EC();
1661  return vm_ec_cref(ec);
1662 }
1663 
1664 rb_cref_t *
1665 rb_vm_cref_replace_with_duplicated_cref(void)
1666 {
1667  const rb_execution_context_t *ec = GET_EC();
1668  const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
1669  rb_cref_t *cref = vm_cref_replace_with_duplicated_cref(cfp->ep);
1670  ASSUME(cref);
1671  return cref;
1672 }
1673 
1674 const rb_cref_t *
1675 rb_vm_cref_in_context(VALUE self, VALUE cbase)
1676 {
1677  const rb_execution_context_t *ec = GET_EC();
1678  const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
1679  const rb_cref_t *cref;
1680  if (!cfp || cfp->self != self) return NULL;
1681  if (!vm_env_cref_by_cref(cfp->ep)) return NULL;
1682  cref = vm_get_cref(cfp->ep);
1683  if (CREF_CLASS(cref) != cbase) return NULL;
1684  return cref;
1685 }
1686 
1687 #if 0
1688 void
1689 debug_cref(rb_cref_t *cref)
1690 {
1691  while (cref) {
1692  dp(CREF_CLASS(cref));
1693  printf("%ld\n", CREF_VISI(cref));
1694  cref = CREF_NEXT(cref);
1695  }
1696 }
1697 #endif
1698 
1699 VALUE
1700 rb_vm_cbase(void)
1701 {
1702  const rb_execution_context_t *ec = GET_EC();
1703  const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
1704 
1705  if (cfp == 0) {
1706  rb_raise(rb_eRuntimeError, "Can't call on top of Fiber or Thread");
1707  }
1708  return vm_get_cbase(cfp->ep);
1709 }
1710 
1711 /* jump */
1712 
1713 static VALUE
1714 make_localjump_error(const char *mesg, VALUE value, int reason)
1715 {
1716  extern VALUE rb_eLocalJumpError;
1717  VALUE exc = rb_exc_new2(rb_eLocalJumpError, mesg);
1718  ID id;
1719 
1720  switch (reason) {
1721  case TAG_BREAK:
1722  CONST_ID(id, "break");
1723  break;
1724  case TAG_REDO:
1725  CONST_ID(id, "redo");
1726  break;
1727  case TAG_RETRY:
1728  CONST_ID(id, "retry");
1729  break;
1730  case TAG_NEXT:
1731  CONST_ID(id, "next");
1732  break;
1733  case TAG_RETURN:
1734  CONST_ID(id, "return");
1735  break;
1736  default:
1737  CONST_ID(id, "noreason");
1738  break;
1739  }
1740  rb_iv_set(exc, "@exit_value", value);
1741  rb_iv_set(exc, "@reason", ID2SYM(id));
1742  return exc;
1743 }
1744 
1745 MJIT_FUNC_EXPORTED void
1746 rb_vm_localjump_error(const char *mesg, VALUE value, int reason)
1747 {
1748  VALUE exc = make_localjump_error(mesg, value, reason);
1749  rb_exc_raise(exc);
1750 }
1751 
1752 VALUE
1753 rb_vm_make_jump_tag_but_local_jump(int state, VALUE val)
1754 {
1755  const char *mesg;
1756 
1757  switch (state) {
1758  case TAG_RETURN:
1759  mesg = "unexpected return";
1760  break;
1761  case TAG_BREAK:
1762  mesg = "unexpected break";
1763  break;
1764  case TAG_NEXT:
1765  mesg = "unexpected next";
1766  break;
1767  case TAG_REDO:
1768  mesg = "unexpected redo";
1769  val = Qnil;
1770  break;
1771  case TAG_RETRY:
1772  mesg = "retry outside of rescue clause";
1773  val = Qnil;
1774  break;
1775  default:
1776  return Qnil;
1777  }
1778  if (val == Qundef) {
1779  val = GET_EC()->tag->retval;
1780  }
1781  return make_localjump_error(mesg, val, state);
1782 }
1783 
1784 void
1785 rb_vm_jump_tag_but_local_jump(int state)
1786 {
1787  VALUE exc = rb_vm_make_jump_tag_but_local_jump(state, Qundef);
1788  if (!NIL_P(exc)) rb_exc_raise(exc);
1789  EC_JUMP_TAG(GET_EC(), state);
1790 }
1791 
1792 static rb_control_frame_t *
1793 next_not_local_frame(rb_control_frame_t *cfp)
1794 {
1795  while (VM_ENV_LOCAL_P(cfp->ep)) {
1796  cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
1797  }
1798  return cfp;
1799 }
1800 
1801 NORETURN(static void vm_iter_break(rb_execution_context_t *ec, VALUE val));
1802 
1803 static void
1804 vm_iter_break(rb_execution_context_t *ec, VALUE val)
1805 {
1806  rb_control_frame_t *cfp = next_not_local_frame(ec->cfp);
1807  const VALUE *ep = VM_CF_PREV_EP(cfp);
1808  const rb_control_frame_t *target_cfp = rb_vm_search_cf_from_ep(ec, cfp, ep);
1809 
1810 #if 0 /* raise LocalJumpError */
1811  if (!target_cfp) {
1812  rb_vm_localjump_error("unexpected break", val, TAG_BREAK);
1813  }
1814 #endif
1815 
1816  ec->errinfo = (VALUE)THROW_DATA_NEW(val, target_cfp, TAG_BREAK);
1817  EC_JUMP_TAG(ec, TAG_BREAK);
1818 }
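/* `break' from a block is thrown rather than returned: the THROW_DATA
 * records the control frame owning the block's outer EP (the iterator
 * method's frame), and EC_JUMP_TAG() unwinds with TAG_BREAK until vm_exec()
 * reaches that frame and uses the payload as the iterator's return value. */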
1819 
1820 void
1821 rb_iter_break(void)
1822 {
1823  vm_iter_break(GET_EC(), Qnil);
1824 }
1825 
1826 void
1827 rb_iter_break_value(VALUE val)
1828 {
1829  vm_iter_break(GET_EC(), val);
1830 }
1831 
1832 /* optimization: redefine management */
1833 
1834 static st_table *vm_opt_method_def_table = 0;
1835 static st_table *vm_opt_mid_table = 0;
1836 
1837 static int
1838 vm_redefinition_check_flag(VALUE klass)
1839 {
1840  if (klass == rb_cInteger) return INTEGER_REDEFINED_OP_FLAG;
1841  if (klass == rb_cFloat) return FLOAT_REDEFINED_OP_FLAG;
1842  if (klass == rb_cString) return STRING_REDEFINED_OP_FLAG;
1843  if (klass == rb_cArray) return ARRAY_REDEFINED_OP_FLAG;
1844  if (klass == rb_cHash) return HASH_REDEFINED_OP_FLAG;
1845  if (klass == rb_cSymbol) return SYMBOL_REDEFINED_OP_FLAG;
1846 #if 0
1847  if (klass == rb_cTime) return TIME_REDEFINED_OP_FLAG;
1848 #endif
1849  if (klass == rb_cRegexp) return REGEXP_REDEFINED_OP_FLAG;
1850  if (klass == rb_cNilClass) return NIL_REDEFINED_OP_FLAG;
1851  if (klass == rb_cTrueClass) return TRUE_REDEFINED_OP_FLAG;
1852  if (klass == rb_cFalseClass) return FALSE_REDEFINED_OP_FLAG;
1853  if (klass == rb_cProc) return PROC_REDEFINED_OP_FLAG;
1854  return 0;
1855 }
1856 
1857 int
1858 rb_vm_check_optimizable_mid(VALUE mid)
1859 {
1860  if (!vm_opt_mid_table) {
1861  return FALSE;
1862  }
1863 
1864  return st_lookup(vm_opt_mid_table, mid, NULL);
1865 }
1866 
1867 static int
1868 vm_redefinition_check_method_type(const rb_method_entry_t *me)
1869 {
1870  if (me->called_id != me->def->original_id) {
1871  return FALSE;
1872  }
1873 
1874  const rb_method_definition_t *def = me->def;
1875  switch (def->type) {
1876  case VM_METHOD_TYPE_CFUNC:
1877  case VM_METHOD_TYPE_OPTIMIZED:
1878  return TRUE;
1879  default:
1880  return FALSE;
1881  }
1882 }
1883 
1884 static void
1885 rb_vm_check_redefinition_opt_method(const rb_method_entry_t *me, VALUE klass)
1886 {
1887  st_data_t bop;
1888  if (RB_TYPE_P(klass, T_ICLASS) && FL_TEST(klass, RICLASS_IS_ORIGIN) &&
1889  RB_TYPE_P(RBASIC_CLASS(klass), T_CLASS)) {
1890  klass = RBASIC_CLASS(klass);
1891  }
1892  if (vm_redefinition_check_method_type(me)) {
1893  if (st_lookup(vm_opt_method_def_table, (st_data_t)me->def, &bop)) {
1894  int flag = vm_redefinition_check_flag(klass);
1895  if (flag != 0) {
1896  rb_yjit_bop_redefined(klass, me, (enum ruby_basic_operators)bop);
1897  ruby_vm_redefined_flag[bop] |= flag;
1898  }
1899  }
1900  }
1901 }
1902 
1903 static enum rb_id_table_iterator_result
1904 check_redefined_method(ID mid, VALUE value, void *data)
1905 {
1906  VALUE klass = (VALUE)data;
1907  const rb_method_entry_t *me = (rb_method_entry_t *)value;
1908  const rb_method_entry_t *newme = rb_method_entry(klass, mid);
1909 
1910  if (newme != me) rb_vm_check_redefinition_opt_method(me, me->owner);
1911 
1912  return ID_TABLE_CONTINUE;
1913 }
1914 
1915 void
1916 rb_vm_check_redefinition_by_prepend(VALUE klass)
1917 {
1918  if (!vm_redefinition_check_flag(klass)) return;
1919  rb_id_table_foreach(RCLASS_M_TBL(RCLASS_ORIGIN(klass)), check_redefined_method, (void *)klass);
1920 }
1921 
1922 static void
1923 add_opt_method(VALUE klass, ID mid, VALUE bop)
1924 {
1925  const rb_method_entry_t *me = rb_method_entry_at(klass, mid);
1926 
1927  if (me && vm_redefinition_check_method_type(me)) {
1928  st_insert(vm_opt_method_def_table, (st_data_t)me->def, (st_data_t)bop);
1929  st_insert(vm_opt_mid_table, (st_data_t)mid, (st_data_t)Qtrue);
1930  }
1931  else {
1932  rb_bug("undefined optimized method: %s", rb_id2name(mid));
1933  }
1934 }
1935 
1936 static void
1937 vm_init_redefined_flag(void)
1938 {
1939  ID mid;
1940  VALUE bop;
1941 
1942  vm_opt_method_def_table = st_init_numtable();
1943  vm_opt_mid_table = st_init_numtable();
1944 
1945 #define OP(mid_, bop_) (mid = id##mid_, bop = BOP_##bop_, ruby_vm_redefined_flag[bop] = 0)
1946 #define C(k) add_opt_method(rb_c##k, mid, bop)
1947  OP(PLUS, PLUS), (C(Integer), C(Float), C(String), C(Array));
1948  OP(MINUS, MINUS), (C(Integer), C(Float));
1949  OP(MULT, MULT), (C(Integer), C(Float));
1950  OP(DIV, DIV), (C(Integer), C(Float));
1951  OP(MOD, MOD), (C(Integer), C(Float));
1952  OP(Eq, EQ), (C(Integer), C(Float), C(String), C(Symbol));
1953  OP(Eqq, EQQ), (C(Integer), C(Float), C(Symbol), C(String),
1954  C(NilClass), C(TrueClass), C(FalseClass));
1955  OP(LT, LT), (C(Integer), C(Float));
1956  OP(LE, LE), (C(Integer), C(Float));
1957  OP(GT, GT), (C(Integer), C(Float));
1958  OP(GE, GE), (C(Integer), C(Float));
1959  OP(LTLT, LTLT), (C(String), C(Array));
1960  OP(AREF, AREF), (C(Array), C(Hash), C(Integer));
1961  OP(ASET, ASET), (C(Array), C(Hash));
1962  OP(Length, LENGTH), (C(Array), C(String), C(Hash));
1963  OP(Size, SIZE), (C(Array), C(String), C(Hash));
1964  OP(EmptyP, EMPTY_P), (C(Array), C(String), C(Hash));
1965  OP(Succ, SUCC), (C(Integer), C(String));
1966  OP(EqTilde, MATCH), (C(Regexp), C(String));
1967  OP(Freeze, FREEZE), (C(String));
1968  OP(UMinus, UMINUS), (C(String));
1969  OP(Max, MAX), (C(Array));
1970  OP(Min, MIN), (C(Array));
1971  OP(Call, CALL), (C(Proc));
1972  OP(And, AND), (C(Integer));
1973  OP(Or, OR), (C(Integer));
1974  OP(NilP, NIL_P), (C(NilClass));
1975 #undef C
1976 #undef OP
1977 }
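
/* For reference, a single line of the table above, e.g.
 *
 *   OP(PLUS, PLUS), (C(Integer), C(Float), C(String), C(Array));
 *
 * expands to roughly:
 *
 *   mid = idPLUS; bop = BOP_PLUS; ruby_vm_redefined_flag[bop] = 0;
 *   add_opt_method(rb_cInteger, mid, bop);
 *   add_opt_method(rb_cFloat,   mid, bop);
 *   add_opt_method(rb_cString,  mid, bop);
 *   add_opt_method(rb_cArray,   mid, bop);
 */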
1978 
1979 /* for vm development */
1980 
1981 #if VMDEBUG
1982 static const char *
1983 vm_frametype_name(const rb_control_frame_t *cfp)
1984 {
1985  switch (VM_FRAME_TYPE(cfp)) {
1986  case VM_FRAME_MAGIC_METHOD: return "method";
1987  case VM_FRAME_MAGIC_BLOCK: return "block";
1988  case VM_FRAME_MAGIC_CLASS: return "class";
1989  case VM_FRAME_MAGIC_TOP: return "top";
1990  case VM_FRAME_MAGIC_CFUNC: return "cfunc";
1991  case VM_FRAME_MAGIC_IFUNC: return "ifunc";
1992  case VM_FRAME_MAGIC_EVAL: return "eval";
1993  case VM_FRAME_MAGIC_RESCUE: return "rescue";
1994  default:
1995  rb_bug("unknown frame");
1996  }
1997 }
1998 #endif
1999 
2000 static VALUE
2001 frame_return_value(const struct vm_throw_data *err)
2002 {
2003  if (THROW_DATA_P(err) &&
2004  THROW_DATA_STATE(err) == TAG_BREAK &&
2005  THROW_DATA_CONSUMED_P(err) == FALSE) {
2006  return THROW_DATA_VAL(err);
2007  }
2008  else {
2009  return Qnil;
2010  }
2011 }
2012 
2013 #if 0
2014 /* for debug */
2015 static const char *
2016 frame_name(const rb_control_frame_t *cfp)
2017 {
2018  unsigned long type = VM_FRAME_TYPE(cfp);
2019 #define C(t) if (type == VM_FRAME_MAGIC_##t) return #t
2020  C(METHOD);
2021  C(BLOCK);
2022  C(CLASS);
2023  C(TOP);
2024  C(CFUNC);
2025  C(PROC);
2026  C(IFUNC);
2027  C(EVAL);
2028  C(LAMBDA);
2029  C(RESCUE);
2030  C(DUMMY);
2031 #undef C
2032  return "unknown";
2033 }
2034 #endif
2035 
2036 // cfp_returning_with_value:
2037 // Whether cfp is the last frame in the unwinding process for a non-local return.
2038 static void
2039 hook_before_rewind(rb_execution_context_t *ec, const rb_control_frame_t *cfp,
2040  bool cfp_returning_with_value, int state, struct vm_throw_data *err)
2041 {
2042  if (state == TAG_RAISE && RBASIC(err)->klass == rb_eSysStackError) {
2043  return;
2044  }
2045  else {
2046  const rb_iseq_t *iseq = cfp->iseq;
2047  rb_hook_list_t *local_hooks = iseq->aux.exec.local_hooks;
2048 
2049  switch (VM_FRAME_TYPE(ec->cfp)) {
2050  case VM_FRAME_MAGIC_METHOD:
2051  RUBY_DTRACE_METHOD_RETURN_HOOK(ec, 0, 0);
2052  EXEC_EVENT_HOOK_AND_POP_FRAME(ec, RUBY_EVENT_RETURN, ec->cfp->self, 0, 0, 0, frame_return_value(err));
2053 
2054  if (UNLIKELY(local_hooks && local_hooks->events & RUBY_EVENT_RETURN)) {
2055  rb_exec_event_hook_orig(ec, local_hooks, RUBY_EVENT_RETURN,
2056  ec->cfp->self, 0, 0, 0, frame_return_value(err), TRUE);
2057  }
2058 
2059  THROW_DATA_CONSUMED_SET(err);
2060  break;
2061  case VM_FRAME_MAGIC_BLOCK:
2062  if (VM_FRAME_BMETHOD_P(ec->cfp)) {
2063  VALUE bmethod_return_value = frame_return_value(err);
2064  if (cfp_returning_with_value) {
2065  // Non-local return terminating at a BMETHOD control frame.
2066  bmethod_return_value = THROW_DATA_VAL(err);
2067  }
2069 
2070  EXEC_EVENT_HOOK(ec, RUBY_EVENT_B_RETURN, ec->cfp->self, 0, 0, 0, bmethod_return_value);
2071  if (UNLIKELY(local_hooks && local_hooks->events & RUBY_EVENT_B_RETURN)) {
2072  rb_exec_event_hook_orig(ec, local_hooks, RUBY_EVENT_B_RETURN,
2073  ec->cfp->self, 0, 0, 0, bmethod_return_value, FALSE);
2074  }
2075 
2076  const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(ec->cfp);
2077 
2078  EXEC_EVENT_HOOK_AND_POP_FRAME(ec, RUBY_EVENT_RETURN, ec->cfp->self,
2079  rb_vm_frame_method_entry(ec->cfp)->def->original_id,
2080  rb_vm_frame_method_entry(ec->cfp)->called_id,
2081  rb_vm_frame_method_entry(ec->cfp)->owner,
2082  bmethod_return_value);
2083 
2084  VM_ASSERT(me->def->type == VM_METHOD_TYPE_BMETHOD);
2085  local_hooks = me->def->body.bmethod.hooks;
2086 
2087  if (UNLIKELY(local_hooks && local_hooks->events & RUBY_EVENT_RETURN)) {
2088  rb_exec_event_hook_orig(ec, local_hooks, RUBY_EVENT_RETURN, ec->cfp->self,
2089  rb_vm_frame_method_entry(ec->cfp)->def->original_id,
2090  rb_vm_frame_method_entry(ec->cfp)->called_id,
2091  rb_vm_frame_method_entry(ec->cfp)->owner,
2092  bmethod_return_value, TRUE);
2093  }
2094  THROW_DATA_CONSUMED_SET(err);
2095  }
2096  else {
2097  EXEC_EVENT_HOOK_AND_POP_FRAME(ec, RUBY_EVENT_B_RETURN, ec->cfp->self, 0, 0, 0, frame_return_value(err));
2098  if (UNLIKELY(local_hooks && local_hooks->events & RUBY_EVENT_B_RETURN)) {
2099  rb_exec_event_hook_orig(ec, local_hooks, RUBY_EVENT_B_RETURN,
2100  ec->cfp->self, 0, 0, 0, frame_return_value(err), TRUE);
2101  }
2102  THROW_DATA_CONSUMED_SET(err);
2103  }
2104  break;
2105  case VM_FRAME_MAGIC_CLASS:
2106  EXEC_EVENT_HOOK_AND_POP_FRAME(ec, RUBY_EVENT_END, ec->cfp->self, 0, 0, 0, Qnil);
2107  break;
2108  }
2109  }
2110 }
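
/* For illustration: hook_before_rewind() is what keeps TracePoint return
 * events firing for frames discarded by a non-local jump.  In Ruby terms:
 *
 *   def m
 *     [1].each { return 42 }  # :b_return and :return still fire here
 *   end
 */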
2111 
2112 /* evaluator body */
2113 
2114 /* finish
2115   VMe (h1) finish
2116     VM    finish F1 F2
2117       cfunc finish F1 F2 C1
2118         rb_funcall finish F1 F2 C1
2119           VMe    finish F1 F2 C1
2120             VM    finish F1 F2 C1 F3
2121 
2122  F1 - F3 : pushed by VM
2123  C1 : pushed by send insn (CFUNC)
2124 
2125  struct CONTROL_FRAME {
2126  VALUE *pc; // cfp[0], program counter
2127  VALUE *sp; // cfp[1], stack pointer
2128  rb_iseq_t *iseq; // cfp[2], iseq
2129  VALUE self; // cfp[3], self
2130  const VALUE *ep; // cfp[4], env pointer
2131  const void *block_code; // cfp[5], block code
2132  };
2133 
2134  struct rb_captured_block {
2135  VALUE self;
2136  VALUE *ep;
2137  union code;
2138  };
2139 
2140  struct METHOD_ENV {
2141  VALUE param0;
2142  ...
2143  VALUE paramN;
2144  VALUE lvar1;
2145  ...
2146  VALUE lvarM;
2147  VALUE cref; // ep[-2]
2148  VALUE special; // ep[-1]
2149  VALUE flags; // ep[ 0] == lep[0]
2150  };
2151 
2152  struct BLOCK_ENV {
2153  VALUE block_param0;
2154  ...
2155  VALUE block_paramN;
2156  VALUE block_lvar1;
2157  ...
2158  VALUE block_lvarM;
2159  VALUE cref; // ep[-2]
2160  VALUE special; // ep[-1]
2161  VALUE flags; // ep[ 0]
2162  };
2163 
2164  struct CLASS_ENV {
2165  VALUE class_lvar0;
2166  ...
2167  VALUE class_lvarN;
2168  VALUE cref;
2169  VALUE prev_ep; // for frame jump
2170  VALUE flags;
2171  };
2172 
2173  struct C_METHOD_CONTROL_FRAME {
2174  VALUE *pc; // 0
2175  VALUE *sp; // stack pointer
2176  rb_iseq_t *iseq; // cmi
2177  VALUE self; // ?
2178  VALUE *ep; // ep == lep
2179  void *code; //
2180  };
2181 
2182  struct C_BLOCK_CONTROL_FRAME {
2183  VALUE *pc; // point only "finish" insn
2184  VALUE *sp; // sp
2185  rb_iseq_t *iseq; // ?
2186  VALUE self; //
2187  VALUE *ep; // ep
2188  void *code; //
2189  };
2190 
2191  If mjit_exec is already called before calling vm_exec, `mjit_enable_p` should
2192  be FALSE to avoid calling `mjit_exec` twice.
2193  */
2194 
2195 static inline VALUE
2196 vm_exec_handle_exception(rb_execution_context_t *ec, enum ruby_tag_type state,
2197  VALUE errinfo, VALUE *initial);
2198 
2199 VALUE
2200 vm_exec(rb_execution_context_t *ec, bool mjit_enable_p)
2201 {
2202  enum ruby_tag_type state;
2203  VALUE result = Qundef;
2204  VALUE initial = 0;
2205 
2206  EC_PUSH_TAG(ec);
2207 
2208  _tag.retval = Qnil;
2209  if ((state = EC_EXEC_TAG()) == TAG_NONE) {
2210  if (!mjit_enable_p || (result = mjit_exec(ec)) == Qundef) {
2211  result = vm_exec_core(ec, initial);
2212  }
2213  goto vm_loop_start; /* fallback to the VM */
2214  }
2215  else {
2216  result = ec->errinfo;
2217  rb_ec_raised_reset(ec, RAISED_STACKOVERFLOW | RAISED_NOMEMORY);
2218  while ((result = vm_exec_handle_exception(ec, state, result, &initial)) == Qundef) {
2219  /* caught a jump, exec the handler */
2220  result = vm_exec_core(ec, initial);
2221  vm_loop_start:
2222  VM_ASSERT(ec->tag == &_tag);
2223  /* when caught `throw`, `tag.state` is set. */
2224  if ((state = _tag.state) == TAG_NONE) break;
2225  _tag.state = TAG_NONE;
2226  }
2227  }
2228  EC_POP_TAG();
2229  return result;
2230 }
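
/* The tag protocol used by vm_exec() above: EC_PUSH_TAG()/EC_EXEC_TAG() wrap
 * setjmp() and EC_JUMP_TAG() wraps longjmp().  A minimal consumer looks like
 * the following sketch (run_body/on_jump are placeholders for exposition):
 *
 *   enum ruby_tag_type state;
 *   EC_PUSH_TAG(ec);
 *   if ((state = EC_EXEC_TAG()) == TAG_NONE) {
 *       result = run_body(ec);        // may EC_JUMP_TAG() out of here
 *   }
 *   else {
 *       result = on_jump(ec, state);  // reached via EC_JUMP_TAG(ec, state)
 *   }
 *   EC_POP_TAG();
 */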
2231 
2232 static inline VALUE
2233 vm_exec_handle_exception(rb_execution_context_t *ec, enum ruby_tag_type state,
2234  VALUE errinfo, VALUE *initial)
2235 {
2236  struct vm_throw_data *err = (struct vm_throw_data *)errinfo;
2237 
2238  for (;;) {
2239  unsigned int i;
2240  const struct iseq_catch_table_entry *entry;
2241  const struct iseq_catch_table *ct;
2242  unsigned long epc, cont_pc, cont_sp;
2243  const rb_iseq_t *catch_iseq;
2244  rb_control_frame_t *cfp;
2245  VALUE type;
2246  const rb_control_frame_t *escape_cfp;
2247 
2248  cont_pc = cont_sp = 0;
2249  catch_iseq = NULL;
2250 
2251  while (ec->cfp->pc == 0 || ec->cfp->iseq == 0) {
2252  if (UNLIKELY(VM_FRAME_TYPE(ec->cfp) == VM_FRAME_MAGIC_CFUNC)) {
2253  EXEC_EVENT_HOOK_AND_POP_FRAME(ec, RUBY_EVENT_C_RETURN, ec->cfp->self,
2254  rb_vm_frame_method_entry(ec->cfp)->def->original_id,
2255  rb_vm_frame_method_entry(ec->cfp)->called_id,
2256  rb_vm_frame_method_entry(ec->cfp)->owner, Qnil);
2257  RUBY_DTRACE_CMETHOD_RETURN_HOOK(ec,
2258  rb_vm_frame_method_entry(ec->cfp)->owner,
2259  rb_vm_frame_method_entry(ec->cfp)->def->original_id);
2260  }
2261  rb_vm_pop_frame(ec);
2262  }
2263 
2264  cfp = ec->cfp;
2265  epc = cfp->pc - cfp->iseq->body->iseq_encoded;
2266 
2267  escape_cfp = NULL;
2268  if (state == TAG_BREAK || state == TAG_RETURN) {
2269  escape_cfp = THROW_DATA_CATCH_FRAME(err);
2270 
2271  if (cfp == escape_cfp) {
2272  if (state == TAG_RETURN) {
2273  if (!VM_FRAME_FINISHED_P(cfp)) {
2274  THROW_DATA_CATCH_FRAME_SET(err, cfp + 1);
2275  THROW_DATA_STATE_SET(err, state = TAG_BREAK);
2276  }
2277  else {
2278  ct = cfp->iseq->body->catch_table;
2279  if (ct) for (i = 0; i < ct->size; i++) {
2280  entry = UNALIGNED_MEMBER_PTR(ct, entries[i]);
2281  if (entry->start < epc && entry->end >= epc) {
2282  if (entry->type == CATCH_TYPE_ENSURE) {
2283  catch_iseq = entry->iseq;
2284  cont_pc = entry->cont;
2285  cont_sp = entry->sp;
2286  break;
2287  }
2288  }
2289  }
2290  if (catch_iseq == NULL) {
2291  ec->errinfo = Qnil;
2292  THROW_DATA_CATCH_FRAME_SET(err, cfp + 1);
2293  // cfp == escape_cfp here so calling with cfp_returning_with_value = true
2294  hook_before_rewind(ec, ec->cfp, true, state, err);
2295  rb_vm_pop_frame(ec);
2296  return THROW_DATA_VAL(err);
2297  }
2298  }
2299  /* through */
2300  }
2301  else {
2302  /* TAG_BREAK */
2303 #if OPT_STACK_CACHING
2304  *initial = THROW_DATA_VAL(err);
2305 #else
2306  *ec->cfp->sp++ = THROW_DATA_VAL(err);
2307 #endif
2308  ec->errinfo = Qnil;
2309  return Qundef;
2310  }
2311  }
2312  }
2313 
2314  if (state == TAG_RAISE) {
2315  ct = cfp->iseq->body->catch_table;
2316  if (ct) for (i = 0; i < ct->size; i++) {
2317  entry = UNALIGNED_MEMBER_PTR(ct, entries[i]);
2318  if (entry->start < epc && entry->end >= epc) {
2319 
2320  if (entry->type == CATCH_TYPE_RESCUE ||
2321  entry->type == CATCH_TYPE_ENSURE) {
2322  catch_iseq = entry->iseq;
2323  cont_pc = entry->cont;
2324  cont_sp = entry->sp;
2325  break;
2326  }
2327  }
2328  }
2329  }
2330  else if (state == TAG_RETRY) {
2331  ct = cfp->iseq->body->catch_table;
2332  if (ct) for (i = 0; i < ct->size; i++) {
2333  entry = UNALIGNED_MEMBER_PTR(ct, entries[i]);
2334  if (entry->start < epc && entry->end >= epc) {
2335 
2336  if (entry->type == CATCH_TYPE_ENSURE) {
2337  catch_iseq = entry->iseq;
2338  cont_pc = entry->cont;
2339  cont_sp = entry->sp;
2340  break;
2341  }
2342  else if (entry->type == CATCH_TYPE_RETRY) {
2343  const rb_control_frame_t *escape_cfp;
2344  escape_cfp = THROW_DATA_CATCH_FRAME(err);
2345  if (cfp == escape_cfp) {
2346  cfp->pc = cfp->iseq->body->iseq_encoded + entry->cont;
2347  ec->errinfo = Qnil;
2348  return Qundef;
2349  }
2350  }
2351  }
2352  }
2353  }
2354  else if ((state == TAG_BREAK && !escape_cfp) ||
2355  (state == TAG_REDO) ||
2356  (state == TAG_NEXT)) {
2357  type = (const enum catch_type[TAG_MASK]) {
2358  [TAG_BREAK] = CATCH_TYPE_BREAK,
2359  [TAG_NEXT] = CATCH_TYPE_NEXT,
2360  [TAG_REDO] = CATCH_TYPE_REDO,
2361  /* otherwise = dontcare */
2362  }[state];
2363 
2364  ct = cfp->iseq->body->catch_table;
2365  if (ct) for (i = 0; i < ct->size; i++) {
2366  entry = UNALIGNED_MEMBER_PTR(ct, entries[i]);
2367 
2368  if (entry->start < epc && entry->end >= epc) {
2369  if (entry->type == CATCH_TYPE_ENSURE) {
2370  catch_iseq = entry->iseq;
2371  cont_pc = entry->cont;
2372  cont_sp = entry->sp;
2373  break;
2374  }
2375  else if (entry->type == type) {
2376  cfp->pc = cfp->iseq->body->iseq_encoded + entry->cont;
2377  cfp->sp = vm_base_ptr(cfp) + entry->sp;
2378 
2379  if (state != TAG_REDO) {
2380 #if OPT_STACK_CACHING
2381  *initial = THROW_DATA_VAL(err);
2382 #else
2383  *ec->cfp->sp++ = THROW_DATA_VAL(err);
2384 #endif
2385  }
2386  ec->errinfo = Qnil;
2387  VM_ASSERT(ec->tag->state == TAG_NONE);
2388  return Qundef;
2389  }
2390  }
2391  }
2392  }
2393  else {
2394  ct = cfp->iseq->body->catch_table;
2395  if (ct) for (i = 0; i < ct->size; i++) {
2396  entry = UNALIGNED_MEMBER_PTR(ct, entries[i]);
2397  if (entry->start < epc && entry->end >= epc) {
2398 
2399  if (entry->type == CATCH_TYPE_ENSURE) {
2400  catch_iseq = entry->iseq;
2401  cont_pc = entry->cont;
2402  cont_sp = entry->sp;
2403  break;
2404  }
2405  }
2406  }
2407  }
2408 
2409  if (catch_iseq != NULL) { /* found catch table */
2410  /* enter catch scope */
2411  const int arg_size = 1;
2412 
2413  rb_iseq_check(catch_iseq);
2414  cfp->sp = vm_base_ptr(cfp) + cont_sp;
2415  cfp->pc = cfp->iseq->body->iseq_encoded + cont_pc;
2416 
2417  /* push block frame */
2418  cfp->sp[0] = (VALUE)err;
2419  vm_push_frame(ec, catch_iseq, VM_FRAME_MAGIC_RESCUE,
2420  cfp->self,
2421  VM_GUARDED_PREV_EP(cfp->ep),
2422  0, /* cref or me */
2423  catch_iseq->body->iseq_encoded,
2424  cfp->sp + arg_size /* push value */,
2425  catch_iseq->body->local_table_size - arg_size,
2426  catch_iseq->body->stack_max);
2427 
2428  state = 0;
2429  ec->tag->state = TAG_NONE;
2430  ec->errinfo = Qnil;
2431 
2432  return Qundef;
2433  }
2434  else {
2435  hook_before_rewind(ec, ec->cfp, (cfp == escape_cfp), state, err);
2436 
2437  if (VM_FRAME_FINISHED_P(ec->cfp)) {
2438  rb_vm_pop_frame(ec);
2439  ec->errinfo = (VALUE)err;
2440  ec->tag = ec->tag->prev;
2441  EC_JUMP_TAG(ec, state);
2442  }
2443  else {
2444  rb_vm_pop_frame(ec);
2445  }
2446  }
2447  }
2448 }
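
/* Catch-table entries are matched above by a linear scan with the test
 * (entry->start < epc && entry->end >= epc).  Roughly, for
 *
 *   begin            # entry->start
 *     ...
 *   rescue => e      # CATCH_TYPE_RESCUE entry
 *   ensure           # CATCH_TYPE_ENSURE entry
 *   end              # entry->end
 *
 * a TAG_RAISE whose pc lies in that range enters the rescue/ensure iseq as a
 * VM_FRAME_MAGIC_RESCUE frame whose single argument (arg_size == 1) is the
 * throw data.
 */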
2449 
2450 /* misc */
2451 
2452 VALUE
2453 rb_iseq_eval(const rb_iseq_t *iseq)
2454 {
2455  rb_execution_context_t *ec = GET_EC();
2456  VALUE val;
2457  vm_set_top_stack(ec, iseq);
2458  val = vm_exec(ec, true);
2459  return val;
2460 }
2461 
2462 VALUE
2463 rb_iseq_eval_main(const rb_iseq_t *iseq)
2464 {
2465  rb_execution_context_t *ec = GET_EC();
2466  VALUE val;
2467 
2468  vm_set_main_stack(ec, iseq);
2469  val = vm_exec(ec, true);
2470  return val;
2471 }
2472 
2473 int
2474 rb_vm_control_frame_id_and_class(const rb_control_frame_t *cfp, ID *idp, ID *called_idp, VALUE *klassp)
2475 {
2476  const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(cfp);
2477 
2478  if (me) {
2479  if (idp) *idp = me->def->original_id;
2480  if (called_idp) *called_idp = me->called_id;
2481  if (klassp) *klassp = me->owner;
2482  return TRUE;
2483  }
2484  else {
2485  return FALSE;
2486  }
2487 }
2488 
2489 int
2490 rb_ec_frame_method_id_and_class(const rb_execution_context_t *ec, ID *idp, ID *called_idp, VALUE *klassp)
2491 {
2492  return rb_vm_control_frame_id_and_class(ec->cfp, idp, called_idp, klassp);
2493 }
2494 
2495 int
2496 rb_frame_method_id_and_class(ID *idp, VALUE *klassp)
2497 {
2498  return rb_ec_frame_method_id_and_class(GET_EC(), idp, 0, klassp);
2499 }
2500 
2501 VALUE
2502 rb_vm_call_cfunc(VALUE recv, VALUE (*func)(VALUE), VALUE arg,
2503  VALUE block_handler, VALUE filename)
2504 {
2505  rb_execution_context_t *ec = GET_EC();
2506  const rb_control_frame_t *reg_cfp = ec->cfp;
2507  const rb_iseq_t *iseq = rb_iseq_new(0, filename, filename, Qnil, 0, ISEQ_TYPE_TOP);
2508  VALUE val;
2509 
2510  vm_push_frame(ec, iseq, VM_FRAME_MAGIC_TOP | VM_ENV_FLAG_LOCAL | VM_FRAME_FLAG_FINISH,
2511  recv, block_handler,
2512  (VALUE)vm_cref_new_toplevel(ec), /* cref or me */
2513  0, reg_cfp->sp, 0, 0);
2514 
2515  val = (*func)(arg);
2516 
2517  rb_vm_pop_frame(ec);
2518  return val;
2519 }
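
/* A minimal usage sketch (body and obj are placeholders): run a C function
 * under a fresh TOP frame so that backtraces attribute it to `filename`:
 *
 *   static VALUE body(VALUE arg) { return rb_funcall(arg, rb_intern("to_s"), 0); }
 *   ...
 *   VALUE s = rb_vm_call_cfunc(rb_vm_top_self(), body, obj,
 *                              VM_BLOCK_HANDLER_NONE, rb_str_new2("<example>"));
 */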
2520 
2521 /* vm */
2522 
2523 void
2524 rb_vm_update_references(void *ptr)
2525 {
2526  if (ptr) {
2527  rb_vm_t *vm = ptr;
2528 
2529  rb_gc_update_tbl_refs(vm->frozen_strings);
2530  vm->mark_object_ary = rb_gc_location(vm->mark_object_ary);
2531  vm->load_path = rb_gc_location(vm->load_path);
2532  vm->load_path_snapshot = rb_gc_location(vm->load_path_snapshot);
2533 
2534  if (vm->load_path_check_cache) {
2535  vm->load_path_check_cache = rb_gc_location(vm->load_path_check_cache);
2536  }
2537 
2538  vm->expanded_load_path = rb_gc_location(vm->expanded_load_path);
2539  vm->loaded_features = rb_gc_location(vm->loaded_features);
2540  vm->loaded_features_snapshot = rb_gc_location(vm->loaded_features_snapshot);
2541  vm->loaded_features_realpaths = rb_gc_location(vm->loaded_features_realpaths);
2542  vm->top_self = rb_gc_location(vm->top_self);
2543  vm->orig_progname = rb_gc_location(vm->orig_progname);
2544 
2545  rb_gc_update_tbl_refs(vm->overloaded_cme_table);
2546 
2547  if (vm->coverages) {
2548  vm->coverages = rb_gc_location(vm->coverages);
2549  vm->me2counter = rb_gc_location(vm->me2counter);
2550  }
2551  }
2552 }
2553 
2554 void
2555 rb_vm_each_stack_value(void *ptr, void (*cb)(VALUE, void*), void *ctx)
2556 {
2557  if (ptr) {
2558  rb_vm_t *vm = ptr;
2559  rb_ractor_t *r = 0;
2560  list_for_each(&vm->ractor.set, r, vmlr_node) {
2561  VM_ASSERT(rb_ractor_status_p(r, ractor_blocking) ||
2562  rb_ractor_status_p(r, ractor_running));
2563  if (r->threads.cnt > 0) {
2564  rb_thread_t *th = 0;
2565  list_for_each(&r->threads.set, th, lt_node) {
2566  VM_ASSERT(th != NULL);
2567  rb_execution_context_t * ec = th->ec;
2568  if (ec->vm_stack) {
2569  VALUE *p = ec->vm_stack;
2570  VALUE *sp = ec->cfp->sp;
2571  while (p < sp) {
2572  if (!rb_special_const_p(*p)) {
2573  cb(*p, ctx);
2574  }
2575  p++;
2576  }
2577  }
2578  }
2579  }
2580  }
2581  }
2582 }
2583 
2584 static enum rb_id_table_iterator_result
2585 vm_mark_negative_cme(VALUE val, void *dmy)
2586 {
2587  rb_gc_mark(val);
2588  return ID_TABLE_CONTINUE;
2589 }
2590 
2591 void
2592 rb_vm_mark(void *ptr)
2593 {
2594  RUBY_MARK_ENTER("vm");
2595  RUBY_GC_INFO("-------------------------------------------------\n");
2596  if (ptr) {
2597  rb_vm_t *vm = ptr;
2598  rb_ractor_t *r = 0;
2599  long i, len;
2600  const VALUE *obj_ary;
2601 
2602  list_for_each(&vm->ractor.set, r, vmlr_node) {
2603  // ractor.set only contains blocking or running ractors
2604  VM_ASSERT(rb_ractor_status_p(r, ractor_blocking) ||
2605  rb_ractor_status_p(r, ractor_running));
2606  rb_gc_mark(rb_ractor_self(r));
2607  }
2608 
2609  rb_gc_mark_movable(vm->mark_object_ary);
2610 
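 /* mark_object_ary is an array of bucket arrays, filled two levels deep
  * by rb_gc_register_mark_object() (gc.c); mark each bucket, then every
  * object stored inside it. */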
2611  len = RARRAY_LEN(vm->mark_object_ary);
2612  obj_ary = RARRAY_CONST_PTR(vm->mark_object_ary);
2613  for (i=0; i < len; i++) {
2614  const VALUE *ptr;
2615  long j, jlen;
2616 
2617  rb_gc_mark(*obj_ary);
2618  jlen = RARRAY_LEN(*obj_ary);
2619  ptr = RARRAY_CONST_PTR(*obj_ary);
2620  for (j=0; j < jlen; j++) {
2621  rb_gc_mark(*ptr++);
2622  }
2623  obj_ary++;
2624  }
2625 
2626  rb_gc_mark_movable(vm->load_path);
2627  rb_gc_mark_movable(vm->load_path_snapshot);
2628  RUBY_MARK_MOVABLE_UNLESS_NULL(vm->load_path_check_cache);
2629  rb_gc_mark_movable(vm->expanded_load_path);
2630  rb_gc_mark_movable(vm->loaded_features);
2631  rb_gc_mark_movable(vm->loaded_features_snapshot);
2632  rb_gc_mark_movable(vm->loaded_features_realpaths);
2633  rb_gc_mark_movable(vm->top_self);
2634  rb_gc_mark_movable(vm->orig_progname);
2635  RUBY_MARK_MOVABLE_UNLESS_NULL(vm->coverages);
2636  RUBY_MARK_MOVABLE_UNLESS_NULL(vm->me2counter);
2637  /* Prevent classes from moving */
2638  rb_mark_tbl(vm->defined_module_hash);
2639 
2640  if (vm->loading_table) {
2641  rb_mark_tbl(vm->loading_table);
2642  }
2643 
2644  rb_gc_mark_values(RUBY_NSIG, vm->trap_list.cmd);
2645 
2646  rb_id_table_foreach_values(vm->negative_cme_table, vm_mark_negative_cme, NULL);
2647  rb_mark_tbl_no_pin(vm->overloaded_cme_table);
2648  for (i=0; i<VM_GLOBAL_CC_CACHE_TABLE_SIZE; i++) {
2649  const struct rb_callcache *cc = vm->global_cc_cache_table[i];
2650 
2651  if (cc != NULL) {
2652  if (!vm_cc_invalidated_p(cc)) {
2653  rb_gc_mark((VALUE)cc);
2654  }
2655  else {
2656  vm->global_cc_cache_table[i] = NULL;
2657  }
2658  }
2659  }
2660 
2661  mjit_mark();
2662  }
2663 
2664  RUBY_MARK_LEAVE("vm");
2665 }
2666 
2667 #undef rb_vm_register_special_exception
2668 void
2669 rb_vm_register_special_exception_str(enum ruby_special_exceptions sp, VALUE cls, VALUE mesg)
2670 {
2671  rb_vm_t *vm = GET_VM();
2672  VALUE exc = rb_exc_new3(cls, rb_obj_freeze(mesg));
2673  OBJ_FREEZE(exc);
2674  ((VALUE *)vm->special_exceptions)[sp] = exc;
2675  rb_gc_register_mark_object(exc);
2676 }
2677 
2678 int
2679 rb_vm_add_root_module(VALUE module)
2680 {
2681  rb_vm_t *vm = GET_VM();
2682 
2683  st_insert(vm->defined_module_hash, (st_data_t)module, (st_data_t)module);
2684 
2685  return TRUE;
2686 }
2687 
2688 static int
2689 free_loading_table_entry(st_data_t key, st_data_t value, st_data_t arg)
2690 {
2691  xfree((char *)key);
2692  return ST_DELETE;
2693 }
2694 
2695 int
2696 ruby_vm_destruct(rb_vm_t *vm)
2697 {
2698  RUBY_FREE_ENTER("vm");
2699 
2700  if (vm) {
2701  rb_thread_t *th = vm->ractor.main_thread;
2702  struct rb_objspace *objspace = vm->objspace;
2703  vm->ractor.main_thread = NULL;
2704 
2705  if (th) {
2706  rb_fiber_reset_root_local_storage(th);
2707  thread_free(th);
2708  }
2709  rb_vm_living_threads_init(vm);
2710  ruby_vm_run_at_exit_hooks(vm);
2711  if (vm->loading_table) {
2712  st_foreach(vm->loading_table, free_loading_table_entry, 0);
2713  st_free_table(vm->loading_table);
2714  vm->loading_table = 0;
2715  }
2716  if (vm->frozen_strings) {
2717  st_free_table(vm->frozen_strings);
2718  vm->frozen_strings = 0;
2719  }
2720  RB_ALTSTACK_FREE(vm->main_altstack);
2721  if (objspace) {
2722  rb_objspace_free(objspace);
2723  }
2724  rb_native_mutex_destroy(&vm->waitpid_lock);
2725  rb_native_mutex_destroy(&vm->workqueue_lock);
2726  /* after freeing objspace, you *can't* use ruby_xfree() */
2727  ruby_mimfree(vm);
2728  ruby_current_vm_ptr = NULL;
2729  }
2730  RUBY_FREE_LEAVE("vm");
2731  return 0;
2732 }
2733 
2734 static size_t
2735 vm_memsize(const void *ptr)
2736 {
2737  size_t size = sizeof(rb_vm_t);
2738 
2739  // TODO
2740  // size += vmobj->ractor_num * sizeof(rb_ractor_t);
2741 
2742  return size;
2743 }
2744 
2745 static const rb_data_type_t vm_data_type = {
2746  "VM",
2747  {0, 0, vm_memsize,},
2748  0, 0, RUBY_TYPED_FREE_IMMEDIATELY
2749 };
2750 
2751 
2752 static VALUE
2753 vm_default_params(void)
2754 {
2755  rb_vm_t *vm = GET_VM();
2756  VALUE result = rb_hash_new_with_size(4);
2757 #define SET(name) rb_hash_aset(result, ID2SYM(rb_intern(#name)), SIZET2NUM(vm->default_params.name));
2758  SET(thread_vm_stack_size);
2759  SET(thread_machine_stack_size);
2760  SET(fiber_vm_stack_size);
2761  SET(fiber_machine_stack_size);
2762 #undef SET
2763  rb_obj_freeze(result);
2764  return result;
2765 }
2766 
2767 static size_t
2768 get_param(const char *name, size_t default_value, size_t min_value)
2769 {
2770  const char *envval;
2771  size_t result = default_value;
2772  if ((envval = getenv(name)) != 0) {
2773  long val = atol(envval);
2774  if (val < (long)min_value) {
2775  val = (long)min_value;
2776  }
2777  result = (size_t)(((val -1 + RUBY_VM_SIZE_ALIGN) / RUBY_VM_SIZE_ALIGN) * RUBY_VM_SIZE_ALIGN);
2778  }
2779  if (0) ruby_debug_printf("%s: %"PRIuSIZE"\n", name, result); /* debug print */
2780 
2781  return result;
2782 }
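
/* Example: get_param() first clamps to the minimum, then rounds up to a
 * multiple of RUBY_VM_SIZE_ALIGN (4096 in vm_core.h), so
 *
 *   RUBY_THREAD_VM_STACK_SIZE=1000000 ruby script.rb
 *
 * yields 1003520 (= 245 * 4096), the next 4KiB multiple above 1000000.
 */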
2783 
2784 static void
2785 check_machine_stack_size(size_t *sizep)
2786 {
2787 #ifdef PTHREAD_STACK_MIN
2788  size_t size = *sizep;
2789 #endif
2790 
2791 #ifdef PTHREAD_STACK_MIN
2792  if (size < (size_t)PTHREAD_STACK_MIN) {
2793  *sizep = (size_t)PTHREAD_STACK_MIN * 2;
2794  }
2795 #endif
2796 }
2797 
2798 static void
2799 vm_default_params_setup(rb_vm_t *vm)
2800 {
2801  vm->default_params.thread_vm_stack_size =
2802  get_param("RUBY_THREAD_VM_STACK_SIZE",
2803  RUBY_VM_THREAD_VM_STACK_SIZE,
2804  RUBY_VM_THREAD_VM_STACK_SIZE_MIN);
2805 
2806  vm->default_params.thread_machine_stack_size =
2807  get_param("RUBY_THREAD_MACHINE_STACK_SIZE",
2808  RUBY_VM_THREAD_MACHINE_STACK_SIZE,
2809  RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN);
2810 
2811  vm->default_params.fiber_vm_stack_size =
2812  get_param("RUBY_FIBER_VM_STACK_SIZE",
2813  RUBY_VM_FIBER_VM_STACK_SIZE,
2814  RUBY_VM_FIBER_VM_STACK_SIZE_MIN);
2815 
2816  vm->default_params.fiber_machine_stack_size =
2817  get_param("RUBY_FIBER_MACHINE_STACK_SIZE",
2818  RUBY_VM_FIBER_MACHINE_STACK_SIZE,
2819  RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN);
2820 
2821  /* environment dependent check */
2822  check_machine_stack_size(&vm->default_params.thread_machine_stack_size);
2823  check_machine_stack_size(&vm->default_params.fiber_machine_stack_size);
2824 }
2825 
2826 static void
2827 vm_init2(rb_vm_t *vm)
2828 {
2829  MEMZERO(vm, rb_vm_t, 1);
2830  rb_vm_living_threads_init(vm);
2831  vm->thread_report_on_exception = 1;
2832  vm->src_encoding_index = -1;
2833 
2834  vm_default_params_setup(vm);
2835 }
2836 
2837 void
2838 rb_execution_context_update(const rb_execution_context_t *ec)
2839 {
2840  /* update VM stack */
2841  if (ec->vm_stack) {
2842  long i;
2843  VM_ASSERT(ec->cfp);
2844  VALUE *p = ec->vm_stack;
2845  VALUE *sp = ec->cfp->sp;
2846  rb_control_frame_t *cfp = ec->cfp;
2847  rb_control_frame_t *limit_cfp = (void *)(ec->vm_stack + ec->vm_stack_size);
2848 
2849  for (i = 0; i < (long)(sp - p); i++) {
2850  VALUE ref = p[i];
2851  VALUE update = rb_gc_location(ref);
2852  if (ref != update) {
2853  p[i] = update;
2854  }
2855  }
2856 
2857  while (cfp != limit_cfp) {
2858  const VALUE *ep = cfp->ep;
2859  cfp->self = rb_gc_location(cfp->self);
2860  cfp->iseq = (rb_iseq_t *)rb_gc_location((VALUE)cfp->iseq);
2861  cfp->block_code = (void *)rb_gc_location((VALUE)cfp->block_code);
2862 
2863  if (!VM_ENV_LOCAL_P(ep)) {
2864  const VALUE *prev_ep = VM_ENV_PREV_EP(ep);
2865  if (VM_ENV_FLAGS(prev_ep, VM_ENV_FLAG_ESCAPED)) {
2866  VM_FORCE_WRITE(&prev_ep[VM_ENV_DATA_INDEX_ENV], rb_gc_location(prev_ep[VM_ENV_DATA_INDEX_ENV]));
2867  }
2868 
2869  if (VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED)) {
2870  VM_FORCE_WRITE(&ep[VM_ENV_DATA_INDEX_ENV], rb_gc_location(ep[VM_ENV_DATA_INDEX_ENV]));
2871  VM_FORCE_WRITE(&ep[VM_ENV_DATA_INDEX_ME_CREF], rb_gc_location(ep[VM_ENV_DATA_INDEX_ME_CREF]));
2872  }
2873  }
2874 
2875  cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
2876  }
2877  }
2878 }
2879 
2880 static enum rb_id_table_iterator_result
2881 mark_local_storage_i(VALUE local, void *data)
2882 {
2883  rb_gc_mark(local);
2884  return ID_TABLE_CONTINUE;
2885 }
2886 
2887 void
2888 rb_execution_context_mark(const rb_execution_context_t *ec)
2889 {
2890  /* mark VM stack */
2891  if (ec->vm_stack) {
2892  VM_ASSERT(ec->cfp);
2893  VALUE *p = ec->vm_stack;
2894  VALUE *sp = ec->cfp->sp;
2895  rb_control_frame_t *cfp = ec->cfp;
2896  rb_control_frame_t *limit_cfp = (void *)(ec->vm_stack + ec->vm_stack_size);
2897 
2898  VM_ASSERT(sp == ec->cfp->sp);
2899  rb_gc_mark_vm_stack_values((long)(sp - p), p);
2900 
2901  while (cfp != limit_cfp) {
2902  const VALUE *ep = cfp->ep;
2903  VM_ASSERT(!!VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED) == vm_ep_in_heap_p_(ec, ep));
2904  rb_gc_mark_movable(cfp->self);
2905  rb_gc_mark_movable((VALUE)cfp->iseq);
2906  rb_gc_mark_movable((VALUE)cfp->block_code);
2907 
2908  if (!VM_ENV_LOCAL_P(ep)) {
2909  const VALUE *prev_ep = VM_ENV_PREV_EP(ep);
2910  if (VM_ENV_FLAGS(prev_ep, VM_ENV_FLAG_ESCAPED)) {
2911  rb_gc_mark_movable(prev_ep[VM_ENV_DATA_INDEX_ENV]);
2912  }
2913 
2914  if (VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED)) {
2915  rb_gc_mark_movable(ep[VM_ENV_DATA_INDEX_ENV]);
2916  rb_gc_mark(ep[VM_ENV_DATA_INDEX_ME_CREF]);
2917  }
2918  }
2919 
2920  cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
2921  }
2922  }
2923 
2924  /* mark machine stack */
2925  if (ec->machine.stack_start && ec->machine.stack_end &&
2926  ec != GET_EC() /* marked for current ec at the first stage of marking */
2927  ) {
2928  rb_gc_mark_machine_stack(ec);
2929  rb_gc_mark_locations((VALUE *)&ec->machine.regs,
2930  (VALUE *)(&ec->machine.regs) +
2931  sizeof(ec->machine.regs) / (sizeof(VALUE)));
2932  }
2933 
2934  RUBY_MARK_UNLESS_NULL(ec->errinfo);
2935  RUBY_MARK_UNLESS_NULL(ec->root_svar);
2936  if (ec->local_storage) {
2937  rb_id_table_foreach_values(ec->local_storage, mark_local_storage_i, NULL);
2938  }
2939  RUBY_MARK_UNLESS_NULL(ec->local_storage_recursive_hash);
2940  RUBY_MARK_UNLESS_NULL(ec->local_storage_recursive_hash_for_trace);
2941  RUBY_MARK_UNLESS_NULL(ec->private_const_reference);
2942 }
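
/* Note: rb_gc_mark_machine_stack() and the register scan above are the
 * conservative part of Ruby's GC: any word on the C stack or in the saved
 * registers that looks like a heap pointer pins the object it names. */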
2943 
2944 void rb_fiber_mark_self(rb_fiber_t *fib);
2945 void rb_fiber_update_self(rb_fiber_t *fib);
2946 void rb_threadptr_root_fiber_setup(rb_thread_t *th);
2947 void rb_threadptr_root_fiber_release(rb_thread_t *th);
2948 
2949 static void
2950 thread_compact(void *ptr)
2951 {
2952  rb_thread_t *th = ptr;
2953 
2954  th->self = rb_gc_location(th->self);
2955 
2956  if (!th->root_fiber) {
2957  rb_execution_context_update(th->ec);
2958  }
2959 }
2960 
2961 static void
2962 thread_mark(void *ptr)
2963 {
2964  rb_thread_t *th = ptr;
2965  RUBY_MARK_ENTER("thread");
2966  rb_fiber_mark_self(th->ec->fiber_ptr);
2967 
2968  /* mark ruby objects */
2969  switch (th->invoke_type) {
2970  case thread_invoke_type_proc:
2971  case thread_invoke_type_ractor_proc:
2972  RUBY_MARK_UNLESS_NULL(th->invoke_arg.proc.proc);
2973  RUBY_MARK_UNLESS_NULL(th->invoke_arg.proc.args);
2974  break;
2975  case thread_invoke_type_func:
2976  rb_gc_mark_maybe((VALUE)th->invoke_arg.func.arg);
2977  break;
2978  default:
2979  break;
2980  }
2981 
2982  rb_gc_mark(rb_ractor_self(th->ractor));
2983  RUBY_MARK_UNLESS_NULL(th->thgroup);
2984  RUBY_MARK_UNLESS_NULL(th->value);
2985  RUBY_MARK_UNLESS_NULL(th->pending_interrupt_queue);
2986  RUBY_MARK_UNLESS_NULL(th->pending_interrupt_mask_stack);
2987  RUBY_MARK_UNLESS_NULL(th->top_self);
2988  RUBY_MARK_UNLESS_NULL(th->top_wrapper);
2989  if (th->root_fiber) rb_fiber_mark_self(th->root_fiber);
2990 
2991  /* Ensure EC stack objects are pinned */
2992  rb_execution_context_mark(th->ec);
2993  RUBY_MARK_UNLESS_NULL(th->stat_insn_usage);
2994  RUBY_MARK_UNLESS_NULL(th->last_status);
2995  RUBY_MARK_UNLESS_NULL(th->locking_mutex);
2996  RUBY_MARK_UNLESS_NULL(th->name);
2997 
2998  RUBY_MARK_UNLESS_NULL(th->scheduler);
2999 
3000  RUBY_MARK_LEAVE("thread");
3001 }
3002 
3003 static void
3004 thread_free(void *ptr)
3005 {
3006  rb_thread_t *th = ptr;
3007  RUBY_FREE_ENTER("thread");
3008 
3009  if (th->locking_mutex != Qfalse) {
3010  rb_bug("thread_free: locking_mutex must be NULL (%p:%p)", (void *)th, (void *)th->locking_mutex);
3011  }
3012  if (th->keeping_mutexes != NULL) {
3013  rb_bug("thread_free: keeping_mutexes must be NULL (%p:%p)", (void *)th, (void *)th->keeping_mutexes);
3014  }
3015 
3016  rb_threadptr_root_fiber_release(th);
3017 
3018  if (th->vm && th->vm->ractor.main_thread == th) {
3019  RUBY_GC_INFO("MRI main thread\n");
3020  }
3021  else {
3022  ruby_xfree(ptr);
3023  }
3024 
3025  RUBY_FREE_LEAVE("thread");
3026 }
3027 
3028 static size_t
3029 thread_memsize(const void *ptr)
3030 {
3031  const rb_thread_t *th = ptr;
3032  size_t size = sizeof(rb_thread_t);
3033 
3034  if (!th->root_fiber) {
3035  size += th->ec->vm_stack_size * sizeof(VALUE);
3036  }
3037  if (th->ec->local_storage) {
3038  size += rb_id_table_memsize(th->ec->local_storage);
3039  }
3040  return size;
3041 }
3042 
3043 #define thread_data_type ruby_threadptr_data_type
3044 const rb_data_type_t ruby_threadptr_data_type = {
3045  "VM/thread",
3046  {
3047  thread_mark,
3048  thread_free,
3049  thread_memsize,
3050  thread_compact,
3051  },
3052  0, 0, RUBY_TYPED_FREE_IMMEDIATELY
3053 };
3054 
3055 VALUE
3056 rb_obj_is_thread(VALUE obj)
3057 {
3058  return RBOOL(rb_typeddata_is_kind_of(obj, &thread_data_type));
3059 }
3060 
3061 static VALUE
3062 thread_alloc(VALUE klass)
3063 {
3064  VALUE obj;
3065  rb_thread_t *th;
3066  obj = TypedData_Make_Struct(klass, rb_thread_t, &thread_data_type, th);
3067 
3068  return obj;
3069 }
3070 
3071 inline void
3072 rb_ec_set_vm_stack(rb_execution_context_t *ec, VALUE *stack, size_t size)
3073 {
3074  ec->vm_stack = stack;
3075  ec->vm_stack_size = size;
3076 }
3077 
3078 void
3079 rb_ec_initialize_vm_stack(rb_execution_context_t *ec, VALUE *stack, size_t size)
3080 {
3081  rb_ec_set_vm_stack(ec, stack, size);
3082 
3083  ec->cfp = (void *)(ec->vm_stack + ec->vm_stack_size);
3084 
3085  vm_push_frame(ec,
3086  NULL /* dummy iseq */,
3087  VM_FRAME_MAGIC_DUMMY | VM_ENV_FLAG_LOCAL | VM_FRAME_FLAG_FINISH | VM_FRAME_FLAG_CFRAME /* dummy frame */,
3088  Qnil /* dummy self */, VM_BLOCK_HANDLER_NONE /* dummy block ptr */,
3089  0 /* dummy cref/me */,
3090  0 /* dummy pc */, ec->vm_stack, 0, 0
3091  );
3092 }
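
/* Rough picture after rb_ec_initialize_vm_stack(): vm_push_frame() places
 * the dummy frame's env data at the bottom of ec->vm_stack while control
 * frames occupy the top end and grow downward, so the two regions approach
 * each other.  The VM_FRAME_FLAG_FINISH bit makes vm_exec() and unwinding
 * stop at this frame instead of running off the end of the stack. */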
3093 
3094 void
3095 rb_ec_clear_vm_stack(rb_execution_context_t *ec)
3096 {
3097  rb_ec_set_vm_stack(ec, NULL, 0);
3098 
3099  // Avoid dangling pointers:
3100  ec->cfp = NULL;
3101 }
3102 
3103 static void
3104 th_init(rb_thread_t *th, VALUE self)
3105 {
3106  th->self = self;
3107  rb_threadptr_root_fiber_setup(th);
3108 
3109  /* All threads are blocking until a non-blocking fiber is scheduled */
3110  th->blocking = 1;
3111  th->scheduler = Qnil;
3112 
3113  if (self == 0) {
3114  size_t size = th->vm->default_params.thread_vm_stack_size / sizeof(VALUE);
3115  rb_ec_initialize_vm_stack(th->ec, ALLOC_N(VALUE, size), size);
3116  }
3117  else {
3118  VM_ASSERT(th->ec->cfp == NULL);
3119  VM_ASSERT(th->ec->vm_stack == NULL);
3120  VM_ASSERT(th->ec->vm_stack_size == 0);
3121  }
3122 
3123  th->status = THREAD_RUNNABLE;
3124  th->last_status = Qnil;
3125  th->ec->errinfo = Qnil;
3126  th->ec->root_svar = Qfalse;
3127  th->ec->local_storage_recursive_hash = Qnil;
3128  th->ec->local_storage_recursive_hash_for_trace = Qnil;
3129 #ifdef NON_SCALAR_THREAD_ID
3130  th->thread_id_string[0] = '\0';
3131 #endif
3132 
3133  th->value = Qundef;
3134 
3135 #if OPT_CALL_THREADED_CODE
3136  th->retval = Qundef;
3137 #endif
3138  th->name = Qnil;
3139  th->report_on_exception = th->vm->thread_report_on_exception;
3140  th->ext_config.ractor_safe = true;
3141 }
3142 
3143 static VALUE
3144 ruby_thread_init(VALUE self)
3145 {
3146  rb_thread_t *th = GET_THREAD();
3147  rb_thread_t *target_th = rb_thread_ptr(self);
3148  rb_vm_t *vm = th->vm;
3149 
3150  target_th->vm = vm;
3151  th_init(target_th, self);
3152 
3153  target_th->top_wrapper = 0;
3154  target_th->top_self = rb_vm_top_self();
3155  target_th->ec->root_svar = Qfalse;
3156  target_th->ractor = th->ractor;
3157 
3158  return self;
3159 }
3160 
3161 VALUE
3162 rb_thread_alloc(VALUE klass)
3163 {
3164  VALUE self = thread_alloc(klass);
3165  ruby_thread_init(self);
3166  return self;
3167 }
3168 
3169 #define REWIND_CFP(expr) do { \
3170  rb_execution_context_t *ec__ = GET_EC(); \
3171  VALUE *const curr_sp = (ec__->cfp++)->sp; \
3172  VALUE *const saved_sp = ec__->cfp->sp; \
3173  ec__->cfp->sp = curr_sp; \
3174  expr; \
3175  (ec__->cfp--)->sp = saved_sp; \
3176 } while (0)
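
/* REWIND_CFP(expr) evaluates `expr` with the caller's control frame as the
 * current frame: it temporarily pops the FrozenCore method frame (ec->cfp++),
 * runs `expr`, then pushes the frame back and restores the saved sp.  This
 * way helpers such as rb_alias()/rb_undef() below observe the frame (and
 * cref) of the code that issued the core primitive. */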
3177 
3178 static VALUE
3179 m_core_set_method_alias(VALUE self, VALUE cbase, VALUE sym1, VALUE sym2)
3180 {
3181  REWIND_CFP({
3182  rb_alias(cbase, SYM2ID(sym1), SYM2ID(sym2));
3183  });
3184  return Qnil;
3185 }
3186 
3187 static VALUE
3188 m_core_set_variable_alias(VALUE self, VALUE sym1, VALUE sym2)
3189 {
3190  REWIND_CFP({
3191  rb_alias_variable(SYM2ID(sym1), SYM2ID(sym2));
3192  });
3193  return Qnil;
3194 }
3195 
3196 static VALUE
3197 m_core_undef_method(VALUE self, VALUE cbase, VALUE sym)
3198 {
3199  REWIND_CFP({
3200  ID mid = SYM2ID(sym);
3201  rb_undef(cbase, mid);
3202  rb_clear_method_cache(self, mid);
3203  });
3204  return Qnil;
3205 }
3206 
3207 static VALUE
3208 m_core_set_postexe(VALUE self)
3209 {
3210  rb_set_end_proc(rb_call_end_proc, rb_block_proc());
3211  return Qnil;
3212 }
3213 
3214 static VALUE core_hash_merge_kwd(VALUE hash, VALUE kw);
3215 
3216 static VALUE
3217 core_hash_merge(VALUE hash, long argc, const VALUE *argv)
3218 {
3219  Check_Type(hash, T_HASH);
3220  VM_ASSERT(argc % 2 == 0);
3221  rb_hash_bulk_insert(argc, argv, hash);
3222  return hash;
3223 }
3224 
3225 static VALUE
3226 m_core_hash_merge_ptr(int argc, VALUE *argv, VALUE recv)
3227 {
3228  VALUE hash = argv[0];
3229 
3230  REWIND_CFP(hash = core_hash_merge(hash, argc-1, argv+1));
3231 
3232  return hash;
3233 }
3234 
3235 static int
3236 kwmerge_i(VALUE key, VALUE value, VALUE hash)
3237 {
3238  rb_hash_aset(hash, key, value);
3239  return ST_CONTINUE;
3240 }
3241 
3242 static VALUE
3243 m_core_hash_merge_kwd(VALUE recv, VALUE hash, VALUE kw)
3244 {
3245  REWIND_CFP(hash = core_hash_merge_kwd(hash, kw));
3246  return hash;
3247 }
3248 
3249 static VALUE
3250 m_core_make_shareable(VALUE recv, VALUE obj)
3251 {
3252  return rb_ractor_make_shareable(obj);
3253 }
3254 
3255 static VALUE
3256 m_core_make_shareable_copy(VALUE recv, VALUE obj)
3257 {
3258  return rb_ractor_make_shareable_copy(obj);
3259 }
3260 
3261 static VALUE
3262 m_core_ensure_shareable(VALUE recv, VALUE obj, VALUE name)
3263 {
3264  return rb_ractor_ensure_shareable(obj, name);
3265 }
3266 
3267 static VALUE
3268 core_hash_merge_kwd(VALUE hash, VALUE kw)
3269 {
3270  rb_hash_foreach(rb_to_hash_type(kw), kwmerge_i, hash);
3271  return hash;
3272 }
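
/* These FrozenCore helpers back hash literals at the Ruby level; e.g.
 *
 *   h = { a: 1, **kw }
 *
 * compiles to a core#hash_merge_kwd call that folds `kw` into the partially
 * built hash via kwmerge_i()/rb_hash_aset() above. */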
3273 
3274 /* Returns true if JIT is enabled */
3275 static VALUE
3276 mjit_enabled_p(VALUE _)
3277 {
3278  return RBOOL(mjit_enabled);
3279 }
3280 
3281 static VALUE
3282 mjit_pause_m(int argc, VALUE *argv, RB_UNUSED_VAR(VALUE self))
3283 {
3284  VALUE options = Qnil;
3285  VALUE wait = Qtrue;
3286  rb_scan_args(argc, argv, "0:", &options);
3287 
3288  if (!NIL_P(options)) {
3289  static ID keyword_ids[1];
3290  if (!keyword_ids[0])
3291  keyword_ids[0] = rb_intern("wait");
3292  rb_get_kwargs(options, keyword_ids, 0, 1, &wait);
3293  }
3294 
3295  return mjit_pause(RTEST(wait));
3296 }
3297 
3298 static VALUE
3299 mjit_resume_m(VALUE _)
3300 {
3301  return mjit_resume();
3302 }
3303 
3304 extern VALUE *rb_gc_stack_start;
3305 extern size_t rb_gc_stack_maxsize;
3306 
3307 /* debug functions */
3308 
3309 /* :nodoc: */
3310 static VALUE
3311 sdr(VALUE self)
3312 {
3313  rb_vm_bugreport(NULL);
3314  return Qnil;
3315 }
3316 
3317 /* :nodoc: */
3318 static VALUE
3319 nsdr(VALUE self)
3320 {
3321  VALUE ary = rb_ary_new();
3322 #ifdef HAVE_BACKTRACE
3323 #include <execinfo.h>
3324 #define MAX_NATIVE_TRACE 1024
3325  static void *trace[MAX_NATIVE_TRACE];
3326  int n = (int)backtrace(trace, MAX_NATIVE_TRACE);
3327  char **syms = backtrace_symbols(trace, n);
3328  int i;
3329 
3330  if (syms == 0) {
3331  rb_memerror();
3332  }
3333 
3334  for (i=0; i<n; i++) {
3335  rb_ary_push(ary, rb_str_new2(syms[i]));
3336  }
3337  free(syms); /* OK */
3338 #endif
3339  return ary;
3340 }
3341 
3342 #if VM_COLLECT_USAGE_DETAILS
3343 static VALUE usage_analysis_insn_start(VALUE self);
3344 static VALUE usage_analysis_operand_start(VALUE self);
3345 static VALUE usage_analysis_register_start(VALUE self);
3346 static VALUE usage_analysis_insn_stop(VALUE self);
3347 static VALUE usage_analysis_operand_stop(VALUE self);
3348 static VALUE usage_analysis_register_stop(VALUE self);
3349 static VALUE usage_analysis_insn_running(VALUE self);
3350 static VALUE usage_analysis_operand_running(VALUE self);
3351 static VALUE usage_analysis_register_running(VALUE self);
3352 static VALUE usage_analysis_insn_clear(VALUE self);
3353 static VALUE usage_analysis_operand_clear(VALUE self);
3354 static VALUE usage_analysis_register_clear(VALUE self);
3355 #endif
3356 
3357 static VALUE
3358 f_raise(int c, VALUE *v, VALUE _)
3359 {
3360  return rb_f_raise(c, v);
3361 }
3362 
3363 static VALUE
3364 f_proc(VALUE _)
3365 {
3366  return rb_block_proc();
3367 }
3368 
3369 static VALUE
3370 f_lambda(VALUE _)
3371 {
3372  return rb_block_lambda();
3373 }
3374 
3375 static VALUE
3376 f_sprintf(int c, const VALUE *v, VALUE _)
3377 {
3378  return rb_f_sprintf(c, v);
3379 }
3380 
3381 static VALUE
3382 vm_mtbl(VALUE self, VALUE obj, VALUE sym)
3383 {
3384  vm_mtbl_dump(CLASS_OF(obj), RTEST(sym) ? SYM2ID(sym) : 0);
3385  return Qnil;
3386 }
3387 
3388 static VALUE
3389 vm_mtbl2(VALUE self, VALUE obj, VALUE sym)
3390 {
3391  vm_mtbl_dump(obj, RTEST(sym) ? SYM2ID(sym) : 0);
3392  return Qnil;
3393 }
3394 
3395 /*
3396  * call-seq:
3397  * RubyVM.keep_script_lines -> true or false
3398  *
3399  * Return the current +keep_script_lines+ status. Now it only returns
3400  * +true+ or +false+, but it can return other objects in the future.
3401  *
3402  * Note that this is an API for ruby internal use, debugging,
3403  * and research. Do not use this for any other purpose.
3404  * The compatibility is not guaranteed.
3405  */
3406 static VALUE
3407 vm_keep_script_lines(VALUE self)
3408 {
3409  return RBOOL(ruby_vm_keep_script_lines);
3410 }
3411 
3412 /*
3413  * call-seq:
3414  * RubyVM.keep_script_lines = true / false
3415  *
3416  * It sets the +keep_script_lines+ flag. If the flag is set, all
3417  * loaded scripts are recorded in the interpreter process.
3418  *
3419  * Note that this is an API for ruby internal use, debugging,
3420  * and research. Do not use this for any other purpose.
3421  * The compatibility is not guaranteed.
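 *
 * For example (illustrative):
 *
 *     RubyVM.keep_script_lines = true
 *     eval("x = 1")   # the source text passed to eval is now recorded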
3422  */
3423 static VALUE
3424 vm_keep_script_lines_set(VALUE self, VALUE flags)
3425 {
3426  ruby_vm_keep_script_lines = RTEST(flags);
3427  return flags;
3428 }
3429 
3430 void
3431 Init_VM(void)
3432 {
3433  VALUE opts;
3434  VALUE klass;
3435  VALUE fcore;
3436 
3437  /*
3438  * Document-class: RubyVM
3439  *
3440  * The RubyVM module only exists on MRI. +RubyVM+ is not defined in
3441  * other Ruby implementations such as JRuby and TruffleRuby.
3442  *
3443  * The RubyVM module provides some access to MRI internals.
3444  * This module is for very limited purposes, such as debugging,
3445  * prototyping, and research. Normal users must not use it.
3446  * This module is not portable between Ruby implementations.
3447  */
3448  rb_cRubyVM = rb_define_class("RubyVM", rb_cObject);
3449  rb_undef_alloc_func(rb_cRubyVM);
3450  rb_undef_method(CLASS_OF(rb_cRubyVM), "new");
3451  rb_define_singleton_method(rb_cRubyVM, "stat", vm_stat, -1);
3452  rb_define_singleton_method(rb_cRubyVM, "keep_script_lines", vm_keep_script_lines, 0);
3453  rb_define_singleton_method(rb_cRubyVM, "keep_script_lines=", vm_keep_script_lines_set, 1);
3454 
3455 #if USE_DEBUG_COUNTER
3456  rb_define_singleton_method(rb_cRubyVM, "reset_debug_counters", rb_debug_counter_reset, 0);
3457  rb_define_singleton_method(rb_cRubyVM, "show_debug_counters", rb_debug_counter_show, 0);
3458 #endif
3459 
3460  /* FrozenCore (hidden) */
3461  fcore = rb_class_new(rb_cBasicObject);
3462  rb_set_class_path(fcore, rb_cRubyVM, "FrozenCore");
3463  RBASIC(fcore)->flags = T_ICLASS;
3464  klass = rb_singleton_class(fcore);
3465  rb_define_method_id(klass, id_core_set_method_alias, m_core_set_method_alias, 3);
3466  rb_define_method_id(klass, id_core_set_variable_alias, m_core_set_variable_alias, 2);
3467  rb_define_method_id(klass, id_core_undef_method, m_core_undef_method, 2);
3468  rb_define_method_id(klass, id_core_set_postexe, m_core_set_postexe, 0);
3469  rb_define_method_id(klass, id_core_hash_merge_ptr, m_core_hash_merge_ptr, -1);
3470  rb_define_method_id(klass, id_core_hash_merge_kwd, m_core_hash_merge_kwd, 2);
3471  rb_define_method_id(klass, id_core_raise, f_raise, -1);
3472  rb_define_method_id(klass, id_core_sprintf, f_sprintf, -1);
3473  rb_define_method_id(klass, idProc, f_proc, 0);
3474  rb_define_method_id(klass, idLambda, f_lambda, 0);
3475  rb_define_method(klass, "make_shareable", m_core_make_shareable, 1);
3476  rb_define_method(klass, "make_shareable_copy", m_core_make_shareable_copy, 1);
3477  rb_define_method(klass, "ensure_shareable", m_core_ensure_shareable, 2);
3478  rb_obj_freeze(fcore);
3479  RBASIC_CLEAR_CLASS(klass);
3480  rb_obj_freeze(klass);
3481  rb_gc_register_mark_object(fcore);
3482  rb_mRubyVMFrozenCore = fcore;
3483 
3484  /* ::RubyVM::MJIT
3485  * Provides access to the Method JIT compiler of MRI.
3486  * Of course, this module is MRI specific.
3487  */
3488  VALUE mjit = rb_define_module_under(rb_cRubyVM, "MJIT");
3489  rb_define_singleton_method(mjit, "enabled?", mjit_enabled_p, 0);
3490  rb_define_singleton_method(mjit, "pause", mjit_pause_m, -1);
3491  rb_define_singleton_method(mjit, "resume", mjit_resume_m, 0);
3492 
3493  /*
3494  * Document-class: Thread
3495  *
3496  * Threads are the Ruby implementation for a concurrent programming model.
3497  *
3498  * Programs that require multiple threads of execution are a perfect
3499  * candidate for Ruby's Thread class.
3500  *
3501  * For example, we can create a new thread separate from the main thread's
3502  * execution using ::new.
3503  *
3504  * thr = Thread.new { puts "What's the big deal" }
3505  *
3506  * Then we are able to pause the execution of the main thread and allow
3507  * our new thread to finish, using #join:
3508  *
3509  * thr.join #=> "What's the big deal"
3510  *
3511  * If we don't call +thr.join+ before the main thread terminates, then all
3512  * other threads including +thr+ will be killed.
3513  *
3514  * Alternatively, you can use an array for handling multiple threads at
3515  * once, like in the following example:
3516  *
3517  * threads = []
3518  * threads << Thread.new { puts "What's the big deal" }
3519  * threads << Thread.new { 3.times { puts "Threads are fun!" } }
3520  *
3521  * After creating a few threads we wait for them all to finish
3522  * consecutively.
3523  *
3524  * threads.each { |thr| thr.join }
3525  *
3526  * To retrieve the last value of a thread, use #value
3527  *
3528  * thr = Thread.new { sleep 1; "Useful value" }
3529  * thr.value #=> "Useful value"
3530  *
3531  * === Thread initialization
3532  *
3533  * In order to create new threads, Ruby provides ::new, ::start, and
3534  * ::fork. A block must be provided with each of these methods, otherwise
3535  * a ThreadError will be raised.
3536  *
3537  * When subclassing the Thread class, the +initialize+ method of your
3538  * subclass will be ignored by ::start and ::fork. Otherwise, be sure to
3539  * call super in your +initialize+ method.
3540  *
3541  * === Thread termination
3542  *
3543  * For terminating threads, Ruby provides a variety of ways to do this.
3544  *
3545  * The class method ::kill, is meant to exit a given thread:
3546  *
3547  * thr = Thread.new { sleep }
3548  * Thread.kill(thr) # sends exit() to thr
3549  *
3550  * Alternatively, you can use the instance method #exit, or any of its
3551  * aliases #kill or #terminate.
3552  *
3553  * thr.exit
3554  *
3555  * === Thread status
3556  *
3557  * Ruby provides a few instance methods for querying the state of a given
3558  * thread. To get a string with the current thread's state use #status
3559  *
3560  * thr = Thread.new { sleep }
3561  * thr.status # => "sleep"
3562  * thr.exit
3563  * thr.status # => false
3564  *
3565  * You can also use #alive? to tell if the thread is running or sleeping,
3566  * and #stop? if the thread is dead or sleeping.
3567  *
3568  * === Thread variables and scope
3569  *
3570  * Since threads are created with blocks, the same rules apply to other
3571  * Ruby blocks for variable scope. Any local variables created within this
3572  * block are accessible to only this thread.
3573  *
3574  * ==== Fiber-local vs. Thread-local
3575  *
3576  * Each fiber has its own bucket for Thread#[] storage. When you set a
3577  * new fiber-local it is only accessible within this Fiber. To illustrate:
3578  *
3579  * Thread.new {
3580  * Thread.current[:foo] = "bar"
3581  * Fiber.new {
3582  * p Thread.current[:foo] # => nil
3583  * }.resume
3584  * }.join
3585  *
3586  * This example uses #[] for getting and #[]= for setting fiber-locals,
3587  * you can also use #keys to list the fiber-locals for a given
3588  * thread and #key? to check if a fiber-local exists.
3589  *
3590  * When it comes to thread-locals, they are accessible within the entire
3591  * scope of the thread. Given the following example:
3592  *
3593  * Thread.new{
3594  * Thread.current.thread_variable_set(:foo, 1)
3595  * p Thread.current.thread_variable_get(:foo) # => 1
3596  * Fiber.new{
3597  * Thread.current.thread_variable_set(:foo, 2)
3598  * p Thread.current.thread_variable_get(:foo) # => 2
3599  * }.resume
3600  * p Thread.current.thread_variable_get(:foo) # => 2
3601  * }.join
3602  *
3603  * You can see that the thread-local +:foo+ carried over into the fiber
3604  * and was changed to +2+ by the end of the thread.
3605  *
3606  * This example makes use of #thread_variable_set to create new
3607  * thread-locals, and #thread_variable_get to reference them.
3608  *
3609  * There is also #thread_variables to list all thread-locals, and
3610  * #thread_variable? to check if a given thread-local exists.
3611  *
3612  * === Exception handling
3613  *
3614  * When an unhandled exception is raised inside a thread, it will
3615  * terminate. By default, this exception will not propagate to other
3616  * threads. The exception is stored and when another thread calls #value
3617  * or #join, the exception will be re-raised in that thread.
3618  *
3619  * t = Thread.new{ raise 'something went wrong' }
3620  * t.value #=> RuntimeError: something went wrong
3621  *
3622  * An exception can be raised from outside the thread using the
3623  * Thread#raise instance method, which takes the same parameters as
3624  * Kernel#raise.
3625  *
3626  * Setting Thread.abort_on_exception = true, Thread#abort_on_exception =
3627  * true, or $DEBUG = true will cause a subsequent unhandled exception
3628  * raised in a thread to be automatically re-raised in the main thread.
3629  *
3630  * With the addition of the class method ::handle_interrupt, you can now
3631  * handle exceptions asynchronously with threads.
3632  *
3633  * === Scheduling
3634  *
3635  * Ruby provides a few ways to support scheduling threads in your program.
3636  *
3637  * The first way is by using the class method ::stop, to put the current
3638  * running thread to sleep and schedule the execution of another thread.
3639  *
3640  * Once a thread is asleep, you can use the instance method #wakeup to
3641  * mark your thread as eligible for scheduling.
3642  *
3643  * You can also try ::pass, which attempts to pass execution to another
3644  * thread but is dependent on the OS whether a running thread will switch
3645  * or not. The same goes for #priority, which lets you hint to the thread
3646  * scheduler which threads you want to take precedence when passing
3647  * execution. This method is also dependent on the OS and may be ignored
3648  * on some platforms.
3649  *
3650  */
3651  rb_cThread = rb_define_class("Thread", rb_cObject);
3652  rb_undef_alloc_func(rb_cThread);
3653 
3654 #if VM_COLLECT_USAGE_DETAILS
3655  /* ::RubyVM::USAGE_ANALYSIS_* */
3656 #define define_usage_analysis_hash(name) /* shut up rdoc -C */ \
3657  rb_define_const(rb_cRubyVM, "USAGE_ANALYSIS_" #name, rb_hash_new())
3658  define_usage_analysis_hash(INSN);
3659  define_usage_analysis_hash(REGS);
3660  define_usage_analysis_hash(INSN_BIGRAM);
3661 
3662  rb_define_singleton_method(rb_cRubyVM, "USAGE_ANALYSIS_INSN_START", usage_analysis_insn_start, 0);
3663  rb_define_singleton_method(rb_cRubyVM, "USAGE_ANALYSIS_OPERAND_START", usage_analysis_operand_start, 0);
3664  rb_define_singleton_method(rb_cRubyVM, "USAGE_ANALYSIS_REGISTER_START", usage_analysis_register_start, 0);
3665  rb_define_singleton_method(rb_cRubyVM, "USAGE_ANALYSIS_INSN_STOP", usage_analysis_insn_stop, 0);
3666  rb_define_singleton_method(rb_cRubyVM, "USAGE_ANALYSIS_OPERAND_STOP", usage_analysis_operand_stop, 0);
3667  rb_define_singleton_method(rb_cRubyVM, "USAGE_ANALYSIS_REGISTER_STOP", usage_analysis_register_stop, 0);
3668  rb_define_singleton_method(rb_cRubyVM, "USAGE_ANALYSIS_INSN_RUNNING", usage_analysis_insn_running, 0);
3669  rb_define_singleton_method(rb_cRubyVM, "USAGE_ANALYSIS_OPERAND_RUNNING", usage_analysis_operand_running, 0);
3670  rb_define_singleton_method(rb_cRubyVM, "USAGE_ANALYSIS_REGISTER_RUNNING", usage_analysis_register_running, 0);
3671  rb_define_singleton_method(rb_cRubyVM, "USAGE_ANALYSIS_INSN_CLEAR", usage_analysis_insn_clear, 0);
3672  rb_define_singleton_method(rb_cRubyVM, "USAGE_ANALYSIS_OPERAND_CLEAR", usage_analysis_operand_clear, 0);
3673  rb_define_singleton_method(rb_cRubyVM, "USAGE_ANALYSIS_REGISTER_CLEAR", usage_analysis_register_clear, 0);
3674 #endif
3675 
3676  /* ::RubyVM::OPTS
3677  * An Array of VM build options.
3678  * This constant is MRI specific.
3679  */
3680  rb_define_const(rb_cRubyVM, "OPTS", opts = rb_ary_new());
3681 
3682 #if OPT_DIRECT_THREADED_CODE
3683  rb_ary_push(opts, rb_str_new2("direct threaded code"));
3684 #elif OPT_TOKEN_THREADED_CODE
3685  rb_ary_push(opts, rb_str_new2("token threaded code"));
3686 #elif OPT_CALL_THREADED_CODE
3687  rb_ary_push(opts, rb_str_new2("call threaded code"));
3688 #endif
3689 
3690 #if OPT_STACK_CACHING
3691  rb_ary_push(opts, rb_str_new2("stack caching"));
3692 #endif
3693 #if OPT_OPERANDS_UNIFICATION
3694  rb_ary_push(opts, rb_str_new2("operands unification"));
3695 #endif
3696 #if OPT_INSTRUCTIONS_UNIFICATION
3697  rb_ary_push(opts, rb_str_new2("instructions unification"));
3698 #endif
3699 #if OPT_INLINE_METHOD_CACHE
3700  rb_ary_push(opts, rb_str_new2("inline method cache"));
3701 #endif
3702 #if OPT_BLOCKINLINING
3703  rb_ary_push(opts, rb_str_new2("block inlining"));
3704 #endif
3705 
3706  /* ::RubyVM::INSTRUCTION_NAMES
3707  * A list of bytecode instruction names in MRI.
3708  * This constant is MRI specific.
3709  */
3710  rb_define_const(rb_cRubyVM, "INSTRUCTION_NAMES", rb_insns_name_array());
3711 
3712  /* ::RubyVM::DEFAULT_PARAMS
3713  * This constant exposes the VM's default parameters.
3714  * Note that changing these values does not affect VM execution.
3715  * Specification is not stable and you should not depend on this value.
3716  * Of course, this constant is MRI specific.
3717  */
3718  rb_define_const(rb_cRubyVM, "DEFAULT_PARAMS", vm_default_params());
3719 
3720  /* debug functions ::RubyVM::SDR(), ::RubyVM::NSDR() */
3721 #if VMDEBUG
3722  rb_define_singleton_method(rb_cRubyVM, "SDR", sdr, 0);
3723  rb_define_singleton_method(rb_cRubyVM, "NSDR", nsdr, 0);
3724  rb_define_singleton_method(rb_cRubyVM, "mtbl", vm_mtbl, 2);
3725  rb_define_singleton_method(rb_cRubyVM, "mtbl2", vm_mtbl2, 2);
3726 #else
3727  (void)sdr;
3728  (void)nsdr;
3729  (void)vm_mtbl;
3730  (void)vm_mtbl2;
3731 #endif
3732 
3733  /* VM bootstrap: phase 2 */
3734  {
3735  rb_vm_t *vm = ruby_current_vm_ptr;
3736  rb_thread_t *th = GET_THREAD();
3737  VALUE filename = rb_fstring_lit("<main>");
3738  const rb_iseq_t *iseq = rb_iseq_new(0, filename, filename, Qnil, 0, ISEQ_TYPE_TOP);
3739 
3740  // Ractor setup
3741  rb_ractor_main_setup(vm, th->ractor, th);
3742 
3743  /* create vm object */
3744  vm->self = TypedData_Wrap_Struct(rb_cRubyVM, &vm_data_type, vm);
3745 
3746  /* create main thread */
3747  th->self = TypedData_Wrap_Struct(rb_cThread, &thread_data_type, th);
3748  vm->ractor.main_thread = th;
3749  vm->ractor.main_ractor = th->ractor;
3750  th->vm = vm;
3751  th->top_wrapper = 0;
3752  th->top_self = rb_vm_top_self();
3753 
3754  rb_gc_register_mark_object((VALUE)iseq);
3755  th->ec->cfp->iseq = iseq;
3756  th->ec->cfp->pc = iseq->body->iseq_encoded;
3757  th->ec->cfp->self = th->top_self;
3758 
3759  VM_ENV_FLAGS_UNSET(th->ec->cfp->ep, VM_FRAME_FLAG_CFRAME);
3760  VM_STACK_ENV_WRITE(th->ec->cfp->ep, VM_ENV_DATA_INDEX_ME_CREF, (VALUE)vm_cref_new(rb_cObject, METHOD_VISI_PRIVATE, FALSE, NULL, FALSE, FALSE));
3761 
3762  /*
3763  * The Binding of the top level scope
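 *
 * For example:
 *
 *    TOPLEVEL_BINDING.receiver        #=> main
 *    eval("self", TOPLEVEL_BINDING)   #=> main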
3764  */
3765  rb_define_global_const("TOPLEVEL_BINDING", rb_binding_new());
3766 
3767  rb_objspace_gc_enable(vm->objspace);
3768  }
3769  vm_init_redefined_flag();
3770 
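 /* Editor's note: rb_block_param_proxy is the frozen placeholder object the
  * VM pushes when an explicit block parameter is read only to be called; its
  * optimized #call (registered below) invokes the frame's block handler
  * without materializing a Proc. */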
3771  rb_block_param_proxy = rb_obj_alloc(rb_cObject);
3772  rb_add_method_optimized(rb_singleton_class(rb_block_param_proxy), idCall,
3773  OPTIMIZED_METHOD_TYPE_BLOCK_CALL, 0, METHOD_VISI_PUBLIC);
3774  rb_obj_freeze(rb_block_param_proxy);
3775  rb_gc_register_mark_object(rb_block_param_proxy);
3776 
3777  /* vm_backtrace.c */
3778  Init_vm_backtrace();
3779 }
3780 
3781 void
3782 rb_vm_set_progname(VALUE filename)
3783 {
3784  rb_thread_t *th = GET_VM()->ractor.main_thread;
3785  rb_control_frame_t *cfp = (void *)(th->ec->vm_stack + th->ec->vm_stack_size);
3786  --cfp;
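 /* Editor's note: control frames are laid out from the end of vm_stack
  * growing downward, so this last slot is the outermost frame, whose iseq
  * carries the program name set below. */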
3787 
3788  rb_iseq_pathobj_set(cfp->iseq, rb_str_dup(filename), rb_iseq_realpath(cfp->iseq));
3789 }
3790 
3791 extern const struct st_hash_type rb_fstring_hash_type;
3792 
3793 void
3794 Init_BareVM(void)
3795 {
3796  /* VM bootstrap: phase 1 */
3797  rb_vm_t * vm = ruby_mimmalloc(sizeof(*vm));
3798  rb_thread_t * th = ruby_mimmalloc(sizeof(*th));
3799  if (!vm || !th) {
3800  fputs("[FATAL] failed to allocate memory\n", stderr);
3801  exit(EXIT_FAILURE);
3802  }
3803  MEMZERO(th, rb_thread_t, 1);
3804  vm_init2(vm);
3805 
3806  vm->objspace = rb_objspace_alloc();
3807  ruby_current_vm_ptr = vm;
3808  vm->negative_cme_table = rb_id_table_create(16);
3809  vm->overloaded_cme_table = st_init_numtable();
3810 
3811  Init_native_thread(th);
3812  th->vm = vm;
3813  th_init(th, 0);
3814  vm->ractor.main_ractor = th->ractor = rb_ractor_main_alloc();
3815  rb_ractor_set_current_ec(th->ractor, th->ec);
3816  ruby_thread_init_stack(th);
3817 
3818  rb_native_mutex_initialize(&vm->ractor.sync.lock);
3819  rb_native_cond_initialize(&vm->ractor.sync.barrier_cond);
3820  rb_native_cond_initialize(&vm->ractor.sync.terminate_cond);
3821 }
3822 
3823 void
3824 Init_vm_objects(void)
3825 {
3826  rb_vm_t *vm = GET_VM();
3827 
3828  vm->defined_module_hash = st_init_numtable();
3829 
3830  /* initialize the mark object array and lookup tables */
3831  vm->mark_object_ary = rb_ary_tmp_new(128);
3832  vm->loading_table = st_init_strtable();
3833  vm->frozen_strings = st_init_table_with_size(&rb_fstring_hash_type, 10000);
3834 #if EXTSTATIC
3835  vm->static_ext_inits = st_init_strtable();
3836 #endif
3837 }
3838 
3839 /* top self */
3840 
3841 static VALUE
3842 main_to_s(VALUE obj)
3843 {
3844  return rb_str_new2("main");
3845 }
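
/* Editor's illustration: this singleton method (and the inspect alias added
 * in Init_top_self below) is why the top-level self displays as "main":
 *
 *     $ ruby -e 'p self'      # prints: main
 *     $ ruby -e 'puts self'   # prints: main
 */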
3846 
3847 VALUE
3848 rb_vm_top_self(void)
3849 {
3850  return GET_VM()->top_self;
3851 }
3852 
3853 void
3854 Init_top_self(void)
3855 {
3856  rb_vm_t *vm = GET_VM();
3857 
3858  vm->top_self = rb_obj_alloc(rb_cObject);
3859  rb_define_singleton_method(rb_vm_top_self(), "to_s", main_to_s, 0);
3860  rb_define_alias(rb_singleton_class(rb_vm_top_self()), "inspect", "to_s");
3861 }
3862 
3863 VALUE *
3864 rb_ruby_verbose_ptr(void)
3865 {
3866  rb_ractor_t *cr = GET_RACTOR();
3867  return &cr->verbose;
3868 }
3869 
3870 VALUE *
3871 rb_ruby_debug_ptr(void)
3872 {
3873  rb_ractor_t *cr = GET_RACTOR();
3874  return &cr->debug;
3875 }
3876 
3877 /* iseq.c */
3878 VALUE rb_insn_operand_intern(const rb_iseq_t *iseq,
3879  VALUE insn, int op_no, VALUE op,
3880  int len, size_t pos, VALUE *pnop, VALUE child);
3881 
3882 st_table *
3883 rb_vm_fstring_table(void)
3884 {
3885  return GET_VM()->frozen_strings;
3886 }
3887 
3888 #if VM_COLLECT_USAGE_DETAILS
3889 
3890 #define HASH_ASET(h, k, v) rb_hash_aset((h), (st_data_t)(k), (st_data_t)(v))
3891 
3892 /* uh = {
3893  * insn(Fixnum) => ihash(Hash)
3894  * }
3895  * ihash = {
3896  * -1(Fixnum) => count, # insn usage
3897  * 0(Fixnum) => ophash, # operand usage
3898  * }
3899  * ophash = {
3900  * val(interned string) => count(Fixnum)
3901  * }
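 *
 * e.g. after some collection (editor's illustration):
 *
 *   uh = { 1 => { -1 => 42, 0 => { "some-operand" => 7 } } }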
3902  */
3903 static void
3904 vm_analysis_insn(int insn)
3905 {
3906  ID usage_hash;
3907  ID bigram_hash;
3908  static int prev_insn = -1;
3909 
3910  VALUE uh;
3911  VALUE ihash;
3912  VALUE cv;
3913 
3914  CONST_ID(usage_hash, "USAGE_ANALYSIS_INSN");
3915  CONST_ID(bigram_hash, "USAGE_ANALYSIS_INSN_BIGRAM");
3916  uh = rb_const_get(rb_cRubyVM, usage_hash);
3917  if (NIL_P(ihash = rb_hash_aref(uh, INT2FIX(insn)))) {
3918  ihash = rb_hash_new();
3919  HASH_ASET(uh, INT2FIX(insn), ihash);
3920  }
3921  if (NIL_P(cv = rb_hash_aref(ihash, INT2FIX(-1)))) {
3922  cv = INT2FIX(0);
3923  }
3924  HASH_ASET(ihash, INT2FIX(-1), INT2FIX(FIX2INT(cv) + 1));
3925 
3926  /* calc bigram */
3927  if (prev_insn != -1) {
3928  VALUE bi;
3929  VALUE ary[2];
3930  VALUE cv;
3931 
3932  ary[0] = INT2FIX(prev_insn);
3933  ary[1] = INT2FIX(insn);
3934  bi = rb_ary_new4(2, &ary[0]);
3935 
3936  uh = rb_const_get(rb_cRubyVM, bigram_hash);
3937  if (NIL_P(cv = rb_hash_aref(uh, bi))) {
3938  cv = INT2FIX(0);
3939  }
3940  HASH_ASET(uh, bi, INT2FIX(FIX2INT(cv) + 1));
3941  }
3942  prev_insn = insn;
3943 }
3944 
3945 static void
3946 vm_analysis_operand(int insn, int n, VALUE op)
3947 {
3948  ID usage_hash;
3949 
3950  VALUE uh;
3951  VALUE ihash;
3952  VALUE ophash;
3953  VALUE valstr;
3954  VALUE cv;
3955 
3956  CONST_ID(usage_hash, "USAGE_ANALYSIS_INSN");
3957 
3958  uh = rb_const_get(rb_cRubyVM, usage_hash);
3959  if (NIL_P(ihash = rb_hash_aref(uh, INT2FIX(insn)))) {
3960  ihash = rb_hash_new();
3961  HASH_ASET(uh, INT2FIX(insn), ihash);
3962  }
3963  if (NIL_P(ophash = rb_hash_aref(ihash, INT2FIX(n)))) {
3964  ophash = rb_hash_new();
3965  HASH_ASET(ihash, INT2FIX(n), ophash);
3966  }
3967  /* intern */
3968  valstr = rb_insn_operand_intern(GET_EC()->cfp->iseq, insn, n, op, 0, 0, 0, 0);
3969 
3970  /* set count */
3971  if (NIL_P(cv = rb_hash_aref(ophash, valstr))) {
3972  cv = INT2FIX(0);
3973  }
3974  HASH_ASET(ophash, valstr, INT2FIX(FIX2INT(cv) + 1));
3975 }
3976 
3977 static void
3978 vm_analysis_register(int reg, int isset)
3979 {
3980  ID usage_hash;
3981  VALUE uh;
3982  VALUE valstr;
3983  static const char regstrs[][5] = {
3984  "pc", /* 0 */
3985  "sp", /* 1 */
3986  "ep", /* 2 */
3987  "cfp", /* 3 */
3988  "self", /* 4 */
3989  "iseq", /* 5 */
3990  };
3991  static const char getsetstr[][4] = {
3992  "get",
3993  "set",
3994  };
3995  static VALUE syms[sizeof(regstrs) / sizeof(regstrs[0])][2];
3996 
3997  VALUE cv;
3998 
3999  CONST_ID(usage_hash, "USAGE_ANALYSIS_REGS");
4000  if (syms[0] == 0) {
4001  char buff[0x10];
4002  int i;
4003 
4004  for (i = 0; i < (int)(sizeof(regstrs) / sizeof(regstrs[0])); i++) {
4005  int j;
4006  for (j = 0; j < 2; j++) {
4007  snprintf(buff, 0x10, "%d %s %-4s", i, getsetstr[j], regstrs[i]);
4008  syms[i][j] = ID2SYM(rb_intern(buff));
4009  }
4010  }
4011  }
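 /* Editor's note: each generated symbol has the form "<reg#> <get|set> <name>"
  * with the name padded to four characters, e.g. :"2 get ep  " for a read of
  * the ep register. */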
4012  valstr = syms[reg][isset];
4013 
4014  uh = rb_const_get(rb_cRubyVM, usage_hash);
4015  if (NIL_P(cv = rb_hash_aref(uh, valstr))) {
4016  cv = INT2FIX(0);
4017  }
4018  HASH_ASET(uh, valstr, INT2FIX(FIX2INT(cv) + 1));
4019 }
4020 
4021 #undef HASH_ASET
4022 
4023 static void (*ruby_vm_collect_usage_func_insn)(int insn) = NULL;
4024 static void (*ruby_vm_collect_usage_func_operand)(int insn, int n, VALUE op) = NULL;
4025 static void (*ruby_vm_collect_usage_func_register)(int reg, int isset) = NULL;
4026 
4027 /* :nodoc: */
4028 static VALUE
4029 usage_analysis_insn_start(VALUE self)
4030 {
4031  ruby_vm_collect_usage_func_insn = vm_analysis_insn;
4032  return Qnil;
4033 }
4034 
4035 /* :nodoc: */
4036 static VALUE
4037 usage_analysis_operand_start(VALUE self)
4038 {
4039  ruby_vm_collect_usage_func_operand = vm_analysis_operand;
4040  return Qnil;
4041 }
4042 
4043 /* :nodoc: */
4044 static VALUE
4045 usage_analysis_register_start(VALUE self)
4046 {
4047  ruby_vm_collect_usage_func_register = vm_analysis_register;
4048  return Qnil;
4049 }
4050 
4051 /* :nodoc: */
4052 static VALUE
4053 usage_analysis_insn_stop(VALUE self)
4054 {
4055  ruby_vm_collect_usage_func_insn = 0;
4056  return Qnil;
4057 }
4058 
4059 /* :nodoc: */
4060 static VALUE
4061 usage_analysis_operand_stop(VALUE self)
4062 {
4063  ruby_vm_collect_usage_func_operand = 0;
4064  return Qnil;
4065 }
4066 
4067 /* :nodoc: */
4068 static VALUE
4069 usage_analysis_register_stop(VALUE self)
4070 {
4071  ruby_vm_collect_usage_func_register = 0;
4072  return Qnil;
4073 }
4074 
4075 /* :nodoc: */
4076 static VALUE
4077 usage_analysis_insn_running(VALUE self)
4078 {
4079  return RBOOL(ruby_vm_collect_usage_func_insn != 0);
4080 }
4081 
4082 /* :nodoc: */
4083 static VALUE
4084 usage_analysis_operand_running(VALUE self)
4085 {
4086  return RBOOL(ruby_vm_collect_usage_func_operand != 0);
4087 }
4088 
4089 /* :nodoc: */
4090 static VALUE
4091 usage_analysis_register_running(VALUE self)
4092 {
4093  return RBOOL(ruby_vm_collect_usage_func_register != 0);
4094 }
4095 
4096 /* :nodoc: */
4097 static VALUE
4098 usage_analysis_insn_clear(VALUE self)
4099 {
4100  ID usage_hash;
4101  ID bigram_hash;
4102  VALUE uh;
4103  VALUE bh;
4104 
4105  CONST_ID(usage_hash, "USAGE_ANALYSIS_INSN");
4106  CONST_ID(bigram_hash, "USAGE_ANALYSIS_INSN_BIGRAM");
4107  uh = rb_const_get(rb_cRubyVM, usage_hash);
4108  bh = rb_const_get(rb_cRubyVM, bigram_hash);
4109  rb_hash_clear(uh);
4110  rb_hash_clear(bh);
4111 
4112  return Qtrue;
4113 }
4114 
4115 /* :nodoc: */
4116 static VALUE
4117 usage_analysis_operand_clear(VALUE self)
4118 {
4119  ID usage_hash;
4120  VALUE uh;
4121 
4122  CONST_ID(usage_hash, "USAGE_ANALYSIS_INSN");
4123  uh = rb_const_get(rb_cRubyVM, usage_hash);
4124  rb_hash_clear(uh);
4125 
4126  return Qtrue;
4127 }
4128 
4129 /* :nodoc: */
4130 static VALUE
4131 usage_analysis_register_clear(VALUE self)
4132 {
4133  ID usage_hash;
4134  VALUE uh;
4135 
4136  CONST_ID(usage_hash, "USAGE_ANALYSIS_REGS");
4137  uh = rb_const_get(rb_cRubyVM, usage_hash);
4138  rb_hash_clear(uh);
4139 
4140  return Qtrue;
4141 }
4142 
4143 #else
4144 
4145 MAYBE_UNUSED(static void (*ruby_vm_collect_usage_func_insn)(int insn)) = 0;
4146 MAYBE_UNUSED(static void (*ruby_vm_collect_usage_func_operand)(int insn, int n, VALUE op)) = 0;
4147 MAYBE_UNUSED(static void (*ruby_vm_collect_usage_func_register)(int reg, int isset)) = 0;
4148 
4149 #endif
4150 
4151 #if VM_COLLECT_USAGE_DETAILS
4152 /* @param insn instruction number */
4153 static void
4154 vm_collect_usage_insn(int insn)
4155 {
4156  if (RUBY_DTRACE_INSN_ENABLED()) {
4157  RUBY_DTRACE_INSN(rb_insns_name(insn));
4158  }
4159  if (ruby_vm_collect_usage_func_insn)
4160  (*ruby_vm_collect_usage_func_insn)(insn);
4161 }
4162 
4163 /* @param insn instruction number
4164  * @param n n-th operand
4165  * @param op operand value
4166  */
4167 static void
4168 vm_collect_usage_operand(int insn, int n, VALUE op)
4169 {
4170  if (RUBY_DTRACE_INSN_OPERAND_ENABLED()) {
4171  VALUE valstr;
4172 
4173  valstr = rb_insn_operand_intern(GET_EC()->cfp->iseq, insn, n, op, 0, 0, 0, 0);
4174 
4175  RUBY_DTRACE_INSN_OPERAND(RSTRING_PTR(valstr), rb_insns_name(insn));
4176  RB_GC_GUARD(valstr);
4177  }
4178  if (ruby_vm_collect_usage_func_operand)
4179  (*ruby_vm_collect_usage_func_operand)(insn, n, op);
4180 }
4181 
4182 /* @param reg register id. see code of vm_analysis_register() */
4183 /* @param isset 0: read, 1: write */
4184 static void
4185 vm_collect_usage_register(int reg, int isset)
4186 {
4187  if (ruby_vm_collect_usage_func_register)
4188  (*ruby_vm_collect_usage_func_register)(reg, isset);
4189 }
4190 #endif
4191 
4192 MJIT_FUNC_EXPORTED const struct rb_callcache *
4193 rb_vm_empty_cc(void)
4194 {
4195  return &vm_empty_cc;
4196 }
4197 
4198 MJIT_FUNC_EXPORTED const struct rb_callcache *
4199 rb_vm_empty_cc_for_super(void)
4200 {
4201  return &vm_empty_cc_for_super;
4202 }
4203 
4204 #endif /* #ifndef MJIT_HEADER */
4205 
4206 #include "vm_call_iseq_optimized.inc" /* required from vm_insnhelper.c */