Ruby  3.1.4p223 (2023-03-30 revision HEAD)
vm_trace.c
1 /**********************************************************************
2 
3  vm_trace.c -
4 
5  $Author: ko1 $
6  created at: Tue Aug 14 19:37:09 2012
7 
8  Copyright (C) 1993-2012 Yukihiro Matsumoto
9 
10 **********************************************************************/
11 
12 /*
 * This file includes two parts:
14  *
15  * (1) set_trace_func internal mechanisms
16  * and C level API
17  *
18  * (2) Ruby level API
19  * (2-1) set_trace_func API
 * (2-2) TracePoint API
21  *
22  */
23 
24 #include "eval_intern.h"
25 #include "internal.h"
26 #include "internal/hash.h"
27 #include "internal/symbol.h"
28 #include "iseq.h"
29 #include "mjit.h"
30 #include "ruby/debug.h"
31 #include "vm_core.h"
32 #include "ruby/ractor.h"
33 #include "yjit.h"
34 
35 #include "builtin.h"
36 
37 /* (1) trace mechanisms */
38 
/* A single registered event hook (set_trace_func / TracePoint / C API),
 * kept on a singly linked list per rb_hook_list_t. */
typedef struct rb_event_hook_struct {
    rb_event_hook_flag_t hook_flags;   /* SAFE / DELETED / RAW_ARG flags */
    rb_event_flag_t events;            /* event mask this hook fires on */
    /* NOTE(review): the `func` member (called through in exec_hooks_body)
     * and the closing `} rb_event_hook_t;` appear to be missing from this
     * copy -- confirm against the upstream file. */
    VALUE data;                        /* payload passed to the hook (proc, tpval, ...) */
    struct rb_event_hook_struct *next; /* next hook in the list */

    struct {
        rb_thread_t *th;          /* fire only for this thread (NULL = any) */
        unsigned int target_line; /* fire only on this source line (0 = any) */
    } filter;
51 
52 typedef void (*rb_event_hook_raw_arg_func_t)(VALUE data, const rb_trace_arg_t *arg);
53 
54 #define MAX_EVENT_NUM 32
55 
56 void
57 rb_hook_list_mark(rb_hook_list_t *hooks)
58 {
59  rb_event_hook_t *hook = hooks->hooks;
60 
61  while (hook) {
62  rb_gc_mark(hook->data);
63  hook = hook->next;
64  }
65 }
66 
67 static void clean_hooks(const rb_execution_context_t *ec, rb_hook_list_t *list);
68 
69 void
70 rb_hook_list_free(rb_hook_list_t *hooks)
71 {
72  hooks->need_clean = true;
73 
74  if (hooks->running == 0) {
75  clean_hooks(GET_EC(), hooks);
76  }
77 }
78 
79 /* ruby_vm_event_flags management */
80 
81 void rb_clear_attr_ccs(void);
82 
/* Recompute VM-global tracing state after the global hook list's event mask
 * changed from PREV_EVENTS to NEW_EVENTS: deoptimize JIT-ed code, re-instrument
 * ISeqs with trace_ instructions, and refresh objspace/YJIT hooks. */
static void
update_global_event_hook(rb_event_flag_t prev_events, rb_event_flag_t new_events)
{
    rb_event_flag_t new_iseq_events = new_events & ISEQ_TRACE_EVENTS;
    rb_event_flag_t enabled_iseq_events = ruby_vm_event_enabled_global_flags & ISEQ_TRACE_EVENTS;

    if (new_iseq_events & ~enabled_iseq_events) {
        // :class events are triggered only in ISEQ_TYPE_CLASS, but mjit_target_iseq_p ignores such iseqs.
        // Thus we don't need to cancel JIT-ed code for :class events.
        if (new_iseq_events != RUBY_EVENT_CLASS) {
            // Stop calling all JIT-ed code. We can't rewrite existing JIT-ed code to trace_ insns for now.
            mjit_cancel_all("TracePoint is enabled");
        }

        /* write all ISeqs if and only if new events are added */
        rb_iseq_trace_set_all(new_iseq_events | enabled_iseq_events);
    }
    else {
        // if c_call or c_return is activated:
        if (((prev_events & RUBY_EVENT_C_CALL) == 0 && (new_events & RUBY_EVENT_C_CALL)) ||
            ((prev_events & RUBY_EVENT_C_RETURN) == 0 && (new_events & RUBY_EVENT_C_RETURN))) {
            rb_clear_attr_ccs();
        }
    }

    ruby_vm_event_flags = new_events;
    /* enabled_global_flags only ever grows (|=): iseq instrumentation is not
     * undone when events are removed later. */
    ruby_vm_event_enabled_global_flags |= new_events;
    rb_objspace_set_event_hook(new_events);

    if (new_events & RUBY_EVENT_TRACEPOINT_ALL) {
        // Invalidate all code if listening for any TracePoint event.
        // Internal events fire inside C routines so don't need special handling.
        // Do this last so other ractors see updated vm events when they wake up.
        rb_yjit_tracing_invalidate_all();
    }
}
119 
120 /* add/remove hooks */
121 
122 static rb_event_hook_t *
123 alloc_event_hook(rb_event_hook_func_t func, rb_event_flag_t events, VALUE data, rb_event_hook_flag_t hook_flags)
124 {
125  rb_event_hook_t *hook;
126 
127  if ((events & RUBY_INTERNAL_EVENT_MASK) && (events & ~RUBY_INTERNAL_EVENT_MASK)) {
128  rb_raise(rb_eTypeError, "Can not specify normal event and internal event simultaneously.");
129  }
130 
131  hook = ALLOC(rb_event_hook_t);
132  hook->hook_flags = hook_flags;
133  hook->events = events;
134  hook->func = func;
135  hook->data = data;
136 
137  /* no filters */
138  hook->filter.th = NULL;
139  hook->filter.target_line = 0;
140 
141  return hook;
142 }
143 
/* Prepend HOOK to LIST and fold its events into the list mask. GLOBAL_P
 * distinguishes the ractor-global list (a GC root, so the global event
 * state must be refreshed) from object-local lists, which instead need a
 * write barrier on LIST_OWNER for the newly referenced hook data. */
static void
hook_list_connect(VALUE list_owner, rb_hook_list_t *list, rb_event_hook_t *hook, int global_p)
{
    rb_event_flag_t prev_events = list->events;
    hook->next = list->hooks;
    list->hooks = hook;
    list->events |= hook->events;

    if (global_p) {
        /* global hooks are root objects at GC mark. */
        update_global_event_hook(prev_events, list->events);
    }
    else {
        RB_OBJ_WRITTEN(list_owner, Qundef, hook->data);
    }
}
160 
161 static void
162 connect_event_hook(const rb_execution_context_t *ec, rb_event_hook_t *hook)
163 {
164  rb_hook_list_t *list = rb_ec_ractor_hooks(ec);
165  hook_list_connect(Qundef, list, hook, TRUE);
166 }
167 
168 static void
169 rb_threadptr_add_event_hook(const rb_execution_context_t *ec, rb_thread_t *th,
170  rb_event_hook_func_t func, rb_event_flag_t events, VALUE data, rb_event_hook_flag_t hook_flags)
171 {
172  rb_event_hook_t *hook = alloc_event_hook(func, events, data, hook_flags);
173  hook->filter.th = th;
174  connect_event_hook(ec, hook);
175 }
176 
/* NOTE(review): the signature lines for these two public wrappers appear to
 * be missing from this copy (from the bodies, presumably
 * rb_thread_add_event_hook() and rb_add_event_hook()) -- confirm against
 * the upstream file. Both register SAFE-flagged hooks. */
void
{
    rb_threadptr_add_event_hook(GET_EC(), rb_thread_ptr(thval), func, events, data, RUBY_EVENT_HOOK_FLAG_SAFE);
}

void
{
    rb_add_event_hook2(func, events, data, RUBY_EVENT_HOOK_FLAG_SAFE);
}
188 
189 void
190 rb_thread_add_event_hook2(VALUE thval, rb_event_hook_func_t func, rb_event_flag_t events, VALUE data, rb_event_hook_flag_t hook_flags)
191 {
192  rb_threadptr_add_event_hook(GET_EC(), rb_thread_ptr(thval), func, events, data, hook_flags);
193 }
194 
195 void
196 rb_add_event_hook2(rb_event_hook_func_t func, rb_event_flag_t events, VALUE data, rb_event_hook_flag_t hook_flags)
197 {
198  rb_event_hook_t *hook = alloc_event_hook(func, events, data, hook_flags);
199  connect_event_hook(GET_EC(), hook);
200 }
201 
/* Unlink and free every hook flagged RUBY_EVENT_HOOK_FLAG_DELETED and
 * recompute LIST's active event mask from the survivors. Must only run
 * while nobody is iterating the list (running == 0). Local (per-object)
 * lists are freed entirely when they become empty; the global list instead
 * propagates its new mask to the VM. */
static void
clean_hooks(const rb_execution_context_t *ec, rb_hook_list_t *list)
{
    rb_event_hook_t *hook, **nextp = &list->hooks;
    rb_event_flag_t prev_events = list->events;

    VM_ASSERT(list->running == 0);
    VM_ASSERT(list->need_clean == true);

    list->events = 0;
    list->need_clean = false;

    while ((hook = *nextp) != 0) {
        if (hook->hook_flags & RUBY_EVENT_HOOK_FLAG_DELETED) {
            *nextp = hook->next;
            xfree(hook);
        }
        else {
            list->events |= hook->events; /* update active events */
            nextp = &hook->next;
        }
    }

    if (list->is_local) {
        if (list->events == 0) {
            /* local events: the list itself is freed once empty */
            ruby_xfree(list);
        }
    }
    else {
        update_global_event_hook(prev_events, list->events);
    }
}
235 
236 static void
237 clean_hooks_check(const rb_execution_context_t *ec, rb_hook_list_t *list)
238 {
239  if (UNLIKELY(list->need_clean)) {
240  if (list->running == 0) {
241  clean_hooks(ec, list);
242  }
243  }
244 }
245 
246 #define MATCH_ANY_FILTER_TH ((rb_thread_t *)1)
247 
248 /* if func is 0, then clear all funcs */
249 static int
250 remove_event_hook(const rb_execution_context_t *ec, const rb_thread_t *filter_th, rb_event_hook_func_t func, VALUE data)
251 {
252  rb_hook_list_t *list = rb_ec_ractor_hooks(ec);
253  int ret = 0;
254  rb_event_hook_t *hook = list->hooks;
255 
256  while (hook) {
257  if (func == 0 || hook->func == func) {
258  if (hook->filter.th == filter_th || filter_th == MATCH_ANY_FILTER_TH) {
259  if (data == Qundef || hook->data == data) {
260  hook->hook_flags |= RUBY_EVENT_HOOK_FLAG_DELETED;
261  ret+=1;
262  list->need_clean = true;
263  }
264  }
265  }
266  hook = hook->next;
267  }
268 
269  clean_hooks_check(ec, list);
270  return ret;
271 }
272 
273 static int
274 rb_threadptr_remove_event_hook(const rb_execution_context_t *ec, const rb_thread_t *filter_th, rb_event_hook_func_t func, VALUE data)
275 {
276  return remove_event_hook(ec, filter_th, func, data);
277 }
278 
/* NOTE(review): the signature lines for these four public removal wrappers
 * appear to be missing from this copy (from the bodies, presumably
 * rb_thread_remove_event_hook(), rb_thread_remove_event_hook_with_data(),
 * rb_remove_event_hook() and rb_remove_event_hook_with_data()) -- confirm
 * against the upstream file. Each returns the number of hooks removed. */
int
{
    return rb_threadptr_remove_event_hook(GET_EC(), rb_thread_ptr(thval), func, Qundef);
}

int
{
    return rb_threadptr_remove_event_hook(GET_EC(), rb_thread_ptr(thval), func, data);
}

int
{
    return remove_event_hook(GET_EC(), NULL, func, Qundef);
}

int
{
    return remove_event_hook(GET_EC(), NULL, func, data);
}
302 
/* Remove every trace hook filtered to the current thread of EC. */
void
rb_ec_clear_current_thread_trace_func(const rb_execution_context_t *ec)
{
    rb_threadptr_remove_event_hook(ec, rb_ec_thread_ptr(ec), 0, Qundef);
}

/* Remove every trace hook regardless of its thread filter. */
void
rb_ec_clear_all_trace_func(const rb_execution_context_t *ec)
{
    rb_threadptr_remove_event_hook(ec, MATCH_ANY_FILTER_TH, 0, Qundef);
}
314 
315 /* invoke hooks */
316 
/* Invoke every live hook in LIST whose event mask, thread filter and line
 * filter match TRACE_ARG. RAW_ARG hooks receive the rb_trace_arg_t itself;
 * plain hooks get the unpacked (event, data, self, id, klass) tuple. */
static void
exec_hooks_body(const rb_execution_context_t *ec, rb_hook_list_t *list, const rb_trace_arg_t *trace_arg)
{
    rb_event_hook_t *hook;

    for (hook = list->hooks; hook; hook = hook->next) {
        if (!(hook->hook_flags & RUBY_EVENT_HOOK_FLAG_DELETED) &&
            (trace_arg->event & hook->events) &&
            (LIKELY(hook->filter.th == 0) || hook->filter.th == rb_ec_thread_ptr(ec)) &&
            (LIKELY(hook->filter.target_line == 0) || (hook->filter.target_line == (unsigned int)rb_vm_get_sourceline(ec->cfp)))) {
            if (!(hook->hook_flags & RUBY_EVENT_HOOK_FLAG_RAW_ARG)) {
                (*hook->func)(trace_arg->event, hook->data, trace_arg->self, trace_arg->id, trace_arg->klass);
            }
            else {
                (*((rb_event_hook_raw_arg_func_t)hook->func))(hook->data, trace_arg);
            }
        }
    }
}
336 
337 static int
338 exec_hooks_precheck(const rb_execution_context_t *ec, rb_hook_list_t *list, const rb_trace_arg_t *trace_arg)
339 {
340  if (list->events & trace_arg->event) {
341  list->running++;
342  return TRUE;
343  }
344  else {
345  return FALSE;
346  }
347 }
348 
349 static void
350 exec_hooks_postcheck(const rb_execution_context_t *ec, rb_hook_list_t *list)
351 {
352  list->running--;
353  clean_hooks_check(ec, list);
354 }
355 
356 static void
357 exec_hooks_unprotected(const rb_execution_context_t *ec, rb_hook_list_t *list, const rb_trace_arg_t *trace_arg)
358 {
359  if (exec_hooks_precheck(ec, list, trace_arg) == 0) return;
360  exec_hooks_body(ec, list, trace_arg);
361  exec_hooks_postcheck(ec, list);
362 }
363 
/* Run matching hooks under EC_PUSH_TAG so a raise/throw inside a hook is
 * caught here; returns the tag state (TAG_NONE on clean exit). The EC's
 * raised flag is saved/cleared around the hooks and restored afterwards. */
static int
exec_hooks_protected(rb_execution_context_t *ec, rb_hook_list_t *list, const rb_trace_arg_t *trace_arg)
{
    enum ruby_tag_type state;
    volatile int raised;

    if (exec_hooks_precheck(ec, list, trace_arg) == 0) return 0;

    raised = rb_ec_reset_raised(ec);

    /* TODO: Support !RUBY_EVENT_HOOK_FLAG_SAFE hooks */

    EC_PUSH_TAG(ec);
    if ((state = EC_EXEC_TAG()) == TAG_NONE) {
        exec_hooks_body(ec, list, trace_arg);
    }
    EC_POP_TAG();

    /* always unpin, whether the hooks exited cleanly or via a tag jump */
    exec_hooks_postcheck(ec, list);

    if (raised) {
        rb_ec_set_raised(ec);
    }

    return state;
}
390 
/* Main dispatch from the VM: fire TRACE_ARG at HOOKS. ec->trace_arg doubles
 * as the reentrancy guard -- while it is set, normal-event hooks do not
 * fire again on this EC. POP_P: when true and a hook performs a non-local
 * exit, the current frame is popped before re-raising. */
MJIT_FUNC_EXPORTED void
rb_exec_event_hooks(rb_trace_arg_t *trace_arg, rb_hook_list_t *hooks, int pop_p)
{
    rb_execution_context_t *ec = trace_arg->ec;

    if (UNLIKELY(trace_arg->event & RUBY_INTERNAL_EVENT_MASK)) {
        if (ec->trace_arg && (ec->trace_arg->event & RUBY_INTERNAL_EVENT_MASK)) {
            /* skip hooks because this thread doing INTERNAL_EVENT */
        }
        else {
            /* internal events may nest inside a normal event: save/restore
             * the outer trace_arg instead of requiring it to be NULL */
            rb_trace_arg_t *prev_trace_arg = ec->trace_arg;

            ec->trace_arg = trace_arg;
            /* only global hooks */
            exec_hooks_unprotected(ec, rb_ec_ractor_hooks(ec), trace_arg);
            ec->trace_arg = prev_trace_arg;
        }
    }
    else {
        if (ec->trace_arg == NULL && /* check reentrant */
            trace_arg->self != rb_mRubyVMFrozenCore /* skip special methods. TODO: remove it. */) {
            const VALUE errinfo = ec->errinfo;
            const VALUE old_recursive = ec->local_storage_recursive_hash;
            int state = 0;

            /* setup: hooks run with their own recursion table and a clean
             * errinfo */
            ec->local_storage_recursive_hash = ec->local_storage_recursive_hash_for_trace;
            ec->errinfo = Qnil;
            ec->trace_arg = trace_arg;

            /* kick hooks; errinfo is restored only on a clean exit */
            if ((state = exec_hooks_protected(ec, hooks, trace_arg)) == TAG_NONE) {
                ec->errinfo = errinfo;
            }

            /* cleanup */
            ec->trace_arg = NULL;
            ec->local_storage_recursive_hash_for_trace = ec->local_storage_recursive_hash;
            ec->local_storage_recursive_hash = old_recursive;

            if (state) {
                if (pop_p) {
                    if (VM_FRAME_FINISHED_P(ec->cfp)) {
                        ec->tag = ec->tag->prev;
                    }
                    rb_vm_pop_frame(ec);
                }
                EC_JUMP_TAG(ec, state);
            }
        }
    }
}
443 
444 VALUE
445 rb_suppress_tracing(VALUE (*func)(VALUE), VALUE arg)
446 {
447  volatile int raised;
448  volatile VALUE result = Qnil;
449  rb_execution_context_t *const ec = GET_EC();
450  rb_vm_t *const vm = rb_ec_vm_ptr(ec);
451  enum ruby_tag_type state;
452  rb_trace_arg_t dummy_trace_arg;
453  dummy_trace_arg.event = 0;
454 
455  if (!ec->trace_arg) {
456  ec->trace_arg = &dummy_trace_arg;
457  }
458 
459  raised = rb_ec_reset_raised(ec);
460 
461  EC_PUSH_TAG(ec);
462  if (LIKELY((state = EC_EXEC_TAG()) == TAG_NONE)) {
463  result = (*func)(arg);
464  }
465  else {
466  (void)*&vm; /* suppress "clobbered" warning */
467  }
468  EC_POP_TAG();
469 
470  if (raised) {
471  rb_ec_reset_raised(ec);
472  }
473 
474  if (ec->trace_arg == &dummy_trace_arg) {
475  ec->trace_arg = NULL;
476  }
477 
478  if (state) {
479 #if defined RUBY_USE_SETJMPEX && RUBY_USE_SETJMPEX
480  RB_GC_GUARD(result);
481 #endif
482  EC_JUMP_TAG(ec, state);
483  }
484 
485  return result;
486 }
487 
488 static void call_trace_func(rb_event_flag_t, VALUE data, VALUE self, ID id, VALUE klass);
489 
490 /* (2-1) set_trace_func (old API) */
491 
492 /*
493  * call-seq:
494  * set_trace_func(proc) -> proc
495  * set_trace_func(nil) -> nil
496  *
497  * Establishes _proc_ as the handler for tracing, or disables
498  * tracing if the parameter is +nil+.
499  *
500  * *Note:* this method is obsolete, please use TracePoint instead.
501  *
502  * _proc_ takes up to six parameters:
503  *
504  * * an event name
505  * * a filename
506  * * a line number
507  * * an object id
508  * * a binding
509  * * the name of a class
510  *
511  * _proc_ is invoked whenever an event occurs.
512  *
513  * Events are:
514  *
515  * +c-call+:: call a C-language routine
516  * +c-return+:: return from a C-language routine
517  * +call+:: call a Ruby method
518  * +class+:: start a class or module definition
519  * +end+:: finish a class or module definition
520  * +line+:: execute code on a new line
521  * +raise+:: raise an exception
522  * +return+:: return from a Ruby method
523  *
524  * Tracing is disabled within the context of _proc_.
525  *
526  * class Test
527  * def test
528  * a = 1
529  * b = 2
530  * end
531  * end
532  *
533  * set_trace_func proc { |event, file, line, id, binding, classname|
534  * printf "%8s %s:%-2d %10s %8s\n", event, file, line, id, classname
535  * }
536  * t = Test.new
537  * t.test
538  *
539  * line prog.rb:11 false
540  * c-call prog.rb:11 new Class
541  * c-call prog.rb:11 initialize Object
542  * c-return prog.rb:11 initialize Object
543  * c-return prog.rb:11 new Class
544  * line prog.rb:12 false
545  * call prog.rb:2 test Test
546  * line prog.rb:3 test Test
547  * line prog.rb:4 test Test
548  * return prog.rb:4 test Test
549  *
550  * Note that for +c-call+ and +c-return+ events, the binding returned is the
551  * binding of the nearest Ruby method calling the C method, since C methods
552  * themselves do not have bindings.
553  */
554 
555 static VALUE
556 set_trace_func(VALUE obj, VALUE trace)
557 {
558  rb_remove_event_hook(call_trace_func);
559 
560  if (NIL_P(trace)) {
561  return Qnil;
562  }
563 
564  if (!rb_obj_is_proc(trace)) {
565  rb_raise(rb_eTypeError, "trace_func needs to be Proc");
566  }
567 
568  rb_add_event_hook(call_trace_func, RUBY_EVENT_ALL, trace);
569  return trace;
570 }
571 
572 static void
573 thread_add_trace_func(rb_execution_context_t *ec, rb_thread_t *filter_th, VALUE trace)
574 {
575  if (!rb_obj_is_proc(trace)) {
576  rb_raise(rb_eTypeError, "trace_func needs to be Proc");
577  }
578 
579  rb_threadptr_add_event_hook(ec, filter_th, call_trace_func, RUBY_EVENT_ALL, trace, RUBY_EVENT_HOOK_FLAG_SAFE);
580 }
581 
582 /*
583  * call-seq:
584  * thr.add_trace_func(proc) -> proc
585  *
586  * Adds _proc_ as a handler for tracing.
587  *
588  * See Thread#set_trace_func and Kernel#set_trace_func.
589  */
590 
591 static VALUE
592 thread_add_trace_func_m(VALUE obj, VALUE trace)
593 {
594  thread_add_trace_func(GET_EC(), rb_thread_ptr(obj), trace);
595  return trace;
596 }
597 
598 /*
599  * call-seq:
600  * thr.set_trace_func(proc) -> proc
601  * thr.set_trace_func(nil) -> nil
602  *
603  * Establishes _proc_ on _thr_ as the handler for tracing, or
604  * disables tracing if the parameter is +nil+.
605  *
606  * See Kernel#set_trace_func.
607  */
608 
609 static VALUE
610 thread_set_trace_func_m(VALUE target_thread, VALUE trace)
611 {
612  rb_execution_context_t *ec = GET_EC();
613  rb_thread_t *target_th = rb_thread_ptr(target_thread);
614 
615  rb_threadptr_remove_event_hook(ec, target_th, call_trace_func, Qundef);
616 
617  if (NIL_P(trace)) {
618  return Qnil;
619  }
620  else {
621  thread_add_trace_func(ec, target_th, trace);
622  return trace;
623  }
624 }
625 
626 static const char *
627 get_event_name(rb_event_flag_t event)
628 {
629  switch (event) {
630  case RUBY_EVENT_LINE: return "line";
631  case RUBY_EVENT_CLASS: return "class";
632  case RUBY_EVENT_END: return "end";
633  case RUBY_EVENT_CALL: return "call";
634  case RUBY_EVENT_RETURN: return "return";
635  case RUBY_EVENT_C_CALL: return "c-call";
636  case RUBY_EVENT_C_RETURN: return "c-return";
637  case RUBY_EVENT_RAISE: return "raise";
638  default:
639  return "unknown";
640  }
641 }
642 
/* Symbol ID for EVENT as used by TracePoint#event (e.g. :line, :c_call).
 * Returns 0 for events with no symbol mapping. */
static ID
get_event_id(rb_event_flag_t event)
{
    ID id;

    switch (event) {
#define C(name, NAME) case RUBY_EVENT_##NAME: CONST_ID(id, #name); return id;
        C(line, LINE);
        C(class, CLASS);
        C(end, END);
        C(call, CALL);
        C(return, RETURN);
        C(c_call, C_CALL);
        C(c_return, C_RETURN);
        C(raise, RAISE);
        C(b_call, B_CALL);
        C(b_return, B_RETURN);
        C(thread_begin, THREAD_BEGIN);
        C(thread_end, THREAD_END);
        C(fiber_switch, FIBER_SWITCH);
        C(script_compiled, SCRIPT_COMPILED);
#undef C
      default:
        return 0;
    }
}
669 
/* Compute the path and line number to report for EVENT, walking up from CFP
 * to the nearest Ruby-level frame. Class-definition-style events report the
 * iseq's first line; other events report the current source line. No Ruby
 * frame yields (Qnil, 0). */
static void
get_path_and_lineno(const rb_execution_context_t *ec, const rb_control_frame_t *cfp, rb_event_flag_t event, VALUE *pathp, int *linep)
{
    cfp = rb_vm_get_ruby_level_next_cfp(ec, cfp);

    if (cfp) {
        const rb_iseq_t *iseq = cfp->iseq;
        *pathp = rb_iseq_path(iseq);

        /* NOTE(review): the remainder of this event mask (additional bits
         * and the closing paren of the condition) appears to be missing
         * from this copy -- confirm against the upstream file. */
        if (event & (RUBY_EVENT_CLASS |
            *linep = FIX2INT(rb_iseq_first_lineno(iseq));
        }
        else {
            *linep = rb_vm_get_sourceline(cfp);
        }
    }
    else {
        *pathp = Qnil;
        *linep = 0;
    }
}
693 
/* The hook body behind set_trace_func: builds the six-element argument
 * vector (event name, file, line, method id, binding, class) and calls
 * PROC with it. */
static void
call_trace_func(rb_event_flag_t event, VALUE proc, VALUE self, ID id, VALUE klass)
{
    int line;
    VALUE filename;
    VALUE eventname = rb_str_new2(get_event_name(event));
    VALUE argv[6];
    const rb_execution_context_t *ec = GET_EC();

    get_path_and_lineno(ec, ec->cfp, event, &filename, &line);

    /* the hook may be invoked without a resolved class; fall back to the
     * current frame's method id and class */
    if (!klass) {
        rb_ec_frame_method_id_and_class(ec, &id, 0, &klass);
    }

    if (klass) {
        if (RB_TYPE_P(klass, T_ICLASS)) {
            /* report the module an iclass proxies for */
            klass = RBASIC(klass)->klass;
        }
        else if (FL_TEST(klass, FL_SINGLETON)) {
            /* report the object the singleton class is attached to */
            klass = rb_ivar_get(klass, id__attached__);
        }
    }

    argv[0] = eventname;
    argv[1] = filename;
    argv[2] = INT2FIX(line);
    argv[3] = id ? ID2SYM(id) : Qnil;
    /* binding is only materialized when we have a self and a real file */
    argv[4] = (self && (filename != Qnil)) ? rb_binding_new() : Qnil;
    argv[5] = klass ? klass : Qnil;

    rb_proc_call_with_block(proc, 6, argv, Qnil);
}
727 
728 /* (2-2) TracePoint API */
729 
730 static VALUE rb_cTracePoint;
731 
/* Internal state of one TracePoint object. */
typedef struct rb_tp_struct {
    rb_event_flag_t events;      /* events this TracePoint listens for */
    int tracing; /* bool: currently enabled? */
    rb_thread_t *target_th;      /* restrict to this thread (NULL = all) */
    VALUE local_target_set; /* Hash: target ->
                             * Qtrue (if target is iseq) or
                             * Qfalse (if target is bmethod)
                             */
    void (*func)(VALUE tpval, void *data); /* C-level callback (takes precedence over proc) */
    void *data;                  /* opaque argument for func */
    VALUE proc;                  /* Ruby-level callback */
    rb_ractor_t *ractor;         /* only run proc in this ractor (NULL = any) */
    VALUE self;                  /* the TracePoint object itself */
} rb_tp_t;
746 
747 static void
748 tp_mark(void *ptr)
749 {
750  rb_tp_t *tp = ptr;
751  rb_gc_mark(tp->proc);
752  rb_gc_mark(tp->local_target_set);
753  if (tp->target_th) rb_gc_mark(tp->target_th->self);
754 }
755 
756 static size_t
757 tp_memsize(const void *ptr)
758 {
759  return sizeof(rb_tp_t);
760 }
761 
/* T_DATA type descriptor for TracePoint; default free, freed immediately. */
static const rb_data_type_t tp_data_type = {
    "tracepoint",
    {tp_mark, RUBY_TYPED_DEFAULT_FREE, tp_memsize,},
    0, 0, RUBY_TYPED_FREE_IMMEDIATELY
};
767 
768 static VALUE
769 tp_alloc(VALUE klass)
770 {
771  rb_tp_t *tp;
772  return TypedData_Make_Struct(klass, rb_tp_t, &tp_data_type, tp);
773 }
774 
/* Map a TracePoint event symbol (e.g. :line, :c_call) to its event flag;
 * raises ArgumentError for unknown symbols. */
static rb_event_flag_t
symbol2event_flag(VALUE v)
{
    ID id;
    VALUE sym = rb_to_symbol_type(v);
    /* NOTE(review): the initializer expressions for these two aggregate
     * masks appear to be missing from this copy -- confirm against the
     * upstream file. */
    const rb_event_flag_t RUBY_EVENT_A_CALL =
    const rb_event_flag_t RUBY_EVENT_A_RETURN =

#define C(name, NAME) CONST_ID(id, #name); if (sym == ID2SYM(id)) return RUBY_EVENT_##NAME
    C(line, LINE);
    C(class, CLASS);
    C(end, END);
    C(call, CALL);
    C(return, RETURN);
    C(c_call, C_CALL);
    C(c_return, C_RETURN);
    C(raise, RAISE);
    C(b_call, B_CALL);
    C(b_return, B_RETURN);
    C(thread_begin, THREAD_BEGIN);
    C(thread_end, THREAD_END);
    C(fiber_switch, FIBER_SWITCH);
    C(script_compiled, SCRIPT_COMPILED);

    /* joke */
    C(a_call, A_CALL);
    C(a_return, A_RETURN);
#undef C
    rb_raise(rb_eArgError, "unknown event: %"PRIsVALUE, rb_sym2str(sym));
}
807 
808 static rb_tp_t *
809 tpptr(VALUE tpval)
810 {
811  rb_tp_t *tp;
812  TypedData_Get_Struct(tpval, rb_tp_t, &tp_data_type, tp);
813  return tp;
814 }
815 
816 static rb_trace_arg_t *
817 get_trace_arg(void)
818 {
819  rb_trace_arg_t *trace_arg = GET_EC()->trace_arg;
820  if (trace_arg == 0) {
821  rb_raise(rb_eRuntimeError, "access from outside");
822  }
823  return trace_arg;
824 }
825 
/* NOTE(review): the signature lines for these three accessors appear to be
 * missing from this copy (from the bodies, presumably
 * rb_tracearg_from_tracepoint(), rb_tracearg_event_flag() and
 * rb_tracearg_event()) -- confirm against the upstream file. */
struct rb_trace_arg_struct *
{
    return get_trace_arg();
}

{
    return trace_arg->event;
}

VALUE
{
    return ID2SYM(get_event_id(trace_arg->event));
}
843 
844 static void
845 fill_path_and_lineno(rb_trace_arg_t *trace_arg)
846 {
847  if (trace_arg->path == Qundef) {
848  get_path_and_lineno(trace_arg->ec, trace_arg->cfp, trace_arg->event, &trace_arg->path, &trace_arg->lineno);
849  }
850 }
851 
/* NOTE(review): the signature lines for these two accessors appear to be
 * missing from this copy (from the bodies, presumably rb_tracearg_lineno()
 * and rb_tracearg_path()) -- confirm against the upstream file. */
VALUE
{
    fill_path_and_lineno(trace_arg);
    return INT2FIX(trace_arg->lineno);
}
VALUE
{
    fill_path_and_lineno(trace_arg);
    return trace_arg->path;
}
864 
/* Lazily resolve and cache the method id / called id / defined class for
 * TRACE_ARG; klass_solved marks the cache as filled. An iclass is replaced
 * by the module it proxies; an unresolvable class becomes Qnil. */
static void
fill_id_and_klass(rb_trace_arg_t *trace_arg)
{
    if (!trace_arg->klass_solved) {
        if (!trace_arg->klass) {
            rb_vm_control_frame_id_and_class(trace_arg->cfp, &trace_arg->id, &trace_arg->called_id, &trace_arg->klass);
        }

        if (trace_arg->klass) {
            if (RB_TYPE_P(trace_arg->klass, T_ICLASS)) {
                trace_arg->klass = RBASIC(trace_arg->klass)->klass;
            }
        }
        else {
            trace_arg->klass = Qnil;
        }

        trace_arg->klass_solved = 1;
    }
}
885 
/* TracePoint#parameters: the parameter list of the method/block the event
 * belongs to. Ruby frames read it from the iseq; C calls synthesize an
 * unnamed list from the method's arity; other events raise RuntimeError. */
VALUE
rb_tracearg_parameters(rb_trace_arg_t *trace_arg)
{
    switch (trace_arg->event) {
      case RUBY_EVENT_CALL:
      case RUBY_EVENT_RETURN:
      case RUBY_EVENT_B_CALL:
      case RUBY_EVENT_B_RETURN: {
        const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(trace_arg->ec, trace_arg->cfp);
        if (cfp) {
            /* non-lambda blocks report proc-style (optional) parameters */
            int is_proc = 0;
            if (VM_FRAME_TYPE(cfp) == VM_FRAME_MAGIC_BLOCK && !VM_FRAME_LAMBDA_P(cfp)) {
                is_proc = 1;
            }
            return rb_iseq_parameters(cfp->iseq, is_proc);
        }
        break;
      }
      case RUBY_EVENT_C_CALL:
      case RUBY_EVENT_C_RETURN: {
        fill_id_and_klass(trace_arg);
        if (trace_arg->klass && trace_arg->id) {
            const rb_method_entry_t *me;
            VALUE iclass = Qnil;
            me = rb_method_entry_without_refinements(trace_arg->klass, trace_arg->called_id, &iclass);
            return rb_unnamed_parameters(rb_method_entry_arity(me));
        }
        break;
      }
      case RUBY_EVENT_RAISE:
      case RUBY_EVENT_LINE:
      case RUBY_EVENT_CLASS:
      case RUBY_EVENT_END:
        /* NOTE(review): one or more case labels appear to be missing here
         * in this copy -- confirm against the upstream file. */
        rb_raise(rb_eRuntimeError, "not supported by this event");
        break;
    }
    return Qnil;
}
925 
/* NOTE(review): the signature lines for these five accessors appear to be
 * missing from this copy (from the bodies, presumably
 * rb_tracearg_method_id(), rb_tracearg_callee_id(),
 * rb_tracearg_defined_class(), rb_tracearg_binding() and
 * rb_tracearg_self()) -- confirm against the upstream file. */
VALUE
{
    fill_id_and_klass(trace_arg);
    return trace_arg->id ? ID2SYM(trace_arg->id) : Qnil;
}

VALUE
{
    fill_id_and_klass(trace_arg);
    return trace_arg->called_id ? ID2SYM(trace_arg->called_id) : Qnil;
}

VALUE
{
    fill_id_and_klass(trace_arg);
    return trace_arg->klass;
}

VALUE
{
    /* binding of the nearest frame that can create one, or nil */
    rb_control_frame_t *cfp;
    cfp = rb_vm_get_binding_creatable_next_cfp(trace_arg->ec, trace_arg->cfp);

    if (cfp) {
        return rb_vm_make_binding(trace_arg->ec, cfp);
    }
    else {
        return Qnil;
    }
}

VALUE
{
    return trace_arg->self;
}
966 
/* NOTE(review): the signature lines for these two accessors appear to be
 * missing from this copy (from the bodies, presumably
 * rb_tracearg_return_value() and rb_tracearg_raised_exception()) --
 * confirm against the upstream file. Both validate the event kind before
 * exposing trace_arg->data. */
VALUE
{
    if (trace_arg->event & (RUBY_EVENT_RETURN | RUBY_EVENT_C_RETURN | RUBY_EVENT_B_RETURN)) {
        /* ok */
    }
    else {
        rb_raise(rb_eRuntimeError, "not supported by this event");
    }
    if (trace_arg->data == Qundef) {
        rb_bug("rb_tracearg_return_value: unreachable");
    }
    return trace_arg->data;
}

VALUE
{
    if (trace_arg->event & (RUBY_EVENT_RAISE)) {
        /* ok */
    }
    else {
        rb_raise(rb_eRuntimeError, "not supported by this event");
    }
    if (trace_arg->data == Qundef) {
        rb_bug("rb_tracearg_raised_exception: unreachable");
    }
    return trace_arg->data;
}
996 
997 VALUE
998 rb_tracearg_eval_script(rb_trace_arg_t *trace_arg)
999 {
1000  VALUE data = trace_arg->data;
1001 
1002  if (trace_arg->event & (RUBY_EVENT_SCRIPT_COMPILED)) {
1003  /* ok */
1004  }
1005  else {
1006  rb_raise(rb_eRuntimeError, "not supported by this event");
1007  }
1008  if (data == Qundef) {
1009  rb_bug("rb_tracearg_raised_exception: unreachable");
1010  }
1011  if (rb_obj_is_iseq(data)) {
1012  return Qnil;
1013  }
1014  else {
1015  VM_ASSERT(RB_TYPE_P(data, T_ARRAY));
1016  /* [src, iseq] */
1017  return RARRAY_AREF(data, 0);
1018  }
1019 }
1020 
1021 VALUE
1022 rb_tracearg_instruction_sequence(rb_trace_arg_t *trace_arg)
1023 {
1024  VALUE data = trace_arg->data;
1025 
1026  if (trace_arg->event & (RUBY_EVENT_SCRIPT_COMPILED)) {
1027  /* ok */
1028  }
1029  else {
1030  rb_raise(rb_eRuntimeError, "not supported by this event");
1031  }
1032  if (data == Qundef) {
1033  rb_bug("rb_tracearg_raised_exception: unreachable");
1034  }
1035 
1036  if (rb_obj_is_iseq(data)) {
1037  return rb_iseqw_new((const rb_iseq_t *)data);
1038  }
1039  else {
1040  VM_ASSERT(RB_TYPE_P(data, T_ARRAY));
1041  VM_ASSERT(rb_obj_is_iseq(RARRAY_AREF(data, 1)));
1042 
1043  /* [src, iseq] */
1044  return rb_iseqw_new((const rb_iseq_t *)RARRAY_AREF(data, 1));
1045  }
1046 }
1047 
/* NOTE(review): the signature line appears to be missing from this copy
 * (from the body, presumably rb_tracearg_object()) -- confirm against the
 * upstream file. Exposes the object of an internal NEWOBJ/FREEOBJ event. */
VALUE
{
    if (trace_arg->event & (RUBY_INTERNAL_EVENT_NEWOBJ | RUBY_INTERNAL_EVENT_FREEOBJ)) {
        /* ok */
    }
    else {
        rb_raise(rb_eRuntimeError, "not supported by this event");
    }
    if (trace_arg->data == Qundef) {
        rb_bug("rb_tracearg_object: unreachable");
    }
    return trace_arg->data;
}
1062 
/* TracePoint attribute methods (builtin bindings). Each forwards to the
 * corresponding rb_tracearg_* accessor for the current trace arg;
 * get_trace_arg() raises RuntimeError when called outside a hook. */
static VALUE
tracepoint_attr_event(rb_execution_context_t *ec, VALUE tpval)
{
    return rb_tracearg_event(get_trace_arg());
}

static VALUE
tracepoint_attr_lineno(rb_execution_context_t *ec, VALUE tpval)
{
    return rb_tracearg_lineno(get_trace_arg());
}
static VALUE
tracepoint_attr_path(rb_execution_context_t *ec, VALUE tpval)
{
    return rb_tracearg_path(get_trace_arg());
}

static VALUE
tracepoint_attr_parameters(rb_execution_context_t *ec, VALUE tpval)
{
    return rb_tracearg_parameters(get_trace_arg());
}

static VALUE
tracepoint_attr_method_id(rb_execution_context_t *ec, VALUE tpval)
{
    return rb_tracearg_method_id(get_trace_arg());
}

static VALUE
tracepoint_attr_callee_id(rb_execution_context_t *ec, VALUE tpval)
{
    return rb_tracearg_callee_id(get_trace_arg());
}

static VALUE
tracepoint_attr_defined_class(rb_execution_context_t *ec, VALUE tpval)
{
    return rb_tracearg_defined_class(get_trace_arg());
}

static VALUE
tracepoint_attr_binding(rb_execution_context_t *ec, VALUE tpval)
{
    return rb_tracearg_binding(get_trace_arg());
}

static VALUE
tracepoint_attr_self(rb_execution_context_t *ec, VALUE tpval)
{
    return rb_tracearg_self(get_trace_arg());
}

static VALUE
tracepoint_attr_return_value(rb_execution_context_t *ec, VALUE tpval)
{
    return rb_tracearg_return_value(get_trace_arg());
}

static VALUE
tracepoint_attr_raised_exception(rb_execution_context_t *ec, VALUE tpval)
{
    return rb_tracearg_raised_exception(get_trace_arg());
}

static VALUE
tracepoint_attr_eval_script(rb_execution_context_t *ec, VALUE tpval)
{
    return rb_tracearg_eval_script(get_trace_arg());
}

static VALUE
tracepoint_attr_instruction_sequence(rb_execution_context_t *ec, VALUE tpval)
{
    return rb_tracearg_instruction_sequence(get_trace_arg());
}
1139 
1140 static void
1141 tp_call_trace(VALUE tpval, rb_trace_arg_t *trace_arg)
1142 {
1143  rb_tp_t *tp = tpptr(tpval);
1144 
1145  if (tp->func) {
1146  (*tp->func)(tpval, tp->data);
1147  }
1148  else {
1149  if (tp->ractor == NULL || tp->ractor == GET_RACTOR()) {
1150  rb_proc_call_with_block((VALUE)tp->proc, 1, &tpval, Qnil);
1151  }
1152  }
1153 }
1154 
/* NOTE(review): the signature line appears to be missing from this copy
 * (from the body, presumably rb_tracepoint_enable()) -- confirm against
 * the upstream file. Registers tp_call_trace as a RAW_ARG hook, thread
 * filtered when the TracePoint has a target thread. */
VALUE
{
    rb_tp_t *tp;
    tp = tpptr(tpval);

    if (tp->local_target_set != Qfalse) {
        rb_raise(rb_eArgError, "can't nest-enable a targeting TracePoint");
    }

    if (tp->target_th) {
        rb_thread_add_event_hook2(tp->target_th->self, (rb_event_hook_func_t)tp_call_trace, tp->events, tpval,
                                  RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG);
    }
    else {
        rb_add_event_hook2((rb_event_hook_func_t)tp_call_trace, tp->events, tpval,
                           RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG);
    }
    tp->tracing = 1;
    return Qundef;
}
1176 
1177 static const rb_iseq_t *
1178 iseq_of(VALUE target)
1179 {
1180  VALUE iseqv = rb_funcall(rb_cISeq, rb_intern("of"), 1, target);
1181  if (NIL_P(iseqv)) {
1182  rb_raise(rb_eArgError, "specified target is not supported");
1183  }
1184  else {
1185  return rb_iseqw_to_iseq(iseqv);
1186  }
1187 }
1188 
1189 const rb_method_definition_t *rb_method_def(VALUE method); /* proc.c */
1190 
/* Enable TPVAL only for TARGET (a method/proc/...) and optionally only for
 * TARGET_LINE. Local hooks are attached directly to the target's iseq(s)
 * -- and, for a bmethod with call/return events, to the method definition
 * -- and recorded in tp->local_target_set for later disable. */
static VALUE
rb_tracepoint_enable_for_target(VALUE tpval, VALUE target, VALUE target_line)
{
    rb_tp_t *tp = tpptr(tpval);
    const rb_iseq_t *iseq = iseq_of(target);
    int n = 0;                    /* number of hooks actually attached */
    unsigned int line = 0;
    bool target_bmethod = false;

    if (tp->tracing > 0) {
        rb_raise(rb_eArgError, "can't nest-enable a targeting TracePoint");
    }

    if (!NIL_P(target_line)) {
        if ((tp->events & RUBY_EVENT_LINE) == 0) {
            rb_raise(rb_eArgError, "target_line is specified, but line event is not specified");
        }
        else {
            line = NUM2UINT(target_line);
        }
    }

    VM_ASSERT(tp->local_target_set == Qfalse);
    /* identity hash: target -> Qtrue (iseq) / Qfalse (bmethod) */
    tp->local_target_set = rb_obj_hide(rb_ident_hash_new());

    /* bmethod */
    if (rb_obj_is_method(target)) {
        rb_method_definition_t *def = (rb_method_definition_t *)rb_method_def(target);
        if (def->type == VM_METHOD_TYPE_BMETHOD &&
            (tp->events & (RUBY_EVENT_CALL | RUBY_EVENT_RETURN))) {
            def->body.bmethod.hooks = ZALLOC(rb_hook_list_t);
            rb_hook_list_connect_tracepoint(target, def->body.bmethod.hooks, tpval, 0);
            rb_hash_aset(tp->local_target_set, target, Qfalse);
            target_bmethod = true;

            n++;
        }
    }

    /* iseq */
    n += rb_iseq_add_local_tracepoint_recursively(iseq, tp->events, tpval, line, target_bmethod);
    rb_hash_aset(tp->local_target_set, (VALUE)iseq, Qtrue);


    if (n == 0) {
        rb_raise(rb_eArgError, "can not enable any hooks");
    }

    rb_yjit_tracing_invalidate_all();

    ruby_vm_event_local_num++;

    tp->tracing = 1;

    return Qnil;
}
1247 
1248 static int
1249 disable_local_event_iseq_i(VALUE target, VALUE iseq_p, VALUE tpval)
1250 {
1251  if (iseq_p) {
1252  rb_iseq_remove_local_tracepoint_recursively((rb_iseq_t *)target, tpval);
1253  }
1254  else {
1255  /* bmethod */
1256  rb_method_definition_t *def = (rb_method_definition_t *)rb_method_def(target);
1257  rb_hook_list_t *hooks = def->body.bmethod.hooks;
1258  VM_ASSERT(hooks != NULL);
1259  rb_hook_list_remove_tracepoint(hooks, tpval);
1260 
1261  if (hooks->events == 0) {
1262  rb_hook_list_free(def->body.bmethod.hooks);
1263  def->body.bmethod.hooks = NULL;
1264  }
1265  }
1266  return ST_CONTINUE;
1267 }
1268 
1269 VALUE
1271 {
1272  rb_tp_t *tp;
1273 
1274  tp = tpptr(tpval);
1275 
1276  if (tp->local_target_set) {
1277  rb_hash_foreach(tp->local_target_set, disable_local_event_iseq_i, tpval);
1278  tp->local_target_set = Qfalse;
1279  ruby_vm_event_local_num--;
1280  }
1281  else {
1282  if (tp->target_th) {
1283  rb_thread_remove_event_hook_with_data(tp->target_th->self, (rb_event_hook_func_t)tp_call_trace, tpval);
1284  }
1285  else {
1287  }
1288  }
1289  tp->tracing = 0;
1290  tp->target_th = NULL;
1291  return Qundef;
1292 }
1293 
/*
 * Attaches the TracePoint `tpval` to `list` as a raw-arg event hook.
 * `target_line` is stored as the hook's line filter (callers pass 0 for
 * bmethods — presumably meaning "no line filter"; confirm in hook dispatch).
 */
void
rb_hook_list_connect_tracepoint(VALUE target, rb_hook_list_t *list, VALUE tpval, unsigned int target_line)
{
    rb_tp_t *tp = tpptr(tpval);
    rb_event_hook_t *hook = alloc_event_hook((rb_event_hook_func_t)tp_call_trace, tp->events, tpval,
                                             RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG);
    hook->filter.target_line = target_line;
    hook_list_connect(target, list, hook, FALSE);
}
1303 
1304 void
1305 rb_hook_list_remove_tracepoint(rb_hook_list_t *list, VALUE tpval)
1306 {
1307  rb_event_hook_t *hook = list->hooks;
1308  rb_event_flag_t events = 0;
1309 
1310  while (hook) {
1311  if (hook->data == tpval) {
1312  hook->hook_flags |= RUBY_EVENT_HOOK_FLAG_DELETED;
1313  list->need_clean = true;
1314  }
1315  else if ((hook->hook_flags & RUBY_EVENT_HOOK_FLAG_DELETED) == 0) {
1316  events |= hook->events;
1317  }
1318  hook = hook->next;
1319  }
1320 
1321  list->events = events;
1322 }
1323 
/*
 * Body of TracePoint#enable: installs the hooks, optionally restricted to
 * `target` (a method/proc), `target_line`, and `target_thread`.  With a
 * block, yields with tracing enabled and restores the previous tracing
 * state afterwards; without a block, returns whether tracing was already
 * enabled before the call.
 */
static VALUE
tracepoint_enable_m(rb_execution_context_t *ec, VALUE tpval, VALUE target, VALUE target_line, VALUE target_thread)
{
    rb_tp_t *tp = tpptr(tpval);
    int previous_tracing = tp->tracing;

    /* check target_thread: truthy installs a per-thread filter; replacing
     * an existing filter is an error, anything else clears it */
    if (RTEST(target_thread)) {
        if (tp->target_th) {
            rb_raise(rb_eArgError, "can not override target_thread filter");
        }
        tp->target_th = rb_thread_ptr(target_thread);
    }
    else {
        tp->target_th = NULL;
    }

    if (NIL_P(target)) {
        /* target_line only makes sense together with a target */
        if (!NIL_P(target_line)) {
            rb_raise(rb_eArgError, "only target_line is specified");
        }
        rb_tracepoint_enable(tpval);
    }
    else {
        rb_tracepoint_enable_for_target(tpval, target, target_line);
    }

    if (rb_block_given_p()) {
        /* run the block, then restore the prior enabled/disabled state even
         * if the block raises */
        return rb_ensure(rb_yield, Qundef,
                         previous_tracing ? rb_tracepoint_enable : rb_tracepoint_disable,
                         tpval);
    }
    else {
        return RBOOL(previous_tracing);
    }
}
1360 
1361 static VALUE
1362 tracepoint_disable_m(rb_execution_context_t *ec, VALUE tpval)
1363 {
1364  rb_tp_t *tp = tpptr(tpval);
1365  int previous_tracing = tp->tracing;
1366 
1367  if (rb_block_given_p()) {
1368  if (tp->local_target_set != Qfalse) {
1369  rb_raise(rb_eArgError, "can't disable a targeting TracePoint in a block");
1370  }
1371 
1372  rb_tracepoint_disable(tpval);
1373  return rb_ensure(rb_yield, Qundef,
1374  previous_tracing ? rb_tracepoint_enable : rb_tracepoint_disable,
1375  tpval);
1376  }
1377  else {
1378  rb_tracepoint_disable(tpval);
1379  return RBOOL(previous_tracing);
1380  }
1381 }
1382 
1383 VALUE
1385 {
1386  rb_tp_t *tp = tpptr(tpval);
1387  return RBOOL(tp->tracing);
1388 }
1389 
/* Body of TracePoint#enabled?: delegates to the public C API predicate. */
static VALUE
tracepoint_enabled_p(rb_execution_context_t *ec, VALUE tpval)
{
    return rb_tracepoint_enabled_p(tpval);
}
1395 
/*
 * Allocates and initializes a TracePoint object, shared by the Ruby-level
 * proc form (proc set, func/data zero) and the C-level callback form
 * (func/data set, proc Qundef).
 *
 * NOTE(review): target_th is accepted but not stored in this visible body —
 * thread filtering appears to be applied at enable time instead; confirm.
 * tp->ractor pins the TracePoint to the creating Ractor unless the proc is
 * Ractor-shareable.
 */
static VALUE
tracepoint_new(VALUE klass, rb_thread_t *target_th, rb_event_flag_t events, void (func)(VALUE, void*), void *data, VALUE proc)
{
    VALUE tpval = tp_alloc(klass);
    rb_tp_t *tp;
    TypedData_Get_Struct(tpval, rb_tp_t, &tp_data_type, tp);

    tp->proc = proc;
    tp->ractor = rb_ractor_shareable_p(proc) ? NULL : GET_RACTOR();
    tp->func = func;
    tp->data = data;
    tp->events = events;
    tp->self = tpval;

    return tpval;
}
1412 
1413 VALUE
1414 rb_tracepoint_new(VALUE target_thval, rb_event_flag_t events, void (*func)(VALUE, void *), void *data)
1415 {
1416  rb_thread_t *target_th = NULL;
1417 
1418  if (RTEST(target_thval)) {
1419  target_th = rb_thread_ptr(target_thval);
1420  /* TODO: Test it!
1421  * Warning: This function is not tested.
1422  */
1423  }
1424  return tracepoint_new(rb_cTracePoint, target_th, events, func, data, Qundef);
1425 }
1426 
1427 static VALUE
1428 tracepoint_new_s(rb_execution_context_t *ec, VALUE self, VALUE args)
1429 {
1430  rb_event_flag_t events = 0;
1431  long i;
1432  long argc = RARRAY_LEN(args);
1433 
1434  if (argc > 0) {
1435  for (i=0; i<argc; i++) {
1436  events |= symbol2event_flag(RARRAY_AREF(args, i));
1437  }
1438  }
1439  else {
1440  events = RUBY_EVENT_TRACEPOINT_ALL;
1441  }
1442 
1443  if (!rb_block_given_p()) {
1444  rb_raise(rb_eArgError, "must be called with a block");
1445  }
1446 
1447  return tracepoint_new(self, 0, events, 0, 0, rb_block_proc());
1448 }
1449 
1450 static VALUE
1451 tracepoint_trace_s(rb_execution_context_t *ec, VALUE self, VALUE args)
1452 {
1453  VALUE trace = tracepoint_new_s(ec, self, args);
1454  rb_tracepoint_enable(trace);
1455  return trace;
1456 }
1457 
1458 static VALUE
1459 tracepoint_inspect(rb_execution_context_t *ec, VALUE self)
1460 {
1461  rb_tp_t *tp = tpptr(self);
1462  rb_trace_arg_t *trace_arg = GET_EC()->trace_arg;
1463 
1464  if (trace_arg) {
1465  switch (trace_arg->event) {
1466  case RUBY_EVENT_LINE:
1467  {
1468  VALUE sym = rb_tracearg_method_id(trace_arg);
1469  if (NIL_P(sym))
1470  break;
1471  return rb_sprintf("#<TracePoint:%"PRIsVALUE" %"PRIsVALUE":%d in `%"PRIsVALUE"'>",
1472  rb_tracearg_event(trace_arg),
1473  rb_tracearg_path(trace_arg),
1474  FIX2INT(rb_tracearg_lineno(trace_arg)),
1475  sym);
1476  }
1477  case RUBY_EVENT_CALL:
1478  case RUBY_EVENT_C_CALL:
1479  case RUBY_EVENT_RETURN:
1480  case RUBY_EVENT_C_RETURN:
1481  return rb_sprintf("#<TracePoint:%"PRIsVALUE" `%"PRIsVALUE"' %"PRIsVALUE":%d>",
1482  rb_tracearg_event(trace_arg),
1483  rb_tracearg_method_id(trace_arg),
1484  rb_tracearg_path(trace_arg),
1485  FIX2INT(rb_tracearg_lineno(trace_arg)));
1487  case RUBY_EVENT_THREAD_END:
1488  return rb_sprintf("#<TracePoint:%"PRIsVALUE" %"PRIsVALUE">",
1489  rb_tracearg_event(trace_arg),
1490  rb_tracearg_self(trace_arg));
1491  default:
1492  break;
1493  }
1494  return rb_sprintf("#<TracePoint:%"PRIsVALUE" %"PRIsVALUE":%d>",
1495  rb_tracearg_event(trace_arg),
1496  rb_tracearg_path(trace_arg),
1497  FIX2INT(rb_tracearg_lineno(trace_arg)));
1498  }
1499  else {
1500  return rb_sprintf("#<TracePoint:%s>", tp->tracing ? "enabled" : "disabled");
1501  }
1502 }
1503 
1504 static void
1505 tracepoint_stat_event_hooks(VALUE hash, VALUE key, rb_event_hook_t *hook)
1506 {
1507  int active = 0, deleted = 0;
1508 
1509  while (hook) {
1510  if (hook->hook_flags & RUBY_EVENT_HOOK_FLAG_DELETED) {
1511  deleted++;
1512  }
1513  else {
1514  active++;
1515  }
1516  hook = hook->next;
1517  }
1518 
1519  rb_hash_aset(hash, key, rb_ary_new3(2, INT2FIX(active), INT2FIX(deleted)));
1520 }
1521 
/*
 * Body of TracePoint.stat: returns a Hash mapping the VM object to a
 * two-element Array [active_hook_count, deleted_hook_count] for the
 * current ractor's global hook list.
 */
static VALUE
tracepoint_stat_s(rb_execution_context_t *ec, VALUE self)
{
    rb_vm_t *vm = GET_VM();
    VALUE stat = rb_hash_new();

    tracepoint_stat_event_hooks(stat, vm->self, rb_ec_ractor_hooks(ec)->hooks);
    /* TODO: thread local hooks */

    return stat;
}
1533 
1534 static VALUE
1535 disallow_reentry(VALUE val)
1536 {
1537  rb_trace_arg_t *arg = (rb_trace_arg_t *)val;
1538  rb_execution_context_t *ec = GET_EC();
1539  if (ec->trace_arg != NULL) rb_bug("should be NULL, but %p", (void *)ec->trace_arg);
1540  ec->trace_arg = arg;
1541  return Qnil;
1542 }
1543 
/*
 * Body of TracePoint.allow_reentry: temporarily clears ec->trace_arg
 * (presumably what suppresses recursive trace dispatch — confirm in the
 * hook-dispatch code) while yielding the block, then restores it via
 * rb_ensure(disallow_reentry).  Raises RuntimeError when called outside a
 * trace handler (no trace_arg present).
 */
static VALUE
tracepoint_allow_reentry(rb_execution_context_t *ec, VALUE self)
{
    const rb_trace_arg_t *arg = ec->trace_arg;
    if (arg == NULL) rb_raise(rb_eRuntimeError, "No need to allow reentrance.");
    ec->trace_arg = NULL;
    return rb_ensure(rb_yield, Qnil, disallow_reentry, (VALUE)arg);
}
1552 
1553 #include "trace_point.rbinc"
1554 
/* This function is called from inits.c */
void
Init_vm_trace(void)
{
    /* trace_func: Kernel#set_trace_func and the per-thread Thread variants */
    rb_define_global_function("set_trace_func", set_trace_func, 1);
    rb_define_method(rb_cThread, "set_trace_func", thread_set_trace_func_m, 1);
    rb_define_method(rb_cThread, "add_trace_func", thread_add_trace_func_m, 1);

    /* TracePoint methods are defined via the builtin script (see the
     * trace_point.rbinc include below); the default allocator is removed so
     * instances can only be created through those entry points. */
    rb_cTracePoint = rb_define_class("TracePoint", rb_cObject);
    rb_undef_alloc_func(rb_cTracePoint);
}
1567 
1568 typedef struct rb_postponed_job_struct {
1570  void *data;
1572 
1573 #define MAX_POSTPONED_JOB 1000
1574 #define MAX_POSTPONED_JOB_SPECIAL_ADDITION 24
1575 
1577  struct list_node jnode; /* <=> vm->workqueue */
1578  rb_postponed_job_t job;
1579 };
1580 
/* Allocates the VM-global postponed-job buffer; called at VM boot. */
void
Init_vm_postponed_job(void)
{
    rb_vm_t *vm = GET_VM();
    vm->postponed_job_buffer = ALLOC_N(rb_postponed_job_t, MAX_POSTPONED_JOB);
    vm->postponed_job_index = 0;
    /* workqueue is initialized when VM locks are initialized */
}
1589 
/* Outcome of postponed_job_register(): success, buffer full, or the index
 * CAS was lost to a concurrent registration (callers retry on this). */
enum postponed_job_register_result {
    PJRR_SUCCESS = 0,
    PJRR_FULL = 1,
    PJRR_INTERRUPTED = 2
};
1595 
/*
 * Async-signal-safe: claims slot `expected_index` in vm->postponed_job_buffer
 * with a single CAS on vm->postponed_job_index, so it may run inside a
 * signal handler.  Returns PJRR_FULL when `expected_index` has reached
 * `max`, PJRR_INTERRUPTED when another registration won the CAS race
 * (caller re-reads the index and retries), PJRR_SUCCESS otherwise.
 */
static enum postponed_job_register_result
postponed_job_register(rb_execution_context_t *ec, rb_vm_t *vm,
                       unsigned int flags, rb_postponed_job_func_t func, void *data, rb_atomic_t max, rb_atomic_t expected_index)
{
    rb_postponed_job_t *pjob;

    if (expected_index >= max) return PJRR_FULL; /* failed */

    /* claim the slot; fails if the index moved since the caller read it */
    if (ATOMIC_CAS(vm->postponed_job_index, expected_index, expected_index+1) == expected_index) {
        pjob = &vm->postponed_job_buffer[expected_index];
    }
    else {
        return PJRR_INTERRUPTED;
    }

    /* unused: pjob->flags = flags; */
    pjob->func = func;
    pjob->data = data;

    /* make the EC run rb_postponed_job_flush() at its next interrupt check */
    RUBY_VM_SET_POSTPONED_JOB_INTERRUPT(ec);

    return PJRR_SUCCESS;
}
1620 
1621 static rb_execution_context_t *
1622 get_valid_ec(rb_vm_t *vm)
1623 {
1624  rb_execution_context_t *ec = rb_current_execution_context(false);
1625  if (ec == NULL) ec = rb_vm_main_ractor_ec(vm);
1626  return ec;
1627 }
1628 
1629 /*
1630  * return 0 if job buffer is full
1631  * Async-signal-safe
1632  */
1633 int
1634 rb_postponed_job_register(unsigned int flags, rb_postponed_job_func_t func, void *data)
1635 {
1636  rb_vm_t *vm = GET_VM();
1637  rb_execution_context_t *ec = get_valid_ec(vm);
1638 
1639  begin:
1640  switch (postponed_job_register(ec, vm, flags, func, data, MAX_POSTPONED_JOB, vm->postponed_job_index)) {
1641  case PJRR_SUCCESS : return 1;
1642  case PJRR_FULL : return 0;
1643  case PJRR_INTERRUPTED: goto begin;
1644  default: rb_bug("unreachable\n");
1645  }
1646 }
1647 
1648 /*
1649  * return 0 if job buffer is full
1650  * Async-signal-safe
1651  */
1652 int
1653 rb_postponed_job_register_one(unsigned int flags, rb_postponed_job_func_t func, void *data)
1654 {
1655  rb_vm_t *vm = GET_VM();
1656  rb_execution_context_t *ec = get_valid_ec(vm);
1657  rb_postponed_job_t *pjob;
1658  rb_atomic_t i, index;
1659 
1660  begin:
1661  index = vm->postponed_job_index;
1662  for (i=0; i<index; i++) {
1663  pjob = &vm->postponed_job_buffer[i];
1664  if (pjob->func == func) {
1665  RUBY_VM_SET_POSTPONED_JOB_INTERRUPT(ec);
1666  return 2;
1667  }
1668  }
1669  switch (postponed_job_register(ec, vm, flags, func, data, MAX_POSTPONED_JOB + MAX_POSTPONED_JOB_SPECIAL_ADDITION, index)) {
1670  case PJRR_SUCCESS : return 1;
1671  case PJRR_FULL : return 0;
1672  case PJRR_INTERRUPTED: goto begin;
1673  default: rb_bug("unreachable\n");
1674  }
1675 }
1676 
1677 /*
1678  * thread-safe and called from non-Ruby thread
1679  * returns FALSE on failure (ENOMEM), TRUE otherwise
1680  */
1681 int
1682 rb_workqueue_register(unsigned flags, rb_postponed_job_func_t func, void *data)
1683 {
1684  struct rb_workqueue_job *wq_job = malloc(sizeof(*wq_job));
1685  rb_vm_t *vm = GET_VM();
1686 
1687  if (!wq_job) return FALSE;
1688  wq_job->job.func = func;
1689  wq_job->job.data = data;
1690 
1691  rb_nativethread_lock_lock(&vm->workqueue_lock);
1692  list_add_tail(&vm->workqueue, &wq_job->jnode);
1693  rb_nativethread_lock_unlock(&vm->workqueue_lock);
1694 
1695  // TODO: current implementation affects only main ractor
1696  RUBY_VM_SET_POSTPONED_JOB_INTERRUPT(rb_vm_main_ractor_ec(vm));
1697 
1698  return TRUE;
1699 }
1700 
/*
 * Runs all pending postponed jobs and workqueue jobs on the current EC
 * (presumably invoked from the interrupt-check path — confirm at callers).
 * POSTPONED_JOB/TRAP interrupts are masked while jobs run, so a job cannot
 * recursively trigger another flush on this EC.
 */
void
rb_postponed_job_flush(rb_vm_t *vm)
{
    rb_execution_context_t *ec = GET_EC();
    const rb_atomic_t block_mask = POSTPONED_JOB_INTERRUPT_MASK|TRAP_INTERRUPT_MASK;
    volatile rb_atomic_t saved_mask = ec->interrupt_mask & block_mask;
    VALUE volatile saved_errno = ec->errinfo;
    struct list_head tmp;

    list_head_init(&tmp);

    /* steal the whole workqueue under the lock, then run without it */
    rb_nativethread_lock_lock(&vm->workqueue_lock);
    list_append_list(&tmp, &vm->workqueue);
    rb_nativethread_lock_unlock(&vm->workqueue_lock);

    ec->errinfo = Qnil;
    /* mask POSTPONED_JOB dispatch */
    ec->interrupt_mask |= block_mask;
    {
        EC_PUSH_TAG(ec);
        if (EC_EXEC_TAG() == TAG_NONE) {
            rb_atomic_t index;
            struct rb_workqueue_job *wq_job;

            /* drain the postponed-job buffer; the CAS guards against
             * concurrent registrations moving the index under us */
            while ((index = vm->postponed_job_index) > 0) {
                if (ATOMIC_CAS(vm->postponed_job_index, index, index-1) == index) {
                    rb_postponed_job_t *pjob = &vm->postponed_job_buffer[index-1];
                    (*pjob->func)(pjob->data);
                }
            }
            /* then drain the stolen workqueue; each entry is copied and
             * freed before its callback runs */
            while ((wq_job = list_pop(&tmp, struct rb_workqueue_job, jnode))) {
                rb_postponed_job_t pjob = wq_job->job;

                free(wq_job);
                (pjob.func)(pjob.data);
            }
        }
        EC_POP_TAG();
    }
    /* restore POSTPONED_JOB mask */
    ec->interrupt_mask &= ~(saved_mask ^ block_mask);
    ec->errinfo = saved_errno;

    /* don't leak memory if a job threw an exception */
    if (!list_empty(&tmp)) {
        /* push unprocessed entries back and re-arm the interrupt so they
         * are retried on the next flush */
        rb_nativethread_lock_lock(&vm->workqueue_lock);
        list_prepend_list(&vm->workqueue, &tmp);
        rb_nativethread_lock_unlock(&vm->workqueue_lock);

        RUBY_VM_SET_POSTPONED_JOB_INTERRUPT(GET_EC());
    }
}
std::atomic< unsigned > rb_atomic_t
Type that is eligible for atomic operations.
Definition: atomic.h:69
VALUE rb_tracearg_binding(rb_trace_arg_t *trace_arg)
Creates a binding object of the point where the trace is at.
Definition: vm_trace.c:948
VALUE rb_tracepoint_enabled_p(VALUE tpval)
Queries if the passed TracePoint is up and running.
Definition: vm_trace.c:1384
VALUE rb_tracearg_object(rb_trace_arg_t *trace_arg)
Queries the allocated/deallocated object that the trace represents.
Definition: vm_trace.c:1049
VALUE rb_tracearg_callee_id(rb_trace_arg_t *trace_arg)
Identical to rb_tracearg_method_id(), except it returns callee id like rb_frame_callee().
Definition: vm_trace.c:934
VALUE rb_tracearg_defined_class(rb_trace_arg_t *trace_arg)
Queries the class that defines the method that the passed trace is at.
Definition: vm_trace.c:941
VALUE rb_tracepoint_new(VALUE target_thread_not_supported_yet, rb_event_flag_t events, void(*func)(VALUE, void *), void *data)
Creates a tracepoint by registering a callback function for one or more tracepoint events.
Definition: vm_trace.c:1414
rb_trace_arg_t * rb_tracearg_from_tracepoint(VALUE tpval)
Queries the current event of the passed tracepoint.
Definition: vm_trace.c:827
VALUE rb_tracearg_raised_exception(rb_trace_arg_t *trace_arg)
Queries the raised exception that the trace represents.
Definition: vm_trace.c:983
void rb_thread_add_event_hook(VALUE thval, rb_event_hook_func_t func, rb_event_flag_t events, VALUE data)
Identical to rb_add_event_hook(), except its effect is limited to the passed thread.
Definition: vm_trace.c:178
VALUE rb_tracepoint_disable(VALUE tpval)
Stops (disables) an already running instance of TracePoint.
Definition: vm_trace.c:1270
VALUE rb_tracearg_self(rb_trace_arg_t *trace_arg)
Queries the receiver of the point trace is at.
Definition: vm_trace.c:962
int rb_thread_remove_event_hook(VALUE thval, rb_event_hook_func_t func)
Identical to rb_remove_event_hook(), except it additionally takes a thread argument.
Definition: vm_trace.c:280
int rb_postponed_job_register_one(unsigned int flags, rb_postponed_job_func_t func, void *data)
Identical to rb_postponed_job_register_one(), except it additionally checks for duplicated registrati...
Definition: vm_trace.c:1653
VALUE rb_tracearg_return_value(rb_trace_arg_t *trace_arg)
Queries the return value that the trace represents.
Definition: vm_trace.c:968
rb_event_flag_t rb_tracearg_event_flag(rb_trace_arg_t *trace_arg)
Queries the event of the passed trace.
Definition: vm_trace.c:833
VALUE rb_tracearg_path(rb_trace_arg_t *trace_arg)
Queries the file name of the point where the trace is at.
Definition: vm_trace.c:859
int rb_thread_remove_event_hook_with_data(VALUE thval, rb_event_hook_func_t func, VALUE data)
Identical to rb_thread_remove_event_hook(), except it additionally takes the data argument.
Definition: vm_trace.c:286
VALUE rb_tracepoint_enable(VALUE tpval)
Starts (enables) trace(s) defined by the passed object.
Definition: vm_trace.c:1156
int rb_postponed_job_register(unsigned int flags, rb_postponed_job_func_t func, void *data)
Registers a postponed job.
Definition: vm_trace.c:1634
VALUE rb_tracearg_method_id(rb_trace_arg_t *trace_arg)
Queries the method name of the point where the trace is at.
Definition: vm_trace.c:927
int rb_remove_event_hook_with_data(rb_event_hook_func_t func, VALUE data)
Identical to rb_remove_event_hook(), except it additionally takes the data argument.
Definition: vm_trace.c:298
VALUE rb_tracearg_lineno(rb_trace_arg_t *trace_arg)
Queries the line of the point where the trace is at.
Definition: vm_trace.c:853
void(* rb_postponed_job_func_t)(void *arg)
Type of postponed jobs.
Definition: debug.h:585
VALUE rb_tracearg_event(rb_trace_arg_t *trace_arg)
Identical to rb_tracearg_event_flag(), except it returns the name of the event in Ruby's symbol.
Definition: vm_trace.c:839
#define RUBY_EVENT_END
Encountered an end of a class clause.
Definition: event.h:36
#define RUBY_EVENT_C_CALL
A method, written in C, is called.
Definition: event.h:39
#define RUBY_EVENT_TRACEPOINT_ALL
Bitmask of extended events.
Definition: event.h:57
void rb_add_event_hook(rb_event_hook_func_t func, rb_event_flag_t events, VALUE data)
Registers an event hook function.
Definition: vm_trace.c:184
#define RUBY_EVENT_RAISE
Encountered a raise statement.
Definition: event.h:41
#define RUBY_EVENT_B_RETURN
Encountered a next statement.
Definition: event.h:52
#define RUBY_EVENT_SCRIPT_COMPILED
Encountered an eval.
Definition: event.h:56
#define RUBY_INTERNAL_EVENT_MASK
Bitmask of internal events.
Definition: event.h:96
int rb_remove_event_hook(rb_event_hook_func_t func)
Removes the passed function from the list of event hooks.
Definition: vm_trace.c:292
#define RUBY_EVENT_ALL
Bitmask of traditional events.
Definition: event.h:42
#define RUBY_EVENT_THREAD_BEGIN
Encountered a new thread.
Definition: event.h:53
#define RUBY_EVENT_CLASS
Encountered a new class.
Definition: event.h:35
void(* rb_event_hook_func_t)(rb_event_flag_t evflag, VALUE data, VALUE self, ID mid, VALUE klass)
Type of event hooks.
Definition: event.h:115
#define RUBY_EVENT_LINE
Encountered a new line.
Definition: event.h:34
#define RUBY_EVENT_RETURN
Encountered a return statement.
Definition: event.h:38
#define RUBY_EVENT_C_RETURN
Return from a method, written in C.
Definition: event.h:40
#define RUBY_EVENT_B_CALL
Encountered an yield statement.
Definition: event.h:51
#define RUBY_INTERNAL_EVENT_FREEOBJ
Object swept.
Definition: event.h:89
uint32_t rb_event_flag_t
Represents event(s).
Definition: event.h:103
#define RUBY_EVENT_CALL
A method, written in Ruby, is called.
Definition: event.h:37
#define RUBY_INTERNAL_EVENT_NEWOBJ
Object allocated.
Definition: event.h:88
#define RUBY_EVENT_THREAD_END
Encountered an end of a thread.
Definition: event.h:54
VALUE rb_define_class(const char *name, VALUE super)
Defines a top-level class.
Definition: class.c:837
void rb_define_method(VALUE klass, const char *name, VALUE(*func)(ANYARGS), int argc)
Defines a method.
Definition: class.c:1914
int rb_block_given_p(void)
Determines if the current method is given a block.
Definition: eval.c:854
void rb_define_global_function(const char *name, VALUE(*func)(ANYARGS), int argc)
Defines a global function.
Definition: class.c:2110
#define rb_str_new2
Old name of rb_str_new_cstr.
Definition: string.h:1738
#define FL_SINGLETON
Old name of RUBY_FL_SINGLETON.
Definition: fl_type.h:58
#define ALLOC
Old name of RB_ALLOC.
Definition: memory.h:394
#define xfree
Old name of ruby_xfree.
Definition: xmalloc.h:58
#define Qundef
Old name of RUBY_Qundef.
#define INT2FIX
Old name of RB_INT2FIX.
Definition: long.h:48
#define ID2SYM
Old name of RB_ID2SYM.
Definition: symbol.h:44
#define ZALLOC
Old name of RB_ZALLOC.
Definition: memory.h:396
#define FIX2INT
Old name of RB_FIX2INT.
Definition: int.h:41
#define NUM2UINT
Old name of RB_NUM2UINT.
Definition: int.h:45
#define T_ICLASS
Old name of RUBY_T_ICLASS.
Definition: value_type.h:66
#define ALLOC_N
Old name of RB_ALLOC_N.
Definition: memory.h:393
#define rb_ary_new3
Old name of rb_ary_new_from_args.
Definition: array.h:652
#define Qtrue
Old name of RUBY_Qtrue.
#define Qnil
Old name of RUBY_Qnil.
#define Qfalse
Old name of RUBY_Qfalse.
#define T_ARRAY
Old name of RUBY_T_ARRAY.
Definition: value_type.h:56
#define NIL_P
Old name of RB_NIL_P.
#define FL_TEST
Old name of RB_FL_TEST.
Definition: fl_type.h:139
void rb_raise(VALUE exc, const char *fmt,...)
Exception entry point.
Definition: error.c:3025
void rb_bug(const char *fmt,...)
Interpreter panic switch.
Definition: error.c:802
VALUE rb_eTypeError
TypeError exception.
Definition: error.c:1099
VALUE rb_eRuntimeError
RuntimeError exception.
Definition: error.c:1097
VALUE rb_eArgError
ArgumentError exception.
Definition: error.c:1100
VALUE rb_ensure(VALUE(*b_proc)(VALUE), VALUE data1, VALUE(*e_proc)(VALUE), VALUE data2)
An equivalent to ensure clause.
Definition: eval.c:983
VALUE rb_obj_hide(VALUE obj)
Make the object invisible from Ruby code.
Definition: object.c:82
VALUE rb_cThread
Thread class.
Definition: vm.c:397
#define RB_OBJ_WRITTEN(old, oldv, young)
Identical to RB_OBJ_WRITE(), except it doesn't write any values, but only a WB declaration.
Definition: rgengc.h:232
VALUE rb_funcall(VALUE recv, ID mid, int n,...)
Calls a method.
Definition: vm_eval.c:1102
Defines RBIMPL_HAS_BUILTIN.
void rb_gc_mark(VALUE obj)
Marks an object.
Definition: gc.c:6775
void rb_hash_foreach(VALUE hash, int(*func)(VALUE key, VALUE val, VALUE arg), VALUE arg)
Iterates over a hash.
VALUE rb_hash_aset(VALUE hash, VALUE key, VALUE val)
Inserts or replaces ("upsert"s) the objects into the given hash table.
Definition: hash.c:2903
VALUE rb_hash_new(void)
Creates a new, empty hash object.
Definition: hash.c:1529
VALUE rb_block_proc(void)
Constructs a Proc object from implicitly passed components.
Definition: proc.c:848
VALUE rb_proc_call_with_block(VALUE recv, int argc, const VALUE *argv, VALUE proc)
Identical to rb_proc_call(), except you can additionally pass another proc object,...
Definition: proc.c:1027
VALUE rb_obj_is_method(VALUE recv)
Queries if the given object is a method.
Definition: proc.c:1600
VALUE rb_binding_new(void)
Snapshots the current execution context and turn it into an instance of rb_cBinding.
Definition: proc.c:385
VALUE rb_obj_is_proc(VALUE recv)
Queries if the given object is a proc.
Definition: proc.c:175
VALUE rb_ivar_get(VALUE obj, ID name)
Identical to rb_iv_get(), except it accepts the name as an ID instead of a C string.
Definition: variable.c:1285
void rb_undef_alloc_func(VALUE klass)
Deletes the allocator function of a class.
Definition: vm_method.c:1117
ID rb_intern(const char *name)
Finds or creates a symbol of the given name.
Definition: symbol.c:782
VALUE rb_sym2str(VALUE id)
Identical to rb_id2str(), except it takes an instance of rb_cSymbol rather than an ID.
Definition: symbol.c:924
VALUE rb_sprintf(const char *fmt,...)
Ruby's extended sprintf(3).
Definition: sprintf.c:1201
VALUE rb_yield(VALUE val)
Yields the block.
Definition: vm_eval.c:1357
#define RB_GC_GUARD(v)
Prevents premature destruction of local objects.
Definition: memory.h:161
static bool rb_ractor_shareable_p(VALUE obj)
Queries if multiple Ractors can share the passed object or not.
Definition: ractor.h:249
#define RARRAY_LEN
Just another name of rb_array_len.
Definition: rarray.h:68
#define RARRAY_AREF(a, i)
Definition: rarray.h:588
#define RBASIC(obj)
Convenient casting macro.
Definition: rbasic.h:40
#define RUBY_TYPED_DEFAULT_FREE
This is a value you can set to rb_data_type_struct::dfree.
Definition: rtypeddata.h:79
#define TypedData_Get_Struct(obj, type, data_type, sval)
Obtains a C struct from inside of a wrapper Ruby object.
Definition: rtypeddata.h:507
#define TypedData_Make_Struct(klass, type, data_type, sval)
Identical to TypedData_Wrap_Struct, except it allocates a new data region internally instead of takin...
Definition: rtypeddata.h:489
#define RTEST
This is an old name of RB_TEST.
This is the struct that holds necessary info for a struct.
Definition: rtypeddata.h:190
Definition: method.h:54
void rb_nativethread_lock_lock(rb_nativethread_lock_t *lock)
Blocks until the current thread obtains a lock.
Definition: thread.c:440
void rb_nativethread_lock_unlock(rb_nativethread_lock_t *lock)
Releases a lock.
Definition: thread.c:446
uintptr_t ID
Type that represents a Ruby identifier such as a variable name.
Definition: value.h:52
uintptr_t VALUE
Type that represents a Ruby object.
Definition: value.h:40
static bool RB_TYPE_P(VALUE obj, enum ruby_value_type t)
Queries if the given object is of given type.
Definition: value_type.h:375
void ruby_xfree(void *ptr)
Deallocates a storage instance.
Definition: gc.c:11775