14 #define rb_data_object_alloc rb_data_object_alloc
15 #define rb_data_typed_object_alloc rb_data_typed_object_alloc
17 #include "ruby/internal/config.h"
24 #define sighandler_t ruby_sighandler_t
36 #ifndef HAVE_MALLOC_USABLE_SIZE
38 # define HAVE_MALLOC_USABLE_SIZE
39 # define malloc_usable_size(a) _msize(a)
40 # elif defined HAVE_MALLOC_SIZE
41 # define HAVE_MALLOC_USABLE_SIZE
42 # define malloc_usable_size(a) malloc_size(a)
46 #ifdef HAVE_MALLOC_USABLE_SIZE
47 # ifdef RUBY_ALTERNATIVE_MALLOC_HEADER
49 # elif defined(HAVE_MALLOC_H)
51 # elif defined(HAVE_MALLOC_NP_H)
52 # include <malloc_np.h>
53 # elif defined(HAVE_MALLOC_MALLOC_H)
54 # include <malloc/malloc.h>
58 #if !defined(PAGE_SIZE) && defined(HAVE_SYS_USER_H)
60 # include <sys/user.h>
64 #ifdef HAVE_SYS_TIME_H
65 # include <sys/time.h>
68 #ifdef HAVE_SYS_RESOURCE_H
69 # include <sys/resource.h>
72 #if defined _WIN32 || defined __CYGWIN__
74 #elif defined(HAVE_POSIX_MEMALIGN)
75 #elif defined(HAVE_MEMALIGN)
79 #include <sys/types.h>
82 #include <emscripten.h>
88 #include "debug_counter.h"
89 #include "eval_intern.h"
93 #include "internal/class.h"
94 #include "internal/complex.h"
95 #include "internal/cont.h"
96 #include "internal/error.h"
97 #include "internal/eval.h"
98 #include "internal/gc.h"
99 #include "internal/hash.h"
100 #include "internal/imemo.h"
101 #include "internal/io.h"
102 #include "internal/numeric.h"
103 #include "internal/object.h"
104 #include "internal/proc.h"
105 #include "internal/rational.h"
106 #include "internal/sanitizers.h"
107 #include "internal/struct.h"
108 #include "internal/symbol.h"
109 #include "internal/thread.h"
110 #include "internal/variable.h"
111 #include "internal/warnings.h"
121 #include "ruby_assert.h"
122 #include "ruby_atomic.h"
124 #include "transient_heap.h"
127 #include "vm_callinfo.h"
128 #include "ractor_core.h"
132 #define rb_setjmp(env) RUBY_SETJMP(env)
133 #define rb_jmp_buf rb_jmpbuf_t
134 #undef rb_data_object_wrap
136 #if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
137 #define MAP_ANONYMOUS MAP_ANON
140 static inline struct rbimpl_size_mul_overflow_tag
141 size_add_overflow(size_t x, size_t y)
147 #elif __has_builtin(__builtin_add_overflow)
148 p = __builtin_add_overflow(x, y, &z);
150 #elif defined(DSIZE_T)
162 return (struct rbimpl_size_mul_overflow_tag) { p, z, };
165 static inline struct rbimpl_size_mul_overflow_tag
166 size_mul_add_overflow(size_t x, size_t y, size_t z)
168 struct rbimpl_size_mul_overflow_tag t = rbimpl_size_mul_overflow(x, y);
169 struct rbimpl_size_mul_overflow_tag u = size_add_overflow(t.right, z);
170 return (struct rbimpl_size_mul_overflow_tag) { t.left || u.left, u.right };
173 static inline struct rbimpl_size_mul_overflow_tag
174 size_mul_add_mul_overflow(size_t x, size_t y, size_t z, size_t w)
176 struct rbimpl_size_mul_overflow_tag t = rbimpl_size_mul_overflow(x, y);
177 struct rbimpl_size_mul_overflow_tag u = rbimpl_size_mul_overflow(z, w);
178 struct rbimpl_size_mul_overflow_tag v = size_add_overflow(t.right, u.right);
179 return (struct rbimpl_size_mul_overflow_tag) { t.left || u.left || v.left, v.right };
182 PRINTF_ARGS(NORETURN(static void gc_raise(VALUE, const char*, ...)), 2, 3);
185 size_mul_or_raise(size_t x, size_t y, VALUE exc)
187 struct rbimpl_size_mul_overflow_tag t = rbimpl_size_mul_overflow(x, y);
188 if (LIKELY(!t.left)) {
197 "integer overflow: %"PRIuSIZE
200 x, y, (size_t)SIZE_MAX);
205 rb_size_mul_or_raise(size_t x, size_t y, VALUE exc)
207 return size_mul_or_raise(x, y, exc);
211 size_mul_add_or_raise(size_t x, size_t y, size_t z, VALUE exc)
213 struct rbimpl_size_mul_overflow_tag t = size_mul_add_overflow(x, y, z);
214 if (LIKELY(!t.left)) {
223 "integer overflow: %"PRIuSIZE
227 x, y, z, (size_t)SIZE_MAX);
232 rb_size_mul_add_or_raise(size_t x, size_t y, size_t z, VALUE exc)
234 return size_mul_add_or_raise(x, y, z, exc);
238 size_mul_add_mul_or_raise(size_t x, size_t y, size_t z, size_t w, VALUE exc)
240 struct rbimpl_size_mul_overflow_tag t = size_mul_add_mul_overflow(x, y, z, w);
241 if (LIKELY(!t.left)) {
250 "integer overflow: %"PRIdSIZE
255 x, y, z, w, (size_t)SIZE_MAX);
259 #if defined(HAVE_RB_GC_GUARDED_PTR_VAL) && HAVE_RB_GC_GUARDED_PTR_VAL
261 volatile VALUE rb_gc_guarded_val;
263 rb_gc_guarded_ptr_val(volatile VALUE *ptr, VALUE val)
265 rb_gc_guarded_val = val;
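/*
 * Editorial note: the size_mul_or_raise / size_mul_add_or_raise /
 * size_mul_add_mul_or_raise helpers above compute allocation sizes of the
 * form x*y, x*y+z and x*y+z*w with explicit overflow detection; on overflow
 * they raise `exc` instead of returning a value that silently wrapped around
 * SIZE_MAX.  A minimal illustrative caller (the element count `n` and the
 * header struct are hypothetical):
 *
 *     size_t bytes = rb_size_mul_add_or_raise(n, sizeof(VALUE),
 *                                             sizeof(struct my_header),
 *                                             rb_eArgError);
 */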
271 #ifndef GC_HEAP_INIT_SLOTS
272 #define GC_HEAP_INIT_SLOTS 10000
274 #ifndef GC_HEAP_FREE_SLOTS
275 #define GC_HEAP_FREE_SLOTS 4096
277 #ifndef GC_HEAP_GROWTH_FACTOR
278 #define GC_HEAP_GROWTH_FACTOR 1.8
280 #ifndef GC_HEAP_GROWTH_MAX_SLOTS
281 #define GC_HEAP_GROWTH_MAX_SLOTS 0
283 #ifndef GC_HEAP_OLDOBJECT_LIMIT_FACTOR
284 #define GC_HEAP_OLDOBJECT_LIMIT_FACTOR 2.0
287 #ifndef GC_HEAP_FREE_SLOTS_MIN_RATIO
288 #define GC_HEAP_FREE_SLOTS_MIN_RATIO 0.20
290 #ifndef GC_HEAP_FREE_SLOTS_GOAL_RATIO
291 #define GC_HEAP_FREE_SLOTS_GOAL_RATIO 0.40
293 #ifndef GC_HEAP_FREE_SLOTS_MAX_RATIO
294 #define GC_HEAP_FREE_SLOTS_MAX_RATIO 0.65
297 #ifndef GC_MALLOC_LIMIT_MIN
298 #define GC_MALLOC_LIMIT_MIN (16 * 1024 * 1024 )
300 #ifndef GC_MALLOC_LIMIT_MAX
301 #define GC_MALLOC_LIMIT_MAX (32 * 1024 * 1024 )
303 #ifndef GC_MALLOC_LIMIT_GROWTH_FACTOR
304 #define GC_MALLOC_LIMIT_GROWTH_FACTOR 1.4
307 #ifndef GC_OLDMALLOC_LIMIT_MIN
308 #define GC_OLDMALLOC_LIMIT_MIN (16 * 1024 * 1024 )
310 #ifndef GC_OLDMALLOC_LIMIT_GROWTH_FACTOR
311 #define GC_OLDMALLOC_LIMIT_GROWTH_FACTOR 1.2
313 #ifndef GC_OLDMALLOC_LIMIT_MAX
314 #define GC_OLDMALLOC_LIMIT_MAX (128 * 1024 * 1024 )
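/*
 * Editorial note: the GC_HEAP_*, GC_MALLOC_* and GC_OLDMALLOC_* macros above
 * are only compile-time defaults for gc_params; at startup most of them can
 * be overridden through corresponding RUBY_GC_* environment variables
 * (handled by ruby_gc_set_params(), not shown in this excerpt), e.g.:
 *
 *     RUBY_GC_HEAP_GROWTH_FACTOR=1.2 RUBY_GC_MALLOC_LIMIT_MAX=64000000 ruby app.rb
 */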
317 #ifndef PRINT_MEASURE_LINE
318 #define PRINT_MEASURE_LINE 0
320 #ifndef PRINT_ENTER_EXIT_TICK
321 #define PRINT_ENTER_EXIT_TICK 0
323 #ifndef PRINT_ROOT_TICKS
324 #define PRINT_ROOT_TICKS 0
327 #define USE_TICK_T (PRINT_ENTER_EXIT_TICK || PRINT_MEASURE_LINE || PRINT_ROOT_TICKS)
331 size_t heap_init_slots;
332 size_t heap_free_slots;
333 double growth_factor;
334 size_t growth_max_slots;
336 double heap_free_slots_min_ratio;
337 double heap_free_slots_goal_ratio;
338 double heap_free_slots_max_ratio;
339 double oldobject_limit_factor;
341 size_t malloc_limit_min;
342 size_t malloc_limit_max;
343 double malloc_limit_growth_factor;
345 size_t oldmalloc_limit_min;
346 size_t oldmalloc_limit_max;
347 double oldmalloc_limit_growth_factor;
355 GC_HEAP_GROWTH_FACTOR,
356 GC_HEAP_GROWTH_MAX_SLOTS,
358 GC_HEAP_FREE_SLOTS_MIN_RATIO,
359 GC_HEAP_FREE_SLOTS_GOAL_RATIO,
360 GC_HEAP_FREE_SLOTS_MAX_RATIO,
361 GC_HEAP_OLDOBJECT_LIMIT_FACTOR,
365 GC_MALLOC_LIMIT_GROWTH_FACTOR,
367 GC_OLDMALLOC_LIMIT_MIN,
368 GC_OLDMALLOC_LIMIT_MAX,
369 GC_OLDMALLOC_LIMIT_GROWTH_FACTOR,
390 #define RGENGC_DEBUG -1
392 #define RGENGC_DEBUG 0
395 #if RGENGC_DEBUG < 0 && !defined(_MSC_VER)
396 # define RGENGC_DEBUG_ENABLED(level) (-(RGENGC_DEBUG) >= (level) && ruby_rgengc_debug >= (level))
397 #elif defined(HAVE_VA_ARGS_MACRO)
398 # define RGENGC_DEBUG_ENABLED(level) ((RGENGC_DEBUG) >= (level))
400 # define RGENGC_DEBUG_ENABLED(level) 0
402 int ruby_rgengc_debug;
412 #ifndef RGENGC_CHECK_MODE
413 #define RGENGC_CHECK_MODE 0
417 #define GC_ASSERT(expr) RUBY_ASSERT_MESG_WHEN(RGENGC_CHECK_MODE > 0, expr, #expr)
425 #ifndef RGENGC_OLD_NEWOBJ_CHECK
426 #define RGENGC_OLD_NEWOBJ_CHECK 0
434 #ifndef RGENGC_PROFILE
435 #define RGENGC_PROFILE 0
444 #ifndef RGENGC_ESTIMATE_OLDMALLOC
445 #define RGENGC_ESTIMATE_OLDMALLOC 1
451 #ifndef RGENGC_FORCE_MAJOR_GC
452 #define RGENGC_FORCE_MAJOR_GC 0
455 #ifndef GC_PROFILE_MORE_DETAIL
456 #define GC_PROFILE_MORE_DETAIL 0
458 #ifndef GC_PROFILE_DETAIL_MEMORY
459 #define GC_PROFILE_DETAIL_MEMORY 0
461 #ifndef GC_ENABLE_INCREMENTAL_MARK
462 #define GC_ENABLE_INCREMENTAL_MARK USE_RINCGC
464 #ifndef GC_ENABLE_LAZY_SWEEP
465 #define GC_ENABLE_LAZY_SWEEP 1
467 #ifndef CALC_EXACT_MALLOC_SIZE
468 #define CALC_EXACT_MALLOC_SIZE USE_GC_MALLOC_OBJ_INFO_DETAILS
470 #if defined(HAVE_MALLOC_USABLE_SIZE) || CALC_EXACT_MALLOC_SIZE > 0
471 #ifndef MALLOC_ALLOCATED_SIZE
472 #define MALLOC_ALLOCATED_SIZE 0
475 #define MALLOC_ALLOCATED_SIZE 0
477 #ifndef MALLOC_ALLOCATED_SIZE_CHECK
478 #define MALLOC_ALLOCATED_SIZE_CHECK 0
481 #ifndef GC_DEBUG_STRESS_TO_CLASS
482 #define GC_DEBUG_STRESS_TO_CLASS 0
485 #ifndef RGENGC_OBJ_INFO
486 #define RGENGC_OBJ_INFO (RGENGC_DEBUG | RGENGC_CHECK_MODE)
490 GPR_FLAG_NONE = 0x000,
492 GPR_FLAG_MAJOR_BY_NOFREE = 0x001,
493 GPR_FLAG_MAJOR_BY_OLDGEN = 0x002,
494 GPR_FLAG_MAJOR_BY_SHADY = 0x004,
495 GPR_FLAG_MAJOR_BY_FORCE = 0x008,
496 #if RGENGC_ESTIMATE_OLDMALLOC
497 GPR_FLAG_MAJOR_BY_OLDMALLOC = 0x020,
499 GPR_FLAG_MAJOR_MASK = 0x0ff,
502 GPR_FLAG_NEWOBJ = 0x100,
503 GPR_FLAG_MALLOC = 0x200,
504 GPR_FLAG_METHOD = 0x400,
505 GPR_FLAG_CAPI = 0x800,
506 GPR_FLAG_STRESS = 0x1000,
509 GPR_FLAG_IMMEDIATE_SWEEP = 0x2000,
510 GPR_FLAG_HAVE_FINALIZE = 0x4000,
511 GPR_FLAG_IMMEDIATE_MARK = 0x8000,
512 GPR_FLAG_FULL_MARK = 0x10000,
513 GPR_FLAG_COMPACT = 0x20000,
516 (GPR_FLAG_FULL_MARK | GPR_FLAG_IMMEDIATE_MARK |
517 GPR_FLAG_IMMEDIATE_SWEEP | GPR_FLAG_CAPI),
518 } gc_profile_record_flag;
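/*
 * Editorial note: a gc_profile_record_flag value is a bitset describing one
 * GC run: the GPR_FLAG_MAJOR_BY_* bits record why a major GC was triggered,
 * GPR_FLAG_NEWOBJ/MALLOC/METHOD/CAPI record what invoked the collector, and
 * the remaining bits record sweep/mark/compaction modes.  The same encoding
 * backs GC.latest_gc_info at the Ruby level.
 */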
524 double gc_invoke_time;
526 size_t heap_total_objects;
527 size_t heap_use_size;
528 size_t heap_total_size;
529 size_t moved_objects;
531 #if GC_PROFILE_MORE_DETAIL
533 double gc_sweep_time;
535 size_t heap_use_pages;
536 size_t heap_live_objects;
537 size_t heap_free_objects;
539 size_t allocate_increase;
540 size_t allocate_limit;
543 size_t removing_objects;
544 size_t empty_objects;
545 #if GC_PROFILE_DETAIL_MEMORY
551 #if MALLOC_ALLOCATED_SIZE
552 size_t allocated_size;
555 #if RGENGC_PROFILE > 0
557 size_t remembered_normal_objects;
558 size_t remembered_shady_objects;
562 #define FL_FROM_FREELIST FL_USER0
570 #define RMOVED(obj) ((struct RMoved *)(obj))
628 typedef uintptr_t bits_t;
630 BITS_SIZE = sizeof(bits_t),
631 BITS_BITLENGTH = ( BITS_SIZE * CHAR_BIT )
633 #define popcount_bits rb_popcount_intptr
650 #define STACK_CHUNK_SIZE 500
653 VALUE data[STACK_CHUNK_SIZE];
663 size_t unused_cache_size;
666 #define SIZE_POOL_EDEN_HEAP(size_pool) (&(size_pool)->eden_heap)
667 #define SIZE_POOL_TOMB_HEAP(size_pool) (&(size_pool)->tomb_heap)
671 struct list_head pages;
674 RVALUE * compact_cursor_index;
675 #if GC_ENABLE_INCREMENTAL_MARK
685 size_t allocatable_pages;
693 size_t force_major_gc_count;
710 #if MALLOC_ALLOCATED_SIZE
711 size_t allocated_size;
717 unsigned int mode : 2;
718 unsigned int immediate_sweep : 1;
719 unsigned int dont_gc : 1;
720 unsigned int dont_incremental : 1;
721 unsigned int during_gc : 1;
722 unsigned int during_compacting : 1;
723 unsigned int gc_stressful: 1;
724 unsigned int has_hook: 1;
725 unsigned int during_minor_gc : 1;
726 #if GC_ENABLE_INCREMENTAL_MARK
727 unsigned int during_incremental_marking : 1;
729 unsigned int measure_gc : 1;
733 size_t total_allocated_objects;
734 VALUE next_object_id;
747 size_t allocated_pages;
748 size_t allocatable_pages;
749 size_t sorted_length;
751 size_t freeable_pages;
755 VALUE deferred_final;
762 unsigned int latest_gc_info;
768 #if GC_PROFILE_MORE_DETAIL
773 size_t minor_gc_count;
774 size_t major_gc_count;
775 size_t compact_count;
776 size_t read_barrier_faults;
777 #if RGENGC_PROFILE > 0
778 size_t total_generated_normal_object_count;
779 size_t total_generated_shady_object_count;
780 size_t total_shade_operation_count;
781 size_t total_promoted_count;
782 size_t total_remembered_normal_object_count;
783 size_t total_remembered_shady_object_count;
785 #if RGENGC_PROFILE >= 2
786 size_t generated_normal_object_count_types[RUBY_T_MASK];
787 size_t generated_shady_object_count_types[RUBY_T_MASK];
790 size_t remembered_normal_object_count_types[RUBY_T_MASK];
791 size_t remembered_shady_object_count_types[RUBY_T_MASK];
796 double gc_sweep_start_time;
797 size_t total_allocated_objects_at_gc_start;
798 size_t heap_used_at_gc_start;
802 size_t total_freed_objects;
803 size_t total_allocated_pages;
804 size_t total_freed_pages;
805 uint64_t total_time_ns;
810 VALUE gc_stress_mode;
815 size_t last_major_gc;
816 size_t uncollectible_wb_unprotected_objects;
817 size_t uncollectible_wb_unprotected_objects_limit;
819 size_t old_objects_limit;
821 #if RGENGC_ESTIMATE_OLDMALLOC
822 size_t oldmalloc_increase;
823 size_t oldmalloc_increase_limit;
826 #if RGENGC_CHECK_MODE >= 2
833 size_t considered_count_table[T_MASK];
834 size_t moved_count_table[T_MASK];
838 #if GC_ENABLE_INCREMENTAL_MARK
848 #if GC_DEBUG_STRESS_TO_CLASS
849 VALUE stress_to_class;
855 #define HEAP_PAGE_ALIGN_LOG 14
856 #define CEILDIV(i, mod) (((i) + (mod) - 1)/(mod))
858 HEAP_PAGE_ALIGN = (1UL << HEAP_PAGE_ALIGN_LOG),
859 HEAP_PAGE_ALIGN_MASK = (~(~0UL << HEAP_PAGE_ALIGN_LOG)),
860 HEAP_PAGE_SIZE = HEAP_PAGE_ALIGN,
861 HEAP_PAGE_OBJ_LIMIT = (unsigned int)((HEAP_PAGE_SIZE - sizeof(struct heap_page_header))/sizeof(struct RVALUE)),
862 HEAP_PAGE_BITMAP_LIMIT = CEILDIV(CEILDIV(HEAP_PAGE_SIZE, sizeof(struct RVALUE)), BITS_BITLENGTH),
863 HEAP_PAGE_BITMAP_SIZE = (BITS_SIZE * HEAP_PAGE_BITMAP_LIMIT),
865 #define HEAP_PAGE_ALIGN (1 << HEAP_PAGE_ALIGN_LOG)
866 #define HEAP_PAGE_SIZE HEAP_PAGE_ALIGN
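/*
 * Editorial note: heap pages are HEAP_PAGE_SIZE (1 << 14 = 16 KiB) bytes and
 * are allocated HEAP_PAGE_ALIGN-aligned, so the owning page of any object can
 * be recovered by masking the low HEAP_PAGE_ALIGN_LOG bits of its address
 * (see GET_PAGE_BODY below).  Assuming a 40-byte RVALUE on a 64-bit build,
 * that works out to roughly 400 object slots per page (HEAP_PAGE_OBJ_LIMIT).
 */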
869 # if HAVE_CONST_PAGE_SIZE
871 static const bool USE_MMAP_ALIGNED_ALLOC = (PAGE_SIZE <= HEAP_PAGE_SIZE);
872 # elif defined(PAGE_MAX_SIZE) && (PAGE_MAX_SIZE <= HEAP_PAGE_SIZE)
874 static const bool USE_MMAP_ALIGNED_ALLOC = true;
877 # define USE_MMAP_ALIGNED_ALLOC (use_mmap_aligned_alloc != false)
879 static bool use_mmap_aligned_alloc;
881 #elif !defined(__MINGW32__) && !defined(_WIN32)
882 static const bool USE_MMAP_ALIGNED_ALLOC = false;
892 unsigned int before_sweep : 1;
893 unsigned int has_remembered_objects : 1;
894 unsigned int has_uncollectible_shady_objects : 1;
895 unsigned int in_tomb : 1;
903 struct list_node page_node;
905 bits_t wb_unprotected_bits[HEAP_PAGE_BITMAP_LIMIT];
907 bits_t mark_bits[HEAP_PAGE_BITMAP_LIMIT];
908 bits_t uncollectible_bits[HEAP_PAGE_BITMAP_LIMIT];
909 bits_t marking_bits[HEAP_PAGE_BITMAP_LIMIT];
912 bits_t pinned_bits[HEAP_PAGE_BITMAP_LIMIT];
915 #define GET_PAGE_BODY(x) ((struct heap_page_body *)((bits_t)(x) & ~(HEAP_PAGE_ALIGN_MASK)))
916 #define GET_PAGE_HEADER(x) (&GET_PAGE_BODY(x)->header)
917 #define GET_HEAP_PAGE(x) (GET_PAGE_HEADER(x)->page)
919 #define NUM_IN_PAGE(p) (((bits_t)(p) & HEAP_PAGE_ALIGN_MASK)/sizeof(RVALUE))
920 #define BITMAP_INDEX(p) (NUM_IN_PAGE(p) / BITS_BITLENGTH )
921 #define BITMAP_OFFSET(p) (NUM_IN_PAGE(p) & (BITS_BITLENGTH-1))
922 #define BITMAP_BIT(p) ((bits_t)1 << BITMAP_OFFSET(p))
925 #define MARKED_IN_BITMAP(bits, p) ((bits)[BITMAP_INDEX(p)] & BITMAP_BIT(p))
926 #define MARK_IN_BITMAP(bits, p) ((bits)[BITMAP_INDEX(p)] = (bits)[BITMAP_INDEX(p)] | BITMAP_BIT(p))
927 #define CLEAR_IN_BITMAP(bits, p) ((bits)[BITMAP_INDEX(p)] = (bits)[BITMAP_INDEX(p)] & ~BITMAP_BIT(p))
930 #define GET_HEAP_MARK_BITS(x) (&GET_HEAP_PAGE(x)->mark_bits[0])
931 #define GET_HEAP_PINNED_BITS(x) (&GET_HEAP_PAGE(x)->pinned_bits[0])
932 #define GET_HEAP_UNCOLLECTIBLE_BITS(x) (&GET_HEAP_PAGE(x)->uncollectible_bits[0])
933 #define GET_HEAP_WB_UNPROTECTED_BITS(x) (&GET_HEAP_PAGE(x)->wb_unprotected_bits[0])
934 #define GET_HEAP_MARKING_BITS(x) (&GET_HEAP_PAGE(x)->marking_bits[0])
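/*
 * Editorial note: the bitmap macros above address one bit per slot.
 * NUM_IN_PAGE(p) takes the low HEAP_PAGE_ALIGN_LOG bits of the slot address
 * and divides by sizeof(RVALUE) to get the slot index inside the page;
 * BITMAP_INDEX/BITMAP_OFFSET then split that index into a word index and a
 * bit offset within a bits_t word, so e.g.
 *
 *     MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj)
 *
 * tests the mark bit of `obj` with a few shifts and masks.
 */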
937 #define rb_objspace (*rb_objspace_of(GET_VM()))
938 #define rb_objspace_of(vm) ((vm)->objspace)
940 #define ruby_initial_gc_stress gc_params.gc_stress
942 VALUE *ruby_initial_gc_stress_ptr = &ruby_initial_gc_stress;
944 #define malloc_limit objspace->malloc_params.limit
945 #define malloc_increase objspace->malloc_params.increase
946 #define malloc_allocated_size objspace->malloc_params.allocated_size
947 #define heap_pages_sorted objspace->heap_pages.sorted
948 #define heap_allocated_pages objspace->heap_pages.allocated_pages
949 #define heap_pages_sorted_length objspace->heap_pages.sorted_length
950 #define heap_pages_lomem objspace->heap_pages.range[0]
951 #define heap_pages_himem objspace->heap_pages.range[1]
952 #define heap_pages_freeable_pages objspace->heap_pages.freeable_pages
953 #define heap_pages_final_slots objspace->heap_pages.final_slots
954 #define heap_pages_deferred_final objspace->heap_pages.deferred_final
955 #define size_pools objspace->size_pools
956 #define during_gc objspace->flags.during_gc
957 #define finalizing objspace->atomic_flags.finalizing
958 #define finalizer_table objspace->finalizer_table
959 #define global_list objspace->global_list
960 #define ruby_gc_stressful objspace->flags.gc_stressful
961 #define ruby_gc_stress_mode objspace->gc_stress_mode
962 #if GC_DEBUG_STRESS_TO_CLASS
963 #define stress_to_class objspace->stress_to_class
965 #define stress_to_class 0
969 #define dont_gc_on() (fprintf(stderr, "dont_gc_on@%s:%d\n", __FILE__, __LINE__), objspace->flags.dont_gc = 1)
970 #define dont_gc_off() (fprintf(stderr, "dont_gc_off@%s:%d\n", __FILE__, __LINE__), objspace->flags.dont_gc = 0)
971 #define dont_gc_set(b) (fprintf(stderr, "dont_gc_set(%d)@%s:%d\n", (int)b, __FILE__, __LINE__), objspace->flags.dont_gc = (b))
972 #define dont_gc_val() (objspace->flags.dont_gc)
974 #define dont_gc_on() (objspace->flags.dont_gc = 1)
975 #define dont_gc_off() (objspace->flags.dont_gc = 0)
976 #define dont_gc_set(b) (((int)b), objspace->flags.dont_gc = (b))
977 #define dont_gc_val() (objspace->flags.dont_gc)
980 static inline enum gc_mode
981 gc_mode_verify(enum gc_mode mode)
983 #if RGENGC_CHECK_MODE > 0
986 case gc_mode_marking:
987 case gc_mode_sweeping:
990 rb_bug("gc_mode_verify: unreachable (%d)", (int)mode);
999 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
1000 if (SIZE_POOL_EDEN_HEAP(&size_pools[i])->sweeping_page) {
1007 static inline size_t
1011 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
1012 count += SIZE_POOL_EDEN_HEAP(&size_pools[i])->total_pages;
1017 static inline size_t
1021 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
1022 count += SIZE_POOL_EDEN_HEAP(&size_pools[i])->total_slots;
1027 static inline size_t
1031 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
1032 count += SIZE_POOL_TOMB_HEAP(&size_pools[i])->total_pages;
1037 static inline size_t
1041 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
1042 count += size_pools[i].allocatable_pages;
1047 static inline size_t
1051 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
1053 int slot_size_multiple = size_pool->slot_size / sizeof(RVALUE);
1054 count += size_pool->allocatable_pages * HEAP_PAGE_OBJ_LIMIT / slot_size_multiple;
1059 #define gc_mode(objspace) gc_mode_verify((enum gc_mode)(objspace)->flags.mode)
1060 #define gc_mode_set(objspace, mode) ((objspace)->flags.mode = (unsigned int)gc_mode_verify(mode))
1062 #define is_marking(objspace) (gc_mode(objspace) == gc_mode_marking)
1063 #define is_sweeping(objspace) (gc_mode(objspace) == gc_mode_sweeping)
1064 #define is_full_marking(objspace) ((objspace)->flags.during_minor_gc == FALSE)
1065 #if GC_ENABLE_INCREMENTAL_MARK
1066 #define is_incremental_marking(objspace) ((objspace)->flags.during_incremental_marking != FALSE)
1068 #define is_incremental_marking(objspace) FALSE
1070 #if GC_ENABLE_INCREMENTAL_MARK
1071 #define will_be_incremental_marking(objspace) ((objspace)->rgengc.need_major_gc != GPR_FLAG_NONE)
1073 #define will_be_incremental_marking(objspace) FALSE
1075 #define is_lazy_sweeping(objspace) (GC_ENABLE_LAZY_SWEEP && has_sweeping_pages(objspace))
1077 #if SIZEOF_LONG == SIZEOF_VOIDP
1078 # define nonspecial_obj_id(obj) (VALUE)((SIGNED_VALUE)(obj)|FIXNUM_FLAG)
1079 # define obj_id_to_ref(objid) ((objid) ^ FIXNUM_FLAG)
1080 #elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
1081 # define nonspecial_obj_id(obj) LL2NUM((SIGNED_VALUE)(obj) / 2)
1082 # define obj_id_to_ref(objid) (FIXNUM_P(objid) ? \
1083 ((objid) ^ FIXNUM_FLAG) : (NUM2PTR(objid) << 1))
1085 # error not supported
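/*
 * Editorial note: nonspecial_obj_id()/obj_id_to_ref() convert between a heap
 * pointer and its Fixnum-tagged form.  When pointers fit in a long the macros
 * simply OR in / XOR out FIXNUM_FLAG; on the LLP64 branch the pointer is
 * halved and boxed with LL2NUM instead, and obj_id_to_ref() undoes whichever
 * encoding was used.
 */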
1088 #define RANY(o) ((RVALUE*)(o))
1093 void (*dfree)(void *);
1097 #define RZOMBIE(o) ((struct RZombie *)(o))
1099 #define nomem_error GET_VM()->special_exceptions[ruby_error_nomemory]
1101 #if RUBY_MARK_FREE_DEBUG
1102 int ruby_gc_debug_indent = 0;
1105 int ruby_disable_gc = 0;
1106 int ruby_enable_autocompact = 0;
1108 void rb_iseq_mark(const rb_iseq_t *iseq);
1109 void rb_iseq_update_references(rb_iseq_t *iseq);
1110 void rb_iseq_free(const rb_iseq_t *iseq);
1111 size_t rb_iseq_memsize(const rb_iseq_t *iseq);
1112 void rb_vm_update_references(void *ptr);
1114 void rb_gcdebug_print_obj_condition(VALUE obj);
1118 NORETURN(static void *gc_vraise(void *ptr));
1119 NORETURN(static void gc_raise(VALUE exc, const char *fmt, ...));
1120 NORETURN(static void negative_size_allocation_error(const char *));
1126 static int garbage_collect(rb_objspace_t *, unsigned int reason);
1128 static int gc_start(rb_objspace_t *objspace, unsigned int reason);
1131 enum gc_enter_event {
1132 gc_enter_event_start,
1133 gc_enter_event_mark_continue,
1134 gc_enter_event_sweep_continue,
1135 gc_enter_event_rest,
1136 gc_enter_event_finalizer,
1137 gc_enter_event_rb_memerror,
1140 static inline void gc_enter(rb_objspace_t *objspace, enum gc_enter_event event, unsigned int *lock_lev);
1141 static inline void gc_exit(rb_objspace_t *objspace, enum gc_enter_event event, unsigned int *lock_lev);
1143 static void gc_marks(rb_objspace_t *objspace, int full_mark);
1144 static void gc_marks_start(rb_objspace_t *objspace, int full);
1160 NO_SANITIZE("memory", static void gc_mark_maybe(rb_objspace_t *objspace, VALUE ptr));
1163 static int gc_mark_stacked_objects_incremental(rb_objspace_t *, size_t count);
1168 NO_SANITIZE("memory", static inline int is_pointer_to_heap(rb_objspace_t *objspace, void *ptr));
1173 static void shrink_stack_chunk_cache(mark_stack_t *stack);
1175 static size_t obj_memsize_of(VALUE obj, int use_all_types);
1176 static void gc_verify_internal_consistency(rb_objspace_t *objspace);
1183 static double getrusage_time(void);
1184 static inline void gc_prof_setup_new_record(rb_objspace_t *objspace, unsigned int reason);
1187 static inline void gc_prof_mark_timer_start(rb_objspace_t *);
1188 static inline void gc_prof_mark_timer_stop(rb_objspace_t *);
1189 static inline void gc_prof_sweep_timer_start(rb_objspace_t *);
1190 static inline void gc_prof_sweep_timer_stop(rb_objspace_t *);
1191 static inline void gc_prof_set_malloc_info(rb_objspace_t *);
1194 #define TYPED_UPDATE_IF_MOVED(_objspace, _type, _thing) do { \
1195 if (gc_object_moved_p(_objspace, (VALUE)_thing)) { \
1196 *((_type *)(&_thing)) = (_type)RMOVED((_thing))->destination; \
1200 #define UPDATE_IF_MOVED(_objspace, _thing) TYPED_UPDATE_IF_MOVED(_objspace, VALUE, _thing)
1202 #define gc_prof_record(objspace) (objspace)->profile.current_record
1203 #define gc_prof_enabled(objspace) ((objspace)->profile.run && (objspace)->profile.current_record)
1205 #ifdef HAVE_VA_ARGS_MACRO
1206 # define gc_report(level, objspace, ...) \
1207 if (!RGENGC_DEBUG_ENABLED(level)) {} else gc_report_body(level, objspace, __VA_ARGS__)
1209 # define gc_report if (!RGENGC_DEBUG_ENABLED(0)) {} else gc_report_body
1211 PRINTF_ARGS(static void gc_report_body(int level, rb_objspace_t *objspace, const char *fmt, ...), 3, 4);
1212 static const char *obj_info(VALUE obj);
1213 static const char *obj_type_name(VALUE obj);
1233 #if defined(__GNUC__) && defined(__i386__)
1234 typedef unsigned long long tick_t;
1235 #define PRItick "llu"
1236 static inline tick_t
1239 unsigned long long int x;
1240 __asm__ __volatile__ ("rdtsc" : "=A" (x));
1244 #elif defined(__GNUC__) && defined(__x86_64__)
1245 typedef unsigned long long tick_t;
1246 #define PRItick "llu"
1248 static __inline__ tick_t
1251 unsigned long hi, lo;
1252 __asm__ __volatile__ ("rdtsc" : "=a"(lo), "=d"(hi));
1253 return ((unsigned long long)lo)|( ((unsigned long long)hi)<<32);
1256 #elif defined(__powerpc64__) && GCC_VERSION_SINCE(4,8,0)
1257 typedef unsigned long long tick_t;
1258 #define PRItick "llu"
1260 static __inline__ tick_t
1263 unsigned long long val = __builtin_ppc_get_timebase();
1267 #elif defined(__aarch64__) && defined(__GNUC__)
1268 typedef unsigned long tick_t;
1269 #define PRItick "lu"
1271 static __inline__ tick_t
1275 __asm__ __volatile__ ("mrs %0, cntvct_el0" : "=r" (val));
1280 #elif defined(_WIN32) && defined(_MSC_VER)
1282 typedef unsigned __int64 tick_t;
1283 #define PRItick "llu"
1285 static inline tick_t
1292 typedef clock_t tick_t;
1293 #define PRItick "llu"
1295 static inline tick_t
1302 #elif TICK_TYPE == 2
1303 typedef double tick_t;
1304 #define PRItick "4.9f"
1306 static inline tick_t
1309 return getrusage_time();
1312 #error "choose tick type"
1315 #define MEASURE_LINE(expr) do { \
1316 volatile tick_t start_time = tick(); \
1317 volatile tick_t end_time; \
1319 end_time = tick(); \
1320 fprintf(stderr, "0\t%"PRItick"\t%s\n", end_time - start_time, #expr); \
1324 #define MEASURE_LINE(expr) expr
1327 static inline void *
1328 asan_unpoison_object_temporary(VALUE obj)
1330 void *ptr = asan_poisoned_object_p(obj);
1331 asan_unpoison_object(obj, false);
1335 #define FL_CHECK2(name, x, pred) \
1336 ((RGENGC_CHECK_MODE && SPECIAL_CONST_P(x)) ? \
1337 (rb_bug(name": SPECIAL_CONST (%p)", (void *)(x)), 0) : (pred))
1338 #define FL_TEST2(x,f) FL_CHECK2("FL_TEST2", x, FL_TEST_RAW((x),(f)) != 0)
1339 #define FL_SET2(x,f) FL_CHECK2("FL_SET2", x, RBASIC(x)->flags |= (f))
1340 #define FL_UNSET2(x,f) FL_CHECK2("FL_UNSET2", x, RBASIC(x)->flags &= ~(f))
1342 #define RVALUE_MARK_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(obj), (obj))
1343 #define RVALUE_PIN_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_PINNED_BITS(obj), (obj))
1344 #define RVALUE_PAGE_MARKED(page, obj) MARKED_IN_BITMAP((page)->mark_bits, (obj))
1346 #define RVALUE_WB_UNPROTECTED_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), (obj))
1347 #define RVALUE_UNCOLLECTIBLE_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS(obj), (obj))
1348 #define RVALUE_MARKING_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), (obj))
1350 #define RVALUE_PAGE_WB_UNPROTECTED(page, obj) MARKED_IN_BITMAP((page)->wb_unprotected_bits, (obj))
1351 #define RVALUE_PAGE_UNCOLLECTIBLE(page, obj) MARKED_IN_BITMAP((page)->uncollectible_bits, (obj))
1352 #define RVALUE_PAGE_MARKING(page, obj) MARKED_IN_BITMAP((page)->marking_bits, (obj))
1354 #define RVALUE_OLD_AGE 3
1355 #define RVALUE_AGE_SHIFT 5
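/*
 * Editorial note: an object's generational age lives in two bits of
 * RBASIC(obj)->flags starting at RVALUE_AGE_SHIFT (the FL_PROMOTED0/1 bits),
 * so ages run from 0 to RVALUE_OLD_AGE (3).  RVALUE_AGE_INC() bumps the age
 * each time the object survives a mark, and once it reaches RVALUE_OLD_AGE
 * the object is promoted to the old generation
 * (RVALUE_OLD_UNCOLLECTIBLE_SET further down).
 */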
1370 check_rvalue_consistency_force(const VALUE obj, int terminate)
1375 RB_VM_LOCK_ENTER_NO_BARRIER();
1378 fprintf(stderr, "check_rvalue_consistency: %p is a special const.\n", (void *)obj);
1381 else if (!is_pointer_to_heap(objspace, (void *)obj)) {
1384 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
1386 list_for_each(&size_pool->tomb_heap.pages, page, page_node) {
1387 if (&page->start[0] <= (RVALUE *)obj &&
1388 (uintptr_t)obj < ((uintptr_t)page->start + (page->total_slots * size_pool->slot_size))) {
1389 fprintf(stderr, "check_rvalue_consistency: %p is in a tomb_heap (%p).\n",
1390 (void *)obj, (void *)page);
1397 fprintf(stderr, "check_rvalue_consistency: %p is not a Ruby object.\n", (void *)obj);
1403 const int wb_unprotected_bit = RVALUE_WB_UNPROTECTED_BITMAP(obj) != 0;
1404 const int uncollectible_bit = RVALUE_UNCOLLECTIBLE_BITMAP(obj) != 0;
1405 const int mark_bit = RVALUE_MARK_BITMAP(obj) != 0;
1406 const int marking_bit = RVALUE_MARKING_BITMAP(obj) != 0, remembered_bit = marking_bit;
1407 const int age = RVALUE_FLAGS_AGE(RBASIC(obj)->flags);
1409 if (GET_HEAP_PAGE(obj)->flags.in_tomb) {
1410 fprintf(stderr, "check_rvalue_consistency: %s is in tomb page.\n", obj_info(obj));
1414 fprintf(stderr, "check_rvalue_consistency: %s is T_NONE.\n", obj_info(obj));
1418 fprintf(stderr, "check_rvalue_consistency: %s is T_ZOMBIE.\n", obj_info(obj));
1422 obj_memsize_of((VALUE)obj, FALSE);
1428 if (age > 0 && wb_unprotected_bit) {
1429 fprintf(stderr, "check_rvalue_consistency: %s is not WB protected, but age is %d > 0.\n", obj_info(obj), age);
1433 if (!is_marking(objspace) && uncollectible_bit && !mark_bit) {
1434 fprintf(stderr, "check_rvalue_consistency: %s is uncollectible, but is not marked while !gc.\n", obj_info(obj));
1438 if (!is_full_marking(objspace)) {
1439 if (uncollectible_bit && age != RVALUE_OLD_AGE && !wb_unprotected_bit) {
1440 fprintf(stderr, "check_rvalue_consistency: %s is uncollectible, but not old (age: %d) and not WB unprotected.\n",
1441 obj_info(obj), age);
1444 if (remembered_bit && age != RVALUE_OLD_AGE) {
1445 fprintf(stderr, "check_rvalue_consistency: %s is remembered, but not old (age: %d).\n",
1446 obj_info(obj), age);
1458 if (is_incremental_marking(objspace) && marking_bit) {
1459 if (!is_marking(objspace) && !mark_bit) {
1460 fprintf(stderr, "check_rvalue_consistency: %s is marking, but not marked.\n", obj_info(obj));
1466 RB_VM_LOCK_LEAVE_NO_BARRIER();
1468 if (err > 0 && terminate) {
1469 rb_bug("check_rvalue_consistency_force: there is %d errors.", err);
1474 #if RGENGC_CHECK_MODE == 0
1476 check_rvalue_consistency(const VALUE obj)
1482 check_rvalue_consistency(const VALUE obj)
1484 check_rvalue_consistency_force(obj, TRUE);
1496 void *poisoned = asan_poisoned_object_p(obj);
1497 asan_unpoison_object(obj, false);
1503 asan_poison_object(obj);
1510 RVALUE_MARKED(VALUE obj)
1512 check_rvalue_consistency(obj);
1513 return RVALUE_MARK_BITMAP(obj) != 0;
1517 RVALUE_PINNED(VALUE obj)
1519 check_rvalue_consistency(obj);
1520 return RVALUE_PIN_BITMAP(obj) != 0;
1524 RVALUE_WB_UNPROTECTED(VALUE obj)
1526 check_rvalue_consistency(obj);
1527 return RVALUE_WB_UNPROTECTED_BITMAP(obj) != 0;
1531 RVALUE_MARKING(VALUE obj)
1533 check_rvalue_consistency(obj);
1534 return RVALUE_MARKING_BITMAP(obj) != 0;
1538 RVALUE_REMEMBERED(VALUE obj)
1540 check_rvalue_consistency(obj);
1541 return RVALUE_MARKING_BITMAP(obj) != 0;
1545 RVALUE_UNCOLLECTIBLE(VALUE obj)
1547 check_rvalue_consistency(obj);
1548 return RVALUE_UNCOLLECTIBLE_BITMAP(obj) != 0;
1552 RVALUE_OLD_P_RAW(VALUE obj)
1555 return (RBASIC(obj)->flags & promoted) == promoted;
1559 RVALUE_OLD_P(VALUE obj)
1561 check_rvalue_consistency(obj);
1562 return RVALUE_OLD_P_RAW(obj);
1565 #if RGENGC_CHECK_MODE || GC_DEBUG
1567 RVALUE_AGE(VALUE obj)
1569 check_rvalue_consistency(obj);
1570 return RVALUE_FLAGS_AGE(RBASIC(obj)->flags);
1577 MARK_IN_BITMAP(&page->uncollectible_bits[0], obj);
1578 objspace->rgengc.old_objects++;
1579 rb_transient_heap_promote(obj);
1581 #if RGENGC_PROFILE >= 2
1582 objspace->profile.total_promoted_count++;
1590 RB_DEBUG_COUNTER_INC(obj_promote);
1591 RVALUE_PAGE_OLD_UNCOLLECTIBLE_SET(objspace, GET_HEAP_PAGE(obj), obj);
1595 RVALUE_FLAGS_AGE_SET(VALUE flags, int age)
1598 flags |= (age << RVALUE_AGE_SHIFT);
1607 int age = RVALUE_FLAGS_AGE(flags);
1609 if (RGENGC_CHECK_MODE && age == RVALUE_OLD_AGE) {
1610 rb_bug("RVALUE_AGE_INC: can not increment age of OLD object %s.", obj_info(obj));
1614 RBASIC(obj)->flags = RVALUE_FLAGS_AGE_SET(flags, age);
1616 if (age == RVALUE_OLD_AGE) {
1617 RVALUE_OLD_UNCOLLECTIBLE_SET(objspace, obj);
1619 check_rvalue_consistency(obj);
1626 check_rvalue_consistency(obj);
1627 GC_ASSERT(!RVALUE_OLD_P(obj));
1629 RBASIC(obj)->flags = RVALUE_FLAGS_AGE_SET(RBASIC(obj)->flags, RVALUE_OLD_AGE);
1630 RVALUE_OLD_UNCOLLECTIBLE_SET(objspace, obj);
1632 check_rvalue_consistency(obj);
1639 check_rvalue_consistency(obj);
1640 GC_ASSERT(!RVALUE_OLD_P(obj));
1642 RBASIC(obj)->flags = RVALUE_FLAGS_AGE_SET(RBASIC(obj)->flags, RVALUE_OLD_AGE - 1);
1644 check_rvalue_consistency(obj);
1650 RBASIC(obj)->flags = RVALUE_FLAGS_AGE_SET(RBASIC(obj)->flags, 0);
1651 CLEAR_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS(obj), obj);
1657 check_rvalue_consistency(obj);
1658 GC_ASSERT(RVALUE_OLD_P(obj));
1660 if (!is_incremental_marking(objspace) && RVALUE_REMEMBERED(obj)) {
1661 CLEAR_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj);
1664 RVALUE_DEMOTE_RAW(objspace, obj);
1666 if (RVALUE_MARKED(obj)) {
1667 objspace->rgengc.old_objects--;
1670 check_rvalue_consistency(obj);
1674 RVALUE_AGE_RESET_RAW(VALUE obj)
1676 RBASIC(obj)->flags = RVALUE_FLAGS_AGE_SET(RBASIC(obj)->flags, 0);
1680 RVALUE_AGE_RESET(VALUE obj)
1682 check_rvalue_consistency(obj);
1683 GC_ASSERT(!RVALUE_OLD_P(obj));
1685 RVALUE_AGE_RESET_RAW(obj);
1686 check_rvalue_consistency(obj);
1690 RVALUE_BLACK_P(VALUE obj)
1692 return RVALUE_MARKED(obj) && !RVALUE_MARKING(obj);
1697 RVALUE_GREY_P(VALUE obj)
1699 return RVALUE_MARKED(obj) && RVALUE_MARKING(obj);
1704 RVALUE_WHITE_P(VALUE obj)
1706 return RVALUE_MARKED(obj) == FALSE;
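/*
 * Editorial note: the three predicates above implement the tri-color
 * abstraction used by incremental marking: white = not yet marked, grey =
 * marked but still flagged as "marking" (its children have not all been
 * traced), black = marked and fully traced.  Write barriers preserve the
 * invariant that a black object never points to a white one while
 * incremental marking is in progress.
 */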
1713 static inline void *
1716 return calloc(1, n);
1720 rb_objspace_alloc(void)
1723 objspace->flags.measure_gc = 1;
1724 malloc_limit = gc_params.malloc_limit_min;
1726 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
1729 size_pool->slot_size = sizeof(RVALUE) * (1 << i);
1731 list_head_init(&SIZE_POOL_EDEN_HEAP(size_pool)->pages);
1732 list_head_init(&SIZE_POOL_TOMB_HEAP(size_pool)->pages);
1747 if (is_lazy_sweeping(objspace))
1748 rb_bug("lazy sweeping underway when freeing object space");
1750 if (objspace->profile.records) {
1751 free(objspace->profile.records);
1752 objspace->profile.records = 0;
1757 for (list = global_list; list; list = next) {
1762 if (heap_pages_sorted) {
1764 for (i = 0; i < heap_allocated_pages; ++i) {
1765 heap_page_free(objspace, heap_pages_sorted[i]);
1767 free(heap_pages_sorted);
1768 heap_allocated_pages = 0;
1769 heap_pages_sorted_length = 0;
1770 heap_pages_lomem = 0;
1771 heap_pages_himem = 0;
1773 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
1775 SIZE_POOL_EDEN_HEAP(size_pool)->total_pages = 0;
1776 SIZE_POOL_EDEN_HEAP(size_pool)->total_slots = 0;
1779 st_free_table(objspace->id_to_obj_tbl);
1780 st_free_table(objspace->obj_to_id_tbl);
1782 free_stack_chunks(&objspace->mark_stack);
1783 mark_stack_free_cache(&objspace->mark_stack);
1789 heap_pages_expand_sorted_to(rb_objspace_t *objspace, size_t next_length)
1794 gc_report(3, objspace, "heap_pages_expand_sorted: next_length: %"PRIdSIZE", size: %"PRIdSIZE"\n",
1797 if (heap_pages_sorted_length > 0) {
1798 sorted = (struct heap_page **)realloc(heap_pages_sorted, size);
1799 if (sorted) heap_pages_sorted = sorted;
1802 sorted = heap_pages_sorted = (struct heap_page **)malloc(size);
1809 heap_pages_sorted_length = next_length;
1820 size_t next_length = heap_allocatable_pages(objspace);
1821 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
1823 next_length += SIZE_POOL_EDEN_HEAP(size_pool)->total_pages;
1824 next_length += SIZE_POOL_TOMB_HEAP(size_pool)->total_pages;
1827 if (next_length > heap_pages_sorted_length) {
1828 heap_pages_expand_sorted_to(objspace, next_length);
1831 GC_ASSERT(heap_allocatable_pages(objspace) + heap_eden_total_pages(objspace) <= heap_pages_sorted_length);
1832 GC_ASSERT(heap_allocated_pages <= heap_pages_sorted_length);
1838 size_pool->allocatable_pages = s;
1839 heap_pages_expand_sorted(objspace);
1845 ASSERT_vm_locking();
1849 asan_unpoison_object(obj, false);
1851 asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
1853 p->as.free.flags = 0;
1854 p->as.free.next = page->freelist;
1856 asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
1858 if (RGENGC_CHECK_MODE &&
1860 !(&page->start[0] <= (RVALUE *)obj &&
1861 (uintptr_t)obj < ((uintptr_t)page->start + (page->total_slots * page->slot_size)) &&
1862 obj % sizeof(RVALUE) == 0)) {
1863 rb_bug("heap_page_add_freeobj: %p is not rvalue.", (void *)p);
1866 asan_poison_object(obj);
1867 gc_report(3, objspace, "heap_page_add_freeobj: add %p to freelist\n", (void *)obj);
1873 asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
1874 GC_ASSERT(page->free_slots != 0);
1875 GC_ASSERT(page->freelist != NULL);
1877 page->free_next = heap->free_pages;
1878 heap->free_pages = page;
1880 RUBY_DEBUG_LOG("page:%p freelist:%p", (void *)page, (void *)page->freelist);
1882 asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
1885 #if GC_ENABLE_INCREMENTAL_MARK
1889 asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
1890 GC_ASSERT(page->free_slots != 0);
1891 GC_ASSERT(page->freelist != NULL);
1893 page->free_next = heap->pooled_pages;
1894 heap->pooled_pages = page;
1895 objspace->rincgc.pooled_slots += page->free_slots;
1897 asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
1904 list_del(&page->page_node);
1905 heap->total_pages--;
1906 heap->total_slots -= page->total_slots;
1909 static void rb_aligned_free(void *ptr, size_t size);
1914 heap_allocated_pages--;
1915 objspace->profile.total_freed_pages++;
1916 rb_aligned_free(GET_PAGE_BODY(page->start), HEAP_PAGE_SIZE);
1925 bool has_pages_in_tomb_heap = FALSE;
1926 for (i = 0; i < SIZE_POOL_COUNT; i++) {
1927 if (!list_empty(&SIZE_POOL_TOMB_HEAP(&size_pools[i])->pages)) {
1928 has_pages_in_tomb_heap = TRUE;
1933 if (has_pages_in_tomb_heap) {
1934 for (i = j = 1; j < heap_allocated_pages; i++) {
1935 struct heap_page *page = heap_pages_sorted[i];
1937 if (page->flags.in_tomb && page->free_slots == page->total_slots) {
1938 heap_unlink_page(objspace, SIZE_POOL_TOMB_HEAP(page->size_pool), page);
1939 heap_page_free(objspace, page);
1943 heap_pages_sorted[j] = page;
1949 struct heap_page *hipage = heap_pages_sorted[heap_allocated_pages - 1];
1950 uintptr_t himem = (uintptr_t)hipage->start + (hipage->total_slots * hipage->slot_size);
1951 GC_ASSERT(himem <= (uintptr_t)heap_pages_himem);
1952 heap_pages_himem = (RVALUE *)himem;
1954 GC_ASSERT(j == heap_allocated_pages);
1961 uintptr_t start, end, p;
1964 uintptr_t hi, lo, mid;
1965 size_t stride = size_pool->slot_size;
1966 unsigned int limit = (unsigned int)((HEAP_PAGE_SIZE - sizeof(struct heap_page_header)))/(int)stride;
1969 page_body = (struct heap_page_body *)rb_aligned_malloc(HEAP_PAGE_ALIGN, HEAP_PAGE_SIZE);
1970 if (page_body == 0) {
1975 page = calloc1(sizeof(struct heap_page));
1977 rb_aligned_free(page_body, HEAP_PAGE_SIZE);
1985 int delta = (int)sizeof(RVALUE) - (start % (int)sizeof(RVALUE));
1986 start = start + delta;
1987 GC_ASSERT(NUM_IN_PAGE(start) == 0 || NUM_IN_PAGE(start) == 1);
1993 if (NUM_IN_PAGE(start) == 1) {
1994 start += stride - sizeof(RVALUE);
1997 GC_ASSERT(NUM_IN_PAGE(start) * sizeof(RVALUE) % stride == 0);
1999 limit = (HEAP_PAGE_SIZE - (int)(start - (uintptr_t)page_body))/(int)stride;
2001 end = start + (limit * (int)stride);
2005 hi = (uintptr_t)heap_allocated_pages;
2009 mid = (lo + hi) / 2;
2010 mid_page = heap_pages_sorted[mid];
2011 if ((uintptr_t)mid_page->start < start) {
2014 else if ((uintptr_t)mid_page->start > start) {
2018 rb_bug("same heap page is allocated: %p at %"PRIuVALUE, (void *)page_body, (VALUE)mid);
2022 if (hi < (uintptr_t)heap_allocated_pages) {
2023 MEMMOVE(&heap_pages_sorted[hi+1], &heap_pages_sorted[hi], struct heap_page_header*, heap_allocated_pages - hi);
2026 heap_pages_sorted[hi] = page;
2028 heap_allocated_pages++;
2030 GC_ASSERT(heap_eden_total_pages(objspace) + heap_allocatable_pages(objspace) <= heap_pages_sorted_length);
2031 GC_ASSERT(heap_eden_total_pages(objspace) + heap_tomb_total_pages(objspace) == heap_allocated_pages - 1);
2032 GC_ASSERT(heap_allocated_pages <= heap_pages_sorted_length);
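/*
 * Editorial note: heap_pages_sorted is kept ordered by page start address
 * precisely so that is_pointer_to_heap() (further down) can binary-search it;
 * that is what lets conservative stack/register scanning decide cheaply
 * whether an arbitrary word looks like a live slot pointer.
 */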
2034 objspace->profile.total_allocated_pages++;
2036 if (heap_allocated_pages > heap_pages_sorted_length) {
2037 rb_bug("heap_page_allocate: allocated(%"PRIdSIZE") > sorted(%"PRIdSIZE")",
2038 heap_allocated_pages, heap_pages_sorted_length);
2041 if (heap_pages_lomem == 0 || (uintptr_t)heap_pages_lomem > start) heap_pages_lomem = (RVALUE *)start;
2042 if ((uintptr_t)heap_pages_himem < end) heap_pages_himem = (RVALUE *)end;
2044 page->start = (RVALUE *)start;
2045 page->total_slots = limit;
2046 page->slot_size = size_pool->slot_size;
2047 page->size_pool = size_pool;
2048 page_body->header.page = page;
2050 for (p = start; p != end; p += stride) {
2051 gc_report(3, objspace, "assign_heap_page: %p is added to freelist\n", (void *)p);
2052 heap_page_add_freeobj(objspace, page, (VALUE)p);
2054 page->free_slots = limit;
2056 asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
2065 list_for_each_safe(&SIZE_POOL_TOMB_HEAP(size_pool)->pages, page, next, page_node) {
2066 asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
2067 if (page->freelist != NULL) {
2068 heap_unlink_page(objspace, &size_pool->tomb_heap, page);
2069 asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
2081 const char *method = "recycle";
2083 size_pool->allocatable_pages--;
2085 page = heap_page_resurrect(objspace, size_pool);
2088 page = heap_page_allocate(objspace, size_pool);
2089 method = "allocate";
2091 if (0) fprintf(stderr, "heap_page_create: %s - %p, "
2092 "heap_allocated_pages: %"PRIdSIZE", "
2093 "heap_allocated_pages: %"PRIdSIZE", "
2094 "tomb->total_pages: %"PRIdSIZE"\n",
2095 method, (void *)page, heap_pages_sorted_length, heap_allocated_pages, SIZE_POOL_TOMB_HEAP(size_pool)->total_pages);
2103 GC_ASSERT(!(heap == SIZE_POOL_EDEN_HEAP(size_pool) && heap->sweeping_page));
2104 page->flags.in_tomb = (heap == SIZE_POOL_TOMB_HEAP(size_pool));
2105 list_add_tail(&heap->pages, &page->page_node);
2106 heap->total_pages++;
2107 heap->total_slots += page->total_slots;
2113 struct heap_page *page = heap_page_create(objspace, size_pool);
2114 heap_add_page(objspace, size_pool, heap, page);
2115 heap_add_freepage(heap, page);
2123 size_pool_allocatable_pages_set(objspace, size_pool, add);
2125 for (i = 0; i < add; i++) {
2126 heap_assign_page(objspace, size_pool, heap);
2129 GC_ASSERT(size_pool->allocatable_pages == 0);
2133 heap_extend_pages(rb_objspace_t *objspace, size_t free_slots, size_t total_slots, size_t used)
2135 double goal_ratio = gc_params.heap_free_slots_goal_ratio;
2138 if (goal_ratio == 0.0) {
2139 next_used = (size_t)(used * gc_params.growth_factor);
2145 double f = (double)(total_slots - free_slots) / ((1 - goal_ratio) * total_slots);
2147 if (f > gc_params.growth_factor) f = gc_params.growth_factor;
2148 if (f < 1.0) f = 1.1;
2150 next_used = (size_t)(f * used);
2154 "free_slots(%8"PRIuSIZE
")/total_slots(%8"PRIuSIZE
")=%1.2f,"
2155 " G(%1.2f), f(%1.2f),"
2156 " used(%8"PRIuSIZE
") => next_used(%8"PRIuSIZE
")\n",
2157 free_slots, total_slots, free_slots/(
double)total_slots,
2158 goal_ratio, f, used, next_used);
2162 if (gc_params.growth_max_slots > 0) {
2163 size_t max_used = (size_t)(used + gc_params.growth_max_slots/HEAP_PAGE_OBJ_LIMIT);
2164 if (next_used > max_used) next_used = max_used;
2167 size_t extend_page_count = next_used - used;
2169 if (extend_page_count == 0) extend_page_count = 1;
2171 return extend_page_count;
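/*
 * Editorial note: heap_extend_pages() sizes the next heap roughly so that
 * heap_free_slots_goal_ratio of the slots would be free.  Worked example
 * (illustrative numbers): with total_slots = 10000, free_slots = 2000 and
 * goal_ratio = 0.40, f = 8000 / (0.6 * 10000) = 1.33, so next_used is about
 * 1.33x the current page count, clamped by growth_factor and
 * growth_max_slots.
 */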
2177 if (size_pool->allocatable_pages > 0) {
2178 gc_report(1, objspace, "heap_increment: heap_pages_sorted_length: %"PRIdSIZE", "
2179 "heap_pages_inc: %"PRIdSIZE", heap->total_pages: %"PRIdSIZE"\n",
2180 heap_pages_sorted_length, size_pool->allocatable_pages, heap->total_pages);
2182 GC_ASSERT(heap_allocatable_pages(objspace) + heap_eden_total_pages(objspace) <= heap_pages_sorted_length);
2183 GC_ASSERT(heap_allocated_pages <= heap_pages_sorted_length);
2185 heap_assign_page(objspace, size_pool, heap);
2194 GC_ASSERT(heap->free_pages == NULL);
2196 if (is_lazy_sweeping(objspace)) {
2197 gc_sweep_continue(objspace, size_pool, heap);
2199 else if (is_incremental_marking(objspace)) {
2200 gc_marks_continue(objspace, size_pool, heap);
2203 if (heap->free_pages == NULL &&
2204 (will_be_incremental_marking(objspace) || heap_increment(objspace, size_pool, heap) == FALSE) &&
2205 gc_start(objspace, GPR_FLAG_NEWOBJ) == FALSE) {
2215 objspace->flags.has_hook = (objspace->hook_events != 0);
2221 if (UNLIKELY(!ec->cfp)) return;
2222 const VALUE *pc = ec->cfp->pc;
2223 if (pc && VM_FRAME_RUBYFRAME_P(ec->cfp)) {
2227 EXEC_EVENT_HOOK(ec, event, ec->cfp->self, 0, 0, 0, data);
2231 #define gc_event_hook_available_p(objspace) ((objspace)->flags.has_hook)
2232 #define gc_event_hook_needed_p(objspace, event) ((objspace)->hook_events & (event))
2234 #define gc_event_hook_prep(objspace, event, data, prep) do { \
2235 if (UNLIKELY(gc_event_hook_needed_p(objspace, event))) { \
2237 gc_event_hook_body(GET_EC(), (objspace), (event), (data)); \
2241 #define gc_event_hook(objspace, event, data) gc_event_hook_prep(objspace, event, data, (void)0)
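/*
 * Editorial note: these gc_event_hook* macros guard the internal
 * object-lifetime tracepoints (RUBY_INTERNAL_EVENT_NEWOBJ,
 * RUBY_INTERNAL_EVENT_FREEOBJ and the GC phase events).  The cheap
 * flags.has_hook / hook_events tests come first so that the common case,
 * no tracepoint registered, costs only a branch on the allocation path.
 */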
2246 #if !__has_feature(memory_sanitizer)
2251 p->as.basic.flags = flags;
2254 #if RACTOR_CHECK_MODE
2255 rb_ractor_setup_belonging(obj);
2258 #if RGENGC_CHECK_MODE
2259 p->as.values.v1 = p->as.values.v2 = p->as.values.v3 = 0;
2261 RB_VM_LOCK_ENTER_NO_BARRIER();
2263 check_rvalue_consistency(obj);
2265 GC_ASSERT(RVALUE_MARKED(obj) == FALSE);
2266 GC_ASSERT(RVALUE_MARKING(obj) == FALSE);
2267 GC_ASSERT(RVALUE_OLD_P(obj) == FALSE);
2268 GC_ASSERT(RVALUE_WB_UNPROTECTED(obj) == FALSE);
2271 if (RVALUE_AGE(obj) != 2) rb_bug("newobj: %s of age (%d) != 2.", obj_info(obj), RVALUE_AGE(obj));
2274 if (RVALUE_AGE(obj) > 0) rb_bug("newobj: %s of age (%d) > 0.", obj_info(obj), RVALUE_AGE(obj));
2276 if (rgengc_remembered(objspace, (VALUE)obj)) rb_bug("newobj: %s is remembered.", obj_info(obj));
2278 RB_VM_LOCK_LEAVE_NO_BARRIER();
2281 if (UNLIKELY(wb_protected == FALSE)) {
2282 ASSERT_vm_locking();
2283 MARK_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), obj);
2287 objspace->total_allocated_objects++;
2291 objspace->profile.total_generated_normal_object_count++;
2292 #if RGENGC_PROFILE >= 2
2293 objspace->profile.generated_normal_object_count_types[BUILTIN_TYPE(obj)]++;
2297 objspace->profile.total_generated_shady_object_count++;
2298 #if RGENGC_PROFILE >= 2
2299 objspace->profile.generated_shady_object_count_types[BUILTIN_TYPE(obj)]++;
2305 RANY(obj)->file = rb_source_location_cstr(&RANY(obj)->line);
2309 gc_report(5, objspace, "newobj: %s\n", obj_info(obj));
2311 #if RGENGC_OLD_NEWOBJ_CHECK > 0
2313 static int newobj_cnt = RGENGC_OLD_NEWOBJ_CHECK;
2315 if (!is_incremental_marking(objspace) &&
2318 if (--newobj_cnt == 0) {
2319 newobj_cnt = RGENGC_OLD_NEWOBJ_CHECK;
2321 gc_mark_set(objspace, obj);
2322 RVALUE_AGE_SET_OLD(objspace, obj);
2324 rb_gc_writebarrier_remember(obj);
2335 static inline void ractor_set_cache(rb_ractor_t *cr, struct heap_page *page, size_t size_pool_idx);
2338 rb_gc_obj_slot_size(VALUE obj)
2340 return GET_HEAP_PAGE(obj)->slot_size;
2343 static inline size_t
2344 size_pool_slot_size(unsigned char pool_id)
2346 GC_ASSERT(pool_id < SIZE_POOL_COUNT);
2348 size_t slot_size = (1 << pool_id) * sizeof(RVALUE);
2350 #if RGENGC_CHECK_MODE
2352 GC_ASSERT(size_pools[pool_id].slot_size == (short)slot_size);
2359 rb_gc_size_allocatable_p(size_t size)
2361 return size <= size_pool_slot_size(SIZE_POOL_COUNT - 1);
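/*
 * Editorial note: with variable-width allocation each size pool i serves
 * slots of (1 << i) * sizeof(RVALUE) bytes, and size_pool_idx_for_size()
 * below picks the smallest pool whose slot fits the request by rounding the
 * slot count up to a power of two.  Illustrative example assuming a 40-byte
 * RVALUE: a 100-byte request gives slot_count = CEILDIV(100, 40) = 3, so
 * idx = 64 - nlz_int64(2) = 2 and the object lands in the 160-byte pool.
 */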
2368 RVALUE *p = cache->freelist;
2372 cache->freelist = p->as.free.next;
2373 asan_unpoison_object(obj, true);
2374 #if RGENGC_CHECK_MODE
2376 MEMZERO((char *)obj, char, size_pool_slot_size(size_pool_idx));
2388 ASSERT_vm_locking();
2392 while (heap->free_pages == NULL) {
2393 heap_prepare(objspace, size_pool, heap);
2395 page = heap->free_pages;
2396 heap->free_pages = page->free_next;
2398 GC_ASSERT(page->free_slots != 0);
2399 RUBY_DEBUG_LOG("page:%p freelist:%p cnt:%d", (void *)page, (void *)page->freelist, page->free_slots);
2401 asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
2409 gc_report(3, &rb_objspace, "ractor_set_cache: Using page %p\n", (void *)GET_PAGE_BODY(page->start));
2413 cache->using_page = page;
2414 cache->freelist = page->freelist;
2415 page->free_slots = 0;
2416 page->freelist = NULL;
2418 asan_unpoison_object((VALUE)cache->freelist, false);
2420 asan_poison_object((VALUE)cache->freelist);
2426 ASSERT_vm_locking();
2429 struct heap_page *page = heap_next_freepage(objspace, size_pool, SIZE_POOL_EDEN_HEAP(size_pool));
2431 ractor_set_cache(cr, page, size_pool_idx);
2438 p->as.values.v1 = v1;
2439 p->as.values.v2 = v2;
2440 p->as.values.v3 = v3;
2444 static inline size_t
2445 size_pool_idx_for_size(size_t size)
2448 size_t slot_count = CEILDIV(size, sizeof(RVALUE));
2451 size_t size_pool_idx = 64 - nlz_int64(slot_count - 1);
2452 if (size_pool_idx >= SIZE_POOL_COUNT) {
2453 rb_bug("size_pool_idx_for_size: allocation size too large");
2456 return size_pool_idx;
2458 GC_ASSERT(size <= sizeof(RVALUE));
2471 RB_VM_LOCK_ENTER_CR_LEV(cr, &lev);
2473 if (UNLIKELY(during_gc || ruby_gc_stressful)) {
2477 rb_bug("object allocation during garbage collection phase");
2480 if (ruby_gc_stressful) {
2481 if (!garbage_collect(objspace, GPR_FLAG_NEWOBJ)) {
2488 while ((obj = ractor_cached_free_region(objspace, cr, size_pool_idx)) == Qfalse) {
2489 ractor_cache_slots(objspace, cr, size_pool_idx);
2491 GC_ASSERT(obj != 0);
2492 newobj_init(klass, flags, wb_protected, objspace, obj);
2496 RB_VM_LOCK_LEAVE_CR_LEV(cr, &lev);
2501 NOINLINE(static VALUE newobj_slowpath_wb_protected(VALUE klass, VALUE flags,
2503 NOINLINE(static VALUE newobj_slowpath_wb_unprotected(VALUE klass, VALUE flags,
2509 return newobj_slowpath(klass, flags, objspace, cr, TRUE, size_pool_idx);
2515 return newobj_slowpath(klass, flags, objspace, cr, FALSE, size_pool_idx);
2524 RB_DEBUG_COUNTER_INC(obj_newobj);
2525 (void)RB_DEBUG_COUNTER_INC_IF(obj_newobj_wb_unprotected, !wb_protected);
2527 #if GC_DEBUG_STRESS_TO_CLASS
2528 if (UNLIKELY(stress_to_class)) {
2530 for (i = 0; i < cnt; ++i) {
2536 size_t size_pool_idx = size_pool_idx_for_size(alloc_size);
2538 if ((!UNLIKELY(during_gc ||
2539 ruby_gc_stressful ||
2540 gc_event_hook_available_p(objspace)) &&
2542 (obj = ractor_cached_free_region(objspace, cr, size_pool_idx)) != Qfalse)) {
2544 newobj_init(klass, flags, wb_protected, objspace, obj);
2547 RB_DEBUG_COUNTER_INC(obj_newobj_slowpath);
2549 obj = wb_protected ?
2550 newobj_slowpath_wb_protected(klass, flags, objspace, cr, size_pool_idx) :
2551 newobj_slowpath_wb_unprotected(klass, flags, objspace, cr, size_pool_idx);
2560 VALUE obj = newobj_of0(klass, flags, wb_protected, GET_RACTOR(), alloc_size);
2561 return newobj_fill(obj, v1, v2, v3);
2567 VALUE obj = newobj_of0(klass, flags, wb_protected, cr, alloc_size);
2568 return newobj_fill(obj, v1, v2, v3);
2572 rb_wb_unprotected_newobj_of(VALUE klass, VALUE flags, size_t size)
2575 return newobj_of(klass, flags, 0, 0, 0, FALSE, size);
2579 rb_wb_protected_newobj_of(VALUE klass, VALUE flags, size_t size)
2582 return newobj_of(klass, flags, 0, 0, 0, TRUE, size);
2589 return newobj_of_cr(rb_ec_ractor_ptr(ec), klass, flags, 0, 0, 0, TRUE, size);
2597 return newobj_of(0, T_NONE, 0, 0, 0, FALSE, sizeof(RVALUE));
2604 st_table *index_tbl = RCLASS_IV_INDEX_TBL(klass);
2609 rb_init_iv_list(obj);
2618 #define UNEXPECTED_NODE(func) \
2619 rb_bug(#func"(): GC does not handle T_NODE 0x%x(%p) 0x%"PRIxVALUE, \
2620 BUILTIN_TYPE(obj), (void*)(obj), RBASIC(obj)->flags)
2623 rb_imemo_name(enum imemo_type type)
2627 #define IMEMO_NAME(x) case imemo_##x: return #x;
2631 IMEMO_NAME(throw_data);
2638 IMEMO_NAME(parser_strterm);
2639 IMEMO_NAME(callinfo);
2640 IMEMO_NAME(callcache);
2641 IMEMO_NAME(constcache);
2652 size_t size = sizeof(RVALUE);
2654 return newobj_of(v0, flags, v1, v2, v3, TRUE, size);
2660 size_t size = sizeof(RVALUE);
2662 return newobj_of(v0, flags, v1, v2, v3, FALSE, size);
2666 rb_imemo_tmpbuf_auto_free_maybe_mark_buffer(void *buf, size_t cnt)
2668 return rb_imemo_tmpbuf_new((VALUE)buf, 0, (VALUE)cnt, 0);
2678 imemo_memsize(VALUE obj)
2681 switch (imemo_type(obj)) {
2683 size += sizeof(RANY(obj)->as.imemo.ment.def);
2686 size += rb_iseq_memsize((rb_iseq_t *)obj);
2689 size += RANY(obj)->as.imemo.env.env_size * sizeof(VALUE);
2692 size += RANY(obj)->as.imemo.alloc.cnt * sizeof(VALUE);
2695 size += rb_ast_memsize(&RANY(obj)->as.imemo.ast);
2699 case imemo_throw_data:
2702 case imemo_parser_strterm:
2715 VALUE memo = rb_imemo_new(type, v1, v2, v3, v0);
2716 fprintf(stderr, "memo %p (type: %d) @ %s:%d\n", (void *)memo, imemo_type(memo), file, line);
2722 rb_class_allocate_instance(VALUE klass)
2724 st_table *index_tbl = RCLASS_IV_INDEX_TBL(klass);
2731 rb_init_iv_list(obj);
2738 rb_data_object_check(VALUE klass)
2740 if (klass != rb_cObject && (rb_get_alloc_func(klass) == rb_class_allocate_instance)) {
2742 #if RUBY_VERSION_SINCE(3, 2)
2743 RBIMPL_TODO("enable the warning at this release");
2744 rb_warn("undefining the allocator of T_DATA class %"PRIsVALUE, klass);
2753 if (klass) rb_data_object_check(klass);
2768 RBIMPL_NONNULL_ARG(type);
2769 if (klass) rb_data_object_check(klass);
2782 rb_objspace_data_type_memsize(VALUE obj)
2787 if (ptr && type->function.dsize) {
2788 return type->function.dsize(ptr);
2795 rb_objspace_data_type_name(VALUE obj)
2805 PUREFUNC(static inline int is_pointer_to_heap(rb_objspace_t *objspace, void *ptr);)
2809 register RVALUE *p = RANY(ptr);
2811 register size_t hi, lo, mid;
2813 RB_DEBUG_COUNTER_INC(gc_isptr_trial);
2815 if (p < heap_pages_lomem || p > heap_pages_himem) return FALSE;
2816 RB_DEBUG_COUNTER_INC(gc_isptr_range);
2819 RB_DEBUG_COUNTER_INC(gc_isptr_align);
2823 hi = heap_allocated_pages;
2825 mid = (lo + hi) / 2;
2826 page = heap_pages_sorted[mid];
2827 if (page->start <= p) {
2828 if ((uintptr_t)p < ((uintptr_t)page->start + (page->total_slots * page->slot_size))) {
2829 RB_DEBUG_COUNTER_INC(gc_isptr_maybe);
2831 if (page->flags.in_tomb) {
2835 if ((NUM_IN_PAGE(p) * sizeof(RVALUE)) % page->slot_size != 0) return FALSE;
2849 static enum rb_id_table_iterator_result
2850 free_const_entry_i(VALUE value, void *data)
2854 return ID_TABLE_CONTINUE;
2860 rb_id_table_foreach_values(tbl, free_const_entry_i, 0);
2861 rb_id_table_free(tbl);
2865 free_iv_index_tbl_free_i(st_data_t key, st_data_t value, st_data_t data)
2867 xfree((void *)value);
2872 iv_index_tbl_free(struct st_table *tbl)
2874 st_foreach(tbl, free_iv_index_tbl_free_i, 0);
2884 for (int i=0; i<ccs->len; i++) {
2887 void *ptr = asan_poisoned_object_p((VALUE)cc);
2888 asan_unpoison_object((VALUE)cc, false);
2890 if (is_pointer_to_heap(objspace, (void *)cc) &&
2891 IMEMO_TYPE_P(cc, imemo_callcache) &&
2892 cc->klass == klass) {
2897 asan_poison_object((VALUE)cc);
2902 asan_poison_object((VALUE)cc);
2905 vm_cc_invalidate(cc);
2915 RB_DEBUG_COUNTER_INC(ccs_free);
2916 vm_ccs_free(ccs, TRUE, NULL, Qundef);
2925 static enum rb_id_table_iterator_result
2926 cc_table_mark_i(ID id, VALUE ccs_ptr, void *data_ptr)
2930 VM_ASSERT(vm_ccs_p(ccs));
2931 VM_ASSERT(id == ccs->cme->called_id);
2933 if (METHOD_ENTRY_INVALIDATED(ccs->cme)) {
2934 rb_vm_ccs_free(ccs);
2935 return ID_TABLE_DELETE;
2938 gc_mark(data->objspace, (VALUE)ccs->cme);
2940 for (int i=0; i<ccs->len; i++) {
2941 VM_ASSERT(data->klass == ccs->entries[i].cc->klass);
2942 VM_ASSERT(vm_cc_check_cme(ccs->entries[i].cc, ccs->cme));
2944 gc_mark(data->objspace, (VALUE)ccs->entries[i].ci);
2945 gc_mark(data->objspace, (VALUE)ccs->entries[i].cc);
2947 return ID_TABLE_CONTINUE;
2954 struct rb_id_table *cc_tbl = RCLASS_CC_TBL(klass);
2957 .objspace = objspace,
2960 rb_id_table_foreach(cc_tbl, cc_table_mark_i, &data);
2964 static enum rb_id_table_iterator_result
2965 cc_table_free_i(VALUE ccs_ptr, void *data_ptr)
2969 VM_ASSERT(vm_ccs_p(ccs));
2970 vm_ccs_free(ccs, data->alive, data->objspace, data->klass);
2971 return ID_TABLE_CONTINUE;
2977 struct rb_id_table *cc_tbl = RCLASS_CC_TBL(klass);
2981 .objspace = objspace,
2985 rb_id_table_foreach_values(cc_tbl, cc_table_free_i, &data);
2986 rb_id_table_free(cc_tbl);
2990 static enum rb_id_table_iterator_result
2991 cvar_table_free_i(VALUE value, void *ctx)
2993 xfree((void *)value);
2994 return ID_TABLE_CONTINUE;
2998 rb_cc_table_free(VALUE klass)
3006 struct RZombie *zombie = RZOMBIE(obj);
3008 zombie->dfree = dfree;
3009 zombie->data = data;
3010 zombie->next = heap_pages_deferred_final;
3011 heap_pages_deferred_final = (VALUE)zombie;
3013 struct heap_page *page = GET_HEAP_PAGE(obj);
3014 page->final_slots++;
3015 heap_pages_final_slots++;
3021 rb_io_t *fptr = RANY(obj)->as.file.fptr;
3022 make_zombie(objspace, obj, rb_io_fptr_finalize_internal, fptr);
3028 ASSERT_vm_locking();
3029 st_data_t o = (st_data_t)obj, id;
3034 if (st_delete(objspace->obj_to_id_tbl, &o, &id)) {
3036 st_delete(objspace->id_to_obj_tbl, &id, NULL);
3039 rb_bug("Object ID seen, but not in mapping table: %s\n", obj_info(obj));
3046 RB_DEBUG_COUNTER_INC(obj_free);
3056 rb_bug("obj_free() called for broken object");
3068 obj_free_object_id(objspace, obj);
3071 if (RVALUE_WB_UNPROTECTED(obj)) CLEAR_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), obj);
3073 #if RGENGC_CHECK_MODE
3074 #define CHECK(x) if (x(obj) != FALSE) rb_bug("obj_free: " #x "(%s) != FALSE", obj_info(obj))
3075 CHECK(RVALUE_WB_UNPROTECTED);
3076 CHECK(RVALUE_MARKED);
3077 CHECK(RVALUE_MARKING);
3078 CHECK(RVALUE_UNCOLLECTIBLE);
3084 if (RANY(obj)->as.basic.flags & ROBJECT_EMBED) {
3085 RB_DEBUG_COUNTER_INC(obj_obj_embed);
3087 else if (ROBJ_TRANSIENT_P(obj)) {
3088 RB_DEBUG_COUNTER_INC(obj_obj_transient);
3091 xfree(RANY(obj)->as.object.as.heap.ivptr);
3092 RB_DEBUG_COUNTER_INC(obj_obj_ptr);
3097 rb_id_table_free(RCLASS_M_TBL(obj));
3098 cc_table_free(objspace, obj, FALSE);
3099 if (RCLASS_IV_TBL(obj)) {
3100 st_free_table(RCLASS_IV_TBL(obj));
3102 if (RCLASS_CONST_TBL(obj)) {
3103 rb_free_const_table(RCLASS_CONST_TBL(obj));
3105 if (RCLASS_IV_INDEX_TBL(obj)) {
3106 iv_index_tbl_free(RCLASS_IV_INDEX_TBL(obj));
3108 if (RCLASS_CVC_TBL(obj)) {
3109 rb_id_table_foreach_values(RCLASS_CVC_TBL(obj), cvar_table_free_i, NULL);
3110 rb_id_table_free(RCLASS_CVC_TBL(obj));
3112 rb_class_remove_subclass_head(obj);
3113 rb_class_remove_from_module_subclasses(obj);
3114 rb_class_remove_from_super_subclasses(obj);
3116 if (RCLASS_EXT(obj))
3117 xfree(RCLASS_EXT(obj));
3130 #if USE_DEBUG_COUNTER
3133 RB_DEBUG_COUNTER_INC(obj_hash_empty);
3136 RB_DEBUG_COUNTER_INC(obj_hash_1);
3139 RB_DEBUG_COUNTER_INC(obj_hash_2);
3142 RB_DEBUG_COUNTER_INC(obj_hash_3);
3145 RB_DEBUG_COUNTER_INC(obj_hash_4);
3151 RB_DEBUG_COUNTER_INC(obj_hash_5_8);
3155 RB_DEBUG_COUNTER_INC(obj_hash_g8);
3158 if (RHASH_AR_TABLE_P(obj)) {
3159 if (RHASH_AR_TABLE(obj) == NULL) {
3160 RB_DEBUG_COUNTER_INC(obj_hash_null);
3163 RB_DEBUG_COUNTER_INC(obj_hash_ar);
3167 RB_DEBUG_COUNTER_INC(obj_hash_st);
3174 if (RHASH_TRANSIENT_P(obj)) {
3175 RB_DEBUG_COUNTER_INC(obj_hash_transient);
3183 GC_ASSERT(RHASH_ST_TABLE_P(obj));
3184 st_free_table(RHASH(obj)->as.st);
3188 if (RANY(obj)->as.regexp.ptr) {
3189 onig_free(RANY(obj)->as.regexp.ptr);
3190 RB_DEBUG_COUNTER_INC(obj_regexp_ptr);
3195 int free_immediately = FALSE;
3196 void (*dfree)(void *);
3200 free_immediately = (RANY(obj)->as.typeddata.type->flags & RUBY_TYPED_FREE_IMMEDIATELY) != 0;
3201 dfree = RANY(obj)->as.typeddata.type->function.dfree;
3202 if (0 && free_immediately == 0) {
3204 fprintf(stderr, "not immediate -> %s\n", RANY(obj)->as.typeddata.type->wrap_struct_name);
3208 dfree = RANY(obj)->as.data.dfree;
3214 RB_DEBUG_COUNTER_INC(obj_data_xfree);
3216 else if (free_immediately) {
3218 RB_DEBUG_COUNTER_INC(obj_data_imm_free);
3221 make_zombie(objspace, obj, dfree, data);
3222 RB_DEBUG_COUNTER_INC(obj_data_zombie);
3227 RB_DEBUG_COUNTER_INC(obj_data_empty);
3232 if (RANY(obj)->as.match.rmatch) {
3233 struct rmatch *rm = RANY(obj)->as.match.rmatch;
3234 #if USE_DEBUG_COUNTER
3235 if (rm->regs.num_regs >= 8) {
3236 RB_DEBUG_COUNTER_INC(obj_match_ge8);
3238 else if (rm->regs.num_regs >= 4) {
3239 RB_DEBUG_COUNTER_INC(obj_match_ge4);
3241 else if (rm->regs.num_regs >= 1) {
3242 RB_DEBUG_COUNTER_INC(obj_match_under4);
3245 onig_region_free(&rm->regs, 0);
3250 RB_DEBUG_COUNTER_INC(obj_match_ptr);
3254 if (RANY(obj)->as.file.fptr) {
3255 make_io_zombie(objspace, obj);
3256 RB_DEBUG_COUNTER_INC(obj_file_ptr);
3261 RB_DEBUG_COUNTER_INC(obj_rational);
3264 RB_DEBUG_COUNTER_INC(obj_complex);
3270 if (RICLASS_OWNS_M_TBL_P(obj)) {
3272 rb_id_table_free(RCLASS_M_TBL(obj));
3274 if (RCLASS_CALLABLE_M_TBL(obj) != NULL) {
3275 rb_id_table_free(RCLASS_CALLABLE_M_TBL(obj));
3277 rb_class_remove_subclass_head(obj);
3278 cc_table_free(objspace, obj, FALSE);
3279 rb_class_remove_from_module_subclasses(obj);
3280 rb_class_remove_from_super_subclasses(obj);
3282 xfree(RCLASS_EXT(obj));
3285 RB_DEBUG_COUNTER_INC(obj_iclass_ptr);
3289 RB_DEBUG_COUNTER_INC(obj_float);
3293 if (!BIGNUM_EMBED_P(obj) && BIGNUM_DIGITS(obj)) {
3294 xfree(BIGNUM_DIGITS(obj));
3295 RB_DEBUG_COUNTER_INC(obj_bignum_ptr);
3298 RB_DEBUG_COUNTER_INC(obj_bignum_embed);
3303 UNEXPECTED_NODE(obj_free);
3307 if ((RBASIC(obj)->flags & RSTRUCT_EMBED_LEN_MASK) ||
3308     RANY(obj)->as.rstruct.as.heap.ptr == NULL) {
3309 RB_DEBUG_COUNTER_INC(obj_struct_embed);
3311 else if (RSTRUCT_TRANSIENT_P(obj)) {
3312 RB_DEBUG_COUNTER_INC(obj_struct_transient);
3315 xfree((void *)RANY(obj)->as.rstruct.as.heap.ptr);
3316 RB_DEBUG_COUNTER_INC(obj_struct_ptr);
3322 rb_gc_free_dsymbol(obj);
3323 RB_DEBUG_COUNTER_INC(obj_symbol);
3328 switch (imemo_type(obj)) {
3330 rb_free_method_entry(&RANY(obj)->as.imemo.ment);
3331 RB_DEBUG_COUNTER_INC(obj_imemo_ment);
3334 rb_iseq_free(&RANY(obj)->as.imemo.iseq);
3335 RB_DEBUG_COUNTER_INC(obj_imemo_iseq);
3338 GC_ASSERT(VM_ENV_ESCAPED_P(RANY(obj)->as.imemo.env.ep));
3340 RB_DEBUG_COUNTER_INC(obj_imemo_env);
3343 xfree(RANY(obj)->as.imemo.alloc.ptr);
3344 RB_DEBUG_COUNTER_INC(obj_imemo_tmpbuf);
3347 rb_ast_free(&RANY(obj)->as.imemo.ast);
3348 RB_DEBUG_COUNTER_INC(obj_imemo_ast);
3351 RB_DEBUG_COUNTER_INC(obj_imemo_cref);
3354 RB_DEBUG_COUNTER_INC(obj_imemo_svar);
3356 case imemo_throw_data:
3357 RB_DEBUG_COUNTER_INC(obj_imemo_throw_data);
3360 RB_DEBUG_COUNTER_INC(obj_imemo_ifunc);
3363 RB_DEBUG_COUNTER_INC(obj_imemo_memo);
3365 case imemo_parser_strterm:
3366 RB_DEBUG_COUNTER_INC(obj_imemo_parser_strterm);
3368 case imemo_callinfo:
3369 RB_DEBUG_COUNTER_INC(obj_imemo_callinfo);
3371 case imemo_callcache:
3372 RB_DEBUG_COUNTER_INC(obj_imemo_callcache);
3374 case imemo_constcache:
3375 RB_DEBUG_COUNTER_INC(obj_imemo_constcache);
3381 rb_bug("gc_sweep(): unknown data type 0x%x(%p) 0x%"PRIxVALUE,
3386 make_zombie(objspace, obj, 0, 0);
3395 #define OBJ_ID_INCREMENT (sizeof(RVALUE) / 2)
3396 #define OBJ_ID_INITIAL (OBJ_ID_INCREMENT * 2)
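/*
 * Editor's note (illustrative sketch, not part of the original source):
 * object IDs are handed out monotonically, starting at OBJ_ID_INITIAL and
 * growing by OBJ_ID_INCREMENT per object, and the two st tables keep the
 * mapping in both directions.  Rough shape of the assignment step, using
 * a plain integer counter as a stand-in for the real (possibly Bignum)
 * next_object_id:
 */
#if 0
static unsigned long next_id_sketch = OBJ_ID_INITIAL;

static unsigned long
assign_object_id_sketch(void *obj, st_table *obj_to_id, st_table *id_to_obj)
{
    unsigned long id = next_id_sketch;
    next_id_sketch += OBJ_ID_INCREMENT;                   /* monotonic, never reused */
    st_insert(obj_to_id, (st_data_t)obj, (st_data_t)id);  /* obj -> id */
    st_insert(id_to_obj, (st_data_t)id, (st_data_t)obj);  /* id -> obj */
    return id;
}
#endif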
3399 object_id_cmp(st_data_t x, st_data_t y)
3401 if (RB_BIGNUM_TYPE_P(x)) {
3410 object_id_hash(st_data_t n)
3412 if (RB_BIGNUM_TYPE_P(n)) {
3416 return st_numhash(n);
3419 static const struct st_hash_type object_id_hash_type = {
3429 #if defined(HAVE_MMAP) && !HAVE_CONST_PAGE_SIZE && !defined(PAGE_MAX_SIZE)
3433 use_mmap_aligned_alloc = PAGE_SIZE <= HEAP_PAGE_SIZE;
3434 # elif defined(HAVE_SYSCONF) && defined(_SC_PAGE_SIZE)
3436 use_mmap_aligned_alloc = sysconf(_SC_PAGE_SIZE) <= HEAP_PAGE_SIZE;
3439 use_mmap_aligned_alloc = FALSE;
3443 objspace->next_object_id = INT2FIX(OBJ_ID_INITIAL);
3444 objspace->id_to_obj_tbl = st_init_table(&object_id_hash_type);
3445 objspace->obj_to_id_tbl = st_init_numtable();
3447 #if RGENGC_ESTIMATE_OLDMALLOC
3448 objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_min;
3451 heap_add_pages(objspace, &size_pools[0], SIZE_POOL_EDEN_HEAP(&size_pools[0]), gc_params.heap_init_slots / HEAP_PAGE_OBJ_LIMIT);
3454 for (int i = 1; i < SIZE_POOL_COUNT; i++) {
3456 int multiple = size_pool->slot_size / sizeof(RVALUE);
3457 size_pool->allocatable_pages = gc_params.heap_init_slots * multiple / HEAP_PAGE_OBJ_LIMIT;
3459 heap_pages_expand_sorted(objspace);
3461 init_mark_stack(&objspace->mark_stack);
3463 objspace->profile.invoke_time = getrusage_time();
3464 finalizer_table = st_init_numtable();
3468 Init_gc_stress(void)
3472 gc_stress_set(objspace, ruby_initial_gc_stress);
3475 typedef int each_obj_callback(void *, void *, size_t, void *);
3477 static void objspace_each_objects(rb_objspace_t *objspace, each_obj_callback *callback, void *data, bool protected);
3478 static void objspace_reachable_objects_from_root(rb_objspace_t *, void (func)(const char *, VALUE, void *), void *);
3482 bool reenable_incremental;
3484 each_obj_callback *callback;
3487 struct heap_page **pages[SIZE_POOL_COUNT];
3488 size_t pages_counts[SIZE_POOL_COUNT];
3492 objspace_each_objects_ensure(VALUE arg)
3498 if (data->reenable_incremental) {
3499 objspace->flags.dont_incremental = FALSE;
3502 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
3503 struct heap_page **pages = data->pages[i];
3515 objspace_each_objects_try(VALUE arg)
3521 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
3523 size_t size = size_mul_or_raise(SIZE_POOL_EDEN_HEAP(size_pool)->total_pages, sizeof(struct heap_page *), rb_eRuntimeError);
3525 struct heap_page **pages = malloc(size);
3534 size_t pages_count = 0;
3535 list_for_each(&SIZE_POOL_EDEN_HEAP(size_pool)->pages, page, page_node) {
3536 pages[pages_count] = page;
3539 data->pages[i] = pages;
3540 data->pages_counts[i] = pages_count;
3541 GC_ASSERT(pages_count == SIZE_POOL_EDEN_HEAP(size_pool)->total_pages);
3544 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
3546 size_t pages_count = data->pages_counts[i];
3547 struct heap_page **pages = data->pages[i];
3549 struct heap_page *page = list_top(&SIZE_POOL_EDEN_HEAP(size_pool)->pages, struct heap_page, page_node);
3550 for (size_t i = 0; i < pages_count; i++) {
3553 if (page == NULL) break;
3557 if (pages[i] != page) continue;
3559 uintptr_t pstart = (uintptr_t)page->start;
3560 uintptr_t pend = pstart + (page->total_slots * size_pool->slot_size);
3562 if ((*data->callback)((void *)pstart, (void *)pend, size_pool->slot_size, data->data)) {
3566 page = list_next(&SIZE_POOL_EDEN_HEAP(size_pool)->pages, page, page_node);
3610 rb_objspace_each_objects(each_obj_callback *callback, void *data)
3612 objspace_each_objects(&rb_objspace, callback, data, TRUE);
3616 objspace_each_objects(rb_objspace_t *objspace, each_obj_callback *callback, void *data, bool protected)
3619 bool reenable_incremental = FALSE;
3621 reenable_incremental = !objspace->flags.dont_incremental;
3624 objspace->flags.dont_incremental = TRUE;
3628 .objspace = objspace,
3629 .reenable_incremental = reenable_incremental,
3631 .callback = callback,
3635 .pages_counts = {0},
3642 rb_objspace_each_objects_without_setup(each_obj_callback *callback, void *data)
3644 objspace_each_objects(&rb_objspace, callback, data, FALSE);
3653 internal_object_p(VALUE obj)
3656 void *ptr = __asan_region_is_poisoned(p, SIZEOF_VALUE);
3657 asan_unpoison_object(obj, false);
3658 bool used_p = p->as.basic.flags;
3663 UNEXPECTED_NODE(internal_object_p);
3672 if (!p->as.basic.klass) break;
3674 return rb_singleton_class_internal_p(obj);
3678 if (!p->as.basic.klass) break;
3682 if (ptr || ! used_p) {
3683 asan_poison_object(obj);
3689 rb_objspace_internal_object_p(VALUE obj)
3691 return internal_object_p(obj);
3695 os_obj_of_i(void *vstart, void *vend, size_t stride, void *data)
3700 for (; v != (VALUE)vend; v += stride) {
3701 if (!internal_object_p(v)) {
3721 rb_objspace_each_objects(os_obj_of_i, &oes);
3768 return os_obj_of(of);
3789 st_data_t data = obj;
3791 st_delete(finalizer_table, &data, 0);
3797 should_be_callable(VALUE block)
3806 should_be_finalizable(VALUE obj)
3878 define_final(int argc, VALUE *argv, VALUE os)
3883 should_be_finalizable(obj);
3888 should_be_callable(block);
3891 if (rb_callable_receiver(block) == obj) {
3892 rb_warn("finalizer references object to be finalized");
3895 return define_final0(obj, block);
3907 if (st_lookup(finalizer_table, obj, &data)) {
3908 table = (VALUE)data;
3915 for (i = 0; i < len; i++) {
3928 RBASIC_CLEAR_CLASS(table);
3929 st_add_direct(finalizer_table, obj, table);
3940 should_be_finalizable(obj);
3941 should_be_callable(block);
3942 return define_final0(obj, block);
3953 if (st_lookup(finalizer_table, obj, &data)) {
3954 table = (VALUE)data;
3955 st_insert(finalizer_table, dest, table);
3970 VALUE errinfo = ec->errinfo;
3971 rb_warn("Exception in finalizer %+"PRIsVALUE, final);
3972 rb_ec_error_print(ec, errinfo);
3980 enum ruby_tag_type state;
3989 #define RESTORE_FINALIZER() (\
3990 ec->cfp = saved.cfp, \
3991 ec->errinfo = saved.errinfo)
3993 saved.errinfo = ec->errinfo;
3995 saved.cfp = ec->cfp;
4000 state = EC_EXEC_TAG();
4001 if (state != TAG_NONE) {
4003 warn_exception_in_finalizer(ec, ATOMIC_VALUE_EXCHANGE(saved.final, Qundef));
4005 for (i = saved.finished;
4007      saved.finished = ++i) {
4008 run_single_final(saved.final = RARRAY_AREF(table, i), saved.objid);
4011 #undef RESTORE_FINALIZER
4017 st_data_t key, table;
4019 if (RZOMBIE(zombie)->dfree) {
4020 RZOMBIE(zombie)->dfree(RZOMBIE(zombie)->data);
4023 key = (st_data_t)zombie;
4024 if (st_delete(finalizer_table, &key, &table)) {
4025 run_finalizer(objspace, zombie, (VALUE)table);
4035 asan_unpoison_object(zombie, false);
4036 next_zombie = RZOMBIE(zombie)->next;
4037 page = GET_HEAP_PAGE(zombie);
4039 run_final(objspace, zombie);
4045 obj_free_object_id(objspace, zombie);
4048 GC_ASSERT(heap_pages_final_slots > 0);
4049 GC_ASSERT(page->final_slots > 0);
4051 heap_pages_final_slots--;
4052 page->final_slots--;
4054 heap_page_add_freeobj(objspace, page, zombie);
4055 objspace->profile.total_freed_objects++;
4059 zombie = next_zombie;
4068 ec->interrupt_mask |= PENDING_INTERRUPT_MASK;
4070 while ((zombie = ATOMIC_VALUE_EXCHANGE(heap_pages_deferred_final, 0)) != 0) {
4071 finalize_list(objspace, zombie);
4074 ec->interrupt_mask &= ~PENDING_INTERRUPT_MASK;
4078 gc_finalize_deferred(void *dmy)
4081 if (ATOMIC_EXCHANGE(finalizing, 1)) return;
4083 finalize_deferred(objspace);
4084 ATOMIC_SET(finalizing, 0);
4091 rb_bug("gc_finalize_deferred_register: can't register finalizer.");
4102 force_chain_object(st_data_t key, st_data_t val, st_data_t arg)
4113 bool rb_obj_is_main_ractor(VALUE gv);
4120 #if RGENGC_CHECK_MODE >= 2
4121 gc_verify_internal_consistency(objspace);
4125 if (ATOMIC_EXCHANGE(finalizing, 1)) return;
4128 finalize_deferred(objspace);
4129 GC_ASSERT(heap_pages_deferred_final == 0);
4133 objspace->flags.dont_incremental = 1;
4136 while (finalizer_table->num_entries) {
4138 st_foreach(finalizer_table, force_chain_object, (st_data_t)&list);
4141 st_data_t obj = (st_data_t)curr->obj;
4142 run_finalizer(objspace, curr->obj, curr->table);
4143 st_delete(finalizer_table, &obj, 0);
4153 unsigned int lock_lev;
4154 gc_enter(objspace, gc_enter_event_finalizer, &lock_lev);
4157 for (i = 0; i < heap_allocated_pages; i++) {
4158 struct heap_page *page = heap_pages_sorted[i];
4159 short stride = page->slot_size;
4161 uintptr_t p = (uintptr_t)page->start;
4162 uintptr_t pend = p + page->total_slots * stride;
4163 for (; p < pend; p += stride) {
4165 void *poisoned = asan_poisoned_object_p(vp);
4166 asan_unpoison_object(vp, false);
4169 if (!DATA_PTR(p) || !RANY(p)->as.data.dfree) break;
4170 if (rb_obj_is_thread(vp)) break;
4171 if (rb_obj_is_mutex(vp)) break;
4173 if (rb_obj_is_main_ractor(vp)) break;
4175 RDATA(p)->dfree = RANY(p)->as.typeddata.type->function.dfree;
4177 RANY(p)->as.free.flags = 0;
4181 else if (RANY(p)->as.data.dfree) {
4182 make_zombie(objspace, vp, RANY(p)->as.data.dfree, RANY(p)->as.data.data);
4186 if (RANY(p)->as.file.fptr) {
4187 make_io_zombie(objspace, vp);
4195 asan_poison_object(vp);
4200 gc_exit(objspace, gc_enter_event_finalizer, &lock_lev);
4202 if (heap_pages_deferred_final) {
4203 finalize_list(objspace, heap_pages_deferred_final);
4206 st_free_table(finalizer_table);
4207 finalizer_table = 0;
4208 ATOMIC_SET(finalizing, 0);
4214 struct heap_page *page = GET_HEAP_PAGE(ptr);
4215 return page->flags.before_sweep ? FALSE : TRUE;
4222 if (!is_lazy_sweeping(objspace) ||
4223 is_swept_object(objspace, ptr) ||
4224 MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(ptr), ptr)) {
4245 if (!is_garbage_object(objspace, ptr)) {
4257 check_rvalue_consistency(obj);
4262 rb_objspace_markable_object_p(VALUE obj)
4265 return is_markable_object(objspace, obj) && is_live_object(objspace, obj);
4269 rb_objspace_garbage_object_p(VALUE obj)
4272 return is_garbage_object(objspace, obj);
4279 if (st_lookup(objspace->id_to_obj_tbl, objid, &orig)) {
4305 #if SIZEOF_LONG == SIZEOF_VOIDP
4306 #define NUM2PTR(x) NUM2ULONG(x)
4307 #elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
4308 #define NUM2PTR(x) NUM2ULL(x)
4316 if (FIXNUM_P(objid) || rb_big_size(objid) <= SIZEOF_VOIDP) {
4317 ptr = NUM2PTR(objid);
4324 ptr = obj_id_to_ref(objid);
4325 if ((ptr % sizeof(RVALUE)) == (4 << 2)) {
4334 if ((orig = id2ref_obj_tbl(objspace, objid)) != Qundef &&
4335     is_live_object(objspace, orig)) {
4341 rb_raise(rb_eRangeError, "%+"PRIsVALUE" is id of the unshareable object on multi-ractor", rb_int2str(objid, 10));
4345 if (rb_int_ge(objid, objspace->next_object_id)) {
4356 return id2ref(objid);
4366 #if SIZEOF_LONG == SIZEOF_VOIDP
4376 return get_heap_object_id(obj);
4380 cached_object_id(VALUE obj)
4386 if (st_lookup(objspace->obj_to_id_tbl, (st_data_t)obj, &id)) {
4392 id = objspace->next_object_id;
4393 objspace->next_object_id = rb_int_plus(id, INT2FIX(OBJ_ID_INCREMENT));
4395 VALUE already_disabled = rb_gc_disable_no_rest();
4396 st_insert(objspace->obj_to_id_tbl, (st_data_t)obj, (st_data_t)id);
4397 st_insert(objspace->id_to_obj_tbl, (st_data_t)id, (st_data_t)obj);
4398 if (already_disabled == Qfalse) rb_objspace_gc_enable(objspace);
4407 nonspecial_obj_id_(VALUE obj)
4409 return nonspecial_obj_id(obj);
4416 return rb_find_object_id(obj, nonspecial_obj_id_);
4478 return rb_find_object_id(obj, cached_object_id);
4481 static enum rb_id_table_iterator_result
4482 cc_table_memsize_i(VALUE ccs_ptr, void *data_ptr)
4484 size_t *total_size = data_ptr;
4486 *total_size += sizeof(*ccs);
4487 *total_size += sizeof(ccs->entries[0]) * ccs->capa;
4488 return ID_TABLE_CONTINUE;
4494 size_t total = rb_id_table_memsize(cc_table);
4495 rb_id_table_foreach_values(cc_table, cc_table_memsize_i, &total);
4500 obj_memsize_of(VALUE obj, int use_all_types)
4509 size += rb_generic_ivar_memsize(obj);
4514 if (!(RBASIC(obj)->flags & ROBJECT_EMBED)) {
4520 if (RCLASS_EXT(obj)) {
4521 if (RCLASS_M_TBL(obj)) {
4522 size += rb_id_table_memsize(RCLASS_M_TBL(obj));
4524 if (RCLASS_IV_TBL(obj)) {
4525 size += st_memsize(RCLASS_IV_TBL(obj));
4527 if (RCLASS_CVC_TBL(obj)) {
4528 size += rb_id_table_memsize(RCLASS_CVC_TBL(obj));
4530 if (RCLASS_IV_INDEX_TBL(obj)) {
4532 size += st_memsize(RCLASS_IV_INDEX_TBL(obj));
4534 if (RCLASS_EXT(obj)->iv_tbl) {
4535 size += st_memsize(RCLASS_EXT(obj)->iv_tbl);
4537 if (RCLASS_EXT(obj)->const_tbl) {
4538 size += rb_id_table_memsize(RCLASS_EXT(obj)->const_tbl);
4540 if (RCLASS_CC_TBL(obj)) {
4541 size += cc_table_memsize(RCLASS_CC_TBL(obj));
4549 if (RICLASS_OWNS_M_TBL_P(obj)) {
4550 if (RCLASS_M_TBL(obj)) {
4551 size += rb_id_table_memsize(RCLASS_M_TBL(obj));
4554 if (RCLASS_EXT(obj) && RCLASS_CC_TBL(obj)) {
4555 size += cc_table_memsize(RCLASS_CC_TBL(obj));
4559 size += rb_str_memsize(obj);
4562 size += rb_ary_memsize(obj);
4565 if (RHASH_AR_TABLE_P(obj)) {
4566 if (RHASH_AR_TABLE(obj) != NULL) {
4567 size_t rb_hash_ar_table_size(void);
4568 size += rb_hash_ar_table_size();
4572 VM_ASSERT(RHASH_ST_TABLE(obj) != NULL);
4573 size += st_memsize(RHASH_ST_TABLE(obj));
4582 if (use_all_types) size += rb_objspace_data_type_memsize(obj);
4587 size += onig_region_memsize(&rm->regs);
4589 size += sizeof(struct rmatch);
4593 if (RFILE(obj)->fptr) {
4594 size += rb_io_memsize(RFILE(obj)->fptr);
4601 size += imemo_memsize(obj);
4609 if (!(RBASIC(obj)->flags & BIGNUM_EMBED_FLAG) && BIGNUM_DIGITS(obj)) {
4610 size += BIGNUM_LEN(obj) * sizeof(BDIGIT);
4615 UNEXPECTED_NODE(obj_memsize_of);
4619 if ((RBASIC(obj)->flags & RSTRUCT_EMBED_LEN_MASK) == 0 &&
4620     RSTRUCT(obj)->as.heap.ptr) {
4630 rb_bug("objspace/memsize_of(): unknown data type 0x%x(%p)",
4634 return size + GET_HEAP_PAGE(obj)->slot_size;
4638 rb_obj_memsize_of(VALUE obj)
4640 return obj_memsize_of(obj, TRUE);
4644 set_zero(st_data_t key, st_data_t val, st_data_t arg)
4653 type_sym(size_t type)
4656 #define COUNT_TYPE(t) case (t): return ID2SYM(rb_intern(#t)); break;
4725 count_objects(int argc, VALUE *argv, VALUE os)
4740 for (i = 0; i <= T_MASK; i++) {
4744 for (i = 0; i < heap_allocated_pages; i++) {
4745 struct heap_page *page = heap_pages_sorted[i];
4746 short stride = page->slot_size;
4748 uintptr_t p = (uintptr_t)page->start;
4749 uintptr_t pend = p + page->total_slots * stride;
4750 for (;p < pend; p += stride) {
4752 GC_ASSERT((NUM_IN_PAGE(vp) * sizeof(RVALUE)) % page->slot_size == 0);
4754 void *poisoned = asan_poisoned_object_p(vp);
4755 asan_unpoison_object(vp, false);
4756 if (RANY(p)->as.basic.flags) {
4764 asan_poison_object(vp);
4767 total += page->total_slots;
4774 rb_hash_stlike_foreach(hash, set_zero, hash);
4779 for (i = 0; i <= T_MASK; i++) {
4797 size_t total_slots = 0;
4798 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
4800 total_slots += SIZE_POOL_EDEN_HEAP(size_pool)->total_slots;
4801 total_slots += SIZE_POOL_TOMB_HEAP(size_pool)->total_slots;
4809 return (objspace->total_allocated_objects - objspace->profile.total_freed_objects) - heap_pages_final_slots;
4815 return objspace_available_slots(objspace) - objspace_live_slots(objspace) - heap_pages_final_slots;
4819 gc_setup_mark_bits(struct heap_page *page)
4822 memcpy(&page->mark_bits[0], &page->uncollectible_bits[0], HEAP_PAGE_BITMAP_SIZE);
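/*
 * Editor's note (illustrative sketch, not part of the original source):
 * gc_setup_mark_bits() seeds a page's mark bitmap from its uncollectible
 * bitmap, so slots promoted to the uncollectible set start every cycle
 * already marked.  The operation is just a word-for-word bitmap copy:
 */
#if 0
#include <string.h>

static void
seed_mark_bits_sketch(unsigned long *mark_bits,
                      const unsigned long *uncollectible_bits, size_t nwords)
{
    /* every uncollectible slot begins the cycle marked */
    memcpy(mark_bits, uncollectible_bits, nwords * sizeof(unsigned long));
}
#endif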
4834 if (!VirtualProtect(body, HEAP_PAGE_SIZE, PAGE_NOACCESS, &old_protect)) {
4836 if (mprotect(body, HEAP_PAGE_SIZE, PROT_NONE)) {
4838 rb_bug("Couldn't protect page %p, errno: %s", (void *)body, strerror(errno));
4841 gc_report(5, objspace, "Protecting page in move %p\n", (void *)body);
4851 if (!VirtualProtect(body, HEAP_PAGE_SIZE, PAGE_READWRITE, &old_protect)) {
4853 if (mprotect(body, HEAP_PAGE_SIZE, PROT_READ | PROT_WRITE)) {
4855 rb_bug("Couldn't unprotect page %p, errno: %s", (void *)body, strerror(errno));
4858 gc_report(5, objspace, "Unprotecting page in move %p\n", (void *)body);
4871 if (gc_is_moveable_obj(objspace, (VALUE)p)) {
4874 objspace->rcompactor.total_moved++;
4876 bool from_freelist = false;
4879 from_freelist = true;
4882 gc_move(objspace, (VALUE)p, dest, page->slot_size);
4883 gc_pin(objspace, (VALUE)p);
4884 heap->compact_cursor_index = (RVALUE *)p;
4885 if (from_freelist) {
4903 struct heap_page * cursor = heap->compact_cursor;
4905 GC_ASSERT(!MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(dest), dest));
4914 bits_t *mark_bits = cursor->mark_bits;
4915 bits_t *pin_bits = cursor->pinned_bits;
4918 if (heap->compact_cursor_index) {
4919 index = BITMAP_INDEX(heap->compact_cursor_index);
4920 p = heap->compact_cursor_index;
4921 GC_ASSERT(cursor == GET_HEAP_PAGE(p));
4928 bits_t bits = mark_bits[index] & ~pin_bits[index];
4930 bits >>= NUM_IN_PAGE(p);
4931 if (try_move_plane(objspace, heap, sweep_page, (uintptr_t)p, bits, dest)) return 1;
4934 p = cursor->start + (BITS_BITLENGTH - NUM_IN_PAGE(cursor->start));
4937 p = cursor->start + (BITS_BITLENGTH - NUM_IN_PAGE(cursor->start)) + (BITS_BITLENGTH * index);
4942 for (size_t i = index + 1; i < HEAP_PAGE_BITMAP_LIMIT; i++) {
4943 bits_t bits = mark_bits[i] & ~pin_bits[i];
4944 if (try_move_plane(objspace, heap, sweep_page, (uintptr_t)p, bits, dest)) return 1;
4945 p += BITS_BITLENGTH;
4954 next = list_prev(&heap->pages, cursor, page_node);
4957 lock_page_body(objspace, GET_PAGE_BODY(cursor->start));
4959 heap->compact_cursor = next;
4960 heap->compact_cursor_index = 0;
4966 if (next == sweep_page) {
4977 struct heap_page *cursor = heap->compact_cursor;
4980 unlock_page_body(objspace, GET_PAGE_BODY(cursor->start));
4981 cursor = list_next(&heap->pages, cursor, page_node);
4989 read_barrier_handler(uintptr_t address)
4994 address -= address % sizeof(RVALUE);
4996 obj = (VALUE)address;
5000 unlock_page_body(objspace, GET_PAGE_BODY(obj));
5002 objspace->profile.read_barrier_faults++;
5004 invalidate_moved_page(objspace, GET_HEAP_PAGE(obj));
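/*
 * Editor's note (illustrative sketch, not part of the original source):
 * during compaction, pages holding forwarding objects are mprotect()ed to
 * PROT_NONE.  Any stray access faults, and the handler above unprotects
 * the page, counts the fault, and invalidates the moved objects on it.
 * A self-contained model of the protect/unprotect half, using only POSIX
 * calls and hypothetical names:
 */
#if 0
#include <stdio.h>
#include <sys/mman.h>

static void
protect_page_sketch(void *body, size_t page_size)
{
    if (mprotect(body, page_size, PROT_NONE)) {
        perror("mprotect(PROT_NONE)");   /* any access to this page now faults */
    }
}

static void
unprotect_page_sketch(void *body, size_t page_size)
{
    if (mprotect(body, page_size, PROT_READ | PROT_WRITE)) {
        perror("mprotect(PROT_READ|PROT_WRITE)");
    }
}
#endif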
5010 static LPTOP_LEVEL_EXCEPTION_FILTER old_handler;
5011 typedef void (*signal_handler)(int);
5012 static signal_handler old_sigsegv_handler;
5015 read_barrier_signal(EXCEPTION_POINTERS * info)
5018 if (info->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION) {
5023 read_barrier_handler((uintptr_t)info->ExceptionRecord->ExceptionInformation[1]);
5024 return EXCEPTION_CONTINUE_EXECUTION;
5027 return EXCEPTION_CONTINUE_SEARCH;
5032 uninstall_handlers(void)
5034 signal(SIGSEGV, old_sigsegv_handler);
5035 SetUnhandledExceptionFilter(old_handler);
5039 install_handlers(void)
5042 old_sigsegv_handler = signal(SIGSEGV, NULL);
5045 old_handler = SetUnhandledExceptionFilter(read_barrier_signal);
5048 static struct sigaction old_sigbus_handler;
5049 static struct sigaction old_sigsegv_handler;
5052 read_barrier_signal(int sig, siginfo_t * info, void * data)
5055 struct sigaction prev_sigbus, prev_sigsegv;
5056 sigaction(SIGBUS, &old_sigbus_handler, &prev_sigbus);
5057 sigaction(SIGSEGV, &old_sigsegv_handler, &prev_sigsegv);
5060 sigset_t set, prev_set;
5062 sigaddset(&set, SIGBUS);
5063 sigaddset(&set, SIGSEGV);
5064 sigprocmask(SIG_UNBLOCK, &set, &prev_set);
5067 read_barrier_handler((uintptr_t)info->si_addr);
5070 sigaction(SIGBUS, &prev_sigbus, NULL);
5071 sigaction(SIGSEGV, &prev_sigsegv, NULL);
5072 sigprocmask(SIG_SETMASK, &prev_set, NULL);
5076 uninstall_handlers(void)
5078 sigaction(SIGBUS, &old_sigbus_handler, NULL);
5079 sigaction(SIGSEGV, &old_sigsegv_handler, NULL);
5083 install_handlers(void)
5085 struct sigaction action;
5086 memset(&action, 0, sizeof(struct sigaction));
5087 sigemptyset(&action.sa_mask);
5088 action.sa_sigaction = read_barrier_signal;
5089 action.sa_flags = SA_SIGINFO | SA_ONSTACK;
5091 sigaction(SIGBUS, &action, &old_sigbus_handler);
5092 sigaction(SIGSEGV, &action, &old_sigsegv_handler);
5097 revert_stack_objects(VALUE stack_obj, void *ctx)
5105 invalidate_moved_page(objspace, GET_HEAP_PAGE(stack_obj));
5112 if (is_pointer_to_heap(objspace, (void *)v)) {
5117 invalidate_moved_page(objspace, GET_HEAP_PAGE(v));
5128 rb_vm_t *vm = rb_ec_vm_ptr(ec);
5129 rb_vm_each_stack_value(vm, revert_stack_objects, (void*)objspace);
5130 each_machine_stack_value(ec, revert_machine_stack_references);
5136 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
5138 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
5139 gc_unprotect_pages(objspace, heap);
5142 uninstall_handlers();
5149 check_stack_for_moved(objspace);
5151 gc_update_references(objspace);
5152 objspace->profile.compact_count++;
5154 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
5156 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
5157 heap->compact_cursor = NULL;
5158 heap->compact_cursor_index = 0;
5161 if (gc_prof_enabled(objspace)) {
5163 record->moved_objects = objspace->rcompactor.total_moved - record->moved_objects;
5165 objspace->flags.during_compacting = FALSE;
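/*
 * Editor's note (illustrative sketch, not part of the original source):
 * compaction runs two cursors toward each other: the sweep cursor walks
 * forward from the first page while the compact cursor retreats from the
 * last, filling free slots near the front with live objects taken from
 * pages near the end.  When the cursors meet, compaction is finished and
 * the state reset above runs.  The meeting test in isolation, with
 * hypothetical names:
 */
#if 0
struct cursor_sketch { int page_index; };

static int
compaction_done_sketch(struct cursor_sketch sweep, struct cursor_sketch compact)
{
    /* sweep advances forward, compact retreats backward */
    return sweep.page_index >= compact.page_index;
}
#endif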
5178 struct heap_page * sweep_page = ctx->page;
5181 short slot_size = sweep_page->slot_size;
5182 short slot_bits = slot_size / sizeof(RVALUE);
5188 GC_ASSERT(MARKED_IN_BITMAP(GET_HEAP_PINNED_BITS(dest), dest));
5189 GC_ASSERT(!MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(dest), dest));
5191 CLEAR_IN_BITMAP(GET_HEAP_PINNED_BITS(dest), dest);
5193 if (*finished_compacting) {
5200 (void)VALGRIND_MAKE_MEM_UNDEFINED((void*)dest, sizeof(RVALUE));
5201 heap_page_add_freeobj(objspace, sweep_page, dest);
5207 if (!try_move(objspace, heap, sweep_page, dest)) {
5208 *finished_compacting = true;
5209 (void)VALGRIND_MAKE_MEM_UNDEFINED((void*)p, sizeof(RVALUE));
5210 gc_report(5, objspace, "Quit compacting, couldn't find an object to move\n");
5217 heap_page_add_freeobj(objspace, sweep_page, dest);
5218 gc_report(3, objspace, "page_sweep: %s is added to freelist\n", obj_info(dest));
5227 bitset >>= slot_bits;
5236 bool finished_compacting = false;
5237 bits_t *mark_bits, *pin_bits;
5241 mark_bits = sweep_page->mark_bits;
5242 pin_bits = sweep_page->pinned_bits;
5244 p = (uintptr_t)sweep_page->start;
5246 struct heap_page * cursor = heap->compact_cursor;
5248 unlock_page_body(objspace, GET_PAGE_BODY(cursor->start));
5251 bitset = pin_bits[0] & ~mark_bits[0];
5252 bitset >>= NUM_IN_PAGE(p);
5253 gc_fill_swept_plane(objspace, heap, (uintptr_t)p, bitset, &finished_compacting, ctx);
5254 p += ((BITS_BITLENGTH - NUM_IN_PAGE(p)) * sizeof(RVALUE));
5256 for (int i = 1; i < HEAP_PAGE_BITMAP_LIMIT; i++) {
5258 bitset = pin_bits[i] & ~mark_bits[i];
5259 gc_fill_swept_plane(objspace, heap, (uintptr_t)p, bitset, &finished_compacting, ctx);
5260 p += ((BITS_BITLENGTH) * sizeof(RVALUE));
5263 lock_page_body(objspace, GET_PAGE_BODY(heap->compact_cursor->start));
5265 return finished_compacting;
5271 struct heap_page * sweep_page = ctx->page;
5272 short slot_size = sweep_page->slot_size;
5273 short slot_bits = slot_size / sizeof(RVALUE);
5274 GC_ASSERT(slot_bits > 0);
5278 GC_ASSERT(vp % sizeof(RVALUE) == 0);
5280 asan_unpoison_object(vp, false);
5284 gc_report(2, objspace, "page_sweep: free %p\n", (void *)p);
5285 #if RGENGC_CHECK_MODE
5286 if (!is_full_marking(objspace)) {
5287 if (RVALUE_OLD_P(vp)) rb_bug("page_sweep: %p - old while minor GC.", (void *)p);
5288 if (rgengc_remembered_sweep(objspace, vp)) rb_bug("page_sweep: %p - remembered.", (void *)p);
5291 if (obj_free(objspace, vp)) {
5292 if (heap->compact_cursor) {
5294 MARK_IN_BITMAP(GET_HEAP_PINNED_BITS(vp), vp);
5297 (void)VALGRIND_MAKE_MEM_UNDEFINED((void*)p, sizeof(RVALUE));
5298 heap_page_add_freeobj(objspace, sweep_page, vp);
5299 gc_report(3, objspace, "page_sweep: %s is added to freelist\n", obj_info(vp));
5309 if (objspace->flags.during_compacting) {
5315 rb_bug("T_MOVED shouldn't be seen until compaction is finished\n");
5317 gc_report(3, objspace, "page_sweep: %s is added to freelist\n", obj_info(vp));
5318 if (FL_TEST(vp, FL_FROM_FREELIST)) {
5324 heap_page_add_freeobj(objspace, sweep_page, vp);
5330 if (heap->compact_cursor) {
5332 MARK_IN_BITMAP(GET_HEAP_PINNED_BITS(vp), vp);
5341 bitset >>= slot_bits;
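/*
 * Editor's note (illustrative sketch, not part of the original source):
 * the sweep and compaction planes walk one bitmap word at a time: the low
 * bit answers "act on this slot?", the pointer advances by one slot, and
 * the word is shifted right by the number of RVALUE-sized units a slot
 * occupies (slot_bits above).  The scanning pattern in isolation:
 */
#if 0
#include <stdint.h>

static void
scan_plane_sketch(uintptr_t p, unsigned long bitset, int slot_size, int slot_bits,
                  void (*visit)(uintptr_t))
{
    /* slot_bits = slot_size / sizeof(RVALUE): bits consumed per slot */
    while (bitset) {
        if (bitset & 1) visit(p);   /* low bit set: act on the slot at p */
        p += slot_size;
        bitset >>= slot_bits;
    }
}
#endif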
5348 struct heap_page *sweep_page = ctx->page;
5353 bits_t *bits, bitset;
5355 gc_report(2, objspace, "page_sweep: start.\n");
5357 if (heap->compact_cursor) {
5358 if (sweep_page == heap->compact_cursor) {
5360 gc_report(5, objspace, "Quit compacting, mark and compact cursor met\n");
5361 gc_compact_finish(objspace, size_pool, heap);
5365 asan_unpoison_memory_region(&sweep_page->freelist, sizeof(RVALUE*), false);
5366 sweep_page->freelist = NULL;
5367 asan_poison_memory_region(&sweep_page->freelist, sizeof(RVALUE*));
5371 sweep_page->flags.before_sweep = FALSE;
5372 sweep_page->free_slots = 0;
5374 p = sweep_page->start;
5375 bits = sweep_page->mark_bits;
5377 int page_rvalue_count = sweep_page->total_slots * (size_pool->slot_size / sizeof(RVALUE));
5378 int out_of_range_bits = (NUM_IN_PAGE(p) + page_rvalue_count) % BITS_BITLENGTH;
5379 if (out_of_range_bits != 0) {
5380 bits[BITMAP_INDEX(p) + page_rvalue_count / BITS_BITLENGTH] |= ~(((bits_t)1 << out_of_range_bits) - 1);
5385 bitset >>= NUM_IN_PAGE(p);
5387 gc_sweep_plane(objspace, heap, (uintptr_t)p, bitset, ctx);
5389 p += (BITS_BITLENGTH - NUM_IN_PAGE(p));
5391 for (i=1; i < HEAP_PAGE_BITMAP_LIMIT; i++) {
5394 gc_sweep_plane(objspace, heap, (uintptr_t)p, bitset, ctx);
5396 p += BITS_BITLENGTH;
5399 if (heap->compact_cursor) {
5400 if (gc_fill_swept_page(objspace, heap, sweep_page, ctx)) {
5401 gc_compact_finish(objspace, size_pool, heap);
5405 if (!heap->compact_cursor) {
5406 gc_setup_mark_bits(sweep_page);
5409 #if GC_PROFILE_MORE_DETAIL
5410 if (gc_prof_enabled(objspace)) {
5412 record->removing_objects += ctx->final_slots + ctx->freed_slots;
5413 record->empty_objects += ctx->empty_slots;
5416 if (0) fprintf(stderr, "gc_sweep_page(%"PRIdSIZE"): total_slots: %d, freed_slots: %d, empty_slots: %d, final_slots: %d\n",
5418            sweep_page->total_slots,
5419            ctx->freed_slots, ctx->empty_slots, ctx->final_slots);
5421 sweep_page->free_slots += ctx->freed_slots + ctx->empty_slots;
5422 objspace->profile.total_freed_objects += ctx->freed_slots;
5424 if (heap_pages_deferred_final && !finalizing) {
5427 gc_finalize_deferred_register(objspace);
5431 #if RGENGC_CHECK_MODE
5432 short freelist_len = 0;
5433 RVALUE *ptr = sweep_page->freelist;
5436 ptr = ptr->as.free.next;
5438 if (freelist_len != sweep_page->free_slots) {
5439 rb_bug("inconsistent freelist length: expected %d but was %d", sweep_page->free_slots, freelist_len);
5443 gc_report(2, objspace, "page_sweep: end.\n");
5451 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
5452 if (!heap->free_pages && heap_increment(objspace, size_pool, heap) == FALSE) {
5454 size_pool_allocatable_pages_set(objspace, size_pool, 1);
5455 if (!heap_increment(objspace, size_pool, heap)) {
5464 gc_mode_name(enum gc_mode mode)
5467 case gc_mode_none: return "none";
5468 case gc_mode_marking: return "marking";
5469 case gc_mode_sweeping: return "sweeping";
5470 default: rb_bug("gc_mode_name: unknown mode: %d", (int)mode);
5475 gc_mode_transition(rb_objspace_t *objspace, enum gc_mode mode)
5477 #if RGENGC_CHECK_MODE
5478 enum gc_mode prev_mode = gc_mode(objspace);
5479 switch (prev_mode) {
5480 case gc_mode_none: GC_ASSERT(mode == gc_mode_marking); break;
5481 case gc_mode_marking: GC_ASSERT(mode == gc_mode_sweeping); break;
5482 case gc_mode_sweeping: GC_ASSERT(mode == gc_mode_none); break;
5485 if (0) fprintf(stderr, "gc_mode_transition: %s->%s\n", gc_mode_name(gc_mode(objspace)), gc_mode_name(mode));
5486 gc_mode_set(objspace, mode);
5493 asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
5494 if (page->freelist) {
5495 RVALUE *p = page->freelist;
5496 asan_unpoison_object((VALUE)p, false);
5497 while (p->as.free.next) {
5499 p = p->as.free.next;
5500 asan_poison_object((VALUE)prev);
5501 asan_unpoison_object((VALUE)p, false);
5503 p->as.free.next = freelist;
5504 asan_poison_object((VALUE)p);
5507 page->freelist = freelist;
5509 asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
5516 heap->sweeping_page = list_top(&heap->pages, struct heap_page, page_node);
5517 heap->free_pages = NULL;
5518 #if GC_ENABLE_INCREMENTAL_MARK
5519 heap->pooled_pages = NULL;
5523 #if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ == 4
5524 __attribute__((noinline))
5529 gc_mode_transition(objspace, gc_mode_sweeping);
5531 #if GC_ENABLE_INCREMENTAL_MARK
5532 objspace->rincgc.pooled_slots = 0;
5535 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
5538 gc_sweep_start_heap(objspace, SIZE_POOL_EDEN_HEAP(size_pool));
5542 list_for_each(&GET_VM()->ractor.set, r, vmlr_node) {
5543 rb_gc_ractor_newobj_cache_clear(&r->newobj_cache);
5551 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
5552 size_t total_slots = heap->total_slots + SIZE_POOL_TOMB_HEAP(size_pool)->total_slots;
5553 size_t total_pages = heap->total_pages + SIZE_POOL_TOMB_HEAP(size_pool)->total_pages;
5554 size_t swept_slots = size_pool->freed_slots + size_pool->empty_slots;
5556 size_t min_free_slots = (size_t)(total_slots * gc_params.heap_free_slots_min_ratio);
5558 if (swept_slots < min_free_slots) {
5559 bool grow_heap = is_full_marking(objspace);
5561 if (!is_full_marking(objspace)) {
5563 bool is_growth_heap = size_pool->empty_slots == 0 ||
5564 size_pool->freed_slots > size_pool->empty_slots;
5566 if (objspace->profile.count - objspace->rgengc.last_major_gc < RVALUE_OLD_AGE) {
5569 else if (is_growth_heap) {
5570 objspace->rgengc.need_major_gc |= GPR_FLAG_MAJOR_BY_NOFREE;
5571 size_pool->force_major_gc_count++;
5576 size_t extend_page_count = heap_extend_pages(objspace, swept_slots, total_slots, total_pages);
5578 if (extend_page_count > size_pool->allocatable_pages) {
5579 size_pool_allocatable_pages_set(objspace, size_pool, extend_page_count);
5582 heap_increment(objspace, size_pool, SIZE_POOL_EDEN_HEAP(size_pool));
5591 gc_report(1, objspace, "gc_sweep_finish\n");
5593 gc_prof_set_heap_info(objspace);
5594 heap_pages_free_unused_pages(objspace);
5596 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
5600 size_t tomb_pages = SIZE_POOL_TOMB_HEAP(size_pool)->total_pages;
5601 if (size_pool->allocatable_pages < tomb_pages) {
5602 size_pool->allocatable_pages = tomb_pages;
5606 size_pool->freed_slots = 0;
5607 size_pool->empty_slots = 0;
5609 #if GC_ENABLE_INCREMENTAL_MARK
5610 if (!will_be_incremental_marking(objspace)) {
5611 rb_heap_t *eden_heap = SIZE_POOL_EDEN_HEAP(size_pool);
5612 struct heap_page *end_page = eden_heap->free_pages;
5614 while (end_page->free_next) end_page = end_page->free_next;
5615 end_page->free_next = eden_heap->pooled_pages;
5618 eden_heap->free_pages = eden_heap->pooled_pages;
5620 eden_heap->pooled_pages = NULL;
5621 objspace->rincgc.pooled_slots = 0;
5626 heap_pages_expand_sorted(objspace);
5629 gc_mode_transition(objspace, gc_mode_none);
5631 #if RGENGC_CHECK_MODE >= 2
5632 gc_verify_internal_consistency(objspace);
5639 struct heap_page *sweep_page = heap->sweeping_page;
5640 int unlink_limit = 3;
5642 #if GC_ENABLE_INCREMENTAL_MARK
5643 int swept_slots = 0;
5645 bool need_pool = TRUE;
5647 int need_pool = will_be_incremental_marking(objspace) ? TRUE : FALSE;
5650 gc_report(2, objspace, "gc_sweep_step (need_pool: %d)\n", need_pool);
5652 gc_report(2, objspace, "gc_sweep_step\n");
5655 if (sweep_page == NULL) return FALSE;
5657 #if GC_ENABLE_LAZY_SWEEP
5658 gc_prof_sweep_timer_start(objspace);
5662 RUBY_DEBUG_LOG("sweep_page:%p", (void *)sweep_page);
5670 gc_sweep_page(objspace, size_pool, heap, &ctx);
5671 int free_slots = ctx.freed_slots + ctx.empty_slots;
5673 heap->sweeping_page = list_next(&heap->pages, sweep_page, page_node);
5675 if (sweep_page->final_slots + free_slots == sweep_page->total_slots &&
5676 heap_pages_freeable_pages > 0 &&
5678 heap_pages_freeable_pages--;
5681 heap_unlink_page(objspace, heap, sweep_page);
5682 heap_add_page(objspace, size_pool, SIZE_POOL_TOMB_HEAP(size_pool), sweep_page);
5684 else if (free_slots > 0) {
5686 size_pool->freed_slots += ctx.freed_slots;
5687 size_pool->empty_slots += ctx.empty_slots;
5690 #if GC_ENABLE_INCREMENTAL_MARK
5692 heap_add_poolpage(objspace, heap, sweep_page);
5696 heap_add_freepage(heap, sweep_page);
5697 swept_slots += free_slots;
5698 if (swept_slots > 2048) {
5703 heap_add_freepage(heap, sweep_page);
5708 sweep_page->free_next = NULL;
5710 }
while ((sweep_page = heap->sweeping_page));
5712 if (!heap->sweeping_page) {
5714 gc_sweep_finish_size_pool(objspace, size_pool);
5717 if (!has_sweeping_pages(objspace)) {
5718 gc_sweep_finish(objspace);
5722 #if GC_ENABLE_LAZY_SWEEP
5723 gc_prof_sweep_timer_stop(objspace);
5726 return heap->free_pages != NULL;
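/*
 * Editor's note (illustrative sketch, not part of the original source):
 * lazy sweeping processes one page per step and stops as soon as the heap
 * has a free page to allocate from, spreading the sweep cost across
 * allocations instead of a single pause.  The control flow reduces to
 * roughly the following, with hypothetical stand-in types:
 */
#if 0
struct page_sketch { struct page_sketch *next; int live, total; };
struct heap_sketch { struct page_sketch *sweeping_page, *free_pages; };

static int
lazy_sweep_step_sketch(struct heap_sketch *heap)
{
    struct page_sketch *page;
    while ((page = heap->sweeping_page) != NULL) {
        heap->sweeping_page = page->next;
        int freed = page->total - page->live;      /* slots reclaimed on this page */
        if (freed > 0) {
            page->next = heap->free_pages;         /* expose the page to the allocator */
            heap->free_pages = page;
            return 1;                              /* stop early: allocation can proceed */
        }
    }
    return 0;                                      /* swept everything, nothing freed */
}
#endif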
5732 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
5735 while (SIZE_POOL_EDEN_HEAP(size_pool)->sweeping_page) {
5736 gc_sweep_step(objspace, size_pool, SIZE_POOL_EDEN_HEAP(size_pool));
5744 GC_ASSERT(dont_gc_val() == FALSE);
5745 if (!GC_ENABLE_LAZY_SWEEP) return;
5747 unsigned int lock_lev;
5748 gc_enter(objspace, gc_enter_event_sweep_continue, &lock_lev);
5750 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
5752 if (!gc_sweep_step(objspace, size_pool, SIZE_POOL_EDEN_HEAP(size_pool))) {
5755 if (size_pool == sweep_size_pool) {
5756 if (size_pool->allocatable_pages > 0) {
5757 heap_increment(objspace, size_pool, heap);
5761 gc_sweep_rest(objspace);
5769 gc_exit(objspace, gc_enter_event_sweep_continue, &lock_lev);
5782 GC_ASSERT(MARKED_IN_BITMAP(GET_HEAP_PINNED_BITS(forwarding_object), forwarding_object));
5783 GC_ASSERT(!MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(forwarding_object), forwarding_object));
5785 CLEAR_IN_BITMAP(GET_HEAP_PINNED_BITS(forwarding_object), forwarding_object);
5787 bool from_freelist = FL_TEST_RAW(forwarding_object, FL_FROM_FREELIST);
5790 gc_move(objspace, object, forwarding_object, page->slot_size);
5793 struct heap_page *orig_page = GET_HEAP_PAGE(object);
5794 orig_page->free_slots++;
5795 if (!from_freelist) {
5796 objspace->profile.total_freed_objects++;
5798 heap_page_add_freeobj(objspace, orig_page, object);
5800 GC_ASSERT(MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(forwarding_object), forwarding_object));
5815 bits_t *mark_bits, *pin_bits;
5819 mark_bits = page->mark_bits;
5820 pin_bits = page->pinned_bits;
5825 bitset = pin_bits[0] & ~mark_bits[0];
5826 bitset >>= NUM_IN_PAGE(p);
5827 invalidate_moved_plane(objspace, page, (uintptr_t)p, bitset);
5828 p += (BITS_BITLENGTH - NUM_IN_PAGE(p));
5830 for (i=1; i < HEAP_PAGE_BITMAP_LIMIT; i++) {
5833 bitset = pin_bits[i] & ~mark_bits[i];
5835 invalidate_moved_plane(objspace, page, (uintptr_t)p, bitset);
5836 p += BITS_BITLENGTH;
5845 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
5846 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(&size_pools[i]);
5847 list_for_each(&heap->pages, page, page_node) {
5848 page->flags.before_sweep = TRUE;
5851 heap->compact_cursor = list_tail(&heap->pages, struct heap_page, page_node);
5852 heap->compact_cursor_index = 0;
5855 if (gc_prof_enabled(objspace)) {
5857 record->moved_objects = objspace->rcompactor.total_moved;
5860 memset(objspace->rcompactor.considered_count_table, 0, T_MASK * sizeof(size_t));
5861 memset(objspace->rcompactor.moved_count_table, 0, T_MASK * sizeof(size_t));
5870 const unsigned int immediate_sweep = objspace->flags.immediate_sweep;
5872 gc_report(1, objspace, "gc_sweep: immediate: %d\n", immediate_sweep);
5874 if (immediate_sweep) {
5875 #if !GC_ENABLE_LAZY_SWEEP
5876 gc_prof_sweep_timer_start(objspace);
5878 gc_sweep_start(objspace);
5879 if (objspace->flags.during_compacting) {
5880 gc_compact_start(objspace);
5883 gc_sweep_rest(objspace);
5884 #if !GC_ENABLE_LAZY_SWEEP
5885 gc_prof_sweep_timer_stop(objspace);
5890 gc_sweep_start(objspace);
5892 if (ruby_enable_autocompact && is_full_marking(objspace)) {
5893 gc_compact_start(objspace);
5896 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
5897 list_for_each(&(SIZE_POOL_EDEN_HEAP(&size_pools[i])->pages), page, page_node) {
5898 page->flags.before_sweep = TRUE;
5903 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
5905 gc_sweep_step(objspace, size_pool, SIZE_POOL_EDEN_HEAP(size_pool));
5911 gc_heap_prepare_minimum_pages(objspace, size_pool, SIZE_POOL_EDEN_HEAP(size_pool));
5918 stack_chunk_alloc(void)
5932 return stack->chunk == NULL;
5938 size_t size = stack->index;
5939 stack_chunk_t *chunk = stack->chunk ? stack->chunk->next : NULL;
5942 size += stack->limit;
5943 chunk = chunk->next;
5951 chunk->next = stack->cache;
5952 stack->cache = chunk;
5953 stack->cache_size++;
5961 if (stack->unused_cache_size > (stack->cache_size/2)) {
5962 chunk = stack->cache;
5963 stack->cache = stack->cache->next;
5964 stack->cache_size--;
5967 stack->unused_cache_size = stack->cache_size;
5975 GC_ASSERT(stack->index == stack->limit);
5977 if (stack->cache_size > 0) {
5978 next = stack->cache;
5979 stack->cache = stack->cache->next;
5980 stack->cache_size--;
5981 if (stack->unused_cache_size > stack->cache_size)
5982 stack->unused_cache_size = stack->cache_size;
5985 next = stack_chunk_alloc();
5987 next->next = stack->chunk;
5988 stack->chunk = next;
5997 prev = stack->chunk->next;
5998 GC_ASSERT(stack->index == 0);
5999 add_stack_chunk_cache(stack, stack->chunk);
6000 stack->chunk = prev;
6001 stack->index = stack->limit;
6009 while (chunk != NULL) {
6019 mark_stack_chunk_list_free(stack->chunk);
6025 mark_stack_chunk_list_free(stack->cache);
6026 stack->cache_size = 0;
6027 stack->unused_cache_size = 0;
6055 if (stack->index == stack->limit) {
6056 push_mark_stack_chunk(stack);
6058 stack->chunk->data[stack->index++] = data;
6068 rb_bug("push_mark_stack() called for broken object");
6072 UNEXPECTED_NODE(push_mark_stack);
6076 rb_bug("rb_gc_mark(): unknown data type 0x%x(%p) %s",
6078        is_pointer_to_heap(&rb_objspace, (void *)data) ? "corrupted object" : "non object");
6084 if (is_mark_stack_empty(stack)) {
6087 if (stack->index == 1) {
6088 *data = stack->chunk->data[--stack->index];
6089 pop_mark_stack_chunk(stack);
6092 *data = stack->chunk->data[--stack->index];
6103 stack->index = stack->limit = STACK_CHUNK_SIZE;
6105 for (i=0; i < 4; i++) {
6106 add_stack_chunk_cache(stack, stack_chunk_alloc());
6108 stack->unused_cache_size = stack->cache_size;
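/*
 * Editor's note (illustrative sketch, not part of the original source):
 * the mark stack is a linked list of fixed-size chunks; pushing into a
 * full chunk links a fresh chunk onto the front, and emptied chunks are
 * recycled through a small cache.  The core push operation, modelled with
 * hypothetical names:
 */
#if 0
#include <stdlib.h>

#define CHUNK_CAP_SKETCH 1024

struct chunk_sketch {
    void *data[CHUNK_CAP_SKETCH];
    struct chunk_sketch *next;
};

struct mark_stack_sketch {
    struct chunk_sketch *chunk;  /* current (front) chunk */
    int index;                   /* next free slot in the front chunk */
};

static void
push_mark_stack_sketch(struct mark_stack_sketch *stack, void *obj)
{
    if (stack->chunk == NULL || stack->index == CHUNK_CAP_SKETCH) {
        struct chunk_sketch *c = malloc(sizeof(*c));
        c->next = stack->chunk;  /* grow by prepending a fresh chunk */
        stack->chunk = c;
        stack->index = 0;
    }
    stack->chunk->data[stack->index++] = obj;
}
#endif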
6113 #define SET_STACK_END SET_MACHINE_STACK_END(&ec->machine.stack_end)
6115 #define STACK_START (ec->machine.stack_start)
6116 #define STACK_END (ec->machine.stack_end)
6117 #define STACK_LEVEL_MAX (ec->machine.stack_maxsize/sizeof(VALUE))
6119 #if STACK_GROW_DIRECTION < 0
6120 # define STACK_LENGTH (size_t)(STACK_START - STACK_END)
6121 #elif STACK_GROW_DIRECTION > 0
6122 # define STACK_LENGTH (size_t)(STACK_END - STACK_START + 1)
6124 # define STACK_LENGTH ((STACK_END < STACK_START) ? (size_t)(STACK_START - STACK_END) \
6125 : (size_t)(STACK_END - STACK_START + 1))
6127 #if !STACK_GROW_DIRECTION
6128 int ruby_stack_grow_direction;
6130 ruby_get_stack_grow_direction(volatile VALUE *addr)
6133 SET_MACHINE_STACK_END(&end);
6135 if (end > addr) return ruby_stack_grow_direction = 1;
6136 return ruby_stack_grow_direction = -1;
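/*
 * Editor's note (illustrative sketch, not part of the original source):
 * when STACK_GROW_DIRECTION is unknown at compile time, it is probed once
 * at runtime by comparing the address of a local in a callee frame with an
 * address from its caller; the callee's frame lies below on a
 * downward-growing stack.  The probe on its own, with hypothetical names:
 */
#if 0
static int
probe_stack_direction_sketch(volatile char *caller_local)
{
    volatile char callee_local;
    /* callee frame above caller frame => stack grows upward (+1) */
    return ((volatile char *)&callee_local > caller_local) ? 1 : -1;
}
#endif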
6145 if (p) *p = STACK_UPPER(STACK_END, STACK_START, STACK_END);
6146 return STACK_LENGTH;
6149 #define PREVENT_STACK_OVERFLOW 1
6150 #ifndef PREVENT_STACK_OVERFLOW
6151 #if !(defined(POSIX_SIGNAL) && defined(SIGSEGV) && defined(HAVE_SIGALTSTACK))
6152 # define PREVENT_STACK_OVERFLOW 1
6154 # define PREVENT_STACK_OVERFLOW 0
6157 #if PREVENT_STACK_OVERFLOW && !defined(__EMSCRIPTEN__)
6163 size_t length = STACK_LENGTH;
6164 size_t maximum_length = STACK_LEVEL_MAX - water_mark;
6166 return length > maximum_length;
6169 #define stack_check(ec, water_mark) FALSE
6172 #define STACKFRAME_FOR_CALL_CFUNC 2048
6174 MJIT_FUNC_EXPORTED int
6177 return stack_check(ec, STACKFRAME_FOR_CALL_CFUNC);
6183 return stack_check(GET_EC(), STACKFRAME_FOR_CALL_CFUNC);
6203 if (end <= start) return;
6205 each_location(objspace, start, n, cb);
6211 gc_mark_locations(&rb_objspace, start, end, gc_mark_maybe);
6219 for (i=0; i<n; i++) {
6220 gc_mark(objspace, values[i]);
6225 rb_gc_mark_values(long n, const VALUE *values)
6230 for (i=0; i<n; i++) {
6231 gc_mark_and_pin(objspace, values[i]);
6240 for (i=0; i<n; i++) {
6241 if (is_markable_object(objspace, values[i])) {
6242 gc_mark_and_pin(objspace, values[i]);
6248 rb_gc_mark_vm_stack_values(long n, const VALUE *values)
6251 gc_mark_stack_values(objspace, n, values);
6255 mark_value(st_data_t key, st_data_t value, st_data_t data)
6258 gc_mark(objspace, (VALUE)value);
6263 mark_value_pin(st_data_t key, st_data_t value, st_data_t data)
6266 gc_mark_and_pin(objspace, (VALUE)value);
6273 if (!tbl || tbl->num_entries == 0) return;
6274 st_foreach(tbl, mark_value, (st_data_t)objspace);
6280 if (!tbl || tbl->num_entries == 0) return;
6281 st_foreach(tbl, mark_value_pin, (st_data_t)objspace);
6285 mark_key(st_data_t key, st_data_t value, st_data_t data)
6288 gc_mark_and_pin(objspace, (VALUE)key);
6296 st_foreach(tbl, mark_key, (st_data_t)objspace);
6300 pin_value(st_data_t key, st_data_t value, st_data_t data)
6303 gc_mark_and_pin(objspace, (VALUE)value);
6311 st_foreach(tbl, pin_value, (st_data_t)objspace);
6321 mark_keyvalue(st_data_t key, st_data_t value, st_data_t data)
6325 gc_mark(objspace, (VALUE)key);
6326 gc_mark(objspace, (VALUE)value);
6331 pin_key_pin_value(st_data_t key, st_data_t value, st_data_t data)
6335 gc_mark_and_pin(objspace, (VALUE)key);
6336 gc_mark_and_pin(objspace, (VALUE)value);
6341 pin_key_mark_value(st_data_t key, st_data_t value, st_data_t data)
6345 gc_mark_and_pin(objspace, (VALUE)key);
6346 gc_mark(objspace, (VALUE)value);
6353 if (rb_hash_compare_by_id_p(hash)) {
6354 rb_hash_stlike_foreach(hash, pin_key_mark_value, (st_data_t)objspace);
6357 rb_hash_stlike_foreach(hash, mark_keyvalue, (st_data_t)objspace);
6360 if (RHASH_AR_TABLE_P(hash)) {
6361 if (LIKELY(during_gc) && RHASH_TRANSIENT_P(hash)) {
6362 rb_transient_heap_mark(hash, RHASH_AR_TABLE(hash));
6366 VM_ASSERT(!RHASH_TRANSIENT_P(hash));
6368 gc_mark(objspace, RHASH(hash)->ifnone);
6375 st_foreach(tbl, pin_key_pin_value, (st_data_t)objspace);
6389 gc_mark(objspace, me->owner);
6390 gc_mark(objspace, me->defined_class);
6393 switch (def->type) {
6394 case VM_METHOD_TYPE_ISEQ:
6396 gc_mark(objspace, (VALUE)def->body.iseq.cref);
6398 if (def->iseq_overload && me->defined_class) {
6401 gc_mark_and_pin(objspace, (VALUE)me);
6404 case VM_METHOD_TYPE_ATTRSET:
6405 case VM_METHOD_TYPE_IVAR:
6406 gc_mark(objspace, def->body.attr.location);
6408 case VM_METHOD_TYPE_BMETHOD:
6409 gc_mark(objspace, def->body.bmethod.proc);
6410 if (def->body.bmethod.hooks) rb_hook_list_mark(def->body.bmethod.hooks);
6412 case VM_METHOD_TYPE_ALIAS:
6413 gc_mark(objspace, (VALUE)def->body.alias.original_me);
6415 case VM_METHOD_TYPE_REFINED:
6416 gc_mark(objspace, (VALUE)def->body.refined.orig_me);
6417 gc_mark(objspace, (VALUE)def->body.refined.owner);
6419 case VM_METHOD_TYPE_CFUNC:
6420 case VM_METHOD_TYPE_ZSUPER:
6421 case VM_METHOD_TYPE_MISSING:
6422 case VM_METHOD_TYPE_OPTIMIZED:
6423 case VM_METHOD_TYPE_UNDEF:
6424 case VM_METHOD_TYPE_NOTIMPLEMENTED:
6430 static enum rb_id_table_iterator_result
6431 mark_method_entry_i(VALUE me, void *data)
6435 gc_mark(objspace, me);
6436 return ID_TABLE_CONTINUE;
6443 rb_id_table_foreach_values(tbl, mark_method_entry_i, objspace);
6447 static enum rb_id_table_iterator_result
6448 mark_const_entry_i(VALUE value, void *data)
6453 gc_mark(objspace, ce->value);
6454 gc_mark(objspace, ce->file);
6455 return ID_TABLE_CONTINUE;
6462 rb_id_table_foreach_values(tbl, mark_const_entry_i, objspace);
6465 #if STACK_GROW_DIRECTION < 0
6466 #define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_END, (end) = STACK_START)
6467 #elif STACK_GROW_DIRECTION > 0
6468 #define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_START, (end) = STACK_END+(appendix))
6470 #define GET_STACK_BOUNDS(start, end, appendix) \
6471 ((STACK_END < STACK_START) ? \
6472 ((start) = STACK_END, (end) = STACK_START) : ((start) = STACK_START, (end) = STACK_END+(appendix)))
6478 #ifndef __EMSCRIPTEN__
6484 VALUE v[sizeof(rb_jmp_buf) / (sizeof(VALUE))];
6485 } save_regs_gc_mark;
6486 VALUE *stack_start, *stack_end;
6488 FLUSH_REGISTER_WINDOWS;
6489 memset(&save_regs_gc_mark, 0, sizeof(save_regs_gc_mark));
6491 rb_setjmp(save_regs_gc_mark.j);
6497 GET_STACK_BOUNDS(stack_start, stack_end, 1);
6499 each_location(objspace, save_regs_gc_mark.v, numberof(save_regs_gc_mark.v), gc_mark_maybe);
6501 each_stack_location(objspace, ec, stack_start, stack_end, gc_mark_maybe);
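/*
 * Editor's note (illustrative sketch, not part of the original source):
 * to reach VALUEs held only in CPU registers, the collector spills the
 * registers into a jmp_buf with setjmp() and then scans that buffer word
 * by word as potential pointers, exactly as it scans the machine stack.
 * The spill-and-scan idea on its own, with hypothetical names:
 */
#if 0
#include <setjmp.h>
#include <string.h>

static void
scan_registers_sketch(void (*maybe_mark)(void *))
{
    union {
        jmp_buf j;
        void *v[sizeof(jmp_buf) / sizeof(void *)];
    } regs;
    memset(&regs, 0, sizeof(regs));
    (void)setjmp(regs.j);                 /* spill callee-visible registers */
    for (size_t i = 0; i < sizeof(regs.v) / sizeof(regs.v[0]); i++) {
        maybe_mark(regs.v[i]);            /* conservatively treat each word as a pointer */
    }
}
#endif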
6505 static VALUE *rb_emscripten_stack_range_tmp[2];
6508 rb_emscripten_mark_locations(void *begin, void *end)
6510 rb_emscripten_stack_range_tmp[0] = begin;
6511 rb_emscripten_stack_range_tmp[1] = end;
6517 emscripten_scan_stack(rb_emscripten_mark_locations);
6518 each_stack_location(objspace, ec, rb_emscripten_stack_range_tmp[0], rb_emscripten_stack_range_tmp[1], gc_mark_maybe);
6520 emscripten_scan_registers(rb_emscripten_mark_locations);
6521 each_stack_location(objspace, ec, rb_emscripten_stack_range_tmp[0], rb_emscripten_stack_range_tmp[1], gc_mark_maybe);
6529 VALUE *stack_start, *stack_end;
6531 GET_STACK_BOUNDS(stack_start, stack_end, 0);
6532 each_stack_location(objspace, ec, stack_start, stack_end, cb);
6538 each_machine_stack_value(ec, gc_mark_maybe);
6546 gc_mark_locations(objspace, stack_start, stack_end, cb);
6548 #if defined(__mc68000__)
6549 gc_mark_locations(objspace,
6550                   (VALUE*)((char*)stack_start + 2),
6551                   (VALUE*)((char*)stack_end - 2), cb);
6570 (void)VALGRIND_MAKE_MEM_DEFINED(&obj, sizeof(obj));
6572 if (is_pointer_to_heap(objspace, (void *)obj)) {
6573 void *ptr = __asan_region_is_poisoned((void *)obj, SIZEOF_VALUE);
6574 asan_unpoison_object(obj, false);
6582 gc_mark_and_pin(objspace, obj);
6588 asan_poison_object(obj);
6602 ASSERT_vm_locking();
6603 if (RVALUE_MARKED(obj)) return 0;
6604 MARK_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj);
6611 struct heap_page *page = GET_HEAP_PAGE(obj);
6612 bits_t *uncollectible_bits = &page->uncollectible_bits[0];
6614 if (!MARKED_IN_BITMAP(uncollectible_bits, obj)) {
6615 page->flags.has_uncollectible_shady_objects = TRUE;
6616 MARK_IN_BITMAP(uncollectible_bits, obj);
6617 objspace->rgengc.uncollectible_wb_unprotected_objects++;
6619 #if RGENGC_PROFILE > 0
6620 objspace->profile.total_remembered_shady_object_count++;
6621 #if RGENGC_PROFILE >= 2
6622 objspace->profile.remembered_shady_object_count_types[BUILTIN_TYPE(obj)]++;
6635 const VALUE old_parent = objspace->rgengc.parent_object;
6638 if (RVALUE_WB_UNPROTECTED(obj)) {
6639 if (gc_remember_unprotected(objspace, obj)) {
6640 gc_report(2, objspace, "relation: (O->S) %s -> %s\n", obj_info(old_parent), obj_info(obj));
6644 if (!RVALUE_OLD_P(obj)) {
6645 if (RVALUE_MARKED(obj)) {
6647 gc_report(2, objspace, "relation: (O->unmarked Y) %s -> %s\n", obj_info(old_parent), obj_info(obj));
6648 RVALUE_AGE_SET_OLD(objspace, obj);
6649 if (is_incremental_marking(objspace)) {
6650 if (!RVALUE_MARKING(obj)) {
6651 gc_grey(objspace, obj);
6655 rgengc_remember(objspace, obj);
6659 gc_report(2, objspace, "relation: (O->Y) %s -> %s\n", obj_info(old_parent), obj_info(obj));
6660 RVALUE_AGE_SET_CANDIDATE(objspace, obj);
6666 GC_ASSERT(old_parent == objspace->rgengc.parent_object);
6672 #if RGENGC_CHECK_MODE
6673 if (RVALUE_MARKED(obj) == FALSE) rb_bug("gc_grey: %s is not marked.", obj_info(obj));
6674 if (RVALUE_MARKING(obj) == TRUE) rb_bug("gc_grey: %s is marking/remembered.", obj_info(obj));
6677 #if GC_ENABLE_INCREMENTAL_MARK
6678 if (is_incremental_marking(objspace)) {
6679 MARK_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj);
6683 push_mark_stack(&objspace->mark_stack, obj);
6689 struct heap_page *page = GET_HEAP_PAGE(obj);
6691 GC_ASSERT(RVALUE_MARKING(obj) == FALSE);
6692 check_rvalue_consistency(obj);
6694 if (!RVALUE_PAGE_WB_UNPROTECTED(page, obj)) {
6695 if (!RVALUE_OLD_P(obj)) {
6696 gc_report(3, objspace, "gc_aging: YOUNG: %s\n", obj_info(obj));
6697 RVALUE_AGE_INC(objspace, obj);
6699 else if (is_full_marking(objspace)) {
6700 GC_ASSERT(RVALUE_PAGE_UNCOLLECTIBLE(page, obj) == FALSE);
6701 RVALUE_PAGE_OLD_UNCOLLECTIBLE_SET(objspace, page, obj);
6704 check_rvalue_consistency(obj);
6706 objspace->marked_slots++;
6710 static void reachable_objects_from_callback(VALUE obj);
6715 if (LIKELY(during_gc)) {
6716 rgengc_check_relation(objspace, obj);
6717 if (!gc_mark_set(objspace, obj)) return;
6720 if (objspace->rgengc.parent_object) {
6721 RUBY_DEBUG_LOG("%p (%s) parent:%p (%s)",
6722                (void *)obj, obj_type_name(obj),
6723                (void *)objspace->rgengc.parent_object, obj_type_name(objspace->rgengc.parent_object));
6726 RUBY_DEBUG_LOG("%p (%s)", (void *)obj, obj_type_name(obj));
6732 rb_bug("try to mark T_NONE object");
6734 gc_aging(objspace, obj);
6735 gc_grey(objspace, obj);
6738 reachable_objects_from_callback(obj);
6745 GC_ASSERT(is_markable_object(objspace, obj));
6746 if (UNLIKELY(objspace->flags.during_compacting)) {
6747 if (LIKELY(during_gc)) {
6748 MARK_IN_BITMAP(GET_HEAP_PINNED_BITS(obj), obj);
6756 if (!is_markable_object(objspace, obj)) return;
6757 gc_pin(objspace, obj);
6758 gc_mark_ptr(objspace, obj);
6764 if (!is_markable_object(objspace, obj)) return;
6765 gc_mark_ptr(objspace, obj);
6785 rb_objspace_marked_object_p(VALUE obj)
6787 return RVALUE_MARKED(obj) ? TRUE : FALSE;
6793 if (RVALUE_OLD_P(obj)) {
6794 objspace->rgengc.parent_object = obj;
6797 objspace->rgengc.parent_object = Qfalse;
6804 switch (imemo_type(obj)) {
6809 if (LIKELY(env->ep)) {
6811 GC_ASSERT(env->ep[VM_ENV_DATA_INDEX_ENV] == obj);
6812 GC_ASSERT(VM_ENV_ESCAPED_P(env->ep));
6813 gc_mark_values(objspace, (long)env->env_size, env->env);
6814 VM_ENV_FLAGS_SET(env->ep, VM_ENV_FLAG_WB_REQUIRED);
6815 gc_mark(objspace, (VALUE)rb_vm_env_prev_env(env));
6816 gc_mark(objspace, (VALUE)env->iseq);
6821 gc_mark(objspace, RANY(obj)->as.imemo.cref.klass_or_self);
6822 gc_mark(objspace, (VALUE)RANY(obj)->as.imemo.cref.next);
6823 gc_mark(objspace, RANY(obj)->as.imemo.cref.refinements);
6826 gc_mark(objspace, RANY(obj)->as.imemo.svar.cref_or_me);
6827 gc_mark(objspace, RANY(obj)->as.imemo.svar.lastline);
6828 gc_mark(objspace, RANY(obj)->as.imemo.svar.backref);
6829 gc_mark(objspace, RANY(obj)->as.imemo.svar.others);
6831 case imemo_throw_data:
6832 gc_mark(objspace, RANY(obj)->as.imemo.throw_data.throw_obj);
6835 gc_mark_maybe(objspace, (VALUE)RANY(obj)->as.imemo.ifunc.data);
6838 gc_mark(objspace, RANY(obj)->as.imemo.memo.v1);
6839 gc_mark(objspace, RANY(obj)->as.imemo.memo.v2);
6840 gc_mark_maybe(objspace, RANY(obj)->as.imemo.memo.u3.value);
6843 mark_method_entry(objspace, &RANY(obj)->as.imemo.ment);
6853 }
while ((m = m->next) != NULL);
6857 rb_ast_mark(&RANY(obj)->as.imemo.ast);
6859 case imemo_parser_strterm:
6860 rb_strterm_mark(obj);
6862 case imemo_callinfo:
6864 case imemo_callcache:
6868 gc_mark(objspace, (VALUE)vm_cc_cme(cc));
6871 case imemo_constcache:
6874 gc_mark(objspace, ice->value);
6877 #if VM_CHECK_MODE > 0
6879 VM_UNREACHABLE(gc_mark_imemo);
6887 register RVALUE *any = RANY(obj);
6888 gc_mark_set_parent(objspace, obj);
6891 rb_mark_generic_ivar(obj);
6904 rb_bug(
"rb_gc_mark() called for broken object");
6912 gc_mark_imemo(objspace, obj);
6919 gc_mark(objspace, any->as.basic.klass);
6927 if (!RCLASS_EXT(obj)) break;
6929 mark_m_tbl(objspace, RCLASS_M_TBL(obj));
6930 cc_table_mark(objspace, obj);
6931 mark_tbl_no_pin(objspace, RCLASS_IV_TBL(obj));
6932 mark_const_tbl(objspace, RCLASS_CONST_TBL(obj));
6936 if (RICLASS_OWNS_M_TBL_P(obj)) {
6937 mark_m_tbl(objspace, RCLASS_M_TBL(obj));
6942 if (!RCLASS_EXT(obj)) break;
6943 mark_m_tbl(objspace, RCLASS_CALLABLE_M_TBL(obj));
6944 cc_table_mark(objspace, obj);
6949 VALUE root = any->as.array.as.heap.aux.shared_root;
6950 gc_mark(objspace, root);
6955 for (i=0; i < len; i++) {
6956 gc_mark(objspace, ptr[i]);
6959 if (LIKELY(during_gc)) {
6962 rb_transient_heap_mark(obj, ptr);
6969 mark_hash(objspace, obj);
6973 if (STR_SHARED_P(obj)) {
6974 gc_mark(objspace, any->as.string.as.heap.aux.shared);
6985 if (mark_func) (*mark_func)(ptr);
6995 for (i = 0; i < len; i++) {
6996 gc_mark(objspace, ptr[i]);
6999 if (LIKELY(during_gc) &&
7000     ROBJ_TRANSIENT_P(obj)) {
7001 rb_transient_heap_mark(obj, ptr);
7007 if (any->as.file.fptr) {
7008 gc_mark(objspace, any->as.file.fptr->self);
7009 gc_mark(objspace, any->as.file.fptr->pathv);
7019 gc_mark(objspace, any->as.regexp.src);
7023 gc_mark(objspace, any->as.match.regexp);
7024 if (any->as.match.str) {
7025 gc_mark(objspace, any->as.match.str);
7030 gc_mark(objspace, any->as.rational.num);
7031 gc_mark(objspace, any->as.rational.den);
7035 gc_mark(objspace, any->as.complex.real);
7036 gc_mark(objspace, any->as.complex.imag);
7043 const VALUE *
const ptr = RSTRUCT_CONST_PTR(obj);
7045 for (i=0; i<len; i++) {
7046 gc_mark(objspace, ptr[i]);
7049 if (LIKELY(during_gc) &&
7050 RSTRUCT_TRANSIENT_P(obj)) {
7051 rb_transient_heap_mark(obj, ptr);
7058 rb_gcdebug_print_obj_condition((VALUE)obj);
7063 rb_bug("rb_gc_mark(): unknown data type 0x%x(%p) %s",
7065 is_pointer_to_heap(objspace, any) ? "corrupted object" : "non object");
7074 gc_mark_stacked_objects(rb_objspace_t *objspace, int incremental, size_t count)
7078 #if GC_ENABLE_INCREMENTAL_MARK
7079 size_t marked_slots_at_the_beginning = objspace->marked_slots;
7080 size_t popped_count = 0;
7083 while (pop_mark_stack(mstack, &obj)) {
7084 if (obj == Qundef) continue;
7086 if (RGENGC_CHECK_MODE && !RVALUE_MARKED(obj)) {
7087 rb_bug("gc_mark_stacked_objects: %s is not marked.", obj_info(obj));
7089 gc_mark_children(objspace, obj);
7091 #if GC_ENABLE_INCREMENTAL_MARK
7093 if (RGENGC_CHECK_MODE && !RVALUE_MARKING(obj)) {
7094 rb_bug("gc_mark_stacked_objects: incremental, but marking bit is 0");
7096 CLEAR_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj);
7099 if (popped_count + (objspace->marked_slots - marked_slots_at_the_beginning) > count) {
7109 if (RGENGC_CHECK_MODE >= 3) gc_verify_internal_consistency(objspace);
7111 if (is_mark_stack_empty(mstack)) {
7112 shrink_stack_chunk_cache(mstack);
7121 gc_mark_stacked_objects_incremental(rb_objspace_t *objspace, size_t count)
7123 return gc_mark_stacked_objects(objspace, TRUE, count);
7129 return gc_mark_stacked_objects(objspace, FALSE, 0);
7132 #if PRINT_ROOT_TICKS
7133 #define MAX_TICKS 0x100
7134 static tick_t mark_ticks[MAX_TICKS];
7135 static const char *mark_ticks_categories[MAX_TICKS];
7138 show_mark_ticks(void)
7141 fprintf(stderr, "mark ticks result:\n");
7142 for (i=0; i<MAX_TICKS; i++) {
7143 const char *category = mark_ticks_categories[i];
7145 fprintf(stderr, "%s\t%8lu\n", category, (unsigned long)mark_ticks[i]);
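/* gc_mark_roots() walks the VM root set in named checkpoints (vm, finalizers,
 * machine_context, global_list, end_proc, global_tbl, object_id) so profiling
 * and crash reports can tell which root category was being marked. */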
7156 gc_mark_roots(rb_objspace_t *objspace, const char **categoryp)
7160 rb_vm_t *vm = rb_ec_vm_ptr(ec);
7162 #if PRINT_ROOT_TICKS
7163 tick_t start_tick = tick();
7165 const char *prev_category = 0;
7167 if (mark_ticks_categories[0] == 0) {
7168 atexit(show_mark_ticks);
7172 if (categoryp) *categoryp = "xxx";
7174 objspace->rgengc.parent_object = Qfalse;
7176 #if PRINT_ROOT_TICKS
7177 #define MARK_CHECKPOINT_PRINT_TICK(category) do { \
7178 if (prev_category) { \
7179 tick_t t = tick(); \
7180 mark_ticks[tick_count] = t - start_tick; \
7181 mark_ticks_categories[tick_count] = prev_category; \
7184 prev_category = category; \
7185 start_tick = tick(); \
7188 #define MARK_CHECKPOINT_PRINT_TICK(category)
7191 #define MARK_CHECKPOINT(category) do { \
7192 if (categoryp) *categoryp = category; \
7193 MARK_CHECKPOINT_PRINT_TICK(category); \
7196 MARK_CHECKPOINT("vm");
7199 if (vm->self) gc_mark(objspace, vm->self);
7201 MARK_CHECKPOINT("finalizers");
7202 mark_finalizer_tbl(objspace, finalizer_table);
7204 MARK_CHECKPOINT("machine_context");
7205 mark_current_machine_context(objspace, ec);
7208 MARK_CHECKPOINT("global_list");
7209 for (list = global_list; list; list = list->next) {
7210 gc_mark_maybe(objspace, *list->varptr);
7213 MARK_CHECKPOINT("end_proc");
7216 MARK_CHECKPOINT("global_tbl");
7217 rb_gc_mark_global_tbl();
7219 MARK_CHECKPOINT("object_id");
7221 mark_tbl_no_pin(objspace, objspace->obj_to_id_tbl);
7223 if (stress_to_class) rb_gc_mark(stress_to_class);
7225 MARK_CHECKPOINT("finish");
7226 #undef MARK_CHECKPOINT
7229 #if RGENGC_CHECK_MODE >= 4
7231 #define MAKE_ROOTSIG(obj) (((VALUE)(obj) << 1) | 0x01)
7232 #define IS_ROOTSIG(obj) ((VALUE)(obj) & 0x01)
7233 #define GET_ROOTSIG(obj) ((const char *)((VALUE)(obj) >> 1))
7241 static struct reflist *
7242 reflist_create(VALUE obj)
7244 struct reflist *refs = xmalloc(sizeof(struct reflist));
7247 refs->list[0] = obj;
7253 reflist_destruct(struct reflist *refs)
7260 reflist_add(struct reflist *refs, VALUE obj)
7262 if (refs->pos == refs->size) {
7264 SIZED_REALLOC_N(refs->list, VALUE, refs->size, refs->size/2);
7267 refs->list[refs->pos++] = obj;
7271 reflist_dump(struct reflist *refs)
7274 for (i=0; i<refs->pos; i++) {
7275 VALUE obj = refs->list[i];
7276 if (IS_ROOTSIG(obj)) {
7277 fprintf(stderr, "<root@%s>", GET_ROOTSIG(obj));
7280 fprintf(stderr, "<%s>", obj_info(obj));
7282 if (i+1 < refs->pos) fprintf(stderr, ", ");
7287 reflist_referred_from_machine_context(struct reflist *refs)
7290 for (i=0; i<refs->pos; i++) {
7291 VALUE obj = refs->list[i];
7292 if (IS_ROOTSIG(obj) && strcmp(GET_ROOTSIG(obj), "machine_context") == 0) return 1;
7307 const char *category;
7313 allrefs_add(struct allrefs *data, VALUE obj)
7315 struct reflist *refs;
7318 if (st_lookup(data->references, obj, &r)) {
7319 refs = (struct reflist *)r;
7320 reflist_add(refs, data->root_obj);
7324 refs = reflist_create(data->root_obj);
7325 st_insert(data->references, obj, (st_data_t)refs);
7331 allrefs_i(VALUE obj, void *ptr)
7333 struct allrefs *data = (struct allrefs *)ptr;
7335 if (allrefs_add(data, obj)) {
7336 push_mark_stack(&data->mark_stack, obj);
7341 allrefs_roots_i(VALUE obj, void *ptr)
7343 struct allrefs *data = (struct allrefs *)ptr;
7344 if (strlen(data->category) == 0) rb_bug("!!!");
7345 data->root_obj = MAKE_ROOTSIG(data->category);
7347 if (allrefs_add(data, obj)) {
7348 push_mark_stack(&data->mark_stack, obj);
7351 #define PUSH_MARK_FUNC_DATA(v) do { \
7352 struct gc_mark_func_data_struct *prev_mark_func_data = GET_RACTOR()->mfd; \
7353 GET_RACTOR()->mfd = (v);
7355 #define POP_MARK_FUNC_DATA() GET_RACTOR()->mfd = prev_mark_func_data;} while (0)
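/* objspace_allrefs() (used by RGENGC_CHECK_MODE >= 4) replays marking with GC
 * disabled and records, for every reachable object, the roots and objects that
 * refer to it, returning an st_table consumed by the consistency checkers. */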
7360 struct allrefs data;
7361 struct gc_mark_func_data_struct mfd;
7363 int prev_dont_gc = dont_gc_val();
7366 data.objspace = objspace;
7367 data.references = st_init_numtable();
7368 init_mark_stack(&data.mark_stack);
7370 mfd.mark_func = allrefs_roots_i;
7374 PUSH_MARK_FUNC_DATA(&mfd);
7375 GET_RACTOR()->mfd = &mfd;
7376 gc_mark_roots(objspace, &data.category);
7377 POP_MARK_FUNC_DATA();
7380 while (pop_mark_stack(&data.mark_stack, &obj)) {
7381 rb_objspace_reachable_objects_from(data.root_obj = obj, allrefs_i, &data);
7383 free_stack_chunks(&data.mark_stack);
7385 dont_gc_set(prev_dont_gc);
7386 return data.references;
7390 objspace_allrefs_destruct_i(st_data_t key, st_data_t value, st_data_t ptr)
7392 struct reflist *refs = (struct reflist *)value;
7393 reflist_destruct(refs);
7398 objspace_allrefs_destruct(struct st_table *refs)
7400 st_foreach(refs, objspace_allrefs_destruct_i, 0);
7401 st_free_table(refs);
7404 #if RGENGC_CHECK_MODE >= 5
7406 allrefs_dump_i(st_data_t k, st_data_t v, st_data_t ptr)
7409 struct reflist *refs = (struct reflist *)v;
7410 fprintf(stderr, "[allrefs_dump_i] %s <- ", obj_info(obj));
7412 fprintf(stderr, "\n");
7419 VALUE size = objspace->rgengc.allrefs_table->num_entries;
7420 fprintf(stderr, "[all refs] (size: %"PRIuVALUE")\n", size);
7421 st_foreach(objspace->rgengc.allrefs_table, allrefs_dump_i, 0);
7426 gc_check_after_marks_i(st_data_t k, st_data_t v, st_data_t ptr)
7429 struct reflist *refs = (struct reflist *)v;
7433 if (!MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj)) {
7434 fprintf(stderr, "gc_check_after_marks_i: %s is not marked and not oldgen.\n", obj_info(obj));
7435 fprintf(stderr, "gc_check_after_marks_i: %p is referred from ", (void *)obj);
7438 if (reflist_referred_from_machine_context(refs)) {
7439 fprintf(stderr, " (marked from machine stack).\n");
7443 objspace->rgengc.error_count++;
7444 fprintf(stderr, "\n");
7451 gc_marks_check(rb_objspace_t *objspace, st_foreach_callback_func *checker_func, const char *checker_name)
7453 size_t saved_malloc_increase = objspace->malloc_params.increase;
7454 #if RGENGC_ESTIMATE_OLDMALLOC
7455 size_t saved_oldmalloc_increase = objspace->rgengc.oldmalloc_increase;
7457 VALUE already_disabled = rb_objspace_gc_disable(objspace);
7459 objspace->rgengc.allrefs_table = objspace_allrefs(objspace);
7462 st_foreach(objspace->rgengc.allrefs_table, checker_func, (st_data_t)objspace);
7465 if (objspace->rgengc.error_count > 0) {
7466 #if RGENGC_CHECK_MODE >= 5
7467 allrefs_dump(objspace);
7469 if (checker_name) rb_bug("%s: GC has problem.", checker_name);
7472 objspace_allrefs_destruct(objspace->rgengc.allrefs_table);
7473 objspace->rgengc.allrefs_table = 0;
7475 if (already_disabled == Qfalse) rb_objspace_gc_enable(objspace);
7476 objspace->malloc_params.increase = saved_malloc_increase;
7477 #if RGENGC_ESTIMATE_OLDMALLOC
7478 objspace->rgengc.oldmalloc_increase = saved_oldmalloc_increase;
7486 size_t live_object_count;
7487 size_t zombie_object_count;
7490 size_t old_object_count;
7491 size_t remembered_shady_count;
7495 check_generation_i(const VALUE child, void *ptr)
7498 const VALUE parent = data->parent;
7500 if (RGENGC_CHECK_MODE) GC_ASSERT(RVALUE_OLD_P(parent));
7502 if (!RVALUE_OLD_P(child)) {
7503 if (!RVALUE_REMEMBERED(parent) &&
7504 !RVALUE_REMEMBERED(child) &&
7505 !RVALUE_UNCOLLECTIBLE(child)) {
7506 fprintf(stderr, "verify_internal_consistency_reachable_i: WB miss (O->Y) %s -> %s\n", obj_info(parent), obj_info(child));
7513 check_color_i(const VALUE child, void *ptr)
7516 const VALUE parent = data->parent;
7518 if (!RVALUE_WB_UNPROTECTED(parent) && RVALUE_WHITE_P(child)) {
7519 fprintf(stderr, "verify_internal_consistency_reachable_i: WB miss (B->W) - %s -> %s\n",
7520 obj_info(parent), obj_info(child));
7526 check_children_i(const VALUE child, void *ptr)
7529 if (check_rvalue_consistency_force(child, FALSE) != 0) {
7530 fprintf(stderr, "check_children_i: %s has error (referenced from %s)",
7531 obj_info(child), obj_info(data->parent));
7532 rb_print_backtrace();
7539 verify_internal_consistency_i(void *page_start, void *page_end, size_t stride,
7545 for (obj = (VALUE)page_start; obj != (VALUE)page_end; obj += stride) {
7546 void *poisoned = asan_poisoned_object_p(obj);
7547 asan_unpoison_object(obj, false);
7549 if (is_live_object(objspace, obj)) {
7551 data->live_object_count++;
7556 if (!gc_object_moved_p(objspace, obj)) {
7558 rb_objspace_reachable_objects_from(obj, check_children_i, (void *)data);
7562 if (RVALUE_OLD_P(obj)) data->old_object_count++;
7563 if (RVALUE_WB_UNPROTECTED(obj) && RVALUE_UNCOLLECTIBLE(obj)) data->remembered_shady_count++;
7565 if (!is_marking(objspace) && RVALUE_OLD_P(obj)) {
7568 rb_objspace_reachable_objects_from(obj, check_generation_i, (void *)data);
7571 if (is_incremental_marking(objspace)) {
7572 if (RVALUE_BLACK_P(obj)) {
7575 rb_objspace_reachable_objects_from(obj, check_color_i, (void *)data);
7582 data->zombie_object_count++;
7587 asan_poison_object(obj);
7598 unsigned int has_remembered_shady = FALSE;
7599 unsigned int has_remembered_old = FALSE;
7600 int remembered_old_objects = 0;
7601 int free_objects = 0;
7602 int zombie_objects = 0;
7603 int stride = page->slot_size / sizeof(RVALUE);
7605 for (i=0; i<page->total_slots; i+=stride) {
7607 void *poisoned = asan_poisoned_object_p(val);
7608 asan_unpoison_object(val, false);
7610 if (RBASIC(val) == 0) free_objects++;
7612 if (RVALUE_PAGE_UNCOLLECTIBLE(page, val) && RVALUE_PAGE_WB_UNPROTECTED(page, val)) {
7613 has_remembered_shady = TRUE;
7615 if (RVALUE_PAGE_MARKING(page, val)) {
7616 has_remembered_old = TRUE;
7617 remembered_old_objects++;
7622 asan_poison_object(val);
7626 if (!is_incremental_marking(objspace) &&
7627 page->flags.has_remembered_objects == FALSE && has_remembered_old == TRUE) {
7629 for (i=0; i<page->total_slots; i++) {
7631 if (RVALUE_PAGE_MARKING(page, val)) {
7632 fprintf(stderr, "marking -> %s\n", obj_info(val));
7635 rb_bug("page %p's has_remembered_objects should be false, but there are remembered old objects (%d). %s",
7636 (void *)page, remembered_old_objects, obj ? obj_info(obj) : "");
7639 if (page->flags.has_uncollectible_shady_objects == FALSE && has_remembered_shady == TRUE) {
7640 rb_bug("page %p's has_remembered_shady should be false, but there are remembered shady objects. %s",
7641 (void *)page, obj ? obj_info(obj) : "");
7646 if (page->free_slots != free_objects) {
7647 rb_bug("page %p's free_slots should be %d, but %d\n", (void *)page, page->free_slots, free_objects);
7650 if (page->final_slots != zombie_objects) {
7651 rb_bug("page %p's final_slots should be %d, but %d\n", (void *)page, page->final_slots, zombie_objects);
7654 return remembered_old_objects;
7658 gc_verify_heap_pages_(rb_objspace_t *objspace, struct list_head *head)
7660 int remembered_old_objects = 0;
7663 list_for_each(head, page, page_node) {
7664 asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
7665 RVALUE *p = page->freelist;
7669 asan_unpoison_object(vp, false);
7671 fprintf(stderr, "freelist slot expected to be T_NONE but was: %s\n", obj_info(vp));
7673 p = p->as.free.next;
7674 asan_poison_object(prev);
7676 asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
7678 if (page->flags.has_remembered_objects == FALSE) {
7679 remembered_old_objects += gc_verify_heap_page(objspace, page, Qfalse);
7683 return remembered_old_objects;
7689 int remembered_old_objects = 0;
7690 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
7691 remembered_old_objects += gc_verify_heap_pages_(objspace, &(SIZE_POOL_EDEN_HEAP(&size_pools[i])->pages));
7692 remembered_old_objects += gc_verify_heap_pages_(objspace, &(SIZE_POOL_TOMB_HEAP(&size_pools[i])->pages));
7694 return remembered_old_objects;
7708 gc_verify_internal_consistency_m(VALUE dummy)
7719 data.objspace = objspace;
7720 gc_report(5, objspace, "gc_verify_internal_consistency: start\n");
7723 for (size_t i = 0; i < heap_allocated_pages; i++) {
7724 struct heap_page *page = heap_pages_sorted[i];
7725 short slot_size = page->slot_size;
7727 uintptr_t start = (uintptr_t)page->start;
7728 uintptr_t end = start + page->total_slots * slot_size;
7730 verify_internal_consistency_i((void *)start, (void *)end, slot_size, &data);
7733 if (data.err_count != 0) {
7734 #if RGENGC_CHECK_MODE >= 5
7735 objspace->rgengc.error_count = data.err_count;
7736 gc_marks_check(objspace, NULL, NULL);
7737 allrefs_dump(objspace);
7739 rb_bug("gc_verify_internal_consistency: found internal inconsistency.");
7743 gc_verify_heap_pages(objspace);
7747 if (!is_lazy_sweeping(objspace) &&
7749 ruby_single_main_ractor != NULL) {
7750 if (objspace_live_slots(objspace) != data.live_object_count) {
7751 fprintf(stderr, "heap_pages_final_slots: %"PRIdSIZE", "
7752 "objspace->profile.total_freed_objects: %"PRIdSIZE"\n",
7753 heap_pages_final_slots, objspace->profile.total_freed_objects);
7754 rb_bug("inconsistent live slot number: expect %"PRIuSIZE", but %"PRIuSIZE".",
7755 objspace_live_slots(objspace), data.live_object_count);
7759 if (!is_marking(objspace)) {
7760 if (objspace->rgengc.old_objects != data.old_object_count) {
7761 rb_bug("inconsistent old slot number: expect %"PRIuSIZE", but %"PRIuSIZE".",
7762 objspace->rgengc.old_objects, data.old_object_count);
7764 if (objspace->rgengc.uncollectible_wb_unprotected_objects != data.remembered_shady_count) {
7765 rb_bug("inconsistent number of wb unprotected objects: expect %"PRIuSIZE", but %"PRIuSIZE".",
7766 objspace->rgengc.uncollectible_wb_unprotected_objects, data.remembered_shady_count);
7771 size_t list_count = 0;
7774 VALUE z = heap_pages_deferred_final;
7777 z = RZOMBIE(z)->next;
7781 if (heap_pages_final_slots != data.zombie_object_count ||
7782 heap_pages_final_slots != list_count) {
7784 rb_bug("inconsistent finalizing object count:\n"
7785 " expect %"PRIuSIZE"\n"
7786 " but %"PRIuSIZE" zombies\n"
7787 " heap_pages_deferred_final list has %"PRIuSIZE" items.",
7788 heap_pages_final_slots,
7789 data.zombie_object_count,
7794 gc_report(5, objspace, "gc_verify_internal_consistency: OK\n");
7804 unsigned int prev_during_gc = during_gc;
7807 gc_verify_internal_consistency_(objspace);
7809 during_gc = prev_during_gc;
7815 rb_gc_verify_internal_consistency(void)
7821 gc_verify_transient_heap_internal_consistency(VALUE dmy)
7823 rb_transient_heap_verify();
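/* gc_marks_start(): switch the objspace into marking mode. A full mark resets
 * the old-object counters and clears every page's mark/remember bitmaps; a
 * minor mark instead seeds marking from the remember set and counts old and
 * uncollectible objects as already marked. */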
7833 gc_report(1, objspace, "gc_marks_start: (%s)\n", full_mark ? "full" : "minor");
7834 gc_mode_transition(objspace, gc_mode_marking);
7837 #if GC_ENABLE_INCREMENTAL_MARK
7838 objspace->rincgc.step_slots = (objspace->marked_slots * 2) / ((objspace->rincgc.pooled_slots / HEAP_PAGE_OBJ_LIMIT) + 1);
7840 if (0) fprintf(stderr, "objspace->marked_slots: %"PRIdSIZE", "
7841 "objspace->rincgc.pooled_page_num: %"PRIdSIZE", "
7842 "objspace->rincgc.step_slots: %"PRIdSIZE", \n",
7843 objspace->marked_slots, objspace->rincgc.pooled_slots, objspace->rincgc.step_slots);
7845 objspace->flags.during_minor_gc = FALSE;
7846 if (ruby_enable_autocompact) {
7847 objspace->flags.during_compacting |= TRUE;
7849 objspace->profile.major_gc_count++;
7850 objspace->rgengc.uncollectible_wb_unprotected_objects = 0;
7851 objspace->rgengc.old_objects = 0;
7852 objspace->rgengc.last_major_gc = objspace->profile.count;
7853 objspace->marked_slots = 0;
7855 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
7856 rgengc_mark_and_rememberset_clear(objspace, SIZE_POOL_EDEN_HEAP(&size_pools[i]));
7860 objspace->flags.during_minor_gc = TRUE;
7861 objspace->marked_slots =
7862 objspace->rgengc.old_objects + objspace->rgengc.uncollectible_wb_unprotected_objects;
7863 objspace->profile.minor_gc_count++;
7865 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
7866 rgengc_rememberset_mark(objspace, SIZE_POOL_EDEN_HEAP(&size_pools[i]));
7870 gc_mark_roots(objspace, NULL);
7872 gc_report(1, objspace, "gc_marks_start: (%s) end, stack in %"PRIdSIZE"\n",
7873 full_mark ? "full" : "minor", mark_stack_size(&objspace->mark_stack));
7876 #if GC_ENABLE_INCREMENTAL_MARK
7878 gc_marks_wb_unprotected_objects_plane(rb_objspace_t *objspace, uintptr_t p, bits_t bits)
7883 gc_report(2, objspace, "gc_marks_wb_unprotected_objects: marked shady: %s\n", obj_info((VALUE)p));
7884 GC_ASSERT(RVALUE_WB_UNPROTECTED((VALUE)p));
7885 GC_ASSERT(RVALUE_MARKED((VALUE)p));
7886 gc_mark_children(objspace, (VALUE)p);
7899 list_for_each(&heap->pages, page, page_node) {
7900 bits_t *mark_bits = page->mark_bits;
7901 bits_t *wbun_bits = page->wb_unprotected_bits;
7905 bits_t bits = mark_bits[0] & wbun_bits[0];
7906 bits >>= NUM_IN_PAGE(p);
7907 gc_marks_wb_unprotected_objects_plane(objspace, (uintptr_t)p, bits);
7908 p += (BITS_BITLENGTH - NUM_IN_PAGE(p));
7910 for (j=1; j<HEAP_PAGE_BITMAP_LIMIT; j++) {
7911 bits_t bits = mark_bits[j] & wbun_bits[j];
7913 gc_marks_wb_unprotected_objects_plane(objspace, (uintptr_t)p, bits);
7914 p += BITS_BITLENGTH;
7918 gc_mark_stacked_objects_all(objspace);
7922 heap_move_pooled_pages_to_free_pages(rb_heap_t *heap)
7924 struct heap_page *page = heap->pooled_pages;
7927 heap->pooled_pages = page->free_next;
7928 heap_add_freepage(heap, page);
7938 #if GC_ENABLE_INCREMENTAL_MARK
7940 if (is_incremental_marking(objspace)) {
7941 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
7942 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(&size_pools[i]);
7943 if (heap->pooled_pages) {
7944 heap_move_pooled_pages_to_free_pages(heap);
7945 gc_report(1, objspace, "gc_marks_finish: pooled pages are exists. retry.\n");
7950 if (RGENGC_CHECK_MODE && is_mark_stack_empty(&objspace->mark_stack) == 0) {
7951 rb_bug("gc_marks_finish: mark stack is not empty (%"PRIdSIZE").",
7952 mark_stack_size(&objspace->mark_stack));
7955 gc_mark_roots(objspace, 0);
7957 if (is_mark_stack_empty(&objspace->mark_stack) == FALSE) {
7958 gc_report(1, objspace, "gc_marks_finish: not empty (%"PRIdSIZE"). retry.\n",
7959 mark_stack_size(&objspace->mark_stack));
7963 #if RGENGC_CHECK_MODE >= 2
7964 if (gc_verify_heap_pages(objspace) != 0) {
7965 rb_bug("gc_marks_finish (incremental): there are remembered old objects.");
7969 objspace->flags.during_incremental_marking = FALSE;
7971 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
7972 gc_marks_wb_unprotected_objects(objspace, SIZE_POOL_EDEN_HEAP(&size_pools[i]));
7977 #if RGENGC_CHECK_MODE >= 2
7978 gc_verify_internal_consistency(objspace);
7981 if (is_full_marking(objspace)) {
7983 const double r = gc_params.oldobject_limit_factor;
7984 objspace->rgengc.uncollectible_wb_unprotected_objects_limit = (size_t)(objspace->rgengc.uncollectible_wb_unprotected_objects * r);
7985 objspace->rgengc.old_objects_limit = (size_t)(objspace->rgengc.old_objects * r);
7988 #if RGENGC_CHECK_MODE >= 4
7990 gc_marks_check(objspace, gc_check_after_marks_i, "after_marks");
7996 size_t total_slots = heap_allocatable_slots(objspace) + heap_eden_total_slots(objspace);
7997 size_t sweep_slots = total_slots - objspace->marked_slots;
7998 size_t max_free_slots = (size_t)(total_slots * gc_params.heap_free_slots_max_ratio);
7999 size_t min_free_slots = (size_t)(total_slots * gc_params.heap_free_slots_min_ratio);
8000 int full_marking = is_full_marking(objspace);
8001 const int r_cnt = GET_VM()->ractor.cnt;
8002 const int r_mul = r_cnt > 8 ? 8 : r_cnt;
8004 GC_ASSERT(heap_eden_total_slots(objspace) >= objspace->marked_slots);
8007 if (max_free_slots < gc_params.heap_init_slots * r_mul) {
8008 max_free_slots = gc_params.heap_init_slots * r_mul;
8011 if (sweep_slots > max_free_slots) {
8012 heap_pages_freeable_pages = (sweep_slots - max_free_slots) / HEAP_PAGE_OBJ_LIMIT;
8015 heap_pages_freeable_pages = 0;
8019 if (min_free_slots < gc_params.heap_free_slots * r_mul) {
8020 min_free_slots = gc_params.heap_free_slots * r_mul;
8023 if (sweep_slots < min_free_slots) {
8024 if (!full_marking) {
8025 if (objspace->profile.count - objspace->rgengc.last_major_gc < RVALUE_OLD_AGE) {
8026 full_marking = TRUE;
8031 gc_report(1, objspace, "gc_marks_finish: next is full GC!!)\n");
8032 objspace->rgengc.need_major_gc |= GPR_FLAG_MAJOR_BY_NOFREE;
8039 gc_report(1, objspace, "gc_marks_finish: heap_set_increment!!\n");
8041 size_pool_allocatable_pages_set(objspace, size_pool, heap_extend_pages(objspace, sweep_slots, total_slots, heap_allocated_pages + heap_allocatable_pages(objspace)));
8043 heap_increment(objspace, size_pool, SIZE_POOL_EDEN_HEAP(size_pool));
8050 const double r = gc_params.oldobject_limit_factor;
8051 objspace->rgengc.uncollectible_wb_unprotected_objects_limit = (size_t)(objspace->rgengc.uncollectible_wb_unprotected_objects * r);
8052 objspace->rgengc.old_objects_limit = (size_t)(objspace->rgengc.old_objects * r);
8055 if (objspace->rgengc.uncollectible_wb_unprotected_objects > objspace->rgengc.uncollectible_wb_unprotected_objects_limit) {
8056 objspace->rgengc.need_major_gc |= GPR_FLAG_MAJOR_BY_SHADY;
8058 if (objspace->rgengc.old_objects > objspace->rgengc.old_objects_limit) {
8059 objspace->rgengc.need_major_gc |= GPR_FLAG_MAJOR_BY_OLDGEN;
8061 if (RGENGC_FORCE_MAJOR_GC) {
8062 objspace->rgengc.need_major_gc = GPR_FLAG_MAJOR_BY_FORCE;
8065 gc_report(1, objspace, "gc_marks_finish (marks %"PRIdSIZE" objects, "
8066 "old %"PRIdSIZE" objects, total %"PRIdSIZE" slots, "
8067 "sweep %"PRIdSIZE" slots, increment: %"PRIdSIZE", next GC: %s)\n",
8068 objspace->marked_slots, objspace->rgengc.old_objects, heap_eden_total_slots(objspace), sweep_slots, heap_allocatable_pages(objspace),
8069 objspace->rgengc.need_major_gc ? "major" : "minor");
8072 rb_transient_heap_finish_marking();
8073 rb_ractor_finish_marking();
8080 #if GC_ENABLE_INCREMENTAL_MARK
8084 GC_ASSERT(is_marking(objspace));
8086 if (gc_mark_stacked_objects_incremental(objspace, slots)) {
8087 if (gc_marks_finish(objspace)) {
8092 if (0) fprintf(stderr, "objspace->marked_slots: %"PRIdSIZE"\n", objspace->marked_slots);
8099 gc_report(1, objspace, "gc_marks_rest\n");
8101 #if GC_ENABLE_INCREMENTAL_MARK
8102 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
8103 SIZE_POOL_EDEN_HEAP(&size_pools[i])->pooled_pages = NULL;
8107 if (is_incremental_marking(objspace)) {
8109 while (gc_mark_stacked_objects_incremental(objspace, INT_MAX) == FALSE);
8110 } while (gc_marks_finish(objspace) == FALSE);
8113 gc_mark_stacked_objects_all(objspace);
8114 gc_marks_finish(objspace);
8124 GC_ASSERT(dont_gc_val() == FALSE);
8125 #if GC_ENABLE_INCREMENTAL_MARK
8127 unsigned int lock_lev;
8128 gc_enter(objspace, gc_enter_event_mark_continue, &lock_lev);
8133 if (heap->pooled_pages) {
8134 while (heap->pooled_pages && slots < HEAP_PAGE_OBJ_LIMIT) {
8135 struct heap_page *page = heap_move_pooled_pages_to_free_pages(heap);
8136 slots += page->free_slots;
8138 from = "pooled-pages";
8140 else if (heap_increment(objspace, size_pool, heap)) {
8141 slots = heap->free_pages->free_slots;
8142 from = "incremented-pages";
8146 gc_report(2, objspace, "gc_marks_continue: provide %d slots from %s.\n",
8148 gc_marks_step(objspace, objspace->rincgc.step_slots);
8151 gc_report(2, objspace, "gc_marks_continue: no more pooled pages (stack depth: %"PRIdSIZE").\n",
8152 mark_stack_size(&objspace->mark_stack));
8153 gc_marks_rest(objspace);
8156 gc_exit(objspace, gc_enter_event_mark_continue, &lock_lev);
8163 gc_prof_mark_timer_start(objspace);
8167 gc_marks_start(objspace, full_mark);
8168 if (!is_incremental_marking(objspace)) {
8169 gc_marks_rest(objspace);
8172 #if RGENGC_PROFILE > 0
8173 if (gc_prof_record(objspace)) {
8175 record->old_objects = objspace->rgengc.old_objects;
8178 gc_prof_mark_timer_stop(objspace);
8184 gc_report_body(int level, rb_objspace_t *objspace, const char *fmt, ...)
8186 if (level <= RGENGC_DEBUG) {
8190 const char *status = " ";
8193 status = is_full_marking(objspace) ? "+" : "-";
8196 if (is_lazy_sweeping(objspace)) {
8199 if (is_incremental_marking(objspace)) {
8204 va_start(args, fmt);
8205 vsnprintf(buf, 1024, fmt, args);
8208 fprintf(out, "%s|", status);
8218 return RVALUE_REMEMBERED(obj);
8224 struct heap_page *page = GET_HEAP_PAGE(obj);
8225 bits_t *bits = &page->marking_bits[0];
8227 GC_ASSERT(!is_incremental_marking(objspace));
8229 if (MARKED_IN_BITMAP(bits, obj)) {
8233 page->flags.has_remembered_objects = TRUE;
8234 MARK_IN_BITMAP(bits, obj);
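/* rgengc_remember(): record an old object in its page-local remember set (the
 * marking bitmap doubles as the remember set outside of incremental marking)
 * so the next minor GC rescans it for references to young objects. */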
8245 gc_report(6, objspace, "rgengc_remember: %s %s\n", obj_info(obj),
8246 rgengc_remembersetbits_get(objspace, obj) ? "was already remembered" : "is remembered now");
8248 check_rvalue_consistency(obj);
8250 if (RGENGC_CHECK_MODE) {
8251 if (RVALUE_WB_UNPROTECTED(obj)) rb_bug("rgengc_remember: %s is not wb protected.", obj_info(obj));
8254 #if RGENGC_PROFILE > 0
8255 if (!rgengc_remembered(objspace, obj)) {
8256 if (RVALUE_WB_UNPROTECTED(obj) == 0) {
8257 objspace->profile.total_remembered_normal_object_count++;
8258 #if RGENGC_PROFILE >= 2
8259 objspace->profile.remembered_normal_object_count_types[BUILTIN_TYPE(obj)]++;
8265 return rgengc_remembersetbits_set(objspace, obj);
8271 int result = rgengc_remembersetbits_get(objspace, obj);
8272 check_rvalue_consistency(obj);
8279 gc_report(6, objspace, "rgengc_remembered: %s\n", obj_info(obj));
8280 return rgengc_remembered_sweep(objspace, obj);
8283 #ifndef PROFILE_REMEMBERSET_MARK
8284 #define PROFILE_REMEMBERSET_MARK 0
8288 rgengc_rememberset_mark_plane(rb_objspace_t *objspace, uintptr_t p, bits_t bitset)
8294 gc_report(2, objspace, "rgengc_rememberset_mark: mark %s\n", obj_info(obj));
8295 GC_ASSERT(RVALUE_UNCOLLECTIBLE(obj));
8296 GC_ASSERT(RVALUE_OLD_P(obj) || RVALUE_WB_UNPROTECTED(obj));
8298 gc_mark_children(objspace, obj);
8311 #if PROFILE_REMEMBERSET_MARK
8312 int has_old = 0, has_shady = 0, has_both = 0, skip = 0;
8314 gc_report(1, objspace, "rgengc_rememberset_mark: start\n");
8316 list_for_each(&heap->pages, page, page_node) {
8317 if (page->flags.has_remembered_objects | page->flags.has_uncollectible_shady_objects) {
8319 bits_t bitset, bits[HEAP_PAGE_BITMAP_LIMIT];
8320 bits_t *marking_bits = page->marking_bits;
8321 bits_t *uncollectible_bits = page->uncollectible_bits;
8322 bits_t *wb_unprotected_bits = page->wb_unprotected_bits;
8323 #if PROFILE_REMEMBERSET_MARK
8324 if (page->flags.has_remembered_objects && page->flags.has_uncollectible_shady_objects) has_both++;
8325 else if (page->flags.has_remembered_objects) has_old++;
8326 else if (page->flags.has_uncollectible_shady_objects) has_shady++;
8328 for (j=0; j<HEAP_PAGE_BITMAP_LIMIT; j++) {
8329 bits[j] = marking_bits[j] | (uncollectible_bits[j] & wb_unprotected_bits[j]);
8330 marking_bits[j] = 0;
8332 page->flags.has_remembered_objects = FALSE;
8335 bitset >>= NUM_IN_PAGE(p);
8336 rgengc_rememberset_mark_plane(objspace, (uintptr_t)p, bitset);
8337 p += (BITS_BITLENGTH - NUM_IN_PAGE(p));
8339 for (j=1; j < HEAP_PAGE_BITMAP_LIMIT; j++) {
8341 rgengc_rememberset_mark_plane(objspace, (uintptr_t)p, bitset);
8342 p += BITS_BITLENGTH;
8345 #if PROFILE_REMEMBERSET_MARK
8352 #if PROFILE_REMEMBERSET_MARK
8353 fprintf(stderr, "%d\t%d\t%d\t%d\n", has_both, has_old, has_shady, skip);
8355 gc_report(1, objspace, "rgengc_rememberset_mark: finished\n");
8363 list_for_each(&heap->pages, page, page_node) {
8364 memset(&page->mark_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
8365 memset(&page->uncollectible_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
8366 memset(&page->marking_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
8367 memset(&page->pinned_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
8368 page->flags.has_uncollectible_shady_objects = FALSE;
8369 page->flags.has_remembered_objects = FALSE;
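/* Generational write barrier (non-incremental case): when an old object `a`
 * is made to point at a young object `b`, either `a` is added to the remember
 * set so the next minor GC rescans it, or `b` itself is marked/promoted (and
 * remembered when it is WB-unprotected). */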
8380 if (RGENGC_CHECK_MODE) {
8381 if (!RVALUE_OLD_P(a)) rb_bug("gc_writebarrier_generational: %s is not an old object.", obj_info(a));
8382 if ( RVALUE_OLD_P(b)) rb_bug("gc_writebarrier_generational: %s is an old object.", obj_info(b));
8383 if (is_incremental_marking(objspace)) rb_bug("gc_writebarrier_generational: called while incremental marking: %s -> %s", obj_info(a), obj_info(b));
8388 if (!rgengc_remembered(objspace, a)) {
8389 RB_VM_LOCK_ENTER_NO_BARRIER();
8391 rgengc_remember(objspace, a);
8393 RB_VM_LOCK_LEAVE_NO_BARRIER();
8394 gc_report(1, objspace, "gc_writebarrier_generational: %s (remembered) -> %s\n", obj_info(a), obj_info(b));
8398 MARK_IN_BITMAP(GET_HEAP_MARK_BITS(b), b);
8399 if (RVALUE_WB_UNPROTECTED(b)) {
8400 gc_remember_unprotected(objspace, b);
8403 RVALUE_AGE_SET_OLD(objspace, b);
8404 rgengc_remember(objspace, b);
8407 gc_report(1, objspace, "gc_writebarrier_generational: %s -> %s (remembered)\n", obj_info(a), obj_info(b));
8410 check_rvalue_consistency(a);
8411 check_rvalue_consistency(b);
8414 #if GC_ENABLE_INCREMENTAL_MARK
8418 gc_mark_set_parent(objspace, parent);
8419 rgengc_check_relation(objspace, obj);
8420 if (gc_mark_set(objspace, obj) == FALSE) return;
8421 gc_aging(objspace, obj);
8422 gc_grey(objspace, obj);
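/* Incremental write barrier: a store into a black (already scanned) object
 * that targets a white object re-marks the target ([IN]); an old-to-young
 * store promotes the young object and re-greys it when needed ([GN]); stores
 * into WB-unprotected targets fall back to remembering them ([LL]). */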
8430 gc_report(2, objspace, "gc_writebarrier_incremental: [LG] %p -> %s\n", (void *)a, obj_info(b));
8432 if (RVALUE_BLACK_P(a)) {
8433 if (RVALUE_WHITE_P(b)) {
8434 if (!RVALUE_WB_UNPROTECTED(a)) {
8435 gc_report(2, objspace, "gc_writebarrier_incremental: [IN] %p -> %s\n", (void *)a, obj_info(b));
8436 gc_mark_from(objspace, b, a);
8439 else if (RVALUE_OLD_P(a) && !RVALUE_OLD_P(b)) {
8440 if (!RVALUE_WB_UNPROTECTED(b)) {
8441 gc_report(1, objspace, "gc_writebarrier_incremental: [GN] %p -> %s\n", (void *)a, obj_info(b));
8442 RVALUE_AGE_SET_OLD(objspace, b);
8444 if (RVALUE_BLACK_P(b)) {
8445 gc_grey(objspace, b);
8449 gc_report(1, objspace, "gc_writebarrier_incremental: [LL] %p -> %s\n", (void *)a, obj_info(b));
8450 gc_remember_unprotected(objspace, b);
8454 if (UNLIKELY(objspace->flags.during_compacting)) {
8455 MARK_IN_BITMAP(GET_HEAP_PINNED_BITS(b), b);
8460 #define gc_writebarrier_incremental(a, b, objspace)
8472 if (!is_incremental_marking(objspace)) {
8473 if (!RVALUE_OLD_P(a) || RVALUE_OLD_P(b)) {
8477 gc_writebarrier_generational(a, b, objspace);
8483 RB_VM_LOCK_ENTER_NO_BARRIER();
8485 if (is_incremental_marking(objspace)) {
8486 gc_writebarrier_incremental(a, b, objspace);
8492 RB_VM_LOCK_LEAVE_NO_BARRIER();
8494 if (retry) goto retry;
8502 if (RVALUE_WB_UNPROTECTED(obj)) {
8508 gc_report(2, objspace, "rb_gc_writebarrier_unprotect: %s %s\n", obj_info(obj),
8509 rgengc_remembered(objspace, obj) ? " (already remembered)" : "");
8511 if (RVALUE_OLD_P(obj)) {
8512 gc_report(1, objspace, "rb_gc_writebarrier_unprotect: %s\n", obj_info(obj));
8513 RVALUE_DEMOTE(objspace, obj);
8514 gc_mark_set(objspace, obj);
8515 gc_remember_unprotected(objspace, obj);
8518 objspace->profile.total_shade_operation_count++;
8519 #if RGENGC_PROFILE >= 2
8520 objspace->profile.shade_operation_count_types[BUILTIN_TYPE(obj)]++;
8525 RVALUE_AGE_RESET(obj);
8528 RB_DEBUG_COUNTER_INC(obj_wb_unprotect);
8529 MARK_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), obj);
8536 MJIT_FUNC_EXPORTED void
8537 rb_gc_writebarrier_remember(VALUE obj)
8541 gc_report(1, objspace, "rb_gc_writebarrier_remember: %s\n", obj_info(obj));
8543 if (is_incremental_marking(objspace)) {
8544 if (RVALUE_BLACK_P(obj)) {
8545 gc_grey(objspace, obj);
8549 if (RVALUE_OLD_P(obj)) {
8550 rgengc_remember(objspace, obj);
8555 static st_table *rgengc_unprotect_logging_table;
8558 rgengc_unprotect_logging_exit_func_i(st_data_t key, st_data_t val, st_data_t arg)
8560 fprintf(stderr, "%s\t%"PRIuVALUE"\n", (char *)key, (VALUE)val);
8565 rgengc_unprotect_logging_exit_func(void)
8567 st_foreach(rgengc_unprotect_logging_table, rgengc_unprotect_logging_exit_func_i, 0);
8571 rb_gc_unprotect_logging(void *objptr, const char *filename, int line)
8575 if (rgengc_unprotect_logging_table == 0) {
8576 rgengc_unprotect_logging_table = st_init_strtable();
8577 atexit(rgengc_unprotect_logging_exit_func);
8580 if (RVALUE_WB_UNPROTECTED(obj) == 0) {
8585 snprintf(ptr, 0x100 - 1, "%s|%s:%d", obj_info(obj), filename, line);
8587 if (st_lookup(rgengc_unprotect_logging_table, (st_data_t)ptr, &cnt)) {
8594 st_insert(rgengc_unprotect_logging_table, (st_data_t)ptr, cnt);
8599 rb_copy_wb_protected_attribute(VALUE dest, VALUE obj)
8603 if (RVALUE_WB_UNPROTECTED(obj) && !RVALUE_WB_UNPROTECTED(dest)) {
8604 if (!RVALUE_OLD_P(dest)) {
8605 MARK_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(dest), dest);
8606 RVALUE_AGE_RESET_RAW(dest);
8609 RVALUE_DEMOTE(objspace, dest);
8613 check_rvalue_consistency(dest);
8619 rb_obj_rgengc_writebarrier_protected_p(VALUE obj)
8621 return RVALUE_WB_UNPROTECTED(obj) ? Qfalse : Qtrue;
8625 rb_obj_rgengc_promoted_p(VALUE obj)
8631 rb_obj_gc_flags(VALUE obj, ID* flags, size_t max)
8634 static ID ID_marked;
8635 static ID ID_wb_protected, ID_old, ID_marking, ID_uncollectible, ID_pinned;
8638 #define I(s) ID_##s = rb_intern(#s);
8648 if (RVALUE_WB_UNPROTECTED(obj) == 0 && n<max) flags[n++] = ID_wb_protected;
8649 if (RVALUE_OLD_P(obj) && n<max) flags[n++] = ID_old;
8650 if (RVALUE_UNCOLLECTIBLE(obj) && n<max) flags[n++] = ID_uncollectible;
8651 if (MARKED_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj) && n<max) flags[n++] = ID_marking;
8652 if (MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj) && n<max) flags[n++] = ID_marked;
8653 if (MARKED_IN_BITMAP(GET_HEAP_PINNED_BITS(obj), obj) && n<max) flags[n++] = ID_pinned;
8662 for (size_t size_pool_idx = 0; size_pool_idx < SIZE_POOL_COUNT; size_pool_idx++) {
8665 struct heap_page *page = cache->using_page;
8666 RVALUE *freelist = cache->freelist;
8667 RUBY_DEBUG_LOG("ractor using_page:%p freelist:%p", (void *)page, (void *)freelist);
8669 heap_page_freelist_append(page, freelist);
8671 cache->using_page = NULL;
8672 cache->freelist = NULL;
8682 #ifndef MARK_OBJECT_ARY_BUCKET_SIZE
8683 #define MARK_OBJECT_ARY_BUCKET_SIZE 1024
8689 if (!is_pointer_to_heap(&rb_objspace, (void *)obj))
8694 VALUE ary_ary = GET_VM()->mark_object_ary;
8695 VALUE ary = rb_ary_last(0, 0, ary_ary);
8714 tmp->next = global_list;
8723 struct gc_list *tmp = global_list;
8725 if (tmp->varptr == addr) {
8726 global_list = tmp->next;
8731 if (tmp->next->varptr == addr) {
8732 struct gc_list *t = tmp->next;
8734 tmp->next = tmp->next->next;
8752 gc_stress_no_immediate_sweep,
8753 gc_stress_full_mark_after_malloc,
8757 #define gc_stress_full_mark_after_malloc_p() \
8758 (FIXNUM_P(ruby_gc_stress_mode) && (FIX2LONG(ruby_gc_stress_mode) & (1<<gc_stress_full_mark_after_malloc)))
8763 if (!heap->free_pages) {
8764 if (!heap_increment(objspace, size_pool, heap)) {
8765 size_pool_allocatable_pages_set(objspace, size_pool, 1);
8766 heap_increment(objspace, size_pool, heap);
8774 if (dont_gc_val() || during_gc || ruby_disable_gc) {
8775 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
8777 heap_ready_to_gc(objspace, size_pool, SIZE_POOL_EDEN_HEAP(size_pool));
8787 gc_reset_malloc_info(rb_objspace_t *objspace, bool full_mark)
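/* malloc_limit tuning: if allocations since the last GC exceeded the limit it
 * grows by malloc_limit_growth_factor (capped at malloc_limit_max); otherwise
 * it decays by ~2% down to malloc_limit_min. With RGENGC_ESTIMATE_OLDMALLOC the
 * same idea drives GPR_FLAG_MAJOR_BY_OLDMALLOC for old-generation mallocs. */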
8789 gc_prof_set_malloc_info(objspace);
8791 size_t inc = ATOMIC_SIZE_EXCHANGE(malloc_increase, 0);
8792 size_t old_limit = malloc_limit;
8794 if (inc > malloc_limit) {
8795 malloc_limit = (size_t)(inc * gc_params.malloc_limit_growth_factor);
8796 if (malloc_limit > gc_params.malloc_limit_max) {
8797 malloc_limit = gc_params.malloc_limit_max;
8801 malloc_limit = (size_t)(malloc_limit * 0.98);
8802 if (malloc_limit < gc_params.malloc_limit_min) {
8803 malloc_limit = gc_params.malloc_limit_min;
8808 if (old_limit != malloc_limit) {
8809 fprintf(stderr, "[%"PRIuSIZE"] malloc_limit: %"PRIuSIZE" -> %"PRIuSIZE"\n",
8813 fprintf(stderr, "[%"PRIuSIZE"] malloc_limit: not changed (%"PRIuSIZE")\n",
8820 #if RGENGC_ESTIMATE_OLDMALLOC
8822 if (objspace->rgengc.oldmalloc_increase > objspace->rgengc.oldmalloc_increase_limit) {
8823 objspace->rgengc.need_major_gc |= GPR_FLAG_MAJOR_BY_OLDMALLOC;
8824 objspace->rgengc.oldmalloc_increase_limit =
8825 (size_t)(objspace->rgengc.oldmalloc_increase_limit * gc_params.oldmalloc_limit_growth_factor);
8827 if (objspace->rgengc.oldmalloc_increase_limit > gc_params.oldmalloc_limit_max) {
8828 objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_max;
8832 if (0) fprintf(stderr, "%"PRIdSIZE"\t%d\t%"PRIuSIZE"\t%"PRIuSIZE"\t%"PRIdSIZE"\n",
8834 objspace->rgengc.need_major_gc,
8835 objspace->rgengc.oldmalloc_increase,
8836 objspace->rgengc.oldmalloc_increase_limit,
8837 gc_params.oldmalloc_limit_max);
8841 objspace->rgengc.oldmalloc_increase = 0;
8843 if ((objspace->profile.latest_gc_info & GPR_FLAG_MAJOR_BY_OLDMALLOC) == 0) {
8844 objspace->rgengc.oldmalloc_increase_limit =
8845 (size_t)(objspace->rgengc.oldmalloc_increase_limit / ((gc_params.oldmalloc_limit_growth_factor - 1)/10 + 1));
8846 if (objspace->rgengc.oldmalloc_increase_limit < gc_params.oldmalloc_limit_min) {
8847 objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_min;
8855 garbage_collect(rb_objspace_t *objspace, unsigned int reason)
8861 #if GC_PROFILE_MORE_DETAIL
8862 objspace->profile.prepare_time = getrusage_time();
8867 #if GC_PROFILE_MORE_DETAIL
8868 objspace->profile.prepare_time = getrusage_time() - objspace->profile.prepare_time;
8871 ret = gc_start(objspace, reason);
8881 unsigned int do_full_mark = !!(reason & GPR_FLAG_FULL_MARK);
8882 #if GC_ENABLE_INCREMENTAL_MARK
8883 unsigned int immediate_mark = reason & GPR_FLAG_IMMEDIATE_MARK;
8887 objspace->flags.immediate_sweep = !!(reason & GPR_FLAG_IMMEDIATE_SWEEP);
8890 objspace->flags.during_compacting = !!(reason & GPR_FLAG_COMPACT);
8892 if (!heap_allocated_pages) return FALSE;
8893 if (!(reason & GPR_FLAG_METHOD) && !ready_to_gc(objspace)) return TRUE;
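/* gc_start(): decide the shape of this cycle. Stress mode and
 * rgengc.need_major_gc can upgrade it to a full mark; GC_ENABLE_INCREMENTAL_MARK
 * and the dont_incremental flag decide whether marking runs incrementally; and
 * immediate_sweep controls whether sweeping is lazy or done in one go. */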
8895 GC_ASSERT(gc_mode(objspace) == gc_mode_none);
8896 GC_ASSERT(!is_lazy_sweeping(objspace));
8897 GC_ASSERT(!is_incremental_marking(objspace));
8899 unsigned int lock_lev;
8900 gc_enter(objspace, gc_enter_event_start, &lock_lev);
8902 #if RGENGC_CHECK_MODE >= 2
8903 gc_verify_internal_consistency(objspace);
8906 if (ruby_gc_stressful) {
8907 int flag = FIXNUM_P(ruby_gc_stress_mode) ? FIX2INT(ruby_gc_stress_mode) : 0;
8909 if ((flag & (1<<gc_stress_no_major)) == 0) {
8910 do_full_mark = TRUE;
8913 objspace->flags.immediate_sweep = !(flag & (1<<gc_stress_no_immediate_sweep));
8916 if (objspace->rgengc.need_major_gc) {
8917 reason |= objspace->rgengc.need_major_gc;
8918 do_full_mark = TRUE;
8920 else if (RGENGC_FORCE_MAJOR_GC) {
8921 reason = GPR_FLAG_MAJOR_BY_FORCE;
8922 do_full_mark = TRUE;
8925 objspace->rgengc.need_major_gc = GPR_FLAG_NONE;
8928 if (do_full_mark && (reason & GPR_FLAG_MAJOR_MASK) == 0) {
8929 reason |= GPR_FLAG_MAJOR_BY_FORCE;
8932 #if GC_ENABLE_INCREMENTAL_MARK
8933 if (!GC_ENABLE_INCREMENTAL_MARK || objspace->flags.dont_incremental || immediate_mark) {
8934 objspace->flags.during_incremental_marking = FALSE;
8937 objspace->flags.during_incremental_marking = do_full_mark;
8941 if (!GC_ENABLE_LAZY_SWEEP || objspace->flags.dont_incremental) {
8942 objspace->flags.immediate_sweep = TRUE;
8945 if (objspace->flags.immediate_sweep) reason |= GPR_FLAG_IMMEDIATE_SWEEP;
8947 gc_report(1, objspace, "gc_start(reason: %x) => %u, %d, %d\n",
8949 do_full_mark, !is_incremental_marking(objspace), objspace->flags.immediate_sweep);
8951 #if USE_DEBUG_COUNTER
8952 RB_DEBUG_COUNTER_INC(gc_count);
8954 if (reason & GPR_FLAG_MAJOR_MASK) {
8955 (void)RB_DEBUG_COUNTER_INC_IF(gc_major_nofree, reason & GPR_FLAG_MAJOR_BY_NOFREE);
8956 (void)RB_DEBUG_COUNTER_INC_IF(gc_major_oldgen, reason & GPR_FLAG_MAJOR_BY_OLDGEN);
8957 (void)RB_DEBUG_COUNTER_INC_IF(gc_major_shady, reason & GPR_FLAG_MAJOR_BY_SHADY);
8958 (void)RB_DEBUG_COUNTER_INC_IF(gc_major_force, reason & GPR_FLAG_MAJOR_BY_FORCE);
8959 #if RGENGC_ESTIMATE_OLDMALLOC
8960 (void)RB_DEBUG_COUNTER_INC_IF(gc_major_oldmalloc, reason & GPR_FLAG_MAJOR_BY_OLDMALLOC);
8964 (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_newobj, reason & GPR_FLAG_NEWOBJ);
8965 (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_malloc, reason & GPR_FLAG_MALLOC);
8966 (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_method, reason & GPR_FLAG_METHOD);
8967 (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_capi, reason & GPR_FLAG_CAPI);
8968 (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_stress, reason & GPR_FLAG_STRESS);
8972 objspace->profile.count++;
8973 objspace->profile.latest_gc_info = reason;
8974 objspace->profile.total_allocated_objects_at_gc_start = objspace->total_allocated_objects;
8975 objspace->profile.heap_used_at_gc_start = heap_allocated_pages;
8976 gc_prof_setup_new_record(objspace, reason);
8977 gc_reset_malloc_info(objspace, do_full_mark);
8978 rb_transient_heap_start_marking(do_full_mark);
8981 GC_ASSERT(during_gc);
8983 gc_prof_timer_start(objspace);
8985 gc_marks(objspace, do_full_mark);
8987 gc_prof_timer_stop(objspace);
8989 gc_exit(objspace, gc_enter_event_start, &lock_lev);
8996 int marking = is_incremental_marking(objspace);
8997 int sweeping = is_lazy_sweeping(objspace);
8999 if (marking || sweeping) {
9000 unsigned int lock_lev;
9001 gc_enter(objspace, gc_enter_event_rest, &lock_lev);
9003 if (RGENGC_CHECK_MODE >= 2) gc_verify_internal_consistency(objspace);
9005 if (is_incremental_marking(objspace)) {
9006 gc_marks_rest(objspace);
9008 if (is_lazy_sweeping(objspace)) {
9009 gc_sweep_rest(objspace);
9011 gc_exit(objspace, gc_enter_event_rest, &lock_lev);
9017 unsigned int reason;
9024 if (is_marking(objspace)) {
9026 if (is_full_marking(objspace)) buff[i++] = 'F';
9027 #if GC_ENABLE_INCREMENTAL_MARK
9028 if (is_incremental_marking(objspace)) buff[i++] = 'I';
9031 else if (is_sweeping(objspace)) {
9033 if (is_lazy_sweeping(objspace)) buff[i++] = 'L';
9044 static char buff[0x10];
9045 gc_current_status_fill(objspace, buff);
9049 #if PRINT_ENTER_EXIT_TICK
9051 static tick_t last_exit_tick;
9052 static tick_t enter_tick;
9053 static int enter_count = 0;
9054 static char last_gc_status[0x10];
9057 gc_record(rb_objspace_t *objspace, int direction, const char *event)
9059 if (direction == 0) {
9061 enter_tick = tick();
9062 gc_current_status_fill(objspace, last_gc_status);
9065 tick_t exit_tick = tick();
9066 char current_gc_status[0x10];
9067 gc_current_status_fill(objspace, current_gc_status);
9070 fprintf(stderr, "%"PRItick"\t%"PRItick"\t%s\t[%s->%s|%c]\n",
9071 enter_tick - last_exit_tick,
9072 exit_tick - enter_tick,
9074 last_gc_status, current_gc_status,
9075 (objspace->profile.latest_gc_info & GPR_FLAG_MAJOR_MASK) ? '+' : '-');
9076 last_exit_tick = exit_tick;
9079 fprintf(stderr, "%"PRItick"\t%"PRItick"\t%s\t[%s->%s|%c]\n",
9081 exit_tick - enter_tick,
9083 last_gc_status, current_gc_status,
9084 (objspace->profile.latest_gc_info & GPR_FLAG_MAJOR_MASK) ? '+' : '-');
9090 gc_record(rb_objspace_t *objspace, int direction, const char *event)
9097 gc_enter_event_cstr(enum gc_enter_event event)
9100 case gc_enter_event_start: return "start";
9101 case gc_enter_event_mark_continue: return "mark_continue";
9102 case gc_enter_event_sweep_continue: return "sweep_continue";
9103 case gc_enter_event_rest: return "rest";
9104 case gc_enter_event_finalizer: return "finalizer";
9105 case gc_enter_event_rb_memerror: return "rb_memerror";
9111 gc_enter_count(enum gc_enter_event event)
9114 case gc_enter_event_start: RB_DEBUG_COUNTER_INC(gc_enter_start); break;
9115 case gc_enter_event_mark_continue: RB_DEBUG_COUNTER_INC(gc_enter_mark_continue); break;
9116 case gc_enter_event_sweep_continue: RB_DEBUG_COUNTER_INC(gc_enter_sweep_continue); break;
9117 case gc_enter_event_rest: RB_DEBUG_COUNTER_INC(gc_enter_rest); break;
9118 case gc_enter_event_finalizer: RB_DEBUG_COUNTER_INC(gc_enter_finalizer); break;
9119 case gc_enter_event_rb_memerror: break;
9124 #define MEASURE_GC (objspace->flags.measure_gc)
9128 gc_enter_event_measure_p(rb_objspace_t *objspace, enum gc_enter_event event)
9130 if (!MEASURE_GC) return false;
9133 case gc_enter_event_start:
9134 case gc_enter_event_mark_continue:
9135 case gc_enter_event_sweep_continue:
9136 case gc_enter_event_rest:
9146 static bool current_process_time(struct timespec *ts);
9149 gc_enter_clock(rb_objspace_t *objspace, enum gc_enter_event event)
9151 if (gc_enter_event_measure_p(objspace, event)) {
9152 if (!current_process_time(&objspace->profile.start_time)) {
9153 objspace->profile.start_time.tv_sec = 0;
9154 objspace->profile.start_time.tv_nsec = 0;
9160 gc_exit_clock(rb_objspace_t *objspace, enum gc_enter_event event)
9162 if (gc_enter_event_measure_p(objspace, event)) {
9165 if ((objspace->profile.start_time.tv_sec > 0 ||
9166 objspace->profile.start_time.tv_nsec > 0) &&
9167 current_process_time(&end_time)) {
9169 if (end_time.tv_sec < objspace->profile.start_time.tv_sec) {
9174 (uint64_t)(end_time.tv_sec - objspace->profile.start_time.tv_sec) * (1000 * 1000 * 1000) +
9175 (end_time.tv_nsec - objspace->profile.start_time.tv_nsec);
9176 objspace->profile.total_time_ns += ns;
9183 gc_enter(rb_objspace_t *objspace, enum gc_enter_event event, unsigned int *lock_lev)
9185 RB_VM_LOCK_ENTER_LEV(lock_lev);
9187 gc_enter_clock(objspace, event);
9190 case gc_enter_event_rest:
9191 if (!is_marking(objspace)) break;
9193 case gc_enter_event_start:
9194 case gc_enter_event_mark_continue:
9202 gc_enter_count(event);
9203 if (UNLIKELY(during_gc != 0)) rb_bug("during_gc != 0");
9204 if (RGENGC_CHECK_MODE >= 3) gc_verify_internal_consistency(objspace);
9206 mjit_gc_start_hook();
9209 RUBY_DEBUG_LOG("%s (%s)",gc_enter_event_cstr(event), gc_current_status(objspace));
9210 gc_report(1, objspace, "gc_enter: %s [%s]\n", gc_enter_event_cstr(event), gc_current_status(objspace));
9211 gc_record(objspace, 0, gc_enter_event_cstr(event));
9216 gc_exit(rb_objspace_t *objspace, enum gc_enter_event event, unsigned int *lock_lev)
9218 GC_ASSERT(during_gc != 0);
9221 gc_record(objspace, 1, gc_enter_event_cstr(event));
9222 RUBY_DEBUG_LOG("%s (%s)", gc_enter_event_cstr(event), gc_current_status(objspace));
9223 gc_report(1, objspace, "gc_exit: %s [%s]\n", gc_enter_event_cstr(event), gc_current_status(objspace));
9226 mjit_gc_exit_hook();
9227 gc_exit_clock(objspace, event);
9228 RB_VM_LOCK_LEAVE_LEV(lock_lev);
9232 gc_with_gvl(void *ptr)
9235 return (void *)(VALUE)garbage_collect(oar->objspace, oar->reason);
9239 garbage_collect_with_gvl(rb_objspace_t *objspace, unsigned int reason)
9241 if (dont_gc_val()) return TRUE;
9242 if (ruby_thread_has_gvl_p()) {
9243 return garbage_collect(objspace, reason);
9248 oar.objspace = objspace;
9249 oar.reason = reason;
9254 fprintf(stderr, "[FATAL] failed to allocate memory\n");
9264 unsigned int reason = (GPR_FLAG_FULL_MARK |
9265 GPR_FLAG_IMMEDIATE_MARK |
9266 GPR_FLAG_IMMEDIATE_SWEEP |
9270 if (RTEST(compact)) {
9273 #if !defined(__MINGW32__) && !defined(_WIN32)
9274 if (!USE_MMAP_ALIGNED_ALLOC) {
9279 reason |= GPR_FLAG_COMPACT;
9282 if (!RTEST(full_mark)) reason &= ~GPR_FLAG_FULL_MARK;
9283 if (!RTEST(immediate_mark)) reason &= ~GPR_FLAG_IMMEDIATE_MARK;
9284 if (!RTEST(immediate_sweep)) reason &= ~GPR_FLAG_IMMEDIATE_SWEEP;
9287 garbage_collect(objspace, reason);
9288 gc_finalize_deferred(objspace);
9305 if (DYNAMIC_SYM_P(obj) && (RSYMBOL(obj)->id & ~ID_SCOPE_MASK)) {
9333 if (st_is_member(finalizer_table, obj)) {
9337 GC_ASSERT(RVALUE_MARKED(obj));
9338 GC_ASSERT(!RVALUE_PINNED(obj));
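/* gc_move(): the compactor copies the source slot into the destination slot,
 * transplants its mark/WB-unprotected/uncollectible/marking bits, carries any
 * object_id mapping over in obj_to_id_tbl, and leaves a T_MOVED forwarding
 * cell at the old address pointing to the destination. */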
9360 gc_report(4, objspace, "Moving object: %p -> %p\n", (void*)scan, (void *)free);
9363 GC_ASSERT(!MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(free), free));
9366 marked = rb_objspace_marked_object_p((VALUE)src);
9367 wb_unprotected = RVALUE_WB_UNPROTECTED((VALUE)src);
9368 uncollectible = RVALUE_UNCOLLECTIBLE((VALUE)src);
9369 marking = RVALUE_MARKING((VALUE)src);
9372 CLEAR_IN_BITMAP(GET_HEAP_MARK_BITS((VALUE)src), (VALUE)src);
9373 CLEAR_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS((VALUE)src), (VALUE)src);
9374 CLEAR_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS((VALUE)src), (VALUE)src);
9375 CLEAR_IN_BITMAP(GET_HEAP_MARKING_BITS((VALUE)src), (VALUE)src);
9380 VALUE already_disabled = rb_gc_disable_no_rest();
9382 if (already_disabled == Qfalse) rb_objspace_gc_enable(objspace);
9385 st_data_t srcid = (st_data_t)src, id;
9389 if (st_lookup(objspace->obj_to_id_tbl, srcid, &id)) {
9390 gc_report(4, objspace, "Moving object with seen id: %p -> %p\n", (void *)src, (void *)dest);
9394 VALUE already_disabled = rb_gc_disable_no_rest();
9395 st_delete(objspace->obj_to_id_tbl, &srcid, 0);
9396 st_insert(objspace->obj_to_id_tbl, (st_data_t)dest, id);
9397 if (already_disabled == Qfalse) rb_objspace_gc_enable(objspace);
9401 memcpy(dest, src, slot_size);
9402 memset(src, 0, slot_size);
9406 MARK_IN_BITMAP(GET_HEAP_MARKING_BITS((VALUE)dest), (VALUE)dest);
9409 CLEAR_IN_BITMAP(GET_HEAP_MARKING_BITS((VALUE)dest), (VALUE)dest);
9413 MARK_IN_BITMAP(GET_HEAP_MARK_BITS((VALUE)dest), (VALUE)dest);
9416 CLEAR_IN_BITMAP(GET_HEAP_MARK_BITS((VALUE)dest), (VALUE)dest);
9419 if (wb_unprotected) {
9420 MARK_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS((VALUE)dest), (VALUE)dest);
9423 CLEAR_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS((VALUE)dest), (VALUE)dest);
9426 if (uncollectible) {
9427 MARK_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS((VALUE)dest), (VALUE)dest);
9430 CLEAR_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS((VALUE)dest), (VALUE)dest);
9434 src->as.moved.flags = T_MOVED;
9435 src->as.moved.dummy = Qundef;
9436 src->as.moved.destination = (VALUE)dest;
9443 compare_free_slots(const void *left, const void *right, void *dummy)
9448 left_page = *(struct heap_page * const *)left;
9449 right_page = *(struct heap_page * const *)right;
9451 return left_page->free_slots - right_page->free_slots;
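/* Eden pages are sorted in ascending order of free slots (fullest pages
 * first) before the page list is rebuilt, which biases where moved objects
 * land during compaction. */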
9457 for (int j = 0; j < SIZE_POOL_COUNT; j++) {
9460 size_t total_pages = SIZE_POOL_EDEN_HEAP(size_pool)->total_pages;
9462 struct heap_page *page = 0, **page_list = malloc(size);
9465 list_for_each(&SIZE_POOL_EDEN_HEAP(size_pool)->pages, page, page_node) {
9466 page_list[i++] = page;
9470 GC_ASSERT((size_t)i == total_pages);
9477 list_head_init(&SIZE_POOL_EDEN_HEAP(size_pool)->pages);
9479 for (i = 0; i < total_pages; i++) {
9480 list_add(&SIZE_POOL_EDEN_HEAP(size_pool)->pages, &page_list[i]->page_node);
9481 if (page_list[i]->free_slots != 0) {
9482 heap_add_freepage(SIZE_POOL_EDEN_HEAP(size_pool), page_list[i]);
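/* Reference-updating pass of compaction: each surviving object is revisited
 * and every field that still points at a T_MOVED forwarding cell is rewritten
 * to the destination address (UPDATE_IF_MOVED / TYPED_UPDATE_IF_MOVED). */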
9501 for (i = 0; i < len; i++) {
9502 UPDATE_IF_MOVED(objspace, ptr[i]);
9513 for (i = 0; i < len; i++) {
9514 UPDATE_IF_MOVED(objspace, ptr[i]);
9519 hash_replace_ref(st_data_t *key, st_data_t *value, st_data_t argp, int existing)
9523 if (gc_object_moved_p(objspace, (VALUE)*key)) {
9527 if (gc_object_moved_p(objspace, (VALUE)*value)) {
9535 hash_foreach_replace(st_data_t key, st_data_t value, st_data_t argp, int error)
9541 if (gc_object_moved_p(objspace, (VALUE)key)) {
9545 if (gc_object_moved_p(objspace, (VALUE)value)) {
9552 hash_replace_ref_value(st_data_t *key, st_data_t *value, st_data_t argp, int existing)
9556 if (gc_object_moved_p(objspace, (VALUE)*value)) {
9564 hash_foreach_replace_value(st_data_t key, st_data_t value, st_data_t argp, int error)
9570 if (gc_object_moved_p(objspace, (VALUE)value)) {
9579 if (!tbl || tbl->num_entries == 0) return;
9581 if (st_foreach_with_replace(tbl, hash_foreach_replace_value, hash_replace_ref_value, (st_data_t)objspace)) {
9589 if (!tbl || tbl->num_entries == 0) return;
9591 if (st_foreach_with_replace(tbl, hash_foreach_replace, hash_replace_ref, (st_data_t)objspace)) {
9601 gc_update_table_refs(objspace, ptr);
9607 rb_hash_stlike_foreach_with_replace(v, hash_foreach_replace, hash_replace_ref, (st_data_t)objspace);
9615 UPDATE_IF_MOVED(objspace, me->owner);
9616 UPDATE_IF_MOVED(objspace, me->defined_class);
9619 switch (def->type) {
9620 case VM_METHOD_TYPE_ISEQ:
9624 TYPED_UPDATE_IF_MOVED(objspace, rb_cref_t *, def->body.iseq.cref);
9626 case VM_METHOD_TYPE_ATTRSET:
9627 case VM_METHOD_TYPE_IVAR:
9628 UPDATE_IF_MOVED(objspace, def->body.attr.location);
9630 case VM_METHOD_TYPE_BMETHOD:
9631 UPDATE_IF_MOVED(objspace, def->body.bmethod.proc);
9633 case VM_METHOD_TYPE_ALIAS:
9636 case VM_METHOD_TYPE_REFINED:
9638 UPDATE_IF_MOVED(objspace, def->body.refined.owner);
9640 case VM_METHOD_TYPE_CFUNC:
9641 case VM_METHOD_TYPE_ZSUPER:
9642 case VM_METHOD_TYPE_MISSING:
9643 case VM_METHOD_TYPE_OPTIMIZED:
9644 case VM_METHOD_TYPE_UNDEF:
9645 case VM_METHOD_TYPE_NOTIMPLEMENTED:
9656 for (i=0; i<n; i++) {
9657 UPDATE_IF_MOVED(objspace, values[i]);
9664 switch (imemo_type(obj)) {
9668 if (LIKELY(env->ep)) {
9670 TYPED_UPDATE_IF_MOVED(objspace, rb_iseq_t *, env->iseq);
9671 UPDATE_IF_MOVED(objspace, env->ep[VM_ENV_DATA_INDEX_ENV]);
9672 gc_update_values(objspace, (long)env->env_size, (VALUE *)env->env);
9677 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.cref.klass_or_self);
9678 TYPED_UPDATE_IF_MOVED(objspace, struct rb_cref_struct *, RANY(obj)->as.imemo.cref.next);
9679 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.cref.refinements);
9682 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.svar.cref_or_me);
9683 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.svar.lastline);
9684 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.svar.backref);
9685 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.svar.others);
9687 case imemo_throw_data:
9688 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.throw_data.throw_obj);
9693 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.memo.v1);
9694 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.memo.v2);
9697 gc_ref_update_method_entry(objspace, &RANY(obj)->as.imemo.ment);
9700 rb_iseq_update_references((rb_iseq_t *)obj);
9703 rb_ast_update_references((rb_ast_t *)obj);
9705 case imemo_callcache:
9709 UPDATE_IF_MOVED(objspace, cc->klass);
9710 if (!is_live_object(objspace, cc->klass)) {
9717 if (!is_live_object(objspace, (VALUE)cc->cme_)) {
9723 case imemo_constcache:
9726 UPDATE_IF_MOVED(objspace, ice->value);
9729 case imemo_parser_strterm:
9731 case imemo_callinfo:
9734 rb_bug("not reachable %d", imemo_type(obj));
9739 static enum rb_id_table_iterator_result
9740 check_id_table_move(ID id, VALUE value, void *data)
9744 if (gc_object_moved_p(objspace, (VALUE)value)) {
9745 return ID_TABLE_REPLACE;
9748 return ID_TABLE_CONTINUE;
9760 void *poisoned = asan_poisoned_object_p(value);
9761 asan_unpoison_object(value, false);
9764 destination = (VALUE)RMOVED(value)->destination;
9768 destination = value;
9774 asan_poison_object(value);
9778 destination = value;
9784 static enum rb_id_table_iterator_result
9785 update_id_table(ID *key, VALUE * value, void *data, int existing)
9789 if (gc_object_moved_p(objspace, (VALUE)*value)) {
9793 return ID_TABLE_CONTINUE;
9800 rb_id_table_foreach_with_replace(tbl, check_id_table_move, update_id_table, objspace);
9804 static enum rb_id_table_iterator_result
9805 update_cc_tbl_i(ID id, VALUE ccs_ptr, void *data)
9809 VM_ASSERT(vm_ccs_p(ccs));
9811 if (gc_object_moved_p(objspace, (VALUE)ccs->cme)) {
9815 for (int i=0; i<ccs->len; i++) {
9816 if (gc_object_moved_p(objspace, (VALUE)ccs->entries[i].ci)) {
9819 if (gc_object_moved_p(objspace, (VALUE)ccs->entries[i].cc)) {
9825 return ID_TABLE_CONTINUE;
9833 rb_id_table_foreach_with_replace(tbl, update_cc_tbl_i, 0, objspace);
9837 static enum rb_id_table_iterator_result
9838 update_cvc_tbl_i(ID id, VALUE cvc_entry, void *data)
9846 return ID_TABLE_CONTINUE;
9854 rb_id_table_foreach_with_replace(tbl, update_cvc_tbl_i, 0, objspace);
9858 static enum rb_id_table_iterator_result
9859 update_const_table(VALUE value, void *data)
9864 if (gc_object_moved_p(objspace, ce->value)) {
9868 if (gc_object_moved_p(objspace, ce->file)) {
9872 return ID_TABLE_CONTINUE;
9879 rb_id_table_foreach_values(tbl, update_const_table, objspace);
9886 UPDATE_IF_MOVED(objspace, entry->klass);
9887 entry = entry->next;
9892 update_iv_index_tbl_i(st_data_t key, st_data_t value, st_data_t arg)
9896 UPDATE_IF_MOVED(objspace, ent->class_value);
9903 UPDATE_IF_MOVED(objspace, ext->origin_);
9904 UPDATE_IF_MOVED(objspace, ext->refined_class);
9905 update_subclass_entries(objspace, ext->subclasses);
9908 if (ext->iv_index_tbl) {
9909 st_foreach(ext->iv_index_tbl, update_iv_index_tbl_i, (st_data_t)objspace);
gc_report(4, objspace, "update-refs: %p ->\n", (void *)obj);

UPDATE_IF_MOVED(objspace, RCLASS(obj)->super);
if (!RCLASS_EXT(obj)) break;
update_m_tbl(objspace, RCLASS_M_TBL(obj));
update_cc_tbl(objspace, obj);
update_cvc_tbl(objspace, obj);

gc_update_tbl_refs(objspace, RCLASS_IV_TBL(obj));

update_class_ext(objspace, RCLASS_EXT(obj));
update_const_tbl(objspace, RCLASS_CONST_TBL(obj));

if (FL_TEST(obj, RICLASS_IS_ORIGIN) &&
    !FL_TEST(obj, RICLASS_ORIGIN_SHARED_MTBL)) {
    update_m_tbl(objspace, RCLASS_M_TBL(obj));

UPDATE_IF_MOVED(objspace, RCLASS(obj)->super);
if (!RCLASS_EXT(obj)) break;
if (RCLASS_IV_TBL(obj)) {
    gc_update_tbl_refs(objspace, RCLASS_IV_TBL(obj));

update_class_ext(objspace, RCLASS_EXT(obj));
update_m_tbl(objspace, RCLASS_CALLABLE_M_TBL(obj));
update_cc_tbl(objspace, obj);

gc_ref_update_imemo(objspace, obj);

UPDATE_IF_MOVED(objspace, any->as.array.as.heap.aux.shared_root);

gc_ref_update_array(objspace, obj);

gc_ref_update_hash(objspace, obj);
UPDATE_IF_MOVED(objspace, any->as.hash.ifnone);

if (STR_SHARED_P(obj)) {
    VALUE orig_shared = any->as.string.as.heap.aux.shared;
    UPDATE_IF_MOVED(objspace, any->as.string.as.heap.aux.shared);

    if (STR_EMBED_P(shared)) {
        size_t offset = (size_t)any->as.string.as.heap.ptr - (size_t)RSTRING(orig_shared)->as.embed.ary;
        GC_ASSERT(any->as.string.as.heap.ptr >= RSTRING(orig_shared)->as.embed.ary);
        GC_ASSERT(offset <= (size_t)RSTRING(shared)->as.embed.len);
        any->as.string.as.heap.ptr = RSTRING(shared)->as.embed.ary + offset;
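/*
 * NOTE (added comment): a shared (non-embedded) string points into its
 * parent's buffer.  If the parent ends up with an embedded layout after
 * compaction, the child's heap.ptr must be rebased: the code above computes
 * the old byte offset into the parent's buffer and re-applies it to the
 * parent's new embed.ary, i.e. new_ptr = new_ary + (old_ptr - old_ary).
 */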
if (compact_func) (*compact_func)(ptr);

gc_ref_update_object(objspace, obj);

if (any->as.file.fptr) {
    UPDATE_IF_MOVED(objspace, any->as.file.fptr->self);
    UPDATE_IF_MOVED(objspace, any->as.file.fptr->pathv);

UPDATE_IF_MOVED(objspace, any->as.regexp.src);

UPDATE_IF_MOVED(objspace, RSYMBOL(any)->fstr);

UPDATE_IF_MOVED(objspace, any->as.match.regexp);
if (any->as.match.str) {
    UPDATE_IF_MOVED(objspace, any->as.match.str);

UPDATE_IF_MOVED(objspace, any->as.rational.num);
UPDATE_IF_MOVED(objspace, any->as.rational.den);

UPDATE_IF_MOVED(objspace, any->as.complex.real);
UPDATE_IF_MOVED(objspace, any->as.complex.imag);

VALUE *ptr = (VALUE *)RSTRUCT_CONST_PTR(obj);
for (i = 0; i < len; i++) {
    UPDATE_IF_MOVED(objspace, ptr[i]);

rb_gcdebug_print_obj_condition((VALUE)obj);
rb_obj_info_dump(obj);

UPDATE_IF_MOVED(objspace, RBASIC(obj)->klass);

gc_report(4, objspace, "update-refs: %p <-\n", (void *)obj);
gc_ref_update(void *vstart, void *vend, size_t stride, rb_objspace_t *objspace, struct heap_page *page)
{
    asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
    asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));

    page->flags.has_uncollectible_shady_objects = FALSE;
    page->flags.has_remembered_objects = FALSE;

    for (; v != (VALUE)vend; v += stride) {
        void *poisoned = asan_poisoned_object_p(v);
        asan_unpoison_object(v, false);

        if (RVALUE_WB_UNPROTECTED(v)) {
            page->flags.has_uncollectible_shady_objects = TRUE;

        if (RVALUE_PAGE_MARKING(page, v)) {
            page->flags.has_remembered_objects = TRUE;

        if (page->flags.before_sweep) {
            if (RVALUE_MARKED(v)) {
                gc_update_object_references(objspace, v);

        gc_update_object_references(objspace, v);

        asan_poison_object(v);
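/*
 * NOTE (added comment): gc_ref_update() walks one heap page slot-by-slot and
 * calls gc_update_object_references() on each live slot.  Pages that sweeping
 * has not reached yet (flags.before_sweep) may still contain dead-but-unswept
 * slots, so only slots whose mark bit is set are updated there; on already
 * swept pages every live slot is updated.  The per-page "shady"/"remembered"
 * flags are recomputed on the fly, since moving objects can change them.
 */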
10129 #define global_symbols ruby_global_symbols
10135 rb_vm_t *vm = rb_ec_vm_ptr(ec);
for (int i = 0; i < SIZE_POOL_COUNT; i++) {
    bool should_set_mark_bits = TRUE;
    rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);

    list_for_each(&heap->pages, page, page_node) {
        uintptr_t start = (uintptr_t)page->start;
        uintptr_t end = start + (page->total_slots * size_pool->slot_size);

        gc_ref_update((void *)start, (void *)end, size_pool->slot_size, objspace, page);
        if (page == heap->sweeping_page) {
            should_set_mark_bits = FALSE;
        }
        if (should_set_mark_bits) {
            gc_setup_mark_bits(page);
        }
    }
}
rb_vm_update_references(vm);
rb_transient_heap_update_references();
rb_gc_update_global_tbl();
global_symbols.dsymbol_fstr_hash = rb_gc_location(global_symbols.dsymbol_fstr_hash);
gc_update_tbl_refs(objspace, objspace->obj_to_id_tbl);
gc_update_table_refs(objspace, objspace->id_to_obj_tbl);
gc_update_table_refs(objspace, global_symbols.str_sym);
gc_update_table_refs(objspace, finalizer_table);
for (i=0; i<T_MASK; i++) {
10178 if (objspace->rcompactor.considered_count_table[i]) {
10182 if (objspace->rcompactor.moved_count_table[i]) {
root_obj_check_moved_i(const char *category, VALUE obj, void *data)

    rb_bug("ROOT %s points to MOVED: %p -> %s\n", category, (void *)obj, obj_info(rb_gc_location(obj)));

reachable_object_check_moved_i(VALUE ref, void *data)

    rb_bug("Object %s points to MOVED: %p -> %s\n", obj_info(parent), (void *)ref, obj_info(rb_gc_location(ref)));

heap_check_moved_i(void *vstart, void *vend, size_t stride, void *data)

    for (; v != (VALUE)vend; v += stride) {
        void *poisoned = asan_poisoned_object_p(v);
        asan_unpoison_object(v, false);

        if (!rb_objspace_garbage_object_p(v)) {
            rb_objspace_reachable_objects_from(v, reachable_object_check_moved_i, (void *)v);

        asan_poison_object(v);
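/*
 * NOTE (added comment): the three iterators above implement the
 * post-compaction consistency check used by the verification entry point
 * below (GC.verify_compaction_references) -- once all references have been
 * updated, neither a VM root nor any reachable object may still point at a
 * T_MOVED forwarding cell; hitting one is reported as a bug.
 */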
return gc_compact_stats(ec, self);

RB_VM_LOCK_ENTER();

if (RTEST(double_heap)) {
    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
        rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
        heap_add_pages(objspace, size_pool, heap, heap->total_pages);
    }
}

if (RTEST(toward_empty)) {
    gc_sort_heap_by_empty_slots(objspace);
}

RB_VM_LOCK_LEAVE();

objspace_reachable_objects_from_root(objspace, root_obj_check_moved_i, NULL);
objspace_each_objects(objspace, heap_check_moved_i, NULL, TRUE);

return gc_compact_stats(ec, self);
10296 unsigned int reason = GPR_DEFAULT_REASON;
10297 garbage_collect(objspace, reason);
10307 #if RGENGC_PROFILE >= 2
static const char *type_name(int type, VALUE obj);

gc_count_add_each_types(VALUE hash, const char *name, const size_t *types)
{
    for (i=0; i<T_MASK; i++) {
        const char *type = type_name(i, 0);

gc_info_decode(rb_objspace_t *objspace, const VALUE hash_or_key, const unsigned int orig_flags)
{
    static VALUE sym_major_by = Qnil, sym_gc_by, sym_immediate_sweep, sym_have_finalizer, sym_state;
    static VALUE sym_nofree, sym_oldgen, sym_shady, sym_force, sym_stress;
#if RGENGC_ESTIMATE_OLDMALLOC
    static VALUE sym_oldmalloc;
#endif
    static VALUE sym_newobj, sym_malloc, sym_method, sym_capi;
    static VALUE sym_none, sym_marking, sym_sweeping;
10348 unsigned int flags = orig_flags ? orig_flags : objspace->profile.latest_gc_info;
10354 hash = hash_or_key;
if (NIL_P(sym_major_by)) {
#define S(s) sym_##s = ID2SYM(rb_intern_const(#s))
    S(immediate_sweep);
#if RGENGC_ESTIMATE_OLDMALLOC

#define SET(name, attr) \
    if (key == sym_##name) \
        return (attr); \
    else if (hash != Qnil) \
        rb_hash_aset(hash, sym_##name, (attr));
10394 (flags & GPR_FLAG_MAJOR_BY_NOFREE) ? sym_nofree :
10395 (flags & GPR_FLAG_MAJOR_BY_OLDGEN) ? sym_oldgen :
10396 (flags & GPR_FLAG_MAJOR_BY_SHADY) ? sym_shady :
10397 (flags & GPR_FLAG_MAJOR_BY_FORCE) ? sym_force :
10398 #if RGENGC_ESTIMATE_OLDMALLOC
10399 (flags & GPR_FLAG_MAJOR_BY_OLDMALLOC) ? sym_oldmalloc :
10402 SET(major_by, major_by);
10405 (flags & GPR_FLAG_NEWOBJ) ? sym_newobj :
10406 (flags & GPR_FLAG_MALLOC) ? sym_malloc :
10407 (flags & GPR_FLAG_METHOD) ? sym_method :
10408 (flags & GPR_FLAG_CAPI) ? sym_capi :
10409 (flags & GPR_FLAG_STRESS) ? sym_stress :
10413 SET(have_finalizer, RBOOL(flags & GPR_FLAG_HAVE_FINALIZE));
10414 SET(immediate_sweep, RBOOL(flags & GPR_FLAG_IMMEDIATE_SWEEP));
10416 if (orig_flags == 0) {
10417 SET(state, gc_mode(objspace) == gc_mode_none ? sym_none :
10418 gc_mode(objspace) == gc_mode_marking ? sym_marking : sym_sweeping);
10433 return gc_info_decode(objspace, key, 0);
10448 return gc_info_decode(objspace, arg, 0);
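/*
 * NOTE (added comment): gc_info_decode() backs the Ruby-level
 * GC.latest_gc_info API.  hash_or_key may be either a Hash (every field is
 * stored into it) or a single Symbol such as :major_by (only that field is
 * returned), which is why the SET() macro above first compares against the
 * requested key and otherwise falls back to rb_hash_aset().
 */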
10454 gc_stat_sym_heap_allocated_pages,
10455 gc_stat_sym_heap_sorted_length,
10456 gc_stat_sym_heap_allocatable_pages,
10457 gc_stat_sym_heap_available_slots,
10458 gc_stat_sym_heap_live_slots,
10459 gc_stat_sym_heap_free_slots,
10460 gc_stat_sym_heap_final_slots,
10461 gc_stat_sym_heap_marked_slots,
10462 gc_stat_sym_heap_eden_pages,
10463 gc_stat_sym_heap_tomb_pages,
10464 gc_stat_sym_total_allocated_pages,
10465 gc_stat_sym_total_freed_pages,
10466 gc_stat_sym_total_allocated_objects,
10467 gc_stat_sym_total_freed_objects,
10468 gc_stat_sym_malloc_increase_bytes,
10469 gc_stat_sym_malloc_increase_bytes_limit,
10470 gc_stat_sym_minor_gc_count,
10471 gc_stat_sym_major_gc_count,
10472 gc_stat_sym_compact_count,
10473 gc_stat_sym_read_barrier_faults,
10474 gc_stat_sym_total_moved_objects,
10475 gc_stat_sym_remembered_wb_unprotected_objects,
10476 gc_stat_sym_remembered_wb_unprotected_objects_limit,
10477 gc_stat_sym_old_objects,
10478 gc_stat_sym_old_objects_limit,
10479 #if RGENGC_ESTIMATE_OLDMALLOC
10480 gc_stat_sym_oldmalloc_increase_bytes,
10481 gc_stat_sym_oldmalloc_increase_bytes_limit,
10484 gc_stat_sym_total_generated_normal_object_count,
10485 gc_stat_sym_total_generated_shady_object_count,
10486 gc_stat_sym_total_shade_operation_count,
10487 gc_stat_sym_total_promoted_count,
10488 gc_stat_sym_total_remembered_normal_object_count,
10489 gc_stat_sym_total_remembered_shady_object_count,
10494 static VALUE gc_stat_symbols[gc_stat_sym_last];
10497 setup_gc_stat_symbols(
void)
10499 if (gc_stat_symbols[0] == 0) {
10500 #define S(s) gc_stat_symbols[gc_stat_sym_##s] = ID2SYM(rb_intern_const(#s))
10503 S(heap_allocated_pages);
10504 S(heap_sorted_length);
10505 S(heap_allocatable_pages);
10506 S(heap_available_slots);
10507 S(heap_live_slots);
10508 S(heap_free_slots);
10509 S(heap_final_slots);
10510 S(heap_marked_slots);
10511 S(heap_eden_pages);
10512 S(heap_tomb_pages);
10513 S(total_allocated_pages);
10514 S(total_freed_pages);
10515 S(total_allocated_objects);
10516 S(total_freed_objects);
10517 S(malloc_increase_bytes);
10518 S(malloc_increase_bytes_limit);
10522 S(read_barrier_faults);
10523 S(total_moved_objects);
10524 S(remembered_wb_unprotected_objects);
10525 S(remembered_wb_unprotected_objects_limit);
10527 S(old_objects_limit);
10528 #if RGENGC_ESTIMATE_OLDMALLOC
10529 S(oldmalloc_increase_bytes);
10530 S(oldmalloc_increase_bytes_limit);
10533 S(total_generated_normal_object_count);
10534 S(total_generated_shady_object_count);
10535 S(total_shade_operation_count);
10536 S(total_promoted_count);
10537 S(total_remembered_normal_object_count);
10538 S(total_remembered_shady_object_count);
gc_stat_internal(VALUE hash_or_sym)
{
    setup_gc_stat_symbols();

    hash = hash_or_sym;

#define SET(name, attr) \
    if (key == gc_stat_symbols[gc_stat_sym_##name]) \
        return attr; \
    else if (hash != Qnil) \
        rb_hash_aset(hash, gc_stat_symbols[gc_stat_sym_##name], SIZET2NUM(attr));

    SET(count, objspace->profile.count);
    SET(time, (size_t)(objspace->profile.total_time_ns / (1000 * 1000)));
10572 SET(heap_allocated_pages, heap_allocated_pages);
10573 SET(heap_sorted_length, heap_pages_sorted_length);
10574 SET(heap_allocatable_pages, heap_allocatable_pages(objspace));
10575 SET(heap_available_slots, objspace_available_slots(objspace));
10576 SET(heap_live_slots, objspace_live_slots(objspace));
10577 SET(heap_free_slots, objspace_free_slots(objspace));
10578 SET(heap_final_slots, heap_pages_final_slots);
10579 SET(heap_marked_slots, objspace->marked_slots);
10580 SET(heap_eden_pages, heap_eden_total_pages(objspace));
10581 SET(heap_tomb_pages, heap_tomb_total_pages(objspace));
10582 SET(total_allocated_pages, objspace->profile.total_allocated_pages);
10583 SET(total_freed_pages, objspace->profile.total_freed_pages);
10584 SET(total_allocated_objects, objspace->total_allocated_objects);
10585 SET(total_freed_objects, objspace->profile.total_freed_objects);
10586 SET(malloc_increase_bytes, malloc_increase);
10587 SET(malloc_increase_bytes_limit, malloc_limit);
10588 SET(minor_gc_count, objspace->profile.minor_gc_count);
10589 SET(major_gc_count, objspace->profile.major_gc_count);
10590 SET(compact_count, objspace->profile.compact_count);
10591 SET(read_barrier_faults, objspace->profile.read_barrier_faults);
10592 SET(total_moved_objects, objspace->rcompactor.total_moved);
10593 SET(remembered_wb_unprotected_objects, objspace->rgengc.uncollectible_wb_unprotected_objects);
10594 SET(remembered_wb_unprotected_objects_limit, objspace->rgengc.uncollectible_wb_unprotected_objects_limit);
10595 SET(old_objects, objspace->rgengc.old_objects);
10596 SET(old_objects_limit, objspace->rgengc.old_objects_limit);
10597 #if RGENGC_ESTIMATE_OLDMALLOC
10598 SET(oldmalloc_increase_bytes, objspace->rgengc.oldmalloc_increase);
10599 SET(oldmalloc_increase_bytes_limit, objspace->rgengc.oldmalloc_increase_limit);
10603 SET(total_generated_normal_object_count, objspace->profile.total_generated_normal_object_count);
10604 SET(total_generated_shady_object_count, objspace->profile.total_generated_shady_object_count);
10605 SET(total_shade_operation_count, objspace->profile.total_shade_operation_count);
10606 SET(total_promoted_count, objspace->profile.total_promoted_count);
10607 SET(total_remembered_normal_object_count, objspace->profile.total_remembered_normal_object_count);
10608 SET(total_remembered_shady_object_count, objspace->profile.total_remembered_shady_object_count);
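/*
 * NOTE (added comment): gc_stat_internal() follows the same hash-or-symbol
 * protocol as gc_info_decode() above, but every value is a size_t counter
 * (SIZET2NUM on the hash path).  Ruby-level GC.stat(:heap_live_slots)
 * therefore avoids building the whole statistics hash when only one counter
 * is wanted.
 */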
10616 #if defined(RGENGC_PROFILE) && RGENGC_PROFILE >= 2
if (hash != Qnil) {
    gc_count_add_each_types(hash, "generated_normal_object_count_types", objspace->profile.generated_normal_object_count_types);
    gc_count_add_each_types(hash, "generated_shady_object_count_types", objspace->profile.generated_shady_object_count_types);
    gc_count_add_each_types(hash, "shade_operation_count_types", objspace->profile.shade_operation_count_types);
    gc_count_add_each_types(hash, "promoted_types", objspace->profile.promoted_types);
    gc_count_add_each_types(hash, "remembered_normal_object_count_types", objspace->profile.remembered_normal_object_count_types);
    gc_count_add_each_types(hash, "remembered_shady_object_count_types", objspace->profile.remembered_shady_object_count_types);
}
10637 size_t value = gc_stat_internal(arg);
10647 gc_stat_internal(arg);
10655 size_t value = gc_stat_internal(key);
10659 gc_stat_internal(key);
10668 return ruby_gc_stress_mode;
10674 objspace->flags.gc_stressful =
RTEST(flag);
10675 objspace->gc_stress_mode = flag;
10682 gc_stress_set(objspace, flag);
10690 return rb_objspace_gc_enable(objspace);
10696 int old = dont_gc_val();
10709 rb_gc_disable_no_rest(
void)
10712 return gc_disable_no_rest(objspace);
10718 int old = dont_gc_val();
10727 return rb_objspace_gc_disable(objspace);
10734 return gc_disable_no_rest(objspace);
10748 #if !defined(__MINGW32__) && !defined(_WIN32)
10749 if (!USE_MMAP_ALIGNED_ALLOC) {
10754 ruby_enable_autocompact =
RTEST(v);
10761 return RBOOL(ruby_enable_autocompact);
get_envparam_size(const char *name, size_t *default_value, size_t lower_bound)
{
    const char *ptr = getenv(name);

    if (ptr != NULL && *ptr) {
#if SIZEOF_SIZE_T == SIZEOF_LONG_LONG
        val = strtoll(ptr, &end, 0);
#else
        val = strtol(ptr, &end, 0);
#endif
        switch (*end) {
          case 'k': case 'K':
            unit = 1024;
            break;
          case 'm': case 'M':
            unit = 1024*1024;
            break;
          case 'g': case 'G':
            unit = 1024*1024*1024;
            break;
        }
        while (*end && isspace((unsigned char)*end)) end++;

        if (RTEST(ruby_verbose)) fprintf(stderr, "invalid string for %s: %s\n", name, ptr);

        if (val < -(ssize_t)(SIZE_MAX / 2 / unit) || (ssize_t)(SIZE_MAX / 2 / unit) < val) {
            if (RTEST(ruby_verbose)) fprintf(stderr, "%s=%s is ignored because it overflows\n", name, ptr);
        }

        if (val > 0 && (size_t)val > lower_bound) {
            fprintf(stderr, "%s=%"PRIdSIZE" (default value: %"PRIuSIZE")\n", name, val, *default_value);
            *default_value = (size_t)val;
        }

        fprintf(stderr, "%s=%"PRIdSIZE" (default value: %"PRIuSIZE") is ignored because it must be greater than %"PRIuSIZE".\n",
                name, val, *default_value, lower_bound);
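/*
 * NOTE (added comment): environment sizes accept a k/m/g suffix, and the
 * guard above rejects any value whose magnitude would exceed SIZE_MAX/2 once
 * the unit multiplier is applied (|val| <= SIZE_MAX / 2 / unit).  A minimal,
 * self-contained sketch of the same suffix parsing (illustrative only, not
 * part of this file):
 *
 *     size_t parse_size(const char *s) {          // e.g. "16m" -> 16*1024*1024
 *         char *end; long v = strtol(s, &end, 0); size_t unit = 1;
 *         if (*end == 'k' || *end == 'K') unit = 1024;
 *         else if (*end == 'm' || *end == 'M') unit = 1024*1024;
 *         else if (*end == 'g' || *end == 'G') unit = 1024*1024*1024;
 *         return (size_t)v * unit;
 *     }
 */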
get_envparam_double(const char *name, double *default_value, double lower_bound, double upper_bound, int accept_zero)
{
    const char *ptr = getenv(name);

    if (ptr != NULL && *ptr) {
        val = strtod(ptr, &end);
        if (!*ptr || *end) {
            if (RTEST(ruby_verbose)) fprintf(stderr, "invalid string for %s: %s\n", name, ptr);
        }

        if (accept_zero && val == 0.0) {
        }
        else if (val <= lower_bound) {
            fprintf(stderr, "%s=%f (default value: %f) is ignored because it must be greater than %f.\n",
                    name, val, *default_value, lower_bound);
        }
        else if (upper_bound != 0.0 && val > upper_bound) {
            fprintf(stderr, "%s=%f (default value: %f) is ignored because it must be lower than %f.\n",
                    name, val, *default_value, upper_bound);
        }

        if (RTEST(ruby_verbose)) fprintf(stderr, "%s=%f (default value: %f)\n", name, val, *default_value);
        *default_value = val;
for (int i = 0; i < SIZE_POOL_COUNT; i++) {
    if (gc_params.heap_init_slots > size_pool->eden_heap.total_slots) {
        size_t slots = gc_params.heap_init_slots - size_pool->eden_heap.total_slots;
        int multiple = size_pool->slot_size / sizeof(RVALUE);
        size_pool->allocatable_pages = slots * multiple / HEAP_PAGE_OBJ_LIMIT;
    }
    else {
        size_pool->allocatable_pages = 0;
    }
}
heap_pages_expand_sorted(objspace);
ruby_gc_set_params(void)
{
    if (get_envparam_size("RUBY_GC_HEAP_FREE_SLOTS", &gc_params.heap_free_slots, 0)) {

    if (get_envparam_size("RUBY_GC_HEAP_INIT_SLOTS", &gc_params.heap_init_slots, 0)) {
        gc_set_initial_pages(objspace);

    get_envparam_double("RUBY_GC_HEAP_GROWTH_FACTOR", &gc_params.growth_factor, 1.0, 0.0, FALSE);
    get_envparam_size  ("RUBY_GC_HEAP_GROWTH_MAX_SLOTS", &gc_params.growth_max_slots, 0);
    get_envparam_double("RUBY_GC_HEAP_FREE_SLOTS_MIN_RATIO", &gc_params.heap_free_slots_min_ratio,
    get_envparam_double("RUBY_GC_HEAP_FREE_SLOTS_MAX_RATIO", &gc_params.heap_free_slots_max_ratio,
                        gc_params.heap_free_slots_min_ratio, 1.0, FALSE);
    get_envparam_double("RUBY_GC_HEAP_FREE_SLOTS_GOAL_RATIO", &gc_params.heap_free_slots_goal_ratio,
                        gc_params.heap_free_slots_min_ratio, gc_params.heap_free_slots_max_ratio, TRUE);
    get_envparam_double("RUBY_GC_HEAP_OLDOBJECT_LIMIT_FACTOR", &gc_params.oldobject_limit_factor, 0.0, 0.0, TRUE);

    get_envparam_size  ("RUBY_GC_MALLOC_LIMIT", &gc_params.malloc_limit_min, 0);
    get_envparam_size  ("RUBY_GC_MALLOC_LIMIT_MAX", &gc_params.malloc_limit_max, 0);
    if (!gc_params.malloc_limit_max) {
        gc_params.malloc_limit_max = SIZE_MAX;
    }
    get_envparam_double("RUBY_GC_MALLOC_LIMIT_GROWTH_FACTOR", &gc_params.malloc_limit_growth_factor, 1.0, 0.0, FALSE);

#if RGENGC_ESTIMATE_OLDMALLOC
    if (get_envparam_size("RUBY_GC_OLDMALLOC_LIMIT", &gc_params.oldmalloc_limit_min, 0)) {
        objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_min;
    }
    get_envparam_size  ("RUBY_GC_OLDMALLOC_LIMIT_MAX", &gc_params.oldmalloc_limit_max, 0);
    get_envparam_double("RUBY_GC_OLDMALLOC_LIMIT_GROWTH_FACTOR", &gc_params.oldmalloc_limit_growth_factor, 1.0, 0.0, FALSE);
10970 reachable_objects_from_callback(
VALUE obj)
10973 cr->mfd->mark_func(obj, cr->mfd->data);
10977 rb_objspace_reachable_objects_from(
VALUE obj,
void (func)(
VALUE,
void *),
void *data)
10981 if (during_gc)
rb_bug(
"rb_objspace_reachable_objects_from() is not supported while during_gc == true");
10983 if (is_markable_object(objspace, obj)) {
10985 struct gc_mark_func_data_struct mfd = {
10988 }, *prev_mfd = cr->mfd;
10991 gc_mark_children(objspace, obj);
10992 cr->mfd = prev_mfd;
10997 const char *category;
10998 void (*func)(
const char *category,
VALUE,
void *);
11003 root_objects_from(
VALUE obj,
void *ptr)
11006 (*data->func)(data->category, obj, data->data);
11010 rb_objspace_reachable_objects_from_root(
void (func)(
const char *category,
VALUE,
void *),
void *passing_data)
11013 objspace_reachable_objects_from_root(objspace, func, passing_data);
11017 objspace_reachable_objects_from_root(
rb_objspace_t *objspace,
void (func)(
const char *category,
VALUE,
void *),
void *passing_data)
11019 if (during_gc)
rb_bug(
"objspace_reachable_objects_from_root() is not supported while during_gc == true");
11024 .data = passing_data,
11026 struct gc_mark_func_data_struct mfd = {
11027 .mark_func = root_objects_from,
11029 }, *prev_mfd = cr->mfd;
11032 gc_mark_roots(objspace, &data.category);
11033 cr->mfd = prev_mfd;
11047 gc_vraise(
void *ptr)
11050 rb_vraise(argv->exc, argv->fmt, *argv->ap);
11055 gc_raise(
VALUE exc,
const char *fmt, ...)
11063 if (ruby_thread_has_gvl_p()) {
11073 fprintf(stderr,
"%s",
"[FATAL] ");
11074 vfprintf(stderr, fmt, ap);
11081 static void objspace_xfree(
rb_objspace_t *objspace,
void *ptr,
size_t size);
11084 negative_size_allocation_error(
const char *msg)
11090 ruby_memerror_body(
void *dummy)
11096 NORETURN(
static void ruby_memerror(
void));
11099 ruby_memerror(
void)
11101 if (ruby_thread_has_gvl_p()) {
11110 fprintf(stderr,
"[FATAL] failed to allocate memory\n");
11113 exit(EXIT_FAILURE);
11120 rb_objspace_t *objspace = rb_objspace_of(rb_ec_vm_ptr(ec));
11131 gc_exit(objspace, gc_enter_event_rb_memerror, NULL);
11136 rb_ec_raised_p(ec, RAISED_NOMEMORY)) {
11137 fprintf(stderr,
"[FATAL] failed to allocate memory\n");
11138 exit(EXIT_FAILURE);
11140 if (rb_ec_raised_p(ec, RAISED_NOMEMORY)) {
11141 rb_ec_raised_clear(ec);
11144 rb_ec_raised_set(ec, RAISED_NOMEMORY);
11145 exc = ruby_vm_special_exception_copy(exc);
11148 EC_JUMP_TAG(ec, TAG_RAISE);
rb_aligned_malloc(size_t alignment, size_t size)
{
#if defined __MINGW32__
    res = __mingw_aligned_malloc(size, alignment);
#elif defined _WIN32
    void *_aligned_malloc(size_t, size_t);
    res = _aligned_malloc(size, alignment);
#else
    if (USE_MMAP_ALIGNED_ALLOC) {
        GC_ASSERT(alignment % sysconf(_SC_PAGE_SIZE) == 0);

        char *ptr = mmap(NULL, alignment + size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (ptr == MAP_FAILED) {

        char *aligned = ptr + alignment;
        aligned -= ((VALUE)aligned & (alignment - 1));
        GC_ASSERT(aligned > ptr);
        GC_ASSERT(aligned <= ptr + alignment);

        size_t start_out_of_range_size = aligned - ptr;
        GC_ASSERT(start_out_of_range_size % sysconf(_SC_PAGE_SIZE) == 0);
        if (start_out_of_range_size > 0) {
            if (munmap(ptr, start_out_of_range_size)) {
                rb_bug("rb_aligned_malloc: munmap failed for start");
            }
        }

        size_t end_out_of_range_size = alignment - start_out_of_range_size;
        GC_ASSERT(end_out_of_range_size % sysconf(_SC_PAGE_SIZE) == 0);
        if (end_out_of_range_size > 0) {
            if (munmap(aligned + size, end_out_of_range_size)) {
                rb_bug("rb_aligned_malloc: munmap failed for end");
            }
        }

        res = (void *)aligned;
    }
    else {
# if defined(HAVE_POSIX_MEMALIGN)
        if (posix_memalign(&res, alignment, size) != 0) {
# elif defined(HAVE_MEMALIGN)
        res = memalign(alignment, size);
# else
        res = malloc(alignment + size + sizeof(void*));
        aligned = (char*)res + alignment + sizeof(void*);
        aligned -= ((VALUE)aligned & (alignment - 1));
        ((void**)aligned)[-1] = res;
        res = (void*)aligned;
# endif
    }
#endif

    GC_ASSERT(((alignment - 1) & alignment) == 0);
    GC_ASSERT(alignment % sizeof(void*) == 0);
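/*
 * NOTE (added comment): on the mmap path, alignment is obtained by
 * over-allocating `alignment + size` bytes and trimming -- the aligned
 * pointer is found by rounding `ptr + alignment` down to the alignment
 * boundary, then the unused head (aligned - ptr) and tail
 * (alignment - head) are returned to the OS with munmap().  This only works
 * because `alignment` is a power of two and a multiple of the page size,
 * which the GC_ASSERTs above check.
 */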
rb_aligned_free(void *ptr, size_t size)
{
#if defined __MINGW32__
    __mingw_aligned_free(ptr);
#elif defined _WIN32
    _aligned_free(ptr);
#else
    if (USE_MMAP_ALIGNED_ALLOC) {
        GC_ASSERT(size % sysconf(_SC_PAGE_SIZE) == 0);
        if (munmap(ptr, size)) {
            rb_bug("rb_aligned_free: munmap failed");
        }
    }
# if defined(HAVE_POSIX_MEMALIGN) || defined(HAVE_MEMALIGN)

    free(((void**)ptr)[-1]);
static inline size_t
objspace_malloc_size(rb_objspace_t *objspace, void *ptr, size_t hint)
{
#ifdef HAVE_MALLOC_USABLE_SIZE
    return malloc_usable_size(ptr);
#else
    return hint;
#endif
}
11252 MEMOP_TYPE_MALLOC = 0,
static void
atomic_sub_nounderflow(size_t *var, size_t sub)
{
    if (sub == 0) return;

    while (1) {
        size_t val = *var;
        if (val < sub) sub = val;
        if (ATOMIC_SIZE_CAS(*var, val, val-sub) == val) break;
    }
}
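/*
 * NOTE (added comment): atomic_sub_nounderflow() clamps the decrement so a
 * counter shared by concurrent threads can never wrap below zero.  The same
 * CAS loop written with C11 <stdatomic.h> instead of Ruby's ATOMIC_SIZE_CAS
 * (a minimal sketch, illustrative only; not part of this file):
 *
 *     #include <stdatomic.h>
 *     #include <stddef.h>
 *
 *     static void sub_nounderflow(_Atomic size_t *var, size_t sub)
 *     {
 *         size_t val = atomic_load(var);
 *         for (;;) {
 *             size_t dec = sub < val ? sub : val;               // clamp to avoid underflow
 *             if (atomic_compare_exchange_weak(var, &val, val - dec)) break;
 *             // on failure, val now holds the freshly observed value; retry
 *         }
 *     }
 */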
11273 unsigned int reason = (GPR_FLAG_IMMEDIATE_MARK | GPR_FLAG_IMMEDIATE_SWEEP |
11274 GPR_FLAG_STRESS | GPR_FLAG_MALLOC);
11276 if (gc_stress_full_mark_after_malloc_p()) {
11277 reason |= GPR_FLAG_FULL_MARK;
11279 garbage_collect_with_gvl(objspace, reason);
objspace_malloc_increase_report(rb_objspace_t *objspace, void *mem, size_t new_size, size_t old_size, enum memop_type type)
{
    if (0) fprintf(stderr, "increase - ptr: %p, type: %s, new_size: %"PRIdSIZE", old_size: %"PRIdSIZE"\n",
                   type == MEMOP_TYPE_MALLOC  ? "malloc"  :
                   type == MEMOP_TYPE_FREE    ? "free "   :
                   type == MEMOP_TYPE_REALLOC ? "realloc" : "error",
                   new_size, old_size);

objspace_malloc_increase_body(rb_objspace_t *objspace, void *mem, size_t new_size, size_t old_size, enum memop_type type)
11298 if (new_size > old_size) {
11299 ATOMIC_SIZE_ADD(malloc_increase, new_size - old_size);
11300 #if RGENGC_ESTIMATE_OLDMALLOC
11301 ATOMIC_SIZE_ADD(objspace->rgengc.oldmalloc_increase, new_size - old_size);
11305 atomic_sub_nounderflow(&malloc_increase, old_size - new_size);
11306 #if RGENGC_ESTIMATE_OLDMALLOC
11307 atomic_sub_nounderflow(&objspace->rgengc.oldmalloc_increase, old_size - new_size);
11311 if (
type == MEMOP_TYPE_MALLOC) {
11314 if (ruby_thread_has_gvl_p() && is_lazy_sweeping(objspace)) {
11318 garbage_collect_with_gvl(objspace, GPR_FLAG_MALLOC);
11322 #if MALLOC_ALLOCATED_SIZE
11323 if (new_size >= old_size) {
11324 ATOMIC_SIZE_ADD(objspace->malloc_params.allocated_size, new_size - old_size);
11327 size_t dec_size = old_size - new_size;
11328 size_t allocated_size = objspace->malloc_params.allocated_size;
11330 #if MALLOC_ALLOCATED_SIZE_CHECK
11331 if (allocated_size < dec_size) {
11332 rb_bug(
"objspace_malloc_increase: underflow malloc_params.allocated_size.");
11335 atomic_sub_nounderflow(&objspace->malloc_params.allocated_size, dec_size);
11339 case MEMOP_TYPE_MALLOC:
11340 ATOMIC_SIZE_INC(objspace->malloc_params.allocations);
11342 case MEMOP_TYPE_FREE:
11344 size_t allocations = objspace->malloc_params.allocations;
11345 if (allocations > 0) {
11346 atomic_sub_nounderflow(&objspace->malloc_params.allocations, 1);
11348 #if MALLOC_ALLOCATED_SIZE_CHECK
11350 GC_ASSERT(objspace->malloc_params.allocations > 0);
11355 case MEMOP_TYPE_REALLOC:
break;
11361 #define objspace_malloc_increase(...) \
11362 for (bool malloc_increase_done = objspace_malloc_increase_report(__VA_ARGS__); \
11363 !malloc_increase_done; \
11364 malloc_increase_done = objspace_malloc_increase_body(__VA_ARGS__))
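/*
 * NOTE (added comment): objspace_malloc_increase() is a for-loop header so it
 * can wrap a trailing block like a statement:
 *
 *     objspace_malloc_increase(objspace, mem, new_size, old_size, type) {
 *         ... runs once, before the accounting is applied ...
 *     }
 *
 * _report() runs first (and evidently returns false), the attached block then
 * executes once, and _body() performs the actual bookkeeping and terminates
 * the loop by returning true.  Used with a bare semicolon it degenerates to a
 * plain call, as in objspace_malloc_fixup() below.
 */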
11368 #if USE_GC_MALLOC_OBJ_INFO_DETAILS
11375 #if USE_GC_MALLOC_OBJ_INFO_DETAILS
11376 const char *ruby_malloc_info_file;
11377 int ruby_malloc_info_line;
static inline size_t
objspace_malloc_prepare(rb_objspace_t *objspace, size_t size)
{
    if (size == 0) size = 1;

#if CALC_EXACT_MALLOC_SIZE

static inline void *
objspace_malloc_fixup(rb_objspace_t *objspace, void *mem, size_t size)
11395 size = objspace_malloc_size(objspace, mem, size);
11396 objspace_malloc_increase(objspace, mem, size, 0, MEMOP_TYPE_MALLOC);
11398 #if CALC_EXACT_MALLOC_SIZE
11402 #if USE_GC_MALLOC_OBJ_INFO_DETAILS
11403 info->gen = objspace->profile.count;
11404 info->file = ruby_malloc_info_file;
11405 info->line = info->file ? ruby_malloc_info_line : 0;
11414 #if defined(__GNUC__) && RUBY_DEBUG
11415 #define RB_BUG_INSTEAD_OF_RB_MEMERROR
11418 #ifdef RB_BUG_INSTEAD_OF_RB_MEMERROR
11419 #define TRY_WITH_GC(siz, expr) do { \
11420 const gc_profile_record_flag gpr = \
11421 GPR_FLAG_FULL_MARK | \
11422 GPR_FLAG_IMMEDIATE_MARK | \
11423 GPR_FLAG_IMMEDIATE_SWEEP | \
11425 objspace_malloc_gc_stress(objspace); \
11427 if (LIKELY((expr))) { \
11430 else if (!garbage_collect_with_gvl(objspace, gpr)) { \
11432 rb_bug("TRY_WITH_GC: could not GC"); \
11434 else if ((expr)) { \
11438 rb_bug("TRY_WITH_GC: could not allocate:" \
11439 "%"PRIdSIZE" bytes for %s", \
11444 #define TRY_WITH_GC(siz, alloc) do { \
11445 objspace_malloc_gc_stress(objspace); \
11447 (!garbage_collect_with_gvl(objspace, GPR_FLAG_FULL_MARK | \
11448 GPR_FLAG_IMMEDIATE_MARK | GPR_FLAG_IMMEDIATE_SWEEP | \
11449 GPR_FLAG_MALLOC) || \
11464 size = objspace_malloc_prepare(objspace, size);
11465 TRY_WITH_GC(size, mem = malloc(size));
11466 RB_DEBUG_COUNTER_INC(heap_xmalloc);
11467 return objspace_malloc_fixup(objspace, mem, size);
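/*
 * NOTE (added comment): TRY_WITH_GC() is the allocation retry protocol --
 * attempt the allocation, and if it fails, run a full, immediate GC and try
 * exactly once more; only then report the out-of-memory condition (or
 * rb_bug() in the RB_BUG_INSTEAD_OF_RB_MEMERROR debug configuration above).
 * The xmalloc/xcalloc/xrealloc entry points below all funnel through it.
 */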
static inline size_t
xmalloc2_size(const size_t count, const size_t elsize)
{
    return size_mul_or_raise(count, elsize, rb_eArgError);
}

objspace_xrealloc(rb_objspace_t *objspace, void *ptr, size_t new_size, size_t old_size)
{
    if (!ptr) return objspace_xmalloc0(objspace, new_size);
11488 if (new_size == 0) {
11489 if ((mem = objspace_xmalloc0(objspace, 0)) != NULL) {
11512 objspace_xfree(objspace, ptr, old_size);
11526 #if CALC_EXACT_MALLOC_SIZE
11531 old_size = info->size;
11535 old_size = objspace_malloc_size(objspace, ptr, old_size);
11536 TRY_WITH_GC(new_size, mem = realloc(ptr, new_size));
11537 new_size = objspace_malloc_size(objspace, mem, new_size);
11539 #if CALC_EXACT_MALLOC_SIZE
11542 info->size = new_size;
11547 objspace_malloc_increase(objspace, mem, new_size, old_size, MEMOP_TYPE_REALLOC);
11549 RB_DEBUG_COUNTER_INC(heap_xrealloc);
11553 #if CALC_EXACT_MALLOC_SIZE && USE_GC_MALLOC_OBJ_INFO_DETAILS
11555 #define MALLOC_INFO_GEN_SIZE 100
11556 #define MALLOC_INFO_SIZE_SIZE 10
11557 static size_t malloc_info_gen_cnt[MALLOC_INFO_GEN_SIZE];
11558 static size_t malloc_info_gen_size[MALLOC_INFO_GEN_SIZE];
11559 static size_t malloc_info_size[MALLOC_INFO_SIZE_SIZE+1];
11560 static st_table *malloc_info_file_table;
11563 mmalloc_info_file_i(st_data_t key, st_data_t val, st_data_t dmy)
11565 const char *file = (
void *)key;
11566 const size_t *data = (
void *)val;
11568 fprintf(stderr,
"%s\t%"PRIdSIZE
"\t%"PRIdSIZE
"\n", file, data[0], data[1]);
11570 return ST_CONTINUE;
11573 __attribute__((destructor))
11575 rb_malloc_info_show_results(
void)
11579 fprintf(stderr,
"* malloc_info gen statistics\n");
11580 for (i=0; i<MALLOC_INFO_GEN_SIZE; i++) {
11581 if (i == MALLOC_INFO_GEN_SIZE-1) {
11582 fprintf(stderr,
"more\t%"PRIdSIZE
"\t%"PRIdSIZE
"\n", malloc_info_gen_cnt[i], malloc_info_gen_size[i]);
11585 fprintf(stderr,
"%d\t%"PRIdSIZE
"\t%"PRIdSIZE
"\n", i, malloc_info_gen_cnt[i], malloc_info_gen_size[i]);
11589 fprintf(stderr,
"* malloc_info size statistics\n");
11590 for (i=0; i<MALLOC_INFO_SIZE_SIZE; i++) {
11592 fprintf(stderr,
"%d\t%"PRIdSIZE
"\n", s, malloc_info_size[i]);
11594 fprintf(stderr,
"more\t%"PRIdSIZE
"\n", malloc_info_size[i]);
11596 if (malloc_info_file_table) {
11597 fprintf(stderr,
"* malloc_info file statistics\n");
11598 st_foreach(malloc_info_file_table, mmalloc_info_file_i, 0);
11603 rb_malloc_info_show_results(
void)
11609 objspace_xfree(
rb_objspace_t *objspace,
void *ptr,
size_t old_size)
11618 #if CALC_EXACT_MALLOC_SIZE
11621 old_size = info->size;
11623 #if USE_GC_MALLOC_OBJ_INFO_DETAILS
11625 int gen = (int)(objspace->profile.count - info->gen);
11626 int gen_index = gen >= MALLOC_INFO_GEN_SIZE ? MALLOC_INFO_GEN_SIZE-1 : gen;
11629 malloc_info_gen_cnt[gen_index]++;
11630 malloc_info_gen_size[gen_index] += info->size;
11632 for (i=0; i<MALLOC_INFO_SIZE_SIZE; i++) {
11633 size_t s = 16 << i;
11634 if (info->size <= s) {
11635 malloc_info_size[i]++;
11639 malloc_info_size[i]++;
11643 st_data_t key = (st_data_t)info->file, d;
11646 if (malloc_info_file_table == NULL) {
11647 malloc_info_file_table = st_init_numtable_with_size(1024);
11649 if (st_lookup(malloc_info_file_table, key, &d)) {
11651 data = (
size_t *)d;
11654 data = malloc(xmalloc2_size(2,
sizeof(
size_t)));
11655 if (data == NULL)
rb_bug(
"objspace_xfree: can not allocate memory");
11656 data[0] = data[1] = 0;
11657 st_insert(malloc_info_file_table, key, (st_data_t)data);
11660 data[1] += info->size;
11662 if (0 && gen >= 2) {
11664 fprintf(stderr,
"free - size:%"PRIdSIZE
", gen:%d, pos: %s:%"PRIdSIZE
"\n",
11665 info->size, gen, info->file, info->line);
11668 fprintf(stderr,
"free - size:%"PRIdSIZE
", gen:%d\n",
11675 old_size = objspace_malloc_size(objspace, ptr, old_size);
11677 objspace_malloc_increase(objspace, ptr, 0, old_size, MEMOP_TYPE_FREE) {
11679 RB_DEBUG_COUNTER_INC(heap_xfree);
ruby_xmalloc0(size_t size)

ruby_xmalloc_body(size_t size)
{
    if ((ssize_t)size < 0) {
        negative_size_allocation_error("too large allocation size");
    }
    return ruby_xmalloc0(size);
}

ruby_malloc_size_overflow(size_t count, size_t elsize)
{
    rb_raise(rb_eArgError,
             "malloc: possible integer overflow (%"PRIuSIZE"*%"PRIuSIZE")",
             count, elsize);
}

ruby_xmalloc2_body(size_t n, size_t size)
{
    return objspace_xmalloc0(&rb_objspace, xmalloc2_size(n, size));
}
11717 size = objspace_malloc_prepare(objspace, size);
11718 TRY_WITH_GC(size, mem = calloc1(size));
11719 return objspace_malloc_fixup(objspace, mem, size);
11723 ruby_xcalloc_body(
size_t n,
size_t size)
11725 return objspace_xcalloc(&
rb_objspace, xmalloc2_size(n, size));
11728 #ifdef ruby_sized_xrealloc
11729 #undef ruby_sized_xrealloc
11732 ruby_sized_xrealloc(
void *ptr,
size_t new_size,
size_t old_size)
11734 if ((ssize_t)new_size < 0) {
11735 negative_size_allocation_error(
"too large allocation size");
11738 return objspace_xrealloc(&
rb_objspace, ptr, new_size, old_size);
11742 ruby_xrealloc_body(
void *ptr,
size_t new_size)
11744 return ruby_sized_xrealloc(ptr, new_size, 0);
11747 #ifdef ruby_sized_xrealloc2
11748 #undef ruby_sized_xrealloc2
11751 ruby_sized_xrealloc2(
void *ptr,
size_t n,
size_t size,
size_t old_n)
11753 size_t len = xmalloc2_size(n, size);
11754 return objspace_xrealloc(&
rb_objspace, ptr, len, old_n * size);
11758 ruby_xrealloc2_body(
void *ptr,
size_t n,
size_t size)
11760 return ruby_sized_xrealloc2(ptr, n, size, 0);
11763 #ifdef ruby_sized_xfree
11764 #undef ruby_sized_xfree
ruby_sized_xfree(void *x, size_t size)
{
    if (LIKELY(x)) objspace_xfree(&rb_objspace, x, size);
}

/* ruby_xfree(x) simply forwards with size 0: */
ruby_sized_xfree(x, 0);
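/*
 * NOTE (added comment): the "sized" variants (ruby_sized_xrealloc,
 * ruby_sized_xrealloc2, ruby_sized_xfree) carry the caller-known old size so
 * malloc_increase accounting can be reduced precisely even on platforms
 * without malloc_usable_size(); the plain ruby_xfree()/ruby_xrealloc()
 * wrappers pass 0, meaning "size unknown".
 */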
11781 rb_xmalloc_mul_add(
size_t x,
size_t y,
size_t z)
11783 size_t w = size_mul_add_or_raise(x, y, z,
rb_eArgError);
11788 rb_xrealloc_mul_add(
const void *p,
size_t x,
size_t y,
size_t z)
11790 size_t w = size_mul_add_or_raise(x, y, z,
rb_eArgError);
11795 rb_xmalloc_mul_add_mul(
size_t x,
size_t y,
size_t z,
size_t w)
11797 size_t u = size_mul_add_mul_or_raise(x, y, z, w,
rb_eArgError);
11802 rb_xcalloc_mul_add_mul(
size_t x,
size_t y,
size_t z,
size_t w)
11804 size_t u = size_mul_add_mul_or_raise(x, y, z, w,
rb_eArgError);
11812 ruby_mimmalloc(
size_t size)
11815 #if CALC_EXACT_MALLOC_SIZE
11818 mem = malloc(size);
11819 #if CALC_EXACT_MALLOC_SIZE
11828 #if USE_GC_MALLOC_OBJ_INFO_DETAILS
11840 ruby_mimfree(
void *ptr)
11842 #if CALC_EXACT_MALLOC_SIZE
11850 rb_alloc_tmp_buffer_with_count(
volatile VALUE *store,
size_t size,
size_t cnt)
11858 imemo = rb_imemo_tmpbuf_auto_free_maybe_mark_buffer(NULL, 0);
11860 ptr = ruby_xmalloc0(size);
11868 rb_alloc_tmp_buffer(
volatile VALUE *store,
long len)
11872 if (len < 0 || (cnt = (
long)roomof(len,
sizeof(
VALUE))) < 0) {
11876 return rb_alloc_tmp_buffer_with_count(store, len, cnt);
11880 rb_free_tmp_buffer(
volatile VALUE *store)
11884 void *ptr = ATOMIC_PTR_EXCHANGE(s->ptr, 0);
11890 #if MALLOC_ALLOCATED_SIZE
11901 gc_malloc_allocated_size(
VALUE self)
11916 gc_malloc_allocations(
VALUE self)
11927 objspace_malloc_increase(objspace, 0, diff, 0, MEMOP_TYPE_REALLOC);
11929 else if (diff < 0) {
11930 objspace_malloc_increase(objspace, 0, 0, -diff, MEMOP_TYPE_REALLOC);
11944 #define WMAP_DELETE_DEAD_OBJECT_IN_MARK 0
11946 #if WMAP_DELETE_DEAD_OBJECT_IN_MARK
11948 wmap_mark_map(st_data_t key, st_data_t val, st_data_t arg)
11952 if (!is_live_object(objspace, obj))
return ST_DELETE;
11953 return ST_CONTINUE;
11958 wmap_replace_ref(st_data_t *key, st_data_t *value, st_data_t _argp,
int existing)
11963 VALUE size = values[0];
11965 for (
VALUE index = 1; index <= size; index++) {
11969 return ST_CONTINUE;
11973 wmap_foreach_replace(st_data_t key, st_data_t value, st_data_t _argp,
int error)
11980 VALUE size = values[0];
11982 for (
VALUE index = 1; index <= size; index++) {
11983 VALUE val = values[index];
11989 return ST_CONTINUE;
11993 wmap_compact(
void *ptr)
11997 if (w->obj2wmap) st_foreach_with_replace(w->obj2wmap, wmap_foreach_replace, wmap_replace_ref, (st_data_t)NULL);
12002 wmap_mark(
void *ptr)
12005 #if WMAP_DELETE_DEAD_OBJECT_IN_MARK
12012 wmap_free_map(st_data_t key, st_data_t val, st_data_t arg)
12015 ruby_sized_xfree(ptr, (ptr[0] + 1) *
sizeof(
VALUE));
12016 return ST_CONTINUE;
12020 wmap_free(
void *ptr)
12024 st_free_table(w->obj2wmap);
12025 st_free_table(w->wmap2obj);
12030 wmap_memsize_map(st_data_t key, st_data_t val, st_data_t arg)
12033 *(
size_t *)arg += (ptr[0] + 1) *
sizeof(
VALUE);
12034 return ST_CONTINUE;
12038 wmap_memsize(
const void *ptr)
12041 const struct weakmap *w = ptr;
12043 size += st_memsize(w->obj2wmap);
12044 size += st_memsize(w->wmap2obj);
12045 st_foreach(w->obj2wmap, wmap_memsize_map, (st_data_t)&size);
12057 0, 0, RUBY_TYPED_FREE_IMMEDIATELY
12063 wmap_allocate(
VALUE klass)
12067 w->obj2wmap = rb_init_identtable();
12068 w->wmap2obj = rb_init_identtable();
12069 w->final = rb_func_lambda_new(wmap_finalize, obj, 1, 1);
12079 if (!is_pointer_to_heap(objspace, (
void *)obj))
return FALSE;
12081 void *poisoned = asan_unpoison_object_temporary(obj);
12085 is_live_object(objspace, obj));
12088 asan_poison_object(obj);
12095 wmap_final_func(st_data_t *key, st_data_t *value, st_data_t arg,
int existing)
12097 VALUE wmap, *ptr, size, i, j;
12098 if (!existing)
return ST_STOP;
12100 for (i = j = 1, size = ptr[0]; i <= size; ++i) {
12101 if (ptr[i] != wmap) {
12106 ruby_sized_xfree(ptr, i *
sizeof(
VALUE));
12110 SIZED_REALLOC_N(ptr,
VALUE, j, i);
12112 *value = (st_data_t)ptr;
12114 return ST_CONTINUE;
12121 st_data_t orig, wmap, data;
12122 VALUE obj, *rids, i, size;
12128 rb_bug(
"wmap_finalize: objid is not found.");
12132 orig = (st_data_t)obj;
12133 if (st_delete(w->obj2wmap, &orig, &data)) {
12134 rids = (
VALUE *)data;
12136 for (i = 0; i < size; ++i) {
12137 wmap = (st_data_t)rids[i];
12138 st_delete(w->wmap2obj, &wmap, NULL);
12140 ruby_sized_xfree((
VALUE *)data, (size + 1) *
sizeof(
VALUE));
12143 wmap = (st_data_t)obj;
12144 if (st_delete(w->wmap2obj, &wmap, &orig)) {
12145 wmap = (st_data_t)obj;
12146 st_update(w->obj2wmap, orig, wmap_final_func, wmap);
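/*
 * NOTE (added comment): ObjectSpace::WeakMap keeps two identity tables --
 * wmap2obj maps key => value, and obj2wmap maps a referenced object to a
 * VALUE array of the keys that reference it (slot [0] holds the count).
 * wmap_finalize() above is the finalizer installed on both key and value:
 * when either side dies, it removes the dead object's entries from both
 * tables and shrinks or frees the back-reference array.
 */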
12162 else if (wmap_live_p(objspace, obj)) {
12166 return rb_str_catf(str,
"#<collected:%p>", (
void*)obj);
12171 wmap_inspect_i(st_data_t key, st_data_t val, st_data_t arg)
12175 VALUE str = argp->value;
12185 wmap_inspect_append(objspace, str, k);
12187 wmap_inspect_append(objspace, str, v);
12189 return ST_CONTINUE;
12193 wmap_inspect(
VALUE self)
12201 str =
rb_sprintf(
"-<%"PRIsVALUE
":%p", c, (
void *)
self);
12205 st_foreach(w->wmap2obj, wmap_inspect_i, (st_data_t)&args);
12213 wmap_each_i(st_data_t key, st_data_t val, st_data_t arg)
12217 if (wmap_live_p(objspace, obj)) {
12220 return ST_CONTINUE;
12225 wmap_each(
VALUE self)
12231 st_foreach(w->wmap2obj, wmap_each_i, (st_data_t)objspace);
12236 wmap_each_key_i(st_data_t key, st_data_t val, st_data_t arg)
12240 if (wmap_live_p(objspace, obj)) {
12243 return ST_CONTINUE;
12248 wmap_each_key(
VALUE self)
12254 st_foreach(w->wmap2obj, wmap_each_key_i, (st_data_t)objspace);
12259 wmap_each_value_i(st_data_t key, st_data_t val, st_data_t arg)
12263 if (wmap_live_p(objspace, obj)) {
12266 return ST_CONTINUE;
12271 wmap_each_value(
VALUE self)
12277 st_foreach(w->wmap2obj, wmap_each_value_i, (st_data_t)objspace);
12282 wmap_keys_i(st_data_t key, st_data_t val, st_data_t arg)
12286 VALUE ary = argp->value;
12288 if (wmap_live_p(objspace, obj)) {
12291 return ST_CONTINUE;
12296 wmap_keys(
VALUE self)
12304 st_foreach(w->wmap2obj, wmap_keys_i, (st_data_t)&args);
12309 wmap_values_i(st_data_t key, st_data_t val, st_data_t arg)
12313 VALUE ary = argp->value;
12315 if (wmap_live_p(objspace, obj)) {
12318 return ST_CONTINUE;
12323 wmap_values(
VALUE self)
12331 st_foreach(w->wmap2obj, wmap_values_i, (st_data_t)&args);
12336 wmap_aset_update(st_data_t *key, st_data_t *val, st_data_t arg,
int existing)
12338 VALUE size, *ptr, *optr;
12340 size = (ptr = optr = (
VALUE *)*val)[0];
12342 SIZED_REALLOC_N(ptr,
VALUE, size + 1, size);
12347 ptr = ruby_xmalloc0(2 *
sizeof(
VALUE));
12350 ptr[size] = (
VALUE)arg;
12351 if (ptr == optr)
return ST_STOP;
12352 *val = (st_data_t)ptr;
12353 return ST_CONTINUE;
12364 define_final0(value, w->final);
12367 define_final0(key, w->final);
12370 st_update(w->obj2wmap, (st_data_t)value, wmap_aset_update, key);
12371 st_insert(w->wmap2obj, (st_data_t)key, (st_data_t)value);
12372 return nonspecial_obj_id(value);
12385 if (!st_lookup(w->wmap2obj, (st_data_t)key, &data))
return Qundef;
12387 if (!wmap_live_p(objspace, obj))
return Qundef;
12395 VALUE obj = wmap_lookup(
self, key);
12408 wmap_size(
VALUE self)
12414 n = w->wmap2obj->num_entries;
12415 #if SIZEOF_ST_INDEX_T <= SIZEOF_LONG
12426 #define GC_PROFILE_RECORD_DEFAULT_SIZE 100
12429 current_process_time(
struct timespec *ts)
12431 #if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_PROCESS_CPUTIME_ID)
12433 static int try_clock_gettime = 1;
12434 if (try_clock_gettime && clock_gettime(CLOCK_PROCESS_CPUTIME_ID, ts) == 0) {
12438 try_clock_gettime = 0;
12445 struct rusage usage;
12447 if (getrusage(RUSAGE_SELF, &usage) == 0) {
12448 time = usage.ru_utime;
12449 ts->tv_sec = time.tv_sec;
12450 ts->tv_nsec = (int32_t)time.tv_usec * 1000;
12458 FILETIME creation_time, exit_time, kernel_time, user_time;
12461 if (GetProcessTimes(GetCurrentProcess(),
12462 &creation_time, &exit_time, &kernel_time, &user_time) != 0) {
12463 memcpy(&ui, &user_time,
sizeof(FILETIME));
12464 #define PER100NSEC (uint64_t)(1000 * 1000 * 10)
12465 ts->tv_nsec = (long)(ui.QuadPart % PER100NSEC);
12466 ts->tv_sec = (time_t)(ui.QuadPart / PER100NSEC);
12476 getrusage_time(
void)
12479 if (current_process_time(&ts)) {
12480 return ts.tv_sec + ts.tv_nsec * 1e-9;
12489 gc_prof_setup_new_record(
rb_objspace_t *objspace,
unsigned int reason)
12491 if (objspace->profile.run) {
12492 size_t index = objspace->profile.next_index;
12496 objspace->profile.next_index++;
12498 if (!objspace->profile.records) {
12499 objspace->profile.size = GC_PROFILE_RECORD_DEFAULT_SIZE;
12500 objspace->profile.records = malloc(xmalloc2_size(
sizeof(
gc_profile_record), objspace->profile.size));
12502 if (index >= objspace->profile.size) {
12504 objspace->profile.size += 1000;
12505 ptr = realloc(objspace->profile.records, xmalloc2_size(
sizeof(
gc_profile_record), objspace->profile.size));
12507 objspace->profile.records = ptr;
12509 if (!objspace->profile.records) {
12510 rb_bug(
"gc_profile malloc or realloc miss");
12512 record = objspace->profile.current_record = &objspace->profile.records[objspace->profile.next_index - 1];
12516 record->flags = reason | (ruby_gc_stressful ? GPR_FLAG_STRESS : 0);
12517 #if MALLOC_ALLOCATED_SIZE
12518 record->allocated_size = malloc_allocated_size;
12520 #if GC_PROFILE_MORE_DETAIL && GC_PROFILE_DETAIL_MEMORY
12523 struct rusage usage;
12524 if (getrusage(RUSAGE_SELF, &usage) == 0) {
12525 record->maxrss = usage.ru_maxrss;
12526 record->minflt = usage.ru_minflt;
12527 record->majflt = usage.ru_majflt;
12538 if (gc_prof_enabled(objspace)) {
12540 #if GC_PROFILE_MORE_DETAIL
12541 record->prepare_time = objspace->profile.prepare_time;
12543 record->gc_time = 0;
12544 record->gc_invoke_time = getrusage_time();
12549 elapsed_time_from(
double time)
12551 double now = getrusage_time();
12563 if (gc_prof_enabled(objspace)) {
12565 record->gc_time = elapsed_time_from(record->gc_invoke_time);
12566 record->gc_invoke_time -= objspace->profile.invoke_time;
12570 #define RUBY_DTRACE_GC_HOOK(name) \
12571 do {if (RUBY_DTRACE_GC_##name##_ENABLED()) RUBY_DTRACE_GC_##name();} while (0)
12575 RUBY_DTRACE_GC_HOOK(MARK_BEGIN);
12576 #if GC_PROFILE_MORE_DETAIL
12577 if (gc_prof_enabled(objspace)) {
12578 gc_prof_record(objspace)->gc_mark_time = getrusage_time();
12586 RUBY_DTRACE_GC_HOOK(MARK_END);
12587 #if GC_PROFILE_MORE_DETAIL
12588 if (gc_prof_enabled(objspace)) {
12590 record->gc_mark_time = elapsed_time_from(record->gc_mark_time);
12598 RUBY_DTRACE_GC_HOOK(SWEEP_BEGIN);
12599 if (gc_prof_enabled(objspace)) {
12602 if (record->gc_time > 0 || GC_PROFILE_MORE_DETAIL) {
12603 objspace->profile.gc_sweep_start_time = getrusage_time();
12611 RUBY_DTRACE_GC_HOOK(SWEEP_END);
12613 if (gc_prof_enabled(objspace)) {
12617 if (record->gc_time > 0) {
12618 sweep_time = elapsed_time_from(objspace->profile.gc_sweep_start_time);
12620 record->gc_time += sweep_time;
12622 else if (GC_PROFILE_MORE_DETAIL) {
12623 sweep_time = elapsed_time_from(objspace->profile.gc_sweep_start_time);
12626 #if GC_PROFILE_MORE_DETAIL
12627 record->gc_sweep_time += sweep_time;
12628 if (heap_pages_deferred_final) record->flags |= GPR_FLAG_HAVE_FINALIZE;
12630 if (heap_pages_deferred_final) objspace->profile.latest_gc_info |= GPR_FLAG_HAVE_FINALIZE;
12637 #if GC_PROFILE_MORE_DETAIL
12638 if (gc_prof_enabled(objspace)) {
12640 record->allocate_increase = malloc_increase;
12641 record->allocate_limit = malloc_limit;
12649 if (gc_prof_enabled(objspace)) {
12651 size_t live = objspace->profile.total_allocated_objects_at_gc_start - objspace->profile.total_freed_objects;
12652 size_t total = objspace->profile.heap_used_at_gc_start * HEAP_PAGE_OBJ_LIMIT;
12654 #if GC_PROFILE_MORE_DETAIL
12655 record->heap_use_pages = objspace->profile.heap_used_at_gc_start;
12656 record->heap_live_objects = live;
12657 record->heap_free_objects = total - live;
12660 record->heap_total_objects = total;
12661 record->heap_use_size = live *
sizeof(
RVALUE);
12662 record->heap_total_size = total *
sizeof(
RVALUE);
12675 gc_profile_clear(
VALUE _)
12678 void *p = objspace->profile.records;
12679 objspace->profile.records = NULL;
12680 objspace->profile.size = 0;
12681 objspace->profile.next_index = 0;
12682 objspace->profile.current_record = 0;
12740 gc_profile_record_get(
VALUE _)
12747 if (!objspace->profile.run) {
12751 for (i =0; i < objspace->profile.next_index; i++) {
12763 #if GC_PROFILE_MORE_DETAIL
12778 #if RGENGC_PROFILE > 0
12789 #if GC_PROFILE_MORE_DETAIL
12790 #define MAJOR_REASON_MAX 0x10
12793 gc_profile_dump_major_reason(
unsigned int flags,
char *buff)
12795 unsigned int reason = flags & GPR_FLAG_MAJOR_MASK;
12798 if (reason == GPR_FLAG_NONE) {
12804 if (reason & GPR_FLAG_MAJOR_BY_##x) { \
12805 buff[i++] = #x[0]; \
12806 if (i >= MAJOR_REASON_MAX) rb_bug("gc_profile_dump_major_reason: overflow"); \
12812 #if RGENGC_ESTIMATE_OLDMALLOC
12825 size_t count = objspace->profile.next_index;
12826 #ifdef MAJOR_REASON_MAX
12827 char reason_str[MAJOR_REASON_MAX];
12830 if (objspace->profile.run && count ) {
12834 append(out,
rb_sprintf(
"GC %"PRIuSIZE
" invokes.\n", objspace->profile.count));
12835 append(out,
rb_str_new_cstr(
"Index Invoke Time(sec) Use Size(byte) Total Size(byte) Total Object GC Time(ms)\n"));
12837 for (i = 0; i < count; i++) {
12838 record = &objspace->profile.records[i];
12839 append(out,
rb_sprintf(
"%5"PRIuSIZE
" %19.3f %20"PRIuSIZE
" %20"PRIuSIZE
" %20"PRIuSIZE
" %30.20f\n",
12840 i+1, record->gc_invoke_time, record->heap_use_size,
12841 record->heap_total_size, record->heap_total_objects, record->gc_time*1000));
12844 #if GC_PROFILE_MORE_DETAIL
12845 const char *str =
"\n\n" \
12847 "Prepare Time = Previously GC's rest sweep time\n"
12848 "Index Flags Allocate Inc. Allocate Limit"
12849 #if CALC_EXACT_MALLOC_SIZE
12852 " Use Page Mark Time(ms) Sweep Time(ms) Prepare Time(ms) LivingObj FreeObj RemovedObj EmptyObj"
12854 " OldgenObj RemNormObj RemShadObj"
12856 #if GC_PROFILE_DETAIL_MEMORY
12857 " MaxRSS(KB) MinorFLT MajorFLT"
12862 for (i = 0; i < count; i++) {
12863 record = &objspace->profile.records[i];
12864 append(out,
rb_sprintf(
"%5"PRIuSIZE
" %4s/%c/%6s%c %13"PRIuSIZE
" %15"PRIuSIZE
12865 #
if CALC_EXACT_MALLOC_SIZE
12868 " %9"PRIuSIZE
" %17.12f %17.12f %17.12f %10"PRIuSIZE
" %10"PRIuSIZE
" %10"PRIuSIZE
" %10"PRIuSIZE
12870 "%10"PRIuSIZE
" %10"PRIuSIZE
" %10"PRIuSIZE
12872 #
if GC_PROFILE_DETAIL_MEMORY
12878 gc_profile_dump_major_reason(record->flags, reason_str),
12879 (record->flags & GPR_FLAG_HAVE_FINALIZE) ?
'F' :
'.',
12880 (record->flags & GPR_FLAG_NEWOBJ) ?
"NEWOBJ" :
12881 (record->flags & GPR_FLAG_MALLOC) ?
"MALLOC" :
12882 (record->flags & GPR_FLAG_METHOD) ?
"METHOD" :
12883 (record->flags & GPR_FLAG_CAPI) ?
"CAPI__" :
"??????",
12884 (record->flags & GPR_FLAG_STRESS) ?
'!' :
' ',
12885 record->allocate_increase, record->allocate_limit,
12886 #
if CALC_EXACT_MALLOC_SIZE
12887 record->allocated_size,
12889 record->heap_use_pages,
12890 record->gc_mark_time*1000,
12891 record->gc_sweep_time*1000,
12892 record->prepare_time*1000,
12894 record->heap_live_objects,
12895 record->heap_free_objects,
12896 record->removing_objects,
12897 record->empty_objects
12900 record->old_objects,
12901 record->remembered_normal_objects,
12902 record->remembered_shady_objects
12904 #
if GC_PROFILE_DETAIL_MEMORY
12906 record->maxrss / 1024,
12929 gc_profile_result(
VALUE _)
12946 gc_profile_report(
int argc,
VALUE *argv,
VALUE self)
12964 gc_profile_total_time(
VALUE self)
12969 if (objspace->profile.run && objspace->profile.next_index > 0) {
12971 size_t count = objspace->profile.next_index;
12973 for (i = 0; i < count; i++) {
12974 time += objspace->profile.records[i].gc_time;
12988 gc_profile_enable_get(
VALUE self)
12991 return RBOOL(objspace->profile.run);
13003 gc_profile_enable(
VALUE _)
13006 objspace->profile.run = TRUE;
13007 objspace->profile.current_record = 0;
13020 gc_profile_disable(
VALUE _)
13024 objspace->profile.run = FALSE;
13025 objspace->profile.current_record = 0;
13033 static const char *
13037 #define TYPE_NAME(t) case (t): return #t;
13064 if (obj && rb_objspace_data_type_name(obj)) {
13065 return rb_objspace_data_type_name(obj);
static const char *
obj_type_name(VALUE obj)
{
    return type_name(TYPE(obj), obj);
}

rb_method_type_name(rb_method_type_t type)
{
    switch (type) {
      case VM_METHOD_TYPE_ISEQ:           return "iseq";
      case VM_METHOD_TYPE_ATTRSET:        return "attrset";
      case VM_METHOD_TYPE_IVAR:           return "ivar";
      case VM_METHOD_TYPE_BMETHOD:        return "bmethod";
      case VM_METHOD_TYPE_ALIAS:          return "alias";
      case VM_METHOD_TYPE_REFINED:        return "refined";
      case VM_METHOD_TYPE_CFUNC:          return "cfunc";
      case VM_METHOD_TYPE_ZSUPER:         return "zsuper";
      case VM_METHOD_TYPE_MISSING:        return "missing";
      case VM_METHOD_TYPE_OPTIMIZED:      return "optimized";
      case VM_METHOD_TYPE_UNDEF:          return "undef";
      case VM_METHOD_TYPE_NOTIMPLEMENTED: return "notimplemented";
    }
    rb_bug("rb_method_type_name: unreachable (type: %d)", type);
}
13100 # define ARY_SHARED_P(ary) \
13101 (GC_ASSERT(!FL_TEST((ary), ELTS_SHARED) || !FL_TEST((ary), RARRAY_EMBED_FLAG)), \
13102 FL_TEST((ary),ELTS_SHARED)!=0)
13103 # define ARY_EMBED_P(ary) \
13104 (GC_ASSERT(!FL_TEST((ary), ELTS_SHARED) || !FL_TEST((ary), RARRAY_EMBED_FLAG)), \
13105 FL_TEST((ary), RARRAY_EMBED_FLAG)!=0)
13108 rb_raw_iseq_info(
char *buff,
const int buff_size,
const rb_iseq_t *iseq)
13110 if (buff_size > 0 && iseq->body && iseq->body->location.label && !
RB_TYPE_P(iseq->body->location.pathobj,
T_MOVED)) {
13111 VALUE path = rb_iseq_path(iseq);
13112 VALUE n = iseq->body->location.first_lineno;
13113 snprintf(buff, buff_size,
" %s@%s:%d",
13121 str_len_no_raise(
VALUE str)
13124 if (len < 0)
return 0;
13125 if (len > INT_MAX)
return INT_MAX;
13130 rb_raw_obj_info(
char *buff,
const int buff_size,
VALUE obj)
13133 void *poisoned = asan_poisoned_object_p(obj);
13134 asan_unpoison_object(obj,
false);
13136 #define BUFF_ARGS buff + pos, buff_size - pos
13137 #define APPENDF(f) if ((pos += snprintf f) >= buff_size) goto end
13139 APPENDF((BUFF_ARGS,
"%s", obj_type_name(obj)));
13142 APPENDF((BUFF_ARGS,
" %ld",
FIX2LONG(obj)));
13149 #define TF(c) ((c) != 0 ? "true" : "false")
13150 #define C(c, s) ((c) != 0 ? (s) : " ")
13152 const int age = RVALUE_FLAGS_AGE(
RBASIC(obj)->flags);
13154 if (is_pointer_to_heap(&
rb_objspace, (
void *)obj)) {
13155 APPENDF((BUFF_ARGS,
"%p [%d%s%s%s%s%s%s] %s ",
13157 C(RVALUE_UNCOLLECTIBLE_BITMAP(obj),
"L"),
13158 C(RVALUE_MARK_BITMAP(obj),
"M"),
13159 C(RVALUE_PIN_BITMAP(obj),
"P"),
13160 C(RVALUE_MARKING_BITMAP(obj),
"R"),
13161 C(RVALUE_WB_UNPROTECTED_BITMAP(obj),
"U"),
13162 C(rb_objspace_garbage_object_p(obj),
"G"),
13163 obj_type_name(obj)));
13167 APPENDF((BUFF_ARGS,
"%p [%dXXXX] %s",
13169 obj_type_name(obj)));
13172 if (internal_object_p(obj)) {
13175 else if (
RBASIC(obj)->klass == 0) {
13176 APPENDF((BUFF_ARGS,
"(temporary internal)"));
13181 if (!
NIL_P(class_path)) {
13182 APPENDF((BUFF_ARGS,
"(%s)",
RSTRING_PTR(class_path)));
13188 APPENDF((BUFF_ARGS,
"@%s:%d", RANY(obj)->file, RANY(obj)->line));
13193 UNEXPECTED_NODE(rb_raw_obj_info);
13197 APPENDF((BUFF_ARGS,
"shared -> %s",
13198 rb_obj_info(
RARRAY(obj)->as.heap.aux.shared_root)));
13200 else if (
FL_TEST(obj, RARRAY_EMBED_FLAG)) {
13201 APPENDF((BUFF_ARGS,
"[%s%s] len: %ld (embed)",
13202 C(ARY_EMBED_P(obj),
"E"),
13203 C(ARY_SHARED_P(obj),
"S"),
13207 APPENDF((BUFF_ARGS,
"[%s%s%s] len: %ld, capa:%ld ptr:%p",
13208 C(ARY_EMBED_P(obj),
"E"),
13209 C(ARY_SHARED_P(obj),
"S"),
13212 ARY_EMBED_P(obj) ? -1L :
RARRAY(obj)->as.heap.aux.capa,
13217 if (STR_SHARED_P(obj)) APPENDF((BUFF_ARGS,
" [shared] "));
13218 APPENDF((BUFF_ARGS,
"%.*s", str_len_no_raise(obj),
RSTRING_PTR(obj)));
13222 VALUE fstr = RSYMBOL(obj)->fstr;
13223 ID id = RSYMBOL(obj)->id;
13225 APPENDF((BUFF_ARGS,
":%s id:%d",
RSTRING_PTR(fstr), (
unsigned int)
id));
13228 APPENDF((BUFF_ARGS,
"(%p) id:%d", (
void *)fstr, (
unsigned int)
id));
13237 APPENDF((BUFF_ARGS,
"[%c%c] %"PRIdSIZE,
13238 RHASH_AR_TABLE_P(obj) ?
'A' :
'S',
13239 RHASH_TRANSIENT_P(obj) ?
'T' :
' ',
13247 if (!
NIL_P(class_path)) {
13248 APPENDF((BUFF_ARGS,
"%s",
RSTRING_PTR(class_path)));
APPENDF((BUFF_ARGS, "(anon)"));
13258 if (!
NIL_P(class_path)) {
13259 APPENDF((BUFF_ARGS,
"src:%s",
RSTRING_PTR(class_path)));
13267 if (RANY(obj)->as.basic.flags & ROBJECT_EMBED) {
13268 APPENDF((BUFF_ARGS,
"(embed) len:%d", len));
13272 APPENDF((BUFF_ARGS,
"len:%d ptr:%p", len, (
void *)ptr));
13280 (block = vm_proc_block(obj)) != NULL &&
13281 (vm_block_type(block) == block_type_iseq) &&
13282 (iseq = vm_block_iseq(block)) != NULL) {
13283 rb_raw_iseq_info(BUFF_ARGS, iseq);
13285 else if (rb_ractor_p(obj)) {
13288 APPENDF((BUFF_ARGS,
"r:%d", r->pub.id));
13292 const char *
const type_name = rb_objspace_data_type_name(obj);
13294 APPENDF((BUFF_ARGS,
"%s", type_name));
13300 APPENDF((BUFF_ARGS,
"<%s> ", rb_imemo_name(imemo_type(obj))));
13302 switch (imemo_type(obj)) {
13307 APPENDF((BUFF_ARGS,
":%s (%s%s%s%s) type:%s alias:%d owner:%p defined_class:%p",
13309 METHOD_ENTRY_VISI(me) == METHOD_VISI_PUBLIC ?
"pub" :
13310 METHOD_ENTRY_VISI(me) == METHOD_VISI_PRIVATE ?
"pri" :
"pro",
13311 METHOD_ENTRY_COMPLEMENTED(me) ?
",cmp" :
"",
13312 METHOD_ENTRY_CACHED(me) ?
",cc" :
"",
13313 METHOD_ENTRY_INVALIDATED(me) ?
",inv" :
"",
13314 me->def ? rb_method_type_name(me->def->type) :
"NULL",
13315 me->def ? me->def->alias_count : -1,
13317 (
void *)me->defined_class));
13320 switch (me->def->type) {
13321 case VM_METHOD_TYPE_ISEQ:
13322 APPENDF((BUFF_ARGS,
" (iseq:%s)", obj_info((
VALUE)me->def->body.iseq.
iseqptr)));
13333 rb_raw_iseq_info(BUFF_ARGS, iseq);
13336 case imemo_callinfo:
13339 APPENDF((BUFF_ARGS,
"(mid:%s, flag:%x argc:%d, kwarg:%s)",
13343 vm_ci_kwarg(ci) ?
"available" :
"NULL"));
13346 case imemo_callcache:
13352 APPENDF((BUFF_ARGS,
"(klass:%s cme:%s%s (%p) call:%p",
13353 NIL_P(class_path) ? (cc->klass ?
"??" :
"<NULL>") :
RSTRING_PTR(class_path),
13354 cme ?
rb_id2name(cme->called_id) :
"<NULL>",
13355 cme ? (METHOD_ENTRY_INVALIDATED(cme) ?
" [inv]" :
"") :
"",
13357 (
void *)vm_cc_call(cc)));
13372 asan_poison_object(obj);
13380 #if RGENGC_OBJ_INFO
13381 #define OBJ_INFO_BUFFERS_NUM 10
13382 #define OBJ_INFO_BUFFERS_SIZE 0x100
13383 static int obj_info_buffers_index = 0;
13384 static char obj_info_buffers[OBJ_INFO_BUFFERS_NUM][OBJ_INFO_BUFFERS_SIZE];
static const char *
obj_info(VALUE obj)
{
    const int index = obj_info_buffers_index++;
    char *const buff = &obj_info_buffers[index][0];

    if (obj_info_buffers_index >= OBJ_INFO_BUFFERS_NUM) {
        obj_info_buffers_index = 0;
    }

    return rb_raw_obj_info(buff, OBJ_INFO_BUFFERS_SIZE, obj);
}
13399 static const char *
13400 obj_info(
VALUE obj)
13402 return obj_type_name(obj);
13406 MJIT_FUNC_EXPORTED
const char *
13407 rb_obj_info(
VALUE obj)
13409 return obj_info(obj);
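/*
 * Editorial note, not part of gc.c: obj_info() hands out slots of a ring of
 * OBJ_INFO_BUFFERS_NUM static buffers, so up to that many results can be
 * alive at the same time.  A hypothetical caller relying on this:
 *
 *     fprintf(stderr, "moving %s -> %s\n", obj_info(src), obj_info(dest));
 *
 * With a single static buffer the second obj_info() call would overwrite the
 * first result before fprintf consumed it.
 */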
rb_obj_info_dump(VALUE obj)
    fprintf(stderr, "rb_obj_info_dump: %s\n", rb_raw_obj_info(buff, 0x100, obj));

MJIT_FUNC_EXPORTED void
rb_obj_info_dump_loc(VALUE obj, const char *file, int line, const char *func)
    fprintf(stderr, "<OBJ_INFO:%s@%s:%d> %s\n", func, file, line, rb_raw_obj_info(buff, 0x100, obj));
rb_gcdebug_print_obj_condition(VALUE obj)
    fprintf(stderr, "created at: %s:%d\n", RANY(obj)->file, RANY(obj)->line);
    fprintf(stderr, "moved?: true\n");
    fprintf(stderr, "moved?: false\n");
    if (is_pointer_to_heap(objspace, (void *)obj)) {
        fprintf(stderr, "pointer to heap?: true\n");
    fprintf(stderr, "pointer to heap?: false\n");
    fprintf(stderr, "marked? : %s\n", MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj) ? "true" : "false");
    fprintf(stderr, "pinned? : %s\n", MARKED_IN_BITMAP(GET_HEAP_PINNED_BITS(obj), obj) ? "true" : "false");
    fprintf(stderr, "age? : %d\n", RVALUE_AGE(obj));
    fprintf(stderr, "old? : %s\n", RVALUE_OLD_P(obj) ? "true" : "false");
    fprintf(stderr, "WB-protected?: %s\n", RVALUE_WB_UNPROTECTED(obj) ? "false" : "true");
    fprintf(stderr, "remembered? : %s\n", RVALUE_REMEMBERED(obj) ? "true" : "false");
    if (is_lazy_sweeping(objspace)) {
        fprintf(stderr, "lazy sweeping?: true\n");
        fprintf(stderr, "swept?: %s\n", is_swept_object(objspace, obj) ? "done" : "not yet");
    fprintf(stderr, "lazy sweeping?: false\n");
    fprintf(stderr, "WARNING: object %s(%p) is inadvertently collected\n", (char *)name, (void *)obj);
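/*
 * Editorial sketch, not part of gc.c: the debug helpers above are intended
 * for ad-hoc use, e.g. "call rb_obj_info_dump(obj)" from gdb, or temporary
 * instrumentation such as:
 *
 *     rb_obj_info_dump_loc(obj, __FILE__, __LINE__, __func__);
 *     rb_gcdebug_print_obj_condition(obj);
 *
 * Both print their diagnostics directly to stderr.
 */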
rb_gcdebug_sentinel(VALUE obj, const char *name)

#if GC_DEBUG_STRESS_TO_CLASS
rb_gcdebug_add_stress_to_class(int argc, VALUE *argv, VALUE self)
    if (!stress_to_class) {

rb_gcdebug_remove_stress_to_class(int argc, VALUE *argv, VALUE self)
    if (stress_to_class) {
        for (i = 0; i < argc; ++i) {
            rb_ary_delete_same(stress_to_class, argv[i]);
    stress_to_class = 0;
#include "gc.rbinc"

VALUE rb_mObjSpace;
VALUE rb_mProfiler;
VALUE gc_constants;

    rb_vm_register_special_exception(ruby_error_nomemory, rb_eNoMemError,
                                     "failed to allocate memory");

#if MALLOC_ALLOCATED_SIZE
#if GC_DEBUG_STRESS_TO_CLASS

#define OPT(o) if (o) rb_ary_push(opts, rb_fstring_lit(#o))
    OPT(RGENGC_CHECK_MODE);
    OPT(RGENGC_PROFILE);
    OPT(RGENGC_ESTIMATE_OLDMALLOC);
    OPT(GC_PROFILE_MORE_DETAIL);
    OPT(GC_ENABLE_LAZY_SWEEP);
    OPT(CALC_EXACT_MALLOC_SIZE);
    OPT(MALLOC_ALLOCATED_SIZE);
    OPT(MALLOC_ALLOCATED_SIZE_CHECK);
    OPT(GC_PROFILE_DETAIL_MEMORY);
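/*
 * Editorial note, not part of gc.c: with the OPT() definition above, each
 * invocation pushes the option's name onto the opts array only when the
 * corresponding compile-time flag is non-zero.  For example
 *
 *     OPT(RGENGC_PROFILE);
 *
 * expands to
 *
 *     if (RGENGC_PROFILE) rb_ary_push(opts, rb_fstring_lit("RGENGC_PROFILE"));
 *
 * so the resulting array lists exactly the debug/profiling options this
 * binary was built with.
 */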
#ifdef ruby_xmalloc
#undef ruby_xmalloc
#endif
#ifdef ruby_xmalloc2
#undef ruby_xmalloc2
#endif
#ifdef ruby_xcalloc
#undef ruby_xcalloc
#endif
#ifdef ruby_xrealloc
#undef ruby_xrealloc
#endif
#ifdef ruby_xrealloc2
#undef ruby_xrealloc2
#endif

#if USE_GC_MALLOC_OBJ_INFO_DETAILS
    ruby_malloc_info_file = __FILE__;
    ruby_malloc_info_line = __LINE__;
#endif
    return ruby_xmalloc_body(size);

#if USE_GC_MALLOC_OBJ_INFO_DETAILS
    ruby_malloc_info_file = __FILE__;
    ruby_malloc_info_line = __LINE__;
#endif
    return ruby_xmalloc2_body(n, size);

#if USE_GC_MALLOC_OBJ_INFO_DETAILS
    ruby_malloc_info_file = __FILE__;
    ruby_malloc_info_line = __LINE__;
#endif
    return ruby_xcalloc_body(n, size);

#if USE_GC_MALLOC_OBJ_INFO_DETAILS
    ruby_malloc_info_file = __FILE__;
    ruby_malloc_info_line = __LINE__;
#endif
    return ruby_xrealloc_body(ptr, new_size);

#if USE_GC_MALLOC_OBJ_INFO_DETAILS
    ruby_malloc_info_file = __FILE__;
    ruby_malloc_info_line = __LINE__;
#endif
    return ruby_xrealloc2_body(ptr, n, new_size);
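/*
 * Editorial sketch, not part of gc.c: the five wrappers above share one
 * shape -- optionally record a source location in two globals, then delegate
 * to the corresponding *_body() implementation.  A standalone analogue of the
 * call-site-recording idea (all names hypothetical):
 */
#include <stddef.h>
#include <stdlib.h>

static const char *my_malloc_info_file;
static int         my_malloc_info_line;

static void *
my_malloc_body(size_t size)
{
    /* a real allocator would tag the returned block with file/line here */
    return malloc(size);
}

/* The macro captures the caller's location before delegating. */
#define my_malloc(size) \
    (my_malloc_info_file = __FILE__, \
     my_malloc_info_line = __LINE__, \
     my_malloc_body(size))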