#include <sys/types.h>
#ifdef HAVE_SYS_TIME_H
#include <sys/time.h>
#endif
#ifdef HAVE_SYS_RESOURCE_H
#include <sys/resource.h>
#endif
#if defined(__native_client__) && defined(NACL_NEWLIB)
# undef HAVE_POSIX_MEMALIGN
#endif

#if defined _WIN32 || defined __CYGWIN__
#include <windows.h>
#elif defined(HAVE_POSIX_MEMALIGN)
#elif defined(HAVE_MEMALIGN)
#include <malloc.h>
#endif

#ifdef HAVE_VALGRIND_MEMCHECK_H
# include <valgrind/memcheck.h>
# ifndef VALGRIND_MAKE_MEM_DEFINED
#  define VALGRIND_MAKE_MEM_DEFINED(p, n) VALGRIND_MAKE_READABLE((p), (n))
# endif
# ifndef VALGRIND_MAKE_MEM_UNDEFINED
#  define VALGRIND_MAKE_MEM_UNDEFINED(p, n) VALGRIND_MAKE_WRITABLE((p), (n))
# endif
#else
# define VALGRIND_MAKE_MEM_DEFINED(p, n) 0
# define VALGRIND_MAKE_MEM_UNDEFINED(p, n) 0
#endif
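/*
 * Hedged illustration (demo_* is not from gc.c): because the fallback
 * branch defines the Valgrind hooks as the constant 0, callers may use
 * them unconditionally, in expression position, whether or not
 * memcheck.h is available.
 */
static inline void demo_poison_freed(void *p, size_t n)
{
    /* no-op without valgrind; tells memcheck the memory is
       uninitialized when built with it */
    (void)VALGRIND_MAKE_MEM_UNDEFINED(p, n);
}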
#define rb_setjmp(env) RUBY_SETJMP(env)
#define rb_jmp_buf rb_jmpbuf_t

#ifndef GC_MALLOC_LIMIT
#define GC_MALLOC_LIMIT 8000000
#endif
#ifndef HEAP_MIN_SLOTS
#define HEAP_MIN_SLOTS 10000
#endif

#if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
#if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
#define nomem_error GET_VM()->special_exceptions[ruby_error_nomemory]

#ifndef GC_PROFILE_MORE_DETAIL
#define GC_PROFILE_MORE_DETAIL 0
#endif
#if GC_PROFILE_MORE_DETAIL
    double gc_sweep_time;
    size_t heap_use_slots;
    size_t heap_live_objects;
    size_t heap_free_objects;
    size_t allocate_increase;
    size_t allocate_limit;
#endif
#if defined(_MSC_VER) || defined(__BORLANDC__) || defined(__CYGWIN__)
#pragma pack(push, 1)
#endif
#if defined(_MSC_VER) || defined(__BORLANDC__) || defined(__CYGWIN__)
#pragma pack(pop)
#endif
#define STACK_CHUNK_SIZE 500

#ifndef CALC_EXACT_MALLOC_SIZE
#define CALC_EXACT_MALLOC_SIZE 0
#endif

#if CALC_EXACT_MALLOC_SIZE
    size_t allocated_size;
#endif
#if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
#define rb_objspace (*GET_VM()->objspace)
#define ruby_initial_gc_stress initial_params.gc_stress
#endif
#define malloc_limit objspace->malloc_params.limit
#define malloc_increase objspace->malloc_params.increase
#define heaps objspace->heap.ptr
#define heaps_length objspace->heap.length
#define heaps_used objspace->heap.used
#define lomem objspace->heap.range[0]
#define himem objspace->heap.range[1]
#define heaps_inc objspace->heap.increment
#define heaps_freed objspace->heap.freed
#define dont_gc objspace->flags.dont_gc
#define during_gc objspace->flags.during_gc
#define finalizing objspace->flags.finalizing
#define finalizer_table objspace->final.table
#define deferred_final_list objspace->final.deferred
#define global_List objspace->global_list
#define ruby_gc_stress objspace->gc_stress
#define initial_malloc_limit initial_params.initial_malloc_limit
#define initial_heap_min_slots initial_params.initial_heap_min_slots
#define initial_free_min initial_params.initial_free_min

#define is_lazy_sweeping(objspace) ((objspace)->heap.sweep_slots != 0)
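/*
 * Illustration (hypothetical function, not in gc.c): the accessor macros
 * above let any function with a local `rb_objspace_t *objspace` read the
 * collector state as if through globals.
 */
static size_t demo_slots_in_use(rb_objspace_t *objspace)
{
    return heaps_used;  /* expands to objspace->heap.used */
}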
#if SIZEOF_LONG == SIZEOF_VOIDP
# define nonspecial_obj_id(obj) (VALUE)((SIGNED_VALUE)(obj)|FIXNUM_FLAG)
# define obj_id_to_ref(objid) ((objid) ^ FIXNUM_FLAG)
#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
# define nonspecial_obj_id(obj) LL2NUM((SIGNED_VALUE)(obj) / 2)
# define obj_id_to_ref(objid) (FIXNUM_P(objid) ? \
    ((objid) ^ FIXNUM_FLAG) : (NUM2PTR(objid) << 1))
#else
# error not supported
#endif
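/*
 * Worked example (hypothetical address, SIZEOF_LONG == SIZEOF_VOIDP case):
 * RVALUE slots are sizeof(RVALUE)-aligned, so the low bit of an object
 * pointer is always 0 and can carry FIXNUM_FLAG (0x1):
 *
 *   obj                   = 0x00007f53a0c0  (low bit clear)
 *   nonspecial_obj_id(obj)= 0x00007f53a0c1  (obj | FIXNUM_FLAG: a Fixnum id)
 *   obj_id_to_ref(id)     = 0x00007f53a0c0  (id ^ FIXNUM_FLAG: the pointer)
 *
 * The LL2NUM(.../2) variant plays the same trick via a halved value when
 * pointers only fit in a long long.
 */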
#define RANY(o) ((RVALUE*)(o))
#define has_free_object (objspace->heap.free_slots && objspace->heap.free_slots->freelist)

#define HEAP_HEADER(p) ((struct heaps_header *)(p))
#define GET_HEAP_HEADER(x) (HEAP_HEADER((uintptr_t)(x) & ~(HEAP_ALIGN_MASK)))
#define GET_HEAP_SLOT(x) (GET_HEAP_HEADER(x)->base)
#define GET_HEAP_BITMAP(x) (GET_HEAP_HEADER(x)->bits)
#define NUM_IN_SLOT(p) (((uintptr_t)(p) & HEAP_ALIGN_MASK)/sizeof(RVALUE))
#define BITMAP_INDEX(p) (NUM_IN_SLOT(p) / (sizeof(uintptr_t) * CHAR_BIT))
#define BITMAP_OFFSET(p) (NUM_IN_SLOT(p) & ((sizeof(uintptr_t) * CHAR_BIT)-1))
#define MARKED_IN_BITMAP(bits, p) (bits[BITMAP_INDEX(p)] & ((uintptr_t)1 << BITMAP_OFFSET(p)))

#ifndef HEAP_ALIGN_LOG
/* default tiny heap size: 16KB */
#define HEAP_ALIGN_LOG 14
#endif
#define CEILDIV(i, mod) (((i) + (mod) - 1)/(mod))
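/*
 * Standalone sketch of the bitmap arithmetic above.  The DEMO_* values are
 * assumptions (HEAP_ALIGN_LOG of 14, i.e. 16KB-aligned slots, and a 40-byte
 * RVALUE, typical of a 64-bit build); it compiles on its own and does not
 * depend on the rest of gc.c.
 */
#include <stdio.h>
#include <stdint.h>
#include <limits.h>
#define DEMO_ALIGN_LOG   14
#define DEMO_ALIGN       (1UL << DEMO_ALIGN_LOG)
#define DEMO_ALIGN_MASK  (DEMO_ALIGN - 1)
#define DEMO_RVALUE_SIZE 40
int main(void)
{
    /* 6th object in a 16KB-aligned slot */
    uintptr_t p = (uintptr_t)0x7f0000004000 + 5 * DEMO_RVALUE_SIZE;
    size_t num  = (p & DEMO_ALIGN_MASK) / DEMO_RVALUE_SIZE;     /* NUM_IN_SLOT   */
    size_t word = num / (sizeof(uintptr_t) * CHAR_BIT);         /* BITMAP_INDEX  */
    size_t bit  = num & (sizeof(uintptr_t) * CHAR_BIT - 1);     /* BITMAP_OFFSET */
    printf("object #%zu -> word %zu, bit %zu\n", num, word, bit); /* #5 -> word 0, bit 5 */
    return 0;
}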
#if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
    memset(objspace, 0, sizeof(*objspace));
#if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
    if (heaps_used > 0) {
    for (i = 0; i < add; i++) {
    register RVALUE *mid_membase;
    if (mid_membase < membase) {
    else if (mid_membase > membase) {
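/*
 * Hedged sketch of the lookup the fragment above belongs to: heap slots are
 * kept sorted by their base address (membase), so a candidate pointer can be
 * attributed to a slot by binary search.  demo_* names are illustrative, not
 * the original ones.
 */
static long demo_find_membase(uintptr_t *membases, size_t used, uintptr_t target)
{
    size_t lo = 0, hi = used;
    while (lo < hi) {
        size_t mid = lo + (hi - lo) / 2;
        if (membases[mid] < target)      lo = mid + 1;  /* search upper half */
        else if (membases[mid] > target) hi = mid;      /* search lower half */
        else return (long)mid;                          /* exact match */
    }
    return -1;                                          /* not a heap pointer */
}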
    size_t next_heaps_length;
    for (i = 0; i < add; i++) {
#ifdef USE_SIGALTSTACK
    void *tmp = th->altstack;
    size_t next_heaps_length = (size_t)(heaps_used * 1.8);
    rb_bug("object allocation during garbage collection phase");
    obj = newobj(klass, flags);
    data->typed_flag = 1;
    register size_t hi, lo, mid;
    if (heap->start <= p) {
    rb_bug("obj_free() called for broken object");
        RANY(obj)->as.object.as.heap.ivptr) {
        xfree(RANY(obj)->as.object.as.heap.ivptr);
    if (RANY(obj)->as.hash.ntbl) {
    if (RANY(obj)->as.regexp.ptr) {
        RDATA(obj)->dfree = RANY(obj)->as.typeddata.type->function.dfree;
    else if (RANY(obj)->as.data.dfree) {
    if (RANY(obj)->as.match.rmatch) {
        struct rmatch *rm = RANY(obj)->as.match.rmatch;
    if (RANY(obj)->as.file.fptr) {
    if (RANY(obj)->as.node.u1.tbl) {
    if (RANY(obj)->as.node.u3.args) {
        RANY(obj)->as.rstruct.as.heap.ptr) {
        xfree(RANY(obj)->as.rstruct.as.heap.ptr);
    for (; pstart != pend; pstart++) {
    if (pstart != pend) {
    objspace->flags.dont_lazy_sweep = TRUE;
    for (; p != pend; p++) {
    table = (VALUE)data;
    RBASIC(table)->klass = 0;
    table = (VALUE)data;
    free_func = RDATA(obj)->dfree;
    p->as.free.flags = 0;
    RDATA(p)->dfree = RANY(p)->as.typeddata.type->function.dfree;
    else if (RANY(p)->as.data.dfree) {
        RANY(p)->as.free.next = final_list;
    if (RANY(p)->as.file.fptr) {
        RANY(p)->as.free.next = final_list;
#if SIZEOF_LONG == SIZEOF_VOIDP
#define NUM2PTR(x) NUM2ULONG(x)
#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
#define NUM2PTR(x) NUM2ULL(x)
#endif
    if ((ptr % sizeof(RVALUE)) == (4 << 2)) {
#if SIZEOF_LONG == SIZEOF_VOIDP
    for (i = 0; i <= T_MASK; i++) {
    for (; p < pend; p++) {
    for (i = 0; i <= T_MASK; i++) {
#define COUNT_TYPE(t) case (t): type = ID2SYM(rb_intern(#t)); break;
    default: type = INT2NUM(i); break;
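/*
 * Expansion example: COUNT_TYPE(T_STRING) becomes
 *
 *   case (T_STRING): type = ID2SYM(rb_intern("T_STRING")); break;
 *
 * The stringizing #t is what turns the enum constant into the :T_STRING
 * symbol reported by ObjectSpace.count_objects; anything outside the
 * T_MASK table falls through to the numeric default above.
 */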
    size_t empty_num = 0, freed_num = 0, final_num = 0;
    RDATA(p)->dfree = 0;
    p->as.free.flags = 0;
    p->as.free.next = sweep_slot->freelist;
    if (final_num + freed_num + empty_num == sweep_slot->header->limit &&
    RDATA(pp)->dmark = (void (*)(void *))(VALUE)sweep_slot;
    if (freed_num + empty_num > 0) {
    if (GET_VM()->unlinked_method_entry_list) {
    stack->cache = chunk;
    chunk = stack->cache;
    next = stack->cache;
#if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
    while (chunk != NULL) {
    if (stack->index == 1) {
    for (i = 0; i < 4; i++) {
#define MARK_IN_BITMAP(bits, p) (bits[BITMAP_INDEX(p)] = bits[BITMAP_INDEX(p)] | ((uintptr_t)1 << BITMAP_OFFSET(p)))
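/*
 * MARK_IN_BITMAP sets the same bit that MARKED_IN_BITMAP (defined earlier)
 * tests: word BITMAP_INDEX(p), bit BITMAP_OFFSET(p).  One uintptr_t word
 * tracks 32 or 64 slots, so the mark bits for a whole slot stay compact and
 * off the object pages themselves, which is what keeps marking friendly to
 * copy-on-write forked processes.
 */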
#ifdef __ia64
#define SET_STACK_END (SET_MACHINE_STACK_END(&th->machine_stack_end), th->machine_register_stack_end = rb_ia64_bsp())
#else
#define SET_STACK_END SET_MACHINE_STACK_END(&th->machine_stack_end)
#endif

#define STACK_START (th->machine_stack_start)
#define STACK_END (th->machine_stack_end)
#define STACK_LEVEL_MAX (th->machine_stack_maxsize/sizeof(VALUE))

#if STACK_GROW_DIRECTION < 0
# define STACK_LENGTH (size_t)(STACK_START - STACK_END)
#elif STACK_GROW_DIRECTION > 0
# define STACK_LENGTH (size_t)(STACK_END - STACK_START + 1)
#else
# define STACK_LENGTH ((STACK_END < STACK_START) ? (size_t)(STACK_START - STACK_END) \
                       : (size_t)(STACK_END - STACK_START + 1))
#endif
#if !STACK_GROW_DIRECTION
    if (end > addr) return ruby_stack_grow_direction = 1;
    return ruby_stack_grow_direction = -1;
#endif
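/*
 * Standalone sketch of the probe above (illustrative only: comparing
 * addresses of distinct objects is formally undefined behavior, and the
 * callee must not be inlined for the comparison to mean anything).
 */
#include <stdio.h>
static int demo_grow_direction(volatile char *caller_local)
{
    volatile char callee_local = 0;
    return (&callee_local > caller_local) ? 1 : -1;
}
int main(void)
{
    volatile char base = 0;
    printf("stack grows %s\n", demo_grow_direction(&base) > 0 ? "up" : "down");
    return 0;
}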
#if !(defined(POSIX_SIGNAL) && defined(SIGSEGV) && defined(HAVE_SIGALTSTACK))
    ret = (VALUE*)rb_ia64_bsp() - th->machine_register_stack_start >
          th->machine_register_stack_maxsize/sizeof(VALUE) - water_mark;

#define STACKFRAME_FOR_CALL_CFUNC 512

#if defined(POSIX_SIGNAL) && defined(SIGSEGV) && defined(HAVE_SIGALTSTACK)
    if (end <= start) return;

#define rb_gc_mark_locations(start, end) gc_mark_locations(objspace, (start), (end))

    switch (def->type) {
#if STACK_GROW_DIRECTION < 0
#define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_END, (end) = STACK_START)
#elif STACK_GROW_DIRECTION > 0
#define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_START, (end) = STACK_END+(appendix))
#else
#define GET_STACK_BOUNDS(start, end, appendix) \
    ((STACK_END < STACK_START) ? \
     ((start) = STACK_END, (end) = STACK_START) : ((start) = STACK_START, (end) = STACK_END+(appendix)))
#endif

#define numberof(array) (int)(sizeof(array) / sizeof((array)[0]))

    } save_regs_gc_mark;
    VALUE *stack_start, *stack_end;
#if defined(__mc68000__)
    VALUE *stack_start, *stack_end;
    rb_bug("rb_gc_mark() called for broken object");
    gc_mark(objspace, obj->as.basic.klass);
    ptr = obj->as.array.as.heap.aux.shared;
    for (i = 0; i < len; i++) {
    ptr = obj->as.hash.ifnone;
#define STR_ASSOC FL_USER3
    ptr = obj->as.string.as.heap.aux.shared;
    RUBY_DATA_FUNC mark_func = obj->as.typeddata.type->function.dmark;
    if (mark_func) (*mark_func)(DATA_PTR(obj));
    if (obj->as.data.dmark) (*obj->as.data.dmark)(DATA_PTR(obj));
    for (i = 0; i < len; i++) {
    if (obj->as.file.fptr) {
        gc_mark(objspace, obj->as.file.fptr->pathv);
        gc_mark(objspace, obj->as.file.fptr->tied_io_for_writing);
        gc_mark(objspace, obj->as.file.fptr->writeconv_asciicompat);
        gc_mark(objspace, obj->as.file.fptr->writeconv_pre_ecopts);
        gc_mark(objspace, obj->as.file.fptr->encs.ecopts);
        gc_mark(objspace, obj->as.file.fptr->write_lock);
    ptr = obj->as.regexp.src;
    gc_mark(objspace, obj->as.match.regexp);
    if (obj->as.match.str) {
        ptr = obj->as.match.str;
    gc_mark(objspace, obj->as.rational.num);
    ptr = obj->as.rational.den;
    gc_mark(objspace, obj->as.complex.real);
    ptr = obj->as.complex.imag;
    rb_bug("rb_gc_mark(): unknown data type 0x%x(%p) %s",
    if (!mstack->index) return;
    struct mark_func_data_struct *prev_mark_func_data;
    if (tmp->varptr == addr) {
    if (GC_NOTIFY) printf("start garbage_collect()\n");
    if (GC_NOTIFY) printf("end garbage_collect()\n");
    fprintf(stderr, "[FATAL] failed to allocate memory\n");
    static VALUE sym_count;
    static VALUE sym_heap_used, sym_heap_length, sym_heap_increment;
    static VALUE sym_heap_live_num, sym_heap_free_num, sym_heap_final_num;
    static VALUE sym_total_allocated_object, sym_total_freed_object;
    if (sym_count == 0) {
    char *malloc_limit_ptr, *heap_min_slots_ptr, *free_min_ptr;

    malloc_limit_ptr = getenv("RUBY_GC_MALLOC_LIMIT");
    if (malloc_limit_ptr != NULL) {
        int malloc_limit_i = atoi(malloc_limit_ptr);
        fprintf(stderr, "malloc_limit=%d (%d)\n",
        if (malloc_limit_i > 0) {

    heap_min_slots_ptr = getenv("RUBY_HEAP_MIN_SLOTS");
    if (heap_min_slots_ptr != NULL) {
        int heap_min_slots_i = atoi(heap_min_slots_ptr);
        fprintf(stderr, "heap_min_slots=%d (%d)\n",
        if (heap_min_slots_i > 0) {

    free_min_ptr = getenv("RUBY_FREE_MIN");
    if (free_min_ptr != NULL) {
        int free_min_i = atoi(free_min_ptr);
        if (free_min_i > 0) {
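/*
 * Usage example for the three knobs parsed above:
 *
 *   RUBY_GC_MALLOC_LIMIT=16000000 RUBY_HEAP_MIN_SLOTS=600000 \
 *   RUBY_FREE_MIN=200000 ruby app.rb
 *
 * Note that atoi() turns unparsable strings into 0, and each branch only
 * applies a value when it is > 0, so malformed settings silently fall back
 * to the compiled-in defaults.
 */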
    struct mark_func_data_struct mfd;
    mfd.mark_func = func;
    fprintf(stderr, "[FATAL] %s\n", msg);
    fprintf(stderr, "[FATAL] failed to allocate memory\n");
    fprintf(stderr, "[FATAL] failed to allocate memory\n");
#if defined __MINGW32__
    res = __mingw_aligned_malloc(size, alignment);
#elif defined _WIN32 && !defined __CYGWIN__
    res = _aligned_malloc(size, alignment);
#elif defined(HAVE_POSIX_MEMALIGN)
    if (posix_memalign(&res, alignment, size) == 0) {
#elif defined(HAVE_MEMALIGN)
    res = memalign(alignment, size);
#else
    res = malloc(alignment + size + sizeof(void*));
    aligned = (char*)res + alignment + sizeof(void*);
    aligned -= ((VALUE)aligned & (alignment - 1));
    ((void**)aligned)[-1] = res;
    res = (void*)aligned;
#endif

#if defined(_DEBUG) || defined(GC_DEBUG)
    /* alignment must be a power of 2 and a multiple of the word size */
    assert(((alignment - 1) & alignment) == 0);
    assert(alignment % sizeof(void*) == 0);
#endif
#if defined __MINGW32__
    __mingw_aligned_free(ptr);
#elif defined _WIN32 && !defined __CYGWIN__
    _aligned_free(ptr);
#elif defined(HAVE_MEMALIGN) || defined(HAVE_POSIX_MEMALIGN)
    free(ptr);
#else
    free(((void**)ptr)[-1]);
#endif
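/*
 * Self-contained sketch of the portable fallback used above: over-allocate,
 * round up to the alignment boundary, and stash the raw malloc() pointer
 * one word below the aligned address so the matching free can recover it.
 * demo_* names are illustrative, not gc.c's; alignment must be a power of 2.
 */
#include <stdlib.h>
#include <stdint.h>
static void *demo_aligned_malloc(size_t alignment, size_t size)
{
    void *raw = malloc(alignment + size + sizeof(void *));
    if (!raw) return NULL;
    uintptr_t aligned = ((uintptr_t)raw + sizeof(void *) + alignment - 1)
                        & ~(uintptr_t)(alignment - 1);
    ((void **)aligned)[-1] = raw;       /* remember the block for free() */
    return (void *)aligned;
}
static void demo_aligned_free(void *ptr)
{
    if (ptr) free(((void **)ptr)[-1]);  /* recover the original pointer */
}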
static inline size_t
vm_malloc_prepare(rb_objspace_t *objspace, size_t size)
{
    if ((ssize_t)size < 0) {
    if (size == 0) size = 1;
#if CALC_EXACT_MALLOC_SIZE
    size += sizeof(size_t);
#endif
static inline void *
vm_malloc_fixup(rb_objspace_t *objspace, void *mem, size_t size)
{
#if CALC_EXACT_MALLOC_SIZE
    ((size_t *)mem)[0] = size;
    mem = (size_t *)mem + 1;
#endif
#define TRY_WITH_GC(alloc) do { \
        if (!(alloc) && \
            (!garbage_collect_with_gvl(objspace) || \
             !(alloc))) { \
            ruby_memerror(); \
        } \
    } while (0)
#if CALC_EXACT_MALLOC_SIZE
    if ((ssize_t)size < 0) {
#if CALC_EXACT_MALLOC_SIZE
    size += sizeof(size_t);
    ptr = (size_t *)ptr - 1;
    oldsize = ((size_t *)ptr)[0];
#if CALC_EXACT_MALLOC_SIZE
    ((size_t *)mem)[0] = size;
    mem = (size_t *)mem + 1;
#if CALC_EXACT_MALLOC_SIZE
    ptr = ((size_t *)ptr) - 1;
    size = ((size_t *)ptr)[0];
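/*
 * Self-contained sketch of the CALC_EXACT_MALLOC_SIZE bookkeeping seen
 * above: each block is prefixed with its requested size so the matching
 * free/realloc can adjust an exact running total.  demo_* names are
 * illustrative, not gc.c's.
 */
#include <stdlib.h>
static size_t demo_allocated_size;
static void *demo_traced_malloc(size_t size)
{
    size_t *mem = malloc(size + sizeof(size_t));
    if (!mem) return NULL;
    mem[0] = size;                    /* stash the request size in a header */
    demo_allocated_size += size;
    return mem + 1;                   /* hand out the payload */
}
static void demo_traced_free(void *ptr)
{
    size_t *mem = (size_t *)ptr - 1;  /* step back to the header */
    demo_allocated_size -= mem[0];
    free(mem);
}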
static inline size_t
xmalloc2_size(size_t n, size_t size)
{
    size_t len = size * n;
    if (n != 0 && size != len / n) {
    size_t len = size * n;
    if (n != 0 && size != len / n) {
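/*
 * Why the guard works: if size * n wraps modulo SIZE_MAX+1, dividing the
 * truncated product by n can no longer recover size.  With 32-bit size_t:
 *
 *   n = 0x10000, size = 0x10001
 *   len = 0x10001 * 0x10000 = 0x100010000 -> truncated to 0x00010000
 *   len / n = 1 != size  -> overflow detected, the allocation is refused
 */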
#if CALC_EXACT_MALLOC_SIZE
    size += sizeof(size_t);
#if CALC_EXACT_MALLOC_SIZE
    ((size_t *)mem)[0] = 0;
    mem = (size_t *)mem + 1;
#if CALC_EXACT_MALLOC_SIZE
gc_malloc_allocated_size(VALUE self)
gc_malloc_allocations(VALUE self)
    const struct weakmap *w = ptr;
    if (!existing) return ST_STOP;
#define GC_PROFILE_RECORD_DEFAULT_SIZE 100

#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_PROCESS_CPUTIME_ID)
    static int try_clock_gettime = 1;
    if (try_clock_gettime && clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts) == 0) {
    try_clock_gettime = 0;

    struct rusage usage;
    if (getrusage(RUSAGE_SELF, &usage) == 0) {
        time = usage.ru_utime;
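/*
 * A struct timeval carries seconds plus microseconds, so fallbacks like the
 * getrusage() branch above reduce user CPU time to a double with:
 *
 *   (double)tv.tv_sec + (double)tv.tv_usec * 1e-6
 */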
    FILETIME creation_time, exit_time, kernel_time, user_time;
    if (GetProcessTimes(GetCurrentProcess(),
                        &creation_time, &exit_time, &kernel_time, &user_time) != 0) {
        memcpy(&ui, &user_time, sizeof(FILETIME));
        q = ui.QuadPart / 10L;
        t = (DWORD)(q % 1000000L) * 1e-6;
        t += (double)(DWORD)(q >> 16) * (1 << 16);
        t += (DWORD)q & ~(~0 << 16);
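/*
 * Reading the Windows branch above: GetProcessTimes() reports user time in
 * 100ns FILETIME ticks, so q = ui.QuadPart / 10 counts microseconds.  The
 * fractional second comes from (q % 1000000) * 1e-6; then, after q has been
 * reduced to whole seconds (a step elided here), those seconds are added 16
 * bits at a time, (double)(DWORD)(q >> 16) scaled by 65536 plus the low 16
 * bits, so the 64-bit count reaches a double without a direct unsigned
 * 64-bit conversion.
 */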
    rb_bug("gc_profile malloc or realloc miss");
    if (gc_time < 0) gc_time = 0;
#if !GC_PROFILE_MORE_DETAIL
    double mark_time = 0;
    if (mark_time < 0) mark_time = 0;
    record->gc_mark_time = mark_time;
    double sweep_time = 0;
    if (sweep_time < 0) sweep_time = 0;
    record->gc_sweep_time = sweep_time;
    if (objspace->profile.run) {
    record->heap_live_objects = live;
    record->heap_free_objects = total - live;
#if GC_PROFILE_MORE_DETAIL
    append(out, rb_str_new_cstr(
        "Index Invoke Time(sec) Use Size(byte) Total Size(byte) Total Object GC Time(ms)\n"));
    for (i = 0; i < count; i++) {
#if !GC_PROFILE_MORE_DETAIL
#if !GC_PROFILE_MORE_DETAIL
#if GC_PROFILE_MORE_DETAIL
    append(out, rb_str_new_cstr(
        "Index Allocate Increase Allocate Limit Use Slot Have Finalize Mark Time(ms) Sweep Time(ms)\n"));
    for (i = 0; i < count; i++) {
        index++, r.allocate_increase, r.allocate_limit,
        r.heap_use_slots, (r.have_finalize ? "true" : "false"),
        r.gc_mark_time*1000, r.gc_sweep_time*1000));
rb_gcdebug_print_obj_condition(VALUE obj)
    fprintf(stderr, "pointer to heap?: true\n");
    fprintf(stderr, "pointer to heap?: false\n");
    fprintf(stderr, "marked?: %s\n",
    fprintf(stderr, "lazy sweeping?: true\n");
    fprintf(stderr, "swept?: %s\n",
    fprintf(stderr, "lazy sweeping?: false\n");
    fprintf(stderr, "WARNING: object %s(%p) is inadvertently collected\n", (char *)name, (void *)obj);

rb_gcdebug_sentinel(VALUE obj, const char *name)
#if CALC_EXACT_MALLOC_SIZE