#include <sys/types.h>

#ifdef HAVE_SYS_TIME_H
#include <sys/time.h>
#endif

#ifdef HAVE_SYS_RESOURCE_H
#include <sys/resource.h>
#endif

#if defined _WIN32 || defined __CYGWIN__
#include <windows.h>    /* for GetProcessTimes() below */
#endif

#ifdef HAVE_VALGRIND_MEMCHECK_H
# include <valgrind/memcheck.h>
# ifndef VALGRIND_MAKE_MEM_DEFINED
#  define VALGRIND_MAKE_MEM_DEFINED(p, n) VALGRIND_MAKE_READABLE((p), (n))
# endif
# ifndef VALGRIND_MAKE_MEM_UNDEFINED
#  define VALGRIND_MAKE_MEM_UNDEFINED(p, n) VALGRIND_MAKE_WRITABLE((p), (n))
# endif
#else
# define VALGRIND_MAKE_MEM_DEFINED(p, n)
# define VALGRIND_MAKE_MEM_UNDEFINED(p, n)
#endif

#define rb_setjmp(env) RUBY_SETJMP(env)
#define rb_jmp_buf rb_jmpbuf_t

# define alloca __builtin_alloca

#ifndef GC_MALLOC_LIMIT
#define GC_MALLOC_LIMIT 8000000
#endif
#define HEAP_MIN_SLOTS 10000
#if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
#define nomem_error GET_VM()->special_exceptions[ruby_error_nomemory]
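
/* Object ids for heap objects are derived from the object's address.
 * With a pointer-sized long the address is simply tagged as a Fixnum;
 * otherwise it is halved and boxed, and obj_id_to_ref() inverts the mapping. */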
#if SIZEOF_LONG == SIZEOF_VOIDP
# define nonspecial_obj_id(obj) (VALUE)((SIGNED_VALUE)(obj)|FIXNUM_FLAG)
# define obj_id_to_ref(objid) ((objid) ^ FIXNUM_FLAG)
#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
# define nonspecial_obj_id(obj) LL2NUM((SIGNED_VALUE)(obj) / 2)
# define obj_id_to_ref(objid) (FIXNUM_P(objid) ? \
    ((objid) ^ FIXNUM_FLAG) : (NUM2PTR(objid) << 1))
#else
# error not supported
#endif
#define GC_PROFILE_MORE_DETAIL 0
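
/* getrusage_time() (fragments below) returns the process CPU time in seconds:
 * from getrusage() where available, or from GetProcessTimes() on Windows,
 * converting the 100ns FILETIME ticks to a double. */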
    time = usage.ru_utime;

    FILETIME creation_time, exit_time, kernel_time, user_time;

    if (GetProcessTimes(GetCurrentProcess(),
                        &creation_time, &exit_time, &kernel_time, &user_time) == 0)
    {
        return 0.0;
    }
    memcpy(&ui, &user_time, sizeof(FILETIME));
    q = ui.QuadPart / 10L;
    t = (DWORD)(q % 1000000L) * 1e-6;
    t += (double)(DWORD)(q >> 16) * (1 << 16);
    t += (DWORD)q & ~(~0 << 16);
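
/* GC profiling hooks: while objspace->profile.run is enabled, each collection
 * appends a gc_profile_record holding the invoke time, the total GC time and
 * heap statistics. */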
#define GC_PROF_TIMER_START do {\
        if (objspace->profile.run) {\
            if (!objspace->profile.record) {\
                objspace->profile.size = 1000;\
                objspace->profile.record = malloc(sizeof(gc_profile_record) * objspace->profile.size);\
            }\
            if (count >= objspace->profile.size) {\
                objspace->profile.size += 1000;\
                objspace->profile.record = realloc(objspace->profile.record, sizeof(gc_profile_record) * objspace->profile.size);\
            }\
            if (!objspace->profile.record) {\
                rb_bug("gc_profile malloc or realloc miss");\
            }\
            MEMZERO(&objspace->profile.record[count], gc_profile_record, 1);\
            gc_time = getrusage_time();\
            objspace->profile.record[count].gc_invoke_time = gc_time - objspace->profile.invoke_time;\
        }\
    } while(0)
#define GC_PROF_TIMER_STOP(marked) do {\
        if (objspace->profile.run) {\
            gc_time = getrusage_time() - gc_time;\
            if (gc_time < 0) gc_time = 0;\
            objspace->profile.record[count].gc_time = gc_time;\
            objspace->profile.record[count].is_marked = !!(marked);\
            GC_PROF_SET_HEAP_INFO(objspace->profile.record[count]);\
            objspace->profile.count++;\
        }\
    } while(0)
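
/* With GC_PROFILE_MORE_DETAIL the mark and sweep phases and malloc growth are
 * recorded individually; otherwise those hooks compile to nothing. */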
#if GC_PROFILE_MORE_DETAIL
#define INIT_GC_PROF_PARAMS double gc_time = 0, sweep_time = 0;\
    size_t count = objspace->profile.count, total = 0, live = 0

#define GC_PROF_MARK_TIMER_START double mark_time = 0;\
    do {\
        if (objspace->profile.run) {\
            mark_time = getrusage_time();\
        }\
    } while(0)

#define GC_PROF_MARK_TIMER_STOP do {\
        if (objspace->profile.run) {\
            mark_time = getrusage_time() - mark_time;\
            if (mark_time < 0) mark_time = 0;\
            objspace->profile.record[objspace->profile.count].gc_mark_time = mark_time;\
        }\
    } while(0)

#define GC_PROF_SWEEP_TIMER_START do {\
        if (objspace->profile.run) {\
            sweep_time = getrusage_time();\
        }\
    } while(0)

#define GC_PROF_SWEEP_TIMER_STOP do {\
        if (objspace->profile.run) {\
            sweep_time = getrusage_time() - sweep_time;\
            if (sweep_time < 0) sweep_time = 0;\
            objspace->profile.record[count].gc_sweep_time = sweep_time;\
        }\
    } while(0)

#define GC_PROF_SET_MALLOC_INFO do {\
        if (objspace->profile.run) {\
            gc_profile_record *record = &objspace->profile.record[objspace->profile.count];\
            record->allocate_increase = malloc_increase;\
            record->allocate_limit = malloc_limit; \
        }\
    } while(0)

#define GC_PROF_SET_HEAP_INFO(record) do {\
        live = objspace->heap.live_num;\
        total = heaps_used * HEAP_OBJ_LIMIT;\
        (record).heap_use_slots = heaps_used;\
        (record).heap_live_objects = live;\
        (record).heap_free_objects = total - live;\
        (record).heap_total_objects = total;\
        (record).have_finalize = deferred_final_list ? Qtrue : Qfalse;\
        (record).heap_use_size = live * sizeof(RVALUE);\
        (record).heap_total_size = total * sizeof(RVALUE);\
    } while(0)

#define GC_PROF_INC_LIVE_NUM objspace->heap.live_num++
#define GC_PROF_DEC_LIVE_NUM objspace->heap.live_num--

#else

#define INIT_GC_PROF_PARAMS double gc_time = 0;\
    size_t count = objspace->profile.count, total = 0, live = 0
#define GC_PROF_MARK_TIMER_START
#define GC_PROF_MARK_TIMER_STOP
#define GC_PROF_SWEEP_TIMER_START
#define GC_PROF_SWEEP_TIMER_STOP
#define GC_PROF_SET_MALLOC_INFO
#define GC_PROF_SET_HEAP_INFO(record) do {\
        live = objspace->heap.live_num;\
        total = heaps_used * HEAP_OBJ_LIMIT;\
        (record).heap_total_objects = total;\
        (record).heap_use_size = live * sizeof(RVALUE);\
        (record).heap_total_size = total * sizeof(RVALUE);\
    } while(0)
#define GC_PROF_INC_LIVE_NUM
#define GC_PROF_DEC_LIVE_NUM
#endif
#if defined(_MSC_VER) || defined(__BORLANDC__) || defined(__CYGWIN__)
#pragma pack(push, 1)
#endif

#if defined(_MSC_VER) || defined(__BORLANDC__) || defined(__CYGWIN__)
#pragma pack(pop)
#endif
#define STACK_CHUNK_SIZE 500
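
/* When CALC_EXACT_MALLOC_SIZE is set to 1, every xmalloc'ed block is prefixed
 * with its size so that total allocated bytes and allocation counts can be
 * tracked (see gc_malloc_allocated_size/gc_malloc_allocations below). */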
#define CALC_EXACT_MALLOC_SIZE 0

#if CALC_EXACT_MALLOC_SIZE
        size_t allocated_size;
        size_t allocations;
#endif
#if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
#define rb_objspace (*GET_VM()->objspace)
#define ruby_initial_gc_stress initial_params.gc_stress
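
/* Shorthand accessors into the active rb_objspace; they assume a local
 * `objspace` variable is in scope. */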
#define malloc_limit objspace->malloc_params.limit
#define malloc_increase objspace->malloc_params.increase
#define heaps objspace->heap.ptr
#define heaps_length objspace->heap.length
#define heaps_used objspace->heap.used
#define freelist objspace->heap.freelist
#define lomem objspace->heap.range[0]
#define himem objspace->heap.range[1]
#define heaps_inc objspace->heap.increment
#define heaps_freed objspace->heap.freed
#define dont_gc objspace->flags.dont_gc
#define during_gc objspace->flags.during_gc
#define finalizing objspace->flags.finalizing
#define finalizer_table objspace->final.table
#define deferred_final_list objspace->final.deferred
#define global_List objspace->global_list
#define ruby_gc_stress objspace->gc_stress
#define initial_malloc_limit initial_params.initial_malloc_limit
#define initial_heap_min_slots initial_params.initial_heap_min_slots
#define initial_free_min initial_params.initial_free_min
#if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE

    memset(objspace, 0, sizeof(*objspace));
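
    /* GC tuning can be overridden at startup via the RUBY_GC_MALLOC_LIMIT,
     * RUBY_HEAP_MIN_SLOTS and RUBY_FREE_MIN environment variables. */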
    char *malloc_limit_ptr, *heap_min_slots_ptr, *free_min_ptr;

    malloc_limit_ptr = getenv("RUBY_GC_MALLOC_LIMIT");
    if (malloc_limit_ptr != NULL) {
        int malloc_limit_i = atoi(malloc_limit_ptr);
        fprintf(stderr, "malloc_limit=%d (%d)\n",
                malloc_limit_i, initial_malloc_limit);
        if (malloc_limit_i > 0) {
            initial_malloc_limit = malloc_limit_i;
        }
    }

    heap_min_slots_ptr = getenv("RUBY_HEAP_MIN_SLOTS");
    if (heap_min_slots_ptr != NULL) {
        int heap_min_slots_i = atoi(heap_min_slots_ptr);
        fprintf(stderr, "heap_min_slots=%d (%d)\n",
                heap_min_slots_i, initial_heap_min_slots);
        if (heap_min_slots_i > 0) {
            initial_heap_min_slots = heap_min_slots_i;
        }
    }

    free_min_ptr = getenv("RUBY_FREE_MIN");
    if (free_min_ptr != NULL) {
        int free_min_i = atoi(free_min_ptr);
        if (free_min_i > 0) {
            initial_free_min = free_min_i;
        }
    }
#if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
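
/* Objects live in fixed-size heap slabs of HEAP_SIZE bytes; each slab holds
 * HEAP_OBJ_LIMIT RVALUE slots. */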
#define HEAP_SIZE 0x4000
#define HEAP_OBJ_LIMIT (unsigned int)(HEAP_SIZE / sizeof(struct RVALUE))
    fprintf(stderr, "[FATAL] failed to allocate memory\n");

    fprintf(stderr, "[FATAL] failed to allocate memory\n");

    fprintf(stderr, "[FATAL] %s\n", msg);

    fprintf(stderr, "[FATAL] failed to allocate memory\n");
    if ((ssize_t)size < 0) {

    if (size == 0) size = 1;

#if CALC_EXACT_MALLOC_SIZE
    size += sizeof(size_t);
#endif

#if CALC_EXACT_MALLOC_SIZE
    ((size_t *)mem)[0] = size;
    mem = (size_t *)mem + 1;
#endif
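
/* TRY_WITH_GC retries an allocation once after forcing a collection, and
 * reports out-of-memory if the allocation still fails. */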
#define TRY_WITH_GC(alloc) do { \
        if (!(alloc) && \
            (!garbage_collect_with_gvl(objspace) || \
             !(alloc))) { \
            ruby_memerror(); \
        } \
    } while (0)
    if ((ssize_t)size < 0) {

#if CALC_EXACT_MALLOC_SIZE
    size += sizeof(size_t);
    ptr = (size_t *)ptr - 1;
#endif

#if CALC_EXACT_MALLOC_SIZE
    ((size_t *)mem)[0] = size;
    mem = (size_t *)mem + 1;
#endif

#if CALC_EXACT_MALLOC_SIZE
    ptr = ((size_t *)ptr) - 1;
    size = ((size_t *)ptr)[0];
#endif
    /* detect multiplication overflow in size * n */
    size_t len = size * n;
    if (n != 0 && size != len / n) {

    size_t len = size * n;
    if (n != 0 && size != len / n) {
        if (tmp->varptr == addr) {
        register RVALUE *mid_membase;
        mid = (lo + hi) / 2;
        if (mid_membase < membase) {
        else if (mid_membase > membase) {

    heaps->limit = objs;
    for (i = 0; i < add; i++) {

#ifdef USE_SIGALTSTACK
    void *tmp = th->altstack;

    size_t next_heaps_length = (size_t)(heaps_used * 1.8);

        next_heaps_length++;
#define RANY(o) ((RVALUE*)(o))

        rb_bug("object allocation during garbage collection phase");

    data->dfree = dfree;
    data->dmark = dmark;

    data->typed_flag = 1;
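
/* Conservative stack marking: SET_STACK_END records the current machine stack
 * position (and the IA64 register stack) so the ranges below can be scanned
 * for object references. */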
#ifdef __ia64
#define SET_STACK_END (SET_MACHINE_STACK_END(&th->machine_stack_end), th->machine_register_stack_end = rb_ia64_bsp())
#else
#define SET_STACK_END SET_MACHINE_STACK_END(&th->machine_stack_end)
#endif

#define STACK_START (th->machine_stack_start)
#define STACK_END (th->machine_stack_end)
#define STACK_LEVEL_MAX (th->machine_stack_maxsize/sizeof(VALUE))

#if STACK_GROW_DIRECTION < 0
# define STACK_LENGTH (size_t)(STACK_START - STACK_END)
#elif STACK_GROW_DIRECTION > 0
# define STACK_LENGTH (size_t)(STACK_END - STACK_START + 1)
#else
# define STACK_LENGTH ((STACK_END < STACK_START) ? (size_t)(STACK_START - STACK_END) \
                        : (size_t)(STACK_END - STACK_START + 1))
#endif

#if !STACK_GROW_DIRECTION
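
/* The mark stack is kept in chained chunks of STACK_CHUNK_SIZE slots; spent
 * chunks are parked on stack->cache for reuse. */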
    stack->cache = chunk;

    chunk = stack->cache;

    next = stack->cache;

#if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE

    while (chunk != NULL) {

    if (stack->index == 1) {
    for (i = 0; i < 4; i++) {

#if !(defined(POSIX_SIGNAL) && defined(SIGSEGV) && defined(HAVE_SIGALTSTACK))

    ret = (VALUE*)rb_ia64_bsp() - th->machine_register_stack_start >
          th->machine_register_stack_maxsize/sizeof(VALUE) - water_mark;

#define STACKFRAME_FOR_CALL_CFUNC 512

#if defined(POSIX_SIGNAL) && defined(SIGSEGV) && defined(HAVE_SIGALTSTACK)
#define MARK_STACK_EMPTY (mark_stack_ptr == mark_stack)

    if (!mstack->index) return;

    register size_t hi, lo, mid;

        mid = (lo + hi) / 2;
        if (heap->start <= p) {

    if (end <= start) return;
#define rb_gc_mark_locations(start, end) gc_mark_locations(objspace, (start), (end))

    switch (def->type) {

        rb_bug("rb_gc_mark() called for broken object");

        for (i = 0; i < len; i++) {

#define STR_ASSOC FL_USER3

        if (mark_func) (*mark_func)(DATA_PTR(obj));

        for (i = 0; i < len; i++) {

        rb_bug("rb_gc_mark(): unknown data type 0x%x(%p) %s",
        p->as.free.flags = 0;

    size_t free_num = 0, final_num = 0;

    p = sweep_slot->slot; pend = p + sweep_slot->limit;

                RDATA(p)->dfree = 0;

    if (final_num + free_num == sweep_slot->limit &&

        RDATA(pp)->dmark = (void (*)(void *))(VALUE)sweep_slot;

        sweep_slot->limit = final_num;

    if (GET_VM()->unlinked_method_entry_list) {
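
/* obj_free() (fragments below) releases the per-type resources of each object
 * (ivar tables, hash tables, regexps, typed data, file pointers and so on)
 * before the slot is recycled. */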
        RANY(p)->as.free.flags = 0;

        rb_bug("obj_free() called for broken object");

            RANY(obj)->as.object.as.heap.ivptr) {
            xfree(RANY(obj)->as.object.as.heap.ivptr);

        if (RANY(obj)->as.hash.ntbl) {

        if (RANY(obj)->as.regexp.ptr) {

            RDATA(obj)->dfree = RANY(obj)->as.typeddata.type->function.dfree;

        else if (RANY(obj)->as.data.dfree) {

        if (RANY(obj)->as.match.rmatch) {
            struct rmatch *rm = RANY(obj)->as.match.rmatch;

        if (RANY(obj)->as.file.fptr) {

        if (RANY(obj)->as.node.u1.tbl) {

            RANY(obj)->as.rstruct.as.heap.ptr) {
            xfree(RANY(obj)->as.rstruct.as.heap.ptr);

        rb_bug("gc_sweep(): unknown data type 0x%x(%p)",
#if STACK_GROW_DIRECTION < 0
#define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_END, (end) = STACK_START)
#elif STACK_GROW_DIRECTION > 0
#define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_START, (end) = STACK_END+(appendix))
#else
#define GET_STACK_BOUNDS(start, end, appendix) \
    ((STACK_END < STACK_START) ? \
     ((start) = STACK_END, (end) = STACK_START) : ((start) = STACK_START, (end) = STACK_END+(appendix)))
#endif
#define numberof(array) (int)(sizeof(array) / sizeof((array)[0]))

    } save_regs_gc_mark;
    VALUE *stack_start, *stack_end;
#if defined(__mc68000__)

    if (GC_NOTIFY) printf("start garbage_collect()\n");

    if (GC_NOTIFY) printf("end garbage_collect()\n");
    VALUE *stack_start, *stack_end;

    for (; pstart != pend; pstart++) {

    if (pstart != pend) {

    objspace->flags.dont_lazy_sweep = TRUE;

    for (; p != pend; p++) {
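
/* Finalizer support (fragments below): blocks registered through
 * ObjectSpace.define_finalizer are kept in finalizer_table and run after the
 * object has been swept. */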
    VALUE obj, block, table;

    table = (VALUE)data;

    RBASIC(table)->klass = 0;

    table = (VALUE)data;

        free_func = RDATA(obj)->dfree;

            RDATA(p)->dfree = RANY(p)->as.typeddata.type->function.dfree;

        else if (RANY(p)->as.data.dfree) {
            RANY(p)->as.free.next = final_list;

        if (RANY(p)->as.file.fptr) {
            RANY(p)->as.free.next = final_list;
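
/* ObjectSpace._id2ref turns an object id back into a reference: NUM2PTR
 * recovers the pointer-sized integer, and the `% sizeof(RVALUE)` test below
 * distinguishes encoded Symbol ids from heap object addresses. */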
#if SIZEOF_LONG == SIZEOF_VOIDP
#define NUM2PTR(x) NUM2ULONG(x)
#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
#define NUM2PTR(x) NUM2ULL(x)
#endif

    if ((ptr % sizeof(RVALUE)) == (4 << 2)) {
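
/* ObjectSpace.count_objects tallies heap slots per built-in type; COUNT_TYPE
 * expands each T_xxx constant into a Symbol key for the result hash. */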
    for (i = 0; i <= T_MASK; i++) {

        for (; p < pend; p++) {

    for (i = 0; i <= T_MASK; i++) {

#define COUNT_TYPE(t) case (t): type = ID2SYM(rb_intern(#t)); break;

          default: type = INT2NUM(i); break;
#if CALC_EXACT_MALLOC_SIZE
static VALUE
gc_malloc_allocated_size(VALUE self)
{
    return UINT2NUM((&rb_objspace)->malloc_params.allocated_size);
}

static VALUE
gc_malloc_allocations(VALUE self)
{
    return UINT2NUM((&rb_objspace)->malloc_params.allocations);
}
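
/* GC::Profiler report formatting: the extra columns (allocate increase/limit,
 * slot usage, mark and sweep times) are only emitted when
 * GC_PROFILE_MORE_DETAIL is enabled. */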
#if GC_PROFILE_MORE_DETAIL

    rb_str_cat2(result, "Index Invoke Time(sec) Use Size(byte) Total Size(byte) Total Object GC Time(ms)\n");
    for (i = 0; i < (int)RARRAY_LEN(record); i++) {

#if !GC_PROFILE_MORE_DETAIL

#if !GC_PROFILE_MORE_DETAIL

#if GC_PROFILE_MORE_DETAIL
    rb_str_cat2(result, "Index Allocate Increase Allocate Limit Use Slot Have Finalize Mark Time(ms) Sweep Time(ms)\n");
    for (i = 0; i < (int)RARRAY_LEN(record); i++) {

#if CALC_EXACT_MALLOC_SIZE