* [PATCH 1/5] gc.c: reduce boolean parameters for gc_start / garbage_collect
From: Eric Wong @ 2018-05-31 22:01 UTC
To: spew
The gc_start and garbage_collect functions take too many positional
parameters, which makes parts of gc.c hard to read. Rely more on the
reason flags so the reader does not lose track of argument ordering.
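
A minimal, self-contained sketch of the boolean-to-flag pattern
(the flag value is copied from gc.c's gc_profile_record_flag enum;
start() is a hypothetical stand-in for gc_start, not the real
function):

    #include <stdio.h>

    enum { GPR_FLAG_IMMEDIATE_SWEEP = 0x2000 };

    /* the callee recovers the old boolean from the flags word */
    static void start(unsigned int reason)
    {
        printf("immediate_sweep=%d\n",
               !!(reason & GPR_FLAG_IMMEDIATE_SWEEP));
    }

    int main(void)
    {
        start(GPR_FLAG_IMMEDIATE_SWEEP); /* was: start(..., TRUE, ...) */
        start(0);                        /* was: start(..., FALSE, ...) */
        return 0;
    }

The call site now names the behavior instead of passing a bare
TRUE/FALSE whose meaning depends on its position.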
---
gc.c | 56 +++++++++++++++++++++++++++++++++-----------------------
1 file changed, 33 insertions(+), 23 deletions(-)
diff --git a/gc.c b/gc.c
index 4ebae169ecf..bf62bd1a4e5 100644
--- a/gc.c
+++ b/gc.c
@@ -848,9 +848,9 @@ static void init_mark_stack(mark_stack_t *stack);
static int ready_to_gc(rb_objspace_t *objspace);
-static int garbage_collect(rb_objspace_t *, int full_mark, int immediate_mark, int immediate_sweep, int reason);
+static int garbage_collect(rb_objspace_t *, int full_mark, int immediate_mark, int reason);
-static int gc_start(rb_objspace_t *objspace, const int full_mark, const int immediate_mark, const unsigned int immediate_sweep, int reason);
+static int gc_start(rb_objspace_t *objspace, const int full_mark, const int immediate_mark, int reason);
static void gc_rest(rb_objspace_t *objspace);
static inline void gc_enter(rb_objspace_t *objspace, const char *event);
static inline void gc_exit(rb_objspace_t *objspace, const char *event);
@@ -1749,7 +1749,7 @@ heap_prepare(rb_objspace_t *objspace, rb_heap_t *heap)
if (heap->free_pages == NULL &&
(will_be_incremental_marking(objspace) || heap_increment(objspace, heap) == FALSE) &&
- gc_start(objspace, FALSE, FALSE, FALSE, GPR_FLAG_NEWOBJ) == FALSE) {
+ gc_start(objspace, FALSE, FALSE, GPR_FLAG_NEWOBJ) == FALSE) {
rb_memerror();
}
}
@@ -1919,7 +1919,7 @@ newobj_slowpath(VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, rb_objsp
}
if (ruby_gc_stressful) {
- if (!garbage_collect(objspace, FALSE, FALSE, FALSE, GPR_FLAG_NEWOBJ)) {
+ if (!garbage_collect(objspace, FALSE, FALSE, GPR_FLAG_NEWOBJ)) {
rb_memerror();
}
}
@@ -6396,7 +6396,7 @@ gc_reset_malloc_info(rb_objspace_t *objspace)
}
static int
-garbage_collect(rb_objspace_t *objspace, int full_mark, int immediate_mark, int immediate_sweep, int reason)
+garbage_collect(rb_objspace_t *objspace, int full_mark, int immediate_mark, int reason)
{
#if GC_PROFILE_MORE_DETAIL
objspace->profile.prepare_time = getrusage_time();
@@ -6408,17 +6408,19 @@ garbage_collect(rb_objspace_t *objspace, int full_mark, int immediate_mark, int
objspace->profile.prepare_time = getrusage_time() - objspace->profile.prepare_time;
#endif
- return gc_start(objspace, full_mark, immediate_mark, immediate_sweep, reason);
+ return gc_start(objspace, full_mark, immediate_mark, reason);
}
static int
-gc_start(rb_objspace_t *objspace, const int full_mark, const int immediate_mark, const unsigned int immediate_sweep, int reason)
+gc_start(rb_objspace_t *objspace, const int full_mark, const int immediate_mark, int reason)
{
int do_full_mark = full_mark;
- objspace->flags.immediate_sweep = immediate_sweep;
+
+ /* reason may be clobbered later, so set immediate_sweep here */
+ objspace->flags.immediate_sweep = !!((unsigned)reason & GPR_FLAG_IMMEDIATE_SWEEP);
if (!heap_allocated_pages) return FALSE; /* heap is not ready */
- if (reason != GPR_FLAG_METHOD && !ready_to_gc(objspace)) return TRUE; /* GC is not allowed */
+ if (!(reason & GPR_FLAG_METHOD) && !ready_to_gc(objspace)) return TRUE; /* GC is not allowed */
GC_ASSERT(gc_mode(objspace) == gc_mode_none);
GC_ASSERT(!is_lazy_sweeping(heap_eden));
@@ -6472,8 +6474,8 @@ gc_start(rb_objspace_t *objspace, const int full_mark, const int immediate_mark,
if (objspace->flags.immediate_sweep) reason |= GPR_FLAG_IMMEDIATE_SWEEP;
- gc_report(1, objspace, "gc_start(%d, %d, %d, reason: %d) => %d, %d, %d\n",
- full_mark, immediate_mark, immediate_sweep, reason,
+ gc_report(1, objspace, "gc_start(%d, %d, reason: %d) => %d, %d, %d\n",
+ full_mark, immediate_mark, reason,
do_full_mark, !is_incremental_marking(objspace), objspace->flags.immediate_sweep);
objspace->profile.count++;
@@ -6524,7 +6526,6 @@ struct objspace_and_reason {
int reason;
int full_mark;
int immediate_mark;
- int immediate_sweep;
};
static void
@@ -6636,15 +6637,15 @@ static void *
gc_with_gvl(void *ptr)
{
struct objspace_and_reason *oar = (struct objspace_and_reason *)ptr;
- return (void *)(VALUE)garbage_collect(oar->objspace, oar->full_mark, oar->immediate_mark, oar->immediate_sweep, oar->reason);
+ return (void *)(VALUE)garbage_collect(oar->objspace, oar->full_mark, oar->immediate_mark, oar->reason);
}
static int
-garbage_collect_with_gvl(rb_objspace_t *objspace, int full_mark, int immediate_mark, int immediate_sweep, int reason)
+garbage_collect_with_gvl(rb_objspace_t *objspace, int full_mark, int immediate_mark, int reason)
{
if (dont_gc) return TRUE;
if (ruby_thread_has_gvl_p()) {
- return garbage_collect(objspace, full_mark, immediate_mark, immediate_sweep, reason);
+ return garbage_collect(objspace, full_mark, immediate_mark, reason);
}
else {
if (ruby_native_thread_p()) {
@@ -6653,7 +6654,6 @@ garbage_collect_with_gvl(rb_objspace_t *objspace, int full_mark, int immediate_m
oar.reason = reason;
oar.full_mark = full_mark;
oar.immediate_mark = immediate_mark;
- oar.immediate_sweep = immediate_sweep;
return (int)(VALUE)rb_thread_call_with_gvl(gc_with_gvl, (void *)&oar);
}
else {
@@ -6699,7 +6699,8 @@ static VALUE
gc_start_internal(int argc, VALUE *argv, VALUE self)
{
rb_objspace_t *objspace = &rb_objspace;
- int full_mark = TRUE, immediate_mark = TRUE, immediate_sweep = TRUE;
+ int reason = GPR_FLAG_METHOD | GPR_FLAG_IMMEDIATE_SWEEP;
+ int full_mark = TRUE, immediate_mark = TRUE;
VALUE opt = Qnil;
static ID keyword_ids[3];
@@ -6718,10 +6719,12 @@ gc_start_internal(int argc, VALUE *argv, VALUE self)
if (kwvals[0] != Qundef) full_mark = RTEST(kwvals[0]);
if (kwvals[1] != Qundef) immediate_mark = RTEST(kwvals[1]);
- if (kwvals[2] != Qundef) immediate_sweep = RTEST(kwvals[2]);
+ if (kwvals[2] != Qundef && !RTEST(kwvals[2])) {
+ reason &= ~GPR_FLAG_IMMEDIATE_SWEEP;
+ }
}
- garbage_collect(objspace, full_mark, immediate_mark, immediate_sweep, GPR_FLAG_METHOD);
+ garbage_collect(objspace, full_mark, immediate_mark, reason);
gc_finalize_deferred(objspace);
return Qnil;
@@ -6738,7 +6741,8 @@ void
rb_gc(void)
{
rb_objspace_t *objspace = &rb_objspace;
- garbage_collect(objspace, TRUE, TRUE, TRUE, GPR_FLAG_CAPI);
+ int reason = GPR_FLAG_IMMEDIATE_SWEEP | GPR_FLAG_CAPI;
+ garbage_collect(objspace, TRUE, TRUE, reason);
gc_finalize_deferred(objspace);
}
@@ -7806,7 +7810,11 @@ static void
objspace_malloc_gc_stress(rb_objspace_t *objspace)
{
if (ruby_gc_stressful && ruby_native_thread_p()) {
- garbage_collect_with_gvl(objspace, gc_stress_full_mark_after_malloc_p(), TRUE, TRUE, GPR_FLAG_STRESS | GPR_FLAG_MALLOC);
+ garbage_collect_with_gvl(objspace, gc_stress_full_mark_after_malloc_p(),
+ TRUE,
+ GPR_FLAG_IMMEDIATE_SWEEP |
+ GPR_FLAG_STRESS |
+ GPR_FLAG_MALLOC);
}
}
@@ -7833,7 +7841,7 @@ objspace_malloc_increase(rb_objspace_t *objspace, void *mem, size_t new_size, si
gc_rest(objspace); /* gc_rest can reduce malloc_increase */
goto retry;
}
- garbage_collect_with_gvl(objspace, FALSE, FALSE, FALSE, GPR_FLAG_MALLOC);
+ garbage_collect_with_gvl(objspace, FALSE, FALSE, GPR_FLAG_MALLOC);
}
}
@@ -7911,7 +7919,9 @@ objspace_malloc_fixup(rb_objspace_t *objspace, void *mem, size_t size)
#define TRY_WITH_GC(alloc) do { \
objspace_malloc_gc_stress(objspace); \
if (!(alloc) && \
- (!garbage_collect_with_gvl(objspace, TRUE, TRUE, TRUE, GPR_FLAG_MALLOC) || /* full/immediate mark && immediate sweep */ \
+ /* full/immediate mark */ \
+ (!garbage_collect_with_gvl(objspace, TRUE, TRUE, \
+ GPR_FLAG_IMMEDIATE_SWEEP | GPR_FLAG_MALLOC) || \
!(alloc))) { \
ruby_memerror(); \
} \
--
EW
* [PATCH 2/5] gc.c: introduce GPR_FLAG_IMMEDIATE_MARK to reduce parameters
From: Eric Wong @ 2018-05-31 22:01 UTC
To: spew
Another step toward making gc_start and garbage_collect calls
self-documenting, since remembering the order of positional function
arguments is difficult.
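
A short sketch of the keyword-to-flag translation gc_start_internal
uses below (flag values copied from the enum in this patch;
immediate_mark_kw is a hypothetical stand-in for the Qundef/RTEST
keyword handling):

    #include <stdio.h>

    enum {
        GPR_FLAG_IMMEDIATE_SWEEP = 0x2000,
        GPR_FLAG_IMMEDIATE_MARK  = 0x8000
    };

    int main(void)
    {
        /* default: mark and sweep immediately */
        int reason = GPR_FLAG_IMMEDIATE_MARK | GPR_FLAG_IMMEDIATE_SWEEP;
        int immediate_mark_kw = 0; /* user passed immediate_mark: false */

        if (!immediate_mark_kw)
            reason &= ~GPR_FLAG_IMMEDIATE_MARK;

        printf("reason=%#x immediate_mark=%d\n", (unsigned)reason,
               !!(reason & GPR_FLAG_IMMEDIATE_MARK));
        return 0;
    }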
---
gc.c | 59 ++++++++++++++++++++++++++++++++---------------------------
1 file changed, 32 insertions(+), 27 deletions(-)
diff --git a/gc.c b/gc.c
index bf62bd1a4e5..77833c419f9 100644
--- a/gc.c
+++ b/gc.c
@@ -355,7 +355,8 @@ typedef enum {
/* others */
GPR_FLAG_IMMEDIATE_SWEEP = 0x2000,
- GPR_FLAG_HAVE_FINALIZE = 0x4000
+ GPR_FLAG_HAVE_FINALIZE = 0x4000,
+ GPR_FLAG_IMMEDIATE_MARK = 0x8000
} gc_profile_record_flag;
typedef struct gc_profile_record {
@@ -848,9 +849,9 @@ static void init_mark_stack(mark_stack_t *stack);
static int ready_to_gc(rb_objspace_t *objspace);
-static int garbage_collect(rb_objspace_t *, int full_mark, int immediate_mark, int reason);
+static int garbage_collect(rb_objspace_t *, int full_mark, int reason);
-static int gc_start(rb_objspace_t *objspace, const int full_mark, const int immediate_mark, int reason);
+static int gc_start(rb_objspace_t *objspace, const int full_mark, int reason);
static void gc_rest(rb_objspace_t *objspace);
static inline void gc_enter(rb_objspace_t *objspace, const char *event);
static inline void gc_exit(rb_objspace_t *objspace, const char *event);
@@ -1749,7 +1750,7 @@ heap_prepare(rb_objspace_t *objspace, rb_heap_t *heap)
if (heap->free_pages == NULL &&
(will_be_incremental_marking(objspace) || heap_increment(objspace, heap) == FALSE) &&
- gc_start(objspace, FALSE, FALSE, GPR_FLAG_NEWOBJ) == FALSE) {
+ gc_start(objspace, FALSE, GPR_FLAG_NEWOBJ) == FALSE) {
rb_memerror();
}
}
@@ -1919,7 +1920,7 @@ newobj_slowpath(VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, rb_objsp
}
if (ruby_gc_stressful) {
- if (!garbage_collect(objspace, FALSE, FALSE, GPR_FLAG_NEWOBJ)) {
+ if (!garbage_collect(objspace, FALSE, GPR_FLAG_NEWOBJ)) {
rb_memerror();
}
}
@@ -6396,7 +6397,7 @@ gc_reset_malloc_info(rb_objspace_t *objspace)
}
static int
-garbage_collect(rb_objspace_t *objspace, int full_mark, int immediate_mark, int reason)
+garbage_collect(rb_objspace_t *objspace, int full_mark, int reason)
{
#if GC_PROFILE_MORE_DETAIL
objspace->profile.prepare_time = getrusage_time();
@@ -6408,13 +6409,14 @@ garbage_collect(rb_objspace_t *objspace, int full_mark, int immediate_mark, int
objspace->profile.prepare_time = getrusage_time() - objspace->profile.prepare_time;
#endif
- return gc_start(objspace, full_mark, immediate_mark, reason);
+ return gc_start(objspace, full_mark, reason);
}
static int
-gc_start(rb_objspace_t *objspace, const int full_mark, const int immediate_mark, int reason)
+gc_start(rb_objspace_t *objspace, const int full_mark, int reason)
{
int do_full_mark = full_mark;
+ unsigned int immediate_mark = (unsigned)reason & GPR_FLAG_IMMEDIATE_MARK;
/* reason may be clobbered later, so set immediate_sweep here */
objspace->flags.immediate_sweep = !!((unsigned)reason & GPR_FLAG_IMMEDIATE_SWEEP);
@@ -6474,8 +6476,8 @@ gc_start(rb_objspace_t *objspace, const int full_mark, const int immediate_mark,
if (objspace->flags.immediate_sweep) reason |= GPR_FLAG_IMMEDIATE_SWEEP;
- gc_report(1, objspace, "gc_start(%d, %d, reason: %d) => %d, %d, %d\n",
- full_mark, immediate_mark, reason,
+ gc_report(1, objspace, "gc_start(%d, reason: %d) => %d, %d, %d\n",
+ full_mark, reason,
do_full_mark, !is_incremental_marking(objspace), objspace->flags.immediate_sweep);
objspace->profile.count++;
@@ -6525,7 +6527,6 @@ struct objspace_and_reason {
rb_objspace_t *objspace;
int reason;
int full_mark;
- int immediate_mark;
};
static void
@@ -6637,15 +6638,15 @@ static void *
gc_with_gvl(void *ptr)
{
struct objspace_and_reason *oar = (struct objspace_and_reason *)ptr;
- return (void *)(VALUE)garbage_collect(oar->objspace, oar->full_mark, oar->immediate_mark, oar->reason);
+ return (void *)(VALUE)garbage_collect(oar->objspace, oar->full_mark, oar->reason);
}
static int
-garbage_collect_with_gvl(rb_objspace_t *objspace, int full_mark, int immediate_mark, int reason)
+garbage_collect_with_gvl(rb_objspace_t *objspace, int full_mark, int reason)
{
if (dont_gc) return TRUE;
if (ruby_thread_has_gvl_p()) {
- return garbage_collect(objspace, full_mark, immediate_mark, reason);
+ return garbage_collect(objspace, full_mark, reason);
}
else {
if (ruby_native_thread_p()) {
@@ -6653,7 +6654,6 @@ garbage_collect_with_gvl(rb_objspace_t *objspace, int full_mark, int immediate_m
oar.objspace = objspace;
oar.reason = reason;
oar.full_mark = full_mark;
- oar.immediate_mark = immediate_mark;
return (int)(VALUE)rb_thread_call_with_gvl(gc_with_gvl, (void *)&oar);
}
else {
@@ -6699,8 +6699,9 @@ static VALUE
gc_start_internal(int argc, VALUE *argv, VALUE self)
{
rb_objspace_t *objspace = &rb_objspace;
- int reason = GPR_FLAG_METHOD | GPR_FLAG_IMMEDIATE_SWEEP;
- int full_mark = TRUE, immediate_mark = TRUE;
+ int reason = GPR_FLAG_IMMEDIATE_MARK | GPR_FLAG_IMMEDIATE_SWEEP |
+ GPR_FLAG_METHOD;
+ int full_mark = TRUE;
VALUE opt = Qnil;
static ID keyword_ids[3];
@@ -6718,13 +6719,15 @@ gc_start_internal(int argc, VALUE *argv, VALUE self)
rb_get_kwargs(opt, keyword_ids, 0, 3, kwvals);
if (kwvals[0] != Qundef) full_mark = RTEST(kwvals[0]);
- if (kwvals[1] != Qundef) immediate_mark = RTEST(kwvals[1]);
+ if (kwvals[1] != Qundef && !RTEST(kwvals[1])) {
+ reason &= ~GPR_FLAG_IMMEDIATE_MARK;
+ }
if (kwvals[2] != Qundef && !RTEST(kwvals[2])) {
reason &= ~GPR_FLAG_IMMEDIATE_SWEEP;
}
}
- garbage_collect(objspace, full_mark, immediate_mark, reason);
+ garbage_collect(objspace, full_mark, reason);
gc_finalize_deferred(objspace);
return Qnil;
@@ -6741,8 +6744,9 @@ void
rb_gc(void)
{
rb_objspace_t *objspace = &rb_objspace;
- int reason = GPR_FLAG_IMMEDIATE_SWEEP | GPR_FLAG_CAPI;
- garbage_collect(objspace, TRUE, TRUE, reason);
+ int reason = GPR_FLAG_IMMEDIATE_MARK | GPR_FLAG_IMMEDIATE_SWEEP |
+ GPR_FLAG_CAPI;
+ garbage_collect(objspace, TRUE, reason);
gc_finalize_deferred(objspace);
}
@@ -7811,8 +7815,8 @@ objspace_malloc_gc_stress(rb_objspace_t *objspace)
{
if (ruby_gc_stressful && ruby_native_thread_p()) {
garbage_collect_with_gvl(objspace, gc_stress_full_mark_after_malloc_p(),
- TRUE,
- GPR_FLAG_IMMEDIATE_SWEEP |
+ GPR_FLAG_IMMEDIATE_MARK |
+ GPR_FLAG_IMMEDIATE_SWEEP |
GPR_FLAG_STRESS |
GPR_FLAG_MALLOC);
}
@@ -7841,7 +7845,7 @@ objspace_malloc_increase(rb_objspace_t *objspace, void *mem, size_t new_size, si
gc_rest(objspace); /* gc_rest can reduce malloc_increase */
goto retry;
}
- garbage_collect_with_gvl(objspace, FALSE, FALSE, GPR_FLAG_MALLOC);
+ garbage_collect_with_gvl(objspace, FALSE, GPR_FLAG_MALLOC);
}
}
@@ -7919,9 +7923,10 @@ objspace_malloc_fixup(rb_objspace_t *objspace, void *mem, size_t size)
#define TRY_WITH_GC(alloc) do { \
objspace_malloc_gc_stress(objspace); \
if (!(alloc) && \
- /* full/immediate mark */ \
- (!garbage_collect_with_gvl(objspace, TRUE, TRUE, \
- GPR_FLAG_IMMEDIATE_SWEEP | GPR_FLAG_MALLOC) || \
+ /* full mark */ \
+ (!garbage_collect_with_gvl(objspace, TRUE, \
+ GPR_FLAG_IMMEDIATE_MARK | GPR_FLAG_IMMEDIATE_SWEEP | \
+ GPR_FLAG_MALLOC) || \
!(alloc))) { \
ruby_memerror(); \
} \
--
EW
* [PATCH 3/5] gc.c: introduce GPR_FLAG_FULL_MARK to reduce parameters
From: Eric Wong @ 2018-05-31 22:01 UTC
To: spew
Another step toward making gc_start and garbage_collect calls
self-documenting, since remembering the order of positional function
arguments is difficult.
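
After this patch a caller composes a single flags word; a compilable
sketch of the end state (flag values from the enum; do_gc is a
hypothetical stand-in for garbage_collect):

    #include <stdio.h>

    enum {
        GPR_FLAG_IMMEDIATE_SWEEP = 0x2000,
        GPR_FLAG_IMMEDIATE_MARK  = 0x8000,
        GPR_FLAG_FULL_MARK       = 0x10000
    };

    static void do_gc(int reason)
    {
        printf("full=%d imm_mark=%d imm_sweep=%d\n",
               !!(reason & GPR_FLAG_FULL_MARK),
               !!(reason & GPR_FLAG_IMMEDIATE_MARK),
               !!(reason & GPR_FLAG_IMMEDIATE_SWEEP));
    }

    int main(void)
    {
        /* old: garbage_collect(objspace, TRUE, TRUE, TRUE, reason) */
        do_gc(GPR_FLAG_FULL_MARK | GPR_FLAG_IMMEDIATE_MARK |
              GPR_FLAG_IMMEDIATE_SWEEP);
        return 0;
    }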
---
gc.c | 65 ++++++++++++++++++++++++++++++------------------------------
1 file changed, 33 insertions(+), 32 deletions(-)
diff --git a/gc.c b/gc.c
index 77833c419f9..9f44381fd5e 100644
--- a/gc.c
+++ b/gc.c
@@ -356,7 +356,8 @@ typedef enum {
/* others */
GPR_FLAG_IMMEDIATE_SWEEP = 0x2000,
GPR_FLAG_HAVE_FINALIZE = 0x4000,
- GPR_FLAG_IMMEDIATE_MARK = 0x8000
+ GPR_FLAG_IMMEDIATE_MARK = 0x8000,
+ GPR_FLAG_FULL_MARK = 0x10000
} gc_profile_record_flag;
typedef struct gc_profile_record {
@@ -849,9 +850,9 @@ static void init_mark_stack(mark_stack_t *stack);
static int ready_to_gc(rb_objspace_t *objspace);
-static int garbage_collect(rb_objspace_t *, int full_mark, int reason);
+static int garbage_collect(rb_objspace_t *, int reason);
-static int gc_start(rb_objspace_t *objspace, const int full_mark, int reason);
+static int gc_start(rb_objspace_t *objspace, int reason);
static void gc_rest(rb_objspace_t *objspace);
static inline void gc_enter(rb_objspace_t *objspace, const char *event);
static inline void gc_exit(rb_objspace_t *objspace, const char *event);
@@ -1750,7 +1751,7 @@ heap_prepare(rb_objspace_t *objspace, rb_heap_t *heap)
if (heap->free_pages == NULL &&
(will_be_incremental_marking(objspace) || heap_increment(objspace, heap) == FALSE) &&
- gc_start(objspace, FALSE, GPR_FLAG_NEWOBJ) == FALSE) {
+ gc_start(objspace, GPR_FLAG_NEWOBJ) == FALSE) {
rb_memerror();
}
}
@@ -1920,7 +1921,7 @@ newobj_slowpath(VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, rb_objsp
}
if (ruby_gc_stressful) {
- if (!garbage_collect(objspace, FALSE, GPR_FLAG_NEWOBJ)) {
+ if (!garbage_collect(objspace, GPR_FLAG_NEWOBJ)) {
rb_memerror();
}
}
@@ -6397,7 +6398,7 @@ gc_reset_malloc_info(rb_objspace_t *objspace)
}
static int
-garbage_collect(rb_objspace_t *objspace, int full_mark, int reason)
+garbage_collect(rb_objspace_t *objspace, int reason)
{
#if GC_PROFILE_MORE_DETAIL
objspace->profile.prepare_time = getrusage_time();
@@ -6409,13 +6410,13 @@ garbage_collect(rb_objspace_t *objspace, int full_mark, int reason)
objspace->profile.prepare_time = getrusage_time() - objspace->profile.prepare_time;
#endif
- return gc_start(objspace, full_mark, reason);
+ return gc_start(objspace, reason);
}
static int
-gc_start(rb_objspace_t *objspace, const int full_mark, int reason)
+gc_start(rb_objspace_t *objspace, int reason)
{
- int do_full_mark = full_mark;
+ unsigned int do_full_mark = !!((unsigned)reason & GPR_FLAG_FULL_MARK);
unsigned int immediate_mark = (unsigned)reason & GPR_FLAG_IMMEDIATE_MARK;
/* reason may be clobbered later, so set immediate_sweep here */
@@ -6476,8 +6477,8 @@ gc_start(rb_objspace_t *objspace, const int full_mark, int reason)
if (objspace->flags.immediate_sweep) reason |= GPR_FLAG_IMMEDIATE_SWEEP;
- gc_report(1, objspace, "gc_start(%d, reason: %d) => %d, %d, %d\n",
- full_mark, reason,
+ gc_report(1, objspace, "gc_start(reason: %d) => %u, %d, %d\n",
+ reason,
do_full_mark, !is_incremental_marking(objspace), objspace->flags.immediate_sweep);
objspace->profile.count++;
@@ -6526,7 +6527,6 @@ gc_rest(rb_objspace_t *objspace)
struct objspace_and_reason {
rb_objspace_t *objspace;
int reason;
- int full_mark;
};
static void
@@ -6638,22 +6638,21 @@ static void *
gc_with_gvl(void *ptr)
{
struct objspace_and_reason *oar = (struct objspace_and_reason *)ptr;
- return (void *)(VALUE)garbage_collect(oar->objspace, oar->full_mark, oar->reason);
+ return (void *)(VALUE)garbage_collect(oar->objspace, oar->reason);
}
static int
-garbage_collect_with_gvl(rb_objspace_t *objspace, int full_mark, int reason)
+garbage_collect_with_gvl(rb_objspace_t *objspace, int reason)
{
if (dont_gc) return TRUE;
if (ruby_thread_has_gvl_p()) {
- return garbage_collect(objspace, full_mark, reason);
+ return garbage_collect(objspace, reason);
}
else {
if (ruby_native_thread_p()) {
struct objspace_and_reason oar;
oar.objspace = objspace;
oar.reason = reason;
- oar.full_mark = full_mark;
return (int)(VALUE)rb_thread_call_with_gvl(gc_with_gvl, (void *)&oar);
}
else {
@@ -6699,9 +6698,8 @@ static VALUE
gc_start_internal(int argc, VALUE *argv, VALUE self)
{
rb_objspace_t *objspace = &rb_objspace;
- int reason = GPR_FLAG_IMMEDIATE_MARK | GPR_FLAG_IMMEDIATE_SWEEP |
- GPR_FLAG_METHOD;
- int full_mark = TRUE;
+ int reason = GPR_FLAG_FULL_MARK | GPR_FLAG_IMMEDIATE_MARK |
+ GPR_FLAG_IMMEDIATE_SWEEP | GPR_FLAG_METHOD;
VALUE opt = Qnil;
static ID keyword_ids[3];
@@ -6718,7 +6716,9 @@ gc_start_internal(int argc, VALUE *argv, VALUE self)
rb_get_kwargs(opt, keyword_ids, 0, 3, kwvals);
- if (kwvals[0] != Qundef) full_mark = RTEST(kwvals[0]);
+ if (kwvals[0] != Qundef && !RTEST(kwvals[0])) {
+ reason &= ~GPR_FLAG_FULL_MARK;
+ }
if (kwvals[1] != Qundef && !RTEST(kwvals[1])) {
reason &= ~GPR_FLAG_IMMEDIATE_MARK;
}
@@ -6727,7 +6727,7 @@ gc_start_internal(int argc, VALUE *argv, VALUE self)
}
}
- garbage_collect(objspace, full_mark, reason);
+ garbage_collect(objspace, reason);
gc_finalize_deferred(objspace);
return Qnil;
@@ -6744,9 +6744,9 @@ void
rb_gc(void)
{
rb_objspace_t *objspace = &rb_objspace;
- int reason = GPR_FLAG_IMMEDIATE_MARK | GPR_FLAG_IMMEDIATE_SWEEP |
- GPR_FLAG_CAPI;
- garbage_collect(objspace, TRUE, reason);
+ int reason = GPR_FLAG_FULL_MARK | GPR_FLAG_IMMEDIATE_MARK |
+ GPR_FLAG_IMMEDIATE_SWEEP | GPR_FLAG_CAPI;
+ garbage_collect(objspace, reason);
gc_finalize_deferred(objspace);
}
@@ -7814,11 +7814,13 @@ static void
objspace_malloc_gc_stress(rb_objspace_t *objspace)
{
if (ruby_gc_stressful && ruby_native_thread_p()) {
- garbage_collect_with_gvl(objspace, gc_stress_full_mark_after_malloc_p(),
- GPR_FLAG_IMMEDIATE_MARK |
- GPR_FLAG_IMMEDIATE_SWEEP |
- GPR_FLAG_STRESS |
- GPR_FLAG_MALLOC);
+ int reason = GPR_FLAG_IMMEDIATE_MARK | GPR_FLAG_IMMEDIATE_SWEEP |
+ GPR_FLAG_STRESS | GPR_FLAG_MALLOC;
+
+ if (gc_stress_full_mark_after_malloc_p()) {
+ reason |= GPR_FLAG_FULL_MARK;
+ }
+ garbage_collect_with_gvl(objspace, reason);
}
}
@@ -7845,7 +7847,7 @@ objspace_malloc_increase(rb_objspace_t *objspace, void *mem, size_t new_size, si
gc_rest(objspace); /* gc_rest can reduce malloc_increase */
goto retry;
}
- garbage_collect_with_gvl(objspace, FALSE, GPR_FLAG_MALLOC);
+ garbage_collect_with_gvl(objspace, GPR_FLAG_MALLOC);
}
}
@@ -7923,8 +7925,7 @@ objspace_malloc_fixup(rb_objspace_t *objspace, void *mem, size_t size)
#define TRY_WITH_GC(alloc) do { \
objspace_malloc_gc_stress(objspace); \
if (!(alloc) && \
- /* full mark */ \
- (!garbage_collect_with_gvl(objspace, TRUE, \
+ (!garbage_collect_with_gvl(objspace, GPR_FLAG_FULL_MARK | \
GPR_FLAG_IMMEDIATE_MARK | GPR_FLAG_IMMEDIATE_SWEEP | \
GPR_FLAG_MALLOC) || \
!(alloc))) { \
--
EW
* [PATCH 4/5] gc: pass thread around for malloc
From: Eric Wong @ 2018-05-31 22:01 UTC
To: spew
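Export ruby_thread_from_native() so the malloc paths can fetch the
rb_thread_t once and pass it down; thread_has_gvl_p() then reduces
GVL checks to a pointer test on blocking_region_buffer. A simplified,
compilable sketch of that shape (th_t and the helpers below are
hypothetical stand-ins, not the real rb_thread_t API):

    #include <stdio.h>

    /* stand-in for rb_thread_t; blocking_region_buffer is non-NULL
     * while the thread has released the GVL, as in the diff below */
    typedef struct { void *blocking_region_buffer; } th_t;

    static int thread_has_gvl_p(const th_t *th)
    {
        return th && th->blocking_region_buffer == NULL;
    }

    static void on_malloc(const th_t *th)
    {
        if (thread_has_gvl_p(th))
            printf("run GC directly\n");
        else if (th)
            printf("re-acquire GVL first\n"); /* rb_thread_call_with_gvl */
        else
            printf("non-Ruby thread: skip GC\n");
    }

    int main(void)
    {
        th_t th = { NULL };     /* Ruby thread holding the GVL */
        on_malloc(&th);
        on_malloc(NULL);        /* native thread unknown to Ruby */
        return 0;
    }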
---
gc.c | 75 ++++++++++++++++++++++++++++++------------------
internal.h | 2 ++
thread_pthread.c | 2 +-
thread_win32.c | 2 +-
4 files changed, 51 insertions(+), 30 deletions(-)
diff --git a/gc.c b/gc.c
index 9f44381fd5e..c8f8b16a896 100644
--- a/gc.c
+++ b/gc.c
@@ -6641,15 +6641,22 @@ gc_with_gvl(void *ptr)
return (void *)(VALUE)garbage_collect(oar->objspace, oar->reason);
}
+static inline int
+thread_has_gvl_p(const rb_thread_t *th)
+{
+ return (th && th->blocking_region_buffer == 0);
+}
+
static int
-garbage_collect_with_gvl(rb_objspace_t *objspace, int reason)
+garbage_collect_with_gvl(rb_objspace_t *objspace, const rb_thread_t *th,
+ int reason)
{
if (dont_gc) return TRUE;
- if (ruby_thread_has_gvl_p()) {
+ if (thread_has_gvl_p(th)) {
return garbage_collect(objspace, reason);
}
else {
- if (ruby_native_thread_p()) {
+ if (th) {
struct objspace_and_reason oar;
oar.objspace = objspace;
oar.reason = reason;
@@ -7811,21 +7818,22 @@ atomic_sub_nounderflow(size_t *var, size_t sub)
}
static void
-objspace_malloc_gc_stress(rb_objspace_t *objspace)
+objspace_malloc_gc_stress(rb_objspace_t *objspace, const rb_thread_t *th)
{
- if (ruby_gc_stressful && ruby_native_thread_p()) {
+ if (ruby_gc_stressful && th) {
int reason = GPR_FLAG_IMMEDIATE_MARK | GPR_FLAG_IMMEDIATE_SWEEP |
GPR_FLAG_STRESS | GPR_FLAG_MALLOC;
if (gc_stress_full_mark_after_malloc_p()) {
reason |= GPR_FLAG_FULL_MARK;
}
- garbage_collect_with_gvl(objspace, reason);
+ garbage_collect_with_gvl(objspace, th, reason);
}
}
static void
-objspace_malloc_increase(rb_objspace_t *objspace, void *mem, size_t new_size, size_t old_size, enum memop_type type)
+objspace_malloc_increase(rb_objspace_t *objspace, const rb_thread_t *th,
+ void *mem, size_t new_size, size_t old_size, enum memop_type type)
{
if (new_size > old_size) {
ATOMIC_SIZE_ADD(malloc_increase, new_size - old_size);
@@ -7842,12 +7850,12 @@ objspace_malloc_increase(rb_objspace_t *objspace, void *mem, size_t new_size, si
if (type == MEMOP_TYPE_MALLOC) {
retry:
- if (malloc_increase > malloc_limit && ruby_native_thread_p() && !dont_gc) {
- if (ruby_thread_has_gvl_p() && is_lazy_sweeping(heap_eden)) {
+ if (malloc_increase > malloc_limit && th && !dont_gc) {
+ if (thread_has_gvl_p(th) && is_lazy_sweeping(heap_eden)) {
gc_rest(objspace); /* gc_rest can reduce malloc_increase */
goto retry;
}
- garbage_collect_with_gvl(objspace, GPR_FLAG_MALLOC);
+ garbage_collect_with_gvl(objspace, th, GPR_FLAG_MALLOC);
}
}
@@ -7896,23 +7904,26 @@ objspace_malloc_increase(rb_objspace_t *objspace, void *mem, size_t new_size, si
#endif
}
-static inline size_t
-objspace_malloc_prepare(rb_objspace_t *objspace, size_t size)
+static size_t
+objspace_malloc_prepare(rb_objspace_t *objspace, const rb_thread_t *th,
+ size_t size)
{
if (size == 0) size = 1;
#if CALC_EXACT_MALLOC_SIZE
size += sizeof(size_t);
#endif
+ /* TODO: use th */
return size;
}
static inline void *
-objspace_malloc_fixup(rb_objspace_t *objspace, void *mem, size_t size)
+objspace_malloc_fixup(rb_objspace_t *objspace, rb_thread_t *th,
+ void *mem, size_t size)
{
size = objspace_malloc_size(objspace, mem, size);
- objspace_malloc_increase(objspace, mem, size, 0, MEMOP_TYPE_MALLOC);
+ objspace_malloc_increase(objspace, th, mem, size, 0, MEMOP_TYPE_MALLOC);
#if CALC_EXACT_MALLOC_SIZE
((size_t *)mem)[0] = size;
@@ -7922,10 +7933,10 @@ objspace_malloc_fixup(rb_objspace_t *objspace, void *mem, size_t size)
return mem;
}
-#define TRY_WITH_GC(alloc) do { \
- objspace_malloc_gc_stress(objspace); \
+#define TRY_WITH_GC(th, alloc) do { \
+ objspace_malloc_gc_stress(objspace, th); \
if (!(alloc) && \
- (!garbage_collect_with_gvl(objspace, GPR_FLAG_FULL_MARK | \
+ (!garbage_collect_with_gvl(objspace, th, GPR_FLAG_FULL_MARK | \
GPR_FLAG_IMMEDIATE_MARK | GPR_FLAG_IMMEDIATE_SWEEP | \
GPR_FLAG_MALLOC) || \
!(alloc))) { \
@@ -7940,10 +7951,11 @@ static void *
objspace_xmalloc0(rb_objspace_t *objspace, size_t size)
{
void *mem;
+ rb_thread_t *th = ruby_thread_from_native();
- size = objspace_malloc_prepare(objspace, size);
- TRY_WITH_GC(mem = malloc(size));
- return objspace_malloc_fixup(objspace, mem, size);
+ size = objspace_malloc_prepare(objspace, th, size);
+ TRY_WITH_GC(th, mem = malloc(size));
+ return objspace_malloc_fixup(objspace, th, mem, size);
}
static inline size_t
@@ -7960,6 +7972,7 @@ static void *
objspace_xrealloc(rb_objspace_t *objspace, void *ptr, size_t new_size, size_t old_size)
{
void *mem;
+ rb_thread_t *th;
if (!ptr) return objspace_xmalloc0(objspace, new_size);
@@ -7979,8 +7992,9 @@ objspace_xrealloc(rb_objspace_t *objspace, void *ptr, size_t new_size, size_t ol
old_size = ((size_t *)ptr)[0];
#endif
+ th = ruby_thread_from_native();
old_size = objspace_malloc_size(objspace, ptr, old_size);
- TRY_WITH_GC(mem = realloc(ptr, new_size));
+ TRY_WITH_GC(th, mem = realloc(ptr, new_size));
new_size = objspace_malloc_size(objspace, mem, new_size);
#if CALC_EXACT_MALLOC_SIZE
@@ -7988,7 +8002,8 @@ objspace_xrealloc(rb_objspace_t *objspace, void *ptr, size_t new_size, size_t ol
mem = (size_t *)mem + 1;
#endif
- objspace_malloc_increase(objspace, mem, new_size, old_size, MEMOP_TYPE_REALLOC);
+ objspace_malloc_increase(objspace, th, mem, new_size, old_size,
+ MEMOP_TYPE_REALLOC);
return mem;
}
@@ -7996,6 +8011,7 @@ objspace_xrealloc(rb_objspace_t *objspace, void *ptr, size_t new_size, size_t ol
static void
objspace_xfree(rb_objspace_t *objspace, void *ptr, size_t old_size)
{
+ rb_thread_t *th = ruby_thread_from_native();
#if CALC_EXACT_MALLOC_SIZE
ptr = ((size_t *)ptr) - 1;
old_size = ((size_t*)ptr)[0];
@@ -8004,7 +8020,7 @@ objspace_xfree(rb_objspace_t *objspace, void *ptr, size_t old_size)
free(ptr);
- objspace_malloc_increase(objspace, ptr, 0, old_size, MEMOP_TYPE_FREE);
+ objspace_malloc_increase(objspace, th, ptr, 0, old_size, MEMOP_TYPE_FREE);
}
static void *
@@ -8040,10 +8056,11 @@ static void *
objspace_xcalloc(rb_objspace_t *objspace, size_t size)
{
void *mem;
+ rb_thread_t *th = ruby_thread_from_native();
- size = objspace_malloc_prepare(objspace, size);
- TRY_WITH_GC(mem = calloc(1, size));
- return objspace_malloc_fixup(objspace, mem, size);
+ size = objspace_malloc_prepare(objspace, th, size);
+ TRY_WITH_GC(th, mem = calloc(1, size));
+ return objspace_malloc_fixup(objspace, th, mem, size);
}
void *
@@ -8213,11 +8230,13 @@ void
rb_gc_adjust_memory_usage(ssize_t diff)
{
rb_objspace_t *objspace = &rb_objspace;
+ rb_thread_t *th = ruby_thread_from_native();
+
if (diff > 0) {
- objspace_malloc_increase(objspace, 0, diff, 0, MEMOP_TYPE_REALLOC);
+ objspace_malloc_increase(objspace, th, 0, diff, 0, MEMOP_TYPE_REALLOC);
}
else if (diff < 0) {
- objspace_malloc_increase(objspace, 0, 0, -diff, MEMOP_TYPE_REALLOC);
+ objspace_malloc_increase(objspace, th, 0, 0, -diff, MEMOP_TYPE_REALLOC);
}
}
diff --git a/internal.h b/internal.h
index a072fa30dd7..c2c0679fdd3 100644
--- a/internal.h
+++ b/internal.h
@@ -2047,6 +2047,8 @@ VALUE rb_str_upto_endless_each(VALUE, int (*each)(VALUE, VALUE), VALUE);
/* thread.c (export) */
int ruby_thread_has_gvl_p(void); /* for ext/fiddle/closure.c */
+struct rb_thread_struct;
+struct rb_thread_struct *ruby_thread_from_native(void); /* for gc.c */
/* util.c (export) */
extern const signed char ruby_digit36_to_number_table[];
diff --git a/thread_pthread.c b/thread_pthread.c
index e17ca36819e..32c607e3c42 100644
--- a/thread_pthread.c
+++ b/thread_pthread.c
@@ -380,7 +380,7 @@ null_func(int i)
/* null */
}
-static rb_thread_t *
+rb_thread_t *
ruby_thread_from_native(void)
{
return pthread_getspecific(ruby_native_thread_key);
diff --git a/thread_win32.c b/thread_win32.c
index 3c6d0e73693..3e25969c24a 100644
--- a/thread_win32.c
+++ b/thread_win32.c
@@ -127,7 +127,7 @@ gvl_destroy(rb_vm_t *vm)
CloseHandle(vm->gvl.lock);
}
-static rb_thread_t *
+rb_thread_t *
ruby_thread_from_native(void)
{
return TlsGetValue(ruby_native_thread_key);
--
EW
* [PATCH 5/5] per-thread malloc accounting
From: Eric Wong @ 2018-05-31 22:01 UTC
To: spew
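Each rb_thread_t gains gc.malloc_inc/gc.malloc_dec counters, so malloc
accounting no longer needs atomic updates on one shared counter; GC
sums and resets the counters across vm->living_threads. A compilable
sketch of the scheme (the living-threads list is simplified to an
array; the struct name is hypothetical):

    #include <stdio.h>
    #include <stddef.h>

    struct th { size_t malloc_inc, malloc_dec; };

    /* per-thread net increase, clamped at zero as in malloc_diff() */
    static size_t malloc_diff(const struct th *t)
    {
        return t->malloc_inc > t->malloc_dec
               ? t->malloc_inc - t->malloc_dec : 0;
    }

    int main(void)
    {
        struct th threads[2] = { { 4096, 1024 }, { 512, 2048 } };
        size_t inc = 0, dec = 0;
        size_t t0 = malloc_diff(&threads[0]); /* per-thread view */

        for (int i = 0; i < 2; i++) { /* stands in for list_for_each */
            inc += threads[i].malloc_inc;
            dec += threads[i].malloc_dec;
            threads[i].malloc_inc = threads[i].malloc_dec = 0; /* reset */
        }
        printf("thread 0 diff: %zu, net increase: %zu\n",
               t0, inc > dec ? inc - dec : 0);
        return 0;
    }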
---
gc.c | 99 ++++++++++++++++++++++++++++++++++++++-----------------
vm_core.h | 6 ++++
2 files changed, 75 insertions(+), 30 deletions(-)
diff --git a/gc.c b/gc.c
index c8f8b16a896..1848eb09843 100644
--- a/gc.c
+++ b/gc.c
@@ -739,7 +739,6 @@ static rb_objspace_t rb_objspace = {{GC_MALLOC_LIMIT_MIN}};
VALUE *ruby_initial_gc_stress_ptr = &ruby_initial_gc_stress;
#define malloc_limit objspace->malloc_params.limit
-#define malloc_increase objspace->malloc_params.increase
#define malloc_allocated_size objspace->malloc_params.allocated_size
#define heap_pages_sorted objspace->heap_pages.sorted
#define heap_allocated_pages objspace->heap_pages.allocated_pages
@@ -6328,14 +6327,39 @@ ready_to_gc(rb_objspace_t *objspace)
}
}
+static size_t
+malloc_increase_all(void) /* cold function only for statistics */
+{
+ size_t inc = 0, dec = 0;
+ rb_vm_t *vm = GET_VM();
+ rb_thread_t *th = 0;
+
+ list_for_each(&vm->living_threads, th, vmlt_node) {
+ inc += th->gc.malloc_inc;
+ dec += th->gc.malloc_dec;
+ }
+ return inc > dec ? inc - dec : 0;
+}
+
static void
gc_reset_malloc_info(rb_objspace_t *objspace)
{
gc_prof_set_malloc_info(objspace);
{
- size_t inc = ATOMIC_SIZE_EXCHANGE(malloc_increase, 0);
- size_t old_limit = malloc_limit;
-
+ size_t inc = 0, dec = 0;
+ rb_vm_t *vm = GET_VM();
+ rb_thread_t *th = 0;
+ size_t old_limit = malloc_limit;
+
+ list_for_each(&vm->living_threads, th, vmlt_node) {
+ inc += th->gc.malloc_inc;
+ dec += th->gc.malloc_dec;
+ th->gc.malloc_inc = th->gc.malloc_dec = 0;
+ }
+ inc = inc > dec ? inc - dec : 0;
+#if RGENGC_ESTIMATE_OLDMALLOC
+ objspace->rgengc.oldmalloc_increase += inc;
+#endif
if (inc > malloc_limit) {
malloc_limit = (size_t)(inc * gc_params.malloc_limit_growth_factor);
if (gc_params.malloc_limit_max > 0 && /* ignore max-check if 0 */
@@ -7195,7 +7219,7 @@ gc_stat_internal(VALUE hash_or_sym)
SET(total_freed_pages, objspace->profile.total_freed_pages);
SET(total_allocated_objects, objspace->total_allocated_objects);
SET(total_freed_objects, objspace->profile.total_freed_objects);
- SET(malloc_increase_bytes, malloc_increase);
+ SET(malloc_increase_bytes, malloc_increase_all());
SET(malloc_increase_bytes_limit, malloc_limit);
#if USE_RGENGC
SET(minor_gc_count, objspace->profile.minor_gc_count);
@@ -7831,32 +7855,37 @@ objspace_malloc_gc_stress(rb_objspace_t *objspace, const rb_thread_t *th)
}
}
-static void
-objspace_malloc_increase(rb_objspace_t *objspace, const rb_thread_t *th,
- void *mem, size_t new_size, size_t old_size, enum memop_type type)
+static size_t
+malloc_diff(const rb_thread_t *th)
{
- if (new_size > old_size) {
- ATOMIC_SIZE_ADD(malloc_increase, new_size - old_size);
-#if RGENGC_ESTIMATE_OLDMALLOC
- ATOMIC_SIZE_ADD(objspace->rgengc.oldmalloc_increase, new_size - old_size);
-#endif
- }
- else {
- atomic_sub_nounderflow(&malloc_increase, old_size - new_size);
-#if RGENGC_ESTIMATE_OLDMALLOC
- atomic_sub_nounderflow(&objspace->rgengc.oldmalloc_increase, old_size - new_size);
-#endif
+ if (th->gc.malloc_inc > th->gc.malloc_dec) {
+ return th->gc.malloc_inc - th->gc.malloc_dec;
+ }
+ return 0;
+}
- if (type == MEMOP_TYPE_MALLOC) {
- retry:
- if (malloc_increase > malloc_limit && th && !dont_gc) {
- if (thread_has_gvl_p(th) && is_lazy_sweeping(heap_eden)) {
- gc_rest(objspace); /* gc_rest can reduce malloc_increase */
- goto retry;
- }
- garbage_collect_with_gvl(objspace, th, GPR_FLAG_MALLOC);
- }
+static void
+objspace_malloc_increase(rb_objspace_t *objspace, rb_thread_t *th,
+ void *mem, size_t new_size, size_t old_size, enum memop_type type)
+{
+ if (th) {
+ if (new_size) {
+ th->gc.malloc_inc += new_size;
+ }
+ if (old_size) {
+ th->gc.malloc_dec += old_size;
+ }
+ if (type == MEMOP_TYPE_MALLOC) {
+ retry:
+ if (th && malloc_diff(th) > malloc_limit && !dont_gc) {
+ if (thread_has_gvl_p(th) && is_lazy_sweeping(heap_eden)) {
+ gc_rest(objspace); /* gc_rest can reduce malloc_diff */
+ goto retry;
+ }
+ garbage_collect_with_gvl(objspace, th, GPR_FLAG_MALLOC);
+ }
+ }
}
#if MALLOC_ALLOCATED_SIZE
@@ -7913,7 +7942,17 @@ objspace_malloc_prepare(rb_objspace_t *objspace, const rb_thread_t *th,
#if CALC_EXACT_MALLOC_SIZE
size += sizeof(size_t);
#endif
- /* TODO: use th */
+ if (thread_has_gvl_p(th) && !dont_gc) {
+ if (is_lazy_sweeping(heap_eden)) {
+ gc_sweep_continue(objspace, heap_eden);
+ }
+ else if ((malloc_diff(th) + size + sizeof(size_t) * 2) > malloc_limit) {
+ gc_rest(objspace); /* gc_rest can reduce malloc_diff */
+ if ((malloc_diff(th) + size + sizeof(size_t) * 2) > malloc_limit) {
+ garbage_collect_with_gvl(objspace, th, GPR_FLAG_MALLOC);
+ }
+ }
+ }
return size;
}
@@ -8845,7 +8884,7 @@ gc_prof_set_malloc_info(rb_objspace_t *objspace)
#if GC_PROFILE_MORE_DETAIL
if (gc_prof_enabled(objspace)) {
gc_profile_record *record = gc_prof_record(objspace);
- record->allocate_increase = malloc_increase;
+ record->allocate_increase = malloc_increase_all();
record->allocate_limit = malloc_limit;
}
#endif
diff --git a/vm_core.h b/vm_core.h
index 8e34ff14a57..d56cca13ef6 100644
--- a/vm_core.h
+++ b/vm_core.h
@@ -892,6 +892,12 @@ typedef struct rb_thread_struct {
unsigned int report_on_exception: 1;
uint32_t running_time_us; /* 12500..800000 */
VALUE name;
+
+ /* reset at each GC */
+ struct {
+ size_t malloc_inc;
+ size_t malloc_dec;
+ } gc;
} rb_thread_t;
typedef enum {
--
EW
^ permalink raw reply related [flat|nested] 5+ messages in thread