mwrap user+dev discussion/patches/pulls/bugs/help
* dropping Mwrap::HeapPageBody memalign tracking
From: Eric Wong @ 2023-01-06 21:59 UTC (permalink / raw)
  To: mwrap-public; +Cc: Sam Saffron

Ruby 3.1 switched to mmap for heap page bodies (HPB) in 2021,
and I doubt it'll be going back to *memalign on Linux or
FreeBSD.  So HPB stats don't exist at all for 3.1+ right now.

Tracking mmap allocations safely would be significantly more
difficult and expensive since munmap can cross mmap-ed regions.

With *memalign + free, there's a simple 1:1 relationship,
but not with mmap + munmap.
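
For illustration, the symmetric case is cheap to track because
the pointer given to free is exactly the one *memalign returned,
so a header stashed just below the aligned pointer can carry all
the tracking data.  A minimal sketch of the idea (hypothetical
helper names, not mwrap's actual API; assumes `align' is a power
of two no smaller than the alignment of struct hdr):

	#include <stdint.h>
	#include <stdlib.h>

	struct hdr { void *real; size_t size; }; /* per-allocation tracking */

	static void *tracked_memalign(size_t align, size_t size)
	{
		void *real = malloc(size + align + sizeof(struct hdr));
		uintptr_t p;

		if (!real)
			return NULL;
		/* place the header immediately below the aligned pointer */
		p = ((uintptr_t)real + sizeof(struct hdr) + align - 1) &
			~(uintptr_t)(align - 1);
		((struct hdr *)p)[-1] = (struct hdr){ .real = real, .size = size };
		return (void *)p;
	}

	static void tracked_free(void *p)
	{
		/* exactly one free for the one malloc above */
		if (p)
			free(((struct hdr *)p)[-1].real);
	}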

munmap can work on any subset (or even superset if multiple mmap
calls return sequential pages) of addresses within any mmap-ed
region(s).  In other words, each 4k page would need a
separately-allocated tracking struct in a process-wide tree or
hash table.
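
A contrived demonstration of that asymmetry, assuming Linux or
FreeBSD (for MAP_ANONYMOUS):

	#include <assert.h>
	#include <stddef.h>
	#include <sys/mman.h>
	#include <unistd.h>

	int main(void)
	{
		size_t pgsz = (size_t)sysconf(_SC_PAGESIZE);
		/* one mmap call returns four pages... */
		char *p = mmap(NULL, 4 * pgsz, PROT_READ|PROT_WRITE,
				MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);

		assert(p != MAP_FAILED);
		/* ...but munmap may drop any page-aligned subset */
		assert(munmap(p + pgsz, 2 * pgsz) == 0);
		/* first + last pages remain mapped, so a tracker
		 * can't treat the original mapping as one unit */
		assert(munmap(p, pgsz) == 0);
		assert(munmap(p + 3 * pgsz, pgsz) == 0);
		return 0;
	}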

I don't think Ruby currently does asymmetric mmap/munmap; but
extensions and any spawned processes may, and per-page tracking
is the only safe way to account for that.

So the tracking is definitely doable, but I'm not sure it's
worth the time and effort.  These are GC-internal allocations
and any instrumentation for the GC itself is probably better off
being added to ruby/gc.c.



There's something similar on the Perl 5 side, too.  It allocates
small strings out of 4080-byte malloc-ed arenas, and I was
confused by the 4080-byte allocations until I cranked up C
backtraces via MWRAP=bt:$N.

I think a better long-term feature would be the ability to
interactively crank up C backtrace levels on a per-callsite
basis.  Right now, the C backtrace level is global, and
increasing it interactively gets expensive fast.
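
A purely hypothetical sketch of what that could look like (none
of these fields exist in mwrap today): give the per-callsite
struct a depth override which falls back to the global bt:$N
value, so only the callsites under investigation pay for deep
backtrace capture:

	#include <stdint.h>

	static uint32_t bt_depth_global; /* parsed from MWRAP=bt:$N */

	struct callsite { /* stand-in for mwrap's per-location struct */
		uintptr_t caller;
		uint32_t bt_depth; /* 0 == no override, use the global */
	};

	static uint32_t bt_depth_for(const struct callsite *cs)
	{
		return cs->bt_depth ? cs->bt_depth : bt_depth_global;
	}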


* [PATCH] drop heap page support for Ruby <= 3.0
From: Eric Wong @ 2023-01-07 21:51 UTC (permalink / raw)
  To: mwrap-public; +Cc: Sam Saffron

Ruby 3.1 uses mmap nowadays, and I don't think it's worth the
effort to support tracking it since mmap and munmap don't
require the symmetry *memalign + free do.
---
 Keeping this separate from the upcoming mwrap-perl merge which
 features major changes including more common code.

 ext/mwrap/extconf.rb |   7 -
 ext/mwrap/mwrap.c    | 327 ++-----------------------------------------
 lib/mwrap_rack.rb    |  51 -------
 test/test_mwrap.rb   |  38 -----
 4 files changed, 12 insertions(+), 411 deletions(-)

diff --git a/ext/mwrap/extconf.rb b/ext/mwrap/extconf.rb
index 1828407..e8d3cc6 100644
--- a/ext/mwrap/extconf.rb
+++ b/ext/mwrap/extconf.rb
@@ -25,11 +25,4 @@ else
   abort 'missing __builtin_add_overflow'
 end
 
-begin
-  if n = GC::INTERNAL_CONSTANTS[:HEAP_PAGE_SIZE]
-    $defs << "-DHEAP_PAGE_SIZE=#{n}"
-  end
-rescue NameError
-end
-
 create_makefile 'mwrap'
diff --git a/ext/mwrap/mwrap.c b/ext/mwrap/mwrap.c
index 08761d6..6875486 100644
--- a/ext/mwrap/mwrap.c
+++ b/ext/mwrap/mwrap.c
@@ -51,19 +51,6 @@ static size_t total_bytes_inc, total_bytes_dec;
 /* true for glibc/dlmalloc/ptmalloc, not sure about jemalloc */
 #define ASSUMED_MALLOC_ALIGNMENT (sizeof(void *) * 2)
 
-/* match values in Ruby gc.c */
-#define HEAP_PAGE_ALIGN_LOG 14
-enum {
-	HEAP_PAGE_ALIGN = (1UL << HEAP_PAGE_ALIGN_LOG)
-#ifndef HEAP_PAGE_SIZE /* Ruby 2.6-2.7 only */
-	,
-	REQUIRED_SIZE_BY_MALLOC = (sizeof(size_t) * 5),
-	HEAP_PAGE_SIZE = (HEAP_PAGE_ALIGN - REQUIRED_SIZE_BY_MALLOC)
-#endif
-};
-
-#define IS_HEAP_PAGE_BODY ((struct src_loc *)-1)
-
 #ifdef __FreeBSD__
 void *__malloc(size_t);
 void __free(void *);
@@ -111,33 +98,6 @@ static union padded_mutex mutexes[MUTEX_NR] = {
 #endif
 };
 
-#define ACC_INIT(name) { .nr=0, .min=INT64_MAX, .max=-1, .m2=0, .mean=0 }
-struct acc {
-	uint64_t nr;
-	int64_t min;
-	int64_t max;
-	double m2;
-	double mean;
-};
-
-/* for tracking 16K-aligned heap page bodies (protected by GVL) */
-struct {
-	pthread_mutex_t lock;
-	struct cds_list_head bodies;
-	struct cds_list_head freed;
-
-	struct acc alive;
-	struct acc reborn;
-} hpb_stats = {
-#if STATIC_MTX_INIT_OK
-	.lock = PTHREAD_MUTEX_INITIALIZER,
-#endif
-	.bodies = CDS_LIST_HEAD_INIT(hpb_stats.bodies),
-	.freed = CDS_LIST_HEAD_INIT(hpb_stats.freed),
-	.alive = ACC_INIT(hpb_stats.alive),
-	.reborn = ACC_INIT(hpb_stats.reborn)
-};
-
 static pthread_mutex_t *mutex_assign(void)
 {
 	return &mutexes[uatomic_add_return(&mutex_i, 1) & MUTEX_MASK].mtx;
@@ -168,11 +128,6 @@ __attribute__((constructor)) static void resolve_malloc(void)
 				_exit(1);
 			}
 		}
-		err = pthread_mutex_init(&hpb_stats.lock, 0);
-		if (err) {
-			fprintf(stderr, "error: %s\n", strerror(err));
-			_exit(1);
-		}
 		/* initialize mutexes used by urcu-bp */
 		rcu_read_lock();
 		rcu_read_unlock();
@@ -300,9 +255,6 @@ struct alloc_hdr {
 			struct src_loc *loc;
 		} live;
 		struct rcu_head dead;
-		struct {
-			size_t at; /* rb_gc_count() */
-		} hpb_freed;
 	} as;
 	void *real; /* what to call real_free on */
 	size_t size;
@@ -344,64 +296,6 @@ static int loc_eq(struct cds_lfht_node *node, const void *key)
 		memcmp(k->k, existing->k, loc_size(k)) == 0);
 }
 
-/* note: not atomic */
-static void
-acc_add(struct acc *acc, size_t val)
-{
-	double delta = val - acc->mean;
-	uint64_t nr = ++acc->nr;
-
-	/* just don't divide-by-zero if we ever hit this (unlikely :P) */
-	if (nr)
-		acc->mean += delta / nr;
-
-	acc->m2 += delta * (val - acc->mean);
-	if ((int64_t)val < acc->min)
-		acc->min = (int64_t)val;
-	if ((int64_t)val > acc->max)
-		acc->max = (int64_t)val;
-}
-
-#if SIZEOF_LONG == 8
-# define INT64toNUM(x) LONG2NUM((long)x)
-#elif defined(HAVE_LONG_LONG) && SIZEOF_LONG_LONG == 8
-# define INT64toNUM(x) LL2NUM((LONG_LONG)x)
-#endif
-
-static VALUE
-acc_max(const struct acc *acc)
-{
-	return INT64toNUM(acc->max);
-}
-
-static VALUE
-acc_min(const struct acc *acc)
-{
-	return acc->min == INT64_MAX ? INT2FIX(-1) : INT64toNUM(acc->min);
-}
-
-static VALUE
-acc_mean(const struct acc *acc)
-{
-	return DBL2NUM(acc->nr ? acc->mean : HUGE_VAL);
-}
-
-static double
-acc_stddev_dbl(const struct acc *acc)
-{
-	if (acc->nr > 1) {
-		double variance = acc->m2 / (acc->nr - 1);
-		return sqrt(variance);
-	}
-	return 0.0;
-}
-
-static VALUE
-acc_stddev(const struct acc *acc)
-{
-	return DBL2NUM(acc_stddev_dbl(acc));
-}
-
 static struct src_loc *totals_add_rcu(const struct src_loc *k)
 {
 	struct cds_lfht_iter iter;
@@ -519,7 +413,7 @@ void free(void *p)
 		struct src_loc *l = h->as.live.loc;
 
 		if (!real_free) return; /* oh well, leak a little */
-		if (l && l != IS_HEAP_PAGE_BODY) {
+		if (l) {
 			size_t age = generation - h->as.live.gen;
 
 			uatomic_add(&total_bytes_dec, h->size);
@@ -534,19 +428,6 @@ void free(void *p)
 			mutex_unlock(l->mtx);
 
 			call_rcu(&h->as.dead, free_hdr_rcu);
-		} else if (l == IS_HEAP_PAGE_BODY) {
-			size_t gen = generation;
-			size_t age = gen - h->as.live.gen;
-
-			h->as.hpb_freed.at = gen;
-
-			mutex_lock(&hpb_stats.lock);
-			acc_add(&hpb_stats.alive, age);
-
-			/* hpb_stats.bodies => hpb_stats.freed */
-			cds_list_move(&h->anode, &hpb_stats.freed);
-
-			mutex_unlock(&hpb_stats.lock);
 		} else {
 			real_free(h->real);
 		}
@@ -614,65 +495,18 @@ internal_memalign(void **pp, size_t alignment, size_t size, uintptr_t caller)
 		return ENOMEM;
 
 
-	if (alignment == HEAP_PAGE_ALIGN && size == HEAP_PAGE_SIZE) {
-		if (has_ec_p()) generation = rb_gc_count();
-		l = IS_HEAP_PAGE_BODY;
-	} else {
-		l = update_stats_rcu_lock(size, caller);
-	}
+	l = update_stats_rcu_lock(size, caller);
 
-	if (l == IS_HEAP_PAGE_BODY) {
-		void *p;
-		size_t gen = generation;
-
-		mutex_lock(&hpb_stats.lock);
-
-		/* reuse existing entry */
-		if (!cds_list_empty(&hpb_stats.freed)) {
-			size_t deathspan;
-
-			h = cds_list_first_entry(&hpb_stats.freed,
-						 struct alloc_hdr, anode);
-			/* hpb_stats.freed => hpb_stats.bodies */
-			cds_list_move(&h->anode, &hpb_stats.bodies);
-			assert(h->size == size);
-			assert(h->real);
-			real = h->real;
-			p = hdr2ptr(h);
-			assert(ptr_is_aligned(p, alignment));
-
-			deathspan = gen - h->as.hpb_freed.at;
-			acc_add(&hpb_stats.reborn, deathspan);
-		}
-		else {
-			real = real_malloc(asize);
-			if (!real) return ENOMEM;
-
-			p = hdr2ptr(real);
-			if (!ptr_is_aligned(p, alignment))
-				p = ptr_align(p, alignment);
-			h = ptr2hdr(p);
-			h->size = size;
-			h->real = real;
-			cds_list_add(&h->anode, &hpb_stats.bodies);
-		}
-		mutex_unlock(&hpb_stats.lock);
-		h->as.live.loc = l;
-		h->as.live.gen = gen;
+	real = real_malloc(asize);
+	if (real) {
+		void *p = hdr2ptr(real);
+		if (!ptr_is_aligned(p, alignment))
+			p = ptr_align(p, alignment);
+		h = ptr2hdr(p);
+		alloc_insert_rcu(l, h, size, real);
 		*pp = p;
 	}
-	else {
-		real = real_malloc(asize);
-		if (real) {
-			void *p = hdr2ptr(real);
-			if (!ptr_is_aligned(p, alignment))
-				p = ptr_align(p, alignment);
-			h = ptr2hdr(p);
-			alloc_insert_rcu(l, h, size, real);
-			*pp = p;
-		}
-		update_stats_rcu_unlock(l);
-	}
+	update_stats_rcu_unlock(l);
 
 	return real ? 0 : ENOMEM;
 }
@@ -1243,73 +1077,6 @@ static VALUE total_dec(VALUE mod)
 	return SIZET2NUM(total_bytes_dec);
 }
 
-static VALUE hpb_each_yield(VALUE ignore)
-{
-	struct alloc_hdr *h, *next;
-
-	cds_list_for_each_entry_safe(h, next, &hpb_stats.bodies, anode) {
-		VALUE v[2]; /* [ generation, address ] */
-		void *addr = hdr2ptr(h);
-		assert(ptr_is_aligned(addr, HEAP_PAGE_ALIGN));
-		v[0] = LONG2NUM((long)addr);
-		v[1] = SIZET2NUM(h->as.live.gen);
-		rb_yield_values2(2, v);
-	}
-	return Qnil;
-}
-
-/*
- * call-seq:
- *
- *     Mwrap::HeapPageBody.each { |gen, addr| } -> Integer
- *
- * Yields the generation (GC.count) the heap page body was created
- * and address of the heap page body as an Integer.  Returns the
- * number of allocated pages as an Integer.  This return value should
- * match the result of GC.stat(:heap_allocated_pages)
- */
-static VALUE hpb_each(VALUE mod)
-{
-	++locating;
-	return rb_ensure(hpb_each_yield, Qfalse, reset_locating, 0);
-}
-
-/*
- * call-seq:
- *
- *	Mwrap::HeapPageBody.stat -> Hash
- *	Mwrap::HeapPageBody.stat(hash) -> hash
- *
- * The maximum lifespan of a heap page body in the Ruby VM.
- * This may be Infinity if no heap page bodies were ever freed.
- */
-static VALUE hpb_stat(int argc, VALUE *argv, VALUE hpb)
-{
-	VALUE h;
-
-	rb_scan_args(argc, argv, "01", &h);
-	if (NIL_P(h))
-		h = rb_hash_new();
-	else if (!RB_TYPE_P(h, T_HASH))
-		rb_raise(rb_eTypeError, "not a hash %+"PRIsVALUE, h);
-
-	++locating;
-#define S(x) ID2SYM(rb_intern(#x))
-	rb_hash_aset(h, S(lifespan_max), acc_max(&hpb_stats.alive));
-	rb_hash_aset(h, S(lifespan_min), acc_min(&hpb_stats.alive));
-	rb_hash_aset(h, S(lifespan_mean), acc_mean(&hpb_stats.alive));
-	rb_hash_aset(h, S(lifespan_stddev), acc_stddev(&hpb_stats.alive));
-	rb_hash_aset(h, S(deathspan_max), acc_max(&hpb_stats.reborn));
-	rb_hash_aset(h, S(deathspan_min), acc_min(&hpb_stats.reborn));
-	rb_hash_aset(h, S(deathspan_mean), acc_mean(&hpb_stats.reborn));
-	rb_hash_aset(h, S(deathspan_stddev), acc_stddev(&hpb_stats.reborn));
-	rb_hash_aset(h, S(resurrects), SIZET2NUM(hpb_stats.reborn.nr));
-#undef S
-	--locating;
-
-	return h;
-}
-
 /*
  * Document-module: Mwrap
  *
@@ -1328,19 +1095,13 @@ static VALUE hpb_stat(int argc, VALUE *argv, VALUE hpb)
  * * dump_fd: a writable FD to dump to
  * * dump_path: a path to dump to, the file is opened in O_APPEND mode
  * * dump_min: the minimum allocation size (total) to dump
- * * dump_heap: mask of heap_page_body statistics to dump
  *
  * If both `dump_fd' and `dump_path' are specified, dump_path takes
  * precedence.
- *
- * dump_heap bitmask
- * * 0x01 - summary stats (same info as HeapPageBody.stat)
- * * 0x02 - all live heaps (similar to HeapPageBody.each)
- * * 0x04 - skip non-heap_page_body-related output
  */
 void Init_mwrap(void)
 {
-	VALUE mod, hpb;
+	VALUE mod;
 
 	++locating;
 	mod = rb_define_module("Mwrap");
@@ -1372,67 +1133,9 @@ void Init_mwrap(void)
 	rb_define_method(cSrcLoc, "max_lifespan", src_loc_max_lifespan, 0);
 	rb_define_method(cSrcLoc, "name", src_loc_name, 0);
 
-	/*
-	 * Information about "struct heap_page_body" allocations from
-	 * Ruby gc.c.  This can be useful for tracking fragmentation
-	 * from posix_memalign(3) use in mainline Ruby:
-	 *
-	 *   https://sourceware.org/bugzilla/show_bug.cgi?id=14581
-	 *
-	 * These statistics are never reset by Mwrap.reset or
-	 * any other method.  They only make sense in the context
-	 * of an entire program lifetime.
-	 */
-	hpb = rb_define_class_under(mod, "HeapPageBody", rb_cObject);
-	rb_define_singleton_method(hpb, "stat", hpb_stat, -1);
-	rb_define_singleton_method(hpb, "each", hpb_each, 0);
-
 	--locating;
 }
 
-enum {
-	DUMP_HPB_STATS = 0x1,
-	DUMP_HPB_EACH = 0x2,
-	DUMP_HPB_EXCL = 0x4,
-};
-
-static void dump_hpb(FILE *fp, unsigned flags)
-{
-	if (flags & DUMP_HPB_STATS) {
-		fprintf(fp,
-			"lifespan_max: %"PRId64"\n"
-			"lifespan_min:%s%"PRId64"\n"
-			"lifespan_mean: %0.3f\n"
-			"lifespan_stddev: %0.3f\n"
-			"deathspan_max: %"PRId64"\n"
-			"deathspan_min:%s%"PRId64"\n"
-			"deathspan_mean: %0.3f\n"
-			"deathspan_stddev: %0.3f\n"
-			"gc_count: %zu\n",
-			hpb_stats.alive.max,
-			hpb_stats.alive.min == INT64_MAX ? " -" : " ",
-			hpb_stats.alive.min,
-			hpb_stats.alive.mean,
-			acc_stddev_dbl(&hpb_stats.alive),
-			hpb_stats.reborn.max,
-			hpb_stats.reborn.min == INT64_MAX ? " -" : " ",
-			hpb_stats.reborn.min,
-			hpb_stats.reborn.mean,
-			acc_stddev_dbl(&hpb_stats.reborn),
-			/* n.b.: unsafe to call rb_gc_count() in destructor */
-			generation);
-	}
-	if (flags & DUMP_HPB_EACH) {
-		struct alloc_hdr *h;
-
-		cds_list_for_each_entry(h, &hpb_stats.bodies, anode) {
-			void *addr = hdr2ptr(h);
-
-			fprintf(fp, "%p\t%zu\n", addr, h->as.live.gen);
-		}
-	}
-}
-
 /* rb_cloexec_open isn't usable by non-Ruby processes */
 #ifndef O_CLOEXEC
 #  define O_CLOEXEC 0
@@ -1446,7 +1149,6 @@ static void mwrap_dump_destructor(void)
 	struct dump_arg a = { .min = 0 };
 	size_t i;
 	int dump_fd;
-	unsigned dump_heap = 0;
 	char *dump_path;
 	char *s;
 
@@ -1478,9 +1180,6 @@ static void mwrap_dump_destructor(void)
 	if ((s = strstr(opt, "dump_min:")))
 		sscanf(s, "dump_min:%zu", &a.min);
 
-	if ((s = strstr(opt, "dump_heap:")))
-		sscanf(s, "dump_heap:%u", &dump_heap);
-
 	switch (dump_fd) {
 	case 0: goto out;
 	case 1: a.fp = stdout; break;
@@ -1500,9 +1199,7 @@ static void mwrap_dump_destructor(void)
 		}
 		/* we'll leak some memory here, but this is a destructor */
 	}
-	if ((dump_heap & DUMP_HPB_EXCL) == 0)
-		dump_to_file(&a);
-	dump_hpb(a.fp, dump_heap);
+	dump_to_file(&a);
 out:
 	--locating;
 }
diff --git a/lib/mwrap_rack.rb b/lib/mwrap_rack.rb
index 53380b9..c777a78 100644
--- a/lib/mwrap_rack.rb
+++ b/lib/mwrap_rack.rb
@@ -89,54 +89,6 @@ class MwrapRack
     end
   end
 
-  class HeapPages # :nodoc:
-    include HtmlResponse
-    HEADER = '<tr><th>address</th><th>generation</th></tr>'
-
-    def hpb_rows
-      Mwrap::HeapPageBody.stat(stat = Thread.current[:mwrap_hpb_stat] ||= {})
-      %i(lifespan_max lifespan_min lifespan_mean lifespan_stddev
-         deathspan_max deathspan_min deathspan_mean deathspan_stddev
-         resurrects
-        ).map! do |k|
-         "<tr><td>#{k}</td><td>#{stat[k]}</td></tr>\n"
-      end.join
-    end
-
-    def gc_stat_rows
-      GC.stat(stat = Thread.current[:mwrap_gc_stat] ||= {})
-      %i(count heap_allocated_pages heap_eden_pages heap_tomb_pages
-          total_allocated_pages total_freed_pages).map do |k|
-         "<tr><td>GC.stat(:#{k})</td><td>#{stat[k]}</td></tr>\n"
-      end.join
-    end
-
-    GC_STAT_URL = 'https://docs.ruby-lang.org/en/trunk/GC.html#method-c-stat'
-    GC_STAT_HELP = <<~EOM
-      <p>Non-Infinity lifespans can indicate fragmentation.
-      <p>See <a
-      href="#{GC_STAT_URL}">#{GC_STAT_URL}</a> for info on GC.stat values.
-    EOM
-
-    def each
-      Mwrap.quiet do
-        yield("<html><head><title>heap pages</title></head>" \
-              "<body><h1>heap pages</h1>" \
-              "<table><tr><th>stat</th><th>value</th></tr>\n" \
-              "#{hpb_rows}" \
-              "#{gc_stat_rows}" \
-              "</table>\n" \
-              "#{GC_STAT_HELP}" \
-              "<table>#{HEADER}")
-        Mwrap::HeapPageBody.each do |addr, generation|
-          addr = -sprintf('0x%x', addr)
-          yield(-"<tr><td>#{addr}</td><td>#{generation}</td></tr>\n")
-        end
-        yield "</table></body></html>\n"
-      end
-    end
-  end
-
   def r404 # :nodoc:
     [404,{'Content-Type'=>'text/plain'},["Not found\n"]]
   end
@@ -152,15 +104,12 @@ class MwrapRack
       loc = -CGI.unescape($1)
       loc = Mwrap[loc] or return r404
       EachAt.new(loc).response
-    when '/heap_pages'
-      HeapPages.new.response
     when '/'
       n = 2000
       u = 'https://80x24.org/mwrap/README.html'
       b = -('<html><head><title>Mwrap demo</title></head>' \
           "<body><p><a href=\"each/#{n}\">allocations &gt;#{n} bytes</a>" \
           "<p><a href=\"#{u}\">#{u}</a>" \
-          "<p><a href=\"heap_pages\">heap pages</a>" \
           "</body></html>\n")
       [ 200, {'Content-Type'=>'text/html','Content-Length'=>-b.size.to_s},[b]]
     else
diff --git a/test/test_mwrap.rb b/test/test_mwrap.rb
index eaa65cb..6522167 100644
--- a/test/test_mwrap.rb
+++ b/test/test_mwrap.rb
@@ -59,13 +59,6 @@ class TestMwrap < Test::Unit::TestCase
       res = system(env, *cmd)
       assert res, $?.inspect
       assert_match(/\b1\d{4}\s+[1-9]\d*\s+-e:1$/, tmp.read)
-
-      tmp.rewind
-      tmp.truncate(0)
-      env['MWRAP'] = "dump_path:#{tmp.path},dump_heap:5"
-      res = system(env, *cmd)
-      assert res, $?.inspect
-      assert_match %r{lifespan_stddev}, tmp.read
     end
   end
 
@@ -295,35 +288,4 @@ class TestMwrap < Test::Unit::TestCase
         abort 'freed more than allocated'
     end;
   end
-
-  def test_heap_page_body
-    assert_separately(+"#{<<~"begin;"}\n#{<<~'end;'}")
-    begin;
-      require 'mwrap'
-      require 'rubygems' # use up some memory
-      ap = GC.stat(:heap_allocated_pages)
-      h = {}
-      nr = 0
-      Mwrap::HeapPageBody.each do |addr, gen|
-        nr += 1
-        gen <= GC.count && gen >= 0 or abort "bad generation: #{gen}"
-        (0 == (addr & 16383)) or abort "addr not aligned: #{'%x' % addr}"
-      end
-      if RUBY_VERSION.to_f < 3.1 # 3.1+ uses mmap on platforms we care about
-        nr == ap or abort "HeapPageBody.each missed page #{nr} != #{ap}"
-      end
-      10.times { (1..20000).to_a.map(&:to_s) }
-      3.times { GC.start }
-      Mwrap::HeapPageBody.stat(h)
-      Integer === h[:lifespan_max] or abort 'lifespan_max not recorded'
-      Integer === h[:lifespan_min] or abort 'lifespan_min not recorded'
-      Float === h[:lifespan_mean] or abort 'lifespan_mean not recorded'
-      3.times { GC.start }
-      10.times { (1..20000).to_a.map(&:to_s) }
-      Mwrap::HeapPageBody.stat(h)
-      h[:deathspan_min] <= h[:deathspan_max] or
-        abort 'wrong min/max deathtime'
-      Float === h[:deathspan_mean] or abort 'deathspan_mean not recorded'
-    end;
-  end
 end

