author     Eric Wong <e@80x24.org>	2018-08-10 07:07:15 +0000
committer  Eric Wong <e@80x24.org>	2018-08-10 07:09:35 +0000
commit     4d67f6b7ca9c3b1568dd329def2a7ae6257bd840 (patch)
tree       0e9ed2169fcba43de7ab51d32480fd386a1da378
parent     423f89a545ee90cdc428cae5aae7bc6bd18a484c (diff)
download   mwrap-4d67f6b7ca9c3b1568dd329def2a7ae6257bd840.tar.gz
Since we learned to treat heap_page_body allocations specially,
we can also use that path to keep track of the rare memalign
allocations done outside of the Ruby GC.
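
In concrete terms, the dispatch in internal_memalign() collapses to
two cases after this change; a simplified sketch of the resulting
logic (identifiers as in ext/mwrap/mwrap.c below; the surrounding
declarations and the rest of the function are omitted):

	/* sketch: post-change branch inside internal_memalign() */
	if (alignment == HEAP_PAGE_ALIGN && size == HEAP_PAGE_SIZE) {
		/* Ruby heap page bodies keep their dedicated accounting */
		if (has_ec_p()) generation = rb_gc_count();
		l = IS_HEAP_PAGE_BODY;
	} else {
		/* all other memalign callers are now tracked unconditionally */
		l = update_stats_rcu_lock(size, caller);
	}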
-rw-r--r--	ext/mwrap/mwrap.c | 18 +-----------------
1 file changed, 1 insertion(+), 17 deletions(-)
diff --git a/ext/mwrap/mwrap.c b/ext/mwrap/mwrap.c
index 29cfc11..986ca70 100644
--- a/ext/mwrap/mwrap.c
+++ b/ext/mwrap/mwrap.c
@@ -23,7 +23,6 @@
 #include "jhash.h"
 
 static ID id_uminus;
-static unsigned int track_memalign;
 const char *rb_source_location_cstr(int *line); /* requires 2.6.0dev */
 extern int __attribute__((weak)) ruby_thread_has_gvl_p(void);
 extern void * __attribute__((weak)) ruby_current_execution_context_ptr;
@@ -107,7 +106,6 @@ lfht_new(void)
 __attribute__((constructor)) static void resolve_malloc(void)
 {
         int err;
-        const char *opt;
         ++locating;
 
 #ifdef __FreeBSD__
@@ -151,11 +149,6 @@ __attribute__((constructor)) static void resolve_malloc(void)
         if (err)
                 fprintf(stderr, "pthread_atfork failed: %s\n", strerror(err));
         page_size = sysconf(_SC_PAGESIZE);
-        opt = getenv("MWRAP");
-        if (opt && (opt = strstr(opt, "memalign:"))) {
-                if (!sscanf(opt, "memalign:%u", &track_memalign))
-                        fprintf(stderr, "not an unsigned int: %s\n", opt);
-        }
         --locating;
 }
 
@@ -575,10 +568,8 @@ internal_memalign(void **pp, size_t alignment, size_t size, uintptr_t caller)
         if (alignment == HEAP_PAGE_ALIGN && size == HEAP_PAGE_SIZE) {
                 if (has_ec_p()) generation = rb_gc_count();
                 l = IS_HEAP_PAGE_BODY;
-        } else if (track_memalign) {
-                l = update_stats_rcu_lock(size, caller);
         } else {
-                l = 0;
+                l = update_stats_rcu_lock(size, caller);
         }
 
         if (l == IS_HEAP_PAGE_BODY) {
@@ -1285,16 +1276,9 @@ static VALUE hpb_stat(int argc, VALUE *argv, VALUE hpb)
  * * dump_fd: a writable FD to dump to
  * * dump_path: a path to dump to, the file is opened in O_APPEND mode
  * * dump_min: the minimum allocation size (total) to dump
- * * memalign: use `1' to enable tracking the memalign family
  *
  * If both `dump_fd' and `dump_path' are specified, dump_path takes
  * precedence.
- *
- * Tracking the memalign family of functions is misleading for Ruby
- * applications, as heap page allocations can happen anywhere a
- * Ruby object is allocated, even in the coldest code paths.
- * Furthermore, it is rarely-used outside of the Ruby object allocator.
- * Thus tracking memalign functions is disabled by default.
  */
 void Init_mwrap(void)
 {
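
With the `memalign' knob gone, the MWRAP environment variable only
carries the dump options documented above. A usage sketch (the dump
path is hypothetical, and this assumes mwrap is loaded via LD_PRELOAD,
e.g. through the mwrap(1) wrapper shipped with the project):

	MWRAP=dump_path:/tmp/mwrap.dump mwrap ruby app.rb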