about summary refs log tree commit homepage
path: root/ext/mwrap/mwrap.c
diff options
context:
space:
mode:
author: Eric Wong <e@80x24.org> 2022-08-15 21:22:17 +0000
committer: Eric Wong <mwrap-public@80x24.org> 2022-08-22 17:11:39 +0000
commit: 32c9a817cab2e28534de62ff62c4686b520fb3fe (patch)
tree: 0f6cb8ebbb1f7b16d32bbea0c6151f984f05a31a /ext/mwrap/mwrap.c
parent: 4a90540056afce4f73da97300e6709993355fe4f (diff)
download: mwrap-32c9a817cab2e28534de62ff62c4686b520fb3fe.tar.gz
urcu v0.11.4+ introduced commit
7ca7fe9c03 (Make temporary variable in _rcu_dereference non-const, 2021-07-29)
which conflicts with our use of _LGPL_SOURCE.  In retrospect,
CMM_LOAD_SHARED and CMM_STORE_SHARED seem sufficient for our use
of the `totals' cds_lfht pointer since the constructor should always
fire before any threads are running.

This is fixed in urcu v0.12.4 and v0.13.2 (released 2022-08-18)
but I suspect older versions will live on in enterprise/LTS
distros for a long while.

Link: https://lore.kernel.org/lttng-dev/20220809181927.GA3718@dcvr/
Diffstat (limited to 'ext/mwrap/mwrap.c')
-rw-r--r--ext/mwrap/mwrap.c19
1 file changed, 9 insertions, 10 deletions
diff --git a/ext/mwrap/mwrap.c b/ext/mwrap/mwrap.c
index 4575e34..477b1cb 100644
--- a/ext/mwrap/mwrap.c
+++ b/ext/mwrap/mwrap.c
@@ -139,8 +139,8 @@ __attribute__((constructor)) static void resolve_malloc(void)
                 _exit(1);
         }
 #endif /* !FreeBSD */
-        totals = lfht_new();
-        if (!totals)
+        CMM_STORE_SHARED(totals, lfht_new());
+        if (!CMM_LOAD_SHARED(totals))
                 fprintf(stderr, "failed to allocate totals table\n");
 
         err = pthread_atfork(call_rcu_before_fork,
@@ -375,7 +375,7 @@ static struct src_loc *totals_add_rcu(struct src_loc *k)
         struct cds_lfht *t;
 
 again:
-        t = rcu_dereference(totals);
+        t = CMM_LOAD_SHARED(totals);
         if (!t) goto out_unlock;
         cds_lfht_lookup(t, k->hval, loc_eq, k, &iter);
         cur = cds_lfht_iter_get_node(&iter);
@@ -417,7 +417,7 @@ static struct src_loc *update_stats_rcu_lock(size_t size, uintptr_t caller)
         static const size_t xlen = sizeof(caller);
         char *dst;
 
-        if (caa_unlikely(!totals)) return 0;
+        if (caa_unlikely(!CMM_LOAD_SHARED(totals))) return 0;
         if (locating++) goto out; /* do not recurse into another *alloc */
 
         uatomic_add(&total_bytes_inc, size);
@@ -808,7 +808,7 @@ static void *dump_to_file(void *x)
 
         ++locating;
         rcu_read_lock();
-        t = rcu_dereference(totals);
+        t = CMM_LOAD_SHARED(totals);
         if (!t)
                 goto out_unlock;
         cds_lfht_for_each_entry(t, &iter, l, hnode) {
@@ -877,7 +877,7 @@ static void *totals_reset(void *ign)
         uatomic_set(&total_bytes_dec, 0);
 
         rcu_read_lock();
-        t = rcu_dereference(totals);
+        t = CMM_LOAD_SHARED(totals);
         cds_lfht_for_each_entry(t, &iter, l, hnode) {
                 uatomic_set(&l->total, 0);
                 uatomic_set(&l->allocations, 0);
@@ -945,7 +945,7 @@ static VALUE dump_each_rcu(VALUE x)
         struct cds_lfht_iter iter;
         struct src_loc *l;
 
-        t = rcu_dereference(totals);
+        t = CMM_LOAD_SHARED(totals);
         cds_lfht_for_each_entry(t, &iter, l, hnode) {
                 VALUE v[6];
                 if (l->total <= a->min) continue;
@@ -1049,9 +1049,9 @@ static VALUE mwrap_aref(VALUE mod, VALUE loc)
 
         if (!k) return val;
 
+        t = CMM_LOAD_SHARED(totals);
+        if (!t) return val;
         rcu_read_lock();
-        t = rcu_dereference(totals);
-        if (!t) goto out_unlock;
 
         cds_lfht_lookup(t, k->hval, loc_eq, k, &iter);
         cur = cds_lfht_iter_get_node(&iter);
@@ -1059,7 +1059,6 @@ static VALUE mwrap_aref(VALUE mod, VALUE loc)
                 l = caa_container_of(cur, struct src_loc, hnode);
                 val = TypedData_Wrap_Struct(cSrcLoc, &src_loc_type, l);
         }
-out_unlock:
         rcu_read_unlock();
         return val;
 }