author    Eric Wong <e@80x24.org>              2022-12-19 11:19:16 +0000
committer Eric Wong <mwrap-perl@80x24.org>     2022-12-19 21:56:33 +0000
commit    161ac470d411c9e04ad02dbd14c2e56a756aeaf8 (patch)
tree      ea183c84cbbc8e365652ad23458185c3301dc54d
parent    0805bec5595c4976e5215ca4e681b777d8bffac5 (diff)
download  mwrap-161ac470d411c9e04ad02dbd14c2e56a756aeaf8.tar.gz
We can rely on the stable value of ->loc_hash to choose the
mutex at the point where locking is required, rather than relying
on a monotonically increasing counter to assign one up front.
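
For readers skimming the patch below, here is a minimal standalone sketch
of the lock-striping pattern being adopted: a fixed, power-of-two pool of
mutexes indexed by a stable hash, so no per-object mutex pointer (and no
global assignment counter) is needed. Only loc_hash, MUTEX_NR and
MUTEX_MASK mirror identifiers from mwrap_core.h; struct obj,
obj_mutex_lock and the pool size of 64 are illustrative assumptions, not
mwrap's actual definitions.

	#include <pthread.h>
	#include <stdint.h>
	#include <stdio.h>

	/* illustrative pool size; must be a power of two for the mask trick */
	#define MUTEX_NR 64
	#define MUTEX_MASK (MUTEX_NR - 1)

	static pthread_mutex_t mutexes[MUTEX_NR];

	struct obj {
		uint32_t loc_hash;	/* stable for the lifetime of the object */
		long counter;		/* state the mutex protects */
	};

	/* pick the stripe from the stable hash; same hash => same mutex */
	static pthread_mutex_t *obj_mutex_lock(const struct obj *o)
	{
		pthread_mutex_t *mtx = &mutexes[o->loc_hash & MUTEX_MASK];

		pthread_mutex_lock(mtx);
		return mtx;
	}

	int main(void)
	{
		struct obj o = { .loc_hash = 0xdeadbeef, .counter = 0 };
		int i;

		for (i = 0; i < MUTEX_NR; i++)
			pthread_mutex_init(&mutexes[i], NULL);

		pthread_mutex_t *mtx = obj_mutex_lock(&o);
		o.counter++;		/* critical section */
		pthread_mutex_unlock(mtx);

		printf("counter=%ld stripe=%u\n", o.counter,
		       o.loc_hash & MUTEX_MASK);
		return 0;
	}

The actual patch returns the locked mutex from the helper so callers can
unlock the same stripe without re-deriving it, which is the same shape as
obj_mutex_lock above.
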
-rw-r--r--  mwrap_core.h  23
1 file changed, 11 insertions, 12 deletions
diff --git a/mwrap_core.h b/mwrap_core.h
index 7681ca5..7be0a7a 100644
--- a/mwrap_core.h
+++ b/mwrap_core.h
@@ -93,7 +93,6 @@ union padded_mutex {
 #else /* only tested on Linux + glibc */
 #  define STATIC_MTX_INIT_OK (1)
 #endif
-static size_t mutex_i;
 static union padded_mutex mutexes[MUTEX_NR] = {
 #if STATIC_MTX_INIT_OK
         [0 ... (MUTEX_NR-1)].mtx = PTHREAD_MUTEX_INITIALIZER
@@ -105,11 +104,6 @@ static union padded_mutex mutexes[MUTEX_NR] = {
 static_assert(UINT32_MAX > PATH_MAX, "UINT32_MAX > PATH_MAX");
 #endif
 
-static pthread_mutex_t *mutex_assign(void)
-{
-        return &mutexes[uatomic_add_return(&mutex_i, 1) & MUTEX_MASK].mtx;
-}
-
 static struct cds_lfht *lfht_new(size_t size)
 {
         return cds_lfht_new(size, 1, 0, CDS_LFHT_AUTO_RESIZE, 0);
@@ -163,7 +157,6 @@ struct src_file {
 
 /* allocated via real_malloc, immortal for safety reasons */
 struct src_loc {
-        pthread_mutex_t *mtx;
         size_t total;
         size_t freed_bytes;
         size_t allocations;
@@ -309,7 +302,6 @@ again:
                 if (!l) return l;
                 memcpy(l, k, n);
                 l->freed_bytes = 0;
-                l->mtx = mutex_assign();
                 l->age_total = 0;
                 l->max_lifespan = 0;
                 l->freed_bytes = 0;
@@ -470,6 +462,13 @@ static void free_hdr_rcu(struct rcu_head *dead)
         real_free(h->real);
 }
 
+static pthread_mutex_t *src_loc_mutex_lock(const struct src_loc *l)
+{
+        pthread_mutex_t *mtx = &mutexes[l->loc_hash & MUTEX_MASK].mtx;
+        CHECK(int, 0, pthread_mutex_lock(mtx));
+        return mtx;
+}
+
 void free(void *p)
 {
         if (p) {
@@ -485,11 +484,11 @@ void free(void *p)
                         uatomic_inc(&l->frees);
                         uatomic_add(&l->age_total, age);
 
-                        CHECK(int, 0, pthread_mutex_lock(l->mtx));
+                        pthread_mutex_t *mtx = src_loc_mutex_lock(l);
                         cds_list_del_rcu(&h->anode);
                         if (age > l->max_lifespan)
                                 l->max_lifespan = age;
-                        CHECK(int, 0, pthread_mutex_unlock(l->mtx));
+                        CHECK(int, 0, pthread_mutex_unlock(mtx));
 
                         call_rcu(&h->as.dead, free_hdr_rcu);
                 } else {
@@ -509,9 +508,9 @@ alloc_insert_rcu(struct src_loc *l, struct alloc_hdr *h, size_t size,
         h->as.live.loc = l;
         h->as.live.gen = generation;
         if (l) {
-                CHECK(int, 0, pthread_mutex_lock(l->mtx));
+                pthread_mutex_t *mtx = src_loc_mutex_lock(l);
                 cds_list_add_rcu(&h->anode, &l->allocs);
-                CHECK(int, 0, pthread_mutex_unlock(l->mtx));
+                CHECK(int, 0, pthread_mutex_unlock(mtx));
         }
 }