From: Neeraj upadhyay <neeraj.iitr10@gmail.com>
To: Frederic Weisbecker <frederic@kernel.org>
Cc: LKML <linux-kernel@vger.kernel.org>,
	Boqun Feng <boqun.feng@gmail.com>,
	 Joel Fernandes <joel@joelfernandes.org>,
	Neeraj Upadhyay <neeraj.upadhyay@amd.com>,
	 "Paul E . McKenney" <paulmck@kernel.org>,
	Uladzislau Rezki <urezki@gmail.com>,
	 Zqiang <qiang.zhang1211@gmail.com>, rcu <rcu@vger.kernel.org>
Subject: Re: [PATCH 1/8] rcu/nocb: Make IRQs disablement symetric
Date: Wed, 13 Dec 2023 18:31:07 +0530
Message-ID: <CAFwiDX8NN_vJ7S50SRiyQKMZ9YpGemB-GLOAHuTQSSD+f0SiVA@mail.gmail.com>
In-Reply-To: <20231208220545.7452-2-frederic@kernel.org>

On Sat, Dec 9, 2023 at 3:35 AM Frederic Weisbecker <frederic@kernel.org> wrote:
>
> Currently IRQs are disabled on call_rcu() and then depending on the
> context:
>
> * If the CPU is in nocb mode:
>
>    - If the callback is enqueued in the bypass list, IRQs are re-enabled
>      implicitly by rcu_nocb_try_bypass()
>
>    - If the callback is enqueued in the normal list, IRQs are re-enabled
>      implicitly by __call_rcu_nocb_wake()
>
> * If the CPU is NOT in nocb mode, IRQs are re-enabled explicitly from call_rcu()
>
> This makes the code a bit hard to follow, especially as it interleaves
> with nocb locking.
>
> To make the IRQ flags coverage clearer and also in order to prepare for
> moving all the nocb enqueue code to its own function, always re-enable
> the IRQ flags explicitly from call_rcu().
>
> Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
> ---
>  kernel/rcu/tree.c      |  9 ++++++---
>  kernel/rcu/tree_nocb.h | 20 +++++++++-----------
>  2 files changed, 15 insertions(+), 14 deletions(-)
>

Nit: s/symetric/symmetric/
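
The end result is much easier to follow. For anyone skimming the
thread, a minimal before/after sketch of the flags coverage
(simplified pseudo-C with stand-in names such as nocb_cpu, not the
actual tree.c code):

	/* Before: each branch re-enables IRQs on its own. */
	local_irq_save(flags);
	if (rcu_nocb_try_bypass(...))		/* restores IRQs internally */
		return;
	if (nocb_cpu)
		__call_rcu_nocb_wake(...);	/* unlocks + restores IRQs */
	else {
		__call_rcu_core(...);
		local_irq_restore(flags);
	}

	/* After: one symmetric restore point in call_rcu() itself. */
	local_irq_save(flags);
	if (rcu_nocb_try_bypass(...)) {		/* IRQs stay disabled */
		local_irq_restore(flags);
		return;
	}
	if (nocb_cpu)
		__call_rcu_nocb_wake(...);	/* unlocks, IRQs stay disabled */
	else
		__call_rcu_core(...);
	local_irq_restore(flags);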

Reviewed-by: Neeraj Upadhyay (AMD) <neeraj.iitr10@gmail.com>


Thanks
Neeraj

> diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
> index 52c2fdbc6363..74159c6d3bdf 100644
> --- a/kernel/rcu/tree.c
> +++ b/kernel/rcu/tree.c
> @@ -2697,8 +2697,10 @@ __call_rcu_common(struct rcu_head *head, rcu_callback_t func, bool lazy_in)
>         }
>
>         check_cb_ovld(rdp);
> -       if (rcu_nocb_try_bypass(rdp, head, &was_alldone, flags, lazy))
> +       if (rcu_nocb_try_bypass(rdp, head, &was_alldone, flags, lazy)) {
> +               local_irq_restore(flags);
>                 return; // Enqueued onto ->nocb_bypass, so just leave.
> +       }
>         // If no-CBs CPU gets here, rcu_nocb_try_bypass() acquired ->nocb_lock.
>         rcu_segcblist_enqueue(&rdp->cblist, head);
>         if (__is_kvfree_rcu_offset((unsigned long)func))
> @@ -2716,8 +2718,8 @@ __call_rcu_common(struct rcu_head *head, rcu_callback_t func, bool lazy_in)
>                 __call_rcu_nocb_wake(rdp, was_alldone, flags); /* unlocks */
>         } else {
>                 __call_rcu_core(rdp, head, flags);
> -               local_irq_restore(flags);
>         }
> +       local_irq_restore(flags);
>  }
>
>  #ifdef CONFIG_RCU_LAZY
> @@ -4615,8 +4617,9 @@ void rcutree_migrate_callbacks(int cpu)
>                 __call_rcu_nocb_wake(my_rdp, true, flags);
>         } else {
>                 rcu_nocb_unlock(my_rdp); /* irqs remain disabled. */
> -               raw_spin_unlock_irqrestore_rcu_node(my_rnp, flags);
> +               raw_spin_unlock_rcu_node(my_rnp); /* irqs remain disabled. */
>         }
> +       local_irq_restore(flags);
>         if (needwake)
>                 rcu_gp_kthread_wake();
>         lockdep_assert_irqs_enabled();
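
Same pattern here, and it still satisfies the
lockdep_assert_irqs_enabled() at the end: as I understand it, the
combined unlock+restore helper behaves like the split form the patch
switches to, roughly:

	raw_spin_unlock_irqrestore_rcu_node(my_rnp, flags);
		/* behaves like: */
	raw_spin_unlock_rcu_node(my_rnp);
	local_irq_restore(flags);

so both branches now converge on a single local_irq_restore().
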
> diff --git a/kernel/rcu/tree_nocb.h b/kernel/rcu/tree_nocb.h
> index 5598212d1f27..3f70fd0a2db4 100644
> --- a/kernel/rcu/tree_nocb.h
> +++ b/kernel/rcu/tree_nocb.h
> @@ -532,9 +532,7 @@ static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
>         // 2. Both of these conditions are met:
>         //    a. The bypass list previously had only lazy CBs, and:
>         //    b. The new CB is non-lazy.
> -       if (ncbs && (!bypass_is_lazy || lazy)) {
> -               local_irq_restore(flags);
> -       } else {
> +       if (!ncbs || (bypass_is_lazy && !lazy)) {
>                 // No-CBs GP kthread might be indefinitely asleep, if so, wake.
>                 rcu_nocb_lock(rdp); // Rare during call_rcu() flood.
>                 if (!rcu_segcblist_pend_cbs(&rdp->cblist)) {
> @@ -544,7 +542,7 @@ static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
>                 } else {
>                         trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
>                                             TPS("FirstBQnoWake"));
> -                       rcu_nocb_unlock_irqrestore(rdp, flags);
> +                       rcu_nocb_unlock(rdp);
>                 }
>         }
>         return true; // Callback already enqueued.
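
One thing I double-checked here: the new condition is the exact
negation (De Morgan) of the removed "ncbs && (!bypass_is_lazy ||
lazy)", so the old irq-restore arm simply becomes the fall-through and
the restore moves to the caller. A throwaway userspace check, just to
convince myself (not kernel code):

	#include <assert.h>
	#include <stdbool.h>

	int main(void)
	{
		/* Exhaustively compare old and new predicates. */
		for (int i = 0; i < 8; i++) {
			bool ncbs = i & 1, bypass_is_lazy = i & 2, lazy = i & 4;
			bool old_cond = ncbs && (!bypass_is_lazy || lazy);
			bool new_cond = !ncbs || (bypass_is_lazy && !lazy);
			assert(new_cond == !old_cond);
		}
		return 0;
	}
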
> @@ -570,7 +568,7 @@ static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_alldone,
>         // If we are being polled or there is no kthread, just leave.
>         t = READ_ONCE(rdp->nocb_gp_kthread);
>         if (rcu_nocb_poll || !t) {
> -               rcu_nocb_unlock_irqrestore(rdp, flags);
> +               rcu_nocb_unlock(rdp);
>                 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
>                                     TPS("WakeNotPoll"));
>                 return;
> @@ -583,17 +581,17 @@ static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_alldone,
>                 rdp->qlen_last_fqs_check = len;
>                 // Only lazy CBs in bypass list
>                 if (lazy_len && bypass_len == lazy_len) {
> -                       rcu_nocb_unlock_irqrestore(rdp, flags);
> +                       rcu_nocb_unlock(rdp);
>                         wake_nocb_gp_defer(rdp, RCU_NOCB_WAKE_LAZY,
>                                            TPS("WakeLazy"));
>                 } else if (!irqs_disabled_flags(flags)) {
>                         /* ... if queue was empty ... */
> -                       rcu_nocb_unlock_irqrestore(rdp, flags);
> +                       rcu_nocb_unlock(rdp);
>                         wake_nocb_gp(rdp, false);
>                         trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
>                                             TPS("WakeEmpty"));
>                 } else {
> -                       rcu_nocb_unlock_irqrestore(rdp, flags);
> +                       rcu_nocb_unlock(rdp);
>                         wake_nocb_gp_defer(rdp, RCU_NOCB_WAKE,
>                                            TPS("WakeEmptyIsDeferred"));
>                 }
> @@ -611,15 +609,15 @@ static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_alldone,
>                 if ((rdp->nocb_cb_sleep ||
>                      !rcu_segcblist_ready_cbs(&rdp->cblist)) &&
>                     !timer_pending(&rdp->nocb_timer)) {
> -                       rcu_nocb_unlock_irqrestore(rdp, flags);
> +                       rcu_nocb_unlock(rdp);
>                         wake_nocb_gp_defer(rdp, RCU_NOCB_WAKE_FORCE,
>                                            TPS("WakeOvfIsDeferred"));
>                 } else {
> -                       rcu_nocb_unlock_irqrestore(rdp, flags);
> +                       rcu_nocb_unlock(rdp);
>                         trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WakeNot"));
>                 }
>         } else {
> -               rcu_nocb_unlock_irqrestore(rdp, flags);
> +               rcu_nocb_unlock(rdp);
>                 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WakeNot"));
>         }
>  }
> --
> 2.42.1
>
>
