Linux-HyperV Archive mirror
From: Souradeep Chakrabarti <schakrabarti@microsoft.com>
To: Suman Ghosh <sumang@marvell.com>,
	Souradeep Chakrabarti <schakrabarti@linux.microsoft.com>,
	KY Srinivasan <kys@microsoft.com>,
	Haiyang Zhang <haiyangz@microsoft.com>,
	"wei.liu@kernel.org" <wei.liu@kernel.org>,
	Dexuan Cui <decui@microsoft.com>,
	"davem@davemloft.net" <davem@davemloft.net>,
	"edumazet@google.com" <edumazet@google.com>,
	"kuba@kernel.org" <kuba@kernel.org>,
	"pabeni@redhat.com" <pabeni@redhat.com>,
	Long Li <longli@microsoft.com>,
	"yury.norov@gmail.com" <yury.norov@gmail.com>,
	"leon@kernel.org" <leon@kernel.org>,
	"cai.huoqing@linux.dev" <cai.huoqing@linux.dev>,
	"ssengar@linux.microsoft.com" <ssengar@linux.microsoft.com>,
	"vkuznets@redhat.com" <vkuznets@redhat.com>,
	"tglx@linutronix.de" <tglx@linutronix.de>,
	"linux-hyperv@vger.kernel.org" <linux-hyperv@vger.kernel.org>,
	"netdev@vger.kernel.org" <netdev@vger.kernel.org>,
	"linux-kernel@vger.kernel.org" <linux-kernel@vger.kernel.org>,
	"linux-rdma@vger.kernel.org" <linux-rdma@vger.kernel.org>
Cc: Paul Rosswurm <paulros@microsoft.com>
Subject: RE: [EXT] [PATCH V5 net-next] net: mana: Assigning IRQ affinity on HT cores
Date: Tue, 12 Dec 2023 18:22:20 +0000	[thread overview]
Message-ID: <PUZP153MB0788323DA797C8DED27E2172CC8EA@PUZP153MB0788.APCP153.PROD.OUTLOOK.COM> (raw)
In-Reply-To: <SJ0PR18MB5216C6E41006057839D3C01BDB8EA@SJ0PR18MB5216.namprd18.prod.outlook.com>



>-----Original Message-----
>From: Suman Ghosh <sumang@marvell.com>
>Sent: Tuesday, December 12, 2023 11:48 PM
>To: Souradeep Chakrabarti <schakrabarti@linux.microsoft.com>; KY Srinivasan
><kys@microsoft.com>; Haiyang Zhang <haiyangz@microsoft.com>;
>wei.liu@kernel.org; Dexuan Cui <decui@microsoft.com>; davem@davemloft.net;
>edumazet@google.com; kuba@kernel.org; pabeni@redhat.com; Long Li
><longli@microsoft.com>; yury.norov@gmail.com; leon@kernel.org;
>cai.huoqing@linux.dev; ssengar@linux.microsoft.com; vkuznets@redhat.com;
>tglx@linutronix.de; linux-hyperv@vger.kernel.org; netdev@vger.kernel.org; linux-
>kernel@vger.kernel.org; linux-rdma@vger.kernel.org
>Cc: Souradeep Chakrabarti <schakrabarti@microsoft.com>; Paul Rosswurm
><paulros@microsoft.com>
>Subject: [EXTERNAL] RE: [EXT] [PATCH V5 net-next] net: mana: Assigning IRQ
>affinity on HT cores
>
>Hi Souradeep,
>
>Please find a couple of comments inline.
>
>>+
>>+      if (!zalloc_cpumask_var(&curr, GFP_KERNEL)) {
>>+              err = -ENOMEM;
>>+              return err;
>>+      }
>>+      if (!zalloc_cpumask_var(&cpus, GFP_KERNEL)) {
>[Suman] memory leak here, should free 'curr'.
This will be taken care of in the next version.
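For V6 I am planning to free 'curr' before bailing out when the second
allocation fails, roughly like this (untested sketch, keeping the same
local variable names as in this patch):

	if (!zalloc_cpumask_var(&curr, GFP_KERNEL))
		return -ENOMEM;
	if (!zalloc_cpumask_var(&cpus, GFP_KERNEL)) {
		free_cpumask_var(curr);
		return -ENOMEM;
	}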
>>+              err = -ENOMEM;
>>+              return err;
>>+      }
>>+
>>+      rcu_read_lock();
>>+      for_each_numa_hop_mask(next, next_node) {
>>+              cpumask_andnot(curr, next, prev);
>>+              for (w = cpumask_weight(curr), cnt = 0; cnt < w; ) {
>>+                      cpumask_copy(cpus, curr);
>>+                      for_each_cpu(cpu, cpus) {
>>+                              irq_set_affinity_and_hint(irqs[i],
>>+                                      topology_sibling_cpumask(cpu));
>>+                              if (++i == nvec)
>>+                                      goto done;
>>+                              cpumask_andnot(cpus, cpus,
>>+                                      topology_sibling_cpumask(cpu));
>>+                              ++cnt;
>>+                      }
>>+              }
>>+              prev = next;
>>+      }
>>+done:
>>+      rcu_read_unlock();
>>+      free_cpumask_var(curr);
>>+      free_cpumask_var(cpus);
>>+      return err;
>>+}
>>+
>> static int mana_gd_setup_irqs(struct pci_dev *pdev)
>> {
>>-      unsigned int max_queues_per_port = num_online_cpus();
>>       struct gdma_context *gc = pci_get_drvdata(pdev);
>>+      unsigned int max_queues_per_port;
>>       struct gdma_irq_context *gic;
>>       unsigned int max_irqs, cpu;
>>-      int nvec, irq;
>>+      int start_irq_index = 1;
>>+      int nvec, *irqs, irq;
>>       int err, i = 0, j;
>>
>>+      cpus_read_lock();
>>+      max_queues_per_port = num_online_cpus();
>>       if (max_queues_per_port > MANA_MAX_NUM_QUEUES)
>>               max_queues_per_port = MANA_MAX_NUM_QUEUES;
>>
>>@@ -1261,6 +1302,14 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev)
>>       nvec = pci_alloc_irq_vectors(pdev, 2, max_irqs, PCI_IRQ_MSIX);
>>       if (nvec < 0)
>[Suman] cpus_read_unlock()?
Thanks for pointing this out; it will be taken care of in V6.
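In V6 the plan is to drop the lock on that error path as well, along
these lines (untested sketch):

	nvec = pci_alloc_irq_vectors(pdev, 2, max_irqs, PCI_IRQ_MSIX);
	if (nvec < 0) {
		cpus_read_unlock();
		return nvec;
	}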
>>               return nvec;
>>+      if (nvec <= num_online_cpus())
>>+              start_irq_index = 0;
>>+
>>+      irqs = kmalloc_array((nvec - start_irq_index), sizeof(int),
>>+                           GFP_KERNEL);
>>+      if (!irqs) {
>>+              err = -ENOMEM;
>>+              goto free_irq_vector;
>>+      }
>>
>>       gc->irq_contexts = kcalloc(nvec, sizeof(struct gdma_irq_context),
>>                                  GFP_KERNEL);
>>@@ -1287,21 +1336,44 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev)
>>                       goto free_irq;
>>               }
>>
>>-              err = request_irq(irq, mana_gd_intr, 0, gic->name, gic);
>>-              if (err)
>>-                      goto free_irq;
>>-
>>-              cpu = cpumask_local_spread(i, gc->numa_node);
>>-              irq_set_affinity_and_hint(irq, cpumask_of(cpu));
>>+              if (!i) {
>>+                      err = request_irq(irq, mana_gd_intr, 0, gic->name, gic);
>>+                      if (err)
>>+                              goto free_irq;
>>+
>>+                      /* If number of IRQ is one extra than number of online CPUs,
>>+                       * then we need to assign IRQ0 (hwc irq) and IRQ1 to
>>+                       * same CPU.
>>+                       * Else we will use different CPUs for IRQ0 and IRQ1.
>>+                       * Also we are using cpumask_local_spread instead of
>>+                       * cpumask_first for the node, because the node can be
>>+                       * mem only.
>>+                       */
>>+                      if (start_irq_index) {
>>+                              cpu = cpumask_local_spread(i, gc->numa_node);
>>+                              irq_set_affinity_and_hint(irq, cpumask_of(cpu));
>>+                      } else {
>>+                              irqs[start_irq_index] = irq;
>>+                      }
>>+              } else {
>>+                      irqs[i - start_irq_index] = irq;
>>+                      err = request_irq(irqs[i - start_irq_index], mana_gd_intr, 0,
>>+                                        gic->name, gic);
>>+                      if (err)
>>+                              goto free_irq;
>>+              }
>>       }
>>
>>+      err = irq_setup(irqs, (nvec - start_irq_index), gc->numa_node);
>>+      if (err)
>>+              goto free_irq;
>>       err = mana_gd_alloc_res_map(nvec, &gc->msix_resource);
>>       if (err)
>>               goto free_irq;
>>
>>       gc->max_num_msix = nvec;
>>       gc->num_msix_usable = nvec;
>>-
>>+      cpus_read_unlock();
>>       return 0;
>>
>> free_irq:
>>@@ -1314,8 +1386,10 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev)
>>       }
>>
>>       kfree(gc->irq_contexts);
>>+      kfree(irqs);
>>       gc->irq_contexts = NULL;
>> free_irq_vector:
>>+      cpus_read_unlock();
>>       pci_free_irq_vectors(pdev);
>>       return err;
>> }
>>--
>>2.34.1
>>



Thread overview: 14+ messages
2023-12-08 10:02 [PATCH V5 net-next] net: mana: Assigning IRQ affinity on HT cores Souradeep Chakrabarti
2023-12-08 14:03 ` Yury Norov
2023-12-08 21:53   ` Yury Norov
2023-12-11  6:53     ` Souradeep Chakrabarti
2023-12-11 14:00       ` Yury Norov
2023-12-12  6:03         ` Souradeep Chakrabarti
2023-12-11  6:37   ` Souradeep Chakrabarti
2023-12-11 15:30     ` Yury Norov
2023-12-12 11:38       ` Souradeep Chakrabarti
2023-12-12 16:34         ` Yury Norov
2023-12-12 17:18           ` [EXTERNAL] " Souradeep Chakrabarti
2023-12-12 17:40             ` Yury Norov
2023-12-12 18:17 ` [EXT] " Suman Ghosh
2023-12-12 18:22   ` Souradeep Chakrabarti [this message]
