LKML Archive mirror
 help / color / mirror / Atom feed
* [PATCH] sched/topology: fix potential memoryleak in sched_init_numa()
@ 2023-06-21  6:38 Miaohe Lin
  2023-06-21  8:19 ` Abel Wu
  0 siblings, 1 reply; 2+ messages in thread
From: Miaohe Lin @ 2023-06-21  6:38 UTC (permalink / raw
  To: mingo, peterz, juri.lelli, vincent.guittot
  Cc: dietmar.eggemann, rostedt, bsegall, mgorman, bristot, vschneid,
	linux-kernel, linmiaohe

When sched_init_numa() fails to allocate enough memory for sched domains
numa masks, it forgets to free the already-allocated memory, leading to a memory leak.
Add a helper to help release the resource.

Fixes: cb83b629bae0 ("sched/numa: Rewrite the CONFIG_NUMA sched domain support")
Signed-off-by: Miaohe Lin <linmiaohe@huawei.com>
---
 kernel/sched/topology.c | 22 ++++++++++++++++++++--
 1 file changed, 20 insertions(+), 2 deletions(-)

diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 290509383419..dcec4d653ae3 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -1807,6 +1807,20 @@ static void init_numa_topology_type(int offline_node)
 
 #define NR_DISTANCE_VALUES (1 << DISTANCE_BITS)
 
+static void sched_free_numa_mask(struct cpumask ***masks, int nr_levels)
+{
+	int i, j;
+
+	for (i = 0; i < nr_levels; i++) {
+		if (!masks[i])
+			continue;
+		for_each_node(j)
+			kfree(masks[i][j]);
+		kfree(masks[i]);
+	}
+	kfree(masks);
+}
+
 void sched_init_numa(int offline_node)
 {
 	struct sched_domain_topology_level *tl;
@@ -1886,15 +1900,19 @@ void sched_init_numa(int offline_node)
 	 */
 	for (i = 0; i < nr_levels; i++) {
 		masks[i] = kzalloc(nr_node_ids * sizeof(void *), GFP_KERNEL);
-		if (!masks[i])
+		if (!masks[i]) {
+			sched_free_numa_mask(masks, nr_levels);
 			return;
+		}
 
 		for_each_cpu_node_but(j, offline_node) {
 			struct cpumask *mask = kzalloc(cpumask_size(), GFP_KERNEL);
 			int k;
 
-			if (!mask)
+			if (!mask) {
+				sched_free_numa_mask(masks, nr_levels);
 				return;
+			}
 
 			masks[i][j] = mask;
 
-- 
2.27.0


^ permalink raw reply related	[flat|nested] 2+ messages in thread

* Re: [PATCH] sched/topology: fix potential memoryleak in sched_init_numa()
  2023-06-21  6:38 [PATCH] sched/topology: fix potential memoryleak in sched_init_numa() Miaohe Lin
@ 2023-06-21  8:19 ` Abel Wu
  0 siblings, 0 replies; 2+ messages in thread
From: Abel Wu @ 2023-06-21  8:19 UTC (permalink / raw
  To: Miaohe Lin, mingo, peterz, juri.lelli, vincent.guittot
  Cc: dietmar.eggemann, rostedt, bsegall, mgorman, bristot, vschneid,
	linux-kernel

On 6/21/23 2:38 PM, Miaohe Lin wrote:
> When sched_init_numa() fails to allocate enough memory for sched domains
> numa masks, it forgets to free the already-allocated memory, leading to a memory leak.
> Add a helper to help release the resource.
> 
> Fixes: cb83b629bae0 ("sched/numa: Rewrite the CONFIG_NUMA sched domain support")
> Signed-off-by: Miaohe Lin <linmiaohe@huawei.com>
> ---
>   kernel/sched/topology.c | 22 ++++++++++++++++++++--
>   1 file changed, 20 insertions(+), 2 deletions(-)
> 
> diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
> index 290509383419..dcec4d653ae3 100644
> --- a/kernel/sched/topology.c
> +++ b/kernel/sched/topology.c
> @@ -1807,6 +1807,20 @@ static void init_numa_topology_type(int offline_node)
>   
>   #define NR_DISTANCE_VALUES (1 << DISTANCE_BITS)
>   
> +static void sched_free_numa_mask(struct cpumask ***masks, int nr_levels)
> +{
> +	int i, j;
> +
> +	for (i = 0; i < nr_levels; i++) {
> +		if (!masks[i])
> +			continue;
> +		for_each_node(j)
> +			kfree(masks[i][j]);
> +		kfree(masks[i]);
> +	}
> +	kfree(masks);
> +}
> +
>   void sched_init_numa(int offline_node)
>   {
>   	struct sched_domain_topology_level *tl;
> @@ -1886,15 +1900,19 @@ void sched_init_numa(int offline_node)
>   	 */
>   	for (i = 0; i < nr_levels; i++) {
>   		masks[i] = kzalloc(nr_node_ids * sizeof(void *), GFP_KERNEL);
> -		if (!masks[i])
> +		if (!masks[i]) {
> +			sched_free_numa_mask(masks, nr_levels);
>   			return;
> +		}
>   
>   		for_each_cpu_node_but(j, offline_node) {
>   			struct cpumask *mask = kzalloc(cpumask_size(), GFP_KERNEL);
>   			int k;
>   
> -			if (!mask)
> +			if (!mask) {
> +				sched_free_numa_mask(masks, nr_levels);
>   				return;
> +			}
>   
>   			masks[i][j] = mask;
>   

Allocation can also fail for @tl (topology level), and if that is the
case, masks[][] IMHO also needs to be freed. So I think it might be better
to call sched_reset_numa() at the proper place instead.

^ permalink raw reply	[flat|nested] 2+ messages in thread

end of thread, other threads:[~2023-06-21  8:20 UTC | newest]

Thread overview: 2+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2023-06-21  6:38 [PATCH] sched/topology: fix potential memoryleak in sched_init_numa() Miaohe Lin
2023-06-21  8:19 ` Abel Wu

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for read-only IMAP folder(s) and NNTP newsgroup(s).