BPF Archive mirror
* [PATCH 2/5] arm64: mm: code and data partitioning for aslr
       [not found] <20240416122254.868007168-1-mbland@motorola.com>
@ 2024-04-03 21:08 ` Maxwell Bland
  2024-04-16 19:18   ` [PATCH 2/5 RESEND] " Maxwell Bland
  2024-04-17  5:14   ` [PATCH 2/5] " kernel test robot
  0 siblings, 2 replies; 3+ messages in thread
From: Maxwell Bland @ 2024-04-03 21:08 UTC
  To: linux-arm-kernel
  Cc: Maxwell Bland, linux-kernel, Catalin Marinas, Will Deacon,
	Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko,
	Martin KaFai Lau, Eduard Zingerman, Song Liu, Yonghong Song,
	John Fastabend, KP Singh, Stanislav Fomichev, Hao Luo, Jiri Olsa,
	Zi Shen Lim, Mark Rutland, Ard Biesheuvel, Kees Cook,
	Sami Tolvanen, Baoquan He, Jonathan Cameron, Greg Kroah-Hartman,
	Ryo Takakura, James Morse, Christophe Leroy, bpf

Use hooks in the vmalloc infrastructure to prevent the interleaving of
code and data pages. This both preserves the management assumptions
made by non-arch-specific code and makes management of these regions
more precise and conformant, allowing, for example, PXNTable bits to be
maintained on dynamically allocated memory and certain page middle
directory (PMD) and higher-level descriptors to be kept immutable. A
self-contained userspace model of the resulting partition check follows
the --- marker below.

Signed-off-by: Maxwell Bland <mbland@motorola.com>
---
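Note for reviewers: the following is a minimal, self-contained
userspace model of the partition check this patch introduces. It is
illustrative only; the region bounds are made-up stand-ins for the
module_plt_base/module_direct_base window, and struct vmap_area is
reduced to the two fields the predicate reads.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for MODULES_ASLR_START/END (not real values). */
static const unsigned long modules_aslr_start = 0xffffa00000000000UL;
static const unsigned long modules_aslr_end   = 0xffffa00080000000UL; /* +2G */

struct vmap_area {
	unsigned long va_start;
	unsigned long va_end;
};

/*
 * Model of arch_skip_va(): a free area lying inside the code region is
 * skipped unless the caller's search window begins exactly at the code
 * region, i.e. unless the caller is module_alloc() or an equivalent
 * code allocator.
 */
static bool arch_skip_va(const struct vmap_area *va, unsigned long vstart)
{
	return vstart != modules_aslr_start &&
	       va->va_start >= modules_aslr_start &&
	       va->va_end <= modules_aslr_end;
}

int main(void)
{
	struct vmap_area in_code = {
		.va_start = modules_aslr_start,
		.va_end   = modules_aslr_start + 0x10000,
	};

	/* A data allocation searching the whole vmalloc space skips it. */
	printf("data alloc skips: %d\n",
	       arch_skip_va(&in_code, 0xffff800000000000UL));
	/* A code allocation targeting the module region does not. */
	printf("code alloc skips: %d\n",
	       arch_skip_va(&in_code, modules_aslr_start));
	return 0;
}
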
 arch/arm64/include/asm/module.h    | 12 +++++
 arch/arm64/include/asm/vmalloc.h   | 17 ++++++-
 arch/arm64/kernel/Makefile         |  2 +-
 arch/arm64/kernel/module.c         |  7 ++-
 arch/arm64/kernel/probes/kprobes.c |  7 +--
 arch/arm64/kernel/setup.c          |  4 ++
 arch/arm64/kernel/vmalloc.c        | 71 ++++++++++++++++++++++++++++++
 arch/arm64/mm/ptdump.c             |  4 +-
 arch/arm64/net/bpf_jit_comp.c      |  8 ++--
 9 files changed, 117 insertions(+), 15 deletions(-)
 create mode 100644 arch/arm64/kernel/vmalloc.c

diff --git a/arch/arm64/include/asm/module.h b/arch/arm64/include/asm/module.h
index 79550b22ba19..e50d7a240ad7 100644
--- a/arch/arm64/include/asm/module.h
+++ b/arch/arm64/include/asm/module.h
@@ -65,4 +65,16 @@ static inline const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
 	return NULL;
 }
 
+extern u64 module_direct_base __ro_after_init;
+extern u64 module_plt_base __ro_after_init;
+
+int __init module_init_limits(void);
+
+#define MODULES_ASLR_START ((module_plt_base) ? module_plt_base : \
+		module_direct_base)
+#define MODULES_ASLR_END ((module_plt_base) ? module_plt_base + SZ_2G : \
+		module_direct_base + SZ_128M)
+
+void *module_alloc(unsigned long size);
+
 #endif /* __ASM_MODULE_H */
diff --git a/arch/arm64/include/asm/vmalloc.h b/arch/arm64/include/asm/vmalloc.h
index 38fafffe699f..93f8f1e2b1ce 100644
--- a/arch/arm64/include/asm/vmalloc.h
+++ b/arch/arm64/include/asm/vmalloc.h
@@ -4,6 +4,9 @@
 #include <asm/page.h>
 #include <asm/pgtable.h>
 
+struct vmap_area;
+struct kmem_cache;
+
 #ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
 
 #define arch_vmap_pud_supported arch_vmap_pud_supported
@@ -23,7 +26,7 @@ static inline bool arch_vmap_pmd_supported(pgprot_t prot)
 	return !IS_ENABLED(CONFIG_PTDUMP_DEBUGFS);
 }
 
-#endif
+#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
 
 #define arch_vmap_pgprot_tagged arch_vmap_pgprot_tagged
 static inline pgprot_t arch_vmap_pgprot_tagged(pgprot_t prot)
@@ -31,4 +34,16 @@ static inline pgprot_t arch_vmap_pgprot_tagged(pgprot_t prot)
 	return pgprot_tagged(prot);
 }
 
+#ifdef CONFIG_RANDOMIZE_BASE
+
+#define arch_skip_va arch_skip_va
+inline bool arch_skip_va(struct vmap_area *va, unsigned long vstart);
+
+#define arch_refine_vmap_space arch_refine_vmap_space
+inline void arch_refine_vmap_space(struct rb_root *root,
+					  struct list_head *head,
+					  struct kmem_cache *cachep);
+
+#endif /* CONFIG_RANDOMIZE_BASE */
+
 #endif /* _ASM_ARM64_VMALLOC_H */
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index 763824963ed1..4298a2168544 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -56,7 +56,7 @@ obj-$(CONFIG_ACPI)			+= acpi.o
 obj-$(CONFIG_ACPI_NUMA)			+= acpi_numa.o
 obj-$(CONFIG_ARM64_ACPI_PARKING_PROTOCOL)	+= acpi_parking_protocol.o
 obj-$(CONFIG_PARAVIRT)			+= paravirt.o
-obj-$(CONFIG_RANDOMIZE_BASE)		+= kaslr.o
+obj-$(CONFIG_RANDOMIZE_BASE)		+= kaslr.o vmalloc.o
 obj-$(CONFIG_HIBERNATION)		+= hibernate.o hibernate-asm.o
 obj-$(CONFIG_ELF_CORE)			+= elfcore.o
 obj-$(CONFIG_KEXEC_CORE)		+= machine_kexec.o relocate_kernel.o	\
diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
index 47e0be610bb6..58329b27624d 100644
--- a/arch/arm64/kernel/module.c
+++ b/arch/arm64/kernel/module.c
@@ -26,8 +26,8 @@
 #include <asm/scs.h>
 #include <asm/sections.h>
 
-static u64 module_direct_base __ro_after_init = 0;
-static u64 module_plt_base __ro_after_init = 0;
+u64 module_direct_base __ro_after_init;
+u64 module_plt_base __ro_after_init;
 
 /*
  * Choose a random page-aligned base address for a window of 'size' bytes which
@@ -66,7 +66,7 @@ static u64 __init random_bounding_box(u64 size, u64 start, u64 end)
  * we may fall back to PLTs where they could have been avoided, but this keeps
  * the logic significantly simpler.
  */
-static int __init module_init_limits(void)
+int __init module_init_limits(void)
 {
 	u64 kernel_end = (u64)_end;
 	u64 kernel_start = (u64)_text;
@@ -108,7 +108,6 @@ static int __init module_init_limits(void)
 
 	return 0;
 }
-subsys_initcall(module_init_limits);
 
 void *module_alloc(unsigned long size)
 {
diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
index 327855a11df2..89968f05177f 100644
--- a/arch/arm64/kernel/probes/kprobes.c
+++ b/arch/arm64/kernel/probes/kprobes.c
@@ -131,9 +131,10 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
 
 void *alloc_insn_page(void)
 {
-	return __vmalloc_node_range(PAGE_SIZE, 1, VMALLOC_START, VMALLOC_END,
-			GFP_KERNEL, PAGE_KERNEL_ROX, VM_FLUSH_RESET_PERMS,
-			NUMA_NO_NODE, __builtin_return_address(0));
+	return __vmalloc_node_range(PAGE_SIZE, 1, MODULES_ASLR_START,
+			MODULES_ASLR_END, GFP_KERNEL, PAGE_KERNEL_ROX,
+			VM_FLUSH_RESET_PERMS, NUMA_NO_NODE,
+			__builtin_return_address(0));
 }
 
 /* arm kprobe: install breakpoint in text */
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 65a052bf741f..908ee0ccc606 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -53,6 +53,7 @@
 #include <asm/efi.h>
 #include <asm/xen/hypervisor.h>
 #include <asm/mmu_context.h>
+#include <asm/module.h>
 
 static int num_standard_resources;
 static struct resource *standard_resources;
@@ -321,6 +322,7 @@ void __init __no_sanitize_address setup_arch(char **cmdline_p)
 
 	arm64_memblock_init();
 
+
 	paging_init();
 
 	acpi_table_upgrade();
@@ -366,6 +368,8 @@ void __init __no_sanitize_address setup_arch(char **cmdline_p)
 			"This indicates a broken bootloader or old kernel\n",
 			boot_args[1], boot_args[2], boot_args[3]);
 	}
+
+	module_init_limits();
 }
 
 static inline bool cpu_can_disable(unsigned int cpu)
diff --git a/arch/arm64/kernel/vmalloc.c b/arch/arm64/kernel/vmalloc.c
new file mode 100644
index 000000000000..00a463f3692f
--- /dev/null
+++ b/arch/arm64/kernel/vmalloc.c
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * AArch64 vmap area management code
+ *
+ * Author: Maxwell Bland <mbland@motorola.com>
+ */
+
+#include <linux/vmalloc.h>
+#include <linux/elf.h>
+
+#include <asm/module.h>
+
+/*
+ * Prevent new vmap_areas from being allocated in the dynamic code
+ * region unless the requested search start is explicitly the module
+ * region.
+ */
+inline bool arch_skip_va(struct vmap_area *va, unsigned long vstart)
+{
+	return (vstart != MODULES_ASLR_START &&
+			va->va_start >= MODULES_ASLR_START &&
+			va->va_end <= MODULES_ASLR_END);
+}
+
+/*
+ * Splits a vmap area in two and allocates a new area if needed
+ */
+inline struct vmap_area *
+try_split_alloc_vmap_area(struct rb_root *root,
+		struct list_head *head,
+		struct kmem_cache *vmap_area_cachep,
+		unsigned long addr)
+{
+	struct vmap_area *va;
+	int ret;
+	struct vmap_area *lva = NULL;
+
+	va = __find_vmap_area(addr, root);
+	if (!va) {
+		pr_err("%s: could not find vmap\n", __func__);
+		return NULL;
+	}
+
+	lva = kmem_cache_alloc(vmap_area_cachep, GFP_NOWAIT);
+	if (!lva) {
+		pr_err("%s: unable to allocate va for range\n", __func__);
+		return NULL;
+	}
+	lva->va_start = addr;
+	lva->va_end = va->va_end;
+	ret = va_clip(root, head, va, addr, va->va_end - addr);
+	if (WARN_ON_ONCE(ret)) {
+		pr_err("%s: unable to clip code base region\n", __func__);
+		kmem_cache_free(vmap_area_cachep, lva);
+		return NULL;
+	}
+	insert_vmap_area_augment(lva, NULL, root, head);
+	return lva;
+}
+
+/*
+ * Run during vmalloc_init; ensures that explicit rb-tree node
+ * boundaries exist between the code and data regions.
+ */
+inline void arch_refine_vmap_space(struct rb_root *root,
+		struct list_head *head,
+		struct kmem_cache *cachep)
+{
+	try_split_alloc_vmap_area(root, head, cachep, MODULES_ASLR_START);
+	try_split_alloc_vmap_area(root, head, cachep, MODULES_ASLR_END);
+}
diff --git a/arch/arm64/mm/ptdump.c b/arch/arm64/mm/ptdump.c
index 6986827e0d64..796231a4fd63 100644
--- a/arch/arm64/mm/ptdump.c
+++ b/arch/arm64/mm/ptdump.c
@@ -261,9 +261,7 @@ static void note_page(struct ptdump_state *pt_st, unsigned long addr, int level,
 		}
 		pt_dump_seq_printf(st->seq, "%9lu%c %s", delta, *unit,
 				   pg_level[st->level].name);
-		if (st->current_prot && pg_level[st->level].bits)
-			dump_prot(st, pg_level[st->level].bits,
-				  pg_level[st->level].num);
+		dump_prot(st, pg_level[st->level].bits, pg_level[st->level].num);
 		pt_dump_seq_puts(st->seq, "\n");
 
 		if (addr >= st->marker[1].start_address) {
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index 122021f9bdfc..6ed6e00b8b4a 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -13,6 +13,8 @@
 #include <linux/memory.h>
 #include <linux/printk.h>
 #include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/moduleloader.h>
 
 #include <asm/asm-extable.h>
 #include <asm/byteorder.h>
@@ -1790,18 +1792,18 @@ void *bpf_arch_text_copy(void *dst, void *src, size_t len)
 
 u64 bpf_jit_alloc_exec_limit(void)
 {
-	return VMALLOC_END - VMALLOC_START;
+	return MODULES_ASLR_END - MODULES_ASLR_START;
 }
 
 void *bpf_jit_alloc_exec(unsigned long size)
 {
 	/* Memory is intended to be executable, reset the pointer tag. */
-	return kasan_reset_tag(vmalloc(size));
+	return kasan_reset_tag(module_alloc(size));
 }
 
 void bpf_jit_free_exec(void *addr)
 {
-	return vfree(addr);
+	return module_memfree(addr);
 }
 
 /* Indicate the JIT backend supports mixing bpf2bpf and tailcalls. */
-- 
2.39.2
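
A note on arch_refine_vmap_space() above: pre-splitting the free space
at MODULES_ASLR_START and MODULES_ASLR_END guarantees that free-tree
node boundaries coincide with the code region, so arch_skip_va() can
exclude the whole region without clipping nodes on every allocation.
Below is a hedged, self-contained model of that boundary splitting; it
uses a plain sorted array in place of the kernel's augmented rb-tree,
and split_at() is an illustrative name, not kernel API.

#include <stdio.h>

/* A free virtual-address range [start, end). */
struct range { unsigned long start, end; };

/*
 * Split the range containing 'addr' into two at 'addr', mimicking what
 * try_split_alloc_vmap_area() achieves with va_clip() plus
 * insert_vmap_area_augment() on the rb-tree.
 */
static void split_at(struct range *r, int *n, unsigned long addr)
{
	for (int i = 0; i < *n; i++) {
		if (r[i].start < addr && addr < r[i].end) {
			/* Shift the tail to make room for the new node. */
			for (int j = *n; j > i + 1; j--)
				r[j] = r[j - 1];
			r[i + 1] = (struct range){ addr, r[i].end };
			r[i].end = addr;
			(*n)++;
			return;
		}
	}
}

int main(void)
{
	/* One free range spanning the whole vmalloc space (toy numbers). */
	struct range free_ranges[4] = { { 0x1000, 0x9000 } };
	int n = 1;

	split_at(free_ranges, &n, 0x3000);	/* MODULES_ASLR_START */
	split_at(free_ranges, &n, 0x6000);	/* MODULES_ASLR_END */

	/* Prints three ranges: data | code | data. */
	for (int i = 0; i < n; i++)
		printf("[%#lx, %#lx)\n", free_ranges[i].start, free_ranges[i].end);
	return 0;
}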



* [PATCH 2/5 RESEND] arm64: mm: code and data partitioning for aslr
  2024-04-03 21:08 ` [PATCH 2/5] arm64: mm: code and data partitioning for aslr Maxwell Bland
@ 2024-04-16 19:18   ` Maxwell Bland
  2024-04-17  5:14   ` [PATCH 2/5] " kernel test robot
  1 sibling, 0 replies; 3+ messages in thread
From: Maxwell Bland @ 2024-04-16 19:18 UTC
  To: linux-arm-kernel
  Cc: Maxwell Bland, linux-kernel, Catalin Marinas, Will Deacon,
	Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko,
	Martin KaFai Lau, Eduard Zingerman, Song Liu, Yonghong Song,
	John Fastabend, KP Singh, Stanislav Fomichev, Hao Luo, Jiri Olsa,
	Zi Shen Lim, Mark Rutland, Ard Biesheuvel, Kees Cook,
	Sami Tolvanen, Baoquan He, Jonathan Cameron, Greg Kroah-Hartman,
	Ryo Takakura, James Morse, Christophe Leroy, Christoph Hellwig,
	David Hildenbrand, Conor Dooley, bpf

Use hooks in the vmalloc infrastructure to prevent the interleaving of
code and data pages. This both preserves the management assumptions
made by non-arch-specific code and makes management of these regions
more precise and conformant, allowing, for example, PXNTable bits to be
maintained on dynamically allocated memory and certain page middle
directory (PMD) and higher-level descriptors to be kept immutable.

Signed-off-by: Maxwell Bland <mbland@motorola.com>

* Re: [PATCH 2/5] arm64: mm: code and data partitioning for aslr
  2024-04-03 21:08 ` [PATCH 2/5] arm64: mm: code and data partitioning for aslr Maxwell Bland
  2024-04-16 19:18   ` [PATCH 2/5 RESEND] " Maxwell Bland
@ 2024-04-17  5:14   ` kernel test robot
  1 sibling, 0 replies; 3+ messages in thread
From: kernel test robot @ 2024-04-17  5:14 UTC
  To: Maxwell Bland, linux-arm-kernel
  Cc: oe-kbuild-all, Maxwell Bland, linux-kernel, Catalin Marinas,
	Will Deacon, Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko,
	Martin KaFai Lau, Eduard Zingerman, Song Liu, Yonghong Song,
	John Fastabend, KP Singh, Stanislav Fomichev, Hao Luo, Jiri Olsa,
	Zi Shen Lim, Mark Rutland, Ard Biesheuvel, Kees Cook,
	Sami Tolvanen, Baoquan He, Jonathan Cameron, Greg Kroah-Hartman,
	Ryo Takakura, James Morse, Christophe Leroy, bpf

Hi Maxwell,

kernel test robot noticed the following build errors:

[auto build test ERROR on 0bbac3facb5d6cc0171c45c9873a2dc96bea9680]

url:    https://github.com/intel-lab-lkp/linux/commits/Maxwell-Bland/mm-allow-arch-refinement-skip-for-vmap-alloc/20240417-032149
base:   0bbac3facb5d6cc0171c45c9873a2dc96bea9680
patch link:    https://lore.kernel.org/r/20240416122254.868007168-3-mbland%40motorola.com
patch subject: [PATCH 2/5] arm64: mm: code and data partitioning for aslr
config: arm64-allnoconfig (https://download.01.org/0day-ci/archive/20240417/202404171355.jlsKaUGf-lkp@intel.com/config)
compiler: aarch64-linux-gcc (GCC) 13.2.0
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20240417/202404171355.jlsKaUGf-lkp@intel.com/reproduce)
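
For those without the 0-day harness, the equivalent manual
reproduction should just be the standard kbuild flow (assuming an
aarch64-linux- cross toolchain on PATH):

  make ARCH=arm64 CROSS_COMPILE=aarch64-linux- allnoconfig
  make ARCH=arm64 CROSS_COMPILE=aarch64-linux- W=1 vmlinux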

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202404171355.jlsKaUGf-lkp@intel.com/

All errors (new ones prefixed by >>):

   aarch64-linux-ld: Unexpected GOT/PLT entries detected!
   aarch64-linux-ld: Unexpected run-time procedure linkages detected!
   aarch64-linux-ld: arch/arm64/kernel/setup.o: in function `setup_arch':
>> setup.c:(.init.text+0x694): undefined reference to `module_init_limits'
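
The undefined reference is consistent with module_init_limits() being
defined in arch/arm64/kernel/module.c, which is built only when
CONFIG_MODULES=y, while the patch calls it unconditionally from
setup_arch(); allnoconfig disables CONFIG_MODULES. A hypothetical
reconciliation, not taken from this thread, would be a no-op stub next
to the declaration in <asm/module.h>:

#ifdef CONFIG_MODULES
int __init module_init_limits(void);
#else
/* Without loadable-module support there is no module region to size,
 * so setup_arch() can call a do-nothing stub (assumption: the other
 * MODULES_ASLR_* users are also compiled out in this configuration). */
static inline int module_init_limits(void) { return 0; }
#endif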

-- 
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki

