* [PATCH v1 bpf-next] selftests/bpf: Test bpf_kptr_xchg stashing of bpf_rb_root
@ 2023-12-04 21:17 Dave Marchevsky
  2023-12-04 22:28 ` Yonghong Song
  2023-12-05 14:10 ` patchwork-bot+netdevbpf
  0 siblings, 2 replies; 3+ messages in thread
From: Dave Marchevsky @ 2023-12-04 21:17 UTC (permalink / raw)
  To: bpf
  Cc: Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko,
	Martin KaFai Lau, Kernel Team, Dave Marchevsky

There was some confusion amongst Meta sched_ext folks regarding whether
stashing bpf_rb_root - the tree itself, rather than a single node - was
supported. This patch adds a small test which demonstrates this
functionality: a local kptr with rb_root is created, a node is created
and added to the tree, then the tree is kptr_xchg'd into a mapval.
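
For reference, the reverse operation - taking the stashed tree back out of
the mapval and removing a node from it - is not exercised by this patch. A
minimal sketch using the existing rbtree kfuncs could look roughly like the
program below; the program name and return codes are made up for
illustration, and it assumes that dropping a local kptr containing a
bpf_rb_root also releases any nodes still owned by that tree:

SEC("tc")
long unstash_local_with_root(void *ctx)
{
	struct local_with_root *res;
	struct bpf_rb_node *rb;
	struct map_value *mapval;
	struct node_data *n;
	int idx = 0;

	mapval = bpf_map_lookup_elem(&some_nodes, &idx);
	if (!mapval)
		return 1;

	/* Take ownership of the stashed tree back from the map value */
	res = bpf_kptr_xchg(&mapval->local_root, NULL);
	if (!res)
		return 2;

	bpf_spin_lock(&res->l);
	rb = bpf_rbtree_first(&res->r);
	if (!rb) {
		bpf_spin_unlock(&res->l);
		bpf_obj_drop(res);
		return 3;
	}

	rb = bpf_rbtree_remove(&res->r, rb);
	bpf_spin_unlock(&res->l);

	if (rb) {
		/* Removed node is an owning reference again, release it */
		n = container_of(rb, struct node_data, node);
		bpf_obj_drop(n);
	}

	/* Assumed: dropping the local kptr also frees nodes still in the tree */
	bpf_obj_drop(res);
	return 0;
}

The property under test is that bpf_kptr_xchg accepts a local kptr whose
type contains a bpf_rb_root, so ownership of the whole tree moves in a
single exchange.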

Signed-off-by: Dave Marchevsky <davemarchevsky@fb.com>
---
 .../bpf/prog_tests/local_kptr_stash.c         | 23 ++++++++
 .../selftests/bpf/progs/local_kptr_stash.c    | 53 +++++++++++++++++++
 2 files changed, 76 insertions(+)

diff --git a/tools/testing/selftests/bpf/prog_tests/local_kptr_stash.c b/tools/testing/selftests/bpf/prog_tests/local_kptr_stash.c
index e6e50a394472..827e713f6cf1 100644
--- a/tools/testing/selftests/bpf/prog_tests/local_kptr_stash.c
+++ b/tools/testing/selftests/bpf/prog_tests/local_kptr_stash.c
@@ -48,6 +48,27 @@ static void test_local_kptr_stash_plain(void)
 	local_kptr_stash__destroy(skel);
 }
 
+static void test_local_kptr_stash_local_with_root(void)
+{
+	LIBBPF_OPTS(bpf_test_run_opts, opts,
+		    .data_in = &pkt_v4,
+		    .data_size_in = sizeof(pkt_v4),
+		    .repeat = 1,
+	);
+	struct local_kptr_stash *skel;
+	int ret;
+
+	skel = local_kptr_stash__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "local_kptr_stash__open_and_load"))
+		return;
+
+	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.stash_local_with_root), &opts);
+	ASSERT_OK(ret, "local_kptr_stash_add_local_with_root run");
+	ASSERT_OK(opts.retval, "local_kptr_stash_add_local_with_root retval");
+
+	local_kptr_stash__destroy(skel);
+}
+
 static void test_local_kptr_stash_unstash(void)
 {
 	LIBBPF_OPTS(bpf_test_run_opts, opts,
@@ -115,6 +136,8 @@ void test_local_kptr_stash(void)
 		test_local_kptr_stash_simple();
 	if (test__start_subtest("local_kptr_stash_plain"))
 		test_local_kptr_stash_plain();
+	if (test__start_subtest("local_kptr_stash_local_with_root"))
+		test_local_kptr_stash_local_with_root();
 	if (test__start_subtest("local_kptr_stash_unstash"))
 		test_local_kptr_stash_unstash();
 	if (test__start_subtest("refcount_acquire_without_unstash"))
diff --git a/tools/testing/selftests/bpf/progs/local_kptr_stash.c b/tools/testing/selftests/bpf/progs/local_kptr_stash.c
index 1769fdff6aea..75043ffc5dad 100644
--- a/tools/testing/selftests/bpf/progs/local_kptr_stash.c
+++ b/tools/testing/selftests/bpf/progs/local_kptr_stash.c
@@ -37,11 +37,18 @@ struct plain_local {
 	long data;
 };
 
+struct local_with_root {
+	long key;
+	struct bpf_spin_lock l;
+	struct bpf_rb_root r __contains(node_data, node);
+};
+
 struct map_value {
 	struct prog_test_ref_kfunc *not_kptr;
 	struct prog_test_ref_kfunc __kptr *val;
 	struct node_data __kptr *node;
 	struct plain_local __kptr *plain;
+	struct local_with_root __kptr *local_root;
 };
 
 /* This is necessary so that LLVM generates BTF for node_data struct
@@ -65,6 +72,17 @@ struct {
 	__uint(max_entries, 2);
 } some_nodes SEC(".maps");
 
+static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b)
+{
+	struct node_data *node_a;
+	struct node_data *node_b;
+
+	node_a = container_of(a, struct node_data, node);
+	node_b = container_of(b, struct node_data, node);
+
+	return node_a->key < node_b->key;
+}
+
 static int create_and_stash(int idx, int val)
 {
 	struct map_value *mapval;
@@ -113,6 +131,41 @@ long stash_plain(void *ctx)
 	return 0;
 }
 
+SEC("tc")
+long stash_local_with_root(void *ctx)
+{
+	struct local_with_root *res;
+	struct map_value *mapval;
+	struct node_data *n;
+	int idx = 0;
+
+	mapval = bpf_map_lookup_elem(&some_nodes, &idx);
+	if (!mapval)
+		return 1;
+
+	res = bpf_obj_new(typeof(*res));
+	if (!res)
+		return 2;
+	res->key = 41;
+
+	n = bpf_obj_new(typeof(*n));
+	if (!n) {
+		bpf_obj_drop(res);
+		return 3;
+	}
+
+	bpf_spin_lock(&res->l);
+	bpf_rbtree_add(&res->r, &n->node, less);
+	bpf_spin_unlock(&res->l);
+
+	res = bpf_kptr_xchg(&mapval->local_root, res);
+	if (res) {
+		bpf_obj_drop(res);
+		return 4;
+	}
+	return 0;
+}
+
 SEC("tc")
 long unstash_rb_node(void *ctx)
 {
-- 
2.34.1



* Re: [PATCH v1 bpf-next] selftests/bpf: Test bpf_kptr_xchg stashing of bpf_rb_root
  2023-12-04 21:17 [PATCH v1 bpf-next] selftests/bpf: Test bpf_kptr_xchg stashing of bpf_rb_root Dave Marchevsky
@ 2023-12-04 22:28 ` Yonghong Song
  2023-12-05 14:10 ` patchwork-bot+netdevbpf
  1 sibling, 0 replies; 3+ messages in thread
From: Yonghong Song @ 2023-12-04 22:28 UTC (permalink / raw)
  To: Dave Marchevsky, bpf
  Cc: Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko,
	Martin KaFai Lau, Kernel Team


On 12/4/23 4:17 PM, Dave Marchevsky wrote:
> There was some confusion amongst Meta sched_ext folks regarding whether
> stashing bpf_rb_root - the tree itself, rather than a single node - was
> supported. This patch adds a small test which demonstrates this
> functionality: a local kptr with rb_root is created, a node is created
> and added to the tree, then the tree is kptr_xchg'd into a mapval.
>
> Signed-off-by: Dave Marchevsky <davemarchevsky@fb.com>

Acked-by: Yonghong Song <yonghong.song@linux.dev>



* Re: [PATCH v1 bpf-next] selftests/bpf: Test bpf_kptr_xchg stashing of bpf_rb_root
  2023-12-04 21:17 [PATCH v1 bpf-next] selftests/bpf: Test bpf_kptr_xchg stashing of bpf_rb_root Dave Marchevsky
  2023-12-04 22:28 ` Yonghong Song
@ 2023-12-05 14:10 ` patchwork-bot+netdevbpf
  1 sibling, 0 replies; 3+ messages in thread
From: patchwork-bot+netdevbpf @ 2023-12-05 14:10 UTC (permalink / raw)
  To: Dave Marchevsky; +Cc: bpf, ast, daniel, andrii, martin.lau, kernel-team

Hello:

This patch was applied to bpf/bpf-next.git (master)
by Daniel Borkmann <daniel@iogearbox.net>:

On Mon, 4 Dec 2023 13:17:22 -0800 you wrote:
> There was some confusion amongst Meta sched_ext folks regarding whether
> stashing bpf_rb_root - the tree itself, rather than a single node - was
> supported. This patch adds a small test which demonstrates this
> functionality: a local kptr with rb_root is created, a node is created
> and added to the tree, then the tree is kptr_xchg'd into a mapval.
> 
> Signed-off-by: Dave Marchevsky <davemarchevsky@fb.com>
> 
> [...]

Here is the summary with links:
  - [v1,bpf-next] selftests/bpf: Test bpf_kptr_xchg stashing of bpf_rb_root
    https://git.kernel.org/bpf/bpf-next/c/1b4c7e20bfd6

You are awesome, thank you!
-- 
Deet-doot-dot, I am a bot.
https://korg.docs.kernel.org/patchwork/pwbot.html


