
Commit f8c67d8

puranjaymohan authored and Alexei Starovoitov committed
bpf: Use kmalloc_nolock() in range tree
The range tree uses bpf_mem_alloc(), which is safe to be called from all contexts and uses a pre-allocated pool of memory to serve these allocations.

Replace bpf_mem_alloc() with kmalloc_nolock(), which can also be called safely from all contexts and is more scalable than bpf_mem_alloc().

Remove the migrate_disable()/migrate_enable() pairs: they were only needed because bpf_mem_alloc() does per-cpu operations; kmalloc_nolock() doesn't need them.

Signed-off-by: Puranjay Mohan <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Alexei Starovoitov <[email protected]>
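For context, a minimal sketch of the before/after allocation pattern described above (illustrative only; the real call sites are in the diff below):

	/* Before: bpf_mem_alloc() does per-cpu operations, so callers had to
	 * disable migration around each call.
	 */
	migrate_disable();
	rn = bpf_mem_alloc(&bpf_global_ma, sizeof(struct range_node));
	migrate_enable();

	/* After: kmalloc_nolock() is safe in any context and needs no such
	 * pinning. 0 requests no special GFP flags; NUMA_NO_NODE expresses
	 * no node preference.
	 */
	rn = kmalloc_nolock(sizeof(struct range_node), 0, NUMA_NO_NODE);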
1 parent 6f1f4c1 commit f8c67d8


kernel/bpf/range_tree.c

Lines changed: 6 additions & 15 deletions
@@ -2,7 +2,6 @@
 /* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
 #include <linux/interval_tree_generic.h>
 #include <linux/slab.h>
-#include <linux/bpf_mem_alloc.h>
 #include <linux/bpf.h>
 #include "range_tree.h"
 
@@ -21,7 +20,7 @@
  * in commit 6772fcc8890a ("xfs: convert xbitmap to interval tree").
  *
  * The implementation relies on external lock to protect rbtree-s.
- * The alloc/free of range_node-s is done via bpf_mem_alloc.
+ * The alloc/free of range_node-s is done via kmalloc_nolock().
  *
  * bpf arena is using range_tree to represent unallocated slots.
  * At init time:
@@ -150,9 +149,7 @@ int range_tree_clear(struct range_tree *rt, u32 start, u32 len)
 		range_it_insert(rn, rt);
 
 		/* Add a range */
-		migrate_disable();
-		new_rn = bpf_mem_alloc(&bpf_global_ma, sizeof(struct range_node));
-		migrate_enable();
+		new_rn = kmalloc_nolock(sizeof(struct range_node), 0, NUMA_NO_NODE);
 		if (!new_rn)
 			return -ENOMEM;
 		new_rn->rn_start = last + 1;
@@ -172,9 +169,7 @@ int range_tree_clear(struct range_tree *rt, u32 start, u32 len)
 		} else {
 			/* in the middle of the clearing range */
 			range_it_remove(rn, rt);
-			migrate_disable();
-			bpf_mem_free(&bpf_global_ma, rn);
-			migrate_enable();
+			kfree_nolock(rn);
 		}
 	}
 	return 0;
@@ -227,9 +222,7 @@ int range_tree_set(struct range_tree *rt, u32 start, u32 len)
 		range_it_remove(right, rt);
 		left->rn_last = right->rn_last;
 		range_it_insert(left, rt);
-		migrate_disable();
-		bpf_mem_free(&bpf_global_ma, right);
-		migrate_enable();
+		kfree_nolock(right);
 	} else if (left) {
 		/* Combine with the left range */
 		range_it_remove(left, rt);
@@ -241,9 +234,7 @@ int range_tree_set(struct range_tree *rt, u32 start, u32 len)
 		right->rn_start = start;
 		range_it_insert(right, rt);
 	} else {
-		migrate_disable();
-		left = bpf_mem_alloc(&bpf_global_ma, sizeof(struct range_node));
-		migrate_enable();
+		left = kmalloc_nolock(sizeof(struct range_node), 0, NUMA_NO_NODE);
 		if (!left)
 			return -ENOMEM;
 		left->rn_start = start;
@@ -259,7 +250,7 @@ void range_tree_destroy(struct range_tree *rt)
 
 	while ((rn = range_it_iter_first(rt, 0, -1U))) {
 		range_it_remove(rn, rt);
-		bpf_mem_free(&bpf_global_ma, rn);
+		kfree_nolock(rn);
 	}
 }
 
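As a usage sketch of the external-locking contract mentioned in the file comment: the tree takes no locks itself, so callers serialize access with their own lock. Here reserve_slots, my_ctx, and the spinlock are hypothetical; only the range_tree API comes from this file, and the "clear means allocate" convention follows the arena description in the comment above.

	#include <linux/spinlock.h>
	#include <linux/types.h>
	#include "range_tree.h"

	struct my_ctx {
		struct range_tree rt;	/* protected by lock, per the file comment */
		spinlock_t lock;
	};

	/* Hypothetical caller: clear [start, start + len) from the tree of
	 * unallocated slots, i.e. mark those slots as allocated.
	 */
	static int reserve_slots(struct my_ctx *ctx, u32 start, u32 len)
	{
		int err;

		spin_lock(&ctx->lock);
		err = range_tree_clear(&ctx->rt, start, len);
		spin_unlock(&ctx->lock);
		return err;	/* -ENOMEM if kmalloc_nolock() failed inside */
	}

Because the node allocations now go through kmalloc_nolock(), this pattern stays safe even when the caller holds its lock in a context where a regular GFP_KERNEL allocation would not be allowed.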