Skip to content

Commit 32f9152

Browse files
committed
Merge branch 'bpf-xdp-redirect'
Björn Töpel says: ==================== This two-patch series contains two optimizations for the bpf_redirect_map() helper and the xdp_do_redirect() function. The bpf_redirect_map() optimization is about avoiding the map lookup dispatching. Instead of having a switch-statement and selecting the correct lookup function, we let bpf_redirect_map() be a map operation, where each map has its own bpf_redirect_map() implementation. This way the run-time lookup is avoided. The xdp_do_redirect() patch restructures the code, so that the map pointer indirection can be avoided. Performance-wise I got 4% improvement for XSKMAP (sample:xdpsock/rx-drop), and 8% (sample:xdp_redirect_map) on my machine. v5->v6: Removed REDIR enum, and instead use map_id and map_type. (Daniel) Applied Daniel's fixups on patch 1. (Daniel) v4->v5: Renamed map operation to map_redirect. (Daniel) v3->v4: Made bpf_redirect_map() a map operation. (Daniel) v2->v3: Fix build when CONFIG_NET is not set. (lkp) v1->v2: Removed warning when CONFIG_BPF_SYSCALL was not set. (lkp) Cleaned up case-clause in xdp_do_generic_redirect_map(). (Toke) Re-added comment. (Toke) rfc->v1: Use map_id, and remove bpf_clear_redirect_map(). (Toke) Get rid of the macro and use __always_inline. (Jesper) ==================== Signed-off-by: Daniel Borkmann <[email protected]>
2 parents 11d39cf + ee75aef commit 32f9152

File tree

9 files changed

+195
-208
lines changed

9 files changed

+195
-208
lines changed

include/linux/bpf.h

Lines changed: 6 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -118,6 +118,9 @@ struct bpf_map_ops {
118118
void *owner, u32 size);
119119
struct bpf_local_storage __rcu ** (*map_owner_storage_ptr)(void *owner);
120120

121+
/* Misc helpers.*/
122+
int (*map_redirect)(struct bpf_map *map, u32 ifindex, u64 flags);
123+
121124
/* map_meta_equal must be implemented for maps that can be
122125
* used as an inner map. It is a runtime check to ensure
123126
* an inner map can be inserted to an outer map.
@@ -1450,9 +1453,9 @@ struct btf *bpf_get_btf_vmlinux(void);
14501453
/* Map specifics */
14511454
struct xdp_buff;
14521455
struct sk_buff;
1456+
struct bpf_dtab_netdev;
1457+
struct bpf_cpu_map_entry;
14531458

1454-
struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key);
1455-
struct bpf_dtab_netdev *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key);
14561459
void __dev_flush(void);
14571460
int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
14581461
struct net_device *dev_rx);
@@ -1462,7 +1465,6 @@ int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
14621465
struct bpf_prog *xdp_prog);
14631466
bool dev_map_can_have_prog(struct bpf_map *map);
14641467

1465-
struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key);
14661468
void __cpu_map_flush(void);
14671469
int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_buff *xdp,
14681470
struct net_device *dev_rx);
@@ -1593,17 +1595,6 @@ static inline int bpf_obj_get_user(const char __user *pathname, int flags)
15931595
return -EOPNOTSUPP;
15941596
}
15951597

1596-
static inline struct net_device *__dev_map_lookup_elem(struct bpf_map *map,
1597-
u32 key)
1598-
{
1599-
return NULL;
1600-
}
1601-
1602-
static inline struct net_device *__dev_map_hash_lookup_elem(struct bpf_map *map,
1603-
u32 key)
1604-
{
1605-
return NULL;
1606-
}
16071598
static inline bool dev_map_can_have_prog(struct bpf_map *map)
16081599
{
16091600
return false;
@@ -1615,6 +1606,7 @@ static inline void __dev_flush(void)
16151606

16161607
struct xdp_buff;
16171608
struct bpf_dtab_netdev;
1609+
struct bpf_cpu_map_entry;
16181610

16191611
static inline
16201612
int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
@@ -1639,12 +1631,6 @@ static inline int dev_map_generic_redirect(struct bpf_dtab_netdev *dst,
16391631
return 0;
16401632
}
16411633

1642-
static inline
1643-
struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key)
1644-
{
1645-
return NULL;
1646-
}
1647-
16481634
static inline void __cpu_map_flush(void)
16491635
{
16501636
}

include/linux/filter.h

Lines changed: 30 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -646,7 +646,8 @@ struct bpf_redirect_info {
646646
u32 flags;
647647
u32 tgt_index;
648648
void *tgt_value;
649-
struct bpf_map *map;
649+
u32 map_id;
650+
enum bpf_map_type map_type;
650651
u32 kern_flags;
651652
struct bpf_nh_params nh;
652653
};
@@ -1472,4 +1473,32 @@ static inline bool bpf_sk_lookup_run_v6(struct net *net, int protocol,
14721473
}
14731474
#endif /* IS_ENABLED(CONFIG_IPV6) */
14741475

1476+
static __always_inline int __bpf_xdp_redirect_map(struct bpf_map *map, u32 ifindex, u64 flags,
1477+
void *lookup_elem(struct bpf_map *map, u32 key))
1478+
{
1479+
struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
1480+
1481+
/* Lower bits of the flags are used as return code on lookup failure */
1482+
if (unlikely(flags > XDP_TX))
1483+
return XDP_ABORTED;
1484+
1485+
ri->tgt_value = lookup_elem(map, ifindex);
1486+
if (unlikely(!ri->tgt_value)) {
1487+
/* If the lookup fails we want to clear out the state in the
1488+
* redirect_info struct completely, so that if an eBPF program
1489+
* performs multiple lookups, the last one always takes
1490+
* precedence.
1491+
*/
1492+
ri->map_id = INT_MAX; /* Valid map id idr range: [1,INT_MAX[ */
1493+
ri->map_type = BPF_MAP_TYPE_UNSPEC;
1494+
return flags;
1495+
}
1496+
1497+
ri->tgt_index = ifindex;
1498+
ri->map_id = map->id;
1499+
ri->map_type = map->map_type;
1500+
1501+
return XDP_REDIRECT;
1502+
}
1503+
14751504
#endif /* __LINUX_FILTER_H__ */

include/net/xdp_sock.h

Lines changed: 0 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -80,19 +80,6 @@ int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
8080
int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp);
8181
void __xsk_map_flush(void);
8282

83-
static inline struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map,
84-
u32 key)
85-
{
86-
struct xsk_map *m = container_of(map, struct xsk_map, map);
87-
struct xdp_sock *xs;
88-
89-
if (key >= map->max_entries)
90-
return NULL;
91-
92-
xs = READ_ONCE(m->xsk_map[key]);
93-
return xs;
94-
}
95-
9683
#else
9784

9885
static inline int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
@@ -109,12 +96,6 @@ static inline void __xsk_map_flush(void)
10996
{
11097
}
11198

112-
static inline struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map,
113-
u32 key)
114-
{
115-
return NULL;
116-
}
117-
11899
#endif /* CONFIG_XDP_SOCKETS */
119100

120101
#endif /* _LINUX_XDP_SOCK_H */

include/trace/events/xdp.h

Lines changed: 35 additions & 27 deletions
Original file line numberDiff line numberDiff line change
@@ -86,19 +86,15 @@ struct _bpf_dtab_netdev {
8686
};
8787
#endif /* __DEVMAP_OBJ_TYPE */
8888

89-
#define devmap_ifindex(tgt, map) \
90-
(((map->map_type == BPF_MAP_TYPE_DEVMAP || \
91-
map->map_type == BPF_MAP_TYPE_DEVMAP_HASH)) ? \
92-
((struct _bpf_dtab_netdev *)tgt)->dev->ifindex : 0)
93-
9489
DECLARE_EVENT_CLASS(xdp_redirect_template,
9590

9691
TP_PROTO(const struct net_device *dev,
9792
const struct bpf_prog *xdp,
9893
const void *tgt, int err,
99-
const struct bpf_map *map, u32 index),
94+
enum bpf_map_type map_type,
95+
u32 map_id, u32 index),
10096

101-
TP_ARGS(dev, xdp, tgt, err, map, index),
97+
TP_ARGS(dev, xdp, tgt, err, map_type, map_id, index),
10298

10399
TP_STRUCT__entry(
104100
__field(int, prog_id)
@@ -111,14 +107,22 @@ DECLARE_EVENT_CLASS(xdp_redirect_template,
111107
),
112108

113109
TP_fast_assign(
110+
u32 ifindex = 0, map_index = index;
111+
112+
if (map_type == BPF_MAP_TYPE_DEVMAP || map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
113+
ifindex = ((struct _bpf_dtab_netdev *)tgt)->dev->ifindex;
114+
} else if (map_type == BPF_MAP_TYPE_UNSPEC && map_id == INT_MAX) {
115+
ifindex = index;
116+
map_index = 0;
117+
}
118+
114119
__entry->prog_id = xdp->aux->id;
115120
__entry->act = XDP_REDIRECT;
116121
__entry->ifindex = dev->ifindex;
117122
__entry->err = err;
118-
__entry->to_ifindex = map ? devmap_ifindex(tgt, map) :
119-
index;
120-
__entry->map_id = map ? map->id : 0;
121-
__entry->map_index = map ? index : 0;
123+
__entry->to_ifindex = ifindex;
124+
__entry->map_id = map_id;
125+
__entry->map_index = map_index;
122126
),
123127

124128
TP_printk("prog_id=%d action=%s ifindex=%d to_ifindex=%d err=%d"
@@ -133,45 +137,49 @@ DEFINE_EVENT(xdp_redirect_template, xdp_redirect,
133137
TP_PROTO(const struct net_device *dev,
134138
const struct bpf_prog *xdp,
135139
const void *tgt, int err,
136-
const struct bpf_map *map, u32 index),
137-
TP_ARGS(dev, xdp, tgt, err, map, index)
140+
enum bpf_map_type map_type,
141+
u32 map_id, u32 index),
142+
TP_ARGS(dev, xdp, tgt, err, map_type, map_id, index)
138143
);
139144

140145
DEFINE_EVENT(xdp_redirect_template, xdp_redirect_err,
141146
TP_PROTO(const struct net_device *dev,
142147
const struct bpf_prog *xdp,
143148
const void *tgt, int err,
144-
const struct bpf_map *map, u32 index),
145-
TP_ARGS(dev, xdp, tgt, err, map, index)
149+
enum bpf_map_type map_type,
150+
u32 map_id, u32 index),
151+
TP_ARGS(dev, xdp, tgt, err, map_type, map_id, index)
146152
);
147153

148-
#define _trace_xdp_redirect(dev, xdp, to) \
149-
trace_xdp_redirect(dev, xdp, NULL, 0, NULL, to)
154+
#define _trace_xdp_redirect(dev, xdp, to) \
155+
trace_xdp_redirect(dev, xdp, NULL, 0, BPF_MAP_TYPE_UNSPEC, INT_MAX, to)
150156

151-
#define _trace_xdp_redirect_err(dev, xdp, to, err) \
152-
trace_xdp_redirect_err(dev, xdp, NULL, err, NULL, to)
157+
#define _trace_xdp_redirect_err(dev, xdp, to, err) \
158+
trace_xdp_redirect_err(dev, xdp, NULL, err, BPF_MAP_TYPE_UNSPEC, INT_MAX, to)
153159

154-
#define _trace_xdp_redirect_map(dev, xdp, to, map, index) \
155-
trace_xdp_redirect(dev, xdp, to, 0, map, index)
160+
#define _trace_xdp_redirect_map(dev, xdp, to, map_type, map_id, index) \
161+
trace_xdp_redirect(dev, xdp, to, 0, map_type, map_id, index)
156162

157-
#define _trace_xdp_redirect_map_err(dev, xdp, to, map, index, err) \
158-
trace_xdp_redirect_err(dev, xdp, to, err, map, index)
163+
#define _trace_xdp_redirect_map_err(dev, xdp, to, map_type, map_id, index, err) \
164+
trace_xdp_redirect_err(dev, xdp, to, err, map_type, map_id, index)
159165

160166
/* not used anymore, but kept around so as not to break old programs */
161167
DEFINE_EVENT(xdp_redirect_template, xdp_redirect_map,
162168
TP_PROTO(const struct net_device *dev,
163169
const struct bpf_prog *xdp,
164170
const void *tgt, int err,
165-
const struct bpf_map *map, u32 index),
166-
TP_ARGS(dev, xdp, tgt, err, map, index)
171+
enum bpf_map_type map_type,
172+
u32 map_id, u32 index),
173+
TP_ARGS(dev, xdp, tgt, err, map_type, map_id, index)
167174
);
168175

169176
DEFINE_EVENT(xdp_redirect_template, xdp_redirect_map_err,
170177
TP_PROTO(const struct net_device *dev,
171178
const struct bpf_prog *xdp,
172179
const void *tgt, int err,
173-
const struct bpf_map *map, u32 index),
174-
TP_ARGS(dev, xdp, tgt, err, map, index)
180+
enum bpf_map_type map_type,
181+
u32 map_id, u32 index),
182+
TP_ARGS(dev, xdp, tgt, err, map_type, map_id, index)
175183
);
176184

177185
TRACE_EVENT(xdp_cpumap_kthread,

kernel/bpf/cpumap.c

Lines changed: 7 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -543,7 +543,6 @@ static void cpu_map_free(struct bpf_map *map)
543543
* complete.
544544
*/
545545

546-
bpf_clear_redirect_map(map);
547546
synchronize_rcu();
548547

549548
/* For cpu_map the remote CPUs can still be using the entries
@@ -563,7 +562,7 @@ static void cpu_map_free(struct bpf_map *map)
563562
kfree(cmap);
564563
}
565564

566-
struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key)
565+
static void *__cpu_map_lookup_elem(struct bpf_map *map, u32 key)
567566
{
568567
struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
569568
struct bpf_cpu_map_entry *rcpu;
@@ -600,6 +599,11 @@ static int cpu_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
600599
return 0;
601600
}
602601

602+
static int cpu_map_redirect(struct bpf_map *map, u32 ifindex, u64 flags)
603+
{
604+
return __bpf_xdp_redirect_map(map, ifindex, flags, __cpu_map_lookup_elem);
605+
}
606+
603607
static int cpu_map_btf_id;
604608
const struct bpf_map_ops cpu_map_ops = {
605609
.map_meta_equal = bpf_map_meta_equal,
@@ -612,6 +616,7 @@ const struct bpf_map_ops cpu_map_ops = {
612616
.map_check_btf = map_check_no_btf,
613617
.map_btf_name = "bpf_cpu_map",
614618
.map_btf_id = &cpu_map_btf_id,
619+
.map_redirect = cpu_map_redirect,
615620
};
616621

617622
static void bq_flush_to_queue(struct xdp_bulk_queue *bq)

kernel/bpf/devmap.c

Lines changed: 14 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -197,7 +197,6 @@ static void dev_map_free(struct bpf_map *map)
197197
list_del_rcu(&dtab->list);
198198
spin_unlock(&dev_map_lock);
199199

200-
bpf_clear_redirect_map(map);
201200
synchronize_rcu();
202201

203202
/* Make sure prior __dev_map_entry_free() have completed. */
@@ -258,7 +257,7 @@ static int dev_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
258257
return 0;
259258
}
260259

261-
struct bpf_dtab_netdev *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key)
260+
static void *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key)
262261
{
263262
struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
264263
struct hlist_head *head = dev_map_index_hash(dtab, key);
@@ -392,7 +391,7 @@ void __dev_flush(void)
392391
* update happens in parallel here a dev_put wont happen until after reading the
393392
* ifindex.
394393
*/
395-
struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key)
394+
static void *__dev_map_lookup_elem(struct bpf_map *map, u32 key)
396395
{
397396
struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
398397
struct bpf_dtab_netdev *obj;
@@ -735,6 +734,16 @@ static int dev_map_hash_update_elem(struct bpf_map *map, void *key, void *value,
735734
map, key, value, map_flags);
736735
}
737736

737+
static int dev_map_redirect(struct bpf_map *map, u32 ifindex, u64 flags)
738+
{
739+
return __bpf_xdp_redirect_map(map, ifindex, flags, __dev_map_lookup_elem);
740+
}
741+
742+
static int dev_hash_map_redirect(struct bpf_map *map, u32 ifindex, u64 flags)
743+
{
744+
return __bpf_xdp_redirect_map(map, ifindex, flags, __dev_map_hash_lookup_elem);
745+
}
746+
738747
static int dev_map_btf_id;
739748
const struct bpf_map_ops dev_map_ops = {
740749
.map_meta_equal = bpf_map_meta_equal,
@@ -747,6 +756,7 @@ const struct bpf_map_ops dev_map_ops = {
747756
.map_check_btf = map_check_no_btf,
748757
.map_btf_name = "bpf_dtab",
749758
.map_btf_id = &dev_map_btf_id,
759+
.map_redirect = dev_map_redirect,
750760
};
751761

752762
static int dev_map_hash_map_btf_id;
@@ -761,6 +771,7 @@ const struct bpf_map_ops dev_map_hash_ops = {
761771
.map_check_btf = map_check_no_btf,
762772
.map_btf_name = "bpf_dtab",
763773
.map_btf_id = &dev_map_hash_map_btf_id,
774+
.map_redirect = dev_hash_map_redirect,
764775
};
765776

766777
static void dev_map_hash_remove_netdev(struct bpf_dtab *dtab,

kernel/bpf/verifier.c

Lines changed: 11 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -5582,7 +5582,8 @@ record_func_map(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
55825582
func_id != BPF_FUNC_map_push_elem &&
55835583
func_id != BPF_FUNC_map_pop_elem &&
55845584
func_id != BPF_FUNC_map_peek_elem &&
5585-
func_id != BPF_FUNC_for_each_map_elem)
5585+
func_id != BPF_FUNC_for_each_map_elem &&
5586+
func_id != BPF_FUNC_redirect_map)
55865587
return 0;
55875588

55885589
if (map == NULL) {
@@ -12017,7 +12018,8 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
1201712018
insn->imm == BPF_FUNC_map_delete_elem ||
1201812019
insn->imm == BPF_FUNC_map_push_elem ||
1201912020
insn->imm == BPF_FUNC_map_pop_elem ||
12020-
insn->imm == BPF_FUNC_map_peek_elem)) {
12021+
insn->imm == BPF_FUNC_map_peek_elem ||
12022+
insn->imm == BPF_FUNC_redirect_map)) {
1202112023
aux = &env->insn_aux_data[i + delta];
1202212024
if (bpf_map_ptr_poisoned(aux))
1202312025
goto patch_call_imm;
@@ -12059,6 +12061,9 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
1205912061
(int (*)(struct bpf_map *map, void *value))NULL));
1206012062
BUILD_BUG_ON(!__same_type(ops->map_peek_elem,
1206112063
(int (*)(struct bpf_map *map, void *value))NULL));
12064+
BUILD_BUG_ON(!__same_type(ops->map_redirect,
12065+
(int (*)(struct bpf_map *map, u32 ifindex, u64 flags))NULL));
12066+
1206212067
patch_map_ops_generic:
1206312068
switch (insn->imm) {
1206412069
case BPF_FUNC_map_lookup_elem:
@@ -12085,6 +12090,10 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
1208512090
insn->imm = BPF_CAST_CALL(ops->map_peek_elem) -
1208612091
__bpf_call_base;
1208712092
continue;
12093+
case BPF_FUNC_redirect_map:
12094+
insn->imm = BPF_CAST_CALL(ops->map_redirect) -
12095+
__bpf_call_base;
12096+
continue;
1208812097
}
1208912098

1209012099
goto patch_call_imm;

0 commit comments

Comments
 (0)