Commit 294967a

bpf: table based bpf_insn_successors()

Converting bpf_insn_successors() to use a lookup table makes it ~1.5 times
faster. Also remove unnecessary conditionals:
- `idx + 1 < prog->len` is unnecessary because after check_cfg() all jump
  targets are guaranteed to be within the program;
- `i == 0 || succ[0] != dst` is unnecessary because any client of
  bpf_insn_successors() can handle duplicate edges:
  - compute_live_registers()
  - compute_scc()

Moving bpf_insn_successors() to liveness.c allows its inlining in
liveness.c:__update_stack_liveness(). Such inlining speeds up
__update_stack_liveness() by ~40%.

bpf_insn_successors() is used in both verifier.c and liveness.c. perf shows
that the move does not negatively impact its users in verifier.c, as these
run only once before the main verification pass, unlike
__update_stack_liveness(), which can be triggered multiple times.

Signed-off-by: Eduard Zingerman <[email protected]>

1 parent ba8d72a commit 294967a
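
To illustrate why the dropped `i == 0 || succ[0] != dst` filter is safe, here is a
minimal, hypothetical worklist traversal built on bpf_insn_successors(). This is a
sketch only, not part of the patch; visit_reachable(), visited and stack are made-up
names. When a conditional jump yields the same target twice, the second copy is
rejected by the visited[] test, so callers never need deduplicated edges.

	/*
	 * Hypothetical sketch, not from the patch: a depth-first worklist
	 * caller of bpf_insn_successors().  Duplicate entries in succ[]
	 * are harmless because an already-visited successor is skipped.
	 */
	static void visit_reachable(struct bpf_prog *prog, bool *visited, u32 *stack)
	{
		u32 succ[2];
		int top = 0, n, i;

		stack[top++] = 0;
		visited[0] = true;
		while (top > 0) {
			u32 insn_idx = stack[--top];

			n = bpf_insn_successors(prog, insn_idx, succ);
			for (i = 0; i < n; i++) {
				if (visited[succ[i]])	/* duplicate edges land here */
					continue;
				visited[succ[i]] = true;
				stack[top++] = succ[i];
			}
		}
	}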

File tree

3 files changed: +53 -71 lines changed

include/linux/bpf_verifier.h

Lines changed: 1 addition & 0 deletions

@@ -1049,6 +1049,7 @@ void print_insn_state(struct bpf_verifier_env *env, const struct bpf_verifier_st
 		      u32 frameno);
 
 struct bpf_subprog_info *bpf_find_containing_subprog(struct bpf_verifier_env *env, int off);
+int bpf_jmp_offset(struct bpf_insn *insn);
 int bpf_insn_successors(struct bpf_prog *prog, u32 idx, u32 succ[2]);
 void bpf_fmt_stack_mask(char *buf, ssize_t buf_sz, u64 stack_mask);
 bool bpf_calls_callback(struct bpf_verifier_env *env, int insn_idx);

kernel/bpf/liveness.c

Lines changed: 51 additions & 0 deletions

@@ -428,6 +428,57 @@ static void log_mask_change(struct bpf_verifier_env *env, struct callchain *call
 	bpf_log(&env->log, "\n");
 }
 
+int bpf_jmp_offset(struct bpf_insn *insn)
+{
+	u8 code = insn->code;
+
+	if (code == (BPF_JMP32 | BPF_JA))
+		return insn->imm;
+	return insn->off;
+}
+
+inline int bpf_insn_successors(struct bpf_prog *prog, u32 idx, u32 succ[2])
+{
+	static const struct opcode_info {
+		bool can_jump;
+		bool can_fallthrough;
+	} opcode_info_tbl[256] = {
+		[0 ... 255] = {.can_jump = false, .can_fallthrough = true},
+	#define _J(code, ...) \
+		[BPF_JMP | code] = __VA_ARGS__, \
+		[BPF_JMP32 | code] = __VA_ARGS__
+
+		_J(BPF_EXIT,  {.can_jump = false, .can_fallthrough = false}),
+		_J(BPF_JA,    {.can_jump = true,  .can_fallthrough = false}),
+		_J(BPF_JEQ,   {.can_jump = true,  .can_fallthrough = true}),
+		_J(BPF_JNE,   {.can_jump = true,  .can_fallthrough = true}),
+		_J(BPF_JLT,   {.can_jump = true,  .can_fallthrough = true}),
+		_J(BPF_JLE,   {.can_jump = true,  .can_fallthrough = true}),
+		_J(BPF_JGT,   {.can_jump = true,  .can_fallthrough = true}),
+		_J(BPF_JGE,   {.can_jump = true,  .can_fallthrough = true}),
+		_J(BPF_JSGT,  {.can_jump = true,  .can_fallthrough = true}),
+		_J(BPF_JSGE,  {.can_jump = true,  .can_fallthrough = true}),
+		_J(BPF_JSLT,  {.can_jump = true,  .can_fallthrough = true}),
+		_J(BPF_JSLE,  {.can_jump = true,  .can_fallthrough = true}),
+		_J(BPF_JCOND, {.can_jump = true,  .can_fallthrough = true}),
+		_J(BPF_JSET,  {.can_jump = true,  .can_fallthrough = true}),
+	#undef _J
+	};
+	struct bpf_insn *insn = &prog->insnsi[idx];
+	const struct opcode_info *opcode_info;
+	int i = 0, insn_sz;
+
+	opcode_info = &opcode_info_tbl[BPF_CLASS(insn->code) | BPF_OP(insn->code)];
+	insn_sz = bpf_is_ldimm64(insn) ? 2 : 1;
+	if (opcode_info->can_fallthrough)
+		succ[i++] = idx + insn_sz;
+
+	if (opcode_info->can_jump)
+		succ[i++] = idx + bpf_jmp_offset(insn) + 1;
+
+	return i;
+}
+
 static struct func_instance *get_outer_instance(struct bpf_verifier_env *env,
 						struct func_instance *instance)
 {
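
For reference, the table above is indexed with BPF_CLASS(insn->code) | BPF_OP(insn->code):
BPF_CLASS() keeps the low three opcode bits and BPF_OP() the high four, so only bit 3 of
the opcode (the BPF_X/BPF_K source flag on jump instructions) is dropped and every opcode
maps onto one of the 256 slots. A small worked example, for illustration only, using the
fixed opcode encodings of the BPF ISA; this snippet is not part of the patch:

	u8 code = BPF_JMP32 | BPF_JEQ | BPF_X;      /* 0x06 | 0x10 | 0x08 = 0x1e */
	u8 slot = BPF_CLASS(code) | BPF_OP(code);   /* 0x06 | 0x10        = 0x16 */
	/* Slot 0x16 was filled by _J(BPF_JEQ, ...): can_jump and can_fallthrough. */

	code = BPF_ALU64 | BPF_ADD | BPF_K;         /* 0x07 | 0x00 | 0x00 = 0x07 */
	slot = BPF_CLASS(code) | BPF_OP(code);      /* still 0x07 */
	/* No _J() entry covers ALU classes, so the [0 ... 255] default applies:
	 * fall through to the next instruction, no jump edge.
	 */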

kernel/bpf/verifier.c

Lines changed: 1 addition & 71 deletions

@@ -3470,15 +3470,6 @@ static int add_subprog_and_kfunc(struct bpf_verifier_env *env)
 	return 0;
 }
 
-static int jmp_offset(struct bpf_insn *insn)
-{
-	u8 code = insn->code;
-
-	if (code == (BPF_JMP32 | BPF_JA))
-		return insn->imm;
-	return insn->off;
-}
-
 static int check_subprogs(struct bpf_verifier_env *env)
 {
 	int i, subprog_start, subprog_end, off, cur_subprog = 0;
@@ -3505,7 +3496,7 @@ static int check_subprogs(struct bpf_verifier_env *env)
 			goto next;
 		if (BPF_OP(code) == BPF_EXIT || BPF_OP(code) == BPF_CALL)
 			goto next;
-		off = i + jmp_offset(&insn[i]) + 1;
+		off = i + bpf_jmp_offset(&insn[i]) + 1;
 		if (off < subprog_start || off >= subprog_end) {
 			verbose(env, "jump out of range from insn %d to %d\n", i, off);
 			return -EINVAL;
@@ -23913,67 +23904,6 @@ static int process_fd_array(struct bpf_verifier_env *env, union bpf_attr *attr,
 	return 0;
 }
 
-static bool can_fallthrough(struct bpf_insn *insn)
-{
-	u8 class = BPF_CLASS(insn->code);
-	u8 opcode = BPF_OP(insn->code);
-
-	if (class != BPF_JMP && class != BPF_JMP32)
-		return true;
-
-	if (opcode == BPF_EXIT || opcode == BPF_JA)
-		return false;
-
-	return true;
-}
-
-static bool can_jump(struct bpf_insn *insn)
-{
-	u8 class = BPF_CLASS(insn->code);
-	u8 opcode = BPF_OP(insn->code);
-
-	if (class != BPF_JMP && class != BPF_JMP32)
-		return false;
-
-	switch (opcode) {
-	case BPF_JA:
-	case BPF_JEQ:
-	case BPF_JNE:
-	case BPF_JLT:
-	case BPF_JLE:
-	case BPF_JGT:
-	case BPF_JGE:
-	case BPF_JSGT:
-	case BPF_JSGE:
-	case BPF_JSLT:
-	case BPF_JSLE:
-	case BPF_JCOND:
-	case BPF_JSET:
-		return true;
-	}
-
-	return false;
-}
-
-int bpf_insn_successors(struct bpf_prog *prog, u32 idx, u32 succ[2])
-{
-	struct bpf_insn *insn = &prog->insnsi[idx];
-	int i = 0, insn_sz;
-	u32 dst;
-
-	insn_sz = bpf_is_ldimm64(insn) ? 2 : 1;
-	if (can_fallthrough(insn) && idx + 1 < prog->len)
-		succ[i++] = idx + insn_sz;
-
-	if (can_jump(insn)) {
-		dst = idx + jmp_offset(insn) + 1;
-		if (i == 0 || succ[0] != dst)
-			succ[i++] = dst;
-	}
-
-	return i;
-}
-
 /* Each field is a register bitmask */
 struct insn_live_regs {
 	u16 use;	/* registers read by instruction */
