From 61c4589967e92abff07369f1a75da1feb97c96fc Mon Sep 17 00:00:00 2001 From: Yazhou Tang Date: Sun, 21 Dec 2025 17:39:53 +0800 Subject: [PATCH 1/2] bpf: Add interval and tnum analysis for signed and unsigned BPF_DIV This patch introduces interval analysis (range tracking) and tnum analysis (bitwise tracking) for both signed and unsigned division operations in the BPF verifier. The BPF verifier currently lacks support for value tracking on BPF_DIV instructions, which can lead to false positives when verifying BPF programs that use division instructions. According to the BPF instruction set[1], the instruction's offset field (`insn->off`) is used to distinguish between signed (`off == 1`) and unsigned division (`off == 0`). We also follow the BPF division semantics to handle special cases such as division by zero and signed division overflow: - UDIV: dst = (src != 0) ? (dst / src) : 0 - SDIV: dst = (src == 0) ? 0 : ((src == -1 && dst == LLONG_MIN) ? LLONG_MIN : (dst / src)) Here is an overview of the changes made in this patch: 1. For interval analysis: - Added `scalar_min_max_udiv` and `scalar32_min_max_udiv` to update umin/umax bounds, which is straightforward. - Added `scalar_min_max_sdiv` and `scalar32_min_max_sdiv` to update smin/smax bounds. These handle the non-monotonic behavior of signed division by decomposing the divisor range into negative, zero, and positive sub-ranges, computing the result range for each sub-range separately, and finally combining the results into the final smin/smax bounds. 2. For tnum analysis, we referred to LLVM's KnownBits implementation[2] and recent research on abstract interpretation of division[3]: - Added `tnum_udiv` to compute the tnum for unsigned division. It calculates the maximum possible result from the maximum value of the dividend tnum and the minimum value of the divisor tnum, then constructs the resulting tnum accordingly (see the worked example below). We have proven its soundness using the Rocq Prover[4]. - Added `tnum_sdiv` to compute the tnum for signed division. It splits the operands into positive and negative components, performs the calculation on absolute values using `tnum_udiv`, and finally unions the results to ensure soundness. 3. Updated existing selftests to match the expected BPF_DIV behavior.
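As a worked example of the `tnum_udiv` computation, take the operand shapes exercised by the selftests added in the next patch: a dividend tnum (value=0x1, mask=0x8), i.e. 1 or 9, divided by a divisor tnum (value=0x2, mask=0x1), i.e. 2 or 3. Then a_max = a.value + a.mask = 9, b_min = b.value = 2, max_res = 9 / 2 = 4, and the result is TNUM(0, __get_mask(4)) = (value=0x0, mask=0x7): only the low three result bits may be set, which is exactly the var_off=(0x0; 0x7) the new selftests expect in the verifier log.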
[1] https://www.kernel.org/doc/Documentation/bpf/standardization/instruction-set.rst [2] https://llvm.org/doxygen/KnownBits_8cpp_source.html [3] https://dl.acm.org/doi/10.1145/3728905 [4] https://github.com/shenghaoyuan/open-verified-artifacts/tree/main/tnum Co-developed-by: Shenghao Yuan Signed-off-by: Shenghao Yuan Co-developed-by: Tianci Cao Signed-off-by: Tianci Cao Signed-off-by: Yazhou Tang --- include/linux/tnum.h | 4 + kernel/bpf/tnum.c | 159 ++++++++++++- kernel/bpf/verifier.c | 225 ++++++++++++++++++ .../bpf/progs/verifier_value_illegal_alu.c | 7 +- 4 files changed, 391 insertions(+), 4 deletions(-) diff --git a/include/linux/tnum.h b/include/linux/tnum.h index c52b862dad45b..fd00deb2cb889 100644 --- a/include/linux/tnum.h +++ b/include/linux/tnum.h @@ -50,6 +50,10 @@ struct tnum tnum_or(struct tnum a, struct tnum b); struct tnum tnum_xor(struct tnum a, struct tnum b); /* Multiply two tnums, return @a * @b */ struct tnum tnum_mul(struct tnum a, struct tnum b); +/* Unsigned division, return @a / @b */ +struct tnum tnum_udiv(struct tnum a, struct tnum b); +/* Signed division, return @a / @b */ +struct tnum tnum_sdiv(struct tnum a, struct tnum b, bool alu32); /* Return true if the known bits of both tnums have the same value */ bool tnum_overlap(struct tnum a, struct tnum b); diff --git a/kernel/bpf/tnum.c b/kernel/bpf/tnum.c index f8e70e9c3998d..d115528da6a68 100644 --- a/kernel/bpf/tnum.c +++ b/kernel/bpf/tnum.c @@ -12,6 +12,13 @@ #define TNUM(_v, _m) (struct tnum){.value = _v, .mask = _m} /* A completely unknown value */ const struct tnum tnum_unknown = { .value = 0, .mask = -1 }; +/* Tnum bottom */ +const struct tnum tnum_bottom = { .value = -1, .mask = -1 }; + +static bool __tnum_eqb(struct tnum a, struct tnum b) +{ + return a.value == b.value && a.mask == b.mask; +} struct tnum tnum_const(u64 value) { @@ -83,9 +90,23 @@ struct tnum tnum_sub(struct tnum a, struct tnum b) return TNUM(dv & ~mu, mu); } +/* __tnum_neg_width: tnum negation with given bit width. + * @a: the tnum to be negated. + * @width: the bit width to perform negation, 32 or 64. + */ +static struct tnum __tnum_neg_width(struct tnum a, int width) +{ + if (width == 32) + return tnum_sub(TNUM(U32_MAX, 0), a); + else if (width == 64) + return tnum_sub(TNUM(0, 0), a); + else + return tnum_unknown; +} + struct tnum tnum_neg(struct tnum a) { - return tnum_sub(TNUM(0, 0), a); + return __tnum_neg_width(a, 64); } struct tnum tnum_and(struct tnum a, struct tnum b) @@ -167,6 +188,138 @@ bool tnum_overlap(struct tnum a, struct tnum b) return (a.value & mu) == (b.value & mu); } +/* __get_mask: get a mask that covers all bits up to the highest set bit in x. 
+ * For example: + * x = 0b0000...0000 -> return 0b0000...0000 + * x = 0b0000...0001 -> return 0b0000...0001 + * x = 0b0000...1001 -> return 0b0000...1111 + * x = 0b1111...1111 -> return 0b1111...1111 + */ +static u64 __get_mask(u64 x) +{ + int width = 0; + + if (x > 0) + width = 64 - __builtin_clzll(x); + if (width == 0) + return 0; + else if (width == 64) + return U64_MAX; + else + return (1ULL << width) - 1; +} + +struct tnum tnum_udiv(struct tnum a, struct tnum b) +{ + if (tnum_is_const(b)) { + /* BPF div specification: x / 0 = 0 */ + if (b.value == 0) + return TNUM(0, 0); + if (tnum_is_const(a)) + return TNUM(a.value / b.value, 0); + } + + if (b.value == 0) + return tnum_unknown; + + u64 a_max = a.value + a.mask; + u64 b_min = b.value; + u64 max_res = a_max / b_min; + return TNUM(0, __get_mask(max_res)); +} + +static u64 __msb(u64 x, int width) +{ + return x & (1ULL << (width - 1)); +} + +static struct tnum __tnum_get_positive(struct tnum x, int width) +{ + if (__msb(x.value, width)) + return tnum_bottom; + if (__msb(x.mask, width)) + return TNUM(x.value, x.mask & ~(1ULL << (width - 1))); + return x; +} + +static struct tnum __tnum_get_negative(struct tnum x, int width) +{ + if (__msb(x.value, width)) + return x; + if (__msb(x.mask, width)) + return TNUM(x.value | (1ULL << (width - 1)), x.mask & ~(1ULL << (width - 1))); + return tnum_bottom; +} + +static struct tnum __tnum_abs(struct tnum x, int width) +{ + if (__msb(x.value, width)) + return __tnum_neg_width(x, width); + else + return x; +} + +/* __tnum_sdiv, a helper for tnum_sdiv. + * @a: tnum a, a's sign is fixed, __msb(a.mask) == 0 + * @b: tnum b, b's sign is fixed, __msb(b.mask) == 0 + * + * This function reuses tnum_udiv by operating on the absolute values of a and b, + * and then adjusting the sign of the result based on C's division rules. + * Here we don't need to specially handle the case of [S64_MIN / -1], because + * after __tnum_abs, S64_MIN becomes (S64_MAX + 1), and the behavior of + * unsigned [(S64_MAX + 1) / 1] is normal. + */ +static struct tnum __tnum_sdiv(struct tnum a, struct tnum b, int width) +{ + if (__tnum_eqb(a, tnum_bottom) || __tnum_eqb(b, tnum_bottom)) + return tnum_bottom; + + struct tnum a_abs = __tnum_abs(a, width); + struct tnum b_abs = __tnum_abs(b, width); + struct tnum res_abs = tnum_udiv(a_abs, b_abs); + + if (__msb(a.value, width) == __msb(b.value, width)) + return res_abs; + else + return __tnum_neg_width(res_abs, width); +} + +struct tnum tnum_sdiv(struct tnum a, struct tnum b, bool alu32) +{ + if (tnum_is_const(b)) { + /* BPF div specification: x / 0 = 0 */ + if (b.value == 0) + return TNUM(0, 0); + if (tnum_is_const(a)) { + /* BPF div specification: S32_MIN / -1 = S32_MIN */ + if (alu32 && (u32)a.value == (u32)S32_MIN && (u32)b.value == (u32)-1) + return TNUM((u32)S32_MIN, 0); + /* BPF div specification: S64_MIN / -1 = S64_MIN */ + if (!alu32 && a.value == S64_MIN && b.value == (u64)-1) + return TNUM((u64)S64_MIN, 0); + s64 sval = (s64)a.value / (s64)b.value; + return TNUM((u64)sval, 0); + } + } + + if (b.value == 0) + return tnum_unknown; + + int width = alu32 ? 
32 : 64; + struct tnum a_pos = __tnum_get_positive(a, width); + struct tnum a_neg = __tnum_get_negative(a, width); + struct tnum b_pos = __tnum_get_positive(b, width); + struct tnum b_neg = __tnum_get_negative(b, width); + + struct tnum res_pos = __tnum_sdiv(a_pos, b_pos, width); + struct tnum res_neg = __tnum_sdiv(a_neg, b_neg, width); + struct tnum res_mix1 = __tnum_sdiv(a_pos, b_neg, width); + struct tnum res_mix2 = __tnum_sdiv(a_neg, b_pos, width); + + return tnum_union(tnum_union(res_pos, res_neg), + tnum_union(res_mix1, res_mix2)); +} + /* Note that if a and b disagree - i.e. one has a 'known 1' where the other has * a 'known 0' - this will return a 'known 1' for that bit. */ @@ -186,6 +339,10 @@ struct tnum tnum_intersect(struct tnum a, struct tnum b) */ struct tnum tnum_union(struct tnum a, struct tnum b) { + if (__tnum_eqb(a, tnum_bottom)) + return b; + if (__tnum_eqb(b, tnum_bottom)) + return a; u64 v = a.value & b.value; u64 mu = (a.value ^ b.value) | a.mask | b.mask; diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index d6b8a77fbe3bf..df04a35153ef0 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -15076,6 +15076,218 @@ static void scalar_min_max_mul(struct bpf_reg_state *dst_reg, } } +static void __scalar32_min_max_join(s32 *res_min, s32 *res_max, s32 x_min, s32 x_max) +{ + *res_min = min(*res_min, x_min); + *res_max = max(*res_max, x_max); +} + +static void __scalar_min_max_join(s64 *res_min, s64 *res_max, s64 x_min, s64 x_max) +{ + *res_min = min(*res_min, x_min); + *res_max = max(*res_max, x_max); +} + +static void scalar32_min_max_udiv(struct bpf_reg_state *dst_reg, + struct bpf_reg_state *src_reg) +{ + u32 *dst_umin = &dst_reg->u32_min_value; + u32 *dst_umax = &dst_reg->u32_max_value; + u32 umin_val = src_reg->u32_min_value; + u32 umax_val = src_reg->u32_max_value; + + if (umin_val == 0) { + /* BPF div specification: x / 0 = 0 + * 1. If umin_val == umax_val == 0, i.e. divisor is certainly 0, + * then the result must be 0, [a,b] / [0,0] = [0,0]. + * 2. If umin_val == 0 && umax_val != 0, then dst_umin = x / 0 = 0, + * dst_umax = dst_umax / 1, remains unchanged, [a,b] / [0,x] = [0,b]. + */ + *dst_umin = 0; + if (umax_val == 0) + *dst_umax = 0; + } else { + *dst_umin = *dst_umin / umax_val; + *dst_umax = *dst_umax / umin_val; + } + + /* Reset signed interval to TOP. */ + dst_reg->s32_min_value = S32_MIN; + dst_reg->s32_max_value = S32_MAX; +} + +static void scalar_min_max_udiv(struct bpf_reg_state *dst_reg, + struct bpf_reg_state *src_reg) +{ + u64 *dst_umin = &dst_reg->umin_value; + u64 *dst_umax = &dst_reg->umax_value; + u64 umin_val = src_reg->umin_value; + u64 umax_val = src_reg->umax_value; + + if (umin_val == 0) { + /* BPF div specification: x / 0 = 0 + * 1. If umin_val == umax_val == 0, i.e. divisor is certainly 0, + * then the result must be 0, [a,b] / [0,0] = [0,0]. + * 2. If umin_val == 0 && umax_val != 0, then dst_umin = x / 0 = 0, + * dst_umax = dst_umax / 1, remains unchanged, [a,b] / [0,x] = [0,b]. + */ + *dst_umin = 0; + if (umax_val == 0) + *dst_umax = 0; + } else { + *dst_umin = *dst_umin / umax_val; + *dst_umax = *dst_umax / umin_val; + } + + /* Reset signed interval to TOP. 
*/ + dst_reg->smin_value = S64_MIN; + dst_reg->smax_value = S64_MAX; +} + +static s32 __bpf_sdiv32(s32 a, s32 b) +{ + /* BPF div specification: x / 0 = 0 */ + if (unlikely(b == 0)) + return 0; + /* BPF div specification: S32_MIN / -1 = S32_MIN */ + if (unlikely(a == S32_MIN && b == -1)) + return S32_MIN; + return a / b; +} + +/* The divisor interval does not cross 0, + * i.e. src_min and src_max have the same sign. + */ +static void __sdiv32_range(s32 dst_min, s32 dst_max, s32 src_min, s32 src_max, + s32 *res_min, s32 *res_max) +{ + s32 tmp_res[4] = { + __bpf_sdiv32(dst_min, src_min), + __bpf_sdiv32(dst_min, src_max), + __bpf_sdiv32(dst_max, src_min), + __bpf_sdiv32(dst_max, src_max) + }; + + *res_min = min_array(tmp_res, 4); + *res_max = max_array(tmp_res, 4); +} + +static void scalar32_min_max_sdiv(struct bpf_reg_state *dst_reg, + struct bpf_reg_state *src_reg) +{ + s32 *dst_smin = &dst_reg->s32_min_value; + s32 *dst_smax = &dst_reg->s32_max_value; + s32 smin_val = src_reg->s32_min_value; + s32 smax_val = src_reg->s32_max_value; + s32 res_min, res_max, tmp_min, tmp_max; + + if (smin_val <= 0 && smax_val >= 0) { + /* BPF div specification: x / 0 = 0 + * Set initial result to 0, as 0 is in the divisor interval. + */ + res_min = 0; + res_max = 0; + /* negative divisor interval: [a_min,a_max] / [b_min,-1] */ + if (smin_val < 0) { + __sdiv32_range(*dst_smin, *dst_smax, smin_val, -1, + &tmp_min, &tmp_max); + __scalar32_min_max_join(&res_min, &res_max, tmp_min, tmp_max); + } + /* positive divisor interval: [a_min,a_max] / [1,b_max] */ + if (smax_val > 0) { + __sdiv32_range(*dst_smin, *dst_smax, 1, smax_val, + &tmp_min, &tmp_max); + __scalar32_min_max_join(&res_min, &res_max, tmp_min, tmp_max); + } + } else { + __sdiv32_range(*dst_smin, *dst_smax, smin_val, smax_val, + &res_min, &res_max); + } + + /* BPF div specification: S32_MIN / -1 = S32_MIN */ + if (*dst_smin == S32_MIN && smin_val <= -1 && smax_val >= -1) + res_min = S32_MIN; + + *dst_smin = res_min; + *dst_smax = res_max; + + /* Reset unsigned interval to TOP. */ + dst_reg->u32_min_value = 0; + dst_reg->u32_max_value = U32_MAX; +} + +static s64 __bpf_sdiv(s64 a, s64 b) +{ + /* BPF div specification: x / 0 = 0 */ + if (unlikely(b == 0)) + return 0; + /* BPF div specification: S64_MIN / -1 = S64_MIN */ + if (unlikely(a == S64_MIN && b == -1)) + return S64_MIN; + return a / b; +} + +/* The divisor interval does not cross 0, + * i.e. src_min and src_max have the same sign. + */ +static void __sdiv_range(s64 dst_min, s64 dst_max, s64 src_min, s64 src_max, + s64 *res_min, s64 *res_max) +{ + s64 tmp_res[4] = { + __bpf_sdiv(dst_min, src_min), + __bpf_sdiv(dst_min, src_max), + __bpf_sdiv(dst_max, src_min), + __bpf_sdiv(dst_max, src_max) + }; + + *res_min = min_array(tmp_res, 4); + *res_max = max_array(tmp_res, 4); +} + +static void scalar_min_max_sdiv(struct bpf_reg_state *dst_reg, + struct bpf_reg_state *src_reg) +{ + s64 *dst_smin = &dst_reg->smin_value; + s64 *dst_smax = &dst_reg->smax_value; + s64 smin_val = src_reg->smin_value; + s64 smax_val = src_reg->smax_value; + s64 res_min, res_max, tmp_min, tmp_max; + + if (smin_val <= 0 && smax_val >= 0) { + /* BPF div specification: x / 0 = 0 + * Set initial result to 0, as 0 is in the divisor interval.
+ */ + res_min = 0; + res_max = 0; + /* negative divisor interval: [a_min,a_max] / [b_min,-1] */ + if (smin_val < 0) { + __sdiv_range(*dst_smin, *dst_smax, smin_val, -1, + &tmp_min, &tmp_max); + __scalar_min_max_join(&res_min, &res_max, tmp_min, tmp_max); + } + /* positive divisor interval: [a_min,a_max] / [1,b_max] */ + if (smax_val > 0) { + __sdiv_range(*dst_smin, *dst_smax, 1, smax_val, + &tmp_min, &tmp_max); + __scalar_min_max_join(&res_min, &res_max, tmp_min, tmp_max); + } + } else { + __sdiv_range(*dst_smin, *dst_smax, smin_val, smax_val, + &res_min, &res_max); + } + + /* BPF div specification: S64_MIN / -1 = S64_MIN */ + if (*dst_smin == S64_MIN && smin_val <= -1 && smax_val >= -1) + res_min = S64_MIN; + + *dst_smin = res_min; + *dst_smax = res_max; + + /* Reset unsigned interval to TOP. */ + dst_reg->umin_value = 0; + dst_reg->umax_value = U64_MAX; +} + static void scalar32_min_max_and(struct bpf_reg_state *dst_reg, struct bpf_reg_state *src_reg) { @@ -15479,6 +15691,7 @@ static bool is_safe_to_compute_dst_reg_range(struct bpf_insn *insn, case BPF_XOR: case BPF_OR: case BPF_MUL: + case BPF_DIV: return true; /* Shift operators range is only computable if shift dimension operand @@ -15504,6 +15717,7 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env, struct bpf_reg_state src_reg) { u8 opcode = BPF_OP(insn->code); + s16 off = insn->off; bool alu32 = (BPF_CLASS(insn->code) != BPF_ALU64); int ret; @@ -15555,6 +15769,17 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env, scalar32_min_max_mul(dst_reg, &src_reg); scalar_min_max_mul(dst_reg, &src_reg); break; + case BPF_DIV: + if (off == 1) { + dst_reg->var_off = tnum_sdiv(dst_reg->var_off, src_reg.var_off, alu32); + scalar32_min_max_sdiv(dst_reg, &src_reg); + scalar_min_max_sdiv(dst_reg, &src_reg); + } else { + dst_reg->var_off = tnum_udiv(dst_reg->var_off, src_reg.var_off); + scalar32_min_max_udiv(dst_reg, &src_reg); + scalar_min_max_udiv(dst_reg, &src_reg); + } + break; + case BPF_AND: dst_reg->var_off = tnum_and(dst_reg->var_off, src_reg.var_off); scalar32_min_max_and(dst_reg, &src_reg); diff --git a/tools/testing/selftests/bpf/progs/verifier_value_illegal_alu.c b/tools/testing/selftests/bpf/progs/verifier_value_illegal_alu.c index 2129e4353fd97..4d8273c258d51 100644 --- a/tools/testing/selftests/bpf/progs/verifier_value_illegal_alu.c +++ b/tools/testing/selftests/bpf/progs/verifier_value_illegal_alu.c @@ -173,14 +173,15 @@ __naked void flow_keys_illegal_variable_offset_alu(void) asm volatile(" \ r6 = r1; \ r7 = *(u64*)(r6 + %[flow_keys_off]); \ - r8 = 8; \ - r8 /= 1; \ + call %[bpf_get_prandom_u32]; \ + r8 = r0; \ r8 &= 8; \ r7 += r8; \ r0 = *(u64*)(r7 + 0); \ exit; \ " : - : __imm_const(flow_keys_off, offsetof(struct __sk_buff, flow_keys)) + : __imm_const(flow_keys_off, offsetof(struct __sk_buff, flow_keys)), + __imm(bpf_get_prandom_u32) : __clobber_all); } From 5f0753137dd149060f795e8ce2f36062ff4aa255 Mon Sep 17 00:00:00 2001 From: Yazhou Tang Date: Sun, 21 Dec 2025 17:39:54 +0800 Subject: [PATCH 2/2] selftests/bpf: Add tests for BPF_DIV analysis Now that BPF_DIV has value tracking support via interval and tnum analysis, add selftests to cover various cases of signed and unsigned division operations, including edge cases such as division by zero and signed division overflow.
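For reference, a minimal C model of the division semantics the tests expect (an illustration only, not part of the patch; it mirrors the UDIV/SDIV rules and the __bpf_sdiv()/__bpf_sdiv32() helpers from the previous patch, with INT_MIN taking the place of LLONG_MIN in the 32-bit case):

  #include <limits.h>

  /* BPF division never traps: x / 0 yields 0, and LLONG_MIN / -1 yields LLONG_MIN. */
  static long long sdiv64_model(long long dividend, long long divisor)
  {
          if (divisor == 0)
                  return 0;
          if (dividend == LLONG_MIN && divisor == -1)
                  return LLONG_MIN;
          return dividend / divisor;
  }

  /* Unsigned division: only the divide-by-zero case is special. */
  static unsigned long long udiv64_model(unsigned long long dividend, unsigned long long divisor)
  {
          return divisor ? dividend / divisor : 0;
  }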
The selftests themselves are built around dead code elimination: if the BPF verifier can precisely analyze the result of a division operation, it can prune the path that leads to an error (here an invalid memory access is used as the error case), allowing the program to pass verification. Co-developed-by: Shenghao Yuan Signed-off-by: Shenghao Yuan Co-developed-by: Tianci Cao Signed-off-by: Tianci Cao Signed-off-by: Yazhou Tang --- .../selftests/bpf/prog_tests/verifier.c | 2 + .../selftests/bpf/progs/verifier_div_bounds.c | 404 ++++++++++++++++++ 2 files changed, 406 insertions(+) create mode 100644 tools/testing/selftests/bpf/progs/verifier_div_bounds.c diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c index 5829ffd70f8f2..d892ad7b688e4 100644 --- a/tools/testing/selftests/bpf/prog_tests/verifier.c +++ b/tools/testing/selftests/bpf/prog_tests/verifier.c @@ -33,6 +33,7 @@ #include "verifier_direct_packet_access.skel.h" #include "verifier_direct_stack_access_wraparound.skel.h" #include "verifier_div0.skel.h" +#include "verifier_div_bounds.skel.h" #include "verifier_div_overflow.skel.h" #include "verifier_global_subprogs.skel.h" #include "verifier_global_ptr_args.skel.h" @@ -174,6 +175,7 @@ void test_verifier_d_path(void) { RUN(verifier_d_path); } void test_verifier_direct_packet_access(void) { RUN(verifier_direct_packet_access); } void test_verifier_direct_stack_access_wraparound(void) { RUN(verifier_direct_stack_access_wraparound); } void test_verifier_div0(void) { RUN(verifier_div0); } +void test_verifier_div_bounds(void) { RUN(verifier_div_bounds); } void test_verifier_div_overflow(void) { RUN(verifier_div_overflow); } void test_verifier_global_subprogs(void) { RUN(verifier_global_subprogs); } void test_verifier_global_ptr_args(void) { RUN(verifier_global_ptr_args); } diff --git a/tools/testing/selftests/bpf/progs/verifier_div_bounds.c b/tools/testing/selftests/bpf/progs/verifier_div_bounds.c new file mode 100644 index 0000000000000..b6f790fc8b638 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_div_bounds.c @@ -0,0 +1,404 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <limits.h> +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +/* This file contains unit tests for signed/unsigned division + * operations, focusing on verifying whether the BPF verifier's + * tnum and interval analysis modules soundly and precisely + * compute the results.
+ */ + +SEC("socket") +__description("UDIV32, non-zero divisor") +__success __retval(0) __log_level(2) +__msg("w1 /= w2 {{.*}}; R1=scalar(smin=smin32=0,smax=umax=smax32=umax32=4,var_off=(0x0; 0x7))") +__naked void udiv32_non_zero(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + w1 = w0; \ + w2 = w0; \ + w1 &= 8; \ + w1 |= 1; \ + w2 &= 1; \ + w2 |= 2; \ + w1 /= w2; \ + if w1 <= 4 goto l0_%=; \ + /* Precise analysis will prune the path with error code */\ + r0 = *(u64 *)(r1 + 0); \ + exit; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("UDIV32, zero divisor") +__success __retval(0) __log_level(2) +__msg("w1 /= w2 {{.*}}; R1=0 R2=0") +__naked void udiv32_zero_divisor(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + w1 = w0; \ + w1 &= 8; \ + w1 |= 1; \ + w2 = 0; \ + w1 /= w2; \ + if w1 == 0 goto l0_%=; \ + r0 = *(u64 *)(r1 + 0); \ + exit; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("UDIV64, non-zero divisor") +__success __retval(0) __log_level(2) +__msg("r1 /= r2 {{.*}}; R1=scalar(smin=smin32=0,smax=umax=smax32=umax32=4,var_off=(0x0; 0x7))") +__naked void udiv64_non_zero(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + r1 = r0; \ + r2 = r0; \ + r1 &= 8; \ + r1 |= 1; \ + r2 &= 1; \ + r2 |= 2; \ + r1 /= r2; \ + if r1 <= 4 goto l0_%=; \ + r0 = *(u64 *)(r1 + 0); \ + exit; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("UDIV64, zero divisor") +__success __retval(0) __log_level(2) +__msg("r1 /= r2 {{.*}}; R1=0 R2=0") +__naked void udiv64_zero_divisor(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + r1 = r0; \ + r1 &= 8; \ + r1 |= 1; \ + r2 = 0; \ + r1 /= r2; \ + if r1 == 0 goto l0_%=; \ + r0 = *(u64 *)(r1 + 0); \ + exit; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("SDIV32, non-zero divisor") +__success __retval(0) __log_level(2) +__msg("w1 s/= w2 {{.*}}; R1=scalar(smin=smin32=0,smax=umax=smax32=umax32=4,var_off=(0x0; 0x7))") +__naked void sdiv32_non_zero(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + w1 = w0; \ + w2 = w0; \ + w1 &= 8; \ + w1 |= 1; \ + w2 &= 1; \ + w2 |= 2; \ + w1 s/= w2; \ + if w1 s<= 4 goto l0_%=; \ + r0 = *(u64 *)(r1 + 0); \ + exit; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("SDIV32, non-zero divisor, negative dividend") +__success __retval(0) __log_level(2) +__msg("w1 s/= w2 {{.*}}; R1=scalar(smin=umin=umin32=0xfffffffc,smax=umax=0xffffffff,smin32=-4,smax32=-1,var_off=(0xfffffffc; 0x3))") +__naked void sdiv32_negative_dividend(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + w1 = w0; \ + w2 = w0; \ + w1 &= 8; \ + w1 |= 1; \ + w1 = -w1; \ + w2 &= 1; \ + w2 |= 2; \ + w1 s/= w2; \ + if w1 s>= -4 goto l0_%=; \ + r0 = *(u64 *)(r1 + 0); \ + exit; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("SDIV32, non-zero divisor, negative divisor") +__success __retval(0) __log_level(2) +__msg("w1 s/= w2 {{.*}}; R1=scalar(smin=0,smax=umax=0xffffffff,smin32=-4,smax32=0,var_off=(0x0; 0xffffffff))") +__naked void sdiv32_negative_divisor(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + w1 = w0; \ + w2 = w0; \ + w1 &= 8; \ + w1 |= 1; \ + w2 &= 1; \ + 
w2 |= 2; \ + w2 = -w2; \ + w1 s/= w2; \ + if w1 s>= -4 goto l0_%=; \ + r0 = *(u64 *)(r1 + 0); \ + exit; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("SDIV32, non-zero divisor, both negative") +__success __retval(0) __log_level(2) +__msg("w1 s/= w2 {{.*}}; R1=scalar(smin=smin32=0,smax=umax=smax32=umax32=4,var_off=(0x0; 0x7))") +__naked void sdiv32_both_negative(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + w1 = w0; \ + w2 = w0; \ + w1 &= 8; \ + w1 |= 1; \ + w2 &= 1; \ + w2 |= 2; \ + w1 = -w1; \ + w2 = -w2; \ + w1 s/= w2; \ + if w1 s<= 4 goto l0_%=; \ + r0 = *(u64 *)(r1 + 0); \ + exit; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("SDIV32, zero divisor") +__success __retval(0) __log_level(2) +__msg("w1 s/= w2 {{.*}}; R1=0 R2=0") +__naked void sdiv32_zero_divisor(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + w1 = w0; \ + w1 &= 8; \ + w1 |= 1; \ + w2 = 0; \ + w1 s/= w2; \ + if w1 == 0 goto l0_%=; \ + r0 = *(u64 *)(r1 + 0); \ + exit; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("SDIV32, S32_MIN/-1") +__success __retval(0) __log_level(2) +__msg("w2 s/= -1 {{.*}}; R2=0x80000000") +__naked void sdiv32_overflow(void) +{ + asm volatile (" \ + w1 = %[int_min]; \ + w2 = w1; \ + w2 s/= -1; \ + if w1 == w2 goto l0_%=; \ + r0 = *(u64 *)(r1 + 0); \ + exit; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(int_min, INT_MIN) + : __clobber_all); +} + + +SEC("socket") +__description("SDIV64, non-zero divisor") +__success __retval(0) __log_level(2) +__msg("r1 s/= r2 {{.*}}; R1=scalar(smin=smin32=0,smax=umax=smax32=umax32=4,var_off=(0x0; 0x7))") +__naked void sdiv64_non_zero(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + r1 = r0; \ + r2 = r0; \ + r1 &= 8; \ + r1 |= 1; \ + r2 &= 1; \ + r2 |= 2; \ + r1 s/= r2; \ + if r1 s<= 4 goto l0_%=; \ + r0 = *(u64 *)(r1 + 0); \ + exit; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("SDIV64, non-zero divisor, negative dividend") +__success __retval(0) __log_level(2) +__msg("r1 s/= r2 {{.*}}; R1=scalar(smin=smin32=-4,smax=smax32=0)") +__naked void sdiv64_negative_dividend(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + r1 = r0; \ + r2 = r0; \ + r1 &= 8; \ + r1 |= 1; \ + r1 = -r1; \ + r2 &= 1; \ + r2 |= 2; \ + r1 s/= r2; \ + if r1 s>= -4 goto l0_%=; \ + r0 = *(u64 *)(r1 + 0); \ + exit; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("SDIV64, non-zero divisor, negative divisor") +__success __retval(0) __log_level(2) +__msg("r1 s/= r2 {{.*}}; R1=scalar(smin=smin32=-4,smax=smax32=0)") +__naked void sdiv64_negative_divisor(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + r1 = r0; \ + r2 = r0; \ + r1 &= 8; \ + r1 |= 1; \ + r2 &= 1; \ + r2 |= 2; \ + r2 = -r2; \ + r1 s/= r2; \ + if r1 s>= -4 goto l0_%=; \ + r0 = *(u64 *)(r1 + 0); \ + exit; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("SDIV64, non-zero divisor, both negative") +__success __retval(0) __log_level(2) +__msg("r1 s/= r2 {{.*}}; R1=scalar(smin=smin32=0,smax=umax=smax32=umax32=4,var_off=(0x0; 0x7))") +__naked void sdiv64_both_negative(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + r1 
= r0; \ + r2 = r0; \ + r1 &= 8; \ + r1 |= 1; \ + r2 &= 1; \ + r2 |= 2; \ + r1 = -r1; \ + r2 = -r2; \ + r1 s/= r2; \ + if r1 s<= 4 goto l0_%=; \ + r0 = *(u64 *)(r1 + 0); \ + exit; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("SDIV64, zero divisor") +__success __retval(0) __log_level(2) +__msg("r1 s/= r2 {{.*}}; R1=0 R2=0") +__naked void sdiv64_zero_divisor(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + r1 = r0; \ + r1 &= 8; \ + r1 |= 1; \ + r2 = 0; \ + r1 s/= r2; \ + if r1 == 0 goto l0_%=; \ + r0 = *(u64 *)(r1 + 0); \ + exit; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("SDIV64, S64_MIN/-1") +__success __retval(0) __log_level(2) +__msg("r2 s/= -1 {{.*}}; R2=0x8000000000000000") +__naked void sdiv64_overflow(void) +{ + asm volatile (" \ + r1 = %[llong_min] ll; \ + r2 = r1; \ + r2 s/= -1; \ + if r1 == r2 goto l0_%=; \ + r0 = *(u64 *)(r1 + 0); \ + exit; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(llong_min, LLONG_MIN) + : __clobber_all); +} \ No newline at end of file