Skip to content

Commit ded1ea2

Browse files
eddyz87 and gregkh
authored and committed
bpf: Fix u32/s32 bounds when ranges cross min/max boundary
[ Upstream commit fbc7aef ] Same as in __reg64_deduce_bounds(), refine s32/u32 ranges in __reg32_deduce_bounds() in the following situations: - s32 range crosses U32_MAX/0 boundary, positive part of the s32 range overlaps with u32 range: 0 U32_MAX | [xxxxxxxxxxxxxx u32 range xxxxxxxxxxxxxx] | |----------------------------|----------------------------| |xxxxx s32 range xxxxxxxxx] [xxxxxxx| 0 S32_MAX S32_MIN -1 - s32 range crosses U32_MAX/0 boundary, negative part of the s32 range overlaps with u32 range: 0 U32_MAX | [xxxxxxxxxxxxxx u32 range xxxxxxxxxxxxxx] | |----------------------------|----------------------------| |xxxxxxxxx] [xxxxxxxxxxxx s32 range | 0 S32_MAX S32_MIN -1 - No refinement if ranges overlap in two intervals. This helps for e.g. consider the following program: call %[bpf_get_prandom_u32]; w0 &= 0xffffffff; if w0 < 0x3 goto 1f; // on fall-through u32 range [3..U32_MAX] if w0 s> 0x1 goto 1f; // on fall-through s32 range [S32_MIN..1] if w0 s< 0x0 goto 1f; // range can be narrowed to [S32_MIN..-1] r10 = 0; 1: ...; The reg_bounds.c selftest is updated to incorporate identical logic, refinement based on non-overflowing range halves: ((x ∩ [0, smax]) ∩ (y ∩ [0, smax])) ∪ ((x ∩ [smin,-1]) ∩ (y ∩ [smin,-1])) Reported-by: Andrea Righi <arighi@nvidia.com> Reported-by: Emil Tsalapatis <emil@etsalapatis.com> Closes: https://lore.kernel.org/bpf/aakqucg4vcujVwif@gpd4/T/ Reviewed-by: Emil Tsalapatis <emil@etsalapatis.com> Acked-by: Shung-Hsi Yu <shung-hsi.yu@suse.com> Signed-off-by: Eduard Zingerman <eddyz87@gmail.com> Link: https://lore.kernel.org/r/20260306-bpf-32-bit-range-overflow-v3-1-f7f67e060a6b@gmail.com Signed-off-by: Alexei Starovoitov <ast@kernel.org> Signed-off-by: Paul Chaignon <paul.chaignon@gmail.com> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
1 parent 58d4c4a commit ded1ea2

2 files changed

Lines changed: 82 additions & 4 deletions

File tree

kernel/bpf/verifier.c

Lines changed: 24 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2046,6 +2046,30 @@ static void __reg32_deduce_bounds(struct bpf_reg_state *reg)
20462046
if ((u32)reg->s32_min_value <= (u32)reg->s32_max_value) {
20472047
reg->u32_min_value = max_t(u32, reg->s32_min_value, reg->u32_min_value);
20482048
reg->u32_max_value = min_t(u32, reg->s32_max_value, reg->u32_max_value);
2049+
} else {
2050+
if (reg->u32_max_value < (u32)reg->s32_min_value) {
2051+
/* See __reg64_deduce_bounds() for detailed explanation.
2052+
* Refine ranges in the following situation:
2053+
*
2054+
* 0 U32_MAX
2055+
* | [xxxxxxxxxxxxxx u32 range xxxxxxxxxxxxxx] |
2056+
* |----------------------------|----------------------------|
2057+
* |xxxxx s32 range xxxxxxxxx] [xxxxxxx|
2058+
* 0 S32_MAX S32_MIN -1
2059+
*/
2060+
reg->s32_min_value = (s32)reg->u32_min_value;
2061+
reg->u32_max_value = min_t(u32, reg->u32_max_value, reg->s32_max_value);
2062+
} else if ((u32)reg->s32_max_value < reg->u32_min_value) {
2063+
/*
2064+
* 0 U32_MAX
2065+
* | [xxxxxxxxxxxxxx u32 range xxxxxxxxxxxxxx] |
2066+
* |----------------------------|----------------------------|
2067+
* |xxxxxxxxx] [xxxxxxxxxxxx s32 range |
2068+
* 0 S32_MAX S32_MIN -1
2069+
*/
2070+
reg->s32_max_value = (s32)reg->u32_max_value;
2071+
reg->u32_min_value = max_t(u32, reg->u32_min_value, reg->s32_min_value);
2072+
}
20492073
}
20502074
}
20512075

tools/testing/selftests/bpf/prog_tests/reg_bounds.c

Lines changed: 58 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -422,15 +422,69 @@ static bool is_valid_range(enum num_t t, struct range x)
422422
}
423423
}
424424

425-
static struct range range_improve(enum num_t t, struct range old, struct range new)
425+
static struct range range_intersection(enum num_t t, struct range old, struct range new)
426426
{
427427
return range(t, max_t(t, old.a, new.a), min_t(t, old.b, new.b));
428428
}
429429

430+
/*
431+
* Result is precise when 'x' and 'y' overlap or form a continuous range,
432+
* result is an over-approximation if 'x' and 'y' do not overlap.
433+
*/
434+
static struct range range_union(enum num_t t, struct range x, struct range y)
435+
{
436+
if (!is_valid_range(t, x))
437+
return y;
438+
if (!is_valid_range(t, y))
439+
return x;
440+
return range(t, min_t(t, x.a, y.a), max_t(t, x.b, y.b));
441+
}
442+
443+
/*
444+
* This function attempts to improve x range intersecting it with y.
445+
* range_cast(... to_t ...) looses precision for ranges that pass to_t
446+
* min/max boundaries. To avoid such precision loses this function
447+
* splits both x and y into halves corresponding to non-overflowing
448+
* sub-ranges: [0, smin] and [smax, -1].
449+
* Final result is computed as follows:
450+
*
451+
* ((x ∩ [0, smax]) ∩ (y ∩ [0, smax])) ∪
452+
* ((x ∩ [smin,-1]) ∩ (y ∩ [smin,-1]))
453+
*
454+
* Precision might still be lost if final union is not a continuous range.
455+
*/
456+
static struct range range_refine_in_halves(enum num_t x_t, struct range x,
457+
enum num_t y_t, struct range y)
458+
{
459+
struct range x_pos, x_neg, y_pos, y_neg, r_pos, r_neg;
460+
u64 smax, smin, neg_one;
461+
462+
if (t_is_32(x_t)) {
463+
smax = (u64)(u32)S32_MAX;
464+
smin = (u64)(u32)S32_MIN;
465+
neg_one = (u64)(u32)(s32)(-1);
466+
} else {
467+
smax = (u64)S64_MAX;
468+
smin = (u64)S64_MIN;
469+
neg_one = U64_MAX;
470+
}
471+
x_pos = range_intersection(x_t, x, range(x_t, 0, smax));
472+
x_neg = range_intersection(x_t, x, range(x_t, smin, neg_one));
473+
y_pos = range_intersection(y_t, y, range(x_t, 0, smax));
474+
y_neg = range_intersection(y_t, y, range(y_t, smin, neg_one));
475+
r_pos = range_intersection(x_t, x_pos, range_cast(y_t, x_t, y_pos));
476+
r_neg = range_intersection(x_t, x_neg, range_cast(y_t, x_t, y_neg));
477+
return range_union(x_t, r_pos, r_neg);
478+
479+
}
480+
430481
static struct range range_refine(enum num_t x_t, struct range x, enum num_t y_t, struct range y)
431482
{
432483
struct range y_cast;
433484

485+
if (t_is_32(x_t) == t_is_32(y_t))
486+
x = range_refine_in_halves(x_t, x, y_t, y);
487+
434488
y_cast = range_cast(y_t, x_t, y);
435489

436490
/* If we know that
@@ -444,7 +498,7 @@ static struct range range_refine(enum num_t x_t, struct range x, enum num_t y_t,
444498
*/
445499
if (x_t == S64 && y_t == S32 && y_cast.a <= S32_MAX && y_cast.b <= S32_MAX &&
446500
(s64)x.a >= S32_MIN && (s64)x.b <= S32_MAX)
447-
return range_improve(x_t, x, y_cast);
501+
return range_intersection(x_t, x, y_cast);
448502

449503
/* the case when new range knowledge, *y*, is a 32-bit subregister
450504
* range, while previous range knowledge, *x*, is a full register
@@ -462,11 +516,11 @@ static struct range range_refine(enum num_t x_t, struct range x, enum num_t y_t,
462516
x_swap = range(x_t, swap_low32(x.a, y_cast.a), swap_low32(x.b, y_cast.b));
463517
if (!is_valid_range(x_t, x_swap))
464518
return x;
465-
return range_improve(x_t, x, x_swap);
519+
return range_intersection(x_t, x, x_swap);
466520
}
467521

468522
/* otherwise, plain range cast and intersection works */
469-
return range_improve(x_t, x, y_cast);
523+
return range_intersection(x_t, x, y_cast);
470524
}
471525

472526
/* =======================

0 commit comments

Comments
 (0)