Skip to content

Commit f71b2f6

Browse files
kkdwvd authored and Alexei Starovoitov committed
bpf: Refactor map->off_arr handling
Refactor map->off_arr handling into generic functions that can work on their own without hardcoding map specific code. The btf_fields_offs structure is now returned from btf_parse_field_offs, which can be reused later for types in program BTF. All functions like copy_map_value, zero_map_value call generic underlying functions so that they can also be reused later for copying to values allocated in programs which encode specific fields. Later, some helper functions will also require access to this btf_field_offs structure to be able to skip over special fields at runtime. Signed-off-by: Kumar Kartikeya Dwivedi <memxor@gmail.com> Link: https://lore.kernel.org/r/20221103191013.1236066-9-memxor@gmail.com Signed-off-by: Alexei Starovoitov <ast@kernel.org>
1 parent db55911 commit f71b2f6

4 files changed

Lines changed: 89 additions & 81 deletions

File tree

include/linux/bpf.h

Lines changed: 24 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -341,57 +341,64 @@ static inline void bpf_long_memcpy(void *dst, const void *src, u32 size)
341341
}
342342

343343
/* copy everything but bpf_spin_lock, bpf_timer, and kptrs. There could be one of each. */
344-
static inline void __copy_map_value(struct bpf_map *map, void *dst, void *src, bool long_memcpy)
344+
static inline void bpf_obj_memcpy(struct btf_field_offs *foffs,
345+
void *dst, void *src, u32 size,
346+
bool long_memcpy)
345347
{
346348
u32 curr_off = 0;
347349
int i;
348350

349-
if (likely(!map->field_offs)) {
351+
if (likely(!foffs)) {
350352
if (long_memcpy)
351-
bpf_long_memcpy(dst, src, round_up(map->value_size, 8));
353+
bpf_long_memcpy(dst, src, round_up(size, 8));
352354
else
353-
memcpy(dst, src, map->value_size);
355+
memcpy(dst, src, size);
354356
return;
355357
}
356358

357-
for (i = 0; i < map->field_offs->cnt; i++) {
358-
u32 next_off = map->field_offs->field_off[i];
359+
for (i = 0; i < foffs->cnt; i++) {
360+
u32 next_off = foffs->field_off[i];
359361
u32 sz = next_off - curr_off;
360362

361363
memcpy(dst + curr_off, src + curr_off, sz);
362-
curr_off += map->field_offs->field_sz[i];
364+
curr_off += foffs->field_sz[i];
363365
}
364-
memcpy(dst + curr_off, src + curr_off, map->value_size - curr_off);
366+
memcpy(dst + curr_off, src + curr_off, size - curr_off);
365367
}
366368

367369
/* Copy a map value into dst while leaving the special BTF-managed fields
 * (bpf_spin_lock, bpf_timer, kptrs — see comment on bpf_obj_memcpy) in dst
 * untouched, as described by map->field_offs.
 */
static inline void copy_map_value(struct bpf_map *map, void *dst, void *src)
{
	bpf_obj_memcpy(map->field_offs, dst, src, map->value_size, false);
}
371373

372374
/* Like copy_map_value(), but when there are no special fields the copy is
 * done with bpf_long_memcpy() over value_size rounded up to 8 bytes.
 */
static inline void copy_map_value_long(struct bpf_map *map, void *dst, void *src)
{
	bpf_obj_memcpy(map->field_offs, dst, src, map->value_size, true);
}
376378

377-
static inline void zero_map_value(struct bpf_map *map, void *dst)
379+
/* Zero 'size' bytes at dst, skipping the special fields described by foffs
 * so that bpf_spin_lock/bpf_timer/kptr contents are preserved.
 */
static inline void bpf_obj_memzero(struct btf_field_offs *foffs, void *dst, u32 size)
{
	u32 curr_off = 0;
	int i;

	/* Fast path: no special fields, zero everything. */
	if (likely(!foffs)) {
		memset(dst, 0, size);
		return;
	}

	for (i = 0; i < foffs->cnt; i++) {
		u32 next_off = foffs->field_off[i];
		u32 sz = next_off - curr_off;

		memset(dst + curr_off, 0, sz);
		/* Advance past the sz zeroed bytes AND the preserved field;
		 * 'curr_off += field_sz[i]' alone dropped the zeroed span
		 * and desynchronized all subsequent offsets.
		 */
		curr_off += foffs->field_sz[i] + sz;
	}
	/* Tail after the last special field. */
	memset(dst + curr_off, 0, size - curr_off);
}
398+
399+
/* Zero a map value while leaving its special fields (tracked by
 * map->field_offs) intact.
 */
static inline void zero_map_value(struct bpf_map *map, void *dst)
{
	bpf_obj_memzero(map->field_offs, dst, map->value_size);
}
396403

397404
void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,

include/linux/btf.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -165,6 +165,7 @@ int btf_find_spin_lock(const struct btf *btf, const struct btf_type *t);
165165
int btf_find_timer(const struct btf *btf, const struct btf_type *t);
166166
struct btf_record *btf_parse_fields(const struct btf *btf, const struct btf_type *t,
167167
u32 field_mask, u32 value_size);
168+
struct btf_field_offs *btf_parse_field_offs(struct btf_record *rec);
168169
bool btf_type_is_void(const struct btf_type *t);
169170
s32 btf_find_by_name_kind(const struct btf *btf, const char *name, u8 kind);
170171
const struct btf_type *btf_type_skip_modifiers(const struct btf *btf,

kernel/bpf/btf.c

Lines changed: 55 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -3551,6 +3551,61 @@ struct btf_record *btf_parse_fields(const struct btf *btf, const struct btf_type
35513551
return ERR_PTR(ret);
35523552
}
35533553

3554+
static int btf_field_offs_cmp(const void *_a, const void *_b, const void *priv)
3555+
{
3556+
const u32 a = *(const u32 *)_a;
3557+
const u32 b = *(const u32 *)_b;
3558+
3559+
if (a < b)
3560+
return -1;
3561+
else if (a > b)
3562+
return 1;
3563+
return 0;
3564+
}
3565+
3566+
static void btf_field_offs_swap(void *_a, void *_b, int size, const void *priv)
3567+
{
3568+
struct btf_field_offs *foffs = (void *)priv;
3569+
u32 *off_base = foffs->field_off;
3570+
u32 *a = _a, *b = _b;
3571+
u8 *sz_a, *sz_b;
3572+
3573+
sz_a = foffs->field_sz + (a - off_base);
3574+
sz_b = foffs->field_sz + (b - off_base);
3575+
3576+
swap(*a, *b);
3577+
swap(*sz_a, *sz_b);
3578+
}
3579+
3580+
struct btf_field_offs *btf_parse_field_offs(struct btf_record *rec)
3581+
{
3582+
struct btf_field_offs *foffs;
3583+
u32 i, *off;
3584+
u8 *sz;
3585+
3586+
BUILD_BUG_ON(ARRAY_SIZE(foffs->field_off) != ARRAY_SIZE(foffs->field_sz));
3587+
if (IS_ERR_OR_NULL(rec) || WARN_ON_ONCE(rec->cnt > sizeof(foffs->field_off)))
3588+
return NULL;
3589+
3590+
foffs = kzalloc(sizeof(*foffs), GFP_KERNEL | __GFP_NOWARN);
3591+
if (!foffs)
3592+
return ERR_PTR(-ENOMEM);
3593+
3594+
off = foffs->field_off;
3595+
sz = foffs->field_sz;
3596+
for (i = 0; i < rec->cnt; i++) {
3597+
off[i] = rec->fields[i].offset;
3598+
sz[i] = btf_field_type_size(rec->fields[i].type);
3599+
}
3600+
foffs->cnt = rec->cnt;
3601+
3602+
if (foffs->cnt == 1)
3603+
return foffs;
3604+
sort_r(foffs->field_off, foffs->cnt, sizeof(foffs->field_off[0]),
3605+
btf_field_offs_cmp, btf_field_offs_swap, foffs);
3606+
return foffs;
3607+
}
3608+
35543609
static void __btf_struct_show(const struct btf *btf, const struct btf_type *t,
35553610
u32 type_id, void *data, u8 bits_offset,
35563611
struct btf_show *show)

kernel/bpf/syscall.c

Lines changed: 9 additions & 64 deletions
Original file line numberDiff line numberDiff line change
@@ -943,66 +943,6 @@ int map_check_no_btf(const struct bpf_map *map,
943943
return -ENOTSUPP;
944944
}
945945

946-
static int map_field_offs_cmp(const void *_a, const void *_b, const void *priv)
947-
{
948-
const u32 a = *(const u32 *)_a;
949-
const u32 b = *(const u32 *)_b;
950-
951-
if (a < b)
952-
return -1;
953-
else if (a > b)
954-
return 1;
955-
return 0;
956-
}
957-
958-
static void map_field_offs_swap(void *_a, void *_b, int size, const void *priv)
959-
{
960-
struct bpf_map *map = (struct bpf_map *)priv;
961-
u32 *off_base = map->field_offs->field_off;
962-
u32 *a = _a, *b = _b;
963-
u8 *sz_a, *sz_b;
964-
965-
sz_a = map->field_offs->field_sz + (a - off_base);
966-
sz_b = map->field_offs->field_sz + (b - off_base);
967-
968-
swap(*a, *b);
969-
swap(*sz_a, *sz_b);
970-
}
971-
972-
static int bpf_map_alloc_off_arr(struct bpf_map *map)
973-
{
974-
bool has_fields = !IS_ERR_OR_NULL(map->record);
975-
struct btf_field_offs *fo;
976-
struct btf_record *rec;
977-
u32 i, *off;
978-
u8 *sz;
979-
980-
if (!has_fields) {
981-
map->field_offs = NULL;
982-
return 0;
983-
}
984-
985-
fo = kzalloc(sizeof(*map->field_offs), GFP_KERNEL | __GFP_NOWARN);
986-
if (!fo)
987-
return -ENOMEM;
988-
map->field_offs = fo;
989-
990-
rec = map->record;
991-
off = fo->field_off;
992-
sz = fo->field_sz;
993-
for (i = 0; i < rec->cnt; i++) {
994-
*off++ = rec->fields[i].offset;
995-
*sz++ = btf_field_type_size(rec->fields[i].type);
996-
}
997-
fo->cnt = rec->cnt;
998-
999-
if (fo->cnt == 1)
1000-
return 0;
1001-
sort_r(fo->field_off, fo->cnt, sizeof(fo->field_off[0]),
1002-
map_field_offs_cmp, map_field_offs_swap, map);
1003-
return 0;
1004-
}
1005-
1006946
static int map_check_btf(struct bpf_map *map, const struct btf *btf,
1007947
u32 btf_key_id, u32 btf_value_id)
1008948
{
@@ -1097,6 +1037,7 @@ static int map_check_btf(struct bpf_map *map, const struct btf *btf,
10971037
static int map_create(union bpf_attr *attr)
10981038
{
10991039
int numa_node = bpf_map_attr_numa_node(attr);
1040+
struct btf_field_offs *foffs;
11001041
struct bpf_map *map;
11011042
int f_flags;
11021043
int err;
@@ -1176,13 +1117,17 @@ static int map_create(union bpf_attr *attr)
11761117
attr->btf_vmlinux_value_type_id;
11771118
}
11781119

1179-
err = bpf_map_alloc_off_arr(map);
1180-
if (err)
1120+
1121+
foffs = btf_parse_field_offs(map->record);
1122+
if (IS_ERR(foffs)) {
1123+
err = PTR_ERR(foffs);
11811124
goto free_map;
1125+
}
1126+
map->field_offs = foffs;
11821127

11831128
err = security_bpf_map_alloc(map);
11841129
if (err)
1185-
goto free_map_off_arr;
1130+
goto free_map_field_offs;
11861131

11871132
err = bpf_map_alloc_id(map);
11881133
if (err)
@@ -1206,7 +1151,7 @@ static int map_create(union bpf_attr *attr)
12061151

12071152
free_map_sec:
12081153
security_bpf_map_free(map);
1209-
free_map_off_arr:
1154+
free_map_field_offs:
12101155
kfree(map->field_offs);
12111156
free_map:
12121157
btf_put(map->btf);

0 commit comments

Comments
 (0)