Skip to content

Commit db55911

Browse files
kkdwvd authored and Alexei Starovoitov committed
bpf: Consolidate spin_lock, timer management into btf_record
Now that kptr_off_tab has been refactored into btf_record, and can hold more than one specific field type, accomodate bpf_spin_lock and bpf_timer as well. While they don't require any more metadata than offset, having all special fields in one place allows us to share the same code for allocated user defined types and handle both map values and these allocated objects in a similar fashion. As an optimization, we still keep spin_lock_off and timer_off offsets in the btf_record structure, just to avoid having to find the btf_field struct each time their offset is needed. This is mostly needed to manipulate such objects in a map value at runtime. It's ok to hardcode just one offset as more than one field is disallowed. Signed-off-by: Kumar Kartikeya Dwivedi <memxor@gmail.com> Link: https://lore.kernel.org/r/20221103191013.1236066-8-memxor@gmail.com Signed-off-by: Alexei Starovoitov <ast@kernel.org>
1 parent af085f5 commit db55911

12 files changed

Lines changed: 314 additions & 344 deletions

File tree

include/linux/bpf.h

Lines changed: 32 additions & 21 deletions
Original file line number | Diff line number | Diff line change
@@ -166,13 +166,13 @@ struct bpf_map_ops {
166166

167167
enum {
168168
/* Support at most 8 pointers in a BTF type */
169-
BTF_FIELDS_MAX = 8,
170-
BPF_MAP_OFF_ARR_MAX = BTF_FIELDS_MAX +
171-
1 + /* for bpf_spin_lock */
172-
1, /* for bpf_timer */
169+
BTF_FIELDS_MAX = 10,
170+
BPF_MAP_OFF_ARR_MAX = BTF_FIELDS_MAX,
173171
};
174172

175173
enum btf_field_type {
174+
BPF_SPIN_LOCK = (1 << 0),
175+
BPF_TIMER = (1 << 1),
176176
BPF_KPTR_UNREF = (1 << 2),
177177
BPF_KPTR_REF = (1 << 3),
178178
BPF_KPTR = BPF_KPTR_UNREF | BPF_KPTR_REF,
@@ -196,6 +196,8 @@ struct btf_field {
196196
struct btf_record {
197197
u32 cnt;
198198
u32 field_mask;
199+
int spin_lock_off;
200+
int timer_off;
199201
struct btf_field fields[];
200202
};
201203

@@ -220,10 +222,8 @@ struct bpf_map {
220222
u32 max_entries;
221223
u64 map_extra; /* any per-map-type extra fields */
222224
u32 map_flags;
223-
int spin_lock_off; /* >=0 valid offset, <0 error */
224-
struct btf_record *record;
225-
int timer_off; /* >=0 valid offset, <0 error */
226225
u32 id;
226+
struct btf_record *record;
227227
int numa_node;
228228
u32 btf_key_type_id;
229229
u32 btf_value_type_id;
@@ -257,9 +257,29 @@ struct bpf_map {
257257
bool frozen; /* write-once; write-protected by freeze_mutex */
258258
};
259259

260+
static inline const char *btf_field_type_name(enum btf_field_type type)
261+
{
262+
switch (type) {
263+
case BPF_SPIN_LOCK:
264+
return "bpf_spin_lock";
265+
case BPF_TIMER:
266+
return "bpf_timer";
267+
case BPF_KPTR_UNREF:
268+
case BPF_KPTR_REF:
269+
return "kptr";
270+
default:
271+
WARN_ON_ONCE(1);
272+
return "unknown";
273+
}
274+
}
275+
260276
static inline u32 btf_field_type_size(enum btf_field_type type)
261277
{
262278
switch (type) {
279+
case BPF_SPIN_LOCK:
280+
return sizeof(struct bpf_spin_lock);
281+
case BPF_TIMER:
282+
return sizeof(struct bpf_timer);
263283
case BPF_KPTR_UNREF:
264284
case BPF_KPTR_REF:
265285
return sizeof(u64);
@@ -272,6 +292,10 @@ static inline u32 btf_field_type_size(enum btf_field_type type)
272292
static inline u32 btf_field_type_align(enum btf_field_type type)
273293
{
274294
switch (type) {
295+
case BPF_SPIN_LOCK:
296+
return __alignof__(struct bpf_spin_lock);
297+
case BPF_TIMER:
298+
return __alignof__(struct bpf_timer);
275299
case BPF_KPTR_UNREF:
276300
case BPF_KPTR_REF:
277301
return __alignof__(u64);
@@ -288,22 +312,8 @@ static inline bool btf_record_has_field(const struct btf_record *rec, enum btf_f
288312
return rec->field_mask & type;
289313
}
290314

291-
static inline bool map_value_has_spin_lock(const struct bpf_map *map)
292-
{
293-
return map->spin_lock_off >= 0;
294-
}
295-
296-
static inline bool map_value_has_timer(const struct bpf_map *map)
297-
{
298-
return map->timer_off >= 0;
299-
}
300-
301315
static inline void check_and_init_map_value(struct bpf_map *map, void *dst)
302316
{
303-
if (unlikely(map_value_has_spin_lock(map)))
304-
memset(dst + map->spin_lock_off, 0, sizeof(struct bpf_spin_lock));
305-
if (unlikely(map_value_has_timer(map)))
306-
memset(dst + map->timer_off, 0, sizeof(struct bpf_timer));
307317
if (!IS_ERR_OR_NULL(map->record)) {
308318
struct btf_field *fields = map->record->fields;
309319
u32 cnt = map->record->cnt;
@@ -1740,6 +1750,7 @@ void btf_record_free(struct btf_record *rec);
17401750
void bpf_map_free_record(struct bpf_map *map);
17411751
struct btf_record *btf_record_dup(const struct btf_record *rec);
17421752
bool btf_record_equal(const struct btf_record *rec_a, const struct btf_record *rec_b);
1753+
void bpf_obj_free_timer(const struct btf_record *rec, void *obj);
17431754
void bpf_obj_free_fields(const struct btf_record *rec, void *obj);
17441755

17451756
struct bpf_map *bpf_map_get(u32 ufd);

include/linux/btf.h

Lines changed: 2 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -163,7 +163,8 @@ bool btf_member_is_reg_int(const struct btf *btf, const struct btf_type *s,
163163
u32 expected_offset, u32 expected_size);
164164
int btf_find_spin_lock(const struct btf *btf, const struct btf_type *t);
165165
int btf_find_timer(const struct btf *btf, const struct btf_type *t);
166-
struct btf_record *btf_parse_fields(const struct btf *btf, const struct btf_type *t);
166+
struct btf_record *btf_parse_fields(const struct btf *btf, const struct btf_type *t,
167+
u32 field_mask, u32 value_size);
167168
bool btf_type_is_void(const struct btf_type *t);
168169
s32 btf_find_by_name_kind(const struct btf *btf, const char *name, u8 kind);
169170
const struct btf_type *btf_type_skip_modifiers(const struct btf *btf,

kernel/bpf/arraymap.c

Lines changed: 6 additions & 13 deletions
Original file line number | Diff line number | Diff line change
@@ -306,13 +306,6 @@ static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key
306306
return 0;
307307
}
308308

309-
static void check_and_free_fields(struct bpf_array *arr, void *val)
310-
{
311-
if (map_value_has_timer(&arr->map))
312-
bpf_timer_cancel_and_free(val + arr->map.timer_off);
313-
bpf_obj_free_fields(arr->map.record, val);
314-
}
315-
316309
/* Called from syscall or from eBPF program */
317310
static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
318311
u64 map_flags)
@@ -334,21 +327,21 @@ static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
334327
return -EEXIST;
335328

336329
if (unlikely((map_flags & BPF_F_LOCK) &&
337-
!map_value_has_spin_lock(map)))
330+
!btf_record_has_field(map->record, BPF_SPIN_LOCK)))
338331
return -EINVAL;
339332

340333
if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
341334
val = this_cpu_ptr(array->pptrs[index & array->index_mask]);
342335
copy_map_value(map, val, value);
343-
check_and_free_fields(array, val);
336+
bpf_obj_free_fields(array->map.record, val);
344337
} else {
345338
val = array->value +
346339
(u64)array->elem_size * (index & array->index_mask);
347340
if (map_flags & BPF_F_LOCK)
348341
copy_map_value_locked(map, val, value, false);
349342
else
350343
copy_map_value(map, val, value);
351-
check_and_free_fields(array, val);
344+
bpf_obj_free_fields(array->map.record, val);
352345
}
353346
return 0;
354347
}
@@ -385,7 +378,7 @@ int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
385378
pptr = array->pptrs[index & array->index_mask];
386379
for_each_possible_cpu(cpu) {
387380
copy_map_value_long(map, per_cpu_ptr(pptr, cpu), value + off);
388-
check_and_free_fields(array, per_cpu_ptr(pptr, cpu));
381+
bpf_obj_free_fields(array->map.record, per_cpu_ptr(pptr, cpu));
389382
off += size;
390383
}
391384
rcu_read_unlock();
@@ -409,11 +402,11 @@ static void array_map_free_timers(struct bpf_map *map)
409402
int i;
410403

411404
/* We don't reset or free fields other than timer on uref dropping to zero. */
412-
if (!map_value_has_timer(map))
405+
if (!btf_record_has_field(map->record, BPF_TIMER))
413406
return;
414407

415408
for (i = 0; i < array->map.max_entries; i++)
416-
bpf_timer_cancel_and_free(array_map_elem_ptr(array, i) + map->timer_off);
409+
bpf_obj_free_timer(map->record, array_map_elem_ptr(array, i));
417410
}
418411

419412
/* Called when map->refcnt goes to zero, either from workqueue or from syscall */

kernel/bpf/bpf_local_storage.c

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -382,7 +382,7 @@ bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap,
382382
if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST) ||
383383
/* BPF_F_LOCK can only be used in a value with spin_lock */
384384
unlikely((map_flags & BPF_F_LOCK) &&
385-
!map_value_has_spin_lock(&smap->map)))
385+
!btf_record_has_field(smap->map.record, BPF_SPIN_LOCK)))
386386
return ERR_PTR(-EINVAL);
387387

388388
if (gfp_flags == GFP_KERNEL && (map_flags & ~BPF_F_LOCK) != BPF_NOEXIST)

0 commit comments

Comments (0)