Looks like we're almost there with the bpf syscall patches on the linux-morello list[1][2], so it seems like a good time to update the accompanying LTP tests.
Some of the bpf tests require either CAP_SYS_ADMIN or CAP_BPF, or enabling unprivileged bpf with: echo "0" > /proc/sys/kernel/unprivileged_bpf_disabled
Thanks,
Zach
Review branch: https://git.morello-project.org/zdleaf/morello-linux-test-project/-/tree/mor... Kernel changes: https://git.morello-project.org/zdleaf/linux/-/tree/morello/bpf_v5
[1] https://op-lists.linaro.org/archives/list/linux-morello@op-lists.linaro.org/... [2] famous last words
Zachary Leaf (3): bpf: align tests with PCuABI/uAPI bpf: add bpf_check_attr test runtest: add bpf to extended PCuABI syscall list
include/lapi/bpf.h | 354 +++++++++++++----- runtest/morello_transitional_extended | 13 + .../kernel/syscalls/bpf/bpf_check_attr.c | 62 +++ testcases/kernel/syscalls/bpf/bpf_common.c | 10 +- testcases/kernel/syscalls/bpf/bpf_map01.c | 12 +- testcases/kernel/syscalls/bpf/bpf_prog03.c | 4 +- 6 files changed, 352 insertions(+), 103 deletions(-) create mode 100644 testcases/kernel/syscalls/bpf/bpf_check_attr.c
-- 2.34.1
PCuABI/uAPI has been updated to allow capabilities to be passed to the bpf syscall.
Align LTP tests with the new uAPI by casting pointers to uintptr_t instead of to u64. This ensures that capabilities are passed in purecap applications and remains a cast to u64 for aarch64 applications.
In addition update the union bpf_attr definition in include/lapi/bpf.h to match the kernel as of v6.4.
Signed-off-by: Zachary Leaf zachary.leaf@arm.com --- include/lapi/bpf.h | 354 +++++++++++++++------ testcases/kernel/syscalls/bpf/bpf_common.c | 10 +- testcases/kernel/syscalls/bpf/bpf_map01.c | 12 +- testcases/kernel/syscalls/bpf/bpf_prog03.c | 4 +- 4 files changed, 277 insertions(+), 103 deletions(-)
diff --git a/include/lapi/bpf.h b/include/lapi/bpf.h index b44ab7d65..ba4537a72 100644 --- a/include/lapi/bpf.h +++ b/include/lapi/bpf.h @@ -14,6 +14,7 @@ #include <stdint.h>
#include "lapi/syscalls.h" +#include "linux/types.h"
/* Start copy from linux/bpf_(common).h */ #define BPF_CLASS(code) ((code) & 0x07) @@ -91,6 +92,7 @@ enum bpf_cmd { BPF_PROG_ATTACH, BPF_PROG_DETACH, BPF_PROG_TEST_RUN, + BPF_PROG_RUN = BPF_PROG_TEST_RUN, BPF_PROG_GET_NEXT_ID, BPF_MAP_GET_NEXT_ID, BPF_PROG_GET_FD_BY_ID, @@ -103,6 +105,19 @@ enum bpf_cmd { BPF_TASK_FD_QUERY, BPF_MAP_LOOKUP_AND_DELETE_ELEM, BPF_MAP_FREEZE, + BPF_BTF_GET_NEXT_ID, + BPF_MAP_LOOKUP_BATCH, + BPF_MAP_LOOKUP_AND_DELETE_BATCH, + BPF_MAP_UPDATE_BATCH, + BPF_MAP_DELETE_BATCH, + BPF_LINK_CREATE, + BPF_LINK_UPDATE, + BPF_LINK_GET_FD_BY_ID, + BPF_LINK_GET_NEXT_ID, + BPF_ENABLE_STATS, + BPF_ITER_CREATE, + BPF_LINK_DETACH, + BPF_PROG_BIND_MAP, };
enum bpf_map_type { @@ -125,7 +140,8 @@ enum bpf_map_type { BPF_MAP_TYPE_CPUMAP, BPF_MAP_TYPE_XSKMAP, BPF_MAP_TYPE_SOCKHASH, - BPF_MAP_TYPE_CGROUP_STORAGE, + BPF_MAP_TYPE_CGROUP_STORAGE_DEPRECATED, + BPF_MAP_TYPE_CGROUP_STORAGE = BPF_MAP_TYPE_CGROUP_STORAGE_DEPRECATED, BPF_MAP_TYPE_REUSEPORT_SOCKARRAY, BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE, BPF_MAP_TYPE_QUEUE, @@ -137,6 +153,8 @@ enum bpf_map_type { BPF_MAP_TYPE_INODE_STORAGE, BPF_MAP_TYPE_TASK_STORAGE, BPF_MAP_TYPE_BLOOM_FILTER, + BPF_MAP_TYPE_USER_RINGBUF, + BPF_MAP_TYPE_CGRP_STORAGE, };
enum bpf_prog_type { @@ -166,6 +184,13 @@ enum bpf_prog_type { BPF_PROG_TYPE_CGROUP_SYSCTL, BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE, BPF_PROG_TYPE_CGROUP_SOCKOPT, + BPF_PROG_TYPE_TRACING, + BPF_PROG_TYPE_STRUCT_OPS, + BPF_PROG_TYPE_EXT, + BPF_PROG_TYPE_LSM, + BPF_PROG_TYPE_SK_LOOKUP, + BPF_PROG_TYPE_SYSCALL, + BPF_PROG_TYPE_NETFILTER, };
#define BPF_PSEUDO_MAP_FD 1 @@ -181,148 +206,301 @@ enum bpf_prog_type {
union bpf_attr { struct { /* anonymous struct used by BPF_MAP_CREATE command */ - uint32_t map_type; /* one of enum bpf_map_type */ - uint32_t key_size; /* size of key in bytes */ - uint32_t value_size; /* size of value in bytes */ - uint32_t max_entries; /* max number of entries in a map */ - uint32_t map_flags; /* BPF_MAP_CREATE related + __u32 map_type; /* one of enum bpf_map_type */ + __u32 key_size; /* size of key in bytes */ + __u32 value_size; /* size of value in bytes */ + __u32 max_entries; /* max number of entries in a map */ + __u32 map_flags; /* BPF_MAP_CREATE related * flags defined above. */ - uint32_t inner_map_fd; /* fd pointing to the inner map */ - uint32_t numa_node; /* numa node (effective only if + __u32 inner_map_fd; /* fd pointing to the inner map */ + __u32 numa_node; /* numa node (effective only if * BPF_F_NUMA_NODE is set). */ char map_name[BPF_OBJ_NAME_LEN]; - uint32_t map_ifindex; /* ifindex of netdev to create on */ - uint32_t btf_fd; /* fd pointing to a BTF type data */ - uint32_t btf_key_type_id; /* BTF type_id of the key */ - uint32_t btf_value_type_id; /* BTF type_id of the value */ + __u32 map_ifindex; /* ifindex of netdev to create on */ + __u32 btf_fd; /* fd pointing to a BTF type data */ + __u32 btf_key_type_id; /* BTF type_id of the key */ + __u32 btf_value_type_id; /* BTF type_id of the value */ + __u32 btf_vmlinux_value_type_id;/* BTF type_id of a kernel- + * struct stored as the + * map value + */ + /* Any per-map-type extra fields + * + * BPF_MAP_TYPE_BLOOM_FILTER - the lowest 4 bits indicate the + * number of hash functions (if 0, the bloom filter will default + * to using 5 hash functions). + */ + __u64 map_extra; };
struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */ - uint32_t map_fd; - aligned_uint64_t key; + __u32 map_fd; + __kernel_aligned_uintptr_t key; union { - aligned_uint64_t value; - aligned_uint64_t next_key; + __kernel_aligned_uintptr_t value; + __kernel_aligned_uintptr_t next_key; }; - uint64_t flags; + __u64 flags; };
+ struct { /* struct used by BPF_MAP_*_BATCH commands */ + /* start batch, NULL to start from beginning */ + __kernel_aligned_uintptr_t in_batch; + /* output: next start batch */ + __kernel_aligned_uintptr_t out_batch; + __kernel_aligned_uintptr_t keys; + __kernel_aligned_uintptr_t values; + __u32 count; /* input/output: + * input: # of key/value + * elements + * output: # of filled elements + */ + __u32 map_fd; + __u64 elem_flags; + __u64 flags; + } batch; + struct { /* anonymous struct used by BPF_PROG_LOAD command */ - uint32_t prog_type; /* one of enum bpf_prog_type */ - uint32_t insn_cnt; - aligned_uint64_t insns; - aligned_uint64_t license; - uint32_t log_level; /* verbosity level of verifier */ - uint32_t log_size; /* size of user buffer */ - aligned_uint64_t log_buf; /* user supplied buffer */ - uint32_t kern_version; /* not used */ - uint32_t prog_flags; + __u32 prog_type; /* one of enum bpf_prog_type */ + __u32 insn_cnt; + __kernel_aligned_uintptr_t insns; + __kernel_aligned_uintptr_t license; + __u32 log_level; /* verbosity level of verifier */ + __u32 log_size; /* size of user buffer */ + __kernel_aligned_uintptr_t log_buf; /* user supplied buffer */ + __u32 kern_version; /* not used */ + __u32 prog_flags; char prog_name[BPF_OBJ_NAME_LEN]; - uint32_t prog_ifindex; /* ifindex of netdev to prep for */ + __u32 prog_ifindex; /* ifindex of netdev to prep for */ /* For some prog types expected attach type must be known at * load time to verify attach type specific parts of prog * (context accesses, allowed helpers, etc). 
*/ - uint32_t expected_attach_type; - uint32_t prog_btf_fd; /* fd pointing to BTF type data */ - uint32_t func_info_rec_size; /* userspace bpf_func_info size */ - aligned_uint64_t func_info; /* func info */ - uint32_t func_info_cnt; /* number of bpf_func_info records */ - uint32_t line_info_rec_size; /* userspace bpf_line_info size */ - aligned_uint64_t line_info; /* line info */ - uint32_t line_info_cnt; /* number of bpf_line_info records */ + __u32 expected_attach_type; + __u32 prog_btf_fd; /* fd pointing to BTF type data */ + __u32 func_info_rec_size; /* userspace bpf_func_info size */ + __kernel_aligned_uintptr_t func_info; /* func info */ + __u32 func_info_cnt; /* number of bpf_func_info records */ + __u32 line_info_rec_size; /* userspace bpf_line_info size */ + __kernel_aligned_uintptr_t line_info; /* line info */ + __u32 line_info_cnt; /* number of bpf_line_info records */ + __u32 attach_btf_id; /* in-kernel BTF type id to attach to */ + union { + /* valid prog_fd to attach to bpf prog */ + __u32 attach_prog_fd; + /* or valid module BTF object fd or 0 to attach to vmlinux */ + __u32 attach_btf_obj_fd; + }; + __u32 core_relo_cnt; /* number of bpf_core_relo */ + __kernel_aligned_uintptr_t fd_array; /* array of FDs */ + __kernel_aligned_uintptr_t core_relos; + __u32 core_relo_rec_size; /* sizeof(struct bpf_core_relo) */ + /* output: actual total log contents size (including termintaing zero). + * It could be both larger than original log_size (if log was + * truncated), or smaller (if log buffer wasn't filled completely). + */ + __u32 log_true_size; };
struct { /* anonymous struct used by BPF_OBJ_* commands */ - aligned_uint64_t pathname; - uint32_t bpf_fd; - uint32_t file_flags; + __kernel_aligned_uintptr_t pathname; + __u32 bpf_fd; + __u32 file_flags; };
struct { /* anonymous struct used by BPF_PROG_ATTACH/DETACH commands */ - uint32_t target_fd; /* container object to attach to */ - uint32_t attach_bpf_fd; /* eBPF program to attach */ - uint32_t attach_type; - uint32_t attach_flags; + __u32 target_fd; /* container object to attach to */ + __u32 attach_bpf_fd; /* eBPF program to attach */ + __u32 attach_type; + __u32 attach_flags; + __u32 replace_bpf_fd; /* previously attached eBPF + * program to replace if + * BPF_F_REPLACE is used + */ };
struct { /* anonymous struct used by BPF_PROG_TEST_RUN command */ - uint32_t prog_fd; - uint32_t retval; - uint32_t data_size_in; /* input: len of data_in */ - uint32_t data_size_out; /* input/output: len of data_out + __u32 prog_fd; + __u32 retval; + __u32 data_size_in; /* input: len of data_in */ + __u32 data_size_out; /* input/output: len of data_out * returns ENOSPC if data_out * is too small. */ - aligned_uint64_t data_in; - aligned_uint64_t data_out; - uint32_t repeat; - uint32_t duration; - uint32_t ctx_size_in; /* input: len of ctx_in */ - uint32_t ctx_size_out; /* input/output: len of ctx_out + __kernel_aligned_uintptr_t data_in; + __kernel_aligned_uintptr_t data_out; + __u32 repeat; + __u32 duration; + __u32 ctx_size_in; /* input: len of ctx_in */ + __u32 ctx_size_out; /* input/output: len of ctx_out * returns ENOSPC if ctx_out * is too small. */ - aligned_uint64_t ctx_in; - aligned_uint64_t ctx_out; + __kernel_aligned_uintptr_t ctx_in; + __kernel_aligned_uintptr_t ctx_out; + __u32 flags; + __u32 cpu; + __u32 batch_size; } test;
struct { /* anonymous struct used by BPF_*_GET_*_ID */ union { - uint32_t start_id; - uint32_t prog_id; - uint32_t map_id; - uint32_t btf_id; + __u32 start_id; + __u32 prog_id; + __u32 map_id; + __u32 btf_id; + __u32 link_id; }; - uint32_t next_id; - uint32_t open_flags; + __u32 next_id; + __u32 open_flags; };
struct { /* anonymous struct used by BPF_OBJ_GET_INFO_BY_FD */ - uint32_t bpf_fd; - uint32_t info_len; - aligned_uint64_t info; + __u32 bpf_fd; + __u32 info_len; + __kernel_aligned_uintptr_t info; } info;
struct { /* anonymous struct used by BPF_PROG_QUERY command */ - uint32_t target_fd; /* container object to query */ - uint32_t attach_type; - uint32_t query_flags; - uint32_t attach_flags; - aligned_uint64_t prog_ids; - uint32_t prog_cnt; + __u32 target_fd; /* container object to query */ + __u32 attach_type; + __u32 query_flags; + __u32 attach_flags; + __kernel_aligned_uintptr_t prog_ids; + __u32 prog_cnt; + /* output: per-program attach_flags. + * not allowed to be set during effective query. + */ + __kernel_aligned_uintptr_t prog_attach_flags; } query;
- struct { - uint64_t name; - uint32_t prog_fd; + struct { /* anonymous struct used by BPF_RAW_TRACEPOINT_OPEN command */ + __kernel_aligned_uintptr_t name; + __u32 prog_fd; } raw_tracepoint;
struct { /* anonymous struct for BPF_BTF_LOAD */ - aligned_uint64_t btf; - aligned_uint64_t btf_log_buf; - uint32_t btf_size; - uint32_t btf_log_size; - uint32_t btf_log_level; + __kernel_aligned_uintptr_t btf; + __kernel_aligned_uintptr_t btf_log_buf; + __u32 btf_size; + __u32 btf_log_size; + __u32 btf_log_level; + /* output: actual total log contents size (including termintaing zero). + * It could be both larger than original log_size (if log was + * truncated), or smaller (if log buffer wasn't filled completely). + */ + __u32 btf_log_true_size; };
struct { - uint32_t pid; /* input: pid */ - uint32_t fd; /* input: fd */ - uint32_t flags; /* input: flags */ - uint32_t buf_len; /* input/output: buf len */ - aligned_uint64_t buf; /* input/output: + __u32 pid; /* input: pid */ + __u32 fd; /* input: fd */ + __u32 flags; /* input: flags */ + __u32 buf_len; /* input/output: buf len */ + __kernel_aligned_uintptr_t buf; /* input/output: * tp_name for tracepoint * symbol for kprobe * filename for uprobe */ - uint32_t prog_id; /* output: prod_id */ - uint32_t fd_type; /* output: BPF_FD_TYPE_* */ - uint64_t probe_offset; /* output: probe_offset */ - uint64_t probe_addr; /* output: probe_addr */ + __u32 prog_id; /* output: prod_id */ + __u32 fd_type; /* output: BPF_FD_TYPE_* */ + __u64 probe_offset; /* output: probe_offset */ + __u64 probe_addr; /* output: probe_addr */ } task_fd_query; + + struct { /* struct used by BPF_LINK_CREATE command */ + union { + __u32 prog_fd; /* eBPF program to attach */ + __u32 map_fd; /* struct_ops to attach */ + }; + union { + __u32 target_fd; /* object to attach to */ + __u32 target_ifindex; /* target ifindex */ + }; + __u32 attach_type; /* attach type */ + __u32 flags; /* extra flags */ + union { + __u32 target_btf_id; /* btf_id of target to attach to */ + struct { + /* extra bpf_iter_link_info */ + __kernel_aligned_uintptr_t iter_info; + /* iter_info length */ + __u32 iter_info_len; + }; + struct { + /* black box user-provided value passed through + * to BPF program at the execution time and + * accessible through bpf_get_attach_cookie() BPF helper + */ + __u64 bpf_cookie; + } perf_event; + struct { + __u32 flags; + __u32 cnt; + __kernel_aligned_uintptr_t syms; + __kernel_aligned_uintptr_t addrs; + __kernel_aligned_uintptr_t cookies; + } kprobe_multi; + struct { + /* this is overlaid with the target_btf_id above. 
*/ + __u32 target_btf_id; + /* black box user-provided value passed through + * to BPF program at the execution time and + * accessible through bpf_get_attach_cookie() BPF helper + */ + __u64 cookie; + } tracing; + struct { + __u32 pf; + __u32 hooknum; + __s32 priority; + __u32 flags; + } netfilter; + }; + } link_create; + + struct { /* struct used by BPF_LINK_UPDATE command */ + __u32 link_fd; /* link fd */ + union { + /* new program fd to update link with */ + __u32 new_prog_fd; + /* new struct_ops map fd to update link with */ + __u32 new_map_fd; + }; + __u32 flags; /* extra flags */ + union { + /* expected link's program fd; is specified only if + * BPF_F_REPLACE flag is set in flags. + */ + __u32 old_prog_fd; + /* expected link's map fd; is specified only + * if BPF_F_REPLACE flag is set. + */ + __u32 old_map_fd; + }; + } link_update; + + struct { + __u32 link_fd; + } link_detach; + + struct { /* struct used by BPF_ENABLE_STATS command */ + __u32 type; + } enable_stats; + + struct { /* struct used by BPF_ITER_CREATE command */ + __u32 link_fd; + __u32 flags; + } iter_create; + + struct { /* struct used by BPF_PROG_BIND_MAP command */ + __u32 prog_fd; + __u32 map_fd; + __u32 flags; /* extra flags */ + } prog_bind_map; + } __attribute__((aligned(8)));
#define __BPF_FUNC_MAPPER(FN) \ @@ -613,10 +791,6 @@ enum bpf_func_id { /* End copy from tools/include/filter.h */
/* Start copy from tools/lib/bpf */ -static inline uint64_t ptr_to_u64(const void *ptr) -{ - return (uint64_t) (unsigned long) ptr; -}
static inline int bpf(enum bpf_cmd cmd, union bpf_attr *attr, unsigned int size) { diff --git a/testcases/kernel/syscalls/bpf/bpf_common.c b/testcases/kernel/syscalls/bpf/bpf_common.c index 95b5bc12e..1d2fcba61 100644 --- a/testcases/kernel/syscalls/bpf/bpf_common.c +++ b/testcases/kernel/syscalls/bpf/bpf_common.c @@ -66,8 +66,8 @@ void bpf_map_array_get(const int map_fd, { union bpf_attr elem_attr = { .map_fd = map_fd, - .key = ptr_to_u64(array_indx), - .value = ptr_to_u64(array_val), + .key = (uintptr_t)array_indx, + .value = (uintptr_t)array_val, .flags = 0 }; const int ret = bpf(BPF_MAP_LOOKUP_ELEM, &elem_attr, sizeof(elem_attr)); @@ -97,10 +97,10 @@ void bpf_init_prog_attr(union bpf_attr *const attr, memcpy(buf, prog, prog_size); memset(attr, 0, sizeof(*attr)); attr->prog_type = BPF_PROG_TYPE_SOCKET_FILTER; - attr->insns = ptr_to_u64(buf); + attr->insns = (uintptr_t)buf; attr->insn_cnt = prog_len; - attr->license = ptr_to_u64("GPL"); - attr->log_buf = ptr_to_u64(log_buf); + attr->license = (uintptr_t)"GPL"; + attr->log_buf = (uintptr_t)log_buf; attr->log_size = log_size; attr->log_level = 1; } diff --git a/testcases/kernel/syscalls/bpf/bpf_map01.c b/testcases/kernel/syscalls/bpf/bpf_map01.c index 94f9b7873..9491b256d 100644 --- a/testcases/kernel/syscalls/bpf/bpf_map01.c +++ b/testcases/kernel/syscalls/bpf/bpf_map01.c @@ -54,8 +54,8 @@ void run(unsigned int n)
memset(attr, 0, sizeof(*attr)); attr->map_fd = fd; - attr->key = ptr_to_u64(key); - attr->value = ptr_to_u64(val_get); + attr->key = (uintptr_t)key; + attr->value = (uintptr_t)val_get;
memset(val_get, 'x', VAL_SZ);
@@ -89,8 +89,8 @@ void run(unsigned int n)
memset(attr, 0, sizeof(*attr)); attr->map_fd = fd; - attr->key = ptr_to_u64(key); - attr->value = ptr_to_u64(val_set); + attr->key = (uintptr_t)key; + attr->value = (uintptr_t)val_set; attr->flags = BPF_ANY;
TEST(bpf(BPF_MAP_UPDATE_ELEM, attr, sizeof(*attr))); @@ -106,8 +106,8 @@ void run(unsigned int n)
memset(attr, 0, sizeof(*attr)); attr->map_fd = fd; - attr->key = ptr_to_u64(key); - attr->value = ptr_to_u64(val_get); + attr->key = (uintptr_t)key; + attr->value = (uintptr_t)val_get;
TEST(bpf(BPF_MAP_LOOKUP_ELEM, attr, sizeof(*attr))); if (TST_RET == -1) { diff --git a/testcases/kernel/syscalls/bpf/bpf_prog03.c b/testcases/kernel/syscalls/bpf/bpf_prog03.c index 35bb841c7..8fd5ecdaa 100644 --- a/testcases/kernel/syscalls/bpf/bpf_prog03.c +++ b/testcases/kernel/syscalls/bpf/bpf_prog03.c @@ -120,8 +120,8 @@ static void run(void)
memset(attr, 0, sizeof(*attr)); attr->map_fd = map_fd; - attr->key = ptr_to_u64(key); - attr->value = ptr_to_u64(val); + attr->key = (uintptr_t)key; + attr->value = (uintptr_t)val; attr->flags = BPF_ANY;
TEST(bpf(BPF_MAP_UPDATE_ELEM, attr, sizeof(*attr)));
On Tue, Nov 14, 2023 at 04:19:00PM +0000, Zachary Leaf wrote:
PCuABI/uAPI has been updated to allow capabilities to be passed to the bpf syscall.
Align LTP tests with the new uAPI by casting pointers to uintptr_t instead of to u64. This ensures that capabilities are passed in purecap applications and remains a cast to u64 for aarch64 applications.
In addition update the union bpf_attr definition in include/lapi/bpf.h to match the kernel as of v6.4.
It looks like the patch is doing slightly more than just that, so it might be worth splitting it in two to keep the lapi update separate.
--- BR. Beata
Signed-off-by: Zachary Leaf zachary.leaf@arm.com
include/lapi/bpf.h | 354 +++++++++++++++------ testcases/kernel/syscalls/bpf/bpf_common.c | 10 +- testcases/kernel/syscalls/bpf/bpf_map01.c | 12 +- testcases/kernel/syscalls/bpf/bpf_prog03.c | 4 +- 4 files changed, 277 insertions(+), 103 deletions(-)
diff --git a/include/lapi/bpf.h b/include/lapi/bpf.h index b44ab7d65..ba4537a72 100644 --- a/include/lapi/bpf.h +++ b/include/lapi/bpf.h @@ -14,6 +14,7 @@ #include <stdint.h> #include "lapi/syscalls.h" +#include "linux/types.h" /* Start copy from linux/bpf_(common).h */ #define BPF_CLASS(code) ((code) & 0x07) @@ -91,6 +92,7 @@ enum bpf_cmd { BPF_PROG_ATTACH, BPF_PROG_DETACH, BPF_PROG_TEST_RUN,
- BPF_PROG_RUN = BPF_PROG_TEST_RUN, BPF_PROG_GET_NEXT_ID, BPF_MAP_GET_NEXT_ID, BPF_PROG_GET_FD_BY_ID,
@@ -103,6 +105,19 @@ enum bpf_cmd { BPF_TASK_FD_QUERY, BPF_MAP_LOOKUP_AND_DELETE_ELEM, BPF_MAP_FREEZE,
- BPF_BTF_GET_NEXT_ID,
- BPF_MAP_LOOKUP_BATCH,
- BPF_MAP_LOOKUP_AND_DELETE_BATCH,
- BPF_MAP_UPDATE_BATCH,
- BPF_MAP_DELETE_BATCH,
- BPF_LINK_CREATE,
- BPF_LINK_UPDATE,
- BPF_LINK_GET_FD_BY_ID,
- BPF_LINK_GET_NEXT_ID,
- BPF_ENABLE_STATS,
- BPF_ITER_CREATE,
- BPF_LINK_DETACH,
- BPF_PROG_BIND_MAP,
}; enum bpf_map_type { @@ -125,7 +140,8 @@ enum bpf_map_type { BPF_MAP_TYPE_CPUMAP, BPF_MAP_TYPE_XSKMAP, BPF_MAP_TYPE_SOCKHASH,
- BPF_MAP_TYPE_CGROUP_STORAGE,
- BPF_MAP_TYPE_CGROUP_STORAGE_DEPRECATED,
- BPF_MAP_TYPE_CGROUP_STORAGE = BPF_MAP_TYPE_CGROUP_STORAGE_DEPRECATED, BPF_MAP_TYPE_REUSEPORT_SOCKARRAY, BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE, BPF_MAP_TYPE_QUEUE,
@@ -137,6 +153,8 @@ enum bpf_map_type { BPF_MAP_TYPE_INODE_STORAGE, BPF_MAP_TYPE_TASK_STORAGE, BPF_MAP_TYPE_BLOOM_FILTER,
- BPF_MAP_TYPE_USER_RINGBUF,
- BPF_MAP_TYPE_CGRP_STORAGE,
}; enum bpf_prog_type { @@ -166,6 +184,13 @@ enum bpf_prog_type { BPF_PROG_TYPE_CGROUP_SYSCTL, BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE, BPF_PROG_TYPE_CGROUP_SOCKOPT,
- BPF_PROG_TYPE_TRACING,
- BPF_PROG_TYPE_STRUCT_OPS,
- BPF_PROG_TYPE_EXT,
- BPF_PROG_TYPE_LSM,
- BPF_PROG_TYPE_SK_LOOKUP,
- BPF_PROG_TYPE_SYSCALL,
- BPF_PROG_TYPE_NETFILTER,
}; #define BPF_PSEUDO_MAP_FD 1 @@ -181,148 +206,301 @@ enum bpf_prog_type { union bpf_attr { struct { /* anonymous struct used by BPF_MAP_CREATE command */
uint32_t map_type; /* one of enum bpf_map_type */
uint32_t key_size; /* size of key in bytes */
uint32_t value_size; /* size of value in bytes */
uint32_t max_entries; /* max number of entries in a map */
uint32_t map_flags; /* BPF_MAP_CREATE related
__u32 map_type; /* one of enum bpf_map_type */
__u32 key_size; /* size of key in bytes */
__u32 value_size; /* size of value in bytes */
__u32 max_entries; /* max number of entries in a map */
__u32 map_flags; /* BPF_MAP_CREATE related * flags defined above. */
uint32_t inner_map_fd; /* fd pointing to the inner map */
uint32_t numa_node; /* numa node (effective only if
__u32 inner_map_fd; /* fd pointing to the inner map */
char map_name[BPF_OBJ_NAME_LEN];__u32 numa_node; /* numa node (effective only if * BPF_F_NUMA_NODE is set). */
uint32_t map_ifindex; /* ifindex of netdev to create on */
uint32_t btf_fd; /* fd pointing to a BTF type data */
uint32_t btf_key_type_id; /* BTF type_id of the key */
uint32_t btf_value_type_id; /* BTF type_id of the value */
__u32 map_ifindex; /* ifindex of netdev to create on */
__u32 btf_fd; /* fd pointing to a BTF type data */
__u32 btf_key_type_id; /* BTF type_id of the key */
__u32 btf_value_type_id; /* BTF type_id of the value */
__u32 btf_vmlinux_value_type_id;/* BTF type_id of a kernel-
* struct stored as the
* map value
*/
/* Any per-map-type extra fields
*
* BPF_MAP_TYPE_BLOOM_FILTER - the lowest 4 bits indicate the
* number of hash functions (if 0, the bloom filter will default
* to using 5 hash functions).
*/
};__u64 map_extra;
struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */
uint32_t map_fd;
aligned_uint64_t key;
__u32 map_fd;
union {__kernel_aligned_uintptr_t key;
aligned_uint64_t value;
aligned_uint64_t next_key;
__kernel_aligned_uintptr_t value;
};__kernel_aligned_uintptr_t next_key;
uint64_t flags;
};__u64 flags;
- struct { /* struct used by BPF_MAP_*_BATCH commands */
/* start batch, NULL to start from beginning */
__kernel_aligned_uintptr_t in_batch;
/* output: next start batch */
__kernel_aligned_uintptr_t out_batch;
__kernel_aligned_uintptr_t keys;
__kernel_aligned_uintptr_t values;
__u32 count; /* input/output:
* input: # of key/value
* elements
* output: # of filled elements
*/
__u32 map_fd;
__u64 elem_flags;
__u64 flags;
- } batch;
- struct { /* anonymous struct used by BPF_PROG_LOAD command */
uint32_t prog_type; /* one of enum bpf_prog_type */
uint32_t insn_cnt;
aligned_uint64_t insns;
aligned_uint64_t license;
uint32_t log_level; /* verbosity level of verifier */
uint32_t log_size; /* size of user buffer */
aligned_uint64_t log_buf; /* user supplied buffer */
uint32_t kern_version; /* not used */
uint32_t prog_flags;
__u32 prog_type; /* one of enum bpf_prog_type */
__u32 insn_cnt;
__kernel_aligned_uintptr_t insns;
__kernel_aligned_uintptr_t license;
__u32 log_level; /* verbosity level of verifier */
__u32 log_size; /* size of user buffer */
__kernel_aligned_uintptr_t log_buf; /* user supplied buffer */
__u32 kern_version; /* not used */
char prog_name[BPF_OBJ_NAME_LEN];__u32 prog_flags;
uint32_t prog_ifindex; /* ifindex of netdev to prep for */
/* For some prog types expected attach type must be known at__u32 prog_ifindex; /* ifindex of netdev to prep for */
*/
- load time to verify attach type specific parts of prog
- (context accesses, allowed helpers, etc).
uint32_t expected_attach_type;
uint32_t prog_btf_fd; /* fd pointing to BTF type data */
uint32_t func_info_rec_size; /* userspace bpf_func_info size */
aligned_uint64_t func_info; /* func info */
uint32_t func_info_cnt; /* number of bpf_func_info records */
uint32_t line_info_rec_size; /* userspace bpf_line_info size */
aligned_uint64_t line_info; /* line info */
uint32_t line_info_cnt; /* number of bpf_line_info records */
__u32 expected_attach_type;
__u32 prog_btf_fd; /* fd pointing to BTF type data */
__u32 func_info_rec_size; /* userspace bpf_func_info size */
__kernel_aligned_uintptr_t func_info; /* func info */
__u32 func_info_cnt; /* number of bpf_func_info records */
__u32 line_info_rec_size; /* userspace bpf_line_info size */
__kernel_aligned_uintptr_t line_info; /* line info */
__u32 line_info_cnt; /* number of bpf_line_info records */
__u32 attach_btf_id; /* in-kernel BTF type id to attach to */
union {
/* valid prog_fd to attach to bpf prog */
__u32 attach_prog_fd;
/* or valid module BTF object fd or 0 to attach to vmlinux */
__u32 attach_btf_obj_fd;
};
__u32 core_relo_cnt; /* number of bpf_core_relo */
__kernel_aligned_uintptr_t fd_array; /* array of FDs */
__kernel_aligned_uintptr_t core_relos;
__u32 core_relo_rec_size; /* sizeof(struct bpf_core_relo) */
/* output: actual total log contents size (including terminating zero).
* It could be both larger than original log_size (if log was
* truncated), or smaller (if log buffer wasn't filled completely).
*/
};__u32 log_true_size;
struct { /* anonymous struct used by BPF_OBJ_* commands */
aligned_uint64_t pathname;
uint32_t bpf_fd;
uint32_t file_flags;
__kernel_aligned_uintptr_t pathname;
__u32 bpf_fd;
};__u32 file_flags;
struct { /* anonymous struct used by BPF_PROG_ATTACH/DETACH commands */
uint32_t target_fd; /* container object to attach to */
uint32_t attach_bpf_fd; /* eBPF program to attach */
uint32_t attach_type;
uint32_t attach_flags;
__u32 target_fd; /* container object to attach to */
__u32 attach_bpf_fd; /* eBPF program to attach */
__u32 attach_type;
__u32 attach_flags;
__u32 replace_bpf_fd; /* previously attached eBPF
* program to replace if
* BPF_F_REPLACE is used
};*/
struct { /* anonymous struct used by BPF_PROG_TEST_RUN command */
uint32_t prog_fd;
uint32_t retval;
uint32_t data_size_in; /* input: len of data_in */
uint32_t data_size_out; /* input/output: len of data_out
__u32 prog_fd;
__u32 retval;
__u32 data_size_in; /* input: len of data_in */
__u32 data_size_out; /* input/output: len of data_out * returns ENOSPC if data_out * is too small. */
aligned_uint64_t data_in;
aligned_uint64_t data_out;
uint32_t repeat;
uint32_t duration;
uint32_t ctx_size_in; /* input: len of ctx_in */
uint32_t ctx_size_out; /* input/output: len of ctx_out
__kernel_aligned_uintptr_t data_in;
__kernel_aligned_uintptr_t data_out;
__u32 repeat;
__u32 duration;
__u32 ctx_size_in; /* input: len of ctx_in */
__u32 ctx_size_out; /* input/output: len of ctx_out * returns ENOSPC if ctx_out * is too small. */
aligned_uint64_t ctx_in;
aligned_uint64_t ctx_out;
__kernel_aligned_uintptr_t ctx_in;
__kernel_aligned_uintptr_t ctx_out;
__u32 flags;
__u32 cpu;
} test;__u32 batch_size;
struct { /* anonymous struct used by BPF_*_GET_*_ID */ union {
uint32_t start_id;
uint32_t prog_id;
uint32_t map_id;
uint32_t btf_id;
__u32 start_id;
__u32 prog_id;
__u32 map_id;
__u32 btf_id;
};__u32 link_id;
uint32_t next_id;
uint32_t open_flags;
__u32 next_id;
};__u32 open_flags;
struct { /* anonymous struct used by BPF_OBJ_GET_INFO_BY_FD */
uint32_t bpf_fd;
uint32_t info_len;
aligned_uint64_t info;
__u32 bpf_fd;
__u32 info_len;
} info;__kernel_aligned_uintptr_t info;
struct { /* anonymous struct used by BPF_PROG_QUERY command */
uint32_t target_fd; /* container object to query */
uint32_t attach_type;
uint32_t query_flags;
uint32_t attach_flags;
aligned_uint64_t prog_ids;
uint32_t prog_cnt;
__u32 target_fd; /* container object to query */
__u32 attach_type;
__u32 query_flags;
__u32 attach_flags;
__kernel_aligned_uintptr_t prog_ids;
__u32 prog_cnt;
/* output: per-program attach_flags.
* not allowed to be set during effective query.
*/
} query;__kernel_aligned_uintptr_t prog_attach_flags;
- struct {
uint64_t name;
uint32_t prog_fd;
- struct { /* anonymous struct used by BPF_RAW_TRACEPOINT_OPEN command */
__kernel_aligned_uintptr_t name;
} raw_tracepoint;__u32 prog_fd;
struct { /* anonymous struct for BPF_BTF_LOAD */
aligned_uint64_t btf;
aligned_uint64_t btf_log_buf;
uint32_t btf_size;
uint32_t btf_log_size;
uint32_t btf_log_level;
__kernel_aligned_uintptr_t btf;
__kernel_aligned_uintptr_t btf_log_buf;
__u32 btf_size;
__u32 btf_log_size;
__u32 btf_log_level;
/* output: actual total log contents size (including terminating zero).
* It could be both larger than original log_size (if log was
* truncated), or smaller (if log buffer wasn't filled completely).
*/
};__u32 btf_log_true_size;
struct {
uint32_t pid; /* input: pid */
uint32_t fd; /* input: fd */
uint32_t flags; /* input: flags */
uint32_t buf_len; /* input/output: buf len */
aligned_uint64_t buf; /* input/output:
__u32 pid; /* input: pid */
__u32 fd; /* input: fd */
__u32 flags; /* input: flags */
__u32 buf_len; /* input/output: buf len */
__kernel_aligned_uintptr_t buf; /* input/output: * tp_name for tracepoint * symbol for kprobe * filename for uprobe */
uint32_t prog_id; /* output: prod_id */
uint32_t fd_type; /* output: BPF_FD_TYPE_* */
uint64_t probe_offset; /* output: probe_offset */
uint64_t probe_addr; /* output: probe_addr */
__u32 prog_id; /* output: prod_id */
__u32 fd_type; /* output: BPF_FD_TYPE_* */
__u64 probe_offset; /* output: probe_offset */
} task_fd_query;__u64 probe_addr; /* output: probe_addr */
- struct { /* struct used by BPF_LINK_CREATE command */
union {
__u32 prog_fd; /* eBPF program to attach */
__u32 map_fd; /* struct_ops to attach */
};
union {
__u32 target_fd; /* object to attach to */
__u32 target_ifindex; /* target ifindex */
};
__u32 attach_type; /* attach type */
__u32 flags; /* extra flags */
union {
__u32 target_btf_id; /* btf_id of target to attach to */
struct {
/* extra bpf_iter_link_info */
__kernel_aligned_uintptr_t iter_info;
/* iter_info length */
__u32 iter_info_len;
};
struct {
/* black box user-provided value passed through
* to BPF program at the execution time and
* accessible through bpf_get_attach_cookie() BPF helper
*/
__u64 bpf_cookie;
} perf_event;
struct {
__u32 flags;
__u32 cnt;
__kernel_aligned_uintptr_t syms;
__kernel_aligned_uintptr_t addrs;
__kernel_aligned_uintptr_t cookies;
} kprobe_multi;
struct {
/* this is overlaid with the target_btf_id above. */
__u32 target_btf_id;
/* black box user-provided value passed through
* to BPF program at the execution time and
* accessible through bpf_get_attach_cookie() BPF helper
*/
__u64 cookie;
} tracing;
struct {
__u32 pf;
__u32 hooknum;
__s32 priority;
__u32 flags;
} netfilter;
};
- } link_create;
- struct { /* struct used by BPF_LINK_UPDATE command */
__u32 link_fd; /* link fd */
union {
/* new program fd to update link with */
__u32 new_prog_fd;
/* new struct_ops map fd to update link with */
__u32 new_map_fd;
};
__u32 flags; /* extra flags */
union {
/* expected link's program fd; is specified only if
* BPF_F_REPLACE flag is set in flags.
*/
__u32 old_prog_fd;
/* expected link's map fd; is specified only
* if BPF_F_REPLACE flag is set.
*/
__u32 old_map_fd;
};
- } link_update;
- struct {
__u32 link_fd;
- } link_detach;
- struct { /* struct used by BPF_ENABLE_STATS command */
__u32 type;
- } enable_stats;
- struct { /* struct used by BPF_ITER_CREATE command */
__u32 link_fd;
__u32 flags;
- } iter_create;
- struct { /* struct used by BPF_PROG_BIND_MAP command */
__u32 prog_fd;
__u32 map_fd;
__u32 flags; /* extra flags */
- } prog_bind_map;
} __attribute__((aligned(8))); #define __BPF_FUNC_MAPPER(FN) \ @@ -613,10 +791,6 @@ enum bpf_func_id { /* End copy from tools/include/filter.h */ /* Start copy from tools/lib/bpf */ -static inline uint64_t ptr_to_u64(const void *ptr) -{
- return (uint64_t) (unsigned long) ptr;
-} static inline int bpf(enum bpf_cmd cmd, union bpf_attr *attr, unsigned int size) { diff --git a/testcases/kernel/syscalls/bpf/bpf_common.c b/testcases/kernel/syscalls/bpf/bpf_common.c index 95b5bc12e..1d2fcba61 100644 --- a/testcases/kernel/syscalls/bpf/bpf_common.c +++ b/testcases/kernel/syscalls/bpf/bpf_common.c @@ -66,8 +66,8 @@ void bpf_map_array_get(const int map_fd, { union bpf_attr elem_attr = { .map_fd = map_fd,
.key = ptr_to_u64(array_indx),
.value = ptr_to_u64(array_val),
.key = (uintptr_t)array_indx,
.flags = 0 }; const int ret = bpf(BPF_MAP_LOOKUP_ELEM, &elem_attr, sizeof(elem_attr));.value = (uintptr_t)array_val,
@@ -97,10 +97,10 @@ void bpf_init_prog_attr(union bpf_attr *const attr, memcpy(buf, prog, prog_size); memset(attr, 0, sizeof(*attr)); attr->prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
- attr->insns = ptr_to_u64(buf);
- attr->insns = (uintptr_t)buf; attr->insn_cnt = prog_len;
- attr->license = ptr_to_u64("GPL");
- attr->log_buf = ptr_to_u64(log_buf);
- attr->license = (uintptr_t)"GPL";
- attr->log_buf = (uintptr_t)log_buf; attr->log_size = log_size; attr->log_level = 1;
} diff --git a/testcases/kernel/syscalls/bpf/bpf_map01.c b/testcases/kernel/syscalls/bpf/bpf_map01.c index 94f9b7873..9491b256d 100644 --- a/testcases/kernel/syscalls/bpf/bpf_map01.c +++ b/testcases/kernel/syscalls/bpf/bpf_map01.c @@ -54,8 +54,8 @@ void run(unsigned int n) memset(attr, 0, sizeof(*attr)); attr->map_fd = fd;
- attr->key = ptr_to_u64(key);
- attr->value = ptr_to_u64(val_get);
- attr->key = (uintptr_t)key;
- attr->value = (uintptr_t)val_get;
memset(val_get, 'x', VAL_SZ); @@ -89,8 +89,8 @@ void run(unsigned int n) memset(attr, 0, sizeof(*attr)); attr->map_fd = fd;
- attr->key = ptr_to_u64(key);
- attr->value = ptr_to_u64(val_set);
- attr->key = (uintptr_t)key;
- attr->value = (uintptr_t)val_set; attr->flags = BPF_ANY;
TEST(bpf(BPF_MAP_UPDATE_ELEM, attr, sizeof(*attr))); @@ -106,8 +106,8 @@ void run(unsigned int n) memset(attr, 0, sizeof(*attr)); attr->map_fd = fd;
- attr->key = ptr_to_u64(key);
- attr->value = ptr_to_u64(val_get);
- attr->key = (uintptr_t)key;
- attr->value = (uintptr_t)val_get;
TEST(bpf(BPF_MAP_LOOKUP_ELEM, attr, sizeof(*attr))); if (TST_RET == -1) { diff --git a/testcases/kernel/syscalls/bpf/bpf_prog03.c b/testcases/kernel/syscalls/bpf/bpf_prog03.c index 35bb841c7..8fd5ecdaa 100644 --- a/testcases/kernel/syscalls/bpf/bpf_prog03.c +++ b/testcases/kernel/syscalls/bpf/bpf_prog03.c @@ -120,8 +120,8 @@ static void run(void) memset(attr, 0, sizeof(*attr)); attr->map_fd = map_fd;
- attr->key = ptr_to_u64(key);
- attr->value = ptr_to_u64(val);
- attr->key = (uintptr_t)key;
- attr->value = (uintptr_t)val; attr->flags = BPF_ANY;
TEST(bpf(BPF_MAP_UPDATE_ELEM, attr, sizeof(*attr))); -- 2.34.1
linux-morello-ltp mailing list -- linux-morello-ltp@op-lists.linaro.org To unsubscribe send an email to linux-morello-ltp-leave@op-lists.linaro.org
On 05/12/2023 00:25, Beata Michalska wrote:
On Tue, Nov 14, 2023 at 04:19:00PM +0000, Zachary Leaf wrote:
PCuABI/uAPI has been updated to allow capabilities to be passed to the bpf syscall.
Align LTP tests with the new uAPI by casting pointers to uintptr_t instead of to u64. This ensures that capabilities are passed in purecap applications, while the cast remains to u64 for aarch64 applications.
In addition update the union bpf_attr definition in include/lapi/bpf.h to match the kernel as of v6.4.
It looks like the patch is doing slightly more than just that, so it might be worth splitting the patch into two, to keep the lapi update separate.
Ack
Thanks, Zach
BR. Beata
Signed-off-by: Zachary Leaf zachary.leaf@arm.com
include/lapi/bpf.h | 354 +++++++++++++++------ testcases/kernel/syscalls/bpf/bpf_common.c | 10 +- testcases/kernel/syscalls/bpf/bpf_map01.c | 12 +- testcases/kernel/syscalls/bpf/bpf_prog03.c | 4 +- 4 files changed, 277 insertions(+), 103 deletions(-)
diff --git a/include/lapi/bpf.h b/include/lapi/bpf.h index b44ab7d65..ba4537a72 100644 --- a/include/lapi/bpf.h +++ b/include/lapi/bpf.h @@ -14,6 +14,7 @@ #include <stdint.h> #include "lapi/syscalls.h" +#include "linux/types.h" /* Start copy from linux/bpf_(common).h */ #define BPF_CLASS(code) ((code) & 0x07) @@ -91,6 +92,7 @@ enum bpf_cmd { BPF_PROG_ATTACH, BPF_PROG_DETACH, BPF_PROG_TEST_RUN,
- BPF_PROG_RUN = BPF_PROG_TEST_RUN, BPF_PROG_GET_NEXT_ID, BPF_MAP_GET_NEXT_ID, BPF_PROG_GET_FD_BY_ID,
@@ -103,6 +105,19 @@ enum bpf_cmd { BPF_TASK_FD_QUERY, BPF_MAP_LOOKUP_AND_DELETE_ELEM, BPF_MAP_FREEZE,
- BPF_BTF_GET_NEXT_ID,
- BPF_MAP_LOOKUP_BATCH,
- BPF_MAP_LOOKUP_AND_DELETE_BATCH,
- BPF_MAP_UPDATE_BATCH,
- BPF_MAP_DELETE_BATCH,
- BPF_LINK_CREATE,
- BPF_LINK_UPDATE,
- BPF_LINK_GET_FD_BY_ID,
- BPF_LINK_GET_NEXT_ID,
- BPF_ENABLE_STATS,
- BPF_ITER_CREATE,
- BPF_LINK_DETACH,
- BPF_PROG_BIND_MAP,
}; enum bpf_map_type { @@ -125,7 +140,8 @@ enum bpf_map_type { BPF_MAP_TYPE_CPUMAP, BPF_MAP_TYPE_XSKMAP, BPF_MAP_TYPE_SOCKHASH,
- BPF_MAP_TYPE_CGROUP_STORAGE,
- BPF_MAP_TYPE_CGROUP_STORAGE_DEPRECATED,
- BPF_MAP_TYPE_CGROUP_STORAGE = BPF_MAP_TYPE_CGROUP_STORAGE_DEPRECATED, BPF_MAP_TYPE_REUSEPORT_SOCKARRAY, BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE, BPF_MAP_TYPE_QUEUE,
@@ -137,6 +153,8 @@ enum bpf_map_type { BPF_MAP_TYPE_INODE_STORAGE, BPF_MAP_TYPE_TASK_STORAGE, BPF_MAP_TYPE_BLOOM_FILTER,
- BPF_MAP_TYPE_USER_RINGBUF,
- BPF_MAP_TYPE_CGRP_STORAGE,
}; enum bpf_prog_type { @@ -166,6 +184,13 @@ enum bpf_prog_type { BPF_PROG_TYPE_CGROUP_SYSCTL, BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE, BPF_PROG_TYPE_CGROUP_SOCKOPT,
- BPF_PROG_TYPE_TRACING,
- BPF_PROG_TYPE_STRUCT_OPS,
- BPF_PROG_TYPE_EXT,
- BPF_PROG_TYPE_LSM,
- BPF_PROG_TYPE_SK_LOOKUP,
- BPF_PROG_TYPE_SYSCALL,
- BPF_PROG_TYPE_NETFILTER,
}; #define BPF_PSEUDO_MAP_FD 1 @@ -181,148 +206,301 @@ enum bpf_prog_type { union bpf_attr { struct { /* anonymous struct used by BPF_MAP_CREATE command */
uint32_t map_type; /* one of enum bpf_map_type */
uint32_t key_size; /* size of key in bytes */
uint32_t value_size; /* size of value in bytes */
uint32_t max_entries; /* max number of entries in a map */
uint32_t map_flags; /* BPF_MAP_CREATE related
__u32 map_type; /* one of enum bpf_map_type */
__u32 key_size; /* size of key in bytes */
__u32 value_size; /* size of value in bytes */
__u32 max_entries; /* max number of entries in a map */
__u32 map_flags; /* BPF_MAP_CREATE related * flags defined above. */
uint32_t inner_map_fd; /* fd pointing to the inner map */
uint32_t numa_node; /* numa node (effective only if
__u32 inner_map_fd; /* fd pointing to the inner map */
char map_name[BPF_OBJ_NAME_LEN];__u32 numa_node; /* numa node (effective only if * BPF_F_NUMA_NODE is set). */
uint32_t map_ifindex; /* ifindex of netdev to create on */
uint32_t btf_fd; /* fd pointing to a BTF type data */
uint32_t btf_key_type_id; /* BTF type_id of the key */
uint32_t btf_value_type_id; /* BTF type_id of the value */
__u32 map_ifindex; /* ifindex of netdev to create on */
__u32 btf_fd; /* fd pointing to a BTF type data */
__u32 btf_key_type_id; /* BTF type_id of the key */
__u32 btf_value_type_id; /* BTF type_id of the value */
__u32 btf_vmlinux_value_type_id;/* BTF type_id of a kernel-
* struct stored as the
* map value
*/
/* Any per-map-type extra fields
*
* BPF_MAP_TYPE_BLOOM_FILTER - the lowest 4 bits indicate the
* number of hash functions (if 0, the bloom filter will default
* to using 5 hash functions).
*/
};__u64 map_extra;
struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */
uint32_t map_fd;
aligned_uint64_t key;
__u32 map_fd;
union {__kernel_aligned_uintptr_t key;
aligned_uint64_t value;
aligned_uint64_t next_key;
__kernel_aligned_uintptr_t value;
};__kernel_aligned_uintptr_t next_key;
uint64_t flags;
};__u64 flags;
- struct { /* struct used by BPF_MAP_*_BATCH commands */
/* start batch, NULL to start from beginning */
__kernel_aligned_uintptr_t in_batch;
/* output: next start batch */
__kernel_aligned_uintptr_t out_batch;
__kernel_aligned_uintptr_t keys;
__kernel_aligned_uintptr_t values;
__u32 count; /* input/output:
* input: # of key/value
* elements
* output: # of filled elements
*/
__u32 map_fd;
__u64 elem_flags;
__u64 flags;
- } batch;
- struct { /* anonymous struct used by BPF_PROG_LOAD command */
uint32_t prog_type; /* one of enum bpf_prog_type */
uint32_t insn_cnt;
aligned_uint64_t insns;
aligned_uint64_t license;
uint32_t log_level; /* verbosity level of verifier */
uint32_t log_size; /* size of user buffer */
aligned_uint64_t log_buf; /* user supplied buffer */
uint32_t kern_version; /* not used */
uint32_t prog_flags;
__u32 prog_type; /* one of enum bpf_prog_type */
__u32 insn_cnt;
__kernel_aligned_uintptr_t insns;
__kernel_aligned_uintptr_t license;
__u32 log_level; /* verbosity level of verifier */
__u32 log_size; /* size of user buffer */
__kernel_aligned_uintptr_t log_buf; /* user supplied buffer */
__u32 kern_version; /* not used */
char prog_name[BPF_OBJ_NAME_LEN];__u32 prog_flags;
uint32_t prog_ifindex; /* ifindex of netdev to prep for */
/* For some prog types expected attach type must be known at__u32 prog_ifindex; /* ifindex of netdev to prep for */
*/
- load time to verify attach type specific parts of prog
- (context accesses, allowed helpers, etc).
uint32_t expected_attach_type;
uint32_t prog_btf_fd; /* fd pointing to BTF type data */
uint32_t func_info_rec_size; /* userspace bpf_func_info size */
aligned_uint64_t func_info; /* func info */
uint32_t func_info_cnt; /* number of bpf_func_info records */
uint32_t line_info_rec_size; /* userspace bpf_line_info size */
aligned_uint64_t line_info; /* line info */
uint32_t line_info_cnt; /* number of bpf_line_info records */
__u32 expected_attach_type;
__u32 prog_btf_fd; /* fd pointing to BTF type data */
__u32 func_info_rec_size; /* userspace bpf_func_info size */
__kernel_aligned_uintptr_t func_info; /* func info */
__u32 func_info_cnt; /* number of bpf_func_info records */
__u32 line_info_rec_size; /* userspace bpf_line_info size */
__kernel_aligned_uintptr_t line_info; /* line info */
__u32 line_info_cnt; /* number of bpf_line_info records */
__u32 attach_btf_id; /* in-kernel BTF type id to attach to */
union {
/* valid prog_fd to attach to bpf prog */
__u32 attach_prog_fd;
/* or valid module BTF object fd or 0 to attach to vmlinux */
__u32 attach_btf_obj_fd;
};
__u32 core_relo_cnt; /* number of bpf_core_relo */
__kernel_aligned_uintptr_t fd_array; /* array of FDs */
__kernel_aligned_uintptr_t core_relos;
__u32 core_relo_rec_size; /* sizeof(struct bpf_core_relo) */
/* output: actual total log contents size (including termintaing zero).
* It could be both larger than original log_size (if log was
* truncated), or smaller (if log buffer wasn't filled completely).
*/
};__u32 log_true_size;
struct { /* anonymous struct used by BPF_OBJ_* commands */
aligned_uint64_t pathname;
uint32_t bpf_fd;
uint32_t file_flags;
__kernel_aligned_uintptr_t pathname;
__u32 bpf_fd;
};__u32 file_flags;
struct { /* anonymous struct used by BPF_PROG_ATTACH/DETACH commands */
uint32_t target_fd; /* container object to attach to */
uint32_t attach_bpf_fd; /* eBPF program to attach */
uint32_t attach_type;
uint32_t attach_flags;
__u32 target_fd; /* container object to attach to */
__u32 attach_bpf_fd; /* eBPF program to attach */
__u32 attach_type;
__u32 attach_flags;
__u32 replace_bpf_fd; /* previously attached eBPF
* program to replace if
* BPF_F_REPLACE is used
};*/
struct { /* anonymous struct used by BPF_PROG_TEST_RUN command */
uint32_t prog_fd;
uint32_t retval;
uint32_t data_size_in; /* input: len of data_in */
uint32_t data_size_out; /* input/output: len of data_out
__u32 prog_fd;
__u32 retval;
__u32 data_size_in; /* input: len of data_in */
__u32 data_size_out; /* input/output: len of data_out * returns ENOSPC if data_out * is too small. */
aligned_uint64_t data_in;
aligned_uint64_t data_out;
uint32_t repeat;
uint32_t duration;
uint32_t ctx_size_in; /* input: len of ctx_in */
uint32_t ctx_size_out; /* input/output: len of ctx_out
__kernel_aligned_uintptr_t data_in;
__kernel_aligned_uintptr_t data_out;
__u32 repeat;
__u32 duration;
__u32 ctx_size_in; /* input: len of ctx_in */
__u32 ctx_size_out; /* input/output: len of ctx_out * returns ENOSPC if ctx_out * is too small. */
aligned_uint64_t ctx_in;
aligned_uint64_t ctx_out;
__kernel_aligned_uintptr_t ctx_in;
__kernel_aligned_uintptr_t ctx_out;
__u32 flags;
__u32 cpu;
} test;__u32 batch_size;
struct { /* anonymous struct used by BPF_*_GET_*_ID */ union {
uint32_t start_id;
uint32_t prog_id;
uint32_t map_id;
uint32_t btf_id;
__u32 start_id;
__u32 prog_id;
__u32 map_id;
__u32 btf_id;
};__u32 link_id;
uint32_t next_id;
uint32_t open_flags;
__u32 next_id;
};__u32 open_flags;
struct { /* anonymous struct used by BPF_OBJ_GET_INFO_BY_FD */
uint32_t bpf_fd;
uint32_t info_len;
aligned_uint64_t info;
__u32 bpf_fd;
__u32 info_len;
} info;__kernel_aligned_uintptr_t info;
struct { /* anonymous struct used by BPF_PROG_QUERY command */
uint32_t target_fd; /* container object to query */
uint32_t attach_type;
uint32_t query_flags;
uint32_t attach_flags;
aligned_uint64_t prog_ids;
uint32_t prog_cnt;
__u32 target_fd; /* container object to query */
__u32 attach_type;
__u32 query_flags;
__u32 attach_flags;
__kernel_aligned_uintptr_t prog_ids;
__u32 prog_cnt;
/* output: per-program attach_flags.
* not allowed to be set during effective query.
*/
} query;__kernel_aligned_uintptr_t prog_attach_flags;
- struct {
uint64_t name;
uint32_t prog_fd;
- struct { /* anonymous struct used by BPF_RAW_TRACEPOINT_OPEN command */
__kernel_aligned_uintptr_t name;
} raw_tracepoint;__u32 prog_fd;
struct { /* anonymous struct for BPF_BTF_LOAD */
aligned_uint64_t btf;
aligned_uint64_t btf_log_buf;
uint32_t btf_size;
uint32_t btf_log_size;
uint32_t btf_log_level;
__kernel_aligned_uintptr_t btf;
__kernel_aligned_uintptr_t btf_log_buf;
__u32 btf_size;
__u32 btf_log_size;
__u32 btf_log_level;
/* output: actual total log contents size (including termintaing zero).
* It could be both larger than original log_size (if log was
* truncated), or smaller (if log buffer wasn't filled completely).
*/
};__u32 btf_log_true_size;
struct {
uint32_t pid; /* input: pid */
uint32_t fd; /* input: fd */
uint32_t flags; /* input: flags */
uint32_t buf_len; /* input/output: buf len */
aligned_uint64_t buf; /* input/output:
__u32 pid; /* input: pid */
__u32 fd; /* input: fd */
__u32 flags; /* input: flags */
__u32 buf_len; /* input/output: buf len */
__kernel_aligned_uintptr_t buf; /* input/output: * tp_name for tracepoint * symbol for kprobe * filename for uprobe */
uint32_t prog_id; /* output: prod_id */
uint32_t fd_type; /* output: BPF_FD_TYPE_* */
uint64_t probe_offset; /* output: probe_offset */
uint64_t probe_addr; /* output: probe_addr */
__u32 prog_id; /* output: prod_id */
__u32 fd_type; /* output: BPF_FD_TYPE_* */
__u64 probe_offset; /* output: probe_offset */
} task_fd_query;__u64 probe_addr; /* output: probe_addr */
- struct { /* struct used by BPF_LINK_CREATE command */
union {
__u32 prog_fd; /* eBPF program to attach */
__u32 map_fd; /* struct_ops to attach */
};
union {
__u32 target_fd; /* object to attach to */
__u32 target_ifindex; /* target ifindex */
};
__u32 attach_type; /* attach type */
__u32 flags; /* extra flags */
union {
__u32 target_btf_id; /* btf_id of target to attach to */
struct {
/* extra bpf_iter_link_info */
__kernel_aligned_uintptr_t iter_info;
/* iter_info length */
__u32 iter_info_len;
};
struct {
/* black box user-provided value passed through
* to BPF program at the execution time and
* accessible through bpf_get_attach_cookie() BPF helper
*/
__u64 bpf_cookie;
} perf_event;
struct {
__u32 flags;
__u32 cnt;
__kernel_aligned_uintptr_t syms;
__kernel_aligned_uintptr_t addrs;
__kernel_aligned_uintptr_t cookies;
} kprobe_multi;
struct {
/* this is overlaid with the target_btf_id above. */
__u32 target_btf_id;
/* black box user-provided value passed through
* to BPF program at the execution time and
* accessible through bpf_get_attach_cookie() BPF helper
*/
__u64 cookie;
} tracing;
struct {
__u32 pf;
__u32 hooknum;
__s32 priority;
__u32 flags;
} netfilter;
};
- } link_create;
- struct { /* struct used by BPF_LINK_UPDATE command */
__u32 link_fd; /* link fd */
union {
/* new program fd to update link with */
__u32 new_prog_fd;
/* new struct_ops map fd to update link with */
__u32 new_map_fd;
};
__u32 flags; /* extra flags */
union {
/* expected link's program fd; is specified only if
* BPF_F_REPLACE flag is set in flags.
*/
__u32 old_prog_fd;
/* expected link's map fd; is specified only
* if BPF_F_REPLACE flag is set.
*/
__u32 old_map_fd;
};
- } link_update;
- struct {
__u32 link_fd;
- } link_detach;
- struct { /* struct used by BPF_ENABLE_STATS command */
__u32 type;
- } enable_stats;
- struct { /* struct used by BPF_ITER_CREATE command */
__u32 link_fd;
__u32 flags;
- } iter_create;
- struct { /* struct used by BPF_PROG_BIND_MAP command */
__u32 prog_fd;
__u32 map_fd;
__u32 flags; /* extra flags */
- } prog_bind_map;
} __attribute__((aligned(8))); #define __BPF_FUNC_MAPPER(FN) \ @@ -613,10 +791,6 @@ enum bpf_func_id { /* End copy from tools/include/filter.h */ /* Start copy from tools/lib/bpf */ -static inline uint64_t ptr_to_u64(const void *ptr) -{
- return (uint64_t) (unsigned long) ptr;
-} static inline int bpf(enum bpf_cmd cmd, union bpf_attr *attr, unsigned int size) { diff --git a/testcases/kernel/syscalls/bpf/bpf_common.c b/testcases/kernel/syscalls/bpf/bpf_common.c index 95b5bc12e..1d2fcba61 100644 --- a/testcases/kernel/syscalls/bpf/bpf_common.c +++ b/testcases/kernel/syscalls/bpf/bpf_common.c @@ -66,8 +66,8 @@ void bpf_map_array_get(const int map_fd, { union bpf_attr elem_attr = { .map_fd = map_fd,
.key = ptr_to_u64(array_indx),
.value = ptr_to_u64(array_val),
.key = (uintptr_t)array_indx,
.flags = 0 }; const int ret = bpf(BPF_MAP_LOOKUP_ELEM, &elem_attr, sizeof(elem_attr));.value = (uintptr_t)array_val,
@@ -97,10 +97,10 @@ void bpf_init_prog_attr(union bpf_attr *const attr, memcpy(buf, prog, prog_size); memset(attr, 0, sizeof(*attr)); attr->prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
- attr->insns = ptr_to_u64(buf);
- attr->insns = (uintptr_t)buf; attr->insn_cnt = prog_len;
- attr->license = ptr_to_u64("GPL");
- attr->log_buf = ptr_to_u64(log_buf);
- attr->license = (uintptr_t)"GPL";
- attr->log_buf = (uintptr_t)log_buf; attr->log_size = log_size; attr->log_level = 1;
} diff --git a/testcases/kernel/syscalls/bpf/bpf_map01.c b/testcases/kernel/syscalls/bpf/bpf_map01.c index 94f9b7873..9491b256d 100644 --- a/testcases/kernel/syscalls/bpf/bpf_map01.c +++ b/testcases/kernel/syscalls/bpf/bpf_map01.c @@ -54,8 +54,8 @@ void run(unsigned int n) memset(attr, 0, sizeof(*attr)); attr->map_fd = fd;
- attr->key = ptr_to_u64(key);
- attr->value = ptr_to_u64(val_get);
- attr->key = (uintptr_t)key;
- attr->value = (uintptr_t)val_get;
memset(val_get, 'x', VAL_SZ); @@ -89,8 +89,8 @@ void run(unsigned int n) memset(attr, 0, sizeof(*attr)); attr->map_fd = fd;
- attr->key = ptr_to_u64(key);
- attr->value = ptr_to_u64(val_set);
- attr->key = (uintptr_t)key;
- attr->value = (uintptr_t)val_set; attr->flags = BPF_ANY;
TEST(bpf(BPF_MAP_UPDATE_ELEM, attr, sizeof(*attr))); @@ -106,8 +106,8 @@ void run(unsigned int n) memset(attr, 0, sizeof(*attr)); attr->map_fd = fd;
- attr->key = ptr_to_u64(key);
- attr->value = ptr_to_u64(val_get);
- attr->key = (uintptr_t)key;
- attr->value = (uintptr_t)val_get;
TEST(bpf(BPF_MAP_LOOKUP_ELEM, attr, sizeof(*attr))); if (TST_RET == -1) { diff --git a/testcases/kernel/syscalls/bpf/bpf_prog03.c b/testcases/kernel/syscalls/bpf/bpf_prog03.c index 35bb841c7..8fd5ecdaa 100644 --- a/testcases/kernel/syscalls/bpf/bpf_prog03.c +++ b/testcases/kernel/syscalls/bpf/bpf_prog03.c @@ -120,8 +120,8 @@ static void run(void) memset(attr, 0, sizeof(*attr)); attr->map_fd = map_fd;
- attr->key = ptr_to_u64(key);
- attr->value = ptr_to_u64(val);
- attr->key = (uintptr_t)key;
- attr->value = (uintptr_t)val; attr->flags = BPF_ANY;
TEST(bpf(BPF_MAP_UPDATE_ELEM, attr, sizeof(*attr))); -- 2.34.1
linux-morello-ltp mailing list -- linux-morello-ltp@op-lists.linaro.org To unsubscribe send an email to linux-morello-ltp-leave@op-lists.linaro.org
Check kernel/bpf/syscall.c:CHECK_ATTR macro implementation. The syscall should fail when we have non-zero memory in the union beyond the last element of the active sub-command struct.
Signed-off-by: Zachary Leaf zachary.leaf@arm.com --- .../kernel/syscalls/bpf/bpf_check_attr.c | 62 +++++++++++++++++++ 1 file changed, 62 insertions(+) create mode 100644 testcases/kernel/syscalls/bpf/bpf_check_attr.c
diff --git a/testcases/kernel/syscalls/bpf/bpf_check_attr.c b/testcases/kernel/syscalls/bpf/bpf_check_attr.c new file mode 100644 index 000000000..2ad70ee2e --- /dev/null +++ b/testcases/kernel/syscalls/bpf/bpf_check_attr.c @@ -0,0 +1,62 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (c) Arm Ltd. 2023. All rights reserved. + * Author: Zachary Leaf zachary.leaf@arm.com + * + * Check kernel/bpf/syscall.c:CHECK_ATTR macro implementation. The syscall + * should fail when we have non-zero memory in the union beyond the last + * element of the active sub-command struct. + */ + +#include <limits.h> +#include <string.h> +#include <stdio.h> +#include <stddef.h> + +#include "config.h" +#include "tst_test.h" +#include "lapi/bpf.h" +#include "bpf_common.h" + +static char *bpf_log_buf; +static union bpf_attr *attr; + +void run(void) +{ + size_t offset; + char *ptr; + + printf("sizeof(bpf_attr): %lu\n", sizeof(union bpf_attr)); + memset(attr, 0, sizeof(*attr)); + attr->map_type = BPF_MAP_TYPE_ARRAY; + attr->key_size = 4; + attr->value_size = 8; + attr->max_entries = 1; + attr->map_flags = 0; + + /* + * test CHECK_ATTR() macro + * check syscall fails if there is non-null data somewhere beyond + * the last struct member for the BPF_MAP_CREATE option + */ + offset = offsetof(union bpf_attr, map_extra); + printf("offset map_extra: %#lx\n", offset); + offset += 8; + ptr = (char *)attr; + *(ptr+offset) = 'x'; + TST_EXP_FAIL(bpf(BPF_MAP_CREATE, attr, sizeof(*attr)), EINVAL); + + /* remove the non-null data and BPF_MAP_CREATE should pass */ + *(ptr+offset) = '\0'; + TST_EXP_POSITIVE(bpf_map_create(attr)); +} + +static struct tst_test test = { + .test_all = run, + .min_kver = "5.16", /* map_extra field added in commit 9330986c0300 */ + .bufs = (struct tst_buffers []) { + {&bpf_log_buf, .size = BUFSIZE}, + {&attr, .size = sizeof(*attr)}, + {}, + } +};
On 14/11/2023 17:19, Zachary Leaf wrote:
[...]
+static char *bpf_log_buf;
That doesn't seem to be used any more.
+static union bpf_attr *attr;
+void run(void) +{
- size_t offset;
- char *ptr;
- printf("sizeof(bpf_attr): %lu\n", sizeof(union bpf_attr));
I'm not sure tests should print anything unless there's an error (though clearly some existing tests do).
The series looks good to me otherwise.
Kevin
On Tue, Nov 14, 2023 at 04:19:01PM +0000, Zachary Leaf wrote:
Check kernel/bpf/syscall.c:CHECK_ATTR macro implementation. The syscall
The aim here should be to verify that the syscall, for a given setup, is being properly handled, not the kernel's internals per se - the CHECK_ATTR macro is a helper one and it may disappear at any point in time, whether it be renamed, substituted or removed. In other words - the test should be validating syscalls.
should fail when we have non-zero memory in the union beyond the last element of the active sub-command struct.
Signed-off-by: Zachary Leaf zachary.leaf@arm.com
.../kernel/syscalls/bpf/bpf_check_attr.c | 62 +++++++++++++++++++ 1 file changed, 62 insertions(+) create mode 100644 testcases/kernel/syscalls/bpf/bpf_check_attr.c
diff --git a/testcases/kernel/syscalls/bpf/bpf_check_attr.c b/testcases/kernel/syscalls/bpf/bpf_check_attr.c new file mode 100644 index 000000000..2ad70ee2e --- /dev/null +++ b/testcases/kernel/syscalls/bpf/bpf_check_attr.c
Any particular reason why not to follow the general test naming scheme here ?
@@ -0,0 +1,62 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/*
- Copyright (c) Arm Ltd. 2023. All rights reserved.
- Author: Zachary Leaf zachary.leaf@arm.com
- Check kernel/bpf/syscall.c:CHECK_ATTR macro implementation. The syscall
- should fail when we have non-zero memory in the union beyond the last
- element of the active sub-command struct.
- */
+#include <limits.h> +#include <string.h> +#include <stdio.h> +#include <stddef.h>
+#include "config.h" +#include "tst_test.h" +#include "lapi/bpf.h" +#include "bpf_common.h"
+static char *bpf_log_buf; +static union bpf_attr *attr;
+void run(void) +{
- size_t offset;
- char *ptr;
- printf("sizeof(bpf_attr): %lu\n", sizeof(union bpf_attr));
This looks more like debug log so it might be better to drop this one and the one below (and related includes)
- memset(attr, 0, sizeof(*attr));
- attr->map_type = BPF_MAP_TYPE_ARRAY;
- attr->key_size = 4;
- attr->value_size = 8;
- attr->max_entries = 1;
- attr->map_flags = 0;
- /*
* test CHECK_ATTR() macro
* check syscall fails if there is non-null data somewhere beyond
* the last struct member for the BPF_MAP_CREATE option
*/
- offset = offsetof(union bpf_attr, map_extra);
- printf("offset map_extra: %#lx\n", offset);
- offset += 8;
Nit: this could be handled like kernel's offsetofend.
--- BR. Beata
- ptr = (char *)attr;
- *(ptr+offset) = 'x';
- TST_EXP_FAIL(bpf(BPF_MAP_CREATE, attr, sizeof(*attr)), EINVAL);
- /* remove the non-null data and BPF_MAP_CREATE should pass */
- *(ptr+offset) = '\0';
- TST_EXP_POSITIVE(bpf_map_create(attr));
+}
+static struct tst_test test = {
- .test_all = run,
- .min_kver = "5.16", /* map_extra field added in commit 9330986c0300 */
- .bufs = (struct tst_buffers []) {
{&bpf_log_buf, .size = BUFSIZE},
{&attr, .size = sizeof(*attr)},
{},
- }
+};
2.34.1
linux-morello-ltp mailing list -- linux-morello-ltp@op-lists.linaro.org To unsubscribe send an email to linux-morello-ltp-leave@op-lists.linaro.org
On 05/12/2023 00:24, Beata Michalska wrote:
On Tue, Nov 14, 2023 at 04:19:01PM +0000, Zachary Leaf wrote:
Check kernel/bpf/syscall.c:CHECK_ATTR macro implementation. The syscall
The aim here should be to verify that the syscall, for a given setup, is being properly handled, not kernel's internals per se - the CHECK_ATTR macro is a helper one and it may disappear at any point of time*, whether be renamed, substituted or removed. In other words - test should be validating syscalls.
Okay. It's true. We're not checking the particular implementation, but we're checking that sending junk in the input fails. So how about just re-word to something like:
The bpf syscall should fail when there is non-zero memory in the bpf_attr input union beyond the last element of the active sub-command struct.
should fail when we have non-zero memory in the union beyond the last element of the active sub-command struct.
Signed-off-by: Zachary Leaf zachary.leaf@arm.com
.../kernel/syscalls/bpf/bpf_check_attr.c | 62 +++++++++++++++++++ 1 file changed, 62 insertions(+) create mode 100644 testcases/kernel/syscalls/bpf/bpf_check_attr.c
diff --git a/testcases/kernel/syscalls/bpf/bpf_check_attr.c b/testcases/kernel/syscalls/bpf/bpf_check_attr.c new file mode 100644 index 000000000..2ad70ee2e --- /dev/null +++ b/testcases/kernel/syscalls/bpf/bpf_check_attr.c
Any particular reason why not to follow the general test naming scheme here ?
I just thought to avoid clashes in future if someone else adds bpf_prog08.c to save you the rebase :)
I can see we did just add epoll_wait{n+1} for what looks to be the only other test we've added as far as I can tell (git log --name-status --diff-filter=A).
Up to you - I can rename it to bpf_prog08 or something else.
@@ -0,0 +1,62 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/*
- Copyright (c) Arm Ltd. 2023. All rights reserved.
- Author: Zachary Leaf zachary.leaf@arm.com
- Check kernel/bpf/syscall.c:CHECK_ATTR macro implementation. The syscall
- should fail when we have non-zero memory in the union beyond the last
- element of the active sub-command struct.
- */
+#include <limits.h> +#include <string.h> +#include <stdio.h> +#include <stddef.h>
+#include "config.h" +#include "tst_test.h" +#include "lapi/bpf.h" +#include "bpf_common.h"
+static char *bpf_log_buf; +static union bpf_attr *attr;
+void run(void) +{
- size_t offset;
- char *ptr;
- printf("sizeof(bpf_attr): %lu\n", sizeof(union bpf_attr));
This looks more like debug log so it might be better to drop this one and the one below (and related includes)
Ack
- memset(attr, 0, sizeof(*attr));
- attr->map_type = BPF_MAP_TYPE_ARRAY;
- attr->key_size = 4;
- attr->value_size = 8;
- attr->max_entries = 1;
- attr->map_flags = 0;
- /*
* test CHECK_ATTR() macro
* check syscall fails if there is non-null data somewhere beyond
* the last struct member for the BPF_MAP_CREATE option
*/
- offset = offsetof(union bpf_attr, map_extra);
- printf("offset map_extra: %#lx\n", offset);
- offset += 8;
Nit: this could be handled like kernel's offsetofend.
Good idea, will steal that macro from the kernel.
Thanks, Zach
BR. Beata
- ptr = (char *)attr;
- *(ptr+offset) = 'x';
- TST_EXP_FAIL(bpf(BPF_MAP_CREATE, attr, sizeof(*attr)), EINVAL);
- /* remove the non-null data and BPF_MAP_CREATE should pass */
- *(ptr+offset) = '\0';
- TST_EXP_POSITIVE(bpf_map_create(attr));
+}
+static struct tst_test test = {
- .test_all = run,
- .min_kver = "5.16", /* map_extra field added in commit 9330986c0300 */
- .bufs = (struct tst_buffers []) {
{&bpf_log_buf, .size = BUFSIZE},
{&attr, .size = sizeof(*attr)},
{},
- }
+};
2.34.1
linux-morello-ltp mailing list -- linux-morello-ltp@op-lists.linaro.org To unsubscribe send an email to linux-morello-ltp-leave@op-lists.linaro.org
On Tue, Dec 05, 2023 at 10:05:34AM +0000, Zachary Leaf wrote:
On 05/12/2023 00:24, Beata Michalska wrote:
On Tue, Nov 14, 2023 at 04:19:01PM +0000, Zachary Leaf wrote:
Check kernel/bpf/syscall.c:CHECK_ATTR macro implementation. The syscall
The aim here should be to verify that the syscall, for a given setup, is being properly handled, not kernel's internals per se - the CHECK_ATTR macro is a helper one and it may disappear at any point of time*, whether be renamed, substituted or removed. In other words - test should be validating syscalls.
Okay. It's true. We're not checking the particular implementation, but we're checking that sending junk in the input fails. So how about just re-word to something like:
The bpf syscall should fail when there is non-zero memory in the bpf_attr input union beyond the last element of the active sub-command struct.
Sounds good, thanks.
should fail when we have non-zero memory in the union beyond the last element of the active sub-command struct.
Signed-off-by: Zachary Leaf zachary.leaf@arm.com
.../kernel/syscalls/bpf/bpf_check_attr.c | 62 +++++++++++++++++++ 1 file changed, 62 insertions(+) create mode 100644 testcases/kernel/syscalls/bpf/bpf_check_attr.c
diff --git a/testcases/kernel/syscalls/bpf/bpf_check_attr.c b/testcases/kernel/syscalls/bpf/bpf_check_attr.c new file mode 100644 index 000000000..2ad70ee2e --- /dev/null +++ b/testcases/kernel/syscalls/bpf/bpf_check_attr.c
Any particular reason why not to follow the general test naming scheme here ?
I just thought to avoid clashes in future if someone else adds bpf_prog08.c to save you the rebase :)
I can see we did just add epoll_wait{n+1} for what looks to be the only other test we've added as far as I can tell (git log --name-status --diff-filter=A).
Up to you - I can rename it to bpf_prog08 or something else.
You are actually right. We do already have that issue with epoll. So maybe naming it with something unique is a good idea after all, though maybe something else that does not use a kernel-specific notion in it, like bpf_validate_attr, or even bpf_attr?
--- BR Beata
@@ -0,0 +1,62 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/*
- Copyright (c) Arm Ltd. 2023. All rights reserved.
- Author: Zachary Leaf zachary.leaf@arm.com
- Check kernel/bpf/syscall.c:CHECK_ATTR macro implementation. The syscall
- should fail when we have non-zero memory in the union beyond the last
- element of the active sub-command struct.
- */
+#include <limits.h> +#include <string.h> +#include <stdio.h> +#include <stddef.h>
+#include "config.h" +#include "tst_test.h" +#include "lapi/bpf.h" +#include "bpf_common.h"
+static char *bpf_log_buf; +static union bpf_attr *attr;
+void run(void) +{
- size_t offset;
- char *ptr;
- printf("sizeof(bpf_attr): %lu\n", sizeof(union bpf_attr));
This looks more like debug log so it might be better to drop this one and the one below (and related includes)
Ack
- memset(attr, 0, sizeof(*attr));
- attr->map_type = BPF_MAP_TYPE_ARRAY;
- attr->key_size = 4;
- attr->value_size = 8;
- attr->max_entries = 1;
- attr->map_flags = 0;
- /*
* test CHECK_ATTR() macro
* check syscall fails if there is non-null data somewhere beyond
* the last struct member for the BPF_MAP_CREATE option
*/
- offset = offsetof(union bpf_attr, map_extra);
- printf("offset map_extra: %#lx\n", offset);
- offset += 8;
Nit: this could be handled like kernel's offsetofend.
Good idea, will steal that macro from the kernel.
Thanks, Zach
BR. Beata
- ptr = (char *)attr;
- *(ptr+offset) = 'x';
- TST_EXP_FAIL(bpf(BPF_MAP_CREATE, attr, sizeof(*attr)), EINVAL);
- /* remove the non-null data and BPF_MAP_CREATE should pass */
- *(ptr+offset) = '\0';
- TST_EXP_POSITIVE(bpf_map_create(attr));
+}
+static struct tst_test test = {
- .test_all = run,
- .min_kver = "5.16", /* map_extra field added in commit 9330986c0300 */
- .bufs = (struct tst_buffers []) {
{&bpf_log_buf, .size = BUFSIZE},
{&attr, .size = sizeof(*attr)},
{},
- }
+};
2.34.1
linux-morello-ltp mailing list -- linux-morello-ltp@op-lists.linaro.org To unsubscribe send an email to linux-morello-ltp-leave@op-lists.linaro.org
Add all bpf tests to the extended Morello transitional syscall run list.
Signed-off-by: Zachary Leaf zachary.leaf@arm.com --- runtest/morello_transitional_extended | 13 +++++++++++++ 1 file changed, 13 insertions(+)
diff --git a/runtest/morello_transitional_extended b/runtest/morello_transitional_extended index b350ebdaf..0689b39c1 100644 --- a/runtest/morello_transitional_extended +++ b/runtest/morello_transitional_extended @@ -1,5 +1,18 @@ #DESCRIPTION: Morello transitional extended ABI system calls
+#KERN - depends on Morello Linux kernel release > morello-release-1.X.0 +# requires CAP_SYS_ADMIN or CAP_BPF, or enabling unprivileged bpf with: +# echo "0" > /proc/sys/kernel/unprivileged_bpf_disabled +bpf_map01 bpf_map01 +bpf_prog01 bpf_prog01 +bpf_prog02 bpf_prog02 +bpf_prog03 bpf_prog03 +bpf_prog04 bpf_prog04 +bpf_prog05 bpf_prog05 +bpf_prog06 bpf_prog06 +bpf_prog07 bpf_prog07 +bpf_check_attr bpf_check_attr + #KERN - depends on Morello Linux kernel release > morello-release-1.5.0 brk01 brk01 brk02 brk02
On 14/11/2023 16:19, Zachary Leaf wrote:
Add all bpf tests to the extended Morello transitional syscall run list.
Signed-off-by: Zachary Leaf zachary.leaf@arm.com
runtest/morello_transitional_extended | 13 +++++++++++++ 1 file changed, 13 insertions(+)
diff --git a/runtest/morello_transitional_extended b/runtest/morello_transitional_extended index b350ebdaf..0689b39c1 100644 --- a/runtest/morello_transitional_extended +++ b/runtest/morello_transitional_extended @@ -1,5 +1,18 @@ #DESCRIPTION: Morello transitional extended ABI system calls +#KERN - depends on Morello Linux kernel release > morello-release-1.X.0
^ version will need updating when merging
+# requires CAP_SYS_ADMIN or CAP_BPF, or enabling unprivileged bpf with: +# echo "0" > /proc/sys/kernel/unprivileged_bpf_disabled +bpf_map01 bpf_map01 +bpf_prog01 bpf_prog01 +bpf_prog02 bpf_prog02 +bpf_prog03 bpf_prog03 +bpf_prog04 bpf_prog04 +bpf_prog05 bpf_prog05 +bpf_prog06 bpf_prog06 +bpf_prog07 bpf_prog07 +bpf_check_attr bpf_check_attr
#KERN - depends on Morello Linux kernel release > morello-release-1.5.0 brk01 brk01 brk02 brk02
On Tue, Nov 14, 2023 at 04:23:39PM +0000, Zachary Leaf wrote:
On 14/11/2023 16:19, Zachary Leaf wrote:
Add all bpf tests to the extended Morello transitional syscall run list.
Signed-off-by: Zachary Leaf zachary.leaf@arm.com
runtest/morello_transitional_extended | 13 +++++++++++++ 1 file changed, 13 insertions(+)
diff --git a/runtest/morello_transitional_extended b/runtest/morello_transitional_extended index b350ebdaf..0689b39c1 100644 --- a/runtest/morello_transitional_extended +++ b/runtest/morello_transitional_extended @@ -1,5 +1,18 @@ #DESCRIPTION: Morello transitional extended ABI system calls +#KERN - depends on Morello Linux kernel release > morello-release-1.X.0
^ version will need updating when merging
I guess it is safe to assume: kernel release > morello-release-1.7.0 as the kernel changes are ready to be merged (?)
+# requires CAP_SYS_ADMIN or CAP_BPF, or enabling unprivileged bpf with: +# echo "0" > /proc/sys/kernel/unprivileged_bpf_disabled +bpf_map01 bpf_map01 +bpf_prog01 bpf_prog01 +bpf_prog02 bpf_prog02 +bpf_prog03 bpf_prog03 +bpf_prog04 bpf_prog04 +bpf_prog05 bpf_prog05 +bpf_prog06 bpf_prog06 +bpf_prog07 bpf_prog07 +bpf_check_attr bpf_check_attr
#KERN - depends on Morello Linux kernel release > morello-release-1.5.0 brk01 brk01 brk02 brk02
linux-morello-ltp mailing list -- linux-morello-ltp@op-lists.linaro.org To unsubscribe send an email to linux-morello-ltp-leave@op-lists.linaro.org
On 05/12/2023 01:27, Beata Michalska wrote:
On Tue, Nov 14, 2023 at 04:23:39PM +0000, Zachary Leaf wrote:
On 14/11/2023 16:19, Zachary Leaf wrote:
Add all bpf tests to the extended Morello transitional syscall run list.
Signed-off-by: Zachary Leaf zachary.leaf@arm.com
runtest/morello_transitional_extended | 13 +++++++++++++ 1 file changed, 13 insertions(+)
diff --git a/runtest/morello_transitional_extended b/runtest/morello_transitional_extended index b350ebdaf..0689b39c1 100644 --- a/runtest/morello_transitional_extended +++ b/runtest/morello_transitional_extended @@ -1,5 +1,18 @@ #DESCRIPTION: Morello transitional extended ABI system calls +#KERN - depends on Morello Linux kernel release > morello-release-1.X.0
^ version will need updating when merging
I guess it is safe to assume: kernel release > morello-release-1.7.0 as the kernel changes are ready to be merged (?)
Yes that's right.
Kevin
linux-morello-ltp@op-lists.linaro.org