Update include/lapi/bpf.h to match the kernel as of 6.4, including changes to enable capabilities in the bpf syscall.
Signed-off-by: Zachary Leaf <zachary.leaf@arm.com>
---
 include/lapi/bpf.h | 350 ++++++++++++++++++++++++++++++++++-----------
 1 file changed, 264 insertions(+), 86 deletions(-)
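Reviewer note (not part of the patch): a minimal usage sketch of the updated
header, driving union bpf_attr through the raw syscall. It assumes only
lapi/bpf.h and lapi/syscalls.h as LTP tests include them; BPF_MAP_CREATE
itself is unchanged by this update, and real LTP tests would normally wrap
the call in tst_syscall() rather than plain syscall().

    /* Illustrative only: create a small array map via the updated header. */
    #include <string.h>
    #include <stdio.h>
    #include <unistd.h>

    #include "lapi/bpf.h"
    #include "lapi/syscalls.h"

    int main(void)
    {
            union bpf_attr attr;
            int fd;

            /* Unused attr fields must be zeroed, otherwise the kernel
             * rejects the command with EINVAL.
             */
            memset(&attr, 0, sizeof(attr));
            attr.map_type = BPF_MAP_TYPE_ARRAY;
            attr.key_size = sizeof(__u32);
            attr.value_size = sizeof(__u64);
            attr.max_entries = 1;
            strncpy(attr.map_name, "lapi_check", BPF_OBJ_NAME_LEN - 1);

            fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
            if (fd < 0)
                    perror("bpf(BPF_MAP_CREATE)");
            else
                    close(fd);

            return 0;
    }

The commands added below (BPF_ENABLE_STATS, BPF_LINK_CREATE, ...) are driven
the same way through their named structs in the union (enable_stats,
link_create, ...).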
diff --git a/include/lapi/bpf.h b/include/lapi/bpf.h
index b44ab7d65..5f6718b16 100644
--- a/include/lapi/bpf.h
+++ b/include/lapi/bpf.h
@@ -14,6 +14,7 @@
 #include <stdint.h>
 
 #include "lapi/syscalls.h"
+#include "linux/types.h"
 
 /* Start copy from linux/bpf_(common).h */
 #define BPF_CLASS(code)		((code) & 0x07)
@@ -91,6 +92,7 @@ enum bpf_cmd {
 	BPF_PROG_ATTACH,
 	BPF_PROG_DETACH,
 	BPF_PROG_TEST_RUN,
+	BPF_PROG_RUN = BPF_PROG_TEST_RUN,
 	BPF_PROG_GET_NEXT_ID,
 	BPF_MAP_GET_NEXT_ID,
 	BPF_PROG_GET_FD_BY_ID,
@@ -103,6 +105,19 @@ enum bpf_cmd {
 	BPF_TASK_FD_QUERY,
 	BPF_MAP_LOOKUP_AND_DELETE_ELEM,
 	BPF_MAP_FREEZE,
+	BPF_BTF_GET_NEXT_ID,
+	BPF_MAP_LOOKUP_BATCH,
+	BPF_MAP_LOOKUP_AND_DELETE_BATCH,
+	BPF_MAP_UPDATE_BATCH,
+	BPF_MAP_DELETE_BATCH,
+	BPF_LINK_CREATE,
+	BPF_LINK_UPDATE,
+	BPF_LINK_GET_FD_BY_ID,
+	BPF_LINK_GET_NEXT_ID,
+	BPF_ENABLE_STATS,
+	BPF_ITER_CREATE,
+	BPF_LINK_DETACH,
+	BPF_PROG_BIND_MAP,
 };
 
 enum bpf_map_type {
@@ -125,7 +140,8 @@ enum bpf_map_type {
 	BPF_MAP_TYPE_CPUMAP,
 	BPF_MAP_TYPE_XSKMAP,
 	BPF_MAP_TYPE_SOCKHASH,
-	BPF_MAP_TYPE_CGROUP_STORAGE,
+	BPF_MAP_TYPE_CGROUP_STORAGE_DEPRECATED,
+	BPF_MAP_TYPE_CGROUP_STORAGE = BPF_MAP_TYPE_CGROUP_STORAGE_DEPRECATED,
 	BPF_MAP_TYPE_REUSEPORT_SOCKARRAY,
 	BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE,
 	BPF_MAP_TYPE_QUEUE,
@@ -137,6 +153,8 @@ enum bpf_map_type {
 	BPF_MAP_TYPE_INODE_STORAGE,
 	BPF_MAP_TYPE_TASK_STORAGE,
 	BPF_MAP_TYPE_BLOOM_FILTER,
+	BPF_MAP_TYPE_USER_RINGBUF,
+	BPF_MAP_TYPE_CGRP_STORAGE,
 };
 
 enum bpf_prog_type {
@@ -166,6 +184,13 @@ enum bpf_prog_type {
 	BPF_PROG_TYPE_CGROUP_SYSCTL,
 	BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE,
 	BPF_PROG_TYPE_CGROUP_SOCKOPT,
+	BPF_PROG_TYPE_TRACING,
+	BPF_PROG_TYPE_STRUCT_OPS,
+	BPF_PROG_TYPE_EXT,
+	BPF_PROG_TYPE_LSM,
+	BPF_PROG_TYPE_SK_LOOKUP,
+	BPF_PROG_TYPE_SYSCALL,
+	BPF_PROG_TYPE_NETFILTER,
 };
 
 #define BPF_PSEUDO_MAP_FD	1
@@ -181,148 +206,301 @@ enum bpf_prog_type {
 union bpf_attr {
 	struct { /* anonymous struct used by BPF_MAP_CREATE command */
-		uint32_t map_type;	/* one of enum bpf_map_type */
-		uint32_t key_size;	/* size of key in bytes */
-		uint32_t value_size;	/* size of value in bytes */
-		uint32_t max_entries;	/* max number of entries in a map */
-		uint32_t map_flags;	/* BPF_MAP_CREATE related
+		__u32 map_type;		/* one of enum bpf_map_type */
+		__u32 key_size;		/* size of key in bytes */
+		__u32 value_size;	/* size of value in bytes */
+		__u32 max_entries;	/* max number of entries in a map */
+		__u32 map_flags;	/* BPF_MAP_CREATE related
 					 * flags defined above.
 					 */
-		uint32_t inner_map_fd;	/* fd pointing to the inner map */
-		uint32_t numa_node;	/* numa node (effective only if
+		__u32 inner_map_fd;	/* fd pointing to the inner map */
+		__u32 numa_node;	/* numa node (effective only if
 					 * BPF_F_NUMA_NODE is set).
 					 */
 		char map_name[BPF_OBJ_NAME_LEN];
-		uint32_t map_ifindex;	/* ifindex of netdev to create on */
-		uint32_t btf_fd;	/* fd pointing to a BTF type data */
-		uint32_t btf_key_type_id;	/* BTF type_id of the key */
-		uint32_t btf_value_type_id;	/* BTF type_id of the value */
+		__u32 map_ifindex;	/* ifindex of netdev to create on */
+		__u32 btf_fd;		/* fd pointing to a BTF type data */
+		__u32 btf_key_type_id;	/* BTF type_id of the key */
+		__u32 btf_value_type_id;	/* BTF type_id of the value */
+		__u32 btf_vmlinux_value_type_id;/* BTF type_id of a kernel-
+						 * struct stored as the
+						 * map value
+						 */
+		/* Any per-map-type extra fields
+		 *
+		 * BPF_MAP_TYPE_BLOOM_FILTER - the lowest 4 bits indicate the
+		 * number of hash functions (if 0, the bloom filter will default
+		 * to using 5 hash functions).
+		 */
+		__u64 map_extra;
 	};
 
 	struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */
-		uint32_t map_fd;
-		aligned_uint64_t key;
+		__u32 map_fd;
+		__kernel_aligned_uintptr_t key;
 		union {
-			aligned_uint64_t value;
-			aligned_uint64_t next_key;
+			__kernel_aligned_uintptr_t value;
+			__kernel_aligned_uintptr_t next_key;
 		};
-		uint64_t flags;
+		__u64 flags;
 	};
 
+	struct { /* struct used by BPF_MAP_*_BATCH commands */
+		/* start batch, NULL to start from beginning */
+		__kernel_aligned_uintptr_t in_batch;
+		/* output: next start batch */
+		__kernel_aligned_uintptr_t out_batch;
+		__kernel_aligned_uintptr_t keys;
+		__kernel_aligned_uintptr_t values;
+		__u32 count;		/* input/output:
+					 * input: # of key/value
+					 * elements
+					 * output: # of filled elements
+					 */
+		__u32 map_fd;
+		__u64 elem_flags;
+		__u64 flags;
+	} batch;
+
 	struct { /* anonymous struct used by BPF_PROG_LOAD command */
-		uint32_t prog_type;	/* one of enum bpf_prog_type */
-		uint32_t insn_cnt;
-		aligned_uint64_t insns;
-		aligned_uint64_t license;
-		uint32_t log_level;	/* verbosity level of verifier */
-		uint32_t log_size;	/* size of user buffer */
-		aligned_uint64_t log_buf;	/* user supplied buffer */
-		uint32_t kern_version;	/* not used */
-		uint32_t prog_flags;
+		__u32 prog_type;	/* one of enum bpf_prog_type */
+		__u32 insn_cnt;
+		__kernel_aligned_uintptr_t insns;
+		__kernel_aligned_uintptr_t license;
+		__u32 log_level;	/* verbosity level of verifier */
+		__u32 log_size;		/* size of user buffer */
+		__kernel_aligned_uintptr_t log_buf;	/* user supplied buffer */
+		__u32 kern_version;	/* not used */
+		__u32 prog_flags;
 		char prog_name[BPF_OBJ_NAME_LEN];
-		uint32_t prog_ifindex;	/* ifindex of netdev to prep for */
+		__u32 prog_ifindex;	/* ifindex of netdev to prep for */
 		/* For some prog types expected attach type must be known at
 		 * load time to verify attach type specific parts of prog
 		 * (context accesses, allowed helpers, etc).
 		 */
-		uint32_t expected_attach_type;
-		uint32_t prog_btf_fd;	/* fd pointing to BTF type data */
-		uint32_t func_info_rec_size;	/* userspace bpf_func_info size */
-		aligned_uint64_t func_info;	/* func info */
-		uint32_t func_info_cnt;	/* number of bpf_func_info records */
-		uint32_t line_info_rec_size;	/* userspace bpf_line_info size */
-		aligned_uint64_t line_info;	/* line info */
-		uint32_t line_info_cnt;	/* number of bpf_line_info records */
+		__u32 expected_attach_type;
+		__u32 prog_btf_fd;	/* fd pointing to BTF type data */
+		__u32 func_info_rec_size;	/* userspace bpf_func_info size */
+		__kernel_aligned_uintptr_t func_info;	/* func info */
+		__u32 func_info_cnt;	/* number of bpf_func_info records */
+		__u32 line_info_rec_size;	/* userspace bpf_line_info size */
+		__kernel_aligned_uintptr_t line_info;	/* line info */
+		__u32 line_info_cnt;	/* number of bpf_line_info records */
+		__u32 attach_btf_id;	/* in-kernel BTF type id to attach to */
+		union {
+			/* valid prog_fd to attach to bpf prog */
+			__u32 attach_prog_fd;
+			/* or valid module BTF object fd or 0 to attach to vmlinux */
+			__u32 attach_btf_obj_fd;
+		};
+		__u32 core_relo_cnt;	/* number of bpf_core_relo */
+		__kernel_aligned_uintptr_t fd_array;	/* array of FDs */
+		__kernel_aligned_uintptr_t core_relos;
+		__u32 core_relo_rec_size;	/* sizeof(struct bpf_core_relo) */
+		/* output: actual total log contents size (including termintaing zero).
+		 * It could be both larger than original log_size (if log was
+		 * truncated), or smaller (if log buffer wasn't filled completely).
+		 */
+		__u32 log_true_size;
 	};
 	struct { /* anonymous struct used by BPF_OBJ_* commands */
-		aligned_uint64_t pathname;
-		uint32_t bpf_fd;
-		uint32_t file_flags;
+		__kernel_aligned_uintptr_t pathname;
+		__u32 bpf_fd;
+		__u32 file_flags;
 	};
 
 	struct { /* anonymous struct used by BPF_PROG_ATTACH/DETACH commands */
-		uint32_t target_fd;	/* container object to attach to */
-		uint32_t attach_bpf_fd;	/* eBPF program to attach */
-		uint32_t attach_type;
-		uint32_t attach_flags;
+		__u32 target_fd;	/* container object to attach to */
+		__u32 attach_bpf_fd;	/* eBPF program to attach */
+		__u32 attach_type;
+		__u32 attach_flags;
+		__u32 replace_bpf_fd;	/* previously attached eBPF
+					 * program to replace if
+					 * BPF_F_REPLACE is used
+					 */
 	};
 
 	struct { /* anonymous struct used by BPF_PROG_TEST_RUN command */
-		uint32_t prog_fd;
-		uint32_t retval;
-		uint32_t data_size_in;	/* input: len of data_in */
-		uint32_t data_size_out;	/* input/output: len of data_out
+		__u32 prog_fd;
+		__u32 retval;
+		__u32 data_size_in;	/* input: len of data_in */
+		__u32 data_size_out;	/* input/output: len of data_out
 					 * returns ENOSPC if data_out
 					 * is too small.
 					 */
-		aligned_uint64_t data_in;
-		aligned_uint64_t data_out;
-		uint32_t repeat;
-		uint32_t duration;
-		uint32_t ctx_size_in;	/* input: len of ctx_in */
-		uint32_t ctx_size_out;	/* input/output: len of ctx_out
+		__kernel_aligned_uintptr_t data_in;
+		__kernel_aligned_uintptr_t data_out;
+		__u32 repeat;
+		__u32 duration;
+		__u32 ctx_size_in;	/* input: len of ctx_in */
+		__u32 ctx_size_out;	/* input/output: len of ctx_out
 					 * returns ENOSPC if ctx_out
 					 * is too small.
 					 */
-		aligned_uint64_t ctx_in;
-		aligned_uint64_t ctx_out;
+		__kernel_aligned_uintptr_t ctx_in;
+		__kernel_aligned_uintptr_t ctx_out;
+		__u32 flags;
+		__u32 cpu;
+		__u32 batch_size;
 	} test;
 
 	struct { /* anonymous struct used by BPF_*_GET_*_ID */
 		union {
-			uint32_t start_id;
-			uint32_t prog_id;
-			uint32_t map_id;
-			uint32_t btf_id;
+			__u32 start_id;
+			__u32 prog_id;
+			__u32 map_id;
+			__u32 btf_id;
+			__u32 link_id;
 		};
-		uint32_t next_id;
-		uint32_t open_flags;
+		__u32 next_id;
+		__u32 open_flags;
 	};
 
 	struct { /* anonymous struct used by BPF_OBJ_GET_INFO_BY_FD */
-		uint32_t bpf_fd;
-		uint32_t info_len;
-		aligned_uint64_t info;
+		__u32 bpf_fd;
+		__u32 info_len;
+		__kernel_aligned_uintptr_t info;
 	} info;
 
 	struct { /* anonymous struct used by BPF_PROG_QUERY command */
-		uint32_t target_fd;	/* container object to query */
-		uint32_t attach_type;
-		uint32_t query_flags;
-		uint32_t attach_flags;
-		aligned_uint64_t prog_ids;
-		uint32_t prog_cnt;
+		__u32 target_fd;	/* container object to query */
+		__u32 attach_type;
+		__u32 query_flags;
+		__u32 attach_flags;
+		__kernel_aligned_uintptr_t prog_ids;
+		__u32 prog_cnt;
+		/* output: per-program attach_flags.
+		 * not allowed to be set during effective query.
+		 */
+		__kernel_aligned_uintptr_t prog_attach_flags;
 	} query;
 
-	struct {
-		uint64_t name;
-		uint32_t prog_fd;
+	struct { /* anonymous struct used by BPF_RAW_TRACEPOINT_OPEN command */
+		__kernel_aligned_uintptr_t name;
+		__u32 prog_fd;
 	} raw_tracepoint;
 
 	struct { /* anonymous struct for BPF_BTF_LOAD */
-		aligned_uint64_t btf;
-		aligned_uint64_t btf_log_buf;
-		uint32_t btf_size;
-		uint32_t btf_log_size;
-		uint32_t btf_log_level;
+		__kernel_aligned_uintptr_t btf;
+		__kernel_aligned_uintptr_t btf_log_buf;
+		__u32 btf_size;
+		__u32 btf_log_size;
+		__u32 btf_log_level;
+		/* output: actual total log contents size (including termintaing zero).
+		 * It could be both larger than original log_size (if log was
+		 * truncated), or smaller (if log buffer wasn't filled completely).
+		 */
+		__u32 btf_log_true_size;
 	};
 	struct {
-		uint32_t pid;		/* input: pid */
-		uint32_t fd;		/* input: fd */
-		uint32_t flags;		/* input: flags */
-		uint32_t buf_len;	/* input/output: buf len */
-		aligned_uint64_t buf;	/* input/output:
+		__u32 pid;		/* input: pid */
+		__u32 fd;		/* input: fd */
+		__u32 flags;		/* input: flags */
+		__u32 buf_len;		/* input/output: buf len */
+		__kernel_aligned_uintptr_t buf;	/* input/output:
 					 * tp_name for tracepoint
 					 * symbol for kprobe
 					 * filename for uprobe
 					 */
-		uint32_t prog_id;	/* output: prod_id */
-		uint32_t fd_type;	/* output: BPF_FD_TYPE_* */
-		uint64_t probe_offset;	/* output: probe_offset */
-		uint64_t probe_addr;	/* output: probe_addr */
+		__u32 prog_id;		/* output: prod_id */
+		__u32 fd_type;		/* output: BPF_FD_TYPE_* */
+		__u64 probe_offset;	/* output: probe_offset */
+		__u64 probe_addr;	/* output: probe_addr */
 	} task_fd_query;
+
+	struct { /* struct used by BPF_LINK_CREATE command */
+		union {
+			__u32 prog_fd;	/* eBPF program to attach */
+			__u32 map_fd;	/* struct_ops to attach */
+		};
+		union {
+			__u32 target_fd;	/* object to attach to */
+			__u32 target_ifindex;	/* target ifindex */
+		};
+		__u32 attach_type;	/* attach type */
+		__u32 flags;		/* extra flags */
+		union {
+			__u32 target_btf_id;	/* btf_id of target to attach to */
+			struct {
+				/* extra bpf_iter_link_info */
+				__kernel_aligned_uintptr_t iter_info;
+				/* iter_info length */
+				__u32 iter_info_len;
+			};
+			struct {
+				/* black box user-provided value passed through
+				 * to BPF program at the execution time and
+				 * accessible through bpf_get_attach_cookie() BPF helper
+				 */
+				__u64 bpf_cookie;
+			} perf_event;
+			struct {
+				__u32 flags;
+				__u32 cnt;
+				__kernel_aligned_uintptr_t syms;
+				__kernel_aligned_uintptr_t addrs;
+				__kernel_aligned_uintptr_t cookies;
+			} kprobe_multi;
+			struct {
+				/* this is overlaid with the target_btf_id above. */
+				__u32 target_btf_id;
+				/* black box user-provided value passed through
+				 * to BPF program at the execution time and
+				 * accessible through bpf_get_attach_cookie() BPF helper
+				 */
+				__u64 cookie;
+			} tracing;
+			struct {
+				__u32 pf;
+				__u32 hooknum;
+				__s32 priority;
+				__u32 flags;
+			} netfilter;
+		};
+	} link_create;
+
+	struct { /* struct used by BPF_LINK_UPDATE command */
+		__u32 link_fd;		/* link fd */
+		union {
+			/* new program fd to update link with */
+			__u32 new_prog_fd;
+			/* new struct_ops map fd to update link with */
+			__u32 new_map_fd;
+		};
+		__u32 flags;		/* extra flags */
+		union {
+			/* expected link's program fd; is specified only if
+			 * BPF_F_REPLACE flag is set in flags.
+			 */
+			__u32 old_prog_fd;
+			/* expected link's map fd; is specified only
+			 * if BPF_F_REPLACE flag is set.
+			 */
+			__u32 old_map_fd;
+		};
+	} link_update;
+
+	struct {
+		__u32 link_fd;
+	} link_detach;
+
+	struct { /* struct used by BPF_ENABLE_STATS command */
+		__u32 type;
+	} enable_stats;
+
+	struct { /* struct used by BPF_ITER_CREATE command */
+		__u32 link_fd;
+		__u32 flags;
+	} iter_create;
+
+	struct { /* struct used by BPF_PROG_BIND_MAP command */
+		__u32 prog_fd;
+		__u32 map_fd;
+		__u32 flags;		/* extra flags */
+	} prog_bind_map;
+
 } __attribute__((aligned(8)));
 
 #define __BPF_FUNC_MAPPER(FN) \