union bpf_attr and structs bpf_xyz_info are used to pass config to the kernel or return info to userspace via the bpf syscall.
In order to avoid needing a separate 32-bit compat handler for the bpf syscall, __user pointers inside these union/structs are stored as __u64 or __aligned_u64.
In Morello PCuABI a user pointer is a 129-bit capability, so the __aligned_u64 type is not big enough to hold it. Use type __kernel_aligned_uintptr_t instead, which is big enough on the affected architectures while remaining __aligned_u64 on others.
Use copy_from_user_with_ptr where blocks of memory containing pointers are being copied, in order to preserve capabilities.
Signed-off-by: Zachary Leaf zachary.leaf@arm.com --- drivers/media/rc/bpf-lirc.c | 2 +- include/linux/bpfptr.h | 6 +- include/uapi/linux/bpf.h | 94 +++++++++++---------- kernel/bpf/bpf_iter.c | 2 +- kernel/bpf/btf.c | 12 +-- kernel/bpf/cgroup.c | 5 +- kernel/bpf/hashtab.c | 8 +- kernel/bpf/net_namespace.c | 2 +- kernel/bpf/offload.c | 2 +- kernel/bpf/syscall.c | 163 +++++++++++++++++++++--------------- kernel/bpf/verifier.c | 2 +- kernel/trace/bpf_trace.c | 6 +- net/bpf/test_run.c | 16 ++-- net/core/sock_map.c | 2 +- 14 files changed, 177 insertions(+), 145 deletions(-)
diff --git a/drivers/media/rc/bpf-lirc.c b/drivers/media/rc/bpf-lirc.c index 9988232aeab5..c49d80a6dbef 100644 --- a/drivers/media/rc/bpf-lirc.c +++ b/drivers/media/rc/bpf-lirc.c @@ -295,7 +295,7 @@ int lirc_prog_detach(const union bpf_attr *attr)
int lirc_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr) { - __u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids); + __u32 __user *prog_ids = (__u32 __user *)attr->query.prog_ids; struct bpf_prog_array *progs; struct rc_dev *rcdev; u32 cnt, flags = 0; diff --git a/include/linux/bpfptr.h b/include/linux/bpfptr.h index 407e25d608eb..2aebf9b59035 100644 --- a/include/linux/bpfptr.h +++ b/include/linux/bpfptr.h @@ -23,12 +23,12 @@ static inline bpfptr_t USER_BPFPTR(void __user *p) return (bpfptr_t) { .user = p }; }
-static inline bpfptr_t make_bpfptr(u64 addr, bool is_kernel) +static inline bpfptr_t make_bpfptr(__kernel_uintptr_t ptr, bool is_kernel) { if (is_kernel) - return KERNEL_BPFPTR((void*) (uintptr_t) addr); + return KERNEL_BPFPTR((void *)(uintptr_t)ptr); else - return USER_BPFPTR(u64_to_user_ptr(addr)); + return USER_BPFPTR((void __user *)ptr); }
static inline bool bpfptr_is_null(bpfptr_t bpfptr) diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 51b9aa640ad2..e95709cf4147 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -1330,21 +1330,21 @@ union bpf_attr {
struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */ __u32 map_fd; - __aligned_u64 key; + __kernel_aligned_uintptr_t key; union { - __aligned_u64 value; - __aligned_u64 next_key; + __kernel_aligned_uintptr_t value; + __kernel_aligned_uintptr_t next_key; }; __u64 flags; };
struct { /* struct used by BPF_MAP_*_BATCH commands */ - __aligned_u64 in_batch; /* start batch, - * NULL to start from beginning - */ - __aligned_u64 out_batch; /* output: next start batch */ - __aligned_u64 keys; - __aligned_u64 values; + /* start batch, NULL to start from beginning */ + __kernel_aligned_uintptr_t in_batch; + /* output: next start batch */ + __kernel_aligned_uintptr_t out_batch; + __kernel_aligned_uintptr_t keys; + __kernel_aligned_uintptr_t values; __u32 count; /* input/output: * input: # of key/value * elements @@ -1358,11 +1358,11 @@ union bpf_attr { struct { /* anonymous struct used by BPF_PROG_LOAD command */ __u32 prog_type; /* one of enum bpf_prog_type */ __u32 insn_cnt; - __aligned_u64 insns; - __aligned_u64 license; + __kernel_aligned_uintptr_t insns; + __kernel_aligned_uintptr_t license; __u32 log_level; /* verbosity level of verifier */ __u32 log_size; /* size of user buffer */ - __aligned_u64 log_buf; /* user supplied buffer */ + __kernel_aligned_uintptr_t log_buf; /* user supplied buffer */ __u32 kern_version; /* not used */ __u32 prog_flags; char prog_name[BPF_OBJ_NAME_LEN]; @@ -1374,10 +1374,10 @@ union bpf_attr { __u32 expected_attach_type; __u32 prog_btf_fd; /* fd pointing to BTF type data */ __u32 func_info_rec_size; /* userspace bpf_func_info size */ - __aligned_u64 func_info; /* func info */ + __kernel_aligned_uintptr_t func_info; /* func info */ __u32 func_info_cnt; /* number of bpf_func_info records */ __u32 line_info_rec_size; /* userspace bpf_line_info size */ - __aligned_u64 line_info; /* line info */ + __kernel_aligned_uintptr_t line_info; /* line info */ __u32 line_info_cnt; /* number of bpf_line_info records */ __u32 attach_btf_id; /* in-kernel BTF type id to attach to */ union { @@ -1387,13 +1387,13 @@ union bpf_attr { __u32 attach_btf_obj_fd; }; __u32 core_relo_cnt; /* number of bpf_core_relo */ - __aligned_u64 fd_array; /* array of FDs */ - __aligned_u64 core_relos; + __kernel_aligned_uintptr_t fd_array; /* array of 
FDs */ + __kernel_aligned_uintptr_t core_relos; __u32 core_relo_rec_size; /* sizeof(struct bpf_core_relo) */ };
struct { /* anonymous struct used by BPF_OBJ_* commands */ - __aligned_u64 pathname; + __kernel_aligned_uintptr_t pathname; __u32 bpf_fd; __u32 file_flags; }; @@ -1417,8 +1417,8 @@ union bpf_attr { * returns ENOSPC if data_out * is too small. */ - __aligned_u64 data_in; - __aligned_u64 data_out; + __kernel_aligned_uintptr_t data_in; + __kernel_aligned_uintptr_t data_out; __u32 repeat; __u32 duration; __u32 ctx_size_in; /* input: len of ctx_in */ @@ -1426,8 +1426,8 @@ union bpf_attr { * returns ENOSPC if ctx_out * is too small. */ - __aligned_u64 ctx_in; - __aligned_u64 ctx_out; + __kernel_aligned_uintptr_t ctx_in; + __kernel_aligned_uintptr_t ctx_out; __u32 flags; __u32 cpu; __u32 batch_size; @@ -1448,7 +1448,7 @@ union bpf_attr { struct { /* anonymous struct used by BPF_OBJ_GET_INFO_BY_FD */ __u32 bpf_fd; __u32 info_len; - __aligned_u64 info; + __kernel_aligned_uintptr_t info; } info;
struct { /* anonymous struct used by BPF_PROG_QUERY command */ @@ -1456,22 +1456,22 @@ union bpf_attr { __u32 attach_type; __u32 query_flags; __u32 attach_flags; - __aligned_u64 prog_ids; + __kernel_aligned_uintptr_t prog_ids; __u32 prog_cnt; /* output: per-program attach_flags. * not allowed to be set during effective query. */ - __aligned_u64 prog_attach_flags; + __kernel_aligned_uintptr_t prog_attach_flags; } query;
struct { /* anonymous struct used by BPF_RAW_TRACEPOINT_OPEN command */ - __u64 name; + __kernel_aligned_uintptr_t name; __u32 prog_fd; } raw_tracepoint;
struct { /* anonymous struct for BPF_BTF_LOAD */ - __aligned_u64 btf; - __aligned_u64 btf_log_buf; + __kernel_aligned_uintptr_t btf; + __kernel_aligned_uintptr_t btf_log_buf; __u32 btf_size; __u32 btf_log_size; __u32 btf_log_level; @@ -1482,7 +1482,7 @@ union bpf_attr { __u32 fd; /* input: fd */ __u32 flags; /* input: flags */ __u32 buf_len; /* input/output: buf len */ - __aligned_u64 buf; /* input/output: + __kernel_aligned_uintptr_t buf; /* input/output: * tp_name for tracepoint * symbol for kprobe * filename for uprobe @@ -1504,8 +1504,10 @@ union bpf_attr { union { __u32 target_btf_id; /* btf_id of target to attach to */ struct { - __aligned_u64 iter_info; /* extra bpf_iter_link_info */ - __u32 iter_info_len; /* iter_info length */ + /* extra bpf_iter_link_info */ + __kernel_aligned_uintptr_t iter_info; + /* iter_info length */ + __u32 iter_info_len; }; struct { /* black box user-provided value passed through @@ -1517,9 +1519,9 @@ union bpf_attr { struct { __u32 flags; __u32 cnt; - __aligned_u64 syms; - __aligned_u64 addrs; - __aligned_u64 cookies; + __kernel_aligned_uintptr_t syms; + __kernel_aligned_uintptr_t addrs; + __kernel_aligned_uintptr_t cookies; } kprobe_multi; struct { /* this is overlaid with the target_btf_id above. 
*/ @@ -6168,12 +6170,12 @@ struct bpf_prog_info { __u8 tag[BPF_TAG_SIZE]; __u32 jited_prog_len; __u32 xlated_prog_len; - __aligned_u64 jited_prog_insns; - __aligned_u64 xlated_prog_insns; + __kernel_aligned_uintptr_t jited_prog_insns; + __kernel_aligned_uintptr_t xlated_prog_insns; __u64 load_time; /* ns since boottime */ __u32 created_by_uid; __u32 nr_map_ids; - __aligned_u64 map_ids; + __kernel_aligned_uintptr_t map_ids; char name[BPF_OBJ_NAME_LEN]; __u32 ifindex; __u32 gpl_compatible:1; @@ -6182,20 +6184,20 @@ struct bpf_prog_info { __u64 netns_ino; __u32 nr_jited_ksyms; __u32 nr_jited_func_lens; - __aligned_u64 jited_ksyms; - __aligned_u64 jited_func_lens; + __kernel_aligned_uintptr_t jited_ksyms; + __kernel_aligned_uintptr_t jited_func_lens; __u32 btf_id; __u32 func_info_rec_size; - __aligned_u64 func_info; + __kernel_aligned_uintptr_t func_info; __u32 nr_func_info; __u32 nr_line_info; - __aligned_u64 line_info; - __aligned_u64 jited_line_info; + __kernel_aligned_uintptr_t line_info; + __kernel_aligned_uintptr_t jited_line_info; __u32 nr_jited_line_info; __u32 line_info_rec_size; __u32 jited_line_info_rec_size; __u32 nr_prog_tags; - __aligned_u64 prog_tags; + __kernel_aligned_uintptr_t prog_tags; __u64 run_time_ns; __u64 run_cnt; __u64 recursion_misses; @@ -6224,10 +6226,10 @@ struct bpf_map_info { } __attribute__((aligned(8)));
struct bpf_btf_info { - __aligned_u64 btf; + __kernel_aligned_uintptr_t btf; __u32 btf_size; __u32 id; - __aligned_u64 name; + __kernel_aligned_uintptr_t name; __u32 name_len; __u32 kernel_btf; } __attribute__((aligned(8))); @@ -6238,7 +6240,7 @@ struct bpf_link_info { __u32 prog_id; union { struct { - __aligned_u64 tp_name; /* in/out: tp_name buffer ptr */ + __kernel_aligned_uintptr_t tp_name; /* in/out: tp_name buffer ptr */ __u32 tp_name_len; /* in/out: tp_name buffer len */ } raw_tracepoint; struct { @@ -6251,7 +6253,7 @@ struct bpf_link_info { __u32 attach_type; } cgroup; struct { - __aligned_u64 target_name; /* in/out: target_name buffer ptr */ + __kernel_aligned_uintptr_t target_name; /* in/out: target_name buffer ptr */ __u32 target_name_len; /* in/out: target_name buffer len */
/* If the iter specific field is 32 bits, it can be put diff --git a/kernel/bpf/bpf_iter.c b/kernel/bpf/bpf_iter.c index 5dc307bdeaeb..fc3d2b440a16 100644 --- a/kernel/bpf/bpf_iter.c +++ b/kernel/bpf/bpf_iter.c @@ -456,7 +456,7 @@ static int bpf_iter_link_fill_link_info(const struct bpf_link *link, { struct bpf_iter_link *iter_link = container_of(link, struct bpf_iter_link, link); - char __user *ubuf = u64_to_user_ptr(info->iter.target_name); + char __user *ubuf = (char __user *)info->iter.target_name; bpf_iter_fill_link_info_t fill_link_info; u32 ulen = info->iter.target_name_len; const char *target_name; diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index 25cb5231f4df..f7da1077b488 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -6897,7 +6897,7 @@ int btf_new_fd(const union bpf_attr *attr, bpfptr_t uattr)
btf = btf_parse(make_bpfptr(attr->btf, uattr.is_kernel), attr->btf_size, attr->btf_log_level, - u64_to_user_ptr(attr->btf_log_buf), + (char __user *)attr->btf_log_buf, attr->btf_log_size); if (IS_ERR(btf)) return PTR_ERR(btf); @@ -6947,10 +6947,10 @@ struct btf *btf_get_by_fd(int fd) void convert_compat_btf_info_in(struct bpf_btf_info *dest, struct compat_bpf_btf_info *cinfo) { - dest->btf = cinfo->btf; + dest->btf = (__kernel_aligned_uintptr_t)compat_ptr(cinfo->btf); dest->btf_size = cinfo->btf_size; dest->id = cinfo->id; - dest->name = cinfo->name; + dest->name = (__kernel_aligned_uintptr_t)compat_ptr(cinfo->name); dest->name_len = cinfo->name_len; dest->kernel_btf = cinfo->kernel_btf; } @@ -7010,7 +7010,7 @@ int btf_get_info_by_fd(const struct btf *btf, } else #endif { - uinfo = u64_to_user_ptr(attr->info.info); + uinfo = (struct bpf_btf_info __user *)attr->info.info; uinfo_len = attr->info.info_len;
ret = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), @@ -7029,7 +7029,7 @@ int btf_get_info_by_fd(const struct btf *btf, * anything in compat64 case */ info.id = btf->id; - ubtf = u64_to_user_ptr(info.btf); + ubtf = (void __user *)info.btf; btf_copy = min_t(u32, btf->data_size, info.btf_size); if (copy_to_user(ubtf, btf->data, btf_copy)) return -EFAULT; @@ -7037,7 +7037,7 @@ int btf_get_info_by_fd(const struct btf *btf,
info.kernel_btf = btf->kernel_btf;
- uname = u64_to_user_ptr(info.name); + uname = (char __user *)info.name; uname_len = info.name_len; if (!uname ^ !uname_len) return -EINVAL; diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c index 5f9d13848986..5345898af631 100644 --- a/kernel/bpf/cgroup.c +++ b/kernel/bpf/cgroup.c @@ -1020,9 +1020,10 @@ static int cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog, static int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr, union bpf_attr __user *uattr) { - __u32 __user *prog_attach_flags = u64_to_user_ptr(attr->query.prog_attach_flags); + __u32 __user *prog_attach_flags = + (__u32 __user *)attr->query.prog_attach_flags; bool effective_query = attr->query.query_flags & BPF_F_QUERY_EFFECTIVE; - __u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids); + __u32 __user *prog_ids = (__u32 __user *)attr->query.prog_ids; enum bpf_attach_type type = attr->query.attach_type; enum cgroup_bpf_attach_type from_atype, to_atype; enum cgroup_bpf_attach_type atype; diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c index 5cf4ddc2a433..0b187a352483 100644 --- a/kernel/bpf/hashtab.c +++ b/kernel/bpf/hashtab.c @@ -1660,9 +1660,9 @@ __htab_map_lookup_and_delete_batch(struct bpf_map *map, struct bpf_htab *htab = container_of(map, struct bpf_htab, map); u32 bucket_cnt, total, key_size, value_size, roundup_key_size; void *keys = NULL, *values = NULL, *value, *dst_key, *dst_val; - void __user *uvalues = u64_to_user_ptr(attr->batch.values); - void __user *ukeys = u64_to_user_ptr(attr->batch.keys); - void __user *ubatch = u64_to_user_ptr(attr->batch.in_batch); + void __user *uvalues = (void __user *)attr->batch.values; + void __user *ukeys = (void __user *)attr->batch.keys; + void __user *ubatch = (void __user *)attr->batch.in_batch; u32 batch, max_count, size, bucket_size, map_id; struct htab_elem *node_to_free = NULL; u64 elem_map_flags, map_flags; @@ -1866,7 +1866,7 @@ __htab_map_lookup_and_delete_batch(struct bpf_map *map, goto out;
/* copy # of entries and next batch */ - ubatch = u64_to_user_ptr(attr->batch.out_batch); + ubatch = (void __user *)attr->batch.out_batch; if (copy_to_user(ubatch, &batch, sizeof(batch)) || PUT_USER_UATTR(total, batch.count)) ret = -EFAULT; diff --git a/kernel/bpf/net_namespace.c b/kernel/bpf/net_namespace.c index 3bfc97b37774..4a527ae97c2d 100644 --- a/kernel/bpf/net_namespace.c +++ b/kernel/bpf/net_namespace.c @@ -249,7 +249,7 @@ static int __netns_bpf_prog_query(const union bpf_attr *attr, struct net *net, enum netns_bpf_attach_type type) { - __u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids); + __u32 __user *prog_ids = (__u32 __user *)attr->query.prog_ids; struct bpf_prog_array *run_array; u32 prog_cnt = 0, flags = 0;
diff --git a/kernel/bpf/offload.c b/kernel/bpf/offload.c index 13e4efc971e6..cdca154ffea7 100644 --- a/kernel/bpf/offload.c +++ b/kernel/bpf/offload.c @@ -322,7 +322,7 @@ int bpf_prog_offload_info_fill(struct bpf_prog_info *info, ulen = info->jited_prog_len; info->jited_prog_len = aux->offload->jited_len; if (info->jited_prog_len && ulen) { - uinsns = u64_to_user_ptr(info->jited_prog_insns); + uinsns = (char __user *)info->jited_prog_insns; ulen = min_t(u32, info->jited_prog_len, ulen); if (copy_to_user(uinsns, aux->offload->jited_image, ulen)) { up_read(&bpf_devs_lock); diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 6fc61b71d1ce..b45855ac7c50 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -1312,8 +1312,8 @@ static void *___bpf_copy_key(bpfptr_t ukey, u64 key_size)
static int map_lookup_elem(union bpf_attr *attr) { - void __user *ukey = u64_to_user_ptr(attr->key); - void __user *uvalue = u64_to_user_ptr(attr->value); + void __user *ukey = (void __user *)attr->key; + void __user *uvalue = (void __user *)attr->value; int ufd = attr->map_fd; struct bpf_map *map; void *key, *value; @@ -1489,8 +1489,8 @@ static int map_delete_elem(union bpf_attr *attr, bpfptr_t uattr)
static int map_get_next_key(union bpf_attr *attr) { - void __user *ukey = u64_to_user_ptr(attr->key); - void __user *unext_key = u64_to_user_ptr(attr->next_key); + void __user *ukey = (void __user *)attr->key; + void __user *unext_key = (void __user *)attr->next_key; int ufd = attr->map_fd; struct bpf_map *map; void *key, *next_key; @@ -1552,7 +1552,7 @@ int generic_map_delete_batch(struct bpf_map *map, const union bpf_attr *attr, union bpf_attr __user *uattr) { - void __user *keys = u64_to_user_ptr(attr->batch.keys); + void __user *keys = (void __user *)attr->batch.keys; u32 cp, max_count; int err = 0; void *key; @@ -1606,8 +1606,8 @@ int generic_map_update_batch(struct bpf_map *map, const union bpf_attr *attr, union bpf_attr __user *uattr) { - void __user *values = u64_to_user_ptr(attr->batch.values); - void __user *keys = u64_to_user_ptr(attr->batch.keys); + void __user *values = (void __user *)attr->batch.values; + void __user *keys = (void __user *)attr->batch.keys; u32 value_size, cp, max_count; int ufd = attr->batch.map_fd; void *key, *value; @@ -1669,10 +1669,10 @@ int generic_map_lookup_batch(struct bpf_map *map, const union bpf_attr *attr, union bpf_attr __user *uattr) { - void __user *uobatch = u64_to_user_ptr(attr->batch.out_batch); - void __user *ubatch = u64_to_user_ptr(attr->batch.in_batch); - void __user *values = u64_to_user_ptr(attr->batch.values); - void __user *keys = u64_to_user_ptr(attr->batch.keys); + void __user *uobatch = (void __user *)attr->batch.out_batch; + void __user *ubatch = (void __user *)attr->batch.in_batch; + void __user *values = (void __user *)attr->batch.values; + void __user *keys = (void __user *)attr->batch.keys; void *buf, *buf_prevkey, *prev_key, *key, *value; int err, retry = MAP_LOOKUP_RETRIES; u32 value_size, cp, max_count; @@ -1769,8 +1769,8 @@ int generic_map_lookup_batch(struct bpf_map *map,
static int map_lookup_and_delete_elem(union bpf_attr *attr) { - void __user *ukey = u64_to_user_ptr(attr->key); - void __user *uvalue = u64_to_user_ptr(attr->value); + void __user *ukey = (void __user *)attr->key; + void __user *uvalue = (void __user *)attr->value; int ufd = attr->map_fd; struct bpf_map *map; void *key, *value; @@ -2645,7 +2645,7 @@ static int bpf_obj_pin(const union bpf_attr *attr) if (attr->file_flags != 0) return -EINVAL;
- return bpf_obj_pin_user(attr->bpf_fd, u64_to_user_ptr(attr->pathname)); + return bpf_obj_pin_user(attr->bpf_fd, (void __user *)attr->pathname); }
static int bpf_obj_get(const union bpf_attr *attr) @@ -2653,7 +2653,7 @@ static int bpf_obj_get(const union bpf_attr *attr) if (attr->bpf_fd != 0 || attr->file_flags & ~BPF_OBJ_FLAG_MASK) return -EINVAL;
- return bpf_obj_get_user(u64_to_user_ptr(attr->pathname), + return bpf_obj_get_user((void __user *)attr->pathname, attr->file_flags); }
@@ -3148,7 +3148,7 @@ static int bpf_raw_tp_link_fill_link_info(const struct bpf_link *link, { struct bpf_raw_tp_link *raw_tp_link = container_of(link, struct bpf_raw_tp_link, link); - char __user *ubuf = u64_to_user_ptr(info->raw_tracepoint.tp_name); + char __user *ubuf = (char __user *)info->raw_tracepoint.tp_name; const char *tp_name = raw_tp_link->btp->tp->name; u32 ulen = info->raw_tracepoint.tp_name_len; size_t tp_len = strlen(tp_name); @@ -3341,7 +3341,8 @@ static int bpf_raw_tracepoint_open(const union bpf_attr *attr) if (IS_ERR(prog)) return PTR_ERR(prog);
- fd = bpf_raw_tp_link_attach(prog, u64_to_user_ptr(attr->raw_tracepoint.name)); + fd = bpf_raw_tp_link_attach(prog, + (const char __user *)attr->raw_tracepoint.name); if (fd < 0) bpf_prog_put(prog); return fd; @@ -3864,12 +3865,15 @@ void convert_compat_prog_info_in(struct bpf_prog_info *dest, strncpy((char *)dest->tag, (char *)cinfo->tag, BPF_TAG_SIZE); dest->jited_prog_len = cinfo->jited_prog_len; dest->xlated_prog_len = cinfo->xlated_prog_len; - dest->jited_prog_insns = cinfo->jited_prog_insns; - dest->xlated_prog_insns = cinfo->xlated_prog_insns; + dest->jited_prog_insns = (__kernel_aligned_uintptr_t) + compat_ptr(cinfo->jited_prog_insns); + dest->xlated_prog_insns = (__kernel_aligned_uintptr_t) + compat_ptr(cinfo->xlated_prog_insns); dest->load_time = cinfo->load_time; dest->created_by_uid = cinfo->created_by_uid; dest->nr_map_ids = cinfo->nr_map_ids; - dest->map_ids = cinfo->map_ids; + dest->map_ids = (__kernel_aligned_uintptr_t) + compat_ptr(cinfo->map_ids); strncpy((char *)dest->name, (char *)cinfo->name, BPF_OBJ_NAME_LEN); dest->ifindex = cinfo->ifindex; dest->gpl_compatible = cinfo->gpl_compatible; @@ -3877,20 +3881,26 @@ void convert_compat_prog_info_in(struct bpf_prog_info *dest, dest->netns_ino = cinfo->netns_ino; dest->nr_jited_ksyms = cinfo->nr_jited_ksyms; dest->nr_jited_func_lens = cinfo->nr_jited_func_lens; - dest->jited_ksyms = cinfo->jited_ksyms; - dest->jited_func_lens = cinfo->jited_func_lens; + dest->jited_ksyms = (__kernel_aligned_uintptr_t) + compat_ptr(cinfo->jited_ksyms); + dest->jited_func_lens = (__kernel_aligned_uintptr_t) + compat_ptr(cinfo->jited_func_lens); dest->btf_id = cinfo->btf_id; dest->func_info_rec_size = cinfo->func_info_rec_size; - dest->func_info = cinfo->func_info; + dest->func_info = (__kernel_aligned_uintptr_t) + compat_ptr(cinfo->func_info); dest->nr_func_info = cinfo->nr_func_info; dest->nr_line_info = cinfo->nr_line_info; - dest->line_info = cinfo->line_info; - dest->jited_line_info = cinfo->jited_line_info; + 
dest->line_info = (__kernel_aligned_uintptr_t) + compat_ptr(cinfo->line_info); + dest->jited_line_info = (__kernel_aligned_uintptr_t) + compat_ptr(cinfo->jited_line_info); dest->nr_jited_line_info = cinfo->nr_jited_line_info; dest->line_info_rec_size = cinfo->line_info_rec_size; dest->jited_line_info_rec_size = cinfo->jited_line_info_rec_size; dest->nr_prog_tags = cinfo->nr_prog_tags; - dest->prog_tags = cinfo->prog_tags; + dest->prog_tags = (__kernel_aligned_uintptr_t) + compat_ptr(cinfo->prog_tags); dest->run_time_ns = cinfo->run_time_ns; dest->run_cnt = cinfo->run_cnt; dest->recursion_misses = cinfo->recursion_misses; @@ -3988,7 +3998,7 @@ static int bpf_prog_get_info_by_fd(struct file *file, } else #endif { - uinfo = u64_to_user_ptr(attr->info.info); + uinfo = (struct bpf_prog_info __user *)attr->info.info; info_len = attr->info.info_len;
err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), @@ -4017,7 +4027,7 @@ static int bpf_prog_get_info_by_fd(struct file *file, info.nr_map_ids = prog->aux->used_map_cnt; ulen = min_t(u32, info.nr_map_ids, ulen); if (ulen) { - u32 __user *user_map_ids = u64_to_user_ptr(info.map_ids); + u32 __user *user_map_ids = (u32 __user *)info.map_ids; u32 i;
for (i = 0; i < ulen; i++) @@ -4064,7 +4074,7 @@ static int bpf_prog_get_info_by_fd(struct file *file, insns_sanitized = bpf_insn_prepare_dump(prog, file->f_cred); if (!insns_sanitized) return -ENOMEM; - uinsns = u64_to_user_ptr(info.xlated_prog_insns); + uinsns = (char __user *)info.xlated_prog_insns; ulen = min_t(u32, info.xlated_prog_len, ulen); fault = copy_to_user(uinsns, insns_sanitized, ulen); kfree(insns_sanitized); @@ -4096,7 +4106,7 @@ static int bpf_prog_get_info_by_fd(struct file *file,
if (info.jited_prog_len && ulen) { if (bpf_dump_raw_ok(file->f_cred)) { - uinsns = u64_to_user_ptr(info.jited_prog_insns); + uinsns = (char __user *)info.jited_prog_insns; ulen = min_t(u32, info.jited_prog_len, ulen);
/* for multi-function programs, copy the JITed @@ -4139,7 +4149,7 @@ static int bpf_prog_get_info_by_fd(struct file *file, * corresponding to each function */ ulen = min_t(u32, info.nr_jited_ksyms, ulen); - user_ksyms = u64_to_user_ptr(info.jited_ksyms); + user_ksyms = (u64 __user *)info.jited_ksyms; if (prog->aux->func_cnt) { for (i = 0; i < ulen; i++) { ksym_addr = (unsigned long) @@ -4167,7 +4177,7 @@ static int bpf_prog_get_info_by_fd(struct file *file,
/* copy the JITed image lengths for each function */ ulen = min_t(u32, info.nr_jited_func_lens, ulen); - user_lens = u64_to_user_ptr(info.jited_func_lens); + user_lens = (u32 __user *)info.jited_func_lens; if (prog->aux->func_cnt) { for (i = 0; i < ulen; i++) { func_len = @@ -4196,7 +4206,7 @@ static int bpf_prog_get_info_by_fd(struct file *file, if (info.nr_func_info && ulen) { char __user *user_finfo;
- user_finfo = u64_to_user_ptr(info.func_info); + user_finfo = (char __user *)info.func_info; ulen = min_t(u32, info.nr_func_info, ulen); if (copy_to_user(user_finfo, prog->aux->func_info, info.func_info_rec_size * ulen)) @@ -4208,7 +4218,7 @@ static int bpf_prog_get_info_by_fd(struct file *file, if (info.nr_line_info && ulen) { __u8 __user *user_linfo;
- user_linfo = u64_to_user_ptr(info.line_info); + user_linfo = (__u8 __user *)info.line_info; ulen = min_t(u32, info.nr_line_info, ulen); if (copy_to_user(user_linfo, prog->aux->linfo, info.line_info_rec_size * ulen)) @@ -4226,7 +4236,7 @@ static int bpf_prog_get_info_by_fd(struct file *file, __u64 __user *user_linfo; u32 i;
- user_linfo = u64_to_user_ptr(info.jited_line_info); + user_linfo = (__u64 __user *)info.jited_line_info; ulen = min_t(u32, info.nr_jited_line_info, ulen); for (i = 0; i < ulen; i++) { line_addr = (unsigned long)prog->aux->jited_linfo[i]; @@ -4241,20 +4251,20 @@ static int bpf_prog_get_info_by_fd(struct file *file, ulen = info.nr_prog_tags; info.nr_prog_tags = prog->aux->func_cnt ? : 1; if (ulen) { - __u8 __user (*user_prog_tags)[BPF_TAG_SIZE]; + __u8 __user *user_prog_tags; u32 i;
- user_prog_tags = u64_to_user_ptr(info.prog_tags); + user_prog_tags = (__u8 __user *)info.prog_tags; ulen = min_t(u32, info.nr_prog_tags, ulen); if (prog->aux->func_cnt) { for (i = 0; i < ulen; i++) { - if (copy_to_user(user_prog_tags[i], + if (copy_to_user(user_prog_tags+i, prog->aux->func[i]->tag, BPF_TAG_SIZE)) return -EFAULT; } } else { - if (copy_to_user(user_prog_tags[0], + if (copy_to_user(user_prog_tags, prog->tag, BPF_TAG_SIZE)) return -EFAULT; } @@ -4336,7 +4346,7 @@ static int bpf_map_get_info_by_fd(struct file *file, } else #endif { - uinfo = u64_to_user_ptr(attr->info.info); + uinfo = (struct bpf_map_info __user *)attr->info.info; info_len = attr->info.info_len;
err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), @@ -4404,14 +4414,14 @@ void convert_compat_link_info_in(struct bpf_link_info *dest, * rest do not need conversion in */ if (type == BPF_LINK_TYPE_RAW_TRACEPOINT) { - dest->raw_tracepoint.tp_name = - cinfo->raw_tracepoint.tp_name; - dest->raw_tracepoint.tp_name_len = - cinfo->raw_tracepoint.tp_name_len; + dest->raw_tracepoint.tp_name = (__kernel_aligned_uintptr_t) + compat_ptr(cinfo->raw_tracepoint.tp_name); + dest->raw_tracepoint.tp_name_len = cinfo->raw_tracepoint.tp_name_len; return; } if (type == BPF_LINK_TYPE_ITER) { - dest->iter.target_name = cinfo->iter.target_name; + dest->iter.target_name = (__kernel_aligned_uintptr_t) + compat_ptr(cinfo->iter.target_name); dest->iter.target_name_len = cinfo->iter.target_name_len; return; } @@ -4536,7 +4546,7 @@ static int bpf_link_get_info_by_fd(struct file *file, } else #endif { - uinfo = u64_to_user_ptr(attr->info.info); + uinfo = (struct bpf_link_info __user *)attr->info.info; info_len = attr->info.info_len;
err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), @@ -4635,7 +4645,7 @@ static int bpf_task_fd_query_copy(const union bpf_attr *attr, const char *buf, u64 probe_offset, u64 probe_addr) { - char __user *ubuf = u64_to_user_ptr(attr->task_fd_query.buf); + char __user *ubuf = (char __user *)attr->task_fd_query.buf; u32 len = buf ? strlen(buf) : 0, input_len; int err = 0;
@@ -5399,19 +5409,25 @@ static void convert_compat_bpf_attr(union bpf_attr *dest, const union compat_bpf case BPF_MAP_DELETE_ELEM: case BPF_MAP_LOOKUP_AND_DELETE_ELEM: dest->map_fd = cattr->map_fd; - dest->key = cattr->key; - dest->value = cattr->value; - /* u64 next_key is in a union with u64 value */ + dest->key = + (__kernel_aligned_uintptr_t)compat_ptr(cattr->key); + dest->value = + (__kernel_aligned_uintptr_t)compat_ptr(cattr->value); + /* next_key is in a union with value */ dest->flags = cattr->flags; break; case BPF_MAP_LOOKUP_BATCH: case BPF_MAP_LOOKUP_AND_DELETE_BATCH: case BPF_MAP_UPDATE_BATCH: case BPF_MAP_DELETE_BATCH: - dest->batch.in_batch = cattr->batch.in_batch; - dest->batch.out_batch = cattr->batch.out_batch; - dest->batch.keys = cattr->batch.keys; - dest->batch.values = cattr->batch.values; + dest->batch.in_batch = + (__kernel_aligned_uintptr_t)compat_ptr(cattr->batch.in_batch); + dest->batch.out_batch = + (__kernel_aligned_uintptr_t)compat_ptr(cattr->batch.out_batch); + dest->batch.keys = + (__kernel_aligned_uintptr_t)compat_ptr(cattr->batch.keys); + dest->batch.values = + (__kernel_aligned_uintptr_t)compat_ptr(cattr->batch.values); dest->batch.count = cattr->batch.count; dest->batch.map_fd = cattr->batch.map_fd; dest->batch.elem_flags = cattr->batch.elem_flags; @@ -5420,11 +5436,14 @@ static void convert_compat_bpf_attr(union bpf_attr *dest, const union compat_bpf case BPF_PROG_LOAD: dest->prog_type = cattr->prog_type; dest->insn_cnt = cattr->insn_cnt; - dest->insns = cattr->insns; - dest->license = cattr->license; + dest->insns = + (__kernel_aligned_uintptr_t)compat_ptr(cattr->insns); + dest->license = + (__kernel_aligned_uintptr_t)compat_ptr(cattr->license); dest->log_level = cattr->log_level; dest->log_size = cattr->log_size; - dest->log_buf = cattr->log_buf; + dest->log_buf = + (__kernel_aligned_uintptr_t)compat_ptr(cattr->log_buf); dest->kern_version = cattr->kern_version; dest->prog_flags = cattr->prog_flags; strncpy(dest->prog_name, 
cattr->prog_name, BPF_OBJ_NAME_LEN); @@ -5432,22 +5451,27 @@ static void convert_compat_bpf_attr(union bpf_attr *dest, const union compat_bpf dest->expected_attach_type = cattr->expected_attach_type; dest->prog_btf_fd = cattr->prog_btf_fd; dest->func_info_rec_size = cattr->func_info_rec_size; - dest->func_info = cattr->func_info; + dest->func_info = + (__kernel_aligned_uintptr_t)compat_ptr(cattr->func_info); dest->func_info_cnt = cattr->func_info_cnt; dest->line_info_rec_size = cattr->line_info_rec_size; - dest->line_info = cattr->line_info; + dest->line_info = + (__kernel_aligned_uintptr_t)compat_ptr(cattr->line_info); dest->line_info_cnt = cattr->line_info_cnt; dest->attach_btf_id = cattr->attach_btf_id; dest->attach_prog_fd = cattr->attach_prog_fd; /* u32 attach_btf_obj_fd is in a union with u32 attach_prog_fd */ dest->core_relo_cnt = cattr->core_relo_cnt; - dest->fd_array = cattr->fd_array; - dest->core_relos = cattr->core_relos; + dest->fd_array = + (__kernel_aligned_uintptr_t)compat_ptr(cattr->fd_array); + dest->core_relos = + (__kernel_aligned_uintptr_t)compat_ptr(cattr->core_relos); dest->core_relo_rec_size = cattr->core_relo_rec_size; break; case BPF_OBJ_PIN: case BPF_OBJ_GET: - dest->pathname = cattr->pathname; + dest->pathname = + (__kernel_aligned_uintptr_t)compat_ptr(cattr->pathname); dest->bpf_fd = cattr->bpf_fd; dest->file_flags = cattr->file_flags; break; @@ -5464,14 +5488,18 @@ static void convert_compat_bpf_attr(union bpf_attr *dest, const union compat_bpf dest->test.retval = cattr->test.retval; dest->test.data_size_in = cattr->test.data_size_in; dest->test.data_size_out = cattr->test.data_size_out; - dest->test.data_in = cattr->test.data_in; - dest->test.data_out = cattr->test.data_out; + dest->test.data_in = + (__kernel_aligned_uintptr_t)compat_ptr(cattr->test.data_in); + dest->test.data_out = + (__kernel_aligned_uintptr_t)compat_ptr(cattr->test.data_out); dest->test.repeat = cattr->test.repeat; dest->test.duration = cattr->test.duration; 
dest->test.ctx_size_in = cattr->test.ctx_size_in; dest->test.ctx_size_out = cattr->test.ctx_size_out; - dest->test.ctx_in = cattr->test.ctx_in; - dest->test.ctx_out = cattr->test.ctx_out; + dest->test.ctx_in = + (__kernel_aligned_uintptr_t)compat_ptr(cattr->test.ctx_in); + dest->test.ctx_out = + (__kernel_aligned_uintptr_t)compat_ptr(cattr->test.ctx_out); dest->test.flags = cattr->test.flags; dest->test.cpu = cattr->test.cpu; dest->test.batch_size = cattr->test.batch_size; @@ -5493,7 +5521,8 @@ static void convert_compat_bpf_attr(union bpf_attr *dest, const union compat_bpf case BPF_OBJ_GET_INFO_BY_FD: dest->info.bpf_fd = cattr->info.bpf_fd; dest->info.info_len = cattr->info.info_len; - dest->info.info = cattr->info.info; + dest->info.info = + (__kernel_aligned_uintptr_t)compat_ptr(cattr->info.info); break; case BPF_PROG_QUERY: dest->query.target_fd = cattr->query.target_fd; diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 264b3dc714cc..d513cd5f6cb8 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -15213,7 +15213,7 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr) * and supplied buffer to store the verification trace */ log->level = attr->log_level; - log->ubuf = (char __user *) (unsigned long) attr->log_buf; + log->ubuf = (char __user *)attr->log_buf; log->len_total = attr->log_size;
/* log attributes have to be sane */ diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index 1ed08967fb97..526c8ae45f5d 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -2686,8 +2686,8 @@ int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr if (flags & ~BPF_F_KPROBE_MULTI_RETURN) return -EINVAL;
- uaddrs = u64_to_user_ptr(attr->link_create.kprobe_multi.addrs); - usyms = u64_to_user_ptr(attr->link_create.kprobe_multi.syms); + uaddrs = (void __user *)attr->link_create.kprobe_multi.addrs; + usyms = (void __user *)attr->link_create.kprobe_multi.syms; if (!!uaddrs == !!usyms) return -EINVAL;
@@ -2700,7 +2700,7 @@ int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr if (!addrs) return -ENOMEM;
- ucookies = u64_to_user_ptr(attr->link_create.kprobe_multi.cookies); + ucookies = (void __user *)attr->link_create.kprobe_multi.cookies; if (ucookies) { cookies = kvmalloc_array(cnt, sizeof(*addrs), GFP_KERNEL); if (!cookies) { diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c index 58a5d02c5a8e..92ded1c16d93 100644 --- a/net/bpf/test_run.c +++ b/net/bpf/test_run.c @@ -416,7 +416,7 @@ static int bpf_test_finish(const union bpf_attr *kattr, struct skb_shared_info *sinfo, u32 size, u32 retval, u32 duration) { - void __user *data_out = u64_to_user_ptr(kattr->test.data_out); + void __user *data_out = (void __user *)kattr->test.data_out; int err = -EFAULT; u32 copy_size = size;
@@ -766,7 +766,7 @@ BTF_SET8_END(test_sk_check_kfunc_ids) static void *bpf_test_init(const union bpf_attr *kattr, u32 user_size, u32 size, u32 headroom, u32 tailroom) { - void __user *data_in = u64_to_user_ptr(kattr->test.data_in); + void __user *data_in = (void __user *)kattr->test.data_in; void *data;
if (size < ETH_HLEN || size > PAGE_SIZE - headroom - tailroom) @@ -852,7 +852,7 @@ int bpf_prog_test_run_raw_tp(struct bpf_prog *prog, const union bpf_attr *kattr, union bpf_attr __user *uattr) { - void __user *ctx_in = u64_to_user_ptr(kattr->test.ctx_in); + void __user *ctx_in = (void __user *)kattr->test.ctx_in; __u32 ctx_size_in = kattr->test.ctx_size_in; struct bpf_raw_tp_test_run_info info; int cpu = kattr->test.cpu, err = 0; @@ -907,8 +907,8 @@ int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
static void *bpf_ctx_init(const union bpf_attr *kattr, u32 max_size) { - void __user *data_in = u64_to_user_ptr(kattr->test.ctx_in); - void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out); + void __user *data_in = (void __user *)kattr->test.ctx_in; + void __user *data_out = (void __user *)kattr->test.ctx_out; u32 size = kattr->test.ctx_size_in; void *data; int err; @@ -940,7 +940,7 @@ static int bpf_ctx_finish(const union bpf_attr *kattr, union bpf_attr __user *uattr, const void *data, u32 size) { - void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out); + void __user *data_out = (void __user *)kattr->test.ctx_out; int err = -EFAULT; u32 copy_size = size;
@@ -1347,7 +1347,7 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr, goto free_data;
if (unlikely(kattr->test.data_size_in > size)) { - void __user *data_in = u64_to_user_ptr(kattr->test.data_in); + void __user *data_in = (void __user *)kattr->test.data_in;
while (size < kattr->test.data_size_in) { struct page *page; @@ -1605,7 +1605,7 @@ int bpf_prog_test_run_syscall(struct bpf_prog *prog, const union bpf_attr *kattr, union bpf_attr __user *uattr) { - void __user *ctx_in = u64_to_user_ptr(kattr->test.ctx_in); + void __user *ctx_in = (void __user *)kattr->test.ctx_in; __u32 ctx_size_in = kattr->test.ctx_size_in; void *ctx = NULL; u32 retval; diff --git a/net/core/sock_map.c b/net/core/sock_map.c index e476159a53c7..718f3621ad9f 100644 --- a/net/core/sock_map.c +++ b/net/core/sock_map.c @@ -1487,7 +1487,7 @@ static int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog, int sock_map_bpf_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr) { - __u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids); + __u32 __user *prog_ids = (__u32 __user *)attr->query.prog_ids; u32 prog_cnt = 0, flags = 0, ufd = attr->target_fd; struct bpf_prog **pprog; struct bpf_prog *prog;