union bpf_attr and the bpf_*_info structs are used to pass configuration to the kernel or return information to userspace via the bpf syscall.
To avoid the need for a separate 32-bit compat handler for the bpf syscall, __user pointers inside this union and these structs are stored as __u64 or __aligned_u64.
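For illustration, this is roughly how a stored pointer currently round-trips through one of these 64-bit fields (illustrative snippet only; field and variable names shortened):

	/* uapi: the user pointer is carried in a 64-bit integer field */
	struct { __aligned_u64 pathname; } attr;

	/* kernel: recover the __user pointer with u64_to_user_ptr() */
	const char __user *pathname = u64_to_user_ptr(attr.pathname);

A 64-bit integer can hold any user address on both 32-bit and 64-bit ABIs, so a single handler serves both.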
In Morello PCuABI, a user pointer is a 129-bit capability, so the __aligned_u64 type is not big enough to hold it. Use the __kernel_aligned_uintptr_t type instead, which is big enough for a capability on the affected architectures while remaining __aligned_u64 on others.
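As a rough sketch of the idea behind __kernel_aligned_uintptr_t (not the exact uapi definition, which lives in the PCuABI headers; this assumes a CHERI-aware compiler providing __uintcap_t):

	#ifdef __CHERI__
	/* wide enough, and sufficiently aligned, for a full capability */
	typedef __uintcap_t __kernel_uintptr_t;
	#define __kernel_aligned_uintptr_t \
		__kernel_uintptr_t __attribute__((aligned(sizeof(__kernel_uintptr_t))))
	#else
	/* layout unchanged on non-CHERI architectures */
	typedef __u64 __kernel_uintptr_t;
	#define __kernel_aligned_uintptr_t __aligned_u64
	#endif

Struct layouts stay identical everywhere except under PCuABI, where the pointer-carrying fields grow to capability size.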
Use the copy_{to,from}_user_with_ptr variants where blocks of memory containing pointers are copied, in order to preserve capabilities.
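The plain copy routines strip capability tags, so copying a struct that carries user pointers through them would turn valid capabilities into untagged, unusable values. The bpf_copy_{to,from}_user_with_ptr helpers introduced below reduce to the following pattern (sketch with illustrative variable names):

	/* native (purecap) tasks: preserve capability tags in the copy;
	 * compat64 tasks carry no capabilities, so a plain copy is correct
	 */
	if (in_compat64_syscall())
		ret = copy_from_user(&info, uinfo, info_len);
	else
		ret = copy_from_user_with_ptr(&info, uinfo, info_len);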
Signed-off-by: Zachary Leaf <zachary.leaf@arm.com>
---
 drivers/media/rc/bpf-lirc.c    |   2 +-
 include/linux/bpf_compat.h     |   3 +
 include/linux/bpfptr.h         |   6 +-
 include/uapi/linux/bpf.h       |  94 +++++++++++-----------
 kernel/bpf/bpf_iter.c          |   2 +-
 kernel/bpf/btf.c               |  16 ++--
 kernel/bpf/cgroup.c            |   5 +-
 kernel/bpf/hashtab.c           |   8 +-
 kernel/bpf/net_namespace.c     |   2 +-
 kernel/bpf/offload.c           |   2 +-
 kernel/bpf/syscall.c           | 138 +++++++++++++++++----------------
 kernel/bpf/verifier.c          |   3 +-
 kernel/trace/bpf_trace.c       |   6 +-
 net/bpf/bpf_dummy_struct_ops.c |   6 +-
 net/bpf/test_run.c             |  16 ++--
 net/core/sock_map.c            |   2 +-
 16 files changed, 161 insertions(+), 150 deletions(-)
diff --git a/drivers/media/rc/bpf-lirc.c b/drivers/media/rc/bpf-lirc.c index f419d7452295..093ec66dbedd 100644 --- a/drivers/media/rc/bpf-lirc.c +++ b/drivers/media/rc/bpf-lirc.c @@ -295,7 +295,7 @@ int lirc_prog_detach(const union bpf_attr *attr)
int lirc_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr) { - __u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids); + __u32 __user *prog_ids = (__u32 __user *)attr->query.prog_ids; struct bpf_prog_array *progs; struct rc_dev *rcdev; u32 cnt, flags = 0; diff --git a/include/linux/bpf_compat.h b/include/linux/bpf_compat.h index 9e16f7f5eda6..0c1c299deebf 100644 --- a/include/linux/bpf_compat.h +++ b/include/linux/bpf_compat.h @@ -1,6 +1,9 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (C) 2023 Arm Ltd */
+#define bpf_uattr_compat_ptr(DEST, SRC, FIELD) \ + ((DEST)->FIELD = (__kernel_aligned_uintptr_t)compat_ptr((SRC)->FIELD)) + #define bpf_copy_from_user_with_ptr(dest, src, size) \ (in_compat64_syscall() ? copy_from_user(dest, src, size) \ : copy_from_user_with_ptr(dest, src, size)) diff --git a/include/linux/bpfptr.h b/include/linux/bpfptr.h index f6828c5f7858..485deaf96dcb 100644 --- a/include/linux/bpfptr.h +++ b/include/linux/bpfptr.h @@ -31,12 +31,12 @@ static inline bpfptr_t USER_BPFPTR(void __user *p) return (bpfptr_t) { .user = p }; }
-static inline bpfptr_t make_bpfptr(u64 addr, bool is_kernel) +static inline bpfptr_t make_bpfptr(__kernel_uintptr_t ptr, bool is_kernel) { if (is_kernel) - return KERNEL_BPFPTR((void*) (uintptr_t) addr); + return KERNEL_BPFPTR((void *)(uintptr_t)ptr); else - return USER_BPFPTR(u64_to_user_ptr(addr)); + return USER_BPFPTR((void __user *)ptr); }
static inline bool bpfptr_is_null(bpfptr_t bpfptr) diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index c994ff5b157c..a536d8f8805e 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -1350,21 +1350,21 @@ union bpf_attr {
struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */ __u32 map_fd; - __aligned_u64 key; + __kernel_aligned_uintptr_t key; union { - __aligned_u64 value; - __aligned_u64 next_key; + __kernel_aligned_uintptr_t value; + __kernel_aligned_uintptr_t next_key; }; __u64 flags; };
struct { /* struct used by BPF_MAP_*_BATCH commands */ - __aligned_u64 in_batch; /* start batch, - * NULL to start from beginning - */ - __aligned_u64 out_batch; /* output: next start batch */ - __aligned_u64 keys; - __aligned_u64 values; + /* start batch, NULL to start from beginning */ + __kernel_aligned_uintptr_t in_batch; + /* output: next start batch */ + __kernel_aligned_uintptr_t out_batch; + __kernel_aligned_uintptr_t keys; + __kernel_aligned_uintptr_t values; __u32 count; /* input/output: * input: # of key/value * elements @@ -1378,11 +1378,11 @@ union bpf_attr { struct { /* anonymous struct used by BPF_PROG_LOAD command */ __u32 prog_type; /* one of enum bpf_prog_type */ __u32 insn_cnt; - __aligned_u64 insns; - __aligned_u64 license; + __kernel_aligned_uintptr_t insns; + __kernel_aligned_uintptr_t license; __u32 log_level; /* verbosity level of verifier */ __u32 log_size; /* size of user buffer */ - __aligned_u64 log_buf; /* user supplied buffer */ + __kernel_aligned_uintptr_t log_buf; /* user supplied buffer */ __u32 kern_version; /* not used */ __u32 prog_flags; char prog_name[BPF_OBJ_NAME_LEN]; @@ -1394,10 +1394,10 @@ union bpf_attr { __u32 expected_attach_type; __u32 prog_btf_fd; /* fd pointing to BTF type data */ __u32 func_info_rec_size; /* userspace bpf_func_info size */ - __aligned_u64 func_info; /* func info */ + __kernel_aligned_uintptr_t func_info; /* func info */ __u32 func_info_cnt; /* number of bpf_func_info records */ __u32 line_info_rec_size; /* userspace bpf_line_info size */ - __aligned_u64 line_info; /* line info */ + __kernel_aligned_uintptr_t line_info; /* line info */ __u32 line_info_cnt; /* number of bpf_line_info records */ __u32 attach_btf_id; /* in-kernel BTF type id to attach to */ union { @@ -1407,8 +1407,8 @@ union bpf_attr { __u32 attach_btf_obj_fd; }; __u32 core_relo_cnt; /* number of bpf_core_relo */ - __aligned_u64 fd_array; /* array of FDs */ - __aligned_u64 core_relos; + __kernel_aligned_uintptr_t fd_array; /* array of FDs */ + __kernel_aligned_uintptr_t core_relos; __u32 core_relo_rec_size; /* sizeof(struct bpf_core_relo) */ /* output: actual total log contents size (including termintaing zero). * It could be both larger than original log_size (if log was @@ -1418,7 +1418,7 @@ union bpf_attr { };
struct { /* anonymous struct used by BPF_OBJ_* commands */ - __aligned_u64 pathname; + __kernel_aligned_uintptr_t pathname; __u32 bpf_fd; __u32 file_flags; }; @@ -1442,8 +1442,8 @@ union bpf_attr { * returns ENOSPC if data_out * is too small. */ - __aligned_u64 data_in; - __aligned_u64 data_out; + __kernel_aligned_uintptr_t data_in; + __kernel_aligned_uintptr_t data_out; __u32 repeat; __u32 duration; __u32 ctx_size_in; /* input: len of ctx_in */ @@ -1451,8 +1451,8 @@ union bpf_attr { * returns ENOSPC if ctx_out * is too small. */ - __aligned_u64 ctx_in; - __aligned_u64 ctx_out; + __kernel_aligned_uintptr_t ctx_in; + __kernel_aligned_uintptr_t ctx_out; __u32 flags; __u32 cpu; __u32 batch_size; @@ -1473,7 +1473,7 @@ union bpf_attr { struct { /* anonymous struct used by BPF_OBJ_GET_INFO_BY_FD */ __u32 bpf_fd; __u32 info_len; - __aligned_u64 info; + __kernel_aligned_uintptr_t info; } info;
struct { /* anonymous struct used by BPF_PROG_QUERY command */ @@ -1481,22 +1481,22 @@ union bpf_attr { __u32 attach_type; __u32 query_flags; __u32 attach_flags; - __aligned_u64 prog_ids; + __kernel_aligned_uintptr_t prog_ids; __u32 prog_cnt; /* output: per-program attach_flags. * not allowed to be set during effective query. */ - __aligned_u64 prog_attach_flags; + __kernel_aligned_uintptr_t prog_attach_flags; } query;
struct { /* anonymous struct used by BPF_RAW_TRACEPOINT_OPEN command */ - __u64 name; + __kernel_aligned_uintptr_t name; __u32 prog_fd; } raw_tracepoint;
struct { /* anonymous struct for BPF_BTF_LOAD */ - __aligned_u64 btf; - __aligned_u64 btf_log_buf; + __kernel_aligned_uintptr_t btf; + __kernel_aligned_uintptr_t btf_log_buf; __u32 btf_size; __u32 btf_log_size; __u32 btf_log_level; @@ -1512,7 +1512,7 @@ union bpf_attr { __u32 fd; /* input: fd */ __u32 flags; /* input: flags */ __u32 buf_len; /* input/output: buf len */ - __aligned_u64 buf; /* input/output: + __kernel_aligned_uintptr_t buf; /* input/output: * tp_name for tracepoint * symbol for kprobe * filename for uprobe @@ -1537,8 +1537,10 @@ union bpf_attr { union { __u32 target_btf_id; /* btf_id of target to attach to */ struct { - __aligned_u64 iter_info; /* extra bpf_iter_link_info */ - __u32 iter_info_len; /* iter_info length */ + /* extra bpf_iter_link_info */ + __kernel_aligned_uintptr_t iter_info; + /* iter_info length */ + __u32 iter_info_len; }; struct { /* black box user-provided value passed through @@ -1550,9 +1552,9 @@ union bpf_attr { struct { __u32 flags; __u32 cnt; - __aligned_u64 syms; - __aligned_u64 addrs; - __aligned_u64 cookies; + __kernel_aligned_uintptr_t syms; + __kernel_aligned_uintptr_t addrs; + __kernel_aligned_uintptr_t cookies; } kprobe_multi; struct { /* this is overlaid with the target_btf_id above. */ @@ -6303,12 +6305,12 @@ struct bpf_prog_info { __u8 tag[BPF_TAG_SIZE]; __u32 jited_prog_len; __u32 xlated_prog_len; - __aligned_u64 jited_prog_insns; - __aligned_u64 xlated_prog_insns; + __kernel_aligned_uintptr_t jited_prog_insns; + __kernel_aligned_uintptr_t xlated_prog_insns; __u64 load_time; /* ns since boottime */ __u32 created_by_uid; __u32 nr_map_ids; - __aligned_u64 map_ids; + __kernel_aligned_uintptr_t map_ids; char name[BPF_OBJ_NAME_LEN]; __u32 ifindex; __u32 gpl_compatible:1; @@ -6317,20 +6319,20 @@ struct bpf_prog_info { __u64 netns_ino; __u32 nr_jited_ksyms; __u32 nr_jited_func_lens; - __aligned_u64 jited_ksyms; - __aligned_u64 jited_func_lens; + __kernel_aligned_uintptr_t jited_ksyms; + __kernel_aligned_uintptr_t jited_func_lens; __u32 btf_id; __u32 func_info_rec_size; - __aligned_u64 func_info; + __kernel_aligned_uintptr_t func_info; __u32 nr_func_info; __u32 nr_line_info; - __aligned_u64 line_info; - __aligned_u64 jited_line_info; + __kernel_aligned_uintptr_t line_info; + __kernel_aligned_uintptr_t jited_line_info; __u32 nr_jited_line_info; __u32 line_info_rec_size; __u32 jited_line_info_rec_size; __u32 nr_prog_tags; - __aligned_u64 prog_tags; + __kernel_aligned_uintptr_t prog_tags; __u64 run_time_ns; __u64 run_cnt; __u64 recursion_misses; @@ -6359,10 +6361,10 @@ struct bpf_map_info { } __attribute__((aligned(8)));
struct bpf_btf_info { - __aligned_u64 btf; + __kernel_aligned_uintptr_t btf; __u32 btf_size; __u32 id; - __aligned_u64 name; + __kernel_aligned_uintptr_t name; __u32 name_len; __u32 kernel_btf; } __attribute__((aligned(8))); @@ -6373,7 +6375,7 @@ struct bpf_link_info { __u32 prog_id; union { struct { - __aligned_u64 tp_name; /* in/out: tp_name buffer ptr */ + __kernel_aligned_uintptr_t tp_name; /* in/out: tp_name buffer ptr */ __u32 tp_name_len; /* in/out: tp_name buffer len */ } raw_tracepoint; struct { @@ -6386,7 +6388,7 @@ struct bpf_link_info { __u32 attach_type; } cgroup; struct { - __aligned_u64 target_name; /* in/out: target_name buffer ptr */ + __kernel_aligned_uintptr_t target_name; /* in/out: target_name buffer ptr */ __u32 target_name_len; /* in/out: target_name buffer len */
/* If the iter specific field is 32 bits, it can be put diff --git a/kernel/bpf/bpf_iter.c b/kernel/bpf/bpf_iter.c index 96856f130cbf..669efe46a5a6 100644 --- a/kernel/bpf/bpf_iter.c +++ b/kernel/bpf/bpf_iter.c @@ -456,7 +456,7 @@ static int bpf_iter_link_fill_link_info(const struct bpf_link *link, { struct bpf_iter_link *iter_link = container_of(link, struct bpf_iter_link, link); - char __user *ubuf = u64_to_user_ptr(info->iter.target_name); + char __user *ubuf = (char __user *)info->iter.target_name; bpf_iter_fill_link_info_t fill_link_info; u32 ulen = info->iter.target_name_len; const char *target_name; diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index caeb98dcf3cf..d050fa1371b2 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -5465,7 +5465,7 @@ static int finalize_log(struct bpf_verifier_log *log, bpfptr_t uattr, u32 uattr_ static struct btf *btf_parse(const union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size) { bpfptr_t btf_data = make_bpfptr(attr->btf, uattr.is_kernel); - char __user *log_ubuf = u64_to_user_ptr(attr->btf_log_buf); + char __user *log_ubuf = (char __user *)attr->btf_log_buf; struct btf_struct_metas *struct_meta_tab; struct btf_verifier_env *env = NULL; struct btf *btf = NULL; @@ -7218,10 +7218,10 @@ struct btf *btf_get_by_fd(int fd) static void convert_compat_btf_info_in(struct bpf_btf_info *dest, const struct compat_bpf_btf_info *cinfo) { - copy_field(dest, cinfo, btf); + bpf_uattr_compat_ptr(dest, cinfo, btf); copy_field(dest, cinfo, btf_size); copy_field(dest, cinfo, id); - copy_field(dest, cinfo, name); + bpf_uattr_compat_ptr(dest, cinfo, name); copy_field(dest, cinfo, name_len); copy_field(dest, cinfo, kernel_btf); } @@ -7246,7 +7246,7 @@ static int copy_bpf_btf_info_from_user(const union bpf_attr *attr, void *select_info = in_compat64_syscall() ? &cinfo : info; size_t info_size = in_compat64_syscall() ? sizeof(struct compat_bpf_btf_info) : sizeof(struct bpf_btf_info); - void __user *uinfo = u64_to_user_ptr(attr->info.info); + void __user *uinfo = (void __user *)attr->info.info; *info_len = attr->info.info_len;
err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), @@ -7256,7 +7256,7 @@ static int copy_bpf_btf_info_from_user(const union bpf_attr *attr, *info_len = min_t(u32, info_size, *info_len);
memset(info, 0, sizeof(struct bpf_btf_info)); - if (copy_from_user(select_info, uinfo, *info_len)) + if (bpf_copy_from_user_with_ptr(select_info, uinfo, *info_len)) return -EFAULT;
if (in_compat64_syscall()) @@ -7277,7 +7277,7 @@ static int copy_bpf_btf_info_to_user(const union bpf_attr *attr, if (in_compat64_syscall()) convert_compat_btf_info_out(&cinfo, info);
- if (copy_to_user(uinfo, select_info, *info_len) || + if (bpf_copy_to_user_with_ptr(uinfo, select_info, *info_len) || bpf_put_uattr(uattr, *info_len, info.info_len)) return -EFAULT;
@@ -7300,7 +7300,7 @@ int btf_get_info_by_fd(const struct btf *btf, return ret;
info.id = btf->id; - ubtf = u64_to_user_ptr(info.btf); + ubtf = (void __user *)info.btf; btf_copy = min_t(u32, btf->data_size, info.btf_size); if (copy_to_user(ubtf, btf->data, btf_copy)) return -EFAULT; @@ -7308,7 +7308,7 @@ int btf_get_info_by_fd(const struct btf *btf,
info.kernel_btf = btf->kernel_btf;
- uname = u64_to_user_ptr(info.name); + uname = (char __user *)info.name; uname_len = info.name_len; if (!uname ^ !uname_len) return -EINVAL; diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c index fa5e67fbbbca..1f00b75cf2f9 100644 --- a/kernel/bpf/cgroup.c +++ b/kernel/bpf/cgroup.c @@ -1020,9 +1020,10 @@ static int cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog, static int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr, union bpf_attr __user *uattr) { - __u32 __user *prog_attach_flags = u64_to_user_ptr(attr->query.prog_attach_flags); + __u32 __user *prog_attach_flags = + (__u32 __user *)attr->query.prog_attach_flags; bool effective_query = attr->query.query_flags & BPF_F_QUERY_EFFECTIVE; - __u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids); + __u32 __user *prog_ids = (__u32 __user *)attr->query.prog_ids; enum bpf_attach_type type = attr->query.attach_type; enum cgroup_bpf_attach_type from_atype, to_atype; enum cgroup_bpf_attach_type atype; diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c index acd3561a1254..bab4b2cdd1e2 100644 --- a/kernel/bpf/hashtab.c +++ b/kernel/bpf/hashtab.c @@ -1667,9 +1667,9 @@ __htab_map_lookup_and_delete_batch(struct bpf_map *map, struct bpf_htab *htab = container_of(map, struct bpf_htab, map); u32 bucket_cnt, total, key_size, value_size, roundup_key_size; void *keys = NULL, *values = NULL, *value, *dst_key, *dst_val; - void __user *uvalues = u64_to_user_ptr(attr->batch.values); - void __user *ukeys = u64_to_user_ptr(attr->batch.keys); - void __user *ubatch = u64_to_user_ptr(attr->batch.in_batch); + void __user *uvalues = (void __user *)attr->batch.values; + void __user *ukeys = (void __user *)attr->batch.keys; + void __user *ubatch = (void __user *)attr->batch.in_batch; u32 batch, max_count, size, bucket_size, map_id; struct htab_elem *node_to_free = NULL; u64 elem_map_flags, map_flags; @@ -1874,7 +1874,7 @@ __htab_map_lookup_and_delete_batch(struct bpf_map *map, goto out;
/* copy # of entries and next batch */ - ubatch = u64_to_user_ptr(attr->batch.out_batch); + ubatch = (void __user *)attr->batch.out_batch; if (copy_to_user(ubatch, &batch, sizeof(batch)) || bpf_put_uattr(uattr, total, batch.count)) ret = -EFAULT; diff --git a/kernel/bpf/net_namespace.c b/kernel/bpf/net_namespace.c index 29ae0e3fe5bd..7056cd18ee5f 100644 --- a/kernel/bpf/net_namespace.c +++ b/kernel/bpf/net_namespace.c @@ -249,7 +249,7 @@ static int __netns_bpf_prog_query(const union bpf_attr *attr, struct net *net, enum netns_bpf_attach_type type) { - __u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids); + __u32 __user *prog_ids = (__u32 __user *)attr->query.prog_ids; struct bpf_prog_array *run_array; u32 prog_cnt = 0, flags = 0;
diff --git a/kernel/bpf/offload.c b/kernel/bpf/offload.c index 8a26cd8814c1..a6c0d674fdec 100644 --- a/kernel/bpf/offload.c +++ b/kernel/bpf/offload.c @@ -479,7 +479,7 @@ int bpf_prog_offload_info_fill(struct bpf_prog_info *info, ulen = info->jited_prog_len; info->jited_prog_len = aux->offload->jited_len; if (info->jited_prog_len && ulen) { - uinsns = u64_to_user_ptr(info->jited_prog_insns); + uinsns = (char __user *)info->jited_prog_insns; ulen = min_t(u32, info->jited_prog_len, ulen); if (copy_to_user(uinsns, aux->offload->jited_image, ulen)) { up_read(&bpf_devs_lock); diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 89c202b69f6c..dfcc9a9c8dff 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -1358,8 +1358,8 @@ static void *___bpf_copy_key(bpfptr_t ukey, u64 key_size)
static int map_lookup_elem(union bpf_attr *attr) { - void __user *ukey = u64_to_user_ptr(attr->key); - void __user *uvalue = u64_to_user_ptr(attr->value); + void __user *ukey = (void __user *)attr->key; + void __user *uvalue = (void __user *)attr->value; int ufd = attr->map_fd; struct bpf_map *map; void *key, *value; @@ -1535,8 +1535,8 @@ static int map_delete_elem(union bpf_attr *attr, bpfptr_t uattr)
static int map_get_next_key(union bpf_attr *attr) { - void __user *ukey = u64_to_user_ptr(attr->key); - void __user *unext_key = u64_to_user_ptr(attr->next_key); + void __user *ukey = (void __user *)attr->key; + void __user *unext_key = (void __user *)attr->next_key; int ufd = attr->map_fd; struct bpf_map *map; void *key, *next_key; @@ -1598,7 +1598,7 @@ int generic_map_delete_batch(struct bpf_map *map, const union bpf_attr *attr, union bpf_attr __user *uattr) { - void __user *keys = u64_to_user_ptr(attr->batch.keys); + void __user *keys = (void __user *)attr->batch.keys; u32 cp, max_count; int err = 0; void *key; @@ -1652,8 +1652,8 @@ int generic_map_update_batch(struct bpf_map *map, struct file *map_file, const union bpf_attr *attr, union bpf_attr __user *uattr) { - void __user *values = u64_to_user_ptr(attr->batch.values); - void __user *keys = u64_to_user_ptr(attr->batch.keys); + void __user *values = (void __user *)attr->batch.values; + void __user *keys = (void __user *)attr->batch.keys; u32 value_size, cp, max_count; void *key, *value; int err = 0; @@ -1711,10 +1711,10 @@ int generic_map_lookup_batch(struct bpf_map *map, const union bpf_attr *attr, union bpf_attr __user *uattr) { - void __user *uobatch = u64_to_user_ptr(attr->batch.out_batch); - void __user *ubatch = u64_to_user_ptr(attr->batch.in_batch); - void __user *values = u64_to_user_ptr(attr->batch.values); - void __user *keys = u64_to_user_ptr(attr->batch.keys); + void __user *uobatch = (void __user *)attr->batch.out_batch; + void __user *ubatch = (void __user *)attr->batch.in_batch; + void __user *values = (void __user *)attr->batch.values; + void __user *keys = (void __user *)attr->batch.keys; void *buf, *buf_prevkey, *prev_key, *key, *value; int err, retry = MAP_LOOKUP_RETRIES; u32 value_size, cp, max_count; @@ -1811,8 +1811,8 @@ int generic_map_lookup_batch(struct bpf_map *map,
static int map_lookup_and_delete_elem(union bpf_attr *attr) { - void __user *ukey = u64_to_user_ptr(attr->key); - void __user *uvalue = u64_to_user_ptr(attr->value); + void __user *ukey = (void __user *)attr->key; + void __user *uvalue = (void __user *)attr->value; int ufd = attr->map_fd; struct bpf_map *map; void *key, *value; @@ -2689,7 +2689,7 @@ static int bpf_obj_pin(const union bpf_attr *attr) if (attr->file_flags != 0) return -EINVAL;
- return bpf_obj_pin_user(attr->bpf_fd, u64_to_user_ptr(attr->pathname)); + return bpf_obj_pin_user(attr->bpf_fd, (void __user *)attr->pathname); }
static int bpf_obj_get(const union bpf_attr *attr) @@ -2697,7 +2697,7 @@ static int bpf_obj_get(const union bpf_attr *attr) if (attr->bpf_fd != 0 || attr->file_flags & ~BPF_OBJ_FLAG_MASK) return -EINVAL;
- return bpf_obj_get_user(u64_to_user_ptr(attr->pathname), + return bpf_obj_get_user((void __user *)attr->pathname, attr->file_flags); }
@@ -3200,7 +3200,7 @@ static int bpf_raw_tp_link_fill_link_info(const struct bpf_link *link, { struct bpf_raw_tp_link *raw_tp_link = container_of(link, struct bpf_raw_tp_link, link); - char __user *ubuf = u64_to_user_ptr(info->raw_tracepoint.tp_name); + char __user *ubuf = (char __user *)info->raw_tracepoint.tp_name; const char *tp_name = raw_tp_link->btp->tp->name; u32 ulen = info->raw_tracepoint.tp_name_len; size_t tp_len = strlen(tp_name); @@ -3393,7 +3393,8 @@ static int bpf_raw_tracepoint_open(const union bpf_attr *attr) if (IS_ERR(prog)) return PTR_ERR(prog);
- fd = bpf_raw_tp_link_attach(prog, u64_to_user_ptr(attr->raw_tracepoint.name)); + fd = bpf_raw_tp_link_attach(prog, + (const char __user *)attr->raw_tracepoint.name); if (fd < 0) bpf_prog_put(prog); return fd; @@ -3921,12 +3922,12 @@ convert_compat_prog_info_in(struct bpf_prog_info *dest, strncpy((char *)dest->tag, (char *)cinfo->tag, BPF_TAG_SIZE); copy_field(dest, cinfo, jited_prog_len); copy_field(dest, cinfo, xlated_prog_len); - copy_field(dest, cinfo, jited_prog_insns); - copy_field(dest, cinfo, xlated_prog_insns); + bpf_uattr_compat_ptr(dest, cinfo, jited_prog_insns); + bpf_uattr_compat_ptr(dest, cinfo, xlated_prog_insns); copy_field(dest, cinfo, load_time); copy_field(dest, cinfo, created_by_uid); copy_field(dest, cinfo, nr_map_ids); - copy_field(dest, cinfo, map_ids); + bpf_uattr_compat_ptr(dest, cinfo, map_ids); strncpy((char *)dest->name, (char *)cinfo->name, BPF_OBJ_NAME_LEN); copy_field(dest, cinfo, ifindex); copy_field(dest, cinfo, gpl_compatible); @@ -3934,20 +3935,20 @@ convert_compat_prog_info_in(struct bpf_prog_info *dest, copy_field(dest, cinfo, netns_ino); copy_field(dest, cinfo, nr_jited_ksyms); copy_field(dest, cinfo, nr_jited_func_lens); - copy_field(dest, cinfo, jited_ksyms); - copy_field(dest, cinfo, jited_func_lens); + bpf_uattr_compat_ptr(dest, cinfo, jited_ksyms); + bpf_uattr_compat_ptr(dest, cinfo, jited_func_lens); copy_field(dest, cinfo, btf_id); copy_field(dest, cinfo, func_info_rec_size); - copy_field(dest, cinfo, func_info); + bpf_uattr_compat_ptr(dest, cinfo, func_info); copy_field(dest, cinfo, nr_func_info); copy_field(dest, cinfo, nr_line_info); - copy_field(dest, cinfo, line_info); - copy_field(dest, cinfo, jited_line_info); + bpf_uattr_compat_ptr(dest, cinfo, line_info); + bpf_uattr_compat_ptr(dest, cinfo, jited_line_info); copy_field(dest, cinfo, nr_jited_line_info); copy_field(dest, cinfo, line_info_rec_size); copy_field(dest, cinfo, jited_line_info_rec_size); copy_field(dest, cinfo, nr_prog_tags); - copy_field(dest, cinfo, prog_tags); + bpf_uattr_compat_ptr(dest, cinfo, prog_tags); copy_field(dest, cinfo, run_time_ns); copy_field(dest, cinfo, run_cnt); copy_field(dest, cinfo, recursion_misses); @@ -4009,7 +4010,7 @@ static int copy_bpf_prog_info_from_user(const union bpf_attr *attr, void *select_info = in_compat64_syscall() ? &cinfo : info; size_t info_size = in_compat64_syscall() ? sizeof(struct compat_bpf_prog_info) : sizeof(struct bpf_prog_info); - void __user *uinfo = u64_to_user_ptr(attr->info.info); + void __user *uinfo = (void __user *)attr->info.info; *info_len = attr->info.info_len;
err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), @@ -4019,7 +4020,7 @@ static int copy_bpf_prog_info_from_user(const union bpf_attr *attr, *info_len = min_t(u32, info_size, *info_len);
memset(info, 0, sizeof(struct bpf_prog_info)); - if (copy_from_user(select_info, uinfo, *info_len)) + if (bpf_copy_from_user_with_ptr(select_info, uinfo, *info_len)) return -EFAULT;
if (in_compat64_syscall()) @@ -4040,7 +4041,7 @@ static int copy_bpf_prog_info_to_user(const union bpf_attr *attr, if (in_compat64_syscall()) convert_compat_prog_info_out(&cinfo, info);
- if (copy_to_user(uinfo, select_info, *info_len) || + if (bpf_copy_to_user_with_ptr(uinfo, select_info, *info_len) || bpf_put_uattr(uattr, *info_len, info.info_len)) return -EFAULT;
@@ -4079,7 +4080,7 @@ static int bpf_prog_get_info_by_fd(struct file *file, info.nr_map_ids = prog->aux->used_map_cnt; ulen = min_t(u32, info.nr_map_ids, ulen); if (ulen) { - u32 __user *user_map_ids = u64_to_user_ptr(info.map_ids); + u32 __user *user_map_ids = (u32 __user *)info.map_ids; u32 i;
for (i = 0; i < ulen; i++) @@ -4126,7 +4127,7 @@ static int bpf_prog_get_info_by_fd(struct file *file, insns_sanitized = bpf_insn_prepare_dump(prog, file->f_cred); if (!insns_sanitized) return -ENOMEM; - uinsns = u64_to_user_ptr(info.xlated_prog_insns); + uinsns = (char __user *)info.xlated_prog_insns; ulen = min_t(u32, info.xlated_prog_len, ulen); fault = copy_to_user(uinsns, insns_sanitized, ulen); kfree(insns_sanitized); @@ -4158,7 +4159,7 @@ static int bpf_prog_get_info_by_fd(struct file *file,
if (info.jited_prog_len && ulen) { if (bpf_dump_raw_ok(file->f_cred)) { - uinsns = u64_to_user_ptr(info.jited_prog_insns); + uinsns = (char __user *)info.jited_prog_insns; ulen = min_t(u32, info.jited_prog_len, ulen);
/* for multi-function programs, copy the JITed @@ -4201,7 +4202,7 @@ static int bpf_prog_get_info_by_fd(struct file *file, * corresponding to each function */ ulen = min_t(u32, info.nr_jited_ksyms, ulen); - user_ksyms = u64_to_user_ptr(info.jited_ksyms); + user_ksyms = (u64 __user *)info.jited_ksyms; if (prog->aux->func_cnt) { for (i = 0; i < ulen; i++) { ksym_addr = (unsigned long) @@ -4229,7 +4230,7 @@ static int bpf_prog_get_info_by_fd(struct file *file,
/* copy the JITed image lengths for each function */ ulen = min_t(u32, info.nr_jited_func_lens, ulen); - user_lens = u64_to_user_ptr(info.jited_func_lens); + user_lens = (u32 __user *)info.jited_func_lens; if (prog->aux->func_cnt) { for (i = 0; i < ulen; i++) { func_len = @@ -4258,7 +4259,7 @@ static int bpf_prog_get_info_by_fd(struct file *file, if (info.nr_func_info && ulen) { char __user *user_finfo;
- user_finfo = u64_to_user_ptr(info.func_info); + user_finfo = (char __user *)info.func_info; ulen = min_t(u32, info.nr_func_info, ulen); if (copy_to_user(user_finfo, prog->aux->func_info, info.func_info_rec_size * ulen)) @@ -4270,7 +4271,7 @@ static int bpf_prog_get_info_by_fd(struct file *file, if (info.nr_line_info && ulen) { __u8 __user *user_linfo;
- user_linfo = u64_to_user_ptr(info.line_info); + user_linfo = (__u8 __user *)info.line_info; ulen = min_t(u32, info.nr_line_info, ulen); if (copy_to_user(user_linfo, prog->aux->linfo, info.line_info_rec_size * ulen)) @@ -4288,7 +4289,7 @@ static int bpf_prog_get_info_by_fd(struct file *file, __u64 __user *user_linfo; u32 i;
- user_linfo = u64_to_user_ptr(info.jited_line_info); + user_linfo = (__u64 __user *)info.jited_line_info; ulen = min_t(u32, info.nr_jited_line_info, ulen); for (i = 0; i < ulen; i++) { line_addr = (unsigned long)prog->aux->jited_linfo[i]; @@ -4303,20 +4304,20 @@ static int bpf_prog_get_info_by_fd(struct file *file, ulen = info.nr_prog_tags; info.nr_prog_tags = prog->aux->func_cnt ? : 1; if (ulen) { __u8 __user (*user_prog_tags)[BPF_TAG_SIZE]; u32 i;
- user_prog_tags = u64_to_user_ptr(info.prog_tags); + user_prog_tags = (__u8 __user (*)[BPF_TAG_SIZE])info.prog_tags; ulen = min_t(u32, info.nr_prog_tags, ulen); if (prog->aux->func_cnt) { for (i = 0; i < ulen; i++) { if (copy_to_user(user_prog_tags[i], prog->aux->func[i]->tag, BPF_TAG_SIZE)) return -EFAULT; } } else { if (copy_to_user(user_prog_tags[0], prog->tag, BPF_TAG_SIZE)) return -EFAULT; } @@ -4335,7 +4336,8 @@ static int bpf_map_get_info_by_fd(struct file *file, const union bpf_attr *attr, union bpf_attr __user *uattr) { - struct bpf_map_info __user *uinfo = u64_to_user_ptr(attr->info.info); + struct bpf_map_info __user *uinfo = + (struct bpf_map_info __user *)attr->info.info; struct bpf_map_info info; u32 info_len = attr->info.info_len; int err; @@ -4394,11 +4396,11 @@ convert_compat_link_info_in(struct bpf_link_info *dest, */ switch (type) { case BPF_LINK_TYPE_RAW_TRACEPOINT: - copy_field(dest, cinfo, raw_tracepoint.tp_name); + bpf_uattr_compat_ptr(dest, cinfo, raw_tracepoint.tp_name); copy_field(dest, cinfo, raw_tracepoint.tp_name_len); return; case BPF_LINK_TYPE_ITER: - copy_field(dest, cinfo, iter.target_name); + bpf_uattr_compat_ptr(dest, cinfo, iter.target_name); copy_field(dest, cinfo, iter.target_name_len); return; default: @@ -4471,7 +4473,7 @@ static int copy_bpf_link_info_from_user(const union bpf_attr *attr, void *select_info = in_compat64_syscall() ? &cinfo : info; size_t info_size = in_compat64_syscall() ? sizeof(struct compat_bpf_link_info) : sizeof(struct bpf_link_info); - void __user *uinfo = u64_to_user_ptr(attr->info.info); + void __user *uinfo = (void __user *)attr->info.info; *info_len = attr->info.info_len;
err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), @@ -4481,7 +4483,7 @@ static int copy_bpf_link_info_from_user(const union bpf_attr *attr, *info_len = min_t(u32, info_size, *info_len);
memset(info, 0, sizeof(struct bpf_link_info)); - if (copy_from_user(select_info, uinfo, *info_len)) + if (bpf_copy_from_user_with_ptr(select_info, uinfo, *info_len)) return -EFAULT;
if (in_compat64_syscall()) @@ -4503,7 +4505,7 @@ static int copy_bpf_link_info_to_user(const union bpf_attr *attr, if (in_compat64_syscall()) convert_compat_link_info_out(&cinfo, info, type);
- if (copy_to_user(uinfo, select_info, *info_len) || + if (bpf_copy_to_user_with_ptr(uinfo, select_info, *info_len) || bpf_put_uattr(uattr, *info_len, info.info_len)) return -EFAULT;
@@ -4600,7 +4602,7 @@ static int bpf_task_fd_query_copy(const union bpf_attr *attr, const char *buf, u64 probe_offset, u64 probe_addr) { - char __user *ubuf = u64_to_user_ptr(attr->task_fd_query.buf); + char __user *ubuf = (char __user *)attr->task_fd_query.buf; u32 len = buf ? strlen(buf) : 0, input_len; int err = 0;
@@ -5422,10 +5424,10 @@ static void convert_compat_bpf_attr(union bpf_attr *dest, case BPF_MAP_LOOKUP_AND_DELETE_BATCH: case BPF_MAP_UPDATE_BATCH: case BPF_MAP_DELETE_BATCH: - copy_field(dest, cattr, batch.in_batch); - copy_field(dest, cattr, batch.out_batch); - copy_field(dest, cattr, batch.keys); - copy_field(dest, cattr, batch.values); + bpf_uattr_compat_ptr(dest, cattr, batch.in_batch); + bpf_uattr_compat_ptr(dest, cattr, batch.out_batch); + bpf_uattr_compat_ptr(dest, cattr, batch.keys); + bpf_uattr_compat_ptr(dest, cattr, batch.values); copy_field(dest, cattr, batch.count); copy_field(dest, cattr, batch.map_fd); copy_field(dest, cattr, batch.elem_flags); @@ -5434,11 +5436,11 @@ case BPF_PROG_LOAD: copy_field(dest, cattr, prog_type); copy_field(dest, cattr, insn_cnt); - copy_field(dest, cattr, insns); - copy_field(dest, cattr, license); + bpf_uattr_compat_ptr(dest, cattr, insns); + bpf_uattr_compat_ptr(dest, cattr, license); copy_field(dest, cattr, log_level); copy_field(dest, cattr, log_size); - copy_field(dest, cattr, log_buf); + bpf_uattr_compat_ptr(dest, cattr, log_buf); copy_field(dest, cattr, kern_version); copy_field(dest, cattr, prog_flags); strncpy(dest->prog_name, cattr->prog_name, BPF_OBJ_NAME_LEN); @@ -5446,23 +5448,23 @@ copy_field(dest, cattr, expected_attach_type); copy_field(dest, cattr, prog_btf_fd); copy_field(dest, cattr, func_info_rec_size); - copy_field(dest, cattr, func_info); + bpf_uattr_compat_ptr(dest, cattr, func_info); copy_field(dest, cattr, func_info_cnt); copy_field(dest, cattr, line_info_rec_size); - copy_field(dest, cattr, line_info); + bpf_uattr_compat_ptr(dest, cattr, line_info); copy_field(dest, cattr, line_info_cnt); copy_field(dest, cattr, attach_btf_id); copy_field(dest, cattr, attach_prog_fd); /* u32 attach_btf_obj_fd is in a union with u32 attach_prog_fd */ copy_field(dest, cattr, core_relo_cnt); - copy_field(dest, cattr, fd_array); - copy_field(dest, cattr, core_relos); + bpf_uattr_compat_ptr(dest, cattr, fd_array); + bpf_uattr_compat_ptr(dest, cattr, core_relos); copy_field(dest, cattr, core_relo_rec_size); copy_field(dest, cattr, log_true_size); break; case BPF_OBJ_PIN: case BPF_OBJ_GET: - copy_field(dest, cattr, pathname); + bpf_uattr_compat_ptr(dest, cattr, pathname); copy_field(dest, cattr, bpf_fd); copy_field(dest, cattr, file_flags); break; @@ -5479,14 +5481,14 @@ copy_field(dest, cattr, test.retval); copy_field(dest, cattr, test.data_size_in); copy_field(dest, cattr, test.data_size_out); - copy_field(dest, cattr, test.data_in); - copy_field(dest, cattr, test.data_out); + bpf_uattr_compat_ptr(dest, cattr, test.data_in); + bpf_uattr_compat_ptr(dest, cattr, test.data_out); copy_field(dest, cattr, test.repeat); copy_field(dest, cattr, test.duration); copy_field(dest, cattr, test.ctx_size_in); copy_field(dest, cattr, test.ctx_size_out); - copy_field(dest, cattr, test.ctx_in); - copy_field(dest, cattr, test.ctx_out); + bpf_uattr_compat_ptr(dest, cattr, test.ctx_in); + bpf_uattr_compat_ptr(dest, cattr, test.ctx_out); copy_field(dest, cattr, test.flags); copy_field(dest, cattr, test.cpu); copy_field(dest, cattr, test.batch_size); @@ -5508,7 +5510,7 @@ case BPF_OBJ_GET_INFO_BY_FD: copy_field(dest, cattr, info.bpf_fd); copy_field(dest, cattr, info.info_len); - copy_field(dest, cattr, info.info); + bpf_uattr_compat_ptr(dest, cattr, info.info); break; case BPF_PROG_QUERY: copy_field(dest, cattr, query.target_fd); @@ -5665,6 +5667,9 @@ static int bpf_check_perms(int cmd) return 0; }
+#define bpfptr_copy(dest, src, size) \ + (in_compat64_syscall() ? copy_from_bpfptr(dest, src, size) \ + : copy_from_bpfptr_with_ptr(dest, src, size)) static int copy_bpf_attr_from_user(union bpf_attr *attr, int cmd, bpfptr_t uattr, unsigned int *size) { @@ -5681,7 +5686,7 @@ static int copy_bpf_attr_from_user(union bpf_attr *attr, int cmd,
/* copy attributes from user space, may be less than sizeof(bpf_attr) */ memset(select_attr, 0, attr_size); - if (copy_from_bpfptr(select_attr, uattr, *size) != 0) + if (bpfptr_copy(select_attr, uattr, *size) != 0) return -EFAULT;
err = check_attr(cmd, select_attr); @@ -5693,6 +5698,7 @@ static int copy_bpf_attr_from_user(union bpf_attr *attr, int cmd,
return 0; } +#undef bpfptr_copy
static int __sys_bpf(int cmd, bpfptr_t uattr, unsigned int size) { diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index e524f2ef0e73..c2787fe4ec6f 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -18830,8 +18830,7 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr, __u3 * and supplied buffer to store the verification trace */ ret = bpf_vlog_init(&env->log, attr->log_level, - (char __user *) (unsigned long) attr->log_buf, - attr->log_size); + (char __user *)attr->log_buf, attr->log_size); if (ret) goto err_unlock;
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index 1f4b07da327a..dcaf4c329ee6 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -2797,8 +2797,8 @@ int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr if (flags & ~BPF_F_KPROBE_MULTI_RETURN) return -EINVAL;
- uaddrs = u64_to_user_ptr(attr->link_create.kprobe_multi.addrs); - usyms = u64_to_user_ptr(attr->link_create.kprobe_multi.syms); + uaddrs = (void __user *)attr->link_create.kprobe_multi.addrs; + usyms = (void __user *)attr->link_create.kprobe_multi.syms; if (!!uaddrs == !!usyms) return -EINVAL;
@@ -2811,7 +2811,7 @@ int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr if (!addrs) return -ENOMEM;
- ucookies = u64_to_user_ptr(attr->link_create.kprobe_multi.cookies); + ucookies = (void __user *)attr->link_create.kprobe_multi.cookies; if (ucookies) { cookies = kvmalloc_array(cnt, sizeof(*addrs), GFP_KERNEL); if (!cookies) { diff --git a/net/bpf/bpf_dummy_struct_ops.c b/net/bpf/bpf_dummy_struct_ops.c index 50848fbeb26c..b414762e9d32 100644 --- a/net/bpf/bpf_dummy_struct_ops.c +++ b/net/bpf/bpf_dummy_struct_ops.c @@ -34,12 +34,12 @@ dummy_ops_init_args(const union bpf_attr *kattr, unsigned int nr) if (!args) return ERR_PTR(-ENOMEM);
- ctx_in = u64_to_user_ptr(kattr->test.ctx_in); + ctx_in = (void __user *)kattr->test.ctx_in; if (copy_from_user(args->args, ctx_in, size_in)) goto out;
/* args[0] is 0 means state argument of test_N will be NULL */ - u_state = u64_to_user_ptr(args->args[0]); + u_state = (void __user *)args->args[0]; if (u_state && copy_from_user(&args->state, u_state, sizeof(args->state))) goto out; @@ -54,7 +54,7 @@ static int dummy_ops_copy_args(struct bpf_dummy_ops_test_args *args) { void __user *u_state;
- u_state = u64_to_user_ptr(args->args[0]); + u_state = (void __user *)args->args[0]; if (u_state && copy_to_user(u_state, &args->state, sizeof(args->state))) return -EFAULT;
diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c index 899f6d2d3ff4..abcc0390e5d2 100644 --- a/net/bpf/test_run.c +++ b/net/bpf/test_run.c @@ -437,7 +437,7 @@ static int bpf_test_finish(const union bpf_attr *kattr, struct skb_shared_info *sinfo, u32 size, u32 retval, u32 duration) { - void __user *data_out = u64_to_user_ptr(kattr->test.data_out); + void __user *data_out = (void __user *)kattr->test.data_out; int err = -EFAULT; u32 copy_size = size;
@@ -814,7 +814,7 @@ BTF_SET8_END(test_sk_check_kfunc_ids) static void *bpf_test_init(const union bpf_attr *kattr, u32 user_size, u32 size, u32 headroom, u32 tailroom) { - void __user *data_in = u64_to_user_ptr(kattr->test.data_in); + void __user *data_in = (void __user *)kattr->test.data_in; void *data;
if (size < ETH_HLEN || size > PAGE_SIZE - headroom - tailroom) @@ -901,7 +901,7 @@ int bpf_prog_test_run_raw_tp(struct bpf_prog *prog, const union bpf_attr *kattr, union bpf_attr __user *uattr) { - void __user *ctx_in = u64_to_user_ptr(kattr->test.ctx_in); + void __user *ctx_in = (void __user *)kattr->test.ctx_in; __u32 ctx_size_in = kattr->test.ctx_size_in; struct bpf_raw_tp_test_run_info info; int cpu = kattr->test.cpu, err = 0; @@ -956,8 +956,8 @@ int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
static void *bpf_ctx_init(const union bpf_attr *kattr, u32 max_size) { - void __user *data_in = u64_to_user_ptr(kattr->test.ctx_in); - void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out); + void __user *data_in = (void __user *)kattr->test.ctx_in; + void __user *data_out = (void __user *)kattr->test.ctx_out; u32 size = kattr->test.ctx_size_in; void *data; int err; @@ -989,7 +989,7 @@ static int bpf_ctx_finish(const union bpf_attr *kattr, union bpf_attr __user *uattr, const void *data, u32 size) { - void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out); + void __user *data_out = (void __user *)kattr->test.ctx_out; int err = -EFAULT; u32 copy_size = size;
@@ -1396,7 +1396,7 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr, goto free_data;
if (unlikely(kattr->test.data_size_in > size)) { - void __user *data_in = u64_to_user_ptr(kattr->test.data_in); + void __user *data_in = (void __user *)kattr->test.data_in;
while (size < kattr->test.data_size_in) { struct page *page; @@ -1654,7 +1654,7 @@ int bpf_prog_test_run_syscall(struct bpf_prog *prog, const union bpf_attr *kattr, union bpf_attr __user *uattr) { - void __user *ctx_in = u64_to_user_ptr(kattr->test.ctx_in); + void __user *ctx_in = (void __user *)kattr->test.ctx_in; __u32 ctx_size_in = kattr->test.ctx_size_in; void *ctx = NULL; u32 retval; diff --git a/net/core/sock_map.c b/net/core/sock_map.c index aa7522a61e30..d38e20a3f049 100644 --- a/net/core/sock_map.c +++ b/net/core/sock_map.c @@ -1509,7 +1509,7 @@ static int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog, int sock_map_bpf_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr) { - __u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids); + __u32 __user *prog_ids = (__u32 __user *)attr->query.prog_ids; u32 prog_cnt = 0, flags = 0, ufd = attr->target_fd; struct bpf_prog **pprog; struct bpf_prog *prog;