There are two ways that the kernel writes back to the union bpf_attr __user *uattr, and each requires different compat64 handling: 1) Writing to a union member that is itself a __user *ptr, e.g. a pointer to some memory buffer 2) Writing to a normal non-ptr field, e.g. __u32 prog_cnt
In the first instance, no additional conversion out or handling is required. When converting inbound each __user *ptr is simply copied from union compat_bpf_attr into a new union bpf_attr and this can be written to directly with copy_to_user().
For the second case, the kernel writes back out directly to user memory for a specific field. This is problematic for compat64, where the field offsets differ between native and compat64. That means if compat64 is enabled and we're in a compat syscall, we need to cast uattr to select the correct member field/offset to write to.
The "technically correct" approach is probably to replace all functions with a union bpf_attr __user * parameter with void __user * (at least where we pass uattr.user in dispatch_bpf() and propagate this down). This forces us to check the type and cast before we use uattr, since the type is technically unknown. This avoids any potential mis-use by not checking for compat64 before trying to access a ptr with native union offsets. The downside of this approach is we would end up touching many function signatures as uattr propagates down the call stack (currently 73).
The minimal diff approach used here is to handle only specifically where we write directly to uattr with a fairly bad macro (it's not ideal but I've seen worse). This greatly reduces the diff size at the cost of having compat_bpf_attr unions passed around as union bpf_attr and needing to remember to cast back where required.
The instances where we write out to uattr were mostly found by grepping for 'copy_to_user(&uattr' + 'put_user(.*uattr', so the list may not be complete. Luckily this doesn't appear to happen that often and usage seems to be consistent.
We can also replace all instances of copy_to_user() here with put_user() which somewhat simplifies things.
Signed-off-by: Zachary Leaf zachary.leaf@arm.com --- drivers/media/rc/bpf-lirc.c | 5 +++-- include/linux/bpf_compat.h | 17 +++++++++++++++++ kernel/bpf/cgroup.c | 5 +++-- kernel/bpf/hashtab.c | 5 +++-- kernel/bpf/net_namespace.c | 5 +++-- kernel/bpf/syscall.c | 22 +++++++++++----------- net/bpf/bpf_dummy_struct_ops.c | 3 ++- net/bpf/test_run.c | 16 ++++++++-------- net/core/sock_map.c | 5 +++-- 9 files changed, 53 insertions(+), 30 deletions(-)
diff --git a/drivers/media/rc/bpf-lirc.c b/drivers/media/rc/bpf-lirc.c index fe17c7f98e81..9988232aeab5 100644 --- a/drivers/media/rc/bpf-lirc.c +++ b/drivers/media/rc/bpf-lirc.c @@ -4,6 +4,7 @@ // Copyright (C) 2018 Sean Young sean@mess.org
#include <linux/bpf.h> +#include <linux/bpf_compat.h> #include <linux/filter.h> #include <linux/bpf_lirc.h> #include "rc-core-priv.h" @@ -319,12 +320,12 @@ int lirc_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr) progs = lirc_rcu_dereference(rcdev->raw->progs); cnt = progs ? bpf_prog_array_length(progs) : 0;
- if (copy_to_user(&uattr->query.prog_cnt, &cnt, sizeof(cnt))) { + if (PUT_USER_UATTR(cnt, query.prog_cnt)) { ret = -EFAULT; goto unlock; }
- if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags))) { + if (PUT_USER_UATTR(flags, query.attach_flags)) { ret = -EFAULT; goto unlock; } diff --git a/include/linux/bpf_compat.h b/include/linux/bpf_compat.h index cc12f2e3b204..3d5f6bd2cd46 100644 --- a/include/linux/bpf_compat.h +++ b/include/linux/bpf_compat.h @@ -1,6 +1,23 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (C) 2023 Arm Ltd */
+/* + * This isn't great but the cast is necessary to select the correct member + * offset when writing out to userspace for compat64 + */ +#define __PUT_USER_UATTR(TYPE, X, TO_FIELD) \ + (put_user(X, &(((TYPE)uattr)->TO_FIELD))) + +#ifdef CONFIG_COMPAT64 +#define PUT_USER_UATTR(X, TO_FIELD) \ + (in_compat_syscall() ? \ + __PUT_USER_UATTR(union compat_bpf_attr __user *, X, TO_FIELD) : \ + __PUT_USER_UATTR(union bpf_attr __user *, X, TO_FIELD)) +#else +#define PUT_USER_UATTR(X, TO_FIELD) \ + __PUT_USER_UATTR(union bpf_attr __user *, X, TO_FIELD) +#endif + #ifdef CONFIG_COMPAT64
union compat_bpf_attr { diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c index bf2fdb33fb31..5f9d13848986 100644 --- a/kernel/bpf/cgroup.c +++ b/kernel/bpf/cgroup.c @@ -14,6 +14,7 @@ #include <linux/string.h> #include <linux/bpf.h> #include <linux/bpf-cgroup.h> +#include <linux/bpf_compat.h> #include <linux/bpf_lsm.h> #include <linux/bpf_verifier.h> #include <net/sock.h> @@ -1061,9 +1062,9 @@ static int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
/* always output uattr->query.attach_flags as 0 during effective query */ flags = effective_query ? 0 : flags; - if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags))) + if (PUT_USER_UATTR(flags, query.attach_flags)) return -EFAULT; - if (copy_to_user(&uattr->query.prog_cnt, &total_cnt, sizeof(total_cnt))) + if (PUT_USER_UATTR(total_cnt, query.prog_cnt)) return -EFAULT; if (attr->query.prog_cnt == 0 || !prog_ids || !total_cnt) /* return early if user requested only program count + flags */ diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c index f39ee3e05589..5cf4ddc2a433 100644 --- a/kernel/bpf/hashtab.c +++ b/kernel/bpf/hashtab.c @@ -3,6 +3,7 @@ * Copyright (c) 2016 Facebook */ #include <linux/bpf.h> +#include <linux/bpf_compat.h> #include <linux/btf.h> #include <linux/jhash.h> #include <linux/filter.h> @@ -1686,7 +1687,7 @@ __htab_map_lookup_and_delete_batch(struct bpf_map *map, if (!max_count) return 0;
- if (put_user(0, &uattr->batch.count)) + if (PUT_USER_UATTR(0, batch.count)) return -EFAULT;
batch = 0; @@ -1867,7 +1868,7 @@ __htab_map_lookup_and_delete_batch(struct bpf_map *map, /* copy # of entries and next batch */ ubatch = u64_to_user_ptr(attr->batch.out_batch); if (copy_to_user(ubatch, &batch, sizeof(batch)) || - put_user(total, &uattr->batch.count)) + PUT_USER_UATTR(total, batch.count)) ret = -EFAULT;
out: diff --git a/kernel/bpf/net_namespace.c b/kernel/bpf/net_namespace.c index 868cc2c43899..3bfc97b37774 100644 --- a/kernel/bpf/net_namespace.c +++ b/kernel/bpf/net_namespace.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h> +#include <linux/bpf_compat.h> #include <linux/bpf-netns.h> #include <linux/filter.h> #include <net/net_namespace.h> @@ -257,9 +258,9 @@ static int __netns_bpf_prog_query(const union bpf_attr *attr, if (run_array) prog_cnt = bpf_prog_array_length(run_array);
- if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags))) + if (PUT_USER_UATTR(flags, query.attach_flags)) return -EFAULT; - if (copy_to_user(&uattr->query.prog_cnt, &prog_cnt, sizeof(prog_cnt))) + if (PUT_USER_UATTR(prog_cnt, query.prog_cnt)) return -EFAULT; if (!attr->query.prog_cnt || !prog_ids || !prog_cnt) return 0; diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 818ca8b63295..4c86b1b23f36 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -1601,7 +1601,7 @@ int generic_map_delete_batch(struct bpf_map *map, break; cond_resched(); } - if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp))) + if (PUT_USER_UATTR(cp, batch.count)) err = -EFAULT;
kvfree(key); @@ -1662,7 +1662,7 @@ int generic_map_update_batch(struct bpf_map *map, cond_resched(); }
- if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp))) + if (PUT_USER_UATTR(cp, batch.count)) err = -EFAULT;
kvfree(value); @@ -1698,7 +1698,7 @@ int generic_map_lookup_batch(struct bpf_map *map, if (!max_count) return 0;
- if (put_user(0, &uattr->batch.count)) + if (PUT_USER_UATTR(0, batch.count)) return -EFAULT;
buf_prevkey = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN); @@ -1763,8 +1763,8 @@ int generic_map_lookup_batch(struct bpf_map *map, if (err == -EFAULT) goto free_buf;
- if ((copy_to_user(&uattr->batch.count, &cp, sizeof(cp)) || - (cp && copy_to_user(uobatch, prev_key, map->key_size)))) + if (PUT_USER_UATTR(cp, batch.count) || + (cp && copy_to_user(uobatch, prev_key, map->key_size))) err = -EFAULT;
free_buf: @@ -3657,7 +3657,7 @@ static int bpf_obj_get_next_id(const union bpf_attr *attr, spin_unlock_bh(lock);
if (!err) - err = put_user(next_id, &uattr->next_id); + err = PUT_USER_UATTR(next_id, next_id);
return err; } @@ -4348,7 +4348,7 @@ static int bpf_task_fd_query_copy(const union bpf_attr *attr, u32 len = buf ? strlen(buf) : 0, input_len; int err = 0;
- if (put_user(len, &uattr->task_fd_query.buf_len)) + if (PUT_USER_UATTR(len, task_fd_query.buf_len)) return -EFAULT; input_len = attr->task_fd_query.buf_len; if (input_len && ubuf) { @@ -4376,10 +4376,10 @@ static int bpf_task_fd_query_copy(const union bpf_attr *attr, } }
- if (put_user(prog_id, &uattr->task_fd_query.prog_id) || - put_user(fd_type, &uattr->task_fd_query.fd_type) || - put_user(probe_offset, &uattr->task_fd_query.probe_offset) || - put_user(probe_addr, &uattr->task_fd_query.probe_addr)) + if (PUT_USER_UATTR(prog_id, task_fd_query.prog_id) || + PUT_USER_UATTR(fd_type, task_fd_query.fd_type) || + PUT_USER_UATTR(probe_offset, task_fd_query.probe_offset) || + PUT_USER_UATTR(probe_addr, task_fd_query.probe_addr)) return -EFAULT;
return err; diff --git a/net/bpf/bpf_dummy_struct_ops.c b/net/bpf/bpf_dummy_struct_ops.c index e78dadfc5829..20cecfda5188 100644 --- a/net/bpf/bpf_dummy_struct_ops.c +++ b/net/bpf/bpf_dummy_struct_ops.c @@ -5,6 +5,7 @@ #include <linux/kernel.h> #include <linux/bpf_verifier.h> #include <linux/bpf.h> +#include <linux/bpf_compat.h> #include <linux/btf.h>
extern struct bpf_struct_ops bpf_bpf_dummy_ops; @@ -131,7 +132,7 @@ int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr, err = dummy_ops_copy_args(args); if (err) goto out; - if (put_user(prog_ret, &uattr->test.retval)) + if (PUT_USER_UATTR(prog_ret, test.retval)) err = -EFAULT; out: kfree(args); diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c index fcb3e6c5e03c..58a5d02c5a8e 100644 --- a/net/bpf/test_run.c +++ b/net/bpf/test_run.c @@ -2,6 +2,7 @@ /* Copyright (c) 2017 Facebook */ #include <linux/bpf.h> +#include <linux/bpf_compat.h> #include <linux/btf.h> #include <linux/btf_ids.h> #include <linux/slab.h> @@ -464,11 +465,11 @@ static int bpf_test_finish(const union bpf_attr *kattr, } }
- if (copy_to_user(&uattr->test.data_size_out, &size, sizeof(size))) + if (PUT_USER_UATTR(size, test.data_size_out)) goto out; - if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval))) + if (PUT_USER_UATTR(retval, test.retval)) goto out; - if (copy_to_user(&uattr->test.duration, &duration, sizeof(duration))) + if (PUT_USER_UATTR(duration, test.duration)) goto out; if (err != -ENOSPC) err = 0; @@ -822,7 +823,7 @@ int bpf_prog_test_run_tracing(struct bpf_prog *prog, }
retval = ((u32)side_effect << 16) | ret; - if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval))) + if (PUT_USER_UATTR(retval, test.retval)) goto out;
err = 0; @@ -897,8 +898,7 @@ int bpf_prog_test_run_raw_tp(struct bpf_prog *prog, } put_cpu();
- if (!err && - copy_to_user(&uattr->test.retval, &info.retval, sizeof(u32))) + if (!err && PUT_USER_UATTR(info.retval, test.retval)) err = -EFAULT;
kfree(info.ctx); @@ -954,7 +954,7 @@ static int bpf_ctx_finish(const union bpf_attr *kattr,
if (copy_to_user(data_out, data, copy_size)) goto out; - if (copy_to_user(&uattr->test.ctx_size_out, &size, sizeof(size))) + if (PUT_USER_UATTR(size, test.ctx_size_out)) goto out; if (err != -ENOSPC) err = 0; @@ -1632,7 +1632,7 @@ int bpf_prog_test_run_syscall(struct bpf_prog *prog, retval = bpf_prog_run_pin_on_cpu(prog, ctx); rcu_read_unlock_trace();
- if (copy_to_user(&uattr->test.retval, &retval, sizeof(u32))) { + if (PUT_USER_UATTR(retval, test.retval)) { err = -EFAULT; goto out; } diff --git a/net/core/sock_map.c b/net/core/sock_map.c index 81beb16ab1eb..e476159a53c7 100644 --- a/net/core/sock_map.c +++ b/net/core/sock_map.c @@ -2,6 +2,7 @@ /* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */
#include <linux/bpf.h> +#include <linux/bpf_compat.h> #include <linux/btf_ids.h> #include <linux/filter.h> #include <linux/errno.h> @@ -1525,9 +1526,9 @@ int sock_map_bpf_prog_query(const union bpf_attr *attr, end: rcu_read_unlock();
- if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags)) || + if (PUT_USER_UATTR(flags, query.attach_flags) || (id != 0 && copy_to_user(prog_ids, &id, sizeof(u32))) || - copy_to_user(&uattr->query.prog_cnt, &prog_cnt, sizeof(prog_cnt))) + PUT_USER_UATTR(prog_cnt, query.prog_cnt)) ret = -EFAULT;
fdput(f);