This commit adds support to the futex module to correctly load, store and handle capabilities.
Signed-off-by: Luca Vizzarro Luca.Vizzarro@arm.com --- This commit tackles the issue reported at: https://git.morello-project.org/morello/kernel/linux/-/issues/6
Commit also available at: https://git.morello-project.org/Sevenarth/linux/-/commits/morello/futex
The definitions added at arch/arm64/include/asm/futex.h are open to debate and not final. I assume that the mode switch can be important for many other parts of the project, not just futexes. I thought that this could go in cheri.h, but I'm not that knowledgeable in this area, so I am seeking feedback.
Best, Luca Vizzarro
arch/arm64/include/asm/futex.h | 47 ++++++++++++++++++++++++---------- kernel/futex/core.c | 26 +++++++++++-------- kernel/futex/requeue.c | 4 +-- 3 files changed, 50 insertions(+), 27 deletions(-)
diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h index 99d73e8a3175..db85ef63124a 100644 --- a/arch/arm64/include/asm/futex.h +++ b/arch/arm64/include/asm/futex.h @@ -10,6 +10,18 @@
#include <asm/errno.h>
+#ifdef CONFIG_CHERI_PURECAP_UABI +#define __SWITCH_TO_C64 " bx #4\n" \ + ".arch morello+c64\n" +#define __SWITCH_TO_A64 " bx #4\n" \ + ".arch morello\n" +#define __ADDR_REGISTER "+C" +#else +#define __SWITCH_TO_C64 +#define __SWITCH_TO_A64 +#define __ADDR_REGISTER "+r" +#endif + #define FUTEX_MAX_LOOPS 128 /* What's the largest number you can think of? */
#define __futex_atomic_op(insn, ret, oldval, uaddr, tmp, oparg) \ @@ -18,20 +30,24 @@ do { \ \ uaccess_enable_privileged(); \ asm volatile( \ -" prfm pstl1strm, %2\n" \ -"1: ldxr %w1, %2\n" \ + __SWITCH_TO_C64 \ +" prfm pstl1strm, [%2]\n" \ +"1: ldxr %w1, [%2]\n" \ insn "\n" \ -"2: stlxr %w0, %w3, %2\n" \ +"2: stlxr %w0, %w3, [%2]\n" \ " cbz %w0, 3f\n" \ " sub %w4, %w4, %w0\n" \ " cbnz %w4, 1b\n" \ " mov %w0, %w6\n" \ "3:\n" \ " dmb ish\n" \ + __SWITCH_TO_A64 \ _ASM_EXTABLE_UACCESS_ERR(1b, 3b, %w0) \ _ASM_EXTABLE_UACCESS_ERR(2b, 3b, %w0) \ - : "=&r" (ret), "=&r" (oldval), "+Q" (*uaddr), "=&r" (tmp), \ - "+r" (loops) \ + /* FIXME: temporary solution for uaddr. Must be reverted to +Q once + * LLVM supports it for capabilities. */ \ + : "=&r" (ret), "=&r" (oldval), __ADDR_REGISTER (uaddr), \ + "=&r" (tmp), "+r" (loops) \ : "r" (oparg), "Ir" (-EAGAIN) \ : "memory"); \ uaccess_disable_privileged(); \ @@ -41,8 +57,7 @@ static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *_uaddr) { int oldval = 0, ret, tmp; - /* TODO [PCuABI] - perform the access via the user capability */ - u32 *uaddr = (u32 *)user_ptr_addr(__uaccess_mask_ptr(_uaddr)); + u32 __user *uaddr = __uaccess_mask_ptr(_uaddr);
if (!access_ok(_uaddr, sizeof(u32))) return -EFAULT; @@ -85,20 +100,20 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *_uaddr, int ret = 0; unsigned int loops = FUTEX_MAX_LOOPS; u32 val, tmp; - u32 *uaddr; + u32 __user *uaddr;
if (!access_ok(_uaddr, sizeof(u32))) return -EFAULT;
- /* TODO [PCuABI] - perform the access via the user capability */ - uaddr = (u32 *)user_ptr_addr(__uaccess_mask_ptr(_uaddr)); + uaddr = __uaccess_mask_ptr(_uaddr); uaccess_enable_privileged(); asm volatile("// futex_atomic_cmpxchg_inatomic\n" -" prfm pstl1strm, %2\n" -"1: ldxr %w1, %2\n" + __SWITCH_TO_C64 +" prfm pstl1strm, [%2]\n" +"1: ldxr %w1, [%2]\n" " sub %w3, %w1, %w5\n" " cbnz %w3, 4f\n" -"2: stlxr %w3, %w6, %2\n" +"2: stlxr %w3, %w6, [%2]\n" " cbz %w3, 3f\n" " sub %w4, %w4, %w3\n" " cbnz %w4, 1b\n" @@ -106,9 +121,13 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *_uaddr, "3:\n" " dmb ish\n" "4:\n" + __SWITCH_TO_A64 _ASM_EXTABLE_UACCESS_ERR(1b, 4b, %w0) _ASM_EXTABLE_UACCESS_ERR(2b, 4b, %w0) - : "+r" (ret), "=&r" (val), "+Q" (*uaddr), "=&r" (tmp), "+r" (loops) + /* FIXME: temporary solution for uaddr. Must be reverted to +Q once + * LLVM supports it for capabilities. */ + : "+r" (ret), "=&r" (val), __ADDR_REGISTER (uaddr), "=&r" (tmp), + "+r" (loops) : "r" (oldval), "r" (newval), "Ir" (-EAGAIN) : "memory"); uaccess_disable_privileged(); diff --git a/kernel/futex/core.c b/kernel/futex/core.c index 759332a26b5a..1234223f274e 100644 --- a/kernel/futex/core.c +++ b/kernel/futex/core.c @@ -31,6 +31,7 @@ * "The futexes are also cursed." * "But they come in a choice of three flavours!" */ +#include <linux/cheri.h> #include <linux/compat.h> #include <linux/jhash.h> #include <linux/pagemap.h> @@ -226,6 +227,8 @@ int get_futex_key(u32 __user *uaddr, bool fshared, union futex_key *key, struct address_space *mapping; int err, ro = 0;
+ /* TODO [PCuABI] - capability checks for uaccess */ + /* * The futex address must be "naturally" aligned. */ @@ -411,6 +414,8 @@ int fault_in_user_writeable(u32 __user *uaddr) struct mm_struct *mm = current->mm; int ret;
+ /* TODO [PCuABI] - capability checks for uaccess */ + mmap_read_lock(mm); ret = fixup_user_fault(mm, user_ptr_addr(uaddr), FAULT_FLAG_WRITE, NULL); @@ -750,20 +755,19 @@ static inline int fetch_robust_entry(struct robust_list __user **entry, #endif unsigned int *pi) { - unsigned long uentry; + struct robust_list __user *uentry; + ptraddr_t uentry_ptr;
- if (get_user(uentry, (unsigned long __user *)head)) + if (get_user_ptr(uentry, head)) return -EFAULT;
- /* - * TODO [PCuABI] - pointer conversion to be checked - * Each entry points to either next one or head of the list - * so this should probably operate on capabilities and use - * get_user_ptr instead, or validate the capability prior to - * get_user - */ - *entry = uaddr_to_user_ptr(uentry & ~1UL); - *pi = uentry & 1; + uentry_ptr = user_ptr_addr(uentry); +#ifdef CONFIG_CHERI_PURECAP_UABI + *entry = cheri_address_set(uentry, uentry_ptr & ~1UL); +#else + *entry = uaddr_to_user_ptr(uentry_ptr & ~1UL); +#endif + *pi = uentry_ptr & 1;
return 0; } diff --git a/kernel/futex/requeue.c b/kernel/futex/requeue.c index cba8b1a6a4cc..77b2443a880c 100644 --- a/kernel/futex/requeue.c +++ b/kernel/futex/requeue.c @@ -388,7 +388,7 @@ int futex_requeue(u32 __user *uaddr1, unsigned int flags, u32 __user *uaddr2, * Requeue PI only works on two distinct uaddrs. This * check is only valid for private futexes. See below. */ - if (uaddr1 == uaddr2) + if (user_ptr_addr(uaddr1) == user_ptr_addr(uaddr2)) return -EINVAL;
/* @@ -774,7 +774,7 @@ int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, if (!IS_ENABLED(CONFIG_FUTEX_PI)) return -ENOSYS;
- if (uaddr == uaddr2) + if (user_ptr_addr(uaddr) == user_ptr_addr(uaddr2)) return -EINVAL;
if (!bitset)