On 13-04-2023 09:04, Kevin Brodsky wrote:
On 12/04/2023 18:11, Luca Vizzarro wrote:
When working with PCuABI we need to ensure that we receive capabilities from user space instead of pointers and perform the appropriate memory accesses.
This commit updates the arm64-specific futex code so that the atomic load and store work with capabilities, hence enabling PCuABI. It therefore reverts "arm64: futex: Access user memory via raw 64-bit pointers".
Because the instructions used to perform the atomic operations, ldxr and stlxr, only support capabilities in C64 mode, this commit also brings C64/A64 mode switching to the inline assembly.
Whenever a load/store faults, the exception raised will be handled by ex_handler_uaccess_err_zero(), which will correctly set -EFAULT as the error and return execution past the loop. Although exception handling is always run in A64, hence the mode is switched back from C64, this works as expected as the C64/A64 mode is saved per exception level (SPSR_ELx.C64). Thus the selected C64 mode is retained upon return.
These two sentences are built in a strange way, mixing up exception entry and return. How about:
Exceptions are always handled in A64, hence the mode is automatically switched back from C64. The A64/C64 mode is saved in SPSR_ELx.C64 on exception entry, thus C64 is restored upon return.
Happy with the patches otherwise, so if you agree with that wording I can take care of changing the commit message when merging them.
I am a bit late to the party. FWIW, I have just finished reviewing the series and it looks good to me, great work!
Tudor
Kevin
What is more relevant in this case is that the exception correctly returns to the fixup machinery (label 3), which is when the switch back to A64 happens.
Signed-off-by: Luca Vizzarro <Luca.Vizzarro@arm.com>
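For reference, the fixup path described above is the standard arm64 extable pattern: a faulting user access is redirected to a local label, and ex_handler_uaccess_err_zero() writes -EFAULT into the nominated register. A minimal sketch of that pattern, using a hypothetical load_user_u32() helper and ignoring PCuABI and the privileged-uaccess window (so not the futex code itself):

#include <linux/types.h>
#include <linux/uaccess.h>
#include <asm/asm-extable.h>

/* Illustration only: unprivileged load with an exception-table fixup. */
static inline int load_user_u32(u32 *val, u32 __user *uaddr)
{
	int err = 0;
	u32 tmp;

	asm volatile(
	/* 1: the load that may fault on a bad user address */
	"1:	ldtr	%w1, [%2]\n"
	/* 2: where execution resumes after the fixup, with %w0 == -EFAULT */
	"2:\n"
	_ASM_EXTABLE_UACCESS_ERR(1b, 2b, %w0)
	: "+r" (err), "=&r" (tmp)
	: "r" (uaddr));

	*val = tmp;
	return err;
}

In the patch, the same machinery makes label 3 (respectively 4) the resume point; since SPSR_ELx.C64 restores C64 on return, the __ASM_SWITCH_TO_A64 that follows the label is what brings execution back to A64.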
 arch/arm64/include/asm/futex.h | 47 ++++++++++++++++++++++++----------
 1 file changed, 33 insertions(+), 14 deletions(-)
diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h
index 99d73e8a3175..2ef9b5167007 100644
--- a/arch/arm64/include/asm/futex.h
+++ b/arch/arm64/include/asm/futex.h
@@ -10,6 +10,18 @@
 
 #include <asm/errno.h>
 
+#ifdef CONFIG_CHERI_PURECAP_UABI
+#define __ASM_SWITCH_TO_C64	"	bx	#4\n"			\
+				".arch	morello+c64\n"
+#define __ASM_SWITCH_TO_A64	"	bx	#4\n"			\
+				".arch	morello\n"
+#define __ASM_RW_UPTR_CONSTR	"+C"
+#else
+#define __ASM_SWITCH_TO_C64
+#define __ASM_SWITCH_TO_A64
+#define __ASM_RW_UPTR_CONSTR	"+r"
+#endif
+
 #define FUTEX_MAX_LOOPS	128 /* What's the largest number you can think of? */
 
 #define __futex_atomic_op(insn, ret, oldval, uaddr, tmp, oparg)	\
@@ -18,20 +30,24 @@ do {						\
 									\
 	uaccess_enable_privileged();					\
 	asm volatile(							\
-"	prfm	pstl1strm, %2\n"					\
-"1:	ldxr	%w1, %2\n"						\
+	__ASM_SWITCH_TO_C64						\
+"	prfm	pstl1strm, [%2]\n"					\
+"1:	ldxr	%w1, [%2]\n"						\
 	insn "\n"							\
-"2:	stlxr	%w0, %w3, %2\n"						\
+"2:	stlxr	%w0, %w3, [%2]\n"					\
 "	cbz	%w0, 3f\n"						\
 "	sub	%w4, %w4, %w0\n"					\
 "	cbnz	%w4, 1b\n"						\
 "	mov	%w0, %w6\n"						\
 "3:\n"									\
 "	dmb	ish\n"							\
+	__ASM_SWITCH_TO_A64						\
 	_ASM_EXTABLE_UACCESS_ERR(1b, 3b, %w0)				\
 	_ASM_EXTABLE_UACCESS_ERR(2b, 3b, %w0)				\
-	: "=&r" (ret), "=&r" (oldval), "+Q" (*uaddr), "=&r" (tmp),	\
-	  "+r" (loops)							\
+	/* TODO [PCuABI]: temporary solution for uaddr. Should be reverted to +Q \
+	 * once LLVM supports it for capabilities. */			\
+	: "=&r" (ret), "=&r" (oldval), __ASM_RW_UPTR_CONSTR (uaddr),	\
+	  "=&r" (tmp), "+r" (loops)					\
 	: "r" (oparg), "Ir" (-EAGAIN)					\
 	: "memory");							\
 	uaccess_disable_privileged();					\
@@ -41,8 +57,7 @@ static inline int
 arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *_uaddr)
 {
 	int oldval = 0, ret, tmp;
-	/* TODO [PCuABI] - perform the access via the user capability */
-	u32 *uaddr = (u32 *)user_ptr_addr(__uaccess_mask_ptr(_uaddr));
+	u32 __user *uaddr = __uaccess_mask_ptr(_uaddr);
 
 	if (!access_ok(_uaddr, sizeof(u32)))
 		return -EFAULT;
@@ -85,20 +100,20 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *_uaddr,
 	int ret = 0;
 	unsigned int loops = FUTEX_MAX_LOOPS;
 	u32 val, tmp;
-	u32 *uaddr;
+	u32 __user *uaddr;
 
 	if (!access_ok(_uaddr, sizeof(u32)))
 		return -EFAULT;
 
-	/* TODO [PCuABI] - perform the access via the user capability */
-	uaddr = (u32 *)user_ptr_addr(__uaccess_mask_ptr(_uaddr));
+	uaddr = __uaccess_mask_ptr(_uaddr);
 	uaccess_enable_privileged();
 	asm volatile("// futex_atomic_cmpxchg_inatomic\n"
-"	prfm	pstl1strm, %2\n"
-"1:	ldxr	%w1, %2\n"
+	__ASM_SWITCH_TO_C64
+"	prfm	pstl1strm, [%2]\n"
+"1:	ldxr	%w1, [%2]\n"
 "	sub	%w3, %w1, %w5\n"
 "	cbnz	%w3, 4f\n"
-"2:	stlxr	%w3, %w6, %2\n"
+"2:	stlxr	%w3, %w6, [%2]\n"
 "	cbz	%w3, 3f\n"
 "	sub	%w4, %w4, %w3\n"
 "	cbnz	%w4, 1b\n"
@@ -106,9 +121,13 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *_uaddr,
 "3:\n"
 "	dmb	ish\n"
 "4:\n"
+	__ASM_SWITCH_TO_A64
 	_ASM_EXTABLE_UACCESS_ERR(1b, 4b, %w0)
 	_ASM_EXTABLE_UACCESS_ERR(2b, 4b, %w0)
-	: "+r" (ret), "=&r" (val), "+Q" (*uaddr), "=&r" (tmp), "+r" (loops)
+	/* TODO [PCuABI]: temporary solution for uaddr. Should be reverted to +Q once
+	 * LLVM supports it for capabilities. */
+	: "+r" (ret), "=&r" (val), __ASM_RW_UPTR_CONSTR (uaddr), "=&r" (tmp),
+	  "+r" (loops)
 	: "r" (oldval), "r" (newval), "Ir" (-EAGAIN)
 	: "memory");
 	uaccess_disable_privileged();