In PCuABI, {get,put}_user currently access user memory via standard 64-bit pointers. This patch switches to capability-based accesses: instead of extracting the address from the input capability, the capability itself is used directly to perform the access. As a result, {get,put}_user now also validate the capability metadata: they fail with -EFAULT if any of the hardware checks fail (i.e. if the access causes a capability exception).
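To illustrate the new behaviour (purely an illustration, not part of this patch; uptr is a hypothetical user pointer): a get_user() call through a capability whose bounds do not cover the access now fails even if the underlying address is mapped:

	int ret;
	unsigned long val;

	/*
	 * uptr's address is mapped, but its capability bounds are too
	 * narrow for an 8-byte load: the unprivileged load raises a
	 * capability exception, caught by the extable entry.
	 */
	ret = get_user(val, uptr);
	/* ret == -EFAULT, val == 0 */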
Unprivileged load and store instructions operating on a capability base register (e.g. ldtr/sttr x0, [c1]) are only available in the C64 instruction set, so we need to switch to C64 before executing them (and switch back to A64 afterwards). We also need to use the "C" asm constraint to have capabilities allocated to capability registers. Recent versions of Clang no longer require this constraint, but we keep using it for now for backwards compatibility with older toolchains.
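For reference, this is roughly what the pattern looks like in the PCuABI case once the macros are expanded (hand-expanded here for illustration only, taking __get_mem_asm with an ldtr as an example):

	asm volatile(
	"	bx	#4\n"			/* __ASM_SWITCH_TO_C64 */
	".arch morello+c64\n"
	"1:	ldtr	%w1, [%2]\n"		/* access via the capability */
	"2:\n"
	"	bx	#4\n"			/* __ASM_SWITCH_TO_A64 */
	".arch morello\n"
	_ASM_EXTABLE_UACCESS_ERR_ZERO(1b, 2b, %w0, %w1)
	: "+r" (err), "=r" (x)
	: "C" (addr));				/* capability register */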
Because __{get,put}_mem_asm can operate on either a kernel or a user pointer, fallback macros are defined for the kernel case (the KACCESS/KPTR variants, which leave the ISA unchanged and use the standard "r" constraint). The Morello-specific variants that load or store a capability (__morello_raw_{get,put}_user_cap) are only ever passed user pointers, so the user macros can be used there directly.
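For clarity, this is how the token pasting resolves (illustration only; as elsewhere in this file, __raw_{get,put}_mem pass U or K as `type'):

	__ASM_##type##ACCESS_BEFORE, type=U -> __ASM_UACCESS_BEFORE
		-> __ASM_SWITCH_TO_C64 (switch to C64 before the access)
	__ASM_##type##ACCESS_BEFORE, type=K -> __ASM_KACCESS_BEFORE
		-> empty (no ISA switch for kernel accesses)
	__ASM_RO_##type##PTR_CONSTR, type=U -> "C" (capability register,
		PCuABI builds; plain "r" otherwise)
	__ASM_RO_##type##PTR_CONSTR, type=K -> "r" (64-bit register)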
Signed-off-by: Kevin Brodsky <kevin.brodsky@arm.com>
---
 arch/arm64/include/asm/uaccess.h | 26 ++++++++++++++++++--------
 1 file changed, 18 insertions(+), 8 deletions(-)
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index c28850a79aad..5b6134066b7e 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -32,15 +32,21 @@
 	".arch morello+c64\n"
 #define __ASM_SWITCH_TO_A64	"	bx	#4\n" \
 	".arch morello\n"
+#define __ASM_RO_UPTR_CONSTR	"C"
 #define __ASM_RW_UPTR_CONSTR	"+C"
 #else
 #define __ASM_SWITCH_TO_C64
 #define __ASM_SWITCH_TO_A64
+#define __ASM_RO_UPTR_CONSTR	"r"
 #define __ASM_RW_UPTR_CONSTR	"+r"
 #endif
 
 #define __ASM_UACCESS_BEFORE	__ASM_SWITCH_TO_C64
 #define __ASM_UACCESS_AFTER	__ASM_SWITCH_TO_A64
+#define __ASM_KACCESS_BEFORE
+#define __ASM_KACCESS_AFTER
+#define __ASM_RO_KPTR_CONSTR	"r"
+#define __ASM_RW_KPTR_CONSTR	"+r"
 
 static inline int __access_ok(const void __user *ptr, unsigned long size);
@@ -217,12 +223,13 @@ static inline void __user *__uaccess_mask_ptr(const void __user *ptr)
  */
 #define __get_mem_asm(load, reg, x, addr, err, type)			\
 	asm volatile(							\
+	__ASM_##type##ACCESS_BEFORE					\
 	"1:	" load "	" reg "1, [%2]\n"			\
 	"2:\n"								\
+	__ASM_##type##ACCESS_AFTER					\
 	_ASM_EXTABLE_##type##ACCESS_ERR_ZERO(1b, 2b, %w0, %w1)		\
 	: "+r" (err), "=r" (x)						\
-	/* TODO [PCuABI] - perform the access via the user capability */\
-	: "r" ((ptraddr_t)(user_uintptr_t)(addr)))
+	: __ASM_RO_##type##PTR_CONSTR (addr))
 
 #define __raw_get_mem(ldr, x, ptr, err, type)				\
 do {									\
@@ -307,12 +314,13 @@ do {									\
 #define __put_mem_asm(store, reg, x, addr, err, type)			\
 	asm volatile(							\
+	__ASM_##type##ACCESS_BEFORE					\
 	"1:	" store "	" reg "1, [%2]\n"			\
 	"2:\n"								\
+	__ASM_##type##ACCESS_AFTER					\
 	_ASM_EXTABLE_##type##ACCESS_ERR(1b, 2b, %w0)			\
 	: "+r" (err)							\
-	/* TODO [PCuABI] - perform the access via the user capability */\
-	: "rZ" (x), "r" ((ptraddr_t)(user_uintptr_t)(addr)))
+	: "rZ" (x), __ASM_RO_##type##PTR_CONSTR (addr))
 
 #define __raw_put_mem(str, x, ptr, err, type)				\
 do {									\
@@ -452,12 +460,13 @@ do {									\
 	__chk_user_ptr(ptr);						\
 	uaccess_ttbr0_enable();						\
 	asm volatile(							\
+	__ASM_UACCESS_BEFORE						\
 	"1:	ldtr	%1, [%2]\n"					\
 	"2:\n"								\
+	__ASM_UACCESS_AFTER						\
 	_ASM_EXTABLE_UACCESS_ERR_ZERO(1b, 2b, %w0, %w1)			\
 	: "+r" (err), "=C" (x)						\
-	/* TODO [PCuABI] - perform the access via the user capability */\
-	: "r" ((ptraddr_t)(user_uintptr_t)(ptr)));			\
+	: __ASM_RO_UPTR_CONSTR (ptr));					\
 	uaccess_ttbr0_disable();					\
 } while (0)
@@ -488,12 +497,13 @@ do {									\
 	__chk_user_ptr(ptr);						\
 	uaccess_ttbr0_enable();						\
 	asm volatile(							\
+	__ASM_UACCESS_BEFORE						\
 	"1:	sttr	%1, [%2]\n"					\
 	"2:\n"								\
+	__ASM_UACCESS_AFTER						\
 	_ASM_EXTABLE_UACCESS_ERR(1b, 2b, %w0)				\
 	: "+r" (err)							\
-	/* TODO [PCuABI] - perform the access via the user capability */\
-	: "CZ" (x), "r" ((ptraddr_t)(user_uintptr_t)(ptr)));		\
+	: "CZ" (x), __ASM_RO_UPTR_CONSTR (ptr));			\
 	uaccess_ttbr0_disable();					\
 } while (0)