Provide purecap userspace binaries with a vDSO they can use. Redefine the aarch64 vDSO as the compat vDSO, thereby allowing both regular arm64 and purecap binaries to use a vDSO under PCuABI.
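As an illustration only (not part of the change itself), a purecap program could sanity-check the new auxv entry along these lines. This is a sketch: it assumes a CHERI toolchain that provides cheriintrin.h and a purecap C library whose getauxval() is declared to return uintptr_t so the capability tag is preserved; neither is guaranteed by this patch.

  /*
   * Hypothetical purecap userspace sketch: check that AT_SYSINFO_EHDR now
   * carries a valid capability to the vDSO ELF header. Assumes getauxval()
   * returns uintptr_t under PCuABI so the tag survives the call.
   */
  #include <stdio.h>
  #include <sys/auxv.h>
  #include <cheriintrin.h>

  int main(void)
  {
          void *vdso = (void *)getauxval(AT_SYSINFO_EHDR);

          if (!vdso || !cheri_tag_get(vdso)) {
                  fprintf(stderr, "AT_SYSINFO_EHDR is not a valid capability\n");
                  return 1;
          }

          /* With this patch the bounds are still those of the root user capability. */
          printf("vDSO ELF header at %#lx, capability length %#lx\n",
                 (unsigned long)cheri_address_get(vdso),
                 (unsigned long)cheri_length_get(vdso));
          return 0;
  }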
Signed-off-by: Aditya Deshpande <aditya.deshpande@arm.com>
---
 arch/arm64/include/asm/elf.h          | 16 ++++++--
 arch/arm64/include/asm/vdso.h         |  1 +
 arch/arm64/kernel/Makefile            |  4 +-
 arch/arm64/kernel/vdso-purecap-wrap.S | 22 ++++++++++
 arch/arm64/kernel/vdso.c              | 58 ++++++++++++++++++++++++++-
 fs/compat_binfmt_elf.c                | 16 ++++++++
 include/linux/elf.h                   |  2 +-
 7 files changed, 111 insertions(+), 8 deletions(-)
 create mode 100644 arch/arm64/kernel/vdso-purecap-wrap.S
diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h
index f01f1f99cf03..86805d6e5180 100644
--- a/arch/arm64/include/asm/elf.h
+++ b/arch/arm64/include/asm/elf.h
@@ -190,17 +190,20 @@ do { \
 #ifdef CONFIG_CHERI_PURECAP_UABI
 /*
- * TODO [PCuABI]: In Transitional PCuABI, AT_SYSINFO_EHDR is passed as NULL
- * as there is no purecap vDSO yet.
+ * In Transitional PCuABI, we put a valid unsealed capability into the
+ * auxiliary vector.
+ *
+ * TODO [PCuABI]: Look into restricting the bounds of this capability to just
+ * the vDSO pages, as currently the bounds are those of the root user capability.
  */
-#define ARCH_DLINFO	SETUP_DLINFO(0)
+#define ARCH_DLINFO	SETUP_DLINFO(elf_uaddr_to_user_ptr((elf_addr_t)current->mm->context.vdso))
 #else /* !CONFIG_CHERI_PURECAP_UABI */
 #define ARCH_DLINFO	SETUP_DLINFO((elf_addr_t)current->mm->context.vdso)
 #endif /* CONFIG_CHERI_PURECAP_UABI */
 
 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES
 struct linux_binprm;
-extern int arch_setup_additional_pages(struct linux_binprm *bprm,
+extern int purecap_setup_additional_pages(struct linux_binprm *bprm,
 				       int uses_interp);
 
 /* 1GB of VA */
@@ -244,6 +247,11 @@ typedef compat_elf_greg_t compat_elf_gregset_t[COMPAT_ELF_NGREG];
 
 #define COMPAT_ARCH_DLINFO	SETUP_DLINFO((elf_addr_t)current->mm->context.vdso)
 
+extern int aarch64_setup_additional_pages(struct linux_binprm *bprm,
+					  int uses_interp);
+#define compat_arch_setup_additional_pages \
+	aarch64_setup_additional_pages
+
 #else /* !CONFIG_COMPAT64 */
 
 /* PIE load location for compat arm. Must match ARM ELF_ET_DYN_BASE. */
diff --git a/arch/arm64/include/asm/vdso.h b/arch/arm64/include/asm/vdso.h
index 83a50071a85e..709b5bbec255 100644
--- a/arch/arm64/include/asm/vdso.h
+++ b/arch/arm64/include/asm/vdso.h
@@ -28,6 +28,7 @@
 
 extern char vdso_start[], vdso_end[];
 extern char vdso32_start[], vdso32_end[];
+extern char vdso_purecap_start[], vdso_purecap_end[];
 
 #endif /* !__ASSEMBLY__ */
 
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index 12eb4c55234e..934a677c8f69 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -72,12 +72,14 @@ obj-$(CONFIG_ARM64_MTE) += mte.o
 obj-$(CONFIG_ARM64_MORELLO)		+= morello.o
 obj-y					+= vdso-wrap.o
 obj-$(CONFIG_COMPAT_VDSO)		+= vdso32-wrap.o
+obj-$(CONFIG_CHERI_PURECAP_UABI)	+= vdso-purecap-wrap.o
 obj-$(CONFIG_UNWIND_PATCH_PAC_INTO_SCS)	+= patch-scs.o
 CFLAGS_patch-scs.o			+= -mbranch-protection=none
 
 # Force dependency (vdso*-wrap.S includes vdso.so through incbin)
 $(obj)/vdso-wrap.o: $(obj)/vdso/vdso.so
 $(obj)/vdso32-wrap.o: $(obj)/vdso32/vdso.so
+$(obj)/vdso-purecap-wrap.o: $(obj)/vdso-purecap/vdso.so
 
 obj-y					+= probes/
 obj-y					+= head.o
@@ -88,4 +90,4 @@ AFLAGS_head.o += -DVMLINUX_PATH="\"$(realpath $(objtree)/vmlinux)\""
 endif
 
 # for cleaning
-subdir- += vdso vdso32
+subdir- += vdso vdso32 vdso-purecap
diff --git a/arch/arm64/kernel/vdso-purecap-wrap.S b/arch/arm64/kernel/vdso-purecap-wrap.S
new file mode 100644
index 000000000000..b0bac3af4925
--- /dev/null
+++ b/arch/arm64/kernel/vdso-purecap-wrap.S
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2023 ARM Limited
+ */
+
+#include <linux/init.h>
+#include <linux/linkage.h>
+#include <linux/const.h>
+#include <asm/assembler.h>
+#include <asm/page.h>
+
+	.globl vdso_purecap_start, vdso_purecap_end
+	.section ".rodata"
+	.balign PAGE_SIZE
+vdso_purecap_start:
+	.incbin "arch/arm64/kernel/vdso-purecap/vdso.so"
+	.balign PAGE_SIZE
+vdso_purecap_end:
+
+	.previous
+
+emit_aarch64_feature_1_and
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index c9d961249894..8f1305128799 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -32,6 +32,7 @@
 enum vdso_abi {
 	VDSO_ABI_AA64,
 	VDSO_ABI_AA32,
+	VDSO_ABI_PURECAP,
 };
 
 enum vvar_pages {
@@ -64,6 +65,13 @@ static struct vdso_abi_info vdso_info[] __ro_after_init = {
 		.vdso_code_end = vdso32_end,
 	},
 #endif /* CONFIG_COMPAT_VDSO */
+#ifdef CONFIG_CHERI_PURECAP_UABI
+	[VDSO_ABI_PURECAP] = {
+		.name = "vdso_purecap",
+		.vdso_code_start = vdso_purecap_start,
+		.vdso_code_end = vdso_purecap_end,
+	},
+#endif /* CONFIG_CHERI_PURECAP_UABI */
 };
 
 /*
@@ -79,7 +87,6 @@ static int vdso_mremap(const struct vm_special_mapping *sm,
 		       struct vm_area_struct *new_vma)
 {
 	current->mm->context.vdso = (void *)new_vma->vm_start;
-
 	return 0;
 }
 
@@ -143,6 +150,10 @@ int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
 #ifdef CONFIG_COMPAT_VDSO
 		if (vma_is_special_mapping(vma, vdso_info[VDSO_ABI_AA32].dm))
 			zap_vma_pages(vma);
+#endif
+#ifdef CONFIG_CHERI_PURECAP_UABI
+		if (vma_is_special_mapping(vma, vdso_info[VDSO_ABI_PURECAP].dm))
+			zap_vma_pages(vma);
 #endif
 	}
 
@@ -194,6 +205,7 @@ static int __setup_additional_pages(enum vdso_abi abi,
 	unsigned long gp_flags = 0;
 	void *ret;
 
+	BUILD_BUG_ON(VVAR_NR_PAGES != __VVAR_PAGES);
 
 	vdso_text_len = vdso_info[abi].vdso_pages << PAGE_SHIFT;
@@ -411,6 +423,48 @@ int aarch32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 }
 #endif /* CONFIG_COMPAT32 */
 
+#ifdef CONFIG_CHERI_PURECAP_UABI
+enum purecap_map {
+	PURECAP_MAP_VVAR,
+	PURECAP_MAP_VDSO,
+};
+
+static struct vm_special_mapping purecap_vdso_maps[] __ro_after_init = {
+	[PURECAP_MAP_VVAR] = {
+		.name = "[vvar]",
+		.fault = vvar_fault,
+	},
+	[PURECAP_MAP_VDSO] = {
+		.name = "[vdso]",
+		.mremap = vdso_mremap,
+	},
+};
+
+static int __init purecap_vdso_init(void)
+{
+	vdso_info[VDSO_ABI_PURECAP].dm = &purecap_vdso_maps[PURECAP_MAP_VVAR];
+	vdso_info[VDSO_ABI_PURECAP].cm = &purecap_vdso_maps[PURECAP_MAP_VDSO];
+
+	return __vdso_init(VDSO_ABI_PURECAP);
+}
+arch_initcall(purecap_vdso_init);
+
+int purecap_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+{
+	struct mm_struct *mm = current->mm;
+	int ret;
+
+	if (mmap_write_lock_killable(mm))
+		return -EINTR;
+
+	ret = __setup_additional_pages(VDSO_ABI_PURECAP, mm, bprm, uses_interp);
+	mmap_write_unlock(mm);
+
+	return ret;
+}
+
+#endif /* CONFIG_CHERI_PURECAP_UABI */
+
 enum aarch64_map {
 	AA64_MAP_VVAR,
 	AA64_MAP_VDSO,
@@ -436,7 +490,7 @@ static int __init vdso_init(void)
 }
 arch_initcall(vdso_init);
 
-int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+int aarch64_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 {
 	struct mm_struct *mm = current->mm;
 	int ret;
diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
index b6b453b35946..e22e866d587f 100644
--- a/fs/compat_binfmt_elf.c
+++ b/fs/compat_binfmt_elf.c
@@ -150,6 +150,22 @@
 #endif /* CONFIG_COMPAT32 */
 
+#ifdef CONFIG_COMPAT64
+
+#ifdef compat_arch_setup_additional_pages
+#define COMPAT_ARCH_SETUP_ADDITIONAL_PAGES(bprm, ex, interpreter) \
+	compat_arch_setup_additional_pages(bprm, interpreter)
+#endif
+
+#ifdef COMPAT_ARCH_SETUP_ADDITIONAL_PAGES
+#undef ARCH_HAS_SETUP_ADDITIONAL_PAGES
+#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
+#undef ARCH_SETUP_ADDITIONAL_PAGES
+#define ARCH_SETUP_ADDITIONAL_PAGES COMPAT_ARCH_SETUP_ADDITIONAL_PAGES
+#endif
+
+#endif /* CONFIG_COMPAT64 */
+
 #undef elf_check_arch
 #define elf_check_arch compat_elf_check_arch
diff --git a/include/linux/elf.h b/include/linux/elf.h
index 039ad1867045..2b66123c1f94 100644
--- a/include/linux/elf.h
+++ b/include/linux/elf.h
@@ -32,7 +32,7 @@
 
 #if defined(ARCH_HAS_SETUP_ADDITIONAL_PAGES) && !defined(ARCH_SETUP_ADDITIONAL_PAGES)
 #define ARCH_SETUP_ADDITIONAL_PAGES(bprm, ex, interpreter) \
-	arch_setup_additional_pages(bprm, interpreter)
+	purecap_setup_additional_pages(bprm, interpreter)
 #endif
 
 #define ELF32_GNU_PROPERTY_ALIGN 4