Use the recently introduced PCuABI reservation interfaces to create the appropriate bounded capability for executable/interpreter load segments.
Signed-off-by: Amit Daniel Kachhap <amitdaniel.kachhap@arm.com> --- fs/binfmt_elf.c | 100 ++++++++++++++++++++++++++++++++++-------------- 1 file changed, 72 insertions(+), 28 deletions(-)
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index c10ba610be50..1adf5789668a 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c @@ -48,6 +48,7 @@ #include <linux/uaccess.h> #include <linux/rseq.h> #include <linux/cheri.h> +#include <linux/cap_addr_mgmt.h> #include <asm/param.h> #include <asm/page.h>
@@ -119,15 +120,14 @@ static struct linux_binfmt elf_format = { * p_filesz when it ends before the page ends (e.g. bss), otherwise this * memory will contain the junk from the file that should not be present. */ -static int padzero(unsigned long address) +static int padzero(user_uintptr_t user_ptr) { unsigned long nbyte;
- nbyte = ELF_PAGEOFFSET(address); + nbyte = ELF_PAGEOFFSET((ptraddr_t)user_ptr); if (nbyte) { nbyte = ELF_MIN_ALIGN - nbyte; - if (clear_user(make_user_ptr_for_write_uaccess(address, nbyte), - nbyte)) + if (clear_user((void __user *)user_ptr, nbyte)) return -EFAULT; } return 0; @@ -163,6 +163,7 @@ struct elf_load_info { unsigned long end_elf_rx; unsigned long start_elf_rw; unsigned long end_elf_rw; + user_uintptr_t user_ptr_elf; };
static int @@ -298,22 +299,23 @@ create_elf_tables(struct linux_binprm *bprm, const struct elfhdr *exec, NEW_AUX_ENT(AT_RSEQ_ALIGN, __alignof__(struct rseq)); #endif #if defined(CONFIG_CHERI_PURECAP_UABI) && (ELF_COMPAT == 0) - /* - * TODO [PCuABI] - Restrict bounds/perms for AT_CHERI_* entries - */ NEW_AUX_ENT(AT_CHERI_EXEC_RW_CAP, (exec_load_info->start_elf_rw != ~0UL ? - elf_uaddr_to_user_ptr(exec_load_info->start_elf_rw) : + (void __user *)cheri_address_set(exec_load_info->user_ptr_elf, + exec_load_info->start_elf_rw) : NULL)); NEW_AUX_ENT(AT_CHERI_EXEC_RX_CAP, - elf_uaddr_to_user_ptr(exec_load_info->start_elf_rx)); + (void __user *)cheri_address_set(exec_load_info->user_ptr_elf, + exec_load_info->start_elf_rx)); NEW_AUX_ENT(AT_CHERI_INTERP_RW_CAP, ((interp_load_addr && interp_load_info->start_elf_rw != ~0UL) ? - elf_uaddr_to_user_ptr(interp_load_info->start_elf_rw) : + (void __user *)cheri_address_set(interp_load_info->user_ptr_elf, + interp_load_info->start_elf_rw) : NULL)); NEW_AUX_ENT(AT_CHERI_INTERP_RX_CAP, (interp_load_addr ? - elf_uaddr_to_user_ptr(interp_load_info->start_elf_rx) : + (void __user *)cheri_address_set(interp_load_info->user_ptr_elf, + interp_load_info->start_elf_rx) : NULL)); NEW_AUX_ENT(AT_CHERI_STACK_CAP, elf_uaddr_to_user_ptr(0)); NEW_AUX_ENT(AT_CHERI_SEAL_CAP, cheri_user_root_seal_cap); @@ -420,14 +422,14 @@ create_elf_tables(struct linux_binprm *bprm, const struct elfhdr *exec, * into memory at "addr". (Note that p_filesz is rounded up to the * next page, so any extra bytes from the file must be wiped.) 
*/ -static unsigned long elf_map(struct file *filep, unsigned long addr, +static unsigned long elf_map(struct file *filep, user_uintptr_t user_ptr, const struct elf_phdr *eppnt, int prot, int type, unsigned long total_size) { unsigned long map_addr; unsigned long size = eppnt->p_filesz + ELF_PAGEOFFSET(eppnt->p_vaddr); unsigned long off = eppnt->p_offset - ELF_PAGEOFFSET(eppnt->p_vaddr); - addr = ELF_PAGESTART(addr); + unsigned long addr = ELF_PAGESTART((ptraddr_t)user_ptr); size = ELF_PAGEALIGN(size);
/* mmap() will return -EINVAL if given a zero size, but a @@ -435,6 +437,10 @@ static unsigned long elf_map(struct file *filep, unsigned long addr, if (!size) return addr;
+ if (reserv_is_supported(current->mm)) + user_ptr = (user_uintptr_t)user_ptr_set_addr((void __user *)user_ptr, addr); + else + user_ptr = addr; /* * total_size is the size of the ELF (interpreter) image. * The _first_ mmap needs to know the full size, otherwise @@ -445,11 +451,11 @@ static unsigned long elf_map(struct file *filep, unsigned long addr, */ if (total_size) { total_size = ELF_PAGEALIGN(total_size); - map_addr = vm_mmap(filep, addr, total_size, prot, type, off); - if (!BAD_ADDR(map_addr)) + map_addr = (ptraddr_t)vm_mmap(filep, user_ptr, total_size, prot, type, off); + if (!reserv_is_supported(current->mm) && !BAD_ADDR(map_addr)) vm_munmap(map_addr+size, total_size-size); } else - map_addr = vm_mmap(filep, addr, size, prot, type, off); + map_addr = (ptraddr_t)vm_mmap(filep, user_ptr, size, prot, type, off);
if ((type & MAP_FIXED_NOREPLACE) && PTR_ERR((void *)map_addr) == -EEXIST) @@ -464,28 +470,44 @@ static unsigned long elf_map(struct file *filep, unsigned long addr, * into memory at "addr". Memory from "p_filesz" through "p_memsz" * rounded up to the next page is zeroed. */ -static unsigned long elf_load(struct file *filep, unsigned long addr, - const struct elf_phdr *eppnt, int prot, int type, - unsigned long total_size) +static unsigned long elf_load(struct elf_load_info *load_info, struct file *filep, + unsigned long addr, const struct elf_phdr *eppnt, + int prot, int type, unsigned long total_size) { unsigned long zero_start, zero_end; unsigned long map_addr; + user_uintptr_t map_user_ptr;
+ if (reserv_is_supported(current->mm) && !total_size) + map_user_ptr = (user_uintptr_t)user_ptr_set_addr((void __user *)load_info->user_ptr_elf, addr); + else + map_user_ptr = addr; if (eppnt->p_filesz) { - map_addr = elf_map(filep, addr, eppnt, prot, type, total_size); + map_addr = elf_map(filep, map_user_ptr, eppnt, prot, type, total_size); if (BAD_ADDR(map_addr)) return map_addr; + if (reserv_is_supported(current->mm) && total_size) { + load_info->user_ptr_elf = + reserv_range_set_reserv(map_addr, ELF_PAGEALIGN(total_size), + user_ptr_perms_from_prot(PROT_READ | PROT_WRITE | PROT_EXEC, + true), false); + if (IS_ERR_VALUE(load_info->user_ptr_elf)) + return (long)load_info->user_ptr_elf; + } if (eppnt->p_memsz > eppnt->p_filesz) { zero_start = map_addr + ELF_PAGEOFFSET(eppnt->p_vaddr) + eppnt->p_filesz; zero_end = map_addr + ELF_PAGEOFFSET(eppnt->p_vaddr) + eppnt->p_memsz; - + map_user_ptr = zero_start; + if (reserv_is_supported(current->mm)) + map_user_ptr = (user_uintptr_t)user_ptr_set_addr((void __user *)load_info->user_ptr_elf, + zero_start); /* * Zero the end of the last mapped page but ignore * any errors if the segment isn't writable. */ - if (padzero(zero_start) && (prot & PROT_WRITE)) + if (padzero(map_user_ptr) && (prot & PROT_WRITE)) return -EFAULT; } } else { @@ -499,15 +521,24 @@ static unsigned long elf_load(struct file *filep, unsigned long addr, * If the header is requesting these pages to be * executable, honour that (ppc32 needs this). */ - int error;
zero_start = ELF_PAGEALIGN(zero_start); zero_end = ELF_PAGEALIGN(zero_end);
- error = vm_brk_flags(zero_start, zero_end - zero_start, + if (!reserv_is_supported(current->mm)) + return vm_brk_flags(zero_start, zero_end - zero_start, prot & PROT_EXEC ? VM_EXEC : 0); - if (error) - map_addr = error; + + if (zero_end <= zero_start) + return map_addr; + map_user_ptr = (user_uintptr_t)user_ptr_set_addr((void __user *)load_info->user_ptr_elf, + zero_start); + map_addr = vm_mmap(0, map_user_ptr, zero_end - zero_start, prot, + MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, 0); + if (BAD_ADDR(map_addr)) + return (int)map_addr; + if (padzero(map_user_ptr)) + map_addr = -EFAULT; } return map_addr; } @@ -745,7 +776,7 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex, else if (no_base && interp_elf_ex->e_type == ET_DYN) load_addr = -vaddr;
- map_addr = elf_load(interpreter, load_addr + vaddr, + map_addr = elf_load(load_info, interpreter, load_addr + vaddr, eppnt, elf_prot, elf_type, total_size); total_size = 0; error = map_addr; @@ -1090,6 +1121,11 @@ static int load_elf_binary(struct linux_binprm *bprm)
setup_new_exec(bprm);
+#if defined(CONFIG_CHERI_PURECAP_UABI) && (ELF_COMPAT == 0) + set_bit(MMF_PCUABI_RESERV, &current->mm->flags); +#else + clear_bit(MMF_PCUABI_RESERV, &current->mm->flags); +#endif /* Do this so that we can load the interpreter, if need be. We will change some of these later */ retval = setup_arg_pages(bprm, randomize_stack_top(STACK_TOP), @@ -1217,7 +1253,15 @@ static int load_elf_binary(struct linux_binprm *bprm) } }
- error = elf_load(bprm->file, load_bias + vaddr, elf_ppnt, + if (reserv_is_supported(current->mm) && first_pt_load && !total_size) { + total_size = total_mapping_size(elf_phdata, elf_ex->e_phnum); + if (!total_size) { + retval = -EINVAL; + goto out_free_dentry; + } + } + + error = elf_load(&exec_load_info, bprm->file, load_bias + vaddr, elf_ppnt, elf_prot, elf_flags, total_size); if (BAD_ADDR(error)) { retval = IS_ERR_VALUE(error) ?