Helper functions capability_may_set_prot(), mapping_to_capability_perm() and build_owning_capability() are added/modified to manage capability permissions in address space management syscalls as per the PCuABI specification.
Also, an arch-specific hook arch_map_to_cap_perm() is added to manage arch-specific capability permissions.
Signed-off-by: Amit Daniel Kachhap amit.kachhap@arm.com --- arch/arm64/include/asm/cap_addr_mgmt.h | 22 +++++++ include/linux/cap_addr_mgmt.h | 42 ++++++++++-- mm/cap_addr_mgmt.c | 88 ++++++++++++++++++++------ mm/mmap.c | 8 ++- 4 files changed, 133 insertions(+), 27 deletions(-) create mode 100644 arch/arm64/include/asm/cap_addr_mgmt.h
diff --git a/arch/arm64/include/asm/cap_addr_mgmt.h b/arch/arm64/include/asm/cap_addr_mgmt.h new file mode 100644 index 000000000000..aadb4768d2fd --- /dev/null +++ b/arch/arm64/include/asm/cap_addr_mgmt.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#ifndef __ASM_CAP_ADDR_MGMT_H +#define __ASM_CAP_ADDR_MGMT_H + +#include <linux/cheri.h> +#include <linux/mman.h> + +static __always_inline cheri_perms_t arch_map_to_cap_perm(int prot, bool has_tag_access) +{ + cheri_perms_t perms = 0; + + if ((prot & PROT_READ) && has_tag_access) + perms |= ARM_CAP_PERMISSION_MUTABLE_LOAD; + + if ((prot & PROT_EXEC) && + (cheri_perms_get(cheri_pcc_get()) & ARM_CAP_PERMISSION_EXECUTIVE)) + perms |= ARM_CAP_PERMISSION_EXECUTIVE; + + return perms; +} +#define arch_map_to_cap_perm arch_map_to_cap_perm +#endif /* __ASM_CAP_ADDR_MGMT_H */ diff --git a/include/linux/cap_addr_mgmt.h b/include/linux/cap_addr_mgmt.h index 60af5633ef6f..717f51008bc0 100644 --- a/include/linux/cap_addr_mgmt.h +++ b/include/linux/cap_addr_mgmt.h @@ -7,6 +7,7 @@ #include <linux/list.h> #include <linux/mm_types.h> #include <linux/types.h> +#include <asm/cap_addr_mgmt.h>
#ifdef CONFIG_CHERI_PURECAP_UABI #define CHERI_REPRESENTABLE_ALIGNMENT(len) \ @@ -26,13 +27,13 @@ * @vma: VMA pointer to insert the reservation entry. * @start: Reservation start value. * @len: Reservation length. - * @perm: Memory mapping permission for the reserved range. + * @perm: Capability permission for the reserved range. * * Return: 0 if reservation entry added successfully or -ERESERVATION * otherwise. */ int reserv_vma_insert_entry(struct vm_area_struct *vma, unsigned long start, - unsigned long len, unsigned long perm); + unsigned long len, cheri_perms_t perm);
/** * reserv_range_insert_entry() - Adds the reservation details across the VMA's @@ -42,13 +43,13 @@ int reserv_vma_insert_entry(struct vm_area_struct *vma, unsigned long start, * This function internally uses mmap_lock to synchronize the vma updates. * @start: Reservation start value. * @len: Reservation length. - * @perm: Memory mapping permission for the reserved range. + * @perm: Capability permission for the reserved range. * * Return: valid capability with bounded range and requested permission or * negative errorcode otherwise. */ user_uintptr_t reserv_range_insert_entry(unsigned long start, unsigned long len, - unsigned long perm); + cheri_perms_t perm);
/** * reserv_vmi_range_fully_mapped() - Searches the reservation interface for the @@ -130,9 +131,38 @@ bool capability_owns_range(user_uintptr_t cap, unsigned long addr, unsigned long * requested base address, length and memory protection flags. * @addr: Requested capability address. * @len: Requested capability length. - * @perm: Requested memory mapping permission flags. + * @perm: Requested capability permission flags. * * Return: A new capability derived from cheri_user_root_cap. */ -user_uintptr_t build_owning_capability(unsigned long addr, unsigned long len, unsigned long perm); +user_uintptr_t build_owning_capability(unsigned long addr, unsigned long len, cheri_perms_t perm); + +/** + * capability_may_set_prot() - Verify that the mapping protection flags conform + * to the capability permission flags. + * @cap: Capability value. + * @prot: Memory protection flags. + * + * Return: True if the capability permissions include the protection flags + * or false otherwise. + */ +bool capability_may_set_prot(user_uintptr_t cap, int prot); + +/** + * mapping_to_capability_perm() - Converts memory mapping protection flags to + * capability permission flags. + * @prot: Memory protection flags. + * @has_tag_access: Whether the returned capability permissions should include + * the tag access (capability load/store) flags. + * + * Return: Capability permission flags + */ +cheri_perms_t mapping_to_capability_perm(int prot, bool has_tag_access); + +#ifndef arch_map_to_cap_perm +static __always_inline cheri_perms_t arch_map_to_cap_perm(int prot, + bool has_tag_access) +{ + return 0; +} +#endif /* arch_map_to_cap_perm */ #endif /* _LINUX_CAP_ADDR_MGMT_H */ diff --git a/mm/cap_addr_mgmt.c b/mm/cap_addr_mgmt.c index a4a85b37d59d..b451fa279a48 100644 --- a/mm/cap_addr_mgmt.c +++ b/mm/cap_addr_mgmt.c @@ -9,12 +9,8 @@ #ifdef CONFIG_CHERI_PURECAP_UABI
int reserv_vma_insert_entry(struct vm_area_struct *vma, unsigned long start, - unsigned long len, unsigned long perm) + unsigned long len, cheri_perms_t perm) { - /* TODO [PCuABI] - capability permission conversion from memory permission */ - cheri_perms_t cheri_perms = CHERI_PERMS_READ | CHERI_PERMS_WRITE | - CHERI_PERMS_EXEC | CHERI_PERMS_ROOTCAP; - if (is_compat_task() || !(vma->vm_flags & VM_PCUABI_RESERVE)) return 0;
@@ -34,17 +30,13 @@ int reserv_vma_insert_entry(struct vm_area_struct *vma, unsigned long start, vma->reserv_start = start; vma->reserv_len = cheri_representable_length(len); if (perm) - vma->reserv_perm = cheri_perms; + vma->reserv_perm = perm;
return 0; }
-user_uintptr_t reserv_range_insert_entry(unsigned long start, unsigned long len, - unsigned long perm __maybe_unused) +user_uintptr_t reserv_range_insert_entry(unsigned long start, unsigned long len, cheri_perms_t perm) { - /* TODO [PCuABI] - capability permission conversion from memory permission */ - cheri_perms_t cheri_perm = CHERI_PERMS_READ | CHERI_PERMS_WRITE | - CHERI_PERMS_EXEC | CHERI_PERMS_ROOTCAP; struct mm_struct *mm = current->mm; struct vm_area_struct *vma; unsigned long end = start + len; @@ -74,7 +66,7 @@ user_uintptr_t reserv_range_insert_entry(unsigned long start, unsigned long len, vm_flags_set(vma, VM_PCUABI_RESERVE); WRITE_ONCE(vma->reserv_start, start); WRITE_ONCE(vma->reserv_len, len); - WRITE_ONCE(vma->reserv_perm, cheri_perm); + WRITE_ONCE(vma->reserv_perm, perm); } mmap_write_unlock(current->mm); ret = build_owning_capability(start, len, perm); @@ -190,19 +182,67 @@ bool capability_owns_range(user_uintptr_t cap, unsigned long addr, unsigned long align_len, CHERI_PERM_GLOBAL | CHERI_PERM_SW_VMEM); }
-user_uintptr_t build_owning_capability(unsigned long addr, unsigned long len, - unsigned long perm __maybe_unused) +user_uintptr_t build_owning_capability(unsigned long addr, unsigned long len, cheri_perms_t perm) { unsigned long align_start = round_down(addr, PAGE_SIZE); unsigned long align_len = cheri_representable_length(round_up(len, PAGE_SIZE));
- /* TODO [PCuABI] - capability permission conversion from memory permission */ - cheri_perms_t perms = CHERI_PERMS_READ | CHERI_PERMS_WRITE | - CHERI_PERMS_EXEC | CHERI_PERMS_ROOTCAP; + return (user_uintptr_t)cheri_build_user_cap(align_start, align_len, perm); +} + +static bool mapping_may_have_prot_flag(int prot, int map_val) +{ + int prot_max = PROT_MAX_EXTRACT(prot); + + if (prot_max) + return !!(prot_max & map_val); + else + return !!(prot & map_val); +} + +bool capability_may_set_prot(user_uintptr_t cap, int prot) +{ + cheri_perms_t perms = cheri_perms_get(cap); + + if (is_compat_task()) + return true; + + if (((prot & PROT_READ) && !(perms & CHERI_PERM_LOAD)) || + ((prot & PROT_WRITE) && !(perms & CHERI_PERM_STORE)) || + ((prot & PROT_EXEC) && !(perms & CHERI_PERM_EXECUTE))) + return false;
- return (user_uintptr_t)cheri_build_user_cap(align_start, align_len, perms); + return true; }
+cheri_perms_t mapping_to_capability_perm(int prot, bool has_tag_access) +{ + cheri_perms_t perms = 0; + + if (mapping_may_have_prot_flag(prot, PROT_READ)) { + perms |= CHERI_PERM_LOAD; + if (has_tag_access) + perms |= CHERI_PERM_LOAD_CAP; + } + if (mapping_may_have_prot_flag(prot, PROT_WRITE)) { + perms |= CHERI_PERM_STORE; + if (has_tag_access) + perms |= (CHERI_PERM_STORE_CAP | CHERI_PERM_STORE_LOCAL_CAP); + } + if (mapping_may_have_prot_flag(prot, PROT_EXEC)) { + perms |= CHERI_PERM_EXECUTE; + if (cheri_perms_get(cheri_pcc_get()) & CHERI_PERM_SYSTEM_REGS) + perms |= CHERI_PERM_SYSTEM_REGS; + } + /* Fetch any extra architecture specific permissions */ + perms |= arch_map_to_cap_perm(PROT_MAX_EXTRACT(prot) ? PROT_MAX_EXTRACT(prot) : prot, + has_tag_access); + perms |= CHERI_PERMS_ROOTCAP; + + return perms; +} + + #else
int reserv_vma_insert_entry(struct vm_area_struct *vma, unsigned long start, @@ -249,9 +289,19 @@ bool capability_owns_range(user_uintptr_t cap, unsigned long addr, unsigned long return true; }
-user_uintptr_t build_owning_capability(unsigned long addr, unsigned long len, unsigned long perm) +user_uintptr_t build_owning_capability(unsigned long addr, unsigned long len, cheri_perms_t perm) { return addr; }
+bool capability_may_set_prot(user_uintptr_t cap, int prot) +{ + return true; +} + +cheri_perms_t mapping_to_capability_perm(int prot, bool has_tag_access) +{ + return 0; +} + #endif /* CONFIG_CHERI_PURECAP_UABI */ diff --git a/mm/mmap.c b/mm/mmap.c index 803b18c7d746..cb7a4b71ad82 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -1440,7 +1440,9 @@ user_uintptr_t do_mmap(struct file *file, user_uintptr_t user_addr, if (!IS_ERR_VALUE(addr)) { if (!ignore_reserv) { if (new_caps) - user_addr = build_owning_capability(addr, len, prot); + user_addr = build_owning_capability(addr, len, + mapping_to_capability_perm(prot, + (flags & MAP_SHARED) ? false : true)); } else { user_addr = (user_uintptr_t)uaddr_to_user_ptr_safe(addr); } @@ -2751,7 +2753,9 @@ unsigned long mmap_region(struct file *file, user_uintptr_t user_addr, vma->vm_page_prot = vm_get_page_prot(vm_flags); vma->vm_pgoff = pgoff; if (vm_flags & VM_PCUABI_RESERVE) { - error = reserv_vma_insert_entry(vma, addr, len, prot); + error = reserv_vma_insert_entry(vma, addr, len, + mapping_to_capability_perm(prot, + (vm_flags & (VM_SHARED | VM_MAYSHARE)) ? false : true)); if (error) goto free_vma; }