PCuABI needs address space reservation interfaces to manage the owning capability of the allocated addresses. This interface prevents two unrelated owning capabilities created by the kernel from overlapping.
The reservation interface stores the ranges of the different virtual address mappings; each mapping is tied to a reservation whose bounds are the same as those of the capability provided by the kernel to userspace. It also stores the owning capability permissions, so that syscall requests to update permissions can be validated.
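As an illustration, a syscall handler could use the interface added here to check an incoming capability against its owning reservation before honouring a permission-update request (a minimal sketch; the call site and error handling are hypothetical, only reserv_mt_capability_bound_valid() and mm->reserv_mt come from this patch):

  /* Reject the request if the capability bounds do not exactly
   * match an existing reservation.
   */
  if (!reserv_mt_capability_bound_valid(&mm->reserv_mt, user_cap))
          return -EINVAL;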
The reservation interface follows a few basic rules:
- Reservations can only be created or destroyed; they are never expanded or shrunk. A reservation is created when a new memory mapping is made outside of an existing reservation.
- A single reservation can contain many mappings. However, an unused region of a reservation cannot be reused for a new, unrelated mapping.
- A reservation's start and end addresses are aligned to the page size.
- A reservation's length is aligned to the CHERI representable length.
More rules about the address space reservation interface can be found in the PCuABI specification.
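As an illustration of the last two rules, the bounds of a new reservation could be derived roughly as follows (a sketch only; cheri_representable_length() is the helper also used by the code below, while cheri_representable_alignment_mask() is assumed here to be available from <linux/cheri.h>):

  /* Page-align the requested length first ... */
  reserv_len = round_up(len, PAGE_SIZE);
  /* ... then widen/align it so that the capability bounds are
   * precisely representable.
   */
  reserv_len = cheri_representable_length(reserv_len);
  reserv_start = addr & cheri_representable_alignment_mask(reserv_len);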
Here, maple tree library functions are used to create and manage the reservation interface. The interface supports four operations (insert/delete/move/check), analogous to the VMA update operations (create/delete/expand/shrink/move). These interfaces can be called before and after VMA updates to implement the different reservation rules.
The different reservation APIs must be called with the mmap_lock of the corresponding struct mm_struct held for reading or writing, as appropriate.
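For instance, a caller on the mmap path could create a reservation for a new mapping like this (a sketch; the call site and the addr/len/perm values are illustrative, not part of this patch):

  mmap_write_lock(mm);
  /* The new mapping lies outside any existing reservation,
   * so reserve its (page-aligned) range.
   */
  if (!reserv_mt_range_valid(&mm->reserv_mt, addr, len))
          ret = reserv_mt_insert_entry(&mm->reserv_mt, addr, len, perm);
  mmap_write_unlock(mm);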
Signed-off-by: Amit Daniel Kachhap <amit.kachhap@arm.com>
---
 include/linux/cap_addr_mgmt.h | 112 +++++++++++++++++++++
 include/linux/mm_types.h      |   3 +
 kernel/fork.c                 |   8 ++
 lib/Makefile                  |   1 +
 lib/cap_addr_mgmt.c           | 181 ++++++++++++++++++++++++++++++++++
 5 files changed, 305 insertions(+)
 create mode 100644 include/linux/cap_addr_mgmt.h
 create mode 100644 lib/cap_addr_mgmt.c

diff --git a/include/linux/cap_addr_mgmt.h b/include/linux/cap_addr_mgmt.h
new file mode 100644
index 000000000000..fd67e9b21ecd
--- /dev/null
+++ b/include/linux/cap_addr_mgmt.h
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _LINUX_CAP_ADDR_MGMT_H
+#define _LINUX_CAP_ADDR_MGMT_H
+
+#include <linux/init.h>
+#include <linux/maple_tree.h>
+#include <linux/types.h>
+
+#ifdef CONFIG_CHERI_PURECAP_UABI
+
+struct reserv_mt_entry {
+	unsigned long reserv_start;
+	unsigned long reserv_len;
+	unsigned long reserv_perm;
+};
+
+/**
+ * reserv_mt_insert_entry() - Add the reservation for the virtual address
+ * range from start to (start + len) with perm permission as the entry.
+ * @rv_mt: Maple tree pointer to insert the reservation entry.
+ * @start: Reservation start value.
+ * @len: Reservation length.
+ * @perm: Capability permission for the reserved range.
+ *
+ * Return: 0 if reservation entry added successfully or -ERESERVATION/-ENOMEM
+ * otherwise.
+ */
+int reserv_mt_insert_entry(struct maple_tree *rv_mt, unsigned long start,
+			   unsigned long len, unsigned long perm);
+
+/**
+ * reserv_mt_capability_bound_valid() - Searches and matches the reservation
+ * interface for the virtual address range derived from the capability bound
+ * values.
+ * @rv_mt: Maple tree pointer to search the reservation entry.
+ * @start: Reservation capability value.
+ *
+ * Return: True if reservation entry found with the exact capability bound or
+ * false otherwise.
+ */
+bool reserv_mt_capability_bound_valid(struct maple_tree *rv_mt, uintcap_t start);
+
+/**
+ * reserv_mt_range_valid() - Searches the reservation interface for the virtual
+ * address range from start to (start + len). This is useful to find any
+ * overlaps with the existing mappings.
+ * @rv_mt: Maple tree pointer to search the reservation entry.
+ * @start: Virtual address start value.
+ * @len: Virtual address length.
+ *
+ * Return: True if the maple tree has any overlap with the given range or
+ * false otherwise.
+ */
+bool reserv_mt_range_valid(struct maple_tree *rv_mt, unsigned long start,
+			   unsigned long len);
+
+/**
+ * reserv_mt_range_fully_mapped() - Searches the reservation interface for the
+ * virtual address range from start to (start + len). This is useful to find
+ * if the requested range maps exactly with the reserved range.
+ * @rv_mt: Maple tree pointer to search the reservation entry.
+ * @start: Virtual address start value.
+ * @len: Virtual address length.
+ *
+ * Return: True if the maple tree mapping matches fully with the given range or
+ * false otherwise.
+ */
+bool reserv_mt_range_fully_mapped(struct maple_tree *rv_mt, unsigned long start,
+				  unsigned long len);
+/**
+ * reserv_mt_move_entry() - Remove the old reservation for the virtual address range
+ * from old_start to (old_start + old_len) and add a new reservation with range
+ * new_start to (new_start + new_len) with the same perm permission as the entry.
+ * @rv_mt: Maple tree pointer to search/insert the reservation entry.
+ * @old_start: Reservation old start value.
+ * @old_len: Reservation old length.
+ * @new_start: Reservation new start value.
+ * @new_len: Reservation new length.
+ * @perm: Capability permission for the reserved range (out parameter).
+ *
+ * Return: 0 if reservation entry moved successfully or -ERESERVATION otherwise.
+ */
+int reserv_mt_move_entry(struct maple_tree *rv_mt, unsigned long old_start,
+			 unsigned long old_len, unsigned long new_start,
+			 unsigned long new_len, unsigned long *perm);
+
+/**
+ * reserv_mt_delete_range() - Deletes the maple tree entry for the virtual
+ * address range from start to (start + len). If the requested range does
+ * not match completely and falls at the start, end or in between, the
+ * entry is shrunk appropriately.
+ * @rv_mt: Maple tree pointer to search the reservation entry.
+ * @start: Virtual address start value.
+ * @len: Virtual address length.
+ *
+ * Return: 0 if virtual address range deleted successfully or -ERESERVATION
+ * otherwise.
+ */
+int reserv_mt_delete_range(struct maple_tree *rv_mt, unsigned long start,
+			   unsigned long len);
+
+/**
+ * reserv_mt_init() - Initialises the reservation interfaces.
+ *
+ * Return: None.
+ */
+void __init reserv_mt_init(void);
+
+#endif /* CONFIG_CHERI_PURECAP_UABI */
+
+#endif /* _LINUX_CAP_ADDR_MGMT_H */
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 12e87f83287d..81e8f80d5bd6 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -584,6 +584,9 @@ struct kioctx_table;
 struct mm_struct {
 	struct {
 		struct maple_tree mm_mt;
+#ifdef CONFIG_CHERI_PURECAP_UABI
+		struct maple_tree reserv_mt; /* Tree to hold reserved address ranges */
+#endif
 #ifdef CONFIG_MMU
 		unsigned long (*get_unmapped_area) (struct file *filp,
 				unsigned long addr, unsigned long len,
diff --git a/kernel/fork.c b/kernel/fork.c
index d6fd09ba8d0a..45083d3e92ab 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -99,6 +99,7 @@
 #include <linux/stackprotector.h>
 #include <linux/user_events.h>
 #include <linux/iommu.h>
+#include <linux/cap_addr_mgmt.h>
 
 #include <asm/pgalloc.h>
 #include <linux/uaccess.h>
@@ -1081,6 +1082,9 @@ void __init fork_init(void)
 
 	lockdep_init_task(&init_task);
 	uprobes_init();
+#ifdef CONFIG_CHERI_PURECAP_UABI
+	reserv_mt_init();
+#endif
 }
 
 int __weak arch_dup_task_struct(struct task_struct *dst,
@@ -1259,6 +1263,10 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
 	mt_init_flags(&mm->mm_mt, MM_MT_FLAGS);
 	mt_set_external_lock(&mm->mm_mt, &mm->mmap_lock);
+#ifdef CONFIG_CHERI_PURECAP_UABI
+	mt_init_flags(&mm->reserv_mt, MM_MT_FLAGS);
+	mt_set_external_lock(&mm->reserv_mt, &mm->mmap_lock);
+#endif
 	atomic_set(&mm->mm_users, 1);
 	atomic_set(&mm->mm_count, 1);
 	seqcount_init(&mm->write_protect_seq);
diff --git a/lib/Makefile b/lib/Makefile
index 3072f6caa337..0c3f6b57ca63 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -274,6 +274,7 @@ obj-$(CONFIG_POLYNOMIAL) += polynomial.o
 
 obj-y += cheri.o
 obj-$(CONFIG_CHERI_PURECAP_UABI) += user_ptr.o
+obj-$(CONFIG_CHERI_PURECAP_UABI) += cap_addr_mgmt.o
 
 # stackdepot.c should not be instrumented or call instrumented functions.
 # Prevent the compiler from calling builtins like memcmp() or bcmp() from this
diff --git a/lib/cap_addr_mgmt.c b/lib/cap_addr_mgmt.c
new file mode 100644
index 000000000000..e22868506e70
--- /dev/null
+++ b/lib/cap_addr_mgmt.c
@@ -0,0 +1,181 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bug.h>
+#include <linux/cap_addr_mgmt.h>
+#include <linux/cheri.h>
+#include <linux/slab.h>
+
+/* SLAB cache for reserv_mt_entry structures */
+static struct kmem_cache *reserv_mt_entry_cachep;
+
+static struct reserv_mt_entry *reserv_mt_alloc(void)
+{
+	struct reserv_mt_entry *rv_entry;
+
+	rv_entry = kmem_cache_alloc(reserv_mt_entry_cachep, GFP_KERNEL);
+
+	return rv_entry;
+}
+
+int reserv_mt_insert_entry(struct maple_tree *rv_mt, unsigned long start,
+			   unsigned long len, unsigned long perm)
+{
+	struct reserv_mt_entry *rv_entry;
+	unsigned long align_start = round_down(start, PAGE_SIZE);
+	unsigned long align_end = align_start + round_up(len, PAGE_SIZE) - 1;
+	MA_STATE(mas, rv_mt, align_start, align_end);
+
+	rv_entry = reserv_mt_alloc();
+	if (!rv_entry)
+		return -ENOMEM;
+	rv_entry->reserv_perm = perm;
+	rv_entry->reserv_start = align_start;
+	rv_entry->reserv_len = round_up(len, PAGE_SIZE);
+
+	if (mas_store_gfp(&mas, rv_entry, GFP_KERNEL))
+		return -ERESERVATION;
+
+	return 0;
+}
+
+bool reserv_mt_capability_bound_valid(struct maple_tree *rv_mt, uintcap_t start)
+{
+	struct reserv_mt_entry *rv_entry;
+	unsigned long align_start = cheri_base_get(start);
+	unsigned long align_end = align_start + cheri_length_get(start) - 1;
+	MA_STATE(mas, rv_mt, align_start, align_end);
+
+	/* Check if there is a match with the existing reservations */
+	do {
+		rv_entry = mas_find(&mas, align_end);
+		if (!rv_entry)
+			return false;
+		if (rv_entry->reserv_start == align_start &&
+		    (rv_entry->reserv_start + cheri_representable_length(rv_entry->reserv_len) - 1) == align_end)
+			return true;
+	} while (1);
+
+	return false;
+}
+
+bool reserv_mt_range_valid(struct maple_tree *rv_mt, unsigned long start,
+			   unsigned long len)
+{
+	unsigned long align_start = round_down(start, PAGE_SIZE);
+	unsigned long align_end = align_start + round_up(len, PAGE_SIZE) - 1;
+	MA_STATE(mas, rv_mt, align_start, align_end);
+
+	/* Check if there is overlap with the existing mappings */
+	if (mas_find(&mas, align_end))
+		return true;
+
+	return false;
+}
+
+bool reserv_mt_range_fully_mapped(struct maple_tree *rv_mt, unsigned long start,
+				  unsigned long len)
+{
+	unsigned long align_start = round_down(start, PAGE_SIZE);
+	unsigned long align_end = align_start + round_up(len, PAGE_SIZE) - 1;
+	struct reserv_mt_entry *rv_entry;
+	MA_STATE(mas, rv_mt, align_start, align_end);
+
+	/* Try finding the given range */
+	rv_entry = mas_find(&mas, align_end);
+	if (!rv_entry)
+		return false;
+
+	/* Check if the range is fully mapped */
+	if (align_start != mas.index || align_end != mas.last ||
+	    mas.index != rv_entry->reserv_start ||
+	    mas.last != (rv_entry->reserv_start + rv_entry->reserv_len - 1))
+		return false;
+
+	return true;
+}
+
+int reserv_mt_move_entry(struct maple_tree *rv_mt, unsigned long old_start,
+			 unsigned long old_len, unsigned long new_start,
+			 unsigned long new_len, unsigned long *perm)
+{
+	struct reserv_mt_entry *rv_entry;
+	unsigned long align_start = round_down(old_start, PAGE_SIZE);
+	unsigned long align_end = align_start + round_up(old_len, PAGE_SIZE) - 1;
+	MA_STATE(mas, rv_mt, align_start, align_end);
+
+	/* Try finding the old range */
+	rv_entry = mas_find(&mas, align_end);
+	if (!rv_entry)
+		return -ERESERVATION;
+
+	if (align_start != mas.index || align_end != mas.last ||
+	    mas.index != rv_entry->reserv_start ||
+	    mas.last != (rv_entry->reserv_start + rv_entry->reserv_len - 1))
+		return -ERESERVATION; /* Only a fully mapped range can be moved */
+
+	/* Try removing the old reservation */
+	rv_entry = mas_erase(&mas);
+	if (!rv_entry)
+		return -ERESERVATION;
+
+	align_start = round_down(new_start, PAGE_SIZE);
+	align_end = align_start + round_up(new_len, PAGE_SIZE) - 1;
+	mas_set_range(&mas, align_start, align_end);
+	rv_entry->reserv_start = align_start;
+	rv_entry->reserv_len = round_up(new_len, PAGE_SIZE);
+	if (mas_store_gfp(&mas, rv_entry, GFP_KERNEL))
+		return -ERESERVATION;
+	*perm = rv_entry->reserv_perm;
+
+	return 0;
+}
+
+int reserv_mt_delete_range(struct maple_tree *rv_mt, unsigned long start,
+			   unsigned long len)
+{
+	struct reserv_mt_entry *rv_entry, *rv_new;
+	unsigned long align_start = round_down(start, PAGE_SIZE);
+	unsigned long align_end = align_start + round_up(len, PAGE_SIZE) - 1;
+	unsigned long deleted_start, deleted_end;
+	MA_STATE(mas, rv_mt, align_start, align_end);
+
+	rv_entry = mas_find(&mas, align_end);
+	if (!rv_entry)
+		return -ERESERVATION;
+
+	/* mas_erase() used below does not retain the index so store it */
+	deleted_start = mas.index;
+	deleted_end = mas.last;
+
+	mas_erase(&mas);
+	/* Return if the deleted range matches the requested range */
+	if (align_start == deleted_start && align_end == deleted_end)
+		return 0;
+	mas.index = deleted_start;
+	mas.last = deleted_end;
+	/* Process if the deleted range falls in between, at the start or at the end */
+	if (align_start > deleted_start && align_end < deleted_end) {
+		rv_new = reserv_mt_alloc();
+		if (!rv_new)
+			return -ENOMEM;
+		memcpy(rv_new, rv_entry, sizeof(struct reserv_mt_entry));
+		mas.last = align_start - 1;
+		if (mas_store_gfp(&mas, rv_new, GFP_KERNEL))
+			return -ERESERVATION;
+		mas.index = align_end + 1;
+		mas.last = deleted_end;
+	} else if (align_start > deleted_start) {
+		mas.last = align_start - 1;
+	} else if (align_end < deleted_end) {
+		mas.index = align_end + 1;
+	}
+	if (mas_store_gfp(&mas, rv_entry, GFP_KERNEL))
+		return -ERESERVATION;
+
+	return 0;
+}
+
+void __init reserv_mt_init(void)
+{
+	reserv_mt_entry_cachep = KMEM_CACHE(reserv_mt_entry, SLAB_PANIC|SLAB_ACCOUNT);
+}