In the CHERI architecture, not every address range can be represented by a capability, so add the necessary CHERI base and length alignment checks when generating a free unmapped virtual address or evaluating a fixed input address.
The PCuABI reservation interface stores the unusable alignment gaps at the start and end of a reservation. These gaps must be taken into account when searching for free unmapped address space.
For fixed addresses derived from a valid capability, the requested address range must be fully contained within the reservation range. Fixed addresses derived from a null capability are verified not to overlap with any existing reservation range.
Due to the above requirements, get_unmapped_area() should not be used for limit checks on fixed valid-capability addresses, nor on already mapped addresses as is done in vma_expandable(). A new function, vm_area_range_within_limit(), is introduced for the sanity checks in those cases.
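
To illustrate the alignment semantics, here is a minimal sketch of what the reservation helpers used below are assumed to look like. The reserv_*() names are the ones this patch relies on; building them directly on cheri_representable_length() and cheri_representable_alignment_mask() from <linux/cheri.h> is an illustrative assumption rather than the actual cap_addr_mgmt implementation (which, among other things, also gates on reserv_is_supported()):

	/*
	 * Illustrative sketch only -- assumes the cheri.h primitives
	 * behave like the Morello CRRL/CRAM operations.
	 */
	#include <linux/cheri.h>

	/* Round len up to the next CHERI representable length. */
	static inline unsigned long reserv_representable_length(unsigned long len)
	{
		return cheri_representable_length(len);
	}

	/*
	 * Align addr down so that [addr, addr + len) can be covered by a
	 * capability; the bytes skipped over form the unusable gap that
	 * the reservation layer records.
	 */
	static inline unsigned long reserv_representable_base(unsigned long addr,
							      unsigned long len)
	{
		return addr & cheri_representable_alignment_mask(len);
	}

	/*
	 * Alignment mask passed to vm_unmapped_area(): the low address
	 * bits that must be zero for the base to be representable.
	 */
	static inline unsigned long reserv_representable_alignment(unsigned long len)
	{
		return ~cheri_representable_alignment_mask(len);
	}

For lengths small enough to be precisely representable, the rounded length equals len and the mask is zero, so these checks degenerate to the existing behaviour.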
Signed-off-by: Amit Daniel Kachhap <amitdaniel.kachhap@arm.com>
---
 include/linux/mm.h |  8 +++++
 mm/mmap.c          | 78 +++++++++++++++++++++++++++++++++++++---------
 mm/mremap.c        | 17 +++++++---
 3 files changed, 85 insertions(+), 18 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index ce2501062292..f7f09fe0684e 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -30,6 +30,7 @@
 #include <linux/kasan.h>
 #include <linux/memremap.h>
 #include <linux/slab.h>
+#include <linux/cap_addr_mgmt.h>
 
 struct mempolicy;
 struct anon_vma;
@@ -3409,6 +3410,8 @@ struct vm_unmapped_area_info {
 };
 
 extern unsigned long vm_unmapped_area(struct vm_unmapped_area_info *info);
+extern int vm_area_range_within_limit(unsigned long addr, unsigned long len,
+				      unsigned long flags);
 
 /* truncate.c */
 extern void truncate_inode_pages(struct address_space *, loff_t);
@@ -3472,9 +3475,12 @@ static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
 	unsigned long gap = stack_guard_start_gap(vma);
 	unsigned long vm_start = vma->vm_start;
 
+	if (reserv_is_supported(vma->vm_mm))
+		vm_start = reserv_vma_reserv_start(vma);
 	vm_start -= gap;
 	if (vm_start > vma->vm_start)
 		vm_start = 0;
+
 	return vm_start;
 }
 
@@ -3482,6 +3488,8 @@ static inline unsigned long vm_end_gap(struct vm_area_struct *vma)
 {
 	unsigned long vm_end = vma->vm_end;
 
+	if (reserv_is_supported(vma->vm_mm))
+		vm_end = reserv_vma_reserv_start(vma) + reserv_vma_reserv_len(vma);
 	if (vma->vm_flags & VM_GROWSUP) {
 		vm_end += stack_guard_gap;
 		if (vm_end < vma->vm_end)
diff --git a/mm/mmap.c b/mm/mmap.c
index bec26ad4fdb0..305c90332424 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -48,6 +48,8 @@
 #include <linux/sched/mm.h>
 #include <linux/ksm.h>
 
+#include <linux/cap_addr_mgmt.h>
+#include <linux/cheri.h>
 #include <linux/uaccess.h>
 #include <asm/cacheflush.h>
 #include <asm/tlb.h>
@@ -1656,6 +1658,8 @@ static unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
 		tmp = mas_prev(&mas, 0);
 		if (tmp && vm_end_gap(tmp) > gap) {
 			high_limit = tmp->vm_start;
+			if (reserv_is_supported(tmp->vm_mm))
+				high_limit = reserv_vma_reserv_start(tmp);
 			mas_reset(&mas);
 			goto retry;
 		}
@@ -1686,6 +1690,19 @@ unsigned long vm_unmapped_area(struct vm_unmapped_area_info *info)
 	return addr;
 }
 
+int vm_area_range_within_limit(unsigned long addr, unsigned long len,
+			       unsigned long flags)
+{
+	const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags);
+	unsigned long align_len = reserv_representable_length(len);
+
+	/* requested length too big for entire address space */
+	if (align_len > mmap_end - mmap_min_addr)
+		return -ENOMEM;
+
+	return 0;
+}
+
 /* Get an address range which is currently unmapped.
  * For shmat() with addr=0.
  *
@@ -1706,27 +1723,44 @@ generic_get_unmapped_area(struct file *filp, unsigned long addr,
 	struct vm_area_struct *vma, *prev;
 	struct vm_unmapped_area_info info;
 	const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags);
+	unsigned long align_len;
+	unsigned long align_addr;
 
-	if (len > mmap_end - mmap_min_addr)
+	align_len = reserv_representable_length(len);
+	if (align_len > mmap_end - mmap_min_addr)
 		return -ENOMEM;
 
-	if (flags & MAP_FIXED)
+	/*
+	 * In the case of PCuABI, a fixed address without a valid capability
+	 * must not overlap with any existing reservation. Let this scenario
+	 * fall through below for those checks.
+	 */
+	if ((flags & MAP_FIXED) && !reserv_is_supported(mm))
 		return addr;
 
 	if (addr) {
 		addr = PAGE_ALIGN(addr);
+		/*
+		 * The CHERI representable address is aligned down, as the
+		 * reservation layer holds this unusable aligned-down gap.
+		 */
+		align_addr = reserv_representable_base(addr, len);
 		vma = find_vma_prev(mm, addr, &prev);
-		if (mmap_end - len >= addr && addr >= mmap_min_addr &&
-		    (!vma || addr + len <= vm_start_gap(vma)) &&
-		    (!prev || addr >= vm_end_gap(prev)))
+		if (mmap_end - align_len >= align_addr && align_addr >= mmap_min_addr &&
+		    (!vma || align_addr + align_len <= vm_start_gap(vma)) &&
+		    (!prev || align_addr >= vm_end_gap(prev)))
 			return addr;
+		else if (flags & MAP_FIXED)
+			/* This untagged fixed address overlaps another reservation */
+			return -ERESERVATION;
 	}
 
 	info.flags = 0;
-	info.length = len;
+	info.length = align_len;
 	info.low_limit = mm->mmap_base;
 	info.high_limit = mmap_end;
 	info.align_mask = 0;
+	info.align_mask = reserv_representable_alignment(len);
 	info.align_offset = 0;
 	return vm_unmapped_area(&info);
 }
@@ -1754,29 +1788,45 @@ generic_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
 	struct mm_struct *mm = current->mm;
 	struct vm_unmapped_area_info info;
 	const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags);
+	unsigned long align_len;
+	unsigned long align_addr;
 
+	align_len = reserv_representable_length(len);
 	/* requested length too big for entire address space */
-	if (len > mmap_end - mmap_min_addr)
+	if (align_len > mmap_end - mmap_min_addr)
 		return -ENOMEM;
-
-	if (flags & MAP_FIXED)
+	/*
+	 * In the case of PCuABI, a fixed address without a valid capability
+	 * must not overlap with any existing reservation. Let this scenario
+	 * fall through below for those checks.
+	 */
+	if ((flags & MAP_FIXED) && !reserv_is_supported(mm))
 		return addr;
 
 	/* requesting a specific address */
 	if (addr) {
 		addr = PAGE_ALIGN(addr);
-		vma = find_vma_prev(mm, addr, &prev);
-		if (mmap_end - len >= addr && addr >= mmap_min_addr &&
-		    (!vma || addr + len <= vm_start_gap(vma)) &&
-		    (!prev || addr >= vm_end_gap(prev)))
+		/*
+		 * The CHERI representable address is aligned down, as the
+		 * reservation layer holds this unusable aligned-down gap.
+		 */
+		align_addr = reserv_representable_base(addr, len);
+		vma = find_vma_prev(mm, align_addr, &prev);
+		if (mmap_end - align_len >= align_addr && align_addr >= mmap_min_addr &&
+		    (!vma || align_addr + align_len <= vm_start_gap(vma)) &&
+		    (!prev || align_addr >= vm_end_gap(prev)))
 			return addr;
+		else if (flags & MAP_FIXED)
+			/* This fixed address overlaps with another reservation. */
+			return -ERESERVATION;
 	}
 
 	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
-	info.length = len;
+	info.length = align_len;
 	info.low_limit = PAGE_SIZE;
 	info.high_limit = arch_get_mmap_base(addr, mm->mmap_base);
 	info.align_mask = 0;
+	info.align_mask = reserv_representable_alignment(len);
 	info.align_offset = 0;
 	addr = vm_unmapped_area(&info);
 
diff --git a/mm/mremap.c b/mm/mremap.c
index 515217a95293..f014ac50d9f1 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -934,7 +934,10 @@ static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
 	if (vma->vm_flags & VM_MAYSHARE)
 		map_flags |= MAP_SHARED;
 
-	ret = get_unmapped_area(vma->vm_file, new_addr, new_len, vma->vm_pgoff +
+	if (reserv_is_supported(vma->vm_mm) && (map_flags & MAP_FIXED))
+		ret = vm_area_range_within_limit(new_addr, new_len, map_flags);
+	else
+		ret = get_unmapped_area(vma->vm_file, new_addr, new_len, vma->vm_pgoff +
 				((addr - vma->vm_start) >> PAGE_SHIFT),
 				map_flags);
 	if (IS_ERR_VALUE(ret))
@@ -959,9 +962,15 @@ static int vma_expandable(struct vm_area_struct *vma, unsigned long delta)
 		return 0;
 	if (find_vma_intersection(vma->vm_mm, vma->vm_end, end))
 		return 0;
-	if (get_unmapped_area(NULL, vma->vm_start, end - vma->vm_start,
-			      0, MAP_FIXED) & ~PAGE_MASK)
-		return 0;
+	if (reserv_is_supported(vma->vm_mm)) {
+		if (vm_area_range_within_limit(vma->vm_start, end - vma->vm_start,
+					       MAP_FIXED))
+			return 0;
+	} else {
+		if (get_unmapped_area(NULL, vma->vm_start, end - vma->vm_start,
+				      0, MAP_FIXED) & ~PAGE_MASK)
+			return 0;
+	}
 	return 1;
 }