In the CHERI architecture, not all address ranges can be represented exactly by a capability, so add the necessary CHERI base and length alignment checks when generating a free unmapped virtual address or validating a fixed input address.
The PCuABI reservation interface stores the unusable alignment gaps at the start and end of a reservation. These gaps should be taken into account when searching for free unmapped address space.
In the case of fixed (MAP_FIXED) addresses, the requested address range must either reside completely within an existing reservation range or not overlap with any existing reservation range.
Signed-off-by: Amit Daniel Kachhap amitdaniel.kachhap@arm.com --- include/linux/mm.h | 5 ++-- mm/mmap.c | 71 ++++++++++++++++++++++++++++++++++++---------- 2 files changed, 59 insertions(+), 17 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h index ce2501062292..73dc5ca47b55 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -30,6 +30,7 @@ #include <linux/kasan.h> #include <linux/memremap.h> #include <linux/slab.h> +#include <linux/cap_addr_mgmt.h>
struct mempolicy; struct anon_vma; @@ -3470,7 +3471,7 @@ static inline unsigned long stack_guard_start_gap(struct vm_area_struct *vma) static inline unsigned long vm_start_gap(struct vm_area_struct *vma) { unsigned long gap = stack_guard_start_gap(vma); - unsigned long vm_start = vma->vm_start; + unsigned long vm_start = reserv_vma_reserv_start(vma);
vm_start -= gap; if (vm_start > vma->vm_start) @@ -3480,7 +3481,7 @@ static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
static inline unsigned long vm_end_gap(struct vm_area_struct *vma) { - unsigned long vm_end = vma->vm_end; + unsigned long vm_end = reserv_vma_reserv_start(vma) + reserv_vma_reserv_len(vma);
if (vma->vm_flags & VM_GROWSUP) { vm_end += stack_guard_gap; diff --git a/mm/mmap.c b/mm/mmap.c index bec26ad4fdb0..64e64ab5e819 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -48,6 +48,7 @@ #include <linux/sched/mm.h> #include <linux/ksm.h>
+#include <linux/cap_addr_mgmt.h> #include <linux/uaccess.h> #include <asm/cacheflush.h> #include <asm/tlb.h> @@ -1655,7 +1656,7 @@ static unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info) } else { tmp = mas_prev(&mas, 0); if (tmp && vm_end_gap(tmp) > gap) { - high_limit = tmp->vm_start; + high_limit = reserv_vma_reserv_start(tmp); mas_reset(&mas); goto retry; } @@ -1706,27 +1707,47 @@ generic_get_unmapped_area(struct file *filp, unsigned long addr, struct vm_area_struct *vma, *prev; struct vm_unmapped_area_info info; const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags); + unsigned long align_len; + unsigned long align_addr;
- if (len > mmap_end - mmap_min_addr) + align_len = reserv_representable_length(len); + if (align_len > mmap_end - mmap_min_addr) return -ENOMEM;
- if (flags & MAP_FIXED) + /* + * In case of PCuABI reservation, fixed should not overlap with any + * existing reservation or completely contained inside the reservation. + * Let this scenario fallthrough below for such checks. + */ + if ((flags & MAP_FIXED) && !reserv_is_supported(mm)) return addr;
if (addr) { addr = PAGE_ALIGN(addr); + /* + * Here CHERI representable address is aligned down as reservation + * layer holds this unusable aligned down gap. + */ + align_addr = reserv_representable_base(addr, len); vma = find_vma_prev(mm, addr, &prev); - if (mmap_end - len >= addr && addr >= mmap_min_addr && - (!vma || addr + len <= vm_start_gap(vma)) && - (!prev || addr >= vm_end_gap(prev))) + if (mmap_end - align_len >= align_addr && align_addr >= mmap_min_addr && + (!vma || align_addr + align_len <= vm_start_gap(vma)) && + (!prev || align_addr >= vm_end_gap(prev))) return addr; + else if (flags & MAP_FIXED) { + if ((vma && reserv_vma_range_within_reserv(vma, align_addr, align_len)) || + (prev && reserv_vma_range_within_reserv(prev, align_addr, align_len))) + return addr; + return -ERESERVATION; + } }
info.flags = 0; - info.length = len; + info.length = align_len; info.low_limit = mm->mmap_base; info.high_limit = mmap_end; info.align_mask = 0; + info.align_mask = reserv_representable_alignment(len); info.align_offset = 0; return vm_unmapped_area(&info); } @@ -1754,29 +1775,49 @@ generic_get_unmapped_area_topdown(struct file *filp, unsigned long addr, struct mm_struct *mm = current->mm; struct vm_unmapped_area_info info; const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags); + unsigned long align_len; + unsigned long align_addr;
+ align_len = reserv_representable_length(len); /* requested length too big for entire address space */ - if (len > mmap_end - mmap_min_addr) + if (align_len > mmap_end - mmap_min_addr) return -ENOMEM; - - if (flags & MAP_FIXED) + /* + * In case of PCuABI reservation, fixed should not overlap with any + * existing reservation or completely contained inside the reservation. + * Let this scenario fallthrough below for such checks. + */ + if ((flags & MAP_FIXED) && !reserv_is_supported(mm)) return addr;
/* requesting a specific address */ if (addr) { addr = PAGE_ALIGN(addr); - vma = find_vma_prev(mm, addr, &prev); - if (mmap_end - len >= addr && addr >= mmap_min_addr && - (!vma || addr + len <= vm_start_gap(vma)) && - (!prev || addr >= vm_end_gap(prev))) + /* + * Here CHERI representable address is aligned down as reservation + * layer holds this unusable aligned down gap. + */ + align_addr = reserv_representable_base(addr, len); + vma = find_vma_prev(mm, align_addr, &prev); + if (mmap_end - align_len >= align_addr && align_addr >= mmap_min_addr && + (!vma || align_addr + align_len <= vm_start_gap(vma)) && + (!prev || align_addr >= vm_end_gap(prev))) return addr; + else if (flags & MAP_FIXED) { + if ((vma && reserv_vma_range_within_reserv(vma, align_addr, align_len)) || + (prev && reserv_vma_range_within_reserv(prev, align_addr, align_len))) { + return addr; + } + return -ERESERVATION; + } }
info.flags = VM_UNMAPPED_AREA_TOPDOWN; - info.length = len; + info.length = align_len; info.low_limit = PAGE_SIZE; info.high_limit = arch_get_mmap_base(addr, mm->mmap_base); info.align_mask = 0; + info.align_mask = reserv_representable_alignment(len); info.align_offset = 0; addr = vm_unmapped_area(&info);