From: Amit Daniel Kachhap <amitdaniel.kachhap@arm.com>
In CHERI architectures, not all address ranges can be represented as capability bounds, so add the necessary CHERI base and length alignment checks when generating free unmapped virtual address ranges or evaluating a fixed address range.
The PCuABI reservation interface stores the unusable alignment gaps at the start and end of each reservation. These gaps must be taken into account when searching for free unmapped address space.
In the MAP_FIXED case, the requested address range must either reside completely within an existing reservation range or not overlap any existing reservation range.
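To illustrate the representability constraint (purely an example, not part of this patch): for large mappings the capability base and limit must be aligned to a size-dependent boundary, so the usable page range may sit inside a slightly larger, representable reservation. A minimal sketch of that rounding, assuming the alignment value comes from the architecture's representability rules (the real helpers are provided by <linux/mm_reserv.h> and are not reproduced here):

    #include <stdint.h>

    /*
     * Illustrative only: widen a range to a representable base/length.
     * 'align' is assumed to be a power of two derived from the mapping size.
     */
    static inline uint64_t repr_base(uint64_t addr, uint64_t align)
    {
            return addr & ~(align - 1);             /* round the base down */
    }

    static inline uint64_t repr_length(uint64_t addr, uint64_t len, uint64_t align)
    {
            /* round the limit up, then measure from the rounded-down base */
            uint64_t end = (addr + len + align - 1) & ~(align - 1);

            return end - repr_base(addr, align);
    }

The ranges [repr_base, addr) and [addr + len, repr_base + repr_length) are the unusable alignment gaps mentioned above.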
Signed-off-by: Amit Daniel Kachhap <amitdaniel.kachhap@arm.com>
Co-developed-by: Kevin Brodsky <kevin.brodsky@arm.com>
Signed-off-by: Kevin Brodsky <kevin.brodsky@arm.com>
---
 include/linux/mm.h |  5 ++--
 mm/mmap.c          | 72 ++++++++++++++++++++++++++++++++++------------
 2 files changed, 56 insertions(+), 21 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index ce2501062292..efc17977a31e 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -30,6 +30,7 @@
 #include <linux/kasan.h>
 #include <linux/memremap.h>
 #include <linux/slab.h>
+#include <linux/mm_reserv.h>
 
 struct mempolicy;
 struct anon_vma;
@@ -3470,7 +3471,7 @@ static inline unsigned long stack_guard_start_gap(struct vm_area_struct *vma)
 static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
 {
         unsigned long gap = stack_guard_start_gap(vma);
-        unsigned long vm_start = vma->vm_start;
+        unsigned long vm_start = reserv_vma_reserv_start(vma);
 
         vm_start -= gap;
         if (vm_start > vma->vm_start)
@@ -3480,7 +3481,7 @@ static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
 
 static inline unsigned long vm_end_gap(struct vm_area_struct *vma)
 {
-        unsigned long vm_end = vma->vm_end;
+        unsigned long vm_end = reserv_vma_reserv_start(vma) + reserv_vma_reserv_len(vma);
 
         if (vma->vm_flags & VM_GROWSUP) {
                 vm_end += stack_guard_gap;
diff --git a/mm/mmap.c b/mm/mmap.c
index bec26ad4fdb0..6ae675961785 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -48,6 +48,7 @@
 #include <linux/sched/mm.h>
 #include <linux/ksm.h>
+#include <linux/mm_reserv.h>
 
 #include <linux/uaccess.h>
 #include <asm/cacheflush.h>
 #include <asm/tlb.h>
@@ -1655,7 +1656,7 @@ static unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
         } else {
                 tmp = mas_prev(&mas, 0);
                 if (tmp && vm_end_gap(tmp) > gap) {
-                        high_limit = tmp->vm_start;
+                        high_limit = reserv_vma_reserv_start(tmp);
                         mas_reset(&mas);
                         goto retry;
                 }
@@ -1706,27 +1707,43 @@ generic_get_unmapped_area(struct file *filp, unsigned long addr,
         struct vm_area_struct *vma, *prev;
         struct vm_unmapped_area_info info;
         const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags);
+        unsigned long aligned_len = reserv_representable_length(len);
 
-        if (len > mmap_end - mmap_min_addr)
+        if (aligned_len > mmap_end - mmap_min_addr)
                 return -ENOMEM;
 
-        if (flags & MAP_FIXED)
+        /*
+         * If MAP_FIXED is passed in the reservation case, the aligned range
+         * should be either completely contained inside an existing
+         * reservation, or completely outside (new reservation).
+         * Let this scenario fall through to the corresponding checks below.
+         */
+        if ((flags & MAP_FIXED) && !reserv_is_supported(mm))
                 return addr;
 
-        if (addr) {
+        if (addr || (flags & MAP_FIXED)) {
+                unsigned long aligned_addr;
+
                 addr = PAGE_ALIGN(addr);
-                vma = find_vma_prev(mm, addr, &prev);
-                if (mmap_end - len >= addr && addr >= mmap_min_addr &&
-                    (!vma || addr + len <= vm_start_gap(vma)) &&
-                    (!prev || addr >= vm_end_gap(prev)))
+                aligned_addr = reserv_representable_base(addr, len);
+                vma = find_vma_prev(mm, aligned_addr, &prev);
+                if (mmap_end - aligned_len >= aligned_addr && aligned_addr >= mmap_min_addr &&
+                    (!vma || aligned_addr + aligned_len <= vm_start_gap(vma)) &&
+                    (!prev || aligned_addr >= vm_end_gap(prev)))
                         return addr;
+                else if (flags & MAP_FIXED) {
+                        if ((vma && reserv_vma_range_within_reserv(vma, aligned_addr, aligned_len)) ||
+                            (prev && reserv_vma_range_within_reserv(prev, aligned_addr, aligned_len)))
+                                return addr;
+                        return -ERESERVATION;
+                }
         }
 
         info.flags = 0;
-        info.length = len;
+        info.length = aligned_len;
         info.low_limit = mm->mmap_base;
         info.high_limit = mmap_end;
-        info.align_mask = 0;
+        info.align_mask = reserv_representable_alignment(len);
         info.align_offset = 0;
         return vm_unmapped_area(&info);
 }
@@ -1754,29 +1771,46 @@ generic_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
         struct mm_struct *mm = current->mm;
         struct vm_unmapped_area_info info;
         const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags);
+        unsigned long aligned_len = reserv_representable_length(len);
 
         /* requested length too big for entire address space */
-        if (len > mmap_end - mmap_min_addr)
+        if (aligned_len > mmap_end - mmap_min_addr)
                 return -ENOMEM;
 
-        if (flags & MAP_FIXED)
+        /*
+         * If MAP_FIXED is passed in the reservation case, the aligned range
+         * should be either completely contained inside an existing
+         * reservation, or completely outside (new reservation).
+         * Let this scenario fall through to the corresponding checks below.
+         */
+        if ((flags & MAP_FIXED) && !reserv_is_supported(mm))
                 return addr;
 
         /* requesting a specific address */
-        if (addr) {
+        if (addr || (flags & MAP_FIXED)) {
+                unsigned long aligned_addr;
+
                 addr = PAGE_ALIGN(addr);
-                vma = find_vma_prev(mm, addr, &prev);
-                if (mmap_end - len >= addr && addr >= mmap_min_addr &&
-                    (!vma || addr + len <= vm_start_gap(vma)) &&
-                    (!prev || addr >= vm_end_gap(prev)))
+                aligned_addr = reserv_representable_base(addr, len);
+                vma = find_vma_prev(mm, aligned_addr, &prev);
+                if (mmap_end - aligned_len >= aligned_addr && aligned_addr >= mmap_min_addr &&
+                    (!vma || aligned_addr + aligned_len <= vm_start_gap(vma)) &&
+                    (!prev || aligned_addr >= vm_end_gap(prev)))
                         return addr;
+                else if (flags & MAP_FIXED) {
+                        if ((vma && reserv_vma_range_within_reserv(vma, aligned_addr, aligned_len)) ||
+                            (prev && reserv_vma_range_within_reserv(prev, aligned_addr, aligned_len))) {
+                                return addr;
+                        }
+                        return -ERESERVATION;
+                }
         }
 
         info.flags = VM_UNMAPPED_AREA_TOPDOWN;
-        info.length = len;
+        info.length = aligned_len;
         info.low_limit = PAGE_SIZE;
         info.high_limit = arch_get_mmap_base(addr, mm->mmap_base);
-        info.align_mask = 0;
+        info.align_mask = reserv_representable_alignment(len);
         info.align_offset = 0;
         addr = vm_unmapped_area(&info);