In the CHERI architecture, not every address range can be represented exactly by a capability, because capability bounds are compressed. Add the necessary CHERI base and length alignment code when generating the free unmapped virtual address, so that the returned range is capability-representable.
Signed-off-by: Amit Daniel Kachhap <amit.kachhap@arm.com>
---
 include/linux/mm.h |  8 ++++++
 mm/mmap.c          | 67 ++++++++++++++++++++++++++++++++++++++++------
 2 files changed, 67 insertions(+), 8 deletions(-)
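Note for reviewers (not part of the patch): the user-space sketch below illustrates the representability rules the hunks rely on. It assumes a CHERI-aware toolchain providing <cheriintrin.h>; cheri_representable_length() and cheri_representable_alignment_mask() are the CHERI C API counterparts of the helpers used in the kernel hunks, and the example values are made up.

/*
 * Hypothetical sketch: pad a (base, length) pair until a CHERI
 * capability can represent its bounds exactly, mirroring what the
 * mmap() changes below do. Requires a CHERI compiler.
 */
#include <cheriintrin.h>
#include <stddef.h>
#include <stdio.h>

int main(void)
{
	size_t len = 0x1234000;			/* requested length (made up) */
	unsigned long addr = 0x567891000UL;	/* hinted base (made up) */

	/* Length rounded up so the capability bounds can be exact. */
	size_t rep_len = cheri_representable_length(len);

	/* Mask of base bits that must be clear for exact bounds. */
	size_t mask = cheri_representable_alignment_mask(len);

	printf("len  %#zx -> representable len %#zx\n", len, rep_len);
	printf("base %#lx -> aligned base     %#lx\n", addr, addr & mask);

	/*
	 * MAP_FIXED analogue: a base with bits set outside the mask can
	 * never yield exact bounds, so the patch fails the request with
	 * -ERESERVATION rather than silently moving or widening it.
	 */
	return (addr & ~mask) ? 1 : 0;
}

The same reasoning drives the non-fixed paths: rather than rejecting, they round the hint up and search with a widened length and alignment mask so vm_unmapped_area() only returns representable ranges.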
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 913b79b204be..00cb9fd3a5ee 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -3241,6 +3241,10 @@ static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
 {
 	unsigned long vm_start = vma->vm_start;
 
+#ifdef CONFIG_CHERI_PURECAP_UABI
+	if (vma->vm_flags & VM_PCUABI_RESERVE)
+		vm_start = vma->reserv_start;
+#endif
 	if (vma->vm_flags & VM_GROWSDOWN) {
 		vm_start -= stack_guard_gap;
 		if (vm_start > vma->vm_start)
@@ -3253,6 +3257,10 @@ static inline unsigned long vm_end_gap(struct vm_area_struct *vma)
 {
 	unsigned long vm_end = vma->vm_end;
 
+#ifdef CONFIG_CHERI_PURECAP_UABI
+	if (vma->vm_flags & VM_PCUABI_RESERVE)
+		vm_end = vma->reserv_start + vma->reserv_len;
+#endif
 	if (vma->vm_flags & VM_GROWSUP) {
 		vm_end += stack_guard_gap;
 		if (vm_end < vma->vm_end)
diff --git a/mm/mmap.c b/mm/mmap.c
index bc422cc4a14b..74e52dc512fa 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -56,6 +56,7 @@
 #define CREATE_TRACE_POINTS
 #include <trace/events/mmap.h>
 
+#include "cap_addr_mgmt.h" #include "internal.h"
 #ifndef arch_mmap_check
@@ -1561,6 +1562,11 @@ static unsigned long unmapped_area(struct vm_unmapped_area_info *info)
 	/* Adjust search length to account for worst case alignment overhead */
 	length = info->length + info->align_mask;
+#if defined(CONFIG_CHERI_PURECAP_UABI)
+	/* CHERI representable length is sufficient for alignment */
+	if (!is_compat_task())
+		length = cheri_representable_length(info->length);
+#endif
 	if (length < info->length)
 		return -ENOMEM;
@@ -1612,6 +1618,11 @@ static unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
 	MA_STATE(mas, &current->mm->mm_mt, 0, 0);
 	/* Adjust search length to account for worst case alignment overhead */
 	length = info->length + info->align_mask;
+#if defined(CONFIG_CHERI_PURECAP_UABI)
+	/* CHERI representable length is sufficient for alignment */
+	if (!is_compat_task())
+		length = cheri_representable_length(info->length);
+#endif
 	if (length < info->length)
 		return -ENOMEM;
@@ -1637,6 +1648,10 @@ static unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
 	tmp = mas_prev(&mas, 0);
 	if (tmp && vm_end_gap(tmp) > gap) {
 		high_limit = tmp->vm_start;
+#if defined(CONFIG_CHERI_PURECAP_UABI)
+		if (tmp->vm_flags & VM_PCUABI_RESERVE)
+			high_limit = tmp->reserv_start;
+#endif
 		mas_reset(&mas);
 		goto retry;
 	}
@@ -1687,18 +1702,32 @@ generic_get_unmapped_area(struct file *filp, unsigned long addr,
 	struct vm_area_struct *vma, *prev;
 	struct vm_unmapped_area_info info;
 	const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags);
+	unsigned long align_len = len;
 
-	if (len > mmap_end - mmap_min_addr)
+#if defined(CONFIG_CHERI_PURECAP_UABI)
+	if (!is_compat_task())
+		align_len = cheri_representable_length(len);
+#endif
+	if (align_len > mmap_end - mmap_min_addr)
 		return -ENOMEM;
 
-	if (flags & MAP_FIXED)
+	if (flags & MAP_FIXED) {
+#if defined(CONFIG_CHERI_PURECAP_UABI)
+		if (!is_compat_task() && (addr & ~cheri_representable_alignment_mask(len)))
+			return -ERESERVATION;
+#endif
 		return addr;
+	}
 
 	if (addr) {
 		addr = PAGE_ALIGN(addr);
+#if defined(CONFIG_CHERI_PURECAP_UABI)
+		if (!is_compat_task())
+			addr = round_up(addr, CHERI_REPRESENTABLE_ALIGNMENT(len));
+#endif
 		vma = find_vma_prev(mm, addr, &prev);
-		if (mmap_end - len >= addr && addr >= mmap_min_addr &&
-		    (!vma || addr + len <= vm_start_gap(vma)) &&
+		if (mmap_end - align_len >= addr && addr >= mmap_min_addr &&
+		    (!vma || addr + align_len <= vm_start_gap(vma)) &&
 		    (!prev || addr >= vm_end_gap(prev)))
 			return addr;
 	}
@@ -1708,6 +1737,10 @@ generic_get_unmapped_area(struct file *filp, unsigned long addr,
 	info.low_limit = mm->mmap_base;
 	info.high_limit = mmap_end;
 	info.align_mask = 0;
+#if defined(CONFIG_CHERI_PURECAP_UABI)
+	if (!is_compat_task())
+		info.align_mask = ~(cheri_representable_alignment_mask(len));
+#endif
 	info.align_offset = 0;
 	return vm_unmapped_area(&info);
 }
@@ -1735,20 +1768,34 @@ generic_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
 	struct mm_struct *mm = current->mm;
 	struct vm_unmapped_area_info info;
 	const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags);
+	unsigned long align_len = len;
 
+#if defined(CONFIG_CHERI_PURECAP_UABI)
+	if (!is_compat_task())
+		align_len = cheri_representable_length(len);
+#endif
 	/* requested length too big for entire address space */
-	if (len > mmap_end - mmap_min_addr)
+	if (align_len > mmap_end - mmap_min_addr)
 		return -ENOMEM;
 
-	if (flags & MAP_FIXED)
+	if (flags & MAP_FIXED) {
+#if defined(CONFIG_CHERI_PURECAP_UABI)
+		if (!is_compat_task() && (addr & ~cheri_representable_alignment_mask(len)))
+			return -ERESERVATION;
+#endif
 		return addr;
+	}
 
 	/* requesting a specific address */
 	if (addr) {
 		addr = PAGE_ALIGN(addr);
+#if defined(CONFIG_CHERI_PURECAP_UABI)
+		if (!is_compat_task())
+			addr = CHERI_REPRESENTABLE_BASE(addr, len);
+#endif
 		vma = find_vma_prev(mm, addr, &prev);
-		if (mmap_end - len >= addr && addr >= mmap_min_addr &&
-		    (!vma || addr + len <= vm_start_gap(vma)) &&
+		if (mmap_end - align_len >= addr && addr >= mmap_min_addr &&
+		    (!vma || addr + align_len <= vm_start_gap(vma)) &&
 		    (!prev || addr >= vm_end_gap(prev)))
 			return addr;
 	}
@@ -1758,6 +1805,10 @@ generic_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
 	info.low_limit = PAGE_SIZE;
 	info.high_limit = arch_get_mmap_base(addr, mm->mmap_base);
 	info.align_mask = 0;
+#if defined(CONFIG_CHERI_PURECAP_UABI)
+	if (!is_compat_task())
+		info.align_mask = ~(cheri_representable_alignment_mask(len));
+#endif
 	info.align_offset = 0;
 	addr = vm_unmapped_area(&info);