In the CHERI architecture, not all address ranges can be represented as capability bounds, so add the necessary CHERI base and length alignment checks when generating a free unmapped virtual address or when evaluating a fixed input address.
The PCuABI reservation interface records the unusable alignment gaps at the start and end of each reservation. These gaps must be taken into account when searching for free unmapped address space.
For fixed addresses carrying a valid capability, the requested address range must lie entirely within the corresponding reservation range. Fixed null-capability addresses are instead verified not to overlap any existing reservation range.
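To illustrate the arithmetic, here is a minimal sketch (not part of the patch) of how a requested (addr, len) pair is widened into a representable range. The helper name reserv_representable_range() is hypothetical; cheri_representable_length() and cheri_representable_alignment_mask() are the helpers the patch itself uses, assumed here to pad the length up and to return a mask that clears the low address bits:

/*
 * Hypothetical illustration only: widen [addr, addr + len) so that its
 * bounds can be encoded in a capability. The base is rounded down and
 * the length padded up, which creates the unusable alignment gaps
 * mentioned above.
 */
static inline void reserv_representable_range(unsigned long addr,
					      unsigned long len,
					      unsigned long *base,
					      unsigned long *rlen)
{
	unsigned long mask = cheri_representable_alignment_mask(len);

	*base = addr & mask;			/* round base down */
	*rlen = cheri_representable_length(len);	/* pad length up */
}

The gaps [*base, addr) and [addr + len, *base + *rlen) are the ranges that the vm_start_gap()/vm_end_gap() changes below account for via reserv_start and reserv_len.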
Signed-off-by: Amit Daniel Kachhap <amitdaniel.kachhap@arm.com>
---
 include/linux/mm.h |  8 ++++
 mm/mmap.c          | 97 +++++++++++++++++++++++++++++++++++++++++-----
 2 files changed, 95 insertions(+), 10 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index a4b7381b4977..5b79120c999a 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -3239,6 +3239,10 @@ static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
 {
 	unsigned long vm_start = vma->vm_start;
 
+#ifdef CONFIG_CHERI_PURECAP_UABI
+	if (test_bit(MMF_PCUABI_RESERVE, &vma->vm_mm->flags))
+		vm_start = vma->reserv_start;
+#endif
 	if (vma->vm_flags & VM_GROWSDOWN) {
 		vm_start -= stack_guard_gap;
 		if (vm_start > vma->vm_start)
@@ -3251,6 +3255,10 @@ static inline unsigned long vm_end_gap(struct vm_area_struct *vma)
 {
 	unsigned long vm_end = vma->vm_end;
 
+#ifdef CONFIG_CHERI_PURECAP_UABI
+	if (test_bit(MMF_PCUABI_RESERVE, &vma->vm_mm->flags))
+		vm_end = vma->reserv_start + vma->reserv_len;
+#endif
 	if (vma->vm_flags & VM_GROWSUP) {
 		vm_end += stack_guard_gap;
 		if (vm_end < vma->vm_end)
diff --git a/mm/mmap.c b/mm/mmap.c
index 7f2246cbc969..6027da2c248b 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -48,6 +48,7 @@
 #include <linux/sched/mm.h>
 #include <linux/ksm.h>
 
+#include <linux/cap_addr_mgmt.h>
 #include <linux/uaccess.h>
 #include <asm/cacheflush.h>
 #include <asm/tlb.h>
@@ -1561,6 +1562,11 @@ static unsigned long unmapped_area(struct vm_unmapped_area_info *info)
 	/* Adjust search length to account for worst case alignment overhead */
 	length = info->length + info->align_mask;
+#if defined(CONFIG_CHERI_PURECAP_UABI)
+	/* The CHERI representable length is sufficient for alignment */
+	if (test_bit(MMF_PCUABI_RESERVE, &current->mm->flags))
+		length = cheri_representable_length(info->length);
+#endif
 	if (length < info->length)
 		return -ENOMEM;
 
@@ -1612,6 +1618,11 @@ static unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
 	MA_STATE(mas, &current->mm->mm_mt, 0, 0);
 	/* Adjust search length to account for worst case alignment overhead */
 	length = info->length + info->align_mask;
+#if defined(CONFIG_CHERI_PURECAP_UABI)
+	/* The CHERI representable length is sufficient for alignment */
+	if (test_bit(MMF_PCUABI_RESERVE, &current->mm->flags))
+		length = cheri_representable_length(info->length);
+#endif
 	if (length < info->length)
 		return -ENOMEM;
 
@@ -1637,6 +1648,10 @@ static unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
 	tmp = mas_prev(&mas, 0);
 	if (tmp && vm_end_gap(tmp) > gap) {
 		high_limit = tmp->vm_start;
+#if defined(CONFIG_CHERI_PURECAP_UABI)
+		if (test_bit(MMF_PCUABI_RESERVE, &current->mm->flags))
+			high_limit = tmp->reserv_start;
+#endif
 		mas_reset(&mas);
 		goto retry;
 	}
@@ -1688,20 +1703,46 @@ generic_get_unmapped_area(struct file *filp, user_uintptr_t user_ptr,
 	struct vm_unmapped_area_info info;
 	unsigned long addr = (ptraddr_t)user_ptr;
 	const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags);
+	unsigned long align_len = len;
 
-	if (len > mmap_end - mmap_min_addr)
+#if defined(CONFIG_CHERI_PURECAP_UABI)
+	if (test_bit(MMF_PCUABI_RESERVE, &mm->flags))
+		align_len = cheri_representable_length(len);
+#endif
+	if (align_len > mmap_end - mmap_min_addr)
 		return -ENOMEM;
 
-	if (flags & MAP_FIXED)
+	if (flags & MAP_FIXED) {
+#if defined(CONFIG_CHERI_PURECAP_UABI)
+		if (test_bit(MMF_PCUABI_RESERVE, &mm->flags) && cheri_tag_get(user_ptr)) {
+			/* Ensure that this range is within the reservation bounds */
+			vma = find_vma(mm, addr);
+			if (!vma || !reserv_vma_valid_address(vma, addr, len))
+				return -ERESERVATION;
+			return addr;
+		} else if (!test_bit(MMF_PCUABI_RESERVE, &mm->flags))
+			return addr;
+#else
 		return addr;
+#endif
+	}
 
 	if (addr) {
 		addr = PAGE_ALIGN(addr);
+#if defined(CONFIG_CHERI_PURECAP_UABI)
+		if (test_bit(MMF_PCUABI_RESERVE, &mm->flags))
+			addr = round_up(addr, CHERI_REPRESENTABLE_ALIGNMENT(len));
+#endif
 		vma = find_vma_prev(mm, addr, &prev);
-		if (mmap_end - len >= addr && addr >= mmap_min_addr &&
-		    (!vma || addr + len <= vm_start_gap(vma)) &&
+		if (mmap_end - align_len >= addr && addr >= mmap_min_addr &&
+		    (!vma || addr + align_len <= vm_start_gap(vma)) &&
 		    (!prev || addr >= vm_end_gap(prev)))
 			return addr;
+#if defined(CONFIG_CHERI_PURECAP_UABI)
+		else if (flags & MAP_FIXED)
+			/* This untagged fixed address overlaps with another reservation */
+			return -ERESERVATION;
+#endif
 	}
 
 	info.flags = 0;
@@ -1709,6 +1750,10 @@ generic_get_unmapped_area(struct file *filp, user_uintptr_t user_ptr,
 	info.low_limit = mm->mmap_base;
 	info.high_limit = mmap_end;
 	info.align_mask = 0;
+#if defined(CONFIG_CHERI_PURECAP_UABI)
+	if (test_bit(MMF_PCUABI_RESERVE, &mm->flags))
+		info.align_mask = ~(cheri_representable_alignment_mask(len));
+#endif
 	info.align_offset = 0;
 	return vm_unmapped_area(&info);
 }
@@ -1737,22 +1782,50 @@ generic_get_unmapped_area_topdown(struct file *filp, user_uintptr_t user_ptr,
 	struct vm_unmapped_area_info info;
 	unsigned long addr = (ptraddr_t)user_ptr;
 	const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags);
+	unsigned long align_len = len;
+	unsigned long align_addr;
 
+#if defined(CONFIG_CHERI_PURECAP_UABI)
+	if (test_bit(MMF_PCUABI_RESERVE, &mm->flags))
+		align_len = cheri_representable_length(len);
+#endif
 	/* requested length too big for entire address space */
-	if (len > mmap_end - mmap_min_addr)
+	if (align_len > mmap_end - mmap_min_addr)
 		return -ENOMEM;
 
-	if (flags & MAP_FIXED)
+	if (flags & MAP_FIXED) {
+#if defined(CONFIG_CHERI_PURECAP_UABI)
+		if (test_bit(MMF_PCUABI_RESERVE, &mm->flags) && cheri_tag_get(user_ptr)) {
+			/* Ensure that this range is within the reservation bounds */
+			vma = find_vma(mm, addr);
+			if (!vma || !reserv_vma_valid_address(vma, addr, len))
+				return -ERESERVATION;
+			return addr;
+		} else if (!test_bit(MMF_PCUABI_RESERVE, &mm->flags))
+			return addr;
+#else
 		return addr;
+#endif
+	}
 
 	/* requesting a specific address */
 	if (addr) {
 		addr = PAGE_ALIGN(addr);
-		vma = find_vma_prev(mm, addr, &prev);
-		if (mmap_end - len >= addr && addr >= mmap_min_addr &&
-		    (!vma || addr + len <= vm_start_gap(vma)) &&
-		    (!prev || addr >= vm_end_gap(prev)))
+		align_addr = addr;
+#if defined(CONFIG_CHERI_PURECAP_UABI)
+		if (test_bit(MMF_PCUABI_RESERVE, &mm->flags))
+			align_addr = CHERI_REPRESENTABLE_BASE(addr, len);
+#endif
+		vma = find_vma_prev(mm, align_addr, &prev);
+		if (mmap_end - align_len >= align_addr && align_addr >= mmap_min_addr &&
+		    (!vma || align_addr + align_len <= vm_start_gap(vma)) &&
+		    (!prev || align_addr >= vm_end_gap(prev)))
 			return addr;
+#if defined(CONFIG_CHERI_PURECAP_UABI)
+		else if (flags & MAP_FIXED)
+			/* This untagged fixed address overlaps with another reservation */
+			return -ERESERVATION;
+#endif
 	}
 
 	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
@@ -1760,6 +1833,10 @@ generic_get_unmapped_area_topdown(struct file *filp, user_uintptr_t user_ptr,
 	info.low_limit = PAGE_SIZE;
 	info.high_limit = arch_get_mmap_base(addr, mm->mmap_base);
 	info.align_mask = 0;
+#if defined(CONFIG_CHERI_PURECAP_UABI)
+	if (test_bit(MMF_PCUABI_RESERVE, &mm->flags))
+		info.align_mask = ~(cheri_representable_alignment_mask(len));
+#endif
 	info.align_offset = 0;
 
 	addr = vm_unmapped_area(&info);
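As a note on the MAP_FIXED path: reserv_vma_valid_address() is only referenced in this patch; its definition comes from the cap_addr_mgmt support added elsewhere in this series. A plausible sketch of the containment check it performs, assuming only the reserv_start/reserv_len vma fields used above, would be:

/*
 * Sketch only, not the actual implementation: a fixed, tagged request
 * [addr, addr + len) is acceptable only if it lies entirely within the
 * reservation recorded on the vma.
 */
static bool reserv_vma_valid_address(struct vm_area_struct *vma,
				     unsigned long addr, unsigned long len)
{
	return addr >= vma->reserv_start &&
	       addr + len <= vma->reserv_start + vma->reserv_len;
}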