On 11/03/2024 10:28, Amit Daniel Kachhap wrote:
[...]
@@ -3472,9 +3475,12 @@ static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
 	unsigned long gap = stack_guard_start_gap(vma);
 	unsigned long vm_start = vma->vm_start;

+	if (reserv_is_supported(vma->vm_mm))
+		vm_start = reserv_vma_reserv_start(vma);
If we made reserv_vma_reserv_start(vma) return vma->vm_start in the !PCuABI case, then we could use it unconditionally. Similar idea for reserv_vma_reserv_len(vma).
 	vm_start -= gap;
 	if (vm_start > vma->vm_start)
 		vm_start = 0;
Nit: spurious change.
 	return vm_start;
 }
@@ -3482,6 +3488,8 @@ static inline unsigned long vm_end_gap(struct vm_area_struct *vma)
 {
 	unsigned long vm_end = vma->vm_end;

+	if (reserv_is_supported(vma->vm_mm))
+		vm_end = reserv_vma_reserv_start(vma) + reserv_vma_reserv_len(vma);
 	if (vma->vm_flags & VM_GROWSUP) {
 		vm_end += stack_guard_gap;
 		if (vm_end < vma->vm_end)
diff --git a/mm/mmap.c b/mm/mmap.c
index bec26ad4fdb0..305c90332424 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -48,6 +48,8 @@
 #include <linux/sched/mm.h>
 #include <linux/ksm.h>
+#include <linux/cap_addr_mgmt.h>
+#include <linux/cheri.h>
 #include <linux/uaccess.h>
 #include <asm/cacheflush.h>
 #include <asm/tlb.h>
@@ -1656,6 +1658,8 @@ static unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
 		tmp = mas_prev(&mas, 0);
 		if (tmp && vm_end_gap(tmp) > gap) {
 			high_limit = tmp->vm_start;
+			if (reserv_is_supported(tmp->vm_mm))
+				high_limit = reserv_vma_reserv_start(tmp);
 			mas_reset(&mas);
 			goto retry;
 		}
@@ -1686,6 +1690,19 @@ unsigned long vm_unmapped_area(struct vm_unmapped_area_info *info)
 	return addr;
 }

+int vm_area_range_within_limit(unsigned long addr, unsigned long len,
+			       unsigned long flags)
+{
+	const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags);
+	unsigned long align_len = reserv_representable_length(len);
+
+	/* requested length too big for entire address space */
+	if (align_len > mmap_end - mmap_min_addr)
+		return -ENOMEM;
+
+	return 0;
+}
/* Get an address range which is currently unmapped.
- For shmat() with addr=0.
@@ -1706,27 +1723,44 @@ generic_get_unmapped_area(struct file *filp, unsigned long addr,
 	struct vm_area_struct *vma, *prev;
 	struct vm_unmapped_area_info info;
 	const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags);
+	unsigned long align_len;
+	unsigned long align_addr;

-	if (len > mmap_end - mmap_min_addr)
+	align_len = reserv_representable_length(len);
+	if (align_len > mmap_end - mmap_min_addr)
 		return -ENOMEM;

-	if (flags & MAP_FIXED)
+	/*
+	 * In case of PCuABI, fixed address without valid capability should
+	 * not overlap with any existing reservation. Let this scenario
+	 * fallthrough below for such checks.
+	 */
+	if ((flags & MAP_FIXED) && !reserv_is_supported(mm))
 		return addr;

 	if (addr) {
 		addr = PAGE_ALIGN(addr);
+		/*
+		 * Here CHERI representable address is aligned down as reservation
+		 * layer holds this unusable aligned down gap.
+		 */
+		align_addr = reserv_representable_base(addr, len);
 		vma = find_vma_prev(mm, addr, &prev);
-		if (mmap_end - len >= addr && addr >= mmap_min_addr &&
-		    (!vma || addr + len <= vm_start_gap(vma)) &&
-		    (!prev || addr >= vm_end_gap(prev)))
+		if (mmap_end - align_len >= align_addr && align_addr >= mmap_min_addr &&
+		    (!vma || align_addr + align_len <= vm_start_gap(vma)) &&
+		    (!prev || align_addr >= vm_end_gap(prev)))
 			return addr;
+		else if (flags & MAP_FIXED)
+			/* This non-tagged fixed address overlaps with other reservation */
+			return -ERESERVATION;
Expanding slightly on my comments in patch 7 here: what I am proposing is that we allow MAP_FIXED inside a reservation here. This means that instead of unconditionally returning -ERESERVATION, we check that the range is wholly contained within a reservation. If it is, then we just return addr as usual, otherwise (overlap case) we return -ERESERVATION. This might call for a new helper in cap_addr_mgmt.h.
Kevin
} [...]