Hi,
On 10/27/23 19:33, Kevin Brodsky wrote:
On 25/10/2023 07:36, Amit Daniel Kachhap wrote:
[...]
diff --git a/mm/mmap.c b/mm/mmap.c index bc422cc4a14b..74e52dc512fa 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -56,6 +56,7 @@ #define CREATE_TRACE_POINTS #include <trace/events/mmap.h> +#include "cap_addr_mgmt.h"
Looks like an invalid #include that is fixed in the next patch.
Yes, that was a typo; it is fixed in the next patch.
#include "internal.h" #ifndef arch_mmap_check @@ -1561,6 +1562,11 @@ static unsigned long unmapped_area(struct vm_unmapped_area_info *info) /* Adjust search length to account for worst case alignment overhead */ length = info->length + info->align_mask; +#if defined(CONFIG_CHERI_PURECAP_UABI)
- /* Cheri Representable length is sufficient for alignment */
- if (!is_compat_task())
length = cheri_representable_length(info->length);
Not sure I understand this. Why can we skip adding align_mask in this case? And also why isn't info->length adjusted in generic_get_unmapped_area()?
(len + align_mask) was a higher value than cheri_representable_length(len), so the gap requirement was larger. (Even keeping it as align_mask would be correct, but it may be less optimal.) I will add some more documentation here to explain this.
info->length is not adjusted, as the function needs the original length during the AND operation with align_mask.
+#endif if (length < info->length) return -ENOMEM; @@ -1612,6 +1618,11 @@ static unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info) MA_STATE(mas, ¤t->mm->mm_mt, 0, 0); /* Adjust search length to account for worst case alignment overhead */ length = info->length + info->align_mask; +#if defined(CONFIG_CHERI_PURECAP_UABI)
- /* Cheri Representable length is sufficient for alignment */
- if (!is_compat_task())
length = cheri_representable_length(info->length);
+#endif if (length < info->length) return -ENOMEM; @@ -1637,6 +1648,10 @@ static unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info) tmp = mas_prev(&mas, 0); if (tmp && vm_end_gap(tmp) > gap) { high_limit = tmp->vm_start; +#if defined(CONFIG_CHERI_PURECAP_UABI)
if (tmp->vm_flags & VM_PCUABI_RESERVE)
high_limit = tmp->reserv_start;
+#endif mas_reset(&mas); goto retry; } @@ -1687,18 +1702,32 @@ generic_get_unmapped_area(struct file *filp, unsigned long addr, struct vm_area_struct *vma, *prev; struct vm_unmapped_area_info info; const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags);
- unsigned long align_len = len;
- if (len > mmap_end - mmap_min_addr)
+#if defined(CONFIG_CHERI_PURECAP_UABI)
- if (!is_compat_task())
align_len = cheri_representable_length(len);
+#endif
- if (align_len > mmap_end - mmap_min_addr) return -ENOMEM;
- if (flags & MAP_FIXED)
- if (flags & MAP_FIXED) {
+#if defined(CONFIG_CHERI_PURECAP_UABI)
if (!is_compat_task() && (addr & ~cheri_representable_alignment_mask(len)))
There is no requirement for the specified address to be aligned. Rather, the requirement is for the new reservation, implied from the specified address and length, not to overlap with any other (as usual). In other words, no reservation should already exist within the aligned range.
As mmap() is now being modified for MAP_FIXED, I will check on removing this restriction.
//Amit
Kevin