do_vmi_munmap()/do_munmap() are the functions used in several places to unmap a memory mapping. However, when the PCuABI memory reservation interface is introduced, the reservation needs to be ignored in internal functions during memory unmapping, when shrinking mappings, or when merging fragmented VMAs.
Both functions are modified to take a flag indicating whether to ignore the PCuABI reservation and carry on with the usual unmapping activity. As do_munmap() is used in several external places, instead of adding a reservation parameter to it, an equivalent unmapping function, do_munmap_use_reserv(), is created.
These changes keep the existing functionality intact and will help integrate the reservation interfaces into different scenarios in subsequent commits.
Signed-off-by: Amit Daniel Kachhap amit.kachhap@arm.com --- include/linux/mm.h | 5 ++++- mm/mmap.c | 28 +++++++++++++++++++++++----- mm/mremap.c | 4 ++-- 3 files changed, 29 insertions(+), 8 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h index c1f4996a957f..1b32c2b81464 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -3143,11 +3143,14 @@ extern unsigned long mmap_region(struct file *file, unsigned long addr, extern unsigned long do_mmap(struct file *file, unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, unsigned long pgoff, unsigned long *populate, struct list_head *uf); + extern int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm, unsigned long start, size_t len, struct list_head *uf, - bool downgrade); + bool downgrade, bool reserve_ignore); extern int do_munmap(struct mm_struct *, unsigned long, size_t, struct list_head *uf); +extern int do_munmap_use_reserv(struct mm_struct *mm, unsigned long start, size_t len, + struct list_head *uf); extern int do_madvise(struct mm_struct *mm, unsigned long start, size_t len_in, int behavior);
#ifdef CONFIG_MMU diff --git a/mm/mmap.c b/mm/mmap.c index bc422cc4a14b..f4a9099365bf 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -2500,6 +2500,8 @@ do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma, * @uf: The userfaultfd list_head * @downgrade: set to true if the user wants to attempt to write_downgrade the * mmap_lock + * @reserve_ignore: set to true if the user wants to ignore reservation + * completely or false if the user wants to strictly use reservation. * * This function takes a @mas that is either pointing to the previous VMA or set * to MA_START and sets it up to remove the mapping(s). The @len will be @@ -2509,7 +2511,7 @@ do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma, */ int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm, unsigned long start, size_t len, struct list_head *uf, - bool downgrade) + bool downgrade, bool reserve_ignore) { unsigned long end; struct vm_area_struct *vma; @@ -2543,9 +2545,25 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len, { VMA_ITERATOR(vmi, mm, start);
- return do_vmi_munmap(&vmi, mm, start, len, uf, false); + return do_vmi_munmap(&vmi, mm, start, len, uf, false, true); }
+/* do_munmap_use_reserv() - Wrapper function for non-maple tree aware do_munmap() + * calls used in cases where PCuABI memory reservation is used. + * @mm: The mm_struct + * @start: The start address to munmap + * @len: The length to be munmapped. + * @uf: The userfaultfd list_head + */ +int do_munmap_use_reserv(struct mm_struct *mm, unsigned long start, size_t len, + struct list_head *uf) +{ + VMA_ITERATOR(vmi, mm, start); + + return do_vmi_munmap(&vmi, mm, start, len, uf, false, false); +} + + unsigned long mmap_region(struct file *file, unsigned long addr, unsigned long len, vm_flags_t vm_flags, unsigned long pgoff, struct list_head *uf) @@ -2577,7 +2595,7 @@ unsigned long mmap_region(struct file *file, unsigned long addr, }
/* Unmap any existing mapping in the area */ - if (do_vmi_munmap(&vmi, mm, addr, len, uf, false)) + if (do_vmi_munmap(&vmi, mm, addr, len, uf, false, true)) return -ENOMEM;
/* @@ -2804,7 +2822,7 @@ static int __vm_munmap(unsigned long start, size_t len, bool downgrade) if (mmap_write_lock_killable(mm)) return -EINTR;
- ret = do_vmi_munmap(&vmi, mm, start, len, &uf, downgrade); + ret = do_vmi_munmap(&vmi, mm, start, len, &uf, downgrade, true); /* * Returning 1 indicates mmap_lock is downgraded. * But 1 is not legal return value of vm_munmap() and munmap(), reset @@ -3057,7 +3075,7 @@ int vm_brk_flags(unsigned long addr, unsigned long request, unsigned long flags) if (ret) goto limits_failed;
- ret = do_vmi_munmap(&vmi, mm, addr, len, &uf, 0); + ret = do_vmi_munmap(&vmi, mm, addr, len, &uf, 0, true); if (ret) goto munmap_failed;
diff --git a/mm/mremap.c b/mm/mremap.c index b52592303e8b..305e7bcf06f9 100644 --- a/mm/mremap.c +++ b/mm/mremap.c @@ -703,7 +703,7 @@ static unsigned long move_vma(struct vm_area_struct *vma, }
vma_iter_init(&vmi, mm, old_addr); - if (do_vmi_munmap(&vmi, mm, old_addr, old_len, uf_unmap, false) < 0) { + if (do_vmi_munmap(&vmi, mm, old_addr, old_len, uf_unmap, false, true) < 0) { /* OOM: unable to split vma, just get accounts right */ if (vm_flags & VM_ACCOUNT && !(flags & MREMAP_DONTUNMAP)) vm_acct_memory(old_len >> PAGE_SHIFT); @@ -994,7 +994,7 @@ SYSCALL_DEFINE5(__retptr__(mremap), user_uintptr_t, addr, unsigned long, old_len VMA_ITERATOR(vmi, mm, addr + new_len);
retval = do_vmi_munmap(&vmi, mm, addr + new_len, - old_len - new_len, &uf_unmap, true); + old_len - new_len, &uf_unmap, true, true); /* Returning 1 indicates mmap_lock is downgraded to read. */ if (retval == 1) { downgraded = true;