Use the recently introduced PCuABI reservation interfaces to verify the address range for mlock, mlock2, and munlock syscalls.
Signed-off-by: Amit Daniel Kachhap <amitdaniel.kachhap@arm.com> --- mm/mlock.c | 36 +++++++++++++++++++++++++++--------- 1 file changed, 27 insertions(+), 9 deletions(-)
diff --git a/mm/mlock.c b/mm/mlock.c index 086546ac5766..ecc36a698843 100644 --- a/mm/mlock.c +++ b/mm/mlock.c @@ -25,6 +25,7 @@ #include <linux/memcontrol.h> #include <linux/mm_inline.h> #include <linux/secretmem.h> +#include <linux/cap_addr_mgmt.h>
#include "internal.h"
@@ -621,14 +622,16 @@ static int __mlock_posix_error_return(long retval) return retval; }
-static __must_check int do_mlock(unsigned long start, size_t len, vm_flags_t flags) +static __must_check int do_mlock(user_uintptr_t user_ptr, size_t len, vm_flags_t flags) { unsigned long locked; unsigned long lock_limit; int error = -ENOMEM; + unsigned long start = untagged_addr(user_ptr); + struct vma_iterator vmi;
- start = untagged_addr(start); - + if (!check_user_ptr_owning(user_ptr, start, len)) + return -EINVAL; if (!can_do_mlock()) return -EPERM;
@@ -642,6 +645,12 @@ static __must_check int do_mlock(unsigned long start, size_t len, vm_flags_t fla if (mmap_write_lock_killable(current->mm)) return -EINTR;
+ vma_iter_init(&vmi, current->mm, start); + /* Check if the range exists within the reservation with mmap lock. */ + if (!reserv_vmi_cap_within_reserv(&vmi, user_ptr, true)) { + mmap_write_unlock(current->mm); + return -ERESERVATION; + } locked += current->mm->locked_vm; if ((locked > lock_limit) && (!capable(CAP_IPC_LOCK))) { /* @@ -668,12 +677,12 @@ static __must_check int do_mlock(unsigned long start, size_t len, vm_flags_t fla return 0; }
-SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len) +SYSCALL_DEFINE2(mlock, user_uintptr_t, user_ptr, size_t, len) { - return do_mlock(start, len, VM_LOCKED); + return do_mlock(user_ptr, len, VM_LOCKED); }
-SYSCALL_DEFINE3(mlock2, unsigned long, start, size_t, len, int, flags) +SYSCALL_DEFINE3(mlock2, user_uintptr_t, user_ptr, size_t, len, int, flags) { vm_flags_t vm_flags = VM_LOCKED;
@@ -683,20 +692,29 @@ SYSCALL_DEFINE3(mlock2, unsigned long, start, size_t, len, int, flags) if (flags & MLOCK_ONFAULT) vm_flags |= VM_LOCKONFAULT;
- return do_mlock(start, len, vm_flags); + return do_mlock(user_ptr, len, vm_flags); }
-SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len) +SYSCALL_DEFINE2(munlock, user_uintptr_t, user_ptr, size_t, len) { int ret; + unsigned long start = untagged_addr(user_ptr); + struct vma_iterator vmi;
- start = untagged_addr(start); + if (!check_user_ptr_owning(user_ptr, start, len)) + return -EINVAL;
len = PAGE_ALIGN(len + (offset_in_page(start))); start &= PAGE_MASK;
if (mmap_write_lock_killable(current->mm)) return -EINTR; + vma_iter_init(&vmi, current->mm, start); + /* Check if the range exists within the reservation with mmap lock. */ + if (!reserv_vmi_cap_within_reserv(&vmi, user_ptr, true)) { + mmap_write_unlock(current->mm); + return -ERESERVATION; + } ret = apply_vma_lock_flags(start, len, 0); mmap_write_unlock(current->mm);