The vDSO is installed as two adjacent special mappings: the vvar data pages followed by the vDSO text. Create an appropriate reservation for the vDSO spanning both mappings, by setting the same reservation bounds on each of the two VMAs.
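For context, a minimal sketch of how __setup_additional_pages() reads with this change applied. The surrounding lines are assumed to match the mainline arm64 vdso.c (they are not part of this diff), and the semantics of reserv_vma_set_reserv() are inferred only from its use here, so treat this as an illustration of the intended layout rather than the literal result of applying the patch:

static int __setup_additional_pages(enum vdso_abi abi,
				    struct mm_struct *mm,
				    struct linux_binprm *bprm,
				    int uses_interp)
{
	unsigned long vdso_base, vdso_text_base, vdso_text_len, vdso_mapping_len;
	unsigned long gp_flags = 0;
	struct vm_area_struct *ret;

	BUILD_BUG_ON(VVAR_NR_PAGES != __VVAR_PAGES);

	vdso_text_len = vdso_info[abi].vdso_pages << PAGE_SHIFT;
	/* The reservation must cover the vvar pages plus the vDSO text. */
	vdso_mapping_len = vdso_text_len + VVAR_NR_PAGES * PAGE_SIZE;

	vdso_base = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0);
	if (IS_ERR_VALUE(vdso_base)) {
		ret = ERR_PTR(vdso_base);
		goto up_fail;
	}

	/* First mapping: the vvar data pages at the base of the range. */
	ret = _install_special_mapping(mm, vdso_base, VVAR_NR_PAGES * PAGE_SIZE,
				       VM_READ|VM_MAYREAD|VM_PFNMAP,
				       vdso_info[abi].dm);
	if (IS_ERR(ret))
		goto up_fail;

	/* Reservation spanning both mappings, attached to the vvar VMA. */
	if (reserv_vma_set_reserv(ret, vdso_base, vdso_mapping_len,
				  PROT_READ | PROT_EXEC))
		goto up_fail;

	if (system_supports_bti_kernel())
		gp_flags = VM_ARM64_BTI;

	/* Second mapping: the vDSO text, immediately after the vvar pages. */
	vdso_text_base = vdso_base + VVAR_NR_PAGES * PAGE_SIZE;
	mm->context.vdso = (void *)vdso_text_base;
	ret = _install_special_mapping(mm, vdso_text_base, vdso_text_len,
				       VM_READ|VM_EXEC|gp_flags|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       vdso_info[abi].cm);
	if (IS_ERR(ret))
		goto up_fail;

	/* Same bounds on the text VMA, so both VMAs share one reservation. */
	if (reserv_vma_set_reserv(ret, vdso_base, vdso_mapping_len,
				  PROT_READ | PROT_EXEC))
		goto up_fail;

	return 0;

up_fail:
	mm->context.vdso = NULL;
	return PTR_ERR(ret);
}

The key point is that both reserv_vma_set_reserv() calls pass the same (vdso_base, vdso_mapping_len) pair, so the reservation recorded on each VMA covers the whole two-mapping range.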
Signed-off-by: Kevin Brodsky <kevin.brodsky@arm.com>
---
 arch/arm64/kernel/vdso.c | 19 ++++++++++++++-----
 1 file changed, 14 insertions(+), 5 deletions(-)
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index 3121e70f598a..f9059577581f 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -21,6 +21,7 @@
 #include <linux/time_namespace.h>
 #include <linux/timekeeper_internal.h>
 #include <linux/vmalloc.h>
+#include <linux/mman.h>
 #include <vdso/datapage.h>
 #include <vdso/helpers.h>
 #include <vdso/vsyscall.h>
@@ -202,9 +203,9 @@ static int __setup_additional_pages(enum vdso_abi abi,
 				    struct linux_binprm *bprm,
 				    int uses_interp)
 {
-	unsigned long vdso_base, vdso_text_len, vdso_mapping_len;
+	unsigned long vdso_base, vdso_text_base, vdso_text_len, vdso_mapping_len;
 	unsigned long gp_flags = 0;
-	void *ret;
+	struct vm_area_struct *ret;
 
 	BUILD_BUG_ON(VVAR_NR_PAGES != __VVAR_PAGES);
 
@@ -224,18 +225,26 @@ static int __setup_additional_pages(enum vdso_abi abi,
 	if (IS_ERR(ret))
 		goto up_fail;
 
+	if (reserv_vma_set_reserv(ret, vdso_base, vdso_mapping_len,
+				  PROT_READ | PROT_EXEC))
+		goto up_fail;
+
 	if (system_supports_bti_kernel())
 		gp_flags = VM_ARM64_BTI;
 
-	vdso_base += VVAR_NR_PAGES * PAGE_SIZE;
-	mm->context.vdso = (void *)vdso_base;
-	ret = _install_special_mapping(mm, vdso_base, vdso_text_len,
+	vdso_text_base = vdso_base + VVAR_NR_PAGES * PAGE_SIZE;
+	mm->context.vdso = (void *)vdso_text_base;
+	ret = _install_special_mapping(mm, vdso_text_base, vdso_text_len,
 				       VM_READ|VM_EXEC|gp_flags|
 				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
 				       vdso_info[abi].cm);
 	if (IS_ERR(ret))
 		goto up_fail;
 
+	if (reserv_vma_set_reserv(ret, vdso_base, vdso_mapping_len,
+				  PROT_READ | PROT_EXEC))
+		goto up_fail;
+
 	return 0;
 
 up_fail: