On 26/02/2024 10:20, Beata Michalska wrote:
Signed-off-by: Beata Michalska <beata.michalska@arm.com>
---
 arch/arm64/include/asm/kvm_asm.h           | 102 +++++++++++++++++++--
 arch/arm64/kernel/hyp-stub.S               |   5 +
 arch/arm64/kvm/hyp/entry.S                 |  12 ++-
 arch/arm64/kvm/hyp/include/hyp/switch.h    |   4 +-
 arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h |  58 +++++++++++-
 arch/arm64/kvm/hyp/nvhe/host.S             |  66 +++++++++++--
 arch/arm64/kvm/hyp/nvhe/hyp-init.S         |  14 +++
 arch/arm64/kvm/hyp/nvhe/sys_regs.c         |   4 +-
 8 files changed, 243 insertions(+), 22 deletions(-)

diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index 4ca7ece15385..b76f29470389 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -353,10 +353,74 @@ void __noreturn __cold nvhe_hyp_panic_handler(u64 esr, u64 spsr, u64 elr_virt,
 	stp	x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
 	stp	x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
 	stp	x29, lr,  [\ctxt, #CPU_XREG_OFFSET(29)]
+#ifdef CONFIG_ARM64_MORELLO
+	/*
+	 * In theory this could be if/else but to play
+	 * on the safe side for the time being ...
+	 * to avoid clearing tag when storing non-capability
+	 * to capability-tagged location
+	 */

Not sure this is very clear. It looks like the main reason we need this
whole dance (saving separately + merging when restoring) is that KVM
happens to modify the saved X regs in places, like the kernel does, and
we use the merging approach for the same reason. It would probably be
better to have this comment in the commit message, as it applies to a
lot of code in this patch.

+	str	c18,      [\ctxt, #CPU_CREG_OFFSET(18)]
+	stp	c19, c20, [\ctxt, #CPU_CREG_OFFSET(19)]
+	stp	c21, c22, [\ctxt, #CPU_CREG_OFFSET(21)]
+	stp	c23, c24, [\ctxt, #CPU_CREG_OFFSET(23)]
+	stp	c25, c26, [\ctxt, #CPU_CREG_OFFSET(25)]
+	stp	c27, c28, [\ctxt, #CPU_CREG_OFFSET(27)]
+	stp	c29, clr, [\ctxt, #CPU_CREG_OFFSET(29)]
+#endif
 .endm

+#ifdef CONFIG_ARM64_MORELLO
+.macro sync_regs_c_x nr:req, x:req
+	cmp	x\nr, \x
+	b.eq	.Lskip_sync\@
+	scvalue	c\nr, c\nr, \x
+.Lskip_sync\@:
+.endm

Unless we can't include <asm/morello.h> for some reason, we could use the existing morello_merge_c_x instead.
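Something like the below, with the merging delegated to the existing
macro (assuming morello_merge_c_x takes the register number plus the
64-bit GPR, like sync_regs_c_x does here):

#include <asm/morello.h>

.macro sync_savged_regs_c_x ctxt, nr_1, nr_2, tmp_1, tmp_2
	ldp	\tmp_1, \tmp_2, [\ctxt, #CPU_XREG_OFFSET(\nr_1)]
	ldp	c\nr_1, c\nr_2, [\ctxt, #CPU_CREG_OFFSET(\nr_1)]
	morello_merge_c_x \nr_1, \tmp_1
	morello_merge_c_x \nr_2, \tmp_2
	stp	c\nr_1, c\nr_2, [\ctxt, #CPU_CREG_OFFSET(\nr_1)]
.endm
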
+.macro sync_savged_regs_c_x ctxt, nr_1, nr_2, tmp_1, tmp_2
+	ldp	\tmp_1, \tmp_2, [\ctxt, #CPU_XREG_OFFSET(\nr_1)]
+	ldp	c\nr_1, c\nr_2, [\ctxt, #CPU_CREG_OFFSET(\nr_1)]
+	sync_regs_c_x	\nr_1, \tmp_1
+	sync_regs_c_x	\nr_2, \tmp_2
+	stp	c\nr_1, c\nr_2, [\ctxt, #CPU_CREG_OFFSET(\nr_1)]
+.endm
+#endif

 .macro restore_callee_saved_regs ctxt
 	// We require \ctxt is not x18-x28
+#ifdef CONFIG_ARM64_MORELLO
+	ldr	x19, [\ctxt, #CPU_XREG_OFFSET(18)]
+	ldr	c18, [\ctxt, #CPU_CREG_OFFSET(18)]
+	sync_regs_c_x	18, x19
+	str	c18, [\ctxt, #CPU_CREG_OFFSET(18)]
+	sync_savged_regs_c_x	\ctxt, 19, 20, x21, x22
+	sync_savged_regs_c_x	\ctxt, 21, 22, x19, x20
+	sync_savged_regs_c_x	\ctxt, 23, 24, x19, x20
+	sync_savged_regs_c_x	\ctxt, 25, 26, x19, x20
+	sync_savged_regs_c_x	\ctxt, 27, 28, x19, x20
+	// Things get tricky here as we cannot use c30 for the sync here
+	// Note: the context is in x29
+	ldr	x18, [\ctxt, #CPU_XREG_OFFSET(29)]
+	ldr	c19, [\ctxt, #CPU_CREG_OFFSET(29)]
+	sync_regs_c_x	19, x18
+	str	c19, [\ctxt, #CPU_CREG_OFFSET(29)]
+	ldr	x18, [\ctxt, #CPU_XREG_OFFSET(30)]
+	ldr	c19, [\ctxt, #CPU_CREG_OFFSET(30)]
+	sync_regs_c_x	19, x18
+	str	c19, [\ctxt, #CPU_CREG_OFFSET(30)]
+	ldr	c18, [\ctxt, #CPU_CREG_OFFSET(18)]
+	ldp	c19, c20, [\ctxt, #CPU_CREG_OFFSET(19)]
+	ldp	c21, c22, [\ctxt, #CPU_CREG_OFFSET(21)]
+	ldp	c23, c24, [\ctxt, #CPU_CREG_OFFSET(23)]
+	ldp	c25, c26, [\ctxt, #CPU_CREG_OFFSET(25)]
+	ldp	c27, c28, [\ctxt, #CPU_CREG_OFFSET(27)]
+	ldp	c29, c30, [\ctxt, #CPU_CREG_OFFSET(29)]

This feels more complicated than it needs to be; couldn't it be very
similar to the restore code in kernel_exit (entry.S)? Two registers do
need to be stashed and reloaded at the end, but apart from that it
should be possible to handle everything with the merge macro (without
systematically writing back). You may need to introduce a variant
working on a single register though (like morello_ldr_merge_c_x in
entry.S); that would be useful for restore_sp_el0 as well.

Also nit: better to use clr everywhere for consistency (not c30).
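
For instance (a sketch; the hyp_ldr_merge_c_x name is made up, and I am
assuming morello_merge_c_x takes the register number plus the 64-bit
GPR):

// Reload one saved C register, merging in the saved X register
.macro hyp_ldr_merge_c_x ctxt, nr, tmp
	ldr	\tmp, [\ctxt, #CPU_XREG_OFFSET(\nr)]
	ldr	c\nr, [\ctxt, #CPU_CREG_OFFSET(\nr)]
	morello_merge_c_x \nr, \tmp
.endm

Most of the sequence above then becomes one line per register, e.g.
hyp_ldr_merge_c_x \ctxt, 18, x19, with no write-back to the context.
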
+#else
 	ldr	x18, [\ctxt, #CPU_XREG_OFFSET(18)]
 	ldp	x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
 	ldp	x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
@@ -364,16 +428,42 @@ void __noreturn __cold nvhe_hyp_panic_handler(u64 esr, u64 spsr, u64 elr_virt,
 	ldp	x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
 	ldp	x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
 	ldp	x29, lr,  [\ctxt, #CPU_XREG_OFFSET(29)]
+#endif
 .endm

-.macro save_sp_el0 ctxt, tmp
-	mrs	\tmp, sp_el0
-	str	\tmp, [\ctxt, #CPU_SP_EL0_OFFSET]
+.macro save_sp_el0 ctxt, nr
+#ifdef CONFIG_ARM64_MORELLO
+	mrs	c\nr, csp_el0
+	str	c\nr, [\ctxt, #CPU_CSP_EL0_OFFSET]
+	mrs	c\nr, rcsp_el0
+	str	c\nr, [\ctxt, #CPU_RCSP_EL0_OFFSET]
+#endif
+	mrs	x\nr, sp_el0
+	str	x\nr, [\ctxt, #CPU_SP_EL0_OFFSET]

If the saved SP is ever read by KVM, then the Executive/Restricted selection needs to be done too, like in entry.S.

 .endm

-.macro restore_sp_el0 ctxt, tmp
-	ldr	\tmp, [\ctxt, #CPU_SP_EL0_OFFSET]
-	msr	sp_el0, \tmp
+.macro restore_sp_el0 ctxt, nr_1, nr_2

It doesn't look like we're short on registers at the point where this
is called, so we might as well take a third temporary to simplify the
macro (one for CSP, one for RCSP, one for SP).
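
Roughly (a sketch under that assumption; note that I am also testing
the Executive permission on the reloaded CSP_EL0, which is what I
presume the current code intends):

.macro restore_sp_el0 ctxt, nr_1, nr_2, nr_3
#ifdef CONFIG_ARM64_MORELLO
	ldr	c\nr_1, [\ctxt, #CPU_CSP_EL0_OFFSET]
	ldr	c\nr_2, [\ctxt, #CPU_RCSP_EL0_OFFSET]
	morello_tst_cap_has_executive c\nr_1, x\nr_3
	ldr	x\nr_3, [\ctxt, #CPU_SP_EL0_OFFSET]
	b.eq	.L_merge_rcsp\@
	sync_regs_c_x	\nr_1, x\nr_3
	b	.L_restore\@
.L_merge_rcsp\@:
	sync_regs_c_x	\nr_2, x\nr_3
.L_restore\@:
	msr	csp_el0, c\nr_1
	msr	rcsp_el0, c\nr_2
#else
	ldr	x\nr_1, [\ctxt, #CPU_SP_EL0_OFFSET]
	msr	sp_el0, x\nr_1
#endif
.endm
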
+#ifdef CONFIG_ARM64_MORELLO
+	morello_tst_cap_has_executive c\nr_1, x\nr_2
+	b.eq	.L_merge_rcsp\@
+	ldr	c\nr_1, [\ctxt, #CPU_CSP_EL0_OFFSET]
+	ldr	x\nr_2, [\ctxt, #CPU_SP_EL0_OFFSET]
+	sync_regs_c_x	\nr_1, x\nr_2
+	ldr	c\nr_2, [\ctxt, #CPU_RCSP_EL0_OFFSET]
+	b	.L_restore\@
+.L_merge_rcsp\@:
+	ldr	c\nr_1, [\ctxt, #CPU_RCSP_EL0_OFFSET]
+	ldr	x\nr_2, [\ctxt, #CPU_SP_EL0_OFFSET]
+	sync_regs_c_x	\nr_2, x\nr_1
+	ldr	c\nr_1, [\ctxt, #CPU_CSP_EL0_OFFSET]
+.L_restore\@:
+	msr	csp_el0, c\nr_1
+	msr	rcsp_el0, c\nr_2
+#else
+	ldr	x\nr_1, [\ctxt, #CPU_SP_EL0_OFFSET]
+	msr	sp_el0, x\nr_1
+#endif
 .endm
 #endif

diff --git a/arch/arm64/kernel/hyp-stub.S b/arch/arm64/kernel/hyp-stub.S
index 49f0b7eb8abe..a6c649ac2e00 100644
--- a/arch/arm64/kernel/hyp-stub.S
+++ b/arch/arm64/kernel/hyp-stub.S
@@ -48,7 +48,12 @@ SYM_CODE_END(__hyp_stub_vectors)
 SYM_CODE_START_LOCAL(elx_sync)
 	cmp	x0, #HVC_SET_VECTORS
 	b.ne	1f
+#ifdef CONFIG_ARM64_MORELLO
+	cvtp	c1, x1
+	msr	cvbar_el2, c1
+#else
 	msr	vbar_el2, x1
+#endif
 	b	9f
 1:	cmp	x0, #HVC_FINALISE_EL2

diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S
index f3aa7738b477..77ee6759a8e6 100644
--- a/arch/arm64/kvm/hyp/entry.S
+++ b/arch/arm64/kvm/hyp/entry.S
@@ -32,7 +32,7 @@ SYM_FUNC_START(__guest_enter)
 	save_callee_saved_regs x1

 	// Save hyp's sp_el0
-	save_sp_el0	x1, x2
+	save_sp_el0	x1, 2

 	// Now the hyp state is stored if we have a pending RAS SError it must
 	// affect the host or hyp. If any asynchronous exception is pending we
@@ -63,7 +63,7 @@ alternative_else_nop_endif
 	ptrauth_switch_to_guest x29, x0, x1, x2

 	// Restore the guest's sp_el0
-	restore_sp_el0	x29, x0
+	restore_sp_el0	x29, 0, 1

 	// Restore guest regs x0-x17
 	ldp	x0, x1, [x29, #CPU_XREG_OFFSET(0)]
@@ -135,7 +135,7 @@ SYM_INNER_LABEL(__guest_exit, SYM_L_GLOBAL)
 	save_callee_saved_regs x1

 	// Store the guest's sp_el0
-	save_sp_el0	x1, x2
+	save_sp_el0	x1, 2

 	adr_this_cpu x2, kvm_hyp_ctxt, x3
@@ -150,7 +150,7 @@ SYM_INNER_LABEL(__guest_exit, SYM_L_GLOBAL)
 	mte_switch_to_hyp x1, x2, x3

 	// Restore hyp's sp_el0
-	restore_sp_el0	x2, x3
+	restore_sp_el0	x2, 3, 4

 	// Now restore the hyp regs
 	restore_callee_saved_regs x2
@@ -208,6 +208,10 @@ abort_guest_exit_end:
 	// restore the EL1 exception context so that we can report some
 	// information. Merge the exception code with the SError pending bit.
 	msr	elr_el2, x2

The msr elr_el2 above should be in an #else branch of the #ifdef below.
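Something like this, that is:

#ifdef CONFIG_ARM64_MORELLO
	cvtp	c2, x2
	msr	celr_el2, c2
#else
	msr	elr_el2, x2
#endif
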
+#ifdef CONFIG_ARM64_MORELLO
+	cvtp	c2, x2
+	msr	celr_el2, c2
+#endif
 	msr	esr_el2, x3
 	msr	spsr_el2, x4
 	orr	x0, x0, x5

diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
index f99d8af0b9af..fe4c35144cee 100644
--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -733,12 +733,12 @@ static inline void __kvm_unexpected_el2_exception(void)
 			continue;
 		}

-		write_sysreg(fixup, elr_el2);
+		write_sysreg_variant(fixup, elr_el2);
 		return;
 	}

 	/* Trigger a panic after restoring the hyp context. */
-	write_sysreg(__guest_exit_panic, elr_el2);
+	write_sysreg_variant(__guest_exit_panic, elr_el2);
 }

 #endif /* __ARM64_KVM_HYP_SWITCH_H__ */

diff --git a/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h b/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h
index bb6b571ec627..23215827aef6 100644
--- a/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h
+++ b/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h
@@ -9,6 +9,7 @@
 #include <linux/compiler.h>
 #include <linux/kvm_host.h>
+#include <linux/cheri.h>

 #include <asm/kprobes.h>
 #include <asm/kvm_asm.h>
@@ -37,6 +38,15 @@ static inline bool ctxt_has_mte(struct kvm_cpu_context *ctxt)
 	return kvm_has_mte(kern_hyp_va(vcpu->kvm));
 }

+static __always_inline void
+__sysreg_save_el1_state_ext(struct kvm_cpu_context *ctxt)
+{
+#ifdef CONFIG_ARM64_MORELLO
+	ctxt_cap_sys_reg(ctxt, CVBAR_EL1) = read_cap_sysreg_el1(SYS_VBAR);
+	ctxt_cap_sys_reg(ctxt, CELR_EL1) = read_cap_sysreg_el1(SYS_ELR);
+#endif
+}
+
 static inline void __sysreg_save_el1_state(struct kvm_cpu_context *ctxt)
 {
 	ctxt_sys_reg(ctxt, SCTLR_EL1)	= read_sysreg_el1(SYS_SCTLR);
@@ -70,6 +80,17 @@ static inline void __sysreg_save_el1_state(struct kvm_cpu_context *ctxt)
 	ctxt_sys_reg(ctxt, SP_EL1)	= read_sysreg(sp_el1);
 	ctxt_sys_reg(ctxt, ELR_EL1)	= read_sysreg_el1(SYS_ELR);

Does this still need to be done if we save CELR_EL1? In other words,
does it have a purpose beyond being restored (which is redundant on
Morello)? Same question for VBAR_EL1. Otherwise I'd just #ifdef those,
without adding new helpers.
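
If they are indeed redundant, something like this (assuming nothing
else consumes the 64-bit copies on Morello):

#ifdef CONFIG_ARM64_MORELLO
	ctxt_cap_sys_reg(ctxt, CVBAR_EL1) = read_cap_sysreg_el1(SYS_VBAR);
	ctxt_cap_sys_reg(ctxt, CELR_EL1) = read_cap_sysreg_el1(SYS_ELR);
#else
	ctxt_sys_reg(ctxt, VBAR_EL1)	= read_sysreg_el1(SYS_VBAR);
	ctxt_sys_reg(ctxt, ELR_EL1)	= read_sysreg_el1(SYS_ELR);
#endif
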
 	ctxt_sys_reg(ctxt, SPSR_EL1)	= read_sysreg_el1(SYS_SPSR);
+
+	if (IS_ENABLED(CONFIG_ARM64_MORELLO))
+		__sysreg_save_el1_state_ext(ctxt);
 }

+static __always_inline void
+__sysreg_el2_return_state_ext(struct kvm_cpu_context *ctxt)
+{
+#ifdef CONFIG_ARM64_MORELLO
+	ctxt->pcc = read_cap_sysreg_el2(SYS_ELR);
+#endif
+}
+
 static inline void __sysreg_save_el2_return_state(struct kvm_cpu_context *ctxt)
@@ -97,6 +118,18 @@ static inline void __sysreg_restore_user_state(struct kvm_cpu_context *ctxt)
 	write_sysreg(ctxt_sys_reg(ctxt, TPIDRRO_EL0), tpidrro_el0);
 }

+static __always_inline void
+__sysreg_restore_el1_state_ext(struct kvm_cpu_context *ctxt)
+{
+#ifdef CONFIG_ARM64_MORELLO
+	if (cheri_is_valid(ctxt_cap_sys_reg(ctxt, CVBAR_EL1)))
+		write_cap_sysreg_el1(ctxt_cap_sys_reg(ctxt, CVBAR_EL1), SYS_VBAR);
+	else
+		write_cap_sysreg_el1(ctxt_sys_reg(ctxt, VBAR_EL1), SYS_VBAR);
+	write_cap_sysreg_el1(ctxt_sys_reg(ctxt, ELR_EL1), SYS_ELR);
+#endif
+}

I don't really understand this. It seems to be meant for guests that
are Morello-unaware, but such guests do not enable Morello in the first
place, so they use non-capability exception entry/return and only the
64-bit part of VBAR_EL1 is relevant (see section 2.13 of the Morello
spec for more details).
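
If that reasoning holds, this helper could do the capability restore
unconditionally (a sketch, assuming the saved CVBAR_EL1 always holds
the right 64-bit value whether or not its tag is set):

#ifdef CONFIG_ARM64_MORELLO
	write_cap_sysreg_el1(ctxt_cap_sys_reg(ctxt, CVBAR_EL1), SYS_VBAR);
	write_cap_sysreg_el1(ctxt_sys_reg(ctxt, ELR_EL1), SYS_ELR);
#endif
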
 static inline void __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt)
 {
 	write_sysreg(ctxt_sys_reg(ctxt, MPIDR_EL1),	vmpidr_el2);
@@ -164,6 +197,9 @@ static inline void __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt)
 	write_sysreg(ctxt_sys_reg(ctxt, SP_EL1),	sp_el1);
 	write_sysreg_el1(ctxt_sys_reg(ctxt, ELR_EL1),	SYS_ELR);
 	write_sysreg_el1(ctxt_sys_reg(ctxt, SPSR_EL1),	SYS_SPSR);
+
+	if (IS_ENABLED(CONFIG_ARM64_MORELLO))
+		__sysreg_restore_el1_state_ext(ctxt);
 }

 /* Read the VCPU state's PSTATE, but translate (v)EL2 to EL1. */
@@ -183,6 +219,22 @@ static inline u64 to_hw_pstate(const struct kvm_cpu_context *ctxt)
 	return (ctxt->regs.pstate & ~(PSR_MODE_MASK | PSR_MODE32_BIT)) | mode;
 }

+static __always_inline void
+__sysreg_restore_el2_return_state_ext(struct kvm_cpu_context *ctxt)
+{
+#ifdef CONFIG_ARM64_MORELLO
+	/*
+	 * Cutting corners a bit here to avoid massive changes to
+	 * KVM_GET_REGS/KVM_SET_REGS: for the time being vcpu remains
+	 * unaware of capabilities
+	 */
+	if (cheri_is_valid(ctxt->pcc))
+		write_cap_sysreg_el2(ctxt->pcc, SYS_ELR);
+	else
+		write_cap_sysreg_el2(ctxt->regs.pc, SYS_ELR);
+#endif
+}
+
 static inline void __sysreg_restore_el2_return_state(struct kvm_cpu_context *ctxt)
 {
 	u64 pstate = to_hw_pstate(ctxt);
@@ -202,11 +254,13 @@ static inline void __sysreg_restore_el2_return_state(struct kvm_cpu_context *ctx
 	if (!(mode & PSR_MODE32_BIT) && mode >= PSR_MODE_EL2t)
 		pstate = PSR_MODE_EL2h | PSR_IL_BIT;

-	write_sysreg_el2(ctxt->regs.pc, SYS_ELR);
-	write_sysreg_el2(pstate, SYS_SPSR);
+	write_sysreg_el2(ctxt->regs.pc,		SYS_ELR);
+	write_sysreg_el2(pstate,		SYS_SPSR);

That seems to be a purely whitespace change, so I would leave it
untouched.

 	if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN))
 		write_sysreg_s(ctxt_sys_reg(ctxt, DISR_EL1), SYS_VDISR_EL2);
+
+	if (IS_ENABLED(CONFIG_ARM64_MORELLO))
+		__sysreg_restore_el2_return_state_ext(ctxt);
 }

 static inline void __sysreg32_save_state(struct kvm_vcpu *vcpu)

diff --git a/arch/arm64/kvm/hyp/nvhe/host.S b/arch/arm64/kvm/hyp/nvhe/host.S
index 7693a6757cd7..40bd4cd897a0 100644
--- a/arch/arm64/kvm/hyp/nvhe/host.S
+++ b/arch/arm64/kvm/hyp/nvhe/host.S
@@ -19,10 +19,16 @@ SYM_FUNC_START(__host_exit)
 	/* Store the host regs x2 and x3 */
 	stp	x2, x3,   [x0, #CPU_XREG_OFFSET(2)]
+#ifdef CONFIG_ARM64_MORELLO
+	stp	c2, c3,   [x0, #CPU_CREG_OFFSET(2)]
+#endif

 	/* Retrieve the host regs x0-x1 from the stack */
-	ldp	x2, x3, [sp], #16	// x0, x1
+#ifdef CONFIG_ARM64_MORELLO
+	ldp	c2, c3, [sp], #32
+#else
+	ldp	x2, x3, [sp], #16	// x0, x1
+#endif

 	/* Store the host regs x0-x1 and x4-x17 */
 	stp	x2, x3,   [x0, #CPU_XREG_OFFSET(0)]
 	stp	x4, x5,   [x0, #CPU_XREG_OFFSET(4)]
@@ -33,6 +39,16 @@ SYM_FUNC_START(__host_exit)
 	stp	x14, x15, [x0, #CPU_XREG_OFFSET(14)]
 	stp	x16, x17, [x0, #CPU_XREG_OFFSET(16)]

+#ifdef CONFIG_ARM64_MORELLO
+	stp	c2, c3,   [x0, #CPU_CREG_OFFSET(0)]
+	stp	c4, c5,   [x0, #CPU_CREG_OFFSET(4)]
+	stp	c6, c7,   [x0, #CPU_CREG_OFFSET(6)]
+	stp	c8, c9,   [x0, #CPU_CREG_OFFSET(8)]
+	stp	c10, c11, [x0, #CPU_CREG_OFFSET(10)]
+	stp	c12, c13, [x0, #CPU_CREG_OFFSET(12)]
+	stp	c14, c15, [x0, #CPU_CREG_OFFSET(14)]
+	stp	c16, c17, [x0, #CPU_CREG_OFFSET(16)]
+#endif
+
 	/* Store the host regs x18-x29, lr */
 	save_callee_saved_regs x0
@@ -75,19 +91,43 @@ __skip_pauth_restore:
 #endif /* CONFIG_ARM64_PTR_AUTH_KERNEL */

 	/* Restore host regs x0-x17 */
+#ifdef CONFIG_ARM64_MORELLO
+	sync_savged_regs_c_x	x29, 0, 1, x2, x3
+	sync_savged_regs_c_x	x29, 2, 3, x0, x1
+	sync_savged_regs_c_x	x29, 4, 5, x0, x1
+	sync_savged_regs_c_x	x29, 6, 7, x0, x1
+	ldp	c0, c1,   [x29, #CPU_CREG_OFFSET(0)]
+	ldp	c2, c3,   [x29, #CPU_CREG_OFFSET(2)]
+	ldp	c4, c5,   [x29, #CPU_CREG_OFFSET(4)]
+	ldp	c6, c7,   [x29, #CPU_CREG_OFFSET(6)]
+#else
 	ldp	x0, x1,   [x29, #CPU_XREG_OFFSET(0)]
 	ldp	x2, x3,   [x29, #CPU_XREG_OFFSET(2)]
 	ldp	x4, x5,   [x29, #CPU_XREG_OFFSET(4)]
 	ldp	x6, x7,   [x29, #CPU_XREG_OFFSET(6)]
+#endif

 	/* x0-7 are use for panic arguments */
 __host_enter_for_panic:
+#ifdef CONFIG_ARM64_MORELLO
+	sync_savged_regs_c_x	x29, 8, 9, x10, x11
+	sync_savged_regs_c_x	x29, 10, 11, x8, x9
+	sync_savged_regs_c_x	x29, 12, 13, x8, x9
+	sync_savged_regs_c_x	x29, 14, 15, x8, x9
+	sync_savged_regs_c_x	x29, 16, 17, x8, x9
+	ldp	c8, c9,   [x29, #CPU_CREG_OFFSET(8)]
+	ldp	c10, c11, [x29, #CPU_CREG_OFFSET(10)]
+	ldp	c12, c13, [x29, #CPU_CREG_OFFSET(12)]
+	ldp	c14, c15, [x29, #CPU_CREG_OFFSET(14)]
+	ldp	c16, c17, [x29, #CPU_CREG_OFFSET(16)]
+#else
 	ldp	x8, x9,   [x29, #CPU_XREG_OFFSET(8)]
 	ldp	x10, x11, [x29, #CPU_XREG_OFFSET(10)]
 	ldp	x12, x13, [x29, #CPU_XREG_OFFSET(12)]
 	ldp	x14, x15, [x29, #CPU_XREG_OFFSET(14)]
 	ldp	x16, x17, [x29, #CPU_XREG_OFFSET(16)]
+#endif

 	/* Restore host regs x18-x29, lr */
 	restore_callee_saved_regs x29
@@ -117,7 +157,10 @@ SYM_FUNC_START(__hyp_do_panic)
 	adr_l	lr, nvhe_hyp_panic_handler
 	hyp_kimg_va lr, x6
 	msr	elr_el2, lr
+#ifdef CONFIG_ARM64_MORELLO
+	cvtd	clr, lr
+	msr	celr_el2, clr

s/cvtd/cvtp/, and the msr elr_el2 should be in the #else.
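
That is:

#ifdef CONFIG_ARM64_MORELLO
	cvtp	clr, lr
	msr	celr_el2, clr
#else
	msr	elr_el2, lr
#endif
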
+#endif
 	mov	x29, x0

 #ifdef CONFIG_NVHE_EL2_DEBUG
@@ -145,8 +188,11 @@ SYM_FUNC_START(__hyp_do_panic)
 SYM_FUNC_END(__hyp_do_panic)

 SYM_FUNC_START(__host_hvc)
+#ifndef CONFIG_ARM64_MORELLO

Nit: better to use #ifdef to avoid the double negation with #else
(there are a few instances of this in the patch).
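
For instance:

#ifdef CONFIG_ARM64_MORELLO
	ldp	c0, c1, [sp]		// Don't fixup the stack yet
#else
	ldp	x0, x1, [sp]		// Don't fixup the stack yet
#endif
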
 	ldp	x0, x1, [sp]		// Don't fixup the stack yet
+#else
+	ldp	c0, c1, [sp]
+#endif

 	/* No stub for you, sonny Jim */
 alternative_if ARM64_KVM_PROTECTED_MODE
 	b	__host_exit
@@ -156,7 +202,11 @@ alternative_else_nop_endif
 	cmp	x0, #HVC_STUB_HCALL_NR
 	b.hs	__host_exit

+#ifndef CONFIG_ARM64_MORELLO
 	add	sp, sp, #16
+#else
+	add	sp, sp, #32
+#endif
 	/*
 	 * Compute the idmap address of __kvm_handle_stub_hvc and
 	 * jump there.
@@ -171,7 +221,11 @@ SYM_FUNC_END(__host_hvc)
 .macro host_el1_sync_vect
 	.align 7
 .L__vect_start\@:
+#ifndef CONFIG_ARM64_MORELLO
 	stp	x0, x1, [sp, #-16]!
+#else
+	stp	c0, c1, [sp, #-32]!
+#endif
 	mrs	x0, esr_el2
 	ubfx	x0, x0, #ESR_ELx_EC_SHIFT, #ESR_ELx_EC_WIDTH
 	cmp	x0, #ESR_ELx_EC_HVC64

diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-init.S b/arch/arm64/kvm/hyp/nvhe/hyp-init.S
index 1cc06e6797bd..9b621692af2c 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-init.S
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-init.S
@@ -154,7 +154,12 @@ alternative_else_nop_endif

 	/* Set the host vector */
 	ldr	x0, =__kvm_hyp_host_vector
+#ifndef CONFIG_ARM64_MORELLO
 	msr	vbar_el2, x0
+#else
+	cvtp	c0, x0
+	msr	cvbar_el2, c0
+#endif
 	ret
 SYM_CODE_END(___kvm_hyp_init)
@@ -228,6 +233,10 @@ SYM_CODE_START(__kvm_handle_stub_hvc)

 	/* This is where we're about to jump, staying at EL2 */
 	msr	elr_el2, x1
+#ifdef CONFIG_ARM64_MORELLO
+	cvtp	c1, x1
+	msr	celr_el2, c1
+#endif
 	mov	x0, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT | PSR_MODE_EL2h)
 	msr	spsr_el2, x0
@@ -260,7 +269,12 @@ alternative_else_nop_endif

 	/* Install stub vectors */
 	adr_l	x5, __hyp_stub_vectors
+#ifdef CONFIG_ARM64_MORELLO
+	cvtp	c5, x5
+	msr	cvbar_el1, c5

cvbar_el2 surely?

Kevin

+#else
 	msr	vbar_el2, x5
+#endif
 	eret

 1:	/* Bad stub call */

diff --git a/arch/arm64/kvm/hyp/nvhe/sys_regs.c b/arch/arm64/kvm/hyp/nvhe/sys_regs.c
index edd969a1f36b..15e14961a0bb 100644
--- a/arch/arm64/kvm/hyp/nvhe/sys_regs.c
+++ b/arch/arm64/kvm/hyp/nvhe/sys_regs.c
@@ -44,8 +44,8 @@ static void inject_undef64(struct kvm_vcpu *vcpu)
 	__kvm_adjust_pc(vcpu);

 	write_sysreg_el1(esr, SYS_ESR);
-	write_sysreg_el1(read_sysreg_el2(SYS_ELR), SYS_ELR);
-	write_sysreg_el2(*vcpu_pc(vcpu), SYS_ELR);
+	write_cap_sysreg_el1(read_cap_sysreg_el2(SYS_ELR), SYS_ELR);
+	write_cap_sysreg_el2(*vcpu_pc(vcpu), SYS_ELR);
 	write_sysreg_el2(*vcpu_cpsr(vcpu), SYS_SPSR);
 }