Add the bare minimum to enable saving & restoring the relevant
capability registers. Note that, at this point, only a subset of the
capability registers is actually handled, with the remaining ones being
listed for completeness only.
Signed-off-by: Beata Michalska <beata.michalska@arm.com>
---
 arch/arm64/include/asm/kvm_asm.h  |  6 +++++
 arch/arm64/include/asm/kvm_host.h | 38 +++++++++++++++++++++++++++++++
 arch/arm64/kernel/asm-offsets.c   |  3 +++
 3 files changed, 47 insertions(+)
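
Not part of the patch proper - a minimal sketch of how the new
ctxt_cap_sys_reg() accessor added below is intended to be used by
context-switch code, mirroring the existing __vcpu_sys_reg() /
ctxt_sys_reg() pattern. The read_cap_sysreg()/write_cap_sysreg()
helpers and the ctpidr_el0 operand are hypothetical placeholders,
not accessors introduced by this patch:

	/* Save/restore one capability system register for EL0 */
	static void cap_sysreg_save_el0(struct kvm_cpu_context *ctxt)
	{
		/* hypothetical capability sysreg read */
		ctxt_cap_sys_reg(ctxt, CTPIDR_EL0) = read_cap_sysreg(ctpidr_el0);
	}

	static void cap_sysreg_restore_el0(struct kvm_cpu_context *ctxt)
	{
		/* hypothetical capability sysreg write */
		write_cap_sysreg(ctxt_cap_sys_reg(ctxt, CTPIDR_EL0), ctpidr_el0);
	}

The macro expands to an lvalue, so plain assignment works on both the
save and the restore path, exactly as with ctxt_sys_reg().
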
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index 24b5e6b23417..4ca7ece15385 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -334,6 +334,12 @@ void __noreturn __cold nvhe_hyp_panic_handler(u64 esr, u64 spsr, u64 elr_virt,
 #define CPU_XREG_OFFSET(x)	(CPU_USER_PT_REGS + 8*x)
 #define CPU_LR_OFFSET		CPU_XREG_OFFSET(30)
 #define CPU_SP_EL0_OFFSET	(CPU_LR_OFFSET + 8)
+#ifdef CONFIG_ARM64_MORELLO
+#define CPU_CREG_OFFSET(x)	(CPU_USER_CREGS + 16*x)
+#define CPU_CLR_OFFSET		CPU_CREG_OFFSET(30)
+#define CPU_CSP_EL0_OFFSET	(CPU_CLR_OFFSET + 16)
+#define CPU_RCSP_EL0_OFFSET	(CPU_CLR_OFFSET + 32)
+#endif
 
 /*
  * We treat x18 as callee-saved as the host may use it as a platform
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 824f29f04916..808645b7108d 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -451,9 +451,40 @@ enum vcpu_sysreg {
 	NR_SYS_REGS	/* Nothing after this line! */
 };
 
+#ifdef CONFIG_ARM64_MORELLO
+enum vcpu_cap_sysregs {
+	__INVALID_CAP_SYSREG__,
+	CTPIDR_EL0,
+	RTPIDR_EL0,
+	CTPIDRRO_EL0,
+
+	CTPIDR_EL1,
+	CELR_EL1,
+	CVBAR_EL1,
+	CSP_EL1,
+	DDC_EL1,
+
+	CELR_EL2,
+	CVBAR_EL2,
+	CTPIDR_EL2,
+	CSP_EL2,
+	DDC_EL2,
+
+	NR_CAP_SYS_REGS
+};
+#endif
+
 struct kvm_cpu_context {
 	struct user_pt_regs regs;	/* sp = sp_el0 */
 
+	/* This should probably end up in user_pt_regs at one point */
+#ifdef CONFIG_ARM64_MORELLO
+	uintcap_t cregs[31];
+	uintcap_t csp;
+	uintcap_t rcsp;
+	uintcap_t pcc;
+#endif
+
 	u64	spsr_abt;
 	u64	spsr_und;
 	u64	spsr_irq;
@@ -463,6 +494,9 @@ struct kvm_cpu_context {
 
 	u64 sys_regs[NR_SYS_REGS];
 
+#ifdef CONFIG_ARM64_MORELLO
+	uintcap_t cap_sys_regs[NR_CAP_SYS_REGS];
+#endif
 	struct kvm_vcpu *__hyp_running_vcpu;
 };
 
@@ -833,6 +867,10 @@ struct kvm_vcpu_arch {
 
 #define __vcpu_sys_reg(v,r) (ctxt_sys_reg(&(v)->arch.ctxt, (r)))
 
+#ifdef CONFIG_ARM64_MORELLO
+#define __ctxt_cap_sys_reg(c, r)	(&(c)->cap_sys_regs[(r)])
+#define ctxt_cap_sys_reg(c, r)		(*__ctxt_cap_sys_reg(c, r))
+#endif
 u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg);
 void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg);
 
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index 90ca25039583..249bf3ca2fbc 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -143,6 +143,9 @@ int main(void)
   DEFINE(VCPU_FAULT_DISR,	offsetof(struct kvm_vcpu, arch.fault.disr_el1));
   DEFINE(VCPU_HCR_EL2,		offsetof(struct kvm_vcpu, arch.hcr_el2));
   DEFINE(CPU_USER_PT_REGS,	offsetof(struct kvm_cpu_context, regs));
+#ifdef CONFIG_ARM64_MORELLO
+  DEFINE(CPU_USER_CREGS,	offsetof(struct kvm_cpu_context, cregs));
+#endif
   DEFINE(CPU_RGSR_EL1,		offsetof(struct kvm_cpu_context, sys_regs[RGSR_EL1]));
   DEFINE(CPU_GCR_EL1,		offsetof(struct kvm_cpu_context, sys_regs[GCR_EL1]));
   DEFINE(CPU_APIAKEYLO_EL1,	offsetof(struct kvm_cpu_context, sys_regs[APIAKEYLO_EL1]));
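
As a side note (again, not kernel code), the new kvm_asm.h offsets
encode the following layout assumptions: each capability slot is 16
bytes, CPU_CLR_OFFSET names cregs[30] (CLR), and CPU_CSP_EL0_OFFSET /
CPU_RCSP_EL0_OFFSET expect csp and rcsp to sit immediately after the
cregs array. The stand-alone C below demonstrates that arithmetic with
a 16-byte stand-in for uintcap_t; cap_t and struct ctx are illustrative
only and appear nowhere in the patch:

	#include <assert.h>
	#include <stddef.h>

	/* 16-byte, 16-byte-aligned stand-in for uintcap_t */
	typedef struct { unsigned long long lo, hi; } __attribute__((aligned(16))) cap_t;

	/* mirrors the capability members added to kvm_cpu_context */
	struct ctx {
		cap_t cregs[31];	/* c0..c30, c30 being CLR */
		cap_t csp;
		cap_t rcsp;
		cap_t pcc;
	};

	#define CREG_OFFSET(x)	(offsetof(struct ctx, cregs) + 16 * (x))
	#define CLR_OFFSET	CREG_OFFSET(30)

	int main(void)
	{
		/* csp is one slot past CLR, rcsp two slots past it */
		assert(CLR_OFFSET + 16 == offsetof(struct ctx, csp));
		assert(CLR_OFFSET + 32 == offsetof(struct ctx, rcsp));
		return 0;
	}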