Skip to content

Commit 626dcb1

Browse files
Fuad Tabba authored and gregkh committed
KVM: arm64: Calculate cptr_el2 traps on activating traps
[ Upstream commit 2fd5b4b ]

Similar to VHE, calculate the value of cptr_el2 from scratch on activate traps. This removes the need to store cptr_el2 in every vcpu structure. Moreover, some traps, such as whether the guest owns the fp registers, need to be set on every vcpu run.

Reported-by: James Clark <james.clark@linaro.org>
Fixes: 5294afd ("KVM: arm64: Exclude FP ownership from kvm_vcpu_arch")
Signed-off-by: Fuad Tabba <tabba@google.com>
Link: https://lore.kernel.org/r/20241216105057.579031-13-tabba@google.com
Signed-off-by: Marc Zyngier <maz@kernel.org>
Signed-off-by: Mark Brown <broonie@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
1 parent 9752dd3 commit 626dcb1

4 files changed

Lines changed: 32 additions & 51 deletions

File tree

arch/arm64/include/asm/kvm_host.h

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -708,7 +708,6 @@ struct kvm_vcpu_arch {
708708
u64 hcr_el2;
709709
u64 hcrx_el2;
710710
u64 mdcr_el2;
711-
u64 cptr_el2;
712711

713712
/* Exception Information */
714713
struct kvm_vcpu_fault_info fault;

arch/arm64/kvm/arm.c

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1569,7 +1569,6 @@ static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
15691569
}
15701570

15711571
vcpu_reset_hcr(vcpu);
1572-
vcpu->arch.cptr_el2 = kvm_get_reset_cptr_el2(vcpu);
15731572

15741573
/*
15751574
* Handle the "start in power-off" case.

arch/arm64/kvm/hyp/nvhe/pkvm.c

Lines changed: 0 additions & 30 deletions
Original file line numberDiff line numberDiff line change
@@ -31,8 +31,6 @@ static void pvm_init_traps_aa64pfr0(struct kvm_vcpu *vcpu)
3131
const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64PFR0_EL1);
3232
u64 hcr_set = HCR_RW;
3333
u64 hcr_clear = 0;
34-
u64 cptr_set = 0;
35-
u64 cptr_clear = 0;
3634

3735
/* Protected KVM does not support AArch32 guests. */
3836
BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL0),
@@ -62,21 +60,10 @@ static void pvm_init_traps_aa64pfr0(struct kvm_vcpu *vcpu)
6260
/* Trap AMU */
6361
if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AMU), feature_ids)) {
6462
hcr_clear |= HCR_AMVOFFEN;
65-
cptr_set |= CPTR_EL2_TAM;
66-
}
67-
68-
/* Trap SVE */
69-
if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_SVE), feature_ids)) {
70-
if (has_hvhe())
71-
cptr_clear |= CPACR_ELx_ZEN;
72-
else
73-
cptr_set |= CPTR_EL2_TZ;
7463
}
7564

7665
vcpu->arch.hcr_el2 |= hcr_set;
7766
vcpu->arch.hcr_el2 &= ~hcr_clear;
78-
vcpu->arch.cptr_el2 |= cptr_set;
79-
vcpu->arch.cptr_el2 &= ~cptr_clear;
8067
}
8168

8269
/*
@@ -106,7 +93,6 @@ static void pvm_init_traps_aa64dfr0(struct kvm_vcpu *vcpu)
10693
const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64DFR0_EL1);
10794
u64 mdcr_set = 0;
10895
u64 mdcr_clear = 0;
109-
u64 cptr_set = 0;
11096

11197
/* Trap/constrain PMU */
11298
if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), feature_ids)) {
@@ -133,21 +119,12 @@ static void pvm_init_traps_aa64dfr0(struct kvm_vcpu *vcpu)
133119
if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_TraceFilt), feature_ids))
134120
mdcr_set |= MDCR_EL2_TTRF;
135121

136-
/* Trap Trace */
137-
if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_TraceVer), feature_ids)) {
138-
if (has_hvhe())
139-
cptr_set |= CPACR_EL1_TTA;
140-
else
141-
cptr_set |= CPTR_EL2_TTA;
142-
}
143-
144122
/* Trap External Trace */
145123
if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_ExtTrcBuff), feature_ids))
146124
mdcr_clear |= MDCR_EL2_E2TB_MASK;
147125

148126
vcpu->arch.mdcr_el2 |= mdcr_set;
149127
vcpu->arch.mdcr_el2 &= ~mdcr_clear;
150-
vcpu->arch.cptr_el2 |= cptr_set;
151128
}
152129

153130
/*
@@ -198,10 +175,6 @@ static void pvm_init_trap_regs(struct kvm_vcpu *vcpu)
198175
/* Clear res0 and set res1 bits to trap potential new features. */
199176
vcpu->arch.hcr_el2 &= ~(HCR_RES0);
200177
vcpu->arch.mdcr_el2 &= ~(MDCR_EL2_RES0);
201-
if (!has_hvhe()) {
202-
vcpu->arch.cptr_el2 |= CPTR_NVHE_EL2_RES1;
203-
vcpu->arch.cptr_el2 &= ~(CPTR_NVHE_EL2_RES0);
204-
}
205178
}
206179

207180
static void pkvm_vcpu_reset_hcr(struct kvm_vcpu *vcpu)
@@ -236,7 +209,6 @@ static void pkvm_vcpu_reset_hcr(struct kvm_vcpu *vcpu)
236209
*/
237210
static void pkvm_vcpu_init_traps(struct kvm_vcpu *vcpu)
238211
{
239-
vcpu->arch.cptr_el2 = kvm_get_reset_cptr_el2(vcpu);
240212
vcpu->arch.mdcr_el2 = 0;
241213

242214
pkvm_vcpu_reset_hcr(vcpu);
@@ -693,8 +665,6 @@ int __pkvm_init_vcpu(pkvm_handle_t handle, struct kvm_vcpu *host_vcpu,
693665
return ret;
694666
}
695667

696-
hyp_vcpu->vcpu.arch.cptr_el2 = kvm_get_reset_cptr_el2(&hyp_vcpu->vcpu);
697-
698668
return 0;
699669
}
700670

arch/arm64/kvm/hyp/nvhe/switch.c

Lines changed: 32 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -36,33 +36,46 @@ DEFINE_PER_CPU(unsigned long, kvm_hyp_vector);
3636

3737
extern void kvm_nvhe_prepare_backtrace(unsigned long fp, unsigned long pc);
3838

39-
static void __activate_traps(struct kvm_vcpu *vcpu)
39+
static void __activate_cptr_traps(struct kvm_vcpu *vcpu)
4040
{
41-
u64 val;
41+
u64 val = CPTR_EL2_TAM; /* Same bit irrespective of E2H */
4242

43-
___activate_traps(vcpu, vcpu->arch.hcr_el2);
44-
__activate_traps_common(vcpu);
43+
if (has_hvhe()) {
44+
val |= CPACR_ELx_TTA;
4545

46-
val = vcpu->arch.cptr_el2;
47-
val |= CPTR_EL2_TAM; /* Same bit irrespective of E2H */
48-
val |= has_hvhe() ? CPACR_EL1_TTA : CPTR_EL2_TTA;
49-
if (cpus_have_final_cap(ARM64_SME)) {
50-
if (has_hvhe())
51-
val &= ~CPACR_ELx_SMEN;
52-
else
53-
val |= CPTR_EL2_TSM;
54-
}
46+
if (guest_owns_fp_regs()) {
47+
val |= CPACR_ELx_FPEN;
48+
if (vcpu_has_sve(vcpu))
49+
val |= CPACR_ELx_ZEN;
50+
}
51+
} else {
52+
val |= CPTR_EL2_TTA | CPTR_NVHE_EL2_RES1;
5553

56-
if (!guest_owns_fp_regs()) {
57-
if (has_hvhe())
58-
val &= ~(CPACR_ELx_FPEN | CPACR_ELx_ZEN);
59-
else
60-
val |= CPTR_EL2_TFP | CPTR_EL2_TZ;
54+
/*
55+
* Always trap SME since it's not supported in KVM.
56+
* TSM is RES1 if SME isn't implemented.
57+
*/
58+
val |= CPTR_EL2_TSM;
6159

62-
__activate_traps_fpsimd32(vcpu);
60+
if (!vcpu_has_sve(vcpu) || !guest_owns_fp_regs())
61+
val |= CPTR_EL2_TZ;
62+
63+
if (!guest_owns_fp_regs())
64+
val |= CPTR_EL2_TFP;
6365
}
6466

67+
if (!guest_owns_fp_regs())
68+
__activate_traps_fpsimd32(vcpu);
69+
6570
kvm_write_cptr_el2(val);
71+
}
72+
73+
static void __activate_traps(struct kvm_vcpu *vcpu)
74+
{
75+
___activate_traps(vcpu, vcpu->arch.hcr_el2);
76+
__activate_traps_common(vcpu);
77+
__activate_cptr_traps(vcpu);
78+
6679
write_sysreg(__this_cpu_read(kvm_hyp_vector), vbar_el2);
6780

6881
if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {

0 commit comments

Comments (0)