Skip to content

Commit 6e827fa

Browse files
committed
Merge tag 'kvmarm-fixes-7.0-3' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into HEAD
KVM/arm64 fixes for 7.0, take #3 - Correctly handle deactivation of out-of-LRs interrupts by starting the EOIcount deactivation walk *after* the last irq that made it into an LR. This avoids deactivating irqs that are in the LRs and that the vcpu hasn't deactivated yet. - Avoid calling into the stubs to probe for ICH_VTR_EL2.TDS when pKVM is already enabled -- not only is this not possible (pKVM will reject the call), but it is also useless: this can only happen for a CPU that has already booted once, and the capability will not change.
2 parents 94fe3e6 + a79f7b4 commit 6e827fa

5 files changed

Lines changed: 26 additions & 8 deletions

File tree

arch/arm64/include/asm/kvm_host.h

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -784,6 +784,9 @@ struct kvm_host_data {
784784
/* Number of debug breakpoints/watchpoints for this CPU (minus 1) */
785785
unsigned int debug_brps;
786786
unsigned int debug_wrps;
787+
788+
/* Last vgic_irq part of the AP list recorded in an LR */
789+
struct vgic_irq *last_lr_irq;
787790
};
788791

789792
struct kvm_host_psci_config {

arch/arm64/kernel/cpufeature.c

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2345,6 +2345,15 @@ static bool can_trap_icv_dir_el1(const struct arm64_cpu_capabilities *entry,
23452345
!is_midr_in_range_list(has_vgic_v3))
23462346
return false;
23472347

2348+
/*
2349+
* pKVM prevents late onlining of CPUs. This means that whatever
2350+
* state the capability is in after deprivilege cannot be affected
2351+
* by a new CPU booting -- this is guaranteed to be a CPU we have
2352+
* already seen, and the cap is therefore unchanged.
2353+
*/
2354+
if (system_capabilities_finalized() && is_protected_kvm_enabled())
2355+
return cpus_have_final_cap(ARM64_HAS_ICH_HCR_EL2_TDIR);
2356+
23482357
if (is_kernel_in_hyp_mode())
23492358
res.a1 = read_sysreg_s(SYS_ICH_VTR_EL2);
23502359
else

arch/arm64/kvm/vgic/vgic-v2.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -115,15 +115,15 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
115115
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
116116
struct vgic_v2_cpu_if *cpuif = &vgic_cpu->vgic_v2;
117117
u32 eoicount = FIELD_GET(GICH_HCR_EOICOUNT, cpuif->vgic_hcr);
118-
struct vgic_irq *irq;
118+
struct vgic_irq *irq = *host_data_ptr(last_lr_irq);
119119

120120
DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());
121121

122122
for (int lr = 0; lr < vgic_cpu->vgic_v2.used_lrs; lr++)
123123
vgic_v2_fold_lr(vcpu, cpuif->vgic_lr[lr]);
124124

125125
/* See the GICv3 equivalent for the EOIcount handling rationale */
126-
list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
126+
list_for_each_entry_continue(irq, &vgic_cpu->ap_list_head, ap_list) {
127127
u32 lr;
128128

129129
if (!eoicount) {

arch/arm64/kvm/vgic/vgic-v3.c

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -148,7 +148,7 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
148148
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
149149
struct vgic_v3_cpu_if *cpuif = &vgic_cpu->vgic_v3;
150150
u32 eoicount = FIELD_GET(ICH_HCR_EL2_EOIcount, cpuif->vgic_hcr);
151-
struct vgic_irq *irq;
151+
struct vgic_irq *irq = *host_data_ptr(last_lr_irq);
152152

153153
DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());
154154

@@ -158,12 +158,12 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
158158
/*
159159
* EOIMode=0: use EOIcount to emulate deactivation. We are
160160
* guaranteed to deactivate in reverse order of the activation, so
161-
* just pick one active interrupt after the other in the ap_list,
162-
* and replay the deactivation as if the CPU was doing it. We also
163-
* rely on priority drop to have taken place, and the list to be
164-
* sorted by priority.
161+
* just pick one active interrupt after the other in the tail part
162+
* of the ap_list, past the LRs, and replay the deactivation as if
163+
* the CPU was doing it. We also rely on priority drop to have taken
164+
* place, and the list to be sorted by priority.
165165
*/
166-
list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
166+
list_for_each_entry_continue(irq, &vgic_cpu->ap_list_head, ap_list) {
167167
u64 lr;
168168

169169
/*

arch/arm64/kvm/vgic/vgic.c

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -814,6 +814,9 @@ static void vgic_prune_ap_list(struct kvm_vcpu *vcpu)
814814

815815
static inline void vgic_fold_lr_state(struct kvm_vcpu *vcpu)
816816
{
817+
if (!*host_data_ptr(last_lr_irq))
818+
return;
819+
817820
if (kvm_vgic_global_state.type == VGIC_V2)
818821
vgic_v2_fold_lr_state(vcpu);
819822
else
@@ -960,10 +963,13 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
960963
if (irqs_outside_lrs(&als))
961964
vgic_sort_ap_list(vcpu);
962965

966+
*host_data_ptr(last_lr_irq) = NULL;
967+
963968
list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
964969
scoped_guard(raw_spinlock, &irq->irq_lock) {
965970
if (likely(vgic_target_oracle(irq) == vcpu)) {
966971
vgic_populate_lr(vcpu, irq, count++);
972+
*host_data_ptr(last_lr_irq) = irq;
967973
}
968974
}
969975

0 commit comments

Comments
 (0)