Merge tag 'kvmarm-fixes-5.18-2' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into HEAD

KVM/arm64 fixes for 5.18, take #2

- Take care of faults occurring between the PARange and
  IPA range by injecting an exception (see the sketch
  before the diffs)

- Fix S2 faults taken from a host EL0 in protected mode

- Work around an Oops caused by a PMU access from a 32bit
  guest when no PMU has been created. This is a temporary
  bodge until we fix it for good.
bonzini committed Apr 29, 2022
2 parents e852be8 + 85ea6b1 commit 484c22d
Showing 5 changed files with 79 additions and 10 deletions.
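
Before the diffs, a minimal standalone sketch of the boundary checks behind the first fix. The sizes here are made-up stand-ins: the real code reads the sanitised PARange via get_kvm_ipa_limit() and the per-VM IPA size from vcpu->arch.hw_mmu->pgt->ia_bits, as the mmu.c hunk below shows.

/*
 * Illustrative only: classify a faulting IPA the way the new
 * kvm_handle_guest_abort() code does, using assumed sizes.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const unsigned int parange_bits = 48;	/* assumed host PARange */
	const unsigned int ipa_bits = 40;	/* assumed per-VM IPA size */
	const uint64_t fault_ipa = 1ULL << 44;	/* example faulting IPA */

	if (fault_ipa >= (1ULL << parange_bits))
		puts("beyond PARange: inject an Address Size Fault");
	else if (fault_ipa >= (1ULL << ipa_bits))
		puts("between IPA size and PARange: inject a regular external abort");
	else
		puts("within the IPA space: normal stage-2 fault handling");

	return 0;
}

With these assumed numbers the example address falls in the window between the VM's IPA range and the PARange, so the guest receives a regular external abort rather than a size fault.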
1 change: 1 addition & 0 deletions arch/arm64/include/asm/kvm_emulate.h
@@ -40,6 +40,7 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu);
 void kvm_inject_vabt(struct kvm_vcpu *vcpu);
 void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
 void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
+void kvm_inject_size_fault(struct kvm_vcpu *vcpu);
 
 void kvm_vcpu_wfi(struct kvm_vcpu *vcpu);
18 changes: 9 additions & 9 deletions arch/arm64/kvm/hyp/nvhe/host.S
@@ -198,15 +198,15 @@ SYM_CODE_START(__kvm_hyp_host_vector)
 	invalid_host_el2_vect			// FIQ EL2h
 	invalid_host_el2_vect			// Error EL2h
 
-	host_el1_sync_vect			// Synchronous 64-bit EL1
-	invalid_host_el1_vect			// IRQ 64-bit EL1
-	invalid_host_el1_vect			// FIQ 64-bit EL1
-	invalid_host_el1_vect			// Error 64-bit EL1
+	host_el1_sync_vect			// Synchronous 64-bit EL1/EL0
+	invalid_host_el1_vect			// IRQ 64-bit EL1/EL0
+	invalid_host_el1_vect			// FIQ 64-bit EL1/EL0
+	invalid_host_el1_vect			// Error 64-bit EL1/EL0
 
-	invalid_host_el1_vect			// Synchronous 32-bit EL1
-	invalid_host_el1_vect			// IRQ 32-bit EL1
-	invalid_host_el1_vect			// FIQ 32-bit EL1
-	invalid_host_el1_vect			// Error 32-bit EL1
+	host_el1_sync_vect			// Synchronous 32-bit EL1/EL0
+	invalid_host_el1_vect			// IRQ 32-bit EL1/EL0
+	invalid_host_el1_vect			// FIQ 32-bit EL1/EL0
+	invalid_host_el1_vect			// Error 32-bit EL1/EL0
 SYM_CODE_END(__kvm_hyp_host_vector)
 
 /*
28 changes: 28 additions & 0 deletions arch/arm64/kvm/inject_fault.c
@@ -145,6 +145,34 @@ void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
 	inject_abt64(vcpu, true, addr);
 }
 
+void kvm_inject_size_fault(struct kvm_vcpu *vcpu)
+{
+	unsigned long addr, esr;
+
+	addr = kvm_vcpu_get_fault_ipa(vcpu);
+	addr |= kvm_vcpu_get_hfar(vcpu) & GENMASK(11, 0);
+
+	if (kvm_vcpu_trap_is_iabt(vcpu))
+		kvm_inject_pabt(vcpu, addr);
+	else
+		kvm_inject_dabt(vcpu, addr);
+
+	/*
+	 * If AArch64 or LPAE, set FSC to 0 to indicate an Address
+	 * Size Fault at level 0, as if exceeding PARange.
+	 *
+	 * Non-LPAE guests will only get the external abort, as there
+	 * is no way to describe the ASF.
+	 */
+	if (vcpu_el1_is_32bit(vcpu) &&
+	    !(vcpu_read_sys_reg(vcpu, TCR_EL1) & TTBCR_EAE))
+		return;
+
+	esr = vcpu_read_sys_reg(vcpu, ESR_EL1);
+	esr &= ~GENMASK_ULL(5, 0);
+	vcpu_write_sys_reg(vcpu, esr, ESR_EL1);
+}
+
 /**
  * kvm_inject_undefined - inject an undefined instruction into the guest
  * @vcpu: The vCPU in which to inject the exception
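
A note on the bit manipulation above: kvm_vcpu_get_fault_ipa() returns a page-aligned address, so the byte offset within the page is recovered from HFAR before the abort is injected, and clearing ESR_EL1 bits [5:0] (the FSC field) encodes "Address Size Fault, level 0". A hedged restatement of the two steps, with hypothetical helper names (example_fault_addr and example_clear_fsc are not kernel functions):

#include <stdint.h>

/* Merge a page-aligned fault IPA with the in-page offset taken from HFAR. */
static inline uint64_t example_fault_addr(uint64_t fault_ipa, uint64_t hfar)
{
	return fault_ipa | (hfar & 0xfffULL);	/* GENMASK(11, 0) */
}

/* Zero ESR_EL1.FSC (bits [5:0]): FSC == 0 means Address Size Fault, level 0. */
static inline uint64_t example_clear_fsc(uint64_t esr)
{
	return esr & ~0x3fULL;			/* ~GENMASK_ULL(5, 0) */
}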
19 changes: 19 additions & 0 deletions arch/arm64/kvm/mmu.c
@@ -1337,6 +1337,25 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
 	fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);
 	is_iabt = kvm_vcpu_trap_is_iabt(vcpu);
 
+	if (fault_status == FSC_FAULT) {
+		/* Beyond sanitised PARange (which is the IPA limit) */
+		if (fault_ipa >= BIT_ULL(get_kvm_ipa_limit())) {
+			kvm_inject_size_fault(vcpu);
+			return 1;
+		}
+
+		/* Falls between the IPA range and the PARange? */
+		if (fault_ipa >= BIT_ULL(vcpu->arch.hw_mmu->pgt->ia_bits)) {
+			fault_ipa |= kvm_vcpu_get_hfar(vcpu) & GENMASK(11, 0);
+
+			if (is_iabt)
+				kvm_inject_pabt(vcpu, fault_ipa);
+			else
+				kvm_inject_dabt(vcpu, fault_ipa);
+			return 1;
+		}
+	}
+
 	/* Synchronous External Abort? */
 	if (kvm_vcpu_abt_issea(vcpu)) {
 		/*
23 changes: 22 additions & 1 deletion arch/arm64/kvm/pmu-emul.c
@@ -177,6 +177,9 @@ u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx)
 	struct kvm_pmu *pmu = &vcpu->arch.pmu;
 	struct kvm_pmc *pmc = &pmu->pmc[select_idx];
 
+	if (!kvm_vcpu_has_pmu(vcpu))
+		return 0;
+
 	counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);
 
 	if (kvm_pmu_pmc_is_chained(pmc) &&
@@ -198,6 +201,9 @@ void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
 {
 	u64 reg;
 
+	if (!kvm_vcpu_has_pmu(vcpu))
+		return;
+
 	reg = (select_idx == ARMV8_PMU_CYCLE_IDX)
 	      ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + select_idx;
 	__vcpu_sys_reg(vcpu, reg) += (s64)val - kvm_pmu_get_counter_value(vcpu, select_idx);
@@ -322,6 +328,9 @@ void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
 	struct kvm_pmu *pmu = &vcpu->arch.pmu;
 	struct kvm_pmc *pmc;
 
+	if (!kvm_vcpu_has_pmu(vcpu))
+		return;
+
 	if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) || !val)
 		return;
 
@@ -357,7 +366,7 @@ void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
 	struct kvm_pmu *pmu = &vcpu->arch.pmu;
 	struct kvm_pmc *pmc;
 
-	if (!val)
+	if (!kvm_vcpu_has_pmu(vcpu) || !val)
 		return;
 
 	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
@@ -527,6 +536,9 @@ void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val)
 	struct kvm_pmu *pmu = &vcpu->arch.pmu;
 	int i;
 
+	if (!kvm_vcpu_has_pmu(vcpu))
+		return;
+
 	if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E))
 		return;
 
@@ -576,6 +588,9 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
 {
 	int i;
 
+	if (!kvm_vcpu_has_pmu(vcpu))
+		return;
+
 	if (val & ARMV8_PMU_PMCR_E) {
 		kvm_pmu_enable_counter_mask(vcpu,
 		       __vcpu_sys_reg(vcpu, PMCNTENSET_EL0));
@@ -739,6 +754,9 @@ void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
 {
 	u64 reg, mask;
 
+	if (!kvm_vcpu_has_pmu(vcpu))
+		return;
+
 	mask = ARMV8_PMU_EVTYPE_MASK;
 	mask &= ~ARMV8_PMU_EVTYPE_EVENT;
 	mask |= kvm_pmu_event_mask(vcpu->kvm);
@@ -827,6 +845,9 @@ u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
 	u64 val, mask = 0;
 	int base, i, nr_events;
 
+	if (!kvm_vcpu_has_pmu(vcpu))
+		return 0;
+
 	if (!pmceid1) {
 		val = read_sysreg(pmceid0_el0);
 		base = 0;
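
The PMU bodge is the same guard repeated at each emulation entry point: bail out early when the vcpu has no PMU instead of dereferencing state that was never created. For reference, kvm_vcpu_has_pmu() at the time of this merge was essentially a feature-bit test along these lines (an approximation, not a verbatim quote):

/* Approximation of the helper in arch/arm64/include/asm/kvm_host.h */
#define kvm_vcpu_has_pmu(vcpu)					\
	(test_bit(KVM_ARM_VCPU_PMU_V3, (vcpu)->arch.features))

This is deliberately belt-and-braces: as the tag message says, it papers over the 32bit-guest path that can reach these functions without a PMU, until a proper fix lands.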
