Merge tag 'kvmarm-fixes-6.4-3' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into HEAD

KVM/arm64 fixes for 6.4, take #3

- Fix the reported address of a watchpoint forwarded to userspace

- Fix the freeing of the root of stage-2 page tables

- Stop creating spurious PMU events to perform detection of the
  default PMU and use the existing PMU list instead.
bonzini committed Jun 3, 2023
2 parents 26f3149 + 40e54ca commit 49661a5
Showing 5 changed files with 35 additions and 37 deletions.
8 changes: 6 additions & 2 deletions arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -412,17 +412,21 @@ static bool kvm_hyp_handle_cp15_32(struct kvm_vcpu *vcpu, u64 *exit_code)
 	return false;
 }
 
-static bool kvm_hyp_handle_iabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
+static bool kvm_hyp_handle_memory_fault(struct kvm_vcpu *vcpu, u64 *exit_code)
 {
 	if (!__populate_fault_info(vcpu))
 		return true;
 
 	return false;
 }
+static bool kvm_hyp_handle_iabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
+	__alias(kvm_hyp_handle_memory_fault);
+static bool kvm_hyp_handle_watchpt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
+	__alias(kvm_hyp_handle_memory_fault);
 
 static bool kvm_hyp_handle_dabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
 {
-	if (!__populate_fault_info(vcpu))
+	if (kvm_hyp_handle_memory_fault(vcpu, exit_code))
 		return true;
 
 	if (static_branch_unlikely(&vgic_v2_cpuif_trap)) {
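For readers unfamiliar with the __alias() trick used above: the kernel macro expands to the compiler's alias attribute, so the instruction-abort and watchpoint handlers become alternate symbol names for the single kvm_hyp_handle_memory_fault() body. A minimal standalone sketch of the same pattern, using illustrative names and the plain GCC/Clang attribute rather than the kernel's __alias() wrapper:

#include <stdbool.h>
#include <stdio.h>

/* One real implementation... */
static bool handle_memory_fault(int esr)
{
	return esr != 0;
}

/* ...and two extra entry points that alias it. The string names the
 * assembler-level symbol of the target, which must be defined in this
 * translation unit. */
static bool handle_iabt(int esr) __attribute__((alias("handle_memory_fault")));
static bool handle_watchpt(int esr) __attribute__((alias("handle_memory_fault")));

int main(void)
{
	printf("%d %d\n", handle_iabt(1), handle_watchpt(0));
	return 0;
}

All three names resolve to the same code, so there is exactly one body to keep correct while each exception class still gets its own handler symbol.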
2 changes: 2 additions & 0 deletions arch/arm64/kvm/hyp/nvhe/switch.c
@@ -186,6 +186,7 @@ static const exit_handler_fn hyp_exit_handlers[] = {
 	[ESR_ELx_EC_FP_ASIMD]		= kvm_hyp_handle_fpsimd,
 	[ESR_ELx_EC_IABT_LOW]		= kvm_hyp_handle_iabt_low,
 	[ESR_ELx_EC_DABT_LOW]		= kvm_hyp_handle_dabt_low,
+	[ESR_ELx_EC_WATCHPT_LOW]	= kvm_hyp_handle_watchpt_low,
 	[ESR_ELx_EC_PAC]		= kvm_hyp_handle_ptrauth,
 };

@@ -196,6 +197,7 @@ static const exit_handler_fn pvm_exit_handlers[] = {
 	[ESR_ELx_EC_FP_ASIMD]		= kvm_hyp_handle_fpsimd,
 	[ESR_ELx_EC_IABT_LOW]		= kvm_hyp_handle_iabt_low,
 	[ESR_ELx_EC_DABT_LOW]		= kvm_hyp_handle_dabt_low,
+	[ESR_ELx_EC_WATCHPT_LOW]	= kvm_hyp_handle_watchpt_low,
 	[ESR_ELx_EC_PAC]		= kvm_hyp_handle_ptrauth,
 };

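The tables above are C designated-initializer arrays indexed by exception class, so the fix is simply adding a slot for low-level watchpoint traps. A rough, self-contained illustration of that dispatch-table pattern, with toy exception codes and handlers rather than the kernel's:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

enum exc_class { EC_IABT = 0x20, EC_DABT = 0x24, EC_WATCHPT = 0x34, EC_MAX = 0x40 };

typedef bool (*exit_handler_fn)(int code);

static bool handle_mem(int code) { printf("memory fault %#x\n", code); return true; }
static bool handle_wpt(int code) { printf("watchpoint %#x\n", code); return true; }

/* Designated initializers leave unlisted slots NULL, so unknown classes
 * can fall back to a default path. */
static const exit_handler_fn handlers[EC_MAX] = {
	[EC_IABT]	= handle_mem,
	[EC_DABT]	= handle_mem,
	[EC_WATCHPT]	= handle_wpt,
};

static bool dispatch(int code)
{
	exit_handler_fn fn = (code < EC_MAX) ? handlers[code] : NULL;

	return fn ? fn(code) : false;
}

int main(void)
{
	dispatch(EC_WATCHPT);
	dispatch(EC_DABT);
	return 0;
}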
3 changes: 3 additions & 0 deletions arch/arm64/kvm/hyp/pgtable.c
@@ -1332,4 +1332,7 @@ void kvm_pgtable_stage2_free_removed(struct kvm_pgtable_mm_ops *mm_ops, void *pg
 	};
 
 	WARN_ON(__kvm_pgtable_walk(&data, mm_ops, ptep, level + 1));
+
+	WARN_ON(mm_ops->page_count(pgtable) != 1);
+	mm_ops->put_page(pgtable);
 }
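This is the stage-2 root-freeing fix from the merge message: after the walk has torn down the children of the detached table, the root page itself still holds one reference, which was previously leaked. The WARN_ON() asserts that exactly that one reference remains, and put_page() through the memory-management ops drops it. A toy sketch of that callback-driven refcounting shape, with hypothetical stand-in types rather than KVM's kvm_pgtable_mm_ops:

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

struct page { int refcount; };

struct mm_ops {
	int  (*page_count)(struct page *p);
	void (*put_page)(struct page *p);
};

static int page_count(struct page *p) { return p->refcount; }

static void put_page(struct page *p)
{
	if (--p->refcount == 0) {
		printf("freeing page %p\n", (void *)p);
		free(p);
	}
}

/* Once a detached table's children are gone, exactly one reference to
 * the root should remain; dropping it frees the root as well. */
static void free_removed_root(const struct mm_ops *ops, struct page *root)
{
	assert(ops->page_count(root) == 1);	/* analogous to the WARN_ON() */
	ops->put_page(root);
}

int main(void)
{
	static const struct mm_ops ops = { page_count, put_page };
	struct page *root = malloc(sizeof(*root));

	root->refcount = 1;
	free_removed_root(&ops, root);
	return 0;
}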
1 change: 1 addition & 0 deletions arch/arm64/kvm/hyp/vhe/switch.c
@@ -110,6 +110,7 @@ static const exit_handler_fn hyp_exit_handlers[] = {
 	[ESR_ELx_EC_FP_ASIMD]		= kvm_hyp_handle_fpsimd,
 	[ESR_ELx_EC_IABT_LOW]		= kvm_hyp_handle_iabt_low,
 	[ESR_ELx_EC_DABT_LOW]		= kvm_hyp_handle_dabt_low,
+	[ESR_ELx_EC_WATCHPT_LOW]	= kvm_hyp_handle_watchpt_low,
 	[ESR_ELx_EC_PAC]		= kvm_hyp_handle_ptrauth,
 };

58 changes: 23 additions & 35 deletions arch/arm64/kvm/pmu-emul.c
@@ -694,45 +694,23 @@ void kvm_host_pmu_init(struct arm_pmu *pmu)
 
 static struct arm_pmu *kvm_pmu_probe_armpmu(void)
 {
-	struct perf_event_attr attr = { };
-	struct perf_event *event;
-	struct arm_pmu *pmu = NULL;
-
-	/*
-	 * Create a dummy event that only counts user cycles. As we'll never
-	 * leave this function with the event being live, it will never
-	 * count anything. But it allows us to probe some of the PMU
-	 * details. Yes, this is terrible.
-	 */
-	attr.type = PERF_TYPE_RAW;
-	attr.size = sizeof(attr);
-	attr.pinned = 1;
-	attr.disabled = 0;
-	attr.exclude_user = 0;
-	attr.exclude_kernel = 1;
-	attr.exclude_hv = 1;
-	attr.exclude_host = 1;
-	attr.config = ARMV8_PMUV3_PERFCTR_CPU_CYCLES;
-	attr.sample_period = GENMASK(63, 0);
+	struct arm_pmu *tmp, *pmu = NULL;
+	struct arm_pmu_entry *entry;
+	int cpu;
 
-	event = perf_event_create_kernel_counter(&attr, -1, current,
-						 kvm_pmu_perf_overflow, &attr);
+	mutex_lock(&arm_pmus_lock);
 
-	if (IS_ERR(event)) {
-		pr_err_once("kvm: pmu event creation failed %ld\n",
-			    PTR_ERR(event));
-		return NULL;
-	}
+	cpu = smp_processor_id();
+	list_for_each_entry(entry, &arm_pmus, entry) {
+		tmp = entry->arm_pmu;
 
-	if (event->pmu) {
-		pmu = to_arm_pmu(event->pmu);
-		if (pmu->pmuver == ID_AA64DFR0_EL1_PMUVer_NI ||
-		    pmu->pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF)
-			pmu = NULL;
+		if (cpumask_test_cpu(cpu, &tmp->supported_cpus)) {
+			pmu = tmp;
+			break;
+		}
 	}
 
-	perf_event_disable(event);
-	perf_event_release_kernel(event);
+	mutex_unlock(&arm_pmus_lock);
 
 	return pmu;
 }
@@ -912,7 +890,17 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
 		return -EBUSY;
 
 	if (!kvm->arch.arm_pmu) {
-		/* No PMU set, get the default one */
+		/*
+		 * No PMU set, get the default one.
+		 *
+		 * The observant among you will notice that the supported_cpus
+		 * mask does not get updated for the default PMU even though it
+		 * is quite possible the selected instance supports only a
+		 * subset of cores in the system. This is intentional, and
+		 * upholds the preexisting behavior on heterogeneous systems
+		 * where vCPUs can be scheduled on any core but the guest
+		 * counters could stop working.
+		 */
 		kvm->arch.arm_pmu = kvm_pmu_probe_armpmu();
 		if (!kvm->arch.arm_pmu)
 			return -ENODEV;
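The "No PMU set" branch above is the path taken when userspace initializes the vPMU without first selecting a specific instance via KVM_ARM_VCPU_PMU_V3_SET_PMU; the kernel then falls back to kvm_pmu_probe_armpmu() to pick a default from the registered list. A hedged sketch of that userspace side on an arm64 host, assuming vcpu_fd refers to a vCPU created with the KVM_ARM_VCPU_PMU_V3 feature and an in-kernel GIC already configured (error handling trimmed; this is an illustration of the attribute sequence, not a complete VMM):

#include <linux/kvm.h>
#include <stdint.h>
#include <sys/ioctl.h>

/* Wire up the default PMU for this vCPU: set the PMU overflow IRQ, then
 * issue the INIT attribute. Either call reaches kvm_arm_pmu_v3_set_attr()
 * with kvm->arch.arm_pmu still unset, triggering the default-PMU probe. */
static int init_default_vpmu(int vcpu_fd, int irq)
{
	struct kvm_device_attr attr = {
		.group	= KVM_ARM_VCPU_PMU_V3_CTRL,
		.attr	= KVM_ARM_VCPU_PMU_V3_IRQ,
		.addr	= (uint64_t)(unsigned long)&irq,
	};

	if (ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr))
		return -1;

	attr.attr = KVM_ARM_VCPU_PMU_V3_INIT;
	attr.addr = 0;
	return ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);
}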
