@@ -24,7 +24,11 @@ static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc);
 
 static u32 kvm_pmu_event_mask(struct kvm *kvm)
 {
-	switch (kvm->arch.pmuver) {
+	unsigned int pmuver;
+
+	pmuver = kvm->arch.arm_pmu->pmuver;
+
+	switch (pmuver) {
 	case ID_AA64DFR0_PMUVER_8_0:
 		return GENMASK(9, 0);
 	case ID_AA64DFR0_PMUVER_8_1:
@@ -33,7 +37,7 @@ static u32 kvm_pmu_event_mask(struct kvm *kvm)
 	case ID_AA64DFR0_PMUVER_8_7:
 		return GENMASK(15, 0);
 	default:	/* Shouldn't be here, just for sanity */
-		WARN_ONCE(1, "Unknown PMU version %d\n", kvm->arch.pmuver);
+		WARN_ONCE(1, "Unknown PMU version %d\n", pmuver);
 		return 0;
 	}
 }
@@ -600,6 +604,7 @@ static bool kvm_pmu_counter_is_enabled(struct kvm_vcpu *vcpu, u64 select_idx)
  */
 static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx)
 {
+	struct arm_pmu *arm_pmu = vcpu->kvm->arch.arm_pmu;
 	struct kvm_pmu *pmu = &vcpu->arch.pmu;
 	struct kvm_pmc *pmc;
 	struct perf_event *event;
@@ -636,7 +641,7 @@ static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx)
 		return;
 
 	memset(&attr, 0, sizeof(struct perf_event_attr));
-	attr.type = PERF_TYPE_RAW;
+	attr.type = arm_pmu->pmu.type;
 	attr.size = sizeof(attr);
 	attr.pinned = 1;
 	attr.disabled = !kvm_pmu_counter_is_enabled(vcpu, pmc->idx);
@@ -750,12 +755,11 @@ void kvm_host_pmu_init(struct arm_pmu *pmu)
 		static_branch_enable(&kvm_arm_pmu_available);
 }
 
-static int kvm_pmu_probe_pmuver(void)
+static struct arm_pmu *kvm_pmu_probe_armpmu(void)
 {
 	struct perf_event_attr attr = { };
 	struct perf_event *event;
-	struct arm_pmu *pmu;
-	int pmuver = ID_AA64DFR0_PMUVER_IMP_DEF;
+	struct arm_pmu *pmu = NULL;
 
 	/*
 	 * Create a dummy event that only counts user cycles. As we'll never
@@ -780,19 +784,20 @@ static int kvm_pmu_probe_pmuver(void)
 	if (IS_ERR(event)) {
 		pr_err_once("kvm: pmu event creation failed %ld\n",
 			    PTR_ERR(event));
-		return ID_AA64DFR0_PMUVER_IMP_DEF;
+		return NULL;
 	}
 
 	if (event->pmu) {
 		pmu = to_arm_pmu(event->pmu);
-		if (pmu->pmuver)
-			pmuver = pmu->pmuver;
+		if (pmu->pmuver == 0 ||
+		    pmu->pmuver == ID_AA64DFR0_PMUVER_IMP_DEF)
+			pmu = NULL;
 	}
 
 	perf_event_disable(event);
 	perf_event_release_kernel(event);
 
-	return pmuver;
+	return pmu;
 }
 
 u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
@@ -810,7 +815,7 @@ u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
 		 * Don't advertise STALL_SLOT, as PMMIR_EL0 is handled
 		 * as RAZ
 		 */
-		if (vcpu->kvm->arch.pmuver >= ID_AA64DFR0_PMUVER_8_4)
+		if (vcpu->kvm->arch.arm_pmu->pmuver >= ID_AA64DFR0_PMUVER_8_4)
 			val &= ~BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT - 32);
 		base = 32;
 	}
@@ -932,11 +937,16 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
 	if (vcpu->arch.pmu.created)
 		return -EBUSY;
 
-	if (!vcpu->kvm->arch.pmuver)
-		vcpu->kvm->arch.pmuver = kvm_pmu_probe_pmuver();
-
-	if (vcpu->kvm->arch.pmuver == ID_AA64DFR0_PMUVER_IMP_DEF)
-		return -ENODEV;
+	mutex_lock(&kvm->lock);
+	if (!kvm->arch.arm_pmu) {
+		/* No PMU set, get the default one */
+		kvm->arch.arm_pmu = kvm_pmu_probe_armpmu();
+		if (!kvm->arch.arm_pmu) {
+			mutex_unlock(&kvm->lock);
+			return -ENODEV;
+		}
+	}
+	mutex_unlock(&kvm->lock);
 
 	switch (attr->attr) {
 	case KVM_ARM_VCPU_PMU_V3_IRQ: {
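For context, the lazy probe added above runs the first time userspace touches the vPMU through the vcpu device-attribute interface. Below is a minimal userspace sketch (not part of this patch; the helper name, fd handling and interrupt number are illustrative assumptions) of the KVM_SET_DEVICE_ATTR call that reaches kvm_arm_pmu_v3_set_attr() and now latches the VM's default arm_pmu under kvm->lock:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Hypothetical helper: set the vPMU overflow interrupt on one vcpu fd. */
static int set_pmu_irq(int vcpu_fd, int irq)
{
	struct kvm_device_attr attr = {
		.group	= KVM_ARM_VCPU_PMU_V3_CTRL,
		.attr	= KVM_ARM_VCPU_PMU_V3_IRQ,
		.addr	= (uint64_t)(uintptr_t)&irq,	/* userspace pointer to the interrupt number */
	};

	/* Returns -1 with errno == ENODEV if no usable (non IMP DEF) host PMU was probed. */
	return ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);
}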