@@ -4598,6 +4598,16 @@ static void intel_pmu_check_num_counters(int *num_counters,
 					 int *num_counters_fixed,
 					 u64 *intel_ctrl, u64 fixed_mask);
 
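+/*
+ * Quirk list for parts whose MSR_IA32_PERF_CAPABILITIES enumeration is
+ * unreliable; update_pmu_cap() skips reading the MSR on these and keeps
+ * the software defaults instead.
+ */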
+static inline bool intel_pmu_broken_perf_cap(void)
+{
+	/* The Perf Metric (Bit 15) is always cleared */
+	if ((boot_cpu_data.x86_model == INTEL_FAM6_METEORLAKE) ||
+	    (boot_cpu_data.x86_model == INTEL_FAM6_METEORLAKE_L))
+		return true;
+
+	return false;
+}
+
 static void update_pmu_cap(struct x86_hybrid_pmu *pmu)
 {
 	unsigned int sub_bitmaps = cpuid_eax(ARCH_PERFMON_EXT_LEAF);
@@ -4610,7 +4620,27 @@ static void update_pmu_cap(struct x86_hybrid_pmu *pmu)
 		pmu->num_counters_fixed = fls(ebx);
 		intel_pmu_check_num_counters(&pmu->num_counters, &pmu->num_counters_fixed,
 					     &pmu->intel_ctrl, ebx);
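+		/*
+		 * The enumerated counters for this hybrid PMU may differ
+		 * from the boot CPU's, so resize the PEBS event limit and
+		 * the default (unconstrained) scheduling constraint too.
+		 */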
+		pmu->max_pebs_events = min_t(unsigned, MAX_PEBS_EVENTS, pmu->num_counters);
+		pmu->unconstrained = (struct event_constraint)
+				     __EVENT_CONSTRAINT(0, (1ULL << pmu->num_counters) - 1,
+							0, pmu->num_counters, 0, 0);
 	}
+
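+	/*
+	 * Read the hybrid-enumerated capabilities straight from the MSR,
+	 * unless the enumeration is known broken on this part, in which
+	 * case the software defaults are kept.
+	 */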
+	if (!intel_pmu_broken_perf_cap()) {
+		/* Perf Metric (Bit 15) and PEBS via PT (Bit 16) are hybrid enumeration */
+		rdmsrl(MSR_IA32_PERF_CAPABILITIES, pmu->intel_cap.capabilities);
+	}
+
+	if (pmu->intel_cap.perf_metrics)
+		pmu->intel_ctrl |= 1ULL << GLOBAL_CTRL_EN_PERF_METRICS;
+	else
+		pmu->intel_ctrl &= ~(1ULL << GLOBAL_CTRL_EN_PERF_METRICS);
+
+	if (pmu->intel_cap.pebs_output_pt_available)
+		pmu->pmu.capabilities |= PERF_PMU_CAP_AUX_OUTPUT;
+	else
+		pmu->pmu.capabilities &= ~PERF_PMU_CAP_AUX_OUTPUT;
 }
 
 static struct x86_hybrid_pmu *find_hybrid_pmu_for_cpu(void)
@@ -5915,10 +5945,52 @@ static void intel_pmu_check_hybrid_pmus(u64 fixed_mask)
 	}
 }
 
-static __always_inline bool is_mtl(u8 x86_model)
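+/*
+ * Indexed by bit position to match the for_each_set_bit() walk in
+ * intel_pmu_init_hybrid(): hybrid_small is bit 0 ("cpu_atom"),
+ * hybrid_big is bit 1 ("cpu_core").
+ */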
+static const struct { enum hybrid_pmu_type id; char *name; } intel_hybrid_pmu_type_map[] __initconst = {
+	{ hybrid_small, "cpu_atom" },
+	{ hybrid_big, "cpu_core" },
+};
+
+static __always_inline int intel_pmu_init_hybrid(enum hybrid_pmu_type pmus)
 {
-	return (x86_model == INTEL_FAM6_METEORLAKE) ||
-	       (x86_model == INTEL_FAM6_METEORLAKE_L);
+	unsigned long pmus_mask = pmus;
+	struct x86_hybrid_pmu *pmu;
+	int idx = 0, bit;
+
+	x86_pmu.num_hybrid_pmus = hweight_long(pmus_mask);
+	x86_pmu.hybrid_pmu = kcalloc(x86_pmu.num_hybrid_pmus,
+				     sizeof(struct x86_hybrid_pmu),
+				     GFP_KERNEL);
+	if (!x86_pmu.hybrid_pmu)
+		return -ENOMEM;
+
+	static_branch_enable(&perf_is_hybrid);
+	x86_pmu.filter = intel_pmu_filter;
+
+	for_each_set_bit(bit, &pmus_mask, ARRAY_SIZE(intel_hybrid_pmu_type_map)) {
+		pmu = &x86_pmu.hybrid_pmu[idx++];
+		pmu->pmu_type = intel_hybrid_pmu_type_map[bit].id;
+		pmu->name = intel_hybrid_pmu_type_map[bit].name;
+
+		pmu->num_counters = x86_pmu.num_counters;
+		pmu->num_counters_fixed = x86_pmu.num_counters_fixed;
+		pmu->max_pebs_events = min_t(unsigned, MAX_PEBS_EVENTS, pmu->num_counters);
+		pmu->unconstrained = (struct event_constraint)
+				     __EVENT_CONSTRAINT(0, (1ULL << pmu->num_counters) - 1,
+							0, pmu->num_counters, 0, 0);
+
+		pmu->intel_cap.capabilities = x86_pmu.intel_cap.capabilities;
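+		/*
+		 * Default the hybrid enumeration bits per core type;
+		 * update_pmu_cap() re-reads them from the MSR later when
+		 * the hardware enumeration is trustworthy.
+		 */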
+		if (pmu->pmu_type & hybrid_small) {
+			pmu->intel_cap.perf_metrics = 0;
+			pmu->intel_cap.pebs_output_pt_available = 1;
+			pmu->mid_ack = true;
+		} else if (pmu->pmu_type & hybrid_big) {
+			pmu->intel_cap.perf_metrics = 1;
+			pmu->intel_cap.pebs_output_pt_available = 0;
+			pmu->late_ack = true;
+		}
+	}
+
+	return 0;
 }
 
 static __always_inline void intel_pmu_init_glc(struct pmu *pmu)
@@ -6602,23 +6674,14 @@ __init int intel_pmu_init(void)
 	case INTEL_FAM6_RAPTORLAKE:
 	case INTEL_FAM6_RAPTORLAKE_P:
 	case INTEL_FAM6_RAPTORLAKE_S:
-	case INTEL_FAM6_METEORLAKE:
-	case INTEL_FAM6_METEORLAKE_L:
 		/*
 		 * Alder Lake has 2 types of CPU, core and atom.
 		 *
 		 * Initialize the common PerfMon capabilities here.
 		 */
-		x86_pmu.hybrid_pmu = kcalloc(X86_HYBRID_NUM_PMUS,
-					     sizeof(struct x86_hybrid_pmu),
-					     GFP_KERNEL);
-		if (!x86_pmu.hybrid_pmu)
-			return -ENOMEM;
-		static_branch_enable(&perf_is_hybrid);
-		x86_pmu.num_hybrid_pmus = X86_HYBRID_NUM_PMUS;
+		intel_pmu_init_hybrid(hybrid_big_small);
 
 		x86_pmu.pebs_latency_data = adl_latency_data_small;
-		x86_pmu.filter = intel_pmu_filter;
 		x86_pmu.get_event_constraints = adl_get_event_constraints;
 		x86_pmu.hw_config = adl_hw_config;
 		x86_pmu.get_hybrid_cpu_type = adl_get_hybrid_cpu_type;
@@ -6631,10 +6694,7 @@ __init int intel_pmu_init(void)
 
 		/* Initialize big core specific PerfMon capabilities.*/
 		pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX];
-		pmu->name = "cpu_core";
-		pmu->pmu_type = hybrid_big;
 		intel_pmu_init_glc(&pmu->pmu);
-		pmu->late_ack = true;
 		if (cpu_feature_enabled(X86_FEATURE_HYBRID_CPU)) {
 			pmu->num_counters = x86_pmu.num_counters + 2;
 			pmu->num_counters_fixed = x86_pmu.num_counters_fixed + 1;
@@ -6659,45 +6719,45 @@ __init int intel_pmu_init(void)
 		pmu->unconstrained = (struct event_constraint)
 				     __EVENT_CONSTRAINT(0, (1ULL << pmu->num_counters) - 1,
 							0, pmu->num_counters, 0, 0);
-		pmu->intel_cap.capabilities = x86_pmu.intel_cap.capabilities;
-		pmu->intel_cap.perf_metrics = 1;
-		pmu->intel_cap.pebs_output_pt_available = 0;
-
 		pmu->extra_regs = intel_glc_extra_regs;
 
 		/* Initialize Atom core specific PerfMon capabilities.*/
 		pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_ATOM_IDX];
-		pmu->name = "cpu_atom";
-		pmu->pmu_type = hybrid_small;
 		intel_pmu_init_grt(&pmu->pmu);
-		pmu->mid_ack = true;
-		pmu->num_counters = x86_pmu.num_counters;
-		pmu->num_counters_fixed = x86_pmu.num_counters_fixed;
-		pmu->max_pebs_events = x86_pmu.max_pebs_events;
-		pmu->unconstrained = (struct event_constraint)
-				     __EVENT_CONSTRAINT(0, (1ULL << pmu->num_counters) - 1,
-							0, pmu->num_counters, 0, 0);
-		pmu->intel_cap.capabilities = x86_pmu.intel_cap.capabilities;
-		pmu->intel_cap.perf_metrics = 0;
-		pmu->intel_cap.pebs_output_pt_available = 1;
-
-		if (is_mtl(boot_cpu_data.x86_model)) {
-			x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX].extra_regs = intel_rwc_extra_regs;
-			x86_pmu.pebs_latency_data = mtl_latency_data_small;
-			extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
-				     mtl_hybrid_extra_attr_rtm : mtl_hybrid_extra_attr;
-			mem_attr = mtl_hybrid_mem_attrs;
-			intel_pmu_pebs_data_source_mtl();
-			x86_pmu.get_event_constraints = mtl_get_event_constraints;
-			pmu->extra_regs = intel_cmt_extra_regs;
-			pr_cont("Meteorlake Hybrid events, ");
-			name = "meteorlake_hybrid";
-		} else {
-			x86_pmu.flags |= PMU_FL_MEM_LOADS_AUX;
-			intel_pmu_pebs_data_source_adl();
-			pr_cont("Alderlake Hybrid events, ");
-			name = "alderlake_hybrid";
-		}
+
+		x86_pmu.flags |= PMU_FL_MEM_LOADS_AUX;
+		intel_pmu_pebs_data_source_adl();
+		pr_cont("Alderlake Hybrid events, ");
+		name = "alderlake_hybrid";
+		break;
+
+	case INTEL_FAM6_METEORLAKE:
+	case INTEL_FAM6_METEORLAKE_L:
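+		/*
+		 * Meteor Lake reuses the Alder Lake hw_config and hybrid
+		 * event/TSX attributes, but carries its own latency data,
+		 * event constraints, extra regs and PEBS data sources.
+		 */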
+		intel_pmu_init_hybrid(hybrid_big_small);
+
+		x86_pmu.pebs_latency_data = mtl_latency_data_small;
+		x86_pmu.get_event_constraints = mtl_get_event_constraints;
+		x86_pmu.hw_config = adl_hw_config;
+
+		td_attr = adl_hybrid_events_attrs;
+		mem_attr = mtl_hybrid_mem_attrs;
+		tsx_attr = adl_hybrid_tsx_attrs;
+		extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
+			     mtl_hybrid_extra_attr_rtm : mtl_hybrid_extra_attr;
+
+		/* Initialize big core specific PerfMon capabilities.*/
+		pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX];
+		intel_pmu_init_glc(&pmu->pmu);
+		pmu->extra_regs = intel_rwc_extra_regs;
+
+		/* Initialize Atom core specific PerfMon capabilities.*/
+		pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_ATOM_IDX];
+		intel_pmu_init_grt(&pmu->pmu);
+		pmu->extra_regs = intel_cmt_extra_regs;
+
+		intel_pmu_pebs_data_source_mtl();
+		pr_cont("Meteorlake Hybrid events, ");
+		name = "meteorlake_hybrid";
 		break;
 
 	default:
@@ -6809,7 +6869,7 @@ __init int intel_pmu_init(void)
 	if (!is_hybrid() && x86_pmu.intel_cap.perf_metrics)
 		x86_pmu.intel_ctrl |= 1ULL << GLOBAL_CTRL_EN_PERF_METRICS;
 
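+	/*
+	 * Hybrid PMUs enumerated via ARCH_PERFMON_EXT already have their
+	 * counters validated in update_pmu_cap(); only legacy hybrid
+	 * parts still need the boot-CPU based cross-check here.
+	 */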
-	if (is_hybrid())
+	if (is_hybrid() && !boot_cpu_has(X86_FEATURE_ARCH_PERFMON_EXT))
 		intel_pmu_check_hybrid_pmus((u64)fixed_mask);
 
 	if (x86_pmu.intel_cap.pebs_timing_info)