@@ -5940,6 +5940,25 @@ static __always_inline void intel_pmu_init_glc(struct pmu *pmu)
 	hybrid(pmu, pebs_constraints) = intel_glc_pebs_event_constraints;
 }
 
+static __always_inline void intel_pmu_init_grt(struct pmu *pmu)
+{
+	x86_pmu.mid_ack = true;
+	x86_pmu.limit_period = glc_limit_period;
+	x86_pmu.pebs_aliases = NULL;
+	x86_pmu.pebs_prec_dist = true;
+	x86_pmu.pebs_block = true;
+	x86_pmu.lbr_pt_coexist = true;
+	x86_pmu.flags |= PMU_FL_HAS_RSP_1;
+	x86_pmu.flags |= PMU_FL_INSTR_LATENCY;
+
+	memcpy(hybrid_var(pmu, hw_cache_event_ids), glp_hw_cache_event_ids, sizeof(hw_cache_event_ids));
+	memcpy(hybrid_var(pmu, hw_cache_extra_regs), tnt_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
+	hybrid_var(pmu, hw_cache_event_ids)[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = -1;
+	hybrid(pmu, event_constraints) = intel_slm_event_constraints;
+	hybrid(pmu, pebs_constraints) = intel_grt_pebs_event_constraints;
+	hybrid(pmu, extra_regs) = intel_grt_extra_regs;
+}
+
 __init int intel_pmu_init(void)
 {
 	struct attribute **extra_skl_attr = &empty_attrs;
@@ -6218,28 +6237,10 @@ __init int intel_pmu_init(void)
 		break;
 
 	case INTEL_FAM6_ATOM_GRACEMONT:
-		x86_pmu.mid_ack = true;
-		memcpy(hw_cache_event_ids, glp_hw_cache_event_ids,
-		       sizeof(hw_cache_event_ids));
-		memcpy(hw_cache_extra_regs, tnt_hw_cache_extra_regs,
-		       sizeof(hw_cache_extra_regs));
-		hw_cache_event_ids[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = -1;
-
-		x86_pmu.event_constraints = intel_slm_event_constraints;
-		x86_pmu.pebs_constraints = intel_grt_pebs_event_constraints;
-		x86_pmu.extra_regs = intel_grt_extra_regs;
-
-		x86_pmu.pebs_aliases = NULL;
-		x86_pmu.pebs_prec_dist = true;
-		x86_pmu.pebs_block = true;
-		x86_pmu.lbr_pt_coexist = true;
-		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
-		x86_pmu.flags |= PMU_FL_INSTR_LATENCY;
-
+		intel_pmu_init_grt(NULL);
 		intel_pmu_pebs_data_source_grt();
 		x86_pmu.pebs_latency_data = adl_latency_data_small;
 		x86_pmu.get_event_constraints = tnt_get_event_constraints;
-		x86_pmu.limit_period = glc_limit_period;
 		td_attr = tnt_events_attrs;
 		mem_attr = grt_mem_attrs;
 		extra_attr = nhm_format_attr;
@@ -6249,28 +6250,11 @@ __init int intel_pmu_init(void)
 
 	case INTEL_FAM6_ATOM_CRESTMONT:
 	case INTEL_FAM6_ATOM_CRESTMONT_X:
-		x86_pmu.mid_ack = true;
-		memcpy(hw_cache_event_ids, glp_hw_cache_event_ids,
-		       sizeof(hw_cache_event_ids));
-		memcpy(hw_cache_extra_regs, tnt_hw_cache_extra_regs,
-		       sizeof(hw_cache_extra_regs));
-		hw_cache_event_ids[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = -1;
-
-		x86_pmu.event_constraints = intel_slm_event_constraints;
-		x86_pmu.pebs_constraints = intel_grt_pebs_event_constraints;
+		intel_pmu_init_grt(NULL);
 		x86_pmu.extra_regs = intel_cmt_extra_regs;
-
-		x86_pmu.pebs_aliases = NULL;
-		x86_pmu.pebs_prec_dist = true;
-		x86_pmu.lbr_pt_coexist = true;
-		x86_pmu.pebs_block = true;
-		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
-		x86_pmu.flags |= PMU_FL_INSTR_LATENCY;
-
 		intel_pmu_pebs_data_source_cmt();
 		x86_pmu.pebs_latency_data = mtl_latency_data_small;
 		x86_pmu.get_event_constraints = cmt_get_event_constraints;
-		x86_pmu.limit_period = glc_limit_period;
 		td_attr = cmt_events_attrs;
 		mem_attr = grt_mem_attrs;
 		extra_attr = cmt_format_attr;
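
Note on why intel_pmu_init_grt(NULL) can replace the removed open-coded assignments on these non-hybrid Atom parts: the kernel's hybrid()/hybrid_var() accessors resolve to the global x86_pmu state when no hybrid PMU is supplied, and to the per-PMU copy otherwise. The following is a minimal, self-contained sketch of that dispatch idea only; fake_pmu_caps, fake_hybrid and init_grt_like are illustrative names, not the kernel's actual definitions, and the real macros also check is_hybrid().

	#include <stdio.h>

	struct fake_pmu_caps {
		int mid_ack;
		const char *event_constraints;
	};

	/* stands in for the global x86_pmu capability structure */
	static struct fake_pmu_caps global_caps;

	/* assumed dispatch: use the per-PMU copy when one is supplied,
	 * otherwise fall back to the global structure */
	#define fake_hybrid(pmu, field) \
		(*((pmu) ? &(pmu)->field : &global_caps.field))

	static void init_grt_like(struct fake_pmu_caps *pmu)
	{
		fake_hybrid(pmu, mid_ack) = 1;
		fake_hybrid(pmu, event_constraints) = "slm_constraints";
	}

	int main(void)
	{
		struct fake_pmu_caps e_core = { 0 };

		init_grt_like(NULL);     /* non-hybrid path: writes the globals */
		init_grt_like(&e_core);  /* hybrid path: writes the per-PMU copy */

		printf("global mid_ack=%d, e_core mid_ack=%d\n",
		       global_caps.mid_ack, e_core.mid_ack);
		return 0;
	}

Under that assumption, passing NULL from the Gracemont and Crestmont cases fills the same global fields the deleted assignments used to fill, while the per-field overrides that differ per platform (extra_regs on Crestmont, limit_period moved into the helper) stay outside or inside the helper accordingly.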