73 commits
034ee18
perf/x86/intel: Use the common uarch name for the shared functions
Aug 29, 2023
74266a2
perf/x86/intel: Factor out the initialization code for SPR
Aug 29, 2023
bfc924d
perf/x86/intel: Factor out the initialization code for ADL e-core
Aug 29, 2023
222204b
perf/x86/intel: Apply the common initialization code for ADL
Aug 29, 2023
4247ae8
perf/x86/intel: Clean up the hybrid CPU type handling code
Aug 29, 2023
9d6b6df
perf/x86/intel: Add common intel_pmu_init_hybrid()
Aug 29, 2023
2e9d4f0
perf/x86/intel: Fix broken fixed event constraints extension
Sep 11, 2023
ffebd24
tools headers UAPI: Sync include/uapi/linux/perf_event.h header with …
Oct 25, 2023
acbb082
perf/x86/intel: Correct incorrect 'or' operation for PMU capabilities
Nov 21, 2023
65fc7d5
powercap: intel_rapl: Sort header files
zhang-rui Apr 8, 2024
11c3d76
powercap: intel_rapl: Introduce APIs for PMU support
zhang-rui Apr 28, 2024
72224de
powercap: intel_rapl_tpmi: Enable PMU support
zhang-rui Apr 28, 2024
a88aa1a
perf/x86/uncore: Save the unit control address of all units
Jun 14, 2024
fb7689c
perf/x86/uncore: Support per PMU cpumask
Jun 14, 2024
9e28788
perf/x86/uncore: Retrieve the unit ID from the unit control RB tree
Jun 14, 2024
a470e11
perf/x86/uncore: Apply the unit control RB tree to MMIO uncore units
Jun 14, 2024
015220a
perf/x86/uncore: Apply the unit control RB tree to MSR uncore units
Jun 14, 2024
d28a141
perf/x86/uncore: Apply the unit control RB tree to PCI uncore units
Jun 14, 2024
c3084d5
perf/x86/uncore: Cleanup unused unit structure
Jun 14, 2024
9323be5
perf/x86/intel/uncore: Support HBM and CXL PMON counters
Jun 14, 2024
9850751
perf/x86/intel: Support the PEBS event mask
Jun 26, 2024
b78a65e
perf/x86: Support counter mask
Jun 26, 2024
acb3483
perf/x86: Add Lunar Lake and Arrow Lake support
Jun 26, 2024
6ef38c4
perf/x86/intel: Rename model-specific pebs_latency_data functions
Jun 26, 2024
c74074d
perf/x86/intel: Support new data source for Lunar Lake
Jun 26, 2024
f3cb5c9
perf/x86: Add config_mask to represent EVENTSEL bitmask
Jun 26, 2024
c8e4b56
perf/x86/intel: Support PERFEVTSEL extension
Jun 26, 2024
bb4c83d
perf/x86/intel: Support Perfmon MSRs aliasing
Jun 26, 2024
3302873
perf/x86/intel: Add a distinct name for Granite Rapids
Jul 8, 2024
d20e846
perf/x86/intel/ds: Clarify adaptive PEBS processing
Nov 19, 2024
ccc7ce7
perf/x86/intel/ds: Factor out functions for PEBS records processing
Nov 19, 2024
16fbff6
perf/x86/intel/ds: Simplify the PEBS records processing for adaptive …
Nov 19, 2024
65e86f5
perf/x86/intel: Support RDPMC metrics clear mode
Dec 11, 2024
a924084
perf/x86/intel/uncore: Add Clearwater Forest support
Dec 11, 2024
f8a71b3
perf/x86/intel/ds: Add PEBS format 6
Dec 16, 2024
dfc67d2
perf/x86/intel/uncore: Clean up func_id
Jan 8, 2025
7c09abf
perf/x86/intel/uncore: Support more units on Granite Rapids
Jan 8, 2025
2f06677
perf/x86/intel: Support PEBS counters snapshotting
Jan 21, 2025
d5f3ad3
perf vendor events: Add Clearwaterforest events
captain5050 Feb 11, 2025
1b36948
perf: Extend per event callchain limit to branch stack
Mar 10, 2025
aa365f0
perf/x86: Add dynamic constraint
Mar 27, 2025
a15b4ab
perf/x86/intel: Track the num of events needs late setup
Mar 27, 2025
ffcb11d
perf: Extend the bit width of the arch-specific flag
Mar 27, 2025
9230dc2
perf/x86/intel: Add CPUID enumeration for the auto counter reload
Mar 27, 2025
ee27104
perf/x86/intel: Support auto counter reload
Mar 27, 2025
8a6a341
perf/x86/intel: Don't clear perf metrics overflow bit unconditionally
Apr 15, 2025
66848f1
perf/x86/intel: Add PMU support for Clearwater Forest
Apr 15, 2025
43d65a1
perf/x86/intel: Parse CPUID archPerfmonExt leaves for non-hybrid CPUs
Apr 15, 2025
ee90848
perf/x86/intel: Only check the group flag for X86 leader
Apr 24, 2025
df2ae3e
perf/x86/intel: Check the X86 leader for pebs_counter_event_group
Apr 24, 2025
22f2333
perf/x86/intel: Check the X86 leader for ACR group
Apr 24, 2025
a4aa95b
perf/x86: Optimize the is_x86_event
Apr 24, 2025
ccca95b
tools/include: Sync uapi/linux/perf.h with the kernel sources
namhyung Aug 6, 2024
e1777c5
tools headers: Update the uapi/linux/perf_event.h copy with the kerne…
namhyung Apr 10, 2025
3b3b0cb
perf/uapi: Clean up <uapi/linux/perf_event.h> a bit
May 22, 2025
d9ea82b
perf/x86/intel/uncore: Support MSR portal for discovery tables
Jul 7, 2025
0842307
perf/x86/intel/uncore: Support customized MMIO map size
Jul 7, 2025
0f54bf2
perf/x86/intel: Fix crash in icl_update_topdown_event()
Jul 24, 2025
a814092
perf/x86/intel: Fix segfault with PEBS-via-PT with sample_freq
ahunter6 May 8, 2025
0c1fc87
perf/x86/intel/ds: Fix counter backwards of non-precise events counte…
Apr 24, 2025
07ddac5
perf/x86/intel: Fix event constraints for LNC
Feb 19, 2025
2304bc0
perf/x86/intel: Fix bitmask of OCR and FRONTEND events for LNC
Dec 16, 2024
c425056
perf/x86/intel: Fix incorrect MSR index calculations in intel_pmu_con…
May 29, 2025
48eb0e3
tools/include: Sync x86 headers with the kernel sources
namhyung Aug 6, 2024
df05990
x86/msr: Standardize on u64 in <asm/msr-index.h>
Apr 9, 2025
8f23972
tools arch x86: Sync the msr-index.h copy with the kernel sources
acmel Jun 12, 2025
e077884
perf/x86/intel: Fix IA32_PMC_x_CFG_B MSRs access error
Aug 20, 2025
9b69ca5
KVM: x86/pmu: Add common define to capture fixed counters offset
sean-jc Nov 10, 2023
03961d2
KVM: x86/pmu: Change ambiguous _mask suffix to _rsvd in kvm_pmu
Apr 30, 2024
d228425
KVM: x86/pmu: Manipulate FIXED_CTR_CTRL MSR with macros
Apr 30, 2024
6fe6d06
perf/x86/intel: Add ICL_FIXED_0_ADAPTIVE bit into INTEL_FIXED_BITS_MASK
Aug 20, 2025
f8f3aca
perf/x86: Print PMU counters bitmap in x86_pmu_show_pmu_cap()
Aug 20, 2025
1f9d89d
perf mem: Fix printing PERF_MEM_LVLNUM_{L2_MHB|MSC}
tlfalcon Sep 26, 2024
24 changes: 12 additions & 12 deletions arch/x86/events/amd/core.c
@@ -417,7 +417,7 @@ static void __amd_put_nb_event_constraints(struct cpu_hw_events *cpuc,
* be removed on one CPU at a time AND PMU is disabled
* when we come here
*/
- for (i = 0; i < x86_pmu.num_counters; i++) {
+ for_each_set_bit(i, x86_pmu.cntr_mask, X86_PMC_IDX_MAX) {
if (cmpxchg(nb->owners + i, event, NULL) == event)
break;
}
@@ -484,7 +484,7 @@ __amd_get_nb_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *ev
* because of successive calls to x86_schedule_events() from
* hw_perf_group_sched_in() without hw_perf_enable()
*/
- for_each_set_bit(idx, c->idxmsk, x86_pmu.num_counters) {
+ for_each_set_bit(idx, c->idxmsk, x86_pmu_max_num_counters(NULL)) {
if (new == -1 || hwc->idx == idx)
/* assign free slot, prefer hwc->idx */
old = cmpxchg(nb->owners + idx, NULL, event);
@@ -527,7 +527,7 @@ static struct amd_nb *amd_alloc_nb(int cpu)
/*
* initialize all possible NB constraints
*/
- for (i = 0; i < x86_pmu.num_counters; i++) {
+ for_each_set_bit(i, x86_pmu.cntr_mask, X86_PMC_IDX_MAX) {
__set_bit(i, nb->event_constraints[i].idxmsk);
nb->event_constraints[i].weight = 1;
}
@@ -720,7 +720,7 @@ static void amd_pmu_check_overflow(void)
* counters are always enabled when this function is called and
* ARCH_PERFMON_EVENTSEL_INT is always set.
*/
- for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+ for_each_set_bit(idx, x86_pmu.cntr_mask, X86_PMC_IDX_MAX) {
if (!test_bit(idx, cpuc->active_mask))
continue;

@@ -740,7 +740,7 @@ static void amd_pmu_enable_all(int added)

amd_brs_enable_all();

- for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+ for_each_set_bit(idx, x86_pmu.cntr_mask, X86_PMC_IDX_MAX) {
/* only activate events which are marked as active */
if (!test_bit(idx, cpuc->active_mask))
continue;
@@ -933,7 +933,7 @@ static int amd_pmu_v2_handle_irq(struct pt_regs *regs)
/* Clear any reserved bits set by buggy microcode */
status &= amd_pmu_global_cntr_mask;

- for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+ for_each_set_bit(idx, x86_pmu.cntr_mask, X86_PMC_IDX_MAX) {
if (!test_bit(idx, cpuc->active_mask))
continue;

@@ -1273,7 +1273,7 @@ static __initconst const struct x86_pmu amd_pmu = {
.addr_offset = amd_pmu_addr_offset,
.event_map = amd_pmu_event_map,
.max_events = ARRAY_SIZE(amd_perfmon_event_map),
- .num_counters = AMD64_NUM_COUNTERS,
+ .cntr_mask64 = GENMASK_ULL(AMD64_NUM_COUNTERS - 1, 0),
.add = amd_pmu_add_event,
.del = amd_pmu_del_event,
.cntval_bits = 48,
@@ -1372,7 +1372,7 @@ static int __init amd_core_pmu_init(void)
*/
x86_pmu.eventsel = MSR_F15H_PERF_CTL;
x86_pmu.perfctr = MSR_F15H_PERF_CTR;
- x86_pmu.num_counters = AMD64_NUM_COUNTERS_CORE;
+ x86_pmu.cntr_mask64 = GENMASK_ULL(AMD64_NUM_COUNTERS_CORE - 1, 0);

/* Check for Performance Monitoring v2 support */
if (boot_cpu_has(X86_FEATURE_PERFMON_V2)) {
@@ -1382,9 +1382,9 @@ static int __init amd_core_pmu_init(void)
x86_pmu.version = 2;

/* Find the number of available Core PMCs */
- x86_pmu.num_counters = ebx.split.num_core_pmc;
+ x86_pmu.cntr_mask64 = GENMASK_ULL(ebx.split.num_core_pmc - 1, 0);

- amd_pmu_global_cntr_mask = (1ULL << x86_pmu.num_counters) - 1;
+ amd_pmu_global_cntr_mask = x86_pmu.cntr_mask64;

/* Update PMC handling functions */
x86_pmu.enable_all = amd_pmu_v2_enable_all;
@@ -1412,12 +1412,12 @@ static int __init amd_core_pmu_init(void)
* even numbered counter that has a consecutive adjacent odd
* numbered counter following it.
*/
- for (i = 0; i < x86_pmu.num_counters - 1; i += 2)
+ for (i = 0; i < x86_pmu_max_num_counters(NULL) - 1; i += 2)
even_ctr_mask |= BIT_ULL(i);

pair_constraint = (struct event_constraint)
__EVENT_CONSTRAINT(0, even_ctr_mask, 0,
- x86_pmu.num_counters / 2, 0,
+ x86_pmu_max_num_counters(NULL) / 2, 0,
PERF_X86_EVENT_PAIR);

x86_pmu.get_event_constraints = amd_get_event_constraints_f17h;
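
Note: the recurring change across this file replaces loops bounded by x86_pmu.num_counters with walks over a counter bitmask (x86_pmu.cntr_mask / cntr_mask64, via for_each_set_bit(), GENMASK_ULL() and x86_pmu_max_num_counters()). The standalone userspace sketch below illustrates why a mask generalizes a plain count; the mask value, PMC_IDX_MAX, and the printed output are illustrative assumptions, not kernel definitions.

/*
 * Minimal sketch: a contiguous counter count vs. a counter bitmask.
 * A mask can describe non-contiguous counter indices (e.g. fixed
 * counters living at higher bit positions), which a bare count cannot.
 */
#include <stdint.h>
#include <stdio.h>

#define PMC_IDX_MAX 64	/* illustrative stand-in for X86_PMC_IDX_MAX */

int main(void)
{
	/* Assumed layout: 6 general counters plus 3 "fixed" ones at bit 32. */
	uint64_t cntr_mask = ((1ULL << 6) - 1) | (0x7ULL << 32);
	int num_counters = 6;
	int i;

	/* Old pattern: assumes counters occupy indices 0..num_counters-1. */
	for (i = 0; i < num_counters; i++)
		printf("contiguous counter %d\n", i);

	/* New pattern: visit every set bit, contiguous or not. */
	for (i = 0; i < PMC_IDX_MAX; i++) {
		if (cntr_mask & (1ULL << i))
			printf("mask counter %d\n", i);
	}

	return 0;
}

In the kernel the same idea is expressed as for_each_set_bit(i, x86_pmu.cntr_mask, X86_PMC_IDX_MAX), and GENMASK_ULL(n - 1, 0) builds the contiguous n-bit mask previously written as (1ULL << n) - 1.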
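
The last hunk builds the even-counter mask used for AMD large-increment ("pair") events, which must be scheduled on an even-numbered counter whose odd neighbour is free. A quick sketch of that arithmetic, assuming six core counters as with AMD64_NUM_COUNTERS_CORE:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int num_counters = 6;	/* assumed; matches AMD64_NUM_COUNTERS_CORE */
	uint64_t even_ctr_mask = 0;
	int i;

	/* Set a bit for every even counter that has an odd neighbour after it. */
	for (i = 0; i < num_counters - 1; i += 2)
		even_ctr_mask |= 1ULL << i;

	/* Prints even_ctr_mask = 0x15 (counters 0, 2, 4) and weight = 3. */
	printf("even_ctr_mask = %#llx, weight = %d\n",
	       (unsigned long long)even_ctr_mask, num_counters / 2);

	return 0;
}

The constraint weight of num_counters / 2 corresponds to the three usable pair slots, which is why this spot swaps x86_pmu.num_counters for x86_pmu_max_num_counters(NULL), a count, rather than for a mask.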