Changes from all commits
29 commits
ce59662  KVM: x86: Advertise AVX-VNNI-INT8 CPUID to user space (Nov 25, 2022)
0f64779  x86/cpu: Add model number for Intel Clearwater Forest processor (aegl, Jan 17, 2024)
eeb8a6b  x86: KVM: Advertise CPUIDs for new instructions in Clearwater Forest (taosu-linux, Nov 5, 2024)
dddc919  x86/cpu: Add model number for another Intel Arrow Lake mobile processor (aegl, Mar 22, 2024)
0022198  x86/cpu/vfm: Add/initialize x86_vfm field to struct cpuinfo_x86 (aegl, Apr 16, 2024)
3f3718b  x86/cpu/vfm: Add new macros to work with (vendor/family/model) values (aegl, Apr 16, 2024)
15d8cfd  x86/cpu/vfm: Update arch/x86/include/asm/intel-family.h (aegl, Apr 16, 2024)
1e155a4  x86/cpu: Switch to new Intel CPU model defines (aegl, May 20, 2024)
eff6242  x86/cpu/intel: Switch to new Intel CPU model defines (aegl, May 20, 2024)
e424012  x86/cpu/intel: Drop stray FAM6 check with new Intel CPU model defines (andyhhp, May 29, 2024)
200184b  perf/x86/intel: Switch to new Intel CPU model defines (aegl, May 20, 2024)
a831a3b  perf/x86/intel: Use the common uarch name for the shared functions (Aug 29, 2023)
1238f1f  perf/x86/intel: Factor out the initialization code for SPR (Aug 29, 2023)
049ef50  perf/x86/intel: Factor out the initialization code for ADL e-core (Aug 29, 2023)
eba54d3  perf/x86/intel: Apply the common initialization code for ADL (Aug 29, 2023)
1c1b0b1  perf/x86/intel: Clean up the hybrid CPU type handling code (Aug 29, 2023)
3d725f1  perf/x86/intel: Add common intel_pmu_init_hybrid() (Aug 29, 2023)
b0ac021  perf/x86/intel: Fix broken fixed event constraints extension (Sep 11, 2023)
8646ab0  perf/x86/intel: Support the PEBS event mask (Jun 26, 2024)
b1e09df  perf/x86: Support counter mask (Jun 26, 2024)
e81d46e  perf/x86: Add Lunar Lake and Arrow Lake support (Jun 26, 2024)
0c795bb  perf/x86/intel: Rename model-specific pebs_latency_data functions (Jun 26, 2024)
5a365e6  perf/x86/intel: Support new data source for Lunar Lake (Jun 26, 2024)
e49b961  perf/x86: Add config_mask to represent EVENTSEL bitmask (Jun 26, 2024)
d1ac0ad  perf/x86/intel: Support PERFEVTSEL extension (Jun 26, 2024)
b909f36  perf/x86/intel: Support Perfmon MSRs aliasing (Jun 26, 2024)
fc42512  perf/x86/intel: Add PMU support for Clearwater Forest (Apr 15, 2025)
b73275c  perf/x86/intel: Parse CPUID archPerfmonExt leaves for non-hybrid CPUs (Apr 15, 2025)
8addcf2  perf/x86/intel: Introduce pairs of PEBS static calls (Apr 15, 2025)
24 changes: 12 additions & 12 deletions arch/x86/events/amd/core.c
@@ -433,7 +433,7 @@ static void __amd_put_nb_event_constraints(struct cpu_hw_events *cpuc,
* be removed on one CPU at a time AND PMU is disabled
* when we come here
*/
-for (i = 0; i < x86_pmu.num_counters; i++) {
+for_each_set_bit(i, x86_pmu.cntr_mask, X86_PMC_IDX_MAX) {
if (cmpxchg(nb->owners + i, event, NULL) == event)
break;
}
@@ -500,7 +500,7 @@ __amd_get_nb_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *ev
* because of successive calls to x86_schedule_events() from
* hw_perf_group_sched_in() without hw_perf_enable()
*/
-for_each_set_bit(idx, c->idxmsk, x86_pmu.num_counters) {
+for_each_set_bit(idx, c->idxmsk, x86_pmu_max_num_counters(NULL)) {
if (new == -1 || hwc->idx == idx)
/* assign free slot, prefer hwc->idx */
old = cmpxchg(nb->owners + idx, NULL, event);
@@ -543,7 +543,7 @@ static struct amd_nb *amd_alloc_nb(int cpu)
/*
* initialize all possible NB constraints
*/
-for (i = 0; i < x86_pmu.num_counters; i++) {
+for_each_set_bit(i, x86_pmu.cntr_mask, X86_PMC_IDX_MAX) {
__set_bit(i, nb->event_constraints[i].idxmsk);
nb->event_constraints[i].weight = 1;
}
@@ -739,7 +739,7 @@ static void amd_pmu_check_overflow(void)
* counters are always enabled when this function is called and
* ARCH_PERFMON_EVENTSEL_INT is always set.
*/
-for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+for_each_set_bit(idx, x86_pmu.cntr_mask, X86_PMC_IDX_MAX) {
if (!test_bit(idx, cpuc->active_mask))
continue;

@@ -760,7 +760,7 @@ static void amd_pmu_enable_all(int added)

amd_brs_enable_all();

-for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+for_each_set_bit(idx, x86_pmu.cntr_mask, X86_PMC_IDX_MAX) {
hwc = &cpuc->events[idx]->hw;

/* only activate events which are marked as active */
@@ -954,7 +954,7 @@ static int amd_pmu_v2_handle_irq(struct pt_regs *regs)
/* Clear any reserved bits set by buggy microcode */
status &= amd_pmu_global_cntr_mask;

-for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+for_each_set_bit(idx, x86_pmu.cntr_mask, X86_PMC_IDX_MAX) {
if (!test_bit(idx, cpuc->active_mask))
continue;

@@ -1289,7 +1289,7 @@ static __initconst const struct x86_pmu amd_pmu = {
.addr_offset = amd_pmu_addr_offset,
.event_map = amd_pmu_event_map,
.max_events = ARRAY_SIZE(amd_perfmon_event_map),
-.num_counters = AMD64_NUM_COUNTERS,
+.cntr_mask64 = GENMASK_ULL(AMD64_NUM_COUNTERS - 1, 0),
.add = amd_pmu_add_event,
.del = amd_pmu_del_event,
.cntval_bits = 48,
@@ -1388,7 +1388,7 @@ static int __init amd_core_pmu_init(void)
*/
x86_pmu.eventsel = MSR_F15H_PERF_CTL;
x86_pmu.perfctr = MSR_F15H_PERF_CTR;
-x86_pmu.num_counters = AMD64_NUM_COUNTERS_CORE;
+x86_pmu.cntr_mask64 = GENMASK_ULL(AMD64_NUM_COUNTERS_CORE - 1, 0);

/* Check for Performance Monitoring v2 support */
if (boot_cpu_has(X86_FEATURE_PERFMON_V2)) {
@@ -1398,9 +1398,9 @@ static int __init amd_core_pmu_init(void)
x86_pmu.version = 2;

/* Find the number of available Core PMCs */
-x86_pmu.num_counters = ebx.split.num_core_pmc;
+x86_pmu.cntr_mask64 = GENMASK_ULL(ebx.split.num_core_pmc - 1, 0);

-amd_pmu_global_cntr_mask = (1ULL << x86_pmu.num_counters) - 1;
+amd_pmu_global_cntr_mask = x86_pmu.cntr_mask64;

/* Update PMC handling functions */
x86_pmu.enable_all = amd_pmu_v2_enable_all;
@@ -1428,12 +1428,12 @@ static int __init amd_core_pmu_init(void)
* even numbered counter that has a consecutive adjacent odd
* numbered counter following it.
*/
-for (i = 0; i < x86_pmu.num_counters - 1; i += 2)
+for (i = 0; i < x86_pmu_max_num_counters(NULL) - 1; i += 2)
even_ctr_mask |= BIT_ULL(i);

pair_constraint = (struct event_constraint)
__EVENT_CONSTRAINT(0, even_ctr_mask, 0,
-x86_pmu.num_counters / 2, 0,
+x86_pmu_max_num_counters(NULL) / 2, 0,
PERF_X86_EVENT_PAIR);

x86_pmu.get_event_constraints = amd_get_event_constraints_f17h;
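The diff above converts the AMD core PMU from a plain num_counters count to the cntr_mask/cntr_mask64 bitmask introduced by the "perf/x86: Support counter mask" commit: GENMASK_ULL(n - 1, 0) builds a mask of n contiguous counters, iterating over set bits replaces the 0..num_counters loop, and the counter limit is recovered from the highest set bit (which is what the kernel's x86_pmu_max_num_counters() helper is assumed to do). Below is a hedged, userspace-only C sketch of that arithmetic; GENMASK_ULL and fls64_sketch here only mimic the kernel helpers.

```c
/*
 * Hedged sketch of the counter-mask arithmetic used in the diff above.
 * GENMASK_ULL and fls64_sketch mimic the kernel helpers; the "max
 * counters" computation is an illustrative stand-in for what
 * x86_pmu_max_num_counters() is assumed to do, not its definition.
 */
#include <stdio.h>
#include <stdint.h>

#define GENMASK_ULL(h, l) \
	(((~0ULL) >> (63 - (h))) & ((~0ULL) << (l)))

/* position of the highest set bit, 1-based; 0 for an empty mask */
static int fls64_sketch(uint64_t x)
{
	int n = 0;

	while (x) {
		n++;
		x >>= 1;
	}
	return n;
}

int main(void)
{
	/* e.g. a PMU with 6 general-purpose counters */
	uint64_t cntr_mask64 = GENMASK_ULL(6 - 1, 0);   /* 0x3f */
	int max_counters = fls64_sketch(cntr_mask64);   /* 6 */
	int idx;

	printf("cntr_mask64 = %#llx, max counters = %d\n",
	       (unsigned long long)cntr_mask64, max_counters);

	/* equivalent of for_each_set_bit(idx, cntr_mask, X86_PMC_IDX_MAX) */
	for (idx = 0; idx < 64; idx++)
		if (cntr_mask64 & (1ULL << idx))
			printf("counter %d present\n", idx);

	return 0;
}
```

With 6 counters, the even_ctr_mask loop in the last hunk would run for i = 0, 2, 4 and set exactly those bits, so the pair constraint covers the even counters that each have an adjacent odd partner.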