6 changes: 2 additions & 4 deletions Documentation/arch/x86/topology.rst
@@ -67,7 +67,7 @@ Package-related topology information in the kernel:
    Modern systems use this value for the socket. There may be multiple
    packages within a socket. This value may differ from topo.die_id.

-  - cpuinfo_x86.logical_proc_id:
+  - cpuinfo_x86.topo.logical_pkg_id:

    The logical ID of the package. As we do not trust BIOSes to enumerate the
    packages in a consistent way, we introduced the concept of logical package
@@ -79,9 +79,7 @@ Package-related topology information in the kernel:
    The maximum possible number of packages in the system. Helpful for per
    package facilities to preallocate per package information.

-  - cpu_llc_id:
-
-    A per-CPU variable containing:
+  - cpuinfo_x86.topo.llc_id:

    - On Intel, the first APIC ID of the list of CPUs sharing the Last Level
      Cache
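Note (not part of the diff): a minimal sketch of how the relocated documentation fields are read in code, assuming the topology_logical_package_id() macro and per_cpu_llc_id() helper introduced elsewhere in this series; the function name and message text are illustrative only.

    /* Illustrative only: report a CPU's logical package and LLC ID. */
    #include <linux/printk.h>
    #include <linux/topology.h>
    #include <asm/processor.h>

    static void report_cpu_placement(unsigned int cpu)
    {
            /* Both accessors now resolve to cpuinfo_x86.topo.* fields. */
            pr_info("CPU%u: logical package %u, LLC id %u\n",
                    cpu, topology_logical_package_id(cpu), per_cpu_llc_id(cpu));
    }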
2 changes: 1 addition & 1 deletion arch/x86/events/amd/uncore.c
@@ -775,7 +775,7 @@ void amd_uncore_l3_ctx_scan(struct amd_uncore *uncore, unsigned int cpu)
	info.split.aux_data = 0;
	info.split.num_pmcs = NUM_COUNTERS_L2;
	info.split.gid = 0;
-	info.split.cid = get_llc_id(cpu);
+	info.split.cid = per_cpu_llc_id(cpu);

	if (boot_cpu_data.x86 >= 0x17)
		info.split.num_pmcs = NUM_COUNTERS_L3;
2 changes: 1 addition & 1 deletion arch/x86/events/intel/uncore.c
@@ -74,7 +74,7 @@ int uncore_device_to_die(struct pci_dev *dev)
		struct cpuinfo_x86 *c = &cpu_data(cpu);

		if (c->initialized && cpu_to_node(cpu) == node)
-			return c->logical_die_id;
+			return c->topo.logical_die_id;
	}

	return -1;
2 changes: 1 addition & 1 deletion arch/x86/hyperv/hv_vtl.c
@@ -207,7 +207,7 @@ static int hv_vtl_apicid_to_vp_id(u32 apic_id)
	return ret;
}

-static int hv_vtl_wakeup_secondary_cpu(int apicid, unsigned long start_eip)
+static int hv_vtl_wakeup_secondary_cpu(u32 apicid, unsigned long start_eip)
{
	int vp_id, cpu;

2 changes: 1 addition & 1 deletion arch/x86/hyperv/ivm.c
@@ -289,7 +289,7 @@ static void snp_cleanup_vmsa(struct sev_es_save_area *vmsa)
	free_page((unsigned long)vmsa);
}

-int hv_snp_boot_ap(int cpu, unsigned long start_ip)
+int hv_snp_boot_ap(u32 cpu, unsigned long start_ip)
{
	struct sev_es_save_area *vmsa = (struct sev_es_save_area *)
		__get_free_page(GFP_KERNEL | __GFP_ZERO);
39 changes: 14 additions & 25 deletions arch/x86/include/asm/apic.h
@@ -55,7 +55,7 @@ extern int local_apic_timer_c2_ok;
extern bool apic_is_disabled;
extern unsigned int lapic_timer_period;

-extern int cpuid_to_apicid[];
+extern u32 cpuid_to_apicid[];

extern enum apic_intr_mode_id apic_intr_mode;
enum apic_intr_mode_id {
@@ -294,19 +294,19 @@ struct apic {
	int (*acpi_madt_oem_check)(char *oem_id, char *oem_table_id);
	bool (*apic_id_registered)(void);

-	bool (*check_apicid_used)(physid_mask_t *map, int apicid);
+	bool (*check_apicid_used)(physid_mask_t *map, u32 apicid);
	void (*init_apic_ldr)(void);
	void (*ioapic_phys_id_map)(physid_mask_t *phys_map, physid_mask_t *retmap);
-	int (*cpu_present_to_apicid)(int mps_cpu);
-	int (*phys_pkg_id)(int cpuid_apic, int index_msb);
+	u32 (*cpu_present_to_apicid)(int mps_cpu);
+	u32 (*phys_pkg_id)(u32 cpuid_apic, int index_msb);

-	u32 (*get_apic_id)(unsigned long x);
-	u32 (*set_apic_id)(unsigned int id);
+	u32 (*get_apic_id)(u32 id);
+	u32 (*set_apic_id)(u32 apicid);

	/* wakeup_secondary_cpu */
-	int (*wakeup_secondary_cpu)(int apicid, unsigned long start_eip);
+	int (*wakeup_secondary_cpu)(u32 apicid, unsigned long start_eip);
	/* wakeup secondary CPU using 64-bit wakeup point */
-	int (*wakeup_secondary_cpu_64)(int apicid, unsigned long start_eip);
+	int (*wakeup_secondary_cpu_64)(u32 apicid, unsigned long start_eip);

	char *name;
};
@@ -324,8 +324,8 @@ struct apic_override {
	void (*send_IPI_self)(int vector);
	u64 (*icr_read)(void);
	void (*icr_write)(u32 low, u32 high);
-	int (*wakeup_secondary_cpu)(int apicid, unsigned long start_eip);
-	int (*wakeup_secondary_cpu_64)(int apicid, unsigned long start_eip);
+	int (*wakeup_secondary_cpu)(u32 apicid, unsigned long start_eip);
+	int (*wakeup_secondary_cpu_64)(u32 apicid, unsigned long start_eip);
};

/*
@@ -495,16 +495,6 @@ static inline bool lapic_vector_set_in_irr(unsigned int vector)
	return !!(irr & (1U << (vector % 32)));
}

-static inline unsigned default_get_apic_id(unsigned long x)
-{
-	unsigned int ver = GET_APIC_VERSION(apic_read(APIC_LVR));
-
-	if (APIC_XAPIC(ver) || boot_cpu_has(X86_FEATURE_EXTD_APICID))
-		return (x >> 24) & 0xFF;
-	else
-		return (x >> 24) & 0x0F;
-}
-
/*
 * Warm reset vector position:
 */
@@ -519,9 +509,9 @@ extern void generic_bigsmp_probe(void);

extern struct apic apic_noop;

-static inline unsigned int read_apic_id(void)
+static inline u32 read_apic_id(void)
{
-	unsigned int reg = apic_read(APIC_ID);
+	u32 reg = apic_read(APIC_ID);

	return apic->get_apic_id(reg);
}
@@ -540,15 +530,14 @@ extern int default_apic_id_valid(u32 apicid);
extern u32 apic_default_calc_apicid(unsigned int cpu);
extern u32 apic_flat_calc_apicid(unsigned int cpu);

-extern bool default_check_apicid_used(physid_mask_t *map, int apicid);
extern void default_ioapic_phys_id_map(physid_mask_t *phys_map, physid_mask_t *retmap);
-extern int default_cpu_present_to_apicid(int mps_cpu);
+extern u32 default_cpu_present_to_apicid(int mps_cpu);

void apic_send_nmi_to_offline_cpu(unsigned int cpu);

#else /* CONFIG_X86_LOCAL_APIC */

-static inline unsigned int read_apic_id(void) { return 0; }
+static inline u32 read_apic_id(void) { return 0; }

#endif /* !CONFIG_X86_LOCAL_APIC */

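Note (not part of the diff): with the callback prototypes widened to u32, an APIC driver's method table changes shape accordingly. A purely illustrative subset under that assumption; the demo_* names are hypothetical, while default_cpu_present_to_apicid() is the real helper converted above.

    /* Hypothetical driver fragment showing the u32-based callbacks. */
    #include <asm/apic.h>

    static u32 demo_get_apic_id(u32 id)
    {
            return (id >> 24) & 0xFF;
    }

    static u32 demo_phys_pkg_id(u32 cpuid_apic, int index_msb)
    {
            return cpuid_apic >> index_msb;
    }

    static struct apic apic_demo __ro_after_init = {
            .name                  = "demo",
            .get_apic_id           = demo_get_apic_id,
            .phys_pkg_id           = demo_phys_pkg_id,
            .cpu_present_to_apicid = default_cpu_present_to_apicid,
    };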
3 changes: 0 additions & 3 deletions arch/x86/include/asm/cacheinfo.h
@@ -7,9 +7,6 @@ extern unsigned int memory_caching_control;
#define CACHE_MTRR 0x01
#define CACHE_PAT 0x02

-void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c, int cpu);
-void cacheinfo_hygon_init_llc_id(struct cpuinfo_x86 *c, int cpu);
-
void cache_disable(void);
void cache_enable(void);
void set_cache_aps_delayed_init(bool val);
2 changes: 1 addition & 1 deletion arch/x86/include/asm/mpspec.h
@@ -37,7 +37,7 @@ extern int mp_bus_id_to_type[MAX_MP_BUSSES];

extern DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES);

-extern unsigned int boot_cpu_physical_apicid;
+extern u32 boot_cpu_physical_apicid;
extern u8 boot_cpu_apic_version;

#ifdef CONFIG_X86_LOCAL_APIC
4 changes: 2 additions & 2 deletions arch/x86/include/asm/mshyperv.h
@@ -275,11 +275,11 @@ int hv_unmap_ioapic_interrupt(int ioapic_id, struct hv_interrupt_entry *entry);
#ifdef CONFIG_AMD_MEM_ENCRYPT
bool hv_ghcb_negotiate_protocol(void);
void __noreturn hv_ghcb_terminate(unsigned int set, unsigned int reason);
-int hv_snp_boot_ap(int cpu, unsigned long start_ip);
+int hv_snp_boot_ap(u32 cpu, unsigned long start_ip);
#else
static inline bool hv_ghcb_negotiate_protocol(void) { return false; }
static inline void hv_ghcb_terminate(unsigned int set, unsigned int reason) {}
-static inline int hv_snp_boot_ap(int cpu, unsigned long start_ip) { return 0; }
+static inline int hv_snp_boot_ap(u32 cpu, unsigned long start_ip) { return 0; }
#endif

#if defined(CONFIG_AMD_MEM_ENCRYPT) || defined(CONFIG_INTEL_TDX_GUEST)
29 changes: 22 additions & 7 deletions arch/x86/include/asm/processor.h
@@ -92,6 +92,19 @@ struct cpuinfo_topology {
	// AMD Node ID and Nodes per Package info
	u32 amd_node_id;

+	// Compute unit ID - AMD specific
+	u32 cu_id;
+
+	// Core ID relative to the package
+	u32 core_id;
+
+	// Logical ID mappings
+	u32 logical_pkg_id;
+	u32 logical_die_id;
+
+	// Cache level topology IDs
+	u32 llc_id;
+	u32 l2c_id;
};

struct cpuinfo_x86 {
@@ -124,7 +137,6 @@ struct cpuinfo_x86 {
	__u8 x86_phys_bits;
	/* CPUID returned core id bits: */
	__u8 x86_coreid_bits;
-	__u8 cu_id;
	/* Max extended CPUID function supported: */
	__u32 extended_cpuid_level;
	/* Maximum supported CPUID level, -1=no CPUID: */
@@ -157,11 +169,6 @@ struct cpuinfo_x86 {
	u16 x86_clflush_size;
	/* number of cores as seen by the OS: */
	u16 booted_cores;
-	/* Logical processor id: */
-	u16 logical_proc_id;
-	/* Core id: */
-	u16 cpu_core_id;
-	u16 logical_die_id;
	/* Index into per_cpu list: */
	u16 cpu_index;
	/* Is SMT active on this core? */
@@ -703,7 +710,15 @@ extern int set_tsc_mode(unsigned int val);

DECLARE_PER_CPU(u64, msr_misc_features_shadow);

-extern u16 get_llc_id(unsigned int cpu);
+static inline u32 per_cpu_llc_id(unsigned int cpu)
+{
+	return per_cpu(cpu_info.topo.llc_id, cpu);
+}
+
+static inline u32 per_cpu_l2c_id(unsigned int cpu)
+{
+	return per_cpu(cpu_info.topo.l2c_id, cpu);
+}

#ifdef CONFIG_CPU_SUP_AMD
extern u32 amd_get_nodes_per_socket(void);
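Note (not part of the diff): callers migrate from the removed cpu_llc_id/cpu_l2c_id per-CPU variables to the new inline helpers. A minimal before/after sketch; the cpus_share_llc() wrapper is hypothetical.

    /* Hypothetical caller, for illustration only. */
    #include <asm/processor.h>

    static bool cpus_share_llc(unsigned int a, unsigned int b)
    {
            /* Previously: per_cpu(cpu_llc_id, a) == per_cpu(cpu_llc_id, b) */
            return per_cpu_llc_id(a) == per_cpu_llc_id(b);
    }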
4 changes: 1 addition & 3 deletions arch/x86/include/asm/smp.h
@@ -17,10 +17,8 @@ DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_die_map);
/* cpus sharing the last level cache: */
DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_l2c_shared_map);
-DECLARE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id);
-DECLARE_PER_CPU_READ_MOSTLY(u16, cpu_l2c_id);

-DECLARE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_cpu_to_apicid);
+DECLARE_EARLY_PER_CPU_READ_MOSTLY(u32, x86_cpu_to_apicid);
DECLARE_EARLY_PER_CPU_READ_MOSTLY(u32, x86_cpu_to_acpiid);

struct task_struct;
8 changes: 4 additions & 4 deletions arch/x86/include/asm/topology.h
@@ -105,19 +105,19 @@ static inline void setup_node_to_cpumask_map(void) { }
extern const struct cpumask *cpu_coregroup_mask(int cpu);
extern const struct cpumask *cpu_clustergroup_mask(int cpu);

-#define topology_logical_package_id(cpu) (cpu_data(cpu).logical_proc_id)
+#define topology_logical_package_id(cpu) (cpu_data(cpu).topo.logical_pkg_id)
#define topology_physical_package_id(cpu) (cpu_data(cpu).topo.pkg_id)
-#define topology_logical_die_id(cpu) (cpu_data(cpu).logical_die_id)
+#define topology_logical_die_id(cpu) (cpu_data(cpu).topo.logical_die_id)
#define topology_die_id(cpu) (cpu_data(cpu).topo.die_id)
-#define topology_core_id(cpu) (cpu_data(cpu).cpu_core_id)
+#define topology_core_id(cpu) (cpu_data(cpu).topo.core_id)
#define topology_ppin(cpu) (cpu_data(cpu).ppin)

#define topology_amd_node_id(cpu) (cpu_data(cpu).topo.die_id)

extern unsigned int __max_die_per_package;

#ifdef CONFIG_SMP
-#define topology_cluster_id(cpu) (per_cpu(cpu_l2c_id, cpu))
+#define topology_cluster_id(cpu) (cpu_data(cpu).topo.l2c_id)
#define topology_die_cpumask(cpu) (per_cpu(cpu_die_map, cpu))
#define topology_cluster_cpumask(cpu) (cpu_clustergroup_mask(cpu))
#define topology_core_cpumask(cpu) (per_cpu(cpu_core_map, cpu))
2 changes: 1 addition & 1 deletion arch/x86/kernel/acpi/boot.c
@@ -799,7 +799,7 @@ int acpi_unmap_cpu(int cpu)
	set_apicid_to_node(per_cpu(x86_cpu_to_apicid, cpu), NUMA_NO_NODE);
#endif

-	per_cpu(x86_cpu_to_apicid, cpu) = -1;
+	per_cpu(x86_cpu_to_apicid, cpu) = BAD_APICID;
	set_cpu_present(cpu, false);
	num_processors--;

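Note (not part of the diff): since x86_cpu_to_apicid is now u32, an unmapped slot is written as BAD_APICID instead of -1, and consumers compare against that sentinel explicitly. A hedged sketch under that assumption; the helper name is illustrative.

    /* Illustrative check against the u32 sentinel. */
    #include <asm/apic.h>
    #include <asm/smp.h>

    static bool cpu_has_valid_apicid(unsigned int cpu)
    {
            return per_cpu(x86_cpu_to_apicid, cpu) != BAD_APICID;
    }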
2 changes: 1 addition & 1 deletion arch/x86/kernel/acpi/madt_wakeup.c
@@ -169,7 +169,7 @@ static int __init acpi_mp_setup_reset(u64 reset_vector)
	return 0;
}

-static int acpi_wakeup_cpu(int apicid, unsigned long start_ip)
+static int acpi_wakeup_cpu(u32 apicid, unsigned long start_ip)
{
	if (!acpi_mp_wake_mailbox_paddr) {
		pr_warn_once("No MADT mailbox: cannot bringup secondary CPUs. Booting with kexec?\n");
4 changes: 2 additions & 2 deletions arch/x86/kernel/amd_nb.c
@@ -179,7 +179,7 @@ int amd_get_subcaches(int cpu)

	pci_read_config_dword(link, 0x1d4, &mask);

-	return (mask >> (4 * cpu_data(cpu).cpu_core_id)) & 0xf;
+	return (mask >> (4 * cpu_data(cpu).topo.core_id)) & 0xf;
}

int amd_set_subcaches(int cpu, unsigned long mask)
@@ -205,7 +205,7 @@ int amd_set_subcaches(int cpu, unsigned long mask)
		pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000);
	}

-	cuid = cpu_data(cpu).cpu_core_id;
+	cuid = cpu_data(cpu).topo.core_id;
	mask <<= 4 * cuid;
	mask |= (0xf ^ (1 << cuid)) << 26;

18 changes: 8 additions & 10 deletions arch/x86/kernel/apic/apic.c
@@ -72,7 +72,7 @@ unsigned int num_processors;
unsigned disabled_cpus;

/* Processor that is doing the boot up */
-unsigned int boot_cpu_physical_apicid __ro_after_init = -1U;
+u32 boot_cpu_physical_apicid __ro_after_init = BAD_APICID;
EXPORT_SYMBOL_GPL(boot_cpu_physical_apicid);

u8 boot_cpu_apic_version __ro_after_init;
@@ -87,7 +87,7 @@ physid_mask_t phys_cpu_present_map;
 * disable_cpu_apicid=<int>, mostly used for the kdump 2nd kernel to
 * avoid undefined behaviour caused by sending INIT from AP to BSP.
 */
-static unsigned int disabled_cpu_apicid __ro_after_init = BAD_APICID;
+static u32 disabled_cpu_apicid __ro_after_init = BAD_APICID;

/*
 * This variable controls which CPUs receive external NMIs. By default,
@@ -111,7 +111,7 @@ static inline bool apic_accessible(void)
/*
 * Map cpu index to physical APIC ID
 */
-DEFINE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_cpu_to_apicid, BAD_APICID);
+DEFINE_EARLY_PER_CPU_READ_MOSTLY(u32, x86_cpu_to_apicid, BAD_APICID);
DEFINE_EARLY_PER_CPU_READ_MOSTLY(u32, x86_cpu_to_acpiid, U32_MAX);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_apicid);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_acpiid);
@@ -1777,7 +1777,7 @@ static void __x2apic_enable(void)
static int __init setup_nox2apic(char *str)
{
	if (x2apic_enabled()) {
-		int apicid = native_apic_msr_read(APIC_ID);
+		u32 apicid = native_apic_msr_read(APIC_ID);

		if (apicid >= 255) {
			pr_warn("Apicid: %08x, cannot enforce nox2apic\n",
@@ -2337,13 +2337,11 @@ static int nr_logical_cpuids = 1;
/*
 * Used to store mapping between logical CPU IDs and APIC IDs.
 */
-int cpuid_to_apicid[] = {
-	[0 ... NR_CPUS - 1] = -1,
-};
+u32 cpuid_to_apicid[] = { [0 ... NR_CPUS - 1] = BAD_APICID, };

bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
{
-	return phys_id == cpuid_to_apicid[cpu];
+	return phys_id == (u64)cpuid_to_apicid[cpu];
}

#ifdef CONFIG_SMP
@@ -2412,7 +2410,7 @@ static int allocate_logical_cpuid(int apicid)
	return nr_logical_cpuids++;
}

-static void cpu_update_apic(int cpu, int apicid)
+static void cpu_update_apic(int cpu, u32 apicid)
{
#if defined(CONFIG_SMP) || defined(CONFIG_X86_64)
	early_per_cpu(x86_cpu_to_apicid, cpu) = apicid;
@@ -2565,7 +2563,7 @@ static struct {
	 */
	int active;
	/* r/w apic fields */
-	unsigned int apic_id;
+	u32 apic_id;
	unsigned int apic_taskpri;
	unsigned int apic_ldr;
	unsigned int apic_dfr;
4 changes: 2 additions & 2 deletions arch/x86/kernel/apic/apic_common.c
@@ -18,7 +18,7 @@ u32 apic_flat_calc_apicid(unsigned int cpu)
	return 1U << cpu;
}

-bool default_check_apicid_used(physid_mask_t *map, int apicid)
+bool default_check_apicid_used(physid_mask_t *map, u32 apicid)
{
	return physid_isset(apicid, *map);
}
@@ -28,7 +28,7 @@ void default_ioapic_phys_id_map(physid_mask_t *phys_map, physid_mask_t *retmap)
	*retmap = *phys_map;
}

-int default_cpu_present_to_apicid(int mps_cpu)
+u32 default_cpu_present_to_apicid(int mps_cpu)
{
	if (mps_cpu < nr_cpu_ids && cpu_present(mps_cpu))
		return (int)per_cpu(x86_cpu_to_apicid, mps_cpu);