diff --git a/Documentation/admin-guide/pm/intel_uncore_frequency_scaling.rst b/Documentation/admin-guide/pm/intel_uncore_frequency_scaling.rst index 5ab3440e6cee0..4a36763cd1f3f 100644 --- a/Documentation/admin-guide/pm/intel_uncore_frequency_scaling.rst +++ b/Documentation/admin-guide/pm/intel_uncore_frequency_scaling.rst @@ -91,12 +91,22 @@ Attributes in each directory: ``domain_id`` This attribute is used to get the power domain id of this instance. +``die_id`` + This attribute is used to get the Linux die id of this instance. + This attribute is only present for domains with core agents and + when the CPUID leaf 0x1f presents die ID. + ``fabric_cluster_id`` This attribute is used to get the fabric cluster id of this instance. ``package_id`` This attribute is used to get the package id of this instance. +``agent_types`` + This attribute displays all the hardware agents present within the + domain. Each agent has the capability to control one or more hardware + subsystems, which include: core, cache, memory, and I/O. + The other attributes are same as presented at package_*_die_* level. In most of current use cases, the "max_freq_khz" and "min_freq_khz" diff --git a/config.x86_64 b/config.x86_64 index 6cd53619fa6c9..4c60c0e8e0e05 100644 --- a/config.x86_64 +++ b/config.x86_64 @@ -8196,6 +8196,7 @@ CONFIG_INTEL_OAKTRAIL=m CONFIG_INTEL_RST=m CONFIG_INTEL_SMARTCONNECT=m CONFIG_INTEL_TPMI=m +CONFIG_INTEL_PLR_TPMI=m # CONFIG_INTEL_TURBO_MAX_3 is not set CONFIG_INTEL_VSEC=m # CONFIG_MSI_EC is not set diff --git a/drivers/platform/x86/intel/Kconfig b/drivers/platform/x86/intel/Kconfig index e9dc0c0210299..ad50bbabec615 100644 --- a/drivers/platform/x86/intel/Kconfig +++ b/drivers/platform/x86/intel/Kconfig @@ -192,10 +192,14 @@ config INTEL_SMARTCONNECT This driver checks to determine whether the device has Intel Smart Connect enabled, and if so disables it. +config INTEL_TPMI_POWER_DOMAINS + tristate + config INTEL_TPMI tristate "Intel Topology Aware Register and PM Capsule Interface (TPMI)" depends on INTEL_VSEC depends on X86_64 + select INTEL_TPMI_POWER_DOMAINS help The Intel Topology Aware Register and PM Capsule Interface (TPMI), provides enumerable MMIO interface for power management features. @@ -205,6 +209,13 @@ config INTEL_TPMI To compile this driver as a module, choose M here: the module will be called intel_vsec_tpmi. +config INTEL_PLR_TPMI + tristate "Intel SoC TPMI Power Limit Reasons driver" + depends on INTEL_TPMI + help + This driver provides the TPMI power limit reasons status information + via debugfs files. 
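For reference, a minimal userspace sketch that reads the new die_id and agent_types attributes documented above. It assumes the sysfs layout described in intel_uncore_frequency_scaling.rst (/sys/devices/system/cpu/intel_uncore_frequency/uncore##); the instance name "uncore00" is only an example and real code should enumerate the directory:

	#include <stdio.h>
	#include <string.h>

	#define UNCORE_SYSFS "/sys/devices/system/cpu/intel_uncore_frequency"

	/* Read one sysfs attribute of one uncore instance into buf. */
	static int read_attr(const char *dir, const char *attr, char *buf, int len)
	{
		char path[256];
		FILE *f;

		snprintf(path, sizeof(path), "%s/%s/%s", UNCORE_SYSFS, dir, attr);
		f = fopen(path, "r");
		if (!f)
			return -1;
		if (!fgets(buf, len, f))
			buf[0] = '\0';
		fclose(f);
		buf[strcspn(buf, "\n")] = '\0';	/* strip trailing newline */
		return 0;
	}

	int main(void)
	{
		char val[64];

		if (!read_attr("uncore00", "agent_types", val, sizeof(val)))
			printf("agent_types: %s\n", val);
		if (!read_attr("uncore00", "die_id", val, sizeof(val)))
			printf("die_id: %s\n", val);
		return 0;
	}

Note that die_id is only present for domains containing core agents, so a missing file is expected for I/O-only domains.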
+ config INTEL_TURBO_MAX_3 bool "Intel Turbo Boost Max Technology 3.0 enumeration driver" depends on X86_64 && SCHED_MC_PRIO diff --git a/drivers/platform/x86/intel/Makefile b/drivers/platform/x86/intel/Makefile index c1d5fe05e3f30..74db065c82d61 100644 --- a/drivers/platform/x86/intel/Makefile +++ b/drivers/platform/x86/intel/Makefile @@ -52,6 +52,10 @@ obj-$(CONFIG_INTEL_PUNIT_IPC) += intel_punit_ipc.o # TPMI drivers intel_vsec_tpmi-y := tpmi.o obj-$(CONFIG_INTEL_TPMI) += intel_vsec_tpmi.o +obj-$(CONFIG_INTEL_PLR_TPMI) += intel_plr_tpmi.o + +intel_tpmi_power_domains-y := tpmi_power_domains.o +obj-$(CONFIG_INTEL_TPMI_POWER_DOMAINS) += intel_tpmi_power_domains.o # Intel Uncore drivers intel-rst-y := rst.o diff --git a/drivers/platform/x86/intel/intel_plr_tpmi.c b/drivers/platform/x86/intel/intel_plr_tpmi.c new file mode 100644 index 0000000000000..69ace6a629bc7 --- /dev/null +++ b/drivers/platform/x86/intel/intel_plr_tpmi.c @@ -0,0 +1,354 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Performance Limit Reasons via TPMI + * + * Copyright (c) 2024, Intel Corporation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "tpmi_power_domains.h" + +#define PLR_HEADER 0x00 +#define PLR_MAILBOX_INTERFACE 0x08 +#define PLR_MAILBOX_DATA 0x10 +#define PLR_DIE_LEVEL 0x18 + +#define PLR_MODULE_ID_MASK GENMASK_ULL(19, 12) +#define PLR_RUN_BUSY BIT_ULL(63) + +#define PLR_COMMAND_WRITE 1 + +#define PLR_INVALID GENMASK_ULL(63, 0) + +#define PLR_TIMEOUT_US 5 +#define PLR_TIMEOUT_MAX_US 1000 + +#define PLR_COARSE_REASON_BITS 32 + +struct tpmi_plr; + +struct tpmi_plr_die { + void __iomem *base; + struct mutex lock; /* Protect access to PLR mailbox */ + int package_id; + int die_id; + struct tpmi_plr *plr; +}; + +struct tpmi_plr { + struct dentry *dbgfs_dir; + struct tpmi_plr_die *die_info; + int num_dies; + struct auxiliary_device *auxdev; +}; + +static const char * const plr_coarse_reasons[] = { + "FREQUENCY", + "CURRENT", + "POWER", + "THERMAL", + "PLATFORM", + "MCP", + "RAS", + "MISC", + "QOS", + "DFC", +}; + +static const char * const plr_fine_reasons[] = { + "FREQUENCY_CDYN0", + "FREQUENCY_CDYN1", + "FREQUENCY_CDYN2", + "FREQUENCY_CDYN3", + "FREQUENCY_CDYN4", + "FREQUENCY_CDYN5", + "FREQUENCY_FCT", + "FREQUENCY_PCS_TRL", + "CURRENT_MTPMAX", + "POWER_FAST_RAPL", + "POWER_PKG_PL1_MSR_TPMI", + "POWER_PKG_PL1_MMIO", + "POWER_PKG_PL1_PCS", + "POWER_PKG_PL2_MSR_TPMI", + "POWER_PKG_PL2_MMIO", + "POWER_PKG_PL2_PCS", + "POWER_PLATFORM_PL1_MSR_TPMI", + "POWER_PLATFORM_PL1_MMIO", + "POWER_PLATFORM_PL1_PCS", + "POWER_PLATFORM_PL2_MSR_TPMI", + "POWER_PLATFORM_PL2_MMIO", + "POWER_PLATFORM_PL2_PCS", + "UNKNOWN(22)", + "THERMAL_PER_CORE", + "DFC_UFS", + "PLATFORM_PROCHOT", + "PLATFORM_HOT_VR", + "UNKNOWN(27)", + "UNKNOWN(28)", + "MISC_PCS_PSTATE", +}; + +static u64 plr_read(struct tpmi_plr_die *plr_die, int offset) +{ + return readq(plr_die->base + offset); +} + +static void plr_write(u64 val, struct tpmi_plr_die *plr_die, int offset) +{ + writeq(val, plr_die->base + offset); +} + +static int plr_read_cpu_status(struct tpmi_plr_die *plr_die, int cpu, + u64 *status) +{ + u64 regval; + int ret; + + lockdep_assert_held(&plr_die->lock); + + regval = FIELD_PREP(PLR_MODULE_ID_MASK, tpmi_get_punit_core_number(cpu)); + regval |= PLR_RUN_BUSY; + + plr_write(regval, plr_die, PLR_MAILBOX_INTERFACE); + + ret = readq_poll_timeout(plr_die->base + PLR_MAILBOX_INTERFACE, 
regval, + !(regval & PLR_RUN_BUSY), PLR_TIMEOUT_US, + PLR_TIMEOUT_MAX_US); + if (ret) + return ret; + + *status = plr_read(plr_die, PLR_MAILBOX_DATA); + + return 0; +} + +static int plr_clear_cpu_status(struct tpmi_plr_die *plr_die, int cpu) +{ + u64 regval; + + lockdep_assert_held(&plr_die->lock); + + regval = FIELD_PREP(PLR_MODULE_ID_MASK, tpmi_get_punit_core_number(cpu)); + regval |= PLR_RUN_BUSY | PLR_COMMAND_WRITE; + + plr_write(0, plr_die, PLR_MAILBOX_DATA); + + plr_write(regval, plr_die, PLR_MAILBOX_INTERFACE); + + return readq_poll_timeout(plr_die->base + PLR_MAILBOX_INTERFACE, regval, + !(regval & PLR_RUN_BUSY), PLR_TIMEOUT_US, + PLR_TIMEOUT_MAX_US); +} + +static void plr_print_bits(struct seq_file *s, u64 val, int bits) +{ + const unsigned long mask[] = { BITMAP_FROM_U64(val) }; + int bit, index; + + for_each_set_bit(bit, mask, bits) { + const char *str = NULL; + + if (bit < PLR_COARSE_REASON_BITS) { + if (bit < ARRAY_SIZE(plr_coarse_reasons)) + str = plr_coarse_reasons[bit]; + } else { + index = bit - PLR_COARSE_REASON_BITS; + if (index < ARRAY_SIZE(plr_fine_reasons)) + str = plr_fine_reasons[index]; + } + + if (str) + seq_printf(s, " %s", str); + else + seq_printf(s, " UNKNOWN(%d)", bit); + } + + if (!val) + seq_puts(s, " none"); + + seq_putc(s, '\n'); +} + +static int plr_status_show(struct seq_file *s, void *unused) +{ + struct tpmi_plr_die *plr_die = s->private; + int ret; + u64 val; + + val = plr_read(plr_die, PLR_DIE_LEVEL); + seq_puts(s, "cpus"); + plr_print_bits(s, val, 32); + + guard(mutex)(&plr_die->lock); + + for (int cpu = 0; cpu < nr_cpu_ids; cpu++) { + if (plr_die->die_id != tpmi_get_power_domain_id(cpu)) + continue; + + if (plr_die->package_id != topology_physical_package_id(cpu)) + continue; + + seq_printf(s, "cpu%d", cpu); + ret = plr_read_cpu_status(plr_die, cpu, &val); + if (ret) { + dev_err(&plr_die->plr->auxdev->dev, "Failed to read PLR for cpu %d, ret=%d\n", + cpu, ret); + return ret; + } + + plr_print_bits(s, val, 64); + } + + return 0; +} + +static ssize_t plr_status_write(struct file *filp, const char __user *ubuf, + size_t count, loff_t *ppos) +{ + struct seq_file *s = filp->private_data; + struct tpmi_plr_die *plr_die = s->private; + bool val; + int ret; + + ret = kstrtobool_from_user(ubuf, count, &val); + if (ret) + return ret; + + if (val != 0) + return -EINVAL; + + plr_write(0, plr_die, PLR_DIE_LEVEL); + + guard(mutex)(&plr_die->lock); + + for (int cpu = 0; cpu < nr_cpu_ids; cpu++) { + if (plr_die->die_id != tpmi_get_power_domain_id(cpu)) + continue; + + if (plr_die->package_id != topology_physical_package_id(cpu)) + continue; + + plr_clear_cpu_status(plr_die, cpu); + } + + return count; +} +DEFINE_SHOW_STORE_ATTRIBUTE(plr_status); + +static int intel_plr_probe(struct auxiliary_device *auxdev, const struct auxiliary_device_id *id) +{ + struct intel_tpmi_plat_info *plat_info; + struct dentry *dentry; + int i, num_resources; + struct resource *res; + struct tpmi_plr *plr; + void __iomem *base; + char name[16]; + int err; + + plat_info = tpmi_get_platform_data(auxdev); + if (!plat_info) + return dev_err_probe(&auxdev->dev, -EINVAL, "No platform info\n"); + + dentry = tpmi_get_debugfs_dir(auxdev); + if (!dentry) + return dev_err_probe(&auxdev->dev, -ENODEV, "No TPMI debugfs directory.\n"); + + num_resources = tpmi_get_resource_count(auxdev); + if (!num_resources) + return -EINVAL; + + plr = devm_kzalloc(&auxdev->dev, sizeof(*plr), GFP_KERNEL); + if (!plr) + return -ENOMEM; + + plr->die_info = devm_kcalloc(&auxdev->dev, num_resources, 
sizeof(*plr->die_info), + GFP_KERNEL); + if (!plr->die_info) + return -ENOMEM; + + plr->num_dies = num_resources; + plr->dbgfs_dir = debugfs_create_dir("plr", dentry); + plr->auxdev = auxdev; + + for (i = 0; i < num_resources; i++) { + res = tpmi_get_resource_at_index(auxdev, i); + if (!res) { + err = dev_err_probe(&auxdev->dev, -EINVAL, "No resource\n"); + goto err; + } + + base = devm_ioremap_resource(&auxdev->dev, res); + if (IS_ERR(base)) { + err = PTR_ERR(base); + goto err; + } + + plr->die_info[i].base = base; + plr->die_info[i].package_id = plat_info->package_id; + plr->die_info[i].die_id = i; + plr->die_info[i].plr = plr; + mutex_init(&plr->die_info[i].lock); + + if (plr_read(&plr->die_info[i], PLR_HEADER) == PLR_INVALID) + continue; + + snprintf(name, sizeof(name), "domain%d", i); + + dentry = debugfs_create_dir(name, plr->dbgfs_dir); + debugfs_create_file("status", 0444, dentry, &plr->die_info[i], + &plr_status_fops); + } + + auxiliary_set_drvdata(auxdev, plr); + + return 0; + +err: + debugfs_remove_recursive(plr->dbgfs_dir); + return err; +} + +static void intel_plr_remove(struct auxiliary_device *auxdev) +{ + struct tpmi_plr *plr = auxiliary_get_drvdata(auxdev); + + debugfs_remove_recursive(plr->dbgfs_dir); +} + +static const struct auxiliary_device_id intel_plr_id_table[] = { + { .name = "intel_vsec.tpmi-plr" }, + {} +}; +MODULE_DEVICE_TABLE(auxiliary, intel_plr_id_table); + +static struct auxiliary_driver intel_plr_aux_driver = { + .id_table = intel_plr_id_table, + .remove = intel_plr_remove, + .probe = intel_plr_probe, +}; +module_auxiliary_driver(intel_plr_aux_driver); + +MODULE_IMPORT_NS(INTEL_TPMI); +MODULE_IMPORT_NS(INTEL_TPMI_POWER_DOMAIN); +MODULE_DESCRIPTION("Intel TPMI PLR Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/platform/x86/intel/tpmi.c b/drivers/platform/x86/intel/tpmi.c index 060f9b86bc030..a3430abeb9869 100644 --- a/drivers/platform/x86/intel/tpmi.c +++ b/drivers/platform/x86/intel/tpmi.c @@ -357,6 +357,15 @@ int tpmi_get_feature_status(struct auxiliary_device *auxdev, } EXPORT_SYMBOL_NS_GPL(tpmi_get_feature_status, INTEL_TPMI); +struct dentry *tpmi_get_debugfs_dir(struct auxiliary_device *auxdev) +{ + struct intel_vsec_device *intel_vsec_dev = dev_to_ivdev(auxdev->dev.parent); + struct intel_tpmi_info *tpmi_info = auxiliary_get_drvdata(&intel_vsec_dev->auxdev); + + return tpmi_info->dbgfs_dir; +} +EXPORT_SYMBOL_NS_GPL(tpmi_get_debugfs_dir, INTEL_TPMI); + static int tpmi_pfs_dbg_show(struct seq_file *s, void *unused) { struct intel_tpmi_info *tpmi_info = s->private; @@ -576,6 +585,8 @@ static const char *intel_tpmi_name(enum intel_tpmi_id id) return "uncore"; case TPMI_ID_SST: return "sst"; + case TPMI_ID_PLR: + return "plr"; default: return NULL; } diff --git a/drivers/platform/x86/intel/tpmi_power_domains.c b/drivers/platform/x86/intel/tpmi_power_domains.c new file mode 100644 index 0000000000000..1fc15a58657fd --- /dev/null +++ b/drivers/platform/x86/intel/tpmi_power_domains.c @@ -0,0 +1,264 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Mapping of TPMI power domains CPU mapping + * + * Copyright (c) 2024, Intel Corporation. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "tpmi_power_domains.h" + +#define MSR_PM_LOGICAL_ID 0x54 + +/* + * Struct of MSR 0x54 + * [15:11] PM_DOMAIN_ID + * [10:3] MODULE_ID (aka IDI_AGENT_ID) + * [2:0] LP_ID + * For Atom: + * [2] Always 0 + * [1:0] core ID within module + * For Core + * [2:1] Always 0 + * [0] thread ID + */ + +#define LP_ID_MASK GENMASK_ULL(2, 0) +#define MODULE_ID_MASK GENMASK_ULL(10, 3) +#define PM_DOMAIN_ID_MASK GENMASK_ULL(15, 11) + +/** + * struct tpmi_cpu_info - Mapping information for a CPU + * @hnode: Used to add mapping information to hash list + * @linux_cpu: Linux CPU number + * @pkg_id: Package ID of this CPU + * @punit_thread_id: Punit thread id of this CPU + * @punit_core_id: Punit core id + * @punit_domain_id: Power domain id from Punit + * + * Structure to store mapping information for a Linux CPU + * to a Punit core, thread and power domain. + */ +struct tpmi_cpu_info { + struct hlist_node hnode; + int linux_cpu; + u8 pkg_id; + u8 punit_thread_id; + u8 punit_core_id; + u8 punit_domain_id; +}; + +static DEFINE_PER_CPU(struct tpmi_cpu_info, tpmi_cpu_info); + +/* The dynamically assigned cpu hotplug state to free later */ +static enum cpuhp_state tpmi_hp_state __read_mostly; + +#define MAX_POWER_DOMAINS 8 + +static cpumask_t *tpmi_power_domain_mask; + +static u16 *domain_die_map; + +/* Lock to protect tpmi_power_domain_mask and tpmi_cpu_hash */ +static DEFINE_MUTEX(tpmi_lock); + +static const struct x86_cpu_id tpmi_cpu_ids[] = { + X86_MATCH_VFM(INTEL_GRANITERAPIDS_X, NULL), + X86_MATCH_VFM(INTEL_ATOM_CRESTMONT_X, NULL), + X86_MATCH_VFM(INTEL_ATOM_CRESTMONT, NULL), + X86_MATCH_VFM(INTEL_ATOM_DARKMONT_X, NULL), + X86_MATCH_VFM(INTEL_GRANITERAPIDS_D, NULL), + {} +}; +MODULE_DEVICE_TABLE(x86cpu, tpmi_cpu_ids); + +static DECLARE_HASHTABLE(tpmi_cpu_hash, 8); + +static bool tpmi_domain_is_valid(struct tpmi_cpu_info *info) +{ + return info->pkg_id < topology_max_packages() && + info->punit_domain_id < MAX_POWER_DOMAINS; +} + +int tpmi_get_linux_cpu_number(int package_id, int domain_id, int punit_core_id) +{ + struct tpmi_cpu_info *info; + int ret = -EINVAL; + + guard(mutex)(&tpmi_lock); + hash_for_each_possible(tpmi_cpu_hash, info, hnode, punit_core_id) { + if (info->punit_domain_id == domain_id && info->pkg_id == package_id) { + ret = info->linux_cpu; + break; + } + } + + return ret; +} +EXPORT_SYMBOL_NS_GPL(tpmi_get_linux_cpu_number, INTEL_TPMI_POWER_DOMAIN); + +int tpmi_get_punit_core_number(int cpu_no) +{ + if (cpu_no >= num_possible_cpus()) + return -EINVAL; + + return per_cpu(tpmi_cpu_info, cpu_no).punit_core_id; +} +EXPORT_SYMBOL_NS_GPL(tpmi_get_punit_core_number, INTEL_TPMI_POWER_DOMAIN); + +int tpmi_get_power_domain_id(int cpu_no) +{ + if (cpu_no >= num_possible_cpus()) + return -EINVAL; + + return per_cpu(tpmi_cpu_info, cpu_no).punit_domain_id; +} +EXPORT_SYMBOL_NS_GPL(tpmi_get_power_domain_id, INTEL_TPMI_POWER_DOMAIN); + +cpumask_t *tpmi_get_power_domain_mask(int cpu_no) +{ + struct tpmi_cpu_info *info; + cpumask_t *mask; + int index; + + if (cpu_no >= num_possible_cpus()) + return NULL; + + info = &per_cpu(tpmi_cpu_info, cpu_no); + if (!tpmi_domain_is_valid(info)) + return NULL; + + index = info->pkg_id * MAX_POWER_DOMAINS + info->punit_domain_id; + guard(mutex)(&tpmi_lock); + mask = &tpmi_power_domain_mask[index]; + + return mask; +} +EXPORT_SYMBOL_NS_GPL(tpmi_get_power_domain_mask, INTEL_TPMI_POWER_DOMAIN); + +int 
tpmi_get_linux_die_id(int pkg_id, int domain_id) +{ + if (pkg_id >= topology_max_packages() || domain_id >= MAX_POWER_DOMAINS) + return -EINVAL; + + return domain_die_map[pkg_id * MAX_POWER_DOMAINS + domain_id]; +} +EXPORT_SYMBOL_NS_GPL(tpmi_get_linux_die_id, INTEL_TPMI_POWER_DOMAIN); + +static int tpmi_get_logical_id(unsigned int cpu, struct tpmi_cpu_info *info) +{ + u64 data; + int ret; + + ret = rdmsrl_safe(MSR_PM_LOGICAL_ID, &data); + if (ret) + return ret; + + info->punit_domain_id = FIELD_GET(PM_DOMAIN_ID_MASK, data); + if (info->punit_domain_id >= MAX_POWER_DOMAINS) + return -EINVAL; + + info->punit_thread_id = FIELD_GET(LP_ID_MASK, data); + info->punit_core_id = FIELD_GET(MODULE_ID_MASK, data); + info->pkg_id = topology_logical_package_id(cpu); + info->linux_cpu = cpu; + + return 0; +} + +static int tpmi_cpu_online(unsigned int cpu) +{ + struct tpmi_cpu_info *info = &per_cpu(tpmi_cpu_info, cpu); + int ret, index; + + /* Don't fail CPU online for some bad mapping of CPUs */ + ret = tpmi_get_logical_id(cpu, info); + if (ret) + return 0; + + index = info->pkg_id * MAX_POWER_DOMAINS + info->punit_domain_id; + + guard(mutex)(&tpmi_lock); + cpumask_set_cpu(cpu, &tpmi_power_domain_mask[index]); + hash_add(tpmi_cpu_hash, &info->hnode, info->punit_core_id); + + domain_die_map[info->pkg_id * MAX_POWER_DOMAINS + info->punit_domain_id] = + topology_die_id(cpu); + + return 0; +} + +static int __init tpmi_init(void) +{ + const struct x86_cpu_id *id; + u64 data; + int ret; + + id = x86_match_cpu(tpmi_cpu_ids); + if (!id) + return -ENODEV; + + /* Check for MSR 0x54 presence */ + ret = rdmsrl_safe(MSR_PM_LOGICAL_ID, &data); + if (ret) + return ret; + + tpmi_power_domain_mask = kcalloc(size_mul(topology_max_packages(), MAX_POWER_DOMAINS), + sizeof(*tpmi_power_domain_mask), GFP_KERNEL); + if (!tpmi_power_domain_mask) + return -ENOMEM; + + domain_die_map = kcalloc(size_mul(topology_max_packages(), MAX_POWER_DOMAINS), + sizeof(*domain_die_map), GFP_KERNEL); + if (!domain_die_map) { + ret = -ENOMEM; + goto free_domain_mask; + } + + ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, + "platform/x86/tpmi_power_domains:online", + tpmi_cpu_online, NULL); + if (ret < 0) + goto free_domain_map; + + tpmi_hp_state = ret; + + return 0; + +free_domain_map: + kfree(domain_die_map); + +free_domain_mask: + kfree(tpmi_power_domain_mask); + + return ret; +} +module_init(tpmi_init) + +static void __exit tpmi_exit(void) +{ + cpuhp_remove_state(tpmi_hp_state); + kfree(tpmi_power_domain_mask); + kfree(domain_die_map); +} +module_exit(tpmi_exit) + +MODULE_DESCRIPTION("TPMI Power Domains Mapping"); +MODULE_LICENSE("GPL"); diff --git a/drivers/platform/x86/intel/tpmi_power_domains.h b/drivers/platform/x86/intel/tpmi_power_domains.h new file mode 100644 index 0000000000000..2fd0dd7afbd22 --- /dev/null +++ b/drivers/platform/x86/intel/tpmi_power_domains.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Mapping of TPMI power domain and CPUs + * + * Copyright (c) 2024, Intel Corporation. 
+ */ + +#ifndef _TPMI_POWER_DOMAINS_H_ +#define _TPMI_POWER_DOMAINS_H_ + +#include + +int tpmi_get_linux_cpu_number(int package_id, int die_id, int punit_core_id); +int tpmi_get_punit_core_number(int cpu_no); +int tpmi_get_power_domain_id(int cpu_no); +cpumask_t *tpmi_get_power_domain_mask(int cpu_no); +int tpmi_get_linux_die_id(int pkg_id, int domain_id); + +#endif diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c index 4e880585cbe4d..577da9867b213 100644 --- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c +++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c @@ -43,6 +43,29 @@ static ssize_t show_package_id(struct kobject *kobj, struct kobj_attribute *attr return sprintf(buf, "%u\n", data->package_id); } +#define MAX_UNCORE_AGENT_TYPES 4 + +/* The order follows AGENT_TYPE_* defines */ +static const char *agent_name[MAX_UNCORE_AGENT_TYPES] = {"core", "cache", "memory", "io"}; + +static ssize_t show_agent_types(struct kobject *kobj, struct kobj_attribute *attr, char *buf) +{ + struct uncore_data *data = container_of(attr, struct uncore_data, agent_types_kobj_attr); + unsigned long agent_mask = data->agent_type_mask; + int agent, length = 0; + + for_each_set_bit(agent, &agent_mask, MAX_UNCORE_AGENT_TYPES) { + if (length) + length += sysfs_emit_at(buf, length, " "); + + length += sysfs_emit_at(buf, length, "%s", agent_name[agent]); + } + + length += sysfs_emit_at(buf, length, "\n"); + + return length; +} + static ssize_t show_attr(struct uncore_data *data, char *buf, enum uncore_index index) { unsigned int value; @@ -60,11 +83,16 @@ static ssize_t show_attr(struct uncore_data *data, char *buf, enum uncore_index static ssize_t store_attr(struct uncore_data *data, const char *buf, ssize_t count, enum uncore_index index) { - unsigned int input; + unsigned int input = 0; int ret; - if (kstrtouint(buf, 10, &input)) - return -EINVAL; + if (index == UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE) { + if (kstrtobool(buf, (bool *)&input)) + return -EINVAL; + } else { + if (kstrtouint(buf, 10, &input)) + return -EINVAL; + } mutex_lock(&uncore_lock); ret = uncore_write(data, input, index); @@ -103,6 +131,20 @@ show_uncore_attr(max_freq_khz, UNCORE_INDEX_MAX_FREQ); show_uncore_attr(current_freq_khz, UNCORE_INDEX_CURRENT_FREQ); +store_uncore_attr(elc_low_threshold_percent, UNCORE_INDEX_EFF_LAT_CTRL_LOW_THRESHOLD); +store_uncore_attr(elc_high_threshold_percent, UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD); +store_uncore_attr(elc_high_threshold_enable, + UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE); +store_uncore_attr(elc_floor_freq_khz, UNCORE_INDEX_EFF_LAT_CTRL_FREQ); + +show_uncore_attr(elc_low_threshold_percent, UNCORE_INDEX_EFF_LAT_CTRL_LOW_THRESHOLD); +show_uncore_attr(elc_high_threshold_percent, UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD); +show_uncore_attr(elc_high_threshold_enable, + UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE); +show_uncore_attr(elc_floor_freq_khz, UNCORE_INDEX_EFF_LAT_CTRL_FREQ); + +show_uncore_attr(die_id, UNCORE_INDEX_DIE_ID); + #define show_uncore_data(member_name) \ static ssize_t show_##member_name(struct kobject *kobj, \ struct kobj_attribute *attr, char *buf)\ @@ -146,7 +188,8 @@ show_uncore_data(initial_max_freq_khz); static int create_attr_group(struct uncore_data *data, char *name) { - int ret, freq, index = 0; + int ret, index = 0; + unsigned int val; init_attribute_rw(max_freq_khz); init_attribute_rw(min_freq_khz); 
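As context for the show_agent_types() callback added above, here is a standalone sketch of the same bitmask-to-string formatting, using plain snprintf() in place of sysfs_emit_at(); the mask value and buffer size are illustrative only:

	#include <stdio.h>

	static const char * const agent_name[] = { "core", "cache", "memory", "io" };

	/* Format a 4-bit agent mask the way show_agent_types() emits it. */
	static int format_agents(unsigned int mask, char *buf, int len)
	{
		int pos = 0;
		unsigned int bit;

		for (bit = 0; bit < 4; bit++) {
			if (!(mask & (1u << bit)))
				continue;
			pos += snprintf(buf + pos, len - pos, "%s%s",
					pos ? " " : "", agent_name[bit]);
		}
		pos += snprintf(buf + pos, len - pos, "\n");
		return pos;
	}

	int main(void)
	{
		char buf[64];

		format_agents(0x3, buf, sizeof(buf));	/* core | cache */
		printf("%s", buf);			/* prints "core cache" */
		return 0;
	}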
@@ -161,6 +204,15 @@ static int create_attr_group(struct uncore_data *data, char *name) data->uncore_attrs[index++] = &data->fabric_cluster_id_kobj_attr.attr; init_attribute_root_ro(package_id); data->uncore_attrs[index++] = &data->package_id_kobj_attr.attr; + if (data->agent_type_mask) { + init_attribute_ro(agent_types); + data->uncore_attrs[index++] = &data->agent_types_kobj_attr.attr; + } + if (topology_max_die_per_package() > 1 && + data->agent_type_mask & AGENT_TYPE_CORE) { + init_attribute_ro(die_id); + data->uncore_attrs[index++] = &data->die_id_kobj_attr.attr; + } } data->uncore_attrs[index++] = &data->max_freq_khz_kobj_attr.attr; @@ -168,10 +220,24 @@ static int create_attr_group(struct uncore_data *data, char *name) data->uncore_attrs[index++] = &data->initial_min_freq_khz_kobj_attr.attr; data->uncore_attrs[index++] = &data->initial_max_freq_khz_kobj_attr.attr; - ret = uncore_read(data, &freq, UNCORE_INDEX_CURRENT_FREQ); + ret = uncore_read(data, &val, UNCORE_INDEX_CURRENT_FREQ); if (!ret) data->uncore_attrs[index++] = &data->current_freq_khz_kobj_attr.attr; + ret = uncore_read(data, &val, UNCORE_INDEX_EFF_LAT_CTRL_LOW_THRESHOLD); + if (!ret) { + init_attribute_rw(elc_low_threshold_percent); + init_attribute_rw(elc_high_threshold_percent); + init_attribute_rw(elc_high_threshold_enable); + init_attribute_rw(elc_floor_freq_khz); + + data->uncore_attrs[index++] = &data->elc_low_threshold_percent_kobj_attr.attr; + data->uncore_attrs[index++] = &data->elc_high_threshold_percent_kobj_attr.attr; + data->uncore_attrs[index++] = + &data->elc_high_threshold_enable_kobj_attr.attr; + data->uncore_attrs[index++] = &data->elc_floor_freq_khz_kobj_attr.attr; + } + data->uncore_attrs[index] = NULL; data->uncore_attr_group.name = name; diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.h b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.h index 4c245b945e4ea..0abe850ef54ea 100644 --- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.h +++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.h @@ -11,6 +11,18 @@ #include +/* + * Define uncore agents, which are under uncore frequency control. + * Defined in the same order as specified in the TPMI UFS Specifications. + * It is possible that there are common uncore frequency control to more than + * one hardware agents. So, these defines are used as a bit mask. 
+*/ + +#define AGENT_TYPE_CORE 0x01 +#define AGENT_TYPE_CACHE 0x02 +#define AGENT_TYPE_MEMORY 0x04 +#define AGENT_TYPE_IO 0x08 + /** * struct uncore_data - Encapsulate all uncore data * @stored_uncore_data: Last user changed MSR 620 value, which will be restored @@ -25,15 +37,25 @@ * @cluster_id: cluster id in a domain * @instance_id: Unique instance id to append to directory name * @name: Sysfs entry name for this instance + * @agent_type_mask: Bit mask of all hardware agents for this domain * @uncore_attr_group: Attribute group storage * @max_freq_khz_kobj_attr: Storage for kobject attribute max_freq_khz - * @mix_freq_khz_kobj_attr: Storage for kobject attribute min_freq_khz + * @min_freq_khz_kobj_attr: Storage for kobject attribute min_freq_khz * @initial_max_freq_khz_kobj_attr: Storage for kobject attribute initial_max_freq_khz * @initial_min_freq_khz_kobj_attr: Storage for kobject attribute initial_min_freq_khz * @current_freq_khz_kobj_attr: Storage for kobject attribute current_freq_khz * @domain_id_kobj_attr: Storage for kobject attribute domain_id * @fabric_cluster_id_kobj_attr: Storage for kobject attribute fabric_cluster_id * @package_id_kobj_attr: Storage for kobject attribute package_id + * @elc_low_threshold_percent_kobj_attr: + * Storage for kobject attribute elc_low_threshold_percent + * @elc_high_threshold_percent_kobj_attr: + * Storage for kobject attribute elc_high_threshold_percent + * @elc_high_threshold_enable_kobj_attr: + * Storage for kobject attribute elc_high_threshold_enable + * @elc_floor_freq_khz_kobj_attr: Storage for kobject attribute elc_floor_freq_khz + * @agent_types_kobj_attr: Storage for kobject attribute agent_type + * @die_id_kobj_attr: Attribute storage for die_id information * @uncore_attrs: Attribute storage for group creation * * This structure is used to encapsulate all data related to uncore sysfs @@ -51,6 +73,7 @@ struct uncore_data { int cluster_id; int instance_id; char name[32]; + u16 agent_type_mask; struct attribute_group uncore_attr_group; struct kobj_attribute max_freq_khz_kobj_attr; @@ -61,7 +84,13 @@ struct uncore_data { struct kobj_attribute domain_id_kobj_attr; struct kobj_attribute fabric_cluster_id_kobj_attr; struct kobj_attribute package_id_kobj_attr; - struct attribute *uncore_attrs[9]; + struct kobj_attribute elc_low_threshold_percent_kobj_attr; + struct kobj_attribute elc_high_threshold_percent_kobj_attr; + struct kobj_attribute elc_high_threshold_enable_kobj_attr; + struct kobj_attribute elc_floor_freq_khz_kobj_attr; + struct kobj_attribute agent_types_kobj_attr; + struct kobj_attribute die_id_kobj_attr; + struct attribute *uncore_attrs[15]; }; #define UNCORE_DOMAIN_ID_INVALID -1 @@ -70,6 +99,11 @@ enum uncore_index { UNCORE_INDEX_MIN_FREQ, UNCORE_INDEX_MAX_FREQ, UNCORE_INDEX_CURRENT_FREQ, + UNCORE_INDEX_EFF_LAT_CTRL_LOW_THRESHOLD, + UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD, + UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE, + UNCORE_INDEX_EFF_LAT_CTRL_FREQ, + UNCORE_INDEX_DIE_ID, }; int uncore_freq_common_init(int (*read)(struct uncore_data *data, unsigned int *value, diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c index cd48bd4bf6461..b03dbf5695b0f 100644 --- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c +++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c @@ -26,10 +26,12 @@ #include #include +#include "../tpmi_power_domains.h" #include "uncore-frequency-common.h" #define 
UNCORE_MAJOR_VERSION 0 #define UNCORE_MINOR_VERSION 2 +#define UNCORE_ELC_SUPPORTED_VERSION 2 #define UNCORE_HEADER_INDEX 0 #define UNCORE_FABRIC_CLUSTER_OFFSET 8 @@ -46,7 +48,9 @@ struct tpmi_uncore_struct; /* Information for each cluster */ struct tpmi_uncore_cluster_info { bool root_domain; + bool elc_supported; u8 __iomem *cluster_base; + u16 cdie_id; struct uncore_data uncore_data; struct tpmi_uncore_struct *uncore_root; }; @@ -75,6 +79,10 @@ struct tpmi_uncore_struct { /* Bit definitions for CONTROL register */ #define UNCORE_MAX_RATIO_MASK GENMASK_ULL(14, 8) #define UNCORE_MIN_RATIO_MASK GENMASK_ULL(21, 15) +#define UNCORE_EFF_LAT_CTRL_RATIO_MASK GENMASK_ULL(28, 22) +#define UNCORE_EFF_LAT_CTRL_LOW_THRESHOLD_MASK GENMASK_ULL(38, 32) +#define UNCORE_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE BIT(39) +#define UNCORE_EFF_LAT_CTRL_HIGH_THRESHOLD_MASK GENMASK_ULL(46, 40) /* Helper function to read MMIO offset for max/min control frequency */ static void read_control_freq(struct tpmi_uncore_cluster_info *cluster_info, @@ -89,6 +97,48 @@ static void read_control_freq(struct tpmi_uncore_cluster_info *cluster_info, *value = FIELD_GET(UNCORE_MIN_RATIO_MASK, control) * UNCORE_FREQ_KHZ_MULTIPLIER; } +/* Helper function to read efficiency latency control values over MMIO */ +static int read_eff_lat_ctrl(struct uncore_data *data, unsigned int *val, enum uncore_index index) +{ + struct tpmi_uncore_cluster_info *cluster_info; + u64 ctrl; + + cluster_info = container_of(data, struct tpmi_uncore_cluster_info, uncore_data); + if (cluster_info->root_domain) + return -ENODATA; + + if (!cluster_info->elc_supported) + return -EOPNOTSUPP; + + ctrl = readq(cluster_info->cluster_base + UNCORE_CONTROL_INDEX); + + switch (index) { + case UNCORE_INDEX_EFF_LAT_CTRL_LOW_THRESHOLD: + *val = FIELD_GET(UNCORE_EFF_LAT_CTRL_LOW_THRESHOLD_MASK, ctrl); + *val *= 100; + *val = DIV_ROUND_UP(*val, FIELD_MAX(UNCORE_EFF_LAT_CTRL_LOW_THRESHOLD_MASK)); + break; + + case UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD: + *val = FIELD_GET(UNCORE_EFF_LAT_CTRL_HIGH_THRESHOLD_MASK, ctrl); + *val *= 100; + *val = DIV_ROUND_UP(*val, FIELD_MAX(UNCORE_EFF_LAT_CTRL_HIGH_THRESHOLD_MASK)); + break; + + case UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE: + *val = FIELD_GET(UNCORE_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE, ctrl); + break; + case UNCORE_INDEX_EFF_LAT_CTRL_FREQ: + *val = FIELD_GET(UNCORE_EFF_LAT_CTRL_RATIO_MASK, ctrl) * UNCORE_FREQ_KHZ_MULTIPLIER; + break; + + default: + return -EOPNOTSUPP; + } + + return 0; +} + #define UNCORE_MAX_RATIO FIELD_MAX(UNCORE_MAX_RATIO_MASK) /* Helper for sysfs read for max/min frequencies. 
Called under mutex locks */ @@ -137,6 +187,82 @@ static int uncore_read_control_freq(struct uncore_data *data, unsigned int *valu return 0; } +/* Helper function for writing efficiency latency control values over MMIO */ +static int write_eff_lat_ctrl(struct uncore_data *data, unsigned int val, enum uncore_index index) +{ + struct tpmi_uncore_cluster_info *cluster_info; + u64 control; + + cluster_info = container_of(data, struct tpmi_uncore_cluster_info, uncore_data); + + if (cluster_info->root_domain) + return -ENODATA; + + if (!cluster_info->elc_supported) + return -EOPNOTSUPP; + + switch (index) { + case UNCORE_INDEX_EFF_LAT_CTRL_LOW_THRESHOLD: + if (val > 100) + return -EINVAL; + break; + + case UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD: + if (val > 100) + return -EINVAL; + break; + + case UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE: + if (val > 1) + return -EINVAL; + break; + + case UNCORE_INDEX_EFF_LAT_CTRL_FREQ: + val /= UNCORE_FREQ_KHZ_MULTIPLIER; + if (val > FIELD_MAX(UNCORE_EFF_LAT_CTRL_RATIO_MASK)) + return -EINVAL; + break; + + default: + return -EOPNOTSUPP; + } + + control = readq(cluster_info->cluster_base + UNCORE_CONTROL_INDEX); + + switch (index) { + case UNCORE_INDEX_EFF_LAT_CTRL_LOW_THRESHOLD: + val *= FIELD_MAX(UNCORE_EFF_LAT_CTRL_LOW_THRESHOLD_MASK); + val /= 100; + control &= ~UNCORE_EFF_LAT_CTRL_LOW_THRESHOLD_MASK; + control |= FIELD_PREP(UNCORE_EFF_LAT_CTRL_LOW_THRESHOLD_MASK, val); + break; + + case UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD: + val *= FIELD_MAX(UNCORE_EFF_LAT_CTRL_HIGH_THRESHOLD_MASK); + val /= 100; + control &= ~UNCORE_EFF_LAT_CTRL_HIGH_THRESHOLD_MASK; + control |= FIELD_PREP(UNCORE_EFF_LAT_CTRL_HIGH_THRESHOLD_MASK, val); + break; + + case UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE: + control &= ~UNCORE_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE; + control |= FIELD_PREP(UNCORE_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE, val); + break; + + case UNCORE_INDEX_EFF_LAT_CTRL_FREQ: + control &= ~UNCORE_EFF_LAT_CTRL_RATIO_MASK; + control |= FIELD_PREP(UNCORE_EFF_LAT_CTRL_RATIO_MASK, val); + break; + + default: + break; + } + + writeq(control, cluster_info->cluster_base + UNCORE_CONTROL_INDEX); + + return 0; +} + /* Helper function to write MMIO offset for max/min control frequency */ static void write_control_freq(struct tpmi_uncore_cluster_info *cluster_info, unsigned int input, unsigned int index) @@ -156,7 +282,7 @@ static void write_control_freq(struct tpmi_uncore_cluster_info *cluster_info, un writeq(control, (cluster_info->cluster_base + UNCORE_CONTROL_INDEX)); } -/* Callback for sysfs write for max/min frequencies. Called under mutex locks */ +/* Helper for sysfs write for max/min frequencies. 
Called under mutex locks */ static int uncore_write_control_freq(struct uncore_data *data, unsigned int input, enum uncore_index index) { @@ -223,9 +349,102 @@ static int uncore_read_freq(struct uncore_data *data, unsigned int *freq) return 0; } +/* + * Agent types as per the TPMI UFS Specification for UFS_STATUS + * Agent Type - Core Bit: 23 + * Agent Type - Cache Bit: 24 + * Agent Type - Memory Bit: 25 + * Agent Type - IO Bit: 26 + */ + +#define UNCORE_AGENT_TYPES GENMASK_ULL(26, 23) + +/* Helper function to read agent type over MMIO and set the agent type mask */ +static void uncore_set_agent_type(struct tpmi_uncore_cluster_info *cluster_info) +{ + u64 status; + + status = readq((u8 __iomem *)cluster_info->cluster_base + UNCORE_STATUS_INDEX); + cluster_info->uncore_data.agent_type_mask = FIELD_GET(UNCORE_AGENT_TYPES, status); +} + +#define MAX_PARTITIONS 2 + +/* IO domain ID start index for a partition */ +static u8 io_die_start[MAX_PARTITIONS]; + +/* Next IO domain ID index after the current partition IO die IDs */ +static u8 io_die_index_next; + +/* Lock to protect io_die_start, io_die_index_next */ +static DEFINE_MUTEX(domain_lock); + +static void set_domain_id(int id, int num_resources, + struct intel_tpmi_plat_info *plat_info, + struct tpmi_uncore_cluster_info *cluster_info) +{ + u8 part_io_index, cdie_range, pkg_io_index, max_dies; + + if (plat_info->partition >= MAX_PARTITIONS) { + cluster_info->uncore_data.domain_id = id; + return; + } + + if (cluster_info->uncore_data.agent_type_mask & AGENT_TYPE_CORE) { + cluster_info->uncore_data.domain_id = cluster_info->cdie_id; + return; + } + + /* Unlikely but cdie_mask may have holes, so take range */ + cdie_range = fls(plat_info->cdie_mask) - ffs(plat_info->cdie_mask) + 1; + max_dies = topology_max_die_per_package(); + + /* + * If the CPU doesn't enumerate dies, then use current cdie range + * as the max. + */ + if (cdie_range > max_dies) + max_dies = cdie_range; + + guard(mutex)(&domain_lock); + + if (!io_die_index_next) + io_die_index_next = max_dies; + + if (!io_die_start[plat_info->partition]) { + io_die_start[plat_info->partition] = io_die_index_next; + /* + * number of IO dies = num_resources - cdie_range. Hence + * next partition io_die_index_next is set after IO dies + * in the current partition. + */ + io_die_index_next += (num_resources - cdie_range); + } + + /* + * Index from IO die start within the partition: + * This is the first valid domain after the cdies. + * For example the current resource index 5 and cdies end at + * index 3 (cdie_cnt = 4). Then the IO only index 5 - 4 = 1. + */ + part_io_index = id - cdie_range; + + /* + * Add to the IO die start index for this partition in this package + * to make unique in the package. + */ + pkg_io_index = io_die_start[plat_info->partition] + part_io_index; + + /* Assign this to domain ID */ + cluster_info->uncore_data.domain_id = pkg_io_index; +} + /* Callback for sysfs read for TPMI uncore values. Called under mutex locks. 
*/ static int uncore_read(struct uncore_data *data, unsigned int *value, enum uncore_index index) { + struct tpmi_uncore_cluster_info *cluster_info; + int ret; + switch (index) { case UNCORE_INDEX_MIN_FREQ: case UNCORE_INDEX_MAX_FREQ: @@ -234,6 +453,43 @@ static int uncore_read(struct uncore_data *data, unsigned int *value, enum uncor case UNCORE_INDEX_CURRENT_FREQ: return uncore_read_freq(data, value); + case UNCORE_INDEX_EFF_LAT_CTRL_LOW_THRESHOLD: + case UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD: + case UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE: + case UNCORE_INDEX_EFF_LAT_CTRL_FREQ: + return read_eff_lat_ctrl(data, value, index); + + case UNCORE_INDEX_DIE_ID: + cluster_info = container_of(data, struct tpmi_uncore_cluster_info, uncore_data); + ret = tpmi_get_linux_die_id(cluster_info->uncore_data.package_id, + cluster_info->cdie_id); + if (ret < 0) + return ret; + + *value = ret; + return 0; + + default: + break; + } + + return -EOPNOTSUPP; +} + +/* Callback for sysfs write for TPMI uncore data. Called under mutex locks. */ +static int uncore_write(struct uncore_data *data, unsigned int value, enum uncore_index index) +{ + switch (index) { + case UNCORE_INDEX_EFF_LAT_CTRL_LOW_THRESHOLD: + case UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD: + case UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE: + case UNCORE_INDEX_EFF_LAT_CTRL_FREQ: + return write_eff_lat_ctrl(data, value, index); + + case UNCORE_INDEX_MIN_FREQ: + case UNCORE_INDEX_MAX_FREQ: + return uncore_write_control_freq(data, value, index); + default: break; } @@ -262,6 +518,16 @@ static void remove_cluster_entries(struct tpmi_uncore_struct *tpmi_uncore) } } +static void set_cdie_id(int domain_id, struct tpmi_uncore_cluster_info *cluster_info, + struct intel_tpmi_plat_info *plat_info) +{ + + cluster_info->cdie_id = domain_id; + + if (plat_info->cdie_mask && cluster_info->uncore_data.agent_type_mask & AGENT_TYPE_CORE) + cluster_info->cdie_id = domain_id + ffs(plat_info->cdie_mask) - 1; +} + #define UNCORE_VERSION_MASK GENMASK_ULL(7, 0) #define UNCORE_LOCAL_FABRIC_CLUSTER_ID_MASK GENMASK_ULL(15, 8) #define UNCORE_CLUSTER_OFF_MASK GENMASK_ULL(7, 0) @@ -291,7 +557,7 @@ static int uncore_probe(struct auxiliary_device *auxdev, const struct auxiliary_ return -EINVAL; /* Register callbacks to uncore core */ - ret = uncore_freq_common_init(uncore_read, uncore_write_control_freq); + ret = uncore_freq_common_init(uncore_read, uncore_write); if (ret) return ret; @@ -404,14 +670,22 @@ static int uncore_probe(struct auxiliary_device *auxdev, const struct auxiliary_ cluster_info->cluster_base = pd_info->uncore_base + mask; + uncore_set_agent_type(cluster_info); + cluster_info->uncore_data.package_id = pkg; /* There are no dies like Cascade Lake */ cluster_info->uncore_data.die_id = 0; - cluster_info->uncore_data.domain_id = i; cluster_info->uncore_data.cluster_id = j; + set_cdie_id(i, cluster_info, plat_info); + + set_domain_id(i, num_resources, plat_info, cluster_info); + cluster_info->uncore_root = tpmi_uncore; + if (TPMI_MINOR_VERSION(pd_info->ufs_header_ver) >= UNCORE_ELC_SUPPORTED_VERSION) + cluster_info->elc_supported = true; + ret = uncore_freq_add_entry(&cluster_info->uncore_data, 0); if (ret) { cluster_info->cluster_base = NULL; @@ -430,6 +704,9 @@ static int uncore_probe(struct auxiliary_device *auxdev, const struct auxiliary_ auxiliary_set_drvdata(auxdev, tpmi_uncore); + if (topology_max_die_per_package() > 1 || plat_info->partition) + return 0; + tpmi_uncore->root_cluster.root_domain = true; tpmi_uncore->root_cluster.uncore_root = 
tpmi_uncore; @@ -453,7 +730,9 @@ static void uncore_remove(struct auxiliary_device *auxdev) { struct tpmi_uncore_struct *tpmi_uncore = auxiliary_get_drvdata(auxdev); - uncore_freq_remove_die_entry(&tpmi_uncore->root_cluster.uncore_data); + if (tpmi_uncore->root_cluster.root_domain) + uncore_freq_remove_die_entry(&tpmi_uncore->root_cluster.uncore_data); + remove_cluster_entries(tpmi_uncore); uncore_freq_common_exit(); @@ -475,5 +754,6 @@ module_auxiliary_driver(intel_uncore_aux_driver); MODULE_IMPORT_NS(INTEL_TPMI); MODULE_IMPORT_NS(INTEL_UNCORE_FREQUENCY); +MODULE_IMPORT_NS(INTEL_TPMI_POWER_DOMAIN); MODULE_DESCRIPTION("Intel TPMI UFS Driver"); MODULE_LICENSE("GPL"); diff --git a/include/linux/array_size.h b/include/linux/array_size.h new file mode 100644 index 0000000000000..06d7d83196ca3 --- /dev/null +++ b/include/linux/array_size.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_ARRAY_SIZE_H +#define _LINUX_ARRAY_SIZE_H + +#include + +/** + * ARRAY_SIZE - get the number of elements in array @arr + * @arr: array to be sized + */ +#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr)) + +#endif /* _LINUX_ARRAY_SIZE_H */ diff --git a/include/linux/intel_tpmi.h b/include/linux/intel_tpmi.h index 1e880cb0f4541..ff480b47ae642 100644 --- a/include/linux/intel_tpmi.h +++ b/include/linux/intel_tpmi.h @@ -21,6 +21,7 @@ enum intel_tpmi_id { TPMI_ID_PEM = 1, /* Power and Perf excursion Monitor */ TPMI_ID_UNCORE = 2, /* Uncore Frequency Scaling */ TPMI_ID_SST = 5, /* Speed Select Technology */ + TPMI_ID_PLR = 0xc, /* Performance Limit Reasons */ TPMI_CONTROL_ID = 0x80, /* Special ID for getting feature status */ TPMI_INFO_ID = 0x81, /* Special ID for PCI BDF and Package ID information */ }; @@ -53,4 +54,5 @@ struct resource *tpmi_get_resource_at_index(struct auxiliary_device *auxdev, int int tpmi_get_resource_count(struct auxiliary_device *auxdev); int tpmi_get_feature_status(struct auxiliary_device *auxdev, int feature_id, bool *read_blocked, bool *write_blocked); +struct dentry *tpmi_get_debugfs_dir(struct auxiliary_device *auxdev); #endif diff --git a/include/linux/kernel.h b/include/linux/kernel.h index cee8fe87e9f4f..d9ad21058eed9 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -13,6 +13,7 @@ #include #include +#include #include #include #include @@ -50,12 +51,6 @@ #define READ 0 #define WRITE 1 -/** - * ARRAY_SIZE - get the number of elements in array @arr - * @arr: array to be sized - */ -#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr)) - #define PTR_IF(cond, ptr) ((cond) ? 
(ptr) : NULL) #define u64_to_user_ptr(x) ( \ diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h index 386ab580b839b..234bcdb1fba45 100644 --- a/include/linux/seq_file.h +++ b/include/linux/seq_file.h @@ -207,6 +207,21 @@ static const struct file_operations __name ## _fops = { \ .release = single_release, \ } +#define DEFINE_SHOW_STORE_ATTRIBUTE(__name) \ +static int __name ## _open(struct inode *inode, struct file *file) \ +{ \ + return single_open(file, __name ## _show, inode->i_private); \ +} \ + \ +static const struct file_operations __name ## _fops = { \ + .owner = THIS_MODULE, \ + .open = __name ## _open, \ + .read = seq_read, \ + .write = __name ## _write, \ + .llseek = seq_lseek, \ + .release = single_release, \ +} + #define DEFINE_PROC_SHOW_ATTRIBUTE(__name) \ static int __name ## _open(struct inode *inode, struct file *file) \ { \ diff --git a/include/linux/string.h b/include/linux/string.h index 5077776e995e0..ce137830a0b99 100644 --- a/include/linux/string.h +++ b/include/linux/string.h @@ -2,6 +2,7 @@ #ifndef _LINUX_STRING_H_ #define _LINUX_STRING_H_ +#include #include /* for inline */ #include /* for size_t */ #include /* for NULL */ diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c index e8a4499155667..357b3a6a3b4c9 100644 --- a/net/batman-adv/main.c +++ b/net/batman-adv/main.c @@ -6,6 +6,7 @@ #include "main.h" +#include #include #include #include @@ -20,7 +21,6 @@ #include #include #include -#include #include #include #include diff --git a/net/batman-adv/netlink.c b/net/batman-adv/netlink.c index 0c64d81a77617..1f7ed9d4f6fdf 100644 --- a/net/batman-adv/netlink.c +++ b/net/batman-adv/netlink.c @@ -7,6 +7,7 @@ #include "netlink.h" #include "main.h" +#include #include #include #include @@ -20,7 +21,6 @@ #include #include #include -#include #include #include #include
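For context on the new DEFINE_SHOW_STORE_ATTRIBUTE() helper added to seq_file.h above, a minimal sketch of how a driver wires it to a writable debugfs file, in the same way the PLR driver in this series pairs plr_status_show()/plr_status_write(); the foo_* names and the "foo_example" directory are illustrative only:

	#include <linux/debugfs.h>
	#include <linux/module.h>
	#include <linux/seq_file.h>

	static int foo_show(struct seq_file *s, void *unused)
	{
		seq_puts(s, "example state\n");
		return 0;
	}

	static ssize_t foo_write(struct file *filp, const char __user *ubuf,
				 size_t count, loff_t *ppos)
	{
		/* Parse ubuf and update driver state here; return count on success. */
		return count;
	}
	DEFINE_SHOW_STORE_ATTRIBUTE(foo);

	static struct dentry *foo_dir;

	static int __init foo_init(void)
	{
		foo_dir = debugfs_create_dir("foo_example", NULL);
		debugfs_create_file("status", 0644, foo_dir, NULL, &foo_fops);
		return 0;
	}

	static void __exit foo_exit(void)
	{
		debugfs_remove_recursive(foo_dir);
	}

	module_init(foo_init);
	module_exit(foo_exit);
	MODULE_LICENSE("GPL");

The macro expands to a foo_open() wrapper around single_open() plus a foo_fops that forwards reads to seq_read() and writes to the driver's foo_write(), so the only driver-specific code is the show/write pair.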