Message-ID: <4f3c0938-7ca4-c1b5-06e4-a03bba47b408@linux.intel.com>
Date: Thu, 19 Dec 2024 16:53:34 +0200 (EET)
From: Ilpo Järvinen <ilpo.jarvinen@...ux.intel.com>
To: Mario Limonciello <mario.limonciello@....com>
cc: Borislav Petkov <bp@...en8.de>, Hans de Goede <hdegoede@...hat.com>,
x86@...nel.org, "Gautham R . Shenoy" <gautham.shenoy@....com>,
Perry Yuan <perry.yuan@....com>, LKML <linux-kernel@...r.kernel.org>,
linux-doc@...r.kernel.org, linux-pm@...r.kernel.org,
platform-driver-x86@...r.kernel.org,
Shyam Sundar S K <Shyam-sundar.S-k@....com>,
Perry Yuan <Perry.Yuan@....com>
Subject: Re: [PATCH v7 05/12] platform/x86: hfi: parse CPU core ranking data
from shared memory
On Sat, 30 Nov 2024, Mario Limonciello wrote:
> From: Perry Yuan <Perry.Yuan@....com>
>
> When `amd_hfi` driver is loaded, it will use PCCT subspace type 4 table
> to retrieve the shared memory address which contains the CPU core ranking
> table. This table includes a header that specifies the number of ranking
> data entries to be parsed and rank each CPU core with the Performance and
> Energy Efficiency capability as implemented by the CPU power management
> firmware.
>
> Once the table has been parsed, each CPU is assigned a ranking score
> within its class. Subsequently, when the scheduler selects cores, it
> chooses from the ranking list based on the assigned scores in each class,
> thereby ensuring the optimal selection of CPU cores according to their
> predefined classifications and priorities.
>
> Signed-off-by: Perry Yuan <Perry.Yuan@....com>
> Co-developed-by: Mario Limonciello <mario.limonciello@....com>
> Reviewed-by: Ilpo Järvinen <ilpo.jarvinen@...ux.intel.com>
> Signed-off-by: Mario Limonciello <mario.limonciello@....com>
> ---
> * Drop __packed
> ---
> drivers/platform/x86/amd/hfi/hfi.c | 196 +++++++++++++++++++++++++++++
> 1 file changed, 196 insertions(+)
>
> diff --git a/drivers/platform/x86/amd/hfi/hfi.c b/drivers/platform/x86/amd/hfi/hfi.c
> index 2cd71d79a22c9..3c90e93fd192c 100644
> --- a/drivers/platform/x86/amd/hfi/hfi.c
> +++ b/drivers/platform/x86/amd/hfi/hfi.c
> @@ -18,20 +18,72 @@
> #include <linux/io.h>
> #include <linux/kernel.h>
> #include <linux/module.h>
> +#include <linux/mailbox_client.h>
> #include <linux/mutex.h>
> +#include <linux/percpu-defs.h>
> #include <linux/platform_device.h>
> #include <linux/smp.h>
> +#include <linux/topology.h>
> +#include <linux/workqueue.h>
> +
> +#include <asm/cpu_device_id.h>
> +
> +#include <acpi/pcc.h>
> +#include <acpi/cppc_acpi.h>
>
> #define AMD_HFI_DRIVER "amd_hfi"
> +#define AMD_HFI_MAILBOX_COUNT 1
> +#define AMD_HETERO_RANKING_TABLE_VER 2
> +
> #define AMD_HETERO_CPUID_27 0x80000027
> +
> static struct platform_device *device;
Please add the empty lines already in the patch that introduces the
AMD_HETERO_CPUID_27 define.
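
That is, after the earlier patch this section should already read:

#define AMD_HETERO_CPUID_27 0x80000027

static struct platform_device *device;

so that this patch only adds its own defines on top.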
>
> +/**
> + * struct amd_shmem_info - Shared memory table for AMD HFI
> + *
> + * @header: The PCCT table header including signature, length, flags and command.
> + * @version_number: Version number of the table
> + * @n_logical_processors: Number of logical processors
> + * @n_capabilities: Number of ranking dimensions (performance, efficiency, etc.)
> + * @table_update_context: Command being sent over the subspace
> + * @n_bitmaps: Number of 32-bit bitmaps to enumerate all the APIC IDs
> + * This is based on the maximum APIC ID enumerated in the system
> + * @reserved: 24 bit spare
> + * @table_data: Bit Map(s) of enabled logical processors
> + * Followed by the ranking data for each logical processor
> + */
> +struct amd_shmem_info {
> + struct acpi_pcct_ext_pcc_shared_memory header;
> + u32 version_number :8,
> + n_logical_processors :8,
> + n_capabilities :8,
> + table_update_context :8;
> + u32 n_bitmaps :8,
> + reserved :24;
> + u32 table_data[];
> +};
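(Aside for readers: given this layout, the per-CPU ranking entries
start immediately after the n_bitmaps words of bitmap data in
table_data[]. A hypothetical helper, sketched here only to make the
offset arithmetic explicit:

static u32 *amd_shmem_ranking_data(struct amd_shmem_info *shmem)
{
	/* Ranking data follows the APIC ID bitmaps in table_data[] */
	return shmem->table_data + shmem->n_bitmaps;
}

This matches the "+ amd_hfi_data->shmem->n_bitmaps" arithmetic in
amd_hfi_fill_metadata() further down.)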
> +
> struct amd_hfi_data {
> const char *name;
> struct device *dev;
> struct mutex lock;
> +
> +	/* PCCT table related */
> + struct pcc_mbox_chan *pcc_chan;
> + void __iomem *pcc_comm_addr;
> + struct acpi_subtable_header *pcct_entry;
> + struct amd_shmem_info *shmem;
> };
>
> +/**
> + * struct amd_hfi_classes - HFI class capabilities per CPU
> + * @perf: Performance capability
> + * @eff: Power efficiency capability
> + *
> + * Capabilities of a logical processor in the ranking table. These capabilities
> + * are unitless and specific to each HFI class.
> + */
> struct amd_hfi_classes {
> u32 perf;
> u32 eff;
> @@ -40,23 +92,105 @@ struct amd_hfi_classes {
> /**
> * struct amd_hfi_cpuinfo - HFI workload class info per CPU
> * @cpu: cpu index
> + * @apic_id: apic id of the current cpu
> * @cpus: mask of cpus associated with amd_hfi_cpuinfo
> * @class_index: workload class ID index
> * @nr_class: max number of workload class supported
> + * @ipcc_scores: ipcc scores for each class
> * @amd_hfi_classes: current cpu workload class ranking data
> *
> * Parameters of a logical processor linked with hardware feedback class
> */
> struct amd_hfi_cpuinfo {
> int cpu;
> + u32 apic_id;
> cpumask_var_t cpus;
> s16 class_index;
> u8 nr_class;
> + int *ipcc_scores;
> struct amd_hfi_classes *amd_hfi_classes;
> };
>
> static DEFINE_PER_CPU(struct amd_hfi_cpuinfo, amd_hfi_cpuinfo) = {.class_index = -1};
>
> +static int find_cpu_index_by_apicid(unsigned int target_apicid)
> +{
> + int cpu_index;
> +
> + for_each_present_cpu(cpu_index) {
> + struct cpuinfo_x86 *info = &cpu_data(cpu_index);
> +
> + if (info->topo.apicid == target_apicid) {
> + pr_debug("match APIC id %d for CPU index: %d\n",
> + info->topo.apicid, cpu_index);
> + return cpu_index;
> + }
> + }
> +
> + return -ENODEV;
> +}
> +
> +static int amd_hfi_fill_metadata(struct amd_hfi_data *amd_hfi_data)
> +{
> + struct acpi_pcct_ext_pcc_slave *pcct_ext =
> + (struct acpi_pcct_ext_pcc_slave *)amd_hfi_data->pcct_entry;
> + void __iomem *pcc_comm_addr;
> +
> + pcc_comm_addr = acpi_os_ioremap(amd_hfi_data->pcc_chan->shmem_base_addr,
> + amd_hfi_data->pcc_chan->shmem_size);
> + if (!pcc_comm_addr) {
> + pr_err("failed to ioremap PCC common region mem\n");
> + return -ENOMEM;
> + }
> +
> + memcpy_fromio(amd_hfi_data->shmem, pcc_comm_addr, pcct_ext->length);
> + iounmap(pcc_comm_addr);
> +
> + if (amd_hfi_data->shmem->header.signature != PCC_SIGNATURE) {
> + pr_err("invalid signature in shared memory\n");
> + return -EINVAL;
> + }
> + if (amd_hfi_data->shmem->version_number != AMD_HETERO_RANKING_TABLE_VER) {
> + pr_err("invalid version %d\n", amd_hfi_data->shmem->version_number);
> + return -EINVAL;
> + }
> +
> + for (unsigned int i = 0; i < amd_hfi_data->shmem->n_bitmaps; i++) {
> + u32 bitmap = amd_hfi_data->shmem->table_data[i];
> +
> + for (unsigned int j = 0; j < BITS_PER_TYPE(u32); j++) {
> + struct amd_hfi_cpuinfo *info;
> + int apic_id = i * BITS_PER_TYPE(u32) + j;
> + int cpu_index;
Reverse xmas tree order for these declarations, please.
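
That is, longest declaration first:

			int apic_id = i * BITS_PER_TYPE(u32) + j;
			struct amd_hfi_cpuinfo *info;
			int cpu_index;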
--
i.
> +
> + if (!(bitmap & BIT(j)))
> + continue;
> +
> + cpu_index = find_cpu_index_by_apicid(apic_id);
> + if (cpu_index < 0) {
> + pr_warn("APIC ID %d not found\n", apic_id);
> + continue;
> + }
> +
> + info = per_cpu_ptr(&amd_hfi_cpuinfo, cpu_index);
> + info->apic_id = apic_id;
> +
> + /* Fill the ranking data for each logical processor */
> + for (unsigned int k = 0; k < info->nr_class; k++) {
> + u32 *table = amd_hfi_data->shmem->table_data +
> + amd_hfi_data->shmem->n_bitmaps +
> + i * info->nr_class;
> + info->amd_hfi_classes[k].eff = table[apic_id + 2 * k];
> + info->amd_hfi_classes[k].perf = table[apic_id + 2 * k + 1];
> + }
> + }
> + }
> +
> + return 0;
> +}
> +
> static int amd_hfi_alloc_class_data(struct platform_device *pdev)
> {
> struct amd_hfi_cpuinfo *hfi_cpuinfo;
> @@ -73,20 +207,78 @@ static int amd_hfi_alloc_class_data(struct platform_device *pdev)
>
> for_each_present_cpu(idx) {
> struct amd_hfi_classes *classes;
> + int *ipcc_scores;
>
> classes = devm_kzalloc(dev,
> nr_class_id * sizeof(struct amd_hfi_classes),
> GFP_KERNEL);
> if (!classes)
> return -ENOMEM;
> + ipcc_scores = devm_kcalloc(dev, nr_class_id, sizeof(int), GFP_KERNEL);
> + if (!ipcc_scores)
> + return -ENOMEM;
> hfi_cpuinfo = per_cpu_ptr(&amd_hfi_cpuinfo, idx);
> hfi_cpuinfo->amd_hfi_classes = classes;
> + hfi_cpuinfo->ipcc_scores = ipcc_scores;
> hfi_cpuinfo->nr_class = nr_class_id;
> }
>
> return 0;
> }
>
> +static int amd_hfi_metadata_parser(struct platform_device *pdev,
> + struct amd_hfi_data *amd_hfi_data)
> +{
> + struct acpi_pcct_ext_pcc_slave *pcct_ext;
> + struct acpi_subtable_header *pcct_entry;
> + struct mbox_chan *pcc_mbox_channels;
> + struct acpi_table_header *pcct_tbl;
> + struct pcc_mbox_chan *pcc_chan;
> + acpi_status status;
> + int ret;
> +
> + pcc_mbox_channels = devm_kcalloc(&pdev->dev, AMD_HFI_MAILBOX_COUNT,
> + sizeof(*pcc_mbox_channels), GFP_KERNEL);
> + if (!pcc_mbox_channels)
> + return -ENOMEM;
> +
> + pcc_chan = devm_kcalloc(&pdev->dev, AMD_HFI_MAILBOX_COUNT,
> + sizeof(*pcc_chan), GFP_KERNEL);
> + if (!pcc_chan)
> + return -ENOMEM;
> +
> + status = acpi_get_table(ACPI_SIG_PCCT, 0, &pcct_tbl);
> + if (ACPI_FAILURE(status) || !pcct_tbl)
> + return -ENODEV;
> +
> + /* get pointer to the first PCC subspace entry */
> + pcct_entry = (struct acpi_subtable_header *) (
> + (unsigned long)pcct_tbl + sizeof(struct acpi_table_pcct));
> +
> + pcc_chan->mchan = &pcc_mbox_channels[0];
> +
> + amd_hfi_data->pcc_chan = pcc_chan;
> + amd_hfi_data->pcct_entry = pcct_entry;
> + pcct_ext = (struct acpi_pcct_ext_pcc_slave *)pcct_entry;
> +
> + if (pcct_ext->length <= 0)
> + return -EINVAL;
> +
> + amd_hfi_data->shmem = devm_kzalloc(amd_hfi_data->dev, pcct_ext->length, GFP_KERNEL);
> + if (!amd_hfi_data->shmem)
> + return -ENOMEM;
> +
> + pcc_chan->shmem_base_addr = pcct_ext->base_address;
> + pcc_chan->shmem_size = pcct_ext->length;
> +
> + /* parse the shared memory info from the pcct table */
> + ret = amd_hfi_fill_metadata(amd_hfi_data);
> +
> + acpi_put_table(pcct_tbl);
> +
> + return ret;
> +}
> +
> static const struct acpi_device_id amd_hfi_platform_match[] = {
> {"AMDI0104", 0},
> { }
> @@ -115,6 +307,10 @@ static int amd_hfi_probe(struct platform_device *pdev)
> if (ret)
> return ret;
>
> + ret = amd_hfi_metadata_parser(pdev, amd_hfi_data);
> + if (ret)
> + return ret;
> +
> return 0;
> }
>
>