Message-ID: <2604d622-08b7-e517-b423-73e38dc5b286@redhat.com>
Date:   Sat, 2 Jul 2022 11:44:23 +0200
From:   Hans de Goede <hdegoede@...hat.com>
To:     Srinivas Pandruvada <srinivas.pandruvada@...ux.intel.com>,
        markgross@...nel.org
Cc:     platform-driver-x86@...r.kernel.org, linux-kernel@...r.kernel.org
Subject: Re: [PATCH v2] platform/x86: ISST: PUNIT device mapping with Sub-NUMA
 clustering

Hi,

On 6/29/22 21:48, Srinivas Pandruvada wrote:
> On a multiple package system using Sub-NUMA clustering, there is an issue
> mapping a Linux CPU number to its PUNIT PCI device when the manufacturer
> decides to reuse PCI bus numbers across packages. Bus numbers can be
> reused as long as they are in different domains (segments). In this case
> some CPUs will fail to find a PCI device to issue SST requests to.
> 
> When bus numbers are reused across CPU packages, proximity information
> is used instead: the CPU's NUMA node id is matched against the PUNIT PCI
> device's NUMA node id. But a package has only one PUNIT PCI device while
> it has multiple NUMA nodes (one for each sub-cluster). So the NUMA node
> id of the PUNIT PCI device can only match the node id of the CPUs in one
> sub-cluster of the package.
> 
> Since there can be only one PUNIT PCI device per package, a match
> against the NUMA node id of any sub-cluster in that package is valid
> for every CPU in that package. So store the match information in a
> per-package data structure and return it when a CPU's own node id does
> not match.
> 
> While here, use a define for the max bus number instead of hardcoding it.
> 
> Signed-off-by: Srinivas Pandruvada <srinivas.pandruvada@...ux.intel.com>

Thank you for your patch, I've applied it to my review-hans
branch:
https://git.kernel.org/pub/scm/linux/kernel/git/pdx86/platform-drivers-x86.git/log/?h=review-hans

Note it will show up in my review-hans branch once I've pushed my
local branch there, which might take a while.

Once I've run some tests on this branch the patches there will be
added to the platform-drivers-x86/for-next branch and eventually
will be included in the pdx86 pull-request to Linus for the next
merge-window.

Regards,

Hans


> ---
> v2
> - Use #define for the max bus number
> 
>  .../intel/speed_select_if/isst_if_common.c    | 39 +++++++++++++++----
>  1 file changed, 32 insertions(+), 7 deletions(-)
> 
> diff --git a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
> index e8424e70d81d..fd102678c75f 100644
> --- a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
> +++ b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
> @@ -277,29 +277,38 @@ static int isst_if_get_platform_info(void __user *argp)
>  	return 0;
>  }
>  
> +#define ISST_MAX_BUS_NUMBER	2
>  
>  struct isst_if_cpu_info {
>  	/* For BUS 0 and BUS 1 only, which we need for PUNIT interface */
> -	int bus_info[2];
> -	struct pci_dev *pci_dev[2];
> +	int bus_info[ISST_MAX_BUS_NUMBER];
> +	struct pci_dev *pci_dev[ISST_MAX_BUS_NUMBER];
>  	int punit_cpu_id;
>  	int numa_node;
>  };
>  
> +struct isst_if_pkg_info {
> +	struct pci_dev *pci_dev[ISST_MAX_BUS_NUMBER];
> +};
> +
>  static struct isst_if_cpu_info *isst_cpu_info;
> +static struct isst_if_pkg_info *isst_pkg_info;
> +
>  #define ISST_MAX_PCI_DOMAINS	8
>  
>  static struct pci_dev *_isst_if_get_pci_dev(int cpu, int bus_no, int dev, int fn)
>  {
>  	struct pci_dev *matched_pci_dev = NULL;
>  	struct pci_dev *pci_dev = NULL;
> -	int no_matches = 0;
> +	int no_matches = 0, pkg_id;
>  	int i, bus_number;
>  
> -	if (bus_no < 0 || bus_no > 1 || cpu < 0 || cpu >= nr_cpu_ids ||
> -	    cpu >= num_possible_cpus())
> +	if (bus_no < 0 || bus_no >= ISST_MAX_BUS_NUMBER || cpu < 0 ||
> +	    cpu >= nr_cpu_ids || cpu >= num_possible_cpus())
>  		return NULL;
>  
> +	pkg_id = topology_physical_package_id(cpu);
> +
>  	bus_number = isst_cpu_info[cpu].bus_info[bus_no];
>  	if (bus_number < 0)
>  		return NULL;
> @@ -324,6 +333,8 @@ static struct pci_dev *_isst_if_get_pci_dev(int cpu, int bus_no, int dev, int fn
>  		}
>  
>  		if (node == isst_cpu_info[cpu].numa_node) {
> +			isst_pkg_info[pkg_id].pci_dev[bus_no] = _pci_dev;
> +
>  			pci_dev = _pci_dev;
>  			break;
>  		}
> @@ -342,6 +353,10 @@ static struct pci_dev *_isst_if_get_pci_dev(int cpu, int bus_no, int dev, int fn
>  	if (!pci_dev && no_matches == 1)
>  		pci_dev = matched_pci_dev;
>  
> +	/* Return pci_dev pointer for any matched CPU in the package */
> +	if (!pci_dev)
> +		pci_dev = isst_pkg_info[pkg_id].pci_dev[bus_no];
> +
>  	return pci_dev;
>  }
>  
> @@ -361,8 +376,8 @@ struct pci_dev *isst_if_get_pci_dev(int cpu, int bus_no, int dev, int fn)
>  {
>  	struct pci_dev *pci_dev;
>  
> -	if (bus_no < 0 || bus_no > 1 || cpu < 0 || cpu >= nr_cpu_ids ||
> -	    cpu >= num_possible_cpus())
> +	if (bus_no < 0 || bus_no >= ISST_MAX_BUS_NUMBER  || cpu < 0 ||
> +	    cpu >= nr_cpu_ids || cpu >= num_possible_cpus())
>  		return NULL;
>  
>  	pci_dev = isst_cpu_info[cpu].pci_dev[bus_no];
> @@ -417,10 +432,19 @@ static int isst_if_cpu_info_init(void)
>  	if (!isst_cpu_info)
>  		return -ENOMEM;
>  
> +	isst_pkg_info = kcalloc(topology_max_packages(),
> +				sizeof(*isst_pkg_info),
> +				GFP_KERNEL);
> +	if (!isst_pkg_info) {
> +		kfree(isst_cpu_info);
> +		return -ENOMEM;
> +	}
> +
>  	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
>  				"platform/x86/isst-if:online",
>  				isst_if_cpu_online, NULL);
>  	if (ret < 0) {
> +		kfree(isst_pkg_info);
>  		kfree(isst_cpu_info);
>  		return ret;
>  	}
> @@ -433,6 +457,7 @@ static int isst_if_cpu_info_init(void)
>  static void isst_if_cpu_info_exit(void)
>  {
>  	cpuhp_remove_state(isst_if_online_id);
> +	kfree(isst_pkg_info);
>  	kfree(isst_cpu_info);
>  };
>  
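
For anyone following the thread who wants the gist of the per-package
fallback without the driver plumbing, it boils down to the pattern below.
This is only an illustrative, standalone sketch, not the driver code; the
names punit_dev, pkg_cache, lookup_dev_by_node() and MAX_PKGS are invented
for the example:

/*
 * Illustrative sketch only: cache the first NUMA-node match per package
 * and fall back to that cached device for CPUs in sub-clusters whose
 * node id does not match the PUNIT device's node id.
 */
#include <stdio.h>

#define MAX_PKGS	8		/* assumed limit for the sketch */

struct punit_dev {
	int node;			/* NUMA node id of the device */
};

static struct punit_dev *pkg_cache[MAX_PKGS];

/* Stand-in for the PCI walk: return dev only if its node id matches. */
static struct punit_dev *lookup_dev_by_node(struct punit_dev *dev, int cpu_node)
{
	return (dev && dev->node == cpu_node) ? dev : NULL;
}

static struct punit_dev *get_punit_dev(struct punit_dev *candidate,
				       int pkg_id, int cpu_node)
{
	struct punit_dev *dev = lookup_dev_by_node(candidate, cpu_node);

	if (dev) {
		/* Remember the match for every other CPU in this package. */
		pkg_cache[pkg_id] = dev;
		return dev;
	}

	/* CPU sits in another sub-cluster: reuse the per-package match. */
	return pkg_cache[pkg_id];
}

int main(void)
{
	struct punit_dev punit = { .node = 0 };

	/* A CPU on node 0 matches and populates the package cache ... */
	get_punit_dev(&punit, 0, 0);
	/* ... so a CPU on node 1 (another sub-cluster) still finds it. */
	printf("node 1 -> %p\n", (void *)get_punit_dev(&punit, 0, 1));
	return 0;
}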
