Message-ID: <20220203174942.31630-12-nchatrad@amd.com>
Date:   Thu, 3 Feb 2022 11:49:41 -0600
From:   Naveen Krishna Chatradhi <nchatrad@....com>
To:     <linux-edac@...r.kernel.org>, <x86@...nel.org>
CC:     <linux-kernel@...r.kernel.org>, <bp@...en8.de>, <mingo@...hat.com>,
        <mchehab@...nel.org>, <yazen.ghannam@....com>,
        Muralidhara M K <muralimk@....com>,
        Naveen Krishna Chatradhi <nchatrad@....com>
Subject: [PATCH v7 11/12] EDAC/amd64: Add address translation support for DF3.5

From: Muralidhara M K <muralimk@....com>

Add support for address translation on Data Fabric version 3.5.

Add new data fabric ops and interleaving modes. Also, adjust how the
DRAM address maps are found early in the translation: for GPU nodes, a
CCM instance is located and the DRAM address maps are searched by
destination Fabric ID.

Signed-off-by: Muralidhara M K <muralimk@....com>
Co-developed-by: Yazen Ghannam <yazen.ghannam@....com>
Signed-off-by: Yazen Ghannam <yazen.ghannam@....com>
Signed-off-by: Naveen Krishna Chatradhi <nchatrad@....com>
---
Link:
https://lkml.kernel.org/r/20211028175728.121452-34-yazen.ghannam@amd.com

v3->v7:
* Was in patch 33 in v3.

v2->v3:
* New in v3. Original version at link above.
* Squashed the following into this patch:
  https://lkml.kernel.org/r/20210630152828.162659-8-nchatrad@amd.com
* Dropped "df_regs" use.
* Set "df_ops" during module init.
* Dropped hard-coded Node ID values.
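
A minimal user-space sketch of the CSSelect de-hash implemented by
dehash_addr_df35() in this patch; the function name and parameters below
are illustrative stand-ins for the DfGlobalCtrl[22:20] enable bits and
the number of channel-interleave bits (the register read is omitted):

#include <stdint.h>

static uint64_t df35_dehash_model(uint64_t addr, unsigned int num_chan_bits,
				  unsigned int ctl_64k, unsigned int ctl_2m,
				  unsigned int ctl_1g)
{
	unsigned int i;

	for (i = 0; i < num_chan_bits; i++) {
		/*
		 * CSSelect[i] = addr[8+i] ^ addr[16+i] ^ addr[21+i] ^ addr[30+i],
		 * with the upper three terms gated by the enable bits.
		 */
		uint64_t select = (addr >> (8 + i)) ^
				  ((addr >> (16 + i)) & ctl_64k) ^
				  ((addr >> (21 + i)) & ctl_2m) ^
				  ((addr >> (30 + i)) & ctl_1g);

		/*
		 * If the recomputed select bit differs from the stored bit,
		 * flip addr[8+i] to recover the pre-hash address bit.
		 */
		if ((select ^ (addr >> (8 + i))) & 1)
			addr ^= 1ULL << (8 + i);
	}

	return addr;
}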
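
Similarly, a sketch of how get_cs_fabric_id_df35() composes the CS Fabric
ID for GPU nodes; cpu_node_cnt and gpu_node_start_id are hypothetical
stand-ins for amd_nb_num() and amd_gpu_node_start_id(), and the UMC-to-CS
mapping step is omitted:

#include <stdint.h>

static uint16_t df35_cs_fabric_id_model(uint16_t nid, uint16_t inst_id,
					uint8_t node_id_shift,
					uint16_t cpu_node_cnt,
					uint16_t gpu_node_start_id)
{
	/*
	 * Logical GPU node IDs follow the CPU nodes, so convert back to
	 * the hardware-provided node ID before building the Fabric ID.
	 */
	if (nid >= cpu_node_cnt)
		nid = nid - cpu_node_cnt + gpu_node_start_id;

	return inst_id | (nid << node_id_shift);
}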


 drivers/edac/amd64_edac.c | 216 ++++++++++++++++++++++++++++++++++++--
 1 file changed, 209 insertions(+), 7 deletions(-)

diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 8f2b5c8be651..6e0d617fd95f 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -1004,6 +1004,7 @@ static int sys_addr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr)
 /*
  * Glossary of acronyms used in address translation for Zen-based systems
  *
+ * CCM         =       Cache Coherent Master
  * COD         =       Cluster-on-Die
  * CS          =       Coherent Slave
  * DF          =       Data Fabric
@@ -1084,9 +1085,14 @@ enum intlv_modes {
 	NOHASH_2CH	= 0x01,
 	NOHASH_4CH	= 0x03,
 	NOHASH_8CH	= 0x05,
+	NOHASH_16CH	= 0x07,
+	NOHASH_32CH	= 0x08,
 	HASH_COD4_2CH	= 0x0C,
 	HASH_COD2_4CH	= 0x0D,
 	HASH_COD1_8CH	= 0x0E,
+	HASH_8CH	= 0x1C,
+	HASH_16CH	= 0x1D,
+	HASH_32CH	= 0x1E,
 	DF2_HASH_2CH	= 0x21,
 };
 
@@ -1100,6 +1106,7 @@ struct addr_ctx {
 	u32 reg_limit_addr;
 	u32 reg_fab_id_mask0;
 	u32 reg_fab_id_mask1;
+	u32 reg_fab_id_mask2;
 	u16 cs_fabric_id;
 	u16 die_id_mask;
 	u16 socket_id_mask;
@@ -1436,6 +1443,141 @@ struct data_fabric_ops df3_ops = {
 	.get_component_id_mask		=	get_component_id_mask_df3,
 };
 
+static int dehash_addr_df35(struct addr_ctx *ctx)
+{
+	u8 hashed_bit, intlv_ctl_64k, intlv_ctl_2M, intlv_ctl_1G;
+	u8 num_intlv_bits = ctx->intlv_num_chan;
+	u32 i;
+
+	/* Read D18F0x3F8 (DfGlobalCtrl). */
+	if (df_indirect_read_broadcast(0, 0, 0x3F8, &ctx->tmp))
+		return -EINVAL;
+
+	intlv_ctl_64k = !!((ctx->tmp >> 20) & 0x1);
+	intlv_ctl_2M  = !!((ctx->tmp >> 21) & 0x1);
+	intlv_ctl_1G  = !!((ctx->tmp >> 22) & 0x1);
+
+	/*
+	 * CSSelect[0] = XOR of addr{8,  16, 21, 30};
+	 * CSSelect[1] = XOR of addr{9,  17, 22, 31};
+	 * CSSelect[2] = XOR of addr{10, 18, 23, 32};
+	 * CSSelect[3] = XOR of addr{11, 19, 24, 33}; - 16 and 32 channel only
+	 * CSSelect[4] = XOR of addr{12, 20, 25, 34}; - 32 channel only
+	 */
+	for (i = 0; i < num_intlv_bits; i++) {
+		hashed_bit =	((ctx->ret_addr >> (8 + i)) ^
+				((ctx->ret_addr >> (16 + i)) & intlv_ctl_64k) ^
+				((ctx->ret_addr >> (21 + i)) & intlv_ctl_2M) ^
+				((ctx->ret_addr >> (30 + i)) & intlv_ctl_1G));
+
+		hashed_bit &= BIT(0);
+		if (hashed_bit != ((ctx->ret_addr >> (8 + i)) & BIT(0)))
+			ctx->ret_addr ^= BIT(8 + i);
+	}
+
+	return 0;
+}
+
+static int get_intlv_mode_df35(struct addr_ctx *ctx)
+{
+	ctx->intlv_mode = (ctx->reg_base_addr >> 2) & 0x1F;
+
+	if (ctx->intlv_mode == HASH_COD4_2CH ||
+	    ctx->intlv_mode == HASH_COD2_4CH ||
+	    ctx->intlv_mode == HASH_COD1_8CH) {
+		ctx->make_space_for_cs_id	= make_space_for_cs_id_cod_hash;
+		ctx->insert_cs_id		= insert_cs_id_cod_hash;
+		ctx->dehash_addr		= dehash_addr_df3;
+	} else {
+		ctx->make_space_for_cs_id	= make_space_for_cs_id_simple;
+		ctx->insert_cs_id		= insert_cs_id_simple;
+		ctx->dehash_addr		= dehash_addr_df35;
+	}
+
+	return 0;
+}
+
+static void get_intlv_num_dies_df35(struct addr_ctx *ctx)
+{
+	ctx->intlv_num_dies  = (ctx->reg_base_addr >> 7) & 0x1;
+}
+
+static u8 get_die_id_shift_df35(struct addr_ctx *ctx)
+{
+	return ctx->node_id_shift;
+}
+
+static u8 get_socket_id_shift_df35(struct addr_ctx *ctx)
+{
+	return (ctx->reg_fab_id_mask1 >> 8) & 0xF;
+}
+
+static int get_masks_df35(struct addr_ctx *ctx)
+{
+	/* Read D18F1x150 (SystemFabricIdMask0). */
+	if (df_indirect_read_broadcast(0, 1, 0x150, &ctx->reg_fab_id_mask0))
+		return -EINVAL;
+
+	/* Read D18F1x154 (SystemFabricIdMask1) */
+	if (df_indirect_read_broadcast(0, 1, 0x154, &ctx->reg_fab_id_mask1))
+		return -EINVAL;
+
+	/* Read D18F1x158 (SystemFabricIdMask2) */
+	if (df_indirect_read_broadcast(0, 1, 0x158, &ctx->reg_fab_id_mask2))
+		return -EINVAL;
+
+	ctx->node_id_shift = ctx->reg_fab_id_mask1 & 0xF;
+
+	ctx->die_id_mask = ctx->reg_fab_id_mask2 & 0xFFFF;
+
+	ctx->socket_id_mask = (ctx->reg_fab_id_mask2 >> 16) & 0xFFFF;
+
+	return 0;
+}
+
+static u16 get_dst_fabric_id_df35(struct addr_ctx *ctx)
+{
+	return ctx->reg_limit_addr & 0xFFF;
+}
+
+static int get_cs_fabric_id_df35(struct addr_ctx *ctx)
+{
+	u16 nid = ctx->nid;
+
+	/* Special handling for GPU nodes. */
+	if (nid >= amd_nb_num()) {
+		if (get_umc_to_cs_mapping(ctx))
+			return -EINVAL;
+
+		/* Need to convert back to the hardware-provided Node ID. */
+		nid -= amd_nb_num();
+		nid += amd_gpu_node_start_id();
+	}
+
+	ctx->cs_fabric_id = ctx->inst_id | (nid << ctx->node_id_shift);
+
+	return 0;
+}
+
+static u16 get_component_id_mask_df35(struct addr_ctx *ctx)
+{
+	return ctx->reg_fab_id_mask0 & 0xFFFF;
+}
+
+struct data_fabric_ops df3point5_ops = {
+	.get_hi_addr_offset		=	get_hi_addr_offset_df3,
+	.get_intlv_mode			=	get_intlv_mode_df35,
+	.get_intlv_addr_sel		=	get_intlv_addr_sel_df3,
+	.get_intlv_num_dies		=	get_intlv_num_dies_df35,
+	.get_intlv_num_sockets		=	get_intlv_num_sockets_df3,
+	.get_masks			=	get_masks_df35,
+	.get_die_id_shift		=	get_die_id_shift_df35,
+	.get_socket_id_shift		=	get_socket_id_shift_df35,
+	.get_dst_fabric_id		=	get_dst_fabric_id_df35,
+	.get_cs_fabric_id		=	get_cs_fabric_id_df35,
+	.get_component_id_mask		=	get_component_id_mask_df35,
+};
+
 struct data_fabric_ops *df_ops;
 
 static int get_blk_inst_cnt(struct addr_ctx *ctx)
@@ -1534,8 +1676,17 @@ static void get_intlv_num_chan(struct addr_ctx *ctx)
 		break;
 	case NOHASH_8CH:
 	case HASH_COD1_8CH:
+	case HASH_8CH:
 		ctx->intlv_num_chan = 3;
 		break;
+	case NOHASH_16CH:
+	case HASH_16CH:
+		ctx->intlv_num_chan = 4;
+		break;
+	case NOHASH_32CH:
+	case HASH_32CH:
+		ctx->intlv_num_chan = 5;
+		break;
 	default:
 		/* Valid interleaving modes were checked earlier. */
 		break;
@@ -1642,6 +1793,44 @@ static int addr_over_limit(struct addr_ctx *ctx)
 	return 0;
 }
 
+static int find_ccm_instance_id(struct addr_ctx *ctx)
+{
+	u32 temp;
+
+	for (ctx->inst_id = 0; ctx->inst_id < ctx->num_blk_instances; ctx->inst_id++) {
+		/* Read D18F0x44 (FabricBlockInstanceInformation0). */
+		if (df_indirect_read_instance(0, 0, 0x44, ctx->inst_id, &temp))
+			return -EINVAL;
+
+		if (temp == 0)
+			continue;
+
+		if ((temp & 0xF) == 0)
+			return 0;
+	}
+
+	return -EINVAL;
+}
+
+#define DF_NUM_DRAM_MAPS_AVAILABLE  16
+static int find_map_reg_by_dstfabricid(struct addr_ctx *ctx)
+{
+	u16 node_id_mask = (ctx->reg_fab_id_mask0 >> 16) & 0xFFFF;
+	u16 dst_fabric_id;
+
+	for (ctx->map_num = 0; ctx->map_num < DF_NUM_DRAM_MAPS_AVAILABLE; ctx->map_num++) {
+		if (get_dram_addr_map(ctx))
+			continue;
+
+		dst_fabric_id = df_ops->get_dst_fabric_id(ctx);
+
+		if ((dst_fabric_id & node_id_mask) == (ctx->cs_fabric_id & node_id_mask))
+			return 0;
+	}
+
+	return -EINVAL;
+}
+
 static int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr)
 {
 	struct addr_ctx ctx;
@@ -1659,6 +1848,9 @@ static int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr
 	ctx.nid = nid;
 	ctx.inst_id = umc;
 
+	if (df_ops == &df3point5_ops)
+		ctx.late_hole_remove = true;
+
 	if (get_blk_inst_cnt(&ctx)) {
 		pr_debug("Failed to get Block Instance Count");
 		return -EINVAL;
@@ -1674,14 +1866,22 @@ static int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr
 		return -EINVAL;
 	}
 
-	if (remove_dram_offset(&ctx)) {
-		pr_debug("Failed to remove DRAM offset");
-		return -EINVAL;
-	}
+	if (ctx.nid >= amd_nb_num()) {
+		if (find_ccm_instance_id(&ctx))
+			return -EINVAL;
 
-	if (get_dram_addr_map(&ctx)) {
-		pr_debug("Failed to get DRAM address map");
-		return -EINVAL;
+		if (find_map_reg_by_dstfabricid(&ctx))
+			return -EINVAL;
+	} else {
+		if (remove_dram_offset(&ctx)) {
+			pr_debug("Failed to remove DRAM offset");
+			return -EINVAL;
+		}
+
+		if (get_dram_addr_map(&ctx)) {
+			pr_debug("Failed to get DRAM address map");
+			return -EINVAL;
+		}
 	}
 
 	if (df_ops->get_intlv_mode(&ctx)) {
@@ -4756,12 +4956,14 @@ static void per_family_init(struct amd64_pvt *pvt)
 				pvt->ops->populate_csrows	= gpu_init_csrows;
 				pvt->ops->get_umc_err_info	= gpu_update_umc_err_info;
 				pvt->ops->find_umc_inst_id	= gpu_df_inst_id;
+				df_ops				= &df3point5_ops;
 				goto end_fam;
 			} else {
 				pvt->ctl_name		= "F19h_M30h";
 				pvt->f0_id		= PCI_DEVICE_ID_AMD_19H_DF_F0;
 				pvt->f6_id		= PCI_DEVICE_ID_AMD_19H_DF_F6;
 				pvt->max_mcs		= 8;
+				df_ops			= &df3point5_ops;
 			}
 		} else {
 			pvt->ctl_name			= "F19h";
-- 
2.25.1
