Message-ID: <20220127204115.384161-25-yazen.ghannam@amd.com>
Date: Thu, 27 Jan 2022 20:41:15 +0000
From: Yazen Ghannam <yazen.ghannam@....com>
To: <linux-edac@...r.kernel.org>
CC: <linux-kernel@...r.kernel.org>, <bp@...en8.de>,
<mchehab@...nel.org>, <tony.luck@...el.com>, <james.morse@....com>,
<rric@...nel.org>, <Smita.KoralahalliChannabasappa@....com>,
Yazen Ghannam <yazen.ghannam@....com>
Subject: [PATCH v4 24/24] EDAC/amd64: Add support for address translation on DF3 systems
DF3-based systems (Rome and later) support new interleaving modes, and a
number of bit fields have changed or moved entirely. Add support for
these new modes and fields.
Refactoring should be minimal due to earlier changes, and most updates
will be additions.
Signed-off-by: Yazen Ghannam <yazen.ghannam@....com>
---
Link:
https://lore.kernel.org/r/20211028175728.121452-30-yazen.ghannam@amd.com
v3->v4:
* Added glossary entry.
v2->v3:
* Was patch 30 in v2.
* Drop "df_regs" use.
* Set "df_ops" during module init.
v1->v2:
* Moved from arch/x86 to EDAC.
* Use function pointers as needed.
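For readers unfamiliar with the DF3 Fabric ID layout, here is a minimal
standalone sketch of how the shifts and masks read in get_masks_df3()
(see the diff below) decompose a CS Fabric ID into component, die and
socket IDs. All register and ID values are hypothetical and chosen only
for illustration; on real systems they come from D18F1x208/D18F1x20C.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical register contents, not taken from real hardware. */
	uint32_t fab_id_mask0 = 0x000000FF;	/* D18F1x208 (SystemFabricIdMask)  */
	uint32_t fab_id_mask1 = 0x02010108;	/* D18F1x20C (SystemFabricIdMask1) */
	uint16_t cs_fabric_id = 0x32A;		/* example CS Fabric ID            */

	uint8_t  node_id_shift   = fab_id_mask1 & 0xF;
	uint16_t component_mask  = fab_id_mask0 & 0x3FF;
	uint16_t die_id_mask     = ((fab_id_mask1 >> 16) & 0x7) << node_id_shift;
	uint16_t socket_id_mask  = ((fab_id_mask1 >> 24) & 0x7) << node_id_shift;
	uint8_t  socket_id_shift = ((fab_id_mask1 >> 8) & 0x3) + node_id_shift;

	unsigned int component = cs_fabric_id & component_mask;
	unsigned int die       = (cs_fabric_id & die_id_mask) >> node_id_shift;
	unsigned int socket    = (cs_fabric_id & socket_id_mask) >> socket_id_shift;

	/* Prints "component=0x2a die=0x1 socket=0x1" for the values above. */
	printf("component=0x%x die=0x%x socket=0x%x\n", component, die, socket);

	return 0;
}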
drivers/edac/amd64_edac.c | 188 +++++++++++++++++++++++++++++++++++++-
1 file changed, 186 insertions(+), 2 deletions(-)
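The COD hash modes recover the channel-select bit by XORing it with a set
of higher address bits, gated by the DfGlobalCtrl 64K/2M/1G
interleave-enable bits. Below is a minimal standalone sketch of the
2-channel case, mirroring the first step of dehash_addr_df3() in the
diff; the function name and parameters are invented for illustration, and
the enable flags stand in for the bits read from D18F0x3F8.

#include <stdbool.h>
#include <stdint.h>

/* Undo the 2-channel COD hash on a normalized address (illustrative only). */
uint64_t cod_dehash_2ch(uint64_t addr, unsigned int intlv_addr_bit,
			bool intlv_ctl_64k, bool intlv_ctl_2m,
			bool intlv_ctl_1g)
{
	uint64_t hashed_bit;

	hashed_bit = (addr >> 14) ^
		     ((addr >> 18) & intlv_ctl_64k) ^
		     ((addr >> 23) & intlv_ctl_2m) ^
		     ((addr >> 32) & intlv_ctl_1g) ^
		     (addr >> intlv_addr_bit);

	hashed_bit &= 1ULL;

	/* If the hash disagrees with the interleave-select bit, flip it back. */
	if (hashed_bit != ((addr >> intlv_addr_bit) & 1ULL))
		addr ^= 1ULL << intlv_addr_bit;

	return addr;
}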
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 34405c8940fb..d213f9ecab16 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -991,6 +991,7 @@ static int sys_addr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr)
/*
* Glossary of acronyms used in address translation for Zen-based systems
*
+ * COD = Cluster-on-Die
* CS = Coherent Slave
* DF = Data Fabric
*/
@@ -1062,6 +1063,11 @@ static int df_indirect_read_broadcast(u16 node, u8 func, u16 reg, u32 *lo)
enum intlv_modes {
NONE = 0x00,
NOHASH_2CH = 0x01,
+ NOHASH_4CH = 0x03,
+ NOHASH_8CH = 0x05,
+ HASH_COD4_2CH = 0x0C,
+ HASH_COD2_4CH = 0x0D,
+ HASH_COD1_8CH = 0x0E,
DF2_HASH_2CH = 0x21,
};
@@ -1074,6 +1080,7 @@ struct addr_ctx {
u32 reg_base_addr;
u32 reg_limit_addr;
u32 reg_fab_id_mask0;
+ u32 reg_fab_id_mask1;
u16 cs_fabric_id;
u16 die_id_mask;
u16 socket_id_mask;
@@ -1085,6 +1092,7 @@ struct addr_ctx {
u8 intlv_num_dies;
u8 intlv_num_sockets;
u8 cs_id;
+ u8 node_id_shift;
int (*dehash_addr)(struct addr_ctx *ctx);
void (*make_space_for_cs_id)(struct addr_ctx *ctx);
void (*insert_cs_id)(struct addr_ctx *ctx);
@@ -1245,6 +1253,168 @@ struct data_fabric_ops df2_ops = {
.get_component_id_mask = get_component_id_mask_df2,
};
+static u64 get_hi_addr_offset_df3(struct addr_ctx *ctx)
+{
+ return (ctx->reg_dram_offset & GENMASK_ULL(31, 12)) << 16;
+}
+
+static void make_space_for_cs_id_cod_hash(struct addr_ctx *ctx)
+{
+ u8 num_intlv_bits = ctx->intlv_num_chan;
+
+ num_intlv_bits += ctx->intlv_num_sockets;
+ expand_bits(ctx->intlv_addr_bit, 1, &ctx->ret_addr);
+ if (num_intlv_bits > 1)
+ expand_bits(12, num_intlv_bits - 1, &ctx->ret_addr);
+}
+
+static void insert_cs_id_cod_hash(struct addr_ctx *ctx)
+{
+ ctx->ret_addr |= ((ctx->cs_id & 0x1) << ctx->intlv_addr_bit);
+ ctx->ret_addr |= ((ctx->cs_id & 0xE) << 11);
+}
+
+static int dehash_addr_df3(struct addr_ctx *ctx)
+{
+ u8 hashed_bit, intlv_ctl_64k, intlv_ctl_2M, intlv_ctl_1G;
+
+ /* Read D18F0x3F8 (DfGlobalCtrl). */
+ if (df_indirect_read_broadcast(0, 0, 0x3F8, &ctx->tmp))
+ return -EINVAL;
+
+ intlv_ctl_64k = !!((ctx->tmp >> 20) & 0x1);
+ intlv_ctl_2M = !!((ctx->tmp >> 21) & 0x1);
+ intlv_ctl_1G = !!((ctx->tmp >> 22) & 0x1);
+
+ hashed_bit = (ctx->ret_addr >> 14) ^
+ ((ctx->ret_addr >> 18) & intlv_ctl_64k) ^
+ ((ctx->ret_addr >> 23) & intlv_ctl_2M) ^
+ ((ctx->ret_addr >> 32) & intlv_ctl_1G) ^
+ (ctx->ret_addr >> ctx->intlv_addr_bit);
+
+ hashed_bit &= BIT(0);
+
+ if (hashed_bit != ((ctx->ret_addr >> ctx->intlv_addr_bit) & BIT(0)))
+ ctx->ret_addr ^= BIT(ctx->intlv_addr_bit);
+
+ if (ctx->intlv_mode != HASH_COD2_4CH &&
+ ctx->intlv_mode != HASH_COD1_8CH)
+ return 0;
+
+ hashed_bit = (ctx->ret_addr >> 12) ^
+ ((ctx->ret_addr >> 16) & intlv_ctl_64k) ^
+ ((ctx->ret_addr >> 21) & intlv_ctl_2M) ^
+ ((ctx->ret_addr >> 30) & intlv_ctl_1G);
+
+ hashed_bit &= BIT(0);
+
+ if (hashed_bit != ((ctx->ret_addr >> 12) & BIT(0)))
+ ctx->ret_addr ^= BIT(12);
+
+ if (ctx->intlv_mode != HASH_COD1_8CH)
+ return 0;
+
+ hashed_bit = (ctx->ret_addr >> 13) ^
+ ((ctx->ret_addr >> 17) & intlv_ctl_64k) ^
+ ((ctx->ret_addr >> 22) & intlv_ctl_2M) ^
+ ((ctx->ret_addr >> 31) & intlv_ctl_1G);
+
+ hashed_bit &= BIT(0);
+
+ if (hashed_bit != ((ctx->ret_addr >> 13) & BIT(0)))
+ ctx->ret_addr ^= BIT(13);
+
+ return 0;
+}
+
+static int get_intlv_mode_df3(struct addr_ctx *ctx)
+{
+ ctx->intlv_mode = (ctx->reg_base_addr >> 2) & 0xF;
+
+ if (ctx->intlv_mode == HASH_COD4_2CH ||
+ ctx->intlv_mode == HASH_COD2_4CH ||
+ ctx->intlv_mode == HASH_COD1_8CH) {
+ ctx->make_space_for_cs_id = make_space_for_cs_id_cod_hash;
+ ctx->insert_cs_id = insert_cs_id_cod_hash;
+ ctx->dehash_addr = dehash_addr_df3;
+ } else {
+ ctx->make_space_for_cs_id = make_space_for_cs_id_simple;
+ ctx->insert_cs_id = insert_cs_id_simple;
+ }
+
+ return 0;
+}
+
+static u8 get_intlv_addr_sel_df3(struct addr_ctx *ctx)
+{
+ return (ctx->reg_base_addr >> 9) & 0x7;
+}
+
+static void get_intlv_num_dies_df3(struct addr_ctx *ctx)
+{
+ ctx->intlv_num_dies = (ctx->reg_base_addr >> 6) & 0x3;
+}
+
+static void get_intlv_num_sockets_df3(struct addr_ctx *ctx)
+{
+ ctx->intlv_num_sockets = (ctx->reg_base_addr >> 8) & 0x1;
+}
+
+static u8 get_die_id_shift_df3(struct addr_ctx *ctx)
+{
+ return ctx->node_id_shift;
+}
+
+static u8 get_socket_id_shift_df3(struct addr_ctx *ctx)
+{
+ return ((ctx->reg_fab_id_mask1 >> 8) & 0x3) + ctx->node_id_shift;
+}
+
+static int get_masks_df3(struct addr_ctx *ctx)
+{
+ /* Read D18F1x208 (SystemFabricIdMask). */
+ if (df_indirect_read_broadcast(ctx->nid, 1, 0x208, &ctx->reg_fab_id_mask0))
+ return -EINVAL;
+
+ /* Read D18F1x20C (SystemFabricIdMask1) */
+ if (df_indirect_read_broadcast(0, 1, 0x20C, &ctx->reg_fab_id_mask1))
+ return -EINVAL;
+
+ ctx->node_id_shift = ctx->reg_fab_id_mask1 & 0xF;
+
+ ctx->die_id_mask = (ctx->reg_fab_id_mask1 >> 16) & 0x7;
+ ctx->die_id_mask <<= ctx->node_id_shift;
+
+ ctx->socket_id_mask = (ctx->reg_fab_id_mask1 >> 24) & 0x7;
+ ctx->socket_id_mask <<= ctx->node_id_shift;
+
+ return 0;
+}
+
+static u16 get_dst_fabric_id_df3(struct addr_ctx *ctx)
+{
+ return ctx->reg_limit_addr & 0x3FF;
+}
+
+static u16 get_component_id_mask_df3(struct addr_ctx *ctx)
+{
+ return ctx->reg_fab_id_mask0 & 0x3FF;
+}
+
+struct data_fabric_ops df3_ops = {
+ .get_hi_addr_offset = get_hi_addr_offset_df3,
+ .get_intlv_mode = get_intlv_mode_df3,
+ .get_intlv_addr_sel = get_intlv_addr_sel_df3,
+ .get_intlv_num_dies = get_intlv_num_dies_df3,
+ .get_intlv_num_sockets = get_intlv_num_sockets_df3,
+ .get_cs_fabric_id = get_cs_fabric_id_df2,
+ .get_masks = get_masks_df3,
+ .get_die_id_shift = get_die_id_shift_df3,
+ .get_socket_id_shift = get_socket_id_shift_df3,
+ .get_dst_fabric_id = get_dst_fabric_id_df3,
+ .get_component_id_mask = get_component_id_mask_df3,
+};
+
struct data_fabric_ops *df_ops;
static int get_dram_offset_reg(struct addr_ctx *ctx)
@@ -1303,8 +1473,8 @@ static int get_intlv_addr_bit(struct addr_ctx *ctx)
{
u8 intlv_addr_sel = df_ops->get_intlv_addr_sel(ctx);
- /* {0, 1, 2, 3} map to address bits {8, 9, 10, 11} respectively */
- if (intlv_addr_sel > 3) {
+ /* {0, 1, 2, 3, 4} map to address bits {8, 9, 10, 11, 12} respectively */
+ if (intlv_addr_sel > 4) {
pr_debug("Invalid interleave address select %d.\n", intlv_addr_sel);
return -EINVAL;
}
@@ -1322,9 +1492,18 @@ static void get_intlv_num_chan(struct addr_ctx *ctx)
ctx->intlv_num_chan = 0;
break;
case NOHASH_2CH:
+ case HASH_COD4_2CH:
case DF2_HASH_2CH:
ctx->intlv_num_chan = 1;
break;
+ case NOHASH_4CH:
+ case HASH_COD2_4CH:
+ ctx->intlv_num_chan = 2;
+ break;
+ case NOHASH_8CH:
+ case HASH_COD1_8CH:
+ ctx->intlv_num_chan = 3;
+ break;
default:
/* Valid interleaving modes where checked earlier. */
break;
@@ -4197,14 +4376,17 @@ static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt)
} else if (pvt->model >= 0x30 && pvt->model <= 0x3f) {
fam_type = &family_types[F17_M30H_CPUS];
pvt->ops = &family_types[F17_M30H_CPUS].ops;
+ df_ops = &df3_ops;
break;
} else if (pvt->model >= 0x60 && pvt->model <= 0x6f) {
fam_type = &family_types[F17_M60H_CPUS];
pvt->ops = &family_types[F17_M60H_CPUS].ops;
+ df_ops = &df3_ops;
break;
} else if (pvt->model >= 0x70 && pvt->model <= 0x7f) {
fam_type = &family_types[F17_M70H_CPUS];
pvt->ops = &family_types[F17_M70H_CPUS].ops;
+ df_ops = &df3_ops;
break;
}
fallthrough;
@@ -4226,6 +4408,7 @@ static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt)
fam_type = &family_types[F17_M70H_CPUS];
pvt->ops = &family_types[F17_M70H_CPUS].ops;
fam_type->ctl_name = "F19h_M20h";
+ df_ops = &df3_ops;
break;
} else if (pvt->model >= 0x50 && pvt->model <= 0x5f) {
fam_type = &family_types[F19_M50H_CPUS];
@@ -4241,6 +4424,7 @@ static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt)
fam_type = &family_types[F19_CPUS];
pvt->ops = &family_types[F19_CPUS].ops;
family_types[F19_CPUS].ctl_name = "F19h";
+ df_ops = &df3_ops;
break;
default:
--
2.25.1