[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20240324-dcd-type2-upstream-v1-13-b7b00d623625@intel.com>
Date: Sun, 24 Mar 2024 16:18:16 -0700
From: ira.weiny@...el.com
To: Dave Jiang <dave.jiang@...el.com>, Fan Ni <fan.ni@...sung.com>,
Jonathan Cameron <Jonathan.Cameron@...wei.com>,
Navneet Singh <navneet.singh@...el.com>
Cc: Dan Williams <dan.j.williams@...el.com>,
Davidlohr Bueso <dave@...olabs.net>,
Alison Schofield <alison.schofield@...el.com>,
Vishal Verma <vishal.l.verma@...el.com>, Ira Weiny <ira.weiny@...el.com>,
linux-btrfs@...r.kernel.org, linux-cxl@...r.kernel.org,
linux-kernel@...r.kernel.org
Subject: [PATCH 13/26] cxl/mem: Configure dynamic capacity interrupts
From: Navneet Singh <navneet.singh@...el.com>
Dynamic Capacity Devices (DCD) support extent change notifications
through the event log mechanism. The interrupt mailbox commands were
extended in CXL 3.1 to support these notifications.
Firmware cannot be given control of DCD events, but it can retain
control of memory events. Split the irq configuration of memory events
and DCD events so that memory events may remain FW controlled while DCD
events are always host controlled.
Configure DCD event log interrupts on devices supporting dynamic
capacity. Disable DCD if interrupts are not supported.
Signed-off-by: Navneet Singh <navneet.singh@...el.com>
Co-developed-by: Ira Weiny <ira.weiny@...el.com>
Signed-off-by: Ira Weiny <ira.weiny@...el.com>
---
Changes for v1
[iweiny: rebase to upstream irq code]
[iweiny: disable DCD if irqs not supported]
---
drivers/cxl/core/mbox.c | 9 ++++++-
drivers/cxl/cxl.h | 4 ++-
drivers/cxl/cxlmem.h | 4 +++
drivers/cxl/pci.c | 71 ++++++++++++++++++++++++++++++++++++++++---------
4 files changed, 74 insertions(+), 14 deletions(-)
diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c
index 14e8a7528a8b..58b31fa47b93 100644
--- a/drivers/cxl/core/mbox.c
+++ b/drivers/cxl/core/mbox.c
@@ -1323,10 +1323,17 @@ static int cxl_get_dc_config(struct cxl_memdev_state *mds, u8 start_region,
return rc;
}
-static bool cxl_dcd_supported(struct cxl_memdev_state *mds)
+bool cxl_dcd_supported(struct cxl_memdev_state *mds)
{
return test_bit(CXL_DCD_ENABLED_GET_CONFIG, mds->dcd_cmds);
}
+EXPORT_SYMBOL_NS_GPL(cxl_dcd_supported, CXL);
+
+void cxl_disable_dcd(struct cxl_memdev_state *mds)
+{
+ clear_bit(CXL_DCD_ENABLED_GET_CONFIG, mds->dcd_cmds);
+}
+EXPORT_SYMBOL_NS_GPL(cxl_disable_dcd, CXL);
/**
* cxl_dev_dynamic_capacity_identify() - Reads the dynamic capacity
diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
index 15d418b3bc9b..d585f5fdd3ae 100644
--- a/drivers/cxl/cxl.h
+++ b/drivers/cxl/cxl.h
@@ -164,11 +164,13 @@ static inline int ways_to_eiw(unsigned int ways, u8 *eiw)
#define CXLDEV_EVENT_STATUS_WARN BIT(1)
#define CXLDEV_EVENT_STATUS_FAIL BIT(2)
#define CXLDEV_EVENT_STATUS_FATAL BIT(3)
+#define CXLDEV_EVENT_STATUS_DCD BIT(4)
#define CXLDEV_EVENT_STATUS_ALL (CXLDEV_EVENT_STATUS_INFO | \
CXLDEV_EVENT_STATUS_WARN | \
CXLDEV_EVENT_STATUS_FAIL | \
- CXLDEV_EVENT_STATUS_FATAL)
+					 CXLDEV_EVENT_STATUS_FATAL | \
+					 CXLDEV_EVENT_STATUS_DCD)
/* CXL rev 3.0 section 8.2.9.2.4; Table 8-52 */
#define CXLDEV_EVENT_INT_MODE_MASK GENMASK(1, 0)
diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h
index 4624cf612c1e..01bee6eedff3 100644
--- a/drivers/cxl/cxlmem.h
+++ b/drivers/cxl/cxlmem.h
@@ -225,7 +225,9 @@ struct cxl_event_interrupt_policy {
u8 warn_settings;
u8 failure_settings;
u8 fatal_settings;
+ u8 dcd_settings;
} __packed;
+#define CXL_EVENT_INT_POLICY_BASE_SIZE 4 /* info, warn, failure, fatal */
/**
* struct cxl_event_state - Event log driver state
@@ -890,6 +892,8 @@ void cxl_event_trace_record(const struct cxl_memdev *cxlmd,
enum cxl_event_log_type type,
enum cxl_event_type event_type,
const uuid_t *uuid, union cxl_event *evt);
+bool cxl_dcd_supported(struct cxl_memdev_state *mds);
+void cxl_disable_dcd(struct cxl_memdev_state *mds);
int cxl_set_timestamp(struct cxl_memdev_state *mds);
int cxl_poison_state_init(struct cxl_memdev_state *mds);
int cxl_mem_get_poison(struct cxl_memdev *cxlmd, u64 offset, u64 len,
diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c
index 12cd5d399230..ef482eae09e9 100644
--- a/drivers/cxl/pci.c
+++ b/drivers/cxl/pci.c
@@ -669,22 +669,33 @@ static int cxl_event_get_int_policy(struct cxl_memdev_state *mds,
}
static int cxl_event_config_msgnums(struct cxl_memdev_state *mds,
- struct cxl_event_interrupt_policy *policy)
+ struct cxl_event_interrupt_policy *policy,
+ bool native_cxl)
{
struct cxl_mbox_cmd mbox_cmd;
+ size_t size_in;
int rc;
- *policy = (struct cxl_event_interrupt_policy) {
- .info_settings = CXL_INT_MSI_MSIX,
- .warn_settings = CXL_INT_MSI_MSIX,
- .failure_settings = CXL_INT_MSI_MSIX,
- .fatal_settings = CXL_INT_MSI_MSIX,
- };
+ if (native_cxl) {
+ *policy = (struct cxl_event_interrupt_policy) {
+ .info_settings = CXL_INT_MSI_MSIX,
+ .warn_settings = CXL_INT_MSI_MSIX,
+ .failure_settings = CXL_INT_MSI_MSIX,
+ .fatal_settings = CXL_INT_MSI_MSIX,
+ .dcd_settings = 0,
+ };
+ }
+ size_in = CXL_EVENT_INT_POLICY_BASE_SIZE;
+
+ if (cxl_dcd_supported(mds)) {
+ policy->dcd_settings = CXL_INT_MSI_MSIX;
+ size_in += sizeof(policy->dcd_settings);
+ }
mbox_cmd = (struct cxl_mbox_cmd) {
.opcode = CXL_MBOX_OP_SET_EVT_INT_POLICY,
.payload_in = policy,
- .size_in = sizeof(*policy),
+ .size_in = size_in,
};
rc = cxl_internal_send_cmd(mds, &mbox_cmd);
@@ -731,6 +742,31 @@ static int cxl_event_irqsetup(struct cxl_memdev_state *mds,
return 0;
}
+static int cxl_irqsetup(struct cxl_memdev_state *mds,
+ struct cxl_event_interrupt_policy *policy,
+ bool native_cxl)
+{
+ struct cxl_dev_state *cxlds = &mds->cxlds;
+ int rc;
+
+ if (native_cxl) {
+ rc = cxl_event_irqsetup(mds, policy);
+ if (rc)
+ return rc;
+ }
+
+ if (cxl_dcd_supported(mds)) {
+ rc = cxl_event_req_irq(cxlds, policy->dcd_settings);
+ if (rc) {
+ dev_err(cxlds->dev, "Failed to get interrupt for DCD event log\n");
+ cxl_disable_dcd(mds);
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
static bool cxl_event_int_is_fw(u8 setting)
{
u8 mode = FIELD_GET(CXLDEV_EVENT_INT_MODE_MASK, setting);
@@ -757,17 +793,25 @@ static int cxl_event_config(struct pci_host_bridge *host_bridge,
struct cxl_memdev_state *mds, bool irq_avail)
{
struct cxl_event_interrupt_policy policy = { 0 };
+ bool native_cxl = host_bridge->native_cxl_error;
int rc;
/*
* When BIOS maintains CXL error reporting control, it will process
* event records. Only one agent can do so.
+ *
+ * If BIOS has control of events and DCD is not supported skip event
+ * configuration.
*/
- if (!host_bridge->native_cxl_error)
+ if (!native_cxl && !cxl_dcd_supported(mds))
return 0;
if (!irq_avail) {
dev_info(mds->cxlds.dev, "No interrupt support, disable event processing.\n");
+ if (cxl_dcd_supported(mds)) {
+ dev_info(mds->cxlds.dev, "DCD requires interrupts, disable DCD\n");
+ cxl_disable_dcd(mds);
+ }
return 0;
}
@@ -775,10 +819,10 @@ static int cxl_event_config(struct pci_host_bridge *host_bridge,
if (rc)
return rc;
- if (!cxl_event_validate_mem_policy(mds, &policy))
+ if (native_cxl && !cxl_event_validate_mem_policy(mds, &policy))
return -EBUSY;
- rc = cxl_event_config_msgnums(mds, &policy);
+ rc = cxl_event_config_msgnums(mds, &policy, native_cxl);
if (rc)
return rc;
@@ -786,12 +830,15 @@ static int cxl_event_config(struct pci_host_bridge *host_bridge,
if (rc)
return rc;
- rc = cxl_event_irqsetup(mds, &policy);
+ rc = cxl_irqsetup(mds, &policy, native_cxl);
if (rc)
return rc;
cxl_mem_get_event_records(mds, CXLDEV_EVENT_STATUS_ALL);
+ dev_dbg(mds->cxlds.dev, "Event config : %d %d\n",
+ native_cxl, cxl_dcd_supported(mds));
+
return 0;
}
--
2.44.0
Powered by blists - more mailing lists