Message-ID: <1490703176-429-4-git-send-email-Yuval.Mintz@cavium.com>
Date:   Tue, 28 Mar 2017 15:12:52 +0300
From:   Yuval Mintz <Yuval.Mintz@...ium.com>
To:     <davem@...emloft.net>, <netdev@...r.kernel.org>
CC:     Tomer Tayar <Tomer.Tayar@...ium.com>,
        Yuval Mintz <Yuval.Mintz@...ium.com>
Subject: [PATCH net-next 3/7] qed: Move to new load request scheme

From: Tomer Tayar <Tomer.Tayar@...ium.com>

Management firmware is used as an arbiter between the various PFs
in regard to loading - it makes the PFs load/unload sequentially
and informs each of its appropriate role in the init.

But the existing flow is too weak to handle some scenarios where
PFs aren't properly cleaned prior to loading.
The significant scenarios falling under this category are:
  a. Preboot drivers in some environments can't properly unload.
  b. Unexpected driver replacement [kdump, PDA].

Modern management firmware supports a more intricate loading flow,
in which the driver is able to overcome these limitations.
This moves qed to using this newer scheme.

Note that the new scheme is backward compatible, so new drivers
are still able to load properly on top of older management firmware
and vice versa.

Signed-off-by: Tomer Tayar <Tomer.Tayar@...ium.com>
Signed-off-by: Yuval Mintz <Yuval.Mintz@...ium.com>
---
 drivers/net/ethernet/qlogic/qed/qed.h         |  29 ++-
 drivers/net/ethernet/qlogic/qed/qed_dcbx.h    |   3 -
 drivers/net/ethernet/qlogic/qed/qed_dev.c     |  35 ++-
 drivers/net/ethernet/qlogic/qed/qed_dev_api.h |  32 +++
 drivers/net/ethernet/qlogic/qed/qed_hsi.h     |  50 +++-
 drivers/net/ethernet/qlogic/qed/qed_main.c    |   9 +
 drivers/net/ethernet/qlogic/qed/qed_mcp.c     | 356 ++++++++++++++++++++++++--
 drivers/net/ethernet/qlogic/qed/qed_mcp.h     |  41 +--
 8 files changed, 497 insertions(+), 58 deletions(-)
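
Since most of this patch revolves around the new misc0 word carried by
LOAD_REQ/LOAD_RSP, here is a minimal standalone sketch of its layout and
of the QED_MFW_GET_FIELD decode helper the patch adds. The masks, shifts
and role/force/flag values are copied from the qed.h/qed_hsi.h hunks
below; the typedef, the plain shift-and-mask packing and the small main()
are only for illustration (the driver itself packs the word with
QED_MFW_SET_FIELD into a zeroed load_req structure).

#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;

/* Decode helper as added to qed.h by this patch */
#define QED_MFW_GET_FIELD(name, field) \
	(((name) & (field ## _MASK)) >> (field ## _SHIFT))

/* misc0 layout of struct load_req_stc, as added to qed_hsi.h */
#define LOAD_REQ_ROLE_MASK		0x000000FF
#define LOAD_REQ_ROLE_SHIFT		0
#define LOAD_REQ_LOCK_TO_MASK		0x0000FF00
#define LOAD_REQ_LOCK_TO_SHIFT		8
#define LOAD_REQ_FORCE_MASK		0x000F0000
#define LOAD_REQ_FORCE_SHIFT		16
#define LOAD_REQ_FLAGS0_MASK		0x00F00000
#define LOAD_REQ_FLAGS0_SHIFT		20

#define DRV_ROLE_OS			2
#define LOAD_REQ_FORCE_NONE		0
#define LOAD_REQ_FLAGS0_AVOID_RESET	(0x1 << 0)

int main(void)
{
	u32 misc0 = 0;

	/* Pack role, lock timeout, force command and flags; the masks are
	 * already shifted, so (value << SHIFT) & MASK places each field.
	 */
	misc0 |= (DRV_ROLE_OS << LOAD_REQ_ROLE_SHIFT) & LOAD_REQ_ROLE_MASK;
	misc0 |= (0 << LOAD_REQ_LOCK_TO_SHIFT) & LOAD_REQ_LOCK_TO_MASK;
	misc0 |= (LOAD_REQ_FORCE_NONE << LOAD_REQ_FORCE_SHIFT) &
		 LOAD_REQ_FORCE_MASK;
	misc0 |= (LOAD_REQ_FLAGS0_AVOID_RESET << LOAD_REQ_FLAGS0_SHIFT) &
		 LOAD_REQ_FLAGS0_MASK;

	/* Decode it the way the debug prints in __qed_mcp_load_req() do;
	 * prints "misc0 0x00100002 [role 2, timeout 0, force 0, flags0 0x1]"
	 */
	printf("misc0 0x%08x [role %u, timeout %u, force %u, flags0 0x%x]\n",
	       misc0,
	       QED_MFW_GET_FIELD(misc0, LOAD_REQ_ROLE),
	       QED_MFW_GET_FIELD(misc0, LOAD_REQ_LOCK_TO),
	       QED_MFW_GET_FIELD(misc0, LOAD_REQ_FORCE),
	       QED_MFW_GET_FIELD(misc0, LOAD_REQ_FLAGS0));

	return 0;
}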

diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index 90a1988..187a84b 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -51,7 +51,19 @@
 #include "qed_hsi.h"
 
 extern const struct qed_common_ops qed_common_ops_pass;
-#define DRV_MODULE_VERSION "8.10.10.21"
+
+#define QED_MAJOR_VERSION               8
+#define QED_MINOR_VERSION               10
+#define QED_REVISION_VERSION            10
+#define QED_ENGINEERING_VERSION 21
+
+#define QED_VERSION						 \
+	((QED_MAJOR_VERSION << 24) | (QED_MINOR_VERSION << 16) | \
+	 (QED_REVISION_VERSION << 8) | QED_ENGINEERING_VERSION)
+
+#define STORM_FW_VERSION				       \
+	((FW_MAJOR_VERSION << 24) | (FW_MINOR_VERSION << 16) | \
+	 (FW_REVISION_VERSION << 8) | FW_ENGINEERING_VERSION)
 
 #define MAX_HWFNS_PER_DEVICE    (4)
 #define NAME_SIZE 16
@@ -76,6 +88,15 @@ enum qed_coalescing_mode {
 enum qed_mcp_protocol_type;
 
 /* helpers */
+#define QED_MFW_GET_FIELD(name, field) \
+	(((name) & (field ## _MASK)) >> (field ## _SHIFT))
+
+#define QED_MFW_SET_FIELD(name, field, value)				       \
+	do {								       \
+		(name)	&= ~((field ## _MASK) << (field ## _SHIFT));	       \
+		(name)	|= (((value) << (field ## _SHIFT)) & (field ## _MASK));\
+	} while (0)
+
 static inline u32 qed_db_addr(u32 cid, u32 DEMS)
 {
 	u32 db_addr = FIELD_VALUE(DB_LEGACY_ADDR_DEMS, DEMS) |
@@ -355,6 +376,12 @@ struct qed_fw_data {
 	u32			init_ops_size;
 };
 
+#define DRV_MODULE_VERSION		      \
+	__stringify(QED_MAJOR_VERSION) "."    \
+	__stringify(QED_MINOR_VERSION) "."    \
+	__stringify(QED_REVISION_VERSION) "." \
+	__stringify(QED_ENGINEERING_VERSION)
+
 struct qed_simd_fp_handler {
 	void	*token;
 	void	(*func)(void *);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.h b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h
index 0fabe97..2eb988f 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h
@@ -85,9 +85,6 @@ struct qed_dcbx_app_metadata {
 	enum qed_pci_personality personality;
 };
 
-#define QED_MFW_GET_FIELD(name, field) \
-	(((name) & (field ## _MASK)) >> (field ## _SHIFT))
-
 struct qed_dcbx_info {
 	struct lldp_status_params_s lldp_remote[LLDP_MAX_LLDP_AGENTS];
 	struct lldp_config_params_s lldp_local[LLDP_MAX_LLDP_AGENTS];
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 1092998..759b819 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -1106,8 +1106,22 @@ static void qed_reset_mb_shadow(struct qed_hwfn *p_hwfn,
 	       p_hwfn->mcp_info->mfw_mb_cur, p_hwfn->mcp_info->mfw_mb_length);
 }
 
+static void
+qed_fill_load_req_params(struct qed_load_req_params *p_load_req,
+			 struct qed_drv_load_params *p_drv_load)
+{
+	memset(p_load_req, 0, sizeof(*p_load_req));
+
+	p_load_req->drv_role = p_drv_load->is_crash_kernel ?
+			       QED_DRV_ROLE_KDUMP : QED_DRV_ROLE_OS;
+	p_load_req->timeout_val = p_drv_load->mfw_timeout_val;
+	p_load_req->avoid_eng_reset = p_drv_load->avoid_eng_reset;
+	p_load_req->override_force_load = p_drv_load->override_force_load;
+}
+
 int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params)
 {
+	struct qed_load_req_params load_req_params;
 	u32 load_code, param, drv_mb_param;
 	bool b_default_mtu = true;
 	struct qed_hwfn *p_hwfn;
@@ -1145,17 +1159,21 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params)
 		if (rc)
 			return rc;
 
-		rc = qed_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt, &load_code);
+		qed_fill_load_req_params(&load_req_params,
+					 p_params->p_drv_load_params);
+		rc = qed_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt,
+				      &load_req_params);
 		if (rc) {
-			DP_NOTICE(p_hwfn, "Failed sending LOAD_REQ command\n");
+			DP_NOTICE(p_hwfn, "Failed sending a LOAD_REQ command\n");
 			return rc;
 		}
 
-		qed_reset_mb_shadow(p_hwfn, p_hwfn->p_main_ptt);
-
+		load_code = load_req_params.load_code;
 		DP_VERBOSE(p_hwfn, QED_MSG_SP,
-			   "Load request was sent. Resp:0x%x, Load code: 0x%x\n",
-			   rc, load_code);
+			   "Load request was sent. Load code: 0x%x\n",
+			   load_code);
+
+		qed_reset_mb_shadow(p_hwfn, p_hwfn->p_main_ptt);
 
 		p_hwfn->first_on_engine = (load_code ==
 					   FW_MSG_CODE_DRV_LOAD_ENGINE);
@@ -1224,10 +1242,7 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params)
 
 	if (IS_PF(cdev)) {
 		p_hwfn = QED_LEADING_HWFN(cdev);
-		drv_mb_param = (FW_MAJOR_VERSION << 24) |
-			       (FW_MINOR_VERSION << 16) |
-			       (FW_REVISION_VERSION << 8) |
-			       (FW_ENGINEERING_VERSION);
+		drv_mb_param = STORM_FW_VERSION;
 		rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
 				 DRV_MSG_CODE_OV_UPDATE_STORM_FW_VER,
 				 drv_mb_param, &load_code, &param);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
index 8f45f46..cb97349 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
@@ -82,6 +82,35 @@ void qed_init_dp(struct qed_dev *cdev,
  */
 void qed_resc_setup(struct qed_dev *cdev);
 
+enum qed_override_force_load {
+	QED_OVERRIDE_FORCE_LOAD_NONE,
+	QED_OVERRIDE_FORCE_LOAD_ALWAYS,
+	QED_OVERRIDE_FORCE_LOAD_NEVER,
+};
+
+struct qed_drv_load_params {
+	/* Indicates whether the driver is running over a crash kernel.
+	 * As part of the load request, this will be used for providing the
+	 * driver role to the MFW.
+	 * In case of a crash kernel over PDA - this should be set to false.
+	 */
+	bool is_crash_kernel;
+
+	/* The timeout value that the MFW should use when locking the engine for
+	 * the driver load process.
+	 * A value of '0' means the default value, and '255' means no timeout.
+	 */
+	u8 mfw_timeout_val;
+#define QED_LOAD_REQ_LOCK_TO_DEFAULT    0
+#define QED_LOAD_REQ_LOCK_TO_NONE       255
+
+	/* Avoid engine reset when first PF loads on it */
+	bool avoid_eng_reset;
+
+	/* Allow overriding the default force load behavior */
+	enum qed_override_force_load override_force_load;
+};
+
 struct qed_hw_init_params {
 	/* Tunneling parameters */
 	struct qed_tunn_start_params *p_tunn;
@@ -96,6 +125,9 @@ struct qed_hw_init_params {
 
 	/* Binary fw data pointer in binary fw file */
 	const u8 *bin_fw_data;
+
+	/* Driver load parameters */
+	struct qed_drv_load_params *p_drv_load_params;
 };
 
 /**
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
index e9acdc9..bf2b201 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -9887,9 +9887,11 @@ struct public_func {
 #define DRV_ID_PDA_COMP_VER_MASK	0x0000ffff
 #define DRV_ID_PDA_COMP_VER_SHIFT	0
 
+#define LOAD_REQ_HSI_VERSION		2
 #define DRV_ID_MCP_HSI_VER_MASK		0x00ff0000
 #define DRV_ID_MCP_HSI_VER_SHIFT	16
-#define DRV_ID_MCP_HSI_VER_CURRENT	(1 << DRV_ID_MCP_HSI_VER_SHIFT)
+#define DRV_ID_MCP_HSI_VER_CURRENT	(LOAD_REQ_HSI_VERSION << \
+					 DRV_ID_MCP_HSI_VER_SHIFT)
 
 #define DRV_ID_DRV_TYPE_MASK		0x7f000000
 #define DRV_ID_DRV_TYPE_SHIFT		24
@@ -10001,6 +10003,46 @@ struct resource_info {
 #define RESOURCE_ELEMENT_STRICT (1 << 0)
 };
 
+#define DRV_ROLE_NONE           0
+#define DRV_ROLE_PREBOOT        1
+#define DRV_ROLE_OS             2
+#define DRV_ROLE_KDUMP          3
+
+struct load_req_stc {
+	u32 drv_ver_0;
+	u32 drv_ver_1;
+	u32 fw_ver;
+	u32 misc0;
+#define LOAD_REQ_ROLE_MASK              0x000000FF
+#define LOAD_REQ_ROLE_SHIFT             0
+#define LOAD_REQ_LOCK_TO_MASK           0x0000FF00
+#define LOAD_REQ_LOCK_TO_SHIFT          8
+#define LOAD_REQ_LOCK_TO_DEFAULT        0
+#define LOAD_REQ_LOCK_TO_NONE           255
+#define LOAD_REQ_FORCE_MASK             0x000F0000
+#define LOAD_REQ_FORCE_SHIFT            16
+#define LOAD_REQ_FORCE_NONE             0
+#define LOAD_REQ_FORCE_PF               1
+#define LOAD_REQ_FORCE_ALL              2
+#define LOAD_REQ_FLAGS0_MASK            0x00F00000
+#define LOAD_REQ_FLAGS0_SHIFT           20
+#define LOAD_REQ_FLAGS0_AVOID_RESET     (0x1 << 0)
+};
+
+struct load_rsp_stc {
+	u32 drv_ver_0;
+	u32 drv_ver_1;
+	u32 fw_ver;
+	u32 misc0;
+#define LOAD_RSP_ROLE_MASK              0x000000FF
+#define LOAD_RSP_ROLE_SHIFT             0
+#define LOAD_RSP_HSI_MASK               0x0000FF00
+#define LOAD_RSP_HSI_SHIFT              8
+#define LOAD_RSP_FLAGS0_MASK            0x000F0000
+#define LOAD_RSP_FLAGS0_SHIFT           16
+#define LOAD_RSP_FLAGS0_DRV_EXISTS      (0x1 << 0)
+};
+
 union drv_union_data {
 	u32 ver_str[MCP_DRV_VER_STR_SIZE_DWORD];
 	struct mcp_mac wol_mac;
@@ -10032,6 +10074,7 @@ struct public_drv_mb {
 #define DRV_MSG_CODE_LOAD_REQ			0x10000000
 #define DRV_MSG_CODE_LOAD_DONE			0x11000000
 #define DRV_MSG_CODE_INIT_HW			0x12000000
+#define DRV_MSG_CODE_CANCEL_LOAD_REQ            0x13000000
 #define DRV_MSG_CODE_UNLOAD_REQ			0x20000000
 #define DRV_MSG_CODE_UNLOAD_DONE		0x21000000
 #define DRV_MSG_CODE_INIT_PHY			0x22000000
@@ -10167,8 +10210,11 @@ struct public_drv_mb {
 #define FW_MSG_CODE_DRV_LOAD_PORT		0x10110000
 #define FW_MSG_CODE_DRV_LOAD_FUNCTION		0x10120000
 #define FW_MSG_CODE_DRV_LOAD_REFUSED_PDA	0x10200000
-#define FW_MSG_CODE_DRV_LOAD_REFUSED_HSI	0x10210000
+#define FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1	0x10210000
 #define FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG	0x10220000
+#define FW_MSG_CODE_DRV_LOAD_REFUSED_HSI        0x10230000
+#define FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE 0x10300000
+#define FW_MSG_CODE_DRV_LOAD_REFUSED_REJECT     0x10310000
 #define FW_MSG_CODE_DRV_LOAD_DONE		0x11100000
 #define FW_MSG_CODE_DRV_UNLOAD_ENGINE		0x20110000
 #define FW_MSG_CODE_DRV_UNLOAD_PORT		0x20120000
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index ae0ab3b..d4edb99 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -45,6 +45,7 @@
 #include <linux/ethtool.h>
 #include <linux/etherdevice.h>
 #include <linux/vmalloc.h>
+#include <linux/crash_dump.h>
 #include <linux/qed/qed_if.h>
 #include <linux/qed/qed_ll2_if.h>
 
@@ -901,6 +902,7 @@ static void qed_update_pf_params(struct qed_dev *cdev,
 static int qed_slowpath_start(struct qed_dev *cdev,
 			      struct qed_slowpath_params *params)
 {
+	struct qed_drv_load_params drv_load_params;
 	struct qed_hw_init_params hw_init_params;
 	struct qed_tunn_start_params tunn_info;
 	struct qed_mcp_drv_version drv_version;
@@ -974,6 +976,13 @@ static int qed_slowpath_start(struct qed_dev *cdev,
 	hw_init_params.allow_npar_tx_switch = true;
 	hw_init_params.bin_fw_data = data;
 
+	memset(&drv_load_params, 0, sizeof(drv_load_params));
+	drv_load_params.is_crash_kernel = is_kdump_kernel();
+	drv_load_params.mfw_timeout_val = QED_LOAD_REQ_LOCK_TO_DEFAULT;
+	drv_load_params.avoid_eng_reset = false;
+	drv_load_params.override_force_load = QED_OVERRIDE_FORCE_LOAD_NONE;
+	hw_init_params.p_drv_load_params = &drv_load_params;
+
 	rc = qed_hw_init(cdev, &hw_init_params);
 	if (rc)
 		goto err2;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index f2e5ab9..1a820b4 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -598,48 +598,352 @@ int qed_mcp_nvm_rd_cmd(struct qed_hwfn *p_hwfn,
 	return 0;
 }
 
-int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
-		     struct qed_ptt *p_ptt, u32 *p_load_code)
+static bool
+qed_mcp_can_force_load(u8 drv_role,
+		       u8 exist_drv_role,
+		       enum qed_override_force_load override_force_load)
+{
+	bool can_force_load = false;
+
+	switch (override_force_load) {
+	case QED_OVERRIDE_FORCE_LOAD_ALWAYS:
+		can_force_load = true;
+		break;
+	case QED_OVERRIDE_FORCE_LOAD_NEVER:
+		can_force_load = false;
+		break;
+	default:
+		can_force_load = (drv_role == DRV_ROLE_OS &&
+				  exist_drv_role == DRV_ROLE_PREBOOT) ||
+				 (drv_role == DRV_ROLE_KDUMP &&
+				  exist_drv_role == DRV_ROLE_OS);
+		break;
+	}
+
+	return can_force_load;
+}
+
+static int qed_mcp_cancel_load_req(struct qed_hwfn *p_hwfn,
+				   struct qed_ptt *p_ptt)
+{
+	u32 resp = 0, param = 0;
+	int rc;
+
+	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CANCEL_LOAD_REQ, 0,
+			 &resp, &param);
+	if (rc)
+		DP_NOTICE(p_hwfn,
+			  "Failed to send cancel load request, rc = %d\n", rc);
+
+	return rc;
+}
+
+#define CONFIG_QEDE_BITMAP_IDX		BIT(0)
+#define CONFIG_QED_SRIOV_BITMAP_IDX	BIT(1)
+#define CONFIG_QEDR_BITMAP_IDX		BIT(2)
+#define CONFIG_QEDF_BITMAP_IDX		BIT(4)
+#define CONFIG_QEDI_BITMAP_IDX		BIT(5)
+#define CONFIG_QED_LL2_BITMAP_IDX	BIT(6)
+
+static u32 qed_get_config_bitmap(void)
+{
+	u32 config_bitmap = 0x0;
+
+	if (IS_ENABLED(CONFIG_QEDE))
+		config_bitmap |= CONFIG_QEDE_BITMAP_IDX;
+
+	if (IS_ENABLED(CONFIG_QED_SRIOV))
+		config_bitmap |= CONFIG_QED_SRIOV_BITMAP_IDX;
+
+	if (IS_ENABLED(CONFIG_QED_RDMA))
+		config_bitmap |= CONFIG_QEDR_BITMAP_IDX;
+
+	if (IS_ENABLED(CONFIG_QED_FCOE))
+		config_bitmap |= CONFIG_QEDF_BITMAP_IDX;
+
+	if (IS_ENABLED(CONFIG_QED_ISCSI))
+		config_bitmap |= CONFIG_QEDI_BITMAP_IDX;
+
+	if (IS_ENABLED(CONFIG_QED_LL2))
+		config_bitmap |= CONFIG_QED_LL2_BITMAP_IDX;
+
+	return config_bitmap;
+}
+
+struct qed_load_req_in_params {
+	u8 hsi_ver;
+#define QED_LOAD_REQ_HSI_VER_DEFAULT	0
+#define QED_LOAD_REQ_HSI_VER_1		1
+	u32 drv_ver_0;
+	u32 drv_ver_1;
+	u32 fw_ver;
+	u8 drv_role;
+	u8 timeout_val;
+	u8 force_cmd;
+	bool avoid_eng_reset;
+};
+
+struct qed_load_req_out_params {
+	u32 load_code;
+	u32 exist_drv_ver_0;
+	u32 exist_drv_ver_1;
+	u32 exist_fw_ver;
+	u8 exist_drv_role;
+	u8 mfw_hsi_ver;
+	bool drv_exists;
+};
+
+static int
+__qed_mcp_load_req(struct qed_hwfn *p_hwfn,
+		   struct qed_ptt *p_ptt,
+		   struct qed_load_req_in_params *p_in_params,
+		   struct qed_load_req_out_params *p_out_params)
 {
-	struct qed_dev *cdev = p_hwfn->cdev;
 	struct qed_mcp_mb_params mb_params;
-	union drv_union_data union_data;
+	struct load_req_stc load_req;
+	struct load_rsp_stc load_rsp;
+	u32 hsi_ver;
 	int rc;
 
+	memset(&load_req, 0, sizeof(load_req));
+	load_req.drv_ver_0 = p_in_params->drv_ver_0;
+	load_req.drv_ver_1 = p_in_params->drv_ver_1;
+	load_req.fw_ver = p_in_params->fw_ver;
+	QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_ROLE, p_in_params->drv_role);
+	QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_LOCK_TO,
+			  p_in_params->timeout_val);
+	QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_FORCE,
+			  p_in_params->force_cmd);
+	QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_FLAGS0,
+			  p_in_params->avoid_eng_reset);
+
+	hsi_ver = (p_in_params->hsi_ver == QED_LOAD_REQ_HSI_VER_DEFAULT) ?
+		  DRV_ID_MCP_HSI_VER_CURRENT :
+		  (p_in_params->hsi_ver << DRV_ID_MCP_HSI_VER_SHIFT);
+
 	memset(&mb_params, 0, sizeof(mb_params));
-	/* Load Request */
 	mb_params.cmd = DRV_MSG_CODE_LOAD_REQ;
-	mb_params.param = PDA_COMP | DRV_ID_MCP_HSI_VER_CURRENT |
-			  cdev->drv_type;
-	memcpy(&union_data.ver_str, cdev->ver_str, MCP_DRV_VER_STR_SIZE);
-	mb_params.p_data_src = &union_data;
-	mb_params.data_src_size = sizeof(union_data.ver_str);
-	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+	mb_params.param = PDA_COMP | hsi_ver | p_hwfn->cdev->drv_type;
+	mb_params.p_data_src = &load_req;
+	mb_params.data_src_size = sizeof(load_req);
+	mb_params.p_data_dst = &load_rsp;
+	mb_params.data_dst_size = sizeof(load_rsp);
 
-	/* if mcp fails to respond we must abort */
+	DP_VERBOSE(p_hwfn, QED_MSG_SP,
+		   "Load Request: param 0x%08x [init_hw %d, drv_type %d, hsi_ver %d, pda 0x%04x]\n",
+		   mb_params.param,
+		   QED_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_INIT_HW),
+		   QED_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_TYPE),
+		   QED_MFW_GET_FIELD(mb_params.param, DRV_ID_MCP_HSI_VER),
+		   QED_MFW_GET_FIELD(mb_params.param, DRV_ID_PDA_COMP_VER));
+
+	if (p_in_params->hsi_ver != QED_LOAD_REQ_HSI_VER_1) {
+		DP_VERBOSE(p_hwfn, QED_MSG_SP,
+			   "Load Request: drv_ver 0x%08x_0x%08x, fw_ver 0x%08x, misc0 0x%08x [role %d, timeout %d, force %d, flags0 0x%x]\n",
+			   load_req.drv_ver_0,
+			   load_req.drv_ver_1,
+			   load_req.fw_ver,
+			   load_req.misc0,
+			   QED_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_ROLE),
+			   QED_MFW_GET_FIELD(load_req.misc0,
+					     LOAD_REQ_LOCK_TO),
+			   QED_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_FORCE),
+			   QED_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_FLAGS0));
+	}
+
+	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
 	if (rc) {
-		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
+		DP_NOTICE(p_hwfn, "Failed to send load request, rc = %d\n", rc);
 		return rc;
 	}
 
-	*p_load_code = mb_params.mcp_resp;
+	DP_VERBOSE(p_hwfn, QED_MSG_SP,
+		   "Load Response: resp 0x%08x\n", mb_params.mcp_resp);
+	p_out_params->load_code = mb_params.mcp_resp;
 
-	/* If MFW refused (e.g. other port is in diagnostic mode) we
-	 * must abort. This can happen in the following cases:
-	 * - Other port is in diagnostic mode
-	 * - Previously loaded function on the engine is not compliant with
-	 *   the requester.
-	 * - MFW cannot cope with the requester's DRV_MFW_HSI_VERSION.
-	 *      -
+	if (p_in_params->hsi_ver != QED_LOAD_REQ_HSI_VER_1 &&
+	    p_out_params->load_code != FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
+		DP_VERBOSE(p_hwfn,
+			   QED_MSG_SP,
+			   "Load Response: exist_drv_ver 0x%08x_0x%08x, exist_fw_ver 0x%08x, misc0 0x%08x [exist_role %d, mfw_hsi %d, flags0 0x%x]\n",
+			   load_rsp.drv_ver_0,
+			   load_rsp.drv_ver_1,
+			   load_rsp.fw_ver,
+			   load_rsp.misc0,
+			   QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_ROLE),
+			   QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_HSI),
+			   QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0));
+
+		p_out_params->exist_drv_ver_0 = load_rsp.drv_ver_0;
+		p_out_params->exist_drv_ver_1 = load_rsp.drv_ver_1;
+		p_out_params->exist_fw_ver = load_rsp.fw_ver;
+		p_out_params->exist_drv_role =
+		    QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_ROLE);
+		p_out_params->mfw_hsi_ver =
+		    QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_HSI);
+		p_out_params->drv_exists =
+		    QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0) &
+		    LOAD_RSP_FLAGS0_DRV_EXISTS;
+	}
+
+	return 0;
+}
+
+static int eocre_get_mfw_drv_role(struct qed_hwfn *p_hwfn,
+				  enum qed_drv_role drv_role,
+				  u8 *p_mfw_drv_role)
+{
+	switch (drv_role) {
+	case QED_DRV_ROLE_OS:
+		*p_mfw_drv_role = DRV_ROLE_OS;
+		break;
+	case QED_DRV_ROLE_KDUMP:
+		*p_mfw_drv_role = DRV_ROLE_KDUMP;
+		break;
+	default:
+		DP_ERR(p_hwfn, "Unexpected driver role %d\n", drv_role);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+enum qed_load_req_force {
+	QED_LOAD_REQ_FORCE_NONE,
+	QED_LOAD_REQ_FORCE_PF,
+	QED_LOAD_REQ_FORCE_ALL,
+};
+
+static void qed_get_mfw_force_cmd(struct qed_hwfn *p_hwfn,
+
+				  enum qed_load_req_force force_cmd,
+				  u8 *p_mfw_force_cmd)
+{
+	switch (force_cmd) {
+	case QED_LOAD_REQ_FORCE_NONE:
+		*p_mfw_force_cmd = LOAD_REQ_FORCE_NONE;
+		break;
+	case QED_LOAD_REQ_FORCE_PF:
+		*p_mfw_force_cmd = LOAD_REQ_FORCE_PF;
+		break;
+	case QED_LOAD_REQ_FORCE_ALL:
+		*p_mfw_force_cmd = LOAD_REQ_FORCE_ALL;
+		break;
+	}
+}
+
+int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
+		     struct qed_ptt *p_ptt,
+		     struct qed_load_req_params *p_params)
+{
+	struct qed_load_req_out_params out_params;
+	struct qed_load_req_in_params in_params;
+	u8 mfw_drv_role, mfw_force_cmd;
+	int rc;
+
+	memset(&in_params, 0, sizeof(in_params));
+	in_params.hsi_ver = QED_LOAD_REQ_HSI_VER_DEFAULT;
+	in_params.drv_ver_0 = QED_VERSION;
+	in_params.drv_ver_1 = qed_get_config_bitmap();
+	in_params.fw_ver = STORM_FW_VERSION;
+	rc = eocre_get_mfw_drv_role(p_hwfn, p_params->drv_role, &mfw_drv_role);
+	if (rc)
+		return rc;
+
+	in_params.drv_role = mfw_drv_role;
+	in_params.timeout_val = p_params->timeout_val;
+	qed_get_mfw_force_cmd(p_hwfn,
+			      QED_LOAD_REQ_FORCE_NONE, &mfw_force_cmd);
+
+	in_params.force_cmd = mfw_force_cmd;
+	in_params.avoid_eng_reset = p_params->avoid_eng_reset;
+
+	memset(&out_params, 0, sizeof(out_params));
+	rc = __qed_mcp_load_req(p_hwfn, p_ptt, &in_params, &out_params);
+	if (rc)
+		return rc;
+
+	/* First handle cases where another load request should/might be sent:
+	 * - MFW expects the old interface [HSI version = 1]
+	 * - MFW responds that a force load request is required
 	 */
-	if (!(*p_load_code) ||
-	    ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI) ||
-	    ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_PDA) ||
-	    ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG)) {
-		DP_ERR(p_hwfn, "MCP refused load request, aborting\n");
+	if (out_params.load_code == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
+		DP_INFO(p_hwfn,
+			"MFW refused a load request due to HSI > 1. Resending with HSI = 1\n");
+
+		in_params.hsi_ver = QED_LOAD_REQ_HSI_VER_1;
+		memset(&out_params, 0, sizeof(out_params));
+		rc = __qed_mcp_load_req(p_hwfn, p_ptt, &in_params, &out_params);
+		if (rc)
+			return rc;
+	} else if (out_params.load_code ==
+		   FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE) {
+		if (qed_mcp_can_force_load(in_params.drv_role,
+					   out_params.exist_drv_role,
+					   p_params->override_force_load)) {
+			DP_INFO(p_hwfn,
+				"A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, 0x%08x_0x%08x}, existing={%d, 0x%08x, 0x%08x_0x%08x}]\n",
+				in_params.drv_role, in_params.fw_ver,
+				in_params.drv_ver_0, in_params.drv_ver_1,
+				out_params.exist_drv_role,
+				out_params.exist_fw_ver,
+				out_params.exist_drv_ver_0,
+				out_params.exist_drv_ver_1);
+
+			qed_get_mfw_force_cmd(p_hwfn,
+					      QED_LOAD_REQ_FORCE_ALL,
+					      &mfw_force_cmd);
+
+			in_params.force_cmd = mfw_force_cmd;
+			memset(&out_params, 0, sizeof(out_params));
+			rc = __qed_mcp_load_req(p_hwfn, p_ptt, &in_params,
+						&out_params);
+			if (rc)
+				return rc;
+		} else {
+			DP_NOTICE(p_hwfn,
+				  "A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, 0x%08x_0x%08x}, existing={%d, 0x%08x, 0x%08x_0x%08x}] - Avoid\n",
+				  in_params.drv_role, in_params.fw_ver,
+				  in_params.drv_ver_0, in_params.drv_ver_1,
+				  out_params.exist_drv_role,
+				  out_params.exist_fw_ver,
+				  out_params.exist_drv_ver_0,
+				  out_params.exist_drv_ver_1);
+			DP_NOTICE(p_hwfn,
+				  "Avoid sending a force load request to prevent disruption of active PFs\n");
+
+			qed_mcp_cancel_load_req(p_hwfn, p_ptt);
+			return -EBUSY;
+		}
+	}
+
+	/* Now handle the other types of responses.
+	 * The "REFUSED_HSI_1" and "REFUSED_REQUIRES_FORCE" responses are not
+	 * expected here after the additional revised load requests were sent.
+	 */
+	switch (out_params.load_code) {
+	case FW_MSG_CODE_DRV_LOAD_ENGINE:
+	case FW_MSG_CODE_DRV_LOAD_PORT:
+	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
+		if (out_params.mfw_hsi_ver != QED_LOAD_REQ_HSI_VER_1 &&
+		    out_params.drv_exists) {
+			/* The role and fw/driver version match, but the PF is
+			 * already loaded and has not been unloaded gracefully.
+			 */
+			DP_NOTICE(p_hwfn,
+				  "PF is already loaded\n");
+			return -EINVAL;
+		}
+		break;
+	default:
+		DP_NOTICE(p_hwfn,
+			  "Unexpected refusal to load request [resp 0x%08x]. Aborting.\n",
+			  out_params.load_code);
 		return -EBUSY;
 	}
 
+	p_params->load_code = out_params.load_code;
+
 	return 0;
 }
 
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
index 7d067d0..f76afec 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
@@ -39,6 +39,7 @@
 #include <linux/spinlock.h>
 #include <linux/qed/qed_fcoe_if.h>
 #include "qed_hsi.h"
+#include "qed_dev_api.h"
 
 struct qed_mcp_link_speed_params {
 	bool    autoneg;
@@ -570,27 +571,35 @@ void qed_mcp_cmd_port_init(struct qed_hwfn *p_hwfn,
 int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
 			  struct qed_ptt *p_ptt);
 
+enum qed_drv_role {
+	QED_DRV_ROLE_OS,
+	QED_DRV_ROLE_KDUMP,
+};
+
+struct qed_load_req_params {
+	/* Input params */
+	enum qed_drv_role drv_role;
+	u8 timeout_val;
+	bool avoid_eng_reset;
+	enum qed_override_force_load override_force_load;
+
+	/* Output params */
+	u32 load_code;
+};
+
 /**
- * @brief Sends a LOAD_REQ to the MFW, and in case operation
- *        succeed, returns whether this PF is the first on the
- *        chip/engine/port or function. This function should be
- *        called when driver is ready to accept MFW events after
- *        Storms initializations are done.
+ * @brief Sends a LOAD_REQ to the MFW, and in case the operation succeeds,
+ *        returns whether this PF is the first on the engine/port or function.
  *
- * @param p_hwfn       - hw function
- * @param p_ptt        - PTT required for register access
- * @param p_load_code  - The MCP response param containing one
- *      of the following:
- *      FW_MSG_CODE_DRV_LOAD_ENGINE
- *      FW_MSG_CODE_DRV_LOAD_PORT
- *      FW_MSG_CODE_DRV_LOAD_FUNCTION
- * @return int -
- *      0 - Operation was successul.
- *      -EBUSY - Operation failed
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_params
+ *
+ * @return int - 0 - Operation was successful.
  */
 int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
 		     struct qed_ptt *p_ptt,
-		     u32 *p_load_code);
+		     struct qed_load_req_params *p_params);
 
 /**
  * @brief Sends a UNLOAD_REQ message to the MFW
-- 
1.9.3