Date:   Wed, 17 Jan 2018 03:21:11 -0500
From:   Michael Chan <michael.chan@...adcom.com>
To:     davem@...emloft.net
Cc:     netdev@...r.kernel.org
Subject: [PATCH net-next 09/14] bnxt_en: Implement new method for the PF to assign SRIOV resources.

Instead of the old method of evenly dividing the resources among the
VFs, use the new firmware API to specify min and max resources for each
VF.  This gives each VF the flexibility to allocate more or fewer
resources.

The min is the absolute minimum that each VF needs to function.  The
max is the global resources minus the resources used by the PF.  Each
VF is guaranteed the min; up to the max may be available to some VFs.
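
For one resource type, say RX rings, the arithmetic is roughly as
follows (an illustrative sketch with made-up numbers and names, not the
driver code; the real computation is in bnxt_hwrm_func_vf_resc_cfg()
below):

	/* Illustrative only: 128 RX rings in hardware, PF uses 8. */
	u16 hw_max_rx_rings = 128;
	u16 pf_rx_rings = 8;
	u16 vf_min_rx_rings = 1;                              /* guaranteed */
	u16 vf_max_rx_rings = hw_max_rx_rings - pf_rx_rings;  /* up to 120 */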

The PF driver can use one of two strategies, specified in NVRAM, to
assign the resources: the old legacy strategy of evenly dividing the
resources, or the new flexible strategy.
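
As a rough sketch, the per-VF window for each resource then depends on
the strategy (illustrative pseudo-C with made-up names, not the exact
driver code):

	/* Illustrative only: pick the min/max window for one resource.
	 * avail is what is left after subtracting the PF's own usage.
	 */
	static void pick_vf_window(u16 avail, u16 num_vfs, bool minimal,
				   u16 *min, u16 *max)
	{
		if (minimal) {
			*min = 1;      /* bare functional minimum */
			*max = avail;  /* a VF may grow into the whole pool */
		} else {
			/* legacy: divide evenly, so min == max */
			*min = *max = avail / num_vfs;
		}
	}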

Signed-off-by: Michael Chan <michael.chan@...adcom.com>
---
 drivers/net/ethernet/broadcom/bnxt/bnxt.c       |   8 ++
 drivers/net/ethernet/broadcom/bnxt/bnxt.h       |   3 +
 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c | 121 +++++++++++++++++++++++-
 3 files changed, 127 insertions(+), 5 deletions(-)

diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 8b74e6e..de79c90 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -5018,6 +5018,14 @@ static int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp)
 	hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx);
 	hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
 
+	if (BNXT_PF(bp)) {
+		struct bnxt_pf_info *pf = &bp->pf;
+
+		pf->vf_resv_strategy =
+			le16_to_cpu(resp->vf_reservation_strategy);
+		if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL)
+			pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL;
+	}
 hwrm_func_resc_qcaps_exit:
 	mutex_unlock(&bp->hwrm_cmd_lock);
 	return rc;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 9d12985..920c19b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -836,6 +836,9 @@ struct bnxt_pf_info {
 	u32	max_rx_wm_flows;
 	unsigned long	*vf_event_bmap;
 	u16	hwrm_cmd_req_pages;
+	u8	vf_resv_strategy;
+#define BNXT_VF_RESV_STRATEGY_MAXIMAL	0
+#define BNXT_VF_RESV_STRATEGY_MINIMAL	1
 	void			*hwrm_cmd_req_addr[4];
 	dma_addr_t		hwrm_cmd_req_dma_addr[4];
 	struct bnxt_vf_info	*vf;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index b369286..508e917 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -416,7 +416,100 @@ static int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
 	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 }
 
-/* only call by PF to reserve resources for VF */
+/* Only called by PF to reserve resources for VFs, returns actual number of
+ * VFs configured, or < 0 on error.
+ */
+static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs)
+{
+	struct hwrm_func_vf_resource_cfg_input req = {0};
+	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
+	u16 vf_tx_rings, vf_rx_rings, vf_cp_rings;
+	u16 vf_stat_ctx, vf_vnics, vf_ring_grps;
+	struct bnxt_pf_info *pf = &bp->pf;
+	int i, rc = 0;
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESOURCE_CFG, -1, -1);
+
+	vf_cp_rings = hw_resc->max_cp_rings - bp->cp_nr_rings;
+	vf_stat_ctx = hw_resc->max_stat_ctxs - bp->num_stat_ctxs;
+	if (bp->flags & BNXT_FLAG_AGG_RINGS)
+		vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings * 2;
+	else
+		vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings;
+	vf_ring_grps = hw_resc->max_hw_ring_grps - bp->rx_nr_rings;
+	vf_tx_rings = hw_resc->max_tx_rings - bp->tx_nr_rings;
+	vf_vnics = hw_resc->max_vnics - bp->nr_vnics;
+	vf_vnics = min_t(u16, vf_vnics, vf_rx_rings);
+
+	req.min_rsscos_ctx = cpu_to_le16(1);
+	req.max_rsscos_ctx = cpu_to_le16(1);
+	if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL) {
+		req.min_cmpl_rings = cpu_to_le16(1);
+		req.min_tx_rings = cpu_to_le16(1);
+		req.min_rx_rings = cpu_to_le16(1);
+		req.min_l2_ctxs = cpu_to_le16(1);
+		req.min_vnics = cpu_to_le16(1);
+		req.min_stat_ctx = cpu_to_le16(1);
+		req.min_hw_ring_grps = cpu_to_le16(1);
+	} else {
+		vf_cp_rings /= num_vfs;
+		vf_tx_rings /= num_vfs;
+		vf_rx_rings /= num_vfs;
+		vf_vnics /= num_vfs;
+		vf_stat_ctx /= num_vfs;
+		vf_ring_grps /= num_vfs;
+
+		req.min_cmpl_rings = cpu_to_le16(vf_cp_rings);
+		req.min_tx_rings = cpu_to_le16(vf_tx_rings);
+		req.min_rx_rings = cpu_to_le16(vf_rx_rings);
+		req.min_l2_ctxs = cpu_to_le16(4);
+		req.min_vnics = cpu_to_le16(vf_vnics);
+		req.min_stat_ctx = cpu_to_le16(vf_stat_ctx);
+		req.min_hw_ring_grps = cpu_to_le16(vf_ring_grps);
+	}
+	req.max_cmpl_rings = cpu_to_le16(vf_cp_rings);
+	req.max_tx_rings = cpu_to_le16(vf_tx_rings);
+	req.max_rx_rings = cpu_to_le16(vf_rx_rings);
+	req.max_l2_ctxs = cpu_to_le16(4);
+	req.max_vnics = cpu_to_le16(vf_vnics);
+	req.max_stat_ctx = cpu_to_le16(vf_stat_ctx);
+	req.max_hw_ring_grps = cpu_to_le16(vf_ring_grps);
+
+	mutex_lock(&bp->hwrm_cmd_lock);
+	for (i = 0; i < num_vfs; i++) {
+		req.vf_id = cpu_to_le16(pf->first_vf_id + i);
+		rc = _hwrm_send_message(bp, &req, sizeof(req),
+					HWRM_CMD_TIMEOUT);
+		if (rc) {
+			rc = -ENOMEM;
+			break;
+		}
+		pf->active_vfs = i + 1;
+		pf->vf[i].fw_fid = pf->first_vf_id + i;
+	}
+	mutex_unlock(&bp->hwrm_cmd_lock);
+	if (pf->active_vfs) {
+		u16 n = 1;
+
+		if (pf->vf_resv_strategy != BNXT_VF_RESV_STRATEGY_MINIMAL)
+			n = pf->active_vfs;
+
+		hw_resc->max_tx_rings -= vf_tx_rings * n;
+		hw_resc->max_rx_rings -= vf_rx_rings * n;
+		hw_resc->max_hw_ring_grps -= vf_ring_grps * n;
+		hw_resc->max_cp_rings -= vf_cp_rings * n;
+		hw_resc->max_rsscos_ctxs -= pf->active_vfs;
+		hw_resc->max_stat_ctxs -= vf_stat_ctx * n;
+		hw_resc->max_vnics -= vf_vnics * n;
+
+		rc = pf->active_vfs;
+	}
+	return rc;
+}
+
+/* Only called by PF to reserve resources for VFs, returns actual number of
+ * VFs configured, or < 0 on error.
+ */
 static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
 {
 	u32 rc = 0, mtu, i;
@@ -489,7 +582,9 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
 		total_vf_tx_rings += vf_tx_rsvd;
 	}
 	mutex_unlock(&bp->hwrm_cmd_lock);
-	if (!rc) {
+	if (rc)
+		rc = -ENOMEM;
+	if (pf->active_vfs) {
 		hw_resc->max_tx_rings -= total_vf_tx_rings;
 		hw_resc->max_rx_rings -= vf_rx_rings * num_vfs;
 		hw_resc->max_hw_ring_grps -= vf_ring_grps * num_vfs;
@@ -497,10 +592,19 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
 		hw_resc->max_rsscos_ctxs -= num_vfs;
 		hw_resc->max_stat_ctxs -= vf_stat_ctx * num_vfs;
 		hw_resc->max_vnics -= vf_vnics * num_vfs;
+		rc = pf->active_vfs;
 	}
 	return rc;
 }
 
+static int bnxt_func_cfg(struct bnxt *bp, int num_vfs)
+{
+	if (bp->flags & BNXT_FLAG_NEW_RM)
+		return bnxt_hwrm_func_vf_resc_cfg(bp, num_vfs);
+	else
+		return bnxt_hwrm_func_cfg(bp, num_vfs);
+}
+
 static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
 {
 	int rc = 0, vfs_supported;
@@ -567,9 +671,16 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
 		goto err_out1;
 
 	/* Reserve resources for VFs */
-	rc = bnxt_hwrm_func_cfg(bp, *num_vfs);
-	if (rc)
-		goto err_out2;
+	rc = bnxt_func_cfg(bp, *num_vfs);
+	if (rc != *num_vfs) {
+		if (rc <= 0) {
+			netdev_warn(bp->dev, "Unable to reserve resources for SRIOV.\n");
+			*num_vfs = 0;
+			goto err_out2;
+		}
+		netdev_warn(bp->dev, "Only able to reserve resources for %d VFs.\n", rc);
+		*num_vfs = rc;
+	}
 
 	/* Register buffers for VFs */
 	rc = bnxt_hwrm_func_buf_rgtr(bp);
-- 
1.8.3.1
