Message-ID: <20240528043813.1342483-2-michal.swiatkowski@linux.intel.com>
Date: Tue, 28 May 2024 06:37:59 +0200
From: Michal Swiatkowski <michal.swiatkowski@...ux.intel.com>
To: intel-wired-lan@...ts.osuosl.org
Cc: netdev@...r.kernel.org,
jacob.e.keller@...el.com,
michal.kubiak@...el.com,
maciej.fijalkowski@...el.com,
sridhar.samudrala@...el.com,
przemyslaw.kitszel@...el.com,
wojciech.drewek@...el.com,
pio.raczynski@...il.com,
jiri@...dia.com,
mateusz.polchlopek@...el.com,
shayd@...dia.com,
kalesh-anakkur.purayil@...adcom.com
Subject: [iwl-next v3 01/15] ice: add new VSI type for subfunctions
From: Piotr Raczynski <piotr.raczynski@...el.com>

Add the required plumbing for a new VSI type dedicated to devlink
subfunctions. Make sure that the VSI is properly configured and
destroyed. Also allow loading XDP programs and attaching AF_XDP sockets.

The first implementation of devlink subfunctions supports only one
Tx/Rx queue pair per subfunction.

Reviewed-by: Przemek Kitszel <przemyslaw.kitszel@...el.com>
Reviewed-by: Wojciech Drewek <wojciech.drewek@...el.com>
Signed-off-by: Piotr Raczynski <piotr.raczynski@...el.com>
Signed-off-by: Michal Swiatkowski <michal.swiatkowski@...ux.intel.com>
---
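Note for reviewers (not part of the commit message): a minimal sketch of
the recurring VSI-type check this series extends. The helper below is
hypothetical and only illustrates that, after this patch, the PF VSI and
the new subfunction (SF) VSI are treated the same for XDP/AF_XDP
purposes; struct ice_vsi and the ICE_VSI_* values come from the existing
driver headers.

	static bool ice_vsi_supports_xdp(const struct ice_vsi *vsi)
	{
		/* XDP programs and AF_XDP socket pools may be attached to
		 * both the PF VSI and the single-queue-pair SF VSI.
		 */
		return vsi->type == ICE_VSI_PF || vsi->type == ICE_VSI_SF;
	}
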
drivers/net/ethernet/intel/ice/ice_base.c | 5 +++-
drivers/net/ethernet/intel/ice/ice_dcb_lib.c | 1 +
drivers/net/ethernet/intel/ice/ice_lib.c | 25 ++++++++++++++++++--
drivers/net/ethernet/intel/ice/ice_main.c | 7 ++++--
drivers/net/ethernet/intel/ice/ice_type.h | 1 +
drivers/net/ethernet/intel/ice/ice_xsk.c | 2 +-
6 files changed, 35 insertions(+), 6 deletions(-)
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index 5d396c1a7731..34beee29d205 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -330,6 +330,9 @@ ice_setup_tx_ctx(struct ice_tx_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf
tlan_ctx->vmvf_num = hw->func_caps.vf_base_id + vsi->vf->vf_id;
tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VF;
break;
+ case ICE_VSI_SF:
+ tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VMQ;
+ break;
default:
return;
}
@@ -526,7 +529,7 @@ static int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
ring->rx_buf_len = ring->vsi->rx_buf_len;
- if (ring->vsi->type == ICE_VSI_PF) {
+ if (ring->vsi->type == ICE_VSI_PF || ring->vsi->type == ICE_VSI_SF) {
if (!xdp_rxq_info_is_reg(&ring->xdp_rxq)) {
err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
ring->q_index,
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
index a94e7072b570..a7c510832824 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
@@ -187,6 +187,7 @@ void ice_vsi_set_dcb_tc_cfg(struct ice_vsi *vsi)
vsi->tc_cfg.numtc = ice_dcb_get_num_tc(cfg);
break;
case ICE_VSI_CHNL:
+ case ICE_VSI_SF:
vsi->tc_cfg.ena_tc = BIT(ice_get_first_droptc(vsi));
vsi->tc_cfg.numtc = 1;
break;
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index f559e60992fa..be1e65bafe13 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -20,6 +20,8 @@ const char *ice_vsi_type_str(enum ice_vsi_type vsi_type)
return "ICE_VSI_PF";
case ICE_VSI_VF:
return "ICE_VSI_VF";
+ case ICE_VSI_SF:
+ return "ICE_VSI_SF";
case ICE_VSI_CTRL:
return "ICE_VSI_CTRL";
case ICE_VSI_CHNL:
@@ -135,6 +137,7 @@ static void ice_vsi_set_num_desc(struct ice_vsi *vsi)
{
switch (vsi->type) {
case ICE_VSI_PF:
+ case ICE_VSI_SF:
case ICE_VSI_CTRL:
case ICE_VSI_LB:
/* a user could change the values of num_[tr]x_desc using
@@ -201,6 +204,12 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi)
max_t(int, vsi->alloc_rxq,
vsi->alloc_txq));
break;
+ case ICE_VSI_SF:
+ vsi->alloc_txq = 1;
+ vsi->alloc_rxq = 1;
+ vsi->num_q_vectors = 1;
+ vsi->irq_dyn_alloc = true;
+ break;
case ICE_VSI_VF:
if (vf->num_req_qs)
vf->num_vf_qs = vf->num_req_qs;
@@ -558,6 +567,7 @@ ice_vsi_alloc_def(struct ice_vsi *vsi, struct ice_channel *ch)
switch (vsi->type) {
case ICE_VSI_PF:
+ case ICE_VSI_SF:
/* Setup default MSIX irq handler for VSI */
vsi->irq_handler = ice_msix_clean_rings;
break;
@@ -886,6 +896,11 @@ static void ice_vsi_set_rss_params(struct ice_vsi *vsi)
max_rss_size);
vsi->rss_lut_type = ICE_LUT_PF;
break;
+ case ICE_VSI_SF:
+ vsi->rss_table_size = ICE_LUT_VSI_SIZE;
+ vsi->rss_size = min_t(u16, num_online_cpus(), max_rss_size);
+ vsi->rss_lut_type = ICE_LUT_VSI;
+ break;
case ICE_VSI_VF:
/* VF VSI will get a small RSS table.
* For VSI_LUT, LUT size should be set to 64 bytes.
@@ -1133,6 +1148,7 @@ static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF;
break;
case ICE_VSI_VF:
+ case ICE_VSI_SF:
/* VF VSI will gets a small RSS table which is a VSI LUT type */
lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI;
break;
@@ -1211,6 +1227,7 @@ static int ice_vsi_init(struct ice_vsi *vsi, u32 vsi_flags)
case ICE_VSI_PF:
ctxt->flags = ICE_AQ_VSI_TYPE_PF;
break;
+ case ICE_VSI_SF:
case ICE_VSI_CHNL:
ctxt->flags = ICE_AQ_VSI_TYPE_VMDQ2;
break;
@@ -2092,6 +2109,7 @@ static void ice_set_agg_vsi(struct ice_vsi *vsi)
case ICE_VSI_CHNL:
case ICE_VSI_LB:
case ICE_VSI_PF:
+ case ICE_VSI_SF:
max_agg_nodes = ICE_MAX_PF_AGG_NODES;
agg_node_id_start = ICE_PF_AGG_NODE_ID_START;
agg_node_iter = &pf->pf_agg_node[0];
@@ -2261,6 +2279,7 @@ static int ice_vsi_cfg_def(struct ice_vsi *vsi)
switch (vsi->type) {
case ICE_VSI_CTRL:
+ case ICE_VSI_SF:
case ICE_VSI_PF:
ret = ice_vsi_alloc_q_vectors(vsi);
if (ret)
@@ -2647,7 +2666,8 @@ int ice_ena_vsi(struct ice_vsi *vsi, bool locked)
clear_bit(ICE_VSI_NEEDS_RESTART, vsi->state);
- if (vsi->netdev && vsi->type == ICE_VSI_PF) {
+ if (vsi->netdev && (vsi->type == ICE_VSI_PF ||
+ vsi->type == ICE_VSI_SF)) {
if (netif_running(vsi->netdev)) {
if (!locked)
rtnl_lock();
@@ -2676,7 +2696,8 @@ void ice_dis_vsi(struct ice_vsi *vsi, bool locked)
set_bit(ICE_VSI_NEEDS_RESTART, vsi->state);
- if (vsi->type == ICE_VSI_PF && vsi->netdev) {
+ if (vsi->netdev && (vsi->type == ICE_VSI_PF ||
+ vsi->type == ICE_VSI_SF)) {
if (netif_running(vsi->netdev)) {
if (!locked)
rtnl_lock();
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 7d9a4e856f61..d05a875b81fe 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -2966,6 +2966,9 @@ int ice_vsi_determine_xdp_res(struct ice_vsi *vsi)
if (avail < cpus / 2)
return -ENOMEM;
+ if (vsi->type == ICE_VSI_SF)
+ avail = vsi->alloc_txq;
+
vsi->num_xdp_txq = min_t(u16, avail, cpus);
if (vsi->num_xdp_txq < cpus)
@@ -3082,8 +3085,8 @@ static int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp)
struct ice_netdev_priv *np = netdev_priv(dev);
struct ice_vsi *vsi = np->vsi;
- if (vsi->type != ICE_VSI_PF) {
- NL_SET_ERR_MSG_MOD(xdp->extack, "XDP can be loaded only on PF VSI");
+ if (vsi->type != ICE_VSI_PF && vsi->type != ICE_VSI_SF) {
+ NL_SET_ERR_MSG_MOD(xdp->extack, "XDP can be loaded only on PF or SF VSI");
return -EINVAL;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index aac59c85a911..ec368694c86f 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -150,6 +150,7 @@ enum ice_vsi_type {
ICE_VSI_CTRL = 3, /* equates to ICE_VSI_PF with 1 queue pair */
ICE_VSI_CHNL = 4,
ICE_VSI_LB = 6,
+ ICE_VSI_SF = 9,
};
struct ice_link_status {
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index a65955eb23c0..6247a3fec9de 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -287,7 +287,7 @@ ice_xsk_pool_enable(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
{
int err;
- if (vsi->type != ICE_VSI_PF)
+ if (vsi->type != ICE_VSI_PF && vsi->type != ICE_VSI_SF)
return -EINVAL;
if (qid >= vsi->netdev->real_num_rx_queues ||
--
2.42.0