Message-Id: <20251013231341.1139603-4-joshua.a.hay@intel.com>
Date: Mon, 13 Oct 2025 16:13:35 -0700
From: Joshua Hay <joshua.a.hay@...el.com>
To: intel-wired-lan@...ts.osuosl.org
Cc: netdev@...r.kernel.org
Subject: [Intel-wired-lan][PATCH iwl-next v8 3/9] idpf: move queue resources to idpf_q_vec_rsrc structure
From: Pavan Kumar Linga <pavan.kumar.linga@...el.com>
Move both TX and RX queue resources to the newly introduced
idpf_q_vec_rsrc structure.
While at it, declare the loop iterators inside the loops and use the correct
types.
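For illustration, a simplified before/after sketch of the resulting access
pattern (field names are taken from this patch; surrounding code is elided):

	/* before: queue counts and groups lived directly in the vport */
	for (i = 0; i < vport->num_rxq_grp; i++)
		rx_qgrp = &vport->rxq_grps[i];

	/* after: the same state is reached via the default resource set,
	 * with the iterator declared in the loop and correctly typed
	 */
	struct idpf_q_vec_rsrc *rsrc = &vport->dflt_qv_rsrc;

	for (u16 i = 0; i < rsrc->num_rxq_grp; i++)
		rx_qgrp = &rsrc->rxq_grps[i];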
Reviewed-by: Anton Nadezhdin <anton.nadezhdin@...el.com>
Signed-off-by: Pavan Kumar Linga <pavan.kumar.linga@...el.com>
Signed-off-by: Joshua Hay <joshua.a.hay@...el.com>
---
v8:
- rebase on AF_XDP series
- remove dev param from rx_desc_rel and access through q_vector
- introduce per queue RSC flag to avoid vport check
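  A minimal sketch of the per-queue RSC flag (the setters are taken from
  this patch; the consuming check shown is illustrative, the real check
  sites live in the queue-configuration paths of this series):

	/* set once per queue at group-alloc time, from the vport feature */
	rsc = idpf_is_feature_ena(vport, NETIF_F_GRO_HW);
	idpf_queue_assign(RSC_EN, q, rsc);

	/* later code can test the queue itself instead of reaching back
	 * into the vport
	 */
	if (idpf_queue_has(RSC_EN, rxq))
		/* program RSC for this queue */;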
---
drivers/net/ethernet/intel/idpf/idpf.h | 69 +--
.../net/ethernet/intel/idpf/idpf_ethtool.c | 91 ++--
drivers/net/ethernet/intel/idpf/idpf_lib.c | 71 +--
drivers/net/ethernet/intel/idpf/idpf_ptp.c | 17 +-
drivers/net/ethernet/intel/idpf/idpf_txrx.c | 451 +++++++++---------
drivers/net/ethernet/intel/idpf/idpf_txrx.h | 21 +-
.../net/ethernet/intel/idpf/idpf_virtchnl.c | 258 +++++-----
.../net/ethernet/intel/idpf/idpf_virtchnl.h | 12 +-
drivers/net/ethernet/intel/idpf/xdp.c | 37 +-
drivers/net/ethernet/intel/idpf/xdp.h | 6 +-
drivers/net/ethernet/intel/idpf/xsk.c | 7 +-
11 files changed, 561 insertions(+), 479 deletions(-)
diff --git a/drivers/net/ethernet/intel/idpf/idpf.h b/drivers/net/ethernet/intel/idpf/idpf.h
index f32dc148cfc2..781e0c8dbc2f 100644
--- a/drivers/net/ethernet/intel/idpf/idpf.h
+++ b/drivers/net/ethernet/intel/idpf/idpf.h
@@ -297,6 +297,25 @@ struct idpf_fsteer_fltr {
* @noirq_v_idx: ID of the NOIRQ vector
* @noirq_dyn_ctl_ena: value to write to the above to enable it
* @noirq_dyn_ctl: register to enable/disable the vector for NOIRQ queues
+ * @txq_grps: array of TX queue groups
+ * @txq_desc_count: TX queue descriptor count
+ * @complq_desc_count: completion queue descriptor count
+ * @txq_model: split queue or single queue queuing model
+ * @num_txq: number of allocated TX queues
+ * @num_complq: number of allocated completion queues
+ * @num_txq_grp: number of TX queue groups
+ * @num_rxq_grp: number of RX queue groups
+ * @rxq_model: split queue or single queue queuing model
+ * @rxq_grps: array of RX queue groups. Number of groups * number of RX
+ *            queues per group yields the total number of RX queues.
+ * @num_rxq: number of allocated RX queues
+ * @num_bufq: number of allocated buffer queues
+ * @rxq_desc_count: RX queue descriptor count. *MUST* have enough descriptors
+ * to complete all buffer descriptors for all buffer queues in
+ * the worst case.
+ * @bufq_desc_count: buffer queue descriptor count
+ * @num_bufqs_per_qgrp: buffer queues per RX queue in a given grouping
+ * @base_rxd: true if the driver should use base descriptors instead of flex
*/
struct idpf_q_vec_rsrc {
struct idpf_q_vector *q_vectors;
@@ -306,36 +325,36 @@ struct idpf_q_vec_rsrc {
u32 noirq_dyn_ctl_ena;
void __iomem *noirq_dyn_ctl;
+ struct idpf_txq_group *txq_grps;
+ u32 txq_desc_count;
+ u32 complq_desc_count;
+ u32 txq_model;
+ u16 num_txq;
+ u16 num_complq;
+ u16 num_txq_grp;
+
+ u16 num_rxq_grp;
+ u32 rxq_model;
+ struct idpf_rxq_group *rxq_grps;
+ u16 num_rxq;
+ u16 num_bufq;
+ u32 rxq_desc_count;
+ u32 bufq_desc_count[IDPF_MAX_BUFQS_PER_RXQ_GRP];
+ u8 num_bufqs_per_qgrp;
+ bool base_rxd;
};
/**
* struct idpf_vport - Handle for netdevices and queue resources
* @dflt_qv_rsrc: contains default queue and vector resources
* @num_txq: Number of allocated TX queues
- * @num_complq: Number of allocated completion queues
- * @txq_desc_count: TX queue descriptor count
- * @complq_desc_count: Completion queue descriptor count
* @compln_clean_budget: Work budget for completion clean
- * @num_txq_grp: Number of TX queue groups
- * @txq_grps: Array of TX queue groups
- * @txq_model: Split queue or single queue queuing model
* @txqs: Used only in hotpath to get to the right queue very fast
* @crc_enable: Enable CRC insertion offload
* @xdpsq_share: whether XDPSQ sharing is enabled
* @num_xdp_txq: number of XDPSQs
* @xdp_txq_offset: index of the first XDPSQ (== number of regular SQs)
* @xdp_prog: installed XDP program
- * @num_rxq: Number of allocated RX queues
- * @num_bufq: Number of allocated buffer queues
- * @rxq_desc_count: RX queue descriptor count. *MUST* have enough descriptors
- * to complete all buffer descriptors for all buffer queues in
- * the worst case.
- * @num_bufqs_per_qgrp: Buffer queues per RX queue in a given grouping
- * @bufq_desc_count: Buffer queue descriptor count
- * @num_rxq_grp: Number of RX queues in a group
- * @rxq_grps: Total number of RX groups. Number of groups * number of RX per
- * group will yield total number of RX queues.
- * @rxq_model: Splitq queue or single queue queuing model
* @rx_ptype_lkup: Lookup table for ptypes on RX
* @vdev_info: IDC vport device info pointer
* @adapter: back pointer to associated adapter
@@ -346,7 +365,6 @@ struct idpf_q_vec_rsrc {
* @vport_id: Device given vport identifier
* @idx: Software index in adapter vports struct
* @default_vport: Use this vport if one isn't specified
- * @base_rxd: True if the driver should use base descriptors instead of flex
* @max_mtu: device given max possible MTU
* @default_mac_addr: device will give a default MAC to use
* @rx_itr_profile: RX profiles for Dynamic Interrupt Moderation
@@ -361,13 +379,7 @@ struct idpf_q_vec_rsrc {
struct idpf_vport {
struct idpf_q_vec_rsrc dflt_qv_rsrc;
u16 num_txq;
- u16 num_complq;
- u32 txq_desc_count;
- u32 complq_desc_count;
u32 compln_clean_budget;
- u16 num_txq_grp;
- struct idpf_txq_group *txq_grps;
- u32 txq_model;
struct idpf_tx_queue **txqs;
bool crc_enable;
@@ -376,14 +388,6 @@ struct idpf_vport {
u16 xdp_txq_offset;
struct bpf_prog *xdp_prog;
- u16 num_rxq;
- u16 num_bufq;
- u32 rxq_desc_count;
- u8 num_bufqs_per_qgrp;
- u32 bufq_desc_count[IDPF_MAX_BUFQS_PER_RXQ_GRP];
- u16 num_rxq_grp;
- struct idpf_rxq_group *rxq_grps;
- u32 rxq_model;
struct libeth_rx_pt *rx_ptype_lkup;
struct iidc_rdma_vport_dev_info *vdev_info;
@@ -395,7 +399,6 @@ struct idpf_vport {
u32 vport_id;
u16 idx;
bool default_vport;
- bool base_rxd;
u16 max_mtu;
u8 default_mac_addr[ETH_ALEN];
diff --git a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
index 3fbe94a4ce6b..6c6eaf7a7851 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
@@ -31,7 +31,7 @@ static int idpf_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
switch (cmd->cmd) {
case ETHTOOL_GRXRINGS:
- cmd->data = vport->num_rxq;
+ cmd->data = vport->dflt_qv_rsrc.num_rxq;
break;
case ETHTOOL_GRXCLSRLCNT:
cmd->rule_cnt = user_config->num_fsteer_fltrs;
@@ -622,8 +622,8 @@ static void idpf_get_ringparam(struct net_device *netdev,
ring->rx_max_pending = IDPF_MAX_RXQ_DESC;
ring->tx_max_pending = IDPF_MAX_TXQ_DESC;
- ring->rx_pending = vport->rxq_desc_count;
- ring->tx_pending = vport->txq_desc_count;
+ ring->rx_pending = vport->dflt_qv_rsrc.rxq_desc_count;
+ ring->tx_pending = vport->dflt_qv_rsrc.txq_desc_count;
kring->tcp_data_split = idpf_vport_get_hsplit(vport);
@@ -647,8 +647,9 @@ static int idpf_set_ringparam(struct net_device *netdev,
{
struct idpf_vport_user_config_data *config_data;
u32 new_rx_count, new_tx_count;
+ struct idpf_q_vec_rsrc *rsrc;
struct idpf_vport *vport;
- int i, err = 0;
+ int err = 0;
u16 idx;
idpf_vport_ctrl_lock(netdev);
@@ -682,8 +683,9 @@ static int idpf_set_ringparam(struct net_device *netdev,
netdev_info(netdev, "Requested Tx descriptor count rounded up to %u\n",
new_tx_count);
- if (new_tx_count == vport->txq_desc_count &&
- new_rx_count == vport->rxq_desc_count &&
+ rsrc = &vport->dflt_qv_rsrc;
+ if (new_tx_count == rsrc->txq_desc_count &&
+ new_rx_count == rsrc->rxq_desc_count &&
kring->tcp_data_split == idpf_vport_get_hsplit(vport))
goto unlock_mutex;
@@ -702,10 +704,10 @@ static int idpf_set_ringparam(struct net_device *netdev,
/* Since we adjusted the RX completion queue count, the RX buffer queue
* descriptor count needs to be adjusted as well
*/
- for (i = 0; i < vport->num_bufqs_per_qgrp; i++)
- vport->bufq_desc_count[i] =
+ for (u8 i = 0; i < rsrc->num_bufqs_per_qgrp; i++)
+ rsrc->bufq_desc_count[i] =
IDPF_RX_BUFQ_DESC_COUNT(new_rx_count,
- vport->num_bufqs_per_qgrp);
+ rsrc->num_bufqs_per_qgrp);
err = idpf_initiate_soft_reset(vport, IDPF_SR_Q_DESC_CHANGE);
@@ -1082,7 +1084,7 @@ static void idpf_add_port_stats(struct idpf_vport *vport, u64 **data)
static void idpf_collect_queue_stats(struct idpf_vport *vport)
{
struct idpf_port_stats *pstats = &vport->port_stats;
- int i, j;
+ struct idpf_q_vec_rsrc *rsrc = &vport->dflt_qv_rsrc;
/* zero out port stats since they're actually tracked in per
* queue stats; this is only for reporting
@@ -1098,22 +1100,22 @@ static void idpf_collect_queue_stats(struct idpf_vport *vport)
u64_stats_set(&pstats->tx_dma_map_errs, 0);
u64_stats_update_end(&pstats->stats_sync);
- for (i = 0; i < vport->num_rxq_grp; i++) {
- struct idpf_rxq_group *rxq_grp = &vport->rxq_grps[i];
+ for (u16 i = 0; i < rsrc->num_rxq_grp; i++) {
+ struct idpf_rxq_group *rxq_grp = &rsrc->rxq_grps[i];
u16 num_rxq;
- if (idpf_is_queue_model_split(vport->rxq_model))
+ if (idpf_is_queue_model_split(rsrc->rxq_model))
num_rxq = rxq_grp->splitq.num_rxq_sets;
else
num_rxq = rxq_grp->singleq.num_rxq;
- for (j = 0; j < num_rxq; j++) {
+ for (u16 j = 0; j < num_rxq; j++) {
u64 hw_csum_err, hsplit, hsplit_hbo, bad_descs;
struct idpf_rx_queue_stats *stats;
struct idpf_rx_queue *rxq;
unsigned int start;
- if (idpf_is_queue_model_split(vport->rxq_model))
+ if (idpf_is_queue_model_split(rsrc->rxq_model))
rxq = &rxq_grp->splitq.rxq_sets[j]->rxq;
else
rxq = rxq_grp->singleq.rxqs[j];
@@ -1140,10 +1142,10 @@ static void idpf_collect_queue_stats(struct idpf_vport *vport)
}
}
- for (i = 0; i < vport->num_txq_grp; i++) {
- struct idpf_txq_group *txq_grp = &vport->txq_grps[i];
+ for (u16 i = 0; i < rsrc->num_txq_grp; i++) {
+ struct idpf_txq_group *txq_grp = &rsrc->txq_grps[i];
- for (j = 0; j < txq_grp->num_txq; j++) {
+ for (u16 j = 0; j < txq_grp->num_txq; j++) {
u64 linearize, qbusy, skb_drops, dma_map_errs;
struct idpf_tx_queue *txq = txq_grp->txqs[j];
struct idpf_tx_queue_stats *stats;
@@ -1186,9 +1188,9 @@ static void idpf_get_ethtool_stats(struct net_device *netdev,
{
struct idpf_netdev_priv *np = netdev_priv(netdev);
struct idpf_vport_config *vport_config;
+ struct idpf_q_vec_rsrc *rsrc;
struct idpf_vport *vport;
unsigned int total = 0;
- unsigned int i, j;
bool is_splitq;
u16 qtype;
@@ -1206,12 +1208,13 @@ static void idpf_get_ethtool_stats(struct net_device *netdev,
idpf_collect_queue_stats(vport);
idpf_add_port_stats(vport, &data);
- for (i = 0; i < vport->num_txq_grp; i++) {
- struct idpf_txq_group *txq_grp = &vport->txq_grps[i];
+ rsrc = &vport->dflt_qv_rsrc;
+ for (u16 i = 0; i < rsrc->num_txq_grp; i++) {
+ struct idpf_txq_group *txq_grp = &rsrc->txq_grps[i];
qtype = VIRTCHNL2_QUEUE_TYPE_TX;
- for (j = 0; j < txq_grp->num_txq; j++, total++) {
+ for (u16 j = 0; j < txq_grp->num_txq; j++, total++) {
struct idpf_tx_queue *txq = txq_grp->txqs[j];
if (!txq)
@@ -1231,10 +1234,10 @@ static void idpf_get_ethtool_stats(struct net_device *netdev,
idpf_add_empty_queue_stats(&data, VIRTCHNL2_QUEUE_TYPE_TX);
total = 0;
- is_splitq = idpf_is_queue_model_split(vport->rxq_model);
+ is_splitq = idpf_is_queue_model_split(rsrc->rxq_model);
- for (i = 0; i < vport->num_rxq_grp; i++) {
- struct idpf_rxq_group *rxq_grp = &vport->rxq_grps[i];
+ for (u16 i = 0; i < rsrc->num_rxq_grp; i++) {
+ struct idpf_rxq_group *rxq_grp = &rsrc->rxq_grps[i];
u16 num_rxq;
qtype = VIRTCHNL2_QUEUE_TYPE_RX;
@@ -1244,7 +1247,7 @@ static void idpf_get_ethtool_stats(struct net_device *netdev,
else
num_rxq = rxq_grp->singleq.num_rxq;
- for (j = 0; j < num_rxq; j++, total++) {
+ for (u16 j = 0; j < num_rxq; j++, total++) {
struct idpf_rx_queue *rxq;
if (is_splitq)
@@ -1276,15 +1279,16 @@ static void idpf_get_ethtool_stats(struct net_device *netdev,
struct idpf_q_vector *idpf_find_rxq_vec(const struct idpf_vport *vport,
u32 q_num)
{
+ const struct idpf_q_vec_rsrc *rsrc = &vport->dflt_qv_rsrc;
int q_grp, q_idx;
- if (!idpf_is_queue_model_split(vport->rxq_model))
- return vport->rxq_grps->singleq.rxqs[q_num]->q_vector;
+ if (!idpf_is_queue_model_split(rsrc->rxq_model))
+ return rsrc->rxq_grps->singleq.rxqs[q_num]->q_vector;
q_grp = q_num / IDPF_DFLT_SPLITQ_RXQ_PER_GROUP;
q_idx = q_num % IDPF_DFLT_SPLITQ_RXQ_PER_GROUP;
- return vport->rxq_grps[q_grp].splitq.rxq_sets[q_idx]->rxq.q_vector;
+ return rsrc->rxq_grps[q_grp].splitq.rxq_sets[q_idx]->rxq.q_vector;
}
/**
@@ -1297,14 +1301,15 @@ struct idpf_q_vector *idpf_find_rxq_vec(const struct idpf_vport *vport,
struct idpf_q_vector *idpf_find_txq_vec(const struct idpf_vport *vport,
u32 q_num)
{
+ const struct idpf_q_vec_rsrc *rsrc = &vport->dflt_qv_rsrc;
int q_grp;
- if (!idpf_is_queue_model_split(vport->txq_model))
+ if (!idpf_is_queue_model_split(rsrc->txq_model))
return vport->txqs[q_num]->q_vector;
q_grp = q_num / IDPF_DFLT_SPLITQ_TXQ_PER_GROUP;
- return vport->txq_grps[q_grp].complq->q_vector;
+ return rsrc->txq_grps[q_grp].complq->q_vector;
}
/**
@@ -1341,7 +1346,8 @@ static int idpf_get_q_coalesce(struct net_device *netdev,
u32 q_num)
{
const struct idpf_netdev_priv *np = netdev_priv(netdev);
- const struct idpf_vport *vport;
+ struct idpf_q_vec_rsrc *rsrc;
+ struct idpf_vport *vport;
int err = 0;
idpf_vport_ctrl_lock(netdev);
@@ -1350,16 +1356,17 @@ static int idpf_get_q_coalesce(struct net_device *netdev,
if (!test_bit(IDPF_VPORT_UP, np->state))
goto unlock_mutex;
- if (q_num >= vport->num_rxq && q_num >= vport->num_txq) {
+ rsrc = &vport->dflt_qv_rsrc;
+ if (q_num >= rsrc->num_rxq && q_num >= rsrc->num_txq) {
err = -EINVAL;
goto unlock_mutex;
}
- if (q_num < vport->num_rxq)
+ if (q_num < rsrc->num_rxq)
__idpf_get_q_coalesce(ec, idpf_find_rxq_vec(vport, q_num),
VIRTCHNL2_QUEUE_TYPE_RX);
- if (q_num < vport->num_txq)
+ if (q_num < rsrc->num_txq)
__idpf_get_q_coalesce(ec, idpf_find_txq_vec(vport, q_num),
VIRTCHNL2_QUEUE_TYPE_TX);
@@ -1527,8 +1534,9 @@ static int idpf_set_coalesce(struct net_device *netdev,
struct idpf_netdev_priv *np = netdev_priv(netdev);
struct idpf_vport_user_config_data *user_config;
struct idpf_q_coalesce *q_coal;
+ struct idpf_q_vec_rsrc *rsrc;
struct idpf_vport *vport;
- int i, err = 0;
+ int err = 0;
user_config = &np->adapter->vport_config[np->vport_idx]->user_config;
@@ -1538,14 +1546,15 @@ static int idpf_set_coalesce(struct net_device *netdev,
if (!test_bit(IDPF_VPORT_UP, np->state))
goto unlock_mutex;
- for (i = 0; i < vport->num_txq; i++) {
+ rsrc = &vport->dflt_qv_rsrc;
+ for (u16 i = 0; i < rsrc->num_txq; i++) {
q_coal = &user_config->q_coalesce[i];
err = idpf_set_q_coalesce(vport, q_coal, ec, i, false);
if (err)
goto unlock_mutex;
}
- for (i = 0; i < vport->num_rxq; i++) {
+ for (u16 i = 0; i < rsrc->num_rxq; i++) {
q_coal = &user_config->q_coalesce[i];
err = idpf_set_q_coalesce(vport, q_coal, ec, i, true);
if (err)
@@ -1726,6 +1735,7 @@ static void idpf_get_ts_stats(struct net_device *netdev,
struct ethtool_ts_stats *ts_stats)
{
struct idpf_netdev_priv *np = netdev_priv(netdev);
+ struct idpf_q_vec_rsrc *rsrc;
struct idpf_vport *vport;
unsigned int start;
@@ -1741,8 +1751,9 @@ static void idpf_get_ts_stats(struct net_device *netdev,
if (!test_bit(IDPF_VPORT_UP, np->state))
goto exit;
- for (u16 i = 0; i < vport->num_txq_grp; i++) {
- struct idpf_txq_group *txq_grp = &vport->txq_grps[i];
+ rsrc = &vport->dflt_qv_rsrc;
+ for (u16 i = 0; i < rsrc->num_txq_grp; i++) {
+ struct idpf_txq_group *txq_grp = &rsrc->txq_grps[i];
for (u16 j = 0; j < txq_grp->num_txq; j++) {
struct idpf_tx_queue *txq = txq_grp->txqs[j];
diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c
index 9ede6780758d..6d64db81e1e6 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_lib.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c
@@ -932,7 +932,7 @@ static void idpf_vport_stop(struct idpf_vport *vport, bool rtnl)
idpf_send_disable_vport_msg(vport);
idpf_send_disable_queues_msg(vport);
- idpf_send_map_unmap_queue_vector_msg(vport, false);
+ idpf_send_map_unmap_queue_vector_msg(vport, rsrc, false);
/* Normally we ask for queues in create_vport, but if the number of
* initially requested queues have changed, for example via ethtool
* set channels, we do delete queues and then add the queues back
@@ -945,8 +945,8 @@ static void idpf_vport_stop(struct idpf_vport *vport, bool rtnl)
vport->link_up = false;
idpf_vport_intr_deinit(vport, rsrc);
- idpf_xdp_rxq_info_deinit_all(vport);
- idpf_vport_queues_rel(vport);
+ idpf_xdp_rxq_info_deinit_all(rsrc);
+ idpf_vport_queues_rel(vport, rsrc);
idpf_vport_intr_rel(rsrc);
if (rtnl)
@@ -1096,7 +1096,7 @@ static void idpf_vport_dealloc(struct idpf_vport *vport)
*/
static bool idpf_is_hsplit_supported(const struct idpf_vport *vport)
{
- return idpf_is_queue_model_split(vport->rxq_model) &&
+ return idpf_is_queue_model_split(vport->dflt_qv_rsrc.rxq_model) &&
idpf_is_cap_ena_all(vport->adapter, IDPF_HSPLIT_CAPS,
IDPF_CAP_HSPLIT);
}
@@ -1356,9 +1356,10 @@ static void idpf_restore_features(struct idpf_vport *vport)
*/
static int idpf_set_real_num_queues(struct idpf_vport *vport)
{
- int err, txq = vport->num_txq - vport->num_xdp_txq;
+ int err, txq = vport->dflt_qv_rsrc.num_txq - vport->num_xdp_txq;
- err = netif_set_real_num_rx_queues(vport->netdev, vport->num_rxq);
+ err = netif_set_real_num_rx_queues(vport->netdev,
+ vport->dflt_qv_rsrc.num_rxq);
if (err)
return err;
@@ -1387,24 +1388,22 @@ static int idpf_up_complete(struct idpf_vport *vport)
/**
* idpf_rx_init_buf_tail - Write initial buffer ring tail value
- * @vport: virtual port struct
+ * @rsrc: pointer to queue and vector resources
*/
-static void idpf_rx_init_buf_tail(struct idpf_vport *vport)
+static void idpf_rx_init_buf_tail(struct idpf_q_vec_rsrc *rsrc)
{
- int i, j;
+ for (u16 i = 0; i < rsrc->num_rxq_grp; i++) {
+ struct idpf_rxq_group *grp = &rsrc->rxq_grps[i];
- for (i = 0; i < vport->num_rxq_grp; i++) {
- struct idpf_rxq_group *grp = &vport->rxq_grps[i];
-
- if (idpf_is_queue_model_split(vport->rxq_model)) {
- for (j = 0; j < vport->num_bufqs_per_qgrp; j++) {
+ if (idpf_is_queue_model_split(rsrc->rxq_model)) {
+ for (u8 j = 0; j < rsrc->num_bufqs_per_qgrp; j++) {
const struct idpf_buf_queue *q =
&grp->splitq.bufq_sets[j].bufq;
writel(q->next_to_alloc, q->tail);
}
} else {
- for (j = 0; j < grp->singleq.num_rxq; j++) {
+ for (u16 j = 0; j < grp->singleq.num_rxq; j++) {
const struct idpf_rx_queue *q =
grp->singleq.rxqs[j];
@@ -1444,14 +1443,14 @@ static int idpf_vport_open(struct idpf_vport *vport, bool rtnl)
goto err_rtnl_unlock;
}
- err = idpf_vport_queues_alloc(vport);
+ err = idpf_vport_queues_alloc(vport, rsrc);
if (err)
goto intr_rel;
vport_config = adapter->vport_config[vport->idx];
chunks = &vport_config->qid_reg_info;
- err = idpf_vport_queue_ids_init(vport, chunks);
+ err = idpf_vport_queue_ids_init(vport, rsrc, chunks);
if (err) {
dev_err(&adapter->pdev->dev, "Failed to initialize queue ids for vport %u: %d\n",
vport->vport_id, err);
@@ -1465,23 +1464,23 @@ static int idpf_vport_open(struct idpf_vport *vport, bool rtnl)
goto queues_rel;
}
- err = idpf_queue_reg_init(vport, chunks);
+ err = idpf_queue_reg_init(vport, rsrc, chunks);
if (err) {
dev_err(&adapter->pdev->dev, "Failed to initialize queue registers for vport %u: %d\n",
vport->vport_id, err);
goto queues_rel;
}
- err = idpf_rx_bufs_init_all(vport);
+ err = idpf_rx_bufs_init_all(vport, rsrc);
if (err) {
dev_err(&adapter->pdev->dev, "Failed to initialize RX buffers for vport %u: %d\n",
vport->vport_id, err);
goto queues_rel;
}
- idpf_rx_init_buf_tail(vport);
+ idpf_rx_init_buf_tail(rsrc);
- err = idpf_xdp_rxq_info_init_all(vport);
+ err = idpf_xdp_rxq_info_init_all(rsrc);
if (err) {
netdev_err(vport->netdev,
"Failed to initialize XDP RxQ info for vport %u: %pe\n",
@@ -1491,14 +1490,14 @@ static int idpf_vport_open(struct idpf_vport *vport, bool rtnl)
idpf_vport_intr_ena(vport, rsrc);
- err = idpf_send_config_queues_msg(vport);
+ err = idpf_send_config_queues_msg(vport, rsrc);
if (err) {
dev_err(&adapter->pdev->dev, "Failed to configure queues for vport %u, %d\n",
vport->vport_id, err);
goto rxq_deinit;
}
- err = idpf_send_map_unmap_queue_vector_msg(vport, true);
+ err = idpf_send_map_unmap_queue_vector_msg(vport, rsrc, true);
if (err) {
dev_err(&adapter->pdev->dev, "Failed to map queue vectors for vport %u: %d\n",
vport->vport_id, err);
@@ -1551,13 +1550,13 @@ static int idpf_vport_open(struct idpf_vport *vport, bool rtnl)
disable_queues:
idpf_send_disable_queues_msg(vport);
unmap_queue_vectors:
- idpf_send_map_unmap_queue_vector_msg(vport, false);
+ idpf_send_map_unmap_queue_vector_msg(vport, rsrc, false);
rxq_deinit:
- idpf_xdp_rxq_info_deinit_all(vport);
+ idpf_xdp_rxq_info_deinit_all(rsrc);
intr_deinit:
idpf_vport_intr_deinit(vport, rsrc);
queues_rel:
- idpf_vport_queues_rel(vport);
+ idpf_vport_queues_rel(vport, rsrc);
intr_rel:
idpf_vport_intr_rel(rsrc);
@@ -1980,8 +1979,10 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport,
{
struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
bool vport_is_up = test_bit(IDPF_VPORT_UP, np->state);
+ struct idpf_q_vec_rsrc *rsrc = &vport->dflt_qv_rsrc;
struct idpf_adapter *adapter = vport->adapter;
struct idpf_vport_config *vport_config;
+ struct idpf_q_vec_rsrc *new_rsrc;
struct idpf_vport *new_vport;
int err;
@@ -2008,16 +2009,18 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport,
*/
memcpy(new_vport, vport, offsetof(struct idpf_vport, link_up));
+ new_rsrc = &new_vport->dflt_qv_rsrc;
+
/* Adjust resource parameters prior to reallocating resources */
switch (reset_cause) {
case IDPF_SR_Q_CHANGE:
- err = idpf_vport_adjust_qs(new_vport);
+ err = idpf_vport_adjust_qs(new_vport, new_rsrc);
if (err)
goto free_vport;
break;
case IDPF_SR_Q_DESC_CHANGE:
/* Update queue parameters before allocating resources */
- idpf_vport_calc_num_q_desc(new_vport);
+ idpf_vport_calc_num_q_desc(new_vport, new_rsrc);
break;
case IDPF_SR_MTU_CHANGE:
idpf_idc_vdev_mtu_event(vport->vdev_info,
@@ -2047,10 +2050,10 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport,
* to add code to add_queues to change the vport config within
* vport itself as it will be wiped with a memcpy later.
*/
- err = idpf_send_add_queues_msg(vport, new_vport->num_txq,
- new_vport->num_complq,
- new_vport->num_rxq,
- new_vport->num_bufq);
+ err = idpf_send_add_queues_msg(vport, new_rsrc->num_txq,
+ new_rsrc->num_complq,
+ new_rsrc->num_rxq,
+ new_rsrc->num_bufq);
if (err)
goto err_reset;
@@ -2072,8 +2075,8 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport,
goto free_vport;
err_reset:
- idpf_send_add_queues_msg(vport, vport->num_txq, vport->num_complq,
- vport->num_rxq, vport->num_bufq);
+ idpf_send_add_queues_msg(vport, rsrc->num_txq, rsrc->num_complq,
+ rsrc->num_rxq, rsrc->num_bufq);
err_open:
if (vport_is_up)
diff --git a/drivers/net/ethernet/intel/idpf/idpf_ptp.c b/drivers/net/ethernet/intel/idpf/idpf_ptp.c
index 3e1052d070cf..990e78686786 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_ptp.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_ptp.c
@@ -384,15 +384,17 @@ static int idpf_ptp_update_cached_phctime(struct idpf_adapter *adapter)
WRITE_ONCE(adapter->ptp->cached_phc_jiffies, jiffies);
idpf_for_each_vport(adapter, vport) {
+ struct idpf_q_vec_rsrc *rsrc;
bool split;
- if (!vport || !vport->rxq_grps)
+ if (!vport || !vport->dflt_qv_rsrc.rxq_grps)
continue;
- split = idpf_is_queue_model_split(vport->rxq_model);
+ rsrc = &vport->dflt_qv_rsrc;
+ split = idpf_is_queue_model_split(rsrc->rxq_model);
- for (u16 i = 0; i < vport->num_rxq_grp; i++) {
- struct idpf_rxq_group *grp = &vport->rxq_grps[i];
+ for (u16 i = 0; i < rsrc->num_rxq_grp; i++) {
+ struct idpf_rxq_group *grp = &rsrc->rxq_grps[i];
idpf_ptp_update_phctime_rxq_grp(grp, split, systime);
}
@@ -681,9 +683,10 @@ int idpf_ptp_request_ts(struct idpf_tx_queue *tx_q, struct sk_buff *skb,
*/
static void idpf_ptp_set_rx_tstamp(struct idpf_vport *vport, int rx_filter)
{
+ struct idpf_q_vec_rsrc *rsrc = &vport->dflt_qv_rsrc;
bool enable = true, splitq;
- splitq = idpf_is_queue_model_split(vport->rxq_model);
+ splitq = idpf_is_queue_model_split(rsrc->rxq_model);
if (rx_filter == HWTSTAMP_FILTER_NONE) {
enable = false;
@@ -692,8 +695,8 @@ static void idpf_ptp_set_rx_tstamp(struct idpf_vport *vport, int rx_filter)
vport->tstamp_config.rx_filter = HWTSTAMP_FILTER_ALL;
}
- for (u16 i = 0; i < vport->num_rxq_grp; i++) {
- struct idpf_rxq_group *grp = &vport->rxq_grps[i];
+ for (u16 i = 0; i < rsrc->num_rxq_grp; i++) {
+ struct idpf_rxq_group *grp = &rsrc->rxq_grps[i];
struct idpf_rx_queue *rx_queue;
u16 j, num_rxq;
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
index b362b7766c42..313b71a18f75 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
@@ -146,24 +146,22 @@ static void idpf_compl_desc_rel(struct idpf_compl_queue *complq)
/**
* idpf_tx_desc_rel_all - Free Tx Resources for All Queues
- * @vport: virtual port structure
+ * @rsrc: pointer to queue and vector resources
*
* Free all transmit software resources
*/
-static void idpf_tx_desc_rel_all(struct idpf_vport *vport)
+static void idpf_tx_desc_rel_all(struct idpf_q_vec_rsrc *rsrc)
{
- int i, j;
-
- if (!vport->txq_grps)
+ if (!rsrc->txq_grps)
return;
- for (i = 0; i < vport->num_txq_grp; i++) {
- struct idpf_txq_group *txq_grp = &vport->txq_grps[i];
+ for (u16 i = 0; i < rsrc->num_txq_grp; i++) {
+ struct idpf_txq_group *txq_grp = &rsrc->txq_grps[i];
- for (j = 0; j < txq_grp->num_txq; j++)
+ for (u16 j = 0; j < txq_grp->num_txq; j++)
idpf_tx_desc_rel(txq_grp->txqs[j]);
- if (idpf_is_queue_model_split(vport->txq_model))
+ if (idpf_is_queue_model_split(rsrc->txq_model))
idpf_compl_desc_rel(txq_grp->complq);
}
}
@@ -263,7 +261,7 @@ static int idpf_tx_desc_alloc(const struct idpf_vport *vport,
/**
* idpf_compl_desc_alloc - allocate completion descriptors
- * @vport: vport to allocate resources for
+ * @vport: virtual port private structure
* @complq: completion queue to set up
*
* Return: 0 on success, -errno on failure.
@@ -296,20 +294,21 @@ static int idpf_compl_desc_alloc(const struct idpf_vport *vport,
/**
* idpf_tx_desc_alloc_all - allocate all queues Tx resources
* @vport: virtual port private structure
+ * @rsrc: pointer to queue and vector resources
*
* Returns 0 on success, negative on failure
*/
-static int idpf_tx_desc_alloc_all(struct idpf_vport *vport)
+static int idpf_tx_desc_alloc_all(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc)
{
int err = 0;
- int i, j;
/* Setup buffer queues. In single queue model buffer queues and
* completion queues will be same
*/
- for (i = 0; i < vport->num_txq_grp; i++) {
- for (j = 0; j < vport->txq_grps[i].num_txq; j++) {
- struct idpf_tx_queue *txq = vport->txq_grps[i].txqs[j];
+ for (u16 i = 0; i < rsrc->num_txq_grp; i++) {
+ for (u16 j = 0; j < rsrc->txq_grps[i].num_txq; j++) {
+ struct idpf_tx_queue *txq = rsrc->txq_grps[i].txqs[j];
err = idpf_tx_desc_alloc(vport, txq);
if (err) {
@@ -320,11 +319,11 @@ static int idpf_tx_desc_alloc_all(struct idpf_vport *vport)
}
}
- if (!idpf_is_queue_model_split(vport->txq_model))
+ if (!idpf_is_queue_model_split(rsrc->txq_model))
continue;
/* Setup completion queues */
- err = idpf_compl_desc_alloc(vport, vport->txq_grps[i].complq);
+ err = idpf_compl_desc_alloc(vport, rsrc->txq_grps[i].complq);
if (err) {
pci_err(vport->adapter->pdev,
"Allocation for Tx Completion Queue %u failed\n",
@@ -335,7 +334,7 @@ static int idpf_tx_desc_alloc_all(struct idpf_vport *vport)
err_out:
if (err)
- idpf_tx_desc_rel_all(vport);
+ idpf_tx_desc_rel_all(rsrc);
return err;
}
@@ -436,9 +435,10 @@ static void idpf_rx_buf_rel_all(struct idpf_rx_queue *rxq)
*
* Free a specific rx queue resources
*/
-static void idpf_rx_desc_rel(struct idpf_rx_queue *rxq, struct device *dev,
- u32 model)
+static void idpf_rx_desc_rel(struct idpf_rx_queue *rxq, u32 model)
{
+ struct device *dev;
+
if (!rxq)
return;
@@ -456,6 +456,7 @@ static void idpf_rx_desc_rel(struct idpf_rx_queue *rxq, struct device *dev,
if (!rxq->desc_ring)
return;
+ dev = &rxq->q_vector->vport->adapter->pdev->dev;
dmam_free_coherent(dev, rxq->size, rxq->desc_ring, rxq->dma);
rxq->desc_ring = NULL;
}
@@ -465,9 +466,10 @@ static void idpf_rx_desc_rel(struct idpf_rx_queue *rxq, struct device *dev,
* @bufq: buffer queue to clean the resources from
* @dev: device to free DMA memory
*/
-static void idpf_rx_desc_rel_bufq(struct idpf_buf_queue *bufq,
- struct device *dev)
+static void idpf_rx_desc_rel_bufq(struct idpf_buf_queue *bufq)
{
+ struct device *dev;
+
if (!bufq)
return;
@@ -481,6 +483,7 @@ static void idpf_rx_desc_rel_bufq(struct idpf_buf_queue *bufq,
if (!bufq->split_buf)
return;
+ dev = &bufq->q_vector->vport->adapter->pdev->dev;
dma_free_coherent(dev, bufq->size, bufq->split_buf, bufq->dma);
bufq->split_buf = NULL;
}
@@ -488,42 +491,41 @@ static void idpf_rx_desc_rel_bufq(struct idpf_buf_queue *bufq,
/**
* idpf_rx_desc_rel_all - Free Rx Resources for All Queues
* @vport: virtual port structure
+ * @rsrc: pointer to queue and vector resources
*
* Free all rx queues resources
*/
-static void idpf_rx_desc_rel_all(struct idpf_vport *vport)
+static void idpf_rx_desc_rel_all(struct idpf_q_vec_rsrc *rsrc)
{
- struct device *dev = &vport->adapter->pdev->dev;
struct idpf_rxq_group *rx_qgrp;
u16 num_rxq;
- int i, j;
- if (!vport->rxq_grps)
+ if (!rsrc->rxq_grps)
return;
- for (i = 0; i < vport->num_rxq_grp; i++) {
- rx_qgrp = &vport->rxq_grps[i];
+ for (u16 i = 0; i < rsrc->num_rxq_grp; i++) {
+ rx_qgrp = &rsrc->rxq_grps[i];
- if (!idpf_is_queue_model_split(vport->rxq_model)) {
- for (j = 0; j < rx_qgrp->singleq.num_rxq; j++)
- idpf_rx_desc_rel(rx_qgrp->singleq.rxqs[j], dev,
+ if (!idpf_is_queue_model_split(rsrc->rxq_model)) {
+ for (u16 j = 0; j < rx_qgrp->singleq.num_rxq; j++)
+ idpf_rx_desc_rel(rx_qgrp->singleq.rxqs[j],
VIRTCHNL2_QUEUE_MODEL_SINGLE);
continue;
}
num_rxq = rx_qgrp->splitq.num_rxq_sets;
- for (j = 0; j < num_rxq; j++)
+ for (u16 j = 0; j < num_rxq; j++)
idpf_rx_desc_rel(&rx_qgrp->splitq.rxq_sets[j]->rxq,
- dev, VIRTCHNL2_QUEUE_MODEL_SPLIT);
+ VIRTCHNL2_QUEUE_MODEL_SPLIT);
if (!rx_qgrp->splitq.bufq_sets)
continue;
- for (j = 0; j < vport->num_bufqs_per_qgrp; j++) {
+ for (u8 j = 0; j < rsrc->num_bufqs_per_qgrp; j++) {
struct idpf_bufq_set *bufq_set =
&rx_qgrp->splitq.bufq_sets[j];
- idpf_rx_desc_rel_bufq(&bufq_set->bufq, dev);
+ idpf_rx_desc_rel_bufq(&bufq_set->bufq);
}
}
}
@@ -777,26 +779,28 @@ static int idpf_rx_bufs_init(struct idpf_buf_queue *bufq,
/**
* idpf_rx_bufs_init_all - Initialize all RX bufs
- * @vport: virtual port struct
+ * @vport: pointer to vport struct
+ * @rsrc: pointer to queue and vector resources
*
* Returns 0 on success, negative on failure
*/
-int idpf_rx_bufs_init_all(struct idpf_vport *vport)
+int idpf_rx_bufs_init_all(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc)
{
- bool split = idpf_is_queue_model_split(vport->rxq_model);
- int i, j, err;
+ bool split = idpf_is_queue_model_split(rsrc->rxq_model);
+ int err;
- idpf_xdp_copy_prog_to_rqs(vport, vport->xdp_prog);
+ idpf_xdp_copy_prog_to_rqs(rsrc, vport->xdp_prog);
- for (i = 0; i < vport->num_rxq_grp; i++) {
- struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
+ for (u16 i = 0; i < rsrc->num_rxq_grp; i++) {
+ struct idpf_rxq_group *rx_qgrp = &rsrc->rxq_grps[i];
u32 truesize = 0;
/* Allocate bufs for the rxq itself in singleq */
if (!split) {
int num_rxq = rx_qgrp->singleq.num_rxq;
- for (j = 0; j < num_rxq; j++) {
+ for (u16 j = 0; j < num_rxq; j++) {
struct idpf_rx_queue *q;
q = rx_qgrp->singleq.rxqs[j];
@@ -809,7 +813,7 @@ int idpf_rx_bufs_init_all(struct idpf_vport *vport)
}
/* Otherwise, allocate bufs for the buffer queues */
- for (j = 0; j < vport->num_bufqs_per_qgrp; j++) {
+ for (u8 j = 0; j < rsrc->num_bufqs_per_qgrp; j++) {
enum libeth_fqe_type type;
struct idpf_buf_queue *q;
@@ -895,26 +899,28 @@ static int idpf_bufq_desc_alloc(const struct idpf_vport *vport,
/**
* idpf_rx_desc_alloc_all - allocate all RX queues resources
* @vport: virtual port structure
+ * @rsrc: pointer to queue and vector resources
*
* Returns 0 on success, negative on failure
*/
-static int idpf_rx_desc_alloc_all(struct idpf_vport *vport)
+static int idpf_rx_desc_alloc_all(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc)
{
struct idpf_rxq_group *rx_qgrp;
- int i, j, err;
u16 num_rxq;
+ int err;
- for (i = 0; i < vport->num_rxq_grp; i++) {
- rx_qgrp = &vport->rxq_grps[i];
- if (idpf_is_queue_model_split(vport->rxq_model))
+ for (u16 i = 0; i < rsrc->num_rxq_grp; i++) {
+ rx_qgrp = &rsrc->rxq_grps[i];
+ if (idpf_is_queue_model_split(rsrc->rxq_model))
num_rxq = rx_qgrp->splitq.num_rxq_sets;
else
num_rxq = rx_qgrp->singleq.num_rxq;
- for (j = 0; j < num_rxq; j++) {
+ for (u16 j = 0; j < num_rxq; j++) {
struct idpf_rx_queue *q;
- if (idpf_is_queue_model_split(vport->rxq_model))
+ if (idpf_is_queue_model_split(rsrc->rxq_model))
q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
else
q = rx_qgrp->singleq.rxqs[j];
@@ -928,10 +934,10 @@ static int idpf_rx_desc_alloc_all(struct idpf_vport *vport)
}
}
- if (!idpf_is_queue_model_split(vport->rxq_model))
+ if (!idpf_is_queue_model_split(rsrc->rxq_model))
continue;
- for (j = 0; j < vport->num_bufqs_per_qgrp; j++) {
+ for (u8 j = 0; j < rsrc->num_bufqs_per_qgrp; j++) {
struct idpf_buf_queue *q;
q = &rx_qgrp->splitq.bufq_sets[j].bufq;
@@ -949,7 +955,7 @@ static int idpf_rx_desc_alloc_all(struct idpf_vport *vport)
return 0;
err_out:
- idpf_rx_desc_rel_all(vport);
+ idpf_rx_desc_rel_all(rsrc);
return err;
}
@@ -960,7 +966,7 @@ static int idpf_init_queue_set(const struct idpf_queue_set *qs)
bool splitq;
int err;
- splitq = idpf_is_queue_model_split(vport->rxq_model);
+ splitq = idpf_is_queue_model_split(qs->qv_rsrc->rxq_model);
for (u32 i = 0; i < qs->num; i++) {
const struct idpf_queue_ptr *q = &qs->qs[i];
@@ -1030,19 +1036,18 @@ static int idpf_init_queue_set(const struct idpf_queue_set *qs)
static void idpf_clean_queue_set(const struct idpf_queue_set *qs)
{
- const struct idpf_vport *vport = qs->vport;
- struct device *dev = vport->netdev->dev.parent;
+ const struct idpf_q_vec_rsrc *rsrc = qs->qv_rsrc;
for (u32 i = 0; i < qs->num; i++) {
const struct idpf_queue_ptr *q = &qs->qs[i];
switch (q->type) {
case VIRTCHNL2_QUEUE_TYPE_RX:
- idpf_xdp_rxq_info_deinit(q->rxq, vport->rxq_model);
- idpf_rx_desc_rel(q->rxq, dev, vport->rxq_model);
+ idpf_xdp_rxq_info_deinit(q->rxq, rsrc->rxq_model);
+ idpf_rx_desc_rel(q->rxq, rsrc->rxq_model);
break;
case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
- idpf_rx_desc_rel_bufq(q->bufq, dev);
+ idpf_rx_desc_rel_bufq(q->bufq);
break;
case VIRTCHNL2_QUEUE_TYPE_TX:
idpf_tx_desc_rel(q->txq);
@@ -1179,6 +1184,7 @@ idpf_vector_to_queue_set(struct idpf_q_vector *qv)
static int idpf_qp_enable(const struct idpf_queue_set *qs, u32 qid)
{
+ struct idpf_q_vec_rsrc *rsrc = qs->qv_rsrc;
struct idpf_vport *vport = qs->vport;
struct idpf_q_vector *q_vector;
int err;
@@ -1195,8 +1201,8 @@ static int idpf_qp_enable(const struct idpf_queue_set *qs, u32 qid)
if (!vport->xdp_txq_offset)
goto config;
- q_vector->xsksq = kcalloc(DIV_ROUND_UP(vport->num_rxq_grp,
- qs->qv_rsrc->num_q_vectors),
+ q_vector->xsksq = kcalloc(DIV_ROUND_UP(rsrc->num_rxq_grp,
+ rsrc->num_q_vectors),
sizeof(*q_vector->xsksq), GFP_KERNEL);
if (!q_vector->xsksq)
return -ENOMEM;
@@ -1291,25 +1297,22 @@ int idpf_qp_switch(struct idpf_vport *vport, u32 qid, bool en)
/**
* idpf_txq_group_rel - Release all resources for txq groups
- * @vport: vport to release txq groups on
+ * @rsrc: pointer to queue and vector resources
*/
-static void idpf_txq_group_rel(struct idpf_vport *vport)
+static void idpf_txq_group_rel(struct idpf_q_vec_rsrc *rsrc)
{
- bool split, flow_sch_en;
- int i, j;
+ bool split;
- if (!vport->txq_grps)
+ if (!rsrc->txq_grps)
return;
- split = idpf_is_queue_model_split(vport->txq_model);
- flow_sch_en = !idpf_is_cap_ena(vport->adapter, IDPF_OTHER_CAPS,
- VIRTCHNL2_CAP_SPLITQ_QSCHED);
+ split = idpf_is_queue_model_split(rsrc->txq_model);
- for (i = 0; i < vport->num_txq_grp; i++) {
- struct idpf_txq_group *txq_grp = &vport->txq_grps[i];
+ for (u16 i = 0; i < rsrc->num_txq_grp; i++) {
+ struct idpf_txq_group *txq_grp = &rsrc->txq_grps[i];
- for (j = 0; j < txq_grp->num_txq; j++) {
- if (flow_sch_en) {
+ for (u16 j = 0; j < txq_grp->num_txq; j++) {
+ if (idpf_queue_has(FLOW_SCH_EN, txq_grp->txqs[j])) {
kfree(txq_grp->txqs[j]->refillq);
txq_grp->txqs[j]->refillq = NULL;
}
@@ -1324,8 +1327,8 @@ static void idpf_txq_group_rel(struct idpf_vport *vport)
kfree(txq_grp->complq);
txq_grp->complq = NULL;
}
- kfree(vport->txq_grps);
- vport->txq_grps = NULL;
+ kfree(rsrc->txq_grps);
+ rsrc->txq_grps = NULL;
}
/**
@@ -1334,12 +1337,10 @@ static void idpf_txq_group_rel(struct idpf_vport *vport)
*/
static void idpf_rxq_sw_queue_rel(struct idpf_rxq_group *rx_qgrp)
{
- int i, j;
-
- for (i = 0; i < rx_qgrp->vport->num_bufqs_per_qgrp; i++) {
+ for (u8 i = 0; i < rx_qgrp->splitq.num_bufq_sets; i++) {
struct idpf_bufq_set *bufq_set = &rx_qgrp->splitq.bufq_sets[i];
- for (j = 0; j < bufq_set->num_refillqs; j++) {
+ for (u16 j = 0; j < bufq_set->num_refillqs; j++) {
kfree(bufq_set->refillqs[j].ring);
bufq_set->refillqs[j].ring = NULL;
}
@@ -1350,23 +1351,20 @@ static void idpf_rxq_sw_queue_rel(struct idpf_rxq_group *rx_qgrp)
/**
* idpf_rxq_group_rel - Release all resources for rxq groups
- * @vport: vport to release rxq groups on
+ * @rsrc: pointer to queue and vector resources
*/
-static void idpf_rxq_group_rel(struct idpf_vport *vport)
+static void idpf_rxq_group_rel(struct idpf_q_vec_rsrc *rsrc)
{
- int i;
-
- if (!vport->rxq_grps)
+ if (!rsrc->rxq_grps)
return;
- for (i = 0; i < vport->num_rxq_grp; i++) {
- struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
+ for (u16 i = 0; i < rsrc->num_rxq_grp; i++) {
+ struct idpf_rxq_group *rx_qgrp = &rsrc->rxq_grps[i];
u16 num_rxq;
- int j;
- if (idpf_is_queue_model_split(vport->rxq_model)) {
+ if (idpf_is_queue_model_split(rsrc->rxq_model)) {
num_rxq = rx_qgrp->splitq.num_rxq_sets;
- for (j = 0; j < num_rxq; j++) {
+ for (u16 j = 0; j < num_rxq; j++) {
kfree(rx_qgrp->splitq.rxq_sets[j]);
rx_qgrp->splitq.rxq_sets[j] = NULL;
}
@@ -1376,41 +1374,44 @@ static void idpf_rxq_group_rel(struct idpf_vport *vport)
rx_qgrp->splitq.bufq_sets = NULL;
} else {
num_rxq = rx_qgrp->singleq.num_rxq;
- for (j = 0; j < num_rxq; j++) {
+ for (u16 j = 0; j < num_rxq; j++) {
kfree(rx_qgrp->singleq.rxqs[j]);
rx_qgrp->singleq.rxqs[j] = NULL;
}
}
}
- kfree(vport->rxq_grps);
- vport->rxq_grps = NULL;
+ kfree(rsrc->rxq_grps);
+ rsrc->rxq_grps = NULL;
}
/**
* idpf_vport_queue_grp_rel_all - Release all queue groups
* @vport: vport to release queue groups for
+ * @rsrc: pointer to queue and vector resources
*/
-static void idpf_vport_queue_grp_rel_all(struct idpf_vport *vport)
+static void idpf_vport_queue_grp_rel_all(struct idpf_q_vec_rsrc *rsrc)
{
- idpf_txq_group_rel(vport);
- idpf_rxq_group_rel(vport);
+ idpf_txq_group_rel(rsrc);
+ idpf_rxq_group_rel(rsrc);
}
/**
* idpf_vport_queues_rel - Free memory for all queues
* @vport: virtual port
+ * @rsrc: pointer to queue and vector resources
*
* Free the memory allocated for queues associated to a vport
*/
-void idpf_vport_queues_rel(struct idpf_vport *vport)
+void idpf_vport_queues_rel(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc)
{
- idpf_xdp_copy_prog_to_rqs(vport, NULL);
+ idpf_xdp_copy_prog_to_rqs(rsrc, NULL);
- idpf_tx_desc_rel_all(vport);
- idpf_rx_desc_rel_all(vport);
+ idpf_tx_desc_rel_all(rsrc);
+ idpf_rx_desc_rel_all(rsrc);
idpf_xdpsqs_put(vport);
- idpf_vport_queue_grp_rel_all(vport);
+ idpf_vport_queue_grp_rel_all(rsrc);
kfree(vport->txqs);
vport->txqs = NULL;
@@ -1419,6 +1420,7 @@ void idpf_vport_queues_rel(struct idpf_vport *vport)
/**
* idpf_vport_init_fast_path_txqs - Initialize fast path txq array
* @vport: vport to init txqs on
+ * @rsrc: pointer to queue and vector resources
*
* We get a queue index from skb->queue_mapping and we need a fast way to
* dereference the queue from queue groups. This allows us to quickly pull a
@@ -1426,22 +1428,23 @@ void idpf_vport_queues_rel(struct idpf_vport *vport)
*
* Returns 0 on success, negative on failure
*/
-static int idpf_vport_init_fast_path_txqs(struct idpf_vport *vport)
+static int idpf_vport_init_fast_path_txqs(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc)
{
struct idpf_ptp_vport_tx_tstamp_caps *caps = vport->tx_tstamp_caps;
struct work_struct *tstamp_task = &vport->tstamp_task;
- int i, j, k = 0;
+ int k = 0;
- vport->txqs = kcalloc(vport->num_txq, sizeof(*vport->txqs),
+ vport->txqs = kcalloc(rsrc->num_txq, sizeof(*vport->txqs),
GFP_KERNEL);
-
if (!vport->txqs)
return -ENOMEM;
- for (i = 0; i < vport->num_txq_grp; i++) {
- struct idpf_txq_group *tx_grp = &vport->txq_grps[i];
+ vport->num_txq = rsrc->num_txq;
+ for (u16 i = 0; i < rsrc->num_txq_grp; i++) {
+ struct idpf_txq_group *tx_grp = &rsrc->txq_grps[i];
- for (j = 0; j < tx_grp->num_txq; j++, k++) {
+ for (u16 j = 0; j < tx_grp->num_txq; j++, k++) {
vport->txqs[k] = tx_grp->txqs[j];
vport->txqs[k]->idx = k;
@@ -1460,16 +1463,18 @@ static int idpf_vport_init_fast_path_txqs(struct idpf_vport *vport)
* idpf_vport_init_num_qs - Initialize number of queues
* @vport: vport to initialize queues
* @vport_msg: data to be filled into vport
+ * @rsrc: pointer to queue and vector resources
*/
void idpf_vport_init_num_qs(struct idpf_vport *vport,
- struct virtchnl2_create_vport *vport_msg)
+ struct virtchnl2_create_vport *vport_msg,
+ struct idpf_q_vec_rsrc *rsrc)
{
struct idpf_vport_user_config_data *config_data;
u16 idx = vport->idx;
config_data = &vport->adapter->vport_config[idx]->user_config;
- vport->num_txq = le16_to_cpu(vport_msg->num_tx_q);
- vport->num_rxq = le16_to_cpu(vport_msg->num_rx_q);
+ rsrc->num_txq = le16_to_cpu(vport_msg->num_tx_q);
+ rsrc->num_rxq = le16_to_cpu(vport_msg->num_rx_q);
/* number of txqs and rxqs in config data will be zeros only in the
* driver load path and we dont update them there after
*/
@@ -1478,10 +1483,10 @@ void idpf_vport_init_num_qs(struct idpf_vport *vport,
config_data->num_req_rx_qs = le16_to_cpu(vport_msg->num_rx_q);
}
- if (idpf_is_queue_model_split(vport->txq_model))
- vport->num_complq = le16_to_cpu(vport_msg->num_tx_complq);
- if (idpf_is_queue_model_split(vport->rxq_model))
- vport->num_bufq = le16_to_cpu(vport_msg->num_rx_bufq);
+ if (idpf_is_queue_model_split(rsrc->txq_model))
+ rsrc->num_complq = le16_to_cpu(vport_msg->num_tx_complq);
+ if (idpf_is_queue_model_split(rsrc->rxq_model))
+ rsrc->num_bufq = le16_to_cpu(vport_msg->num_rx_bufq);
vport->xdp_prog = config_data->xdp_prog;
if (idpf_xdp_enabled(vport)) {
@@ -1496,56 +1501,57 @@ void idpf_vport_init_num_qs(struct idpf_vport *vport,
}
/* Adjust number of buffer queues per Rx queue group. */
- if (!idpf_is_queue_model_split(vport->rxq_model)) {
- vport->num_bufqs_per_qgrp = 0;
+ if (!idpf_is_queue_model_split(rsrc->rxq_model)) {
+ rsrc->num_bufqs_per_qgrp = 0;
return;
}
- vport->num_bufqs_per_qgrp = IDPF_MAX_BUFQS_PER_RXQ_GRP;
+ rsrc->num_bufqs_per_qgrp = IDPF_MAX_BUFQS_PER_RXQ_GRP;
}
/**
* idpf_vport_calc_num_q_desc - Calculate number of queue groups
* @vport: vport to calculate q groups for
+ * @rsrc: pointer to queue and vector resources
*/
-void idpf_vport_calc_num_q_desc(struct idpf_vport *vport)
+void idpf_vport_calc_num_q_desc(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc)
{
struct idpf_vport_user_config_data *config_data;
- int num_bufqs = vport->num_bufqs_per_qgrp;
+ u8 num_bufqs = rsrc->num_bufqs_per_qgrp;
u32 num_req_txq_desc, num_req_rxq_desc;
u16 idx = vport->idx;
- int i;
config_data = &vport->adapter->vport_config[idx]->user_config;
num_req_txq_desc = config_data->num_req_txq_desc;
num_req_rxq_desc = config_data->num_req_rxq_desc;
- vport->complq_desc_count = 0;
+ rsrc->complq_desc_count = 0;
if (num_req_txq_desc) {
- vport->txq_desc_count = num_req_txq_desc;
- if (idpf_is_queue_model_split(vport->txq_model)) {
- vport->complq_desc_count = num_req_txq_desc;
- if (vport->complq_desc_count < IDPF_MIN_TXQ_COMPLQ_DESC)
- vport->complq_desc_count =
+ rsrc->txq_desc_count = num_req_txq_desc;
+ if (idpf_is_queue_model_split(rsrc->txq_model)) {
+ rsrc->complq_desc_count = num_req_txq_desc;
+ if (rsrc->complq_desc_count < IDPF_MIN_TXQ_COMPLQ_DESC)
+ rsrc->complq_desc_count =
IDPF_MIN_TXQ_COMPLQ_DESC;
}
} else {
- vport->txq_desc_count = IDPF_DFLT_TX_Q_DESC_COUNT;
- if (idpf_is_queue_model_split(vport->txq_model))
- vport->complq_desc_count =
+ rsrc->txq_desc_count = IDPF_DFLT_TX_Q_DESC_COUNT;
+ if (idpf_is_queue_model_split(rsrc->txq_model))
+ rsrc->complq_desc_count =
IDPF_DFLT_TX_COMPLQ_DESC_COUNT;
}
if (num_req_rxq_desc)
- vport->rxq_desc_count = num_req_rxq_desc;
+ rsrc->rxq_desc_count = num_req_rxq_desc;
else
- vport->rxq_desc_count = IDPF_DFLT_RX_Q_DESC_COUNT;
+ rsrc->rxq_desc_count = IDPF_DFLT_RX_Q_DESC_COUNT;
- for (i = 0; i < num_bufqs; i++) {
- if (!vport->bufq_desc_count[i])
- vport->bufq_desc_count[i] =
- IDPF_RX_BUFQ_DESC_COUNT(vport->rxq_desc_count,
+ for (u8 i = 0; i < num_bufqs; i++) {
+ if (!rsrc->bufq_desc_count[i])
+ rsrc->bufq_desc_count[i] =
+ IDPF_RX_BUFQ_DESC_COUNT(rsrc->rxq_desc_count,
num_bufqs);
}
}
@@ -1634,54 +1640,54 @@ int idpf_vport_calc_total_qs(struct idpf_adapter *adapter, u16 vport_idx,
/**
* idpf_vport_calc_num_q_groups - Calculate number of queue groups
- * @vport: vport to calculate q groups for
+ * @rsrc: pointer to queue and vector resources
*/
-void idpf_vport_calc_num_q_groups(struct idpf_vport *vport)
+void idpf_vport_calc_num_q_groups(struct idpf_q_vec_rsrc *rsrc)
{
- if (idpf_is_queue_model_split(vport->txq_model))
- vport->num_txq_grp = vport->num_txq;
+ if (idpf_is_queue_model_split(rsrc->txq_model))
+ rsrc->num_txq_grp = rsrc->num_txq;
else
- vport->num_txq_grp = IDPF_DFLT_SINGLEQ_TX_Q_GROUPS;
+ rsrc->num_txq_grp = IDPF_DFLT_SINGLEQ_TX_Q_GROUPS;
- if (idpf_is_queue_model_split(vport->rxq_model))
- vport->num_rxq_grp = vport->num_rxq;
+ if (idpf_is_queue_model_split(rsrc->rxq_model))
+ rsrc->num_rxq_grp = rsrc->num_rxq;
else
- vport->num_rxq_grp = IDPF_DFLT_SINGLEQ_RX_Q_GROUPS;
+ rsrc->num_rxq_grp = IDPF_DFLT_SINGLEQ_RX_Q_GROUPS;
}
/**
* idpf_vport_calc_numq_per_grp - Calculate number of queues per group
- * @vport: vport to calculate queues for
+ * @rsrc: pointer to queue and vector resources
* @num_txq: return parameter for number of TX queues
* @num_rxq: return parameter for number of RX queues
*/
-static void idpf_vport_calc_numq_per_grp(struct idpf_vport *vport,
+static void idpf_vport_calc_numq_per_grp(struct idpf_q_vec_rsrc *rsrc,
u16 *num_txq, u16 *num_rxq)
{
- if (idpf_is_queue_model_split(vport->txq_model))
+ if (idpf_is_queue_model_split(rsrc->txq_model))
*num_txq = IDPF_DFLT_SPLITQ_TXQ_PER_GROUP;
else
- *num_txq = vport->num_txq;
+ *num_txq = rsrc->num_txq;
- if (idpf_is_queue_model_split(vport->rxq_model))
+ if (idpf_is_queue_model_split(rsrc->rxq_model))
*num_rxq = IDPF_DFLT_SPLITQ_RXQ_PER_GROUP;
else
- *num_rxq = vport->num_rxq;
+ *num_rxq = rsrc->num_rxq;
}
/**
* idpf_rxq_set_descids - set the descids supported by this queue
- * @vport: virtual port data structure
+ * @rsrc: pointer to queue and vector resources
* @q: rx queue for which descids are set
*
*/
-static void idpf_rxq_set_descids(const struct idpf_vport *vport,
+static void idpf_rxq_set_descids(struct idpf_q_vec_rsrc *rsrc,
struct idpf_rx_queue *q)
{
- if (idpf_is_queue_model_split(vport->rxq_model))
+ if (idpf_is_queue_model_split(rsrc->rxq_model))
return;
- if (vport->base_rxd)
+ if (rsrc->base_rxd)
q->rxdids = VIRTCHNL2_RXDID_1_32B_BASE_M;
else
q->rxdids = VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M;
@@ -1690,44 +1696,45 @@ static void idpf_rxq_set_descids(const struct idpf_vport *vport,
/**
* idpf_txq_group_alloc - Allocate all txq group resources
* @vport: vport to allocate txq groups for
+ * @rsrc: pointer to queue and vector resources
* @num_txq: number of txqs to allocate for each group
*
* Returns 0 on success, negative on failure
*/
-static int idpf_txq_group_alloc(struct idpf_vport *vport, u16 num_txq)
+static int idpf_txq_group_alloc(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc,
+ u16 num_txq)
{
bool split, flow_sch_en;
- int i;
- vport->txq_grps = kcalloc(vport->num_txq_grp,
- sizeof(*vport->txq_grps), GFP_KERNEL);
- if (!vport->txq_grps)
+ rsrc->txq_grps = kcalloc(rsrc->num_txq_grp,
+ sizeof(*rsrc->txq_grps), GFP_KERNEL);
+ if (!rsrc->txq_grps)
return -ENOMEM;
- split = idpf_is_queue_model_split(vport->txq_model);
+ split = idpf_is_queue_model_split(rsrc->txq_model);
flow_sch_en = !idpf_is_cap_ena(vport->adapter, IDPF_OTHER_CAPS,
VIRTCHNL2_CAP_SPLITQ_QSCHED);
- for (i = 0; i < vport->num_txq_grp; i++) {
- struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
+ for (u16 i = 0; i < rsrc->num_txq_grp; i++) {
+ struct idpf_txq_group *tx_qgrp = &rsrc->txq_grps[i];
struct idpf_adapter *adapter = vport->adapter;
- int j;
tx_qgrp->vport = vport;
tx_qgrp->num_txq = num_txq;
- for (j = 0; j < tx_qgrp->num_txq; j++) {
+ for (u16 j = 0; j < tx_qgrp->num_txq; j++) {
tx_qgrp->txqs[j] = kzalloc(sizeof(*tx_qgrp->txqs[j]),
GFP_KERNEL);
if (!tx_qgrp->txqs[j])
goto err_alloc;
}
- for (j = 0; j < tx_qgrp->num_txq; j++) {
+ for (u16 j = 0; j < tx_qgrp->num_txq; j++) {
struct idpf_tx_queue *q = tx_qgrp->txqs[j];
q->dev = &adapter->pdev->dev;
- q->desc_count = vport->txq_desc_count;
+ q->desc_count = rsrc->txq_desc_count;
q->tx_max_bufs = idpf_get_max_tx_bufs(adapter);
q->tx_min_pkt_len = idpf_get_min_tx_pkt_len(adapter);
q->netdev = vport->netdev;
@@ -1762,7 +1769,7 @@ static int idpf_txq_group_alloc(struct idpf_vport *vport, u16 num_txq)
if (!tx_qgrp->complq)
goto err_alloc;
- tx_qgrp->complq->desc_count = vport->complq_desc_count;
+ tx_qgrp->complq->desc_count = rsrc->complq_desc_count;
tx_qgrp->complq->txq_grp = tx_qgrp;
tx_qgrp->complq->netdev = vport->netdev;
tx_qgrp->complq->clean_budget = vport->compln_clean_budget;
@@ -1774,7 +1781,7 @@ static int idpf_txq_group_alloc(struct idpf_vport *vport, u16 num_txq)
return 0;
err_alloc:
- idpf_txq_group_rel(vport);
+ idpf_txq_group_rel(rsrc);
return -ENOMEM;
}
@@ -1782,30 +1789,33 @@ static int idpf_txq_group_alloc(struct idpf_vport *vport, u16 num_txq)
/**
* idpf_rxq_group_alloc - Allocate all rxq group resources
* @vport: vport to allocate rxq groups for
+ * @rsrc: pointer to queue and vector resources
* @num_rxq: number of rxqs to allocate for each group
*
* Returns 0 on success, negative on failure
*/
-static int idpf_rxq_group_alloc(struct idpf_vport *vport, u16 num_rxq)
+static int idpf_rxq_group_alloc(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc,
+ u16 num_rxq)
{
- int i, k, err = 0;
- bool hs;
+ int k, err = 0;
+ bool hs, rsc;
- vport->rxq_grps = kcalloc(vport->num_rxq_grp,
- sizeof(struct idpf_rxq_group), GFP_KERNEL);
- if (!vport->rxq_grps)
+ rsrc->rxq_grps = kcalloc(rsrc->num_rxq_grp,
+ sizeof(struct idpf_rxq_group), GFP_KERNEL);
+ if (!rsrc->rxq_grps)
return -ENOMEM;
hs = idpf_vport_get_hsplit(vport) == ETHTOOL_TCP_DATA_SPLIT_ENABLED;
+ rsc = idpf_is_feature_ena(vport, NETIF_F_GRO_HW);
- for (i = 0; i < vport->num_rxq_grp; i++) {
- struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
- int j;
+ for (u16 i = 0; i < rsrc->num_rxq_grp; i++) {
+ struct idpf_rxq_group *rx_qgrp = &rsrc->rxq_grps[i];
rx_qgrp->vport = vport;
- if (!idpf_is_queue_model_split(vport->rxq_model)) {
+ if (!idpf_is_queue_model_split(rsrc->rxq_model)) {
rx_qgrp->singleq.num_rxq = num_rxq;
- for (j = 0; j < num_rxq; j++) {
+ for (u16 j = 0; j < num_rxq; j++) {
rx_qgrp->singleq.rxqs[j] =
kzalloc(sizeof(*rx_qgrp->singleq.rxqs[j]),
GFP_KERNEL);
@@ -1818,7 +1828,7 @@ static int idpf_rxq_group_alloc(struct idpf_vport *vport, u16 num_rxq)
}
rx_qgrp->splitq.num_rxq_sets = num_rxq;
- for (j = 0; j < num_rxq; j++) {
+ for (u16 j = 0; j < num_rxq; j++) {
rx_qgrp->splitq.rxq_sets[j] =
kzalloc(sizeof(struct idpf_rxq_set),
GFP_KERNEL);
@@ -1828,25 +1838,27 @@ static int idpf_rxq_group_alloc(struct idpf_vport *vport, u16 num_rxq)
}
}
- rx_qgrp->splitq.bufq_sets = kcalloc(vport->num_bufqs_per_qgrp,
+ rx_qgrp->splitq.bufq_sets = kcalloc(rsrc->num_bufqs_per_qgrp,
sizeof(struct idpf_bufq_set),
GFP_KERNEL);
if (!rx_qgrp->splitq.bufq_sets) {
err = -ENOMEM;
goto err_alloc;
}
+ rx_qgrp->splitq.num_bufq_sets = rsrc->num_bufqs_per_qgrp;
- for (j = 0; j < vport->num_bufqs_per_qgrp; j++) {
+ for (u8 j = 0; j < rsrc->num_bufqs_per_qgrp; j++) {
struct idpf_bufq_set *bufq_set =
&rx_qgrp->splitq.bufq_sets[j];
int swq_size = sizeof(struct idpf_sw_queue);
struct idpf_buf_queue *q;
q = &rx_qgrp->splitq.bufq_sets[j].bufq;
- q->desc_count = vport->bufq_desc_count[j];
+ q->desc_count = rsrc->bufq_desc_count[j];
q->rx_buffer_low_watermark = IDPF_LOW_WATERMARK;
idpf_queue_assign(HSPLIT_EN, q, hs);
+ idpf_queue_assign(RSC_EN, q, rsc);
bufq_set->num_refillqs = num_rxq;
bufq_set->refillqs = kcalloc(num_rxq, swq_size,
@@ -1860,7 +1872,7 @@ static int idpf_rxq_group_alloc(struct idpf_vport *vport, u16 num_rxq)
&bufq_set->refillqs[k];
refillq->desc_count =
- vport->bufq_desc_count[j];
+ rsrc->bufq_desc_count[j];
idpf_queue_set(GEN_CHK, refillq);
idpf_queue_set(RFL_GEN_CHK, refillq);
refillq->ring = kcalloc(refillq->desc_count,
@@ -1874,37 +1886,38 @@ static int idpf_rxq_group_alloc(struct idpf_vport *vport, u16 num_rxq)
}
skip_splitq_rx_init:
- for (j = 0; j < num_rxq; j++) {
+ for (u16 j = 0; j < num_rxq; j++) {
struct idpf_rx_queue *q;
- if (!idpf_is_queue_model_split(vport->rxq_model)) {
+ if (!idpf_is_queue_model_split(rsrc->rxq_model)) {
q = rx_qgrp->singleq.rxqs[j];
goto setup_rxq;
}
q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
rx_qgrp->splitq.rxq_sets[j]->refillq[0] =
&rx_qgrp->splitq.bufq_sets[0].refillqs[j];
- if (vport->num_bufqs_per_qgrp > IDPF_SINGLE_BUFQ_PER_RXQ_GRP)
+ if (rsrc->num_bufqs_per_qgrp > IDPF_SINGLE_BUFQ_PER_RXQ_GRP)
rx_qgrp->splitq.rxq_sets[j]->refillq[1] =
&rx_qgrp->splitq.bufq_sets[1].refillqs[j];
idpf_queue_assign(HSPLIT_EN, q, hs);
+ idpf_queue_assign(RSC_EN, q, rsc);
setup_rxq:
- q->desc_count = vport->rxq_desc_count;
+ q->desc_count = rsrc->rxq_desc_count;
q->rx_ptype_lkup = vport->rx_ptype_lkup;
q->bufq_sets = rx_qgrp->splitq.bufq_sets;
q->idx = (i * num_rxq) + j;
q->rx_buffer_low_watermark = IDPF_LOW_WATERMARK;
q->rx_max_pkt_size = vport->netdev->mtu +
LIBETH_RX_LL_LEN;
- idpf_rxq_set_descids(vport, q);
+ idpf_rxq_set_descids(rsrc, q);
}
}
err_alloc:
if (err)
- idpf_rxq_group_rel(vport);
+ idpf_rxq_group_rel(rsrc);
return err;
}
@@ -1912,28 +1925,30 @@ static int idpf_rxq_group_alloc(struct idpf_vport *vport, u16 num_rxq)
/**
* idpf_vport_queue_grp_alloc_all - Allocate all queue groups/resources
* @vport: vport with qgrps to allocate
+ * @rsrc: pointer to queue and vector resources
*
* Returns 0 on success, negative on failure
*/
-static int idpf_vport_queue_grp_alloc_all(struct idpf_vport *vport)
+static int idpf_vport_queue_grp_alloc_all(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc)
{
u16 num_txq, num_rxq;
int err;
- idpf_vport_calc_numq_per_grp(vport, &num_txq, &num_rxq);
+ idpf_vport_calc_numq_per_grp(rsrc, &num_txq, &num_rxq);
- err = idpf_txq_group_alloc(vport, num_txq);
+ err = idpf_txq_group_alloc(vport, rsrc, num_txq);
if (err)
goto err_out;
- err = idpf_rxq_group_alloc(vport, num_rxq);
+ err = idpf_rxq_group_alloc(vport, rsrc, num_rxq);
if (err)
goto err_out;
return 0;
err_out:
- idpf_vport_queue_grp_rel_all(vport);
+ idpf_vport_queue_grp_rel_all(rsrc);
return err;
}
@@ -1941,19 +1956,21 @@ static int idpf_vport_queue_grp_alloc_all(struct idpf_vport *vport)
/**
* idpf_vport_queues_alloc - Allocate memory for all queues
* @vport: virtual port
+ * @rsrc: pointer to queue and vector resources
*
* Allocate memory for queues associated with a vport. Returns 0 on success,
* negative on failure.
*/
-int idpf_vport_queues_alloc(struct idpf_vport *vport)
+int idpf_vport_queues_alloc(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc)
{
int err;
- err = idpf_vport_queue_grp_alloc_all(vport);
+ err = idpf_vport_queue_grp_alloc_all(vport, rsrc);
if (err)
goto err_out;
- err = idpf_vport_init_fast_path_txqs(vport);
+ err = idpf_vport_init_fast_path_txqs(vport, rsrc);
if (err)
goto err_out;
@@ -1961,18 +1978,18 @@ int idpf_vport_queues_alloc(struct idpf_vport *vport)
if (err)
goto err_out;
- err = idpf_tx_desc_alloc_all(vport);
+ err = idpf_tx_desc_alloc_all(vport, rsrc);
if (err)
goto err_out;
- err = idpf_rx_desc_alloc_all(vport);
+ err = idpf_rx_desc_alloc_all(vport, rsrc);
if (err)
goto err_out;
return 0;
err_out:
- idpf_vport_queues_rel(vport);
+ idpf_vport_queues_rel(vport, rsrc);
return err;
}
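
For reference, the caller-side shape this conversion produces, where allocation and release operate on a resource container handed in by the caller instead of on fields embedded directly in the vport, can be modeled with the small standalone sketch below. It is purely illustrative: the struct and helper names only mimic the driver, and none of it is idpf code.

/*
 * Hypothetical, self-contained model of the pattern applied above: queue
 * bookkeeping lives in a resource container embedded in the vport, and the
 * alloc/release helpers take that container explicitly.  Not idpf code.
 */
#include <stdio.h>
#include <stdlib.h>

struct q_vec_rsrc {
	unsigned int num_txq;
	unsigned int num_rxq;
	int *txq_ids;		/* stand-ins for the real queue groups */
	int *rxq_ids;
};

struct vport {
	struct q_vec_rsrc dflt_qv_rsrc;	/* default resource group */
};

static void queues_rel(struct q_vec_rsrc *rsrc)
{
	free(rsrc->txq_ids);
	free(rsrc->rxq_ids);
	rsrc->txq_ids = NULL;
	rsrc->rxq_ids = NULL;
}

static int queues_alloc(struct vport *vport, struct q_vec_rsrc *rsrc)
{
	(void)vport;	/* the real helper also needs netdev info from the vport */

	rsrc->txq_ids = calloc(rsrc->num_txq, sizeof(*rsrc->txq_ids));
	rsrc->rxq_ids = calloc(rsrc->num_rxq, sizeof(*rsrc->rxq_ids));
	if (!rsrc->txq_ids || !rsrc->rxq_ids) {
		/* error path releases through the same container */
		queues_rel(rsrc);
		return -1;
	}

	return 0;
}

int main(void)
{
	struct vport vport = { .dflt_qv_rsrc = { .num_txq = 4, .num_rxq = 4 } };
	struct q_vec_rsrc *rsrc = &vport.dflt_qv_rsrc;

	/* callers pass the container they want configured */
	if (queues_alloc(&vport, rsrc))
		return 1;

	printf("allocated %u txq / %u rxq\n", rsrc->num_txq, rsrc->num_rxq);
	queues_rel(rsrc);

	return 0;
}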
@@ -3143,7 +3160,7 @@ netdev_tx_t idpf_tx_start(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_OK;
}
- if (idpf_is_queue_model_split(vport->txq_model))
+ if (idpf_is_queue_model_split(vport->dflt_qv_rsrc.txq_model))
return idpf_tx_splitq_frame(skb, tx_q);
else
return idpf_tx_singleq_frame(skb, tx_q);
@@ -4333,19 +4350,19 @@ static int idpf_vport_splitq_napi_poll(struct napi_struct *napi, int budget)
static void idpf_vport_intr_map_vector_to_qs(struct idpf_vport *vport,
struct idpf_q_vec_rsrc *rsrc)
{
- u16 num_txq_grp = vport->num_txq_grp - vport->num_xdp_txq;
- bool split = idpf_is_queue_model_split(vport->rxq_model);
+ u16 num_txq_grp = rsrc->num_txq_grp - vport->num_xdp_txq;
+ bool split = idpf_is_queue_model_split(rsrc->rxq_model);
struct idpf_rxq_group *rx_qgrp;
struct idpf_txq_group *tx_qgrp;
u32 i, qv_idx, q_index;
- for (i = 0, qv_idx = 0; i < vport->num_rxq_grp; i++) {
+ for (i = 0, qv_idx = 0; i < rsrc->num_rxq_grp; i++) {
u16 num_rxq;
if (qv_idx >= rsrc->num_q_vectors)
qv_idx = 0;
- rx_qgrp = &vport->rxq_grps[i];
+ rx_qgrp = &rsrc->rxq_grps[i];
if (split)
num_rxq = rx_qgrp->splitq.num_rxq_sets;
else
@@ -4368,7 +4385,7 @@ static void idpf_vport_intr_map_vector_to_qs(struct idpf_vport *vport,
}
if (split) {
- for (u32 j = 0; j < vport->num_bufqs_per_qgrp; j++) {
+ for (u32 j = 0; j < rsrc->num_bufqs_per_qgrp; j++) {
struct idpf_buf_queue *bufq;
bufq = &rx_qgrp->splitq.bufq_sets[j].bufq;
@@ -4382,7 +4399,7 @@ static void idpf_vport_intr_map_vector_to_qs(struct idpf_vport *vport,
qv_idx++;
}
- split = idpf_is_queue_model_split(vport->txq_model);
+ split = idpf_is_queue_model_split(rsrc->txq_model);
for (i = 0, qv_idx = 0; i < num_txq_grp; i++) {
u16 num_txq;
@@ -4390,7 +4407,7 @@ static void idpf_vport_intr_map_vector_to_qs(struct idpf_vport *vport,
if (qv_idx >= rsrc->num_q_vectors)
qv_idx = 0;
- tx_qgrp = &vport->txq_grps[i];
+ tx_qgrp = &rsrc->txq_grps[i];
num_txq = tx_qgrp->num_txq;
for (u32 j = 0; j < num_txq; j++) {
@@ -4481,7 +4498,7 @@ static void idpf_vport_intr_napi_add_all(struct idpf_vport *vport,
int irq_num;
u16 qv_idx;
- if (idpf_is_queue_model_split(vport->txq_model))
+ if (idpf_is_queue_model_split(rsrc->txq_model))
napi_poll = idpf_vport_splitq_napi_poll;
else
napi_poll = idpf_vport_singleq_napi_poll;
@@ -4523,14 +4540,14 @@ int idpf_vport_intr_alloc(struct idpf_vport *vport,
if (!rsrc->q_vectors)
return -ENOMEM;
- txqs_per_vector = DIV_ROUND_UP(vport->num_txq_grp,
+ txqs_per_vector = DIV_ROUND_UP(rsrc->num_txq_grp,
rsrc->num_q_vectors);
- rxqs_per_vector = DIV_ROUND_UP(vport->num_rxq_grp,
+ rxqs_per_vector = DIV_ROUND_UP(rsrc->num_rxq_grp,
rsrc->num_q_vectors);
- bufqs_per_vector = vport->num_bufqs_per_qgrp *
- DIV_ROUND_UP(vport->num_rxq_grp,
+ bufqs_per_vector = rsrc->num_bufqs_per_qgrp *
+ DIV_ROUND_UP(rsrc->num_rxq_grp,
rsrc->num_q_vectors);
- complqs_per_vector = DIV_ROUND_UP(vport->num_txq_grp,
+ complqs_per_vector = DIV_ROUND_UP(rsrc->num_txq_grp,
rsrc->num_q_vectors);
for (u16 v_idx = 0; v_idx < rsrc->num_q_vectors; v_idx++) {
@@ -4556,7 +4573,7 @@ int idpf_vport_intr_alloc(struct idpf_vport *vport,
if (!q_vector->rx)
goto error;
- if (!idpf_is_queue_model_split(vport->rxq_model))
+ if (!idpf_is_queue_model_split(rsrc->rxq_model))
continue;
q_vector->bufq = kcalloc(bufqs_per_vector,
@@ -4652,8 +4669,8 @@ int idpf_config_rss(struct idpf_vport *vport)
*/
static void idpf_fill_dflt_rss_lut(struct idpf_vport *vport)
{
+ u16 num_active_rxq = vport->dflt_qv_rsrc.num_rxq;
struct idpf_adapter *adapter = vport->adapter;
- u16 num_active_rxq = vport->num_rxq;
struct idpf_rss_data *rss_data;
int i;
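
The interrupt-allocation hunk above sizes the per-vector queue arrays with DIV_ROUND_UP over the counts now kept in the resource structure, so every vector gets room for its worst-case share of queue groups; with, say, 9 RX queue groups spread over 4 vectors, that is DIV_ROUND_UP(9, 4) = 3 slots per vector. A minimal standalone illustration of that arithmetic (hypothetical example values, not driver code):

/* Standalone illustration of the DIV_ROUND_UP sizing used above. */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int num_rxq_grp = 9, num_q_vectors = 4;

	/* each vector must have room for the worst-case share of groups */
	printf("rxqs_per_vector = %u\n",
	       DIV_ROUND_UP(num_rxq_grp, num_q_vectors));	/* prints 3 */

	return 0;
}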
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
index a28781e41c53..b40d4e8e4d95 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
@@ -282,6 +282,7 @@ struct idpf_ptype_state {
* @__IDPF_Q_FLOW_SCH_EN: Enable flow scheduling
* @__IDPF_Q_SW_MARKER: Used to indicate TX queue marker completions
* @__IDPF_Q_CRC_EN: enable CRC offload in singleq mode
+ * @__IDPF_Q_RSC_EN: enable Receive Side Coalescing on Rx (splitq)
* @__IDPF_Q_HSPLIT_EN: enable header split on Rx (splitq)
* @__IDPF_Q_PTP: indicates whether the Rx timestamping is enabled for the
* queue
@@ -296,6 +297,7 @@ enum idpf_queue_flags_t {
__IDPF_Q_FLOW_SCH_EN,
__IDPF_Q_SW_MARKER,
__IDPF_Q_CRC_EN,
+ __IDPF_Q_RSC_EN,
__IDPF_Q_HSPLIT_EN,
__IDPF_Q_PTP,
__IDPF_Q_NOIRQ,
@@ -924,6 +926,7 @@ struct idpf_bufq_set {
* @singleq.rxqs: Array of RX queue pointers
* @splitq: Struct with split queue related members
* @splitq.num_rxq_sets: Number of RX queue sets
+ * @splitq.num_bufq_sets: Number of buffer queue sets
* @splitq.rxq_sets: Array of RX queue sets
* @splitq.bufq_sets: Buffer queue set pointer
*
@@ -941,6 +944,7 @@ struct idpf_rxq_group {
} singleq;
struct {
u16 num_rxq_sets;
+ u16 num_bufq_sets;
struct idpf_rxq_set *rxq_sets[IDPF_LARGE_MAX_Q];
struct idpf_bufq_set *bufq_sets;
} splitq;
@@ -1071,14 +1075,18 @@ static inline u32 idpf_tx_splitq_get_free_bufs(struct idpf_sw_queue *refillq)
int idpf_vport_singleq_napi_poll(struct napi_struct *napi, int budget);
void idpf_vport_init_num_qs(struct idpf_vport *vport,
- struct virtchnl2_create_vport *vport_msg);
-void idpf_vport_calc_num_q_desc(struct idpf_vport *vport);
+ struct virtchnl2_create_vport *vport_msg,
+ struct idpf_q_vec_rsrc *rsrc);
+void idpf_vport_calc_num_q_desc(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc);
int idpf_vport_calc_total_qs(struct idpf_adapter *adapter, u16 vport_index,
struct virtchnl2_create_vport *vport_msg,
struct idpf_vport_max_q *max_q);
-void idpf_vport_calc_num_q_groups(struct idpf_vport *vport);
-int idpf_vport_queues_alloc(struct idpf_vport *vport);
-void idpf_vport_queues_rel(struct idpf_vport *vport);
+void idpf_vport_calc_num_q_groups(struct idpf_q_vec_rsrc *rsrc);
+int idpf_vport_queues_alloc(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc);
+void idpf_vport_queues_rel(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc);
void idpf_vport_intr_rel(struct idpf_q_vec_rsrc *rsrc);
int idpf_vport_intr_alloc(struct idpf_vport *vport,
struct idpf_q_vec_rsrc *rsrc);
@@ -1092,7 +1100,8 @@ void idpf_vport_intr_ena(struct idpf_vport *vport,
int idpf_config_rss(struct idpf_vport *vport);
int idpf_init_rss(struct idpf_vport *vport);
void idpf_deinit_rss(struct idpf_vport *vport);
-int idpf_rx_bufs_init_all(struct idpf_vport *vport);
+int idpf_rx_bufs_init_all(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc);
struct idpf_q_vector *idpf_find_rxq_vec(const struct idpf_vport *vport,
u32 q_num);
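
The new __IDPF_Q_RSC_EN bit lets RSC be recorded per RX queue when the queue groups are set up and then consumed later, for example while filling the virtchnl RXQ chunks, without re-checking the vport's feature flags. A minimal, hypothetical model of such per-queue flag bits (not the driver's real idpf_queue_assign()/idpf_queue_has() macros) could look like this:

/*
 * Minimal model of per-queue flag bits, similar in spirit to
 * __IDPF_Q_RSC_EN; not the driver's actual flag helpers.
 */
#include <stdbool.h>
#include <stdio.h>

enum q_flags {
	Q_RSC_EN,
	Q_HSPLIT_EN,
};

struct rx_queue {
	unsigned long flags;
};

static inline void q_flag_assign(struct rx_queue *q, enum q_flags f, bool on)
{
	if (on)
		q->flags |= 1UL << f;
	else
		q->flags &= ~(1UL << f);
}

static inline bool q_flag_has(const struct rx_queue *q, enum q_flags f)
{
	return q->flags & (1UL << f);
}

int main(void)
{
	struct rx_queue q = { 0 };

	/* decided once at queue-group setup time ... */
	q_flag_assign(&q, Q_RSC_EN, true);

	/* ... and consumed later without looking at the vport */
	if (q_flag_has(&q, Q_RSC_EN))
		printf("RSC flag set for this queue\n");

	return 0;
}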
diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
index 0a62a7116f5f..e5e68ac436ea 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
@@ -1384,13 +1384,15 @@ static int idpf_vport_get_q_reg(u32 *reg_vals, int num_regs, u32 q_type,
/**
* __idpf_queue_reg_init - initialize queue registers
* @vport: virtual port structure
+ * @rsrc: pointer to queue and vector resources
* @reg_vals: registers we are initializing
* @num_regs: how many registers there are in total
* @q_type: queue model
*
* Return number of queues that are initialized
*/
-static int __idpf_queue_reg_init(struct idpf_vport *vport, u32 *reg_vals,
+static int __idpf_queue_reg_init(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc, u32 *reg_vals,
int num_regs, u32 q_type)
{
struct idpf_adapter *adapter = vport->adapter;
@@ -1398,8 +1400,8 @@ static int __idpf_queue_reg_init(struct idpf_vport *vport, u32 *reg_vals,
switch (q_type) {
case VIRTCHNL2_QUEUE_TYPE_TX:
- for (i = 0; i < vport->num_txq_grp; i++) {
- struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
+ for (i = 0; i < rsrc->num_txq_grp; i++) {
+ struct idpf_txq_group *tx_qgrp = &rsrc->txq_grps[i];
for (j = 0; j < tx_qgrp->num_txq && k < num_regs; j++, k++)
tx_qgrp->txqs[j]->tail =
@@ -1407,8 +1409,8 @@ static int __idpf_queue_reg_init(struct idpf_vport *vport, u32 *reg_vals,
}
break;
case VIRTCHNL2_QUEUE_TYPE_RX:
- for (i = 0; i < vport->num_rxq_grp; i++) {
- struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
+ for (i = 0; i < rsrc->num_rxq_grp; i++) {
+ struct idpf_rxq_group *rx_qgrp = &rsrc->rxq_grps[i];
u16 num_rxq = rx_qgrp->singleq.num_rxq;
for (j = 0; j < num_rxq && k < num_regs; j++, k++) {
@@ -1421,9 +1423,9 @@ static int __idpf_queue_reg_init(struct idpf_vport *vport, u32 *reg_vals,
}
break;
case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
- for (i = 0; i < vport->num_rxq_grp; i++) {
- struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
- u8 num_bufqs = vport->num_bufqs_per_qgrp;
+ for (i = 0; i < rsrc->num_rxq_grp; i++) {
+ struct idpf_rxq_group *rx_qgrp = &rsrc->rxq_grps[i];
+ u8 num_bufqs = rsrc->num_bufqs_per_qgrp;
for (j = 0; j < num_bufqs && k < num_regs; j++, k++) {
struct idpf_buf_queue *q;
@@ -1444,11 +1446,13 @@ static int __idpf_queue_reg_init(struct idpf_vport *vport, u32 *reg_vals,
/**
* idpf_queue_reg_init - initialize queue registers
* @vport: virtual port structure
+ * @rsrc: pointer to queue and vector resources
* @chunks: queue registers received over mailbox
*
* Return 0 on success, negative on failure
*/
int idpf_queue_reg_init(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc,
struct idpf_queue_id_reg_info *chunks)
{
int num_regs, ret = 0;
@@ -1463,14 +1467,14 @@ int idpf_queue_reg_init(struct idpf_vport *vport,
num_regs = idpf_vport_get_q_reg(reg_vals, IDPF_LARGE_MAX_Q,
VIRTCHNL2_QUEUE_TYPE_TX,
chunks);
- if (num_regs < vport->num_txq) {
+ if (num_regs < rsrc->num_txq) {
ret = -EINVAL;
goto free_reg_vals;
}
- num_regs = __idpf_queue_reg_init(vport, reg_vals, num_regs,
+ num_regs = __idpf_queue_reg_init(vport, rsrc, reg_vals, num_regs,
VIRTCHNL2_QUEUE_TYPE_TX);
- if (num_regs < vport->num_txq) {
+ if (num_regs < rsrc->num_txq) {
ret = -EINVAL;
goto free_reg_vals;
}
@@ -1478,18 +1482,18 @@ int idpf_queue_reg_init(struct idpf_vport *vport,
/* Initialize Rx/buffer queue tail register address based on Rx queue
* model
*/
- if (idpf_is_queue_model_split(vport->rxq_model)) {
+ if (idpf_is_queue_model_split(rsrc->rxq_model)) {
num_regs = idpf_vport_get_q_reg(reg_vals, IDPF_LARGE_MAX_Q,
VIRTCHNL2_QUEUE_TYPE_RX_BUFFER,
chunks);
- if (num_regs < vport->num_bufq) {
+ if (num_regs < rsrc->num_bufq) {
ret = -EINVAL;
goto free_reg_vals;
}
- num_regs = __idpf_queue_reg_init(vport, reg_vals, num_regs,
+ num_regs = __idpf_queue_reg_init(vport, rsrc, reg_vals, num_regs,
VIRTCHNL2_QUEUE_TYPE_RX_BUFFER);
- if (num_regs < vport->num_bufq) {
+ if (num_regs < rsrc->num_bufq) {
ret = -EINVAL;
goto free_reg_vals;
}
@@ -1497,14 +1501,14 @@ int idpf_queue_reg_init(struct idpf_vport *vport,
num_regs = idpf_vport_get_q_reg(reg_vals, IDPF_LARGE_MAX_Q,
VIRTCHNL2_QUEUE_TYPE_RX,
chunks);
- if (num_regs < vport->num_rxq) {
+ if (num_regs < rsrc->num_rxq) {
ret = -EINVAL;
goto free_reg_vals;
}
- num_regs = __idpf_queue_reg_init(vport, reg_vals, num_regs,
+ num_regs = __idpf_queue_reg_init(vport, rsrc, reg_vals, num_regs,
VIRTCHNL2_QUEUE_TYPE_RX);
- if (num_regs < vport->num_rxq) {
+ if (num_regs < rsrc->num_rxq) {
ret = -EINVAL;
goto free_reg_vals;
}
@@ -1603,6 +1607,7 @@ int idpf_send_create_vport_msg(struct idpf_adapter *adapter,
*/
int idpf_check_supported_desc_ids(struct idpf_vport *vport)
{
+ struct idpf_q_vec_rsrc *rsrc = &vport->dflt_qv_rsrc;
struct idpf_adapter *adapter = vport->adapter;
struct virtchnl2_create_vport *vport_msg;
u64 rx_desc_ids, tx_desc_ids;
@@ -1619,17 +1624,17 @@ int idpf_check_supported_desc_ids(struct idpf_vport *vport)
rx_desc_ids = le64_to_cpu(vport_msg->rx_desc_ids);
tx_desc_ids = le64_to_cpu(vport_msg->tx_desc_ids);
- if (idpf_is_queue_model_split(vport->rxq_model)) {
+ if (idpf_is_queue_model_split(rsrc->rxq_model)) {
if (!(rx_desc_ids & VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M)) {
dev_info(&adapter->pdev->dev, "Minimum RX descriptor support not provided, using the default\n");
vport_msg->rx_desc_ids = cpu_to_le64(VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M);
}
} else {
if (!(rx_desc_ids & VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M))
- vport->base_rxd = true;
+ rsrc->base_rxd = true;
}
- if (!idpf_is_queue_model_split(vport->txq_model))
+ if (!idpf_is_queue_model_split(rsrc->txq_model))
return 0;
if ((tx_desc_ids & MIN_SUPPORT_TXDID) != MIN_SUPPORT_TXDID) {
@@ -1714,24 +1719,24 @@ int idpf_send_disable_vport_msg(struct idpf_vport *vport)
/**
* idpf_fill_txq_config_chunk - fill chunk describing the Tx queue
- * @vport: virtual port data structure
+ * @rsrc: pointer to queue and vector resources
* @q: Tx queue to be inserted into VC chunk
* @qi: pointer to the buffer containing the VC chunk
*/
-static void idpf_fill_txq_config_chunk(const struct idpf_vport *vport,
+static void idpf_fill_txq_config_chunk(const struct idpf_q_vec_rsrc *rsrc,
const struct idpf_tx_queue *q,
struct virtchnl2_txq_info *qi)
{
u32 val;
qi->queue_id = cpu_to_le32(q->q_id);
- qi->model = cpu_to_le16(vport->txq_model);
+ qi->model = cpu_to_le16(rsrc->txq_model);
qi->type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX);
qi->ring_len = cpu_to_le16(q->desc_count);
qi->dma_ring_addr = cpu_to_le64(q->dma);
qi->relative_queue_id = cpu_to_le16(q->rel_q_id);
- if (!idpf_is_queue_model_split(vport->txq_model)) {
+ if (!idpf_is_queue_model_split(rsrc->txq_model)) {
qi->sched_mode = cpu_to_le16(VIRTCHNL2_TXQ_SCHED_MODE_QUEUE);
return;
}
@@ -1753,18 +1758,18 @@ static void idpf_fill_txq_config_chunk(const struct idpf_vport *vport,
/**
* idpf_fill_complq_config_chunk - fill chunk describing the completion queue
- * @vport: virtual port data structure
+ * @rsrc: pointer to queue and vector resources
* @q: completion queue to be inserted into VC chunk
* @qi: pointer to the buffer containing the VC chunk
*/
-static void idpf_fill_complq_config_chunk(const struct idpf_vport *vport,
+static void idpf_fill_complq_config_chunk(const struct idpf_q_vec_rsrc *rsrc,
const struct idpf_compl_queue *q,
struct virtchnl2_txq_info *qi)
{
u32 val;
qi->queue_id = cpu_to_le32(q->q_id);
- qi->model = cpu_to_le16(vport->txq_model);
+ qi->model = cpu_to_le16(rsrc->txq_model);
qi->type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION);
qi->ring_len = cpu_to_le16(q->desc_count);
qi->dma_ring_addr = cpu_to_le64(q->dma);
@@ -1830,10 +1835,10 @@ static int idpf_send_config_tx_queue_set_msg(const struct idpf_queue_set *qs)
for (u32 i = 0; i < qs->num; i++) {
if (qs->qs[i].type == VIRTCHNL2_QUEUE_TYPE_TX)
- idpf_fill_txq_config_chunk(qs->vport, qs->qs[i].txq,
+ idpf_fill_txq_config_chunk(qs->qv_rsrc, qs->qs[i].txq,
&qi[params.num_chunks++]);
else if (qs->qs[i].type == VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION)
- idpf_fill_complq_config_chunk(qs->vport,
+ idpf_fill_complq_config_chunk(qs->qv_rsrc,
qs->qs[i].complq,
&qi[params.num_chunks++]);
}
@@ -1844,29 +1849,31 @@ static int idpf_send_config_tx_queue_set_msg(const struct idpf_queue_set *qs)
/**
* idpf_send_config_tx_queues_msg - send virtchnl config Tx queues message
* @vport: virtual port data structure
+ * @rsrc: pointer to queue and vector resources
*
* Return: 0 on success, -errno on failure.
*/
-static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport)
+static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc)
{
struct idpf_queue_set *qs __free(kfree) = NULL;
- u32 totqs = vport->num_txq + vport->num_complq;
+ u32 totqs = rsrc->num_txq + rsrc->num_complq;
u32 k = 0;
- qs = idpf_alloc_queue_set(vport, &vport->dflt_qv_rsrc, totqs);
+ qs = idpf_alloc_queue_set(vport, rsrc, totqs);
if (!qs)
return -ENOMEM;
/* Populate the queue info buffer with all queue context info */
- for (u32 i = 0; i < vport->num_txq_grp; i++) {
- const struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
+ for (u32 i = 0; i < rsrc->num_txq_grp; i++) {
+ const struct idpf_txq_group *tx_qgrp = &rsrc->txq_grps[i];
for (u32 j = 0; j < tx_qgrp->num_txq; j++) {
qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_TX;
qs->qs[k++].txq = tx_qgrp->txqs[j];
}
- if (idpf_is_queue_model_split(vport->txq_model)) {
+ if (idpf_is_queue_model_split(rsrc->txq_model)) {
qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
qs->qs[k++].complq = tx_qgrp->complq;
}
@@ -1881,28 +1888,28 @@ static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport)
/**
* idpf_fill_rxq_config_chunk - fill chunk describing the Rx queue
- * @vport: virtual port data structure
+ * @rsrc: pointer to queue and vector resources
* @q: Rx queue to be inserted into VC chunk
* @qi: pointer to the buffer containing the VC chunk
*/
-static void idpf_fill_rxq_config_chunk(const struct idpf_vport *vport,
+static void idpf_fill_rxq_config_chunk(const struct idpf_q_vec_rsrc *rsrc,
struct idpf_rx_queue *q,
struct virtchnl2_rxq_info *qi)
{
const struct idpf_bufq_set *sets;
qi->queue_id = cpu_to_le32(q->q_id);
- qi->model = cpu_to_le16(vport->rxq_model);
+ qi->model = cpu_to_le16(rsrc->rxq_model);
qi->type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX);
qi->ring_len = cpu_to_le16(q->desc_count);
qi->dma_ring_addr = cpu_to_le64(q->dma);
qi->max_pkt_size = cpu_to_le32(q->rx_max_pkt_size);
qi->rx_buffer_low_watermark = cpu_to_le16(q->rx_buffer_low_watermark);
qi->qflags = cpu_to_le16(VIRTCHNL2_RX_DESC_SIZE_32BYTE);
- if (idpf_is_feature_ena(vport, NETIF_F_GRO_HW))
+ if (idpf_queue_has(RSC_EN, q))
qi->qflags |= cpu_to_le16(VIRTCHNL2_RXQ_RSC);
- if (!idpf_is_queue_model_split(vport->rxq_model)) {
+ if (!idpf_is_queue_model_split(rsrc->rxq_model)) {
qi->data_buffer_size = cpu_to_le32(q->rx_buf_size);
qi->desc_ids = cpu_to_le64(q->rxdids);
@@ -1919,7 +1926,7 @@ static void idpf_fill_rxq_config_chunk(const struct idpf_vport *vport,
qi->data_buffer_size = cpu_to_le32(q->rx_buf_size);
qi->rx_bufq1_id = cpu_to_le16(sets[0].bufq.q_id);
- if (vport->num_bufqs_per_qgrp > IDPF_SINGLE_BUFQ_PER_RXQ_GRP) {
+ if (rsrc->num_bufqs_per_qgrp > IDPF_SINGLE_BUFQ_PER_RXQ_GRP) {
qi->bufq2_ena = IDPF_BUFQ2_ENA;
qi->rx_bufq2_id = cpu_to_le16(sets[1].bufq.q_id);
}
@@ -1936,16 +1943,16 @@ static void idpf_fill_rxq_config_chunk(const struct idpf_vport *vport,
/**
* idpf_fill_bufq_config_chunk - fill chunk describing the buffer queue
- * @vport: virtual port data structure
+ * @rsrc: pointer to queue and vector resources
* @q: buffer queue to be inserted into VC chunk
* @qi: pointer to the buffer containing the VC chunk
*/
-static void idpf_fill_bufq_config_chunk(const struct idpf_vport *vport,
+static void idpf_fill_bufq_config_chunk(const struct idpf_q_vec_rsrc *rsrc,
const struct idpf_buf_queue *q,
struct virtchnl2_rxq_info *qi)
{
qi->queue_id = cpu_to_le32(q->q_id);
- qi->model = cpu_to_le16(vport->rxq_model);
+ qi->model = cpu_to_le16(rsrc->rxq_model);
qi->type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX_BUFFER);
qi->ring_len = cpu_to_le16(q->desc_count);
qi->dma_ring_addr = cpu_to_le64(q->dma);
@@ -1953,7 +1960,7 @@ static void idpf_fill_bufq_config_chunk(const struct idpf_vport *vport,
qi->rx_buffer_low_watermark = cpu_to_le16(q->rx_buffer_low_watermark);
qi->desc_ids = cpu_to_le64(VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M);
qi->buffer_notif_stride = IDPF_RX_BUF_STRIDE;
- if (idpf_is_feature_ena(vport, NETIF_F_GRO_HW))
+ if (idpf_queue_has(RSC_EN, q))
qi->qflags = cpu_to_le16(VIRTCHNL2_RXQ_RSC);
if (idpf_queue_has(HSPLIT_EN, q)) {
@@ -2015,10 +2022,10 @@ static int idpf_send_config_rx_queue_set_msg(const struct idpf_queue_set *qs)
for (u32 i = 0; i < qs->num; i++) {
if (qs->qs[i].type == VIRTCHNL2_QUEUE_TYPE_RX)
- idpf_fill_rxq_config_chunk(qs->vport, qs->qs[i].rxq,
+ idpf_fill_rxq_config_chunk(qs->qv_rsrc, qs->qs[i].rxq,
&qi[params.num_chunks++]);
else if (qs->qs[i].type == VIRTCHNL2_QUEUE_TYPE_RX_BUFFER)
- idpf_fill_bufq_config_chunk(qs->vport, qs->qs[i].bufq,
+ idpf_fill_bufq_config_chunk(qs->qv_rsrc, qs->qs[i].bufq,
&qi[params.num_chunks++]);
}
@@ -2028,23 +2035,25 @@ static int idpf_send_config_rx_queue_set_msg(const struct idpf_queue_set *qs)
/**
* idpf_send_config_rx_queues_msg - send virtchnl config Rx queues message
* @vport: virtual port data structure
+ * @rsrc: pointer to queue and vector resources
*
* Return: 0 on success, -errno on failure.
*/
-static int idpf_send_config_rx_queues_msg(struct idpf_vport *vport)
+static int idpf_send_config_rx_queues_msg(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc)
{
- bool splitq = idpf_is_queue_model_split(vport->rxq_model);
+ bool splitq = idpf_is_queue_model_split(rsrc->rxq_model);
struct idpf_queue_set *qs __free(kfree) = NULL;
- u32 totqs = vport->num_rxq + vport->num_bufq;
+ u32 totqs = rsrc->num_rxq + rsrc->num_bufq;
u32 k = 0;
- qs = idpf_alloc_queue_set(vport, &vport->dflt_qv_rsrc, totqs);
+ qs = idpf_alloc_queue_set(vport, rsrc, totqs);
if (!qs)
return -ENOMEM;
/* Populate the queue info buffer with all queue context info */
- for (u32 i = 0; i < vport->num_rxq_grp; i++) {
- const struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
+ for (u32 i = 0; i < rsrc->num_rxq_grp; i++) {
+ const struct idpf_rxq_group *rx_qgrp = &rsrc->rxq_grps[i];
u32 num_rxq;
if (!splitq) {
@@ -2052,7 +2061,7 @@ static int idpf_send_config_rx_queues_msg(struct idpf_vport *vport)
goto rxq;
}
- for (u32 j = 0; j < vport->num_bufqs_per_qgrp; j++) {
+ for (u32 j = 0; j < rsrc->num_bufqs_per_qgrp; j++) {
qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
qs->qs[k++].bufq = &rx_qgrp->splitq.bufq_sets[j].bufq;
}
@@ -2176,21 +2185,22 @@ static int idpf_send_ena_dis_queue_set_msg(const struct idpf_queue_set *qs,
*/
static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, bool en)
{
+ struct idpf_q_vec_rsrc *rsrc = &vport->dflt_qv_rsrc;
struct idpf_queue_set *qs __free(kfree) = NULL;
u32 num_txq, num_q, k = 0;
bool split;
- num_txq = vport->num_txq + vport->num_complq;
- num_q = num_txq + vport->num_rxq + vport->num_bufq;
+ num_txq = rsrc->num_txq + rsrc->num_complq;
+ num_q = num_txq + rsrc->num_rxq + rsrc->num_bufq;
- qs = idpf_alloc_queue_set(vport, &vport->dflt_qv_rsrc, num_q);
+ qs = idpf_alloc_queue_set(vport, rsrc, num_q);
if (!qs)
return -ENOMEM;
- split = idpf_is_queue_model_split(vport->txq_model);
+ split = idpf_is_queue_model_split(rsrc->txq_model);
- for (u32 i = 0; i < vport->num_txq_grp; i++) {
- const struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
+ for (u32 i = 0; i < rsrc->num_txq_grp; i++) {
+ const struct idpf_txq_group *tx_qgrp = &rsrc->txq_grps[i];
for (u32 j = 0; j < tx_qgrp->num_txq; j++) {
qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_TX;
@@ -2207,10 +2217,10 @@ static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, bool en)
if (k != num_txq)
return -EINVAL;
- split = idpf_is_queue_model_split(vport->rxq_model);
+ split = idpf_is_queue_model_split(rsrc->rxq_model);
- for (u32 i = 0; i < vport->num_rxq_grp; i++) {
- const struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
+ for (u32 i = 0; i < rsrc->num_rxq_grp; i++) {
+ const struct idpf_rxq_group *rx_qgrp = &rsrc->rxq_grps[i];
u32 num_rxq;
if (split)
@@ -2231,7 +2241,7 @@ static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, bool en)
if (!split)
continue;
- for (u32 j = 0; j < vport->num_bufqs_per_qgrp; j++) {
+ for (u32 j = 0; j < rsrc->num_bufqs_per_qgrp; j++) {
qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
qs->qs[k++].bufq = &rx_qgrp->splitq.bufq_sets[j].bufq;
}
@@ -2299,7 +2309,7 @@ idpf_send_map_unmap_queue_set_vector_msg(const struct idpf_queue_set *qs,
params.chunks = vqv;
- split = idpf_is_queue_model_split(qs->vport->txq_model);
+ split = idpf_is_queue_model_split(qs->qv_rsrc->txq_model);
for (u32 i = 0; i < qs->num; i++) {
const struct idpf_queue_ptr *q = &qs->qs[i];
@@ -2361,22 +2371,25 @@ idpf_send_map_unmap_queue_set_vector_msg(const struct idpf_queue_set *qs,
* idpf_send_map_unmap_queue_vector_msg - send virtchnl map or unmap queue
* vector message
* @vport: virtual port data structure
+ * @rsrc: pointer to queue and vector resources
* @map: true for map and false for unmap
*
* Return: 0 on success, -errno on failure.
*/
-int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map)
+int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc,
+ bool map)
{
struct idpf_queue_set *qs __free(kfree) = NULL;
- u32 num_q = vport->num_txq + vport->num_rxq;
+ u32 num_q = rsrc->num_txq + rsrc->num_rxq;
u32 k = 0;
- qs = idpf_alloc_queue_set(vport, &vport->dflt_qv_rsrc, num_q);
+ qs = idpf_alloc_queue_set(vport, rsrc, num_q);
if (!qs)
return -ENOMEM;
- for (u32 i = 0; i < vport->num_txq_grp; i++) {
- const struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
+ for (u32 i = 0; i < rsrc->num_txq_grp; i++) {
+ const struct idpf_txq_group *tx_qgrp = &rsrc->txq_grps[i];
for (u32 j = 0; j < tx_qgrp->num_txq; j++) {
qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_TX;
@@ -2384,14 +2397,14 @@ int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map)
}
}
- if (k != vport->num_txq)
+ if (k != rsrc->num_txq)
return -EINVAL;
- for (u32 i = 0; i < vport->num_rxq_grp; i++) {
- const struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
+ for (u32 i = 0; i < rsrc->num_rxq_grp; i++) {
+ const struct idpf_rxq_group *rx_qgrp = &rsrc->rxq_grps[i];
u32 num_rxq;
- if (idpf_is_queue_model_split(vport->rxq_model))
+ if (idpf_is_queue_model_split(rsrc->rxq_model))
num_rxq = rx_qgrp->splitq.num_rxq_sets;
else
num_rxq = rx_qgrp->singleq.num_rxq;
@@ -2399,7 +2412,7 @@ int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map)
for (u32 j = 0; j < num_rxq; j++) {
qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_RX;
- if (idpf_is_queue_model_split(vport->rxq_model))
+ if (idpf_is_queue_model_split(rsrc->rxq_model))
qs->qs[k++].rxq =
&rx_qgrp->splitq.rxq_sets[j]->rxq;
else
@@ -2558,19 +2571,21 @@ int idpf_send_delete_queues_msg(struct idpf_vport *vport,
/**
* idpf_send_config_queues_msg - Send config queues virtchnl message
* @vport: Virtual port private data structure
+ * @rsrc: pointer to queue and vector resources
*
* Will send config queues virtchnl message. Returns 0 on success, negative on
* failure.
*/
-int idpf_send_config_queues_msg(struct idpf_vport *vport)
+int idpf_send_config_queues_msg(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc)
{
int err;
- err = idpf_send_config_tx_queues_msg(vport);
+ err = idpf_send_config_tx_queues_msg(vport, rsrc);
if (err)
return err;
- return idpf_send_config_rx_queues_msg(vport);
+ return idpf_send_config_rx_queues_msg(vport, rsrc);
}
/**
@@ -3025,12 +3040,14 @@ int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport)
struct idpf_vc_xn_params xn_params = {};
u16 next_ptype_id = 0;
ssize_t reply_sz;
+ bool is_splitq;
int i, j, k;
if (vport->rx_ptype_lkup)
return 0;
- if (idpf_is_queue_model_split(vport->rxq_model))
+ is_splitq = idpf_is_queue_model_split(vport->dflt_qv_rsrc.rxq_model);
+ if (is_splitq)
max_ptype = IDPF_RX_MAX_PTYPE;
else
max_ptype = IDPF_RX_MAX_BASE_PTYPE;
@@ -3094,7 +3111,7 @@ int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport)
IDPF_INVALID_PTYPE_ID)
goto out;
- if (idpf_is_queue_model_split(vport->rxq_model))
+ if (is_splitq)
k = le16_to_cpu(ptype->ptype_id_10);
else
k = ptype->ptype_id_8;
@@ -3628,7 +3645,7 @@ int idpf_vport_alloc_vec_indexes(struct idpf_vport *vport,
vec_info.num_curr_vecs += IDPF_RESERVED_VECS;
/* XDPSQs are all bound to the NOIRQ vector from IDPF_RESERVED_VECS */
- req = max(vport->num_txq - vport->num_xdp_txq, vport->num_rxq) +
+ req = max(rsrc->num_txq - vport->num_xdp_txq, rsrc->num_rxq) +
IDPF_RESERVED_VECS;
vec_info.num_req_vecs = req;
@@ -3684,8 +3701,8 @@ int idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q)
vport_config->max_q.max_complq = max_q->max_complq;
vport_config->max_q.max_bufq = max_q->max_bufq;
- vport->txq_model = le16_to_cpu(vport_msg->txq_model);
- vport->rxq_model = le16_to_cpu(vport_msg->rxq_model);
+ rsrc->txq_model = le16_to_cpu(vport_msg->txq_model);
+ rsrc->rxq_model = le16_to_cpu(vport_msg->rxq_model);
vport->vport_type = le16_to_cpu(vport_msg->vport_type);
vport->vport_id = le32_to_cpu(vport_msg->vport_id);
@@ -3702,9 +3719,9 @@ int idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q)
idpf_vport_set_hsplit(vport, ETHTOOL_TCP_DATA_SPLIT_ENABLED);
- idpf_vport_init_num_qs(vport, vport_msg);
- idpf_vport_calc_num_q_desc(vport);
- idpf_vport_calc_num_q_groups(vport);
+ idpf_vport_init_num_qs(vport, vport_msg, rsrc);
+ idpf_vport_calc_num_q_desc(vport, rsrc);
+ idpf_vport_calc_num_q_groups(rsrc);
idpf_vport_alloc_vec_indexes(vport, rsrc);
vport->crc_enable = adapter->crc_enable;
@@ -3813,6 +3830,7 @@ static int idpf_vport_get_queue_ids(u32 *qids, int num_qids, u16 q_type,
/**
* __idpf_vport_queue_ids_init - Initialize queue ids from Mailbox parameters
* @vport: virtual port for which the queues ids are initialized
+ * @rsrc: pointer to queue and vector resources
* @qids: queue ids
* @num_qids: number of queue ids
* @q_type: type of queue
@@ -3821,6 +3839,7 @@ static int idpf_vport_get_queue_ids(u32 *qids, int num_qids, u16 q_type,
* parameters. Returns number of queue ids initialized.
*/
static int __idpf_vport_queue_ids_init(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc,
const u32 *qids,
int num_qids,
u32 q_type)
@@ -3829,19 +3848,19 @@ static int __idpf_vport_queue_ids_init(struct idpf_vport *vport,
switch (q_type) {
case VIRTCHNL2_QUEUE_TYPE_TX:
- for (i = 0; i < vport->num_txq_grp; i++) {
- struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
+ for (i = 0; i < rsrc->num_txq_grp; i++) {
+ struct idpf_txq_group *tx_qgrp = &rsrc->txq_grps[i];
for (j = 0; j < tx_qgrp->num_txq && k < num_qids; j++, k++)
tx_qgrp->txqs[j]->q_id = qids[k];
}
break;
case VIRTCHNL2_QUEUE_TYPE_RX:
- for (i = 0; i < vport->num_rxq_grp; i++) {
- struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
+ for (i = 0; i < rsrc->num_rxq_grp; i++) {
+ struct idpf_rxq_group *rx_qgrp = &rsrc->rxq_grps[i];
u16 num_rxq;
- if (idpf_is_queue_model_split(vport->rxq_model))
+ if (idpf_is_queue_model_split(rsrc->rxq_model))
num_rxq = rx_qgrp->splitq.num_rxq_sets;
else
num_rxq = rx_qgrp->singleq.num_rxq;
@@ -3849,7 +3868,7 @@ static int __idpf_vport_queue_ids_init(struct idpf_vport *vport,
for (j = 0; j < num_rxq && k < num_qids; j++, k++) {
struct idpf_rx_queue *q;
- if (idpf_is_queue_model_split(vport->rxq_model))
+ if (idpf_is_queue_model_split(rsrc->rxq_model))
q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
else
q = rx_qgrp->singleq.rxqs[j];
@@ -3858,16 +3877,16 @@ static int __idpf_vport_queue_ids_init(struct idpf_vport *vport,
}
break;
case VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION:
- for (i = 0; i < vport->num_txq_grp && k < num_qids; i++, k++) {
- struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
+ for (i = 0; i < rsrc->num_txq_grp && k < num_qids; i++, k++) {
+ struct idpf_txq_group *tx_qgrp = &rsrc->txq_grps[i];
tx_qgrp->complq->q_id = qids[k];
}
break;
case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
- for (i = 0; i < vport->num_rxq_grp; i++) {
- struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
- u8 num_bufqs = vport->num_bufqs_per_qgrp;
+ for (i = 0; i < rsrc->num_rxq_grp; i++) {
+ struct idpf_rxq_group *rx_qgrp = &rsrc->rxq_grps[i];
+ u8 num_bufqs = rsrc->num_bufqs_per_qgrp;
for (j = 0; j < num_bufqs && k < num_qids; j++, k++) {
struct idpf_buf_queue *q;
@@ -3887,12 +3906,14 @@ static int __idpf_vport_queue_ids_init(struct idpf_vport *vport,
/**
* idpf_vport_queue_ids_init - Initialize queue ids from Mailbox parameters
* @vport: virtual port for which the queues ids are initialized
+ * @rsrc: pointer to queue and vector resources
* @chunks: queue ids received over mailbox
*
* Will initialize all queue ids with ids received as mailbox parameters.
* Returns 0 on success, negative if all the queues are not initialized.
*/
int idpf_vport_queue_ids_init(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc,
struct idpf_queue_id_reg_info *chunks)
{
int num_ids, err = 0;
@@ -3906,13 +3927,13 @@ int idpf_vport_queue_ids_init(struct idpf_vport *vport,
num_ids = idpf_vport_get_queue_ids(qids, IDPF_MAX_QIDS,
VIRTCHNL2_QUEUE_TYPE_TX,
chunks);
- if (num_ids < vport->num_txq) {
+ if (num_ids < rsrc->num_txq) {
err = -EINVAL;
goto mem_rel;
}
- num_ids = __idpf_vport_queue_ids_init(vport, qids, num_ids,
+ num_ids = __idpf_vport_queue_ids_init(vport, rsrc, qids, num_ids,
VIRTCHNL2_QUEUE_TYPE_TX);
- if (num_ids < vport->num_txq) {
+ if (num_ids < rsrc->num_txq) {
err = -EINVAL;
goto mem_rel;
}
@@ -3920,44 +3941,46 @@ int idpf_vport_queue_ids_init(struct idpf_vport *vport,
num_ids = idpf_vport_get_queue_ids(qids, IDPF_MAX_QIDS,
VIRTCHNL2_QUEUE_TYPE_RX,
chunks);
- if (num_ids < vport->num_rxq) {
+ if (num_ids < rsrc->num_rxq) {
err = -EINVAL;
goto mem_rel;
}
- num_ids = __idpf_vport_queue_ids_init(vport, qids, num_ids,
+ num_ids = __idpf_vport_queue_ids_init(vport, rsrc, qids, num_ids,
VIRTCHNL2_QUEUE_TYPE_RX);
- if (num_ids < vport->num_rxq) {
+ if (num_ids < rsrc->num_rxq) {
err = -EINVAL;
goto mem_rel;
}
- if (!idpf_is_queue_model_split(vport->txq_model))
+ if (!idpf_is_queue_model_split(rsrc->txq_model))
goto check_rxq;
q_type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
num_ids = idpf_vport_get_queue_ids(qids, IDPF_MAX_QIDS, q_type, chunks);
- if (num_ids < vport->num_complq) {
+ if (num_ids < rsrc->num_complq) {
err = -EINVAL;
goto mem_rel;
}
- num_ids = __idpf_vport_queue_ids_init(vport, qids, num_ids, q_type);
- if (num_ids < vport->num_complq) {
+ num_ids = __idpf_vport_queue_ids_init(vport, rsrc, qids,
+ num_ids, q_type);
+ if (num_ids < rsrc->num_complq) {
err = -EINVAL;
goto mem_rel;
}
check_rxq:
- if (!idpf_is_queue_model_split(vport->rxq_model))
+ if (!idpf_is_queue_model_split(rsrc->rxq_model))
goto mem_rel;
q_type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
num_ids = idpf_vport_get_queue_ids(qids, IDPF_MAX_QIDS, q_type, chunks);
- if (num_ids < vport->num_bufq) {
+ if (num_ids < rsrc->num_bufq) {
err = -EINVAL;
goto mem_rel;
}
- num_ids = __idpf_vport_queue_ids_init(vport, qids, num_ids, q_type);
- if (num_ids < vport->num_bufq)
+ num_ids = __idpf_vport_queue_ids_init(vport, rsrc, qids,
+ num_ids, q_type);
+ if (num_ids < rsrc->num_bufq)
err = -EINVAL;
mem_rel:
@@ -3969,23 +3992,24 @@ int idpf_vport_queue_ids_init(struct idpf_vport *vport,
/**
* idpf_vport_adjust_qs - Adjust to new requested queues
* @vport: virtual port data struct
+ * @rsrc: pointer to queue and vector resources
*
* Renegotiate queues. Returns 0 on success, negative on failure.
*/
-int idpf_vport_adjust_qs(struct idpf_vport *vport)
+int idpf_vport_adjust_qs(struct idpf_vport *vport, struct idpf_q_vec_rsrc *rsrc)
{
struct virtchnl2_create_vport vport_msg;
int err;
- vport_msg.txq_model = cpu_to_le16(vport->txq_model);
- vport_msg.rxq_model = cpu_to_le16(vport->rxq_model);
+ vport_msg.txq_model = cpu_to_le16(rsrc->txq_model);
+ vport_msg.rxq_model = cpu_to_le16(rsrc->rxq_model);
err = idpf_vport_calc_total_qs(vport->adapter, vport->idx, &vport_msg,
NULL);
if (err)
return err;
- idpf_vport_init_num_qs(vport, &vport_msg);
- idpf_vport_calc_num_q_groups(vport);
+ idpf_vport_init_num_qs(vport, &vport_msg, rsrc);
+ idpf_vport_calc_num_q_groups(rsrc);
return 0;
}
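
Several of the converted virtchnl helpers above keep using the scope-based cleanup idiom (struct idpf_queue_set *qs __free(kfree) = NULL;), so the temporary queue-set buffer is released automatically on every return path. A small standalone sketch of that idiom, built directly on the GCC/Clang cleanup attribute rather than the kernel's cleanup.h helpers, is shown below for illustration only:

/* Standalone model of scope-based cleanup; not kernel code. */
#include <stdio.h>
#include <stdlib.h>

static void free_charp(char **p)
{
	free(*p);	/* runs when the annotated variable goes out of scope */
}

int main(void)
{
	__attribute__((cleanup(free_charp))) char *buf = malloc(64);

	if (!buf)
		return 1;

	snprintf(buf, 64, "freed automatically on scope exit");
	puts(buf);

	return 0;	/* free_charp(&buf) runs here, no explicit free() */
}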
diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h
index ee194d74c669..26fd16d49b5f 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h
@@ -104,8 +104,10 @@ void idpf_vc_core_deinit(struct idpf_adapter *adapter);
int idpf_get_reg_intr_vecs(struct idpf_vport *vport,
struct idpf_vec_regs *reg_vals);
int idpf_queue_reg_init(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc,
struct idpf_queue_id_reg_info *chunks);
int idpf_vport_queue_ids_init(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc,
struct idpf_queue_id_reg_info *chunks);
bool idpf_vport_is_cap_ena(struct idpf_vport *vport, u16 flag);
@@ -145,8 +147,9 @@ int idpf_send_disable_queue_set_msg(const struct idpf_queue_set *qs);
int idpf_send_config_queue_set_msg(const struct idpf_queue_set *qs);
int idpf_send_disable_queues_msg(struct idpf_vport *vport);
-int idpf_send_config_queues_msg(struct idpf_vport *vport);
int idpf_send_enable_queues_msg(struct idpf_vport *vport);
+int idpf_send_config_queues_msg(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc);
int idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q);
u32 idpf_get_vport_id(struct idpf_vport *vport);
@@ -156,7 +159,8 @@ int idpf_send_destroy_vport_msg(struct idpf_vport *vport);
int idpf_send_enable_vport_msg(struct idpf_vport *vport);
int idpf_send_disable_vport_msg(struct idpf_vport *vport);
-int idpf_vport_adjust_qs(struct idpf_vport *vport);
+int idpf_vport_adjust_qs(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc);
int idpf_vport_alloc_max_qs(struct idpf_adapter *adapter,
struct idpf_vport_max_q *max_q);
void idpf_vport_dealloc_max_qs(struct idpf_adapter *adapter,
@@ -173,7 +177,9 @@ int idpf_get_vec_ids(struct idpf_adapter *adapter,
struct virtchnl2_vector_chunks *chunks);
int idpf_send_alloc_vectors_msg(struct idpf_adapter *adapter, u16 num_vectors);
int idpf_send_dealloc_vectors_msg(struct idpf_adapter *adapter);
-int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map);
+int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc,
+ bool map);
int idpf_add_del_mac_filters(struct idpf_vport *vport,
struct idpf_netdev_priv *np,
diff --git a/drivers/net/ethernet/intel/idpf/xdp.c b/drivers/net/ethernet/intel/idpf/xdp.c
index 958d16f87424..2b411bf5184f 100644
--- a/drivers/net/ethernet/intel/idpf/xdp.c
+++ b/drivers/net/ethernet/intel/idpf/xdp.c
@@ -6,17 +6,17 @@
#include "xdp.h"
#include "xsk.h"
-static int idpf_rxq_for_each(const struct idpf_vport *vport,
+static int idpf_rxq_for_each(const struct idpf_q_vec_rsrc *rsrc,
int (*fn)(struct idpf_rx_queue *rxq, void *arg),
void *arg)
{
- bool splitq = idpf_is_queue_model_split(vport->rxq_model);
+ bool splitq = idpf_is_queue_model_split(rsrc->rxq_model);
- if (!vport->rxq_grps)
+ if (!rsrc->rxq_grps)
return -ENETDOWN;
- for (u32 i = 0; i < vport->num_rxq_grp; i++) {
- const struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
+ for (u32 i = 0; i < rsrc->num_rxq_grp; i++) {
+ const struct idpf_rxq_group *rx_qgrp = &rsrc->rxq_grps[i];
u32 num_rxq;
if (splitq)
@@ -45,7 +45,8 @@ static int idpf_rxq_for_each(const struct idpf_vport *vport,
static int __idpf_xdp_rxq_info_init(struct idpf_rx_queue *rxq, void *arg)
{
const struct idpf_vport *vport = rxq->q_vector->vport;
- bool split = idpf_is_queue_model_split(vport->rxq_model);
+ const struct idpf_q_vec_rsrc *rsrc;
+ bool split;
int err;
err = __xdp_rxq_info_reg(&rxq->xdp_rxq, vport->netdev, rxq->idx,
@@ -54,6 +55,9 @@ static int __idpf_xdp_rxq_info_init(struct idpf_rx_queue *rxq, void *arg)
if (err)
return err;
+ rsrc = &vport->dflt_qv_rsrc;
+ split = idpf_is_queue_model_split(rsrc->rxq_model);
+
if (idpf_queue_has(XSK, rxq)) {
err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq,
MEM_TYPE_XSK_BUFF_POOL,
@@ -86,9 +90,9 @@ int idpf_xdp_rxq_info_init(struct idpf_rx_queue *rxq)
return __idpf_xdp_rxq_info_init(rxq, NULL);
}
-int idpf_xdp_rxq_info_init_all(const struct idpf_vport *vport)
+int idpf_xdp_rxq_info_init_all(const struct idpf_q_vec_rsrc *rsrc)
{
- return idpf_rxq_for_each(vport, __idpf_xdp_rxq_info_init, NULL);
+ return idpf_rxq_for_each(rsrc, __idpf_xdp_rxq_info_init, NULL);
}
static int __idpf_xdp_rxq_info_deinit(struct idpf_rx_queue *rxq, void *arg)
@@ -111,10 +115,10 @@ void idpf_xdp_rxq_info_deinit(struct idpf_rx_queue *rxq, u32 model)
__idpf_xdp_rxq_info_deinit(rxq, (void *)(size_t)model);
}
-void idpf_xdp_rxq_info_deinit_all(const struct idpf_vport *vport)
+void idpf_xdp_rxq_info_deinit_all(const struct idpf_q_vec_rsrc *rsrc)
{
- idpf_rxq_for_each(vport, __idpf_xdp_rxq_info_deinit,
- (void *)(size_t)vport->rxq_model);
+ idpf_rxq_for_each(rsrc, __idpf_xdp_rxq_info_deinit,
+ (void *)(size_t)rsrc->rxq_model);
}
static int idpf_xdp_rxq_assign_prog(struct idpf_rx_queue *rxq, void *arg)
@@ -132,10 +136,10 @@ static int idpf_xdp_rxq_assign_prog(struct idpf_rx_queue *rxq, void *arg)
return 0;
}
-void idpf_xdp_copy_prog_to_rqs(const struct idpf_vport *vport,
+void idpf_xdp_copy_prog_to_rqs(const struct idpf_q_vec_rsrc *rsrc,
struct bpf_prog *xdp_prog)
{
- idpf_rxq_for_each(vport, idpf_xdp_rxq_assign_prog, xdp_prog);
+ idpf_rxq_for_each(rsrc, idpf_xdp_rxq_assign_prog, xdp_prog);
}
static void idpf_xdp_tx_timer(struct work_struct *work);
@@ -397,7 +401,7 @@ static const struct xdp_metadata_ops idpf_xdpmo = {
void idpf_xdp_set_features(const struct idpf_vport *vport)
{
- if (!idpf_is_queue_model_split(vport->rxq_model))
+ if (!idpf_is_queue_model_split(vport->dflt_qv_rsrc.rxq_model))
return;
libeth_xdp_set_features_noredir(vport->netdev, &idpf_xdpmo,
@@ -409,6 +413,7 @@ static int idpf_xdp_setup_prog(struct idpf_vport *vport,
const struct netdev_bpf *xdp)
{
const struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
+ const struct idpf_q_vec_rsrc *rsrc = &vport->dflt_qv_rsrc;
struct bpf_prog *old, *prog = xdp->prog;
struct idpf_vport_config *cfg;
int ret;
@@ -419,7 +424,7 @@ static int idpf_xdp_setup_prog(struct idpf_vport *vport,
!test_bit(IDPF_VPORT_REG_NETDEV, cfg->flags) ||
!!vport->xdp_prog == !!prog) {
if (test_bit(IDPF_VPORT_UP, np->state))
- idpf_xdp_copy_prog_to_rqs(vport, prog);
+ idpf_xdp_copy_prog_to_rqs(rsrc, prog);
old = xchg(&vport->xdp_prog, prog);
if (old)
@@ -464,7 +469,7 @@ int idpf_xdp(struct net_device *dev, struct netdev_bpf *xdp)
idpf_vport_ctrl_lock(dev);
vport = idpf_netdev_to_vport(dev);
- if (!idpf_is_queue_model_split(vport->txq_model))
+ if (!idpf_is_queue_model_split(vport->dflt_qv_rsrc.txq_model))
goto notsupp;
switch (xdp->command) {
diff --git a/drivers/net/ethernet/intel/idpf/xdp.h b/drivers/net/ethernet/intel/idpf/xdp.h
index 479f5ef3c604..7ffc6955dfae 100644
--- a/drivers/net/ethernet/intel/idpf/xdp.h
+++ b/drivers/net/ethernet/intel/idpf/xdp.h
@@ -9,10 +9,10 @@
#include "idpf_txrx.h"
int idpf_xdp_rxq_info_init(struct idpf_rx_queue *rxq);
-int idpf_xdp_rxq_info_init_all(const struct idpf_vport *vport);
+int idpf_xdp_rxq_info_init_all(const struct idpf_q_vec_rsrc *rsrc);
void idpf_xdp_rxq_info_deinit(struct idpf_rx_queue *rxq, u32 model);
-void idpf_xdp_rxq_info_deinit_all(const struct idpf_vport *vport);
-void idpf_xdp_copy_prog_to_rqs(const struct idpf_vport *vport,
+void idpf_xdp_rxq_info_deinit_all(const struct idpf_q_vec_rsrc *rsrc);
+void idpf_xdp_copy_prog_to_rqs(const struct idpf_q_vec_rsrc *rsrc,
struct bpf_prog *xdp_prog);
int idpf_xdpsqs_get(const struct idpf_vport *vport);
diff --git a/drivers/net/ethernet/intel/idpf/xsk.c b/drivers/net/ethernet/intel/idpf/xsk.c
index fd2cc43ab43c..e4768ec07336 100644
--- a/drivers/net/ethernet/intel/idpf/xsk.c
+++ b/drivers/net/ethernet/intel/idpf/xsk.c
@@ -26,13 +26,14 @@ static void idpf_xsk_setup_rxq(const struct idpf_vport *vport,
static void idpf_xsk_setup_bufq(const struct idpf_vport *vport,
struct idpf_buf_queue *bufq)
{
+ const struct idpf_q_vec_rsrc *rsrc = &vport->dflt_qv_rsrc;
struct xsk_buff_pool *pool;
u32 qid = U32_MAX;
- for (u32 i = 0; i < vport->num_rxq_grp; i++) {
- const struct idpf_rxq_group *grp = &vport->rxq_grps[i];
+ for (u32 i = 0; i < rsrc->num_rxq_grp; i++) {
+ const struct idpf_rxq_group *grp = &rsrc->rxq_grps[i];
- for (u32 j = 0; j < vport->num_bufqs_per_qgrp; j++) {
+ for (u32 j = 0; j < rsrc->num_bufqs_per_qgrp; j++) {
if (&grp->splitq.bufq_sets[j].bufq == bufq) {
qid = grp->splitq.rxq_sets[0]->rxq.idx;
goto setup;
--
2.39.2