[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <4ad37f67-d419-27ce-581b-c0fa4739c3a3@intel.com>
Date: Wed, 12 Apr 2023 16:06:58 -0700
From: Jacob Keller <jacob.e.keller@...el.com>
To: Hariprasad Kelam <hkelam@...vell.com>, <netdev@...r.kernel.org>,
<linux-kernel@...r.kernel.org>
CC: <kuba@...nel.org>, <davem@...emloft.net>,
<willemdebruijn.kernel@...il.com>, <andrew@...n.ch>,
<sgoutham@...vell.com>, <lcherian@...vell.com>,
<gakula@...vell.com>, <jerinj@...vell.com>, <sbhatta@...vell.com>,
<naveenm@...vell.com>, <edumazet@...gle.com>, <pabeni@...hat.com>,
<jhs@...atatu.com>, <xiyou.wangcong@...il.com>, <jiri@...nulli.us>,
<maxtram95@...il.com>, <corbet@....net>
Subject: Re: [net-next Patch v7 2/6] octeontx2-pf: Rename tot_tx_queues to
non_qos_queues
On 4/10/2023 12:29 AM, Hariprasad Kelam wrote:
> Current implementation is such that tot_tx_queues contains both
> XDP queues and normal TX queues, which are allocated in interface
> open calls and deallocated on interface down calls respectively.
>
> With the addition of QoS, where send queues are allocated/deallocated
> upon user request, QoS send queues won't be part of tot_tx_queues. So
> this patch renames tot_tx_queues to non_qos_queues.
>
> Signed-off-by: Hariprasad Kelam <hkelam@...vell.com>
> Reviewed-by: Simon Horman <simon.horman@...igine.com>
Good to clarify this.
Reviewed-by: Jacob Keller <jacob.e.keller@...el.com>
> ---
> .../ethernet/marvell/octeontx2/nic/otx2_common.c | 12 ++++++------
> .../ethernet/marvell/octeontx2/nic/otx2_common.h | 2 +-
> .../net/ethernet/marvell/octeontx2/nic/otx2_pf.c | 14 +++++++-------
> .../net/ethernet/marvell/octeontx2/nic/otx2_vf.c | 2 +-
> 4 files changed, 15 insertions(+), 15 deletions(-)
>
> diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
> index 8a41ad8ca04f..43bc56fb3c33 100644
> --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
> +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
> @@ -762,7 +762,7 @@ void otx2_sqb_flush(struct otx2_nic *pfvf)
> int timeout = 1000;
>
> ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_STATUS);
> - for (qidx = 0; qidx < pfvf->hw.tot_tx_queues; qidx++) {
> + for (qidx = 0; qidx < pfvf->hw.non_qos_queues; qidx++) {
> incr = (u64)qidx << 32;
> while (timeout) {
> val = otx2_atomic64_add(incr, ptr);
> @@ -1048,7 +1048,7 @@ int otx2_config_nix_queues(struct otx2_nic *pfvf)
> }
>
> /* Initialize TX queues */
> - for (qidx = 0; qidx < pfvf->hw.tot_tx_queues; qidx++) {
> + for (qidx = 0; qidx < pfvf->hw.non_qos_queues; qidx++) {
> u16 sqb_aura = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx);
>
> err = otx2_sq_init(pfvf, qidx, sqb_aura);
> @@ -1095,7 +1095,7 @@ int otx2_config_nix(struct otx2_nic *pfvf)
>
> /* Set RQ/SQ/CQ counts */
> nixlf->rq_cnt = pfvf->hw.rx_queues;
> - nixlf->sq_cnt = pfvf->hw.tot_tx_queues;
> + nixlf->sq_cnt = pfvf->hw.non_qos_queues;
> nixlf->cq_cnt = pfvf->qset.cq_cnt;
> nixlf->rss_sz = MAX_RSS_INDIR_TBL_SIZE;
> nixlf->rss_grps = MAX_RSS_GROUPS;
> @@ -1133,7 +1133,7 @@ void otx2_sq_free_sqbs(struct otx2_nic *pfvf)
> int sqb, qidx;
> u64 iova, pa;
>
> - for (qidx = 0; qidx < hw->tot_tx_queues; qidx++) {
> + for (qidx = 0; qidx < hw->non_qos_queues; qidx++) {
> sq = &qset->sq[qidx];
> if (!sq->sqb_ptrs)
> continue;
> @@ -1349,7 +1349,7 @@ int otx2_sq_aura_pool_init(struct otx2_nic *pfvf)
> stack_pages =
> (num_sqbs + hw->stack_pg_ptrs - 1) / hw->stack_pg_ptrs;
>
> - for (qidx = 0; qidx < hw->tot_tx_queues; qidx++) {
> + for (qidx = 0; qidx < hw->non_qos_queues; qidx++) {
> pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx);
> /* Initialize aura context */
> err = otx2_aura_init(pfvf, pool_id, pool_id, num_sqbs);
> @@ -1369,7 +1369,7 @@ int otx2_sq_aura_pool_init(struct otx2_nic *pfvf)
> goto fail;
>
> /* Allocate pointers and free them to aura/pool */
> - for (qidx = 0; qidx < hw->tot_tx_queues; qidx++) {
> + for (qidx = 0; qidx < hw->non_qos_queues; qidx++) {
> pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx);
> pool = &pfvf->qset.pool[pool_id];
>
> diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
> index 3d22cc6a2804..b926a50138cc 100644
> --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
> +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
> @@ -189,7 +189,7 @@ struct otx2_hw {
> u16 rx_queues;
> u16 tx_queues;
> u16 xdp_queues;
> - u16 tot_tx_queues;
> + u16 non_qos_queues; /* tx queues plus xdp queues */
> u16 max_queues;
> u16 pool_cnt;
> u16 rqpool_cnt;
> diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
> index 179433d0a54a..33d677849aa9 100644
> --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
> +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
> @@ -1257,7 +1257,7 @@ static irqreturn_t otx2_q_intr_handler(int irq, void *data)
> }
>
> /* SQ */
> - for (qidx = 0; qidx < pf->hw.tot_tx_queues; qidx++) {
> + for (qidx = 0; qidx < pf->hw.non_qos_queues; qidx++) {
> u64 sq_op_err_dbg, mnq_err_dbg, snd_err_dbg;
> u8 sq_op_err_code, mnq_err_code, snd_err_code;
>
> @@ -1383,7 +1383,7 @@ static void otx2_free_sq_res(struct otx2_nic *pf)
> otx2_ctx_disable(&pf->mbox, NIX_AQ_CTYPE_SQ, false);
> /* Free SQB pointers */
> otx2_sq_free_sqbs(pf);
> - for (qidx = 0; qidx < pf->hw.tot_tx_queues; qidx++) {
> + for (qidx = 0; qidx < pf->hw.non_qos_queues; qidx++) {
> sq = &qset->sq[qidx];
> qmem_free(pf->dev, sq->sqe);
> qmem_free(pf->dev, sq->tso_hdrs);
> @@ -1433,7 +1433,7 @@ static int otx2_init_hw_resources(struct otx2_nic *pf)
> * so, aura count = pool count.
> */
> hw->rqpool_cnt = hw->rx_queues;
> - hw->sqpool_cnt = hw->tot_tx_queues;
> + hw->sqpool_cnt = hw->non_qos_queues;
> hw->pool_cnt = hw->rqpool_cnt + hw->sqpool_cnt;
>
> /* Maximum hardware supported transmit length */
> @@ -1688,7 +1688,7 @@ int otx2_open(struct net_device *netdev)
>
> netif_carrier_off(netdev);
>
> - pf->qset.cq_cnt = pf->hw.rx_queues + pf->hw.tot_tx_queues;
> + pf->qset.cq_cnt = pf->hw.rx_queues + pf->hw.non_qos_queues;
> /* RQ and SQs are mapped to different CQs,
> * so find out max CQ IRQs (i.e CINTs) needed.
> */
> @@ -1708,7 +1708,7 @@ int otx2_open(struct net_device *netdev)
> if (!qset->cq)
> goto err_free_mem;
>
> - qset->sq = kcalloc(pf->hw.tot_tx_queues,
> + qset->sq = kcalloc(pf->hw.non_qos_queues,
> sizeof(struct otx2_snd_queue), GFP_KERNEL);
> if (!qset->sq)
> goto err_free_mem;
> @@ -2520,7 +2520,7 @@ static int otx2_xdp_setup(struct otx2_nic *pf, struct bpf_prog *prog)
> xdp_features_clear_redirect_target(dev);
> }
>
> - pf->hw.tot_tx_queues += pf->hw.xdp_queues;
> + pf->hw.non_qos_queues += pf->hw.xdp_queues;
>
> if (if_up)
> otx2_open(pf->netdev);
> @@ -2751,7 +2751,7 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
> hw->pdev = pdev;
> hw->rx_queues = qcount;
> hw->tx_queues = qcount;
> - hw->tot_tx_queues = qcount;
> + hw->non_qos_queues = qcount;
> hw->max_queues = qcount;
> hw->rbuf_len = OTX2_DEFAULT_RBUF_LEN;
> /* Use CQE of 128 byte descriptor size by default */
> diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
> index ab126f8706c7..a078949430ce 100644
> --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
> +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
> @@ -570,7 +570,7 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
> hw->rx_queues = qcount;
> hw->tx_queues = qcount;
> hw->max_queues = qcount;
> - hw->tot_tx_queues = qcount;
> + hw->non_qos_queues = qcount;
> hw->rbuf_len = OTX2_DEFAULT_RBUF_LEN;
> /* Use CQE of 128 byte descriptor size by default */
> hw->xqe_size = 128;
Powered by blists - more mailing lists