Message-ID: <aIJJP1OmYAk5Y6PE@opensource>
Date: Thu, 24 Jul 2025 14:54:55 +0000
From: Subbaraya Sundeep <sbhatta@...vell.com>
To: Simon Horman <horms@...nel.org>
CC: <andrew+netdev@...n.ch>, <davem@...emloft.net>, <edumazet@...gle.com>,
<kuba@...nel.org>, <pabeni@...hat.com>, <gakula@...vell.com>,
<hkelam@...vell.com>, <bbhushan2@...vell.com>, <jerinj@...vell.com>,
<lcherian@...vell.com>, <sgoutham@...vell.com>,
<netdev@...r.kernel.org>
Subject: Re: [net-next PATCH v3 07/11] octeontx2-pf: Initialize cn20k
specific aura and pool contexts
Hi Simon,
On 2025-07-22 at 17:03:44, Simon Horman (horms@...nel.org) wrote:
> On Thu, Jul 17, 2025 at 10:37:39PM +0530, Subbaraya Sundeep wrote:
> > From: Linu Cherian <lcherian@...vell.com>
> >
> > With the new CN20K NPA pool and aura contexts supported in the AF
> > driver, this patch modifies the PF driver to use the new NPA contexts.
> > Implement new hw_ops for initializing aura and pool contexts
> > for all the silicons.
> >
> > Signed-off-by: Linu Cherian <lcherian@...vell.com>
> > Signed-off-by: Subbaraya Sundeep <sbhatta@...vell.com>
>
> ...
>
> > @@ -250,3 +239,170 @@ int cn20k_register_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs)
> >
> >  	return 0;
> >  }
> > +
> > +#define RQ_BP_LVL_AURA (255 - ((85 * 256) / 100)) /* BP when 85% is full */
> > +
> > +static int cn20k_aura_aq_init(struct otx2_nic *pfvf, int aura_id,
> > +			      int pool_id, int numptrs)
> > +{
> > +	struct npa_cn20k_aq_enq_req *aq;
> > +	struct otx2_pool *pool;
> > +	int err;
> > +
> > +	pool = &pfvf->qset.pool[pool_id];
> > +
> > +	/* Allocate memory for HW to update Aura count.
> > +	 * Alloc one cache line, so that it fits all FC_STYPE modes.
> > +	 */
> > +	if (!pool->fc_addr) {
> > +		err = qmem_alloc(pfvf->dev, &pool->fc_addr, 1, OTX2_ALIGN);
> > +		if (err)
> > +			return err;
> > +	}
> > +
> > +	/* Initialize this aura's context via AF */
> > +	aq = otx2_mbox_alloc_msg_npa_cn20k_aq_enq(&pfvf->mbox);
> > +	if (!aq) {
> > +		/* Shared mbox memory buffer is full, flush it and retry */
> > +		err = otx2_sync_mbox_msg(&pfvf->mbox);
> > +		if (err)
> > +			return err;
> > +		aq = otx2_mbox_alloc_msg_npa_cn20k_aq_enq(&pfvf->mbox);
> > +		if (!aq)
> > +			return -ENOMEM;
> > +	}
> > +
> > +	aq->aura_id = aura_id;
> > +
> > +	/* Will be filled by AF with correct pool context address */
> > +	aq->aura.pool_addr = pool_id;
> > +	aq->aura.pool_caching = 1;
> > +	aq->aura.shift = ilog2(numptrs) - 8;
> > +	aq->aura.count = numptrs;
> > +	aq->aura.limit = numptrs;
> > +	aq->aura.avg_level = 255;
> > +	aq->aura.ena = 1;
> > +	aq->aura.fc_ena = 1;
> > +	aq->aura.fc_addr = pool->fc_addr->iova;
> > +	aq->aura.fc_hyst_bits = 0; /* Store count on all updates */
> > +
> > +	/* Enable backpressure for RQ aura */
> > +	if (aura_id < pfvf->hw.rqpool_cnt && !is_otx2_lbkvf(pfvf->pdev)) {
> > +		aq->aura.bp_ena = 0;
> > +		/* If NIX1 LF is attached then specify NIX1_RX.
> > +		 *
> > +		 * Below NPA_AURA_S[BP_ENA] is set according to the
> > +		 * NPA_BPINTF_E enumeration given as:
> > +		 * 0x0 + a*0x1 where 'a' is 0 for NIX0_RX and 1 for NIX1_RX so
> > +		 * NIX0_RX is 0x0 + 0*0x1 = 0
> > +		 * NIX1_RX is 0x0 + 1*0x1 = 1
> > +		 * But in HRM it is given that
> > +		 * "NPA_AURA_S[BP_ENA](w1[33:32]) - Enable aura backpressure to
> > +		 * NIX-RX based on [BP] level. One bit per NIX-RX; index
> > +		 * enumerated by NPA_BPINTF_E."
> > +		 */
> > +		if (pfvf->nix_blkaddr == BLKADDR_NIX1)
> > +			aq->aura.bp_ena = 1;
> > +#ifdef CONFIG_DCB
> > +		aq->aura.bpid = pfvf->bpid[pfvf->queue_to_pfc_map[aura_id]];
> > +#else
> > +		aq->aura.bpid = pfvf->bpid[0];
> > +#endif
>
> From a build coverage point of view it is a shame that we can't use
> something like this here (because queue_to_pfc_map doesn't exist
> if CONFIG_DCB isn't enabled).
>
> bpid_idx = IS_ENABLED(CONFIG_DCB) ? ...;
>
> But I do wonder if somehow it's nicer to constrain the #ifdef to an
> as-small-as-possible helper. Something like this (compile tested only):
>
> @@ -242,6 +242,15 @@ int cn20k_register_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs)
>
>  #define RQ_BP_LVL_AURA (255 - ((85 * 256) / 100)) /* BP when 85% is full */
>
> +static u8 cn20k_aura_bpid_idx(struct otx2_nic *pfvf, int aura_id)
> +{
> +#ifdef CONFIG_DCB
> +	return pfvf->queue_to_pfc_map[aura_id];
> +#else
> +	return 0;
> +#endif
> +}
> +
>  static int cn20k_aura_aq_init(struct otx2_nic *pfvf, int aura_id,
>  			      int pool_id, int numptrs)
>  {
> @@ -289,6 +298,7 @@ static int cn20k_aura_aq_init(struct otx2_nic *pfvf, int aura_id,
>  	/* Enable backpressure for RQ aura */
>  	if (aura_id < pfvf->hw.rqpool_cnt && !is_otx2_lbkvf(pfvf->pdev)) {
>  		aq->aura.bp_ena = 0;
> +		u8 bpid_idx;
>  		/* If NIX1 LF is attached then specify NIX1_RX.
>  		 *
>  		 * Below NPA_AURA_S[BP_ENA] is set according to the
> @@ -303,11 +313,9 @@ static int cn20k_aura_aq_init(struct otx2_nic *pfvf, int aura_id,
>  		 */
>  		if (pfvf->nix_blkaddr == BLKADDR_NIX1)
>  			aq->aura.bp_ena = 1;
> -#ifdef CONFIG_DCB
> -		aq->aura.bpid = pfvf->bpid[pfvf->queue_to_pfc_map[aura_id]];
> -#else
> -		aq->aura.bpid = pfvf->bpid[0];
> -#endif
> +
> +		bpid_idx = cn20k_aura_bpid_idx(pfvf, aura_id);
> +		aq->aura.bpid = pfvf->bpid[bpid_idx];
>
Okay, will modify as per your suggestion.
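
Just to confirm, with the helper folded in, the call site in
cn20k_aura_aq_init() should end up roughly like the below (untested
sketch; the only deviation from your diff is hoisting the bpid_idx
declaration to the top of the block, and the NPA_BPINTF_E comment is
trimmed for brevity):

	/* Enable backpressure for RQ aura */
	if (aura_id < pfvf->hw.rqpool_cnt && !is_otx2_lbkvf(pfvf->pdev)) {
		u8 bpid_idx;	/* declaration hoisted; rest as in your diff */

		aq->aura.bp_ena = 0;
		/* If NIX1 LF is attached then specify NIX1_RX */
		if (pfvf->nix_blkaddr == BLKADDR_NIX1)
			aq->aura.bp_ena = 1;

		bpid_idx = cn20k_aura_bpid_idx(pfvf, aura_id);
		aq->aura.bpid = pfvf->bpid[bpid_idx];

		/* Set backpressure level for RQ's Aura */
		aq->aura.bp = RQ_BP_LVL_AURA;
	}
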
Thanks,
Sundeep
>  		/* Set backpressure level for RQ's Aura */
>  		aq->aura.bp = RQ_BP_LVL_AURA;
>
> > +
> > +		/* Set backpressure level for RQ's Aura */
> > +		aq->aura.bp = RQ_BP_LVL_AURA;
> > +	}
> > +
> > +	/* Fill AQ info */
> > +	aq->ctype = NPA_AQ_CTYPE_AURA;
> > +	aq->op = NPA_AQ_INSTOP_INIT;
> > +
> > +	return 0;
> > +}
>
> ...