[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <9ea0484d-a2a4-308c-95fd-c9accdcdc424@amd.com>
Date: Fri, 15 Dec 2023 10:01:36 -0800
From: Brett Creeley <bcreeley@....com>
To: Michal Kubiak <michal.kubiak@...el.com>, intel-wired-lan@...ts.osuosl.org
Cc: aleksander.lobakin@...el.com, larysa.zaremba@...el.com,
alan.brady@...el.com, joshua.a.hay@...el.com, emil.s.tantilov@...el.com,
maciej.fijalkowski@...el.com, netdev@...r.kernel.org,
Przemek Kitszel <przemyslaw.kitszel@...el.com>
Subject: Re: [PATCH iwl-net] idpf: enable WB_ON_ITR
On 12/12/2023 6:55 AM, Michal Kubiak wrote:
> Caution: This message originated from an External Source. Use proper caution when opening attachments, clicking links, or responding.
>
>
> From: Joshua Hay <joshua.a.hay@...el.com>
>
> Tell hardware to writeback completed descriptors even when interrupts
> are disabled. Otherwise, descriptors might not be written back until
> the hardware can flush a full cacheline of descriptors. This can cause
> unnecessary delays when traffic is light (or even trigger a Tx queue
> timeout).
>
> Fixes: c2d548cad150 ("idpf: add TX splitq napi poll support")
> Fixes: a5ab9ee0df0b ("idpf: add singleq start_xmit and napi poll")
> Reviewed-by: Przemek Kitszel <przemyslaw.kitszel@...el.com>
> Reviewed-by: Alexander Lobakin <aleksander.lobakin@...el.com>
> Signed-off-by: Joshua Hay <joshua.a.hay@...el.com>
> Co-developed-by: Michal Kubiak <michal.kubiak@...el.com>
> Signed-off-by: Michal Kubiak <michal.kubiak@...el.com>
> ---
> drivers/net/ethernet/intel/idpf/idpf_dev.c | 2 ++
> .../ethernet/intel/idpf/idpf_singleq_txrx.c | 6 ++++-
> drivers/net/ethernet/intel/idpf/idpf_txrx.c | 7 +++++-
> drivers/net/ethernet/intel/idpf/idpf_txrx.h | 23 +++++++++++++++++++
> drivers/net/ethernet/intel/idpf/idpf_vf_dev.c | 2 ++
> 5 files changed, 38 insertions(+), 2 deletions(-)
>
> diff --git a/drivers/net/ethernet/intel/idpf/idpf_dev.c b/drivers/net/ethernet/intel/idpf/idpf_dev.c
> index 34ad1ac46b78..2c6776086130 100644
> --- a/drivers/net/ethernet/intel/idpf/idpf_dev.c
> +++ b/drivers/net/ethernet/intel/idpf/idpf_dev.c
> @@ -96,8 +96,10 @@ static int idpf_intr_reg_init(struct idpf_vport *vport)
> intr->dyn_ctl = idpf_get_reg_addr(adapter,
> reg_vals[vec_id].dyn_ctl_reg);
> intr->dyn_ctl_intena_m = PF_GLINT_DYN_CTL_INTENA_M;
> + intr->dyn_ctl_intena_msk_m = PF_GLINT_DYN_CTL_INTENA_MSK_M;
> intr->dyn_ctl_itridx_s = PF_GLINT_DYN_CTL_ITR_INDX_S;
> intr->dyn_ctl_intrvl_s = PF_GLINT_DYN_CTL_INTERVAL_S;
> + intr->dyn_ctl_wb_on_itr_m = PF_GLINT_DYN_CTL_WB_ON_ITR_M;
>
> spacing = IDPF_ITR_IDX_SPACING(reg_vals[vec_id].itrn_index_spacing,
> IDPF_PF_ITR_IDX_SPACING);
> diff --git a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
> index 81288a17da2a..8e1478b7d86c 100644
> --- a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
> +++ b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
> @@ -1168,8 +1168,10 @@ int idpf_vport_singleq_napi_poll(struct napi_struct *napi, int budget)
> &work_done);
>
> /* If work not completed, return budget and polling will return */
> - if (!clean_complete)
> + if (!clean_complete) {
> + idpf_vport_intr_set_wb_on_itr(q_vector);
> return budget;
> + }
>
> work_done = min_t(int, work_done, budget - 1);
>
> @@ -1178,6 +1180,8 @@ int idpf_vport_singleq_napi_poll(struct napi_struct *napi, int budget)
> */
> if (likely(napi_complete_done(napi, work_done)))
> idpf_vport_intr_update_itr_ena_irq(q_vector);
> + else
> + idpf_vport_intr_set_wb_on_itr(q_vector);
>
> return work_done;
> }
> diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
> index 1646ff3877ba..b496566ee2aa 100644
> --- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
> +++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
> @@ -3639,6 +3639,7 @@ void idpf_vport_intr_update_itr_ena_irq(struct idpf_q_vector *q_vector)
> IDPF_NO_ITR_UPDATE_IDX, 0);
>
> writel(intval, q_vector->intr_reg.dyn_ctl);
> + q_vector->wb_on_itr = false;
> }
>
> /**
> @@ -3930,8 +3931,10 @@ static int idpf_vport_splitq_napi_poll(struct napi_struct *napi, int budget)
> clean_complete &= idpf_tx_splitq_clean_all(q_vector, budget, &work_done);
>
> /* If work not completed, return budget and polling will return */
> - if (!clean_complete)
> + if (!clean_complete) {
> + idpf_vport_intr_set_wb_on_itr(q_vector);
> return budget;
> + }
>
> work_done = min_t(int, work_done, budget - 1);
>
> @@ -3940,6 +3943,8 @@ static int idpf_vport_splitq_napi_poll(struct napi_struct *napi, int budget)
> */
> if (likely(napi_complete_done(napi, work_done)))
> idpf_vport_intr_update_itr_ena_irq(q_vector);
> + else
> + idpf_vport_intr_set_wb_on_itr(q_vector);
>
> /* Switch to poll mode in the tear-down path after sending disable
> * queues virtchnl message, as the interrupts will be disabled after
> diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
> index df76493faa75..50761c2d9f3b 100644
> --- a/drivers/net/ethernet/intel/idpf/idpf_txrx.h
> +++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
> @@ -495,9 +495,11 @@ struct idpf_vec_regs {
> struct idpf_intr_reg {
> void __iomem *dyn_ctl;
> u32 dyn_ctl_intena_m;
> + u32 dyn_ctl_intena_msk_m;
> u32 dyn_ctl_itridx_s;
> u32 dyn_ctl_itridx_m;
> u32 dyn_ctl_intrvl_s;
> + u32 dyn_ctl_wb_on_itr_m;
> void __iomem *rx_itr;
> void __iomem *tx_itr;
> void __iomem *icr_ena;
> @@ -534,6 +536,7 @@ struct idpf_q_vector {
> struct napi_struct napi;
> u16 v_idx;
> struct idpf_intr_reg intr_reg;
> + bool wb_on_itr;
Not sure if this was considered, but it might be best to place this field
before intr_reg so the two are likely to share a cacheline, since
idpf_vport_intr_set_wb_on_itr() checks wb_on_itr and then immediately
accesses intr_reg on the hot path.
>
> u16 num_txq;
> struct idpf_queue **tx;
> @@ -973,6 +976,26 @@ static inline void idpf_rx_sync_for_cpu(struct idpf_rx_buf *rx_buf, u32 len)
> page_pool_get_dma_dir(pp));
> }
>
> +/**
> + * idpf_vport_intr_set_wb_on_itr - enable descriptor writeback on disabled interrupts
> + * @q_vector: pointer to queue vector struct
> + */
> +static inline void idpf_vport_intr_set_wb_on_itr(struct idpf_q_vector *q_vector)
> +{
> + struct idpf_intr_reg *reg;
> +
> + if (q_vector->wb_on_itr)
> + return;
> +
> + reg = &q_vector->intr_reg;
> +
> + writel(reg->dyn_ctl_wb_on_itr_m | reg->dyn_ctl_intena_msk_m |
> + IDPF_NO_ITR_UPDATE_IDX << reg->dyn_ctl_itridx_s,
> + reg->dyn_ctl);
> +
> + q_vector->wb_on_itr = true;
> +}
> +
> int idpf_vport_singleq_napi_poll(struct napi_struct *napi, int budget);
> void idpf_vport_init_num_qs(struct idpf_vport *vport,
> struct virtchnl2_create_vport *vport_msg);
> diff --git a/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c b/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c
> index 8ade4e3a9fe1..f5b0a0666636 100644
> --- a/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c
> +++ b/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c
> @@ -96,7 +96,9 @@ static int idpf_vf_intr_reg_init(struct idpf_vport *vport)
> intr->dyn_ctl = idpf_get_reg_addr(adapter,
> reg_vals[vec_id].dyn_ctl_reg);
> intr->dyn_ctl_intena_m = VF_INT_DYN_CTLN_INTENA_M;
> + intr->dyn_ctl_intena_msk_m = VF_INT_DYN_CTLN_INTENA_MSK_M;
> intr->dyn_ctl_itridx_s = VF_INT_DYN_CTLN_ITR_INDX_S;
> + intr->dyn_ctl_wb_on_itr_m = VF_INT_DYN_CTLN_WB_ON_ITR_M;
>
> spacing = IDPF_ITR_IDX_SPACING(reg_vals[vec_id].itrn_index_spacing,
> IDPF_VF_ITR_IDX_SPACING);
> --
> 2.33.1
>
>
Powered by blists - more mailing lists