Message-ID: <20200824134136.7ceabe06@kicinski-fedora-PC1C0HJN>
Date: Mon, 24 Aug 2020 13:41:36 -0700
From: Jakub Kicinski <kuba@...nel.org>
To: Tony Nguyen <anthony.l.nguyen@...el.com>
Cc: davem@...emloft.net, Alice Michael <alice.michael@...el.com>,
netdev@...r.kernel.org, nhorman@...hat.com, sassmann@...hat.com,
jeffrey.t.kirsher@...el.com, Alan Brady <alan.brady@...el.com>,
Phani Burra <phani.r.burra@...el.com>,
Joshua Hay <joshua.a.hay@...el.com>,
Madhu Chittim <madhu.chittim@...el.com>,
Pavan Kumar Linga <pavan.kumar.linga@...el.com>,
Donald Skidmore <donald.c.skidmore@...el.com>,
Jesse Brandeburg <jesse.brandeburg@...el.com>,
Sridhar Samudrala <sridhar.samudrala@...el.com>
Subject: Re: [net-next v5 08/15] iecm: Implement vector allocation
On Mon, 24 Aug 2020 10:32:59 -0700 Tony Nguyen wrote:
> static void iecm_mb_intr_rel_irq(struct iecm_adapter *adapter)
> {
> - /* stub */
> + int irq_num;
> +
> + irq_num = adapter->msix_entries[0].vector;
> + synchronize_irq(irq_num);
I don't think you need to sync the irq before freeing it; free_irq()
already waits for any in-flight handler to finish. Something like the
sketch below should be enough.
> + free_irq(irq_num, adapter);
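Untested sketch, reusing the struct iecm_adapter layout from this patch:

	static void iecm_mb_intr_rel_irq(struct iecm_adapter *adapter)
	{
		free_irq(adapter->msix_entries[0].vector, adapter);
	}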
> static int iecm_mb_intr_init(struct iecm_adapter *adapter)
> {
> - /* stub */
> + int err = 0;
> +
> + iecm_get_mb_vec_id(adapter);
> + adapter->dev_ops.reg_ops.mb_intr_reg_init(adapter);
> + adapter->irq_mb_handler = iecm_mb_intr_clean;
> + err = iecm_mb_intr_req_irq(adapter);
> + return err;
The err local isn't needed, this can simply be:

return iecm_mb_intr_req_irq(adapter);
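With that, the whole helper collapses to something like this (untested,
using the same callbacks this patch adds):

	static int iecm_mb_intr_init(struct iecm_adapter *adapter)
	{
		iecm_get_mb_vec_id(adapter);
		adapter->dev_ops.reg_ops.mb_intr_reg_init(adapter);
		adapter->irq_mb_handler = iecm_mb_intr_clean;

		return iecm_mb_intr_req_irq(adapter);
	}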
> static void iecm_vport_intr_rel_irq(struct iecm_vport *vport)
> {
> - /* stub */
> + struct iecm_adapter *adapter = vport->adapter;
> + int vector;
> +
> + for (vector = 0; vector < vport->num_q_vectors; vector++) {
> + struct iecm_q_vector *q_vector = &vport->q_vectors[vector];
> + int irq_num, vidx;
> +
> + /* free only the IRQs that were actually requested */
> + if (!q_vector)
> + continue;
> +
> + vidx = vector + vport->q_vector_base;
> + irq_num = adapter->msix_entries[vidx].vector;
> +
> + /* clear the affinity_mask in the IRQ descriptor */
> + irq_set_affinity_hint(irq_num, NULL);
> + synchronize_irq(irq_num);
here as well, the synchronize_irq() isn't needed (see the sketch after
the function below)
> + free_irq(irq_num, q_vector);
> + }
> }
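Rough sketch of the loop body without the extra sync (untested, same
fields as in the patch):

		vidx = vector + vport->q_vector_base;
		irq_num = adapter->msix_entries[vidx].vector;

		/* clear the affinity_mask in the IRQ descriptor */
		irq_set_affinity_hint(irq_num, NULL);
		free_irq(irq_num, q_vector);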
> void iecm_vport_intr_dis_irq_all(struct iecm_vport *vport)
> {
> - /* stub */
> + struct iecm_q_vector *q_vector = vport->q_vectors;
> + struct iecm_hw *hw = &vport->adapter->hw;
> + int q_idx;
> +
> + for (q_idx = 0; q_idx < vport->num_q_vectors; q_idx++)
> + writel_relaxed(0, hw->hw_addr +
> + q_vector[q_idx].intr_reg.dyn_ctl);
Why the use of _relaxed() here? Is this path performance-sensitive?
There is no barrier after it, which makes this code fragile.
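Unless there's a measured win, plain writel() (which keeps the ordering
guarantees) is the safer default here, e.g. (untested):

	for (q_idx = 0; q_idx < vport->num_q_vectors; q_idx++)
		writel(0, hw->hw_addr + q_vector[q_idx].intr_reg.dyn_ctl);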
> @@ -1052,12 +1122,42 @@ void iecm_vport_intr_dis_irq_all(struct iecm_vport *vport)
> static u32 iecm_vport_intr_buildreg_itr(struct iecm_q_vector *q_vector,
> const int type, u16 itr)
> {
> - /* stub */
> + u32 itr_val;
> +
> + itr &= IECM_ITR_MASK;
> + /* Don't clear PBA because that can cause lost interrupts that
> + * came in while we were cleaning/polling
> + */
> + itr_val = q_vector->intr_reg.dyn_ctl_intena_m |
> + (type << q_vector->intr_reg.dyn_ctl_itridx_s) |
> + (itr << (q_vector->intr_reg.dyn_ctl_intrvl_s - 1));
> +
> + return itr_val;
> }