Message-ID: <CAOMdWSKKyqaJB2Psgcy9piUv3LTDBHhbo_g404fSmqQrVSyr7Q@mail.gmail.com>
Date: Mon, 1 Jul 2024 03:13:40 -0700
From: Allen <allen.lkml@...il.com>
To: Paolo Abeni <pabeni@...hat.com>
Cc: kuba@...nel.org, Guo-Fu Tseng <cooldavid@...ldavid.org>,
"David S. Miller" <davem@...emloft.net>, Eric Dumazet <edumazet@...gle.com>, jes@...ined-monkey.org,
kda@...ux-powerpc.org, cai.huoqing@...ux.dev, dougmill@...ux.ibm.com,
npiggin@...il.com, christophe.leroy@...roup.eu, aneesh.kumar@...nel.org,
naveen.n.rao@...ux.ibm.com, nnac123@...ux.ibm.com, tlfalcon@...ux.ibm.com,
marcin.s.wojtas@...il.com, mlindner@...vell.com, stephen@...workplumber.org,
nbd@....name, sean.wang@...iatek.com, Mark-MC.Lee@...iatek.com,
lorenzo@...nel.org, matthias.bgg@...il.com,
angelogioacchino.delregno@...labora.com, borisp@...dia.com,
bryan.whitehead@...rochip.com, UNGLinuxDriver@...rochip.com,
louis.peens@...igine.com, richardcochran@...il.com,
linux-rdma@...r.kernel.org, linux-kernel@...r.kernel.org,
linux-acenic@...site.dk, linux-net-drivers@....com, netdev@...r.kernel.org
Subject: Re: [PATCH 13/15] net: jme: Convert tasklet API to new bottom half
workqueue mechanism
> > Migrate tasklet APIs to the new bottom half workqueue mechanism. It
> > replaces all occurrences of tasklet usage with the appropriate workqueue
> > APIs throughout the jme driver. This transition ensures compatibility
> > with the latest design and enhances performance.
> >
> > Signed-off-by: Allen Pais <allen.lkml@...il.com>
> > ---
> > drivers/net/ethernet/jme.c | 72 +++++++++++++++++++-------------------
> > drivers/net/ethernet/jme.h | 8 ++---
> > 2 files changed, 40 insertions(+), 40 deletions(-)
> >
> > diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
> > index b06e24562973..b1a92b851b3b 100644
> > --- a/drivers/net/ethernet/jme.c
> > +++ b/drivers/net/ethernet/jme.c
> > @@ -1141,7 +1141,7 @@ jme_dynamic_pcc(struct jme_adapter *jme)
> >
> > if (unlikely(dpi->attempt != dpi->cur && dpi->cnt > 5)) {
> > if (dpi->attempt < dpi->cur)
> > - tasklet_schedule(&jme->rxclean_task);
> > + queue_work(system_bh_wq, &jme->rxclean_bh_work);
> > jme_set_rx_pcc(jme, dpi->attempt);
> > dpi->cur = dpi->attempt;
> > dpi->cnt = 0;
> > @@ -1182,9 +1182,9 @@ jme_shutdown_nic(struct jme_adapter *jme)
> > }
> >
> > static void
> > -jme_pcc_tasklet(struct tasklet_struct *t)
> > +jme_pcc_bh_work(struct work_struct *work)
> > {
> > - struct jme_adapter *jme = from_tasklet(jme, t, pcc_task);
> > + struct jme_adapter *jme = from_work(jme, work, pcc_bh_work);
> > struct net_device *netdev = jme->dev;
> >
> > if (unlikely(test_bit(JME_FLAG_SHUTDOWN, &jme->flags))) {
> > @@ -1282,9 +1282,9 @@ static void jme_link_change_work(struct work_struct *work)
> > jme_stop_shutdown_timer(jme);
> >
> > jme_stop_pcc_timer(jme);
> > - tasklet_disable(&jme->txclean_task);
> > - tasklet_disable(&jme->rxclean_task);
> > - tasklet_disable(&jme->rxempty_task);
> > + disable_work_sync(&jme->txclean_bh_work);
> > + disable_work_sync(&jme->rxclean_bh_work);
> > + disable_work_sync(&jme->rxempty_bh_work);
> >
> > if (netif_carrier_ok(netdev)) {
> > jme_disable_rx_engine(jme);
> > @@ -1304,7 +1304,7 @@ static void jme_link_change_work(struct work_struct *work)
> > rc = jme_setup_rx_resources(jme);
> > if (rc) {
> > pr_err("Allocating resources for RX error, Device STOPPED!\n");
> > - goto out_enable_tasklet;
> > + goto out_enable_bh_work;
> > }
> >
> > rc = jme_setup_tx_resources(jme);
> > @@ -1326,22 +1326,22 @@ static void jme_link_change_work(struct work_struct *work)
> > jme_start_shutdown_timer(jme);
> > }
> >
> > - goto out_enable_tasklet;
> > + goto out_enable_bh_work;
> >
> > err_out_free_rx_resources:
> > jme_free_rx_resources(jme);
> > -out_enable_tasklet:
> > - tasklet_enable(&jme->txclean_task);
> > - tasklet_enable(&jme->rxclean_task);
> > - tasklet_enable(&jme->rxempty_task);
> > +out_enable_bh_work:
> > + enable_and_queue_work(system_bh_wq, &jme->txclean_bh_work);
> > + enable_and_queue_work(system_bh_wq, &jme->rxclean_bh_work);
> > + enable_and_queue_work(system_bh_wq, &jme->rxempty_bh_work);
>
> This will unconditionally schedule the rxempty_bh_work, which is AFAICS
> a behavior change compared to before this patch.
>
> In turn the rxempty_bh_work() will emit (almost unconditionally) the
> 'RX Queue Full!' message, so the change should be visible to the user.
>
> I think you should queue the work only if it was queued at cancel time.
> You likely need additional status to do that.
>
Thank you for taking the time to review. Now that it's been a week, I was
preparing to send out version 3. Before I do that, I want to make sure
the approach below is acceptable.
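The idea, following your suggestion, is a new rxempty_bh_work_queued flag
in struct jme_adapter, so that the rxempty work is requeued only if it
was queued at cancel time. A condensed sketch of how the flag is used in
the diff below (just the relevant lines, not a complete patch):

        /* jme_close(): make sure the work is not requeued afterwards */
        jme->rxempty_bh_work_queued = false;
        cancel_work_sync(&jme->rxempty_bh_work);

        /* jme_suspend(): the work should run again on this path */
        jme->rxempty_bh_work_queued = true;
        enable_and_queue_work(system_bh_wq, &jme->rxempty_bh_work);

        /* jme_link_change_work(): requeue rxempty only if flagged */
        if (jme->rxempty_bh_work_queued)
                enable_and_queue_work(system_bh_wq, &jme->rxempty_bh_work);
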
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index b06e24562973..b3fc2e5c379f 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -1141,7 +1141,7 @@ jme_dynamic_pcc(struct jme_adapter *jme)
if (unlikely(dpi->attempt != dpi->cur && dpi->cnt > 5)) {
if (dpi->attempt < dpi->cur)
- tasklet_schedule(&jme->rxclean_task);
+ queue_work(system_bh_wq, &jme->rxclean_bh_work);
jme_set_rx_pcc(jme, dpi->attempt);
dpi->cur = dpi->attempt;
dpi->cnt = 0;
@@ -1182,9 +1182,9 @@ jme_shutdown_nic(struct jme_adapter *jme)
}
static void
-jme_pcc_tasklet(struct tasklet_struct *t)
+jme_pcc_bh_work(struct work_struct *work)
{
- struct jme_adapter *jme = from_tasklet(jme, t, pcc_task);
+ struct jme_adapter *jme = from_work(jme, work, pcc_bh_work);
struct net_device *netdev = jme->dev;
if (unlikely(test_bit(JME_FLAG_SHUTDOWN, &jme->flags))) {
@@ -1282,9 +1282,9 @@ static void jme_link_change_work(struct work_struct *work)
jme_stop_shutdown_timer(jme);
jme_stop_pcc_timer(jme);
- tasklet_disable(&jme->txclean_task);
- tasklet_disable(&jme->rxclean_task);
- tasklet_disable(&jme->rxempty_task);
+ disable_work_sync(&jme->txclean_bh_work);
+ disable_work_sync(&jme->rxclean_bh_work);
+ disable_work_sync(&jme->rxempty_bh_work);
if (netif_carrier_ok(netdev)) {
jme_disable_rx_engine(jme);
@@ -1304,7 +1304,7 @@ static void jme_link_change_work(struct work_struct *work)
rc = jme_setup_rx_resources(jme);
if (rc) {
pr_err("Allocating resources for RX error,
Device STOPPED!\n");
- goto out_enable_tasklet;
+ goto out_enable_bh_work;
}
rc = jme_setup_tx_resources(jme);
@@ -1326,22 +1326,23 @@ static void jme_link_change_work(struct work_struct *work)
jme_start_shutdown_timer(jme);
}
- goto out_enable_tasklet;
+ goto out_enable_bh_work;
err_out_free_rx_resources:
jme_free_rx_resources(jme);
-out_enable_tasklet:
- tasklet_enable(&jme->txclean_task);
- tasklet_enable(&jme->rxclean_task);
- tasklet_enable(&jme->rxempty_task);
+out_enable_bh_work:
+ enable_and_queue_work(system_bh_wq, &jme->txclean_bh_work);
+ enable_and_queue_work(system_bh_wq, &jme->rxclean_bh_work);
+ if (jme->rxempty_bh_work_queued)
+ enable_and_queue_work(system_bh_wq, &jme->rxempty_bh_work);
out:
atomic_inc(&jme->link_changing);
}
static void
-jme_rx_clean_tasklet(struct tasklet_struct *t)
+jme_rx_clean_bh_work(struct work_struct *work)
{
- struct jme_adapter *jme = from_tasklet(jme, t, rxclean_task);
+ struct jme_adapter *jme = from_work(jme, work, rxclean_bh_work);
struct dynpcc_info *dpi = &(jme->dpi);
jme_process_receive(jme, jme->rx_ring_size);
@@ -1374,9 +1375,9 @@ jme_poll(JME_NAPI_HOLDER(holder), JME_NAPI_WEIGHT(budget))
}
static void
-jme_rx_empty_tasklet(struct tasklet_struct *t)
+jme_rx_empty_bh_work(struct work_struct *work)
{
- struct jme_adapter *jme = from_tasklet(jme, t, rxempty_task);
+ struct jme_adapter *jme = from_work(jme, work, rxempty_bh_work);
if (unlikely(atomic_read(&jme->link_changing) != 1))
return;
@@ -1386,7 +1387,7 @@ jme_rx_empty_tasklet(struct tasklet_struct *t)
netif_info(jme, rx_status, jme->dev, "RX Queue Full!\n");
- jme_rx_clean_tasklet(&jme->rxclean_task);
+ jme_rx_clean_bh_work(&jme->rxclean_bh_work);
while (atomic_read(&jme->rx_empty) > 0) {
atomic_dec(&jme->rx_empty);
@@ -1410,9 +1411,9 @@ jme_wake_queue_if_stopped(struct jme_adapter *jme)
}
-static void jme_tx_clean_tasklet(struct tasklet_struct *t)
+static void jme_tx_clean_bh_work(struct work_struct *work)
{
- struct jme_adapter *jme = from_tasklet(jme, t, txclean_task);
+ struct jme_adapter *jme = from_work(jme, work, txclean_bh_work);
struct jme_ring *txring = &(jme->txring[0]);
struct txdesc *txdesc = txring->desc;
struct jme_buffer_info *txbi = txring->bufinf, *ctxbi, *ttxbi;
@@ -1510,12 +1511,12 @@ jme_intr_msi(struct jme_adapter *jme, u32 intrstat)
if (intrstat & INTR_TMINTR) {
jwrite32(jme, JME_IEVE, INTR_TMINTR);
- tasklet_schedule(&jme->pcc_task);
+ queue_work(system_bh_wq, &jme->pcc_bh_work);
}
if (intrstat & (INTR_PCCTXTO | INTR_PCCTX)) {
jwrite32(jme, JME_IEVE, INTR_PCCTXTO | INTR_PCCTX | INTR_TX0);
- tasklet_schedule(&jme->txclean_task);
+ queue_work(system_bh_wq, &jme->txclean_bh_work);
}
if ((intrstat & (INTR_PCCRX0TO | INTR_PCCRX0 | INTR_RX0EMP))) {
@@ -1538,9 +1539,9 @@ jme_intr_msi(struct jme_adapter *jme, u32 intrstat)
} else {
if (intrstat & INTR_RX0EMP) {
atomic_inc(&jme->rx_empty);
- tasklet_hi_schedule(&jme->rxempty_task);
+ queue_work(system_bh_highpri_wq, &jme->rxempty_bh_work);
} else if (intrstat & (INTR_PCCRX0TO | INTR_PCCRX0)) {
- tasklet_hi_schedule(&jme->rxclean_task);
+ queue_work(system_bh_highpri_wq, &jme->rxclean_bh_work);
}
}
@@ -1826,9 +1827,9 @@ jme_open(struct net_device *netdev)
jme_clear_pm_disable_wol(jme);
JME_NAPI_ENABLE(jme);
- tasklet_setup(&jme->txclean_task, jme_tx_clean_tasklet);
- tasklet_setup(&jme->rxclean_task, jme_rx_clean_tasklet);
- tasklet_setup(&jme->rxempty_task, jme_rx_empty_tasklet);
+ INIT_WORK(&jme->txclean_bh_work, jme_tx_clean_bh_work);
+ INIT_WORK(&jme->rxclean_bh_work, jme_rx_clean_bh_work);
+ INIT_WORK(&jme->rxempty_bh_work, jme_rx_empty_bh_work);
rc = jme_request_irq(jme);
if (rc)
@@ -1914,9 +1915,10 @@ jme_close(struct net_device *netdev)
JME_NAPI_DISABLE(jme);
cancel_work_sync(&jme->linkch_task);
- tasklet_kill(&jme->txclean_task);
- tasklet_kill(&jme->rxclean_task);
- tasklet_kill(&jme->rxempty_task);
+ cancel_work_sync(&jme->txclean_bh_work);
+ cancel_work_sync(&jme->rxclean_bh_work);
+ jme->rxempty_bh_work_queued = false;
+ cancel_work_sync(&jme->rxempty_bh_work);
jme_disable_rx_engine(jme);
jme_disable_tx_engine(jme);
@@ -3020,7 +3022,7 @@ jme_init_one(struct pci_dev *pdev,
atomic_set(&jme->tx_cleaning, 1);
atomic_set(&jme->rx_empty, 1);
- tasklet_setup(&jme->pcc_task, jme_pcc_tasklet);
+ INIT_WORK(&jme->pcc_bh_work, jme_pcc_bh_work);
INIT_WORK(&jme->linkch_task, jme_link_change_work);
jme->dpi.cur = PCC_P1;
@@ -3180,9 +3182,9 @@ jme_suspend(struct device *dev)
netif_stop_queue(netdev);
jme_stop_irq(jme);
- tasklet_disable(&jme->txclean_task);
- tasklet_disable(&jme->rxclean_task);
- tasklet_disable(&jme->rxempty_task);
+ disable_work_sync(&jme->txclean_bh_work);
+ disable_work_sync(&jme->rxclean_bh_work);
+ disable_work_sync(&jme->rxempty_bh_work);
if (netif_carrier_ok(netdev)) {
if (test_bit(JME_FLAG_POLL, &jme->flags))
@@ -3198,9 +3200,10 @@ jme_suspend(struct device *dev)
jme->phylink = 0;
}
- tasklet_enable(&jme->txclean_task);
- tasklet_enable(&jme->rxclean_task);
- tasklet_enable(&jme->rxempty_task);
+ enable_and_queue_work(system_bh_wq, &jme->txclean_bh_work);
+ enable_and_queue_work(system_bh_wq, &jme->rxclean_bh_work);
+ jme->rxempty_bh_work_queued = true;
+ enable_and_queue_work(system_bh_wq, &jme->rxempty_bh_work);
jme_powersave_phy(jme);
diff --git a/drivers/net/ethernet/jme.h b/drivers/net/ethernet/jme.h
index 860494ff3714..44aaf7625dc3 100644
--- a/drivers/net/ethernet/jme.h
+++ b/drivers/net/ethernet/jme.h
@@ -406,11 +406,12 @@ struct jme_adapter {
spinlock_t phy_lock;
spinlock_t macaddr_lock;
spinlock_t rxmcs_lock;
- struct tasklet_struct rxempty_task;
- struct tasklet_struct rxclean_task;
- struct tasklet_struct txclean_task;
+ struct work_struct rxempty_bh_work;
+ struct work_struct rxclean_bh_work;
+ struct work_struct txclean_bh_work;
+ bool rxempty_bh_work_queued;
struct work_struct linkch_task;
- struct tasklet_struct pcc_task;
+ struct work_struct pcc_bh_work;
unsigned long flags;
u32 reg_txcs;
u32 reg_txpfc;

Do we need a flag for rxclean and txclean too?
Thanks,
Allen
> Thanks,
>
> Paolo
>
--
- Allen