Message-Id: <20191119051307.167977685@linuxfoundation.org>
Date: Tue, 19 Nov 2019 06:17:29 +0100
From: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
To: linux-kernel@...r.kernel.org
Cc: Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
    stable@...r.kernel.org,
    Rick Farrington <ricardo.farrington@...ium.com>,
    Felix Manlunas <felix.manlunas@...ium.com>,
    "David S. Miller" <davem@...emloft.net>,
    Sasha Levin <sashal@...nel.org>
Subject: [PATCH 4.14 049/239] liquidio: fix race condition in instruction completion processing

From: Rick Farrington <ricardo.farrington@...ium.com>

[ Upstream commit b943f17e06493fd2c7fd00743093ad5dcdb90e7f ]

In lio_enable_irq, the pkt_in_done count register was being cleared to
zero.  However, some completed instructions may not yet have been
processed due to budget and limit constraints, and clearing the
register discarded their count.  So, write this register with only the
number of completions that were actually processed.
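
To make the accounting concrete, here is a minimal standalone sketch
(illustrative only, not driver code: the writel() to inst_cnt_reg is
reduced to a comment, locking is omitted, and the names ack_old/ack_new
are made up):

#include <stdio.h>

static unsigned int pkt_in_done;    /* completions counted so far */
static unsigned int pkts_processed; /* completions handled this pass */

/* Old behaviour: acknowledge everything, even unprocessed work. */
static void ack_old(void)
{
	/* writel(iq->pkt_in_done, iq->inst_cnt_reg); */
	pkt_in_done = 0;        /* unprocessed completions are forgotten */
}

/* New behaviour: acknowledge only what was actually processed. */
static void ack_new(void)
{
	/* writel(iq->pkts_processed, iq->inst_cnt_reg); */
	pkt_in_done -= pkts_processed;  /* keep the unprocessed remainder */
	pkts_processed = 0;
}

int main(void)
{
	/* 10 instructions completed, but the budget allowed only 6 to be
	 * processed before lio_enable_irq ran.
	 */
	pkt_in_done = 10;
	pkts_processed = 6;
	ack_old();
	printf("old: pending after ack = %u\n", pkt_in_done);  /* 0 */

	pkt_in_done = 10;
	pkts_processed = 6;
	ack_new();
	printf("new: pending after ack = %u\n", pkt_in_done);  /* 4 */
	return 0;
}

With the old scheme the four unprocessed completions simply vanish; the
new scheme carries them forward so a later flush pass can still handle
them.
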
Signed-off-by: Rick Farrington <ricardo.farrington@...ium.com>
Signed-off-by: Felix Manlunas <felix.manlunas@...ium.com>
Signed-off-by: David S. Miller <davem@...emloft.net>
Signed-off-by: Sasha Levin <sashal@...nel.org>
---
 drivers/net/ethernet/cavium/liquidio/octeon_device.c   | 5 +++--
 drivers/net/ethernet/cavium/liquidio/octeon_iq.h       | 2 ++
 drivers/net/ethernet/cavium/liquidio/request_manager.c | 2 ++
 3 files changed, 7 insertions(+), 2 deletions(-)

diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.c b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
index 29d53b1763a72..2a9c925376cc1 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
@@ -1444,8 +1444,9 @@ void lio_enable_irq(struct octeon_droq *droq, struct octeon_instr_queue *iq)
 	}
 	if (iq) {
 		spin_lock_bh(&iq->lock);
-		writel(iq->pkt_in_done, iq->inst_cnt_reg);
-		iq->pkt_in_done = 0;
+		writel(iq->pkts_processed, iq->inst_cnt_reg);
+		iq->pkt_in_done -= iq->pkts_processed;
+		iq->pkts_processed = 0;
 		/* this write needs to be flushed before we release the lock */
 		mmiowb();
 		spin_unlock_bh(&iq->lock);
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_iq.h b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
index 5c3c8da976f73..1860603452ee7 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
@@ -84,6 +84,8 @@ struct octeon_instr_queue {
 
 	u32 pkt_in_done;
 
+	u32 pkts_processed;
+
 	/** A spinlock to protect access to the input ring.*/
 	spinlock_t iq_flush_running_lock;
 
diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c
index 55e8731264634..0ea623768783e 100644
--- a/drivers/net/ethernet/cavium/liquidio/request_manager.c
+++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c
@@ -122,6 +122,7 @@ int octeon_init_instr_queue(struct octeon_device *oct,
 	iq->do_auto_flush = 1;
 	iq->db_timeout = (u32)conf->db_timeout;
 	atomic_set(&iq->instr_pending, 0);
+	iq->pkts_processed = 0;
 
 	/* Initialize the spinlock for this instruction queue */
 	spin_lock_init(&iq->lock);
@@ -474,6 +475,7 @@ octeon_flush_iq(struct octeon_device *oct, struct octeon_instr_queue *iq,
 				lio_process_iq_request_list(oct, iq, 0);
 
 		if (inst_processed) {
+			iq->pkts_processed += inst_processed;
 			atomic_sub(inst_processed, &iq->instr_pending);
 			iq->stats.instr_processed += inst_processed;
 		}
--
2.20.1