Date:   Wed, 15 May 2019 12:55:25 +0200
From:   Greg Kroah-Hartman <gregkh@...uxfoundation.org>
To:     linux-kernel@...r.kernel.org
Cc:     Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
        stable@...r.kernel.org, Denis Bolotin <dbolotin@...vell.com>,
        Michal Kalderon <mkalderon@...vell.com>,
        Ariel Elior <aelior@...vell.com>,
        "David S. Miller" <davem@...emloft.net>,
        Sasha Levin <sashal@...nel.org>
Subject: [PATCH 5.0 044/137] qed: Fix the DORQ's attentions handling

[ Upstream commit 0d72c2ac89185f179da1e8a91c40c82f3fa38f0b ]

Separate the overflow handling from the hardware interrupt status analysis.
The interrupt status is a single register common to all PFs, and the first
PF to read it is not necessarily the one that overflowed; every PF must
check its own overflow status on each attention.
With this change the sticky indication is cleared in the attention handler
so that doorbells can be processed again as soon as possible, while the
doorbell recovery itself is deferred to the periodic handler to reduce the
time spent in the attention handler.
The check for whether a DORQ flush is needed was changed to use
"db_bar_no_edpm" because qed_edpm_enabled()'s result can change dynamically
and might have prevented a needed flush.

Signed-off-by: Denis Bolotin <dbolotin@...vell.com>
Signed-off-by: Michal Kalderon <mkalderon@...vell.com>
Signed-off-by: Ariel Elior <aelior@...vell.com>
Signed-off-by: David S. Miller <davem@...emloft.net>
Signed-off-by: Sasha Levin <sashal@...nel.org>
---
 drivers/net/ethernet/qlogic/qed/qed.h     |  3 ++
 drivers/net/ethernet/qlogic/qed/qed_int.c | 61 +++++++++++++++++------
 2 files changed, 48 insertions(+), 16 deletions(-)
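
Note: the core of the fix is the latch-and-defer bookkeeping: the attention
callback only sets QED_OVERFLOW_BIT and schedules the periodic handler, and
qed_db_rec_handler() later consumes the bit with test_and_clear_bit(). The
stand-alone C sketch below only illustrates that pattern with C11 atomics;
the struct and function names (fake_hwfn, attention_handler,
periodic_handler) are invented for illustration and are not qed symbols.

/*
 * Sketch of the deferral pattern: the fast attention path latches an
 * "overflowed" flag, the slow periodic path consumes it exactly once,
 * mirroring set_bit()/test_and_clear_bit() on db_recovery_info.overflow.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define OVERFLOW_BIT 1UL

struct fake_hwfn {
	atomic_ulong overflow;	/* stands in for db_recovery_info.overflow */
};

/* Fast path: only record the overflow; no heavy recovery work here. */
static void attention_handler(struct fake_hwfn *hwfn)
{
	atomic_fetch_or(&hwfn->overflow, OVERFLOW_BIT);
	/* the driver would also clear the sticky HW bit here and kick the
	 * periodic handler via qed_periodic_db_rec_start() */
}

/* Slow path: consume the latched flag once and run the recovery. */
static void periodic_handler(struct fake_hwfn *hwfn)
{
	unsigned long prev = atomic_fetch_and(&hwfn->overflow, ~OVERFLOW_BIT);

	if (!(prev & OVERFLOW_BIT))
		return;		/* nothing latched since the last run */

	printf("running doorbell recovery (deferred from attention)\n");
}

int main(void)
{
	struct fake_hwfn hwfn = { .overflow = 0 };

	attention_handler(&hwfn);	/* IRQ context: just latch the bit */
	periodic_handler(&hwfn);	/* later: recovery runs once */
	periodic_handler(&hwfn);	/* bit already cleared: no-op */
	return 0;
}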

diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index 07ae600d0f357..f458c9776a89c 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -431,6 +431,8 @@ struct qed_qm_info {
 	u8 num_pf_rls;
 };
 
+#define QED_OVERFLOW_BIT	1
+
 struct qed_db_recovery_info {
 	struct list_head list;
 
@@ -438,6 +440,7 @@ struct qed_db_recovery_info {
 	spinlock_t lock;
 	bool dorq_attn;
 	u32 db_recovery_counter;
+	unsigned long overflow;
 };
 
 struct storm_stats {
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
index 00688f4c04645..a7e95f239317f 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -376,6 +376,9 @@ static int qed_db_rec_flush_queue(struct qed_hwfn *p_hwfn,
 	u32 count = QED_DB_REC_COUNT;
 	u32 usage = 1;
 
+	/* Flush any pending (e)dpms as they may never arrive */
+	qed_wr(p_hwfn, p_ptt, DORQ_REG_DPM_FORCE_ABORT, 0x1);
+
 	/* wait for usage to zero or count to run out. This is necessary since
 	 * EDPM doorbell transactions can take multiple 64b cycles, and as such
 	 * can "split" over the pci. Possibly, the doorbell drop can happen with
@@ -404,23 +407,24 @@ static int qed_db_rec_flush_queue(struct qed_hwfn *p_hwfn,
 
 int qed_db_rec_handler(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
-	u32 overflow;
+	u32 attn_ovfl, cur_ovfl;
 	int rc;
 
-	overflow = qed_rd(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY);
-	DP_NOTICE(p_hwfn, "PF Overflow sticky 0x%x\n", overflow);
-	if (!overflow)
+	attn_ovfl = test_and_clear_bit(QED_OVERFLOW_BIT,
+				       &p_hwfn->db_recovery_info.overflow);
+	cur_ovfl = qed_rd(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY);
+	if (!cur_ovfl && !attn_ovfl)
 		return 0;
 
-	if (qed_edpm_enabled(p_hwfn)) {
+	DP_NOTICE(p_hwfn, "PF Overflow sticky: attn %u current %u\n",
+		  attn_ovfl, cur_ovfl);
+
+	if (cur_ovfl && !p_hwfn->db_bar_no_edpm) {
 		rc = qed_db_rec_flush_queue(p_hwfn, p_ptt);
 		if (rc)
 			return rc;
 	}
 
-	/* Flush any pending (e)dpm as they may never arrive */
-	qed_wr(p_hwfn, p_ptt, DORQ_REG_DPM_FORCE_ABORT, 0x1);
-
 	/* Release overflow sticky indication (stop silently dropping everything) */
 	qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY, 0x0);
 
@@ -430,13 +434,35 @@ int qed_db_rec_handler(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 	return 0;
 }
 
-static int qed_dorq_attn_cb(struct qed_hwfn *p_hwfn)
+static void qed_dorq_attn_overflow(struct qed_hwfn *p_hwfn)
 {
-	u32 int_sts, first_drop_reason, details, address, all_drops_reason;
 	struct qed_ptt *p_ptt = p_hwfn->p_dpc_ptt;
+	u32 overflow;
 	int rc;
 
-	p_hwfn->db_recovery_info.dorq_attn = true;
+	overflow = qed_rd(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY);
+	if (!overflow)
+		goto out;
+
+	/* Run PF doorbell recovery in next periodic handler */
+	set_bit(QED_OVERFLOW_BIT, &p_hwfn->db_recovery_info.overflow);
+
+	if (!p_hwfn->db_bar_no_edpm) {
+		rc = qed_db_rec_flush_queue(p_hwfn, p_ptt);
+		if (rc)
+			goto out;
+	}
+
+	qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY, 0x0);
+out:
+	/* Schedule the handler even if overflow was not detected */
+	qed_periodic_db_rec_start(p_hwfn);
+}
+
+static int qed_dorq_attn_int_sts(struct qed_hwfn *p_hwfn)
+{
+	u32 int_sts, first_drop_reason, details, address, all_drops_reason;
+	struct qed_ptt *p_ptt = p_hwfn->p_dpc_ptt;
 
 	/* int_sts may be zero since all PFs were interrupted for doorbell
 	 * overflow but another one already handled it. Can abort here. If
@@ -475,11 +501,6 @@ static int qed_dorq_attn_cb(struct qed_hwfn *p_hwfn)
 			  GET_FIELD(details, QED_DORQ_ATTENTION_SIZE) * 4,
 			  first_drop_reason, all_drops_reason);
 
-		rc = qed_db_rec_handler(p_hwfn, p_ptt);
-		qed_periodic_db_rec_start(p_hwfn);
-		if (rc)
-			return rc;
-
 		/* Clear the doorbell drop details and prepare for next drop */
 		qed_wr(p_hwfn, p_ptt, DORQ_REG_DB_DROP_DETAILS_REL, 0);
 
@@ -505,6 +526,14 @@ static int qed_dorq_attn_cb(struct qed_hwfn *p_hwfn)
 	return -EINVAL;
 }
 
+static int qed_dorq_attn_cb(struct qed_hwfn *p_hwfn)
+{
+	p_hwfn->db_recovery_info.dorq_attn = true;
+	qed_dorq_attn_overflow(p_hwfn);
+
+	return qed_dorq_attn_int_sts(p_hwfn);
+}
+
 static void qed_dorq_attn_handler(struct qed_hwfn *p_hwfn)
 {
 	if (p_hwfn->db_recovery_info.dorq_attn)
-- 
2.20.1


