Message-Id: <1532681962-12437-1-git-send-email-rahul.lakkireddy@chelsio.com>
Date:   Fri, 27 Jul 2018 14:29:22 +0530
From:   Rahul Lakkireddy <rahul.lakkireddy@...lsio.com>
To:     netdev@...r.kernel.org
Cc:     davem@...emloft.net, ganeshgr@...lsio.com, nirranjan@...lsio.com,
        indranil@...lsio.com
Subject: [PATCH net-next] cxgb4: print ULD queue information managed by LLD

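Dump the information for the queues that are allocated and managed by
the LLD (cxgb4) on behalf of the ULDs in the sge_qinfo debugfs entry:
OFLD-TXQ, RDMA-CPL, RDMA-CIQ, iSCSI, iSCSIT, TLS and Crypto queues.
The ULD queue info is walked under uld_mutex, both when counting the
seq_file entries and when dumping them.

Each queue section contributes DIV_ROUND_UP(nqueues, 4) rows of up to
4 queues, and sge_qinfo_show() reduces the seq_file row index section
by section until it reaches the group to print. A minimal standalone
user-space sketch of that row-to-section mapping (illustrative only;
the section counts below are made up and the code is not part of the
patch):

  #include <stdio.h>

  #define GROUP     4
  #define ROWS(nq)  (((nq) + GROUP - 1) / GROUP)   /* DIV_ROUND_UP(nq, 4) */

  struct section {
      const char *name;
      int nq;                           /* queues in this section */
  };

  int main(void)
  {
      /* hypothetical per-section queue counts, for illustration only */
      struct section secs[] = {
          { "Ethernet", 10 }, { "OFLD-TXQ", 6 }, { "RDMA-CPL", 4 },
          { "RDMA-CIQ", 4 },  { "Control",  2 }, { "FW event", 1 },
      };
      int row = 5;                      /* row index handed in by seq_file */
      int r = row;
      unsigned int i;

      for (i = 0; i < sizeof(secs) / sizeof(secs[0]); i++) {
          int rows = ROWS(secs[i].nq);

          if (r < rows) {
              int first = r * GROUP;    /* like base_qset = r * 4 */
              int n = secs[i].nq - first;

              if (n > GROUP)            /* n = min(4, nq - 4 * r) */
                  n = GROUP;
              printf("row %d -> %s, queues %d..%d\n",
                     row, secs[i].name, first, first + n - 1);
              return 0;
          }
          r -= rows;                    /* move on to the next section */
      }
      printf("row %d is past the last section\n", row);
      return 0;
  }
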
Signed-off-by: Rahul Lakkireddy <rahul.lakkireddy@...lsio.com>
Signed-off-by: Ganesh Goudar <ganeshgr@...lsio.com>
---
 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c | 296 +++++++++++++++++++--
 1 file changed, 277 insertions(+), 19 deletions(-)

diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index 2320f7829a6b..6f312e03432f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -2474,16 +2474,64 @@ static inline struct port_info *ethqset2pinfo(struct adapter *adap, int qset)
 	return NULL;
 }
 
+static int sge_qinfo_uld_txq_entries(const struct adapter *adap, int uld)
+{
+	const struct sge_uld_txq_info *utxq_info = adap->sge.uld_txq_info[uld];
+
+	if (!utxq_info)
+		return 0;
+
+	return DIV_ROUND_UP(utxq_info->ntxq, 4);
+}
+
+static int sge_qinfo_uld_rspq_entries(const struct adapter *adap, int uld,
+				      bool ciq)
+{
+	const struct sge_uld_rxq_info *urxq_info = adap->sge.uld_rxq_info[uld];
+
+	if (!urxq_info)
+		return 0;
+
+	return ciq ? DIV_ROUND_UP(urxq_info->nciq, 4) :
+		     DIV_ROUND_UP(urxq_info->nrxq, 4);
+}
+
+static int sge_qinfo_uld_rxq_entries(const struct adapter *adap, int uld)
+{
+	return sge_qinfo_uld_rspq_entries(adap, uld, false);
+}
+
+static int sge_qinfo_uld_ciq_entries(const struct adapter *adap, int uld)
+{
+	return sge_qinfo_uld_rspq_entries(adap, uld, true);
+}
+
 static int sge_qinfo_show(struct seq_file *seq, void *v)
 {
+	int uld_rxq_entries[CXGB4_ULD_MAX] = { 0 };
+	int uld_ciq_entries[CXGB4_ULD_MAX] = { 0 };
+	int uld_txq_entries[CXGB4_TX_MAX] = { 0 };
+	const struct sge_uld_txq_info *utxq_info;
+	const struct sge_uld_rxq_info *urxq_info;
 	struct adapter *adap = seq->private;
-	int eth_entries = DIV_ROUND_UP(adap->sge.ethqsets, 4);
-	int ofld_entries = DIV_ROUND_UP(adap->sge.ofldqsets, 4);
-	int ctrl_entries = DIV_ROUND_UP(MAX_CTRL_QUEUES, 4);
-	int i, r = (uintptr_t)v - 1;
-	int ofld_idx = r - eth_entries;
-	int ctrl_idx =  ofld_idx - ofld_entries;
-	int fq_idx =  ctrl_idx - ctrl_entries;
+	int i, n, r = (uintptr_t)v - 1;
+	int eth_entries, ctrl_entries;
+	struct sge *s = &adap->sge;
+
+	eth_entries = DIV_ROUND_UP(adap->sge.ethqsets, 4);
+	ctrl_entries = DIV_ROUND_UP(MAX_CTRL_QUEUES, 4);
+
+	mutex_lock(&uld_mutex);
+	if (s->uld_txq_info)
+		for (i = 0; i < ARRAY_SIZE(uld_txq_entries); i++)
+			uld_txq_entries[i] = sge_qinfo_uld_txq_entries(adap, i);
+
+	if (s->uld_rxq_info) {
+		for (i = 0; i < ARRAY_SIZE(uld_rxq_entries); i++) {
+			uld_rxq_entries[i] = sge_qinfo_uld_rxq_entries(adap, i);
+			uld_ciq_entries[i] = sge_qinfo_uld_ciq_entries(adap, i);
+		}
+	}
 
 	if (r)
 		seq_putc(seq, '\n');
@@ -2505,9 +2553,10 @@ do { \
 
 	if (r < eth_entries) {
 		int base_qset = r * 4;
-		const struct sge_eth_rxq *rx = &adap->sge.ethrxq[base_qset];
-		const struct sge_eth_txq *tx = &adap->sge.ethtxq[base_qset];
-		int n = min(4, adap->sge.ethqsets - 4 * r);
+		const struct sge_eth_rxq *rx = &s->ethrxq[base_qset];
+		const struct sge_eth_txq *tx = &s->ethtxq[base_qset];
+
+		n = min(4, s->ethqsets - 4 * r);
 
 		S("QType:", "Ethernet");
 		S("Interface:",
@@ -2532,8 +2581,7 @@ do { \
 		R("RspQ CIDX:", rspq.cidx);
 		R("RspQ Gen:", rspq.gen);
 		S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
-		S3("u", "Intr pktcnt:",
-		   adap->sge.counter_val[rx[i].rspq.pktcnt_idx]);
+		S3("u", "Intr pktcnt:", s->counter_val[rx[i].rspq.pktcnt_idx]);
 		R("FL ID:", fl.cntxt_id);
 		R("FL size:", fl.size - 8);
 		R("FL pend:", fl.pend_cred);
@@ -2558,9 +2606,196 @@ do { \
 		RL("FLLow:", fl.low);
 		RL("FLStarving:", fl.starving);
 
-	} else if (ctrl_idx < ctrl_entries) {
-		const struct sge_ctrl_txq *tx = &adap->sge.ctrlq[ctrl_idx * 4];
-		int n = min(4, adap->params.nports - 4 * ctrl_idx);
+		goto unlock;
+	}
+
+	r -= eth_entries;
+	if (r < uld_txq_entries[CXGB4_TX_OFLD]) {
+		const struct sge_uld_txq *tx;
+
+		utxq_info = s->uld_txq_info[CXGB4_TX_OFLD];
+		tx = &utxq_info->uldtxq[r * 4];
+		n = min(4, utxq_info->ntxq - 4 * r);
+
+		S("QType:", "OFLD-TXQ");
+		T("TxQ ID:", q.cntxt_id);
+		T("TxQ size:", q.size);
+		T("TxQ inuse:", q.in_use);
+		T("TxQ CIDX:", q.cidx);
+		T("TxQ PIDX:", q.pidx);
+
+		goto unlock;
+	}
+
+	r -= uld_txq_entries[CXGB4_TX_OFLD];
+	if (r < uld_rxq_entries[CXGB4_ULD_RDMA]) {
+		const struct sge_ofld_rxq *rx;
+
+		urxq_info = s->uld_rxq_info[CXGB4_ULD_RDMA];
+		rx = &urxq_info->uldrxq[r * 4];
+		n = min(4, urxq_info->nrxq - 4 * r);
+
+		S("QType:", "RDMA-CPL");
+		S("Interface:",
+		  rx[i].rspq.netdev ? rx[i].rspq.netdev->name : "N/A");
+		R("RspQ ID:", rspq.abs_id);
+		R("RspQ size:", rspq.size);
+		R("RspQE size:", rspq.iqe_len);
+		R("RspQ CIDX:", rspq.cidx);
+		R("RspQ Gen:", rspq.gen);
+		S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
+		S3("u", "Intr pktcnt:",	s->counter_val[rx[i].rspq.pktcnt_idx]);
+		R("FL ID:", fl.cntxt_id);
+		R("FL size:", fl.size - 8);
+		R("FL pend:", fl.pend_cred);
+		R("FL avail:", fl.avail);
+		R("FL PIDX:", fl.pidx);
+		R("FL CIDX:", fl.cidx);
+
+		goto unlock;
+	}
+
+	r -= uld_rxq_entries[CXGB4_ULD_RDMA];
+	if (r < uld_ciq_entries[CXGB4_ULD_RDMA]) {
+		const struct sge_ofld_rxq *rx;
+		int ciq_idx = 0;
+
+		urxq_info = s->uld_rxq_info[CXGB4_ULD_RDMA];
+		ciq_idx = urxq_info->nrxq + (r * 4);
+		rx = &urxq_info->uldrxq[ciq_idx];
+		n = min(4, urxq_info->nciq - 4 * r);
+
+		S("QType:", "RDMA-CIQ");
+		S("Interface:",
+		  rx[i].rspq.netdev ? rx[i].rspq.netdev->name : "N/A");
+		R("RspQ ID:", rspq.abs_id);
+		R("RspQ size:", rspq.size);
+		R("RspQE size:", rspq.iqe_len);
+		R("RspQ CIDX:", rspq.cidx);
+		R("RspQ Gen:", rspq.gen);
+		S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
+		S3("u", "Intr pktcnt:",	s->counter_val[rx[i].rspq.pktcnt_idx]);
+
+		goto unlock;
+	}
+
+	r -= uld_ciq_entries[CXGB4_ULD_RDMA];
+	if (r < uld_rxq_entries[CXGB4_ULD_ISCSI]) {
+		const struct sge_ofld_rxq *rx;
+
+		urxq_info = s->uld_rxq_info[CXGB4_ULD_ISCSI];
+		rx = &urxq_info->uldrxq[r * 4];
+		n = min(4, urxq_info->nrxq - 4 * r);
+
+		S("QType:", "iSCSI");
+		R("RspQ ID:", rspq.abs_id);
+		R("RspQ size:", rspq.size);
+		R("RspQE size:", rspq.iqe_len);
+		R("RspQ CIDX:", rspq.cidx);
+		R("RspQ Gen:", rspq.gen);
+		S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
+		S3("u", "Intr pktcnt:",	s->counter_val[rx[i].rspq.pktcnt_idx]);
+		R("FL ID:", fl.cntxt_id);
+		R("FL size:", fl.size - 8);
+		R("FL pend:", fl.pend_cred);
+		R("FL avail:", fl.avail);
+		R("FL PIDX:", fl.pidx);
+		R("FL CIDX:", fl.cidx);
+
+		goto unlock;
+	}
+
+	r -= uld_rxq_entries[CXGB4_ULD_ISCSI];
+	if (r < uld_rxq_entries[CXGB4_ULD_ISCSIT]) {
+		const struct sge_ofld_rxq *rx;
+
+		urxq_info = s->uld_rxq_info[CXGB4_ULD_ISCSIT];
+		rx = &urxq_info->uldrxq[r * 4];
+		n = min(4, urxq_info->nrxq - 4 * r);
+
+		S("QType:", "iSCSIT");
+		R("RspQ ID:", rspq.abs_id);
+		R("RspQ size:", rspq.size);
+		R("RspQE size:", rspq.iqe_len);
+		R("RspQ CIDX:", rspq.cidx);
+		R("RspQ Gen:", rspq.gen);
+		S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
+		S3("u", "Intr pktcnt:",	s->counter_val[rx[i].rspq.pktcnt_idx]);
+		R("FL ID:", fl.cntxt_id);
+		R("FL size:", fl.size - 8);
+		R("FL pend:", fl.pend_cred);
+		R("FL avail:", fl.avail);
+		R("FL PIDX:", fl.pidx);
+		R("FL CIDX:", fl.cidx);
+
+		goto unlock;
+	}
+
+	r -= uld_rxq_entries[CXGB4_ULD_ISCSIT];
+	if (r < uld_rxq_entries[CXGB4_ULD_TLS]) {
+		const struct sge_ofld_rxq *rx;
+
+		urxq_info = s->uld_rxq_info[CXGB4_ULD_TLS];
+		rx = &urxq_info->uldrxq[r * 4];
+		n = min(4, urxq_info->nrxq - 4 * r);
+
+		S("QType:", "TLS");
+		R("RspQ ID:", rspq.abs_id);
+		R("RspQ size:", rspq.size);
+		R("RspQE size:", rspq.iqe_len);
+		R("RspQ CIDX:", rspq.cidx);
+		R("RspQ Gen:", rspq.gen);
+		S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
+		S3("u", "Intr pktcnt:",	s->counter_val[rx[i].rspq.pktcnt_idx]);
+		R("FL ID:", fl.cntxt_id);
+		R("FL size:", fl.size - 8);
+		R("FL pend:", fl.pend_cred);
+		R("FL avail:", fl.avail);
+		R("FL PIDX:", fl.pidx);
+		R("FL CIDX:", fl.cidx);
+
+		goto unlock;
+	}
+
+	r -= uld_rxq_entries[CXGB4_ULD_TLS];
+	if (r < uld_txq_entries[CXGB4_TX_CRYPTO]) {
+		const struct sge_ofld_rxq *rx;
+		const struct sge_uld_txq *tx;
+
+		utxq_info = s->uld_txq_info[CXGB4_TX_CRYPTO];
+		urxq_info = s->uld_rxq_info[CXGB4_ULD_CRYPTO];
+		tx = &utxq_info->uldtxq[r * 4];
+		rx = &urxq_info->uldrxq[r * 4];
+		n = min(4, utxq_info->ntxq - 4 * r);
+
+		S("QType:", "Crypto");
+		T("TxQ ID:", q.cntxt_id);
+		T("TxQ size:", q.size);
+		T("TxQ inuse:", q.in_use);
+		T("TxQ CIDX:", q.cidx);
+		T("TxQ PIDX:", q.pidx);
+		R("RspQ ID:", rspq.abs_id);
+		R("RspQ size:", rspq.size);
+		R("RspQE size:", rspq.iqe_len);
+		R("RspQ CIDX:", rspq.cidx);
+		R("RspQ Gen:", rspq.gen);
+		S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
+		S3("u", "Intr pktcnt:",	s->counter_val[rx[i].rspq.pktcnt_idx]);
+		R("FL ID:", fl.cntxt_id);
+		R("FL size:", fl.size - 8);
+		R("FL pend:", fl.pend_cred);
+		R("FL avail:", fl.avail);
+		R("FL PIDX:", fl.pidx);
+		R("FL CIDX:", fl.cidx);
+
+		goto unlock;
+	}
+
+	r -= uld_txq_entries[CXGB4_TX_CRYPTO];
+	if (r < ctrl_entries) {
+		const struct sge_ctrl_txq *tx = &s->ctrlq[r * 4];
+
+		n = min(4, adap->params.nports - 4 * r);
 
 		S("QType:", "Control");
 		T("TxQ ID:", q.cntxt_id);
@@ -2570,8 +2805,13 @@ do { \
 		T("TxQ PIDX:", q.pidx);
 		TL("TxQFull:", q.stops);
 		TL("TxQRestarts:", q.restarts);
-	} else if (fq_idx == 0) {
-		const struct sge_rspq *evtq = &adap->sge.fw_evtq;
+
+		goto unlock;
+	}
+
+	r -= ctrl_entries;
+	if (r < 1) {
+		const struct sge_rspq *evtq = &s->fw_evtq;
 
 		seq_printf(seq, "%-12s %16s\n", "QType:", "FW event queue");
 		seq_printf(seq, "%-12s %16u\n", "RspQ ID:", evtq->abs_id);
@@ -2582,8 +2822,13 @@ do { \
 		seq_printf(seq, "%-12s %16u\n", "Intr delay:",
 			   qtimer_val(adap, evtq));
 		seq_printf(seq, "%-12s %16u\n", "Intr pktcnt:",
-			   adap->sge.counter_val[evtq->pktcnt_idx]);
+			   s->counter_val[evtq->pktcnt_idx]);
+
+		goto unlock;
 	}
+
+unlock:
+	mutex_unlock(&uld_mutex);
 #undef R
 #undef RL
 #undef T
@@ -2597,8 +2842,21 @@ do { \
 
 static int sge_queue_entries(const struct adapter *adap)
 {
+	int tot_uld_entries = 0;
+	int i;
+
+	mutex_lock(&uld_mutex);
+	for (i = 0; i < CXGB4_TX_MAX; i++)
+		tot_uld_entries += sge_qinfo_uld_txq_entries(adap, i);
+
+	for (i = 0; i < CXGB4_ULD_MAX; i++) {
+		tot_uld_entries += sge_qinfo_uld_rxq_entries(adap, i);
+		tot_uld_entries += sge_qinfo_uld_ciq_entries(adap, i);
+	}
+	mutex_unlock(&uld_mutex);
+
 	return DIV_ROUND_UP(adap->sge.ethqsets, 4) +
-	       DIV_ROUND_UP(adap->sge.ofldqsets, 4) +
+	       tot_uld_entries +
 	       DIV_ROUND_UP(MAX_CTRL_QUEUES, 4) + 1;
 }
 
-- 
2.14.1
