Message-ID: <1475348401-31392-8-git-send-email-Yuval.Mintz@caviumnetworks.com>
Date:   Sat, 1 Oct 2016 22:00:01 +0300
From:   Yuval Mintz <Yuval.Mintz@...iumnetworks.com>
To:     <netdev@...r.kernel.org>, <davem@...emloft.net>
CC:     <linux-rdma@...r.kernel.org>, <Ram.Amrani@...iumnetworks.com>,
        <Michal.Kalderon@...iumnetworks.com>,
        <Ariel.Elior@...iumnetworks.com>, <dledford@...hat.com>,
        Yuval Mintz <Yuval.Mintz@...iumnetworks.com>
Subject: [PATCH net-next 7/7] qed: Add RoCE ll2 & GSI support

From: Ram Amrani <Ram.Amrani@...iumnetworks.com>

Add the RoCE-specific LL2 logic, as well as GSI support, on top of the
generic LL2 interface: the LL2 Tx/Rx completion paths gain GSI-aware
variants, the core Tx BD carries a RoCE flavor field, and new
roce_ll2_* operations are exposed through qed_rdma_ops.

Signed-off-by: Ram Amrani <Ram.Amrani@...iumnetworks.com>
Signed-off-by: Yuval Mintz <Yuval.Mintz@...iumnetworks.com>
---
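For orientation only (not part of the applied diff): a minimal sketch of how an
upper-layer RDMA driver could consume the new interface, assuming it fetches the
ops table via qed_get_rdma_ops() exported below. The example_gsi_* names and the
buffer/MTU values are hypothetical.

#include <linux/etherdevice.h>
#include <linux/qed/qed_roce_if.h>

/* Hypothetical completion callbacks; cb_cookie is passed back verbatim. */
static void example_gsi_tx_done(void *cookie, struct qed_roce_ll2_packet *pkt)
{
	/* Tx completion for a GSI packet; 'pkt' may now be released. */
}

static void example_gsi_rx(void *cookie, struct qed_roce_ll2_packet *pkt,
			   struct qed_roce_ll2_rx_params *params)
{
	/* Rx completion; params->smac/vlan_id describe the frame and
	 * params->rc is negative on a data-length error.
	 */
}

static int example_gsi_start(struct qed_dev *cdev, const u8 *mac)
{
	const struct qed_rdma_ops *ops = qed_get_rdma_ops();
	struct qed_roce_ll2_params params = {
		.max_rx_buffers	= 128,
		.max_tx_buffers	= 128,
		.mtu		= 1500,
		.cbs.tx_cb	= example_gsi_tx_done,
		.cbs.rx_cb	= example_gsi_rx,
		.cb_cookie	= cdev,
	};

	ether_addr_copy(params.mac_address, mac);

	/* Acquires and establishes the RoCE LL2 connection (gsi_enable set)
	 * and installs the MAC filter.
	 */
	return ops->roce_ll2_start(cdev, &params);
}
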
 drivers/net/ethernet/qlogic/qed/qed.h      |   1 +
 drivers/net/ethernet/qlogic/qed/qed_hsi.h  |   3 +
 drivers/net/ethernet/qlogic/qed/qed_ll2.c  | 115 +++++++++--
 drivers/net/ethernet/qlogic/qed/qed_ll2.h  |  29 ++-
 drivers/net/ethernet/qlogic/qed/qed_roce.c | 307 +++++++++++++++++++++++++++++
 drivers/net/ethernet/qlogic/qed/qed_roce.h |   1 +
 include/linux/qed/qed_roce_if.h            |  79 ++++++++
 7 files changed, 523 insertions(+), 12 deletions(-)
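
A similarly hedged sketch of the Tx side, again illustrative only: DMA mapping
of the header and payload is assumed to have been done by the caller, and the
packet object is handed back as the cookie in tx_cb, which is where it would
normally be freed. example_gsi_send() and its parameters are hypothetical.

#include <linux/slab.h>
#include <linux/qed/qed_roce_if.h>

static int example_gsi_send(struct qed_dev *cdev, dma_addr_t hdr_dma,
			    size_t hdr_len, dma_addr_t payload_dma,
			    size_t payload_len)
{
	const struct qed_rdma_ops *ops = qed_get_rdma_ops();
	struct qed_roce_ll2_tx_params tx_params = { 0 };
	struct qed_roce_ll2_packet *pkt;
	int rc;

	pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
	if (!pkt)
		return -ENOMEM;

	pkt->header.baddr = hdr_dma;
	pkt->header.len = hdr_len;
	pkt->n_seg = 1;
	pkt->payload[0].baddr = payload_dma;
	pkt->payload[0].len = payload_len;
	pkt->roce_mode = ROCE_V1;	/* checked in qed_roce_ll2_tx() */
	pkt->tx_dest = QED_ROCE_LL2_TX_DEST_NW;

	rc = ops->roce_ll2_tx(cdev, pkt, &tx_params);
	if (rc == QED_ROCE_TX_HEAD_FAILURE) {
		/* Nothing was posted; safe to free here. */
		kfree(pkt);
		return rc;
	}

	/* On success (and on QED_ROCE_TX_FRAG_FAILURE) the packet stays
	 * owned by the LL2 queue until tx_cb fires with it as the cookie.
	 */
	return rc;
}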

diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index c5a098a..653bb57 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -437,6 +437,7 @@ struct qed_hwfn {
 #endif
 
 	struct z_stream_s		*stream;
+	struct qed_roce_ll2_info	*ll2;
 };
 
 struct pci_params {
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
index 2777d5b..72eee29 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -727,6 +727,9 @@ struct core_tx_bd_flags {
 #define CORE_TX_BD_FLAGS_L4_PROTOCOL_SHIFT	6
 #define CORE_TX_BD_FLAGS_L4_PSEUDO_CSUM_MODE_MASK	0x1
 #define CORE_TX_BD_FLAGS_L4_PSEUDO_CSUM_MODE_SHIFT 7
+#define CORE_TX_BD_FLAGS_ROCE_FLAV_MASK		0x1
+#define CORE_TX_BD_FLAGS_ROCE_FLAV_SHIFT	12
+
 };
 
 struct core_tx_bd {
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index e0ec8ed..a6db107 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -278,6 +278,7 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
 	struct qed_ll2_tx_packet *p_pkt = NULL;
 	struct qed_ll2_info *p_ll2_conn;
 	struct qed_ll2_tx_queue *p_tx;
+	dma_addr_t tx_frag;
 
 	p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle);
 	if (!p_ll2_conn)
@@ -297,11 +298,22 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
 		p_tx->cur_completing_packet = *p_pkt;
 		p_tx->cur_completing_bd_idx = 1;
 		b_last_frag = p_tx->cur_completing_bd_idx == p_pkt->bd_used;
+		tx_frag = p_pkt->bds_set[0].tx_frag;
+		if (p_ll2_conn->gsi_enable)
+			qed_ll2b_release_tx_gsi_packet(p_hwfn,
+						       p_ll2_conn->my_id,
+						       p_pkt->cookie,
+						       tx_frag,
+						       b_last_frag,
+						       b_last_packet);
+		else
+			qed_ll2b_complete_tx_packet(p_hwfn,
+						    p_ll2_conn->my_id,
+						    p_pkt->cookie,
+						    tx_frag,
+						    b_last_frag,
+						    b_last_packet);
 
-		qed_ll2b_complete_tx_packet(p_hwfn, p_ll2_conn->my_id,
-					    p_pkt->cookie,
-					    p_pkt->bds_set[0].tx_frag,
-					    b_last_frag, b_last_packet);
 	}
 }
 
@@ -313,6 +325,7 @@ static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
 	struct qed_ll2_tx_packet *p_pkt;
 	bool b_last_frag = false;
 	unsigned long flags;
+	dma_addr_t tx_frag;
 	int rc = -EINVAL;
 
 	spin_lock_irqsave(&p_tx->lock, flags);
@@ -353,11 +366,19 @@ static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
 		list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
 
 		spin_unlock_irqrestore(&p_tx->lock, flags);
-		qed_ll2b_complete_tx_packet(p_hwfn,
-					    p_ll2_conn->my_id,
-					    p_pkt->cookie,
-					    p_pkt->bds_set[0].tx_frag,
-					    b_last_frag, !num_bds);
+		tx_frag = p_pkt->bds_set[0].tx_frag;
+		if (p_ll2_conn->gsi_enable)
+			qed_ll2b_complete_tx_gsi_packet(p_hwfn,
+							p_ll2_conn->my_id,
+							p_pkt->cookie,
+							tx_frag,
+							b_last_frag, !num_bds);
+		else
+			qed_ll2b_complete_tx_packet(p_hwfn,
+						    p_ll2_conn->my_id,
+						    p_pkt->cookie,
+						    tx_frag,
+						    b_last_frag, !num_bds);
 		spin_lock_irqsave(&p_tx->lock, flags);
 	}
 
@@ -368,6 +389,54 @@ out:
 	return rc;
 }
 
+static int
+qed_ll2_rxq_completion_gsi(struct qed_hwfn *p_hwfn,
+			   struct qed_ll2_info *p_ll2_info,
+			   union core_rx_cqe_union *p_cqe,
+			   unsigned long lock_flags, bool b_last_cqe)
+{
+	struct qed_ll2_rx_queue *p_rx = &p_ll2_info->rx_queue;
+	struct qed_ll2_rx_packet *p_pkt = NULL;
+	u16 packet_length, parse_flags, vlan;
+	u32 src_mac_addrhi;
+	u16 src_mac_addrlo;
+
+	if (!list_empty(&p_rx->active_descq))
+		p_pkt = list_first_entry(&p_rx->active_descq,
+					 struct qed_ll2_rx_packet, list_entry);
+	if (!p_pkt) {
+		DP_NOTICE(p_hwfn,
+			  "GSI Rx completion but active_descq is empty\n");
+		return -EIO;
+	}
+
+	list_del(&p_pkt->list_entry);
+	parse_flags = le16_to_cpu(p_cqe->rx_cqe_gsi.parse_flags.flags);
+	packet_length = le16_to_cpu(p_cqe->rx_cqe_gsi.data_length);
+	vlan = le16_to_cpu(p_cqe->rx_cqe_gsi.vlan);
+	src_mac_addrhi = le32_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrhi);
+	src_mac_addrlo = le16_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrlo);
+	if (qed_chain_consume(&p_rx->rxq_chain) != p_pkt->rxq_bd)
+		DP_NOTICE(p_hwfn,
+			  "Mismatch between active_descq and the LL2 Rx chain\n");
+	list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);
+
+	spin_unlock_irqrestore(&p_rx->lock, lock_flags);
+	qed_ll2b_complete_rx_gsi_packet(p_hwfn,
+					p_ll2_info->my_id,
+					p_pkt->cookie,
+					p_pkt->rx_buf_addr,
+					packet_length,
+					p_cqe->rx_cqe_gsi.data_length_error,
+					parse_flags,
+					vlan,
+					src_mac_addrhi,
+					src_mac_addrlo, b_last_cqe);
+	spin_lock_irqsave(&p_rx->lock, lock_flags);
+
+	return 0;
+}
+
 static int qed_ll2_rxq_completion_reg(struct qed_hwfn *p_hwfn,
 				      struct qed_ll2_info *p_ll2_conn,
 				      union core_rx_cqe_union *p_cqe,
@@ -429,6 +498,10 @@ static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie)
 			DP_NOTICE(p_hwfn, "LL2 - unexpected Rx CQE slowpath\n");
 			rc = -EINVAL;
 			break;
+		case CORE_RX_CQE_TYPE_GSI_OFFLOAD:
+			rc = qed_ll2_rxq_completion_gsi(p_hwfn, p_ll2_conn,
+							cqe, flags, b_last_cqe);
+			break;
 		case CORE_RX_CQE_TYPE_REGULAR:
 			rc = qed_ll2_rxq_completion_reg(p_hwfn, p_ll2_conn,
 							cqe, flags, b_last_cqe);
@@ -527,6 +600,7 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
 	}
 
 	p_ramrod->action_on_error.error_type = action_on_error;
+	p_ramrod->gsi_offload_flag = p_ll2_conn->gsi_enable;
 	return qed_spq_post(p_hwfn, p_ent, NULL);
 }
 
@@ -589,6 +663,7 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
 		DP_NOTICE(p_hwfn, "Unknown connection type: %d\n", conn_type);
 	}
 
+	p_ramrod->gsi_offload_flag = p_ll2_conn->gsi_enable;
 	return qed_spq_post(p_hwfn, p_ent, NULL);
 }
 
@@ -775,6 +850,7 @@ int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn,
 	p_ll2_info->tx_dest = p_params->tx_dest;
 	p_ll2_info->ai_err_packet_too_big = p_params->ai_err_packet_too_big;
 	p_ll2_info->ai_err_no_buf = p_params->ai_err_no_buf;
+	p_ll2_info->gsi_enable = p_params->gsi_enable;
 
 	rc = qed_ll2_acquire_connection_rx(p_hwfn, p_ll2_info, rx_num_desc);
 	if (rc)
@@ -1026,6 +1102,7 @@ static void qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
 					     u16 vlan,
 					     u8 bd_flags,
 					     u16 l4_hdr_offset_w,
+					     enum core_roce_flavor_type type,
 					     dma_addr_t first_frag,
 					     u16 first_frag_len)
 {
@@ -1046,6 +1123,9 @@ static void qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
 	DMA_REGPAIR_LE(start_bd->addr, first_frag);
 	start_bd->nbytes = cpu_to_le16(first_frag_len);
 
+	SET_FIELD(start_bd->bd_flags.as_bitfield, CORE_TX_BD_FLAGS_ROCE_FLAV,
+		  type);
+
 	DP_VERBOSE(p_hwfn,
 		   (NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
 		   "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Tx Producer at [0x%04x] - set with a %04x bytes %02x BDs buffer at %08x:%08x\n",
@@ -1137,11 +1217,13 @@ int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn,
 			      u16 vlan,
 			      u8 bd_flags,
 			      u16 l4_hdr_offset_w,
+			      enum qed_ll2_roce_flavor_type qed_roce_flavor,
 			      dma_addr_t first_frag,
 			      u16 first_frag_len, void *cookie, u8 notify_fw)
 {
 	struct qed_ll2_tx_packet *p_curp = NULL;
 	struct qed_ll2_info *p_ll2_conn = NULL;
+	enum core_roce_flavor_type roce_flavor;
 	struct qed_ll2_tx_queue *p_tx;
 	struct qed_chain *p_tx_chain;
 	unsigned long flags;
@@ -1174,6 +1256,15 @@ int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn,
 		goto out;
 	}
 
+	if (qed_roce_flavor == QED_LL2_ROCE) {
+		roce_flavor = CORE_ROCE;
+	} else if (qed_roce_flavor == QED_LL2_RROCE) {
+		roce_flavor = CORE_RROCE;
+	} else {
+		rc = -EINVAL;
+		goto out;
+	}
+
 	/* Prepare packet and BD, and perhaps send a doorbell to FW */
 	qed_ll2_prepare_tx_packet_set(p_hwfn, p_tx, p_curp,
 				      num_of_bds, first_frag,
@@ -1181,6 +1272,7 @@ int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn,
 	qed_ll2_prepare_tx_packet_set_bd(p_hwfn, p_ll2_conn, p_curp,
 					 num_of_bds, CORE_TX_DEST_NW,
 					 vlan, bd_flags, l4_hdr_offset_w,
+					 roce_flavor,
 					 first_frag, first_frag_len);
 
 	qed_ll2_tx_packet_notify(p_hwfn, p_ll2_conn);
@@ -1476,6 +1568,7 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
 	ll2_info.rx_vlan_removal_en = params->rx_vlan_stripping;
 	ll2_info.tx_tc = 0;
 	ll2_info.tx_dest = CORE_TX_DEST_NW;
+	ll2_info.gsi_enable = 1;
 
 	rc = qed_ll2_acquire_connection(QED_LEADING_HWFN(cdev), &ll2_info,
 					QED_LL2_RX_SIZE, QED_LL2_TX_SIZE,
@@ -1625,8 +1718,8 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb)
 	rc = qed_ll2_prepare_tx_packet(QED_LEADING_HWFN(cdev),
 				       cdev->ll2->handle,
 				       1 + skb_shinfo(skb)->nr_frags,
-				       vlan, flags, 0, mapping,
-				       skb->len, skb, 1);
+				       vlan, flags, 0, 0 /* RoCE FLAVOR */,
+				       mapping, skb->len, skb, 1);
 	if (rc)
 		goto err;
 
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.h b/drivers/net/ethernet/qlogic/qed/qed_ll2.h
index a037c48..80a5dc2 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.h
@@ -24,6 +24,12 @@
 
 #define QED_MAX_NUM_OF_LL2_CONNECTIONS                    (4)
 
+enum qed_ll2_roce_flavor_type {
+	QED_LL2_ROCE,
+	QED_LL2_RROCE,
+	MAX_QED_LL2_ROCE_FLAVOR_TYPE
+};
+
 enum qed_ll2_conn_type {
 	QED_LL2_TYPE_RESERVED,
 	QED_LL2_TYPE_ISCSI,
@@ -119,6 +125,7 @@ struct qed_ll2_info {
 	u8 tx_stats_en;
 	struct qed_ll2_rx_queue rx_queue;
 	struct qed_ll2_tx_queue tx_queue;
+	u8 gsi_enable;
 };
 
 /**
@@ -199,6 +206,7 @@ int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn,
 			      u16 vlan,
 			      u8 bd_flags,
 			      u16 l4_hdr_offset_w,
+			      enum qed_ll2_roce_flavor_type qed_roce_flavor,
 			      dma_addr_t first_frag,
 			      u16 first_frag_len, void *cookie, u8 notify_fw);
 
@@ -285,5 +293,24 @@ void qed_ll2_setup(struct qed_hwfn *p_hwfn,
  */
 void qed_ll2_free(struct qed_hwfn *p_hwfn,
 		  struct qed_ll2_info *p_ll2_connections);
-
+void qed_ll2b_complete_rx_gsi_packet(struct qed_hwfn *p_hwfn,
+				     u8 connection_handle,
+				     void *cookie,
+				     dma_addr_t rx_buf_addr,
+				     u16 data_length,
+				     u8 data_length_error,
+				     u16 parse_flags,
+				     u16 vlan,
+				     u32 src_mac_addr_hi,
+				     u16 src_mac_addr_lo, bool b_last_packet);
+void qed_ll2b_complete_tx_gsi_packet(struct qed_hwfn *p_hwfn,
+				     u8 connection_handle,
+				     void *cookie,
+				     dma_addr_t first_frag_addr,
+				     bool b_last_fragment, bool b_last_packet);
+void qed_ll2b_release_tx_gsi_packet(struct qed_hwfn *p_hwfn,
+				    u8 connection_handle,
+				    void *cookie,
+				    dma_addr_t first_frag_addr,
+				    bool b_last_fragment, bool b_last_packet);
 #endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c
index 8b4854d..2343005 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_roce.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c
@@ -64,6 +64,7 @@
 #include "qed_reg_addr.h"
 #include "qed_sp.h"
 #include "qed_roce.h"
+#include "qed_ll2.h"
 
 void qed_async_roce_event(struct qed_hwfn *p_hwfn,
 			  struct event_ring_entry *p_eqe)
@@ -2611,6 +2612,306 @@ void qed_rdma_remove_user(void *rdma_cxt, u16 dpi)
 	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
 }
 
+void qed_ll2b_complete_tx_gsi_packet(struct qed_hwfn *p_hwfn,
+				     u8 connection_handle,
+				     void *cookie,
+				     dma_addr_t first_frag_addr,
+				     bool b_last_fragment, bool b_last_packet)
+{
+	struct qed_roce_ll2_packet *packet = cookie;
+	struct qed_roce_ll2_info *roce_ll2 = p_hwfn->ll2;
+
+	roce_ll2->cbs.tx_cb(roce_ll2->cb_cookie, packet);
+}
+
+void qed_ll2b_release_tx_gsi_packet(struct qed_hwfn *p_hwfn,
+				    u8 connection_handle,
+				    void *cookie,
+				    dma_addr_t first_frag_addr,
+				    bool b_last_fragment, bool b_last_packet)
+{
+	qed_ll2b_complete_tx_gsi_packet(p_hwfn, connection_handle,
+					cookie, first_frag_addr,
+					b_last_fragment, b_last_packet);
+}
+
+void qed_ll2b_complete_rx_gsi_packet(struct qed_hwfn *p_hwfn,
+				     u8 connection_handle,
+				     void *cookie,
+				     dma_addr_t rx_buf_addr,
+				     u16 data_length,
+				     u8 data_length_error,
+				     u16 parse_flags,
+				     u16 vlan,
+				     u32 src_mac_addr_hi,
+				     u16 src_mac_addr_lo, bool b_last_packet)
+{
+	struct qed_roce_ll2_info *roce_ll2 = p_hwfn->ll2;
+	struct qed_roce_ll2_rx_params params;
+	struct qed_dev *cdev = p_hwfn->cdev;
+	struct qed_roce_ll2_packet pkt;
+
+	DP_VERBOSE(cdev,
+		   QED_MSG_LL2,
+		   "roce ll2 rx complete: bus_addr=%p, len=%d, data_len_err=%d\n",
+		   (void *)(uintptr_t)rx_buf_addr,
+		   data_length, data_length_error);
+
+	memset(&pkt, 0, sizeof(pkt));
+	pkt.n_seg = 1;
+	pkt.payload[0].baddr = rx_buf_addr;
+	pkt.payload[0].len = data_length;
+
+	memset(&params, 0, sizeof(params));
+	params.vlan_id = vlan;
+	*((u32 *)&params.smac[0]) = ntohl(src_mac_addr_hi);
+	*((u16 *)&params.smac[4]) = ntohs(src_mac_addr_lo);
+
+	if (data_length_error) {
+		DP_ERR(cdev,
+		       "roce ll2 rx complete: data length error %d, length=%d\n",
+		       data_length_error, data_length);
+		params.rc = -EINVAL;
+	}
+
+	roce_ll2->cbs.rx_cb(roce_ll2->cb_cookie, &pkt, &params);
+}
+
+static int qed_roce_ll2_set_mac_filter(struct qed_dev *cdev,
+				       u8 *old_mac_address,
+				       u8 *new_mac_address)
+{
+	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+	struct qed_ptt *p_ptt;
+	int rc = 0;
+
+	if (!hwfn->ll2 || hwfn->ll2->handle == QED_LL2_UNUSED_HANDLE) {
+		DP_ERR(cdev,
+		       "qed roce mac filter failed - roce_info/ll2 NULL\n");
+		return -EINVAL;
+	}
+
+	p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
+	if (!p_ptt) {
+		DP_ERR(cdev,
+		       "qed roce ll2 mac filter set: failed to acquire PTT\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&hwfn->ll2->lock);
+	if (old_mac_address)
+		qed_llh_remove_mac_filter(QED_LEADING_HWFN(cdev), p_ptt,
+					  old_mac_address);
+	if (new_mac_address)
+		rc = qed_llh_add_mac_filter(QED_LEADING_HWFN(cdev), p_ptt,
+					    new_mac_address);
+	mutex_unlock(&hwfn->ll2->lock);
+
+	qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);
+
+	if (rc)
+		DP_ERR(cdev,
+		       "qed roce ll2 mac filter set: failed to add mac filter\n");
+
+	return rc;
+}
+
+static int qed_roce_ll2_start(struct qed_dev *cdev,
+			      struct qed_roce_ll2_params *params)
+{
+	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+	struct qed_roce_ll2_info *roce_ll2;
+	struct qed_ll2_info ll2_params;
+	int rc;
+
+	if (!params) {
+		DP_ERR(cdev, "qed roce ll2 start: failed due to NULL params\n");
+		return -EINVAL;
+	}
+	if (!params->cbs.tx_cb || !params->cbs.rx_cb) {
+		DP_ERR(cdev,
+		       "qed roce ll2 start: failed due to NULL tx/rx. tx_cb=%p, rx_cb=%p\n",
+		       params->cbs.tx_cb, params->cbs.rx_cb);
+		return -EINVAL;
+	}
+	if (!is_valid_ether_addr(params->mac_address)) {
+		DP_ERR(cdev,
+		       "qed roce ll2 start: failed due to invalid Ethernet address %pM\n",
+		       params->mac_address);
+		return -EINVAL;
+	}
+
+	/* Initialize */
+	roce_ll2 = kzalloc(sizeof(*roce_ll2), GFP_ATOMIC);
+	if (!roce_ll2) {
+		DP_ERR(cdev, "qed roce ll2 start: failed memory allocation\n");
+		return -ENOMEM;
+	}
+	memset(roce_ll2, 0, sizeof(*roce_ll2));
+	roce_ll2->handle = QED_LL2_UNUSED_HANDLE;
+	roce_ll2->cbs = params->cbs;
+	roce_ll2->cb_cookie = params->cb_cookie;
+	mutex_init(&roce_ll2->lock);
+
+	memset(&ll2_params, 0, sizeof(ll2_params));
+	ll2_params.conn_type = QED_LL2_TYPE_ROCE;
+	ll2_params.mtu = params->mtu;
+	ll2_params.rx_drop_ttl0_flg = true;
+	ll2_params.rx_vlan_removal_en = false;
+	ll2_params.tx_dest = CORE_TX_DEST_NW;
+	ll2_params.ai_err_packet_too_big = LL2_DROP_PACKET;
+	ll2_params.ai_err_no_buf = LL2_DROP_PACKET;
+	ll2_params.gsi_enable = true;
+
+	rc = qed_ll2_acquire_connection(QED_LEADING_HWFN(cdev), &ll2_params,
+					params->max_rx_buffers,
+					params->max_tx_buffers,
+					&roce_ll2->handle);
+	if (rc) {
+		DP_ERR(cdev,
+		       "qed roce ll2 start: failed to acquire LL2 connection (rc=%d)\n",
+		       rc);
+		goto err;
+	}
+
+	rc = qed_ll2_establish_connection(QED_LEADING_HWFN(cdev),
+					  roce_ll2->handle);
+	if (rc) {
+		DP_ERR(cdev,
+		       "qed roce ll2 start: failed to establish LL2 connection (rc=%d)\n",
+		       rc);
+		goto err1;
+	}
+
+	hwfn->ll2 = roce_ll2;
+
+	rc = qed_roce_ll2_set_mac_filter(cdev, NULL, params->mac_address);
+	if (rc) {
+		hwfn->ll2 = NULL;
+		goto err2;
+	}
+	ether_addr_copy(roce_ll2->mac_address, params->mac_address);
+
+	return 0;
+
+err2:
+	qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev), roce_ll2->handle);
+err1:
+	qed_ll2_release_connection(QED_LEADING_HWFN(cdev), roce_ll2->handle);
+err:
+	kfree(roce_ll2);
+	return rc;
+}
+
+static int qed_roce_ll2_stop(struct qed_dev *cdev)
+{
+	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+	struct qed_roce_ll2_info *roce_ll2 = hwfn->ll2;
+	int rc;
+
+	if (!cdev) {
+		DP_ERR(cdev, "qed roce ll2 stop: invalid cdev\n");
+		return -EINVAL;
+	}
+
+	if (roce_ll2->handle == QED_LL2_UNUSED_HANDLE) {
+		DP_ERR(cdev, "qed roce ll2 stop: cannot stop an unused LL2\n");
+		return -EINVAL;
+	}
+
+	/* remove LL2 MAC address filter */
+	rc = qed_roce_ll2_set_mac_filter(cdev, roce_ll2->mac_address, NULL);
+	eth_zero_addr(roce_ll2->mac_address);
+
+	rc = qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev),
+					  roce_ll2->handle);
+	if (rc)
+		DP_ERR(cdev,
+		       "qed roce ll2 stop: failed to terminate LL2 connection (rc=%d)\n",
+		       rc);
+
+	qed_ll2_release_connection(QED_LEADING_HWFN(cdev), roce_ll2->handle);
+
+	roce_ll2->handle = QED_LL2_UNUSED_HANDLE;
+
+	kfree(roce_ll2);
+
+	return rc;
+}
+
+static int qed_roce_ll2_tx(struct qed_dev *cdev,
+			   struct qed_roce_ll2_packet *pkt,
+			   struct qed_roce_ll2_tx_params *params)
+{
+	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+	struct qed_roce_ll2_info *roce_ll2 = hwfn->ll2;
+	enum qed_ll2_roce_flavor_type qed_roce_flavor;
+	u8 flags = 0;
+	int rc;
+	int i;
+
+	if (!cdev || !pkt || !params) {
+		DP_ERR(cdev,
+		       "roce ll2 tx: failed tx because one of the following is NULL - drv=%p, pkt=%p, params=%p\n",
+		       cdev, pkt, params);
+		return -EINVAL;
+	}
+
+	qed_roce_flavor = (pkt->roce_mode == ROCE_V1) ? QED_LL2_ROCE
+						      : QED_LL2_RROCE;
+
+	if (pkt->roce_mode == ROCE_V2_IPV4)
+		flags |= BIT(CORE_TX_BD_FLAGS_IP_CSUM_SHIFT);
+
+	/* Tx header */
+	rc = qed_ll2_prepare_tx_packet(QED_LEADING_HWFN(cdev), roce_ll2->handle,
+				       1 + pkt->n_seg, 0, flags, 0,
+				       qed_roce_flavor, pkt->header.baddr,
+				       pkt->header.len, pkt, 1);
+	if (rc) {
+		DP_ERR(cdev, "roce ll2 tx: header failed (rc=%d)\n", rc);
+		return QED_ROCE_TX_HEAD_FAILURE;
+	}
+
+	/* Tx payload */
+	for (i = 0; i < pkt->n_seg; i++) {
+		rc = qed_ll2_set_fragment_of_tx_packet(QED_LEADING_HWFN(cdev),
+						       roce_ll2->handle,
+						       pkt->payload[i].baddr,
+						       pkt->payload[i].len);
+		if (rc) {
+			/* If this fails there is not much we can do: a partial
+			 * packet has already been posted and we cannot free the
+			 * memory, so we must wait for the completion.
+			 */
+			DP_ERR(cdev,
+			       "roce ll2 tx: payload failed (rc=%d)\n", rc);
+			return QED_ROCE_TX_FRAG_FAILURE;
+		}
+	}
+
+	return 0;
+}
+
+static int qed_roce_ll2_post_rx_buffer(struct qed_dev *cdev,
+				       struct qed_roce_ll2_buffer *buf,
+				       u64 cookie, u8 notify_fw)
+{
+	return qed_ll2_post_rx_buffer(QED_LEADING_HWFN(cdev),
+				      QED_LEADING_HWFN(cdev)->ll2->handle,
+				      buf->baddr, buf->len,
+				      (void *)(uintptr_t)cookie, notify_fw);
+}
+
+static int qed_roce_ll2_stats(struct qed_dev *cdev, struct qed_ll2_stats *stats)
+{
+	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+	struct qed_roce_ll2_info *roce_ll2 = hwfn->ll2;
+
+	return qed_ll2_get_stats(QED_LEADING_HWFN(cdev),
+				 roce_ll2->handle, stats);
+}
+
 static const struct qed_rdma_ops qed_rdma_ops_pass = {
 	.common = &qed_common_ops_pass,
 	.fill_dev_info = &qed_fill_rdma_dev_info,
@@ -2638,6 +2939,12 @@ static const struct qed_rdma_ops qed_rdma_ops_pass = {
 	.rdma_free_tid = &qed_rdma_free_tid,
 	.rdma_register_tid = &qed_rdma_register_tid,
 	.rdma_deregister_tid = &qed_rdma_deregister_tid,
+	.roce_ll2_start = &qed_roce_ll2_start,
+	.roce_ll2_stop = &qed_roce_ll2_stop,
+	.roce_ll2_tx = &qed_roce_ll2_tx,
+	.roce_ll2_post_rx_buffer = &qed_roce_ll2_post_rx_buffer,
+	.roce_ll2_set_mac_filter = &qed_roce_ll2_set_mac_filter,
+	.roce_ll2_stats = &qed_roce_ll2_stats,
 };
 
 const struct qed_rdma_ops *qed_get_rdma_ops()
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.h b/drivers/net/ethernet/qlogic/qed/qed_roce.h
index b8ddda4..2f091e8 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_roce.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_roce.h
@@ -42,6 +42,7 @@
 #include "qed.h"
 #include "qed_dev_api.h"
 #include "qed_hsi.h"
+#include "qed_ll2.h"
 
 #define QED_RDMA_MAX_FMR                    (RDMA_MAX_TIDS)
 #define QED_RDMA_MAX_P_KEY                  (1)
diff --git a/include/linux/qed/qed_roce_if.h b/include/linux/qed/qed_roce_if.h
index 0b6df6e..53047d3 100644
--- a/include/linux/qed/qed_roce_if.h
+++ b/include/linux/qed/qed_roce_if.h
@@ -39,6 +39,16 @@
 #include <linux/slab.h>
 #include <linux/qed/qed_if.h>
 #include <linux/qed/qed_ll2_if.h>
+#include <linux/qed/rdma_common.h>
+
+enum qed_roce_ll2_tx_dest {
+	/* Light L2 TX Destination to the Network */
+	QED_ROCE_LL2_TX_DEST_NW,
+
+	/* Light L2 TX Destination to the Loopback */
+	QED_ROCE_LL2_TX_DEST_LB,
+	QED_ROCE_LL2_TX_DEST_MAX
+};
 
 #define QED_RDMA_MAX_CNQ_SIZE               (0xFFFF)
 
@@ -461,6 +471,61 @@ struct qed_rdma_counters_out_params {
 #define QED_ROCE_TX_HEAD_FAILURE        (1)
 #define QED_ROCE_TX_FRAG_FAILURE        (2)
 
+struct qed_roce_ll2_header {
+	void *vaddr;
+	dma_addr_t baddr;
+	size_t len;
+};
+
+struct qed_roce_ll2_buffer {
+	dma_addr_t baddr;
+	size_t len;
+};
+
+struct qed_roce_ll2_packet {
+	struct qed_roce_ll2_header header;
+	int n_seg;
+	struct qed_roce_ll2_buffer payload[RDMA_MAX_SGE_PER_SQ_WQE];
+	int roce_mode;
+	enum qed_roce_ll2_tx_dest tx_dest;
+};
+
+struct qed_roce_ll2_tx_params {
+	int reserved;
+};
+
+struct qed_roce_ll2_rx_params {
+	u16 vlan_id;
+	u8 smac[ETH_ALEN];
+	int rc;
+};
+
+struct qed_roce_ll2_cbs {
+	void (*tx_cb)(void *pdev, struct qed_roce_ll2_packet *pkt);
+
+	void (*rx_cb)(void *pdev, struct qed_roce_ll2_packet *pkt,
+		      struct qed_roce_ll2_rx_params *params);
+};
+
+struct qed_roce_ll2_params {
+	u16 max_rx_buffers;
+	u16 max_tx_buffers;
+	u16 mtu;
+	u8 mac_address[ETH_ALEN];
+	struct qed_roce_ll2_cbs cbs;
+	void *cb_cookie;
+};
+
+struct qed_roce_ll2_info {
+	u8 handle;
+	struct qed_roce_ll2_cbs cbs;
+	u8 mac_address[ETH_ALEN];
+	void *cb_cookie;
+
+	/* Lock to protect ll2 */
+	struct mutex lock;
+};
+
 enum qed_rdma_type {
 	QED_RDMA_TYPE_ROCE,
 };
@@ -518,6 +583,20 @@ struct qed_rdma_ops {
 	int (*rdma_deregister_tid)(void *rdma_cxt, u32 itid);
 	int (*rdma_alloc_tid)(void *rdma_cxt, u32 *itid);
 	void (*rdma_free_tid)(void *rdma_cxt, u32 itid);
+	int (*roce_ll2_start)(struct qed_dev *cdev,
+			      struct qed_roce_ll2_params *params);
+	int (*roce_ll2_stop)(struct qed_dev *cdev);
+	int (*roce_ll2_tx)(struct qed_dev *cdev,
+			   struct qed_roce_ll2_packet *packet,
+			   struct qed_roce_ll2_tx_params *params);
+	int (*roce_ll2_post_rx_buffer)(struct qed_dev *cdev,
+				       struct qed_roce_ll2_buffer *buf,
+				       u64 cookie, u8 notify_fw);
+	int (*roce_ll2_set_mac_filter)(struct qed_dev *cdev,
+				       u8 *old_mac_address,
+				       u8 *new_mac_address);
+	int (*roce_ll2_stats)(struct qed_dev *cdev,
+			      struct qed_ll2_stats *stats);
 };
 
 const struct qed_rdma_ops *qed_get_rdma_ops(void);
-- 
1.9.3
