Message-Id: <20200217172839.32066-3-rohitm@chelsio.com>
Date:   Mon, 17 Feb 2020 22:58:35 +0530
From:   Rohit Maheshwari <rohitm@...lsio.com>
To:     davem@...emloft.net, netdev@...r.kernel.org
Cc:     manojmalviya@...lsio.com, varun@...lsio.com,
        Rohit Maheshwari <rohitm@...lsio.com>
Subject: [RFC net-next 2/6] cxgb4/chcr: Save crypto keys and handle HW response

This patch generates and saves the crypto keys, handles the HW
responses to act_open_req and set_tcb_req, and defines the connection
state updates.
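
For reference, a minimal userspace sketch of the hash-subkey step that
chcr_ktls_save_keys() performs (H = CIPH(K, 0 repeated 16 times), appended
after the AES key in the key context). It uses OpenSSL's legacy AES API
purely for illustration and is not driver code; the key value and file name
are hypothetical:

/* ghash_subkey_demo.c - illustration only; build with: cc ghash_subkey_demo.c -lcrypto */
#include <openssl/aes.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* In the driver this key comes from tls12_crypto_info_aes_gcm_128. */
	unsigned char key[16] = { 0 };
	unsigned char ghash_h[16];
	AES_KEY aes;
	int i;

	if (AES_set_encrypt_key(key, 128, &aes) != 0)
		return 1;

	memset(ghash_h, 0, sizeof(ghash_h));
	AES_encrypt(ghash_h, ghash_h, &aes);	/* H = AES_K(0 repeated 16 times) */

	for (i = 0; i < 16; i++)
		printf("%02x", ghash_h[i]);
	printf("\n");
	return 0;
}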

Signed-off-by: Rohit Maheshwari <rohitm@...lsio.com>
---
 drivers/crypto/chelsio/chcr_common.h        |  64 ++++
 drivers/crypto/chelsio/chcr_core.c          |  20 +-
 drivers/crypto/chelsio/chcr_core.h          |   2 +
 drivers/crypto/chelsio/chcr_ktls.c          | 357 ++++++++++++++++++++
 drivers/crypto/chelsio/chcr_ktls.h          |  17 +
 drivers/net/ethernet/chelsio/cxgb4/l2t.c    |  11 +
 drivers/net/ethernet/chelsio/cxgb4/l2t.h    |   1 +
 drivers/net/ethernet/chelsio/cxgb4/t4_msg.h |   8 +
 drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h |  39 ++-
 9 files changed, 505 insertions(+), 14 deletions(-)
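
As a reading aid only (not part of the patch), below is a simplified
userspace model of the forward-only ordering that
chcr_ktls_update_connection_state() enforces over the states added in
chcr_ktls.h: a late or duplicate lower-state update (e.g. an act_open_req
state arriving after the RX reply has already advanced the state) never
moves the connection backwards. The locking and the TCB re-send path are
omitted, and the enum names here are shortened stand-ins:

/* state_order_demo.c - illustration only */
#include <stdio.h>

enum conn_state {
	CONN_CLOSED,
	CONN_ACT_OPEN_REQ,
	CONN_ACT_OPEN_RPL,
	CONN_SET_TCB_REQ,
	CONN_SET_TCB_RPL,
	CONN_TX_READY,
};

/* Advance only when the requested state is newer than the current one. */
static enum conn_state update_state(enum conn_state cur, enum conn_state req)
{
	return req > cur ? req : cur;
}

int main(void)
{
	enum conn_state st = CONN_CLOSED;

	st = update_state(st, CONN_ACT_OPEN_REQ);  /* TX path sends act_open_req   */
	st = update_state(st, CONN_SET_TCB_REQ);   /* RX handler sends set_tcb_req */
	st = update_state(st, CONN_ACT_OPEN_REQ);  /* late lower state is ignored  */
	st = update_state(st, CONN_SET_TCB_RPL);   /* set_tcb_rpl received         */

	printf("final state: %d (CONN_SET_TCB_RPL)\n", st);
	return 0;
}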

diff --git a/drivers/crypto/chelsio/chcr_common.h b/drivers/crypto/chelsio/chcr_common.h
index c0b9a8806c23..852f64322326 100644
--- a/drivers/crypto/chelsio/chcr_common.h
+++ b/drivers/crypto/chelsio/chcr_common.h
@@ -6,6 +6,10 @@
 
 #include "cxgb4.h"
 
+#define CHCR_MAX_SALT                      4
+#define CHCR_KEYCTX_MAC_KEY_SIZE_128       0
+#define CHCR_KEYCTX_CIPHER_KEY_SIZE_128    0
+
 enum chcr_state {
 	CHCR_INIT = 0,
 	CHCR_ATTACH,
@@ -28,5 +32,65 @@ struct uld_ctx {
 	struct chcr_dev dev;
 };
 
+struct ktls_key_ctx {
+	__be32 ctx_hdr;
+	u8 salt[CHCR_MAX_SALT];
+	__be64 iv_to_auth;
+	unsigned char key[TLS_CIPHER_AES_GCM_128_KEY_SIZE +
+			  TLS_CIPHER_AES_GCM_256_TAG_SIZE];
+};
+
+/* Crypto key context */
+#define KEY_CONTEXT_CTX_LEN_S           24
+#define KEY_CONTEXT_CTX_LEN_V(x)        ((x) << KEY_CONTEXT_CTX_LEN_S)
+
+#define KEY_CONTEXT_SALT_PRESENT_S      10
+#define KEY_CONTEXT_SALT_PRESENT_V(x)   ((x) << KEY_CONTEXT_SALT_PRESENT_S)
+#define KEY_CONTEXT_SALT_PRESENT_F      KEY_CONTEXT_SALT_PRESENT_V(1U)
+
+#define KEY_CONTEXT_VALID_S     0
+#define KEY_CONTEXT_VALID_V(x)  ((x) << KEY_CONTEXT_VALID_S)
+#define KEY_CONTEXT_VALID_F     KEY_CONTEXT_VALID_V(1U)
+
+#define KEY_CONTEXT_CK_SIZE_S           6
+#define KEY_CONTEXT_CK_SIZE_V(x)        ((x) << KEY_CONTEXT_CK_SIZE_S)
+
+#define KEY_CONTEXT_MK_SIZE_S           2
+#define KEY_CONTEXT_MK_SIZE_V(x)        ((x) << KEY_CONTEXT_MK_SIZE_S)
+
+#define KEY_CONTEXT_OPAD_PRESENT_S      11
+#define KEY_CONTEXT_OPAD_PRESENT_V(x)   ((x) << KEY_CONTEXT_OPAD_PRESENT_S)
+#define KEY_CONTEXT_OPAD_PRESENT_F      KEY_CONTEXT_OPAD_PRESENT_V(1U)
+
+#define FILL_KEY_CTX_HDR(ck_size, mk_size, ctx_len) \
+		htonl(KEY_CONTEXT_MK_SIZE_V(mk_size) | \
+		      KEY_CONTEXT_CK_SIZE_V(ck_size) | \
+		      KEY_CONTEXT_VALID_F | \
+		      KEY_CONTEXT_SALT_PRESENT_F | \
+		      KEY_CONTEXT_CTX_LEN_V((ctx_len)))
+
 struct uld_ctx *assign_chcr_device(void);
+
+static inline void *chcr_copy_to_txd(const void *src, const struct sge_txq *q,
+				     void *pos, int length)
+{
+	int left = (void *)q->stat - pos;
+	u64 *p;
+
+	if (likely(length <= left)) {
+		memcpy(pos, src, length);
+		pos += length;
+	} else {
+		memcpy(pos, src, left);
+		memcpy(q->desc, src + left, length - left);
+		pos = (void *)q->desc + (length - left);
+	}
+	/* 0-pad to multiple of 16 */
+	p = PTR_ALIGN(pos, 8);
+	if ((uintptr_t)p & 8) {
+		*p = 0;
+		return p + 1;
+	}
+	return p;
+}
 #endif /* __CHCR_COMMON_H__ */
diff --git a/drivers/crypto/chelsio/chcr_core.c b/drivers/crypto/chelsio/chcr_core.c
index 2660f1b24cfc..fd5ab5caa7d9 100644
--- a/drivers/crypto/chelsio/chcr_core.c
+++ b/drivers/crypto/chelsio/chcr_core.c
@@ -28,13 +28,17 @@
 
 static struct chcr_driver_data drv_data;
 
-typedef int (*chcr_handler_func)(struct chcr_dev *dev, unsigned char *input);
-static int cpl_fw6_pld_handler(struct chcr_dev *dev, unsigned char *input);
+typedef int (*chcr_handler_func)(struct adapter *adap, unsigned char *input);
+static int cpl_fw6_pld_handler(struct adapter *adap, unsigned char *input);
 static void *chcr_uld_add(const struct cxgb4_lld_info *lld);
 static int chcr_uld_state_change(void *handle, enum cxgb4_state state);
 
 static chcr_handler_func work_handlers[NUM_CPL_CMDS] = {
 	[CPL_FW6_PLD] = cpl_fw6_pld_handler,
+#ifdef CONFIG_CHELSIO_TLS_DEVICE
+	[CPL_ACT_OPEN_RPL] = chcr_ktls_cpl_act_open_rpl,
+	[CPL_SET_TCB_RPL] = chcr_ktls_cpl_set_tcb_rpl,
+#endif
 };
 
 static struct cxgb4_uld_info chcr_uld_info = {
@@ -160,14 +164,13 @@ static int chcr_dev_move(struct uld_ctx *u_ctx)
 	return 0;
 }
 
-static int cpl_fw6_pld_handler(struct chcr_dev *dev,
+static int cpl_fw6_pld_handler(struct adapter *adap,
 			       unsigned char *input)
 {
 	struct crypto_async_request *req;
 	struct cpl_fw6_pld *fw6_pld;
 	u32 ack_err_status = 0;
 	int error_status = 0;
-	struct adapter *adap = padap(dev);
 
 	fw6_pld = (struct cpl_fw6_pld *)input;
 	req = (struct crypto_async_request *)(uintptr_t)be64_to_cpu(
@@ -224,17 +227,18 @@ int chcr_uld_rx_handler(void *handle, const __be64 *rsp,
 {
 	struct uld_ctx *u_ctx = (struct uld_ctx *)handle;
 	struct chcr_dev *dev = &u_ctx->dev;
+	struct adapter *adap = padap(dev);
 	const struct cpl_fw6_pld *rpl = (struct cpl_fw6_pld *)rsp;
 
-	if (rpl->opcode != CPL_FW6_PLD) {
-		pr_err("Unsupported opcode\n");
+	if (!work_handlers[rpl->opcode]) {
+		pr_err("Unsupported opcode %d received\n", rpl->opcode);
 		return 0;
 	}
 
 	if (!pgl)
-		work_handlers[rpl->opcode](dev, (unsigned char *)&rsp[1]);
+		work_handlers[rpl->opcode](adap, (unsigned char *)&rsp[1]);
 	else
-		work_handlers[rpl->opcode](dev, pgl->va);
+		work_handlers[rpl->opcode](adap, pgl->va);
 	return 0;
 }
 
diff --git a/drivers/crypto/chelsio/chcr_core.h b/drivers/crypto/chelsio/chcr_core.h
index 48e3ddfdd9e2..2dcbd188290a 100644
--- a/drivers/crypto/chelsio/chcr_core.h
+++ b/drivers/crypto/chelsio/chcr_core.h
@@ -225,5 +225,7 @@ void chcr_add_xfrmops(const struct cxgb4_lld_info *lld);
 #ifdef CONFIG_CHELSIO_TLS_DEVICE
 void chcr_enable_ktls(struct adapter *adap);
 void chcr_disable_ktls(struct adapter *adap);
+int chcr_ktls_cpl_act_open_rpl(struct adapter *adap, unsigned char *input);
+int chcr_ktls_cpl_set_tcb_rpl(struct adapter *adap, unsigned char *input);
 #endif
 #endif /* __CHCR_CORE_H__ */
diff --git a/drivers/crypto/chelsio/chcr_ktls.c b/drivers/crypto/chelsio/chcr_ktls.c
index 531cd9dd0d41..9b3546459709 100644
--- a/drivers/crypto/chelsio/chcr_ktls.c
+++ b/drivers/crypto/chelsio/chcr_ktls.c
@@ -4,6 +4,153 @@
 #ifdef CONFIG_CHELSIO_TLS_DEVICE
 #include "chcr_ktls.h"
 
+static int chcr_init_tcb_fields(struct chcr_ktls_info *tx_info);
+/*
+ * chcr_ktls_save_keys: calculate and save crypto keys.
+ * @tx_info - driver-specific TLS info.
+ * @crypto_info - TLS crypto information.
+ * @direction - TX/RX direction.
+ * return - SUCCESS/FAILURE.
+ */
+static int chcr_ktls_save_keys(struct chcr_ktls_info *tx_info,
+			       struct tls_crypto_info *crypto_info,
+			       enum tls_offload_ctx_dir direction)
+{
+	int ck_size, key_ctx_size, mac_key_size, keylen, ghash_size, ret;
+	unsigned char ghash_h[TLS_CIPHER_AES_GCM_256_TAG_SIZE];
+	struct tls12_crypto_info_aes_gcm_128 *info_128_gcm;
+	struct ktls_key_ctx *kctx = &tx_info->key_ctx;
+	struct crypto_cipher *cipher;
+	unsigned char *key, *salt;
+
+	switch (crypto_info->cipher_type) {
+	case TLS_CIPHER_AES_GCM_128:
+		info_128_gcm =
+			(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
+		keylen = TLS_CIPHER_AES_GCM_128_KEY_SIZE;
+		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
+		tx_info->salt_size = TLS_CIPHER_AES_GCM_128_SALT_SIZE;
+		mac_key_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
+		tx_info->iv_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
+		tx_info->iv = be64_to_cpu(*(__be64 *)info_128_gcm->iv);
+
+		ghash_size = TLS_CIPHER_AES_GCM_128_TAG_SIZE;
+		key = info_128_gcm->key;
+		salt = info_128_gcm->salt;
+		tx_info->record_no = *(u64 *)info_128_gcm->rec_seq;
+
+		break;
+
+	default:
+		pr_err("GCM: cipher type 0x%x not supported\n",
+		       crypto_info->cipher_type);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	key_ctx_size = CHCR_KTLS_KEY_CTX_LEN +
+		       roundup(keylen, 16) + ghash_size;
+	/* Calculate the hash subkey H = CIPH(K, 0 repeated 16 times).
+	 * It will go in the key context.
+	 */
+	cipher = crypto_alloc_cipher("aes", 0, 0);
+	if (IS_ERR(cipher)) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	ret = crypto_cipher_setkey(cipher, key, keylen);
+	if (ret)
+		goto out1;
+
+	memset(ghash_h, 0, ghash_size);
+	crypto_cipher_encrypt_one(cipher, ghash_h, ghash_h);
+
+	/* fill the Key context */
+	if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
+		kctx->ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
+						 mac_key_size,
+						 key_ctx_size >> 4);
+	} else {
+		ret = -EINVAL;
+		goto out1;
+	}
+
+	memcpy(kctx->salt, salt, tx_info->salt_size);
+	memcpy(kctx->key, key, keylen);
+	memcpy(kctx->key + keylen, ghash_h, ghash_size);
+	tx_info->key_ctx_len = key_ctx_size;
+
+out1:
+	crypto_free_cipher(cipher);
+out:
+	return ret;
+}
+
+static int chcr_ktls_update_connection_state(struct chcr_ktls_info *tx_info,
+					     int new_state)
+{
+	unsigned long flags;
+
+	/* This function can be called from both rx (interrupt context) and tx
+	 * queue contexts.
+	 */
+	spin_lock_irqsave(&tx_info->lock, flags);
+	switch (tx_info->connection_state) {
+	case KTLS_CONN_CLOSED:
+		tx_info->connection_state = new_state;
+		break;
+
+	case KTLS_CONN_ACT_OPEN_REQ:
+		/* update to the next state and also initialize TCB */
+		if (new_state > tx_info->connection_state)
+			tx_info->connection_state = new_state;
+		/* FALLTHRU */
+	case KTLS_CONN_ACT_OPEN_RPL:
+		/* KTLS_CONN_ACT_OPEN_REQ may arrive after the HW response has
+		 * already been received on the RX queue, so only move forward
+		 * if the new state is greater than the current one. If it is
+		 * lesser or equal, we are stuck in the same state, which means
+		 * the TCB init may not have reached HW, so try sending it
+		 * again.
+		 */
+		if (!chcr_init_tcb_fields(tx_info))
+			tx_info->connection_state = KTLS_CONN_SET_TCB_REQ;
+		break;
+
+	case KTLS_CONN_SET_TCB_REQ:
+		/* The set_tcb request is sent from the RX handler on the act
+		 * open rpl, so KTLS_CONN_ACT_OPEN_REQ may be requested after
+		 * the connection state is already KTLS_CONN_SET_TCB_REQ.
+		 * Ignore the update unless it is the greater state. Also
+		 * check whether l2t is valid and move to the TX ready state.
+		 */
+		if (new_state > tx_info->connection_state)
+			tx_info->connection_state = new_state;
+		/* FALLTHRU */
+	case KTLS_CONN_SET_TCB_RPL:
+		/* This state is set by the RX handler, so the RX response may
+		 * already have arrived while TX is still trying to set a lower
+		 * state such as KTLS_CONN_ACT_OPEN_REQ or KTLS_CONN_SET_TCB_REQ;
+		 * ignore those updates.
+		 * If the l2t state is valid, move to the TX ready state.
+		 */
+		if (cxgb4_check_l2t_valid(tx_info->l2te))
+			tx_info->connection_state = KTLS_CONN_TX_READY;
+		break;
+
+	case KTLS_CONN_TX_READY:
+		/* nothing to be done here */
+		break;
+
+	default:
+		pr_err("unknown KTLS connection state\n");
+		break;
+	}
+	spin_unlock_irqrestore(&tx_info->lock, flags);
+
+	return tx_info->connection_state;
+}
 /*
  * chcr_ktls_act_open_req: creates TCB entry for ipv4 connection.
  * @sk - tcp socket.
@@ -86,6 +233,8 @@ static int chcr_setup_connection(struct sock *sk,
 	if (ret == NET_XMIT_DROP)
 		goto out;
 
+	/* update the connection state */
+	chcr_ktls_update_connection_state(tx_info, KTLS_CONN_ACT_OPEN_REQ);
 out:
 	return ret;
 }
@@ -191,6 +340,11 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
 	tx_info->prev_seq = start_offload_tcp_sn;
 	tx_info->tcp_start_seq_number = start_offload_tcp_sn;
 
+	/* save crypto keys */
+	ret = chcr_ktls_save_keys(tx_info, crypto_info, direction);
+	if (ret < 0)
+		goto out2;
+
 	/* get peer ip */
 	if (sk->sk_family == AF_INET ||
 	    (sk->sk_family == AF_INET6 && !sk->sk_ipv6only &&
@@ -274,4 +428,207 @@ void chcr_disable_ktls(struct adapter *adap)
 		netdev->tlsdev_ops = NULL;
 	}
 }
+
+/*
+ * chcr_write_cpl_set_tcb_ulp: update TCB values.
+ * The TCB is responsible for creating TCP headers, so all the related
+ * values should be correctly updated.
+ * @tx_info - driver-specific TLS info.
+ * @q - TX queue on which the packet is going out.
+ * @tid - TCB identifier.
+ * @pos - current position at which to start writing.
+ * @skb_ptr - whether we are writing into an skb buffer.
+ * @word - TCB word.
+ * @mask - mask for the TCB word.
+ * @val - value for the TCB word.
+ * @reply - set to 1 if a TP response is expected.
+ * return - next position to write.
+ */
+static void *chcr_write_cpl_set_tcb_ulp(struct chcr_ktls_info *tx_info,
+					struct sge_eth_txq *q, u32 tid,
+					void *pos, bool skb_ptr, u16 word,
+					u64 mask, u64 val, u32 reply)
+{
+	struct cpl_set_tcb_field_core *cpl;
+	struct ulptx_idata *idata;
+	struct ulp_txpkt *txpkt;
+	void *save_pos = NULL;
+	u8 buf[48] = {0};
+	int left;
+
+	if (!skb_ptr) {
+		left = (void *)q->q.stat - pos;
+		if (unlikely(left < CHCR_SET_TCB_FIELD_LEN)) {
+			if (!left) {
+				pos = q->q.desc;
+			} else {
+				save_pos = pos;
+				pos = buf;
+			}
+		}
+	}
+	/* ULP_TXPKT */
+	txpkt = pos;
+	txpkt->cmd_dest = htonl(ULPTX_CMD_V(ULP_TX_PKT) | ULP_TXPKT_DEST_V(0));
+	txpkt->len = htonl(DIV_ROUND_UP(CHCR_SET_TCB_FIELD_LEN, 16));
+
+	/* ULPTX_IDATA sub-command */
+	idata = (struct ulptx_idata *)(txpkt + 1);
+	idata->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM));
+	idata->len = htonl(sizeof(*cpl));
+	pos = idata + 1;
+
+	cpl = pos;
+	/* CPL_SET_TCB_FIELD */
+	OPCODE_TID(cpl) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
+	cpl->reply_ctrl = htons(QUEUENO_V(tx_info->rx_qid) |
+			NO_REPLY_V(!reply));
+	cpl->word_cookie = htons(TCB_WORD_V(word));
+	cpl->mask = cpu_to_be64(mask);
+	cpl->val = cpu_to_be64(val);
+
+	/* ULPTX_NOOP */
+	idata = (struct ulptx_idata *)(cpl + 1);
+	idata->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_NOOP));
+	idata->len = htonl(0);
+	/* If it is not an skb buffer, check whether it was written into the
+	 * local buffer and, if so, copy it to the TX queue.
+	 */
+	if (!skb_ptr) {
+		if (save_pos) {
+			pos = chcr_copy_to_txd(buf, &q->q, save_pos,
+					       CHCR_SET_TCB_FIELD_LEN);
+		} else {
+			/* check again if we are at the end of the queue */
+			if (left == CHCR_SET_TCB_FIELD_LEN)
+				pos = q->q.desc;
+			else
+				pos = idata + 1;
+		}
+	} else {
+		pos = idata + 1;
+	}
+
+	return pos;
+}
+
+/*
+ * chcr_init_tcb_fields: Initialize TCB fields so that TP will be able to
+ * handle NIC TLS packets.
+ * @tx_info - driver-specific TLS info.
+ * return: NET_TX_OK/NET_XMIT_DROP
+ */
+static int chcr_init_tcb_fields(struct chcr_ktls_info *tx_info)
+{
+	u32 l2t_idx = tx_info->l2te->idx;
+	u32 tid = tx_info->tid, len;
+	struct fw_ulptx_wr *wr;
+	struct sk_buff *skb;
+	int ret = 0;
+	void *pos;
+
+	/* As part of initialization, 4 TCB updates are sent */
+	len = sizeof(*wr) + 4 * roundup(CHCR_SET_TCB_FIELD_LEN, 16);
+
+	skb = alloc_skb(len, GFP_KERNEL);
+	if (unlikely(!skb))
+		return -ENOMEM;
+	/* mark it a control pkt */
+	set_wr_txq(skb, CPL_PRIORITY_CONTROL, tx_info->port_id);
+
+	wr = __skb_put_zero(skb, len);
+	/* FW_ULPTX_WR */
+	wr->op_to_compl = htonl(FW_WR_OP_V(FW_ULPTX_WR));
+	wr->flowid_len16 = htonl(FW_ULPTX_WR_DATA_F | FW_WR_LEN16_V(len / 16));
+	wr->cookie = 0;
+	pos = wr + 1;
+	pos  = chcr_write_cpl_set_tcb_ulp(tx_info, NULL, tid, pos, 1,
+					  TCB_T_FLAGS_W,
+					  TCB_T_FLAGS_V(TF_CORE_BYPASS_F |
+							TF_NON_OFFLOAD_F),
+					  TCB_T_FLAGS_V(TF_CORE_BYPASS_F),
+					  0);
+	/* set SND_NXT_RAW and SND_UNA_RAW to 0 */
+	pos = chcr_write_cpl_set_tcb_ulp(tx_info, NULL, tid, pos, 1,
+					 TCB_SND_UNA_RAW_W,
+					 TCB_SND_NXT_RAW_V(TCB_SND_NXT_RAW_M) |
+					 TCB_SND_UNA_RAW_V(TCB_SND_UNA_RAW_M),
+					 TCB_SND_NXT_RAW_V(0) |
+					 TCB_SND_UNA_RAW_V(0), 0);
+	/* set SND_MAX_RAW to 0 */
+	pos = chcr_write_cpl_set_tcb_ulp(tx_info, NULL, tid, pos, 1,
+					 TCB_SND_MAX_RAW_W,
+					 TCB_SND_MAX_RAW_V(TCB_SND_MAX_RAW_M),
+					 TCB_SND_MAX_RAW_V(0), 0);
+	/* this is the last tcb field, set the reply bit for the confirmation */
+	chcr_write_cpl_set_tcb_ulp(tx_info, NULL, tid, pos, 1, TCB_L2T_IX_W,
+				   TCB_L2T_IX_V(TCB_L2T_IX_M),
+				   TCB_L2T_IX_V(l2t_idx), 1);
+
+	ret = cxgb4_ofld_send(tx_info->netdev, skb);
+
+	if (ret == NET_XMIT_DROP)
+		return ret;
+
+	return ret;
+}
+
+/*
+ * chcr_ktls_cpl_act_open_rpl: connection reply received from TP.
+ */
+int chcr_ktls_cpl_act_open_rpl(struct adapter *adap, unsigned char *input)
+{
+	const struct cpl_act_open_rpl *p = (void *)input;
+	struct chcr_ktls_info *tx_info = NULL;
+	unsigned int atid, tid, status;
+	struct tid_info *t;
+
+	tid = GET_TID(p);
+	status = AOPEN_STATUS_G(ntohl(p->atid_status));
+	atid = TID_TID_G(AOPEN_ATID_G(ntohl(p->atid_status)));
+
+	t = &adap->tids;
+	tx_info = lookup_atid(t, atid);
+
+	if (!tx_info || tx_info->atid != atid) {
+		pr_err("tx_info or atid is not correct\n");
+		return -1;
+	}
+
+	if (!status) {
+		tx_info->tid = tid;
+		cxgb4_insert_tid(t, tx_info, tx_info->tid, tx_info->ip_family);
+
+		cxgb4_free_atid(t, atid);
+		tx_info->atid = -1;
+		/* update the connection state */
+		chcr_ktls_update_connection_state(tx_info,
+						  KTLS_CONN_ACT_OPEN_RPL);
+	}
+	return 0;
+}
+
+/*
+ * chcr_ktls_cpl_set_tcb_rpl: TCB reply received from TP.
+ */
+int chcr_ktls_cpl_set_tcb_rpl(struct adapter *adap, unsigned char *input)
+{
+	const struct cpl_set_tcb_rpl *p = (void *)input;
+	struct chcr_ktls_info *tx_info = NULL;
+	struct tid_info *t;
+	u32 tid, status;
+
+	tid = GET_TID(p);
+	status = p->status;
+
+	t = &adap->tids;
+	tx_info = lookup_tid(t, tid);
+	if (!tx_info || tx_info->tid != tid) {
+		pr_err("tx_info or tid is not correct\n");
+		return -1;
+	}
+	/* update the connection state */
+	chcr_ktls_update_connection_state(tx_info, KTLS_CONN_SET_TCB_RPL);
+	return 0;
+}
 #endif /* CONFIG_CHELSIO_TLS_DEVICE */
diff --git a/drivers/crypto/chelsio/chcr_ktls.h b/drivers/crypto/chelsio/chcr_ktls.h
index c057d6fd867a..e51e18eef9fe 100644
--- a/drivers/crypto/chelsio/chcr_ktls.h
+++ b/drivers/crypto/chelsio/chcr_ktls.h
@@ -8,23 +8,38 @@
 #include <net/tls.h>
 #include "cxgb4.h"
 #include "t4_msg.h"
+#include "t4_tcb.h"
 #include "l2t.h"
 #include "chcr_common.h"
 
+#define CHCR_KTLS_KEY_CTX_LEN 16
+#define CHCR_SET_TCB_FIELD_LEN sizeof(struct cpl_set_tcb_field)
+
 enum chcr_ktls_conn_state {
 	KTLS_CONN_CLOSED,
+	KTLS_CONN_ACT_OPEN_REQ,
+	KTLS_CONN_ACT_OPEN_RPL,
+	KTLS_CONN_SET_TCB_REQ,
+	KTLS_CONN_SET_TCB_RPL,
+	KTLS_CONN_TX_READY,
 };
 
 struct chcr_ktls_info {
 	struct sock *sk;
 	spinlock_t lock; /* state machine lock */
+	struct ktls_key_ctx key_ctx;
 	struct adapter *adap;
 	struct l2t_entry *l2te;
 	struct net_device *netdev;
+	u64 iv;
+	u64 record_no;
 	int tid;
 	int atid;
 	int rx_qid;
+	u32 iv_size;
 	u32 prev_seq;
+	u32 salt_size;
+	u32 key_ctx_len;
 	u32 tcp_start_seq_number;
 	enum chcr_ktls_conn_state connection_state;
 	u8 tx_chan;
@@ -59,5 +74,7 @@ static inline int chcr_get_first_rx_qid(struct adapter *adap)
 
 void chcr_enable_ktls(struct adapter *adap);
 void chcr_disable_ktls(struct adapter *adap);
+int chcr_ktls_cpl_act_open_rpl(struct adapter *adap, unsigned char *input);
+int chcr_ktls_cpl_set_tcb_rpl(struct adapter *adap, unsigned char *input);
 #endif /* CONFIG_CHELSIO_TLS_DEVICE */
 #endif /* __CHCR_KTLS_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
index 1a16449e9deb..b66e0332dbb3 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
@@ -700,6 +700,17 @@ static char l2e_state(const struct l2t_entry *e)
 	}
 }
 
+bool cxgb4_check_l2t_valid(struct l2t_entry *e)
+{
+	bool valid;
+
+	spin_lock(&e->lock);
+	valid = (e->state == L2T_STATE_VALID);
+	spin_unlock(&e->lock);
+	return valid;
+}
+EXPORT_SYMBOL(cxgb4_check_l2t_valid);
+
 static int l2t_seq_show(struct seq_file *seq, void *v)
 {
 	if (v == SEQ_START_TOKEN)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.h b/drivers/net/ethernet/chelsio/cxgb4/l2t.h
index 79665bd8f881..340fecb28a13 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.h
@@ -122,6 +122,7 @@ struct l2t_entry *t4_l2t_alloc_switching(struct adapter *adap, u16 vlan,
 					 u8 port, u8 *dmac);
 struct l2t_data *t4_init_l2t(unsigned int l2t_start, unsigned int l2t_end);
 void do_l2t_write_rpl(struct adapter *p, const struct cpl_l2t_write_rpl *rpl);
+bool cxgb4_check_l2t_valid(struct l2t_entry *e);
 
 extern const struct file_operations t4_l2t_fops;
 #endif  /* __CXGB4_L2T_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index 575c6abcdae7..e9c775f1dd3e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -705,6 +705,14 @@ struct cpl_set_tcb_field {
 	__be64 val;
 };
 
+struct cpl_set_tcb_field_core {
+	union opcode_tid ot;
+	__be16 reply_ctrl;
+	__be16 word_cookie;
+	__be64 mask;
+	__be64 val;
+};
+
 /* cpl_set_tcb_field.word_cookie fields */
 #define TCB_WORD_S	0
 #define TCB_WORD_V(x)	((x) << TCB_WORD_S)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h b/drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h
index 1b9afb192f7f..c4a99cc15102 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h
@@ -35,6 +35,11 @@
 #ifndef __T4_TCB_H
 #define __T4_TCB_H
 
+#define TCB_L2T_IX_W    0
+#define TCB_L2T_IX_S    12
+#define TCB_L2T_IX_M    0xfffULL
+#define TCB_L2T_IX_V(x) ((x) << TCB_L2T_IX_S)
+
 #define TCB_SMAC_SEL_W		0
 #define TCB_SMAC_SEL_S		24
 #define TCB_SMAC_SEL_M		0xffULL
@@ -45,11 +50,6 @@
 #define TCB_T_FLAGS_M		0xffffffffffffffffULL
 #define TCB_T_FLAGS_V(x)	((__u64)(x) << TCB_T_FLAGS_S)
 
-#define TCB_RQ_START_W		30
-#define TCB_RQ_START_S		0
-#define TCB_RQ_START_M		0x3ffffffULL
-#define TCB_RQ_START_V(x)	((x) << TCB_RQ_START_S)
-
 #define TF_CCTRL_ECE_S		60
 #define TF_CCTRL_CWR_S		61
 #define TF_CCTRL_RFR_S		62
@@ -69,13 +69,40 @@
 #define TCB_RTT_TS_RECENT_AGE_M		0xffffffffULL
 #define TCB_RTT_TS_RECENT_AGE_V(x)	((x) << TCB_RTT_TS_RECENT_AGE_S)
 
-#define TCB_SND_UNA_RAW_W	10
+#define TCB_SND_UNA_RAW_W    10
+#define TCB_SND_UNA_RAW_S    0
+#define TCB_SND_UNA_RAW_M    0xfffffffULL
+#define TCB_SND_UNA_RAW_V(x) ((x) << TCB_SND_UNA_RAW_S)
+
+#define TCB_SND_NXT_RAW_W    10
+#define TCB_SND_NXT_RAW_S    28
+#define TCB_SND_NXT_RAW_M    0xfffffffULL
+#define TCB_SND_NXT_RAW_V(x) ((x) << TCB_SND_NXT_RAW_S)
+
+#define TCB_SND_MAX_RAW_W    11
+#define TCB_SND_MAX_RAW_S    24
+#define TCB_SND_MAX_RAW_M    0xfffffffULL
+#define TCB_SND_MAX_RAW_V(x) ((x) << TCB_SND_MAX_RAW_S)
+
 #define TCB_RX_FRAG2_PTR_RAW_W	27
 #define TCB_RX_FRAG3_LEN_RAW_W	29
 #define TCB_RX_FRAG3_START_IDX_OFFSET_RAW_W	30
 #define TCB_PDU_HDR_LEN_W	31
 
+#define TCB_RQ_START_W         30
+#define TCB_RQ_START_S         0
+#define TCB_RQ_START_M         0x3ffffffULL
+#define TCB_RQ_START_V(x)      ((x) << TCB_RQ_START_S)
+
 #define TF_RX_PDU_OUT_S		49
 #define TF_RX_PDU_OUT_V(x)	((__u64)(x) << TF_RX_PDU_OUT_S)
 
+#define TF_CORE_BYPASS_S    63
+#define TF_CORE_BYPASS_V(x) ((__u64)(x) << TF_CORE_BYPASS_S)
+#define TF_CORE_BYPASS_F TF_CORE_BYPASS_V(1)
+
+#define TF_NON_OFFLOAD_S    1
+#define TF_NON_OFFLOAD_V(x) ((x) << TF_NON_OFFLOAD_S)
+#define TF_NON_OFFLOAD_F TF_NON_OFFLOAD_V(1)
+
 #endif /* __T4_TCB_H */
-- 
2.25.0.191.gde93cc1
