lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Date:   Thu, 4 Jul 2019 14:35:21 -0700
From:   Jakub Kicinski <jakub.kicinski@...ronome.com>
To:     Saeed Mahameed <saeedm@...lanox.com>
Cc:     "David S. Miller" <davem@...emloft.net>,
        "netdev@...r.kernel.org" <netdev@...r.kernel.org>,
        Tariq Toukan <tariqt@...lanox.com>,
        Eran Ben Elisha <eranbe@...lanox.com>,
        Boris Pismenny <borisp@...lanox.com>
Subject: Re: [net-next 14/14] net/mlx5e: Add kTLS TX HW offload support

On Thu, 4 Jul 2019 18:16:15 +0000, Saeed Mahameed wrote:
> +struct sk_buff *mlx5e_ktls_handle_tx_skb(struct net_device *netdev,
> +					 struct mlx5e_txqsq *sq,
> +					 struct sk_buff *skb,
> +					 struct mlx5e_tx_wqe **wqe, u16 *pi)
> +{
> +	struct mlx5e_ktls_offload_context_tx *priv_tx;
> +	struct mlx5e_sq_stats *stats = sq->stats;
> +	struct mlx5_wqe_ctrl_seg *cseg;
> +	struct tls_context *tls_ctx;
> +	int datalen;
> +	u32 seq;
> +
> +	if (!skb->sk || !tls_is_sk_tx_device_offloaded(skb->sk))
> +		goto out;
> +
> +	datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
> +	if (!datalen)
> +		goto out;
> +
> +	tls_ctx = tls_get_ctx(skb->sk);
> +	if (unlikely(tls_ctx->netdev != netdev))
> +		goto out;

If a frame for a different device leaked through the stack, we need to
fix the stack.  Also you should drop the frame in this case, the device
will not encrypt it.

> +	priv_tx = mlx5e_get_ktls_tx_priv_ctx(tls_ctx);
> +
> +	if (unlikely(mlx5e_ktls_tx_offload_test_and_clear_pending(priv_tx))) {
> +		mlx5e_ktls_tx_post_param_wqes(sq, priv_tx, false, false);
> +		*wqe = mlx5e_sq_fetch_wqe(sq, sizeof(**wqe), pi);
> +		stats->ktls_ctx++;
> +	}
> +
> +	seq = ntohl(tcp_hdr(skb)->seq);
> +	if (unlikely(priv_tx->expected_seq != seq)) {
> +		skb = mlx5e_ktls_tx_handle_ooo(priv_tx, sq, skb, seq);
> +		if (unlikely(!skb))
> +			goto out;
> +		*wqe = mlx5e_sq_fetch_wqe(sq, sizeof(**wqe), pi);
> +	}
> +
> +	priv_tx->expected_seq = seq + datalen;
> +
> +	cseg = &(*wqe)->ctrl;
> +	cseg->imm = cpu_to_be32(priv_tx->tisn);
> +
> +	stats->ktls_enc_packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
> +	stats->ktls_enc_bytes   += datalen;
> +
> +out:
> +	return skb;
> +}

> diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
> index 483d321d2151..6854f132d505 100644
> --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
> +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
> @@ -50,6 +50,15 @@ static const struct counter_desc sw_stats_desc[] = {
>  #ifdef CONFIG_MLX5_EN_TLS
>  	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_ooo) },
>  	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_resync_bytes) },
> +
> +	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_ktls_ooo) },
> +	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_ktls_ooo_drop_no_sync_data) },
> +	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_ktls_ooo_drop_bypass_req) },
> +	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_ktls_ooo_dump_bytes) },
> +	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_ktls_ooo_dump_packets) },
> +	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_ktls_enc_packets) },
> +	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_ktls_enc_bytes) },
> +	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_ktls_ctx) },
>  #endif
>  
>  	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_packets) },
> @@ -218,6 +227,16 @@ static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
>  #ifdef CONFIG_MLX5_EN_TLS
>  			s->tx_tls_ooo		+= sq_stats->tls_ooo;
>  			s->tx_tls_resync_bytes	+= sq_stats->tls_resync_bytes;
> +
> +			s->tx_ktls_enc_packets           += sq_stats->ktls_enc_packets;
> +			s->tx_ktls_enc_bytes             += sq_stats->ktls_enc_bytes;
> +			s->tx_ktls_ooo                   += sq_stats->ktls_ooo;
> +			s->tx_ktls_ooo_drop_no_sync_data +=
> +				sq_stats->ktls_ooo_drop_no_sync_data;
> +			s->tx_ktls_ooo_drop_bypass_req   += sq_stats->ktls_ooo_drop_bypass_req;
> +			s->tx_ktls_ooo_dump_bytes        += sq_stats->ktls_ooo_dump_bytes;
> +			s->tx_ktls_ooo_dump_packets      += sq_stats->ktls_ooo_dump_packets;
> +			s->tx_ktls_ctx                   += sq_stats->ktls_ctx;
>  #endif
>  			s->tx_cqes		+= sq_stats->cqes;
>  		}
> @@ -1238,6 +1257,16 @@ static const struct counter_desc sq_stats_desc[] = {
>  	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
>  	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, added_vlan_packets) },
>  	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nop) },
> +#ifdef CONFIG_MLX5_EN_TLS
> +	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, ktls_ooo) },
> +	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, ktls_ooo_drop_no_sync_data) },
> +	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, ktls_ooo_drop_bypass_req) },
> +	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, ktls_ooo_dump_bytes) },
> +	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, ktls_ooo_dump_packets) },
> +	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, ktls_enc_packets) },
> +	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, ktls_enc_bytes) },
> +	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, ktls_ctx) },
> +#endif
>  	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_none) },
>  	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, stopped) },
>  	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, dropped) },

For the stats as discussed please use common names, and preferably
add yours to the doc too, so we have them documented.  Unless the 
stat seems very specific to your implementation, perhaps.

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ