Message-ID: <ed4c056d-03ee-d825-845d-a9e5b4d58c26@linux.ibm.com>
Date:   Wed, 9 Feb 2022 17:02:32 +0100
From:   Karsten Graul <kgraul@...ux.ibm.com>
To:     "D. Wythe" <alibuda@...ux.alibaba.com>
Cc:     kuba@...nel.org, davem@...emloft.net, netdev@...r.kernel.org,
        linux-s390@...r.kernel.org, linux-rdma@...r.kernel.org
Subject: Re: [PATCH net-next v6 2/5] net/smc: Limit backlog connections

On 09/02/2022 15:11, D. Wythe wrote:
> +static struct sock *smc_tcp_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
> +					  struct request_sock *req,
> +					  struct dst_entry *dst,
> +					  struct request_sock *req_unhash,
> +					  bool *own_req)
> +{
> +	struct smc_sock *smc;
> +
> +	smc = (struct smc_sock *)((uintptr_t)sk->sk_user_data & ~SK_USER_DATA_NOCOPY);

Did you run checkpatch.pl for these patches? To me, this and other lines look
longer than 80 characters.
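
(For reference, running something like

	./scripts/checkpatch.pl --strict --max-line-length=80 <patch file>

from the top of the kernel tree, or piping the commit in with
'git format-patch -1 --stdout | ./scripts/checkpatch.pl --strict -',
should flag those lines; the exact options here are just a suggestion.)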

> +
> +	if (READ_ONCE(sk->sk_ack_backlog) + atomic_read(&smc->smc_pendings) >
> +				sk->sk_max_ack_backlog)
> +		goto drop;
> +
> +	if (sk_acceptq_is_full(&smc->sk)) {
> +		NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
> +		goto drop;
> +	}
> +
> +	/* passthrough to origin syn recv sock fct */
> +	return smc->ori_af_ops->syn_recv_sock(sk, skb, req, dst, req_unhash, own_req);
> +
> +drop:
> +	dst_release(dst);
> +	tcp_listendrop(sk);
> +	return NULL;
> +}
> +
>  static struct smc_hashinfo smc_v4_hashinfo = {
>  	.lock = __RW_LOCK_UNLOCKED(smc_v4_hashinfo.lock),
>  };
> @@ -1595,6 +1623,9 @@ static void smc_listen_out(struct smc_sock *new_smc)
>  	struct smc_sock *lsmc = new_smc->listen_smc;
>  	struct sock *newsmcsk = &new_smc->sk;
>  
> +	if (tcp_sk(new_smc->clcsock->sk)->syn_smc)
> +		atomic_dec(&lsmc->smc_pendings);
> +
>  	if (lsmc->sk.sk_state == SMC_LISTEN) {
>  		lock_sock_nested(&lsmc->sk, SINGLE_DEPTH_NESTING);
>  		smc_accept_enqueue(&lsmc->sk, newsmcsk);
> @@ -2200,6 +2231,9 @@ static void smc_tcp_listen_work(struct work_struct *work)
>  		if (!new_smc)
>  			continue;
>  
> +		if (tcp_sk(new_smc->clcsock->sk)->syn_smc)
> +			atomic_inc(&lsmc->smc_pendings);
> +
>  		new_smc->listen_smc = lsmc;
>  		new_smc->use_fallback = lsmc->use_fallback;
>  		new_smc->fallback_rsn = lsmc->fallback_rsn;
> @@ -2266,6 +2300,15 @@ static int smc_listen(struct socket *sock, int backlog)
>  	smc->clcsock->sk->sk_data_ready = smc_clcsock_data_ready;
>  	smc->clcsock->sk->sk_user_data =
>  		(void *)((uintptr_t)smc | SK_USER_DATA_NOCOPY);
> +
> +	/* save origin ops */
> +	smc->ori_af_ops = inet_csk(smc->clcsock->sk)->icsk_af_ops;
> +
> +	smc->af_ops = *smc->ori_af_ops;
> +	smc->af_ops.syn_recv_sock = smc_tcp_syn_recv_sock;
> +
> +	inet_csk(smc->clcsock->sk)->icsk_af_ops = &smc->af_ops;
> +
>  	rc = kernel_listen(smc->clcsock, backlog);
>  	if (rc) {
>  		smc->clcsock->sk->sk_data_ready = smc->clcsk_data_ready;
> diff --git a/net/smc/smc.h b/net/smc/smc.h
> index 37b2001..5e5e38d 100644
> --- a/net/smc/smc.h
> +++ b/net/smc/smc.h
> @@ -252,6 +252,10 @@ struct smc_sock {				/* smc sock container */
>  	bool			use_fallback;	/* fallback to tcp */
>  	int			fallback_rsn;	/* reason for fallback */
>  	u32			peer_diagnosis; /* decline reason from peer */
> +	atomic_t                smc_pendings;   /* pending smc connections */

I don't like the name smc_pendings, it's not very specific.
What about queued_smc_hs?
And for the comment: queued smc handshakes
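Roughly, just to sketch what I mean (same code as above, only the name and
comment changed):

	atomic_t		queued_smc_hs;	/* queued smc handshakes */

and correspondingly atomic_inc(&lsmc->queued_smc_hs) /
atomic_dec(&lsmc->queued_smc_hs) at the places above.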

> +	struct inet_connection_sock_af_ops		af_ops;
> +	const struct inet_connection_sock_af_ops	*ori_af_ops;
> +						/* origin af ops */
origin -> original
>  	int			sockopt_defer_accept;
>  						/* sockopt TCP_DEFER_ACCEPT
>  						 * value

-- 
Karsten
