Message-ID: <CADvbK_eObVPH9GJfkpCsHt1obg6sDY0jQ0cpA=c6yyjRiQEaYw@mail.gmail.com>
Date: Thu, 23 Oct 2025 17:57:40 -0400
From: Xin Long <lucien.xin@...il.com>
To: Kuniyuki Iwashima <kuniyu@...gle.com>
Cc: davem@...emloft.net, edumazet@...gle.com, horms@...nel.org,
kuba@...nel.org, kuni1840@...il.com, linux-sctp@...r.kernel.org,
marcelo.leitner@...il.com, netdev@...r.kernel.org, pabeni@...hat.com
Subject: Re: [PATCH v2 net-next 4/8] net: Add sk_clone().
On Thu, Oct 23, 2025 at 4:48 PM Kuniyuki Iwashima <kuniyu@...gle.com> wrote:
>
> From: Xin Long <lucien.xin@...il.com>
> Date: Thu, 23 Oct 2025 15:55:57 -0400
> > On Thu, Oct 23, 2025 at 3:22 PM Kuniyuki Iwashima <kuniyu@...gle.com> wrote:
> > >
> > > On Thu, Oct 23, 2025 at 12:08 PM Xin Long <lucien.xin@...il.com> wrote:
> > > >
> > > > On Wed, Oct 22, 2025 at 6:57 PM Kuniyuki Iwashima <kuniyu@...gle.com> wrote:
> > > > >
> > > > > On Wed, Oct 22, 2025 at 3:04 PM Xin Long <lucien.xin@...il.com> wrote:
> > > > > >
> > > > > > On Wed, Oct 22, 2025 at 5:17 PM Kuniyuki Iwashima <kuniyu@...gle.com> wrote:
> > > > > > >
> > > > > > > sctp_accept() will use sk_clone_lock(), but it will be called
> > > > > > > with the parent socket locked, and sctp_migrate() acquires the
> > > > > > > child lock later.
> > > > > > >
> > > > > > > Let's add a no-lock version of sk_clone_lock().
> > > > > > >
> > > > > > > Note that lockdep complains if we simply use bh_lock_sock_nested().
> > > > > > >
> > > > > > > Signed-off-by: Kuniyuki Iwashima <kuniyu@...gle.com>
> > > > > > > ---
> > > > > > > include/net/sock.h | 7 ++++++-
> > > > > > > net/core/sock.c | 21 ++++++++++++++-------
> > > > > > > 2 files changed, 20 insertions(+), 8 deletions(-)
> > > > > > >
> > > > > > > diff --git a/include/net/sock.h b/include/net/sock.h
> > > > > > > index 01ce231603db..c7e58b8e8a90 100644
> > > > > > > --- a/include/net/sock.h
> > > > > > > +++ b/include/net/sock.h
> > > > > > > @@ -1822,7 +1822,12 @@ struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
> > > > > > > void sk_free(struct sock *sk);
> > > > > > > void sk_net_refcnt_upgrade(struct sock *sk);
> > > > > > > void sk_destruct(struct sock *sk);
> > > > > > > -struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority);
> > > > > > > +struct sock *sk_clone(const struct sock *sk, const gfp_t priority, bool lock);
> > > > > > > +
> > > > > > > +static inline struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
> > > > > > > +{
> > > > > > > + return sk_clone(sk, priority, true);
> > > > > > > +}
> > > > > > >
> > > > > > > struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
> > > > > > > gfp_t priority);
> > > > > > > diff --git a/net/core/sock.c b/net/core/sock.c
> > > > > > > index a99132cc0965..0a3021f8f8c1 100644
> > > > > > > --- a/net/core/sock.c
> > > > > > > +++ b/net/core/sock.c
> > > > > > > @@ -2462,13 +2462,16 @@ static void sk_init_common(struct sock *sk)
> > > > > > > }
> > > > > > >
> > > > > > > /**
> > > > > > > - * sk_clone_lock - clone a socket, and lock its clone
> > > > > > > - * @sk: the socket to clone
> > > > > > > - * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
> > > > > > > + * sk_clone - clone a socket
> > > > > > > + * @sk: the socket to clone
> > > > > > > + * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
> > > > > > > + * @lock: if true, lock the cloned sk
> > > > > > > *
> > > > > > > - * Caller must unlock socket even in error path (bh_unlock_sock(newsk))
> > > > > > > + * If @lock is true, the clone is locked by bh_lock_sock(), and
> > > > > > > + * caller must unlock socket even in error path by bh_unlock_sock().
> > > > > > > */
> > > > > > > -struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
> > > > > > > +struct sock *sk_clone(const struct sock *sk, const gfp_t priority,
> > > > > > > + bool lock)
> > > > > > > {
> > > > > > > struct proto *prot = READ_ONCE(sk->sk_prot);
> > > > > > > struct sk_filter *filter;
> > > > > > > @@ -2497,9 +2500,13 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
> > > > > > > __netns_tracker_alloc(sock_net(newsk), &newsk->ns_tracker,
> > > > > > > false, priority);
> > > > > > > }
> > > > > > > +
> > > > > > > sk_node_init(&newsk->sk_node);
> > > > > > > sock_lock_init(newsk);
> > > > > > > - bh_lock_sock(newsk);
> > > > > > > +
> > > > > > > + if (lock)
> > > > > > > + bh_lock_sock(newsk);
> > > > > > > +
> > > > > > Does it really need bh_lock_sock() that early? If not, maybe we can move
> > > > > > it out of sk_clone_lock() and rename sk_clone_lock() back to sk_clone()?
> > > > >
> > > > > I think sk_clone_lock() and its leaf functions do not have
> > > > > lockdep_sock_is_held(); probably the closest one is
> > > > > security_inet_csk_clone(), which requires lock_sock() for
> > > > > bpf_setsockopt(), though this can be easily adjusted.
> > > > > (see bpf_lsm_locked_sockopt_hooks)
> > > > >
> > > > Right.
> > > >
> > > > > My only concern would be that moving bh_lock_sock() there will
> > > > > introduce one cache line miss.
> > > > I think it’s negligible, and it’s not even on the data path, though others
> > > > may have different opinions.
> > >
> > > For SCTP, yes, but I'd avoid it for TCP.
> > Okay, not a problem; it just doesn't look common to pass such a parameter.
>
> Another option would be to add a check like this?
>
> ---8<---
> diff --git a/include/net/sock.h b/include/net/sock.h
> index c7e58b8e8a90..e708b70b04da 100644
> --- a/include/net/sock.h
> +++ b/include/net/sock.h
> @@ -2904,6 +2904,12 @@ static inline bool sk_is_inet(const struct sock *sk)
> return family == AF_INET || family == AF_INET6;
> }
>
> +static inline bool sk_is_sctp(const struct sock *sk)
> +{
> + return IS_ENABLED(CONFIG_SCTP) &&
> + sk->sk_protocol == IPPROTO_SCTP;
> +}
> +
Oh, better not, I'm actually planning to use sk_clone() in quic_accept() :D
https://github.com/lxin/quic/blob/main/modules/net/quic/socket.c#L1421
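
FWIW, the caller-side pattern I have in mind is roughly like below (just a
sketch with illustrative names, not the actual quic_accept() code):

	/* the parent sk is already held by lock_sock() in accept() */
	newsk = sk_clone(sk, GFP_KERNEL, false);
	if (!newsk)
		goto out;

	/* take the child lock only when migrating state to it; this may
	 * need lock_sock_nested(newsk, SINGLE_DEPTH_NESTING) since the
	 * parent lock is still held.
	 */
	lock_sock(newsk);
	/* ... migrate state from sk to newsk ... */
	release_sock(newsk);

so the clone itself doesn't need to come back locked.
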
> static inline bool sk_is_tcp(const struct sock *sk)
> {
> return sk_is_inet(sk) &&
> diff --git a/net/core/sock.c b/net/core/sock.c
> index 0a3021f8f8c1..ed5f36c6f33e 100644
> --- a/net/core/sock.c
> +++ b/net/core/sock.c
> @@ -2470,10 +2470,10 @@ static void sk_init_common(struct sock *sk)
> * If @lock is true, the clone is locked by bh_lock_sock(), and
> * caller must unlock socket even in error path by bh_unlock_sock().
> */
> -struct sock *sk_clone(const struct sock *sk, const gfp_t priority,
> - bool lock)
> +struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
> {
> struct proto *prot = READ_ONCE(sk->sk_prot);
> + bool lock = !sk_is_sctp(sk);
> struct sk_filter *filter;
> bool is_charged = true;
> struct sock *newsk;
> @@ -2597,7 +2597,8 @@ struct sock *sk_clone(const struct sock *sk, const gfp_t priority,
> * destructor and make plain sk_free()
> */
> newsk->sk_destruct = NULL;
> - bh_unlock_sock(newsk);
> + if (lock)
> + bh_unlock_sock(newsk);
> sk_free(newsk);
> newsk = NULL;
> goto out;
> ---8<---
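
Also, just to illustrate the concern with checking the protocol inside
sk_clone(): every new user of the unlocked clone would need its own helper
there, e.g. something like the hypothetical check below once QUIC comes in:

	bool lock = !sk_is_sctp(sk) && !sk_is_quic(sk);

so a protocol check there wouldn't really scale.
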
>
>
> >
> > There is also a bh_unlock_sock(newsk) in the free path in sk_clone();
> > does it also need an 'if (lock)' check?
>
> Good catch. Will fix it in v3.
>
> Thanks!