lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <CAADnVQLXyA__zdDSiTdhaw=dXyfgmkr--cH068JvNK=JAYvRDA@mail.gmail.com>
Date: Thu, 10 Oct 2024 09:21:48 -0700
From: Alexei Starovoitov <alexei.starovoitov@...il.com>
To: "D. Wythe" <alibuda@...ux.alibaba.com>
Cc: kgraul@...ux.ibm.com, wenjia@...ux.ibm.com, jaka@...ux.ibm.com, 
	wintera@...ux.ibm.com, guwen@...ux.alibaba.com, 
	Alexei Starovoitov <ast@...nel.org>, Jakub Kicinski <kuba@...nel.org>, 
	"David S. Miller" <davem@...emloft.net>, Network Development <netdev@...r.kernel.org>, 
	linux-s390 <linux-s390@...r.kernel.org>, linux-rdma@...r.kernel.org, 
	Tony Lu <tonylu@...ux.alibaba.com>, Paolo Abeni <pabeni@...hat.com>, 
	Eric Dumazet <edumazet@...gle.com>, bpf <bpf@...r.kernel.org>
Subject: Re: [PATCH net-next] net/smc: Introduce a hook to modify syn_smc at runtime

On Wed, Oct 9, 2024 at 8:58 PM D. Wythe <alibuda@...ux.alibaba.com> wrote:
>
>
> +__bpf_hook_start();
> +
> +__weak noinline int select_syn_smc(const struct sock *sk, struct sockaddr *peer)
> +{
> +       return 1;
> +}
> +
> +__bpf_hook_end();
> +
>  int smc_nl_dump_hs_limitation(struct sk_buff *skb, struct netlink_callback *cb)
>  {
>         struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb);
> @@ -156,19 +165,43 @@ static struct sock *smc_tcp_syn_recv_sock(const struct sock *sk,
>         return NULL;
>  }
>
> -static bool smc_hs_congested(const struct sock *sk)
> +static void smc_openreq_init(struct request_sock *req,
> +                            const struct tcp_options_received *rx_opt,
> +                            struct sk_buff *skb, const struct sock *sk)
>  {
> +       struct inet_request_sock *ireq = inet_rsk(req);
> +       struct sockaddr_storage rmt_sockaddr = {};
>         const struct smc_sock *smc;
>
>         smc = smc_clcsock_user_data(sk);
>
>         if (!smc)
> -               return true;
> +               return;
>
> -       if (workqueue_congested(WORK_CPU_UNBOUND, smc_hs_wq))
> -               return true;
> +       if (smc->limit_smc_hs && workqueue_congested(WORK_CPU_UNBOUND, smc_hs_wq))
> +               goto out_no_smc;
>
> -       return false;
> +       rmt_sockaddr.ss_family = sk->sk_family;
> +
> +       if (rmt_sockaddr.ss_family == AF_INET) {
> +               struct sockaddr_in *rmt4_sockaddr =  (struct sockaddr_in *)&rmt_sockaddr;
> +
> +               rmt4_sockaddr->sin_addr.s_addr = ireq->ir_rmt_addr;
> +               rmt4_sockaddr->sin_port = ireq->ir_rmt_port;
> +#if IS_ENABLED(CONFIG_IPV6)
> +       } else {
> +               struct sockaddr_in6 *rmt6_sockaddr =  (struct sockaddr_in6 *)&rmt_sockaddr;
> +
> +               rmt6_sockaddr->sin6_addr = ireq->ir_v6_rmt_addr;
> +               rmt6_sockaddr->sin6_port = ireq->ir_rmt_port;
> +#endif /* CONFIG_IPV6 */
> +       }
> +
> +       ireq->smc_ok = select_syn_smc(sk, (struct sockaddr *)&rmt_sockaddr);
> +       return;
> +out_no_smc:
> +       ireq->smc_ok = 0;
> +       return;
>  }
>
>  struct smc_hashinfo smc_v4_hashinfo = {
> @@ -1671,7 +1704,7 @@ int smc_connect(struct socket *sock, struct sockaddr *addr,
>         }
>
>         smc_copy_sock_settings_to_clc(smc);
> -       tcp_sk(smc->clcsock->sk)->syn_smc = 1;
> +       tcp_sk(smc->clcsock->sk)->syn_smc = select_syn_smc(sk, addr);
>         if (smc->connect_nonblock) {
>                 rc = -EALREADY;
>                 goto out;
> @@ -2650,8 +2683,7 @@ int smc_listen(struct socket *sock, int backlog)
>
>         inet_csk(smc->clcsock->sk)->icsk_af_ops = &smc->af_ops;
>
> -       if (smc->limit_smc_hs)
> -               tcp_sk(smc->clcsock->sk)->smc_hs_congested = smc_hs_congested;
> +       tcp_sk(smc->clcsock->sk)->smc_openreq_init = smc_openreq_init;
>
>         rc = kernel_listen(smc->clcsock, backlog);
>         if (rc) {
> @@ -3475,6 +3507,24 @@ static void __net_exit smc_net_stat_exit(struct net *net)
>         .exit = smc_net_stat_exit,
>  };
>
> +#if IS_ENABLED(CONFIG_BPF_SYSCALL)
> +BTF_SET8_START(bpf_smc_fmodret_ids)
> +BTF_ID_FLAGS(func, select_syn_smc)
> +BTF_SET8_END(bpf_smc_fmodret_ids)
> +
> +static const struct btf_kfunc_id_set bpf_smc_fmodret_set = {
> +       .owner = THIS_MODULE,
> +       .set   = &bpf_smc_fmodret_ids,
> +};
> +
> +static int bpf_smc_kfunc_init(void)
> +{
> +       return register_btf_fmodret_id_set(&bpf_smc_fmodret_set);
> +}

fmodret was the approach that hid-bpf took initially,
but eventually they removed it entirely and switched to the struct_ops approach.
Please learn from that lesson.
Use struct_ops from the beginning.

I recently gave a presentation explaining the motivation behind
struct_ops and offering tips on how to extend the kernel.
TL;DR: step one is to design the extension _without_ bpf.
The interface should be usable by kernel modules.
Then, once you have a *_ops-style API in place,
bpf programs will plug in without extra work.

Slides:
https://github.com/4ast/docs/blob/main/BPF%20struct-ops.pdf

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ