Date:   Tue, 4 Apr 2023 15:36:44 +0000
From:   Chuck Lever III <chuck.lever@...cle.com>
To:     Jakub Kicinski <kuba@...nel.org>, Paolo Abeni <pabeni@...hat.com>,
        Eric Dumazet <edumazet@...gle.com>
CC:     Boris Pismenny <borisp@...dia.com>,
        "open list:NETWORKING [GENERAL]" <netdev@...r.kernel.org>,
        "kernel-tls-handshake@...ts.linux.dev" 
        <kernel-tls-handshake@...ts.linux.dev>,
        John Haxby <john.haxby@...cle.com>
Subject: Re: [PATCH v8 1/4] net/handshake: Create a NETLINK service for
 handling handshake requests



> On Apr 3, 2023, at 2:46 PM, Chuck Lever <cel@...nel.org> wrote:
> 
> diff --git a/net/handshake/request.c b/net/handshake/request.c
> new file mode 100644
> index 000000000000..0ef18a38c047
> --- /dev/null
> +++ b/net/handshake/request.c
> @@ -0,0 +1,344 @@
> +// SPDX-License-Identifier: GPL-2.0-only
> +/*
> + * Handshake request lifetime events
> + *
> + * Author: Chuck Lever <chuck.lever@...cle.com>
> + *
> + * Copyright (c) 2023, Oracle and/or its affiliates.
> + */
> +
> +#include <linux/types.h>
> +#include <linux/socket.h>
> +#include <linux/kernel.h>
> +#include <linux/module.h>
> +#include <linux/skbuff.h>
> +#include <linux/inet.h>
> +#include <linux/fdtable.h>
> +#include <linux/rhashtable.h>
> +
> +#include <net/sock.h>
> +#include <net/genetlink.h>
> +#include <net/netns/generic.h>
> +
> +#include <uapi/linux/handshake.h>
> +#include "handshake.h"
> +
> +#include <trace/events/handshake.h>
> +
> +/*
> + * We need both a handshake_req -> sock mapping, and a sock ->
> + * handshake_req mapping. Both are one-to-one.
> + *
> + * To avoid adding another pointer field to struct sock, net/handshake
> + * maintains a hash table, indexed by the memory address of @sock, to
> + * find the struct handshake_req outstanding for that socket. The
> + * reverse direction uses a simple pointer field in the handshake_req
> + * struct.
> + */
> +
> +static struct rhashtable handshake_rhashtbl ____cacheline_aligned_in_smp;
> +
> +static const struct rhashtable_params handshake_rhash_params = {
> +        .key_len = sizeof_field(struct handshake_req, hr_sk),
> +        .key_offset = offsetof(struct handshake_req, hr_sk),
> +        .head_offset = offsetof(struct handshake_req, hr_rhash),
> +        .automatic_shrinking = true,
> +};
> +
> +int handshake_req_hash_init(void)
> +{
> +        return rhashtable_init(&handshake_rhashtbl, &handshake_rhash_params);
> +}
> +
> +void handshake_req_hash_destroy(void)
> +{
> +        rhashtable_destroy(&handshake_rhashtbl);
> +}
> +
> +struct handshake_req *handshake_req_hash_lookup(struct sock *sk)
> +{
> +        return rhashtable_lookup_fast(&handshake_rhashtbl, &sk,
> +                                      handshake_rhash_params);
> +}
> +
> +static bool handshake_req_hash_add(struct handshake_req *req)
> +{
> +        int ret;
> +
> +        ret = rhashtable_lookup_insert_fast(&handshake_rhashtbl,
> +                                            &req->hr_rhash,
> +                                            handshake_rhash_params);
> +        return ret == 0;
> +}
> +
> +static void handshake_req_destroy(struct handshake_req *req)
> +{
> +        if (req->hr_proto->hp_destroy)
> +                req->hr_proto->hp_destroy(req);
> +        rhashtable_remove_fast(&handshake_rhashtbl, &req->hr_rhash,
> +                               handshake_rhash_params);
> +        kfree(req);
> +}
> +
> +static void handshake_sk_destruct(struct sock *sk)
> +{
> +        void (*sk_destruct)(struct sock *sk);
> +        struct handshake_req *req;
> +
> +        req = handshake_req_hash_lookup(sk);
> +        if (!req)
> +                return;
> +
> +        trace_handshake_destruct(sock_net(sk), req, sk);
> +        sk_destruct = req->hr_odestruct;
> +        handshake_req_destroy(req);
> +        if (sk_destruct)
> +                sk_destruct(sk);
> +}
> +
> +/**
> + * handshake_req_alloc - Allocate a handshake request
> + * @proto: security protocol
> + * @flags: memory allocation flags
> + *
> + * Returns an initialized handshake_req or NULL.
> + */
> +struct handshake_req *handshake_req_alloc(const struct handshake_proto *proto,
> +                                          gfp_t flags)
> +{
> +        struct handshake_req *req;
> +
> +        if (!proto)
> +                return NULL;
> +        if (proto->hp_handler_class <= HANDSHAKE_HANDLER_CLASS_NONE)
> +                return NULL;
> +        if (proto->hp_handler_class >= HANDSHAKE_HANDLER_CLASS_MAX)
> +                return NULL;
> +        if (!proto->hp_accept || !proto->hp_done)
> +                return NULL;
> +
> +        req = kzalloc(struct_size(req, hr_priv, proto->hp_privsize), flags);
> +        if (!req)
> +                return NULL;
> +
> +        INIT_LIST_HEAD(&req->hr_list);
> +        req->hr_proto = proto;
> +        return req;
> +}
> +EXPORT_SYMBOL(handshake_req_alloc);
> +
> +/**
> + * handshake_req_private - Get per-handshake private data
> + * @req: handshake arguments
> + *
> + */
> +void *handshake_req_private(struct handshake_req *req)
> +{
> +        return (void *)&req->hr_priv;
> +}
> +EXPORT_SYMBOL(handshake_req_private);
> +
> +static bool __add_pending_locked(struct handshake_net *hn,
> +                                 struct handshake_req *req)
> +{
> +        if (WARN_ON_ONCE(!list_empty(&req->hr_list)))
> +                return false;
> +        hn->hn_pending++;
> +        list_add_tail(&req->hr_list, &hn->hn_requests);
> +        return true;
> +}
> +
> +static void __remove_pending_locked(struct handshake_net *hn,
> +                                    struct handshake_req *req)
> +{
> +        hn->hn_pending--;
> +        list_del_init(&req->hr_list);
> +}
> +
> +/*
> + * Returns %true if the request was found on @net's pending list,
> + * otherwise %false.
> + *
> + * If @req was on a pending list, it has not yet been accepted.
> + */
> +static bool remove_pending(struct handshake_net *hn, struct handshake_req *req)
> +{
> +        bool ret = false;
> +
> +        spin_lock(&hn->hn_lock);
> +        if (!list_empty(&req->hr_list)) {
> +                __remove_pending_locked(hn, req);
> +                ret = true;
> +        }
> +        spin_unlock(&hn->hn_lock);
> +
> +        return ret;
> +}
> +
> +struct handshake_req *handshake_req_next(struct handshake_net *hn, int class)
> +{
> +        struct handshake_req *req, *pos;
> +
> +        req = NULL;
> +        spin_lock(&hn->hn_lock);
> +        list_for_each_entry(pos, &hn->hn_requests, hr_list) {
> +                if (pos->hr_proto->hp_handler_class != class)
> +                        continue;
> +                __remove_pending_locked(hn, pos);
> +                req = pos;
> +                break;
> +        }
> +        spin_unlock(&hn->hn_lock);
> +
> +        return req;
> +}
> +
> +/**
> + * handshake_req_submit - Submit a handshake request
> + * @sock: open socket on which to perform the handshake
> + * @req: handshake arguments
> + * @flags: memory allocation flags
> + *
> + * Return values:
> + *   %0: Request queued
> + *   %-EINVAL: Invalid argument
> + *   %-EBUSY: A handshake is already under way for this socket
> + *   %-ESRCH: No handshake agent is available
> + *   %-EAGAIN: Too many pending handshake requests
> + *   %-ENOMEM: Failed to allocate memory
> + *   %-EMSGSIZE: Failed to construct notification message
> + *   %-EOPNOTSUPP: Handshake module not initialized
> + *
> + * A zero return value from handshake_req_submit() means that
> + * exactly one subsequent completion callback is guaranteed.
> + *
> + * A negative return value from handshake_req_submit() means that
> + * no completion callback will be done and that @req has been
> + * destroyed.
> + */
> +int handshake_req_submit(struct socket *sock, struct handshake_req *req,
> +                         gfp_t flags)
> +{
> +        struct handshake_net *hn;
> +        struct net *net;
> +        int ret;
> +
> +        if (!sock || !req || !sock->file) {
> +                kfree(req);
> +                return -EINVAL;
> +        }
> +
> +        req->hr_sk = sock->sk;
> +        if (!req->hr_sk) {
> +                kfree(req);
> +                return -EINVAL;
> +        }
> +        req->hr_odestruct = req->hr_sk->sk_destruct;
> +        req->hr_sk->sk_destruct = handshake_sk_destruct;
> +
> +        ret = -EOPNOTSUPP;
> +        net = sock_net(req->hr_sk);
> +        hn = handshake_pernet(net);
> +        if (!hn)
> +                goto out_err;
> +
> +        ret = -EAGAIN;
> +        if (READ_ONCE(hn->hn_pending) >= hn->hn_pending_max)
> +                goto out_err;
> +
> +        spin_lock(&hn->hn_lock);
> +        ret = -EOPNOTSUPP;
> +        if (test_bit(HANDSHAKE_F_NET_DRAINING, &hn->hn_flags))
> +                goto out_unlock;
> +        ret = -EBUSY;
> +        if (!handshake_req_hash_add(req))
> +                goto out_unlock;
> +        if (!__add_pending_locked(hn, req))
> +                goto out_unlock;
> +        spin_unlock(&hn->hn_lock);
> +
> +        ret = handshake_genl_notify(net, req->hr_proto->hp_handler_class,
> +                                    flags);
> +        if (ret) {
> +                trace_handshake_notify_err(net, req, req->hr_sk, ret);
> +                if (remove_pending(hn, req))
> +                        goto out_err;
> +        }
> +
> +        /* Prevent socket release while a handshake request is pending */
> +        sock_hold(req->hr_sk);
> +
> +        trace_handshake_submit(net, req, req->hr_sk);
> +        return 0;
> +
> +out_unlock:
> +        spin_unlock(&hn->hn_lock);
> +out_err:
> +        trace_handshake_submit_err(net, req, req->hr_sk, ret);
> +        handshake_req_destroy(req);
> +        return ret;
> +}
> +EXPORT_SYMBOL(handshake_req_submit);
> +
> +void handshake_complete(struct handshake_req *req, unsigned int status,
> +                        struct genl_info *info)
> +{
> +        struct sock *sk = req->hr_sk;
> +        struct net *net = sock_net(sk);
> +
> +        if (!test_and_set_bit(HANDSHAKE_F_REQ_COMPLETED, &req->hr_flags)) {
> +                trace_handshake_complete(net, req, sk, status);
> +                req->hr_proto->hp_done(req, status, info);
> +
> +                /* Handshake request is no longer pending */
> +                sock_put(sk);
> +        }
> +}
> +
> +/**
> + * handshake_req_cancel - Cancel an in-progress handshake
> + * @sock: socket on which there is an ongoing handshake
> + *
> + * Request cancellation races with request completion. To determine
> + * who won, callers examine the return value from this function.
> + *
> + * Return values:
> + *   %true - Uncompleted handshake request was canceled or not found
> + *   %false - Handshake request already completed
> + */
> +bool handshake_req_cancel(struct socket *sock)
> +{
> +        struct handshake_req *req;
> +        struct handshake_net *hn;
> +        struct sock *sk;
> +        struct net *net;
> +
> +        sk = sock->sk;
> +        net = sock_net(sk);

We're still seeing NULL pointer dereferences here.
Typically this happens after the remote closes the
connection early.
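
A band-aid in handshake_req_cancel(), just to illustrate the race
(not proposing this as the actual fix):

        sk = sock->sk;
        if (!sk) {
                /* Peer already closed; nothing to look up -- though is
                 * "canceled or not found" (ie, true) the right answer? */
                return true;
        }
        net = sock_net(sk);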

I guess I cannot rely on sock_hold(sk) to prevent someone
from doing a "sock->sk = NULL;"

Would it make more sense for req_submit and req_cancel to
operate on "struct sock *" rather than "struct socket *"?


> +        req = handshake_req_hash_lookup(sk);
> +        if (!req) {
> +                trace_handshake_cancel_none(net, req, sk);
> +                return true;
> +        }
> +
> +        hn = handshake_pernet(net);
> +        if (hn && remove_pending(hn, req)) {
> +                /* Request hadn't been accepted */
> +                trace_handshake_cancel(net, req, sk);
> +                sock_put(sk);
> +                return true;
> +        }
> +        if (test_and_set_bit(HANDSHAKE_F_REQ_COMPLETED, &req->hr_flags)) {
> +                /* Request already completed */
> +                trace_handshake_cancel_busy(net, req, sk);
> +                return false;
> +        }
> +
> +        trace_handshake_cancel(net, req, sk);
> +
> +        /* Handshake request is no longer pending */
> +        sock_put(sk);
> +
> +        return true;
> +}
> +EXPORT_SYMBOL(handshake_req_cancel);


--
Chuck Lever

