Message-ID: <20250108145641.GA21926@j66a10360.sqa.eu95>
Date: Wed, 8 Jan 2025 22:56:41 +0800
From: "D. Wythe" <alibuda@...ux.alibaba.com    >
To: John Ousterhout <ouster@...stanford.edu>
Cc: netdev@...r.kernel.org, pabeni@...hat.com, edumazet@...gle.com,
	horms@...nel.org, kuba@...nel.org
Subject: Re: [PATCH net-next v5 07/12] net: homa: create homa_sock.h and homa_sock.c

On Mon, Jan 06, 2025 at 10:12:13AM -0800, John Ousterhout wrote:
> These files provide functions for managing the state that Homa keeps
> for each open Homa socket.
> 
> Signed-off-by: John Ousterhout <ouster@...stanford.edu>
> ---
>  net/homa/homa_sock.c | 382 ++++++++++++++++++++++++++++++++++++++++
>  net/homa/homa_sock.h | 406 +++++++++++++++++++++++++++++++++++++++++++
>  2 files changed, 788 insertions(+)
>  create mode 100644 net/homa/homa_sock.c
>  create mode 100644 net/homa/homa_sock.h
> 
> diff --git a/net/homa/homa_sock.c b/net/homa/homa_sock.c
> new file mode 100644
> index 000000000000..723752c6d055
> --- /dev/null
> +++ b/net/homa/homa_sock.c
> @@ -0,0 +1,382 @@
> +// SPDX-License-Identifier: BSD-2-Clause
> +
> +/* This file manages homa_sock and homa_socktab objects. */
> +
> +#include "homa_impl.h"
> +#include "homa_peer.h"
> +#include "homa_pool.h"
> +
> +/**
> + * homa_sock_init() - Constructor for homa_sock objects; initializes
> + * the Homa-specific state of an open socket.
> + * @hsk:   The socket to initialize; previous contents are discarded.
> + * @homa:  Overall information about the Homa transport.
> + *
> + * Return: 0 for success, otherwise a negative errno.
> + */
> +int homa_sock_init(struct homa_sock *hsk, struct homa *homa)
> +{
> +	struct homa_socktab *socktab = homa->port_map;
> +	int result = 0;
> +	int i;
> +
> +	spin_lock_bh(&socktab->write_lock);
> +	atomic_set(&hsk->protect_count, 0);
> +	spin_lock_init(&hsk->lock);
> +	hsk->last_locker = "none";
> +	hsk->homa = homa;
> +	hsk->ip_header_length = (hsk->inet.sk.sk_family == AF_INET)
> +			? HOMA_IPV4_HEADER_LENGTH : HOMA_IPV6_HEADER_LENGTH;
> +	hsk->shutdown = false;
> +	while (1) {
> +		if (homa->next_client_port < HOMA_MIN_DEFAULT_PORT)
> +			homa->next_client_port = HOMA_MIN_DEFAULT_PORT;
> +		if (!homa_sock_find(socktab, homa->next_client_port))
> +			break;
> +		homa->next_client_port++;

It seems there is a possibility of an infinite loop if all the ports
are in use: nothing bounds the search.
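
A bounded scan could fail gracefully instead. A rough sketch (untested;
it assumes client ports are confined to the 16-bit range starting at
HOMA_MIN_DEFAULT_PORT, and that returning -EADDRINUSE is acceptable
here):

	/* Sketch: visit each candidate port at most once, then give
	 * up instead of looping forever.
	 */
	int tries = 65536 - HOMA_MIN_DEFAULT_PORT;

	while (1) {
		if (tries-- <= 0) {
			spin_unlock_bh(&socktab->write_lock);
			return -EADDRINUSE;
		}
		if (homa->next_client_port < HOMA_MIN_DEFAULT_PORT ||
		    homa->next_client_port > 65535)
			homa->next_client_port = HOMA_MIN_DEFAULT_PORT;
		if (!homa_sock_find(socktab, homa->next_client_port))
			break;
		homa->next_client_port++;
	}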

> +	}
> +	hsk->port = homa->next_client_port;
> +	hsk->inet.inet_num = hsk->port;
> +	hsk->inet.inet_sport = htons(hsk->port);
> +	homa->next_client_port++;
> +	hsk->socktab_links.sock = hsk;
> +	hlist_add_head_rcu(&hsk->socktab_links.hash_links,
> +			   &socktab->buckets[homa_port_hash(hsk->port)]);
> +	INIT_LIST_HEAD(&hsk->active_rpcs);
> +	INIT_LIST_HEAD(&hsk->dead_rpcs);
> +	hsk->dead_skbs = 0;
> +	INIT_LIST_HEAD(&hsk->waiting_for_bufs);
> +	INIT_LIST_HEAD(&hsk->ready_requests);
> +	INIT_LIST_HEAD(&hsk->ready_responses);
> +	INIT_LIST_HEAD(&hsk->request_interests);
> +	INIT_LIST_HEAD(&hsk->response_interests);
> +	for (i = 0; i < HOMA_CLIENT_RPC_BUCKETS; i++) {
> +		struct homa_rpc_bucket *bucket = &hsk->client_rpc_buckets[i];
> +
> +		spin_lock_init(&bucket->lock);
> +		INIT_HLIST_HEAD(&bucket->rpcs);
> +		bucket->id = i;
> +	}
> +	for (i = 0; i < HOMA_SERVER_RPC_BUCKETS; i++) {
> +		struct homa_rpc_bucket *bucket = &hsk->server_rpc_buckets[i];
> +
> +		spin_lock_init(&bucket->lock);
> +		INIT_HLIST_HEAD(&bucket->rpcs);
> +		bucket->id = i + 1000000;
> +	}
> +	hsk->buffer_pool = kzalloc(sizeof(*hsk->buffer_pool), GFP_KERNEL);

This should use GFP_ATOMIC: kzalloc() is called here while BH is
disabled (between spin_lock_bh() and spin_unlock_bh() above), and
GFP_KERNEL allocations may sleep, which is not allowed in that context.
I noticed that Homa frequently uses GFP_KERNEL with BH disabled;
please fix all such call sites.
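
Alternatively, the allocation could be hoisted out of the locked
region so GFP_KERNEL remains legal. A sketch (untested; it assumes
hsk->buffer_pool is a struct homa_pool * as the homa_pool.h include
suggests, and that nothing under the lock needs the pool before the
assignment):

	/* Sketch: do the sleepable allocation before spin_lock_bh(). */
	struct homa_pool *pool;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return -ENOMEM;

	spin_lock_bh(&socktab->write_lock);
	/* ... existing initialization ... */
	hsk->buffer_pool = pool;
	spin_unlock_bh(&socktab->write_lock);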

> +	if (!hsk->buffer_pool)
> +		result = -ENOMEM;
> +	spin_unlock_bh(&socktab->write_lock);
> +	return result;
> +}
> +
> +/*
> -- 
> 2.34.1
> 
