Message-ID: <9aa67df2-a539-29eb-c9e9-4dddcb73ec19@gmail.com>
Date: Fri, 5 Jun 2020 09:43:25 -0700
From: Eric Dumazet <eric.dumazet@...il.com>
To: Matthew Wilcox <willy@...radead.org>,
Bjorn Andersson <bjorn.andersson@...aro.org>,
Manivannan Sadhasivam <manivannan.sadhasivam@...aro.org>,
Jakub Kicinski <kuba@...nel.org>,
"David S. Miller" <davem@...emloft.net>, netdev@...r.kernel.org,
Eric Biggers <ebiggers@...gle.com>
Subject: Re: [PATCH] qrtr: Convert qrtr_ports from IDR to XArray
On 6/5/20 5:00 AM, Matthew Wilcox wrote:
> From: "Matthew Wilcox (Oracle)" <willy@...radead.org>
>
> The XArray interface is easier for this driver to use.  This also
> fixes a bug caused by the improper use of GFP_ATOMIC.
>
This does not look like a stable candidate.

If you try to add a Fixes: tag, you might discover that this bug is old,
and I do not believe XArray has been backported to the stable branches.

Please submit a fix suitable for old kernels (as old as v4.7) first.
Then, when net-next reopens in ~2 weeks, the XArray conversion can be
proposed.
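
For stable, something along these lines should be enough -- untested
sketch, assuming the GFP_ATOMIC problem is only that atomic allocations
can fail spuriously while qrtr_port_lock (a mutex, so sleeping is
allowed) is held:

	mutex_lock(&qrtr_port_lock);
	if (!*port) {
		/* GFP_KERNEL is safe under a mutex and, unlike
		 * GFP_ATOMIC, will not fail spuriously under
		 * memory pressure.
		 */
		rc = idr_alloc(&qrtr_ports, ipc,
			       QRTR_MIN_EPH_SOCKET, QRTR_MAX_EPH_SOCKET + 1,
			       GFP_KERNEL);
		if (rc >= 0)
			*port = rc;
	} else if (*port < QRTR_MIN_EPH_SOCKET && !capable(CAP_NET_ADMIN)) {
		rc = -EACCES;
	} else if (*port == QRTR_PORT_CTRL) {
		rc = idr_alloc(&qrtr_ports, ipc, 0, 1, GFP_KERNEL);
	} else {
		rc = idr_alloc(&qrtr_ports, ipc, *port, *port + 1,
			       GFP_KERNEL);
		if (rc >= 0)
			*port = rc;
	}
	mutex_unlock(&qrtr_port_lock);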
Thanks.
> Signed-off-by: Matthew Wilcox (Oracle) <willy@...radead.org>
> ---
> net/qrtr/qrtr.c | 39 +++++++++++++--------------------------
> 1 file changed, 13 insertions(+), 26 deletions(-)
>
> diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c
> index 2d8d6131bc5f..488f8f326ee5 100644
> --- a/net/qrtr/qrtr.c
> +++ b/net/qrtr/qrtr.c
> @@ -20,6 +20,7 @@
> /* auto-bind range */
> #define QRTR_MIN_EPH_SOCKET 0x4000
> #define QRTR_MAX_EPH_SOCKET 0x7fff
> +#define QRTR_PORT_RANGE XA_LIMIT(QRTR_MIN_EPH_SOCKET, QRTR_MAX_EPH_SOCKET)
>
> /**
> * struct qrtr_hdr_v1 - (I|R)PCrouter packet header version 1
> @@ -106,8 +107,7 @@ static LIST_HEAD(qrtr_all_nodes);
> static DEFINE_MUTEX(qrtr_node_lock);
>
> /* local port allocation management */
> -static DEFINE_IDR(qrtr_ports);
> -static DEFINE_MUTEX(qrtr_port_lock);
> +static DEFINE_XARRAY_ALLOC(qrtr_ports);
>
> /**
> * struct qrtr_node - endpoint node
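
(In case it is not obvious why qrtr_port_lock can be removed outright:
the XArray embeds its own spinlock, so modification calls like

	xa_erase(&qrtr_ports, port);	/* takes xa_lock internally */

are serialized without any external lock, while lookups stay under RCU
as before.  That is my reading of Documentation/core-api/xarray.rst,
anyway.)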
> @@ -623,7 +623,7 @@ static struct qrtr_sock *qrtr_port_lookup(int port)
> port = 0;
>
> rcu_read_lock();
> - ipc = idr_find(&qrtr_ports, port);
> + ipc = xa_load(&qrtr_ports, port);
> if (ipc)
> sock_hold(&ipc->sk);
> rcu_read_unlock();
> @@ -665,9 +665,7 @@ static void qrtr_port_remove(struct qrtr_sock *ipc)
>
> __sock_put(&ipc->sk);
>
> - mutex_lock(&qrtr_port_lock);
> - idr_remove(&qrtr_ports, port);
> - mutex_unlock(&qrtr_port_lock);
> + xa_erase(&qrtr_ports, port);
>
> /* Ensure that if qrtr_port_lookup() did enter the RCU read section we
> * wait for it to up increment the refcount */
> @@ -688,25 +686,18 @@ static int qrtr_port_assign(struct qrtr_sock *ipc, int *port)
> {
> int rc;
>
> - mutex_lock(&qrtr_port_lock);
> if (!*port) {
> - rc = idr_alloc(&qrtr_ports, ipc,
> - QRTR_MIN_EPH_SOCKET, QRTR_MAX_EPH_SOCKET + 1,
> - GFP_ATOMIC);
> - if (rc >= 0)
> - *port = rc;
> + rc = xa_alloc(&qrtr_ports, port, ipc, QRTR_PORT_RANGE,
> + GFP_KERNEL);
> } else if (*port < QRTR_MIN_EPH_SOCKET && !capable(CAP_NET_ADMIN)) {
> rc = -EACCES;
> } else if (*port == QRTR_PORT_CTRL) {
> - rc = idr_alloc(&qrtr_ports, ipc, 0, 1, GFP_ATOMIC);
> + rc = xa_insert(&qrtr_ports, 0, ipc, GFP_KERNEL);
> } else {
> - rc = idr_alloc(&qrtr_ports, ipc, *port, *port + 1, GFP_ATOMIC);
> - if (rc >= 0)
> - *port = rc;
> + rc = xa_insert(&qrtr_ports, *port, ipc, GFP_KERNEL);
> }
> - mutex_unlock(&qrtr_port_lock);
>
> - if (rc == -ENOSPC)
> + if (rc == -EBUSY)
> return -EADDRINUSE;
> else if (rc < 0)
> return rc;
> @@ -720,20 +711,16 @@ static int qrtr_port_assign(struct qrtr_sock *ipc, int *port)
> static void qrtr_reset_ports(void)
> {
> struct qrtr_sock *ipc;
> - int id;
> -
> - mutex_lock(&qrtr_port_lock);
> - idr_for_each_entry(&qrtr_ports, ipc, id) {
> - /* Don't reset control port */
> - if (id == 0)
> - continue;
> + unsigned long index;
>
> + rcu_read_lock();
> + xa_for_each_start(&qrtr_ports, index, ipc, 1) {
> sock_hold(&ipc->sk);
> ipc->sk.sk_err = ENETRESET;
> ipc->sk.sk_error_report(&ipc->sk);
> sock_put(&ipc->sk);
> }
> - mutex_unlock(&qrtr_port_lock);
> + rcu_read_unlock();
> }
>
> /* Bind socket to address.
>
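
One more note on the qrtr_reset_ports() hunk: if I read the XArray docs
right, xa_for_each_start() can run under rcu_read_lock() and tolerates
concurrent modification (it may miss entries added during the walk,
which is fine for a reset).  Starting the walk at index 1 replaces the
explicit "Don't reset control port" test, since the control port sits
at index 0, and each socket is still pinned with sock_hold() before it
is touched, so dropping the mutex there looks fine to me.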