Message-ID: <20210331052357.GA15610@work>
Date: Wed, 31 Mar 2021 10:53:57 +0530
From: Manivannan Sadhasivam <manivannan.sadhasivam@...aro.org>
To: "Matthew Wilcox (Oracle)" <willy@...radead.org>
Cc: Bjorn Andersson <bjorn.andersson@...aro.org>,
Jakub Kicinski <kuba@...nel.org>,
"David S. Miller" <davem@...emloft.net>, netdev@...r.kernel.org,
Eric Biggers <ebiggers@...gle.com>
Subject: Re: [PATCH] qrtr: Convert qrtr_ports from IDR to XArray
On Wed, Mar 31, 2021 at 05:36:42AM +0100, Matthew Wilcox (Oracle) wrote:
> The XArray interface is easier for this driver to use. This also fixes
> a bug caused by the improper use of GFP_ATOMIC.
>
> Signed-off-by: Matthew Wilcox (Oracle) <willy@...radead.org>
Reviewed-by: Manivannan Sadhasivam <manivannan.sadhasivam@...aro.org>
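
For anyone unfamiliar with the allocating XArray, here is a minimal
sketch of the pattern the patch moves to (example_ports / example_assign
are hypothetical names used only for illustration, not part of the
patch). The point is that xa_alloc() and xa_insert() take the range as
an XA_LIMIT and serialise updates with the XArray's internal xa_lock, so
the external qrtr_port_lock mutex goes away and the allocations can
simply use GFP_KERNEL from this sleepable context:

#include <linux/errno.h>
#include <linux/xarray.h>

/* Illustrative sketch only -- hypothetical names, mirroring the
 * allocation pattern used by the patch.
 */
static DEFINE_XARRAY_ALLOC(example_ports);

#define EXAMPLE_EPH_RANGE	XA_LIMIT(0x4000, 0x7fff)

static int example_assign(void *entry, u32 *port)
{
	int rc;

	if (!*port) {
		/* Pick any free index in the range; *port is set on
		 * success.  The xa_*() wrappers take xa_lock themselves,
		 * so the caller holds no lock and GFP_KERNEL is fine.
		 */
		rc = xa_alloc(&example_ports, port, entry,
			      EXAMPLE_EPH_RANGE, GFP_KERNEL);
	} else {
		/* Claim a specific index; -EBUSY if already occupied. */
		rc = xa_insert(&example_ports, *port, entry, GFP_KERNEL);
	}

	return rc == -EBUSY ? -EADDRINUSE : rc;
}

Both xa_alloc() and xa_insert() report a taken or exhausted slot as
-EBUSY, which is why the patch now maps -EBUSY (rather than IDR's
-ENOSPC) to -EADDRINUSE.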
Thanks,
Mani
> ---
> net/qrtr/qrtr.c | 42 ++++++++++++++----------------------------
> 1 file changed, 14 insertions(+), 28 deletions(-)
>
> diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c
> index dfc820ee553a..4b46c69e14ab 100644
> --- a/net/qrtr/qrtr.c
> +++ b/net/qrtr/qrtr.c
> @@ -20,6 +20,8 @@
> /* auto-bind range */
> #define QRTR_MIN_EPH_SOCKET 0x4000
> #define QRTR_MAX_EPH_SOCKET 0x7fff
> +#define QRTR_EPH_PORT_RANGE \
> + XA_LIMIT(QRTR_MIN_EPH_SOCKET, QRTR_MAX_EPH_SOCKET)
>
> /**
> * struct qrtr_hdr_v1 - (I|R)PCrouter packet header version 1
> @@ -106,8 +108,7 @@ static LIST_HEAD(qrtr_all_nodes);
> static DEFINE_MUTEX(qrtr_node_lock);
>
> /* local port allocation management */
> -static DEFINE_IDR(qrtr_ports);
> -static DEFINE_MUTEX(qrtr_port_lock);
> +static DEFINE_XARRAY_ALLOC(qrtr_ports);
>
> /**
> * struct qrtr_node - endpoint node
> @@ -653,7 +654,7 @@ static struct qrtr_sock *qrtr_port_lookup(int port)
> port = 0;
>
> rcu_read_lock();
> - ipc = idr_find(&qrtr_ports, port);
> + ipc = xa_load(&qrtr_ports, port);
> if (ipc)
> sock_hold(&ipc->sk);
> rcu_read_unlock();
> @@ -695,9 +696,7 @@ static void qrtr_port_remove(struct qrtr_sock *ipc)
>
> __sock_put(&ipc->sk);
>
> - mutex_lock(&qrtr_port_lock);
> - idr_remove(&qrtr_ports, port);
> - mutex_unlock(&qrtr_port_lock);
> + xa_erase(&qrtr_ports, port);
>
> /* Ensure that if qrtr_port_lookup() did enter the RCU read section we
> * wait for it to up increment the refcount */
> @@ -716,29 +715,20 @@ static void qrtr_port_remove(struct qrtr_sock *ipc)
> */
> static int qrtr_port_assign(struct qrtr_sock *ipc, int *port)
> {
> - u32 min_port;
> int rc;
>
> - mutex_lock(&qrtr_port_lock);
> if (!*port) {
> - min_port = QRTR_MIN_EPH_SOCKET;
> - rc = idr_alloc_u32(&qrtr_ports, ipc, &min_port, QRTR_MAX_EPH_SOCKET, GFP_ATOMIC);
> - if (!rc)
> - *port = min_port;
> + rc = xa_alloc(&qrtr_ports, port, ipc, QRTR_EPH_PORT_RANGE,
> + GFP_KERNEL);
> } else if (*port < QRTR_MIN_EPH_SOCKET && !capable(CAP_NET_ADMIN)) {
> rc = -EACCES;
> } else if (*port == QRTR_PORT_CTRL) {
> - min_port = 0;
> - rc = idr_alloc_u32(&qrtr_ports, ipc, &min_port, 0, GFP_ATOMIC);
> + rc = xa_insert(&qrtr_ports, 0, ipc, GFP_KERNEL);
> } else {
> - min_port = *port;
> - rc = idr_alloc_u32(&qrtr_ports, ipc, &min_port, *port, GFP_ATOMIC);
> - if (!rc)
> - *port = min_port;
> + rc = xa_insert(&qrtr_ports, *port, ipc, GFP_KERNEL);
> }
> - mutex_unlock(&qrtr_port_lock);
>
> - if (rc == -ENOSPC)
> + if (rc == -EBUSY)
> return -EADDRINUSE;
> else if (rc < 0)
> return rc;
> @@ -752,20 +742,16 @@ static int qrtr_port_assign(struct qrtr_sock *ipc, int *port)
> static void qrtr_reset_ports(void)
> {
> struct qrtr_sock *ipc;
> - int id;
> -
> - mutex_lock(&qrtr_port_lock);
> - idr_for_each_entry(&qrtr_ports, ipc, id) {
> - /* Don't reset control port */
> - if (id == 0)
> - continue;
> + unsigned long index;
>
> + rcu_read_lock();
> + xa_for_each_start(&qrtr_ports, index, ipc, 1) {
> sock_hold(&ipc->sk);
> ipc->sk.sk_err = ENETRESET;
> ipc->sk.sk_error_report(&ipc->sk);
> sock_put(&ipc->sk);
> }
> - mutex_unlock(&qrtr_port_lock);
> + rcu_read_unlock();
> }
>
> /* Bind socket to address.
> --
> 2.30.2
>
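On the read side, xa_load() and xa_for_each_start() only need
rcu_read_lock(), which is what lets qrtr_port_lock go away entirely. A
minimal sketch of the iteration pattern, again with a hypothetical
helper name:

#include <linux/rcupdate.h>
#include <linux/xarray.h>

/* Hypothetical helper mirroring qrtr_reset_ports(): walk every entry
 * except index 0 (the control port) under RCU alone.
 */
static void example_reset(struct xarray *ports)
{
	unsigned long index;
	void *entry;

	rcu_read_lock();
	xa_for_each_start(ports, index, entry, 1) {
		/* Take a reference here, while still inside the RCU read
		 * section, before using the entry -- the real code wraps
		 * the ENETRESET notification in sock_hold()/sock_put().
		 */
	}
	rcu_read_unlock();
}

xa_erase() likewise needs no external lock; it takes xa_lock internally,
so dropping qrtr_port_lock is safe on the removal path too.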