Message-Id: <1479341856-30320-65-git-send-email-mawilcox@linuxonhyperv.com>
Date: Wed, 16 Nov 2016 16:17:29 -0800
From: Matthew Wilcox <mawilcox@...uxonhyperv.com>
To: linux-kernel@...r.kernel.org,
Andrew Morton <akpm@...ux-foundation.org>,
Konstantin Khlebnikov <koct9i@...il.com>,
Ross Zwisler <ross.zwisler@...ux.intel.com>
Cc: linux-fsdevel@...r.kernel.org,
Matthew Wilcox <mawilcox@...rosoft.com>, linux-mm@...ck.org,
"Kirill A . Shutemov" <kirill.shutemov@...ux.intel.com>
Subject: [PATCH 25/29] rxrpc: Abstract away knowledge of IDR internals
From: Matthew Wilcox <mawilcox@...rosoft.com>
Add idr_get_cursor() / idr_set_cursor() APIs, and remove the rounding
up to IDR_SIZE.
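
As an illustrative usage sketch (not part of this patch; the example_* names
are hypothetical), the new accessors let a caller seed and read the cyclic
allocation point without reaching into struct idr:

	static DEFINE_IDR(example_ids);

	/* Seed the cyclic allocator: the next idr_alloc_cyclic() will try
	 * @seed first if it is free, otherwise search onward from there.
	 */
	static void example_seed_cursor(unsigned int seed)
	{
		idr_set_cursor(&example_ids, seed);
	}

	/* Read back the current allocation point. */
	static unsigned int example_read_cursor(void)
	{
		return idr_get_cursor(&example_ids);
	}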
Signed-off-by: Matthew Wilcox <mawilcox@...rosoft.com>
---
include/linux/idr.h | 26 ++++++++++++++++++++++++++
net/rxrpc/af_rxrpc.c | 11 ++++++-----
net/rxrpc/conn_client.c | 4 ++--
3 files changed, 34 insertions(+), 7 deletions(-)
diff --git a/include/linux/idr.h b/include/linux/idr.h
index 3639a28..1eb755f 100644
--- a/include/linux/idr.h
+++ b/include/linux/idr.h
@@ -56,6 +56,32 @@ struct idr {
#define DEFINE_IDR(name) struct idr name = IDR_INIT(name)
/**
+ * idr_get_cursor - Return the current position of the cyclic allocator
+ * @idr: idr handle
+ *
+ * The value returned is the value that will be next returned from
+ * idr_alloc_cyclic() if it is free (otherwise the search will start from
+ * this position).
+ */
+static inline unsigned int idr_get_cursor(struct idr *idr)
+{
+ return READ_ONCE(idr->cur);
+}
+
+/**
+ * idr_set_cursor - Set the current position of the cyclic allocator
+ * @idr: idr handle
+ * @val: new position
+ *
+ * The next call to idr_alloc_cyclic() will return @val if it is free
+ * (otherwise the search will start from this position).
+ */
+static inline void idr_set_cursor(struct idr *idr, unsigned int val)
+{
+ WRITE_ONCE(idr->cur, val);
+}
+
+/**
* DOC: idr sync
* idr synchronization (stolen from radix-tree.h)
*
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index 2d59c9b..5f63f6d 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -762,16 +762,17 @@ static const struct net_proto_family rxrpc_family_ops = {
static int __init af_rxrpc_init(void)
{
int ret = -1;
+ unsigned int tmp;
BUILD_BUG_ON(sizeof(struct rxrpc_skb_priv) > FIELD_SIZEOF(struct sk_buff, cb));
get_random_bytes(&rxrpc_epoch, sizeof(rxrpc_epoch));
rxrpc_epoch |= RXRPC_RANDOM_EPOCH;
- get_random_bytes(&rxrpc_client_conn_ids.cur,
- sizeof(rxrpc_client_conn_ids.cur));
- rxrpc_client_conn_ids.cur &= 0x3fffffff;
- if (rxrpc_client_conn_ids.cur == 0)
- rxrpc_client_conn_ids.cur = 1;
+ get_random_bytes(&tmp, sizeof(tmp));
+ tmp &= 0x3fffffff;
+ if (tmp == 0)
+ tmp = 1;
+ idr_set_cursor(&rxrpc_client_conn_ids, tmp);
ret = -ENOMEM;
rxrpc_call_jar = kmem_cache_create(
diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c
index 60ef960..9706f60 100644
--- a/net/rxrpc/conn_client.c
+++ b/net/rxrpc/conn_client.c
@@ -263,12 +263,12 @@ static bool rxrpc_may_reuse_conn(struct rxrpc_connection *conn)
* times the maximum number of client conns away from the current
* allocation point to try and keep the IDs concentrated.
*/
- id_cursor = READ_ONCE(rxrpc_client_conn_ids.cur);
+ id_cursor = idr_get_cursor(&rxrpc_client_conn_ids);
id = conn->proto.cid >> RXRPC_CIDSHIFT;
distance = id - id_cursor;
if (distance < 0)
distance = -distance;
- limit = round_up(rxrpc_max_client_connections, IDR_SIZE) * 4;
+ limit = rxrpc_max_client_connections * 4;
if (distance > limit)
goto mark_dont_reuse;
--
2.10.2