Message-Id: <20191018040748.30593-16-toshiaki.makita1@gmail.com>
Date: Fri, 18 Oct 2019 13:07:48 +0900
From: Toshiaki Makita <toshiaki.makita1@...il.com>
To: Alexei Starovoitov <ast@...nel.org>,
Daniel Borkmann <daniel@...earbox.net>,
Martin KaFai Lau <kafai@...com>,
Song Liu <songliubraving@...com>, Yonghong Song <yhs@...com>,
"David S. Miller" <davem@...emloft.net>,
Jakub Kicinski <jakub.kicinski@...ronome.com>,
Jesper Dangaard Brouer <hawk@...nel.org>,
John Fastabend <john.fastabend@...il.com>,
Jamal Hadi Salim <jhs@...atatu.com>,
Cong Wang <xiyou.wangcong@...il.com>,
Jiri Pirko <jiri@...nulli.us>,
Pablo Neira Ayuso <pablo@...filter.org>,
Jozsef Kadlecsik <kadlec@...filter.org>,
Florian Westphal <fw@...len.de>,
Pravin B Shelar <pshelar@....org>
Cc: Toshiaki Makita <toshiaki.makita1@...il.com>,
netdev@...r.kernel.org, bpf@...r.kernel.org,
William Tu <u9012063@...il.com>,
Stanislav Fomichev <sdf@...ichev.me>
Subject: [RFC PATCH v2 bpf-next 15/15] bpf, hashtab: Compare keys in long

memcmp() is generally slow. Compare keys one long-sized word at a time
when the alignment and size allow it. This improves xdp_flow performance.
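
For reference, here is a minimal userspace sketch of the same technique
(hypothetical helper name, not the kernel code). It falls back to memcmp()
unless both pointers and the size are long-aligned, then compares one
unsigned long per iteration. The kernel patch below can additionally
guarantee key1's alignment at build time, so it only needs to check key2
and the size at runtime.

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    static bool keys_equal_longwise(const void *key1, const void *key2,
                                    size_t size)
    {
            const unsigned long *a = key1, *b = key2;

            /* Word-wise comparison is only safe when both pointers and
             * the size are multiples of sizeof(long); otherwise fall
             * back to memcmp().
             */
            if (((uintptr_t)key1 | (uintptr_t)key2 | size) % sizeof(long))
                    return !memcmp(key1, key2, size);

            for (; size > 0; size -= sizeof(long))
                    if (*a++ != *b++)
                            return false;

            return true;
    }

    int main(void)
    {
            unsigned long k1[2] = { 0x1234, 0x5678 };
            unsigned long k2[2] = { 0x1234, 0x5678 };

            /* Exit status 0 when the keys match. */
            return keys_equal_longwise(k1, k2, sizeof(k1)) ? 0 : 1;
    }
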
Signed-off-by: Toshiaki Makita <toshiaki.makita1@...il.com>
---
 kernel/bpf/hashtab.c | 27 +++++++++++++++++++++++++--
 1 file changed, 25 insertions(+), 2 deletions(-)

diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 22066a6..8b5ffd4 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -417,6 +417,29 @@ static inline struct hlist_nulls_head *select_bucket(struct bpf_htab *htab, u32
 	return &__select_bucket(htab, hash)->head;
 }
 
+/* key1 must be aligned to sizeof(long) */
+static bool key_equal(void *key1, void *key2, u32 size)
+{
+	/* key1 comes from struct htab_elem, whose key offset is long-aligned */
+	BUILD_BUG_ON(!IS_ALIGNED(offsetof(struct htab_elem, key),
+				 sizeof(long)));
+
+	if (IS_ALIGNED((unsigned long)key2 | (unsigned long)size,
+		       sizeof(long))) {
+		unsigned long *lkey1, *lkey2;
+
+		for (lkey1 = key1, lkey2 = key2; size > 0;
+		     lkey1++, lkey2++, size -= sizeof(long)) {
+			if (*lkey1 != *lkey2)
+				return false;
+		}
+
+		return true;
+	}
+
+	return !memcmp(key1, key2, size);
+}
+
 /* this lookup function can only be called with bucket lock taken */
 static struct htab_elem *lookup_elem_raw(struct hlist_nulls_head *head, u32 hash,
 					 void *key, u32 key_size)
@@ -425,7 +448,7 @@ static struct htab_elem *lookup_elem_raw(struct hlist_nulls_head *head, u32 hash
 	struct htab_elem *l;
 
 	hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
-		if (l->hash == hash && !memcmp(&l->key, key, key_size))
+		if (l->hash == hash && key_equal(&l->key, key, key_size))
 			return l;
 
 	return NULL;
@@ -444,7 +467,7 @@ static struct htab_elem *lookup_nulls_elem_raw(struct hlist_nulls_head *head,
 
 again:
 	hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
-		if (l->hash == hash && !memcmp(&l->key, key, key_size))
+		if (l->hash == hash && key_equal(&l->key, key, key_size))
 			return l;
 
 	if (unlikely(get_nulls_value(n) != (hash & (n_buckets - 1))))
--
1.8.3.1